diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 302d8bc29f..d8d84686f7 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,7 +1,7 @@ { "ImportPath": "k8s.io/ingress", - "GoVersion": "go1.7", - "GodepVersion": "v74", + "GoVersion": "go1.8", + "GodepVersion": "v79", "Packages": [ "./..." ], @@ -9,7 +9,7 @@ { "ImportPath": "bitbucket.org/ww/goautoneg", "Comment": "null-5", - "Rev": "'75cd24fc2f2c2a2088577d12123ddee5f54e0675'" + "Rev": "75cd24fc2f2c2a2088577d12123ddee5f54e0675" }, { "ImportPath": "cloud.google.com/go/compute/metadata", @@ -34,55 +34,10 @@ "ImportPath": "github.com/beorn7/perks/quantile", "Rev": "3ac7bf7a47d159a033b107610db8a1b6575507a4" }, - { - "ImportPath": "github.com/blang/semver", - "Comment": "v3.0.1", - "Rev": "31b736133b98f26d5e078ec9eb591666edfd091f" - }, - { - "ImportPath": "github.com/coreos/go-oidc/http", - "Rev": "5644a2f50e2d2d5ba0b474bc5bc55fea1925936d" - }, - { - "ImportPath": "github.com/coreos/go-oidc/jose", - "Rev": "5644a2f50e2d2d5ba0b474bc5bc55fea1925936d" - }, - { - "ImportPath": "github.com/coreos/go-oidc/key", - "Rev": "5644a2f50e2d2d5ba0b474bc5bc55fea1925936d" - }, - { - "ImportPath": "github.com/coreos/go-oidc/oauth2", - "Rev": "5644a2f50e2d2d5ba0b474bc5bc55fea1925936d" - }, - { - "ImportPath": "github.com/coreos/go-oidc/oidc", - "Rev": "5644a2f50e2d2d5ba0b474bc5bc55fea1925936d" - }, - { - "ImportPath": "github.com/coreos/pkg/health", - "Comment": "v2-8-gfa29b1d", - "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" - }, - { - "ImportPath": "github.com/coreos/pkg/httputil", - "Comment": "v2-8-gfa29b1d", - "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" - }, - { - "ImportPath": "github.com/coreos/pkg/timeutil", - "Comment": "v2-8-gfa29b1d", - "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" - }, { "ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d" }, - { - "ImportPath": "github.com/dgrijalva/jwt-go", - "Comment": "v3.0.0-4-g01aeca5", - "Rev": "01aeca54ebda6e0fbfafd0a524d234159c05ec20" - }, { "ImportPath": "github.com/docker/distribution/digest", "Comment": "v2.4.0-rc.1-38-gcd27f17", @@ -93,78 +48,20 @@ "Comment": "v2.4.0-rc.1-38-gcd27f17", "Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51" }, - { - "ImportPath": "github.com/docker/engine-api/types", - "Comment": "v0.3.1-78-gdea108d", - "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" - }, - { - "ImportPath": "github.com/docker/engine-api/types/blkiodev", - "Comment": "v0.3.1-78-gdea108d", - "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" - }, - { - "ImportPath": "github.com/docker/engine-api/types/container", - "Comment": "v0.3.1-78-gdea108d", - "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" - }, - { - "ImportPath": "github.com/docker/engine-api/types/filters", - "Comment": "v0.3.1-78-gdea108d", - "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" - }, - { - "ImportPath": "github.com/docker/engine-api/types/network", - "Comment": "v0.3.1-78-gdea108d", - "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" - }, - { - "ImportPath": "github.com/docker/engine-api/types/registry", - "Comment": "v0.3.1-78-gdea108d", - "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" - }, - { - "ImportPath": "github.com/docker/engine-api/types/strslice", - "Comment": "v0.3.1-78-gdea108d", - "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" - }, - { - "ImportPath": "github.com/docker/engine-api/types/versions", - "Comment": "v0.3.1-78-gdea108d", - "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" - }, - { - "ImportPath": 
"github.com/docker/go-connections/nat", - "Comment": "v0.2.0-2-gf549a93", - "Rev": "f549a9393d05688dff0992ef3efd8bbe6c628aeb" - }, - { - "ImportPath": "github.com/docker/go-units", - "Comment": "v0.1.0-21-g0bbddae", - "Rev": "0bbddae09c5a5419a8c6dcdd7ff90da3d450393b" - }, { "ImportPath": "github.com/emicklei/go-restful", - "Comment": "v1.2-79-g89ef8af", - "Rev": "89ef8af493ab468a45a42bb0d89a06fccdd2fb22" + "Comment": "v1.2-96-g09691a3", + "Rev": "09691a3b6378b740595c1002f40c34dd5f218a22" }, { "ImportPath": "github.com/emicklei/go-restful/log", - "Comment": "v1.2-79-g89ef8af", - "Rev": "89ef8af493ab468a45a42bb0d89a06fccdd2fb22" + "Comment": "v1.2-96-g09691a3", + "Rev": "09691a3b6378b740595c1002f40c34dd5f218a22" }, { "ImportPath": "github.com/emicklei/go-restful/swagger", - "Comment": "v1.2-79-g89ef8af", - "Rev": "89ef8af493ab468a45a42bb0d89a06fccdd2fb22" - }, - { - "ImportPath": "github.com/evanphx/json-patch", - "Rev": "465937c80b3c07a7c7ad20cc934898646a91c1de" - }, - { - "ImportPath": "github.com/exponent-io/jsonpath", - "Rev": "d6023ce2651d8eafb5c75bb0c7167536102ec9f5" + "Comment": "v1.2-96-g09691a3", + "Rev": "09691a3b6378b740595c1002f40c34dd5f218a22" }, { "ImportPath": "github.com/ghodss/yaml", @@ -188,12 +85,12 @@ }, { "ImportPath": "github.com/gogo/protobuf/proto", - "Comment": "v0.2-33-ge18d7aa", + "Comment": "v0.2-33-ge18d7aa8", "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173" }, { "ImportPath": "github.com/gogo/protobuf/sortkeys", - "Comment": "v0.2-33-ge18d7aa", + "Comment": "v0.2-33-ge18d7aa8", "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173" }, { @@ -210,7 +107,7 @@ }, { "ImportPath": "github.com/google/gofuzz", - "Rev": "bbcb9da2d746f8bdbd6a936686a0a6067ada0ec5" + "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" }, { "ImportPath": "github.com/howeyc/gopass", @@ -221,14 +118,6 @@ "Comment": "0.1.3-8-g6633656", "Rev": "6633656539c1639d9d78127b7d47c622b5d7b6dc" }, - { - "ImportPath": "github.com/inconshreveable/mousetrap", - "Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" - }, - { - "ImportPath": "github.com/jonboulle/clockwork", - "Rev": "72f9bd7c4e0c2a40055ab3d0f09654f730cce982" - }, { "ImportPath": "github.com/juju/ratelimit", "Rev": "77ed1c8a01217656d2080ad51981f6e99adaa177" @@ -282,13 +171,13 @@ }, { "ImportPath": "github.com/prometheus/client_golang/prometheus", - "Comment": "v0.8.0-42-g575f371", - "Rev": "575f371f7862609249a1be4c9145f429fe065e32" + "Comment": "v0.8.0-60-g738ed6c", + "Rev": "738ed6c0b9b30ec936d4b74c1600ae76324efff5" }, { "ImportPath": "github.com/prometheus/client_golang/prometheus/promhttp", - "Comment": "v0.8.0-42-g575f371", - "Rev": "575f371f7862609249a1be4c9145f429fe065e32" + "Comment": "v0.8.0-60-g738ed6c", + "Rev": "738ed6c0b9b30ec936d4b74c1600ae76324efff5" }, { "ImportPath": "github.com/prometheus/client_model/go", @@ -297,35 +186,31 @@ }, { "ImportPath": "github.com/prometheus/common/expfmt", - "Rev": "6d76b79f239843a04e8ad8dfd8fcadfa3920236f" + "Rev": "49fee292b27bfff7f354ee0f64e1bc4850462edf" }, { "ImportPath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg", - "Rev": "6d76b79f239843a04e8ad8dfd8fcadfa3920236f" + "Rev": "49fee292b27bfff7f354ee0f64e1bc4850462edf" }, { "ImportPath": "github.com/prometheus/common/model", - "Rev": "6d76b79f239843a04e8ad8dfd8fcadfa3920236f" + "Rev": "49fee292b27bfff7f354ee0f64e1bc4850462edf" }, { "ImportPath": "github.com/prometheus/procfs", - "Rev": "fcdb11ccb4389efb1b210b7ffb623ab71c5fdd60" - }, - { - "ImportPath": "github.com/spf13/cobra", - "Rev": "f62e98d28ab7ad31d707ba837a966378465c7b57" + 
"Rev": "454a56f35412459b5e684fd5ec0f9211b94f002a" }, { "ImportPath": "github.com/spf13/pflag", - "Rev": "5ccb023bc27df288a957c5e994cd44fd19619465" + "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" }, { "ImportPath": "github.com/ugorji/go/codec", - "Rev": "f1f1a805ed361a0e078bb537e4ea78cd37dcf065" + "Rev": "ded73eae5db7e7a0ef6f55aace87a2873c5d2b74" }, { "ImportPath": "golang.org/x/crypto/ssh/terminal", - "Rev": "1f22c0103821b9390939b6776727195525381532" + "Rev": "d172538b2cfce0c13cee31e647d0367aa8cd2486" }, { "ImportPath": "golang.org/x/net/context", @@ -417,23 +302,23 @@ }, { "ImportPath": "google.golang.org/api/compute/v1", - "Rev": "a69f0f19d246419bb931b0ac8f4f8d3f3e6d4feb" + "Rev": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb" }, { "ImportPath": "google.golang.org/api/container/v1", - "Rev": "a69f0f19d246419bb931b0ac8f4f8d3f3e6d4feb" + "Rev": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb" }, { "ImportPath": "google.golang.org/api/gensupport", - "Rev": "a69f0f19d246419bb931b0ac8f4f8d3f3e6d4feb" + "Rev": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb" }, { "ImportPath": "google.golang.org/api/googleapi", - "Rev": "a69f0f19d246419bb931b0ac8f4f8d3f3e6d4feb" + "Rev": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb" }, { "ImportPath": "google.golang.org/api/googleapi/internal/uritemplates", - "Rev": "a69f0f19d246419bb931b0ac8f4f8d3f3e6d4feb" + "Rev": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb" }, { "ImportPath": "google.golang.org/appengine", @@ -498,994 +383,1085 @@ "Rev": "53feefa2559fb8dfa8d81baad31be332c97d6c77" }, { - "ImportPath": "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/api/errors", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/install", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/api/meta", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/api/resource", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/federation/apis/federation", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/apimachinery", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/federation/apis/federation/install", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/apimachinery/announced", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/federation/apis/federation/v1beta1", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/apimachinery/registered", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion", - "Comment": "v1.5.1", - "Rev": 
"82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/conversion", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/conversion/queryparams", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/fields", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/annotations", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/labels", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/endpoints", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/openapi", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/errors", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/runtime", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/events", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/runtime/schema", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/install", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/meta", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/json", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/meta/metatypes", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/protobuf", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/pod", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/recognizer", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/resource", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/streaming", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/rest", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/versioning", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": 
"k8s.io/kubernetes/pkg/api/service", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/selection", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/testapi", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/types", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/unversioned", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/util/diff", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/unversioned/validation", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/util/errors", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/util", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/util/framer", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/v1", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/util/intstr", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/validation", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/util/json", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/validation/path", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/util/mergepatch", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apimachinery", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/util/net", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apimachinery/announced", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/util/rand", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apimachinery/registered", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/util/runtime", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/apps", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/util/sets", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/install", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/util/strategicpatch", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/v1beta1", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/util/uuid", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": 
"k8s.io/apimachinery/pkg/util/validation", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/install", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/util/validation/field", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/v1beta1", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/util/wait", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/util/yaml", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/install", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/version", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/pkg/watch", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/json", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/install", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/reflect", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/v1", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/apiserver/pkg/server/healthz", + "Rev": "fb15924f43bd9a27225106aa70510c5e06cd2ef8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/batch", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/discovery", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/install", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/discovery/fake", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v1", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/fake", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/scheme", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": 
"76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates/install", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig/install", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1/fake", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1beta1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/install", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1/fake", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/validation", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1beta1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/imagepolicy", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/imagepolicy/install", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": 
"k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/policy", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/install", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/v1beta1", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1/fake", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/install", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v2alpha1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/storage", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/certificates/v1beta1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/install", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/util", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/core/v1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/v1beta1", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/core/v1/fake", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/auth/authenticator", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/extensions/v1beta1", + 
"Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/auth/user", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/capabilities", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/policy/v1beta1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/cache", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1beta1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/settings/v1alpha1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": 
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1/fake", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1beta1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/listers/core/v1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/api", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/api/install", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/api/v1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/apps", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/apps/install", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/apps/v1beta1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": 
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/authentication", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/authentication/install", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/authentication/v1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/authentication/v1beta1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/authorization", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/authorization/install", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/authorization/v1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/leaderelection", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/authorization/v1beta1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/autoscaling", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/metrics", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/autoscaling/install", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/record", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": 
"k8s.io/client-go/pkg/apis/autoscaling/v1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/restclient", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/autoscaling/v2alpha1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/retry", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/batch", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/testing/core", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/batch/install", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/transport", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/batch/v1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/typed/discovery", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/batch/v2alpha1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/typed/discovery/fake", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/certificates", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/typed/dynamic", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/certificates/install", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/certificates/v1beta1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/auth", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/extensions", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/extensions/install", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/extensions/v1beta1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest", - "Comment": "v1.5.1", - "Rev": 
"82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/policy", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/policy/install", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/policy/v1beta1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/gce", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/rbac", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/controller", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/rbac/install", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/controller/deployment/util", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/rbac/v1alpha1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/conversion", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/rbac/v1beta1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/conversion/queryparams", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/settings", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/settings/install", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/fieldpath", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/settings/v1alpha1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/fields", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/storage", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/genericapiserver/openapi/common", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/storage/install", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/healthz", - "Comment": "v1.5.1", - "Rev": 
"82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/storage/v1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubectl", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/apis/storage/v1beta1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/util", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/util", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubectl/resource", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/util/parsers", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubelet/qos", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/pkg/version", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubelet/types", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/rest", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/labels", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/rest/watch", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/master/ports", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/testing", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/tools/auth", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/runtime", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/tools/cache", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/tools/clientcmd", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/json", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/tools/clientcmd/api", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/protobuf", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/tools/clientcmd/api/latest", + "Comment": 
"v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/recognizer", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/tools/clientcmd/api/v1", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/streaming", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/tools/metrics", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/versioning", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/tools/record", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/security/apparmor", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/transport", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/util/cert", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/util/clock", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/selection", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/util/flowcontrol", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/serviceaccount", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/util/homedir", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/storage", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/util/integer", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/types", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/client-go/util/workqueue", + "Comment": "v2.0.0-alpha.0-111-g7615377", + "Rev": "76153773eaa3a268131d3d993290a194a1370585" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/api", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/cert", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/api/install", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/chmod", - "Comment": "v1.5.1", - 
"Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/api/v1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/chown", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/api/v1/service", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/clock", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/apps", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/config", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/install", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/diff", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/v1beta1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/errors", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/exec", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/install", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/v1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/flag", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/v1beta1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/flowcontrol", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/framer", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/install", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/hash", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/v1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/homedir", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/integer", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" 
}, { - "ImportPath": "k8s.io/kubernetes/pkg/util/intstr", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/install", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/io", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/v1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/json", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/jsonpath", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/batch", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/labels", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/install", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/mount", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/net", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/net/sets", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/node", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates/install", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/parsers", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates/v1beta1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/pod", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/rand", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/install", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/runtime", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/sets", - 
"Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/policy", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/slice", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/install", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/strategicpatch", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/v1beta1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/sysctl", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/uuid", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/install", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/validation", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/validation/field", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/wait", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/settings", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/workqueue", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/settings/install", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/yaml", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/settings/v1alpha1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/version", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/storage", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/volume", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/install", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/volume/util", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/v1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/v1beta1", + "Comment": "v1.6.0", + "Rev": 
"fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/watch", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/pkg/watch/versioned", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/client/auth", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/client/auth/gcp", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/client/auth/oidc", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/third_party/forked/golang/json", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": "k8s.io/kubernetes/third_party/forked/golang/reflect", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" }, { - "ImportPath": 
"k8s.io/kubernetes/third_party/forked/golang/template", - "Comment": "v1.5.1", - "Rev": "82450d03cb057bab0950214ef122b67c83fb11df" + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/gce", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/chmod", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/chown", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/exec", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/io", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/mount", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/net/sets", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/parsers", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/sysctl", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/volume", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/volume/util", + "Comment": "v1.6.0", + "Rev": "fff5156092b56e6bd60fff75aad4dc9de6b6ef37" } ] } diff --git a/controllers/gce/backends/backends.go b/controllers/gce/backends/backends.go index 81edc32e3b..115ad52bcf 100644 --- a/controllers/gce/backends/backends.go +++ b/controllers/gce/backends/backends.go @@ -23,10 +23,11 @@ import ( "strings" "time" - "k8s.io/kubernetes/pkg/util/sets" - "github.com/golang/glog" + compute "google.golang.org/api/compute/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/ingress/controllers/gce/healthchecks" "k8s.io/ingress/controllers/gce/instances" "k8s.io/ingress/controllers/gce/storage" diff --git 
a/controllers/gce/backends/backends_test.go b/controllers/gce/backends/backends_test.go index 08afd35f6a..5766f6919c 100644 --- a/controllers/gce/backends/backends_test.go +++ b/controllers/gce/backends/backends_test.go @@ -21,13 +21,13 @@ import ( "testing" compute "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/ingress/controllers/gce/healthchecks" "k8s.io/ingress/controllers/gce/instances" "k8s.io/ingress/controllers/gce/storage" "k8s.io/ingress/controllers/gce/utils" - "k8s.io/kubernetes/pkg/util/sets" - - "google.golang.org/api/googleapi" ) const defaultZone = "zone-a" diff --git a/controllers/gce/backends/fakes.go b/controllers/gce/backends/fakes.go index 6ad7460f57..2fe73cf864 100644 --- a/controllers/gce/backends/fakes.go +++ b/controllers/gce/backends/fakes.go @@ -20,8 +20,9 @@ import ( "fmt" compute "google.golang.org/api/compute/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/ingress/controllers/gce/utils" - "k8s.io/kubernetes/pkg/client/cache" ) // NewFakeBackendServices creates a new fake backend services manager. diff --git a/controllers/gce/controller/cluster_manager.go b/controllers/gce/controller/cluster_manager.go index 9936444131..79d21bc17f 100644 --- a/controllers/gce/controller/cluster_manager.go +++ b/controllers/gce/controller/cluster_manager.go @@ -22,16 +22,17 @@ import ( "os" "time" + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/cloudprovider" + gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" + "k8s.io/ingress/controllers/gce/backends" "k8s.io/ingress/controllers/gce/firewalls" "k8s.io/ingress/controllers/gce/healthchecks" "k8s.io/ingress/controllers/gce/instances" "k8s.io/ingress/controllers/gce/loadbalancers" "k8s.io/ingress/controllers/gce/utils" - "k8s.io/kubernetes/pkg/cloudprovider" - gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" - - "github.com/golang/glog" ) const ( diff --git a/controllers/gce/controller/controller.go b/controllers/gce/controller/controller.go index 3a039222bb..9bf22ee974 100644 --- a/controllers/gce/controller/controller.go +++ b/controllers/gce/controller/controller.go @@ -23,20 +23,21 @@ import ( "sync" "time" - "k8s.io/ingress/controllers/gce/loadbalancers" - "k8s.io/ingress/controllers/gce/utils" + "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/cache" - client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - "k8s.io/kubernetes/pkg/client/record" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/watch" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/client-go/kubernetes" + unversionedcore "k8s.io/client-go/kubernetes/typed/core/v1" + listers "k8s.io/client-go/listers/core/v1" + base_api "k8s.io/client-go/pkg/api" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" - "github.com/golang/glog" + "k8s.io/ingress/controllers/gce/loadbalancers" + "k8s.io/ingress/controllers/gce/utils" ) var ( @@ -57,16 +58,16 @@ var ( // LoadBalancerController watches the kubernetes api and adds/removes services // from the loadbalancer, via loadBalancerConfig. 
type LoadBalancerController struct { - client client.Interface - ingController *cache.Controller - nodeController *cache.Controller - svcController *cache.Controller - podController *cache.Controller + client kubernetes.Interface + ingController cache.Controller + nodeController cache.Controller + svcController cache.Controller + podController cache.Controller ingLister StoreToIngressLister - nodeLister cache.StoreToNodeLister - svcLister cache.StoreToServiceLister + nodeLister StoreToNodeLister + svcLister StoreToServiceLister // Health checks are the readiness probes of containers on pods. - podLister cache.StoreToPodLister + podLister StoreToPodLister // TODO: Watch secrets CloudClusterManager *ClusterManager recorder record.EventRecorder @@ -91,7 +92,7 @@ type LoadBalancerController struct { // - clusterManager: A ClusterManager capable of creating all cloud resources // required for L7 loadbalancing. // - resyncPeriod: Watchers relist from the Kubernetes API server this often. -func NewLoadBalancerController(kubeClient client.Interface, clusterManager *ClusterManager, resyncPeriod time.Duration, namespace string) (*LoadBalancerController, error) { +func NewLoadBalancerController(kubeClient kubernetes.Interface, clusterManager *ClusterManager, resyncPeriod time.Duration, namespace string) (*LoadBalancerController, error) { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{ @@ -101,7 +102,7 @@ func NewLoadBalancerController(kubeClient client.Interface, clusterManager *Clus client: kubeClient, CloudClusterManager: clusterManager, stopCh: make(chan struct{}), - recorder: eventBroadcaster.NewRecorder( + recorder: eventBroadcaster.NewRecorder(base_api.Scheme, api.EventSource{Component: "loadbalancer-controller"}), } lbc.nodeQueue = NewTaskQueue(lbc.syncNodes) @@ -140,10 +141,7 @@ func NewLoadBalancerController(kubeClient client.Interface, clusterManager *Clus }, } lbc.ingLister.Store, lbc.ingController = cache.NewInformer( - &cache.ListWatch{ - ListFunc: ingressListFunc(lbc.client, namespace), - WatchFunc: ingressWatchFunc(lbc.client, namespace), - }, + cache.NewListWatchFromClient(lbc.client.Extensions().RESTClient(), "ingresses", namespace, fields.Everything()), &extensions.Ingress{}, resyncPeriod, pathHandlers) // Service watch handlers @@ -173,30 +171,14 @@ func NewLoadBalancerController(kubeClient client.Interface, clusterManager *Clus cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, ) - nodeHandlers := cache.ResourceEventHandlerFuncs{ - AddFunc: lbc.nodeQueue.enqueue, - DeleteFunc: lbc.nodeQueue.enqueue, - // Nodes are updated every 10s and we don't care, so no update handler. - } // Node watch handlers - lbc.nodeLister.Store, lbc.nodeController = cache.NewInformer( - &cache.ListWatch{ - ListFunc: func(opts api.ListOptions) (runtime.Object, error) { - return lbc.client.Core().RESTClient().Get(). - Resource("nodes"). - FieldsSelectorParam(fields.Everything()). - Do(). - Get() - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return lbc.client.Core().RESTClient().Get(). - Prefix("watch"). - Resource("nodes"). - FieldsSelectorParam(fields.Everything()). 
- Param("resourceVersion", options.ResourceVersion).Watch() - }, - }, - &api.Node{}, 0, nodeHandlers) + lbc.nodeLister.Indexer, lbc.nodeController = cache.NewIndexerInformer( + cache.NewListWatchFromClient(lbc.client.Core().RESTClient(), "nodes", api.NamespaceAll, fields.Everything()), + &api.Node{}, + resyncPeriod, + cache.ResourceEventHandlerFuncs{}, + cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, + ) lbc.tr = &GCETranslator{&lbc} lbc.tlsLoader = &apiServerTLSLoader{client: lbc.client} @@ -205,18 +187,6 @@ func NewLoadBalancerController(kubeClient client.Interface, clusterManager *Clus return &lbc, nil } -func ingressListFunc(c client.Interface, ns string) func(api.ListOptions) (runtime.Object, error) { - return func(opts api.ListOptions) (runtime.Object, error) { - return c.Extensions().Ingresses(ns).List(opts) - } -} - -func ingressWatchFunc(c client.Interface, ns string) func(options api.ListOptions) (watch.Interface, error) { - return func(options api.ListOptions) (watch.Interface, error) { - return c.Extensions().Ingresses(ns).Watch(options) - } -} - // enqueueIngressForService enqueues all the Ingress' for a Service. func (lbc *LoadBalancerController) enqueueIngressForService(obj interface{}) { svc := obj.(*api.Service) @@ -377,7 +347,7 @@ func (lbc *LoadBalancerController) updateIngressStatus(l7 *loadbalancers.L7, ing // Update IP through update/status endpoint ip := l7.GetIP() - currIng, err := ingClient.Get(ing.Name) + currIng, err := ingClient.Get(ing.Name, metav1.GetOptions{}) if err != nil { return err } @@ -401,7 +371,7 @@ func (lbc *LoadBalancerController) updateIngressStatus(l7 *loadbalancers.L7, ing } } // Update annotations through /update endpoint - currIng, err = ingClient.Get(ing.Name) + currIng, err = ingClient.Get(ing.Name, metav1.GetOptions{}) if err != nil { return err } @@ -464,7 +434,7 @@ func (lbc *LoadBalancerController) syncNodes(key string) error { return nil } -func getNodeReadyPredicate() cache.NodeConditionPredicate { +func getNodeReadyPredicate() listers.NodeConditionPredicate { return func(node *api.Node) bool { for ix := range node.Status.Conditions { condition := &node.Status.Conditions[ix] @@ -479,7 +449,7 @@ func getNodeReadyPredicate() cache.NodeConditionPredicate { // getReadyNodeNames returns names of schedulable, ready nodes from the node lister. 
func (lbc *LoadBalancerController) getReadyNodeNames() ([]string, error) { nodeNames := []string{} - nodes, err := lbc.nodeLister.NodeCondition(getNodeReadyPredicate()).List() + nodes, err := listers.NewNodeLister(lbc.nodeLister.Indexer).ListWithPredicate(getNodeReadyPredicate()) if err != nil { return nodeNames, err } diff --git a/controllers/gce/controller/controller_test.go b/controllers/gce/controller/controller_test.go index f8d905b449..538cc2fdc7 100644 --- a/controllers/gce/controller/controller_test.go +++ b/controllers/gce/controller/controller_test.go @@ -24,18 +24,18 @@ import ( compute "google.golang.org/api/compute/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/pkg/api" + api_v1 "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + "k8s.io/ingress/controllers/gce/firewalls" "k8s.io/ingress/controllers/gce/loadbalancers" "k8s.io/ingress/controllers/gce/utils" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/testapi" - "k8s.io/kubernetes/pkg/apis/extensions" - client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/util/intstr" - "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/util/uuid" ) const testClusterName = "testcluster" @@ -52,8 +52,9 @@ func defaultBackendName(clusterName string) string { // newLoadBalancerController create a loadbalancer controller. func newLoadBalancerController(t *testing.T, cm *fakeClusterManager, masterURL string) *LoadBalancerController { - client := client.NewForConfigOrDie(&restclient.Config{Host: masterURL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) - lb, err := NewLoadBalancerController(client, cm.ClusterManager, 1*time.Second, api.NamespaceAll) + kubeClient := fake.NewSimpleClientset() + //ContentConfig: restclient.ContentConfig{GroupVersion: testapi_v1.Default.GroupVersion()} + lb, err := NewLoadBalancerController(kubeClient, cm.ClusterManager, 1*time.Second, api_v1.NamespaceAll) if err != nil { t.Fatalf("%v", err) } @@ -95,7 +96,7 @@ func toIngressRules(hostRules map[string]utils.FakeIngressRuleValueMap) []extens // newIngress returns a new Ingress with the given path map. func newIngress(hostRules map[string]utils.FakeIngressRuleValueMap) *extensions.Ingress { return &extensions.Ingress{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: fmt.Sprintf("%v", uuid.NewUUID()), Namespace: api.NamespaceNone, }, @@ -107,8 +108,8 @@ func newIngress(hostRules map[string]utils.FakeIngressRuleValueMap) *extensions. 
Rules: toIngressRules(hostRules), }, Status: extensions.IngressStatus{ - LoadBalancer: api.LoadBalancerStatus{ - Ingress: []api.LoadBalancerIngress{ + LoadBalancer: api_v1.LoadBalancerStatus{ + Ingress: []api_v1.LoadBalancerIngress{ {IP: testIPManager.ip()}, }, }, @@ -178,21 +179,21 @@ func addIngress(lbc *LoadBalancerController, ing *extensions.Ingress, pm *nodePo } for _, rule := range ing.Spec.Rules { for _, path := range rule.HTTP.Paths { - svc := &api.Service{ - ObjectMeta: api.ObjectMeta{ + svc := &api_v1.Service{ + ObjectMeta: meta_v1.ObjectMeta{ Name: path.Backend.ServiceName, Namespace: ing.Namespace, }, } - var svcPort api.ServicePort + var svcPort api_v1.ServicePort switch path.Backend.ServicePort.Type { case intstr.Int: - svcPort = api.ServicePort{Port: path.Backend.ServicePort.IntVal} + svcPort = api_v1.ServicePort{Port: path.Backend.ServicePort.IntVal} default: - svcPort = api.ServicePort{Name: path.Backend.ServicePort.StrVal} + svcPort = api_v1.ServicePort{Name: path.Backend.ServicePort.StrVal} } svcPort.NodePort = int32(pm.getNodePort(path.Backend.ServiceName)) - svc.Spec.Ports = []api.ServicePort{svcPort} + svc.Spec.Ports = []api_v1.ServicePort{svcPort} lbc.svcLister.Indexer.Add(svc) } } @@ -373,8 +374,8 @@ func TestLbNoService(t *testing.T) { // Creates the service, next sync should have complete url map. pm := newPortManager(1, 65536) addIngress(lbc, ing, pm) - lbc.enqueueIngressForService(&api.Service{ - ObjectMeta: api.ObjectMeta{ + lbc.enqueueIngressForService(&api_v1.Service{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo1svc", Namespace: ing.Namespace, }, diff --git a/controllers/gce/controller/fakes.go b/controllers/gce/controller/fakes.go index a4870593c3..dc8a80f586 100644 --- a/controllers/gce/controller/fakes.go +++ b/controllers/gce/controller/fakes.go @@ -17,10 +17,10 @@ limitations under the License. package controller import ( - "k8s.io/kubernetes/pkg/util/intstr" - "k8s.io/kubernetes/pkg/util/sets" - compute "google.golang.org/api/compute/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/ingress/controllers/gce/backends" "k8s.io/ingress/controllers/gce/firewalls" "k8s.io/ingress/controllers/gce/healthchecks" diff --git a/controllers/gce/controller/tls.go b/controllers/gce/controller/tls.go index 70bf7e00cd..979afb867d 100644 --- a/controllers/gce/controller/tls.go +++ b/controllers/gce/controller/tls.go @@ -19,13 +19,14 @@ package controller import ( "fmt" - "k8s.io/ingress/controllers/gce/loadbalancers" + "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" - "github.com/golang/glog" + "k8s.io/ingress/controllers/gce/loadbalancers" ) // secretLoaders returns a type containing all the secrets of an Ingress. @@ -44,7 +45,7 @@ func (n *noOPValidator) validate(certs *loadbalancers.TLSCerts) error { // apiServerTLSLoader loads TLS certs from the apiserver. 
type apiServerTLSLoader struct { noOPValidator - client client.Interface + client kubernetes.Interface } func (t *apiServerTLSLoader) load(ing *extensions.Ingress) (*loadbalancers.TLSCerts, error) { @@ -59,7 +60,7 @@ func (t *apiServerTLSLoader) load(ing *extensions.Ingress) (*loadbalancers.TLSCe secretName := ing.Spec.TLS[0].SecretName // TODO: Replace this for a secret watcher. glog.V(3).Infof("Retrieving secret for ing %v with name %v", ing.Name, secretName) - secret, err := t.client.Core().Secrets(ing.Namespace).Get(secretName) + secret, err := t.client.Core().Secrets(ing.Namespace).Get(secretName, meta_v1.GetOptions{}) if err != nil { return nil, err } diff --git a/controllers/gce/controller/util_test.go b/controllers/gce/controller/util_test.go index 38f969c636..4c3b1e8697 100644 --- a/controllers/gce/controller/util_test.go +++ b/controllers/gce/controller/util_test.go @@ -21,10 +21,10 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/util/intstr" - "k8s.io/kubernetes/pkg/util/sets" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + api_v1 "k8s.io/client-go/pkg/api/v1" ) // Pods created in loops start from this time, for routines that @@ -98,7 +98,7 @@ func TestProbeGetter(t *testing.T) { 3001: "/healthz", 3002: "/foo", } - addPods(lbc, nodePortToHealthCheck, api.NamespaceDefault) + addPods(lbc, nodePortToHealthCheck, api_v1.NamespaceDefault) for p, exp := range nodePortToHealthCheck { got, err := lbc.tr.HealthCheck(p) if err != nil { @@ -115,9 +115,9 @@ func TestProbeGetterNamedPort(t *testing.T) { nodePortToHealthCheck := map[int64]string{ 3001: "/healthz", } - addPods(lbc, nodePortToHealthCheck, api.NamespaceDefault) + addPods(lbc, nodePortToHealthCheck, api_v1.NamespaceDefault) for _, p := range lbc.podLister.Indexer.List() { - pod := p.(*api.Pod) + pod := p.(*api_v1.Pod) pod.Spec.Containers[0].Ports[0].Name = "test" pod.Spec.Containers[0].ReadinessProbe.Handler.HTTPGet.Port = intstr.IntOrString{Type: intstr.String, StrVal: "test"} } @@ -136,24 +136,24 @@ func TestProbeGetterCrossNamespace(t *testing.T) { cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName) lbc := newLoadBalancerController(t, cm, "") - firstPod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + firstPod := &api_v1.Pod{ + ObjectMeta: meta_v1.ObjectMeta{ // labels match those added by "addPods", but ns and health check // path is different. If this pod was created in the same ns, it // would become the health check. 
Labels: map[string]string{"app-3001": "test"}, Name: fmt.Sprintf("test-pod-new-ns"), Namespace: "new-ns", - CreationTimestamp: unversioned.NewTime(firstPodCreationTime.Add(-time.Duration(time.Hour))), + CreationTimestamp: meta_v1.NewTime(firstPodCreationTime.Add(-time.Duration(time.Hour))), }, - Spec: api.PodSpec{ - Containers: []api.Container{ + Spec: api_v1.PodSpec{ + Containers: []api_v1.Container{ { - Ports: []api.ContainerPort{{ContainerPort: 80}}, - ReadinessProbe: &api.Probe{ - Handler: api.Handler{ - HTTPGet: &api.HTTPGetAction{ - Scheme: api.URISchemeHTTP, + Ports: []api_v1.ContainerPort{{ContainerPort: 80}}, + ReadinessProbe: &api_v1.Probe{ + Handler: api_v1.Handler{ + HTTPGet: &api_v1.HTTPGetAction{ + Scheme: api_v1.URISchemeHTTP, Path: "/badpath", Port: intstr.IntOrString{ Type: intstr.Int, @@ -170,7 +170,7 @@ func TestProbeGetterCrossNamespace(t *testing.T) { nodePortToHealthCheck := map[int64]string{ 3001: "/healthz", } - addPods(lbc, nodePortToHealthCheck, api.NamespaceDefault) + addPods(lbc, nodePortToHealthCheck, api_v1.NamespaceDefault) for p, exp := range nodePortToHealthCheck { got, err := lbc.tr.HealthCheck(p) @@ -186,10 +186,10 @@ func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string delay := time.Minute for np, u := range nodePortToHealthCheck { l := map[string]string{fmt.Sprintf("app-%d", np): "test"} - svc := &api.Service{ - Spec: api.ServiceSpec{ + svc := &api_v1.Service{ + Spec: api_v1.ServiceSpec{ Selector: l, - Ports: []api.ServicePort{ + Ports: []api_v1.ServicePort{ { NodePort: int32(np), TargetPort: intstr.IntOrString{ @@ -204,21 +204,21 @@ func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string svc.Namespace = ns lbc.svcLister.Indexer.Add(svc) - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &api_v1.Pod{ + ObjectMeta: meta_v1.ObjectMeta{ Labels: l, Name: fmt.Sprintf("%d", np), Namespace: ns, - CreationTimestamp: unversioned.NewTime(firstPodCreationTime.Add(delay)), + CreationTimestamp: meta_v1.NewTime(firstPodCreationTime.Add(delay)), }, - Spec: api.PodSpec{ - Containers: []api.Container{ + Spec: api_v1.PodSpec{ + Containers: []api_v1.Container{ { - Ports: []api.ContainerPort{{Name: "test", ContainerPort: 80}}, - ReadinessProbe: &api.Probe{ - Handler: api.Handler{ - HTTPGet: &api.HTTPGetAction{ - Scheme: api.URISchemeHTTP, + Ports: []api_v1.ContainerPort{{Name: "test", ContainerPort: 80}}, + ReadinessProbe: &api_v1.Probe{ + Handler: api_v1.Handler{ + HTTPGet: &api_v1.HTTPGetAction{ + Scheme: api_v1.URISchemeHTTP, Path: u, Port: intstr.IntOrString{ Type: intstr.Int, @@ -239,20 +239,20 @@ func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string func addNodes(lbc *LoadBalancerController, zoneToNode map[string][]string) { for zone, nodes := range zoneToNode { for _, node := range nodes { - n := &api.Node{ - ObjectMeta: api.ObjectMeta{ + n := &api_v1.Node{ + ObjectMeta: meta_v1.ObjectMeta{ Name: node, Labels: map[string]string{ zoneKey: zone, }, }, - Status: api.NodeStatus{ - Conditions: []api.NodeCondition{ - {Type: api.NodeReady, Status: api.ConditionTrue}, + Status: api_v1.NodeStatus{ + Conditions: []api_v1.NodeCondition{ + {Type: api_v1.NodeReady, Status: api_v1.ConditionTrue}, }, }, } - lbc.nodeLister.Store.Add(n) + lbc.nodeLister.Indexer.Add(n) } } lbc.CloudClusterManager.instancePool.Init(lbc.tr) diff --git a/controllers/gce/controller/utils.go b/controllers/gce/controller/utils.go index ef8241578d..2dee238f0c 100644 --- a/controllers/gce/controller/utils.go +++ 
b/controllers/gce/controller/utils.go @@ -22,19 +22,23 @@ import ( "strconv" "time" + "github.com/golang/glog" + compute "google.golang.org/api/compute/v1" + "k8s.io/apimachinery/pkg/api/meta" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + listers "k8s.io/client-go/listers/core/v1" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/ingress/controllers/gce/loadbalancers" "k8s.io/ingress/controllers/gce/utils" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/cache" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/util/intstr" - "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/util/wait" - "k8s.io/kubernetes/pkg/util/workqueue" - - "github.com/golang/glog" ) const ( @@ -202,6 +206,41 @@ type StoreToIngressLister struct { cache.Store } +// StoreToNodeLister makes a Store that lists Node. +type StoreToNodeLister struct { + cache.Indexer +} + +// StoreToServiceLister makes a Store that lists Service. +type StoreToServiceLister struct { + cache.Indexer +} + +// StoreToPodLister makes a Store that lists Pods. +type StoreToPodLister struct { + cache.Indexer +} + +func (s *StoreToPodLister) List(selector labels.Selector) (ret []*api.Pod, err error) { + err = ListAll(s.Indexer, selector, func(m interface{}) { + ret = append(ret, m.(*api.Pod)) + }) + return ret, err +} + +func ListAll(store cache.Store, selector labels.Selector, appendFn cache.AppendFunc) error { + for _, m := range store.List() { + metadata, err := meta.Accessor(m) + if err != nil { + return err + } + if selector.Matches(labels.Set(metadata.GetLabels())) { + appendFn(m) + } + } + return nil +} + // List lists all Ingress' in the store. func (s *StoreToIngressLister) List() (ing extensions.IngressList, err error) { for _, m := range s.Store.List() { @@ -336,7 +375,7 @@ func (t *GCETranslator) toGCEBackend(be *extensions.IngressBackend, ns string) ( func (t *GCETranslator) getServiceNodePort(be extensions.IngressBackend, namespace string) (int, error) { obj, exists, err := t.svcLister.Indexer.Get( &api.Service{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: be.ServiceName, Namespace: namespace, }, @@ -411,7 +450,7 @@ func getZone(n *api.Node) string { // GetZoneForNode returns the zone for a given node by looking up its zone label. func (t *GCETranslator) GetZoneForNode(name string) (string, error) { - nodes, err := t.nodeLister.NodeCondition(getNodeReadyPredicate()).List() + nodes, err := listers.NewNodeLister(t.nodeLister.Indexer).ListWithPredicate(getNodeReadyPredicate()) if err != nil { return "", err } @@ -428,7 +467,7 @@ func (t *GCETranslator) GetZoneForNode(name string) (string, error) { // ListZones returns a list of zones this Kubernetes cluster spans. func (t *GCETranslator) ListZones() ([]string, error) { zones := sets.String{} - readyNodes, err := t.nodeLister.NodeCondition(getNodeReadyPredicate()).List() + readyNodes, err := listers.NewNodeLister(t.nodeLister.Indexer).ListWithPredicate(getNodeReadyPredicate()) if err != nil { return zones.List(), err } @@ -502,14 +541,12 @@ func isSimpleHTTPProbe(probe *api.Probe) bool { // the request path, callers are responsible for swapping this out for the // appropriate default. 
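The StoreToPodLister and ListAll helpers introduced in the utils.go hunk above filter a cache.Indexer by label selector via meta.Accessor. A simplified, typed variant of the same idea, offered only as a sketch with illustrative names and not as code from this patch, looks like this:

package sketch

import (
	"k8s.io/apimachinery/pkg/labels"
	api "k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/tools/cache"
)

// podsMatching walks the indexer and keeps pods whose labels satisfy the
// selector; it is the typed equivalent of StoreToPodLister.List plus ListAll,
// skipping meta.Accessor because the element type is known up front.
func podsMatching(indexer cache.Indexer, selector labels.Selector) []*api.Pod {
	var pods []*api.Pod
	for _, obj := range indexer.List() {
		pod, ok := obj.(*api.Pod)
		if !ok {
			continue
		}
		if selector.Matches(labels.Set(pod.Labels)) {
			pods = append(pods, pod)
		}
	}
	return pods
}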
func (t *GCETranslator) HealthCheck(port int64) (*compute.HttpHealthCheck, error) { - sl, err := t.svcLister.List(labels.Everything()) - if err != nil { - return nil, err - } + sl := t.svcLister.List() var ingresses []extensions.Ingress var healthCheck *compute.HttpHealthCheck // Find the label and target port of the one service with the given nodePort - for _, s := range sl { + for _, as := range sl { + s := as.(*api.Service) for _, p := range s.Spec.Ports { // only one Service can match this nodePort, try and look up diff --git a/controllers/gce/examples/https/make_secret.go b/controllers/gce/examples/https/make_secret.go index 16853cce44..8cb6ced9c1 100644 --- a/controllers/gce/examples/https/make_secret.go +++ b/controllers/gce/examples/https/make_secret.go @@ -26,9 +26,11 @@ import ( "io/ioutil" "log" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/runtime" + registered "k8s.io/apimachinery/pkg/apimachinery/registered" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/pkg/api" + api_v1 "k8s.io/client-go/pkg/api/v1" // This installs the legacy v1 API _ "k8s.io/kubernetes/pkg/api/install" @@ -58,14 +60,19 @@ func main() { } tlsCrt := read(*crt) tlsKey := read(*key) - secret := &api.Secret{ - ObjectMeta: api.ObjectMeta{ + secret := &api_v1.Secret{ + ObjectMeta: meta_v1.ObjectMeta{ Name: *name, }, Data: map[string][]byte{ - api.TLSCertKey: tlsCrt, - api.TLSPrivateKeyKey: tlsKey, + api_v1.TLSCertKey: tlsCrt, + api_v1.TLSPrivateKeyKey: tlsKey, }, } - fmt.Printf(runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), secret)) + + arm, err := registered.NewAPIRegistrationManager("") + if err != nil { + log.Fatalf("%v", err) + } + fmt.Printf(runtime.EncodeOrDie(api.Codecs.LegacyCodec(arm.EnabledVersions()...), secret)) } diff --git a/controllers/gce/firewalls/fakes.go b/controllers/gce/firewalls/fakes.go index 71c90c321b..73f1a56f1b 100644 --- a/controllers/gce/firewalls/fakes.go +++ b/controllers/gce/firewalls/fakes.go @@ -20,8 +20,9 @@ import ( "fmt" compute "google.golang.org/api/compute/v1" - "k8s.io/ingress/controllers/gce/utils" netset "k8s.io/kubernetes/pkg/util/net/sets" + + "k8s.io/ingress/controllers/gce/utils" ) type fakeFirewallRules struct { diff --git a/controllers/gce/firewalls/firewalls.go b/controllers/gce/firewalls/firewalls.go index 7a4d6603ce..1d40b9aa06 100644 --- a/controllers/gce/firewalls/firewalls.go +++ b/controllers/gce/firewalls/firewalls.go @@ -17,13 +17,15 @@ limitations under the License. package firewalls import ( - "github.com/golang/glog" "strconv" + "github.com/golang/glog" + compute "google.golang.org/api/compute/v1" - "k8s.io/ingress/controllers/gce/utils" + "k8s.io/apimachinery/pkg/util/sets" netset "k8s.io/kubernetes/pkg/util/net/sets" - "k8s.io/kubernetes/pkg/util/sets" + + "k8s.io/ingress/controllers/gce/utils" ) // Src ranges from which the GCE L7 performs health checks. 
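As a side note on the pattern this patch adopts throughout (building a client-go clientset, then passing explicit metav1 option structs on reads), here is a minimal sketch. It is not part of the patch, it assumes the client-go revision vendored above, and the namespace and secret name are placeholders.

package main

import (
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// In-cluster config, as in the updated main.go; out-of-cluster callers can
	// fall back to clientcmd as shown later in this patch.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatalf("error creating client configuration: %v", err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// Reads now take an explicit metav1.GetOptions value, as in tls.go above.
	secret, err := client.Core().Secrets("default").Get("demo-secret", metav1.GetOptions{})
	if err != nil {
		log.Fatalf("failed to read secret: %v", err)
	}
	fmt.Printf("secret %s holds %d keys\n", secret.Name, len(secret.Data))
}

The explicit GetOptions argument is the signature used by the vendored client-go, replacing the single-argument Get calls made against the old internal clientset.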
diff --git a/controllers/gce/healthchecks/fakes.go b/controllers/gce/healthchecks/fakes.go index 18984d1224..07240ccefc 100644 --- a/controllers/gce/healthchecks/fakes.go +++ b/controllers/gce/healthchecks/fakes.go @@ -20,6 +20,7 @@ import ( "fmt" compute "google.golang.org/api/compute/v1" + "k8s.io/ingress/controllers/gce/utils" ) diff --git a/controllers/gce/healthchecks/healthchecks.go b/controllers/gce/healthchecks/healthchecks.go index 978b7e657e..6b37392ab9 100644 --- a/controllers/gce/healthchecks/healthchecks.go +++ b/controllers/gce/healthchecks/healthchecks.go @@ -17,11 +17,12 @@ limitations under the License. package healthchecks import ( - compute "google.golang.org/api/compute/v1" + "net/http" "github.com/golang/glog" + compute "google.golang.org/api/compute/v1" + "k8s.io/ingress/controllers/gce/utils" - "net/http" ) // HealthChecks manages health checks. diff --git a/controllers/gce/instances/fakes.go b/controllers/gce/instances/fakes.go index df2ee26fad..389eccf367 100644 --- a/controllers/gce/instances/fakes.go +++ b/controllers/gce/instances/fakes.go @@ -20,8 +20,9 @@ import ( "fmt" compute "google.golang.org/api/compute/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/ingress/controllers/gce/utils" - "k8s.io/kubernetes/pkg/util/sets" ) // NewFakeInstanceGroups creates a new FakeInstanceGroups. diff --git a/controllers/gce/instances/instances.go b/controllers/gce/instances/instances.go index c72eaf7ce7..b49f7cd014 100644 --- a/controllers/gce/instances/instances.go +++ b/controllers/gce/instances/instances.go @@ -21,12 +21,13 @@ import ( "net/http" "strings" + "github.com/golang/glog" + compute "google.golang.org/api/compute/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/ingress/controllers/gce/storage" "k8s.io/ingress/controllers/gce/utils" - "k8s.io/kubernetes/pkg/util/sets" - - "github.com/golang/glog" ) const ( diff --git a/controllers/gce/instances/instances_test.go b/controllers/gce/instances/instances_test.go index 4081f69d4b..9baca18a1e 100644 --- a/controllers/gce/instances/instances_test.go +++ b/controllers/gce/instances/instances_test.go @@ -19,7 +19,7 @@ package instances import ( "testing" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/sets" ) const defaultZone = "default-zone" diff --git a/controllers/gce/loadbalancers/fakes.go b/controllers/gce/loadbalancers/fakes.go index 6dfdcd941b..6860b1bffa 100644 --- a/controllers/gce/loadbalancers/fakes.go +++ b/controllers/gce/loadbalancers/fakes.go @@ -21,8 +21,9 @@ import ( "testing" compute "google.golang.org/api/compute/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/ingress/controllers/gce/utils" - "k8s.io/kubernetes/pkg/util/sets" ) var testIPManager = testIP{} diff --git a/controllers/gce/loadbalancers/loadbalancers.go b/controllers/gce/loadbalancers/loadbalancers.go index 7e04fb2b0d..dd5fd1574b 100644 --- a/controllers/gce/loadbalancers/loadbalancers.go +++ b/controllers/gce/loadbalancers/loadbalancers.go @@ -25,13 +25,14 @@ import ( "reflect" "strings" + "github.com/golang/glog" + compute "google.golang.org/api/compute/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/ingress/controllers/gce/backends" "k8s.io/ingress/controllers/gce/storage" "k8s.io/ingress/controllers/gce/utils" - "k8s.io/kubernetes/pkg/util/sets" - - "github.com/golang/glog" ) const ( diff --git a/controllers/gce/loadbalancers/loadbalancers_test.go b/controllers/gce/loadbalancers/loadbalancers_test.go index 6ed940f144..a2f54d9a17 100644 --- a/controllers/gce/loadbalancers/loadbalancers_test.go +++ 
b/controllers/gce/loadbalancers/loadbalancers_test.go @@ -21,11 +21,12 @@ import ( "testing" compute "google.golang.org/api/compute/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/ingress/controllers/gce/backends" "k8s.io/ingress/controllers/gce/healthchecks" "k8s.io/ingress/controllers/gce/instances" "k8s.io/ingress/controllers/gce/utils" - "k8s.io/kubernetes/pkg/util/sets" ) const ( diff --git a/controllers/gce/main.go b/controllers/gce/main.go index a306956a28..fde1b5f8d5 100644 --- a/controllers/gce/main.go +++ b/controllers/gce/main.go @@ -26,20 +26,23 @@ import ( "syscall" "time" + "github.com/golang/glog" + + "github.com/prometheus/client_golang/prometheus/promhttp" flag "github.com/spf13/pflag" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/pkg/api" + api_v1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/ingress/controllers/gce/controller" "k8s.io/ingress/controllers/gce/loadbalancers" "k8s.io/ingress/controllers/gce/storage" "k8s.io/ingress/controllers/gce/utils" - "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/client/restclient" - kubectl_util "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/util/wait" - - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus/promhttp" ) // Entrypoint of GLBC. Example invocation: @@ -50,9 +53,9 @@ import ( // $ glbc --proxy="http://localhost:proxyport" const ( - // lbApiPort is the port on which the loadbalancer controller serves a + // lbAPIPort is the port on which the loadbalancer controller serves a // minimal api (/healthz, /delete-all-and-quit etc). - lbApiPort = 8081 + lbAPIPort = 8081 // A delimiter used for clarity in naming GCE resources. clusterNameDelimiter = "--" @@ -119,7 +122,7 @@ var ( `Path to a file containing the gce config. If left unspecified this controller only works with default zones.`) - healthzPort = flags.Int("healthz-port", lbApiPort, + healthzPort = flags.Int("healthz-port", lbAPIPort, `Port to run healthz server. Must match the health check port in yaml.`) ) @@ -171,7 +174,7 @@ func main() { // We only really need a binary switch from light, v(2) logging to // heavier debug style V(4) logging, which we use --verbose for. flags.Parse(os.Args) - clientConfig := kubectl_util.DefaultClientConfig(flags) + //clientConfig := kubectl_util.DefaultClientConfig(flags) // Set glog verbosity levels, unconditionally set --alsologtostderr. 
go_flag.Lookup("logtostderr").Value.Set("true") @@ -183,20 +186,23 @@ func main() { glog.Fatalf("Please specify --default-backend") } - var config *restclient.Config + var config *rest.Config // Create kubeclient if *inCluster { - if config, err = restclient.InClusterConfig(); err != nil { + if config, err = rest.InClusterConfig(); err != nil { glog.Fatalf("error creating client configuration: %v", err) } } else { - config, err = clientConfig.ClientConfig() + config, err = clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + &clientcmd.ClientConfigLoadingRules{}, + &clientcmd.ConfigOverrides{}).ClientConfig() + if err != nil { glog.Fatalf("error creating client configuration: %v", err) } } - kubeClient, err := client.NewForConfig(config) + kubeClient, err := kubernetes.NewForConfig(config) if err != nil { glog.Fatalf("Failed to create client: %v.", err) } @@ -247,7 +253,7 @@ func main() { } } -func newNamer(kubeClient client.Interface, clusterName string, fwName string) (*utils.Namer, error) { +func newNamer(kubeClient kubernetes.Interface, clusterName string, fwName string) (*utils.Namer, error) { name, err := getClusterUID(kubeClient, clusterName) if err != nil { return nil, err @@ -329,7 +335,7 @@ func useDefaultOrLookupVault(cfgVault *storage.ConfigMapVault, cm_key, default_n // backwards compatibility, the firewall name will default to the cluster UID. // Use getFlagOrLookupVault to obtain a stored or overridden value for the firewall name. // else, use the cluster UID as a backup (this retains backwards compatibility). -func getFirewallName(kubeClient client.Interface, name, cluster_uid string) (string, error) { +func getFirewallName(kubeClient kubernetes.Interface, name, cluster_uid string) (string, error) { cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName) if fw_name, err := useDefaultOrLookupVault(cfgVault, storage.ProviderDataKey, name); err != nil { return "", err @@ -347,7 +353,7 @@ func getFirewallName(kubeClient client.Interface, name, cluster_uid string) (str // else, check if there are any working Ingresses // - remember that "" is the cluster uid // else, allocate a new uid -func getClusterUID(kubeClient client.Interface, name string) (string, error) { +func getClusterUID(kubeClient kubernetes.Interface, name string) (string, error) { cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName) if name, err := useDefaultOrLookupVault(cfgVault, storage.UidDataKey, name); err != nil { return "", err @@ -356,7 +362,9 @@ func getClusterUID(kubeClient client.Interface, name string) (string, error) { } // Check if the cluster has an Ingress with ip - ings, err := kubeClient.Extensions().Ingresses(api.NamespaceAll).List(api.ListOptions{LabelSelector: labels.Everything()}) + ings, err := kubeClient.Extensions().Ingresses(api.NamespaceAll).List(meta_v1.ListOptions{ + LabelSelector: labels.Everything().String(), + }) if err != nil { return "", err } @@ -387,11 +395,11 @@ func getClusterUID(kubeClient client.Interface, name string) (string, error) { } // getNodePort waits for the Service, and returns it's first node port. 
-func getNodePort(client client.Interface, ns, name string) (nodePort int64, err error) { - var svc *api.Service +func getNodePort(client kubernetes.Interface, ns, name string) (nodePort int64, err error) { + var svc *api_v1.Service glog.V(3).Infof("Waiting for %v/%v", ns, name) wait.Poll(1*time.Second, 5*time.Minute, func() (bool, error) { - svc, err = client.Core().Services(ns).Get(name) + svc, err = client.Core().Services(ns).Get(name, meta_v1.GetOptions{}) if err != nil { return false, nil } diff --git a/controllers/gce/storage/configmaps.go b/controllers/gce/storage/configmaps.go index ace030b9f5..94a656e2f1 100644 --- a/controllers/gce/storage/configmaps.go +++ b/controllers/gce/storage/configmaps.go @@ -22,10 +22,12 @@ import ( "sync" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/client/cache" - client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + api "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" ) const ( @@ -72,7 +74,7 @@ func (c *ConfigMapVault) Put(key, val string) error { c.storeLock.Lock() defer c.storeLock.Unlock() apiObj := &api.ConfigMap{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: metav1.ObjectMeta{ Name: c.name, Namespace: c.namespace, }, @@ -121,7 +123,7 @@ func (c *ConfigMapVault) Delete() error { // NewConfigMapVault creates a config map client. // This client is essentially meant to abstract out the details of // configmaps and the API, and just store/retrieve a single value, the cluster uid. -func NewConfigMapVault(c client.Interface, uidNs, uidConfigMapName string) *ConfigMapVault { +func NewConfigMapVault(c kubernetes.Interface, uidNs, uidConfigMapName string) *ConfigMapVault { return &ConfigMapVault{ ConfigMapStore: NewConfigMapStore(c), namespace: uidNs, @@ -148,7 +150,7 @@ type ConfigMapStore interface { // through cache. type APIServerConfigMapStore struct { ConfigMapStore - client client.Interface + client kubernetes.Interface } // Add adds the given config map to the apiserver's store. @@ -168,7 +170,7 @@ func (a *APIServerConfigMapStore) Update(obj interface{}) error { // Delete deletes the existing config map object. func (a *APIServerConfigMapStore) Delete(obj interface{}) error { cfg := obj.(*api.ConfigMap) - return a.client.Core().ConfigMaps(cfg.Namespace).Delete(cfg.Name, &api.DeleteOptions{}) + return a.client.Core().ConfigMaps(cfg.Namespace).Delete(cfg.Name, &metav1.DeleteOptions{}) } // GetByKey returns the config map for a given key. @@ -179,7 +181,7 @@ func (a *APIServerConfigMapStore) GetByKey(key string) (item interface{}, exists return nil, false, fmt.Errorf("failed to get key %v, unexpecte format, expecting ns/name", key) } ns, name := nsName[0], nsName[1] - cfg, err := a.client.Core().ConfigMaps(ns).Get(name) + cfg, err := a.client.Core().ConfigMaps(ns).Get(name, metav1.GetOptions{}) if err != nil { // Translate not found errors to found=false, err=nil if errors.IsNotFound(err) { @@ -192,6 +194,6 @@ func (a *APIServerConfigMapStore) GetByKey(key string) (item interface{}, exists // NewConfigMapStore returns a config map store capable of persisting updates // to apiserver. 
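The configmaps.go changes here thread metav1.GetOptions and *metav1.DeleteOptions through the ConfigMap calls. A small stand-alone sketch of that round trip, with hypothetical helper names and not taken from the patch, is:

package sketch

import (
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	api "k8s.io/client-go/pkg/api/v1"
)

// upsertConfigMap creates the ConfigMap if it is missing and updates it
// otherwise, the same create-or-update flow the vault performs on Put.
func upsertConfigMap(client kubernetes.Interface, ns string, cm *api.ConfigMap) error {
	_, err := client.Core().ConfigMaps(ns).Get(cm.Name, metav1.GetOptions{})
	if errors.IsNotFound(err) {
		_, err = client.Core().ConfigMaps(ns).Create(cm)
		return err
	}
	if err != nil {
		return err
	}
	_, err = client.Core().ConfigMaps(ns).Update(cm)
	return err
}

// deleteConfigMap removes the ConfigMap, tolerating an already-missing object.
func deleteConfigMap(client kubernetes.Interface, ns, name string) error {
	err := client.Core().ConfigMaps(ns).Delete(name, &metav1.DeleteOptions{})
	if errors.IsNotFound(err) {
		return nil
	}
	return err
}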
-func NewConfigMapStore(c client.Interface) ConfigMapStore { +func NewConfigMapStore(c kubernetes.Interface) ConfigMapStore { return &APIServerConfigMapStore{ConfigMapStore: cache.NewStore(cache.MetaNamespaceKeyFunc), client: c} } diff --git a/controllers/gce/storage/configmaps_test.go b/controllers/gce/storage/configmaps_test.go index 8d25d66711..42d6e47cb6 100644 --- a/controllers/gce/storage/configmaps_test.go +++ b/controllers/gce/storage/configmaps_test.go @@ -19,7 +19,7 @@ package storage import ( "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/client-go/pkg/api" ) func TestConfigMapUID(t *testing.T) { diff --git a/controllers/gce/storage/pools.go b/controllers/gce/storage/pools.go index df4d1d8855..2ced272dec 100644 --- a/controllers/gce/storage/pools.go +++ b/controllers/gce/storage/pools.go @@ -21,8 +21,9 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/client/cache" - "k8s.io/kubernetes/pkg/util/wait" + + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" ) // Snapshotter is an interface capable of providing a consistent snapshot of diff --git a/controllers/gce/utils/utils.go b/controllers/gce/utils/utils.go index 1ed36769f6..288258d9c9 100644 --- a/controllers/gce/utils/utils.go +++ b/controllers/gce/utils/utils.go @@ -18,6 +18,7 @@ package utils import ( "fmt" + "regexp" "strconv" "strings" "sync" @@ -25,7 +26,6 @@ import ( "github.com/golang/glog" compute "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" - "regexp" ) const ( diff --git a/controllers/nginx/pkg/cmd/controller/nginx.go b/controllers/nginx/pkg/cmd/controller/nginx.go index f8685deeeb..91d1f48e3c 100644 --- a/controllers/nginx/pkg/cmd/controller/nginx.go +++ b/controllers/nginx/pkg/cmd/controller/nginx.go @@ -32,7 +32,7 @@ import ( "github.com/golang/glog" "github.com/spf13/pflag" - "k8s.io/kubernetes/pkg/api" + api_v1 "k8s.io/client-go/pkg/api/v1" "strings" @@ -71,7 +71,7 @@ func newNGINXController() ingress.Controller { } n := &NGINXController{ binary: ngx, - configmap: &api.ConfigMap{}, + configmap: &api_v1.ConfigMap{}, } var onChange func() @@ -108,7 +108,7 @@ Error loading new template : %v type NGINXController struct { t *ngx_template.Template - configmap *api.ConfigMap + configmap *api_v1.ConfigMap storeLister ingress.StoreLister @@ -308,7 +308,7 @@ Error: %v } // SetConfig sets the configured configmap -func (n *NGINXController) SetConfig(cmap *api.ConfigMap) { +func (n *NGINXController) SetConfig(cmap *api_v1.ConfigMap) { n.configmap = cmap } @@ -383,7 +383,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) ([]byte, er } if exists { - setHeaders = cmap.(*api.ConfigMap).Data + setHeaders = cmap.(*api_v1.ConfigMap).Data } } @@ -396,7 +396,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) ([]byte, er } if exists { - secret := s.(*api.Secret) + secret := s.(*api_v1.Secret) nsSecName := strings.Replace(secretName, "/", "-", -1) dh, ok := secret.Data["dhparam.pem"] diff --git a/controllers/nginx/pkg/config/config.go b/controllers/nginx/pkg/config/config.go index aab72dfbd3..951d2833e1 100644 --- a/controllers/nginx/pkg/config/config.go +++ b/controllers/nginx/pkg/config/config.go @@ -200,6 +200,10 @@ type Configuration struct { // http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ciphers SSLCiphers string `json:"ssl-ciphers,omitempty"` + // Specifies a curve for ECDHE ciphers. 
+ // http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ecdh_curve + SSLECDHCurve string `json:"ssl-ecdh-curve,omitempty"` + // The secret that contains Diffie-Hellman key to help with "Perfect Forward Secrecy" // https://www.openssl.org/docs/manmaster/apps/dhparam.html // https://wiki.mozilla.org/Security/Server_Side_TLS#DHE_handshake_and_dhparam @@ -280,6 +284,7 @@ func NewDefault() Configuration { ShowServerTokens: true, SSLBufferSize: sslBufferSize, SSLCiphers: sslCiphers, + SSLECDHCurve: "secp384r1", SSLProtocols: sslProtocols, SSLSessionCache: true, SSLSessionCacheSize: sslSessionCacheSize, diff --git a/controllers/nginx/pkg/template/template.go b/controllers/nginx/pkg/template/template.go index 90ae4fe598..fec6f7dee6 100644 --- a/controllers/nginx/pkg/template/template.go +++ b/controllers/nginx/pkg/template/template.go @@ -26,7 +26,7 @@ import ( "strings" text_template "text/template" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/sets" "github.com/golang/glog" diff --git a/controllers/nginx/rootfs/etc/nginx/template/nginx.tmpl b/controllers/nginx/rootfs/etc/nginx/template/nginx.tmpl index 87426e1f60..128bb8b553 100644 --- a/controllers/nginx/rootfs/etc/nginx/template/nginx.tmpl +++ b/controllers/nginx/rootfs/etc/nginx/template/nginx.tmpl @@ -183,6 +183,8 @@ http { ssl_dyn_rec_size_lo 0; {{ end }} + ssl_ecdh_curve {{ $cfg.SSLECDHCurve }}; + {{ if .CustomErrors }} # Custom error pages proxy_intercept_errors on; diff --git a/core/pkg/cache/main.go b/core/pkg/cache/main.go deleted file mode 100644 index 04257a0ba9..0000000000 --- a/core/pkg/cache/main.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -import "k8s.io/kubernetes/pkg/client/cache" - -// StoreToIngressLister makes a Store that lists Ingress. -type StoreToIngressLister struct { - cache.Store -} - -// StoreToSecretsLister makes a Store that lists Secrets. -type StoreToSecretsLister struct { - cache.Store -} - -// StoreToConfigmapLister makes a Store that lists Configmap. -type StoreToConfigmapLister struct { - cache.Store -} diff --git a/core/pkg/cache/main_test.go b/core/pkg/cache/main_test.go deleted file mode 100644 index 72dc7617ef..0000000000 --- a/core/pkg/cache/main_test.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cache - -import ( - "testing" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/cache" - "k8s.io/kubernetes/pkg/util/sets" -) - -func TestStoreToIngressLister(t *testing.T) { - store := cache.NewStore(cache.MetaNamespaceKeyFunc) - ids := sets.NewString("foo", "bar", "baz") - for id := range ids { - store.Add(&extensions.Ingress{ObjectMeta: api.ObjectMeta{Name: id}}) - } - sml := StoreToIngressLister{store} - - gotIngress := sml.List() - got := make([]string, len(gotIngress)) - for ix := range gotIngress { - ing, ok := gotIngress[ix].(*extensions.Ingress) - if !ok { - t.Errorf("expected an Ingress type") - } - got[ix] = ing.Name - } - if !ids.HasAll(got...) || len(got) != len(ids) { - t.Errorf("expected %v, got %v", ids, got) - } -} - -func TestStoreToSecretsLister(t *testing.T) { - store := cache.NewStore(cache.MetaNamespaceKeyFunc) - ids := sets.NewString("foo", "bar", "baz") - for id := range ids { - store.Add(&api.Secret{ObjectMeta: api.ObjectMeta{Name: id}}) - } - sml := StoreToSecretsLister{store} - - gotIngress := sml.List() - got := make([]string, len(gotIngress)) - for ix := range gotIngress { - s, ok := gotIngress[ix].(*api.Secret) - if !ok { - t.Errorf("expected a Secret type") - } - got[ix] = s.Name - } - if !ids.HasAll(got...) || len(got) != len(ids) { - t.Errorf("expected %v, got %v", ids, got) - } -} - -func TestStoreToConfigmapLister(t *testing.T) { - store := cache.NewStore(cache.MetaNamespaceKeyFunc) - ids := sets.NewString("foo", "bar", "baz") - for id := range ids { - store.Add(&api.ConfigMap{ObjectMeta: api.ObjectMeta{Name: id}}) - } - sml := StoreToConfigmapLister{store} - - gotIngress := sml.List() - got := make([]string, len(gotIngress)) - for ix := range gotIngress { - m, ok := gotIngress[ix].(*api.ConfigMap) - if !ok { - t.Errorf("expected an Ingress type") - } - got[ix] = m.Name - } - if !ids.HasAll(got...) 
|| len(got) != len(ids) { - t.Errorf("expected %v, got %v", ids, got) - } -} diff --git a/core/pkg/ingress/annotations/auth/main.go b/core/pkg/ingress/annotations/auth/main.go index fe2167f03f..33abf407cf 100644 --- a/core/pkg/ingress/annotations/auth/main.go +++ b/core/pkg/ingress/annotations/auth/main.go @@ -24,8 +24,8 @@ import ( "regexp" "github.com/pkg/errors" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" ing_errors "k8s.io/ingress/core/pkg/ingress/errors" diff --git a/core/pkg/ingress/annotations/auth/main_test.go b/core/pkg/ingress/annotations/auth/main_test.go index 6d4027313d..aec7549b39 100644 --- a/core/pkg/ingress/annotations/auth/main_test.go +++ b/core/pkg/ingress/annotations/auth/main_test.go @@ -24,9 +24,11 @@ import ( "time" "github.com/pkg/errors" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/util/intstr" + + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" ) func buildIngress() *extensions.Ingress { @@ -36,7 +38,7 @@ func buildIngress() *extensions.Ingress { } return &extensions.Ingress{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, @@ -73,7 +75,7 @@ func (m mockSecret) GetSecret(name string) (*api.Secret, error) { } return &api.Secret{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Namespace: api.NamespaceDefault, Name: "demo-secret", }, diff --git a/core/pkg/ingress/annotations/authreq/main.go b/core/pkg/ingress/annotations/authreq/main.go index 91c56b9f6b..ce3707a847 100644 --- a/core/pkg/ingress/annotations/authreq/main.go +++ b/core/pkg/ingress/annotations/authreq/main.go @@ -20,7 +20,7 @@ import ( "net/url" "strings" - "k8s.io/kubernetes/pkg/apis/extensions" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" ing_errors "k8s.io/ingress/core/pkg/ingress/errors" diff --git a/core/pkg/ingress/annotations/authreq/main_test.go b/core/pkg/ingress/annotations/authreq/main_test.go index 75cd6d2b7a..91e790d889 100644 --- a/core/pkg/ingress/annotations/authreq/main_test.go +++ b/core/pkg/ingress/annotations/authreq/main_test.go @@ -20,9 +20,11 @@ import ( "fmt" "testing" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/util/intstr" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + + "k8s.io/apimachinery/pkg/util/intstr" ) func buildIngress() *extensions.Ingress { @@ -32,7 +34,7 @@ func buildIngress() *extensions.Ingress { } return &extensions.Ingress{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, diff --git a/core/pkg/ingress/annotations/authtls/main.go b/core/pkg/ingress/annotations/authtls/main.go index c4172e51cf..2de8b22c02 100644 --- a/core/pkg/ingress/annotations/authtls/main.go +++ b/core/pkg/ingress/annotations/authtls/main.go @@ -18,7 +18,7 @@ package authtls import ( "github.com/pkg/errors" - "k8s.io/kubernetes/pkg/apis/extensions" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" ing_errors 
"k8s.io/ingress/core/pkg/ingress/errors" diff --git a/core/pkg/ingress/annotations/authtls/main_test.go b/core/pkg/ingress/annotations/authtls/main_test.go index b1f4886f64..50057503b7 100644 --- a/core/pkg/ingress/annotations/authtls/main_test.go +++ b/core/pkg/ingress/annotations/authtls/main_test.go @@ -19,9 +19,10 @@ package authtls import ( "testing" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/util/intstr" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" ) func buildIngress() *extensions.Ingress { @@ -31,7 +32,7 @@ func buildIngress() *extensions.Ingress { } return &extensions.Ingress{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, diff --git a/core/pkg/ingress/annotations/class/main.go b/core/pkg/ingress/annotations/class/main.go index d9e8629384..1c1066e169 100644 --- a/core/pkg/ingress/annotations/class/main.go +++ b/core/pkg/ingress/annotations/class/main.go @@ -18,7 +18,7 @@ package class import ( "github.com/golang/glog" - "k8s.io/kubernetes/pkg/apis/extensions" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" "k8s.io/ingress/core/pkg/ingress/errors" diff --git a/core/pkg/ingress/annotations/class/main_test.go b/core/pkg/ingress/annotations/class/main_test.go index bf204052f5..f48525078b 100644 --- a/core/pkg/ingress/annotations/class/main_test.go +++ b/core/pkg/ingress/annotations/class/main_test.go @@ -19,8 +19,9 @@ package class import ( "testing" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" ) func TestIsValidClass(t *testing.T) { @@ -40,7 +41,7 @@ func TestIsValidClass(t *testing.T) { } ing := &extensions.Ingress{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, diff --git a/core/pkg/ingress/annotations/cors/main.go b/core/pkg/ingress/annotations/cors/main.go index 71195863f6..c460f8215c 100644 --- a/core/pkg/ingress/annotations/cors/main.go +++ b/core/pkg/ingress/annotations/cors/main.go @@ -17,7 +17,7 @@ limitations under the License. 
package cors import ( - "k8s.io/kubernetes/pkg/apis/extensions" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" ) diff --git a/core/pkg/ingress/annotations/cors/main_test.go b/core/pkg/ingress/annotations/cors/main_test.go index 61521f301c..480356ca80 100644 --- a/core/pkg/ingress/annotations/cors/main_test.go +++ b/core/pkg/ingress/annotations/cors/main_test.go @@ -19,8 +19,9 @@ package cors import ( "testing" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" ) const ( @@ -45,7 +46,7 @@ func TestParse(t *testing.T) { } ing := &extensions.Ingress{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, diff --git a/core/pkg/ingress/annotations/healthcheck/main.go b/core/pkg/ingress/annotations/healthcheck/main.go index 493195154c..973e911dc9 100644 --- a/core/pkg/ingress/annotations/healthcheck/main.go +++ b/core/pkg/ingress/annotations/healthcheck/main.go @@ -17,7 +17,7 @@ limitations under the License. package healthcheck import ( - "k8s.io/kubernetes/pkg/apis/extensions" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" "k8s.io/ingress/core/pkg/ingress/resolver" diff --git a/core/pkg/ingress/annotations/healthcheck/main_test.go b/core/pkg/ingress/annotations/healthcheck/main_test.go index 21166cbe04..09215b17bc 100644 --- a/core/pkg/ingress/annotations/healthcheck/main_test.go +++ b/core/pkg/ingress/annotations/healthcheck/main_test.go @@ -19,9 +19,10 @@ package healthcheck import ( "testing" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/util/intstr" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/defaults" ) @@ -33,7 +34,7 @@ func buildIngress() *extensions.Ingress { } return &extensions.Ingress{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, diff --git a/core/pkg/ingress/annotations/ipwhitelist/main.go b/core/pkg/ingress/annotations/ipwhitelist/main.go index d42f0fad3e..3c043967b5 100644 --- a/core/pkg/ingress/annotations/ipwhitelist/main.go +++ b/core/pkg/ingress/annotations/ipwhitelist/main.go @@ -21,7 +21,8 @@ import ( "strings" "github.com/pkg/errors" - "k8s.io/kubernetes/pkg/apis/extensions" + + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/util/net/sets" "k8s.io/ingress/core/pkg/ingress/annotations/parser" diff --git a/core/pkg/ingress/annotations/ipwhitelist/main_test.go b/core/pkg/ingress/annotations/ipwhitelist/main_test.go index 190a36f896..ba2c946a1c 100644 --- a/core/pkg/ingress/annotations/ipwhitelist/main_test.go +++ b/core/pkg/ingress/annotations/ipwhitelist/main_test.go @@ -20,9 +20,10 @@ import ( "reflect" "testing" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/util/intstr" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/defaults" "k8s.io/ingress/core/pkg/ingress/errors" @@ -35,7 +36,7 @@ 
func buildIngress() *extensions.Ingress { } return &extensions.Ingress{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, diff --git a/core/pkg/ingress/annotations/parser/main.go b/core/pkg/ingress/annotations/parser/main.go index bff6f2210e..a35c80a380 100644 --- a/core/pkg/ingress/annotations/parser/main.go +++ b/core/pkg/ingress/annotations/parser/main.go @@ -19,7 +19,7 @@ package parser import ( "strconv" - "k8s.io/kubernetes/pkg/apis/extensions" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/errors" ) diff --git a/core/pkg/ingress/annotations/parser/main_test.go b/core/pkg/ingress/annotations/parser/main_test.go index 099e765568..8dd2db1b5d 100644 --- a/core/pkg/ingress/annotations/parser/main_test.go +++ b/core/pkg/ingress/annotations/parser/main_test.go @@ -19,13 +19,14 @@ package parser import ( "testing" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" ) func buildIngress() *extensions.Ingress { return &extensions.Ingress{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, diff --git a/core/pkg/ingress/annotations/portinredirect/main.go b/core/pkg/ingress/annotations/portinredirect/main.go index 1b16e8dcd3..2f264f32d3 100644 --- a/core/pkg/ingress/annotations/portinredirect/main.go +++ b/core/pkg/ingress/annotations/portinredirect/main.go @@ -17,7 +17,7 @@ limitations under the License. package portinredirect import ( - "k8s.io/kubernetes/pkg/apis/extensions" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" "k8s.io/ingress/core/pkg/ingress/resolver" diff --git a/core/pkg/ingress/annotations/portinredirect/main_test.go b/core/pkg/ingress/annotations/portinredirect/main_test.go index cb6403fd17..46fe8ad921 100644 --- a/core/pkg/ingress/annotations/portinredirect/main_test.go +++ b/core/pkg/ingress/annotations/portinredirect/main_test.go @@ -19,9 +19,10 @@ package portinredirect import ( "testing" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/util/intstr" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "fmt" @@ -35,7 +36,7 @@ func buildIngress() *extensions.Ingress { } return &extensions.Ingress{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, diff --git a/core/pkg/ingress/annotations/proxy/main.go b/core/pkg/ingress/annotations/proxy/main.go index ab7d2e07aa..e097edd245 100644 --- a/core/pkg/ingress/annotations/proxy/main.go +++ b/core/pkg/ingress/annotations/proxy/main.go @@ -17,7 +17,7 @@ limitations under the License. 
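// Sketch of the fixture pattern the rewritten tests above converge on:
// ObjectMeta now comes from k8s.io/apimachinery (meta_v1) and the Ingress
// types from the client-go extensions/v1beta1 package vendored by this change.
// The backend name and port below are placeholder values.
package fixtures

import (
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	api "k8s.io/client-go/pkg/api/v1"
	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
)

// buildIngressFixture mirrors the buildIngress helpers used throughout the
// annotation tests after the migration.
func buildIngressFixture() *extensions.Ingress {
	return &extensions.Ingress{
		ObjectMeta: meta_v1.ObjectMeta{
			Name:      "foo",
			Namespace: api.NamespaceDefault,
		},
		Spec: extensions.IngressSpec{
			Backend: &extensions.IngressBackend{
				ServiceName: "default-backend",
				ServicePort: intstr.FromInt(80),
			},
		},
	}
}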
package proxy import ( - "k8s.io/kubernetes/pkg/apis/extensions" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" "k8s.io/ingress/core/pkg/ingress/resolver" diff --git a/core/pkg/ingress/annotations/proxy/main_test.go b/core/pkg/ingress/annotations/proxy/main_test.go index 8671c42a42..11e85dc30f 100644 --- a/core/pkg/ingress/annotations/proxy/main_test.go +++ b/core/pkg/ingress/annotations/proxy/main_test.go @@ -19,9 +19,10 @@ package proxy import ( "testing" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/util/intstr" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/defaults" ) @@ -33,7 +34,7 @@ func buildIngress() *extensions.Ingress { } return &extensions.Ingress{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, diff --git a/core/pkg/ingress/annotations/ratelimit/main.go b/core/pkg/ingress/annotations/ratelimit/main.go index 31850a90b7..06a0160b71 100644 --- a/core/pkg/ingress/annotations/ratelimit/main.go +++ b/core/pkg/ingress/annotations/ratelimit/main.go @@ -19,7 +19,7 @@ package ratelimit import ( "fmt" - "k8s.io/kubernetes/pkg/apis/extensions" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" ) diff --git a/core/pkg/ingress/annotations/ratelimit/main_test.go b/core/pkg/ingress/annotations/ratelimit/main_test.go index 46ad25d541..4718851de2 100644 --- a/core/pkg/ingress/annotations/ratelimit/main_test.go +++ b/core/pkg/ingress/annotations/ratelimit/main_test.go @@ -19,9 +19,11 @@ package ratelimit import ( "testing" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/util/intstr" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + + "k8s.io/apimachinery/pkg/util/intstr" ) func buildIngress() *extensions.Ingress { @@ -31,7 +33,7 @@ func buildIngress() *extensions.Ingress { } return &extensions.Ingress{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, diff --git a/core/pkg/ingress/annotations/rewrite/main.go b/core/pkg/ingress/annotations/rewrite/main.go index e522a275a9..771fc80a79 100644 --- a/core/pkg/ingress/annotations/rewrite/main.go +++ b/core/pkg/ingress/annotations/rewrite/main.go @@ -17,7 +17,7 @@ limitations under the License. 
package rewrite import ( - "k8s.io/kubernetes/pkg/apis/extensions" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" "k8s.io/ingress/core/pkg/ingress/resolver" diff --git a/core/pkg/ingress/annotations/rewrite/main_test.go b/core/pkg/ingress/annotations/rewrite/main_test.go index 2a32521848..38f224d9da 100644 --- a/core/pkg/ingress/annotations/rewrite/main_test.go +++ b/core/pkg/ingress/annotations/rewrite/main_test.go @@ -19,9 +19,10 @@ package rewrite import ( "testing" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/util/intstr" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/defaults" ) @@ -37,7 +38,7 @@ func buildIngress() *extensions.Ingress { } return &extensions.Ingress{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, diff --git a/core/pkg/ingress/annotations/secureupstream/main.go b/core/pkg/ingress/annotations/secureupstream/main.go index 732b577cd6..331b44b96a 100644 --- a/core/pkg/ingress/annotations/secureupstream/main.go +++ b/core/pkg/ingress/annotations/secureupstream/main.go @@ -17,7 +17,7 @@ limitations under the License. package secureupstream import ( - "k8s.io/kubernetes/pkg/apis/extensions" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" ) diff --git a/core/pkg/ingress/annotations/secureupstream/main_test.go b/core/pkg/ingress/annotations/secureupstream/main_test.go index a841e28c71..388cc8ddc4 100644 --- a/core/pkg/ingress/annotations/secureupstream/main_test.go +++ b/core/pkg/ingress/annotations/secureupstream/main_test.go @@ -19,9 +19,11 @@ package secureupstream import ( "testing" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/util/intstr" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + + "k8s.io/apimachinery/pkg/util/intstr" ) func buildIngress() *extensions.Ingress { @@ -31,7 +33,7 @@ func buildIngress() *extensions.Ingress { } return &extensions.Ingress{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, diff --git a/core/pkg/ingress/annotations/service/service.go b/core/pkg/ingress/annotations/service/service.go index 0615645c80..b268e63ed5 100644 --- a/core/pkg/ingress/annotations/service/service.go +++ b/core/pkg/ingress/annotations/service/service.go @@ -21,9 +21,9 @@ import ( "fmt" "strconv" - "k8s.io/kubernetes/pkg/api" - "github.com/golang/glog" + + api_v1 "k8s.io/client-go/pkg/api/v1" ) const ( @@ -54,7 +54,7 @@ func (npm namedPortMapping) getPortMappings() map[string]string { } // GetPortMapping returns the number of the named port or an error if is not valid -func GetPortMapping(name string, s *api.Service) (int32, error) { +func GetPortMapping(name string, s *api_v1.Service) (int32, error) { if s == nil { return -1, fmt.Errorf("impossible to extract por mapping from %v (missing service)", name) } diff --git a/core/pkg/ingress/annotations/service/service_test.go b/core/pkg/ingress/annotations/service/service_test.go index abfa0f907a..2eaf016aa3 100644 --- a/core/pkg/ingress/annotations/service/service_test.go +++ 
b/core/pkg/ingress/annotations/service/service_test.go @@ -18,9 +18,10 @@ package service import ( "encoding/json" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" "testing" + + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + api "k8s.io/client-go/pkg/api/v1" ) func fakeService(npa bool, ps bool, expectedP string) *api.Service { @@ -41,11 +42,11 @@ func fakeService(npa bool, ps bool, expectedP string) *api.Service { // fake service return &api.Service{ - TypeMeta: unversioned.TypeMeta{ + TypeMeta: meta_v1.TypeMeta{ Kind: "ingress", APIVersion: "v1", }, - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Annotations: map[string]string{ fakeNpa: string(fakePs), }, diff --git a/core/pkg/ingress/annotations/sessionaffinity/main.go b/core/pkg/ingress/annotations/sessionaffinity/main.go index d506e844ef..d6760f156e 100644 --- a/core/pkg/ingress/annotations/sessionaffinity/main.go +++ b/core/pkg/ingress/annotations/sessionaffinity/main.go @@ -21,7 +21,7 @@ import ( "github.com/golang/glog" - "k8s.io/kubernetes/pkg/apis/extensions" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" ) diff --git a/core/pkg/ingress/annotations/sessionaffinity/main_test.go b/core/pkg/ingress/annotations/sessionaffinity/main_test.go index 3a3a172004..4a44389b1e 100644 --- a/core/pkg/ingress/annotations/sessionaffinity/main_test.go +++ b/core/pkg/ingress/annotations/sessionaffinity/main_test.go @@ -19,9 +19,10 @@ package sessionaffinity import ( "testing" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/util/intstr" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" ) func buildIngress() *extensions.Ingress { @@ -31,7 +32,7 @@ func buildIngress() *extensions.Ingress { } return &extensions.Ingress{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, diff --git a/core/pkg/ingress/annotations/snippet/main.go b/core/pkg/ingress/annotations/snippet/main.go index d88cfbc326..8a6970d1b4 100644 --- a/core/pkg/ingress/annotations/snippet/main.go +++ b/core/pkg/ingress/annotations/snippet/main.go @@ -17,7 +17,7 @@ limitations under the License. 
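// Sketch, not part of the patch, of how the updated GetPortMapping helper is
// meant to be used: it resolves a named target port from the JSON map stored
// under service.NamedPortAnnotation on a client-go v1 Service. The service
// name, port name and annotation value are placeholders following the format
// used by the named-port tests.
package main

import (
	"fmt"

	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	api_v1 "k8s.io/client-go/pkg/api/v1"

	"k8s.io/ingress/core/pkg/ingress/annotations/service"
)

func main() {
	svc := &api_v1.Service{
		ObjectMeta: meta_v1.ObjectMeta{
			Name:      "demo",
			Namespace: api_v1.NamespaceDefault,
			Annotations: map[string]string{
				// port name -> port number, as written by checkSvcForUpdate
				service.NamedPortAnnotation: `{"http":"80"}`,
			},
		},
	}

	port, err := service.GetPortMapping("http", svc)
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("named port resolves to", port) // expected: 80
}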
package snippet import ( - "k8s.io/kubernetes/pkg/apis/extensions" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" ) diff --git a/core/pkg/ingress/annotations/snippet/main_test.go b/core/pkg/ingress/annotations/snippet/main_test.go index 269996a950..450a214871 100644 --- a/core/pkg/ingress/annotations/snippet/main_test.go +++ b/core/pkg/ingress/annotations/snippet/main_test.go @@ -19,8 +19,9 @@ package snippet import ( "testing" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" ) func TestParse(t *testing.T) { @@ -40,7 +41,7 @@ func TestParse(t *testing.T) { } ing := &extensions.Ingress{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, diff --git a/core/pkg/ingress/annotations/sslpassthrough/main.go b/core/pkg/ingress/annotations/sslpassthrough/main.go index 99fcd0ab3a..e6895c0685 100644 --- a/core/pkg/ingress/annotations/sslpassthrough/main.go +++ b/core/pkg/ingress/annotations/sslpassthrough/main.go @@ -17,7 +17,7 @@ limitations under the License. package sslpassthrough import ( - "k8s.io/kubernetes/pkg/apis/extensions" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" ing_errors "k8s.io/ingress/core/pkg/ingress/errors" diff --git a/core/pkg/ingress/annotations/sslpassthrough/main_test.go b/core/pkg/ingress/annotations/sslpassthrough/main_test.go index 3f1f6286be..fc4c990e5a 100644 --- a/core/pkg/ingress/annotations/sslpassthrough/main_test.go +++ b/core/pkg/ingress/annotations/sslpassthrough/main_test.go @@ -19,14 +19,16 @@ package sslpassthrough import ( "testing" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/util/intstr" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + + "k8s.io/apimachinery/pkg/util/intstr" ) func buildIngress() *extensions.Ingress { return &extensions.Ingress{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, diff --git a/core/pkg/ingress/controller/annotations.go b/core/pkg/ingress/controller/annotations.go index b0ab4218b8..75b7df684e 100644 --- a/core/pkg/ingress/controller/annotations.go +++ b/core/pkg/ingress/controller/annotations.go @@ -21,8 +21,8 @@ import ( "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/auth" "k8s.io/ingress/core/pkg/ingress/annotations/authreq" diff --git a/core/pkg/ingress/controller/annotations_test.go b/core/pkg/ingress/controller/annotations_test.go index 0b0aa5d91e..7b612a7c62 100644 --- a/core/pkg/ingress/controller/annotations_test.go +++ b/core/pkg/ingress/controller/annotations_test.go @@ -19,9 +19,10 @@ package controller import ( "testing" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/util/intstr" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/defaults" 
"k8s.io/ingress/core/pkg/ingress/resolver" @@ -75,7 +76,7 @@ func buildIngress() *extensions.Ingress { } return &extensions.Ingress{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, @@ -251,7 +252,7 @@ func TestCertificateAuthSecret(t *testing.T) { resolver := mockCfg{} resolver.MockSecrets = map[string]*api.Secret{ "default/foo_secret": { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo_secret_name", }, }, diff --git a/core/pkg/ingress/controller/backend_ssl.go b/core/pkg/ingress/controller/backend_ssl.go index 74d1da3d0b..419b057442 100644 --- a/core/pkg/ingress/controller/backend_ssl.go +++ b/core/pkg/ingress/controller/backend_ssl.go @@ -24,9 +24,9 @@ import ( "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/cache" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + "k8s.io/client-go/tools/cache" "k8s.io/ingress/core/pkg/ingress" "k8s.io/ingress/core/pkg/ingress/annotations/parser" diff --git a/core/pkg/ingress/controller/controller.go b/core/pkg/ingress/controller/controller.go index 2cc4101829..a97fe6cfc2 100644 --- a/core/pkg/ingress/controller/controller.go +++ b/core/pkg/ingress/controller/controller.go @@ -29,17 +29,17 @@ import ( "github.com/golang/glog" "github.com/kylelemons/godebug/pretty" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/cache" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - "k8s.io/kubernetes/pkg/client/record" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/util/flowcontrol" - "k8s.io/kubernetes/pkg/util/intstr" - - cache_store "k8s.io/ingress/core/pkg/cache" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/util/intstr" + clientset "k8s.io/client-go/kubernetes" + unversionedcore "k8s.io/client-go/kubernetes/typed/core/v1" + def_api "k8s.io/client-go/pkg/api" + api "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/flowcontrol" + "k8s.io/ingress/core/pkg/ingress" "k8s.io/ingress/core/pkg/ingress/annotations/class" "k8s.io/ingress/core/pkg/ingress/annotations/healthcheck" @@ -48,6 +48,7 @@ import ( "k8s.io/ingress/core/pkg/ingress/defaults" "k8s.io/ingress/core/pkg/ingress/resolver" "k8s.io/ingress/core/pkg/ingress/status" + "k8s.io/ingress/core/pkg/ingress/store" "k8s.io/ingress/core/pkg/k8s" ssl "k8s.io/ingress/core/pkg/net/ssl" local_strings "k8s.io/ingress/core/pkg/strings" @@ -70,19 +71,19 @@ var ( type GenericController struct { cfg *Configuration - ingController *cache.Controller - endpController *cache.Controller - svcController *cache.Controller - nodeController *cache.Controller - secrController *cache.Controller - mapController *cache.Controller + ingController cache.Controller + endpController cache.Controller + svcController cache.Controller + nodeController cache.Controller + secrController cache.Controller + mapController cache.Controller - ingLister cache_store.StoreToIngressLister - svcLister cache.StoreToServiceLister - nodeLister cache.StoreToNodeLister - endpLister cache.StoreToEndpointsLister - secrLister cache_store.StoreToSecretsLister - mapLister 
cache_store.StoreToConfigmapLister + ingLister store.IngressLister + svcLister store.ServiceLister + nodeLister store.NodeLister + endpLister store.EndpointLister + secrLister store.SecretLister + mapLister store.ConfigMapLister annotations annotationExtractor @@ -149,7 +150,7 @@ func newIngressController(config *Configuration) *GenericController { stopLock: &sync.Mutex{}, stopCh: make(chan struct{}), syncRateLimiter: flowcontrol.NewTokenBucketRateLimiter(0.1, 1), - recorder: eventBroadcaster.NewRecorder(api.EventSource{ + recorder: eventBroadcaster.NewRecorder(def_api.Scheme, api.EventSource{ Component: "ingress-controller", }), sslCertTracker: newSSLCertTracker(), @@ -306,12 +307,9 @@ func newIngressController(config *Configuration) *GenericController { cache.NewListWatchFromClient(ic.cfg.Client.Core().RESTClient(), "configmaps", api.NamespaceAll, fields.Everything()), &api.ConfigMap{}, ic.cfg.ResyncPeriod, mapEventHandler) - ic.svcLister.Indexer, ic.svcController = cache.NewIndexerInformer( + ic.svcLister.Store, ic.svcController = cache.NewInformer( cache.NewListWatchFromClient(ic.cfg.Client.Core().RESTClient(), "services", ic.cfg.Namespace, fields.Everything()), - &api.Service{}, - ic.cfg.ResyncPeriod, - cache.ResourceEventHandlerFuncs{}, - cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + &api.Service{}, ic.cfg.ResyncPeriod, cache.ResourceEventHandlerFuncs{}) ic.nodeLister.Store, ic.nodeController = cache.NewInformer( cache.NewListWatchFromClient(ic.cfg.Client.Core().RESTClient(), "nodes", api.NamespaceAll, fields.Everything()), @@ -498,7 +496,7 @@ func (ic *GenericController) getStreamServices(configmapName string, proto api.P continue } - svcObj, svcExists, err := ic.svcLister.Indexer.GetByKey(nsName) + svcObj, svcExists, err := ic.svcLister.Store.GetByKey(nsName) if err != nil { glog.Warningf("error getting service %v: %v", nsName, err) continue @@ -562,7 +560,7 @@ func (ic *GenericController) getDefaultUpstream() *ingress.Backend { Name: defUpstreamName, } svcKey := ic.cfg.DefaultService - svcObj, svcExists, err := ic.svcLister.Indexer.GetByKey(svcKey) + svcObj, svcExists, err := ic.svcLister.Store.GetByKey(svcKey) if err != nil { glog.Warningf("unexpected error searching the default backend %v: %v", ic.cfg.DefaultService, err) upstream.Endpoints = append(upstream.Endpoints, newDefaultServer()) @@ -624,6 +622,12 @@ func (ic *GenericController) getBackendServers() ([]*ingress.Backend, []*ingress server = servers[defServerName] } + // Add annotations to location of default backend (root context) + if len(server.Locations) > 0 { + loc := server.Locations[0] + mergeLocationAnnotations(loc, anns) + } + if rule.HTTP == nil && host != defServerName { glog.V(3).Infof("ingress rule %v/%v does not contains HTTP rules. using default backend", ing.Namespace, ing.Name) @@ -803,7 +807,7 @@ func (ic *GenericController) createUpstreams(data []interface{}) map[string]*ing // to a service. 
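// Sketch, not part of the patch, of the client-go informer wiring the
// controller now uses: cache.NewInformer returns a plain cache.Store together
// with a cache.Controller value, and lookups go through Store.GetByKey instead
// of an Indexer. The namespace and resync period below are placeholders.
package sketch

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	api "k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/tools/cache"
)

// watchServices builds the same kind of informer the controller creates for
// services after this change.
func watchServices(client kubernetes.Interface, namespace string) (cache.Store, cache.Controller) {
	return cache.NewInformer(
		cache.NewListWatchFromClient(client.Core().RESTClient(), "services", namespace, fields.Everything()),
		&api.Service{},
		10*time.Minute, // the controller takes this from its ResyncPeriod setting
		cache.ResourceEventHandlerFuncs{},
	)
}

// lookupService shows the Store.GetByKey call that replaced Indexer.GetByKey.
func lookupService(store cache.Store, key string) (*api.Service, error) {
	obj, exists, err := store.GetByKey(key) // key is "namespace/name"
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, fmt.Errorf("service %v not found", key)
	}
	return obj.(*api.Service), nil
}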
func (ic *GenericController) serviceEndpoints(svcKey, backendPort string, hz *healthcheck.Upstream) ([]ingress.Endpoint, error) { - svcObj, svcExists, err := ic.svcLister.Indexer.GetByKey(svcKey) + svcObj, svcExists, err := ic.svcLister.Store.GetByKey(svcKey) var upstreams []ingress.Endpoint if err != nil { diff --git a/core/pkg/ingress/controller/launch.go b/core/pkg/ingress/controller/launch.go index d532c06532..145ee59379 100644 --- a/core/pkg/ingress/controller/launch.go +++ b/core/pkg/ingress/controller/launch.go @@ -14,11 +14,12 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/spf13/pflag" - "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" - clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" - "k8s.io/kubernetes/pkg/healthz" + "k8s.io/apiserver/pkg/server/healthz" + "k8s.io/client-go/kubernetes" + api "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmd_api "k8s.io/client-go/tools/clientcmd/api" "k8s.io/ingress/core/pkg/ingress" "k8s.io/ingress/core/pkg/k8s" @@ -208,19 +209,35 @@ const ( defaultBurst = 1e6 ) +// buildConfigFromFlags builds a REST config based on the master URL and kubeconfig path. +// If both of them are empty then the in-cluster config is used. +func buildConfigFromFlags(masterURL, kubeconfigPath string) (*rest.Config, error) { + if kubeconfigPath == "" && masterURL == "" { + kubeconfig, err := rest.InClusterConfig() + if err != nil { + return nil, err + } + + return kubeconfig, nil + } + + return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}, + &clientcmd.ConfigOverrides{ + ClusterInfo: clientcmd_api.Cluster{ + Server: masterURL, + }, + }).ClientConfig() +} + // createApiserverClient creates a new Kubernetes Apiserver client. When kubeconfig or apiserverHost param is empty // the function assumes that it is running inside a Kubernetes cluster and attempts to // discover the Apiserver. Otherwise, it connects to the Apiserver specified. // // apiserverHost param is in the format of protocol://address:port/pathPrefix, e.g. http://localhost:8001.
// kubeConfig location of kubeconfig file -func createApiserverClient(apiserverHost string, kubeConfig string) (*client.Clientset, error) { - - clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( - &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeConfig}, - &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: apiserverHost}}) - - cfg, err := clientConfig.ClientConfig() +func createApiserverClient(apiserverHost string, kubeConfig string) (*kubernetes.Clientset, error) { + cfg, err := buildConfigFromFlags(apiserverHost, kubeConfig) if err != nil { return nil, err } @@ -231,7 +248,7 @@ func createApiserverClient(apiserverHost string, kubeConfig string) (*client.Cli glog.Infof("Creating API server client for %s", cfg.Host) - client, err := client.NewForConfig(cfg) + client, err := kubernetes.NewForConfig(cfg) if err != nil { return nil, err diff --git a/core/pkg/ingress/controller/named_port.go b/core/pkg/ingress/controller/named_port.go index da28df1359..a3a5551515 100644 --- a/core/pkg/ingress/controller/named_port.go +++ b/core/pkg/ingress/controller/named_port.go @@ -24,9 +24,10 @@ import ( "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - podutil "k8s.io/kubernetes/pkg/api/pod" - "k8s.io/kubernetes/pkg/labels" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/intstr" + api_v1 "k8s.io/client-go/pkg/api/v1" "k8s.io/ingress/core/pkg/ingress/annotations/service" ) @@ -35,11 +36,11 @@ import ( // named port. If the annotation in the service does not exist or is not equals // to the port mapping obtained from the pod the service must be updated to reflect // the current state -func (ic *GenericController) checkSvcForUpdate(svc *api.Service) error { +func (ic *GenericController) checkSvcForUpdate(svc *api_v1.Service) error { // get the pods associated with the service // TODO: switch this to a watch - pods, err := ic.cfg.Client.Core().Pods(svc.Namespace).List(api.ListOptions{ - LabelSelector: labels.Set(svc.Spec.Selector).AsSelector(), + pods, err := ic.cfg.Client.Core().Pods(svc.Namespace).List(meta_v1.ListOptions{ + LabelSelector: labels.Set(svc.Spec.Selector).AsSelector().String(), }) if err != nil { @@ -60,7 +61,7 @@ func (ic *GenericController) checkSvcForUpdate(svc *api.Service) error { _, err := strconv.Atoi(servicePort.TargetPort.StrVal) if err != nil { - portNum, err := podutil.FindPort(pod, servicePort) + portNum, err := findPort(pod, servicePort) if err != nil { glog.V(4).Infof("failed to find port for service %s/%s: %v", portNum, svc.Namespace, svc.Name, err) continue @@ -82,7 +83,7 @@ func (ic *GenericController) checkSvcForUpdate(svc *api.Service) error { if len(namedPorts) > 0 && !reflect.DeepEqual(curNamedPort, namedPorts) { data, _ := json.Marshal(namedPorts) - newSvc, err := ic.cfg.Client.Core().Services(svc.Namespace).Get(svc.Name) + newSvc, err := ic.cfg.Client.Core().Services(svc.Namespace).Get(svc.Name, meta_v1.GetOptions{}) if err != nil { return fmt.Errorf("error getting service %v/%v: %v", svc.Namespace, svc.Name, err) } @@ -103,3 +104,26 @@ func (ic *GenericController) checkSvcForUpdate(svc *api.Service) error { return nil } + +// FindPort locates the container port for the given pod and portName. If the +// targetPort is a number, use that. If the targetPort is a string, look that +// string up in all named ports in all containers in the target pod. If no +// match is found, fail. 
+func findPort(pod *api_v1.Pod, svcPort *api_v1.ServicePort) (int, error) { + portName := svcPort.TargetPort + switch portName.Type { + case intstr.String: + name := portName.StrVal + for _, container := range pod.Spec.Containers { + for _, port := range container.Ports { + if port.Name == name && port.Protocol == svcPort.Protocol { + return int(port.ContainerPort), nil + } + } + } + case intstr.Int: + return portName.IntValue(), nil + } + + return 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID) +} diff --git a/core/pkg/ingress/controller/named_port_test.go b/core/pkg/ingress/controller/named_port_test.go index c92ad9806e..0f87a4e0c4 100644 --- a/core/pkg/ingress/controller/named_port_test.go +++ b/core/pkg/ingress/controller/named_port_test.go @@ -20,51 +20,55 @@ import ( "reflect" "testing" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/pkg/api" + api_v1 "k8s.io/client-go/pkg/api/v1" "k8s.io/ingress/core/pkg/ingress/annotations/service" - "k8s.io/kubernetes/pkg/api" - testclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" - "k8s.io/kubernetes/pkg/util/intstr" ) -func buildSimpleClientSet() *testclient.Clientset { - return testclient.NewSimpleClientset( - &api.PodList{Items: []api.Pod{ - { - ObjectMeta: api.ObjectMeta{ - Name: "foo1", - Namespace: api.NamespaceDefault, - Labels: map[string]string{ - "lable_sig": "foo_pod", +func buildSimpleClientSet() *fake.Clientset { + return fake.NewSimpleClientset( + &api_v1.PodList{ + Items: []api_v1.Pod{ + { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo1", + Namespace: api.NamespaceDefault, + Labels: map[string]string{ + "lable_sig": "foo_pod", + }, }, - }, - Spec: api.PodSpec{ - NodeName: "foo_node_1", - Containers: []api.Container{ - { - Ports: []api.ContainerPort{ - { - Name: "foo1_named_port_c1", - Protocol: api.ProtocolTCP, - ContainerPort: 80, + Spec: api_v1.PodSpec{ + NodeName: "foo_node_1", + Containers: []api_v1.Container{ + { + Ports: []api_v1.ContainerPort{ + { + Name: "foo1_named_port_c1", + Protocol: api_v1.ProtocolTCP, + ContainerPort: 80, + }, }, }, }, }, }, - }, - { - ObjectMeta: api.ObjectMeta{ - Name: "foo1", - Namespace: api.NamespaceSystem, - Labels: map[string]string{ - "lable_sig": "foo_pod", + { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo1", + Namespace: api.NamespaceSystem, + Labels: map[string]string{ + "lable_sig": "foo_pod", + }, }, }, }, - }}, - &api.ServiceList{Items: []api.Service{ + }, + &api_v1.ServiceList{Items: []api_v1.Service{ { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Namespace: api.NamespaceDefault, Name: "named_port_test_service", }, @@ -81,14 +85,13 @@ func buildGenericController() *GenericController { } } -func buildService() *api.Service { - return &api.Service{ - ObjectMeta: api.ObjectMeta{ +func buildService() *api_v1.Service { + return &api_v1.Service{ + ObjectMeta: meta_v1.ObjectMeta{ Namespace: api.NamespaceSystem, Name: "named_port_test_service", }, - - Spec: api.ServiceSpec{ + Spec: api_v1.ServiceSpec{ ClusterIP: "10.10.10.10", }, } @@ -98,17 +101,17 @@ func TestCheckSvcForUpdate(t *testing.T) { foos := []struct { n string ns string - sps []api.ServicePort + sps []api_v1.ServicePort sl map[string]string er string }{ { "pods_have_not_been_found_in_this_namespace", api.NamespaceSystem, - []api.ServicePort{ - {Name: "foo_port_1", Port: 8080, Protocol: api.ProtocolTCP, TargetPort: intstr.FromString("foo1_named_port_c1")}, - {Name: 
"foo_port_2", Port: 8181, Protocol: api.ProtocolTCP, TargetPort: intstr.FromInt(81)}, - {Name: "foo_port_3", Port: 8282, Protocol: api.ProtocolTCP, TargetPort: intstr.FromString("")}, + []api_v1.ServicePort{ + {Name: "foo_port_1", Port: 8080, Protocol: api_v1.ProtocolTCP, TargetPort: intstr.FromString("foo1_named_port_c1")}, + {Name: "foo_port_2", Port: 8181, Protocol: api_v1.ProtocolTCP, TargetPort: intstr.FromInt(81)}, + {Name: "foo_port_3", Port: 8282, Protocol: api_v1.ProtocolTCP, TargetPort: intstr.FromString("")}, }, map[string]string{ "lable_sig": "foo_pod", @@ -118,10 +121,10 @@ func TestCheckSvcForUpdate(t *testing.T) { { "ports_have_not_been_found_in_this_pod", api.NamespaceDefault, - []api.ServicePort{ - {Name: "foo_port_1", Port: 8080, Protocol: api.ProtocolTCP, TargetPort: intstr.FromString("foo1_named_port_cXX")}, - {Name: "foo_port_2", Port: 8181, Protocol: api.ProtocolTCP, TargetPort: intstr.FromInt(81)}, - {Name: "foo_port_3", Port: 8282, Protocol: api.ProtocolTCP, TargetPort: intstr.FromString("")}, + []api_v1.ServicePort{ + {Name: "foo_port_1", Port: 8080, Protocol: api_v1.ProtocolTCP, TargetPort: intstr.FromString("foo1_named_port_cXX")}, + {Name: "foo_port_2", Port: 8181, Protocol: api_v1.ProtocolTCP, TargetPort: intstr.FromInt(81)}, + {Name: "foo_port_3", Port: 8282, Protocol: api_v1.ProtocolTCP, TargetPort: intstr.FromString("")}, }, map[string]string{ "lable_sig": "foo_pod", @@ -132,10 +135,10 @@ func TestCheckSvcForUpdate(t *testing.T) { { "ports_fixed", api.NamespaceDefault, - []api.ServicePort{ - {Name: "foo_port_1", Port: 8080, Protocol: api.ProtocolTCP, TargetPort: intstr.FromInt(80)}, - {Name: "foo_port_2", Port: 8181, Protocol: api.ProtocolTCP, TargetPort: intstr.FromInt(81)}, - {Name: "foo_port_3", Port: 8282, Protocol: api.ProtocolTCP, TargetPort: intstr.FromString("")}, + []api_v1.ServicePort{ + {Name: "foo_port_1", Port: 8080, Protocol: api_v1.ProtocolTCP, TargetPort: intstr.FromInt(80)}, + {Name: "foo_port_2", Port: 8181, Protocol: api_v1.ProtocolTCP, TargetPort: intstr.FromInt(81)}, + {Name: "foo_port_3", Port: 8282, Protocol: api_v1.ProtocolTCP, TargetPort: intstr.FromString("")}, }, map[string]string{ "lable_sig": "foo_pod", @@ -145,10 +148,10 @@ func TestCheckSvcForUpdate(t *testing.T) { { "nil_selector", api.NamespaceDefault, - []api.ServicePort{ - {Name: "foo_port_1", Port: 8080, Protocol: api.ProtocolTCP, TargetPort: intstr.FromString("foo1_named_port_c1")}, - {Name: "foo_port_2", Port: 8181, Protocol: api.ProtocolTCP, TargetPort: intstr.FromInt(81)}, - {Name: "foo_port_3", Port: 8282, Protocol: api.ProtocolTCP, TargetPort: intstr.FromString("")}, + []api_v1.ServicePort{ + {Name: "foo_port_1", Port: 8080, Protocol: api_v1.ProtocolTCP, TargetPort: intstr.FromString("foo1_named_port_c1")}, + {Name: "foo_port_2", Port: 8181, Protocol: api_v1.ProtocolTCP, TargetPort: intstr.FromInt(81)}, + {Name: "foo_port_3", Port: 8282, Protocol: api_v1.ProtocolTCP, TargetPort: intstr.FromString("")}, }, nil, "{\"foo1_named_port_c1\":\"80\"}", @@ -156,10 +159,10 @@ func TestCheckSvcForUpdate(t *testing.T) { { "normal_update", api.NamespaceDefault, - []api.ServicePort{ - {Name: "foo_port_1", Port: 8080, Protocol: api.ProtocolTCP, TargetPort: intstr.FromString("foo1_named_port_c1")}, - {Name: "foo_port_2", Port: 8181, Protocol: api.ProtocolTCP, TargetPort: intstr.FromInt(81)}, - {Name: "foo_port_3", Port: 8282, Protocol: api.ProtocolTCP, TargetPort: intstr.FromString("")}, + []api_v1.ServicePort{ + {Name: "foo_port_1", Port: 8080, Protocol: api_v1.ProtocolTCP, 
TargetPort: intstr.FromString("foo1_named_port_c1")}, + {Name: "foo_port_2", Port: 8181, Protocol: api_v1.ProtocolTCP, TargetPort: intstr.FromInt(81)}, + {Name: "foo_port_3", Port: 8282, Protocol: api_v1.ProtocolTCP, TargetPort: intstr.FromString("")}, }, map[string]string{ "lable_sig": "foo_pod", @@ -181,7 +184,7 @@ func TestCheckSvcForUpdate(t *testing.T) { t.Fatalf("unexpected error: %v", err) } - rs, _ := gc.cfg.Client.Core().Services(api.NamespaceDefault).Get("named_port_test_service") + rs, _ := gc.cfg.Client.Core().Services(api.NamespaceDefault).Get("named_port_test_service", meta_v1.GetOptions{}) rr := rs.ObjectMeta.Annotations[service.NamedPortAnnotation] if !reflect.DeepEqual(rr, foo.er) { t.Errorf("Returned %s, but expected %s for %s", rr, foo.er, foo.n) diff --git a/core/pkg/ingress/controller/util_test.go b/core/pkg/ingress/controller/util_test.go index b4da882d23..2aa6d5a205 100644 --- a/core/pkg/ingress/controller/util_test.go +++ b/core/pkg/ingress/controller/util_test.go @@ -17,9 +17,8 @@ limitations under the License. package controller import ( - "testing" - "reflect" + "testing" "k8s.io/ingress/core/pkg/ingress" "k8s.io/ingress/core/pkg/ingress/annotations/auth" diff --git a/core/pkg/ingress/resolver/main.go b/core/pkg/ingress/resolver/main.go index a11b35f58b..0a701fb312 100644 --- a/core/pkg/ingress/resolver/main.go +++ b/core/pkg/ingress/resolver/main.go @@ -17,7 +17,7 @@ limitations under the License. package resolver import ( - "k8s.io/kubernetes/pkg/api" + api "k8s.io/client-go/pkg/api/v1" "k8s.io/ingress/core/pkg/ingress/defaults" ) diff --git a/core/pkg/ingress/sort_ingress.go b/core/pkg/ingress/sort_ingress.go index cc5f2d76d8..5498468444 100644 --- a/core/pkg/ingress/sort_ingress.go +++ b/core/pkg/ingress/sort_ingress.go @@ -17,8 +17,8 @@ limitations under the License. 
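// Small sketch, not part of the patch, of the intstr semantics the findPort
// helper added above relies on: a string TargetPort names a container port and
// is matched against pod container port names, while an int TargetPort is used
// directly. Port names and numbers are placeholders.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
	api_v1 "k8s.io/client-go/pkg/api/v1"
)

func main() {
	byName := api_v1.ServicePort{Name: "web", Protocol: api_v1.ProtocolTCP, TargetPort: intstr.FromString("http")}
	byNumber := api_v1.ServicePort{Name: "web", Protocol: api_v1.ProtocolTCP, TargetPort: intstr.FromInt(8080)}

	// findPort switches on the Type field of the IntOrString value.
	fmt.Println(byName.TargetPort.Type == intstr.String) // true: resolved against container port names
	fmt.Println(byNumber.TargetPort.IntValue())          // 8080: used as-is
}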
package ingress import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" ) // BackendByNameServers sorts upstreams by name @@ -69,7 +69,7 @@ func (c LocationByPath) Less(i, j int) bool { // SSLCert describes a SSL certificate to be used in a server type SSLCert struct { - api.ObjectMeta `json:"metadata,omitempty"` + meta_v1.ObjectMeta `json:"metadata,omitempty"` // CAFileName contains the path to the file with the root certificate CAFileName string `json:"caFileName"` // PemFileName contains the path to the file with the certificate and key concatenated @@ -82,4 +82,6 @@ type SSLCert struct { } // GetObjectKind implements the ObjectKind interface as a noop -func (s SSLCert) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind } +func (s SSLCert) GetObjectKind() schema.ObjectKind { + return schema.EmptyObjectKind +} diff --git a/core/pkg/ingress/sort_ingress_test.go b/core/pkg/ingress/sort_ingress_test.go index 52ceb7029b..8c6010a989 100644 --- a/core/pkg/ingress/sort_ingress_test.go +++ b/core/pkg/ingress/sort_ingress_test.go @@ -19,7 +19,7 @@ package ingress import ( "testing" - "k8s.io/kubernetes/pkg/api" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func buildBackendByNameServers() BackendByNameServers { @@ -365,7 +365,7 @@ func TestLocationByPathLess(t *testing.T) { func TestGetObjectKindForSSLCert(t *testing.T) { fk := &SSLCert{ - ObjectMeta: api.ObjectMeta{}, + ObjectMeta: meta_v1.ObjectMeta{}, CAFileName: "ca_file", PemFileName: "pemfile", PemSHA: "pem_sha", diff --git a/core/pkg/ingress/status/election.go b/core/pkg/ingress/status/election.go index 0710f1d825..17fc0dd0f6 100644 --- a/core/pkg/ingress/status/election.go +++ b/core/pkg/ingress/status/election.go @@ -23,16 +23,19 @@ import ( "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/client/leaderelection" - "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock" - "k8s.io/kubernetes/pkg/client/record" + "k8s.io/apimachinery/pkg/api/errors" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + client "k8s.io/client-go/kubernetes" + def_api "k8s.io/client-go/pkg/api" + api "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/record" + + "k8s.io/ingress/core/pkg/ingress/status/leaderelection" + "k8s.io/ingress/core/pkg/ingress/status/leaderelection/resourcelock" ) func getCurrentLeader(electionID, namespace string, c client.Interface) (string, *api.Endpoints, error) { - endpoints, err := c.Core().Endpoints(namespace).Get(electionID) + endpoints, err := c.Core().Endpoints(namespace).Get(electionID, meta_v1.GetOptions{}) if err != nil { return "", nil, err } @@ -56,11 +59,11 @@ func NewElection(electionID, callback func(leader string), c client.Interface) (*leaderelection.LeaderElector, error) { - _, err := c.Core().Endpoints(namespace).Get(electionID) + _, err := c.Core().Endpoints(namespace).Get(electionID, meta_v1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { _, err = c.Core().Endpoints(namespace).Create(&api.Endpoints{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: electionID, }, }) @@ -93,13 +96,13 @@ func NewElection(electionID, if err != nil { return nil, err } - recorder := broadcaster.NewRecorder(api.EventSource{ + recorder := broadcaster.NewRecorder(def_api.Scheme, api.EventSource{ Component: 
"ingress-leader-elector", Host: hostname, }) lock := resourcelock.EndpointsLock{ - EndpointsMeta: api.ObjectMeta{Namespace: namespace, Name: electionID}, + EndpointsMeta: meta_v1.ObjectMeta{Namespace: namespace, Name: electionID}, Client: c, LockConfig: resourcelock.ResourceLockConfig{ Identity: id, diff --git a/core/pkg/ingress/status/election_test.go b/core/pkg/ingress/status/election_test.go index 4726aa8afe..3e6504d00d 100644 --- a/core/pkg/ingress/status/election_test.go +++ b/core/pkg/ingress/status/election_test.go @@ -21,23 +21,25 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - tc "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" - "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/pkg/api" + api_v1 "k8s.io/client-go/pkg/api/v1" + + "k8s.io/ingress/core/pkg/ingress/status/leaderelection/resourcelock" ) func TestGetCurrentLeaderLeaderExist(t *testing.T) { fkER := resourcelock.LeaderElectionRecord{ HolderIdentity: "currentLeader", LeaseDurationSeconds: 30, - AcquireTime: unversioned.Now(), - RenewTime: unversioned.Now(), + AcquireTime: meta_v1.NewTime(time.Now()), + RenewTime: meta_v1.NewTime(time.Now()), LeaderTransitions: 3, } leaderInfo, _ := json.Marshal(fkER) - fkEndpoints := api.Endpoints{ - ObjectMeta: api.ObjectMeta{ + fkEndpoints := api_v1.Endpoints{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "ingress-controller-test", Namespace: api.NamespaceSystem, Annotations: map[string]string{ @@ -45,7 +47,7 @@ func TestGetCurrentLeaderLeaderExist(t *testing.T) { }, }, } - fk := tc.NewSimpleClientset(&api.EndpointsList{Items: []api.Endpoints{fkEndpoints}}) + fk := fake.NewSimpleClientset(&api_v1.EndpointsList{Items: []api_v1.Endpoints{fkEndpoints}}) identity, endpoints, err := getCurrentLeader("ingress-controller-test", api.NamespaceSystem, fk) if err != nil { t.Fatalf("expected identitiy and endpoints but returned error %s", err) @@ -61,14 +63,14 @@ func TestGetCurrentLeaderLeaderExist(t *testing.T) { } func TestGetCurrentLeaderLeaderNotExist(t *testing.T) { - fkEndpoints := api.Endpoints{ - ObjectMeta: api.ObjectMeta{ + fkEndpoints := api_v1.Endpoints{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "ingress-controller-test", Namespace: api.NamespaceSystem, Annotations: map[string]string{}, }, } - fk := tc.NewSimpleClientset(&api.EndpointsList{Items: []api.Endpoints{fkEndpoints}}) + fk := fake.NewSimpleClientset(&api_v1.EndpointsList{Items: []api_v1.Endpoints{fkEndpoints}}) identity, endpoints, err := getCurrentLeader("ingress-controller-test", api.NamespaceSystem, fk) if err != nil { t.Fatalf("unexpeted error: %v", err) @@ -84,8 +86,8 @@ func TestGetCurrentLeaderLeaderNotExist(t *testing.T) { } func TestGetCurrentLeaderAnnotationError(t *testing.T) { - fkEndpoints := api.Endpoints{ - ObjectMeta: api.ObjectMeta{ + fkEndpoints := api_v1.Endpoints{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "ingress-controller-test", Namespace: api.NamespaceSystem, Annotations: map[string]string{ @@ -93,7 +95,7 @@ func TestGetCurrentLeaderAnnotationError(t *testing.T) { }, }, } - fk := tc.NewSimpleClientset(&api.EndpointsList{Items: []api.Endpoints{fkEndpoints}}) + fk := fake.NewSimpleClientset(&api_v1.EndpointsList{Items: []api_v1.Endpoints{fkEndpoints}}) _, _, err := getCurrentLeader("ingress-controller-test", api.NamespaceSystem, fk) if err == nil { t.Errorf("expected error") @@ -101,15 +103,15 @@ func 
TestGetCurrentLeaderAnnotationError(t *testing.T) { } func TestNewElection(t *testing.T) { - fk := tc.NewSimpleClientset(&api.EndpointsList{Items: []api.Endpoints{ + fk := fake.NewSimpleClientset(&api_v1.EndpointsList{Items: []api_v1.Endpoints{ { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "ingress-controller-test", Namespace: api.NamespaceSystem, }, }, { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "ingress-controller-test-020", Namespace: api.NamespaceSystem, }, diff --git a/vendor/k8s.io/kubernetes/pkg/client/leaderelection/leaderelection.go b/core/pkg/ingress/status/leaderelection/leaderelection.go similarity index 82% rename from vendor/k8s.io/kubernetes/pkg/client/leaderelection/leaderelection.go rename to core/pkg/ingress/status/leaderelection/leaderelection.go index a5c752dc8c..c7f2c2b7d3 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/leaderelection/leaderelection.go +++ b/core/pkg/ingress/status/leaderelection/leaderelection.go @@ -53,12 +53,11 @@ import ( "reflect" "time" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/componentconfig" - rl "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock" - "k8s.io/kubernetes/pkg/util/runtime" - "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + rl "k8s.io/ingress/core/pkg/ingress/status/leaderelection/resourcelock" "github.com/golang/glog" "github.com/spf13/pflag" @@ -71,6 +70,31 @@ const ( DefaultRetryPeriod = 2 * time.Second ) +// LeaderElectionConfiguration defines the configuration of leader election +// clients for components that can run with leader election enabled. +type LeaderElectionConfiguration struct { + // leaderElect enables a leader election client to gain leadership + // before executing the main loop. Enable this when running replicated + // components for high availability. + LeaderElect bool + // leaseDuration is the duration that non-leader candidates will wait + // after observing a leadership renewal until attempting to acquire + // leadership of a led but unrenewed leader slot. This is effectively the + // maximum duration that a leader can be stopped before it is replaced + // by another candidate. This is only applicable if leader election is + // enabled. + LeaseDuration metav1.Duration + // renewDeadline is the interval between attempts by the acting master to + // renew a leadership slot before it stops leading. This must be less + // than or equal to the lease duration. This is only applicable if leader + // election is enabled. + RenewDeadline metav1.Duration + // retryPeriod is the duration the clients should wait between attempting + // acquisition and renewal of a leadership. This is only applicable if + // leader election is enabled. + RetryPeriod metav1.Duration +} + // NewLeadereElector creates a LeaderElector from a LeaderElecitionConfig func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) { if lec.LeaseDuration <= lec.RenewDeadline { @@ -176,16 +200,17 @@ func (le *LeaderElector) IsLeader() bool { // acquire loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew succeeds. 
func (le *LeaderElector) acquire() { stop := make(chan struct{}) + glog.Infof("attempting to acquire leader lease...") wait.JitterUntil(func() { succeeded := le.tryAcquireOrRenew() le.maybeReportTransition() desc := le.config.Lock.Describe() if !succeeded { - glog.V(4).Infof("failed to renew lease %v", desc) + glog.V(4).Infof("failed to acquire lease %v", desc) return } le.config.Lock.RecordEvent("became leader") - glog.Infof("sucessfully acquired lease %v", desc) + glog.Infof("successfully acquired lease %v", desc) close(stop) }, le.config.RetryPeriod, JitterFactor, true, stop) } @@ -213,7 +238,7 @@ func (le *LeaderElector) renew() { // else it tries to renew the lease if it has already been acquired. Returns true // on success else returns false. func (le *LeaderElector) tryAcquireOrRenew() bool { - now := unversioned.Now() + now := metav1.Now() leaderElectionRecord := rl.LeaderElectionRecord{ HolderIdentity: le.config.Lock.Identity(), LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second), @@ -244,7 +269,7 @@ func (le *LeaderElector) tryAcquireOrRenew() bool { } if le.observedTime.Add(le.config.LeaseDuration).After(now.Time) && oldLeaderElectionRecord.HolderIdentity != le.config.Lock.Identity() { - glog.Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity) + glog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity) return false } @@ -252,6 +277,7 @@ func (le *LeaderElector) tryAcquireOrRenew() bool { // here. Let's correct it before updating. if oldLeaderElectionRecord.HolderIdentity == le.config.Lock.Identity() { leaderElectionRecord.AcquireTime = oldLeaderElectionRecord.AcquireTime + leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions } else { leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + 1 } @@ -276,17 +302,17 @@ func (l *LeaderElector) maybeReportTransition() { } } -func DefaultLeaderElectionConfiguration() componentconfig.LeaderElectionConfiguration { - return componentconfig.LeaderElectionConfiguration{ +func DefaultLeaderElectionConfiguration() LeaderElectionConfiguration { + return LeaderElectionConfiguration{ LeaderElect: false, - LeaseDuration: unversioned.Duration{Duration: DefaultLeaseDuration}, - RenewDeadline: unversioned.Duration{Duration: DefaultRenewDeadline}, - RetryPeriod: unversioned.Duration{Duration: DefaultRetryPeriod}, + LeaseDuration: metav1.Duration{Duration: DefaultLeaseDuration}, + RenewDeadline: metav1.Duration{Duration: DefaultRenewDeadline}, + RetryPeriod: metav1.Duration{Duration: DefaultRetryPeriod}, } } // BindFlags binds the common LeaderElectionCLIConfig flags to a flagset -func BindFlags(l *componentconfig.LeaderElectionConfiguration, fs *pflag.FlagSet) { +func BindFlags(l *LeaderElectionConfiguration, fs *pflag.FlagSet) { fs.BoolVar(&l.LeaderElect, "leader-elect", l.LeaderElect, ""+ "Start a leader election client and gain leadership before "+ "executing the main loop. 
Enable this when running replicated "+ diff --git a/vendor/k8s.io/kubernetes/pkg/client/leaderelection/resourcelock/endpointslock.go b/core/pkg/ingress/status/leaderelection/resourcelock/endpointslock.go similarity index 86% rename from vendor/k8s.io/kubernetes/pkg/client/leaderelection/resourcelock/endpointslock.go rename to core/pkg/ingress/status/leaderelection/resourcelock/endpointslock.go index 56749661df..26d5782b62 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/leaderelection/resourcelock/endpointslock.go +++ b/core/pkg/ingress/status/leaderelection/resourcelock/endpointslock.go @@ -21,23 +21,24 @@ import ( "errors" "fmt" - "k8s.io/kubernetes/pkg/api" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/pkg/api/v1" ) type EndpointsLock struct { // EndpointsMeta should contain a Name and a Namespace of an // Endpoints object that the LeaderElector will attempt to lead. - EndpointsMeta api.ObjectMeta + EndpointsMeta meta_v1.ObjectMeta Client clientset.Interface LockConfig ResourceLockConfig - e *api.Endpoints + e *v1.Endpoints } func (el *EndpointsLock) Get() (*LeaderElectionRecord, error) { var record LeaderElectionRecord var err error - el.e, err = el.Client.Core().Endpoints(el.EndpointsMeta.Namespace).Get(el.EndpointsMeta.Name) + el.e, err = el.Client.Core().Endpoints(el.EndpointsMeta.Namespace).Get(el.EndpointsMeta.Name, meta_v1.GetOptions{}) if err != nil { return nil, err } @@ -58,8 +59,8 @@ func (el *EndpointsLock) Create(ler LeaderElectionRecord) error { if err != nil { return err } - el.e, err = el.Client.Core().Endpoints(el.EndpointsMeta.Namespace).Create(&api.Endpoints{ - ObjectMeta: api.ObjectMeta{ + el.e, err = el.Client.Core().Endpoints(el.EndpointsMeta.Namespace).Create(&v1.Endpoints{ + ObjectMeta: meta_v1.ObjectMeta{ Name: el.EndpointsMeta.Name, Namespace: el.EndpointsMeta.Namespace, Annotations: map[string]string{ @@ -87,7 +88,7 @@ func (el *EndpointsLock) Update(ler LeaderElectionRecord) error { // RecordEvent in leader election while adding meta-data func (el *EndpointsLock) RecordEvent(s string) { events := fmt.Sprintf("%v %v", el.LockConfig.Identity, s) - el.LockConfig.EventRecorder.Eventf(&api.Endpoints{ObjectMeta: el.e.ObjectMeta}, api.EventTypeNormal, "LeaderElection", events) + el.LockConfig.EventRecorder.Eventf(&v1.Endpoints{ObjectMeta: el.e.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events) } // Describe is used to convert details on current resource lock diff --git a/core/pkg/ingress/status/leaderelection/resourcelock/interface.go b/core/pkg/ingress/status/leaderelection/resourcelock/interface.go new file mode 100644 index 0000000000..7b3775b9b2 --- /dev/null +++ b/core/pkg/ingress/status/leaderelection/resourcelock/interface.go @@ -0,0 +1,71 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resourcelock + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" +) + +const ( + LeaderElectionRecordAnnotationKey = "control-plane.alpha.kubernetes.io/leader" +) + +// LeaderElectionRecord is the record that is stored in the leader election annotation. +// This information should be used for observational purposes only and could be replaced +// with a random string (e.g. UUID) with only slight modification of this code. +// TODO(mikedanese): this should potentially be versioned +type LeaderElectionRecord struct { + HolderIdentity string `json:"holderIdentity"` + LeaseDurationSeconds int `json:"leaseDurationSeconds"` + AcquireTime metav1.Time `json:"acquireTime"` + RenewTime metav1.Time `json:"renewTime"` + LeaderTransitions int `json:"leaderTransitions"` +} + +// ResourceLockConfig is common data that exists across different +// resource locks +type ResourceLockConfig struct { + Identity string + EventRecorder record.EventRecorder +} + +// Interface offers a common interface for locking on arbitrary +// resources used in leader election. The Interface is used +// to hide the details on specific implementations in order to allow +// them to change over time. This interface is strictly for use +// by the leaderelection code. +type Interface interface { + // Get returns the LeaderElectionRecord + Get() (*LeaderElectionRecord, error) + + // Create attempts to create a LeaderElectionRecord + Create(ler LeaderElectionRecord) error + + // Update will update an existing LeaderElectionRecord + Update(ler LeaderElectionRecord) error + + // RecordEvent is used to record events + RecordEvent(string) + + // Identity will return the lock's Identity + Identity() string + + // Describe is used to convert details on current resource lock + // into a string + Describe() string +} diff --git a/core/pkg/ingress/status/status.go b/core/pkg/ingress/status/status.go index 6e54d422b2..05af3329ec 100644 --- a/core/pkg/ingress/status/status.go +++ b/core/pkg/ingress/status/status.go @@ -24,15 +24,16 @@ import ( "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/client/leaderelection" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/util/wait" - - cache_store "k8s.io/ingress/core/pkg/cache" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + api_v1 "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + "k8s.io/ingress/core/pkg/ingress/annotations/class" + "k8s.io/ingress/core/pkg/ingress/status/leaderelection" + "k8s.io/ingress/core/pkg/ingress/store" "k8s.io/ingress/core/pkg/k8s" "k8s.io/ingress/core/pkg/strings" "k8s.io/ingress/core/pkg/task" @@ -52,7 +53,7 @@ type Sync interface { type Config struct { Client clientset.Interface PublishService string - IngressLister cache_store.StoreToIngressLister + IngressLister store.IngressLister ElectionID string DefaultIngressClass string @@ -111,7 +112,7 @@ func (s statusSync) Shutdown() { } glog.Infof("removing address from ingress status (%v)", addrs) - s.updateStatus([]api.LoadBalancerIngress{}) + s.updateStatus([]api_v1.LoadBalancerIngress{}) } func (s *statusSync) run() { @@ -132,6 +133,11 @@ func (s *statusSync) sync(key interface{}) error { s.runLock.Lock() defer s.runLock.Unlock() + if 
s.syncQueue.IsShuttingDown() { + glog.V(2).Infof("skipping Ingress status update (shutting down in progress)") + return nil + } + if !s.elector.IsLeader() { glog.V(2).Infof("skipping Ingress status update (I am not the current leader)") return nil @@ -191,7 +197,7 @@ func NewStatusSyncer(config Config) Sync { func (s *statusSync) runningAddresess() ([]string, error) { if s.PublishService != "" { ns, name, _ := k8s.ParseNameNS(s.PublishService) - svc, err := s.Client.Core().Services(ns).Get(name) + svc, err := s.Client.Core().Services(ns).Get(name, meta_v1.GetOptions{}) if err != nil { return nil, err } @@ -209,8 +215,8 @@ func (s *statusSync) runningAddresess() ([]string, error) { } // get information about all the pods running the ingress controller - pods, err := s.Client.Core().Pods(s.pod.Namespace).List(api.ListOptions{ - LabelSelector: labels.SelectorFromSet(s.pod.Labels), + pods, err := s.Client.Core().Pods(s.pod.Namespace).List(meta_v1.ListOptions{ + LabelSelector: labels.SelectorFromSet(s.pod.Labels).String(), }) if err != nil { return nil, err @@ -227,13 +233,13 @@ func (s *statusSync) runningAddresess() ([]string, error) { } // sliceToStatus converts a slice of IP and/or hostnames to LoadBalancerIngress -func sliceToStatus(endpoints []string) []api.LoadBalancerIngress { - lbi := []api.LoadBalancerIngress{} +func sliceToStatus(endpoints []string) []api_v1.LoadBalancerIngress { + lbi := []api_v1.LoadBalancerIngress{} for _, ep := range endpoints { if net.ParseIP(ep) == nil { - lbi = append(lbi, api.LoadBalancerIngress{Hostname: ep}) + lbi = append(lbi, api_v1.LoadBalancerIngress{Hostname: ep}) } else { - lbi = append(lbi, api.LoadBalancerIngress{IP: ep}) + lbi = append(lbi, api_v1.LoadBalancerIngress{IP: ep}) } } @@ -241,7 +247,7 @@ func sliceToStatus(endpoints []string) []api.LoadBalancerIngress { return lbi } -func (s *statusSync) updateStatus(newIPs []api.LoadBalancerIngress) { +func (s *statusSync) updateStatus(newIPs []api_v1.LoadBalancerIngress) { ings := s.IngressLister.List() var wg sync.WaitGroup wg.Add(len(ings)) @@ -249,13 +255,14 @@ func (s *statusSync) updateStatus(newIPs []api.LoadBalancerIngress) { ing := cur.(*extensions.Ingress) if !class.IsValid(ing, s.Config.IngressClass, s.Config.DefaultIngressClass) { + wg.Done() continue } go func(wg *sync.WaitGroup, ing *extensions.Ingress) { defer wg.Done() ingClient := s.Client.Extensions().Ingresses(ing.Namespace) - currIng, err := ingClient.Get(ing.Name) + currIng, err := ingClient.Get(ing.Name, meta_v1.GetOptions{}) if err != nil { glog.Errorf("unexpected error searching Ingress %v/%v: %v", ing.Namespace, ing.Name, err) return @@ -280,7 +287,7 @@ func (s *statusSync) updateStatus(newIPs []api.LoadBalancerIngress) { wg.Wait() } -func ingressSliceEqual(lhs, rhs []api.LoadBalancerIngress) bool { +func ingressSliceEqual(lhs, rhs []api_v1.LoadBalancerIngress) bool { if len(lhs) != len(rhs) { return false } @@ -297,7 +304,7 @@ func ingressSliceEqual(lhs, rhs []api.LoadBalancerIngress) bool { } // loadBalancerIngressByIP sorts LoadBalancerIngress using the field IP -type loadBalancerIngressByIP []api.LoadBalancerIngress +type loadBalancerIngressByIP []api_v1.LoadBalancerIngress func (c loadBalancerIngressByIP) Len() int { return len(c) } func (c loadBalancerIngressByIP) Swap(i, j int) { c[i], c[j] = c[j], c[i] } diff --git a/core/pkg/ingress/status/status_test.go b/core/pkg/ingress/status/status_test.go index 57e9f1e840..e5b3f26f60 100644 --- a/core/pkg/ingress/status/status_test.go +++ b/core/pkg/ingress/status/status_test.go 
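The resourcelock package introduced above is small enough to exercise on its own. The snippet below is an illustrative sketch only, not part of the patch: it drives an `EndpointsLock` against a client-go fake clientset (as the tests above do), with made-up namespace, election and identity names, mirroring what election.go and `tryAcquireOrRenew` do with the real client.

```go
package main

import (
	"fmt"

	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"

	"k8s.io/ingress/core/pkg/ingress/status/leaderelection/resourcelock"
)

func main() {
	// A fake clientset is enough to exercise the lock; election.go passes the real one.
	client := fake.NewSimpleClientset()

	lock := resourcelock.EndpointsLock{
		EndpointsMeta: meta_v1.ObjectMeta{Namespace: "kube-system", Name: "ingress-controller-leader"},
		Client:        client,
		LockConfig: resourcelock.ResourceLockConfig{
			Identity: "pod-on-this-node", // made-up identity; the controller uses its pod name
			// EventRecorder omitted: it is only needed by RecordEvent.
		},
	}

	// Create stores the record as an annotation on a new Endpoints object.
	err := lock.Create(resourcelock.LeaderElectionRecord{
		HolderIdentity:       "pod-on-this-node",
		LeaseDurationSeconds: 30,
		AcquireTime:          meta_v1.Now(),
		RenewTime:            meta_v1.Now(),
	})
	if err != nil {
		panic(err)
	}

	// Get reads the annotation back, which is what tryAcquireOrRenew does on every tick.
	record, err := lock.Get()
	if err != nil {
		panic(err)
	}
	fmt.Println(lock.Describe(), record.HolderIdentity)
}
```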
@@ -23,18 +23,21 @@ import ( "testing" "time" - cache_store "k8s.io/ingress/core/pkg/cache" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + testclient "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/pkg/api" + api_v1 "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + "k8s.io/client-go/tools/cache" + + "k8s.io/ingress/core/pkg/ingress/annotations/class" + cache_store "k8s.io/ingress/core/pkg/ingress/store" "k8s.io/ingress/core/pkg/k8s" "k8s.io/ingress/core/pkg/task" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/cache" - testclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" - "k8s.io/kubernetes/pkg/util/sets" ) func buildLoadBalancerIngressByIP() loadBalancerIngressByIP { - return []api.LoadBalancerIngress{ + return []api_v1.LoadBalancerIngress{ { IP: "10.0.0.1", Hostname: "foo1", @@ -56,100 +59,101 @@ func buildLoadBalancerIngressByIP() loadBalancerIngressByIP { func buildSimpleClientSet() *testclient.Clientset { return testclient.NewSimpleClientset( - &api.PodList{Items: []api.Pod{ + &api_v1.PodList{Items: []api_v1.Pod{ { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo1", - Namespace: api.NamespaceDefault, + Namespace: api_v1.NamespaceDefault, Labels: map[string]string{ "lable_sig": "foo_pod", }, }, - Spec: api.PodSpec{ + Spec: api_v1.PodSpec{ NodeName: "foo_node_2", }, }, { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo2", - Namespace: api.NamespaceDefault, + Namespace: api_v1.NamespaceDefault, Labels: map[string]string{ "lable_sig": "foo_no", }, }, }, { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo3", Namespace: api.NamespaceSystem, Labels: map[string]string{ "lable_sig": "foo_pod", }, }, - Spec: api.PodSpec{ + Spec: api_v1.PodSpec{ NodeName: "foo_node_2", }, }, }}, - &api.ServiceList{Items: []api.Service{ + &api_v1.ServiceList{Items: []api_v1.Service{ { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: api_v1.NamespaceDefault, }, - Status: api.ServiceStatus{ - LoadBalancer: api.LoadBalancerStatus{ + Status: api_v1.ServiceStatus{ + LoadBalancer: api_v1.LoadBalancerStatus{ Ingress: buildLoadBalancerIngressByIP(), }, }, }, { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo_non_exist", - Namespace: api.NamespaceDefault, + Namespace: api_v1.NamespaceDefault, }, }, }}, - &api.NodeList{Items: []api.Node{ + &api_v1.NodeList{Items: []api_v1.Node{ { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo_node_1", }, - Status: api.NodeStatus{ - Addresses: []api.NodeAddress{ + Status: api_v1.NodeStatus{ + Addresses: []api_v1.NodeAddress{ { - Type: api.NodeLegacyHostIP, + Type: api_v1.NodeLegacyHostIP, Address: "10.0.0.1", }, { - Type: api.NodeExternalIP, + Type: api_v1.NodeExternalIP, Address: "10.0.0.2", }, }, }, }, { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo_node_2", }, - Status: api.NodeStatus{ - Addresses: []api.NodeAddress{ + Status: api_v1.NodeStatus{ + Addresses: []api_v1.NodeAddress{ { - Type: api.NodeLegacyHostIP, + Type: api_v1.NodeLegacyHostIP, Address: "11.0.0.1", }, { - Type: api.NodeExternalIP, + Type: api_v1.NodeExternalIP, Address: "11.0.0.2", }, }, }, }, }}, - &api.EndpointsList{Items: []api.Endpoints{ + &api_v1.EndpointsList{Items: []api_v1.Endpoints{ { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: 
meta_v1.ObjectMeta{ Name: "ingress-controller-leader", - Namespace: api.NamespaceDefault, + Namespace: api_v1.NamespaceDefault, + SelfLink: "/api/v1/namespaces/default/endpoints/ingress-controller-leader", }, }}}, &extensions.IngressList{Items: buildExtensionsIngresses()}, @@ -163,13 +167,13 @@ func fakeSynFn(interface{}) error { func buildExtensionsIngresses() []extensions.Ingress { return []extensions.Ingress{ { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo_ingress_1", - Namespace: api.NamespaceDefault, + Namespace: api_v1.NamespaceDefault, }, Status: extensions.IngressStatus{ - LoadBalancer: api.LoadBalancerStatus{ - Ingress: []api.LoadBalancerIngress{ + LoadBalancer: api_v1.LoadBalancerStatus{ + Ingress: []api_v1.LoadBalancerIngress{ { IP: "10.0.0.1", Hostname: "foo1", @@ -179,48 +183,64 @@ func buildExtensionsIngresses() []extensions.Ingress { }, }, { - ObjectMeta: api.ObjectMeta{ - Name: "foo_ingress_2", + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo_ingress_different_class", Namespace: api.NamespaceDefault, + Annotations: map[string]string{ + class.IngressKey: "no-nginx", + }, }, Status: extensions.IngressStatus{ - LoadBalancer: api.LoadBalancerStatus{ - Ingress: []api.LoadBalancerIngress{}, + LoadBalancer: api_v1.LoadBalancerStatus{ + Ingress: []api_v1.LoadBalancerIngress{ + { + IP: "0.0.0.0", + Hostname: "foo.bar.com", + }, + }, + }, + }, + }, + { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo_ingress_2", + Namespace: api_v1.NamespaceDefault, + }, + Status: extensions.IngressStatus{ + LoadBalancer: api_v1.LoadBalancerStatus{ + Ingress: []api_v1.LoadBalancerIngress{}, }, }, }, } } -func buildIngressLIstener() cache_store.StoreToIngressLister { +func buildIngressListener() cache_store.IngressLister { store := cache.NewStore(cache.MetaNamespaceKeyFunc) - ids := sets.NewString("foo_ingress_non_01") - for id := range ids { - store.Add(&extensions.Ingress{ - ObjectMeta: api.ObjectMeta{ - Name: id, - Namespace: api.NamespaceDefault, - }}) - } store.Add(&extensions.Ingress{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo_ingress_non_01", + Namespace: api_v1.NamespaceDefault, + }}) + store.Add(&extensions.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "foo_ingress_1", - Namespace: api.NamespaceDefault, + Namespace: api_v1.NamespaceDefault, }, Status: extensions.IngressStatus{ - LoadBalancer: api.LoadBalancerStatus{ + LoadBalancer: api_v1.LoadBalancerStatus{ Ingress: buildLoadBalancerIngressByIP(), }, }, }) - return cache_store.StoreToIngressLister{Store: store} + return cache_store.IngressLister{Store: store} } func buildStatusSync() statusSync { return statusSync{ pod: &k8s.PodInfo{ Name: "foo_base_pod", - Namespace: api.NamespaceDefault, + Namespace: api_v1.NamespaceDefault, Labels: map[string]string{ "lable_sig": "foo_pod", }, @@ -229,8 +249,8 @@ func buildStatusSync() statusSync { syncQueue: task.NewTaskQueue(fakeSynFn), Config: Config{ Client: buildSimpleClientSet(), - PublishService: api.NamespaceDefault + "/" + "foo", - IngressLister: buildIngressLIstener(), + PublishService: api_v1.NamespaceDefault + "/" + "foo", + IngressLister: buildIngressListener(), }, } } @@ -238,11 +258,13 @@ func buildStatusSync() statusSync { func TestStatusActions(t *testing.T) { // make sure election can be created os.Setenv("POD_NAME", "foo1") - os.Setenv("POD_NAMESPACE", api.NamespaceDefault) + os.Setenv("POD_NAMESPACE", api_v1.NamespaceDefault) c := Config{ - Client: buildSimpleClientSet(), - PublishService: "", - IngressLister: 
buildIngressLIstener(), + Client: buildSimpleClientSet(), + PublishService: "", + IngressLister: buildIngressListener(), + DefaultIngressClass: "nginx", + IngressClass: "", } // create object fkSync := NewStatusSyncer(c) @@ -261,10 +283,10 @@ func TestStatusActions(t *testing.T) { fk.sync("just-test") // PublishService is empty, so the running address is: ["11.0.0.2"] // after updated, the ingress's ip should only be "11.0.0.2" - newIPs := []api.LoadBalancerIngress{{ + newIPs := []api_v1.LoadBalancerIngress{{ IP: "11.0.0.2", }} - fooIngress1, err1 := fk.Client.Extensions().Ingresses(api.NamespaceDefault).Get("foo_ingress_1") + fooIngress1, err1 := fk.Client.Extensions().Ingresses(api_v1.NamespaceDefault).Get("foo_ingress_1", meta_v1.GetOptions{}) if err1 != nil { t.Fatalf("unexpected error") } @@ -276,8 +298,8 @@ func TestStatusActions(t *testing.T) { // execute shutdown fk.Shutdown() // ingress should be empty - newIPs2 := []api.LoadBalancerIngress{} - fooIngress2, err2 := fk.Client.Extensions().Ingresses(api.NamespaceDefault).Get("foo_ingress_1") + newIPs2 := []api_v1.LoadBalancerIngress{} + fooIngress2, err2 := fk.Client.Extensions().Ingresses(api_v1.NamespaceDefault).Get("foo_ingress_1", meta_v1.GetOptions{}) if err2 != nil { t.Fatalf("unexpected error") } @@ -286,6 +308,14 @@ func TestStatusActions(t *testing.T) { t.Fatalf("returned %v but expected %v", fooIngress2CurIPs, newIPs2) } + oic, err := fk.Client.Extensions().Ingresses(api.NamespaceDefault).Get("foo_ingress_different_class", meta_v1.GetOptions{}) + if err != nil { + t.Fatalf("unexpected error") + } + if oic.Status.LoadBalancer.Ingress[0].IP != "0.0.0.0" && oic.Status.LoadBalancer.Ingress[0].Hostname != "foo.bar.com" { + t.Fatalf("invalid ingress status for rule with different class") + } + // end test ns <- struct{}{} } @@ -346,7 +376,7 @@ func TestUpdateStatus(t *testing.T) { sort.Sort(loadBalancerIngressByIP(newIPs)) fk.updateStatus(newIPs) - fooIngress1, err1 := fk.Client.Extensions().Ingresses(api.NamespaceDefault).Get("foo_ingress_1") + fooIngress1, err1 := fk.Client.Extensions().Ingresses(api_v1.NamespaceDefault).Get("foo_ingress_1", meta_v1.GetOptions{}) if err1 != nil { t.Fatalf("unexpected error") } @@ -355,13 +385,13 @@ func TestUpdateStatus(t *testing.T) { t.Fatalf("returned %v but expected %v", fooIngress1CurIPs, newIPs) } - fooIngress2, err2 := fk.Client.Extensions().Ingresses(api.NamespaceDefault).Get("foo_ingress_2") + fooIngress2, err2 := fk.Client.Extensions().Ingresses(api_v1.NamespaceDefault).Get("foo_ingress_2", meta_v1.GetOptions{}) if err2 != nil { t.Fatalf("unexpected error") } fooIngress2CurIPs := fooIngress2.Status.LoadBalancer.Ingress - if !ingressSliceEqual(fooIngress2CurIPs, []api.LoadBalancerIngress{}) { - t.Fatalf("returned %v but expected %v", fooIngress2CurIPs, []api.LoadBalancerIngress{}) + if !ingressSliceEqual(fooIngress2CurIPs, []api_v1.LoadBalancerIngress{}) { + t.Fatalf("returned %v but expected %v", fooIngress2CurIPs, []api_v1.LoadBalancerIngress{}) } } @@ -375,7 +405,7 @@ func TestSliceToStatus(t *testing.T) { r := sliceToStatus(fkEndpoints) if r == nil { - t.Fatalf("returned nil but expected a valid []api.LoadBalancerIngress") + t.Fatalf("returned nil but expected a valid []api_v1.LoadBalancerIngress") } rl := len(r) if rl != 3 { @@ -383,21 +413,21 @@ func TestSliceToStatus(t *testing.T) { } re1 := r[0] if re1.Hostname != "opensource-k8s-ingress" { - t.Fatalf("returned %v but expected %v", re1, api.LoadBalancerIngress{Hostname: "opensource-k8s-ingress"}) + t.Fatalf("returned %v but 
expected %v", re1, api_v1.LoadBalancerIngress{Hostname: "opensource-k8s-ingress"}) } re2 := r[1] if re2.IP != "10.0.0.1" { - t.Fatalf("returned %v but expected %v", re2, api.LoadBalancerIngress{IP: "10.0.0.1"}) + t.Fatalf("returned %v but expected %v", re2, api_v1.LoadBalancerIngress{IP: "10.0.0.1"}) } re3 := r[2] if re3.IP != "2001:db8::68" { - t.Fatalf("returned %v but expected %v", re3, api.LoadBalancerIngress{IP: "2001:db8::68"}) + t.Fatalf("returned %v but expected %v", re3, api_v1.LoadBalancerIngress{IP: "2001:db8::68"}) } } func TestIngressSliceEqual(t *testing.T) { fk1 := buildLoadBalancerIngressByIP() - fk2 := append(buildLoadBalancerIngressByIP(), api.LoadBalancerIngress{ + fk2 := append(buildLoadBalancerIngressByIP(), api_v1.LoadBalancerIngress{ IP: "10.0.0.5", Hostname: "foo5", }) @@ -407,8 +437,8 @@ func TestIngressSliceEqual(t *testing.T) { fk4[2].IP = "11.0.0.3" fooTests := []struct { - lhs []api.LoadBalancerIngress - rhs []api.LoadBalancerIngress + lhs []api_v1.LoadBalancerIngress + rhs []api_v1.LoadBalancerIngress er bool }{ {fk1, fk1, true}, @@ -417,7 +447,7 @@ func TestIngressSliceEqual(t *testing.T) { {fk4, fk1, false}, {fk1, nil, false}, {nil, nil, true}, - {[]api.LoadBalancerIngress{}, []api.LoadBalancerIngress{}, true}, + {[]api_v1.LoadBalancerIngress{}, []api_v1.LoadBalancerIngress{}, true}, } for _, fooTest := range fooTests { @@ -433,7 +463,7 @@ func TestLoadBalancerIngressByIPLen(t *testing.T) { ips loadBalancerIngressByIP el int }{ - {[]api.LoadBalancerIngress{}, 0}, + {[]api_v1.LoadBalancerIngress{}, 0}, {buildLoadBalancerIngressByIP(), 4}, {nil, 0}, } diff --git a/core/pkg/ingress/store/main.go b/core/pkg/ingress/store/main.go new file mode 100644 index 0000000000..4fb6f79290 --- /dev/null +++ b/core/pkg/ingress/store/main.go @@ -0,0 +1,71 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package store + +import ( + "fmt" + + api "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" +) + +// IngressLister makes a Store that lists Ingress. +type IngressLister struct { + cache.Store +} + +// SecretsLister makes a Store that lists Secrets. +type SecretsLister struct { + cache.Store +} + +// ConfigMapLister makes a Store that lists Configmaps. +type ConfigMapLister struct { + cache.Store +} + +// ServiceLister makes a Store that lists Services. +type ServiceLister struct { + cache.Store +} + +// NodeLister makes a Store that lists Nodes. +type NodeLister struct { + cache.Store +} + +// EndpointLister makes a Store that lists Endpoints. +type EndpointLister struct { + cache.Store +} + +// GetServiceEndpoints returns the endpoints of a service, matched on service name. 
+func (s *EndpointLister) GetServiceEndpoints(svc *api.Service) (ep api.Endpoints, err error) { + for _, m := range s.Store.List() { + ep = *m.(*api.Endpoints) + if svc.Name == ep.Name && svc.Namespace == ep.Namespace { + return ep, nil + } + } + err = fmt.Errorf("could not find endpoints for service: %v", svc.Name) + return +} + +// SecretLister makes a Store that lists Secrets. +type SecretLister struct { + cache.Store +} diff --git a/core/pkg/ingress/types.go b/core/pkg/ingress/types.go index 8121abe4d4..e787d82b59 100644 --- a/core/pkg/ingress/types.go +++ b/core/pkg/ingress/types.go @@ -19,12 +19,10 @@ package ingress import ( "github.com/spf13/pflag" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/cache" - "k8s.io/kubernetes/pkg/healthz" - "k8s.io/kubernetes/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apiserver/pkg/server/healthz" + api "k8s.io/client-go/pkg/api/v1" - cache_store "k8s.io/ingress/core/pkg/cache" "k8s.io/ingress/core/pkg/ingress/annotations/auth" "k8s.io/ingress/core/pkg/ingress/annotations/authreq" "k8s.io/ingress/core/pkg/ingress/annotations/authtls" @@ -33,6 +31,7 @@ import ( "k8s.io/ingress/core/pkg/ingress/annotations/ratelimit" "k8s.io/ingress/core/pkg/ingress/annotations/rewrite" "k8s.io/ingress/core/pkg/ingress/defaults" + "k8s.io/ingress/core/pkg/ingress/store" ) var ( @@ -103,12 +102,12 @@ type Controller interface { // StoreLister returns the configured stores for ingresses, services, // endpoints, secrets and configmaps. type StoreLister struct { - Ingress cache_store.StoreToIngressLister - Service cache.StoreToServiceLister - Node cache.StoreToNodeLister - Endpoint cache.StoreToEndpointsLister - Secret cache_store.StoreToSecretsLister - ConfigMap cache_store.StoreToConfigmapLister + Ingress store.IngressLister + Service store.ServiceLister + Node store.NodeLister + Endpoint store.EndpointLister + Secret store.SecretLister + ConfigMap store.ConfigMapLister } // BackendInfo returns information about the backend. 
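The new store package is nothing more than thin lister types embedding `cache.Store`, which the `StoreLister` in types.go above now consumes. As a small sketch outside the patch, with a made-up Ingress object and assuming the client-go cache package vendored here, an `IngressLister` can be filled and listed like this:

```go
package main

import (
	"fmt"

	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
	"k8s.io/client-go/tools/cache"

	"k8s.io/ingress/core/pkg/ingress/store"
)

func main() {
	// MetaNamespaceKeyFunc keys objects as "namespace/name", matching the test helpers above.
	lister := store.IngressLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}

	// Add a made-up Ingress to the backing store.
	if err := lister.Add(&extensions.Ingress{
		ObjectMeta: meta_v1.ObjectMeta{Name: "demo", Namespace: "default"},
	}); err != nil {
		panic(err)
	}

	// List comes straight from the embedded cache.Store, which is all the
	// controller's StoreLister needs.
	for _, obj := range lister.List() {
		ing := obj.(*extensions.Ingress)
		fmt.Printf("%v/%v\n", ing.Namespace, ing.Name)
	}
}
```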
diff --git a/core/pkg/k8s/main.go b/core/pkg/k8s/main.go index af0b5df735..8a317cfdcf 100644 --- a/core/pkg/k8s/main.go +++ b/core/pkg/k8s/main.go @@ -21,8 +21,9 @@ import ( "os" "strings" - "k8s.io/kubernetes/pkg/api" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + api "k8s.io/client-go/pkg/api/v1" ) // IsValidService checks if exists a service with the specified name @@ -31,7 +32,7 @@ func IsValidService(kubeClient clientset.Interface, name string) (*api.Service, if err != nil { return nil, err } - return kubeClient.Core().Services(ns).Get(name) + return kubeClient.Core().Services(ns).Get(name, meta_v1.GetOptions{}) } // isValidConfigMap check if exists a configmap with the specified name @@ -43,7 +44,7 @@ func IsValidConfigMap(kubeClient clientset.Interface, fullName string) (*api.Con return nil, err } - configMap, err := kubeClient.Core().ConfigMaps(ns).Get(name) + configMap, err := kubeClient.Core().ConfigMaps(ns).Get(name, meta_v1.GetOptions{}) if err != nil { return nil, fmt.Errorf("configmap not found: %v", err) @@ -55,7 +56,7 @@ func IsValidConfigMap(kubeClient clientset.Interface, fullName string) (*api.Con // isValidNamespace chck if exists a namespace with the specified name func IsValidNamespace(kubeClient clientset.Interface, name string) (*api.Namespace, error) { - return kubeClient.Core().Namespaces().Get(name) + return kubeClient.Core().Namespaces().Get(name, meta_v1.GetOptions{}) } // IsValidSecret checks if exists a secret with the specified name @@ -64,7 +65,7 @@ func IsValidSecret(kubeClient clientset.Interface, name string) (*api.Secret, er if err != nil { return nil, err } - return kubeClient.Core().Secrets(ns).Get(name) + return kubeClient.Core().Secrets(ns).Get(name, meta_v1.GetOptions{}) } // ParseNameNS parses a string searching a namespace and name @@ -80,7 +81,7 @@ func ParseNameNS(input string) (string, string, error) { // GetNodeIP returns the IP address of a node in the cluster func GetNodeIP(kubeClient clientset.Interface, name string) string { var externalIP string - node, err := kubeClient.Core().Nodes().Get(name) + node, err := kubeClient.Core().Nodes().Get(name, meta_v1.GetOptions{}) if err != nil { return externalIP } @@ -120,7 +121,7 @@ func GetPodDetails(kubeClient clientset.Interface) (*PodInfo, error) { return nil, fmt.Errorf("unable to get POD information (missing POD_NAME or POD_NAMESPACE environment variable") } - pod, _ := kubeClient.Core().Pods(podNs).Get(podName) + pod, _ := kubeClient.Core().Pods(podNs).Get(podName, meta_v1.GetOptions{}) if pod == nil { return nil, fmt.Errorf("unable to get POD information") } diff --git a/core/pkg/k8s/main_test.go b/core/pkg/k8s/main_test.go index 58f640dec1..962db0759d 100644 --- a/core/pkg/k8s/main_test.go +++ b/core/pkg/k8s/main_test.go @@ -17,11 +17,12 @@ limitations under the License. 
package k8s import ( + "os" "testing" - "k8s.io/kubernetes/pkg/api" - testclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" - "os" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + testclient "k8s.io/client-go/kubernetes/fake" + api "k8s.io/client-go/pkg/api/v1" ) func TestParseNameNS(t *testing.T) { @@ -57,7 +58,7 @@ func TestParseNameNS(t *testing.T) { func TestIsValidService(t *testing.T) { fk := testclient.NewSimpleClientset(&api.Service{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Namespace: api.NamespaceDefault, Name: "demo", }, @@ -88,7 +89,7 @@ func TestIsValidService(t *testing.T) { func TestIsValidNamespace(t *testing.T) { fk := testclient.NewSimpleClientset(&api.Namespace{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "default", }, }) @@ -112,7 +113,7 @@ func TestIsValidNamespace(t *testing.T) { func TestIsValidConfigMap(t *testing.T) { fk := testclient.NewSimpleClientset(&api.ConfigMap{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Namespace: api.NamespaceDefault, Name: "demo", }, @@ -145,7 +146,7 @@ func TestIsValidConfigMap(t *testing.T) { func TestIsValidSecret(t *testing.T) { fk := testclient.NewSimpleClientset(&api.Secret{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Namespace: api.NamespaceDefault, Name: "demo", }, @@ -184,7 +185,7 @@ func TestGetNodeIP(t *testing.T) { // node not exist {testclient.NewSimpleClientset(&api.NodeList{Items: []api.Node{{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "demo", }, Status: api.NodeStatus{ @@ -199,7 +200,7 @@ func TestGetNodeIP(t *testing.T) { // node exist {testclient.NewSimpleClientset(&api.NodeList{Items: []api.Node{{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "demo", }, Status: api.NodeStatus{ @@ -215,7 +216,7 @@ func TestGetNodeIP(t *testing.T) { // search the correct node {testclient.NewSimpleClientset(&api.NodeList{Items: []api.Node{ { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "demo1", }, Status: api.NodeStatus{ @@ -228,7 +229,7 @@ func TestGetNodeIP(t *testing.T) { }, }, { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "demo2", }, Status: api.NodeStatus{ @@ -244,7 +245,7 @@ func TestGetNodeIP(t *testing.T) { // get NodeExternalIP {testclient.NewSimpleClientset(&api.NodeList{Items: []api.Node{{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "demo", }, Status: api.NodeStatus{ @@ -262,7 +263,7 @@ func TestGetNodeIP(t *testing.T) { // get NodeLegacyHostIP {testclient.NewSimpleClientset(&api.NodeList{Items: []api.Node{{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "demo", }, Status: api.NodeStatus{ @@ -307,7 +308,7 @@ func TestGetPodDetails(t *testing.T) { // success to get PodInfo fkClient := testclient.NewSimpleClientset( &api.PodList{Items: []api.Pod{{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "testpod", Namespace: api.NamespaceDefault, Labels: map[string]string{ @@ -317,7 +318,7 @@ func TestGetPodDetails(t *testing.T) { }, }}}, &api.NodeList{Items: []api.Node{{ - ObjectMeta: api.ObjectMeta{ + ObjectMeta: meta_v1.ObjectMeta{ Name: "demo", }, Status: api.NodeStatus{ diff --git a/core/pkg/task/queue.go b/core/pkg/task/queue.go index d29f3c2c93..a6786a6229 100644 --- a/core/pkg/task/queue.go +++ b/core/pkg/task/queue.go @@ -22,9 +22,9 @@ import ( "github.com/golang/glog" - "k8s.io/kubernetes/pkg/client/cache" - 
"k8s.io/kubernetes/pkg/util/wait" - "k8s.io/kubernetes/pkg/util/workqueue" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" ) var ( diff --git a/docs/catalog.md b/docs/catalog.md index 2612790c7f..3ac81c2004 100644 --- a/docs/catalog.md +++ b/docs/catalog.md @@ -4,3 +4,4 @@ This is a non-comprehensive list of existing ingress controllers. * [Dummy controller backend](/examples/custom-controller) * [HAProxy Ingress controller](https://github.com/jcmoraisjr/haproxy-ingress) +* [traefik](https://docs.traefik.io/toml/#kubernetes-ingress-backend) diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 877dc152bf..2f74d0ab8c 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -31,9 +31,12 @@ The Ingress controller needs information from apiserver. Therefore, authenticati 1. _Service Account:_ This is recommended, because nothing has to be configured. The Ingress controller will use information provided by the system to communicate with the API server. See 'Service Account' section for details. -2. _Kubeconfig file:_ In some Kubernetes environments service accounts are not available. In this case a manual configuration is required. The Ingress controller binary can be started with the `--kubeconfig` flag. The value of the flag is a path to a file specifying how to connect to the API server. +2. _Kubeconfig file:_ In some Kubernetes environments service accounts are not available. In this case a manual configuration is required. The Ingress controller binary can be started with the `--kubeconfig` flag. The value of the flag is a path to a file specifying how to connect to the API server. Using the `--kubeconfig` does not requires the flag `--apiserver-host`. The format of the file is identical to `~/.kube/config` which is used by kubectl to connect to the API server. See 'kubeconfig' section for details. +3. _Using the flag `--apiserver-host`:_ Using this flag `--apiserver-host=http://localhost:8080` it is possible to specify an unsecure api server or reach a remote kubernetes cluster using [kubectl proxy](https://kubernetes.io/docs/user-guide/kubectl/kubectl_proxy/). +Please do not use this approach in production. + In the diagram below you can see the full authentication flow with all options, starting with the browser on the lower left hand side. ``` @@ -155,6 +158,7 @@ If you want to use a kubeconfig file for authentication, create a deployment fil *Note:* the important part is the flag `--kubeconfig=/etc/kubernetes/kubeconfig.yaml`. 
+ ``` kind: Service apiVersion: v1 diff --git a/examples/custom-controller/server.go b/examples/custom-controller/server.go index 5c6eea4b76..a0554fceb6 100644 --- a/examples/custom-controller/server.go +++ b/examples/custom-controller/server.go @@ -8,11 +8,12 @@ import ( "github.com/spf13/pflag" + api "k8s.io/client-go/pkg/api/v1" + nginxconfig "k8s.io/ingress/controllers/nginx/pkg/config" "k8s.io/ingress/core/pkg/ingress" "k8s.io/ingress/core/pkg/ingress/controller" "k8s.io/ingress/core/pkg/ingress/defaults" - "k8s.io/kubernetes/pkg/api" ) func main() { @@ -89,3 +90,7 @@ func (n DummyController) OverrideFlags(*pflag.FlagSet) { func (n DummyController) SetListers(lister ingress.StoreLister) { } + +func (n DummyController) DefaultIngressClass() string { + return "dummy" +} diff --git a/vendor/cloud.google.com/go/AUTHORS b/vendor/cloud.google.com/go/AUTHORS new file mode 100644 index 0000000000..c364af1da0 --- /dev/null +++ b/vendor/cloud.google.com/go/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of cloud authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. + +Filippo Valsorda +Google Inc. +Ingo Oeser +Palm Stone Games, Inc. +Paweł Knap +Péter Szilágyi +Tyler Treat diff --git a/vendor/cloud.google.com/go/CONTRIBUTORS b/vendor/cloud.google.com/go/CONTRIBUTORS new file mode 100644 index 0000000000..07509ccb7c --- /dev/null +++ b/vendor/cloud.google.com/go/CONTRIBUTORS @@ -0,0 +1,34 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name + +# Keep the list alphabetically sorted. + +Andreas Litt +Andrew Gerrand +Brad Fitzpatrick +Burcu Dogan +Dave Day +David Sansome +David Symonds +Filippo Valsorda +Glenn Lewis +Ingo Oeser +Johan Euphrosine +Jonathan Amsterdam +Luna Duclos +Michael McGreevy +Omar Jarjur +Paweł Knap +Péter Szilágyi +Sarah Adams +Toby Burress +Tuo Shan +Tyler Treat diff --git a/vendor/github.com/blang/semver/LICENSE b/vendor/github.com/blang/semver/LICENSE deleted file mode 100644 index 5ba5c86fcb..0000000000 --- a/vendor/github.com/blang/semver/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License - -Copyright (c) 2014 Benedikt Lang - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md deleted file mode 100644 index 5171c5c557..0000000000 --- a/vendor/github.com/blang/semver/README.md +++ /dev/null @@ -1,142 +0,0 @@ -semver for golang [![Build Status](https://drone.io/github.com/blang/semver/status.png)](https://drone.io/github.com/blang/semver/latest) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master) -====== - -semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`. - -Usage ------ -```bash -$ go get github.com/blang/semver -``` -Note: Always vendor your dependencies or fix on a specific version tag. - -```go -import github.com/blang/semver -v1, err := semver.Make("1.0.0-beta") -v2, err := semver.Make("2.0.0-beta") -v1.Compare(v2) -``` - -Also check the [GoDocs](http://godoc.org/github.com/blang/semver). - -Why should I use this lib? ------ - -- Fully spec compatible -- No reflection -- No regex -- Fully tested (Coverage >99%) -- Readable parsing/validation errors -- Fast (See [Benchmarks](#benchmarks)) -- Only Stdlib -- Uses values instead of pointers -- Many features, see below - - -Features ------ - -- Parsing and validation at all levels -- Comparator-like comparisons -- Compare Helper Methods -- InPlace manipulation -- Sortable (implements sort.Interface) -- database/sql compatible (sql.Scanner/Valuer) -- encoding/json compatible (json.Marshaler/Unmarshaler) - - -Example ------ - -Have a look at full examples in [examples/main.go](examples/main.go) - -```go -import github.com/blang/semver - -v, err := semver.Make("0.0.1-alpha.preview+123.github") -fmt.Printf("Major: %d\n", v.Major) -fmt.Printf("Minor: %d\n", v.Minor) -fmt.Printf("Patch: %d\n", v.Patch) -fmt.Printf("Pre: %s\n", v.Pre) -fmt.Printf("Build: %s\n", v.Build) - -// Prerelease versions array -if len(v.Pre) > 0 { - fmt.Println("Prerelease versions:") - for i, pre := range v.Pre { - fmt.Printf("%d: %q\n", i, pre) - } -} - -// Build meta data array -if len(v.Build) > 0 { - fmt.Println("Build meta data:") - for i, build := range v.Build { - fmt.Printf("%d: %q\n", i, build) - } -} - -v001, err := semver.Make("0.0.1") -// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE -v001.GT(v) == true -v.LT(v001) == true -v.GTE(v) == true -v.LTE(v) == true - -// Or use v.Compare(v2) for comparisons (-1, 0, 1): -v001.Compare(v) == 1 -v.Compare(v001) == -1 -v.Compare(v) == 0 - -// Manipulate Version in place: -v.Pre[0], err = semver.NewPRVersion("beta") -if err != nil { - fmt.Printf("Error parsing pre release version: %q", err) -} - -fmt.Println("\nValidate versions:") -v.Build[0] = "?" 
- -err = v.Validate() -if err != nil { - fmt.Printf("Validation failed: %s\n", err) -} -``` - -Benchmarks ------ - - BenchmarkParseSimple 5000000 328 ns/op 49 B/op 1 allocs/op - BenchmarkParseComplex 1000000 2105 ns/op 263 B/op 7 allocs/op - BenchmarkParseAverage 1000000 1301 ns/op 168 B/op 4 allocs/op - BenchmarkStringSimple 10000000 130 ns/op 5 B/op 1 allocs/op - BenchmarkStringLarger 5000000 280 ns/op 32 B/op 2 allocs/op - BenchmarkStringComplex 3000000 512 ns/op 80 B/op 3 allocs/op - BenchmarkStringAverage 5000000 387 ns/op 47 B/op 2 allocs/op - BenchmarkValidateSimple 500000000 7.92 ns/op 0 B/op 0 allocs/op - BenchmarkValidateComplex 2000000 923 ns/op 0 B/op 0 allocs/op - BenchmarkValidateAverage 5000000 452 ns/op 0 B/op 0 allocs/op - BenchmarkCompareSimple 100000000 11.2 ns/op 0 B/op 0 allocs/op - BenchmarkCompareComplex 50000000 40.9 ns/op 0 B/op 0 allocs/op - BenchmarkCompareAverage 50000000 43.8 ns/op 0 B/op 0 allocs/op - BenchmarkSort 5000000 436 ns/op 259 B/op 2 allocs/op - -See benchmark cases at [semver_test.go](semver_test.go) - - -Motivation ------ - -I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex which i don't like. - - -Contribution ------ - -Feel free to make a pull request. For bigger changes create a issue first to discuss about it. - - -License ------ - -See [LICENSE](LICENSE) file. diff --git a/vendor/github.com/blang/semver/json.go b/vendor/github.com/blang/semver/json.go deleted file mode 100644 index a74bf7c449..0000000000 --- a/vendor/github.com/blang/semver/json.go +++ /dev/null @@ -1,23 +0,0 @@ -package semver - -import ( - "encoding/json" -) - -// MarshalJSON implements the encoding/json.Marshaler interface. -func (v Version) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements the encoding/json.Unmarshaler interface. -func (v *Version) UnmarshalJSON(data []byte) (err error) { - var versionString string - - if err = json.Unmarshal(data, &versionString); err != nil { - return - } - - *v, err = Parse(versionString) - - return -} diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go deleted file mode 100644 index bbf85ce972..0000000000 --- a/vendor/github.com/blang/semver/semver.go +++ /dev/null @@ -1,395 +0,0 @@ -package semver - -import ( - "errors" - "fmt" - "strconv" - "strings" -) - -const ( - numbers string = "0123456789" - alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" - alphanum = alphas + numbers -) - -// SpecVersion is the latest fully supported spec version of semver -var SpecVersion = Version{ - Major: 2, - Minor: 0, - Patch: 0, -} - -// Version represents a semver compatible version -type Version struct { - Major uint64 - Minor uint64 - Patch uint64 - Pre []PRVersion - Build []string //No Precendence -} - -// Version to string -func (v Version) String() string { - b := make([]byte, 0, 5) - b = strconv.AppendUint(b, v.Major, 10) - b = append(b, '.') - b = strconv.AppendUint(b, v.Minor, 10) - b = append(b, '.') - b = strconv.AppendUint(b, v.Patch, 10) - - if len(v.Pre) > 0 { - b = append(b, '-') - b = append(b, v.Pre[0].String()...) - - for _, pre := range v.Pre[1:] { - b = append(b, '.') - b = append(b, pre.String()...) - } - } - - if len(v.Build) > 0 { - b = append(b, '+') - b = append(b, v.Build[0]...) - - for _, build := range v.Build[1:] { - b = append(b, '.') - b = append(b, build...) - } - } - - return string(b) -} - -// Equals checks if v is equal to o. 
-func (v Version) Equals(o Version) bool { - return (v.Compare(o) == 0) -} - -// EQ checks if v is equal to o. -func (v Version) EQ(o Version) bool { - return (v.Compare(o) == 0) -} - -// NE checks if v is not equal to o. -func (v Version) NE(o Version) bool { - return (v.Compare(o) != 0) -} - -// GT checks if v is greater than o. -func (v Version) GT(o Version) bool { - return (v.Compare(o) == 1) -} - -// GTE checks if v is greater than or equal to o. -func (v Version) GTE(o Version) bool { - return (v.Compare(o) >= 0) -} - -// GE checks if v is greater than or equal to o. -func (v Version) GE(o Version) bool { - return (v.Compare(o) >= 0) -} - -// LT checks if v is less than o. -func (v Version) LT(o Version) bool { - return (v.Compare(o) == -1) -} - -// LTE checks if v is less than or equal to o. -func (v Version) LTE(o Version) bool { - return (v.Compare(o) <= 0) -} - -// LE checks if v is less than or equal to o. -func (v Version) LE(o Version) bool { - return (v.Compare(o) <= 0) -} - -// Compare compares Versions v to o: -// -1 == v is less than o -// 0 == v is equal to o -// 1 == v is greater than o -func (v Version) Compare(o Version) int { - if v.Major != o.Major { - if v.Major > o.Major { - return 1 - } - return -1 - } - if v.Minor != o.Minor { - if v.Minor > o.Minor { - return 1 - } - return -1 - } - if v.Patch != o.Patch { - if v.Patch > o.Patch { - return 1 - } - return -1 - } - - // Quick comparison if a version has no prerelease versions - if len(v.Pre) == 0 && len(o.Pre) == 0 { - return 0 - } else if len(v.Pre) == 0 && len(o.Pre) > 0 { - return 1 - } else if len(v.Pre) > 0 && len(o.Pre) == 0 { - return -1 - } - - i := 0 - for ; i < len(v.Pre) && i < len(o.Pre); i++ { - if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 { - continue - } else if comp == 1 { - return 1 - } else { - return -1 - } - } - - // If all pr versions are the equal but one has further prversion, this one greater - if i == len(v.Pre) && i == len(o.Pre) { - return 0 - } else if i == len(v.Pre) && i < len(o.Pre) { - return -1 - } else { - return 1 - } - -} - -// Validate validates v and returns error in case -func (v Version) Validate() error { - // Major, Minor, Patch already validated using uint64 - - for _, pre := range v.Pre { - if !pre.IsNum { //Numeric prerelease versions already uint64 - if len(pre.VersionStr) == 0 { - return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr) - } - if !containsOnly(pre.VersionStr, alphanum) { - return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr) - } - } - } - - for _, build := range v.Build { - if len(build) == 0 { - return fmt.Errorf("Build meta data can not be empty %q", build) - } - if !containsOnly(build, alphanum) { - return fmt.Errorf("Invalid character(s) found in build meta data %q", build) - } - } - - return nil -} - -// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error -func New(s string) (vp *Version, err error) { - v, err := Parse(s) - vp = &v - return -} - -// Make is an alias for Parse, parses version string and returns a validated Version or error -func Make(s string) (Version, error) { - return Parse(s) -} - -// Parse parses version string and returns a validated Version or error -func Parse(s string) (Version, error) { - if len(s) == 0 { - return Version{}, errors.New("Version string empty") - } - - // Split into major.minor.(patch+pr+meta) - parts := strings.SplitN(s, ".", 3) - if len(parts) != 3 { - return Version{}, errors.New("No Major.Minor.Patch 
elements found") - } - - // Major - if !containsOnly(parts[0], numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0]) - } - if hasLeadingZeroes(parts[0]) { - return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0]) - } - major, err := strconv.ParseUint(parts[0], 10, 64) - if err != nil { - return Version{}, err - } - - // Minor - if !containsOnly(parts[1], numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1]) - } - if hasLeadingZeroes(parts[1]) { - return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1]) - } - minor, err := strconv.ParseUint(parts[1], 10, 64) - if err != nil { - return Version{}, err - } - - v := Version{} - v.Major = major - v.Minor = minor - - var build, prerelease []string - patchStr := parts[2] - - if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 { - build = strings.Split(patchStr[buildIndex+1:], ".") - patchStr = patchStr[:buildIndex] - } - - if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 { - prerelease = strings.Split(patchStr[preIndex+1:], ".") - patchStr = patchStr[:preIndex] - } - - if !containsOnly(patchStr, numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr) - } - if hasLeadingZeroes(patchStr) { - return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr) - } - patch, err := strconv.ParseUint(patchStr, 10, 64) - if err != nil { - return Version{}, err - } - - v.Patch = patch - - // Prerelease - for _, prstr := range prerelease { - parsedPR, err := NewPRVersion(prstr) - if err != nil { - return Version{}, err - } - v.Pre = append(v.Pre, parsedPR) - } - - // Build meta data - for _, str := range build { - if len(str) == 0 { - return Version{}, errors.New("Build meta data is empty") - } - if !containsOnly(str, alphanum) { - return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str) - } - v.Build = append(v.Build, str) - } - - return v, nil -} - -// MustParse is like Parse but panics if the version cannot be parsed. 
-func MustParse(s string) Version { - v, err := Parse(s) - if err != nil { - panic(`semver: Parse(` + s + `): ` + err.Error()) - } - return v -} - -// PRVersion represents a PreRelease Version -type PRVersion struct { - VersionStr string - VersionNum uint64 - IsNum bool -} - -// NewPRVersion creates a new valid prerelease version -func NewPRVersion(s string) (PRVersion, error) { - if len(s) == 0 { - return PRVersion{}, errors.New("Prerelease is empty") - } - v := PRVersion{} - if containsOnly(s, numbers) { - if hasLeadingZeroes(s) { - return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) - } - num, err := strconv.ParseUint(s, 10, 64) - - // Might never be hit, but just in case - if err != nil { - return PRVersion{}, err - } - v.VersionNum = num - v.IsNum = true - } else if containsOnly(s, alphanum) { - v.VersionStr = s - v.IsNum = false - } else { - return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s) - } - return v, nil -} - -// IsNumeric checks if prerelease-version is numeric -func (v PRVersion) IsNumeric() bool { - return v.IsNum -} - -// Compare compares two PreRelease Versions v and o: -// -1 == v is less than o -// 0 == v is equal to o -// 1 == v is greater than o -func (v PRVersion) Compare(o PRVersion) int { - if v.IsNum && !o.IsNum { - return -1 - } else if !v.IsNum && o.IsNum { - return 1 - } else if v.IsNum && o.IsNum { - if v.VersionNum == o.VersionNum { - return 0 - } else if v.VersionNum > o.VersionNum { - return 1 - } else { - return -1 - } - } else { // both are Alphas - if v.VersionStr == o.VersionStr { - return 0 - } else if v.VersionStr > o.VersionStr { - return 1 - } else { - return -1 - } - } -} - -// PreRelease version to string -func (v PRVersion) String() string { - if v.IsNum { - return strconv.FormatUint(v.VersionNum, 10) - } - return v.VersionStr -} - -func containsOnly(s string, set string) bool { - return strings.IndexFunc(s, func(r rune) bool { - return !strings.ContainsRune(set, r) - }) == -1 -} - -func hasLeadingZeroes(s string) bool { - return len(s) > 1 && s[0] == '0' -} - -// NewBuildVersion creates a new valid build version -func NewBuildVersion(s string) (string, error) { - if len(s) == 0 { - return "", errors.New("Buildversion is empty") - } - if !containsOnly(s, alphanum) { - return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) - } - return s, nil -} diff --git a/vendor/github.com/blang/semver/sort.go b/vendor/github.com/blang/semver/sort.go deleted file mode 100644 index e18f880826..0000000000 --- a/vendor/github.com/blang/semver/sort.go +++ /dev/null @@ -1,28 +0,0 @@ -package semver - -import ( - "sort" -) - -// Versions represents multiple versions. 
-type Versions []Version - -// Len returns length of version collection -func (s Versions) Len() int { - return len(s) -} - -// Swap swaps two versions inside the collection by its indices -func (s Versions) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Less checks if version at index i is less than version at index j -func (s Versions) Less(i, j int) bool { - return s[i].LT(s[j]) -} - -// Sort sorts a slice of versions -func Sort(versions []Version) { - sort.Sort(Versions(versions)) -} diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/sql.go deleted file mode 100644 index eb4d802666..0000000000 --- a/vendor/github.com/blang/semver/sql.go +++ /dev/null @@ -1,30 +0,0 @@ -package semver - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements the database/sql.Scanner interface. -func (v *Version) Scan(src interface{}) (err error) { - var str string - switch src := src.(type) { - case string: - str = src - case []byte: - str = string(src) - default: - return fmt.Errorf("Version.Scan: cannot convert %T to string.", src) - } - - if t, err := Parse(str); err == nil { - *v = t - } - - return -} - -// Value implements the database/sql/driver.Valuer interface. -func (v Version) Value() (driver.Value, error) { - return v.String(), nil -} diff --git a/vendor/github.com/coreos/go-oidc/LICENSE b/vendor/github.com/coreos/go-oidc/LICENSE deleted file mode 100644 index e06d208186..0000000000 --- a/vendor/github.com/coreos/go-oidc/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/vendor/github.com/coreos/go-oidc/NOTICE b/vendor/github.com/coreos/go-oidc/NOTICE deleted file mode 100644 index b39ddfa5cb..0000000000 --- a/vendor/github.com/coreos/go-oidc/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2014 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). 
diff --git a/vendor/github.com/coreos/go-oidc/http/client.go b/vendor/github.com/coreos/go-oidc/http/client.go deleted file mode 100644 index fd079b4950..0000000000 --- a/vendor/github.com/coreos/go-oidc/http/client.go +++ /dev/null @@ -1,7 +0,0 @@ -package http - -import "net/http" - -type Client interface { - Do(*http.Request) (*http.Response, error) -} diff --git a/vendor/github.com/coreos/go-oidc/http/http.go b/vendor/github.com/coreos/go-oidc/http/http.go deleted file mode 100644 index c3f5121513..0000000000 --- a/vendor/github.com/coreos/go-oidc/http/http.go +++ /dev/null @@ -1,156 +0,0 @@ -package http - -import ( - "encoding/base64" - "encoding/json" - "errors" - "log" - "net/http" - "net/url" - "path" - "strconv" - "strings" - "time" -) - -func WriteError(w http.ResponseWriter, code int, msg string) { - e := struct { - Error string `json:"error"` - }{ - Error: msg, - } - b, err := json.Marshal(e) - if err != nil { - log.Printf("go-oidc: failed to marshal %#v: %v", e, err) - code = http.StatusInternalServerError - b = []byte(`{"error":"server_error"}`) - } - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(code) - w.Write(b) -} - -// BasicAuth parses a username and password from the request's -// Authorization header. This was pulled from golang master: -// https://codereview.appspot.com/76540043 -func BasicAuth(r *http.Request) (username, password string, ok bool) { - auth := r.Header.Get("Authorization") - if auth == "" { - return - } - - if !strings.HasPrefix(auth, "Basic ") { - return - } - c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic ")) - if err != nil { - return - } - cs := string(c) - s := strings.IndexByte(cs, ':') - if s < 0 { - return - } - return cs[:s], cs[s+1:], true -} - -func cacheControlMaxAge(hdr string) (time.Duration, bool, error) { - for _, field := range strings.Split(hdr, ",") { - parts := strings.SplitN(strings.TrimSpace(field), "=", 2) - k := strings.ToLower(strings.TrimSpace(parts[0])) - if k != "max-age" { - continue - } - - if len(parts) == 1 { - return 0, false, errors.New("max-age has no value") - } - - v := strings.TrimSpace(parts[1]) - if v == "" { - return 0, false, errors.New("max-age has empty value") - } - - age, err := strconv.Atoi(v) - if err != nil { - return 0, false, err - } - - if age <= 0 { - return 0, false, nil - } - - return time.Duration(age) * time.Second, true, nil - } - - return 0, false, nil -} - -func expires(date, expires string) (time.Duration, bool, error) { - if date == "" || expires == "" { - return 0, false, nil - } - - te, err := time.Parse(time.RFC1123, expires) - if err != nil { - return 0, false, err - } - - td, err := time.Parse(time.RFC1123, date) - if err != nil { - return 0, false, err - } - - ttl := te.Sub(td) - - // headers indicate data already expired, caller should not - // have to care about this case - if ttl <= 0 { - return 0, false, nil - } - - return ttl, true, nil -} - -func Cacheable(hdr http.Header) (time.Duration, bool, error) { - ttl, ok, err := cacheControlMaxAge(hdr.Get("Cache-Control")) - if err != nil || ok { - return ttl, ok, err - } - - return expires(hdr.Get("Date"), hdr.Get("Expires")) -} - -// MergeQuery appends additional query values to an existing URL. -func MergeQuery(u url.URL, q url.Values) url.URL { - uv := u.Query() - for k, vs := range q { - for _, v := range vs { - uv.Add(k, v) - } - } - u.RawQuery = uv.Encode() - return u -} - -// NewResourceLocation appends a resource id to the end of the requested URL path. 
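For the http helpers above, a short sketch of how Cacheable and MergeQuery were typically used; the header values and URLs are illustrative, and the import alias matches the one used elsewhere in these sources:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"

	phttp "github.com/coreos/go-oidc/http"
)

func main() {
	// Cacheable prefers Cache-Control max-age and falls back to the
	// Date/Expires pair, returning a TTL and whether caching applies.
	hdr := http.Header{}
	hdr.Set("Cache-Control", "public, max-age=300")
	ttl, ok, err := phttp.Cacheable(hdr)
	fmt.Println(ttl, ok, err) // 5m0s true <nil>

	// MergeQuery appends extra query values without dropping existing ones.
	base, _ := url.Parse("https://provider.example.com/authorize?client_id=abc")
	merged := phttp.MergeQuery(*base, url.Values{"state": {"xyz"}})
	fmt.Println(merged.String())
}
```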
-func NewResourceLocation(reqURL *url.URL, id string) string { - var u url.URL - u = *reqURL - u.Path = path.Join(u.Path, id) - u.RawQuery = "" - u.Fragment = "" - return u.String() -} - -// CopyRequest returns a clone of the provided *http.Request. -// The returned object is a shallow copy of the struct and a -// deep copy of its Header field. -func CopyRequest(r *http.Request) *http.Request { - r2 := *r - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return &r2 -} diff --git a/vendor/github.com/coreos/go-oidc/http/url.go b/vendor/github.com/coreos/go-oidc/http/url.go deleted file mode 100644 index df60eb1a6b..0000000000 --- a/vendor/github.com/coreos/go-oidc/http/url.go +++ /dev/null @@ -1,29 +0,0 @@ -package http - -import ( - "errors" - "net/url" -) - -// ParseNonEmptyURL checks that a string is a parsable URL which is also not empty -// since `url.Parse("")` does not return an error. Must contian a scheme and a host. -func ParseNonEmptyURL(u string) (*url.URL, error) { - if u == "" { - return nil, errors.New("url is empty") - } - - ur, err := url.Parse(u) - if err != nil { - return nil, err - } - - if ur.Scheme == "" { - return nil, errors.New("url scheme is empty") - } - - if ur.Host == "" { - return nil, errors.New("url host is empty") - } - - return ur, nil -} diff --git a/vendor/github.com/coreos/go-oidc/jose/claims.go b/vendor/github.com/coreos/go-oidc/jose/claims.go deleted file mode 100644 index 8b48bfd230..0000000000 --- a/vendor/github.com/coreos/go-oidc/jose/claims.go +++ /dev/null @@ -1,126 +0,0 @@ -package jose - -import ( - "encoding/json" - "fmt" - "math" - "time" -) - -type Claims map[string]interface{} - -func (c Claims) Add(name string, value interface{}) { - c[name] = value -} - -func (c Claims) StringClaim(name string) (string, bool, error) { - cl, ok := c[name] - if !ok { - return "", false, nil - } - - v, ok := cl.(string) - if !ok { - return "", false, fmt.Errorf("unable to parse claim as string: %v", name) - } - - return v, true, nil -} - -func (c Claims) StringsClaim(name string) ([]string, bool, error) { - cl, ok := c[name] - if !ok { - return nil, false, nil - } - - if v, ok := cl.([]string); ok { - return v, true, nil - } - - // When unmarshaled, []string will become []interface{}. 
- if v, ok := cl.([]interface{}); ok { - var ret []string - for _, vv := range v { - str, ok := vv.(string) - if !ok { - return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name) - } - ret = append(ret, str) - } - return ret, true, nil - } - - return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name) -} - -func (c Claims) Int64Claim(name string) (int64, bool, error) { - cl, ok := c[name] - if !ok { - return 0, false, nil - } - - v, ok := cl.(int64) - if !ok { - vf, ok := cl.(float64) - if !ok { - return 0, false, fmt.Errorf("unable to parse claim as int64: %v", name) - } - v = int64(vf) - } - - return v, true, nil -} - -func (c Claims) Float64Claim(name string) (float64, bool, error) { - cl, ok := c[name] - if !ok { - return 0, false, nil - } - - v, ok := cl.(float64) - if !ok { - vi, ok := cl.(int64) - if !ok { - return 0, false, fmt.Errorf("unable to parse claim as float64: %v", name) - } - v = float64(vi) - } - - return v, true, nil -} - -func (c Claims) TimeClaim(name string) (time.Time, bool, error) { - v, ok, err := c.Float64Claim(name) - if !ok || err != nil { - return time.Time{}, ok, err - } - - s := math.Trunc(v) - ns := (v - s) * math.Pow(10, 9) - return time.Unix(int64(s), int64(ns)).UTC(), true, nil -} - -func decodeClaims(payload []byte) (Claims, error) { - var c Claims - if err := json.Unmarshal(payload, &c); err != nil { - return nil, fmt.Errorf("malformed JWT claims, unable to decode: %v", err) - } - return c, nil -} - -func marshalClaims(c Claims) ([]byte, error) { - b, err := json.Marshal(c) - if err != nil { - return nil, err - } - return b, nil -} - -func encodeClaims(c Claims) (string, error) { - b, err := marshalClaims(c) - if err != nil { - return "", err - } - - return encodeSegment(b), nil -} diff --git a/vendor/github.com/coreos/go-oidc/jose/jose.go b/vendor/github.com/coreos/go-oidc/jose/jose.go deleted file mode 100644 index 6209926596..0000000000 --- a/vendor/github.com/coreos/go-oidc/jose/jose.go +++ /dev/null @@ -1,112 +0,0 @@ -package jose - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "strings" -) - -const ( - HeaderMediaType = "typ" - HeaderKeyAlgorithm = "alg" - HeaderKeyID = "kid" -) - -const ( - // Encryption Algorithm Header Parameter Values for JWS - // See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-6 - AlgHS256 = "HS256" - AlgHS384 = "HS384" - AlgHS512 = "HS512" - AlgRS256 = "RS256" - AlgRS384 = "RS384" - AlgRS512 = "RS512" - AlgES256 = "ES256" - AlgES384 = "ES384" - AlgES512 = "ES512" - AlgPS256 = "PS256" - AlgPS384 = "PS384" - AlgPS512 = "PS512" - AlgNone = "none" -) - -const ( - // Algorithm Header Parameter Values for JWE - // See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#section-4.1 - AlgRSA15 = "RSA1_5" - AlgRSAOAEP = "RSA-OAEP" - AlgRSAOAEP256 = "RSA-OAEP-256" - AlgA128KW = "A128KW" - AlgA192KW = "A192KW" - AlgA256KW = "A256KW" - AlgDir = "dir" - AlgECDHES = "ECDH-ES" - AlgECDHESA128KW = "ECDH-ES+A128KW" - AlgECDHESA192KW = "ECDH-ES+A192KW" - AlgECDHESA256KW = "ECDH-ES+A256KW" - AlgA128GCMKW = "A128GCMKW" - AlgA192GCMKW = "A192GCMKW" - AlgA256GCMKW = "A256GCMKW" - AlgPBES2HS256A128KW = "PBES2-HS256+A128KW" - AlgPBES2HS384A192KW = "PBES2-HS384+A192KW" - AlgPBES2HS512A256KW = "PBES2-HS512+A256KW" -) - -const ( - // Encryption Algorithm Header Parameter Values for JWE - // See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-22 - EncA128CBCHS256 = "A128CBC-HS256" - EncA128CBCHS384 = "A128CBC-HS384" - 
EncA256CBCHS512 = "A256CBC-HS512" - EncA128GCM = "A128GCM" - EncA192GCM = "A192GCM" - EncA256GCM = "A256GCM" -) - -type JOSEHeader map[string]string - -func (j JOSEHeader) Validate() error { - if _, exists := j[HeaderKeyAlgorithm]; !exists { - return fmt.Errorf("header missing %q parameter", HeaderKeyAlgorithm) - } - - return nil -} - -func decodeHeader(seg string) (JOSEHeader, error) { - b, err := decodeSegment(seg) - if err != nil { - return nil, err - } - - var h JOSEHeader - err = json.Unmarshal(b, &h) - if err != nil { - return nil, err - } - - return h, nil -} - -func encodeHeader(h JOSEHeader) (string, error) { - b, err := json.Marshal(h) - if err != nil { - return "", err - } - - return encodeSegment(b), nil -} - -// Decode JWT specific base64url encoding with padding stripped -func decodeSegment(seg string) ([]byte, error) { - if l := len(seg) % 4; l != 0 { - seg += strings.Repeat("=", 4-l) - } - return base64.URLEncoding.DecodeString(seg) -} - -// Encode JWT specific base64url encoding with padding stripped -func encodeSegment(seg []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") -} diff --git a/vendor/github.com/coreos/go-oidc/jose/jwk.go b/vendor/github.com/coreos/go-oidc/jose/jwk.go deleted file mode 100644 index b7a8e23558..0000000000 --- a/vendor/github.com/coreos/go-oidc/jose/jwk.go +++ /dev/null @@ -1,135 +0,0 @@ -package jose - -import ( - "bytes" - "encoding/base64" - "encoding/binary" - "encoding/json" - "math/big" - "strings" -) - -// JSON Web Key -// https://tools.ietf.org/html/draft-ietf-jose-json-web-key-36#page-5 -type JWK struct { - ID string - Type string - Alg string - Use string - Exponent int - Modulus *big.Int - Secret []byte -} - -type jwkJSON struct { - ID string `json:"kid"` - Type string `json:"kty"` - Alg string `json:"alg"` - Use string `json:"use"` - Exponent string `json:"e"` - Modulus string `json:"n"` -} - -func (j *JWK) MarshalJSON() ([]byte, error) { - t := jwkJSON{ - ID: j.ID, - Type: j.Type, - Alg: j.Alg, - Use: j.Use, - Exponent: encodeExponent(j.Exponent), - Modulus: encodeModulus(j.Modulus), - } - - return json.Marshal(&t) -} - -func (j *JWK) UnmarshalJSON(data []byte) error { - var t jwkJSON - err := json.Unmarshal(data, &t) - if err != nil { - return err - } - - e, err := decodeExponent(t.Exponent) - if err != nil { - return err - } - - n, err := decodeModulus(t.Modulus) - if err != nil { - return err - } - - j.ID = t.ID - j.Type = t.Type - j.Alg = t.Alg - j.Use = t.Use - j.Exponent = e - j.Modulus = n - - return nil -} - -type JWKSet struct { - Keys []JWK `json:"keys"` -} - -func decodeExponent(e string) (int, error) { - decE, err := decodeBase64URLPaddingOptional(e) - if err != nil { - return 0, err - } - var eBytes []byte - if len(decE) < 8 { - eBytes = make([]byte, 8-len(decE), 8) - eBytes = append(eBytes, decE...) - } else { - eBytes = decE - } - eReader := bytes.NewReader(eBytes) - var E uint64 - err = binary.Read(eReader, binary.BigEndian, &E) - if err != nil { - return 0, err - } - return int(E), nil -} - -func encodeExponent(e int) string { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, uint64(e)) - var idx int - for ; idx < 8; idx++ { - if b[idx] != 0x0 { - break - } - } - return base64.URLEncoding.EncodeToString(b[idx:]) -} - -// Turns a URL encoded modulus of a key into a big int. 
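To illustrate the jwk.go definitions above, a sketch that describes the public half of an RSA key as a jose.JWK and serializes it; the key ID is illustrative:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"encoding/json"
	"fmt"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	// MarshalJSON base64url-encodes the exponent and modulus using the
	// helpers shown above.
	jwk := jose.JWK{
		ID:       "key-1",
		Type:     "RSA",
		Alg:      "RS256",
		Use:      "sig",
		Exponent: rsaKey.PublicKey.E,
		Modulus:  rsaKey.PublicKey.N,
	}
	b, err := json.Marshal(&jwk)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```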
-func decodeModulus(n string) (*big.Int, error) { - decN, err := decodeBase64URLPaddingOptional(n) - if err != nil { - return nil, err - } - N := big.NewInt(0) - N.SetBytes(decN) - return N, nil -} - -func encodeModulus(n *big.Int) string { - return base64.URLEncoding.EncodeToString(n.Bytes()) -} - -// decodeBase64URLPaddingOptional decodes Base64 whether there is padding or not. -// The stdlib version currently doesn't handle this. -// We can get rid of this is if this bug: -// https://github.com/golang/go/issues/4237 -// ever closes. -func decodeBase64URLPaddingOptional(e string) ([]byte, error) { - if m := len(e) % 4; m != 0 { - e += strings.Repeat("=", 4-m) - } - return base64.URLEncoding.DecodeString(e) -} diff --git a/vendor/github.com/coreos/go-oidc/jose/jws.go b/vendor/github.com/coreos/go-oidc/jose/jws.go deleted file mode 100644 index 1049ece831..0000000000 --- a/vendor/github.com/coreos/go-oidc/jose/jws.go +++ /dev/null @@ -1,51 +0,0 @@ -package jose - -import ( - "fmt" - "strings" -) - -type JWS struct { - RawHeader string - Header JOSEHeader - RawPayload string - Payload []byte - Signature []byte -} - -// Given a raw encoded JWS token parses it and verifies the structure. -func ParseJWS(raw string) (JWS, error) { - parts := strings.Split(raw, ".") - if len(parts) != 3 { - return JWS{}, fmt.Errorf("malformed JWS, only %d segments", len(parts)) - } - - rawSig := parts[2] - jws := JWS{ - RawHeader: parts[0], - RawPayload: parts[1], - } - - header, err := decodeHeader(jws.RawHeader) - if err != nil { - return JWS{}, fmt.Errorf("malformed JWS, unable to decode header, %s", err) - } - if err = header.Validate(); err != nil { - return JWS{}, fmt.Errorf("malformed JWS, %s", err) - } - jws.Header = header - - payload, err := decodeSegment(jws.RawPayload) - if err != nil { - return JWS{}, fmt.Errorf("malformed JWS, unable to decode payload: %s", err) - } - jws.Payload = payload - - sig, err := decodeSegment(rawSig) - if err != nil { - return JWS{}, fmt.Errorf("malformed JWS, unable to decode signature: %s", err) - } - jws.Signature = sig - - return jws, nil -} diff --git a/vendor/github.com/coreos/go-oidc/jose/jwt.go b/vendor/github.com/coreos/go-oidc/jose/jwt.go deleted file mode 100644 index 3b3e9634b0..0000000000 --- a/vendor/github.com/coreos/go-oidc/jose/jwt.go +++ /dev/null @@ -1,82 +0,0 @@ -package jose - -import "strings" - -type JWT JWS - -func ParseJWT(token string) (jwt JWT, err error) { - jws, err := ParseJWS(token) - if err != nil { - return - } - - return JWT(jws), nil -} - -func NewJWT(header JOSEHeader, claims Claims) (jwt JWT, err error) { - jwt = JWT{} - - jwt.Header = header - jwt.Header[HeaderMediaType] = "JWT" - - claimBytes, err := marshalClaims(claims) - if err != nil { - return - } - jwt.Payload = claimBytes - - eh, err := encodeHeader(header) - if err != nil { - return - } - jwt.RawHeader = eh - - ec, err := encodeClaims(claims) - if err != nil { - return - } - jwt.RawPayload = ec - - return -} - -func (j *JWT) KeyID() (string, bool) { - kID, ok := j.Header[HeaderKeyID] - return kID, ok -} - -func (j *JWT) Claims() (Claims, error) { - return decodeClaims(j.Payload) -} - -// Encoded data part of the token which may be signed. 
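A function-level sketch of reading claims from a compact token with the jwt.go and claims.go API above. This only decodes; signature verification requires a jose.Verifier. The package name and claim names are illustrative:

```go
package example

import (
	"fmt"

	"github.com/coreos/go-oidc/jose"
)

// inspectToken decodes a compact JWT and reads two common claims.
func inspectToken(raw string) error {
	jwt, err := jose.ParseJWT(raw)
	if err != nil {
		return err
	}
	claims, err := jwt.Claims()
	if err != nil {
		return err
	}
	sub, ok, err := claims.StringClaim("sub")
	if err != nil || !ok {
		return fmt.Errorf("missing sub claim: %v", err)
	}
	exp, ok, err := claims.TimeClaim("exp")
	if err != nil || !ok {
		return fmt.Errorf("missing exp claim: %v", err)
	}
	fmt.Println(sub, exp)
	return nil
}
```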
-func (j *JWT) Data() string { - return strings.Join([]string{j.RawHeader, j.RawPayload}, ".") -} - -// Full encoded JWT token string in format: header.claims.signature -func (j *JWT) Encode() string { - d := j.Data() - s := encodeSegment(j.Signature) - return strings.Join([]string{d, s}, ".") -} - -func NewSignedJWT(claims Claims, s Signer) (*JWT, error) { - header := JOSEHeader{ - HeaderKeyAlgorithm: s.Alg(), - HeaderKeyID: s.ID(), - } - - jwt, err := NewJWT(header, claims) - if err != nil { - return nil, err - } - - sig, err := s.Sign([]byte(jwt.Data())) - if err != nil { - return nil, err - } - jwt.Signature = sig - - return &jwt, nil -} diff --git a/vendor/github.com/coreos/go-oidc/jose/sig.go b/vendor/github.com/coreos/go-oidc/jose/sig.go deleted file mode 100755 index 7b2b253cca..0000000000 --- a/vendor/github.com/coreos/go-oidc/jose/sig.go +++ /dev/null @@ -1,24 +0,0 @@ -package jose - -import ( - "fmt" -) - -type Verifier interface { - ID() string - Alg() string - Verify(sig []byte, data []byte) error -} - -type Signer interface { - Verifier - Sign(data []byte) (sig []byte, err error) -} - -func NewVerifier(jwk JWK) (Verifier, error) { - if jwk.Type != "RSA" { - return nil, fmt.Errorf("unsupported key type %q", jwk.Type) - } - - return NewVerifierRSA(jwk) -} diff --git a/vendor/github.com/coreos/go-oidc/jose/sig_hmac.go b/vendor/github.com/coreos/go-oidc/jose/sig_hmac.go deleted file mode 100755 index b3ca3ef3d4..0000000000 --- a/vendor/github.com/coreos/go-oidc/jose/sig_hmac.go +++ /dev/null @@ -1,67 +0,0 @@ -package jose - -import ( - "bytes" - "crypto" - "crypto/hmac" - _ "crypto/sha256" - "errors" - "fmt" -) - -type VerifierHMAC struct { - KeyID string - Hash crypto.Hash - Secret []byte -} - -type SignerHMAC struct { - VerifierHMAC -} - -func NewVerifierHMAC(jwk JWK) (*VerifierHMAC, error) { - if jwk.Alg != "" && jwk.Alg != "HS256" { - return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg) - } - - v := VerifierHMAC{ - KeyID: jwk.ID, - Secret: jwk.Secret, - Hash: crypto.SHA256, - } - - return &v, nil -} - -func (v *VerifierHMAC) ID() string { - return v.KeyID -} - -func (v *VerifierHMAC) Alg() string { - return "HS256" -} - -func (v *VerifierHMAC) Verify(sig []byte, data []byte) error { - h := hmac.New(v.Hash.New, v.Secret) - h.Write(data) - if !bytes.Equal(sig, h.Sum(nil)) { - return errors.New("invalid hmac signature") - } - return nil -} - -func NewSignerHMAC(kid string, secret []byte) *SignerHMAC { - return &SignerHMAC{ - VerifierHMAC: VerifierHMAC{ - KeyID: kid, - Secret: secret, - Hash: crypto.SHA256, - }, - } -} - -func (s *SignerHMAC) Sign(data []byte) ([]byte, error) { - h := hmac.New(s.Hash.New, s.Secret) - h.Write(data) - return h.Sum(nil), nil -} diff --git a/vendor/github.com/coreos/go-oidc/jose/sig_rsa.go b/vendor/github.com/coreos/go-oidc/jose/sig_rsa.go deleted file mode 100755 index 004e45dd83..0000000000 --- a/vendor/github.com/coreos/go-oidc/jose/sig_rsa.go +++ /dev/null @@ -1,67 +0,0 @@ -package jose - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "fmt" -) - -type VerifierRSA struct { - KeyID string - Hash crypto.Hash - PublicKey rsa.PublicKey -} - -type SignerRSA struct { - PrivateKey rsa.PrivateKey - VerifierRSA -} - -func NewVerifierRSA(jwk JWK) (*VerifierRSA, error) { - if jwk.Alg != "" && jwk.Alg != "RS256" { - return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg) - } - - v := VerifierRSA{ - KeyID: jwk.ID, - PublicKey: rsa.PublicKey{ - N: jwk.Modulus, - E: jwk.Exponent, - }, - Hash: crypto.SHA256, - } - - return &v, nil -} - 
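Putting the signer pieces together, a sketch that mints and verifies an HMAC-signed token with NewSignerHMAC and NewSignedJWT; the key ID, secret, and claim values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	secret := []byte("0123456789abcdef0123456789abcdef")
	signer := jose.NewSignerHMAC("hmac-key-1", secret)

	claims := jose.Claims{}
	claims.Add("sub", "user-42")
	claims.Add("iss", "https://issuer.example.com")

	jwt, err := jose.NewSignedJWT(claims, signer)
	if err != nil {
		panic(err)
	}
	token := jwt.Encode() // header.claims.signature

	// Round-trip: parse the compact form and check the HMAC over Data().
	parsed, err := jose.ParseJWT(token)
	if err != nil {
		panic(err)
	}
	if err := signer.Verify(parsed.Signature, []byte(parsed.Data())); err != nil {
		panic(err)
	}
	fmt.Println("verified:", token)
}
```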
-func NewSignerRSA(kid string, key rsa.PrivateKey) *SignerRSA { - return &SignerRSA{ - PrivateKey: key, - VerifierRSA: VerifierRSA{ - KeyID: kid, - PublicKey: key.PublicKey, - Hash: crypto.SHA256, - }, - } -} - -func (v *VerifierRSA) ID() string { - return v.KeyID -} - -func (v *VerifierRSA) Alg() string { - return "RS256" -} - -func (v *VerifierRSA) Verify(sig []byte, data []byte) error { - h := v.Hash.New() - h.Write(data) - return rsa.VerifyPKCS1v15(&v.PublicKey, v.Hash, h.Sum(nil), sig) -} - -func (s *SignerRSA) Sign(data []byte) ([]byte, error) { - h := s.Hash.New() - h.Write(data) - return rsa.SignPKCS1v15(rand.Reader, &s.PrivateKey, s.Hash, h.Sum(nil)) -} diff --git a/vendor/github.com/coreos/go-oidc/key/key.go b/vendor/github.com/coreos/go-oidc/key/key.go deleted file mode 100644 index 208c1fc149..0000000000 --- a/vendor/github.com/coreos/go-oidc/key/key.go +++ /dev/null @@ -1,153 +0,0 @@ -package key - -import ( - "crypto/rand" - "crypto/rsa" - "encoding/hex" - "encoding/json" - "io" - "time" - - "github.com/coreos/go-oidc/jose" -) - -func NewPublicKey(jwk jose.JWK) *PublicKey { - return &PublicKey{jwk: jwk} -} - -type PublicKey struct { - jwk jose.JWK -} - -func (k *PublicKey) MarshalJSON() ([]byte, error) { - return json.Marshal(&k.jwk) -} - -func (k *PublicKey) UnmarshalJSON(data []byte) error { - var jwk jose.JWK - if err := json.Unmarshal(data, &jwk); err != nil { - return err - } - k.jwk = jwk - return nil -} - -func (k *PublicKey) ID() string { - return k.jwk.ID -} - -func (k *PublicKey) Verifier() (jose.Verifier, error) { - return jose.NewVerifierRSA(k.jwk) -} - -type PrivateKey struct { - KeyID string - PrivateKey *rsa.PrivateKey -} - -func (k *PrivateKey) ID() string { - return k.KeyID -} - -func (k *PrivateKey) Signer() jose.Signer { - return jose.NewSignerRSA(k.ID(), *k.PrivateKey) -} - -func (k *PrivateKey) JWK() jose.JWK { - return jose.JWK{ - ID: k.KeyID, - Type: "RSA", - Alg: "RS256", - Use: "sig", - Exponent: k.PrivateKey.PublicKey.E, - Modulus: k.PrivateKey.PublicKey.N, - } -} - -type KeySet interface { - ExpiresAt() time.Time -} - -type PublicKeySet struct { - keys []PublicKey - index map[string]*PublicKey - expiresAt time.Time -} - -func NewPublicKeySet(jwks []jose.JWK, exp time.Time) *PublicKeySet { - keys := make([]PublicKey, len(jwks)) - index := make(map[string]*PublicKey) - for i, jwk := range jwks { - keys[i] = *NewPublicKey(jwk) - index[keys[i].ID()] = &keys[i] - } - return &PublicKeySet{ - keys: keys, - index: index, - expiresAt: exp, - } -} - -func (s *PublicKeySet) ExpiresAt() time.Time { - return s.expiresAt -} - -func (s *PublicKeySet) Keys() []PublicKey { - return s.keys -} - -func (s *PublicKeySet) Key(id string) *PublicKey { - return s.index[id] -} - -type PrivateKeySet struct { - keys []*PrivateKey - ActiveKeyID string - expiresAt time.Time -} - -func NewPrivateKeySet(keys []*PrivateKey, exp time.Time) *PrivateKeySet { - return &PrivateKeySet{ - keys: keys, - ActiveKeyID: keys[0].ID(), - expiresAt: exp.UTC(), - } -} - -func (s *PrivateKeySet) Keys() []*PrivateKey { - return s.keys -} - -func (s *PrivateKeySet) ExpiresAt() time.Time { - return s.expiresAt -} - -func (s *PrivateKeySet) Active() *PrivateKey { - for i, k := range s.keys { - if k.ID() == s.ActiveKeyID { - return s.keys[i] - } - } - - return nil -} - -type GeneratePrivateKeyFunc func() (*PrivateKey, error) - -func GeneratePrivateKey() (*PrivateKey, error) { - pk, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return nil, err - } - keyID := make([]byte, 20) - if _, err 
:= io.ReadFull(rand.Reader, keyID); err != nil { - return nil, err - } - - k := PrivateKey{ - KeyID: hex.EncodeToString(keyID), - PrivateKey: pk, - } - - return &k, nil -} diff --git a/vendor/github.com/coreos/go-oidc/key/manager.go b/vendor/github.com/coreos/go-oidc/key/manager.go deleted file mode 100644 index 476ab6a8d2..0000000000 --- a/vendor/github.com/coreos/go-oidc/key/manager.go +++ /dev/null @@ -1,99 +0,0 @@ -package key - -import ( - "errors" - "time" - - "github.com/jonboulle/clockwork" - - "github.com/coreos/go-oidc/jose" - "github.com/coreos/pkg/health" -) - -type PrivateKeyManager interface { - ExpiresAt() time.Time - Signer() (jose.Signer, error) - JWKs() ([]jose.JWK, error) - PublicKeys() ([]PublicKey, error) - - WritableKeySetRepo - health.Checkable -} - -func NewPrivateKeyManager() PrivateKeyManager { - return &privateKeyManager{ - clock: clockwork.NewRealClock(), - } -} - -type privateKeyManager struct { - keySet *PrivateKeySet - clock clockwork.Clock -} - -func (m *privateKeyManager) ExpiresAt() time.Time { - if m.keySet == nil { - return m.clock.Now().UTC() - } - - return m.keySet.ExpiresAt() -} - -func (m *privateKeyManager) Signer() (jose.Signer, error) { - if err := m.Healthy(); err != nil { - return nil, err - } - - return m.keySet.Active().Signer(), nil -} - -func (m *privateKeyManager) JWKs() ([]jose.JWK, error) { - if err := m.Healthy(); err != nil { - return nil, err - } - - keys := m.keySet.Keys() - jwks := make([]jose.JWK, len(keys)) - for i, k := range keys { - jwks[i] = k.JWK() - } - return jwks, nil -} - -func (m *privateKeyManager) PublicKeys() ([]PublicKey, error) { - jwks, err := m.JWKs() - if err != nil { - return nil, err - } - keys := make([]PublicKey, len(jwks)) - for i, jwk := range jwks { - keys[i] = *NewPublicKey(jwk) - } - return keys, nil -} - -func (m *privateKeyManager) Healthy() error { - if m.keySet == nil { - return errors.New("private key manager uninitialized") - } - - if len(m.keySet.Keys()) == 0 { - return errors.New("private key manager zero keys") - } - - if m.keySet.ExpiresAt().Before(m.clock.Now().UTC()) { - return errors.New("private key manager keys expired") - } - - return nil -} - -func (m *privateKeyManager) Set(keySet KeySet) error { - privKeySet, ok := keySet.(*PrivateKeySet) - if !ok { - return errors.New("unable to cast to PrivateKeySet") - } - - m.keySet = privKeySet - return nil -} diff --git a/vendor/github.com/coreos/go-oidc/key/repo.go b/vendor/github.com/coreos/go-oidc/key/repo.go deleted file mode 100644 index 1acdeb3614..0000000000 --- a/vendor/github.com/coreos/go-oidc/key/repo.go +++ /dev/null @@ -1,55 +0,0 @@ -package key - -import ( - "errors" - "sync" -) - -var ErrorNoKeys = errors.New("no keys found") - -type WritableKeySetRepo interface { - Set(KeySet) error -} - -type ReadableKeySetRepo interface { - Get() (KeySet, error) -} - -type PrivateKeySetRepo interface { - WritableKeySetRepo - ReadableKeySetRepo -} - -func NewPrivateKeySetRepo() PrivateKeySetRepo { - return &memPrivateKeySetRepo{} -} - -type memPrivateKeySetRepo struct { - mu sync.RWMutex - pks PrivateKeySet -} - -func (r *memPrivateKeySetRepo) Set(ks KeySet) error { - pks, ok := ks.(*PrivateKeySet) - if !ok { - return errors.New("unable to cast to PrivateKeySet") - } else if pks == nil { - return errors.New("nil KeySet") - } - - r.mu.Lock() - defer r.mu.Unlock() - - r.pks = *pks - return nil -} - -func (r *memPrivateKeySetRepo) Get() (KeySet, error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.pks.keys == nil { - return nil, ErrorNoKeys - } - 
return KeySet(&r.pks), nil -} diff --git a/vendor/github.com/coreos/go-oidc/key/rotate.go b/vendor/github.com/coreos/go-oidc/key/rotate.go deleted file mode 100644 index bc6cdfb1bd..0000000000 --- a/vendor/github.com/coreos/go-oidc/key/rotate.go +++ /dev/null @@ -1,159 +0,0 @@ -package key - -import ( - "errors" - "log" - "time" - - ptime "github.com/coreos/pkg/timeutil" - "github.com/jonboulle/clockwork" -) - -var ( - ErrorPrivateKeysExpired = errors.New("private keys have expired") -) - -func NewPrivateKeyRotator(repo PrivateKeySetRepo, ttl time.Duration) *PrivateKeyRotator { - return &PrivateKeyRotator{ - repo: repo, - ttl: ttl, - - keep: 2, - generateKey: GeneratePrivateKey, - clock: clockwork.NewRealClock(), - } -} - -type PrivateKeyRotator struct { - repo PrivateKeySetRepo - generateKey GeneratePrivateKeyFunc - clock clockwork.Clock - keep int - ttl time.Duration -} - -func (r *PrivateKeyRotator) expiresAt() time.Time { - return r.clock.Now().UTC().Add(r.ttl) -} - -func (r *PrivateKeyRotator) Healthy() error { - pks, err := r.privateKeySet() - if err != nil { - return err - } - - if r.clock.Now().After(pks.ExpiresAt()) { - return ErrorPrivateKeysExpired - } - - return nil -} - -func (r *PrivateKeyRotator) privateKeySet() (*PrivateKeySet, error) { - ks, err := r.repo.Get() - if err != nil { - return nil, err - } - - pks, ok := ks.(*PrivateKeySet) - if !ok { - return nil, errors.New("unable to cast to PrivateKeySet") - } - return pks, nil -} - -func (r *PrivateKeyRotator) nextRotation() (time.Duration, error) { - pks, err := r.privateKeySet() - if err == ErrorNoKeys { - return 0, nil - } - if err != nil { - return 0, err - } - - now := r.clock.Now() - - // Ideally, we want to rotate after half the TTL has elapsed. - idealRotationTime := pks.ExpiresAt().Add(-r.ttl / 2) - - // If we are past the ideal rotation time, rotate immediatly. - return max(0, idealRotationTime.Sub(now)), nil -} - -func max(a, b time.Duration) time.Duration { - if a > b { - return a - } - return b -} - -func (r *PrivateKeyRotator) Run() chan struct{} { - attempt := func() { - k, err := r.generateKey() - if err != nil { - log.Printf("go-oidc: failed generating signing key: %v", err) - return - } - - exp := r.expiresAt() - if err := rotatePrivateKeys(r.repo, k, r.keep, exp); err != nil { - log.Printf("go-oidc: key rotation failed: %v", err) - return - } - } - - stop := make(chan struct{}) - go func() { - for { - var nextRotation time.Duration - var sleep time.Duration - var err error - for { - if nextRotation, err = r.nextRotation(); err == nil { - break - } - sleep = ptime.ExpBackoff(sleep, time.Minute) - log.Printf("go-oidc: error getting nextRotation, retrying in %v: %v", sleep, err) - time.Sleep(sleep) - } - - select { - case <-r.clock.After(nextRotation): - attempt() - case <-stop: - return - } - } - }() - - return stop -} - -func rotatePrivateKeys(repo PrivateKeySetRepo, k *PrivateKey, keep int, exp time.Time) error { - ks, err := repo.Get() - if err != nil && err != ErrorNoKeys { - return err - } - - var keys []*PrivateKey - if ks != nil { - pks, ok := ks.(*PrivateKeySet) - if !ok { - return errors.New("unable to cast to PrivateKeySet") - } - keys = pks.Keys() - } - - keys = append([]*PrivateKey{k}, keys...) 
- if l := len(keys); l > keep { - keys = keys[0:keep] - } - - nks := PrivateKeySet{ - keys: keys, - ActiveKeyID: k.ID(), - expiresAt: exp, - } - - return repo.Set(KeySet(&nks)) -} diff --git a/vendor/github.com/coreos/go-oidc/key/sync.go b/vendor/github.com/coreos/go-oidc/key/sync.go deleted file mode 100644 index b887f7b5a5..0000000000 --- a/vendor/github.com/coreos/go-oidc/key/sync.go +++ /dev/null @@ -1,91 +0,0 @@ -package key - -import ( - "errors" - "log" - "time" - - "github.com/jonboulle/clockwork" - - "github.com/coreos/pkg/timeutil" -) - -func NewKeySetSyncer(r ReadableKeySetRepo, w WritableKeySetRepo) *KeySetSyncer { - return &KeySetSyncer{ - readable: r, - writable: w, - clock: clockwork.NewRealClock(), - } -} - -type KeySetSyncer struct { - readable ReadableKeySetRepo - writable WritableKeySetRepo - clock clockwork.Clock -} - -func (s *KeySetSyncer) Run() chan struct{} { - stop := make(chan struct{}) - go func() { - var failing bool - var next time.Duration - for { - exp, err := syncKeySet(s.readable, s.writable, s.clock) - if err != nil || exp == 0 { - if !failing { - failing = true - next = time.Second - } else { - next = timeutil.ExpBackoff(next, time.Minute) - } - if exp == 0 { - log.Printf("Synced to already expired key set, retrying in %v: %v", next, err) - - } else { - log.Printf("Failed syncing key set, retrying in %v: %v", next, err) - } - } else { - failing = false - next = exp / 2 - } - - select { - case <-s.clock.After(next): - continue - case <-stop: - return - } - } - }() - - return stop -} - -func Sync(r ReadableKeySetRepo, w WritableKeySetRepo) (time.Duration, error) { - return syncKeySet(r, w, clockwork.NewRealClock()) -} - -// syncKeySet copies the keyset from r to the KeySet at w and returns the duration in which the KeySet will expire. -// If keyset has already expired, returns a zero duration. 
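The key package pieces above compose as follows: an in-memory repo holds the PrivateKeySet, the rotator replaces keys in the background, and the manager serves signers and JWKs from the current set. A sketch with an illustrative 24h TTL:

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/go-oidc/key"
)

func main() {
	repo := key.NewPrivateKeySetRepo()

	// Seed the repo with one freshly generated 2048-bit RSA key so that
	// consumers have something to use before the first rotation.
	k, err := key.GeneratePrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	ks := key.NewPrivateKeySet([]*key.PrivateKey{k}, time.Now().Add(24*time.Hour))
	if err := repo.Set(ks); err != nil {
		log.Fatal(err)
	}

	// Rotate in the background: roughly every half TTL a new key is
	// generated and the two most recent are kept, as implemented above.
	rotator := key.NewPrivateKeyRotator(repo, 24*time.Hour)
	stop := rotator.Run()
	defer close(stop)

	// Serve signers and JWKs from the current set via the manager.
	m := key.NewPrivateKeyManager()
	cur, err := repo.Get()
	if err != nil {
		log.Fatal(err)
	}
	if err := m.Set(cur); err != nil {
		log.Fatal(err)
	}
	jwks, err := m.JWKs()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("serving %d JWKs", len(jwks))
}
```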
-func syncKeySet(r ReadableKeySetRepo, w WritableKeySetRepo, clock clockwork.Clock) (exp time.Duration, err error) { - var ks KeySet - ks, err = r.Get() - if err != nil { - return - } - - if ks == nil { - err = errors.New("no source KeySet") - return - } - - if err = w.Set(ks); err != nil { - return - } - - now := clock.Now() - if ks.ExpiresAt().After(now) { - exp = ks.ExpiresAt().Sub(now) - } - return -} diff --git a/vendor/github.com/coreos/go-oidc/oauth2/error.go b/vendor/github.com/coreos/go-oidc/oauth2/error.go deleted file mode 100644 index 50d890949a..0000000000 --- a/vendor/github.com/coreos/go-oidc/oauth2/error.go +++ /dev/null @@ -1,29 +0,0 @@ -package oauth2 - -const ( - ErrorAccessDenied = "access_denied" - ErrorInvalidClient = "invalid_client" - ErrorInvalidGrant = "invalid_grant" - ErrorInvalidRequest = "invalid_request" - ErrorServerError = "server_error" - ErrorUnauthorizedClient = "unauthorized_client" - ErrorUnsupportedGrantType = "unsupported_grant_type" - ErrorUnsupportedResponseType = "unsupported_response_type" -) - -type Error struct { - Type string `json:"error"` - Description string `json:"error_description,omitempty"` - State string `json:"state,omitempty"` -} - -func (e *Error) Error() string { - if e.Description != "" { - return e.Type + ": " + e.Description - } - return e.Type -} - -func NewError(typ string) *Error { - return &Error{Type: typ} -} diff --git a/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go b/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go deleted file mode 100644 index 72d1d6715b..0000000000 --- a/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go +++ /dev/null @@ -1,416 +0,0 @@ -package oauth2 - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "mime" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - - phttp "github.com/coreos/go-oidc/http" -) - -// ResponseTypesEqual compares two response_type values. If either -// contains a space, it is treated as an unordered list. For example, -// comparing "code id_token" and "id_token code" would evaluate to true. -func ResponseTypesEqual(r1, r2 string) bool { - if !strings.Contains(r1, " ") || !strings.Contains(r2, " ") { - // fast route, no split needed - return r1 == r2 - } - - // split, sort, and compare - r1Fields := strings.Fields(r1) - r2Fields := strings.Fields(r2) - if len(r1Fields) != len(r2Fields) { - return false - } - sort.Strings(r1Fields) - sort.Strings(r2Fields) - for i, r1Field := range r1Fields { - if r1Field != r2Fields[i] { - return false - } - } - return true -} - -const ( - // OAuth2.0 response types registered by OIDC. 
- // - // See: https://openid.net/specs/oauth-v2-multiple-response-types-1_0.html#RegistryContents - ResponseTypeCode = "code" - ResponseTypeCodeIDToken = "code id_token" - ResponseTypeCodeIDTokenToken = "code id_token token" - ResponseTypeIDToken = "id_token" - ResponseTypeIDTokenToken = "id_token token" - ResponseTypeToken = "token" - ResponseTypeNone = "none" -) - -const ( - GrantTypeAuthCode = "authorization_code" - GrantTypeClientCreds = "client_credentials" - GrantTypeUserCreds = "password" - GrantTypeImplicit = "implicit" - GrantTypeRefreshToken = "refresh_token" - - AuthMethodClientSecretPost = "client_secret_post" - AuthMethodClientSecretBasic = "client_secret_basic" - AuthMethodClientSecretJWT = "client_secret_jwt" - AuthMethodPrivateKeyJWT = "private_key_jwt" -) - -type Config struct { - Credentials ClientCredentials - Scope []string - RedirectURL string - AuthURL string - TokenURL string - - // Must be one of the AuthMethodXXX methods above. Right now, only - // AuthMethodClientSecretPost and AuthMethodClientSecretBasic are supported. - AuthMethod string -} - -type Client struct { - hc phttp.Client - creds ClientCredentials - scope []string - authURL *url.URL - redirectURL *url.URL - tokenURL *url.URL - authMethod string -} - -type ClientCredentials struct { - ID string - Secret string -} - -func NewClient(hc phttp.Client, cfg Config) (c *Client, err error) { - if len(cfg.Credentials.ID) == 0 { - err = errors.New("missing client id") - return - } - - if len(cfg.Credentials.Secret) == 0 { - err = errors.New("missing client secret") - return - } - - if cfg.AuthMethod == "" { - cfg.AuthMethod = AuthMethodClientSecretBasic - } else if cfg.AuthMethod != AuthMethodClientSecretPost && cfg.AuthMethod != AuthMethodClientSecretBasic { - err = fmt.Errorf("auth method %q is not supported", cfg.AuthMethod) - return - } - - au, err := phttp.ParseNonEmptyURL(cfg.AuthURL) - if err != nil { - return - } - - tu, err := phttp.ParseNonEmptyURL(cfg.TokenURL) - if err != nil { - return - } - - // Allow empty redirect URL in the case where the client - // only needs to verify a given token. - ru, err := url.Parse(cfg.RedirectURL) - if err != nil { - return - } - - c = &Client{ - creds: cfg.Credentials, - scope: cfg.Scope, - redirectURL: ru, - authURL: au, - tokenURL: tu, - hc: hc, - authMethod: cfg.AuthMethod, - } - - return -} - -// Return the embedded HTTP client -func (c *Client) HttpClient() phttp.Client { - return c.hc -} - -// Generate the url for initial redirect to oauth provider. 
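A sketch of constructing the oauth2 client above and producing the redirect URL for the authorization code flow; the endpoints, credentials, and state value are illustrative:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/coreos/go-oidc/oauth2"
)

func main() {
	cfg := oauth2.Config{
		Credentials: oauth2.ClientCredentials{ID: "my-client", Secret: "my-secret"},
		Scope:       []string{"openid", "email"},
		RedirectURL: "https://app.example.com/callback",
		AuthURL:     "https://provider.example.com/authorize",
		TokenURL:    "https://provider.example.com/token",
		// AuthMethod is left empty, so NewClient defaults it to
		// client_secret_basic.
	}
	client, err := oauth2.NewClient(http.DefaultClient, cfg)
	if err != nil {
		panic(err)
	}

	// Send the browser here to start the flow; "code" is requested as the
	// response type and the state is echoed back on the callback.
	fmt.Println(client.AuthCodeURL("opaque-state", "", ""))
}
```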
-func (c *Client) AuthCodeURL(state, accessType, prompt string) string { - v := c.commonURLValues() - v.Set("state", state) - if strings.ToLower(accessType) == "offline" { - v.Set("access_type", "offline") - } - - if prompt != "" { - v.Set("prompt", prompt) - } - v.Set("response_type", "code") - - q := v.Encode() - u := *c.authURL - if u.RawQuery == "" { - u.RawQuery = q - } else { - u.RawQuery += "&" + q - } - return u.String() -} - -func (c *Client) commonURLValues() url.Values { - return url.Values{ - "redirect_uri": {c.redirectURL.String()}, - "scope": {strings.Join(c.scope, " ")}, - "client_id": {c.creds.ID}, - } -} - -func (c *Client) newAuthenticatedRequest(urlToken string, values url.Values) (*http.Request, error) { - var req *http.Request - var err error - switch c.authMethod { - case AuthMethodClientSecretPost: - values.Set("client_secret", c.creds.Secret) - req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode())) - if err != nil { - return nil, err - } - case AuthMethodClientSecretBasic: - req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode())) - if err != nil { - return nil, err - } - encodedID := url.QueryEscape(c.creds.ID) - encodedSecret := url.QueryEscape(c.creds.Secret) - req.SetBasicAuth(encodedID, encodedSecret) - default: - panic("misconfigured client: auth method not supported") - } - - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - return req, nil - -} - -// ClientCredsToken posts the client id and secret to obtain a token scoped to the OAuth2 client via the "client_credentials" grant type. -// May not be supported by all OAuth2 servers. -func (c *Client) ClientCredsToken(scope []string) (result TokenResponse, err error) { - v := url.Values{ - "scope": {strings.Join(scope, " ")}, - "grant_type": {GrantTypeClientCreds}, - } - - req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v) - if err != nil { - return - } - - resp, err := c.hc.Do(req) - if err != nil { - return - } - defer resp.Body.Close() - - return parseTokenResponse(resp) -} - -// UserCredsToken posts the username and password to obtain a token scoped to the OAuth2 client via the "password" grant_type -// May not be supported by all OAuth2 servers. -func (c *Client) UserCredsToken(username, password string) (result TokenResponse, err error) { - v := url.Values{ - "scope": {strings.Join(c.scope, " ")}, - "grant_type": {GrantTypeUserCreds}, - "username": {username}, - "password": {password}, - } - - req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v) - if err != nil { - return - } - - resp, err := c.hc.Do(req) - if err != nil { - return - } - defer resp.Body.Close() - - return parseTokenResponse(resp) -} - -// RequestToken requests a token from the Token Endpoint with the specified grantType. -// If 'grantType' == GrantTypeAuthCode, then 'value' should be the authorization code. -// If 'grantType' == GrantTypeRefreshToken, then 'value' should be the refresh token. 
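Continuing from the client built in the previous sketch, a function-level example of the exchange that RequestToken (documented just above) performs, plus a follow-up refresh; the code value comes from the provider callback and is illustrative:

```go
package example

import (
	"github.com/coreos/go-oidc/oauth2"
)

// exchangeAndRefresh trades the callback's authorization code for tokens,
// then uses the refresh token (when one is issued) to obtain a fresh
// access token.
func exchangeAndRefresh(client *oauth2.Client, code string) (oauth2.TokenResponse, error) {
	tok, err := client.RequestToken(oauth2.GrantTypeAuthCode, code)
	if err != nil {
		return oauth2.TokenResponse{}, err
	}
	if tok.RefreshToken == "" {
		return tok, nil
	}
	return client.RequestToken(oauth2.GrantTypeRefreshToken, tok.RefreshToken)
}
```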
-func (c *Client) RequestToken(grantType, value string) (result TokenResponse, err error) { - v := c.commonURLValues() - - v.Set("grant_type", grantType) - v.Set("client_secret", c.creds.Secret) - switch grantType { - case GrantTypeAuthCode: - v.Set("code", value) - case GrantTypeRefreshToken: - v.Set("refresh_token", value) - default: - err = fmt.Errorf("unsupported grant_type: %v", grantType) - return - } - - req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v) - if err != nil { - return - } - - resp, err := c.hc.Do(req) - if err != nil { - return - } - defer resp.Body.Close() - - return parseTokenResponse(resp) -} - -func parseTokenResponse(resp *http.Response) (result TokenResponse, err error) { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return - } - badStatusCode := resp.StatusCode < 200 || resp.StatusCode > 299 - - contentType, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) - if err != nil { - return - } - - result = TokenResponse{ - RawBody: body, - } - - newError := func(typ, desc, state string) error { - if typ == "" { - return fmt.Errorf("unrecognized error %s", body) - } - return &Error{typ, desc, state} - } - - if contentType == "application/x-www-form-urlencoded" || contentType == "text/plain" { - var vals url.Values - vals, err = url.ParseQuery(string(body)) - if err != nil { - return - } - if error := vals.Get("error"); error != "" || badStatusCode { - err = newError(error, vals.Get("error_description"), vals.Get("state")) - return - } - e := vals.Get("expires_in") - if e == "" { - e = vals.Get("expires") - } - if e != "" { - result.Expires, err = strconv.Atoi(e) - if err != nil { - return - } - } - result.AccessToken = vals.Get("access_token") - result.TokenType = vals.Get("token_type") - result.IDToken = vals.Get("id_token") - result.RefreshToken = vals.Get("refresh_token") - result.Scope = vals.Get("scope") - } else { - var r struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - IDToken string `json:"id_token"` - RefreshToken string `json:"refresh_token"` - Scope string `json:"scope"` - State string `json:"state"` - ExpiresIn json.Number `json:"expires_in"` // Azure AD returns string - Expires int `json:"expires"` - Error string `json:"error"` - Desc string `json:"error_description"` - } - if err = json.Unmarshal(body, &r); err != nil { - return - } - if r.Error != "" || badStatusCode { - err = newError(r.Error, r.Desc, r.State) - return - } - result.AccessToken = r.AccessToken - result.TokenType = r.TokenType - result.IDToken = r.IDToken - result.RefreshToken = r.RefreshToken - result.Scope = r.Scope - if expiresIn, err := r.ExpiresIn.Int64(); err != nil { - result.Expires = r.Expires - } else { - result.Expires = int(expiresIn) - } - } - return -} - -type TokenResponse struct { - AccessToken string - TokenType string - Expires int - IDToken string - RefreshToken string // OPTIONAL. - Scope string // OPTIONAL, if identical to the scope requested by the client, otherwise, REQUIRED. 
- RawBody []byte // In case callers need some other non-standard info from the token response -} - -type AuthCodeRequest struct { - ResponseType string - ClientID string - RedirectURL *url.URL - Scope []string - State string -} - -func ParseAuthCodeRequest(q url.Values) (AuthCodeRequest, error) { - acr := AuthCodeRequest{ - ResponseType: q.Get("response_type"), - ClientID: q.Get("client_id"), - State: q.Get("state"), - Scope: make([]string, 0), - } - - qs := strings.TrimSpace(q.Get("scope")) - if qs != "" { - acr.Scope = strings.Split(qs, " ") - } - - err := func() error { - if acr.ClientID == "" { - return NewError(ErrorInvalidRequest) - } - - redirectURL := q.Get("redirect_uri") - if redirectURL != "" { - ru, err := url.Parse(redirectURL) - if err != nil { - return NewError(ErrorInvalidRequest) - } - acr.RedirectURL = ru - } - - return nil - }() - - return acr, err -} diff --git a/vendor/github.com/coreos/go-oidc/oidc/client.go b/vendor/github.com/coreos/go-oidc/oidc/client.go deleted file mode 100644 index 7a3cb40f64..0000000000 --- a/vendor/github.com/coreos/go-oidc/oidc/client.go +++ /dev/null @@ -1,846 +0,0 @@ -package oidc - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "net/mail" - "net/url" - "sync" - "time" - - phttp "github.com/coreos/go-oidc/http" - "github.com/coreos/go-oidc/jose" - "github.com/coreos/go-oidc/key" - "github.com/coreos/go-oidc/oauth2" -) - -const ( - // amount of time that must pass after the last key sync - // completes before another attempt may begin - keySyncWindow = 5 * time.Second -) - -var ( - DefaultScope = []string{"openid", "email", "profile"} - - supportedAuthMethods = map[string]struct{}{ - oauth2.AuthMethodClientSecretBasic: struct{}{}, - oauth2.AuthMethodClientSecretPost: struct{}{}, - } -) - -type ClientCredentials oauth2.ClientCredentials - -type ClientIdentity struct { - Credentials ClientCredentials - Metadata ClientMetadata -} - -type JWAOptions struct { - // SigningAlg specifies an JWA alg for signing JWTs. - // - // Specifying this field implies different actions depending on the context. It may - // require objects be serialized and signed as a JWT instead of plain JSON, or - // require an existing JWT object use the specified alg. - // - // See: http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata - SigningAlg string - // EncryptionAlg, if provided, specifies that the returned or sent object be stored - // (or nested) within a JWT object and encrypted with the provided JWA alg. - EncryptionAlg string - // EncryptionEnc specifies the JWA enc algorithm to use with EncryptionAlg. If - // EncryptionAlg is provided and EncryptionEnc is omitted, this field defaults - // to A128CBC-HS256. - // - // If EncryptionEnc is provided EncryptionAlg must also be specified. - EncryptionEnc string -} - -func (opt JWAOptions) valid() error { - if opt.EncryptionEnc != "" && opt.EncryptionAlg == "" { - return errors.New("encryption encoding provided with no encryption algorithm") - } - return nil -} - -func (opt JWAOptions) defaults() JWAOptions { - if opt.EncryptionAlg != "" && opt.EncryptionEnc == "" { - opt.EncryptionEnc = jose.EncA128CBCHS256 - } - return opt -} - -var ( - // Ensure ClientMetadata satisfies these interfaces. - _ json.Marshaler = &ClientMetadata{} - _ json.Unmarshaler = &ClientMetadata{} -) - -// ClientMetadata holds metadata that the authorization server associates -// with a client identifier. 
The fields range from human-facing display -// strings such as client name, to items that impact the security of the -// protocol, such as the list of valid redirect URIs. -// -// See http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata -// -// TODO: support language specific claim representations -// http://openid.net/specs/openid-connect-registration-1_0.html#LanguagesAndScripts -type ClientMetadata struct { - RedirectURIs []url.URL // Required - - // A list of OAuth 2.0 "response_type" values that the client wishes to restrict - // itself to. Either "code", "token", or another registered extension. - // - // If omitted, only "code" will be used. - ResponseTypes []string - // A list of OAuth 2.0 grant types the client wishes to restrict itself to. - // The grant type values used by OIDC are "authorization_code", "implicit", - // and "refresh_token". - // - // If ommitted, only "authorization_code" will be used. - GrantTypes []string - // "native" or "web". If omitted, "web". - ApplicationType string - - // List of email addresses. - Contacts []mail.Address - // Name of client to be presented to the end-user. - ClientName string - // URL that references a logo for the Client application. - LogoURI *url.URL - // URL of the home page of the Client. - ClientURI *url.URL - // Profile data policies and terms of use to be provided to the end user. - PolicyURI *url.URL - TermsOfServiceURI *url.URL - - // URL to or the value of the client's JSON Web Key Set document. - JWKSURI *url.URL - JWKS *jose.JWKSet - - // URL referencing a flie with a single JSON array of redirect URIs. - SectorIdentifierURI *url.URL - - SubjectType string - - // Options to restrict the JWS alg and enc values used for server responses and requests. - IDTokenResponseOptions JWAOptions - UserInfoResponseOptions JWAOptions - RequestObjectOptions JWAOptions - - // Client requested authorization method and signing options for the token endpoint. - // - // Defaults to "client_secret_basic" - TokenEndpointAuthMethod string - TokenEndpointAuthSigningAlg string - - // DefaultMaxAge specifies the maximum amount of time in seconds before an authorized - // user must reauthroize. - // - // If 0, no limitation is placed on the maximum. - DefaultMaxAge int64 - // RequireAuthTime specifies if the auth_time claim in the ID token is required. - RequireAuthTime bool - - // Default Authentication Context Class Reference values for authentication requests. - DefaultACRValues []string - - // URI that a third party can use to initiate a login by the relaying party. - // - // See: http://openid.net/specs/openid-connect-core-1_0.html#ThirdPartyInitiatedLogin - InitiateLoginURI *url.URL - // Pre-registered request_uri values that may be cached by the server. - RequestURIs []url.URL -} - -// Defaults returns a shallow copy of ClientMetadata with default -// values replacing omitted fields. 
-func (m ClientMetadata) Defaults() ClientMetadata { - if len(m.ResponseTypes) == 0 { - m.ResponseTypes = []string{oauth2.ResponseTypeCode} - } - if len(m.GrantTypes) == 0 { - m.GrantTypes = []string{oauth2.GrantTypeAuthCode} - } - if m.ApplicationType == "" { - m.ApplicationType = "web" - } - if m.TokenEndpointAuthMethod == "" { - m.TokenEndpointAuthMethod = oauth2.AuthMethodClientSecretBasic - } - m.IDTokenResponseOptions = m.IDTokenResponseOptions.defaults() - m.UserInfoResponseOptions = m.UserInfoResponseOptions.defaults() - m.RequestObjectOptions = m.RequestObjectOptions.defaults() - return m -} - -func (m *ClientMetadata) MarshalJSON() ([]byte, error) { - e := m.toEncodableStruct() - return json.Marshal(&e) -} - -func (m *ClientMetadata) UnmarshalJSON(data []byte) error { - var e encodableClientMetadata - if err := json.Unmarshal(data, &e); err != nil { - return err - } - meta, err := e.toStruct() - if err != nil { - return err - } - if err := meta.Valid(); err != nil { - return err - } - *m = meta - return nil -} - -type encodableClientMetadata struct { - RedirectURIs []string `json:"redirect_uris"` // Required - ResponseTypes []string `json:"response_types,omitempty"` - GrantTypes []string `json:"grant_types,omitempty"` - ApplicationType string `json:"application_type,omitempty"` - Contacts []string `json:"contacts,omitempty"` - ClientName string `json:"client_name,omitempty"` - LogoURI string `json:"logo_uri,omitempty"` - ClientURI string `json:"client_uri,omitempty"` - PolicyURI string `json:"policy_uri,omitempty"` - TermsOfServiceURI string `json:"tos_uri,omitempty"` - JWKSURI string `json:"jwks_uri,omitempty"` - JWKS *jose.JWKSet `json:"jwks,omitempty"` - SectorIdentifierURI string `json:"sector_identifier_uri,omitempty"` - SubjectType string `json:"subject_type,omitempty"` - IDTokenSignedResponseAlg string `json:"id_token_signed_response_alg,omitempty"` - IDTokenEncryptedResponseAlg string `json:"id_token_encrypted_response_alg,omitempty"` - IDTokenEncryptedResponseEnc string `json:"id_token_encrypted_response_enc,omitempty"` - UserInfoSignedResponseAlg string `json:"userinfo_signed_response_alg,omitempty"` - UserInfoEncryptedResponseAlg string `json:"userinfo_encrypted_response_alg,omitempty"` - UserInfoEncryptedResponseEnc string `json:"userinfo_encrypted_response_enc,omitempty"` - RequestObjectSigningAlg string `json:"request_object_signing_alg,omitempty"` - RequestObjectEncryptionAlg string `json:"request_object_encryption_alg,omitempty"` - RequestObjectEncryptionEnc string `json:"request_object_encryption_enc,omitempty"` - TokenEndpointAuthMethod string `json:"token_endpoint_auth_method,omitempty"` - TokenEndpointAuthSigningAlg string `json:"token_endpoint_auth_signing_alg,omitempty"` - DefaultMaxAge int64 `json:"default_max_age,omitempty"` - RequireAuthTime bool `json:"require_auth_time,omitempty"` - DefaultACRValues []string `json:"default_acr_values,omitempty"` - InitiateLoginURI string `json:"initiate_login_uri,omitempty"` - RequestURIs []string `json:"request_uris,omitempty"` -} - -func (c *encodableClientMetadata) toStruct() (ClientMetadata, error) { - p := stickyErrParser{} - m := ClientMetadata{ - RedirectURIs: p.parseURIs(c.RedirectURIs, "redirect_uris"), - ResponseTypes: c.ResponseTypes, - GrantTypes: c.GrantTypes, - ApplicationType: c.ApplicationType, - Contacts: p.parseEmails(c.Contacts, "contacts"), - ClientName: c.ClientName, - LogoURI: p.parseURI(c.LogoURI, "logo_uri"), - ClientURI: p.parseURI(c.ClientURI, "client_uri"), - PolicyURI: p.parseURI(c.PolicyURI, 
"policy_uri"), - TermsOfServiceURI: p.parseURI(c.TermsOfServiceURI, "tos_uri"), - JWKSURI: p.parseURI(c.JWKSURI, "jwks_uri"), - JWKS: c.JWKS, - SectorIdentifierURI: p.parseURI(c.SectorIdentifierURI, "sector_identifier_uri"), - SubjectType: c.SubjectType, - TokenEndpointAuthMethod: c.TokenEndpointAuthMethod, - TokenEndpointAuthSigningAlg: c.TokenEndpointAuthSigningAlg, - DefaultMaxAge: c.DefaultMaxAge, - RequireAuthTime: c.RequireAuthTime, - DefaultACRValues: c.DefaultACRValues, - InitiateLoginURI: p.parseURI(c.InitiateLoginURI, "initiate_login_uri"), - RequestURIs: p.parseURIs(c.RequestURIs, "request_uris"), - IDTokenResponseOptions: JWAOptions{ - c.IDTokenSignedResponseAlg, - c.IDTokenEncryptedResponseAlg, - c.IDTokenEncryptedResponseEnc, - }, - UserInfoResponseOptions: JWAOptions{ - c.UserInfoSignedResponseAlg, - c.UserInfoEncryptedResponseAlg, - c.UserInfoEncryptedResponseEnc, - }, - RequestObjectOptions: JWAOptions{ - c.RequestObjectSigningAlg, - c.RequestObjectEncryptionAlg, - c.RequestObjectEncryptionEnc, - }, - } - if p.firstErr != nil { - return ClientMetadata{}, p.firstErr - } - return m, nil -} - -// stickyErrParser parses URIs and email addresses. Once it encounters -// a parse error, subsequent calls become no-op. -type stickyErrParser struct { - firstErr error -} - -func (p *stickyErrParser) parseURI(s, field string) *url.URL { - if p.firstErr != nil || s == "" { - return nil - } - u, err := url.Parse(s) - if err == nil { - if u.Host == "" { - err = errors.New("no host in URI") - } else if u.Scheme != "http" && u.Scheme != "https" { - err = errors.New("invalid URI scheme") - } - } - if err != nil { - p.firstErr = fmt.Errorf("failed to parse %s: %v", field, err) - return nil - } - return u -} - -func (p *stickyErrParser) parseURIs(s []string, field string) []url.URL { - if p.firstErr != nil || len(s) == 0 { - return nil - } - uris := make([]url.URL, len(s)) - for i, val := range s { - if val == "" { - p.firstErr = fmt.Errorf("invalid URI in field %s", field) - return nil - } - if u := p.parseURI(val, field); u != nil { - uris[i] = *u - } - } - return uris -} - -func (p *stickyErrParser) parseEmails(s []string, field string) []mail.Address { - if p.firstErr != nil || len(s) == 0 { - return nil - } - addrs := make([]mail.Address, len(s)) - for i, addr := range s { - if addr == "" { - p.firstErr = fmt.Errorf("invalid email in field %s", field) - return nil - } - a, err := mail.ParseAddress(addr) - if err != nil { - p.firstErr = fmt.Errorf("invalid email in field %s: %v", field, err) - return nil - } - addrs[i] = *a - } - return addrs -} - -func (m *ClientMetadata) toEncodableStruct() encodableClientMetadata { - return encodableClientMetadata{ - RedirectURIs: urisToStrings(m.RedirectURIs), - ResponseTypes: m.ResponseTypes, - GrantTypes: m.GrantTypes, - ApplicationType: m.ApplicationType, - Contacts: emailsToStrings(m.Contacts), - ClientName: m.ClientName, - LogoURI: uriToString(m.LogoURI), - ClientURI: uriToString(m.ClientURI), - PolicyURI: uriToString(m.PolicyURI), - TermsOfServiceURI: uriToString(m.TermsOfServiceURI), - JWKSURI: uriToString(m.JWKSURI), - JWKS: m.JWKS, - SectorIdentifierURI: uriToString(m.SectorIdentifierURI), - SubjectType: m.SubjectType, - IDTokenSignedResponseAlg: m.IDTokenResponseOptions.SigningAlg, - IDTokenEncryptedResponseAlg: m.IDTokenResponseOptions.EncryptionAlg, - IDTokenEncryptedResponseEnc: m.IDTokenResponseOptions.EncryptionEnc, - UserInfoSignedResponseAlg: m.UserInfoResponseOptions.SigningAlg, - UserInfoEncryptedResponseAlg: 
m.UserInfoResponseOptions.EncryptionAlg, - UserInfoEncryptedResponseEnc: m.UserInfoResponseOptions.EncryptionEnc, - RequestObjectSigningAlg: m.RequestObjectOptions.SigningAlg, - RequestObjectEncryptionAlg: m.RequestObjectOptions.EncryptionAlg, - RequestObjectEncryptionEnc: m.RequestObjectOptions.EncryptionEnc, - TokenEndpointAuthMethod: m.TokenEndpointAuthMethod, - TokenEndpointAuthSigningAlg: m.TokenEndpointAuthSigningAlg, - DefaultMaxAge: m.DefaultMaxAge, - RequireAuthTime: m.RequireAuthTime, - DefaultACRValues: m.DefaultACRValues, - InitiateLoginURI: uriToString(m.InitiateLoginURI), - RequestURIs: urisToStrings(m.RequestURIs), - } -} - -func uriToString(u *url.URL) string { - if u == nil { - return "" - } - return u.String() -} - -func urisToStrings(urls []url.URL) []string { - if len(urls) == 0 { - return nil - } - sli := make([]string, len(urls)) - for i, u := range urls { - sli[i] = u.String() - } - return sli -} - -func emailsToStrings(addrs []mail.Address) []string { - if len(addrs) == 0 { - return nil - } - sli := make([]string, len(addrs)) - for i, addr := range addrs { - sli[i] = addr.String() - } - return sli -} - -// Valid determines if a ClientMetadata conforms with the OIDC specification. -// -// Valid is called by UnmarshalJSON. -// -// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for -// URLs fields where the OIDC spec requires it. This may change in future releases -// of this package. See: https://github.com/coreos/go-oidc/issues/34 -func (m *ClientMetadata) Valid() error { - if len(m.RedirectURIs) == 0 { - return errors.New("zero redirect URLs") - } - - validURI := func(u *url.URL, fieldName string) error { - if u.Host == "" { - return fmt.Errorf("no host for uri field %s", fieldName) - } - if u.Scheme != "http" && u.Scheme != "https" { - return fmt.Errorf("uri field %s scheme is not http or https", fieldName) - } - return nil - } - - uris := []struct { - val *url.URL - name string - }{ - {m.LogoURI, "logo_uri"}, - {m.ClientURI, "client_uri"}, - {m.PolicyURI, "policy_uri"}, - {m.TermsOfServiceURI, "tos_uri"}, - {m.JWKSURI, "jwks_uri"}, - {m.SectorIdentifierURI, "sector_identifier_uri"}, - {m.InitiateLoginURI, "initiate_login_uri"}, - } - - for _, uri := range uris { - if uri.val == nil { - continue - } - if err := validURI(uri.val, uri.name); err != nil { - return err - } - } - - uriLists := []struct { - vals []url.URL - name string - }{ - {m.RedirectURIs, "redirect_uris"}, - {m.RequestURIs, "request_uris"}, - } - for _, list := range uriLists { - for _, uri := range list.vals { - if err := validURI(&uri, list.name); err != nil { - return err - } - } - } - - options := []struct { - option JWAOptions - name string - }{ - {m.IDTokenResponseOptions, "id_token response"}, - {m.UserInfoResponseOptions, "userinfo response"}, - {m.RequestObjectOptions, "request_object"}, - } - for _, option := range options { - if err := option.option.valid(); err != nil { - return fmt.Errorf("invalid JWA values for %s: %v", option.name, err) - } - } - return nil -} - -type ClientRegistrationResponse struct { - ClientID string // Required - ClientSecret string - RegistrationAccessToken string - RegistrationClientURI string - // If IsZero is true, unspecified. - ClientIDIssuedAt time.Time - // Time at which the client_secret will expire. - // If IsZero is true, it will not expire. 
- ClientSecretExpiresAt time.Time - - ClientMetadata -} - -type encodableClientRegistrationResponse struct { - ClientID string `json:"client_id"` // Required - ClientSecret string `json:"client_secret,omitempty"` - RegistrationAccessToken string `json:"registration_access_token,omitempty"` - RegistrationClientURI string `json:"registration_client_uri,omitempty"` - ClientIDIssuedAt int64 `json:"client_id_issued_at,omitempty"` - // Time at which the client_secret will expire, in seconds since the epoch. - // If 0 it will not expire. - ClientSecretExpiresAt int64 `json:"client_secret_expires_at"` // Required - - encodableClientMetadata -} - -func unixToSec(t time.Time) int64 { - if t.IsZero() { - return 0 - } - return t.Unix() -} - -func (c *ClientRegistrationResponse) MarshalJSON() ([]byte, error) { - e := encodableClientRegistrationResponse{ - ClientID: c.ClientID, - ClientSecret: c.ClientSecret, - RegistrationAccessToken: c.RegistrationAccessToken, - RegistrationClientURI: c.RegistrationClientURI, - ClientIDIssuedAt: unixToSec(c.ClientIDIssuedAt), - ClientSecretExpiresAt: unixToSec(c.ClientSecretExpiresAt), - encodableClientMetadata: c.ClientMetadata.toEncodableStruct(), - } - return json.Marshal(&e) -} - -func secToUnix(sec int64) time.Time { - if sec == 0 { - return time.Time{} - } - return time.Unix(sec, 0) -} - -func (c *ClientRegistrationResponse) UnmarshalJSON(data []byte) error { - var e encodableClientRegistrationResponse - if err := json.Unmarshal(data, &e); err != nil { - return err - } - if e.ClientID == "" { - return errors.New("no client_id in client registration response") - } - metadata, err := e.encodableClientMetadata.toStruct() - if err != nil { - return err - } - *c = ClientRegistrationResponse{ - ClientID: e.ClientID, - ClientSecret: e.ClientSecret, - RegistrationAccessToken: e.RegistrationAccessToken, - RegistrationClientURI: e.RegistrationClientURI, - ClientIDIssuedAt: secToUnix(e.ClientIDIssuedAt), - ClientSecretExpiresAt: secToUnix(e.ClientSecretExpiresAt), - ClientMetadata: metadata, - } - return nil -} - -type ClientConfig struct { - HTTPClient phttp.Client - Credentials ClientCredentials - Scope []string - RedirectURL string - ProviderConfig ProviderConfig - KeySet key.PublicKeySet -} - -func NewClient(cfg ClientConfig) (*Client, error) { - // Allow empty redirect URL in the case where the client - // only needs to verify a given token. 
- ru, err := url.Parse(cfg.RedirectURL) - if err != nil { - return nil, fmt.Errorf("invalid redirect URL: %v", err) - } - - c := Client{ - credentials: cfg.Credentials, - httpClient: cfg.HTTPClient, - scope: cfg.Scope, - redirectURL: ru.String(), - providerConfig: newProviderConfigRepo(cfg.ProviderConfig), - keySet: cfg.KeySet, - } - - if c.httpClient == nil { - c.httpClient = http.DefaultClient - } - - if c.scope == nil { - c.scope = make([]string, len(DefaultScope)) - copy(c.scope, DefaultScope) - } - - return &c, nil -} - -type Client struct { - httpClient phttp.Client - providerConfig *providerConfigRepo - credentials ClientCredentials - redirectURL string - scope []string - keySet key.PublicKeySet - providerSyncer *ProviderConfigSyncer - - keySetSyncMutex sync.RWMutex - lastKeySetSync time.Time -} - -func (c *Client) Healthy() error { - now := time.Now().UTC() - - cfg := c.providerConfig.Get() - - if cfg.Empty() { - return errors.New("oidc client provider config empty") - } - - if !cfg.ExpiresAt.IsZero() && cfg.ExpiresAt.Before(now) { - return errors.New("oidc client provider config expired") - } - - return nil -} - -func (c *Client) OAuthClient() (*oauth2.Client, error) { - cfg := c.providerConfig.Get() - authMethod, err := chooseAuthMethod(cfg) - if err != nil { - return nil, err - } - - ocfg := oauth2.Config{ - Credentials: oauth2.ClientCredentials(c.credentials), - RedirectURL: c.redirectURL, - AuthURL: cfg.AuthEndpoint.String(), - TokenURL: cfg.TokenEndpoint.String(), - Scope: c.scope, - AuthMethod: authMethod, - } - - return oauth2.NewClient(c.httpClient, ocfg) -} - -func chooseAuthMethod(cfg ProviderConfig) (string, error) { - if len(cfg.TokenEndpointAuthMethodsSupported) == 0 { - return oauth2.AuthMethodClientSecretBasic, nil - } - - for _, authMethod := range cfg.TokenEndpointAuthMethodsSupported { - if _, ok := supportedAuthMethods[authMethod]; ok { - return authMethod, nil - } - } - - return "", errors.New("no supported auth methods") -} - -// SyncProviderConfig starts the provider config syncer -func (c *Client) SyncProviderConfig(discoveryURL string) chan struct{} { - r := NewHTTPProviderConfigGetter(c.httpClient, discoveryURL) - s := NewProviderConfigSyncer(r, c.providerConfig) - stop := s.Run() - s.WaitUntilInitialSync() - return stop -} - -func (c *Client) maybeSyncKeys() error { - tooSoon := func() bool { - return time.Now().UTC().Before(c.lastKeySetSync.Add(keySyncWindow)) - } - - // ignore request to sync keys if a sync operation has been - // attempted too recently - if tooSoon() { - return nil - } - - c.keySetSyncMutex.Lock() - defer c.keySetSyncMutex.Unlock() - - // check again, as another goroutine may have been holding - // the lock while updating the keys - if tooSoon() { - return nil - } - - cfg := c.providerConfig.Get() - r := NewRemotePublicKeyRepo(c.httpClient, cfg.KeysEndpoint.String()) - w := &clientKeyRepo{client: c} - _, err := key.Sync(r, w) - c.lastKeySetSync = time.Now().UTC() - - return err -} - -type clientKeyRepo struct { - client *Client -} - -func (r *clientKeyRepo) Set(ks key.KeySet) error { - pks, ok := ks.(*key.PublicKeySet) - if !ok { - return errors.New("unable to cast to PublicKey") - } - r.client.keySet = *pks - return nil -} - -func (c *Client) ClientCredsToken(scope []string) (jose.JWT, error) { - cfg := c.providerConfig.Get() - - if !cfg.SupportsGrantType(oauth2.GrantTypeClientCreds) { - return jose.JWT{}, fmt.Errorf("%v grant type is not supported", oauth2.GrantTypeClientCreds) - } - - oac, err := c.OAuthClient() - if err != nil { - 
return jose.JWT{}, err - } - - t, err := oac.ClientCredsToken(scope) - if err != nil { - return jose.JWT{}, err - } - - jwt, err := jose.ParseJWT(t.IDToken) - if err != nil { - return jose.JWT{}, err - } - - return jwt, c.VerifyJWT(jwt) -} - -// ExchangeAuthCode exchanges an OAuth2 auth code for an OIDC JWT ID token. -func (c *Client) ExchangeAuthCode(code string) (jose.JWT, error) { - oac, err := c.OAuthClient() - if err != nil { - return jose.JWT{}, err - } - - t, err := oac.RequestToken(oauth2.GrantTypeAuthCode, code) - if err != nil { - return jose.JWT{}, err - } - - jwt, err := jose.ParseJWT(t.IDToken) - if err != nil { - return jose.JWT{}, err - } - - return jwt, c.VerifyJWT(jwt) -} - -// RefreshToken uses a refresh token to exchange for a new OIDC JWT ID Token. -func (c *Client) RefreshToken(refreshToken string) (jose.JWT, error) { - oac, err := c.OAuthClient() - if err != nil { - return jose.JWT{}, err - } - - t, err := oac.RequestToken(oauth2.GrantTypeRefreshToken, refreshToken) - if err != nil { - return jose.JWT{}, err - } - - jwt, err := jose.ParseJWT(t.IDToken) - if err != nil { - return jose.JWT{}, err - } - - return jwt, c.VerifyJWT(jwt) -} - -func (c *Client) VerifyJWT(jwt jose.JWT) error { - var keysFunc func() []key.PublicKey - if kID, ok := jwt.KeyID(); ok { - keysFunc = c.keysFuncWithID(kID) - } else { - keysFunc = c.keysFuncAll() - } - - v := NewJWTVerifier( - c.providerConfig.Get().Issuer.String(), - c.credentials.ID, - c.maybeSyncKeys, keysFunc) - - return v.Verify(jwt) -} - -// keysFuncWithID returns a function that retrieves at most unexpired -// public key from the Client that matches the provided ID -func (c *Client) keysFuncWithID(kID string) func() []key.PublicKey { - return func() []key.PublicKey { - c.keySetSyncMutex.RLock() - defer c.keySetSyncMutex.RUnlock() - - if c.keySet.ExpiresAt().Before(time.Now()) { - return []key.PublicKey{} - } - - k := c.keySet.Key(kID) - if k == nil { - return []key.PublicKey{} - } - - return []key.PublicKey{*k} - } -} - -// keysFuncAll returns a function that retrieves all unexpired public -// keys from the Client -func (c *Client) keysFuncAll() func() []key.PublicKey { - return func() []key.PublicKey { - c.keySetSyncMutex.RLock() - defer c.keySetSyncMutex.RUnlock() - - if c.keySet.ExpiresAt().Before(time.Now()) { - return []key.PublicKey{} - } - - return c.keySet.Keys() - } -} - -type providerConfigRepo struct { - mu sync.RWMutex - config ProviderConfig // do not access directly, use Get() -} - -func newProviderConfigRepo(pc ProviderConfig) *providerConfigRepo { - return &providerConfigRepo{sync.RWMutex{}, pc} -} - -// returns an error to implement ProviderConfigSetter -func (r *providerConfigRepo) Set(cfg ProviderConfig) error { - r.mu.Lock() - defer r.mu.Unlock() - r.config = cfg - return nil -} - -func (r *providerConfigRepo) Get() ProviderConfig { - r.mu.RLock() - defer r.mu.RUnlock() - return r.config -} diff --git a/vendor/github.com/coreos/go-oidc/oidc/identity.go b/vendor/github.com/coreos/go-oidc/oidc/identity.go deleted file mode 100644 index 9bfa8e3439..0000000000 --- a/vendor/github.com/coreos/go-oidc/oidc/identity.go +++ /dev/null @@ -1,44 +0,0 @@ -package oidc - -import ( - "errors" - "time" - - "github.com/coreos/go-oidc/jose" -) - -type Identity struct { - ID string - Name string - Email string - ExpiresAt time.Time -} - -func IdentityFromClaims(claims jose.Claims) (*Identity, error) { - if claims == nil { - return nil, errors.New("nil claim set") - } - - var ident Identity - var err error - var ok bool - - if 
ident.ID, ok, err = claims.StringClaim("sub"); err != nil { - return nil, err - } else if !ok { - return nil, errors.New("missing required claim: sub") - } - - if ident.Email, _, err = claims.StringClaim("email"); err != nil { - return nil, err - } - - exp, ok, err := claims.TimeClaim("exp") - if err != nil { - return nil, err - } else if ok { - ident.ExpiresAt = exp - } - - return &ident, nil -} diff --git a/vendor/github.com/coreos/go-oidc/oidc/interface.go b/vendor/github.com/coreos/go-oidc/oidc/interface.go deleted file mode 100644 index 248cac0b4d..0000000000 --- a/vendor/github.com/coreos/go-oidc/oidc/interface.go +++ /dev/null @@ -1,3 +0,0 @@ -package oidc - -type LoginFunc func(ident Identity, sessionKey string) (redirectURL string, err error) diff --git a/vendor/github.com/coreos/go-oidc/oidc/key.go b/vendor/github.com/coreos/go-oidc/oidc/key.go deleted file mode 100755 index 82a0f567d5..0000000000 --- a/vendor/github.com/coreos/go-oidc/oidc/key.go +++ /dev/null @@ -1,67 +0,0 @@ -package oidc - -import ( - "encoding/json" - "errors" - "net/http" - "time" - - phttp "github.com/coreos/go-oidc/http" - "github.com/coreos/go-oidc/jose" - "github.com/coreos/go-oidc/key" -) - -// DefaultPublicKeySetTTL is the default TTL set on the PublicKeySet if no -// Cache-Control header is provided by the JWK Set document endpoint. -const DefaultPublicKeySetTTL = 24 * time.Hour - -// NewRemotePublicKeyRepo is responsible for fetching the JWK Set document. -func NewRemotePublicKeyRepo(hc phttp.Client, ep string) *remotePublicKeyRepo { - return &remotePublicKeyRepo{hc: hc, ep: ep} -} - -type remotePublicKeyRepo struct { - hc phttp.Client - ep string -} - -// Get returns a PublicKeySet fetched from the JWK Set document endpoint. A TTL -// is set on the Key Set to avoid it having to be re-retrieved for every -// encryption event. This TTL is typically controlled by the endpoint returning -// a Cache-Control header, but defaults to 24 hours if no Cache-Control header -// is found. -func (r *remotePublicKeyRepo) Get() (key.KeySet, error) { - req, err := http.NewRequest("GET", r.ep, nil) - if err != nil { - return nil, err - } - - resp, err := r.hc.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var d struct { - Keys []jose.JWK `json:"keys"` - } - if err := json.NewDecoder(resp.Body).Decode(&d); err != nil { - return nil, err - } - - if len(d.Keys) == 0 { - return nil, errors.New("zero keys in response") - } - - ttl, ok, err := phttp.Cacheable(resp.Header) - if err != nil { - return nil, err - } - if !ok { - ttl = DefaultPublicKeySetTTL - } - - exp := time.Now().UTC().Add(ttl) - ks := key.NewPublicKeySet(d.Keys, exp) - return ks, nil -} diff --git a/vendor/github.com/coreos/go-oidc/oidc/provider.go b/vendor/github.com/coreos/go-oidc/oidc/provider.go deleted file mode 100644 index ca2838440b..0000000000 --- a/vendor/github.com/coreos/go-oidc/oidc/provider.go +++ /dev/null @@ -1,690 +0,0 @@ -package oidc - -import ( - "encoding/json" - "errors" - "fmt" - "log" - "net/http" - "net/url" - "strings" - "sync" - "time" - - "github.com/coreos/pkg/timeutil" - "github.com/jonboulle/clockwork" - - phttp "github.com/coreos/go-oidc/http" - "github.com/coreos/go-oidc/oauth2" -) - -const ( - // Subject Identifier types defined by the OIDC spec. Specifies if the provider - // should provide the same sub claim value to all clients (public) or a unique - // value for each client (pairwise). 
- // - // See: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes - SubjectTypePublic = "public" - SubjectTypePairwise = "pairwise" -) - -var ( - // Default values for omitted provider config fields. - // - // Use ProviderConfig's Defaults method to fill a provider config with these values. - DefaultGrantTypesSupported = []string{oauth2.GrantTypeAuthCode, oauth2.GrantTypeImplicit} - DefaultResponseModesSupported = []string{"query", "fragment"} - DefaultTokenEndpointAuthMethodsSupported = []string{oauth2.AuthMethodClientSecretBasic} - DefaultClaimTypesSupported = []string{"normal"} -) - -const ( - MaximumProviderConfigSyncInterval = 24 * time.Hour - MinimumProviderConfigSyncInterval = time.Minute - - discoveryConfigPath = "/.well-known/openid-configuration" -) - -// internally configurable for tests -var minimumProviderConfigSyncInterval = MinimumProviderConfigSyncInterval - -var ( - // Ensure ProviderConfig satisfies these interfaces. - _ json.Marshaler = &ProviderConfig{} - _ json.Unmarshaler = &ProviderConfig{} -) - -// ProviderConfig represents the OpenID Provider Metadata specifying what -// configurations a provider supports. -// -// See: http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata -type ProviderConfig struct { - Issuer *url.URL // Required - AuthEndpoint *url.URL // Required - TokenEndpoint *url.URL // Required if grant types other than "implicit" are supported - UserInfoEndpoint *url.URL - KeysEndpoint *url.URL // Required - RegistrationEndpoint *url.URL - EndSessionEndpoint *url.URL - CheckSessionIFrame *url.URL - - // Servers MAY choose not to advertise some supported scope values even when this - // parameter is used, although those defined in OpenID Core SHOULD be listed, if supported. - ScopesSupported []string - // OAuth2.0 response types supported. - ResponseTypesSupported []string // Required - // OAuth2.0 response modes supported. - // - // If omitted, defaults to DefaultResponseModesSupported. - ResponseModesSupported []string - // OAuth2.0 grant types supported. - // - // If omitted, defaults to DefaultGrantTypesSupported. - GrantTypesSupported []string - ACRValuesSupported []string - // SubjectTypesSupported specifies strategies for providing values for the sub claim. - SubjectTypesSupported []string // Required - - // JWA signing and encryption algorith values supported for ID tokens. - IDTokenSigningAlgValues []string // Required - IDTokenEncryptionAlgValues []string - IDTokenEncryptionEncValues []string - - // JWA signing and encryption algorith values supported for user info responses. - UserInfoSigningAlgValues []string - UserInfoEncryptionAlgValues []string - UserInfoEncryptionEncValues []string - - // JWA signing and encryption algorith values supported for request objects. - ReqObjSigningAlgValues []string - ReqObjEncryptionAlgValues []string - ReqObjEncryptionEncValues []string - - TokenEndpointAuthMethodsSupported []string - TokenEndpointAuthSigningAlgValuesSupported []string - DisplayValuesSupported []string - ClaimTypesSupported []string - ClaimsSupported []string - ServiceDocs *url.URL - ClaimsLocalsSupported []string - UILocalsSupported []string - ClaimsParameterSupported bool - RequestParameterSupported bool - RequestURIParamaterSupported bool - RequireRequestURIRegistration bool - - Policy *url.URL - TermsOfService *url.URL - - // Not part of the OpenID Provider Metadata - ExpiresAt time.Time -} - -// Defaults returns a shallow copy of ProviderConfig with default -// values replacing omitted fields. 
-// -// var cfg oidc.ProviderConfig -// // Fill provider config with default values for omitted fields. -// cfg = cfg.Defaults() -// -func (p ProviderConfig) Defaults() ProviderConfig { - setDefault := func(val *[]string, defaultVal []string) { - if len(*val) == 0 { - *val = defaultVal - } - } - setDefault(&p.GrantTypesSupported, DefaultGrantTypesSupported) - setDefault(&p.ResponseModesSupported, DefaultResponseModesSupported) - setDefault(&p.TokenEndpointAuthMethodsSupported, DefaultTokenEndpointAuthMethodsSupported) - setDefault(&p.ClaimTypesSupported, DefaultClaimTypesSupported) - return p -} - -func (p *ProviderConfig) MarshalJSON() ([]byte, error) { - e := p.toEncodableStruct() - return json.Marshal(&e) -} - -func (p *ProviderConfig) UnmarshalJSON(data []byte) error { - var e encodableProviderConfig - if err := json.Unmarshal(data, &e); err != nil { - return err - } - conf, err := e.toStruct() - if err != nil { - return err - } - if err := conf.Valid(); err != nil { - return err - } - *p = conf - return nil -} - -type encodableProviderConfig struct { - Issuer string `json:"issuer"` - AuthEndpoint string `json:"authorization_endpoint"` - TokenEndpoint string `json:"token_endpoint"` - UserInfoEndpoint string `json:"userinfo_endpoint,omitempty"` - KeysEndpoint string `json:"jwks_uri"` - RegistrationEndpoint string `json:"registration_endpoint,omitempty"` - EndSessionEndpoint string `json:"end_session_endpoint,omitempty"` - CheckSessionIFrame string `json:"check_session_iframe,omitempty"` - - // Use 'omitempty' for all slices as per OIDC spec: - // "Claims that return multiple values are represented as JSON arrays. - // Claims with zero elements MUST be omitted from the response." - // http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse - - ScopesSupported []string `json:"scopes_supported,omitempty"` - ResponseTypesSupported []string `json:"response_types_supported,omitempty"` - ResponseModesSupported []string `json:"response_modes_supported,omitempty"` - GrantTypesSupported []string `json:"grant_types_supported,omitempty"` - ACRValuesSupported []string `json:"acr_values_supported,omitempty"` - SubjectTypesSupported []string `json:"subject_types_supported,omitempty"` - - IDTokenSigningAlgValues []string `json:"id_token_signing_alg_values_supported,omitempty"` - IDTokenEncryptionAlgValues []string `json:"id_token_encryption_alg_values_supported,omitempty"` - IDTokenEncryptionEncValues []string `json:"id_token_encryption_enc_values_supported,omitempty"` - UserInfoSigningAlgValues []string `json:"userinfo_signing_alg_values_supported,omitempty"` - UserInfoEncryptionAlgValues []string `json:"userinfo_encryption_alg_values_supported,omitempty"` - UserInfoEncryptionEncValues []string `json:"userinfo_encryption_enc_values_supported,omitempty"` - ReqObjSigningAlgValues []string `json:"request_object_signing_alg_values_supported,omitempty"` - ReqObjEncryptionAlgValues []string `json:"request_object_encryption_alg_values_supported,omitempty"` - ReqObjEncryptionEncValues []string `json:"request_object_encryption_enc_values_supported,omitempty"` - - TokenEndpointAuthMethodsSupported []string `json:"token_endpoint_auth_methods_supported,omitempty"` - TokenEndpointAuthSigningAlgValuesSupported []string `json:"token_endpoint_auth_signing_alg_values_supported,omitempty"` - - DisplayValuesSupported []string `json:"display_values_supported,omitempty"` - ClaimTypesSupported []string `json:"claim_types_supported,omitempty"` - ClaimsSupported []string 
`json:"claims_supported,omitempty"` - ServiceDocs string `json:"service_documentation,omitempty"` - ClaimsLocalsSupported []string `json:"claims_locales_supported,omitempty"` - UILocalsSupported []string `json:"ui_locales_supported,omitempty"` - ClaimsParameterSupported bool `json:"claims_parameter_supported,omitempty"` - RequestParameterSupported bool `json:"request_parameter_supported,omitempty"` - RequestURIParamaterSupported bool `json:"request_uri_parameter_supported,omitempty"` - RequireRequestURIRegistration bool `json:"require_request_uri_registration,omitempty"` - - Policy string `json:"op_policy_uri,omitempty"` - TermsOfService string `json:"op_tos_uri,omitempty"` -} - -func (cfg ProviderConfig) toEncodableStruct() encodableProviderConfig { - return encodableProviderConfig{ - Issuer: uriToString(cfg.Issuer), - AuthEndpoint: uriToString(cfg.AuthEndpoint), - TokenEndpoint: uriToString(cfg.TokenEndpoint), - UserInfoEndpoint: uriToString(cfg.UserInfoEndpoint), - KeysEndpoint: uriToString(cfg.KeysEndpoint), - RegistrationEndpoint: uriToString(cfg.RegistrationEndpoint), - EndSessionEndpoint: uriToString(cfg.EndSessionEndpoint), - CheckSessionIFrame: uriToString(cfg.CheckSessionIFrame), - ScopesSupported: cfg.ScopesSupported, - ResponseTypesSupported: cfg.ResponseTypesSupported, - ResponseModesSupported: cfg.ResponseModesSupported, - GrantTypesSupported: cfg.GrantTypesSupported, - ACRValuesSupported: cfg.ACRValuesSupported, - SubjectTypesSupported: cfg.SubjectTypesSupported, - IDTokenSigningAlgValues: cfg.IDTokenSigningAlgValues, - IDTokenEncryptionAlgValues: cfg.IDTokenEncryptionAlgValues, - IDTokenEncryptionEncValues: cfg.IDTokenEncryptionEncValues, - UserInfoSigningAlgValues: cfg.UserInfoSigningAlgValues, - UserInfoEncryptionAlgValues: cfg.UserInfoEncryptionAlgValues, - UserInfoEncryptionEncValues: cfg.UserInfoEncryptionEncValues, - ReqObjSigningAlgValues: cfg.ReqObjSigningAlgValues, - ReqObjEncryptionAlgValues: cfg.ReqObjEncryptionAlgValues, - ReqObjEncryptionEncValues: cfg.ReqObjEncryptionEncValues, - TokenEndpointAuthMethodsSupported: cfg.TokenEndpointAuthMethodsSupported, - TokenEndpointAuthSigningAlgValuesSupported: cfg.TokenEndpointAuthSigningAlgValuesSupported, - DisplayValuesSupported: cfg.DisplayValuesSupported, - ClaimTypesSupported: cfg.ClaimTypesSupported, - ClaimsSupported: cfg.ClaimsSupported, - ServiceDocs: uriToString(cfg.ServiceDocs), - ClaimsLocalsSupported: cfg.ClaimsLocalsSupported, - UILocalsSupported: cfg.UILocalsSupported, - ClaimsParameterSupported: cfg.ClaimsParameterSupported, - RequestParameterSupported: cfg.RequestParameterSupported, - RequestURIParamaterSupported: cfg.RequestURIParamaterSupported, - RequireRequestURIRegistration: cfg.RequireRequestURIRegistration, - Policy: uriToString(cfg.Policy), - TermsOfService: uriToString(cfg.TermsOfService), - } -} - -func (e encodableProviderConfig) toStruct() (ProviderConfig, error) { - p := stickyErrParser{} - conf := ProviderConfig{ - Issuer: p.parseURI(e.Issuer, "issuer"), - AuthEndpoint: p.parseURI(e.AuthEndpoint, "authorization_endpoint"), - TokenEndpoint: p.parseURI(e.TokenEndpoint, "token_endpoint"), - UserInfoEndpoint: p.parseURI(e.UserInfoEndpoint, "userinfo_endpoint"), - KeysEndpoint: p.parseURI(e.KeysEndpoint, "jwks_uri"), - RegistrationEndpoint: p.parseURI(e.RegistrationEndpoint, "registration_endpoint"), - EndSessionEndpoint: p.parseURI(e.EndSessionEndpoint, "end_session_endpoint"), - CheckSessionIFrame: p.parseURI(e.CheckSessionIFrame, "check_session_iframe"), - ScopesSupported: e.ScopesSupported, - 
ResponseTypesSupported: e.ResponseTypesSupported, - ResponseModesSupported: e.ResponseModesSupported, - GrantTypesSupported: e.GrantTypesSupported, - ACRValuesSupported: e.ACRValuesSupported, - SubjectTypesSupported: e.SubjectTypesSupported, - IDTokenSigningAlgValues: e.IDTokenSigningAlgValues, - IDTokenEncryptionAlgValues: e.IDTokenEncryptionAlgValues, - IDTokenEncryptionEncValues: e.IDTokenEncryptionEncValues, - UserInfoSigningAlgValues: e.UserInfoSigningAlgValues, - UserInfoEncryptionAlgValues: e.UserInfoEncryptionAlgValues, - UserInfoEncryptionEncValues: e.UserInfoEncryptionEncValues, - ReqObjSigningAlgValues: e.ReqObjSigningAlgValues, - ReqObjEncryptionAlgValues: e.ReqObjEncryptionAlgValues, - ReqObjEncryptionEncValues: e.ReqObjEncryptionEncValues, - TokenEndpointAuthMethodsSupported: e.TokenEndpointAuthMethodsSupported, - TokenEndpointAuthSigningAlgValuesSupported: e.TokenEndpointAuthSigningAlgValuesSupported, - DisplayValuesSupported: e.DisplayValuesSupported, - ClaimTypesSupported: e.ClaimTypesSupported, - ClaimsSupported: e.ClaimsSupported, - ServiceDocs: p.parseURI(e.ServiceDocs, "service_documentation"), - ClaimsLocalsSupported: e.ClaimsLocalsSupported, - UILocalsSupported: e.UILocalsSupported, - ClaimsParameterSupported: e.ClaimsParameterSupported, - RequestParameterSupported: e.RequestParameterSupported, - RequestURIParamaterSupported: e.RequestURIParamaterSupported, - RequireRequestURIRegistration: e.RequireRequestURIRegistration, - Policy: p.parseURI(e.Policy, "op_policy-uri"), - TermsOfService: p.parseURI(e.TermsOfService, "op_tos_uri"), - } - if p.firstErr != nil { - return ProviderConfig{}, p.firstErr - } - return conf, nil -} - -// Empty returns if a ProviderConfig holds no information. -// -// This case generally indicates a ProviderConfigGetter has experienced an error -// and has nothing to report. -func (p ProviderConfig) Empty() bool { - return p.Issuer == nil -} - -func contains(sli []string, ele string) bool { - for _, s := range sli { - if s == ele { - return true - } - } - return false -} - -// Valid determines if a ProviderConfig conforms with the OIDC specification. -// If Valid returns successfully it guarantees required field are non-nil and -// URLs are well formed. -// -// Valid is called by UnmarshalJSON. -// -// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for -// URLs fields where the OIDC spec requires it. This may change in future releases -// of this package. 
See: https://github.com/coreos/go-oidc/issues/34 -func (p ProviderConfig) Valid() error { - grantTypes := p.GrantTypesSupported - if len(grantTypes) == 0 { - grantTypes = DefaultGrantTypesSupported - } - implicitOnly := true - for _, grantType := range grantTypes { - if grantType != oauth2.GrantTypeImplicit { - implicitOnly = false - break - } - } - - if len(p.SubjectTypesSupported) == 0 { - return errors.New("missing required field subject_types_supported") - } - if len(p.IDTokenSigningAlgValues) == 0 { - return errors.New("missing required field id_token_signing_alg_values_supported") - } - - if len(p.ScopesSupported) != 0 && !contains(p.ScopesSupported, "openid") { - return errors.New("scoped_supported must be unspecified or include 'openid'") - } - - if !contains(p.IDTokenSigningAlgValues, "RS256") { - return errors.New("id_token_signing_alg_values_supported must include 'RS256'") - } - if contains(p.TokenEndpointAuthMethodsSupported, "none") { - return errors.New("token_endpoint_auth_signing_alg_values_supported cannot include 'none'") - } - - uris := []struct { - val *url.URL - name string - required bool - }{ - {p.Issuer, "issuer", true}, - {p.AuthEndpoint, "authorization_endpoint", true}, - {p.TokenEndpoint, "token_endpoint", !implicitOnly}, - {p.UserInfoEndpoint, "userinfo_endpoint", false}, - {p.KeysEndpoint, "jwks_uri", true}, - {p.RegistrationEndpoint, "registration_endpoint", false}, - {p.EndSessionEndpoint, "end_session_endpoint", false}, - {p.CheckSessionIFrame, "check_session_iframe", false}, - {p.ServiceDocs, "service_documentation", false}, - {p.Policy, "op_policy_uri", false}, - {p.TermsOfService, "op_tos_uri", false}, - } - - for _, uri := range uris { - if uri.val == nil { - if !uri.required { - continue - } - return fmt.Errorf("empty value for required uri field %s", uri.name) - } - if uri.val.Host == "" { - return fmt.Errorf("no host for uri field %s", uri.name) - } - if uri.val.Scheme != "http" && uri.val.Scheme != "https" { - return fmt.Errorf("uri field %s schemeis not http or https", uri.name) - } - } - return nil -} - -// Supports determines if provider supports a client given their respective metadata. -func (p ProviderConfig) Supports(c ClientMetadata) error { - if err := p.Valid(); err != nil { - return fmt.Errorf("invalid provider config: %v", err) - } - if err := c.Valid(); err != nil { - return fmt.Errorf("invalid client config: %v", err) - } - - // Fill default values for omitted fields - c = c.Defaults() - p = p.Defaults() - - // Do the supported values list the requested one? 
- supports := []struct { - supported []string - requested string - name string - }{ - {p.IDTokenSigningAlgValues, c.IDTokenResponseOptions.SigningAlg, "id_token_signed_response_alg"}, - {p.IDTokenEncryptionAlgValues, c.IDTokenResponseOptions.EncryptionAlg, "id_token_encryption_response_alg"}, - {p.IDTokenEncryptionEncValues, c.IDTokenResponseOptions.EncryptionEnc, "id_token_encryption_response_enc"}, - {p.UserInfoSigningAlgValues, c.UserInfoResponseOptions.SigningAlg, "userinfo_signed_response_alg"}, - {p.UserInfoEncryptionAlgValues, c.UserInfoResponseOptions.EncryptionAlg, "userinfo_encryption_response_alg"}, - {p.UserInfoEncryptionEncValues, c.UserInfoResponseOptions.EncryptionEnc, "userinfo_encryption_response_enc"}, - {p.ReqObjSigningAlgValues, c.RequestObjectOptions.SigningAlg, "request_object_signing_alg"}, - {p.ReqObjEncryptionAlgValues, c.RequestObjectOptions.EncryptionAlg, "request_object_encryption_alg"}, - {p.ReqObjEncryptionEncValues, c.RequestObjectOptions.EncryptionEnc, "request_object_encryption_enc"}, - } - for _, field := range supports { - if field.requested == "" { - continue - } - if !contains(field.supported, field.requested) { - return fmt.Errorf("provider does not support requested value for field %s", field.name) - } - } - - stringsEqual := func(s1, s2 string) bool { return s1 == s2 } - - // For lists, are the list of requested values a subset of the supported ones? - supportsAll := []struct { - supported []string - requested []string - name string - // OAuth2.0 response_type can be space separated lists where order doesn't matter. - // For example "id_token token" is the same as "token id_token" - // Support a custom compare method. - comp func(s1, s2 string) bool - }{ - {p.GrantTypesSupported, c.GrantTypes, "grant_types", stringsEqual}, - {p.ResponseTypesSupported, c.ResponseTypes, "response_type", oauth2.ResponseTypesEqual}, - } - for _, field := range supportsAll { - requestLoop: - for _, req := range field.requested { - for _, sup := range field.supported { - if field.comp(req, sup) { - continue requestLoop - } - } - return fmt.Errorf("provider does not support requested value for field %s", field.name) - } - } - - // TODO(ericchiang): Are there more checks we feel comfortable with begin strict about? 
- - return nil -} - -func (p ProviderConfig) SupportsGrantType(grantType string) bool { - var supported []string - if len(p.GrantTypesSupported) == 0 { - supported = DefaultGrantTypesSupported - } else { - supported = p.GrantTypesSupported - } - - for _, t := range supported { - if t == grantType { - return true - } - } - return false -} - -type ProviderConfigGetter interface { - Get() (ProviderConfig, error) -} - -type ProviderConfigSetter interface { - Set(ProviderConfig) error -} - -type ProviderConfigSyncer struct { - from ProviderConfigGetter - to ProviderConfigSetter - clock clockwork.Clock - - initialSyncDone bool - initialSyncWait sync.WaitGroup -} - -func NewProviderConfigSyncer(from ProviderConfigGetter, to ProviderConfigSetter) *ProviderConfigSyncer { - return &ProviderConfigSyncer{ - from: from, - to: to, - clock: clockwork.NewRealClock(), - } -} - -func (s *ProviderConfigSyncer) Run() chan struct{} { - stop := make(chan struct{}) - - var next pcsStepper - next = &pcsStepNext{aft: time.Duration(0)} - - s.initialSyncWait.Add(1) - go func() { - for { - select { - case <-s.clock.After(next.after()): - next = next.step(s.sync) - case <-stop: - return - } - } - }() - - return stop -} - -func (s *ProviderConfigSyncer) WaitUntilInitialSync() { - s.initialSyncWait.Wait() -} - -func (s *ProviderConfigSyncer) sync() (time.Duration, error) { - cfg, err := s.from.Get() - if err != nil { - return 0, err - } - - if err = s.to.Set(cfg); err != nil { - return 0, fmt.Errorf("error setting provider config: %v", err) - } - - if !s.initialSyncDone { - s.initialSyncWait.Done() - s.initialSyncDone = true - } - - return nextSyncAfter(cfg.ExpiresAt, s.clock), nil -} - -type pcsStepFunc func() (time.Duration, error) - -type pcsStepper interface { - after() time.Duration - step(pcsStepFunc) pcsStepper -} - -type pcsStepNext struct { - aft time.Duration -} - -func (n *pcsStepNext) after() time.Duration { - return n.aft -} - -func (n *pcsStepNext) step(fn pcsStepFunc) (next pcsStepper) { - ttl, err := fn() - if err == nil { - next = &pcsStepNext{aft: ttl} - } else { - next = &pcsStepRetry{aft: time.Second} - log.Printf("go-oidc: provider config sync falied, retyring in %v: %v", next.after(), err) - } - return -} - -type pcsStepRetry struct { - aft time.Duration -} - -func (r *pcsStepRetry) after() time.Duration { - return r.aft -} - -func (r *pcsStepRetry) step(fn pcsStepFunc) (next pcsStepper) { - ttl, err := fn() - if err == nil { - next = &pcsStepNext{aft: ttl} - } else { - next = &pcsStepRetry{aft: timeutil.ExpBackoff(r.aft, time.Minute)} - log.Printf("go-oidc: provider config sync falied, retyring in %v: %v", next.after(), err) - } - return -} - -func nextSyncAfter(exp time.Time, clock clockwork.Clock) time.Duration { - if exp.IsZero() { - return MaximumProviderConfigSyncInterval - } - - t := exp.Sub(clock.Now()) / 2 - if t > MaximumProviderConfigSyncInterval { - t = MaximumProviderConfigSyncInterval - } else if t < minimumProviderConfigSyncInterval { - t = minimumProviderConfigSyncInterval - } - - return t -} - -type httpProviderConfigGetter struct { - hc phttp.Client - issuerURL string - clock clockwork.Clock -} - -func NewHTTPProviderConfigGetter(hc phttp.Client, issuerURL string) *httpProviderConfigGetter { - return &httpProviderConfigGetter{ - hc: hc, - issuerURL: issuerURL, - clock: clockwork.NewRealClock(), - } -} - -func (r *httpProviderConfigGetter) Get() (cfg ProviderConfig, err error) { - // If the Issuer value contains a path component, any terminating / MUST be removed before - // 
appending /.well-known/openid-configuration. - // https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationRequest - discoveryURL := strings.TrimSuffix(r.issuerURL, "/") + discoveryConfigPath - req, err := http.NewRequest("GET", discoveryURL, nil) - if err != nil { - return - } - - resp, err := r.hc.Do(req) - if err != nil { - return - } - defer resp.Body.Close() - - if err = json.NewDecoder(resp.Body).Decode(&cfg); err != nil { - return - } - - var ttl time.Duration - var ok bool - ttl, ok, err = phttp.Cacheable(resp.Header) - if err != nil { - return - } else if ok { - cfg.ExpiresAt = r.clock.Now().UTC().Add(ttl) - } - - // The issuer value returned MUST be identical to the Issuer URL that was directly used to retrieve the configuration information. - // http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationValidation - if !urlEqual(cfg.Issuer.String(), r.issuerURL) { - err = fmt.Errorf(`"issuer" in config (%v) does not match provided issuer URL (%v)`, cfg.Issuer, r.issuerURL) - return - } - - return -} - -func FetchProviderConfig(hc phttp.Client, issuerURL string) (ProviderConfig, error) { - if hc == nil { - hc = http.DefaultClient - } - - g := NewHTTPProviderConfigGetter(hc, issuerURL) - return g.Get() -} - -func WaitForProviderConfig(hc phttp.Client, issuerURL string) (pcfg ProviderConfig) { - return waitForProviderConfig(hc, issuerURL, clockwork.NewRealClock()) -} - -func waitForProviderConfig(hc phttp.Client, issuerURL string, clock clockwork.Clock) (pcfg ProviderConfig) { - var sleep time.Duration - var err error - for { - pcfg, err = FetchProviderConfig(hc, issuerURL) - if err == nil { - break - } - - sleep = timeutil.ExpBackoff(sleep, time.Minute) - fmt.Printf("Failed fetching provider config, trying again in %v: %v\n", sleep, err) - time.Sleep(sleep) - } - - return -} diff --git a/vendor/github.com/coreos/go-oidc/oidc/transport.go b/vendor/github.com/coreos/go-oidc/oidc/transport.go deleted file mode 100644 index 61c926d7fe..0000000000 --- a/vendor/github.com/coreos/go-oidc/oidc/transport.go +++ /dev/null @@ -1,88 +0,0 @@ -package oidc - -import ( - "fmt" - "net/http" - "sync" - - phttp "github.com/coreos/go-oidc/http" - "github.com/coreos/go-oidc/jose" -) - -type TokenRefresher interface { - // Verify checks if the provided token is currently valid or not. - Verify(jose.JWT) error - - // Refresh attempts to authenticate and retrieve a new token. 
- Refresh() (jose.JWT, error) -} - -type ClientCredsTokenRefresher struct { - Issuer string - OIDCClient *Client -} - -func (c *ClientCredsTokenRefresher) Verify(jwt jose.JWT) (err error) { - _, err = VerifyClientClaims(jwt, c.Issuer) - return -} - -func (c *ClientCredsTokenRefresher) Refresh() (jwt jose.JWT, err error) { - if err = c.OIDCClient.Healthy(); err != nil { - err = fmt.Errorf("unable to authenticate, unhealthy OIDC client: %v", err) - return - } - - jwt, err = c.OIDCClient.ClientCredsToken([]string{"openid"}) - if err != nil { - err = fmt.Errorf("unable to verify auth code with issuer: %v", err) - return - } - - return -} - -type AuthenticatedTransport struct { - TokenRefresher - http.RoundTripper - - mu sync.Mutex - jwt jose.JWT -} - -func (t *AuthenticatedTransport) verifiedJWT() (jose.JWT, error) { - t.mu.Lock() - defer t.mu.Unlock() - - if t.TokenRefresher.Verify(t.jwt) == nil { - return t.jwt, nil - } - - jwt, err := t.TokenRefresher.Refresh() - if err != nil { - return jose.JWT{}, fmt.Errorf("unable to acquire valid JWT: %v", err) - } - - t.jwt = jwt - return t.jwt, nil -} - -// SetJWT sets the JWT held by the Transport. -// This is useful for cases in which you want to set an initial JWT. -func (t *AuthenticatedTransport) SetJWT(jwt jose.JWT) { - t.mu.Lock() - defer t.mu.Unlock() - - t.jwt = jwt -} - -func (t *AuthenticatedTransport) RoundTrip(r *http.Request) (*http.Response, error) { - jwt, err := t.verifiedJWT() - if err != nil { - return nil, err - } - - req := phttp.CopyRequest(r) - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", jwt.Encode())) - return t.RoundTripper.RoundTrip(req) -} diff --git a/vendor/github.com/coreos/go-oidc/oidc/util.go b/vendor/github.com/coreos/go-oidc/oidc/util.go deleted file mode 100644 index f2a5a195e4..0000000000 --- a/vendor/github.com/coreos/go-oidc/oidc/util.go +++ /dev/null @@ -1,109 +0,0 @@ -package oidc - -import ( - "crypto/rand" - "encoding/base64" - "errors" - "fmt" - "net" - "net/http" - "net/url" - "strings" - "time" - - "github.com/coreos/go-oidc/jose" -) - -// RequestTokenExtractor funcs extract a raw encoded token from a request. -type RequestTokenExtractor func(r *http.Request) (string, error) - -// ExtractBearerToken is a RequestTokenExtractor which extracts a bearer token from a request's -// Authorization header. -func ExtractBearerToken(r *http.Request) (string, error) { - ah := r.Header.Get("Authorization") - if ah == "" { - return "", errors.New("missing Authorization header") - } - - if len(ah) <= 6 || strings.ToUpper(ah[0:6]) != "BEARER" { - return "", errors.New("should be a bearer token") - } - - val := ah[7:] - if len(val) == 0 { - return "", errors.New("bearer token is empty") - } - - return val, nil -} - -// CookieTokenExtractor returns a RequestTokenExtractor which extracts a token from the named cookie in a request. 
-func CookieTokenExtractor(cookieName string) RequestTokenExtractor { - return func(r *http.Request) (string, error) { - ck, err := r.Cookie(cookieName) - if err != nil { - return "", fmt.Errorf("token cookie not found in request: %v", err) - } - - if ck.Value == "" { - return "", errors.New("token cookie found but is empty") - } - - return ck.Value, nil - } -} - -func NewClaims(iss, sub string, aud interface{}, iat, exp time.Time) jose.Claims { - return jose.Claims{ - // required - "iss": iss, - "sub": sub, - "aud": aud, - "iat": iat.Unix(), - "exp": exp.Unix(), - } -} - -func GenClientID(hostport string) (string, error) { - b, err := randBytes(32) - if err != nil { - return "", err - } - - var host string - if strings.Contains(hostport, ":") { - host, _, err = net.SplitHostPort(hostport) - if err != nil { - return "", err - } - } else { - host = hostport - } - - return fmt.Sprintf("%s@%s", base64.URLEncoding.EncodeToString(b), host), nil -} - -func randBytes(n int) ([]byte, error) { - b := make([]byte, n) - got, err := rand.Read(b) - if err != nil { - return nil, err - } else if n != got { - return nil, errors.New("unable to generate enough random data") - } - return b, nil -} - -// urlEqual checks two urls for equality using only the host and path portions. -func urlEqual(url1, url2 string) bool { - u1, err := url.Parse(url1) - if err != nil { - return false - } - u2, err := url.Parse(url2) - if err != nil { - return false - } - - return strings.ToLower(u1.Host+u1.Path) == strings.ToLower(u2.Host+u2.Path) -} diff --git a/vendor/github.com/coreos/go-oidc/oidc/verification.go b/vendor/github.com/coreos/go-oidc/oidc/verification.go deleted file mode 100644 index d9c6afa697..0000000000 --- a/vendor/github.com/coreos/go-oidc/oidc/verification.go +++ /dev/null @@ -1,190 +0,0 @@ -package oidc - -import ( - "errors" - "fmt" - "time" - - "github.com/jonboulle/clockwork" - - "github.com/coreos/go-oidc/jose" - "github.com/coreos/go-oidc/key" -) - -func VerifySignature(jwt jose.JWT, keys []key.PublicKey) (bool, error) { - jwtBytes := []byte(jwt.Data()) - for _, k := range keys { - v, err := k.Verifier() - if err != nil { - return false, err - } - if v.Verify(jwt.Signature, jwtBytes) == nil { - return true, nil - } - } - return false, nil -} - -// containsString returns true if the given string(needle) is found -// in the string array(haystack). -func containsString(needle string, haystack []string) bool { - for _, v := range haystack { - if v == needle { - return true - } - } - return false -} - -// Verify claims in accordance with OIDC spec -// http://openid.net/specs/openid-connect-basic-1_0.html#IDTokenValidation -func VerifyClaims(jwt jose.JWT, issuer, clientID string) error { - now := time.Now().UTC() - - claims, err := jwt.Claims() - if err != nil { - return err - } - - ident, err := IdentityFromClaims(claims) - if err != nil { - return err - } - - if ident.ExpiresAt.Before(now) { - return errors.New("token is expired") - } - - // iss REQUIRED. Issuer Identifier for the Issuer of the response. - // The iss value is a case sensitive URL using the https scheme that contains scheme, - // host, and optionally, port number and path components and no query or fragment components. - if iss, exists := claims["iss"].(string); exists { - if !urlEqual(iss, issuer) { - return fmt.Errorf("invalid claim value: 'iss'. expected=%s, found=%s.", issuer, iss) - } - } else { - return errors.New("missing claim: 'iss'") - } - - // iat REQUIRED. Time at which the JWT was issued. 
- // Its value is a JSON number representing the number of seconds from 1970-01-01T0:0:0Z - // as measured in UTC until the date/time. - if _, exists := claims["iat"].(float64); !exists { - return errors.New("missing claim: 'iat'") - } - - // aud REQUIRED. Audience(s) that this ID Token is intended for. - // It MUST contain the OAuth 2.0 client_id of the Relying Party as an audience value. - // It MAY also contain identifiers for other audiences. In the general case, the aud - // value is an array of case sensitive strings. In the common special case when there - // is one audience, the aud value MAY be a single case sensitive string. - if aud, ok, err := claims.StringClaim("aud"); err == nil && ok { - if aud != clientID { - return fmt.Errorf("invalid claims, 'aud' claim and 'client_id' do not match, aud=%s, client_id=%s", aud, clientID) - } - } else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok { - if !containsString(clientID, aud) { - return fmt.Errorf("invalid claims, cannot find 'client_id' in 'aud' claim, aud=%v, client_id=%s", aud, clientID) - } - } else { - return errors.New("invalid claim value: 'aud' is required, and should be either string or string array") - } - - return nil -} - -// VerifyClientClaims verifies all the required claims are valid for a "client credentials" JWT. -// Returns the client ID if valid, or an error if invalid. -func VerifyClientClaims(jwt jose.JWT, issuer string) (string, error) { - claims, err := jwt.Claims() - if err != nil { - return "", fmt.Errorf("failed to parse JWT claims: %v", err) - } - - iss, ok, err := claims.StringClaim("iss") - if err != nil { - return "", fmt.Errorf("failed to parse 'iss' claim: %v", err) - } else if !ok { - return "", errors.New("missing required 'iss' claim") - } else if !urlEqual(iss, issuer) { - return "", fmt.Errorf("'iss' claim does not match expected issuer, iss=%s", iss) - } - - sub, ok, err := claims.StringClaim("sub") - if err != nil { - return "", fmt.Errorf("failed to parse 'sub' claim: %v", err) - } else if !ok { - return "", errors.New("missing required 'sub' claim") - } - - if aud, ok, err := claims.StringClaim("aud"); err == nil && ok { - if aud != sub { - return "", fmt.Errorf("invalid claims, 'aud' claim and 'sub' claim do not match, aud=%s, sub=%s", aud, sub) - } - } else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok { - if !containsString(sub, aud) { - return "", fmt.Errorf("invalid claims, cannot find 'sud' in 'aud' claim, aud=%v, sub=%s", aud, sub) - } - } else { - return "", errors.New("invalid claim value: 'aud' is required, and should be either string or string array") - } - - now := time.Now().UTC() - exp, ok, err := claims.TimeClaim("exp") - if err != nil { - return "", fmt.Errorf("failed to parse 'exp' claim: %v", err) - } else if !ok { - return "", errors.New("missing required 'exp' claim") - } else if exp.Before(now) { - return "", fmt.Errorf("token already expired at: %v", exp) - } - - return sub, nil -} - -type JWTVerifier struct { - issuer string - clientID string - syncFunc func() error - keysFunc func() []key.PublicKey - clock clockwork.Clock -} - -func NewJWTVerifier(issuer, clientID string, syncFunc func() error, keysFunc func() []key.PublicKey) JWTVerifier { - return JWTVerifier{ - issuer: issuer, - clientID: clientID, - syncFunc: syncFunc, - keysFunc: keysFunc, - clock: clockwork.NewRealClock(), - } -} - -func (v *JWTVerifier) Verify(jwt jose.JWT) error { - // Verify claims before verifying the signature. 
This is an optimization to throw out - // tokens we know are invalid without undergoing an expensive signature check and - // possibly a re-sync event. - if err := VerifyClaims(jwt, v.issuer, v.clientID); err != nil { - return fmt.Errorf("oidc: JWT claims invalid: %v", err) - } - - ok, err := VerifySignature(jwt, v.keysFunc()) - if err != nil { - return fmt.Errorf("oidc: JWT signature verification failed: %v", err) - } else if ok { - return nil - } - - if err = v.syncFunc(); err != nil { - return fmt.Errorf("oidc: failed syncing KeySet: %v", err) - } - - ok, err = VerifySignature(jwt, v.keysFunc()) - if err != nil { - return fmt.Errorf("oidc: JWT signature verification failed: %v", err) - } else if !ok { - return errors.New("oidc: unable to verify JWT signature: no matching keys") - } - - return nil -} diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE deleted file mode 100644 index e06d208186..0000000000 --- a/vendor/github.com/coreos/pkg/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE deleted file mode 100644 index b39ddfa5cb..0000000000 --- a/vendor/github.com/coreos/pkg/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2014 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/pkg/health/README.md b/vendor/github.com/coreos/pkg/health/README.md deleted file mode 100644 index 5ec34c21e0..0000000000 --- a/vendor/github.com/coreos/pkg/health/README.md +++ /dev/null @@ -1,11 +0,0 @@ -health -==== - -A simple framework for implementing an HTTP health check endpoint on servers. - -Users implement their `health.Checkable` types, and create a `health.Checker`, from which they can get an `http.HandlerFunc` using `health.Checker.MakeHealthHandlerFunc`. - -### Documentation - -For more details, visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/health) - diff --git a/vendor/github.com/coreos/pkg/health/health.go b/vendor/github.com/coreos/pkg/health/health.go deleted file mode 100644 index a1c3610fa5..0000000000 --- a/vendor/github.com/coreos/pkg/health/health.go +++ /dev/null @@ -1,127 +0,0 @@ -package health - -import ( - "expvar" - "fmt" - "log" - "net/http" - - "github.com/coreos/pkg/httputil" -) - -// Checkables should return nil when the thing they are checking is healthy, and an error otherwise. -type Checkable interface { - Healthy() error -} - -// Checker provides a way to make an endpoint which can be probed for system health. -type Checker struct { - // Checks are the Checkables to be checked when probing. - Checks []Checkable - - // Unhealthyhandler is called when one or more of the checks are unhealthy. - // If not provided DefaultUnhealthyHandler is called. - UnhealthyHandler UnhealthyHandler - - // HealthyHandler is called when all checks are healthy. - // If not provided, DefaultHealthyHandler is called. 
- HealthyHandler http.HandlerFunc -} - -func (c Checker) ServeHTTP(w http.ResponseWriter, r *http.Request) { - unhealthyHandler := c.UnhealthyHandler - if unhealthyHandler == nil { - unhealthyHandler = DefaultUnhealthyHandler - } - - successHandler := c.HealthyHandler - if successHandler == nil { - successHandler = DefaultHealthyHandler - } - - if r.Method != "GET" { - w.Header().Set("Allow", "GET") - w.WriteHeader(http.StatusMethodNotAllowed) - return - } - - if err := Check(c.Checks); err != nil { - unhealthyHandler(w, r, err) - return - } - - successHandler(w, r) -} - -type UnhealthyHandler func(w http.ResponseWriter, r *http.Request, err error) - -type StatusResponse struct { - Status string `json:"status"` - Details *StatusResponseDetails `json:"details,omitempty"` -} - -type StatusResponseDetails struct { - Code int `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -func Check(checks []Checkable) (err error) { - errs := []error{} - for _, c := range checks { - if e := c.Healthy(); e != nil { - errs = append(errs, e) - } - } - - switch len(errs) { - case 0: - err = nil - case 1: - err = errs[0] - default: - err = fmt.Errorf("multiple health check failure: %v", errs) - } - - return -} - -func DefaultHealthyHandler(w http.ResponseWriter, r *http.Request) { - err := httputil.WriteJSONResponse(w, http.StatusOK, StatusResponse{ - Status: "ok", - }) - if err != nil { - // TODO(bobbyrullo): replace with logging from new logging pkg, - // once it lands. - log.Printf("Failed to write JSON response: %v", err) - } -} - -func DefaultUnhealthyHandler(w http.ResponseWriter, r *http.Request, err error) { - writeErr := httputil.WriteJSONResponse(w, http.StatusInternalServerError, StatusResponse{ - Status: "error", - Details: &StatusResponseDetails{ - Code: http.StatusInternalServerError, - Message: err.Error(), - }, - }) - if writeErr != nil { - // TODO(bobbyrullo): replace with logging from new logging pkg, - // once it lands. - log.Printf("Failed to write JSON response: %v", err) - } -} - -// ExpvarHandler is copied from https://golang.org/src/expvar/expvar.go, where it's sadly unexported. -func ExpvarHandler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintf(w, "{\n") - first := true - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprintf(w, ",\n") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprintf(w, "\n}\n") -} diff --git a/vendor/github.com/coreos/pkg/httputil/README.md b/vendor/github.com/coreos/pkg/httputil/README.md deleted file mode 100644 index 44fa751c4a..0000000000 --- a/vendor/github.com/coreos/pkg/httputil/README.md +++ /dev/null @@ -1,13 +0,0 @@ -httputil -==== - -Common code for dealing with HTTP. - -Includes: - -* Code for returning JSON responses. - -### Documentation - -Visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/httputil) - diff --git a/vendor/github.com/coreos/pkg/httputil/cookie.go b/vendor/github.com/coreos/pkg/httputil/cookie.go deleted file mode 100644 index c37a37bb27..0000000000 --- a/vendor/github.com/coreos/pkg/httputil/cookie.go +++ /dev/null @@ -1,21 +0,0 @@ -package httputil - -import ( - "net/http" - "time" -) - -// DeleteCookies effectively deletes all named cookies -// by wiping all data and setting to expire immediately. 
-func DeleteCookies(w http.ResponseWriter, cookieNames ...string) { - for _, n := range cookieNames { - c := &http.Cookie{ - Name: n, - Value: "", - Path: "/", - MaxAge: -1, - Expires: time.Time{}, - } - http.SetCookie(w, c) - } -} diff --git a/vendor/github.com/coreos/pkg/httputil/json.go b/vendor/github.com/coreos/pkg/httputil/json.go deleted file mode 100644 index 0b09235033..0000000000 --- a/vendor/github.com/coreos/pkg/httputil/json.go +++ /dev/null @@ -1,27 +0,0 @@ -package httputil - -import ( - "encoding/json" - "net/http" -) - -const ( - JSONContentType = "application/json" -) - -func WriteJSONResponse(w http.ResponseWriter, code int, resp interface{}) error { - enc, err := json.Marshal(resp) - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - return err - } - - w.Header().Set("Content-Type", JSONContentType) - w.WriteHeader(code) - - _, err = w.Write(enc) - if err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/coreos/pkg/timeutil/backoff.go b/vendor/github.com/coreos/pkg/timeutil/backoff.go deleted file mode 100644 index b34fb49661..0000000000 --- a/vendor/github.com/coreos/pkg/timeutil/backoff.go +++ /dev/null @@ -1,15 +0,0 @@ -package timeutil - -import ( - "time" -) - -func ExpBackoff(prev, max time.Duration) time.Duration { - if prev == 0 { - return time.Second - } - if prev > max/2 { - return max - } - return 2 * prev -} diff --git a/vendor/github.com/dgrijalva/jwt-go/.gitignore b/vendor/github.com/dgrijalva/jwt-go/.gitignore deleted file mode 100644 index 80bed650ec..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.DS_Store -bin - - diff --git a/vendor/github.com/dgrijalva/jwt-go/.travis.yml b/vendor/github.com/dgrijalva/jwt-go/.travis.yml deleted file mode 100644 index bde823d8ab..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go - -go: - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - tip diff --git a/vendor/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/dgrijalva/jwt-go/LICENSE deleted file mode 100644 index df83a9c2f0..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/LICENSE +++ /dev/null @@ -1,8 +0,0 @@ -Copyright (c) 2012 Dave Grijalva - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- diff --git a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md deleted file mode 100644 index fd62e94905..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md +++ /dev/null @@ -1,96 +0,0 @@ -## Migration Guide from v2 -> v3 - -Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code. - -### `Token.Claims` is now an interface type - -The most requested feature from the 2.0 verison of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`. - -`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property. - -The old example for parsing a token looked like this.. - -```go - if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) - } -``` - -is now directly mapped to... - -```go - if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { - claims := token.Claims.(jwt.MapClaims) - fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) - } -``` - -`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type. - -```go - type MyCustomClaims struct { - User string - *StandardClaims - } - - if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil { - claims := token.Claims.(*MyCustomClaims) - fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt) - } -``` - -### `ParseFromRequest` has been moved - -To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatues have also been augmented to receive a new argument: `Extractor`. - -`Extractors` do the work of picking the token string out of a request. The interface is simple and composable. - -This simple parsing example: - -```go - if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) - } -``` - -is directly mapped to: - -```go - if token, err := request.ParseFromRequest(tokenString, request.OAuth2Extractor, req, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) - } -``` - -There are several concrete `Extractor` types provided for your convenience: - -* `HeaderExtractor` will search a list of headers until one contains content. -* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content. -* `MultiExtractor` will try a list of `Extractors` in order until one returns content. -* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token. 
-* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument -* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header - - -### RSA signing methods no longer accept `[]byte` keys - -Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse. - -To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types. - -```go - func keyLookupFunc(*Token) (interface{}, error) { - // Don't forget to validate the alg is what you expect: - if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { - return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) - } - - // Look up key - key, err := lookupPublicKey(token.Header["kid"]) - if err != nil { - return nil, err - } - - // Unpack key from PEM encoded PKCS8 - return jwt.ParseRSAPublicKeyFromPEM(key) - } -``` diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md deleted file mode 100644 index f48365fafb..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/README.md +++ /dev/null @@ -1,85 +0,0 @@ -A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) - -[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) - -**BREAKING CHANGES:*** Version 3.0.0 is here. It includes _a lot_ of changes including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. - -**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is effected. There will be an update soon to remedy this, and it will likey require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect. - - -## What the heck is a JWT? - -JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. - -In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. - -The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. 
- -The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own. - -## What's in the box? - -This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. - -## Examples - -See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage: - -* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac) -* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac) -* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples) - -## Extensions - -This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. - -Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go - -## Compliance - -This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: - -* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. - -## Project Status & Versioning - -This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). - -This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). - -While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing WRT semantic versioning. - -## Usage Tips - -### Signing vs Encryption - -A token is simply a JSON object that is signed by its author. this tells you exactly two things about the data: - -* The author of the token was in the possession of the signing secret -* The data has not been modified since it was signed - -It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. - -### Choosing a Signing Method - -There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. - -Symmetric signing methods, such as HSA, use only a single secret. 
This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. - -Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. - -### JWT and OAuth - -It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. - -Without going too far down the rabbit hole, here's a description of the interaction of these technologies: - -* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. -* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. -* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. - -## More - -Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). - -The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in to documentation. diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md deleted file mode 100644 index b605b45093..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md +++ /dev/null @@ -1,105 +0,0 @@ -## `jwt-go` Version History - -#### 3.0.0 - -* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code - * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. - * `ParseFromRequest` has been moved to `request` subpackage and usage has changed - * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. -* Other Additions and Changes - * Added `Claims` interface type to allow users to decode the claims into a custom type - * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. 
- * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage - * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` - * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. - * Added several new, more specific, validation errors to error type bitmask - * Moved examples from README to executable example files - * Signing method registry is now thread safe - * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) - -#### 2.7.0 - -This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. - -* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying -* Error text for expired tokens includes how long it's been expired -* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` -* Documentation updates - -#### 2.6.0 - -* Exposed inner error within ValidationError -* Fixed validation errors when using UseJSONNumber flag -* Added several unit tests - -#### 2.5.0 - -* Added support for signing method none. You shouldn't use this. The API tries to make this clear. -* Updated/fixed some documentation -* Added more helpful error message when trying to parse tokens that begin with `BEARER ` - -#### 2.4.0 - -* Added new type, Parser, to allow for configuration of various parsing parameters - * You can now specify a list of valid signing methods. Anything outside this set will be rejected. - * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON -* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) -* Fixed some bugs with ECDSA parsing - -#### 2.3.0 - -* Added support for ECDSA signing methods -* Added support for RSA PSS signing methods (requires go v1.4) - -#### 2.2.0 - -* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. - -#### 2.1.0 - -Backwards compatible API change that was missed in 2.0.0. - -* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` - -#### 2.0.0 - -There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. - -The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. - -It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. 
- -* **Compatibility Breaking Changes** - * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` - * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` - * `KeyFunc` now returns `interface{}` instead of `[]byte` - * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key - * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key -* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodHS256` - * Added public package global `SigningMethodHS384` - * Added public package global `SigningMethodHS512` -* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodRS256` - * Added public package global `SigningMethodRS384` - * Added public package global `SigningMethodRS512` -* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. -* Refactored the RSA implementation to be easier to read -* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` - -#### 1.0.2 - -* Fixed bug in parsing public keys from certificates -* Added more tests around the parsing of keys for RS256 -* Code refactoring in RS256 implementation. No functional changes - -#### 1.0.1 - -* Fixed panic if RS256 signing method was passed an invalid key - -#### 1.0.0 - -* First versioned release -* API stabilized -* Supports creating, signing, parsing, and validating JWT tokens -* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/vendor/github.com/dgrijalva/jwt-go/claims.go b/vendor/github.com/dgrijalva/jwt-go/claims.go deleted file mode 100644 index f0228f02e0..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/claims.go +++ /dev/null @@ -1,134 +0,0 @@ -package jwt - -import ( - "crypto/subtle" - "fmt" - "time" -) - -// For a type to be a Claims object, it must just have a Valid method that determines -// if the token is invalid for any supported reason -type Claims interface { - Valid() error -} - -// Structured version of Claims Section, as referenced at -// https://tools.ietf.org/html/rfc7519#section-4.1 -// See examples for how to use this with your own claim types -type StandardClaims struct { - Audience string `json:"aud,omitempty"` - ExpiresAt int64 `json:"exp,omitempty"` - Id string `json:"jti,omitempty"` - IssuedAt int64 `json:"iat,omitempty"` - Issuer string `json:"iss,omitempty"` - NotBefore int64 `json:"nbf,omitempty"` - Subject string `json:"sub,omitempty"` -} - -// Validates time based claims "exp, iat, nbf". -// There is no accounting for clock skew. -// As well, if any of the above claims are not in the token, it will still -// be considered a valid claim. -func (c StandardClaims) Valid() error { - vErr := new(ValidationError) - now := TimeFunc().Unix() - - // The claims below are optional, by default, so if they are set to the - // default value in Go, let's not fail the verification for them. 
- if c.VerifyExpiresAt(now, false) == false { - delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) - vErr.Inner = fmt.Errorf("token is expired by %v", delta) - vErr.Errors |= ValidationErrorExpired - } - - if c.VerifyIssuedAt(now, false) == false { - vErr.Inner = fmt.Errorf("Token used before issued") - vErr.Errors |= ValidationErrorIssuedAt - } - - if c.VerifyNotBefore(now, false) == false { - vErr.Inner = fmt.Errorf("token is not valid yet") - vErr.Errors |= ValidationErrorNotValidYet - } - - if vErr.valid() { - return nil - } - - return vErr -} - -// Compares the aud claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { - return verifyAud(c.Audience, cmp, req) -} - -// Compares the exp claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { - return verifyExp(c.ExpiresAt, cmp, req) -} - -// Compares the iat claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { - return verifyIat(c.IssuedAt, cmp, req) -} - -// Compares the iss claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { - return verifyIss(c.Issuer, cmp, req) -} - -// Compares the nbf claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { - return verifyNbf(c.NotBefore, cmp, req) -} - -// ----- helpers - -func verifyAud(aud string, cmp string, required bool) bool { - if aud == "" { - return !required - } - if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 { - return true - } else { - return false - } -} - -func verifyExp(exp int64, now int64, required bool) bool { - if exp == 0 { - return !required - } - return now <= exp -} - -func verifyIat(iat int64, now int64, required bool) bool { - if iat == 0 { - return !required - } - return now >= iat -} - -func verifyIss(iss string, cmp string, required bool) bool { - if iss == "" { - return !required - } - if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { - return true - } else { - return false - } -} - -func verifyNbf(nbf int64, now int64, required bool) bool { - if nbf == 0 { - return !required - } - return now >= nbf -} diff --git a/vendor/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/dgrijalva/jwt-go/doc.go deleted file mode 100644 index a86dc1a3b3..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html -// -// See README.md for more info. 
-package jwt diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go deleted file mode 100644 index 2f59a22236..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go +++ /dev/null @@ -1,147 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rand" - "errors" - "math/big" -) - -var ( - // Sadly this is missing from crypto/ecdsa compared to crypto/rsa - ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") -) - -// Implements the ECDSA family of signing methods signing methods -type SigningMethodECDSA struct { - Name string - Hash crypto.Hash - KeySize int - CurveBits int -} - -// Specific instances for EC256 and company -var ( - SigningMethodES256 *SigningMethodECDSA - SigningMethodES384 *SigningMethodECDSA - SigningMethodES512 *SigningMethodECDSA -) - -func init() { - // ES256 - SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} - RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { - return SigningMethodES256 - }) - - // ES384 - SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} - RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { - return SigningMethodES384 - }) - - // ES512 - SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} - RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { - return SigningMethodES512 - }) -} - -func (m *SigningMethodECDSA) Alg() string { - return m.Name -} - -// Implements the Verify method from SigningMethod -// For this verify method, key must be an ecdsa.PublicKey struct -func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - // Get the key - var ecdsaKey *ecdsa.PublicKey - switch k := key.(type) { - case *ecdsa.PublicKey: - ecdsaKey = k - default: - return ErrInvalidKeyType - } - - if len(sig) != 2*m.KeySize { - return ErrECDSAVerification - } - - r := big.NewInt(0).SetBytes(sig[:m.KeySize]) - s := big.NewInt(0).SetBytes(sig[m.KeySize:]) - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Verify the signature - if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { - return nil - } else { - return ErrECDSAVerification - } -} - -// Implements the Sign method from SigningMethod -// For this signing method, key must be an ecdsa.PrivateKey struct -func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { - // Get the key - var ecdsaKey *ecdsa.PrivateKey - switch k := key.(type) { - case *ecdsa.PrivateKey: - ecdsaKey = k - default: - return "", ErrInvalidKeyType - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return r, s - if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { - curveBits := ecdsaKey.Curve.Params().BitSize - - if m.CurveBits != curveBits { - return "", ErrInvalidKey - } - - keyBytes := curveBits / 8 - if curveBits%8 > 0 { - keyBytes += 1 - } - - // We serialize the outpus (r and s) into big-endian byte arrays and pad - // them with zeros on the left to make sure the sizes work out. 
Both arrays - // must be keyBytes long, and the output must be 2*keyBytes long. - rBytes := r.Bytes() - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) - - sBytes := s.Bytes() - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) - - out := append(rBytesPadded, sBytesPadded...) - - return EncodeSegment(out), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go deleted file mode 100644 index d19624b726..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go +++ /dev/null @@ -1,67 +0,0 @@ -package jwt - -import ( - "crypto/ecdsa" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") - ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") -) - -// Parse PEM encoded Elliptic Curve Private Key Structure -func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { - return nil, err - } - - var pkey *ecdsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { - return nil, ErrNotECPrivateKey - } - - return pkey, nil -} - -// Parse PEM encoded PKCS1 or PKCS8 public key -func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - var pkey *ecdsa.PublicKey - var ok bool - if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { - return nil, ErrNotECPublicKey - } - - return pkey, nil -} diff --git a/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/dgrijalva/jwt-go/errors.go deleted file mode 100644 index 662df19d4e..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/errors.go +++ /dev/null @@ -1,63 +0,0 @@ -package jwt - -import ( - "errors" -) - -// Error constants -var ( - ErrInvalidKey = errors.New("key is invalid") - ErrInvalidKeyType = errors.New("key is of invalid type") - ErrHashUnavailable = errors.New("the requested hash function is unavailable") -) - -// The errors that might occur when parsing and validating a token -const ( - ValidationErrorMalformed uint32 = 1 << iota // Token is malformed - ValidationErrorUnverifiable // Token could not be verified because of signing problems - ValidationErrorSignatureInvalid // Signature validation failed - - // Standard Claim validation errors - ValidationErrorAudience // AUD validation failed - ValidationErrorExpired // EXP validation failed - ValidationErrorIssuedAt // IAT validation failed - ValidationErrorIssuer // ISS validation failed - ValidationErrorNotValidYet // NBF validation failed - ValidationErrorId // JTI validation failed - ValidationErrorClaimsInvalid // Generic claims validation error -) - -// Helper for constructing a ValidationError with a string error message -func NewValidationError(errorText string, errorFlags uint32) 
*ValidationError { - return &ValidationError{ - text: errorText, - Errors: errorFlags, - } -} - -// The error from Parse if token is not valid -type ValidationError struct { - Inner error // stores the error returned by external dependencies, i.e.: KeyFunc - Errors uint32 // bitfield. see ValidationError... constants - text string // errors that do not have a valid error just have text -} - -// Validation error is an error type -func (e ValidationError) Error() string { - if e.Inner != nil { - return e.Inner.Error() - } else if e.text != "" { - return e.text - } else { - return "token is invalid" - } - return e.Inner.Error() -} - -// No errors -func (e *ValidationError) valid() bool { - if e.Errors > 0 { - return false - } - return true -} diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go deleted file mode 100644 index c229919254..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/hmac.go +++ /dev/null @@ -1,94 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/hmac" - "errors" -) - -// Implements the HMAC-SHA family of signing methods signing methods -type SigningMethodHMAC struct { - Name string - Hash crypto.Hash -} - -// Specific instances for HS256 and company -var ( - SigningMethodHS256 *SigningMethodHMAC - SigningMethodHS384 *SigningMethodHMAC - SigningMethodHS512 *SigningMethodHMAC - ErrSignatureInvalid = errors.New("signature is invalid") -) - -func init() { - // HS256 - SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { - return SigningMethodHS256 - }) - - // HS384 - SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} - RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { - return SigningMethodHS384 - }) - - // HS512 - SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { - return SigningMethodHS512 - }) -} - -func (m *SigningMethodHMAC) Alg() string { - return m.Name -} - -// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. -func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { - // Verify the key is the right type - keyBytes, ok := key.([]byte) - if !ok { - return ErrInvalidKeyType - } - - // Decode signature, for comparison - sig, err := DecodeSegment(signature) - if err != nil { - return err - } - - // Can we use the specified hashing method? - if !m.Hash.Available() { - return ErrHashUnavailable - } - - // This signing method is symmetric, so we validate the signature - // by reproducing the signature from the signing string and key, then - // comparing that against the provided signature. - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - if !hmac.Equal(sig, hasher.Sum(nil)) { - return ErrSignatureInvalid - } - - // No validation errors. Signature is good. - return nil -} - -// Implements the Sign method from SigningMethod for this signing method. 
-// Key must be []byte -func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { - if keyBytes, ok := key.([]byte); ok { - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - - return EncodeSegment(hasher.Sum(nil)), nil - } - - return "", ErrInvalidKey -} diff --git a/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/vendor/github.com/dgrijalva/jwt-go/map_claims.go deleted file mode 100644 index 291213c460..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/map_claims.go +++ /dev/null @@ -1,94 +0,0 @@ -package jwt - -import ( - "encoding/json" - "errors" - // "fmt" -) - -// Claims type that uses the map[string]interface{} for JSON decoding -// This is the default claims type if you don't supply one -type MapClaims map[string]interface{} - -// Compares the aud claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyAudience(cmp string, req bool) bool { - aud, _ := m["aud"].(string) - return verifyAud(aud, cmp, req) -} - -// Compares the exp claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { - switch exp := m["exp"].(type) { - case float64: - return verifyExp(int64(exp), cmp, req) - case json.Number: - v, _ := exp.Int64() - return verifyExp(v, cmp, req) - } - return req == false -} - -// Compares the iat claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { - switch iat := m["iat"].(type) { - case float64: - return verifyIat(int64(iat), cmp, req) - case json.Number: - v, _ := iat.Int64() - return verifyIat(v, cmp, req) - } - return req == false -} - -// Compares the iss claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { - iss, _ := m["iss"].(string) - return verifyIss(iss, cmp, req) -} - -// Compares the nbf claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { - switch nbf := m["nbf"].(type) { - case float64: - return verifyNbf(int64(nbf), cmp, req) - case json.Number: - v, _ := nbf.Int64() - return verifyNbf(v, cmp, req) - } - return req == false -} - -// Validates time based claims "exp, iat, nbf". -// There is no accounting for clock skew. -// As well, if any of the above claims are not in the token, it will still -// be considered a valid claim. 
-func (m MapClaims) Valid() error { - vErr := new(ValidationError) - now := TimeFunc().Unix() - - if m.VerifyExpiresAt(now, false) == false { - vErr.Inner = errors.New("Token is expired") - vErr.Errors |= ValidationErrorExpired - } - - if m.VerifyIssuedAt(now, false) == false { - vErr.Inner = errors.New("Token used before issued") - vErr.Errors |= ValidationErrorIssuedAt - } - - if m.VerifyNotBefore(now, false) == false { - vErr.Inner = errors.New("Token is not valid yet") - vErr.Errors |= ValidationErrorNotValidYet - } - - if vErr.valid() { - return nil - } - - return vErr -} diff --git a/vendor/github.com/dgrijalva/jwt-go/none.go b/vendor/github.com/dgrijalva/jwt-go/none.go deleted file mode 100644 index f04d189d06..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/none.go +++ /dev/null @@ -1,52 +0,0 @@ -package jwt - -// Implements the none signing method. This is required by the spec -// but you probably should never use it. -var SigningMethodNone *signingMethodNone - -const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" - -var NoneSignatureTypeDisallowedError error - -type signingMethodNone struct{} -type unsafeNoneMagicConstant string - -func init() { - SigningMethodNone = &signingMethodNone{} - NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) - - RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { - return SigningMethodNone - }) -} - -func (m *signingMethodNone) Alg() string { - return "none" -} - -// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key -func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { - // Key must be UnsafeAllowNoneSignatureType to prevent accidentally - // accepting 'none' signing method - if _, ok := key.(unsafeNoneMagicConstant); !ok { - return NoneSignatureTypeDisallowedError - } - // If signing method is none, signature must be an empty string - if signature != "" { - return NewValidationError( - "'none' signing method with non-empty signature", - ValidationErrorSignatureInvalid, - ) - } - - // Accept 'none' signing method. - return nil -} - -// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key -func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { - if _, ok := key.(unsafeNoneMagicConstant); ok { - return "", nil - } - return "", NoneSignatureTypeDisallowedError -} diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go deleted file mode 100644 index 7bf1c4ea08..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/parser.go +++ /dev/null @@ -1,131 +0,0 @@ -package jwt - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" -) - -type Parser struct { - ValidMethods []string // If populated, only these methods will be considered valid - UseJSONNumber bool // Use JSON Number format in JSON decoder - SkipClaimsValidation bool // Skip claims validation during token parsing -} - -// Parse, validate, and return a token. -// keyFunc will receive the parsed token and should return the key for validating. 
-// If everything is kosher, err will be nil -func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) -} - -func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - parts := strings.Split(tokenString, ".") - if len(parts) != 3 { - return nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) - } - - var err error - token := &Token{Raw: tokenString} - - // parse Header - var headerBytes []byte - if headerBytes, err = DecodeSegment(parts[0]); err != nil { - if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { - return token, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) - } - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - if err = json.Unmarshal(headerBytes, &token.Header); err != nil { - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // parse Claims - var claimBytes []byte - token.Claims = claims - - if claimBytes, err = DecodeSegment(parts[1]); err != nil { - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) - if p.UseJSONNumber { - dec.UseNumber() - } - // JSON Decode. Special case for map type to avoid weird pointer behavior - if c, ok := token.Claims.(MapClaims); ok { - err = dec.Decode(&c) - } else { - err = dec.Decode(&claims) - } - // Handle decode error - if err != nil { - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // Lookup signature method - if method, ok := token.Header["alg"].(string); ok { - if token.Method = GetSigningMethod(method); token.Method == nil { - return token, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) - } - } else { - return token, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) - } - - // Verify signing method is in the required set - if p.ValidMethods != nil { - var signingMethodValid = false - var alg = token.Method.Alg() - for _, m := range p.ValidMethods { - if m == alg { - signingMethodValid = true - break - } - } - if !signingMethodValid { - // signing method is not in the listed set - return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) - } - } - - // Lookup key - var key interface{} - if keyFunc == nil { - // keyFunc was not provided. 
short circuiting validation - return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) - } - if key, err = keyFunc(token); err != nil { - // keyFunc returned an error - return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} - } - - vErr := &ValidationError{} - - // Validate Claims - if !p.SkipClaimsValidation { - if err := token.Claims.Valid(); err != nil { - - // If the Claims Valid returned an error, check if it is a validation error, - // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set - if e, ok := err.(*ValidationError); !ok { - vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} - } else { - vErr = e - } - } - } - - // Perform validation - token.Signature = parts[2] - if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { - vErr.Inner = err - vErr.Errors |= ValidationErrorSignatureInvalid - } - - if vErr.valid() { - token.Valid = true - return token, nil - } - - return token, vErr -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go deleted file mode 100644 index 0ae0b1984e..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa.go +++ /dev/null @@ -1,100 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" -) - -// Implements the RSA family of signing methods signing methods -type SigningMethodRSA struct { - Name string - Hash crypto.Hash -} - -// Specific instances for RS256 and company -var ( - SigningMethodRS256 *SigningMethodRSA - SigningMethodRS384 *SigningMethodRSA - SigningMethodRS512 *SigningMethodRSA -) - -func init() { - // RS256 - SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { - return SigningMethodRS256 - }) - - // RS384 - SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} - RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { - return SigningMethodRS384 - }) - - // RS512 - SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { - return SigningMethodRS512 - }) -} - -func (m *SigningMethodRSA) Alg() string { - return m.Name -} - -// Implements the Verify method from SigningMethod -// For this signing method, must be an rsa.PublicKey structure. -func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - var rsaKey *rsa.PublicKey - var ok bool - - if rsaKey, ok = key.(*rsa.PublicKey); !ok { - return ErrInvalidKeyType - } - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Verify the signature - return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) -} - -// Implements the Sign method from SigningMethod -// For this signing method, must be an rsa.PrivateKey structure. 
-func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { - var rsaKey *rsa.PrivateKey - var ok bool - - // Validate type of key - if rsaKey, ok = key.(*rsa.PrivateKey); !ok { - return "", ErrInvalidKey - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return the encoded bytes - if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { - return EncodeSegment(sigBytes), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go deleted file mode 100644 index 10ee9db8a4..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go +++ /dev/null @@ -1,126 +0,0 @@ -// +build go1.4 - -package jwt - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" -) - -// Implements the RSAPSS family of signing methods signing methods -type SigningMethodRSAPSS struct { - *SigningMethodRSA - Options *rsa.PSSOptions -} - -// Specific instances for RS/PS and company -var ( - SigningMethodPS256 *SigningMethodRSAPSS - SigningMethodPS384 *SigningMethodRSAPSS - SigningMethodPS512 *SigningMethodRSAPSS -) - -func init() { - // PS256 - SigningMethodPS256 = &SigningMethodRSAPSS{ - &SigningMethodRSA{ - Name: "PS256", - Hash: crypto.SHA256, - }, - &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: crypto.SHA256, - }, - } - RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { - return SigningMethodPS256 - }) - - // PS384 - SigningMethodPS384 = &SigningMethodRSAPSS{ - &SigningMethodRSA{ - Name: "PS384", - Hash: crypto.SHA384, - }, - &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: crypto.SHA384, - }, - } - RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { - return SigningMethodPS384 - }) - - // PS512 - SigningMethodPS512 = &SigningMethodRSAPSS{ - &SigningMethodRSA{ - Name: "PS512", - Hash: crypto.SHA512, - }, - &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: crypto.SHA512, - }, - } - RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { - return SigningMethodPS512 - }) -} - -// Implements the Verify method from SigningMethod -// For this verify method, key must be an rsa.PublicKey struct -func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - var rsaKey *rsa.PublicKey - switch k := key.(type) { - case *rsa.PublicKey: - rsaKey = k - default: - return ErrInvalidKey - } - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options) -} - -// Implements the Sign method from SigningMethod -// For this signing method, key must be an rsa.PrivateKey struct -func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { - var rsaKey *rsa.PrivateKey - - switch k := key.(type) { - case *rsa.PrivateKey: - rsaKey = k - default: - return "", ErrInvalidKeyType - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return the encoded bytes - if sigBytes, err := 
rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { - return EncodeSegment(sigBytes), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go deleted file mode 100644 index 213a90dbbf..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go +++ /dev/null @@ -1,69 +0,0 @@ -package jwt - -import ( - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key") - ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") - ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") -) - -// Parse PEM encoded PKCS1 or PKCS8 private key -func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - var parsedKey interface{} - if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { - return nil, err - } - } - - var pkey *rsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { - return nil, ErrNotRSAPrivateKey - } - - return pkey, nil -} - -// Parse PEM encoded PKCS1 or PKCS8 public key -func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - var pkey *rsa.PublicKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { - return nil, ErrNotRSAPublicKey - } - - return pkey, nil -} diff --git a/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/dgrijalva/jwt-go/signing_method.go deleted file mode 100644 index ed1f212b21..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/signing_method.go +++ /dev/null @@ -1,35 +0,0 @@ -package jwt - -import ( - "sync" -) - -var signingMethods = map[string]func() SigningMethod{} -var signingMethodLock = new(sync.RWMutex) - -// Implement SigningMethod to add new methods for signing or verifying tokens. -type SigningMethod interface { - Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid - Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error - Alg() string // returns the alg identifier for this method (example: 'HS256') -} - -// Register the "alg" name and a factory function for signing method. 
-// This is typically done during init() in the method's implementation -func RegisterSigningMethod(alg string, f func() SigningMethod) { - signingMethodLock.Lock() - defer signingMethodLock.Unlock() - - signingMethods[alg] = f -} - -// Get a signing method from an "alg" string -func GetSigningMethod(alg string) (method SigningMethod) { - signingMethodLock.RLock() - defer signingMethodLock.RUnlock() - - if methodF, ok := signingMethods[alg]; ok { - method = methodF() - } - return -} diff --git a/vendor/github.com/dgrijalva/jwt-go/token.go b/vendor/github.com/dgrijalva/jwt-go/token.go deleted file mode 100644 index d637e0867c..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/token.go +++ /dev/null @@ -1,108 +0,0 @@ -package jwt - -import ( - "encoding/base64" - "encoding/json" - "strings" - "time" -) - -// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). -// You can override it to use another time value. This is useful for testing or if your -// server uses a different time zone than your tokens. -var TimeFunc = time.Now - -// Parse methods use this callback function to supply -// the key for verification. The function receives the parsed, -// but unverified Token. This allows you to use properties in the -// Header of the token (such as `kid`) to identify which key to use. -type Keyfunc func(*Token) (interface{}, error) - -// A JWT Token. Different fields will be used depending on whether you're -// creating or parsing/verifying a token. -type Token struct { - Raw string // The raw token. Populated when you Parse a token - Method SigningMethod // The signing method used or to be used - Header map[string]interface{} // The first segment of the token - Claims Claims // The second segment of the token - Signature string // The third segment of the token. Populated when you Parse a token - Valid bool // Is the token valid? Populated when you Parse/Verify a token -} - -// Create a new Token. Takes a signing method -func New(method SigningMethod) *Token { - return NewWithClaims(method, MapClaims{}) -} - -func NewWithClaims(method SigningMethod, claims Claims) *Token { - return &Token{ - Header: map[string]interface{}{ - "typ": "JWT", - "alg": method.Alg(), - }, - Claims: claims, - Method: method, - } -} - -// Get the complete, signed token -func (t *Token) SignedString(key interface{}) (string, error) { - var sig, sstr string - var err error - if sstr, err = t.SigningString(); err != nil { - return "", err - } - if sig, err = t.Method.Sign(sstr, key); err != nil { - return "", err - } - return strings.Join([]string{sstr, sig}, "."), nil -} - -// Generate the signing string. This is the -// most expensive part of the whole deal. Unless you -// need this for something special, just go straight for -// the SignedString. -func (t *Token) SigningString() (string, error) { - var err error - parts := make([]string, 2) - for i, _ := range parts { - var jsonValue []byte - if i == 0 { - if jsonValue, err = json.Marshal(t.Header); err != nil { - return "", err - } - } else { - if jsonValue, err = json.Marshal(t.Claims); err != nil { - return "", err - } - } - - parts[i] = EncodeSegment(jsonValue) - } - return strings.Join(parts, "."), nil -} - -// Parse, validate, and return a token. -// keyFunc will receive the parsed token and should return the key for validating. 
-// If everything is kosher, err will be nil -func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - return new(Parser).Parse(tokenString, keyFunc) -} - -func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) -} - -// Encode JWT specific base64url encoding with padding stripped -func EncodeSegment(seg []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") -} - -// Decode JWT specific base64url encoding with padding stripped -func DecodeSegment(seg string) ([]byte, error) { - if l := len(seg) % 4; l > 0 { - seg += strings.Repeat("=", 4-l) - } - - return base64.URLEncoding.DecodeString(seg) -} diff --git a/vendor/github.com/docker/distribution/AUTHORS b/vendor/github.com/docker/distribution/AUTHORS new file mode 100644 index 0000000000..0857b62fc9 --- /dev/null +++ b/vendor/github.com/docker/distribution/AUTHORS @@ -0,0 +1,128 @@ +Aaron Lehmann +Aaron Vinson +Adam Enger +Adrian Mouat +Ahmet Alp Balkan +Alex Chan +Alex Elman +amitshukla +Amy Lindburg +Andrew Meredith +Andrew T Nguyen +Andrey Kostov +Andy Goldstein +Anton Tiurin +Antonio Mercado +Antonio Murdaca +Arnaud Porterie +Arthur Baars +Asuka Suzuki +Avi Miller +Ayose Cazorla +BadZen +Ben Firshman +bin liu +Brian Bland +burnettk +Carson A +Chris Dillon +Daisuke Fujita +Darren Shepherd +Dave Trombley +Dave Tucker +David Lawrence +David Verhasselt +David Xia +davidli +Dejan Golja +Derek McGowan +Diogo Mónica +DJ Enriquez +Donald Huang +Doug Davis +Eric Yang +farmerworking +Felix Yan +Florentin Raud +Frederick F. Kautz IV +gabriell nascimento +harche +Henri Gomez +Hu Keping +Hua Wang +HuKeping +Ian Babrou +igayoso +Jack Griffin +Jason Freidman +Jeff Nickoloff +Jessie Frazelle +Jianqing Wang +John Starks +Jon Poler +Jonathan Boulle +Jordan Liggitt +Josh Hawn +Julien Fernandez +Keerthan Mala +Kelsey Hightower +Kenneth Lim +Kenny Leung +Li Yi +Liu Hua +liuchang0812 +Louis Kottmann +Luke Carpenter +Mary Anthony +Matt Bentley +Matt Duch +Matt Moore +Matt Robenolt +Michael Prokop +Michal Minar +Miquel Sabaté +Morgan Bauer +moxiegirl +Nathan Sullivan +nevermosby +Nghia Tran +Nuutti Kotivuori +Oilbeater +Olivier Gambier +Olivier Jacques +Omer Cohen +Patrick Devine +Philip Misiowiec +Richard Scothern +Rodolfo Carvalho +Rusty Conover +Sean Boran +Sebastiaan van Stijn +Sharif Nassar +Shawn Falkner-Horine +Shreyas Karnik +Simon Thulbourn +Spencer Rinehart +Stefan Weil +Stephen J Day +Sungho Moon +Sven Dowideit +Sylvain Baubeau +Ted Reed +tgic +Thomas Sjögren +Tianon Gravi +Tibor Vass +Tonis Tiigi +Trevor Pounds +Troels Thomsen +Vincent Batts +Vincent Demeester +Vincent Giersch +W. Trevor King +weiyuan.yl +xg.song +xiekeyang +Yann ROBERT +yuzou +姜继忠 diff --git a/vendor/github.com/docker/engine-api/LICENSE b/vendor/github.com/docker/engine-api/LICENSE deleted file mode 100644 index c157bff96a..0000000000 --- a/vendor/github.com/docker/engine-api/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015-2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/engine-api/types/auth.go b/vendor/github.com/docker/engine-api/types/auth.go deleted file mode 100644 index 056af6b842..0000000000 --- a/vendor/github.com/docker/engine-api/types/auth.go +++ /dev/null @@ -1,22 +0,0 @@ -package types - -// AuthConfig contains authorization information for connecting to a Registry -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth,omitempty"` - - // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. 
- Email string `json:"email,omitempty"` - - ServerAddress string `json:"serveraddress,omitempty"` - - // IdentityToken is used to authenticate the user and get - // an access token for the registry. - IdentityToken string `json:"identitytoken,omitempty"` - - // RegistryToken is a bearer token to be sent to a registry - RegistryToken string `json:"registrytoken,omitempty"` -} diff --git a/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go b/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go deleted file mode 100644 index 931ae10ab1..0000000000 --- a/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go +++ /dev/null @@ -1,23 +0,0 @@ -package blkiodev - -import "fmt" - -// WeightDevice is a structure that holds device:weight pair -type WeightDevice struct { - Path string - Weight uint16 -} - -func (w *WeightDevice) String() string { - return fmt.Sprintf("%s:%d", w.Path, w.Weight) -} - -// ThrottleDevice is a structure that holds device:rate_per_second pair -type ThrottleDevice struct { - Path string - Rate uint64 -} - -func (t *ThrottleDevice) String() string { - return fmt.Sprintf("%s:%d", t.Path, t.Rate) -} diff --git a/vendor/github.com/docker/engine-api/types/client.go b/vendor/github.com/docker/engine-api/types/client.go deleted file mode 100644 index fa3b2cfb45..0000000000 --- a/vendor/github.com/docker/engine-api/types/client.go +++ /dev/null @@ -1,235 +0,0 @@ -package types - -import ( - "bufio" - "io" - "net" - - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/filters" - "github.com/docker/go-units" -) - -// ContainerAttachOptions holds parameters to attach to a container. -type ContainerAttachOptions struct { - Stream bool - Stdin bool - Stdout bool - Stderr bool - DetachKeys string -} - -// ContainerCommitOptions holds parameters to commit changes into a container. -type ContainerCommitOptions struct { - Reference string - Comment string - Author string - Changes []string - Pause bool - Config *container.Config -} - -// ContainerExecInspect holds information returned by exec inspect. -type ContainerExecInspect struct { - ExecID string - ContainerID string - Running bool - ExitCode int -} - -// ContainerListOptions holds parameters to list containers with. -type ContainerListOptions struct { - Quiet bool - Size bool - All bool - Latest bool - Since string - Before string - Limit int - Filter filters.Args -} - -// ContainerLogsOptions holds parameters to filter logs with. -type ContainerLogsOptions struct { - ShowStdout bool - ShowStderr bool - Since string - Timestamps bool - Follow bool - Tail string - Details bool -} - -// ContainerRemoveOptions holds parameters to remove containers. -type ContainerRemoveOptions struct { - RemoveVolumes bool - RemoveLinks bool - Force bool -} - -// CopyToContainerOptions holds information -// about files to copy into a container -type CopyToContainerOptions struct { - AllowOverwriteDirWithFile bool -} - -// EventsOptions hold parameters to filter events with. -type EventsOptions struct { - Since string - Until string - Filters filters.Args -} - -// NetworkListOptions holds parameters to filter the list of networks with. -type NetworkListOptions struct { - Filters filters.Args -} - -// HijackedResponse holds connection information for a hijacked request. -type HijackedResponse struct { - Conn net.Conn - Reader *bufio.Reader -} - -// Close closes the hijacked connection and reader. 
-func (h *HijackedResponse) Close() { - h.Conn.Close() -} - -// CloseWriter is an interface that implements structs -// that close input streams to prevent from writing. -type CloseWriter interface { - CloseWrite() error -} - -// CloseWrite closes a readWriter for writing. -func (h *HijackedResponse) CloseWrite() error { - if conn, ok := h.Conn.(CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - -// ImageBuildOptions holds the information -// necessary to build images. -type ImageBuildOptions struct { - Tags []string - SuppressOutput bool - RemoteContext string - NoCache bool - Remove bool - ForceRemove bool - PullParent bool - Isolation container.Isolation - CPUSetCPUs string - CPUSetMems string - CPUShares int64 - CPUQuota int64 - CPUPeriod int64 - Memory int64 - MemorySwap int64 - CgroupParent string - ShmSize int64 - Dockerfile string - Ulimits []*units.Ulimit - BuildArgs map[string]string - AuthConfigs map[string]AuthConfig - Context io.Reader - Labels map[string]string -} - -// ImageBuildResponse holds information -// returned by a server after building -// an image. -type ImageBuildResponse struct { - Body io.ReadCloser - OSType string -} - -// ImageCreateOptions holds information to create images. -type ImageCreateOptions struct { - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry -} - -// ImageImportSource holds source information for ImageImport -type ImageImportSource struct { - Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName) - SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source) -} - -// ImageImportOptions holds information to import images from the client host. -type ImageImportOptions struct { - Tag string // Tag is the name to tag this image with. This attribute is deprecated. - Message string // Message is the message to tag the image with - Changes []string // Changes are the raw changes to apply to this image -} - -// ImageListOptions holds parameters to filter the list of images with. -type ImageListOptions struct { - MatchName string - All bool - Filters filters.Args -} - -// ImageLoadResponse returns information to the client about a load process. -type ImageLoadResponse struct { - // Body must be closed to avoid a resource leak - Body io.ReadCloser - JSON bool -} - -// ImagePullOptions holds information to pull images. -type ImagePullOptions struct { - All bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - PrivilegeFunc RequestPrivilegeFunc -} - -// RequestPrivilegeFunc is a function interface that -// clients can supply to retry operations after -// getting an authorization error. -// This function returns the registry authentication -// header value in base 64 format, or an error -// if the privilege request fails. -type RequestPrivilegeFunc func() (string, error) - -//ImagePushOptions holds information to push images. -type ImagePushOptions ImagePullOptions - -// ImageRemoveOptions holds parameters to remove images. -type ImageRemoveOptions struct { - Force bool - PruneChildren bool -} - -// ImageSearchOptions holds parameters to search images with. -type ImageSearchOptions struct { - RegistryAuth string - PrivilegeFunc RequestPrivilegeFunc - Filters filters.Args -} - -// ImageTagOptions holds parameters to tag an image -type ImageTagOptions struct { - Force bool -} - -// ResizeOptions holds parameters to resize a tty. 
-// It can be used to resize container ttys and -// exec process ttys too. -type ResizeOptions struct { - Height int - Width int -} - -// VersionResponse holds version information for the client and the server -type VersionResponse struct { - Client *Version - Server *Version -} - -// ServerOK returns true when the client could connect to the docker server -// and parse the information received. It returns false otherwise. -func (v VersionResponse) ServerOK() bool { - return v.Server != nil -} diff --git a/vendor/github.com/docker/engine-api/types/configs.go b/vendor/github.com/docker/engine-api/types/configs.go deleted file mode 100644 index 7d4fcb343e..0000000000 --- a/vendor/github.com/docker/engine-api/types/configs.go +++ /dev/null @@ -1,53 +0,0 @@ -package types - -import ( - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/network" -) - -// configs holds structs used for internal communication between the -// frontend (such as an http server) and the backend (such as the -// docker daemon). - -// ContainerCreateConfig is the parameter set to ContainerCreate() -type ContainerCreateConfig struct { - Name string - Config *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig - AdjustCPUShares bool -} - -// ContainerRmConfig holds arguments for the container remove -// operation. This struct is used to tell the backend what operations -// to perform. -type ContainerRmConfig struct { - ForceRemove, RemoveVolume, RemoveLink bool -} - -// ContainerCommitConfig contains build configs for commit operation, -// and is used when making a commit with the current state of the container. -type ContainerCommitConfig struct { - Pause bool - Repo string - Tag string - Author string - Comment string - // merge container config into commit config before commit - MergeConfigs bool - Config *container.Config -} - -// ExecConfig is a small subset of the Config struct that holds the configuration -// for the exec feature of docker. -type ExecConfig struct { - User string // User that will run the command - Privileged bool // Is the container in privileged mode - Tty bool // Attach standard streams to a tty. - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStderr bool // Attach the standard output - AttachStdout bool // Attach the standard error - Detach bool // Execute in detach mode - DetachKeys string // Escape keys for detach - Cmd []string // Execution commands and args -} diff --git a/vendor/github.com/docker/engine-api/types/container/config.go b/vendor/github.com/docker/engine-api/types/container/config.go deleted file mode 100644 index 1dfc408348..0000000000 --- a/vendor/github.com/docker/engine-api/types/container/config.go +++ /dev/null @@ -1,37 +0,0 @@ -package container - -import ( - "github.com/docker/engine-api/types/strslice" - "github.com/docker/go-connections/nat" -) - -// Config contains the configuration data about a container. -// It should hold only portable information about the container. -// Here, "portable" means "independent from the host we are running on". -// Non-portable information *should* appear in HostConfig. -// All fields added to this struct must be marked `omitempty` to keep getting -// predictable hashes from the old `v1Compatibility` configuration. 
-type Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. - Env []string // List of environment variable to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) - Image string // Name of the image as it was passed by the operator (eg. could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container -} diff --git a/vendor/github.com/docker/engine-api/types/container/host_config.go b/vendor/github.com/docker/engine-api/types/container/host_config.go deleted file mode 100644 index 2446c1904d..0000000000 --- a/vendor/github.com/docker/engine-api/types/container/host_config.go +++ /dev/null @@ -1,320 +0,0 @@ -package container - -import ( - "strings" - - "github.com/docker/engine-api/types/blkiodev" - "github.com/docker/engine-api/types/strslice" - "github.com/docker/go-connections/nat" - "github.com/docker/go-units" -) - -// NetworkMode represents the container network stack. -type NetworkMode string - -// Isolation represents the isolation technology of a container. The supported -// values are platform specific -type Isolation string - -// IsDefault indicates the default isolation technology of a container. On Linux this -// is the native driver. On Windows, this is a Windows Server Container. -func (i Isolation) IsDefault() bool { - return strings.ToLower(string(i)) == "default" || string(i) == "" -} - -// IpcMode represents the container ipc stack. -type IpcMode string - -// IsPrivate indicates whether the container uses its private ipc stack. -func (n IpcMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsHost indicates whether the container uses the host's ipc stack. -func (n IpcMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether the container uses a container's ipc stack. -func (n IpcMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the ipc stack is valid. -func (n IpcMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - case "container": - if len(parts) != 2 || parts[1] == "" { - return false - } - default: - return false - } - return true -} - -// Container returns the name of the container ipc stack is going to be used. 
-func (n IpcMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// UsernsMode represents userns mode in the container. -type UsernsMode string - -// IsHost indicates whether the container uses the host's userns. -func (n UsernsMode) IsHost() bool { - return n == "host" -} - -// IsPrivate indicates whether the container uses the a private userns. -func (n UsernsMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// Valid indicates whether the userns is valid. -func (n UsernsMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// CgroupSpec represents the cgroup to use for the container. -type CgroupSpec string - -// IsContainer indicates whether the container is using another container cgroup -func (c CgroupSpec) IsContainer() bool { - parts := strings.SplitN(string(c), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the cgroup spec is valid. -func (c CgroupSpec) Valid() bool { - return c.IsContainer() || c == "" -} - -// Container returns the name of the container whose cgroup will be used. -func (c CgroupSpec) Container() string { - parts := strings.SplitN(string(c), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// UTSMode represents the UTS namespace of the container. -type UTSMode string - -// IsPrivate indicates whether the container uses its private UTS namespace. -func (n UTSMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// IsHost indicates whether the container uses the host's UTS namespace. -func (n UTSMode) IsHost() bool { - return n == "host" -} - -// Valid indicates whether the UTS namespace is valid. -func (n UTSMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// PidMode represents the pid namespace of the container. -type PidMode string - -// IsPrivate indicates whether the container uses its own new pid namespace. -func (n PidMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsHost indicates whether the container uses the host's pid namespace. -func (n PidMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether the container uses a container's pid namespace. -func (n PidMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the pid namespace is valid. -func (n PidMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - case "container": - if len(parts) != 2 || parts[1] == "" { - return false - } - default: - return false - } - return true -} - -// Container returns the name of the container whose pid namespace is going to be used. -func (n PidMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// DeviceMapping represents the device mapping between the host and the container. -type DeviceMapping struct { - PathOnHost string - PathInContainer string - CgroupPermissions string -} - -// RestartPolicy represents the restart policies of the container. -type RestartPolicy struct { - Name string - MaximumRetryCount int -} - -// IsNone indicates whether the container has the "no" restart policy. 
-// This means the container will not automatically restart when exiting. -func (rp *RestartPolicy) IsNone() bool { - return rp.Name == "no" -} - -// IsAlways indicates whether the container has the "always" restart policy. -// This means the container will automatically restart regardless of the exit status. -func (rp *RestartPolicy) IsAlways() bool { - return rp.Name == "always" -} - -// IsOnFailure indicates whether the container has the "on-failure" restart policy. -// This means the container will automatically restart of exiting with a non-zero exit status. -func (rp *RestartPolicy) IsOnFailure() bool { - return rp.Name == "on-failure" -} - -// IsUnlessStopped indicates whether the container has the -// "unless-stopped" restart policy. This means the container will -// automatically restart unless user has put it to stopped state. -func (rp *RestartPolicy) IsUnlessStopped() bool { - return rp.Name == "unless-stopped" -} - -// IsSame compares two RestartPolicy to see if they are the same -func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { - return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount -} - -// LogConfig represents the logging configuration of the container. -type LogConfig struct { - Type string - Config map[string]string -} - -// Resources contains container's resources (cgroups config, ulimits...) -type Resources struct { - // Applicable to all platforms - CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) - Memory int64 // Memory limit (in bytes) - - // Applicable to UNIX platforms - CgroupParent string // Parent cgroup. - BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) - BlkioWeightDevice []*blkiodev.WeightDevice - BlkioDeviceReadBps []*blkiodev.ThrottleDevice - BlkioDeviceWriteBps []*blkiodev.ThrottleDevice - BlkioDeviceReadIOps []*blkiodev.ThrottleDevice - BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice - CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period - CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota - CpusetCpus string // CpusetCpus 0-2, 0,1 - CpusetMems string // CpusetMems 0-2, 0,1 - Devices []DeviceMapping // List of devices to map inside the container - DiskQuota int64 // Disk limit (in bytes) - KernelMemory int64 // Kernel memory limit (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit int64 // Setting pids limit for a container - Ulimits []*units.Ulimit // List of ulimits to be set in the container - - // Applicable to Windows - CPUCount int64 `json:"CpuCount"` // CPU count - CPUPercent int64 `json:"CpuPercent"` // CPU percent - IOMaximumIOps uint64 // Maximum IOps for the container system drive - IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive - NetworkMaximumBandwidth uint64 // Maximum bandwidth of the network endpoint in bytes per second -} - -// UpdateConfig holds the mutable attributes of a Container. -// Those attributes can be updated at runtime. -type UpdateConfig struct { - // Contains container's resources (cgroups, ulimits) - Resources - RestartPolicy RestartPolicy -} - -// HostConfig the non-portable Config structure of a container. 
-// Here, "non-portable" means "dependent of the host we are running on". -// Portable information *should* appear in Config. -type HostConfig struct { - // Applicable to all platforms - Binds []string // List of volume bindings for this container - ContainerIDFile string // File (path) where the containerId is written - LogConfig LogConfig // Configuration of the logs for this container - NetworkMode NetworkMode // Network mode to use for the container - PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host - RestartPolicy RestartPolicy // Restart policy to be used for the container - AutoRemove bool // Automatically remove container when it exits - VolumeDriver string // Name of the volume driver used to mount volumes - VolumesFrom []string // List of volumes to take from other container - - // Applicable to UNIX platforms - CapAdd strslice.StrSlice // List of kernel capabilities to add to the container - CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container - DNS []string `json:"Dns"` // List of DNS server to lookup - DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for - DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for - ExtraHosts []string // List of extra hosts - GroupAdd []string // List of additional groups that the container process will run as - IpcMode IpcMode // IPC namespace to use for the container - Cgroup CgroupSpec // Cgroup to use for the container - Links []string // List of links (in the name:alias form) - OomScoreAdj int // Container preference for OOM-killing - PidMode PidMode // PID namespace to use for the container - Privileged bool // Is the container in privileged mode - PublishAllPorts bool // Should docker publish all exposed port for the container - ReadonlyRootfs bool // Is the container root filesystem in read-only - SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. - StorageOpt map[string]string // Storage driver options per container. - Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container - UTSMode UTSMode // UTS namespace to use for the container - UsernsMode UsernsMode // The user namespace to use for the container - ShmSize int64 // Total shm memory usage - Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container - - // Applicable to Windows - ConsoleSize [2]int // Initial console size - Isolation Isolation // Isolation technology of the container (eg default, hyperv) - - // Contains container's resources (cgroups, ulimits) - Resources -} diff --git a/vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go b/vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go deleted file mode 100644 index 4171059a47..0000000000 --- a/vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go +++ /dev/null @@ -1,81 +0,0 @@ -// +build !windows - -package container - -import "strings" - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() -} - -// IsPrivate indicates whether container uses it's private network stack. -func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsDefault indicates whether container uses the default network stack. -func (n NetworkMode) IsDefault() bool { - return n == "default" -} - -// NetworkName returns the name of the network stack. 
-func (n NetworkMode) NetworkName() string { - if n.IsBridge() { - return "bridge" - } else if n.IsHost() { - return "host" - } else if n.IsContainer() { - return "container" - } else if n.IsNone() { - return "none" - } else if n.IsDefault() { - return "default" - } else if n.IsUserDefined() { - return n.UserDefined() - } - return "" -} - -// IsBridge indicates whether container uses the bridge network stack -func (n NetworkMode) IsBridge() bool { - return n == "bridge" -} - -// IsHost indicates whether container uses the host network stack. -func (n NetworkMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether container uses a container network stack. -func (n NetworkMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// IsNone indicates whether container isn't using a network stack. -func (n NetworkMode) IsNone() bool { - return n == "none" -} - -// ConnectedContainer is the id of the container which network this container is connected to. -func (n NetworkMode) ConnectedContainer() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() -} - -//UserDefined indicates user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} diff --git a/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go b/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go deleted file mode 100644 index 0ee332ba68..0000000000 --- a/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go +++ /dev/null @@ -1,87 +0,0 @@ -package container - -import ( - "strings" -) - -// IsDefault indicates whether container uses the default network stack. -func (n NetworkMode) IsDefault() bool { - return n == "default" -} - -// IsNone indicates whether container isn't using a network stack. -func (n NetworkMode) IsNone() bool { - return n == "none" -} - -// IsContainer indicates whether container uses a container network stack. -// Returns false as windows doesn't support this mode -func (n NetworkMode) IsContainer() bool { - return false -} - -// IsBridge indicates whether container uses the bridge network stack -// in windows it is given the name NAT -func (n NetworkMode) IsBridge() bool { - return n == "nat" -} - -// IsHost indicates whether container uses the host network stack. -// returns false as this is not supported by windows -func (n NetworkMode) IsHost() bool { - return false -} - -// IsPrivate indicates whether container uses its private network stack. -func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// ConnectedContainer is the id of the container which network this container is connected to. 
-// Returns blank string on windows -func (n NetworkMode) ConnectedContainer() string { - return "" -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsNone() && !n.IsBridge() -} - -// IsHyperV indicates the use of a Hyper-V partition for isolation -func (i Isolation) IsHyperV() bool { - return strings.ToLower(string(i)) == "hyperv" -} - -// IsProcess indicates the use of process isolation -func (i Isolation) IsProcess() bool { - return strings.ToLower(string(i)) == "process" -} - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() || i.IsHyperV() || i.IsProcess() -} - -// NetworkName returns the name of the network stack. -func (n NetworkMode) NetworkName() string { - if n.IsDefault() { - return "default" - } else if n.IsBridge() { - return "nat" - } else if n.IsNone() { - return "none" - } else if n.IsUserDefined() { - return n.UserDefined() - } - - return "" -} - -//UserDefined indicates user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} diff --git a/vendor/github.com/docker/engine-api/types/filters/parse.go b/vendor/github.com/docker/engine-api/types/filters/parse.go deleted file mode 100644 index 0e0d7e3805..0000000000 --- a/vendor/github.com/docker/engine-api/types/filters/parse.go +++ /dev/null @@ -1,295 +0,0 @@ -// Package filters provides helper function to parse and handle command line -// filter, used for example in docker ps or docker images commands. -package filters - -import ( - "encoding/json" - "errors" - "fmt" - "regexp" - "strings" - - "github.com/docker/engine-api/types/versions" -) - -// Args stores filter arguments as map key:{map key: bool}. -// It contains an aggregation of the map of arguments (which are in the form -// of -f 'key=value') based on the key, and stores values for the same key -// in a map with string keys and boolean values. -// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' -// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}} -type Args struct { - fields map[string]map[string]bool -} - -// NewArgs initializes a new Args struct. -func NewArgs() Args { - return Args{fields: map[string]map[string]bool{}} -} - -// ParseFlag parses the argument to the filter flag. Like -// -// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` -// -// If prev map is provided, then it is appended to, and returned. By default a new -// map is created. -func ParseFlag(arg string, prev Args) (Args, error) { - filters := prev - if len(arg) == 0 { - return filters, nil - } - - if !strings.Contains(arg, "=") { - return filters, ErrBadFormat - } - - f := strings.SplitN(arg, "=", 2) - - name := strings.ToLower(strings.TrimSpace(f[0])) - value := strings.TrimSpace(f[1]) - - filters.Add(name, value) - - return filters, nil -} - -// ErrBadFormat is an error returned in case of bad format for a filter. -var ErrBadFormat = errors.New("bad format of filter (expected name=value)") - -// ToParam packs the Args into a string for easy transport from client to server. -func ToParam(a Args) (string, error) { - // this way we don't URL encode {}, just empty space - if a.Len() == 0 { - return "", nil - } - - buf, err := json.Marshal(a.fields) - if err != nil { - return "", err - } - return string(buf), nil -} - -// ToParamWithVersion packs the Args into a string for easy transport from client to server. 
-// The generated string will depend on the specified version (corresponding to the API version). -func ToParamWithVersion(version string, a Args) (string, error) { - // this way we don't URL encode {}, just empty space - if a.Len() == 0 { - return "", nil - } - - // for daemons older than v1.10, filter must be of the form map[string][]string - buf := []byte{} - err := errors.New("") - if version != "" && versions.LessThan(version, "1.22") { - buf, err = json.Marshal(convertArgsToSlice(a.fields)) - } else { - buf, err = json.Marshal(a.fields) - } - if err != nil { - return "", err - } - return string(buf), nil -} - -// FromParam unpacks the filter Args. -func FromParam(p string) (Args, error) { - if len(p) == 0 { - return NewArgs(), nil - } - - r := strings.NewReader(p) - d := json.NewDecoder(r) - - m := map[string]map[string]bool{} - if err := d.Decode(&m); err != nil { - r.Seek(0, 0) - - // Allow parsing old arguments in slice format. - // Because other libraries might be sending them in this format. - deprecated := map[string][]string{} - if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil { - m = deprecatedArgs(deprecated) - } else { - return NewArgs(), err - } - } - return Args{m}, nil -} - -// Get returns the list of values associates with a field. -// It returns a slice of strings to keep backwards compatibility with old code. -func (filters Args) Get(field string) []string { - values := filters.fields[field] - if values == nil { - return make([]string, 0) - } - slice := make([]string, 0, len(values)) - for key := range values { - slice = append(slice, key) - } - return slice -} - -// Add adds a new value to a filter field. -func (filters Args) Add(name, value string) { - if _, ok := filters.fields[name]; ok { - filters.fields[name][value] = true - } else { - filters.fields[name] = map[string]bool{value: true} - } -} - -// Del removes a value from a filter field. -func (filters Args) Del(name, value string) { - if _, ok := filters.fields[name]; ok { - delete(filters.fields[name], value) - } -} - -// Len returns the number of fields in the arguments. -func (filters Args) Len() int { - return len(filters.fields) -} - -// MatchKVList returns true if the values for the specified field matches the ones -// from the sources. -// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, -// field is 'label' and sources are {'label1': '1', 'label2': '2'} -// it returns true. -func (filters Args) MatchKVList(field string, sources map[string]string) bool { - fieldValues := filters.fields[field] - - //do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - - if sources == nil || len(sources) == 0 { - return false - } - - for name2match := range fieldValues { - testKV := strings.SplitN(name2match, "=", 2) - - v, ok := sources[testKV[0]] - if !ok { - return false - } - if len(testKV) == 2 && testKV[1] != v { - return false - } - } - - return true -} - -// Match returns true if the values for the specified field matches the source string -// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, -// field is 'image.name' and source is 'ubuntu' -// it returns true. 
-func (filters Args) Match(field, source string) bool { - if filters.ExactMatch(field, source) { - return true - } - - fieldValues := filters.fields[field] - for name2match := range fieldValues { - match, err := regexp.MatchString(name2match, source) - if err != nil { - continue - } - if match { - return true - } - } - return false -} - -// ExactMatch returns true if the source matches exactly one of the filters. -func (filters Args) ExactMatch(field, source string) bool { - fieldValues, ok := filters.fields[field] - //do not filter if there is no filter set or cannot determine filter - if !ok || len(fieldValues) == 0 { - return true - } - - // try to match full name value to avoid O(N) regular expression matching - if fieldValues[source] { - return true - } - return false -} - -// FuzzyMatch returns true if the source matches exactly one of the filters, -// or the source has one of the filters as a prefix. -func (filters Args) FuzzyMatch(field, source string) bool { - if filters.ExactMatch(field, source) { - return true - } - - fieldValues := filters.fields[field] - for prefix := range fieldValues { - if strings.HasPrefix(source, prefix) { - return true - } - } - return false -} - -// Include returns true if the name of the field to filter is in the filters. -func (filters Args) Include(field string) bool { - _, ok := filters.fields[field] - return ok -} - -// Validate ensures that all the fields in the filter are valid. -// It returns an error as soon as it finds an invalid field. -func (filters Args) Validate(accepted map[string]bool) error { - for name := range filters.fields { - if !accepted[name] { - return fmt.Errorf("Invalid filter '%s'", name) - } - } - return nil -} - -// WalkValues iterates over the list of filtered values for a field. -// It stops the iteration if it finds an error and it returns that error. 
-func (filters Args) WalkValues(field string, op func(value string) error) error { - if _, ok := filters.fields[field]; !ok { - return nil - } - for v := range filters.fields[field] { - if err := op(v); err != nil { - return err - } - } - return nil -} - -func deprecatedArgs(d map[string][]string) map[string]map[string]bool { - m := map[string]map[string]bool{} - for k, v := range d { - values := map[string]bool{} - for _, vv := range v { - values[vv] = true - } - m[k] = values - } - return m -} - -func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { - m := map[string][]string{} - for k, v := range f { - values := []string{} - for kk := range v { - if v[kk] { - values = append(values, kk) - } - } - m[k] = values - } - return m -} diff --git a/vendor/github.com/docker/engine-api/types/network/network.go b/vendor/github.com/docker/engine-api/types/network/network.go deleted file mode 100644 index bce60f5eec..0000000000 --- a/vendor/github.com/docker/engine-api/types/network/network.go +++ /dev/null @@ -1,52 +0,0 @@ -package network - -// Address represents an IP address -type Address struct { - Addr string - PrefixLen int -} - -// IPAM represents IP Address Management -type IPAM struct { - Driver string - Options map[string]string //Per network IPAM driver options - Config []IPAMConfig -} - -// IPAMConfig represents IPAM configurations -type IPAMConfig struct { - Subnet string `json:",omitempty"` - IPRange string `json:",omitempty"` - Gateway string `json:",omitempty"` - AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` -} - -// EndpointIPAMConfig represents IPAM configurations for the endpoint -type EndpointIPAMConfig struct { - IPv4Address string `json:",omitempty"` - IPv6Address string `json:",omitempty"` -} - -// EndpointSettings stores the network endpoint details -type EndpointSettings struct { - // Configurations - IPAMConfig *EndpointIPAMConfig - Links []string - Aliases []string - // Operational data - NetworkID string - EndpointID string - Gateway string - IPAddress string - IPPrefixLen int - IPv6Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - MacAddress string -} - -// NetworkingConfig represents the container's networking configuration for each of its interfaces -// Carries the networking configs specified in the `docker run` and `docker network connect` commands -type NetworkingConfig struct { - EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network -} diff --git a/vendor/github.com/docker/engine-api/types/registry/registry.go b/vendor/github.com/docker/engine-api/types/registry/registry.go deleted file mode 100644 index d2aca6f024..0000000000 --- a/vendor/github.com/docker/engine-api/types/registry/registry.go +++ /dev/null @@ -1,99 +0,0 @@ -package registry - -import ( - "encoding/json" - "net" -) - -// ServiceConfig stores daemon registry services configuration. 
-type ServiceConfig struct { - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string -} - -// NetIPNet is the net.IPNet type, which can be marshalled and -// unmarshalled to JSON -type NetIPNet net.IPNet - -// MarshalJSON returns the JSON representation of the IPNet -func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { - return json.Marshal((*net.IPNet)(ipnet).String()) -} - -// UnmarshalJSON sets the IPNet from a byte array of JSON -func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { - var ipnetStr string - if err = json.Unmarshal(b, &ipnetStr); err == nil { - var cidr *net.IPNet - if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { - *ipnet = NetIPNet(*cidr) - } - } - return -} - -// IndexInfo contains information about a registry -// -// RepositoryInfo Examples: -// { -// "Index" : { -// "Name" : "docker.io", -// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], -// "Secure" : true, -// "Official" : true, -// }, -// "RemoteName" : "library/debian", -// "LocalName" : "debian", -// "CanonicalName" : "docker.io/debian" -// "Official" : true, -// } -// -// { -// "Index" : { -// "Name" : "127.0.0.1:5000", -// "Mirrors" : [], -// "Secure" : false, -// "Official" : false, -// }, -// "RemoteName" : "user/repo", -// "LocalName" : "127.0.0.1:5000/user/repo", -// "CanonicalName" : "127.0.0.1:5000/user/repo", -// "Official" : false, -// } -type IndexInfo struct { - // Name is the name of the registry, such as "docker.io" - Name string - // Mirrors is a list of mirrors, expressed as URIs - Mirrors []string - // Secure is set to false if the registry is part of the list of - // insecure registries. Insecure registries accept HTTP and/or accept - // HTTPS with certificates from unknown CAs. - Secure bool - // Official indicates whether this is an official registry - Official bool -} - -// SearchResult describes a search result returned from a registry -type SearchResult struct { - // StarCount indicates the number of stars this repository has - StarCount int `json:"star_count"` - // IsOfficial is true if the result is from an official repository. - IsOfficial bool `json:"is_official"` - // Name is the name of the repository - Name string `json:"name"` - // IsAutomated indicates whether the result is automated - IsAutomated bool `json:"is_automated"` - // Description is a textual description of the repository - Description string `json:"description"` -} - -// SearchResults lists a collection search results returned from a registry -type SearchResults struct { - // Query contains the query string that generated the search results - Query string `json:"query"` - // NumResults indicates the number of results the query returned - NumResults int `json:"num_results"` - // Results is a slice containing the actual results for the search - Results []SearchResult `json:"results"` -} diff --git a/vendor/github.com/docker/engine-api/types/seccomp.go b/vendor/github.com/docker/engine-api/types/seccomp.go deleted file mode 100644 index e0305a9e37..0000000000 --- a/vendor/github.com/docker/engine-api/types/seccomp.go +++ /dev/null @@ -1,68 +0,0 @@ -package types - -// Seccomp represents the config for a seccomp profile for syscall restriction. 
-type Seccomp struct { - DefaultAction Action `json:"defaultAction"` - Architectures []Arch `json:"architectures"` - Syscalls []*Syscall `json:"syscalls"` -} - -// Arch used for additional architectures -type Arch string - -// Additional architectures permitted to be used for system calls -// By default only the native architecture of the kernel is permitted -const ( - ArchX86 Arch = "SCMP_ARCH_X86" - ArchX86_64 Arch = "SCMP_ARCH_X86_64" - ArchX32 Arch = "SCMP_ARCH_X32" - ArchARM Arch = "SCMP_ARCH_ARM" - ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" - ArchMIPS Arch = "SCMP_ARCH_MIPS" - ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" - ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" - ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" - ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" - ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" -) - -// Action taken upon Seccomp rule match -type Action string - -// Define actions for Seccomp rules -const ( - ActKill Action = "SCMP_ACT_KILL" - ActTrap Action = "SCMP_ACT_TRAP" - ActErrno Action = "SCMP_ACT_ERRNO" - ActTrace Action = "SCMP_ACT_TRACE" - ActAllow Action = "SCMP_ACT_ALLOW" -) - -// Operator used to match syscall arguments in Seccomp -type Operator string - -// Define operators for syscall arguments in Seccomp -const ( - OpNotEqual Operator = "SCMP_CMP_NE" - OpLessThan Operator = "SCMP_CMP_LT" - OpLessEqual Operator = "SCMP_CMP_LE" - OpEqualTo Operator = "SCMP_CMP_EQ" - OpGreaterEqual Operator = "SCMP_CMP_GE" - OpGreaterThan Operator = "SCMP_CMP_GT" - OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ" -) - -// Arg used for matching specific syscall arguments in Seccomp -type Arg struct { - Index uint `json:"index"` - Value uint64 `json:"value"` - ValueTwo uint64 `json:"valueTwo"` - Op Operator `json:"op"` -} - -// Syscall is used to match a syscall in Seccomp -type Syscall struct { - Name string `json:"name"` - Action Action `json:"action"` - Args []*Arg `json:"args"` -} diff --git a/vendor/github.com/docker/engine-api/types/stats.go b/vendor/github.com/docker/engine-api/types/stats.go deleted file mode 100644 index b420ebe7f6..0000000000 --- a/vendor/github.com/docker/engine-api/types/stats.go +++ /dev/null @@ -1,115 +0,0 @@ -// Package types is used for API stability in the types and response to the -// consumers of the API stats endpoint. -package types - -import "time" - -// ThrottlingData stores CPU throttling stats of one running container -type ThrottlingData struct { - // Number of periods with throttling active - Periods uint64 `json:"periods"` - // Number of periods when the container hits its throttling limit. - ThrottledPeriods uint64 `json:"throttled_periods"` - // Aggregate time the container was throttled for in nanoseconds. - ThrottledTime uint64 `json:"throttled_time"` -} - -// CPUUsage stores All CPU stats aggregated since container inception. -type CPUUsage struct { - // Total CPU time consumed. - // Units: nanoseconds. - TotalUsage uint64 `json:"total_usage"` - // Total CPU time consumed per core. - // Units: nanoseconds. - PercpuUsage []uint64 `json:"percpu_usage"` - // Time spent by tasks of the cgroup in kernel mode. - // Units: nanoseconds. - UsageInKernelmode uint64 `json:"usage_in_kernelmode"` - // Time spent by tasks of the cgroup in user mode. - // Units: nanoseconds. 
- UsageInUsermode uint64 `json:"usage_in_usermode"` -} - -// CPUStats aggregates and wraps all CPU related info of container -type CPUStats struct { - CPUUsage CPUUsage `json:"cpu_usage"` - SystemUsage uint64 `json:"system_cpu_usage"` - ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` -} - -// MemoryStats aggregates All memory stats since container inception -type MemoryStats struct { - // current res_counter usage for memory - Usage uint64 `json:"usage"` - // maximum usage ever recorded. - MaxUsage uint64 `json:"max_usage"` - // TODO(vishh): Export these as stronger types. - // all the stats exported via memory.stat. - Stats map[string]uint64 `json:"stats"` - // number of times memory usage hits limits. - Failcnt uint64 `json:"failcnt"` - Limit uint64 `json:"limit"` -} - -// BlkioStatEntry is one small entity to store a piece of Blkio stats -// TODO Windows: This can be factored out -type BlkioStatEntry struct { - Major uint64 `json:"major"` - Minor uint64 `json:"minor"` - Op string `json:"op"` - Value uint64 `json:"value"` -} - -// BlkioStats stores All IO service stats for data read and write -// TODO Windows: This can be factored out -type BlkioStats struct { - // number of bytes transferred to and from the block device - IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` - IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` - IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` - IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` - IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` - IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` - IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` - SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` -} - -// NetworkStats aggregates All network stats of one container -// TODO Windows: This will require refactoring -type NetworkStats struct { - RxBytes uint64 `json:"rx_bytes"` - RxPackets uint64 `json:"rx_packets"` - RxErrors uint64 `json:"rx_errors"` - RxDropped uint64 `json:"rx_dropped"` - TxBytes uint64 `json:"tx_bytes"` - TxPackets uint64 `json:"tx_packets"` - TxErrors uint64 `json:"tx_errors"` - TxDropped uint64 `json:"tx_dropped"` -} - -// PidsStats contains the stats of a container's pids -type PidsStats struct { - // Current is the number of pids in the cgroup - Current uint64 `json:"current,omitempty"` - // Limit is the hard limit on the number of pids in the cgroup. - // A "Limit" of 0 means that there is no limit. 
- Limit uint64 `json:"limit,omitempty"` -} - -// Stats is Ultimate struct aggregating all types of stats of one container -type Stats struct { - Read time.Time `json:"read"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` - CPUStats CPUStats `json:"cpu_stats,omitempty"` - MemoryStats MemoryStats `json:"memory_stats,omitempty"` - BlkioStats BlkioStats `json:"blkio_stats,omitempty"` - PidsStats PidsStats `json:"pids_stats,omitempty"` -} - -// StatsJSON is newly used Networks -type StatsJSON struct { - Stats - - // Networks request version >=1.21 - Networks map[string]NetworkStats `json:"networks,omitempty"` -} diff --git a/vendor/github.com/docker/engine-api/types/strslice/strslice.go b/vendor/github.com/docker/engine-api/types/strslice/strslice.go deleted file mode 100644 index bad493fb89..0000000000 --- a/vendor/github.com/docker/engine-api/types/strslice/strslice.go +++ /dev/null @@ -1,30 +0,0 @@ -package strslice - -import "encoding/json" - -// StrSlice represents a string or an array of strings. -// We need to override the json decoder to accept both options. -type StrSlice []string - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of -// strings. This method is needed to implement json.Unmarshaler. -func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - // With no input, we preserve the existing value by returning nil and - // leaving the target alone. This allows defining default values for - // the type. - return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/vendor/github.com/docker/engine-api/types/types.go b/vendor/github.com/docker/engine-api/types/types.go deleted file mode 100644 index cb2dc9ac9d..0000000000 --- a/vendor/github.com/docker/engine-api/types/types.go +++ /dev/null @@ -1,473 +0,0 @@ -package types - -import ( - "os" - "time" - - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/network" - "github.com/docker/engine-api/types/registry" - "github.com/docker/go-connections/nat" -) - -// ContainerCreateResponse contains the information returned to a client on the -// creation of a new container. -type ContainerCreateResponse struct { - // ID is the ID of the created container. - ID string `json:"Id"` - - // Warnings are any warnings encountered during the creation of the container. - Warnings []string `json:"Warnings"` -} - -// ContainerExecCreateResponse contains response of Remote API: -// POST "/containers/{name:.*}/exec" -type ContainerExecCreateResponse struct { - // ID is the exec ID. - ID string `json:"Id"` -} - -// ContainerUpdateResponse contains response of Remote API: -// POST /containers/{name:.*}/update -type ContainerUpdateResponse struct { - // Warnings are any warnings encountered during the updating of the container. - Warnings []string `json:"Warnings"` -} - -// AuthResponse contains response of Remote API: -// POST "/auth" -type AuthResponse struct { - // Status is the authentication status - Status string `json:"Status"` - - // IdentityToken is an opaque token used for authenticating - // a user after a successful login. 
- IdentityToken string `json:"IdentityToken,omitempty"` -} - -// ContainerWaitResponse contains response of Remote API: -// POST "/containers/"+containerID+"/wait" -type ContainerWaitResponse struct { - // StatusCode is the status code of the wait job - StatusCode int `json:"StatusCode"` -} - -// ContainerCommitResponse contains response of Remote API: -// POST "/commit?container="+containerID -type ContainerCommitResponse struct { - ID string `json:"Id"` -} - -// ContainerChange contains response of Remote API: -// GET "/containers/{name:.*}/changes" -type ContainerChange struct { - Kind int - Path string -} - -// ImageHistory contains response of Remote API: -// GET "/images/{name:.*}/history" -type ImageHistory struct { - ID string `json:"Id"` - Created int64 - CreatedBy string - Tags []string - Size int64 - Comment string -} - -// ImageDelete contains response of Remote API: -// DELETE "/images/{name:.*}" -type ImageDelete struct { - Untagged string `json:",omitempty"` - Deleted string `json:",omitempty"` -} - -// Image contains response of Remote API: -// GET "/images/json" -type Image struct { - ID string `json:"Id"` - ParentID string `json:"ParentId"` - RepoTags []string - RepoDigests []string - Created int64 - Size int64 - VirtualSize int64 - Labels map[string]string -} - -// GraphDriverData returns Image's graph driver config info -// when calling inspect command -type GraphDriverData struct { - Name string - Data map[string]string -} - -// RootFS returns Image's RootFS description including the layer IDs. -type RootFS struct { - Type string - Layers []string `json:",omitempty"` - BaseLayer string `json:",omitempty"` -} - -// ImageInspect contains response of Remote API: -// GET "/images/{name:.*}/json" -type ImageInspect struct { - ID string `json:"Id"` - RepoTags []string - RepoDigests []string - Parent string - Comment string - Created string - Container string - ContainerConfig *container.Config - DockerVersion string - Author string - Config *container.Config - Architecture string - Os string - Size int64 - VirtualSize int64 - GraphDriver GraphDriverData - RootFS RootFS -} - -// Port stores open ports info of container -// e.g. {"PrivatePort": 8080, "PublicPort": 80, "Type": "tcp"} -type Port struct { - IP string `json:",omitempty"` - PrivatePort int - PublicPort int `json:",omitempty"` - Type string -} - -// Container contains response of Remote API: -// GET "/containers/json" -type Container struct { - ID string `json:"Id"` - Names []string - Image string - ImageID string - Command string - Created int64 - Ports []Port - SizeRw int64 `json:",omitempty"` - SizeRootFs int64 `json:",omitempty"` - Labels map[string]string - State string - Status string - HostConfig struct { - NetworkMode string `json:",omitempty"` - } - NetworkSettings *SummaryNetworkSettings - Mounts []MountPoint -} - -// CopyConfig contains request body of Remote API: -// POST "/containers/"+containerID+"/copy" -type CopyConfig struct { - Resource string -} - -// ContainerPathStat is used to encode the header from -// GET "/containers/{name:.*}/archive" -// "Name" is the file or directory name. 
-type ContainerPathStat struct { - Name string `json:"name"` - Size int64 `json:"size"` - Mode os.FileMode `json:"mode"` - Mtime time.Time `json:"mtime"` - LinkTarget string `json:"linkTarget"` -} - -// ContainerProcessList contains response of Remote API: -// GET "/containers/{name:.*}/top" -type ContainerProcessList struct { - Processes [][]string - Titles []string -} - -// Version contains response of Remote API: -// GET "/version" -type Version struct { - Version string - APIVersion string `json:"ApiVersion"` - GitCommit string - GoVersion string - Os string - Arch string - KernelVersion string `json:",omitempty"` - Experimental bool `json:",omitempty"` - BuildTime string `json:",omitempty"` -} - -// Info contains response of Remote API: -// GET "/info" -type Info struct { - ID string - Containers int - ContainersRunning int - ContainersPaused int - ContainersStopped int - Images int - Driver string - DriverStatus [][2]string - SystemStatus [][2]string - Plugins PluginsInfo - MemoryLimit bool - SwapLimit bool - KernelMemory bool - CPUCfsPeriod bool `json:"CpuCfsPeriod"` - CPUCfsQuota bool `json:"CpuCfsQuota"` - CPUShares bool - CPUSet bool - IPv4Forwarding bool - BridgeNfIptables bool - BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` - Debug bool - NFd int - OomKillDisable bool - NGoroutines int - SystemTime string - ExecutionDriver string - LoggingDriver string - CgroupDriver string - NEventsListener int - KernelVersion string - OperatingSystem string - OSType string - Architecture string - IndexServerAddress string - RegistryConfig *registry.ServiceConfig - NCPU int - MemTotal int64 - DockerRootDir string - HTTPProxy string `json:"HttpProxy"` - HTTPSProxy string `json:"HttpsProxy"` - NoProxy string - Name string - Labels []string - ExperimentalBuild bool - ServerVersion string - ClusterStore string - ClusterAdvertise string - SecurityOptions []string -} - -// PluginsInfo is a temp struct holding Plugins name -// registered with docker daemon. It is used by Info struct -type PluginsInfo struct { - // List of Volume plugins registered - Volume []string - // List of Network plugins registered - Network []string - // List of Authorization plugins registered - Authorization []string -} - -// ExecStartCheck is a temp struct used by execStart -// Config fields is part of ExecConfig in runconfig package -type ExecStartCheck struct { - // ExecStart will first check if it's detached - Detach bool - // Check if there's a tty - Tty bool -} - -// ContainerState stores container's running state -// it's part of ContainerJSONBase and will return by "inspect" command -type ContainerState struct { - Status string - Running bool - Paused bool - Restarting bool - OOMKilled bool - Dead bool - Pid int - ExitCode int - Error string - StartedAt string - FinishedAt string -} - -// ContainerNode stores information about the node that a container -// is running on. 
It's only available in Docker Swarm -type ContainerNode struct { - ID string - IPAddress string `json:"IP"` - Addr string - Name string - Cpus int - Memory int - Labels map[string]string -} - -// ContainerJSONBase contains response of Remote API: -// GET "/containers/{name:.*}/json" -type ContainerJSONBase struct { - ID string `json:"Id"` - Created string - Path string - Args []string - State *ContainerState - Image string - ResolvConfPath string - HostnamePath string - HostsPath string - LogPath string - Node *ContainerNode `json:",omitempty"` - Name string - RestartCount int - Driver string - MountLabel string - ProcessLabel string - AppArmorProfile string - ExecIDs []string - HostConfig *container.HostConfig - GraphDriver GraphDriverData - SizeRw *int64 `json:",omitempty"` - SizeRootFs *int64 `json:",omitempty"` -} - -// ContainerJSON is newly used struct along with MountPoint -type ContainerJSON struct { - *ContainerJSONBase - Mounts []MountPoint - Config *container.Config - NetworkSettings *NetworkSettings -} - -// NetworkSettings exposes the network settings in the api -type NetworkSettings struct { - NetworkSettingsBase - DefaultNetworkSettings - Networks map[string]*network.EndpointSettings -} - -// SummaryNetworkSettings provides a summary of container's networks -// in /containers/json -type SummaryNetworkSettings struct { - Networks map[string]*network.EndpointSettings -} - -// NetworkSettingsBase holds basic information about networks -type NetworkSettingsBase struct { - Bridge string - SandboxID string - HairpinMode bool - LinkLocalIPv6Address string - LinkLocalIPv6PrefixLen int - Ports nat.PortMap - SandboxKey string - SecondaryIPAddresses []network.Address - SecondaryIPv6Addresses []network.Address -} - -// DefaultNetworkSettings holds network information -// during the 2 release deprecation period. -// It will be removed in Docker 1.11. -type DefaultNetworkSettings struct { - EndpointID string - Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - IPAddress string - IPPrefixLen int - IPv6Gateway string - MacAddress string -} - -// MountPoint represents a mount point configuration inside the container. -type MountPoint struct { - Name string `json:",omitempty"` - Source string - Destination string - Driver string `json:",omitempty"` - Mode string - RW bool - Propagation string -} - -// Volume represents the configuration of a volume for the remote API -type Volume struct { - Name string // Name is the name of the volume - Driver string // Driver is the Driver name used to create the volume - Mountpoint string // Mountpoint is the location on disk of the volume - Status map[string]interface{} `json:",omitempty"` // Status provides low-level status information about the volume - Labels map[string]string // Labels is metadata specific to the volume - Scope string // Scope describes the level at which the volume exists (e.g. 
`global` for cluster-wide or `local` for machine level) -} - -// VolumesListResponse contains the response for the remote API: -// GET "/volumes" -type VolumesListResponse struct { - Volumes []*Volume // Volumes is the list of volumes being returned - Warnings []string // Warnings is a list of warnings that occurred when getting the list from the volume drivers -} - -// VolumeCreateRequest contains the response for the remote API: -// POST "/volumes/create" -type VolumeCreateRequest struct { - Name string // Name is the requested name of the volume - Driver string // Driver is the name of the driver that should be used to create the volume - DriverOpts map[string]string // DriverOpts holds the driver specific options to use for when creating the volume. - Labels map[string]string // Labels holds metadata specific to the volume being created. -} - -// NetworkResource is the body of the "get network" http response message -type NetworkResource struct { - Name string - ID string `json:"Id"` - Scope string - Driver string - EnableIPv6 bool - IPAM network.IPAM - Internal bool - Containers map[string]EndpointResource - Options map[string]string - Labels map[string]string -} - -// EndpointResource contains network resources allocated and used for a container in a network -type EndpointResource struct { - Name string - EndpointID string - MacAddress string - IPv4Address string - IPv6Address string -} - -// NetworkCreate is the expected body of the "create network" http request message -type NetworkCreate struct { - CheckDuplicate bool - Driver string - EnableIPv6 bool - IPAM network.IPAM - Internal bool - Options map[string]string - Labels map[string]string -} - -// NetworkCreateRequest is the request message sent to the server for network create call. -type NetworkCreateRequest struct { - NetworkCreate - Name string -} - -// NetworkCreateResponse is the response message sent by the server for network create call -type NetworkCreateResponse struct { - ID string `json:"Id"` - Warning string -} - -// NetworkConnect represents the data to be used to connect a container to the network -type NetworkConnect struct { - Container string - EndpointConfig *network.EndpointSettings `json:",omitempty"` -} - -// NetworkDisconnect represents the data to be used to disconnect a container from the network -type NetworkDisconnect struct { - Container string - Force bool -} diff --git a/vendor/github.com/docker/engine-api/types/versions/README.md b/vendor/github.com/docker/engine-api/types/versions/README.md deleted file mode 100644 index 76c516e6a3..0000000000 --- a/vendor/github.com/docker/engine-api/types/versions/README.md +++ /dev/null @@ -1,14 +0,0 @@ -## Legacy API type versions - -This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. - -Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. - -### Package name conventions - -The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: - -1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. -2. 
We cannot use `_` because golint complains abount it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`. - -For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`. diff --git a/vendor/github.com/docker/engine-api/types/versions/compare.go b/vendor/github.com/docker/engine-api/types/versions/compare.go deleted file mode 100644 index 611d4fed66..0000000000 --- a/vendor/github.com/docker/engine-api/types/versions/compare.go +++ /dev/null @@ -1,62 +0,0 @@ -package versions - -import ( - "strconv" - "strings" -) - -// compare compares two version strings -// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. -func compare(v1, v2 string) int { - var ( - currTab = strings.Split(v1, ".") - otherTab = strings.Split(v2, ".") - ) - - max := len(currTab) - if len(otherTab) > max { - max = len(otherTab) - } - for i := 0; i < max; i++ { - var currInt, otherInt int - - if len(currTab) > i { - currInt, _ = strconv.Atoi(currTab[i]) - } - if len(otherTab) > i { - otherInt, _ = strconv.Atoi(otherTab[i]) - } - if currInt > otherInt { - return 1 - } - if otherInt > currInt { - return -1 - } - } - return 0 -} - -// LessThan checks if a version is less than another -func LessThan(v, other string) bool { - return compare(v, other) == -1 -} - -// LessThanOrEqualTo checks if a version is less than or equal to another -func LessThanOrEqualTo(v, other string) bool { - return compare(v, other) <= 0 -} - -// GreaterThan checks if a version is greater than another -func GreaterThan(v, other string) bool { - return compare(v, other) == 1 -} - -// GreaterThanOrEqualTo checks if a version is greater than or equal to another -func GreaterThanOrEqualTo(v, other string) bool { - return compare(v, other) >= 0 -} - -// Equal checks if a version is equal to another -func Equal(v, other string) bool { - return compare(v, other) == 0 -} diff --git a/vendor/github.com/docker/go-connections/LICENSE b/vendor/github.com/docker/go-connections/LICENSE deleted file mode 100644 index b55b37bc31..0000000000 --- a/vendor/github.com/docker/go-connections/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go deleted file mode 100644 index 3d469165ab..0000000000 --- a/vendor/github.com/docker/go-connections/nat/nat.go +++ /dev/null @@ -1,223 +0,0 @@ -// Package nat is a convenience package for manipulation of strings describing network ports. -package nat - -import ( - "fmt" - "net" - "strconv" - "strings" -) - -const ( - // portSpecTemplate is the expected format for port specifications - portSpecTemplate = "ip:hostPort:containerPort" -) - -// PortBinding represents a binding between a Host IP address and a Host Port -type PortBinding struct { - // HostIP is the host IP Address - HostIP string `json:"HostIp"` - // HostPort is the host port number - HostPort string -} - -// PortMap is a collection of PortBinding indexed by Port -type PortMap map[Port][]PortBinding - -// PortSet is a collection of structs indexed by Port -type PortSet map[Port]struct{} - -// Port is a string containing port number and protocol in the format "80/tcp" -type Port string - -// NewPort creates a new instance of a Port given a protocol and port number or port range -func NewPort(proto, port string) (Port, error) { - // Check for parsing issues on "port" now so we can avoid having - // to check it later on. 
- - portStartInt, portEndInt, err := ParsePortRangeToInt(port) - if err != nil { - return "", err - } - - if portStartInt == portEndInt { - return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil - } - return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil -} - -// ParsePort parses the port number string and returns an int -func ParsePort(rawPort string) (int, error) { - if len(rawPort) == 0 { - return 0, nil - } - port, err := strconv.ParseUint(rawPort, 10, 16) - if err != nil { - return 0, err - } - return int(port), nil -} - -// ParsePortRangeToInt parses the port range string and returns start/end ints -func ParsePortRangeToInt(rawPort string) (int, int, error) { - if len(rawPort) == 0 { - return 0, 0, nil - } - start, end, err := ParsePortRange(rawPort) - if err != nil { - return 0, 0, err - } - return int(start), int(end), nil -} - -// Proto returns the protocol of a Port -func (p Port) Proto() string { - proto, _ := SplitProtoPort(string(p)) - return proto -} - -// Port returns the port number of a Port -func (p Port) Port() string { - _, port := SplitProtoPort(string(p)) - return port -} - -// Int returns the port number of a Port as an int -func (p Port) Int() int { - portStr := p.Port() - if len(portStr) == 0 { - return 0 - } - - // We don't need to check for an error because we're going to - // assume that any error would have been found, and reported, in NewPort() - port, _ := strconv.ParseUint(portStr, 10, 16) - return int(port) -} - -// Range returns the start/end port numbers of a Port range as ints -func (p Port) Range() (int, int, error) { - return ParsePortRangeToInt(p.Port()) -} - -// SplitProtoPort splits a port in the format of proto/port -func SplitProtoPort(rawPort string) (string, string) { - parts := strings.Split(rawPort, "/") - l := len(parts) - if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { - return "", "" - } - if l == 1 { - return "tcp", rawPort - } - if len(parts[1]) == 0 { - return "tcp", parts[0] - } - return parts[1], parts[0] -} - -func validateProto(proto string) bool { - for _, availableProto := range []string{"tcp", "udp"} { - if availableProto == proto { - return true - } - } - return false -} - -// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses -// these in to the internal types -func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { - var ( - exposedPorts = make(map[Port]struct{}, len(ports)) - bindings = make(map[Port][]PortBinding) - ) - - for _, rawPort := range ports { - proto := "tcp" - - if i := strings.LastIndex(rawPort, "/"); i != -1 { - proto = rawPort[i+1:] - rawPort = rawPort[:i] - } - if !strings.Contains(rawPort, ":") { - rawPort = fmt.Sprintf("::%s", rawPort) - } else if len(strings.Split(rawPort, ":")) == 2 { - rawPort = fmt.Sprintf(":%s", rawPort) - } - - parts, err := PartParser(portSpecTemplate, rawPort) - if err != nil { - return nil, nil, err - } - - var ( - containerPort = parts["containerPort"] - rawIP = parts["ip"] - hostPort = parts["hostPort"] - ) - - if rawIP != "" && net.ParseIP(rawIP) == nil { - return nil, nil, fmt.Errorf("Invalid ip address: %s", rawIP) - } - if containerPort == "" { - return nil, nil, fmt.Errorf("No port specified: %s", rawPort) - } - - startPort, endPort, err := ParsePortRange(containerPort) - if err != nil { - return nil, nil, fmt.Errorf("Invalid containerPort: %s", containerPort) - } - - var startHostPort, endHostPort uint64 = 0, 0 - if len(hostPort) > 0 { - startHostPort, endHostPort, err = 
ParsePortRange(hostPort) - if err != nil { - return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort) - } - } - - if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { - // Allow host port range iff containerPort is not a range. - // In this case, use the host port range as the dynamic - // host port range to allocate into. - if endPort != startPort { - return nil, nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) - } - } - - if !validateProto(strings.ToLower(proto)) { - return nil, nil, fmt.Errorf("Invalid proto: %s", proto) - } - - for i := uint64(0); i <= (endPort - startPort); i++ { - containerPort = strconv.FormatUint(startPort+i, 10) - if len(hostPort) > 0 { - hostPort = strconv.FormatUint(startHostPort+i, 10) - } - // Set hostPort to a range only if there is a single container port - // and a dynamic host port. - if startPort == endPort && startHostPort != endHostPort { - hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) - } - port, err := NewPort(strings.ToLower(proto), containerPort) - if err != nil { - return nil, nil, err - } - if _, exists := exposedPorts[port]; !exists { - exposedPorts[port] = struct{}{} - } - - binding := PortBinding{ - HostIP: rawIP, - HostPort: hostPort, - } - bslice, exists := bindings[port] - if !exists { - bslice = []PortBinding{} - } - bindings[port] = append(bslice, binding) - } - } - return exposedPorts, bindings, nil -} diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go deleted file mode 100644 index 872050205f..0000000000 --- a/vendor/github.com/docker/go-connections/nat/parse.go +++ /dev/null @@ -1,56 +0,0 @@ -package nat - -import ( - "fmt" - "strconv" - "strings" -) - -// PartParser parses and validates the specified string (data) using the specified template -// e.g. ip:public:private -> 192.168.0.1:80:8000 -func PartParser(template, data string) (map[string]string, error) { - // ip:public:private - var ( - templateParts = strings.Split(template, ":") - parts = strings.Split(data, ":") - out = make(map[string]string, len(templateParts)) - ) - if len(parts) != len(templateParts) { - return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) - } - - for i, t := range templateParts { - value := "" - if len(parts) > i { - value = parts[i] - } - out[t] = value - } - return out, nil -} - -// ParsePortRange parses and validates the specified string as a port-range (8000-9000) -func ParsePortRange(ports string) (uint64, uint64, error) { - if ports == "" { - return 0, 0, fmt.Errorf("Empty string specified for ports.") - } - if !strings.Contains(ports, "-") { - start, err := strconv.ParseUint(ports, 10, 16) - end := start - return start, end, err - } - - parts := strings.Split(ports, "-") - start, err := strconv.ParseUint(parts[0], 10, 16) - if err != nil { - return 0, 0, err - } - end, err := strconv.ParseUint(parts[1], 10, 16) - if err != nil { - return 0, 0, err - } - if end < start { - return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) - } - return start, end, nil -} diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go deleted file mode 100644 index ce950171e3..0000000000 --- a/vendor/github.com/docker/go-connections/nat/sort.go +++ /dev/null @@ -1,96 +0,0 @@ -package nat - -import ( - "sort" - "strings" -) - -type portSorter struct { - ports []Port - by func(i, j Port) bool -} - -func (s *portSorter) Len() int { - return len(s.ports) -} - -func (s *portSorter) Swap(i, j int) { - s.ports[i], s.ports[j] = s.ports[j], s.ports[i] -} - -func (s *portSorter) Less(i, j int) bool { - ip := s.ports[i] - jp := s.ports[j] - - return s.by(ip, jp) -} - -// Sort sorts a list of ports using the provided predicate -// This function should compare `i` and `j`, returning true if `i` is -// considered to be less than `j` -func Sort(ports []Port, predicate func(i, j Port) bool) { - s := &portSorter{ports, predicate} - sort.Sort(s) -} - -type portMapEntry struct { - port Port - binding PortBinding -} - -type portMapSorter []portMapEntry - -func (s portMapSorter) Len() int { return len(s) } -func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// sort the port so that the order is: -// 1. port with larger specified bindings -// 2. larger port -// 3. port with tcp protocol -func (s portMapSorter) Less(i, j int) bool { - pi, pj := s[i].port, s[j].port - hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) - return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") -} - -// SortPortMap sorts the list of ports and their respected mapping. The ports -// will explicit HostPort will be placed first. 
-func SortPortMap(ports []Port, bindings PortMap) { - s := portMapSorter{} - for _, p := range ports { - if binding, ok := bindings[p]; ok { - for _, b := range binding { - s = append(s, portMapEntry{port: p, binding: b}) - } - bindings[p] = []PortBinding{} - } else { - s = append(s, portMapEntry{port: p}) - } - } - - sort.Sort(s) - var ( - i int - pm = make(map[Port]struct{}) - ) - // reorder ports - for _, entry := range s { - if _, ok := pm[entry.port]; !ok { - ports[i] = entry.port - pm[entry.port] = struct{}{} - i++ - } - // reorder bindings for this port - if _, ok := bindings[entry.port]; ok { - bindings[entry.port] = append(bindings[entry.port], entry.binding) - } - } -} - -func toInt(s string) uint64 { - i, _, err := ParsePortRange(s) - if err != nil { - i = 0 - } - return i -} diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md deleted file mode 100644 index 9ea86d784e..0000000000 --- a/vendor/github.com/docker/go-units/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# Contributing to go-units - -Want to hack on go-units? Awesome! Here are instructions to get you started. - -go-units is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. 
-``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-units/LICENSE.code b/vendor/github.com/docker/go-units/LICENSE.code deleted file mode 100644 index b55b37bc31..0000000000 --- a/vendor/github.com/docker/go-units/LICENSE.code +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/go-units/LICENSE.docs b/vendor/github.com/docker/go-units/LICENSE.docs deleted file mode 100644 index e26cd4fc8e..0000000000 --- a/vendor/github.com/docker/go-units/LICENSE.docs +++ /dev/null @@ -1,425 +0,0 @@ -Attribution-ShareAlike 4.0 International - -======================================================================= - -Creative Commons Corporation ("Creative Commons") is not a law firm and -does not provide legal services or legal advice. Distribution of -Creative Commons public licenses does not create a lawyer-client or -other relationship. Creative Commons makes its licenses and related -information available on an "as-is" basis. Creative Commons gives no -warranties regarding its licenses, any material licensed under their -terms and conditions, or any related information. Creative Commons -disclaims all liability for damages resulting from their use to the -fullest extent possible. - -Using Creative Commons Public Licenses - -Creative Commons public licenses provide a standard set of terms and -conditions that creators and other rights holders may use to share -original works of authorship and other material subject to copyright -and certain other rights specified in the public license below. The -following considerations are for informational purposes only, are not -exhaustive, and do not form part of our licenses. - - Considerations for licensors: Our public licenses are - intended for use by those authorized to give the public - permission to use material in ways otherwise restricted by - copyright and certain other rights. Our licenses are - irrevocable. Licensors should read and understand the terms - and conditions of the license they choose before applying it. - Licensors should also secure all rights necessary before - applying our licenses so that the public can reuse the - material as expected. Licensors should clearly mark any - material not subject to the license. This includes other CC- - licensed material, or material used under an exception or - limitation to copyright. More considerations for licensors: - wiki.creativecommons.org/Considerations_for_licensors - - Considerations for the public: By using one of our public - licenses, a licensor grants the public permission to use the - licensed material under specified terms and conditions. If - the licensor's permission is not necessary for any reason--for - example, because of any applicable exception or limitation to - copyright--then that use is not regulated by the license. Our - licenses grant only permissions under copyright and certain - other rights that a licensor has authority to grant. Use of - the licensed material may still be restricted for other - reasons, including because others have copyright or other - rights in the material. A licensor may make special requests, - such as asking that all changes be marked or described. - Although not required by our licenses, you are encouraged to - respect those requests where reasonable. 
More_considerations - for the public: - wiki.creativecommons.org/Considerations_for_licensees - -======================================================================= - -Creative Commons Attribution-ShareAlike 4.0 International Public -License - -By exercising the Licensed Rights (defined below), You accept and agree -to be bound by the terms and conditions of this Creative Commons -Attribution-ShareAlike 4.0 International Public License ("Public -License"). To the extent this Public License may be interpreted as a -contract, You are granted the Licensed Rights in consideration of Your -acceptance of these terms and conditions, and the Licensor grants You -such rights in consideration of benefits the Licensor receives from -making the Licensed Material available under these terms and -conditions. - - -Section 1 -- Definitions. - - a. Adapted Material means material subject to Copyright and Similar - Rights that is derived from or based upon the Licensed Material - and in which the Licensed Material is translated, altered, - arranged, transformed, or otherwise modified in a manner requiring - permission under the Copyright and Similar Rights held by the - Licensor. For purposes of this Public License, where the Licensed - Material is a musical work, performance, or sound recording, - Adapted Material is always produced where the Licensed Material is - synched in timed relation with a moving image. - - b. Adapter's License means the license You apply to Your Copyright - and Similar Rights in Your contributions to Adapted Material in - accordance with the terms and conditions of this Public License. - - c. BY-SA Compatible License means a license listed at - creativecommons.org/compatiblelicenses, approved by Creative - Commons as essentially the equivalent of this Public License. - - d. Copyright and Similar Rights means copyright and/or similar rights - closely related to copyright including, without limitation, - performance, broadcast, sound recording, and Sui Generis Database - Rights, without regard to how the rights are labeled or - categorized. For purposes of this Public License, the rights - specified in Section 2(b)(1)-(2) are not Copyright and Similar - Rights. - - e. Effective Technological Measures means those measures that, in the - absence of proper authority, may not be circumvented under laws - fulfilling obligations under Article 11 of the WIPO Copyright - Treaty adopted on December 20, 1996, and/or similar international - agreements. - - f. Exceptions and Limitations means fair use, fair dealing, and/or - any other exception or limitation to Copyright and Similar Rights - that applies to Your use of the Licensed Material. - - g. License Elements means the license attributes listed in the name - of a Creative Commons Public License. The License Elements of this - Public License are Attribution and ShareAlike. - - h. Licensed Material means the artistic or literary work, database, - or other material to which the Licensor applied this Public - License. - - i. Licensed Rights means the rights granted to You subject to the - terms and conditions of this Public License, which are limited to - all Copyright and Similar Rights that apply to Your use of the - Licensed Material and that the Licensor has authority to license. - - j. Licensor means the individual(s) or entity(ies) granting rights - under this Public License. - - k. 
Share means to provide material to the public by any means or - process that requires permission under the Licensed Rights, such - as reproduction, public display, public performance, distribution, - dissemination, communication, or importation, and to make material - available to the public including in ways that members of the - public may access the material from a place and at a time - individually chosen by them. - - l. Sui Generis Database Rights means rights other than copyright - resulting from Directive 96/9/EC of the European Parliament and of - the Council of 11 March 1996 on the legal protection of databases, - as amended and/or succeeded, as well as other essentially - equivalent rights anywhere in the world. - - m. You means the individual or entity exercising the Licensed Rights - under this Public License. Your has a corresponding meaning. - - -Section 2 -- Scope. - - a. License grant. - - 1. Subject to the terms and conditions of this Public License, - the Licensor hereby grants You a worldwide, royalty-free, - non-sublicensable, non-exclusive, irrevocable license to - exercise the Licensed Rights in the Licensed Material to: - - a. reproduce and Share the Licensed Material, in whole or - in part; and - - b. produce, reproduce, and Share Adapted Material. - - 2. Exceptions and Limitations. For the avoidance of doubt, where - Exceptions and Limitations apply to Your use, this Public - License does not apply, and You do not need to comply with - its terms and conditions. - - 3. Term. The term of this Public License is specified in Section - 6(a). - - 4. Media and formats; technical modifications allowed. The - Licensor authorizes You to exercise the Licensed Rights in - all media and formats whether now known or hereafter created, - and to make technical modifications necessary to do so. The - Licensor waives and/or agrees not to assert any right or - authority to forbid You from making technical modifications - necessary to exercise the Licensed Rights, including - technical modifications necessary to circumvent Effective - Technological Measures. For purposes of this Public License, - simply making modifications authorized by this Section 2(a) - (4) never produces Adapted Material. - - 5. Downstream recipients. - - a. Offer from the Licensor -- Licensed Material. Every - recipient of the Licensed Material automatically - receives an offer from the Licensor to exercise the - Licensed Rights under the terms and conditions of this - Public License. - - b. Additional offer from the Licensor -- Adapted Material. - Every recipient of Adapted Material from You - automatically receives an offer from the Licensor to - exercise the Licensed Rights in the Adapted Material - under the conditions of the Adapter's License You apply. - - c. No downstream restrictions. You may not offer or impose - any additional or different terms or conditions on, or - apply any Effective Technological Measures to, the - Licensed Material if doing so restricts exercise of the - Licensed Rights by any recipient of the Licensed - Material. - - 6. No endorsement. Nothing in this Public License constitutes or - may be construed as permission to assert or imply that You - are, or that Your use of the Licensed Material is, connected - with, or sponsored, endorsed, or granted official status by, - the Licensor or others designated to receive attribution as - provided in Section 3(a)(1)(A)(i). - - b. Other rights. - - 1. 
Moral rights, such as the right of integrity, are not - licensed under this Public License, nor are publicity, - privacy, and/or other similar personality rights; however, to - the extent possible, the Licensor waives and/or agrees not to - assert any such rights held by the Licensor to the limited - extent necessary to allow You to exercise the Licensed - Rights, but not otherwise. - - 2. Patent and trademark rights are not licensed under this - Public License. - - 3. To the extent possible, the Licensor waives any right to - collect royalties from You for the exercise of the Licensed - Rights, whether directly or through a collecting society - under any voluntary or waivable statutory or compulsory - licensing scheme. In all other cases the Licensor expressly - reserves any right to collect such royalties. - - -Section 3 -- License Conditions. - -Your exercise of the Licensed Rights is expressly made subject to the -following conditions. - - a. Attribution. - - 1. If You Share the Licensed Material (including in modified - form), You must: - - a. retain the following if it is supplied by the Licensor - with the Licensed Material: - - i. identification of the creator(s) of the Licensed - Material and any others designated to receive - attribution, in any reasonable manner requested by - the Licensor (including by pseudonym if - designated); - - ii. a copyright notice; - - iii. a notice that refers to this Public License; - - iv. a notice that refers to the disclaimer of - warranties; - - v. a URI or hyperlink to the Licensed Material to the - extent reasonably practicable; - - b. indicate if You modified the Licensed Material and - retain an indication of any previous modifications; and - - c. indicate the Licensed Material is licensed under this - Public License, and include the text of, or the URI or - hyperlink to, this Public License. - - 2. You may satisfy the conditions in Section 3(a)(1) in any - reasonable manner based on the medium, means, and context in - which You Share the Licensed Material. For example, it may be - reasonable to satisfy the conditions by providing a URI or - hyperlink to a resource that includes the required - information. - - 3. If requested by the Licensor, You must remove any of the - information required by Section 3(a)(1)(A) to the extent - reasonably practicable. - - b. ShareAlike. - - In addition to the conditions in Section 3(a), if You Share - Adapted Material You produce, the following conditions also apply. - - 1. The Adapter's License You apply must be a Creative Commons - license with the same License Elements, this version or - later, or a BY-SA Compatible License. - - 2. You must include the text of, or the URI or hyperlink to, the - Adapter's License You apply. You may satisfy this condition - in any reasonable manner based on the medium, means, and - context in which You Share Adapted Material. - - 3. You may not offer or impose any additional or different terms - or conditions on, or apply any Effective Technological - Measures to, Adapted Material that restrict exercise of the - rights granted under the Adapter's License You apply. - - -Section 4 -- Sui Generis Database Rights. - -Where the Licensed Rights include Sui Generis Database Rights that -apply to Your use of the Licensed Material: - - a. for the avoidance of doubt, Section 2(a)(1) grants You the right - to extract, reuse, reproduce, and Share all or a substantial - portion of the contents of the database; - - b. 
if You include all or a substantial portion of the database - contents in a database in which You have Sui Generis Database - Rights, then the database in which You have Sui Generis Database - Rights (but not its individual contents) is Adapted Material, - - including for purposes of Section 3(b); and - c. You must comply with the conditions in Section 3(a) if You Share - all or a substantial portion of the contents of the database. - -For the avoidance of doubt, this Section 4 supplements and does not -replace Your obligations under this Public License where the Licensed -Rights include other Copyright and Similar Rights. - - -Section 5 -- Disclaimer of Warranties and Limitation of Liability. - - a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE - EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS - AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF - ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, - IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, - WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, - ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT - KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT - ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. - - b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE - TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, - NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, - INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, - COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR - USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN - ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR - DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR - IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. - - c. The disclaimer of warranties and limitation of liability provided - above shall be interpreted in a manner that, to the extent - possible, most closely approximates an absolute disclaimer and - waiver of all liability. - - -Section 6 -- Term and Termination. - - a. This Public License applies for the term of the Copyright and - Similar Rights licensed here. However, if You fail to comply with - this Public License, then Your rights under this Public License - terminate automatically. - - b. Where Your right to use the Licensed Material has terminated under - Section 6(a), it reinstates: - - 1. automatically as of the date the violation is cured, provided - it is cured within 30 days of Your discovery of the - violation; or - - 2. upon express reinstatement by the Licensor. - - For the avoidance of doubt, this Section 6(b) does not affect any - right the Licensor may have to seek remedies for Your violations - of this Public License. - - c. For the avoidance of doubt, the Licensor may also offer the - Licensed Material under separate terms or conditions or stop - distributing the Licensed Material at any time; however, doing so - will not terminate this Public License. - - d. Sections 1, 5, 6, 7, and 8 survive termination of this Public - License. - - -Section 7 -- Other Terms and Conditions. - - a. The Licensor shall not be bound by any additional or different - terms or conditions communicated by You unless expressly agreed. - - b. 
Any arrangements, understandings, or agreements regarding the - Licensed Material not stated herein are separate from and - independent of the terms and conditions of this Public License. - - -Section 8 -- Interpretation. - - a. For the avoidance of doubt, this Public License does not, and - shall not be interpreted to, reduce, limit, restrict, or impose - conditions on any use of the Licensed Material that could lawfully - be made without permission under this Public License. - - b. To the extent possible, if any provision of this Public License is - deemed unenforceable, it shall be automatically reformed to the - minimum extent necessary to make it enforceable. If the provision - cannot be reformed, it shall be severed from this Public License - without affecting the enforceability of the remaining terms and - conditions. - - c. No term or condition of this Public License will be waived and no - failure to comply consented to unless expressly agreed to by the - Licensor. - - d. Nothing in this Public License constitutes or may be interpreted - as a limitation upon, or waiver of, any privileges and immunities - that apply to the Licensor or You, including from the legal - processes of any jurisdiction or authority. - - -======================================================================= - -Creative Commons is not a party to its public licenses. -Notwithstanding, Creative Commons may elect to apply one of its public -licenses to material it publishes and in those instances will be -considered the "Licensor." Except for the limited purpose of indicating -that material is shared under a Creative Commons public license or as -otherwise permitted by the Creative Commons policies published at -creativecommons.org/policies, Creative Commons does not authorize the -use of the trademark "Creative Commons" or any other trademark or logo -of Creative Commons without its prior written consent including, -without limitation, in connection with any unauthorized modifications -to any of its public licenses or any other arrangements, -understandings, or agreements concerning use of licensed material. For -the avoidance of doubt, this paragraph does not form part of the public -licenses. - -Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS deleted file mode 100644 index 477be8b214..0000000000 --- a/vendor/github.com/docker/go-units/MAINTAINERS +++ /dev/null @@ -1,27 +0,0 @@ -# go-connections maintainers file -# -# This file describes who runs the docker/go-connections project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "calavera", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. 
- - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - [people.calavera] - Name = "David Calavera" - Email = "david.calavera@gmail.com" - GitHub = "calavera" diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md deleted file mode 100644 index 3ce4d79dac..0000000000 --- a/vendor/github.com/docker/go-units/README.md +++ /dev/null @@ -1,18 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) - -# Introduction - -go-units is a library to transform human friendly measurements into machine friendly values. - -## Usage - -See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. - -## Copyright and license - -Copyright © 2015 Docker, Inc. All rights reserved, except as follows. Code -is released under the Apache 2.0 license. The README.md file, and files in the -"docs" folder are licensed under the Creative Commons Attribution 4.0 -International License under the terms and conditions set forth in the file -"LICENSE.docs". You may obtain a duplicate copy of the same license, titled -CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/. diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml deleted file mode 100644 index 9043b35478..0000000000 --- a/vendor/github.com/docker/go-units/circle.yml +++ /dev/null @@ -1,11 +0,0 @@ -dependencies: - post: - # install golint - - go get github.com/golang/lint/golint - -test: - pre: - # run analysis before tests - - go vet ./... - - test -z "$(golint ./... | tee /dev/stderr)" - - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go deleted file mode 100644 index c219a8a968..0000000000 --- a/vendor/github.com/docker/go-units/duration.go +++ /dev/null @@ -1,33 +0,0 @@ -// Package units provides helper function to parse and print size and time units -// in human-readable format. -package units - -import ( - "fmt" - "time" -) - -// HumanDuration returns a human-readable approximation of a duration -// (eg. "About a minute", "4 hours ago", etc.). 
-func HumanDuration(d time.Duration) string { - if seconds := int(d.Seconds()); seconds < 1 { - return "Less than a second" - } else if seconds < 60 { - return fmt.Sprintf("%d seconds", seconds) - } else if minutes := int(d.Minutes()); minutes == 1 { - return "About a minute" - } else if minutes < 60 { - return fmt.Sprintf("%d minutes", minutes) - } else if hours := int(d.Hours()); hours == 1 { - return "About an hour" - } else if hours < 48 { - return fmt.Sprintf("%d hours", hours) - } else if hours < 24*7*2 { - return fmt.Sprintf("%d days", hours/24) - } else if hours < 24*30*3 { - return fmt.Sprintf("%d weeks", hours/24/7) - } else if hours < 24*365*2 { - return fmt.Sprintf("%d months", hours/24/30) - } - return fmt.Sprintf("%d years", int(d.Hours())/24/365) -} diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go deleted file mode 100644 index 3b59daff31..0000000000 --- a/vendor/github.com/docker/go-units/size.go +++ /dev/null @@ -1,95 +0,0 @@ -package units - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -// See: http://en.wikipedia.org/wiki/Binary_prefix -const ( - // Decimal - - KB = 1000 - MB = 1000 * KB - GB = 1000 * MB - TB = 1000 * GB - PB = 1000 * TB - - // Binary - - KiB = 1024 - MiB = 1024 * KiB - GiB = 1024 * MiB - TiB = 1024 * GiB - PiB = 1024 * TiB -) - -type unitMap map[string]int64 - -var ( - decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} - binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} - sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`) -) - -var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} -var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} - -// CustomSize returns a human-readable approximation of a size -// using custom format. -func CustomSize(format string, size float64, base float64, _map []string) string { - i := 0 - for size >= base { - size = size / base - i++ - } - return fmt.Sprintf(format, size, _map[i]) -} - -// HumanSize returns a human-readable approximation of a size -// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). -func HumanSize(size float64) string { - return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs) -} - -// BytesSize returns a human-readable size in bytes, kibibytes, -// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). -func BytesSize(size float64) string { - return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs) -} - -// FromHumanSize returns an integer from a human-readable specification of a -// size using SI standard (eg. "44kB", "17MB"). -func FromHumanSize(size string) (int64, error) { - return parseSize(size, decimalMap) -} - -// RAMInBytes parses a human-readable string representing an amount of RAM -// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and -// returns the number of bytes, or -1 if the string is unparseable. -// Units are case-insensitive, and the 'b' suffix is optional. -func RAMInBytes(size string) (int64, error) { - return parseSize(size, binaryMap) -} - -// Parses the human-readable size string into the amount it represents. 
-func parseSize(sizeStr string, uMap unitMap) (int64, error) { - matches := sizeRegex.FindStringSubmatch(sizeStr) - if len(matches) != 3 { - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - - size, err := strconv.ParseInt(matches[1], 10, 0) - if err != nil { - return -1, err - } - - unitPrefix := strings.ToLower(matches[2]) - if mul, ok := uMap[unitPrefix]; ok { - size *= mul - } - - return size, nil -} diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go deleted file mode 100644 index 5ac7fd825f..0000000000 --- a/vendor/github.com/docker/go-units/ulimit.go +++ /dev/null @@ -1,118 +0,0 @@ -package units - -import ( - "fmt" - "strconv" - "strings" -) - -// Ulimit is a human friendly version of Rlimit. -type Ulimit struct { - Name string - Hard int64 - Soft int64 -} - -// Rlimit specifies the resource limits, such as max open files. -type Rlimit struct { - Type int `json:"type,omitempty"` - Hard uint64 `json:"hard,omitempty"` - Soft uint64 `json:"soft,omitempty"` -} - -const ( - // magic numbers for making the syscall - // some of these are defined in the syscall package, but not all. - // Also since Windows client doesn't get access to the syscall package, need to - // define these here - rlimitAs = 9 - rlimitCore = 4 - rlimitCPU = 0 - rlimitData = 2 - rlimitFsize = 1 - rlimitLocks = 10 - rlimitMemlock = 8 - rlimitMsgqueue = 12 - rlimitNice = 13 - rlimitNofile = 7 - rlimitNproc = 6 - rlimitRss = 5 - rlimitRtprio = 14 - rlimitRttime = 15 - rlimitSigpending = 11 - rlimitStack = 3 -) - -var ulimitNameMapping = map[string]int{ - //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. - "core": rlimitCore, - "cpu": rlimitCPU, - "data": rlimitData, - "fsize": rlimitFsize, - "locks": rlimitLocks, - "memlock": rlimitMemlock, - "msgqueue": rlimitMsgqueue, - "nice": rlimitNice, - "nofile": rlimitNofile, - "nproc": rlimitNproc, - "rss": rlimitRss, - "rtprio": rlimitRtprio, - "rttime": rlimitRttime, - "sigpending": rlimitSigpending, - "stack": rlimitStack, -} - -// ParseUlimit parses and returns a Ulimit from the specified string. -func ParseUlimit(val string) (*Ulimit, error) { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid ulimit argument: %s", val) - } - - if _, exists := ulimitNameMapping[parts[0]]; !exists { - return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) - } - - var ( - soft int64 - hard = &soft // default to soft in case no hard was set - temp int64 - err error - ) - switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { - case 2: - temp, err = strconv.ParseInt(limitVals[1], 10, 64) - if err != nil { - return nil, err - } - hard = &temp - fallthrough - case 1: - soft, err = strconv.ParseInt(limitVals[0], 10, 64) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) - } - - if soft > *hard { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) - } - - return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil -} - -// GetRlimit returns the RLimit corresponding to Ulimit. 
-func (u *Ulimit) GetRlimit() (*Rlimit, error) { - t, exists := ulimitNameMapping[u.Name] - if !exists { - return nil, fmt.Errorf("invalid ulimit name %s", u.Name) - } - - return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil -} - -func (u *Ulimit) String() string { - return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) -} diff --git a/vendor/github.com/emicklei/go-restful/CHANGES.md b/vendor/github.com/emicklei/go-restful/CHANGES.md index 070bca7cdc..133da800ae 100644 --- a/vendor/github.com/emicklei/go-restful/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/CHANGES.md @@ -1,5 +1,13 @@ Change history of go-restful = +2016-11-26 +- Default change! now use CurlyRouter (was RouterJSR311) +- Default change! no more caching of request content +- Default change! do not recover from panics + +2016-09-22 +- fix the DefaultRequestContentType feature + 2016-02-14 - take the qualify factor of the Accept header mediatype into account when deciding the contentype of the response - add constructors for custom entity accessors for xml and json diff --git a/vendor/github.com/emicklei/go-restful/container.go b/vendor/github.com/emicklei/go-restful/container.go index 4e53cccb93..79eb3c4cfa 100644 --- a/vendor/github.com/emicklei/go-restful/container.go +++ b/vendor/github.com/emicklei/go-restful/container.go @@ -25,10 +25,10 @@ type Container struct { ServeMux *http.ServeMux isRegisteredOnRoot bool containerFilters []FilterFunction - doNotRecover bool // default is false + doNotRecover bool // default is true recoverHandleFunc RecoverHandleFunction serviceErrorHandleFunc ServiceErrorHandleFunction - router RouteSelector // default is a RouterJSR311, CurlyRouter is the faster alternative + router RouteSelector // default is a CurlyRouter (RouterJSR311 is a slower alternative) contentEncodingEnabled bool // default is false } @@ -39,10 +39,10 @@ func NewContainer() *Container { ServeMux: http.NewServeMux(), isRegisteredOnRoot: false, containerFilters: []FilterFunction{}, - doNotRecover: false, + doNotRecover: true, recoverHandleFunc: logStackOnRecover, serviceErrorHandleFunc: writeServiceError, - router: RouterJSR311{}, + router: CurlyRouter{}, contentEncodingEnabled: false} } @@ -69,7 +69,7 @@ func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) { // DoNotRecover controls whether panics will be caught to return HTTP 500. // If set to true, Route functions are responsible for handling any error situation. -// Default value is false = recover from panics. This has performance implications. +// Default value is true. func (c *Container) DoNotRecover(doNot bool) { c.doNotRecover = doNot } diff --git a/vendor/github.com/emicklei/go-restful/curly.go b/vendor/github.com/emicklei/go-restful/curly.go index 185300dbc7..79f1f5aa20 100644 --- a/vendor/github.com/emicklei/go-restful/curly.go +++ b/vendor/github.com/emicklei/go-restful/curly.go @@ -108,11 +108,13 @@ func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, reque return (matched && err == nil), false } +var jsr311Router = RouterJSR311{} + // detectRoute selectes from a list of Route the first match by inspecting both the Accept and Content-Type // headers of the Request. 
See also RouterJSR311 in jsr311.go func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) { // tracing is done inside detectRoute - return RouterJSR311{}.detectRoute(candidateRoutes.routes(), httpRequest) + return jsr311Router.detectRoute(candidateRoutes.routes(), httpRequest) } // detectWebService returns the best matching webService given the list of path tokens. diff --git a/vendor/github.com/emicklei/go-restful/doc.go b/vendor/github.com/emicklei/go-restful/doc.go index d40405bf76..21b26ac5a4 100644 --- a/vendor/github.com/emicklei/go-restful/doc.go +++ b/vendor/github.com/emicklei/go-restful/doc.go @@ -145,22 +145,11 @@ Performance options This package has several options that affect the performance of your service. It is important to understand them and how you can change it. - restful.DefaultContainer.Router(CurlyRouter{}) - -The default router is the RouterJSR311 which is an implementation of its spec (http://jsr311.java.net/nonav/releases/1.1/spec/spec.html). -However, it uses regular expressions for all its routes which, depending on your usecase, may consume a significant amount of time. -The CurlyRouter implementation is more lightweight that also allows you to use wildcards and expressions, but only if needed. - - restful.DefaultContainer.DoNotRecover(true) + restful.DefaultContainer.DoNotRecover(false) DoNotRecover controls whether panics will be caught to return HTTP 500. -If set to true, Route functions are responsible for handling any error situation. -Default value is false; it will recover from panics. This has performance implications. - - restful.SetCacheReadEntity(false) - -SetCacheReadEntity controls whether the response data ([]byte) is cached such that ReadEntity is repeatable. -If you expect to read large amounts of payload data, and you do not use this feature, you should set it to false. +If set to false, the container will recover from panics. +Default value is true restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20)) diff --git a/vendor/github.com/emicklei/go-restful/filter.go b/vendor/github.com/emicklei/go-restful/filter.go index 4b86656e17..c23bfb591a 100644 --- a/vendor/github.com/emicklei/go-restful/filter.go +++ b/vendor/github.com/emicklei/go-restful/filter.go @@ -24,3 +24,12 @@ func (f *FilterChain) ProcessFilter(request *Request, response *Response) { // FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction type FilterFunction func(*Request, *Response, *FilterChain) + +// NoBrowserCacheFilter is a filter function to set HTTP headers that disable browser caching +// See examples/restful-no-cache-filter.go for usage +func NoBrowserCacheFilter(req *Request, resp *Response, chain *FilterChain) { + resp.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1. + resp.Header().Set("Pragma", "no-cache") // HTTP 1.0. + resp.Header().Set("Expires", "0") // Proxies. 
+ chain.ProcessFilter(req, resp) +} diff --git a/vendor/github.com/emicklei/go-restful/request.go b/vendor/github.com/emicklei/go-restful/request.go index 3e4234697d..295e6acde3 100644 --- a/vendor/github.com/emicklei/go-restful/request.go +++ b/vendor/github.com/emicklei/go-restful/request.go @@ -13,7 +13,7 @@ import ( var defaultRequestContentType string -var doCacheReadEntityBytes = true +var doCacheReadEntityBytes = false // Request is a wrapper for a http Request that provides convenience methods type Request struct { @@ -107,10 +107,15 @@ func (r *Request) ReadEntity(entityPointer interface{}) (err error) { r.Request.Body = zlibReader } - // lookup the EntityReader + // lookup the EntityReader, use defaultRequestContentType if needed and provided entityReader, ok := entityAccessRegistry.accessorAt(contentType) if !ok { - return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType) + if len(defaultRequestContentType) != 0 { + entityReader, ok = entityAccessRegistry.accessorAt(defaultRequestContentType) + } + if !ok { + return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType) + } } return entityReader.Read(r, entityPointer) } diff --git a/vendor/github.com/emicklei/go-restful/swagger/config.go b/vendor/github.com/emicklei/go-restful/swagger/config.go index 510d6fc133..18f8e57d90 100644 --- a/vendor/github.com/emicklei/go-restful/swagger/config.go +++ b/vendor/github.com/emicklei/go-restful/swagger/config.go @@ -2,6 +2,7 @@ package swagger import ( "net/http" + "reflect" "github.com/emicklei/go-restful" ) @@ -9,8 +10,13 @@ import ( // PostBuildDeclarationMapFunc can be used to modify the api declaration map. type PostBuildDeclarationMapFunc func(apiDeclarationMap *ApiDeclarationList) +// MapSchemaFormatFunc can be used to modify typeName at definition time. type MapSchemaFormatFunc func(typeName string) string +// MapModelTypeNameFunc can be used to return the desired typeName for a given +// type. It will return false if the default name should be used. +type MapModelTypeNameFunc func(t reflect.Type) (string, bool) + type Config struct { // url where the services are available, e.g. http://localhost:8080 // if left empty then the basePath of Swagger is taken from the actual request @@ -33,6 +39,8 @@ type Config struct { PostBuildHandler PostBuildDeclarationMapFunc // Swagger global info struct Info Info - // [optional] If set, model builder should call this handler to get addition typename-to-swagger-format-field convertion. + // [optional] If set, model builder should call this handler to get addition typename-to-swagger-format-field conversion. SchemaFormatHandler MapSchemaFormatFunc + // [optional] If set, model builder should call this handler to retrieve the name for a given type. + ModelTypeNameHandler MapModelTypeNameFunc } diff --git a/vendor/github.com/emicklei/go-restful/swagger/model_builder.go b/vendor/github.com/emicklei/go-restful/swagger/model_builder.go index 398e83049c..d40786f251 100644 --- a/vendor/github.com/emicklei/go-restful/swagger/model_builder.go +++ b/vendor/github.com/emicklei/go-restful/swagger/model_builder.go @@ -43,6 +43,12 @@ func (b modelBuilder) addModelFrom(sample interface{}) { } func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model { + // Turn pointers into simpler types so further checks are + // correct. 
+ if st.Kind() == reflect.Ptr { + st = st.Elem() + } + modelName := b.keyFrom(st) if nameOverride != "" { modelName = nameOverride @@ -137,6 +143,11 @@ func (b modelBuilder) buildProperty(field reflect.StructField, model *Model, mod return "", "", prop } + if field.Name == "XMLName" && field.Type.String() == "xml.Name" { + // property is metadata for the xml.Name attribute, can be skipped + return "", "", prop + } + if tag := field.Tag.Get("modelDescription"); tag != "" { modelDescription = tag } @@ -155,7 +166,7 @@ func (b modelBuilder) buildProperty(field reflect.StructField, model *Model, mod prop.Type = &pType } if prop.Format == "" { - prop.Format = b.jsonSchemaFormat(fieldType.String()) + prop.Format = b.jsonSchemaFormat(b.keyFrom(fieldType)) } return jsonName, modelDescription, prop } @@ -192,13 +203,14 @@ func (b modelBuilder) buildProperty(field reflect.StructField, model *Model, mod return jsonName, modelDescription, prop } - if b.isPrimitiveType(fieldType.String()) { - mapped := b.jsonSchemaType(fieldType.String()) + fieldTypeName := b.keyFrom(fieldType) + if b.isPrimitiveType(fieldTypeName) { + mapped := b.jsonSchemaType(fieldTypeName) prop.Type = &mapped - prop.Format = b.jsonSchemaFormat(fieldType.String()) + prop.Format = b.jsonSchemaFormat(fieldTypeName) return jsonName, modelDescription, prop } - modelType := fieldType.String() + modelType := b.keyFrom(fieldType) prop.Ref = &modelType if fieldType.Name() == "" { // override type of anonymous structs @@ -272,7 +284,7 @@ func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonNam } // simple struct b.addModel(fieldType, "") - var pType = fieldType.String() + var pType = b.keyFrom(fieldType) prop.Ref = &pType return jsonName, prop } @@ -336,10 +348,11 @@ func (b modelBuilder) buildPointerTypeProperty(field reflect.StructField, jsonNa } } else { // non-array, pointer type - var pType = b.jsonSchemaType(fieldType.String()[1:]) // no star, include pkg path - if b.isPrimitiveType(fieldType.String()[1:]) { + fieldTypeName := b.keyFrom(fieldType.Elem()) + var pType = b.jsonSchemaType(fieldTypeName) // no star, include pkg path + if b.isPrimitiveType(fieldTypeName) { prop.Type = &pType - prop.Format = b.jsonSchemaFormat(fieldType.String()[1:]) + prop.Format = b.jsonSchemaFormat(fieldTypeName) return jsonName, prop } prop.Ref = &pType @@ -355,7 +368,7 @@ func (b modelBuilder) buildPointerTypeProperty(field reflect.StructField, jsonNa func (b modelBuilder) getElementTypeName(modelName, jsonName string, t reflect.Type) string { if t.Kind() == reflect.Ptr { - return t.String()[1:] + t = t.Elem() } if t.Name() == "" { return modelName + "." 
+ jsonName @@ -365,6 +378,11 @@ func (b modelBuilder) getElementTypeName(modelName, jsonName string, t reflect.T func (b modelBuilder) keyFrom(st reflect.Type) string { key := st.String() + if b.Config != nil && b.Config.ModelTypeNameHandler != nil { + if name, ok := b.Config.ModelTypeNameHandler(st); ok { + key = name + } + } if len(st.Name()) == 0 { // unnamed type // Swagger UI has special meaning for [ key = strings.Replace(key, "[]", "||", -1) diff --git a/vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go b/vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go index 04fff2c57a..a433b6b702 100644 --- a/vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go +++ b/vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go @@ -33,6 +33,21 @@ func (prop *ModelProperty) setMaximum(field reflect.StructField) { func (prop *ModelProperty) setType(field reflect.StructField) { if tag := field.Tag.Get("type"); tag != "" { + // Check if the first two characters of the type tag are + // intended to emulate slice/array behaviour. + // + // If type is intended to be a slice/array then add the + // overriden type to the array item instead of the main property + if len(tag) > 2 && tag[0:2] == "[]" { + pType := "array" + prop.Type = &pType + prop.Items = new(Item) + + iType := tag[2:] + prop.Items.Type = &iType + return + } + prop.Type = &tag } } diff --git a/vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go b/vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go index 58dd625902..f12b6b1074 100644 --- a/vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go +++ b/vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go @@ -277,7 +277,7 @@ func composeResponseMessages(route restful.Route, decl *ApiDeclaration, config * } // sort by code codes := sort.IntSlice{} - for code, _ := range route.ResponseErrors { + for code := range route.ResponseErrors { codes = append(codes, code) } codes.Sort() diff --git a/vendor/github.com/evanphx/json-patch/.travis.yml b/vendor/github.com/evanphx/json-patch/.travis.yml deleted file mode 100644 index ed5cb244c8..0000000000 --- a/vendor/github.com/evanphx/json-patch/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go - -go: - - 1.4 - - 1.3 - -install: - - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi - -script: - - go test -cover ./... - -notifications: - email: false diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE deleted file mode 100644 index 0eb9b72d84..0000000000 --- a/vendor/github.com/evanphx/json-patch/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014, Evan Phoenix -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. -* Neither the name of the Evan Phoenix nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md deleted file mode 100644 index d0d826bacd..0000000000 --- a/vendor/github.com/evanphx/json-patch/README.md +++ /dev/null @@ -1,29 +0,0 @@ -## JSON-Patch - -Provides the ability to modify and test a JSON according to a -[RFC6902 JSON patch](http://tools.ietf.org/html/rfc6902) and [RFC7396 JSON Merge Patch](https://tools.ietf.org/html/rfc7396). - -*Version*: **1.0** - -[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) - -[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) - -### API Usage - -* Given a `[]byte`, obtain a Patch object - - `obj, err := jsonpatch.DecodePatch(patch)` - -* Apply the patch and get a new document back - - `out, err := obj.Apply(doc)` - -* Create a JSON Merge Patch document based on two json documents (a to b): - - `mergeDoc, err := jsonpatch.CreateMergePatch(a, b)` - -* Bonus API: compare documents for structural equality - - `jsonpatch.Equal(doca, docb)` - diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go deleted file mode 100644 index 330b9b528e..0000000000 --- a/vendor/github.com/evanphx/json-patch/merge.go +++ /dev/null @@ -1,306 +0,0 @@ -package jsonpatch - -import ( - "encoding/json" - "fmt" - "reflect" - "strings" -) - -func merge(cur, patch *lazyNode) *lazyNode { - curDoc, err := cur.intoDoc() - - if err != nil { - pruneNulls(patch) - return patch - } - - patchDoc, err := patch.intoDoc() - - if err != nil { - return patch - } - - mergeDocs(curDoc, patchDoc) - - return cur -} - -func mergeDocs(doc, patch *partialDoc) { - for k, v := range *patch { - k := decodePatchKey(k) - if v == nil { - delete(*doc, k) - } else { - cur, ok := (*doc)[k] - - if !ok || cur == nil { - pruneNulls(v) - (*doc)[k] = v - } else { - (*doc)[k] = merge(cur, v) - } - } - } -} - -func pruneNulls(n *lazyNode) { - sub, err := n.intoDoc() - - if err == nil { - pruneDocNulls(sub) - } else { - ary, err := n.intoAry() - - if err == nil { - pruneAryNulls(ary) - } - } -} - -func pruneDocNulls(doc *partialDoc) *partialDoc { - for k, v := range *doc { - if v == nil { - delete(*doc, k) - } else { - pruneNulls(v) - } - } - - return doc -} - -func pruneAryNulls(ary *partialArray) *partialArray { - newAry := []*lazyNode{} - - for _, v := range *ary { - if v != nil { - pruneNulls(v) - newAry = append(newAry, v) - } - } - - *ary = newAry - - return ary -} - -var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") -var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") - -// MergePatch merges the patchData into the docData. 
-func MergePatch(docData, patchData []byte) ([]byte, error) { - doc := &partialDoc{} - - docErr := json.Unmarshal(docData, doc) - - patch := &partialDoc{} - - patchErr := json.Unmarshal(patchData, patch) - - if _, ok := docErr.(*json.SyntaxError); ok { - return nil, errBadJSONDoc - } - - if _, ok := patchErr.(*json.SyntaxError); ok { - return nil, errBadJSONPatch - } - - if docErr == nil && *doc == nil { - return nil, errBadJSONDoc - } - - if patchErr == nil && *patch == nil { - return nil, errBadJSONPatch - } - - if docErr != nil || patchErr != nil { - // Not an error, just not a doc, so we turn straight into the patch - if patchErr == nil { - doc = pruneDocNulls(patch) - } else { - patchAry := &partialArray{} - patchErr = json.Unmarshal(patchData, patchAry) - - if patchErr != nil { - return nil, errBadJSONPatch - } - - pruneAryNulls(patchAry) - - out, patchErr := json.Marshal(patchAry) - - if patchErr != nil { - return nil, errBadJSONPatch - } - - return out, nil - } - } else { - mergeDocs(doc, patch) - } - - return json.Marshal(doc) -} - -// CreateMergePatch creates a merge patch as specified in http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 -// -// 'a' is original, 'b' is the modified document. Both are to be given as json encoded content. -// The function will return a mergeable json document with differences from a to b. -// -// An error will be returned if any of the two documents are invalid. -func CreateMergePatch(a, b []byte) ([]byte, error) { - aI := map[string]interface{}{} - bI := map[string]interface{}{} - err := json.Unmarshal(a, &aI) - if err != nil { - return nil, errBadJSONDoc - } - err = json.Unmarshal(b, &bI) - if err != nil { - return nil, errBadJSONDoc - } - dest, err := getDiff(aI, bI) - if err != nil { - return nil, err - } - return json.Marshal(dest) -} - -// Returns true if the array matches (must be json types). -// As is idiomatic for go, an empty array is not the same as a nil array. -func matchesArray(a, b []interface{}) bool { - if len(a) != len(b) { - return false - } - if (a == nil && b != nil) || (a != nil && b == nil) { - return false - } - for i := range a { - if !matchesValue(a[i], b[i]) { - return false - } - } - return true -} - -// Returns true if the values matches (must be json types) -// The types of the values must match, otherwise it will always return false -// If two map[string]interface{} are given, all elements must match. -func matchesValue(av, bv interface{}) bool { - if reflect.TypeOf(av) != reflect.TypeOf(bv) { - return false - } - switch at := av.(type) { - case string: - bt := bv.(string) - if bt == at { - return true - } - case float64: - bt := bv.(float64) - if bt == at { - return true - } - case bool: - bt := bv.(bool) - if bt == at { - return true - } - case map[string]interface{}: - bt := bv.(map[string]interface{}) - for key := range at { - if !matchesValue(at[key], bt[key]) { - return false - } - } - for key := range bt { - if !matchesValue(at[key], bt[key]) { - return false - } - } - return true - case []interface{}: - bt := bv.([]interface{}) - return matchesArray(at, bt) - } - return false -} - -// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
-func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { - into := map[string]interface{}{} - for key, bv := range b { - escapedKey := encodePatchKey(key) - av, ok := a[key] - // value was added - if !ok { - into[escapedKey] = bv - continue - } - // If types have changed, replace completely - if reflect.TypeOf(av) != reflect.TypeOf(bv) { - into[escapedKey] = bv - continue - } - // Types are the same, compare values - switch at := av.(type) { - case map[string]interface{}: - bt := bv.(map[string]interface{}) - dst := make(map[string]interface{}, len(bt)) - dst, err := getDiff(at, bt) - if err != nil { - return nil, err - } - if len(dst) > 0 { - into[escapedKey] = dst - } - case string, float64, bool: - if !matchesValue(av, bv) { - into[escapedKey] = bv - } - case []interface{}: - bt := bv.([]interface{}) - if !matchesArray(at, bt) { - into[escapedKey] = bv - } - case nil: - switch bv.(type) { - case nil: - // Both nil, fine. - default: - into[escapedKey] = bv - } - default: - panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) - } - } - // Now add all deleted values as nil - for key := range a { - _, found := b[key] - if !found { - into[key] = nil - } - } - return into, nil -} - -// From http://tools.ietf.org/html/rfc6901#section-4 : -// -// Evaluation of each reference token begins by decoding any escaped -// character sequence. This is performed by first transforming any -// occurrence of the sequence '~1' to '/', and then transforming any -// occurrence of the sequence '~0' to '~'. - -var ( - rfc6901Encoder = strings.NewReplacer("~", "~0", "/", "~1") - rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") -) - -func decodePatchKey(k string) string { - return rfc6901Decoder.Replace(k) -} - -func encodePatchKey(k string) string { - return rfc6901Encoder.Replace(k) -} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go deleted file mode 100644 index 8988da54bb..0000000000 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ /dev/null @@ -1,579 +0,0 @@ -package jsonpatch - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" -) - -const ( - eRaw = iota - eDoc - eAry -) - -type lazyNode struct { - raw *json.RawMessage - doc partialDoc - ary partialArray - which int -} - -type operation map[string]*json.RawMessage - -// Patch is an ordered collection of operations. 
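The other half of the removed API is the RFC 6902 path: DecodePatch parses an ordered list of operations into the Patch type declared just below, and Patch.Apply (defined near the end of this file) walks the document applying them. A rough sketch, with invented operations:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	doc := []byte(`{"labels":["a"],"spec":{"replicas":1}}`)
	// An ordered list of RFC 6902 operations; the "-" index appends to an array.
	ops := []byte(`[
		{"op": "replace", "path": "/spec/replicas", "value": 3},
		{"op": "add", "path": "/labels/-", "value": "b"}
	]`)

	patch, err := jsonpatch.DecodePatch(ops)
	if err != nil {
		panic(err)
	}
	out, err := patch.Apply(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"labels":["a","b"],"spec":{"replicas":3}}
}
```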
-type Patch []operation - -type partialDoc map[string]*lazyNode -type partialArray []*lazyNode - -type container interface { - get(key string) (*lazyNode, error) - set(key string, val *lazyNode) error - add(key string, val *lazyNode) error - remove(key string) error -} - -func newLazyNode(raw *json.RawMessage) *lazyNode { - return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} -} - -func (n *lazyNode) MarshalJSON() ([]byte, error) { - switch n.which { - case eRaw: - return json.Marshal(n.raw) - case eDoc: - return json.Marshal(n.doc) - case eAry: - return json.Marshal(n.ary) - default: - return nil, fmt.Errorf("Unknown type") - } -} - -func (n *lazyNode) UnmarshalJSON(data []byte) error { - dest := make(json.RawMessage, len(data)) - copy(dest, data) - n.raw = &dest - n.which = eRaw - return nil -} - -func (n *lazyNode) intoDoc() (*partialDoc, error) { - if n.which == eDoc { - return &n.doc, nil - } - - err := json.Unmarshal(*n.raw, &n.doc) - - if err != nil { - return nil, err - } - - n.which = eDoc - return &n.doc, nil -} - -func (n *lazyNode) intoAry() (*partialArray, error) { - if n.which == eAry { - return &n.ary, nil - } - - err := json.Unmarshal(*n.raw, &n.ary) - - if err != nil { - return nil, err - } - - n.which = eAry - return &n.ary, nil -} - -func (n *lazyNode) compact() []byte { - buf := &bytes.Buffer{} - - err := json.Compact(buf, *n.raw) - - if err != nil { - return *n.raw - } - - return buf.Bytes() -} - -func (n *lazyNode) tryDoc() bool { - err := json.Unmarshal(*n.raw, &n.doc) - - if err != nil { - return false - } - - n.which = eDoc - return true -} - -func (n *lazyNode) tryAry() bool { - err := json.Unmarshal(*n.raw, &n.ary) - - if err != nil { - return false - } - - n.which = eAry - return true -} - -func (n *lazyNode) equal(o *lazyNode) bool { - if n.which == eRaw { - if !n.tryDoc() && !n.tryAry() { - if o.which != eRaw { - return false - } - - return bytes.Equal(n.compact(), o.compact()) - } - } - - if n.which == eDoc { - if o.which == eRaw { - if !o.tryDoc() { - return false - } - } - - if o.which != eDoc { - return false - } - - for k, v := range n.doc { - ov, ok := o.doc[k] - - if !ok { - return false - } - - if v == nil && ov == nil { - continue - } - - if !v.equal(ov) { - return false - } - } - - return true - } - - if o.which != eAry && !o.tryAry() { - return false - } - - if len(n.ary) != len(o.ary) { - return false - } - - for idx, val := range n.ary { - if !val.equal(o.ary[idx]) { - return false - } - } - - return true -} - -func (o operation) kind() string { - if obj, ok := o["op"]; ok { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown" - } - - return op - } - - return "unknown" -} - -func (o operation) path() string { - if obj, ok := o["path"]; ok { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown" - } - - return op - } - - return "unknown" -} - -func (o operation) from() string { - if obj, ok := o["from"]; ok { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown" - } - - return op - } - - return "unknown" -} - -func (o operation) value() *lazyNode { - if obj, ok := o["value"]; ok { - return newLazyNode(obj) - } - - return nil -} - -func isArray(buf []byte) bool { -Loop: - for _, c := range buf { - switch c { - case ' ': - case '\n': - case '\t': - continue - case '[': - return true - default: - break Loop - } - } - - return false -} - -func findObject(pd *partialDoc, path string) (container, string) { - doc := container(pd) - - 
split := strings.Split(path, "/") - - parts := split[1 : len(split)-1] - - key := split[len(split)-1] - - var err error - - for _, part := range parts { - - next, ok := doc.get(decodePatchKey(part)) - - if next == nil || ok != nil { - return nil, "" - } - - if isArray(*next.raw) { - doc, err = next.intoAry() - - if err != nil { - return nil, "" - } - } else { - doc, err = next.intoDoc() - - if err != nil { - return nil, "" - } - } - } - - return doc, decodePatchKey(key) -} - -func (d *partialDoc) set(key string, val *lazyNode) error { - (*d)[key] = val - return nil -} - -func (d *partialDoc) add(key string, val *lazyNode) error { - (*d)[key] = val - return nil -} - -func (d *partialDoc) get(key string) (*lazyNode, error) { - return (*d)[key], nil -} - -func (d *partialDoc) remove(key string) error { - _, ok := (*d)[key] - if !ok { - return fmt.Errorf("Unable to remove nonexistant key: %s", key) - } - - delete(*d, key) - return nil -} - -func (d *partialArray) set(key string, val *lazyNode) error { - if key == "-" { - *d = append(*d, val) - return nil - } - - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - sz := len(*d) - if idx+1 > sz { - sz = idx + 1 - } - - ary := make([]*lazyNode, sz) - - cur := *d - - copy(ary, cur) - - if idx >= len(ary) { - fmt.Printf("huh?: %#v[%d] %s, %s\n", ary, idx) - } - - ary[idx] = val - - *d = ary - return nil -} - -func (d *partialArray) add(key string, val *lazyNode) error { - if key == "-" { - *d = append(*d, val) - return nil - } - - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - ary := make([]*lazyNode, len(*d)+1) - - cur := *d - - copy(ary[0:idx], cur[0:idx]) - ary[idx] = val - copy(ary[idx+1:], cur[idx:]) - - *d = ary - return nil -} - -func (d *partialArray) get(key string) (*lazyNode, error) { - idx, err := strconv.Atoi(key) - - if err != nil { - return nil, err - } - - if idx >= len(*d) { - return nil, fmt.Errorf("Unable to access invalid index: %d", idx) - } - - return (*d)[idx], nil -} - -func (d *partialArray) remove(key string) error { - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - cur := *d - - if idx >= len(cur) { - return fmt.Errorf("Unable to remove invalid index: %d", idx) - } - - ary := make([]*lazyNode, len(cur)-1) - - copy(ary[0:idx], cur[0:idx]) - copy(ary[idx:], cur[idx+1:]) - - *d = ary - return nil - -} - -func (p Patch) add(doc *partialDoc, op operation) error { - path := op.path() - - con, key := findObject(doc, path) - - if con == nil { - return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: %s", path) - } - - return con.add(key, op.value()) -} - -func (p Patch) remove(doc *partialDoc, op operation) error { - path := op.path() - - con, key := findObject(doc, path) - - if con == nil { - return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: %s", path) - } - - return con.remove(key) -} - -func (p Patch) replace(doc *partialDoc, op operation) error { - path := op.path() - - con, key := findObject(doc, path) - - if con == nil { - return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", path) - } - - return con.set(key, op.value()) -} - -func (p Patch) move(doc *partialDoc, op operation) error { - from := op.from() - - con, key := findObject(doc, from) - - if con == nil { - return fmt.Errorf("jsonpatch move operation does not apply: doc is missing from path: %s", from) - } - - val, err := con.get(key) - if err != nil { - return err - } - - err = con.remove(key) - if err != nil { - 
return err - } - - path := op.path() - - con, key = findObject(doc, path) - - if con == nil { - return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path) - } - - return con.set(key, val) -} - -func (p Patch) test(doc *partialDoc, op operation) error { - path := op.path() - - con, key := findObject(doc, path) - - if con == nil { - return fmt.Errorf("jsonpatch test operation does not apply: is missing path: %s", path) - } - - val, err := con.get(key) - - if err != nil { - return err - } - - if val == nil { - if op.value().raw == nil { - return nil - } else { - return fmt.Errorf("Testing value %s failed", path) - } - } - - if val.equal(op.value()) { - return nil - } - - return fmt.Errorf("Testing value %s failed", path) -} - -// Equal indicates if 2 JSON documents have the same structural equality. -func Equal(a, b []byte) bool { - ra := make(json.RawMessage, len(a)) - copy(ra, a) - la := newLazyNode(&ra) - - rb := make(json.RawMessage, len(b)) - copy(rb, b) - lb := newLazyNode(&rb) - - return la.equal(lb) -} - -// DecodePatch decodes the passed JSON document as an RFC 6902 patch. -func DecodePatch(buf []byte) (Patch, error) { - var p Patch - - err := json.Unmarshal(buf, &p) - - if err != nil { - return nil, err - } - - return p, nil -} - -// Apply mutates a JSON document according to the patch, and returns the new -// document. -func (p Patch) Apply(doc []byte) ([]byte, error) { - return p.ApplyIndent(doc, "") -} - -// ApplyIndent mutates a JSON document according to the patch, and returns the new -// document indented. -func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { - pd := &partialDoc{} - - err := json.Unmarshal(doc, pd) - - if err != nil { - return nil, err - } - - err = nil - - for _, op := range p { - switch op.kind() { - case "add": - err = p.add(pd, op) - case "remove": - err = p.remove(pd, op) - case "replace": - err = p.replace(pd, op) - case "move": - err = p.move(pd, op) - case "test": - err = p.test(pd, op) - default: - err = fmt.Errorf("Unexpected kind: %s", op.kind()) - } - - if err != nil { - return nil, err - } - } - - if indent != "" { - return json.MarshalIndent(pd, "", indent) - } - - return json.Marshal(pd) -} diff --git a/vendor/github.com/exponent-io/jsonpath/.gitignore b/vendor/github.com/exponent-io/jsonpath/.gitignore deleted file mode 100644 index daf913b1b3..0000000000 --- a/vendor/github.com/exponent-io/jsonpath/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/exponent-io/jsonpath/.travis.yml b/vendor/github.com/exponent-io/jsonpath/.travis.yml deleted file mode 100644 index f4f458a416..0000000000 --- a/vendor/github.com/exponent-io/jsonpath/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go - -go: - - 1.5 - - tip diff --git a/vendor/github.com/exponent-io/jsonpath/LICENSE b/vendor/github.com/exponent-io/jsonpath/LICENSE deleted file mode 100644 index 5419772507..0000000000 --- a/vendor/github.com/exponent-io/jsonpath/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Exponent Labs LLC - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the 
Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/exponent-io/jsonpath/README.md b/vendor/github.com/exponent-io/jsonpath/README.md deleted file mode 100644 index 382fb3138c..0000000000 --- a/vendor/github.com/exponent-io/jsonpath/README.md +++ /dev/null @@ -1,66 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/exponent-io/jsonpath?status.svg)](https://godoc.org/github.com/exponent-io/jsonpath) -[![Build Status](https://travis-ci.org/exponent-io/jsonpath.svg?branch=master)](https://travis-ci.org/exponent-io/jsonpath) - -# jsonpath - -This package extends the [json.Decoder](https://golang.org/pkg/encoding/json/#Decoder) to support navigating a stream of JSON tokens. You should be able to use this extended Decoder places where a json.Decoder would have been used. - -This Decoder has the following enhancements... - * The [Scan](https://godoc.org/github.com/exponent-io/jsonpath/#Decoder.Scan) method supports scanning a JSON stream while extracting particular values along the way using [PathActions](https://godoc.org/github.com/exponent-io/jsonpath#PathActions). - * The [SeekTo](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.SeekTo) method supports seeking forward in a JSON token stream to a particular path. - * The [Path](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Path) method returns the path of the most recently parsed token. - * The [Token](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Token) method has been modified to distinguish between strings that are object keys and strings that are values. Object key strings are returned as the [KeyString](https://godoc.org/github.com/exponent-io/jsonpath#KeyString) type rather than a native string. 
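Of the enhancements listed above, the KeyString behaviour of Token is the one the examples below do not demonstrate, so here is a small sketch of iterating tokens and picking out object keys; it assumes the same import path used in the examples below:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/exponent-io/jsonpath"
)

func main() {
	j := []byte(`{"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}}`)
	d := jsonpath.NewDecoder(bytes.NewReader(j))
	for {
		t, err := d.Token()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		// Object keys come back as jsonpath.KeyString; string values stay plain strings.
		if key, ok := t.(jsonpath.KeyString); ok {
			fmt.Printf("key %q at path %v\n", string(key), d.Path())
		}
	}
}
```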
- -## Installation - - go get -u github.com/exponent-io/jsonpath - -## Example Usage - -#### SeekTo - -```go -import "github.com/exponent-io/jsonpath" - -var j = []byte(`[ - {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}}, - {"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}} -]`) - -w := json.NewDecoder(bytes.NewReader(j)) -var v interface{} - -w.SeekTo(1, "Point", "G") -w.Decode(&v) // v is 218 -``` - -#### Scan with PathActions - -```go -var j = []byte(`{"colors":[ - {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10, "A": 58}}, - {"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255, "A": 231}} -]}`) - -var actions PathActions - -// Extract the value at Point.A -actions.Add(func(d *Decoder) error { - var alpha int - err := d.Decode(&alpha) - fmt.Printf("Alpha: %v\n", alpha) - return err -}, "Point", "A") - -w := NewDecoder(bytes.NewReader(j)) -w.SeekTo("colors", 0) - -var ok = true -var err error -for ok { - ok, err = w.Scan(&actions) - if err != nil && err != io.EOF { - panic(err) - } -} -``` diff --git a/vendor/github.com/exponent-io/jsonpath/decoder.go b/vendor/github.com/exponent-io/jsonpath/decoder.go deleted file mode 100644 index 31de46c738..0000000000 --- a/vendor/github.com/exponent-io/jsonpath/decoder.go +++ /dev/null @@ -1,210 +0,0 @@ -package jsonpath - -import ( - "encoding/json" - "io" -) - -// KeyString is returned from Decoder.Token to represent each key in a JSON object value. -type KeyString string - -// Decoder extends the Go runtime's encoding/json.Decoder to support navigating in a stream of JSON tokens. -type Decoder struct { - json.Decoder - - path JsonPath - context jsonContext -} - -// NewDecoder creates a new instance of the extended JSON Decoder. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{Decoder: *json.NewDecoder(r)} -} - -// SeekTo causes the Decoder to move forward to a given path in the JSON structure. -// -// The path argument must consist of strings or integers. Each string specifies an JSON object key, and -// each integer specifies an index into a JSON array. -// -// Consider the JSON structure -// -// { "a": [0,"s",12e4,{"b":0,"v":35} ] } -// -// SeekTo("a",3,"v") will move to the value referenced by the "a" key in the current object, -// followed by a move to the 4th value (index 3) in the array, followed by a move to the value at key "v". -// In this example, a subsequent call to the decoder's Decode() would unmarshal the value 35. -// -// SeekTo returns a boolean value indicating whether a match was found. -// -// Decoder is intended to be used with a stream of tokens. As a result it navigates forward only. -func (d *Decoder) SeekTo(path ...interface{}) (bool, error) { - - if len(path) == 0 { - return len(d.path) == 0, nil - } - last := len(path) - 1 - if i, ok := path[last].(int); ok { - path[last] = i - 1 - } - - for { - if d.path.Equal(path) { - return true, nil - } - _, err := d.Token() - if err == io.EOF { - return false, nil - } else if err != nil { - return false, err - } - } -} - -// Decode reads the next JSON-encoded value from its input and stores it in the value pointed to by v. This is -// equivalent to encoding/json.Decode(). -func (d *Decoder) Decode(v interface{}) error { - switch d.context { - case objValue: - d.context = objKey - break - case arrValue: - d.path.incTop() - break - } - return d.Decoder.Decode(v) -} - -// Path returns a slice of string and/or int values representing the path from the root of the JSON object to the -// position of the most-recently parsed token. 
-func (d *Decoder) Path() JsonPath { - p := make(JsonPath, len(d.path)) - copy(p, d.path) - return p -} - -// Token is equivalent to the Token() method on json.Decoder. The primary difference is that it distinguishes -// between strings that are keys and and strings that are values. String tokens that are object keys are returned as a -// KeyString rather than as a native string. -func (d *Decoder) Token() (json.Token, error) { - t, err := d.Decoder.Token() - if err != nil { - return t, err - } - - if t == nil { - switch d.context { - case objValue: - d.context = objKey - break - case arrValue: - d.path.incTop() - break - } - return t, err - } - - switch t := t.(type) { - case json.Delim: - switch t { - case json.Delim('{'): - if d.context == arrValue { - d.path.incTop() - } - d.path.push("") - d.context = objKey - break - case json.Delim('}'): - d.path.pop() - d.context = d.path.inferContext() - break - case json.Delim('['): - if d.context == arrValue { - d.path.incTop() - } - d.path.push(-1) - d.context = arrValue - break - case json.Delim(']'): - d.path.pop() - d.context = d.path.inferContext() - break - } - case float64, json.Number, bool: - switch d.context { - case objValue: - d.context = objKey - break - case arrValue: - d.path.incTop() - break - } - break - case string: - switch d.context { - case objKey: - d.path.nameTop(t) - d.context = objValue - return KeyString(t), err - case objValue: - d.context = objKey - case arrValue: - d.path.incTop() - } - break - } - - return t, err -} - -// Scan moves forward over the JSON stream consuming all the tokens at the current level (current object, current array) -// invoking each matching PathAction along the way. -// -// Scan returns true if there are more contiguous values to scan (for example in an array). -func (d *Decoder) Scan(ext *PathActions) (bool, error) { - - rootPath := d.Path() - - // If this is an array path, increment the root path in our local copy. - if rootPath.inferContext() == arrValue { - rootPath.incTop() - } - - for { - // advance the token position - _, err := d.Token() - if err != nil { - return false, err - } - - match: - var relPath JsonPath - - // capture the new JSON path - path := d.Path() - - if len(path) > len(rootPath) { - // capture the path relative to where the scan started - relPath = path[len(rootPath):] - } else { - // if the path is not longer than the root, then we are done with this scan - // return boolean flag indicating if there are more items to scan at the same level - return d.Decoder.More(), nil - } - - // match the relative path against the path actions - if node := ext.node.match(relPath); node != nil { - if node.action != nil { - // we have a match so execute the action - err = node.action(d) - if err != nil { - return d.Decoder.More(), err - } - // The action may have advanced the decoder. If we are in an array, advancing it further would - // skip tokens. So, if we are scanning an array, jump to the top without advancing the token. - if d.path.inferContext() == arrValue && d.Decoder.More() { - goto match - } - } - } - } -} diff --git a/vendor/github.com/exponent-io/jsonpath/path.go b/vendor/github.com/exponent-io/jsonpath/path.go deleted file mode 100644 index d7db2ad336..0000000000 --- a/vendor/github.com/exponent-io/jsonpath/path.go +++ /dev/null @@ -1,67 +0,0 @@ -// Extends the Go runtime's json.Decoder enabling navigation of a stream of json tokens. 
-package jsonpath - -import "fmt" - -type jsonContext int - -const ( - none jsonContext = iota - objKey - objValue - arrValue -) - -// AnyIndex can be used in a pattern to match any array index. -const AnyIndex = -2 - -// JsonPath is a slice of strings and/or integers. Each string specifies an JSON object key, and -// each integer specifies an index into a JSON array. -type JsonPath []interface{} - -func (p *JsonPath) push(n interface{}) { *p = append(*p, n) } -func (p *JsonPath) pop() { *p = (*p)[:len(*p)-1] } - -// increment the index at the top of the stack (must be an array index) -func (p *JsonPath) incTop() { (*p)[len(*p)-1] = (*p)[len(*p)-1].(int) + 1 } - -// name the key at the top of the stack (must be an object key) -func (p *JsonPath) nameTop(n string) { (*p)[len(*p)-1] = n } - -// infer the context from the item at the top of the stack -func (p *JsonPath) inferContext() jsonContext { - if len(*p) == 0 { - return none - } - t := (*p)[len(*p)-1] - switch t.(type) { - case string: - return objKey - case int: - return arrValue - default: - panic(fmt.Sprintf("Invalid stack type %T", t)) - } -} - -// Equal tests for equality between two JsonPath types. -func (p *JsonPath) Equal(o JsonPath) bool { - if len(*p) != len(o) { - return false - } - for i, v := range *p { - if v != o[i] { - return false - } - } - return true -} - -func (p *JsonPath) HasPrefix(o JsonPath) bool { - for i, v := range o { - if v != (*p)[i] { - return false - } - } - return true -} diff --git a/vendor/github.com/exponent-io/jsonpath/pathaction.go b/vendor/github.com/exponent-io/jsonpath/pathaction.go deleted file mode 100644 index 497ed686ca..0000000000 --- a/vendor/github.com/exponent-io/jsonpath/pathaction.go +++ /dev/null @@ -1,61 +0,0 @@ -package jsonpath - -// pathNode is used to construct a trie of paths to be matched -type pathNode struct { - matchOn interface{} // string, or integer - childNodes []pathNode - action DecodeAction -} - -// match climbs the trie to find a node that matches the given JSON path. -func (n *pathNode) match(path JsonPath) *pathNode { - var node *pathNode = n - for _, ps := range path { - found := false - for i, n := range node.childNodes { - if n.matchOn == ps { - node = &node.childNodes[i] - found = true - break - } else if _, ok := ps.(int); ok && n.matchOn == AnyIndex { - node = &node.childNodes[i] - found = true - break - } - } - if !found { - return nil - } - } - return node -} - -// PathActions represents a collection of DecodeAction functions that should be called at certain path positions -// when scanning the JSON stream. PathActions can be created once and used many times in one or more JSON streams. -type PathActions struct { - node pathNode -} - -// DecodeAction handlers are called by the Decoder when scanning objects. See PathActions.Add for more detail. -type DecodeAction func(d *Decoder) error - -// Add specifies an action to call on the Decoder when the specified path is encountered. 
-func (je *PathActions) Add(action DecodeAction, path ...interface{}) { - - var node *pathNode = &je.node - for _, ps := range path { - found := false - for i, n := range node.childNodes { - if n.matchOn == ps { - node = &node.childNodes[i] - found = true - break - } - } - if !found { - node.childNodes = append(node.childNodes, pathNode{matchOn: ps}) - node = &node.childNodes[len(node.childNodes)-1] - } - } - node.action = action -} diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS new file mode 100644 index 0000000000..d2c3b418fe --- /dev/null +++ b/vendor/github.com/gogo/protobuf/CONTRIBUTORS @@ -0,0 +1,15 @@ +Anton Povarov +Clayton Coleman +Denis Smirnov +DongYun Kang +Dwayne Schultz +Georg Apitz +Gustav Paul +John Tuley +Laurent +Patrick Lee +Stephen J Day +Tamir Duberstein +Todd Eisenberger +Tormod Erevik Lea +Walter Schulze diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS new file mode 100644 index 0000000000..15167cd746 --- /dev/null +++ b/vendor/github.com/golang/protobuf/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS new file mode 100644 index 0000000000..1c4577e968 --- /dev/null +++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md index 68fcf2cabb..64869af347 100644 --- a/vendor/github.com/google/gofuzz/README.md +++ b/vendor/github.com/google/gofuzz/README.md @@ -14,21 +14,21 @@ This is useful for testing: Import with ```import "github.com/google/gofuzz"``` You can use it on single variables: -``` +```go f := fuzz.New() var myInt int f.Fuzz(&myInt) // myInt gets a random value. ``` You can use it on maps: -``` +```go f := fuzz.New().NilChance(0).NumElements(1, 1) var myMap map[ComplexKeyType]string f.Fuzz(&myMap) // myMap will have exactly one element. ``` Customize the chance of getting a nil pointer: -``` +```go f := fuzz.New().NilChance(.5) var fancyStruct struct { A, B, C, D *string @@ -37,7 +37,7 @@ f.Fuzz(&fancyStruct) // About half the pointers should be set. 
``` You can even customize the randomization completely if needed: -``` +```go type MyEnum string const ( A MyEnum = "A" diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go index 42d9a48b3e..4f888fbc8f 100644 --- a/vendor/github.com/google/gofuzz/fuzz.go +++ b/vendor/github.com/google/gofuzz/fuzz.go @@ -129,7 +129,7 @@ func (f *Fuzzer) genElementCount() int { if f.minElements == f.maxElements { return f.minElements } - return f.minElements + f.r.Intn(f.maxElements-f.minElements) + return f.minElements + f.r.Intn(f.maxElements-f.minElements+1) } func (f *Fuzzer) genShouldFill() bool { @@ -229,12 +229,19 @@ func (f *Fuzzer) doFuzz(v reflect.Value, flags uint64) { return } v.Set(reflect.Zero(v.Type())) + case reflect.Array: + if f.genShouldFill() { + n := v.Len() + for i := 0; i < n; i++ { + f.doFuzz(v.Index(i), 0) + } + return + } + v.Set(reflect.Zero(v.Type())) case reflect.Struct: for i := 0; i < v.NumField(); i++ { f.doFuzz(v.Field(i), 0) } - case reflect.Array: - fallthrough case reflect.Chan: fallthrough case reflect.Func: diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE deleted file mode 100644 index 5f0d1fb6a7..0000000000 --- a/vendor/github.com/inconshreveable/mousetrap/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2014 Alan Shreve - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md deleted file mode 100644 index 7a950d1774..0000000000 --- a/vendor/github.com/inconshreveable/mousetrap/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# mousetrap - -mousetrap is a tiny library that answers a single question. - -On a Windows machine, was the process invoked by someone double clicking on -the executable file while browsing in explorer? - -### Motivation - -Windows developers unfamiliar with command line tools will often "double-click" -the executable for a tool. Because most CLI tools print the help and then exit -when invoked without arguments, this is often very frustrating for those users. - -mousetrap provides a way to detect these invocations so that you can provide -more helpful behavior and instructions on how to run the CLI tool. 
To see what -this looks like, both from an organizational and a technical perspective, see -https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/ - -### The interface - -The library exposes a single interface: - - func StartedByExplorer() (bool) diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go deleted file mode 100644 index 9d2d8a4bab..0000000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_others.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !windows - -package mousetrap - -// StartedByExplorer returns true if the program was invoked by the user -// double-clicking on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. It only can tell you -// whether it was launched from explorer.exe -// -// On non-Windows platforms, it always returns false. -func StartedByExplorer() bool { - return false -} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go deleted file mode 100644 index 336142a5e3..0000000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go +++ /dev/null @@ -1,98 +0,0 @@ -// +build windows -// +build !go1.4 - -package mousetrap - -import ( - "fmt" - "os" - "syscall" - "unsafe" -) - -const ( - // defined by the Win32 API - th32cs_snapprocess uintptr = 0x2 -) - -var ( - kernel = syscall.MustLoadDLL("kernel32.dll") - CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot") - Process32First = kernel.MustFindProc("Process32FirstW") - Process32Next = kernel.MustFindProc("Process32NextW") -) - -// ProcessEntry32 structure defined by the Win32 API -type processEntry32 struct { - dwSize uint32 - cntUsage uint32 - th32ProcessID uint32 - th32DefaultHeapID int - th32ModuleID uint32 - cntThreads uint32 - th32ParentProcessID uint32 - pcPriClassBase int32 - dwFlags uint32 - szExeFile [syscall.MAX_PATH]uint16 -} - -func getProcessEntry(pid int) (pe *processEntry32, err error) { - snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0)) - if snapshot == uintptr(syscall.InvalidHandle) { - err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1) - return - } - defer syscall.CloseHandle(syscall.Handle(snapshot)) - - var processEntry processEntry32 - processEntry.dwSize = uint32(unsafe.Sizeof(processEntry)) - ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32First: %v", e1) - return - } - - for { - if processEntry.th32ProcessID == uint32(pid) { - pe = &processEntry - return - } - - ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32Next: %v", e1) - return - } - } -} - -func getppid() (pid int, err error) { - pe, err := getProcessEntry(os.Getpid()) - if err != nil { - return - } - - pid = int(pe.th32ParentProcessID) - return -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. 
It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - ppid, err := getppid() - if err != nil { - return false - } - - pe, err := getProcessEntry(ppid) - if err != nil { - return false - } - - name := syscall.UTF16ToString(pe.szExeFile[:]) - return name == "explorer.exe" -} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go deleted file mode 100644 index 9a28e57c3c..0000000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build windows -// +build go1.4 - -package mousetrap - -import ( - "os" - "syscall" - "unsafe" -) - -func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { - snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) - if err != nil { - return nil, err - } - defer syscall.CloseHandle(snapshot) - var procEntry syscall.ProcessEntry32 - procEntry.Size = uint32(unsafe.Sizeof(procEntry)) - if err = syscall.Process32First(snapshot, &procEntry); err != nil { - return nil, err - } - for { - if procEntry.ProcessID == uint32(pid) { - return &procEntry, nil - } - err = syscall.Process32Next(snapshot, &procEntry) - if err != nil { - return nil, err - } - } -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - pe, err := getProcessEntry(os.Getppid()) - if err != nil { - return false - } - return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) -} diff --git a/vendor/github.com/jonboulle/clockwork/.gitignore b/vendor/github.com/jonboulle/clockwork/.gitignore deleted file mode 100644 index 010c242bd8..0000000000 --- a/vendor/github.com/jonboulle/clockwork/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test - -*.swp diff --git a/vendor/github.com/jonboulle/clockwork/LICENSE b/vendor/github.com/jonboulle/clockwork/LICENSE deleted file mode 100644 index 5c304d1a4a..0000000000 --- a/vendor/github.com/jonboulle/clockwork/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/jonboulle/clockwork/README.md b/vendor/github.com/jonboulle/clockwork/README.md deleted file mode 100644 index 236c86a244..0000000000 --- a/vendor/github.com/jonboulle/clockwork/README.md +++ /dev/null @@ -1,58 +0,0 @@ -clockwork -========= - -a simple fake clock for golang - -# Usage - -Replace uses of the `time` package with the `clockwork.Clock` interface instead. 
- -For example, instead of using `time.Sleep` directly: - -``` -func my_func() { - time.Sleep(3 * time.Second) - do_something() -} -``` - -inject a clock and use its `Sleep` method instead: - -``` -func my_func(clock clockwork.Clock) { - clock.Sleep(3 * time.Second) - do_something() -} -``` - -Now you can easily test `my_func` with a `FakeClock`: - -``` -func TestMyFunc(t *testing.T) { - c := clockwork.NewFakeClock() - - // Start our sleepy function - my_func(c) - - // Ensure we wait until my_func is sleeping - c.BlockUntil(1) - - assert_state() - - // Advance the FakeClock forward in time - c.Advance(3) - - assert_state() -} -``` - -and in production builds, simply inject the real clock instead: -``` -my_func(clockwork.NewRealClock()) -``` - -See [example_test.go](example_test.go) for a full example. - -# Credits - -Inspired by @wickman's [threaded fake clock](https://gist.github.com/wickman/3840816), and the [Golang playground](http://blog.golang.org/playground#Faking time) diff --git a/vendor/github.com/jonboulle/clockwork/clockwork.go b/vendor/github.com/jonboulle/clockwork/clockwork.go deleted file mode 100644 index 4b6918b522..0000000000 --- a/vendor/github.com/jonboulle/clockwork/clockwork.go +++ /dev/null @@ -1,161 +0,0 @@ -package clockwork - -import ( - "sync" - "time" -) - -// Clock provides an interface that packages can use instead of directly -// using the time module, so that chronology-related behavior can be tested -type Clock interface { - After(d time.Duration) <-chan time.Time - Sleep(d time.Duration) - Now() time.Time -} - -// FakeClock provides an interface for a clock which can be -// manually advanced through time -type FakeClock interface { - Clock - // Advance advances the FakeClock to a new point in time, ensuring any existing - // sleepers are notified appropriately before returning - Advance(d time.Duration) - // BlockUntil will block until the FakeClock has the given number of - // sleepers (callers of Sleep or After) - BlockUntil(n int) -} - -// NewRealClock returns a Clock which simply delegates calls to the actual time -// package; it should be used by packages in production. -func NewRealClock() Clock { - return &realClock{} -} - -// NewFakeClock returns a FakeClock implementation which can be -// manually advanced through time for testing. -func NewFakeClock() FakeClock { - return &fakeClock{ - l: sync.RWMutex{}, - } -} - -type realClock struct{} - -func (rc *realClock) After(d time.Duration) <-chan time.Time { - return time.After(d) -} - -func (rc *realClock) Sleep(d time.Duration) { - time.Sleep(d) -} - -func (rc *realClock) Now() time.Time { - return time.Now() -} - -type fakeClock struct { - sleepers []*sleeper - blockers []*blocker - time time.Time - - l sync.RWMutex -} - -// sleeper represents a caller of After or Sleep -type sleeper struct { - until time.Time - done chan time.Time -} - -// blocker represents a caller of BlockUntil -type blocker struct { - count int - ch chan struct{} -} - -// After mimics time.After; it waits for the given duration to elapse on the -// fakeClock, then sends the current time on the returned channel. 
-func (fc *fakeClock) After(d time.Duration) <-chan time.Time { - fc.l.Lock() - defer fc.l.Unlock() - now := fc.time - done := make(chan time.Time, 1) - if d.Nanoseconds() == 0 { - // special case - trigger immediately - done <- now - } else { - // otherwise, add to the set of sleepers - s := &sleeper{ - until: now.Add(d), - done: done, - } - fc.sleepers = append(fc.sleepers, s) - // and notify any blockers - fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers)) - } - return done -} - -// notifyBlockers notifies all the blockers waiting until the -// given number of sleepers are waiting on the fakeClock. It -// returns an updated slice of blockers (i.e. those still waiting) -func notifyBlockers(blockers []*blocker, count int) (newBlockers []*blocker) { - for _, b := range blockers { - if b.count == count { - close(b.ch) - } else { - newBlockers = append(newBlockers, b) - } - } - return -} - -// Sleep blocks until the given duration has passed on the fakeClock -func (fc *fakeClock) Sleep(d time.Duration) { - <-fc.After(d) -} - -// Time returns the current time of the fakeClock -func (fc *fakeClock) Now() time.Time { - fc.l.Lock() - defer fc.l.Unlock() - return fc.time -} - -// Advance advances fakeClock to a new point in time, ensuring channels from any -// previous invocations of After are notified appropriately before returning -func (fc *fakeClock) Advance(d time.Duration) { - fc.l.Lock() - defer fc.l.Unlock() - end := fc.time.Add(d) - var newSleepers []*sleeper - for _, s := range fc.sleepers { - if end.Sub(s.until) >= 0 { - s.done <- end - } else { - newSleepers = append(newSleepers, s) - } - } - fc.sleepers = newSleepers - fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers)) - fc.time = end -} - -// BlockUntil will block until the fakeClock has the given number of sleepers -// (callers of Sleep or After) -func (fc *fakeClock) BlockUntil(n int) { - fc.l.Lock() - // Fast path: current number of sleepers is what we're looking for - if len(fc.sleepers) == n { - fc.l.Unlock() - return - } - // Otherwise, set up a new blocker - b := &blocker{ - count: n, - ch: make(chan struct{}), - } - fc.blockers = append(fc.blockers, b) - fc.l.Unlock() - <-b.ch -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 618c4dee98..1fdef9edb0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -17,7 +17,7 @@ // Pushgateway (package push). // // All exported functions and methods are safe to be used concurrently unless -//specified otherwise. +// specified otherwise. // // A Basic Example // @@ -69,7 +69,7 @@ // Metrics // // The number of exported identifiers in this package might appear a bit -// overwhelming. Hovever, in addition to the basic plumbing shown in the example +// overwhelming. However, in addition to the basic plumbing shown in the example // above, you only need to understand the different metric types and their // vector versions for basic usage. // @@ -95,8 +95,8 @@ // SummaryVec, HistogramVec, and UntypedVec are not. // // To create instances of Metrics and their vector versions, you need a suitable -// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, -// HistogramOpts, or UntypedOpts. +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or +// UntypedOpts. 
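To make the …Opts convention described above concrete, here is a small illustrative sketch (the namespace and metric name are invented) that builds a Counter from a CounterOpts struct, registers it with MustRegister, and serves it through the promhttp sub-package referred to later in this comment:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// reloads is built from a CounterOpts struct, following the …Opts convention.
var reloads = prometheus.NewCounter(prometheus.CounterOpts{
	Namespace: "ingress",
	Name:      "reloads_total",
	Help:      "Number of configuration reloads.",
})

func main() {
	// MustRegister panics on registration errors; use Register to handle them instead.
	prometheus.MustRegister(reloads)
	reloads.Inc()

	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":9090", nil)
}
```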
// // Custom Collectors and constant Metrics // @@ -114,8 +114,8 @@ // Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and // NewConstSummary (and their respective Must… versions). That will happen in // the Collect method. The Describe method has to return separate Desc -// instances, representative of the “throw-away” metrics to be created -// later. NewDesc comes in handy to create those Desc instances. +// instances, representative of the “throw-away” metrics to be created later. +// NewDesc comes in handy to create those Desc instances. // // The Collector example illustrates the use case. You can also look at the // source code of the processCollector (mirroring process metrics), the @@ -129,32 +129,32 @@ // Advanced Uses of the Registry // // While MustRegister is the by far most common way of registering a Collector, -// sometimes you might want to handle the errors the registration might -// cause. As suggested by the name, MustRegister panics if an error occurs. With -// the Register function, the error is returned and can be handled. +// sometimes you might want to handle the errors the registration might cause. +// As suggested by the name, MustRegister panics if an error occurs. With the +// Register function, the error is returned and can be handled. // // An error is returned if the registered Collector is incompatible or // inconsistent with already registered metrics. The registry aims for -// consistency of the collected metrics according to the Prometheus data -// model. Inconsistencies are ideally detected at registration time, not at -// collect time. The former will usually be detected at start-up time of a -// program, while the latter will only happen at scrape time, possibly not even -// on the first scrape if the inconsistency only becomes relevant later. That is -// the main reason why a Collector and a Metric have to describe themselves to -// the registry. +// consistency of the collected metrics according to the Prometheus data model. +// Inconsistencies are ideally detected at registration time, not at collect +// time. The former will usually be detected at start-up time of a program, +// while the latter will only happen at scrape time, possibly not even on the +// first scrape if the inconsistency only becomes relevant later. That is the +// main reason why a Collector and a Metric have to describe themselves to the +// registry. // // So far, everything we did operated on the so-called default registry, as it // can be found in the global DefaultRegistry variable. With NewRegistry, you // can create a custom registry, or you can even implement the Registerer or -// Gatherer interfaces yourself. The methods Register and Unregister work in -// the same way on a custom registry as the global functions Register and -// Unregister on the default registry. +// Gatherer interfaces yourself. The methods Register and Unregister work in the +// same way on a custom registry as the global functions Register and Unregister +// on the default registry. // -// There are a number of uses for custom registries: You can use registries -// with special properties, see NewPedanticRegistry. You can avoid global state, -// as it is imposed by the DefaultRegistry. You can use multiple registries at -// the same time to expose different metrics in different ways. You can use -// separate registries for testing purposes. +// There are a number of uses for custom registries: You can use registries with +// special properties, see NewPedanticRegistry. 
You can avoid global state, as +// it is imposed by the DefaultRegistry. You can use multiple registries at the +// same time to expose different metrics in different ways. You can use separate +// registries for testing purposes. // // Also note that the DefaultRegistry comes registered with a Collector for Go // runtime metrics (via NewGoCollector) and a Collector for process metrics (via @@ -166,16 +166,20 @@ // The Registry implements the Gatherer interface. The caller of the Gather // method can then expose the gathered metrics in some way. Usually, the metrics // are served via HTTP on the /metrics endpoint. That's happening in the example -// above. The tools to expose metrics via HTTP are in the promhttp -// sub-package. (The top-level functions in the prometheus package are -// deprecated.) +// above. The tools to expose metrics via HTTP are in the promhttp sub-package. +// (The top-level functions in the prometheus package are deprecated.) // // Pushing to the Pushgateway // // Function for pushing to the Pushgateway can be found in the push sub-package. // +// Graphite Bridge +// +// Functions and examples to push metrics from a Gatherer to Graphite can be +// found in the graphite sub-package. +// // Other Means of Exposition // -// More ways of exposing metrics can easily be added. Sending metrics to -// Graphite would be an example that will soon be implemented. +// More ways of exposing metrics can easily be added by following the approaches +// of the existing implementations. package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index 6dea674c00..1ae9b8fbcc 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -8,8 +8,9 @@ import ( ) type goCollector struct { - goroutines Gauge - gcDesc *Desc + goroutinesDesc *Desc + threadsDesc *Desc + gcDesc *Desc // metrics to describe and collect metrics memStatsMetrics @@ -19,11 +20,14 @@ type goCollector struct { // go process. func NewGoCollector() Collector { return &goCollector{ - goroutines: NewGauge(GaugeOpts{ - Namespace: "go", - Name: "goroutines", - Help: "Number of goroutines that currently exist.", - }), + goroutinesDesc: NewDesc( + "go_goroutines", + "Number of goroutines that currently exist.", + nil, nil), + threadsDesc: NewDesc( + "go_threads", + "Number of OS threads created", + nil, nil), gcDesc: NewDesc( "go_gc_duration_seconds", "A summary of the GC invocation durations.", @@ -224,9 +228,9 @@ func memstatNamespace(s string) string { // Describe returns all descriptions of the collector. func (c *goCollector) Describe(ch chan<- *Desc) { - ch <- c.goroutines.Desc() + ch <- c.goroutinesDesc + ch <- c.threadsDesc ch <- c.gcDesc - for _, i := range c.metrics { ch <- i.desc } @@ -234,8 +238,9 @@ func (c *goCollector) Describe(ch chan<- *Desc) { // Collect returns the current state of all metrics of the collector. 
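The "Custom Collectors and constant Metrics" paragraphs above, together with the goCollector change in this hunk (Describe above, Collect just below), both rely on the NewDesc plus MustNewConstMetric pattern. Below is a minimal hedged sketch of a custom Collector built that way; `queueLenCollector`, its metric name, and `queueLength()` are made-up placeholders, not APIs from this diff. Constant metrics created on the fly keep the collector itself stateless, which is the design point the package docs make.

```
package main

import "github.com/prometheus/client_golang/prometheus"

// queueLenCollector reports an externally measured value as a "throw-away"
// constant metric, mirroring the rewritten goCollector in this diff.
type queueLenCollector struct {
	desc *prometheus.Desc
}

func newQueueLenCollector() *queueLenCollector {
	return &queueLenCollector{
		desc: prometheus.NewDesc(
			"example_queue_length", // illustrative name
			"Current length of an example work queue (illustrative).",
			nil, nil,
		),
	}
}

// Describe sends the pre-built Desc, just as goCollector.Describe sends
// goroutinesDesc and threadsDesc in the hunk above.
func (c *queueLenCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

// Collect builds the metric on the fly with MustNewConstMetric.
func (c *queueLenCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, queueLength())
}

// queueLength is a stand-in for whatever value a real collector would read;
// it is not part of any library touched by this diff.
func queueLength() float64 { return 42 }

func main() {
	prometheus.MustRegister(newQueueLenCollector())
}
```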
func (c *goCollector) Collect(ch chan<- Metric) { - c.goroutines.Set(float64(runtime.NumGoroutine())) - ch <- c.goroutines + ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) + n, _ := runtime.ThreadCreateProfile(nil) + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) var stats debug.GCStats stats.PauseQuantiles = make([]time.Duration, 5) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go index a39727730c..d74fb4881e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go @@ -172,6 +172,9 @@ func nowSeries(t ...time.Time) nower { // httputil.ReverseProxy is a prominent example for a handler // performing such writes. // +// - It has additional issues with HTTP/2, cf. +// https://github.com/prometheus/client_golang/issues/272. +// // Upcoming versions of this package will provide ways of instrumenting HTTP // handlers that are more flexible and have fewer issues. Please prefer direct // instrumentation in the meantime. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 78d5f193ce..8c6b5bd8ee 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -243,7 +243,7 @@ func (r *Registry) Register(c Collector) error { }() r.mtx.Lock() defer r.mtx.Unlock() - // Coduct various tests... + // Conduct various tests... for desc := range descChan { // Is the descriptor valid at all? diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index 7d3e8109d0..ff75ce5851 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -44,7 +44,7 @@ var errInconsistentCardinality = errors.New("inconsistent label cardinality") // ValueType. This is a low-level building block used by the library to back the // implementations of Counter, Gauge, and Untyped. type value struct { - // valBits containst the bits of the represented float64 value. It has + // valBits contains the bits of the represented float64 value. It has // to go first in the struct to guarantee alignment for atomic // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG valBits uint64 diff --git a/vendor/github.com/prometheus/client_model/AUTHORS.md b/vendor/github.com/prometheus/client_model/AUTHORS.md new file mode 100644 index 0000000000..e8b3efa6ac --- /dev/null +++ b/vendor/github.com/prometheus/client_model/AUTHORS.md @@ -0,0 +1,13 @@ +The Prometheus project was started by Matt T. Proud (emeritus) and +Julius Volz in 2012. + +Maintainers of this repository: + +* Björn Rabenstein + +The following individuals have contributed code to this repository +(listed in alphabetical order): + +* Björn Rabenstein +* Matt T. 
Proud +* Tobias Schmidt diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 487fdc6cca..a7a42d5ef4 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -31,6 +31,7 @@ type Decoder interface { Decode(*dto.MetricFamily) error } +// DecodeOptions contains options used by the Decoder and in sample extraction. type DecodeOptions struct { // Timestamp is added to each value from the stream that has no explicit timestamp set. Timestamp model.Time @@ -142,6 +143,8 @@ func (d *textDecoder) Decode(v *dto.MetricFamily) error { return nil } +// SampleDecoder wraps a Decoder to extract samples from the metric families +// decoded by the wrapped Decoder. type SampleDecoder struct { Dec Decoder Opts *DecodeOptions @@ -149,37 +152,51 @@ type SampleDecoder struct { f dto.MetricFamily } +// Decode calls the Decode method of the wrapped Decoder and then extracts the +// samples from the decoded MetricFamily into the provided model.Vector. func (sd *SampleDecoder) Decode(s *model.Vector) error { - if err := sd.Dec.Decode(&sd.f); err != nil { + err := sd.Dec.Decode(&sd.f) + if err != nil { return err } - *s = extractSamples(&sd.f, sd.Opts) - return nil + *s, err = extractSamples(&sd.f, sd.Opts) + return err } -// Extract samples builds a slice of samples from the provided metric families. -func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector { - var all model.Vector +// ExtractSamples builds a slice of samples from the provided metric +// families. If an error occurs during sample extraction, it continues to +// extract from the remaining metric families. The returned error is the last +// error that has occured. +func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { + var ( + all model.Vector + lastErr error + ) for _, f := range fams { - all = append(all, extractSamples(f, o)...) + some, err := extractSamples(f, o) + if err != nil { + lastErr = err + continue + } + all = append(all, some...) } - return all + return all, lastErr } -func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector { +func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { switch f.GetType() { case dto.MetricType_COUNTER: - return extractCounter(o, f) + return extractCounter(o, f), nil case dto.MetricType_GAUGE: - return extractGauge(o, f) + return extractGauge(o, f), nil case dto.MetricType_SUMMARY: - return extractSummary(o, f) + return extractSummary(o, f), nil case dto.MetricType_UNTYPED: - return extractUntyped(o, f) + return extractUntyped(o, f), nil case dto.MetricType_HISTOGRAM: - return extractHistogram(o, f) + return extractHistogram(o, f), nil } - panic("expfmt.extractSamples: unknown metric family type") + return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) } func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index fae10f6ebe..371ac75037 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -11,14 +11,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -// A package for reading and writing Prometheus metrics. 
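Because ExtractSamples changes its signature in the decode.go hunk above (it now returns the extracted samples together with the last extraction error, instead of panicking on unknown metric family types), callers have to handle two return values. A hedged caller-side sketch follows; the metric text is a throwaway example, and TextParser is used only as a convenient way to obtain MetricFamily values.

```
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	// A tiny text-format exposition to decode; the metric name is illustrative.
	in := strings.NewReader("example_metric 3\n")

	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(in)
	if err != nil {
		panic(err)
	}

	opts := &expfmt.DecodeOptions{Timestamp: model.Now()}
	for _, mf := range families {
		// ExtractSamples now also returns the last extraction error,
		// while still returning whatever samples could be extracted.
		samples, err := expfmt.ExtractSamples(opts, mf)
		if err != nil {
			fmt.Println("partial extraction:", err)
		}
		fmt.Println(samples)
	}
}
```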
+// Package expfmt contains tools for reading and writing Prometheus metrics. package expfmt +// Format specifies the HTTP content type of the different wire protocols. type Format string +// Constants to assemble the Content-Type values for the different wire protocols. const ( - TextVersion = "0.0.4" - + TextVersion = "0.0.4" ProtoType = `application/vnd.google.protobuf` ProtoProtocol = `io.prometheus.client.MetricFamily` ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index 7728abaeea..c9ed3ffd82 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -129,11 +129,8 @@ func (s *Sample) Equal(o *Sample) bool { if !s.Timestamp.Equal(o.Timestamp) { return false } - if s.Value.Equal(o.Value) { - return false - } - return true + return s.Value.Equal(o.Value) } func (s Sample) String() string { diff --git a/vendor/github.com/prometheus/procfs/.travis.yml b/vendor/github.com/prometheus/procfs/.travis.yml index a9e28bf5d1..25e169dd01 100644 --- a/vendor/github.com/prometheus/procfs/.travis.yml +++ b/vendor/github.com/prometheus/procfs/.travis.yml @@ -1,5 +1,7 @@ sudo: false language: go go: - - 1.6.4 - - 1.7.4 + - 1.3 + - 1.4 + - 1.5 + - tip diff --git a/vendor/github.com/prometheus/procfs/AUTHORS.md b/vendor/github.com/prometheus/procfs/AUTHORS.md index d558635602..f1c27ccb01 100644 --- a/vendor/github.com/prometheus/procfs/AUTHORS.md +++ b/vendor/github.com/prometheus/procfs/AUTHORS.md @@ -13,8 +13,7 @@ The following individuals have contributed code to this repository * David Cournapeau * Ji-Hoon, Seol * Jonas Große Sundrup -* Julius Volz -* Matt Layher +* Julius Volz * Matthias Rampke * Nicky Gerritsen * Rémi Audebert diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile index c264a49d17..e8acbbc5ec 100644 --- a/vendor/github.com/prometheus/procfs/Makefile +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -1,5 +1,5 @@ ci: - ! gofmt -l *.go | read nothing + go fmt go vet go test -v ./... go get github.com/golang/lint/golint diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go index 49aaab0505..6a8d97b11e 100644 --- a/vendor/github.com/prometheus/procfs/fs.go +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -27,7 +27,14 @@ func NewFS(mountPoint string) (FS, error) { return FS(mountPoint), nil } -// Path returns the path of the given subsystem relative to the procfs root. -func (fs FS) Path(p ...string) string { - return path.Join(append([]string{string(fs)}, p...)...) +func (fs FS) stat(p string) (os.FileInfo, error) { + return os.Stat(path.Join(string(fs), p)) +} + +func (fs FS) open(p string) (*os.File, error) { + return os.Open(path.Join(string(fs), p)) +} + +func (fs FS) readlink(p string) (string, error) { + return os.Readlink(path.Join(string(fs), p)) } diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index e7012f7323..26da5000e3 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -8,7 +8,6 @@ import ( "io" "io/ioutil" "net" - "os" "strconv" "strings" ) @@ -59,7 +58,7 @@ func NewIPVSStats() (IPVSStats, error) { // NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. 
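This procfs hunk swaps the exported fs.Path helper for unexported open/stat/readlink wrappers, but the public entry points (NewFS, NewIPVSStats, and so on) keep their signatures, so callers are unaffected. A rough caller-side sketch, assuming a machine with IPVS support; it is illustrative only.

```
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	// DefaultMountPoint is "/proc"; NewFS only validates the mount point.
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}

	// The exported API is unchanged by this revert; only the internal
	// path helpers (fs.open versus fs.Path) differ. This call will fail
	// on machines without IPVS support, which is fine for a sketch.
	stats, err := fs.NewIPVSStats()
	if err != nil {
		panic(err)
	}
	fmt.Printf("IPVS connections: %d\n", stats.Connections)
}
```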
func (fs FS) NewIPVSStats() (IPVSStats, error) { - file, err := os.Open(fs.Path("net/ip_vs_stats")) + file, err := fs.open("net/ip_vs_stats") if err != nil { return IPVSStats{}, err } @@ -128,7 +127,7 @@ func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { // NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { - file, err := os.Open(fs.Path("net/ip_vs")) + file, err := fs.open("net/ip_vs") if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index d7a248c0df..09ed6b5ebc 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -3,6 +3,7 @@ package procfs import ( "fmt" "io/ioutil" + "path" "regexp" "strconv" "strings" @@ -31,22 +32,36 @@ type MDStat struct { // ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { - mdStatusFilePath := fs.Path("mdstat") + mdStatusFilePath := path.Join(string(fs), "mdstat") content, err := ioutil.ReadFile(mdStatusFilePath) if err != nil { return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) } - mdStates := []MDStat{} - lines := strings.Split(string(content), "\n") + mdStatusFile := string(content) + + lines := strings.Split(mdStatusFile, "\n") + var currentMD string + + // Each md has at least the deviceline, statusline and one empty line afterwards + // so we will have probably something of the order len(lines)/3 devices + // so we use that for preallocation. + estimateMDs := len(lines) / 3 + mdStates := make([]MDStat, 0, estimateMDs) + for i, l := range lines { if l == "" { + // Skip entirely empty lines. continue } + if l[0] == ' ' { + // Those lines are not the beginning of a md-section. continue } + if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { + // We aren't interested in lines with general info. continue } @@ -54,30 +69,32 @@ func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { if len(mainLine) < 3 { return mdStates, fmt.Errorf("error parsing mdline: %s", l) } - mdName := mainLine[0] - activityState := mainLine[2] + currentMD = mainLine[0] // name of md-device + activityState := mainLine[2] // activity status of said md-device if len(lines) <= i+3 { - return mdStates, fmt.Errorf( - "error parsing %s: too few lines for md device %s", - mdStatusFilePath, - mdName, - ) + return mdStates, fmt.Errorf("error parsing %s: entry for %s has fewer lines than expected", mdStatusFilePath, currentMD) } - active, total, size, err := evalStatusline(lines[i+1]) + active, total, size, err := evalStatusline(lines[i+1]) // parse statusline, always present if err != nil { return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) } - // j is the line number of the syncing-line. - j := i + 2 - if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line + // + // Now get the number of synced blocks. + // + + // Get the line number of the syncing-line. + var j int + if strings.Contains(lines[i+2], "bitmap") { // then skip the bitmap line j = i + 3 + } else { + j = i + 2 } - // If device is syncing at the moment, get the number of currently - // synced bytes, otherwise that number equals the size of the device. 
+ // If device is syncing at the moment, get the number of currently synced bytes, + // otherwise that number equals the size of the device. syncedBlocks := size if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { syncedBlocks, err = evalBuildline(lines[j]) @@ -86,14 +103,8 @@ func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { } } - mdStates = append(mdStates, MDStat{ - Name: mdName, - ActivityState: activityState, - DisksActive: active, - DisksTotal: total, - BlocksTotal: size, - BlocksSynced: syncedBlocks, - }) + mdStates = append(mdStates, MDStat{currentMD, activityState, active, total, size, syncedBlocks}) + } return mdStates, nil @@ -101,38 +112,47 @@ func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { func evalStatusline(statusline string) (active, total, size int64, err error) { matches := statuslineRE.FindStringSubmatch(statusline) - if len(matches) != 4 { - return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) + + // +1 to make it more obvious that the whole string containing the info is also returned as matches[0]. + if len(matches) != 3+1 { + return 0, 0, 0, fmt.Errorf("unexpected number matches found in statusline: %s", statusline) } size, err = strconv.ParseInt(matches[1], 10, 64) if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline) } total, err = strconv.ParseInt(matches[2], 10, 64) if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline) } active, err = strconv.ParseInt(matches[3], 10, 64) if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline) } return active, total, size, nil } -func evalBuildline(buildline string) (syncedBlocks int64, err error) { +// Gets the size that has already been synced out of the sync-line. +func evalBuildline(buildline string) (int64, error) { matches := buildlineRE.FindStringSubmatch(buildline) - if len(matches) != 2 { - return 0, fmt.Errorf("unexpected buildline: %s", buildline) + + // +1 to make it more obvious that the whole string containing the info is also returned as matches[0]. + if len(matches) < 1+1 { + return 0, fmt.Errorf("too few matches found in buildline: %s", buildline) + } + + if len(matches) > 1+1 { + return 0, fmt.Errorf("too many matches found in buildline: %s", buildline) } - syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + syncedSize, err := strconv.ParseInt(matches[1], 10, 64) if err != nil { return 0, fmt.Errorf("%s in buildline: %s", err, buildline) } - return syncedBlocks, nil + return syncedSize, nil } diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go deleted file mode 100644 index 3b7d3ec84d..0000000000 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ /dev/null @@ -1,552 +0,0 @@ -package procfs - -// While implementing parsing of /proc/[pid]/mountstats, this blog was used -// heavily as a reference: -// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex -// -// Special thanks to Chris Siebenmann for all of his posts explaining the -// various statistics available for NFS. - -import ( - "bufio" - "fmt" - "io" - "strconv" - "strings" - "time" -) - -// Constants shared between multiple functions. 
-const ( - deviceEntryLen = 8 - - fieldBytesLen = 8 - fieldEventsLen = 27 - - statVersion10 = "1.0" - statVersion11 = "1.1" - - fieldTransport10Len = 10 - fieldTransport11Len = 13 -) - -// A Mount is a device mount parsed from /proc/[pid]/mountstats. -type Mount struct { - // Name of the device. - Device string - // The mount point of the device. - Mount string - // The filesystem type used by the device. - Type string - // If available additional statistics related to this Mount. - // Use a type assertion to determine if additional statistics are available. - Stats MountStats -} - -// A MountStats is a type which contains detailed statistics for a specific -// type of Mount. -type MountStats interface { - mountStats() -} - -// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts. -type MountStatsNFS struct { - // The version of statistics provided. - StatVersion string - // The age of the NFS mount. - Age time.Duration - // Statistics related to byte counters for various operations. - Bytes NFSBytesStats - // Statistics related to various NFS event occurrences. - Events NFSEventsStats - // Statistics broken down by filesystem operation. - Operations []NFSOperationStats - // Statistics about the NFS RPC transport. - Transport NFSTransportStats -} - -// mountStats implements MountStats. -func (m MountStatsNFS) mountStats() {} - -// A NFSBytesStats contains statistics about the number of bytes read and written -// by an NFS client to and from an NFS server. -type NFSBytesStats struct { - // Number of bytes read using the read() syscall. - Read int - // Number of bytes written using the write() syscall. - Write int - // Number of bytes read using the read() syscall in O_DIRECT mode. - DirectRead int - // Number of bytes written using the write() syscall in O_DIRECT mode. - DirectWrite int - // Number of bytes read from the NFS server, in total. - ReadTotal int - // Number of bytes written to the NFS server, in total. - WriteTotal int - // Number of pages read directly via mmap()'d files. - ReadPages int - // Number of pages written directly via mmap()'d files. - WritePages int -} - -// A NFSEventsStats contains statistics about NFS event occurrences. -type NFSEventsStats struct { - // Number of times cached inode attributes are re-validated from the server. - InodeRevalidate int - // Number of times cached dentry nodes are re-validated from the server. - DnodeRevalidate int - // Number of times an inode cache is cleared. - DataInvalidate int - // Number of times cached inode attributes are invalidated. - AttributeInvalidate int - // Number of times files or directories have been open()'d. - VFSOpen int - // Number of times a directory lookup has occurred. - VFSLookup int - // Number of times permissions have been checked. - VFSAccess int - // Number of updates (and potential writes) to pages. - VFSUpdatePage int - // Number of pages read directly via mmap()'d files. - VFSReadPage int - // Number of times a group of pages have been read. - VFSReadPages int - // Number of pages written directly via mmap()'d files. - VFSWritePage int - // Number of times a group of pages have been written. - VFSWritePages int - // Number of times directory entries have been read with getdents(). - VFSGetdents int - // Number of times attributes have been set on inodes. - VFSSetattr int - // Number of pending writes that have been forcefully flushed to the server. - VFSFlush int - // Number of times fsync() has been called on directories and files. 
- VFSFsync int - // Number of times locking has been attemped on a file. - VFSLock int - // Number of times files have been closed and released. - VFSFileRelease int - // Unknown. Possibly unused. - CongestionWait int - // Number of times files have been truncated. - Truncation int - // Number of times a file has been grown due to writes beyond its existing end. - WriteExtension int - // Number of times a file was removed while still open by another process. - SillyRename int - // Number of times the NFS server gave less data than expected while reading. - ShortRead int - // Number of times the NFS server wrote less data than expected while writing. - ShortWrite int - // Number of times the NFS server indicated EJUKEBOX; retrieving data from - // offline storage. - JukeboxDelay int - // Number of NFS v4.1+ pNFS reads. - PNFSRead int - // Number of NFS v4.1+ pNFS writes. - PNFSWrite int -} - -// A NFSOperationStats contains statistics for a single operation. -type NFSOperationStats struct { - // The name of the operation. - Operation string - // Number of requests performed for this operation. - Requests int - // Number of times an actual RPC request has been transmitted for this operation. - Transmissions int - // Number of times a request has had a major timeout. - MajorTimeouts int - // Number of bytes sent for this operation, including RPC headers and payload. - BytesSent int - // Number of bytes received for this operation, including RPC headers and payload. - BytesReceived int - // Duration all requests spent queued for transmission before they were sent. - CumulativeQueueTime time.Duration - // Duration it took to get a reply back after the request was transmitted. - CumulativeTotalResponseTime time.Duration - // Duration from when a request was enqueued to when it was completely handled. - CumulativeTotalRequestTime time.Duration -} - -// A NFSTransportStats contains statistics for the NFS mount RPC requests and -// responses. -type NFSTransportStats struct { - // The local port used for the NFS mount. - Port int - // Number of times the client has had to establish a connection from scratch - // to the NFS server. - Bind int - // Number of times the client has made a TCP connection to the NFS server. - Connect int - // Duration (in jiffies, a kernel internal unit of time) the NFS mount has - // spent waiting for connections to the server to be established. - ConnectIdleTime int - // Duration since the NFS mount last saw any RPC traffic. - IdleTime time.Duration - // Number of RPC requests for this mount sent to the NFS server. - Sends int - // Number of RPC responses for this mount received from the NFS server. - Receives int - // Number of times the NFS server sent a response with a transaction ID - // unknown to this client. - BadTransactionIDs int - // A running counter, incremented on each request as the current difference - // ebetween sends and receives. - CumulativeActiveRequests int - // A running counter, incremented on each request by the current backlog - // queue size. - CumulativeBacklog int - - // Stats below only available with stat version 1.1. - - // Maximum number of simultaneously active RPC requests ever used. - MaximumRPCSlotsUsed int - // A running counter, incremented on each request as the current size of the - // sending queue. - CumulativeSendingQueue int - // A running counter, incremented on each request as the current size of the - // pending queue. 
- CumulativePendingQueue int -} - -// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice -// of Mount structures containing detailed information about each mount. -// If available, statistics for each mount are parsed as well. -func parseMountStats(r io.Reader) ([]*Mount, error) { - const ( - device = "device" - statVersionPrefix = "statvers=" - - nfs3Type = "nfs" - nfs4Type = "nfs4" - ) - - var mounts []*Mount - - s := bufio.NewScanner(r) - for s.Scan() { - // Only look for device entries in this function - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 || ss[0] != device { - continue - } - - m, err := parseMount(ss) - if err != nil { - return nil, err - } - - // Does this mount also possess statistics information? - if len(ss) > deviceEntryLen { - // Only NFSv3 and v4 are supported for parsing statistics - if m.Type != nfs3Type && m.Type != nfs4Type { - return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) - } - - statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) - - stats, err := parseMountStatsNFS(s, statVersion) - if err != nil { - return nil, err - } - - m.Stats = stats - } - - mounts = append(mounts, m) - } - - return mounts, s.Err() -} - -// parseMount parses an entry in /proc/[pid]/mountstats in the format: -// device [device] mounted on [mount] with fstype [type] -func parseMount(ss []string) (*Mount, error) { - if len(ss) < deviceEntryLen { - return nil, fmt.Errorf("invalid device entry: %v", ss) - } - - // Check for specific words appearing at specific indices to ensure - // the format is consistent with what we expect - format := []struct { - i int - s string - }{ - {i: 0, s: "device"}, - {i: 2, s: "mounted"}, - {i: 3, s: "on"}, - {i: 5, s: "with"}, - {i: 6, s: "fstype"}, - } - - for _, f := range format { - if ss[f.i] != f.s { - return nil, fmt.Errorf("invalid device entry: %v", ss) - } - } - - return &Mount{ - Device: ss[1], - Mount: ss[4], - Type: ss[7], - }, nil -} - -// parseMountStatsNFS parses a MountStatsNFS by scanning additional information -// related to NFS statistics. 
-func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { - // Field indicators for parsing specific types of data - const ( - fieldAge = "age:" - fieldBytes = "bytes:" - fieldEvents = "events:" - fieldPerOpStats = "per-op" - fieldTransport = "xprt:" - ) - - stats := &MountStatsNFS{ - StatVersion: statVersion, - } - - for s.Scan() { - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 { - break - } - if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) - } - - switch ss[0] { - case fieldAge: - // Age integer is in seconds - d, err := time.ParseDuration(ss[1] + "s") - if err != nil { - return nil, err - } - - stats.Age = d - case fieldBytes: - bstats, err := parseNFSBytesStats(ss[1:]) - if err != nil { - return nil, err - } - - stats.Bytes = *bstats - case fieldEvents: - estats, err := parseNFSEventsStats(ss[1:]) - if err != nil { - return nil, err - } - - stats.Events = *estats - case fieldTransport: - if len(ss) < 3 { - return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) - } - - tstats, err := parseNFSTransportStats(ss[2:], statVersion) - if err != nil { - return nil, err - } - - stats.Transport = *tstats - } - - // When encountering "per-operation statistics", we must break this - // loop and parse them seperately to ensure we can terminate parsing - // before reaching another device entry; hence why this 'if' statement - // is not just another switch case - if ss[0] == fieldPerOpStats { - break - } - } - - if err := s.Err(); err != nil { - return nil, err - } - - // NFS per-operation stats appear last before the next device entry - perOpStats, err := parseNFSOperationStats(s) - if err != nil { - return nil, err - } - - stats.Operations = perOpStats - - return stats, nil -} - -// parseNFSBytesStats parses a NFSBytesStats line using an input set of -// integer fields. -func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { - if len(ss) != fieldBytesLen { - return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) - } - - ns := make([]int, 0, fieldBytesLen) - for _, s := range ss { - n, err := strconv.Atoi(s) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - return &NFSBytesStats{ - Read: ns[0], - Write: ns[1], - DirectRead: ns[2], - DirectWrite: ns[3], - ReadTotal: ns[4], - WriteTotal: ns[5], - ReadPages: ns[6], - WritePages: ns[7], - }, nil -} - -// parseNFSEventsStats parses a NFSEventsStats line using an input set of -// integer fields. 
-func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { - if len(ss) != fieldEventsLen { - return nil, fmt.Errorf("invalid NFS events stats: %v", ss) - } - - ns := make([]int, 0, fieldEventsLen) - for _, s := range ss { - n, err := strconv.Atoi(s) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - return &NFSEventsStats{ - InodeRevalidate: ns[0], - DnodeRevalidate: ns[1], - DataInvalidate: ns[2], - AttributeInvalidate: ns[3], - VFSOpen: ns[4], - VFSLookup: ns[5], - VFSAccess: ns[6], - VFSUpdatePage: ns[7], - VFSReadPage: ns[8], - VFSReadPages: ns[9], - VFSWritePage: ns[10], - VFSWritePages: ns[11], - VFSGetdents: ns[12], - VFSSetattr: ns[13], - VFSFlush: ns[14], - VFSFsync: ns[15], - VFSLock: ns[16], - VFSFileRelease: ns[17], - CongestionWait: ns[18], - Truncation: ns[19], - WriteExtension: ns[20], - SillyRename: ns[21], - ShortRead: ns[22], - ShortWrite: ns[23], - JukeboxDelay: ns[24], - PNFSRead: ns[25], - PNFSWrite: ns[26], - }, nil -} - -// parseNFSOperationStats parses a slice of NFSOperationStats by scanning -// additional information about per-operation statistics until an empty -// line is reached. -func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { - const ( - // Number of expected fields in each per-operation statistics set - numFields = 9 - ) - - var ops []NFSOperationStats - - for s.Scan() { - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 { - // Must break when reading a blank line after per-operation stats to - // enable top-level function to parse the next device entry - break - } - - if len(ss) != numFields { - return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) - } - - // Skip string operation name for integers - ns := make([]int, 0, numFields-1) - for _, st := range ss[1:] { - n, err := strconv.Atoi(st) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - ops = append(ops, NFSOperationStats{ - Operation: strings.TrimSuffix(ss[0], ":"), - Requests: ns[0], - Transmissions: ns[1], - MajorTimeouts: ns[2], - BytesSent: ns[3], - BytesReceived: ns[4], - CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond, - CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond, - CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond, - }) - } - - return ops, s.Err() -} - -// parseNFSTransportStats parses a NFSTransportStats line using an input set of -// integer fields matched to a specific stats version. 
-func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { - switch statVersion { - case statVersion10: - if len(ss) != fieldTransport10Len { - return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) - } - case statVersion11: - if len(ss) != fieldTransport11Len { - return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) - } - default: - return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) - } - - // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay - // in a v1.0 response - ns := make([]int, 0, fieldTransport11Len) - for _, s := range ss { - n, err := strconv.Atoi(s) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - return &NFSTransportStats{ - Port: ns[0], - Bind: ns[1], - Connect: ns[2], - ConnectIdleTime: ns[3], - IdleTime: time.Duration(ns[4]) * time.Second, - Sends: ns[5], - Receives: ns[6], - BadTransactionIDs: ns[7], - CumulativeActiveRequests: ns[8], - CumulativeBacklog: ns[9], - MaximumRPCSlotsUsed: ns[10], - CumulativeSendingQueue: ns[11], - CumulativePendingQueue: ns[12], - }, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 8717e1fe0d..efc8502789 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -4,6 +4,7 @@ import ( "fmt" "io/ioutil" "os" + "path" "strconv" "strings" ) @@ -41,7 +42,7 @@ func NewProc(pid int) (Proc, error) { return fs.NewProc(pid) } -// AllProcs returns a list of all currently available processes under /proc. +// AllProcs returns a list of all currently avaible processes under /proc. func AllProcs() (Procs, error) { fs, err := NewFS(DefaultMountPoint) if err != nil { @@ -52,7 +53,7 @@ func AllProcs() (Procs, error) { // Self returns a process for the current process. func (fs FS) Self() (Proc, error) { - p, err := os.Readlink(fs.Path("self")) + p, err := fs.readlink("self") if err != nil { return Proc{}, err } @@ -65,15 +66,15 @@ func (fs FS) Self() (Proc, error) { // NewProc returns a process for the given pid. func (fs FS) NewProc(pid int) (Proc, error) { - if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { + if _, err := fs.stat(strconv.Itoa(pid)); err != nil { return Proc{}, err } return Proc{PID: pid, fs: fs}, nil } -// AllProcs returns a list of all currently available processes. +// AllProcs returns a list of all currently avaible processes. func (fs FS) AllProcs() (Procs, error) { - d, err := os.Open(fs.Path()) + d, err := fs.open("") if err != nil { return Procs{}, err } @@ -98,7 +99,7 @@ func (fs FS) AllProcs() (Procs, error) { // CmdLine returns the command line of a process. func (p Proc) CmdLine() ([]string, error) { - f, err := os.Open(p.path("cmdline")) + f, err := p.open("cmdline") if err != nil { return nil, err } @@ -116,25 +117,10 @@ func (p Proc) CmdLine() ([]string, error) { return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil } -// Comm returns the command name of a process. -func (p Proc) Comm() (string, error) { - f, err := os.Open(p.path("comm")) - if err != nil { - return "", err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return "", err - } - - return strings.TrimSpace(string(data)), nil -} - // Executable returns the absolute path of the executable command of a process. 
func (p Proc) Executable() (string, error) { - exe, err := os.Readlink(p.path("exe")) + exe, err := p.readlink("exe") + if os.IsNotExist(err) { return "", nil } @@ -172,7 +158,7 @@ func (p Proc) FileDescriptorTargets() ([]string, error) { targets := make([]string, len(names)) for i, name := range names { - target, err := os.Readlink(p.path("fd", name)) + target, err := p.readlink("fd/" + name) if err == nil { targets[i] = target } @@ -192,20 +178,8 @@ func (p Proc) FileDescriptorsLen() (int, error) { return len(fds), nil } -// MountStats retrieves statistics and configuration for mount points in a -// process's namespace. -func (p Proc) MountStats() ([]*Mount, error) { - f, err := os.Open(p.path("mountstats")) - if err != nil { - return nil, err - } - defer f.Close() - - return parseMountStats(f) -} - func (p Proc) fileDescriptors() ([]string, error) { - d, err := os.Open(p.path("fd")) + d, err := p.open("fd") if err != nil { return nil, err } @@ -219,6 +193,10 @@ func (p Proc) fileDescriptors() ([]string, error) { return names, nil } -func (p Proc) path(pa ...string) string { - return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) +func (p Proc) open(pa string) (*os.File, error) { + return p.fs.open(path.Join(strconv.Itoa(p.PID), pa)) +} + +func (p Proc) readlink(pa string) (string, error) { + return p.fs.readlink(path.Join(strconv.Itoa(p.PID), pa)) } diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go index b4e31d7ba3..7c6dc86970 100644 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -3,7 +3,6 @@ package procfs import ( "fmt" "io/ioutil" - "os" ) // ProcIO models the content of /proc//io. @@ -30,7 +29,7 @@ type ProcIO struct { func (p Proc) NewIO() (ProcIO, error) { pio := ProcIO{} - f, err := os.Open(p.path("io")) + f, err := p.open("io") if err != nil { return pio, err } diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index 2df997ce11..9f080b9f62 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -3,56 +3,29 @@ package procfs import ( "bufio" "fmt" - "os" "regexp" "strconv" ) // ProcLimits represents the soft limits for each of the process's resource -// limits. For more information see getrlimit(2): -// http://man7.org/linux/man-pages/man2/getrlimit.2.html. +// limits. type ProcLimits struct { - // CPU time limit in seconds. - CPUTime int - // Maximum size of files that the process may create. - FileSize int - // Maximum size of the process's data segment (initialized data, - // uninitialized data, and heap). - DataSize int - // Maximum size of the process stack in bytes. - StackSize int - // Maximum size of a core file. - CoreFileSize int - // Limit of the process's resident set in pages. - ResidentSet int - // Maximum number of processes that can be created for the real user ID of - // the calling process. - Processes int - // Value one greater than the maximum file descriptor number that can be - // opened by this process. - OpenFiles int - // Maximum number of bytes of memory that may be locked into RAM. - LockedMemory int - // Maximum size of the process's virtual memory address space in bytes. - AddressSpace int - // Limit on the combined number of flock(2) locks and fcntl(2) leases that - // this process may establish. 
- FileLocks int - // Limit of signals that may be queued for the real user ID of the calling - // process. - PendingSignals int - // Limit on the number of bytes that can be allocated for POSIX message - // queues for the real user ID of the calling process. - MsqqueueSize int - // Limit of the nice priority set using setpriority(2) or nice(2). - NicePriority int - // Limit of the real-time priority set using sched_setscheduler(2) or - // sched_setparam(2). + CPUTime int + FileSize int + DataSize int + StackSize int + CoreFileSize int + ResidentSet int + Processes int + OpenFiles int + LockedMemory int + AddressSpace int + FileLocks int + PendingSignals int + MsqqueueSize int + NicePriority int RealtimePriority int - // Limit (in microseconds) on the amount of CPU time that a process - // scheduled under a real-time scheduling policy may consume without making - // a blocking system call. - RealtimeTimeout int + RealtimeTimeout int } const ( @@ -66,7 +39,7 @@ var ( // NewLimits returns the current soft limits of the process. func (p Proc) NewLimits() (ProcLimits, error) { - f, err := os.Open(p.path("limits")) + f, err := p.open("limits") if err != nil { return ProcLimits{}, err } @@ -87,7 +60,7 @@ func (p Proc) NewLimits() (ProcLimits, error) { case "Max cpu time": l.CPUTime, err = parseInt(fields[1]) case "Max file size": - l.FileSize, err = parseInt(fields[1]) + l.FileLocks, err = parseInt(fields[1]) case "Max data size": l.DataSize, err = parseInt(fields[1]) case "Max stack size": @@ -117,6 +90,7 @@ func (p Proc) NewLimits() (ProcLimits, error) { case "Max realtime timeout": l.RealtimeTimeout, err = parseInt(fields[1]) } + if err != nil { return ProcLimits{}, err } diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 724e271b9e..30a403b6c7 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -7,15 +7,15 @@ import ( "os" ) -// Originally, this USER_HZ value was dynamically retrieved via a sysconf call -// which required cgo. However, that caused a lot of problems regarding +// Originally, this USER_HZ value was dynamically retrieved via a sysconf call which +// required cgo. However, that caused a lot of problems regarding // cross-compilation. Alternatives such as running a binary to determine the -// value, or trying to derive it in some other way were all problematic. After -// much research it was determined that USER_HZ is actually hardcoded to 100 on -// all Go-supported platforms as of the time of this writing. This is why we -// decided to hardcode it here as well. It is not impossible that there could -// be systems with exceptions, but they should be very exotic edge cases, and -// in that case, the worst outcome will be two misreported metrics. +// value, or trying to derive it in some other way were all problematic. +// After much research it was determined that USER_HZ is actually hardcoded to +// 100 on all Go-supported platforms as of the time of this writing. This is +// why we decided to hardcode it here as well. It is not impossible that there +// could be systems with exceptions, but they should be very exotic edge cases, +// and in that case, the worst outcome will be two misreported metrics. // // See also the following discussions: // @@ -91,7 +91,7 @@ type ProcStat struct { // NewStat returns the current status information of the process. 
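A tiny worked illustration of the hard-coded USER_HZ discussed in the proc_stat.go comment above: /proc/[pid]/stat reports CPU times in clock ticks, and dividing by USER_HZ (taken as 100) converts them to seconds. The 250-tick figure below is made up for the example.

```
package main

import "fmt"

func main() {
	// USER_HZ is hardcoded to 100, per the comment above, so a combined
	// utime+stime reading of 250 clock ticks corresponds to 2.5 seconds.
	const userHZ = 100
	ticks := 250
	fmt.Printf("%d ticks = %.2f s of CPU time\n", ticks, float64(ticks)/userHZ)
}
```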
func (p Proc) NewStat() (ProcStat, error) { - f, err := os.Open(p.path("stat")) + f, err := p.open("stat") if err != nil { return ProcStat{}, err } diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 1ca217e8c7..26fefb0fa0 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -3,7 +3,6 @@ package procfs import ( "bufio" "fmt" - "os" "strconv" "strings" ) @@ -26,7 +25,7 @@ func NewStat() (Stat, error) { // NewStat returns an information about current kernel/system statistics. func (fs FS) NewStat() (Stat, error) { - f, err := os.Open(fs.Path("stat")) + f, err := fs.open("stat") if err != nil { return Stat{}, err } diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore deleted file mode 100644 index 1b8c7c2611..0000000000 --- a/vendor/github.com/spf13/cobra/.gitignore +++ /dev/null @@ -1,36 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore -# swap -[._]*.s[a-w][a-z] -[._]s[a-w][a-z] -# session -Session.vim -# temporary -.netrwhist -*~ -# auto-generated tag files -tags - -*.exe - -cobra.test diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap deleted file mode 100644 index 94ec53068a..0000000000 --- a/vendor/github.com/spf13/cobra/.mailmap +++ /dev/null @@ -1,3 +0,0 @@ -Steve Francia -Bjørn Erik Pedersen -Fabiano Franz diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml deleted file mode 100644 index 6e84be54d1..0000000000 --- a/vendor/github.com/spf13/cobra/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go -go: - - 1.4.3 - - 1.5.4 - - 1.6.3 - - tip - -matrix: - allow_failures: - - go: tip - -before_install: - - mkdir -p bin - - curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.4.3/shellcheck - - chmod +x bin/shellcheck -script: - - PATH=$PATH:$PWD/bin go test -v ./... - - go build diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt deleted file mode 100644 index 298f0e2665..0000000000 --- a/vendor/github.com/spf13/cobra/LICENSE.txt +++ /dev/null @@ -1,174 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md deleted file mode 100644 index b338a0e442..0000000000 --- a/vendor/github.com/spf13/cobra/README.md +++ /dev/null @@ -1,898 +0,0 @@ -![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png) - -Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files. 
- -Many of the most widely used Go projects are built using Cobra including: - -* [Kubernetes](http://kubernetes.io/) -* [Hugo](http://gohugo.io) -* [rkt](https://github.com/coreos/rkt) -* [etcd](https://github.com/coreos/etcd) -* [Docker (distribution)](https://github.com/docker/distribution) -* [OpenShift](https://www.openshift.com/) -* [Delve](https://github.com/derekparker/delve) -* [GopherJS](http://www.gopherjs.org/) -* [CockroachDB](http://www.cockroachlabs.com/) -* [Bleve](http://www.blevesearch.com/) -* [ProjectAtomic (enterprise)](http://www.projectatomic.io/) -* [Parse (CLI)](https://parse.com/) -* [GiantSwarm's swarm](https://github.com/giantswarm/cli) -* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) - - -[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) -[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra) -[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra) - -![cobra](https://cloud.githubusercontent.com/assets/173412/10911369/84832a8e-8212-11e5-9f82-cc96660a4794.gif) - -# Overview - -Cobra is a library providing a simple interface to create powerful modern CLI -interfaces similar to git & go tools. - -Cobra is also an application that will generate your application scaffolding to rapidly -develop a Cobra-based application. - -Cobra provides: -* Easy subcommand-based CLIs: `app server`, `app fetch`, etc. -* Fully POSIX-compliant flags (including short & long versions) -* Nested subcommands -* Global, local and cascading flags -* Easy generation of applications & commands with `cobra create appname` & `cobra add cmdname` -* Intelligent suggestions (`app srver`... did you mean `app server`?) -* Automatic help generation for commands and flags -* Automatic detailed help for `app help [command]` -* Automatic help flag recognition of `-h`, `--help`, etc. -* Automatically generated bash autocomplete for your application -* Automatically generated man pages for your application -* Command aliases so you can change things without breaking them -* The flexibilty to define your own help, usage, etc. -* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps - -Cobra has an exceptionally clean interface and simple design without needless -constructors or initialization methods. - -Applications built with Cobra commands are designed to be as user-friendly as -possible. Flags can be placed before or after the command (as long as a -confusing space isn’t provided). Both short and long flags can be used. A -command need not even be fully typed. Help is automatically generated and -available for the application or for a specific command using either the help -command or the `--help` flag. - -# Concepts - -Cobra is built on a structure of commands, arguments & flags. - -**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions. - -The best applications will read like sentences when used. Users will know how -to use the application because they will natively understand how to use it. - -The pattern to follow is -`APPNAME VERB NOUN --ADJECTIVE.` - or -`APPNAME COMMAND ARG --FLAG` - -A few good real world examples may better illustrate this point. 
- -In the following example, 'server' is a command, and 'port' is a flag: - - > hugo server --port=1313 - -In this command we are telling Git to clone the url bare. - - > git clone URL --bare - -## Commands - -Command is the central point of the application. Each interaction that -the application supports will be contained in a Command. A command can -have children commands and optionally run an action. - -In the example above, 'server' is the command. - -A Command has the following structure: - -```go -type Command struct { - Use string // The one-line usage message. - Short string // The short description shown in the 'help' output. - Long string // The long message shown in the 'help ' output. - Run func(cmd *Command, args []string) // Run runs the command. -} -``` - -## Flags - -A Flag is a way to modify the behavior of a command. Cobra supports -fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/). -A Cobra command can define flags that persist through to children commands -and flags that are only available to that command. - -In the example above, 'port' is the flag. - -Flag functionality is provided by the [pflag -library](https://github.com/ogier/pflag), a fork of the flag standard library -which maintains the same interface while adding POSIX compliance. - -## Usage - -Cobra works by creating a set of commands and then organizing them into a tree. -The tree defines the structure of the application. - -Once each command is defined with its corresponding flags, then the -tree is assigned to the commander which is finally executed. - -# Installing -Using Cobra is easy. First, use `go get` to install the latest version -of the library. This command will install the `cobra` generator executible -along with the library: - - > go get -v github.com/spf13/cobra/cobra - -Next, include Cobra in your application: - -```go -import "github.com/spf13/cobra" -``` - -# Getting Started - -While you are welcome to provide your own organization, typically a Cobra based -application will follow the following organizational structure. - -``` - ▾ appName/ - ▾ cmd/ - add.go - your.go - commands.go - here.go - main.go -``` - -In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. - -```go -package main - -import "{pathToYourApp}/cmd" - -func main() { - if err := cmd.RootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(-1) - } -} -``` - -## Using the Cobra Generator - -Cobra provides its own program that will create your application and add any -commands you want. It's the easiest way to incorporate Cobra into your application. - -In order to use the cobra command, compile it using the following command: - - > go install github.com/spf13/cobra/cobra - -This will create the cobra executable under your go path bin directory! - -### cobra init - -The `cobra init [yourApp]` command will create your initial application code -for you. It is a very powerful application that will populate your program with -the right structure so you can immediately enjoy all the benefits of Cobra. It -will also automatically apply the license you specify to your application. - -Cobra init is pretty smart. You can provide it a full path, or simply a path -similar to what is expected in the import. - -``` -cobra init github.com/spf13/newAppName -``` - -### cobra add - -Once an application is initialized Cobra can create additional commands for you. 
-Let's say you created an app and you wanted the following commands for it: - -* app serve -* app config -* app config create - -In your project directory (where your main.go file is) you would run the following: - -``` -cobra add serve -cobra add config -cobra add create -p 'configCmd' -``` - -Once you have run these three commands you would have an app structure that would look like: - -``` - ▾ app/ - ▾ cmd/ - serve.go - config.go - create.go - main.go -``` - -at this point you can run `go run main.go` and it would run your app. `go run -main.go serve`, `go run main.go config`, `go run main.go config create` along -with `go run main.go help serve`, etc would all work. - -Obviously you haven't added your own code to these yet, the commands are ready -for you to give them their tasks. Have fun. - -### Configuring the cobra generator - -The cobra generator will be easier to use if you provide a simple configuration -file which will help you eliminate providing a bunch of repeated information in -flags over and over. - -An example ~/.cobra.yaml file: - -```yaml -author: Steve Francia -license: MIT -``` - -You can specify no license by setting `license` to `none` or you can specify -a custom license: - -```yaml -license: - header: This file is part of {{ .appName }}. - text: | - {{ .copyright }} - - This is my license. There are many like it, but this one is mine. - My license is my best friend. It is my life. I must master it as I must - master my life. -``` - -## Manually implementing Cobra - -To manually implement cobra you need to create a bare main.go file and a RootCmd file. -You will optionally provide additional commands as you see fit. - -### Create the root command - -The root command represents your binary itself. - - -#### Manually create rootCmd - -Cobra doesn't require any special constructors. Simply create your commands. - -Ideally you place this in app/cmd/root.go: - -```go -var RootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at http://hugo.spf13.com`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, -} -``` - -You will additionally define flags and handle configuration in your init() function. - -for example cmd/root.go: - -```go -func init() { - cobra.OnInitialize(initConfig) - RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") - RootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/") - RootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution") - RootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)") - RootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration") - viper.BindPFlag("author", RootCmd.PersistentFlags().Lookup("author")) - viper.BindPFlag("projectbase", RootCmd.PersistentFlags().Lookup("projectbase")) - viper.BindPFlag("useViper", RootCmd.PersistentFlags().Lookup("viper")) - viper.SetDefault("author", "NAME HERE ") - viper.SetDefault("license", "apache") -} -``` - -### Create your main.go - -With the root command you need to have your main function execute it. -Execute should be run on the root for clarity, though it can be called on any command. 
- -In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. - -```go -package main - -import "{pathToYourApp}/cmd" - -func main() { - if err := cmd.RootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(-1) - } -} -``` - - -### Create additional commands - -Additional commands can be defined and typically are each given their own file -inside of the cmd/ directory. - -If you wanted to create a version command you would create cmd/version.go and -populate it with the following: - -```go -package cmd - -import ( - "github.com/spf13/cobra" -) - -func init() { - RootCmd.AddCommand(versionCmd) -} - -var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of Hugo", - Long: `All software has versions. This is Hugo's`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") - }, -} -``` - -### Attach command to its parent - - -If you notice in the above example we attach the command to its parent. In -this case the parent is the rootCmd. In this example we are attaching it to the -root, but commands can be attached at any level. - -```go -RootCmd.AddCommand(versionCmd) -``` - -### Remove a command from its parent - -Removing a command is not a common action in simple programs, but it allows 3rd -parties to customize an existing command tree. - -In this example, we remove the existing `VersionCmd` command of an existing -root command, and we replace it with our own version: - -```go -mainlib.RootCmd.RemoveCommand(mainlib.VersionCmd) -mainlib.RootCmd.AddCommand(versionCmd) -``` - -## Working with Flags - -Flags provide modifiers to control how the action command operates. - -### Assign flags to a command - -Since the flags are defined and used in different locations, we need to -define a variable outside with the correct scope to assign the flag to -work with. - -```go -var Verbose bool -var Source string -``` - -There are two different approaches to assign a flag. - -### Persistent Flags - -A flag can be 'persistent' meaning that this flag will be available to the -command it's assigned to as well as every command under that command. For -global flags, assign a flag as a persistent flag on the root. - -```go -RootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") -``` - -### Local Flags - -A flag can also be assigned locally which will only apply to that specific command. - -```go -RootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") -``` - - -## Example - -In the example below, we have defined three commands. Two are at the top level -and one (cmdTimes) is a child of one of the top commands. In this case the root -is not executable meaning that a subcommand is required. This is accomplished -by not providing a 'Run' for the 'rootCmd'. - -We have only defined one flag for a single command. - -More documentation about flags is available at https://github.com/spf13/pflag - -```go -package main - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" -) - -func main() { - - var echoTimes int - - var cmdPrint = &cobra.Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `print is for printing anything back to the screen. - For many years people have printed back to the screen. 
- `, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdEcho = &cobra.Command{ - Use: "echo [string to echo]", - Short: "Echo anything to the screen", - Long: `echo is for echoing anything back. - Echo works a lot like print, except it has a child command. - `, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdTimes = &cobra.Command{ - Use: "times [# times] [string to echo]", - Short: "Echo anything to the screen more times", - Long: `echo things multiple times back to the user by providing - a count and a string.`, - Run: func(cmd *cobra.Command, args []string) { - for i := 0; i < echoTimes; i++ { - fmt.Println("Echo: " + strings.Join(args, " ")) - } - }, - } - - cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") - - var rootCmd = &cobra.Command{Use: "app"} - rootCmd.AddCommand(cmdPrint, cmdEcho) - cmdEcho.AddCommand(cmdTimes) - rootCmd.Execute() -} -``` - -For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/). - -## The Help Command - -Cobra automatically adds a help command to your application when you have subcommands. -This will be called when a user runs 'app help'. Additionally, help will also -support all other commands as input. Say, for instance, you have a command called -'create' without any additional configuration; Cobra will work when 'app help -create' is called. Every command will automatically have the '--help' flag added. - -### Example - -The following output is automatically generated by Cobra. Nothing beyond the -command and flag definitions are needed. - - > hugo help - - hugo is the main command, used to build your Hugo site. - - Hugo is a Fast and Flexible Static Site Generator - built with love by spf13 and friends in Go. - - Complete documentation is available at http://gohugo.io/. - - Usage: - hugo [flags] - hugo [command] - - Available Commands: - server Hugo runs its own webserver to render the files - version Print the version number of Hugo - config Print the site configuration - check Check content in the source directory - benchmark Benchmark hugo by building a site a number of times. - convert Convert your content to different formats - new Create new content for your site - list Listing out various types of content - undraft Undraft changes the content's draft status from 'True' to 'False' - genautocomplete Generate shell autocompletion script for Hugo - gendoc Generate Markdown documentation for the Hugo CLI. - genman Generate man page for Hugo - import Import your site from others. - - Flags: - -b, --baseURL="": hostname (and path) to the root, e.g. http://spf13.com/ - -D, --buildDrafts[=false]: include content marked as draft - -F, --buildFuture[=false]: include content with publishdate in the future - --cacheDir="": filesystem path to cache directory. 
Defaults: $TMPDIR/hugo_cache/ - --canonifyURLs[=false]: if true, all relative URLs will be canonicalized using baseURL - --config="": config file (default is path/config.yaml|json|toml) - -d, --destination="": filesystem path to write files to - --disableRSS[=false]: Do not build RSS files - --disableSitemap[=false]: Do not build Sitemap file - --editor="": edit new content with this editor, if provided - --ignoreCache[=false]: Ignores the cache directory for reading but still writes to it - --log[=false]: Enable Logging - --logFile="": Log File path (if set, logging enabled automatically) - --noTimes[=false]: Don't sync modification time of files - --pluralizeListTitles[=true]: Pluralize titles in lists using inflect - --preserveTaxonomyNames[=false]: Preserve taxonomy names as written ("Gérard Depardieu" vs "gerard-depardieu") - -s, --source="": filesystem path to read files relative from - --stepAnalysis[=false]: display memory and timing of different steps of the program - -t, --theme="": theme to use (located in /themes/THEMENAME/) - --uglyURLs[=false]: if true, use /filename.html instead of /filename/ - -v, --verbose[=false]: verbose output - --verboseLog[=false]: verbose logging - -w, --watch[=false]: watch filesystem for changes and recreate as needed - - Use "hugo [command] --help" for more information about a command. - - -Help is just a command like any other. There is no special logic or behavior -around it. In fact, you can provide your own if you want. - -### Defining your own help - -You can provide your own Help command or your own template for the default command to use. - -The default help command is - -```go -func (c *Command) initHelp() { - if c.helpCommand == nil { - c.helpCommand = &Command{ - Use: "help [command]", - Short: "Help about any command", - Long: `Help provides help for any command in the application. - Simply type ` + c.Name() + ` help [path to command] for full details.`, - Run: c.HelpFunc(), - } - } - c.AddCommand(c.helpCommand) -} -``` - -You can provide your own command, function or template through the following methods: - -```go -command.SetHelpCommand(cmd *Command) - -command.SetHelpFunc(f func(*Command, []string)) - -command.SetHelpTemplate(s string) -``` - -The latter two will also apply to any children commands. - -## Usage - -When the user provides an invalid flag or invalid command, Cobra responds by -showing the user the 'usage'. - -### Example -You may recognize this from the help above. That's because the default help -embeds the usage as part of its output. - - Usage: - hugo [flags] - hugo [command] - - Available Commands: - server Hugo runs its own webserver to render the files - version Print the version number of Hugo - config Print the site configuration - check Check content in the source directory - benchmark Benchmark hugo by building a site a number of times. - convert Convert your content to different formats - new Create new content for your site - list Listing out various types of content - undraft Undraft changes the content's draft status from 'True' to 'False' - genautocomplete Generate shell autocompletion script for Hugo - gendoc Generate Markdown documentation for the Hugo CLI. - genman Generate man page for Hugo - import Import your site from others. - - Flags: - -b, --baseURL="": hostname (and path) to the root, e.g. http://spf13.com/ - -D, --buildDrafts[=false]: include content marked as draft - -F, --buildFuture[=false]: include content with publishdate in the future - --cacheDir="": filesystem path to cache directory. 
Defaults: $TMPDIR/hugo_cache/ - --canonifyURLs[=false]: if true, all relative URLs will be canonicalized using baseURL - --config="": config file (default is path/config.yaml|json|toml) - -d, --destination="": filesystem path to write files to - --disableRSS[=false]: Do not build RSS files - --disableSitemap[=false]: Do not build Sitemap file - --editor="": edit new content with this editor, if provided - --ignoreCache[=false]: Ignores the cache directory for reading but still writes to it - --log[=false]: Enable Logging - --logFile="": Log File path (if set, logging enabled automatically) - --noTimes[=false]: Don't sync modification time of files - --pluralizeListTitles[=true]: Pluralize titles in lists using inflect - --preserveTaxonomyNames[=false]: Preserve taxonomy names as written ("Gérard Depardieu" vs "gerard-depardieu") - -s, --source="": filesystem path to read files relative from - --stepAnalysis[=false]: display memory and timing of different steps of the program - -t, --theme="": theme to use (located in /themes/THEMENAME/) - --uglyURLs[=false]: if true, use /filename.html instead of /filename/ - -v, --verbose[=false]: verbose output - --verboseLog[=false]: verbose logging - -w, --watch[=false]: watch filesystem for changes and recreate as needed - -### Defining your own usage -You can provide your own usage function or template for Cobra to use. - -The default usage function is: - -```go -return func(c *Command) error { - err := tmpl(c.Out(), c.UsageTemplate(), c) - return err -} -``` - -Like help, the function and template are overridable through public methods: - -```go -command.SetUsageFunc(f func(*Command) error) - -command.SetUsageTemplate(s string) -``` - -## PreRun or PostRun Hooks - -It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherrited by children if they do not declare their own. These function are run in the following order: - -- `PersistentPreRun` -- `PreRun` -- `Run` -- `PostRun` -- `PersistentPostRun` - -An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: - -```go -package main - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func main() { - - var rootCmd = &cobra.Command{ - Use: "root [sub]", - Short: "My root command", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) - }, - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) - }, - } - - var subCmd = &cobra.Command{ - Use: "sub [no options!]", - Short: "My subcommand", - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) - }, - } - - rootCmd.AddCommand(subCmd) - - rootCmd.SetArgs([]string{""}) - _ = rootCmd.Execute() - fmt.Print("\n") - rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) - _ = rootCmd.Execute() -} -``` - - -## Alternative Error Handling - -Cobra also has functions where the return signature is an error. This allows for errors to bubble up to the top, -providing a way to handle the errors in one location. The current list of functions that return an error is: - -* PersistentPreRunE -* PreRunE -* RunE -* PostRunE -* PersistentPostRunE - -If you would like to silence the default `error` and `usage` output in favor of your own, you can set `SilenceUsage` -and `SilenceErrors` to `false` on the command. A child command respects these flags if they are set on the parent -command. - -**Example Usage using RunE:** - -```go -package main - -import ( - "errors" - "log" - - "github.com/spf13/cobra" -) - -func main() { - var rootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at http://hugo.spf13.com`, - RunE: func(cmd *cobra.Command, args []string) error { - // Do Stuff Here - return errors.New("some random error") - }, - } - - if err := rootCmd.Execute(); err != nil { - log.Fatal(err) - } -} -``` - -## Suggestions when "unknown command" happens - -Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: - -``` -$ hugo srever -Error: unknown command "srever" for "hugo" - -Did you mean this? - server - -Run 'hugo --help' for usage. -``` - -Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. 
- -If you need to disable suggestions or tweak the string distance in your command, use: - -```go -command.DisableSuggestions = true -``` - -or - -```go -command.SuggestionsMinimumDistance = 1 -``` - -You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: - -``` -$ kubectl remove -Error: unknown command "remove" for "kubectl" - -Did you mean this? - delete - -Run 'kubectl help' for usage. -``` - -## Generating Markdown-formatted documentation for your command - -Cobra can generate a Markdown-formatted document based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Markdown Docs](doc/md_docs.md). - -## Generating man pages for your command - -Cobra can generate a man page based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Man Docs](doc/man_docs.md). - -## Generating bash completions for your command - -Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md). - -## Debugging - -Cobra provides a ‘DebugFlags’ method on a command which, when called, will print -out everything Cobra knows about the flags for each command. - -### Example - -```go -command.DebugFlags() -``` - -## Release Notes -* **0.9.0** June 17, 2014 - * flags can appears anywhere in the args (provided they are unambiguous) - * --help prints usage screen for app or command - * Prefix matching for commands - * Cleaner looking help and usage output - * Extensive test suite -* **0.8.0** Nov 5, 2013 - * Reworked interface to remove commander completely - * Command now primary structure - * No initialization needed - * Usage & Help templates & functions definable at any level - * Updated Readme -* **0.7.0** Sept 24, 2013 - * Needs more eyes - * Test suite - * Support for automatic error messages - * Support for help command - * Support for printing to any io.Writer instead of os.Stderr - * Support for persistent flags which cascade down tree - * Ready for integration into Hugo -* **0.1.0** Sept 3, 2013 - * Implement first draft - -## Extensions - -Libraries for extending Cobra: - -* [cmdns](https://github.com/gosuri/cmdns): Enables name spacing a command's immediate children. It provides an alternative way to structure subcommands, similar to `heroku apps:create` and `ovrclk clusters:launch`. - -## ToDo -* Launch proper documentation site - -## Contributing - -1. Fork it -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Commit your changes (`git commit -am 'Add some feature'`) -4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request - -## Contributors - -Names in no particular order: - -* [spf13](https://github.com/spf13), -[eparis](https://github.com/eparis), -[bep](https://github.com/bep), and many more! - -## License - -Cobra is released under the Apache 2.0 license. 
See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) - - -[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/spf13/cobra/trend.png)](https://bitdeli.com/free "Bitdeli Badge") diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go deleted file mode 100644 index 236dee67f2..0000000000 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ /dev/null @@ -1,630 +0,0 @@ -package cobra - -import ( - "fmt" - "io" - "os" - "sort" - "strings" - - "github.com/spf13/pflag" -) - -const ( - BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extentions" - BashCompCustom = "cobra_annotation_bash_completion_custom" - BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag" - BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir" -) - -func preamble(out io.Writer, name string) error { - _, err := fmt.Fprintf(out, "# bash completion for %-36s -*- shell-script -*-\n", name) - if err != nil { - return err - } - _, err = fmt.Fprint(out, ` -__debug() -{ - if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then - echo "$*" >> "${BASH_COMP_DEBUG_FILE}" - fi -} - -# Homebrew on Macs have version 1.3 of bash-completion which doesn't include -# _init_completion. This is a very minimal version of that function. -__my_init_completion() -{ - COMPREPLY=() - _get_comp_words_by_ref "$@" cur prev words cword -} - -__index_of_word() -{ - local w word=$1 - shift - index=0 - for w in "$@"; do - [[ $w = "$word" ]] && return - index=$((index+1)) - done - index=-1 -} - -__contains_word() -{ - local w word=$1; shift - for w in "$@"; do - [[ $w = "$word" ]] && return - done - return 1 -} - -__handle_reply() -{ - __debug "${FUNCNAME[0]}" - case $cur in - -*) - if [[ $(type -t compopt) = "builtin" ]]; then - compopt -o nospace - fi - local allflags - if [ ${#must_have_one_flag[@]} -ne 0 ]; then - allflags=("${must_have_one_flag[@]}") - else - allflags=("${flags[*]} ${two_word_flags[*]}") - fi - COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") ) - if [[ $(type -t compopt) = "builtin" ]]; then - [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace - fi - - # complete after --flag=abc - if [[ $cur == *=* ]]; then - if [[ $(type -t compopt) = "builtin" ]]; then - compopt +o nospace - fi - - local index flag - flag="${cur%%=*}" - __index_of_word "${flag}" "${flags_with_completion[@]}" - if [[ ${index} -ge 0 ]]; then - COMPREPLY=() - PREFIX="" - cur="${cur#*=}" - ${flags_completion[${index}]} - if [ -n "${ZSH_VERSION}" ]; then - # zfs completion needs --flag= prefix - eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" - fi - fi - fi - return 0; - ;; - esac - - # check if we are handling a flag with special work handling - local index - __index_of_word "${prev}" "${flags_with_completion[@]}" - if [[ ${index} -ge 0 ]]; then - ${flags_completion[${index}]} - return - fi - - # we are parsing a flag and don't have a special handler, no completion - if [[ ${cur} != "${words[cword]}" ]]; then - return - fi - - local completions - completions=("${commands[@]}") - if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then - completions=("${must_have_one_noun[@]}") - fi - if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then - completions+=("${must_have_one_flag[@]}") - fi - COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") ) - - if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then - COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") ) - fi - - if [[ ${#COMPREPLY[@]} -eq 0 
]]; then - declare -F __custom_func >/dev/null && __custom_func - fi - - __ltrim_colon_completions "$cur" -} - -# The arguments should be in the form "ext1|ext2|extn" -__handle_filename_extension_flag() -{ - local ext="$1" - _filedir "@(${ext})" -} - -__handle_subdirs_in_dir_flag() -{ - local dir="$1" - pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 -} - -__handle_flag() -{ - __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - # if a command required a flag, and we found it, unset must_have_one_flag() - local flagname=${words[c]} - local flagvalue - # if the word contained an = - if [[ ${words[c]} == *"="* ]]; then - flagvalue=${flagname#*=} # take in as flagvalue after the = - flagname=${flagname%%=*} # strip everything after the = - flagname="${flagname}=" # but put the = back - fi - __debug "${FUNCNAME[0]}: looking for ${flagname}" - if __contains_word "${flagname}" "${must_have_one_flag[@]}"; then - must_have_one_flag=() - fi - - # if you set a flag which only applies to this command, don't show subcommands - if __contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then - commands=() - fi - - # keep flag value with flagname as flaghash - if [ -n "${flagvalue}" ] ; then - flaghash[${flagname}]=${flagvalue} - elif [ -n "${words[ $((c+1)) ]}" ] ; then - flaghash[${flagname}]=${words[ $((c+1)) ]} - else - flaghash[${flagname}]="true" # pad "true" for bool flag - fi - - # skip the argument to a two word flag - if __contains_word "${words[c]}" "${two_word_flags[@]}"; then - c=$((c+1)) - # if we are looking for a flags value, don't show commands - if [[ $c -eq $cword ]]; then - commands=() - fi - fi - - c=$((c+1)) - -} - -__handle_noun() -{ - __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - if __contains_word "${words[c]}" "${must_have_one_noun[@]}"; then - must_have_one_noun=() - elif __contains_word "${words[c]}" "${noun_aliases[@]}"; then - must_have_one_noun=() - fi - - nouns+=("${words[c]}") - c=$((c+1)) -} - -__handle_command() -{ - __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - local next_command - if [[ -n ${last_command} ]]; then - next_command="_${last_command}_${words[c]//:/__}" - else - if [[ $c -eq 0 ]]; then - next_command="_$(basename "${words[c]//:/__}")" - else - next_command="_${words[c]//:/__}" - fi - fi - c=$((c+1)) - __debug "${FUNCNAME[0]}: looking for ${next_command}" - declare -F $next_command >/dev/null && $next_command -} - -__handle_word() -{ - if [[ $c -ge $cword ]]; then - __handle_reply - return - fi - __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - if [[ "${words[c]}" == -* ]]; then - __handle_flag - elif __contains_word "${words[c]}" "${commands[@]}"; then - __handle_command - elif [[ $c -eq 0 ]] && __contains_word "$(basename "${words[c]}")" "${commands[@]}"; then - __handle_command - else - __handle_noun - fi - __handle_word -} - -`) - return err -} - -func postscript(w io.Writer, name string) error { - name = strings.Replace(name, ":", "__", -1) - _, err := fmt.Fprintf(w, "__start_%s()\n", name) - if err != nil { - return err - } - _, err = fmt.Fprintf(w, `{ - local cur prev words cword - declare -A flaghash 2>/dev/null || : - if declare -F _init_completion >/dev/null 2>&1; then - _init_completion -s || return - else - __my_init_completion -n "=" || return - fi - - local c=0 - local flags=() - local two_word_flags=() - local local_nonpersistent_flags=() - local flags_with_completion=() - local flags_completion=() - local commands=("%s") - local must_have_one_flag=() - local 
must_have_one_noun=() - local last_command - local nouns=() - - __handle_word -} - -`, name) - if err != nil { - return err - } - _, err = fmt.Fprintf(w, `if [[ $(type -t compopt) = "builtin" ]]; then - complete -o default -F __start_%s %s -else - complete -o default -o nospace -F __start_%s %s -fi - -`, name, name, name, name) - if err != nil { - return err - } - _, err = fmt.Fprintf(w, "# ex: ts=4 sw=4 et filetype=sh\n") - return err -} - -func writeCommands(cmd *Command, w io.Writer) error { - if _, err := fmt.Fprintf(w, " commands=()\n"); err != nil { - return err - } - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c == cmd.helpCommand { - continue - } - if _, err := fmt.Fprintf(w, " commands+=(%q)\n", c.Name()); err != nil { - return err - } - } - _, err := fmt.Fprintf(w, "\n") - return err -} - -func writeFlagHandler(name string, annotations map[string][]string, w io.Writer) error { - for key, value := range annotations { - switch key { - case BashCompFilenameExt: - _, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name) - if err != nil { - return err - } - - if len(value) > 0 { - ext := "__handle_filename_extension_flag " + strings.Join(value, "|") - _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) - } else { - ext := "_filedir" - _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) - } - if err != nil { - return err - } - case BashCompCustom: - _, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name) - if err != nil { - return err - } - if len(value) > 0 { - handlers := strings.Join(value, "; ") - _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", handlers) - } else { - _, err = fmt.Fprintf(w, " flags_completion+=(:)\n") - } - if err != nil { - return err - } - case BashCompSubdirsInDir: - _, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name) - - if len(value) == 1 { - ext := "__handle_subdirs_in_dir_flag " + value[0] - _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) - } else { - ext := "_filedir -d" - _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) - } - if err != nil { - return err - } - } - } - return nil -} - -func writeShortFlag(flag *pflag.Flag, w io.Writer) error { - b := (len(flag.NoOptDefVal) > 0) - name := flag.Shorthand - format := " " - if !b { - format += "two_word_" - } - format += "flags+=(\"-%s\")\n" - if _, err := fmt.Fprintf(w, format, name); err != nil { - return err - } - return writeFlagHandler("-"+name, flag.Annotations, w) -} - -func writeFlag(flag *pflag.Flag, w io.Writer) error { - b := (len(flag.NoOptDefVal) > 0) - name := flag.Name - format := " flags+=(\"--%s" - if !b { - format += "=" - } - format += "\")\n" - if _, err := fmt.Fprintf(w, format, name); err != nil { - return err - } - return writeFlagHandler("--"+name, flag.Annotations, w) -} - -func writeLocalNonPersistentFlag(flag *pflag.Flag, w io.Writer) error { - b := (len(flag.NoOptDefVal) > 0) - name := flag.Name - format := " local_nonpersistent_flags+=(\"--%s" - if !b { - format += "=" - } - format += "\")\n" - if _, err := fmt.Fprintf(w, format, name); err != nil { - return err - } - return nil -} - -func writeFlags(cmd *Command, w io.Writer) error { - _, err := fmt.Fprintf(w, ` flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - -`) - if err != nil { - return err - } - localNonPersistentFlags := cmd.LocalNonPersistentFlags() - var visitErr error - cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - if err := writeFlag(flag, w); err != nil { - 
visitErr = err - return - } - if len(flag.Shorthand) > 0 { - if err := writeShortFlag(flag, w); err != nil { - visitErr = err - return - } - } - if localNonPersistentFlags.Lookup(flag.Name) != nil { - if err := writeLocalNonPersistentFlag(flag, w); err != nil { - visitErr = err - return - } - } - }) - if visitErr != nil { - return visitErr - } - cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { - if err := writeFlag(flag, w); err != nil { - visitErr = err - return - } - if len(flag.Shorthand) > 0 { - if err := writeShortFlag(flag, w); err != nil { - visitErr = err - return - } - } - }) - if visitErr != nil { - return visitErr - } - - _, err = fmt.Fprintf(w, "\n") - return err -} - -func writeRequiredFlag(cmd *Command, w io.Writer) error { - if _, err := fmt.Fprintf(w, " must_have_one_flag=()\n"); err != nil { - return err - } - flags := cmd.NonInheritedFlags() - var visitErr error - flags.VisitAll(func(flag *pflag.Flag) { - for key := range flag.Annotations { - switch key { - case BashCompOneRequiredFlag: - format := " must_have_one_flag+=(\"--%s" - b := (flag.Value.Type() == "bool") - if !b { - format += "=" - } - format += "\")\n" - if _, err := fmt.Fprintf(w, format, flag.Name); err != nil { - visitErr = err - return - } - - if len(flag.Shorthand) > 0 { - if _, err := fmt.Fprintf(w, " must_have_one_flag+=(\"-%s\")\n", flag.Shorthand); err != nil { - visitErr = err - return - } - } - } - } - }) - return visitErr -} - -func writeRequiredNouns(cmd *Command, w io.Writer) error { - if _, err := fmt.Fprintf(w, " must_have_one_noun=()\n"); err != nil { - return err - } - sort.Sort(sort.StringSlice(cmd.ValidArgs)) - for _, value := range cmd.ValidArgs { - if _, err := fmt.Fprintf(w, " must_have_one_noun+=(%q)\n", value); err != nil { - return err - } - } - return nil -} - -func writeArgAliases(cmd *Command, w io.Writer) error { - if _, err := fmt.Fprintf(w, " noun_aliases=()\n"); err != nil { - return err - } - sort.Sort(sort.StringSlice(cmd.ArgAliases)) - for _, value := range cmd.ArgAliases { - if _, err := fmt.Fprintf(w, " noun_aliases+=(%q)\n", value); err != nil { - return err - } - } - return nil -} - -func gen(cmd *Command, w io.Writer) error { - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c == cmd.helpCommand { - continue - } - if err := gen(c, w); err != nil { - return err - } - } - commandName := cmd.CommandPath() - commandName = strings.Replace(commandName, " ", "_", -1) - commandName = strings.Replace(commandName, ":", "__", -1) - if _, err := fmt.Fprintf(w, "_%s()\n{\n", commandName); err != nil { - return err - } - if _, err := fmt.Fprintf(w, " last_command=%q\n", commandName); err != nil { - return err - } - if err := writeCommands(cmd, w); err != nil { - return err - } - if err := writeFlags(cmd, w); err != nil { - return err - } - if err := writeRequiredFlag(cmd, w); err != nil { - return err - } - if err := writeRequiredNouns(cmd, w); err != nil { - return err - } - if err := writeArgAliases(cmd, w); err != nil { - return err - } - if _, err := fmt.Fprintf(w, "}\n\n"); err != nil { - return err - } - return nil -} - -func (cmd *Command) GenBashCompletion(w io.Writer) error { - if err := preamble(w, cmd.Name()); err != nil { - return err - } - if len(cmd.BashCompletionFunction) > 0 { - if _, err := fmt.Fprintf(w, "%s\n", cmd.BashCompletionFunction); err != nil { - return err - } - } - if err := gen(cmd, w); err != nil { - return err - } - return postscript(w, cmd.Name()) -} - -func (cmd *Command) GenBashCompletionFile(filename string) error { - 
outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return cmd.GenBashCompletion(outFile) -} - -// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag, if it exists. -func (cmd *Command) MarkFlagRequired(name string) error { - return MarkFlagRequired(cmd.Flags(), name) -} - -// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag, if it exists. -func (cmd *Command) MarkPersistentFlagRequired(name string) error { - return MarkFlagRequired(cmd.PersistentFlags(), name) -} - -// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag in the flag set, if it exists. -func MarkFlagRequired(flags *pflag.FlagSet, name string) error { - return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) -} - -// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists. -// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. -func (cmd *Command) MarkFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(cmd.Flags(), name, extensions...) -} - -// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. -// Generated bash autocompletion will call the bash function f for the flag. -func (cmd *Command) MarkFlagCustom(name string, f string) error { - return MarkFlagCustom(cmd.Flags(), name, f) -} - -// MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists. -// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. -func (cmd *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(cmd.PersistentFlags(), name, extensions...) -} - -// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists. -// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. -func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { - return flags.SetAnnotation(name, BashCompFilenameExt, extensions) -} - -// MarkFlagCustom adds the BashCompCustom annotation to the named flag in the flag set, if it exists. -// Generated bash autocompletion will call the bash function f for the flag. -func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error { - return flags.SetAnnotation(name, BashCompCustom, []string{f}) -} diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md deleted file mode 100644 index 6e3b71f13d..0000000000 --- a/vendor/github.com/spf13/cobra/bash_completions.md +++ /dev/null @@ -1,206 +0,0 @@ -# Generating Bash Completions For Your Own cobra.Command - -Generating bash completions from a cobra command is incredibly easy. An actual program which does so for the kubernetes kubectl binary is as follows: - -```go -package main - -import ( - "io/ioutil" - "os" - - "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd" -) - -func main() { - kubectl := cmd.NewFactory(nil).NewKubectlCommand(os.Stdin, ioutil.Discard, ioutil.Discard) - kubectl.GenBashCompletionFile("out.sh") -} -``` - -That will get you completions of subcommands and flags. If you make additional annotations to your code, you can get even more intelligent and flexible behavior. 
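
The kubectl program above pulls in Kubernetes packages just to obtain a command tree; for a quicker reference, here is a minimal, self-contained sketch of the same idea. The `myapp` command name and the output filename are made up for illustration; only `GenBashCompletionFile` comes from Cobra itself (it is defined in the bash_completions.go source shown above).

```go
package main

import (
	"log"

	"github.com/spf13/cobra"
)

func main() {
	// Hypothetical root command; any *cobra.Command tree works the same way.
	rootCmd := &cobra.Command{
		Use:   "myapp",
		Short: "myapp is a placeholder CLI used to demonstrate completion generation",
		Run:   func(cmd *cobra.Command, args []string) {},
	}

	// Walk the command tree and write a bash completion script to disk.
	if err := rootCmd.GenBashCompletionFile("myapp_completion.sh"); err != nil {
		log.Fatal(err)
	}
}
```

Sourcing the generated file (`source myapp_completion.sh`) should be enough to get subcommand and flag completion for the whole tree.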
- -## Creating your own custom functions - -Some more actual code that works in kubernetes: - -```bash -const ( - bash_completion_func = `__kubectl_parse_get() -{ - local kubectl_output out - if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then - out=($(echo "${kubectl_output}" | awk '{print $1}')) - COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) - fi -} - -__kubectl_get_resource() -{ - if [[ ${#nouns[@]} -eq 0 ]]; then - return 1 - fi - __kubectl_parse_get ${nouns[${#nouns[@]} -1]} - if [[ $? -eq 0 ]]; then - return 0 - fi -} - -__custom_func() { - case ${last_command} in - kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop) - __kubectl_get_resource - return - ;; - *) - ;; - esac -} -`) -``` - -And then I set that in my command definition: - -```go -cmds := &cobra.Command{ - Use: "kubectl", - Short: "kubectl controls the Kubernetes cluster manager", - Long: `kubectl controls the Kubernetes cluster manager. - -Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, - Run: runHelp, - BashCompletionFunction: bash_completion_func, -} -``` - -The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__custom_func()` to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__customc_func()` will run because the cobra.Command only understood "kubectl" and "get." `__custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods! - -## Have the completions code complete your 'nouns' - -In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like: - -```go -validArgs []string = { "pod", "node", "service", "replicationcontroller" } - -cmd := &cobra.Command{ - Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)", - Short: "Display one or many resources", - Long: get_long, - Example: get_example, - Run: func(cmd *cobra.Command, args []string) { - err := RunGet(f, out, cmd, args) - util.CheckErr(err) - }, - ValidArgs: validArgs, -} -``` - -Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like - -```bash -# kubectl get [tab][tab] -node pod replicationcontroller service -``` - -## Plural form and shortcuts for nouns - -If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`: - -```go` -argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" } - -cmd := &cobra.Command{ - ... - ValidArgs: validArgs, - ArgAliases: argAliases -} -``` - -The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by -the completion algorithm if entered manually, e.g. in: - -```bash -# kubectl get rc [tab][tab] -backend frontend database -``` - -Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns -in this example again instead of the replication controllers. 
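
The `validArgs`/`argAliases` snippets above are abbreviated from kubectl and are not compilable as written; a minimal, self-contained sketch (with made-up resource nouns) that wires both `ValidArgs` and `ArgAliases` into a command could look like this:

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	getCmd := &cobra.Command{
		Use:   "get RESOURCE",
		Short: "Display one or many resources",
		// Nouns offered on [tab][tab] for this command.
		ValidArgs: []string{"pod", "node", "service"},
		// Accepted as nouns by the completion code, but never shown as suggestions.
		ArgAliases: []string{"pods", "nodes", "services", "svc"},
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("get:", args)
		},
	}

	rootCmd := &cobra.Command{Use: "demo"}
	rootCmd.AddCommand(getCmd)

	// Generating completions for rootCmd (as in the earlier example) would offer
	// "node pod service" after `demo get `, and accept the aliases when typed.
	if err := rootCmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```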
- -## Mark flags as required - -Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy. - -```go -cmd.MarkFlagRequired("pod") -cmd.MarkFlagRequired("container") -``` - -and you'll get something like - -```bash -# kubectl exec [tab][tab][tab] --c --container= -p --pod= -``` - -# Specify valid filename extensions for flags that take a filename - -In this example we use --filename= and expect to get a json or yaml file as the argument. To make this easier we annotate the --filename flag with valid filename extensions. - -```go - annotations := []string{"json", "yaml", "yml"} - annotation := make(map[string][]string) - annotation[cobra.BashCompFilenameExt] = annotations - - flag := &pflag.Flag{ - Name: "filename", - Shorthand: "f", - Usage: usage, - Value: value, - DefValue: value.String(), - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -Now when you run a command with this filename flag you'll get something like - -```bash -# kubectl create -f -test/ example/ rpmbuild/ -hello.yml test.json -``` - -So while there are many other files in the CWD it only shows me subdirs and those with valid extensions. - -# Specifiy custom flag completion - -Similar to the filename completion and filtering using cobra.BashCompFilenameExt, you can specifiy -a custom flag completion function with cobra.BashCompCustom: - -```go - annotation := make(map[string][]string) - annotation[cobra.BashCompFilenameExt] = []string{"__kubectl_get_namespaces"} - - flag := &pflag.Flag{ - Name: "namespace", - Usage: usage, - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -In addition add the `__handle_namespace_flag` implementation in the `BashCompletionFunction` -value, e.g.: - -```bash -__kubectl_get_namespaces() -{ - local template - template="{{ range .items }}{{ .metadata.name }} {{ end }}" - local kubectl_out - if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then - COMPREPLY=( $( compgen -W "${kubectl_out}[*]" -- "$cur" ) ) - fi -} -``` diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go deleted file mode 100644 index 93a2c0f3a7..0000000000 --- a/vendor/github.com/spf13/cobra/cobra.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright © 2013 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Commands similar to git, go tools and other modern CLI tools -// inspired by go, go-Commander, gh and subcommand - -package cobra - -import ( - "fmt" - "io" - "reflect" - "strconv" - "strings" - "text/template" - "unicode" -) - -var templateFuncs = template.FuncMap{ - "trim": strings.TrimSpace, - "trimRightSpace": trimRightSpace, - "appendIfNotPresent": appendIfNotPresent, - "rpad": rpad, - "gt": Gt, - "eq": Eq, -} - -var initializers []func() - -// automatic prefix matching can be a dangerous thing to automatically enable in CLI tools. 
-// Set this to true to enable it -var EnablePrefixMatching = false - -//EnableCommandSorting controls sorting of the slice of commands, which is turned on by default. -//To disable sorting, set it to false. -var EnableCommandSorting = true - -//AddTemplateFunc adds a template function that's available to Usage and Help -//template generation. -func AddTemplateFunc(name string, tmplFunc interface{}) { - templateFuncs[name] = tmplFunc -} - -//AddTemplateFuncs adds multiple template functions availalble to Usage and -//Help template generation. -func AddTemplateFuncs(tmplFuncs template.FuncMap) { - for k, v := range tmplFuncs { - templateFuncs[k] = v - } -} - -//OnInitialize takes a series of func() arguments and appends them to a slice of func(). -func OnInitialize(y ...func()) { - for _, x := range y { - initializers = append(initializers, x) - } -} - -//Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans, -//Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as -//ints and then compared. -func Gt(a interface{}, b interface{}) bool { - var left, right int64 - av := reflect.ValueOf(a) - - switch av.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - left = int64(av.Len()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - left = av.Int() - case reflect.String: - left, _ = strconv.ParseInt(av.String(), 10, 64) - } - - bv := reflect.ValueOf(b) - - switch bv.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - right = int64(bv.Len()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - right = bv.Int() - case reflect.String: - right, _ = strconv.ParseInt(bv.String(), 10, 64) - } - - return left > right -} - -//Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic. -func Eq(a interface{}, b interface{}) bool { - av := reflect.ValueOf(a) - bv := reflect.ValueOf(b) - - switch av.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - panic("Eq called on unsupported type") - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return av.Int() == bv.Int() - case reflect.String: - return av.String() == bv.String() - } - return false -} - -func trimRightSpace(s string) string { - return strings.TrimRightFunc(s, unicode.IsSpace) -} - -// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s -func appendIfNotPresent(s, stringToAppend string) string { - if strings.Contains(s, stringToAppend) { - return s - } - return s + " " + stringToAppend -} - -//rpad adds padding to the right of a string -func rpad(s string, padding int) string { - template := fmt.Sprintf("%%-%ds", padding) - return fmt.Sprintf(template, s) -} - -// tmpl executes the given template text on data, writing the result to w. 
-func tmpl(w io.Writer, text string, data interface{}) error { - t := template.New("top") - t.Funcs(templateFuncs) - template.Must(t.Parse(text)) - return t.Execute(w, data) -} - -// ld compares two strings and returns the levenshtein distance between them -func ld(s, t string, ignoreCase bool) int { - if ignoreCase { - s = strings.ToLower(s) - t = strings.ToLower(t) - } - d := make([][]int, len(s)+1) - for i := range d { - d[i] = make([]int, len(t)+1) - } - for i := range d { - d[i][0] = i - } - for j := range d[0] { - d[0][j] = j - } - for j := 1; j <= len(t); j++ { - for i := 1; i <= len(s); i++ { - if s[i-1] == t[j-1] { - d[i][j] = d[i-1][j-1] - } else { - min := d[i-1][j] - if d[i][j-1] < min { - min = d[i][j-1] - } - if d[i-1][j-1] < min { - min = d[i-1][j-1] - } - d[i][j] = min + 1 - } - } - - } - return d[len(s)][len(t)] -} diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go deleted file mode 100644 index 083e4ea7f4..0000000000 --- a/vendor/github.com/spf13/cobra/command.go +++ /dev/null @@ -1,1256 +0,0 @@ -// Copyright © 2013 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. -//In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - - flag "github.com/spf13/pflag" -) - -// Command is just that, a command for your application. -// eg. 'go run' ... 'run' is the command. Cobra requires -// you to define the usage and description as part of your command -// definition to ensure usability. -type Command struct { - // Name is the command name, usually the executable's name. - name string - // The one-line usage message. - Use string - // An array of aliases that can be used instead of the first word in Use. - Aliases []string - // An array of command names for which this command will be suggested - similar to aliases but only suggests. - SuggestFor []string - // The short description shown in the 'help' output. - Short string - // The long message shown in the 'help ' output. - Long string - // Examples of how to use the command - Example string - // List of all valid non-flag arguments that are accepted in bash completions - ValidArgs []string - // List of aliases for ValidArgs. These are not suggested to the user in the bash - // completion, but accepted if entered manually. - ArgAliases []string - // Custom functions used by the bash autocompletion generator - BashCompletionFunction string - // Is this command deprecated and should print this string when used? - Deprecated string - // Is this command hidden and should NOT show up in the list of available commands? 
- Hidden bool - // Full set of flags - flags *flag.FlagSet - // Set of flags childrens of this command will inherit - pflags *flag.FlagSet - // Flags that are declared specifically by this command (not inherited). - lflags *flag.FlagSet - // SilenceErrors is an option to quiet errors down stream - SilenceErrors bool - // Silence Usage is an option to silence usage when an error occurs. - SilenceUsage bool - // The *Run functions are executed in the following order: - // * PersistentPreRun() - // * PreRun() - // * Run() - // * PostRun() - // * PersistentPostRun() - // All functions get the same args, the arguments after the command name - // PersistentPreRun: children of this command will inherit and execute - PersistentPreRun func(cmd *Command, args []string) - // PersistentPreRunE: PersistentPreRun but returns an error - PersistentPreRunE func(cmd *Command, args []string) error - // PreRun: children of this command will not inherit. - PreRun func(cmd *Command, args []string) - // PreRunE: PreRun but returns an error - PreRunE func(cmd *Command, args []string) error - // Run: Typically the actual work function. Most commands will only implement this - Run func(cmd *Command, args []string) - // RunE: Run but returns an error - RunE func(cmd *Command, args []string) error - // PostRun: run after the Run command. - PostRun func(cmd *Command, args []string) - // PostRunE: PostRun but returns an error - PostRunE func(cmd *Command, args []string) error - // PersistentPostRun: children of this command will inherit and execute after PostRun - PersistentPostRun func(cmd *Command, args []string) - // PersistentPostRunE: PersistentPostRun but returns an error - PersistentPostRunE func(cmd *Command, args []string) error - // DisableAutoGenTag remove - DisableAutoGenTag bool - // Commands is the list of commands supported by this program. - commands []*Command - // Parent Command for this command - parent *Command - // max lengths of commands' string lengths for use in padding - commandsMaxUseLen int - commandsMaxCommandPathLen int - commandsMaxNameLen int - // is commands slice are sorted or not - commandsAreSorted bool - - flagErrorBuf *bytes.Buffer - - args []string // actual args parsed from flags - output *io.Writer // out writer if set in SetOutput(w) - usageFunc func(*Command) error // Usage can be defined by application - usageTemplate string // Can be defined by Application - helpTemplate string // Can be defined by Application - helpFunc func(*Command, []string) // Help can be defined by application - helpCommand *Command // The help command - // The global normalization function that we can use on every pFlag set and children commands - globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName - - // Disable the suggestions based on Levenshtein distance that go along with 'unknown command' messages - DisableSuggestions bool - // If displaying suggestions, allows to set the minimum levenshtein distance to display, must be > 0 - SuggestionsMinimumDistance int - - // Disable the flag parsing. If this is true all flags will be passed to the command as arguments. - DisableFlagParsing bool -} - -// os.Args[1:] by default, if desired, can be overridden -// particularly useful when testing. -func (c *Command) SetArgs(a []string) { - c.args = a -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. 
-func (c *Command) SetOutput(output io.Writer) { - c.output = &output -} - -// Usage can be defined by application -func (c *Command) SetUsageFunc(f func(*Command) error) { - c.usageFunc = f -} - -// Can be defined by Application -func (c *Command) SetUsageTemplate(s string) { - c.usageTemplate = s -} - -// Can be defined by Application -func (c *Command) SetHelpFunc(f func(*Command, []string)) { - c.helpFunc = f -} - -func (c *Command) SetHelpCommand(cmd *Command) { - c.helpCommand = cmd -} - -// Can be defined by Application -func (c *Command) SetHelpTemplate(s string) { - c.helpTemplate = s -} - -// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. -// The user should not have a cyclic dependency on commands. -func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { - c.Flags().SetNormalizeFunc(n) - c.PersistentFlags().SetNormalizeFunc(n) - c.globNormFunc = n - - for _, command := range c.commands { - command.SetGlobalNormalizationFunc(n) - } -} - -func (c *Command) OutOrStdout() io.Writer { - return c.getOut(os.Stdout) -} - -func (c *Command) OutOrStderr() io.Writer { - return c.getOut(os.Stderr) -} - -func (c *Command) getOut(def io.Writer) io.Writer { - if c.output != nil { - return *c.output - } - if c.HasParent() { - return c.parent.getOut(def) - } - return def -} - -// UsageFunc returns either the function set by SetUsageFunc for this command -// or a parent, or it returns a default usage function -func (c *Command) UsageFunc() (f func(*Command) error) { - if c.usageFunc != nil { - return c.usageFunc - } - - if c.HasParent() { - return c.parent.UsageFunc() - } - return func(c *Command) error { - c.mergePersistentFlags() - err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c) - if err != nil { - c.Println(err) - } - return err - } -} - -// Output the usage for the command -// Used when a user provides invalid input -// Can be defined by user by overriding UsageFunc -func (c *Command) Usage() error { - return c.UsageFunc()(c) -} - -// HelpFunc returns either the function set by SetHelpFunc for this command -// or a parent, or it returns a function with default help behavior -func (c *Command) HelpFunc() func(*Command, []string) { - cmd := c - for cmd != nil { - if cmd.helpFunc != nil { - return cmd.helpFunc - } - cmd = cmd.parent - } - return func(*Command, []string) { - c.mergePersistentFlags() - err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c) - if err != nil { - c.Println(err) - } - } -} - -// Output the help for the command -// Used when a user calls help [command] -// Can be defined by user by overriding HelpFunc -func (c *Command) Help() error { - c.HelpFunc()(c, []string{}) - return nil -} - -func (c *Command) UsageString() string { - tmpOutput := c.output - bb := new(bytes.Buffer) - c.SetOutput(bb) - c.Usage() - c.output = tmpOutput - return bb.String() -} - -var minUsagePadding = 25 - -func (c *Command) UsagePadding() int { - if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen { - return minUsagePadding - } - return c.parent.commandsMaxUseLen -} - -var minCommandPathPadding = 11 - -// -func (c *Command) CommandPathPadding() int { - if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen { - return minCommandPathPadding - } - return c.parent.commandsMaxCommandPathLen -} - -var minNamePadding = 11 - -func (c *Command) NamePadding() int { - if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen { - return minNamePadding - } - 
return c.parent.commandsMaxNameLen -} - -func (c *Command) UsageTemplate() string { - if c.usageTemplate != "" { - return c.usageTemplate - } - - if c.HasParent() { - return c.parent.UsageTemplate() - } - return `Usage:{{if .Runnable}} - {{if .HasAvailableFlags}}{{appendIfNotPresent .UseLine "[flags]"}}{{else}}{{.UseLine}}{{end}}{{end}}{{if .HasAvailableSubCommands}} - {{ .CommandPath}} [command]{{end}}{{if gt .Aliases 0}} - -Aliases: - {{.NameAndAliases}} -{{end}}{{if .HasExample}} - -Examples: -{{ .Example }}{{end}}{{ if .HasAvailableSubCommands}} - -Available Commands:{{range .Commands}}{{if .IsAvailableCommand}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableLocalFlags}} - -Flags: -{{.LocalFlags.FlagUsages | trimRightSpace}}{{end}}{{ if .HasAvailableInheritedFlags}} - -Global Flags: -{{.InheritedFlags.FlagUsages | trimRightSpace}}{{end}}{{if .HasHelpSubCommands}} - -Additional help topics:{{range .Commands}}{{if .IsHelpCommand}} - {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableSubCommands }} - -Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} -` -} - -func (c *Command) HelpTemplate() string { - if c.helpTemplate != "" { - return c.helpTemplate - } - - if c.HasParent() { - return c.parent.HelpTemplate() - } - return `{{with or .Long .Short }}{{. | trim}} - -{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` -} - -// Really only used when casting a command to a commander -func (c *Command) resetChildrensParents() { - for _, x := range c.commands { - x.parent = c - } -} - -// Test if the named flag is a boolean flag. -func isBooleanFlag(name string, f *flag.FlagSet) bool { - flag := f.Lookup(name) - if flag == nil { - return false - } - return flag.Value.Type() == "bool" -} - -// Test if the named flag is a boolean flag. -func isBooleanShortFlag(name string, f *flag.FlagSet) bool { - result := false - f.VisitAll(func(f *flag.Flag) { - if f.Shorthand == name && f.Value.Type() == "bool" { - result = true - } - }) - return result -} - -func stripFlags(args []string, c *Command) []string { - if len(args) < 1 { - return args - } - c.mergePersistentFlags() - - commands := []string{} - - inQuote := false - inFlag := false - for _, y := range args { - if !inQuote { - switch { - case strings.HasPrefix(y, "\""): - inQuote = true - case strings.Contains(y, "=\""): - inQuote = true - case strings.HasPrefix(y, "--") && !strings.Contains(y, "="): - // TODO: this isn't quite right, we should really check ahead for 'true' or 'false' - inFlag = !isBooleanFlag(y[2:], c.Flags()) - case strings.HasPrefix(y, "-") && !strings.Contains(y, "=") && len(y) == 2 && !isBooleanShortFlag(y[1:], c.Flags()): - inFlag = true - case inFlag: - inFlag = false - case y == "": - // strip empty commands, as the go tests expect this to be ok.... - case !strings.HasPrefix(y, "-"): - commands = append(commands, y) - inFlag = false - } - } - - if strings.HasSuffix(y, "\"") && !strings.HasSuffix(y, "\\\"") { - inQuote = false - } - } - - return commands -} - -// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like -// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]). -func argsMinusFirstX(args []string, x string) []string { - for i, y := range args { - if x == y { - ret := []string{} - ret = append(ret, args[:i]...) - ret = append(ret, args[i+1:]...) 
- return ret - } - } - return args -} - -// find the target command given the args and command tree -// Meant to be run on the highest node. Only searches down. -func (c *Command) Find(args []string) (*Command, []string, error) { - if c == nil { - return nil, nil, fmt.Errorf("Called find() on a nil Command") - } - - var innerfind func(*Command, []string) (*Command, []string) - - innerfind = func(c *Command, innerArgs []string) (*Command, []string) { - argsWOflags := stripFlags(innerArgs, c) - if len(argsWOflags) == 0 { - return c, innerArgs - } - nextSubCmd := argsWOflags[0] - matches := make([]*Command, 0) - for _, cmd := range c.commands { - if cmd.Name() == nextSubCmd || cmd.HasAlias(nextSubCmd) { // exact name or alias match - return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd)) - } - if EnablePrefixMatching { - if strings.HasPrefix(cmd.Name(), nextSubCmd) { // prefix match - matches = append(matches, cmd) - } - for _, x := range cmd.Aliases { - if strings.HasPrefix(x, nextSubCmd) { - matches = append(matches, cmd) - } - } - } - } - - // only accept a single prefix match - multiple matches would be ambiguous - if len(matches) == 1 { - return innerfind(matches[0], argsMinusFirstX(innerArgs, argsWOflags[0])) - } - - return c, innerArgs - } - - commandFound, a := innerfind(c, args) - argsWOflags := stripFlags(a, commandFound) - - // no subcommand, always take args - if !commandFound.HasSubCommands() { - return commandFound, a, nil - } - - // root command with subcommands, do subcommand checking - if commandFound == c && len(argsWOflags) > 0 { - suggestionsString := "" - if !c.DisableSuggestions { - if c.SuggestionsMinimumDistance <= 0 { - c.SuggestionsMinimumDistance = 2 - } - if suggestions := c.SuggestionsFor(argsWOflags[0]); len(suggestions) > 0 { - suggestionsString += "\n\nDid you mean this?\n" - for _, s := range suggestions { - suggestionsString += fmt.Sprintf("\t%v\n", s) - } - } - } - return commandFound, a, fmt.Errorf("unknown command %q for %q%s", argsWOflags[0], commandFound.CommandPath(), suggestionsString) - } - - return commandFound, a, nil -} - -func (c *Command) SuggestionsFor(typedName string) []string { - suggestions := []string{} - for _, cmd := range c.commands { - if cmd.IsAvailableCommand() { - levenshteinDistance := ld(typedName, cmd.Name(), true) - suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance - suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName)) - if suggestByLevenshtein || suggestByPrefix { - suggestions = append(suggestions, cmd.Name()) - } - for _, explicitSuggestion := range cmd.SuggestFor { - if strings.EqualFold(typedName, explicitSuggestion) { - suggestions = append(suggestions, cmd.Name()) - } - } - } - } - return suggestions -} - -func (c *Command) VisitParents(fn func(*Command)) { - var traverse func(*Command) *Command - - traverse = func(x *Command) *Command { - if x != c { - fn(x) - } - if x.HasParent() { - return traverse(x.parent) - } - return x - } - traverse(c) -} - -func (c *Command) Root() *Command { - var findRoot func(*Command) *Command - - findRoot = func(x *Command) *Command { - if x.HasParent() { - return findRoot(x.parent) - } - return x - } - - return findRoot(c) -} - -// ArgsLenAtDash will return the length of f.Args at the moment when a -- was -// found during arg parsing. This allows your program to know which args were -// before the -- and which came after. (Description from -// https://godoc.org/github.com/spf13/pflag#FlagSet.ArgsLenAtDash). 
-func (c *Command) ArgsLenAtDash() int { - return c.Flags().ArgsLenAtDash() -} - -func (c *Command) execute(a []string) (err error) { - if c == nil { - return fmt.Errorf("Called Execute() on a nil Command") - } - - if len(c.Deprecated) > 0 { - c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated) - } - - // initialize help flag as the last point possible to allow for user - // overriding - c.initHelpFlag() - - err = c.ParseFlags(a) - if err != nil { - return err - } - // If help is called, regardless of other flags, return we want help - // Also say we need help if the command isn't runnable. - helpVal, err := c.Flags().GetBool("help") - if err != nil { - // should be impossible to get here as we always declare a help - // flag in initHelpFlag() - c.Println("\"help\" flag declared as non-bool. Please correct your code") - return err - } - - if helpVal || !c.Runnable() { - return flag.ErrHelp - } - - c.preRun() - - argWoFlags := c.Flags().Args() - if c.DisableFlagParsing { - argWoFlags = a - } - - for p := c; p != nil; p = p.Parent() { - if p.PersistentPreRunE != nil { - if err := p.PersistentPreRunE(c, argWoFlags); err != nil { - return err - } - break - } else if p.PersistentPreRun != nil { - p.PersistentPreRun(c, argWoFlags) - break - } - } - if c.PreRunE != nil { - if err := c.PreRunE(c, argWoFlags); err != nil { - return err - } - } else if c.PreRun != nil { - c.PreRun(c, argWoFlags) - } - - if c.RunE != nil { - if err := c.RunE(c, argWoFlags); err != nil { - return err - } - } else { - c.Run(c, argWoFlags) - } - if c.PostRunE != nil { - if err := c.PostRunE(c, argWoFlags); err != nil { - return err - } - } else if c.PostRun != nil { - c.PostRun(c, argWoFlags) - } - for p := c; p != nil; p = p.Parent() { - if p.PersistentPostRunE != nil { - if err := p.PersistentPostRunE(c, argWoFlags); err != nil { - return err - } - break - } else if p.PersistentPostRun != nil { - p.PersistentPostRun(c, argWoFlags) - break - } - } - - return nil -} - -func (c *Command) preRun() { - for _, x := range initializers { - x() - } -} - -func (c *Command) errorMsgFromParse() string { - s := c.flagErrorBuf.String() - - x := strings.Split(s, "\n") - - if len(x) > 0 { - return x[0] - } - return "" -} - -// Call execute to use the args (os.Args[1:] by default) -// and run through the command tree finding appropriate matches -// for commands and then corresponding flags. 
-func (c *Command) Execute() error { - _, err := c.ExecuteC() - return err -} - -func (c *Command) ExecuteC() (cmd *Command, err error) { - - // Regardless of what command execute is called on, run on Root only - if c.HasParent() { - return c.Root().ExecuteC() - } - - // windows hook - if preExecHookFn != nil { - preExecHookFn(c) - } - - // initialize help as the last point possible to allow for user - // overriding - c.initHelpCmd() - - var args []string - - // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155 - if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" { - args = os.Args[1:] - } else { - args = c.args - } - - cmd, flags, err := c.Find(args) - if err != nil { - // If found parse to a subcommand and then failed, talk about the subcommand - if cmd != nil { - c = cmd - } - if !c.SilenceErrors { - c.Println("Error:", err.Error()) - c.Printf("Run '%v --help' for usage.\n", c.CommandPath()) - } - return c, err - } - err = cmd.execute(flags) - if err != nil { - // Always show help if requested, even if SilenceErrors is in - // effect - if err == flag.ErrHelp { - cmd.HelpFunc()(cmd, args) - return cmd, nil - } - - // If root command has SilentErrors flagged, - // all subcommands should respect it - if !cmd.SilenceErrors && !c.SilenceErrors { - c.Println("Error:", err.Error()) - } - - // If root command has SilentUsage flagged, - // all subcommands should respect it - if !cmd.SilenceUsage && !c.SilenceUsage { - c.Println(cmd.UsageString()) - } - return cmd, err - } - return cmd, nil -} - -func (c *Command) initHelpFlag() { - if c.Flags().Lookup("help") == nil { - c.Flags().BoolP("help", "h", false, "help for "+c.Name()) - } -} - -func (c *Command) initHelpCmd() { - if c.helpCommand == nil { - if !c.HasSubCommands() { - return - } - - c.helpCommand = &Command{ - Use: "help [command]", - Short: "Help about any command", - Long: `Help provides help for any command in the application. - Simply type ` + c.Name() + ` help [path to command] for full details.`, - PersistentPreRun: func(cmd *Command, args []string) {}, - PersistentPostRun: func(cmd *Command, args []string) {}, - - Run: func(c *Command, args []string) { - cmd, _, e := c.Root().Find(args) - if cmd == nil || e != nil { - c.Printf("Unknown help topic %#q.", args) - c.Root().Usage() - } else { - cmd.Help() - } - }, - } - } - c.AddCommand(c.helpCommand) -} - -// Used for testing -func (c *Command) ResetCommands() { - c.commands = nil - c.helpCommand = nil -} - -// Sorts commands by their names -type commandSorterByName []*Command - -func (c commandSorterByName) Len() int { return len(c) } -func (c commandSorterByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] } -func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() } - -// Commands returns a sorted slice of child commands. -func (c *Command) Commands() []*Command { - // do not sort commands if it already sorted or sorting was disabled - if EnableCommandSorting && !c.commandsAreSorted { - sort.Sort(commandSorterByName(c.commands)) - c.commandsAreSorted = true - } - return c.commands -} - -// AddCommand adds one or more commands to this parent command. 
-func (c *Command) AddCommand(cmds ...*Command) { - for i, x := range cmds { - if cmds[i] == c { - panic("Command can't be a child of itself") - } - cmds[i].parent = c - // update max lengths - usageLen := len(x.Use) - if usageLen > c.commandsMaxUseLen { - c.commandsMaxUseLen = usageLen - } - commandPathLen := len(x.CommandPath()) - if commandPathLen > c.commandsMaxCommandPathLen { - c.commandsMaxCommandPathLen = commandPathLen - } - nameLen := len(x.Name()) - if nameLen > c.commandsMaxNameLen { - c.commandsMaxNameLen = nameLen - } - // If global normalization function exists, update all children - if c.globNormFunc != nil { - x.SetGlobalNormalizationFunc(c.globNormFunc) - } - c.commands = append(c.commands, x) - c.commandsAreSorted = false - } -} - -// RemoveCommand removes one or more commands from a parent command. -func (c *Command) RemoveCommand(cmds ...*Command) { - commands := []*Command{} -main: - for _, command := range c.commands { - for _, cmd := range cmds { - if command == cmd { - command.parent = nil - continue main - } - } - commands = append(commands, command) - } - c.commands = commands - // recompute all lengths - c.commandsMaxUseLen = 0 - c.commandsMaxCommandPathLen = 0 - c.commandsMaxNameLen = 0 - for _, command := range c.commands { - usageLen := len(command.Use) - if usageLen > c.commandsMaxUseLen { - c.commandsMaxUseLen = usageLen - } - commandPathLen := len(command.CommandPath()) - if commandPathLen > c.commandsMaxCommandPathLen { - c.commandsMaxCommandPathLen = commandPathLen - } - nameLen := len(command.Name()) - if nameLen > c.commandsMaxNameLen { - c.commandsMaxNameLen = nameLen - } - } -} - -// Print is a convenience method to Print to the defined output, fallback to Stderr if not set -func (c *Command) Print(i ...interface{}) { - fmt.Fprint(c.OutOrStderr(), i...) -} - -// Println is a convenience method to Println to the defined output, fallback to Stderr if not set -func (c *Command) Println(i ...interface{}) { - str := fmt.Sprintln(i...) - c.Print(str) -} - -// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set -func (c *Command) Printf(format string, i ...interface{}) { - str := fmt.Sprintf(format, i...) - c.Print(str) -} - -// CommandPath returns the full path to this command. 
-func (c *Command) CommandPath() string { - str := c.Name() - x := c - for x.HasParent() { - str = x.parent.Name() + " " + str - x = x.parent - } - return str -} - -//The full usage for a given command (including parents) -func (c *Command) UseLine() string { - str := "" - if c.HasParent() { - str = c.parent.CommandPath() + " " - } - return str + c.Use -} - -// For use in determining which flags have been assigned to which commands -// and which persist -func (c *Command) DebugFlags() { - c.Println("DebugFlags called on", c.Name()) - var debugflags func(*Command) - - debugflags = func(x *Command) { - if x.HasFlags() || x.HasPersistentFlags() { - c.Println(x.Name()) - } - if x.HasFlags() { - x.flags.VisitAll(func(f *flag.Flag) { - if x.HasPersistentFlags() { - if x.persistentFlag(f.Name) == nil { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") - } else { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]") - } - } else { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") - } - }) - } - if x.HasPersistentFlags() { - x.pflags.VisitAll(func(f *flag.Flag) { - if x.HasFlags() { - if x.flags.Lookup(f.Name) == nil { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") - } - } else { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") - } - }) - } - c.Println(x.flagErrorBuf) - if x.HasSubCommands() { - for _, y := range x.commands { - debugflags(y) - } - } - } - - debugflags(c) -} - -// Name returns the command's name: the first word in the use line. -func (c *Command) Name() string { - if c.name != "" { - return c.name - } - name := c.Use - i := strings.Index(name, " ") - if i >= 0 { - name = name[:i] - } - return name -} - -// HasAlias determines if a given string is an alias of the command. 
-func (c *Command) HasAlias(s string) bool { - for _, a := range c.Aliases { - if a == s { - return true - } - } - return false -} - -func (c *Command) NameAndAliases() string { - return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ") -} - -func (c *Command) HasExample() bool { - return len(c.Example) > 0 -} - -// Runnable determines if the command is itself runnable -func (c *Command) Runnable() bool { - return c.Run != nil || c.RunE != nil -} - -// HasSubCommands determines if the command has children commands -func (c *Command) HasSubCommands() bool { - return len(c.commands) > 0 -} - -// IsAvailableCommand determines if a command is available as a non-help command -// (this includes all non deprecated/hidden commands) -func (c *Command) IsAvailableCommand() bool { - if len(c.Deprecated) != 0 || c.Hidden { - return false - } - - if c.HasParent() && c.Parent().helpCommand == c { - return false - } - - if c.Runnable() || c.HasAvailableSubCommands() { - return true - } - - return false -} - -// IsHelpCommand determines if a command is a 'help' command; a help command is -// determined by the fact that it is NOT runnable/hidden/deprecated, and has no -// sub commands that are runnable/hidden/deprecated -func (c *Command) IsHelpCommand() bool { - - // if a command is runnable, deprecated, or hidden it is not a 'help' command - if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden { - return false - } - - // if any non-help sub commands are found, the command is not a 'help' command - for _, sub := range c.commands { - if !sub.IsHelpCommand() { - return false - } - } - - // the command either has no sub commands, or no non-help sub commands - return true -} - -// HasHelpSubCommands determines if a command has any avilable 'help' sub commands -// that need to be shown in the usage/help default template under 'additional help -// topics' -func (c *Command) HasHelpSubCommands() bool { - - // return true on the first found available 'help' sub command - for _, sub := range c.commands { - if sub.IsHelpCommand() { - return true - } - } - - // the command either has no sub commands, or no available 'help' sub commands - return false -} - -// HasAvailableSubCommands determines if a command has available sub commands that -// need to be shown in the usage/help default template under 'available commands' -func (c *Command) HasAvailableSubCommands() bool { - - // return true on the first found available (non deprecated/help/hidden) - // sub command - for _, sub := range c.commands { - if sub.IsAvailableCommand() { - return true - } - } - - // the command either has no sub comamnds, or no available (non deprecated/help/hidden) - // sub commands - return false -} - -// Determine if the command is a child command -func (c *Command) HasParent() bool { - return c.parent != nil -} - -// GlobalNormalizationFunc returns the global normalization function or nil if doesn't exists -func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName { - return c.globNormFunc -} - -// Get the complete FlagSet that applies to this command (local and persistent declared here and by all parents) -func (c *Command) Flags() *flag.FlagSet { - if c.flags == nil { - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.flags.SetOutput(c.flagErrorBuf) - } - return c.flags -} - -// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands -func (c *Command) 
LocalNonPersistentFlags() *flag.FlagSet { - persistentFlags := c.PersistentFlags() - - out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.LocalFlags().VisitAll(func(f *flag.Flag) { - if persistentFlags.Lookup(f.Name) == nil { - out.AddFlag(f) - } - }) - return out -} - -// Get the local FlagSet specifically set in the current command -func (c *Command) LocalFlags() *flag.FlagSet { - c.mergePersistentFlags() - - local := flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.lflags.VisitAll(func(f *flag.Flag) { - local.AddFlag(f) - }) - if !c.HasParent() { - flag.CommandLine.VisitAll(func(f *flag.Flag) { - if local.Lookup(f.Name) == nil { - local.AddFlag(f) - } - }) - } - return local -} - -// All Flags which were inherited from parents commands -func (c *Command) InheritedFlags() *flag.FlagSet { - c.mergePersistentFlags() - - inherited := flag.NewFlagSet(c.Name(), flag.ContinueOnError) - local := c.LocalFlags() - - var rmerge func(x *Command) - - rmerge = func(x *Command) { - if x.HasPersistentFlags() { - x.PersistentFlags().VisitAll(func(f *flag.Flag) { - if inherited.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil { - inherited.AddFlag(f) - } - }) - } - if x.HasParent() { - rmerge(x.parent) - } - } - - if c.HasParent() { - rmerge(c.parent) - } - - return inherited -} - -// All Flags which were not inherited from parent commands -func (c *Command) NonInheritedFlags() *flag.FlagSet { - return c.LocalFlags() -} - -// Get the Persistent FlagSet specifically set in the current command -func (c *Command) PersistentFlags() *flag.FlagSet { - if c.pflags == nil { - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.pflags.SetOutput(c.flagErrorBuf) - } - return c.pflags -} - -// For use in testing -func (c *Command) ResetFlags() { - c.flagErrorBuf = new(bytes.Buffer) - c.flagErrorBuf.Reset() - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.flags.SetOutput(c.flagErrorBuf) - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.pflags.SetOutput(c.flagErrorBuf) -} - -// Does the command contain any flags (local plus persistent from the entire structure) -func (c *Command) HasFlags() bool { - return c.Flags().HasFlags() -} - -// Does the command contain persistent flags -func (c *Command) HasPersistentFlags() bool { - return c.PersistentFlags().HasFlags() -} - -// Does the command has flags specifically declared locally -func (c *Command) HasLocalFlags() bool { - return c.LocalFlags().HasFlags() -} - -// Does the command have flags inherited from its parent command -func (c *Command) HasInheritedFlags() bool { - return c.InheritedFlags().HasFlags() -} - -// Does the command contain any flags (local plus persistent from the entire -// structure) which are not hidden or deprecated -func (c *Command) HasAvailableFlags() bool { - return c.Flags().HasAvailableFlags() -} - -// Does the command contain persistent flags which are not hidden or deprecated -func (c *Command) HasAvailablePersistentFlags() bool { - return c.PersistentFlags().HasAvailableFlags() -} - -// Does the command has flags specifically declared locally which are not hidden -// or deprecated -func (c *Command) HasAvailableLocalFlags() bool { - return c.LocalFlags().HasAvailableFlags() -} - -// Does the command have flags inherited from its parent command which are -// not hidden or deprecated -func (c *Command) HasAvailableInheritedFlags() bool { - return c.InheritedFlags().HasAvailableFlags() -} - -// Flag climbs up the 
command tree looking for matching flag -func (c *Command) Flag(name string) (flag *flag.Flag) { - flag = c.Flags().Lookup(name) - - if flag == nil { - flag = c.persistentFlag(name) - } - - return -} - -// recursively find matching persistent flag -func (c *Command) persistentFlag(name string) (flag *flag.Flag) { - if c.HasPersistentFlags() { - flag = c.PersistentFlags().Lookup(name) - } - - if flag == nil && c.HasParent() { - flag = c.parent.persistentFlag(name) - } - return -} - -// ParseFlags parses persistent flag tree & local flags -func (c *Command) ParseFlags(args []string) (err error) { - if c.DisableFlagParsing { - return nil - } - c.mergePersistentFlags() - err = c.Flags().Parse(args) - return -} - -// Parent returns a commands parent command -func (c *Command) Parent() *Command { - return c.parent -} - -func (c *Command) mergePersistentFlags() { - var rmerge func(x *Command) - - // Save the set of local flags - if c.lflags == nil { - c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.lflags.SetOutput(c.flagErrorBuf) - addtolocal := func(f *flag.Flag) { - c.lflags.AddFlag(f) - } - c.Flags().VisitAll(addtolocal) - c.PersistentFlags().VisitAll(addtolocal) - } - rmerge = func(x *Command) { - if !x.HasParent() { - flag.CommandLine.VisitAll(func(f *flag.Flag) { - if x.PersistentFlags().Lookup(f.Name) == nil { - x.PersistentFlags().AddFlag(f) - } - }) - } - if x.HasPersistentFlags() { - x.PersistentFlags().VisitAll(func(f *flag.Flag) { - if c.Flags().Lookup(f.Name) == nil { - c.Flags().AddFlag(f) - } - }) - } - if x.HasParent() { - rmerge(x.parent) - } - } - - rmerge(c) -} diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go deleted file mode 100644 index 6159c1cc19..0000000000 --- a/vendor/github.com/spf13/cobra/command_notwin.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !windows - -package cobra - -var preExecHookFn func(*Command) diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go deleted file mode 100644 index 4b0eaa1b6b..0000000000 --- a/vendor/github.com/spf13/cobra/command_win.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build windows - -package cobra - -import ( - "os" - "time" - - "github.com/inconshreveable/mousetrap" -) - -var preExecHookFn = preExecHook - -// enables an information splash screen on Windows if the CLI is started from explorer.exe. -var MousetrapHelpText string = `This is a command line tool - -You need to open cmd.exe and run it from there. 
-` - -func preExecHook(c *Command) { - if mousetrap.StartedByExplorer() { - c.Print(MousetrapHelpText) - time.Sleep(5 * time.Second) - os.Exit(1) - } -} diff --git a/vendor/github.com/spf13/pflag/.gitignore b/vendor/github.com/spf13/pflag/.gitignore new file mode 100644 index 0000000000..c3da290134 --- /dev/null +++ b/vendor/github.com/spf13/pflag/.gitignore @@ -0,0 +1,2 @@ +.idea/* + diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md index 08ad945658..eefb46dec8 100644 --- a/vendor/github.com/spf13/pflag/README.md +++ b/vendor/github.com/spf13/pflag/README.md @@ -1,4 +1,6 @@ [![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag) +[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag) +[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag) ## Description @@ -106,9 +108,9 @@ that give one-letter shorthands for flags. You can use these by appending var ip = flag.IntP("flagname", "f", 1234, "help message") var flagvar bool func init() { - flag.BoolVarP("boolname", "b", true, "help message") + flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") } -flag.VarP(&flagVar, "varname", "v", 1234, "help message") +flag.VarP(&flagVal, "varname", "v", "help message") ``` Shorthand letters can be used with single dashes on the command line. diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go new file mode 100644 index 0000000000..5af02f1a75 --- /dev/null +++ b/vendor/github.com/spf13/pflag/bool_slice.go @@ -0,0 +1,147 @@ +package pflag + +import ( + "io" + "strconv" + "strings" +) + +// -- boolSlice Value +type boolSliceValue struct { + value *[]bool + changed bool +} + +func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue { + bsv := new(boolSliceValue) + bsv.value = p + *bsv.value = val + return bsv +} + +// Set converts, and assigns, the comma-separated boolean argument string representation as the []bool value of this flag. +// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended. +func (s *boolSliceValue) Set(val string) error { + + // remove all quote characters + rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") + + // read flag arguments with CSV parser + boolStrSlice, err := readAsCSV(rmQuote.Replace(val)) + if err != nil && err != io.EOF { + return err + } + + // parse boolean values into slice + out := make([]bool, 0, len(boolStrSlice)) + for _, boolStr := range boolStrSlice { + b, err := strconv.ParseBool(strings.TrimSpace(boolStr)) + if err != nil { + return err + } + out = append(out, b) + } + + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + + s.changed = true + + return nil +} + +// Type returns a string that uniquely represents this flag's type. +func (s *boolSliceValue) Type() string { + return "boolSlice" +} + +// String defines a "native" format for this boolean slice flag value. 
+func (s *boolSliceValue) String() string { + + boolStrSlice := make([]string, len(*s.value)) + for i, b := range *s.value { + boolStrSlice[i] = strconv.FormatBool(b) + } + + out, _ := writeAsCSV(boolStrSlice) + + return "[" + out + "]" +} + +func boolSliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []bool{}, nil + } + ss := strings.Split(val, ",") + out := make([]bool, len(ss)) + for i, t := range ss { + var err error + out[i], err = strconv.ParseBool(t) + if err != nil { + return nil, err + } + } + return out, nil +} + +// GetBoolSlice returns the []bool value of a flag with the given name. +func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) { + val, err := f.getFlagType(name, "boolSlice", boolSliceConv) + if err != nil { + return []bool{}, err + } + return val.([]bool), nil +} + +// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string. +// The argument p points to a []bool variable in which to store the value of the flag. +func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) { + f.VarP(newBoolSliceValue(value, p), name, "", usage) +} + +// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { + f.VarP(newBoolSliceValue(value, p), name, shorthand, usage) +} + +// BoolSliceVar defines a []bool flag with specified name, default value, and usage string. +// The argument p points to a []bool variable in which to store the value of the flag. +func BoolSliceVar(p *[]bool, name string, value []bool, usage string) { + CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage) +} + +// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. +func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { + CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage) +} + +// BoolSlice defines a []bool flag with specified name, default value, and usage string. +// The return value is the address of a []bool variable that stores the value of the flag. +func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool { + p := []bool{} + f.BoolSliceVarP(&p, name, "", value, usage) + return &p +} + +// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool { + p := []bool{} + f.BoolSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// BoolSlice defines a []bool flag with specified name, default value, and usage string. +// The return value is the address of a []bool variable that stores the value of the flag. +func BoolSlice(name string, value []bool, usage string) *[]bool { + return CommandLine.BoolSliceP(name, "", value, usage) +} + +// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. 
+func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool { + return CommandLine.BoolSliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index fa815642ed..746af6327e 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -487,9 +487,76 @@ func UnquoteUsage(flag *Flag) (name string, usage string) { return } -// FlagUsages Returns a string containing the usage information for all flags in -// the FlagSet -func (f *FlagSet) FlagUsages() string { +// Splits the string `s` on whitespace into an initial substring up to +// `i` runes in length and the remainder. Will go `slop` over `i` if +// that encompasses the entire string (which allows the caller to +// avoid short orphan words on the final line). +func wrapN(i, slop int, s string) (string, string) { + if i+slop > len(s) { + return s, "" + } + + w := strings.LastIndexAny(s[:i], " \t") + if w <= 0 { + return s, "" + } + + return s[:w], s[w+1:] +} + +// Wraps the string `s` to a maximum width `w` with leading indent +// `i`. The first line is not indented (this is assumed to be done by +// caller). Pass `w` == 0 to do no wrapping +func wrap(i, w int, s string) string { + if w == 0 { + return s + } + + // space between indent i and end of line width w into which + // we should wrap the text. + wrap := w - i + + var r, l string + + // Not enough space for sensible wrapping. Wrap as a block on + // the next line instead. + if wrap < 24 { + i = 16 + wrap = w - i + r += "\n" + strings.Repeat(" ", i) + } + // If still not enough space then don't even try to wrap. + if wrap < 24 { + return s + } + + // Try to avoid short orphan words on the final line, by + // allowing wrapN to go a bit over if that would fit in the + // remainder of the line. + slop := 5 + wrap = wrap - slop + + // Handle first line, which is indented by the caller (or the + // special case above) + l, s = wrapN(wrap, slop, s) + r = r + l + + // Now wrap the rest + for s != "" { + var t string + + t, s = wrapN(wrap, slop, s) + r = r + "\n" + strings.Repeat(" ", i) + t + } + + return r + +} + +// FlagUsagesWrapped returns a string containing the usage information +// for all flags in the FlagSet. Wrapped to `cols` columns (0 for no +// wrapping) +func (f *FlagSet) FlagUsagesWrapped(cols int) string { x := new(bytes.Buffer) lines := make([]string, 0, len(f.formal)) @@ -546,12 +613,19 @@ func (f *FlagSet) FlagUsages() string { for _, line := range lines { sidx := strings.Index(line, "\x00") spacing := strings.Repeat(" ", maxlen-sidx) - fmt.Fprintln(x, line[:sidx], spacing, line[sidx+1:]) + // maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx + fmt.Fprintln(x, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:])) } return x.String() } +// FlagUsages returns a string containing the usage information for all flags in +// the FlagSet +func (f *FlagSet) FlagUsages() string { + return f.FlagUsagesWrapped(0) +} + // PrintDefaults prints to standard error the default values of all defined command-line flags. func PrintDefaults() { CommandLine.PrintDefaults() @@ -635,7 +709,7 @@ func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag { // VarP is like Var, but accepts a shorthand letter that can be used after a single dash. 
func (f *FlagSet) VarP(value Value, name, shorthand, usage string) { - _ = f.VarPF(value, name, shorthand, usage) + f.VarPF(value, name, shorthand, usage) } // AddFlag will add the flag to the FlagSet @@ -752,7 +826,7 @@ func containsShorthand(arg, shorthand string) bool { return strings.Contains(arg, shorthand) } -func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error) { +func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) { a = args name := s[2:] if len(name) == 0 || name[0] == '-' || name[0] == '=' { @@ -786,11 +860,11 @@ func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error) err = f.failf("flag needs an argument: %s", s) return } - err = f.setFlag(flag, value, s) + err = fn(flag, value, s) return } -func (f *FlagSet) parseSingleShortArg(shorthands string, args []string) (outShorts string, outArgs []string, err error) { +func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) { if strings.HasPrefix(shorthands, "test.") { return } @@ -825,16 +899,16 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string) (outShor err = f.failf("flag needs an argument: %q in -%s", c, shorthands) return } - err = f.setFlag(flag, value, shorthands) + err = fn(flag, value, shorthands) return } -func (f *FlagSet) parseShortArg(s string, args []string) (a []string, err error) { +func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) { a = args shorthands := s[1:] for len(shorthands) > 0 { - shorthands, a, err = f.parseSingleShortArg(shorthands, args) + shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn) if err != nil { return } @@ -843,7 +917,7 @@ func (f *FlagSet) parseShortArg(s string, args []string) (a []string, err error) return } -func (f *FlagSet) parseArgs(args []string) (err error) { +func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) { for len(args) > 0 { s := args[0] args = args[1:] @@ -863,9 +937,9 @@ func (f *FlagSet) parseArgs(args []string) (err error) { f.args = append(f.args, args...) break } - args, err = f.parseLongArg(s, args) + args, err = f.parseLongArg(s, args, fn) } else { - args, err = f.parseShortArg(s, args) + args, err = f.parseShortArg(s, args, fn) } if err != nil { return @@ -881,7 +955,41 @@ func (f *FlagSet) parseArgs(args []string) (err error) { func (f *FlagSet) Parse(arguments []string) error { f.parsed = true f.args = make([]string, 0, len(arguments)) - err := f.parseArgs(arguments) + + assign := func(flag *Flag, value, origArg string) error { + return f.setFlag(flag, value, origArg) + } + + err := f.parseArgs(arguments, assign) + if err != nil { + switch f.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + } + return nil +} + +type parseFunc func(flag *Flag, value, origArg string) error + +// ParseAll parses flag definitions from the argument list, which should not +// include the command name. The arguments for fn are flag and value. Must be +// called after all flags in the FlagSet are defined and before flags are +// accessed by the program. The return value will be ErrHelp if -help was set +// but not defined. 
+func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error { + f.parsed = true + f.args = make([]string, 0, len(arguments)) + + assign := func(flag *Flag, value, origArg string) error { + return fn(flag, value) + } + + err := f.parseArgs(arguments, assign) if err != nil { switch f.errorHandling { case ContinueOnError: @@ -907,6 +1015,14 @@ func Parse() { CommandLine.Parse(os.Args[1:]) } +// ParseAll parses the command-line flags from os.Args[1:] and called fn for each. +// The arguments for fn are flag and value. Must be called after all flags are +// defined and before flags are accessed by the program. +func ParseAll(fn func(flag *Flag, value string) error) { + // Ignore errors; CommandLine is set for ExitOnError. + CommandLine.ParseAll(os.Args[1:], fn) +} + // SetInterspersed sets whether to support interspersed option/non-option arguments. func SetInterspersed(interspersed bool) { CommandLine.SetInterspersed(interspersed) diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go index b056147fd8..c4f47ebe59 100644 --- a/vendor/github.com/spf13/pflag/golangflag.go +++ b/vendor/github.com/spf13/pflag/golangflag.go @@ -6,13 +6,10 @@ package pflag import ( goflag "flag" - "fmt" "reflect" "strings" ) -var _ = fmt.Print - // flagValueWrapper implements pflag.Value around a flag.Value. The main // difference here is the addition of the Type method that returns a string // name of the type. As this is generally unknown, we approximate that with diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go index 88a17430a0..3d414ba69f 100644 --- a/vendor/github.com/spf13/pflag/ip.go +++ b/vendor/github.com/spf13/pflag/ip.go @@ -6,8 +6,6 @@ import ( "strings" ) -var _ = strings.TrimSpace - // -- net.IP value type ipValue net.IP diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go new file mode 100644 index 0000000000..7dd196fe3f --- /dev/null +++ b/vendor/github.com/spf13/pflag/ip_slice.go @@ -0,0 +1,148 @@ +package pflag + +import ( + "fmt" + "io" + "net" + "strings" +) + +// -- ipSlice Value +type ipSliceValue struct { + value *[]net.IP + changed bool +} + +func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue { + ipsv := new(ipSliceValue) + ipsv.value = p + *ipsv.value = val + return ipsv +} + +// Set converts, and assigns, the comma-separated IP argument string representation as the []net.IP value of this flag. +// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended. +func (s *ipSliceValue) Set(val string) error { + + // remove all quote characters + rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") + + // read flag arguments with CSV parser + ipStrSlice, err := readAsCSV(rmQuote.Replace(val)) + if err != nil && err != io.EOF { + return err + } + + // parse ip values into slice + out := make([]net.IP, 0, len(ipStrSlice)) + for _, ipStr := range ipStrSlice { + ip := net.ParseIP(strings.TrimSpace(ipStr)) + if ip == nil { + return fmt.Errorf("invalid string being converted to IP address: %s", ipStr) + } + out = append(out, ip) + } + + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + + s.changed = true + + return nil +} + +// Type returns a string that uniquely represents this flag's type. +func (s *ipSliceValue) Type() string { + return "ipSlice" +} + +// String defines a "native" format for this net.IP slice flag value. 
+func (s *ipSliceValue) String() string { + + ipStrSlice := make([]string, len(*s.value)) + for i, ip := range *s.value { + ipStrSlice[i] = ip.String() + } + + out, _ := writeAsCSV(ipStrSlice) + + return "[" + out + "]" +} + +func ipSliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Emtpy string would cause a slice with one (empty) entry + if len(val) == 0 { + return []net.IP{}, nil + } + ss := strings.Split(val, ",") + out := make([]net.IP, len(ss)) + for i, sval := range ss { + ip := net.ParseIP(strings.TrimSpace(sval)) + if ip == nil { + return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) + } + out[i] = ip + } + return out, nil +} + +// GetIPSlice returns the []net.IP value of a flag with the given name +func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) { + val, err := f.getFlagType(name, "ipSlice", ipSliceConv) + if err != nil { + return []net.IP{}, err + } + return val.([]net.IP), nil +} + +// IPSliceVar defines a ipSlice flag with specified name, default value, and usage string. +// The argument p points to a []net.IP variable in which to store the value of the flag. +func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) { + f.VarP(newIPSliceValue(value, p), name, "", usage) +} + +// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) { + f.VarP(newIPSliceValue(value, p), name, shorthand, usage) +} + +// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string. +// The argument p points to a []net.IP variable in which to store the value of the flag. +func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) { + CommandLine.VarP(newIPSliceValue(value, p), name, "", usage) +} + +// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash. +func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) { + CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage) +} + +// IPSlice defines a []net.IP flag with specified name, default value, and usage string. +// The return value is the address of a []net.IP variable that stores the value of that flag. +func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP { + p := []net.IP{} + f.IPSliceVarP(&p, name, "", value, usage) + return &p +} + +// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP { + p := []net.IP{} + f.IPSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// IPSlice defines a []net.IP flag with specified name, default value, and usage string. +// The return value is the address of a []net.IP variable that stores the value of the flag. +func IPSlice(name string, value []net.IP, usage string) *[]net.IP { + return CommandLine.IPSliceP(name, "", value, usage) +} + +// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash. 
+func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP { + return CommandLine.IPSliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go index 149b764b1e..e2c1b8bcd5 100644 --- a/vendor/github.com/spf13/pflag/ipnet.go +++ b/vendor/github.com/spf13/pflag/ipnet.go @@ -27,8 +27,6 @@ func (*ipNetValue) Type() string { return "ipNet" } -var _ = strings.TrimSpace - func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue { *p = val return (*ipNetValue)(p) diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go index 93b4e43290..276b7ed49e 100644 --- a/vendor/github.com/spf13/pflag/string_array.go +++ b/vendor/github.com/spf13/pflag/string_array.go @@ -1,11 +1,5 @@ package pflag -import ( - "fmt" -) - -var _ = fmt.Fprint - // -- stringArray Value type stringArrayValue struct { value *[]string diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go index 7829cfafb2..05eee75438 100644 --- a/vendor/github.com/spf13/pflag/string_slice.go +++ b/vendor/github.com/spf13/pflag/string_slice.go @@ -3,12 +3,9 @@ package pflag import ( "bytes" "encoding/csv" - "fmt" "strings" ) -var _ = fmt.Fprint - // -- stringSlice Value type stringSliceValue struct { value *[]string @@ -39,7 +36,7 @@ func writeAsCSV(vals []string) (string, error) { return "", err } w.Flush() - return strings.TrimSuffix(b.String(), fmt.Sprintln()), nil + return strings.TrimSuffix(b.String(), "\n"), nil } func (s *stringSliceValue) Set(val string) error { diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go new file mode 100644 index 0000000000..edd94c600a --- /dev/null +++ b/vendor/github.com/spf13/pflag/uint_slice.go @@ -0,0 +1,126 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- uintSlice Value +type uintSliceValue struct { + value *[]uint + changed bool +} + +func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue { + uisv := new(uintSliceValue) + uisv.value = p + *uisv.value = val + return uisv +} + +func (s *uintSliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]uint, len(ss)) + for i, d := range ss { + u, err := strconv.ParseUint(d, 10, 0) + if err != nil { + return err + } + out[i] = uint(u) + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + s.changed = true + return nil +} + +func (s *uintSliceValue) Type() string { + return "uintSlice" +} + +func (s *uintSliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func uintSliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []uint{}, nil + } + ss := strings.Split(val, ",") + out := make([]uint, len(ss)) + for i, d := range ss { + u, err := strconv.ParseUint(d, 10, 0) + if err != nil { + return nil, err + } + out[i] = uint(u) + } + return out, nil +} + +// GetUintSlice returns the []uint value of a flag with the given name. 
+func (f *FlagSet) GetUintSlice(name string) ([]uint, error) { + val, err := f.getFlagType(name, "uintSlice", uintSliceConv) + if err != nil { + return []uint{}, err + } + return val.([]uint), nil +} + +// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string. +// The argument p points to a []uint variable in which to store the value of the flag. +func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) { + f.VarP(newUintSliceValue(value, p), name, "", usage) +} + +// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) { + f.VarP(newUintSliceValue(value, p), name, shorthand, usage) +} + +// UintSliceVar defines a uint[] flag with specified name, default value, and usage string. +// The argument p points to a uint[] variable in which to store the value of the flag. +func UintSliceVar(p *[]uint, name string, value []uint, usage string) { + CommandLine.VarP(newUintSliceValue(value, p), name, "", usage) +} + +// UintSliceVarP is like the UintSliceVar, but accepts a shorthand letter that can be used after a single dash. +func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) { + CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage) +} + +// UintSlice defines a []uint flag with specified name, default value, and usage string. +// The return value is the address of a []uint variable that stores the value of the flag. +func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint { + p := []uint{} + f.UintSliceVarP(&p, name, "", value, usage) + return &p +} + +// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint { + p := []uint{} + f.UintSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// UintSlice defines a []uint flag with specified name, default value, and usage string. +// The return value is the address of a []uint variable that stores the value of the flag. +func UintSlice(name string, value []uint, usage string) *[]uint { + return CommandLine.UintSliceP(name, "", value, usage) +} + +// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash. +func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint { + return CommandLine.UintSliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/ugorji/go/codec/0doc.go b/vendor/github.com/ugorji/go/codec/0doc.go index caa7e0a3b5..209f9ebad5 100644 --- a/vendor/github.com/ugorji/go/codec/0doc.go +++ b/vendor/github.com/ugorji/go/codec/0doc.go @@ -64,10 +64,11 @@ Rich Feature Set includes: - Never silently skip data when decoding. User decides whether to return an error or silently skip data when keys or indexes in the data stream do not map to fields in the struct. + - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown) - Encode/Decode from/to chan types (for iterative streaming support) - Drop-in replacement for encoding/json. `json:` key in struct tag supported. - Provides a RPC Server and Client Codec for net/rpc communication protocol. - - Handle unique idiosynchracies of codecs e.g. + - Handle unique idiosyncrasies of codecs e.g. 
- For messagepack, configure how ambiguities in handling raw bytes are resolved - For messagepack, provide rpc server/client codec to support msgpack-rpc protocol defined at: @@ -171,6 +172,8 @@ package codec // TODO: // +// - optimization for codecgen: +// if len of entity is <= 3 words, then support a value receiver for encode. // - (En|De)coder should store an error when it occurs. // Until reset, subsequent calls return that error that was stored. // This means that free panics must go away. @@ -178,16 +181,19 @@ package codec // - Decoding using a chan is good, but incurs concurrency costs. // This is because there's no fast way to use a channel without it // having to switch goroutines constantly. -// Callback pattern is still the best. Maybe cnsider supporting something like: +// Callback pattern is still the best. Maybe consider supporting something like: // type X struct { // Name string // Ys []Y // Ys chan <- Y -// Ys func(interface{}) -> call this interface for each entry in there. +// Ys func(Y) -> call this function for each entry // } // - Consider adding a isZeroer interface { isZero() bool } // It is used within isEmpty, for omitEmpty support. // - Consider making Handle used AS-IS within the encoding/decoding session. // This means that we don't cache Handle information within the (En|De)coder, // except we really need it at Reset(...) -// - Handle recursive types during encoding/decoding? +// - Consider adding math/big support +// - Consider reducing the size of the generated functions: +// Maybe use one loop, and put the conditionals in the loop. +// for ... { if cLen > 0 { if j == cLen { break } } else if dd.CheckBreak() { break } } diff --git a/vendor/github.com/ugorji/go/codec/README.md b/vendor/github.com/ugorji/go/codec/README.md index a790a52bb9..91cb3a27bd 100644 --- a/vendor/github.com/ugorji/go/codec/README.md +++ b/vendor/github.com/ugorji/go/codec/README.md @@ -68,7 +68,7 @@ Rich Feature Set includes: - Encode/Decode from/to chan types (for iterative streaming support) - Drop-in replacement for encoding/json. `json:` key in struct tag supported. - Provides a RPC Server and Client Codec for net/rpc communication protocol. - - Handle unique idiosynchracies of codecs e.g. + - Handle unique idiosyncrasies of codecs e.g. - For messagepack, configure how ambiguities in handling raw bytes are resolved - For messagepack, provide rpc server/client codec to support msgpack-rpc protocol defined at: diff --git a/vendor/github.com/ugorji/go/codec/binc.go b/vendor/github.com/ugorji/go/codec/binc.go index c884d14dce..33120dcb61 100644 --- a/vendor/github.com/ugorji/go/codec/binc.go +++ b/vendor/github.com/ugorji/go/codec/binc.go @@ -348,6 +348,13 @@ func (d *bincDecDriver) readNextBd() { d.bdRead = true } +func (d *bincDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + func (d *bincDecDriver) ContainerType() (vt valueType) { if d.vd == bincVdSpecial && d.vs == bincSpNil { return valueTypeNil @@ -705,7 +712,7 @@ func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) } func (d *bincDecDriver) DecodeString() (s string) { - // DecodeBytes does not accomodate symbols, whose impl stores string version in map. + // DecodeBytes does not accommodate symbols, whose impl stores string version in map. // Use decStringAndBytes directly. 
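// Illustrative sketch, not from the vendored sources: the cyclic-reference
// detection mentioned in the feature list above is opt-in through the new
// EncodeOptions.CheckCircularRef field added later in this patch. The node
// type and handle choice are assumptions for the example
// (assumes: import "github.com/ugorji/go/codec").
type node struct {
	Name string
	Next *node
}

func encodeCycle() error {
	n := &node{Name: "a"}
	n.Next = n // self-reference

	var h codec.CborHandle
	h.CheckCircularRef = true // without this, the encoder recurses until the stack overflows

	var out []byte
	// With the check enabled, Encode returns a "circular reference found" error instead.
	return codec.NewEncoderBytes(&out, &h).Encode(n)
}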
// return string(d.DecodeBytes(d.b[:], true, true)) _, s = d.decStringAndBytes(d.b[:], true, true) @@ -908,10 +915,14 @@ func (h *BincHandle) newDecDriver(d *Decoder) decDriver { func (e *bincEncDriver) reset() { e.w = e.e.w + e.s = 0 + e.m = nil } func (d *bincDecDriver) reset() { d.r = d.d.r + d.s = nil + d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0 } var _ decDriver = (*bincDecDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/cbor.go b/vendor/github.com/ugorji/go/codec/cbor.go index 0e5d32b2ea..4fa349ac87 100644 --- a/vendor/github.com/ugorji/go/codec/cbor.go +++ b/vendor/github.com/ugorji/go/codec/cbor.go @@ -188,6 +188,13 @@ func (d *cborDecDriver) readNextBd() { d.bdRead = true } +func (d *cborDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + func (d *cborDecDriver) ContainerType() (vt valueType) { if d.bd == cborBdNil { return valueTypeNil @@ -508,7 +515,7 @@ func (d *cborDecDriver) DecodeNaked() { n.v = valueTypeExt n.u = d.decUint() n.l = nil - d.bdRead = false + // d.bdRead = false // d.d.decode(&re.Value) // handled by decode itself. // decodeFurther = true default: @@ -578,6 +585,7 @@ func (e *cborEncDriver) reset() { func (d *cborDecDriver) reset() { d.r = d.d.r + d.bd, d.bdRead = 0, false } var _ decDriver = (*cborDecDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/decode.go b/vendor/github.com/ugorji/go/codec/decode.go index b3b99f0367..52c1dfe836 100644 --- a/vendor/github.com/ugorji/go/codec/decode.go +++ b/vendor/github.com/ugorji/go/codec/decode.go @@ -91,10 +91,12 @@ type decDriver interface { uncacheRead() } -type decNoSeparator struct{} +type decNoSeparator struct { +} + +func (_ decNoSeparator) ReadEnd() {} -func (_ decNoSeparator) ReadEnd() {} -func (_ decNoSeparator) uncacheRead() {} +// func (_ decNoSeparator) uncacheRead() {} type DecodeOptions struct { // MapType specifies type to use during schema-less decoding of a map in the stream. @@ -161,6 +163,15 @@ type DecodeOptions struct { // Note: Handles will be smart when using the intern functionality. // So everything will not be interned. InternString bool + + // PreferArrayOverSlice controls whether to decode to an array or a slice. + // + // This only impacts decoding into a nil interface{}. + // Consequently, it has no effect on codecgen. + // + // *Note*: This only applies if using go1.5 and above, + // as it requires reflect.ArrayOf support which was absent before go1.5. 
+ PreferArrayOverSlice bool } // ------------------------------------ @@ -433,6 +444,10 @@ func (f *decFnInfo) rawExt(rv reflect.Value) { f.d.d.DecodeExt(rv.Addr().Interface(), 0, nil) } +func (f *decFnInfo) raw(rv reflect.Value) { + rv.SetBytes(f.d.raw()) +} + func (f *decFnInfo) ext(rv reflect.Value) { f.d.d.DecodeExt(rv.Addr().Interface(), f.xfTag, f.xfFn) } @@ -583,14 +598,16 @@ func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) { if d.mtid == 0 || d.mtid == mapIntfIntfTypId { l := len(n.ms) n.ms = append(n.ms, nil) - d.decode(&n.ms[l]) - rvn = reflect.ValueOf(&n.ms[l]).Elem() + var v2 interface{} = &n.ms[l] + d.decode(v2) + rvn = reflect.ValueOf(v2).Elem() n.ms = n.ms[:l] } else if d.mtid == mapStrIntfTypId { // for json performance l := len(n.ns) n.ns = append(n.ns, nil) - d.decode(&n.ns[l]) - rvn = reflect.ValueOf(&n.ns[l]).Elem() + var v2 interface{} = &n.ns[l] + d.decode(v2) + rvn = reflect.ValueOf(v2).Elem() n.ns = n.ns[:l] } else { rvn = reflect.New(d.h.MapType).Elem() @@ -601,9 +618,13 @@ func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) { if d.stid == 0 || d.stid == intfSliceTypId { l := len(n.ss) n.ss = append(n.ss, nil) - d.decode(&n.ss[l]) - rvn = reflect.ValueOf(&n.ss[l]).Elem() + var v2 interface{} = &n.ss[l] + d.decode(v2) n.ss = n.ss[:l] + rvn = reflect.ValueOf(v2).Elem() + if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice { + rvn = reflectArrayOf(rvn) + } } else { rvn = reflect.New(d.h.SliceType).Elem() d.decodeValue(rvn, nil) @@ -615,9 +636,9 @@ func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) { l := len(n.is) n.is = append(n.is, nil) v2 := &n.is[l] - n.is = n.is[:l] d.decode(v2) v = *v2 + n.is = n.is[:l] } bfn := d.h.getExtForTag(tag) if bfn == nil { @@ -1166,7 +1187,7 @@ type decRtidFn struct { // primitives are being decoded. // // maps and arrays are not handled by this mechanism. -// However, RawExt is, and we accomodate for extensions that decode +// However, RawExt is, and we accommodate for extensions that decode // RawExt from DecodeNaked, but need to decode the value subsequently. // kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat. 
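// Illustrative sketch, not part of the patch: using the PreferArrayOverSlice
// option declared just above through a JSON handle (handle choice assumed;
// assumes: import "github.com/ugorji/go/codec").
func decodePreferArray(data []byte) (interface{}, error) {
	var h codec.JsonHandle
	h.PreferArrayOverSlice = true // only affects decoding into a bare interface{}

	var v interface{}
	err := codec.NewDecoderBytes(data, &h).Decode(&v)
	// On go1.5+ an input array now lands in v as a [N]interface{} array
	// rather than a []interface{} slice.
	return v, err
}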
// @@ -1453,8 +1474,8 @@ func (d *Decoder) swallow() { l := len(n.is) n.is = append(n.is, nil) v2 := &n.is[l] - n.is = n.is[:l] d.decode(v2) + n.is = n.is[:l] } } } @@ -1504,6 +1525,8 @@ func (d *Decoder) decode(iv interface{}) { *v = 0 case *[]uint8: *v = nil + case *Raw: + *v = nil case reflect.Value: if v.Kind() != reflect.Ptr || v.IsNil() { d.errNotValidPtrValue(v) @@ -1543,7 +1566,6 @@ func (d *Decoder) decode(iv interface{}) { d.decodeValueNotNil(v.Elem(), nil) case *string: - *v = d.d.DecodeString() case *bool: *v = d.d.DecodeBool() @@ -1574,6 +1596,9 @@ func (d *Decoder) decode(iv interface{}) { case *[]uint8: *v = d.d.DecodeBytes(*v, false, false) + case *Raw: + *v = d.raw() + case *interface{}: d.decodeValueNotNil(reflect.ValueOf(iv).Elem(), nil) @@ -1695,6 +1720,8 @@ func (d *Decoder) getDecFn(rt reflect.Type, checkFastpath, checkCodecSelfer bool fn.f = (*decFnInfo).selferUnmarshal } else if rtid == rawExtTypId { fn.f = (*decFnInfo).rawExt + } else if rtid == rawTypId { + fn.f = (*decFnInfo).raw } else if d.d.IsBuiltinType(rtid) { fn.f = (*decFnInfo).builtin } else if xfFn := d.h.getExt(rtid); xfFn != nil { @@ -1793,12 +1820,13 @@ func (d *Decoder) getDecFn(rt reflect.Type, checkFastpath, checkCodecSelfer bool } func (d *Decoder) structFieldNotFound(index int, rvkencname string) { + // NOTE: rvkencname may be a stringView, so don't pass it to another function. if d.h.ErrorIfNoField { if index >= 0 { d.errorf("no matching struct field found when decoding stream array at index %v", index) return } else if rvkencname != "" { - d.errorf("no matching struct field found when decoding stream map with key %s", rvkencname) + d.errorf("no matching struct field found when decoding stream map with key " + rvkencname) return } } @@ -1862,6 +1890,7 @@ func (d *Decoder) intern(s string) { } } +// nextValueBytes returns the next value in the stream as a set of bytes. func (d *Decoder) nextValueBytes() []byte { d.d.uncacheRead() d.r.track() @@ -1869,6 +1898,15 @@ func (d *Decoder) nextValueBytes() []byte { return d.r.stopTrack() } +func (d *Decoder) raw() []byte { + // ensure that this is not a view into the bytes + // i.e. make new copy always. + bs := d.nextValueBytes() + bs2 := make([]byte, len(bs)) + copy(bs2, bs) + return bs2 +} + // -------------------------------------------------- // decSliceHelper assists when decoding into a slice, from a map or an array in the stream. diff --git a/vendor/github.com/ugorji/go/codec/decode_go.go b/vendor/github.com/ugorji/go/codec/decode_go.go new file mode 100644 index 0000000000..ba289cef61 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/decode_go.go @@ -0,0 +1,16 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.5 + +package codec + +import "reflect" + +const reflectArrayOfSupported = true + +func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) { + rvn2 = reflect.New(reflect.ArrayOf(rvn.Len(), intfTyp)).Elem() + reflect.Copy(rvn2, rvn) + return +} diff --git a/vendor/github.com/ugorji/go/codec/decode_go14.go b/vendor/github.com/ugorji/go/codec/decode_go14.go new file mode 100644 index 0000000000..50063bc8fc --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/decode_go14.go @@ -0,0 +1,14 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
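// Illustrative sketch, not from the vendored sources: the Raw support added in
// this patch lets a caller capture the next value in a stream verbatim on
// decode, and splice pre-encoded bytes back in on encode once the new
// EncodeOptions.Raw field (see encode.go further down) is set. Handle choice
// is an assumption (assumes: import "github.com/ugorji/go/codec").
func rawRoundTrip(stream []byte) ([]byte, error) {
	var h codec.JsonHandle

	// Decode side: a *codec.Raw destination receives a copy of the raw bytes
	// of the next value, without interpreting them.
	var r codec.Raw
	if err := codec.NewDecoderBytes(stream, &h).Decode(&r); err != nil {
		return nil, err
	}

	// Encode side: re-emitting a Raw value must be explicitly enabled;
	// otherwise the encoder errors with "Raw values cannot be encoded".
	h.Raw = true
	var out []byte
	err := codec.NewEncoderBytes(&out, &h).Encode(r)
	return out, err
}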
+ +// +build !go1.5 + +package codec + +import "reflect" + +const reflectArrayOfSupported = false + +func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) { + panic("reflect.ArrayOf unsupported") +} diff --git a/vendor/github.com/ugorji/go/codec/encode.go b/vendor/github.com/ugorji/go/codec/encode.go index 99af6fa555..c2cef812ec 100644 --- a/vendor/github.com/ugorji/go/codec/encode.go +++ b/vendor/github.com/ugorji/go/codec/encode.go @@ -110,6 +110,28 @@ type EncodeOptions struct { // Canonical bool + // CheckCircularRef controls whether we check for circular references + // and error fast during an encode. + // + // If enabled, an error is received if a pointer to a struct + // references itself either directly or through one of its fields (iteratively). + // + // This is opt-in, as there may be a performance hit to checking circular references. + CheckCircularRef bool + + // RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers + // when checking if a value is empty. + // + // Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls. + RecursiveEmptyCheck bool + + // Raw controls whether we encode Raw values. + // This is a "dangerous" option and must be explicitly set. + // If set, we blindly encode Raw values as-is, without checking + // if they are a correct representation of a value in that format. + // If unset, we error out. + Raw bool + // AsSymbols defines what should be encoded as symbols. // // Encoding as symbols can reduce the encoded size significantly. @@ -132,13 +154,16 @@ type simpleIoEncWriterWriter struct { w io.Writer bw io.ByteWriter sw ioEncStringWriter + bs [1]byte } func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) { if o.bw != nil { return o.bw.WriteByte(c) } - _, err = o.w.Write([]byte{c}) + // _, err = o.w.Write([]byte{c}) + o.bs[0] = c + _, err = o.w.Write(o.bs[:]) return } @@ -210,45 +235,57 @@ type bytesEncWriter struct { } func (z *bytesEncWriter) writeb(s []byte) { - if len(s) > 0 { - c := z.grow(len(s)) - copy(z.b[c:], s) + if len(s) == 0 { + return } + oc, a := z.growNoAlloc(len(s)) + if a { + z.growAlloc(len(s), oc) + } + copy(z.b[oc:], s) } func (z *bytesEncWriter) writestr(s string) { - if len(s) > 0 { - c := z.grow(len(s)) - copy(z.b[c:], s) + if len(s) == 0 { + return } + oc, a := z.growNoAlloc(len(s)) + if a { + z.growAlloc(len(s), oc) + } + copy(z.b[oc:], s) } func (z *bytesEncWriter) writen1(b1 byte) { - c := z.grow(1) - z.b[c] = b1 + oc, a := z.growNoAlloc(1) + if a { + z.growAlloc(1, oc) + } + z.b[oc] = b1 } func (z *bytesEncWriter) writen2(b1 byte, b2 byte) { - c := z.grow(2) - z.b[c] = b1 - z.b[c+1] = b2 + oc, a := z.growNoAlloc(2) + if a { + z.growAlloc(2, oc) + } + z.b[oc+1] = b2 + z.b[oc] = b1 } func (z *bytesEncWriter) atEndOfEncode() { *(z.out) = z.b[:z.c] } -func (z *bytesEncWriter) grow(n int) (oldcursor int) { +// have a growNoalloc(n int), which can be inlined. +// if allocation is needed, then call growAlloc(n int) + +func (z *bytesEncWriter) growNoAlloc(n int) (oldcursor int, allocNeeded bool) { oldcursor = z.c - z.c = oldcursor + n + z.c = z.c + n if z.c > len(z.b) { if z.c > cap(z.b) { - // appendslice logic (if cap < 1024, *2, else *1.25): more expensive. many copy calls. 
- // bytes.Buffer model (2*cap + n): much better - // bs := make([]byte, 2*cap(z.b)+n) - bs := make([]byte, growCap(cap(z.b), 1, n)) - copy(bs, z.b[:oldcursor]) - z.b = bs + allocNeeded = true } else { z.b = z.b[:cap(z.b)] } @@ -256,6 +293,15 @@ func (z *bytesEncWriter) grow(n int) (oldcursor int) { return } +func (z *bytesEncWriter) growAlloc(n int, oldcursor int) { + // appendslice logic (if cap < 1024, *2, else *1.25): more expensive. many copy calls. + // bytes.Buffer model (2*cap + n): much better + // bs := make([]byte, 2*cap(z.b)+n) + bs := make([]byte, growCap(cap(z.b), 1, n)) + copy(bs, z.b[:oldcursor]) + z.b = bs +} + // --------------------------------------------- type encFnInfo struct { @@ -270,6 +316,10 @@ func (f *encFnInfo) builtin(rv reflect.Value) { f.e.e.EncodeBuiltin(f.ti.rtid, rv.Interface()) } +func (f *encFnInfo) raw(rv reflect.Value) { + f.e.raw(rv.Interface().(Raw)) +} + func (f *encFnInfo) rawExt(rv reflect.Value) { // rev := rv.Interface().(RawExt) // f.e.e.EncodeRawExt(&rev, f.e) @@ -296,7 +346,7 @@ func (f *encFnInfo) getValueForMarshalInterface(rv reflect.Value, indir int8) (v v = rv.Interface() } else if indir == -1 { // If a non-pointer was passed to Encode(), then that value is not addressable. - // Take addr if addresable, else copy value to an addressable value. + // Take addr if addressable, else copy value to an addressable value. if rv.CanAddr() { v = rv.Addr().Interface() } else { @@ -464,7 +514,7 @@ func (f *encFnInfo) kSlice(rv reflect.Value) { for j := 0; j < l; j++ { if cr != nil { if ti.mbs { - if l%2 == 0 { + if j%2 == 0 { cr.sendContainerState(containerMapKey) } else { cr.sendContainerState(containerMapValue) @@ -503,7 +553,7 @@ func (f *encFnInfo) kStruct(rv reflect.Value) { newlen := len(fti.sfi) // Use sync.Pool to reduce allocating slices unnecessarily. - // The cost of the occasional locking is less than the cost of new allocation. + // The cost of sync.Pool is less than the cost of new allocation. pool, poolv, fkvs := encStructPoolGet(newlen) // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct) @@ -512,25 +562,20 @@ func (f *encFnInfo) kStruct(rv reflect.Value) { } newlen = 0 var kv stringRv + recur := e.h.RecursiveEmptyCheck for _, si := range tisfi { kv.r = si.field(rv, false) - // if si.i != -1 { - // rvals[newlen] = rv.Field(int(si.i)) - // } else { - // rvals[newlen] = rv.FieldByIndex(si.is) - // } if toMap { - if si.omitEmpty && isEmptyValue(kv.r) { + if si.omitEmpty && isEmptyValue(kv.r, recur, recur) { continue } kv.v = si.encName } else { // use the zero value. 
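// Illustrative sketch, not part of the patch: the RecursiveEmptyCheck option
// added to EncodeOptions above, feeding the isEmptyValue(kv.r, recur, recur)
// calls shown here. Struct and field names are invented
// (assumes: import "github.com/ugorji/go/codec").
type inner struct{ N int }

type outer struct {
	Label string `codec:"label,omitempty"`
	Extra *inner `codec:"extra,omitempty"`
}

func encodeOmitEmpty() ([]byte, error) {
	var h codec.JsonHandle
	h.RecursiveEmptyCheck = true // dereference pointers/interfaces when judging emptiness

	var out []byte
	// Extra points at a zero-valued struct; with RecursiveEmptyCheck it counts as
	// empty and is omitted, at the cost of extra reflection per field.
	err := codec.NewEncoderBytes(&out, &h).Encode(outer{Extra: &inner{}})
	return out, err
}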
// if a reference or struct, set to nil (so you do not output too much) - if si.omitEmpty && isEmptyValue(kv.r) { + if si.omitEmpty && isEmptyValue(kv.r, recur, recur) { switch kv.r.Kind() { - case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, - reflect.Map, reflect.Slice: + case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, reflect.Map, reflect.Slice: kv.r = reflect.Value{} //encode as nil } } @@ -541,7 +586,7 @@ func (f *encFnInfo) kStruct(rv reflect.Value) { // debugf(">>>> kStruct: newlen: %v", newlen) // sep := !e.be - ee := e.e //don't dereference everytime + ee := e.e //don't dereference every time if toMap { ee.EncodeMapStart(newlen) @@ -596,13 +641,15 @@ func (f *encFnInfo) kStruct(rv reflect.Value) { // f.e.encodeValue(rv.Elem()) // } -func (f *encFnInfo) kInterface(rv reflect.Value) { - if rv.IsNil() { - f.e.e.EncodeNil() - return - } - f.e.encodeValue(rv.Elem(), nil) -} +// func (f *encFnInfo) kInterface(rv reflect.Value) { +// println("kInterface called") +// debug.PrintStack() +// if rv.IsNil() { +// f.e.e.EncodeNil() +// return +// } +// f.e.encodeValue(rv.Elem(), nil) +// } func (f *encFnInfo) kMap(rv reflect.Value) { ee := f.e.e @@ -877,6 +924,7 @@ type Encoder struct { // as the handler MAY need to do some coordination. w encWriter s []encRtidFn + ci set be bool // is binary encoding js bool // is json handle @@ -925,7 +973,7 @@ func newEncoder(h Handle) *Encoder { // Reset the Encoder with a new output stream. // -// This accomodates using the state of the Encoder, +// This accommodates using the state of the Encoder, // where it has "cached" information about sub-engines. func (e *Encoder) Reset(w io.Writer) { ww, ok := w.(ioEncWriterWriter) @@ -1032,20 +1080,6 @@ func (e *Encoder) MustEncode(v interface{}) { e.w.atEndOfEncode() } -// comment out these (Must)Write methods. They were only put there to support cbor. -// However, users already have access to the streams, and can write directly. -// -// // Write allows users write to the Encoder stream directly. -// func (e *Encoder) Write(bs []byte) (err error) { -// defer panicToErr(&err) -// e.w.writeb(bs) -// return -// } -// // MustWrite is like write, but panics if unable to Write. -// func (e *Encoder) MustWrite(bs []byte) { -// e.w.writeb(bs) -// } - func (e *Encoder) encode(iv interface{}) { // if ics, ok := iv.(Selfer); ok { // ics.CodecEncodeSelf(e) @@ -1057,7 +1091,8 @@ func (e *Encoder) encode(iv interface{}) { e.e.EncodeNil() case Selfer: v.CodecEncodeSelf(e) - + case Raw: + e.raw(v) case reflect.Value: e.encodeValue(v, nil) @@ -1133,20 +1168,23 @@ func (e *Encoder) encode(iv interface{}) { } } -func (e *Encoder) encodeI(iv interface{}, checkFastpath, checkCodecSelfer bool) { - if rv, proceed := e.preEncodeValue(reflect.ValueOf(iv)); proceed { - rt := rv.Type() - rtid := reflect.ValueOf(rt).Pointer() - fn := e.getEncFn(rtid, rt, checkFastpath, checkCodecSelfer) - fn.f(&fn.i, rv) - } -} - -func (e *Encoder) preEncodeValue(rv reflect.Value) (rv2 reflect.Value, proceed bool) { +func (e *Encoder) preEncodeValue(rv reflect.Value) (rv2 reflect.Value, sptr uintptr, proceed bool) { // use a goto statement instead of a recursive function for ptr/interface. TOP: switch rv.Kind() { - case reflect.Ptr, reflect.Interface: + case reflect.Ptr: + if rv.IsNil() { + e.e.EncodeNil() + return + } + rv = rv.Elem() + if e.h.CheckCircularRef && rv.Kind() == reflect.Struct { + // TODO: Movable pointers will be an issue here. Future problem. 
+ sptr = rv.UnsafeAddr() + break TOP + } + goto TOP + case reflect.Interface: if rv.IsNil() { e.e.EncodeNil() return @@ -1163,18 +1201,40 @@ TOP: return } - return rv, true + proceed = true + rv2 = rv + return +} + +func (e *Encoder) doEncodeValue(rv reflect.Value, fn *encFn, sptr uintptr, + checkFastpath, checkCodecSelfer bool) { + if sptr != 0 { + if (&e.ci).add(sptr) { + e.errorf("circular reference found: # %d", sptr) + } + } + if fn == nil { + rt := rv.Type() + rtid := reflect.ValueOf(rt).Pointer() + // fn = e.getEncFn(rtid, rt, true, true) + fn = e.getEncFn(rtid, rt, checkFastpath, checkCodecSelfer) + } + fn.f(&fn.i, rv) + if sptr != 0 { + (&e.ci).remove(sptr) + } +} + +func (e *Encoder) encodeI(iv interface{}, checkFastpath, checkCodecSelfer bool) { + if rv, sptr, proceed := e.preEncodeValue(reflect.ValueOf(iv)); proceed { + e.doEncodeValue(rv, nil, sptr, checkFastpath, checkCodecSelfer) + } } func (e *Encoder) encodeValue(rv reflect.Value, fn *encFn) { // if a valid fn is passed, it MUST BE for the dereferenced type of rv - if rv, proceed := e.preEncodeValue(rv); proceed { - if fn == nil { - rt := rv.Type() - rtid := reflect.ValueOf(rt).Pointer() - fn = e.getEncFn(rtid, rt, true, true) - } - fn.f(&fn.i, rv) + if rv, sptr, proceed := e.preEncodeValue(rv); proceed { + e.doEncodeValue(rv, fn, sptr, true, true) } } @@ -1217,6 +1277,8 @@ func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCo if checkCodecSelfer && ti.cs { fn.f = (*encFnInfo).selferMarshal + } else if rtid == rawTypId { + fn.f = (*encFnInfo).raw } else if rtid == rawExtTypId { fn.f = (*encFnInfo).rawExt } else if e.e.IsBuiltinType(rtid) { @@ -1234,7 +1296,7 @@ func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCo } else { rk := rt.Kind() if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { - if rt.PkgPath() == "" { + if rt.PkgPath() == "" { // un-named slice or map if idx := fastpathAV.index(rtid); idx != -1 { fn.f = fastpathAV[idx].encfn } @@ -1284,10 +1346,11 @@ func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCo fn.f = (*encFnInfo).kSlice case reflect.Struct: fn.f = (*encFnInfo).kStruct + // reflect.Ptr and reflect.Interface are handled already by preEncodeValue // case reflect.Ptr: // fn.f = (*encFnInfo).kPtr - case reflect.Interface: - fn.f = (*encFnInfo).kInterface + // case reflect.Interface: + // fn.f = (*encFnInfo).kInterface case reflect.Map: fn.f = (*encFnInfo).kMap default: @@ -1320,6 +1383,18 @@ func (e *Encoder) asis(v []byte) { } } +func (e *Encoder) raw(vv Raw) { + v := []byte(vv) + if !e.h.Raw { + e.errorf("Raw values cannot be encoded: %v", v) + } + if e.as == nil { + e.w.writeb(v) + } else { + e.as.EncodeAsis(v) + } +} + func (e *Encoder) errorf(format string, params ...interface{}) { err := fmt.Errorf(format, params...) 
panic(err) @@ -1353,25 +1428,6 @@ func encStructPoolGet(newlen int) (p *sync.Pool, v interface{}, s []stringRv) { // panic(errors.New("encStructPoolLen must be equal to 4")) // defensive, in case it is changed // } // idxpool := newlen / 8 - - // if pool == nil { - // fkvs = make([]stringRv, newlen) - // } else { - // poolv = pool.Get() - // switch vv := poolv.(type) { - // case *[8]stringRv: - // fkvs = vv[:newlen] - // case *[16]stringRv: - // fkvs = vv[:newlen] - // case *[32]stringRv: - // fkvs = vv[:newlen] - // case *[64]stringRv: - // fkvs = vv[:newlen] - // case *[128]stringRv: - // fkvs = vv[:newlen] - // } - // } - if newlen <= 8 { p = &encStructPool[0] v = p.Get() diff --git a/vendor/github.com/ugorji/go/codec/fast-path.generated.go b/vendor/github.com/ugorji/go/codec/fast-path.generated.go index d968a500fd..f2e5d2dcf6 100644 --- a/vendor/github.com/ugorji/go/codec/fast-path.generated.go +++ b/vendor/github.com/ugorji/go/codec/fast-path.generated.go @@ -23,7 +23,7 @@ package codec // Currently support // - slice of all builtin types, // - map of all builtin types to string or interface value -// - symetrical maps of all builtin types (e.g. str-str, uint8-uint8) +// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8) // This should provide adequate "typical" implementations. // // Note that fast track decode functions must handle values for which an address cannot be obtained. @@ -38,6 +38,8 @@ import ( "sort" ) +const fastpathEnabled = true + const fastpathCheckNilFalse = false // for reflect const fastpathCheckNilTrue = true // for type switch @@ -81,9 +83,6 @@ var fastpathAV fastpathA // due to possible initialization loop error, make fastpath in an init() func init() { - if !fastpathEnabled { - return - } i := 0 fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) { xrt := reflect.TypeOf(v) @@ -373,9 +372,6 @@ func init() { // -- -- fast path type switch func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { - if !fastpathEnabled { - return false - } switch v := iv.(type) { case []interface{}: @@ -1741,9 +1737,6 @@ func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { } func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { - if !fastpathEnabled { - return false - } switch v := iv.(type) { case []interface{}: @@ -1829,9 +1822,6 @@ func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { } func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { - if !fastpathEnabled { - return false - } switch v := iv.(type) { case map[interface{}]interface{}: @@ -3124,7 +3114,11 @@ func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { // -- -- fast path functions func (f *encFnInfo) fastpathEncSliceIntfR(rv reflect.Value) { - fastpathTV.EncSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceIntfV(v []interface{}, checkNil bool, e *Encoder) { ee := e.e @@ -3145,8 +3139,39 @@ func (_ fastpathT) EncSliceIntfV(v []interface{}, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceIntfV(v []interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, 
but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + e.encode(v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceStringR(rv reflect.Value) { - fastpathTV.EncSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceStringV(v []string, checkNil bool, e *Encoder) { ee := e.e @@ -3167,8 +3192,39 @@ func (_ fastpathT) EncSliceStringV(v []string, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceStringV(v []string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeString(c_UTF8, v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceFloat32R(rv reflect.Value) { - fastpathTV.EncSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceFloat32V(v []float32, checkNil bool, e *Encoder) { ee := e.e @@ -3189,8 +3245,39 @@ func (_ fastpathT) EncSliceFloat32V(v []float32, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceFloat32V(v []float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeFloat32(v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceFloat64R(rv reflect.Value) { - fastpathTV.EncSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceFloat64V(v []float64, checkNil bool, e *Encoder) { ee := e.e @@ -3211,8 +3298,39 @@ func (_ fastpathT) EncSliceFloat64V(v []float64, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceFloat64V(v []float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } 
+ ee.EncodeFloat64(v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceUintR(rv reflect.Value) { - fastpathTV.EncSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceUintV(v []uint, checkNil bool, e *Encoder) { ee := e.e @@ -3233,8 +3351,39 @@ func (_ fastpathT) EncSliceUintV(v []uint, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceUintV(v []uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceUint16R(rv reflect.Value) { - fastpathTV.EncSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceUint16V(v []uint16, checkNil bool, e *Encoder) { ee := e.e @@ -3255,8 +3404,39 @@ func (_ fastpathT) EncSliceUint16V(v []uint16, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceUint16V(v []uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceUint32R(rv reflect.Value) { - fastpathTV.EncSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceUint32V(v []uint32, checkNil bool, e *Encoder) { ee := e.e @@ -3277,8 +3457,39 @@ func (_ fastpathT) EncSliceUint32V(v []uint32, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceUint32V(v []uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceUint64R(rv reflect.Value) { - fastpathTV.EncSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + 
fastpathTV.EncAsMapSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceUint64V(v []uint64, checkNil bool, e *Encoder) { ee := e.e @@ -3299,8 +3510,39 @@ func (_ fastpathT) EncSliceUint64V(v []uint64, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceUint64V(v []uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceUintptrR(rv reflect.Value) { - fastpathTV.EncSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) { ee := e.e @@ -3321,8 +3563,39 @@ func (_ fastpathT) EncSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + e.encode(v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceIntR(rv reflect.Value) { - fastpathTV.EncSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceIntV(v []int, checkNil bool, e *Encoder) { ee := e.e @@ -3343,8 +3616,39 @@ func (_ fastpathT) EncSliceIntV(v []int, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceIntV(v []int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceInt8R(rv reflect.Value) { - fastpathTV.EncSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceInt8V(v []int8, checkNil bool, e *Encoder) { ee := e.e @@ -3365,8 +3669,39 
@@ func (_ fastpathT) EncSliceInt8V(v []int8, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceInt8V(v []int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceInt16R(rv reflect.Value) { - fastpathTV.EncSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceInt16V(v []int16, checkNil bool, e *Encoder) { ee := e.e @@ -3387,8 +3722,39 @@ func (_ fastpathT) EncSliceInt16V(v []int16, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceInt16V(v []int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceInt32R(rv reflect.Value) { - fastpathTV.EncSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceInt32V(v []int32, checkNil bool, e *Encoder) { ee := e.e @@ -3409,8 +3775,39 @@ func (_ fastpathT) EncSliceInt32V(v []int32, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceInt32V(v []int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceInt64R(rv reflect.Value) { - fastpathTV.EncSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceInt64V(v []int64, checkNil bool, e *Encoder) { ee := e.e @@ -3431,8 +3828,39 @@ func (_ fastpathT) EncSliceInt64V(v []int64, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceInt64V(v []int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + 
e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceBoolR(rv reflect.Value) { - fastpathTV.EncSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceBoolV(v []bool, checkNil bool, e *Encoder) { ee := e.e @@ -3453,6 +3881,33 @@ func (_ fastpathT) EncSliceBoolV(v []bool, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceBoolV(v []bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeBool(v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncMapIntfIntfR(rv reflect.Value) { fastpathTV.EncMapIntfIntfV(rv.Interface().(map[interface{}]interface{}), fastpathCheckNilFalse, f.e) } @@ -15489,9 +15944,6 @@ func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, checkNil bool, e *Encoder) { // -- -- fast path type switch func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { - if !fastpathEnabled { - return false - } switch v := iv.(type) { case []interface{}: @@ -17712,7 +18164,7 @@ func (_ fastpathT) DecSliceIntfV(v []interface{}, checkNil bool, canChange bool, changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -17771,7 +18223,7 @@ func (_ fastpathT) DecSliceIntfV(v []interface{}, checkNil bool, canChange bool, changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]interface{}, 1, 4) @@ -17846,7 +18298,7 @@ func (_ fastpathT) DecSliceStringV(v []string, checkNil bool, canChange bool, d changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -17905,7 +18357,7 @@ func (_ fastpathT) DecSliceStringV(v []string, checkNil bool, canChange bool, d changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]string, 1, 4) @@ -17979,7 +18431,7 @@ func (_ fastpathT) DecSliceFloat32V(v []float32, checkNil bool, canChange bool, changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -18038,7 +18490,7 @@ func (_ fastpathT) DecSliceFloat32V(v []float32, checkNil bool, canChange bool, changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]float32, 1, 4) @@ -18112,7 +18564,7 @@ func (_ fastpathT) DecSliceFloat64V(v []float64, checkNil bool, canChange bool, changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -18171,7 +18623,7 @@ func (_ fastpathT) DecSliceFloat64V(v []float64, checkNil bool, canChange bool, changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]float64, 1, 4) @@ -18245,7 +18697,7 @@ func (_ fastpathT) DecSliceUintV(v []uint, checkNil bool, 
canChange bool, d *Dec changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -18304,7 +18756,7 @@ func (_ fastpathT) DecSliceUintV(v []uint, checkNil bool, canChange bool, d *Dec changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]uint, 1, 4) @@ -18378,7 +18830,7 @@ func (_ fastpathT) DecSliceUint16V(v []uint16, checkNil bool, canChange bool, d changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -18437,7 +18889,7 @@ func (_ fastpathT) DecSliceUint16V(v []uint16, checkNil bool, canChange bool, d changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]uint16, 1, 4) @@ -18511,7 +18963,7 @@ func (_ fastpathT) DecSliceUint32V(v []uint32, checkNil bool, canChange bool, d changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -18570,7 +19022,7 @@ func (_ fastpathT) DecSliceUint32V(v []uint32, checkNil bool, canChange bool, d changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]uint32, 1, 4) @@ -18644,7 +19096,7 @@ func (_ fastpathT) DecSliceUint64V(v []uint64, checkNil bool, canChange bool, d changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -18703,7 +19155,7 @@ func (_ fastpathT) DecSliceUint64V(v []uint64, checkNil bool, canChange bool, d changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]uint64, 1, 4) @@ -18777,7 +19229,7 @@ func (_ fastpathT) DecSliceUintptrV(v []uintptr, checkNil bool, canChange bool, changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -18836,7 +19288,7 @@ func (_ fastpathT) DecSliceUintptrV(v []uintptr, checkNil bool, canChange bool, changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]uintptr, 1, 4) @@ -18910,7 +19362,7 @@ func (_ fastpathT) DecSliceIntV(v []int, checkNil bool, canChange bool, d *Decod changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -18969,7 +19421,7 @@ func (_ fastpathT) DecSliceIntV(v []int, checkNil bool, canChange bool, d *Decod changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]int, 1, 4) @@ -19043,7 +19495,7 @@ func (_ fastpathT) DecSliceInt8V(v []int8, checkNil bool, canChange bool, d *Dec changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -19102,7 +19554,7 @@ func (_ fastpathT) DecSliceInt8V(v []int8, checkNil bool, canChange bool, d *Dec changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]int8, 1, 4) @@ -19176,7 +19628,7 @@ func (_ fastpathT) DecSliceInt16V(v []int16, checkNil bool, canChange bool, d *D changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -19235,7 +19687,7 @@ func (_ fastpathT) DecSliceInt16V(v []int16, checkNil bool, canChange bool, d *D changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]int16, 1, 4) @@ -19309,7 +19761,7 @@ func (_ fastpathT) DecSliceInt32V(v []int32, checkNil bool, canChange bool, d *D changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -19368,7 +19820,7 @@ func (_ fastpathT) DecSliceInt32V(v []int32, checkNil bool, canChange bool, d *D changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]int32, 1, 4) @@ -19442,7 +19894,7 @@ func (_ fastpathT) DecSliceInt64V(v []int64, checkNil bool, canChange bool, d *D changed = true } slh.End() - 
return + return v, changed } if containerLenS > 0 { @@ -19501,7 +19953,7 @@ func (_ fastpathT) DecSliceInt64V(v []int64, checkNil bool, canChange bool, d *D changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]int64, 1, 4) @@ -19575,7 +20027,7 @@ func (_ fastpathT) DecSliceBoolV(v []bool, checkNil bool, canChange bool, d *Dec changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -19634,7 +20086,7 @@ func (_ fastpathT) DecSliceBoolV(v []bool, checkNil bool, canChange bool, d *Dec changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]bool, 1, 4) diff --git a/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl index 58cc6df4c5..c3ffdf93d9 100644 --- a/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl +++ b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl @@ -23,7 +23,7 @@ package codec // Currently support // - slice of all builtin types, // - map of all builtin types to string or interface value -// - symetrical maps of all builtin types (e.g. str-str, uint8-uint8) +// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8) // This should provide adequate "typical" implementations. // // Note that fast track decode functions must handle values for which an address cannot be obtained. @@ -38,6 +38,8 @@ import ( "sort" ) +const fastpathEnabled = true + const fastpathCheckNilFalse = false // for reflect const fastpathCheckNilTrue = true // for type switch @@ -81,9 +83,6 @@ var fastpathAV fastpathA // due to possible initialization loop error, make fastpath in an init() func init() { - if !fastpathEnabled { - return - } i := 0 fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) { xrt := reflect.TypeOf(v) @@ -106,9 +105,6 @@ func init() { // -- -- fast path type switch func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { - if !fastpathEnabled { - return false - } switch v := iv.(type) { {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} case []{{ .Elem }}:{{else}} @@ -126,9 +122,6 @@ func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { } func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { - if !fastpathEnabled { - return false - } switch v := iv.(type) { {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} case []{{ .Elem }}: @@ -144,9 +137,6 @@ func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { } func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { - if !fastpathEnabled { - return false - } switch v := iv.(type) { {{range .Values}}{{if not .Primitive}}{{if .MapKey }} case map[{{ .MapKey }}]{{ .Elem }}: @@ -165,7 +155,11 @@ func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) { - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) { ee := e.e @@ -182,6 +176,31 @@ func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, checkNil b 
if cr != nil { cr.sendContainerState(containerArrayEnd) }{{/* ee.EncodeEnd() */}} } +func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + {{ encmd .Elem "v2"}} + } + if cr != nil { cr.sendContainerState(containerMapEnd) } +} + {{end}}{{end}}{{end}} {{range .Values}}{{if not .Primitive}}{{if .MapKey }} @@ -257,9 +276,6 @@ func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Ele // -- -- fast path type switch func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { - if !fastpathEnabled { - return false - } switch v := iv.(type) { {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} case []{{ .Elem }}:{{else}} @@ -328,7 +344,7 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil b changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -391,7 +407,7 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil b changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]{{ .Elem }}, 1, 4) diff --git a/vendor/github.com/ugorji/go/codec/fast-path.not.go b/vendor/github.com/ugorji/go/codec/fast-path.not.go index d6f5f0c911..63e5911452 100644 --- a/vendor/github.com/ugorji/go/codec/fast-path.not.go +++ b/vendor/github.com/ugorji/go/codec/fast-path.not.go @@ -4,6 +4,8 @@ package codec import "reflect" +const fastpathEnabled = false + // The generated fast-path code is very large, and adds a few seconds to the build time. // This causes test execution, execution of small tools which use codec, etc // to take a long time. diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl index 2caae5bfda..32df54144c 100644 --- a/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl +++ b/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl @@ -1,6 +1,7 @@ {{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} -{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}} +{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} var {{var "c"}} bool {{/* // changed */}} +_ = {{var "c"}}{{end}} if {{var "l"}} == 0 { {{if isSlice }}if {{var "v"}} == nil { {{var "v"}} = []{{ .Typ }}{} @@ -26,6 +27,8 @@ if {{var "l"}} == 0 { } {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}} var {{var "rt"}} bool {{/* truncated */}} + _, _ = {{var "rl"}}, {{var "rt"}} + {{var "rr"}} = {{var "l"}} // len({{var "v"}}) if {{var "l"}} > cap({{var "v"}}) { {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}}) {{ else }}{{if not .Immutable }} diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go index 22bce776bb..eb0bdad357 100644 --- a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go +++ b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go @@ -1,4 +1,4 @@ -// //+build ignore +/* // +build ignore */ // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. 
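// Illustrative sketch, not from the vendored sources: the EncAsMapSlice*V fast
// paths generated above back the MapBySlice contract, where an even-length
// slice is written out as a map. The pairs type here is invented
// (assumes: import "github.com/ugorji/go/codec").
//
// pairs is an alternating key/value sequence; implementing MapBySlice() asks
// the encoder to emit it as a map.
type pairs []interface{}

func (pairs) MapBySlice() {}

func encodePairs() ([]byte, error) {
	var h codec.JsonHandle
	var out []byte
	// Encodes as {"name":"ingress","replicas":3}; an odd-length value is rejected
	// with the same "mapBySlice requires even slice length" error added above.
	err := codec.NewEncoderBytes(&out, &h).Encode(pairs{"name", "ingress", "replicas", 3})
	return out, err
}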
// Use of this source code is governed by a MIT license found in the LICENSE file. @@ -17,7 +17,7 @@ import ( // This file is used to generate helper code for codecgen. // The values here i.e. genHelper(En|De)coder are not to be used directly by -// library users. They WILL change continously and without notice. +// library users. They WILL change continuously and without notice. // // To help enforce this, we create an unexported type with exported members. // The only way to get the type is via the one exported type that we control (somewhat). @@ -83,6 +83,11 @@ func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { f.e.marshal(bs, fnerr, false, c_RAW) } +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncRaw(iv Raw) { + f.e.raw(iv) +} + // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperEncoder) TimeRtidIfBinc() uintptr { if _, ok := f.e.hh.(*BincHandle); ok { @@ -191,6 +196,11 @@ func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { } } +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecRaw() []byte { + return f.d.raw() +} + // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperDecoder) TimeRtidIfBinc() uintptr { if _, ok := f.d.hh.(*BincHandle); ok { diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl index 31958574ff..ad99f6671b 100644 --- a/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl +++ b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl @@ -1,4 +1,4 @@ -// //+build ignore +/* // +build ignore */ // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. @@ -17,7 +17,7 @@ import ( // This file is used to generate helper code for codecgen. // The values here i.e. genHelper(En|De)coder are not to be used directly by -// library users. They WILL change continously and without notice. +// library users. They WILL change continuously and without notice. // // To help enforce this, we create an unexported type with exported members. // The only way to get the type is via the one exported type that we control (somewhat). @@ -79,6 +79,10 @@ func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { f.e.marshal(bs, fnerr, false, c_RAW) } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncRaw(iv Raw) { + f.e.raw(iv) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperEncoder) TimeRtidIfBinc() uintptr { if _, ok := f.e.hh.(*BincHandle); ok { return timeTypId @@ -172,6 +176,10 @@ func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { } } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecRaw() []byte { + return f.d.raw() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* func (f genHelperDecoder) TimeRtidIfBinc() uintptr { if _, ok := f.d.hh.(*BincHandle); ok { return timeTypId diff --git a/vendor/github.com/ugorji/go/codec/gen.generated.go b/vendor/github.com/ugorji/go/codec/gen.generated.go index fb6f4b8097..2ace97b78c 100644 --- a/vendor/github.com/ugorji/go/codec/gen.generated.go +++ b/vendor/github.com/ugorji/go/codec/gen.generated.go @@ -68,8 +68,9 @@ z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) const genDecListTmpl = ` {{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} -{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}} +{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} var {{var "c"}} bool {{/* // changed */}} +_ = {{var "c"}}{{end}} if {{var "l"}} == 0 { {{if isSlice }}if {{var "v"}} == nil { {{var "v"}} = []{{ .Typ }}{} @@ -95,6 +96,8 @@ if {{var "l"}} == 0 { } {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}} var {{var "rt"}} bool {{/* truncated */}} + _, _ = {{var "rl"}}, {{var "rt"}} + {{var "rr"}} = {{var "l"}} // len({{var "v"}}) if {{var "l"}} > cap({{var "v"}}) { {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}}) {{ else }}{{if not .Immutable }} diff --git a/vendor/github.com/ugorji/go/codec/gen.go b/vendor/github.com/ugorji/go/codec/gen.go index a075e7c0d6..c4944dbff3 100644 --- a/vendor/github.com/ugorji/go/codec/gen.go +++ b/vendor/github.com/ugorji/go/codec/gen.go @@ -12,7 +12,6 @@ import ( "io" "io/ioutil" "math/rand" - "os" "reflect" "regexp" "sort" @@ -21,11 +20,14 @@ import ( "sync" "text/template" "time" + "unicode" + "unicode/utf8" ) // --------------------------------------------------- // codecgen supports the full cycle of reflection-based codec: // - RawExt +// - Raw // - Builtins // - Extensions // - (Binary|Text|JSON)(Unm|M)arshal @@ -76,7 +78,7 @@ import ( // codecgen will panic if the file was generated with an old version of the library in use. // // Note: -// It was a concious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil. +// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil. // This way, there isn't a function call overhead just to see that we should not enter a block of code. // GenVersion is the current version of codecgen. @@ -124,6 +126,7 @@ var ( genExpectArrayOrMapErr = errors.New("unexpected type. Expecting array/map/slice") genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__") genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`) + genCheckVendor bool ) // genRunner holds some state used during a Gen run. @@ -162,6 +165,10 @@ type genRunner struct { // // Library users: *DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.* func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeInfos, typ ...reflect.Type) { + // All types passed to this method do not have a codec.Selfer method implemented directly. + // codecgen already checks the AST and skips any types that define the codec.Selfer methods. 
+ // Consequently, there's no need to check and trim them if they implement codec.Selfer + if len(typ) == 0 { return } @@ -199,7 +206,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn x.genRefPkgs(t) } if buildTags != "" { - x.line("//+build " + buildTags) + x.line("// +build " + buildTags) x.line("") } x.line(` @@ -266,6 +273,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn x.line("type " + x.hn + " struct{}") x.line("") + x.varsfxreset() x.line("func init() {") x.linef("if %sGenVersion != %v {", x.cpfx, GenVersion) x.line("_, file, _, _ := runtime.Caller(0)") @@ -309,6 +317,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn for _, t := range x.ts { rtid := reflect.ValueOf(t).Pointer() // generate enc functions for all these slice/map types. + x.varsfxreset() x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx) x.genRequiredMethodVars(true) switch t.Kind() { @@ -323,6 +332,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn x.line("") // generate dec functions for all these slice/map types. + x.varsfxreset() x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx) x.genRequiredMethodVars(false) switch t.Kind() { @@ -377,7 +387,7 @@ func (x *genRunner) genRefPkgs(t reflect.Type) { x.imn[tpkg] = tpkg } else { x.imc++ - x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + tpkg[idx+1:] + x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false) } } } @@ -408,6 +418,10 @@ func (x *genRunner) varsfx() string { return strconv.FormatUint(x.c, 10) } +func (x *genRunner) varsfxreset() { + x.c = 0 +} + func (x *genRunner) out(s string) { if _, err := io.WriteString(x.w, s); err != nil { panic(err) @@ -494,6 +508,7 @@ func (x *genRunner) selfer(encode bool) { // always make decode use a pointer receiver, // and structs always use a ptr receiver (encode|decode) isptr := !encode || t.Kind() == reflect.Struct + x.varsfxreset() fnSigPfx := "func (x " if isptr { fnSigPfx += "*" @@ -566,9 +581,28 @@ func (x *genRunner) xtraSM(varname string, encode bool, t reflect.Type) { } else { x.linef("h.dec%s((*%s)(%s), d)", x.genMethodNameT(t), x.genTypeName(t), varname) } - if _, ok := x.tm[t]; !ok { - x.tm[t] = struct{}{} - x.ts = append(x.ts, t) + x.registerXtraT(t) +} + +func (x *genRunner) registerXtraT(t reflect.Type) { + // recursively register the types + if _, ok := x.tm[t]; ok { + return + } + var tkey reflect.Type + switch t.Kind() { + case reflect.Chan, reflect.Slice, reflect.Array: + case reflect.Map: + tkey = t.Key() + default: + return + } + x.tm[t] = struct{}{} + x.ts = append(x.ts, t) + // check if this refers to any xtra types eg. a slice of array: add the array + x.registerXtraT(t.Elem()) + if tkey != nil { + x.registerXtraT(tkey) } } @@ -608,22 +642,33 @@ func (x *genRunner) encVar(varname string, t reflect.Type) { } -// enc will encode a variable (varname) of type T, -// except t is of kind reflect.Struct or reflect.Array, wherein varname is of type *T (to prevent copying) +// enc will encode a variable (varname) of type t, +// except t is of kind reflect.Struct or reflect.Array, wherein varname is of type ptrTo(T) (to prevent copying) func (x *genRunner) enc(varname string, t reflect.Type) { - // varName here must be to a pointer to a struct/array, or to a value directly. 
rtid := reflect.ValueOf(t).Pointer() // We call CodecEncodeSelf if one of the following are honored: // - the type already implements Selfer, call that // - the type has a Selfer implementation just created, use that // - the type is in the list of the ones we will generate for, but it is not currently being generated + mi := x.varsfx() tptr := reflect.PtrTo(t) tk := t.Kind() if x.checkForSelfer(t, varname) { - if t.Implements(selferTyp) || (tptr.Implements(selferTyp) && (tk == reflect.Array || tk == reflect.Struct)) { - x.line(varname + ".CodecEncodeSelf(e)") - return + if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T + if tptr.Implements(selferTyp) || t.Implements(selferTyp) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + } else { // varname is of type T + if t.Implements(selferTyp) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } else if tptr.Implements(selferTyp) { + x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname) + x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi) + return + } } if _, ok := x.te[rtid]; ok { @@ -651,14 +696,17 @@ func (x *genRunner) enc(varname string, t reflect.Type) { } // check if - // - type is RawExt + // - type is RawExt, Raw // - the type implements (Text|JSON|Binary)(Unm|M)arshal - mi := x.varsfx() x.linef("%sm%s := z.EncBinary()", genTempVarPfx, mi) x.linef("_ = %sm%s", genTempVarPfx, mi) x.line("if false {") //start if block defer func() { x.line("}") }() //end if block + if t == rawTyp { + x.linef("} else { z.EncRaw(%v)", varname) + return + } if t == rawExtTyp { x.linef("} else { r.EncodeRawExt(%v, e)", varname) return @@ -676,15 +724,31 @@ func (x *genRunner) enc(varname string, t reflect.Type) { // first check if extensions are configued, before doing the interface conversion x.linef("} else if z.HasExtensions() && z.EncExt(%s) {", varname) } - if t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) { - x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname) - } - if t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) { - x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname) - } else if t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) { - x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname) + if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T + if t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) { + x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname) + } + if t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) { + x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname) + } else if t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) { + x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname) + } + } else { // varname is of type T + if t.Implements(binaryMarshalerTyp) { + x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname) + } else if tptr.Implements(binaryMarshalerTyp) { + x.linef("} else if %sm%s { z.EncBinaryMarshal(&%v) ", genTempVarPfx, mi, varname) + } + if t.Implements(jsonMarshalerTyp) { + x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname) + } else if tptr.Implements(jsonMarshalerTyp) { + x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", genTempVarPfx, mi, varname) + } else 
if t.Implements(textMarshalerTyp) { + x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname) + } else if tptr.Implements(textMarshalerTyp) { + x.linef("} else if !%sm%s { z.EncTextMarshal(&%v) ", genTempVarPfx, mi, varname) + } } - x.line("} else {") switch t.Kind() { @@ -922,6 +986,14 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) { } func (x *genRunner) encListFallback(varname string, t reflect.Type) { + if t.AssignableTo(uint8SliceTyp) { + x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, []byte(%s))", x.xs, varname) + return + } + if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { + x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, ([%v]byte(%s))[:])", x.xs, t.Len(), varname) + return + } i := x.varsfx() g := genTempVarPfx x.line("r.EncodeArrayStart(len(" + varname + "))") @@ -1020,6 +1092,8 @@ func (x *genRunner) decVar(varname string, t reflect.Type, canBeNil bool) { } } +// dec will decode a variable (varname) of type ptrTo(t). +// t is always a basetype (i.e. not of kind reflect.Ptr). func (x *genRunner) dec(varname string, t reflect.Type) { // assumptions: // - the varname is to a pointer already. No need to take address of it @@ -1056,7 +1130,7 @@ func (x *genRunner) dec(varname string, t reflect.Type) { } // check if - // - type is RawExt + // - type is Raw, RawExt // - the type implements (Text|JSON|Binary)(Unm|M)arshal mi := x.varsfx() x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi) @@ -1064,6 +1138,10 @@ func (x *genRunner) dec(varname string, t reflect.Type) { x.line("if false {") //start if block defer func() { x.line("}") }() //end if block + if t == rawTyp { + x.linef("} else { *%v = z.DecRaw()", varname) + return + } if t == rawExtTyp { x.linef("} else { r.DecodeExt(%v, 0, nil)", varname) return @@ -1189,59 +1267,49 @@ func (x *genRunner) dec(varname string, t reflect.Type) { } func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAsPtr bool) { - // We have to use the actual type name when doing a direct assignment. - // We don't have the luxury of casting the pointer to the underlying type. - // - // Consequently, in the situation of a - // type Message int32 - // var x Message - // var i int32 = 32 - // x = i // this will bomb - // x = Message(i) // this will work - // *((*int32)(&x)) = i // this will work - // - // Consequently, we replace: - // case reflect.Uint32: x.line(varname + " = uint32(r.DecodeUint(32))") - // with: - // case reflect.Uint32: x.line(varname + " = " + genTypeNamePrim(t, x.tc) + "(r.DecodeUint(32))") + // This should only be used for exact primitives (ie un-named types). + // Named types may be implementations of Selfer, Unmarshaler, etc. + // They should be handled by dec(...) 
- xfn := func(t reflect.Type) string { - return x.genTypeNamePrim(t) + if t.Name() != "" { + tryAsPtr = true + return } + switch t.Kind() { case reflect.Int: - x.linef("%s = %s(r.DecodeInt(codecSelferBitsize%s))", varname, xfn(t), x.xs) + x.linef("%s = r.DecodeInt(codecSelferBitsize%s)", varname, x.xs) case reflect.Int8: - x.linef("%s = %s(r.DecodeInt(8))", varname, xfn(t)) + x.linef("%s = r.DecodeInt(8)", varname) case reflect.Int16: - x.linef("%s = %s(r.DecodeInt(16))", varname, xfn(t)) + x.linef("%s = r.DecodeInt(16)", varname) case reflect.Int32: - x.linef("%s = %s(r.DecodeInt(32))", varname, xfn(t)) + x.linef("%s = r.DecodeInt(32)", varname) case reflect.Int64: - x.linef("%s = %s(r.DecodeInt(64))", varname, xfn(t)) + x.linef("%s = r.DecodeInt(64)", varname) case reflect.Uint: - x.linef("%s = %s(r.DecodeUint(codecSelferBitsize%s))", varname, xfn(t), x.xs) + x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs) case reflect.Uint8: - x.linef("%s = %s(r.DecodeUint(8))", varname, xfn(t)) + x.linef("%s = r.DecodeUint(8)", varname) case reflect.Uint16: - x.linef("%s = %s(r.DecodeUint(16))", varname, xfn(t)) + x.linef("%s = r.DecodeUint(16)", varname) case reflect.Uint32: - x.linef("%s = %s(r.DecodeUint(32))", varname, xfn(t)) + x.linef("%s = r.DecodeUint(32)", varname) case reflect.Uint64: - x.linef("%s = %s(r.DecodeUint(64))", varname, xfn(t)) + x.linef("%s = r.DecodeUint(64)", varname) case reflect.Uintptr: - x.linef("%s = %s(r.DecodeUint(codecSelferBitsize%s))", varname, xfn(t), x.xs) + x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs) case reflect.Float32: - x.linef("%s = %s(r.DecodeFloat(true))", varname, xfn(t)) + x.linef("%s = r.DecodeFloat(true)", varname) case reflect.Float64: - x.linef("%s = %s(r.DecodeFloat(false))", varname, xfn(t)) + x.linef("%s = r.DecodeFloat(false)", varname) case reflect.Bool: - x.linef("%s = %s(r.DecodeBool())", varname, xfn(t)) + x.linef("%s = r.DecodeBool()", varname) case reflect.String: - x.linef("%s = %s(r.DecodeString())", varname, xfn(t)) + x.linef("%s = r.DecodeString()", varname) default: tryAsPtr = true } @@ -1249,6 +1317,14 @@ func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAs } func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) { + if t.AssignableTo(uint8SliceTyp) { + x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false, false)") + return + } + if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { + x.linef("r.DecodeBytes( ((*[%s]byte)(%s))[:], false, true)", t.Len(), varname) + return + } type tstruc struct { TempVar string Rand string @@ -1364,7 +1440,7 @@ func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintpt if si.i != -1 { t2 = t.Field(int(si.i)) } else { - //we must accomodate anonymous fields, where the embedded field is a nil pointer in the value. + //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. // t2 = t.FieldByIndex(si.is) t2typ := t varname3 := varname @@ -1452,7 +1528,7 @@ func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid if si.i != -1 { t2 = t.Field(int(si.i)) } else { - //we must accomodate anonymous fields, where the embedded field is a nil pointer in the value. + //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. 
// t2 = t.FieldByIndex(si.is) t2typ := t varname3 := varname @@ -1569,8 +1645,6 @@ func (x *genV) MethodNamePfx(prefix string, prim bool) string { } -var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1" - // genImportPath returns import path of a non-predeclared named typed, or an empty string otherwise. // // This handles the misbehaviour that occurs when 1.5-style vendoring is enabled, @@ -1592,6 +1666,26 @@ func genImportPath(t reflect.Type) (s string) { return } +// A go identifier is (letter|_)[letter|number|_]* +func genGoIdentifier(s string, checkFirstChar bool) string { + b := make([]byte, 0, len(s)) + t := make([]byte, 4) + var n int + for i, r := range s { + if checkFirstChar && i == 0 && !unicode.IsLetter(r) { + b = append(b, '_') + } + // r must be unicode_letter, unicode_digit or _ + if unicode.IsLetter(r) || unicode.IsDigit(r) { + n = utf8.EncodeRune(t, r) + b = append(b, t[:n]...) + } else { + b = append(b, '_') + } + } + return string(b) +} + func genNonPtr(t reflect.Type) reflect.Type { for t.Kind() == reflect.Ptr { t = t.Elem() @@ -1601,7 +1695,7 @@ func genNonPtr(t reflect.Type) reflect.Type { func genTitleCaseName(s string) string { switch s { - case "interface{}": + case "interface{}", "interface {}": return "Intf" default: return strings.ToUpper(s[0:1]) + s[1:] @@ -1704,7 +1798,7 @@ func (x genInternal) FastpathLen() (l int) { func genInternalZeroValue(s string) string { switch s { - case "interface{}": + case "interface{}", "interface {}": return "nil" case "bool": return "false" @@ -1856,7 +1950,7 @@ func genInternalInit() { } var gt genInternal - // For each slice or map type, there must be a (symetrical) Encode and Decode fast-path function + // For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function for _, s := range types { gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]}) if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already. diff --git a/vendor/github.com/ugorji/go/codec/gen_15.go b/vendor/github.com/ugorji/go/codec/gen_15.go new file mode 100644 index 0000000000..ab76c31027 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen_15.go @@ -0,0 +1,12 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.5,!go1.6 + +package codec + +import "os" + +func init() { + genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1" +} diff --git a/vendor/github.com/ugorji/go/codec/gen_16.go b/vendor/github.com/ugorji/go/codec/gen_16.go new file mode 100644 index 0000000000..87c04e2e18 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen_16.go @@ -0,0 +1,12 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.6 + +package codec + +import "os" + +func init() { + genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0" +} diff --git a/vendor/github.com/ugorji/go/codec/gen_17.go b/vendor/github.com/ugorji/go/codec/gen_17.go new file mode 100644 index 0000000000..3881a43ce6 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen_17.go @@ -0,0 +1,10 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +// +build go1.7 + +package codec + +func init() { + genCheckVendor = true +} diff --git a/vendor/github.com/ugorji/go/codec/helper.go b/vendor/github.com/ugorji/go/codec/helper.go index 560014ae39..8b94fc1e4d 100644 --- a/vendor/github.com/ugorji/go/codec/helper.go +++ b/vendor/github.com/ugorji/go/codec/helper.go @@ -38,10 +38,6 @@ package codec // a length prefix, or if it used explicit breaks. If length-prefixed, we assume that // it has to be binary, and we do not even try to read separators. // -// The only codec that may suffer (slightly) is cbor, and only when decoding indefinite-length. -// It may suffer because we treat it like a text-based codec, and read separators. -// However, this read is a no-op and the cost is insignificant. -// // Philosophy // ------------ // On decode, this codec will update containers appropriately: @@ -137,17 +133,6 @@ const ( // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. recoverPanicToErr = true - // Fast path functions try to create a fast path encode or decode implementation - // for common maps and slices, by by-passing reflection altogether. - fastpathEnabled = true - - // if checkStructForEmptyValue, check structs fields to see if an empty value. - // This could be an expensive call, so possibly disable it. - checkStructForEmptyValue = false - - // if derefForIsEmptyValue, deref pointers and interfaces when checking isEmptyValue - derefForIsEmptyValue = false - // if resetSliceElemToZeroValue, then on decoding a slice, reset the element to a zero value first. // Only concern is that, if the slice already contained some garbage, we will decode into that garbage. // The chances of this are slim, so leave this "optimization". @@ -155,8 +140,10 @@ const ( resetSliceElemToZeroValue bool = false ) -var oneByteArr = [1]byte{0} -var zeroByteSlice = oneByteArr[:0:0] +var ( + oneByteArr = [1]byte{0} + zeroByteSlice = oneByteArr[:0:0] +) type charEncoding uint8 @@ -215,6 +202,41 @@ const ( containerArrayEnd ) +// sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo +type sfiIdx struct { + name string + index int +} + +// do not recurse if a containing type refers to an embedded type +// which refers back to its containing type (via a pointer). +// The second time this back-reference happens, break out, +// so as not to cause an infinite loop. +const rgetMaxRecursion = 2 + +// Anecdotally, we believe most types have <= 12 fields. +// Java's PMD rules set TooManyFields threshold to 15. 
+const rgetPoolTArrayLen = 12 + +type rgetT struct { + fNames []string + encNames []string + etypes []uintptr + sfis []*structFieldInfo +} + +type rgetPoolT struct { + fNames [rgetPoolTArrayLen]string + encNames [rgetPoolTArrayLen]string + etypes [rgetPoolTArrayLen]uintptr + sfis [rgetPoolTArrayLen]*structFieldInfo + sfiidx [rgetPoolTArrayLen]sfiIdx +} + +var rgetPool = sync.Pool{ + New: func() interface{} { return new(rgetPoolT) }, +} + type containerStateRecv interface { sendContainerState(containerState) } @@ -240,6 +262,7 @@ var ( stringTyp = reflect.TypeOf("") timeTyp = reflect.TypeOf(time.Time{}) rawExtTyp = reflect.TypeOf(RawExt{}) + rawTyp = reflect.TypeOf(Raw{}) uint8SliceTyp = reflect.TypeOf([]uint8(nil)) mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() @@ -257,6 +280,7 @@ var ( uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer() rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer() + rawTypId = reflect.ValueOf(rawTyp).Pointer() intfTypId = reflect.ValueOf(intfTyp).Pointer() timeTypId = reflect.ValueOf(timeTyp).Pointer() stringTypId = reflect.ValueOf(stringTyp).Pointer() @@ -337,6 +361,11 @@ type Handle interface { isBinary() bool } +// Raw represents raw formatted bytes. +// We "blindly" store it during encode and store the raw bytes during decode. +// Note: it is dangerous during encode, so we may gate the behaviour behind an Encode flag which must be explicitly set. +type Raw []byte + // RawExt represents raw unprocessed extension data. // Some codecs will decode extension data as a *RawExt if there is no registered extension for the tag. // @@ -347,7 +376,7 @@ type RawExt struct { // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types Data []byte // Value represents the extension, if Data is nil. - // Value is used by codecs (e.g. cbor) which use the format to do custom serialization of the types. + // Value is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types. Value interface{} } @@ -525,7 +554,7 @@ func (o *extHandle) AddExt( func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) { // o is a pointer, because we may need to initialize it if rt.PkgPath() == "" || rt.Kind() == reflect.Interface { - err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T", + err = fmt.Errorf("codec.Handle.AddExt: Takes named type, not a pointer or interface: %T", reflect.Zero(rt).Interface()) return } @@ -568,7 +597,8 @@ func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn { } type structFieldInfo struct { - encName string // encode name + encName string // encode name + fieldName string // field name // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set. @@ -714,6 +744,7 @@ type typeInfo struct { } func (ti *typeInfo) indexForEncName(name string) int { + // NOTE: name may be a stringView, so don't pass it to another function. 
//tisfi := ti.sfi const binarySearchThreshold = 16 if sfilen := len(ti.sfi); sfilen < binarySearchThreshold { @@ -828,19 +859,19 @@ func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) { } if rt.Kind() == reflect.Struct { - var siInfo *structFieldInfo + var omitEmpty bool if f, ok := rt.FieldByName(structInfoFieldName); ok { - siInfo = parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag)) + siInfo := parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag)) ti.toArray = siInfo.toArray + omitEmpty = siInfo.omitEmpty } - sfip := make([]*structFieldInfo, 0, rt.NumField()) - x.rget(rt, nil, make(map[string]bool, 16), &sfip, siInfo) - - ti.sfip = make([]*structFieldInfo, len(sfip)) - ti.sfi = make([]*structFieldInfo, len(sfip)) - copy(ti.sfip, sfip) - sort.Sort(sfiSortedByEncName(sfip)) - copy(ti.sfi, sfip) + pi := rgetPool.Get() + pv := pi.(*rgetPoolT) + pv.etypes[0] = ti.baseId + vv := rgetT{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]} + x.rget(rt, rtid, omitEmpty, nil, &vv) + ti.sfip, ti.sfi = rgetResolveSFI(vv.sfis, pv.sfiidx[:0]) + rgetPool.Put(pi) } // sfi = sfip @@ -853,17 +884,30 @@ func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) { return } -func (x *TypeInfos) rget(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool, - sfi *[]*structFieldInfo, siInfo *structFieldInfo, +func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool, + indexstack []int, pv *rgetT, ) { - for j := 0; j < rt.NumField(); j++ { + // Read up fields and store how to access the value. + // + // It uses go's rules for message selectors, + // which say that the field with the shallowest depth is selected. + // + // Note: we consciously use slices, not a map, to simulate a set. + // Typically, types have < 16 fields, + // and iteration using equals is faster than maps there + +LOOP: + for j, jlen := 0, rt.NumField(); j < jlen; j++ { f := rt.Field(j) fkind := f.Type.Kind() // skip if a func type, or is unexported, or structTag value == "-" - if fkind == reflect.Func { - continue + switch fkind { + case reflect.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer: + continue LOOP } - // if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) { + + // if r1, _ := utf8.DecodeRuneInString(f.Name); + // r1 == utf8.RuneError || !unicode.IsUpper(r1) { if f.PkgPath != "" && !f.Anonymous { // unexported, not embedded continue } @@ -872,7 +916,8 @@ func (x *TypeInfos) rget(rt reflect.Type, indexstack []int, fnameToHastag map[st continue } var si *structFieldInfo - // if anonymous and no struct tag (or it's blank), and a struct (or pointer to struct), inline it. + // if anonymous and no struct tag (or it's blank), + // and a struct (or pointer to struct), inline it. if f.Anonymous && fkind != reflect.Interface { doInline := stag == "" if !doInline { @@ -886,11 +931,31 @@ func (x *TypeInfos) rget(rt reflect.Type, indexstack []int, fnameToHastag map[st ft = ft.Elem() } if ft.Kind() == reflect.Struct { - indexstack2 := make([]int, len(indexstack)+1, len(indexstack)+4) - copy(indexstack2, indexstack) - indexstack2[len(indexstack)] = j - // indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) - x.rget(ft, indexstack2, fnameToHastag, sfi, siInfo) + // if etypes contains this, don't call rget again (as fields are already seen here) + ftid := reflect.ValueOf(ft).Pointer() + // We cannot recurse forever, but we need to track other field depths. 
+ // So - we break if we see a type twice (not the first time). + // This should be sufficient to handle an embedded type that refers to its + // owning type, which then refers to its embedded type. + processIt := true + numk := 0 + for _, k := range pv.etypes { + if k == ftid { + numk++ + if numk == rgetMaxRecursion { + processIt = false + break + } + } + } + if processIt { + pv.etypes = append(pv.etypes, ftid) + indexstack2 := make([]int, len(indexstack)+1) + copy(indexstack2, indexstack) + indexstack2[len(indexstack)] = j + // indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + x.rget(ft, ftid, omitEmpty, indexstack2, pv) + } continue } } @@ -901,36 +966,86 @@ func (x *TypeInfos) rget(rt reflect.Type, indexstack []int, fnameToHastag map[st continue } - // do not let fields with same name in embedded structs override field at higher level. - // this must be done after anonymous check, to allow anonymous field - // still include their child fields - if _, ok := fnameToHastag[f.Name]; ok { - continue - } if f.Name == "" { panic(noFieldNameToStructFieldInfoErr) } + + pv.fNames = append(pv.fNames, f.Name) + if si == nil { si = parseStructFieldInfo(f.Name, stag) } else if si.encName == "" { si.encName = f.Name } + si.fieldName = f.Name + + pv.encNames = append(pv.encNames, si.encName) + // si.ikind = int(f.Type.Kind()) if len(indexstack) == 0 { si.i = int16(j) } else { si.i = -1 - si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + si.is = make([]int, len(indexstack)+1) + copy(si.is, indexstack) + si.is[len(indexstack)] = j + // si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) } - if siInfo != nil { - if siInfo.omitEmpty { - si.omitEmpty = true + if omitEmpty { + si.omitEmpty = true + } + pv.sfis = append(pv.sfis, si) + } +} + +// resolves the struct field info got from a call to rget. +// Returns a trimmed, unsorted and sorted []*structFieldInfo. +func rgetResolveSFI(x []*structFieldInfo, pv []sfiIdx) (y, z []*structFieldInfo) { + var n int + for i, v := range x { + xn := v.encName //TODO: fieldName or encName? use encName for now. + var found bool + for j, k := range pv { + if k.name == xn { + // one of them must be reset to nil, and the index updated appropriately to the other one + if len(v.is) == len(x[k.index].is) { + } else if len(v.is) < len(x[k.index].is) { + pv[j].index = i + if x[k.index] != nil { + x[k.index] = nil + n++ + } + } else { + if x[i] != nil { + x[i] = nil + n++ + } + } + found = true + break } } - *sfi = append(*sfi, si) - fnameToHastag[f.Name] = stag != "" + if !found { + pv = append(pv, sfiIdx{xn, i}) + } } + + // remove all the nils + y = make([]*structFieldInfo, len(x)-n) + n = 0 + for _, v := range x { + if v == nil { + continue + } + y[n] = v + n++ + } + + z = make([]*structFieldInfo, len(y)) + copy(z, y) + sort.Sort(sfiSortedByEncName(z)) + return } func panicToErr(err *error) { @@ -1127,3 +1242,73 @@ type bytesISlice []bytesI func (p bytesISlice) Len() int { return len(p) } func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 } func (p bytesISlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ----------------- + +type set []uintptr + +func (s *set) add(v uintptr) (exists bool) { + // e.ci is always nil, or len >= 1 + // defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Add: %v, exists: %v\n", v, exists) }() + x := *s + if x == nil { + x = make([]uintptr, 1, 8) + x[0] = v + *s = x + return + } + // typically, length will be 1. make this perform. 
+ if len(x) == 1 { + if j := x[0]; j == 0 { + x[0] = v + } else if j == v { + exists = true + } else { + x = append(x, v) + *s = x + } + return + } + // check if it exists + for _, j := range x { + if j == v { + exists = true + return + } + } + // try to replace a "deleted" slot + for i, j := range x { + if j == 0 { + x[i] = v + return + } + } + // if unable to replace deleted slot, just append it. + x = append(x, v) + *s = x + return +} + +func (s *set) remove(v uintptr) (exists bool) { + // defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Rm: %v, exists: %v\n", v, exists) }() + x := *s + if len(x) == 0 { + return + } + if len(x) == 1 { + if x[0] == v { + x[0] = 0 + } + return + } + for i, j := range x { + if j == v { + exists = true + x[i] = 0 // set it to 0, as way to delete it. + // copy(x[i:], x[i+1:]) + // x = x[:len(x)-1] + return + } + } + return +} diff --git a/vendor/github.com/ugorji/go/codec/helper_internal.go b/vendor/github.com/ugorji/go/codec/helper_internal.go index dea981fbb7..5d0727f77f 100644 --- a/vendor/github.com/ugorji/go/codec/helper_internal.go +++ b/vendor/github.com/ugorji/go/codec/helper_internal.go @@ -70,8 +70,8 @@ func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool { return false } -func isEmptyValue(v reflect.Value) bool { - return hIsEmptyValue(v, derefForIsEmptyValue, checkStructForEmptyValue) +func isEmptyValue(v reflect.Value, deref, checkStruct bool) bool { + return hIsEmptyValue(v, deref, checkStruct) } func pruneSignExt(v []byte, pos bool) (n int) { diff --git a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go index 7c2ffc0fde..8b06a00459 100644 --- a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go +++ b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go @@ -1,4 +1,4 @@ -//+build !unsafe +// +build !unsafe // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. diff --git a/vendor/github.com/ugorji/go/codec/helper_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_unsafe.go index 373b2b1027..0f596c71aa 100644 --- a/vendor/github.com/ugorji/go/codec/helper_unsafe.go +++ b/vendor/github.com/ugorji/go/codec/helper_unsafe.go @@ -1,4 +1,4 @@ -//+build unsafe +// +build unsafe // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. @@ -16,7 +16,7 @@ type unsafeString struct { Len int } -type unsafeBytes struct { +type unsafeSlice struct { Data uintptr Len int Cap int @@ -29,8 +29,10 @@ func stringView(v []byte) string { if len(v) == 0 { return "" } - x := unsafeString{uintptr(unsafe.Pointer(&v[0])), len(v)} - return *(*string)(unsafe.Pointer(&x)) + + bx := (*unsafeSlice)(unsafe.Pointer(&v)) + sx := unsafeString{bx.Data, bx.Len} + return *(*string)(unsafe.Pointer(&sx)) } // bytesView returns a view of the string as a []byte. 
@@ -40,6 +42,8 @@ func bytesView(v string) []byte { if len(v) == 0 { return zeroByteSlice } - x := unsafeBytes{uintptr(unsafe.Pointer(&v)), len(v), len(v)} - return *(*[]byte)(unsafe.Pointer(&x)) + + sx := (*unsafeString)(unsafe.Pointer(&v)) + bx := unsafeSlice{sx.Data, sx.Len, sx.Len} + return *(*[]byte)(unsafe.Pointer(&bx)) } diff --git a/vendor/github.com/ugorji/go/codec/json.go b/vendor/github.com/ugorji/go/codec/json.go index a18a5f7063..5bb3896289 100644 --- a/vendor/github.com/ugorji/go/codec/json.go +++ b/vendor/github.com/ugorji/go/codec/json.go @@ -43,18 +43,23 @@ import ( //-------------------------------- -var jsonLiterals = [...]byte{'t', 'r', 'u', 'e', 'f', 'a', 'l', 's', 'e', 'n', 'u', 'l', 'l'} +var ( + jsonLiterals = [...]byte{'t', 'r', 'u', 'e', 'f', 'a', 'l', 's', 'e', 'n', 'u', 'l', 'l'} -var jsonFloat64Pow10 = [...]float64{ - 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, - 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, - 1e20, 1e21, 1e22, -} + jsonFloat64Pow10 = [...]float64{ + 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + 1e20, 1e21, 1e22, + } -var jsonUint64Pow10 = [...]uint64{ - 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, - 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, -} + jsonUint64Pow10 = [...]uint64{ + 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + } + + // jsonTabs and jsonSpaces are used as caches for indents + jsonTabs, jsonSpaces string +) const ( // jsonUnreadAfterDecNum controls whether we unread after decoding a number. @@ -85,8 +90,23 @@ const ( jsonNumUintMaxVal = 1< 1<<53 || v < -(1<<53)) { + e.w.writen1('"') + e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) + e.w.writen1('"') + return + } e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) } func (e *jsonEncDriver) EncodeUint(v uint64) { + if x := e.h.IntegerAsString; x == 'A' || x == 'L' && v > 1<<53 { + e.w.writen1('"') + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) + e.w.writen1('"') + return + } e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) } @@ -165,11 +251,17 @@ func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { } func (e *jsonEncDriver) EncodeArrayStart(length int) { + if e.d { + e.dl++ + } e.w.writen1('[') e.c = containerArrayStart } func (e *jsonEncDriver) EncodeMapStart(length int) { + if e.d { + e.dl++ + } e.w.writen1('{') e.c = containerMapStart } @@ -564,6 +656,11 @@ func (d *jsonDecDriver) decNum(storeBytes bool) { d.tok = b } b := d.tok + var str bool + if b == '"' { + str = true + b = d.r.readn1() + } if !(b == '+' || b == '-' || b == '.' || (b >= '0' && b <= '9')) { d.d.errorf("json: decNum: got first char '%c'", b) return @@ -578,6 +675,10 @@ func (d *jsonDecDriver) decNum(storeBytes bool) { n.reset() d.bs = d.bs[:0] + if str && storeBytes { + d.bs = append(d.bs, '"') + } + // The format of a number is as below: // parsing: sign? digit* dot? digit* e? sign? digit* // states: 0 1* 2 3* 4 5* 6 7 @@ -668,6 +769,14 @@ LOOP: default: break LOOP } + case '"': + if str { + if storeBytes { + d.bs = append(d.bs, '"') + } + b, eof = r.readn1eof() + } + break LOOP default: break LOOP } @@ -822,6 +931,11 @@ func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut [ if isstring { return d.bs } + // if appendStringAsBytes returned a zero-len slice, then treat as nil. + // This should only happen for null, and "". 
+ if len(d.bs) == 0 { + return nil + } bs0 := d.bs slen := base64.StdEncoding.DecodedLen(len(bs0)) if slen <= cap(bs) { @@ -859,6 +973,14 @@ func (d *jsonDecDriver) appendStringAsBytes() { } d.tok = b } + + // handle null as a string + if d.tok == 'n' { + d.readStrIdx(10, 13) // ull + d.bs = d.bs[:0] + return + } + if d.tok != '"' { d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok) } @@ -1033,6 +1155,24 @@ type JsonHandle struct { // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way. // If not configured, raw bytes are encoded to/from base64 text. RawBytesExt InterfaceExt + + // Indent indicates how a value is encoded. + // - If positive, indent by that number of spaces. + // - If negative, indent by that number of tabs. + Indent int8 + + // IntegerAsString controls how integers (signed and unsigned) are encoded. + // + // Per the JSON Spec, JSON numbers are 64-bit floating point numbers. + // Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision. + // This can be mitigated by configuring how to encode integers. + // + // IntegerAsString interpretes the following values: + // - if 'L', then encode integers > 2^53 as a json string. + // - if 'A', then encode all integers as a json string + // containing the exact integer representation as a decimal. + // - else encode all integers as a json number (default) + IntegerAsString uint8 } func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { @@ -1040,26 +1180,48 @@ func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceE } func (h *JsonHandle) newEncDriver(e *Encoder) encDriver { - hd := jsonEncDriver{e: e, w: e.w, h: h} + hd := jsonEncDriver{e: e, h: h} hd.bs = hd.b[:0] - hd.se.i = h.RawBytesExt + + hd.reset() + return &hd } func (h *JsonHandle) newDecDriver(d *Decoder) decDriver { // d := jsonDecDriver{r: r.(*bytesDecReader), h: h} - hd := jsonDecDriver{d: d, r: d.r, h: h} + hd := jsonDecDriver{d: d, h: h} hd.bs = hd.b[:0] - hd.se.i = h.RawBytesExt + hd.reset() return &hd } func (e *jsonEncDriver) reset() { e.w = e.e.w + e.se.i = e.h.RawBytesExt + if e.bs != nil { + e.bs = e.bs[:0] + } + e.d, e.dt, e.dl, e.ds = false, false, 0, "" + e.c = 0 + if e.h.Indent > 0 { + e.d = true + e.ds = jsonSpaces[:e.h.Indent] + } else if e.h.Indent < 0 { + e.d = true + e.dt = true + e.ds = jsonTabs[:-(e.h.Indent)] + } } func (d *jsonDecDriver) reset() { d.r = d.d.r + d.se.i = d.h.RawBytesExt + if d.bs != nil { + d.bs = d.bs[:0] + } + d.c, d.tok = 0, 0 + d.n.reset() } var jsonEncodeTerminate = []byte{' '} diff --git a/vendor/github.com/ugorji/go/codec/msgpack.go b/vendor/github.com/ugorji/go/codec/msgpack.go index 5eb4c96368..e79830b562 100644 --- a/vendor/github.com/ugorji/go/codec/msgpack.go +++ b/vendor/github.com/ugorji/go/codec/msgpack.go @@ -374,7 +374,7 @@ func (d *msgpackDecDriver) DecodeNaked() { } if n.v == valueTypeUint && d.h.SignedInteger { n.v = valueTypeInt - n.i = int64(n.v) + n.i = int64(n.u) } return } @@ -561,6 +561,13 @@ func (d *msgpackDecDriver) readNextBd() { d.bdRead = true } +func (d *msgpackDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + func (d *msgpackDecDriver) ContainerType() (vt valueType) { bd := d.bd if bd == mpNil { @@ -729,6 +736,7 @@ func (e *msgpackEncDriver) reset() { func (d *msgpackDecDriver) reset() { d.r = d.d.r + d.bd, d.bdRead = 0, false } //-------------------------------------------------- diff --git 
a/vendor/github.com/ugorji/go/codec/prebuild.sh b/vendor/github.com/ugorji/go/codec/prebuild.sh index 98f442487a..909f4bb0f2 100755 --- a/vendor/github.com/ugorji/go/codec/prebuild.sh +++ b/vendor/github.com/ugorji/go/codec/prebuild.sh @@ -171,7 +171,7 @@ do 'xf') zforce=1;; 'xb') zbak=1;; 'xx') zexternal=1;; - *) echo "prebuild.sh accepts [-fb] only"; return 1;; + *) echo "prebuild.sh accepts [-fbx] only"; return 1;; esac done shift $((OPTIND-1)) diff --git a/vendor/github.com/ugorji/go/codec/rpc.go b/vendor/github.com/ugorji/go/codec/rpc.go index dad53d0c61..8062bed313 100644 --- a/vendor/github.com/ugorji/go/codec/rpc.go +++ b/vendor/github.com/ugorji/go/codec/rpc.go @@ -25,7 +25,7 @@ type Rpc interface { } // RpcCodecBuffered allows access to the underlying bufio.Reader/Writer -// used by the rpc connection. It accomodates use-cases where the connection +// used by the rpc connection. It accommodates use-cases where the connection // should be used by rpc and non-rpc functions, e.g. streaming a file after // sending an rpc response. type RpcCodecBuffered interface { diff --git a/vendor/github.com/ugorji/go/codec/simple.go b/vendor/github.com/ugorji/go/codec/simple.go index c150496509..d07208c874 100644 --- a/vendor/github.com/ugorji/go/codec/simple.go +++ b/vendor/github.com/ugorji/go/codec/simple.go @@ -166,6 +166,13 @@ func (d *simpleDecDriver) readNextBd() { d.bdRead = true } +func (d *simpleDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + func (d *simpleDecDriver) ContainerType() (vt valueType) { if d.bd == simpleVdNil { return valueTypeNil @@ -340,7 +347,7 @@ func (d *simpleDecDriver) decLen() int { } return int(ui) } - d.d.errorf("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8) + d.d.errorf("decLen: Cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8) return -1 } @@ -474,7 +481,7 @@ func (d *simpleDecDriver) DecodeNaked() { // SimpleHandle is a Handle for a very simple encoding format. // // simple is a simplistic codec similar to binc, but not as compact. -// - Encoding of a value is always preceeded by the descriptor byte (bd) +// - Encoding of a value is always preceded by the descriptor byte (bd) // - True, false, nil are encoded fully in 1 byte (the descriptor) // - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). // There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. 
@@ -512,6 +519,7 @@ func (e *simpleEncDriver) reset() { func (d *simpleDecDriver) reset() { d.r = d.d.r + d.bd, d.bdRead = 0, false } var _ decDriver = (*simpleDecDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/test.py b/vendor/github.com/ugorji/go/codec/test.py index dfe3b0c9a0..c0ad20b34c 100755 --- a/vendor/github.com/ugorji/go/codec/test.py +++ b/vendor/github.com/ugorji/go/codec/test.py @@ -9,6 +9,8 @@ # sudo apt-get install python-pip # pip install --user msgpack-python msgpack-rpc-python cbor +# Ensure all "string" keys are utf strings (else encoded as bytes) + import cbor, msgpack, msgpackrpc, sys, os, threading def get_test_data_list(): @@ -26,35 +28,39 @@ def get_test_data_list(): -3232.0, -6464646464.0, 3232.0, + 6464.0, 6464646464.0, False, True, + u"null", None, u"someday", - u"", - u"bytestring", 1328176922000002000, + u"", -2206187877999998000, + u"bytestring", 270, + u"none", -2013855847999995777, #-6795364578871345152, ] l1 = [ { "true": True, "false": False }, - { "true": "True", + { "true": u"True", "false": False, "uint16(1616)": 1616 }, { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ], "int32":32323232, "bool": True, - "LONG STRING": "123456789012345678901234567890123456789012345678901234567890", - "SHORT STRING": "1234567890" }, - { True: "true", 8: False, "false": 0 } + "LONG STRING": u"123456789012345678901234567890123456789012345678901234567890", + "SHORT STRING": u"1234567890" }, + { True: "true", 138: False, "false": 200 } ] l = [] l.extend(l0) l.append(l0) + l.append(1) l.extend(l1) return l diff --git a/vendor/github.com/ugorji/go/codec/tests.sh b/vendor/github.com/ugorji/go/codec/tests.sh index b1602ea7e7..342f336dfe 100755 --- a/vendor/github.com/ugorji/go/codec/tests.sh +++ b/vendor/github.com/ugorji/go/codec/tests.sh @@ -6,6 +6,7 @@ _run() { # 1. VARIATIONS: regular (t), canonical (c), IO R/W (i), # binc-nosymbols (n), struct2array (s), intern string (e), + # json-indent (d), circular (l) # 2. MODE: reflection (r), external (x), codecgen (g), unsafe (u), notfastpath (f) # 3. 
OPTIONS: verbose (v), reset (z), must (m), # @@ -16,7 +17,7 @@ _run() { zargs="" local OPTIND OPTIND=1 - while getopts "xurtcinsvgzmef" flag + while getopts "_xurtcinsvgzmefdl" flag do case "x$flag" in 'xr') ;; @@ -27,6 +28,7 @@ _run() { 'xv') zargs="$zargs -tv" ;; 'xz') zargs="$zargs -tr" ;; 'xm') zargs="$zargs -tm" ;; + 'xl') zargs="$zargs -tl" ;; *) ;; esac done @@ -35,15 +37,19 @@ _run() { # echo ">>>>>>> TAGS: $ztags" OPTIND=1 - while getopts "xurtcinsvgzmef" flag + while getopts "_xurtcinsvgzmefdl" flag do case "x$flag" in 'xt') printf ">>>>>>> REGULAR : "; go test "-tags=$ztags" $zargs ; sleep 2 ;; 'xc') printf ">>>>>>> CANONICAL : "; go test "-tags=$ztags" $zargs -tc; sleep 2 ;; 'xi') printf ">>>>>>> I/O : "; go test "-tags=$ztags" $zargs -ti; sleep 2 ;; - 'xn') printf ">>>>>>> NO_SYMBOLS : "; go test "-tags=$ztags" $zargs -tn; sleep 2 ;; + 'xn') printf ">>>>>>> NO_SYMBOLS : "; go test "-tags=$ztags" -run=Binc $zargs -tn; sleep 2 ;; 'xs') printf ">>>>>>> TO_ARRAY : "; go test "-tags=$ztags" $zargs -ts; sleep 2 ;; 'xe') printf ">>>>>>> INTERN : "; go test "-tags=$ztags" $zargs -te; sleep 2 ;; + 'xd') printf ">>>>>>> INDENT : "; + go test "-tags=$ztags" -run=JsonCodecsTable -td=-1 $zargs; + go test "-tags=$ztags" -run=JsonCodecsTable -td=8 $zargs; + sleep 2 ;; *) ;; esac done @@ -53,22 +59,44 @@ _run() { } # echo ">>>>>>> RUNNING VARIATIONS OF TESTS" -if [[ "x$@" = "x" ]]; then +if [[ "x$@" = "x" || "x$@" = "x-A" ]]; then # All: r, x, g, gu - _run "-rtcinsm" # regular - _run "-rtcinsmz" # regular with reset - _run "-rtcinsmf" # regular with no fastpath (notfastpath) - _run "-xtcinsm" # external - _run "-gxtcinsm" # codecgen: requires external - _run "-gxutcinsm" # codecgen + unsafe + _run "-_tcinsed_ml" # regular + _run "-_tcinsed_ml_z" # regular with reset + _run "-_tcinsed_ml_f" # regular with no fastpath (notfastpath) + _run "-x_tcinsed_ml" # external + _run "-gx_tcinsed_ml" # codecgen: requires external + _run "-gxu_tcinsed_ml" # codecgen + unsafe elif [[ "x$@" = "x-Z" ]]; then # Regular - _run "-rtcinsm" # regular - _run "-rtcinsmz" # regular with reset + _run "-_tcinsed_ml" # regular + _run "-_tcinsed_ml_z" # regular with reset elif [[ "x$@" = "x-F" ]]; then # regular with notfastpath - _run "-rtcinsmf" # regular - _run "-rtcinsmzf" # regular with reset + _run "-_tcinsed_ml_f" # regular + _run "-_tcinsed_ml_zf" # regular with reset +elif [[ "x$@" = "x-C" ]]; then + # codecgen + _run "-gx_tcinsed_ml" # codecgen: requires external + _run "-gxu_tcinsed_ml" # codecgen + unsafe +elif [[ "x$@" = "x-X" ]]; then + # external + _run "-x_tcinsed_ml" # external +elif [[ "x$@" = "x-h" || "x$@" = "x-?" ]]; then + cat < +# The email address is not required for organizations. + +# Please keep the list sorted. +Google Inc. diff --git a/vendor/google.golang.org/api/CONTRIBUTORS b/vendor/google.golang.org/api/CONTRIBUTORS new file mode 100644 index 0000000000..e16e530b8c --- /dev/null +++ b/vendor/google.golang.org/api/CONTRIBUTORS @@ -0,0 +1,50 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). 
+# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# https://cla.developers.google.com/about/google-individual +# https://cla.developers.google.com/about/google-corporate +# +# The CLA can be filled out on the web: +# +# https://cla.developers.google.com/ +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name +# +# An entry with two email addresses specifies that the +# first address should be used in the submit logs and +# that the second address should be recognized as the +# same person when interacting with Rietveld. + +# Please keep the list sorted. + +Alain Vongsouvanhalainv +Andrew Gerrand +Brad Fitzpatrick +Eric Koleda +Francesc Campoy +Garrick Evans +Glenn Lewis +Ivan Krasin +Jason Hall +Johan Euphrosine +Kostik Shtoyk +Michael McGreevy +Nick Craig-Wood +Scott Van Woudenberg +Takashi Matsuo diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 1265f0425d..ec518f3dc5 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -1,11 +1,11 @@ { "kind": "discovery#restDescription", - "etag": "\"C5oy1hgQsABtYOYIOXWcR3BgYqU/IHPQejAPoHbj6rriHVm8lNKt_bg\"", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/AtRtoI2wqhCu6Twyv23FyGvWiUE\"", "discoveryVersion": "v1", "id": "compute:v1", "name": "compute", "version": "v1", - "revision": "20160812", + "revision": "20161115", "title": "Compute Engine API", "description": "Creates and runs virtual machines on Google Cloud Platform.", "ownerDomain": "google.com", @@ -398,7 +398,7 @@ }, "source": { "type": "string", - "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. This field is only applicable for persistent disks. Note that for InstanceTemplate, it is just disk name, not URL for the disk." + "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or disks.source is required.\n\nIf desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks.\n\nNote that for InstanceTemplate, specify the disk name, not the URL for the disk." }, "type": { "type": "string", @@ -439,7 +439,7 @@ }, "sourceImage": { "type": "string", - "description": "The source image used to create this disk. If the source image is deleted, this field will not be set.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a private image that you created, specify the image name in the following format:\n\nglobal/images/my-private-image \n\nYou can also specify a private image by its image family, which returns the latest version of the image in that family. 
Replace the image name with family/family-name:\n\nglobal/images/family/my-private-family" + "description": "The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or disks.source is required.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a private image that you created, specify the image name in the following format:\n\nglobal/images/my-private-image \n\nYou can also specify a private image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\n\nglobal/images/family/my-private-family \n\nIf the source image is deleted later, this field will not be set." }, "sourceImageEncryptionKey": { "$ref": "CustomerEncryptionKey", @@ -484,6 +484,10 @@ ] } }, + "region": { + "type": "string", + "description": "[Output Only] URL of the region where the instance group resides (for autoscalers living in regional scope)." + }, "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for the resource." @@ -696,7 +700,7 @@ "properties": { "metric": { "type": "string", - "description": "The identifier of the Cloud Monitoring metric. The metric cannot have negative values and should be a utilization metric, which means that the number of virtual machines handling requests should increase or decrease proportionally to the metric. The metric must also have a label of compute.googleapis.com/resource_id with the value of the instance's unique ID, although this alone does not guarantee that the metric is valid.\n\nFor example, the following is a valid metric:\ncompute.googleapis.com/instance/network/received_bytes_count\n\n\nThe following is not a valid metric because it does not increase or decrease based on usage:\ncompute.googleapis.com/instance/cpu/reserved_cores" + "description": "The identifier of the Stackdriver Monitoring metric. The metric cannot have negative values and should be a utilization metric, which means that the number of virtual machines handling requests should increase or decrease proportionally to the metric. The metric must also have a label of compute.googleapis.com/resource_id with the value of the instance's unique ID, although this alone does not guarantee that the metric is valid.\n\nFor example, the following is a valid metric:\ncompute.googleapis.com/instance/network/received_bytes_count\nThe following is not a valid metric because it does not increase or decrease based on usage:\ncompute.googleapis.com/instance/cpu/reserved_cores" }, "utilizationTarget": { "type": "number", @@ -705,7 +709,7 @@ }, "utilizationTargetType": { "type": "string", - "description": "Defines how target utilization value is expressed for a Cloud Monitoring metric. Either GAUGE, DELTA_PER_SECOND, or DELTA_PER_MINUTE. If not specified, the default is GAUGE.", + "description": "Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Either GAUGE, DELTA_PER_SECOND, or DELTA_PER_MINUTE. 
If not specified, the default is GAUGE.", "enum": [ "DELTA_PER_MINUTE", "DELTA_PER_SECOND", @@ -738,19 +742,21 @@ "properties": { "balancingMode": { "type": "string", - "description": "Specifies the balancing mode for this backend. For global HTTP(S) load balancing, the default is UTILIZATION. Valid values are UTILIZATION and RATE.\n\nThis cannot be used for internal load balancing.", + "description": "Specifies the balancing mode for this backend. For global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION. Valid values are UTILIZATION, RATE (for HTTP(S)) and CONNECTION (for TCP/SSL).\n\nThis cannot be used for internal load balancing.", "enum": [ + "CONNECTION", "RATE", "UTILIZATION" ], "enumDescriptions": [ + "", "", "" ] }, "capacityScaler": { "type": "number", - "description": "A multiplier applied to the group's maximum servicing capacity (either UTILIZATION or RATE). Default value is 1, which means the group will serve up to 100% of its configured CPU or RPS (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available CPU or RPS. Valid range is [0.0,1.0].\n\nThis cannot be used for internal load balancing.", + "description": "A multiplier applied to the group's maximum servicing capacity (based on UTILIZATION, RATE or CONNECTION). Default value is 1, which means the group will serve up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available Capacity. Valid range is [0.0,1.0].\n\nThis cannot be used for internal load balancing.", "format": "float" }, "description": { @@ -761,6 +767,16 @@ "type": "string", "description": "The fully-qualified URL of a zonal Instance Group resource. This instance group defines the list of instances that serve traffic. Member virtual machine instances from each instance group must live in the same zone as the instance group itself. No two backends in a backend service are allowed to use same Instance Group resource.\n\nNote that you must specify an Instance Group resource using the fully-qualified URL, rather than a partial URL.\n\nWhen the BackendService has load balancing scheme INTERNAL, the instance group must be in a zone within the same region as the BackendService." }, + "maxConnections": { + "type": "integer", + "description": "The max number of simultaneous connections for the group. Can be used with either CONNECTION or UTILIZATION balancing modes. For CONNECTION mode, either maxConnections or maxConnectionsPerInstance must be set.\n\nThis cannot be used for internal load balancing.", + "format": "int32" + }, + "maxConnectionsPerInstance": { + "type": "integer", + "description": "The max number of simultaneous connections that a single backend instance can handle. This is used to calculate the capacity of the group. Can be used in either CONNECTION or UTILIZATION balancing modes. For CONNECTION mode, either maxConnections or maxConnectionsPerInstance must be set.\n\nThis cannot be used for internal load balancing.", + "format": "int32" + }, "maxRate": { "type": "integer", "description": "The max requests per second (RPS) of the group. Can be used with either RATE or UTILIZATION balancing modes, but required if RATE mode. 
For RATE mode, either maxRate or maxRatePerInstance must be set.\n\nThis cannot be used for internal load balancing.", @@ -795,6 +811,9 @@ "$ref": "Backend" } }, + "connectionDraining": { + "$ref": "ConnectionDraining" + }, "creationTimestamp": { "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." @@ -829,6 +848,19 @@ "description": "[Output Only] Type of resource. Always compute#backendService for backend services.", "default": "compute#backendService" }, + "loadBalancingScheme": { + "type": "string", + "enum": [ + "EXTERNAL", + "INTERNAL", + "INVALID_LOAD_BALANCING_SCHEME" + ], + "enumDescriptions": [ + "", + "", + "" + ] + }, "name": { "type": "string", "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", @@ -848,9 +880,15 @@ "description": "The protocol this BackendService uses to communicate with backends.\n\nPossible values are HTTP, HTTPS, HTTP2, TCP and SSL. The default is HTTP.\n\nFor internal load balancing, the possible values are TCP and UDP, and the default is TCP.", "enum": [ "HTTP", - "HTTPS" + "HTTPS", + "SSL", + "TCP", + "UDP" ], "enumDescriptions": [ + "", + "", + "", "", "" ] @@ -868,6 +906,7 @@ "description": "Type of session affinity to use. The default is NONE.\n\nWhen the load balancing scheme is EXTERNAL, can be NONE, CLIENT_IP, or GENERATED_COOKIE.\n\nWhen the load balancing scheme is INTERNAL, can be NONE, CLIENT_IP, CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO.\n\nWhen the protocol is UDP, this field is not used.", "enum": [ "CLIENT_IP", + "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", "NONE" @@ -876,6 +915,7 @@ "", "", "", + "", "" ] }, @@ -886,6 +926,38 @@ } } }, + "BackendServiceAggregatedList": { + "id": "BackendServiceAggregatedList", + "type": "object", + "description": "Contains a list of BackendServicesScopedList.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "object", + "description": "A map of scoped BackendService lists.", + "additionalProperties": { + "$ref": "BackendServicesScopedList", + "description": "Name of the scope containing this set of BackendServices." + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#backendServiceAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] A token used to continue a truncated list request." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." 
+ } + } + }, "BackendServiceGroupHealth": { "id": "BackendServiceGroupHealth", "type": "object", @@ -934,6 +1006,86 @@ } } }, + "BackendServicesScopedList": { + "id": "BackendServicesScopedList", + "type": "object", + "properties": { + "backendServices": { + "type": "array", + "description": "List of BackendServices contained in this scope.", + "items": { + "$ref": "BackendService" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of backend services when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, "CacheInvalidationRule": { "id": "CacheInvalidationRule", "type": "object", @@ -943,6 +1095,18 @@ } } }, + "ConnectionDraining": { + "id": "ConnectionDraining", + "type": "object", + "description": "Message containing connection draining configuration.", + "properties": { + "drainingTimeoutSec": { + "type": "integer", + "description": "Time for which instance will be drained (not accept new connections, but still work to finish started).", + "format": "int32" + } + } + }, "CustomerEncryptionKey": { "id": "CustomerEncryptionKey", "type": "object", @@ -979,15 +1143,15 @@ "properties": { "deleted": { "type": "string", - "description": "An optional RFC3339 timestamp on or after which the deprecation state of this resource will be changed to DELETED." + "description": "An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DELETED. This is only informational and the status will not change unless the client explicitly changes it." 
}, "deprecated": { "type": "string", - "description": "An optional RFC3339 timestamp on or after which the deprecation state of this resource will be changed to DEPRECATED." + "description": "An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DEPRECATED. This is only informational and the status will not change unless the client explicitly changes it." }, "obsolete": { "type": "string", - "description": "An optional RFC3339 timestamp on or after which the deprecation state of this resource will be changed to OBSOLETE." + "description": "An optional RFC3339 timestamp on or after which the state of this resource is intended to change to OBSOLETE. This is only informational and the status will not change unless the client explicitly changes it." }, "replacement": { "type": "string", @@ -1100,7 +1264,7 @@ }, "status": { "type": "string", - "description": "[Output Only] The status of disk creation. Applicable statuses includes: CREATING, FAILED, READY, RESTORING.", + "description": "[Output Only] The status of disk creation.", "enum": [ "CREATING", "FAILED", @@ -1634,6 +1798,10 @@ "" ] }, + "backendService": { + "type": "string", + "description": "This field is not used for external load balancing.\n\nFor internal load balancing, this field identifies the BackendService resource to receive the matched traffic." + }, "creationTimestamp": { "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." @@ -1652,15 +1820,40 @@ "description": "[Output Only] Type of the resource. Always compute#forwardingRule for Forwarding Rule resources.", "default": "compute#forwardingRule" }, + "loadBalancingScheme": { + "type": "string", + "description": "This signifies what the ForwardingRule will be used for and can only take the following values: INTERNAL EXTERNAL The value of INTERNAL means that this will be used for Internal Network Load Balancing (TCP, UDP). The value of EXTERNAL means that this will be used for External Load Balancing (HTTP(S) LB, External TCP/UDP LB, SSL Proxy)", + "enum": [ + "EXTERNAL", + "INTERNAL", + "INVALID" + ], + "enumDescriptions": [ + "", + "", + "" + ] + }, "name": { "type": "string", "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" }, + "network": { + "type": "string", + "description": "This field is not used for external load balancing.\n\nFor internal load balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used." + }, "portRange": { "type": "string", "description": "Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed to ports in the specified range will be forwarded to target. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port ranges.\n\nThis field is not used for internal load balancing." 
}, + "ports": { + "type": "array", + "description": "This field is not used for external load balancing.\n\nWhen the load balancing scheme is INTERNAL, a single port or a comma separated list of ports can be configured. Only packets addressed to these ports will be forwarded to the backends configured with this forwarding rule. If the port list is not provided then all ports are allowed to pass through.\n\nYou may specify a maximum of up to 5 ports.", + "items": { + "type": "string" + } + }, "region": { "type": "string", "description": "[Output Only] URL of the region where the regional forwarding rule resides. This field is not applicable to global forwarding rules." @@ -1669,6 +1862,10 @@ "type": "string", "description": "[Output Only] Server-defined URL for the resource." }, + "subnetwork": { + "type": "string", + "description": "This field is not used for external load balancing.\n\nFor internal load balancing, this field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule.\n\nIf the network specified is in auto subnet mode, this field is optional. However, if the network is in custom subnet mode, a subnetwork must be specified." + }, "target": { "type": "string", "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global TargetHttpProxy or TargetHttpsProxy resource. The forwarded traffic must be of a type appropriate to the target object. For example, TargetHttpProxy requires HTTP traffic, and TargetHttpsProxy requires HTTPS traffic.\n\nThis field is not used for internal load balancing." @@ -1817,73 +2014,101 @@ } } }, - "HealthCheckReference": { - "id": "HealthCheckReference", + "GuestOsFeature": { + "id": "GuestOsFeature", "type": "object", - "description": "A full or valid partial URL to a health check. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project-id/global/httpHealthChecks/health-check \n- projects/project-id/global/httpHealthChecks/health-check \n- global/httpHealthChecks/health-check", + "description": "Guest OS features.", "properties": { - "healthCheck": { - "type": "string" + "type": { + "type": "string", + "description": "The type of supported feature. Currenty only VIRTIO_SCSI_MULTIQUEUE is supported. For newer Windows images, the server might also populate this property with the value WINDOWS to indicate that this is a Windows image. This value is purely informational and does not enable or disable any features.", + "enum": [ + "FEATURE_TYPE_UNSPECIFIED", + "VIRTIO_SCSI_MULTIQUEUE", + "WINDOWS" + ], + "enumDescriptions": [ + "", + "", + "" + ] } } }, - "HealthStatus": { - "id": "HealthStatus", + "HTTPHealthCheck": { + "id": "HTTPHealthCheck", "type": "object", "properties": { - "healthState": { + "host": { "type": "string", - "description": "Health state of the instance.", + "description": "The value of the host header in the HTTP health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used." + }, + "port": { + "type": "integer", + "description": "The TCP port number for the health check request. The default value is 80.", + "format": "int32" + }, + "portName": { + "type": "string", + "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence." 
+ }, + "proxyHeader": { + "type": "string", + "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", "enum": [ - "HEALTHY", - "UNHEALTHY" + "NONE", + "PROXY_V1" ], "enumDescriptions": [ "", "" ] }, - "instance": { - "type": "string", - "description": "URL of the instance resource." - }, - "ipAddress": { + "requestPath": { "type": "string", - "description": "The IP address represented by this resource." - }, - "port": { - "type": "integer", - "description": "The port on the instance.", - "format": "int32" + "description": "The request path of the HTTP health check request. The default value is /." } } }, - "HostRule": { - "id": "HostRule", + "HTTPSHealthCheck": { + "id": "HTTPSHealthCheck", "type": "object", - "description": "UrlMaps A host-matching rule for a URL. If matched, will use the named PathMatcher to select the BackendService.", "properties": { - "description": { + "host": { "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." + "description": "The value of the host header in the HTTPS health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used." }, - "hosts": { - "type": "array", - "description": "The list of host patterns to match. They must be valid hostnames, except * will match any string of ([a-z0-9-.]*). In that case, * must be the first character and must be followed in the pattern by either - or ..", - "items": { - "type": "string" - } + "port": { + "type": "integer", + "description": "The TCP port number for the health check request. The default value is 443.", + "format": "int32" }, - "pathMatcher": { + "portName": { "type": "string", - "description": "The name of the PathMatcher to use to match the path portion of the URL if the hostRule matches the URL's host portion." + "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence." + }, + "proxyHeader": { + "type": "string", + "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "requestPath": { + "type": "string", + "description": "The request path of the HTTPS health check request. The default value is /." } } }, - "HttpHealthCheck": { - "id": "HttpHealthCheck", + "HealthCheck": { + "id": "HealthCheck", "type": "object", - "description": "An HttpHealthCheck resource. This resource defines a template for how individual instances should be checked for health, via HTTP.", + "description": "An HealthCheck resource. This resource defines a template for how individual virtual machines should be checked for health, via one of the supported protocols.", "properties": { "checkIntervalSec": { "type": "integer", @@ -1892,7 +2117,7 @@ }, "creationTimestamp": { "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." + "description": "[Output Only] Creation timestamp in 3339 text format." }, "description": { "type": "string", @@ -1903,9 +2128,11 @@ "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.", "format": "int32" }, - "host": { - "type": "string", - "description": "The value of the host header in the HTTP health check request. 
If left empty (default value), the public IP on behalf of which this health check is performed will be used." + "httpHealthCheck": { + "$ref": "HTTPHealthCheck" + }, + "httpsHealthCheck": { + "$ref": "HTTPSHealthCheck" }, "id": { "type": "string", @@ -1914,36 +2141,212 @@ }, "kind": { "type": "string", - "description": "[Output Only] Type of the resource. Always compute#httpHealthCheck for HTTP health checks.", - "default": "compute#httpHealthCheck" + "description": "Type of the resource.", + "default": "compute#healthCheck" }, "name": { "type": "string", "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" }, - "port": { - "type": "integer", - "description": "The TCP port number for the HTTP health check request. The default value is 80.", - "format": "int32" - }, - "requestPath": { - "type": "string", - "description": "The request path of the HTTP health check request. The default value is /." - }, "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for the resource." }, + "sslHealthCheck": { + "$ref": "SSLHealthCheck" + }, + "tcpHealthCheck": { + "$ref": "TCPHealthCheck" + }, "timeoutSec": { "type": "integer", "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have greater value than checkIntervalSec.", "format": "int32" }, - "unhealthyThreshold": { - "type": "integer", - "description": "A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.", - "format": "int32" + "type": { + "type": "string", + "description": "Specifies the type of the healthCheck, either TCP, UDP, SSL, HTTP, HTTPS or HTTP2. If not specified, the default is TCP. Exactly one of the protocol-specific health check field must be specified, which must match type field.", + "enum": [ + "HTTP", + "HTTPS", + "INVALID", + "SSL", + "TCP" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ] + }, + "unhealthyThreshold": { + "type": "integer", + "description": "A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.", + "format": "int32" + } + } + }, + "HealthCheckList": { + "id": "HealthCheckList", + "type": "object", + "description": "Contains a list of HealthCheck resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "A list of HealthCheck resources.", + "items": { + "$ref": "HealthCheck" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#healthCheckList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "HealthCheckReference": { + "id": "HealthCheckReference", + "type": "object", + "description": "A full or valid partial URL to a health check. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project-id/global/httpHealthChecks/health-check \n- projects/project-id/global/httpHealthChecks/health-check \n- global/httpHealthChecks/health-check", + "properties": { + "healthCheck": { + "type": "string" + } + } + }, + "HealthStatus": { + "id": "HealthStatus", + "type": "object", + "properties": { + "healthState": { + "type": "string", + "description": "Health state of the instance.", + "enum": [ + "HEALTHY", + "UNHEALTHY" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "instance": { + "type": "string", + "description": "URL of the instance resource." + }, + "ipAddress": { + "type": "string", + "description": "The IP address represented by this resource." + }, + "port": { + "type": "integer", + "description": "The port on the instance.", + "format": "int32" + } + } + }, + "HostRule": { + "id": "HostRule", + "type": "object", + "description": "UrlMaps A host-matching rule for a URL. If matched, will use the named PathMatcher to select the BackendService.", + "properties": { + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "hosts": { + "type": "array", + "description": "The list of host patterns to match. They must be valid hostnames, except * will match any string of ([a-z0-9-.]*). In that case, * must be the first character and must be followed in the pattern by either - or ..", + "items": { + "type": "string" + } + }, + "pathMatcher": { + "type": "string", + "description": "The name of the PathMatcher to use to match the path portion of the URL if the hostRule matches the URL's host portion." + } + } + }, + "HttpHealthCheck": { + "id": "HttpHealthCheck", + "type": "object", + "description": "An HttpHealthCheck resource. This resource defines a template for how individual instances should be checked for health, via HTTP.", + "properties": { + "checkIntervalSec": { + "type": "integer", + "description": "How often (in seconds) to send a health check. The default value is 5 seconds.", + "format": "int32" + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "healthyThreshold": { + "type": "integer", + "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.", + "format": "int32" + }, + "host": { + "type": "string", + "description": "The value of the host header in the HTTP health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. 
Always compute#httpHealthCheck for HTTP health checks.", + "default": "compute#httpHealthCheck" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "port": { + "type": "integer", + "description": "The TCP port number for the HTTP health check request. The default value is 80.", + "format": "int32" + }, + "requestPath": { + "type": "string", + "description": "The request path of the HTTP health check request. The default value is /." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "timeoutSec": { + "type": "integer", + "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have greater value than checkIntervalSec.", + "format": "int32" + }, + "unhealthyThreshold": { + "type": "integer", + "description": "A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.", + "format": "int32" } } }, @@ -2107,6 +2510,13 @@ "type": "string", "description": "The name of the image family to which this image belongs. You can create disks by specifying an image family instead of a specific image name. The image family always returns its latest image that is not deprecated. The name of the image family must comply with RFC1035." }, + "guestOsFeatures": { + "type": "array", + "description": "A list of features to enable on the guest OS. Applicable for bootable images only. Currently, only one feature can be enabled, VIRTIO_SCSCI_MULTIQUEUE, which allows each virtual CPU to have its own queue. For Windows images, you can only enable VIRTIO_SCSCI_MULTIQUEUE on images with driver version 1.2.0.1621 or higher. Linux images with kernel versions 3.17 and higher will support VIRTIO_SCSCI_MULTIQUEUE.\n\nFor new Windows images, the server might also populate this field with the value WINDOWS, to indicate that this is a Windows image. This value is purely informational and does not enable or disable any features.", + "items": { + "$ref": "GuestOsFeature" + } + }, "id": { "type": "string", "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", @@ -2174,7 +2584,7 @@ }, "sourceDisk": { "type": "string", - "description": "URL of the The source disk used to create this image. This can be a full or valid partial URL. You must provide either this property or the rawDisk.source property but not both to create an image. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disk/disk \n- projects/project/zones/zone/disk/disk \n- zones/zone/disks/disk" + "description": "URL of the source disk used to create this image. This can be a full or valid partial URL. You must provide either this property or the rawDisk.source property but not both to create an image. 
For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk \n- projects/project/zones/zone/disks/disk \n- zones/zone/disks/disk" }, "sourceDiskEncryptionKey": { "$ref": "CustomerEncryptionKey", @@ -2321,7 +2731,7 @@ }, "status": { "type": "string", - "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDED, SUSPENDING, and TERMINATED.", + "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, and TERMINATED.", "enum": [ "PROVISIONING", "RUNNING", @@ -2407,7 +2817,7 @@ }, "id": { "type": "string", - "description": "[Output Only] A unique identifier for this resource type. The server generates this identifier.", + "description": "[Output Only] A unique identifier for this instance group, generated by the server.", "format": "uint64" }, "kind": { @@ -2436,6 +2846,10 @@ "type": "string", "description": "The URL of the network to which all instances in the instance group belong." }, + "region": { + "type": "string", + "description": "The URL of the region where the instance group is located (for regional resources)." + }, "selfLink": { "type": "string", "description": "[Output Only] The URL for this instance group. The server generates this URL." @@ -2584,6 +2998,10 @@ "$ref": "NamedPort" } }, + "region": { + "type": "string", + "description": "[Output Only] The URL of the region where the managed instance group resides (for regional resources)." + }, "selfLink": { "type": "string", "description": "[Output Only] The URL for this managed instance group. The server defines this URL." @@ -2627,7 +3045,7 @@ }, "creatingWithoutRetries": { "type": "integer", - "description": "[Output Only] The number of instances that the managed instance group will attempt to create. The group attempts to create each instance only once. If the group fails to create any of these instances, it decreases the group's target_size value accordingly.", + "description": "[Output Only] The number of instances that the managed instance group will attempt to create. The group attempts to create each instance only once. If the group fails to create any of these instances, it decreases the group's targetSize value accordingly.", "format": "int32" }, "deleting": { @@ -2715,7 +3133,7 @@ }, "selfLink": { "type": "string", - "description": "[Output Only] The URL for this resource type. The server generates this URL." + "description": "[Output Only] Server-defined URL for this resource." } } }, @@ -3923,7 +4341,7 @@ }, "selfLink": { "type": "string", - "description": "[Output Only] Server-defined URL for this resource ." + "description": "[Output Only] Server-defined URL for this resource." } } }, @@ -4048,7 +4466,7 @@ }, "targetLink": { "type": "string", - "description": "[Output Only] The URL of the resource that the operation modifies." + "description": "[Output Only] The URL of the resource that the operation modifies. For operations related to creating a snapshot, this points to the persistent disk that the snapshot was created from." 
}, "user": { "type": "string", @@ -4404,6 +4822,9 @@ "IN_USE_ADDRESSES", "LOCAL_SSD_TOTAL_GB", "NETWORKS", + "PREEMPTIBLE_CPUS", + "REGIONAL_AUTOSCALERS", + "REGIONAL_INSTANCE_GROUP_MANAGERS", "ROUTERS", "ROUTES", "SNAPSHOTS", @@ -4450,6 +4871,9 @@ "", "", "", + "", + "", + "", "" ] }, @@ -4523,10 +4947,10 @@ } } }, - "RegionList": { - "id": "RegionList", + "RegionAutoscalerList": { + "id": "RegionAutoscalerList", "type": "object", - "description": "Contains a list of region resources.", + "description": "Contains a list of autoscalers.", "properties": { "id": { "type": "string", @@ -4534,19 +4958,19 @@ }, "items": { "type": "array", - "description": "[Output Only] A list of Region resources.", + "description": "A list of autoscalers.", "items": { - "$ref": "Region" + "$ref": "Autoscaler" } }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. Always compute#regionList for lists of regions.", - "default": "compute#regionList" + "description": "Type of resource.", + "default": "compute#regionAutoscalerList" }, "nextPageToken": { "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + "description": "[Output Only] A token used to continue a truncated list request." }, "selfLink": { "type": "string", @@ -4554,52 +4978,296 @@ } } }, - "ResourceGroupReference": { - "id": "ResourceGroupReference", + "RegionInstanceGroupList": { + "id": "RegionInstanceGroupList", "type": "object", + "description": "Contains a list of InstanceGroup resources.", "properties": { - "group": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "A list of InstanceGroup resources.", + "items": { + "$ref": "InstanceGroup" + } + }, + "kind": { + "type": "string", + "description": "The resource type.", + "default": "compute#regionInstanceGroupList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { "type": "string", - "description": "A URI referencing one of the resource views listed in the backend service." + "description": "[Output Only] The URL for this resource type. The server generates this URL." } } }, - "Route": { - "id": "Route", + "RegionInstanceGroupManagerList": { + "id": "RegionInstanceGroupManagerList", "type": "object", - "description": "Represents a Route resource. A route specifies how certain packets should be handled by the network. Routes are associated with instances by tags and the set of routes for a particular instance is called its routing table.\n\nFor each packet leaving a instance, the system searches that instance's routing table for a single best matching route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the route with the smallest priority value. 
If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the nextHop field of the winning route - either to another instance destination, a instance gateway or a Google Compute Engine-operated gateway.\n\nPackets that do not match any route in the sending instance's routing table are dropped.", + "description": "Contains a list of managed instance groups.", "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { + "id": { "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." }, - "destRange": { - "type": "string", - "description": "The destination range of outgoing packets that this route applies to.", - "annotations": { - "required": [ - "compute.routes.insert" - ] + "items": { + "type": "array", + "description": "A list of managed instance groups.", + "items": { + "$ref": "InstanceGroupManager" } }, - "id": { + "kind": { "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" + "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerList for a list of managed instance groups that exist in th regional scope.", + "default": "compute#regionInstanceGroupManagerList" }, - "kind": { + "nextPageToken": { "type": "string", - "description": "[Output Only] Type of this resource. Always compute#routes for Route resources.", - "default": "compute#route" + "description": "[Output only] A token used to continue a truncated list request." }, - "name": { + "selfLink": { "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "description": "[Output only] The URL for this resource type. The server generates this URL." 
+ } + } + }, + "RegionInstanceGroupManagersAbandonInstancesRequest": { + "id": "RegionInstanceGroupManagersAbandonInstancesRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "The names of one or more instances to abandon.", + "items": { + "type": "string" + } + } + } + }, + "RegionInstanceGroupManagersDeleteInstancesRequest": { + "id": "RegionInstanceGroupManagersDeleteInstancesRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "The names of one or more instances to delete.", + "items": { + "type": "string" + } + } + } + }, + "RegionInstanceGroupManagersListInstancesResponse": { + "id": "RegionInstanceGroupManagersListInstancesResponse", + "type": "object", + "properties": { + "managedInstances": { + "type": "array", + "description": "List of managed instances.", + "items": { + "$ref": "ManagedInstance" + } + } + } + }, + "RegionInstanceGroupManagersRecreateRequest": { + "id": "RegionInstanceGroupManagersRecreateRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "The URL for one or more instances to recreate.", + "items": { + "type": "string" + } + } + } + }, + "RegionInstanceGroupManagersSetTargetPoolsRequest": { + "id": "RegionInstanceGroupManagersSetTargetPoolsRequest", + "type": "object", + "properties": { + "fingerprint": { + "type": "string", + "description": "Fingerprint of the target pools information, which is a hash of the contents. This field is used for optimistic locking when you update the target pool entries. This field is optional.", + "format": "byte" + }, + "targetPools": { + "type": "array", + "description": "The URL of all TargetPool resources to which instances in the instanceGroup field are added. The target pools automatically apply to all of the instances in the managed instance group.", + "items": { + "type": "string" + } + } + } + }, + "RegionInstanceGroupManagersSetTemplateRequest": { + "id": "RegionInstanceGroupManagersSetTemplateRequest", + "type": "object", + "properties": { + "instanceTemplate": { + "type": "string", + "description": "URL of the InstanceTemplate resource from which all new instances will be created." + } + } + }, + "RegionInstanceGroupsListInstances": { + "id": "RegionInstanceGroupsListInstances", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource. Defined by the server." + }, + "items": { + "type": "array", + "description": "A list of instances and any named ports that are assigned to those instances.", + "items": { + "$ref": "InstanceWithNamedPorts" + } + }, + "kind": { + "type": "string", + "description": "The resource type.", + "default": "compute#regionInstanceGroupsListInstances" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." 
+ } + } + }, + "RegionInstanceGroupsListInstancesRequest": { + "id": "RegionInstanceGroupsListInstancesRequest", + "type": "object", + "properties": { + "instanceState": { + "type": "string", + "description": "Instances in which state should be returned. Valid options are: 'ALL', 'RUNNING'. By default, it lists all instances.", + "enum": [ + "ALL", + "RUNNING" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "portName": { + "type": "string", + "description": "Name of port user is interested in. It is optional. If it is set, only information about this ports will be returned. If it is not set, all the named ports will be returned. Always lists all instances.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + } + } + }, + "RegionInstanceGroupsSetNamedPortsRequest": { + "id": "RegionInstanceGroupsSetNamedPortsRequest", + "type": "object", + "properties": { + "fingerprint": { + "type": "string", + "description": "The fingerprint of the named ports information for this instance group. Use this optional property to prevent conflicts when multiple users change the named ports settings concurrently. Obtain the fingerprint with the instanceGroups.get method. Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request.", + "format": "byte" + }, + "namedPorts": { + "type": "array", + "description": "The list of named ports to set for this instance group.", + "items": { + "$ref": "NamedPort" + } + } + } + }, + "RegionList": { + "id": "RegionList", + "type": "object", + "description": "Contains a list of region resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of Region resources.", + "items": { + "$ref": "Region" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#regionList for lists of regions.", + "default": "compute#regionList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "ResourceGroupReference": { + "id": "ResourceGroupReference", + "type": "object", + "properties": { + "group": { + "type": "string", + "description": "A URI referencing one of the instance groups listed in the backend service." + } + } + }, + "Route": { + "id": "Route", + "type": "object", + "description": "Represents a Route resource. A route specifies how certain packets should be handled by the network. Routes are associated with instances by tags and the set of routes for a particular instance is called its routing table.\n\nFor each packet leaving a instance, the system searches that instance's routing table for a single best matching route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the route with the smallest priority value. 
If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the nextHop field of the winning route - either to another instance destination, a instance gateway or a Google Compute Engine-operated gateway.\n\nPackets that do not match any route in the sending instance's routing table are dropped.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "destRange": { + "type": "string", + "description": "The destination range of outgoing packets that this route applies to.", + "annotations": { + "required": [ + "compute.routes.insert" + ] + } + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of this resource. Always compute#routes for Route resources.", + "default": "compute#route" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "annotations": { "required": [ "compute.routes.insert" @@ -4795,7 +5463,7 @@ }, "interfaces": { "type": "array", - "description": "Router interfaces. Each interface requires either one linked resource (e.g. linkedVpnTunnel) or IP address and IP address range (e.g. ipRange).", + "description": "Router interfaces. Each interface requires either one linked resource (e.g. linkedVpnTunnel), or IP address and IP address range (e.g. ipRange), or both.", "items": { "$ref": "RouterInterface" } @@ -4956,7 +5624,7 @@ }, "selfLink": { "type": "string", - "description": "[Output Only] Server-defined URL for the resource." + "description": "[Output Only] Server-defined URL for this resource." } } }, @@ -5147,6 +5815,41 @@ } } }, + "SSLHealthCheck": { + "id": "SSLHealthCheck", + "type": "object", + "properties": { + "port": { + "type": "integer", + "description": "The TCP port number for the health check request. The default value is 443.", + "format": "int32" + }, + "portName": { + "type": "string", + "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence." + }, + "proxyHeader": { + "type": "string", + "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "request": { + "type": "string", + "description": "The application data to send once the SSL connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII." 
+ }, + "response": { + "type": "string", + "description": "The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII." + } + } + }, "Scheduling": { "id": "Scheduling", "type": "object", @@ -5188,9 +5891,19 @@ "description": "[Output Only] Type of the resource. Always compute#serialPortOutput for serial port output.", "default": "compute#serialPortOutput" }, + "next": { + "type": "string", + "description": "[Output Only] The position of the next byte of content from the serial console output. Use this value in the next request as the start parameter.", + "format": "int64" + }, "selfLink": { "type": "string", - "description": "[Output Only] Server-defined URL for the resource." + "description": "[Output Only] Server-defined URL for this resource." + }, + "start": { + "type": "string", + "description": "[Output Only] The starting byte position of the output that was returned. This should match the start parameter sent with the request. If the serial console output exceeds the size of the buffer, older output will be overwritten by newer content and the start values will be mismatched.", + "format": "int64" } } }, @@ -5525,6 +6238,16 @@ } } }, + "SubnetworksExpandIpCidrRangeRequest": { + "id": "SubnetworksExpandIpCidrRangeRequest", + "type": "object", + "properties": { + "ipCidrRange": { + "type": "string", + "description": "The IP (in CIDR format or netmask) of internal addresses that are legal on this Subnetwork. This range should be disjoint from other subnetworks within this network. This range can only be larger than (i.e. a superset of) the range previously defined before the update." + } + } + }, "SubnetworksScopedList": { "id": "SubnetworksScopedList", "type": "object", @@ -5605,18 +6328,53 @@ } } }, - "Tags": { - "id": "Tags", + "TCPHealthCheck": { + "id": "TCPHealthCheck", "type": "object", - "description": "A set of instance tags.", "properties": { - "fingerprint": { + "port": { + "type": "integer", + "description": "The TCP port number for the health check request. The default value is 80.", + "format": "int32" + }, + "portName": { "type": "string", - "description": "Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata.\n\nTo see the latest fingerprint, make get() request to the instance.", - "format": "byte" + "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence." }, - "items": { - "type": "array", + "proxyHeader": { + "type": "string", + "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "request": { + "type": "string", + "description": "The application data to send once the TCP connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII." + }, + "response": { + "type": "string", + "description": "The bytes to match against the beginning of the response data. 
If left empty (the default value), any response will indicate health. The response data can only be ASCII." + } + } + }, + "Tags": { + "id": "Tags", + "type": "object", + "description": "A set of instance tags.", + "properties": { + "fingerprint": { + "type": "string", + "description": "Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata.\n\nTo see the latest fingerprint, make get() request to the instance.", + "format": "byte" + }, + "items": { + "type": "array", "description": "An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035.", "items": { "type": "string" @@ -6040,6 +6798,7 @@ "description": "Sesssion affinity option, must be one of the following values:\nNONE: Connections from the same client IP may go to any instance in the pool.\nCLIENT_IP: Connections from the same client IP will go to the same instance in the pool while that instance remains healthy.\nCLIENT_IP_PROTO: Connections from the same client IP with the same IP protocol will go to the same instance in the pool while that instance remains healthy.", "enum": [ "CLIENT_IP", + "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", "NONE" @@ -6048,6 +6807,7 @@ "", "", "", + "", "" ] } @@ -6273,6 +7033,135 @@ } } }, + "TargetSslProxiesSetBackendServiceRequest": { + "id": "TargetSslProxiesSetBackendServiceRequest", + "type": "object", + "properties": { + "service": { + "type": "string", + "description": "The URL of the new BackendService resource for the targetSslProxy." + } + } + }, + "TargetSslProxiesSetProxyHeaderRequest": { + "id": "TargetSslProxiesSetProxyHeaderRequest", + "type": "object", + "properties": { + "proxyHeader": { + "type": "string", + "description": "The new type of proxy header to append before sending data to the backend. NONE or PROXY_V1 are allowed.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "TargetSslProxiesSetSslCertificatesRequest": { + "id": "TargetSslProxiesSetSslCertificatesRequest", + "type": "object", + "properties": { + "sslCertificates": { + "type": "array", + "description": "New set of URLs to SslCertificate resources to associate with this TargetSslProxy. Currently exactly one ssl certificate must be specified.", + "items": { + "type": "string" + } + } + } + }, + "TargetSslProxy": { + "id": "TargetSslProxy", + "type": "object", + "description": "A TargetSslProxy resource. This resource defines an SSL proxy.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#targetSslProxy for target SSL proxies.", + "default": "compute#targetSslProxy" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. 
The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "proxyHeader": { + "type": "string", + "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "service": { + "type": "string", + "description": "URL to the BackendService resource." + }, + "sslCertificates": { + "type": "array", + "description": "URLs to SslCertificate resources that are used to authenticate connections to Backends. Currently exactly one SSL certificate must be specified.", + "items": { + "type": "string" + } + } + } + }, + "TargetSslProxyList": { + "id": "TargetSslProxyList", + "type": "object", + "description": "Contains a list of TargetSslProxy resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "A list of TargetSslProxy resources.", + "items": { + "$ref": "TargetSslProxy" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#targetSslProxyList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, "TargetVpnGateway": { "id": "TargetVpnGateway", "type": "object", @@ -6382,7 +7271,7 @@ }, "selfLink": { "type": "string", - "description": "[Output Only] Server-defined URL for the resource." + "description": "[Output Only] Server-defined URL for this resource." } } }, @@ -6413,7 +7302,7 @@ }, "selfLink": { "type": "string", - "description": "[Output Only] Server-defined URL for the resource." + "description": "[Output Only] Server-defined URL for this resource." } } }, @@ -6883,7 +7772,7 @@ }, "selfLink": { "type": "string", - "description": "[Output Only] Server-defined URL for the resource." + "description": "[Output Only] Server-defined URL for this resource." } } }, @@ -7075,6 +7964,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -7239,6 +8133,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -7296,6 +8195,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -7460,6 +8364,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -7583,6 +8492,56 @@ }, "backendServices": { "methods": { + "aggregatedList": { + "id": "compute.backendServices.aggregatedList", + "path": "{project}/aggregated/backendServices", + "httpMethod": "GET", + "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "BackendServiceAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "delete": { "id": "compute.backendServices.delete", "path": "{project}/global/backendServices/{backendService}", @@ -7734,6 +8693,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -7763,7 +8727,7 @@ "id": "compute.backendServices.patch", "path": "{project}/global/backendServices/{backendService}", "httpMethod": "PATCH", - "description": "Updates the entire content of the BackendService resource. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports patch semantics.", + "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports patch semantics.", "parameters": { "backendService": { "type": "string", @@ -7799,7 +8763,7 @@ "id": "compute.backendServices.update", "path": "{project}/global/backendServices/{backendService}", "httpMethod": "PUT", - "description": "Updates the entire content of the BackendService resource. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", + "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", "parameters": { "backendService": { "type": "string", @@ -7855,6 +8819,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -7942,6 +8911,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -7999,6 +8973,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -8211,6 +9190,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -8407,6 +9391,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -8528,6 +9517,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -8692,6 +9686,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -8888,6 +9887,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -9032,6 +10036,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -9117,6 +10126,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -9226,6 +10240,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -9253,17 +10272,17 @@ } } }, - "httpHealthChecks": { + "healthChecks": { "methods": { "delete": { - "id": "compute.httpHealthChecks.delete", - "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "id": "compute.healthChecks.delete", + "path": "{project}/global/healthChecks/{healthCheck}", "httpMethod": "DELETE", - "description": "Deletes the specified HttpHealthCheck resource.", + "description": "Deletes the specified HealthCheck resource.", "parameters": { - "httpHealthCheck": { + "healthCheck": { "type": "string", - "description": "Name of the HttpHealthCheck resource to delete.", + "description": "Name of the HealthCheck resource to delete.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -9278,7 +10297,7 @@ }, "parameterOrder": [ "project", - "httpHealthCheck" + "healthCheck" ], "response": { "$ref": "Operation" @@ -9289,14 +10308,14 @@ ] }, "get": { - "id": "compute.httpHealthChecks.get", - "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "id": "compute.healthChecks.get", + "path": "{project}/global/healthChecks/{healthCheck}", "httpMethod": "GET", - "description": "Returns the specified HttpHealthCheck resource. Get a list of available HTTP health checks by making a list() request.", + "description": "Returns the specified HealthCheck resource. 
Get a list of available health checks by making a list() request.", "parameters": { - "httpHealthCheck": { + "healthCheck": { "type": "string", - "description": "Name of the HttpHealthCheck resource to return.", + "description": "Name of the HealthCheck resource to return.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -9311,10 +10330,10 @@ }, "parameterOrder": [ "project", - "httpHealthCheck" + "healthCheck" ], "response": { - "$ref": "HttpHealthCheck" + "$ref": "HealthCheck" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -9323,10 +10342,10 @@ ] }, "insert": { - "id": "compute.httpHealthChecks.insert", - "path": "{project}/global/httpHealthChecks", + "id": "compute.healthChecks.insert", + "path": "{project}/global/healthChecks", "httpMethod": "POST", - "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", + "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", "parameters": { "project": { "type": "string", @@ -9340,7 +10359,7 @@ "project" ], "request": { - "$ref": "HttpHealthCheck" + "$ref": "HealthCheck" }, "response": { "$ref": "Operation" @@ -9351,10 +10370,10 @@ ] }, "list": { - "id": "compute.httpHealthChecks.list", - "path": "{project}/global/httpHealthChecks", + "id": "compute.healthChecks.list", + "path": "{project}/global/healthChecks", "httpMethod": "GET", - "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", + "description": "Retrieves the list of HealthCheck resources available to the specified project.", "parameters": { "filter": { "type": "string", @@ -9370,6 +10389,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -9387,7 +10411,7 @@ "project" ], "response": { - "$ref": "HttpHealthCheckList" + "$ref": "HealthCheckList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -9396,12 +10420,233 @@ ] }, "patch": { - "id": "compute.httpHealthChecks.patch", - "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "id": "compute.healthChecks.patch", + "path": "{project}/global/healthChecks/{healthCheck}", "httpMethod": "PATCH", - "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports patch semantics.", + "description": "Updates a HealthCheck resource in the specified project using the data included in the request. 
This method supports patch semantics.", "parameters": { - "httpHealthCheck": { + "healthCheck": { + "type": "string", + "description": "Name of the HealthCheck resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "healthCheck" + ], + "request": { + "$ref": "HealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "id": "compute.healthChecks.update", + "path": "{project}/global/healthChecks/{healthCheck}", + "httpMethod": "PUT", + "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", + "parameters": { + "healthCheck": { + "type": "string", + "description": "Name of the HealthCheck resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "healthCheck" + ], + "request": { + "$ref": "HealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "httpHealthChecks": { + "methods": { + "delete": { + "id": "compute.httpHealthChecks.delete", + "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "httpMethod": "DELETE", + "description": "Deletes the specified HttpHealthCheck resource.", + "parameters": { + "httpHealthCheck": { + "type": "string", + "description": "Name of the HttpHealthCheck resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "httpHealthCheck" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.httpHealthChecks.get", + "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "httpMethod": "GET", + "description": "Returns the specified HttpHealthCheck resource. 
Get a list of available HTTP health checks by making a list() request.", + "parameters": { + "httpHealthCheck": { + "type": "string", + "description": "Name of the HttpHealthCheck resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "httpHealthCheck" + ], + "response": { + "$ref": "HttpHealthCheck" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.httpHealthChecks.insert", + "path": "{project}/global/httpHealthChecks", + "httpMethod": "POST", + "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "HttpHealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.httpHealthChecks.list", + "path": "{project}/global/httpHealthChecks", + "httpMethod": "GET", + "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "HttpHealthCheckList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.httpHealthChecks.patch", + "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "httpMethod": "PATCH", + "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports patch semantics.", + "parameters": { + "httpHealthCheck": { "type": "string", "description": "Name of the HttpHealthCheck resource to update.", "required": true, @@ -9586,6 +10831,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -9875,6 +11125,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -9966,6 +11221,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -10167,6 +11427,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -10478,6 +11743,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -10637,6 +11907,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -10695,6 +11970,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -10934,6 +12214,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -11034,6 +12319,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -11318,6 +12608,12 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "start": { + "type": "string", + "description": "For the initial request, leave this field unspecified. 
For subsequent calls, this field should be set to the next value that was returned in the previous call.", + "format": "int64", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -11396,6 +12692,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -11890,6 +13191,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -11977,6 +13283,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -12077,43 +13388,1368 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.networks.insert", + "path": "{project}/global/networks", + "httpMethod": "POST", + "description": "Creates a network in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Network" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.networks.list", + "path": "{project}/global/networks", + "httpMethod": "GET", + "description": "Retrieves the list of networks available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "NetworkList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "switchToCustomMode": { + "id": "compute.networks.switchToCustomMode", + "path": "{project}/global/networks/{network}/switchToCustomMode", + "httpMethod": "POST", + "description": "Switches the network mode from auto subnet mode to custom subnet mode.", + "parameters": { + "network": { + "type": "string", + "description": "Name of the network to be updated.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "network" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "projects": { + "methods": { + "get": { + "id": "compute.projects.get", + "path": "{project}", + "httpMethod": "GET", + "description": "Returns the specified Project resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "Project" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "moveDisk": { + "id": "compute.projects.moveDisk", + "path": "{project}/moveDisk", + "httpMethod": "POST", + "description": "Moves a persistent disk from one zone to another.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "DiskMoveRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "moveInstance": { + "id": "compute.projects.moveInstance", + "path": "{project}/moveInstance", + 
"httpMethod": "POST", + "description": "Moves an instance and its attached persistent disks from one zone to another.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "InstanceMoveRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setCommonInstanceMetadata": { + "id": "compute.projects.setCommonInstanceMetadata", + "path": "{project}/setCommonInstanceMetadata", + "httpMethod": "POST", + "description": "Sets metadata common to all instances within the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Metadata" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setUsageExportBucket": { + "id": "compute.projects.setUsageExportBucket", + "path": "{project}/setUsageExportBucket", + "httpMethod": "POST", + "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "UsageExportLocation" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, + "regionAutoscalers": { + "methods": { + "delete": { + "id": "compute.regionAutoscalers.delete", + "path": "{project}/regions/{region}/autoscalers/{autoscaler}", + "httpMethod": "DELETE", + "description": "Deletes the specified autoscaler.", + "parameters": { + "autoscaler": { + "type": "string", + "description": "Name of the autoscaler to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "autoscaler" + ], + 
"response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.regionAutoscalers.get", + "path": "{project}/regions/{region}/autoscalers/{autoscaler}", + "httpMethod": "GET", + "description": "Returns the specified autoscaler.", + "parameters": { + "autoscaler": { + "type": "string", + "description": "Name of the autoscaler to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "autoscaler" + ], + "response": { + "$ref": "Autoscaler" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.regionAutoscalers.insert", + "path": "{project}/regions/{region}/autoscalers", + "httpMethod": "POST", + "description": "Creates an autoscaler in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "Autoscaler" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.regionAutoscalers.list", + "path": "{project}/regions/{region}/autoscalers", + "httpMethod": "GET", + "description": "Retrieves a list of autoscalers contained within the specified region.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "RegionAutoscalerList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.regionAutoscalers.patch", + "path": "{project}/regions/{region}/autoscalers", + "httpMethod": "PATCH", + "description": "Updates an autoscaler in the specified project using the data included in the request. 
This method supports patch semantics.", + "parameters": { + "autoscaler": { + "type": "string", + "description": "Name of the autoscaler to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "autoscaler" + ], + "request": { + "$ref": "Autoscaler" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "id": "compute.regionAutoscalers.update", + "path": "{project}/regions/{region}/autoscalers", + "httpMethod": "PUT", + "description": "Updates an autoscaler in the specified project using the data included in the request.", + "parameters": { + "autoscaler": { + "type": "string", + "description": "Name of the autoscaler to update.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "Autoscaler" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "regionBackendServices": { + "methods": { + "delete": { + "id": "compute.regionBackendServices.delete", + "path": "{project}/regions/{region}/backendServices/{backendService}", + "httpMethod": "DELETE", + "description": "Deletes the specified regional BackendService resource.", + "parameters": { + "backendService": { + "type": "string", + "description": "Name of the BackendService resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "backendService" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.regionBackendServices.get", + "path": "{project}/regions/{region}/backendServices/{backendService}", + "httpMethod": "GET", + "description": "Returns the specified regional BackendService 
resource.", + "parameters": { + "backendService": { + "type": "string", + "description": "Name of the BackendService resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "backendService" + ], + "response": { + "$ref": "BackendService" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getHealth": { + "id": "compute.regionBackendServices.getHealth", + "path": "{project}/regions/{region}/backendServices/{backendService}/getHealth", + "httpMethod": "POST", + "description": "Gets the most recent health check results for this regional BackendService.", + "parameters": { + "backendService": { + "type": "string", + "description": "Name of the BackendService resource to which the queried instance belongs.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "backendService" + ], + "request": { + "$ref": "ResourceGroupReference" + }, + "response": { + "$ref": "BackendServiceGroupHealth" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.regionBackendServices.insert", + "path": "{project}/regions/{region}/backendServices", + "httpMethod": "POST", + "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. 
Read Restrictions and Guidelines for more information.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "BackendService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.regionBackendServices.list", + "path": "{project}/regions/{region}/backendServices", + "httpMethod": "GET", + "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "BackendServiceList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.regionBackendServices.patch", + "path": "{project}/regions/{region}/backendServices/{backendService}", + "httpMethod": "PATCH", + "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports patch semantics.", + "parameters": { + "backendService": { + "type": "string", + "description": "Name of the BackendService resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "backendService" + ], + "request": { + "$ref": "BackendService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "id": "compute.regionBackendServices.update", + "path": "{project}/regions/{region}/backendServices/{backendService}", + "httpMethod": "PUT", + "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. 
Read Restrictions and Guidelines for more information.", + "parameters": { + "backendService": { + "type": "string", + "description": "Name of the BackendService resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "backendService" + ], + "request": { + "$ref": "BackendService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "regionInstanceGroupManagers": { + "methods": { + "abandonInstances": { + "id": "compute.regionInstanceGroupManagers.abandonInstances", + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + "httpMethod": "POST", + "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. 
You must separately verify the status of the abandoning action with the listmanagedinstances method.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "Name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "instanceGroupManager" + ], + "request": { + "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "delete": { + "id": "compute.regionInstanceGroupManagers.delete", + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + "httpMethod": "DELETE", + "description": "Deletes the specified managed instance group and all of the instances in that group.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "Name of the managed instance group to delete.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "instanceGroupManager" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "deleteInstances": { + "id": "compute.regionInstanceGroupManagers.deleteInstances", + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + "httpMethod": "POST", + "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. 
You must separately verify the status of the deleting action with the listmanagedinstances method.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "Name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "instanceGroupManager" + ], + "request": { + "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.regionInstanceGroupManagers.get", + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + "httpMethod": "GET", + "description": "Returns all of the details about the specified managed instance group.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "Name of the managed instance group to return.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "instanceGroupManager" + ], + "response": { + "$ref": "InstanceGroupManager" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.regionInstanceGroupManagers.insert", + "path": "{project}/regions/{region}/instanceGroupManagers", + "httpMethod": "POST", + "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. 
You must separately verify the status of the individual instances with the listmanagedinstances method.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "InstanceGroupManager" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.regionInstanceGroupManagers.list", + "path": "{project}/regions/{region}/instanceGroupManagers", + "httpMethod": "GET", + "description": "Retrieves the list of managed instance groups that are contained within the specified region.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "RegionInstanceGroupManagerList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "listManagedInstances": { + "id": "compute.regionInstanceGroupManagers.listManagedInstances", + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + "httpMethod": "POST", + "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "instanceGroupManager" + ], + "response": { + "$ref": "RegionInstanceGroupManagersListInstancesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "recreateInstances": { + "id": "compute.regionInstanceGroupManagers.recreateInstances", + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + "httpMethod": "POST", + "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. 
You must separately verify the status of the recreating action with the listmanagedinstances method.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "Name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "instanceGroupManager" + ], + "request": { + "$ref": "RegionInstanceGroupManagersRecreateRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "resize": { + "id": "compute.regionInstanceGroupManagers.resize", + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", + "httpMethod": "POST", + "description": "Changes the intended size for the managed instance group. If you increase the size, the group schedules actions to create new instances using the current instance template. If you decrease the size, the group schedules delete actions on one or more instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "Name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "location": "path" + }, + "size": { + "type": "integer", + "description": "Number of instances that should exist in this instance group manager.", + "required": true, + "format": "int32", + "minimum": "0", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "region", + "instanceGroupManager", + "size" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setInstanceTemplate": { + "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + "httpMethod": "POST", + "description": "Sets the instance template to use when creating new instances or recreating instances in this group. 
Existing instances are not affected.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "instanceGroupManager" + ], + "request": { + "$ref": "RegionInstanceGroupManagersSetTemplateRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setTargetPools": { + "id": "compute.regionInstanceGroupManagers.setTargetPools", + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + "httpMethod": "POST", + "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "Name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "instanceGroupManager" + ], + "request": { + "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" ] - }, - "insert": { - "id": "compute.networks.insert", - "path": "{project}/global/networks", - "httpMethod": "POST", - "description": "Creates a network in the specified project using the data included in the request.", + } + } + }, + "regionInstanceGroups": { + "methods": { + "get": { + "id": "compute.regionInstanceGroups.get", + "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}", + "httpMethod": "GET", + "description": "Returns the specified instance group resource.", "parameters": { + "instanceGroup": { + "type": "string", + "description": "Name of the instance group resource to return.", + "required": true, + "location": "path" + }, "project": { "type": "string", "description": "Project ID for this request.", "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "location": "path" } }, "parameterOrder": [ - "project" + "project", + "region", + "instanceGroup" ], - "request": { - "$ref": "Network" - }, "response": { - "$ref": "Operation" + "$ref": "InstanceGroup" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + 
"https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, "list": { - "id": "compute.networks.list", - "path": "{project}/global/networks", + "id": "compute.regionInstanceGroups.list", + "path": "{project}/regions/{region}/instanceGroups", "httpMethod": "GET", - "description": "Retrieves the list of networks available to the specified project.", + "description": "Retrieves the list of instance group resources contained within the specified region.", "parameters": { "filter": { "type": "string", @@ -12129,6 +14765,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -12140,43 +14781,20 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "NetworkList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "projects": { - "methods": { - "get": { - "id": "compute.projects.get", - "path": "{project}", - "httpMethod": "GET", - "description": "Returns the specified Project resource.", - "parameters": { - "project": { + }, + "region": { "type": "string", - "description": "Project ID for this request.", + "description": "Name of the region scoping this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, "parameterOrder": [ - "project" + "project", + "region" ], "response": { - "$ref": "Project" + "$ref": "RegionInstanceGroupList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -12184,119 +14802,113 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "moveDisk": { - "id": "compute.projects.moveDisk", - "path": "{project}/moveDisk", + "listInstances": { + "id": "compute.regionInstanceGroups.listInstances", + "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", "httpMethod": "POST", - "description": "Moves a persistent disk from one zone to another.", + "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running.", "parameters": { - "project": { + "filter": { "type": "string", - "description": "Project ID for this request.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "instanceGroup": { + "type": "string", + "description": "Name of the regional instance group for which we want to list the instances.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "DiskMoveRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "moveInstance": { - "id": "compute.projects.moveInstance", - "path": "{project}/moveInstance", - "httpMethod": "POST", - "description": "Moves an instance and its attached persistent disks from one zone to another.", - "parameters": { + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, "project": { "type": "string", "description": "Project ID for this request.", "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "InstanceMoveRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setCommonInstanceMetadata": { - "id": "compute.projects.setCommonInstanceMetadata", - "path": "{project}/setCommonInstanceMetadata", - "httpMethod": "POST", - "description": "Sets metadata common to all instances within the specified project using the data included in the request.", - "parameters": { - "project": { + }, + "region": { "type": "string", - "description": "Project ID for this request.", + "description": "Name of the region scoping this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, "parameterOrder": [ - "project" + "project", + "region", + "instanceGroup" ], "request": { - "$ref": "Metadata" + "$ref": "RegionInstanceGroupsListInstancesRequest" }, "response": { - "$ref": "Operation" + "$ref": "RegionInstanceGroupsListInstances" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "setUsageExportBucket": { - "id": "compute.projects.setUsageExportBucket", - "path": "{project}/setUsageExportBucket", + "setNamedPorts": { + "id": "compute.regionInstanceGroups.setNamedPorts", + "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", "httpMethod": "POST", - "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. 
If you provide an empty request body using this method, the usage export feature will be disabled.", + "description": "Sets the named ports for the specified regional instance group.", "parameters": { + "instanceGroup": { + "type": "string", + "description": "The name of the regional instance group where the named ports are updated.", + "required": true, + "location": "path" + }, "project": { "type": "string", "description": "Project ID for this request.", "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "location": "path" } }, "parameterOrder": [ - "project" + "project", + "region", + "instanceGroup" ], "request": { - "$ref": "UsageExportLocation" + "$ref": "RegionInstanceGroupsSetNamedPortsRequest" }, "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" + "https://www.googleapis.com/auth/compute" ] } } @@ -12403,6 +15015,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -12494,6 +15111,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -12543,6 +15165,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -12749,6 +15376,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -12786,7 +15418,7 @@ "id": "compute.routers.patch", "path": "{project}/regions/{region}/routers/{router}", "httpMethod": "PATCH", - "description": "Updates the entire content of the Router resource. This method supports patch semantics.", + "description": "Updates the specified Router resource with the data included in the request. This method supports patch semantics.", "parameters": { "project": { "type": "string", @@ -12875,7 +15507,7 @@ "id": "compute.routers.update", "path": "{project}/regions/{region}/routers/{router}", "httpMethod": "PUT", - "description": "Updates the entire content of the Router resource.", + "description": "Updates the specified Router resource with the data included in the request.", "parameters": { "project": { "type": "string", @@ -13034,6 +15666,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -13150,6 +15787,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -13294,6 +15936,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -13343,6 +15990,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -13390,7 +16042,48 @@ }, "subnetwork": { "type": "string", - "description": "Name of the Subnetwork resource to delete.", + "description": "Name of the Subnetwork resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "subnetwork" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "expandIpCidrRange": { + "id": "compute.subnetworks.expandIpCidrRange", + "path": "{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange", + "httpMethod": "POST", + "description": "Expands the IP CIDR range of the subnetwork to a specified value.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "subnetwork": { + "type": "string", + "description": "Name of the Subnetwork resource to update.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -13401,6 +16094,9 @@ "region", "subnetwork" ], + "request": { + "$ref": "SubnetworksExpandIpCidrRangeRequest" + }, "response": { "$ref": "Operation" }, @@ -13507,6 +16203,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -13659,6 +16360,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -13839,6 +16545,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -13960,6 +16671,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -14124,6 +16840,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -14269,6 +16990,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -14478,6 +17204,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -14651,6 +17382,263 @@ } } }, + "targetSslProxies": { + "methods": { + "delete": { + "id": "compute.targetSslProxies.delete", + "path": "{project}/global/targetSslProxies/{targetSslProxy}", + "httpMethod": "DELETE", + "description": "Deletes the specified TargetSslProxy resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetSslProxy": { + "type": "string", + "description": "Name of the TargetSslProxy resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetSslProxy" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.targetSslProxies.get", + "path": "{project}/global/targetSslProxies/{targetSslProxy}", + "httpMethod": "GET", + "description": "Returns the specified TargetSslProxy resource. 
Get a list of available target SSL proxies by making a list() request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetSslProxy": { + "type": "string", + "description": "Name of the TargetSslProxy resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetSslProxy" + ], + "response": { + "$ref": "TargetSslProxy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.targetSslProxies.insert", + "path": "{project}/global/targetSslProxies", + "httpMethod": "POST", + "description": "Creates a TargetSslProxy resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "TargetSslProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.targetSslProxies.list", + "path": "{project}/global/targetSslProxies", + "httpMethod": "GET", + "description": "Retrieves the list of TargetSslProxy resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "TargetSslProxyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setBackendService": { + "id": "compute.targetSslProxies.setBackendService", + "path": "{project}/global/targetSslProxies/{targetSslProxy}/setBackendService", + "httpMethod": "POST", + "description": "Changes the BackendService for TargetSslProxy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetSslProxy": { + "type": "string", + "description": "Name of the TargetSslProxy resource whose BackendService resource is to be set.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetSslProxy" + ], + "request": { + "$ref": "TargetSslProxiesSetBackendServiceRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setProxyHeader": { + "id": "compute.targetSslProxies.setProxyHeader", + "path": "{project}/global/targetSslProxies/{targetSslProxy}/setProxyHeader", + "httpMethod": "POST", + "description": "Changes the ProxyHeaderType for TargetSslProxy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetSslProxy": { + "type": "string", + "description": "Name of the TargetSslProxy resource whose ProxyHeader is to be set.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetSslProxy" + ], + 
"request": { + "$ref": "TargetSslProxiesSetProxyHeaderRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setSslCertificates": { + "id": "compute.targetSslProxies.setSslCertificates", + "path": "{project}/global/targetSslProxies/{targetSslProxy}/setSslCertificates", + "httpMethod": "POST", + "description": "Changes SslCertificates for TargetSslProxy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetSslProxy": { + "type": "string", + "description": "Name of the TargetSslProxy resource whose SslCertificate resource is to be set.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetSslProxy" + ], + "request": { + "$ref": "TargetSslProxiesSetSslCertificatesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, "targetVpnGateways": { "methods": { "aggregatedList": { @@ -14673,6 +17661,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -14837,6 +17830,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -15025,6 +18023,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -15054,7 +18057,7 @@ "id": "compute.urlMaps.patch", "path": "{project}/global/urlMaps/{urlMap}", "httpMethod": "PATCH", - "description": "Updates the entire content of the UrlMap resource. This method supports patch semantics.", + "description": "Updates the specified UrlMap resource with the data included in the request. This method supports patch semantics.", "parameters": { "project": { "type": "string", @@ -15090,7 +18093,7 @@ "id": "compute.urlMaps.update", "path": "{project}/global/urlMaps/{urlMap}", "httpMethod": "PUT", - "description": "Updates the entire content of the UrlMap resource.", + "description": "Updates the specified UrlMap resource with the data included in the request.", "parameters": { "project": { "type": "string", @@ -15182,6 +18185,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -15346,6 +18354,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -15483,6 +18496,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", @@ -15574,6 +18592,11 @@ "maximum": "500", "location": "query" }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, "pageToken": { "type": "string", "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index 188ce80cb4..d0453bc337 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -81,6 +81,7 @@ func New(client *http.Client) (*Service, error) { s.GlobalAddresses = NewGlobalAddressesService(s) s.GlobalForwardingRules = NewGlobalForwardingRulesService(s) s.GlobalOperations = NewGlobalOperationsService(s) + s.HealthChecks = NewHealthChecksService(s) s.HttpHealthChecks = NewHttpHealthChecksService(s) s.HttpsHealthChecks = NewHttpsHealthChecksService(s) s.Images = NewImagesService(s) @@ -92,6 +93,10 @@ func New(client *http.Client) (*Service, error) { s.MachineTypes = NewMachineTypesService(s) s.Networks = NewNetworksService(s) s.Projects = NewProjectsService(s) + s.RegionAutoscalers = NewRegionAutoscalersService(s) + s.RegionBackendServices = NewRegionBackendServicesService(s) + s.RegionInstanceGroupManagers = NewRegionInstanceGroupManagersService(s) + s.RegionInstanceGroups = NewRegionInstanceGroupsService(s) s.RegionOperations = NewRegionOperationsService(s) s.Regions = NewRegionsService(s) s.Routers = NewRoutersService(s) @@ -103,6 +108,7 @@ func New(client *http.Client) (*Service, error) { s.TargetHttpsProxies = NewTargetHttpsProxiesService(s) s.TargetInstances = NewTargetInstancesService(s) s.TargetPools = NewTargetPoolsService(s) + s.TargetSslProxies = NewTargetSslProxiesService(s) s.TargetVpnGateways = NewTargetVpnGatewaysService(s) s.UrlMaps = NewUrlMapsService(s) s.VpnTunnels = NewVpnTunnelsService(s) @@ -136,6 +142,8 @@ type Service struct { GlobalOperations *GlobalOperationsService + HealthChecks *HealthChecksService + HttpHealthChecks *HttpHealthChecksService HttpsHealthChecks *HttpsHealthChecksService @@ -158,6 +166,14 @@ type Service struct { Projects *ProjectsService + RegionAutoscalers *RegionAutoscalersService + + RegionBackendServices *RegionBackendServicesService + + RegionInstanceGroupManagers *RegionInstanceGroupManagersService + + RegionInstanceGroups *RegionInstanceGroupsService + RegionOperations *RegionOperationsService Regions *RegionsService @@ -180,6 +196,8 @@ type Service struct { TargetPools *TargetPoolsService + TargetSslProxies 
*TargetSslProxiesService + TargetVpnGateways *TargetVpnGatewaysService UrlMaps *UrlMapsService @@ -288,6 +306,15 @@ type GlobalOperationsService struct { s *Service } +func NewHealthChecksService(s *Service) *HealthChecksService { + rs := &HealthChecksService{s: s} + return rs +} + +type HealthChecksService struct { + s *Service +} + func NewHttpHealthChecksService(s *Service) *HttpHealthChecksService { rs := &HttpHealthChecksService{s: s} return rs @@ -387,6 +414,42 @@ type ProjectsService struct { s *Service } +func NewRegionAutoscalersService(s *Service) *RegionAutoscalersService { + rs := &RegionAutoscalersService{s: s} + return rs +} + +type RegionAutoscalersService struct { + s *Service +} + +func NewRegionBackendServicesService(s *Service) *RegionBackendServicesService { + rs := &RegionBackendServicesService{s: s} + return rs +} + +type RegionBackendServicesService struct { + s *Service +} + +func NewRegionInstanceGroupManagersService(s *Service) *RegionInstanceGroupManagersService { + rs := &RegionInstanceGroupManagersService{s: s} + return rs +} + +type RegionInstanceGroupManagersService struct { + s *Service +} + +func NewRegionInstanceGroupsService(s *Service) *RegionInstanceGroupsService { + rs := &RegionInstanceGroupsService{s: s} + return rs +} + +type RegionInstanceGroupsService struct { + s *Service +} + func NewRegionOperationsService(s *Service) *RegionOperationsService { rs := &RegionOperationsService{s: s} return rs @@ -486,6 +549,15 @@ type TargetPoolsService struct { s *Service } +func NewTargetSslProxiesService(s *Service) *TargetSslProxiesService { + rs := &TargetSslProxiesService{s: s} + return rs +} + +type TargetSslProxiesService struct { + s *Service +} + func NewTargetVpnGatewaysService(s *Service) *TargetVpnGatewaysService { rs := &TargetVpnGatewaysService{s: s} return rs @@ -562,12 +634,20 @@ type AccessConfig struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *AccessConfig) MarshalJSON() ([]byte, error) { type noMethod AccessConfig raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Address: A reserved address resource. @@ -632,12 +712,20 @@ type Address struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Address") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *Address) MarshalJSON() ([]byte, error) { type noMethod Address raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type AddressAggregatedList struct { @@ -674,12 +762,20 @@ type AddressAggregatedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *AddressAggregatedList) MarshalJSON() ([]byte, error) { type noMethod AddressAggregatedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // AddressList: Contains a list of addresses. @@ -717,12 +813,20 @@ type AddressList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *AddressList) MarshalJSON() ([]byte, error) { type noMethod AddressList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type AddressesScopedList struct { @@ -740,12 +844,20 @@ type AddressesScopedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Addresses") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *AddressesScopedList) MarshalJSON() ([]byte, error) { type noMethod AddressesScopedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // AddressesScopedListWarning: [Output Only] Informational warning which @@ -790,12 +902,20 @@ type AddressesScopedListWarning struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *AddressesScopedListWarning) MarshalJSON() ([]byte, error) { type noMethod AddressesScopedListWarning raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type AddressesScopedListWarningData struct { @@ -819,12 +939,20 @@ type AddressesScopedListWarningData struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *AddressesScopedListWarningData) MarshalJSON() ([]byte, error) { type noMethod AddressesScopedListWarningData raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // AttachedDisk: An instance-attached disk resource. @@ -917,9 +1045,15 @@ type AttachedDisk struct { Mode string `json:"mode,omitempty"` // Source: Specifies a valid partial or full URL to an existing - // Persistent Disk resource. This field is only applicable for - // persistent disks. Note that for InstanceTemplate, it is just disk - // name, not URL for the disk. + // Persistent Disk resource. When creating a new instance, one of + // initializeParams.sourceImage or disks.source is required. + // + // If desired, you can also attach existing non-root persistent disks + // using this property. This field is only applicable for persistent + // disks. + // + // Note that for InstanceTemplate, specify the disk name, not the URL + // for the disk. Source string `json:"source,omitempty"` // Type: Specifies the type of the disk, either SCRATCH or PERSISTENT. @@ -937,12 +1071,20 @@ type AttachedDisk struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AutoDelete") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *AttachedDisk) MarshalJSON() ([]byte, error) { type noMethod AttachedDisk raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // AttachedDiskInitializeParams: [Input Only] Specifies the parameters @@ -978,8 +1120,9 @@ type AttachedDiskInitializeParams struct { // is the name of the disk type, not URL. 
DiskType string `json:"diskType,omitempty"` - // SourceImage: The source image used to create this disk. If the source - // image is deleted, this field will not be set. + // SourceImage: The source image to create this disk. When creating a + // new instance, one of initializeParams.sourceImage or disks.source is + // required. // // To create a disk with one of the public operating system images, // specify the image by its family name. For example, specify @@ -1004,6 +1147,8 @@ type AttachedDiskInitializeParams struct { // family/family-name: // // global/images/family/my-private-family + // + // If the source image is deleted later, this field will not be set. SourceImage string `json:"sourceImage,omitempty"` // SourceImageEncryptionKey: The customer-supplied encryption key of the @@ -1022,12 +1167,20 @@ type AttachedDiskInitializeParams struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DiskName") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *AttachedDiskInitializeParams) MarshalJSON() ([]byte, error) { type noMethod AttachedDiskInitializeParams raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Autoscaler: Represents an Autoscaler resource. Autoscalers allow you @@ -1069,6 +1222,10 @@ type Autoscaler struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` + // Region: [Output Only] URL of the region where the instance group + // resides (for autoscalers living in regional scope). + Region string `json:"region,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -1091,12 +1248,21 @@ type Autoscaler struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AutoscalingPolicy") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *Autoscaler) MarshalJSON() ([]byte, error) { type noMethod Autoscaler raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type AutoscalerAggregatedList struct { @@ -1133,12 +1299,20 @@ type AutoscalerAggregatedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *AutoscalerAggregatedList) MarshalJSON() ([]byte, error) { type noMethod AutoscalerAggregatedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // AutoscalerList: Contains a list of Autoscaler resources. @@ -1176,12 +1350,20 @@ type AutoscalerList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *AutoscalerList) MarshalJSON() ([]byte, error) { type noMethod AutoscalerList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type AutoscalersScopedList struct { @@ -1200,12 +1382,20 @@ type AutoscalersScopedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Autoscalers") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *AutoscalersScopedList) MarshalJSON() ([]byte, error) { type noMethod AutoscalersScopedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // AutoscalersScopedListWarning: [Output Only] Informational warning @@ -1250,12 +1440,20 @@ type AutoscalersScopedListWarning struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *AutoscalersScopedListWarning) MarshalJSON() ([]byte, error) { type noMethod AutoscalersScopedListWarning raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type AutoscalersScopedListWarningData struct { @@ -1279,12 +1477,20 @@ type AutoscalersScopedListWarningData struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *AutoscalersScopedListWarningData) MarshalJSON() ([]byte, error) { type noMethod AutoscalersScopedListWarningData raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // AutoscalingPolicy: Cloud Autoscaler policy. @@ -1334,12 +1540,21 @@ type AutoscalingPolicy struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CoolDownPeriodSec") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *AutoscalingPolicy) MarshalJSON() ([]byte, error) { type noMethod AutoscalingPolicy raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // AutoscalingPolicyCpuUtilization: CPU utilization policy. @@ -1366,32 +1581,40 @@ type AutoscalingPolicyCpuUtilization struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "UtilizationTarget") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *AutoscalingPolicyCpuUtilization) MarshalJSON() ([]byte, error) { type noMethod AutoscalingPolicyCpuUtilization raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // AutoscalingPolicyCustomMetricUtilization: Custom utilization metric // policy. type AutoscalingPolicyCustomMetricUtilization struct { - // Metric: The identifier of the Cloud Monitoring metric. 
The metric - // cannot have negative values and should be a utilization metric, which - // means that the number of virtual machines handling requests should - // increase or decrease proportionally to the metric. The metric must - // also have a label of compute.googleapis.com/resource_id with the - // value of the instance's unique ID, although this alone does not - // guarantee that the metric is valid. + // Metric: The identifier of the Stackdriver Monitoring metric. The + // metric cannot have negative values and should be a utilization + // metric, which means that the number of virtual machines handling + // requests should increase or decrease proportionally to the metric. + // The metric must also have a label of + // compute.googleapis.com/resource_id with the value of the instance's + // unique ID, although this alone does not guarantee that the metric is + // valid. // // For example, the following is a valid // metric: // compute.googleapis.com/instance/network/received_bytes_count - // - // - // - // The following is not a valid metric because it does not increase or + // T + // he following is not a valid metric because it does not increase or // decrease based on // usage: // compute.googleapis.com/instance/cpu/reserved_cores @@ -1402,7 +1625,7 @@ type AutoscalingPolicyCustomMetricUtilization struct { UtilizationTarget float64 `json:"utilizationTarget,omitempty"` // UtilizationTargetType: Defines how target utilization value is - // expressed for a Cloud Monitoring metric. Either GAUGE, + // expressed for a Stackdriver Monitoring metric. Either GAUGE, // DELTA_PER_SECOND, or DELTA_PER_MINUTE. If not specified, the default // is GAUGE. // @@ -1419,12 +1642,20 @@ type AutoscalingPolicyCustomMetricUtilization struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Metric") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *AutoscalingPolicyCustomMetricUtilization) MarshalJSON() ([]byte, error) { type noMethod AutoscalingPolicyCustomMetricUtilization raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // AutoscalingPolicyLoadBalancingUtilization: Configuration parameters @@ -1443,32 +1674,43 @@ type AutoscalingPolicyLoadBalancingUtilization struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "UtilizationTarget") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` } func (s *AutoscalingPolicyLoadBalancingUtilization) MarshalJSON() ([]byte, error) { type noMethod AutoscalingPolicyLoadBalancingUtilization raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Backend: Message containing information of one individual backend. type Backend struct { // BalancingMode: Specifies the balancing mode for this backend. For - // global HTTP(S) load balancing, the default is UTILIZATION. Valid - // values are UTILIZATION and RATE. + // global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION. + // Valid values are UTILIZATION, RATE (for HTTP(S)) and CONNECTION (for + // TCP/SSL). // // This cannot be used for internal load balancing. // // Possible values: + // "CONNECTION" // "RATE" // "UTILIZATION" BalancingMode string `json:"balancingMode,omitempty"` // CapacityScaler: A multiplier applied to the group's maximum servicing - // capacity (either UTILIZATION or RATE). Default value is 1, which - // means the group will serve up to 100% of its configured CPU or RPS - // (depending on balancingMode). A setting of 0 means the group is - // completely drained, offering 0% of its available CPU or RPS. Valid + // capacity (based on UTILIZATION, RATE or CONNECTION). Default value is + // 1, which means the group will serve up to 100% of its configured + // capacity (depending on balancingMode). A setting of 0 means the group + // is completely drained, offering 0% of its available Capacity. Valid // range is [0.0,1.0]. // // This cannot be used for internal load balancing. @@ -1493,6 +1735,23 @@ type Backend struct { // BackendService. Group string `json:"group,omitempty"` + // MaxConnections: The max number of simultaneous connections for the + // group. Can be used with either CONNECTION or UTILIZATION balancing + // modes. For CONNECTION mode, either maxConnections or + // maxConnectionsPerInstance must be set. + // + // This cannot be used for internal load balancing. + MaxConnections int64 `json:"maxConnections,omitempty"` + + // MaxConnectionsPerInstance: The max number of simultaneous connections + // that a single backend instance can handle. This is used to calculate + // the capacity of the group. Can be used in either CONNECTION or + // UTILIZATION balancing modes. For CONNECTION mode, either + // maxConnections or maxConnectionsPerInstance must be set. + // + // This cannot be used for internal load balancing. + MaxConnectionsPerInstance int64 `json:"maxConnectionsPerInstance,omitempty"` + // MaxRate: The max requests per second (RPS) of the group. Can be used // with either RATE or UTILIZATION balancing modes, but required if RATE // mode. For RATE mode, either maxRate or maxRatePerInstance must be @@ -1523,12 +1782,20 @@ type Backend struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BalancingMode") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *Backend) MarshalJSON() ([]byte, error) { type noMethod Backend raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // BackendService: A BackendService resource. This resource defines a @@ -1545,6 +1812,8 @@ type BackendService struct { // Backends: The list of backends that serve this BackendService. Backends []*Backend `json:"backends,omitempty"` + ConnectionDraining *ConnectionDraining `json:"connectionDraining,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -1581,6 +1850,12 @@ type BackendService struct { // for backend services. Kind string `json:"kind,omitempty"` + // Possible values: + // "EXTERNAL" + // "INTERNAL" + // "INVALID_LOAD_BALANCING_SCHEME" + LoadBalancingScheme string `json:"loadBalancingScheme,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -1615,6 +1890,9 @@ type BackendService struct { // Possible values: // "HTTP" // "HTTPS" + // "SSL" + // "TCP" + // "UDP" Protocol string `json:"protocol,omitempty"` // Region: [Output Only] URL of the region where the regional backend @@ -1638,6 +1916,7 @@ type BackendService struct { // // Possible values: // "CLIENT_IP" + // "CLIENT_IP_PORT_PROTO" // "CLIENT_IP_PROTO" // "GENERATED_COOKIE" // "NONE" @@ -1659,12 +1938,68 @@ type BackendService struct { // field is empty or not. This may be used to include empty fields in // Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AffinityCookieTtlSec") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *BackendService) MarshalJSON() ([]byte, error) { type noMethod BackendService raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BackendServiceAggregatedList: Contains a list of +// BackendServicesScopedList. +type BackendServiceAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A map of scoped BackendService lists. + Items map[string]BackendServicesScopedList `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] A token used to continue a truncated + // list request. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod BackendServiceAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type BackendServiceGroupHealth struct { @@ -1685,12 +2020,20 @@ type BackendServiceGroupHealth struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HealthStatus") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *BackendServiceGroupHealth) MarshalJSON() ([]byte, error) { type noMethod BackendServiceGroupHealth raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // BackendServiceList: Contains a list of BackendService resources. @@ -1724,12 +2067,147 @@ type BackendServiceList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *BackendServiceList) MarshalJSON() ([]byte, error) { type noMethod BackendServiceList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BackendServicesScopedList struct { + // BackendServices: List of BackendServices contained in this scope. + BackendServices []*BackendService `json:"backendServices,omitempty"` + + // Warning: Informational warning which replaces the list of backend + // services when the list is empty. + Warning *BackendServicesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BackendServices") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackendServices") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *BackendServicesScopedList) MarshalJSON() ([]byte, error) { + type noMethod BackendServicesScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BackendServicesScopedListWarning: Informational warning which +// replaces the list of backend services when the list is empty. +type BackendServicesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*BackendServicesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServicesScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod BackendServicesScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BackendServicesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. 
For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServicesScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod BackendServicesScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type CacheInvalidationRule struct { @@ -1742,12 +2220,51 @@ type CacheInvalidationRule struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Path") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *CacheInvalidationRule) MarshalJSON() ([]byte, error) { type noMethod CacheInvalidationRule raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ConnectionDraining: Message containing connection draining +// configuration. +type ConnectionDraining struct { + // DrainingTimeoutSec: Time for which instance will be drained (not + // accept new connections, but still work to finish started). + DrainingTimeoutSec int64 `json:"drainingTimeoutSec,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DrainingTimeoutSec") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"DrainingTimeoutSec") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ConnectionDraining) MarshalJSON() ([]byte, error) { + type noMethod ConnectionDraining + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // CustomerEncryptionKey: Represents a customer-supplied encryption key @@ -1767,12 +2284,20 @@ type CustomerEncryptionKey struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RawKey") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *CustomerEncryptionKey) MarshalJSON() ([]byte, error) { type noMethod CustomerEncryptionKey raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type CustomerEncryptionKeyProtectedDisk struct { @@ -1792,26 +2317,41 @@ type CustomerEncryptionKeyProtectedDisk struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DiskEncryptionKey") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *CustomerEncryptionKeyProtectedDisk) MarshalJSON() ([]byte, error) { type noMethod CustomerEncryptionKeyProtectedDisk raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // DeprecationStatus: Deprecation status for a public resource. type DeprecationStatus struct { - // Deleted: An optional RFC3339 timestamp on or after which the - // deprecation state of this resource will be changed to DELETED. + // Deleted: An optional RFC3339 timestamp on or after which the state of + // this resource is intended to change to DELETED. This is only + // informational and the status will not change unless the client + // explicitly changes it. Deleted string `json:"deleted,omitempty"` - // Deprecated: An optional RFC3339 timestamp on or after which the - // deprecation state of this resource will be changed to DEPRECATED. + // Deprecated: An optional RFC3339 timestamp on or after which the state + // of this resource is intended to change to DEPRECATED. This is only + // informational and the status will not change unless the client + // explicitly changes it. 
Deprecated string `json:"deprecated,omitempty"` - // Obsolete: An optional RFC3339 timestamp on or after which the - // deprecation state of this resource will be changed to OBSOLETE. + // Obsolete: An optional RFC3339 timestamp on or after which the state + // of this resource is intended to change to OBSOLETE. This is only + // informational and the status will not change unless the client + // explicitly changes it. Obsolete string `json:"obsolete,omitempty"` // Replacement: The URL of the suggested replacement for a deprecated @@ -1839,12 +2379,20 @@ type DeprecationStatus struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Deleted") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *DeprecationStatus) MarshalJSON() ([]byte, error) { type noMethod DeprecationStatus raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Disk: A Disk resource. @@ -1981,8 +2529,7 @@ type Disk struct { // version of the snapshot that was used. SourceSnapshotId string `json:"sourceSnapshotId,omitempty"` - // Status: [Output Only] The status of disk creation. Applicable - // statuses includes: CREATING, FAILED, READY, RESTORING. + // Status: [Output Only] The status of disk creation. // // Possible values: // "CREATING" @@ -2013,12 +2560,21 @@ type Disk struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *Disk) MarshalJSON() ([]byte, error) { type noMethod Disk raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type DiskAggregatedList struct { @@ -2055,12 +2611,20 @@ type DiskAggregatedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *DiskAggregatedList) MarshalJSON() ([]byte, error) { type noMethod DiskAggregatedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // DiskList: A list of Disk resources. @@ -2098,12 +2662,20 @@ type DiskList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *DiskList) MarshalJSON() ([]byte, error) { type noMethod DiskList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type DiskMoveRequest struct { @@ -2132,12 +2704,21 @@ type DiskMoveRequest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DestinationZone") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *DiskMoveRequest) MarshalJSON() ([]byte, error) { type noMethod DiskMoveRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // DiskType: A DiskType resource. @@ -2189,12 +2770,21 @@ type DiskType struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *DiskType) MarshalJSON() ([]byte, error) { type noMethod DiskType raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type DiskTypeAggregatedList struct { @@ -2231,12 +2821,20 @@ type DiskTypeAggregatedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *DiskTypeAggregatedList) MarshalJSON() ([]byte, error) { type noMethod DiskTypeAggregatedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // DiskTypeList: Contains a list of disk types. @@ -2274,12 +2872,20 @@ type DiskTypeList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *DiskTypeList) MarshalJSON() ([]byte, error) { type noMethod DiskTypeList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type DiskTypesScopedList struct { @@ -2297,12 +2903,20 @@ type DiskTypesScopedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DiskTypes") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *DiskTypesScopedList) MarshalJSON() ([]byte, error) { type noMethod DiskTypesScopedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // DiskTypesScopedListWarning: [Output Only] Informational warning which @@ -2347,12 +2961,20 @@ type DiskTypesScopedListWarning struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *DiskTypesScopedListWarning) MarshalJSON() ([]byte, error) { type noMethod DiskTypesScopedListWarning raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type DiskTypesScopedListWarningData struct { @@ -2376,12 +2998,20 @@ type DiskTypesScopedListWarningData struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *DiskTypesScopedListWarningData) MarshalJSON() ([]byte, error) { type noMethod DiskTypesScopedListWarningData raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type DisksResizeRequest struct { @@ -2396,12 +3026,20 @@ type DisksResizeRequest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SizeGb") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *DisksResizeRequest) MarshalJSON() ([]byte, error) { type noMethod DisksResizeRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type DisksScopedList struct { @@ -2419,12 +3057,20 @@ type DisksScopedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Disks") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *DisksScopedList) MarshalJSON() ([]byte, error) { type noMethod DisksScopedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // DisksScopedListWarning: [Output Only] Informational warning which @@ -2469,12 +3115,20 @@ type DisksScopedListWarning struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *DisksScopedListWarning) MarshalJSON() ([]byte, error) { type noMethod DisksScopedListWarning raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type DisksScopedListWarningData struct { @@ -2498,12 +3152,20 @@ type DisksScopedListWarningData struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *DisksScopedListWarningData) MarshalJSON() ([]byte, error) { type noMethod DisksScopedListWarningData raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Firewall: Represents a Firewall resource. @@ -2592,12 +3254,20 @@ type Firewall struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Allowed") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *Firewall) MarshalJSON() ([]byte, error) { type noMethod Firewall raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type FirewallAllowed struct { @@ -2622,12 +3292,20 @@ type FirewallAllowed struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IPProtocol") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *FirewallAllowed) MarshalJSON() ([]byte, error) { type noMethod FirewallAllowed raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // FirewallList: Contains a list of firewalls. @@ -2665,12 +3343,20 @@ type FirewallList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *FirewallList) MarshalJSON() ([]byte, error) { type noMethod FirewallList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ForwardingRule: A ForwardingRule resource. A ForwardingRule resource @@ -2708,6 +3394,13 @@ type ForwardingRule struct { // "UDP" IPProtocol string `json:"IPProtocol,omitempty"` + // BackendService: This field is not used for external load + // balancing. + // + // For internal load balancing, this field identifies the BackendService + // resource to receive the matched traffic. + BackendService string `json:"backendService,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -2724,6 +3417,19 @@ type ForwardingRule struct { // compute#forwardingRule for Forwarding Rule resources. Kind string `json:"kind,omitempty"` + // LoadBalancingScheme: This signifies what the ForwardingRule will be + // used for and can only take the following values: INTERNAL EXTERNAL + // The value of INTERNAL means that this will be used for Internal + // Network Load Balancing (TCP, UDP). The value of EXTERNAL means that + // this will be used for External Load Balancing (HTTP(S) LB, External + // TCP/UDP LB, SSL Proxy) + // + // Possible values: + // "EXTERNAL" + // "INTERNAL" + // "INVALID" + LoadBalancingScheme string `json:"loadBalancingScheme,omitempty"` + // Name: Name of the resource; provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -2733,6 +3439,13 @@ type ForwardingRule struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` + // Network: This field is not used for external load balancing. + // + // For internal load balancing, this field identifies the network that + // the load balanced IP should belong to for this Forwarding Rule. If + // this field is not specified, the default network will be used. + Network string `json:"network,omitempty"` + // PortRange: Applicable only when IPProtocol is TCP, UDP, or SCTP, only // packets addressed to ports in the specified range will be forwarded // to target. Forwarding rules with the same [IPAddress, IPProtocol] @@ -2741,6 +3454,17 @@ type ForwardingRule struct { // This field is not used for internal load balancing. 
PortRange string `json:"portRange,omitempty"` + // Ports: This field is not used for external load balancing. + // + // When the load balancing scheme is INTERNAL, a single port or a comma + // separated list of ports can be configured. Only packets addressed to + // these ports will be forwarded to the backends configured with this + // forwarding rule. If the port list is not provided then all ports are + // allowed to pass through. + // + // You may specify a maximum of up to 5 ports. + Ports []string `json:"ports,omitempty"` + // Region: [Output Only] URL of the region where the regional forwarding // rule resides. This field is not applicable to global forwarding // rules. @@ -2749,6 +3473,17 @@ type ForwardingRule struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // Subnetwork: This field is not used for external load balancing. + // + // For internal load balancing, this field identifies the subnetwork + // that the load balanced IP should belong to for this Forwarding + // Rule. + // + // If the network specified is in auto subnet mode, this field is + // optional. However, if the network is in custom subnet mode, a + // subnetwork must be specified. + Subnetwork string `json:"subnetwork,omitempty"` + // Target: The URL of the target resource to receive the matched // traffic. For regional forwarding rules, this target must live in the // same region as the forwarding rule. For global forwarding rules, this @@ -2771,12 +3506,20 @@ type ForwardingRule struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IPAddress") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ForwardingRule) MarshalJSON() ([]byte, error) { type noMethod ForwardingRule raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type ForwardingRuleAggregatedList struct { @@ -2813,12 +3556,20 @@ type ForwardingRuleAggregatedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ForwardingRuleAggregatedList) MarshalJSON() ([]byte, error) { type noMethod ForwardingRuleAggregatedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ForwardingRuleList: Contains a list of ForwardingRule resources. 
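The regenerated structs in this file pair the existing ForceSendFields list with a new NullFields list, and every generated MarshalJSON now forwards both slices to gensupport.MarshalJSON. Below is a minimal sketch of how a caller might use the two lists when building a Patch-style request body; it uses the HTTPHealthCheck type introduced later in this same file, and it assumes the vendored package is imported as google.golang.org/api/compute/v1 (the import path is not visible in this hunk), so treat the paths as illustrative rather than confirmed.

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed vendored import path
)

func main() {
	// Port and Host are zero-valued, so by default both would simply be
	// omitted from the marshaled request body.
	hc := &compute.HTTPHealthCheck{
		RequestPath: "/healthz",

		// ForceSendFields keeps the empty, non-pointer Port field in the
		// output as "port":0 instead of dropping it.
		ForceSendFields: []string{"Port"},

		// NullFields emits "host":null; per the generated doc comments, it
		// is an error to list a field here that has a non-empty value.
		NullFields: []string{"Host"},
	}

	// encoding/json picks up the generated MarshalJSON (pointer receiver),
	// which passes both lists through to gensupport.MarshalJSON.
	b, err := json.Marshal(hc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// Expected shape (key order may vary):
	// {"host":null,"port":0,"requestPath":"/healthz"}
}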
@@ -2855,12 +3606,20 @@ type ForwardingRuleList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ForwardingRuleList) MarshalJSON() ([]byte, error) { type noMethod ForwardingRuleList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type ForwardingRulesScopedList struct { @@ -2878,12 +3637,21 @@ type ForwardingRulesScopedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ForwardingRules") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *ForwardingRulesScopedList) MarshalJSON() ([]byte, error) { type noMethod ForwardingRulesScopedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ForwardingRulesScopedListWarning: Informational warning which @@ -2928,12 +3696,20 @@ type ForwardingRulesScopedListWarning struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ForwardingRulesScopedListWarning) MarshalJSON() ([]byte, error) { type noMethod ForwardingRulesScopedListWarning raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type ForwardingRulesScopedListWarningData struct { @@ -2957,12 +3733,305 @@ type ForwardingRulesScopedListWarningData struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ForwardingRulesScopedListWarningData) MarshalJSON() ([]byte, error) { type noMethod ForwardingRulesScopedListWarningData raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GuestOsFeature: Guest OS features. +type GuestOsFeature struct { + // Type: The type of supported feature. Currenty only + // VIRTIO_SCSI_MULTIQUEUE is supported. For newer Windows images, the + // server might also populate this property with the value WINDOWS to + // indicate that this is a Windows image. This value is purely + // informational and does not enable or disable any features. + // + // Possible values: + // "FEATURE_TYPE_UNSPECIFIED" + // "VIRTIO_SCSI_MULTIQUEUE" + // "WINDOWS" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Type") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Type") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GuestOsFeature) MarshalJSON() ([]byte, error) { + type noMethod GuestOsFeature + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HTTPHealthCheck struct { + // Host: The value of the host header in the HTTP health check request. + // If left empty (default value), the IP on behalf of which this health + // check is performed will be used. + Host string `json:"host,omitempty"` + + // Port: The TCP port number for the health check request. The default + // value is 80. + Port int64 `json:"port,omitempty"` + + // PortName: Port name as defined in InstanceGroup#NamedPort#name. If + // both port and port_name are defined, port takes precedence. + PortName string `json:"portName,omitempty"` + + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // RequestPath: The request path of the HTTP health check request. The + // default value is /. + RequestPath string `json:"requestPath,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Host") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Host") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HTTPHealthCheck) MarshalJSON() ([]byte, error) { + type noMethod HTTPHealthCheck + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HTTPSHealthCheck struct { + // Host: The value of the host header in the HTTPS health check request. + // If left empty (default value), the IP on behalf of which this health + // check is performed will be used. + Host string `json:"host,omitempty"` + + // Port: The TCP port number for the health check request. The default + // value is 443. + Port int64 `json:"port,omitempty"` + + // PortName: Port name as defined in InstanceGroup#NamedPort#name. If + // both port and port_name are defined, port takes precedence. + PortName string `json:"portName,omitempty"` + + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // RequestPath: The request path of the HTTPS health check request. The + // default value is /. + RequestPath string `json:"requestPath,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Host") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Host") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { + type noMethod HTTPSHealthCheck + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HealthCheck: An HealthCheck resource. This resource defines a +// template for how individual virtual machines should be checked for +// health, via one of the supported protocols. +type HealthCheck struct { + // CheckIntervalSec: How often (in seconds) to send a health check. The + // default value is 5 seconds. + CheckIntervalSec int64 `json:"checkIntervalSec,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in 3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. 
+ Description string `json:"description,omitempty"` + + // HealthyThreshold: A so-far unhealthy instance will be marked healthy + // after this many consecutive successes. The default value is 2. + HealthyThreshold int64 `json:"healthyThreshold,omitempty"` + + HttpHealthCheck *HTTPHealthCheck `json:"httpHealthCheck,omitempty"` + + HttpsHealthCheck *HTTPSHealthCheck `json:"httpsHealthCheck,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: Type of the resource. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + SslHealthCheck *SSLHealthCheck `json:"sslHealthCheck,omitempty"` + + TcpHealthCheck *TCPHealthCheck `json:"tcpHealthCheck,omitempty"` + + // TimeoutSec: How long (in seconds) to wait before claiming failure. + // The default value is 5 seconds. It is invalid for timeoutSec to have + // greater value than checkIntervalSec. + TimeoutSec int64 `json:"timeoutSec,omitempty"` + + // Type: Specifies the type of the healthCheck, either TCP, UDP, SSL, + // HTTP, HTTPS or HTTP2. If not specified, the default is TCP. Exactly + // one of the protocol-specific health check field must be specified, + // which must match type field. + // + // Possible values: + // "HTTP" + // "HTTPS" + // "INVALID" + // "SSL" + // "TCP" + Type string `json:"type,omitempty"` + + // UnhealthyThreshold: A so-far healthy instance will be marked + // unhealthy after this many consecutive failures. The default value is + // 2. + UnhealthyThreshold int64 `json:"unhealthyThreshold,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CheckIntervalSec") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CheckIntervalSec") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *HealthCheck) MarshalJSON() ([]byte, error) { + type noMethod HealthCheck + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HealthCheckList: Contains a list of HealthCheck resources. 
+type HealthCheckList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: A list of HealthCheck resources. + Items []*HealthCheck `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthCheckList) MarshalJSON() ([]byte, error) { + type noMethod HealthCheckList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // HealthCheckReference: A full or valid partial URL to a health check. @@ -2981,12 +4050,20 @@ type HealthCheckReference struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HealthCheck") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *HealthCheckReference) MarshalJSON() ([]byte, error) { type noMethod HealthCheckReference raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type HealthStatus struct { @@ -3013,12 +4090,20 @@ type HealthStatus struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HealthState") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *HealthStatus) MarshalJSON() ([]byte, error) { type noMethod HealthStatus raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // HostRule: UrlMaps A host-matching rule for a URL. If matched, will @@ -3045,12 +4130,20 @@ type HostRule struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *HostRule) MarshalJSON() ([]byte, error) { type noMethod HostRule raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // HttpHealthCheck: An HttpHealthCheck resource. This resource defines a @@ -3127,12 +4220,21 @@ type HttpHealthCheck struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CheckIntervalSec") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *HttpHealthCheck) MarshalJSON() ([]byte, error) { type noMethod HttpHealthCheck raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // HttpHealthCheckList: Contains a list of HttpHealthCheck resources. @@ -3169,12 +4271,20 @@ type HttpHealthCheckList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *HttpHealthCheckList) MarshalJSON() ([]byte, error) { type noMethod HttpHealthCheckList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // HttpsHealthCheck: An HttpsHealthCheck resource. 
This resource defines @@ -3250,12 +4360,21 @@ type HttpsHealthCheck struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CheckIntervalSec") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *HttpsHealthCheck) MarshalJSON() ([]byte, error) { type noMethod HttpsHealthCheck raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // HttpsHealthCheckList: Contains a list of HttpsHealthCheck resources. @@ -3292,12 +4411,20 @@ type HttpsHealthCheckList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *HttpsHealthCheckList) MarshalJSON() ([]byte, error) { type noMethod HttpsHealthCheckList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Image: An Image resource. @@ -3328,6 +4455,20 @@ type Image struct { // RFC1035. Family string `json:"family,omitempty"` + // GuestOsFeatures: A list of features to enable on the guest OS. + // Applicable for bootable images only. Currently, only one feature can + // be enabled, VIRTIO_SCSCI_MULTIQUEUE, which allows each virtual CPU to + // have its own queue. For Windows images, you can only enable + // VIRTIO_SCSCI_MULTIQUEUE on images with driver version 1.2.0.1621 or + // higher. Linux images with kernel versions 3.17 and higher will + // support VIRTIO_SCSCI_MULTIQUEUE. + // + // For new Windows images, the server might also populate this field + // with the value WINDOWS, to indicate that this is a Windows image. + // This value is purely informational and does not enable or disable any + // features. + GuestOsFeatures []*GuestOsFeature `json:"guestOsFeatures,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` @@ -3369,13 +4510,13 @@ type Image struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // SourceDisk: URL of the The source disk used to create this image. - // This can be a full or valid partial URL. You must provide either this + // SourceDisk: URL of the source disk used to create this image. This + // can be a full or valid partial URL. You must provide either this // property or the rawDisk.source property but not both to create an // image. 
For example, the following are valid values: // - - // https://www.googleapis.com/compute/v1/projects/project/zones/zone/disk/disk - // - projects/project/zones/zone/disk/disk + // https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk + // - projects/project/zones/zone/disks/disk // - zones/zone/disks/disk SourceDisk string `json:"sourceDisk,omitempty"` @@ -3418,12 +4559,21 @@ type Image struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ArchiveSizeBytes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *Image) MarshalJSON() ([]byte, error) { type noMethod Image raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ImageRawDisk: The parameters of the raw disk image. @@ -3453,12 +4603,20 @@ type ImageRawDisk struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ContainerType") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ImageRawDisk) MarshalJSON() ([]byte, error) { type noMethod ImageRawDisk raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ImageList: Contains a list of images. @@ -3495,12 +4653,20 @@ type ImageList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ImageList) MarshalJSON() ([]byte, error) { type noMethod ImageList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Instance: An Instance resource. @@ -3593,7 +4759,7 @@ type Instance struct { // Status: [Output Only] The status of the instance. One of the // following values: PROVISIONING, STAGING, RUNNING, STOPPING, - // SUSPENDED, SUSPENDING, and TERMINATED. + // SUSPENDING, SUSPENDED, and TERMINATED. 
// // Possible values: // "PROVISIONING" @@ -3631,12 +4797,20 @@ type Instance struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CanIpForward") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *Instance) MarshalJSON() ([]byte, error) { type noMethod Instance raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceAggregatedList struct { @@ -3674,12 +4848,20 @@ type InstanceAggregatedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstanceAggregatedList) MarshalJSON() ([]byte, error) { type noMethod InstanceAggregatedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceGroup struct { @@ -3696,8 +4878,8 @@ type InstanceGroup struct { // change the named ports concurrently. Fingerprint string `json:"fingerprint,omitempty"` - // Id: [Output Only] A unique identifier for this resource type. The - // server generates this identifier. + // Id: [Output Only] A unique identifier for this instance group, + // generated by the server. Id uint64 `json:"id,omitempty,string"` // Kind: [Output Only] The resource type, which is always @@ -3723,6 +4905,10 @@ type InstanceGroup struct { // instance group belong. Network string `json:"network,omitempty"` + // Region: The URL of the region where the instance group is located + // (for regional resources). + Region string `json:"region,omitempty"` + // SelfLink: [Output Only] The URL for this instance group. The server // generates this URL. SelfLink string `json:"selfLink,omitempty"` @@ -3750,12 +4936,21 @@ type InstanceGroup struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` } func (s *InstanceGroup) MarshalJSON() ([]byte, error) { type noMethod InstanceGroup raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceGroupAggregatedList struct { @@ -3794,12 +4989,20 @@ type InstanceGroupAggregatedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstanceGroupAggregatedList) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupAggregatedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // InstanceGroupList: A list of InstanceGroup resources. @@ -3838,12 +5041,20 @@ type InstanceGroupList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstanceGroupList) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // InstanceGroupManager: An Instance Group Manager resource. @@ -3896,6 +5107,10 @@ type InstanceGroupManager struct { // complementary to this Instance Group Manager. NamedPorts []*NamedPort `json:"namedPorts,omitempty"` + // Region: [Output Only] The URL of the region where the managed + // instance group resides (for regional resources). + Region string `json:"region,omitempty"` + // SelfLink: [Output Only] The URL for this managed instance group. The // server defines this URL. SelfLink string `json:"selfLink,omitempty"` @@ -3925,12 +5140,21 @@ type InstanceGroupManager struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BaseInstanceName") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` } func (s *InstanceGroupManager) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupManager raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceGroupManagerActionsSummary struct { @@ -3953,7 +5177,7 @@ type InstanceGroupManagerActionsSummary struct { // CreatingWithoutRetries: [Output Only] The number of instances that // the managed instance group will attempt to create. The group attempts // to create each instance only once. If the group fails to create any - // of these instances, it decreases the group's target_size value + // of these instances, it decreases the group's targetSize value // accordingly. CreatingWithoutRetries int64 `json:"creatingWithoutRetries,omitempty"` @@ -3991,12 +5215,20 @@ type InstanceGroupManagerActionsSummary struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Abandoning") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstanceGroupManagerActionsSummary) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupManagerActionsSummary raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceGroupManagerAggregatedList struct { @@ -4035,12 +5267,20 @@ type InstanceGroupManagerAggregatedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstanceGroupManagerAggregatedList) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupManagerAggregatedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // InstanceGroupManagerList: [Output Only] A list of managed instance @@ -4066,8 +5306,7 @@ type InstanceGroupManagerList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] The URL for this resource type. The server - // generates this URL. + // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -4081,12 +5320,20 @@ type InstanceGroupManagerList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstanceGroupManagerList) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupManagerList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceGroupManagersAbandonInstancesRequest struct { @@ -4101,12 +5348,20 @@ type InstanceGroupManagersAbandonInstancesRequest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupManagersAbandonInstancesRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceGroupManagersDeleteInstancesRequest struct { @@ -4121,12 +5376,20 @@ type InstanceGroupManagersDeleteInstancesRequest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupManagersDeleteInstancesRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceGroupManagersListManagedInstancesResponse struct { @@ -4145,12 +5408,21 @@ type InstanceGroupManagersListManagedInstancesResponse struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ManagedInstances") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. 
This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *InstanceGroupManagersListManagedInstancesResponse) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupManagersListManagedInstancesResponse raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceGroupManagersRecreateInstancesRequest struct { @@ -4164,12 +5436,20 @@ type InstanceGroupManagersRecreateInstancesRequest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstanceGroupManagersRecreateInstancesRequest) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupManagersRecreateInstancesRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceGroupManagersScopedList struct { @@ -4189,12 +5469,21 @@ type InstanceGroupManagersScopedList struct { // field is empty or not. This may be used to include empty fields in // Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InstanceGroupManagers") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *InstanceGroupManagersScopedList) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupManagersScopedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // InstanceGroupManagersScopedListWarning: [Output Only] The warning @@ -4240,12 +5529,20 @@ type InstanceGroupManagersScopedListWarning struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
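// Illustrative aside, not part of the vendored diff: a minimal sketch of how
// the ForceSendFields and NullFields lists described above change the JSON
// emitted by these generated types. It assumes the vendored package imports
// as compute ("google.golang.org/api/compute/v1") and relies only on the
// Image.Family field shown earlier in this file.
package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	img := &compute.Image{}

	// Family is empty, so by default it is omitted from the request body.
	b, _ := json.Marshal(img)
	fmt.Println(string(b)) // {}

	// ForceSendFields makes the empty value explicit, e.g. {"family":""}.
	img.ForceSendFields = []string{"Family"}
	b, _ = json.Marshal(img)
	fmt.Println(string(b))

	// NullFields sends an explicit JSON null instead, e.g. {"family":null},
	// which is how a Patch request can clear a field on the server.
	img.ForceSendFields = nil
	img.NullFields = []string{"Family"}
	b, _ = json.Marshal(img)
	fmt.Println(string(b))
}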
+ NullFields []string `json:"-"` } func (s *InstanceGroupManagersScopedListWarning) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupManagersScopedListWarning raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceGroupManagersScopedListWarningData struct { @@ -4269,12 +5566,20 @@ type InstanceGroupManagersScopedListWarningData struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstanceGroupManagersScopedListWarningData) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupManagersScopedListWarningData raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceGroupManagersSetInstanceTemplateRequest struct { @@ -4290,12 +5595,21 @@ type InstanceGroupManagersSetInstanceTemplateRequest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InstanceTemplate") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *InstanceGroupManagersSetInstanceTemplateRequest) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupManagersSetInstanceTemplateRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceGroupManagersSetTargetPoolsRequest struct { @@ -4321,12 +5635,20 @@ type InstanceGroupManagersSetTargetPoolsRequest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Fingerprint") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *InstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupManagersSetTargetPoolsRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceGroupsAddInstancesRequest struct { @@ -4340,12 +5662,20 @@ type InstanceGroupsAddInstancesRequest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstanceGroupsAddInstancesRequest) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupsAddInstancesRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceGroupsListInstances struct { @@ -4385,12 +5715,20 @@ type InstanceGroupsListInstances struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstanceGroupsListInstances) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupsListInstances raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceGroupsListInstancesRequest struct { @@ -4411,12 +5749,20 @@ type InstanceGroupsListInstancesRequest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InstanceState") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupsListInstancesRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceGroupsRemoveInstancesRequest struct { @@ -4430,12 +5776,20 @@ type InstanceGroupsRemoveInstancesRequest struct { // server regardless of whether the field is empty or not. 
This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstanceGroupsRemoveInstancesRequest) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupsRemoveInstancesRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceGroupsScopedList struct { @@ -4454,12 +5808,21 @@ type InstanceGroupsScopedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InstanceGroups") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *InstanceGroupsScopedList) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupsScopedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // InstanceGroupsScopedListWarning: [Output Only] An informational @@ -4505,12 +5868,20 @@ type InstanceGroupsScopedListWarning struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstanceGroupsScopedListWarning) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupsScopedListWarning raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceGroupsScopedListWarningData struct { @@ -4534,12 +5905,20 @@ type InstanceGroupsScopedListWarningData struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *InstanceGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupsScopedListWarningData raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceGroupsSetNamedPortsRequest struct { @@ -4561,12 +5940,20 @@ type InstanceGroupsSetNamedPortsRequest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Fingerprint") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { type noMethod InstanceGroupsSetNamedPortsRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // InstanceList: Contains a list of instances. @@ -4604,12 +5991,20 @@ type InstanceList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstanceList) MarshalJSON() ([]byte, error) { type noMethod InstanceList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceMoveRequest struct { @@ -4638,12 +6033,21 @@ type InstanceMoveRequest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DestinationZone") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *InstanceMoveRequest) MarshalJSON() ([]byte, error) { type noMethod InstanceMoveRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceProperties struct { @@ -4701,12 +6105,20 @@ type InstanceProperties struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"CanIpForward") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstanceProperties) MarshalJSON() ([]byte, error) { type noMethod InstanceProperties raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceReference struct { @@ -4720,12 +6132,20 @@ type InstanceReference struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instance") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstanceReference) MarshalJSON() ([]byte, error) { type noMethod InstanceReference raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // InstanceTemplate: An Instance Template resource. @@ -4773,12 +6193,21 @@ type InstanceTemplate struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *InstanceTemplate) MarshalJSON() ([]byte, error) { type noMethod InstanceTemplate raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // InstanceTemplateList: A list of instance templates. @@ -4817,12 +6246,20 @@ type InstanceTemplateList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *InstanceTemplateList) MarshalJSON() ([]byte, error) { type noMethod InstanceTemplateList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceWithNamedPorts struct { @@ -4853,12 +6290,20 @@ type InstanceWithNamedPorts struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instance") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstanceWithNamedPorts) MarshalJSON() ([]byte, error) { type noMethod InstanceWithNamedPorts raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstancesScopedList struct { @@ -4876,12 +6321,20 @@ type InstancesScopedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstancesScopedList) MarshalJSON() ([]byte, error) { type noMethod InstancesScopedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // InstancesScopedListWarning: [Output Only] Informational warning which @@ -4926,12 +6379,20 @@ type InstancesScopedListWarning struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstancesScopedListWarning) MarshalJSON() ([]byte, error) { type noMethod InstancesScopedListWarning raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstancesScopedListWarningData struct { @@ -4955,12 +6416,20 @@ type InstancesScopedListWarningData struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstancesScopedListWarningData) MarshalJSON() ([]byte, error) { type noMethod InstancesScopedListWarningData raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstancesSetMachineTypeRequest struct { @@ -4976,12 +6445,20 @@ type InstancesSetMachineTypeRequest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MachineType") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstancesSetMachineTypeRequest) MarshalJSON() ([]byte, error) { type noMethod InstancesSetMachineTypeRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstancesStartWithEncryptionKeyRequest struct { @@ -5002,12 +6479,20 @@ type InstancesStartWithEncryptionKeyRequest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Disks") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *InstancesStartWithEncryptionKeyRequest) MarshalJSON() ([]byte, error) { type noMethod InstancesStartWithEncryptionKeyRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // License: A license resource. @@ -5039,12 +6524,20 @@ type License struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ChargesUseFee") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *License) MarshalJSON() ([]byte, error) { type noMethod License raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // MachineType: A Machine Type resource. @@ -5118,12 +6611,21 @@ type MachineType struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *MachineType) MarshalJSON() ([]byte, error) { type noMethod MachineType raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type MachineTypeScratchDisks struct { @@ -5137,12 +6639,20 @@ type MachineTypeScratchDisks struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DiskGb") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *MachineTypeScratchDisks) MarshalJSON() ([]byte, error) { type noMethod MachineTypeScratchDisks raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type MachineTypeAggregatedList struct { @@ -5180,12 +6690,20 @@ type MachineTypeAggregatedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *MachineTypeAggregatedList) MarshalJSON() ([]byte, error) { type noMethod MachineTypeAggregatedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // MachineTypeList: Contains a list of machine types. @@ -5223,12 +6741,20 @@ type MachineTypeList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *MachineTypeList) MarshalJSON() ([]byte, error) { type noMethod MachineTypeList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type MachineTypesScopedList struct { @@ -5247,12 +6773,20 @@ type MachineTypesScopedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MachineTypes") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *MachineTypesScopedList) MarshalJSON() ([]byte, error) { type noMethod MachineTypesScopedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // MachineTypesScopedListWarning: [Output Only] An informational warning @@ -5297,12 +6831,20 @@ type MachineTypesScopedListWarning struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *MachineTypesScopedListWarning) MarshalJSON() ([]byte, error) { type noMethod MachineTypesScopedListWarning raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type MachineTypesScopedListWarningData struct { @@ -5326,12 +6868,20 @@ type MachineTypesScopedListWarningData struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
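// Rough, hypothetical approximation of what gensupport.MarshalJSON does with
// the two lists (the real implementation lives in
// google.golang.org/api/gensupport and covers more cases): empty fields are
// dropped unless named in forceSend, and fields named in nullFields are
// emitted as explicit JSON nulls.
package main

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strings"
)

func approxMarshal(v interface{}, forceSend, nullFields []string) ([]byte, error) {
	force := map[string]bool{}
	for _, name := range forceSend {
		force[name] = true
	}
	out := map[string]interface{}{}
	rv := reflect.ValueOf(v).Elem()
	rt := rv.Type()
	for i := 0; i < rt.NumField(); i++ {
		field := rt.Field(i)
		tag := strings.Split(field.Tag.Get("json"), ",")[0]
		if tag == "" || tag == "-" {
			continue
		}
		if value := rv.Field(i); !value.IsZero() || force[field.Name] {
			out[tag] = value.Interface()
		}
	}
	for _, name := range nullFields {
		if field, ok := rt.FieldByName(name); ok {
			out[strings.Split(field.Tag.Get("json"), ",")[0]] = nil
		}
	}
	return json.Marshal(out)
}

type imageLike struct {
	Name   string `json:"name,omitempty"`
	Family string `json:"family,omitempty"`
}

func main() {
	b, _ := approxMarshal(&imageLike{Name: "img-1"}, []string{"Family"}, nil)
	fmt.Println(string(b)) // {"family":"","name":"img-1"}
	b, _ = approxMarshal(&imageLike{Name: "img-1"}, nil, []string{"Family"})
	fmt.Println(string(b)) // {"family":null,"name":"img-1"}
}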
+ NullFields []string `json:"-"` } func (s *MachineTypesScopedListWarningData) MarshalJSON() ([]byte, error) { type noMethod MachineTypesScopedListWarningData raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type ManagedInstance struct { @@ -5404,12 +6954,20 @@ type ManagedInstance struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CurrentAction") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ManagedInstance) MarshalJSON() ([]byte, error) { type noMethod ManagedInstance raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type ManagedInstanceLastAttempt struct { @@ -5424,12 +6982,20 @@ type ManagedInstanceLastAttempt struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Errors") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ManagedInstanceLastAttempt) MarshalJSON() ([]byte, error) { type noMethod ManagedInstanceLastAttempt raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ManagedInstanceLastAttemptErrors: [Output Only] Encountered errors @@ -5446,12 +7012,20 @@ type ManagedInstanceLastAttemptErrors struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Errors") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ManagedInstanceLastAttemptErrors) MarshalJSON() ([]byte, error) { type noMethod ManagedInstanceLastAttemptErrors raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type ManagedInstanceLastAttemptErrorsErrors struct { @@ -5472,12 +7046,20 @@ type ManagedInstanceLastAttemptErrorsErrors struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ManagedInstanceLastAttemptErrorsErrors) MarshalJSON() ([]byte, error) { type noMethod ManagedInstanceLastAttemptErrorsErrors raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Metadata: A metadata key/value entry. @@ -5505,12 +7087,20 @@ type Metadata struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Fingerprint") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *Metadata) MarshalJSON() ([]byte, error) { type noMethod Metadata raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type MetadataItems struct { @@ -5534,12 +7124,20 @@ type MetadataItems struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *MetadataItems) MarshalJSON() ([]byte, error) { type noMethod MetadataItems raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // NamedPort: The named port. For example: . @@ -5558,12 +7156,20 @@ type NamedPort struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *NamedPort) MarshalJSON() ([]byte, error) { type noMethod NamedPort raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Network: Represents a Network resource. Read Networks and Firewalls @@ -5631,12 +7237,20 @@ type Network struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IPv4Range") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *Network) MarshalJSON() ([]byte, error) { type noMethod Network raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // NetworkInterface: A network interface resource attached to an @@ -5691,12 +7305,20 @@ type NetworkInterface struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AccessConfigs") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *NetworkInterface) MarshalJSON() ([]byte, error) { type noMethod NetworkInterface raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // NetworkList: Contains a list of networks. @@ -5720,7 +7342,7 @@ type NetworkList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource . + // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -5734,12 +7356,20 @@ type NetworkList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
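// Usage sketch, hedged: clearing an instance group's named ports by sending
// an empty list. Without ForceSendFields the empty NamedPorts slice would be
// omitted (omitempty) and the server would see no change. The fingerprint is
// a made-up placeholder; in practice it comes from a prior InstanceGroups
// get, per the fingerprint comments above. Assumes the request's Fingerprint
// and NamedPorts fields as documented in this file.
package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	req := &compute.InstanceGroupsSetNamedPortsRequest{
		Fingerprint:     "42WmSpB8rSM=", // placeholder, not a real fingerprint
		NamedPorts:      []*compute.NamedPort{},
		ForceSendFields: []string{"NamedPorts"},
	}
	b, _ := json.Marshal(req)
	// Expected shape, roughly: {"fingerprint":"42WmSpB8rSM=","namedPorts":[]}
	fmt.Println(string(b))
}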
+ NullFields []string `json:"-"` } func (s *NetworkList) MarshalJSON() ([]byte, error) { type noMethod NetworkList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Operation: An Operation resource, used to manage asynchronous API @@ -5828,7 +7458,8 @@ type Operation struct { TargetId uint64 `json:"targetId,omitempty,string"` // TargetLink: [Output Only] The URL of the resource that the operation - // modifies. + // modifies. For operations related to creating a snapshot, this points + // to the persistent disk that the snapshot was created from. TargetLink string `json:"targetLink,omitempty"` // User: [Output Only] User who requested the operation, for example: @@ -5854,12 +7485,21 @@ type Operation struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ClientOperationId") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *Operation) MarshalJSON() ([]byte, error) { type noMethod Operation raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // OperationError: [Output Only] If errors are generated during @@ -5876,12 +7516,20 @@ type OperationError struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Errors") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *OperationError) MarshalJSON() ([]byte, error) { type noMethod OperationError raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type OperationErrorErrors struct { @@ -5902,12 +7550,20 @@ type OperationErrorErrors struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *OperationErrorErrors) MarshalJSON() ([]byte, error) { type noMethod OperationErrorErrors raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type OperationWarnings struct { @@ -5950,12 +7606,20 @@ type OperationWarnings struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *OperationWarnings) MarshalJSON() ([]byte, error) { type noMethod OperationWarnings raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type OperationWarningsData struct { @@ -5979,12 +7643,20 @@ type OperationWarningsData struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *OperationWarningsData) MarshalJSON() ([]byte, error) { type noMethod OperationWarningsData raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type OperationAggregatedList struct { @@ -6021,12 +7693,20 @@ type OperationAggregatedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *OperationAggregatedList) MarshalJSON() ([]byte, error) { type noMethod OperationAggregatedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // OperationList: Contains a list of Operation resources. @@ -6064,12 +7744,20 @@ type OperationList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *OperationList) MarshalJSON() ([]byte, error) { type noMethod OperationList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type OperationsScopedList struct { @@ -6087,12 +7775,20 @@ type OperationsScopedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Operations") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *OperationsScopedList) MarshalJSON() ([]byte, error) { type noMethod OperationsScopedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // OperationsScopedListWarning: [Output Only] Informational warning @@ -6137,12 +7833,20 @@ type OperationsScopedListWarning struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *OperationsScopedListWarning) MarshalJSON() ([]byte, error) { type noMethod OperationsScopedListWarning raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type OperationsScopedListWarningData struct { @@ -6166,12 +7870,20 @@ type OperationsScopedListWarningData struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *OperationsScopedListWarningData) MarshalJSON() ([]byte, error) { type noMethod OperationsScopedListWarningData raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // PathMatcher: A matcher for the path portion of the URL. The @@ -6206,12 +7918,21 @@ type PathMatcher struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DefaultService") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *PathMatcher) MarshalJSON() ([]byte, error) { type noMethod PathMatcher raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // PathRule: A path-matching rule for a URL. If matched, will use the @@ -6234,12 +7955,20 @@ type PathRule struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Paths") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *PathRule) MarshalJSON() ([]byte, error) { type noMethod PathRule raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Project: A Project resource. Projects can only be created in the @@ -6300,12 +8029,21 @@ type Project struct { // field is empty or not. This may be used to include empty fields in // Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CommonInstanceMetadata") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *Project) MarshalJSON() ([]byte, error) { type noMethod Project raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Quota: A quotas entry. @@ -6331,6 +8069,9 @@ type Quota struct { // "IN_USE_ADDRESSES" // "LOCAL_SSD_TOTAL_GB" // "NETWORKS" + // "PREEMPTIBLE_CPUS" + // "REGIONAL_AUTOSCALERS" + // "REGIONAL_INSTANCE_GROUP_MANAGERS" // "ROUTERS" // "ROUTES" // "SNAPSHOTS" @@ -6358,12 +8099,20 @@ type Quota struct { // server regardless of whether the field is empty or not. 
This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Limit") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *Quota) MarshalJSON() ([]byte, error) { type noMethod Quota raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Region: Region resource. @@ -6418,12 +8167,467 @@ type Region struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *Region) MarshalJSON() ([]byte, error) { type noMethod Region raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RegionAutoscalerList: Contains a list of autoscalers. +type RegionAutoscalerList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: A list of autoscalers. + Items []*Autoscaler `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] A token used to continue a truncated + // list request. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *RegionAutoscalerList) MarshalJSON() ([]byte, error) { + type noMethod RegionAutoscalerList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RegionInstanceGroupList: Contains a list of InstanceGroup resources. +type RegionInstanceGroupList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: A list of InstanceGroup resources. + Items []*InstanceGroup `json:"items,omitempty"` + + // Kind: The resource type. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] The URL for this resource type. The server + // generates this URL. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupList) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RegionInstanceGroupManagerList: Contains a list of managed instance +// groups. +type RegionInstanceGroupManagerList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: A list of managed instance groups. + Items []*InstanceGroupManager `json:"items,omitempty"` + + // Kind: [Output Only] The resource type, which is always + // compute#instanceGroupManagerList for a list of managed instance + // groups that exist in the regional scope. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output only] A token used to continue a truncated + // list request. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output only] The URL for this resource type. The server + // generates this URL. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g.
"Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupManagerList) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagerList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupManagersAbandonInstancesRequest struct { + // Instances: The names of one or more instances to abandon. + Instances []string `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersAbandonInstancesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupManagersDeleteInstancesRequest struct { + // Instances: The names of one or more instances to delete. + Instances []string `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersDeleteInstancesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupManagersListInstancesResponse struct { + // ManagedInstances: List of managed instances. + ManagedInstances []*ManagedInstance `json:"managedInstances,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ManagedInstances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ManagedInstances") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupManagersListInstancesResponse) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersListInstancesResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupManagersRecreateRequest struct { + // Instances: The URL for one or more instances to recreate. + Instances []string `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupManagersRecreateRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersRecreateRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupManagersSetTargetPoolsRequest struct { + // Fingerprint: Fingerprint of the target pools information, which is a + // hash of the contents. This field is used for optimistic locking when + // you update the target pool entries. This field is optional. 
+ Fingerprint string `json:"fingerprint,omitempty"` + + // TargetPools: The URL of all TargetPool resources to which instances + // in the instanceGroup field are added. The target pools automatically + // apply to all of the instances in the managed instance group. + TargetPools []string `json:"targetPools,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Fingerprint") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Fingerprint") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersSetTargetPoolsRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupManagersSetTemplateRequest struct { + // InstanceTemplate: URL of the InstanceTemplate resource from which all + // new instances will be created. + InstanceTemplate string `json:"instanceTemplate,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InstanceTemplate") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InstanceTemplate") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupManagersSetTemplateRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersSetTemplateRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupsListInstances struct { + // Id: [Output Only] Unique identifier for the resource. Defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of instances and any named ports that are assigned to + // those instances. + Items []*InstanceWithNamedPorts `json:"items,omitempty"` + + // Kind: The resource type. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. 
If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupsListInstances) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupsListInstances + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupsListInstancesRequest struct { + // InstanceState: The state of instances that should be returned. Valid + // options are: 'ALL', 'RUNNING'. By default, it lists all instances. + // + // Possible values: + // "ALL" + // "RUNNING" + InstanceState string `json:"instanceState,omitempty"` + + // PortName: Name of the port the user is interested in. It is optional. + // If it is set, only information about this port will be returned. If it + // is not set, all the named ports will be returned. Always lists all + // instances. + PortName string `json:"portName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InstanceState") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InstanceState") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupsListInstancesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupsSetNamedPortsRequest struct { + // Fingerprint: The fingerprint of the named ports information for this + // instance group. Use this optional property to prevent conflicts when + // multiple users change the named ports settings concurrently. Obtain + // the fingerprint with the instanceGroups.get method. Then, include the + // fingerprint in your request to ensure that you do not overwrite + // changes that were applied from another concurrent request. + Fingerprint string `json:"fingerprint,omitempty"` + + // NamedPorts: The list of named ports to set for this instance group. + NamedPorts []*NamedPort `json:"namedPorts,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Fingerprint") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Fingerprint") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupsSetNamedPortsRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // RegionList: Contains a list of region resources. @@ -6461,16 +8665,24 @@ type RegionList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *RegionList) MarshalJSON() ([]byte, error) { type noMethod RegionList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type ResourceGroupReference struct { - // Group: A URI referencing one of the resource views listed in the + // Group: A URI referencing one of the instance groups listed in the // backend service. Group string `json:"group,omitempty"` @@ -6481,12 +8693,20 @@ type ResourceGroupReference struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Group") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ResourceGroupReference) MarshalJSON() ([]byte, error) { type noMethod ResourceGroupReference raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Route: Represents a Route resource. A route specifies how certain @@ -6595,12 +8815,21 @@ type Route struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *Route) MarshalJSON() ([]byte, error) { type noMethod Route raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type RouteWarnings struct { @@ -6643,12 +8872,20 @@ type RouteWarnings struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *RouteWarnings) MarshalJSON() ([]byte, error) { type noMethod RouteWarnings raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type RouteWarningsData struct { @@ -6672,12 +8909,20 @@ type RouteWarningsData struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *RouteWarningsData) MarshalJSON() ([]byte, error) { type noMethod RouteWarningsData raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // RouteList: Contains a list of Route resources. 
@@ -6714,12 +8959,20 @@ type RouteList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *RouteList) MarshalJSON() ([]byte, error) { type noMethod RouteList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Router: Router resource. @@ -6745,8 +8998,8 @@ type Router struct { Id uint64 `json:"id,omitempty,string"` // Interfaces: Router interfaces. Each interface requires either one - // linked resource (e.g. linkedVpnTunnel) or IP address and IP address - // range (e.g. ipRange). + // linked resource (e.g. linkedVpnTunnel), or IP address and IP address + // range (e.g. ipRange), or both. Interfaces []*RouterInterface `json:"interfaces,omitempty"` // Kind: [Output Only] Type of resource. Always compute#router for @@ -6782,12 +9035,20 @@ type Router struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Bgp") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *Router) MarshalJSON() ([]byte, error) { type noMethod Router raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // RouterAggregatedList: Contains a list of routers. @@ -6824,12 +9085,20 @@ type RouterAggregatedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *RouterAggregatedList) MarshalJSON() ([]byte, error) { type noMethod RouterAggregatedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type RouterBgp struct { @@ -6846,12 +9115,20 @@ type RouterBgp struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Asn") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *RouterBgp) MarshalJSON() ([]byte, error) { type noMethod RouterBgp raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type RouterBgpPeer struct { @@ -6885,12 +9162,21 @@ type RouterBgpPeer struct { // field is empty or not. This may be used to include empty fields in // Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AdvertisedRoutePriority") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *RouterBgpPeer) MarshalJSON() ([]byte, error) { type noMethod RouterBgpPeer raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type RouterInterface struct { @@ -6917,12 +9203,20 @@ type RouterInterface struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IpRange") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *RouterInterface) MarshalJSON() ([]byte, error) { type noMethod RouterInterface raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // RouterList: Contains a list of Router resources. @@ -6946,7 +9240,7 @@ type RouterList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. + // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -6960,12 +9254,20 @@ type RouterList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *RouterList) MarshalJSON() ([]byte, error) { type noMethod RouterList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type RouterStatus struct { @@ -6984,12 +9286,20 @@ type RouterStatus struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BestRoutes") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *RouterStatus) MarshalJSON() ([]byte, error) { type noMethod RouterStatus raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type RouterStatusBgpPeerStatus struct { @@ -7036,12 +9346,21 @@ type RouterStatusBgpPeerStatus struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AdvertisedRoutes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *RouterStatusBgpPeerStatus) MarshalJSON() ([]byte, error) { type noMethod RouterStatusBgpPeerStatus raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type RouterStatusResponse struct { @@ -7061,12 +9380,20 @@ type RouterStatusResponse struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *RouterStatusResponse) MarshalJSON() ([]byte, error) { type noMethod RouterStatusResponse raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type RoutersPreviewResponse struct { @@ -7084,12 +9411,20 @@ type RoutersPreviewResponse struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Resource") to include in + // API requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *RoutersPreviewResponse) MarshalJSON() ([]byte, error) { type noMethod RoutersPreviewResponse raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type RoutersScopedList struct { @@ -7107,12 +9442,20 @@ type RoutersScopedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Routers") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *RoutersScopedList) MarshalJSON() ([]byte, error) { type noMethod RoutersScopedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // RoutersScopedListWarning: Informational warning which replaces the @@ -7157,12 +9500,20 @@ type RoutersScopedListWarning struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *RoutersScopedListWarning) MarshalJSON() ([]byte, error) { type noMethod RoutersScopedListWarning raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type RoutersScopedListWarningData struct { @@ -7186,12 +9537,72 @@ type RoutersScopedListWarningData struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *RoutersScopedListWarningData) MarshalJSON() ([]byte, error) { type noMethod RoutersScopedListWarningData raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SSLHealthCheck struct { + // Port: The TCP port number for the health check request. The default + // value is 443. + Port int64 `json:"port,omitempty"` + + // PortName: Port name as defined in InstanceGroup#NamedPort#name. If + // both port and port_name are defined, port takes precedence. + PortName string `json:"portName,omitempty"` + + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // Request: The application data to send once the SSL connection has + // been established (default value is empty). If both request and + // response are empty, the connection establishment alone will indicate + // health. The request data can only be ASCII. + Request string `json:"request,omitempty"` + + // Response: The bytes to match against the beginning of the response + // data. If left empty (the default value), any response will indicate + // health. The response data can only be ASCII. + Response string `json:"response,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Port") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Port") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SSLHealthCheck) MarshalJSON() ([]byte, error) { + type noMethod SSLHealthCheck + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Scheduling: Sets the scheduling options for an Instance. @@ -7224,12 +9635,21 @@ type Scheduling struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AutomaticRestart") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` } func (s *Scheduling) MarshalJSON() ([]byte, error) { type noMethod Scheduling raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SerialPortOutput: An instance's serial console output. @@ -7241,9 +9661,21 @@ type SerialPortOutput struct { // compute#serialPortOutput for serial port output. Kind string `json:"kind,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. + // Next: [Output Only] The position of the next byte of content from the + // serial console output. Use this value in the next request as the + // start parameter. + Next int64 `json:"next,omitempty,string"` + + // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Start: [Output Only] The starting byte position of the output that + // was returned. This should match the start parameter sent with the + // request. If the serial console output exceeds the size of the buffer, + // older output will be overwritten by newer content and the start + // values will be mismatched. + Start int64 `json:"start,omitempty,string"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -7255,12 +9687,20 @@ type SerialPortOutput struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Contents") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *SerialPortOutput) MarshalJSON() ([]byte, error) { type noMethod SerialPortOutput raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ServiceAccount: A service account. @@ -7279,12 +9719,20 @@ type ServiceAccount struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Email") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ServiceAccount) MarshalJSON() ([]byte, error) { type noMethod ServiceAccount raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Snapshot: A persistent disk snapshot resource. @@ -7394,12 +9842,21 @@ type Snapshot struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *Snapshot) MarshalJSON() ([]byte, error) { type noMethod Snapshot raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SnapshotList: Contains a list of Snapshot resources. @@ -7436,12 +9893,20 @@ type SnapshotList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *SnapshotList) MarshalJSON() ([]byte, error) { type noMethod SnapshotList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SslCertificate: An SslCertificate resource. This resource provides a @@ -7496,12 +9961,20 @@ type SslCertificate struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Certificate") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *SslCertificate) MarshalJSON() ([]byte, error) { type noMethod SslCertificate raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SslCertificateList: Contains a list of SslCertificate resources. @@ -7538,12 +10011,20 @@ type SslCertificateList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *SslCertificateList) MarshalJSON() ([]byte, error) { type noMethod SslCertificateList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Subnetwork: A Subnetwork resource. @@ -7605,12 +10086,21 @@ type Subnetwork struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *Subnetwork) MarshalJSON() ([]byte, error) { type noMethod Subnetwork raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type SubnetworkAggregatedList struct { @@ -7647,12 +10137,20 @@ type SubnetworkAggregatedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { type noMethod SubnetworkAggregatedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SubnetworkList: Contains a list of Subnetwork resources. @@ -7690,12 +10188,51 @@ type SubnetworkList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *SubnetworkList) MarshalJSON() ([]byte, error) { type noMethod SubnetworkList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SubnetworksExpandIpCidrRangeRequest struct { + // IpCidrRange: The IP (in CIDR format or netmask) of internal addresses + // that are legal on this Subnetwork. This range should be disjoint from + // other subnetworks within this network. This range can only be larger + // than (i.e. a superset of) the range previously defined before the + // update. 
+ IpCidrRange string `json:"ipCidrRange,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpCidrRange") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IpCidrRange") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworksExpandIpCidrRangeRequest) MarshalJSON() ([]byte, error) { + type noMethod SubnetworksExpandIpCidrRangeRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type SubnetworksScopedList struct { @@ -7713,12 +10250,20 @@ type SubnetworksScopedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Subnetworks") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *SubnetworksScopedList) MarshalJSON() ([]byte, error) { type noMethod SubnetworksScopedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SubnetworksScopedListWarning: An informational warning that appears @@ -7763,12 +10308,20 @@ type SubnetworksScopedListWarning struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *SubnetworksScopedListWarning) MarshalJSON() ([]byte, error) { type noMethod SubnetworksScopedListWarning raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type SubnetworksScopedListWarningData struct { @@ -7792,12 +10345,72 @@ type SubnetworksScopedListWarningData struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *SubnetworksScopedListWarningData) MarshalJSON() ([]byte, error) { type noMethod SubnetworksScopedListWarningData raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TCPHealthCheck struct { + // Port: The TCP port number for the health check request. The default + // value is 80. + Port int64 `json:"port,omitempty"` + + // PortName: Port name as defined in InstanceGroup#NamedPort#name. If + // both port and port_name are defined, port takes precedence. + PortName string `json:"portName,omitempty"` + + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // Request: The application data to send once the TCP connection has + // been established (default value is empty). If both request and + // response are empty, the connection establishment alone will indicate + // health. The request data can only be ASCII. + Request string `json:"request,omitempty"` + + // Response: The bytes to match against the beginning of the response + // data. If left empty (the default value), any response will indicate + // health. The response data can only be ASCII. + Response string `json:"response,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Port") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Port") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TCPHealthCheck) MarshalJSON() ([]byte, error) { + type noMethod TCPHealthCheck + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Tags: A set of instance tags. @@ -7823,12 +10436,20 @@ type Tags struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Fingerprint") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *Tags) MarshalJSON() ([]byte, error) { type noMethod Tags raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetHttpProxy: A TargetHttpProxy resource. This resource defines an @@ -7877,12 +10498,21 @@ type TargetHttpProxy struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *TargetHttpProxy) MarshalJSON() ([]byte, error) { type noMethod TargetHttpProxy raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetHttpProxyList: A list of TargetHttpProxy resources. @@ -7920,12 +10550,20 @@ type TargetHttpProxyList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *TargetHttpProxyList) MarshalJSON() ([]byte, error) { type noMethod TargetHttpProxyList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type TargetHttpsProxiesSetSslCertificatesRequest struct { @@ -7941,12 +10579,21 @@ type TargetHttpsProxiesSetSslCertificatesRequest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SslCertificates") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { type noMethod TargetHttpsProxiesSetSslCertificatesRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetHttpsProxy: A TargetHttpsProxy resource. This resource defines @@ -8005,12 +10652,21 @@ type TargetHttpsProxy struct { // server regardless of whether the field is empty or not. 
This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *TargetHttpsProxy) MarshalJSON() ([]byte, error) { type noMethod TargetHttpsProxy raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetHttpsProxyList: Contains a list of TargetHttpsProxy resources. @@ -8048,12 +10704,20 @@ type TargetHttpsProxyList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *TargetHttpsProxyList) MarshalJSON() ([]byte, error) { type noMethod TargetHttpsProxyList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetInstance: A TargetInstance resource. This resource defines an @@ -8119,12 +10783,21 @@ type TargetInstance struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *TargetInstance) MarshalJSON() ([]byte, error) { type noMethod TargetInstance raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type TargetInstanceAggregatedList struct { @@ -8160,12 +10833,20 @@ type TargetInstanceAggregatedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *TargetInstanceAggregatedList) MarshalJSON() ([]byte, error) { type noMethod TargetInstanceAggregatedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetInstanceList: Contains a list of TargetInstance resources. @@ -8202,12 +10883,20 @@ type TargetInstanceList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *TargetInstanceList) MarshalJSON() ([]byte, error) { type noMethod TargetInstanceList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type TargetInstancesScopedList struct { @@ -8225,12 +10914,21 @@ type TargetInstancesScopedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetInstances") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *TargetInstancesScopedList) MarshalJSON() ([]byte, error) { type noMethod TargetInstancesScopedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetInstancesScopedListWarning: Informational warning which @@ -8275,12 +10973,20 @@ type TargetInstancesScopedListWarning struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *TargetInstancesScopedListWarning) MarshalJSON() ([]byte, error) { type noMethod TargetInstancesScopedListWarning raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type TargetInstancesScopedListWarningData struct { @@ -8304,12 +11010,20 @@ type TargetInstancesScopedListWarningData struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { type noMethod TargetInstancesScopedListWarningData raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetPool: A TargetPool resource. This resource defines a pool of @@ -8407,6 +11121,7 @@ type TargetPool struct { // // Possible values: // "CLIENT_IP" + // "CLIENT_IP_PORT_PROTO" // "CLIENT_IP_PROTO" // "GENERATED_COOKIE" // "NONE" @@ -8423,12 +11138,20 @@ type TargetPool struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackupPool") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *TargetPool) MarshalJSON() ([]byte, error) { type noMethod TargetPool raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type TargetPoolAggregatedList struct { @@ -8466,12 +11189,20 @@ type TargetPoolAggregatedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *TargetPoolAggregatedList) MarshalJSON() ([]byte, error) { type noMethod TargetPoolAggregatedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type TargetPoolInstanceHealth struct { @@ -8493,12 +11224,20 @@ type TargetPoolInstanceHealth struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HealthStatus") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *TargetPoolInstanceHealth) MarshalJSON() ([]byte, error) { type noMethod TargetPoolInstanceHealth raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetPoolList: Contains a list of TargetPool resources. @@ -8536,12 +11275,20 @@ type TargetPoolList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *TargetPoolList) MarshalJSON() ([]byte, error) { type noMethod TargetPoolList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type TargetPoolsAddHealthCheckRequest struct { @@ -8556,12 +11303,20 @@ type TargetPoolsAddHealthCheckRequest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HealthChecks") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *TargetPoolsAddHealthCheckRequest) MarshalJSON() ([]byte, error) { type noMethod TargetPoolsAddHealthCheckRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type TargetPoolsAddInstanceRequest struct { @@ -8581,12 +11336,20 @@ type TargetPoolsAddInstanceRequest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *TargetPoolsAddInstanceRequest) MarshalJSON() ([]byte, error) { type noMethod TargetPoolsAddInstanceRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type TargetPoolsRemoveHealthCheckRequest struct { @@ -8605,12 +11368,20 @@ type TargetPoolsRemoveHealthCheckRequest struct { // server regardless of whether the field is empty or not. 
This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HealthChecks") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *TargetPoolsRemoveHealthCheckRequest) MarshalJSON() ([]byte, error) { type noMethod TargetPoolsRemoveHealthCheckRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type TargetPoolsRemoveInstanceRequest struct { @@ -8624,12 +11395,20 @@ type TargetPoolsRemoveInstanceRequest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *TargetPoolsRemoveInstanceRequest) MarshalJSON() ([]byte, error) { type noMethod TargetPoolsRemoveInstanceRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type TargetPoolsScopedList struct { @@ -8647,12 +11426,20 @@ type TargetPoolsScopedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetPools") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *TargetPoolsScopedList) MarshalJSON() ([]byte, error) { type noMethod TargetPoolsScopedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetPoolsScopedListWarning: Informational warning which replaces @@ -8697,12 +11484,20 @@ type TargetPoolsScopedListWarning struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *TargetPoolsScopedListWarning) MarshalJSON() ([]byte, error) { type noMethod TargetPoolsScopedListWarning raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type TargetPoolsScopedListWarningData struct { @@ -8726,12 +11521,20 @@ type TargetPoolsScopedListWarningData struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *TargetPoolsScopedListWarningData) MarshalJSON() ([]byte, error) { type noMethod TargetPoolsScopedListWarningData raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type TargetReference struct { @@ -8744,12 +11547,236 @@ type TargetReference struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Target") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *TargetReference) MarshalJSON() ([]byte, error) { type noMethod TargetReference raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetSslProxiesSetBackendServiceRequest struct { + // Service: The URL of the new BackendService resource for the + // targetSslProxy. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Service") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Service") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetSslProxiesSetBackendServiceRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetSslProxiesSetProxyHeaderRequest struct { + // ProxyHeader: The new type of proxy header to append before sending + // data to the backend. NONE or PROXY_V1 are allowed. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ProxyHeader") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ProxyHeader") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetSslProxiesSetProxyHeaderRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetSslProxiesSetSslCertificatesRequest struct { + // SslCertificates: New set of URLs to SslCertificate resources to + // associate with this TargetSslProxy. Currently exactly one ssl + // certificate must be specified. + SslCertificates []string `json:"sslCertificates,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SslCertificates") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SslCertificates") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetSslProxiesSetSslCertificatesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetSslProxy: A TargetSslProxy resource. This resource defines an +// SSL proxy. +type TargetSslProxy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. 
Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always + // compute#targetSslProxy for target SSL proxies. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Service: URL to the BackendService resource. + Service string `json:"service,omitempty"` + + // SslCertificates: URLs to SslCertificate resources that are used to + // authenticate connections to Backends. Currently exactly one SSL + // certificate must be specified. + SslCertificates []string `json:"sslCertificates,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxy) MarshalJSON() ([]byte, error) { + type noMethod TargetSslProxy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetSslProxyList: Contains a list of TargetSslProxy resources. +type TargetSslProxyList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetSslProxy resources. + Items []*TargetSslProxy `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. 
If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxyList) MarshalJSON() ([]byte, error) { + type noMethod TargetSslProxyList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetVpnGateway: Represents a Target VPN gateway resource. @@ -8820,12 +11847,21 @@ type TargetVpnGateway struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { type noMethod TargetVpnGateway raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type TargetVpnGatewayAggregatedList struct { @@ -8848,7 +11884,7 @@ type TargetVpnGatewayAggregatedList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. + // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -8862,12 +11898,20 @@ type TargetVpnGatewayAggregatedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *TargetVpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { type noMethod TargetVpnGatewayAggregatedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetVpnGatewayList: Contains a list of TargetVpnGateway resources. @@ -8891,7 +11935,7 @@ type TargetVpnGatewayList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. + // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -8905,12 +11949,20 @@ type TargetVpnGatewayList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *TargetVpnGatewayList) MarshalJSON() ([]byte, error) { type noMethod TargetVpnGatewayList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type TargetVpnGatewaysScopedList struct { @@ -8929,12 +11981,21 @@ type TargetVpnGatewaysScopedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetVpnGateways") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *TargetVpnGatewaysScopedList) MarshalJSON() ([]byte, error) { type noMethod TargetVpnGatewaysScopedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetVpnGatewaysScopedListWarning: [Output Only] Informational @@ -8979,12 +12040,20 @@ type TargetVpnGatewaysScopedListWarning struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
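// --- Illustrative aside, not part of the vendored patch ----------------------
// The TargetSslProxy and TargetSslProxyList types vendored just above are new
// in this revision. A hedged sketch of the resource shape, using only fields
// shown in the diff; the URLs are placeholders, the import path is assumed as
// before, and inserting the resource via the generated service is omitted:
package example // hypothetical package name

import compute "google.golang.org/api/compute/v1"

// newSSLProxy assembles a TargetSslProxy literal. Per the doc comments above,
// exactly one SSL certificate URL must currently be supplied, and ProxyHeader
// must be either NONE or PROXY_V1.
func newSSLProxy(name, backendServiceURL, certURL string) *compute.TargetSslProxy {
	return &compute.TargetSslProxy{
		Name:            name,
		Service:         backendServiceURL,
		SslCertificates: []string{certURL},
		ProxyHeader:     "PROXY_V1",
	}
}
// ------------------------------------------------------------------------------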
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *TargetVpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { type noMethod TargetVpnGatewaysScopedListWarning raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type TargetVpnGatewaysScopedListWarningData struct { @@ -9008,12 +12077,20 @@ type TargetVpnGatewaysScopedListWarningData struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *TargetVpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { type noMethod TargetVpnGatewaysScopedListWarningData raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type TestFailure struct { @@ -9032,12 +12109,20 @@ type TestFailure struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ActualService") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *TestFailure) MarshalJSON() ([]byte, error) { type noMethod TestFailure raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // UrlMap: A UrlMap resource. This resource defines the mapping from URL @@ -9103,12 +12188,21 @@ type UrlMap struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *UrlMap) MarshalJSON() ([]byte, error) { type noMethod UrlMap raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // UrlMapList: Contains a list of UrlMap resources. @@ -9145,12 +12239,20 @@ type UrlMapList struct { // server regardless of whether the field is empty or not. 
This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *UrlMapList) MarshalJSON() ([]byte, error) { type noMethod UrlMapList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type UrlMapReference struct { @@ -9163,12 +12265,20 @@ type UrlMapReference struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "UrlMap") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *UrlMapReference) MarshalJSON() ([]byte, error) { type noMethod UrlMapReference raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // UrlMapTest: Message for the expected URL mappings. @@ -9193,12 +12303,20 @@ type UrlMapTest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *UrlMapTest) MarshalJSON() ([]byte, error) { type noMethod UrlMapTest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // UrlMapValidationResult: Message representing the validation result @@ -9224,12 +12342,20 @@ type UrlMapValidationResult struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LoadErrors") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *UrlMapValidationResult) MarshalJSON() ([]byte, error) { type noMethod UrlMapValidationResult raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type UrlMapsValidateRequest struct { @@ -9243,12 +12369,20 @@ type UrlMapsValidateRequest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Resource") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *UrlMapsValidateRequest) MarshalJSON() ([]byte, error) { type noMethod UrlMapsValidateRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type UrlMapsValidateResponse struct { @@ -9265,12 +12399,20 @@ type UrlMapsValidateResponse struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Result") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *UrlMapsValidateResponse) MarshalJSON() ([]byte, error) { type noMethod UrlMapsValidateResponse raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // UsageExportLocation: The location in Cloud Storage and naming method @@ -9300,12 +12442,20 @@ type UsageExportLocation struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BucketName") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *UsageExportLocation) MarshalJSON() ([]byte, error) { type noMethod UsageExportLocation raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type VpnTunnel struct { @@ -9406,12 +12556,21 @@ type VpnTunnel struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *VpnTunnel) MarshalJSON() ([]byte, error) { type noMethod VpnTunnel raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type VpnTunnelAggregatedList struct { @@ -9448,12 +12607,20 @@ type VpnTunnelAggregatedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *VpnTunnelAggregatedList) MarshalJSON() ([]byte, error) { type noMethod VpnTunnelAggregatedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // VpnTunnelList: Contains a list of VpnTunnel resources. @@ -9477,7 +12644,7 @@ type VpnTunnelList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. + // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -9491,12 +12658,20 @@ type VpnTunnelList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *VpnTunnelList) MarshalJSON() ([]byte, error) { type noMethod VpnTunnelList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type VpnTunnelsScopedList struct { @@ -9514,12 +12689,20 @@ type VpnTunnelsScopedList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "VpnTunnels") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *VpnTunnelsScopedList) MarshalJSON() ([]byte, error) { type noMethod VpnTunnelsScopedList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // VpnTunnelsScopedListWarning: Informational warning which replaces the @@ -9564,12 +12747,20 @@ type VpnTunnelsScopedListWarning struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *VpnTunnelsScopedListWarning) MarshalJSON() ([]byte, error) { type noMethod VpnTunnelsScopedListWarning raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type VpnTunnelsScopedListWarningData struct { @@ -9593,12 +12784,20 @@ type VpnTunnelsScopedListWarningData struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *VpnTunnelsScopedListWarningData) MarshalJSON() ([]byte, error) { type noMethod VpnTunnelsScopedListWarningData raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Zone: A Zone resource. @@ -9650,12 +12849,21 @@ type Zone struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *Zone) MarshalJSON() ([]byte, error) { type noMethod Zone raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ZoneList: Contains a list of zone resources. @@ -9692,12 +12900,20 @@ type ZoneList struct { // server regardless of whether the field is empty or not. 
This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ZoneList) MarshalJSON() ([]byte, error) { type noMethod ZoneList raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // method id "compute.addresses.aggregatedList": @@ -9708,6 +12924,7 @@ type AddressesAggregatedListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // AggregatedList: Retrieves an aggregated list of addresses. @@ -9760,6 +12977,23 @@ func (c *AddressesAggregatedListCall) MaxResults(maxResults int64) *AddressesAgg return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AddressesAggregatedListCall) OrderBy(orderBy string) *AddressesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -9794,8 +13028,20 @@ func (c *AddressesAggregatedListCall) Context(ctx context.Context) *AddressesAgg return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -9871,6 +13117,11 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -9927,6 +13178,7 @@ type AddressesDeleteCall struct { address string urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Delete: Deletes the specified address resource. @@ -9955,8 +13207,20 @@ func (c *AddressesDeleteCall) Context(ctx context.Context) *AddressesDeleteCall return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) @@ -10063,6 +13327,7 @@ type AddressesGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Returns the specified address resource. @@ -10101,8 +13366,20 @@ func (c *AddressesGetCall) Context(ctx context.Context) *AddressesGetCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -10212,6 +13489,7 @@ type AddressesInsertCall struct { address *Address urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Insert: Creates an address resource in the specified project using @@ -10241,8 +13519,20 @@ func (c *AddressesInsertCall) Context(ctx context.Context) *AddressesInsertCall return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) @@ -10347,6 +13637,7 @@ type AddressesListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Retrieves a list of addresses contained within the specified @@ -10401,6 +13692,23 @@ func (c *AddressesListCall) MaxResults(maxResults int64) *AddressesListCall { return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. 
+// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AddressesListCall) OrderBy(orderBy string) *AddressesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -10435,8 +13743,20 @@ func (c *AddressesListCall) Context(ctx context.Context) *AddressesListCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -10514,6 +13834,11 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -10576,6 +13901,7 @@ type AutoscalersAggregatedListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // AggregatedList: Retrieves an aggregated list of autoscalers. @@ -10627,6 +13953,23 @@ func (c *AutoscalersAggregatedListCall) MaxResults(maxResults int64) *Autoscaler return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
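The newly generated OrderBy setters compose with the existing Pages helper as in the sketch below (again not part of the diff). The project and region names are placeholders, credentials come from golang.org/x/oauth2/google Application Default Credentials, and the listing target (Addresses) is chosen only for illustration; the same pattern applies to any of the list calls touched here.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}
	// "my-project" and "us-central1" are placeholder names.
	call := svc.Addresses.List("my-project", "us-central1").
		OrderBy("creationTimestamp desc"). // newest first; "name" is the other supported order
		MaxResults(100)
	// Pages follows nextPageToken until the listing is exhausted, so the
	// pageToken plumbing described in the surrounding comments is handled for us.
	err = call.Pages(ctx, func(page *compute.AddressList) error {
		for _, addr := range page.Items {
			fmt.Println(addr.Name, addr.Address, addr.Status)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
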
+func (c *AutoscalersAggregatedListCall) OrderBy(orderBy string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -10661,8 +14004,20 @@ func (c *AutoscalersAggregatedListCall) Context(ctx context.Context) *Autoscaler return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -10738,6 +14093,11 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -10794,6 +14154,7 @@ type AutoscalersDeleteCall struct { autoscaler string urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Delete: Deletes the specified autoscaler. @@ -10821,8 +14182,20 @@ func (c *AutoscalersDeleteCall) Context(ctx context.Context) *AutoscalersDeleteC return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) @@ -10929,6 +14302,7 @@ type AutoscalersGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Returns the specified autoscaler resource. Get a list of @@ -10967,8 +14341,20 @@ func (c *AutoscalersGetCall) Context(ctx context.Context) *AutoscalersGetCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
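The Header accessor added throughout this diff can be exercised without sending a request, as in the short sketch below (not part of the vendored code; the project, zone, autoscaler names, and the header itself are placeholders). Headers set this way are copied onto the outgoing request by doRequest when Do is eventually invoked.

package main

import (
	"fmt"
	"net/http"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// No RPC is made here, so http.DefaultClient is enough to build the call.
	svc, err := compute.New(http.DefaultClient)
	if err != nil {
		panic(err)
	}
	call := svc.Autoscalers.Get("my-project", "us-central1-f", "my-autoscaler")
	call.Header().Set("X-Example-Trace", "demo") // illustrative header only
	fmt.Println(call.Header())
}
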
+func (c *AutoscalersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -11078,6 +14464,7 @@ type AutoscalersInsertCall struct { autoscaler *Autoscaler urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Insert: Creates an autoscaler in the specified project using the data @@ -11106,8 +14493,20 @@ func (c *AutoscalersInsertCall) Context(ctx context.Context) *AutoscalersInsertC return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) @@ -11212,6 +14611,7 @@ type AutoscalersListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Retrieves a list of autoscalers contained within the specified @@ -11265,6 +14665,23 @@ func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AutoscalersListCall) OrderBy(orderBy string) *AutoscalersListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -11299,8 +14716,20 @@ func (c *AutoscalersListCall) Context(ctx context.Context) *AutoscalersListCall return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -11378,6 +14807,11 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -11441,6 +14875,7 @@ type AutoscalersPatchCall struct { autoscaler2 *Autoscaler urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Patch: Updates an autoscaler in the specified project using the data @@ -11470,8 +14905,20 @@ func (c *AutoscalersPatchCall) Context(ctx context.Context) *AutoscalersPatchCal return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler2) @@ -11584,6 +15031,7 @@ type AutoscalersUpdateCall struct { autoscaler *Autoscaler urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Update: Updates an autoscaler in the specified project using the data @@ -11619,8 +15067,20 @@ func (c *AutoscalersUpdateCall) Context(ctx context.Context) *AutoscalersUpdateC return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) @@ -11722,6 +15182,259 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er } +// method id "compute.backendServices.aggregatedList": + +type BackendServicesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of all BackendService resources, +// regional and global, available to the specified project. +func (r *BackendServicesService) AggregatedList(project string) *BackendServicesAggregatedListCall { + c := &BackendServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. 
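As a concrete instance of the "field_name comparison_string literal_string" filter grammar spelled out in these comments, the sketch below (placeholder project name and pattern; not part of the vendored code) applies an RE2 name filter to the global BackendServices.List call, which documents the same filter parameter.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}
	// Keep only backend services whose name matches the RE2 expression
	// "web-.*" (placeholder pattern); the regex must match the entire field.
	list, err := svc.BackendServices.List("my-project").
		Filter("name eq web-.*").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, bs := range list.Items {
		fmt.Println(bs.Name, bs.Protocol)
	}
}
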
+// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServicesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *BackendServicesAggregatedListCall) MaxResults(maxResults int64) *BackendServicesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *BackendServicesAggregatedListCall) OrderBy(orderBy string) *BackendServicesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *BackendServicesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesAggregatedListCall) Fields(s ...googleapi.Field) *BackendServicesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BackendServicesAggregatedListCall) IfNoneMatch(entityTag string) *BackendServicesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesAggregatedListCall) Context(ctx context.Context) *BackendServicesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/backendServices") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.aggregatedList" call. +// Exactly one of *BackendServiceAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *BackendServiceAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*BackendServiceAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackendServiceAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.backendServices.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
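The IfNoneMatch setter and the StatusNotModified handling in Do pair with googleapi.IsNotModified as sketched below (not part of the vendored code; project and resource names are placeholders, and whether Compute Engine actually returns an ETag header for this resource is not established by this diff, so treat the ETag plumbing as illustrative).

package main

import (
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}
	// First fetch; response headers ride along in the embedded ServerResponse.
	bs, err := svc.BackendServices.Get("my-project", "web-backend").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	etag := bs.ServerResponse.Header.Get("Etag") // only useful if the API sets an ETag

	// Conditional re-fetch: when the ETag still matches, Do returns a
	// *googleapi.Error carrying http.StatusNotModified.
	_, err = svc.BackendServices.Get("my-project", "web-backend").
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		log.Println("backend service unchanged since last fetch")
	} else if err != nil {
		log.Fatal(err)
	}
}
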
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/backendServices", + // "response": { + // "$ref": "BackendServiceAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *BackendServicesAggregatedListCall) Pages(ctx context.Context, f func(*BackendServiceAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.backendServices.delete": type BackendServicesDeleteCall struct { @@ -11730,6 +15443,7 @@ type BackendServicesDeleteCall struct { backendService string urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Delete: Deletes the specified BackendService resource. @@ -11757,8 +15471,20 @@ func (c *BackendServicesDeleteCall) Context(ctx context.Context) *BackendService return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) @@ -11855,6 +15581,7 @@ type BackendServicesGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Returns the specified BackendService resource. Get a list of @@ -11893,8 +15620,20 @@ func (c *BackendServicesGetCall) Context(ctx context.Context) *BackendServicesGe return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -11995,6 +15734,7 @@ type BackendServicesGetHealthCall struct { resourcegroupreference *ResourceGroupReference urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // GetHealth: Gets the most recent health check results for this @@ -12024,8 +15764,20 @@ func (c *BackendServicesGetHealthCall) Context(ctx context.Context) *BackendServ return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *BackendServicesGetHealthCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) @@ -12129,6 +15881,7 @@ type BackendServicesInsertCall struct { backendservice *BackendService urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Insert: Creates a BackendService resource in the specified project @@ -12159,8 +15912,20 @@ func (c *BackendServicesInsertCall) Context(ctx context.Context) *BackendService return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) @@ -12255,6 +16020,7 @@ type BackendServicesListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Retrieves the list of BackendService resources available to the @@ -12308,6 +16074,23 @@ func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesL return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *BackendServicesListCall) OrderBy(orderBy string) *BackendServicesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -12342,8 +16125,20 @@ func (c *BackendServicesListCall) Context(ctx context.Context) *BackendServicesL return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *BackendServicesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -12419,6 +16214,11 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -12475,12 +16275,14 @@ type BackendServicesPatchCall struct { backendservice *BackendService urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } -// Patch: Updates the entire content of the BackendService resource. -// There are several restrictions and guidelines to keep in mind when -// updating a backend service. Read Restrictions and Guidelines for -// more information. This method supports patch semantics. +// Patch: Updates the specified BackendService resource with the data +// included in the request. There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. This method +// supports patch semantics. // For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/patch func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -12506,8 +16308,20 @@ func (c *BackendServicesPatchCall) Context(ctx context.Context) *BackendServices return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) @@ -12565,7 +16379,7 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Updates the entire content of the BackendService resource. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. 
This method supports patch semantics.", + // "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports patch semantics.", // "httpMethod": "PATCH", // "id": "compute.backendServices.patch", // "parameterOrder": [ @@ -12612,12 +16426,13 @@ type BackendServicesUpdateCall struct { backendservice *BackendService urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } -// Update: Updates the entire content of the BackendService resource. -// There are several restrictions and guidelines to keep in mind when -// updating a backend service. Read Restrictions and Guidelines for -// more information. +// Update: Updates the specified BackendService resource with the data +// included in the request. There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. // For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/update func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall { c := &BackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -12643,8 +16458,20 @@ func (c *BackendServicesUpdateCall) Context(ctx context.Context) *BackendService return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) @@ -12702,7 +16529,7 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates the entire content of the BackendService resource. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", + // "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", // "httpMethod": "PUT", // "id": "compute.backendServices.update", // "parameterOrder": [ @@ -12748,6 +16575,7 @@ type DiskTypesAggregatedListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // AggregatedList: Retrieves an aggregated list of disk types. @@ -12800,6 +16628,23 @@ func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAgg return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". 
This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *DiskTypesAggregatedListCall) OrderBy(orderBy string) *DiskTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -12834,8 +16679,20 @@ func (c *DiskTypesAggregatedListCall) Context(ctx context.Context) *DiskTypesAgg return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DiskTypesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -12911,6 +16768,11 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -12968,6 +16830,7 @@ type DiskTypesGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Returns the specified disk type. Get a list of available disk @@ -13007,8 +16870,20 @@ func (c *DiskTypesGetCall) Context(ctx context.Context) *DiskTypesGetCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *DiskTypesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -13118,6 +16993,7 @@ type DiskTypesListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Retrieves a list of disk types available to the specified @@ -13172,6 +17048,23 @@ func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall { return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *DiskTypesListCall) OrderBy(orderBy string) *DiskTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -13206,8 +17099,20 @@ func (c *DiskTypesListCall) Context(ctx context.Context) *DiskTypesListCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DiskTypesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -13285,6 +17190,11 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -13347,6 +17257,7 @@ type DisksAggregatedListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // AggregatedList: Retrieves an aggregated list of persistent disks. 
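The hunks above give every generated *Call type a header_ field and a Header() accessor, have doRequest merge those headers into the outgoing request, and add the new orderBy list parameter. A minimal sketch of how a caller of this vendored client could use both additions, assuming application default credentials and the usual Items/Name fields on DiskTypeList (the struct definitions are outside these hunks); "my-project" and "us-central1-a" are placeholders:

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Assumed setup: application default credentials with the compute scope.
	client, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatalf("google.DefaultClient: %v", err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatalf("compute.New: %v", err)
	}

	call := svc.DiskTypes.List("my-project", "us-central1-a").
		OrderBy("creationTimestamp desc") // orderBy parameter added in this diff

	// Header() is new in this diff; doRequest copies these entries into reqHeaders.
	call.Header().Set("X-Example-Debug", "ingress")

	list, err := call.Context(ctx).Do()
	if err != nil {
		log.Fatalf("DiskTypes.List: %v", err)
	}
	for _, dt := range list.Items {
		fmt.Println(dt.Name)
	}
}

Note that doRequest sets User-Agent after copying the caller's headers, so a User-Agent supplied through Header() is overwritten by the library's own value.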
@@ -13399,6 +17310,23 @@ func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedL return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *DisksAggregatedListCall) OrderBy(orderBy string) *DisksAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -13433,8 +17361,20 @@ func (c *DisksAggregatedListCall) Context(ctx context.Context) *DisksAggregatedL return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -13510,6 +17450,11 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -13567,6 +17512,7 @@ type DisksCreateSnapshotCall struct { snapshot *Snapshot urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // CreateSnapshot: Creates a snapshot of a specified persistent disk. @@ -13596,8 +17542,20 @@ func (c *DisksCreateSnapshotCall) Context(ctx context.Context) *DisksCreateSnaps return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *DisksCreateSnapshotCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) @@ -13711,6 +17669,7 @@ type DisksDeleteCall struct { disk string urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Delete: Deletes the specified persistent disk. Deleting a disk @@ -13742,8 +17701,20 @@ func (c *DisksDeleteCall) Context(ctx context.Context) *DisksDeleteCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) @@ -13849,6 +17820,7 @@ type DisksGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Returns a specified persistent disk. Get a list of available @@ -13888,8 +17860,20 @@ func (c *DisksGetCall) Context(ctx context.Context) *DisksGetCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -13999,6 +17983,7 @@ type DisksInsertCall struct { disk *Disk urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Insert: Creates a persistent disk in the specified project using the @@ -14038,8 +18023,20 @@ func (c *DisksInsertCall) Context(ctx context.Context) *DisksInsertCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) @@ -14149,6 +18146,7 @@ type DisksListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Retrieves a list of persistent disks contained within the @@ -14203,6 +18201,23 @@ func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall { return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. 
+// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *DisksListCall) OrderBy(orderBy string) *DisksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -14237,8 +18252,20 @@ func (c *DisksListCall) Context(ctx context.Context) *DisksListCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -14316,6 +18343,11 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -14380,6 +18412,7 @@ type DisksResizeCall struct { disksresizerequest *DisksResizeRequest urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Resize: Resizes the specified persistent disk. @@ -14408,8 +18441,20 @@ func (c *DisksResizeCall) Context(ctx context.Context) *DisksResizeCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksResizeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksresizerequest) @@ -14522,6 +18567,7 @@ type FirewallsDeleteCall struct { firewall string urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Delete: Deletes the specified firewall. 
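The same orderBy and pageToken parameters are generated for the zonal Disks list call above. A short sketch of the pageToken flow the generated comments describe, assuming a *compute.Service built as in the earlier sketch and the usual Items/Name/NextPageToken fields on DiskList (not shown in these hunks):

// Assumes: import compute "google.golang.org/api/compute/v1"
func listAllDiskNames(svc *compute.Service, project, zone string) ([]string, error) {
	var names []string
	call := svc.Disks.List(project, zone).OrderBy("name") // orderBy added in this diff
	for {
		page, err := call.Do()
		if err != nil {
			return nil, err
		}
		for _, d := range page.Items {
			names = append(names, d.Name)
		}
		if page.NextPageToken == "" {
			return names, nil
		}
		// Per the generated docs: set pageToken to the nextPageToken
		// returned by the previous list request.
		call.PageToken(page.NextPageToken)
	}
}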
@@ -14549,8 +18595,20 @@ func (c *FirewallsDeleteCall) Context(ctx context.Context) *FirewallsDeleteCall return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FirewallsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) @@ -14647,6 +18705,7 @@ type FirewallsGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Returns the specified firewall. @@ -14684,8 +18743,20 @@ func (c *FirewallsGetCall) Context(ctx context.Context) *FirewallsGetCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FirewallsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -14785,6 +18856,7 @@ type FirewallsInsertCall struct { firewall *Firewall urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Insert: Creates a firewall rule in the specified project using the @@ -14813,8 +18885,20 @@ func (c *FirewallsInsertCall) Context(ctx context.Context) *FirewallsInsertCall return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FirewallsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall) @@ -14909,6 +18993,7 @@ type FirewallsListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Retrieves the list of firewall rules available to the specified @@ -14962,6 +19047,23 @@ func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall { return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *FirewallsListCall) OrderBy(orderBy string) *FirewallsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. 
Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -14996,8 +19098,20 @@ func (c *FirewallsListCall) Context(ctx context.Context) *FirewallsListCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FirewallsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -15073,6 +19187,11 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -15129,6 +19248,7 @@ type FirewallsPatchCall struct { firewall2 *Firewall urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Patch: Updates the specified firewall rule with the data included in @@ -15158,8 +19278,20 @@ func (c *FirewallsPatchCall) Context(ctx context.Context) *FirewallsPatchCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FirewallsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) @@ -15264,6 +19396,7 @@ type FirewallsUpdateCall struct { firewall2 *Firewall urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Update: Updates the specified firewall rule with the data included in @@ -15293,8 +19426,20 @@ func (c *FirewallsUpdateCall) Context(ctx context.Context) *FirewallsUpdateCall return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *FirewallsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) @@ -15398,6 +19543,7 @@ type ForwardingRulesAggregatedListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // AggregatedList: Retrieves an aggregated list of forwarding rules. @@ -15450,6 +19596,23 @@ func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *Forwar return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *ForwardingRulesAggregatedListCall) OrderBy(orderBy string) *ForwardingRulesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -15484,8 +19647,20 @@ func (c *ForwardingRulesAggregatedListCall) Context(ctx context.Context) *Forwar return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ForwardingRulesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -15561,6 +19736,11 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -15617,6 +19797,7 @@ type ForwardingRulesDeleteCall struct { forwardingRule string urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Delete: Deletes the specified ForwardingRule resource. @@ -15645,8 +19826,20 @@ func (c *ForwardingRulesDeleteCall) Context(ctx context.Context) *ForwardingRule return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ForwardingRulesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) @@ -15753,6 +19946,7 @@ type ForwardingRulesGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Returns the specified ForwardingRule resource. @@ -15791,8 +19985,20 @@ func (c *ForwardingRulesGetCall) Context(ctx context.Context) *ForwardingRulesGe return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ForwardingRulesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -15902,6 +20108,7 @@ type ForwardingRulesInsertCall struct { forwardingrule *ForwardingRule urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Insert: Creates a ForwardingRule resource in the specified project @@ -15931,8 +20138,20 @@ func (c *ForwardingRulesInsertCall) Context(ctx context.Context) *ForwardingRule return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ForwardingRulesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) @@ -16037,6 +20256,7 @@ type ForwardingRulesListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Retrieves a list of ForwardingRule resources available to the @@ -16091,6 +20311,23 @@ func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesL return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". 
This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *ForwardingRulesListCall) OrderBy(orderBy string) *ForwardingRulesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -16125,8 +20362,20 @@ func (c *ForwardingRulesListCall) Context(ctx context.Context) *ForwardingRulesL return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ForwardingRulesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -16204,6 +20453,11 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -16268,6 +20522,7 @@ type ForwardingRulesSetTargetCall struct { targetreference *TargetReference urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // SetTarget: Changes target URL for forwarding rule. The new target @@ -16298,8 +20553,20 @@ func (c *ForwardingRulesSetTargetCall) Context(ctx context.Context) *ForwardingR return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ForwardingRulesSetTargetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) @@ -16412,6 +20679,7 @@ type GlobalAddressesDeleteCall struct { address string urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Delete: Deletes the specified address resource. 
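Mutation calls such as ForwardingRules.SetTarget and GlobalAddresses.Delete gain the same Header() hook and still return a global Operation from Do. A hedged sketch of deleting a global address and waiting on that operation, assuming a *compute.Service as before; the Operation Name/Status/Error fields come from the wider compute v1 surface and are not part of these hunks:

// Assumes: import ("fmt"; "time"; compute "google.golang.org/api/compute/v1")
func deleteGlobalAddress(svc *compute.Service, project, address string) error {
	call := svc.GlobalAddresses.Delete(project, address)
	// Header() is new in this diff; extra headers ride along on the DELETE.
	call.Header().Set("X-Example-Reason", "cleanup")

	op, err := call.Do()
	if err != nil {
		return err
	}
	// Poll the returned global Operation; no timeout handling in this sketch.
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		op, err = svc.GlobalOperations.Get(project, op.Name).Do()
		if err != nil {
			return err
		}
	}
	if op.Error != nil {
		return fmt.Errorf("operation %s completed with errors", op.Name)
	}
	return nil
}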
@@ -16439,8 +20707,20 @@ func (c *GlobalAddressesDeleteCall) Context(ctx context.Context) *GlobalAddresse return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GlobalAddressesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) @@ -16537,6 +20817,7 @@ type GlobalAddressesGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Returns the specified address resource. Get a list of available @@ -16575,8 +20856,20 @@ func (c *GlobalAddressesGetCall) Context(ctx context.Context) *GlobalAddressesGe return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GlobalAddressesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -16676,6 +20969,7 @@ type GlobalAddressesInsertCall struct { address *Address urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Insert: Creates an address resource in the specified project using @@ -16704,8 +20998,20 @@ func (c *GlobalAddressesInsertCall) Context(ctx context.Context) *GlobalAddresse return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GlobalAddressesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) @@ -16800,6 +21106,7 @@ type GlobalAddressesListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Retrieves a list of global addresses. @@ -16852,6 +21159,23 @@ func (c *GlobalAddressesListCall) MaxResults(maxResults int64) *GlobalAddressesL return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
+func (c *GlobalAddressesListCall) OrderBy(orderBy string) *GlobalAddressesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -16886,8 +21210,20 @@ func (c *GlobalAddressesListCall) Context(ctx context.Context) *GlobalAddressesL return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GlobalAddressesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -16963,6 +21299,11 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -17018,6 +21359,7 @@ type GlobalForwardingRulesDeleteCall struct { forwardingRule string urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Delete: Deletes the specified ForwardingRule resource. @@ -17045,8 +21387,20 @@ func (c *GlobalForwardingRulesDeleteCall) Context(ctx context.Context) *GlobalFo return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GlobalForwardingRulesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) @@ -17143,6 +21497,7 @@ type GlobalForwardingRulesGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Returns the specified ForwardingRule resource. Get a list of @@ -17181,8 +21536,20 @@ func (c *GlobalForwardingRulesGetCall) Context(ctx context.Context) *GlobalForwa return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *GlobalForwardingRulesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -17282,6 +21649,7 @@ type GlobalForwardingRulesInsertCall struct { forwardingrule *ForwardingRule urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Insert: Creates a ForwardingRule resource in the specified project @@ -17310,8 +21678,20 @@ func (c *GlobalForwardingRulesInsertCall) Context(ctx context.Context) *GlobalFo return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GlobalForwardingRulesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) @@ -17406,6 +21786,7 @@ type GlobalForwardingRulesListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Retrieves a list of ForwardingRule resources available to the @@ -17459,6 +21840,23 @@ func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForw return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *GlobalForwardingRulesListCall) OrderBy(orderBy string) *GlobalForwardingRulesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -17493,8 +21891,20 @@ func (c *GlobalForwardingRulesListCall) Context(ctx context.Context) *GlobalForw return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *GlobalForwardingRulesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -17570,6 +21980,11 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -17626,6 +22041,7 @@ type GlobalForwardingRulesSetTargetCall struct { targetreference *TargetReference urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // SetTarget: Changes target URL for forwarding rule. The new target @@ -17655,8 +22071,20 @@ func (c *GlobalForwardingRulesSetTargetCall) Context(ctx context.Context) *Globa return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GlobalForwardingRulesSetTargetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) @@ -17760,6 +22188,7 @@ type GlobalOperationsAggregatedListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // AggregatedList: Retrieves an aggregated list of all operations. @@ -17812,6 +22241,23 @@ func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *Globa return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
+func (c *GlobalOperationsAggregatedListCall) OrderBy(orderBy string) *GlobalOperationsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -17846,8 +22292,20 @@ func (c *GlobalOperationsAggregatedListCall) Context(ctx context.Context) *Globa return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GlobalOperationsAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -17923,6 +22381,11 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -17978,6 +22441,7 @@ type GlobalOperationsDeleteCall struct { operation string urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Delete: Deletes the specified Operations resource. @@ -18005,8 +22469,20 @@ func (c *GlobalOperationsDeleteCall) Context(ctx context.Context) *GlobalOperati return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GlobalOperationsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) @@ -18075,6 +22551,7 @@ type GlobalOperationsGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Retrieves the specified Operations resource. Get a list of @@ -18113,8 +22590,20 @@ func (c *GlobalOperationsGetCall) Context(ctx context.Context) *GlobalOperations return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *GlobalOperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -18214,6 +22703,7 @@ type GlobalOperationsListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Retrieves a list of Operation resources contained within the @@ -18267,6 +22757,23 @@ func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperation return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *GlobalOperationsListCall) OrderBy(orderBy string) *GlobalOperationsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -18301,8 +22808,20 @@ func (c *GlobalOperationsListCall) Context(ctx context.Context) *GlobalOperation return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GlobalOperationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -18378,6 +22897,11 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -18425,29 +22949,29 @@ func (c *GlobalOperationsListCall) Pages(ctx context.Context, f func(*OperationL } } -// method id "compute.httpHealthChecks.delete": +// method id "compute.healthChecks.delete": -type HttpHealthChecksDeleteCall struct { - s *Service - project string - httpHealthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context +type HealthChecksDeleteCall struct { + s *Service + project string + healthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HttpHealthCheck resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/delete -func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall { - c := &HttpHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HealthCheck resource. +func (r *HealthChecksService) Delete(project string, healthCheck string) *HealthChecksDeleteCall { + c := &HealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck + c.healthCheck = healthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChecksDeleteCall { +func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -18455,35 +22979,47 @@ func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksDeleteCall) Context(ctx context.Context) *HttpHealthChecksDeleteCall { +func (c *HealthChecksDeleteCall) Context(ctx context.Context) *HealthChecksDeleteCall { c.ctx_ = ctx return c } -func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HealthChecksDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.delete" call. +// Do executes the "compute.healthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. 
Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -18514,16 +23050,16 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Deletes the specified HttpHealthCheck resource.", + // "description": "Deletes the specified HealthCheck resource.", // "httpMethod": "DELETE", - // "id": "compute.httpHealthChecks.delete", + // "id": "compute.healthChecks.delete", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "healthCheck" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to delete.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -18537,7 +23073,7 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "{project}/global/healthChecks/{healthCheck}", // "response": { // "$ref": "Operation" // }, @@ -18549,31 +23085,31 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.httpHealthChecks.get": +// method id "compute.healthChecks.get": -type HttpHealthChecksGetCall struct { - s *Service - project string - httpHealthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type HealthChecksGetCall struct { + s *Service + project string + healthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified HttpHealthCheck resource. Get a list of -// available HTTP health checks by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/get -func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall { - c := &HttpHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified HealthCheck resource. Get a list of +// available health checks by making a list() request. +func (r *HealthChecksService) Get(project string, healthCheck string) *HealthChecksGetCall { + c := &HealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck + c.healthCheck = healthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecksGetCall { +func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -18583,7 +23119,7 @@ func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecks // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthChecksGetCall { +func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -18591,38 +23127,50 @@ func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthCheck // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksGetCall) Context(ctx context.Context) *HttpHealthChecksGetCall { +func (c *HealthChecksGetCall) Context(ctx context.Context) *HealthChecksGetCall { c.ctx_ = ctx return c } -func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HealthChecksGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.get" call. -// Exactly one of *HttpHealthCheck or error will be non-nil. Any non-2xx +// Do executes the "compute.healthChecks.get" call. +// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *HttpHealthCheck.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheck, error) { +// *HealthCheck.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -18641,7 +23189,7 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpHealthCheck{ + ret := &HealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -18653,16 +23201,16 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC } return ret, nil // { - // "description": "Returns the specified HttpHealthCheck resource. Get a list of available HTTP health checks by making a list() request.", + // "description": "Returns the specified HealthCheck resource. Get a list of available health checks by making a list() request.", // "httpMethod": "GET", - // "id": "compute.httpHealthChecks.get", + // "id": "compute.healthChecks.get", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "healthCheck" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to return.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -18676,9 +23224,9 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "{project}/global/healthChecks/{healthCheck}", // "response": { - // "$ref": "HttpHealthCheck" + // "$ref": "HealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -18689,30 +23237,30 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC } -// method id "compute.httpHealthChecks.insert": +// method id "compute.healthChecks.insert": -type HttpHealthChecksInsertCall struct { - s *Service - project string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context +type HealthChecksInsertCall struct { + s *Service + project string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HttpHealthCheck resource in the specified project -// using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/insert -func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall { - c := &HttpHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HealthCheck resource in the specified project using +// the data included in the request. +func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) *HealthChecksInsertCall { + c := &HealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httphealthcheck = httphealthcheck + c.healthcheck = healthcheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChecksInsertCall { +func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -18720,22 +23268,34 @@ func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksInsertCall) Context(ctx context.Context) *HttpHealthChecksInsertCall { +func (c *HealthChecksInsertCall) Context(ctx context.Context) *HealthChecksInsertCall { c.ctx_ = ctx return c } -func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HealthChecksInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -18745,14 +23305,14 @@ func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, erro return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.insert" call. +// Do executes the "compute.healthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -18783,9 +23343,9 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.httpHealthChecks.insert", + // "id": "compute.healthChecks.insert", // "parameterOrder": [ // "project" // ], @@ -18798,9 +23358,9 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks", + // "path": "{project}/global/healthChecks", // "request": { - // "$ref": "HttpHealthCheck" + // "$ref": "HealthCheck" // }, // "response": { // "$ref": "Operation" @@ -18813,21 +23373,21 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.httpHealthChecks.list": +// method id "compute.healthChecks.list": -type HttpHealthChecksListCall struct { +type HealthChecksListCall struct { s *Service project string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of HttpHealthCheck resources available to -// the specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/list -func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall { - c := &HttpHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of HealthCheck resources available to the +// specified project. +func (r *HealthChecksService) List(project string) *HealthChecksListCall { + c := &HealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -18859,7 +23419,7 @@ func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall { +func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { c.urlParams_.Set("filter", filter) return c } @@ -18869,15 +23429,32 @@ func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCa // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in // subsequent list requests. -func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthChecksListCall { +func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. 
+// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecksListCall { +func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -18885,7 +23462,7 @@ func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecks // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthChecksListCall { +func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -18895,7 +23472,7 @@ func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthCheck // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChecksListCall { +func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -18903,20 +23480,32 @@ func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChec // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksListCall) Context(ctx context.Context) *HttpHealthChecksListCall { +func (c *HealthChecksListCall) Context(ctx context.Context) *HealthChecksListCall { c.ctx_ = ctx return c } -func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HealthChecksListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -18926,14 +23515,14 @@ func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.list" call. -// Exactly one of *HttpHealthCheckList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HttpHealthCheckList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. 
Use +// Do executes the "compute.healthChecks.list" call. +// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HealthCheckList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheckList, error) { +func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -18952,7 +23541,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpHealthCheckList{ + ret := &HealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -18964,9 +23553,9 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth } return ret, nil // { - // "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", + // "description": "Retrieves the list of HealthCheck resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.httpHealthChecks.list", + // "id": "compute.healthChecks.list", // "parameterOrder": [ // "project" // ], @@ -18985,6 +23574,11 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -18998,9 +23592,9 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks", + // "path": "{project}/global/healthChecks", // "response": { - // "$ref": "HttpHealthCheckList" + // "$ref": "HealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -19014,7 +23608,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
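// Reviewer note (illustrative sketch, NOT part of the vendored file): the
// hunks above add an OrderBy parameter to compute.healthChecks.list and keep
// the Pages helper. This is how the paginated listing is typically consumed;
// "my-project" is a placeholder project ID.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	httpClient, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(httpClient)
	if err != nil {
		log.Fatal(err)
	}

	// Newest health checks first; Pages transparently follows nextPageToken.
	err = svc.HealthChecks.List("my-project").
		OrderBy("creationTimestamp desc").
		MaxResults(50).
		Pages(ctx, func(page *compute.HealthCheckList) error {
			for _, check := range page.Items {
				fmt.Println(check.Name)
			}
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}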
-func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealthCheckList) error) error { +func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -19032,33 +23626,33 @@ func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealth } } -// method id "compute.httpHealthChecks.patch": +// method id "compute.healthChecks.patch": -type HttpHealthChecksPatchCall struct { - s *Service - project string - httpHealthCheck string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context +type HealthChecksPatchCall struct { + s *Service + project string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HttpHealthCheck resource in the specified project -// using the data included in the request. This method supports patch +// Patch: Updates a HealthCheck resource in the specified project using +// the data included in the request. This method supports patch // semantics. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/patch -func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall { - c := &HttpHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *HealthChecksService) Patch(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksPatchCall { + c := &HealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck - c.httphealthcheck = httphealthcheck + c.healthCheck = healthCheck + c.healthcheck = healthcheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChecksPatchCall { +func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -19066,40 +23660,52 @@ func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChec // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksPatchCall) Context(ctx context.Context) *HttpHealthChecksPatchCall { +func (c *HealthChecksPatchCall) Context(ctx context.Context) *HealthChecksPatchCall { c.ctx_ = ctx return c } -func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *HealthChecksPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.patch" call. +// Do executes the "compute.healthChecks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -19130,16 +23736,16 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports patch semantics.", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. 
This method supports patch semantics.", // "httpMethod": "PATCH", - // "id": "compute.httpHealthChecks.patch", + // "id": "compute.healthChecks.patch", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "healthCheck" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to update.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -19153,9 +23759,9 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "{project}/global/healthChecks/{healthCheck}", // "request": { - // "$ref": "HttpHealthCheck" + // "$ref": "HealthCheck" // }, // "response": { // "$ref": "Operation" @@ -19168,32 +23774,32 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.httpHealthChecks.update": +// method id "compute.healthChecks.update": -type HttpHealthChecksUpdateCall struct { - s *Service - project string - httpHealthCheck string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context +type HealthChecksUpdateCall struct { + s *Service + project string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a HttpHealthCheck resource in the specified project -// using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/update -func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall { - c := &HttpHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a HealthCheck resource in the specified project using +// the data included in the request. +func (r *HealthChecksService) Update(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksUpdateCall { + c := &HealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck - c.httphealthcheck = httphealthcheck + c.healthCheck = healthCheck + c.healthcheck = healthcheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChecksUpdateCall { +func (c *HealthChecksUpdateCall) Fields(s ...googleapi.Field) *HealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -19201,40 +23807,52 @@ func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksUpdateCall) Context(ctx context.Context) *HttpHealthChecksUpdateCall { +func (c *HealthChecksUpdateCall) Context(ctx context.Context) *HealthChecksUpdateCall { c.ctx_ = ctx return c } -func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *HealthChecksUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.update" call. +// Do executes the "compute.healthChecks.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -19265,16 +23883,16 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", // "httpMethod": "PUT", - // "id": "compute.httpHealthChecks.update", + // "id": "compute.healthChecks.update", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "healthCheck" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to update.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -19288,9 +23906,9 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "{project}/global/healthChecks/{healthCheck}", // "request": { - // "$ref": "HttpHealthCheck" + // "$ref": "HealthCheck" // }, // "response": { // "$ref": "Operation" @@ -19303,28 +23921,30 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.httpsHealthChecks.delete": +// method id "compute.httpHealthChecks.delete": -type HttpsHealthChecksDeleteCall struct { - s *Service - project string - httpsHealthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context +type HttpHealthChecksDeleteCall struct { + s *Service + project string + httpHealthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HttpsHealthCheck resource. -func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck string) *HttpsHealthChecksDeleteCall { - c := &HttpsHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HttpHealthCheck resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/delete +func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall { + c := &HttpHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck + c.httpHealthCheck = httpHealthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthChecksDeleteCall { +func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -19332,35 +23952,47 @@ func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *HttpsHealthChecksDeleteCall) Context(ctx context.Context) *HttpsHealthChecksDeleteCall { +func (c *HttpHealthChecksDeleteCall) Context(ctx context.Context) *HttpHealthChecksDeleteCall { c.ctx_ = ctx return c } -func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HttpHealthChecksDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.delete" call. +// Do executes the "compute.httpHealthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -19391,16 +24023,16 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified HttpsHealthCheck resource.", + // "description": "Deletes the specified HttpHealthCheck resource.", // "httpMethod": "DELETE", - // "id": "compute.httpsHealthChecks.delete", + // "id": "compute.httpHealthChecks.delete", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "httpHealthCheck" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to delete.", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -19414,7 +24046,7 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", // "response": { // "$ref": "Operation" // }, @@ -19426,30 +24058,32 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.httpsHealthChecks.get": +// method id "compute.httpHealthChecks.get": -type HttpsHealthChecksGetCall struct { - s *Service - project string - httpsHealthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type HttpHealthChecksGetCall struct { + s *Service + project string + httpHealthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified HttpsHealthCheck resource. Get a list of -// available HTTPS health checks by making a list() request. -func (r *HttpsHealthChecksService) Get(project string, httpsHealthCheck string) *HttpsHealthChecksGetCall { - c := &HttpsHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified HttpHealthCheck resource. Get a list of +// available HTTP health checks by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/get +func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall { + c := &HttpHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck + c.httpHealthCheck = httpHealthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChecksGetCall { +func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -19459,7 +24093,7 @@ func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChec // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
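// Reviewer note (illustrative sketch, NOT part of the vendored file): the Get
// call above supports conditional requests via IfNoneMatch, paired with
// googleapi.IsNotModified on the caller side. A minimal sketch assuming an
// already-constructed *compute.Service; the project and resource names are
// placeholders.
package sketch

import (
	"context"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getIfChanged refetches the health check only if it no longer matches etag,
// returning the (possibly unchanged) ETag to cache for the next call.
func getIfChanged(ctx context.Context, svc *compute.Service, etag string) (*compute.HttpHealthCheck, string, error) {
	call := svc.HttpHealthChecks.Get("my-project", "my-http-check").Context(ctx)
	if etag != "" {
		call = call.IfNoneMatch(etag)
	}
	check, err := call.Do()
	if googleapi.IsNotModified(err) {
		// The server returned 304; the cached copy is still current.
		return nil, etag, nil
	}
	if err != nil {
		return nil, "", err
	}
	return check, check.ServerResponse.Header.Get("Etag"), nil
}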
-func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChecksGetCall { +func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthChecksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -19467,38 +24101,50 @@ func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksGetCall) Context(ctx context.Context) *HttpsHealthChecksGetCall { +func (c *HttpHealthChecksGetCall) Context(ctx context.Context) *HttpHealthChecksGetCall { c.ctx_ = ctx return c } -func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HttpHealthChecksGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.get" call. -// Exactly one of *HttpsHealthCheck or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HttpsHealthCheck.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.httpHealthChecks.get" call. +// Exactly one of *HttpHealthCheck or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HttpHealthCheck.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheck, error) { +func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -19517,7 +24163,7 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpsHealthCheck{ + ret := &HttpHealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -19529,16 +24175,16 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt } return ret, nil // { - // "description": "Returns the specified HttpsHealthCheck resource. 
Get a list of available HTTPS health checks by making a list() request.", + // "description": "Returns the specified HttpHealthCheck resource. Get a list of available HTTP health checks by making a list() request.", // "httpMethod": "GET", - // "id": "compute.httpsHealthChecks.get", + // "id": "compute.httpHealthChecks.get", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "httpHealthCheck" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to return.", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -19552,9 +24198,9 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", // "response": { - // "$ref": "HttpsHealthCheck" + // "$ref": "HttpHealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -19565,29 +24211,31 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt } -// method id "compute.httpsHealthChecks.insert": +// method id "compute.httpHealthChecks.insert": -type HttpsHealthChecksInsertCall struct { - s *Service - project string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context +type HttpHealthChecksInsertCall struct { + s *Service + project string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HttpsHealthCheck resource in the specified project +// Insert: Creates a HttpHealthCheck resource in the specified project // using the data included in the request. -func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksInsertCall { - c := &HttpsHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/insert +func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall { + c := &HttpHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpshealthcheck = httpshealthcheck + c.httphealthcheck = httphealthcheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthChecksInsertCall { +func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -19595,22 +24243,34 @@ func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
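// Reviewer note (illustrative sketch, NOT part of the vendored file): the
// Context plumbing documented above lets callers bound a mutating request
// such as compute.httpHealthChecks.insert. A sketch assuming an existing
// *compute.Service; the names, port, and request path are placeholders.
package sketch

import (
	"context"
	"time"

	compute "google.golang.org/api/compute/v1"
)

// insertWithTimeout creates an HTTP health check, aborting the pending HTTP
// request if it has not completed within 30 seconds.
func insertWithTimeout(svc *compute.Service) (*compute.Operation, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	check := &compute.HttpHealthCheck{
		Name:        "my-http-check",
		Port:        8080,
		RequestPath: "/healthz",
	}
	// Insert returns an Operation; polling it to completion is out of scope here.
	return svc.HttpHealthChecks.Insert("my-project", check).Context(ctx).Do()
}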
-func (c *HttpsHealthChecksInsertCall) Context(ctx context.Context) *HttpsHealthChecksInsertCall { +func (c *HttpHealthChecksInsertCall) Context(ctx context.Context) *HttpHealthChecksInsertCall { c.ctx_ = ctx return c } -func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HttpHealthChecksInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -19620,14 +24280,14 @@ func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.insert" call. +// Do executes the "compute.httpHealthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -19658,9 +24318,9 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.httpsHealthChecks.insert", + // "id": "compute.httpHealthChecks.insert", // "parameterOrder": [ // "project" // ], @@ -19673,9 +24333,9 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks", + // "path": "{project}/global/httpHealthChecks", // "request": { - // "$ref": "HttpsHealthCheck" + // "$ref": "HttpHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -19688,20 +24348,22 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.httpsHealthChecks.list": +// method id "compute.httpHealthChecks.list": -type HttpsHealthChecksListCall struct { +type HttpHealthChecksListCall struct { s *Service project string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of HttpsHealthCheck resources available to +// List: Retrieves the list of HttpHealthCheck resources available to // the specified project. -func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCall { - c := &HttpsHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/list +func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall { + c := &HttpHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -19733,7 +24395,7 @@ func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCa // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { +func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall { c.urlParams_.Set("filter", filter) return c } @@ -19743,15 +24405,32 @@ func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksList // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in // subsequent list requests. -func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChecksListCall { +func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). 
Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *HttpHealthChecksListCall) OrderBy(orderBy string) *HttpHealthChecksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChecksListCall { +func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -19759,7 +24438,7 @@ func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChec // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChecksListCall { +func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -19769,7 +24448,7 @@ func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthChecksListCall { +func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -19777,20 +24456,32 @@ func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthCh // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksListCall) Context(ctx context.Context) *HttpsHealthChecksListCall { +func (c *HttpHealthChecksListCall) Context(ctx context.Context) *HttpHealthChecksListCall { c.ctx_ = ctx return c } -func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HttpHealthChecksListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -19800,14 +24491,14 @@ func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.list" call. -// Exactly one of *HttpsHealthCheckList or error will be non-nil. 
Any +// Do executes the "compute.httpHealthChecks.list" call. +// Exactly one of *HttpHealthCheckList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *HttpsHealthCheckList.ServerResponse.Header or (if a response was +// *HttpHealthCheckList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheckList, error) { +func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -19826,7 +24517,7 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpsHealthCheckList{ + ret := &HttpHealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -19838,9 +24529,9 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal } return ret, nil // { - // "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", + // "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.httpsHealthChecks.list", + // "id": "compute.httpHealthChecks.list", // "parameterOrder": [ // "project" // ], @@ -19859,6 +24550,11 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -19872,9 +24568,9 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks", + // "path": "{project}/global/httpHealthChecks", // "response": { - // "$ref": "HttpsHealthCheckList" + // "$ref": "HttpHealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -19888,7 +24584,7 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHealthCheckList) error) error { +func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealthCheckList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -19906,32 +24602,34 @@ func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHeal } } -// method id "compute.httpsHealthChecks.patch": +// method id "compute.httpHealthChecks.patch": -type HttpsHealthChecksPatchCall struct { - s *Service - project string - httpsHealthCheck string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context +type HttpHealthChecksPatchCall struct { + s *Service + project string + httpHealthCheck string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HttpsHealthCheck resource in the specified project +// Patch: Updates a HttpHealthCheck resource in the specified project // using the data included in the request. This method supports patch // semantics. -func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksPatchCall { - c := &HttpsHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/patch +func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall { + c := &HttpHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck - c.httpshealthcheck = httpshealthcheck + c.httpHealthCheck = httpHealthCheck + c.httphealthcheck = httphealthcheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthChecksPatchCall { +func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -19939,40 +24637,52 @@ func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthCh // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksPatchCall) Context(ctx context.Context) *HttpsHealthChecksPatchCall { +func (c *HttpHealthChecksPatchCall) Context(ctx context.Context) *HttpHealthChecksPatchCall { c.ctx_ = ctx return c } -func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
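
Every call type in this revision gains a header_ field plus the Header accessor, and doRequest copies those headers onto the outgoing request. A short sketch of how a caller might use it on the Patch call defined here; the helper name, the header it sets, and the field value are purely illustrative.

package examples

import (
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// patchCheckInterval demonstrates the Header accessor: anything set on
// call.Header() is merged into the request headers by doRequest.
// The header name/value and the 10s interval are illustrative only.
func patchCheckInterval(svc *compute.Service, project, name string) {
	call := svc.HttpHealthChecks.Patch(project, name, &compute.HttpHealthCheck{
		CheckIntervalSec: 10, // patch semantics: only the changed fields are sent
	})
	call.Header().Set("X-Example-Request-Id", "demo-123")
	op, err := call.Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("patch operation:", op.Name, op.Status)
}
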
+func (c *HttpHealthChecksPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.patch" call. +// Do executes the "compute.httpHealthChecks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -20003,16 +24713,16 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports patch semantics.", + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. 
This method supports patch semantics.", // "httpMethod": "PATCH", - // "id": "compute.httpsHealthChecks.patch", + // "id": "compute.httpHealthChecks.patch", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "httpHealthCheck" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to update.", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -20026,9 +24736,9 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", // "request": { - // "$ref": "HttpsHealthCheck" + // "$ref": "HttpHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -20041,31 +24751,33 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.httpsHealthChecks.update": +// method id "compute.httpHealthChecks.update": -type HttpsHealthChecksUpdateCall struct { - s *Service - project string - httpsHealthCheck string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context +type HttpHealthChecksUpdateCall struct { + s *Service + project string + httpHealthCheck string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a HttpsHealthCheck resource in the specified project +// Update: Updates a HttpHealthCheck resource in the specified project // using the data included in the request. -func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksUpdateCall { - c := &HttpsHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/update +func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall { + c := &HttpHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck - c.httpshealthcheck = httpshealthcheck + c.httpHealthCheck = httpHealthCheck + c.httphealthcheck = httphealthcheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthChecksUpdateCall { +func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -20073,40 +24785,52 @@ func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
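
The patch and update methods differ mainly in their HTTP verb: PATCH changes only the fields present in the request body, while PUT is generally treated as a full replacement, so an Update call should carry a complete resource. A hedged sketch under that assumption, with illustrative field values.

package examples

import (
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// replaceCheck issues a full-resource Update (PUT). All values below are
// examples; in practice they would come from the caller's desired state.
func replaceCheck(svc *compute.Service, project, name string) {
	hc := &compute.HttpHealthCheck{
		Name:               name,
		RequestPath:        "/healthz",
		Port:               8080,
		CheckIntervalSec:   5,
		TimeoutSec:         5,
		HealthyThreshold:   2,
		UnhealthyThreshold: 3,
	}
	op, err := svc.HttpHealthChecks.Update(project, name, hc).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("update operation:", op.Name, op.Status)
}
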
-func (c *HttpsHealthChecksUpdateCall) Context(ctx context.Context) *HttpsHealthChecksUpdateCall { +func (c *HttpHealthChecksUpdateCall) Context(ctx context.Context) *HttpHealthChecksUpdateCall { c.ctx_ = ctx return c } -func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HttpHealthChecksUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.update" call. +// Do executes the "compute.httpHealthChecks.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -20137,16 +24861,16 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", // "httpMethod": "PUT", - // "id": "compute.httpsHealthChecks.update", + // "id": "compute.httpHealthChecks.update", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "httpHealthCheck" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to update.", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -20160,9 +24884,9 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", // "request": { - // "$ref": "HttpsHealthCheck" + // "$ref": "HttpHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -20175,29 +24899,29 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.images.delete": +// method id "compute.httpsHealthChecks.delete": -type ImagesDeleteCall struct { - s *Service - project string - image string - urlParams_ gensupport.URLParams - ctx_ context.Context +type HttpsHealthChecksDeleteCall struct { + s *Service + project string + httpsHealthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified image. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/delete -func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { - c := &ImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HttpsHealthCheck resource. +func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck string) *HttpsHealthChecksDeleteCall { + c := &HttpsHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image + c.httpsHealthCheck = httpsHealthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { +func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -20205,35 +24929,47 @@ func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
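
As with the other mutating calls in this file, Delete resolves to a global Operation rather than blocking until the resource is gone. A small sketch of the call; polling the returned operation to completion is intentionally left out.

package examples

import (
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// deleteHTTPSCheck deletes an HttpsHealthCheck and reports the resulting
// asynchronous operation. The caller would normally poll until Status is DONE.
func deleteHTTPSCheck(svc *compute.Service, project, name string) {
	op, err := svc.HttpsHealthChecks.Delete(project, name).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("operation %s is %s\n", op.Name, op.Status)
}
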
-func (c *ImagesDeleteCall) Context(ctx context.Context) *ImagesDeleteCall { +func (c *HttpsHealthChecksDeleteCall) Context(ctx context.Context) *HttpsHealthChecksDeleteCall { c.ctx_ = ctx return c } -func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HttpsHealthChecksDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.delete" call. +// Do executes the "compute.httpsHealthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -20264,16 +25000,16 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified image.", + // "description": "Deletes the specified HttpsHealthCheck resource.", // "httpMethod": "DELETE", - // "id": "compute.images.delete", + // "id": "compute.httpsHealthChecks.delete", // "parameterOrder": [ // "project", - // "image" + // "httpsHealthCheck" // ], // "parameters": { - // "image": { - // "description": "Name of the image resource to delete.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -20287,7 +25023,7 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "type": "string" // } // }, - // "path": "{project}/global/images/{image}", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "response": { // "$ref": "Operation" // }, @@ -20299,75 +25035,92 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.images.deprecate": +// method id "compute.httpsHealthChecks.get": -type ImagesDeprecateCall struct { - s *Service - project string - image string - deprecationstatus *DeprecationStatus - urlParams_ gensupport.URLParams - ctx_ context.Context +type HttpsHealthChecksGetCall struct { + s *Service + project string + httpsHealthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Deprecate: Sets the deprecation status of an image. -// -// If an empty request body is given, clears the deprecation status -// instead. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/deprecate -func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall { - c := &ImagesDeprecateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified HttpsHealthCheck resource. Get a list of +// available HTTPS health checks by making a list() request. +func (r *HttpsHealthChecksService) Get(project string, httpsHealthCheck string) *HttpsHealthChecksGetCall { + c := &HttpsHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image - c.deprecationstatus = deprecationstatus + c.httpsHealthCheck = httpsHealthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall { +func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChecksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesDeprecateCall) Context(ctx context.Context) *ImagesDeprecateCall { +func (c *HttpsHealthChecksGetCall) Context(ctx context.Context) *HttpsHealthChecksGetCall { c.ctx_ = ctx return c } -func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HttpsHealthChecksGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.deprecate" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.httpsHealthChecks.get" call. +// Exactly one of *HttpsHealthCheck or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HttpsHealthCheck.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -20386,7 +25139,7 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &HttpsHealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -20398,16 +25151,16 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", - // "httpMethod": "POST", - // "id": "compute.images.deprecate", + // "description": "Returns the specified HttpsHealthCheck resource. Get a list of available HTTPS health checks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.httpsHealthChecks.get", // "parameterOrder": [ // "project", - // "image" + // "httpsHealthCheck" // ], // "parameters": { - // "image": { - // "description": "Image name.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -20421,95 +25174,95 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/global/images/{image}/deprecate", - // "request": { - // "$ref": "DeprecationStatus" - // }, + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "response": { - // "$ref": "Operation" + // "$ref": "HttpsHealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.images.get": +// method id "compute.httpsHealthChecks.insert": -type ImagesGetCall struct { - s *Service - project string - image string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type HttpsHealthChecksInsertCall struct { + s *Service + project string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified image. Get a list of available images by -// making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/get -func (r *ImagesService) Get(project string, image string) *ImagesGetCall { - c := &ImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HttpsHealthCheck resource in the specified project +// using the data included in the request. +func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksInsertCall { + c := &HttpsHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image + c.httpshealthcheck = httpshealthcheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
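
The IfNoneMatch setter and googleapi.IsNotModified are meant to be used as a pair: pass an ETag kept from an earlier response and treat a 304 as "nothing changed". A sketch assuming the caller has such an ETag on hand; the helper name is illustrative.

package examples

import (
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getIfChanged fetches an HttpsHealthCheck conditionally. lastETag is assumed
// to have been saved from a previous response; an empty value falls back to
// an unconditional Get.
func getIfChanged(svc *compute.Service, project, name, lastETag string) {
	call := svc.HttpsHealthChecks.Get(project, name)
	if lastETag != "" {
		call.IfNoneMatch(lastETag)
	}
	hc, err := call.Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("health check unchanged since", lastETag)
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hc.Name, hc.Port, hc.RequestPath)
}
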
-func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall { +func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesGetCall) IfNoneMatch(entityTag string) *ImagesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesGetCall) Context(ctx context.Context) *ImagesGetCall { +func (c *HttpsHealthChecksInsertCall) Context(ctx context.Context) *HttpsHealthChecksInsertCall { c.ctx_ = ctx return c } -func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HttpsHealthChecksInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + for k, v := range c.header_ { + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.get" call. -// Exactly one of *Image or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Image.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { +// Do executes the "compute.httpsHealthChecks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -20528,7 +25281,7 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Image{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -20540,21 +25293,13 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { } return ret, nil // { - // "description": "Returns the specified image. Get a list of available images by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.images.get", + // "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.httpsHealthChecks.insert", // "parameterOrder": [ - // "project", - // "image" + // "project" // ], // "parameters": { - // "image": { - // "description": "Name of the image resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -20563,43 +25308,111 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { // "type": "string" // } // }, - // "path": "{project}/global/images/{image}", + // "path": "{project}/global/httpsHealthChecks", + // "request": { + // "$ref": "HttpsHealthCheck" + // }, // "response": { - // "$ref": "Image" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.images.getFromFamily": +// method id "compute.httpsHealthChecks.list": -type ImagesGetFromFamilyCall struct { +type HttpsHealthChecksListCall struct { s *Service project string - family string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// GetFromFamily: Returns the latest image that is part of an image -// family and is not deprecated. -func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { - c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of HttpsHealthCheck resources available to +// the specified project. +func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCall { + c := &HttpsHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.family = family + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. 
+// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChecksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *HttpsHealthChecksListCall) OrderBy(orderBy string) *HttpsHealthChecksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChecksListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFamilyCall { +func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -20609,7 +25422,7 @@ func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFam // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFamilyCall { +func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -20617,38 +25430,49 @@ func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFa // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesGetFromFamilyCall) Context(ctx context.Context) *ImagesGetFromFamilyCall { +func (c *HttpsHealthChecksListCall) Context(ctx context.Context) *HttpsHealthChecksListCall { c.ctx_ = ctx return c } -func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HttpsHealthChecksListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/family/{family}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "family": c.family, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.getFromFamily" call. -// Exactly one of *Image or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Image.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, error) { +// Do executes the "compute.httpsHealthChecks.list" call. +// Exactly one of *HttpsHealthCheckList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HttpsHealthCheckList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -20667,7 +25491,7 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Image{ + ret := &HttpsHealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -20679,19 +25503,35 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro } return ret, nil // { - // "description": "Returns the latest image that is part of an image family and is not deprecated.", + // "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.images.getFromFamily", + // "id": "compute.httpsHealthChecks.list", // "parameterOrder": [ - // "project", - // "family" + // "project" // ], // "parameters": { - // "family": { - // "description": "Name of the image family to search for.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -20702,9 +25542,9 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro // "type": "string" // } // }, - // "path": "{project}/global/images/family/{family}", + // "path": "{project}/global/httpsHealthChecks", // "response": { - // "$ref": "Image" + // "$ref": "HttpsHealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -20715,30 +25555,54 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro } -// method id "compute.images.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHealthCheckList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ImagesInsertCall struct { - s *Service - project string - image *Image - urlParams_ gensupport.URLParams - ctx_ context.Context +// method id "compute.httpsHealthChecks.patch": + +type HttpsHealthChecksPatchCall struct { + s *Service + project string + httpsHealthCheck string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an image in the specified project using the data -// included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/insert -func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall { - c := &ImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a HttpsHealthCheck resource in the specified project +// using the data included in the request. This method supports patch +// semantics. +func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksPatchCall { + c := &HttpsHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image + c.httpsHealthCheck = httpsHealthCheck + c.httpshealthcheck = httpshealthcheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { +func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -20746,39 +25610,52 @@ func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
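
The Filter documentation above spells out the expression grammar (field_name comparison_string literal_string, with eq/ne comparisons). A brief sketch using it together with MaxResults on the HttpsHealthChecks list call; the filter expression itself is only an example.

package examples

import (
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// listMatching lists HTTPS health checks whose name is not "example-check",
// capping each page at 50 results. The expression follows the documented
// field_name comparison_string literal_string form.
func listMatching(svc *compute.Service, project string) {
	list, err := svc.HttpsHealthChecks.List(project).
		Filter("name ne example-check").
		MaxResults(50).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, hc := range list.Items {
		fmt.Println(hc.Name)
	}
	if list.NextPageToken != "" {
		fmt.Println("more results available; pass NextPageToken to PageToken")
	}
}
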
-func (c *ImagesInsertCall) Context(ctx context.Context) *ImagesInsertCall { +func (c *HttpsHealthChecksPatchCall) Context(ctx context.Context) *HttpsHealthChecksPatchCall { c.ctx_ = ctx return c } -func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HttpsHealthChecksPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.image) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.insert" call. +// Do executes the "compute.httpsHealthChecks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -20809,13 +25686,21 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates an image in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.images.insert", + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. 
This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "compute.httpsHealthChecks.patch", // "parameterOrder": [ - // "project" + // "project", + // "httpsHealthCheck" // ], // "parameters": { + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -20824,150 +25709,8002 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "type": "string" // } // }, - // "path": "{project}/global/images", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "request": { - // "$ref": "Image" + // "$ref": "HttpsHealthCheck" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.images.list": +// method id "compute.httpsHealthChecks.update": -type ImagesListCall struct { +type HttpsHealthChecksUpdateCall struct { + s *Service + project string + httpsHealthCheck string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates a HttpsHealthCheck resource in the specified project +// using the data included in the request. +func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksUpdateCall { + c := &HttpsHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpsHealthCheck = httpsHealthCheck + c.httpshealthcheck = httpshealthcheck + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthChecksUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HttpsHealthChecksUpdateCall) Context(ctx context.Context) *HttpsHealthChecksUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HttpsHealthChecksUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.httpsHealthChecks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.httpsHealthChecks.update", + // "parameterOrder": [ + // "project", + // "httpsHealthCheck" + // ], + // "parameters": { + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "request": { + // "$ref": "HttpsHealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.images.delete": + +type ImagesDeleteCall struct { + s *Service + project string + image string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified image. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/delete +func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { + c := &ImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.image = image + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ImagesDeleteCall) Context(ctx context.Context) *ImagesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ImagesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "image": c.image, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.images.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified image.", + // "httpMethod": "DELETE", + // "id": "compute.images.delete", + // "parameterOrder": [ + // "project", + // "image" + // ], + // "parameters": { + // "image": { + // "description": "Name of the image resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/images/{image}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.images.deprecate": + +type ImagesDeprecateCall struct { + s *Service + project string + image string + deprecationstatus *DeprecationStatus + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Deprecate: Sets the deprecation status of an image. 
+// +// If an empty request body is given, clears the deprecation status +// instead. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/deprecate +func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall { + c := &ImagesDeprecateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.image = image + c.deprecationstatus = deprecationstatus + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ImagesDeprecateCall) Context(ctx context.Context) *ImagesDeprecateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ImagesDeprecateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "image": c.image, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.images.deprecate" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", + // "httpMethod": "POST", + // "id": "compute.images.deprecate", + // "parameterOrder": [ + // "project", + // "image" + // ], + // "parameters": { + // "image": { + // "description": "Image name.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/images/{image}/deprecate", + // "request": { + // "$ref": "DeprecationStatus" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.images.get": + +type ImagesGetCall struct { s *Service project string + image string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of private images available to the specified -// project. Private images are images you create that belong to your -// project. This method does not get any images that belong to other -// projects, including publicly-available images, like Debian 8. If you -// want to get a list of publicly-available images, use this method to -// make a request to the respective image project, such as debian-cloud -// or windows-cloud. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/list -func (r *ImagesService) List(project string) *ImagesListCall { - c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified image. Get a list of available images by +// making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/get +func (r *ImagesService) Get(project string, image string) *ImagesGetCall { + c := &ImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.image = image return c } -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. 
The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *ImagesListCall) Filter(filter string) *ImagesListCall { - c.urlParams_.Set("filter", filter) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. -func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ImagesGetCall) IfNoneMatch(entityTag string) *ImagesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ImagesGetCall) Context(ctx context.Context) *ImagesGetCall { + c.ctx_ = ctx return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { - c.urlParams_.Set("pageToken", pageToken) +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ImagesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "image": c.image, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.images.get" call. +// Exactly one of *Image or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Image.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Image{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified image. Get a list of available images by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.images.get", + // "parameterOrder": [ + // "project", + // "image" + // ], + // "parameters": { + // "image": { + // "description": "Name of the image resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/images/{image}", + // "response": { + // "$ref": "Image" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.images.getFromFamily": + +type ImagesGetFromFamilyCall struct { + s *Service + project string + family string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetFromFamily: Returns the latest image that is part of an image +// family and is not deprecated. +func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { + c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.family = family + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFamilyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFamilyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ImagesGetFromFamilyCall) Context(ctx context.Context) *ImagesGetFromFamilyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ImagesGetFromFamilyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/family/{family}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "family": c.family, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.images.getFromFamily" call. +// Exactly one of *Image or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Image.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Image{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the latest image that is part of an image family and is not deprecated.", + // "httpMethod": "GET", + // "id": "compute.images.getFromFamily", + // "parameterOrder": [ + // "project", + // "family" + // ], + // "parameters": { + // "family": { + // "description": "Name of the image family to search for.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/images/family/{family}", + // "response": { + // "$ref": "Image" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.images.insert": + +type ImagesInsertCall struct { + s *Service + project string + image *Image + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an image in the specified project using the data +// included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/insert +func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall { + c := &ImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.image = image + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ImagesInsertCall) Context(ctx context.Context) *ImagesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
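// A minimal usage sketch for the image getters defined above: fetch one image
// by name, then the newest non-deprecated image in a family. It assumes an
// authenticated *Service (svc) constructed elsewhere; the project, image and
// family names are placeholders, and error handling is abbreviated.
func exampleImagesGet(ctx context.Context, svc *Service) error {
	img, err := svc.Images.Get("my-project", "my-image").Context(ctx).Do()
	if err != nil {
		return err
	}
	// The returned *Image embeds googleapi.ServerResponse, so the HTTP status
	// and headers of this call are available on img.ServerResponse.
	fmt.Println("got image, HTTP status:", img.ServerResponse.HTTPStatusCode)

	latest, err := svc.Images.GetFromFamily("my-project", "my-image-family").Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("latest image in family, HTTP status:", latest.ServerResponse.HTTPStatusCode)
	return nil
}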
+func (c *ImagesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.image) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.images.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an image in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.images.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/images", + // "request": { + // "$ref": "Image" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "compute.images.list": + +type ImagesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of private images available to the specified +// project. Private images are images you create that belong to your +// project. 
This method does not get any images that belong to other +// projects, including publicly-available images, like Debian 8. If you +// want to get a list of publicly-available images, use this method to +// make a request to the respective image project, such as debian-cloud +// or windows-cloud. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/list +func (r *ImagesService) List(project string) *ImagesListCall { + c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *ImagesListCall) Filter(filter string) *ImagesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *ImagesListCall) OrderBy(orderBy string) *ImagesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. 
Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ImagesListCall) Context(ctx context.Context) *ImagesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ImagesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.images.list" call. +// Exactly one of *ImageList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ImageList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ImageList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. 
This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + // "httpMethod": "GET", + // "id": "compute.images.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/images", + // "response": { + // "$ref": "ImageList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.instanceGroupManagers.abandonInstances": + +type InstanceGroupManagersAbandonInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AbandonInstances: Schedules a group action to remove the specified +// instances from the managed instance group. Abandoning an instance +// does not delete the instance, but it does remove the instance from +// any target pools that are applied by the managed instance group. This +// method reduces the targetSize of the managed instance group by the +// number of instances that you abandon. This operation is marked as +// DONE when the action is scheduled even if the instances have not yet +// been removed from the group. You must separately verify the status of +// the abandoning action with the listmanagedinstances method. +func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { + c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersabandoninstancesrequest = instancegroupmanagersabandoninstancesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAbandonInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
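// A short paging sketch for the images.list call above: Pages drives Do
// repeatedly, handing each ImageList to the callback and following
// NextPageToken until it is empty. svc, the project name and the filter value
// are placeholders; the Items field is assumed to follow the usual generated
// list-type layout.
func exampleImagesList(ctx context.Context, svc *Service) error {
	return svc.Images.List("my-project").
		Filter("name ne example-image"). // eq/ne filter syntax as documented above
		MaxResults(100).
		Pages(ctx, func(page *ImageList) error {
			fmt.Println("images on this page:", len(page.Items))
			return nil // a non-nil error here would halt the iteration
		})
}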
+func (c *InstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *InstanceGroupManagersAbandonInstancesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceGroupManagersAbandonInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersabandoninstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroupManagers.abandonInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. 
You must separately verify the status of the abandoning action with the listmanagedinstances method.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.abandonInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + // "request": { + // "$ref": "InstanceGroupManagersAbandonInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.aggregatedList": + +type InstanceGroupManagersAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of managed instance groups and +// groups them by zone. +func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceGroupManagersAggregatedListCall { + c := &InstanceGroupManagersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. 
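// A sketch of the abandonInstances call defined above. Abandoning removes the
// listed instances from the managed instance group (and its target pools)
// without deleting the VMs, and the returned operation is marked DONE as soon
// as the action is scheduled, so its outcome still has to be checked
// separately. svc and all resource names are placeholders, and the request's
// Instances field (a list of instance URLs) is assumed from the upstream API
// schema rather than shown in this file.
func exampleAbandonInstances(ctx context.Context, svc *Service, instanceURL string) error {
	req := &InstanceGroupManagersAbandonInstancesRequest{
		Instances: []string{instanceURL}, // assumed field: full URLs of the instances to abandon
	}
	op, err := svc.InstanceGroupManagers.
		AbandonInstances("my-project", "us-central1-f", "my-igm", req).
		Context(ctx).Do()
	if err != nil {
		return err
	}
	// op is only a scheduled operation; per the description above, verify the
	// abandoning status with the listmanagedinstances method.
	fmt.Println("abandon scheduled, HTTP status:", op.ServerResponse.HTTPStatusCode)
	return nil
}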
+func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstanceGroupManagersAggregatedListCall) OrderBy(orderBy string) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersAggregatedListCall) Context(ctx context.Context) *InstanceGroupManagersAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
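// A conditional-fetch sketch using IfNoneMatch as described above, shown with
// the images.get call. svc and the resource names are placeholders, and it
// assumes the first response carried an ETag header for the second request to
// reuse.
func exampleConditionalGet(ctx context.Context, svc *Service) error {
	img, err := svc.Images.Get("my-project", "my-image").Context(ctx).Do()
	if err != nil {
		return err
	}
	etag := img.ServerResponse.Header.Get("Etag")

	// Re-issue the call with If-None-Match; a 304 surfaces as an error that
	// googleapi.IsNotModified recognizes, as the doc comments above note.
	_, err = svc.Images.Get("my-project", "my-image").IfNoneMatch(etag).Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("image unchanged since the last fetch")
		return nil
	}
	return err
}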
+func (c *InstanceGroupManagersAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroupManagers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroupManagers.aggregatedList" call. +// Exactly one of *InstanceGroupManagerAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InstanceGroupManagerAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupManagerAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of managed instance groups and groups them by zone.", + // "httpMethod": "GET", + // "id": "compute.instanceGroupManagers.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/instanceGroupManagers", + // "response": { + // "$ref": "InstanceGroupManagerAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupManagersAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.instanceGroupManagers.delete": + +type InstanceGroupManagersDeleteCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified managed instance group and all of the +// instances in that group. Note that the instance group must not belong +// to a backend service. Read Deleting an instance group for more +// information. 
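// A paging sketch for the aggregatedList call above: results arrive grouped by
// zone, and Pages follows nextPageToken exactly as for the other list calls.
// svc, the project name and the filter/orderBy values are placeholders.
func exampleIGMAggregatedList(ctx context.Context, svc *Service) error {
	return svc.InstanceGroupManagers.AggregatedList("my-project").
		Filter("name eq my-igm").          // eq/ne filter syntax as documented above
		OrderBy("creationTimestamp desc"). // newest first, per the orderBy description
		Pages(ctx, func(page *InstanceGroupManagerAggregatedList) error {
			// Each page groups managed instance groups by zone; inspect it here.
			return nil
		})
}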
+func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { + c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersDeleteCall) Context(ctx context.Context) *InstanceGroupManagersDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceGroupManagersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroupManagers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. 
Read Deleting an instance group for more information.", + // "httpMethod": "DELETE", + // "id": "compute.instanceGroupManagers.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.deleteInstances": + +type InstanceGroupManagersDeleteInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DeleteInstances: Schedules a group action to delete the specified +// instances in the managed instance group. The instances are also +// removed from any target pools of which they were a member. This +// method reduces the targetSize of the managed instance group by the +// number of instances that you delete. This operation is marked as DONE +// when the action is scheduled even if the instances are still being +// deleted. You must separately verify the status of the deleting action +// with the listmanagedinstances method. +func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { + c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersdeleteinstancesrequest = instancegroupmanagersdeleteinstancesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *InstanceGroupManagersDeleteInstancesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
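// A sketch of the instanceGroupManagers.delete call defined above; per its
// description, this deletes the group together with all of its instances, and
// the group must not still be referenced by a backend service. svc and the
// resource names are placeholders.
func exampleIGMDelete(ctx context.Context, svc *Service) error {
	op, err := svc.InstanceGroupManagers.
		Delete("my-project", "us-central1-f", "my-igm").
		Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("delete requested, operation HTTP status:", op.ServerResponse.HTTPStatusCode)
	return nil
}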
+func (c *InstanceGroupManagersDeleteInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroupManagers.deleteInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. 
You must separately verify the status of the deleting action with the listmanagedinstances method.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.deleteInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + // "request": { + // "$ref": "InstanceGroupManagersDeleteInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.get": + +type InstanceGroupManagersGetCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns all of the details about the specified managed instance +// group. Get a list of available managed instance groups by making a +// list() request. +func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { + c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGroupManagersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGroupManagersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersGetCall) Context(ctx context.Context) *InstanceGroupManagersGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
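// Example (illustrative sketch): removing two specific instances from a managed
// instance group via DeleteInstances. svc is an already-built *compute.Service as in
// the Delete sketch above; the Instances field of
// InstanceGroupManagersDeleteInstancesRequest is assumed to take full instance URLs,
// and all names are hypothetical.
func exampleDeleteInstances(svc *compute.Service) (*compute.Operation, error) {
	req := &compute.InstanceGroupManagersDeleteInstancesRequest{
		Instances: []string{
			"https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-b/instances/my-mig-abcd",
			"https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-b/instances/my-mig-efgh",
		},
	}
	// Like Delete, this only schedules the action; poll the Operation or call
	// listManagedInstances to confirm completion.
	return svc.InstanceGroupManagers.DeleteInstances("my-project", "us-central1-b", "my-mig", req).Do()
}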
+func (c *InstanceGroupManagersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroupManagers.get" call. +// Exactly one of *InstanceGroupManager or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceGroupManager.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupManager{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns all of the details about the specified managed instance group. 
Get a list of available managed instance groups by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instanceGroupManagers.get", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "response": { + // "$ref": "InstanceGroupManager" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.insert": + +type InstanceGroupManagersInsertCall struct { + s *Service + project string + zone string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a managed instance group using the information that +// you specify in the request. After the group is created, it schedules +// an action to create instances in the group using the specified +// instance template. This operation is marked as DONE when the group is +// created even if the instances in the group have not yet been created. +// You must separately verify the status of the individual instances +// with the listmanagedinstances method. +func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { + c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instancegroupmanager = instancegroupmanager + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *InstanceGroupManagersInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersInsertCall) Context(ctx context.Context) *InstanceGroupManagersInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
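// Example (illustrative sketch): fetching a managed instance group with Get, trimming
// the response with Fields, and using IfNoneMatch plus googleapi.IsNotModified to skip
// work when the resource has not changed. Assumes the "google.golang.org/api/googleapi"
// and fmt imports; the InstanceGroupManager field names (Name, TargetSize) follow the
// usual generated shape, and all resource names are hypothetical.
func exampleGetManagedInstanceGroup(svc *compute.Service, lastETag string) error {
	call := svc.InstanceGroupManagers.Get("my-project", "us-central1-b", "my-mig").
		Fields("name", "targetSize", "instanceTemplate")
	if lastETag != "" {
		call = call.IfNoneMatch(lastETag)
	}
	igm, err := call.Do()
	if googleapi.IsNotModified(err) {
		return nil // cached copy is still current
	}
	if err != nil {
		return err
	}
	fmt.Printf("%s currently targets %d instances\n", igm.Name, igm.TargetSize)
	return nil
}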
+func (c *InstanceGroupManagersInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroupManagers.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. 
You must separately verify the status of the individual instances with the listmanagedinstances method.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers", + // "request": { + // "$ref": "InstanceGroupManager" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.list": + +type InstanceGroupManagersListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of managed instance groups that are contained +// within the specified project and zone. +func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { + c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGroupManagersListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstanceGroupManagersListCall) OrderBy(orderBy string) *InstanceGroupManagersListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGroupManagersListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersListCall) Context(ctx context.Context) *InstanceGroupManagersListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceGroupManagersListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroupManagers.list" call. +// Exactly one of *InstanceGroupManagerList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupManagerList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupManagerList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", + // "httpMethod": "GET", + // "id": "compute.instanceGroupManagers.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers", + // "response": { + // "$ref": "InstanceGroupManagerList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupManagersListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.instanceGroupManagers.listManagedInstances": + +type InstanceGroupManagersListManagedInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// ListManagedInstances: Lists all of the instances in the managed +// instance group. Each instance in the list has a currentAction, which +// indicates the action that the managed instance group is performing on +// the instance. For example, if the group is still creating an +// instance, the currentAction is CREATING. If a previous action failed, +// the list displays the errors for that failed action. 
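// Example (illustrative sketch): creating a managed instance group with Insert and
// then walking every group in the zone with List plus the Pages helper shown above.
// The InstanceGroupManager fields (Name, BaseInstanceName, InstanceTemplate,
// TargetSize) and InstanceGroupManagerList.Items follow the usual generated types;
// project, zone, and template URL are hypothetical.
func exampleInsertAndList(svc *compute.Service) error {
	ctx := context.Background()
	igm := &compute.InstanceGroupManager{
		Name:             "my-mig",
		BaseInstanceName: "my-mig",
		InstanceTemplate: "global/instanceTemplates/my-template",
		TargetSize:       3,
	}
	if _, err := svc.InstanceGroupManagers.Insert("my-project", "us-central1-b", igm).Context(ctx).Do(); err != nil {
		return err
	}
	// Pages follows nextPageToken internally and invokes the callback once per page.
	return svc.InstanceGroupManagers.List("my-project", "us-central1-b").
		MaxResults(100).
		Pages(ctx, func(page *compute.InstanceGroupManagerList) error {
			for _, g := range page.Items {
				fmt.Println(g.Name, g.TargetSize)
			}
			return nil
		})
}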
+func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { + c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *InstanceGroupManagersListManagedInstancesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceGroupManagersListManagedInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroupManagers.listManagedInstances" call. +// Exactly one of *InstanceGroupManagersListManagedInstancesResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *InstanceGroupManagersListManagedInstancesResponse.ServerResponse.Head +// er or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListManagedInstancesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupManagersListManagedInstancesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.listManagedInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + // "response": { + // "$ref": "InstanceGroupManagersListManagedInstancesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.recreateInstances": + +type InstanceGroupManagersRecreateInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// RecreateInstances: Schedules a group action to recreate the specified +// instances in the managed instance group. The instances are deleted +// and recreated using the current instance template for the managed +// instance group. This operation is marked as DONE when the action is +// scheduled even if the instances have not yet been recreated. You must +// separately verify the status of the recreating action with the +// listmanagedinstances method. 
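// Example (illustrative sketch): inspecting per-instance state with
// ListManagedInstances. The ManagedInstances slice and its Instance/CurrentAction
// fields are assumed from the usual generated response type; names are hypothetical.
func exampleListManagedInstances(svc *compute.Service) error {
	resp, err := svc.InstanceGroupManagers.ListManagedInstances("my-project", "us-central1-b", "my-mig").Do()
	if err != nil {
		return err
	}
	for _, mi := range resp.ManagedInstances {
		// CurrentAction reports values such as NONE, CREATING, RECREATING, DELETING.
		fmt.Printf("%s: %s\n", mi.Instance, mi.CurrentAction)
	}
	return nil
}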
+func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { + c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersrecreateinstancesrequest = instancegroupmanagersrecreateinstancesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersRecreateInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersRecreateInstancesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroupManagers.recreateInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.recreateInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + // "request": { + // "$ref": "InstanceGroupManagersRecreateInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.resize": + +type InstanceGroupManagersResizeCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Resize: Resizes the managed instance group. If you increase the size, +// the group creates new instances using the current instance template. +// If you decrease the size, the group deletes instances. The resize +// operation is marked DONE when the resize actions are scheduled even +// if the group has not yet added or deleted any instances. You must +// separately verify the status of the creating or deleting actions with +// the listmanagedinstances method. +func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { + c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.urlParams_.Set("size", fmt.Sprint(size)) + return c +} + +// Fields allows partial responses to be retrieved. 
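// Example (illustrative sketch): recreating a single instance in place with
// RecreateInstances so it is rebuilt from the group's current instance template. The
// Instances field of InstanceGroupManagersRecreateInstancesRequest is assumed to take
// full instance URLs; names are hypothetical.
func exampleRecreateInstance(svc *compute.Service) (*compute.Operation, error) {
	req := &compute.InstanceGroupManagersRecreateInstancesRequest{
		Instances: []string{
			"https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-b/instances/my-mig-abcd",
		},
	}
	// The Operation is returned when the action is scheduled; verify progress with
	// listManagedInstances as the description above notes.
	return svc.InstanceGroupManagers.RecreateInstances("my-project", "us-central1-b", "my-mig", req).Do()
}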
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *InstanceGroupManagersResizeCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceGroupManagersResizeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroupManagers.resize" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. 
You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.resize", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager", + // "size" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "size": { + // "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", + // "format": "int32", + // "location": "query", + // "required": true, + // "type": "integer" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.setInstanceTemplate": + +type InstanceGroupManagersSetInstanceTemplateCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetInstanceTemplate: Specifies the instance template to use when +// creating new instances in this group. The templates for existing +// instances in the group do not change unless you recreate them. +func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { + c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssetinstancetemplaterequest = instancegroupmanagerssetinstancetemplaterequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetInstanceTemplateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *InstanceGroupManagersSetInstanceTemplateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
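// Example (illustrative sketch): scaling a managed instance group to a new target
// size with Resize. As with the other mutating calls above, the Operation comes back
// as soon as the resize is scheduled, not when instances exist; names are hypothetical.
func exampleResize(svc *compute.Service, target int64) (*compute.Operation, error) {
	return svc.InstanceGroupManagers.Resize("my-project", "us-central1-b", "my-mig", target).Do()
}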
+func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Specifies the instance template to use when creating new instances in this group. 
The templates for existing instances in the group do not change unless you recreate them.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.setInstanceTemplate", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "request": { + // "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.setTargetPools": + +type InstanceGroupManagersSetTargetPoolsCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetTargetPools: Modifies the target pools to which all instances in +// this managed instance group are assigned. The target pools +// automatically apply to all of the instances in the managed instance +// group. This operation is marked DONE when you make the request even +// if the instances have not yet been added to their target pools. The +// change might take some time to apply to all of the instances in the +// group depending on the size of the group. +func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { + c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssettargetpoolsrequest = instancegroupmanagerssettargetpoolsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetTargetPoolsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *InstanceGroupManagersSetTargetPoolsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
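// Example (illustrative sketch): pointing a managed instance group at a new template
// with SetInstanceTemplate. Existing instances keep the old template until they are
// recreated (see the RecreateInstances sketch above). The InstanceTemplate request
// field is an assumption from the usual generated type; names are hypothetical.
func exampleSetInstanceTemplate(svc *compute.Service) (*compute.Operation, error) {
	req := &compute.InstanceGroupManagersSetInstanceTemplateRequest{
		InstanceTemplate: "global/instanceTemplates/my-template-v2",
	}
	return svc.InstanceGroupManagers.SetInstanceTemplate("my-project", "us-central1-b", "my-mig", req).Do()
}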
+func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroupManagers.setTargetPools" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. 
The change might take some time to apply to all of the instances in the group depending on the size of the group.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.setTargetPools", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + // "request": { + // "$ref": "InstanceGroupManagersSetTargetPoolsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroups.addInstances": + +type InstanceGroupsAddInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddInstances: Adds a list of instances to the specified instance +// group. All of the instances in the instance group must be in the same +// network/subnetwork. Read Adding instances for more information. +func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { + c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupsaddinstancesrequest = instancegroupsaddinstancesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsAddInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsAddInstancesCall) Context(ctx context.Context) *InstanceGroupsAddInstancesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
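// Example (illustrative sketch): reassigning the target pools applied to instances in
// the group with SetTargetPools. The TargetPools request field is assumed to take
// target pool URLs; names are hypothetical.
func exampleSetTargetPools(svc *compute.Service) (*compute.Operation, error) {
	req := &compute.InstanceGroupManagersSetTargetPoolsRequest{
		TargetPools: []string{
			"https://www.googleapis.com/compute/v1/projects/my-project/regions/us-central1/targetPools/my-pool",
		},
	}
	return svc.InstanceGroupManagers.SetTargetPools("my-project", "us-central1-b", "my-mig", req).Do()
}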
+func (c *InstanceGroupsAddInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsaddinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroups.addInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. 
Read Adding instances for more information.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.addInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroup" + // ], + // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group where you are adding instances.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", + // "request": { + // "$ref": "InstanceGroupsAddInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroups.aggregatedList": + +type InstanceGroupsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of instance groups and sorts them +// by zone. +func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAggregatedListCall { + c := &InstanceGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. 
+func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstanceGroupsAggregatedListCall) OrderBy(orderBy string) *InstanceGroupsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *InstanceGroupsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupsAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsAggregatedListCall) Context(ctx context.Context) *InstanceGroupsAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InstanceGroupsAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroups") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroups.aggregatedList" call. +// Exactly one of *InstanceGroupAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of instance groups and sorts them by zone.", + // "httpMethod": "GET", + // "id": "compute.instanceGroups.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/instanceGroups", + // "response": { + // "$ref": "InstanceGroupAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.instanceGroups.delete": + +type InstanceGroupsDeleteCall struct { + s *Service + project string + zone string + instanceGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified instance group. The instances in the +// group are not deleted. Note that instance group must not belong to a +// backend service. Read Deleting an instance group for more +// information. 
+func (r *InstanceGroupsService) Delete(project string, zone string, instanceGroup string) *InstanceGroupsDeleteCall { + c := &InstanceGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroup = instanceGroup + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsDeleteCall) Context(ctx context.Context) *InstanceGroupsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceGroupsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroups.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. 
Read Deleting an instance group for more information.", + // "httpMethod": "DELETE", + // "id": "compute.instanceGroups.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroup" + // ], + // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroups.get": + +type InstanceGroupsGetCall struct { + s *Service + project string + zone string + instanceGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified instance group. Get a list of available +// instance groups by making a list() request. +func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { + c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroup = instanceGroup + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsGetCall) Context(ctx context.Context) *InstanceGroupsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InstanceGroupsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroups.get" call. +// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *InstanceGroup.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroup{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified instance group. 
Get a list of available instance groups by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instanceGroups.get", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroup" + // ], + // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "response": { + // "$ref": "InstanceGroup" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceGroups.insert": + +type InstanceGroupsInsertCall struct { + s *Service + project string + zone string + instancegroup *InstanceGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an instance group in the specified project using the +// parameters that are included in the request. +func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { + c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instancegroup = instancegroup + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsInsertCall) Context(ctx context.Context) *InstanceGroupsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceGroupsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroup) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroups.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an instance group in the specified project using the parameters that are included in the request.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups", + // "request": { + // "$ref": "InstanceGroup" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroups.list": + +type InstanceGroupsListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of instance groups that are located in the +// specified project and zone. +func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { + c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). 
The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstanceGroupsListCall) OrderBy(orderBy string) *InstanceGroupsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsListCall) Context(ctx context.Context) *InstanceGroupsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceGroupsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroups.list" call. +// Exactly one of *InstanceGroupList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of instance groups that are located in the specified project and zone.", + // "httpMethod": "GET", + // "id": "compute.instanceGroups.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups", + // "response": { + // "$ref": "InstanceGroupList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGroupList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.instanceGroups.listInstances": + +type InstanceGroupsListInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// ListInstances: Lists the instances in the specified instance group. +func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { + c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupslistinstancesrequest = instancegroupslistinstancesrequest + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *InstanceGroupsListInstancesCall) MaxResults(maxResults int64) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. 
By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstanceGroupsListInstancesCall) OrderBy(orderBy string) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsListInstancesCall) Context(ctx context.Context) *InstanceGroupsListInstancesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceGroupsListInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupslistinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroups.listInstances" call. +// Exactly one of *InstanceGroupsListInstances or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupsListInstances.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupsListInstances, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupsListInstances{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the instances in the specified instance group.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.listInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroup" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "instanceGroup": { + // "description": "The name of the instance group from which you want to generate a list of included instances.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", + // "request": { + // "$ref": "InstanceGroupsListInstancesRequest" + // }, + // "response": { + // "$ref": "InstanceGroupsListInstances" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceGroups.removeInstances": + +type InstanceGroupsRemoveInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// RemoveInstances: Removes one or more instances from the specified +// instance group, but does not delete those instances. +func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { + c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupsremoveinstancesrequest = instancegroupsremoveinstancesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsRemoveInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsRemoveInstancesCall) Context(ctx context.Context) *InstanceGroupsRemoveInstancesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InstanceGroupsRemoveInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsremoveinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroups.removeInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Removes one or more instances from the specified instance group, but does not delete those instances.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.removeInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroup" + // ], + // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group where the specified instances will be removed.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", + // "request": { + // "$ref": "InstanceGroupsRemoveInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // 
"https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroups.setNamedPorts": + +type InstanceGroupsSetNamedPortsCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetNamedPorts: Sets the named ports for the specified instance group. +func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { + c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupssetnamedportsrequest = instancegroupssetnamedportsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *InstanceGroupsSetNamedPortsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *InstanceGroupsSetNamedPortsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceGroupsSetNamedPortsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupssetnamedportsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroups.setNamedPorts" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the named ports for the specified instance group.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.setNamedPorts", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroup" + // ], + // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group where the named ports are updated.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", + // "request": { + // "$ref": "InstanceGroupsSetNamedPortsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceTemplates.delete": + +type InstanceTemplatesDeleteCall struct { + s *Service + project string + instanceTemplate string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified instance template. If you delete an +// instance template that is being referenced from another instance +// group, the instance group will not be able to create or recreate +// virtual machine instances. Deleting an instance template is permanent +// and cannot be undone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/delete +func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall { + c := &InstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instanceTemplate = instanceTemplate + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemplatesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceTemplatesDeleteCall) Context(ctx context.Context) *InstanceTemplatesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
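+//
+// Editorial note, not part of the generated code: a hedged sketch of how
+// Fields and this delete call compose. svc, ctx and the literal names are
+// placeholders; svc is assumed to be a *Service from New with an
+// authenticated *http.Client.
+//
+//    op, err := svc.InstanceTemplates.Delete("my-project", "my-template").
+//        Fields("name", "status"). // request a partial Operation response
+//        Context(ctx).
+//        Do()
+//    if err == nil {
+//        fmt.Println(op.Status)
+//    }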
+func (c *InstanceTemplatesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instanceTemplate": c.instanceTemplate, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceTemplates.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified instance template. If you delete an instance template that is being referenced from another instance group, the instance group will not be able to create or recreate virtual machine instances. 
Deleting an instance template is permanent and cannot be undone.", + // "httpMethod": "DELETE", + // "id": "compute.instanceTemplates.delete", + // "parameterOrder": [ + // "project", + // "instanceTemplate" + // ], + // "parameters": { + // "instanceTemplate": { + // "description": "The name of the instance template to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/instanceTemplates/{instanceTemplate}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceTemplates.get": + +type InstanceTemplatesGetCall struct { + s *Service + project string + instanceTemplate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified instance template. Get a list of available +// instance templates by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/get +func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall { + c := &InstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instanceTemplate = instanceTemplate + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceTemplatesGetCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceTemplatesGetCall) Context(ctx context.Context) *InstanceTemplatesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
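+//
+// Editorial note, not part of the generated code: a minimal sketch of the
+// surrounding Get call, assuming svc is a *Service created with New and an
+// authenticated *http.Client; "my-project" and "my-template" are placeholders.
+//
+//    tmpl, err := svc.InstanceTemplates.Get("my-project", "my-template").Context(ctx).Do()
+//    if err != nil {
+//        // handle the error
+//    }
+//    fmt.Println(tmpl.Name, tmpl.SelfLink)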
+func (c *InstanceTemplatesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instanceTemplate": c.instanceTemplate, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceTemplates.get" call. +// Exactly one of *InstanceTemplate or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceTemplate.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified instance template. 
Get a list of available instance templates by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instanceTemplates.get", + // "parameterOrder": [ + // "project", + // "instanceTemplate" + // ], + // "parameters": { + // "instanceTemplate": { + // "description": "The name of the instance template.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/instanceTemplates/{instanceTemplate}", + // "response": { + // "$ref": "InstanceTemplate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceTemplates.insert": + +type InstanceTemplatesInsertCall struct { + s *Service + project string + instancetemplate *InstanceTemplate + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an instance template in the specified project using +// the data that is included in the request. If you are creating a new +// template to update an existing instance group, your new instance +// template must use the same network or, if applicable, the same +// subnetwork as the original template. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/insert +func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall { + c := &InstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instancetemplate = instancetemplate + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemplatesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceTemplatesInsertCall) Context(ctx context.Context) *InstanceTemplatesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceTemplatesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceTemplates.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", + // "httpMethod": "POST", + // "id": "compute.instanceTemplates.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/instanceTemplates", + // "request": { + // "$ref": "InstanceTemplate" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceTemplates.list": + +type InstanceTemplatesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of instance templates that are contained +// within the specified project and zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/list +func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall { + c := &InstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). 
The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTemplatesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstanceTemplatesListCall) OrderBy(orderBy string) *InstanceTemplatesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplatesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTemplatesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
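+//
+// Editorial note, not part of the generated code: a sketch of conditional
+// fetching, assuming svc is a *Service from New, ctx is a context.Context,
+// and prevETag was read from the ETag header of an earlier response.
+//
+//    list, err := svc.InstanceTemplates.List("my-project").IfNoneMatch(prevETag).Context(ctx).Do()
+//    if googleapi.IsNotModified(err) {
+//        // nothing changed since prevETag; keep using the cached list
+//    }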
+func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTemplatesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceTemplatesListCall) Context(ctx context.Context) *InstanceTemplatesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceTemplatesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceTemplates.list" call. +// Exactly one of *InstanceTemplateList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceTemplateList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceTemplateList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of instance templates that are contained within the specified project and zone.", + // "httpMethod": "GET", + // "id": "compute.instanceTemplates.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/instanceTemplates", + // "response": { + // "$ref": "InstanceTemplateList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
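+//
+// Editorial note, not part of the generated code: a sketch of paging through
+// all templates whose names match a filter. svc, ctx and the literal values
+// are placeholders; the filter syntax follows the Filter documentation above.
+//
+//    err := svc.InstanceTemplates.List("my-project").
+//        Filter("name eq web-template-.*").
+//        MaxResults(100).
+//        Pages(ctx, func(page *InstanceTemplateList) error {
+//            for _, t := range page.Items {
+//                fmt.Println(t.Name)
+//            }
+//            return nil // a non-nil error here stops the iteration
+//        })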
+func (c *InstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.instances.addAccessConfig": + +type InstancesAddAccessConfigCall struct { + s *Service + project string + zone string + instance string + accessconfig *AccessConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddAccessConfig: Adds an access config to an instance's network +// interface. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/addAccessConfig +func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { + c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) + c.accessconfig = accessconfig + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAddAccessConfigCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesAddAccessConfigCall) Context(ctx context.Context) *InstancesAddAccessConfigCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesAddAccessConfigCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.addAccessConfig" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
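+//
+// Editorial note, not part of the generated code: a sketch of the usual call
+// pattern, assuming svc is a *Service from New with an authenticated client;
+// "nic0" is the conventional name of the first network interface and the
+// other literals are placeholders.
+//
+//    ac := &AccessConfig{Name: "External NAT", Type: "ONE_TO_ONE_NAT"}
+//    op, err := svc.Instances.AddAccessConfig("my-project", "us-central1-f", "my-instance", "nic0", ac).Context(ctx).Do()
+//    if err != nil {
+//        // handle the error; otherwise poll op until its Status is "DONE"
+//    }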
+func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds an access config to an instance's network interface.", + // "httpMethod": "POST", + // "id": "compute.instances.addAccessConfig", + // "parameterOrder": [ + // "project", + // "zone", + // "instance", + // "networkInterface" + // ], + // "parameters": { + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface to add to this instance.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", + // "request": { + // "$ref": "AccessConfig" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.aggregatedList": + +type InstancesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves aggregated list of instances. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/aggregatedList +func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall { + c := &InstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). 
For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstancesAggregatedListCall) OrderBy(orderBy string) *InstancesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesAggregatedListCall) Context(ctx context.Context) *InstancesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.aggregatedList" call. +// Exactly one of *InstanceAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves aggregated list of instances.", + // "httpMethod": "GET", + // "id": "compute.instances.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/instances", + // "response": { + // "$ref": "InstanceAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstancesAggregatedListCall) Pages(ctx context.Context, f func(*InstanceAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.instances.attachDisk": + +type InstancesAttachDiskCall struct { + s *Service + project string + zone string + instance string + attacheddisk *AttachedDisk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AttachDisk: Attaches a Disk resource to an instance. 
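+//
+// Editorial note, not part of the generated code: a sketch assuming the disk
+// already exists in the same zone; svc, ctx and all literals are placeholders.
+//
+//    disk := &AttachedDisk{
+//        Source:     "zones/us-central1-f/disks/my-data-disk",
+//        DeviceName: "my-data-disk",
+//    }
+//    op, err := svc.Instances.AttachDisk("my-project", "us-central1-f", "my-instance", disk).Context(ctx).Do()
+//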
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/attachDisk +func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall { + c := &InstancesAttachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.attacheddisk = attacheddisk + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachDiskCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesAttachDiskCall) Context(ctx context.Context) *InstancesAttachDiskCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesAttachDiskCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.attachDisk" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Attaches a Disk resource to an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.attachDisk", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", + // "request": { + // "$ref": "AttachedDisk" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.delete": + +type InstancesDeleteCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified Instance resource. For more +// information, see Stopping or Deleting an Instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/delete +func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { + c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
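+//
+// Editorial note, not part of the generated code: a sketch of the surrounding
+// delete call; svc, ctx and the literal names are placeholders, and the
+// returned Operation completes asynchronously.
+//
+//    op, err := svc.Instances.Delete("my-project", "us-central1-f", "my-instance").Context(ctx).Do()
+//    if err != nil {
+//        // handle the error; otherwise poll op until its Status is "DONE"
+//    }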
+func (c *InstancesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified Instance resource. 
For more information, see Stopping or Deleting an Instance.", + // "httpMethod": "DELETE", + // "id": "compute.instances.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.deleteAccessConfig": + +type InstancesDeleteAccessConfigCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DeleteAccessConfig: Deletes an access config from an instance's +// network interface. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/deleteAccessConfig +func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { + c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.urlParams_.Set("accessConfig", accessConfig) + c.urlParams_.Set("networkInterface", networkInterface) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *InstancesDeleteAccessConfigCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesDeleteAccessConfigCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.deleteAccessConfig" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes an access config from an instance's network interface.", + // "httpMethod": "POST", + // "id": "compute.instances.deleteAccessConfig", + // "parameterOrder": [ + // "project", + // "zone", + // "instance", + // "accessConfig", + // "networkInterface" + // ], + // "parameters": { + // "accessConfig": { + // "description": "The name of the access config to delete.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.detachDisk": + +type InstancesDetachDiskCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DetachDisk: Detaches a disk from an instance. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/detachDisk +func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { + c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.urlParams_.Set("deviceName", deviceName) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachDiskCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesDetachDiskCall) Context(ctx context.Context) *InstancesDetachDiskCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesDetachDiskCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.detachDisk" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Detaches a disk from an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.detachDisk", + // "parameterOrder": [ + // "project", + // "zone", + // "instance", + // "deviceName" + // ], + // "parameters": { + // "deviceName": { + // "description": "Disk device name to detach.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "Instance name.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.get": + +type InstancesGetCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified Instance resource. Get a list of available +// instances by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/get +func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall { + c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesGetCall) Context(ctx context.Context) *InstancesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.get" call. +// Exactly one of *Instance or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Instance.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Instance{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified Instance resource. 
Get a list of available instances by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instances.get", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}", + // "response": { + // "$ref": "Instance" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instances.getSerialPortOutput": + +type InstancesGetSerialPortOutputCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetSerialPortOutput: Returns the specified instance's serial port +// output. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/getSerialPortOutput +func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall { + c := &InstancesGetSerialPortOutputCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// Port sets the optional parameter "port": Specifies which COM or +// serial port to retrieve data from. +func (c *InstancesGetSerialPortOutputCall) Port(port int64) *InstancesGetSerialPortOutputCall { + c.urlParams_.Set("port", fmt.Sprint(port)) + return c +} + +// Start sets the optional parameter "start": For the initial request, +// leave this field unspecified. For subsequent calls, this field should +// be set to the next value that was returned in the previous call. +func (c *InstancesGetSerialPortOutputCall) Start(start int64) *InstancesGetSerialPortOutputCall { + c.urlParams_.Set("start", fmt.Sprint(start)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *InstancesGetSerialPortOutputCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetSerialPortOutputCall) IfNoneMatch(entityTag string) *InstancesGetSerialPortOutputCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
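The Get call documented above illustrates the conditional-request pattern used throughout this generated client: IfNoneMatch attaches a previously saved ETag, and an HTTP 304 comes back from Do as a *googleapi.Error that googleapi.IsNotModified recognizes. A minimal sketch of that pattern follows; the compute.New constructor and the Instances service field live elsewhere in this generated file, and the project/zone/instance names and ETag value are purely illustrative.

package main

import (
	"fmt"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getIfChanged re-fetches the instance only when its ETag no longer matches
// the one saved from a previous response; HTTP 304 is reported as "unchanged".
func getIfChanged(svc *compute.Service, savedETag string) (*compute.Instance, error) {
	inst, err := svc.Instances.Get("my-project", "us-central1-f", "my-instance").
		IfNoneMatch(savedETag). // sends If-None-Match with the saved ETag
		Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // server answered 304: the saved copy is still current
	}
	return inst, err
}

func main() {
	// Assumption: a bare http.Client; real callers would pass an OAuth2-enabled client.
	svc, err := compute.New(&http.Client{})
	if err != nil {
		log.Fatal(err)
	}
	inst, err := getIfChanged(svc, `"abc123"`)
	if err != nil {
		log.Fatal(err)
	}
	if inst == nil {
		fmt.Println("instance unchanged")
	} else {
		fmt.Println("instance changed:", inst.Name)
	}
}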
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesGetSerialPortOutputCall) Context(ctx context.Context) *InstancesGetSerialPortOutputCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesGetSerialPortOutputCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.getSerialPortOutput" call. +// Exactly one of *SerialPortOutput or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SerialPortOutput.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*SerialPortOutput, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SerialPortOutput{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified instance's serial port output.", + // "httpMethod": "GET", + // "id": "compute.instances.getSerialPortOutput", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "port": { + // "default": "1", + // "description": "Specifies which COM or serial port to retrieve data from.", + // "format": "int32", + // "location": "query", + // "maximum": "4", + // "minimum": "1", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "start": { + // "description": "For the initial request, leave this field unspecified. For subsequent calls, this field should be set to the next value that was returned in the previous call.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/serialPort", + // "response": { + // "$ref": "SerialPortOutput" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instances.insert": + +type InstancesInsertCall struct { + s *Service + project string + zone string + instance *Instance + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an instance resource in the specified project using +// the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/insert +func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall { + c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
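GetSerialPortOutput, documented above, is meant to be polled: Start carries the offset returned by the previous call, so each request fetches only new console output. A rough sketch of tailing a console with that loop follows; the SerialPortOutput Contents/Next fields and the compute.New constructor are defined elsewhere in this generated file, and the names and poll interval are illustrative.

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// Assumption: a bare http.Client; real callers would pass an OAuth2-enabled client.
	svc, err := compute.New(&http.Client{})
	if err != nil {
		log.Fatal(err)
	}

	var next int64 // offset returned by the previous call; zero on the first request
	for {
		out, err := svc.Instances.
			GetSerialPortOutput("my-project", "us-central1-f", "my-instance").
			Port(1).     // COM1, the documented default
			Start(next). // resume where the last response stopped
			Do()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Print(out.Contents)
		next = out.Next // assumed field: the next offset reported by the server
		time.Sleep(5 * time.Second)
	}
}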
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesInsertCall) Context(ctx context.Context) *InstancesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an instance resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances", + // "request": { + // "$ref": "Instance" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.list": + +type InstancesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of instances contained within the specified +// zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/list +func (r *InstancesService) List(project string, zone string) *InstancesListCall { + c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. 
+// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InstancesListCall) Filter(filter string) *InstancesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstancesListCall) OrderBy(orderBy string) *InstancesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesListCall) Context(ctx context.Context) *InstancesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InstancesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.list" call. +// Exactly one of *InstanceList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *InstanceList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of instances contained within the specified zone.", + // "httpMethod": "GET", + // "id": "compute.instances.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances", + // "response": { + // "$ref": "InstanceList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstancesListCall) Pages(ctx context.Context, f func(*InstanceList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.instances.reset": + +type InstancesResetCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Reset: Performs a hard reset on the instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/reset +func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall { + c := &InstancesResetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. 
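The List call and the Pages helper shown above wrap the usual filter/pagination plumbing: Filter pushes an expression to the server, and Pages keeps calling Do and following nextPageToken until the last page or a non-nil error from the callback. A minimal sketch of walking every non-terminated instance in one zone; compute.New, the InstanceList Items field, and the project/zone names are assumptions drawn from elsewhere in this file or invented for illustration.

package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// Assumption: a bare http.Client; real callers would pass an OAuth2-enabled client.
	svc, err := compute.New(&http.Client{})
	if err != nil {
		log.Fatal(err)
	}

	call := svc.Instances.List("my-project", "us-central1-f").
		Filter("status ne TERMINATED"). // field_name comparison_string literal_string, as documented above
		MaxResults(100)

	// Pages feeds each InstanceList page to the callback, resuming from
	// NextPageToken until the final page is reached.
	err = call.Pages(context.Background(), func(page *compute.InstanceList) error {
		for _, inst := range page.Items {
			fmt.Println(inst.Name, inst.Status)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}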
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesResetCall) Context(ctx context.Context) *InstancesResetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesResetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.reset" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Performs a hard reset on the instance.", + // "httpMethod": "POST", + // "id": "compute.instances.reset", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/reset", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.setDiskAutoDelete": + +type InstancesSetDiskAutoDeleteCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to +// an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setDiskAutoDelete +func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { + c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.urlParams_.Set("autoDelete", fmt.Sprint(autoDelete)) + c.urlParams_.Set("deviceName", deviceName) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *InstancesSetDiskAutoDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesSetDiskAutoDeleteCall) Context(ctx context.Context) *InstancesSetDiskAutoDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InstancesSetDiskAutoDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.setDiskAutoDelete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the auto-delete flag for a disk attached to an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.setDiskAutoDelete", + // "parameterOrder": [ + // "project", + // "zone", + // "instance", + // "autoDelete", + // "deviceName" + // ], + // "parameters": { + // "autoDelete": { + // "description": "Whether to auto-delete the disk when the instance is deleted.", + // "location": "query", + // "required": true, + // "type": "boolean" + // }, + // "deviceName": { + // "description": "The device name of the disk to modify.", + // "location": "query", + // "pattern": "\\w[\\w.-]{0,254}", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "The instance name.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", + // "response": { + // "$ref": 
"Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.setMachineType": + +type InstancesSetMachineTypeCall struct { + s *Service + project string + zone string + instance string + instancessetmachinetyperequest *InstancesSetMachineTypeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetMachineType: Changes the machine type for a stopped instance to +// the machine type specified in the request. +func (r *InstancesService) SetMachineType(project string, zone string, instance string, instancessetmachinetyperequest *InstancesSetMachineTypeRequest) *InstancesSetMachineTypeCall { + c := &InstancesSetMachineTypeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.instancessetmachinetyperequest = instancessetmachinetyperequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSetMachineTypeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesSetMachineTypeCall) Context(ctx context.Context) *InstancesSetMachineTypeCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesSetMachineTypeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachinetyperequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineType") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.setMachineType" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.setMachineType", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/setMachineType", + // "request": { + // "$ref": "InstancesSetMachineTypeRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.setMetadata": + +type InstancesSetMetadataCall struct { + s *Service + project string + zone string + instance string + metadata *Metadata + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetMetadata: Sets metadata for the specified instance to the data +// included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setMetadata +func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall { + c := &InstancesSetMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.metadata = metadata return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall { +func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMetadataCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesListCall) Context(ctx context.Context) *ImagesListCall { +func (c *InstancesSetMetadataCall) Context(ctx context.Context) *InstancesSetMetadataCall { c.ctx_ = ctx return c } -func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesSetMetadataCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + for k, v := range c.header_ { + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.list" call. -// Exactly one of *ImageList or error will be non-nil. Any non-2xx +// Do executes the "compute.instances.setMetadata" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *ImageList.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { +func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -20986,7 +33723,7 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ImageList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -20998,30 +33735,20 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { } return ret, nil // { - // "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. 
If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", - // "httpMethod": "GET", - // "id": "compute.images.list", + // "description": "Sets metadata for the specified instance to the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.setMetadata", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", - // "format": "uint32", - // "location": "query", - // "maximum": "500", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -21030,76 +33757,58 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/images", + // "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", + // "request": { + // "$ref": "Metadata" + // }, // "response": { - // "$ref": "ImageList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.abandonInstances": +// method id "compute.instances.setScheduling": -type InstanceGroupManagersAbandonInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context +type InstancesSetSchedulingCall struct { + s *Service + project string + zone string + instance string + scheduling *Scheduling + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AbandonInstances: Schedules a group action to remove the specified -// instances from the managed instance group. Abandoning an instance -// does not delete the instance, but it does remove the instance from -// any target pools that are applied by the managed instance group. This -// method reduces the targetSize of the managed instance group by the -// number of instances that you abandon. This operation is marked as -// DONE when the action is scheduled even if the instances have not yet -// been removed from the group. You must separately verify the status of -// the abandoning action with the listmanagedinstances method. -func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { - c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetScheduling: Sets an instance's scheduling options. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setScheduling +func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { + c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersabandoninstancesrequest = instancegroupmanagersabandoninstancesrequest + c.instance = instance + c.scheduling = scheduling return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAbandonInstancesCall { +func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -21107,41 +33816,53 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *InstanceGroupManagersAbandonInstancesCall { +func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetSchedulingCall { c.ctx_ = ctx return c } -func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesSetSchedulingCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersabandoninstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.abandonInstances" call. +// Do executes the "compute.instances.setScheduling" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -21172,18 +33893,19 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.", + // "description": "Sets an instance's scheduling options.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.abandonInstances", + // "id": "compute.instances.setScheduling", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instance": { + // "description": "Instance name.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -21195,15 +33917,16 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + // "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", // "request": { - // "$ref": "InstanceGroupManagersAbandonInstancesRequest" + // "$ref": "Scheduling" // }, // "response": { // "$ref": "Operation" @@ -21216,127 +33939,89 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt } -// method id "compute.instanceGroupManagers.aggregatedList": +// method id "compute.instances.setTags": -type InstanceGroupManagersAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type InstancesSetTagsCall struct { + s *Service + project string + zone string + instance string + tags *Tags + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves the list of managed instance groups and -// groups them by zone. -func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceGroupManagersAggregatedListCall { - c := &InstanceGroupManagersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTags: Sets tags for the specified instance to the data included in +// the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setTags +func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { + c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. -func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) + c.zone = zone + c.instance = instance + c.tags = tags return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAggregatedListCall { +func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersAggregatedListCall) Context(ctx context.Context) *InstanceGroupManagersAggregatedListCall { +func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { c.ctx_ = ctx return c } -func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesSetTagsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + for k, v := range c.header_ { + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.aggregatedList" call. -// Exactly one of *InstanceGroupManagerAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *InstanceGroupManagerAggregatedList.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerAggregatedList, error) { +// Do executes the "compute.instances.setTags" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -21355,7 +34040,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManagerAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -21367,30 +34052,20 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Retrieves the list of managed instance groups and groups them by zone.", - // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.aggregatedList", + // "description": "Sets tags for the specified instance to the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.setTags", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", - // "format": "uint32", - // "location": "query", - // "maximum": "500", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -21399,69 +34074,58 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/instanceGroupManagers", + // "path": "{project}/zones/{zone}/instances/{instance}/setTags", + // "request": { + // "$ref": "Tags" + // }, // "response": { - // "$ref": "InstanceGroupManagerAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupManagersAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.delete": +// method id "compute.instances.start": -type InstanceGroupManagersDeleteCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context +type InstancesStartCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified managed instance group and all of the -// instances in that group. Note that the instance group must not belong -// to a backend service. Read Deleting an instance group for more -// information. -func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { - c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Start: Starts an instance that was stopped using the using the +// instances().stop method. For more information, see Restart an +// instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/start +func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { + c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager + c.instance = instance return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteCall { +func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -21469,36 +34133,48 @@ func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersDeleteCall) Context(ctx context.Context) *InstanceGroupManagersDeleteCall { +func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { c.ctx_ = ctx return c } -func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesStartCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/start") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.delete" call. +// Do executes the "compute.instances.start" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -21529,18 +34205,19 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information.", - // "httpMethod": "DELETE", - // "id": "compute.instanceGroupManagers.delete", + // "description": "Starts an instance that was stopped using the using the instances().stop method. 
For more information, see Restart an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.start", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group to delete.", + // "instance": { + // "description": "Name of the instance resource to start.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -21552,13 +34229,14 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/zones/{zone}/instances/{instance}/start", // "response": { // "$ref": "Operation" // }, @@ -21570,39 +34248,35 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceGroupManagers.deleteInstances": +// method id "compute.instances.startWithEncryptionKey": -type InstanceGroupManagersDeleteInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context +type InstancesStartWithEncryptionKeyCall struct { + s *Service + project string + zone string + instance string + instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteInstances: Schedules a group action to delete the specified -// instances in the managed instance group. The instances are also -// removed from any target pools of which they were a member. This -// method reduces the targetSize of the managed instance group by the -// number of instances that you delete. This operation is marked as DONE -// when the action is scheduled even if the instances are still being -// deleted. You must separately verify the status of the deleting action -// with the listmanagedinstances method. -func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { - c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// StartWithEncryptionKey: Starts an instance that was stopped using the +// using the instances().stop method. For more information, see Restart +// an instance. 
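The hunks above and below all add the same call pattern to the generated mutation methods (SetMetadata, SetScheduling, SetTags, Start, StartWithEncryptionKey, Stop): build the call, optionally attach a context and extra headers via the newly added Header(), then Do(), which returns a compute Operation. A minimal sketch of how a caller might drive that pattern, assuming an already-constructed *compute.Service named svc (the vendored package at google.golang.org/api/compute/v1); the project, zone, instance and header names are placeholders, not part of the diff:

```go
package computeexample

import (
	"fmt"

	"golang.org/x/net/context"
	compute "google.golang.org/api/compute/v1"
)

// startInstance sketches the call pattern shown in the hunks above:
// build the call, attach a context, optionally set extra headers via
// the Header() accessor added in this revision, then Do().
func startInstance(ctx context.Context, svc *compute.Service, project, zone, instance string) error {
	call := svc.Instances.Start(project, zone, instance)
	call.Context(ctx)
	// Headers set here are copied onto the outgoing request by doRequest.
	call.Header().Set("X-Example-Trace", "demo") // illustrative header only
	op, err := call.Do()
	if err != nil {
		return err
	}
	// Do returns a compute Operation; callers typically poll it (for
	// example via ZoneOperations.Get) until op.Status == "DONE".
	fmt.Printf("started %s: operation %s (%s)\n", instance, op.Name, op.Status)
	return nil
}
```

The same shape applies to SetTags and SetMetadata; only the request body type (Tags, Metadata) and the URL path change.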
+func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { + c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersdeleteinstancesrequest = instancegroupmanagersdeleteinstancesrequest + c.instance = instance + c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteInstancesCall { +func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -21610,41 +34284,53 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *InstanceGroupManagersDeleteInstancesCall { +func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { c.ctx_ = ctx return c } -func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.deleteInstances" call. +// Do executes the "compute.instances.startWithEncryptionKey" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -21675,18 +34361,19 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.", + // "description": "Starts an instance that was stopped using the using the instances().stop method. For more information, see Restart an instance.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.deleteInstances", + // "id": "compute.instances.startWithEncryptionKey", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instance": { + // "description": "Name of the instance resource to start.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -21698,15 +34385,16 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + // "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", // "request": { - // "$ref": "InstanceGroupManagersDeleteInstancesRequest" + // "$ref": "InstancesStartWithEncryptionKeyRequest" // }, // "response": { // "$ref": "Operation" @@ -21719,83 +34407,87 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti } -// method id "compute.instanceGroupManagers.get": +// method id "compute.instances.stop": -type InstanceGroupManagersGetCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type InstancesStopCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns all of the details about the specified managed instance -// group. Get a list of available managed instance groups by making a -// list() request. 
-func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { - c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Stop: Stops a running instance, shutting it down cleanly, and allows +// you to restart the instance at a later time. Stopped instances do not +// incur per-minute, virtual machine usage charges while they are +// stopped, but any resources that the virtual machine is using, such as +// persistent disks and static IP addresses, will continue to be charged +// until they are deleted. For more information, see Stopping an +// instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop +func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { + c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager + c.instance = instance return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGroupManagersGetCall { +func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGroupManagersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersGetCall) Context(ctx context.Context) *InstanceGroupManagersGetCall { +func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { c.ctx_ = ctx return c } -func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesStopCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + for k, v := range c.header_ { + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/stop") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.get" call. -// Exactly one of *InstanceGroupManager or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceGroupManager.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { +// Do executes the "compute.instances.stop" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -21814,7 +34506,7 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManager{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -21826,18 +34518,19 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan } return ret, nil // { - // "description": "Returns all of the details about the specified managed instance group. Get a list of available managed instance groups by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.get", + // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. 
For more information, see Stopping an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.stop", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instance": { + // "description": "Name of the instance resource to stop.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -21849,96 +34542,112 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/zones/{zone}/instances/{instance}/stop", // "response": { - // "$ref": "InstanceGroupManager" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceGroupManagers.insert": +// method id "compute.licenses.get": -type InstanceGroupManagersInsertCall struct { - s *Service - project string - zone string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context +type LicensesGetCall struct { + s *Service + project string + license string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a managed instance group using the information that -// you specify in the request. After the group is created, it schedules -// an action to create instances in the group using the specified -// instance template. This operation is marked as DONE when the group is -// created even if the instances in the group have not yet been created. -// You must separately verify the status of the individual instances -// with the listmanagedinstances method. -func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { - c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified License resource. Get a list of available +// licenses by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/licenses/get +func (r *LicensesService) Get(project string, license string) *LicensesGetCall { + c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instancegroupmanager = instancegroupmanager + c.license = license return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *InstanceGroupManagersInsertCall { +func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersInsertCall) Context(ctx context.Context) *InstanceGroupManagersInsertCall { +func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { c.ctx_ = ctx return c } -func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LicensesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "license": c.license, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.licenses.get" call. +// Exactly one of *License or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *License.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -21957,7 +34666,7 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &License{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -21969,60 +34678,58 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.insert", + // "description": "Returns the specified License resource. Get a list of available licenses by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.licenses.get", // "parameterOrder": [ // "project", - // "zone" + // "license" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "license": { + // "description": "Name of the License resource to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers", - // "request": { - // "$ref": "InstanceGroupManager" - // }, + // "path": "{project}/global/licenses/{license}", // "response": { - // "$ref": "Operation" + // "$ref": "License" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.list": +// method id "compute.machineTypes.aggregatedList": -type InstanceGroupManagersListCall struct { +type MachineTypesAggregatedListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of managed instance groups that are contained -// within the specified project and zone. -func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { - c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of machine types. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/aggregatedList +func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { + c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } @@ -22053,7 +34760,7 @@ func (r *InstanceGroupManagersService) List(project string, zone string) *Instan // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { +func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -22063,15 +34770,32 @@ func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupMana // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in // subsequent list requests. -func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGroupManagersListCall { +func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *MachineTypesAggregatedListCall) OrderBy(orderBy string) *MachineTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGroupManagersListCall { +func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -22079,7 +34803,7 @@ func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGro // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListCall { +func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -22089,7 +34813,7 @@ func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGr // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
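The IfNoneMatch documentation above describes conditional GETs keyed on the resource ETag. A hedged sketch of that flow using the LicensesGetCall added in this diff; svc is again an assumed *compute.Service, and reading the ETag from the embedded ServerResponse headers (header name "Etag") is an assumption about the generated types rather than something shown in this hunk:

```go
package computeexample

import (
	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getLicenseIfChanged sketches the If-None-Match flow described above:
// pass the ETag from a previous response and treat a 304 as "unchanged".
// prevETag may be empty on the first call; project and license are placeholders.
func getLicenseIfChanged(svc *compute.Service, project, license, prevETag string) (*compute.License, string, error) {
	call := svc.Licenses.Get(project, license)
	if prevETag != "" {
		call.IfNoneMatch(prevETag)
	}
	lic, err := call.Do()
	if googleapi.IsNotModified(err) {
		// Nothing changed since prevETag; keep using the cached copy.
		return nil, prevETag, nil
	}
	if err != nil {
		return nil, "", err
	}
	// Assumed: the ETag for the next conditional request is exposed via the
	// response headers on the embedded ServerResponse.
	return lic, lic.ServerResponse.Header.Get("Etag"), nil
}
```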
-func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListCall { +func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -22097,38 +34821,49 @@ func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersListCall) Context(ctx context.Context) *InstanceGroupManagersListCall { +func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { c.ctx_ = ctx return c } -func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *MachineTypesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.list" call. -// Exactly one of *InstanceGroupManagerList or error will be non-nil. +// Do executes the "compute.machineTypes.aggregatedList" call. +// Exactly one of *MachineTypeAggregatedList or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupManagerList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// *MachineTypeAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerList, error) { +func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -22147,7 +34882,7 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManagerList{ + ret := &MachineTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -22159,12 +34894,11 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta } return ret, nil // { - // "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", + // "description": "Retrieves an aggregated list of machine types.", // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.list", + // "id": "compute.machineTypes.aggregatedList", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { @@ -22181,6 +34915,11 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -22192,17 +34931,11 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers", + // "path": "{project}/aggregated/machineTypes", // "response": { - // "$ref": "InstanceGroupManagerList" + // "$ref": "MachineTypeAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -22216,238 +34949,114 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *InstanceGroupManagersListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.listManagedInstances": - -type InstanceGroupManagersListManagedInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// ListManagedInstances: Lists all of the instances in the managed -// instance group. Each instance in the list has a currentAction, which -// indicates the action that the managed instance group is performing on -// the instance. For example, if the group is still creating an -// instance, the currentAction is CREATING. If a previous action failed, -// the list displays the errors for that failed action. -func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { - c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *InstanceGroupManagersListManagedInstancesCall { - c.ctx_ = ctx - return c -} - -func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroupManagers.listManagedInstances" call. -// Exactly one of *InstanceGroupManagersListManagedInstancesResponse or -// error will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *InstanceGroupManagersListManagedInstancesResponse.ServerResponse.Head -// er or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. 
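// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the vendored diff): a minimal
// caller for the compute.machineTypes.aggregatedList surface added above.
// Assumptions beyond the generated code shown here: the package import path
// google.golang.org/api/compute/v1, the compute.New constructor, and the
// Service.MachineTypes field. The builder methods (Filter, OrderBy,
// MaxResults, Do) and the NextPageToken field are taken from the diff above;
// "example-machine-type" is a purely hypothetical name.
// ---------------------------------------------------------------------------
package example

import (
	"fmt"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// listAggregatedMachineTypes fetches one page of the aggregated machine-type
// listing. client must be an OAuth2-authorized client for one of the scopes
// listed in the method metadata above (e.g. .../auth/compute.readonly).
func listAggregatedMachineTypes(client *http.Client, project string) {
	svc, err := compute.New(client) // assumed constructor
	if err != nil {
		log.Fatal(err)
	}
	// Filter, OrderBy and MaxResults map onto the optional query parameters
	// documented above.
	page, err := svc.MachineTypes.AggregatedList(project).
		Filter("name ne example-machine-type").
		OrderBy("creationTimestamp desc").
		MaxResults(100).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("nextPageToken:", page.NextPageToken)
}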
-func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListManagedInstancesResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceGroupManagersListManagedInstancesResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.listManagedInstances", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", - // "response": { - // "$ref": "InstanceGroupManagersListManagedInstancesResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - +func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// method id "compute.instanceGroupManagers.recreateInstances": +// method id "compute.machineTypes.get": -type InstanceGroupManagersRecreateInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context +type MachineTypesGetCall struct { + s *Service + project string + zone string + machineType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// RecreateInstances: Schedules a 
group action to recreate the specified -// instances in the managed instance group. The instances are deleted -// and recreated using the current instance template for the managed -// instance group. This operation is marked as DONE when the action is -// scheduled even if the instances have not yet been recreated. You must -// separately verify the status of the recreating action with the -// listmanagedinstances method. -func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { - c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified machine type. Get a list of available +// machine types by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/get +func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { + c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersrecreateinstancesrequest = instancegroupmanagersrecreateinstancesrequest + c.machineType = machineType return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersRecreateInstancesCall { +func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersRecreateInstancesCall { +func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { c.ctx_ = ctx return c } -func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *MachineTypesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "machineType": c.machineType, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.recreateInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.machineTypes.get" call. +// Exactly one of *MachineType or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *MachineType.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -22466,7 +35075,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &MachineType{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -22478,18 +35087,19 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.recreateInstances", + // "description": "Returns the specified machine type. 
Get a list of available machine types by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.machineTypes.get", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "machineType" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "machineType": { + // "description": "Name of the machine type to return.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -22501,95 +35111,180 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", - // "request": { - // "$ref": "InstanceGroupManagersRecreateInstancesRequest" - // }, + // "path": "{project}/zones/{zone}/machineTypes/{machineType}", // "response": { - // "$ref": "Operation" + // "$ref": "MachineType" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.resize": +// method id "compute.machineTypes.list": -type InstanceGroupManagersResizeCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context +type MachineTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Resize: Resizes the managed instance group. If you increase the size, -// the group creates new instances using the current instance template. -// If you decrease the size, the group deletes instances. The resize -// operation is marked DONE when the resize actions are scheduled even -// if the group has not yet added or deleted any instances. You must -// separately verify the status of the creating or deleting actions with -// the listmanagedinstances method. -func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { - c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of machine types available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/list +func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { + c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.urlParams_.Set("size", fmt.Sprint(size)) + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. 
Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *MachineTypesListCall) OrderBy(orderBy string) *MachineTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeCall { +func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *InstanceGroupManagersResizeCall { +func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { c.ctx_ = ctx return c } -func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *MachineTypesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.resize" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.machineTypes.list" call. +// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *MachineTypeList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -22608,7 +35303,7 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &MachineTypeList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -22620,20 +35315,36 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Resizes the managed instance group. 
If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.resize", + // "description": "Retrieves a list of machine types available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.machineTypes.list", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager", - // "size" + // "zone" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -22643,60 +35354,72 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, - // "size": { - // "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", - // "format": "int32", - // "location": "query", - // "required": true, - // "type": "integer" - // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", + // "path": "{project}/zones/{zone}/machineTypes", // "response": { - // "$ref": "Operation" + // "$ref": "MachineTypeList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.setInstanceTemplate": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstanceGroupManagersSetInstanceTemplateCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context +// method id "compute.networks.delete": + +type NetworksDeleteCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetInstanceTemplate: Specifies the instance template to use when -// creating new instances in this group. The templates for existing -// instances in the group do not change unless you recreate them. -func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { - c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified network. 
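// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the vendored diff): walking every
// page of compute.machineTypes.list with the Pages helper shown above. The
// import path, compute.New and the Service.MachineTypes field are assumptions;
// List, MaxResults, Pages, context cancellation and NextPageToken come from
// the generated code above.
// ---------------------------------------------------------------------------
package example

import (
	"context" // the standard context type satisfies the Context parameter used by Pages
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

func walkMachineTypes(client *http.Client, project, zone string) error {
	svc, err := compute.New(client) // assumed constructor
	if err != nil {
		return err
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // cancelling the context aborts any pending page request
	pages := 0
	return svc.MachineTypes.List(project, zone).
		MaxResults(50).
		Pages(ctx, func(page *compute.MachineTypeList) error {
			pages++
			log.Printf("page %d, nextPageToken=%q", pages, page.NextPageToken)
			return nil // returning a non-nil error halts the iteration
		})
}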
+// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/delete +func (r *NetworksService) Delete(project string, network string) *NetworksDeleteCall { + c := &NetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssetinstancetemplaterequest = instancegroupmanagerssetinstancetemplaterequest + c.network = network return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -22704,41 +35427,47 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Fie // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *NetworksDeleteCall) Context(ctx context.Context) *NetworksDeleteCall { c.ctx_ = ctx return c } -func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworksDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. +// Do executes the "compute.networks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -22769,18 +35498,18 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call } return ret, nil // { - // "description": "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you recreate them.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setInstanceTemplate", + // "description": "Deletes the specified network.", + // "httpMethod": "DELETE", + // "id": "compute.networks.delete", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "network" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "network": { + // "description": "Name of the network to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -22790,18 +35519,9 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", - // "request": { - // "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" - // }, + // "path": "{project}/global/networks/{network}", // "response": { // "$ref": "Operation" // }, @@ -22813,80 +35533,93 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call } -// method id "compute.instanceGroupManagers.setTargetPools": +// method id "compute.networks.get": -type InstanceGroupManagersSetTargetPoolsCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context +type NetworksGetCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetTargetPools: Modifies the target pools to which all instances in -// this managed instance group are assigned. The target pools -// automatically apply to all of the instances in the managed instance -// group. This operation is marked DONE when you make the request even -// if the instances have not yet been added to their target pools. The -// change might take some time to apply to all of the instances in the -// group depending on the size of the group. 
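// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the vendored diff): issuing the
// compute.networks.delete call shown above, including the newly added Header()
// hook for injecting extra HTTP headers. The import path, compute.New and the
// Service.Networks field are assumptions; Delete, Header, Context and the
// (*Operation, error) return of Do come from the generated code above. The
// "X-Example-Trace" header name is purely hypothetical.
// ---------------------------------------------------------------------------
package example

import (
	"context"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

func deleteNetwork(client *http.Client, project, network string) error {
	svc, err := compute.New(client) // assumed constructor
	if err != nil {
		return err
	}
	call := svc.Networks.Delete(project, network).Context(context.Background())
	// Header() returns a mutable http.Header that doRequest copies onto the
	// outgoing request (see the reqHeaders loop in the diff above).
	call.Header().Set("X-Example-Trace", "hypothetical-value")
	op, err := call.Do()
	if err != nil {
		return err
	}
	// Polling the returned Operation to completion uses the operations API,
	// which is outside the part of the diff shown here.
	log.Printf("delete accepted, operation: %+v", op)
	return nil
}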
-func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { - c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified network. Get a list of available networks +// by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/get +func (r *NetworksService) Get(project string, network string) *NetworksGetCall { + c := &NetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssettargetpoolsrequest = instancegroupmanagerssettargetpoolsrequest + c.network = network return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetTargetPoolsCall { +func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworksGetCall) IfNoneMatch(entityTag string) *NetworksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *InstanceGroupManagersSetTargetPoolsCall { +func (c *NetworksGetCall) Context(ctx context.Context) *NetworksGetCall { c.ctx_ = ctx return c } -func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworksGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.setTargetPools" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networks.get" call. +// Exactly one of *Network or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Network.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -22905,7 +35638,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Network{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -22917,18 +35650,18 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setTargetPools", + // "description": "Returns the specified network. 
Get a list of available networks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.networks.get", // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager" + // "project", + // "network" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "network": { + // "description": "Name of the network to return.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -22938,57 +35671,46 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", - // "request": { - // "$ref": "InstanceGroupManagersSetTargetPoolsRequest" - // }, + // "path": "{project}/global/networks/{network}", // "response": { - // "$ref": "Operation" + // "$ref": "Network" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroups.addInstances": +// method id "compute.networks.insert": -type InstanceGroupsAddInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context +type NetworksInsertCall struct { + s *Service + project string + network *Network + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddInstances: Adds a list of instances to the specified instance -// group. All of the instances in the instance group must be in the same -// network/subnetwork. Read Adding instances for more information. -func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { - c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a network in the specified project using the data +// included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/insert +func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall { + c := &NetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupsaddinstancesrequest = instancegroupsaddinstancesrequest + c.network = network return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
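// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the vendored diff): conditional
// retrieval of a network via compute.networks.get using the IfNoneMatch/ETag
// flow documented above. The import paths and compute.New are assumptions;
// Get, IfNoneMatch, Do and googleapi.IsNotModified come from the generated
// code above.
// ---------------------------------------------------------------------------
package example

import (
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v1" // assumed import path
	"google.golang.org/api/googleapi"          // assumed import path
)

// getNetworkIfChanged re-fetches a network only if it changed since the ETag
// recorded from a previous response (lastETag may be empty on the first call,
// in which case no If-None-Match header is sent).
func getNetworkIfChanged(client *http.Client, project, network, lastETag string) (*compute.Network, error) {
	svc, err := compute.New(client) // assumed constructor
	if err != nil {
		return nil, err
	}
	n, err := svc.Networks.Get(project, network).IfNoneMatch(lastETag).Do()
	if googleapi.IsNotModified(err) {
		log.Printf("network %s unchanged (HTTP 304)", network)
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return n, nil
}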
-func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsAddInstancesCall { +func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -22996,41 +35718,51 @@ func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsAddInstancesCall) Context(ctx context.Context) *InstanceGroupsAddInstancesCall { +func (c *NetworksInsertCall) Context(ctx context.Context) *NetworksInsertCall { c.ctx_ = ctx return c } -func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworksInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsaddinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.network) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.addInstances" call. +// Do executes the "compute.networks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -23061,38 +35793,24 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. 
Read Adding instances for more information.", + // "description": "Creates a network in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.addInstances", + // "id": "compute.networks.insert", // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroup" + // "project" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where you are adding instances.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", + // "path": "{project}/global/networks", // "request": { - // "$ref": "InstanceGroupsAddInstancesRequest" + // "$ref": "Network" // }, // "response": { // "$ref": "Operation" @@ -23105,20 +35823,22 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instanceGroups.aggregatedList": +// method id "compute.networks.list": -type InstanceGroupsAggregatedListCall struct { +type NetworksListCall struct { s *Service project string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves the list of instance groups and sorts them -// by zone. -func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAggregatedListCall { - c := &InstanceGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of networks available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/list +func (r *NetworksService) List(project string) *NetworksListCall { + c := &NetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -23150,7 +35870,7 @@ func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAg // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall { +func (c *NetworksListCall) Filter(filter string) *NetworksListCall { c.urlParams_.Set("filter", filter) return c } @@ -23160,15 +35880,32 @@ func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroups // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in // subsequent list requests. -func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupsAggregatedListCall { +func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. 
+// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *NetworksListCall) OrderBy(orderBy string) *NetworksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *InstanceGroupsAggregatedListCall { +func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -23176,7 +35913,7 @@ func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *Instance // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupsAggregatedListCall { +func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -23186,7 +35923,7 @@ func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *Instanc // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupsAggregatedListCall { +func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { c.ifNoneMatch_ = entityTag return c } @@ -23194,20 +35931,32 @@ func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *Instan // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsAggregatedListCall) Context(ctx context.Context) *InstanceGroupsAggregatedListCall { +func (c *NetworksListCall) Context(ctx context.Context) *NetworksListCall { c.ctx_ = ctx return c } -func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworksListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -23217,14 +35966,14 @@ func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.aggregatedList" call. -// Exactly one of *InstanceGroupAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupAggregatedList, error) { +// Do executes the "compute.networks.list" call. +// Exactly one of *NetworkList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *NetworkList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -23243,7 +35992,7 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupAggregatedList{ + ret := &NetworkList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -23255,9 +36004,9 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In } return ret, nil // { - // "description": "Retrieves the list of instance groups and sorts them by zone.", + // "description": "Retrieves the list of networks available to the specified project.", // "httpMethod": "GET", - // "id": "compute.instanceGroups.aggregatedList", + // "id": "compute.networks.list", // "parameterOrder": [ // "project" // ], @@ -23276,6 +36025,11 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -23289,9 +36043,9 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In // "type": "string" // } // }, - // "path": "{project}/aggregated/instanceGroups", + // "path": "{project}/global/networks", // "response": { - // "$ref": "InstanceGroupAggregatedList" + // "$ref": "NetworkList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -23305,7 +36059,7 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupAggregatedList) error) error { +func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -23323,33 +36077,30 @@ func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*In } } -// method id "compute.instanceGroups.delete": +// method id "compute.networks.switchToCustomMode": -type InstanceGroupsDeleteCall struct { - s *Service - project string - zone string - instanceGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context +type NetworksSwitchToCustomModeCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified instance group. The instances in the -// group are not deleted. Note that instance group must not belong to a -// backend service. Read Deleting an instance group for more -// information. -func (r *InstanceGroupsService) Delete(project string, zone string, instanceGroup string) *InstanceGroupsDeleteCall { - c := &InstanceGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SwitchToCustomMode: Switches the network mode from auto subnet mode +// to custom subnet mode. +func (r *NetworksService) SwitchToCustomMode(project string, network string) *NetworksSwitchToCustomModeCall { + c := &NetworksSwitchToCustomModeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroup = instanceGroup + c.network = network return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsDeleteCall { +func (c *NetworksSwitchToCustomModeCall) Fields(s ...googleapi.Field) *NetworksSwitchToCustomModeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -23357,36 +36108,47 @@ func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstanceGroupsDeleteCall) Context(ctx context.Context) *InstanceGroupsDeleteCall { +func (c *NetworksSwitchToCustomModeCall) Context(ctx context.Context) *NetworksSwitchToCustomModeCall { c.ctx_ = ctx return c } -func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworksSwitchToCustomModeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/switchToCustomMode") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.delete" call. +// Do executes the "compute.networks.switchToCustomMode" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -23417,18 +36179,18 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. 
Read Deleting an instance group for more information.", - // "httpMethod": "DELETE", - // "id": "compute.instanceGroups.delete", + // "description": "Switches the network mode from auto subnet mode to custom subnet mode.", + // "httpMethod": "POST", + // "id": "compute.networks.switchToCustomMode", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroup" + // "network" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group to delete.", + // "network": { + // "description": "Name of the network to be updated.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -23438,15 +36200,9 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "path": "{project}/global/networks/{network}/switchToCustomMode", // "response": { // "$ref": "Operation" // }, @@ -23458,32 +36214,29 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.instanceGroups.get": +// method id "compute.projects.get": -type InstanceGroupsGetCall struct { - s *Service - project string - zone string - instanceGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type ProjectsGetCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified instance group. Get a list of available -// instance groups by making a list() request. -func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { - c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Project resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/get +func (r *ProjectsService) Get(project string) *ProjectsGetCall { + c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroup = instanceGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetCall { +func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -23493,7 +36246,7 @@ func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGetCall { +func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -23501,177 +36254,49 @@ func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsGetCall) Context(ctx context.Context) *InstanceGroupsGetCall { +func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall { c.ctx_ = ctx return c } -func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroups.get" call. -// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *InstanceGroup.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceGroup{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified instance group. 
Get a list of available instance groups by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.instanceGroups.get", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroup" - // ], - // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", - // "response": { - // "$ref": "InstanceGroup" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.instanceGroups.insert": - -type InstanceGroupsInsertCall struct { - s *Service - project string - zone string - instancegroup *InstanceGroup - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Insert: Creates an instance group in the specified project using the -// parameters that are included in the request. -func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { - c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instancegroup = instancegroup - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupsInsertCall) Context(ctx context.Context) *InstanceGroupsInsertCall { - c.ctx_ = ctx - return c -} - -func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroup) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.projects.get" call. +// Exactly one of *Project or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Project.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -23690,7 +36315,7 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Project{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -23702,166 +36327,109 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates an instance group in the specified project using the parameters that are included in the request.", - // "httpMethod": "POST", - // "id": "compute.instanceGroups.insert", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where you want to create the instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroups", - // "request": { - // "$ref": "InstanceGroup" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceGroups.list": - -type InstanceGroupsListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context -} - -// List: Retrieves the list of instance groups that are located in the -// specified project and zone. -func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { - c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { - c.urlParams_.Set("filter", filter) - return c + // "description": "Returns the specified Project resource.", + // "httpMethod": "GET", + // "id": "compute.projects.get", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}", + // "response": { + // "$ref": "Project" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. -func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c +// method id "compute.projects.moveDisk": + +type ProjectsMoveDiskCall struct { + s *Service + project string + diskmoverequest *DiskMoveRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsListCall { - c.urlParams_.Set("pageToken", pageToken) +// MoveDisk: Moves a persistent disk from one zone to another. +func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequest) *ProjectsMoveDiskCall { + c := &ProjectsMoveDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.diskmoverequest = diskmoverequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsListCall { +func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsListCall) Context(ctx context.Context) *InstanceGroupsListCall { +func (c *ProjectsMoveDiskCall) Context(ctx context.Context) *ProjectsMoveDiskCall { c.ctx_ = ctx return c } -func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsMoveDiskCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + for k, v := range c.header_ { + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.diskmoverequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveDisk") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.list" call. -// Exactly one of *InstanceGroupList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceGroupList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupList, error) { +// Do executes the "compute.projects.moveDisk" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -23880,7 +36448,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -23892,157 +36460,60 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou } return ret, nil // { - // "description": "Retrieves the list of instance groups that are located in the specified project and zone.", - // "httpMethod": "GET", - // "id": "compute.instanceGroups.list", + // "description": "Moves a persistent disk from one zone to another.", + // "httpMethod": "POST", + // "id": "compute.projects.moveDisk", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", - // "format": "uint32", - // "location": "query", - // "maximum": "500", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups", + // "path": "{project}/moveDisk", + // "request": { + // "$ref": "DiskMoveRequest" + // }, // "response": { - // "$ref": "InstanceGroupList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGroupList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroups.listInstances": +// method id "compute.projects.moveInstance": -type InstanceGroupsListInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context +type ProjectsMoveInstanceCall struct { + s *Service + project string + instancemoverequest *InstanceMoveRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListInstances: Lists the instances in the specified instance group. -func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { - c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// MoveInstance: Moves an instance and its attached persistent disks +// from one zone to another. +func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall { + c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupslistinstancesrequest = instancegroupslistinstancesrequest - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. 
The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. -func (c *InstanceGroupsListInstancesCall) MaxResults(maxResults int64) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("pageToken", pageToken) + c.instancemoverequest = instancemoverequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsListInstancesCall { +func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveInstanceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -24050,41 +36521,51 @@ func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsListInstancesCall) Context(ctx context.Context) *InstanceGroupsListInstancesCall { +func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveInstanceCall { c.ctx_ = ctx return c } -func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsMoveInstanceCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupslistinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveInstance") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.listInstances" call. -// Exactly one of *InstanceGroupsListInstances or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupsListInstances.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupsListInstances, error) { +// Do executes the "compute.projects.moveInstance" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -24103,7 +36584,7 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupsListInstances{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -24115,97 +36596,61 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins } return ret, nil // { - // "description": "Lists the instances in the specified instance group.", + // "description": "Moves an instance and its attached persistent disks from one zone to another.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.listInstances", + // "id": "compute.projects.moveInstance", // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroup" + // "project" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "instanceGroup": { - // "description": "The name of the instance group from which you want to generate a list of included instances.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", - // "format": "uint32", - // "location": "query", - // "maximum": "500", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", + // "path": "{project}/moveInstance", // "request": { - // "$ref": "InstanceGroupsListInstancesRequest" + // "$ref": "InstanceMoveRequest" // }, // "response": { - // "$ref": "InstanceGroupsListInstances" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceGroups.removeInstances": +// method id "compute.projects.setCommonInstanceMetadata": -type InstanceGroupsRemoveInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context +type ProjectsSetCommonInstanceMetadataCall struct { + s *Service + project string + metadata *Metadata + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemoveInstances: Removes one or more instances from the specified -// instance group, but does not delete those instances. -func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { - c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetCommonInstanceMetadata: Sets metadata common to all instances +// within the specified project using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setCommonInstanceMetadata +func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall { + c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupsremoveinstancesrequest = instancegroupsremoveinstancesrequest + c.metadata = metadata return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsRemoveInstancesCall { +func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -24213,41 +36658,51 @@ func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *Instan // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstanceGroupsRemoveInstancesCall) Context(ctx context.Context) *InstanceGroupsRemoveInstancesCall { +func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *ProjectsSetCommonInstanceMetadataCall { c.ctx_ = ctx return c } -func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsremoveinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setCommonInstanceMetadata") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.removeInstances" call. +// Do executes the "compute.projects.setCommonInstanceMetadata" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -24278,38 +36733,24 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Removes one or more instances from the specified instance group, but does not delete those instances.", + // "description": "Sets metadata common to all instances within the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.removeInstances", + // "id": "compute.projects.setCommonInstanceMetadata", // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroup" + // "project" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where the specified instances will be removed.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", + // "path": "{project}/setCommonInstanceMetadata", // "request": { - // "$ref": "InstanceGroupsRemoveInstancesRequest" + // "$ref": "Metadata" // }, // "response": { // "$ref": "Operation" @@ -24322,32 +36763,33 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.instanceGroups.setNamedPorts": +// method id "compute.projects.setUsageExportBucket": -type InstanceGroupsSetNamedPortsCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context +type ProjectsSetUsageExportBucketCall struct { + s *Service + project string + usageexportlocation *UsageExportLocation + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetNamedPorts: Sets the named ports for the specified instance group. -func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { - c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetUsageExportBucket: Enables the usage export feature and sets the +// usage export bucket where reports are stored. If you provide an empty +// request body using this method, the usage export feature will be +// disabled. +// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setUsageExportBucket +func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall { + c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupssetnamedportsrequest = instancegroupssetnamedportsrequest + c.usageexportlocation = usageexportlocation return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *InstanceGroupsSetNamedPortsCall { +func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -24355,41 +36797,51 @@ func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *InstanceGroupsSetNamedPortsCall { +func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *ProjectsSetUsageExportBucketCall { c.ctx_ = ctx return c } -func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsSetUsageExportBucketCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupssetnamedportsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setUsageExportBucket") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.setNamedPorts" call. +// Do executes the "compute.projects.setUsageExportBucket" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -24420,77 +36872,64 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Sets the named ports for the specified instance group.", + // "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. 
If you provide an empty request body using this method, the usage export feature will be disabled.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.setNamedPorts", + // "id": "compute.projects.setUsageExportBucket", // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroup" + // "project" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where the named ports are updated.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", + // "path": "{project}/setUsageExportBucket", // "request": { - // "$ref": "InstanceGroupsSetNamedPortsRequest" + // "$ref": "UsageExportLocation" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "compute.instanceTemplates.delete": +// method id "compute.regionAutoscalers.delete": -type InstanceTemplatesDeleteCall struct { - s *Service - project string - instanceTemplate string - urlParams_ gensupport.URLParams - ctx_ context.Context +type RegionAutoscalersDeleteCall struct { + s *Service + project string + region string + autoscaler string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified instance template. If you delete an -// instance template that is being referenced from another instance -// group, the instance group will not be able to create or recreate -// virtual machine instances. Deleting an instance template is permanent -// and cannot be undone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/delete -func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall { - c := &InstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified autoscaler. +func (r *RegionAutoscalersService) Delete(project string, region string, autoscaler string) *RegionAutoscalersDeleteCall { + c := &RegionAutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instanceTemplate = instanceTemplate + c.region = region + c.autoscaler = autoscaler return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemplatesDeleteCall { +func (c *RegionAutoscalersDeleteCall) Fields(s ...googleapi.Field) *RegionAutoscalersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -24498,35 +36937,48 @@ func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemp // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesDeleteCall) Context(ctx context.Context) *InstanceTemplatesDeleteCall { +func (c *RegionAutoscalersDeleteCall) Context(ctx context.Context) *RegionAutoscalersDeleteCall { c.ctx_ = ctx return c } -func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionAutoscalersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "instanceTemplate": c.instanceTemplate, + "project": c.project, + "region": c.region, + "autoscaler": c.autoscaler, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.delete" call. +// Do executes the "compute.regionAutoscalers.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -24557,16 +37009,17 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified instance template. If you delete an instance template that is being referenced from another instance group, the instance group will not be able to create or recreate virtual machine instances. 
Deleting an instance template is permanent and cannot be undone.", + // "description": "Deletes the specified autoscaler.", // "httpMethod": "DELETE", - // "id": "compute.instanceTemplates.delete", + // "id": "compute.regionAutoscalers.delete", // "parameterOrder": [ // "project", - // "instanceTemplate" + // "region", + // "autoscaler" // ], // "parameters": { - // "instanceTemplate": { - // "description": "The name of the instance template to delete.", + // "autoscaler": { + // "description": "Name of the autoscaler to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -24578,9 +37031,16 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{instanceTemplate}", + // "path": "{project}/regions/{region}/autoscalers/{autoscaler}", // "response": { // "$ref": "Operation" // }, @@ -24592,31 +37052,32 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.instanceTemplates.get": +// method id "compute.regionAutoscalers.get": -type InstanceTemplatesGetCall struct { - s *Service - project string - instanceTemplate string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type RegionAutoscalersGetCall struct { + s *Service + project string + region string + autoscaler string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified instance template. Get a list of available -// instance templates by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/get -func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall { - c := &InstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified autoscaler. +func (r *RegionAutoscalersService) Get(project string, region string, autoscaler string) *RegionAutoscalersGetCall { + c := &RegionAutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instanceTemplate = instanceTemplate + c.region = region + c.autoscaler = autoscaler return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetCall { +func (c *RegionAutoscalersGetCall) Fields(s ...googleapi.Field) *RegionAutoscalersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -24626,7 +37087,7 @@ func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplat // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
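Editor's note: the conditional-request pattern documented in the comments above (IfNoneMatch plus googleapi.IsNotModified) applies to the regional autoscaler Get call this hunk introduces. The sketch below is illustrative only; it assumes the vendored package is compute/v1, that the generated *Service exposes a RegionAutoscalers field as usual, and that an authenticated *http.Client and a previously captured entity tag are available.

    package computeexample

    import (
        "fmt"
        "net/http"

        "google.golang.org/api/compute/v1"
        "google.golang.org/api/googleapi"
    )

    // fetchIfChanged performs a conditional GET: when the resource still matches
    // the supplied entity tag the server answers 304 and the client surfaces it
    // through googleapi.IsNotModified instead of returning a new object.
    func fetchIfChanged(client *http.Client, project, region, name, etag string) error {
        svc, err := compute.New(client) // client is assumed to be pre-authenticated
        if err != nil {
            return err
        }
        as, err := svc.RegionAutoscalers.Get(project, region, name).
            IfNoneMatch(etag). // sets the If-None-Match request header
            Do()
        if googleapi.IsNotModified(err) {
            fmt.Println("autoscaler unchanged since the last request")
            return nil
        }
        if err != nil {
            return err
        }
        fmt.Println("fetched autoscaler:", as.Name)
        return nil
    }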
-func (c *InstanceTemplatesGetCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetCall { +func (c *RegionAutoscalersGetCall) IfNoneMatch(entityTag string) *RegionAutoscalersGetCall { c.ifNoneMatch_ = entityTag return c } @@ -24634,38 +37095,51 @@ func (c *InstanceTemplatesGetCall) IfNoneMatch(entityTag string) *InstanceTempla // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesGetCall) Context(ctx context.Context) *InstanceTemplatesGetCall { +func (c *RegionAutoscalersGetCall) Context(ctx context.Context) *RegionAutoscalersGetCall { c.ctx_ = ctx return c } -func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionAutoscalersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "instanceTemplate": c.instanceTemplate, + "project": c.project, + "region": c.region, + "autoscaler": c.autoscaler, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.get" call. -// Exactly one of *InstanceTemplate or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceTemplate.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { +// Do executes the "compute.regionAutoscalers.get" call. +// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Autoscaler.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -24684,7 +37158,7 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceTemplate{ + ret := &Autoscaler{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -24696,16 +37170,17 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe } return ret, nil // { - // "description": "Returns the specified instance template. Get a list of available instance templates by making a list() request.", + // "description": "Returns the specified autoscaler.", // "httpMethod": "GET", - // "id": "compute.instanceTemplates.get", + // "id": "compute.regionAutoscalers.get", // "parameterOrder": [ // "project", - // "instanceTemplate" + // "region", + // "autoscaler" // ], // "parameters": { - // "instanceTemplate": { - // "description": "The name of the instance template.", + // "autoscaler": { + // "description": "Name of the autoscaler to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -24717,11 +37192,18 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{instanceTemplate}", + // "path": "{project}/regions/{region}/autoscalers/{autoscaler}", // "response": { - // "$ref": "InstanceTemplate" + // "$ref": "Autoscaler" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -24732,33 +37214,32 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe } -// method id "compute.instanceTemplates.insert": +// method id "compute.regionAutoscalers.insert": -type InstanceTemplatesInsertCall struct { - s *Service - project string - instancetemplate *InstanceTemplate - urlParams_ gensupport.URLParams - ctx_ context.Context +type RegionAutoscalersInsertCall struct { + s *Service + project string + region string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an instance template in the specified project using -// the data that is included in the request. If you are creating a new -// template to update an existing instance group, your new instance -// template must use the same network or, if applicable, the same -// subnetwork as the original template. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/insert -func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall { - c := &InstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an autoscaler in the specified project using the data +// included in the request. 
+func (r *RegionAutoscalersService) Insert(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersInsertCall { + c := &RegionAutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instancetemplate = instancetemplate + c.region = region + c.autoscaler = autoscaler return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemplatesInsertCall { +func (c *RegionAutoscalersInsertCall) Fields(s ...googleapi.Field) *RegionAutoscalersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -24766,39 +37247,52 @@ func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemp // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesInsertCall) Context(ctx context.Context) *InstanceTemplatesInsertCall { +func (c *RegionAutoscalersInsertCall) Context(ctx context.Context) *RegionAutoscalersInsertCall { c.ctx_ = ctx return c } -func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionAutoscalersInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.insert" call. +// Do executes the "compute.regionAutoscalers.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -24829,11 +37323,12 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", + // "description": "Creates an autoscaler in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instanceTemplates.insert", + // "id": "compute.regionAutoscalers.insert", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "project": { @@ -24842,11 +37337,18 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates", + // "path": "{project}/regions/{region}/autoscalers", // "request": { - // "$ref": "InstanceTemplate" + // "$ref": "Autoscaler" // }, // "response": { // "$ref": "Operation" @@ -24859,22 +37361,24 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.instanceTemplates.list": +// method id "compute.regionAutoscalers.list": -type InstanceTemplatesListCall struct { +type RegionAutoscalersListCall struct { s *Service project string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of instance templates that are contained -// within the specified project and zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/list -func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall { - c := &InstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of autoscalers contained within the specified +// region. +func (r *RegionAutoscalersService) List(project string, region string) *RegionAutoscalersListCall { + c := &RegionAutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region return c } @@ -24905,7 +37409,7 @@ func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCa // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall { +func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersListCall { c.urlParams_.Set("filter", filter) return c } @@ -24915,15 +37419,32 @@ func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesList // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in // subsequent list requests. 
-func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTemplatesListCall { +func (c *RegionAutoscalersListCall) MaxResults(maxResults int64) *RegionAutoscalersListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionAutoscalersListCall) OrderBy(orderBy string) *RegionAutoscalersListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplatesListCall { +func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscalersListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -24931,7 +37452,7 @@ func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplat // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTemplatesListCall { +func (c *RegionAutoscalersListCall) Fields(s ...googleapi.Field) *RegionAutoscalersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -24941,7 +37462,7 @@ func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTempla // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTemplatesListCall { +func (c *RegionAutoscalersListCall) IfNoneMatch(entityTag string) *RegionAutoscalersListCall { c.ifNoneMatch_ = entityTag return c } @@ -24949,37 +37470,50 @@ func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTempl // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesListCall) Context(ctx context.Context) *InstanceTemplatesListCall { +func (c *RegionAutoscalersListCall) Context(ctx context.Context) *RegionAutoscalersListCall { c.ctx_ = ctx return c } -func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
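Editor's note: the regional autoscaler list call gains an OrderBy parameter here and keeps the Pages helper shown further below, so the full list surface can be exercised as in the sketch that follows. The call chain (List, OrderBy, MaxResults, Fields, Pages) comes from this hunk; the compute/v1 import path, the RegionAutoscalers accessor, and the Items/Name/Target field names are assumptions.

    package computeexample

    import (
        "context"
        "log"
        "net/http"

        "google.golang.org/api/compute/v1"
    )

    // listNewestFirst walks every page of regional autoscalers, newest first,
    // requesting only the fields it prints. nextPageToken must be kept in the
    // partial response so that paging continues to work.
    func listNewestFirst(client *http.Client, project, region string) error {
        svc, err := compute.New(client)
        if err != nil {
            return err
        }
        call := svc.RegionAutoscalers.List(project, region).
            OrderBy("creationTimestamp desc"). // one of the two supported sort orders
            MaxResults(100).
            Fields("items(name,target)", "nextPageToken")
        return call.Pages(context.Background(), func(page *compute.RegionAutoscalerList) error {
            for _, a := range page.Items {
                log.Printf("%s -> %s", a.Name, a.Target)
            }
            return nil
        })
    }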
+func (c *RegionAutoscalersListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.list" call. -// Exactly one of *InstanceTemplateList or error will be non-nil. Any +// Do executes the "compute.regionAutoscalers.list" call. +// Exactly one of *RegionAutoscalerList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *InstanceTemplateList.ServerResponse.Header or (if a response was +// *RegionAutoscalerList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { +func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAutoscalerList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -24998,7 +37532,7 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceTemplateList{ + ret := &RegionAutoscalerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -25010,11 +37544,12 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT } return ret, nil // { - // "description": "Retrieves a list of instance templates that are contained within the specified project and zone.", + // "description": "Retrieves a list of autoscalers contained within the specified region.", // "httpMethod": "GET", - // "id": "compute.instanceTemplates.list", + // "id": "compute.regionAutoscalers.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "filter": { @@ -25031,6 +37566,11 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -25042,11 +37582,18 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates", + // "path": "{project}/regions/{region}/autoscalers", // "response": { - // "$ref": "InstanceTemplateList" + // "$ref": "RegionAutoscalerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -25060,7 +37607,7 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { +func (c *RegionAutoscalersListCall) Pages(ctx context.Context, f func(*RegionAutoscalerList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -25078,35 +37625,33 @@ func (c *InstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceT } } -// method id "compute.instances.addAccessConfig": +// method id "compute.regionAutoscalers.patch": -type InstancesAddAccessConfigCall struct { - s *Service - project string - zone string - instance string - accessconfig *AccessConfig - urlParams_ gensupport.URLParams - ctx_ context.Context +type RegionAutoscalersPatchCall struct { + s *Service + project string + region string + autoscaler2 *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddAccessConfig: Adds an access config to an instance's network -// interface. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/addAccessConfig -func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { - c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates an autoscaler in the specified project using the data +// included in the request. This method supports patch semantics. +func (r *RegionAutoscalersService) Patch(project string, region string, autoscaler string, autoscaler2 *Autoscaler) *RegionAutoscalersPatchCall { + c := &RegionAutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) - c.accessconfig = accessconfig + c.region = region + c.urlParams_.Set("autoscaler", autoscaler) + c.autoscaler2 = autoscaler2 return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAddAccessConfigCall { +func (c *RegionAutoscalersPatchCall) Fields(s ...googleapi.Field) *RegionAutoscalersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -25114,41 +37659,362 @@ func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAd // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesAddAccessConfigCall) Context(ctx context.Context) *InstancesAddAccessConfigCall { +func (c *RegionAutoscalersPatchCall) Context(ctx context.Context) *RegionAutoscalersPatchCall { c.ctx_ = ctx return c } -func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionAutoscalersPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionAutoscalers.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an autoscaler in the specified project using the data included in the request. 
This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "compute.regionAutoscalers.patch", + // "parameterOrder": [ + // "project", + // "region", + // "autoscaler" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to update.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionAutoscalers.update": + +type RegionAutoscalersUpdateCall struct { + s *Service + project string + region string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an autoscaler in the specified project using the data +// included in the request. +func (r *RegionAutoscalersService) Update(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersUpdateCall { + c := &RegionAutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to update. +func (c *RegionAutoscalersUpdateCall) Autoscaler(autoscaler string) *RegionAutoscalersUpdateCall { + c.urlParams_.Set("autoscaler", autoscaler) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionAutoscalersUpdateCall) Fields(s ...googleapi.Field) *RegionAutoscalersUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionAutoscalersUpdateCall) Context(ctx context.Context) *RegionAutoscalersUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
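Editor's note: the patch call added above selects the autoscaler through the required "autoscaler" query parameter and only changes the fields present in the request body, in contrast to the full update call that follows. A minimal sketch, assuming compute/v1 and that the Autoscaler message carries an AutoscalingPolicy with a MaxNumReplicas field (neither is shown in this diff):

    package computeexample

    import (
        "context"
        "net/http"

        "google.golang.org/api/compute/v1"
    )

    // resizeCeiling patches a single field of an existing regional autoscaler;
    // everything not included in the request body is left untouched.
    func resizeCeiling(ctx context.Context, client *http.Client, project, region, name string) (*compute.Operation, error) {
        svc, err := compute.New(client)
        if err != nil {
            return nil, err
        }
        patch := &compute.Autoscaler{
            AutoscalingPolicy: &compute.AutoscalingPolicy{MaxNumReplicas: 10}, // assumed field names
        }
        return svc.RegionAutoscalers.Patch(project, region, name, patch).Context(ctx).Do()
    }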
+func (c *RegionAutoscalersUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionAutoscalers.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an autoscaler in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.regionAutoscalers.update", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to update.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionBackendServices.delete": + +type RegionBackendServicesDeleteCall struct { + s *Service + project string 
+ region string + backendService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified regional BackendService resource. +func (r *RegionBackendServicesService) Delete(project string, region string, backendService string) *RegionBackendServicesDeleteCall { + c := &RegionBackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.backendService = backendService + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionBackendServicesDeleteCall) Fields(s ...googleapi.Field) *RegionBackendServicesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionBackendServicesDeleteCall) Context(ctx context.Context) *RegionBackendServicesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionBackendServicesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.addAccessConfig" call. +// Do executes the "compute.regionBackendServices.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -25179,29 +38045,22 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Adds an access config to an instance's network interface.", - // "httpMethod": "POST", - // "id": "compute.instances.addAccessConfig", + // "description": "Deletes the specified regional BackendService resource.", + // "httpMethod": "DELETE", + // "id": "compute.regionBackendServices.delete", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "networkInterface" + // "region", + // "backendService" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", + // "backendService": { + // "description": "Name of the BackendService resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "networkInterface": { - // "description": "The name of the network interface to add to this instance.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -25209,18 +38068,15 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", - // "request": { - // "$ref": "AccessConfig" - // }, + // "path": "{project}/regions/{region}/backendServices/{backendService}", // "response": { // "$ref": "Operation" // }, @@ -25232,78 +38088,32 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.instances.aggregatedList": +// method id "compute.regionBackendServices.get": -type InstancesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type RegionBackendServicesGetCall struct { + s *Service + project string + region string + backendService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves aggregated list of instances. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/aggregatedList -func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall { - c := &InstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified regional BackendService resource. +func (r *RegionBackendServicesService) Get(project string, region string, backendService string) *RegionBackendServicesGetCall { + c := &RegionBackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. 
Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. -func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) + c.region = region + c.backendService = backendService return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAggregatedListCall { +func (c *RegionBackendServicesGetCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -25313,7 +38123,7 @@ func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAgg // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAggregatedListCall { +func (c *RegionBackendServicesGetCall) IfNoneMatch(entityTag string) *RegionBackendServicesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -25321,37 +38131,51 @@ func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstancesAggregatedListCall) Context(ctx context.Context) *InstancesAggregatedListCall { +func (c *RegionBackendServicesGetCall) Context(ctx context.Context) *RegionBackendServicesGetCall { c.ctx_ = ctx return c } -func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionBackendServicesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.aggregatedList" call. -// Exactly one of *InstanceAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionBackendServices.get" call. +// Exactly one of *BackendService or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *BackendService.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceAggregatedList, error) { +func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -25370,7 +38194,7 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceAggregatedList{ + ret := &BackendService{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -25382,30 +38206,20 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc } return ret, nil // { - // "description": "Retrieves aggregated list of instances.", + // "description": "Returns the specified regional BackendService resource.", // "httpMethod": "GET", - // "id": "compute.instances.aggregatedList", + // "id": "compute.regionBackendServices.get", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "backendService" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", - // "format": "uint32", - // "location": "query", - // "maximum": "500", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "backendService": { + // "description": "Name of the BackendService resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -25414,11 +38228,18 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/instances", + // "path": "{project}/regions/{region}/backendServices/{backendService}", // "response": { - // "$ref": "InstanceAggregatedList" + // "$ref": "BackendService" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -25429,54 +38250,34 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *InstancesAggregatedListCall) Pages(ctx context.Context, f func(*InstanceAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instances.attachDisk": +// method id "compute.regionBackendServices.getHealth": -type InstancesAttachDiskCall struct { - s *Service - project string - zone string - instance string - attacheddisk *AttachedDisk - urlParams_ gensupport.URLParams - ctx_ context.Context +type RegionBackendServicesGetHealthCall struct { + s *Service + project string + region string + backendService string + resourcegroupreference *ResourceGroupReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AttachDisk: Attaches a Disk resource to an instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/attachDisk -func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall { - c := &InstancesAttachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetHealth: Gets the most recent health check results for this +// regional BackendService. +func (r *RegionBackendServicesService) GetHealth(project string, region string, backendService string, resourcegroupreference *ResourceGroupReference) *RegionBackendServicesGetHealthCall { + c := &RegionBackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.attacheddisk = attacheddisk + c.region = region + c.backendService = backendService + c.resourcegroupreference = resourcegroupreference return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachDiskCall { +func (c *RegionBackendServicesGetHealthCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetHealthCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -25484,41 +38285,53 @@ func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesAttachDiskCall) Context(ctx context.Context) *InstancesAttachDiskCall { +func (c *RegionBackendServicesGetHealthCall) Context(ctx context.Context) *RegionBackendServicesGetHealthCall { c.ctx_ = ctx return c } -func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
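Editor's note: the new regionBackendServices.getHealth call takes a ResourceGroupReference naming the instance group to query and returns a BackendServiceGroupHealth. The sketch below assumes compute/v1, a RegionBackendServices accessor on the generated *Service, and the conventional HealthStatus/Instance/HealthState field names, none of which appear verbatim in this diff.

    package computeexample

    import (
        "context"
        "fmt"
        "net/http"

        "google.golang.org/api/compute/v1"
    )

    // printBackendHealth reports the most recent health-check results for one
    // instance group behind a regional BackendService. groupURL is the full URL
    // of the instance group resource.
    func printBackendHealth(ctx context.Context, client *http.Client, project, region, backendService, groupURL string) error {
        svc, err := compute.New(client)
        if err != nil {
            return err
        }
        ref := &compute.ResourceGroupReference{Group: groupURL}
        resp, err := svc.RegionBackendServices.GetHealth(project, region, backendService, ref).Context(ctx).Do()
        if err != nil {
            return err
        }
        for _, hs := range resp.HealthStatus {
            fmt.Printf("%s: %s\n", hs.Instance, hs.HealthState)
        }
        return nil
    }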
+func (c *RegionBackendServicesGetHealthCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}/getHealth") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.attachDisk" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionBackendServices.getHealth" call. +// Exactly one of *BackendServiceGroupHealth or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *BackendServiceGroupHealth.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -25537,7 +38350,7 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &BackendServiceGroupHealth{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -25549,78 +38362,81 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Attaches a Disk resource to an instance.", + // "description": "Gets the most recent health check results for this regional BackendService.", // "httpMethod": "POST", - // "id": "compute.instances.attachDisk", + // "id": "compute.regionBackendServices.getHealth", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "backendService" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", + // "backendService": { + // "description": "Name of the BackendService resource to which the queried instance belongs.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", + // "path": "{project}/regions/{region}/backendServices/{backendService}/getHealth", // "request": { - // "$ref": "AttachedDisk" + // "$ref": "ResourceGroupReference" // }, // "response": { - // "$ref": "Operation" + // "$ref": "BackendServiceGroupHealth" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.delete": +// method id "compute.regionBackendServices.insert": -type InstancesDeleteCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context +type RegionBackendServicesInsertCall struct { + s *Service + project string + region string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified Instance resource. For more -// information, see Stopping or Deleting an Instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/delete -func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { - c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a regional BackendService resource in the specified +// project using the data included in the request. There are several +// restrictions and guidelines to keep in mind when creating a regional +// backend service. Read Restrictions and Guidelines for more +// information. 
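// --- Editorial usage sketch (not part of the generated client code) ---
// A hypothetical example of creating a regional BackendService with the new
// Insert call. Assumes svc came from compute.New (google.golang.org/api/compute/v1)
// plus the context and log imports. All field values are placeholders, and the
// LoadBalancingScheme/HealthChecks fields are assumptions about what a real
// request needs; consult the API documentation for the required shape.
func exampleRegionBackendServiceInsert(ctx context.Context, svc *compute.Service) error {
	bes := &compute.BackendService{
		Name:                "my-regional-bes",
		Protocol:            "TCP",
		LoadBalancingScheme: "INTERNAL",
		HealthChecks: []string{
			"https://www.googleapis.com/compute/v1/projects/my-project/global/healthChecks/my-hc",
		},
	}
	op, err := svc.RegionBackendServices.Insert("my-project", "us-central1", bes).Context(ctx).Do()
	if err != nil {
		return err
	}
	// Insert returns an Operation; callers typically poll it until Status is
	// "DONE" before relying on the new resource.
	log.Printf("insert scheduled: operation %s (%s)", op.Name, op.Status)
	return nil
}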
+func (r *RegionBackendServicesService) Insert(project string, region string, backendservice *BackendService) *RegionBackendServicesInsertCall { + c := &RegionBackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.region = region + c.backendservice = backendservice return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { +func (c *RegionBackendServicesInsertCall) Fields(s ...googleapi.Field) *RegionBackendServicesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -25628,36 +38444,52 @@ func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { +func (c *RegionBackendServicesInsertCall) Context(ctx context.Context) *RegionBackendServicesInsertCall { c.ctx_ = ctx return c } -func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionBackendServicesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.delete" call. +// Do executes the "compute.regionBackendServices.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -25688,22 +38520,14 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified Instance resource. For more information, see Stopping or Deleting an Instance.", - // "httpMethod": "DELETE", - // "id": "compute.instances.delete", + // "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. Read Restrictions and Guidelines for more information.", + // "httpMethod": "POST", + // "id": "compute.regionBackendServices.insert", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -25711,15 +38535,18 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}", + // "path": "{project}/regions/{region}/backendServices", + // "request": { + // "$ref": "BackendService" + // }, // "response": { // "$ref": "Operation" // }, @@ -25731,71 +38558,159 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.instances.deleteAccessConfig": +// method id "compute.regionBackendServices.list": -type InstancesDeleteAccessConfigCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context +type RegionBackendServicesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// DeleteAccessConfig: Deletes an access config from an instance's -// network interface. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/deleteAccessConfig -func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { - c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of regional BackendService resources +// available to the specified project in the given region. +func (r *RegionBackendServicesService) List(project string, region string) *RegionBackendServicesListCall { + c := &RegionBackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("accessConfig", accessConfig) - c.urlParams_.Set("networkInterface", networkInterface) + c.region = region + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. 
Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServicesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *RegionBackendServicesListCall) MaxResults(maxResults int64) *RegionBackendServicesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionBackendServicesListCall) OrderBy(orderBy string) *RegionBackendServicesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionBackendServicesListCall) PageToken(pageToken string) *RegionBackendServicesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall { +func (c *RegionBackendServicesListCall) Fields(s ...googleapi.Field) *RegionBackendServicesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionBackendServicesListCall) IfNoneMatch(entityTag string) *RegionBackendServicesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *InstancesDeleteAccessConfigCall { +func (c *RegionBackendServicesListCall) Context(ctx context.Context) *RegionBackendServicesListCall { c.ctx_ = ctx return c } -func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionBackendServicesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.deleteAccessConfig" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionBackendServices.list" call. +// Exactly one of *BackendServiceList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BackendServiceList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
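// --- Editorial usage sketch (not part of the generated client code) ---
// A small example of listing regional BackendServices with the filter and
// paging options documented above; the Pages helper defined a little further
// below handles nextPageToken for the caller. Assumes svc came from compute.New
// (google.golang.org/api/compute/v1) plus context and log; the Items/Name
// fields are assumptions about the usual shape of the generated list types.
func exampleRegionBackendServicesList(ctx context.Context, svc *compute.Service) error {
	call := svc.RegionBackendServices.List("my-project", "us-central1").
		Filter("name eq my-bes-.*"). // RE2 expression, per the filter description above
		MaxResults(50)
	return call.Pages(ctx, func(page *compute.BackendServiceList) error {
		for _, bes := range page.Items {
			log.Printf("found backend service %s", bes.Name)
		}
		return nil // returning a non-nil error here stops the iteration
	})
}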
+func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -25814,7 +38729,7 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &BackendServiceList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -25826,34 +38741,36 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes an access config from an instance's network interface.", - // "httpMethod": "POST", - // "id": "compute.instances.deleteAccessConfig", + // "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", + // "httpMethod": "GET", + // "id": "compute.regionBackendServices.list", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "accessConfig", - // "networkInterface" + // "region" // ], // "parameters": { - // "accessConfig": { - // "description": "The name of the access config to delete.", + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", - // "required": true, // "type": "string" // }, - // "instance": { - // "description": "The instance name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "networkInterface": { - // "description": "The name of the network interface.", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", - // "required": true, // "type": "string" // }, // "project": { @@ -25863,52 +38780,79 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", + // "path": "{project}/regions/{region}/backendServices", // "response": { - // "$ref": "Operation" + // "$ref": "BackendServiceList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.detachDisk": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionBackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionBackendServices.patch": -type InstancesDetachDiskCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context +type RegionBackendServicesPatchCall struct { + s *Service + project string + region string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DetachDisk: Detaches a disk from an instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/detachDisk -func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { - c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified regional BackendService resource with +// the data included in the request. There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. This method +// supports patch semantics. 
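// --- Editorial usage sketch (not part of the generated client code) ---
// A hypothetical example of the new Patch call: per the description above it
// uses patch semantics, so only the fields set on the request body are meant
// to change; patching Description is just an illustration. Assumes svc came
// from compute.New (google.golang.org/api/compute/v1) plus context and log.
func exampleRegionBackendServicePatch(ctx context.Context, svc *compute.Service) error {
	patch := &compute.BackendService{
		Description: "updated via patch",
	}
	op, err := svc.RegionBackendServices.Patch("my-project", "us-central1", "my-regional-bes", patch).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("patch scheduled: operation %s", op.Name)
	return nil
}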
+func (r *RegionBackendServicesService) Patch(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesPatchCall { + c := &RegionBackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("deviceName", deviceName) + c.region = region + c.backendService = backendService + c.backendservice = backendservice return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachDiskCall { +func (c *RegionBackendServicesPatchCall) Fields(s ...googleapi.Field) *RegionBackendServicesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -25916,36 +38860,53 @@ func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDetachDiskCall) Context(ctx context.Context) *InstancesDetachDiskCall { +func (c *RegionBackendServicesPatchCall) Context(ctx context.Context) *RegionBackendServicesPatchCall { c.ctx_ = ctx return c } -func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionBackendServicesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.detachDisk" call. +// Do executes the "compute.regionBackendServices.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -25976,24 +38937,17 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Detaches a disk from an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.detachDisk", + // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "compute.regionBackendServices.patch", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "deviceName" + // "region", + // "backendService" // ], // "parameters": { - // "deviceName": { - // "description": "Disk device name to detach.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "instance": { - // "description": "Instance name.", + // "backendService": { + // "description": "Name of the BackendService resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -26006,15 +38960,18 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", + // "path": "{project}/regions/{region}/backendServices/{backendService}", + // "request": { + // "$ref": "BackendService" + // }, // "response": { // "$ref": "Operation" // }, @@ -26026,83 +38983,90 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.instances.get": +// method id "compute.regionBackendServices.update": -type InstancesGetCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type RegionBackendServicesUpdateCall struct { + s *Service + project string + region string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Instance resource. Get a list of available -// instances by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/get -func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall { - c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified regional BackendService resource with +// the data included in the request. There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. 
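// --- Editorial usage sketch (not part of the generated client code) ---
// Update, unlike Patch above, replaces the whole resource, so a
// read-modify-write against the Get call (whose metadata appears earlier in
// this file) is the usual pattern. Assumes svc came from compute.New
// (google.golang.org/api/compute/v1) plus context and log; the Get signature
// and the TimeoutSec field are assumptions about the generated API.
func exampleRegionBackendServiceUpdate(ctx context.Context, svc *compute.Service) error {
	bes, err := svc.RegionBackendServices.Get("my-project", "us-central1", "my-regional-bes").Context(ctx).Do()
	if err != nil {
		return err
	}
	bes.TimeoutSec = 60 // example change; everything else is sent back unchanged
	op, err := svc.RegionBackendServices.Update("my-project", "us-central1", "my-regional-bes", bes).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("update scheduled: operation %s", op.Name)
	return nil
}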
+func (r *RegionBackendServicesService) Update(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesUpdateCall { + c := &RegionBackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.region = region + c.backendService = backendService + c.backendservice = backendservice return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall { +func (c *RegionBackendServicesUpdateCall) Fields(s ...googleapi.Field) *RegionBackendServicesUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetCall) Context(ctx context.Context) *InstancesGetCall { +func (c *RegionBackendServicesUpdateCall) Context(ctx context.Context) *RegionBackendServicesUpdateCall { c.ctx_ = ctx return c } -func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionBackendServicesUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + for k, v := range c.header_ { + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.get" call. -// Exactly one of *Instance or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Instance.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.regionBackendServices.update" call. +// Exactly one of *Operation or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { +func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -26121,7 +39085,7 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Instance{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -26133,17 +39097,17 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { } return ret, nil // { - // "description": "Returns the specified Instance resource. Get a list of available instances by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.instances.get", + // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", + // "httpMethod": "PUT", + // "id": "compute.regionBackendServices.update", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "backendService" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to return.", + // "backendService": { + // "description": "Name of the BackendService resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -26156,111 +39120,118 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}", + // "path": "{project}/regions/{region}/backendServices/{backendService}", + // "request": { + // "$ref": "BackendService" + // }, // "response": { - // "$ref": "Instance" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.getSerialPortOutput": +// method id "compute.regionInstanceGroupManagers.abandonInstances": -type InstancesGetSerialPortOutputCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type RegionInstanceGroupManagersAbandonInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + 
header_ http.Header } -// GetSerialPortOutput: Returns the specified instance's serial port -// output. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/getSerialPortOutput -func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall { - c := &InstancesGetSerialPortOutputCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AbandonInstances: Schedules a group action to remove the specified +// instances from the managed instance group. Abandoning an instance +// does not delete the instance, but it does remove the instance from +// any target pools that are applied by the managed instance group. This +// method reduces the targetSize of the managed instance group by the +// number of instances that you abandon. This operation is marked as +// DONE when the action is scheduled even if the instances have not yet +// been removed from the group. You must separately verify the status of +// the abandoning action with the listmanagedinstances method. +func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest) *RegionInstanceGroupManagersAbandonInstancesCall { + c := &RegionInstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - return c -} - -// Port sets the optional parameter "port": Specifies which COM or -// serial port to retrieve data from. -func (c *InstancesGetSerialPortOutputCall) Port(port int64) *InstancesGetSerialPortOutputCall { - c.urlParams_.Set("port", fmt.Sprint(port)) + c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersabandoninstancesrequest = regioninstancegroupmanagersabandoninstancesrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *InstancesGetSerialPortOutputCall { +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersAbandonInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetSerialPortOutputCall) IfNoneMatch(entityTag string) *InstancesGetSerialPortOutputCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetSerialPortOutputCall) Context(ctx context.Context) *InstancesGetSerialPortOutputCall { +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersAbandonInstancesCall { c.ctx_ = ctx return c } -func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionInstanceGroupManagersAbandonInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + for k, v := range c.header_ { + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersabandoninstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getSerialPortOutput" call. -// Exactly one of *SerialPortOutput or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *SerialPortOutput.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*SerialPortOutput, error) { +// Do executes the "compute.regionInstanceGroupManagers.abandonInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -26279,7 +39250,7 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SerialPortOutput{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -26291,31 +39262,21 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se } return ret, nil // { - // "description": "Returns the specified instance's serial port output.", - // "httpMethod": "GET", - // "id": "compute.instances.getSerialPortOutput", + // "description": "Schedules a group action to remove the specified instances from the managed instance group. 
Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.abandonInstances", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "instanceGroupManager" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "port": { - // "default": "1", - // "description": "Specifies which COM or serial port to retrieve data from.", - // "format": "int32", - // "location": "query", - // "maximum": "4", - // "minimum": "1", - // "type": "integer" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -26323,53 +39284,54 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/serialPort", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + // "request": { + // "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" + // }, // "response": { - // "$ref": "SerialPortOutput" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.insert": +// method id "compute.regionInstanceGroupManagers.delete": -type InstancesInsertCall struct { - s *Service - project string - zone string - instance *Instance - urlParams_ gensupport.URLParams - ctx_ context.Context +type RegionInstanceGroupManagersDeleteCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an instance resource in the specified project using -// the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/insert -func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall { - c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified managed instance group and all of the +// instances in that group. 
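// --- Editorial usage sketch (not part of the generated client code) ---
// A hypothetical example of the abandonInstances method documented above.
// Per that description the returned Operation is DONE once the action is
// scheduled, so the abandon status still has to be verified separately (the
// API's listmanagedinstances method). Assumes svc came from compute.New
// (google.golang.org/api/compute/v1) plus context and log; the instance URL
// and resource names are placeholders.
func exampleRegionIGMAbandonInstances(ctx context.Context, svc *compute.Service) error {
	req := &compute.RegionInstanceGroupManagersAbandonInstancesRequest{
		Instances: []string{
			"https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-f/instances/my-instance",
		},
	}
	op, err := svc.RegionInstanceGroupManagers.AbandonInstances("my-project", "us-central1", "my-igm", req).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// "DONE" here only means the abandon was scheduled, not that it completed.
	log.Printf("abandon scheduled: operation %s (%s)", op.Name, op.Status)
	return nil
}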
+func (r *RegionInstanceGroupManagersService) Delete(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersDeleteCall { + c := &RegionInstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.region = region + c.instanceGroupManager = instanceGroupManager return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall { +func (c *RegionInstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -26377,40 +39339,48 @@ func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesInsertCall) Context(ctx context.Context) *InstancesInsertCall { +func (c *RegionInstanceGroupManagersDeleteCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteCall { c.ctx_ = ctx return c } -func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.insert" call. +// Do executes the "compute.regionInstanceGroupManagers.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -26441,14 +39411,21 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates an instance resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.instances.insert", + // "description": "Deletes the specified managed instance group and all of the instances in that group.", + // "httpMethod": "DELETE", + // "id": "compute.regionInstanceGroupManagers.delete", // "parameterOrder": [ // "project", - // "zone" + // "region", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -26456,18 +39433,14 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances", - // "request": { - // "$ref": "Instance" - // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", // "response": { // "$ref": "Operation" // }, @@ -26479,130 +39452,94 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.instances.list": +// method id "compute.regionInstanceGroupManagers.deleteInstances": -type InstancesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type RegionInstanceGroupManagersDeleteInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of instances contained within the specified -// zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/list -func (r *InstancesService) List(project string, zone string) *InstancesListCall { - c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteInstances: Schedules a group action to delete the specified +// instances in the managed instance group. The instances are also +// removed from any target pools of which they were a member. This +// method reduces the targetSize of the managed instance group by the +// number of instances that you delete. This operation is marked as DONE +// when the action is scheduled even if the instances are still being +// deleted. You must separately verify the status of the deleting action +// with the listmanagedinstances method. 
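// --- Editorial usage sketch (not part of the generated client code) ---
// DeleteInstances, unlike AbandonInstances above, deletes the VMs as well as
// removing them from the group; the same "DONE means scheduled" caveat from
// the description applies. Assumes svc came from compute.New
// (google.golang.org/api/compute/v1) plus context and log; the request mirrors
// the abandon request, and the instance URL is a placeholder.
func exampleRegionIGMDeleteInstances(ctx context.Context, svc *compute.Service) error {
	req := &compute.RegionInstanceGroupManagersDeleteInstancesRequest{
		Instances: []string{
			"https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-f/instances/my-instance",
		},
	}
	op, err := svc.RegionInstanceGroupManagers.DeleteInstances("my-project", "us-central1", "my-igm", req).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("delete scheduled: operation %s (%s)", op.Name, op.Status)
	return nil
}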
+func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest) *RegionInstanceGroupManagersDeleteInstancesCall { + c := &RegionInstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstancesListCall) Filter(filter string) *InstancesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. -func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { - c.urlParams_.Set("pageToken", pageToken) + c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersdeleteinstancesrequest = regioninstancegroupmanagersdeleteinstancesrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesListCall) Context(ctx context.Context) *InstancesListCall { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteInstancesCall { c.ctx_ = ctx return c } -func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + for k, v := range c.header_ { + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersdeleteinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.list" call. -// Exactly one of *InstanceList or error will be non-nil. Any non-2xx +// Do executes the "compute.regionInstanceGroupManagers.deleteInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *InstanceList.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, error) { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -26621,7 +39558,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -26633,31 +39570,19 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err } return ret, nil // { - // "description": "Retrieves the list of instances contained within the specified zone.", - // "httpMethod": "GET", - // "id": "compute.instances.list", + // "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.deleteInstances", // "parameterOrder": [ // "project", - // "zone" + // "region", + // "instanceGroupManager" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", - // "format": "uint32", - // "location": "query", - // "maximum": "500", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, // "type": "string" // }, // "project": { @@ -26667,110 +39592,117 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + // "request": { + // "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" + // }, // "response": { - // "$ref": "InstanceList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstancesListCall) Pages(ctx context.Context, f func(*InstanceList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instances.reset": +// method id "compute.regionInstanceGroupManagers.get": -type InstancesResetCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context +type RegionInstanceGroupManagersGetCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Reset: Performs a hard reset on the instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/reset -func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall { - c := &InstancesResetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns all of the details about the specified managed instance +// group. 
+func (r *RegionInstanceGroupManagersService) Get(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersGetCall { + c := &RegionInstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.region = region + c.instanceGroupManager = instanceGroupManager return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { +func (c *RegionInstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionInstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesResetCall) Context(ctx context.Context) *InstancesResetCall { +func (c *RegionInstanceGroupManagersGetCall) Context(ctx context.Context) *RegionInstanceGroupManagersGetCall { c.ctx_ = ctx return c } -func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.reset" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionInstanceGroupManagers.get" call. +// Exactly one of *InstanceGroupManager or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceGroupManager.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -26789,7 +39721,7 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroupManager{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -26801,19 +39733,18 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Performs a hard reset on the instance.", - // "httpMethod": "POST", - // "id": "compute.instances.reset", + // "description": "Returns all of the details about the specified managed instance group.", + // "httpMethod": "GET", + // "id": "compute.regionInstanceGroupManagers.get", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "instanceGroupManager" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "instanceGroupManager": { + // "description": "Name of the managed instance group to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -26824,54 +39755,57 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/reset", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroupManager" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setDiskAutoDelete": +// method id "compute.regionInstanceGroupManagers.insert": -type InstancesSetDiskAutoDeleteCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context +type RegionInstanceGroupManagersInsertCall struct { + s *Service + project string + region string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to -// an instance. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setDiskAutoDelete -func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { - c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a managed instance group using the information that +// you specify in the request. After the group is created, it schedules +// an action to create instances in the group using the specified +// instance template. This operation is marked as DONE when the group is +// created even if the instances in the group have not yet been created. +// You must separately verify the status of the individual instances +// with the listmanagedinstances method. +func (r *RegionInstanceGroupManagersService) Insert(project string, region string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersInsertCall { + c := &RegionInstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("autoDelete", fmt.Sprint(autoDelete)) - c.urlParams_.Set("deviceName", deviceName) + c.region = region + c.instancegroupmanager = instancegroupmanager return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *InstancesSetDiskAutoDeleteCall { +func (c *RegionInstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -26879,36 +39813,52 @@ func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *Instances // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetDiskAutoDeleteCall) Context(ctx context.Context) *InstancesSetDiskAutoDeleteCall { +func (c *RegionInstanceGroupManagersInsertCall) Context(ctx context.Context) *RegionInstanceGroupManagersInsertCall { c.ctx_ = ctx return c } -func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setDiskAutoDelete" call. +// Do executes the "compute.regionInstanceGroupManagers.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -26939,37 +39889,14 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Sets the auto-delete flag for a disk attached to an instance.", + // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.", // "httpMethod": "POST", - // "id": "compute.instances.setDiskAutoDelete", + // "id": "compute.regionInstanceGroupManagers.insert", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "autoDelete", - // "deviceName" + // "region" // ], // "parameters": { - // "autoDelete": { - // "description": "Whether to auto-delete the disk when the instance is deleted.", - // "location": "query", - // "required": true, - // "type": "boolean" - // }, - // "deviceName": { - // "description": "The device name of the disk to modify.", - // "location": "query", - // "pattern": "\\w[\\w.-]{0,254}", - // "required": true, - // "type": "string" - // }, - // "instance": { - // "description": "The instance name.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -26977,15 +39904,17 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", + // "path": "{project}/regions/{region}/instanceGroupManagers", + // "request": { + // "$ref": "InstanceGroupManager" + // }, // "response": { // "$ref": "Operation" // }, @@ -26997,75 +39926,159 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) 
(*Oper } -// method id "compute.instances.setMachineType": +// method id "compute.regionInstanceGroupManagers.list": -type InstancesSetMachineTypeCall struct { - s *Service - project string - zone string - instance string - instancessetmachinetyperequest *InstancesSetMachineTypeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context +type RegionInstanceGroupManagersListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetMachineType: Changes the machine type for a stopped instance to -// the machine type specified in the request. -func (r *InstancesService) SetMachineType(project string, zone string, instance string, instancessetmachinetyperequest *InstancesSetMachineTypeRequest) *InstancesSetMachineTypeCall { - c := &InstancesSetMachineTypeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of managed instance groups that are +// contained within the specified region. +func (r *RegionInstanceGroupManagersService) List(project string, region string) *RegionInstanceGroupManagersListCall { + c := &RegionInstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancessetmachinetyperequest = instancessetmachinetyperequest + c.region = region + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. 
+func (c *RegionInstanceGroupManagersListCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionInstanceGroupManagersListCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionInstanceGroupManagersListCall) PageToken(pageToken string) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSetMachineTypeCall { +func (c *RegionInstanceGroupManagersListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionInstanceGroupManagersListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMachineTypeCall) Context(ctx context.Context) *InstancesSetMachineTypeCall { +func (c *RegionInstanceGroupManagersListCall) Context(ctx context.Context) *RegionInstanceGroupManagersListCall { c.ctx_ = ctx return c } -func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionInstanceGroupManagersListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachinetyperequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineType") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMachineType" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionInstanceGroupManagers.list" call. +// Exactly one of *RegionInstanceGroupManagerList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *RegionInstanceGroupManagerList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagerList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -27084,7 +40097,7 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &RegionInstanceGroupManagerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -27096,20 +40109,36 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", - // "httpMethod": "POST", - // "id": "compute.instances.setMachineType", + // "description": "Retrieves the list of managed instance groups that are contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.regionInstanceGroupManagers.list", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -27119,57 +40148,75 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMachineType", - // "request": { - // "$ref": "InstancesSetMachineTypeRequest" - // }, + // "path": "{project}/regions/{region}/instanceGroupManagers", // "response": { - // "$ref": "Operation" + // "$ref": "RegionInstanceGroupManagerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setMetadata": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionInstanceGroupManagersListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagerList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesSetMetadataCall struct { - s *Service - project string - zone string - instance string - metadata *Metadata - urlParams_ gensupport.URLParams - ctx_ context.Context +// method id "compute.regionInstanceGroupManagers.listManagedInstances": + +type RegionInstanceGroupManagersListManagedInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetMetadata: Sets metadata for the specified instance to the data -// included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setMetadata -func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall { - c := &InstancesSetMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListManagedInstances: Lists the instances in the managed instance +// group and instances that are scheduled to be created. The list +// includes any current actions that the group has scheduled for its +// instances. 
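[Editor's note — another illustrative sketch under the same assumptions as the earlier example (an authenticated *compute.Service named svc and a context ctx): listing regional managed instance groups with the Filter/MaxResults options and the Pages helper defined above. The Items field on RegionInstanceGroupManagerList is assumed from the rest of the generated file, which is not shown in this hunk.]

// List regional managed instance groups page by page; Pages resets the
// page token to its original value once iteration finishes.
err := svc.RegionInstanceGroupManagers.
	List("my-project", "us-central1").
	Filter("name ne example-rmig").
	MaxResults(100).
	Pages(ctx, func(page *compute.RegionInstanceGroupManagerList) error {
		for _, igm := range page.Items { // Items assumed from the full generated type
			log.Printf("found managed instance group: %s", igm.Name)
		}
		return nil
	})
if err != nil {
	log.Fatalf("list RMIGs: %v", err)
}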
+func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListManagedInstancesCall { + c := &RegionInstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.metadata = metadata + c.region = region + c.instanceGroupManager = instanceGroupManager return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMetadataCall { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -27177,41 +40224,50 @@ func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMetadataCall) Context(ctx context.Context) *InstancesSetMetadataCall { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersListManagedInstancesCall { c.ctx_ = ctx return c } -func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMetadata" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionInstanceGroupManagers.listManagedInstances" call. 
+// Exactly one of *RegionInstanceGroupManagersListInstancesResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *RegionInstanceGroupManagersListInstancesResponse.ServerResponse.Heade +// r or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstancesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -27230,7 +40286,7 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &RegionInstanceGroupManagersListInstancesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -27242,19 +40298,18 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Sets metadata for the specified instance to the data included in the request.", + // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances.", // "httpMethod": "POST", - // "id": "compute.instances.setMetadata", + // "id": "compute.regionInstanceGroupManagers.listManagedInstances", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "instanceGroupManager" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -27265,56 +40320,59 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", - // "request": { - // "$ref": "Metadata" - // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "response": { - // "$ref": "Operation" + // "$ref": "RegionInstanceGroupManagersListInstancesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setScheduling": +// method id "compute.regionInstanceGroupManagers.recreateInstances": -type InstancesSetSchedulingCall struct { - s *Service - project string - zone string - instance string - scheduling *Scheduling - urlParams_ gensupport.URLParams - ctx_ context.Context +type RegionInstanceGroupManagersRecreateInstancesCall struct { + s *Service + project string + region string + 
instanceGroupManager string + regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetScheduling: Sets an instance's scheduling options. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setScheduling -func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { - c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RecreateInstances: Schedules a group action to recreate the specified +// instances in the managed instance group. The instances are deleted +// and recreated using the current instance template for the managed +// instance group. This operation is marked as DONE when the action is +// scheduled even if the instances have not yet been recreated. You must +// separately verify the status of the recreating action with the +// listmanagedinstances method. +func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest) *RegionInstanceGroupManagersRecreateInstancesCall { + c := &RegionInstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.scheduling = scheduling + c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersrecreaterequest = regioninstancegroupmanagersrecreaterequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -27322,41 +40380,53 @@ func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetS // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetSchedulingCall { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersRecreateInstancesCall { c.ctx_ = ctx return c } -func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersrecreaterequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setScheduling" call. +// Do executes the "compute.regionInstanceGroupManagers.recreateInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -27387,19 +40457,18 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Sets an instance's scheduling options.", + // "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. 
You must separately verify the status of the recreating action with the listmanagedinstances method.", // "httpMethod": "POST", - // "id": "compute.instances.setScheduling", + // "id": "compute.regionInstanceGroupManagers.recreateInstances", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "instanceGroupManager" // ], // "parameters": { - // "instance": { - // "description": "Instance name.", + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -27410,17 +40479,16 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", // "request": { - // "$ref": "Scheduling" + // "$ref": "RegionInstanceGroupManagersRecreateRequest" // }, // "response": { // "$ref": "Operation" @@ -27433,34 +40501,39 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.instances.setTags": - -type InstancesSetTagsCall struct { - s *Service - project string - zone string - instance string - tags *Tags - urlParams_ gensupport.URLParams - ctx_ context.Context -} +// method id "compute.regionInstanceGroupManagers.resize": -// SetTags: Sets tags for the specified instance to the data included in -// the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setTags -func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { - c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +type RegionInstanceGroupManagersResizeCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Resize: Changes the intended size for the managed instance group. If +// you increase the size, the group schedules actions to create new +// instances using the current instance template. If you decrease the +// size, the group schedules delete actions on one or more instances. +// The resize operation is marked DONE when the resize actions are +// scheduled even if the group has not yet added or deleted any +// instances. You must separately verify the status of the creating or +// deleting actions with the listmanagedinstances method. +func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { + c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.tags = tags + c.region = region + c.instanceGroupManager = instanceGroupManager + c.urlParams_.Set("size", fmt.Sprint(size)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { +func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -27468,41 +40541,48 @@ func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { +func (c *RegionInstanceGroupManagersResizeCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeCall { c.ctx_ = ctx return c } -func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setTags" call. +// Do executes the "compute.regionInstanceGroupManagers.resize" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -27533,19 +40613,19 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Sets tags for the specified instance to the data included in the request.", + // "description": "Changes the intended size for the managed instance group. If you increase the size, the group schedules actions to create new instances using the current instance template. If you decrease the size, the group schedules delete actions on one or more instances. 
The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.", // "httpMethod": "POST", - // "id": "compute.instances.setTags", + // "id": "compute.regionInstanceGroupManagers.resize", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "instanceGroupManager", + // "size" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -27556,18 +40636,22 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "size": { + // "description": "Number of instances that should exist in this instance group manager.", + // "format": "int32", + // "location": "query", + // "minimum": "0", + // "required": true, + // "type": "integer" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setTags", - // "request": { - // "$ref": "Tags" - // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", // "response": { // "$ref": "Operation" // }, @@ -27579,33 +40663,35 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.instances.start": +// method id "compute.regionInstanceGroupManagers.setInstanceTemplate": -type InstancesStartCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context +type RegionInstanceGroupManagersSetInstanceTemplateCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Start: Starts an instance that was stopped using the using the -// instances().stop method. For more information, see Restart an -// instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/start -func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { - c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetInstanceTemplate: Sets the instance template to use when creating +// new instances or recreating instances in this group. Existing +// instances are not affected. 
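// Example sketch (not part of the generated file): using the
// RegionInstanceGroupManagers.RecreateInstances and Resize calls defined
// above. Project, region, group name and instance URL are placeholders; the
// service is built from Application Default Credentials, and the request
// field names follow the compute v1 schemas referenced above.
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Any authorized *http.Client works; ADC is used here for brevity.
	client, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	project, region, igm := "my-project", "us-central1", "my-regional-igm"

	// Schedule a recreate of one member. The Operation is reported DONE as
	// soon as the action is scheduled, not when the instance exists again,
	// so callers still check progress with listManagedInstances.
	req := &compute.RegionInstanceGroupManagersRecreateRequest{
		Instances: []string{
			"https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-b/instances/my-instance-1",
		},
	}
	op, err := svc.RegionInstanceGroupManagers.RecreateInstances(project, region, igm, req).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("recreate scheduled: operation %s is %s\n", op.Name, op.Status)

	// Grow the group; again, DONE only means the actions were scheduled.
	op, err = svc.RegionInstanceGroupManagers.Resize(project, region, igm, 5).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("resize scheduled: operation %s is %s\n", op.Name, op.Status)
}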
+func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest) *RegionInstanceGroupManagersSetInstanceTemplateCall { + c := &RegionInstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerssettemplaterequest = regioninstancegroupmanagerssettemplaterequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -27613,36 +40699,53 @@ func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetInstanceTemplateCall { c.ctx_ = ctx return c } -func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/start") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.start" call. +// Do executes the "compute.regionInstanceGroupManagers.setInstanceTemplate" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -27673,19 +40776,18 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Starts an instance that was stopped using the using the instances().stop method. For more information, see Restart an instance.", + // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected.", // "httpMethod": "POST", - // "id": "compute.instances.start", + // "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "instanceGroupManager" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -27696,15 +40798,17 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/start", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "request": { + // "$ref": "RegionInstanceGroupManagersSetTemplateRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -27716,34 +40820,35 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.instances.startWithEncryptionKey": +// method id "compute.regionInstanceGroupManagers.setTargetPools": -type InstancesStartWithEncryptionKeyCall struct { - s *Service - project string - zone string - instance string - instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context +type RegionInstanceGroupManagersSetTargetPoolsCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// StartWithEncryptionKey: Starts an instance that was stopped using the -// using the instances().stop method. For more information, see Restart -// an instance. -func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { - c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTargetPools: Modifies the target pools to which all new instances +// in this group are assigned. Existing instances in the group are not +// affected. 
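// Example sketch (same imports and setup as the sketch above): pointing the
// managed instance group at a new instance template via SetInstanceTemplate.
// Only instances created or recreated afterwards use the new template, so a
// rollout typically pairs this with RecreateInstances or Resize. The template
// URL is a placeholder; the request field name follows the
// RegionInstanceGroupManagersSetTemplateRequest schema referenced above.
func setTemplate(ctx context.Context, svc *compute.Service) error {
	req := &compute.RegionInstanceGroupManagersSetTemplateRequest{
		InstanceTemplate: "global/instanceTemplates/my-template-v2",
	}
	op, err := svc.RegionInstanceGroupManagers.SetInstanceTemplate("my-project", "us-central1", "my-regional-igm", req).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("setInstanceTemplate: operation %s is %s\n", op.Name, op.Status)
	return nil
}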
+func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest) *RegionInstanceGroupManagersSetTargetPoolsCall { + c := &RegionInstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest + c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerssettargetpoolsrequest = regioninstancegroupmanagerssettargetpoolsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetTargetPoolsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -27751,41 +40856,53 @@ func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *Inst // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetTargetPoolsCall { c.ctx_ = ctx return c } -func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettargetpoolsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.startWithEncryptionKey" call. +// Do executes the "compute.regionInstanceGroupManagers.setTargetPools" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -27816,19 +40933,18 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Starts an instance that was stopped using the using the instances().stop method. For more information, see Restart an instance.", + // "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected.", // "httpMethod": "POST", - // "id": "compute.instances.startWithEncryptionKey", + // "id": "compute.regionInstanceGroupManagers.setTargetPools", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "instanceGroupManager" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -27839,17 +40955,16 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", // "request": { - // "$ref": "InstancesStartWithEncryptionKeyRequest" + // "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" // }, // "response": { // "$ref": "Operation" @@ -27862,74 +40977,94 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( } -// method id "compute.instances.stop": +// method id "compute.regionInstanceGroups.get": -type InstancesStopCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context +type RegionInstanceGroupsGetCall struct { + s *Service + project string + region string + instanceGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Stop: Stops a running instance, shutting it down cleanly, and allows -// you to restart the instance at a later time. Stopped instances do not -// incur per-minute, virtual machine usage charges while they are -// stopped, but any resources that the virtual machine is using, such as -// persistent disks and static IP addresses, will continue to be charged -// until they are deleted. For more information, see Stopping an -// instance. 
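// Example sketch (same setup as above): assigning the target pools that new
// instances in the regional group join via SetTargetPools; existing members
// are left as they are. The pool URL is a placeholder, and the optional
// Fingerprint field of RegionInstanceGroupManagersSetTargetPoolsRequest is
// omitted here.
func setTargetPools(ctx context.Context, svc *compute.Service) error {
	req := &compute.RegionInstanceGroupManagersSetTargetPoolsRequest{
		TargetPools: []string{"regions/us-central1/targetPools/my-pool"},
	}
	op, err := svc.RegionInstanceGroupManagers.SetTargetPools("my-project", "us-central1", "my-regional-igm", req).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("setTargetPools: operation %s is %s\n", op.Name, op.Status)
	return nil
}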
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop -func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { - c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified instance group resource. +func (r *RegionInstanceGroupsService) Get(project string, region string, instanceGroup string) *RegionInstanceGroupsGetCall { + c := &RegionInstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.region = region + c.instanceGroup = instanceGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { +func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { +func (c *RegionInstanceGroupsGetCall) Context(ctx context.Context) *RegionInstanceGroupsGetCall { c.ctx_ = ctx return c } -func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/stop") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.stop" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.regionInstanceGroups.get" call. +// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *InstanceGroup.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -27948,7 +41083,7 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -27960,19 +41095,18 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.stop", + // "description": "Returns the specified instance group resource.", + // "httpMethod": "GET", + // "id": "compute.regionInstanceGroups.get", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "instanceGroup" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to stop.", + // "instanceGroup": { + // "description": "Name of the instance group resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -27983,51 +41117,118 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/stop", + // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.licenses.get": +// method id "compute.regionInstanceGroups.list": -type LicensesGetCall struct { +type RegionInstanceGroupsListCall struct { s *Service project string - license string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// Get: 
Returns the specified License resource. Get a list of available -// licenses by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/licenses/get -func (r *LicensesService) Get(project string, license string) *LicensesGetCall { - c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of instance group resources contained within +// the specified region. +func (r *RegionInstanceGroupsService) List(project string, region string) *RegionInstanceGroupsListCall { + c := &RegionInstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.license = license + c.region = region + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInstanceGroupsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
+func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGroupsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstanceGroupsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { +func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -28037,7 +41238,7 @@ func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { +func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsListCall { c.ifNoneMatch_ = entityTag return c } @@ -28045,38 +41246,50 @@ func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { +func (c *RegionInstanceGroupsListCall) Context(ctx context.Context) *RegionInstanceGroupsListCall { c.ctx_ = ctx return c } -func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "license": c.license, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.get" call. -// Exactly one of *License or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *License.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { +// Do executes the "compute.regionInstanceGroups.list" call. 
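// Example sketch (same setup as above, plus the
// "google.golang.org/api/googleapi" import): a conditional list that replays
// the ETag from an earlier response through IfNoneMatch. If nothing changed,
// the server answers 304 Not Modified and googleapi.IsNotModified(err)
// reports true; project and region are placeholders.
func listIfChanged(ctx context.Context, svc *compute.Service, previousETag string) (*compute.RegionInstanceGroupList, error) {
	call := svc.RegionInstanceGroups.List("my-project", "us-central1").Context(ctx)
	if previousETag != "" {
		call = call.IfNoneMatch(previousETag)
	}
	list, err := call.Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("regional instance group list unchanged")
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	// The ETag to replay next time is in the response headers.
	fmt.Printf("fetched %d groups, etag %q\n", len(list.Items), list.Header.Get("Etag"))
	return list, nil
}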
+// Exactly one of *RegionInstanceGroupList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RegionInstanceGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -28095,7 +41308,7 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &License{ + ret := &RegionInstanceGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -28107,19 +41320,36 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { } return ret, nil // { - // "description": "Returns the specified License resource. Get a list of available licenses by making a list() request.", + // "description": "Retrieves the list of instance group resources contained within the specified region.", // "httpMethod": "GET", - // "id": "compute.licenses.get", + // "id": "compute.regionInstanceGroups.list", // "parameterOrder": [ // "project", - // "license" + // "region" // ], // "parameters": { - // "license": { - // "description": "Name of the License resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -28128,11 +41358,17 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/licenses/{license}", + // "path": "{project}/regions/{region}/instanceGroups", // "response": { - // "$ref": "License" + // "$ref": "RegionInstanceGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -28143,21 +41379,50 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { } -// method id "compute.machineTypes.aggregatedList": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionInstanceGroupsListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type MachineTypesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +// method id "compute.regionInstanceGroups.listInstances": + +type RegionInstanceGroupsListInstancesCall struct { + s *Service + project string + region string + instanceGroup string + regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of machine types. 
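// Example sketch (same setup as above): paging through regional instance
// groups with a filter and ordering, letting Pages follow nextPageToken. The
// filter follows the expression syntax described above (field_name eq|ne
// literal, RE2 for string fields); the name prefix is a placeholder.
func listFrontendGroups(ctx context.Context, svc *compute.Service) error {
	call := svc.RegionInstanceGroups.List("my-project", "us-central1").
		Filter("name eq frontend-.*").
		MaxResults(100).
		OrderBy("creationTimestamp desc")
	return call.Pages(ctx, func(page *compute.RegionInstanceGroupList) error {
		for _, ig := range page.Items {
			fmt.Printf("%s: %d instances\n", ig.Name, ig.Size)
		}
		return nil // returning an error here would stop the iteration
	})
}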
-// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/aggregatedList -func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { - c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListInstances: Lists the instances in the specified instance group +// and displays information about the named ports. Depending on the +// specified options, this method can list all instances or only the +// instances that are running. +func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { + c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.instanceGroup = instanceGroup + c.regioninstancegroupslistinstancesrequest = regioninstancegroupslistinstancesrequest return c } @@ -28188,7 +41453,7 @@ func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggreg // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { +func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("filter", filter) return c } @@ -28198,15 +41463,32 @@ func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggr // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in // subsequent list requests. -func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { +func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { +func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -28214,55 +41496,62 @@ func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTyp // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { +func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { +func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall { c.ctx_ = ctx return c } -func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + for k, v := range c.header_ { + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.machineTypes.aggregatedList" call. -// Exactly one of *MachineTypeAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *MachineTypeAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroups.listInstances" call. 
+// Exactly one of *RegionInstanceGroupsListInstances or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *RegionInstanceGroupsListInstances.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -28281,7 +41570,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &MachineTypeAggregatedList{ + ret := &RegionInstanceGroupsListInstances{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -28293,11 +41582,13 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach } return ret, nil // { - // "description": "Retrieves an aggregated list of machine types.", - // "httpMethod": "GET", - // "id": "compute.machineTypes.aggregatedList", + // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroups.listInstances", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "instanceGroup" // ], // "parameters": { // "filter": { @@ -28305,6 +41596,12 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // "location": "query", // "type": "string" // }, + // "instanceGroup": { + // "description": "Name of the regional instance group for which we want to list the instances.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", @@ -28314,6 +41611,11 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -28325,11 +41627,20 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/machineTypes", + // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", + // "request": { + // "$ref": "RegionInstanceGroupsListInstancesRequest" + // }, // "response": { - // "$ref": "MachineTypeAggregatedList" + // "$ref": "RegionInstanceGroupsListInstances" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -28340,104 +41651,88 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.machineTypes.get": +// method id "compute.regionInstanceGroups.setNamedPorts": -type MachineTypesGetCall struct { - s *Service - project string - zone string - machineType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type RegionInstanceGroupsSetNamedPortsCall struct { + s *Service + project string + region string + instanceGroup string + regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified machine type. Get a list of available -// machine types by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/get -func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { - c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetNamedPorts: Sets the named ports for the specified regional +// instance group. +func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { + c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.machineType = machineType + c.region = region + c.instanceGroup = instanceGroup + c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
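// Example sketch (same setup as above): listing only the running members of a
// regional instance group with ListInstances, resolving the named port
// "http". Group and port names are placeholders; the request and response
// field names follow the RegionInstanceGroupsListInstances schemas referenced
// above.
func listRunning(ctx context.Context, svc *compute.Service) error {
	req := &compute.RegionInstanceGroupsListInstancesRequest{
		InstanceState: "RUNNING", // "ALL" would include stopped members too
		PortName:      "http",
	}
	resp, err := svc.RegionInstanceGroups.ListInstances("my-project", "us-central1", "my-regional-ig", req).Context(ctx).Do()
	if err != nil {
		return err
	}
	for _, item := range resp.Items {
		fmt.Printf("%s is %s\n", item.Instance, item.Status)
	}
	return nil
}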
-func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { +func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { +func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { c.ctx_ = ctx return c } -func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + for k, v := range c.header_ { + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "machineType": c.machineType, + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineTypes.get" call. -// Exactly one of *MachineType or error will be non-nil. Any non-2xx +// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *MachineType.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { +func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -28456,7 +41751,7 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &MachineType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -28468,19 +41763,18 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er } return ret, nil // { - // "description": "Returns the specified machine type. Get a list of available machine types by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.machineTypes.get", + // "description": "Sets the named ports for the specified regional instance group.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroups.setNamedPorts", // "parameterOrder": [ // "project", - // "zone", - // "machineType" + // "region", + // "instanceGroup" // ], // "parameters": { - // "machineType": { - // "description": "Name of the machine type to return.", + // "instanceGroup": { + // "description": "The name of the regional instance group where the named ports are updated.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -28491,206 +41785,122 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/machineTypes/{machineType}", + // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", + // "request": { + // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" + // }, // "response": { - // "$ref": "MachineType" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.machineTypes.list": +// method id "compute.regionOperations.delete": -type MachineTypesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type RegionOperationsDeleteCall struct { + s *Service + project string + region string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of machine types available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/list -func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { - c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified region-specific Operations resource. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/delete +func (r *RegionOperationsService) Delete(project string, region string, operation string) *RegionOperationsDeleteCall { + c := &RegionOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. -func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { - c.urlParams_.Set("pageToken", pageToken) + c.region = region + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { +func (c *RegionOperationsDeleteCall) Fields(s ...googleapi.Field) *RegionOperationsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { +func (c *RegionOperationsDeleteCall) Context(ctx context.Context) *RegionOperationsDeleteCall { c.ctx_ = ctx return c } -func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionOperationsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + for k, v := range c.header_ { + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "region": c.region, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineTypes.list" call. -// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *MachineTypeList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { +// Do executes the "compute.regionOperations.delete" call. +func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &MachineTypeList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err + return err } - return ret, nil + return nil // { - // "description": "Retrieves a list of machine types available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.machineTypes.list", + // "description": "Deletes the specified region-specific Operations resource.", + // "httpMethod": "DELETE", + // "id": "compute.regionOperations.delete", // "parameterOrder": [ // "project", - // "zone" + // "region", + // "operation" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", - // "format": "uint32", - // "location": "query", - // "maximum": "500", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "operation": { + // "description": "Name of the Operations resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -28700,107 +41910,112 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/machineTypes", - // "response": { - // "$ref": "MachineTypeList" - // }, + // "path": "{project}/regions/{region}/operations/{operation}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.networks.delete": +// method id "compute.regionOperations.get": -type NetworksDeleteCall struct { - s *Service - project string - network string - urlParams_ gensupport.URLParams - ctx_ context.Context +type RegionOperationsGetCall struct { + s *Service + project string + region string + operation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified network. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/delete -func (r *NetworksService) Delete(project string, network string) *NetworksDeleteCall { - c := &NetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Retrieves the specified region-specific Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/get +func (r *RegionOperationsService) Get(project string, region string, operation string) *RegionOperationsGetCall { + c := &RegionOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network + c.region = region + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { +func (c *RegionOperationsGetCall) Fields(s ...googleapi.Field) *RegionOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionOperationsGetCall) IfNoneMatch(entityTag string) *RegionOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksDeleteCall) Context(ctx context.Context) *NetworksDeleteCall { +func (c *RegionOperationsGetCall) Context(ctx context.Context) *RegionOperationsGetCall { c.ctx_ = ctx return c } -func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionOperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "region": c.region, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.delete" call. +// Do executes the "compute.regionOperations.get" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -28831,16 +42046,17 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Deletes the specified network.", - // "httpMethod": "DELETE", - // "id": "compute.networks.delete", + // "description": "Retrieves the specified region-specific Operations resource.", + // "httpMethod": "GET", + // "id": "compute.regionOperations.get", // "parameterOrder": [ // "project", - // "network" + // "region", + // "operation" // ], // "parameters": { - // "network": { - // "description": "Name of the network to delete.", + // "operation": { + // "description": "Name of the Operations resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -28852,45 +42068,121 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}", + // "path": "{project}/regions/{region}/operations/{operation}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.networks.get": +// method id "compute.regionOperations.list": -type NetworksGetCall struct { +type RegionOperationsListCall struct { s *Service project string - network string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// Get: Returns the specified network. Get a list of available networks -// by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/get -func (r *NetworksService) Get(project string, network string) *NetworksGetCall { - c := &NetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of Operation resources contained within the +// specified region. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/list +func (r *RegionOperationsService) List(project string, region string) *RegionOperationsListCall { + c := &RegionOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network + c.region = region + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). 
For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionOperationsListCall) OrderBy(orderBy string) *RegionOperationsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperationsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall { +func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -28900,7 +42192,7 @@ func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
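// Illustrative sketch, not part of the generated file: paginating the new
// compute.regionOperations.list call using the Filter and Pages helpers shown in
// this hunk. Assumes import compute "google.golang.org/api/compute/v1" plus
// "context" and "fmt"; OperationList.Items and Operation.Name/Status are assumed
// Compute API fields not shown here.
func listRunningRegionOps(ctx context.Context, svc *compute.Service, project, region string) error {
	call := svc.RegionOperations.List(project, region).Filter("status eq RUNNING")
	return call.Pages(ctx, func(page *compute.OperationList) error {
		for _, op := range page.Items {
			fmt.Printf("%s: %s\n", op.Name, op.Status)
		}
		return nil // a non-nil error here would halt the page iteration
	})
}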
-func (c *NetworksGetCall) IfNoneMatch(entityTag string) *NetworksGetCall { +func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperationsListCall { c.ifNoneMatch_ = entityTag return c } @@ -28908,38 +42200,50 @@ func (c *NetworksGetCall) IfNoneMatch(entityTag string) *NetworksGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksGetCall) Context(ctx context.Context) *NetworksGetCall { +func (c *RegionOperationsListCall) Context(ctx context.Context) *RegionOperationsListCall { c.ctx_ = ctx return c } -func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionOperationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.get" call. -// Exactly one of *Network or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Network.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { +// Do executes the "compute.regionOperations.list" call. +// Exactly one of *OperationList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *OperationList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -28958,7 +42262,7 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Network{ + ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -28970,19 +42274,36 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { } return ret, nil // { - // "description": "Returns the specified network. 
Get a list of available networks by making a list() request.", + // "description": "Retrieves a list of Operation resources contained within the specified region.", // "httpMethod": "GET", - // "id": "compute.networks.get", + // "id": "compute.regionOperations.list", // "parameterOrder": [ // "project", - // "network" + // "region" // ], // "parameters": { - // "network": { - // "description": "Name of the network to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -28991,11 +42312,18 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}", + // "path": "{project}/regions/{region}/operations", // "response": { - // "$ref": "Network" + // "$ref": "OperationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -29006,70 +42334,114 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { } -// method id "compute.networks.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type NetworksInsertCall struct { - s *Service - project string - network *Network - urlParams_ gensupport.URLParams - ctx_ context.Context +// method id "compute.regions.get": + +type RegionsGetCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a network in the specified project using the data -// included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/insert -func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall { - c := &NetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Region resource. Get a list of available +// regions by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/get +func (r *RegionsService) Get(project string, region string) *RegionsGetCall { + c := &RegionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network + c.region = region return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { +func (c *RegionsGetCall) Fields(s ...googleapi.Field) *RegionsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *RegionsGetCall) IfNoneMatch(entityTag string) *RegionsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksInsertCall) Context(ctx context.Context) *NetworksInsertCall { +func (c *RegionsGetCall) Context(ctx context.Context) *RegionsGetCall { c.ctx_ = ctx return c } -func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.network) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regions.get" call. +// Exactly one of *Region or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Region.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29088,7 +42460,7 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Region{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -29100,11 +42472,12 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Creates a network in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.networks.insert", + // "description": "Returns the specified Region resource. 
Get a list of available regions by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.regions.get", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "project": { @@ -29113,38 +42486,44 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/networks", - // "request": { - // "$ref": "Network" - // }, + // "path": "{project}/regions/{region}", // "response": { - // "$ref": "Operation" + // "$ref": "Region" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.networks.list": +// method id "compute.regions.list": -type NetworksListCall struct { +type RegionsListCall struct { s *Service project string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of networks available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/list -func (r *NetworksService) List(project string) *NetworksListCall { - c := &NetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of region resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/list +func (r *RegionsService) List(project string) *RegionsListCall { + c := &RegionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -29176,7 +42555,7 @@ func (r *NetworksService) List(project string) *NetworksListCall { // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *NetworksListCall) Filter(filter string) *NetworksListCall { +func (c *RegionsListCall) Filter(filter string) *RegionsListCall { c.urlParams_.Set("filter", filter) return c } @@ -29186,15 +42565,32 @@ func (c *NetworksListCall) Filter(filter string) *NetworksListCall { // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in // subsequent list requests. -func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall { +func (c *RegionsListCall) MaxResults(maxResults int64) *RegionsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. 
+// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionsListCall) OrderBy(orderBy string) *RegionsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { +func (c *RegionsListCall) PageToken(pageToken string) *RegionsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -29202,7 +42598,7 @@ func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { +func (c *RegionsListCall) Fields(s ...googleapi.Field) *RegionsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -29212,7 +42608,7 @@ func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { +func (c *RegionsListCall) IfNoneMatch(entityTag string) *RegionsListCall { c.ifNoneMatch_ = entityTag return c } @@ -29220,20 +42616,32 @@ func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksListCall) Context(ctx context.Context) *NetworksListCall { +func (c *RegionsListCall) Context(ctx context.Context) *RegionsListCall { c.ctx_ = ctx return c } -func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -29243,14 +42651,14 @@ func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.list" call. -// Exactly one of *NetworkList or error will be non-nil. Any non-2xx +// Do executes the "compute.regions.list" call. +// Exactly one of *RegionList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *NetworkList.ServerResponse.Header or (if a response was returned at +// *RegionList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error) { +func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29269,7 +42677,7 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NetworkList{ + ret := &RegionList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -29281,9 +42689,9 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error } return ret, nil // { - // "description": "Retrieves the list of networks available to the specified project.", + // "description": "Retrieves the list of region resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.networks.list", + // "id": "compute.regions.list", // "parameterOrder": [ // "project" // ], @@ -29302,6 +42710,11 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -29315,9 +42728,9 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error // "type": "string" // } // }, - // "path": "{project}/global/networks", + // "path": "{project}/regions", // "response": { - // "$ref": "NetworkList" + // "$ref": "RegionList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -29331,7 +42744,7 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error) error { +func (c *RegionsListCall) Pages(ctx context.Context, f func(*RegionList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -29349,28 +42762,95 @@ func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error } } -// method id "compute.projects.get": +// method id "compute.routers.aggregatedList": -type ProjectsGetCall struct { +type RoutersAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Project resource. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/get -func (r *ProjectsService) Get(project string) *ProjectsGetCall { - c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of routers. +func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCall { + c := &RoutersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *RoutersAggregatedListCall) Filter(filter string) *RoutersAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *RoutersAggregatedListCall) MaxResults(maxResults int64) *RoutersAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RoutersAggregatedListCall) OrderBy(orderBy string) *RoutersAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. 
Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RoutersAggregatedListCall) PageToken(pageToken string) *RoutersAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { +func (c *RoutersAggregatedListCall) Fields(s ...googleapi.Field) *RoutersAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -29380,7 +42860,7 @@ func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { +func (c *RoutersAggregatedListCall) IfNoneMatch(entityTag string) *RoutersAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -29388,20 +42868,32 @@ func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall { +func (c *RoutersAggregatedListCall) Context(ctx context.Context) *RoutersAggregatedListCall { c.ctx_ = ctx return c } -func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RoutersAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RoutersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/routers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -29411,14 +42903,14 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.get" call. -// Exactly one of *Project or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Project.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { +// Do executes the "compute.routers.aggregatedList" call. +// Exactly one of *RouterAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RouterAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29437,7 +42929,7 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Project{ + ret := &RouterAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -29449,13 +42941,37 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { } return ret, nil // { - // "description": "Returns the specified Project resource.", + // "description": "Retrieves an aggregated list of routers.", // "httpMethod": "GET", - // "id": "compute.projects.get", + // "id": "compute.routers.aggregatedList", // "parameterOrder": [ // "project" // ], // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -29464,9 +42980,9 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { // "type": "string" // } // }, - // "path": "{project}", + // "path": "{project}/aggregated/routers", // "response": { - // "$ref": "Project" + // "$ref": "RouterAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -29477,28 +42993,52 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { } -// method id "compute.projects.moveDisk": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RoutersAggregatedListCall) Pages(ctx context.Context, f func(*RouterAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ProjectsMoveDiskCall struct { - s *Service - project string - diskmoverequest *DiskMoveRequest - urlParams_ gensupport.URLParams - ctx_ context.Context +// method id "compute.routers.delete": + +type RoutersDeleteCall struct { + s *Service + project string + region string + router string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// MoveDisk: Moves a persistent disk from one zone to another. -func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequest) *ProjectsMoveDiskCall { - c := &ProjectsMoveDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Router resource. +func (r *RoutersService) Delete(project string, region string, router string) *RoutersDeleteCall { + c := &RoutersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.diskmoverequest = diskmoverequest + c.region = region + c.router = router return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCall { +func (c *RoutersDeleteCall) Fields(s ...googleapi.Field) *RoutersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -29506,39 +43046,48 @@ func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *ProjectsMoveDiskCall) Context(ctx context.Context) *ProjectsMoveDiskCall { +func (c *RoutersDeleteCall) Context(ctx context.Context) *RoutersDeleteCall { c.ctx_ = ctx return c } -func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RoutersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.diskmoverequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, + "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.moveDisk" call. +// Do executes the "compute.routers.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29569,11 +43118,13 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Moves a persistent disk from one zone to another.", - // "httpMethod": "POST", - // "id": "compute.projects.moveDisk", + // "description": "Deletes the specified Router resource.", + // "httpMethod": "DELETE", + // "id": "compute.routers.delete", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "router" // ], // "parameters": { // "project": { @@ -29582,12 +43133,23 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/moveDisk", - // "request": { - // "$ref": "DiskMoveRequest" - // }, + // "path": "{project}/regions/{region}/routers/{router}", // "response": { // "$ref": "Operation" // }, @@ -29599,69 +43161,95 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.projects.moveInstance": - -type ProjectsMoveInstanceCall struct { - s *Service - project string - instancemoverequest *InstanceMoveRequest - urlParams_ gensupport.URLParams - ctx_ context.Context -} +// method id "compute.routers.get": -// MoveInstance: Moves an instance and its attached persistent disks -// from one zone to another. -func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall { - c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +type RoutersGetCall struct { + s *Service + project string + region string + router string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified Router resource. Get a list of available +// routers by making a list() request. +func (r *RoutersService) Get(project string, region string, router string) *RoutersGetCall { + c := &RoutersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instancemoverequest = instancemoverequest + c.region = region + c.router = router return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveInstanceCall { +func (c *RoutersGetCall) Fields(s ...googleapi.Field) *RoutersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *RoutersGetCall) IfNoneMatch(entityTag string) *RoutersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveInstanceCall { +func (c *RoutersGetCall) Context(ctx context.Context) *RoutersGetCall { c.ctx_ = ctx return c } -func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RoutersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveInstance") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, + "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.moveInstance" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.routers.get" call. +// Exactly one of *Router or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Router.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29680,7 +43268,7 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Router{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -29692,11 +43280,13 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Moves an instance and its attached persistent disks from one zone to another.", - // "httpMethod": "POST", - // "id": "compute.projects.moveInstance", + // "description": "Returns the specified Router resource. Get a list of available routers by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.routers.get", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "router" // ], // "parameters": { // "project": { @@ -29705,87 +43295,124 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/moveInstance", - // "request": { - // "$ref": "InstanceMoveRequest" - // }, + // "path": "{project}/regions/{region}/routers/{router}", // "response": { - // "$ref": "Operation" + // "$ref": "Router" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.projects.setCommonInstanceMetadata": +// method id "compute.routers.getRouterStatus": -type ProjectsSetCommonInstanceMetadataCall struct { - s *Service - project string - metadata *Metadata - urlParams_ gensupport.URLParams - ctx_ context.Context +type RoutersGetRouterStatusCall struct { + s *Service + project string + region string + router string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetCommonInstanceMetadata: Sets metadata common to all instances -// within the specified project using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setCommonInstanceMetadata -func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall { - c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetRouterStatus: Retrieves runtime information of the specified +// router. 
+func (r *RoutersService) GetRouterStatus(project string, region string, router string) *RoutersGetRouterStatusCall { + c := &RoutersGetRouterStatusCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.metadata = metadata + c.region = region + c.router = router return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall { +func (c *RoutersGetRouterStatusCall) Fields(s ...googleapi.Field) *RoutersGetRouterStatusCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RoutersGetRouterStatusCall) IfNoneMatch(entityTag string) *RoutersGetRouterStatusCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *ProjectsSetCommonInstanceMetadataCall { +func (c *RoutersGetRouterStatusCall) Context(ctx context.Context) *RoutersGetRouterStatusCall { c.ctx_ = ctx return c } -func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RoutersGetRouterStatusCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setCommonInstanceMetadata") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/getRouterStatus") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, + "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.setCommonInstanceMetadata" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.routers.getRouterStatus" call. +// Exactly one of *RouterStatusResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RouterStatusResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterStatusResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29804,7 +43431,7 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &RouterStatusResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -29816,11 +43443,13 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets metadata common to all instances within the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.projects.setCommonInstanceMetadata", + // "description": "Retrieves runtime information of the specified router.", + // "httpMethod": "GET", + // "id": "compute.routers.getRouterStatus", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "router" // ], // "parameters": { // "project": { @@ -29829,49 +43458,61 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to query.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/setCommonInstanceMetadata", - // "request": { - // "$ref": "Metadata" - // }, + // "path": "{project}/regions/{region}/routers/{router}/getRouterStatus", // "response": { - // "$ref": "Operation" + // "$ref": "RouterStatusResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.projects.setUsageExportBucket": +// method id "compute.routers.insert": -type ProjectsSetUsageExportBucketCall struct { - s *Service - project string - usageexportlocation *UsageExportLocation - urlParams_ gensupport.URLParams - ctx_ context.Context +type RoutersInsertCall struct { + s *Service + project string + region string + router *Router + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetUsageExportBucket: Enables the usage export feature and sets the -// usage export bucket where reports are stored. 
If you provide an empty -// request body using this method, the usage export feature will be -// disabled. -// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setUsageExportBucket -func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall { - c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a Router resource in the specified project and region +// using the data included in the request. +func (r *RoutersService) Insert(project string, region string, router *Router) *RoutersInsertCall { + c := &RoutersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.usageexportlocation = usageexportlocation + c.region = region + c.router = router return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall { +func (c *RoutersInsertCall) Fields(s ...googleapi.Field) *RoutersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -29879,39 +43520,52 @@ func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *Project // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *ProjectsSetUsageExportBucketCall { +func (c *RoutersInsertCall) Context(ctx context.Context) *RoutersInsertCall { c.ctx_ = ctx return c } -func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RoutersInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RoutersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.router) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setUsageExportBucket") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.setUsageExportBucket" call. +// Do executes the "compute.routers.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29942,11 +43596,12 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.", + // "description": "Creates a Router resource in the specified project and region using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.projects.setUsageExportBucket", + // "id": "compute.routers.insert", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "project": { @@ -29955,107 +43610,243 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/setUsageExportBucket", + // "path": "{project}/regions/{region}/routers", // "request": { - // "$ref": "UsageExportLocation" + // "$ref": "Router" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionOperations.delete": +// method id "compute.routers.list": -type RegionOperationsDeleteCall struct { - s *Service - project string - region string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context +type RoutersListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified region-specific Operations resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/delete -func (r *RegionOperationsService) Delete(project string, region string, operation string) *RegionOperationsDeleteCall { - c := &RegionOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of Router resources available to the specified +// project. +func (r *RoutersService) List(project string, region string) *RoutersListCall { + c := &RoutersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.operation = operation + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. 
Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *RoutersListCall) Filter(filter string) *RoutersListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *RoutersListCall) MaxResults(maxResults int64) *RoutersListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RoutersListCall) OrderBy(orderBy string) *RoutersListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RoutersListCall) PageToken(pageToken string) *RoutersListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionOperationsDeleteCall) Fields(s ...googleapi.Field) *RegionOperationsDeleteCall { +func (c *RoutersListCall) Fields(s ...googleapi.Field) *RoutersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *RoutersListCall) IfNoneMatch(entityTag string) *RoutersListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionOperationsDeleteCall) Context(ctx context.Context) *RegionOperationsDeleteCall { +func (c *RoutersListCall) Context(ctx context.Context) *RoutersListCall { c.ctx_ = ctx return c } -func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RoutersListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "operation": c.operation, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionOperations.delete" call. -func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "compute.routers.list" call. +// Exactly one of *RouterList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *RouterList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return nil, err } - return nil + ret := &RouterList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil // { - // "description": "Deletes the specified region-specific Operations resource.", - // "httpMethod": "DELETE", - // "id": "compute.regionOperations.delete", + // "description": "Retrieves a list of Router resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.routers.list", // "parameterOrder": [ // "project", - // "region", - // "operation" + // "region" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -30073,91 +43864,122 @@ func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { // "type": "string" // } // }, - // "path": "{project}/regions/{region}/operations/{operation}", + // "path": "{project}/regions/{region}/routers", + // "response": { + // "$ref": "RouterList" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionOperations.get": - -type RegionOperationsGetCall struct { - s *Service - project string - region string - operation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RoutersListCall) Pages(ctx context.Context, f func(*RouterList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// Get: Retrieves the specified region-specific Operations resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/get -func (r *RegionOperationsService) Get(project string, region string, operation string) *RegionOperationsGetCall { - c := &RegionOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// method id "compute.routers.patch": + +type RoutersPatchCall struct { + s *Service + project string + region string + router string + router2 *Router + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the specified Router resource with the data included +// in the request. This method supports patch semantics. +func (r *RoutersService) Patch(project string, region string, router string, router2 *Router) *RoutersPatchCall { + c := &RoutersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.operation = operation + c.router = router + c.router2 = router2 return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionOperationsGetCall) Fields(s ...googleapi.Field) *RegionOperationsGetCall { +func (c *RoutersPatchCall) Fields(s ...googleapi.Field) *RoutersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionOperationsGetCall) IfNoneMatch(entityTag string) *RegionOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionOperationsGetCall) Context(ctx context.Context) *RegionOperationsGetCall { +func (c *RoutersPatchCall) Context(ctx context.Context) *RoutersPatchCall { c.ctx_ = ctx return c } -func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RoutersPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RoutersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + for k, v := range c.header_ { + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "operation": c.operation, + "project": c.project, + "region": c.region, + "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionOperations.get" call. +// Do executes the "compute.routers.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -30188,22 +44010,15 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Retrieves the specified region-specific Operations resource.", - // "httpMethod": "GET", - // "id": "compute.regionOperations.get", + // "description": "Updates the specified Router resource with the data included in the request. 
This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "compute.routers.patch", // "parameterOrder": [ // "project", // "region", - // "operation" + // "router" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -30217,145 +44032,113 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/operations/{operation}", + // "path": "{project}/regions/{region}/routers/{router}", + // "request": { + // "$ref": "Router" + // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionOperations.list": +// method id "compute.routers.preview": -type RegionOperationsListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type RoutersPreviewCall struct { + s *Service + project string + region string + router string + router2 *Router + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of Operation resources contained within the -// specified region. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/list -func (r *RegionOperationsService) List(project string, region string) *RegionOperationsListCall { - c := &RegionOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Preview: Preview fields auto-generated during router create and +// update operations. Calling this method does NOT create or update the +// router. +func (r *RoutersService) Preview(project string, region string, router string, router2 *Router) *RoutersPreviewCall { + c := &RoutersPreviewCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. 
For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. -func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperationsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperationsListCall { - c.urlParams_.Set("pageToken", pageToken) + c.router = router + c.router2 = router2 return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperationsListCall { +func (c *RoutersPreviewCall) Fields(s ...googleapi.Field) *RoutersPreviewCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionOperationsListCall) Context(ctx context.Context) *RegionOperationsListCall { +func (c *RoutersPreviewCall) Context(ctx context.Context) *RoutersPreviewCall { c.ctx_ = ctx return c } -func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RoutersPreviewCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RoutersPreviewCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + for k, v := range c.header_ { + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/preview") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, + "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionOperations.list" call. -// Exactly one of *OperationList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *OperationList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.routers.preview" call. +// Exactly one of *RoutersPreviewResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RoutersPreviewResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { +func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -30374,7 +44157,7 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &OperationList{ + ret := &RoutersPreviewResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -30386,33 +44169,15 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL } return ret, nil // { - // "description": "Retrieves a list of Operation resources contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.regionOperations.list", + // "description": "Preview fields auto-generated during router create and update operations. Calling this method does NOT create or update the router.", + // "httpMethod": "POST", + // "id": "compute.routers.preview", // "parameterOrder": [ // "project", - // "region" + // "region", + // "router" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. 
Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", - // "format": "uint32", - // "location": "query", - // "maximum": "500", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -30426,11 +44191,21 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to query.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/operations", + // "path": "{project}/regions/{region}/routers/{router}/preview", + // "request": { + // "$ref": "Router" + // }, // "response": { - // "$ref": "OperationList" + // "$ref": "RoutersPreviewResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -30441,101 +44216,88 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regions.get": +// method id "compute.routers.update": -type RegionsGetCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type RoutersUpdateCall struct { + s *Service + project string + region string + router string + router2 *Router + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Region resource. Get a list of available -// regions by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/get -func (r *RegionsService) Get(project string, region string) *RegionsGetCall { - c := &RegionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified Router resource with the data included +// in the request. +func (r *RoutersService) Update(project string, region string, router string, router2 *Router) *RoutersUpdateCall { + c := &RoutersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region + c.router = router + c.router2 = router2 return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionsGetCall) Fields(s ...googleapi.Field) *RegionsGetCall { +func (c *RoutersUpdateCall) Fields(s ...googleapi.Field) *RoutersUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionsGetCall) IfNoneMatch(entityTag string) *RegionsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionsGetCall) Context(ctx context.Context) *RegionsGetCall { +func (c *RoutersUpdateCall) Context(ctx context.Context) *RoutersUpdateCall { c.ctx_ = ctx return c } -func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RoutersUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RoutersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + for k, v := range c.header_ { + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, + "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regions.get" call. -// Exactly one of *Region or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Region.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { +// Do executes the "compute.routers.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -30554,7 +44316,7 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Region{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -30566,12 +44328,13 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { } return ret, nil // { - // "description": "Returns the specified Region resource. 
Get a list of available regions by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.regions.get", + // "description": "Updates the specified Router resource with the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.routers.update", // "parameterOrder": [ // "project", - // "region" + // "region", + // "router" // ], // "parameters": { // "project": { @@ -30582,147 +44345,107 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { // "type": "string" // }, // "region": { - // "description": "Name of the region resource to return.", + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}", + // "path": "{project}/regions/{region}/routers/{router}", + // "request": { + // "$ref": "Router" + // }, // "response": { - // "$ref": "Region" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regions.list": +// method id "compute.routes.delete": -type RegionsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type RoutesDeleteCall struct { + s *Service + project string + route string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of region resources available to the -// specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/list -func (r *RegionsService) List(project string) *RegionsListCall { - c := &RegionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Route resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/delete +func (r *RoutesService) Delete(project string, route string) *RoutesDeleteCall { + c := &RoutesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. 
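Editorial aside: the compute.routers.update call earlier in this hunk is one of the call types that gains the new Header() hook in this bump. A rough sketch of combining it with Do; the computeexamples package name, the X-Example-Audit header, and the assumption that svc was built as in the previous sketch are illustrative only, not part of the API.

package computeexamples

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// updateRouter sends a PUT for the named router (compute.routers.update) and
// returns the resulting Operation. The custom header is a hypothetical example
// of the new Header() hook; the API does not require it.
func updateRouter(ctx context.Context, svc *compute.Service, project, region string, r *compute.Router) (*compute.Operation, error) {
	call := svc.Routers.Update(project, region, r.Name, r)
	call.Header().Set("X-Example-Audit", "router-refresh") // hypothetical header
	return call.Context(ctx).Do()
}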
Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RegionsListCall) Filter(filter string) *RegionsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. -func (c *RegionsListCall) MaxResults(maxResults int64) *RegionsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionsListCall) PageToken(pageToken string) *RegionsListCall { - c.urlParams_.Set("pageToken", pageToken) + c.route = route return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionsListCall) Fields(s ...googleapi.Field) *RegionsListCall { +func (c *RoutesDeleteCall) Fields(s ...googleapi.Field) *RoutesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionsListCall) IfNoneMatch(entityTag string) *RegionsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionsListCall) Context(ctx context.Context) *RegionsListCall { +func (c *RoutesDeleteCall) Context(ctx context.Context) *RoutesDeleteCall { c.ctx_ = ctx return c } -func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RoutesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + for k, v := range c.header_ { + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "route": c.route, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regions.list" call. -// Exactly one of *RegionList or error will be non-nil. Any non-2xx +// Do executes the "compute.routes.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *RegionList.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) { +func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -30741,7 +44464,7 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -30753,145 +44476,67 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) } return ret, nil // { - // "description": "Retrieves the list of region resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.regions.list", + // "description": "Deletes the specified Route resource.", + // "httpMethod": "DELETE", + // "id": "compute.routes.delete", // "parameterOrder": [ - // "project" + // "project", + // "route" // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", - // "format": "uint32", - // "location": "query", - // "maximum": "500", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, + // "parameters": { // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "route": { + // "description": "Name of the Route resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions", + // "path": "{project}/global/routes/{route}", // "response": { - // "$ref": "RegionList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionsListCall) Pages(ctx context.Context, f func(*RegionList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.routers.aggregatedList": +// method id "compute.routes.get": -type RoutersAggregatedListCall struct { +type RoutesGetCall struct { s *Service project string + route string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of routers. -func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCall { - c := &RoutersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Route resource. Get a list of available +// routes by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/get +func (r *RoutesService) Get(project string, route string) *RoutesGetCall { + c := &RoutesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. 
Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RoutersAggregatedListCall) Filter(filter string) *RoutersAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. -func (c *RoutersAggregatedListCall) MaxResults(maxResults int64) *RoutersAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RoutersAggregatedListCall) PageToken(pageToken string) *RoutersAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) + c.route = route return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersAggregatedListCall) Fields(s ...googleapi.Field) *RoutersAggregatedListCall { +func (c *RoutesGetCall) Fields(s ...googleapi.Field) *RoutesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -30901,7 +44546,7 @@ func (c *RoutersAggregatedListCall) Fields(s ...googleapi.Field) *RoutersAggrega // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RoutersAggregatedListCall) IfNoneMatch(entityTag string) *RoutersAggregatedListCall { +func (c *RoutesGetCall) IfNoneMatch(entityTag string) *RoutesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -30909,37 +44554,50 @@ func (c *RoutersAggregatedListCall) IfNoneMatch(entityTag string) *RoutersAggreg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RoutersAggregatedListCall) Context(ctx context.Context) *RoutersAggregatedListCall { +func (c *RoutesGetCall) Context(ctx context.Context) *RoutesGetCall { c.ctx_ = ctx return c } -func (c *RoutersAggregatedListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RoutesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/routers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "route": c.route, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.aggregatedList" call. -// Exactly one of *RouterAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RouterAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAggregatedList, error) { +// Do executes the "compute.routes.get" call. +// Exactly one of *Route or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Route.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -30958,7 +44616,7 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RouterAggregatedList{ + ret := &Route{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -30970,43 +44628,32 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg } return ret, nil // { - // "description": "Retrieves an aggregated list of routers.", + // "description": "Returns the specified Route resource. Get a list of available routes by making a list() request.", // "httpMethod": "GET", - // "id": "compute.routers.aggregatedList", + // "id": "compute.routes.get", // "parameterOrder": [ - // "project" + // "project", + // "route" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. 
Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", - // "format": "uint32", - // "location": "query", - // "maximum": "500", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "route": { + // "description": "Name of the Route resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/routers", + // "path": "{project}/global/routes/{route}", // "response": { - // "$ref": "RouterAggregatedList" + // "$ref": "Route" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -31017,51 +44664,31 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
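Editorial aside: compute.routes.get above supports partial responses via Fields, per the PartialResponse link in its doc comment. A small sketch, again assuming an initialized *compute.Service as in the first example; the requested field names are ordinary Route fields.

package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// describeRoute fetches only the name, destination range and next-hop gateway
// of a route, trimming the response via the Fields partial-response hook.
func describeRoute(ctx context.Context, svc *compute.Service, project, route string) error {
	r, err := svc.Routes.Get(project, route).
		Fields("name", "destRange", "nextHopGateway").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Printf("%s -> %s via %s\n", r.Name, r.DestRange, r.NextHopGateway)
	return nil
}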
-func (c *RoutersAggregatedListCall) Pages(ctx context.Context, f func(*RouterAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.routers.delete": +// method id "compute.routes.insert": -type RoutersDeleteCall struct { +type RoutesInsertCall struct { s *Service project string - region string - router string + route *Route urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified Router resource. -func (r *RoutersService) Delete(project string, region string, router string) *RoutersDeleteCall { - c := &RoutersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a Route resource in the specified project using the +// data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/insert +func (r *RoutesService) Insert(project string, route *Route) *RoutesInsertCall { + c := &RoutesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.router = router + c.route = route return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersDeleteCall) Fields(s ...googleapi.Field) *RoutersDeleteCall { +func (c *RoutesInsertCall) Fields(s ...googleapi.Field) *RoutesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -31069,36 +44696,51 @@ func (c *RoutersDeleteCall) Fields(s ...googleapi.Field) *RoutersDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersDeleteCall) Context(ctx context.Context) *RoutersDeleteCall { +func (c *RoutesInsertCall) Context(ctx context.Context) *RoutesInsertCall { c.ctx_ = ctx return c } -func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RoutesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.route) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, - "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.delete" call. +// Do executes the "compute.routes.insert" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -31129,13 +44771,11 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified Router resource.", - // "httpMethod": "DELETE", - // "id": "compute.routers.delete", + // "description": "Creates a Route resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.routes.insert", // "parameterOrder": [ - // "project", - // "region", - // "router" + // "project" // ], // "parameters": { // "project": { @@ -31144,23 +44784,12 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "router": { - // "description": "Name of the Router resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers/{router}", + // "path": "{project}/global/routes", + // "request": { + // "$ref": "Route" + // }, // "response": { // "$ref": "Operation" // }, @@ -31172,32 +44801,97 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.routers.get": +// method id "compute.routes.list": -type RoutersGetCall struct { +type RoutesListCall struct { s *Service project string - region string - router string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Router resource. Get a list of available -// routers by making a list() request. -func (r *RoutersService) Get(project string, region string, router string) *RoutersGetCall { - c := &RoutersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of Route resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/list +func (r *RoutesService) List(project string) *RoutesListCall { + c := &RoutesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.router = router + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. 
Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *RoutesListCall) Filter(filter string) *RoutesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *RoutesListCall) MaxResults(maxResults int64) *RoutesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RoutesListCall) OrderBy(orderBy string) *RoutesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RoutesListCall) PageToken(pageToken string) *RoutesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersGetCall) Fields(s ...googleapi.Field) *RoutersGetCall { +func (c *RoutesListCall) Fields(s ...googleapi.Field) *RoutesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -31207,7 +44901,7 @@ func (c *RoutersGetCall) Fields(s ...googleapi.Field) *RoutersGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
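Editorial aside: compute.routes.list now documents orderBy alongside filter, maxResults and pageToken. A sketch of one manually paged request under the same assumptions as the earlier examples; the filter expression simply follows the documented field_name eq literal_string form.

package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// newestDefaultRoutes lists routes whose destRange is 0.0.0.0/0, newest first,
// fetching at most one page of 50 results and reporting whether more remain.
func newestDefaultRoutes(ctx context.Context, svc *compute.Service, project string) error {
	list, err := svc.Routes.List(project).
		Filter("destRange eq 0.0.0.0/0"). // illustrative filter in the documented eq form
		OrderBy("creationTimestamp desc").
		MaxResults(50).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, r := range list.Items {
		fmt.Println(r.Name, r.DestRange)
	}
	if list.NextPageToken != "" {
		fmt.Println("more results available; pass NextPageToken to PageToken()")
	}
	return nil
}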
-func (c *RoutersGetCall) IfNoneMatch(entityTag string) *RoutersGetCall { +func (c *RoutesListCall) IfNoneMatch(entityTag string) *RoutesListCall { c.ifNoneMatch_ = entityTag return c } @@ -31215,39 +44909,49 @@ func (c *RoutersGetCall) IfNoneMatch(entityTag string) *RoutersGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersGetCall) Context(ctx context.Context) *RoutersGetCall { +func (c *RoutesListCall) Context(ctx context.Context) *RoutesListCall { c.ctx_ = ctx return c } -func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RoutesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, - "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.get" call. -// Exactly one of *Router or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Router.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { +// Do executes the "compute.routes.list" call. +// Exactly one of *RouteList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *RouteList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -31266,7 +44970,7 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Router{ + ret := &RouteList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -31278,40 +44982,48 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { } return ret, nil // { - // "description": "Returns the specified Router resource. 
Get a list of available routers by making a list() request.", + // "description": "Retrieves the list of Route resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.routers.get", + // "id": "compute.routes.list", // "parameterOrder": [ - // "project", - // "region", - // "router" + // "project" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "router": { - // "description": "Name of the Router resource to return.", + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers/{router}", + // "path": "{project}/global/routes", // "response": { - // "$ref": "Router" + // "$ref": "RouteList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -31322,82 +45034,105 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { } -// method id "compute.routers.getRouterStatus": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RoutesListCall) Pages(ctx context.Context, f func(*RouteList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RoutersGetRouterStatusCall struct { - s *Service - project string - region string - router string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +// method id "compute.snapshots.delete": + +type SnapshotsDeleteCall struct { + s *Service + project string + snapshot string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetRouterStatus: Retrieves runtime information of the specified -// router. -func (r *RoutersService) GetRouterStatus(project string, region string, router string) *RoutersGetRouterStatusCall { - c := &RoutersGetRouterStatusCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Snapshot resource. Keep in mind that +// deleting a single snapshot might not necessarily delete all the data +// on that snapshot. If any data on the snapshot that is marked for +// deletion is needed for subsequent snapshots, the data will be moved +// to the next corresponding snapshot. +// +// For more information, see Deleting snaphots. +// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/delete +func (r *SnapshotsService) Delete(project string, snapshot string) *SnapshotsDeleteCall { + c := &SnapshotsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.router = router + c.snapshot = snapshot return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersGetRouterStatusCall) Fields(s ...googleapi.Field) *RoutersGetRouterStatusCall { +func (c *SnapshotsDeleteCall) Fields(s ...googleapi.Field) *SnapshotsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
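Editorial aside: the Pages helper added above wraps the NextPageToken loop from the previous sketch; a non-nil error from the callback halts iteration, as its doc comment says. A short sketch under the same assumptions as before.

package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// forEachRoute walks every page of compute.routes.list via Pages, invoking the
// callback once per page; returning a non-nil error stops the iteration.
func forEachRoute(ctx context.Context, svc *compute.Service, project string) error {
	return svc.Routes.List(project).Pages(ctx, func(page *compute.RouteList) error {
		for _, r := range page.Items {
			fmt.Println(r.Name)
		}
		return nil // keep paging
	})
}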
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RoutersGetRouterStatusCall) IfNoneMatch(entityTag string) *RoutersGetRouterStatusCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersGetRouterStatusCall) Context(ctx context.Context) *RoutersGetRouterStatusCall { +func (c *SnapshotsDeleteCall) Context(ctx context.Context) *SnapshotsDeleteCall { c.ctx_ = ctx return c } -func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SnapshotsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + for k, v := range c.header_ { + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/getRouterStatus") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "router": c.router, + "project": c.project, + "snapshot": c.snapshot, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.getRouterStatus" call. -// Exactly one of *RouterStatusResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RouterStatusResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterStatusResponse, error) { +// Do executes the "compute.snapshots.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -31416,7 +45151,7 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RouterStatusResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -31428,13 +45163,12 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt } return ret, nil // { - // "description": "Retrieves runtime information of the specified router.", - // "httpMethod": "GET", - // "id": "compute.routers.getRouterStatus", + // "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snaphots.", + // "httpMethod": "DELETE", + // "id": "compute.snapshots.delete", // "parameterOrder": [ // "project", - // "region", - // "router" + // "snapshot" // ], // "parameters": { // "project": { @@ -31444,100 +45178,113 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "router": { - // "description": "Name of the Router resource to query.", + // "snapshot": { + // "description": "Name of the Snapshot resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers/{router}/getRouterStatus", + // "path": "{project}/global/snapshots/{snapshot}", // "response": { - // "$ref": "RouterStatusResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.routers.insert": +// method id "compute.snapshots.get": -type RoutersInsertCall struct { - s *Service - project string - region string - router *Router - urlParams_ gensupport.URLParams - ctx_ context.Context +type SnapshotsGetCall struct { + s *Service + project string + snapshot string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a Router resource in the specified project and region -// using the data included in the request. -func (r *RoutersService) Insert(project string, region string, router *Router) *RoutersInsertCall { - c := &RoutersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Snapshot resource. Get a list of available +// snapshots by making a list() request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/get +func (r *SnapshotsService) Get(project string, snapshot string) *SnapshotsGetCall { + c := &SnapshotsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.router = router + c.snapshot = snapshot return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersInsertCall) Fields(s ...googleapi.Field) *RoutersInsertCall { +func (c *SnapshotsGetCall) Fields(s ...googleapi.Field) *SnapshotsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SnapshotsGetCall) IfNoneMatch(entityTag string) *SnapshotsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersInsertCall) Context(ctx context.Context) *RoutersInsertCall { +func (c *SnapshotsGetCall) Context(ctx context.Context) *SnapshotsGetCall { c.ctx_ = ctx return c } -func (c *RoutersInsertCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SnapshotsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.router) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "snapshot": c.snapshot, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.snapshots.get" call. +// Exactly one of *Snapshot or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Snapshot.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -31556,7 +45303,7 @@ func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Snapshot{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -31568,12 +45315,12 @@ func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates a Router resource in the specified project and region using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.routers.insert", + // "description": "Returns the specified Snapshot resource. Get a list of available snapshots by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.snapshots.get", // "parameterOrder": [ // "project", - // "region" + // "snapshot" // ], // "parameters": { // "project": { @@ -31583,46 +45330,44 @@ func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", + // "snapshot": { + // "description": "Name of the Snapshot resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers", - // "request": { - // "$ref": "Router" - // }, + // "path": "{project}/global/snapshots/{snapshot}", // "response": { - // "$ref": "Operation" + // "$ref": "Snapshot" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.routers.list": +// method id "compute.snapshots.list": -type RoutersListCall struct { +type SnapshotsListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of Router resources available to the specified -// project. -func (r *RoutersService) List(project string, region string) *RoutersListCall { - c := &RoutersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of Snapshot resources contained within the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/list +func (r *SnapshotsService) List(project string) *SnapshotsListCall { + c := &SnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } @@ -31653,7 +45398,7 @@ func (r *RoutersService) List(project string, region string) *RoutersListCall { // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. 
-func (c *RoutersListCall) Filter(filter string) *RoutersListCall { +func (c *SnapshotsListCall) Filter(filter string) *SnapshotsListCall { c.urlParams_.Set("filter", filter) return c } @@ -31663,15 +45408,32 @@ func (c *RoutersListCall) Filter(filter string) *RoutersListCall { // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in // subsequent list requests. -func (c *RoutersListCall) MaxResults(maxResults int64) *RoutersListCall { +func (c *SnapshotsListCall) MaxResults(maxResults int64) *SnapshotsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *SnapshotsListCall) OrderBy(orderBy string) *SnapshotsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RoutersListCall) PageToken(pageToken string) *RoutersListCall { +func (c *SnapshotsListCall) PageToken(pageToken string) *SnapshotsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -31679,7 +45441,7 @@ func (c *RoutersListCall) PageToken(pageToken string) *RoutersListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersListCall) Fields(s ...googleapi.Field) *RoutersListCall { +func (c *SnapshotsListCall) Fields(s ...googleapi.Field) *SnapshotsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -31689,7 +45451,7 @@ func (c *RoutersListCall) Fields(s ...googleapi.Field) *RoutersListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RoutersListCall) IfNoneMatch(entityTag string) *RoutersListCall { +func (c *SnapshotsListCall) IfNoneMatch(entityTag string) *SnapshotsListCall { c.ifNoneMatch_ = entityTag return c } @@ -31697,38 +45459,49 @@ func (c *RoutersListCall) IfNoneMatch(entityTag string) *RoutersListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersListCall) Context(ctx context.Context) *RoutersListCall { +func (c *SnapshotsListCall) Context(ctx context.Context) *SnapshotsListCall { c.ctx_ = ctx return c } -func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *SnapshotsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.list" call. -// Exactly one of *RouterList or error will be non-nil. Any non-2xx +// Do executes the "compute.snapshots.list" call. +// Exactly one of *SnapshotList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *RouterList.ServerResponse.Header or (if a response was returned at +// *SnapshotList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) { +func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -31747,7 +45520,7 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RouterList{ + ret := &SnapshotList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -31759,12 +45532,11 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) } return ret, nil // { - // "description": "Retrieves a list of Router resources available to the specified project.", + // "description": "Retrieves the list of Snapshot resources contained within the specified project.", // "httpMethod": "GET", - // "id": "compute.routers.list", + // "id": "compute.snapshots.list", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "filter": { @@ -31781,6 +45553,11 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -31792,18 +45569,11 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers", + // "path": "{project}/global/snapshots", // "response": { - // "$ref": "RouterList" + // "$ref": "SnapshotList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -31817,7 +45587,7 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RoutersListCall) Pages(ctx context.Context, f func(*RouterList) error) error { +func (c *SnapshotsListCall) Pages(ctx context.Context, f func(*SnapshotList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -31835,33 +45605,29 @@ func (c *RoutersListCall) Pages(ctx context.Context, f func(*RouterList) error) } } -// method id "compute.routers.patch": +// method id "compute.sslCertificates.delete": -type RoutersPatchCall struct { - s *Service - project string - region string - router string - router2 *Router - urlParams_ gensupport.URLParams - ctx_ context.Context +type SslCertificatesDeleteCall struct { + s *Service + project string + sslCertificate string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the entire content of the Router resource. This method -// supports patch semantics. -func (r *RoutersService) Patch(project string, region string, router string, router2 *Router) *RoutersPatchCall { - c := &RoutersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified SslCertificate resource. +func (r *SslCertificatesService) Delete(project string, sslCertificate string) *SslCertificatesDeleteCall { + c := &SslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.router = router - c.router2 = router2 + c.sslCertificate = sslCertificate return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersPatchCall) Fields(s ...googleapi.Field) *RoutersPatchCall { +func (c *SslCertificatesDeleteCall) Fields(s ...googleapi.Field) *SslCertificatesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -31869,41 +45635,47 @@ func (c *RoutersPatchCall) Fields(s ...googleapi.Field) *RoutersPatchCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RoutersPatchCall) Context(ctx context.Context) *RoutersPatchCall { +func (c *SslCertificatesDeleteCall) Context(ctx context.Context) *SslCertificatesDeleteCall { c.ctx_ = ctx return c } -func (c *RoutersPatchCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SslCertificatesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "router": c.router, + "project": c.project, + "sslCertificate": c.sslCertificate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.patch" call. +// Do executes the "compute.sslCertificates.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -31934,13 +45706,12 @@ func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Updates the entire content of the Router resource. 
This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "compute.routers.patch", + // "description": "Deletes the specified SslCertificate resource.", + // "httpMethod": "DELETE", + // "id": "compute.sslCertificates.delete", // "parameterOrder": [ // "project", - // "region", - // "router" + // "sslCertificate" // ], // "parameters": { // "project": { @@ -31950,25 +45721,15 @@ func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "router": { - // "description": "Name of the Router resource to update.", + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers/{router}", - // "request": { - // "$ref": "Router" - // }, + // "path": "{project}/global/sslCertificates/{sslCertificate}", // "response": { // "$ref": "Operation" // }, @@ -31980,221 +45741,92 @@ func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.routers.preview": +// method id "compute.sslCertificates.get": -type RoutersPreviewCall struct { - s *Service - project string - region string - router string - router2 *Router - urlParams_ gensupport.URLParams - ctx_ context.Context +type SslCertificatesGetCall struct { + s *Service + project string + sslCertificate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Preview: Preview fields auto-generated during router create and -// update operations. Calling this method does NOT create or update the -// router. -func (r *RoutersService) Preview(project string, region string, router string, router2 *Router) *RoutersPreviewCall { - c := &RoutersPreviewCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified SslCertificate resource. Get a list of +// available SSL certificates by making a list() request. +func (r *SslCertificatesService) Get(project string, sslCertificate string) *SslCertificatesGetCall { + c := &SslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.router = router - c.router2 = router2 + c.sslCertificate = sslCertificate return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersPreviewCall) Fields(s ...googleapi.Field) *RoutersPreviewCall { +func (c *SslCertificatesGetCall) Fields(s ...googleapi.Field) *SslCertificatesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *RoutersPreviewCall) Context(ctx context.Context) *RoutersPreviewCall { - c.ctx_ = ctx - return c -} - -func (c *RoutersPreviewCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/preview") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "router": c.router, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.routers.preview" call. -// Exactly one of *RoutersPreviewResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RoutersPreviewResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &RoutersPreviewResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Preview fields auto-generated during router create and update operations. 
Calling this method does NOT create or update the router.", - // "httpMethod": "POST", - // "id": "compute.routers.preview", - // "parameterOrder": [ - // "project", - // "region", - // "router" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "router": { - // "description": "Name of the Router resource to query.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/routers/{router}/preview", - // "request": { - // "$ref": "Router" - // }, - // "response": { - // "$ref": "RoutersPreviewResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.routers.update": - -type RoutersUpdateCall struct { - s *Service - project string - region string - router string - router2 *Router - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Update: Updates the entire content of the Router resource. -func (r *RoutersService) Update(project string, region string, router string, router2 *Router) *RoutersUpdateCall { - c := &RoutersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.router = router - c.router2 = router2 - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RoutersUpdateCall) Fields(s ...googleapi.Field) *RoutersUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SslCertificatesGetCall) IfNoneMatch(entityTag string) *SslCertificatesGetCall { + c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersUpdateCall) Context(ctx context.Context) *RoutersUpdateCall { +func (c *SslCertificatesGetCall) Context(ctx context.Context) *SslCertificatesGetCall { c.ctx_ = ctx return c } -func (c *RoutersUpdateCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *SslCertificatesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "router": c.router, + "project": c.project, + "sslCertificate": c.sslCertificate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.sslCertificates.get" call. +// Exactly one of *SslCertificate or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *SslCertificate.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertificate, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32213,7 +45845,7 @@ func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &SslCertificate{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -32225,13 +45857,12 @@ func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Updates the entire content of the Router resource.", - // "httpMethod": "PUT", - // "id": "compute.routers.update", + // "description": "Returns the specified SslCertificate resource. 
Get a list of available SSL certificates by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.sslCertificates.get", // "parameterOrder": [ // "project", - // "region", - // "router" + // "sslCertificate" // ], // "parameters": { // "project": { @@ -32241,59 +45872,51 @@ func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "router": { - // "description": "Name of the Router resource to update.", + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers/{router}", - // "request": { - // "$ref": "Router" - // }, + // "path": "{project}/global/sslCertificates/{sslCertificate}", // "response": { - // "$ref": "Operation" + // "$ref": "SslCertificate" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.routes.delete": +// method id "compute.sslCertificates.insert": -type RoutesDeleteCall struct { - s *Service - project string - route string - urlParams_ gensupport.URLParams - ctx_ context.Context +type SslCertificatesInsertCall struct { + s *Service + project string + sslcertificate *SslCertificate + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified Route resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/delete -func (r *RoutesService) Delete(project string, route string) *RoutesDeleteCall { - c := &RoutesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a SslCertificate resource in the specified project +// using the data included in the request. +func (r *SslCertificatesService) Insert(project string, sslcertificate *SslCertificate) *SslCertificatesInsertCall { + c := &SslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.route = route + c.sslcertificate = sslcertificate return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutesDeleteCall) Fields(s ...googleapi.Field) *RoutesDeleteCall { +func (c *SslCertificatesInsertCall) Fields(s ...googleapi.Field) *SslCertificatesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -32301,35 +45924,51 @@ func (c *RoutesDeleteCall) Fields(s ...googleapi.Field) *RoutesDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutesDeleteCall) Context(ctx context.Context) *RoutesDeleteCall { +func (c *SslCertificatesInsertCall) Context(ctx context.Context) *SslCertificatesInsertCall { c.ctx_ = ctx return c } -func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *SslCertificatesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertificate) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "route": c.route, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routes.delete" call. +// Do executes the "compute.sslCertificates.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32360,12 +45999,11 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified Route resource.", - // "httpMethod": "DELETE", - // "id": "compute.routes.delete", + // "description": "Creates a SslCertificate resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.sslCertificates.insert", // "parameterOrder": [ - // "project", - // "route" + // "project" // ], // "parameters": { // "project": { @@ -32374,16 +46012,12 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "route": { - // "description": "Name of the Route resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/global/routes/{route}", + // "path": "{project}/global/sslCertificates", + // "request": { + // "$ref": "SslCertificate" + // }, // "response": { // "$ref": "Operation" // }, @@ -32395,31 +46029,96 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.routes.get": +// method id "compute.sslCertificates.list": -type RoutesGetCall struct { +type SslCertificatesListCall struct { s *Service project string - route string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Route resource. 
Get a list of available -// routes by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/get -func (r *RoutesService) Get(project string, route string) *RoutesGetCall { - c := &RoutesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of SslCertificate resources available to the +// specified project. +func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { + c := &SslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.route = route + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *SslCertificatesListCall) Filter(filter string) *SslCertificatesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *SslCertificatesListCall) MaxResults(maxResults int64) *SslCertificatesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
+func (c *SslCertificatesListCall) OrderBy(orderBy string) *SslCertificatesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *SslCertificatesListCall) PageToken(pageToken string) *SslCertificatesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutesGetCall) Fields(s ...googleapi.Field) *RoutesGetCall { +func (c *SslCertificatesListCall) Fields(s ...googleapi.Field) *SslCertificatesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -32429,7 +46128,7 @@ func (c *RoutesGetCall) Fields(s ...googleapi.Field) *RoutesGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RoutesGetCall) IfNoneMatch(entityTag string) *RoutesGetCall { +func (c *SslCertificatesListCall) IfNoneMatch(entityTag string) *SslCertificatesListCall { c.ifNoneMatch_ = entityTag return c } @@ -32437,38 +46136,49 @@ func (c *RoutesGetCall) IfNoneMatch(entityTag string) *RoutesGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutesGetCall) Context(ctx context.Context) *RoutesGetCall { +func (c *SslCertificatesListCall) Context(ctx context.Context) *SslCertificatesListCall { c.ctx_ = ctx return c } -func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SslCertificatesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "route": c.route, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routes.get" call. -// Exactly one of *Route or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Route.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { +// Do executes the "compute.sslCertificates.list" call. +// Exactly one of *SslCertificateList or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *SslCertificateList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertificateList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32487,7 +46197,7 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Route{ + ret := &SslCertificateList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -32499,32 +46209,48 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { } return ret, nil // { - // "description": "Returns the specified Route resource. Get a list of available routes by making a list() request.", + // "description": "Retrieves the list of SslCertificate resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.routes.get", + // "id": "compute.sslCertificates.list", // "parameterOrder": [ - // "project", - // "route" + // "project" // ], // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "route": { - // "description": "Name of the Route resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/global/routes/{route}", + // "path": "{project}/global/sslCertificates", // "response": { - // "$ref": "Route" + // "$ref": "SslCertificateList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -32535,55 +46261,161 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { } -// method id "compute.routes.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *SslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertificateList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RoutesInsertCall struct { - s *Service - project string - route *Route - urlParams_ gensupport.URLParams - ctx_ context.Context +// method id "compute.subnetworks.aggregatedList": + +type SubnetworksAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a Route resource in the specified project using the -// data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/insert -func (r *RoutesService) Insert(project string, route *Route) *RoutesInsertCall { - c := &RoutesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of subnetworks. +func (r *SubnetworksService) AggregatedList(project string) *SubnetworksAggregatedListCall { + c := &SubnetworksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.route = route + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. 
The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *SubnetworksAggregatedListCall) Filter(filter string) *SubnetworksAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *SubnetworksAggregatedListCall) MaxResults(maxResults int64) *SubnetworksAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *SubnetworksAggregatedListCall) OrderBy(orderBy string) *SubnetworksAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *SubnetworksAggregatedListCall) PageToken(pageToken string) *SubnetworksAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutesInsertCall) Fields(s ...googleapi.Field) *RoutesInsertCall { +func (c *SubnetworksAggregatedListCall) Fields(s ...googleapi.Field) *SubnetworksAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *SubnetworksAggregatedListCall) IfNoneMatch(entityTag string) *SubnetworksAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutesInsertCall) Context(ctx context.Context) *RoutesInsertCall { +func (c *SubnetworksAggregatedListCall) Context(ctx context.Context) *SubnetworksAggregatedListCall { c.ctx_ = ctx return c } -func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SubnetworksAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SubnetworksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.route) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/subnetworks") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -32591,14 +46423,14 @@ func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routes.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.subnetworks.aggregatedList" call. +// Exactly one of *SubnetworkAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *SubnetworkAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*SubnetworkAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32617,7 +46449,7 @@ func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &SubnetworkAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -32629,13 +46461,37 @@ func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates a Route resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.routes.insert", + // "description": "Retrieves an aggregated list of subnetworks.", + // "httpMethod": "GET", + // "id": "compute.subnetworks.aggregatedList", // "parameterOrder": [ // "project" // ], // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -32644,142 +46500,114 @@ func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "type": "string" // } // }, - // "path": "{project}/global/routes", - // "request": { - // "$ref": "Route" - // }, + // "path": "{project}/aggregated/subnetworks", // "response": { - // "$ref": "Operation" + // "$ref": "SubnetworkAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.routes.list": - -type RoutesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context -} - -// List: Retrieves the list of Route resources available to the -// specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/list -func (r *RoutesService) List(project string) *RoutesListCall { - c := &RoutesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *SubnetworksAggregatedListCall) Pages(ctx context.Context, f func(*SubnetworkAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. 
-func (c *RoutesListCall) Filter(filter string) *RoutesListCall { - c.urlParams_.Set("filter", filter) - return c -} +// method id "compute.subnetworks.delete": -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. -func (c *RoutesListCall) MaxResults(maxResults int64) *RoutesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c +type SubnetworksDeleteCall struct { + s *Service + project string + region string + subnetwork string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RoutesListCall) PageToken(pageToken string) *RoutesListCall { - c.urlParams_.Set("pageToken", pageToken) +// Delete: Deletes the specified subnetwork. +func (r *SubnetworksService) Delete(project string, region string, subnetwork string) *SubnetworksDeleteCall { + c := &SubnetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.subnetwork = subnetwork return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutesListCall) Fields(s ...googleapi.Field) *RoutesListCall { +func (c *SubnetworksDeleteCall) Fields(s ...googleapi.Field) *SubnetworksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RoutesListCall) IfNoneMatch(entityTag string) *RoutesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutesListCall) Context(ctx context.Context) *RoutesListCall { +func (c *SubnetworksDeleteCall) Context(ctx context.Context) *SubnetworksDeleteCall { c.ctx_ = ctx return c } -func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SubnetworksDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SubnetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + for k, v := range c.header_ { + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "subnetwork": c.subnetwork, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routes.list" call. -// Exactly one of *RouteList or error will be non-nil. Any non-2xx +// Do executes the "compute.subnetworks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *RouteList.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { +func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32798,7 +46626,7 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RouteList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -32810,103 +46638,77 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { } return ret, nil // { - // "description": "Retrieves the list of Route resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.routes.list", + // "description": "Deletes the specified subnetwork.", + // "httpMethod": "DELETE", + // "id": "compute.subnetworks.delete", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "subnetwork" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", - // "format": "uint32", - // "location": "query", - // "maximum": "500", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "subnetwork": { + // "description": "Name of the Subnetwork resource to delete.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/routes", + // "path": "{project}/regions/{region}/subnetworks/{subnetwork}", // "response": { - // "$ref": "RouteList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RoutesListCall) Pages(ctx context.Context, f func(*RouteList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.snapshots.delete": +// method id "compute.subnetworks.expandIpCidrRange": -type SnapshotsDeleteCall struct { - s *Service - project string - snapshot string - urlParams_ gensupport.URLParams - ctx_ context.Context +type SubnetworksExpandIpCidrRangeCall struct { + s *Service + project string + region string + subnetwork string + subnetworksexpandipcidrrangerequest *SubnetworksExpandIpCidrRangeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified Snapshot resource. Keep in mind that -// deleting a single snapshot might not necessarily delete all the data -// on that snapshot. If any data on the snapshot that is marked for -// deletion is needed for subsequent snapshots, the data will be moved -// to the next corresponding snapshot. 
-// -// For more information, see Deleting snaphots. -// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/delete -func (r *SnapshotsService) Delete(project string, snapshot string) *SnapshotsDeleteCall { - c := &SnapshotsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ExpandIpCidrRange: Expands the IP CIDR range of the subnetwork to a +// specified value. +func (r *SubnetworksService) ExpandIpCidrRange(project string, region string, subnetwork string, subnetworksexpandipcidrrangerequest *SubnetworksExpandIpCidrRangeRequest) *SubnetworksExpandIpCidrRangeCall { + c := &SubnetworksExpandIpCidrRangeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.snapshot = snapshot + c.region = region + c.subnetwork = subnetwork + c.subnetworksexpandipcidrrangerequest = subnetworksexpandipcidrrangerequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SnapshotsDeleteCall) Fields(s ...googleapi.Field) *SnapshotsDeleteCall { +func (c *SubnetworksExpandIpCidrRangeCall) Fields(s ...googleapi.Field) *SubnetworksExpandIpCidrRangeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -32914,35 +46716,53 @@ func (c *SnapshotsDeleteCall) Fields(s ...googleapi.Field) *SnapshotsDeleteCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SnapshotsDeleteCall) Context(ctx context.Context) *SnapshotsDeleteCall { +func (c *SubnetworksExpandIpCidrRangeCall) Context(ctx context.Context) *SubnetworksExpandIpCidrRangeCall { c.ctx_ = ctx return c } -func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SubnetworksExpandIpCidrRangeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SubnetworksExpandIpCidrRangeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.subnetworksexpandipcidrrangerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "snapshot": c.snapshot, + "project": c.project, + "region": c.region, + "subnetwork": c.subnetwork, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.snapshots.delete" call. +// Do executes the "compute.subnetworks.expandIpCidrRange" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SubnetworksExpandIpCidrRangeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32973,12 +46793,13 @@ func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snaphots.", - // "httpMethod": "DELETE", - // "id": "compute.snapshots.delete", + // "description": "Expands the IP CIDR range of the subnetwork to a specified value.", + // "httpMethod": "POST", + // "id": "compute.subnetworks.expandIpCidrRange", // "parameterOrder": [ // "project", - // "snapshot" + // "region", + // "subnetwork" // ], // "parameters": { // "project": { @@ -32988,15 +46809,25 @@ func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "snapshot": { - // "description": "Name of the Snapshot resource to delete.", + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "subnetwork": { + // "description": "Name of the Subnetwork resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/snapshots/{snapshot}", + // "path": "{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange", + // "request": { + // "$ref": "SubnetworksExpandIpCidrRangeRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -33008,31 +46839,33 @@ func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.snapshots.get": +// method id "compute.subnetworks.get": -type SnapshotsGetCall struct { +type SubnetworksGetCall struct { s *Service project string - snapshot string + region string + subnetwork string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Snapshot resource. Get a list of available -// snapshots by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/get -func (r *SnapshotsService) Get(project string, snapshot string) *SnapshotsGetCall { - c := &SnapshotsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified subnetwork. Get a list of available +// subnetworks list() request. +func (r *SubnetworksService) Get(project string, region string, subnetwork string) *SubnetworksGetCall { + c := &SubnetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.snapshot = snapshot + c.region = region + c.subnetwork = subnetwork return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SnapshotsGetCall) Fields(s ...googleapi.Field) *SnapshotsGetCall { +func (c *SubnetworksGetCall) Fields(s ...googleapi.Field) *SubnetworksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -33042,7 +46875,7 @@ func (c *SnapshotsGetCall) Fields(s ...googleapi.Field) *SnapshotsGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *SnapshotsGetCall) IfNoneMatch(entityTag string) *SnapshotsGetCall { +func (c *SubnetworksGetCall) IfNoneMatch(entityTag string) *SubnetworksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -33050,38 +46883,51 @@ func (c *SnapshotsGetCall) IfNoneMatch(entityTag string) *SnapshotsGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SnapshotsGetCall) Context(ctx context.Context) *SnapshotsGetCall { +func (c *SubnetworksGetCall) Context(ctx context.Context) *SubnetworksGetCall { c.ctx_ = ctx return c } -func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SubnetworksGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SubnetworksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "snapshot": c.snapshot, + "project": c.project, + "region": c.region, + "subnetwork": c.subnetwork, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.snapshots.get" call. -// Exactly one of *Snapshot or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Snapshot.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.subnetworks.get" call. +// Exactly one of *Subnetwork or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Subnetwork.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { +func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33100,7 +46946,7 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Snapshot{ + ret := &Subnetwork{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -33112,12 +46958,13 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { } return ret, nil // { - // "description": "Returns the specified Snapshot resource. Get a list of available snapshots by making a list() request.", + // "description": "Returns the specified subnetwork. Get a list of available subnetworks list() request.", // "httpMethod": "GET", - // "id": "compute.snapshots.get", + // "id": "compute.subnetworks.get", // "parameterOrder": [ // "project", - // "snapshot" + // "region", + // "subnetwork" // ], // "parameters": { // "project": { @@ -33127,17 +46974,24 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { // "required": true, // "type": "string" // }, - // "snapshot": { - // "description": "Name of the Snapshot resource to return.", + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "subnetwork": { + // "description": "Name of the Subnetwork resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/snapshots/{snapshot}", + // "path": "{project}/regions/{region}/subnetworks/{subnetwork}", // "response": { - // "$ref": "Snapshot" + // "$ref": "Subnetwork" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -33148,22 +47002,171 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { } -// method id "compute.snapshots.list": +// method id "compute.subnetworks.insert": -type SnapshotsListCall struct { +type SubnetworksInsertCall struct { + s *Service + project string + region string + subnetwork *Subnetwork + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a subnetwork in the specified project using the data +// included in the request. +func (r *SubnetworksService) Insert(project string, region string, subnetwork *Subnetwork) *SubnetworksInsertCall { + c := &SubnetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.subnetwork = subnetwork + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SubnetworksInsertCall) Fields(s ...googleapi.Field) *SubnetworksInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SubnetworksInsertCall) Context(ctx context.Context) *SubnetworksInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *SubnetworksInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SubnetworksInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.subnetwork) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.subnetworks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a subnetwork in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.subnetworks.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/subnetworks", + // "request": { + // "$ref": "Subnetwork" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.subnetworks.list": + +type SubnetworksListCall struct { s *Service project string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of Snapshot resources contained within the -// specified project. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/list -func (r *SnapshotsService) List(project string) *SnapshotsListCall { - c := &SnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of subnetworks available to the specified +// project. +func (r *SubnetworksService) List(project string, region string) *SubnetworksListCall { + c := &SubnetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region return c } @@ -33194,7 +47197,7 @@ func (r *SnapshotsService) List(project string) *SnapshotsListCall { // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *SnapshotsListCall) Filter(filter string) *SnapshotsListCall { +func (c *SubnetworksListCall) Filter(filter string) *SubnetworksListCall { c.urlParams_.Set("filter", filter) return c } @@ -33204,15 +47207,32 @@ func (c *SnapshotsListCall) Filter(filter string) *SnapshotsListCall { // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in // subsequent list requests. -func (c *SnapshotsListCall) MaxResults(maxResults int64) *SnapshotsListCall { +func (c *SubnetworksListCall) MaxResults(maxResults int64) *SubnetworksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *SubnetworksListCall) OrderBy(orderBy string) *SubnetworksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *SnapshotsListCall) PageToken(pageToken string) *SnapshotsListCall { +func (c *SubnetworksListCall) PageToken(pageToken string) *SubnetworksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -33220,7 +47240,7 @@ func (c *SnapshotsListCall) PageToken(pageToken string) *SnapshotsListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SnapshotsListCall) Fields(s ...googleapi.Field) *SnapshotsListCall { +func (c *SubnetworksListCall) Fields(s ...googleapi.Field) *SubnetworksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -33230,7 +47250,7 @@ func (c *SnapshotsListCall) Fields(s ...googleapi.Field) *SnapshotsListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *SnapshotsListCall) IfNoneMatch(entityTag string) *SnapshotsListCall { +func (c *SubnetworksListCall) IfNoneMatch(entityTag string) *SubnetworksListCall { c.ifNoneMatch_ = entityTag return c } @@ -33238,37 +47258,50 @@ func (c *SnapshotsListCall) IfNoneMatch(entityTag string) *SnapshotsListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SnapshotsListCall) Context(ctx context.Context) *SnapshotsListCall { +func (c *SubnetworksListCall) Context(ctx context.Context) *SubnetworksListCall { c.ctx_ = ctx return c } -func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SubnetworksListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SubnetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.snapshots.list" call. -// Exactly one of *SnapshotList or error will be non-nil. Any non-2xx +// Do executes the "compute.subnetworks.list" call. +// Exactly one of *SubnetworkList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *SnapshotList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, error) { +// *SubnetworkList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33287,7 +47320,7 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SnapshotList{ + ret := &SubnetworkList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -33299,11 +47332,12 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err } return ret, nil // { - // "description": "Retrieves the list of Snapshot resources contained within the specified project.", + // "description": "Retrieves a list of subnetworks available to the specified project.", // "httpMethod": "GET", - // "id": "compute.snapshots.list", + // "id": "compute.subnetworks.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "filter": { @@ -33320,6 +47354,11 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -33331,11 +47370,18 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/snapshots", + // "path": "{project}/regions/{region}/subnetworks", // "response": { - // "$ref": "SnapshotList" + // "$ref": "SubnetworkList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -33349,7 +47395,7 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *SnapshotsListCall) Pages(ctx context.Context, f func(*SnapshotList) error) error { +func (c *SubnetworksListCall) Pages(ctx context.Context, f func(*SubnetworkList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -33367,28 +47413,30 @@ func (c *SnapshotsListCall) Pages(ctx context.Context, f func(*SnapshotList) err } } -// method id "compute.sslCertificates.delete": +// method id "compute.targetHttpProxies.delete": -type SslCertificatesDeleteCall struct { - s *Service - project string - sslCertificate string - urlParams_ gensupport.URLParams - ctx_ context.Context +type TargetHttpProxiesDeleteCall struct { + s *Service + project string + targetHttpProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified SslCertificate resource. -func (r *SslCertificatesService) Delete(project string, sslCertificate string) *SslCertificatesDeleteCall { - c := &SslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified TargetHttpProxy resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/delete +func (r *TargetHttpProxiesService) Delete(project string, targetHttpProxy string) *TargetHttpProxiesDeleteCall { + c := &TargetHttpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.sslCertificate = sslCertificate + c.targetHttpProxy = targetHttpProxy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SslCertificatesDeleteCall) Fields(s ...googleapi.Field) *SslCertificatesDeleteCall { +func (c *TargetHttpProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetHttpProxiesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -33396,35 +47444,47 @@ func (c *SslCertificatesDeleteCall) Fields(s ...googleapi.Field) *SslCertificate // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SslCertificatesDeleteCall) Context(ctx context.Context) *SslCertificatesDeleteCall { +func (c *TargetHttpProxiesDeleteCall) Context(ctx context.Context) *TargetHttpProxiesDeleteCall { c.ctx_ = ctx return c } -func (c *SslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetHttpProxiesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "sslCertificate": c.sslCertificate, + "project": c.project, + "targetHttpProxy": c.targetHttpProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.sslCertificates.delete" call. +// Do executes the "compute.targetHttpProxies.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33455,12 +47515,12 @@ func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified SslCertificate resource.", + // "description": "Deletes the specified TargetHttpProxy resource.", // "httpMethod": "DELETE", - // "id": "compute.sslCertificates.delete", + // "id": "compute.targetHttpProxies.delete", // "parameterOrder": [ // "project", - // "sslCertificate" + // "targetHttpProxy" // ], // "parameters": { // "project": { @@ -33470,15 +47530,15 @@ func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "sslCertificate": { - // "description": "Name of the SslCertificate resource to delete.", + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/sslCertificates/{sslCertificate}", + // "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", // "response": { // "$ref": "Operation" // }, @@ -33490,30 +47550,32 @@ func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.sslCertificates.get": +// method id "compute.targetHttpProxies.get": -type SslCertificatesGetCall struct { - s *Service - project string - sslCertificate string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type TargetHttpProxiesGetCall struct { + s *Service + project string + targetHttpProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified SslCertificate resource. Get a list of -// available SSL certificates by making a list() request. -func (r *SslCertificatesService) Get(project string, sslCertificate string) *SslCertificatesGetCall { - c := &SslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified TargetHttpProxy resource. Get a list of +// available target HTTP proxies by making a list() request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/get +func (r *TargetHttpProxiesService) Get(project string, targetHttpProxy string) *TargetHttpProxiesGetCall { + c := &TargetHttpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.sslCertificate = sslCertificate + c.targetHttpProxy = targetHttpProxy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SslCertificatesGetCall) Fields(s ...googleapi.Field) *SslCertificatesGetCall { +func (c *TargetHttpProxiesGetCall) Fields(s ...googleapi.Field) *TargetHttpProxiesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -33523,7 +47585,7 @@ func (c *SslCertificatesGetCall) Fields(s ...googleapi.Field) *SslCertificatesGe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *SslCertificatesGetCall) IfNoneMatch(entityTag string) *SslCertificatesGetCall { +func (c *TargetHttpProxiesGetCall) IfNoneMatch(entityTag string) *TargetHttpProxiesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -33531,38 +47593,50 @@ func (c *SslCertificatesGetCall) IfNoneMatch(entityTag string) *SslCertificatesG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SslCertificatesGetCall) Context(ctx context.Context) *SslCertificatesGetCall { +func (c *TargetHttpProxiesGetCall) Context(ctx context.Context) *TargetHttpProxiesGetCall { c.ctx_ = ctx return c } -func (c *SslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetHttpProxiesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "sslCertificate": c.sslCertificate, + "project": c.project, + "targetHttpProxy": c.targetHttpProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.sslCertificates.get" call. -// Exactly one of *SslCertificate or error will be non-nil. Any non-2xx +// Do executes the "compute.targetHttpProxies.get" call. +// Exactly one of *TargetHttpProxy or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *SslCertificate.ServerResponse.Header or (if a response was returned +// *TargetHttpProxy.ServerResponse.Header or (if a response was returned // at all) in error.(*googleapi.Error).Header. 
Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertificate, error) { +func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33581,7 +47655,7 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SslCertificate{ + ret := &TargetHttpProxy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -33593,12 +47667,12 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica } return ret, nil // { - // "description": "Returns the specified SslCertificate resource. Get a list of available SSL certificates by making a list() request.", + // "description": "Returns the specified TargetHttpProxy resource. Get a list of available target HTTP proxies by making a list() request.", // "httpMethod": "GET", - // "id": "compute.sslCertificates.get", + // "id": "compute.targetHttpProxies.get", // "parameterOrder": [ // "project", - // "sslCertificate" + // "targetHttpProxy" // ], // "parameters": { // "project": { @@ -33608,17 +47682,17 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica // "required": true, // "type": "string" // }, - // "sslCertificate": { - // "description": "Name of the SslCertificate resource to return.", + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/sslCertificates/{sslCertificate}", + // "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", // "response": { - // "$ref": "SslCertificate" + // "$ref": "TargetHttpProxy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -33629,29 +47703,31 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica } -// method id "compute.sslCertificates.insert": +// method id "compute.targetHttpProxies.insert": -type SslCertificatesInsertCall struct { - s *Service - project string - sslcertificate *SslCertificate - urlParams_ gensupport.URLParams - ctx_ context.Context +type TargetHttpProxiesInsertCall struct { + s *Service + project string + targethttpproxy *TargetHttpProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a SslCertificate resource in the specified project +// Insert: Creates a TargetHttpProxy resource in the specified project // using the data included in the request. 
-func (r *SslCertificatesService) Insert(project string, sslcertificate *SslCertificate) *SslCertificatesInsertCall { - c := &SslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/insert +func (r *TargetHttpProxiesService) Insert(project string, targethttpproxy *TargetHttpProxy) *TargetHttpProxiesInsertCall { + c := &TargetHttpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.sslcertificate = sslcertificate + c.targethttpproxy = targethttpproxy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SslCertificatesInsertCall) Fields(s ...googleapi.Field) *SslCertificatesInsertCall { +func (c *TargetHttpProxiesInsertCall) Fields(s ...googleapi.Field) *TargetHttpProxiesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -33659,22 +47735,34 @@ func (c *SslCertificatesInsertCall) Fields(s ...googleapi.Field) *SslCertificate // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SslCertificatesInsertCall) Context(ctx context.Context) *SslCertificatesInsertCall { +func (c *TargetHttpProxiesInsertCall) Context(ctx context.Context) *TargetHttpProxiesInsertCall { c.ctx_ = ctx return c } -func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetHttpProxiesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertificate) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpproxy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -33684,14 +47772,14 @@ func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.sslCertificates.insert" call. +// Do executes the "compute.targetHttpProxies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33722,9 +47810,9 @@ func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a SslCertificate resource in the specified project using the data included in the request.", + // "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.sslCertificates.insert", + // "id": "compute.targetHttpProxies.insert", // "parameterOrder": [ // "project" // ], @@ -33737,9 +47825,9 @@ func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "{project}/global/sslCertificates", + // "path": "{project}/global/targetHttpProxies", // "request": { - // "$ref": "SslCertificate" + // "$ref": "TargetHttpProxy" // }, // "response": { // "$ref": "Operation" @@ -33752,20 +47840,22 @@ func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.sslCertificates.list": +// method id "compute.targetHttpProxies.list": -type SslCertificatesListCall struct { +type TargetHttpProxiesListCall struct { s *Service project string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of SslCertificate resources available to the -// specified project. -func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { - c := &SslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of TargetHttpProxy resources available to +// the specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/list +func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCall { + c := &TargetHttpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -33797,7 +47887,7 @@ func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *SslCertificatesListCall) Filter(filter string) *SslCertificatesListCall { +func (c *TargetHttpProxiesListCall) Filter(filter string) *TargetHttpProxiesListCall { c.urlParams_.Set("filter", filter) return c } @@ -33807,15 +47897,32 @@ func (c *SslCertificatesListCall) Filter(filter string) *SslCertificatesListCall // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in // subsequent list requests. -func (c *SslCertificatesListCall) MaxResults(maxResults int64) *SslCertificatesListCall { +func (c *TargetHttpProxiesListCall) MaxResults(maxResults int64) *TargetHttpProxiesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). 
Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *TargetHttpProxiesListCall) OrderBy(orderBy string) *TargetHttpProxiesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *SslCertificatesListCall) PageToken(pageToken string) *SslCertificatesListCall { +func (c *TargetHttpProxiesListCall) PageToken(pageToken string) *TargetHttpProxiesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -33823,7 +47930,7 @@ func (c *SslCertificatesListCall) PageToken(pageToken string) *SslCertificatesLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SslCertificatesListCall) Fields(s ...googleapi.Field) *SslCertificatesListCall { +func (c *TargetHttpProxiesListCall) Fields(s ...googleapi.Field) *TargetHttpProxiesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -33833,7 +47940,7 @@ func (c *SslCertificatesListCall) Fields(s ...googleapi.Field) *SslCertificatesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *SslCertificatesListCall) IfNoneMatch(entityTag string) *SslCertificatesListCall { +func (c *TargetHttpProxiesListCall) IfNoneMatch(entityTag string) *TargetHttpProxiesListCall { c.ifNoneMatch_ = entityTag return c } @@ -33841,20 +47948,32 @@ func (c *SslCertificatesListCall) IfNoneMatch(entityTag string) *SslCertificates // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SslCertificatesListCall) Context(ctx context.Context) *SslCertificatesListCall { +func (c *TargetHttpProxiesListCall) Context(ctx context.Context) *TargetHttpProxiesListCall { c.ctx_ = ctx return c } -func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetHttpProxiesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -33864,14 +47983,14 @@ func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.sslCertificates.list" call. -// Exactly one of *SslCertificateList or error will be non-nil. 
Any +// Do executes the "compute.targetHttpProxies.list" call. +// Exactly one of *TargetHttpProxyList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *SslCertificateList.ServerResponse.Header or (if a response was +// *TargetHttpProxyList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertificateList, error) { +func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxyList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33890,7 +48009,7 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SslCertificateList{ + ret := &TargetHttpProxyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -33902,9 +48021,9 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific } return ret, nil // { - // "description": "Retrieves the list of SslCertificate resources available to the specified project.", + // "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.sslCertificates.list", + // "id": "compute.targetHttpProxies.list", // "parameterOrder": [ // "project" // ], @@ -33923,6 +48042,11 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -33936,9 +48060,9 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific // "type": "string" // } // }, - // "path": "{project}/global/sslCertificates", + // "path": "{project}/global/targetHttpProxies", // "response": { - // "$ref": "SslCertificateList" + // "$ref": "TargetHttpProxyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -33952,7 +48076,7 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
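
A minimal usage sketch of the generated list call above, assuming the stock compute.New constructor, Application Default Credentials via golang.org/x/oauth2/google, and a hypothetical project ID; Filter, MaxResults and the Pages iterator are the setters defined in this hunk:

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	project := "my-project" // hypothetical project ID

	// Application Default Credentials; the read-only scope is enough for list calls.
	client, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Pages drives nextPageToken internally; each page is a *compute.TargetHttpProxyList.
	call := svc.TargetHttpProxies.List(project).MaxResults(50)
	err = call.Pages(ctx, func(page *compute.TargetHttpProxyList) error {
		for _, proxy := range page.Items {
			fmt.Println(proxy.Name, proxy.UrlMap)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
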
-func (c *SslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertificateList) error) error { +func (c *TargetHttpProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpProxyList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -33970,125 +48094,85 @@ func (c *SslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertific } } -// method id "compute.subnetworks.aggregatedList": +// method id "compute.targetHttpProxies.setUrlMap": -type SubnetworksAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type TargetHttpProxiesSetUrlMapCall struct { + s *Service + project string + targetHttpProxy string + urlmapreference *UrlMapReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of subnetworks. -func (r *SubnetworksService) AggregatedList(project string) *SubnetworksAggregatedListCall { - c := &SubnetworksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetUrlMap: Changes the URL map for TargetHttpProxy. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/setUrlMap +func (r *TargetHttpProxiesService) SetUrlMap(project string, targetHttpProxy string, urlmapreference *UrlMapReference) *TargetHttpProxiesSetUrlMapCall { + c := &TargetHttpProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *SubnetworksAggregatedListCall) Filter(filter string) *SubnetworksAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. 
-func (c *SubnetworksAggregatedListCall) MaxResults(maxResults int64) *SubnetworksAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *SubnetworksAggregatedListCall) PageToken(pageToken string) *SubnetworksAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) + c.targetHttpProxy = targetHttpProxy + c.urlmapreference = urlmapreference return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SubnetworksAggregatedListCall) Fields(s ...googleapi.Field) *SubnetworksAggregatedListCall { +func (c *TargetHttpProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *TargetHttpProxiesSetUrlMapCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *SubnetworksAggregatedListCall) IfNoneMatch(entityTag string) *SubnetworksAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SubnetworksAggregatedListCall) Context(ctx context.Context) *SubnetworksAggregatedListCall { +func (c *TargetHttpProxiesSetUrlMapCall) Context(ctx context.Context) *TargetHttpProxiesSetUrlMapCall { c.ctx_ = ctx return c } -func (c *SubnetworksAggregatedListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetHttpProxiesSetUrlMapCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + for k, v := range c.header_ { + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/subnetworks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "targetHttpProxy": c.targetHttpProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.subnetworks.aggregatedList" call. -// Exactly one of *SubnetworkAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. 
Response headers are in either -// *SubnetworkAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*SubnetworkAggregatedList, error) { +// Do executes the "compute.targetHttpProxies.setUrlMap" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34107,7 +48191,7 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SubnetworkAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -34119,98 +48203,67 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne } return ret, nil // { - // "description": "Retrieves an aggregated list of subnetworks.", - // "httpMethod": "GET", - // "id": "compute.subnetworks.aggregatedList", + // "description": "Changes the URL map for TargetHttpProxy.", + // "httpMethod": "POST", + // "id": "compute.targetHttpProxies.setUrlMap", // "parameterOrder": [ - // "project" + // "project", + // "targetHttpProxy" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", - // "format": "uint32", - // "location": "query", - // "maximum": "500", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy to set a URL map for.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/subnetworks", + // "path": "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + // "request": { + // "$ref": "UrlMapReference" + // }, // "response": { - // "$ref": "SubnetworkAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *SubnetworksAggregatedListCall) Pages(ctx context.Context, f func(*SubnetworkAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.subnetworks.delete": +// method id "compute.targetHttpsProxies.delete": -type SubnetworksDeleteCall struct { - s *Service - project string - region string - subnetwork string - urlParams_ gensupport.URLParams - ctx_ context.Context +type TargetHttpsProxiesDeleteCall struct { + s *Service + project string + targetHttpsProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified subnetwork. -func (r *SubnetworksService) Delete(project string, region string, subnetwork string) *SubnetworksDeleteCall { - c := &SubnetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified TargetHttpsProxy resource. +func (r *TargetHttpsProxiesService) Delete(project string, targetHttpsProxy string) *TargetHttpsProxiesDeleteCall { + c := &TargetHttpsProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.subnetwork = subnetwork + c.targetHttpsProxy = targetHttpsProxy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *SubnetworksDeleteCall) Fields(s ...googleapi.Field) *SubnetworksDeleteCall { +func (c *TargetHttpsProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -34218,36 +48271,47 @@ func (c *SubnetworksDeleteCall) Fields(s ...googleapi.Field) *SubnetworksDeleteC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SubnetworksDeleteCall) Context(ctx context.Context) *SubnetworksDeleteCall { +func (c *TargetHttpsProxiesDeleteCall) Context(ctx context.Context) *TargetHttpsProxiesDeleteCall { c.ctx_ = ctx return c } -func (c *SubnetworksDeleteCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetHttpsProxiesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "subnetwork": c.subnetwork, + "project": c.project, + "targetHttpsProxy": c.targetHttpsProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.subnetworks.delete" call. +// Do executes the "compute.targetHttpsProxies.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34278,13 +48342,12 @@ func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified subnetwork.", + // "description": "Deletes the specified TargetHttpsProxy resource.", // "httpMethod": "DELETE", - // "id": "compute.subnetworks.delete", + // "id": "compute.targetHttpsProxies.delete", // "parameterOrder": [ // "project", - // "region", - // "subnetwork" + // "targetHttpsProxy" // ], // "parameters": { // "project": { @@ -34294,22 +48357,15 @@ func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "subnetwork": { - // "description": "Name of the Subnetwork resource to delete.", + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/subnetworks/{subnetwork}", + // "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", // "response": { // "$ref": "Operation" // }, @@ -34321,32 +48377,31 @@ func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.subnetworks.get": +// method id "compute.targetHttpsProxies.get": -type SubnetworksGetCall struct { - s *Service - project string - region string - subnetwork string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type TargetHttpsProxiesGetCall struct { + s *Service + project string + targetHttpsProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified subnetwork. Get a list of available -// subnetworks list() request. -func (r *SubnetworksService) Get(project string, region string, subnetwork string) *SubnetworksGetCall { - c := &SubnetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified TargetHttpsProxy resource. Get a list of +// available target HTTPS proxies by making a list() request. +func (r *TargetHttpsProxiesService) Get(project string, targetHttpsProxy string) *TargetHttpsProxiesGetCall { + c := &TargetHttpsProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.subnetwork = subnetwork + c.targetHttpsProxy = targetHttpsProxy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SubnetworksGetCall) Fields(s ...googleapi.Field) *SubnetworksGetCall { +func (c *TargetHttpsProxiesGetCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -34356,7 +48411,7 @@ func (c *SubnetworksGetCall) Fields(s ...googleapi.Field) *SubnetworksGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *SubnetworksGetCall) IfNoneMatch(entityTag string) *SubnetworksGetCall { +func (c *TargetHttpsProxiesGetCall) IfNoneMatch(entityTag string) *TargetHttpsProxiesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -34364,39 +48419,50 @@ func (c *SubnetworksGetCall) IfNoneMatch(entityTag string) *SubnetworksGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SubnetworksGetCall) Context(ctx context.Context) *SubnetworksGetCall { +func (c *TargetHttpsProxiesGetCall) Context(ctx context.Context) *TargetHttpsProxiesGetCall { c.ctx_ = ctx return c } -func (c *SubnetworksGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetHttpsProxiesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "subnetwork": c.subnetwork, + "project": c.project, + "targetHttpsProxy": c.targetHttpsProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.subnetworks.get" call. -// Exactly one of *Subnetwork or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Subnetwork.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, error) { +// Do executes the "compute.targetHttpsProxies.get" call. +// Exactly one of *TargetHttpsProxy or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetHttpsProxy.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34415,7 +48481,7 @@ func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Subnetwork{ + ret := &TargetHttpsProxy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -34427,13 +48493,12 @@ func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, erro } return ret, nil // { - // "description": "Returns the specified subnetwork. Get a list of available subnetworks list() request.", + // "description": "Returns the specified TargetHttpsProxy resource. Get a list of available target HTTPS proxies by making a list() request.", // "httpMethod": "GET", - // "id": "compute.subnetworks.get", + // "id": "compute.targetHttpsProxies.get", // "parameterOrder": [ // "project", - // "region", - // "subnetwork" + // "targetHttpsProxy" // ], // "parameters": { // "project": { @@ -34443,24 +48508,17 @@ func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, erro // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "subnetwork": { - // "description": "Name of the Subnetwork resource to return.", + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/subnetworks/{subnetwork}", + // "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", // "response": { - // "$ref": "Subnetwork" + // "$ref": "TargetHttpsProxy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -34471,31 +48529,30 @@ func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, erro } -// method id "compute.subnetworks.insert": +// method id "compute.targetHttpsProxies.insert": -type SubnetworksInsertCall struct { - s *Service - project string - region string - subnetwork *Subnetwork - urlParams_ gensupport.URLParams - ctx_ context.Context +type TargetHttpsProxiesInsertCall struct { + s *Service + project string + targethttpsproxy *TargetHttpsProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a subnetwork in the specified project using the data -// included in the request. -func (r *SubnetworksService) Insert(project string, region string, subnetwork *Subnetwork) *SubnetworksInsertCall { - c := &SubnetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a TargetHttpsProxy resource in the specified project +// using the data included in the request. +func (r *TargetHttpsProxiesService) Insert(project string, targethttpsproxy *TargetHttpsProxy) *TargetHttpsProxiesInsertCall { + c := &TargetHttpsProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.subnetwork = subnetwork + c.targethttpsproxy = targethttpsproxy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
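
The IfNoneMatch setter and googleapi.IsNotModified shown in this hunk support ETag-conditional reads. A sketch, reusing a *compute.Service built as in the earlier example and additionally importing google.golang.org/api/googleapi; the etag argument is whatever a previous response carried:

// getHTTPSProxyIfChanged returns (nil, nil) when the server answers
// 304 Not Modified for the supplied ETag, and the proxy otherwise.
func getHTTPSProxyIfChanged(ctx context.Context, svc *compute.Service, project, name, etag string) (*compute.TargetHttpsProxy, error) {
	call := svc.TargetHttpsProxies.Get(project, name).Context(ctx)
	if etag != "" {
		call = call.IfNoneMatch(etag)
	}
	proxy, err := call.Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // cached copy is still current
	}
	if err != nil {
		return nil, err
	}
	return proxy, nil
}
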
-func (c *SubnetworksInsertCall) Fields(s ...googleapi.Field) *SubnetworksInsertCall { +func (c *TargetHttpsProxiesInsertCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -34503,40 +48560,51 @@ func (c *SubnetworksInsertCall) Fields(s ...googleapi.Field) *SubnetworksInsertC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SubnetworksInsertCall) Context(ctx context.Context) *SubnetworksInsertCall { +func (c *TargetHttpsProxiesInsertCall) Context(ctx context.Context) *TargetHttpsProxiesInsertCall { c.ctx_ = ctx return c } -func (c *SubnetworksInsertCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetHttpsProxiesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.subnetwork) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.subnetworks.insert" call. +// Do executes the "compute.targetHttpsProxies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34567,12 +48635,11 @@ func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates a subnetwork in the specified project using the data included in the request.", + // "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.subnetworks.insert", + // "id": "compute.targetHttpsProxies.insert", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "project": { @@ -34581,18 +48648,11 @@ func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/subnetworks", + // "path": "{project}/global/targetHttpsProxies", // "request": { - // "$ref": "Subnetwork" + // "$ref": "TargetHttpsProxy" // }, // "response": { // "$ref": "Operation" @@ -34605,23 +48665,22 @@ func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.subnetworks.list": +// method id "compute.targetHttpsProxies.list": -type SubnetworksListCall struct { +type TargetHttpsProxiesListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of subnetworks available to the specified -// project. -func (r *SubnetworksService) List(project string, region string) *SubnetworksListCall { - c := &SubnetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of TargetHttpsProxy resources available to +// the specified project. +func (r *TargetHttpsProxiesService) List(project string) *TargetHttpsProxiesListCall { + c := &TargetHttpsProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } @@ -34652,7 +48711,7 @@ func (r *SubnetworksService) List(project string, region string) *SubnetworksLis // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *SubnetworksListCall) Filter(filter string) *SubnetworksListCall { +func (c *TargetHttpsProxiesListCall) Filter(filter string) *TargetHttpsProxiesListCall { c.urlParams_.Set("filter", filter) return c } @@ -34662,15 +48721,32 @@ func (c *SubnetworksListCall) Filter(filter string) *SubnetworksListCall { // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in // subsequent list requests. -func (c *SubnetworksListCall) MaxResults(maxResults int64) *SubnetworksListCall { +func (c *TargetHttpsProxiesListCall) MaxResults(maxResults int64) *TargetHttpsProxiesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. 
By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *TargetHttpsProxiesListCall) OrderBy(orderBy string) *TargetHttpsProxiesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *SubnetworksListCall) PageToken(pageToken string) *SubnetworksListCall { +func (c *TargetHttpsProxiesListCall) PageToken(pageToken string) *TargetHttpsProxiesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -34678,221 +48754,67 @@ func (c *SubnetworksListCall) PageToken(pageToken string) *SubnetworksListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SubnetworksListCall) Fields(s ...googleapi.Field) *SubnetworksListCall { +func (c *TargetHttpsProxiesListCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *SubnetworksListCall) IfNoneMatch(entityTag string) *SubnetworksListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SubnetworksListCall) Context(ctx context.Context) *SubnetworksListCall { - c.ctx_ = ctx - return c -} - -func (c *SubnetworksListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.subnetworks.list" call. -// Exactly one of *SubnetworkList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *SubnetworkList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &SubnetworkList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of subnetworks available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.subnetworks.list", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", - // "format": "uint32", - // "location": "query", - // "maximum": "500", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/subnetworks", - // "response": { - // "$ref": "SubnetworkList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *SubnetworksListCall) Pages(ctx context.Context, f func(*SubnetworkList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.targetHttpProxies.delete": - -type TargetHttpProxiesDeleteCall struct { - s *Service - project string - targetHttpProxy string - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Delete: Deletes the specified TargetHttpProxy resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/delete -func (r *TargetHttpProxiesService) Delete(project string, targetHttpProxy string) *TargetHttpProxiesDeleteCall { - c := &TargetHttpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targetHttpProxy = targetHttpProxy - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetHttpProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetHttpProxiesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TargetHttpsProxiesListCall) IfNoneMatch(entityTag string) *TargetHttpsProxiesListCall { + c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpProxiesDeleteCall) Context(ctx context.Context) *TargetHttpProxiesDeleteCall { +func (c *TargetHttpsProxiesListCall) Context(ctx context.Context) *TargetHttpsProxiesListCall { c.ctx_ = ctx return c } -func (c *TargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *TargetHttpsProxiesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpProxy": c.targetHttpProxy, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpProxies.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.targetHttpsProxies.list" call. +// Exactly one of *TargetHttpsProxyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetHttpsProxyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxyList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34911,7 +48833,7 @@ func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TargetHttpsProxyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -34923,115 +48845,157 @@ func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified TargetHttpProxy resource.", - // "httpMethod": "DELETE", - // "id": "compute.targetHttpProxies.delete", + // "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.targetHttpsProxies.list", // "parameterOrder": [ - // "project", - // "targetHttpProxy" + // "project" // ], // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "targetHttpProxy": { - // "description": "Name of the TargetHttpProxy resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", + // "path": "{project}/global/targetHttpsProxies", // "response": { - // "$ref": "Operation" + // "$ref": "TargetHttpsProxyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.targetHttpProxies.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
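
The two additions this revision makes to every call type are both visible in this hunk: the orderBy parameter and the Header() accessor whose map doRequest copies onto the outgoing request. A sketch of both, reusing svc from the first example; the extra header is purely illustrative:

// listNewestHTTPSProxies lists the ten most recently created proxies and
// attaches an arbitrary per-call header via the new Header() accessor.
func listNewestHTTPSProxies(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.TargetHttpsProxies.List(project).
		OrderBy("creationTimestamp desc").
		MaxResults(10).
		Context(ctx)
	call.Header().Set("X-Example-Trace", "demo") // hypothetical header name and value
	list, err := call.Do()
	if err != nil {
		return err
	}
	for _, p := range list.Items {
		fmt.Println(p.Name, p.CreationTimestamp)
	}
	return nil
}
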
+func (c *TargetHttpsProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpsProxyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type TargetHttpProxiesGetCall struct { - s *Service - project string - targetHttpProxy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +// method id "compute.targetHttpsProxies.setSslCertificates": + +type TargetHttpsProxiesSetSslCertificatesCall struct { + s *Service + project string + targetHttpsProxy string + targethttpsproxiessetsslcertificatesrequest *TargetHttpsProxiesSetSslCertificatesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified TargetHttpProxy resource. Get a list of -// available target HTTP proxies by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/get -func (r *TargetHttpProxiesService) Get(project string, targetHttpProxy string) *TargetHttpProxiesGetCall { - c := &TargetHttpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. +func (r *TargetHttpsProxiesService) SetSslCertificates(project string, targetHttpsProxy string, targethttpsproxiessetsslcertificatesrequest *TargetHttpsProxiesSetSslCertificatesRequest) *TargetHttpsProxiesSetSslCertificatesCall { + c := &TargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetHttpProxy = targetHttpProxy + c.targetHttpsProxy = targetHttpsProxy + c.targethttpsproxiessetsslcertificatesrequest = targethttpsproxiessetsslcertificatesrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpProxiesGetCall) Fields(s ...googleapi.Field) *TargetHttpProxiesGetCall { +func (c *TargetHttpsProxiesSetSslCertificatesCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetSslCertificatesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetHttpProxiesGetCall) IfNoneMatch(entityTag string) *TargetHttpProxiesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpProxiesGetCall) Context(ctx context.Context) *TargetHttpProxiesGetCall { +func (c *TargetHttpsProxiesSetSslCertificatesCall) Context(ctx context.Context) *TargetHttpsProxiesSetSslCertificatesCall { c.ctx_ = ctx return c } -func (c *TargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *TargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + for k, v := range c.header_ { + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxiessetsslcertificatesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpProxy": c.targetHttpProxy, + "project": c.project, + "targetHttpsProxy": c.targetHttpsProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpProxies.get" call. -// Exactly one of *TargetHttpProxy or error will be non-nil. Any non-2xx +// Do executes the "compute.targetHttpsProxies.setSslCertificates" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *TargetHttpProxy.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxy, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35050,7 +49014,7 @@ func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttp if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetHttpProxy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -35062,12 +49026,12 @@ func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttp } return ret, nil // { - // "description": "Returns the specified TargetHttpProxy resource. 
Get a list of available target HTTP proxies by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.targetHttpProxies.get", + // "description": "Replaces SslCertificates for TargetHttpsProxy.", + // "httpMethod": "POST", + // "id": "compute.targetHttpsProxies.setSslCertificates", // "parameterOrder": [ // "project", - // "targetHttpProxy" + // "targetHttpsProxy" // ], // "parameters": { // "project": { @@ -35077,51 +49041,54 @@ func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttp // "required": true, // "type": "string" // }, - // "targetHttpProxy": { - // "description": "Name of the TargetHttpProxy resource to return.", + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", + // "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + // "request": { + // "$ref": "TargetHttpsProxiesSetSslCertificatesRequest" + // }, // "response": { - // "$ref": "TargetHttpProxy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.targetHttpProxies.insert": +// method id "compute.targetHttpsProxies.setUrlMap": -type TargetHttpProxiesInsertCall struct { - s *Service - project string - targethttpproxy *TargetHttpProxy - urlParams_ gensupport.URLParams - ctx_ context.Context +type TargetHttpsProxiesSetUrlMapCall struct { + s *Service + project string + targetHttpsProxy string + urlmapreference *UrlMapReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a TargetHttpProxy resource in the specified project -// using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/insert -func (r *TargetHttpProxiesService) Insert(project string, targethttpproxy *TargetHttpProxy) *TargetHttpProxiesInsertCall { - c := &TargetHttpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetUrlMap: Changes the URL map for TargetHttpsProxy. +func (r *TargetHttpsProxiesService) SetUrlMap(project string, targetHttpsProxy string, urlmapreference *UrlMapReference) *TargetHttpsProxiesSetUrlMapCall { + c := &TargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targethttpproxy = targethttpproxy + c.targetHttpsProxy = targetHttpsProxy + c.urlmapreference = urlmapreference return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpProxiesInsertCall) Fields(s ...googleapi.Field) *TargetHttpProxiesInsertCall { +func (c *TargetHttpsProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetUrlMapCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -35129,39 +49096,52 @@ func (c *TargetHttpProxiesInsertCall) Fields(s ...googleapi.Field) *TargetHttpPr // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
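The setSslCertificates call shown above posts a TargetHttpsProxiesSetSslCertificatesRequest and answers with an Operation rather than the updated proxy. A sketch of the caller's side, assuming a compute.Service built as in the earlier example and that the request body carries certificate self-links in an SslCertificates field, which this hunk does not show.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// ReplaceCerts swaps the SSL certificates served by a target HTTPS proxy.
// certURLs are fully qualified SslCertificate self-links.
func ReplaceCerts(ctx context.Context, svc *compute.Service, project, proxy string, certURLs []string) error {
	req := &compute.TargetHttpsProxiesSetSslCertificatesRequest{
		SslCertificates: certURLs, // assumed field name, per the public API schema
	}
	op, err := svc.TargetHttpsProxies.SetSslCertificates(project, proxy, req).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// The call is asynchronous; poll the returned Operation elsewhere.
	fmt.Printf("started operation %s\n", op.Name)
	return nil
}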
-func (c *TargetHttpProxiesInsertCall) Context(ctx context.Context) *TargetHttpProxiesInsertCall { +func (c *TargetHttpsProxiesSetUrlMapCall) Context(ctx context.Context) *TargetHttpsProxiesSetUrlMapCall { c.ctx_ = ctx return c } -func (c *TargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetHttpsProxiesSetUrlMapCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpproxy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "targetHttpsProxy": c.targetHttpsProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpProxies.insert" call. +// Do executes the "compute.targetHttpsProxies.setUrlMap" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35192,11 +49172,12 @@ func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.", + // "description": "Changes the URL map for TargetHttpsProxy.", // "httpMethod": "POST", - // "id": "compute.targetHttpProxies.insert", + // "id": "compute.targetHttpsProxies.setUrlMap", // "parameterOrder": [ - // "project" + // "project", + // "targetHttpsProxy" // ], // "parameters": { // "project": { @@ -35205,11 +49186,18 @@ func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource whose URL map is to be set.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/targetHttpProxies", + // "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", // "request": { - // "$ref": "TargetHttpProxy" + // "$ref": "UrlMapReference" // }, // "response": { // "$ref": "Operation" @@ -35222,21 +49210,21 @@ func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.targetHttpProxies.list": +// method id "compute.targetInstances.aggregatedList": -type TargetHttpProxiesListCall struct { +type TargetInstancesAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of TargetHttpProxy resources available to -// the specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/list -func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCall { - c := &TargetHttpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of target instances. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/aggregatedList +func (r *TargetInstancesService) AggregatedList(project string) *TargetInstancesAggregatedListCall { + c := &TargetInstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -35268,7 +49256,7 @@ func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCa // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *TargetHttpProxiesListCall) Filter(filter string) *TargetHttpProxiesListCall { +func (c *TargetInstancesAggregatedListCall) Filter(filter string) *TargetInstancesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -35278,15 +49266,32 @@ func (c *TargetHttpProxiesListCall) Filter(filter string) *TargetHttpProxiesList // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in // subsequent list requests. 
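setUrlMap, whose metadata closes just above, has the same shape: it takes a UrlMapReference and returns an Operation. A sketch, again assuming a prebuilt compute.Service and that UrlMapReference names the map's self-link in a UrlMap field.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// SwitchURLMap points an existing target HTTPS proxy at a different URL map.
// urlMapLink is the new map's fully qualified self-link.
func SwitchURLMap(ctx context.Context, svc *compute.Service, project, proxy, urlMapLink string) (*compute.Operation, error) {
	ref := &compute.UrlMapReference{UrlMap: urlMapLink} // assumed field name
	return svc.TargetHttpsProxies.SetUrlMap(project, proxy, ref).
		Context(ctx).
		Do()
}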
-func (c *TargetHttpProxiesListCall) MaxResults(maxResults int64) *TargetHttpProxiesListCall { +func (c *TargetInstancesAggregatedListCall) MaxResults(maxResults int64) *TargetInstancesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *TargetInstancesAggregatedListCall) OrderBy(orderBy string) *TargetInstancesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *TargetHttpProxiesListCall) PageToken(pageToken string) *TargetHttpProxiesListCall { +func (c *TargetInstancesAggregatedListCall) PageToken(pageToken string) *TargetInstancesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -35294,7 +49299,7 @@ func (c *TargetHttpProxiesListCall) PageToken(pageToken string) *TargetHttpProxi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpProxiesListCall) Fields(s ...googleapi.Field) *TargetHttpProxiesListCall { +func (c *TargetInstancesAggregatedListCall) Fields(s ...googleapi.Field) *TargetInstancesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -35304,7 +49309,7 @@ func (c *TargetHttpProxiesListCall) Fields(s ...googleapi.Field) *TargetHttpProx // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *TargetHttpProxiesListCall) IfNoneMatch(entityTag string) *TargetHttpProxiesListCall { +func (c *TargetInstancesAggregatedListCall) IfNoneMatch(entityTag string) *TargetInstancesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -35312,20 +49317,32 @@ func (c *TargetHttpProxiesListCall) IfNoneMatch(entityTag string) *TargetHttpPro // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpProxiesListCall) Context(ctx context.Context) *TargetHttpProxiesListCall { +func (c *TargetInstancesAggregatedListCall) Context(ctx context.Context) *TargetInstancesAggregatedListCall { c.ctx_ = ctx return c } -func (c *TargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *TargetInstancesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetInstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -35335,14 +49352,14 @@ func (c *TargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpProxies.list" call. -// Exactly one of *TargetHttpProxyList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TargetHttpProxyList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.targetInstances.aggregatedList" call. +// Exactly one of *TargetInstanceAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *TargetInstanceAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxyList, error) { +func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetInstanceAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35361,7 +49378,7 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetHttpProxyList{ + ret := &TargetInstanceAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -35373,9 +49390,9 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt } return ret, nil // { - // "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.", + // "description": "Retrieves an aggregated list of target instances.", // "httpMethod": "GET", - // "id": "compute.targetHttpProxies.list", + // "id": "compute.targetInstances.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -35394,6 +49411,11 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -35407,9 +49429,9 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt // "type": "string" // } // }, - // "path": "{project}/global/targetHttpProxies", + // "path": "{project}/aggregated/targetInstances", // "response": { - // "$ref": "TargetHttpProxyList" + // "$ref": "TargetInstanceAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -35423,7 +49445,7 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *TargetHttpProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpProxyList) error) error { +func (c *TargetInstancesAggregatedListCall) Pages(ctx context.Context, f func(*TargetInstanceAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -35441,31 +49463,32 @@ func (c *TargetHttpProxiesListCall) Pages(ctx context.Context, f func(*TargetHtt } } -// method id "compute.targetHttpProxies.setUrlMap": +// method id "compute.targetInstances.delete": -type TargetHttpProxiesSetUrlMapCall struct { - s *Service - project string - targetHttpProxy string - urlmapreference *UrlMapReference - urlParams_ gensupport.URLParams - ctx_ context.Context +type TargetInstancesDeleteCall struct { + s *Service + project string + zone string + targetInstance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetUrlMap: Changes the URL map for TargetHttpProxy. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/setUrlMap -func (r *TargetHttpProxiesService) SetUrlMap(project string, targetHttpProxy string, urlmapreference *UrlMapReference) *TargetHttpProxiesSetUrlMapCall { - c := &TargetHttpProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified TargetInstance resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/delete +func (r *TargetInstancesService) Delete(project string, zone string, targetInstance string) *TargetInstancesDeleteCall { + c := &TargetInstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetHttpProxy = targetHttpProxy - c.urlmapreference = urlmapreference + c.zone = zone + c.targetInstance = targetInstance return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
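The aggregatedList call above groups target instances by scope, and the OrderBy parameter added in this revision can return the newest resources first. A sketch that walks every page, assuming TargetInstanceAggregatedList keys its Items map by scope (for example zones/us-central1-f) and that each scoped list exposes a TargetInstances slice; neither detail appears in this hunk.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// ListAllTargetInstances prints every target instance in the project,
// newest first, across all zones.
func ListAllTargetInstances(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.TargetInstances.AggregatedList(project).
		OrderBy("creationTimestamp desc").
		Fields("items", "nextPageToken") // partial response: only what the loop reads
	return call.Pages(ctx, func(page *compute.TargetInstanceAggregatedList) error {
		for scope, scoped := range page.Items { // assumed: map keyed by scope
			for _, ti := range scoped.TargetInstances { // assumed field name
				fmt.Printf("%s: %s\n", scope, ti.Name)
			}
		}
		return nil
	})
}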
-func (c *TargetHttpProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *TargetHttpProxiesSetUrlMapCall { +func (c *TargetInstancesDeleteCall) Fields(s ...googleapi.Field) *TargetInstancesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -35473,166 +49496,48 @@ func (c *TargetHttpProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *TargetHtt // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpProxiesSetUrlMapCall) Context(ctx context.Context) *TargetHttpProxiesSetUrlMapCall { +func (c *TargetInstancesDeleteCall) Context(ctx context.Context) *TargetInstancesDeleteCall { c.ctx_ = ctx return c } -func (c *TargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpProxy": c.targetHttpProxy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetHttpProxies.setUrlMap" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
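Header, introduced for every call type in this revision, exposes the outgoing request headers so a caller can attach extra metadata before Do runs; doRequest copies the map into the HTTP request. A sketch against the delete call; the header name is purely illustrative.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// DeleteWithExtraHeader deletes a target instance while attaching an
// arbitrary header to the request (the header name is hypothetical).
func DeleteWithExtraHeader(ctx context.Context, svc *compute.Service, project, zone, name, reqID string) (*compute.Operation, error) {
	call := svc.TargetInstances.Delete(project, zone, name).Context(ctx)
	call.Header().Set("X-Example-Request-Id", reqID)
	return call.Do()
}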
+func (c *TargetInstancesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Changes the URL map for TargetHttpProxy.", - // "httpMethod": "POST", - // "id": "compute.targetHttpProxies.setUrlMap", - // "parameterOrder": [ - // "project", - // "targetHttpProxy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "targetHttpProxy": { - // "description": "Name of the TargetHttpProxy to set a URL map for.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap", - // "request": { - // "$ref": "UrlMapReference" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetHttpsProxies.delete": - -type TargetHttpsProxiesDeleteCall struct { - s *Service - project string - targetHttpsProxy string - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Delete: Deletes the specified TargetHttpsProxy resource. -func (r *TargetHttpsProxiesService) Delete(project string, targetHttpsProxy string) *TargetHttpsProxiesDeleteCall { - c := &TargetHttpsProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targetHttpsProxy = targetHttpsProxy - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetHttpsProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetHttpsProxiesDeleteCall) Context(ctx context.Context) *TargetHttpsProxiesDeleteCall { - c.ctx_ = ctx - return c + return c.header_ } -func (c *TargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpsProxy": c.targetHttpsProxy, + "project": c.project, + "zone": c.zone, + "targetInstance": c.targetInstance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpsProxies.delete" call. +// Do executes the "compute.targetInstances.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35663,12 +49568,13 @@ func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Deletes the specified TargetHttpsProxy resource.", + // "description": "Deletes the specified TargetInstance resource.", // "httpMethod": "DELETE", - // "id": "compute.targetHttpsProxies.delete", + // "id": "compute.targetInstances.delete", // "parameterOrder": [ // "project", - // "targetHttpsProxy" + // "zone", + // "targetInstance" // ], // "parameters": { // "project": { @@ -35678,15 +49584,22 @@ func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operat // "required": true, // "type": "string" // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource to delete.", + // "targetInstance": { + // "description": "Name of the TargetInstance resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", + // "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", // "response": { // "$ref": "Operation" // }, @@ -35698,30 +49611,34 @@ func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.targetHttpsProxies.get": +// method id "compute.targetInstances.get": -type TargetHttpsProxiesGetCall struct { - s *Service - project string - targetHttpsProxy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type TargetInstancesGetCall struct { + s *Service + project string + zone string + targetInstance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified TargetHttpsProxy resource. Get a list of -// available target HTTPS proxies by making a list() request. -func (r *TargetHttpsProxiesService) Get(project string, targetHttpsProxy string) *TargetHttpsProxiesGetCall { - c := &TargetHttpsProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified TargetInstance resource. 
Get a list of +// available target instances by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/get +func (r *TargetInstancesService) Get(project string, zone string, targetInstance string) *TargetInstancesGetCall { + c := &TargetInstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetHttpsProxy = targetHttpsProxy + c.zone = zone + c.targetInstance = targetInstance return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpsProxiesGetCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesGetCall { +func (c *TargetInstancesGetCall) Fields(s ...googleapi.Field) *TargetInstancesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -35731,7 +49648,7 @@ func (c *TargetHttpsProxiesGetCall) Fields(s ...googleapi.Field) *TargetHttpsPro // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *TargetHttpsProxiesGetCall) IfNoneMatch(entityTag string) *TargetHttpsProxiesGetCall { +func (c *TargetInstancesGetCall) IfNoneMatch(entityTag string) *TargetInstancesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -35739,38 +49656,51 @@ func (c *TargetHttpsProxiesGetCall) IfNoneMatch(entityTag string) *TargetHttpsPr // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpsProxiesGetCall) Context(ctx context.Context) *TargetHttpsProxiesGetCall { +func (c *TargetInstancesGetCall) Context(ctx context.Context) *TargetInstancesGetCall { c.ctx_ = ctx return c } -func (c *TargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetInstancesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetInstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpsProxy": c.targetHttpsProxy, + "project": c.project, + "zone": c.zone, + "targetInstance": c.targetInstance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpsProxies.get" call. -// Exactly one of *TargetHttpsProxy or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TargetHttpsProxy.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.targetInstances.get" call. 
+// Exactly one of *TargetInstance or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TargetInstance.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxy, error) { +func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstance, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35789,7 +49719,7 @@ func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHtt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetHttpsProxy{ + ret := &TargetInstance{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -35801,12 +49731,13 @@ func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHtt } return ret, nil // { - // "description": "Returns the specified TargetHttpsProxy resource. Get a list of available target HTTPS proxies by making a list() request.", + // "description": "Returns the specified TargetInstance resource. Get a list of available target instances by making a list() request.", // "httpMethod": "GET", - // "id": "compute.targetHttpsProxies.get", + // "id": "compute.targetInstances.get", // "parameterOrder": [ // "project", - // "targetHttpsProxy" + // "zone", + // "targetInstance" // ], // "parameters": { // "project": { @@ -35816,17 +49747,24 @@ func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHtt // "required": true, // "type": "string" // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource to return.", + // "targetInstance": { + // "description": "Name of the TargetInstance resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", + // "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", // "response": { - // "$ref": "TargetHttpsProxy" + // "$ref": "TargetInstance" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -35837,29 +49775,33 @@ func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHtt } -// method id "compute.targetHttpsProxies.insert": +// method id "compute.targetInstances.insert": -type TargetHttpsProxiesInsertCall struct { - s *Service - project string - targethttpsproxy *TargetHttpsProxy - urlParams_ gensupport.URLParams - ctx_ context.Context +type TargetInstancesInsertCall struct { + s *Service + project string + zone string + targetinstance *TargetInstance + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a TargetHttpsProxy resource in the specified project -// using the data included in the request. 
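The get call supports conditional requests: IfNoneMatch sends an If-None-Match header and googleapi.IsNotModified recognizes the resulting 304. A sketch, assuming the server returns an ETag for the resource; cachedTag would come from a previous response via the embedded googleapi.ServerResponse.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// GetIfChanged refetches a target instance only if it changed since cachedTag.
// It returns (nil, nil) when the server answers 304 Not Modified.
func GetIfChanged(ctx context.Context, svc *compute.Service, project, zone, name, cachedTag string) (*compute.TargetInstance, error) {
	ti, err := svc.TargetInstances.Get(project, zone, name).
		IfNoneMatch(cachedTag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // unchanged since the cached copy
	}
	if err != nil {
		return nil, err
	}
	// The next cachedTag can be read from ti.ServerResponse.Header.Get("Etag").
	return ti, nil
}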
-func (r *TargetHttpsProxiesService) Insert(project string, targethttpsproxy *TargetHttpsProxy) *TargetHttpsProxiesInsertCall { - c := &TargetHttpsProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a TargetInstance resource in the specified project +// and zone using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/insert +func (r *TargetInstancesService) Insert(project string, zone string, targetinstance *TargetInstance) *TargetInstancesInsertCall { + c := &TargetInstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targethttpsproxy = targethttpsproxy + c.zone = zone + c.targetinstance = targetinstance return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpsProxiesInsertCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesInsertCall { +func (c *TargetInstancesInsertCall) Fields(s ...googleapi.Field) *TargetInstancesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -35867,39 +49809,52 @@ func (c *TargetHttpsProxiesInsertCall) Fields(s ...googleapi.Field) *TargetHttps // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpsProxiesInsertCall) Context(ctx context.Context) *TargetHttpsProxiesInsertCall { +func (c *TargetInstancesInsertCall) Context(ctx context.Context) *TargetInstancesInsertCall { c.ctx_ = ctx return c } -func (c *TargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetInstancesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetInstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetinstance) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpsProxies.insert" call. +// Do executes the "compute.targetInstances.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35930,11 +49885,12 @@ func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request.", + // "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.targetHttpsProxies.insert", + // "id": "compute.targetInstances.insert", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "project": { @@ -35943,11 +49899,18 @@ func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operat // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/targetHttpsProxies", + // "path": "{project}/zones/{zone}/targetInstances", // "request": { - // "$ref": "TargetHttpsProxy" + // "$ref": "TargetInstance" // }, // "response": { // "$ref": "Operation" @@ -35960,21 +49923,25 @@ func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.targetHttpsProxies.list": +// method id "compute.targetInstances.list": -type TargetHttpsProxiesListCall struct { +type TargetInstancesListCall struct { s *Service project string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of TargetHttpsProxy resources available to -// the specified project. -func (r *TargetHttpsProxiesService) List(project string) *TargetHttpsProxiesListCall { - c := &TargetHttpsProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of TargetInstance resources available to the +// specified project and zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/list +func (r *TargetInstancesService) List(project string, zone string) *TargetInstancesListCall { + c := &TargetInstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone return c } @@ -36005,7 +49972,7 @@ func (r *TargetHttpsProxiesService) List(project string) *TargetHttpsProxiesList // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *TargetHttpsProxiesListCall) Filter(filter string) *TargetHttpsProxiesListCall { +func (c *TargetInstancesListCall) Filter(filter string) *TargetInstancesListCall { c.urlParams_.Set("filter", filter) return c } @@ -36015,15 +49982,32 @@ func (c *TargetHttpsProxiesListCall) Filter(filter string) *TargetHttpsProxiesLi // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in // subsequent list requests. 
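insert creates a zonal TargetInstance and, like the other mutations in this file, returns an Operation rather than the finished resource. A sketch, assuming TargetInstance carries Name and Instance (the self-link of the wrapped VM) fields as in the public API schema, which this hunk does not show.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// CreateTargetInstance wraps an existing VM in a target instance so it can
// serve as a forwarding-rule target. instanceLink is the VM's self-link.
func CreateTargetInstance(ctx context.Context, svc *compute.Service, project, zone, name, instanceLink string) (*compute.Operation, error) {
	ti := &compute.TargetInstance{
		Name:     name,         // assumed field
		Instance: instanceLink, // assumed field
	}
	return svc.TargetInstances.Insert(project, zone, ti).Context(ctx).Do()
}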
-func (c *TargetHttpsProxiesListCall) MaxResults(maxResults int64) *TargetHttpsProxiesListCall { +func (c *TargetInstancesListCall) MaxResults(maxResults int64) *TargetInstancesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *TargetInstancesListCall) OrderBy(orderBy string) *TargetInstancesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *TargetHttpsProxiesListCall) PageToken(pageToken string) *TargetHttpsProxiesListCall { +func (c *TargetInstancesListCall) PageToken(pageToken string) *TargetInstancesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -36031,7 +50015,7 @@ func (c *TargetHttpsProxiesListCall) PageToken(pageToken string) *TargetHttpsPro // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpsProxiesListCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesListCall { +func (c *TargetInstancesListCall) Fields(s ...googleapi.Field) *TargetInstancesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -36041,7 +50025,7 @@ func (c *TargetHttpsProxiesListCall) Fields(s ...googleapi.Field) *TargetHttpsPr // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *TargetHttpsProxiesListCall) IfNoneMatch(entityTag string) *TargetHttpsProxiesListCall { +func (c *TargetInstancesListCall) IfNoneMatch(entityTag string) *TargetInstancesListCall { c.ifNoneMatch_ = entityTag return c } @@ -36049,37 +50033,50 @@ func (c *TargetHttpsProxiesListCall) IfNoneMatch(entityTag string) *TargetHttpsP // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpsProxiesListCall) Context(ctx context.Context) *TargetHttpsProxiesListCall { +func (c *TargetInstancesListCall) Context(ctx context.Context) *TargetInstancesListCall { c.ctx_ = ctx return c } -func (c *TargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *TargetInstancesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpsProxies.list" call. -// Exactly one of *TargetHttpsProxyList or error will be non-nil. Any +// Do executes the "compute.targetInstances.list" call. +// Exactly one of *TargetInstanceList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *TargetHttpsProxyList.ServerResponse.Header or (if a response was +// *TargetInstanceList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxyList, error) { +func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInstanceList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36098,7 +50095,7 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetHttpsProxyList{ + ret := &TargetInstanceList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -36110,11 +50107,12 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt } return ret, nil // { - // "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project.", + // "description": "Retrieves a list of TargetInstance resources available to the specified project and zone.", // "httpMethod": "GET", - // "id": "compute.targetHttpsProxies.list", + // "id": "compute.targetInstances.list", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "filter": { @@ -36131,6 +50129,11 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -36142,11 +50145,18 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/targetHttpsProxies", + // "path": "{project}/zones/{zone}/targetInstances", // "response": { - // "$ref": "TargetHttpsProxyList" + // "$ref": "TargetInstanceList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -36160,7 +50170,7 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *TargetHttpsProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpsProxyList) error) error { +func (c *TargetInstancesListCall) Pages(ctx context.Context, f func(*TargetInstanceList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -36178,30 +50188,34 @@ func (c *TargetHttpsProxiesListCall) Pages(ctx context.Context, f func(*TargetHt } } -// method id "compute.targetHttpsProxies.setSslCertificates": +// method id "compute.targetPools.addHealthCheck": -type TargetHttpsProxiesSetSslCertificatesCall struct { - s *Service - project string - targetHttpsProxy string - targethttpsproxiessetsslcertificatesrequest *TargetHttpsProxiesSetSslCertificatesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context +type TargetPoolsAddHealthCheckCall struct { + s *Service + project string + region string + targetPool string + targetpoolsaddhealthcheckrequest *TargetPoolsAddHealthCheckRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. -func (r *TargetHttpsProxiesService) SetSslCertificates(project string, targetHttpsProxy string, targethttpsproxiessetsslcertificatesrequest *TargetHttpsProxiesSetSslCertificatesRequest) *TargetHttpsProxiesSetSslCertificatesCall { - c := &TargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddHealthCheck: Adds health check URLs to a target pool. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/addHealthCheck +func (r *TargetPoolsService) AddHealthCheck(project string, region string, targetPool string, targetpoolsaddhealthcheckrequest *TargetPoolsAddHealthCheckRequest) *TargetPoolsAddHealthCheckCall { + c := &TargetPoolsAddHealthCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetHttpsProxy = targetHttpsProxy - c.targethttpsproxiessetsslcertificatesrequest = targethttpsproxiessetsslcertificatesrequest + c.region = region + c.targetPool = targetPool + c.targetpoolsaddhealthcheckrequest = targetpoolsaddhealthcheckrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpsProxiesSetSslCertificatesCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetSslCertificatesCall { +func (c *TargetPoolsAddHealthCheckCall) Fields(s ...googleapi.Field) *TargetPoolsAddHealthCheckCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -36209,40 +50223,53 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpsProxiesSetSslCertificatesCall) Context(ctx context.Context) *TargetHttpsProxiesSetSslCertificatesCall { +func (c *TargetPoolsAddHealthCheckCall) Context(ctx context.Context) *TargetPoolsAddHealthCheckCall { c.ctx_ = ctx return c } -func (c *TargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetPoolsAddHealthCheckCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetPoolsAddHealthCheckCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxiessetsslcertificatesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsaddhealthcheckrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpsProxy": c.targetHttpsProxy, + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpsProxies.setSslCertificates" call. +// Do executes the "compute.targetPools.addHealthCheck" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36273,12 +50300,13 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Replaces SslCertificates for TargetHttpsProxy.", + // "description": "Adds health check URLs to a target pool.", // "httpMethod": "POST", - // "id": "compute.targetHttpsProxies.setSslCertificates", + // "id": "compute.targetPools.addHealthCheck", // "parameterOrder": [ // "project", - // "targetHttpsProxy" + // "region", + // "targetPool" // ], // "parameters": { // "project": { @@ -36288,17 +50316,24 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOpti // "required": true, // "type": "string" // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the target pool to add a health check to.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + // "path": "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", // "request": { - // "$ref": "TargetHttpsProxiesSetSslCertificatesRequest" + // "$ref": "TargetPoolsAddHealthCheckRequest" // }, // "response": { // "$ref": "Operation" @@ -36311,30 +50346,34 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOpti } -// method id "compute.targetHttpsProxies.setUrlMap": +// method id "compute.targetPools.addInstance": -type TargetHttpsProxiesSetUrlMapCall struct { - s *Service - project string - targetHttpsProxy string - urlmapreference *UrlMapReference - urlParams_ gensupport.URLParams - ctx_ context.Context +type TargetPoolsAddInstanceCall struct { + s *Service + project string + region string + targetPool string + targetpoolsaddinstancerequest *TargetPoolsAddInstanceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetUrlMap: Changes the URL map for TargetHttpsProxy. -func (r *TargetHttpsProxiesService) SetUrlMap(project string, targetHttpsProxy string, urlmapreference *UrlMapReference) *TargetHttpsProxiesSetUrlMapCall { - c := &TargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddInstance: Adds an instance to a target pool. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/addInstance +func (r *TargetPoolsService) AddInstance(project string, region string, targetPool string, targetpoolsaddinstancerequest *TargetPoolsAddInstanceRequest) *TargetPoolsAddInstanceCall { + c := &TargetPoolsAddInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetHttpsProxy = targetHttpsProxy - c.urlmapreference = urlmapreference + c.region = region + c.targetPool = targetPool + c.targetpoolsaddinstancerequest = targetpoolsaddinstancerequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpsProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetUrlMapCall { +func (c *TargetPoolsAddInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsAddInstanceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -36342,40 +50381,53 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *TargetHt // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpsProxiesSetUrlMapCall) Context(ctx context.Context) *TargetHttpsProxiesSetUrlMapCall { +func (c *TargetPoolsAddInstanceCall) Context(ctx context.Context) *TargetPoolsAddInstanceCall { c.ctx_ = ctx return c } -func (c *TargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetPoolsAddInstanceCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetPoolsAddInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsaddinstancerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addInstance") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpsProxy": c.targetHttpsProxy, + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpsProxies.setUrlMap" call. +// Do executes the "compute.targetPools.addInstance" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36406,12 +50458,13 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Changes the URL map for TargetHttpsProxy.", + // "description": "Adds an instance to a target pool.", // "httpMethod": "POST", - // "id": "compute.targetHttpsProxies.setUrlMap", + // "id": "compute.targetPools.addInstance", // "parameterOrder": [ // "project", - // "targetHttpsProxy" + // "region", + // "targetPool" // ], // "parameters": { // "project": { @@ -36421,17 +50474,24 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource whose URL map is to be set.", + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the TargetPool resource to add instances to.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + // "path": "{project}/regions/{region}/targetPools/{targetPool}/addInstance", // "request": { - // "$ref": "UrlMapReference" + // "$ref": "TargetPoolsAddInstanceRequest" // }, // "response": { // "$ref": "Operation" @@ -36444,20 +50504,21 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.targetInstances.aggregatedList": +// method id "compute.targetPools.aggregatedList": -type TargetInstancesAggregatedListCall struct { +type TargetPoolsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of target instances. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/aggregatedList -func (r *TargetInstancesService) AggregatedList(project string) *TargetInstancesAggregatedListCall { - c := &TargetInstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of target pools. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/aggregatedList +func (r *TargetPoolsService) AggregatedList(project string) *TargetPoolsAggregatedListCall { + c := &TargetPoolsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -36489,7 +50550,7 @@ func (r *TargetInstancesService) AggregatedList(project string) *TargetInstances // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. 
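// NOTE (editorial, not part of the generated diff): a minimal sketch of the
// filter syntax described above, assuming an authorized *compute.Service named
// svc constructed elsewhere; "my-project" and "example-pool" are placeholders.
//
//	call := svc.TargetPools.AggregatedList("my-project").
//		Filter("name ne example-pool").
//		MaxResults(50)
//	agg, err := call.Do() // agg is a *compute.TargetPoolAggregatedList
//	if err != nil {
//		log.Fatalf("targetPools.aggregatedList: %v", err)
//	}
//	_ = agg
//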
-func (c *TargetInstancesAggregatedListCall) Filter(filter string) *TargetInstancesAggregatedListCall { +func (c *TargetPoolsAggregatedListCall) Filter(filter string) *TargetPoolsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -36499,15 +50560,32 @@ func (c *TargetInstancesAggregatedListCall) Filter(filter string) *TargetInstanc // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in // subsequent list requests. -func (c *TargetInstancesAggregatedListCall) MaxResults(maxResults int64) *TargetInstancesAggregatedListCall { +func (c *TargetPoolsAggregatedListCall) MaxResults(maxResults int64) *TargetPoolsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *TargetPoolsAggregatedListCall) OrderBy(orderBy string) *TargetPoolsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *TargetInstancesAggregatedListCall) PageToken(pageToken string) *TargetInstancesAggregatedListCall { +func (c *TargetPoolsAggregatedListCall) PageToken(pageToken string) *TargetPoolsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -36515,7 +50593,7 @@ func (c *TargetInstancesAggregatedListCall) PageToken(pageToken string) *TargetI // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetInstancesAggregatedListCall) Fields(s ...googleapi.Field) *TargetInstancesAggregatedListCall { +func (c *TargetPoolsAggregatedListCall) Fields(s ...googleapi.Field) *TargetPoolsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -36525,7 +50603,7 @@ func (c *TargetInstancesAggregatedListCall) Fields(s ...googleapi.Field) *Target // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *TargetInstancesAggregatedListCall) IfNoneMatch(entityTag string) *TargetInstancesAggregatedListCall { +func (c *TargetPoolsAggregatedListCall) IfNoneMatch(entityTag string) *TargetPoolsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -36533,20 +50611,32 @@ func (c *TargetInstancesAggregatedListCall) IfNoneMatch(entityTag string) *Targe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *TargetInstancesAggregatedListCall) Context(ctx context.Context) *TargetInstancesAggregatedListCall { +func (c *TargetPoolsAggregatedListCall) Context(ctx context.Context) *TargetPoolsAggregatedListCall { c.ctx_ = ctx return c } -func (c *TargetInstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetPoolsAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetPoolsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetPools") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -36556,14 +50646,14 @@ func (c *TargetInstancesAggregatedListCall) doRequest(alt string) (*http.Respons return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetInstances.aggregatedList" call. -// Exactly one of *TargetInstanceAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *TargetInstanceAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.targetPools.aggregatedList" call. +// Exactly one of *TargetPoolAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TargetPoolAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetInstanceAggregatedList, error) { +func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetPoolAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36582,7 +50672,7 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetInstanceAggregatedList{ + ret := &TargetPoolAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -36594,9 +50684,9 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Retrieves an aggregated list of target instances.", + // "description": "Retrieves an aggregated list of target pools.", // "httpMethod": "GET", - // "id": "compute.targetInstances.aggregatedList", + // "id": "compute.targetPools.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -36615,6 +50705,11 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -36628,9 +50723,9 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T // "type": "string" // } // }, - // "path": "{project}/aggregated/targetInstances", + // "path": "{project}/aggregated/targetPools", // "response": { - // "$ref": "TargetInstanceAggregatedList" + // "$ref": "TargetPoolAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -36644,7 +50739,7 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *TargetInstancesAggregatedListCall) Pages(ctx context.Context, f func(*TargetInstanceAggregatedList) error) error { +func (c *TargetPoolsAggregatedListCall) Pages(ctx context.Context, f func(*TargetPoolAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -36662,31 +50757,32 @@ func (c *TargetInstancesAggregatedListCall) Pages(ctx context.Context, f func(*T } } -// method id "compute.targetInstances.delete": +// method id "compute.targetPools.delete": -type TargetInstancesDeleteCall struct { - s *Service - project string - zone string - targetInstance string - urlParams_ gensupport.URLParams - ctx_ context.Context +type TargetPoolsDeleteCall struct { + s *Service + project string + region string + targetPool string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified TargetInstance resource. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/delete -func (r *TargetInstancesService) Delete(project string, zone string, targetInstance string) *TargetInstancesDeleteCall { - c := &TargetInstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified target pool. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/delete +func (r *TargetPoolsService) Delete(project string, region string, targetPool string) *TargetPoolsDeleteCall { + c := &TargetPoolsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.targetInstance = targetInstance + c.region = region + c.targetPool = targetPool return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetInstancesDeleteCall) Fields(s ...googleapi.Field) *TargetInstancesDeleteCall { +func (c *TargetPoolsDeleteCall) Fields(s ...googleapi.Field) *TargetPoolsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -36694,36 +50790,48 @@ func (c *TargetInstancesDeleteCall) Fields(s ...googleapi.Field) *TargetInstance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetInstancesDeleteCall) Context(ctx context.Context) *TargetInstancesDeleteCall { +func (c *TargetPoolsDeleteCall) Context(ctx context.Context) *TargetPoolsDeleteCall { c.ctx_ = ctx return c } -func (c *TargetInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetPoolsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetPoolsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "targetInstance": c.targetInstance, + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetInstances.delete" call. +// Do executes the "compute.targetPools.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36754,13 +50862,13 @@ func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified TargetInstance resource.", + // "description": "Deletes the specified target pool.", // "httpMethod": "DELETE", - // "id": "compute.targetInstances.delete", + // "id": "compute.targetPools.delete", // "parameterOrder": [ // "project", - // "zone", - // "targetInstance" + // "region", + // "targetPool" // ], // "parameters": { // "project": { @@ -36770,22 +50878,22 @@ func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "targetInstance": { - // "description": "Name of the TargetInstance resource to delete.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "Name of the zone scoping this request.", + // "targetPool": { + // "description": "Name of the TargetPool resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", + // "path": "{project}/regions/{region}/targetPools/{targetPool}", // "response": { // "$ref": "Operation" // }, @@ -36797,33 +50905,34 @@ func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.targetInstances.get": +// method id "compute.targetPools.get": -type TargetInstancesGetCall struct { - s *Service - project string - zone string - targetInstance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type TargetPoolsGetCall struct { + s *Service + project string + region string + targetPool string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified TargetInstance resource. Get a list of -// available target instances by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/get -func (r *TargetInstancesService) Get(project string, zone string, targetInstance string) *TargetInstancesGetCall { - c := &TargetInstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified target pool. Get a list of available +// target pools by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/get +func (r *TargetPoolsService) Get(project string, region string, targetPool string) *TargetPoolsGetCall { + c := &TargetPoolsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.targetInstance = targetInstance + c.region = region + c.targetPool = targetPool return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *TargetInstancesGetCall) Fields(s ...googleapi.Field) *TargetInstancesGetCall { +func (c *TargetPoolsGetCall) Fields(s ...googleapi.Field) *TargetPoolsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -36833,7 +50942,7 @@ func (c *TargetInstancesGetCall) Fields(s ...googleapi.Field) *TargetInstancesGe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *TargetInstancesGetCall) IfNoneMatch(entityTag string) *TargetInstancesGetCall { +func (c *TargetPoolsGetCall) IfNoneMatch(entityTag string) *TargetPoolsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -36841,39 +50950,51 @@ func (c *TargetInstancesGetCall) IfNoneMatch(entityTag string) *TargetInstancesG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetInstancesGetCall) Context(ctx context.Context) *TargetInstancesGetCall { +func (c *TargetPoolsGetCall) Context(ctx context.Context) *TargetPoolsGetCall { c.ctx_ = ctx return c } -func (c *TargetInstancesGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetPoolsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetPoolsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "targetInstance": c.targetInstance, + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetInstances.get" call. -// Exactly one of *TargetInstance or error will be non-nil. Any non-2xx +// Do executes the "compute.targetPools.get" call. +// Exactly one of *TargetPool or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *TargetInstance.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstance, error) { +// *TargetPool.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36892,7 +51013,7 @@ func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstan if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetInstance{ + ret := &TargetPool{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -36904,13 +51025,13 @@ func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstan } return ret, nil // { - // "description": "Returns the specified TargetInstance resource. Get a list of available target instances by making a list() request.", + // "description": "Returns the specified target pool. Get a list of available target pools by making a list() request.", // "httpMethod": "GET", - // "id": "compute.targetInstances.get", + // "id": "compute.targetPools.get", // "parameterOrder": [ // "project", - // "zone", - // "targetInstance" + // "region", + // "targetPool" // ], // "parameters": { // "project": { @@ -36920,24 +51041,24 @@ func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstan // "required": true, // "type": "string" // }, - // "targetInstance": { - // "description": "Name of the TargetInstance resource to return.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "Name of the zone scoping this request.", + // "targetPool": { + // "description": "Name of the TargetPool resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", + // "path": "{project}/regions/{region}/targetPools/{targetPool}", // "response": { - // "$ref": "TargetInstance" + // "$ref": "TargetPool" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -36948,32 +51069,193 @@ func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstan } -// method id "compute.targetInstances.insert": +// method id "compute.targetPools.getHealth": -type TargetInstancesInsertCall struct { - s *Service - project string - zone string - targetinstance *TargetInstance - urlParams_ gensupport.URLParams - ctx_ context.Context +type TargetPoolsGetHealthCall struct { + s *Service + project string + region string + targetPool string + instancereference *InstanceReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GetHealth: Gets the most recent health check results for each IP for +// the instance that is referenced by the given target pool. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/getHealth +func (r *TargetPoolsService) GetHealth(project string, region string, targetPool string, instancereference *InstanceReference) *TargetPoolsGetHealthCall { + c := &TargetPoolsGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetPool = targetPool + c.instancereference = instancereference + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *TargetPoolsGetHealthCall) Fields(s ...googleapi.Field) *TargetPoolsGetHealthCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetPoolsGetHealthCall) Context(ctx context.Context) *TargetPoolsGetHealthCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetPoolsGetHealthCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetPoolsGetHealthCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancereference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/getHealth") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetPools.getHealth" call. +// Exactly one of *TargetPoolInstanceHealth or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TargetPoolInstanceHealth.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPoolInstanceHealth, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetPoolInstanceHealth{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the most recent health check results for each IP for the instance that is referenced by the given target pool.", + // "httpMethod": "POST", + // "id": "compute.targetPools.getHealth", + // "parameterOrder": [ + // "project", + // "region", + // "targetPool" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the TargetPool resource to which the queried instance belongs.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetPools/{targetPool}/getHealth", + // "request": { + // "$ref": "InstanceReference" + // }, + // "response": { + // "$ref": "TargetPoolInstanceHealth" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetPools.insert": + +type TargetPoolsInsertCall struct { + s *Service + project string + region string + targetpool *TargetPool + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a TargetInstance resource in the specified project -// and zone using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/insert -func (r *TargetInstancesService) Insert(project string, zone string, targetinstance *TargetInstance) *TargetInstancesInsertCall { - c := &TargetInstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a target pool in the specified project and region +// using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/insert +func (r *TargetPoolsService) Insert(project string, region string, targetpool *TargetPool) *TargetPoolsInsertCall { + c := &TargetPoolsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.targetinstance = targetinstance + c.region = region + c.targetpool = targetpool return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *TargetInstancesInsertCall) Fields(s ...googleapi.Field) *TargetInstancesInsertCall { +func (c *TargetPoolsInsertCall) Fields(s ...googleapi.Field) *TargetPoolsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -36981,40 +51263,52 @@ func (c *TargetInstancesInsertCall) Fields(s ...googleapi.Field) *TargetInstance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetInstancesInsertCall) Context(ctx context.Context) *TargetInstancesInsertCall { +func (c *TargetPoolsInsertCall) Context(ctx context.Context) *TargetPoolsInsertCall { c.ctx_ = ctx return c } -func (c *TargetInstancesInsertCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetPoolsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetPoolsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetinstance) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpool) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetInstances.insert" call. +// Do executes the "compute.targetPools.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -37045,12 +51339,12 @@ func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.", + // "description": "Creates a target pool in the specified project and region using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.targetInstances.insert", + // "id": "compute.targetPools.insert", // "parameterOrder": [ // "project", - // "zone" + // "region" // ], // "parameters": { // "project": { @@ -37060,17 +51354,17 @@ func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "zone": { - // "description": "Name of the zone scoping this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/targetInstances", + // "path": "{project}/regions/{region}/targetPools", // "request": { - // "$ref": "TargetInstance" + // "$ref": "TargetPool" // }, // "response": { // "$ref": "Operation" @@ -37083,24 +51377,25 @@ func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.targetInstances.list": +// method id "compute.targetPools.list": -type TargetInstancesListCall struct { +type TargetPoolsListCall struct { s *Service project string - zone string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of TargetInstance resources available to the -// specified project and zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/list -func (r *TargetInstancesService) List(project string, zone string) *TargetInstancesListCall { - c := &TargetInstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of target pools available to the specified +// project and region. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/list +func (r *TargetPoolsService) List(project string, region string) *TargetPoolsListCall { + c := &TargetPoolsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone + c.region = region return c } @@ -37131,7 +51426,7 @@ func (r *TargetInstancesService) List(project string, zone string) *TargetInstan // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *TargetInstancesListCall) Filter(filter string) *TargetInstancesListCall { +func (c *TargetPoolsListCall) Filter(filter string) *TargetPoolsListCall { c.urlParams_.Set("filter", filter) return c } @@ -37141,15 +51436,32 @@ func (c *TargetInstancesListCall) Filter(filter string) *TargetInstancesListCall // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in // subsequent list requests. 
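// NOTE (editorial, not part of the generated diff): a minimal pagination
// sketch for this list call, assuming an authorized *compute.Service named svc
// and placeholder project/region values; the Pages helper below wraps the
// maxResults/pageToken handling documented here, and the Items/Name field
// names are taken from the compute v1 schema.
//
//	ctx := context.Background()
//	err := svc.TargetPools.List("my-project", "us-central1").
//		MaxResults(100).
//		Pages(ctx, func(page *compute.TargetPoolList) error {
//			for _, pool := range page.Items {
//				fmt.Println(pool.Name)
//			}
//			return nil // a non-nil error here halts the iteration
//		})
//	if err != nil {
//		log.Fatalf("targetPools.list: %v", err)
//	}
//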
-func (c *TargetInstancesListCall) MaxResults(maxResults int64) *TargetInstancesListCall { +func (c *TargetPoolsListCall) MaxResults(maxResults int64) *TargetPoolsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *TargetPoolsListCall) OrderBy(orderBy string) *TargetPoolsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *TargetInstancesListCall) PageToken(pageToken string) *TargetInstancesListCall { +func (c *TargetPoolsListCall) PageToken(pageToken string) *TargetPoolsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -37157,7 +51469,7 @@ func (c *TargetInstancesListCall) PageToken(pageToken string) *TargetInstancesLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetInstancesListCall) Fields(s ...googleapi.Field) *TargetInstancesListCall { +func (c *TargetPoolsListCall) Fields(s ...googleapi.Field) *TargetPoolsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -37167,7 +51479,7 @@ func (c *TargetInstancesListCall) Fields(s ...googleapi.Field) *TargetInstancesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *TargetInstancesListCall) IfNoneMatch(entityTag string) *TargetInstancesListCall { +func (c *TargetPoolsListCall) IfNoneMatch(entityTag string) *TargetPoolsListCall { c.ifNoneMatch_ = entityTag return c } @@ -37175,38 +51487,50 @@ func (c *TargetInstancesListCall) IfNoneMatch(entityTag string) *TargetInstances // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetInstancesListCall) Context(ctx context.Context) *TargetInstancesListCall { +func (c *TargetPoolsListCall) Context(ctx context.Context) *TargetPoolsListCall { c.ctx_ = ctx return c } -func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *TargetPoolsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetPoolsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetInstances.list" call. -// Exactly one of *TargetInstanceList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TargetInstanceList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.targetPools.list" call. +// Exactly one of *TargetPoolList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TargetPoolList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInstanceList, error) { +func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -37225,7 +51549,7 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetInstanceList{ + ret := &TargetPoolList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -37237,12 +51561,12 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta } return ret, nil // { - // "description": "Retrieves a list of TargetInstance resources available to the specified project and zone.", + // "description": "Retrieves a list of target pools available to the specified project and region.", // "httpMethod": "GET", - // "id": "compute.targetInstances.list", + // "id": "compute.targetPools.list", // "parameterOrder": [ // "project", - // "zone" + // "region" // ], // "parameters": { // "filter": { @@ -37259,6 +51583,11 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -37271,17 +51600,17 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta // "required": true, // "type": "string" // }, - // "zone": { - // "description": "Name of the zone scoping this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/targetInstances", + // "path": "{project}/regions/{region}/targetPools", // "response": { - // "$ref": "TargetInstanceList" + // "$ref": "TargetPoolList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -37295,7 +51624,7 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *TargetInstancesListCall) Pages(ctx context.Context, f func(*TargetInstanceList) error) error { +func (c *TargetPoolsListCall) Pages(ctx context.Context, f func(*TargetPoolList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -37313,33 +51642,34 @@ func (c *TargetInstancesListCall) Pages(ctx context.Context, f func(*TargetInsta } } -// method id "compute.targetPools.addHealthCheck": +// method id "compute.targetPools.removeHealthCheck": -type TargetPoolsAddHealthCheckCall struct { - s *Service - project string - region string - targetPool string - targetpoolsaddhealthcheckrequest *TargetPoolsAddHealthCheckRequest - urlParams_ gensupport.URLParams - ctx_ context.Context +type TargetPoolsRemoveHealthCheckCall struct { + s *Service + project string + region string + targetPool string + targetpoolsremovehealthcheckrequest *TargetPoolsRemoveHealthCheckRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddHealthCheck: Adds health check URLs to a target pool. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/addHealthCheck -func (r *TargetPoolsService) AddHealthCheck(project string, region string, targetPool string, targetpoolsaddhealthcheckrequest *TargetPoolsAddHealthCheckRequest) *TargetPoolsAddHealthCheckCall { - c := &TargetPoolsAddHealthCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RemoveHealthCheck: Removes health check URL from a target pool. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/removeHealthCheck +func (r *TargetPoolsService) RemoveHealthCheck(project string, region string, targetPool string, targetpoolsremovehealthcheckrequest *TargetPoolsRemoveHealthCheckRequest) *TargetPoolsRemoveHealthCheckCall { + c := &TargetPoolsRemoveHealthCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.targetPool = targetPool - c.targetpoolsaddhealthcheckrequest = targetpoolsaddhealthcheckrequest + c.targetpoolsremovehealthcheckrequest = targetpoolsremovehealthcheckrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetPoolsAddHealthCheckCall) Fields(s ...googleapi.Field) *TargetPoolsAddHealthCheckCall { +func (c *TargetPoolsRemoveHealthCheckCall) Fields(s ...googleapi.Field) *TargetPoolsRemoveHealthCheckCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -37347,22 +51677,34 @@ func (c *TargetPoolsAddHealthCheckCall) Fields(s ...googleapi.Field) *TargetPool // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetPoolsAddHealthCheckCall) Context(ctx context.Context) *TargetPoolsAddHealthCheckCall { +func (c *TargetPoolsRemoveHealthCheckCall) Context(ctx context.Context) *TargetPoolsRemoveHealthCheckCall { c.ctx_ = ctx return c } -func (c *TargetPoolsAddHealthCheckCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetPoolsRemoveHealthCheckCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetPoolsRemoveHealthCheckCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsaddhealthcheckrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremovehealthcheckrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -37374,14 +51716,14 @@ func (c *TargetPoolsAddHealthCheckCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.addHealthCheck" call. +// Do executes the "compute.targetPools.removeHealthCheck" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -37412,9 +51754,9 @@ func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Adds health check URLs to a target pool.", + // "description": "Removes health check URL from a target pool.", // "httpMethod": "POST", - // "id": "compute.targetPools.addHealthCheck", + // "id": "compute.targetPools.removeHealthCheck", // "parameterOrder": [ // "project", // "region", @@ -37429,23 +51771,23 @@ func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Opera // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "targetPool": { - // "description": "Name of the target pool to add a health check to.", + // "description": "Name of the target pool to remove health checks from.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", + // "path": "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck", // "request": { - // "$ref": "TargetPoolsAddHealthCheckRequest" + // "$ref": "TargetPoolsRemoveHealthCheckRequest" // }, // "response": { // "$ref": "Operation" @@ -37458,33 +51800,34 @@ func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Opera } -// method id "compute.targetPools.addInstance": +// method id "compute.targetPools.removeInstance": -type TargetPoolsAddInstanceCall struct { - s *Service - project string - region string - targetPool string - targetpoolsaddinstancerequest *TargetPoolsAddInstanceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context +type TargetPoolsRemoveInstanceCall struct { + s *Service + project string + region string + targetPool string + targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddInstance: Adds an instance to a target pool. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/addInstance -func (r *TargetPoolsService) AddInstance(project string, region string, targetPool string, targetpoolsaddinstancerequest *TargetPoolsAddInstanceRequest) *TargetPoolsAddInstanceCall { - c := &TargetPoolsAddInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RemoveInstance: Removes instance URL from a target pool. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/removeInstance +func (r *TargetPoolsService) RemoveInstance(project string, region string, targetPool string, targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest) *TargetPoolsRemoveInstanceCall { + c := &TargetPoolsRemoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.targetPool = targetPool - c.targetpoolsaddinstancerequest = targetpoolsaddinstancerequest + c.targetpoolsremoveinstancerequest = targetpoolsremoveinstancerequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetPoolsAddInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsAddInstanceCall { +func (c *TargetPoolsRemoveInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsRemoveInstanceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -37492,22 +51835,34 @@ func (c *TargetPoolsAddInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsAd // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetPoolsAddInstanceCall) Context(ctx context.Context) *TargetPoolsAddInstanceCall { +func (c *TargetPoolsRemoveInstanceCall) Context(ctx context.Context) *TargetPoolsRemoveInstanceCall { c.ctx_ = ctx return c } -func (c *TargetPoolsAddInstanceCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetPoolsRemoveInstanceCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetPoolsRemoveInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsaddinstancerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremoveinstancerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addInstance") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeInstance") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -37519,14 +51874,14 @@ func (c *TargetPoolsAddInstanceCall) doRequest(alt string) (*http.Response, erro return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.addInstance" call. +// Do executes the "compute.targetPools.removeInstance" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
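
RemoveInstance mirrors the AddInstance call it replaces in this hunk. A short sketch of detaching an instance, assuming TargetPoolsRemoveInstanceRequest carries a list of InstanceReference values (as in the compute/v1 client) and that the caller passes a fully qualified instance URL:

    package example

    import (
        compute "google.golang.org/api/compute/v1"
    )

    // drainInstance detaches one instance URL from a target pool and returns the
    // resulting Operation. Field names are assumed from the compute/v1 request types.
    func drainInstance(svc *compute.Service, project, region, pool, instanceURL string) (*compute.Operation, error) {
        req := &compute.TargetPoolsRemoveInstanceRequest{
            Instances: []*compute.InstanceReference{{Instance: instanceURL}},
        }
        return svc.TargetPools.RemoveInstance(project, region, pool, req).Do()
    }
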
-func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -37557,9 +51912,9 @@ func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Adds an instance to a target pool.", + // "description": "Removes instance URL from a target pool.", // "httpMethod": "POST", - // "id": "compute.targetPools.addInstance", + // "id": "compute.targetPools.removeInstance", // "parameterOrder": [ // "project", // "region", @@ -37581,16 +51936,16 @@ func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // }, // "targetPool": { - // "description": "Name of the TargetPool resource to add instances to.", + // "description": "Name of the TargetPool resource to remove instances from.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}/addInstance", + // "path": "{project}/regions/{region}/targetPools/{targetPool}/removeInstance", // "request": { - // "$ref": "TargetPoolsAddInstanceRequest" + // "$ref": "TargetPoolsRemoveInstanceRequest" // }, // "response": { // "$ref": "Operation" @@ -37603,269 +51958,78 @@ func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.targetPools.aggregatedList": +// method id "compute.targetPools.setBackup": -type TargetPoolsAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type TargetPoolsSetBackupCall struct { + s *Service + project string + region string + targetPool string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of target pools. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/aggregatedList -func (r *TargetPoolsService) AggregatedList(project string) *TargetPoolsAggregatedListCall { - c := &TargetPoolsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetBackup: Changes a backup target pool's configurations. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/setBackup +func (r *TargetPoolsService) SetBackup(project string, region string, targetPool string, targetreference *TargetReference) *TargetPoolsSetBackupCall { + c := &TargetPoolsSetBackupCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.targetPool = targetPool + c.targetreference = targetreference return c } -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. 
The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *TargetPoolsAggregatedListCall) Filter(filter string) *TargetPoolsAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. -func (c *TargetPoolsAggregatedListCall) MaxResults(maxResults int64) *TargetPoolsAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *TargetPoolsAggregatedListCall) PageToken(pageToken string) *TargetPoolsAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// FailoverRatio sets the optional parameter "failoverRatio": New +// failoverRatio value for the target pool. +func (c *TargetPoolsSetBackupCall) FailoverRatio(failoverRatio float64) *TargetPoolsSetBackupCall { + c.urlParams_.Set("failoverRatio", fmt.Sprint(failoverRatio)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetPoolsAggregatedListCall) Fields(s ...googleapi.Field) *TargetPoolsAggregatedListCall { +func (c *TargetPoolsSetBackupCall) Fields(s ...googleapi.Field) *TargetPoolsSetBackupCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetPoolsAggregatedListCall) IfNoneMatch(entityTag string) *TargetPoolsAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
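
The setBackup hunk above pairs a TargetReference body with an optional failoverRatio query parameter. A sketch of designating a backup pool for an existing primary pool; the backup pool URL is a placeholder:

    package example

    import (
        compute "google.golang.org/api/compute/v1"
    )

    // setBackupPool points primaryPool at backupPoolURL and asks Compute Engine to
    // fail traffic over once fewer than 10% of the primary pool's instances are
    // healthy. failoverRatio is the optional query parameter added above.
    func setBackupPool(svc *compute.Service, project, region, primaryPool, backupPoolURL string) (*compute.Operation, error) {
        ref := &compute.TargetReference{Target: backupPoolURL}
        return svc.TargetPools.
            SetBackup(project, region, primaryPool, ref).
            FailoverRatio(0.1).
            Do()
    }
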
-func (c *TargetPoolsAggregatedListCall) Context(ctx context.Context) *TargetPoolsAggregatedListCall { +func (c *TargetPoolsSetBackupCall) Context(ctx context.Context) *TargetPoolsSetBackupCall { c.ctx_ = ctx return c } -func (c *TargetPoolsAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetPoolsSetBackupCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetPools") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) + return c.header_ } -// Do executes the "compute.targetPools.aggregatedList" call. -// Exactly one of *TargetPoolAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TargetPoolAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetPoolAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } +func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) if err != nil { return nil, err } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TargetPoolAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an aggregated list of target pools.", - // "httpMethod": "GET", - // "id": "compute.targetPools.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", - // "format": "uint32", - // "location": "query", - // "maximum": "500", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/targetPools", - // "response": { - // "$ref": "TargetPoolAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *TargetPoolsAggregatedListCall) Pages(ctx context.Context, f func(*TargetPoolAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.targetPools.delete": - -type TargetPoolsDeleteCall struct { - s *Service - project string - region string - targetPool string - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Delete: Deletes the specified target pool. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/delete -func (r *TargetPoolsService) Delete(project string, region string, targetPool string) *TargetPoolsDeleteCall { - c := &TargetPoolsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.targetPool = targetPool - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *TargetPoolsDeleteCall) Fields(s ...googleapi.Field) *TargetPoolsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetPoolsDeleteCall) Context(ctx context.Context) *TargetPoolsDeleteCall { - c.ctx_ = ctx - return c -} - -func (c *TargetPoolsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/setBackup") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -37875,14 +52039,14 @@ func (c *TargetPoolsDeleteCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.delete" call. +// Do executes the "compute.targetPools.setBackup" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -37913,15 +52077,21 @@ func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified target pool.", - // "httpMethod": "DELETE", - // "id": "compute.targetPools.delete", + // "description": "Changes a backup target pool's configurations.", + // "httpMethod": "POST", + // "id": "compute.targetPools.setBackup", // "parameterOrder": [ // "project", // "region", // "targetPool" // ], // "parameters": { + // "failoverRatio": { + // "description": "New failoverRatio value for the target pool.", + // "format": "float", + // "location": "query", + // "type": "number" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -37937,14 +52107,17 @@ func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // }, // "targetPool": { - // "description": "Name of the TargetPool resource to delete.", + // "description": "Name of the TargetPool resource to set a backup pool for.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}", + // "path": "{project}/regions/{region}/targetPools/{targetPool}/setBackup", + // "request": { + // "$ref": "TargetReference" + // }, // "response": { // "$ref": "Operation" // }, @@ -37956,83 +52129,77 @@ func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.targetPools.get": +// method id "compute.targetSslProxies.delete": -type TargetPoolsGetCall struct { - s *Service - project string - region string - targetPool string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context +type TargetSslProxiesDeleteCall struct { + s *Service + project string + targetSslProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified target pool. Get a list of available -// target pools by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/get -func (r *TargetPoolsService) Get(project string, region string, targetPool string) *TargetPoolsGetCall { - c := &TargetPoolsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified TargetSslProxy resource. +func (r *TargetSslProxiesService) Delete(project string, targetSslProxy string) *TargetSslProxiesDeleteCall { + c := &TargetSslProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.targetPool = targetPool + c.targetSslProxy = targetSslProxy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetPoolsGetCall) Fields(s ...googleapi.Field) *TargetPoolsGetCall { +func (c *TargetSslProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetSslProxiesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetPoolsGetCall) IfNoneMatch(entityTag string) *TargetPoolsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetPoolsGetCall) Context(ctx context.Context) *TargetPoolsGetCall { +func (c *TargetSslProxiesDeleteCall) Context(ctx context.Context) *TargetSslProxiesDeleteCall { c.ctx_ = ctx return c } -func (c *TargetPoolsGetCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetSslProxiesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetSslProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + for k, v := range c.header_ { + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetPool": c.targetPool, + "project": c.project, + "targetSslProxy": c.targetSslProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.get" call. -// Exactly one of *TargetPool or error will be non-nil. Any non-2xx +// Do executes the "compute.targetSslProxies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *TargetPool.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, error) { +func (c *TargetSslProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38051,7 +52218,7 @@ func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetPool{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -38063,13 +52230,12 @@ func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, erro } return ret, nil // { - // "description": "Returns the specified target pool. 
Get a list of available target pools by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.targetPools.get", + // "description": "Deletes the specified TargetSslProxy resource.", + // "httpMethod": "DELETE", + // "id": "compute.targetSslProxies.delete", // "parameterOrder": [ // "project", - // "region", - // "targetPool" + // "targetSslProxy" // ], // "parameters": { // "project": { @@ -38079,104 +52245,112 @@ func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, erro // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "targetPool": { - // "description": "Name of the TargetPool resource to return.", + // "targetSslProxy": { + // "description": "Name of the TargetSslProxy resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}", + // "path": "{project}/global/targetSslProxies/{targetSslProxy}", // "response": { - // "$ref": "TargetPool" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.targetPools.getHealth": +// method id "compute.targetSslProxies.get": -type TargetPoolsGetHealthCall struct { - s *Service - project string - region string - targetPool string - instancereference *InstanceReference - urlParams_ gensupport.URLParams - ctx_ context.Context +type TargetSslProxiesGetCall struct { + s *Service + project string + targetSslProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// GetHealth: Gets the most recent health check results for each IP for -// the instance that is referenced by the given target pool. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/getHealth -func (r *TargetPoolsService) GetHealth(project string, region string, targetPool string, instancereference *InstanceReference) *TargetPoolsGetHealthCall { - c := &TargetPoolsGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified TargetSslProxy resource. Get a list of +// available target SSL proxies by making a list() request. +func (r *TargetSslProxiesService) Get(project string, targetSslProxy string) *TargetSslProxiesGetCall { + c := &TargetSslProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.targetPool = targetPool - c.instancereference = instancereference + c.targetSslProxy = targetSslProxy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetPoolsGetHealthCall) Fields(s ...googleapi.Field) *TargetPoolsGetHealthCall { +func (c *TargetSslProxiesGetCall) Fields(s ...googleapi.Field) *TargetSslProxiesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TargetSslProxiesGetCall) IfNoneMatch(entityTag string) *TargetSslProxiesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetPoolsGetHealthCall) Context(ctx context.Context) *TargetPoolsGetHealthCall { +func (c *TargetSslProxiesGetCall) Context(ctx context.Context) *TargetSslProxiesGetCall { c.ctx_ = ctx return c } -func (c *TargetPoolsGetHealthCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetSslProxiesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetSslProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancereference) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/getHealth") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetPool": c.targetPool, + "project": c.project, + "targetSslProxy": c.targetSslProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.getHealth" call. -// Exactly one of *TargetPoolInstanceHealth or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TargetPoolInstanceHealth.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.targetSslProxies.get" call. +// Exactly one of *TargetSslProxy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TargetSslProxy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPoolInstanceHealth, error) { +func (c *TargetSslProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetSslProxy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38195,7 +52369,7 @@ func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPool if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetPoolInstanceHealth{ + ret := &TargetSslProxy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -38207,13 +52381,12 @@ func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPool } return ret, nil // { - // "description": "Gets the most recent health check results for each IP for the instance that is referenced by the given target pool.", - // "httpMethod": "POST", - // "id": "compute.targetPools.getHealth", + // "description": "Returns the specified TargetSslProxy resource. Get a list of available target SSL proxies by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.targetSslProxies.get", // "parameterOrder": [ // "project", - // "region", - // "targetPool" + // "targetSslProxy" // ], // "parameters": { // "project": { @@ -38223,27 +52396,17 @@ func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPool // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "targetPool": { - // "description": "Name of the TargetPool resource to which the queried instance belongs.", + // "targetSslProxy": { + // "description": "Name of the TargetSslProxy resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}/getHealth", - // "request": { - // "$ref": "InstanceReference" - // }, + // "path": "{project}/global/targetSslProxies/{targetSslProxy}", // "response": { - // "$ref": "TargetPoolInstanceHealth" + // "$ref": "TargetSslProxy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -38254,32 +52417,30 @@ func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPool } -// method id "compute.targetPools.insert": +// method id "compute.targetSslProxies.insert": -type TargetPoolsInsertCall struct { - s *Service - project string - region string - targetpool *TargetPool - urlParams_ gensupport.URLParams - ctx_ context.Context +type TargetSslProxiesInsertCall struct { + s *Service + project string + targetsslproxy *TargetSslProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a target pool in the specified project and region +// Insert: Creates a TargetSslProxy resource in the specified project // using the data included in the request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/insert -func (r *TargetPoolsService) Insert(project string, region string, targetpool *TargetPool) *TargetPoolsInsertCall { - c := &TargetPoolsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *TargetSslProxiesService) Insert(project string, targetsslproxy *TargetSslProxy) *TargetSslProxiesInsertCall { + c := &TargetSslProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.targetpool = targetpool + c.targetsslproxy = targetsslproxy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetPoolsInsertCall) Fields(s ...googleapi.Field) *TargetPoolsInsertCall { +func (c *TargetSslProxiesInsertCall) Fields(s ...googleapi.Field) *TargetSslProxiesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -38287,40 +52448,51 @@ func (c *TargetPoolsInsertCall) Fields(s ...googleapi.Field) *TargetPoolsInsertC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetPoolsInsertCall) Context(ctx context.Context) *TargetPoolsInsertCall { +func (c *TargetSslProxiesInsertCall) Context(ctx context.Context) *TargetSslProxiesInsertCall { c.ctx_ = ctx return c } -func (c *TargetPoolsInsertCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetSslProxiesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetSslProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpool) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetsslproxy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.insert" call. +// Do executes the "compute.targetSslProxies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetSslProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38351,12 +52523,11 @@ func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates a target pool in the specified project and region using the data included in the request.", + // "description": "Creates a TargetSslProxy resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.targetPools.insert", + // "id": "compute.targetSslProxies.insert", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "project": { @@ -38365,18 +52536,11 @@ func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetPools", + // "path": "{project}/global/targetSslProxies", // "request": { - // "$ref": "TargetPool" + // "$ref": "TargetSslProxy" // }, // "response": { // "$ref": "Operation" @@ -38389,24 +52553,22 @@ func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.targetPools.list": +// method id "compute.targetSslProxies.list": -type TargetPoolsListCall struct { +type TargetSslProxiesListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of target pools available to the specified -// project and region. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/list -func (r *TargetPoolsService) List(project string, region string) *TargetPoolsListCall { - c := &TargetPoolsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of TargetSslProxy resources available to the +// specified project. +func (r *TargetSslProxiesService) List(project string) *TargetSslProxiesListCall { + c := &TargetSslProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } @@ -38437,7 +52599,7 @@ func (r *TargetPoolsService) List(project string, region string) *TargetPoolsLis // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *TargetPoolsListCall) Filter(filter string) *TargetPoolsListCall { +func (c *TargetSslProxiesListCall) Filter(filter string) *TargetSslProxiesListCall { c.urlParams_.Set("filter", filter) return c } @@ -38447,15 +52609,32 @@ func (c *TargetPoolsListCall) Filter(filter string) *TargetPoolsListCall { // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in // subsequent list requests. 
-func (c *TargetPoolsListCall) MaxResults(maxResults int64) *TargetPoolsListCall { +func (c *TargetSslProxiesListCall) MaxResults(maxResults int64) *TargetSslProxiesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *TargetSslProxiesListCall) OrderBy(orderBy string) *TargetSslProxiesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *TargetPoolsListCall) PageToken(pageToken string) *TargetPoolsListCall { +func (c *TargetSslProxiesListCall) PageToken(pageToken string) *TargetSslProxiesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -38463,7 +52642,7 @@ func (c *TargetPoolsListCall) PageToken(pageToken string) *TargetPoolsListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetPoolsListCall) Fields(s ...googleapi.Field) *TargetPoolsListCall { +func (c *TargetSslProxiesListCall) Fields(s ...googleapi.Field) *TargetSslProxiesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -38473,7 +52652,7 @@ func (c *TargetPoolsListCall) Fields(s ...googleapi.Field) *TargetPoolsListCall // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *TargetPoolsListCall) IfNoneMatch(entityTag string) *TargetPoolsListCall { +func (c *TargetSslProxiesListCall) IfNoneMatch(entityTag string) *TargetSslProxiesListCall { c.ifNoneMatch_ = entityTag return c } @@ -38481,38 +52660,49 @@ func (c *TargetPoolsListCall) IfNoneMatch(entityTag string) *TargetPoolsListCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetPoolsListCall) Context(ctx context.Context) *TargetPoolsListCall { +func (c *TargetSslProxiesListCall) Context(ctx context.Context) *TargetSslProxiesListCall { c.ctx_ = ctx return c } -func (c *TargetPoolsListCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
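
The list call gains an OrderBy parameter in this revision; combined with the generated Pages helper, which handles the pageToken bookkeeping, a caller can stream proxies newest-first. A sketch using only identifiers that appear in the generated code above:

    package example

    import (
        "context"
        "fmt"

        compute "google.golang.org/api/compute/v1"
    )

    // listProxiesNewestFirst walks every TargetSslProxy in the project in reverse
    // chronological order, one page at a time.
    func listProxiesNewestFirst(ctx context.Context, svc *compute.Service, project string) error {
        return svc.TargetSslProxies.List(project).
            OrderBy("creationTimestamp desc").
            Pages(ctx, func(page *compute.TargetSslProxyList) error {
                for _, p := range page.Items {
                    fmt.Println(p.Name, p.CreationTimestamp)
                }
                return nil
            })
    }
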
+func (c *TargetSslProxiesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetSslProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.list" call. -// Exactly one of *TargetPoolList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *TargetPoolList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.targetSslProxies.list" call. +// Exactly one of *TargetSslProxyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetSslProxyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, error) { +func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslProxyList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38531,7 +52721,7 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetPoolList{ + ret := &TargetSslProxyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -38543,12 +52733,11 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, } return ret, nil // { - // "description": "Retrieves a list of target pools available to the specified project and region.", + // "description": "Retrieves the list of TargetSslProxy resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.targetPools.list", + // "id": "compute.targetSslProxies.list", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "filter": { @@ -38565,6 +52754,11 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -38576,18 +52770,11 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetPools", + // "path": "{project}/global/targetSslProxies", // "response": { - // "$ref": "TargetPoolList" + // "$ref": "TargetSslProxyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -38601,7 +52788,7 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *TargetPoolsListCall) Pages(ctx context.Context, f func(*TargetPoolList) error) error { +func (c *TargetSslProxiesListCall) Pages(ctx context.Context, f func(*TargetSslProxyList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -38619,33 +52806,31 @@ func (c *TargetPoolsListCall) Pages(ctx context.Context, f func(*TargetPoolList) } } -// method id "compute.targetPools.removeHealthCheck": +// method id "compute.targetSslProxies.setBackendService": -type TargetPoolsRemoveHealthCheckCall struct { - s *Service - project string - region string - targetPool string - targetpoolsremovehealthcheckrequest *TargetPoolsRemoveHealthCheckRequest - urlParams_ gensupport.URLParams - ctx_ context.Context +type TargetSslProxiesSetBackendServiceCall struct { + s *Service + project string + targetSslProxy string + targetsslproxiessetbackendservicerequest *TargetSslProxiesSetBackendServiceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemoveHealthCheck: Removes health check URL from a target pool. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/removeHealthCheck -func (r *TargetPoolsService) RemoveHealthCheck(project string, region string, targetPool string, targetpoolsremovehealthcheckrequest *TargetPoolsRemoveHealthCheckRequest) *TargetPoolsRemoveHealthCheckCall { - c := &TargetPoolsRemoveHealthCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetBackendService: Changes the BackendService for TargetSslProxy. 
+func (r *TargetSslProxiesService) SetBackendService(project string, targetSslProxy string, targetsslproxiessetbackendservicerequest *TargetSslProxiesSetBackendServiceRequest) *TargetSslProxiesSetBackendServiceCall { + c := &TargetSslProxiesSetBackendServiceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.targetPool = targetPool - c.targetpoolsremovehealthcheckrequest = targetpoolsremovehealthcheckrequest + c.targetSslProxy = targetSslProxy + c.targetsslproxiessetbackendservicerequest = targetsslproxiessetbackendservicerequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetPoolsRemoveHealthCheckCall) Fields(s ...googleapi.Field) *TargetPoolsRemoveHealthCheckCall { +func (c *TargetSslProxiesSetBackendServiceCall) Fields(s ...googleapi.Field) *TargetSslProxiesSetBackendServiceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -38653,41 +52838,52 @@ func (c *TargetPoolsRemoveHealthCheckCall) Fields(s ...googleapi.Field) *TargetP // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetPoolsRemoveHealthCheckCall) Context(ctx context.Context) *TargetPoolsRemoveHealthCheckCall { +func (c *TargetSslProxiesSetBackendServiceCall) Context(ctx context.Context) *TargetSslProxiesSetBackendServiceCall { c.ctx_ = ctx return c } -func (c *TargetPoolsRemoveHealthCheckCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetSslProxiesSetBackendServiceCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetSslProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremovehealthcheckrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetsslproxiessetbackendservicerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setBackendService") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetPool": c.targetPool, + "project": c.project, + "targetSslProxy": c.targetSslProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.removeHealthCheck" call. +// Do executes the "compute.targetSslProxies.setBackendService" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetSslProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38718,13 +52914,12 @@ func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Removes health check URL from a target pool.", + // "description": "Changes the BackendService for TargetSslProxy.", // "httpMethod": "POST", - // "id": "compute.targetPools.removeHealthCheck", + // "id": "compute.targetSslProxies.setBackendService", // "parameterOrder": [ // "project", - // "region", - // "targetPool" + // "targetSslProxy" // ], // "parameters": { // "project": { @@ -38734,24 +52929,17 @@ func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "targetPool": { - // "description": "Name of the target pool to remove health checks from.", + // "targetSslProxy": { + // "description": "Name of the TargetSslProxy resource whose BackendService resource is to be set.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck", + // "path": "{project}/global/targetSslProxies/{targetSslProxy}/setBackendService", // "request": { - // "$ref": "TargetPoolsRemoveHealthCheckRequest" + // "$ref": "TargetSslProxiesSetBackendServiceRequest" // }, // "response": { // "$ref": "Operation" @@ -38764,33 +52952,31 @@ func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Op } -// method id "compute.targetPools.removeInstance": +// method id "compute.targetSslProxies.setProxyHeader": -type TargetPoolsRemoveInstanceCall struct { - s *Service - project string - region string - targetPool string - targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context +type TargetSslProxiesSetProxyHeaderCall struct { + s *Service + project string + targetSslProxy string + targetsslproxiessetproxyheaderrequest *TargetSslProxiesSetProxyHeaderRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemoveInstance: Removes instance URL from a target pool. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/removeInstance -func (r *TargetPoolsService) RemoveInstance(project string, region string, targetPool string, targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest) *TargetPoolsRemoveInstanceCall { - c := &TargetPoolsRemoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetProxyHeader: Changes the ProxyHeaderType for TargetSslProxy. 
+func (r *TargetSslProxiesService) SetProxyHeader(project string, targetSslProxy string, targetsslproxiessetproxyheaderrequest *TargetSslProxiesSetProxyHeaderRequest) *TargetSslProxiesSetProxyHeaderCall { + c := &TargetSslProxiesSetProxyHeaderCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.targetPool = targetPool - c.targetpoolsremoveinstancerequest = targetpoolsremoveinstancerequest + c.targetSslProxy = targetSslProxy + c.targetsslproxiessetproxyheaderrequest = targetsslproxiessetproxyheaderrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetPoolsRemoveInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsRemoveInstanceCall { +func (c *TargetSslProxiesSetProxyHeaderCall) Fields(s ...googleapi.Field) *TargetSslProxiesSetProxyHeaderCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -38798,41 +52984,52 @@ func (c *TargetPoolsRemoveInstanceCall) Fields(s ...googleapi.Field) *TargetPool // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetPoolsRemoveInstanceCall) Context(ctx context.Context) *TargetPoolsRemoveInstanceCall { +func (c *TargetSslProxiesSetProxyHeaderCall) Context(ctx context.Context) *TargetSslProxiesSetProxyHeaderCall { c.ctx_ = ctx return c } -func (c *TargetPoolsRemoveInstanceCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetSslProxiesSetProxyHeaderCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetSslProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremoveinstancerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetsslproxiessetproxyheaderrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeInstance") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setProxyHeader") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetPool": c.targetPool, + "project": c.project, + "targetSslProxy": c.targetSslProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.removeInstance" call. +// Do executes the "compute.targetSslProxies.setProxyHeader" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
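// Illustrative usage, a minimal sketch of driving the new compute.targetSslProxies
// setters from the generated compute/v1 client. The project name, proxy name,
// backend-service URL, and the request's Service field are placeholders and
// assumptions, not values taken from this change.
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}
	// Assumed request field: the full URL of the replacement backend service.
	req := &compute.TargetSslProxiesSetBackendServiceRequest{
		Service: "https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/my-backend",
	}
	op, err := svc.TargetSslProxies.SetBackendService("my-project", "my-ssl-proxy", req).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name, op.Status)
}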
-func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetSslProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38863,13 +53060,12 @@ func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Removes instance URL from a target pool.", + // "description": "Changes the ProxyHeaderType for TargetSslProxy.", // "httpMethod": "POST", - // "id": "compute.targetPools.removeInstance", + // "id": "compute.targetSslProxies.setProxyHeader", // "parameterOrder": [ // "project", - // "region", - // "targetPool" + // "targetSslProxy" // ], // "parameters": { // "project": { @@ -38879,24 +53075,17 @@ func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Opera // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "targetPool": { - // "description": "Name of the TargetPool resource to remove instances from.", + // "targetSslProxy": { + // "description": "Name of the TargetSslProxy resource whose ProxyHeader is to be set.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}/removeInstance", + // "path": "{project}/global/targetSslProxies/{targetSslProxy}/setProxyHeader", // "request": { - // "$ref": "TargetPoolsRemoveInstanceRequest" + // "$ref": "TargetSslProxiesSetProxyHeaderRequest" // }, // "response": { // "$ref": "Operation" @@ -38909,40 +53098,31 @@ func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Opera } -// method id "compute.targetPools.setBackup": +// method id "compute.targetSslProxies.setSslCertificates": -type TargetPoolsSetBackupCall struct { - s *Service - project string - region string - targetPool string - targetreference *TargetReference - urlParams_ gensupport.URLParams - ctx_ context.Context +type TargetSslProxiesSetSslCertificatesCall struct { + s *Service + project string + targetSslProxy string + targetsslproxiessetsslcertificatesrequest *TargetSslProxiesSetSslCertificatesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetBackup: Changes a backup target pool's configurations. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/setBackup -func (r *TargetPoolsService) SetBackup(project string, region string, targetPool string, targetreference *TargetReference) *TargetPoolsSetBackupCall { - c := &TargetPoolsSetBackupCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetSslCertificates: Changes SslCertificates for TargetSslProxy. 
+func (r *TargetSslProxiesService) SetSslCertificates(project string, targetSslProxy string, targetsslproxiessetsslcertificatesrequest *TargetSslProxiesSetSslCertificatesRequest) *TargetSslProxiesSetSslCertificatesCall { + c := &TargetSslProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.targetPool = targetPool - c.targetreference = targetreference - return c -} - -// FailoverRatio sets the optional parameter "failoverRatio": New -// failoverRatio value for the target pool. -func (c *TargetPoolsSetBackupCall) FailoverRatio(failoverRatio float64) *TargetPoolsSetBackupCall { - c.urlParams_.Set("failoverRatio", fmt.Sprint(failoverRatio)) + c.targetSslProxy = targetSslProxy + c.targetsslproxiessetsslcertificatesrequest = targetsslproxiessetsslcertificatesrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetPoolsSetBackupCall) Fields(s ...googleapi.Field) *TargetPoolsSetBackupCall { +func (c *TargetSslProxiesSetSslCertificatesCall) Fields(s ...googleapi.Field) *TargetSslProxiesSetSslCertificatesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -38950,41 +53130,52 @@ func (c *TargetPoolsSetBackupCall) Fields(s ...googleapi.Field) *TargetPoolsSetB // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetPoolsSetBackupCall) Context(ctx context.Context) *TargetPoolsSetBackupCall { +func (c *TargetSslProxiesSetSslCertificatesCall) Context(ctx context.Context) *TargetSslProxiesSetSslCertificatesCall { c.ctx_ = ctx return c } -func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error) { +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetSslProxiesSetSslCertificatesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetSslProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetsslproxiessetsslcertificatesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/setBackup") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setSslCertificates") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetPool": c.targetPool, + "project": c.project, + "targetSslProxy": c.targetSslProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.setBackup" call. +// Do executes the "compute.targetSslProxies.setSslCertificates" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetSslProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39015,21 +53206,14 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Changes a backup target pool's configurations.", + // "description": "Changes SslCertificates for TargetSslProxy.", // "httpMethod": "POST", - // "id": "compute.targetPools.setBackup", + // "id": "compute.targetSslProxies.setSslCertificates", // "parameterOrder": [ // "project", - // "region", - // "targetPool" + // "targetSslProxy" // ], // "parameters": { - // "failoverRatio": { - // "description": "New failoverRatio value for the target pool.", - // "format": "float", - // "location": "query", - // "type": "number" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -39037,24 +53221,17 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "targetPool": { - // "description": "Name of the TargetPool resource to set a backup pool for.", + // "targetSslProxy": { + // "description": "Name of the TargetSslProxy resource whose SslCertificate resource is to be set.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}/setBackup", + // "path": "{project}/global/targetSslProxies/{targetSslProxy}/setSslCertificates", // "request": { - // "$ref": "TargetReference" + // "$ref": "TargetSslProxiesSetSslCertificatesRequest" // }, // "response": { // "$ref": "Operation" @@ -39075,6 +53252,7 @@ type TargetVpnGatewaysAggregatedListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // AggregatedList: Retrieves an aggregated list of target VPN gateways. @@ -39126,6 +53304,23 @@ func (c *TargetVpnGatewaysAggregatedListCall) MaxResults(maxResults int64) *Targ return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
+func (c *TargetVpnGatewaysAggregatedListCall) OrderBy(orderBy string) *TargetVpnGatewaysAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -39160,8 +53355,20 @@ func (c *TargetVpnGatewaysAggregatedListCall) Context(ctx context.Context) *Targ return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetVpnGatewaysAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *TargetVpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -39237,6 +53444,11 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -39293,6 +53505,7 @@ type TargetVpnGatewaysDeleteCall struct { targetVpnGateway string urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Delete: Deletes the specified target VPN gateway. @@ -39320,8 +53533,20 @@ func (c *TargetVpnGatewaysDeleteCall) Context(ctx context.Context) *TargetVpnGat return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetVpnGatewaysDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *TargetVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) @@ -39428,6 +53653,7 @@ type TargetVpnGatewaysGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Returns the specified target VPN gateway. Get a list of @@ -39466,8 +53692,20 @@ func (c *TargetVpnGatewaysGetCall) Context(ctx context.Context) *TargetVpnGatewa return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *TargetVpnGatewaysGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *TargetVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -39577,6 +53815,7 @@ type TargetVpnGatewaysInsertCall struct { targetvpngateway *TargetVpnGateway urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Insert: Creates a target VPN gateway in the specified project and @@ -39605,8 +53844,20 @@ func (c *TargetVpnGatewaysInsertCall) Context(ctx context.Context) *TargetVpnGat return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetVpnGatewaysInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *TargetVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetvpngateway) @@ -39711,6 +53962,7 @@ type TargetVpnGatewaysListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Retrieves a list of target VPN gateways available to the @@ -39764,6 +54016,23 @@ func (c *TargetVpnGatewaysListCall) MaxResults(maxResults int64) *TargetVpnGatew return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *TargetVpnGatewaysListCall) OrderBy(orderBy string) *TargetVpnGatewaysListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -39798,8 +54067,20 @@ func (c *TargetVpnGatewaysListCall) Context(ctx context.Context) *TargetVpnGatew return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
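// Illustrative usage, a minimal sketch of the new Header accessor added to the
// generated calls, assuming an already-constructed *compute.Service. The header
// name/value, project, region, and gateway names are placeholders.
package sketch

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// getGatewayWithHeader fetches a target VPN gateway while attaching a
// caller-supplied HTTP header; headers set this way are copied onto the
// outgoing request by doRequest.
func getGatewayWithHeader(svc *compute.Service) error {
	call := svc.TargetVpnGateways.Get("my-project", "us-central1", "my-gateway")
	call.Header().Set("X-Example-Trace", "debug-1234") // placeholder header
	gw, err := call.Do()
	if err != nil {
		return err
	}
	fmt.Println(gw.Name, gw.Status)
	return nil
}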
+func (c *TargetVpnGatewaysListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *TargetVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -39877,6 +54158,11 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -39939,6 +54225,7 @@ type UrlMapsDeleteCall struct { urlMap string urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Delete: Deletes the specified UrlMap resource. @@ -39966,8 +54253,20 @@ func (c *UrlMapsDeleteCall) Context(ctx context.Context) *UrlMapsDeleteCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UrlMapsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *UrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) @@ -40064,6 +54363,7 @@ type UrlMapsGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Returns the specified UrlMap resource. Get a list of available @@ -40102,8 +54402,20 @@ func (c *UrlMapsGetCall) Context(ctx context.Context) *UrlMapsGetCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UrlMapsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *UrlMapsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -40203,6 +54515,7 @@ type UrlMapsInsertCall struct { urlmap *UrlMap urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Insert: Creates a UrlMap resource in the specified project using the @@ -40231,8 +54544,20 @@ func (c *UrlMapsInsertCall) Context(ctx context.Context) *UrlMapsInsertCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *UrlMapsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *UrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) @@ -40328,6 +54653,7 @@ type UrlMapsInvalidateCacheCall struct { cacheinvalidationrule *CacheInvalidationRule urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // InvalidateCache: Initiates a cache invalidation operation, @@ -40356,8 +54682,20 @@ func (c *UrlMapsInvalidateCacheCall) Context(ctx context.Context) *UrlMapsInvali return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UrlMapsInvalidateCacheCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *UrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.cacheinvalidationrule) @@ -40461,6 +54799,7 @@ type UrlMapsListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Retrieves the list of UrlMap resources available to the @@ -40514,6 +54853,23 @@ func (c *UrlMapsListCall) MaxResults(maxResults int64) *UrlMapsListCall { return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *UrlMapsListCall) OrderBy(orderBy string) *UrlMapsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -40548,8 +54904,20 @@ func (c *UrlMapsListCall) Context(ctx context.Context) *UrlMapsListCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UrlMapsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *UrlMapsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -40625,6 +54993,11 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -40681,10 +55054,11 @@ type UrlMapsPatchCall struct { urlmap *UrlMap urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } -// Patch: Updates the entire content of the UrlMap resource. This method -// supports patch semantics. +// Patch: Updates the specified UrlMap resource with the data included +// in the request. This method supports patch semantics. // For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/patch func (r *UrlMapsService) Patch(project string, urlMap string, urlmap *UrlMap) *UrlMapsPatchCall { c := &UrlMapsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -40710,8 +55084,20 @@ func (c *UrlMapsPatchCall) Context(ctx context.Context) *UrlMapsPatchCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UrlMapsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *UrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) @@ -40769,7 +55155,7 @@ func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Updates the entire content of the UrlMap resource. This method supports patch semantics.", + // "description": "Updates the specified UrlMap resource with the data included in the request. This method supports patch semantics.", // "httpMethod": "PATCH", // "id": "compute.urlMaps.patch", // "parameterOrder": [ @@ -40816,9 +55202,11 @@ type UrlMapsUpdateCall struct { urlmap *UrlMap urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } -// Update: Updates the entire content of the UrlMap resource. +// Update: Updates the specified UrlMap resource with the data included +// in the request. // For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/update func (r *UrlMapsService) Update(project string, urlMap string, urlmap *UrlMap) *UrlMapsUpdateCall { c := &UrlMapsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -40844,8 +55232,20 @@ func (c *UrlMapsUpdateCall) Context(ctx context.Context) *UrlMapsUpdateCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
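// Illustrative usage, a minimal sketch of the patch-semantics UrlMaps update
// described above, assuming an existing *compute.Service; the project, url map,
// and backend-service URL are placeholders.
package sketch

import (
	"golang.org/x/net/context"

	compute "google.golang.org/api/compute/v1"
)

// patchDefaultService repoints only the url map's default service; fields left
// unset (and not listed in ForceSendFields) are not sent, so the server leaves
// them untouched under patch semantics.
func patchDefaultService(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	patch := &compute.UrlMap{
		DefaultService: "https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/my-backend",
	}
	return svc.UrlMaps.Patch("my-project", "my-url-map", patch).Context(ctx).Do()
}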
+func (c *UrlMapsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *UrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) @@ -40903,7 +55303,7 @@ func (c *UrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Updates the entire content of the UrlMap resource.", + // "description": "Updates the specified UrlMap resource with the data included in the request.", // "httpMethod": "PUT", // "id": "compute.urlMaps.update", // "parameterOrder": [ @@ -40950,6 +55350,7 @@ type UrlMapsValidateCall struct { urlmapsvalidaterequest *UrlMapsValidateRequest urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Validate: Runs static validation for the UrlMap. In particular, the @@ -40980,8 +55381,20 @@ func (c *UrlMapsValidateCall) Context(ctx context.Context) *UrlMapsValidateCall return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UrlMapsValidateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *UrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapsvalidaterequest) @@ -41085,6 +55498,7 @@ type VpnTunnelsAggregatedListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // AggregatedList: Retrieves an aggregated list of VPN tunnels. @@ -41136,6 +55550,23 @@ func (c *VpnTunnelsAggregatedListCall) MaxResults(maxResults int64) *VpnTunnelsA return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *VpnTunnelsAggregatedListCall) OrderBy(orderBy string) *VpnTunnelsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -41170,8 +55601,20 @@ func (c *VpnTunnelsAggregatedListCall) Context(ctx context.Context) *VpnTunnelsA return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *VpnTunnelsAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *VpnTunnelsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -41247,6 +55690,11 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -41303,6 +55751,7 @@ type VpnTunnelsDeleteCall struct { vpnTunnel string urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Delete: Deletes the specified VpnTunnel resource. @@ -41330,8 +55779,20 @@ func (c *VpnTunnelsDeleteCall) Context(ctx context.Context) *VpnTunnelsDeleteCal return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *VpnTunnelsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *VpnTunnelsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) @@ -41438,6 +55899,7 @@ type VpnTunnelsGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Returns the specified VpnTunnel resource. Get a list of @@ -41476,8 +55938,20 @@ func (c *VpnTunnelsGetCall) Context(ctx context.Context) *VpnTunnelsGetCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *VpnTunnelsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *VpnTunnelsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -41587,6 +56061,7 @@ type VpnTunnelsInsertCall struct { vpntunnel *VpnTunnel urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Insert: Creates a VpnTunnel resource in the specified project and @@ -41615,8 +56090,20 @@ func (c *VpnTunnelsInsertCall) Context(ctx context.Context) *VpnTunnelsInsertCal return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *VpnTunnelsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *VpnTunnelsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.vpntunnel) @@ -41721,6 +56208,7 @@ type VpnTunnelsListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Retrieves a list of VpnTunnel resources contained in the @@ -41774,6 +56262,23 @@ func (c *VpnTunnelsListCall) MaxResults(maxResults int64) *VpnTunnelsListCall { return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *VpnTunnelsListCall) OrderBy(orderBy string) *VpnTunnelsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -41808,8 +56313,20 @@ func (c *VpnTunnelsListCall) Context(ctx context.Context) *VpnTunnelsListCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *VpnTunnelsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *VpnTunnelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -41887,6 +56404,11 @@ func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, e // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -41950,6 +56472,7 @@ type ZoneOperationsDeleteCall struct { operation string urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Delete: Deletes the specified zone-specific Operations resource. @@ -41978,8 +56501,20 @@ func (c *ZoneOperationsDeleteCall) Context(ctx context.Context) *ZoneOperationsD return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ZoneOperationsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ZoneOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) @@ -42058,6 +56593,7 @@ type ZoneOperationsGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Retrieves the specified zone-specific Operations resource. @@ -42096,8 +56632,20 @@ func (c *ZoneOperationsGetCall) Context(ctx context.Context) *ZoneOperationsGetC return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ZoneOperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -42207,6 +56755,7 @@ type ZoneOperationsListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Retrieves a list of Operation resources contained within the @@ -42261,6 +56810,23 @@ func (c *ZoneOperationsListCall) MaxResults(maxResults int64) *ZoneOperationsLis return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
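// Illustrative usage, a minimal sketch of the new orderBy parameter combined
// with Pages-based iteration, assuming an already-constructed *compute.Service;
// project and zone names are placeholders.
package sketch

import (
	"fmt"

	"golang.org/x/net/context"
	compute "google.golang.org/api/compute/v1"
)

// listNewestOperations walks every page of zone operations in reverse
// chronological order (newest first).
func listNewestOperations(ctx context.Context, svc *compute.Service) error {
	call := svc.ZoneOperations.List("my-project", "us-central1-a").
		OrderBy("creationTimestamp desc")
	return call.Pages(ctx, func(page *compute.OperationList) error {
		for _, op := range page.Items {
			fmt.Println(op.Name, op.OperationType, op.Status)
		}
		return nil // a non-nil error here halts the iteration
	})
}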
+func (c *ZoneOperationsListCall) OrderBy(orderBy string) *ZoneOperationsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -42295,8 +56861,20 @@ func (c *ZoneOperationsListCall) Context(ctx context.Context) *ZoneOperationsLis return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ZoneOperationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ZoneOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -42374,6 +56952,11 @@ func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationLis // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -42437,6 +57020,7 @@ type ZonesGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Returns the specified Zone resource. Get a list of available @@ -42475,8 +57059,20 @@ func (c *ZonesGetCall) Context(ctx context.Context) *ZonesGetCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ZonesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ZonesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -42576,6 +57172,7 @@ type ZonesListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Retrieves the list of Zone resources available to the specified @@ -42629,6 +57226,23 @@ func (c *ZonesListCall) MaxResults(maxResults int64) *ZonesListCall { return c } +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". 
This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *ZonesListCall) OrderBy(orderBy string) *ZonesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. @@ -42663,8 +57277,20 @@ func (c *ZonesListCall) Context(ctx context.Context) *ZonesListCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ZonesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ZonesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -42740,6 +57366,11 @@ func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", diff --git a/vendor/google.golang.org/api/container/v1/container-gen.go b/vendor/google.golang.org/api/container/v1/container-gen.go index db07cdf817..edb28a9570 100644 --- a/vendor/google.golang.org/api/container/v1/container-gen.go +++ b/vendor/google.golang.org/api/container/v1/container-gen.go @@ -154,12 +154,21 @@ type AddonsConfig struct { // field is empty or not. This may be used to include empty fields in // Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HorizontalPodAutoscaling") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *AddonsConfig) MarshalJSON() ([]byte, error) { type noMethod AddonsConfig raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Cluster: A Google Container Engine cluster. @@ -322,12 +331,20 @@ type Cluster struct { // server regardless of whether the field is empty or not. 
This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AddonsConfig") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *Cluster) MarshalJSON() ([]byte, error) { type noMethod Cluster raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ClusterUpdate: ClusterUpdate describes an update to the cluster. @@ -366,12 +383,21 @@ type ClusterUpdate struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DesiredAddonsConfig") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *ClusterUpdate) MarshalJSON() ([]byte, error) { type noMethod ClusterUpdate raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // CreateClusterRequest: CreateClusterRequest creates a cluster. @@ -387,12 +413,20 @@ type CreateClusterRequest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Cluster") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *CreateClusterRequest) MarshalJSON() ([]byte, error) { type noMethod CreateClusterRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // CreateNodePoolRequest: CreateNodePoolRequest creates a node pool for @@ -408,12 +442,20 @@ type CreateNodePoolRequest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NodePool") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *CreateNodePoolRequest) MarshalJSON() ([]byte, error) { type noMethod CreateNodePoolRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // HorizontalPodAutoscaling: Configuration options for the horizontal @@ -434,12 +476,20 @@ type HorizontalPodAutoscaling struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Disabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *HorizontalPodAutoscaling) MarshalJSON() ([]byte, error) { type noMethod HorizontalPodAutoscaling raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // HttpLoadBalancing: Configuration options for the HTTP (L7) load @@ -458,12 +508,20 @@ type HttpLoadBalancing struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Disabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *HttpLoadBalancing) MarshalJSON() ([]byte, error) { type noMethod HttpLoadBalancing raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ListClustersResponse: ListClustersResponse is the result of @@ -488,12 +546,20 @@ type ListClustersResponse struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Clusters") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ListClustersResponse) MarshalJSON() ([]byte, error) { type noMethod ListClustersResponse raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ListNodePoolsResponse: ListNodePoolsResponse is the result of @@ -513,12 +579,20 @@ type ListNodePoolsResponse struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NodePools") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ListNodePoolsResponse) MarshalJSON() ([]byte, error) { type noMethod ListNodePoolsResponse raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ListOperationsResponse: ListOperationsResponse is the result of @@ -543,12 +617,20 @@ type ListOperationsResponse struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MissingZones") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { type noMethod ListOperationsResponse raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // MasterAuth: The authentication information for accessing the master @@ -583,12 +665,21 @@ type MasterAuth struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ClientCertificate") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *MasterAuth) MarshalJSON() ([]byte, error) { type noMethod MasterAuth raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // NodeConfig: Parameters that describe the nodes in a cluster. @@ -635,12 +726,20 @@ type NodeConfig struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DiskSizeGb") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *NodeConfig) MarshalJSON() ([]byte, error) { type noMethod NodeConfig raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // NodePool: NodePool contains the name and configuration for a @@ -700,12 +799,20 @@ type NodePool struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Config") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *NodePool) MarshalJSON() ([]byte, error) { type noMethod NodePool raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Operation: This operation resource represents operations that may @@ -767,12 +874,20 @@ type Operation struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Detail") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *Operation) MarshalJSON() ([]byte, error) { type noMethod Operation raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ServerConfig: Container Engine service configuration. @@ -802,12 +917,21 @@ type ServerConfig struct { // field is empty or not. This may be used to include empty fields in // Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DefaultClusterVersion") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *ServerConfig) MarshalJSON() ([]byte, error) { type noMethod ServerConfig raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // UpdateClusterRequest: UpdateClusterRequest updates the settings of a @@ -823,12 +947,20 @@ type UpdateClusterRequest struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Update") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *UpdateClusterRequest) MarshalJSON() ([]byte, error) { type noMethod UpdateClusterRequest raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // method id "container.projects.zones.getServerconfig": @@ -840,6 +972,7 @@ type ProjectsZonesGetServerconfigCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // GetServerconfig: Returns configuration info about the Container @@ -877,8 +1010,20 @@ func (c *ProjectsZonesGetServerconfigCall) Context(ctx context.Context) *Project return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsZonesGetServerconfigCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ProjectsZonesGetServerconfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -975,6 +1120,7 @@ type ProjectsZonesClustersCreateCall struct { createclusterrequest *CreateClusterRequest urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Create: Creates a cluster, consisting of the specified number and @@ -1010,8 +1156,20 @@ func (c *ProjectsZonesClustersCreateCall) Context(ctx context.Context) *Projects return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsZonesClustersCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ProjectsZonesClustersCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.createclusterrequest) @@ -1113,6 +1271,7 @@ type ProjectsZonesClustersDeleteCall struct { clusterId string urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Delete: Deletes the cluster, including the Kubernetes endpoint and @@ -1145,8 +1304,20 @@ func (c *ProjectsZonesClustersDeleteCall) Context(ctx context.Context) *Projects return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsZonesClustersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ProjectsZonesClustersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) @@ -1249,6 +1420,7 @@ type ProjectsZonesClustersGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Gets the details of a specific cluster. @@ -1286,8 +1458,20 @@ func (c *ProjectsZonesClustersGetCall) Context(ctx context.Context) *ProjectsZon return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsZonesClustersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ProjectsZonesClustersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -1392,6 +1576,7 @@ type ProjectsZonesClustersListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Lists all clusters owned by a project in either the specified @@ -1429,8 +1614,20 @@ func (c *ProjectsZonesClustersListCall) Context(ctx context.Context) *ProjectsZo return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsZonesClustersListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ProjectsZonesClustersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -1528,6 +1725,7 @@ type ProjectsZonesClustersUpdateCall struct { updateclusterrequest *UpdateClusterRequest urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Update: Updates the settings of a specific cluster. @@ -1556,8 +1754,20 @@ func (c *ProjectsZonesClustersUpdateCall) Context(ctx context.Context) *Projects return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsZonesClustersUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ProjectsZonesClustersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.updateclusterrequest) @@ -1668,6 +1878,7 @@ type ProjectsZonesClustersNodePoolsCreateCall struct { createnodepoolrequest *CreateNodePoolRequest urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Create: Creates a node pool for a cluster. 
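Editorial sketch, not part of the vendored diff: one way a caller could use the Header() accessor being added throughout this file, assuming this is the google.golang.org/api/container/v1 client and that the supplied *http.Client already carries credentials; the project ID, zone, and header name below are placeholders.

package clustersexample

import (
	"fmt"
	"log"
	"net/http"

	container "google.golang.org/api/container/v1"
)

func listClusters(client *http.Client) {
	svc, err := container.New(client)
	if err != nil {
		log.Fatal(err)
	}
	call := svc.Projects.Zones.Clusters.List("my-project", "us-central1-a")
	// Headers set via Header() are copied onto the outgoing request by doRequest.
	call.Header().Set("X-Custom-Trace-Id", "example-123")
	resp, err := call.Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range resp.Clusters {
		fmt.Println(c.Name)
	}
}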
@@ -1696,8 +1907,20 @@ func (c *ProjectsZonesClustersNodePoolsCreateCall) Context(ctx context.Context) return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsZonesClustersNodePoolsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ProjectsZonesClustersNodePoolsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.createnodepoolrequest) @@ -1808,6 +2031,7 @@ type ProjectsZonesClustersNodePoolsDeleteCall struct { nodePoolId string urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Delete: Deletes a node pool from a cluster. @@ -1836,8 +2060,20 @@ func (c *ProjectsZonesClustersNodePoolsDeleteCall) Context(ctx context.Context) return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsZonesClustersNodePoolsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ProjectsZonesClustersNodePoolsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) @@ -1949,6 +2185,7 @@ type ProjectsZonesClustersNodePoolsGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Retrieves the node pool requested. @@ -1987,8 +2224,20 @@ func (c *ProjectsZonesClustersNodePoolsGetCall) Context(ctx context.Context) *Pr return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsZonesClustersNodePoolsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ProjectsZonesClustersNodePoolsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -2102,6 +2351,7 @@ type ProjectsZonesClustersNodePoolsListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Lists the node pools for a cluster. @@ -2139,8 +2389,20 @@ func (c *ProjectsZonesClustersNodePoolsListCall) Context(ctx context.Context) *P return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsZonesClustersNodePoolsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ProjectsZonesClustersNodePoolsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -2246,6 +2508,7 @@ type ProjectsZonesOperationsGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Gets the specified operation. @@ -2283,8 +2546,20 @@ func (c *ProjectsZonesOperationsGetCall) Context(ctx context.Context) *ProjectsZ return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsZonesOperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ProjectsZonesOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -2389,6 +2664,7 @@ type ProjectsZonesOperationsListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Lists all operations in a project in a specific zone or all @@ -2426,8 +2702,20 @@ func (c *ProjectsZonesOperationsListCall) Context(ctx context.Context) *Projects return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsZonesOperationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ProjectsZonesOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) diff --git a/vendor/google.golang.org/api/gensupport/json.go b/vendor/google.golang.org/api/gensupport/json.go index dd7bcd2eb0..53331b79b0 100644 --- a/vendor/google.golang.org/api/gensupport/json.go +++ b/vendor/google.golang.org/api/gensupport/json.go @@ -12,13 +12,13 @@ import ( ) // MarshalJSON returns a JSON encoding of schema containing only selected fields. -// A field is selected if: -// * it has a non-empty value, or -// * its field name is present in forceSendFields, and -// * it is not a nil pointer or nil interface. +// A field is selected if any of the following is true: +// * it has a non-empty value +// * its field name is present in forceSendFields and it is not a nil pointer or nil interface +// * its field name is present in nullFields. // The JSON key for each selected field is taken from the field's json: struct tag. 
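+// For example (illustrative): marshaling struct{ A string `json:"a,omitempty"` }
+// with nullFields = []string{"A"} and A left empty yields {"a":null}; if A had a
+// non-empty value, MarshalJSON would return an error instead.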
-func MarshalJSON(schema interface{}, forceSendFields []string) ([]byte, error) { - if len(forceSendFields) == 0 { +func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) { + if len(forceSendFields) == 0 && len(nullFields) == 0 { return json.Marshal(schema) } @@ -26,15 +26,19 @@ func MarshalJSON(schema interface{}, forceSendFields []string) ([]byte, error) { for _, f := range forceSendFields { mustInclude[f] = struct{}{} } + useNull := make(map[string]struct{}) + for _, f := range nullFields { + useNull[f] = struct{}{} + } - dataMap, err := schemaToMap(schema, mustInclude) + dataMap, err := schemaToMap(schema, mustInclude, useNull) if err != nil { return nil, err } return json.Marshal(dataMap) } -func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[string]interface{}, error) { +func schemaToMap(schema interface{}, mustInclude, useNull map[string]struct{}) (map[string]interface{}, error) { m := make(map[string]interface{}) s := reflect.ValueOf(schema) st := s.Type() @@ -54,6 +58,14 @@ func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[strin v := s.Field(i) f := st.Field(i) + + if _, ok := useNull[f.Name]; ok { + if !isEmptyValue(v) { + return nil, fmt.Errorf("field %q in NullFields has non-empty value", f.Name) + } + m[tag.apiName] = nil + continue + } if !includeField(v, f, mustInclude) { continue } diff --git a/vendor/google.golang.org/api/gensupport/resumable.go b/vendor/google.golang.org/api/gensupport/resumable.go index 695e365a1d..dcd591f7ff 100644 --- a/vendor/google.golang.org/api/gensupport/resumable.go +++ b/vendor/google.golang.org/api/gensupport/resumable.go @@ -5,6 +5,7 @@ package gensupport import ( + "errors" "fmt" "io" "net/http" @@ -15,10 +16,6 @@ import ( ) const ( - // statusResumeIncomplete is the code returned by the Google uploader - // when the transfer is not yet complete. - statusResumeIncomplete = 308 - // statusTooManyRequests is returned by the storage API if the // per-project limits have been temporarily exceeded. The request // should be retried. @@ -79,8 +76,23 @@ func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, req.Header.Set("Content-Range", contentRange) req.Header.Set("Content-Type", rx.MediaType) req.Header.Set("User-Agent", rx.UserAgent) + + // Google's upload endpoint uses status code 308 for a + // different purpose than the "308 Permanent Redirect" + // since-standardized in RFC 7238. Because of the conflict in + // semantics, Google added this new request header which + // causes it to not use "308" and instead reply with 200 OK + // and sets the upload-specific "X-HTTP-Status-Code-Override: + // 308" response header. + req.Header.Set("X-GUploader-No-308", "yes") + return SendRequest(ctx, rx.Client, req) +} +func statusResumeIncomplete(resp *http.Response) bool { + // This is how the server signals "status resume incomplete" + // when X-GUploader-No-308 is set to "yes": + return resp != nil && resp.Header.Get("X-Http-Status-Code-Override") == "308" } // reportProgress calls a user-supplied callback to report upload progress. @@ -111,11 +123,17 @@ func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, e return res, err } - if res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK { + // We sent "X-GUploader-No-308: yes" (see comment elsewhere in + // this file), so we don't expect to get a 308. 
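+	// (Editorial note: with that header set, a still-in-progress chunk now
+	// arrives as "200 OK" plus "X-Http-Status-Code-Override: 308", which the
+	// statusResumeIncomplete helper above checks for.)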
+ if res.StatusCode == 308 { + return nil, errors.New("unexpected 308 response status code") + } + + if res.StatusCode == http.StatusOK { rx.reportProgress(off, off+int64(size)) } - if res.StatusCode == statusResumeIncomplete { + if statusResumeIncomplete(res) { rx.Media.Next() } return res, nil @@ -177,7 +195,7 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err // If the chunk was uploaded successfully, but there's still // more to go, upload the next chunk without any delay. - if status == statusResumeIncomplete { + if statusResumeIncomplete(resp) { pause = 0 backoff.Reset() resp.Body.Close() diff --git a/vendor/google.golang.org/api/gensupport/retry.go b/vendor/google.golang.org/api/gensupport/retry.go index 7f83d1da99..9023368acb 100644 --- a/vendor/google.golang.org/api/gensupport/retry.go +++ b/vendor/google.golang.org/api/gensupport/retry.go @@ -55,23 +55,17 @@ func DefaultBackoffStrategy() BackoffStrategy { // shouldRetry returns true if the HTTP response / error indicates that the // request should be attempted again. func shouldRetry(status int, err error) bool { - // Retry for 5xx response codes. - if 500 <= status && status < 600 { + if 500 <= status && status <= 599 { return true } - - // Retry on statusTooManyRequests{ if status == statusTooManyRequests { return true } - - // Retry on unexpected EOFs and temporary network errors. if err == io.ErrUnexpectedEOF { return true } if err, ok := err.(net.Error); ok { return err.Temporary() } - return false } diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index ad059b5d56..bdb11c4ece 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -50,7 +50,7 @@ const ( // UserAgent is the header string used to identify this package. UserAgent = "google-api-go-client/" + Version - // The default chunk size to use for resumable uplods if not specified by the user. + // The default chunk size to use for resumable uploads if not specified by the user. DefaultUploadChunkSize = 8 * 1024 * 1024 // The minimum chunk size that can be used for resumable uploads. All diff --git a/vendor/google.golang.org/api/googleapi/types.go b/vendor/google.golang.org/api/googleapi/types.go index a02b4b0716..c8fdd54161 100644 --- a/vendor/google.golang.org/api/googleapi/types.go +++ b/vendor/google.golang.org/api/googleapi/types.go @@ -6,6 +6,7 @@ package googleapi import ( "encoding/json" + "errors" "strconv" ) @@ -149,6 +150,25 @@ func (s Float64s) MarshalJSON() ([]byte, error) { }) } +// RawMessage is a raw encoded JSON value. +// It is identical to json.RawMessage, except it does not suffer from +// https://golang.org/issue/14493. +type RawMessage []byte + +// MarshalJSON returns m. +func (m RawMessage) MarshalJSON() ([]byte, error) { + return m, nil +} + +// UnmarshalJSON sets *m to a copy of data. +func (m *RawMessage) UnmarshalJSON(data []byte) error { + if m == nil { + return errors.New("googleapi.RawMessage: UnmarshalJSON on nil pointer") + } + *m = append((*m)[:0], data...) + return nil +} + /* * Helper routines for simplifying the creation of optional fields of basic type. 
*/ diff --git a/vendor/k8s.io/apimachinery/LICENSE b/vendor/k8s.io/apimachinery/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/k8s.io/apimachinery/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS new file mode 100755 index 0000000000..ae828ad253 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS @@ -0,0 +1,27 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- nikhiljindal +- gmarek +- erictune +- saad-ali +- janetkuo +- timstclair +- eparis +- timothysc +- dims +- spxtr +- hongchaodeng +- krousey +- satnam6502 +- cjcullen +- david-mcmahon +- goltermann diff --git a/vendor/k8s.io/kubernetes/pkg/api/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/api/errors/doc.go rename to vendor/k8s.io/apimachinery/pkg/api/errors/doc.go diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go new file mode 100644 index 0000000000..560c889b9c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -0,0 +1,478 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// HTTP Status codes not in the golang http package. +const ( + StatusUnprocessableEntity = 422 + StatusTooManyRequests = 429 + // StatusServerTimeout is an indication that a transient server error has + // occurred and the client *should* retry, with an optional Retry-After + // header to specify the back off window. + StatusServerTimeout = 504 +) + +// StatusError is an error intended for consumption by a REST API server; it can also be +// reconstructed by clients from a REST response. Public to allow easy type switches. +type StatusError struct { + ErrStatus metav1.Status +} + +// APIStatus is exposed by errors that can be converted to an api.Status object +// for finer grained details. +type APIStatus interface { + Status() metav1.Status +} + +var _ error = &StatusError{} + +// Error implements the Error interface. +func (e *StatusError) Error() string { + return e.ErrStatus.Message +} + +// Status allows access to e's status without having to know the detailed workings +// of StatusError. +func (e *StatusError) Status() metav1.Status { + return e.ErrStatus +} + +// DebugError reports extended info about the error to debug output. 
+func (e *StatusError) DebugError() (string, []interface{}) { + if out, err := json.MarshalIndent(e.ErrStatus, "", " "); err == nil { + return "server response object: %s", []interface{}{string(out)} + } + return "server response object: %#v", []interface{}{e.ErrStatus} +} + +// UnexpectedObjectError can be returned by FromObject if it's passed a non-status object. +type UnexpectedObjectError struct { + Object runtime.Object +} + +// Error returns an error message describing 'u'. +func (u *UnexpectedObjectError) Error() string { + return fmt.Sprintf("unexpected object: %v", u.Object) +} + +// FromObject generates an StatusError from an metav1.Status, if that is the type of obj; otherwise, +// returns an UnexpecteObjectError. +func FromObject(obj runtime.Object) error { + switch t := obj.(type) { + case *metav1.Status: + return &StatusError{*t} + } + return &UnexpectedObjectError{obj} +} + +// NewNotFound returns a new error which indicates that the resource of the kind and the name was not found. +func NewNotFound(qualifiedResource schema.GroupResource, name string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusNotFound, + Reason: metav1.StatusReasonNotFound, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + }, + Message: fmt.Sprintf("%s %q not found", qualifiedResource.String(), name), + }} +} + +// NewAlreadyExists returns an error indicating the item requested exists by that identifier. +func NewAlreadyExists(qualifiedResource schema.GroupResource, name string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusConflict, + Reason: metav1.StatusReasonAlreadyExists, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + }, + Message: fmt.Sprintf("%s %q already exists", qualifiedResource.String(), name), + }} +} + +// NewUnauthorized returns an error indicating the client is not authorized to perform the requested +// action. +func NewUnauthorized(reason string) *StatusError { + message := reason + if len(message) == 0 { + message = "not authorized" + } + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusUnauthorized, + Reason: metav1.StatusReasonUnauthorized, + Message: message, + }} +} + +// NewForbidden returns an error indicating the requested action was forbidden +func NewForbidden(qualifiedResource schema.GroupResource, name string, err error) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusForbidden, + Reason: metav1.StatusReasonForbidden, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + }, + Message: fmt.Sprintf("%s %q is forbidden: %v", qualifiedResource.String(), name, err), + }} +} + +// NewConflict returns an error indicating the item can't be updated as provided. 
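+// For example (illustrative):
+//
+//	err := NewConflict(schema.GroupResource{Resource: "pods"}, "web-0", fmt.Errorf("object was modified"))
+//	IsConflict(err) // true; the embedded Status carries Code 409 and Reason StatusReasonConflict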
+func NewConflict(qualifiedResource schema.GroupResource, name string, err error) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusConflict, + Reason: metav1.StatusReasonConflict, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + }, + Message: fmt.Sprintf("Operation cannot be fulfilled on %s %q: %v", qualifiedResource.String(), name, err), + }} +} + +// NewGone returns an error indicating the item no longer available at the server and no forwarding address is known. +func NewGone(message string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusGone, + Reason: metav1.StatusReasonGone, + Message: message, + }} +} + +// NewInvalid returns an error indicating the item is invalid and cannot be processed. +func NewInvalid(qualifiedKind schema.GroupKind, name string, errs field.ErrorList) *StatusError { + causes := make([]metav1.StatusCause, 0, len(errs)) + for i := range errs { + err := errs[i] + causes = append(causes, metav1.StatusCause{ + Type: metav1.CauseType(err.Type), + Message: err.ErrorBody(), + Field: err.Field, + }) + } + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: StatusUnprocessableEntity, // RFC 4918: StatusUnprocessableEntity + Reason: metav1.StatusReasonInvalid, + Details: &metav1.StatusDetails{ + Group: qualifiedKind.Group, + Kind: qualifiedKind.Kind, + Name: name, + Causes: causes, + }, + Message: fmt.Sprintf("%s %q is invalid: %v", qualifiedKind.String(), name, errs.ToAggregate()), + }} +} + +// NewBadRequest creates an error that indicates that the request is invalid and can not be processed. +func NewBadRequest(reason string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusBadRequest, + Reason: metav1.StatusReasonBadRequest, + Message: reason, + }} +} + +// NewServiceUnavailable creates an error that indicates that the requested service is unavailable. +func NewServiceUnavailable(reason string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusServiceUnavailable, + Reason: metav1.StatusReasonServiceUnavailable, + Message: reason, + }} +} + +// NewMethodNotSupported returns an error indicating the requested action is not supported on this kind. +func NewMethodNotSupported(qualifiedResource schema.GroupResource, action string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusMethodNotAllowed, + Reason: metav1.StatusReasonMethodNotAllowed, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + }, + Message: fmt.Sprintf("%s is not supported on resources of kind %q", action, qualifiedResource.String()), + }} +} + +// NewServerTimeout returns an error indicating the requested action could not be completed due to a +// transient error, and the client should try again. 
+func NewServerTimeout(qualifiedResource schema.GroupResource, operation string, retryAfterSeconds int) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusInternalServerError, + Reason: metav1.StatusReasonServerTimeout, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: operation, + RetryAfterSeconds: int32(retryAfterSeconds), + }, + Message: fmt.Sprintf("The %s operation against %s could not be completed at this time, please try again.", operation, qualifiedResource.String()), + }} +} + +// NewServerTimeoutForKind should not exist. Server timeouts happen when accessing resources, the Kind is just what we +// happened to be looking at when the request failed. This delegates to keep code sane, but we should work towards removing this. +func NewServerTimeoutForKind(qualifiedKind schema.GroupKind, operation string, retryAfterSeconds int) *StatusError { + return NewServerTimeout(schema.GroupResource{Group: qualifiedKind.Group, Resource: qualifiedKind.Kind}, operation, retryAfterSeconds) +} + +// NewInternalError returns an error indicating the item is invalid and cannot be processed. +func NewInternalError(err error) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusInternalServerError, + Reason: metav1.StatusReasonInternalError, + Details: &metav1.StatusDetails{ + Causes: []metav1.StatusCause{{Message: err.Error()}}, + }, + Message: fmt.Sprintf("Internal error occurred: %v", err), + }} +} + +// NewTimeoutError returns an error indicating that a timeout occurred before the request +// could be completed. Clients may retry, but the operation may still complete. +func NewTimeoutError(message string, retryAfterSeconds int) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: StatusServerTimeout, + Reason: metav1.StatusReasonTimeout, + Message: fmt.Sprintf("Timeout: %s", message), + Details: &metav1.StatusDetails{ + RetryAfterSeconds: int32(retryAfterSeconds), + }, + }} +} + +// NewGenericServerResponse returns a new error for server responses that are not in a recognizable form. +func NewGenericServerResponse(code int, verb string, qualifiedResource schema.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError { + reason := metav1.StatusReasonUnknown + message := fmt.Sprintf("the server responded with the status code %d but did not return more information", code) + switch code { + case http.StatusConflict: + if verb == "POST" { + reason = metav1.StatusReasonAlreadyExists + } else { + reason = metav1.StatusReasonConflict + } + message = "the server reported a conflict" + case http.StatusNotFound: + reason = metav1.StatusReasonNotFound + message = "the server could not find the requested resource" + case http.StatusBadRequest: + reason = metav1.StatusReasonBadRequest + message = "the server rejected our request for an unknown reason" + case http.StatusUnauthorized: + reason = metav1.StatusReasonUnauthorized + message = "the server has asked for the client to provide credentials" + case http.StatusForbidden: + reason = metav1.StatusReasonForbidden + // the server message has details about who is trying to perform what action. Keep its message. 
+ message = serverMessage + case http.StatusMethodNotAllowed: + reason = metav1.StatusReasonMethodNotAllowed + message = "the server does not allow this method on the requested resource" + case StatusUnprocessableEntity: + reason = metav1.StatusReasonInvalid + message = "the server rejected our request due to an error in our request" + case StatusServerTimeout: + reason = metav1.StatusReasonServerTimeout + message = "the server cannot complete the requested operation at this time, try again later" + case StatusTooManyRequests: + reason = metav1.StatusReasonTimeout + message = "the server has received too many requests and has asked us to try again later" + default: + if code >= 500 { + reason = metav1.StatusReasonInternalError + message = fmt.Sprintf("an error on the server (%q) has prevented the request from succeeding", serverMessage) + } + } + switch { + case !qualifiedResource.Empty() && len(name) > 0: + message = fmt.Sprintf("%s (%s %s %s)", message, strings.ToLower(verb), qualifiedResource.String(), name) + case !qualifiedResource.Empty(): + message = fmt.Sprintf("%s (%s %s)", message, strings.ToLower(verb), qualifiedResource.String()) + } + var causes []metav1.StatusCause + if isUnexpectedResponse { + causes = []metav1.StatusCause{ + { + Type: metav1.CauseTypeUnexpectedServerResponse, + Message: serverMessage, + }, + } + } else { + causes = nil + } + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: int32(code), + Reason: reason, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + + Causes: causes, + RetryAfterSeconds: int32(retryAfterSeconds), + }, + Message: message, + }} +} + +// IsNotFound returns true if the specified error was created by NewNotFound. +func IsNotFound(err error) bool { + return reasonForError(err) == metav1.StatusReasonNotFound +} + +// IsAlreadyExists determines if the err is an error which indicates that a specified resource already exists. +func IsAlreadyExists(err error) bool { + return reasonForError(err) == metav1.StatusReasonAlreadyExists +} + +// IsConflict determines if the err is an error which indicates the provided update conflicts. +func IsConflict(err error) bool { + return reasonForError(err) == metav1.StatusReasonConflict +} + +// IsInvalid determines if the err is an error which indicates the provided resource is not valid. +func IsInvalid(err error) bool { + return reasonForError(err) == metav1.StatusReasonInvalid +} + +// IsMethodNotSupported determines if the err is an error which indicates the provided action could not +// be performed because it is not supported by the server. +func IsMethodNotSupported(err error) bool { + return reasonForError(err) == metav1.StatusReasonMethodNotAllowed +} + +// IsBadRequest determines if err is an error which indicates that the request is invalid. +func IsBadRequest(err error) bool { + return reasonForError(err) == metav1.StatusReasonBadRequest +} + +// IsUnauthorized determines if err is an error which indicates that the request is unauthorized and +// requires authentication by the user. +func IsUnauthorized(err error) bool { + return reasonForError(err) == metav1.StatusReasonUnauthorized +} + +// IsForbidden determines if err is an error which indicates that the request is forbidden and cannot +// be completed as requested. 
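+// For example (illustrative), a caller deciding whether to surface an RBAC
+// failure rather than retry could test:
+//
+//	if IsForbidden(err) {
+//		// report the permission problem; retrying will not help
+//	}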
+func IsForbidden(err error) bool { + return reasonForError(err) == metav1.StatusReasonForbidden +} + +// IsTimeout determines if err is an error which indicates that request times out due to long +// processing. +func IsTimeout(err error) bool { + return reasonForError(err) == metav1.StatusReasonTimeout +} + +// IsServerTimeout determines if err is an error which indicates that the request needs to be retried +// by the client. +func IsServerTimeout(err error) bool { + return reasonForError(err) == metav1.StatusReasonServerTimeout +} + +// IsInternalError determines if err is an error which indicates an internal server error. +func IsInternalError(err error) bool { + return reasonForError(err) == metav1.StatusReasonInternalError +} + +// IsTooManyRequests determines if err is an error which indicates that there are too many requests +// that the server cannot handle. +// TODO: update IsTooManyRequests() when the TooManyRequests(429) error returned from the API server has a non-empty Reason field +func IsTooManyRequests(err error) bool { + switch t := err.(type) { + case APIStatus: + return t.Status().Code == StatusTooManyRequests + } + return false +} + +// IsUnexpectedServerError returns true if the server response was not in the expected API format, +// and may be the result of another HTTP actor. +func IsUnexpectedServerError(err error) bool { + switch t := err.(type) { + case APIStatus: + if d := t.Status().Details; d != nil { + for _, cause := range d.Causes { + if cause.Type == metav1.CauseTypeUnexpectedServerResponse { + return true + } + } + } + } + return false +} + +// IsUnexpectedObjectError determines if err is due to an unexpected object from the master. +func IsUnexpectedObjectError(err error) bool { + _, ok := err.(*UnexpectedObjectError) + return err != nil && ok +} + +// SuggestsClientDelay returns true if this error suggests a client delay as well as the +// suggested seconds to wait, or false if the error does not imply a wait. +func SuggestsClientDelay(err error) (int, bool) { + switch t := err.(type) { + case APIStatus: + if t.Status().Details != nil { + switch t.Status().Reason { + case metav1.StatusReasonServerTimeout, metav1.StatusReasonTimeout: + return int(t.Status().Details.RetryAfterSeconds), true + } + } + } + return 0, false +} + +func reasonForError(err error) metav1.StatusReason { + switch t := err.(type) { + case APIStatus: + return t.Status().Reason + } + return metav1.StatusReasonUnknown +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS new file mode 100755 index 0000000000..6044c031ed --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS @@ -0,0 +1,26 @@ +reviewers: +- thockin +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- nikhiljindal +- gmarek +- kargakis +- janetkuo +- ncdc +- eparis +- dims +- krousey +- markturansky +- fabioy +- resouer +- david-mcmahon +- mfojtik +- jianhuiz +- feihujiang +- ghodss diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/default.go b/vendor/k8s.io/apimachinery/pkg/api/meta/default.go new file mode 100644 index 0000000000..5ea906a2a7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/default.go @@ -0,0 +1,51 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +// NewDefaultRESTMapperFromScheme instantiates a DefaultRESTMapper based on types registered in the given scheme. +func NewDefaultRESTMapperFromScheme(defaultGroupVersions []schema.GroupVersion, interfacesFunc VersionInterfacesFunc, + importPathPrefix string, ignoredKinds, rootScoped sets.String, scheme *runtime.Scheme) *DefaultRESTMapper { + + mapper := NewDefaultRESTMapper(defaultGroupVersions, interfacesFunc) + // enumerate all supported versions, get the kinds, and register with the mapper how to address + // our resources. + for _, gv := range defaultGroupVersions { + for kind, oType := range scheme.KnownTypes(gv) { + gvk := gv.WithKind(kind) + // TODO: Remove import path check. + // We check the import path because we currently stuff both "api" and "extensions" objects + // into the same group within Scheme since Scheme has no notion of groups yet. + if !strings.Contains(oType.PkgPath(), importPathPrefix) || ignoredKinds.Has(kind) { + continue + } + scope := RESTScopeNamespace + if rootScoped.Has(kind) { + scope = RESTScopeRoot + } + mapper.Add(gvk, scope) + } + } + return mapper +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/doc.go b/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/api/meta/doc.go rename to vendor/k8s.io/apimachinery/pkg/api/meta/doc.go diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go b/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go new file mode 100644 index 0000000000..1503bd6d84 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go @@ -0,0 +1,105 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package meta + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// AmbiguousResourceError is returned if the RESTMapper finds multiple matches for a resource +type AmbiguousResourceError struct { + PartialResource schema.GroupVersionResource + + MatchingResources []schema.GroupVersionResource + MatchingKinds []schema.GroupVersionKind +} + +func (e *AmbiguousResourceError) Error() string { + switch { + case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0: + return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialResource, e.MatchingResources, e.MatchingKinds) + case len(e.MatchingKinds) > 0: + return fmt.Sprintf("%v matches multiple kinds %v", e.PartialResource, e.MatchingKinds) + case len(e.MatchingResources) > 0: + return fmt.Sprintf("%v matches multiple resources %v", e.PartialResource, e.MatchingResources) + } + return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialResource) +} + +// AmbiguousKindError is returned if the RESTMapper finds multiple matches for a kind +type AmbiguousKindError struct { + PartialKind schema.GroupVersionKind + + MatchingResources []schema.GroupVersionResource + MatchingKinds []schema.GroupVersionKind +} + +func (e *AmbiguousKindError) Error() string { + switch { + case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0: + return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialKind, e.MatchingResources, e.MatchingKinds) + case len(e.MatchingKinds) > 0: + return fmt.Sprintf("%v matches multiple kinds %v", e.PartialKind, e.MatchingKinds) + case len(e.MatchingResources) > 0: + return fmt.Sprintf("%v matches multiple resources %v", e.PartialKind, e.MatchingResources) + } + return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialKind) +} + +func IsAmbiguousError(err error) bool { + if err == nil { + return false + } + switch err.(type) { + case *AmbiguousResourceError, *AmbiguousKindError: + return true + default: + return false + } +} + +// NoResourceMatchError is returned if the RESTMapper can't find any match for a resource +type NoResourceMatchError struct { + PartialResource schema.GroupVersionResource +} + +func (e *NoResourceMatchError) Error() string { + return fmt.Sprintf("no matches for %v", e.PartialResource) +} + +// NoKindMatchError is returned if the RESTMapper can't find any match for a kind +type NoKindMatchError struct { + PartialKind schema.GroupVersionKind +} + +func (e *NoKindMatchError) Error() string { + return fmt.Sprintf("no matches for %v", e.PartialKind) +} + +func IsNoMatchError(err error) bool { + if err == nil { + return false + } + switch err.(type) { + case *NoResourceMatchError, *NoKindMatchError: + return true + default: + return false + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/firsthit_restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go similarity index 78% rename from vendor/k8s.io/kubernetes/pkg/api/meta/firsthit_restmapper.go rename to vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go index 8a21b54aab..fd22100229 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/firsthit_restmapper.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go @@ -19,8 +19,8 @@ package meta import ( "fmt" - "k8s.io/kubernetes/pkg/api/unversioned" - utilerrors "k8s.io/kubernetes/pkg/util/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" ) // FirstHitRESTMapper is a wrapper for multiple RESTMappers which returns the @@ 
-33,7 +33,7 @@ func (m FirstHitRESTMapper) String() string { return fmt.Sprintf("FirstHitRESTMapper{\n\t%v\n}", m.MultiRESTMapper) } -func (m FirstHitRESTMapper) ResourceFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error) { +func (m FirstHitRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { errors := []error{} for _, t := range m.MultiRESTMapper { ret, err := t.ResourceFor(resource) @@ -43,10 +43,10 @@ func (m FirstHitRESTMapper) ResourceFor(resource unversioned.GroupVersionResourc errors = append(errors, err) } - return unversioned.GroupVersionResource{}, collapseAggregateErrors(errors) + return schema.GroupVersionResource{}, collapseAggregateErrors(errors) } -func (m FirstHitRESTMapper) KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) { +func (m FirstHitRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { errors := []error{} for _, t := range m.MultiRESTMapper { ret, err := t.KindFor(resource) @@ -56,13 +56,13 @@ func (m FirstHitRESTMapper) KindFor(resource unversioned.GroupVersionResource) ( errors = append(errors, err) } - return unversioned.GroupVersionKind{}, collapseAggregateErrors(errors) + return schema.GroupVersionKind{}, collapseAggregateErrors(errors) } // RESTMapping provides the REST mapping for the resource based on the // kind and version. This implementation supports multiple REST schemas and // return the first match. -func (m FirstHitRESTMapper) RESTMapping(gk unversioned.GroupKind, versions ...string) (*RESTMapping, error) { +func (m FirstHitRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { errors := []error{} for _, t := range m.MultiRESTMapper { ret, err := t.RESTMapping(gk, versions...) diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go similarity index 93% rename from vendor/k8s.io/kubernetes/pkg/api/meta/help.go rename to vendor/k8s.io/apimachinery/pkg/api/meta/help.go index 39868e6b00..930441fa6a 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/help.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go @@ -20,18 +20,16 @@ import ( "fmt" "reflect" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" ) // IsListType returns true if the provided Object has a slice called Items func IsListType(obj runtime.Object) bool { - // if we're a runtime.Unstructured, check to see if we have an `items` key - // This is a list type for recognition, but other Items type methods will fail on it - // and give you errors. - if unstructured, ok := obj.(*runtime.Unstructured); ok { - _, ok := unstructured.Object["items"] - return ok + // if we're a runtime.Unstructured, check whether this is a list. + // TODO: refactor GetItemsPtr to use an interface that returns []runtime.Object + if unstructured, ok := obj.(runtime.Unstructured); ok { + return unstructured.IsList() } _, err := GetItemsPtr(obj) diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go new file mode 100644 index 0000000000..524b1ebd81 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go @@ -0,0 +1,147 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +// VersionInterfaces contains the interfaces one should use for dealing with types of a particular version. +type VersionInterfaces struct { + runtime.ObjectConvertor + MetadataAccessor +} + +type ListMetaAccessor interface { + GetListMeta() List +} + +// List lets you work with list metadata from any of the versioned or +// internal API objects. Attempting to set or retrieve a field on an object that does +// not support that field will be a no-op and return a default value. +type List metav1.List + +// Type exposes the type and APIVersion of versioned or internal API objects. +type Type metav1.Type + +// MetadataAccessor lets you work with object and list metadata from any of the versioned or +// internal API objects. Attempting to set or retrieve a field on an object that does +// not support that field (Name, UID, Namespace on lists) will be a no-op and return +// a default value. +// +// MetadataAccessor exposes Interface in a way that can be used with multiple objects. +type MetadataAccessor interface { + APIVersion(obj runtime.Object) (string, error) + SetAPIVersion(obj runtime.Object, version string) error + + Kind(obj runtime.Object) (string, error) + SetKind(obj runtime.Object, kind string) error + + Namespace(obj runtime.Object) (string, error) + SetNamespace(obj runtime.Object, namespace string) error + + Name(obj runtime.Object) (string, error) + SetName(obj runtime.Object, name string) error + + GenerateName(obj runtime.Object) (string, error) + SetGenerateName(obj runtime.Object, name string) error + + UID(obj runtime.Object) (types.UID, error) + SetUID(obj runtime.Object, uid types.UID) error + + SelfLink(obj runtime.Object) (string, error) + SetSelfLink(obj runtime.Object, selfLink string) error + + Labels(obj runtime.Object) (map[string]string, error) + SetLabels(obj runtime.Object, labels map[string]string) error + + Annotations(obj runtime.Object) (map[string]string, error) + SetAnnotations(obj runtime.Object, annotations map[string]string) error + + runtime.ResourceVersioner +} + +type RESTScopeName string + +const ( + RESTScopeNameNamespace RESTScopeName = "namespace" + RESTScopeNameRoot RESTScopeName = "root" +) + +// RESTScope contains the information needed to deal with REST resources that are in a resource hierarchy +type RESTScope interface { + // Name of the scope + Name() RESTScopeName + // ParamName is the optional name of the parameter that should be inserted in the resource url + // If empty, no param will be inserted + ParamName() string + // ArgumentName is the optional name that should be used for the variable holding the value. + ArgumentName() string + // ParamDescription is the optional description to use to document the parameter in api documentation + ParamDescription() string +} + +// RESTMapping contains the information needed to deal with objects of a specific +// resource and kind in a RESTful manner. 
+type RESTMapping struct { + // Resource is a string representing the name of this resource as a REST client would see it + Resource string + + GroupVersionKind schema.GroupVersionKind + + // Scope contains the information needed to deal with REST Resources that are in a resource hierarchy + Scope RESTScope + + runtime.ObjectConvertor + MetadataAccessor +} + +// RESTMapper allows clients to map resources to kind, and map kind and version +// to interfaces for manipulating those objects. It is primarily intended for +// consumers of Kubernetes compatible REST APIs as defined in docs/devel/api-conventions.md. +// +// The Kubernetes API provides versioned resources and object kinds which are scoped +// to API groups. In other words, kinds and resources should not be assumed to be +// unique across groups. +// +// TODO: split into sub-interfaces +type RESTMapper interface { + // KindFor takes a partial resource and returns the single match. Returns an error if there are multiple matches + KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) + + // KindsFor takes a partial resource and returns the list of potential kinds in priority order + KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) + + // ResourceFor takes a partial resource and returns the single match. Returns an error if there are multiple matches + ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) + + // ResourcesFor takes a partial resource and returns the list of potential resource in priority order + ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) + + // RESTMapping identifies a preferred resource mapping for the provided group kind. + RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) + // RESTMappings returns all resource mappings for the provided group kind if no + // version search is provided. Otherwise identifies a preferred resource mapping for + // the provided version(s). + RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) + + AliasesForResource(resource string) ([]string, bool) + ResourceSingularizer(resource string) (singular string, err error) +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go new file mode 100644 index 0000000000..7492324ed9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -0,0 +1,581 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "fmt" + "reflect" + + "github.com/golang/glog" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +// errNotList is returned when an object implements the Object style interfaces but not the List style +// interfaces. 
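For orientation, a hypothetical consumer of the RESTMapper interface above (the helper, package name and version string are illustrative assumptions, not vendored code); it uses only methods shown in this diff, including IsNoMatchError from the errors.go file added earlier:

package example // hypothetical consumer package

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// resourceNameFor resolves the plural REST resource name for a group/kind,
// distinguishing "nothing registered" from other failures via IsNoMatchError.
func resourceNameFor(m meta.RESTMapper, gk schema.GroupKind, version string) (string, error) {
	mapping, err := m.RESTMapping(gk, version)
	if meta.IsNoMatchError(err) {
		return "", fmt.Errorf("no mapping registered for %v", gk)
	}
	if err != nil {
		return "", err
	}
	return mapping.Resource, nil
}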
+var errNotList = fmt.Errorf("object does not implement the List interfaces") + +// ListAccessor returns a List interface for the provided object or an error if the object does +// not provide List. +// IMPORTANT: Objects are a superset of lists, so all Objects return List metadata. Do not use this +// check to determine whether an object *is* a List. +// TODO: return bool instead of error +func ListAccessor(obj interface{}) (List, error) { + switch t := obj.(type) { + case List: + return t, nil + case metav1.List: + return t, nil + case ListMetaAccessor: + if m := t.GetListMeta(); m != nil { + return m, nil + } + return nil, errNotList + case metav1.ListMetaAccessor: + if m := t.GetListMeta(); m != nil { + return m, nil + } + return nil, errNotList + case metav1.Object: + return t, nil + case metav1.ObjectMetaAccessor: + if m := t.GetObjectMeta(); m != nil { + return m, nil + } + return nil, errNotList + default: + return nil, errNotList + } +} + +// errNotObject is returned when an object implements the List style interfaces but not the Object style +// interfaces. +var errNotObject = fmt.Errorf("object does not implement the Object interfaces") + +// Accessor takes an arbitrary object pointer and returns meta.Interface. +// obj must be a pointer to an API type. An error is returned if the minimum +// required fields are missing. Fields that are not required return the default +// value and are a no-op if set. +// TODO: return bool instead of error +func Accessor(obj interface{}) (metav1.Object, error) { + switch t := obj.(type) { + case metav1.Object: + return t, nil + case metav1.ObjectMetaAccessor: + if m := t.GetObjectMeta(); m != nil { + return m, nil + } + return nil, errNotObject + case List, metav1.List, ListMetaAccessor, metav1.ListMetaAccessor: + return nil, errNotObject + default: + return nil, errNotObject + } +} + +// TypeAccessor returns an interface that allows retrieving and modifying the APIVersion +// and Kind of an in-memory internal object. +// TODO: this interface is used to test code that does not have ObjectMeta or ListMeta +// in round tripping (objects which can use apiVersion/kind, but do not fit the Kube +// api conventions). 
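A hypothetical illustration of the Accessor helper above (same illustrative package as the previous sketch, additionally importing "k8s.io/apimachinery/pkg/runtime"; the function name is an assumption):

// qualifiedName returns "namespace/name" for any object carrying ObjectMeta,
// without knowing its concrete Go type.
func qualifiedName(obj runtime.Object) (string, error) {
	m, err := meta.Accessor(obj)
	if err != nil {
		return "", err
	}
	return m.GetNamespace() + "/" + m.GetName(), nil
}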
+func TypeAccessor(obj interface{}) (Type, error) { + if typed, ok := obj.(runtime.Object); ok { + return objectAccessor{typed}, nil + } + v, err := conversion.EnforcePtr(obj) + if err != nil { + return nil, err + } + t := v.Type() + if v.Kind() != reflect.Struct { + return nil, fmt.Errorf("expected struct, but got %v: %v (%#v)", v.Kind(), t, v.Interface()) + } + + typeMeta := v.FieldByName("TypeMeta") + if !typeMeta.IsValid() { + return nil, fmt.Errorf("struct %v lacks embedded TypeMeta type", t) + } + a := &genericAccessor{} + if err := extractFromTypeMeta(typeMeta, a); err != nil { + return nil, fmt.Errorf("unable to find type fields on %#v: %v", typeMeta, err) + } + return a, nil +} + +type objectAccessor struct { + runtime.Object +} + +func (obj objectAccessor) GetKind() string { + return obj.GetObjectKind().GroupVersionKind().Kind +} + +func (obj objectAccessor) SetKind(kind string) { + gvk := obj.GetObjectKind().GroupVersionKind() + gvk.Kind = kind + obj.GetObjectKind().SetGroupVersionKind(gvk) +} + +func (obj objectAccessor) GetAPIVersion() string { + return obj.GetObjectKind().GroupVersionKind().GroupVersion().String() +} + +func (obj objectAccessor) SetAPIVersion(version string) { + gvk := obj.GetObjectKind().GroupVersionKind() + gv, err := schema.ParseGroupVersion(version) + if err != nil { + gv = schema.GroupVersion{Version: version} + } + gvk.Group, gvk.Version = gv.Group, gv.Version + obj.GetObjectKind().SetGroupVersionKind(gvk) +} + +// NewAccessor returns a MetadataAccessor that can retrieve +// or manipulate resource version on objects derived from core API +// metadata concepts. +func NewAccessor() MetadataAccessor { + return resourceAccessor{} +} + +// resourceAccessor implements ResourceVersioner and SelfLinker. +type resourceAccessor struct{} + +func (resourceAccessor) Kind(obj runtime.Object) (string, error) { + return objectAccessor{obj}.GetKind(), nil +} + +func (resourceAccessor) SetKind(obj runtime.Object, kind string) error { + objectAccessor{obj}.SetKind(kind) + return nil +} + +func (resourceAccessor) APIVersion(obj runtime.Object) (string, error) { + return objectAccessor{obj}.GetAPIVersion(), nil +} + +func (resourceAccessor) SetAPIVersion(obj runtime.Object, version string) error { + objectAccessor{obj}.SetAPIVersion(version) + return nil +} + +func (resourceAccessor) Namespace(obj runtime.Object) (string, error) { + accessor, err := Accessor(obj) + if err != nil { + return "", err + } + return accessor.GetNamespace(), nil +} + +func (resourceAccessor) SetNamespace(obj runtime.Object, namespace string) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetNamespace(namespace) + return nil +} + +func (resourceAccessor) Name(obj runtime.Object) (string, error) { + accessor, err := Accessor(obj) + if err != nil { + return "", err + } + return accessor.GetName(), nil +} + +func (resourceAccessor) SetName(obj runtime.Object, name string) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetName(name) + return nil +} + +func (resourceAccessor) GenerateName(obj runtime.Object) (string, error) { + accessor, err := Accessor(obj) + if err != nil { + return "", err + } + return accessor.GetGenerateName(), nil +} + +func (resourceAccessor) SetGenerateName(obj runtime.Object, name string) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetGenerateName(name) + return nil +} + +func (resourceAccessor) UID(obj runtime.Object) (types.UID, error) { + accessor, err 
:= Accessor(obj) + if err != nil { + return "", err + } + return accessor.GetUID(), nil +} + +func (resourceAccessor) SetUID(obj runtime.Object, uid types.UID) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetUID(uid) + return nil +} + +func (resourceAccessor) SelfLink(obj runtime.Object) (string, error) { + accessor, err := ListAccessor(obj) + if err != nil { + return "", err + } + return accessor.GetSelfLink(), nil +} + +func (resourceAccessor) SetSelfLink(obj runtime.Object, selfLink string) error { + accessor, err := ListAccessor(obj) + if err != nil { + return err + } + accessor.SetSelfLink(selfLink) + return nil +} + +func (resourceAccessor) Labels(obj runtime.Object) (map[string]string, error) { + accessor, err := Accessor(obj) + if err != nil { + return nil, err + } + return accessor.GetLabels(), nil +} + +func (resourceAccessor) SetLabels(obj runtime.Object, labels map[string]string) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetLabels(labels) + return nil +} + +func (resourceAccessor) Annotations(obj runtime.Object) (map[string]string, error) { + accessor, err := Accessor(obj) + if err != nil { + return nil, err + } + return accessor.GetAnnotations(), nil +} + +func (resourceAccessor) SetAnnotations(obj runtime.Object, annotations map[string]string) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetAnnotations(annotations) + return nil +} + +func (resourceAccessor) ResourceVersion(obj runtime.Object) (string, error) { + accessor, err := ListAccessor(obj) + if err != nil { + return "", err + } + return accessor.GetResourceVersion(), nil +} + +func (resourceAccessor) SetResourceVersion(obj runtime.Object, version string) error { + accessor, err := ListAccessor(obj) + if err != nil { + return err + } + accessor.SetResourceVersion(version) + return nil +} + +// extractFromOwnerReference extracts v to o. v is the OwnerReferences field of an object. +func extractFromOwnerReference(v reflect.Value, o *metav1.OwnerReference) error { + if err := runtime.Field(v, "APIVersion", &o.APIVersion); err != nil { + return err + } + if err := runtime.Field(v, "Kind", &o.Kind); err != nil { + return err + } + if err := runtime.Field(v, "Name", &o.Name); err != nil { + return err + } + if err := runtime.Field(v, "UID", &o.UID); err != nil { + return err + } + var controllerPtr *bool + if err := runtime.Field(v, "Controller", &controllerPtr); err != nil { + return err + } + if controllerPtr != nil { + controller := *controllerPtr + o.Controller = &controller + } + var blockOwnerDeletionPtr *bool + if err := runtime.Field(v, "BlockOwnerDeletion", &blockOwnerDeletionPtr); err != nil { + return err + } + if blockOwnerDeletionPtr != nil { + block := *blockOwnerDeletionPtr + o.BlockOwnerDeletion = &block + } + return nil +} + +// setOwnerReference sets v to o. v is the OwnerReferences field of an object. 
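The MetadataAccessor returned by NewAccessor above supports the same kind of type-agnostic code; a hypothetical sketch in the same illustrative package (the function name and label key are assumptions):

// relabel sets one label on an arbitrary object through the generic accessor,
// tolerating objects that have no labels yet.
func relabel(obj runtime.Object, key, value string) error {
	acc := meta.NewAccessor()
	labels, err := acc.Labels(obj)
	if err != nil {
		return err
	}
	if labels == nil {
		labels = map[string]string{}
	}
	labels[key] = value
	return acc.SetLabels(obj, labels)
}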
+func setOwnerReference(v reflect.Value, o *metav1.OwnerReference) error { + if err := runtime.SetField(o.APIVersion, v, "APIVersion"); err != nil { + return err + } + if err := runtime.SetField(o.Kind, v, "Kind"); err != nil { + return err + } + if err := runtime.SetField(o.Name, v, "Name"); err != nil { + return err + } + if err := runtime.SetField(o.UID, v, "UID"); err != nil { + return err + } + if o.Controller != nil { + controller := *(o.Controller) + if err := runtime.SetField(&controller, v, "Controller"); err != nil { + return err + } + } + if o.BlockOwnerDeletion != nil { + block := *(o.BlockOwnerDeletion) + if err := runtime.SetField(&block, v, "BlockOwnerDeletion"); err != nil { + return err + } + } + return nil +} + +// genericAccessor contains pointers to strings that can modify an arbitrary +// struct and implements the Accessor interface. +type genericAccessor struct { + namespace *string + name *string + generateName *string + uid *types.UID + apiVersion *string + kind *string + resourceVersion *string + selfLink *string + creationTimestamp *metav1.Time + deletionTimestamp **metav1.Time + labels *map[string]string + annotations *map[string]string + ownerReferences reflect.Value + finalizers *[]string +} + +func (a genericAccessor) GetNamespace() string { + if a.namespace == nil { + return "" + } + return *a.namespace +} + +func (a genericAccessor) SetNamespace(namespace string) { + if a.namespace == nil { + return + } + *a.namespace = namespace +} + +func (a genericAccessor) GetName() string { + if a.name == nil { + return "" + } + return *a.name +} + +func (a genericAccessor) SetName(name string) { + if a.name == nil { + return + } + *a.name = name +} + +func (a genericAccessor) GetGenerateName() string { + if a.generateName == nil { + return "" + } + return *a.generateName +} + +func (a genericAccessor) SetGenerateName(generateName string) { + if a.generateName == nil { + return + } + *a.generateName = generateName +} + +func (a genericAccessor) GetUID() types.UID { + if a.uid == nil { + return "" + } + return *a.uid +} + +func (a genericAccessor) SetUID(uid types.UID) { + if a.uid == nil { + return + } + *a.uid = uid +} + +func (a genericAccessor) GetAPIVersion() string { + return *a.apiVersion +} + +func (a genericAccessor) SetAPIVersion(version string) { + *a.apiVersion = version +} + +func (a genericAccessor) GetKind() string { + return *a.kind +} + +func (a genericAccessor) SetKind(kind string) { + *a.kind = kind +} + +func (a genericAccessor) GetResourceVersion() string { + return *a.resourceVersion +} + +func (a genericAccessor) SetResourceVersion(version string) { + *a.resourceVersion = version +} + +func (a genericAccessor) GetSelfLink() string { + return *a.selfLink +} + +func (a genericAccessor) SetSelfLink(selfLink string) { + *a.selfLink = selfLink +} + +func (a genericAccessor) GetCreationTimestamp() metav1.Time { + return *a.creationTimestamp +} + +func (a genericAccessor) SetCreationTimestamp(timestamp metav1.Time) { + *a.creationTimestamp = timestamp +} + +func (a genericAccessor) GetDeletionTimestamp() *metav1.Time { + return *a.deletionTimestamp +} + +func (a genericAccessor) SetDeletionTimestamp(timestamp *metav1.Time) { + *a.deletionTimestamp = timestamp +} + +func (a genericAccessor) GetLabels() map[string]string { + if a.labels == nil { + return nil + } + return *a.labels +} + +func (a genericAccessor) SetLabels(labels map[string]string) { + *a.labels = labels +} + +func (a genericAccessor) GetAnnotations() map[string]string { + if a.annotations == 
nil { + return nil + } + return *a.annotations +} + +func (a genericAccessor) SetAnnotations(annotations map[string]string) { + if a.annotations == nil { + emptyAnnotations := make(map[string]string) + a.annotations = &emptyAnnotations + } + *a.annotations = annotations +} + +func (a genericAccessor) GetFinalizers() []string { + if a.finalizers == nil { + return nil + } + return *a.finalizers +} + +func (a genericAccessor) SetFinalizers(finalizers []string) { + *a.finalizers = finalizers +} + +func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { + var ret []metav1.OwnerReference + s := a.ownerReferences + if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { + glog.Errorf("expect %v to be a pointer to slice", s) + return ret + } + s = s.Elem() + // Set the capacity to one element greater to avoid copy if the caller later append an element. + ret = make([]metav1.OwnerReference, s.Len(), s.Len()+1) + for i := 0; i < s.Len(); i++ { + if err := extractFromOwnerReference(s.Index(i), &ret[i]); err != nil { + glog.Errorf("extractFromOwnerReference failed: %v", err) + return ret + } + } + return ret +} + +func (a genericAccessor) SetOwnerReferences(references []metav1.OwnerReference) { + s := a.ownerReferences + if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { + glog.Errorf("expect %v to be a pointer to slice", s) + } + s = s.Elem() + newReferences := reflect.MakeSlice(s.Type(), len(references), len(references)) + for i := 0; i < len(references); i++ { + if err := setOwnerReference(newReferences.Index(i), &references[i]); err != nil { + glog.Errorf("setOwnerReference failed: %v", err) + return + } + } + s.Set(newReferences) +} + +// extractFromTypeMeta extracts pointers to version and kind fields from an object +func extractFromTypeMeta(v reflect.Value, a *genericAccessor) error { + if err := runtime.FieldPtr(v, "APIVersion", &a.apiVersion); err != nil { + return err + } + if err := runtime.FieldPtr(v, "Kind", &a.kind); err != nil { + return err + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/multirestmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go similarity index 79% rename from vendor/k8s.io/kubernetes/pkg/api/meta/multirestmapper.go rename to vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go index 7b239e0340..d32b0dbf79 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/multirestmapper.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go @@ -20,9 +20,9 @@ import ( "fmt" "strings" - "k8s.io/kubernetes/pkg/api/unversioned" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" ) // MultiRESTMapper is a wrapper for multiple RESTMappers. 
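As a hypothetical aside on composition: the MultiRESTMapper methods below range over the receiver itself, so it is assumed here (as in upstream Kubernetes) to be a slice of RESTMappers that can be built directly; same illustrative package and imports as the earlier sketches:

// kindForAcrossMappers consults several mappers for the kind behind a
// partially specified resource name; illustrative only.
func kindForAcrossMappers(mappers []meta.RESTMapper, resource string) (schema.GroupVersionKind, error) {
	multi := meta.MultiRESTMapper(mappers) // assumes MultiRESTMapper is []RESTMapper
	return multi.KindFor(schema.GroupVersionResource{Resource: resource})
}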
@@ -51,8 +51,8 @@ func (m MultiRESTMapper) ResourceSingularizer(resource string) (singular string, return } -func (m MultiRESTMapper) ResourcesFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) { - allGVRs := []unversioned.GroupVersionResource{} +func (m MultiRESTMapper) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + allGVRs := []schema.GroupVersionResource{} for _, t := range m { gvrs, err := t.ResourcesFor(resource) // ignore "no match" errors, but any other error percolates back up @@ -86,8 +86,8 @@ func (m MultiRESTMapper) ResourcesFor(resource unversioned.GroupVersionResource) return allGVRs, nil } -func (m MultiRESTMapper) KindsFor(resource unversioned.GroupVersionResource) (gvk []unversioned.GroupVersionKind, err error) { - allGVKs := []unversioned.GroupVersionKind{} +func (m MultiRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) { + allGVKs := []schema.GroupVersionKind{} for _, t := range m { gvks, err := t.KindsFor(resource) // ignore "no match" errors, but any other error percolates back up @@ -121,34 +121,34 @@ func (m MultiRESTMapper) KindsFor(resource unversioned.GroupVersionResource) (gv return allGVKs, nil } -func (m MultiRESTMapper) ResourceFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error) { +func (m MultiRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { resources, err := m.ResourcesFor(resource) if err != nil { - return unversioned.GroupVersionResource{}, err + return schema.GroupVersionResource{}, err } if len(resources) == 1 { return resources[0], nil } - return unversioned.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources} + return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources} } -func (m MultiRESTMapper) KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) { +func (m MultiRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { kinds, err := m.KindsFor(resource) if err != nil { - return unversioned.GroupVersionKind{}, err + return schema.GroupVersionKind{}, err } if len(kinds) == 1 { return kinds[0], nil } - return unversioned.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds} + return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds} } // RESTMapping provides the REST mapping for the resource based on the // kind and version. This implementation supports multiple REST schemas and // return the first match. 
-func (m MultiRESTMapper) RESTMapping(gk unversioned.GroupKind, versions ...string) (*RESTMapping, error) { +func (m MultiRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { allMappings := []*RESTMapping{} errors := []error{} @@ -171,7 +171,7 @@ func (m MultiRESTMapper) RESTMapping(gk unversioned.GroupKind, versions ...strin return allMappings[0], nil } if len(allMappings) > 1 { - var kinds []unversioned.GroupVersionKind + var kinds []schema.GroupVersionKind for _, m := range allMappings { kinds = append(kinds, m.GroupVersionKind) } @@ -185,12 +185,12 @@ func (m MultiRESTMapper) RESTMapping(gk unversioned.GroupKind, versions ...strin // RESTMappings returns all possible RESTMappings for the provided group kind, or an error // if the type is not recognized. -func (m MultiRESTMapper) RESTMappings(gk unversioned.GroupKind) ([]*RESTMapping, error) { +func (m MultiRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { var allMappings []*RESTMapping var errors []error for _, t := range m { - currMappings, err := t.RESTMappings(gk) + currMappings, err := t.RESTMappings(gk, versions...) // ignore "no match" errors, but any other error percolates back up if IsNoMatchError(err) { continue diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/priority.go b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go similarity index 75% rename from vendor/k8s.io/kubernetes/pkg/api/meta/priority.go rename to vendor/k8s.io/apimachinery/pkg/api/meta/priority.go index 2e460609d1..91203d11fc 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/priority.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go @@ -19,7 +19,7 @@ package meta import ( "fmt" - "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/apimachinery/pkg/runtime/schema" ) const ( @@ -39,13 +39,13 @@ type PriorityRESTMapper struct { // The list of all matching resources is narrowed based on the patterns until only one remains. // A pattern with no matches is skipped. A pattern with more than one match uses its // matches as the list to continue matching against. - ResourcePriority []unversioned.GroupVersionResource + ResourcePriority []schema.GroupVersionResource // KindPriority is a list of priority patterns to apply to matching kinds. // The list of all matching kinds is narrowed based on the patterns until only one remains. // A pattern with no matches is skipped. A pattern with more than one match uses its // matches as the list to continue matching against. - KindPriority []unversioned.GroupVersionKind + KindPriority []schema.GroupVersionKind } func (m PriorityRESTMapper) String() string { @@ -53,18 +53,18 @@ func (m PriorityRESTMapper) String() string { } // ResourceFor finds all resources, then passes them through the ResourcePriority patterns to find a single matching hit. -func (m PriorityRESTMapper) ResourceFor(partiallySpecifiedResource unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error) { +func (m PriorityRESTMapper) ResourceFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionResource, error) { originalGVRs, err := m.Delegate.ResourcesFor(partiallySpecifiedResource) if err != nil { - return unversioned.GroupVersionResource{}, err + return schema.GroupVersionResource{}, err } if len(originalGVRs) == 1 { return originalGVRs[0], nil } - remainingGVRs := append([]unversioned.GroupVersionResource{}, originalGVRs...) + remainingGVRs := append([]schema.GroupVersionResource{}, originalGVRs...) 
for _, pattern := range m.ResourcePriority { - matchedGVRs := []unversioned.GroupVersionResource{} + matchedGVRs := []schema.GroupVersionResource{} for _, gvr := range remainingGVRs { if resourceMatches(pattern, gvr) { matchedGVRs = append(matchedGVRs, gvr) @@ -85,22 +85,22 @@ func (m PriorityRESTMapper) ResourceFor(partiallySpecifiedResource unversioned.G } } - return unversioned.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingResources: originalGVRs} + return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingResources: originalGVRs} } // KindFor finds all kinds, then passes them through the KindPriority patterns to find a single matching hit. -func (m PriorityRESTMapper) KindFor(partiallySpecifiedResource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) { +func (m PriorityRESTMapper) KindFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionKind, error) { originalGVKs, err := m.Delegate.KindsFor(partiallySpecifiedResource) if err != nil { - return unversioned.GroupVersionKind{}, err + return schema.GroupVersionKind{}, err } if len(originalGVKs) == 1 { return originalGVKs[0], nil } - remainingGVKs := append([]unversioned.GroupVersionKind{}, originalGVKs...) + remainingGVKs := append([]schema.GroupVersionKind{}, originalGVKs...) for _, pattern := range m.KindPriority { - matchedGVKs := []unversioned.GroupVersionKind{} + matchedGVKs := []schema.GroupVersionKind{} for _, gvr := range remainingGVKs { if kindMatches(pattern, gvr) { matchedGVKs = append(matchedGVKs, gvr) @@ -121,10 +121,10 @@ func (m PriorityRESTMapper) KindFor(partiallySpecifiedResource unversioned.Group } } - return unversioned.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingKinds: originalGVKs} + return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingKinds: originalGVKs} } -func resourceMatches(pattern unversioned.GroupVersionResource, resource unversioned.GroupVersionResource) bool { +func resourceMatches(pattern schema.GroupVersionResource, resource schema.GroupVersionResource) bool { if pattern.Group != AnyGroup && pattern.Group != resource.Group { return false } @@ -138,7 +138,7 @@ func resourceMatches(pattern unversioned.GroupVersionResource, resource unversio return true } -func kindMatches(pattern unversioned.GroupVersionKind, kind unversioned.GroupVersionKind) bool { +func kindMatches(pattern schema.GroupVersionKind, kind schema.GroupVersionKind) bool { if pattern.Group != AnyGroup && pattern.Group != kind.Group { return false } @@ -152,7 +152,7 @@ func kindMatches(pattern unversioned.GroupVersionKind, kind unversioned.GroupVer return true } -func (m PriorityRESTMapper) RESTMapping(gk unversioned.GroupKind, versions ...string) (mapping *RESTMapping, err error) { +func (m PriorityRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (mapping *RESTMapping, err error) { mappings, err := m.Delegate.RESTMappings(gk) if err != nil { return nil, err @@ -161,11 +161,11 @@ func (m PriorityRESTMapper) RESTMapping(gk unversioned.GroupKind, versions ...st // any versions the user provides take priority priorities := m.KindPriority if len(versions) > 0 { - priorities = make([]unversioned.GroupVersionKind, 0, len(m.KindPriority)+len(versions)) + priorities = make([]schema.GroupVersionKind, 0, len(m.KindPriority)+len(versions)) for _, version := range versions 
{ - gv, err := unversioned.ParseGroupVersion(version) - if err != nil { - return nil, err + gv := schema.GroupVersion{ + Version: version, + Group: gk.Group, } priorities = append(priorities, gv.WithKind(AnyKind)) } @@ -198,15 +198,15 @@ func (m PriorityRESTMapper) RESTMapping(gk unversioned.GroupKind, versions ...st return remaining[0], nil } - var kinds []unversioned.GroupVersionKind + var kinds []schema.GroupVersionKind for _, m := range mappings { kinds = append(kinds, m.GroupVersionKind) } return nil, &AmbiguousKindError{PartialKind: gk.WithVersion(""), MatchingKinds: kinds} } -func (m PriorityRESTMapper) RESTMappings(gk unversioned.GroupKind) ([]*RESTMapping, error) { - return m.Delegate.RESTMappings(gk) +func (m PriorityRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { + return m.Delegate.RESTMappings(gk, versions...) } func (m PriorityRESTMapper) AliasesForResource(alias string) (aliases []string, ok bool) { @@ -217,10 +217,10 @@ func (m PriorityRESTMapper) ResourceSingularizer(resource string) (singular stri return m.Delegate.ResourceSingularizer(resource) } -func (m PriorityRESTMapper) ResourcesFor(partiallySpecifiedResource unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) { +func (m PriorityRESTMapper) ResourcesFor(partiallySpecifiedResource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { return m.Delegate.ResourcesFor(partiallySpecifiedResource) } -func (m PriorityRESTMapper) KindsFor(partiallySpecifiedResource unversioned.GroupVersionResource) (gvk []unversioned.GroupVersionKind, err error) { +func (m PriorityRESTMapper) KindsFor(partiallySpecifiedResource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) { return m.Delegate.KindsFor(partiallySpecifiedResource) } diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go new file mode 100644 index 0000000000..bf4c77a82b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go @@ -0,0 +1,566 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// TODO: move everything in this file to pkg/api/rest +package meta + +import ( + "fmt" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// Implements RESTScope interface +type restScope struct { + name RESTScopeName + paramName string + argumentName string + paramDescription string +} + +func (r *restScope) Name() RESTScopeName { + return r.name +} +func (r *restScope) ParamName() string { + return r.paramName +} +func (r *restScope) ArgumentName() string { + return r.argumentName +} +func (r *restScope) ParamDescription() string { + return r.paramDescription +} + +var RESTScopeNamespace = &restScope{ + name: RESTScopeNameNamespace, + paramName: "namespaces", + argumentName: "namespace", + paramDescription: "object name and auth scope, such as for teams and projects", +} + +var RESTScopeRoot = &restScope{ + name: RESTScopeNameRoot, +} + +// DefaultRESTMapper exposes mappings between the types defined in a +// runtime.Scheme. It assumes that all types defined the provided scheme +// can be mapped with the provided MetadataAccessor and Codec interfaces. +// +// The resource name of a Kind is defined as the lowercase, +// English-plural version of the Kind string. +// When converting from resource to Kind, the singular version of the +// resource name is also accepted for convenience. +// +// TODO: Only accept plural for some operations for increased control? +// (`get pod bar` vs `get pods bar`) +type DefaultRESTMapper struct { + defaultGroupVersions []schema.GroupVersion + + resourceToKind map[schema.GroupVersionResource]schema.GroupVersionKind + kindToPluralResource map[schema.GroupVersionKind]schema.GroupVersionResource + kindToScope map[schema.GroupVersionKind]RESTScope + singularToPlural map[schema.GroupVersionResource]schema.GroupVersionResource + pluralToSingular map[schema.GroupVersionResource]schema.GroupVersionResource + + interfacesFunc VersionInterfacesFunc + + // aliasToResource is used for mapping aliases to resources + aliasToResource map[string][]string +} + +func (m *DefaultRESTMapper) String() string { + return fmt.Sprintf("DefaultRESTMapper{kindToPluralResource=%v}", m.kindToPluralResource) +} + +var _ RESTMapper = &DefaultRESTMapper{} + +// VersionInterfacesFunc returns the appropriate typer, and metadata accessor for a +// given api version, or an error if no such api version exists. +type VersionInterfacesFunc func(version schema.GroupVersion) (*VersionInterfaces, error) + +// NewDefaultRESTMapper initializes a mapping between Kind and APIVersion +// to a resource name and back based on the objects in a runtime.Scheme +// and the Kubernetes API conventions. Takes a group name, a priority list of the versions +// to search when an object has no default version (set empty to return an error), +// and a function that retrieves the correct metadata for a given version. 
+func NewDefaultRESTMapper(defaultGroupVersions []schema.GroupVersion, f VersionInterfacesFunc) *DefaultRESTMapper { + resourceToKind := make(map[schema.GroupVersionResource]schema.GroupVersionKind) + kindToPluralResource := make(map[schema.GroupVersionKind]schema.GroupVersionResource) + kindToScope := make(map[schema.GroupVersionKind]RESTScope) + singularToPlural := make(map[schema.GroupVersionResource]schema.GroupVersionResource) + pluralToSingular := make(map[schema.GroupVersionResource]schema.GroupVersionResource) + aliasToResource := make(map[string][]string) + // TODO: verify name mappings work correctly when versions differ + + return &DefaultRESTMapper{ + resourceToKind: resourceToKind, + kindToPluralResource: kindToPluralResource, + kindToScope: kindToScope, + defaultGroupVersions: defaultGroupVersions, + singularToPlural: singularToPlural, + pluralToSingular: pluralToSingular, + aliasToResource: aliasToResource, + interfacesFunc: f, + } +} + +func (m *DefaultRESTMapper) Add(kind schema.GroupVersionKind, scope RESTScope) { + plural, singular := KindToResource(kind) + + m.singularToPlural[singular] = plural + m.pluralToSingular[plural] = singular + + m.resourceToKind[singular] = kind + m.resourceToKind[plural] = kind + + m.kindToPluralResource[kind] = plural + m.kindToScope[kind] = scope +} + +// unpluralizedSuffixes is a list of resource suffixes that are the same plural and singular +// This is only is only necessary because some bits of code are lazy and don't actually use the RESTMapper like they should. +// TODO eliminate this so that different callers can correctly map to resources. This probably means updating all +// callers to use the RESTMapper they mean. +var unpluralizedSuffixes = []string{ + "endpoints", +} + +// KindToResource converts Kind to a resource name. +// Broken. This method only "sort of" works when used outside of this package. It assumes that Kinds and Resources match +// and they aren't guaranteed to do so. 
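A minimal sketch of wiring the DefaultRESTMapper above by hand, in the same illustrative package; the group, version and kind are assumptions, and InterfacesForUnstructured comes from the unstructured.go file added later in this diff:

// newIngressMapper registers a single namespaced kind with a DefaultRESTMapper.
func newIngressMapper() *meta.DefaultRESTMapper {
	gv := schema.GroupVersion{Group: "extensions", Version: "v1beta1"}
	mapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv}, meta.InterfacesForUnstructured)
	mapper.Add(gv.WithKind("Ingress"), meta.RESTScopeNamespace)
	return mapper
}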
+func KindToResource(kind schema.GroupVersionKind) ( /*plural*/ schema.GroupVersionResource /*singular*/, schema.GroupVersionResource) { + kindName := kind.Kind + if len(kindName) == 0 { + return schema.GroupVersionResource{}, schema.GroupVersionResource{} + } + singularName := strings.ToLower(kindName) + singular := kind.GroupVersion().WithResource(singularName) + + for _, skip := range unpluralizedSuffixes { + if strings.HasSuffix(singularName, skip) { + return singular, singular + } + } + + switch string(singularName[len(singularName)-1]) { + case "s": + return kind.GroupVersion().WithResource(singularName + "es"), singular + case "y": + return kind.GroupVersion().WithResource(strings.TrimSuffix(singularName, "y") + "ies"), singular + } + + return kind.GroupVersion().WithResource(singularName + "s"), singular +} + +// ResourceSingularizer implements RESTMapper +// It converts a resource name from plural to singular (e.g., from pods to pod) +func (m *DefaultRESTMapper) ResourceSingularizer(resourceType string) (string, error) { + partialResource := schema.GroupVersionResource{Resource: resourceType} + resources, err := m.ResourcesFor(partialResource) + if err != nil { + return resourceType, err + } + + singular := schema.GroupVersionResource{} + for _, curr := range resources { + currSingular, ok := m.pluralToSingular[curr] + if !ok { + continue + } + if singular.Empty() { + singular = currSingular + continue + } + + if currSingular.Resource != singular.Resource { + return resourceType, fmt.Errorf("multiple possible singular resources (%v) found for %v", resources, resourceType) + } + } + + if singular.Empty() { + return resourceType, fmt.Errorf("no singular of resource %v has been defined", resourceType) + } + + return singular.Resource, nil +} + +// coerceResourceForMatching makes the resource lower case and converts internal versions to unspecified (legacy behavior) +func coerceResourceForMatching(resource schema.GroupVersionResource) schema.GroupVersionResource { + resource.Resource = strings.ToLower(resource.Resource) + if resource.Version == runtime.APIVersionInternal { + resource.Version = "" + } + + return resource +} + +func (m *DefaultRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + resource := coerceResourceForMatching(input) + + hasResource := len(resource.Resource) > 0 + hasGroup := len(resource.Group) > 0 + hasVersion := len(resource.Version) > 0 + + if !hasResource { + return nil, fmt.Errorf("a resource must be present, got: %v", resource) + } + + ret := []schema.GroupVersionResource{} + switch { + case hasGroup && hasVersion: + // fully qualified. Find the exact match + for plural, singular := range m.pluralToSingular { + if singular == resource { + ret = append(ret, plural) + break + } + if plural == resource { + ret = append(ret, plural) + break + } + } + + case hasGroup: + // given a group, prefer an exact match. If you don't find one, resort to a prefix match on group + foundExactMatch := false + requestedGroupResource := resource.GroupResource() + for plural, singular := range m.pluralToSingular { + if singular.GroupResource() == requestedGroupResource { + foundExactMatch = true + ret = append(ret, plural) + } + if plural.GroupResource() == requestedGroupResource { + foundExactMatch = true + ret = append(ret, plural) + } + } + + // if you didn't find an exact match, match on group prefixing. 
This allows storageclass.storage to match + // storageclass.storage.k8s.io + if !foundExactMatch { + for plural, singular := range m.pluralToSingular { + if !strings.HasPrefix(plural.Group, requestedGroupResource.Group) { + continue + } + if singular.Resource == requestedGroupResource.Resource { + ret = append(ret, plural) + } + if plural.Resource == requestedGroupResource.Resource { + ret = append(ret, plural) + } + } + + } + + case hasVersion: + for plural, singular := range m.pluralToSingular { + if singular.Version == resource.Version && singular.Resource == resource.Resource { + ret = append(ret, plural) + } + if plural.Version == resource.Version && plural.Resource == resource.Resource { + ret = append(ret, plural) + } + } + + default: + for plural, singular := range m.pluralToSingular { + if singular.Resource == resource.Resource { + ret = append(ret, plural) + } + if plural.Resource == resource.Resource { + ret = append(ret, plural) + } + } + } + + if len(ret) == 0 { + return nil, &NoResourceMatchError{PartialResource: resource} + } + + sort.Sort(resourceByPreferredGroupVersion{ret, m.defaultGroupVersions}) + return ret, nil +} + +func (m *DefaultRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { + resources, err := m.ResourcesFor(resource) + if err != nil { + return schema.GroupVersionResource{}, err + } + if len(resources) == 1 { + return resources[0], nil + } + + return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources} +} + +func (m *DefaultRESTMapper) KindsFor(input schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + resource := coerceResourceForMatching(input) + + hasResource := len(resource.Resource) > 0 + hasGroup := len(resource.Group) > 0 + hasVersion := len(resource.Version) > 0 + + if !hasResource { + return nil, fmt.Errorf("a resource must be present, got: %v", resource) + } + + ret := []schema.GroupVersionKind{} + switch { + // fully qualified. Find the exact match + case hasGroup && hasVersion: + kind, exists := m.resourceToKind[resource] + if exists { + ret = append(ret, kind) + } + + case hasGroup: + foundExactMatch := false + requestedGroupResource := resource.GroupResource() + for currResource, currKind := range m.resourceToKind { + if currResource.GroupResource() == requestedGroupResource { + foundExactMatch = true + ret = append(ret, currKind) + } + } + + // if you didn't find an exact match, match on group prefixing. 
This allows storageclass.storage to match + // storageclass.storage.k8s.io + if !foundExactMatch { + for currResource, currKind := range m.resourceToKind { + if !strings.HasPrefix(currResource.Group, requestedGroupResource.Group) { + continue + } + if currResource.Resource == requestedGroupResource.Resource { + ret = append(ret, currKind) + } + } + + } + + case hasVersion: + for currResource, currKind := range m.resourceToKind { + if currResource.Version == resource.Version && currResource.Resource == resource.Resource { + ret = append(ret, currKind) + } + } + + default: + for currResource, currKind := range m.resourceToKind { + if currResource.Resource == resource.Resource { + ret = append(ret, currKind) + } + } + } + + if len(ret) == 0 { + return nil, &NoResourceMatchError{PartialResource: input} + } + + sort.Sort(kindByPreferredGroupVersion{ret, m.defaultGroupVersions}) + return ret, nil +} + +func (m *DefaultRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + kinds, err := m.KindsFor(resource) + if err != nil { + return schema.GroupVersionKind{}, err + } + if len(kinds) == 1 { + return kinds[0], nil + } + + return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds} +} + +type kindByPreferredGroupVersion struct { + list []schema.GroupVersionKind + sortOrder []schema.GroupVersion +} + +func (o kindByPreferredGroupVersion) Len() int { return len(o.list) } +func (o kindByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] } +func (o kindByPreferredGroupVersion) Less(i, j int) bool { + lhs := o.list[i] + rhs := o.list[j] + if lhs == rhs { + return false + } + + if lhs.GroupVersion() == rhs.GroupVersion() { + return lhs.Kind < rhs.Kind + } + + // otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order + lhsIndex := -1 + rhsIndex := -1 + + for i := range o.sortOrder { + if o.sortOrder[i] == lhs.GroupVersion() { + lhsIndex = i + } + if o.sortOrder[i] == rhs.GroupVersion() { + rhsIndex = i + } + } + + if rhsIndex == -1 { + return true + } + + return lhsIndex < rhsIndex +} + +type resourceByPreferredGroupVersion struct { + list []schema.GroupVersionResource + sortOrder []schema.GroupVersion +} + +func (o resourceByPreferredGroupVersion) Len() int { return len(o.list) } +func (o resourceByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] } +func (o resourceByPreferredGroupVersion) Less(i, j int) bool { + lhs := o.list[i] + rhs := o.list[j] + if lhs == rhs { + return false + } + + if lhs.GroupVersion() == rhs.GroupVersion() { + return lhs.Resource < rhs.Resource + } + + // otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order + lhsIndex := -1 + rhsIndex := -1 + + for i := range o.sortOrder { + if o.sortOrder[i] == lhs.GroupVersion() { + lhsIndex = i + } + if o.sortOrder[i] == rhs.GroupVersion() { + rhsIndex = i + } + } + + if rhsIndex == -1 { + return true + } + + return lhsIndex < rhsIndex +} + +// RESTMapping returns a struct representing the resource path and conversion interfaces a +// RESTClient should use to operate on the provided group/kind in order of versions. If a version search +// order is not provided, the search order provided to DefaultRESTMapper will be used to resolve which +// version should be used to access the named group/kind. 
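Continuing the hypothetical sketch above, a lookup against such a mapper yields the plural resource produced by KindToResource and the scope passed to Add:

// ingressResource resolves the mapping registered by newIngressMapper; illustrative only.
func ingressResource() (string, error) {
	mapper := newIngressMapper()
	mapping, err := mapper.RESTMapping(schema.GroupKind{Group: "extensions", Kind: "Ingress"}, "v1beta1")
	if err != nil {
		return "", err
	}
	// With the registration above, mapping.Resource is "ingresses" and
	// mapping.Scope.Name() is "namespace".
	return mapping.Resource, nil
}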
+func (m *DefaultRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { + mappings, err := m.RESTMappings(gk, versions...) + if err != nil { + return nil, err + } + if len(mappings) == 0 { + return nil, &NoKindMatchError{PartialKind: gk.WithVersion("")} + } + // since we rely on RESTMappings method + // take the first match and return to the caller + // as this was the existing behavior. + return mappings[0], nil +} + +// RESTMappings returns the RESTMappings for the provided group kind. If a version search order +// is not provided, the search order provided to DefaultRESTMapper will be used. +func (m *DefaultRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { + mappings := make([]*RESTMapping, 0) + potentialGVK := make([]schema.GroupVersionKind, 0) + hadVersion := false + + // Pick an appropriate version + for _, version := range versions { + if len(version) == 0 || version == runtime.APIVersionInternal { + continue + } + currGVK := gk.WithVersion(version) + hadVersion = true + if _, ok := m.kindToPluralResource[currGVK]; ok { + potentialGVK = append(potentialGVK, currGVK) + break + } + } + // Use the default preferred versions + if !hadVersion && len(potentialGVK) == 0 { + for _, gv := range m.defaultGroupVersions { + if gv.Group != gk.Group { + continue + } + potentialGVK = append(potentialGVK, gk.WithVersion(gv.Version)) + } + } + + if len(potentialGVK) == 0 { + return nil, &NoKindMatchError{PartialKind: gk.WithVersion("")} + } + + for _, gvk := range potentialGVK { + //Ensure we have a REST mapping + res, ok := m.kindToPluralResource[gvk] + if !ok { + continue + } + + // Ensure we have a REST scope + scope, ok := m.kindToScope[gvk] + if !ok { + return nil, fmt.Errorf("the provided version %q and kind %q cannot be mapped to a supported scope", gvk.GroupVersion(), gvk.Kind) + } + + interfaces, err := m.interfacesFunc(gvk.GroupVersion()) + if err != nil { + return nil, fmt.Errorf("the provided version %q has no relevant versions: %v", gvk.GroupVersion().String(), err) + } + + mappings = append(mappings, &RESTMapping{ + Resource: res.Resource, + GroupVersionKind: gvk, + Scope: scope, + + ObjectConvertor: interfaces.ObjectConvertor, + MetadataAccessor: interfaces.MetadataAccessor, + }) + } + + if len(mappings) == 0 { + return nil, &NoResourceMatchError{PartialResource: schema.GroupVersionResource{Group: gk.Group, Resource: gk.Kind}} + } + return mappings, nil +} + +// AddResourceAlias maps aliases to resources +func (m *DefaultRESTMapper) AddResourceAlias(alias string, resources ...string) { + if len(resources) == 0 { + return + } + m.aliasToResource[alias] = resources +} + +// AliasesForResource returns whether a resource has an alias or not +func (m *DefaultRESTMapper) AliasesForResource(alias string) ([]string, bool) { + if res, ok := m.aliasToResource[alias]; ok { + return res, true + } + return nil, false +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go b/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go new file mode 100644 index 0000000000..3ebf248157 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go @@ -0,0 +1,31 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// InterfacesForUnstructured returns VersionInterfaces suitable for +// dealing with unstructured.Unstructured objects. +func InterfacesForUnstructured(schema.GroupVersion) (*VersionInterfaces, error) { + return &VersionInterfaces{ + ObjectConvertor: &unstructured.UnstructuredObjectConverter{}, + MetadataAccessor: NewAccessor(), + }, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS new file mode 100755 index 0000000000..b905e57f0f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS @@ -0,0 +1,17 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- derekwaynecarr +- mikedanese +- saad-ali +- janetkuo +- timstclair +- eparis +- timothysc +- jbeda +- xiang90 +- mbohlool +- david-mcmahon +- goltermann diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/amount.go b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/api/resource/amount.go rename to vendor/k8s.io/apimachinery/pkg/api/resource/amount.go diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go new file mode 100644 index 0000000000..87fb5f5803 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -0,0 +1,71 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +// DO NOT EDIT! + +/* + Package resource is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto + + It has these top-level messages: + Quantity +*/ +package resource + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
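Editorial sketch of what InterfacesForUnstructured is for, before the generated protobuf code that follows (the group and kind are illustrative; the mapper signatures are assumed from this vendored snapshot): plugging it in as the VersionInterfacesFunc makes every RESTMapping carry the UnstructuredObjectConverter and a generic metadata accessor, which is enough to work with unstructured.Unstructured objects.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	gv := schema.GroupVersion{Group: "apps", Version: "v1beta1"}
	mapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv}, meta.InterfacesForUnstructured)
	mapper.Add(gv.WithKind("Deployment"), meta.RESTScopeNamespace)

	mapping, err := mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"})
	if err != nil {
		panic(err)
	}
	// mapping.Resource is the plural path segment; ObjectConvertor and
	// MetadataAccessor come from InterfacesForUnstructured above.
	fmt.Println(mapping.Resource, mapping.GroupVersionKind) // deployments apps/v1beta1, Kind=Deployment
}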
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *Quantity) Reset() { *m = Quantity{} } +func (*Quantity) ProtoMessage() {} +func (*Quantity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func init() { + proto.RegisterType((*Quantity)(nil), "k8s.io.apimachinery.pkg.api.resource.Quantity") +} + +var fileDescriptorGenerated = []byte{ + // 253 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x8f, 0xb1, 0x4a, 0x03, 0x41, + 0x10, 0x86, 0x77, 0x1b, 0x89, 0x57, 0x06, 0x11, 0x49, 0xb1, 0x17, 0xc4, 0x42, 0x04, 0x77, 0x0a, + 0x9b, 0x60, 0x69, 0x6f, 0xa1, 0xa5, 0xdd, 0xdd, 0x65, 0xdc, 0x2c, 0x67, 0x76, 0x8f, 0xd9, 0x59, + 0x21, 0x5d, 0x4a, 0xcb, 0x94, 0x96, 0xb9, 0xb7, 0x49, 0x99, 0xd2, 0xc2, 0xc2, 0x3b, 0x5f, 0x44, + 0x72, 0xc9, 0x81, 0x08, 0x76, 0xf3, 0xfd, 0xc3, 0x37, 0xfc, 0x93, 0xdc, 0x97, 0x93, 0xa0, 0xad, + 0x87, 0x32, 0xe6, 0x48, 0x0e, 0x19, 0x03, 0xbc, 0xa2, 0x9b, 0x7a, 0x82, 0xc3, 0x22, 0xab, 0xec, + 0x3c, 0x2b, 0x66, 0xd6, 0x21, 0x2d, 0xa0, 0x2a, 0xcd, 0x2e, 0x00, 0xc2, 0xe0, 0x23, 0x15, 0x08, + 0x06, 0x1d, 0x52, 0xc6, 0x38, 0xd5, 0x15, 0x79, 0xf6, 0xc3, 0x8b, 0xbd, 0xa5, 0x7f, 0x5b, 0xba, + 0x2a, 0xcd, 0x2e, 0xd0, 0xbd, 0x35, 0xba, 0x36, 0x96, 0x67, 0x31, 0xd7, 0x85, 0x9f, 0x83, 0xf1, + 0xc6, 0x43, 0x27, 0xe7, 0xf1, 0xb9, 0xa3, 0x0e, 0xba, 0x69, 0x7f, 0x74, 0x74, 0xf3, 0x5f, 0x95, + 0xc8, 0xf6, 0x05, 0xac, 0xe3, 0xc0, 0xf4, 0xb7, 0xc9, 0xf9, 0x24, 0x19, 0x3c, 0xc4, 0xcc, 0xb1, + 0xe5, 0xc5, 0xf0, 0x34, 0x39, 0x0a, 0x4c, 0xd6, 0x99, 0x33, 0x39, 0x96, 0x97, 0xc7, 0x8f, 0x07, + 0xba, 0x3d, 0x79, 0x5f, 0xa7, 0xe2, 0xad, 0x4e, 0xc5, 0xaa, 0x4e, 0xc5, 0xba, 0x4e, 0xc5, 0xf2, + 0x73, 0x2c, 0xee, 0xae, 0x36, 0x8d, 0x12, 0xdb, 0x46, 0x89, 0x8f, 0x46, 0x89, 0x65, 0xab, 0xe4, + 0xa6, 0x55, 0x72, 0xdb, 0x2a, 0xf9, 0xd5, 0x2a, 0xb9, 0xfa, 0x56, 0xe2, 0x69, 0xd0, 0x7f, 0xf2, + 0x13, 0x00, 0x00, 0xff, 0xff, 0xdf, 0x3c, 0xf3, 0xc9, 0x3f, 0x01, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto new file mode 100644 index 0000000000..608299da4d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.api.resource; + +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "resource"; + +// Quantity is a fixed-point representation of a number. +// It provides convenient marshaling/unmarshaling in JSON and YAML, +// in addition to String() and Int64() accessors. +// +// The serialization format is: +// +// <quantity> ::= <signedNumber><suffix> +// (Note that <suffix> may be empty, from the "" case in <decimalSI>.) +// <digit> ::= 0 | 1 | ... | 9 +// <digits> ::= <digit> | <digit><digits> +// <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits>
+// <sign> ::= "+" | "-" +// <signedNumber> ::= <number> | <sign><number> +// <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> +// <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei +// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) +// <decimalSI> ::= m | "" | k | M | G | T | P | E +// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) +// <decimalExponent> ::= "e" <signedNumber> | "E" <signedNumber> +// +// No matter which of the three exponent forms is used, no quantity may represent +// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal +// places. Numbers larger or more precise will be capped or rounded up. +// (E.g.: 0.1m will be rounded up to 1m.) +// This may be extended in the future if we require larger or smaller quantities. +// +// When a Quantity is parsed from a string, it will remember the type of suffix +// it had, and will use the same type again when it is serialized. +// +// Before serializing, Quantity will be put in "canonical form". +// This means that Exponent/suffix will be adjusted up or down (with a +// corresponding increase or decrease in Mantissa) such that: +// a. No precision is lost +// b. No fractional digits will be emitted +// c. The exponent (or suffix) is as large as possible. +// The sign will be omitted unless the number is negative. +// +// Examples: +// 1.5 will be serialized as "1500m" +// 1.5Gi will be serialized as "1536Mi" +// +// NOTE: We reserve the right to amend this canonical format, perhaps to +// allow 1.5 to be canonical. +// TODO: Remove above disclaimer after all bikeshedding about format is over, +// or after March 2015. +// +// Note that the quantity will NEVER be internally represented by a +// floating point number. That is the whole point of this exercise. +// +// Non-canonical values will still parse as long as they are well formed, +// but will be re-emitted in their canonical form. (So always use canonical +// form, or don't diff.) +// +// This format is intended to make it difficult to use these numbers without +// writing some sort of special handling code in the hopes that that will +// cause implementors to also use a fixed point implementation. +// +// +protobuf=true +// +protobuf.embed=string +// +protobuf.options.marshal=false +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:openapi-gen=true +message Quantity { + optional string string = 1; +} + diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/math.go b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/api/resource/math.go rename to vendor/k8s.io/apimachinery/pkg/api/resource/math.go diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/api/resource/quantity.go rename to vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index d8f2718ee5..3a95608821 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -29,7 +29,7 @@ import ( "github.com/go-openapi/spec" inf "gopkg.in/inf.v0" - "k8s.io/kubernetes/pkg/genericapiserver/openapi/common" + "k8s.io/apimachinery/pkg/openapi" ) // Quantity is a fixed-point representation of a number. @@ -399,8 +399,8 @@ func (q Quantity) DeepCopy() Quantity { } // OpenAPIDefinition returns openAPI definition for this type.
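Editorial aside before the OpenAPIDefinition change below: a worked example of the canonical form described in the comment above, using the package's own MustParse/String/MilliValue helpers.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	q := resource.MustParse("1.5Gi")
	fmt.Println(q.String()) // "1536Mi": binary suffix kept, fractional digits removed

	m := resource.MustParse("0.9")
	fmt.Println(m.String(), m.MilliValue()) // "900m" 900: rescaled so no precision is lost
}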
-func (_ Quantity) OpenAPIDefinition() common.OpenAPIDefinition { - return common.OpenAPIDefinition{ +func (_ Quantity) OpenAPIDefinition() openapi.OpenAPIDefinition { + return openapi.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ Type: []string{"string"}, diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/quantity_proto.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/api/resource/quantity_proto.go rename to vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/scale_int.go b/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/api/resource/scale_int.go rename to vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/suffix.go b/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/api/resource/suffix.go rename to vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go diff --git a/vendor/k8s.io/kubernetes/pkg/apimachinery/announced/announced.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/apimachinery/announced/announced.go rename to vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go index bb1982a649..2c8568c1f7 100644 --- a/vendor/k8s.io/kubernetes/pkg/apimachinery/announced/announced.go +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go @@ -24,16 +24,8 @@ package announced import ( "fmt" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/runtime" -) - -var ( - DefaultGroupFactoryRegistry = make(APIGroupFactoryRegistry) - - // These functions will announce your group or version. - AnnounceGroupVersion = DefaultGroupFactoryRegistry.AnnounceGroupVersion - AnnounceGroup = DefaultGroupFactoryRegistry.AnnounceGroup + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" ) // APIGroupFactoryRegistry allows for groups and versions to announce themselves, diff --git a/vendor/k8s.io/kubernetes/pkg/apimachinery/announced/group_factory.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go similarity index 83% rename from vendor/k8s.io/kubernetes/pkg/apimachinery/announced/group_factory.go rename to vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go index 75b22c0636..afb03295dd 100644 --- a/vendor/k8s.io/kubernetes/pkg/apimachinery/announced/group_factory.go +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go @@ -21,13 +21,12 @@ import ( "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apimachinery" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" ) type SchemeFunc func(*runtime.Scheme) error @@ -43,10 +42,16 @@ type GroupVersionFactoryArgs struct { // GroupMetaFactoryArgs contains the group-level args of a GroupMetaFactory. 
type GroupMetaFactoryArgs struct { + // GroupName is the name of the API-Group + // + // example: 'servicecatalog.k8s.io' GroupName string VersionPreferenceOrder []string - ImportPrefix string - + // ImportPrefix is the base go package of the API-Group + // + // example: 'k8s.io/kubernetes/pkg/apis/autoscaling' + ImportPrefix string + // RootScopedKinds are resources that are not namespaced. RootScopedKinds sets.String // nil is allowed IgnoredKinds sets.String // nil is allowed @@ -79,8 +84,8 @@ func NewGroupMetaFactory(groupArgs *GroupMetaFactoryArgs, versions VersionToSche // programmer importing the wrong set of packages. If this assumption doesn't // work for you, just call DefaultGroupFactoryRegistry.AnnouncePreconstructedFactory // yourself. -func (gmf *GroupMetaFactory) Announce() *GroupMetaFactory { - if err := DefaultGroupFactoryRegistry.AnnouncePreconstructedFactory(gmf); err != nil { +func (gmf *GroupMetaFactory) Announce(groupFactoryRegistry APIGroupFactoryRegistry) *GroupMetaFactory { + if err := groupFactoryRegistry.AnnouncePreconstructedFactory(gmf); err != nil { panic(err) } return gmf @@ -107,7 +112,7 @@ type GroupMetaFactory struct { VersionArgs map[string]*GroupVersionFactoryArgs // assembled by Register() - prioritizedVersionList []unversioned.GroupVersion + prioritizedVersionList []schema.GroupVersion } // Register constructs the finalized prioritized version list and sanity checks @@ -124,11 +129,11 @@ func (gmf *GroupMetaFactory) Register(m *registered.APIRegistrationManager) erro if pvSet.Len() != len(gmf.GroupArgs.VersionPreferenceOrder) { return fmt.Errorf("preference order for group %v has duplicates: %v", gmf.GroupArgs.GroupName, gmf.GroupArgs.VersionPreferenceOrder) } - prioritizedVersions := []unversioned.GroupVersion{} + prioritizedVersions := []schema.GroupVersion{} for _, v := range gmf.GroupArgs.VersionPreferenceOrder { prioritizedVersions = append( prioritizedVersions, - unversioned.GroupVersion{ + schema.GroupVersion{ Group: gmf.GroupArgs.GroupName, Version: v, }, @@ -136,7 +141,7 @@ func (gmf *GroupMetaFactory) Register(m *registered.APIRegistrationManager) erro } // Go through versions that weren't explicitly prioritized. - unprioritizedVersions := []unversioned.GroupVersion{} + unprioritizedVersions := []schema.GroupVersion{} for _, v := range gmf.VersionArgs { if v.GroupName != gmf.GroupArgs.GroupName { return fmt.Errorf("found %v/%v in group %v?", v.GroupName, v.VersionName, gmf.GroupArgs.GroupName) @@ -145,7 +150,7 @@ func (gmf *GroupMetaFactory) Register(m *registered.APIRegistrationManager) erro pvSet.Delete(v.VersionName) continue } - unprioritizedVersions = append(unprioritizedVersions, unversioned.GroupVersion{Group: v.GroupName, Version: v.VersionName}) + unprioritizedVersions = append(unprioritizedVersions, schema.GroupVersion{Group: v.GroupName, Version: v.VersionName}) } if len(unprioritizedVersions) > 1 { glog.Warningf("group %v has multiple unprioritized versions: %#v. 
They will have an arbitrary preference order!", gmf.GroupArgs.GroupName, unprioritizedVersions) @@ -159,7 +164,7 @@ func (gmf *GroupMetaFactory) Register(m *registered.APIRegistrationManager) erro return nil } -func (gmf *GroupMetaFactory) newRESTMapper(scheme *runtime.Scheme, externalVersions []unversioned.GroupVersion, groupMeta *apimachinery.GroupMeta) meta.RESTMapper { +func (gmf *GroupMetaFactory) newRESTMapper(scheme *runtime.Scheme, externalVersions []schema.GroupVersion, groupMeta *apimachinery.GroupMeta) meta.RESTMapper { // the list of kinds that are scoped at the root of the api hierarchy // if a kind is not enumerated here, it is assumed to have a namespace scope rootScoped := sets.NewString() @@ -171,7 +176,7 @@ func (gmf *GroupMetaFactory) newRESTMapper(scheme *runtime.Scheme, externalVersi ignoredKinds = gmf.GroupArgs.IgnoredKinds } - return api.NewDefaultRESTMapperFromScheme( + return meta.NewDefaultRESTMapperFromScheme( externalVersions, groupMeta.InterfacesFor, gmf.GroupArgs.ImportPrefix, @@ -183,7 +188,7 @@ func (gmf *GroupMetaFactory) newRESTMapper(scheme *runtime.Scheme, externalVersi // Enable enables group versions that are allowed, adds methods to the scheme, etc. func (gmf *GroupMetaFactory) Enable(m *registered.APIRegistrationManager, scheme *runtime.Scheme) error { - externalVersions := []unversioned.GroupVersion{} + externalVersions := []schema.GroupVersion{} for _, v := range gmf.prioritizedVersionList { if !m.IsAllowedVersion(v) { continue @@ -214,7 +219,7 @@ func (gmf *GroupMetaFactory) Enable(m *registered.APIRegistrationManager, scheme for _, v := range externalVersions { gvf := gmf.VersionArgs[v.Version] if err := groupMeta.AddVersionInterfaces( - unversioned.GroupVersion{Group: gvf.GroupName, Version: gvf.VersionName}, + schema.GroupVersion{Group: gvf.GroupName, Version: gvf.VersionName}, &meta.VersionInterfaces{ ObjectConvertor: scheme, MetadataAccessor: accessor, @@ -235,11 +240,11 @@ func (gmf *GroupMetaFactory) Enable(m *registered.APIRegistrationManager, scheme // RegisterAndEnable is provided only to allow this code to get added in multiple steps. // It's really bad that this is called in init() methods, but supporting this // temporarily lets us do the change incrementally. -func (gmf *GroupMetaFactory) RegisterAndEnable() error { - if err := gmf.Register(registered.DefaultAPIRegistrationManager); err != nil { +func (gmf *GroupMetaFactory) RegisterAndEnable(registry *registered.APIRegistrationManager, scheme *runtime.Scheme) error { + if err := gmf.Register(registry); err != nil { return err } - if err := gmf.Enable(registered.DefaultAPIRegistrationManager, api.Scheme); err != nil { + if err := gmf.Enable(registry, scheme); err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/pkg/apimachinery/doc.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/apimachinery/doc.go rename to vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go new file mode 100644 index 0000000000..f2e32c88cd --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go @@ -0,0 +1,376 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
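Editorial sketch of the new explicit-registry flow before the next vendored file: with DefaultGroupFactoryRegistry and the default registration manager gone, install code threads the registry, the APIRegistrationManager and the Scheme through by hand. The group name, import prefix and the addV1Types helper below are hypothetical placeholders.

package example

import (
	"k8s.io/apimachinery/pkg/apimachinery/announced"
	"k8s.io/apimachinery/pkg/apimachinery/registered"
	"k8s.io/apimachinery/pkg/runtime"
)

// addV1Types stands in for the group's usual SchemeBuilder.AddToScheme func.
func addV1Types(s *runtime.Scheme) error { return nil }

// Install announces, registers and enables the hypothetical group against the
// explicitly supplied registry, registration manager and scheme.
func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) {
	if err := announced.NewGroupMetaFactory(
		&announced.GroupMetaFactoryArgs{
			GroupName:              "example.k8s.io",
			VersionPreferenceOrder: []string{"v1"},
			ImportPrefix:           "k8s.io/example/pkg/apis/example",
		},
		announced.VersionToSchemeFunc{"v1": addV1Types},
	).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil {
		panic(err)
	}
}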
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package to keep track of API Versions that can be registered and are enabled in api.Scheme. +package registered + +import ( + "fmt" + "sort" + "strings" + + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apimachinery" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +// APIRegistrationManager provides the concept of what API groups are enabled. +// +// TODO: currently, it also provides a "registered" concept. But it's wrong to +// have both concepts in the same object. Therefore the "announced" package is +// going to take over the registered concept. After all the install packages +// are switched to using the announce package instead of this package, then we +// can combine the registered/enabled concepts in this object. Simplifying this +// isn't easy right now because there are so many callers of this package. +type APIRegistrationManager struct { + // registeredGroupVersions stores all API group versions for which RegisterGroup is called. + registeredVersions map[schema.GroupVersion]struct{} + + // thirdPartyGroupVersions are API versions which are dynamically + // registered (and unregistered) via API calls to the apiserver + thirdPartyGroupVersions []schema.GroupVersion + + // enabledVersions represents all enabled API versions. It should be a + // subset of registeredVersions. Please call EnableVersions() to add + // enabled versions. + enabledVersions map[schema.GroupVersion]struct{} + + // map of group meta for all groups. + groupMetaMap map[string]*apimachinery.GroupMeta + + // envRequestedVersions represents the versions requested via the + // KUBE_API_VERSIONS environment variable. The install package of each group + // checks this list before add their versions to the latest package and + // Scheme. This list is small and order matters, so represent as a slice + envRequestedVersions []schema.GroupVersion +} + +// NewAPIRegistrationManager constructs a new manager. The argument ought to be +// the value of the KUBE_API_VERSIONS env var, or a value of this which you +// wish to test. 
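A short editorial sketch of the manager's lifecycle before the constructor below (the group/version is a made-up example): versions must be registered before they can be enabled, and the KUBE_API_VERSIONS string given to the constructor only restricts what IsAllowedVersion reports.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apimachinery/registered"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	m := registered.NewOrDie("") // empty KUBE_API_VERSIONS: every version is allowed

	gv := schema.GroupVersion{Group: "example.k8s.io", Version: "v1"}
	m.RegisterVersions([]schema.GroupVersion{gv})
	if err := m.EnableVersions(gv); err != nil { // would fail had gv not been registered first
		panic(err)
	}

	fmt.Println(m.IsAllowedVersion(gv), m.IsEnabledVersion(gv)) // true true
}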
+func NewAPIRegistrationManager(kubeAPIVersions string) (*APIRegistrationManager, error) { + m := &APIRegistrationManager{ + registeredVersions: map[schema.GroupVersion]struct{}{}, + thirdPartyGroupVersions: []schema.GroupVersion{}, + enabledVersions: map[schema.GroupVersion]struct{}{}, + groupMetaMap: map[string]*apimachinery.GroupMeta{}, + envRequestedVersions: []schema.GroupVersion{}, + } + + if len(kubeAPIVersions) != 0 { + for _, version := range strings.Split(kubeAPIVersions, ",") { + gv, err := schema.ParseGroupVersion(version) + if err != nil { + return nil, fmt.Errorf("invalid api version: %s in KUBE_API_VERSIONS: %s.", + version, kubeAPIVersions) + } + m.envRequestedVersions = append(m.envRequestedVersions, gv) + } + } + return m, nil +} + +func NewOrDie(kubeAPIVersions string) *APIRegistrationManager { + m, err := NewAPIRegistrationManager(kubeAPIVersions) + if err != nil { + glog.Fatalf("Could not construct version manager: %v (KUBE_API_VERSIONS=%q)", err, kubeAPIVersions) + } + return m +} + +// RegisterVersions adds the given group versions to the list of registered group versions. +func (m *APIRegistrationManager) RegisterVersions(availableVersions []schema.GroupVersion) { + for _, v := range availableVersions { + m.registeredVersions[v] = struct{}{} + } +} + +// RegisterGroup adds the given group to the list of registered groups. +func (m *APIRegistrationManager) RegisterGroup(groupMeta apimachinery.GroupMeta) error { + groupName := groupMeta.GroupVersion.Group + if _, found := m.groupMetaMap[groupName]; found { + return fmt.Errorf("group %q is already registered in groupsMap: %v", groupName, m.groupMetaMap) + } + m.groupMetaMap[groupName] = &groupMeta + return nil +} + +// EnableVersions adds the versions for the given group to the list of enabled versions. +// Note that the caller should call RegisterGroup before calling this method. +// The caller of this function is responsible to add the versions to scheme and RESTMapper. +func (m *APIRegistrationManager) EnableVersions(versions ...schema.GroupVersion) error { + var unregisteredVersions []schema.GroupVersion + for _, v := range versions { + if _, found := m.registeredVersions[v]; !found { + unregisteredVersions = append(unregisteredVersions, v) + } + m.enabledVersions[v] = struct{}{} + } + if len(unregisteredVersions) != 0 { + return fmt.Errorf("Please register versions before enabling them: %v", unregisteredVersions) + } + return nil +} + +// IsAllowedVersion returns if the version is allowed by the KUBE_API_VERSIONS +// environment variable. If the environment variable is empty, then it always +// returns true. +func (m *APIRegistrationManager) IsAllowedVersion(v schema.GroupVersion) bool { + if len(m.envRequestedVersions) == 0 { + return true + } + for _, envGV := range m.envRequestedVersions { + if v == envGV { + return true + } + } + return false +} + +// IsEnabledVersion returns if a version is enabled. +func (m *APIRegistrationManager) IsEnabledVersion(v schema.GroupVersion) bool { + _, found := m.enabledVersions[v] + return found +} + +// EnabledVersions returns all enabled versions. 
Groups are randomly ordered, but versions within groups +// are priority order from best to worst +func (m *APIRegistrationManager) EnabledVersions() []schema.GroupVersion { + ret := []schema.GroupVersion{} + for _, groupMeta := range m.groupMetaMap { + for _, version := range groupMeta.GroupVersions { + if m.IsEnabledVersion(version) { + ret = append(ret, version) + } + } + } + return ret +} + +// EnabledVersionsForGroup returns all enabled versions for a group in order of best to worst +func (m *APIRegistrationManager) EnabledVersionsForGroup(group string) []schema.GroupVersion { + groupMeta, ok := m.groupMetaMap[group] + if !ok { + return []schema.GroupVersion{} + } + + ret := []schema.GroupVersion{} + for _, version := range groupMeta.GroupVersions { + if m.IsEnabledVersion(version) { + ret = append(ret, version) + } + } + return ret +} + +// Group returns the metadata of a group if the group is registered, otherwise +// an error is returned. +func (m *APIRegistrationManager) Group(group string) (*apimachinery.GroupMeta, error) { + groupMeta, found := m.groupMetaMap[group] + if !found { + return nil, fmt.Errorf("group %v has not been registered", group) + } + groupMetaCopy := *groupMeta + return &groupMetaCopy, nil +} + +// IsRegistered takes a string and determines if it's one of the registered groups +func (m *APIRegistrationManager) IsRegistered(group string) bool { + _, found := m.groupMetaMap[group] + return found +} + +// IsRegisteredVersion returns if a version is registered. +func (m *APIRegistrationManager) IsRegisteredVersion(v schema.GroupVersion) bool { + _, found := m.registeredVersions[v] + return found +} + +// RegisteredGroupVersions returns all registered group versions. +func (m *APIRegistrationManager) RegisteredGroupVersions() []schema.GroupVersion { + ret := []schema.GroupVersion{} + for groupVersion := range m.registeredVersions { + ret = append(ret, groupVersion) + } + return ret +} + +// IsThirdPartyAPIGroupVersion returns true if the api version is a user-registered group/version. +func (m *APIRegistrationManager) IsThirdPartyAPIGroupVersion(gv schema.GroupVersion) bool { + for ix := range m.thirdPartyGroupVersions { + if m.thirdPartyGroupVersions[ix] == gv { + return true + } + } + return false +} + +// AddThirdPartyAPIGroupVersions sets the list of third party versions, +// registers them in the API machinery and enables them. +// Skips GroupVersions that are already registered. +// Returns the list of GroupVersions that were skipped. +func (m *APIRegistrationManager) AddThirdPartyAPIGroupVersions(gvs ...schema.GroupVersion) []schema.GroupVersion { + filteredGVs := []schema.GroupVersion{} + skippedGVs := []schema.GroupVersion{} + for ix := range gvs { + if !m.IsRegisteredVersion(gvs[ix]) { + filteredGVs = append(filteredGVs, gvs[ix]) + } else { + glog.V(3).Infof("Skipping %s, because its already registered", gvs[ix].String()) + skippedGVs = append(skippedGVs, gvs[ix]) + } + } + if len(filteredGVs) == 0 { + return skippedGVs + } + m.RegisterVersions(filteredGVs) + m.EnableVersions(filteredGVs...) + m.thirdPartyGroupVersions = append(m.thirdPartyGroupVersions, filteredGVs...) 
+ + return skippedGVs +} + +// InterfacesFor is a union meta.VersionInterfacesFunc func for all registered types +func (m *APIRegistrationManager) InterfacesFor(version schema.GroupVersion) (*meta.VersionInterfaces, error) { + groupMeta, err := m.Group(version.Group) + if err != nil { + return nil, err + } + return groupMeta.InterfacesFor(version) +} + +// TODO: This is an expedient function, because we don't check if a Group is +// supported throughout the code base. We will abandon this function and +// checking the error returned by the Group() function. +func (m *APIRegistrationManager) GroupOrDie(group string) *apimachinery.GroupMeta { + groupMeta, found := m.groupMetaMap[group] + if !found { + if group == "" { + panic("The legacy v1 API is not registered.") + } else { + panic(fmt.Sprintf("Group %s is not registered.", group)) + } + } + groupMetaCopy := *groupMeta + return &groupMetaCopy +} + +// RESTMapper returns a union RESTMapper of all known types with priorities chosen in the following order: +// 1. if KUBE_API_VERSIONS is specified, then KUBE_API_VERSIONS in order, OR +// 1. legacy kube group preferred version, extensions preferred version, metrics perferred version, legacy +// kube any version, extensions any version, metrics any version, all other groups alphabetical preferred version, +// all other groups alphabetical. +func (m *APIRegistrationManager) RESTMapper(versionPatterns ...schema.GroupVersion) meta.RESTMapper { + unionMapper := meta.MultiRESTMapper{} + unionedGroups := sets.NewString() + for enabledVersion := range m.enabledVersions { + if !unionedGroups.Has(enabledVersion.Group) { + unionedGroups.Insert(enabledVersion.Group) + groupMeta := m.groupMetaMap[enabledVersion.Group] + unionMapper = append(unionMapper, groupMeta.RESTMapper) + } + } + + if len(versionPatterns) != 0 { + resourcePriority := []schema.GroupVersionResource{} + kindPriority := []schema.GroupVersionKind{} + for _, versionPriority := range versionPatterns { + resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource)) + kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind)) + } + + return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} + } + + if len(m.envRequestedVersions) != 0 { + resourcePriority := []schema.GroupVersionResource{} + kindPriority := []schema.GroupVersionKind{} + + for _, versionPriority := range m.envRequestedVersions { + resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource)) + kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind)) + } + + return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} + } + + prioritizedGroups := []string{"", "extensions", "metrics"} + resourcePriority, kindPriority := m.prioritiesForGroups(prioritizedGroups...) + + prioritizedGroupsSet := sets.NewString(prioritizedGroups...) + remainingGroups := sets.String{} + for enabledVersion := range m.enabledVersions { + if !prioritizedGroupsSet.Has(enabledVersion.Group) { + remainingGroups.Insert(enabledVersion.Group) + } + } + + remainingResourcePriority, remainingKindPriority := m.prioritiesForGroups(remainingGroups.List()...) + resourcePriority = append(resourcePriority, remainingResourcePriority...) + kindPriority = append(kindPriority, remainingKindPriority...) 
+ + return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} +} + +// prioritiesForGroups returns the resource and kind priorities for a PriorityRESTMapper, preferring the preferred version of each group first, +// then any non-preferred version of the group second. +func (m *APIRegistrationManager) prioritiesForGroups(groups ...string) ([]schema.GroupVersionResource, []schema.GroupVersionKind) { + resourcePriority := []schema.GroupVersionResource{} + kindPriority := []schema.GroupVersionKind{} + + for _, group := range groups { + availableVersions := m.EnabledVersionsForGroup(group) + if len(availableVersions) > 0 { + resourcePriority = append(resourcePriority, availableVersions[0].WithResource(meta.AnyResource)) + kindPriority = append(kindPriority, availableVersions[0].WithKind(meta.AnyKind)) + } + } + for _, group := range groups { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{Group: group, Version: meta.AnyVersion, Resource: meta.AnyResource}) + kindPriority = append(kindPriority, schema.GroupVersionKind{Group: group, Version: meta.AnyVersion, Kind: meta.AnyKind}) + } + + return resourcePriority, kindPriority +} + +// AllPreferredGroupVersions returns the preferred versions of all registered +// groups in the form of "group1/version1,group2/version2,..." +func (m *APIRegistrationManager) AllPreferredGroupVersions() string { + if len(m.groupMetaMap) == 0 { + return "" + } + var defaults []string + for _, groupMeta := range m.groupMetaMap { + defaults = append(defaults, groupMeta.GroupVersion.String()) + } + sort.Strings(defaults) + return strings.Join(defaults, ",") +} + +// ValidateEnvRequestedVersions returns a list of versions that are requested in +// the KUBE_API_VERSIONS environment variable, but not enabled. +func (m *APIRegistrationManager) ValidateEnvRequestedVersions() []schema.GroupVersion { + var missingVersions []schema.GroupVersion + for _, v := range m.envRequestedVersions { + if _, found := m.enabledVersions[v]; !found { + missingVersions = append(missingVersions, v) + } + } + return missingVersions +} diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go new file mode 100644 index 0000000000..213e34bc00 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go @@ -0,0 +1,87 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apimachinery + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupMeta stores the metadata of a group. +type GroupMeta struct { + // GroupVersion represents the preferred version of the group. + GroupVersion schema.GroupVersion + + // GroupVersions is Group + all versions in that group. + GroupVersions []schema.GroupVersion + + // SelfLinker can set or get the SelfLink field of all API types. 
+ // TODO: when versioning changes, make this part of each API definition. + // TODO(lavalamp): Combine SelfLinker & ResourceVersioner interfaces, force all uses + // to go through the InterfacesFor method below. + SelfLinker runtime.SelfLinker + + // RESTMapper provides the default mapping between REST paths and the objects declared in api.Scheme and all known + // versions. + RESTMapper meta.RESTMapper + + // InterfacesFor returns the default Codec and ResourceVersioner for a given version + // string, or an error if the version is not known. + // TODO: make this stop being a func pointer and always use the default + // function provided below once every place that populates this field has been changed. + InterfacesFor func(version schema.GroupVersion) (*meta.VersionInterfaces, error) + + // InterfacesByVersion stores the per-version interfaces. + InterfacesByVersion map[schema.GroupVersion]*meta.VersionInterfaces +} + +// DefaultInterfacesFor returns the default Codec and ResourceVersioner for a given version +// string, or an error if the version is not known. +// TODO: Remove the "Default" prefix. +func (gm *GroupMeta) DefaultInterfacesFor(version schema.GroupVersion) (*meta.VersionInterfaces, error) { + if v, ok := gm.InterfacesByVersion[version]; ok { + return v, nil + } + return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, gm.GroupVersions) +} + +// AddVersionInterfaces adds the given version to the group. Only call during +// init, after that GroupMeta objects should be immutable. Not thread safe. +// (If you use this, be sure to set .InterfacesFor = .DefaultInterfacesFor) +// TODO: remove the "Interfaces" suffix and make this also maintain the +// .GroupVersions member. +func (gm *GroupMeta) AddVersionInterfaces(version schema.GroupVersion, interfaces *meta.VersionInterfaces) error { + if e, a := gm.GroupVersion.Group, version.Group; a != e { + return fmt.Errorf("got a version in group %v, but am in group %v", a, e) + } + if gm.InterfacesByVersion == nil { + gm.InterfacesByVersion = make(map[schema.GroupVersion]*meta.VersionInterfaces) + } + gm.InterfacesByVersion[version] = interfaces + + // TODO: refactor to make the below error not possible, this function + // should *set* GroupVersions rather than depend on it. + for _, v := range gm.GroupVersions { + if v == version { + return nil + } + } + return fmt.Errorf("added a version interface without the corresponding version %v being in the list %#v", version, gm.GroupVersions) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS new file mode 100755 index 0000000000..381a525094 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -0,0 +1,33 @@ +reviewers: +- thockin +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- caesarxuchao +- liggitt +- nikhiljindal +- gmarek +- erictune +- davidopp +- sttts +- quinton-hoole +- kargakis +- luxas +- janetkuo +- justinsb +- ncdc +- timothysc +- soltysh +- dims +- madhusudancs +- hongchaodeng +- krousey +- mml +- mbohlool +- david-mcmahon +- therc +- mqliang +- kevin-wangzefeng +- jianhuiz +- feihujiang diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go new file mode 100644 index 0000000000..b9a8fd02d5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go @@ -0,0 +1,264 @@ +/* +Copyright 2014 The Kubernetes Authors. 
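Editorial sketch, before the next vendored file, of the wiring the comments above describe: the version must already be listed in GroupVersions for AddVersionInterfaces to accept it, and InterfacesFor is normally pointed at DefaultInterfacesFor. The group name and the scheme argument are placeholders.

package example

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/apimachinery"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func newGroupMeta(scheme *runtime.Scheme) (*apimachinery.GroupMeta, error) {
	gv := schema.GroupVersion{Group: "example.k8s.io", Version: "v1"}
	gm := &apimachinery.GroupMeta{
		GroupVersion:  gv,
		GroupVersions: []schema.GroupVersion{gv}, // must already contain gv, per the check above
	}
	if err := gm.AddVersionInterfaces(gv, &meta.VersionInterfaces{
		ObjectConvertor:  scheme, // *runtime.Scheme implements ObjectConvertor
		MetadataAccessor: meta.NewAccessor(),
	}); err != nil {
		return nil, err
	}
	gm.InterfacesFor = gm.DefaultInterfacesFor
	return gm, nil
}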
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func AddConversionFuncs(scheme *runtime.Scheme) error { + return scheme.AddConversionFuncs( + Convert_v1_TypeMeta_To_v1_TypeMeta, + + Convert_unversioned_ListMeta_To_unversioned_ListMeta, + + Convert_intstr_IntOrString_To_intstr_IntOrString, + + Convert_unversioned_Time_To_unversioned_Time, + + Convert_Slice_string_To_unversioned_Time, + + Convert_resource_Quantity_To_resource_Quantity, + + Convert_string_To_labels_Selector, + Convert_labels_Selector_To_string, + + Convert_string_To_fields_Selector, + Convert_fields_Selector_To_string, + + Convert_Pointer_bool_To_bool, + Convert_bool_To_Pointer_bool, + + Convert_Pointer_string_To_string, + Convert_string_To_Pointer_string, + + Convert_Pointer_int64_To_int, + Convert_int_To_Pointer_int64, + + Convert_Pointer_int32_To_int32, + Convert_int32_To_Pointer_int32, + + Convert_Pointer_float64_To_float64, + Convert_float64_To_Pointer_float64, + + Convert_map_to_unversioned_LabelSelector, + Convert_unversioned_LabelSelector_to_map, + + Convert_Slice_string_To_Slice_int32, + ) +} + +func Convert_Pointer_float64_To_float64(in **float64, out *float64, s conversion.Scope) error { + if *in == nil { + *out = 0 + return nil + } + *out = float64(**in) + return nil +} + +func Convert_float64_To_Pointer_float64(in *float64, out **float64, s conversion.Scope) error { + temp := float64(*in) + *out = &temp + return nil +} + +func Convert_Pointer_int32_To_int32(in **int32, out *int32, s conversion.Scope) error { + if *in == nil { + *out = 0 + return nil + } + *out = int32(**in) + return nil +} + +func Convert_int32_To_Pointer_int32(in *int32, out **int32, s conversion.Scope) error { + temp := int32(*in) + *out = &temp + return nil +} + +func Convert_Pointer_int64_To_int(in **int64, out *int, s conversion.Scope) error { + if *in == nil { + *out = 0 + return nil + } + *out = int(**in) + return nil +} + +func Convert_int_To_Pointer_int64(in *int, out **int64, s conversion.Scope) error { + temp := int64(*in) + *out = &temp + return nil +} + +func Convert_Pointer_string_To_string(in **string, out *string, s conversion.Scope) error { + if *in == nil { + *out = "" + return nil + } + *out = **in + return nil +} + +func Convert_string_To_Pointer_string(in *string, out **string, s conversion.Scope) error { + if in == nil { + stringVar := "" + *out = &stringVar + return nil + } + *out = in + return nil +} + +func Convert_Pointer_bool_To_bool(in **bool, out *bool, s conversion.Scope) error { + if *in == nil { + *out = false + return nil + } + *out = **in + return nil +} + +func Convert_bool_To_Pointer_bool(in *bool, out **bool, s conversion.Scope) error { + if in == nil { + boolVar := false + *out = &boolVar + return nil + } + *out = in + return nil +} + +// 
+k8s:conversion-fn=drop +func Convert_v1_TypeMeta_To_v1_TypeMeta(in, out *TypeMeta, s conversion.Scope) error { + // These values are explicitly not copied + //out.APIVersion = in.APIVersion + //out.Kind = in.Kind + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_unversioned_ListMeta_To_unversioned_ListMeta(in, out *ListMeta, s conversion.Scope) error { + *out = *in + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_intstr_IntOrString_To_intstr_IntOrString(in, out *intstr.IntOrString, s conversion.Scope) error { + *out = *in + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_unversioned_Time_To_unversioned_Time(in *Time, out *Time, s conversion.Scope) error { + // Cannot deep copy these, because time.Time has unexported fields. + *out = *in + return nil +} + +// Convert_Slice_string_To_unversioned_Time allows converting a URL query parameter value +func Convert_Slice_string_To_unversioned_Time(input *[]string, out *Time, s conversion.Scope) error { + str := "" + if len(*input) > 0 { + str = (*input)[0] + } + return out.UnmarshalQueryParameter(str) +} + +func Convert_string_To_labels_Selector(in *string, out *labels.Selector, s conversion.Scope) error { + selector, err := labels.Parse(*in) + if err != nil { + return err + } + *out = selector + return nil +} + +func Convert_string_To_fields_Selector(in *string, out *fields.Selector, s conversion.Scope) error { + selector, err := fields.ParseSelector(*in) + if err != nil { + return err + } + *out = selector + return nil +} + +func Convert_labels_Selector_To_string(in *labels.Selector, out *string, s conversion.Scope) error { + if *in == nil { + return nil + } + *out = (*in).String() + return nil +} + +func Convert_fields_Selector_To_string(in *fields.Selector, out *string, s conversion.Scope) error { + if *in == nil { + return nil + } + *out = (*in).String() + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_resource_Quantity_To_resource_Quantity(in *resource.Quantity, out *resource.Quantity, s conversion.Scope) error { + *out = *in + return nil +} + +func Convert_map_to_unversioned_LabelSelector(in *map[string]string, out *LabelSelector, s conversion.Scope) error { + if in == nil { + return nil + } + out = new(LabelSelector) + for labelKey, labelValue := range *in { + AddLabelToSelector(out, labelKey, labelValue) + } + return nil +} + +func Convert_unversioned_LabelSelector_to_map(in *LabelSelector, out *map[string]string, s conversion.Scope) error { + var err error + *out, err = LabelSelectorAsMap(in) + return err +} + +// Convert_Slice_string_To_Slice_int32 converts multiple query parameters or +// a single query parameter with a comma delimited value to multiple int32. +// This is used for port forwarding which needs the ports as int32. +func Convert_Slice_string_To_Slice_int32(in *[]string, out *[]int32, s conversion.Scope) error { + for _, s := range *in { + for _, v := range strings.Split(s, ",") { + x, err := strconv.ParseUint(v, 10, 16) + if err != nil { + return fmt.Errorf("cannot convert to []int32: %v", err) + } + *out = append(*out, int32(x)) + } + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go new file mode 100644 index 0000000000..52273240f0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2016 The Kubernetes Authors. 
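Editorial sketch before the next vendored file: these helpers are normally registered on a Scheme through AddConversionFuncs, but they can also be called directly; the nil Scope is fine here because the function ignores it, and the metav1 alias is only the conventional import name.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := metav1.AddConversionFuncs(scheme); err != nil {
		panic(err)
	}

	// Comma-delimited query parameters become []int32, as used for port forwarding.
	in := []string{"8080,8443"}
	var ports []int32
	if err := metav1.Convert_Slice_string_To_Slice_int32(&in, &ports, nil); err != nil {
		panic(err)
	}
	fmt.Println(ports) // [8080 8443]
}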
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=meta.k8s.io +package v1 diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go new file mode 100644 index 0000000000..fea458dfb3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go @@ -0,0 +1,47 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "time" +) + +// Duration is a wrapper around time.Duration which supports correct +// marshaling to YAML and JSON. In particular, it marshals into strings, which +// can be used as map keys in json. +type Duration struct { + time.Duration `protobuf:"varint,1,opt,name=duration,casttype=time.Duration"` +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (d *Duration) UnmarshalJSON(b []byte) error { + var str string + json.Unmarshal(b, &str) + + pd, err := time.ParseDuration(str) + if err != nil { + return err + } + d.Duration = pd + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (d Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(d.Duration.String()) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go new file mode 100644 index 0000000000..72282474d4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -0,0 +1,6915 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. 
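A quick editorial illustration of the Duration wrapper above, before the generated meta/v1 protobuf code continues: it marshals to the Go duration string rather than a bare integer, so it round-trips cleanly through JSON.

package main

import (
	"encoding/json"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	d := metav1.Duration{Duration: 90 * time.Second}
	b, _ := json.Marshal(d)
	fmt.Println(string(b)) // "1m30s"

	var out metav1.Duration
	_ = json.Unmarshal(b, &out)
	fmt.Println(out.Duration == 90*time.Second) // true
}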
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto + + It has these top-level messages: + APIGroup + APIGroupList + APIResource + APIResourceList + APIVersions + DeleteOptions + Duration + ExportOptions + GetOptions + GroupKind + GroupResource + GroupVersion + GroupVersionForDiscovery + GroupVersionKind + GroupVersionResource + LabelSelector + LabelSelectorRequirement + ListMeta + ListOptions + ObjectMeta + OwnerReference + Preconditions + RootPaths + ServerAddressByClientCIDR + Status + StatusCause + StatusDetails + Time + Timestamp + TypeMeta + Verbs + WatchEvent +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import time "time" +import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *APIGroup) Reset() { *m = APIGroup{} } +func (*APIGroup) ProtoMessage() {} +func (*APIGroup) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *APIGroupList) Reset() { *m = APIGroupList{} } +func (*APIGroupList) ProtoMessage() {} +func (*APIGroupList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *APIResource) Reset() { *m = APIResource{} } +func (*APIResource) ProtoMessage() {} +func (*APIResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *APIResourceList) Reset() { *m = APIResourceList{} } +func (*APIResourceList) ProtoMessage() {} +func (*APIResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *APIVersions) Reset() { *m = APIVersions{} } +func (*APIVersions) ProtoMessage() {} +func (*APIVersions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } +func (*DeleteOptions) ProtoMessage() {} +func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *Duration) Reset() { *m = Duration{} } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *ExportOptions) Reset() { *m = ExportOptions{} } +func (*ExportOptions) ProtoMessage() {} +func (*ExportOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *GetOptions) Reset() { *m = GetOptions{} } +func (*GetOptions) ProtoMessage() {} +func (*GetOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *GroupKind) Reset() { *m = GroupKind{} } +func (*GroupKind) ProtoMessage() {} +func (*GroupKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *GroupResource) Reset() { *m = GroupResource{} } +func (*GroupResource) ProtoMessage() {} +func (*GroupResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *GroupVersion) Reset() { *m = GroupVersion{} } +func (*GroupVersion) ProtoMessage() {} +func (*GroupVersion) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{11} } + +func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } +func (*GroupVersionForDiscovery) ProtoMessage() {} +func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{12} +} + +func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } +func (*GroupVersionKind) ProtoMessage() {} +func (*GroupVersionKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } +func (*GroupVersionResource) ProtoMessage() {} +func (*GroupVersionResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + +func (m *LabelSelector) Reset() { *m = LabelSelector{} } +func (*LabelSelector) ProtoMessage() {} +func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + +func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } +func (*LabelSelectorRequirement) ProtoMessage() {} +func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{16} +} + +func (m *ListMeta) Reset() { *m = ListMeta{} } +func (*ListMeta) ProtoMessage() {} +func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } + +func (m *ListOptions) Reset() { *m = ListOptions{} } +func (*ListOptions) ProtoMessage() {} +func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } + +func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } +func (*ObjectMeta) ProtoMessage() {} +func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } + +func (m *OwnerReference) Reset() { *m = OwnerReference{} } +func (*OwnerReference) ProtoMessage() {} +func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } + +func (m *Preconditions) Reset() { *m = Preconditions{} } +func (*Preconditions) ProtoMessage() {} +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } + +func (m *RootPaths) Reset() { *m = RootPaths{} } +func (*RootPaths) ProtoMessage() {} +func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } + +func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } +func (*ServerAddressByClientCIDR) ProtoMessage() {} +func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{23} +} + +func (m *Status) Reset() { *m = Status{} } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } + +func (m *StatusCause) Reset() { *m = StatusCause{} } +func (*StatusCause) ProtoMessage() {} +func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } + +func (m *StatusDetails) Reset() { *m = StatusDetails{} } +func (*StatusDetails) ProtoMessage() {} +func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } + +func (m *Time) Reset() { *m = Time{} } +func (*Time) ProtoMessage() {} +func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } + +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) 
ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } + +func (m *Verbs) Reset() { *m = Verbs{} } +func (*Verbs) ProtoMessage() {} +func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } + +func (m *WatchEvent) Reset() { *m = WatchEvent{} } +func (*WatchEvent) ProtoMessage() {} +func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } + +func init() { + proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup") + proto.RegisterType((*APIGroupList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroupList") + proto.RegisterType((*APIResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResource") + proto.RegisterType((*APIResourceList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResourceList") + proto.RegisterType((*APIVersions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIVersions") + proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions") + proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration") + proto.RegisterType((*ExportOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ExportOptions") + proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions") + proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind") + proto.RegisterType((*GroupResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupResource") + proto.RegisterType((*GroupVersion)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersion") + proto.RegisterType((*GroupVersionForDiscovery)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery") + proto.RegisterType((*GroupVersionKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind") + proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource") + proto.RegisterType((*LabelSelector)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector") + proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement") + proto.RegisterType((*ListMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta") + proto.RegisterType((*ListOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions") + proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta") + proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference") + proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions") + proto.RegisterType((*RootPaths)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.RootPaths") + proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR") + proto.RegisterType((*Status)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Status") + proto.RegisterType((*StatusCause)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusCause") + proto.RegisterType((*StatusDetails)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusDetails") + proto.RegisterType((*Time)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Time") + proto.RegisterType((*Timestamp)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Timestamp") + proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TypeMeta") + proto.RegisterType((*Verbs)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Verbs") + proto.RegisterType((*WatchEvent)(nil), 
"k8s.io.apimachinery.pkg.apis.meta.v1.WatchEvent") +} +func (m *APIGroup) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *APIGroup) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + if len(m.Versions) > 0 { + for _, msg := range m.Versions { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.PreferredVersion.Size())) + n1, err := m.PreferredVersion.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, msg := range m.ServerAddressByClientCIDRs { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *APIGroupList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *APIGroupList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Groups) > 0 { + for _, msg := range m.Groups { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *APIResource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *APIResource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x10 + i++ + if m.Namespaced { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + if m.Verbs != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Verbs.Size())) + n2, err := m.Verbs.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if len(m.ShortNames) > 0 { + for _, s := range m.ShortNames { + data[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *APIResourceList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *APIResourceList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.GroupVersion))) + i += copy(data[i:], m.GroupVersion) + if len(m.APIResources) > 0 { + for _, msg := range m.APIResources { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *APIVersions) Marshal() 
(data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *APIVersions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Versions) > 0 { + for _, s := range m.Versions { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, msg := range m.ServerAddressByClientCIDRs { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DeleteOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeleteOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.GracePeriodSeconds != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.GracePeriodSeconds)) + } + if m.Preconditions != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Preconditions.Size())) + n3, err := m.Preconditions.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.OrphanDependents != nil { + data[i] = 0x18 + i++ + if *m.OrphanDependents { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + if m.PropagationPolicy != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.PropagationPolicy))) + i += copy(data[i:], *m.PropagationPolicy) + } + return i, nil +} + +func (m *Duration) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Duration) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Duration)) + return i, nil +} + +func (m *ExportOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ExportOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Export { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x10 + i++ + if m.Exact { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *GetOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GetOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) + i += copy(data[i:], m.ResourceVersion) + return i, nil +} + +func (m *GroupKind) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GroupKind) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += 
copy(data[i:], m.Group) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + return i, nil +} + +func (m *GroupResource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GroupResource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Resource))) + i += copy(data[i:], m.Resource) + return i, nil +} + +func (m *GroupVersion) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GroupVersion) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Version))) + i += copy(data[i:], m.Version) + return i, nil +} + +func (m *GroupVersionForDiscovery) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GroupVersionForDiscovery) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.GroupVersion))) + i += copy(data[i:], m.GroupVersion) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Version))) + i += copy(data[i:], m.Version) + return i, nil +} + +func (m *GroupVersionKind) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GroupVersionKind) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Version))) + i += copy(data[i:], m.Version) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + return i, nil +} + +func (m *GroupVersionResource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GroupVersionResource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Version))) + i += copy(data[i:], m.Version) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Resource))) + i += copy(data[i:], m.Resource) + return i, nil +} + +func (m *LabelSelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LabelSelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + 
if len(m.MatchLabels) > 0 { + for k := range m.MatchLabels { + data[i] = 0xa + i++ + v := m.MatchLabels[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if len(m.MatchExpressions) > 0 { + for _, msg := range m.MatchExpressions { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LabelSelectorRequirement) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LabelSelectorRequirement) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) + i += copy(data[i:], m.Operator) + if len(m.Values) > 0 { + for _, s := range m.Values { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *ListMeta) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ListMeta) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SelfLink))) + i += copy(data[i:], m.SelfLink) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) + i += copy(data[i:], m.ResourceVersion) + return i, nil +} + +func (m *ListOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ListOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.LabelSelector))) + i += copy(data[i:], m.LabelSelector) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FieldSelector))) + i += copy(data[i:], m.FieldSelector) + data[i] = 0x18 + i++ + if m.Watch { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) + i += copy(data[i:], m.ResourceVersion) + if m.TimeoutSeconds != nil { + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TimeoutSeconds)) + } + return i, nil +} + +func (m *ObjectMeta) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectMeta) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.GenerateName))) + i 
+= copy(data[i:], m.GenerateName) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) + i += copy(data[i:], m.Namespace) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SelfLink))) + i += copy(data[i:], m.SelfLink) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.UID))) + i += copy(data[i:], m.UID) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) + i += copy(data[i:], m.ResourceVersion) + data[i] = 0x38 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Generation)) + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CreationTimestamp.Size())) + n4, err := m.CreationTimestamp.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + if m.DeletionTimestamp != nil { + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(m.DeletionTimestamp.Size())) + n5, err := m.DeletionTimestamp.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if m.DeletionGracePeriodSeconds != nil { + data[i] = 0x50 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.DeletionGracePeriodSeconds)) + } + if len(m.Labels) > 0 { + for k := range m.Labels { + data[i] = 0x5a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if len(m.Annotations) > 0 { + for k := range m.Annotations { + data[i] = 0x62 + i++ + v := m.Annotations[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if len(m.OwnerReferences) > 0 { + for _, msg := range m.OwnerReferences { + data[i] = 0x6a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + data[i] = 0x72 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x7a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ClusterName))) + i += copy(data[i:], m.ClusterName) + return i, nil +} + +func (m *OwnerReference) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *OwnerReference) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.UID))) + i += copy(data[i:], m.UID) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + if m.Controller != nil { + data[i] = 0x30 + i++ + 
if *m.Controller { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + if m.BlockOwnerDeletion != nil { + data[i] = 0x38 + i++ + if *m.BlockOwnerDeletion { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *Preconditions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Preconditions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.UID != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.UID))) + i += copy(data[i:], *m.UID) + } + return i, nil +} + +func (m *RootPaths) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RootPaths) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *ServerAddressByClientCIDR) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServerAddressByClientCIDR) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ClientCIDR))) + i += copy(data[i:], m.ClientCIDR) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ServerAddress))) + i += copy(data[i:], m.ServerAddress) + return i, nil +} + +func (m *Status) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Status) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + if m.Details != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Details.Size())) + n7, err := m.Details.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Code)) + return i, nil +} + +func (m *StatusCause) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StatusCause) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x1a + i++ + i = 
encodeVarintGenerated(data, i, uint64(len(m.Field))) + i += copy(data[i:], m.Field) + return i, nil +} + +func (m *StatusDetails) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StatusDetails) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + if len(m.Causes) > 0 { + for _, msg := range m.Causes { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RetryAfterSeconds)) + return i, nil +} + +func (m *Timestamp) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Timestamp) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Seconds)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Nanos)) + return i, nil +} + +func (m *TypeMeta) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *TypeMeta) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + return i, nil +} + +func (m Verbs) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m Verbs) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *WatchEvent) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *WatchEvent) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Object.Size())) + n8, err := m.Object.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 
48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *APIGroup) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.PreferredVersion.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, e := range m.ServerAddressByClientCIDRs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIGroupList) Size() (n int) { + var l int + _ = l + if len(m.Groups) > 0 { + for _, e := range m.Groups { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIResource) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + if m.Verbs != nil { + l = m.Verbs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ShortNames) > 0 { + for _, s := range m.ShortNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIResourceList) Size() (n int) { + var l int + _ = l + l = len(m.GroupVersion) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.APIResources) > 0 { + for _, e := range m.APIResources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIVersions) Size() (n int) { + var l int + _ = l + if len(m.Versions) > 0 { + for _, s := range m.Versions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, e := range m.ServerAddressByClientCIDRs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeleteOptions) Size() (n int) { + var l int + _ = l + if m.GracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.GracePeriodSeconds)) + } + if m.Preconditions != nil { + l = m.Preconditions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.OrphanDependents != nil { + n += 2 + } + if m.PropagationPolicy != nil { + l = len(*m.PropagationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Duration) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Duration)) + return n +} + +func (m *ExportOptions) Size() (n int) { + var l int + _ = l + n += 2 + n += 2 + return n +} + +func (m *GetOptions) Size() (n int) { + var l int + _ = l + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupKind) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupResource) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersion) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + return n 
+} + +func (m *GroupVersionForDiscovery) Size() (n int) { + var l int + _ = l + l = len(m.GroupVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersionKind) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersionResource) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LabelSelector) Size() (n int) { + var l int + _ = l + if len(m.MatchLabels) > 0 { + for k, v := range m.MatchLabels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LabelSelectorRequirement) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ListMeta) Size() (n int) { + var l int + _ = l + l = len(m.SelfLink) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ListOptions) Size() (n int) { + var l int + _ = l + l = len(m.LabelSelector) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldSelector) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + return n +} + +func (m *ObjectMeta) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.GenerateName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SelfLink) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + l = m.CreationTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.DeletionTimestamp != nil { + l = m.DeletionTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DeletionGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.DeletionGracePeriodSeconds)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.OwnerReferences) > 0 { + for _, e := range m.OwnerReferences { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Finalizers) > 0 { + for _, s := 
range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ClusterName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *OwnerReference) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + if m.Controller != nil { + n += 2 + } + if m.BlockOwnerDeletion != nil { + n += 2 + } + return n +} + +func (m *Preconditions) Size() (n int) { + var l int + _ = l + if m.UID != nil { + l = len(*m.UID) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RootPaths) Size() (n int) { + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServerAddressByClientCIDR) Size() (n int) { + var l int + _ = l + l = len(m.ClientCIDR) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ServerAddress) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Status) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + if m.Details != nil { + l = m.Details.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Code)) + return n +} + +func (m *StatusCause) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Field) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatusDetails) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Causes) > 0 { + for _, e := range m.Causes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.RetryAfterSeconds)) + return n +} + +func (m *Timestamp) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Seconds)) + n += 1 + sovGenerated(uint64(m.Nanos)) + return n +} + +func (m *TypeMeta) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m Verbs) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *WatchEvent) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *APIGroup) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIGroup{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, + `PreferredVersion:` + 
strings.Replace(strings.Replace(this.PreferredVersion.String(), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, + `ServerAddressByClientCIDRs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServerAddressByClientCIDRs), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *APIGroupList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIGroupList{`, + `Groups:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Groups), "APIGroup", "APIGroup", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *APIResource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIResource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespaced:` + fmt.Sprintf("%v", this.Namespaced) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Verbs:` + strings.Replace(fmt.Sprintf("%v", this.Verbs), "Verbs", "Verbs", 1) + `,`, + `ShortNames:` + fmt.Sprintf("%v", this.ShortNames) + `,`, + `}`, + }, "") + return s +} +func (this *APIResourceList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIResourceList{`, + `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, + `APIResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.APIResources), "APIResource", "APIResource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeleteOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteOptions{`, + `GracePeriodSeconds:` + valueToStringGenerated(this.GracePeriodSeconds) + `,`, + `Preconditions:` + strings.Replace(fmt.Sprintf("%v", this.Preconditions), "Preconditions", "Preconditions", 1) + `,`, + `OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`, + `PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *Duration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Duration{`, + `Duration:` + fmt.Sprintf("%v", this.Duration) + `,`, + `}`, + }, "") + return s +} +func (this *ExportOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExportOptions{`, + `Export:` + fmt.Sprintf("%v", this.Export) + `,`, + `Exact:` + fmt.Sprintf("%v", this.Exact) + `,`, + `}`, + }, "") + return s +} +func (this *GetOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetOptions{`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `}`, + }, "") + return s +} +func (this *GroupVersionForDiscovery) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GroupVersionForDiscovery{`, + `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `}`, + }, "") + return s +} +func (this *LabelSelector) String() string { + if this == nil { + return "nil" + } + keysForMatchLabels := make([]string, 0, len(this.MatchLabels)) + for k := range this.MatchLabels { + keysForMatchLabels = append(keysForMatchLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels) + mapStringForMatchLabels := "map[string]string{" + for _, k := range keysForMatchLabels { + mapStringForMatchLabels += fmt.Sprintf("%v: %v,", k, this.MatchLabels[k]) + } + mapStringForMatchLabels += "}" + s := 
strings.Join([]string{`&LabelSelector{`, + `MatchLabels:` + mapStringForMatchLabels + `,`, + `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "LabelSelectorRequirement", "LabelSelectorRequirement", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LabelSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelSelectorRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *ListMeta) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListMeta{`, + `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `}`, + }, "") + return s +} +func (this *ListOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListOptions{`, + `LabelSelector:` + fmt.Sprintf("%v", this.LabelSelector) + `,`, + `FieldSelector:` + fmt.Sprintf("%v", this.FieldSelector) + `,`, + `Watch:` + fmt.Sprintf("%v", this.Watch) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMeta) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&ObjectMeta{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `GenerateName:` + fmt.Sprintf("%v", this.GenerateName) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `CreationTimestamp:` + strings.Replace(strings.Replace(this.CreationTimestamp.String(), "Time", "Time", 1), `&`, ``, 1) + `,`, + `DeletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.DeletionTimestamp), "Time", "Time", 1) + `,`, + `DeletionGracePeriodSeconds:` + valueToStringGenerated(this.DeletionGracePeriodSeconds) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `OwnerReferences:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OwnerReferences), "OwnerReference", "OwnerReference", 1), `&`, ``, 1) + `,`, + `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, + `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, + `}`, + }, "") + return s +} +func (this *OwnerReference) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&OwnerReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Controller:` + valueToStringGenerated(this.Controller) + `,`, + `BlockOwnerDeletion:` + valueToStringGenerated(this.BlockOwnerDeletion) + `,`, + `}`, + }, "") + return s +} +func (this *Preconditions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Preconditions{`, + `UID:` + valueToStringGenerated(this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *RootPaths) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RootPaths{`, + `Paths:` + fmt.Sprintf("%v", this.Paths) + `,`, + `}`, + }, "") + return s +} +func (this *ServerAddressByClientCIDR) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServerAddressByClientCIDR{`, + `ClientCIDR:` + fmt.Sprintf("%v", this.ClientCIDR) + `,`, + `ServerAddress:` + fmt.Sprintf("%v", this.ServerAddress) + `,`, + `}`, + }, "") + return s +} +func (this *Status) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Status{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Details:` + strings.Replace(fmt.Sprintf("%v", this.Details), "StatusDetails", "StatusDetails", 1) + `,`, + `Code:` + fmt.Sprintf("%v", this.Code) + `,`, + `}`, + }, "") + return s +} +func (this *StatusCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatusCause{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Field:` + fmt.Sprintf("%v", this.Field) + `,`, + `}`, + }, "") + return s +} +func (this *StatusDetails) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatusDetails{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Causes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Causes), "StatusCause", "StatusCause", 1), `&`, ``, 1) + `,`, + `RetryAfterSeconds:` + fmt.Sprintf("%v", this.RetryAfterSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *Timestamp) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Timestamp{`, + `Seconds:` + fmt.Sprintf("%v", this.Seconds) + `,`, + `Nanos:` + fmt.Sprintf("%v", this.Nanos) + `,`, + `}`, + }, "") + return s +} +func (this *TypeMeta) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TypeMeta{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `}`, + }, "") + return s +} +func (this *WatchEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchEvent{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(strings.Replace(this.Object.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := 
reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *APIGroup) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIGroup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIGroup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, GroupVersionForDiscovery{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PreferredVersion.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) + if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIGroupList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIGroupList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIGroupList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, APIGroup{}) + if err := m.Groups[len(m.Groups)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIResource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaced", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Namespaced = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Verbs == nil { + m.Verbs = Verbs{} + } + if err := m.Verbs.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShortNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShortNames = append(m.ShortNames, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIResourceList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIResourceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIResourceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIResources = append(m.APIResources, APIResource{}) + if err := m.APIResources[len(m.APIResources)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIVersions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIVersions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIVersions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) + if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + 
return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.GracePeriodSeconds = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Preconditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Preconditions == nil { + m.Preconditions = &Preconditions{} + } + if err := m.Preconditions.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OrphanDependents", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OrphanDependents = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PropagationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := DeletionPropagation(data[iNdEx:postIndex]) + m.PropagationPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Duration) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Duration |= (time.Duration(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExportOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Export", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Export = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Exact = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: GetOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupKind) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupKind: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupKind: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupResource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersion) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersionForDiscovery) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersionForDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersionForDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersionKind) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersionKind: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersionKind: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersionResource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 
{ + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + 
stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.MatchLabels == nil { + m.MatchLabels = make(map[string]string) + } + m.MatchLabels[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelectorRequirement) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) 
<< shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = LabelSelectorOperator(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListMeta) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelfLink = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen 
|= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelSelector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldSelector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Watch", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Watch = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMeta) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GenerateName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelfLink = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Generation |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreationTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CreationTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeletionTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeletionTimestamp == nil { + m.DeletionTimestamp = &Time{} + } + if err := m.DeletionTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex 
+ case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeletionGracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DeletionGracePeriodSeconds = &v + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Labels == nil { + m.Labels = make(map[string]string) + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OwnerReferences = append(m.OwnerReferences, OwnerReference{}) + if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Finalizers = append(m.Finalizers, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OwnerReference) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OwnerReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OwnerReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen 
< 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Controller = &b + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockOwnerDeletion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.BlockOwnerDeletion = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Preconditions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Preconditions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Preconditions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_apimachinery_pkg_types.UID(data[iNdEx:postIndex]) + m.UID = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RootPaths) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RootPaths: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RootPaths: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServerAddressByClientCIDR) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServerAddressByClientCIDR: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServerAddressByClientCIDR: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientCIDR", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientCIDR = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddress = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Status) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Status: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = StatusReason(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Details == nil { + m.Details = &StatusDetails{} + } + if err := m.Details.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Code |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusCause) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = CauseType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Field", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Field = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusDetails) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusDetails: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusDetails: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Causes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Causes = append(m.Causes, StatusCause{}) + if err := m.Causes[len(m.Causes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RetryAfterSeconds", wireType) + } + m.RetryAfterSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.RetryAfterSeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Timestamp) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Seconds |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Nanos |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TypeMeta) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Verbs) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Verbs: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Verbs: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchEvent) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 2160 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 
0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x19, 0xcd, 0x6f, 0x23, 0x57, + 0x3d, 0x63, 0xc7, 0x5e, 0xfb, 0xe7, 0x38, 0x1f, 0xaf, 0x59, 0x70, 0x23, 0x61, 0xa7, 0xd3, 0x0a, + 0xa5, 0xb0, 0xb5, 0x49, 0x0a, 0xd5, 0xb2, 0xc0, 0x42, 0x26, 0xce, 0x46, 0x51, 0x37, 0x9b, 0xe8, + 0xa5, 0xbb, 0x88, 0x65, 0x85, 0x98, 0xcc, 0xbc, 0x38, 0x43, 0xc6, 0x33, 0xc3, 0x7b, 0x63, 0x6f, + 0x4c, 0x0f, 0x54, 0x2a, 0x48, 0x1c, 0x10, 0xda, 0x23, 0x07, 0x84, 0xba, 0x82, 0x1b, 0x37, 0xfe, + 0x06, 0x24, 0xf6, 0x58, 0x89, 0x0b, 0x07, 0x64, 0xb1, 0xee, 0x81, 0x23, 0xf7, 0x9c, 0xd0, 0x7b, + 0xf3, 0xe6, 0xcb, 0x8e, 0x9b, 0x31, 0xed, 0xa1, 0xa7, 0x78, 0x7e, 0xdf, 0xef, 0xf7, 0xfd, 0x5e, + 0xe0, 0xe0, 0xfc, 0x36, 0x6b, 0x5a, 0x6e, 0xeb, 0xbc, 0x77, 0x42, 0xa8, 0x43, 0x7c, 0xc2, 0x5a, + 0x7d, 0xe2, 0x98, 0x2e, 0x6d, 0x49, 0x84, 0xee, 0x59, 0x5d, 0xdd, 0x38, 0xb3, 0x1c, 0x42, 0x07, + 0x2d, 0xef, 0xbc, 0xc3, 0x01, 0xac, 0xd5, 0x25, 0xbe, 0xde, 0xea, 0x6f, 0xb6, 0x3a, 0xc4, 0x21, + 0x54, 0xf7, 0x89, 0xd9, 0xf4, 0xa8, 0xeb, 0xbb, 0xe8, 0x8d, 0x80, 0xab, 0x99, 0xe4, 0x6a, 0x7a, + 0xe7, 0x1d, 0x0e, 0x60, 0x4d, 0xce, 0xd5, 0xec, 0x6f, 0xae, 0xbd, 0xd5, 0xb1, 0xfc, 0xb3, 0xde, + 0x49, 0xd3, 0x70, 0xbb, 0xad, 0x8e, 0xdb, 0x71, 0x5b, 0x82, 0xf9, 0xa4, 0x77, 0x2a, 0xbe, 0xc4, + 0x87, 0xf8, 0x15, 0x08, 0x5d, 0x9b, 0x6a, 0x0a, 0xed, 0x39, 0xbe, 0xd5, 0x25, 0xe3, 0x56, 0xac, + 0xbd, 0x73, 0x1d, 0x03, 0x33, 0xce, 0x48, 0x57, 0x9f, 0xe0, 0x7b, 0x7b, 0x1a, 0x5f, 0xcf, 0xb7, + 0xec, 0x96, 0xe5, 0xf8, 0xcc, 0xa7, 0xe3, 0x4c, 0xea, 0xdf, 0xf3, 0x50, 0xda, 0x3e, 0xda, 0xdf, + 0xa3, 0x6e, 0xcf, 0x43, 0xeb, 0x30, 0xef, 0xe8, 0x5d, 0x52, 0x53, 0xd6, 0x95, 0x8d, 0xb2, 0xb6, + 0xf0, 0x62, 0xd8, 0x98, 0x1b, 0x0d, 0x1b, 0xf3, 0x0f, 0xf4, 0x2e, 0xc1, 0x02, 0x83, 0x6c, 0x28, + 0xf5, 0x09, 0x65, 0x96, 0xeb, 0xb0, 0x5a, 0x6e, 0x3d, 0xbf, 0x51, 0xd9, 0xba, 0xdb, 0xcc, 0xe2, + 0xb4, 0xa6, 0x50, 0xf0, 0x28, 0x60, 0xbd, 0xe7, 0xd2, 0xb6, 0xc5, 0x0c, 0xb7, 0x4f, 0xe8, 0x40, + 0x5b, 0x96, 0x5a, 0x4a, 0x12, 0xc9, 0x70, 0xa4, 0x01, 0xfd, 0x4a, 0x81, 0x65, 0x8f, 0x92, 0x53, + 0x42, 0x29, 0x31, 0x25, 0xbe, 0x96, 0x5f, 0x57, 0x3e, 0x07, 0xb5, 0x35, 0xa9, 0x76, 0xf9, 0x68, + 0x4c, 0x3e, 0x9e, 0xd0, 0x88, 0xfe, 0xa4, 0xc0, 0x1a, 0x23, 0xb4, 0x4f, 0xe8, 0xb6, 0x69, 0x52, + 0xc2, 0x98, 0x36, 0xd8, 0xb1, 0x2d, 0xe2, 0xf8, 0x3b, 0xfb, 0x6d, 0xcc, 0x6a, 0xf3, 0xc2, 0x0f, + 0xdf, 0xcf, 0x66, 0xd0, 0xf1, 0x34, 0x39, 0x9a, 0x2a, 0x2d, 0x5a, 0x9b, 0x4a, 0xc2, 0xf0, 0xa7, + 0x98, 0xa1, 0x9e, 0xc2, 0x42, 0x18, 0xc8, 0xfb, 0x16, 0xf3, 0xd1, 0x23, 0x28, 0x76, 0xf8, 0x07, + 0xab, 0x29, 0xc2, 0xc0, 0x66, 0x36, 0x03, 0x43, 0x19, 0xda, 0xa2, 0xb4, 0xa7, 0x28, 0x3e, 0x19, + 0x96, 0xd2, 0xd4, 0x0f, 0x73, 0x50, 0xd9, 0x3e, 0xda, 0xc7, 0x84, 0xb9, 0x3d, 0x6a, 0x90, 0x0c, + 0x49, 0xb3, 0x05, 0xc0, 0xff, 0x32, 0x4f, 0x37, 0x88, 0x59, 0xcb, 0xad, 0x2b, 0x1b, 0x25, 0x0d, + 0x49, 0x3a, 0x78, 0x10, 0x61, 0x70, 0x82, 0x8a, 0x4b, 0x3d, 0xb7, 0x1c, 0x53, 0x44, 0x3b, 0x21, + 0xf5, 0x5d, 0xcb, 0x31, 0xb1, 0xc0, 0xa0, 0xfb, 0x50, 0xe8, 0x13, 0x7a, 0xc2, 0xfd, 0xcf, 0x13, + 0xe2, 0xeb, 0xd9, 0x8e, 0xf7, 0x88, 0xb3, 0x68, 0xe5, 0xd1, 0xb0, 0x51, 0x10, 0x3f, 0x71, 0x20, + 0x04, 0x35, 0x01, 0xd8, 0x99, 0x4b, 0x7d, 0x61, 0x4e, 0xad, 0xb0, 0x9e, 0xdf, 0x28, 0x6b, 0x8b, + 0xdc, 0xbe, 0xe3, 0x08, 0x8a, 0x13, 0x14, 0xea, 0x5f, 0x15, 0x58, 0x4a, 0x78, 0x41, 0x78, 0xfc, + 0x36, 0x2c, 0x74, 0x12, 0xf9, 0x26, 0x3d, 0xb2, 0x2a, 0x6d, 0x5f, 0x48, 0xe6, 0x22, 0x4e, 0x51, + 0x22, 0x02, 0x65, 0x2a, 0x25, 0x85, 0x75, 0xb5, 0x99, 0x39, 0x5c, 0xa1, 0x0d, 0xb1, 0xa6, 0x04, + 0x90, 0xe1, 0x58, 0xb2, 0xfa, 0x1f, 
0x45, 0x84, 0x2e, 0xac, 0x34, 0xb4, 0x91, 0xa8, 0x66, 0x45, + 0x1c, 0x79, 0x61, 0x4a, 0x25, 0x5e, 0x53, 0x02, 0xb9, 0x2f, 0x44, 0x09, 0xdc, 0x29, 0xfd, 0xfe, + 0xa3, 0xc6, 0xdc, 0x07, 0xff, 0x5a, 0x9f, 0x53, 0x3f, 0xc9, 0x41, 0xb5, 0x4d, 0x6c, 0xe2, 0x93, + 0x43, 0xcf, 0x17, 0x27, 0xb8, 0x07, 0xa8, 0x43, 0x75, 0x83, 0x1c, 0x11, 0x6a, 0xb9, 0xe6, 0x31, + 0x31, 0x5c, 0xc7, 0x64, 0x22, 0x44, 0x79, 0xed, 0x4b, 0xa3, 0x61, 0x03, 0xed, 0x4d, 0x60, 0xf1, + 0x15, 0x1c, 0xc8, 0x86, 0xaa, 0x47, 0xc5, 0x6f, 0xcb, 0x97, 0x6d, 0x90, 0xa7, 0xdf, 0xdb, 0xd9, + 0xce, 0x7e, 0x94, 0x64, 0xd5, 0x56, 0x46, 0xc3, 0x46, 0x35, 0x05, 0xc2, 0x69, 0xe1, 0xe8, 0x07, + 0xb0, 0xec, 0x52, 0xef, 0x4c, 0x77, 0xda, 0xc4, 0x23, 0x8e, 0x49, 0x1c, 0x9f, 0x89, 0x92, 0x28, + 0x69, 0xab, 0xbc, 0x79, 0x1d, 0x8e, 0xe1, 0xf0, 0x04, 0x35, 0x7a, 0x0c, 0x2b, 0x1e, 0x75, 0x3d, + 0xbd, 0xa3, 0x73, 0x89, 0x47, 0xae, 0x6d, 0x19, 0x03, 0x51, 0x32, 0x65, 0xed, 0xd6, 0x68, 0xd8, + 0x58, 0x39, 0x1a, 0x47, 0x5e, 0x0e, 0x1b, 0xaf, 0x08, 0xd7, 0x71, 0x48, 0x8c, 0xc4, 0x93, 0x62, + 0xd4, 0x7d, 0x28, 0xb5, 0x7b, 0x54, 0x40, 0xd0, 0xf7, 0xa0, 0x64, 0xca, 0xdf, 0xd2, 0xab, 0xaf, + 0x85, 0x9d, 0x3d, 0xa4, 0xb9, 0x1c, 0x36, 0xaa, 0x7c, 0x80, 0x35, 0x43, 0x00, 0x8e, 0x58, 0xd4, + 0x27, 0x50, 0xdd, 0xbd, 0xf0, 0x5c, 0xea, 0x87, 0xf1, 0xfa, 0x2a, 0x14, 0x89, 0x00, 0x08, 0x69, + 0xa5, 0xb8, 0x1d, 0x05, 0x64, 0x58, 0x62, 0xd1, 0xeb, 0x50, 0x20, 0x17, 0xba, 0xe1, 0xcb, 0xbe, + 0x52, 0x95, 0x64, 0x85, 0x5d, 0x0e, 0xc4, 0x01, 0x4e, 0x3d, 0x04, 0xd8, 0x23, 0x91, 0xe8, 0x6d, + 0x58, 0x0a, 0x6b, 0x22, 0x5d, 0xaa, 0x5f, 0x96, 0xcc, 0x4b, 0x38, 0x8d, 0xc6, 0xe3, 0xf4, 0xea, + 0x13, 0x28, 0x8b, 0x72, 0xe6, 0xfd, 0x88, 0x9b, 0x20, 0xaa, 0x59, 0x4a, 0x89, 0x4c, 0x10, 0x14, + 0x38, 0xc0, 0x45, 0x0d, 0x2d, 0x37, 0xad, 0xa1, 0x25, 0xb2, 0xd7, 0x86, 0x6a, 0xc0, 0x1b, 0xf6, + 0xd8, 0x4c, 0x1a, 0x6e, 0x41, 0x29, 0x34, 0x53, 0x6a, 0x89, 0x66, 0x6b, 0x28, 0x08, 0x47, 0x14, + 0x09, 0x6d, 0x67, 0x90, 0x6a, 0x4d, 0xd9, 0x94, 0xbd, 0x09, 0x37, 0x64, 0x73, 0x90, 0xba, 0x96, + 0x24, 0xd9, 0x8d, 0xd0, 0x67, 0x21, 0x3e, 0xa1, 0xe9, 0x97, 0x50, 0x9b, 0x36, 0x90, 0x3f, 0x43, + 0xf3, 0xcc, 0x6e, 0x8a, 0xfa, 0x3b, 0x05, 0x96, 0x93, 0x92, 0xb2, 0x87, 0x2f, 0xbb, 0x92, 0xeb, + 0x47, 0x57, 0xc2, 0x23, 0x7f, 0x54, 0x60, 0x35, 0x75, 0xb4, 0x99, 0x22, 0x3e, 0x83, 0x51, 0xc9, + 0xe4, 0xc8, 0xcf, 0x90, 0x1c, 0xff, 0xc8, 0x41, 0xf5, 0xbe, 0x7e, 0x42, 0xec, 0x63, 0x62, 0x13, + 0xc3, 0x77, 0x29, 0x7a, 0x1f, 0x2a, 0x5d, 0xdd, 0x37, 0xce, 0x04, 0x34, 0x5c, 0x2e, 0xda, 0xd9, + 0xda, 0x5f, 0x4a, 0x52, 0xf3, 0x20, 0x16, 0xb3, 0xeb, 0xf8, 0x74, 0xa0, 0xbd, 0x22, 0x4d, 0xaa, + 0x24, 0x30, 0x38, 0xa9, 0x4d, 0x6c, 0x84, 0xe2, 0x7b, 0xf7, 0xc2, 0xe3, 0xfd, 0x7f, 0xf6, 0x45, + 0x34, 0x65, 0x02, 0x26, 0x3f, 0xef, 0x59, 0x94, 0x74, 0x89, 0xe3, 0xc7, 0x1b, 0xe1, 0xc1, 0x98, + 0x7c, 0x3c, 0xa1, 0x71, 0xed, 0x2e, 0x2c, 0x8f, 0x1b, 0x8f, 0x96, 0x21, 0x7f, 0x4e, 0x06, 0x41, + 0xbc, 0x30, 0xff, 0x89, 0x56, 0xa1, 0xd0, 0xd7, 0xed, 0x9e, 0xac, 0x46, 0x1c, 0x7c, 0xdc, 0xc9, + 0xdd, 0x56, 0xd4, 0x3f, 0x2b, 0x50, 0x9b, 0x66, 0x08, 0xfa, 0x4a, 0x42, 0x90, 0x56, 0x91, 0x56, + 0xe5, 0xdf, 0x25, 0x83, 0x40, 0xea, 0x2e, 0x94, 0x5c, 0x8f, 0xef, 0xf0, 0x2e, 0x95, 0x51, 0x7f, + 0x33, 0x8c, 0xe4, 0xa1, 0x84, 0x5f, 0x0e, 0x1b, 0x37, 0x53, 0xe2, 0x43, 0x04, 0x8e, 0x58, 0x91, + 0x0a, 0x45, 0x61, 0x0f, 0x9f, 0x27, 0x7c, 0xf2, 0x03, 0xef, 0xad, 0x8f, 0x04, 0x04, 0x4b, 0x8c, + 0xfa, 0x3e, 0x94, 0xf8, 0x62, 0x73, 0x40, 0x7c, 0x9d, 0x27, 0x10, 0x23, 0xf6, 0xe9, 0x7d, 0xcb, + 0x39, 0x97, 0xa6, 0x45, 0x09, 0x74, 0x2c, 0xe1, 0x38, 0xa2, 
0xb8, 0xaa, 0xc5, 0xe6, 0x66, 0x6c, + 0xb1, 0x7f, 0xc9, 0x41, 0x85, 0x6b, 0x0f, 0xbb, 0xf6, 0x77, 0xa0, 0x6a, 0x27, 0xcf, 0x24, 0xad, + 0xb8, 0x29, 0x05, 0xa6, 0xb3, 0x14, 0xa7, 0x69, 0x39, 0xf3, 0xa9, 0x45, 0x6c, 0x33, 0x62, 0xce, + 0xa5, 0x99, 0xef, 0x25, 0x91, 0x38, 0x4d, 0xcb, 0x6b, 0xf1, 0x29, 0x8f, 0xb6, 0x9c, 0xbc, 0x51, + 0x2d, 0xfe, 0x90, 0x03, 0x71, 0x80, 0xbb, 0xea, 0xc4, 0xf3, 0xb3, 0x9d, 0x18, 0xdd, 0x81, 0x45, + 0x3e, 0x1e, 0xdd, 0x9e, 0x1f, 0xae, 0x27, 0x05, 0x31, 0x48, 0xd1, 0x68, 0xd8, 0x58, 0x7c, 0x2f, + 0x85, 0xc1, 0x63, 0x94, 0xea, 0x87, 0x00, 0x70, 0x78, 0xf2, 0x33, 0x62, 0x04, 0xd1, 0xba, 0x7e, + 0x29, 0xe7, 0xfd, 0x56, 0xde, 0x05, 0x39, 0x54, 0x3a, 0x24, 0xee, 0xb7, 0x09, 0x1c, 0x4e, 0x51, + 0xa2, 0x16, 0x94, 0xa3, 0x45, 0x5d, 0xf6, 0x92, 0x15, 0xc9, 0x56, 0x8e, 0xb6, 0x79, 0x1c, 0xd3, + 0xa4, 0x52, 0x67, 0xfe, 0xda, 0xd4, 0xd1, 0x20, 0xdf, 0xb3, 0x4c, 0x71, 0xf4, 0xb2, 0xf6, 0x8d, + 0x30, 0xfd, 0x1f, 0xee, 0xb7, 0x2f, 0x87, 0x8d, 0xd7, 0xa6, 0x5d, 0x71, 0xfd, 0x81, 0x47, 0x58, + 0xf3, 0xe1, 0x7e, 0x1b, 0x73, 0xe6, 0xab, 0x82, 0x51, 0x9c, 0x31, 0x18, 0x5b, 0x00, 0xf2, 0xd4, + 0x9c, 0xfb, 0x46, 0x10, 0x88, 0xf0, 0xd2, 0xb2, 0x17, 0x61, 0x70, 0x82, 0x0a, 0x31, 0x58, 0x31, + 0x28, 0x11, 0xbf, 0x79, 0xb8, 0x98, 0xaf, 0x77, 0xbd, 0x5a, 0x49, 0xec, 0x87, 0x5f, 0xcb, 0xd6, + 0x9d, 0x38, 0x9b, 0xf6, 0xaa, 0x54, 0xb3, 0xb2, 0x33, 0x2e, 0x0c, 0x4f, 0xca, 0x47, 0x2e, 0xac, + 0x98, 0x72, 0x5d, 0x8b, 0x95, 0x96, 0x67, 0x56, 0x7a, 0x93, 0x2b, 0x6c, 0x8f, 0x0b, 0xc2, 0x93, + 0xb2, 0xd1, 0x4f, 0x60, 0x2d, 0x04, 0x4e, 0xee, 0xcc, 0x35, 0x10, 0x9e, 0xaa, 0xf3, 0x2d, 0xbe, + 0x3d, 0x95, 0x0a, 0x7f, 0x8a, 0x04, 0x64, 0x42, 0xd1, 0x0e, 0x66, 0x4b, 0x45, 0x34, 0xf6, 0xef, + 0x66, 0x3b, 0x45, 0x9c, 0xfd, 0xcd, 0xe4, 0x4c, 0x89, 0xf6, 0x46, 0x39, 0x4e, 0xa4, 0x6c, 0x74, + 0x01, 0x15, 0xdd, 0x71, 0x5c, 0x5f, 0x0f, 0xb6, 0xf8, 0x05, 0xa1, 0x6a, 0x7b, 0x66, 0x55, 0xdb, + 0xb1, 0x8c, 0xb1, 0x19, 0x96, 0xc0, 0xe0, 0xa4, 0x2a, 0xf4, 0x14, 0x96, 0xdc, 0xa7, 0x0e, 0xa1, + 0x98, 0x9c, 0x12, 0x4a, 0x1c, 0x7e, 0xe5, 0xab, 0x0a, 0xed, 0xdf, 0xcc, 0xa8, 0x3d, 0xc5, 0x1c, + 0xa7, 0x74, 0x1a, 0xce, 0xf0, 0xb8, 0x16, 0x7e, 0xc7, 0x3d, 0xb5, 0x1c, 0xdd, 0xb6, 0x7e, 0x41, + 0x28, 0xab, 0x2d, 0xc6, 0x77, 0xdc, 0x7b, 0x11, 0x14, 0x27, 0x28, 0xd0, 0xb7, 0xa0, 0x62, 0xd8, + 0x3d, 0xe6, 0x13, 0x2a, 0x3a, 0xc4, 0x92, 0xa8, 0xa0, 0xe8, 0x7c, 0x3b, 0x31, 0x0a, 0x27, 0xe9, + 0xd6, 0xbe, 0x0d, 0x95, 0xff, 0x73, 0x2e, 0xf2, 0xb9, 0x3a, 0xee, 0xd0, 0x99, 0xe6, 0xea, 0xdf, + 0x72, 0xb0, 0x98, 0x76, 0x43, 0xb4, 0x8d, 0x29, 0x53, 0x1f, 0x12, 0xc2, 0x5e, 0x99, 0x9f, 0xda, + 0x2b, 0x65, 0x4b, 0x9a, 0xff, 0x2c, 0x2d, 0x69, 0x0b, 0x40, 0xf7, 0xac, 0xb0, 0x1b, 0x05, 0xdd, + 0x2d, 0xea, 0x27, 0xf1, 0xa5, 0x1c, 0x27, 0xa8, 0x78, 0xc0, 0x0c, 0xd7, 0xf1, 0xa9, 0x6b, 0xdb, + 0x84, 0x8a, 0x0e, 0x56, 0x0a, 0x02, 0xb6, 0x13, 0x41, 0x71, 0x82, 0x82, 0xdf, 0x71, 0x4f, 0x6c, + 0xd7, 0x38, 0x17, 0x2e, 0x08, 0xab, 0x4f, 0xf4, 0xae, 0x52, 0x70, 0xc7, 0xd5, 0x26, 0xb0, 0xf8, + 0x0a, 0x0e, 0xf5, 0x10, 0xd2, 0xb7, 0x52, 0x74, 0x37, 0x70, 0x80, 0x12, 0x5d, 0x1b, 0x67, 0x3b, + 0xbc, 0x7a, 0x0b, 0xca, 0xd8, 0x75, 0xfd, 0x23, 0xdd, 0x3f, 0x63, 0xa8, 0x01, 0x05, 0x8f, 0xff, + 0x90, 0x4f, 0x0e, 0xe2, 0x2d, 0x46, 0x60, 0x70, 0x00, 0x57, 0x7f, 0xab, 0xc0, 0xab, 0x53, 0x5f, + 0x00, 0xb8, 0x23, 0x8d, 0xe8, 0x4b, 0x9a, 0x14, 0x39, 0x32, 0xa6, 0xc3, 0x09, 0x2a, 0x3e, 0xfe, + 0x53, 0xcf, 0x06, 0xe3, 0xe3, 0x3f, 0xa5, 0x0d, 0xa7, 0x69, 0xd5, 0xff, 0xe6, 0xa0, 0x78, 0xec, + 0xeb, 0x7e, 0x8f, 0xa1, 0x27, 0x50, 0xe2, 0x55, 0x68, 0xea, 0xbe, 0x2e, 0x34, 0x67, 
0x7e, 0x55, + 0x0b, 0xd7, 0xa8, 0x78, 0xf2, 0x85, 0x10, 0x1c, 0x49, 0xe4, 0x57, 0x5e, 0x26, 0xf4, 0x48, 0xf3, + 0xa2, 0xd6, 0x15, 0x68, 0xc7, 0x12, 0xcb, 0xd7, 0xfe, 0x2e, 0x61, 0x4c, 0xef, 0x84, 0x39, 0x1b, + 0xad, 0xfd, 0x07, 0x01, 0x18, 0x87, 0x78, 0xf4, 0x0e, 0x14, 0x29, 0xd1, 0x59, 0xb4, 0x8c, 0xd4, + 0x43, 0x91, 0x58, 0x40, 0x2f, 0x87, 0x8d, 0x05, 0x29, 0x5c, 0x7c, 0x63, 0x49, 0x8d, 0x1e, 0xc3, + 0x0d, 0x93, 0xf8, 0xba, 0x65, 0x07, 0x3b, 0x48, 0xe6, 0xf7, 0x8d, 0x40, 0x58, 0x3b, 0x60, 0xd5, + 0x2a, 0xdc, 0x26, 0xf9, 0x81, 0x43, 0x81, 0xbc, 0xde, 0x0c, 0xd7, 0x24, 0x22, 0x9f, 0x0b, 0x71, + 0xbd, 0xed, 0xb8, 0x26, 0xc1, 0x02, 0xa3, 0x3e, 0x53, 0xa0, 0x12, 0x48, 0xda, 0xd1, 0x7b, 0x8c, + 0xa0, 0xcd, 0xe8, 0x14, 0x41, 0xb8, 0xc3, 0x01, 0x39, 0xff, 0xde, 0xc0, 0x23, 0x97, 0xc3, 0x46, + 0x59, 0x90, 0xf1, 0x8f, 0xe8, 0x00, 0x09, 0x1f, 0xe5, 0xae, 0xf1, 0xd1, 0xeb, 0x50, 0x10, 0xfb, + 0x9e, 0x74, 0x66, 0xb4, 0xde, 0x89, 0x9d, 0x10, 0x07, 0x38, 0xf5, 0x0f, 0x39, 0xa8, 0xa6, 0x0e, + 0x97, 0x61, 0xc5, 0x8a, 0xee, 0x70, 0xb9, 0x0c, 0xef, 0x02, 0xd3, 0x1f, 0x3a, 0x7f, 0x04, 0x45, + 0x83, 0x9f, 0x2f, 0x7c, 0x69, 0xde, 0x9c, 0x25, 0x14, 0xc2, 0x33, 0x71, 0x26, 0x89, 0x4f, 0x86, + 0xa5, 0x40, 0xb4, 0x07, 0x2b, 0x94, 0xf8, 0x74, 0xb0, 0x7d, 0xea, 0x13, 0x9a, 0x5c, 0x3a, 0x0b, + 0xf1, 0x12, 0x82, 0xc7, 0x09, 0xf0, 0x24, 0x8f, 0x6a, 0xc3, 0x3c, 0x5f, 0x10, 0xb8, 0xdb, 0x59, + 0xea, 0x69, 0x2d, 0x72, 0x7b, 0xc8, 0x1c, 0xe2, 0xb9, 0x77, 0x1c, 0xdd, 0x71, 0x83, 0x64, 0x2f, + 0xc4, 0xde, 0x79, 0xc0, 0x81, 0x38, 0xc0, 0xdd, 0x59, 0xe5, 0x17, 0xd1, 0xdf, 0x3c, 0x6f, 0xcc, + 0x3d, 0x7b, 0xde, 0x98, 0xfb, 0xe8, 0xb9, 0xbc, 0x94, 0xfe, 0x18, 0xca, 0xf1, 0x3a, 0xf2, 0x39, + 0xab, 0x54, 0x7f, 0x0a, 0x25, 0x9e, 0x49, 0xe1, 0x1a, 0x7d, 0xcd, 0xf0, 0x48, 0xb7, 0xf5, 0x5c, + 0x96, 0xb6, 0xae, 0x6e, 0x41, 0xf0, 0xf6, 0xcc, 0x3b, 0xa1, 0xe5, 0x93, 0x6e, 0xaa, 0x13, 0xee, + 0x73, 0x00, 0x0e, 0xe0, 0x89, 0x7b, 0xf8, 0xaf, 0x15, 0x00, 0x71, 0xdf, 0xd8, 0xed, 0xf3, 0x3b, + 0xe2, 0x3a, 0xcc, 0xf3, 0x16, 0x3b, 0x6e, 0x98, 0x28, 0x01, 0x81, 0x41, 0x0f, 0xa1, 0xe8, 0x8a, + 0x35, 0x45, 0x3e, 0x50, 0xbe, 0x35, 0x35, 0x6b, 0xe4, 0xbf, 0x95, 0x9a, 0x58, 0x7f, 0xba, 0x7b, + 0xe1, 0x13, 0x87, 0xdb, 0x18, 0x67, 0x4c, 0xb0, 0xeb, 0x60, 0x29, 0x4c, 0x7b, 0xe3, 0xc5, 0xcb, + 0xfa, 0xdc, 0xc7, 0x2f, 0xeb, 0x73, 0xff, 0x7c, 0x59, 0x9f, 0xfb, 0x60, 0x54, 0x57, 0x5e, 0x8c, + 0xea, 0xca, 0xc7, 0xa3, 0xba, 0xf2, 0xef, 0x51, 0x5d, 0x79, 0xf6, 0x49, 0x7d, 0xee, 0x71, 0xae, + 0xbf, 0xf9, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb1, 0xe8, 0xc1, 0x2f, 0x98, 0x1b, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto new file mode 100644 index 0000000000..9d42018ec1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -0,0 +1,689 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+ +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.apis.meta.v1; + +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// APIGroup contains the name, the supported versions, and the preferred version +// of a group. +message APIGroup { + // name is the name of the group. + optional string name = 1; + + // versions are the versions supported in this group. + repeated GroupVersionForDiscovery versions = 2; + + // preferredVersion is the version preferred by the API server, which + // probably is the storage version. + // +optional + optional GroupVersionForDiscovery preferredVersion = 3; + + // a map of client CIDR to server address that is serving this group. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. + // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4; +} + +// APIGroupList is a list of APIGroup, to allow clients to discover the API at +// /apis. +message APIGroupList { + // groups is a list of APIGroup. + repeated APIGroup groups = 1; +} + +// APIResource specifies the name of a resource and whether it is namespaced. +message APIResource { + // name is the name of the resource. + optional string name = 1; + + // namespaced indicates if a resource is namespaced or not. + optional bool namespaced = 2; + + // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') + optional string kind = 3; + + // verbs is a list of supported kube verbs (this includes get, list, watch, create, + // update, patch, delete, deletecollection, and proxy) + optional Verbs verbs = 4; + + // shortNames is a list of suggested short names of the resource. + repeated string shortNames = 5; +} + +// APIResourceList is a list of APIResource, it is used to expose the name of the +// resources supported in a specific group and version, and if the resource +// is namespaced. +message APIResourceList { + // groupVersion is the group and version this APIResourceList is for. + optional string groupVersion = 1; + + // resources contains the name of the resources and if they are namespaced. + repeated APIResource resources = 2; +} + +// APIVersions lists the versions that are available, to allow clients to +// discover the API at /api, which is the root path of the legacy v1 API. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message APIVersions { + // versions are the api versions that are available. + repeated string versions = 1; + + // a map of client CIDR to server address that is serving this group. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. 
+ // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. + // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 2; +} + +// DeleteOptions may be provided when deleting an API object. +message DeleteOptions { + // The duration in seconds before the object should be deleted. Value must be non-negative integer. + // The value zero indicates delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + // Defaults to a per object value if not specified. zero means delete immediately. + // +optional + optional int64 gracePeriodSeconds = 1; + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. + // +optional + optional Preconditions preconditions = 2; + + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. + // +optional + optional bool orphanDependents = 3; + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // +optional + optional string propagationPolicy = 4; +} + +// Duration is a wrapper around time.Duration which supports correct +// marshaling to YAML and JSON. In particular, it marshals into strings, which +// can be used as map keys in json. +message Duration { + optional int64 duration = 1; +} + +// ExportOptions is the query options to the standard REST get call. +message ExportOptions { + // Should this value be exported. Export strips fields that a user can not specify. + optional bool export = 1; + + // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. + optional bool exact = 2; +} + +// GetOptions is the standard query options to the standard REST get call. +message GetOptions { + // When specified: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + optional string resourceVersion = 1; +} + +// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupKind { + optional string group = 1; + + optional string kind = 2; +} + +// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupResource { + optional string group = 1; + + optional string resource = 2; +} + +// GroupVersion contains the "group" and the "version", which uniquely identifies the API. 
+// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupVersion { + optional string group = 1; + + optional string version = 2; +} + +// GroupVersion contains the "group/version" and "version" string of a version. +// It is made a struct to keep extensibility. +message GroupVersionForDiscovery { + // groupVersion specifies the API group and version in the form "group/version" + optional string groupVersion = 1; + + // version specifies the version in the form of "version". This is to save + // the clients the trouble of splitting the GroupVersion. + optional string version = 2; +} + +// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion +// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupVersionKind { + optional string group = 1; + + optional string version = 2; + + optional string kind = 3; +} + +// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion +// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupVersionResource { + optional string group = 1; + + optional string version = 2; + + optional string resource = 3; +} + +// A label selector is a label query over a set of resources. The result of matchLabels and +// matchExpressions are ANDed. An empty label selector matches all objects. A null +// label selector matches no objects. +message LabelSelector { + // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + // map is equivalent to an element of matchExpressions, whose key field is "key", the + // operator is "In", and the values array contains only "value". The requirements are ANDed. + // +optional + map matchLabels = 1; + + // matchExpressions is a list of label selector requirements. The requirements are ANDed. + // +optional + repeated LabelSelectorRequirement matchExpressions = 2; +} + +// A label selector requirement is a selector that contains values, a key, and an operator that +// relates the key and values. +message LabelSelectorRequirement { + // key is the label key that the selector applies to. + optional string key = 1; + + // operator represents a key's relationship to a set of values. + // Valid operators ard In, NotIn, Exists and DoesNotExist. + optional string operator = 2; + + // values is an array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. This array is replaced during a strategic + // merge patch. + // +optional + repeated string values = 3; +} + +// ListMeta describes metadata that synthetic resources must have, including lists and +// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. +message ListMeta { + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // +optional + optional string selfLink = 1; + + // String that identifies the server's internal version of this object that + // can be used by clients to determine when objects have changed. + // Value must be treated as opaque by clients and passed unmodified back to the server. + // Populated by the system. + // Read-only. 
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + optional string resourceVersion = 2; +} + +// ListOptions is the query options to a standard REST list call. +message ListOptions { + // A selector to restrict the list of returned objects by their labels. + // Defaults to everything. + // +optional + optional string labelSelector = 1; + + // A selector to restrict the list of returned objects by their fields. + // Defaults to everything. + // +optional + optional string fieldSelector = 2; + + // Watch for changes to the described resources and return them as a stream of + // add, update, and remove notifications. Specify resourceVersion. + // +optional + optional bool watch = 3; + + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + // +optional + optional string resourceVersion = 4; + + // Timeout for the list/watch call. + // +optional + optional int64 timeoutSeconds = 5; +} + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +message ObjectMeta { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + optional string name = 1; + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // than the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. + // + // If this field is specified and the generated name exists, the server will + // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // + // Applied only if Name is not specified. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency + // +optional + optional string generateName = 2; + + // Namespace defines the space within each name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // + // Must be a DNS_LABEL. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + optional string namespace = 3; + + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. 
+ // +optional + optional string selfLink = 4; + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // + // Populated by the system. + // Read-only. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + optional string uid = 5; + + // An opaque value that represents the internal version of this object that can + // be used by clients to determine when objects have changed. May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and passed unmodified back to the server. + // They may only be valid for a particular resource or set of resources. + // + // Populated by the system. + // Read-only. + // Value must be treated as opaque by clients and . + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + optional string resourceVersion = 6; + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. Read-only. + // +optional + optional int64 generation = 7; + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. + // + // Populated by the system. + // Read-only. + // Null for lists. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional Time creationTimestamp = 8; + + // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This + // field is set by the server when a graceful deletion is requested by the user, and is not + // directly settable by a client. The resource is expected to be deleted (no longer visible + // from resource lists, and not reachable by name) after the time in this field. Once set, + // this value may not be unset or be set further into the future, although it may be shortened + // or the resource may be deleted prior to this time. For example, a user may request that + // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination + // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard + // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the + // API. In the presence of network partitions, this object may still exist after this + // timestamp, until an administrator or automated process can determine the resource is + // fully terminated. + // If not set, graceful deletion of the object has not been requested. + // + // Populated by the system when a graceful deletion is requested. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional Time deletionTimestamp = 9; + + // Number of seconds allowed for this object to gracefully terminate before + // it will be removed from the system. Only set when deletionTimestamp is also set. + // May only be shortened. + // Read-only. + // +optional + optional int64 deletionGracePeriodSeconds = 10; + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. 
May match selectors of replication controllers + // and services. + // More info: http://kubernetes.io/docs/user-guide/labels + // +optional + map labels = 11; + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + map annotations = 12; + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + repeated OwnerReference ownerReferences = 13; + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + // +optional + repeated string finalizers = 14; + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + // +optional + optional string clusterName = 15; +} + +// OwnerReference contains enough information to let you identify an owning +// object. Currently, an owning object must be in the same namespace, so there +// is no namespace field. +message OwnerReference { + // API version of the referent. + optional string apiVersion = 5; + + // Kind of the referent. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + optional string kind = 1; + + // Name of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + optional string name = 3; + + // UID of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + optional string uid = 4; + + // If true, this reference points to the managing controller. + // +optional + optional bool controller = 6; + + // If true, AND if the owner has the "foregroundDeletion" finalizer, then + // the owner cannot be deleted from the key-value store until this + // reference is removed. + // Defaults to false. + // To set this field, a user needs "delete" permission of the owner, + // otherwise 422 (Unprocessable Entity) will be returned. + // +optional + optional bool blockOwnerDeletion = 7; +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +message Preconditions { + // Specifies the target UID. + // +optional + optional string uid = 1; +} + +// RootPaths lists the paths available at root. +// For example: "/healthz", "/apis". +message RootPaths { + // paths are the paths available at root. + repeated string paths = 1; +} + +// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. +message ServerAddressByClientCIDR { + // The CIDR with which clients can match their IP to figure out the server address that they should use. + optional string clientCIDR = 1; + + // Address of this server, suitable for a client that matches the above CIDR. 
+ // This can be a hostname, hostname:port, IP or IP:port. + optional string serverAddress = 2; +} + +// Status is a return value for calls that don't return other objects. +message Status { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional ListMeta metadata = 1; + + // Status of the operation. + // One of: "Success" or "Failure". + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional string status = 2; + + // A human-readable description of the status of this operation. + // +optional + optional string message = 3; + + // A machine-readable description of why this operation is in the + // "Failure" status. If this value is empty there + // is no information available. A Reason clarifies an HTTP status + // code but does not override it. + // +optional + optional string reason = 4; + + // Extended data associated with the reason. Each reason may define its + // own extended details. This field is optional and the data returned + // is not guaranteed to conform to any schema except that defined by + // the reason type. + // +optional + optional StatusDetails details = 5; + + // Suggested HTTP return code for this status, 0 if not set. + // +optional + optional int32 code = 6; +} + +// StatusCause provides more information about an api.Status failure, including +// cases when multiple errors are encountered. +message StatusCause { + // A machine-readable description of the cause of the error. If this value is + // empty there is no information available. + // +optional + optional string reason = 1; + + // A human-readable description of the cause of the error. This field may be + // presented as-is to a reader. + // +optional + optional string message = 2; + + // The field of the resource that has caused this error, as named by its JSON + // serialization. May include dot and postfix notation for nested attributes. + // Arrays are zero-indexed. Fields may appear more than once in an array of + // causes due to fields having multiple errors. + // Optional. + // + // Examples: + // "name" - the field "name" on the current resource + // "items[0].name" - the field "name" on the first array entry in "items" + // +optional + optional string field = 3; +} + +// StatusDetails is a set of additional properties that MAY be set by the +// server to provide additional information about a response. The Reason +// field of a Status object defines what attributes will be set. Clients +// must ignore fields that do not match the defined type of each attribute, +// and should assume that any attribute may be empty, invalid, or under +// defined. +message StatusDetails { + // The name attribute of the resource associated with the status StatusReason + // (when there is a single name which can be described). + // +optional + optional string name = 1; + + // The group attribute of the resource associated with the status StatusReason. + // +optional + optional string group = 2; + + // The kind attribute of the resource associated with the status StatusReason. + // On some operations may differ from the requested resource Kind. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional string kind = 3; + + // The Causes array includes more details associated with the StatusReason + // failure. Not all StatusReasons may provide detailed causes. 
+ // +optional + repeated StatusCause causes = 4; + + // If specified, the time in seconds before the operation should be retried. + // +optional + optional int32 retryAfterSeconds = 5; +} + +// Time is a wrapper around time.Time which supports correct +// marshaling to YAML and JSON. Wrappers are provided for many +// of the factory methods that the time package offers. +// +// +protobuf.options.marshal=false +// +protobuf.as=Timestamp +// +protobuf.options.(gogoproto.goproto_stringer)=false +message Time { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + optional int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. + optional int32 nanos = 2; +} + +// Timestamp is a struct that is equivalent to Time, but intended for +// protobuf marshalling/unmarshalling. It is generated into a serialization +// that matches Time. Do not use in Go structs. +message Timestamp { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + optional int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. + optional int32 nanos = 2; +} + +// TypeMeta describes an individual object in an API response or request +// with strings representing the type of the object and its API schema version. +// Structures that are versioned or persisted should inline TypeMeta. +message TypeMeta { + // Kind is a string value representing the REST resource this object represents. + // Servers may infer this from the endpoint the client submits requests to. + // Cannot be updated. + // In CamelCase. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional string kind = 1; + + // APIVersion defines the versioned schema of this representation of an object. + // Servers should convert recognized schemas to the latest internal value, and + // may reject unrecognized values. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources + // +optional + optional string apiVersion = 2; +} + +// Verbs masks the value so protobuf can generate +// +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message Verbs { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// Event represents a single event to a watched resource. +// +// +protobuf=true +message WatchEvent { + optional string type = 1; + + // Object is: + // * If Type is Added or Modified: the new state of the object. + // * If Type is Deleted: the state of the object immediately before deletion. + // * If Type is Error: *Status is recommended; other types may make sense + // depending on context. 
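For reference, the Time and Timestamp messages above encode an instant as a (seconds, nanos) pair relative to the Unix epoch. A minimal standalone Go sketch of that convention, using only the standard library; the helper names toEpochPair and fromEpochPair are illustrative, not part of the vendored package:

package main

import (
	"fmt"
	"time"
)

// toEpochPair splits a time.Time into the (seconds, nanos) pair used by the
// Timestamp message: whole seconds since the Unix epoch plus a non-negative
// nanosecond remainder in [0, 999999999].
func toEpochPair(t time.Time) (int64, int32) {
	return t.Unix(), int32(t.Nanosecond())
}

// fromEpochPair reconstructs the same instant in UTC.
func fromEpochPair(seconds int64, nanos int32) time.Time {
	return time.Unix(seconds, int64(nanos)).UTC()
}

func main() {
	now := time.Now().UTC()
	secs, nanos := toEpochPair(now)
	fmt.Println(secs, nanos)
	fmt.Println(fromEpochPair(secs, nanos).Equal(now)) // true
}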
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2; +} + diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go new file mode 100644 index 0000000000..8b6fdef5a4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go @@ -0,0 +1,148 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupResource struct { + Group string `protobuf:"bytes,1,opt,name=group"` + Resource string `protobuf:"bytes,2,opt,name=resource"` +} + +func (gr *GroupResource) String() string { + if len(gr.Group) == 0 { + return gr.Resource + } + return gr.Resource + "." + gr.Group +} + +// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion +// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupVersionResource struct { + Group string `protobuf:"bytes,1,opt,name=group"` + Version string `protobuf:"bytes,2,opt,name=version"` + Resource string `protobuf:"bytes,3,opt,name=resource"` +} + +func (gvr *GroupVersionResource) String() string { + return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") +} + +// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupKind struct { + Group string `protobuf:"bytes,1,opt,name=group"` + Kind string `protobuf:"bytes,2,opt,name=kind"` +} + +func (gk *GroupKind) String() string { + if len(gk.Group) == 0 { + return gk.Kind + } + return gk.Kind + "." + gk.Group +} + +// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion +// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupVersionKind struct { + Group string `protobuf:"bytes,1,opt,name=group"` + Version string `protobuf:"bytes,2,opt,name=version"` + Kind string `protobuf:"bytes,3,opt,name=kind"` +} + +func (gvk GroupVersionKind) String() string { + return gvk.Group + "/" + gvk.Version + ", Kind=" + gvk.Kind +} + +// GroupVersion contains the "group" and the "version", which uniquely identifies the API. 
+// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupVersion struct { + Group string `protobuf:"bytes,1,opt,name=group"` + Version string `protobuf:"bytes,2,opt,name=version"` +} + +// Empty returns true if group and version are empty +func (gv GroupVersion) Empty() bool { + return len(gv.Group) == 0 && len(gv.Version) == 0 +} + +// String puts "group" and "version" into a single "group/version" string. For the legacy v1 +// it returns "v1". +func (gv GroupVersion) String() string { + // special case the internal apiVersion for the legacy kube types + if gv.Empty() { + return "" + } + + // special case of "v1" for backward compatibility + if len(gv.Group) == 0 && gv.Version == "v1" { + return gv.Version + } + if len(gv.Group) > 0 { + return gv.Group + "/" + gv.Version + } + return gv.Version +} + +// MarshalJSON implements the json.Marshaller interface. +func (gv GroupVersion) MarshalJSON() ([]byte, error) { + s := gv.String() + if strings.Count(s, "/") > 1 { + return []byte{}, fmt.Errorf("illegal GroupVersion %v: contains more than one /", s) + } + return json.Marshal(s) +} + +func (gv *GroupVersion) unmarshal(value []byte) error { + var s string + if err := json.Unmarshal(value, &s); err != nil { + return err + } + parsed, err := schema.ParseGroupVersion(s) + if err != nil { + return err + } + gv.Group, gv.Version = parsed.Group, parsed.Version + return nil +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (gv *GroupVersion) UnmarshalJSON(value []byte) error { + return gv.unmarshal(value) +} + +// UnmarshalTEXT implements the Ugorji's encoding.TextUnmarshaler interface. +func (gv *GroupVersion) UnmarshalText(value []byte) error { + return gv.unmarshal(value) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go new file mode 100644 index 0000000000..b62dd9ee02 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -0,0 +1,234 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
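For reference, a minimal Go sketch (illustrative, not part of the vendored code) of how the stringer and JSON methods defined above behave, assuming the vendored package is imported as metav1:

package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// GroupVersionKind prints as "group/version, Kind=Kind".
	gvk := metav1.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Ingress"}
	fmt.Println(gvk.String()) // extensions/v1beta1, Kind=Ingress

	// GroupVersion special-cases the legacy core API: an empty group with
	// version "v1" renders as just "v1".
	legacy := metav1.GroupVersion{Group: "", Version: "v1"}
	grouped := metav1.GroupVersion{Group: "extensions", Version: "v1beta1"}
	fmt.Println(legacy.String(), grouped.String()) // v1 extensions/v1beta1

	// MarshalJSON/UnmarshalJSON round-trip through the single "group/version" string.
	b, _ := json.Marshal(grouped)
	fmt.Println(string(b)) // "extensions/v1beta1"

	var gv metav1.GroupVersion
	_ = json.Unmarshal([]byte(`"extensions/v1beta1"`), &gv)
	fmt.Printf("%+v\n", gv) // {Group:extensions Version:v1beta1}
}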
+*/ + +package v1 + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" +) + +// LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements +// labels.Selector +// Note: This function should be kept in sync with the selector methods in pkg/labels/selector.go +func LabelSelectorAsSelector(ps *LabelSelector) (labels.Selector, error) { + if ps == nil { + return labels.Nothing(), nil + } + if len(ps.MatchLabels)+len(ps.MatchExpressions) == 0 { + return labels.Everything(), nil + } + selector := labels.NewSelector() + for k, v := range ps.MatchLabels { + r, err := labels.NewRequirement(k, selection.Equals, []string{v}) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + for _, expr := range ps.MatchExpressions { + var op selection.Operator + switch expr.Operator { + case LabelSelectorOpIn: + op = selection.In + case LabelSelectorOpNotIn: + op = selection.NotIn + case LabelSelectorOpExists: + op = selection.Exists + case LabelSelectorOpDoesNotExist: + op = selection.DoesNotExist + default: + return nil, fmt.Errorf("%q is not a valid pod selector operator", expr.Operator) + } + r, err := labels.NewRequirement(expr.Key, op, append([]string(nil), expr.Values...)) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + return selector, nil +} + +// LabelSelectorAsMap converts the LabelSelector api type into a map of strings, ie. the +// original structure of a label selector. Operators that cannot be converted into plain +// labels (Exists, DoesNotExist, NotIn, and In with more than one value) will result in +// an error. +func LabelSelectorAsMap(ps *LabelSelector) (map[string]string, error) { + if ps == nil { + return nil, nil + } + selector := map[string]string{} + for k, v := range ps.MatchLabels { + selector[k] = v + } + for _, expr := range ps.MatchExpressions { + switch expr.Operator { + case LabelSelectorOpIn: + if len(expr.Values) != 1 { + return selector, fmt.Errorf("operator %q without a single value cannot be converted into the old label selector format", expr.Operator) + } + // Should we do anything in case this will override a previous key-value pair? + selector[expr.Key] = expr.Values[0] + case LabelSelectorOpNotIn, LabelSelectorOpExists, LabelSelectorOpDoesNotExist: + return selector, fmt.Errorf("operator %q cannot be converted into the old label selector format", expr.Operator) + default: + return selector, fmt.Errorf("%q is not a valid selector operator", expr.Operator) + } + } + return selector, nil +} + +// ParseToLabelSelector parses a string representing a selector into a LabelSelector object. 
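For reference, a short usage sketch (illustrative, not part of the vendored code) of LabelSelectorAsSelector and LabelSelectorAsMap, assuming the vendored package is imported as metav1 and the LabelSelector types come from the same package; the label keys and values are made up for the example:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	ls := &metav1.LabelSelector{
		MatchLabels: map[string]string{"app": "nginx"},
		MatchExpressions: []metav1.LabelSelectorRequirement{
			{Key: "tier", Operator: metav1.LabelSelectorOpIn, Values: []string{"frontend", "backend"}},
		},
	}

	// Convert the API LabelSelector into a labels.Selector that can match
	// label sets. A nil selector converts to labels.Nothing(), an empty one
	// to labels.Everything(), per the function above.
	sel, err := metav1.LabelSelectorAsSelector(ls)
	if err != nil {
		panic(err)
	}
	fmt.Println(sel.Matches(labels.Set{"app": "nginx", "tier": "frontend"})) // true
	fmt.Println(sel.Matches(labels.Set{"app": "nginx", "tier": "cache"}))    // false

	// LabelSelectorAsMap only handles plain equality; the In expression above
	// would make it fail, so convert the MatchLabels portion alone.
	m, err := metav1.LabelSelectorAsMap(&metav1.LabelSelector{MatchLabels: ls.MatchLabels})
	fmt.Println(m, err) // map[app:nginx] <nil>
}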
+// Note: This function should be kept in sync with the parser in pkg/labels/selector.go +func ParseToLabelSelector(selector string) (*LabelSelector, error) { + reqs, err := labels.ParseToRequirements(selector) + if err != nil { + return nil, fmt.Errorf("couldn't parse the selector string \"%s\": %v", selector, err) + } + + labelSelector := &LabelSelector{ + MatchLabels: map[string]string{}, + MatchExpressions: []LabelSelectorRequirement{}, + } + for _, req := range reqs { + var op LabelSelectorOperator + switch req.Operator() { + case selection.Equals, selection.DoubleEquals: + vals := req.Values() + if vals.Len() != 1 { + return nil, fmt.Errorf("equals operator must have exactly one value") + } + val, ok := vals.PopAny() + if !ok { + return nil, fmt.Errorf("equals operator has exactly one value but it cannot be retrieved") + } + labelSelector.MatchLabels[req.Key()] = val + continue + case selection.In: + op = LabelSelectorOpIn + case selection.NotIn: + op = LabelSelectorOpNotIn + case selection.Exists: + op = LabelSelectorOpExists + case selection.DoesNotExist: + op = LabelSelectorOpDoesNotExist + case selection.GreaterThan, selection.LessThan: + // Adding a separate case for these operators to indicate that this is deliberate + return nil, fmt.Errorf("%q isn't supported in label selectors", req.Operator()) + default: + return nil, fmt.Errorf("%q is not a valid label selector operator", req.Operator()) + } + labelSelector.MatchExpressions = append(labelSelector.MatchExpressions, LabelSelectorRequirement{ + Key: req.Key(), + Operator: op, + Values: req.Values().List(), + }) + } + return labelSelector, nil +} + +// SetAsLabelSelector converts the labels.Set object into a LabelSelector api object. +func SetAsLabelSelector(ls labels.Set) *LabelSelector { + if ls == nil { + return nil + } + + selector := &LabelSelector{ + MatchLabels: make(map[string]string), + } + for label, value := range ls { + selector.MatchLabels[label] = value + } + + return selector +} + +// FormatLabelSelector convert labelSelector into plain string +func FormatLabelSelector(labelSelector *LabelSelector) string { + selector, err := LabelSelectorAsSelector(labelSelector) + if err != nil { + return "" + } + + l := selector.String() + if len(l) == 0 { + l = "" + } + return l +} + +func ExtractGroupVersions(l *APIGroupList) []string { + var groupVersions []string + for _, g := range l.Groups { + for _, gv := range g.Versions { + groupVersions = append(groupVersions, gv.GroupVersion) + } + } + return groupVersions +} + +// HasAnnotation returns a bool if passed in annotation exists +func HasAnnotation(obj ObjectMeta, ann string) bool { + _, found := obj.Annotations[ann] + return found +} + +// SetMetaDataAnnotation sets the annotation and value +func SetMetaDataAnnotation(obj *ObjectMeta, ann string, value string) { + if obj.Annotations == nil { + obj.Annotations = make(map[string]string) + } + obj.Annotations[ann] = value +} + +// SingleObject returns a ListOptions for watching a single object. +func SingleObject(meta ObjectMeta) ListOptions { + return ListOptions{ + FieldSelector: fields.OneTermEqualSelector("metadata.name", meta.Name).String(), + ResourceVersion: meta.ResourceVersion, + } +} + +// NewDeleteOptions returns a DeleteOptions indicating the resource should +// be deleted within the specified grace period. Use zero to indicate +// immediate deletion. If you would prefer to use the default grace period, +// use &metav1.DeleteOptions{} directly. 
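For reference, a small sketch (illustrative, not part of the vendored code) of the string-to-selector round trip and the annotation helpers defined above, assuming the vendored package is imported as metav1; the selector string and annotation key are example values:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Parse the command-line style selector string into the structured
	// LabelSelector API type, then format it back to a string.
	ls, err := metav1.ParseToLabelSelector("app=nginx,tier in (frontend,backend)")
	if err != nil {
		panic(err)
	}
	fmt.Println(metav1.FormatLabelSelector(ls))

	// The annotation helpers operate on ObjectMeta directly and lazily
	// allocate the annotations map.
	meta := metav1.ObjectMeta{Name: "demo"}
	metav1.SetMetaDataAnnotation(&meta, "example.com/owner", "ingress-controller")
	fmt.Println(metav1.HasAnnotation(meta, "example.com/owner")) // true
}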
+func NewDeleteOptions(grace int64) *DeleteOptions { + return &DeleteOptions{GracePeriodSeconds: &grace} +} + +// NewPreconditionDeleteOptions returns a DeleteOptions with a UID precondition set. +func NewPreconditionDeleteOptions(uid string) *DeleteOptions { + u := types.UID(uid) + p := Preconditions{UID: &u} + return &DeleteOptions{Preconditions: &p} +} + +// NewUIDPreconditions returns a Preconditions with UID set. +func NewUIDPreconditions(uid string) *Preconditions { + u := types.UID(uid) + return &Preconditions{UID: &u} +} + +// HasObjectMetaSystemFieldValues returns true if fields that are managed by the system on ObjectMeta have values. +func HasObjectMetaSystemFieldValues(meta *ObjectMeta) bool { + return !meta.CreationTimestamp.Time.IsZero() || + len(meta.UID) != 0 +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go new file mode 100644 index 0000000000..8b4c0423e6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go @@ -0,0 +1,75 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// Clones the given selector and returns a new selector with the given key and value added. +// Returns the given selector, if labelKey is empty. +func CloneSelectorAndAddLabel(selector *LabelSelector, labelKey, labelValue string) *LabelSelector { + if labelKey == "" { + // Don't need to add a label. + return selector + } + + // Clone. + newSelector := new(LabelSelector) + + // TODO(madhusudancs): Check if you can use deepCopy_extensions_LabelSelector here. + newSelector.MatchLabels = make(map[string]string) + if selector.MatchLabels != nil { + for key, val := range selector.MatchLabels { + newSelector.MatchLabels[key] = val + } + } + newSelector.MatchLabels[labelKey] = labelValue + + if selector.MatchExpressions != nil { + newMExps := make([]LabelSelectorRequirement, len(selector.MatchExpressions)) + for i, me := range selector.MatchExpressions { + newMExps[i].Key = me.Key + newMExps[i].Operator = me.Operator + if me.Values != nil { + newMExps[i].Values = make([]string, len(me.Values)) + copy(newMExps[i].Values, me.Values) + } else { + newMExps[i].Values = nil + } + } + newSelector.MatchExpressions = newMExps + } else { + newSelector.MatchExpressions = nil + } + + return newSelector +} + +// AddLabelToSelector returns a selector with the given key and value added to the given selector's MatchLabels. +func AddLabelToSelector(selector *LabelSelector, labelKey, labelValue string) *LabelSelector { + if labelKey == "" { + // Don't need to add a label. 
+ return selector + } + if selector.MatchLabels == nil { + selector.MatchLabels = make(map[string]string) + } + selector.MatchLabels[labelKey] = labelValue + return selector +} + +// SelectorHasLabel checks if the given selector contains the given label key in its MatchLabels +func SelectorHasLabel(selector *LabelSelector, labelKey string) bool { + return len(selector.MatchLabels[labelKey]) > 0 +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go new file mode 100644 index 0000000000..108e34f0f5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go @@ -0,0 +1,209 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +// ObjectMetaFor returns a pointer to a provided object's ObjectMeta. +// TODO: allow runtime.Unknown to extract this object +// TODO: Remove this function and use meta.ObjectMetaAccessor() instead. +func ObjectMetaFor(obj runtime.Object) (*ObjectMeta, error) { + v, err := conversion.EnforcePtr(obj) + if err != nil { + return nil, err + } + var meta *ObjectMeta + err = runtime.FieldPtr(v, "ObjectMeta", &meta) + return meta, err +} + +// ListMetaFor returns a pointer to a provided object's ListMeta, +// or an error if the object does not have that pointer. +// TODO: allow runtime.Unknown to extract this object +// TODO: Remove this function and use meta.ObjectMetaAccessor() instead. +func ListMetaFor(obj runtime.Object) (*ListMeta, error) { + v, err := conversion.EnforcePtr(obj) + if err != nil { + return nil, err + } + var meta *ListMeta + err = runtime.FieldPtr(v, "ListMeta", &meta) + return meta, err +} + +// TODO: move this, Object, List, and Type to a different package +type ObjectMetaAccessor interface { + GetObjectMeta() Object +} + +// Object lets you work with object metadata from any of the versioned or +// internal API objects. Attempting to set or retrieve a field on an object that does +// not support that field (Name, UID, Namespace on lists) will be a no-op and return +// a default value. 
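For reference, a brief sketch (illustrative, not part of the vendored code) of the selector label helpers from labels.go above, assuming the vendored package is imported as metav1; the label keys and values are made up:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	base := &metav1.LabelSelector{MatchLabels: map[string]string{"app": "nginx"}}

	// CloneSelectorAndAddLabel leaves the original selector untouched.
	cloned := metav1.CloneSelectorAndAddLabel(base, "pod-template-hash", "12345")
	fmt.Println(metav1.SelectorHasLabel(base, "pod-template-hash"))   // false
	fmt.Println(metav1.SelectorHasLabel(cloned, "pod-template-hash")) // true

	// AddLabelToSelector mutates the selector it is given.
	metav1.AddLabelToSelector(base, "release", "stable")
	fmt.Println(base.MatchLabels) // map[app:nginx release:stable]
}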
+type Object interface { + GetNamespace() string + SetNamespace(namespace string) + GetName() string + SetName(name string) + GetGenerateName() string + SetGenerateName(name string) + GetUID() types.UID + SetUID(uid types.UID) + GetResourceVersion() string + SetResourceVersion(version string) + GetSelfLink() string + SetSelfLink(selfLink string) + GetCreationTimestamp() Time + SetCreationTimestamp(timestamp Time) + GetDeletionTimestamp() *Time + SetDeletionTimestamp(timestamp *Time) + GetLabels() map[string]string + SetLabels(labels map[string]string) + GetAnnotations() map[string]string + SetAnnotations(annotations map[string]string) + GetFinalizers() []string + SetFinalizers(finalizers []string) + GetOwnerReferences() []OwnerReference + SetOwnerReferences([]OwnerReference) + GetClusterName() string + SetClusterName(clusterName string) +} + +// ListMetaAccessor retrieves the list interface from an object +type ListMetaAccessor interface { + GetListMeta() List +} + +// List lets you work with list metadata from any of the versioned or +// internal API objects. Attempting to set or retrieve a field on an object that does +// not support that field will be a no-op and return a default value. +// TODO: move this, and TypeMeta and ListMeta, to a different package +type List interface { + GetResourceVersion() string + SetResourceVersion(version string) + GetSelfLink() string + SetSelfLink(selfLink string) +} + +// Type exposes the type and APIVersion of versioned or internal API objects. +// TODO: move this, and TypeMeta and ListMeta, to a different package +type Type interface { + GetAPIVersion() string + SetAPIVersion(version string) + GetKind() string + SetKind(kind string) +} + +func (meta *ListMeta) GetResourceVersion() string { return meta.ResourceVersion } +func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } +func (meta *ListMeta) GetSelfLink() string { return meta.SelfLink } +func (meta *ListMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } + +func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj } + +// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta +func (obj *TypeMeta) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} + +// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta +func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *ListMeta) GetListMeta() List { return obj } + +func (obj *ObjectMeta) GetObjectMeta() Object { return obj } + +// Namespace implements metav1.Object for any object with an ObjectMeta typed field. Allows +// fast, direct access to metadata fields for API objects. 
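For reference, a sketch (illustrative, not part of the vendored code) of writing a helper against the metav1.Object interface above so it works for any API type that embeds ObjectMeta; *ObjectMeta itself satisfies the interface via the accessor methods that follow, so it stands in for a real resource here:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ensureLabel adds a label to anything that satisfies metav1.Object, without
// knowing the concrete API type.
func ensureLabel(obj metav1.Object, key, value string) {
	current := obj.GetLabels()
	if current == nil {
		current = map[string]string{}
	}
	current[key] = value
	obj.SetLabels(current)
}

func main() {
	meta := &metav1.ObjectMeta{Name: "demo", Namespace: "default"}
	ensureLabel(meta, "managed-by", "ingress-controller")
	fmt.Println(meta.GetName(), meta.GetLabels())
}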
+func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace } +func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace } +func (meta *ObjectMeta) GetName() string { return meta.Name } +func (meta *ObjectMeta) SetName(name string) { meta.Name = name } +func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName } +func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName } +func (meta *ObjectMeta) GetUID() types.UID { return meta.UID } +func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid } +func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion } +func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } +func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink } +func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } +func (meta *ObjectMeta) GetCreationTimestamp() Time { return meta.CreationTimestamp } +func (meta *ObjectMeta) SetCreationTimestamp(creationTimestamp Time) { + meta.CreationTimestamp = creationTimestamp +} +func (meta *ObjectMeta) GetDeletionTimestamp() *Time { return meta.DeletionTimestamp } +func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *Time) { + meta.DeletionTimestamp = deletionTimestamp +} +func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels } +func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels } +func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations } +func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations } +func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } +func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } + +func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference { + ret := make([]OwnerReference, len(meta.OwnerReferences)) + for i := 0; i < len(meta.OwnerReferences); i++ { + ret[i].Kind = meta.OwnerReferences[i].Kind + ret[i].Name = meta.OwnerReferences[i].Name + ret[i].UID = meta.OwnerReferences[i].UID + ret[i].APIVersion = meta.OwnerReferences[i].APIVersion + if meta.OwnerReferences[i].Controller != nil { + value := *meta.OwnerReferences[i].Controller + ret[i].Controller = &value + } + if meta.OwnerReferences[i].BlockOwnerDeletion != nil { + value := *meta.OwnerReferences[i].BlockOwnerDeletion + ret[i].BlockOwnerDeletion = &value + } + } + return ret +} + +func (meta *ObjectMeta) SetOwnerReferences(references []OwnerReference) { + newReferences := make([]OwnerReference, len(references)) + for i := 0; i < len(references); i++ { + newReferences[i].Kind = references[i].Kind + newReferences[i].Name = references[i].Name + newReferences[i].UID = references[i].UID + newReferences[i].APIVersion = references[i].APIVersion + if references[i].Controller != nil { + value := *references[i].Controller + newReferences[i].Controller = &value + } + if references[i].BlockOwnerDeletion != nil { + value := *references[i].BlockOwnerDeletion + newReferences[i].BlockOwnerDeletion = &value + } + } + meta.OwnerReferences = newReferences +} + +func (meta *ObjectMeta) GetClusterName() string { + return meta.ClusterName +} +func (meta *ObjectMeta) SetClusterName(clusterName string) { + meta.ClusterName = clusterName +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go new file mode 100644 index 
0000000000..8645d1abc7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go @@ -0,0 +1,82 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "meta.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// WatchEventKind is name reserved for serializing watch events. +const WatchEventKind = "WatchEvent" + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// AddToGroupVersion registers common meta types into schemas. +func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) { + scheme.AddKnownTypeWithName(groupVersion.WithKind(WatchEventKind), &WatchEvent{}) + scheme.AddKnownTypeWithName( + schema.GroupVersion{Group: groupVersion.Group, Version: runtime.APIVersionInternal}.WithKind(WatchEventKind), + &InternalEvent{}, + ) + // Supports legacy code paths, most callers should use metav1.ParameterCodec for now + scheme.AddKnownTypes(groupVersion, + &ListOptions{}, + &ExportOptions{}, + &GetOptions{}, + &DeleteOptions{}, + ) + scheme.AddConversionFuncs( + Convert_versioned_Event_to_watch_Event, + Convert_versioned_InternalEvent_to_versioned_Event, + Convert_watch_Event_to_versioned_Event, + Convert_versioned_Event_to_versioned_InternalEvent, + ) + + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. + scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) + AddConversionFuncs(scheme) + RegisterDefaults(scheme) +} + +// scheme is the registry for the common types that adhere to the meta v1 API spec. +var scheme = runtime.NewScheme() + +// ParameterCodec knows about query parameters used with the meta v1 API spec. +var ParameterCodec = runtime.NewParameterCodec(scheme) + +func init() { + scheme.AddUnversionedTypes(SchemeGroupVersion, + &ListOptions{}, + &ExportOptions{}, + &GetOptions{}, + &DeleteOptions{}, + ) + + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. + scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) + RegisterDefaults(scheme) +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go similarity index 96% rename from vendor/k8s.io/kubernetes/pkg/api/unversioned/time.go rename to vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index 636e24a3ba..a1e01f3443 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -14,13 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. 
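For reference, a minimal sketch (illustrative, not part of the vendored code) of calling AddToGroupVersion from register.go above to register the shared meta v1 types into a scheme for some group/version, the way API group installers do; the group/version used is an example:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Register ListOptions, DeleteOptions, WatchEvent, etc. for a group/version.
	scheme := runtime.NewScheme()
	gv := schema.GroupVersion{Group: "extensions", Version: "v1beta1"}
	metav1.AddToGroupVersion(scheme, gv)

	fmt.Println(scheme.Recognizes(gv.WithKind("DeleteOptions"))) // true
}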
*/ -package unversioned +package v1 import ( "encoding/json" "time" - "k8s.io/kubernetes/pkg/genericapiserver/openapi/common" + "k8s.io/apimachinery/pkg/openapi" "github.com/go-openapi/spec" "github.com/google/gofuzz" @@ -145,8 +145,8 @@ func (t Time) MarshalJSON() ([]byte, error) { return json.Marshal(t.UTC().Format(time.RFC3339)) } -func (_ Time) OpenAPIDefinition() common.OpenAPIDefinition { - return common.OpenAPIDefinition{ +func (_ Time) OpenAPIDefinition() openapi.OpenAPIDefinition { + return openapi.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ Type: []string{"string"}, diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/api/unversioned/time_proto.go rename to vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go index ba25e9164f..aea28e410b 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/time_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package unversioned +package v1 import ( "time" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go new file mode 100644 index 0000000000..c6cf1fab88 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -0,0 +1,792 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package unversioned contains API types that are common to all versions. +// +// The package contains two categories of types: +// - external (serialized) types that lack their own version (e.g TypeMeta) +// - internal (never-serialized) types that are needed by several different +// api groups, and so live here, to avoid duplication and/or import loops +// (e.g. LabelSelector). +// In the future, we will probably move these categories of objects into +// separate packages. +package v1 + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/types" +) + +// TypeMeta describes an individual object in an API response or request +// with strings representing the type of the object and its API schema version. +// Structures that are versioned or persisted should inline TypeMeta. +type TypeMeta struct { + // Kind is a string value representing the REST resource this object represents. + // Servers may infer this from the endpoint the client submits requests to. + // Cannot be updated. + // In CamelCase. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` + + // APIVersion defines the versioned schema of this representation of an object. + // Servers should convert recognized schemas to the latest internal value, and + // may reject unrecognized values. 
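For reference, a small sketch (illustrative, not part of the vendored code) of the RFC3339 JSON behaviour implemented by the Time wrapper in the renamed time.go above, assuming the vendored package is imported as metav1:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// metav1.Time wraps time.Time and marshals to an RFC3339 string in UTC,
	// which is how fields such as creationTimestamp appear on the wire.
	t := metav1.Time{Time: time.Date(2017, time.March, 1, 12, 0, 0, 0, time.UTC)}

	b, err := json.Marshal(t)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "2017-03-01T12:00:00Z"

	var decoded metav1.Time
	if err := json.Unmarshal(b, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Time.Equal(t.Time)) // true
}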
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` +} + +// ListMeta describes metadata that synthetic resources must have, including lists and +// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. +type ListMeta struct { + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // +optional + SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,1,opt,name=selfLink"` + + // String that identifies the server's internal version of this object that + // can be used by clients to determine when objects have changed. + // Value must be treated as opaque by clients and passed unmodified back to the server. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` +} + +// These are internal finalizer values for Kubernetes-like APIs; each must be a qualified name unless defined here +const ( + FinalizerOrphanDependents string = "orphan" + FinalizerDeleteDependents string = "foregroundDeletion" +) + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +type ObjectMeta struct { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // than the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. + // + // If this field is specified and the generated name exists, the server will + // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // + // Applied only if Name is not specified. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency + // +optional + GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` + + // Namespace defines the space within which each name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // + // Must be a DNS_LABEL. + // Cannot be updated. 
+ // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` + + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // +optional + SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"` + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // + // Populated by the system. + // Read-only. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` + + // An opaque value that represents the internal version of this object that can + // be used by clients to determine when objects have changed. May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and pass them unmodified back to the server. + // They may only be valid for a particular resource or set of resources. + // + // Populated by the system. + // Read-only. + // Value must be treated as opaque by clients. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. Read-only. + // +optional + Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"` + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. + // + // Populated by the system. + // Read-only. + // Null for lists. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + CreationTimestamp Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"` + + // DeletionTimestamp is the RFC 3339 date and time at which this resource will be deleted. This + // field is set by the server when a graceful deletion is requested by the user, and is not + // directly settable by a client. The resource is expected to be deleted (no longer visible + // from resource lists, and not reachable by name) after the time in this field. Once set, + // this value may not be unset or be set further into the future, although it may be shortened + // or the resource may be deleted prior to this time. For example, a user may request that + // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination + // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard + // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the + // API. In the presence of network partitions, this object may still exist after this + // timestamp, until an administrator or automated process can determine the resource is + // fully terminated. + // If not set, graceful deletion of the object has not been requested. + // + // Populated by the system when a graceful deletion is requested. 
+ // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + DeletionTimestamp *Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` + + // Number of seconds allowed for this object to gracefully terminate before + // it will be removed from the system. Only set when deletionTimestamp is also set. + // May only be shortened. + // Read-only. + // +optional + DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: http://kubernetes.io/docs/user-guide/labels + // +optional + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + // +optional + Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"` + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + // +optional + ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"` +} + +const ( + // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients + NamespaceDefault string = "default" + // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces + NamespaceAll string = "" + // NamespaceNone is the argument for a context when there is no namespace. + NamespaceNone string = "" + // NamespaceSystem is the system namespace where we place system components. + NamespaceSystem string = "kube-system" + // NamespacePublic is the namespace where we place public info (ConfigMaps) + NamespacePublic string = "kube-public" +) + +// OwnerReference contains enough information to let you identify an owning +// object. Currently, an owning object must be in the same namespace, so there +// is no namespace field. 
+type OwnerReference struct { + // API version of the referent. + APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` + // Kind of the referent. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // Name of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` + // UID of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + UID types.UID `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + // If true, this reference points to the managing controller. + // +optional + Controller *bool `json:"controller,omitempty" protobuf:"varint,6,opt,name=controller"` + // If true, AND if the owner has the "foregroundDeletion" finalizer, then + // the owner cannot be deleted from the key-value store until this + // reference is removed. + // Defaults to false. + // To set this field, a user needs "delete" permission of the owner, + // otherwise 422 (Unprocessable Entity) will be returned. + // +optional + BlockOwnerDeletion *bool `json:"blockOwnerDeletion,omitempty" protobuf:"varint,7,opt,name=blockOwnerDeletion"` +} + +// ListOptions is the query options to a standard REST list call. +type ListOptions struct { + TypeMeta `json:",inline"` + + // A selector to restrict the list of returned objects by their labels. + // Defaults to everything. + // +optional + LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` + // A selector to restrict the list of returned objects by their fields. + // Defaults to everything. + // +optional + FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"` + // Watch for changes to the described resources and return them as a stream of + // add, update, and remove notifications. Specify resourceVersion. + // +optional + Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"` + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"` + // Timeout for the list/watch call. + // +optional + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"` +} + +// ExportOptions is the query options to the standard REST get call. +type ExportOptions struct { + TypeMeta `json:",inline"` + // Should this value be exported. Export strips fields that a user can not specify. + Export bool `json:"export" protobuf:"varint,1,opt,name=export"` + // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. + Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"` +} + +// GetOptions is the standard query options to the standard REST get call. 
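For reference, a sketch (illustrative, not part of the vendored code) of wiring up the OwnerReference struct above on an ObjectMeta, assuming the vendored package is imported as metav1; the names and the UID value are made up for the example:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	isController := true
	blockDeletion := true

	// A dependent object pointing back at its managing controller. With
	// blockOwnerDeletion set, foreground deletion of the owner waits for
	// this dependent to be removed first.
	ref := metav1.OwnerReference{
		APIVersion:         "extensions/v1beta1",
		Kind:               "Deployment",
		Name:               "nginx-ingress-controller",
		UID:                types.UID("11111111-2222-3333-4444-555555555555"),
		Controller:         &isController,
		BlockOwnerDeletion: &blockDeletion,
	}

	meta := metav1.ObjectMeta{Name: "nginx-ingress-controller-abc12"}
	meta.SetOwnerReferences([]metav1.OwnerReference{ref})

	// GetOwnerReferences returns a deep copy, including the bool pointers.
	got := meta.GetOwnerReferences()[0]
	fmt.Println(got.Name, *got.Controller)
}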
+type GetOptions struct { + TypeMeta `json:",inline"` + // When specified: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,1,opt,name=resourceVersion"` +} + +// DeletionPropagation decides if a deletion will propagate to the dependents of +// the object, and how the garbage collector will handle the propagation. +type DeletionPropagation string + +const ( + // Orphans the dependents. + DeletePropagationOrphan DeletionPropagation = "Orphan" + // Deletes the object from the key-value store; the garbage collector will + // delete the dependents in the background. + DeletePropagationBackground DeletionPropagation = "Background" + // The object exists in the key-value store until the garbage collector + // deletes all the dependents whose ownerReference.blockOwnerDeletion=true + // from the key-value store. The API server will put the "foregroundDeletion" + // finalizer on the object, and set its deletionTimestamp. This policy is + // cascading, i.e., the dependents will be deleted with Foreground. + DeletePropagationForeground DeletionPropagation = "Foreground" +) + +// DeleteOptions may be provided when deleting an API object. +type DeleteOptions struct { + TypeMeta `json:",inline"` + + // The duration in seconds before the object should be deleted. Value must be a non-negative integer. + // The value zero indicates delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + // Defaults to a per object value if not specified. Zero means delete immediately. + // +optional + GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=gracePeriodSeconds"` + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. + // +optional + Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"` + + // Deprecated: please use the PropagationPolicy; this field will be deprecated in 1.7. + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. + // +optional + OrphanDependents *bool `json:"orphanDependents,omitempty" protobuf:"varint,3,opt,name=orphanDependents"` + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // +optional + PropagationPolicy *DeletionPropagation `json:"propagationPolicy,omitempty" protobuf:"varint,4,opt,name=propagationPolicy"` +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions struct { + // Specifies the target UID. + // +optional + UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` +} + +// Status is a return value for calls that don't return other objects. +type Status struct { + TypeMeta `json:",inline"` + // Standard list metadata. 
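For reference, a short sketch (illustrative, not part of the vendored code) of building DeleteOptions with the propagation policy defined above, plus the NewDeleteOptions helper from helpers.go, assuming the vendored package is imported as metav1:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// NewDeleteOptions(0) requests immediate deletion (zero grace period).
	immediate := metav1.NewDeleteOptions(0)
	fmt.Println(*immediate.GracePeriodSeconds) // 0

	// A more explicit variant: a 30 second grace period plus foreground
	// cascading deletion, so the object stays visible until dependents with
	// blockOwnerDeletion=true are gone. PropagationPolicy and
	// OrphanDependents are mutually exclusive.
	grace := int64(30)
	policy := metav1.DeletePropagationForeground
	opts := metav1.DeleteOptions{
		GracePeriodSeconds: &grace,
		PropagationPolicy:  &policy,
	}
	fmt.Println(*opts.GracePeriodSeconds, *opts.PropagationPolicy)
}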
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Status of the operation. + // One of: "Success" or "Failure". + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status string `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // A human-readable description of the status of this operation. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` + // A machine-readable description of why this operation is in the + // "Failure" status. If this value is empty there + // is no information available. A Reason clarifies an HTTP status + // code but does not override it. + // +optional + Reason StatusReason `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason,casttype=StatusReason"` + // Extended data associated with the reason. Each reason may define its + // own extended details. This field is optional and the data returned + // is not guaranteed to conform to any schema except that defined by + // the reason type. + // +optional + Details *StatusDetails `json:"details,omitempty" protobuf:"bytes,5,opt,name=details"` + // Suggested HTTP return code for this status, 0 if not set. + // +optional + Code int32 `json:"code,omitempty" protobuf:"varint,6,opt,name=code"` +} + +// StatusDetails is a set of additional properties that MAY be set by the +// server to provide additional information about a response. The Reason +// field of a Status object defines what attributes will be set. Clients +// must ignore fields that do not match the defined type of each attribute, +// and should assume that any attribute may be empty, invalid, or under +// defined. +type StatusDetails struct { + // The name attribute of the resource associated with the status StatusReason + // (when there is a single name which can be described). + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // The group attribute of the resource associated with the status StatusReason. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"` + // The kind attribute of the resource associated with the status StatusReason. + // On some operations may differ from the requested resource Kind. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` + // The Causes array includes more details associated with the StatusReason + // failure. Not all StatusReasons may provide detailed causes. + // +optional + Causes []StatusCause `json:"causes,omitempty" protobuf:"bytes,4,rep,name=causes"` + // If specified, the time in seconds before the operation should be retried. + // +optional + RetryAfterSeconds int32 `json:"retryAfterSeconds,omitempty" protobuf:"varint,5,opt,name=retryAfterSeconds"` +} + +// Values of Status.Status +const ( + StatusSuccess = "Success" + StatusFailure = "Failure" +) + +// StatusReason is an enumeration of possible failure causes. Each StatusReason +// must map to a single HTTP status code, but multiple reasons may map +// to the same HTTP status code. +// TODO: move to apiserver +type StatusReason string + +const ( + // StatusReasonUnknown means the server has declined to indicate a specific reason. + // The details field may contain other information about this error. + // Status code 500. 
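For reference, a sketch (illustrative, not part of the vendored code) of the kind of Status body these types describe and how a caller might branch on the machine-readable reason; the resource names and message are example values, assuming the vendored package is imported as metav1:

package main

import (
	"encoding/json"
	"fmt"
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A Failure status similar to what an API server returns for a missing
	// resource: machine-readable Reason/Details plus a suggested HTTP code.
	status := metav1.Status{
		TypeMeta: metav1.TypeMeta{Kind: "Status", APIVersion: "v1"},
		Status:   metav1.StatusFailure,
		Message:  `ingresses.extensions "demo" not found`,
		Reason:   metav1.StatusReasonNotFound,
		Details:  &metav1.StatusDetails{Name: "demo", Group: "extensions", Kind: "ingresses"},
		Code:     404,
	}

	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	_ = enc.Encode(status)

	// Callers typically switch on the reason rather than parsing the message.
	if status.Reason == metav1.StatusReasonNotFound {
		fmt.Println("resource is gone; nothing to clean up")
	}
}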
+ StatusReasonUnknown StatusReason = "" + + // StatusReasonUnauthorized means the server can be reached and understood the request, but requires + // the user to present appropriate authorization credentials (identified by the WWW-Authenticate header) + // in order for the action to be completed. If the user has specified credentials on the request, the + // server considers them insufficient. + // Status code 401 + StatusReasonUnauthorized StatusReason = "Unauthorized" + + // StatusReasonForbidden means the server can be reached and understood the request, but refuses + // to take any further action. It is the result of the server being configured to deny access for some reason + // to the requested resource by the client. + // Details (optional): + // "kind" string - the kind attribute of the forbidden resource + // on some operations may differ from the requested + // resource. + // "id" string - the identifier of the forbidden resource + // Status code 403 + StatusReasonForbidden StatusReason = "Forbidden" + + // StatusReasonNotFound means one or more resources required for this operation + // could not be found. + // Details (optional): + // "kind" string - the kind attribute of the missing resource + // on some operations may differ from the requested + // resource. + // "id" string - the identifier of the missing resource + // Status code 404 + StatusReasonNotFound StatusReason = "NotFound" + + // StatusReasonAlreadyExists means the resource you are creating already exists. + // Details (optional): + // "kind" string - the kind attribute of the conflicting resource + // "id" string - the identifier of the conflicting resource + // Status code 409 + StatusReasonAlreadyExists StatusReason = "AlreadyExists" + + // StatusReasonConflict means the requested operation cannot be completed + // due to a conflict in the operation. The client may need to alter the + // request. Each resource may define custom details that indicate the + // nature of the conflict. + // Status code 409 + StatusReasonConflict StatusReason = "Conflict" + + // StatusReasonGone means the item is no longer available at the server and no + // forwarding address is known. + // Status code 410 + StatusReasonGone StatusReason = "Gone" + + // StatusReasonInvalid means the requested create or update operation cannot be + // completed due to invalid data provided as part of the request. The client may + // need to alter the request. When set, the client may use the StatusDetails + // message field as a summary of the issues encountered. + // Details (optional): + // "kind" string - the kind attribute of the invalid resource + // "id" string - the identifier of the invalid resource + // "causes" - one or more StatusCause entries indicating the data in the + // provided resource that was invalid. The code, message, and + // field attributes will be set. + // Status code 422 + StatusReasonInvalid StatusReason = "Invalid" + + // StatusReasonServerTimeout means the server can be reached and understood the request, + // but cannot complete the action in a reasonable time. The client should retry the request. + // This may be due to temporary server load or a transient communication issue with + // another server. Status code 500 is used because the HTTP spec provides no suitable + // server-requested client retry and the 5xx class represents actionable errors. + // Details (optional): + // "kind" string - the kind attribute of the resource being acted on. + // "id" string - the operation that is being attempted. 
+ // "retryAfterSeconds" int32 - the number of seconds before the operation should be retried + // Status code 500 + StatusReasonServerTimeout StatusReason = "ServerTimeout" + + // StatusReasonTimeout means that the request could not be completed within the given time. + // Clients can get this response only when they specified a timeout param in the request, + // or if the server cannot complete the operation within a reasonable amount of time. + // The request might succeed with an increased value of timeout param. The client *should* + // wait at least the number of seconds specified by the retryAfterSeconds field. + // Details (optional): + // "retryAfterSeconds" int32 - the number of seconds before the operation should be retried + // Status code 504 + StatusReasonTimeout StatusReason = "Timeout" + + // StatusReasonBadRequest means that the request itself was invalid, because the request + // doesn't make any sense, for example deleting a read-only object. This is different than + // StatusReasonInvalid above which indicates that the API call could possibly succeed, but the + // data was invalid. API calls that return BadRequest can never succeed. + StatusReasonBadRequest StatusReason = "BadRequest" + + // StatusReasonMethodNotAllowed means that the action the client attempted to perform on the + // resource was not supported by the code - for instance, attempting to delete a resource that + // can only be created. API calls that return MethodNotAllowed can never succeed. + StatusReasonMethodNotAllowed StatusReason = "MethodNotAllowed" + + // StatusReasonInternalError indicates that an internal error occurred, it is unexpected + // and the outcome of the call is unknown. + // Details (optional): + // "causes" - The original error + // Status code 500 + StatusReasonInternalError StatusReason = "InternalError" + + // StatusReasonExpired indicates that the request is invalid because the content you are requesting + // has expired and is no longer available. It is typically associated with watches that can't be + // serviced. + // Status code 410 (gone) + StatusReasonExpired StatusReason = "Expired" + + // StatusReasonServiceUnavailable means that the request itself was valid, + // but the requested service is unavailable at this time. + // Retrying the request after some time might succeed. + // Status code 503 + StatusReasonServiceUnavailable StatusReason = "ServiceUnavailable" +) + +// StatusCause provides more information about an api.Status failure, including +// cases when multiple errors are encountered. +type StatusCause struct { + // A machine-readable description of the cause of the error. If this value is + // empty there is no information available. + // +optional + Type CauseType `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason,casttype=CauseType"` + // A human-readable description of the cause of the error. This field may be + // presented as-is to a reader. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // The field of the resource that has caused this error, as named by its JSON + // serialization. May include dot and postfix notation for nested attributes. + // Arrays are zero-indexed. Fields may appear more than once in an array of + // causes due to fields having multiple errors. + // Optional. 
+ // + // Examples: + // "name" - the field "name" on the current resource + // "items[0].name" - the field "name" on the first array entry in "items" + // +optional + Field string `json:"field,omitempty" protobuf:"bytes,3,opt,name=field"` +} + +// CauseType is a machine readable value providing more detail about what +// occurred in a status response. An operation may have multiple causes for a +// status (whether Failure or Success). +type CauseType string + +const ( + // CauseTypeFieldValueNotFound is used to report failure to find a requested value + // (e.g. looking up an ID). + CauseTypeFieldValueNotFound CauseType = "FieldValueNotFound" + // CauseTypeFieldValueRequired is used to report required values that are not + // provided (e.g. empty strings, null values, or empty arrays). + CauseTypeFieldValueRequired CauseType = "FieldValueRequired" + // CauseTypeFieldValueDuplicate is used to report collisions of values that must be + // unique (e.g. unique IDs). + CauseTypeFieldValueDuplicate CauseType = "FieldValueDuplicate" + // CauseTypeFieldValueInvalid is used to report malformed values (e.g. failed regex + // match). + CauseTypeFieldValueInvalid CauseType = "FieldValueInvalid" + // CauseTypeFieldValueNotSupported is used to report valid (as per formatting rules) + // values that can not be handled (e.g. an enumerated string). + CauseTypeFieldValueNotSupported CauseType = "FieldValueNotSupported" + // CauseTypeUnexpectedServerResponse is used to report when the server responded to the client + // without the expected return type. The presence of this cause indicates the error may be + // due to an intervening proxy or the server software malfunctioning. + CauseTypeUnexpectedServerResponse CauseType = "UnexpectedServerResponse" +) + +// APIVersions lists the versions that are available, to allow clients to +// discover the API at /api, which is the root path of the legacy v1 API. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type APIVersions struct { + TypeMeta `json:",inline"` + // versions are the api versions that are available. + Versions []string `json:"versions" protobuf:"bytes,1,rep,name=versions"` + // a map of client CIDR to server address that is serving this group. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. + // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,2,rep,name=serverAddressByClientCIDRs"` +} + +// APIGroupList is a list of APIGroup, to allow clients to discover the API at +// /apis. +type APIGroupList struct { + TypeMeta `json:",inline"` + // groups is a list of APIGroup. + Groups []APIGroup `json:"groups" protobuf:"bytes,1,rep,name=groups"` +} + +// APIGroup contains the name, the supported versions, and the preferred version +// of a group. +type APIGroup struct { + TypeMeta `json:",inline"` + // name is the name of the group. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // versions are the versions supported in this group. 
+ Versions []GroupVersionForDiscovery `json:"versions" protobuf:"bytes,2,rep,name=versions"` + // preferredVersion is the version preferred by the API server, which + // probably is the storage version. + // +optional + PreferredVersion GroupVersionForDiscovery `json:"preferredVersion,omitempty" protobuf:"bytes,3,opt,name=preferredVersion"` + // a map of client CIDR to server address that is serving this group. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. + // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"` +} + +// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. +type ServerAddressByClientCIDR struct { + // The CIDR with which clients can match their IP to figure out the server address that they should use. + ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"` + // Address of this server, suitable for a client that matches the above CIDR. + // This can be a hostname, hostname:port, IP or IP:port. + ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"` +} + +// GroupVersion contains the "group/version" and "version" string of a version. +// It is made a struct to keep extensibility. +type GroupVersionForDiscovery struct { + // groupVersion specifies the API group and version in the form "group/version" + GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"` + // version specifies the version in the form of "version". This is to save + // the clients the trouble of splitting the GroupVersion. + Version string `json:"version" protobuf:"bytes,2,opt,name=version"` +} + +// APIResource specifies the name of a resource and whether it is namespaced. +type APIResource struct { + // name is the name of the resource. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // namespaced indicates if a resource is namespaced or not. + Namespaced bool `json:"namespaced" protobuf:"varint,2,opt,name=namespaced"` + // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') + Kind string `json:"kind" protobuf:"bytes,3,opt,name=kind"` + // verbs is a list of supported kube verbs (this includes get, list, watch, create, + // update, patch, delete, deletecollection, and proxy) + Verbs Verbs `json:"verbs" protobuf:"bytes,4,opt,name=verbs"` + // shortNames is a list of suggested short names of the resource. 
+ ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,5,rep,name=shortNames"` +} + +// Verbs masks the value so protobuf can generate +// +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type Verbs []string + +func (vs Verbs) String() string { + return fmt.Sprintf("%v", []string(vs)) +} + +// APIResourceList is a list of APIResource, it is used to expose the name of the +// resources supported in a specific group and version, and if the resource +// is namespaced. +type APIResourceList struct { + TypeMeta `json:",inline"` + // groupVersion is the group and version this APIResourceList is for. + GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"` + // resources contains the name of the resources and if they are namespaced. + APIResources []APIResource `json:"resources" protobuf:"bytes,2,rep,name=resources"` +} + +// RootPaths lists the paths available at root. +// For example: "/healthz", "/apis". +type RootPaths struct { + // paths are the paths available at root. + Paths []string `json:"paths" protobuf:"bytes,1,rep,name=paths"` +} + +// TODO: remove me when watch is refactored +func LabelSelectorQueryParam(version string) string { + return "labelSelector" +} + +// TODO: remove me when watch is refactored +func FieldSelectorQueryParam(version string) string { + return "fieldSelector" +} + +// String returns available api versions as a human-friendly version string. +func (apiVersions APIVersions) String() string { + return strings.Join(apiVersions.Versions, ",") +} + +func (apiVersions APIVersions) GoString() string { + return apiVersions.String() +} + +// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body. +type Patch struct{} + +// Note: +// There are two different styles of label selectors used in versioned types: +// an older style which is represented as just a string in versioned types, and a +// newer style that is structured. LabelSelector is an internal representation for the +// latter style. + +// A label selector is a label query over a set of resources. The result of matchLabels and +// matchExpressions are ANDed. An empty label selector matches all objects. A null +// label selector matches no objects. +type LabelSelector struct { + // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + // map is equivalent to an element of matchExpressions, whose key field is "key", the + // operator is "In", and the values array contains only "value". The requirements are ANDed. + // +optional + MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"` + // matchExpressions is a list of label selector requirements. The requirements are ANDed. + // +optional + MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"` +} + +// A label selector requirement is a selector that contains values, a key, and an operator that +// relates the key and values. +type LabelSelectorRequirement struct { + // key is the label key that the selector applies to. + Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + // operator represents a key's relationship to a set of values. + // Valid operators ard In, NotIn, Exists and DoesNotExist. + Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"` + // values is an array of string values. 
If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. This array is replaced during a strategic + // merge patch. + // +optional + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` +} + +// A label selector operator is the set of operators that can be used in a selector requirement. +type LabelSelectorOperator string + +const ( + LabelSelectorOpIn LabelSelectorOperator = "In" + LabelSelectorOpNotIn LabelSelectorOperator = "NotIn" + LabelSelectorOpExists LabelSelectorOperator = "Exists" + LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" +) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..950c002855 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -0,0 +1,290 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_APIGroup = map[string]string{ + "": "APIGroup contains the name, the supported versions, and the preferred version of a group.", + "name": "name is the name of the group.", + "versions": "versions are the versions supported in this group.", + "preferredVersion": "preferredVersion is the version preferred by the API server, which probably is the storage version.", + "serverAddressByClientCIDRs": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. 
Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", +} + +func (APIGroup) SwaggerDoc() map[string]string { + return map_APIGroup +} + +var map_APIGroupList = map[string]string{ + "": "APIGroupList is a list of APIGroup, to allow clients to discover the API at /apis.", + "groups": "groups is a list of APIGroup.", +} + +func (APIGroupList) SwaggerDoc() map[string]string { + return map_APIGroupList +} + +var map_APIResource = map[string]string{ + "": "APIResource specifies the name of a resource and whether it is namespaced.", + "name": "name is the name of the resource.", + "namespaced": "namespaced indicates if a resource is namespaced or not.", + "kind": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", + "verbs": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", + "shortNames": "shortNames is a list of suggested short names of the resource.", +} + +func (APIResource) SwaggerDoc() map[string]string { + return map_APIResource +} + +var map_APIResourceList = map[string]string{ + "": "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.", + "groupVersion": "groupVersion is the group and version this APIResourceList is for.", + "resources": "resources contains the name of the resources and if they are namespaced.", +} + +func (APIResourceList) SwaggerDoc() map[string]string { + return map_APIResourceList +} + +var map_APIVersions = map[string]string{ + "": "APIVersions lists the versions that are available, to allow clients to discover the API at /api, which is the root path of the legacy v1 API.", + "versions": "versions are the api versions that are available.", + "serverAddressByClientCIDRs": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", +} + +func (APIVersions) SwaggerDoc() map[string]string { + return map_APIVersions +} + +var map_DeleteOptions = map[string]string{ + "": "DeleteOptions may be provided when deleting an API object.", + "gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", + "preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", + "orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. 
Either this field or PropagationPolicy may be set, but not both.", + "propagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.", +} + +func (DeleteOptions) SwaggerDoc() map[string]string { + return map_DeleteOptions +} + +var map_ExportOptions = map[string]string{ + "": "ExportOptions is the query options to the standard REST get call.", + "export": "Should this value be exported. Export strips fields that a user can not specify.", + "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.", +} + +func (ExportOptions) SwaggerDoc() map[string]string { + return map_ExportOptions +} + +var map_GetOptions = map[string]string{ + "": "GetOptions is the standard query options to the standard REST get call.", + "resourceVersion": "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", +} + +func (GetOptions) SwaggerDoc() map[string]string { + return map_GetOptions +} + +var map_GroupVersionForDiscovery = map[string]string{ + "": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.", + "groupVersion": "groupVersion specifies the API group and version in the form \"group/version\"", + "version": "version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion.", +} + +func (GroupVersionForDiscovery) SwaggerDoc() map[string]string { + return map_GroupVersionForDiscovery +} + +var map_LabelSelector = map[string]string{ + "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", + "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", + "matchExpressions": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", +} + +func (LabelSelector) SwaggerDoc() map[string]string { + return map_LabelSelector +} + +var map_LabelSelectorRequirement = map[string]string{ + "": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", + "key": "key is the label key that the selector applies to.", + "operator": "operator represents a key's relationship to a set of values. Valid operators ard In, NotIn, Exists and DoesNotExist.", + "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", +} + +func (LabelSelectorRequirement) SwaggerDoc() map[string]string { + return map_LabelSelectorRequirement +} + +var map_ListMeta = map[string]string{ + "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. 
A resource may have only one of {ObjectMeta, ListMeta}.", + "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", + "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency", +} + +func (ListMeta) SwaggerDoc() map[string]string { + return map_ListMeta +} + +var map_ListOptions = map[string]string{ + "": "ListOptions is the query options to a standard REST list call.", + "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "timeoutSeconds": "Timeout for the list/watch call.", +} + +func (ListOptions) SwaggerDoc() map[string]string { + return map_ListOptions +} + +var map_ObjectMeta = map[string]string{ + "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", + "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency", + "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/namespaces", + "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", + "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency", + "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", + "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", + "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", + "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations", + "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", + "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", + "clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", +} + +func (ObjectMeta) SwaggerDoc() map[string]string { + return map_ObjectMeta +} + +var map_OwnerReference = map[string]string{ + "": "OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field.", + "apiVersion": "API version of the referent.", + "kind": "Kind of the referent. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "uid": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "controller": "If true, this reference points to the managing controller.", + "blockOwnerDeletion": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", +} + +func (OwnerReference) SwaggerDoc() map[string]string { + return map_OwnerReference +} + +var map_Patch = map[string]string{ + "": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.", +} + +func (Patch) SwaggerDoc() map[string]string { + return map_Patch +} + +var map_Preconditions = map[string]string{ + "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", + "uid": "Specifies the target UID.", +} + +func (Preconditions) SwaggerDoc() map[string]string { + return map_Preconditions +} + +var map_RootPaths = map[string]string{ + "": "RootPaths lists the paths available at root. For example: \"/healthz\", \"/apis\".", + "paths": "paths are the paths available at root.", +} + +func (RootPaths) SwaggerDoc() map[string]string { + return map_RootPaths +} + +var map_ServerAddressByClientCIDR = map[string]string{ + "": "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.", + "clientCIDR": "The CIDR with which clients can match their IP to figure out the server address that they should use.", + "serverAddress": "Address of this server, suitable for a client that matches the above CIDR. 
This can be a hostname, hostname:port, IP or IP:port.", +} + +func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string { + return map_ServerAddressByClientCIDR +} + +var map_Status = map[string]string{ + "": "Status is a return value for calls that don't return other objects.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "message": "A human-readable description of the status of this operation.", + "reason": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.", + "details": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.", + "code": "Suggested HTTP return code for this status, 0 if not set.", +} + +func (Status) SwaggerDoc() map[string]string { + return map_Status +} + +var map_StatusCause = map[string]string{ + "": "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.", + "reason": "A machine-readable description of the cause of the error. If this value is empty there is no information available.", + "message": "A human-readable description of the cause of the error. This field may be presented as-is to a reader.", + "field": "The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"", +} + +func (StatusCause) SwaggerDoc() map[string]string { + return map_StatusCause +} + +var map_StatusDetails = map[string]string{ + "": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", + "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", + "group": "The group attribute of the resource associated with the status StatusReason.", + "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "causes": "The Causes array includes more details associated with the StatusReason failure. 
Not all StatusReasons may provide detailed causes.", + "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried.", +} + +func (StatusDetails) SwaggerDoc() map[string]string { + return map_StatusDetails +} + +var map_TypeMeta = map[string]string{ + "": "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.", + "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources", +} + +func (TypeMeta) SwaggerDoc() map[string]string { + return map_TypeMeta +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go new file mode 100644 index 0000000000..ae20726b6e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -0,0 +1,689 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unstructured + +import ( + "bytes" + gojson "encoding/json" + "errors" + "fmt" + "io" + "strings" + + "github.com/golang/glog" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" +) + +// Unstructured allows objects that do not have Golang structs registered to be manipulated +// generically. This can be used to deal with the API objects from a plug-in. Unstructured +// objects still have functioning TypeMeta features-- kind, version, etc. +// +// WARNING: This object has accessors for the v1 standard metadata. You *MUST NOT* use this +// type if you are dealing with objects that are not in the server meta v1 schema. +// +// TODO: make the serialization part of this type distinct from the field accessors. +type Unstructured struct { + // Object is a JSON compatible map with string, float, int, bool, []interface{}, or + // map[string]interface{} + // children. 
+ Object map[string]interface{} +} + +var _ metav1.Object = &Unstructured{} +var _ runtime.Unstructured = &Unstructured{} +var _ runtime.Unstructured = &UnstructuredList{} + +func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj } +func (obj *UnstructuredList) GetObjectKind() schema.ObjectKind { return obj } + +func (obj *Unstructured) IsUnstructuredObject() {} +func (obj *UnstructuredList) IsUnstructuredObject() {} + +func (obj *Unstructured) IsList() bool { + if obj.Object != nil { + _, ok := obj.Object["items"] + return ok + } + return false +} +func (obj *UnstructuredList) IsList() bool { return true } + +func (obj *Unstructured) UnstructuredContent() map[string]interface{} { + if obj.Object == nil { + obj.Object = make(map[string]interface{}) + } + return obj.Object +} + +// UnstructuredContent returns a map contain an overlay of the Items field onto +// the Object field. Items always overwrites overlay. Changing "items" in the +// returned object will affect items in the underlying Items field, but changing +// the "items" slice itself will have no effect. +// TODO: expose SetUnstructuredContent on runtime.Unstructured that allows +// items to be changed. +func (obj *UnstructuredList) UnstructuredContent() map[string]interface{} { + out := obj.Object + if out == nil { + out = make(map[string]interface{}) + } + items := make([]interface{}, len(obj.Items)) + for i, item := range obj.Items { + items[i] = item.Object + } + out["items"] = items + return out +} + +// MarshalJSON ensures that the unstructured object produces proper +// JSON when passed to Go's standard JSON library. +func (u *Unstructured) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + err := UnstructuredJSONScheme.Encode(u, &buf) + return buf.Bytes(), err +} + +// UnmarshalJSON ensures that the unstructured object properly decodes +// JSON when passed to Go's standard JSON library. 
+func (u *Unstructured) UnmarshalJSON(b []byte) error { + _, _, err := UnstructuredJSONScheme.Decode(b, nil, u) + return err +} + +func getNestedField(obj map[string]interface{}, fields ...string) interface{} { + var val interface{} = obj + for _, field := range fields { + if _, ok := val.(map[string]interface{}); !ok { + return nil + } + val = val.(map[string]interface{})[field] + } + return val +} + +func getNestedString(obj map[string]interface{}, fields ...string) string { + if str, ok := getNestedField(obj, fields...).(string); ok { + return str + } + return "" +} + +func getNestedSlice(obj map[string]interface{}, fields ...string) []string { + if m, ok := getNestedField(obj, fields...).([]interface{}); ok { + strSlice := make([]string, 0, len(m)) + for _, v := range m { + if str, ok := v.(string); ok { + strSlice = append(strSlice, str) + } + } + return strSlice + } + return nil +} + +func getNestedMap(obj map[string]interface{}, fields ...string) map[string]string { + if m, ok := getNestedField(obj, fields...).(map[string]interface{}); ok { + strMap := make(map[string]string, len(m)) + for k, v := range m { + if str, ok := v.(string); ok { + strMap[k] = str + } + } + return strMap + } + return nil +} + +func setNestedField(obj map[string]interface{}, value interface{}, fields ...string) { + m := obj + if len(fields) > 1 { + for _, field := range fields[0 : len(fields)-1] { + if _, ok := m[field].(map[string]interface{}); !ok { + m[field] = make(map[string]interface{}) + } + m = m[field].(map[string]interface{}) + } + } + m[fields[len(fields)-1]] = value +} + +func setNestedSlice(obj map[string]interface{}, value []string, fields ...string) { + m := make([]interface{}, 0, len(value)) + for _, v := range value { + m = append(m, v) + } + setNestedField(obj, m, fields...) +} + +func setNestedMap(obj map[string]interface{}, value map[string]string, fields ...string) { + m := make(map[string]interface{}, len(value)) + for k, v := range value { + m[k] = v + } + setNestedField(obj, m, fields...) +} + +func (u *Unstructured) setNestedField(value interface{}, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + setNestedField(u.Object, value, fields...) +} + +func (u *Unstructured) setNestedSlice(value []string, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + setNestedSlice(u.Object, value, fields...) +} + +func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + setNestedMap(u.Object, value, fields...) +} + +func extractOwnerReference(src interface{}) metav1.OwnerReference { + v := src.(map[string]interface{}) + // though this field is a *bool, but when decoded from JSON, it's + // unmarshalled as bool. 
+ var controllerPtr *bool + controller, ok := (getNestedField(v, "controller")).(bool) + if !ok { + controllerPtr = nil + } else { + controllerCopy := controller + controllerPtr = &controllerCopy + } + var blockOwnerDeletionPtr *bool + blockOwnerDeletion, ok := (getNestedField(v, "blockOwnerDeletion")).(bool) + if !ok { + blockOwnerDeletionPtr = nil + } else { + blockOwnerDeletionCopy := blockOwnerDeletion + blockOwnerDeletionPtr = &blockOwnerDeletionCopy + } + return metav1.OwnerReference{ + Kind: getNestedString(v, "kind"), + Name: getNestedString(v, "name"), + APIVersion: getNestedString(v, "apiVersion"), + UID: (types.UID)(getNestedString(v, "uid")), + Controller: controllerPtr, + BlockOwnerDeletion: blockOwnerDeletionPtr, + } +} + +func setOwnerReference(src metav1.OwnerReference) map[string]interface{} { + ret := make(map[string]interface{}) + controllerPtr := src.Controller + if controllerPtr != nil { + controller := *controllerPtr + controllerPtr = &controller + } + blockOwnerDeletionPtr := src.BlockOwnerDeletion + if blockOwnerDeletionPtr != nil { + blockOwnerDeletion := *blockOwnerDeletionPtr + blockOwnerDeletionPtr = &blockOwnerDeletion + } + setNestedField(ret, src.Kind, "kind") + setNestedField(ret, src.Name, "name") + setNestedField(ret, src.APIVersion, "apiVersion") + setNestedField(ret, string(src.UID), "uid") + setNestedField(ret, controllerPtr, "controller") + setNestedField(ret, blockOwnerDeletionPtr, "blockOwnerDeletion") + return ret +} + +func getOwnerReferences(object map[string]interface{}) ([]map[string]interface{}, error) { + field := getNestedField(object, "metadata", "ownerReferences") + if field == nil { + return nil, fmt.Errorf("cannot find field metadata.ownerReferences in %v", object) + } + ownerReferences, ok := field.([]map[string]interface{}) + if ok { + return ownerReferences, nil + } + // TODO: This is hacky... 
+ interfaces, ok := field.([]interface{}) + if !ok { + return nil, fmt.Errorf("expect metadata.ownerReferences to be a slice in %#v", object) + } + ownerReferences = make([]map[string]interface{}, 0, len(interfaces)) + for i := 0; i < len(interfaces); i++ { + r, ok := interfaces[i].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("expect element metadata.ownerReferences to be a map[string]interface{} in %#v", object) + } + ownerReferences = append(ownerReferences, r) + } + return ownerReferences, nil +} + +func (u *Unstructured) GetOwnerReferences() []metav1.OwnerReference { + original, err := getOwnerReferences(u.Object) + if err != nil { + glog.V(6).Info(err) + return nil + } + ret := make([]metav1.OwnerReference, 0, len(original)) + for i := 0; i < len(original); i++ { + ret = append(ret, extractOwnerReference(original[i])) + } + return ret +} + +func (u *Unstructured) SetOwnerReferences(references []metav1.OwnerReference) { + var newReferences = make([]map[string]interface{}, 0, len(references)) + for i := 0; i < len(references); i++ { + newReferences = append(newReferences, setOwnerReference(references[i])) + } + u.setNestedField(newReferences, "metadata", "ownerReferences") +} + +func (u *Unstructured) GetAPIVersion() string { + return getNestedString(u.Object, "apiVersion") +} + +func (u *Unstructured) SetAPIVersion(version string) { + u.setNestedField(version, "apiVersion") +} + +func (u *Unstructured) GetKind() string { + return getNestedString(u.Object, "kind") +} + +func (u *Unstructured) SetKind(kind string) { + u.setNestedField(kind, "kind") +} + +func (u *Unstructured) GetNamespace() string { + return getNestedString(u.Object, "metadata", "namespace") +} + +func (u *Unstructured) SetNamespace(namespace string) { + u.setNestedField(namespace, "metadata", "namespace") +} + +func (u *Unstructured) GetName() string { + return getNestedString(u.Object, "metadata", "name") +} + +func (u *Unstructured) SetName(name string) { + u.setNestedField(name, "metadata", "name") +} + +func (u *Unstructured) GetGenerateName() string { + return getNestedString(u.Object, "metadata", "generateName") +} + +func (u *Unstructured) SetGenerateName(name string) { + u.setNestedField(name, "metadata", "generateName") +} + +func (u *Unstructured) GetUID() types.UID { + return types.UID(getNestedString(u.Object, "metadata", "uid")) +} + +func (u *Unstructured) SetUID(uid types.UID) { + u.setNestedField(string(uid), "metadata", "uid") +} + +func (u *Unstructured) GetResourceVersion() string { + return getNestedString(u.Object, "metadata", "resourceVersion") +} + +func (u *Unstructured) SetResourceVersion(version string) { + u.setNestedField(version, "metadata", "resourceVersion") +} + +func (u *Unstructured) GetSelfLink() string { + return getNestedString(u.Object, "metadata", "selfLink") +} + +func (u *Unstructured) SetSelfLink(selfLink string) { + u.setNestedField(selfLink, "metadata", "selfLink") +} + +func (u *Unstructured) GetCreationTimestamp() metav1.Time { + var timestamp metav1.Time + timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "creationTimestamp")) + return timestamp +} + +func (u *Unstructured) SetCreationTimestamp(timestamp metav1.Time) { + ts, _ := timestamp.MarshalQueryParameter() + u.setNestedField(ts, "metadata", "creationTimestamp") +} + +func (u *Unstructured) GetDeletionTimestamp() *metav1.Time { + var timestamp metav1.Time + timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "deletionTimestamp")) + if timestamp.IsZero() { + 
return nil + } + return &timestamp +} + +func (u *Unstructured) SetDeletionTimestamp(timestamp *metav1.Time) { + ts, _ := timestamp.MarshalQueryParameter() + u.setNestedField(ts, "metadata", "deletionTimestamp") +} + +func (u *Unstructured) GetLabels() map[string]string { + return getNestedMap(u.Object, "metadata", "labels") +} + +func (u *Unstructured) SetLabels(labels map[string]string) { + u.setNestedMap(labels, "metadata", "labels") +} + +func (u *Unstructured) GetAnnotations() map[string]string { + return getNestedMap(u.Object, "metadata", "annotations") +} + +func (u *Unstructured) SetAnnotations(annotations map[string]string) { + u.setNestedMap(annotations, "metadata", "annotations") +} + +func (u *Unstructured) SetGroupVersionKind(gvk schema.GroupVersionKind) { + u.SetAPIVersion(gvk.GroupVersion().String()) + u.SetKind(gvk.Kind) +} + +func (u *Unstructured) GroupVersionKind() schema.GroupVersionKind { + gv, err := schema.ParseGroupVersion(u.GetAPIVersion()) + if err != nil { + return schema.GroupVersionKind{} + } + gvk := gv.WithKind(u.GetKind()) + return gvk +} + +func (u *Unstructured) GetFinalizers() []string { + return getNestedSlice(u.Object, "metadata", "finalizers") +} + +func (u *Unstructured) SetFinalizers(finalizers []string) { + u.setNestedSlice(finalizers, "metadata", "finalizers") +} + +func (u *Unstructured) GetClusterName() string { + return getNestedString(u.Object, "metadata", "clusterName") +} + +func (u *Unstructured) SetClusterName(clusterName string) { + u.setNestedField(clusterName, "metadata", "clusterName") +} + +// UnstructuredList allows lists that do not have Golang structs +// registered to be manipulated generically. This can be used to deal +// with the API lists from a plug-in. +type UnstructuredList struct { + Object map[string]interface{} + + // Items is a list of unstructured objects. + Items []*Unstructured `json:"items"` +} + +// MarshalJSON ensures that the unstructured list object produces proper +// JSON when passed to Go's standard JSON library. +func (u *UnstructuredList) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + err := UnstructuredJSONScheme.Encode(u, &buf) + return buf.Bytes(), err +} + +// UnmarshalJSON ensures that the unstructured list object properly +// decodes JSON when passed to Go's standard JSON library. +func (u *UnstructuredList) UnmarshalJSON(b []byte) error { + _, _, err := UnstructuredJSONScheme.Decode(b, nil, u) + return err +} + +func (u *UnstructuredList) setNestedField(value interface{}, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + setNestedField(u.Object, value, fields...)
+} + +func (u *UnstructuredList) GetAPIVersion() string { + return getNestedString(u.Object, "apiVersion") +} + +func (u *UnstructuredList) SetAPIVersion(version string) { + u.setNestedField(version, "apiVersion") +} + +func (u *UnstructuredList) GetKind() string { + return getNestedString(u.Object, "kind") +} + +func (u *UnstructuredList) SetKind(kind string) { + u.setNestedField(kind, "kind") +} + +func (u *UnstructuredList) GetResourceVersion() string { + return getNestedString(u.Object, "metadata", "resourceVersion") +} + +func (u *UnstructuredList) SetResourceVersion(version string) { + u.setNestedField(version, "metadata", "resourceVersion") +} + +func (u *UnstructuredList) GetSelfLink() string { + return getNestedString(u.Object, "metadata", "selfLink") +} + +func (u *UnstructuredList) SetSelfLink(selfLink string) { + u.setNestedField(selfLink, "metadata", "selfLink") +} + +func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) { + u.SetAPIVersion(gvk.GroupVersion().String()) + u.SetKind(gvk.Kind) +} + +func (u *UnstructuredList) GroupVersionKind() schema.GroupVersionKind { + gv, err := schema.ParseGroupVersion(u.GetAPIVersion()) + if err != nil { + return schema.GroupVersionKind{} + } + gvk := gv.WithKind(u.GetKind()) + return gvk +} + +// UnstructuredJSONScheme is capable of converting JSON data into the Unstructured +// type, which can be used for generic access to objects without a predefined scheme. +// TODO: move into serializer/json. +var UnstructuredJSONScheme runtime.Codec = unstructuredJSONScheme{} + +type unstructuredJSONScheme struct{} + +func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + var err error + if obj != nil { + err = s.decodeInto(data, obj) + } else { + obj, err = s.decode(data) + } + + if err != nil { + return nil, nil, err + } + + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return nil, &gvk, runtime.NewMissingKindErr(string(data)) + } + + return obj, &gvk, nil +} + +func (unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error { + switch t := obj.(type) { + case *Unstructured: + return json.NewEncoder(w).Encode(t.Object) + case *UnstructuredList: + items := make([]map[string]interface{}, 0, len(t.Items)) + for _, i := range t.Items { + items = append(items, i.Object) + } + t.Object["items"] = items + defer func() { delete(t.Object, "items") }() + return json.NewEncoder(w).Encode(t.Object) + case *runtime.Unknown: + // TODO: Unstructured needs to deal with ContentType. + _, err := w.Write(t.Raw) + return err + default: + return json.NewEncoder(w).Encode(t) + } +} + +func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) { + type detector struct { + Items gojson.RawMessage + } + var det detector + if err := json.Unmarshal(data, &det); err != nil { + return nil, err + } + + if det.Items != nil { + list := &UnstructuredList{} + err := s.decodeToList(data, list) + return list, err + } + + // No Items field, so it wasn't a list. 
+ unstruct := &Unstructured{} + err := s.decodeToUnstructured(data, unstruct) + return unstruct, err +} + +func (s unstructuredJSONScheme) decodeInto(data []byte, obj runtime.Object) error { + switch x := obj.(type) { + case *Unstructured: + return s.decodeToUnstructured(data, x) + case *UnstructuredList: + return s.decodeToList(data, x) + case *runtime.VersionedObjects: + o, err := s.decode(data) + if err == nil { + x.Objects = []runtime.Object{o} + } + return err + default: + return json.Unmarshal(data, x) + } +} + +func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstructured) error { + m := make(map[string]interface{}) + if err := json.Unmarshal(data, &m); err != nil { + return err + } + + unstruct.Object = m + + return nil +} + +func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error { + type decodeList struct { + Items []gojson.RawMessage + } + + var dList decodeList + if err := json.Unmarshal(data, &dList); err != nil { + return err + } + + if err := json.Unmarshal(data, &list.Object); err != nil { + return err + } + + // For typed lists, e.g., a PodList, the API server doesn't set each item's + // APIVersion and Kind. We need to set them. + listAPIVersion := list.GetAPIVersion() + listKind := list.GetKind() + itemKind := strings.TrimSuffix(listKind, "List") + + delete(list.Object, "items") + list.Items = nil + for _, i := range dList.Items { + unstruct := &Unstructured{} + if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil { + return err + } + // This is hacky. Set the item's Kind and APIVersion to those inferred + // from the List. + if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 { + unstruct.SetKind(itemKind) + unstruct.SetAPIVersion(listAPIVersion) + } + list.Items = append(list.Items, unstruct) + } + return nil +} + +// UnstructuredObjectConverter is an ObjectConverter for use with +// Unstructured objects. Since it has no schema or type information, +// it will only succeed for no-op conversions. This is provided as a +// sane implementation for APIs that require an object converter. +type UnstructuredObjectConverter struct{} + +func (UnstructuredObjectConverter) Convert(in, out, context interface{}) error { + unstructIn, ok := in.(*Unstructured) + if !ok { + return fmt.Errorf("input type %T is not valid for unstructured conversion", in) + } + + unstructOut, ok := out.(*Unstructured) + if !ok { + return fmt.Errorf("output type %T is not valid for unstructured conversion", out) + } + + // maybe deep copy the map? It is documented in the + // ObjectConverter interface that this function is not + // guaranteed to not mutate the input. Or maybe set the input + // object to nil. + unstructOut.Object = unstructIn.Object + return nil +} + +func (UnstructuredObjectConverter) ConvertToVersion(in runtime.Object, target runtime.GroupVersioner) (runtime.Object, error) { + if kind := in.GetObjectKind().GroupVersionKind(); !kind.Empty() { + gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{kind}) + if !ok { + // TODO: should this be a typed error?
+ return nil, fmt.Errorf("%v is unstructured and is not suitable for converting to %q", kind, target) + } + in.GetObjectKind().SetGroupVersionKind(gvk) + } + return in, nil +} + +func (UnstructuredObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { + return "", "", errors.New("unstructured cannot convert field labels") +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go new file mode 100644 index 0000000000..a645501a1a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go @@ -0,0 +1,80 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" +) + +// Event represents a single event to a watched resource. +// +// +protobuf=true +type WatchEvent struct { + Type string `json:"type" protobuf:"bytes,1,opt,name=type"` + + // Object is: + // * If Type is Added or Modified: the new state of the object. + // * If Type is Deleted: the state of the object immediately before deletion. + // * If Type is Error: *Status is recommended; other types may make sense + // depending on context. 
+ Object runtime.RawExtension `json:"object" protobuf:"bytes,2,opt,name=object"` +} + +func Convert_watch_Event_to_versioned_Event(in *watch.Event, out *WatchEvent, s conversion.Scope) error { + out.Type = string(in.Type) + switch t := in.Object.(type) { + case *runtime.Unknown: + // TODO: handle other fields on Unknown and detect type + out.Object.Raw = t.Raw + case nil: + default: + out.Object.Object = in.Object + } + return nil +} + +func Convert_versioned_InternalEvent_to_versioned_Event(in *InternalEvent, out *WatchEvent, s conversion.Scope) error { + return Convert_watch_Event_to_versioned_Event((*watch.Event)(in), out, s) +} + +func Convert_versioned_Event_to_watch_Event(in *WatchEvent, out *watch.Event, s conversion.Scope) error { + out.Type = watch.EventType(in.Type) + if in.Object.Object != nil { + out.Object = in.Object.Object + } else if in.Object.Raw != nil { + // TODO: handle other fields on Unknown and detect type + out.Object = &runtime.Unknown{ + Raw: in.Object.Raw, + ContentType: runtime.ContentTypeJSON, + } + } + return nil +} + +func Convert_versioned_Event_to_versioned_InternalEvent(in *WatchEvent, out *InternalEvent, s conversion.Scope) error { + return Convert_versioned_Event_to_watch_Event(in, (*watch.Event)(out), s) +} + +// InternalEvent makes watch.Event versioned +// +protobuf=false +type InternalEvent watch.Event + +func (e *InternalEvent) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } +func (e *WatchEvent) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/well_known_labels.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/well_known_labels.go similarity index 75% rename from vendor/k8s.io/kubernetes/pkg/api/unversioned/well_known_labels.go rename to vendor/k8s.io/apimachinery/pkg/apis/meta/v1/well_known_labels.go index 7556181e88..bd17546a4e 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/well_known_labels.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/well_known_labels.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package unversioned +package v1 const ( // If you add a new topology domain here, also consider adding it to the set of default values @@ -27,6 +27,23 @@ const ( LabelOS = "beta.kubernetes.io/os" LabelArch = "beta.kubernetes.io/arch" + + // Historically fluentd was a manifest pod that was migrated to a DaemonSet. + // To avoid a situation during a cluster upgrade where two instances + // of fluentd are running on a node, the kubelet needs to mark nodes on which + // fluentd is not running as a manifest pod with LabelFluentdDsReady. + LabelFluentdDsReady = "alpha.kubernetes.io/fluentd-ds-ready" + + // When the TaintBasedEvictions=true feature gate is enabled, + // TaintNodeNotReady is automatically added by the node controller + // when a node is not ready, and removed when the node becomes ready. + TaintNodeNotReady = "node.alpha.kubernetes.io/notReady" + + // When the TaintBasedEvictions=true feature gate is enabled, + // TaintNodeUnreachable is automatically added by the node controller + // when a node becomes unreachable (corresponding to NodeReady status ConditionUnknown) + // and removed when the node becomes reachable (NodeReady status ConditionTrue). + TaintNodeUnreachable = "node.alpha.kubernetes.io/unreachable" ) // Role labels are applied to Nodes to mark their purpose.
In particular, we diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..b5f7360e26 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -0,0 +1,554 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + reflect "reflect" +) + +// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. +func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { + return []conversion.GeneratedDeepCopyFunc{ + {Fn: DeepCopy_v1_APIGroup, InType: reflect.TypeOf(&APIGroup{})}, + {Fn: DeepCopy_v1_APIGroupList, InType: reflect.TypeOf(&APIGroupList{})}, + {Fn: DeepCopy_v1_APIResource, InType: reflect.TypeOf(&APIResource{})}, + {Fn: DeepCopy_v1_APIResourceList, InType: reflect.TypeOf(&APIResourceList{})}, + {Fn: DeepCopy_v1_APIVersions, InType: reflect.TypeOf(&APIVersions{})}, + {Fn: DeepCopy_v1_DeleteOptions, InType: reflect.TypeOf(&DeleteOptions{})}, + {Fn: DeepCopy_v1_Duration, InType: reflect.TypeOf(&Duration{})}, + {Fn: DeepCopy_v1_ExportOptions, InType: reflect.TypeOf(&ExportOptions{})}, + {Fn: DeepCopy_v1_GetOptions, InType: reflect.TypeOf(&GetOptions{})}, + {Fn: DeepCopy_v1_GroupKind, InType: reflect.TypeOf(&GroupKind{})}, + {Fn: DeepCopy_v1_GroupResource, InType: reflect.TypeOf(&GroupResource{})}, + {Fn: DeepCopy_v1_GroupVersion, InType: reflect.TypeOf(&GroupVersion{})}, + {Fn: DeepCopy_v1_GroupVersionForDiscovery, InType: reflect.TypeOf(&GroupVersionForDiscovery{})}, + {Fn: DeepCopy_v1_GroupVersionKind, InType: reflect.TypeOf(&GroupVersionKind{})}, + {Fn: DeepCopy_v1_GroupVersionResource, InType: reflect.TypeOf(&GroupVersionResource{})}, + {Fn: DeepCopy_v1_InternalEvent, InType: reflect.TypeOf(&InternalEvent{})}, + {Fn: DeepCopy_v1_LabelSelector, InType: reflect.TypeOf(&LabelSelector{})}, + {Fn: DeepCopy_v1_LabelSelectorRequirement, InType: reflect.TypeOf(&LabelSelectorRequirement{})}, + {Fn: DeepCopy_v1_ListMeta, InType: reflect.TypeOf(&ListMeta{})}, + {Fn: DeepCopy_v1_ListOptions, InType: reflect.TypeOf(&ListOptions{})}, + {Fn: DeepCopy_v1_ObjectMeta, InType: reflect.TypeOf(&ObjectMeta{})}, + {Fn: DeepCopy_v1_OwnerReference, InType: reflect.TypeOf(&OwnerReference{})}, + {Fn: DeepCopy_v1_Patch, InType: reflect.TypeOf(&Patch{})}, + {Fn: DeepCopy_v1_Preconditions, InType: reflect.TypeOf(&Preconditions{})}, + {Fn: DeepCopy_v1_RootPaths, InType: reflect.TypeOf(&RootPaths{})}, + {Fn: DeepCopy_v1_ServerAddressByClientCIDR, InType: reflect.TypeOf(&ServerAddressByClientCIDR{})}, + {Fn: DeepCopy_v1_Status, InType: reflect.TypeOf(&Status{})}, + {Fn: DeepCopy_v1_StatusCause, InType: 
reflect.TypeOf(&StatusCause{})}, + {Fn: DeepCopy_v1_StatusDetails, InType: reflect.TypeOf(&StatusDetails{})}, + {Fn: DeepCopy_v1_Time, InType: reflect.TypeOf(&Time{})}, + {Fn: DeepCopy_v1_Timestamp, InType: reflect.TypeOf(&Timestamp{})}, + {Fn: DeepCopy_v1_TypeMeta, InType: reflect.TypeOf(&TypeMeta{})}, + {Fn: DeepCopy_v1_WatchEvent, InType: reflect.TypeOf(&WatchEvent{})}, + } +} + +func DeepCopy_v1_APIGroup(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*APIGroup) + out := out.(*APIGroup) + *out = *in + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]GroupVersionForDiscovery, len(*in)) + copy(*out, *in) + } + if in.ServerAddressByClientCIDRs != nil { + in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_APIGroupList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*APIGroupList) + out := out.(*APIGroupList) + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]APIGroup, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*APIGroup) + } + } + } + return nil + } +} + +func DeepCopy_v1_APIResource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*APIResource) + out := out.(*APIResource) + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make(Verbs, len(*in)) + copy(*out, *in) + } + if in.ShortNames != nil { + in, out := &in.ShortNames, &out.ShortNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_APIResourceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*APIResourceList) + out := out.(*APIResourceList) + *out = *in + if in.APIResources != nil { + in, out := &in.APIResources, &out.APIResources + *out = make([]APIResource, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*APIResource) + } + } + } + return nil + } +} + +func DeepCopy_v1_APIVersions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*APIVersions) + out := out.(*APIVersions) + *out = *in + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ServerAddressByClientCIDRs != nil { + in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_DeleteOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeleteOptions) + out := out.(*DeleteOptions) + *out = *in + if in.GracePeriodSeconds != nil { + in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Preconditions != nil { + in, out := &in.Preconditions, &out.Preconditions + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*Preconditions) + } + } + if in.OrphanDependents != nil { + in, out := &in.OrphanDependents, &out.OrphanDependents + *out = new(bool) + **out = **in + } + if in.PropagationPolicy != nil { + in, out := &in.PropagationPolicy, &out.PropagationPolicy + *out = new(DeletionPropagation) + **out = **in + } + return nil + } +} + +func 
DeepCopy_v1_Duration(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Duration) + out := out.(*Duration) + *out = *in + return nil + } +} + +func DeepCopy_v1_ExportOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExportOptions) + out := out.(*ExportOptions) + *out = *in + return nil + } +} + +func DeepCopy_v1_GetOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GetOptions) + out := out.(*GetOptions) + *out = *in + return nil + } +} + +func DeepCopy_v1_GroupKind(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GroupKind) + out := out.(*GroupKind) + *out = *in + return nil + } +} + +func DeepCopy_v1_GroupResource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GroupResource) + out := out.(*GroupResource) + *out = *in + return nil + } +} + +func DeepCopy_v1_GroupVersion(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GroupVersion) + out := out.(*GroupVersion) + *out = *in + return nil + } +} + +func DeepCopy_v1_GroupVersionForDiscovery(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GroupVersionForDiscovery) + out := out.(*GroupVersionForDiscovery) + *out = *in + return nil + } +} + +func DeepCopy_v1_GroupVersionKind(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GroupVersionKind) + out := out.(*GroupVersionKind) + *out = *in + return nil + } +} + +func DeepCopy_v1_GroupVersionResource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GroupVersionResource) + out := out.(*GroupVersionResource) + *out = *in + return nil + } +} + +func DeepCopy_v1_InternalEvent(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*InternalEvent) + out := out.(*InternalEvent) + *out = *in + // in.Object is kind 'Interface' + if in.Object != nil { + if newVal, err := c.DeepCopy(&in.Object); err != nil { + return err + } else { + out.Object = *newVal.(*runtime.Object) + } + } + return nil + } +} + +func DeepCopy_v1_LabelSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LabelSelector) + out := out.(*LabelSelector) + *out = *in + if in.MatchLabels != nil { + in, out := &in.MatchLabels, &out.MatchLabels + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]LabelSelectorRequirement, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*LabelSelectorRequirement) + } + } + } + return nil + } +} + +func DeepCopy_v1_LabelSelectorRequirement(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LabelSelectorRequirement) + out := out.(*LabelSelectorRequirement) + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_ListMeta(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ListMeta) + out := out.(*ListMeta) + *out = *in + return nil + } +} + +func DeepCopy_v1_ListOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ListOptions) + out := out.(*ListOptions) + *out = *in + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, 
&out.TimeoutSeconds + *out = new(int64) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_ObjectMeta(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMeta) + out := out.(*ObjectMeta) + *out = *in + out.CreationTimestamp = in.CreationTimestamp.DeepCopy() + if in.DeletionTimestamp != nil { + in, out := &in.DeletionTimestamp, &out.DeletionTimestamp + *out = new(Time) + **out = (*in).DeepCopy() + } + if in.DeletionGracePeriodSeconds != nil { + in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]OwnerReference, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*OwnerReference) + } + } + } + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_OwnerReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*OwnerReference) + out := out.(*OwnerReference) + *out = *in + if in.Controller != nil { + in, out := &in.Controller, &out.Controller + *out = new(bool) + **out = **in + } + if in.BlockOwnerDeletion != nil { + in, out := &in.BlockOwnerDeletion, &out.BlockOwnerDeletion + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_Patch(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Patch) + out := out.(*Patch) + *out = *in + return nil + } +} + +func DeepCopy_v1_Preconditions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Preconditions) + out := out.(*Preconditions) + *out = *in + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(types.UID) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_RootPaths(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RootPaths) + out := out.(*RootPaths) + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_ServerAddressByClientCIDR(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServerAddressByClientCIDR) + out := out.(*ServerAddressByClientCIDR) + *out = *in + return nil + } +} + +func DeepCopy_v1_Status(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Status) + out := out.(*Status) + *out = *in + if in.Details != nil { + in, out := &in.Details, &out.Details + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*StatusDetails) + } + } + return nil + } +} + +func DeepCopy_v1_StatusCause(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatusCause) + out := out.(*StatusCause) + *out = *in + return nil + } +} + +func DeepCopy_v1_StatusDetails(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatusDetails) + out := out.(*StatusDetails) + *out = *in + if in.Causes != nil { + in, out := &in.Causes, &out.Causes + *out = 
make([]StatusCause, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_Time(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Time) + out := out.(*Time) + *out = in.DeepCopy() + return nil + } +} + +func DeepCopy_v1_Timestamp(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Timestamp) + out := out.(*Timestamp) + *out = *in + return nil + } +} + +func DeepCopy_v1_TypeMeta(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TypeMeta) + out := out.(*TypeMeta) + *out = *in + return nil + } +} + +func DeepCopy_v1_WatchEvent(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*WatchEvent) + out := out.(*WatchEvent) + *out = *in + if newVal, err := c.DeepCopy(&in.Object); err != nil { + return err + } else { + out.Object = *newVal.(*runtime.RawExtension) + } + return nil + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go new file mode 100644 index 0000000000..6df448eb9f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
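// ----------------------------------------------------------------------------
// Editorial aside, not part of the vendored patch: a small sketch of how the
// generated deep-copy functions above are typically driven. It assumes that
// conversion.NewCloner() is available from the cloner.go file this patch moves
// into k8s.io/apimachinery/pkg/conversion, and that ObjectMeta lives in the
// same metav1 package as the generated code.
// ----------------------------------------------------------------------------
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/conversion"
)

func main() {
	in := metav1.ObjectMeta{
		Name:   "demo",
		Labels: map[string]string{"app": "web"},
	}
	out := metav1.ObjectMeta{}

	// The generated functions take interface{} arguments plus a *Cloner, which
	// DeepCopy_v1_ObjectMeta only consults for nested types such as
	// OwnerReference; maps and slices are copied inline, as shown above.
	if err := metav1.DeepCopy_v1_ObjectMeta(&in, &out, conversion.NewCloner()); err != nil {
		panic(err)
	}

	// The copy owns its own map, so mutating it leaves the original untouched.
	out.Labels["app"] = "db"
	fmt.Println(in.Labels["app"], out.Labels["app"]) // web db
}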
+func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/OWNERS b/vendor/k8s.io/apimachinery/pkg/conversion/OWNERS new file mode 100644 index 0000000000..c25e7faf2d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/OWNERS @@ -0,0 +1,10 @@ +approvers: +- derekwaynecarr +- lavalamp +- smarterclayton +- wojtek-t +reviewers: +- derekwaynecarr +- lavalamp +- smarterclayton +- wojtek-t diff --git a/vendor/k8s.io/kubernetes/pkg/conversion/cloner.go b/vendor/k8s.io/apimachinery/pkg/conversion/cloner.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/conversion/cloner.go rename to vendor/k8s.io/apimachinery/pkg/conversion/cloner.go diff --git a/vendor/k8s.io/kubernetes/pkg/conversion/converter.go b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/conversion/converter.go rename to vendor/k8s.io/apimachinery/pkg/conversion/converter.go diff --git a/vendor/k8s.io/kubernetes/pkg/conversion/deep_equal.go b/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go similarity index 94% rename from vendor/k8s.io/kubernetes/pkg/conversion/deep_equal.go rename to vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go index 6bfc870a37..f21abe1e53 100644 --- a/vendor/k8s.io/kubernetes/pkg/conversion/deep_equal.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go @@ -17,7 +17,7 @@ limitations under the License. package conversion import ( - "k8s.io/kubernetes/third_party/forked/golang/reflect" + "k8s.io/apimachinery/third_party/forked/golang/reflect" ) // The code for this type must be located in third_party, since it forks from diff --git a/vendor/k8s.io/kubernetes/pkg/conversion/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/doc.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/conversion/doc.go rename to vendor/k8s.io/apimachinery/pkg/conversion/doc.go diff --git a/vendor/k8s.io/kubernetes/pkg/conversion/helper.go b/vendor/k8s.io/apimachinery/pkg/conversion/helper.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/conversion/helper.go rename to vendor/k8s.io/apimachinery/pkg/conversion/helper.go diff --git a/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/convert.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/conversion/queryparams/convert.go rename to vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go diff --git a/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/conversion/queryparams/doc.go rename to vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go diff --git a/vendor/k8s.io/kubernetes/pkg/fields/doc.go b/vendor/k8s.io/apimachinery/pkg/fields/doc.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/fields/doc.go rename to vendor/k8s.io/apimachinery/pkg/fields/doc.go diff --git a/vendor/k8s.io/kubernetes/pkg/fields/fields.go b/vendor/k8s.io/apimachinery/pkg/fields/fields.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/fields/fields.go rename to vendor/k8s.io/apimachinery/pkg/fields/fields.go diff --git a/vendor/k8s.io/kubernetes/pkg/fields/requirements.go b/vendor/k8s.io/apimachinery/pkg/fields/requirements.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/fields/requirements.go rename to 
vendor/k8s.io/apimachinery/pkg/fields/requirements.go index 33c6e4e1a8..70d94ded88 100644 --- a/vendor/k8s.io/kubernetes/pkg/fields/requirements.go +++ b/vendor/k8s.io/apimachinery/pkg/fields/requirements.go @@ -16,7 +16,7 @@ limitations under the License. package fields -import "k8s.io/kubernetes/pkg/selection" +import "k8s.io/apimachinery/pkg/selection" // Requirements is AND of all requirements. type Requirements []Requirement diff --git a/vendor/k8s.io/apimachinery/pkg/fields/selector.go b/vendor/k8s.io/apimachinery/pkg/fields/selector.go new file mode 100644 index 0000000000..bb156b4cba --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/fields/selector.go @@ -0,0 +1,413 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fields + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/selection" +) + +// Selector represents a field selector. +type Selector interface { + // Matches returns true if this selector matches the given set of fields. + Matches(Fields) bool + + // Empty returns true if this selector does not restrict the selection space. + Empty() bool + + // RequiresExactMatch allows a caller to introspect whether a given selector + // requires a single specific field to be set, and if so returns the value it + // requires. + RequiresExactMatch(field string) (value string, found bool) + + // Transform returns a new copy of the selector after TransformFunc has been + // applied to the entire selector, or an error if fn returns an error. + Transform(fn TransformFunc) (Selector, error) + + // Requirements converts this interface to Requirements to expose + // more detailed selection information. + Requirements() Requirements + + // String returns a human readable string that represents this selector. + String() string +} + +// Everything returns a selector that matches all fields. 
+func Everything() Selector { + return andTerm{} +} + +type hasTerm struct { + field, value string +} + +func (t *hasTerm) Matches(ls Fields) bool { + return ls.Get(t.field) == t.value +} + +func (t *hasTerm) Empty() bool { + return false +} + +func (t *hasTerm) RequiresExactMatch(field string) (value string, found bool) { + if t.field == field { + return t.value, true + } + return "", false +} + +func (t *hasTerm) Transform(fn TransformFunc) (Selector, error) { + field, value, err := fn(t.field, t.value) + if err != nil { + return nil, err + } + return &hasTerm{field, value}, nil +} + +func (t *hasTerm) Requirements() Requirements { + return []Requirement{{ + Field: t.field, + Operator: selection.Equals, + Value: t.value, + }} +} + +func (t *hasTerm) String() string { + return fmt.Sprintf("%v=%v", t.field, EscapeValue(t.value)) +} + +type notHasTerm struct { + field, value string +} + +func (t *notHasTerm) Matches(ls Fields) bool { + return ls.Get(t.field) != t.value +} + +func (t *notHasTerm) Empty() bool { + return false +} + +func (t *notHasTerm) RequiresExactMatch(field string) (value string, found bool) { + return "", false +} + +func (t *notHasTerm) Transform(fn TransformFunc) (Selector, error) { + field, value, err := fn(t.field, t.value) + if err != nil { + return nil, err + } + return ¬HasTerm{field, value}, nil +} + +func (t *notHasTerm) Requirements() Requirements { + return []Requirement{{ + Field: t.field, + Operator: selection.NotEquals, + Value: t.value, + }} +} + +func (t *notHasTerm) String() string { + return fmt.Sprintf("%v!=%v", t.field, EscapeValue(t.value)) +} + +type andTerm []Selector + +func (t andTerm) Matches(ls Fields) bool { + for _, q := range t { + if !q.Matches(ls) { + return false + } + } + return true +} + +func (t andTerm) Empty() bool { + if t == nil { + return true + } + if len([]Selector(t)) == 0 { + return true + } + for i := range t { + if !t[i].Empty() { + return false + } + } + return true +} + +func (t andTerm) RequiresExactMatch(field string) (string, bool) { + if t == nil || len([]Selector(t)) == 0 { + return "", false + } + for i := range t { + if value, found := t[i].RequiresExactMatch(field); found { + return value, found + } + } + return "", false +} + +func (t andTerm) Transform(fn TransformFunc) (Selector, error) { + next := make([]Selector, len([]Selector(t))) + for i, s := range []Selector(t) { + n, err := s.Transform(fn) + if err != nil { + return nil, err + } + next[i] = n + } + return andTerm(next), nil +} + +func (t andTerm) Requirements() Requirements { + reqs := make([]Requirement, 0, len(t)) + for _, s := range []Selector(t) { + rs := s.Requirements() + reqs = append(reqs, rs...) + } + return reqs +} + +func (t andTerm) String() string { + var terms []string + for _, q := range t { + terms = append(terms, q.String()) + } + return strings.Join(terms, ",") +} + +// SelectorFromSet returns a Selector which will match exactly the given Set. A +// nil Set is considered equivalent to Everything(). 
+func SelectorFromSet(ls Set) Selector { + if ls == nil { + return Everything() + } + items := make([]Selector, 0, len(ls)) + for field, value := range ls { + items = append(items, &hasTerm{field: field, value: value}) + } + if len(items) == 1 { + return items[0] + } + return andTerm(items) +} + +// valueEscaper prefixes \,= characters with a backslash +var valueEscaper = strings.NewReplacer( + // escape \ characters + `\`, `\\`, + // then escape , and = characters to allow unambiguous parsing of the value in a fieldSelector + `,`, `\,`, + `=`, `\=`, +) + +// Escapes an arbitrary literal string for use as a fieldSelector value +func EscapeValue(s string) string { + return valueEscaper.Replace(s) +} + +// InvalidEscapeSequence indicates an error occurred unescaping a field selector +type InvalidEscapeSequence struct { + sequence string +} + +func (i InvalidEscapeSequence) Error() string { + return fmt.Sprintf("invalid field selector: invalid escape sequence: %s", i.sequence) +} + +// UnescapedRune indicates an error occurred unescaping a field selector +type UnescapedRune struct { + r rune +} + +func (i UnescapedRune) Error() string { + return fmt.Sprintf("invalid field selector: unescaped character in value: %v", i.r) +} + +// Unescapes a fieldSelector value and returns the original literal value. +// May return the original string if it contains no escaped or special characters. +func UnescapeValue(s string) (string, error) { + // if there's no escaping or special characters, just return to avoid allocation + if !strings.ContainsAny(s, `\,=`) { + return s, nil + } + + v := bytes.NewBuffer(make([]byte, 0, len(s))) + inSlash := false + for _, c := range s { + if inSlash { + switch c { + case '\\', ',', '=': + // omit the \ for recognized escape sequences + v.WriteRune(c) + default: + // error on unrecognized escape sequences + return "", InvalidEscapeSequence{sequence: string([]rune{'\\', c})} + } + inSlash = false + continue + } + + switch c { + case '\\': + inSlash = true + case ',', '=': + // unescaped , and = characters are not allowed in field selector values + return "", UnescapedRune{r: c} + default: + v.WriteRune(c) + } + } + + // Ending with a single backslash is an invalid sequence + if inSlash { + return "", InvalidEscapeSequence{sequence: "\\"} + } + + return v.String(), nil +} + +// ParseSelectorOrDie takes a string representing a selector and returns an +// object suitable for matching, or panic when an error occur. +func ParseSelectorOrDie(s string) Selector { + selector, err := ParseSelector(s) + if err != nil { + panic(err) + } + return selector +} + +// ParseSelector takes a string representing a selector and returns an +// object suitable for matching, or an error. +func ParseSelector(selector string) (Selector, error) { + return parseSelector(selector, + func(lhs, rhs string) (newLhs, newRhs string, err error) { + return lhs, rhs, nil + }) +} + +// Parses the selector and runs them through the given TransformFunc. +func ParseAndTransformSelector(selector string, fn TransformFunc) (Selector, error) { + return parseSelector(selector, fn) +} + +// Function to transform selectors. +type TransformFunc func(field, value string) (newField, newValue string, err error) + +// splitTerms returns the comma-separated terms contained in the given fieldSelector. +// Backslash-escaped commas are treated as data instead of delimiters, and are included in the returned terms, with the leading backslash preserved. 
+func splitTerms(fieldSelector string) []string { + if len(fieldSelector) == 0 { + return nil + } + + terms := make([]string, 0, 1) + startIndex := 0 + inSlash := false + for i, c := range fieldSelector { + switch { + case inSlash: + inSlash = false + case c == '\\': + inSlash = true + case c == ',': + terms = append(terms, fieldSelector[startIndex:i]) + startIndex = i + 1 + } + } + + terms = append(terms, fieldSelector[startIndex:]) + + return terms +} + +const ( + notEqualOperator = "!=" + doubleEqualOperator = "==" + equalOperator = "=" +) + +// termOperators holds the recognized operators supported in fieldSelectors. +// doubleEqualOperator and equal are equivalent, but doubleEqualOperator is checked first +// to avoid leaving a leading = character on the rhs value. +var termOperators = []string{notEqualOperator, doubleEqualOperator, equalOperator} + +// splitTerm returns the lhs, operator, and rhs parsed from the given term, along with an indicator of whether the parse was successful. +// no escaping of special characters is supported in the lhs value, so the first occurance of a recognized operator is used as the split point. +// the literal rhs is returned, and the caller is responsible for applying any desired unescaping. +func splitTerm(term string) (lhs, op, rhs string, ok bool) { + for i := range term { + remaining := term[i:] + for _, op := range termOperators { + if strings.HasPrefix(remaining, op) { + return term[0:i], op, term[i+len(op):], true + } + } + } + return "", "", "", false +} + +func parseSelector(selector string, fn TransformFunc) (Selector, error) { + parts := splitTerms(selector) + sort.StringSlice(parts).Sort() + var items []Selector + for _, part := range parts { + if part == "" { + continue + } + lhs, op, rhs, ok := splitTerm(part) + if !ok { + return nil, fmt.Errorf("invalid selector: '%s'; can't understand '%s'", selector, part) + } + unescapedRHS, err := UnescapeValue(rhs) + if err != nil { + return nil, err + } + switch op { + case notEqualOperator: + items = append(items, ¬HasTerm{field: lhs, value: unescapedRHS}) + case doubleEqualOperator: + items = append(items, &hasTerm{field: lhs, value: unescapedRHS}) + case equalOperator: + items = append(items, &hasTerm{field: lhs, value: unescapedRHS}) + default: + return nil, fmt.Errorf("invalid selector: '%s'; can't understand '%s'", selector, part) + } + } + if len(items) == 1 { + return items[0].Transform(fn) + } + return andTerm(items).Transform(fn) +} + +// OneTermEqualSelector returns an object that matches objects where one field/field equals one value. +// Cannot return an error. 
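// ----------------------------------------------------------------------------
// Editorial aside, not part of the vendored patch: a minimal sketch of the
// field selector API defined in this file, assuming the package is vendored as
// k8s.io/apimachinery/pkg/fields and that fields.Set (from the renamed
// fields.go) is the usual map[string]string implementation of Fields. The
// field names are only examples.
// ----------------------------------------------------------------------------
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	// "=" and "==" build hasTerm requirements, "!=" builds a notHasTerm; commas
	// separate terms and can be escaped inside values with a backslash, as
	// handled by EscapeValue/UnescapeValue above.
	sel, err := fields.ParseSelector("metadata.namespace==default,status.phase!=Failed")
	if err != nil {
		panic(err)
	}

	pod := fields.Set{
		"metadata.namespace": "default",
		"status.phase":       "Running",
	}
	fmt.Println(sel.Matches(pod)) // true

	// RequiresExactMatch reports the single value a field is pinned to, if any.
	v, ok := sel.RequiresExactMatch("metadata.namespace")
	fmt.Println(v, ok) // default true
}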
+func OneTermEqualSelector(k, v string) Selector { + return &hasTerm{field: k, value: v} +} + +// AndSelectors creates a selector that is the logical AND of all the given selectors +func AndSelectors(selectors ...Selector) Selector { + return andTerm(selectors) +} diff --git a/vendor/k8s.io/kubernetes/pkg/labels/doc.go b/vendor/k8s.io/apimachinery/pkg/labels/doc.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/labels/doc.go rename to vendor/k8s.io/apimachinery/pkg/labels/doc.go diff --git a/vendor/k8s.io/kubernetes/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/labels/labels.go rename to vendor/k8s.io/apimachinery/pkg/labels/labels.go diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go new file mode 100644 index 0000000000..9bddc35a60 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -0,0 +1,836 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package labels + +import ( + "bytes" + "fmt" + "sort" + "strconv" + "strings" + + "github.com/golang/glog" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" +) + +// Requirements is AND of all requirements. +type Requirements []Requirement + +// Selector represents a label selector. +type Selector interface { + // Matches returns true if this selector matches the given set of labels. + Matches(Labels) bool + + // Empty returns true if this selector does not restrict the selection space. + Empty() bool + + // String returns a human readable string that represents this selector. + String() string + + // Add adds requirements to the Selector + Add(r ...Requirement) Selector + + // Requirements converts this interface into Requirements to expose + // more detailed selection information. + // If there are querying parameters, it will return converted requirements and selectable=true. + // If this selector doesn't want to select anything, it will return selectable=false. + Requirements() (requirements Requirements, selectable bool) +} + +// Everything returns a selector that matches all labels. 
+func Everything() Selector { + return internalSelector{} +} + +type nothingSelector struct{} + +func (n nothingSelector) Matches(_ Labels) bool { return false } +func (n nothingSelector) Empty() bool { return false } +func (n nothingSelector) String() string { return "" } +func (n nothingSelector) Add(_ ...Requirement) Selector { return n } +func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false } + +// Nothing returns a selector that matches no labels +func Nothing() Selector { + return nothingSelector{} +} + +func NewSelector() Selector { + return internalSelector(nil) +} + +type internalSelector []Requirement + +// Sort by key to obtain determisitic parser +type ByKey []Requirement + +func (a ByKey) Len() int { return len(a) } + +func (a ByKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func (a ByKey) Less(i, j int) bool { return a[i].key < a[j].key } + +// Requirement contains values, a key, and an operator that relates the key and values. +// The zero value of Requirement is invalid. +// Requirement implements both set based match and exact match +// Requirement should be initialized via NewRequirement constructor for creating a valid Requirement. +type Requirement struct { + key string + operator selection.Operator + // In huge majority of cases we have at most one value here. + // It is generally faster to operate on a single-element slice + // than on a single-element map, so we have a slice here. + strValues []string +} + +// NewRequirement is the constructor for a Requirement. +// If any of these rules is violated, an error is returned: +// (1) The operator can only be In, NotIn, Equals, DoubleEquals, NotEquals, Exists, or DoesNotExist. +// (2) If the operator is In or NotIn, the values set must be non-empty. +// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. +// (4) If the operator is Exists or DoesNotExist, the value set must be empty. +// (5) If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. +// (6) The key is invalid due to its length, or sequence +// of characters. See validateLabelKey for more details. +// +// The empty string is a valid value in the input values set. 
+func NewRequirement(key string, op selection.Operator, vals []string) (*Requirement, error) { + if err := validateLabelKey(key); err != nil { + return nil, err + } + switch op { + case selection.In, selection.NotIn: + if len(vals) == 0 { + return nil, fmt.Errorf("for 'in', 'notin' operators, values set can't be empty") + } + case selection.Equals, selection.DoubleEquals, selection.NotEquals: + if len(vals) != 1 { + return nil, fmt.Errorf("exact-match compatibility requires one single value") + } + case selection.Exists, selection.DoesNotExist: + if len(vals) != 0 { + return nil, fmt.Errorf("values set must be empty for exists and does not exist") + } + case selection.GreaterThan, selection.LessThan: + if len(vals) != 1 { + return nil, fmt.Errorf("for 'Gt', 'Lt' operators, exactly one value is required") + } + for i := range vals { + if _, err := strconv.ParseInt(vals[i], 10, 64); err != nil { + return nil, fmt.Errorf("for 'Gt', 'Lt' operators, the value must be an integer") + } + } + default: + return nil, fmt.Errorf("operator '%v' is not recognized", op) + } + + for i := range vals { + if err := validateLabelValue(vals[i]); err != nil { + return nil, err + } + } + sort.Strings(vals) + return &Requirement{key: key, operator: op, strValues: vals}, nil +} + +func (r *Requirement) hasValue(value string) bool { + for i := range r.strValues { + if r.strValues[i] == value { + return true + } + } + return false +} + +// Matches returns true if the Requirement matches the input Labels. +// There is a match in the following cases: +// (1) The operator is Exists and Labels has the Requirement's key. +// (2) The operator is In, Labels has the Requirement's key and Labels' +// value for that key is in Requirement's value set. +// (3) The operator is NotIn, Labels has the Requirement's key and +// Labels' value for that key is not in Requirement's value set. +// (4) The operator is DoesNotExist or NotIn and Labels does not have the +// Requirement's key. +// (5) The operator is GreaterThanOperator or LessThanOperator, and Labels has +// the Requirement's key and the corresponding value satisfies mathematical inequality. +func (r *Requirement) Matches(ls Labels) bool { + switch r.operator { + case selection.In, selection.Equals, selection.DoubleEquals: + if !ls.Has(r.key) { + return false + } + return r.hasValue(ls.Get(r.key)) + case selection.NotIn, selection.NotEquals: + if !ls.Has(r.key) { + return true + } + return !r.hasValue(ls.Get(r.key)) + case selection.Exists: + return ls.Has(r.key) + case selection.DoesNotExist: + return !ls.Has(r.key) + case selection.GreaterThan, selection.LessThan: + if !ls.Has(r.key) { + return false + } + lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64) + if err != nil { + glog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) + return false + } + + // There should be only one strValue in r.strValues, and can be converted to a integer. 
+ if len(r.strValues) != 1 { + glog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) + return false + } + + var rValue int64 + for i := range r.strValues { + rValue, err = strconv.ParseInt(r.strValues[i], 10, 64) + if err != nil { + glog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) + return false + } + } + return (r.operator == selection.GreaterThan && lsValue > rValue) || (r.operator == selection.LessThan && lsValue < rValue) + default: + return false + } +} + +func (r *Requirement) Key() string { + return r.key +} +func (r *Requirement) Operator() selection.Operator { + return r.operator +} +func (r *Requirement) Values() sets.String { + ret := sets.String{} + for i := range r.strValues { + ret.Insert(r.strValues[i]) + } + return ret +} + +// Return true if the internalSelector doesn't restrict selection space +func (lsel internalSelector) Empty() bool { + if lsel == nil { + return true + } + return len(lsel) == 0 +} + +// String returns a human-readable string that represents this +// Requirement. If called on an invalid Requirement, an error is +// returned. See NewRequirement for creating a valid Requirement. +func (r *Requirement) String() string { + var buffer bytes.Buffer + if r.operator == selection.DoesNotExist { + buffer.WriteString("!") + } + buffer.WriteString(r.key) + + switch r.operator { + case selection.Equals: + buffer.WriteString("=") + case selection.DoubleEquals: + buffer.WriteString("==") + case selection.NotEquals: + buffer.WriteString("!=") + case selection.In: + buffer.WriteString(" in ") + case selection.NotIn: + buffer.WriteString(" notin ") + case selection.GreaterThan: + buffer.WriteString(">") + case selection.LessThan: + buffer.WriteString("<") + case selection.Exists, selection.DoesNotExist: + return buffer.String() + } + + switch r.operator { + case selection.In, selection.NotIn: + buffer.WriteString("(") + } + if len(r.strValues) == 1 { + buffer.WriteString(r.strValues[0]) + } else { // only > 1 since == 0 prohibited by NewRequirement + buffer.WriteString(strings.Join(r.strValues, ",")) + } + + switch r.operator { + case selection.In, selection.NotIn: + buffer.WriteString(")") + } + return buffer.String() +} + +// Add adds requirements to the selector. It copies the current selector returning a new one +func (lsel internalSelector) Add(reqs ...Requirement) Selector { + var sel internalSelector + for ix := range lsel { + sel = append(sel, lsel[ix]) + } + for _, r := range reqs { + sel = append(sel, r) + } + sort.Sort(ByKey(sel)) + return sel +} + +// Matches for a internalSelector returns true if all +// its Requirements match the input Labels. If any +// Requirement does not match, false is returned. +func (lsel internalSelector) Matches(l Labels) bool { + for ix := range lsel { + if matches := lsel[ix].Matches(l); !matches { + return false + } + } + return true +} + +func (lsel internalSelector) Requirements() (Requirements, bool) { return Requirements(lsel), true } + +// String returns a comma-separated string of all +// the internalSelector Requirements' human-readable strings. 
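// ----------------------------------------------------------------------------
// Editorial aside, not part of the vendored patch: a short sketch of building a
// label selector from Requirement values using only constructors defined in
// this file (NewRequirement, NewSelector, Add) plus the selection package. It
// assumes labels.Set from the renamed labels.go is the usual map[string]string
// type; the keys and values are illustrative.
// ----------------------------------------------------------------------------
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	env, err := labels.NewRequirement("env", selection.In, []string{"prod", "staging"})
	if err != nil {
		panic(err)
	}
	replicas, err := labels.NewRequirement("replicas", selection.GreaterThan, []string{"2"})
	if err != nil {
		panic(err)
	}

	// Add copies the selector and keeps requirements sorted by key.
	sel := labels.NewSelector().Add(*env, *replicas)
	fmt.Println(sel) // env in (prod,staging),replicas>2

	fmt.Println(sel.Matches(labels.Set{"env": "prod", "replicas": "3"})) // true
	fmt.Println(sel.Matches(labels.Set{"env": "dev", "replicas": "5"}))  // false
}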
+func (lsel internalSelector) String() string { + var reqs []string + for ix := range lsel { + reqs = append(reqs, lsel[ix].String()) + } + return strings.Join(reqs, ",") +} + +// constants definition for lexer token +type Token int + +const ( + ErrorToken Token = iota + EndOfStringToken + ClosedParToken + CommaToken + DoesNotExistToken + DoubleEqualsToken + EqualsToken + GreaterThanToken + IdentifierToken // to represent keys and values + InToken + LessThanToken + NotEqualsToken + NotInToken + OpenParToken +) + +// string2token contains the mapping between lexer Token and token literal +// (except IdentifierToken, EndOfStringToken and ErrorToken since it makes no sense) +var string2token = map[string]Token{ + ")": ClosedParToken, + ",": CommaToken, + "!": DoesNotExistToken, + "==": DoubleEqualsToken, + "=": EqualsToken, + ">": GreaterThanToken, + "in": InToken, + "<": LessThanToken, + "!=": NotEqualsToken, + "notin": NotInToken, + "(": OpenParToken, +} + +// The item produced by the lexer. It contains the Token and the literal. +type ScannedItem struct { + tok Token + literal string +} + +// isWhitespace returns true if the rune is a space, tab, or newline. +func isWhitespace(ch byte) bool { + return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n' +} + +// isSpecialSymbol detect if the character ch can be an operator +func isSpecialSymbol(ch byte) bool { + switch ch { + case '=', '!', '(', ')', ',', '>', '<': + return true + } + return false +} + +// Lexer represents the Lexer struct for label selector. +// It contains necessary informationt to tokenize the input string +type Lexer struct { + // s stores the string to be tokenized + s string + // pos is the position currently tokenized + pos int +} + +// read return the character currently lexed +// increment the position and check the buffer overflow +func (l *Lexer) read() (b byte) { + b = 0 + if l.pos < len(l.s) { + b = l.s[l.pos] + l.pos++ + } + return b +} + +// unread 'undoes' the last read character +func (l *Lexer) unread() { + l.pos-- +} + +// scanIdOrKeyword scans string to recognize literal token (for example 'in') or an identifier. +func (l *Lexer) scanIdOrKeyword() (tok Token, lit string) { + var buffer []byte +IdentifierLoop: + for { + switch ch := l.read(); { + case ch == 0: + break IdentifierLoop + case isSpecialSymbol(ch) || isWhitespace(ch): + l.unread() + break IdentifierLoop + default: + buffer = append(buffer, ch) + } + } + s := string(buffer) + if val, ok := string2token[s]; ok { // is a literal token? + return val, s + } + return IdentifierToken, s // otherwise is an identifier +} + +// scanSpecialSymbol scans string starting with special symbol. +// special symbol identify non literal operators. 
"!=", "==", "=" +func (l *Lexer) scanSpecialSymbol() (Token, string) { + lastScannedItem := ScannedItem{} + var buffer []byte +SpecialSymbolLoop: + for { + switch ch := l.read(); { + case ch == 0: + break SpecialSymbolLoop + case isSpecialSymbol(ch): + buffer = append(buffer, ch) + if token, ok := string2token[string(buffer)]; ok { + lastScannedItem = ScannedItem{tok: token, literal: string(buffer)} + } else if lastScannedItem.tok != 0 { + l.unread() + break SpecialSymbolLoop + } + default: + l.unread() + break SpecialSymbolLoop + } + } + if lastScannedItem.tok == 0 { + return ErrorToken, fmt.Sprintf("error expected: keyword found '%s'", buffer) + } + return lastScannedItem.tok, lastScannedItem.literal +} + +// skipWhiteSpaces consumes all blank characters +// returning the first non blank character +func (l *Lexer) skipWhiteSpaces(ch byte) byte { + for { + if !isWhitespace(ch) { + return ch + } + ch = l.read() + } +} + +// Lex returns a pair of Token and the literal +// literal is meaningfull only for IdentifierToken token +func (l *Lexer) Lex() (tok Token, lit string) { + switch ch := l.skipWhiteSpaces(l.read()); { + case ch == 0: + return EndOfStringToken, "" + case isSpecialSymbol(ch): + l.unread() + return l.scanSpecialSymbol() + default: + l.unread() + return l.scanIdOrKeyword() + } +} + +// Parser data structure contains the label selector parser data structure +type Parser struct { + l *Lexer + scannedItems []ScannedItem + position int +} + +// Parser context represents context during parsing: +// some literal for example 'in' and 'notin' can be +// recognized as operator for example 'x in (a)' but +// it can be recognized as value for example 'value in (in)' +type ParserContext int + +const ( + KeyAndOperator ParserContext = iota + Values +) + +// lookahead func returns the current token and string. No increment of current position +func (p *Parser) lookahead(context ParserContext) (Token, string) { + tok, lit := p.scannedItems[p.position].tok, p.scannedItems[p.position].literal + if context == Values { + switch tok { + case InToken, NotInToken: + tok = IdentifierToken + } + } + return tok, lit +} + +// consume returns current token and string. Increments the the position +func (p *Parser) consume(context ParserContext) (Token, string) { + p.position++ + tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal + if context == Values { + switch tok { + case InToken, NotInToken: + tok = IdentifierToken + } + } + return tok, lit +} + +// scan runs through the input string and stores the ScannedItem in an array +// Parser can now lookahead and consume the tokens +func (p *Parser) scan() { + for { + token, literal := p.l.Lex() + p.scannedItems = append(p.scannedItems, ScannedItem{token, literal}) + if token == EndOfStringToken { + break + } + } +} + +// parse runs the left recursive descending algorithm +// on input string. It returns a list of Requirement objects. 
+func (p *Parser) parse() (internalSelector, error) { + p.scan() // init scannedItems + + var requirements internalSelector + for { + tok, lit := p.lookahead(Values) + switch tok { + case IdentifierToken, DoesNotExistToken: + r, err := p.parseRequirement() + if err != nil { + return nil, fmt.Errorf("unable to parse requirement: %v", err) + } + requirements = append(requirements, *r) + t, l := p.consume(Values) + switch t { + case EndOfStringToken: + return requirements, nil + case CommaToken: + t2, l2 := p.lookahead(Values) + if t2 != IdentifierToken && t2 != DoesNotExistToken { + return nil, fmt.Errorf("found '%s', expected: identifier after ','", l2) + } + default: + return nil, fmt.Errorf("found '%s', expected: ',' or 'end of string'", l) + } + case EndOfStringToken: + return requirements, nil + default: + return nil, fmt.Errorf("found '%s', expected: !, identifier, or 'end of string'", lit) + } + } +} + +func (p *Parser) parseRequirement() (*Requirement, error) { + key, operator, err := p.parseKeyAndInferOperator() + if err != nil { + return nil, err + } + if operator == selection.Exists || operator == selection.DoesNotExist { // operator found lookahead set checked + return NewRequirement(key, operator, []string{}) + } + operator, err = p.parseOperator() + if err != nil { + return nil, err + } + var values sets.String + switch operator { + case selection.In, selection.NotIn: + values, err = p.parseValues() + case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.GreaterThan, selection.LessThan: + values, err = p.parseExactValue() + } + if err != nil { + return nil, err + } + return NewRequirement(key, operator, values.List()) + +} + +// parseKeyAndInferOperator parse literals. +// in case of no operator '!, in, notin, ==, =, !=' are found +// the 'exists' operator is inferred +func (p *Parser) parseKeyAndInferOperator() (string, selection.Operator, error) { + var operator selection.Operator + tok, literal := p.consume(Values) + if tok == DoesNotExistToken { + operator = selection.DoesNotExist + tok, literal = p.consume(Values) + } + if tok != IdentifierToken { + err := fmt.Errorf("found '%s', expected: identifier", literal) + return "", "", err + } + if err := validateLabelKey(literal); err != nil { + return "", "", err + } + if t, _ := p.lookahead(Values); t == EndOfStringToken || t == CommaToken { + if operator != selection.DoesNotExist { + operator = selection.Exists + } + } + return literal, operator, nil +} + +// parseOperator return operator and eventually matchType +// matchType can be exact +func (p *Parser) parseOperator() (op selection.Operator, err error) { + tok, lit := p.consume(KeyAndOperator) + switch tok { + // DoesNotExistToken shouldn't be here because it's a unary operator, not a binary operator + case InToken: + op = selection.In + case EqualsToken: + op = selection.Equals + case DoubleEqualsToken: + op = selection.DoubleEquals + case GreaterThanToken: + op = selection.GreaterThan + case LessThanToken: + op = selection.LessThan + case NotInToken: + op = selection.NotIn + case NotEqualsToken: + op = selection.NotEquals + default: + return "", fmt.Errorf("found '%s', expected: '=', '!=', '==', 'in', notin'", lit) + } + return op, nil +} + +// parseValues parses the values for set based matching (x,y,z) +func (p *Parser) parseValues() (sets.String, error) { + tok, lit := p.consume(Values) + if tok != OpenParToken { + return nil, fmt.Errorf("found '%s' expected: '('", lit) + } + tok, lit = p.lookahead(Values) + switch tok { + case 
IdentifierToken, CommaToken: + s, err := p.parseIdentifiersList() // handles general cases + if err != nil { + return s, err + } + if tok, _ = p.consume(Values); tok != ClosedParToken { + return nil, fmt.Errorf("found '%s', expected: ')'", lit) + } + return s, nil + case ClosedParToken: // handles "()" + p.consume(Values) + return sets.NewString(""), nil + default: + return nil, fmt.Errorf("found '%s', expected: ',', ')' or identifier", lit) + } +} + +// parseIdentifiersList parses a (possibly empty) list of +// of comma separated (possibly empty) identifiers +func (p *Parser) parseIdentifiersList() (sets.String, error) { + s := sets.NewString() + for { + tok, lit := p.consume(Values) + switch tok { + case IdentifierToken: + s.Insert(lit) + tok2, lit2 := p.lookahead(Values) + switch tok2 { + case CommaToken: + continue + case ClosedParToken: + return s, nil + default: + return nil, fmt.Errorf("found '%s', expected: ',' or ')'", lit2) + } + case CommaToken: // handled here since we can have "(," + if s.Len() == 0 { + s.Insert("") // to handle (, + } + tok2, _ := p.lookahead(Values) + if tok2 == ClosedParToken { + s.Insert("") // to handle ,) Double "" removed by StringSet + return s, nil + } + if tok2 == CommaToken { + p.consume(Values) + s.Insert("") // to handle ,, Double "" removed by StringSet + } + default: // it can be operator + return s, fmt.Errorf("found '%s', expected: ',', or identifier", lit) + } + } +} + +// parseExactValue parses the only value for exact match style +func (p *Parser) parseExactValue() (sets.String, error) { + s := sets.NewString() + tok, lit := p.lookahead(Values) + if tok == EndOfStringToken || tok == CommaToken { + s.Insert("") + return s, nil + } + tok, lit = p.consume(Values) + if tok == IdentifierToken { + s.Insert(lit) + return s, nil + } + return nil, fmt.Errorf("found '%s', expected: identifier", lit) +} + +// Parse takes a string representing a selector and returns a selector +// object, or an error. This parsing function differs from ParseSelector +// as they parse different selectors with different syntaxes. +// The input will cause an error if it does not follow this form: +// +// <selector-syntax> ::= <requirement> | <requirement> "," <selector-syntax> +// <requirement> ::= [!] KEY [ <set-based-restriction> | <exact-match-restriction> ] +// <set-based-restriction> ::= "" | <inclusion-exclusion> <value-set> +// <inclusion-exclusion> ::= <inclusion> | <exclusion> +// <exclusion> ::= "notin" +// <inclusion> ::= "in" +// <value-set> ::= "(" <values> ")" +// <values> ::= VALUE | VALUE "," <values> +// <exact-match-restriction> ::= ["="|"=="|"!="] VALUE +// +// KEY is a sequence of one or more characters following [ DNS_SUBDOMAIN "/" ] DNS_LABEL. Max length is 63 characters. +// VALUE is a sequence of zero or more characters "([A-Za-z0-9_-\.])". Max length is 63 characters. +// Delimiter is white space: (' ', '\t') +// Example of valid syntax: +// "x in (foo,,baz),y,z notin ()" +// +// Note: +// (1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the +// VALUEs in its requirement +// (2) Exclusion - " notin " - denotes that the KEY is not equal to any +// of the VALUEs in its requirement or does not exist +// (3) The empty string is a valid VALUE +// (4) A requirement with just a KEY - as in "y" above - denotes that +// the KEY exists and can be any VALUE. +// (5) A requirement with just !KEY requires that the KEY not exist. +// +func Parse(selector string) (Selector, error) { + parsedSelector, err := parse(selector) + if err == nil { + return parsedSelector, nil + } + return nil, err +} + +// parse parses the string representation of the selector and returns the internalSelector struct. +// The callers of this method can then decide how to return the internalSelector struct to their +// callers.
This function has two callers now, one returns a Selector interface and the other +// returns a list of requirements. +func parse(selector string) (internalSelector, error) { + p := &Parser{l: &Lexer{s: selector, pos: 0}} + items, err := p.parse() + if err != nil { + return nil, err + } + sort.Sort(ByKey(items)) // sort to grant determistic parsing + return internalSelector(items), err +} + +func validateLabelKey(k string) error { + if errs := validation.IsQualifiedName(k); len(errs) != 0 { + return fmt.Errorf("invalid label key %q: %s", k, strings.Join(errs, "; ")) + } + return nil +} + +func validateLabelValue(v string) error { + if errs := validation.IsValidLabelValue(v); len(errs) != 0 { + return fmt.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; ")) + } + return nil +} + +// SelectorFromSet returns a Selector which will match exactly the given Set. A +// nil and empty Sets are considered equivalent to Everything(). +func SelectorFromSet(ls Set) Selector { + if ls == nil || len(ls) == 0 { + return internalSelector{} + } + var requirements internalSelector + for label, value := range ls { + if r, err := NewRequirement(label, selection.Equals, []string{value}); err != nil { + //TODO: double check errors when input comes from serialization? + return internalSelector{} + } else { + requirements = append(requirements, *r) + } + } + // sort to have deterministic string representation + sort.Sort(ByKey(requirements)) + return requirements +} + +// SelectorFromValidatedSet returns a Selector which will match exactly the given Set. +// A nil and empty Sets are considered equivalent to Everything(). +// It assumes that Set is already validated and doesn't do any validation. +func SelectorFromValidatedSet(ls Set) Selector { + if ls == nil || len(ls) == 0 { + return internalSelector{} + } + var requirements internalSelector + for label, value := range ls { + requirements = append(requirements, Requirement{key: label, operator: selection.Equals, strValues: []string{value}}) + } + // sort to have deterministic string representation + sort.Sort(ByKey(requirements)) + return requirements +} + +// ParseToRequirements takes a string representing a selector and returns a list of +// requirements. This function is suitable for those callers that perform additional +// processing on selector requirements. +// See the documentation for Parse() function for more details. +// TODO: Consider exporting the internalSelector type instead. +func ParseToRequirements(selector string) ([]Requirement, error) { + return parse(selector) +} diff --git a/vendor/k8s.io/kubernetes/pkg/genericapiserver/openapi/common/common.go b/vendor/k8s.io/apimachinery/pkg/openapi/common.go similarity index 86% rename from vendor/k8s.io/kubernetes/pkg/genericapiserver/openapi/common/common.go rename to vendor/k8s.io/apimachinery/pkg/openapi/common.go index d2a80f2e90..605776ed8f 100644 --- a/vendor/k8s.io/kubernetes/pkg/genericapiserver/openapi/common/common.go +++ b/vendor/k8s.io/apimachinery/pkg/openapi/common.go @@ -14,11 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -package common +package openapi import ( "github.com/emicklei/go-restful" "github.com/go-openapi/spec" + "strings" ) // OpenAPIDefinition describes single type. Normally these definitions are auto-generated using gen-openapi. 
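The selector parser vendored above exposes Parse and ParseToRequirements as its public entry points. The following is a minimal usage sketch for reviewers; the import path k8s.io/apimachinery/pkg/labels and the example selector strings are illustrative assumptions, not part of the vendored change.

// selector_parse_example.go - illustrative only, not part of the vendored tree.
package main

import (
	"fmt"

	// Assumed import path for the vendored selector parser shown above.
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// "x in (foo,,baz),y,z notin ()" is the example of valid syntax quoted in
	// the Parse documentation: set-based requirements, a bare key, and an
	// empty value set.
	sel, err := labels.Parse("x in (foo,,baz),y,z notin ()")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Printf("parsed selector: %v\n", sel)

	// ParseToRequirements returns the individual, key-sorted requirements for
	// callers that post-process them instead of matching directly.
	reqs, err := labels.ParseToRequirements("env=prod,tier notin (debug)")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	for _, r := range reqs {
		fmt.Printf("requirement: %v\n", r)
	}
}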
@@ -27,8 +28,10 @@ type OpenAPIDefinition struct { Dependencies []string } +type ReferenceCallback func(path string) spec.Ref + // OpenAPIDefinitions is collection of all definitions. -type OpenAPIDefinitions map[string]OpenAPIDefinition +type GetOpenAPIDefinitions func(ReferenceCallback) map[string]OpenAPIDefinition // OpenAPIDefinitionGetter gets openAPI definitions for a given type. If a type implements this interface, // the definition returned by it will be used, otherwise the auto-generated definitions will be used. See @@ -59,11 +62,18 @@ type Config struct { // OpenAPIDefinitions should provide definition for all models used by routes. Failure to provide this map // or any of the models will result in spec generation failure. - Definitions *OpenAPIDefinitions + GetDefinitions GetOpenAPIDefinitions // GetOperationIDAndTags returns operation id and tags for a restful route. It is an optional function to customize operation IDs. GetOperationIDAndTags func(servePath string, r *restful.Route) (string, []string, error) + // GetDefinitionName returns a friendly name for a definition base on the serving path. parameter `name` is the full name of the definition. + // It is an optional function to customize model names. + GetDefinitionName func(servePath string, name string) (string, spec.Extensions) + + // PostProcessSpec runs after the spec is ready to serve. It allows a final modification to the spec before serving. + PostProcessSpec func(*spec.Swagger) (*spec.Swagger, error) + // SecurityDefinitions is list of all security definitions for OpenAPI service. If this is not nil, the user of config // is responsible to provide DefaultSecurity and (maybe) add unauthorized response to CommonResponses. SecurityDefinitions *spec.SecurityDefinitions @@ -141,3 +151,10 @@ func GetOpenAPITypeFormat(typeName string) (string, string) { } return mapped[0], mapped[1] } + +func EscapeJsonPointer(p string) string { + // Escaping reference name using rfc6901 + p = strings.Replace(p, "~", "~0", -1) + p = strings.Replace(p, "/", "~1", -1) + return p +} diff --git a/vendor/k8s.io/apimachinery/pkg/openapi/doc.go b/vendor/k8s.io/apimachinery/pkg/openapi/doc.go new file mode 100644 index 0000000000..5ed572cc13 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/openapi/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package openapi holds shared codes and types between open API code generator and spec generator. 
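EscapeJsonPointer in the hunk above applies RFC 6901 escaping to definition names. The sketch below mirrors it locally (the helper name and sample input are hypothetical) to show why "~" must be rewritten before "/".

package main

import (
	"fmt"
	"strings"
)

// escapeJSONPointer mirrors the vendored EscapeJsonPointer above: "~" is
// escaped to "~0" before "/" becomes "~1", so the "~1" sequences produced
// for "/" are not re-escaped into "~01".
func escapeJSONPointer(p string) string {
	p = strings.Replace(p, "~", "~0", -1)
	p = strings.Replace(p, "/", "~1", -1)
	return p
}

func main() {
	// Hypothetical definition name used only for illustration.
	fmt.Println(escapeJSONPointer("io.k8s.api~core/v1.Pod")) // io.k8s.api~0core~1v1.Pod
}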
+package openapi diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/OWNERS b/vendor/k8s.io/apimachinery/pkg/runtime/OWNERS new file mode 100644 index 0000000000..a49419f709 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/OWNERS @@ -0,0 +1,19 @@ +approvers: +- caesarxuchao +- deads2k +- lavalamp +- smarterclayton +reviewers: +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- derekwaynecarr +- caesarxuchao +- mikedanese +- nikhiljindal +- gmarek +- krousey +- timothysc +- piosz +- mbohlool diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go new file mode 100644 index 0000000000..d9748f0664 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go @@ -0,0 +1,316 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/url" + "reflect" + + "k8s.io/apimachinery/pkg/conversion/queryparams" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// codec binds an encoder and decoder. +type codec struct { + Encoder + Decoder +} + +// NewCodec creates a Codec from an Encoder and Decoder. +func NewCodec(e Encoder, d Decoder) Codec { + return codec{e, d} +} + +// Encode is a convenience wrapper for encoding to a []byte from an Encoder +func Encode(e Encoder, obj Object) ([]byte, error) { + // TODO: reuse buffer + buf := &bytes.Buffer{} + if err := e.Encode(obj, buf); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// Decode is a convenience wrapper for decoding data into an Object. +func Decode(d Decoder, data []byte) (Object, error) { + obj, _, err := d.Decode(data, nil, nil) + return obj, err +} + +// DecodeInto performs a Decode into the provided object. +func DecodeInto(d Decoder, data []byte, into Object) error { + out, gvk, err := d.Decode(data, nil, into) + if err != nil { + return err + } + if out != into { + return fmt.Errorf("unable to decode %s into %v", gvk, reflect.TypeOf(into)) + } + return nil +} + +// EncodeOrDie is a version of Encode which will panic instead of returning an error. For tests. +func EncodeOrDie(e Encoder, obj Object) string { + bytes, err := Encode(e, obj) + if err != nil { + panic(err) + } + return string(bytes) +} + +// DefaultingSerializer invokes defaulting after decoding. +type DefaultingSerializer struct { + Defaulter ObjectDefaulter + Decoder Decoder + // Encoder is optional to allow this type to be used as both a Decoder and an Encoder + Encoder +} + +// Decode performs a decode and then allows the defaulter to act on the provided object. 
+func (d DefaultingSerializer) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { + obj, gvk, err := d.Decoder.Decode(data, defaultGVK, into) + if err != nil { + return obj, gvk, err + } + d.Defaulter.Default(obj) + return obj, gvk, nil +} + +// UseOrCreateObject returns obj if the canonical ObjectKind returned by the provided typer matches gvk, or +// invokes the ObjectCreator to instantiate a new gvk. Returns an error if the typer cannot find the object. +func UseOrCreateObject(t ObjectTyper, c ObjectCreater, gvk schema.GroupVersionKind, obj Object) (Object, error) { + if obj != nil { + kinds, _, err := t.ObjectKinds(obj) + if err != nil { + return nil, err + } + for _, kind := range kinds { + if gvk == kind { + return obj, nil + } + } + } + return c.New(gvk) +} + +// NoopEncoder converts an Decoder to a Serializer or Codec for code that expects them but only uses decoding. +type NoopEncoder struct { + Decoder +} + +var _ Serializer = NoopEncoder{} + +func (n NoopEncoder) Encode(obj Object, w io.Writer) error { + return fmt.Errorf("encoding is not allowed for this codec: %v", reflect.TypeOf(n.Decoder)) +} + +// NoopDecoder converts an Encoder to a Serializer or Codec for code that expects them but only uses encoding. +type NoopDecoder struct { + Encoder +} + +var _ Serializer = NoopDecoder{} + +func (n NoopDecoder) Decode(data []byte, gvk *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { + return nil, nil, fmt.Errorf("decoding is not allowed for this codec: %v", reflect.TypeOf(n.Encoder)) +} + +// NewParameterCodec creates a ParameterCodec capable of transforming url values into versioned objects and back. +func NewParameterCodec(scheme *Scheme) ParameterCodec { + return &parameterCodec{ + typer: scheme, + convertor: scheme, + creator: scheme, + } +} + +// parameterCodec implements conversion to and from query parameters and objects. +type parameterCodec struct { + typer ObjectTyper + convertor ObjectConvertor + creator ObjectCreater +} + +var _ ParameterCodec = &parameterCodec{} + +// DecodeParameters converts the provided url.Values into an object of type From with the kind of into, and then +// converts that object to into (if necessary). Returns an error if the operation cannot be completed. +func (c *parameterCodec) DecodeParameters(parameters url.Values, from schema.GroupVersion, into Object) error { + if len(parameters) == 0 { + return nil + } + targetGVKs, _, err := c.typer.ObjectKinds(into) + if err != nil { + return err + } + for i := range targetGVKs { + if targetGVKs[i].GroupVersion() == from { + return c.convertor.Convert(&parameters, into, nil) + } + } + input, err := c.creator.New(from.WithKind(targetGVKs[0].Kind)) + if err != nil { + return err + } + if err := c.convertor.Convert(&parameters, input, nil); err != nil { + return err + } + return c.convertor.Convert(input, into, nil) +} + +// EncodeParameters converts the provided object into the to version, then converts that object to url.Values. +// Returns an error if conversion is not possible.
+func (c *parameterCodec) EncodeParameters(obj Object, to schema.GroupVersion) (url.Values, error) { + gvks, _, err := c.typer.ObjectKinds(obj) + if err != nil { + return nil, err + } + gvk := gvks[0] + if to != gvk.GroupVersion() { + out, err := c.convertor.ConvertToVersion(obj, to) + if err != nil { + return nil, err + } + obj = out + } + return queryparams.Convert(obj) +} + +type base64Serializer struct { + Encoder + Decoder +} + +func NewBase64Serializer(e Encoder, d Decoder) Serializer { + return &base64Serializer{e, d} +} + +func (s base64Serializer) Encode(obj Object, stream io.Writer) error { + e := base64.NewEncoder(base64.StdEncoding, stream) + err := s.Encoder.Encode(obj, e) + e.Close() + return err +} + +func (s base64Serializer) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { + out := make([]byte, base64.StdEncoding.DecodedLen(len(data))) + n, err := base64.StdEncoding.Decode(out, data) + if err != nil { + return nil, nil, err + } + return s.Decoder.Decode(out[:n], defaults, into) +} + +// SerializerInfoForMediaType returns the first info in types that has a matching media type (which cannot +// include media-type parameters), or the first info with an empty media type, or false if no type matches. +func SerializerInfoForMediaType(types []SerializerInfo, mediaType string) (SerializerInfo, bool) { + for _, info := range types { + if info.MediaType == mediaType { + return info, true + } + } + for _, info := range types { + if len(info.MediaType) == 0 { + return info, true + } + } + return SerializerInfo{}, false +} + +var ( + // InternalGroupVersioner will always prefer the internal version for a given group version kind. + InternalGroupVersioner GroupVersioner = internalGroupVersioner{} + // DisabledGroupVersioner will reject all kinds passed to it. + DisabledGroupVersioner GroupVersioner = disabledGroupVersioner{} +) + +type internalGroupVersioner struct{} + +// KindForGroupVersionKinds returns an internal Kind if one is found, or converts the first provided kind to the internal version. +func (internalGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { + for _, kind := range kinds { + if kind.Version == APIVersionInternal { + return kind, true + } + } + for _, kind := range kinds { + return schema.GroupVersionKind{Group: kind.Group, Version: APIVersionInternal, Kind: kind.Kind}, true + } + return schema.GroupVersionKind{}, false +} + +type disabledGroupVersioner struct{} + +// KindForGroupVersionKinds returns false for any input. +func (disabledGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { + return schema.GroupVersionKind{}, false +} + +// GroupVersioners implements GroupVersioner and resolves to the first exact match for any kind. +type GroupVersioners []GroupVersioner + +// KindForGroupVersionKinds returns the first match of any of the group versioners, or false if no match occured. 
+func (gvs GroupVersioners) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { + for _, gv := range gvs { + target, ok := gv.KindForGroupVersionKinds(kinds) + if !ok { + continue + } + return target, true + } + return schema.GroupVersionKind{}, false +} + +// Assert that schema.GroupVersion and GroupVersions implement GroupVersioner +var _ GroupVersioner = schema.GroupVersion{} +var _ GroupVersioner = schema.GroupVersions{} +var _ GroupVersioner = multiGroupVersioner{} + +type multiGroupVersioner struct { + target schema.GroupVersion + acceptedGroupKinds []schema.GroupKind +} + +// NewMultiGroupVersioner returns the provided group version for any kind that matches one of the provided group kinds. +// Kind may be empty in the provided group kind, in which case any kind will match. +func NewMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKind) GroupVersioner { + if len(groupKinds) == 0 || (len(groupKinds) == 1 && groupKinds[0].Group == gv.Group) { + return gv + } + return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds} +} + +// KindForGroupVersionKinds returns the target group version if any kind matches any of the original group kinds. It will +// use the originating kind where possible. +func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { + for _, src := range kinds { + for _, kind := range v.acceptedGroupKinds { + if kind.Group != src.Group { + continue + } + if len(kind.Kind) > 0 && kind.Kind != src.Kind { + continue + } + return v.target.WithKind(src.Kind), true + } + } + return schema.GroupVersionKind{}, false +} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/codec_check.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/runtime/codec_check.go rename to vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go index b0126963d3..1d34ec1a85 100644 --- a/vendor/k8s.io/kubernetes/pkg/runtime/codec_check.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go @@ -20,14 +20,14 @@ import ( "fmt" "reflect" - "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/apimachinery/pkg/runtime/schema" ) // CheckCodec makes sure that the codec can encode objects like internalType, // decode all of the external types listed, and also decode them into the given // object. (Will modify internalObject.) (Assumes JSON serialization.) // TODO: verify that the correct external version is chosen on encode... -func CheckCodec(c Codec, internalType Object, externalTypes ...unversioned.GroupVersionKind) error { +func CheckCodec(c Codec, internalType Object, externalTypes ...schema.GroupVersionKind) error { _, err := Encode(c, internalType) if err != nil { return fmt.Errorf("Internal type not encodable: %v", err) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go new file mode 100644 index 0000000000..8eedffc9c3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go @@ -0,0 +1,98 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Defines conversions between generic types and structs to map query strings +// to struct objects. +package runtime + +import ( + "reflect" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/conversion" +) + +// JSONKeyMapper uses the struct tags on a conversion to determine the key value for +// the other side. Use when mapping from a map[string]* to a struct or vice versa. +func JSONKeyMapper(key string, sourceTag, destTag reflect.StructTag) (string, string) { + if s := destTag.Get("json"); len(s) > 0 { + return strings.SplitN(s, ",", 2)[0], key + } + if s := sourceTag.Get("json"); len(s) > 0 { + return key, strings.SplitN(s, ",", 2)[0] + } + return key, key +} + +// DefaultStringConversions are helpers for converting []string and string to real values. +var DefaultStringConversions = []interface{}{ + Convert_Slice_string_To_string, + Convert_Slice_string_To_int, + Convert_Slice_string_To_bool, + Convert_Slice_string_To_int64, +} + +func Convert_Slice_string_To_string(input *[]string, out *string, s conversion.Scope) error { + if len(*input) == 0 { + *out = "" + } + *out = (*input)[0] + return nil +} + +func Convert_Slice_string_To_int(input *[]string, out *int, s conversion.Scope) error { + if len(*input) == 0 { + *out = 0 + } + str := (*input)[0] + i, err := strconv.Atoi(str) + if err != nil { + return err + } + *out = i + return nil +} + +// Conver_Slice_string_To_bool will convert a string parameter to boolean. +// Only the absence of a value, a value of "false", or a value of "0" resolve to false. +// Any other value (including empty string) resolves to true. 
+func Convert_Slice_string_To_bool(input *[]string, out *bool, s conversion.Scope) error { + if len(*input) == 0 { + *out = false + return nil + } + switch strings.ToLower((*input)[0]) { + case "false", "0": + *out = false + default: + *out = true + } + return nil +} + +func Convert_Slice_string_To_int64(input *[]string, out *int64, s conversion.Scope) error { + if len(*input) == 0 { + *out = 0 + } + str := (*input)[0] + i, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return err + } + *out = i + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/runtime/doc.go rename to vendor/k8s.io/apimachinery/pkg/runtime/doc.go diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/embedded.go b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go similarity index 89% rename from vendor/k8s.io/kubernetes/pkg/runtime/embedded.go rename to vendor/k8s.io/apimachinery/pkg/runtime/embedded.go index eb1f573db6..e8825a787a 100644 --- a/vendor/k8s.io/kubernetes/pkg/runtime/embedded.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go @@ -19,21 +19,21 @@ package runtime import ( "errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/conversion" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime/schema" ) type encodable struct { E Encoder `json:"-"` obj Object - versions []unversioned.GroupVersion + versions []schema.GroupVersion } -func (e encodable) GetObjectKind() unversioned.ObjectKind { return e.obj.GetObjectKind() } +func (e encodable) GetObjectKind() schema.ObjectKind { return e.obj.GetObjectKind() } // NewEncodable creates an object that will be encoded with the provided codec on demand. // Provided as a convenience for test cases dealing with internal objects. -func NewEncodable(e Encoder, obj Object, versions ...unversioned.GroupVersion) Object { +func NewEncodable(e Encoder, obj Object, versions ...schema.GroupVersion) Object { if _, ok := obj.(*Unknown); ok { return obj } @@ -52,7 +52,7 @@ func (re encodable) MarshalJSON() ([]byte, error) { // NewEncodableList creates an object that will be encoded with the provided codec on demand. // Provided as a convenience for test cases dealing with internal objects. -func NewEncodableList(e Encoder, objects []Object, versions ...unversioned.GroupVersion) []Object { +func NewEncodableList(e Encoder, objects []Object, versions ...schema.GroupVersion) []Object { out := make([]Object, len(objects)) for i := range objects { if _, ok := objects[i].(*Unknown); ok { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/error.go b/vendor/k8s.io/apimachinery/pkg/runtime/error.go new file mode 100644 index 0000000000..c9a0e1696d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/error.go @@ -0,0 +1,102 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package runtime + +import ( + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type notRegisteredErr struct { + gvk schema.GroupVersionKind + t reflect.Type +} + +// NewNotRegisteredErr is exposed for testing. +func NewNotRegisteredErr(gvk schema.GroupVersionKind, t reflect.Type) error { + return &notRegisteredErr{gvk: gvk, t: t} +} + +func (k *notRegisteredErr) Error() string { + if k.t != nil { + return fmt.Sprintf("no kind is registered for the type %v", k.t) + } + if len(k.gvk.Kind) == 0 { + return fmt.Sprintf("no version %q has been registered", k.gvk.GroupVersion()) + } + if k.gvk.Version == APIVersionInternal { + return fmt.Sprintf("no kind %q is registered for the internal version of group %q", k.gvk.Kind, k.gvk.Group) + } + + return fmt.Sprintf("no kind %q is registered for version %q", k.gvk.Kind, k.gvk.GroupVersion()) +} + +// IsNotRegisteredError returns true if the error indicates the provided +// object or input data is not registered. +func IsNotRegisteredError(err error) bool { + if err == nil { + return false + } + _, ok := err.(*notRegisteredErr) + return ok +} + +type missingKindErr struct { + data string +} + +func NewMissingKindErr(data string) error { + return &missingKindErr{data} +} + +func (k *missingKindErr) Error() string { + return fmt.Sprintf("Object 'Kind' is missing in '%s'", k.data) +} + +// IsMissingKind returns true if the error indicates that the provided object +// is missing a 'Kind' field. +func IsMissingKind(err error) bool { + if err == nil { + return false + } + _, ok := err.(*missingKindErr) + return ok +} + +type missingVersionErr struct { + data string +} + +// IsMissingVersion returns true if the error indicates that the provided object +// is missing a 'Version' field. +func NewMissingVersionErr(data string) error { + return &missingVersionErr{data} +} + +func (k *missingVersionErr) Error() string { + return fmt.Sprintf("Object 'apiVersion' is missing in '%s'", k.data) +} + +func IsMissingVersion(err error) bool { + if err == nil { + return false + } + _, ok := err.(*missingVersionErr) + return ok +} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/runtime/extension.go rename to vendor/k8s.io/apimachinery/pkg/runtime/extension.go diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go new file mode 100644 index 0000000000..9947bd8e1a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -0,0 +1,767 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto +// DO NOT EDIT! + +/* + Package runtime is a generated protocol buffer package.
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto + + It has these top-level messages: + RawExtension + TypeMeta + Unknown +*/ +package runtime + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *RawExtension) Reset() { *m = RawExtension{} } +func (*RawExtension) ProtoMessage() {} +func (*RawExtension) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *Unknown) Reset() { *m = Unknown{} } +func (*Unknown) ProtoMessage() {} +func (*Unknown) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func init() { + proto.RegisterType((*RawExtension)(nil), "k8s.io.apimachinery.pkg.runtime.RawExtension") + proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.runtime.TypeMeta") + proto.RegisterType((*Unknown)(nil), "k8s.io.apimachinery.pkg.runtime.Unknown") +} +func (m *RawExtension) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RawExtension) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Raw != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Raw))) + i += copy(data[i:], m.Raw) + } + return i, nil +} + +func (m *TypeMeta) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *TypeMeta) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + return i, nil +} + +func (m *Unknown) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Unknown) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.TypeMeta.Size())) + n1, err := m.TypeMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + if m.Raw != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Raw))) + i += copy(data[i:], m.Raw) + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) + i += copy(data[i:], m.ContentEncoding) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) + i += copy(data[i:], m.ContentType) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + 
data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *RawExtension) Size() (n int) { + var l int + _ = l + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TypeMeta) Size() (n int) { + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Unknown) Size() (n int) { + var l int + _ = l + l = m.TypeMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.ContentEncoding) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContentType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *RawExtension) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RawExtension{`, + `Raw:` + valueToStringGenerated(this.Raw) + `,`, + `}`, + }, "") + return s +} +func (this *TypeMeta) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TypeMeta{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *Unknown) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Unknown{`, + `TypeMeta:` + strings.Replace(strings.Replace(this.TypeMeta.String(), "TypeMeta", "TypeMeta", 1), `&`, ``, 1) + `,`, + `Raw:` + valueToStringGenerated(this.Raw) + `,`, + `ContentEncoding:` + fmt.Sprintf("%v", this.ContentEncoding) + `,`, + `ContentType:` + fmt.Sprintf("%v", this.ContentType) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *RawExtension) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RawExtension: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RawExtension: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], data[iNdEx:postIndex]...) + if m.Raw == nil { + m.Raw = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TypeMeta) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Unknown) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Unknown: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Unknown: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TypeMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], data[iNdEx:postIndex]...) + if m.Raw == nil { + m.Raw = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContentEncoding", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContentEncoding = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContentType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContentType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 391 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x90, 0x4f, 0x8b, 0xd3, 0x40, + 0x18, 0xc6, 0x93, 0x6d, 0xa1, 0xeb, 0xb4, 0xb0, 0x32, 0x1e, 0x8c, 0x7b, 0x98, 0x2c, 0x3d, 0xd9, + 0x83, 0x33, 0xb0, 0x22, 0x78, 0xdd, 0x94, 0x82, 0x22, 0x82, 0x0c, 0xfe, 0x01, 0x4f, 0x4e, 0x93, + 0x31, 0x1d, 0x62, 0xdf, 0x09, 0x93, 0x89, 0xb1, 0x37, 0x3f, 0x82, 0x1f, 0xab, 0xc7, 0x1e, 0x3d, + 0x15, 0x1b, 0x3f, 0x84, 0x57, 0xe9, 0x74, 0x5a, 0x6b, 0x45, 0xf6, 0x96, 0x79, 0x9f, 0xe7, 0xf7, + 0xbc, 0xcf, 0x1b, 0xf4, 0xac, 0x78, 0x5a, 0x51, 0xa5, 0x59, 0x51, 0x4f, 0xa5, 0x01, 0x69, 0x65, + 0xc5, 0x3e, 0x4b, 0xc8, 0xb4, 0x61, 0x5e, 0x10, 0xa5, 0x9a, 0x8b, 0x74, 0xa6, 0x40, 0x9a, 0x05, + 0x2b, 0x8b, 0x9c, 0x99, 0x1a, 0xac, 0x9a, 0x4b, 0x96, 0x4b, 0x90, 0x46, 0x58, 0x99, 0xd1, 0xd2, + 0x68, 0xab, 0x71, 0xbc, 0x03, 0xe8, 0x31, 0x40, 0xcb, 0x22, 0xa7, 0x1e, 0xb8, 0x7c, 0x94, 0x2b, + 0x3b, 0xab, 0xa7, 0x34, 0xd5, 0x73, 0x96, 0xeb, 0x5c, 0x33, 0xc7, 0x4d, 0xeb, 0x8f, 0xee, 0xe5, + 0x1e, 0xee, 0x6b, 0x97, 0x77, 0xf9, 0xf8, 0x7f, 0x05, 0x6a, 0xab, 0x3e, 0x31, 0x05, 0xb6, 0xb2, + 0xe6, 0xb4, 0xc4, 0x70, 0x84, 0x06, 0x5c, 0x34, 0x93, 0x2f, 0x56, 0x42, 0xa5, 0x34, 0xe0, 0x07, + 0xa8, 0x63, 0x44, 0x13, 0x85, 0x57, 0xe1, 0xc3, 0x41, 0xd2, 0x6b, 0xd7, 0x71, 0x87, 0x8b, 0x86, + 0x6f, 0x67, 0xc3, 0x0f, 0xe8, 0xfc, 0xf5, 0xa2, 0x94, 0x2f, 0xa5, 0x15, 0xf8, 0x1a, 0x21, 0x51, + 0xaa, 0xb7, 0xd2, 0x6c, 0x21, 0xe7, 0xbe, 0x93, 0xe0, 0xe5, 0x3a, 0x0e, 0xda, 0x75, 0x8c, 0x6e, + 0x5e, 0x3d, 0xf7, 0x0a, 0x3f, 0x72, 0xe1, 0x2b, 0xd4, 0x2d, 0x14, 0x64, 0xd1, 0x99, 0x73, 0x0f, + 0xbc, 0xbb, 0xfb, 0x42, 0x41, 0xc6, 0x9d, 0x32, 0xfc, 0x15, 0xa2, 0xde, 0x1b, 0x28, 0x40, 0x37, + 0x80, 0xdf, 0xa1, 0x73, 0xeb, 0xb7, 0xb9, 0xfc, 0xfe, 0xf5, 0x88, 0xde, 0xf2, 0xc3, 0xe8, 0xbe, + 0x5e, 0x72, 0xd7, 0x87, 0x1f, 0x0a, 0xf3, 0x43, 0xd8, 0xfe, 0xc2, 0xb3, 0x7f, 0x2f, 0xc4, 0x37, + 0xe8, 0x22, 0xd5, 0x60, 0x25, 0xd8, 0x09, 0xa4, 0x3a, 0x53, 0x90, 0x47, 0x1d, 0x57, 0xf6, 0xbe, + 0xcf, 0xbb, 0x18, 0xff, 0x2d, 0xf3, 0x53, 0x3f, 0x7e, 0x82, 0xfa, 0x7e, 0xb4, 0x5d, 
0x1d, 0x75, + 0x1d, 0x7e, 0xcf, 0xe3, 0xfd, 0xf1, 0x1f, 0x89, 0x1f, 0xfb, 0x92, 0xd1, 0x72, 0x43, 0x82, 0xd5, + 0x86, 0x04, 0xdf, 0x37, 0x24, 0xf8, 0xda, 0x92, 0x70, 0xd9, 0x92, 0x70, 0xd5, 0x92, 0xf0, 0x47, + 0x4b, 0xc2, 0x6f, 0x3f, 0x49, 0xf0, 0xbe, 0xe7, 0x8f, 0xfc, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x5d, + 0x24, 0xc6, 0x1a, 0x81, 0x02, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto new file mode 100644 index 0000000000..57fc840785 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.runtime; + +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "runtime"; + +// RawExtension is used to hold extensions in external versions. +// +// To use this, make a field which has RawExtension as its type in your external, versioned +// struct, and Object in your internal struct. You also need to register your +// various plugin types. +// +// // Internal package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.Object `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // External package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.RawExtension `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // On the wire, the JSON will look something like this: +// { +// "kind":"MyAPIObject", +// "apiVersion":"v1", +// "myPlugin": { +// "kind":"PluginA", +// "aOption":"foo", +// }, +// } +// +// So what happens? Decode first uses json or yaml to unmarshal the serialized data into +// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. +// The next step is to copy (using pkg/conversion) into the internal struct. The runtime +// package's DefaultScheme has conversion functions installed which will unpack the +// JSON stored in RawExtension, turning it into the correct object type, and storing it +// in the Object. (TODO: In the case where the object is of an unknown type, a +// runtime.Unknown object will be created and stored.) +// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +message RawExtension { + // Raw is the underlying serialization of this object. + // + // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. + optional bytes raw = 1; +} + +// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, +// like this: +// type MyAwesomeAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// ... 
// other fields +// } +// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind +// +// TypeMeta is provided here for convenience. You may use it directly from this package or define +// your own with the same fields. +// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +message TypeMeta { + // +optional + optional string apiVersion = 1; + + // +optional + optional string kind = 2; +} + +// Unknown allows api objects with unknown types to be passed-through. This can be used +// to deal with the API objects from a plug-in. Unknown objects still have functioning +// TypeMeta features-- kind, version, etc. +// TODO: Make this object have easy access to field based accessors and settors for +// metadata and field mutatation. +// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +message Unknown { + optional TypeMeta typeMeta = 1; + + // Raw will hold the complete serialized object which couldn't be matched + // with a registered type. Most likely, nothing should be done with this + // except for passing it through the system. + optional bytes raw = 2; + + // ContentEncoding is encoding used to encode 'Raw' data. + // Unspecified means no encoding. + optional string contentEncoding = 3; + + // ContentType is serialization method used to serialize 'Raw'. + // Unspecified means ContentTypeJSON. + optional string contentType = 4; +} + diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go new file mode 100644 index 0000000000..a6c1a8d34d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go @@ -0,0 +1,212 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" + "io" + "reflect" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/errors" +) + +// unsafeObjectConvertor implements ObjectConvertor using the unsafe conversion path. +type unsafeObjectConvertor struct { + *Scheme +} + +var _ ObjectConvertor = unsafeObjectConvertor{} + +// ConvertToVersion converts in to the provided outVersion without copying the input first, which +// is only safe if the output object is not mutated or reused. +func (c unsafeObjectConvertor) ConvertToVersion(in Object, outVersion GroupVersioner) (Object, error) { + return c.Scheme.UnsafeConvertToVersion(in, outVersion) +} + +// UnsafeObjectConvertor performs object conversion without copying the object structure, +// for use when the converted object will not be reused or mutated. Primarily for use within +// versioned codecs, which use the external object for serialization but do not return it. +func UnsafeObjectConvertor(scheme *Scheme) ObjectConvertor { + return unsafeObjectConvertor{scheme} +} + +// SetField puts the value of src, into fieldName, which must be a member of v. +// The value of src must be assignable to the field. 
+func SetField(src interface{}, v reflect.Value, fieldName string) error { + field := v.FieldByName(fieldName) + if !field.IsValid() { + return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + } + srcValue := reflect.ValueOf(src) + if srcValue.Type().AssignableTo(field.Type()) { + field.Set(srcValue) + return nil + } + if srcValue.Type().ConvertibleTo(field.Type()) { + field.Set(srcValue.Convert(field.Type())) + return nil + } + return fmt.Errorf("couldn't assign/convert %v to %v", srcValue.Type(), field.Type()) +} + +// Field puts the value of fieldName, which must be a member of v, into dest, +// which must be a variable to which this field's value can be assigned. +func Field(v reflect.Value, fieldName string, dest interface{}) error { + field := v.FieldByName(fieldName) + if !field.IsValid() { + return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + } + destValue, err := conversion.EnforcePtr(dest) + if err != nil { + return err + } + if field.Type().AssignableTo(destValue.Type()) { + destValue.Set(field) + return nil + } + if field.Type().ConvertibleTo(destValue.Type()) { + destValue.Set(field.Convert(destValue.Type())) + return nil + } + return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), destValue.Type()) +} + +// fieldPtr puts the address of fieldName, which must be a member of v, +// into dest, which must be an address of a variable to which this field's +// address can be assigned. +func FieldPtr(v reflect.Value, fieldName string, dest interface{}) error { + field := v.FieldByName(fieldName) + if !field.IsValid() { + return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + } + v, err := conversion.EnforcePtr(dest) + if err != nil { + return err + } + field = field.Addr() + if field.Type().AssignableTo(v.Type()) { + v.Set(field) + return nil + } + if field.Type().ConvertibleTo(v.Type()) { + v.Set(field.Convert(v.Type())) + return nil + } + return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), v.Type()) +} + +// EncodeList ensures that each object in an array is converted to a Unknown{} in serialized form. +// TODO: accept a content type. +func EncodeList(e Encoder, objects []Object) error { + var errs []error + for i := range objects { + data, err := Encode(e, objects[i]) + if err != nil { + errs = append(errs, err) + continue + } + // TODO: Set ContentEncoding and ContentType. + objects[i] = &Unknown{Raw: data} + } + return errors.NewAggregate(errs) +} + +func decodeListItem(obj *Unknown, decoders []Decoder) (Object, error) { + for _, decoder := range decoders { + // TODO: Decode based on ContentType. + obj, err := Decode(decoder, obj.Raw) + if err != nil { + if IsNotRegisteredError(err) { + continue + } + return nil, err + } + return obj, nil + } + // could not decode, so leave the object as Unknown, but give the decoders the + // chance to set Unknown.TypeMeta if it is available. + for _, decoder := range decoders { + if err := DecodeInto(decoder, obj.Raw, obj); err == nil { + return obj, nil + } + } + return obj, nil +} + +// DecodeList alters the list in place, attempting to decode any objects found in +// the list that have the Unknown type. Any errors that occur are returned +// after the entire list is processed. Decoders are tried in order. 
+func DecodeList(objects []Object, decoders ...Decoder) []error { + errs := []error(nil) + for i, obj := range objects { + switch t := obj.(type) { + case *Unknown: + decoded, err := decodeListItem(t, decoders) + if err != nil { + errs = append(errs, err) + break + } + objects[i] = decoded + } + } + return errs +} + +// MultiObjectTyper returns the types of objects across multiple schemes in order. +type MultiObjectTyper []ObjectTyper + +var _ ObjectTyper = MultiObjectTyper{} + +func (m MultiObjectTyper) ObjectKinds(obj Object) (gvks []schema.GroupVersionKind, unversionedType bool, err error) { + for _, t := range m { + gvks, unversionedType, err = t.ObjectKinds(obj) + if err == nil { + return + } + } + return +} + +func (m MultiObjectTyper) Recognizes(gvk schema.GroupVersionKind) bool { + for _, t := range m { + if t.Recognizes(gvk) { + return true + } + } + return false +} + +// SetZeroValue would set the object of objPtr to zero value of its type. +func SetZeroValue(objPtr Object) error { + v, err := conversion.EnforcePtr(objPtr) + if err != nil { + return err + } + v.Set(reflect.Zero(v.Type())) + return nil +} + +// DefaultFramer is valid for any stream that can read objects serially without +// any separation in the stream. +var DefaultFramer = defaultFramer{} + +type defaultFramer struct{} + +func (defaultFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { return r } +func (defaultFramer) NewFrameWriter(w io.Writer) io.Writer { return w } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go new file mode 100644 index 0000000000..fcb18ba111 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -0,0 +1,251 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "io" + "net/url" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + // APIVersionInternal may be used if you are registering a type that should not + // be considered stable or serialized - it is a convention only and has no + // special behavior in this package. + APIVersionInternal = "__internal" +) + +// GroupVersioner refines a set of possible conversion targets into a single option. +type GroupVersioner interface { + // KindForGroupVersionKinds returns a desired target group version kind for the given input, or returns ok false if no + // target is known. In general, if the return target is not in the input list, the caller is expected to invoke + // Scheme.New(target) and then perform a conversion between the current Go type and the destination Go type. + // Sophisticated implementations may use additional information about the input kinds to pick a destination kind. + KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (target schema.GroupVersionKind, ok bool) +} + +// Encoders write objects to a serialized form +type Encoder interface { + // Encode writes an object to a stream. 
Implementations may return errors if the versions are + // incompatible, or if no conversion is defined. + Encode(obj Object, w io.Writer) error +} + +// Decoders attempt to load an object from data. +type Decoder interface { + // Decode attempts to deserialize the provided data using either the innate typing of the scheme or the + // default kind, group, and version provided. It returns a decoded object as well as the kind, group, and + // version from the serialized data, or an error. If into is non-nil, it will be used as the target type + // and implementations may choose to use it rather than reallocating an object. However, the object is not + // guaranteed to be populated. The returned object is not guaranteed to match into. If defaults are + // provided, they are applied to the data by default. If no defaults or partial defaults are provided, the + // type of the into may be used to guide conversion decisions. + Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) +} + +// Serializer is the core interface for transforming objects into a serialized format and back. +// Implementations may choose to perform conversion of the object, but no assumptions should be made. +type Serializer interface { + Encoder + Decoder +} + +// Codec is a Serializer that deals with the details of versioning objects. It offers the same +// interface as Serializer, so this is a marker to consumers that care about the version of the objects +// they receive. +type Codec Serializer + +// ParameterCodec defines methods for serializing and deserializing API objects to url.Values and +// performing any necessary conversion. Unlike the normal Codec, query parameters are not self describing +// and the desired version must be specified. +type ParameterCodec interface { + // DecodeParameters takes the given url.Values in the specified group version and decodes them + // into the provided object, or returns an error. + DecodeParameters(parameters url.Values, from schema.GroupVersion, into Object) error + // EncodeParameters encodes the provided object as query parameters or returns an error. + EncodeParameters(obj Object, to schema.GroupVersion) (url.Values, error) +} + +// Framer is a factory for creating readers and writers that obey a particular framing pattern. +type Framer interface { + NewFrameReader(r io.ReadCloser) io.ReadCloser + NewFrameWriter(w io.Writer) io.Writer +} + +// SerializerInfo contains information about a specific serialization format +type SerializerInfo struct { + // MediaType is the value that represents this serializer over the wire. + MediaType string + // EncodesAsText indicates this serializer can be encoded to UTF-8 safely. + EncodesAsText bool + // Serializer is the individual object serializer for this media type. + Serializer Serializer + // PrettySerializer, if set, can serialize this object in a form biased towards + // readability. + PrettySerializer Serializer + // StreamSerializer, if set, describes the streaming serialization format + // for this media type. + StreamSerializer *StreamSerializerInfo +} + +// StreamSerializerInfo contains information about a specific stream serialization format +type StreamSerializerInfo struct { + // EncodesAsText indicates this serializer can be encoded to UTF-8 safely. 
+ EncodesAsText bool + // Serializer is the top level object serializer for this type when streaming + Serializer + // Framer is the factory for retrieving streams that separate objects on the wire + Framer +} + +// NegotiatedSerializer is an interface used for obtaining encoders, decoders, and serializers +// for multiple supported media types. This would commonly be accepted by a server component +// that performs HTTP content negotiation to accept multiple formats. +type NegotiatedSerializer interface { + // SupportedMediaTypes is the media types supported for reading and writing single objects. + SupportedMediaTypes() []SerializerInfo + + // EncoderForVersion returns an encoder that ensures objects being written to the provided + // serializer are in the provided group version. + EncoderForVersion(serializer Encoder, gv GroupVersioner) Encoder + // DecoderForVersion returns a decoder that ensures objects being read by the provided + // serializer are in the provided group version by default. + DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder +} + +// StorageSerializer is an interface used for obtaining encoders, decoders, and serializers +// that can read and write data at rest. This would commonly be used by client tools that must +// read files, or server side storage interfaces that persist restful objects. +type StorageSerializer interface { + // SupportedMediaTypes are the media types supported for reading and writing objects. + SupportedMediaTypes() []SerializerInfo + + // UniversalDeserializer returns a Serializer that can read objects in multiple supported formats + // by introspecting the data at rest. + UniversalDeserializer() Decoder + + // EncoderForVersion returns an encoder that ensures objects being written to the provided + // serializer are in the provided group version. + EncoderForVersion(serializer Encoder, gv GroupVersioner) Encoder + // DecoderForVersion returns a decoder that ensures objects being read by the provided + // serializer are in the provided group version by default. + DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder +} + +// NestedObjectEncoder is an optional interface that objects may implement to be given +// an opportunity to encode any nested Objects / RawExtensions during serialization. +type NestedObjectEncoder interface { + EncodeNestedObjects(e Encoder) error +} + +// NestedObjectDecoder is an optional interface that objects may implement to be given +// an opportunity to decode any nested Objects / RawExtensions during serialization. +type NestedObjectDecoder interface { + DecodeNestedObjects(d Decoder) error +} + +/////////////////////////////////////////////////////////////////////////////// +// Non-codec interfaces + +type ObjectDefaulter interface { + // Default takes an object (must be a pointer) and applies any default values. + // Defaulters may not error. + Default(in Object) +} + +type ObjectVersioner interface { + ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error) +} + +// ObjectConvertor converts an object to a different version. +type ObjectConvertor interface { + // Convert attempts to convert one object into another, or returns an error. This method does + // not guarantee the in object is not mutated. The context argument will be passed to + // all nested conversions. + Convert(in, out, context interface{}) error + // ConvertToVersion takes the provided object and converts it the provided version. This + // method does not guarantee that the in object is not mutated. 
This method is similar to + // Convert() but handles specific details of choosing the correct output version. + ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error) + ConvertFieldLabel(version, kind, label, value string) (string, string, error) +} + +// ObjectTyper contains methods for extracting the APIVersion and Kind +// of objects. +type ObjectTyper interface { + // ObjectKinds returns the all possible group,version,kind of the provided object, true if + // the object is unversioned, or an error if the object is not recognized + // (IsNotRegisteredError will return true). + ObjectKinds(Object) ([]schema.GroupVersionKind, bool, error) + // Recognizes returns true if the scheme is able to handle the provided version and kind, + // or more precisely that the provided version is a possible conversion or decoding + // target. + Recognizes(gvk schema.GroupVersionKind) bool +} + +// ObjectCreater contains methods for instantiating an object by kind and version. +type ObjectCreater interface { + New(kind schema.GroupVersionKind) (out Object, err error) +} + +// ObjectCopier duplicates an object. +type ObjectCopier interface { + // Copy returns an exact copy of the provided Object, or an error if the + // copy could not be completed. + Copy(Object) (Object, error) +} + +// ResourceVersioner provides methods for setting and retrieving +// the resource version from an API object. +type ResourceVersioner interface { + SetResourceVersion(obj Object, version string) error + ResourceVersion(obj Object) (string, error) +} + +// SelfLinker provides methods for setting and retrieving the SelfLink field of an API object. +type SelfLinker interface { + SetSelfLink(obj Object, selfLink string) error + SelfLink(obj Object) (string, error) + + // Knowing Name is sometimes necessary to use a SelfLinker. + Name(obj Object) (string, error) + // Knowing Namespace is sometimes necessary to use a SelfLinker + Namespace(obj Object) (string, error) +} + +// All API types registered with Scheme must support the Object interface. Since objects in a scheme are +// expected to be serialized to the wire, the interface an Object must provide to the Scheme allows +// serializers to set the kind, version, and group the object is represented as. An Object may choose +// to return a no-op ObjectKindAccessor in cases where it is not expected to be serialized. +type Object interface { + GetObjectKind() schema.ObjectKind +} + +// Unstructured objects store values as map[string]interface{}, with only values that can be serialized +// to JSON allowed. +type Unstructured interface { + // IsUnstructuredObject is a marker interface to allow objects that can be serialized but not introspected + // to bypass conversion. + IsUnstructuredObject() + // IsList returns true if this type is a list or matches the list convention - has an array called "items". + IsList() bool + // UnstructuredContent returns a non-nil, mutable map of the contents of this object. Values may be + // []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to + // and from JSON. + UnstructuredContent() map[string]interface{} +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/register.go b/vendor/k8s.io/apimachinery/pkg/runtime/register.go new file mode 100644 index 0000000000..2ec6db8201 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/register.go @@ -0,0 +1,61 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import "k8s.io/apimachinery/pkg/runtime/schema" + +// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta +func (obj *TypeMeta) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} + +// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta +func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *Unknown) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } + +// GetObjectKind implements Object for VersionedObjects, returning an empty ObjectKind +// interface if no objects are provided, or the ObjectKind interface of the object in the +// highest array position. +func (obj *VersionedObjects) GetObjectKind() schema.ObjectKind { + last := obj.Last() + if last == nil { + return schema.EmptyObjectKind + } + return last.GetObjectKind() +} + +// First returns the leftmost object in the VersionedObjects array, which is usually the +// object as serialized on the wire. +func (obj *VersionedObjects) First() Object { + if len(obj.Objects) == 0 { + return nil + } + return obj.Objects[0] +} + +// Last is the rightmost object in the VersionedObjects array, which is the object after +// all transformations have been applied. This is the same object that would be returned +// by Decode in a normal invocation (without VersionedObjects in the into argument). +func (obj *VersionedObjects) Last() Object { + if len(obj.Objects) == 0 { + return nil + } + return obj.Objects[len(obj.Objects)-1] +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go new file mode 100644 index 0000000000..dfe4c5f53c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go @@ -0,0 +1,59 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto +// DO NOT EDIT! + +/* + Package schema is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto + + It has these top-level messages: +*/ +package schema + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +var fileDescriptorGenerated = []byte{ + // 199 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0xce, 0x2f, 0x4e, 0x05, 0x31, + 0x10, 0xc7, 0xf1, 0xd6, 0x20, 0x90, 0xc8, 0x27, 0x46, 0x12, 0x0c, 0x1d, 0x81, 0x41, 0x73, 0x01, + 0x3c, 0xae, 0xbb, 0x6f, 0xe8, 0x36, 0xa5, 0x7f, 0xd2, 0x4e, 0x49, 0x70, 0x1c, 0x81, 0x63, 0xad, + 0x5c, 0x89, 0x64, 0xcb, 0x45, 0x48, 0xda, 0x15, 0x84, 0x04, 0xd7, 0x5f, 0x9a, 0xcf, 0xe4, 0x7b, + 0xf9, 0xe8, 0xee, 0x8b, 0xb2, 0x11, 0x5d, 0x9d, 0x28, 0x07, 0x62, 0x2a, 0xf8, 0x4a, 0xe1, 0x1c, + 0x33, 0x1e, 0x1f, 0x3a, 0x59, 0xaf, 0xe7, 0xc5, 0x06, 0xca, 0x6f, 0x98, 0x9c, 0xc1, 0x5c, 0x03, + 0x5b, 0x4f, 0x58, 0xe6, 0x85, 0xbc, 0x46, 0x43, 0x81, 0xb2, 0x66, 0x3a, 0xab, 0x94, 0x23, 0xc7, + 0xab, 0xeb, 0xe1, 0xd4, 0x6f, 0xa7, 0x92, 0x33, 0xea, 0x70, 0x6a, 0xb8, 0xd3, 0xad, 0xb1, 0xbc, + 0xd4, 0x49, 0xcd, 0xd1, 0xa3, 0x89, 0x26, 0x62, 0xe7, 0x53, 0x7d, 0xee, 0xab, 0x8f, 0xfe, 0x1a, + 0x67, 0x4f, 0x77, 0xff, 0xe5, 0x54, 0xb6, 0x2f, 0x68, 0x03, 0x17, 0xce, 0x7f, 0x5b, 0x1e, 0x6e, + 0xd6, 0x1d, 0xc4, 0xb6, 0x83, 0xf8, 0xdc, 0x41, 0xbc, 0x37, 0x90, 0x6b, 0x03, 0xb9, 0x35, 0x90, + 0x5f, 0x0d, 0xe4, 0xc7, 0x37, 0x88, 0xa7, 0x8b, 0x51, 0xf3, 0x13, 0x00, 0x00, 0xff, 0xff, 0xd9, + 0x82, 0x09, 0xbe, 0x07, 0x01, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto new file mode 100644 index 0000000000..ebc1a263d2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto @@ -0,0 +1,28 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.runtime.schema; + +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "schema"; + diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go new file mode 100644 index 0000000000..1a9bba1060 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -0,0 +1,277 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "fmt" + "strings" +) + +// ParseResourceArg takes the common style of string which may be either `resource.group.com` or `resource.version.group.com` +// and parses it out into both possibilities. This code takes no responsibility for knowing which representation was intended +// but with a knowledge of all GroupVersions, calling code can take a very good guess. If there are only two segments, then +// `*GroupVersionResource` is nil. +// `resource.group.com` -> `group=com, version=group, resource=resource` and `group=group.com, resource=resource` +func ParseResourceArg(arg string) (*GroupVersionResource, GroupResource) { + var gvr *GroupVersionResource + if strings.Count(arg, ".") >= 2 { + s := strings.SplitN(arg, ".", 3) + gvr = &GroupVersionResource{Group: s[2], Version: s[1], Resource: s[0]} + } + + return gvr, ParseGroupResource(arg) +} + +// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +type GroupResource struct { + Group string + Resource string +} + +func (gr GroupResource) WithVersion(version string) GroupVersionResource { + return GroupVersionResource{Group: gr.Group, Version: version, Resource: gr.Resource} +} + +func (gr GroupResource) Empty() bool { + return len(gr.Group) == 0 && len(gr.Resource) == 0 +} + +func (gr *GroupResource) String() string { + if len(gr.Group) == 0 { + return gr.Resource + } + return gr.Resource + "." + gr.Group +} + +// ParseGroupResource turns "resource.group" string into a GroupResource struct. Empty strings are allowed +// for each field. +func ParseGroupResource(gr string) GroupResource { + if i := strings.Index(gr, "."); i == -1 { + return GroupResource{Resource: gr} + } else { + return GroupResource{Group: gr[i+1:], Resource: gr[:i]} + } +} + +// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion +// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling +type GroupVersionResource struct { + Group string + Version string + Resource string +} + +func (gvr GroupVersionResource) Empty() bool { + return len(gvr.Group) == 0 && len(gvr.Version) == 0 && len(gvr.Resource) == 0 +} + +func (gvr GroupVersionResource) GroupResource() GroupResource { + return GroupResource{Group: gvr.Group, Resource: gvr.Resource} +} + +func (gvr GroupVersionResource) GroupVersion() GroupVersion { + return GroupVersion{Group: gvr.Group, Version: gvr.Version} +} + +func (gvr *GroupVersionResource) String() string { + return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") +} + +// GroupKind specifies a Group and a Kind, but does not force a version. 
This is useful for identifying +// concepts during lookup stages without having partially valid types +type GroupKind struct { + Group string + Kind string +} + +func (gk GroupKind) Empty() bool { + return len(gk.Group) == 0 && len(gk.Kind) == 0 +} + +func (gk GroupKind) WithVersion(version string) GroupVersionKind { + return GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind} +} + +func (gk *GroupKind) String() string { + if len(gk.Group) == 0 { + return gk.Kind + } + return gk.Kind + "." + gk.Group +} + +// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion +// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling +type GroupVersionKind struct { + Group string + Version string + Kind string +} + +// Empty returns true if group, version, and kind are empty +func (gvk GroupVersionKind) Empty() bool { + return len(gvk.Group) == 0 && len(gvk.Version) == 0 && len(gvk.Kind) == 0 +} + +func (gvk GroupVersionKind) GroupKind() GroupKind { + return GroupKind{Group: gvk.Group, Kind: gvk.Kind} +} + +func (gvk GroupVersionKind) GroupVersion() GroupVersion { + return GroupVersion{Group: gvk.Group, Version: gvk.Version} +} + +func (gvk GroupVersionKind) String() string { + return gvk.Group + "/" + gvk.Version + ", Kind=" + gvk.Kind +} + +// GroupVersion contains the "group" and the "version", which uniquely identifies the API. +type GroupVersion struct { + Group string + Version string +} + +// Empty returns true if group and version are empty +func (gv GroupVersion) Empty() bool { + return len(gv.Group) == 0 && len(gv.Version) == 0 +} + +// String puts "group" and "version" into a single "group/version" string. For the legacy v1 +// it returns "v1". +func (gv GroupVersion) String() string { + // special case the internal apiVersion for the legacy kube types + if gv.Empty() { + return "" + } + + // special case of "v1" for backward compatibility + if len(gv.Group) == 0 && gv.Version == "v1" { + return gv.Version + } + if len(gv.Group) > 0 { + return gv.Group + "/" + gv.Version + } + return gv.Version +} + +// KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false +// if none of the options match the group. It prefers a match to group and version over just group. +// TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme. +// TODO: Introduce an adapter type between GroupVersion and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) +// in fewer places. +func (gv GroupVersion) KindForGroupVersionKinds(kinds []GroupVersionKind) (target GroupVersionKind, ok bool) { + for _, gvk := range kinds { + if gvk.Group == gv.Group && gvk.Version == gv.Version { + return gvk, true + } + } + for _, gvk := range kinds { + if gvk.Group == gv.Group { + return gv.WithKind(gvk.Kind), true + } + } + return GroupVersionKind{}, false +} + +// ParseGroupVersion turns "group/version" string into a GroupVersion struct. It reports error +// if it cannot parse the string. +func ParseGroupVersion(gv string) (GroupVersion, error) { + // this can be the internal version for the legacy kube types + // TODO once we've cleared the last uses as strings, this special case should be removed. 
+ if (len(gv) == 0) || (gv == "/") { + return GroupVersion{}, nil + } + + switch strings.Count(gv, "/") { + case 0: + return GroupVersion{"", gv}, nil + case 1: + i := strings.Index(gv, "/") + return GroupVersion{gv[:i], gv[i+1:]}, nil + default: + return GroupVersion{}, fmt.Errorf("unexpected GroupVersion string: %v", gv) + } +} + +// WithKind creates a GroupVersionKind based on the method receiver's GroupVersion and the passed Kind. +func (gv GroupVersion) WithKind(kind string) GroupVersionKind { + return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind} +} + +// WithResource creates a GroupVersionResource based on the method receiver's GroupVersion and the passed Resource. +func (gv GroupVersion) WithResource(resource string) GroupVersionResource { + return GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: resource} +} + +// GroupVersions can be used to represent a set of desired group versions. +// TODO: Move GroupVersions to a package under pkg/runtime, since it's used by scheme. +// TODO: Introduce an adapter type between GroupVersions and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) +// in fewer places. +type GroupVersions []GroupVersion + +// KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false +// if none of the options match the group. +func (gvs GroupVersions) KindForGroupVersionKinds(kinds []GroupVersionKind) (GroupVersionKind, bool) { + var targets []GroupVersionKind + for _, gv := range gvs { + target, ok := gv.KindForGroupVersionKinds(kinds) + if !ok { + continue + } + targets = append(targets, target) + } + if len(targets) == 1 { + return targets[0], true + } + if len(targets) > 1 { + return bestMatch(kinds, targets), true + } + return GroupVersionKind{}, false +} + +// bestMatch tries to pick best matching GroupVersionKind and falls back to the first +// found if no exact match exists. +func bestMatch(kinds []GroupVersionKind, targets []GroupVersionKind) GroupVersionKind { + for _, gvk := range targets { + for _, k := range kinds { + if k == gvk { + return k + } + } + } + return targets[0] +} + +// ToAPIVersionAndKind is a convenience method for satisfying runtime.Object on types that +// do not use TypeMeta. +func (gvk *GroupVersionKind) ToAPIVersionAndKind() (string, string) { + if gvk == nil { + return "", "" + } + return gvk.GroupVersion().String(), gvk.Kind +} + +// FromAPIVersionAndKind returns a GVK representing the provided fields for types that +// do not use TypeMeta. This method exists to support test types and legacy serializations +// that have a distinct group and kind. +// TODO: further reduce usage of this method. +func FromAPIVersionAndKind(apiVersion, kind string) GroupVersionKind { + if gv, err := ParseGroupVersion(apiVersion); err == nil { + return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind} + } + return GroupVersionKind{Kind: kind} +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go new file mode 100644 index 0000000000..b570668458 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go @@ -0,0 +1,40 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +// All objects that are serialized from a Scheme encode their type information. This interface is used +// by serialization to set type information from the Scheme onto the serialized version of an object. +// For objects that cannot be serialized or have unique requirements, this interface may be a no-op. +type ObjectKind interface { + // SetGroupVersionKind sets or clears the intended serialized kind of an object. Passing kind nil + // should clear the current setting. + SetGroupVersionKind(kind GroupVersionKind) + // GroupVersionKind returns the stored group, version, and kind of an object, or nil if the object does + // not expose or provide these fields. + GroupVersionKind() GroupVersionKind +} + +// EmptyObjectKind implements the ObjectKind interface as a noop +var EmptyObjectKind = emptyObjectKind{} + +type emptyObjectKind struct{} + +// SetGroupVersionKind implements the ObjectKind interface +func (emptyObjectKind) SetGroupVersionKind(gvk GroupVersionKind) {} + +// GroupVersionKind implements the ObjectKind interface +func (emptyObjectKind) GroupVersionKind() GroupVersionKind { return GroupVersionKind{} } diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go similarity index 90% rename from vendor/k8s.io/kubernetes/pkg/runtime/scheme.go rename to vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index 0b42feae42..fbec6ad9bc 100644 --- a/vendor/k8s.io/kubernetes/pkg/runtime/scheme.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -21,8 +21,8 @@ import ( "net/url" "reflect" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/conversion" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime/schema" ) // Scheme defines methods for serializing and deserializing API objects, a type @@ -43,14 +43,14 @@ import ( type Scheme struct { // versionMap allows one to figure out the go type of an object with // the given version and name. - gvkToType map[unversioned.GroupVersionKind]reflect.Type + gvkToType map[schema.GroupVersionKind]reflect.Type // typeToGroupVersion allows one to find metadata for a given go object. // The reflect.Type we index by should *not* be a pointer. - typeToGVK map[reflect.Type][]unversioned.GroupVersionKind + typeToGVK map[reflect.Type][]schema.GroupVersionKind // unversionedTypes are transformed without conversion in ConvertToVersion. - unversionedTypes map[reflect.Type]unversioned.GroupVersionKind + unversionedTypes map[reflect.Type]schema.GroupVersionKind // unversionedKinds are the names of kinds that can be created in the context of any group // or version @@ -80,9 +80,9 @@ type FieldLabelConversionFunc func(label, value string) (internalLabel, internal // NewScheme creates a new Scheme. This scheme is pluggable by default. 
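Before the Scheme changes below, a brief sketch of how the schema types vendored above compose; the example.dev group and Widget kind are placeholders:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	gv, err := schema.ParseGroupVersion("example.dev/v1")
	if err != nil {
		panic(err)
	}
	gvk := gv.WithKind("Widget")
	fmt.Println(gvk) // example.dev/v1, Kind=Widget

	gk := gvk.GroupKind()
	fmt.Println(gk.String()) // Widget.example.dev

	apiVersion, kind := gvk.ToAPIVersionAndKind()
	fmt.Println(apiVersion, kind) // example.dev/v1 Widget

	// GroupVersion is itself a GroupVersioner: it prefers an exact group/version
	// match and falls back to a group-only match.
	target, ok := gv.KindForGroupVersionKinds([]schema.GroupVersionKind{
		{Group: "example.dev", Version: "v2", Kind: "Widget"},
		gvk,
	})
	fmt.Println(target, ok) // example.dev/v1, Kind=Widget true
}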
func NewScheme() *Scheme { s := &Scheme{ - gvkToType: map[unversioned.GroupVersionKind]reflect.Type{}, - typeToGVK: map[reflect.Type][]unversioned.GroupVersionKind{}, - unversionedTypes: map[reflect.Type]unversioned.GroupVersionKind{}, + gvkToType: map[schema.GroupVersionKind]reflect.Type{}, + typeToGVK: map[reflect.Type][]schema.GroupVersionKind{}, + unversionedTypes: map[reflect.Type]schema.GroupVersionKind{}, unversionedKinds: map[string]reflect.Type{}, cloner: conversion.NewCloner(), fieldLabelConversionFuncs: map[string]map[string]FieldLabelConversionFunc{}, @@ -145,7 +145,7 @@ func (s *Scheme) Converter() *conversion.Converter { // // TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into // every version with particular schemas. Resolve this method at that point. -func (s *Scheme) AddUnversionedTypes(version unversioned.GroupVersion, types ...Object) { +func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Object) { s.AddKnownTypes(version, types...) for _, obj := range types { t := reflect.TypeOf(obj).Elem() @@ -162,23 +162,14 @@ func (s *Scheme) AddUnversionedTypes(version unversioned.GroupVersion, types ... // All objects passed to types should be pointers to structs. The name that go reports for // the struct becomes the "kind" field when encoding. Version may not be empty - use the // APIVersionInternal constant if you have a type that does not have a formal version. -func (s *Scheme) AddKnownTypes(gv unversioned.GroupVersion, types ...Object) { - if len(gv.Version) == 0 { - panic(fmt.Sprintf("version is required on all types: %s %v", gv, types[0])) - } +func (s *Scheme) AddKnownTypes(gv schema.GroupVersion, types ...Object) { for _, obj := range types { t := reflect.TypeOf(obj) if t.Kind() != reflect.Ptr { panic("All types must be pointers to structs.") } t = t.Elem() - if t.Kind() != reflect.Struct { - panic("All types must be pointers to structs.") - } - - gvk := gv.WithKind(t.Name()) - s.gvkToType[gvk] = t - s.typeToGVK[t] = append(s.typeToGVK[t], gvk) + s.AddKnownTypeWithName(gv.WithKind(t.Name()), obj) } } @@ -186,7 +177,7 @@ func (s *Scheme) AddKnownTypes(gv unversioned.GroupVersion, types ...Object) { // be encoded as. Useful for testing when you don't want to make multiple packages to define // your structs. Version may not be empty - use the APIVersionInternal constant if you have a // type that does not have a formal version. -func (s *Scheme) AddKnownTypeWithName(gvk unversioned.GroupVersionKind, obj Object) { +func (s *Scheme) AddKnownTypeWithName(gvk schema.GroupVersionKind, obj Object) { t := reflect.TypeOf(obj) if len(gvk.Version) == 0 { panic(fmt.Sprintf("version is required on all types: %s %v", gvk, t)) @@ -199,12 +190,22 @@ func (s *Scheme) AddKnownTypeWithName(gvk unversioned.GroupVersionKind, obj Obje panic("All types must be pointers to structs.") } + if oldT, found := s.gvkToType[gvk]; found && oldT != t { + panic(fmt.Sprintf("Double registration of different types for %v: old=%v.%v, new=%v.%v", gvk, oldT.PkgPath(), oldT.Name(), t.PkgPath(), t.Name())) + } + s.gvkToType[gvk] = t + + for _, existingGvk := range s.typeToGVK[t] { + if existingGvk == gvk { + return + } + } s.typeToGVK[t] = append(s.typeToGVK[t], gvk) } // KnownTypes returns the types known for the given version. 
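A hedged sketch of the registration flow this hunk touches. Widget and example.dev are hypothetical; in this vintage of the API, runtime.Object only requires GetObjectKind, so embedding runtime.TypeMeta is enough:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// Widget is a toy API object. Embedding runtime.TypeMeta supplies the
// SetGroupVersionKind/GroupVersionKind accessors returned by GetObjectKind.
type Widget struct {
	runtime.TypeMeta `json:",inline"`
	Name             string `json:"name,omitempty"`
}

func (w *Widget) GetObjectKind() schema.ObjectKind { return &w.TypeMeta }

func main() {
	scheme := runtime.NewScheme()
	gv := schema.GroupVersion{Group: "example.dev", Version: "v1"}
	scheme.AddKnownTypes(gv, &Widget{})

	// New instantiates a registered kind from its GroupVersionKind.
	obj, err := scheme.New(gv.WithKind("Widget"))
	if err != nil {
		panic(err)
	}
	gvks, unversioned, err := scheme.ObjectKinds(obj)
	fmt.Println(gvks, unversioned, err) // [example.dev/v1, Kind=Widget] false <nil>
}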
-func (s *Scheme) KnownTypes(gv unversioned.GroupVersion) map[string]reflect.Type { +func (s *Scheme) KnownTypes(gv schema.GroupVersion) map[string]reflect.Type { types := make(map[string]reflect.Type) for gvk, t := range s.gvkToType { if gv != gvk.GroupVersion() { @@ -217,23 +218,23 @@ func (s *Scheme) KnownTypes(gv unversioned.GroupVersion) map[string]reflect.Type } // AllKnownTypes returns the all known types. -func (s *Scheme) AllKnownTypes() map[unversioned.GroupVersionKind]reflect.Type { +func (s *Scheme) AllKnownTypes() map[schema.GroupVersionKind]reflect.Type { return s.gvkToType } // ObjectKind returns the group,version,kind of the go object and true if this object // is considered unversioned, or an error if it's not a pointer or is unregistered. -func (s *Scheme) ObjectKind(obj Object) (unversioned.GroupVersionKind, bool, error) { +func (s *Scheme) ObjectKind(obj Object) (schema.GroupVersionKind, bool, error) { gvks, unversionedType, err := s.ObjectKinds(obj) if err != nil { - return unversioned.GroupVersionKind{}, false, err + return schema.GroupVersionKind{}, false, err } return gvks[0], unversionedType, nil } // ObjectKinds returns all possible group,version,kind of the go object, true if the // object is considered unversioned, or an error if it's not a pointer or is unregistered. -func (s *Scheme) ObjectKinds(obj Object) ([]unversioned.GroupVersionKind, bool, error) { +func (s *Scheme) ObjectKinds(obj Object) ([]schema.GroupVersionKind, bool, error) { v, err := conversion.EnforcePtr(obj) if err != nil { return nil, false, err @@ -242,7 +243,7 @@ func (s *Scheme) ObjectKinds(obj Object) ([]unversioned.GroupVersionKind, bool, gvks, ok := s.typeToGVK[t] if !ok { - return nil, false, NewNotRegisteredErr(unversioned.GroupVersionKind{}, t) + return nil, false, NewNotRegisteredErr(schema.GroupVersionKind{}, t) } _, unversionedType := s.unversionedTypes[t] @@ -251,7 +252,7 @@ func (s *Scheme) ObjectKinds(obj Object) ([]unversioned.GroupVersionKind, bool, // Recognizes returns true if the scheme is able to handle the provided group,version,kind // of an object. -func (s *Scheme) Recognizes(gvk unversioned.GroupVersionKind) bool { +func (s *Scheme) Recognizes(gvk schema.GroupVersionKind) bool { _, exists := s.gvkToType[gvk] return exists } @@ -272,7 +273,7 @@ func (s *Scheme) IsUnversioned(obj Object) (bool, bool) { // New returns a new API object of the given version and name, or an error if it hasn't // been registered. The version and kind fields must be specified. 
-func (s *Scheme) New(kind unversioned.GroupVersionKind) (Object, error) { +func (s *Scheme) New(kind schema.GroupVersionKind) (Object, error) { if t, exists := s.gvkToType[kind]; exists { return reflect.New(t).Interface().(Object), nil } @@ -514,7 +515,7 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) ( } kinds, ok := s.typeToGVK[t] if !ok || len(kinds) == 0 { - return nil, NewNotRegisteredErr(unversioned.GroupVersionKind{}, t) + return nil, NewNotRegisteredErr(schema.GroupVersionKind{}, t) } gvk, ok := target.KindForGroupVersionKinds(kinds) @@ -522,7 +523,7 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) ( // try to see if this type is listed as unversioned (for legacy support) // TODO: when we move to server API versions, we should completely remove the unversioned concept if unversionedKind, ok := s.unversionedTypes[t]; ok { - if gvk, ok := target.KindForGroupVersionKinds([]unversioned.GroupVersionKind{unversionedKind}); ok { + if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok { return copyAndSetTargetKind(copy, s, in, gvk) } return copyAndSetTargetKind(copy, s, in, unversionedKind) @@ -541,7 +542,7 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) ( // type is unversioned, no conversion necessary if unversionedKind, ok := s.unversionedTypes[t]; ok { - if gvk, ok := target.KindForGroupVersionKinds([]unversioned.GroupVersionKind{unversionedKind}); ok { + if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok { return copyAndSetTargetKind(copy, s, in, gvk) } return copyAndSetTargetKind(copy, s, in, unversionedKind) @@ -576,7 +577,7 @@ func (s *Scheme) generateConvertMeta(in interface{}) (conversion.FieldMatchingFl } // copyAndSetTargetKind performs a conditional copy before returning the object, or an error if copy was not successful. -func copyAndSetTargetKind(copy bool, copier ObjectCopier, obj Object, kind unversioned.GroupVersionKind) (Object, error) { +func copyAndSetTargetKind(copy bool, copier ObjectCopier, obj Object, kind schema.GroupVersionKind) (Object, error) { if copy { copied, err := copier.Copy(obj) if err != nil { @@ -589,11 +590,11 @@ func copyAndSetTargetKind(copy bool, copier ObjectCopier, obj Object, kind unver } // setTargetKind sets the kind on an object, taking into account whether the target kind is the internal version. 
-func setTargetKind(obj Object, kind unversioned.GroupVersionKind) { +func setTargetKind(obj Object, kind schema.GroupVersionKind) { if kind.Version == APIVersionInternal { // internal is a special case // TODO: look at removing the need to special case this - obj.GetObjectKind().SetGroupVersionKind(unversioned.GroupVersionKind{}) + obj.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{}) return } obj.GetObjectKind().SetGroupVersionKind(kind) diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/scheme_builder.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/runtime/scheme_builder.go rename to vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go similarity index 93% rename from vendor/k8s.io/kubernetes/pkg/runtime/serializer/codec_factory.go rename to vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index d58c6cce07..65f451124d 100644 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/codec_factory.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -17,11 +17,11 @@ limitations under the License. package serializer import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer/json" - "k8s.io/kubernetes/pkg/runtime/serializer/recognizer" - "k8s.io/kubernetes/pkg/runtime/serializer/versioning" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" + "k8s.io/apimachinery/pkg/runtime/serializer/versioning" ) // serializerExtensions are for serializers that are conditionally compiled in @@ -162,8 +162,8 @@ func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo { // // TODO: make this call exist only in pkg/api, and initialize it with the set of default versions. // All other callers will be forced to request a Codec directly. 
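For orientation, a sketch of how this codec factory is usually wired together. It reuses the toy Widget type from the earlier sketch and assumes the NewCodecFactory constructor from this serializer package, which sits outside this excerpt:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

// Widget is the same hypothetical API object used in the earlier sketch.
type Widget struct {
	runtime.TypeMeta `json:",inline"`
	Name             string `json:"name,omitempty"`
}

func (w *Widget) GetObjectKind() schema.ObjectKind { return &w.TypeMeta }

func main() {
	scheme := runtime.NewScheme()
	gv := schema.GroupVersion{Group: "example.dev", Version: "v1"}
	scheme.AddKnownTypes(gv, &Widget{})

	codecs := serializer.NewCodecFactory(scheme)
	// LegacyCodec encodes to, and decodes from, the requested group version.
	codec := codecs.LegacyCodec(gv)

	data, err := runtime.Encode(codec, &Widget{Name: "gizmo"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // e.g. {"apiVersion":"example.dev/v1","kind":"Widget","name":"gizmo"}
}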
-func (f CodecFactory) LegacyCodec(version ...unversioned.GroupVersion) runtime.Codec { - return versioning.NewDefaultingCodecForScheme(f.scheme, f.legacySerializer, f.universal, unversioned.GroupVersions(version), runtime.InternalGroupVersioner) +func (f CodecFactory) LegacyCodec(version ...schema.GroupVersion) runtime.Codec { + return versioning.NewDefaultingCodecForScheme(f.scheme, f.legacySerializer, f.universal, schema.GroupVersions(version), runtime.InternalGroupVersioner) } // UniversalDeserializer can convert any stored data recognized by this factory into a Go object that satisfies @@ -181,12 +181,12 @@ func (f CodecFactory) UniversalDeserializer() runtime.Decoder { // // TODO: the decoder will eventually be removed in favor of dealing with objects in their versioned form // TODO: only accept a group versioner -func (f CodecFactory) UniversalDecoder(versions ...unversioned.GroupVersion) runtime.Decoder { +func (f CodecFactory) UniversalDecoder(versions ...schema.GroupVersion) runtime.Decoder { var versioner runtime.GroupVersioner if len(versions) == 0 { versioner = runtime.InternalGroupVersioner } else { - versioner = unversioned.GroupVersions(versions) + versioner = schema.GroupVersions(versions) } return f.CodecForVersions(nil, f.universal, nil, versioner) } @@ -220,9 +220,10 @@ type DirectCodecFactory struct { CodecFactory } -// EncoderForVersion returns an encoder that does not do conversion. gv is ignored. -func (f DirectCodecFactory) EncoderForVersion(serializer runtime.Encoder, _ runtime.GroupVersioner) runtime.Encoder { +// EncoderForVersion returns an encoder that does not do conversion. +func (f DirectCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder { return versioning.DirectEncoder{ + Version: version, Encoder: serializer, ObjectTyper: f.CodecFactory.scheme, } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go new file mode 100644 index 0000000000..28bb91b533 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -0,0 +1,245 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package json + +import ( + "encoding/json" + "io" + + "github.com/ghodss/yaml" + "github.com/ugorji/go/codec" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" + "k8s.io/apimachinery/pkg/util/framer" + utilyaml "k8s.io/apimachinery/pkg/util/yaml" +) + +// NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer +// is not nil, the object has the group, version, and kind fields set. 
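A small sketch of the serializer defined in this file, decoding into *runtime.Unknown so that no ObjectCreater or ObjectTyper is required; DefaultMetaFactory comes from meta.go further below, and the payload values are placeholders:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
)

func main() {
	// Decoding into *runtime.Unknown only interprets apiVersion/kind and passes
	// the raw bytes through, so nil creater/typer is acceptable for this path.
	s := json.NewSerializer(json.DefaultMetaFactory, nil, nil, false)

	data := []byte(`{"apiVersion":"example.dev/v1","kind":"Widget","name":"gizmo"}`)
	obj, gvk, err := s.Decode(data, nil, &runtime.Unknown{})
	if err != nil {
		panic(err)
	}
	u := obj.(*runtime.Unknown)
	fmt.Println(gvk, u.ContentType) // example.dev/v1, Kind=Widget application/json
}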
+func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer { + return &Serializer{ + meta: meta, + creater: creater, + typer: typer, + yaml: false, + pretty: pretty, + } +} + +// NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer +// is not nil, the object has the group, version, and kind fields set. This serializer supports only the subset of YAML that +// matches JSON, and will error if constructs are used that do not serialize to JSON. +func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { + return &Serializer{ + meta: meta, + creater: creater, + typer: typer, + yaml: true, + } +} + +type Serializer struct { + meta MetaFactory + creater runtime.ObjectCreater + typer runtime.ObjectTyper + yaml bool + pretty bool +} + +// Serializer implements Serializer +var _ runtime.Serializer = &Serializer{} +var _ recognizer.RecognizingDecoder = &Serializer{} + +// Decode attempts to convert the provided data into YAML or JSON, extract the stored schema kind, apply the provided default gvk, and then +// load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, the raw data will be +// extracted and no decoding will be performed. If into is not registered with the typer, then the object will be straight decoded using +// normal JSON/YAML unmarshalling. If into is provided and the original data is not fully qualified with kind/version/group, the type of +// the into will be used to alter the returned gvk. On success or most errors, the method will return the calculated schema kind. +func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + if versioned, ok := into.(*runtime.VersionedObjects); ok { + into = versioned.Last() + obj, actual, err := s.Decode(originalData, gvk, into) + if err != nil { + return nil, actual, err + } + versioned.Objects = []runtime.Object{obj} + return versioned, actual, nil + } + + data := originalData + if s.yaml { + altered, err := yaml.YAMLToJSON(data) + if err != nil { + return nil, nil, err + } + data = altered + } + + actual, err := s.meta.Interpret(data) + if err != nil { + return nil, nil, err + } + + if gvk != nil { + // apply kind and version defaulting from provided default + if len(actual.Kind) == 0 { + actual.Kind = gvk.Kind + } + if len(actual.Version) == 0 && len(actual.Group) == 0 { + actual.Group = gvk.Group + actual.Version = gvk.Version + } + if len(actual.Version) == 0 && actual.Group == gvk.Group { + actual.Version = gvk.Version + } + } + + if unk, ok := into.(*runtime.Unknown); ok && unk != nil { + unk.Raw = originalData + unk.ContentType = runtime.ContentTypeJSON + unk.GetObjectKind().SetGroupVersionKind(*actual) + return unk, actual, nil + } + + if into != nil { + types, _, err := s.typer.ObjectKinds(into) + switch { + case runtime.IsNotRegisteredError(err): + if err := codec.NewDecoderBytes(data, new(codec.JsonHandle)).Decode(into); err != nil { + return nil, actual, err + } + return into, actual, nil + case err != nil: + return nil, actual, err + default: + typed := types[0] + if len(actual.Kind) == 0 { + actual.Kind = typed.Kind + } + if len(actual.Version) == 0 && len(actual.Group) == 0 { + actual.Group = typed.Group + actual.Version = typed.Version + } + if len(actual.Version) == 0 && actual.Group == typed.Group 
{ + actual.Version = typed.Version + } + } + } + + if len(actual.Kind) == 0 { + return nil, actual, runtime.NewMissingKindErr(string(originalData)) + } + if len(actual.Version) == 0 { + return nil, actual, runtime.NewMissingVersionErr(string(originalData)) + } + + // use the target if necessary + obj, err := runtime.UseOrCreateObject(s.typer, s.creater, *actual, into) + if err != nil { + return nil, actual, err + } + + if err := codec.NewDecoderBytes(data, new(codec.JsonHandle)).Decode(obj); err != nil { + return nil, actual, err + } + return obj, actual, nil +} + +// Encode serializes the provided object to the given writer. +func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { + if s.yaml { + json, err := json.Marshal(obj) + if err != nil { + return err + } + data, err := yaml.JSONToYAML(json) + if err != nil { + return err + } + _, err = w.Write(data) + return err + } + + if s.pretty { + data, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return err + } + _, err = w.Write(data) + return err + } + encoder := json.NewEncoder(w) + return encoder.Encode(obj) +} + +// RecognizesData implements the RecognizingDecoder interface. +func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) { + if s.yaml { + // we could potentially look for '---' + return false, true, nil + } + _, _, ok = utilyaml.GuessJSONStream(peek, 2048) + return ok, false, nil +} + +// Framer is the default JSON framing behavior, with newlines delimiting individual objects. +var Framer = jsonFramer{} + +type jsonFramer struct{} + +// NewFrameWriter implements stream framing for this serializer +func (jsonFramer) NewFrameWriter(w io.Writer) io.Writer { + // we can write JSON objects directly to the writer, because they are self-framing + return w +} + +// NewFrameReader implements stream framing for this serializer +func (jsonFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { + // we need to extract the JSON chunks of data to pass to Decode() + return framer.NewJSONFramedReader(r) +} + +// Framer is the default JSON framing behavior, with newlines delimiting individual objects. +var YAMLFramer = yamlFramer{} + +type yamlFramer struct{} + +// NewFrameWriter implements stream framing for this serializer +func (yamlFramer) NewFrameWriter(w io.Writer) io.Writer { + return yamlFrameWriter{w} +} + +// NewFrameReader implements stream framing for this serializer +func (yamlFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { + // extract the YAML document chunks directly + return utilyaml.NewDocumentDecoder(r) +} + +type yamlFrameWriter struct { + w io.Writer +} + +// Write separates each document with the YAML document separator (`---` followed by line +// break). Writers must write well formed YAML documents (include a final line break). +func (w yamlFrameWriter) Write(data []byte) (n int, err error) { + if _, err := w.w.Write([]byte("---\n")); err != nil { + return 0, err + } + return w.w.Write(data) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go new file mode 100644 index 0000000000..df3f5f989a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go @@ -0,0 +1,63 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package json + +import ( + "encoding/json" + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// MetaFactory is used to store and retrieve the version and kind +// information for JSON objects in a serializer. +type MetaFactory interface { + // Interpret should return the version and kind of the wire-format of + // the object. + Interpret(data []byte) (*schema.GroupVersionKind, error) +} + +// DefaultMetaFactory is a default factory for versioning objects in JSON. The object +// in memory and in the default JSON serialization will use the "kind" and "apiVersion" +// fields. +var DefaultMetaFactory = SimpleMetaFactory{} + +// SimpleMetaFactory provides default methods for retrieving the type and version of objects +// that are identified with an "apiVersion" and "kind" fields in their JSON +// serialization. It may be parameterized with the names of the fields in memory, or an +// optional list of base structs to search for those fields in memory. +type SimpleMetaFactory struct { +} + +// Interpret will return the APIVersion and Kind of the JSON wire-format +// encoding of an object, or an error. +func (SimpleMetaFactory) Interpret(data []byte) (*schema.GroupVersionKind, error) { + findKind := struct { + // +optional + APIVersion string `json:"apiVersion,omitempty"` + // +optional + Kind string `json:"kind,omitempty"` + }{} + if err := json.Unmarshal(data, &findKind); err != nil { + return nil, fmt.Errorf("couldn't get version/kind; json parse error: %v", err) + } + gv, err := schema.ParseGroupVersion(findKind.APIVersion) + if err != nil { + return nil, err + } + return &schema.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: findKind.Kind}, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/negotiated_codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go similarity index 97% rename from vendor/k8s.io/kubernetes/pkg/runtime/serializer/negotiated_codec.go rename to vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go index 20337fc40f..a42b4a41a8 100644 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/negotiated_codec.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go @@ -17,7 +17,7 @@ limitations under the License. 
package serializer import ( - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) // TODO: We should split negotiated serializers that we can change versions on from those we can change diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/doc.go rename to vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go similarity index 94% rename from vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/protobuf.go rename to vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go index 5a6a50de38..8d4ea71180 100644 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/protobuf.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go @@ -24,10 +24,10 @@ import ( "github.com/gogo/protobuf/proto" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer/recognizer" - "k8s.io/kubernetes/pkg/util/framer" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" + "k8s.io/apimachinery/pkg/util/framer" ) var ( @@ -36,7 +36,7 @@ var ( // byte being reserved for the encoding style. The only encoding style defined is 0x00, which means that // the rest of the byte stream is a message of type k8s.io.kubernetes.pkg.runtime.Unknown (proto2). // - // See k8s.io/kubernetes/pkg/runtime/generated.proto for details of the runtime.Unknown message. + // See k8s.io/apimachinery/pkg/runtime/generated.proto for details of the runtime.Unknown message. // // This encoding scheme is experimental, and is subject to change at any time. protoEncodingPrefix = []byte{0x6b, 0x38, 0x73, 0x00} @@ -85,7 +85,7 @@ var _ recognizer.RecognizingDecoder = &Serializer{} // be straight decoded using normal protobuf unmarshalling (the MarshalTo interface). If into is provided and the original data is // not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most // errors, the method will return the calculated schema kind. -func (s *Serializer) Decode(originalData []byte, gvk *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { +func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { if versioned, ok := into.(*runtime.VersionedObjects); ok { into = versioned.Last() obj, actual, err := s.Decode(originalData, gvk, into) @@ -255,7 +255,7 @@ func (s *Serializer) RecognizesData(peek io.Reader) (bool, bool, error) { } // copyKindDefaults defaults dst to the value in src if dst does not have a value set. -func copyKindDefaults(dst, src *unversioned.GroupVersionKind) { +func copyKindDefaults(dst, src *schema.GroupVersionKind) { if src == nil { return } @@ -316,7 +316,7 @@ var _ runtime.Serializer = &RawSerializer{} // be straight decoded using normal protobuf unmarshalling (the MarshalTo interface). If into is provided and the original data is // not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. 
On success or most // errors, the method will return the calculated schema kind. -func (s *RawSerializer) Decode(originalData []byte, gvk *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { +func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { if into == nil { return nil, nil, fmt.Errorf("this serializer requires an object to decode into: %#v", s) } @@ -341,7 +341,7 @@ func (s *RawSerializer) Decode(originalData []byte, gvk *unversioned.GroupVersio } data := originalData - actual := &unversioned.GroupVersionKind{} + actual := &schema.GroupVersionKind{} copyKindDefaults(actual, gvk) if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil { @@ -386,7 +386,7 @@ func (s *RawSerializer) Decode(originalData []byte, gvk *unversioned.GroupVersio } // unmarshalToObject is the common code between decode in the raw and normal serializer. -func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater, actual *unversioned.GroupVersionKind, into runtime.Object, data []byte) (runtime.Object, *unversioned.GroupVersionKind, error) { +func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater, actual *schema.GroupVersionKind, into runtime.Object, data []byte) (runtime.Object, *schema.GroupVersionKind, error) { // use the target if necessary obj, err := runtime.UseOrCreateObject(typer, creater, *actual, into) if err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf_extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go similarity index 94% rename from vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf_extension.go rename to vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go index a50dae5408..545cf78df7 100644 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf_extension.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go @@ -17,8 +17,8 @@ limitations under the License. 
package serializer import ( - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer/protobuf" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/protobuf" ) const ( diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/recognizer/recognizer.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go similarity index 93% rename from vendor/k8s.io/kubernetes/pkg/runtime/serializer/recognizer/recognizer.go rename to vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go index 310002a242..38497ab533 100644 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/recognizer/recognizer.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go @@ -22,8 +22,8 @@ import ( "fmt" "io" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) type RecognizingDecoder interface { @@ -81,7 +81,7 @@ func (d *decoder) RecognizesData(peek io.Reader) (bool, bool, error) { return false, anyUnknown, lastErr } -func (d *decoder) Decode(data []byte, gvk *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { +func (d *decoder) Decode(data []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { var ( lastErr error skipped []runtime.Decoder diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go similarity index 90% rename from vendor/k8s.io/kubernetes/pkg/runtime/serializer/streaming/streaming.go rename to vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go index ac17138e43..91fd4ed4f0 100644 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/streaming/streaming.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go @@ -23,8 +23,8 @@ import ( "fmt" "io" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // Encoder is a runtime.Encoder on a stream. @@ -37,7 +37,7 @@ type Encoder interface { // Decoder is a runtime.Decoder from a stream. type Decoder interface { // Decode will return io.EOF when no more objects are available. - Decode(defaults *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) + Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) // Close closes the underlying stream. Close() error } @@ -71,7 +71,7 @@ func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder { var ErrObjectTooLarge = fmt.Errorf("object to decode was longer than maximum allowed size") // Decode reads the next object from the stream and decodes it. 
-func (d *decoder) Decode(defaults *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { +func (d *decoder) Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { base := 0 for { n, err := d.reader.Read(d.buf[base:]) diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/runtime/serializer/versioning/versioning.go rename to vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go index c82460e4a4..829d4fa895 100644 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/versioning/versioning.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -19,9 +19,9 @@ package versioning import ( "io" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - utilruntime "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) // NewCodecForScheme is a convenience method for callers that are using a scheme. @@ -93,7 +93,7 @@ type codec struct { // Decode attempts a decode of the object, then tries to convert it to the internal version. If into is provided and the decoding is // successful, the returned runtime.Object will be the value passed as into. Note that this may bypass conversion if you pass an // into that matches the serialized version. -func (c *codec) Decode(data []byte, defaultGVK *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { +func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { versioned, isVersioned := into.(*runtime.VersionedObjects) if isVersioned { into = versioned.Last() @@ -181,7 +181,7 @@ func (c *codec) Decode(data []byte, defaultGVK *unversioned.GroupVersionKind, in // conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is. func (c *codec) Encode(obj runtime.Object, w io.Writer) error { switch obj.(type) { - case *runtime.Unknown, *runtime.Unstructured, *runtime.UnstructuredList: + case *runtime.Unknown, runtime.Unstructured: return c.encoder.Encode(obj, w) } @@ -227,6 +227,7 @@ func (c *codec) Encode(obj runtime.Object, w io.Writer) error { // DirectEncoder serializes an object and ensures the GVK is set. type DirectEncoder struct { + Version runtime.GroupVersioner runtime.Encoder runtime.ObjectTyper } @@ -242,7 +243,14 @@ func (e DirectEncoder) Encode(obj runtime.Object, stream io.Writer) error { } kind := obj.GetObjectKind() oldGVK := kind.GroupVersionKind() - kind.SetGroupVersionKind(gvks[0]) + gvk := gvks[0] + if e.Version != nil { + preferredGVK, ok := e.Version.KindForGroupVersionKinds(gvks) + if ok { + gvk = preferredGVK + } + } + kind.SetGroupVersionKind(gvk) err = e.Encoder.Encode(obj, stream) kind.SetGroupVersionKind(oldGVK) return err @@ -254,12 +262,12 @@ type DirectDecoder struct { } // Decode does not do conversion. It removes the gvk during deserialization. 
-func (d DirectDecoder) Decode(data []byte, defaults *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { +func (d DirectDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { obj, gvk, err := d.Decoder.Decode(data, defaults, into) if obj != nil { kind := obj.GetObjectKind() // clearing the gvk is just a convention of a codec - kind.SetGroupVersionKind(unversioned.GroupVersionKind{}) + kind.SetGroupVersionKind(schema.GroupVersionKind{}) } return obj, gvk, err } diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/swagger_doc_generator.go b/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/runtime/swagger_doc_generator.go rename to vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go new file mode 100644 index 0000000000..f972c5e697 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -0,0 +1,133 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +// Note that the types provided in this file are not versioned and are intended to be +// safe to use from within all versions of every API object. + +// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, +// like this: +// type MyAwesomeAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// ... // other fields +// } +// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind +// +// TypeMeta is provided here for convenience. You may use it directly from this package or define +// your own with the same fields. +// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +type TypeMeta struct { + // +optional + APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"` + // +optional + Kind string `json:"kind,omitempty" yaml:"kind,omitempty" protobuf:"bytes,2,opt,name=kind"` +} + +const ( + ContentTypeJSON string = "application/json" +) + +// RawExtension is used to hold extensions in external versions. +// +// To use this, make a field which has RawExtension as its type in your external, versioned +// struct, and Object in your internal struct. You also need to register your +// various plugin types. 
+// +// // Internal package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.Object `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // External package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.RawExtension `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // On the wire, the JSON will look something like this: +// { +// "kind":"MyAPIObject", +// "apiVersion":"v1", +// "myPlugin": { +// "kind":"PluginA", +// "aOption":"foo", +// }, +// } +// +// So what happens? Decode first uses json or yaml to unmarshal the serialized data into +// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. +// The next step is to copy (using pkg/conversion) into the internal struct. The runtime +// package's DefaultScheme has conversion functions installed which will unpack the +// JSON stored in RawExtension, turning it into the correct object type, and storing it +// in the Object. (TODO: In the case where the object is of an unknown type, a +// runtime.Unknown object will be created and stored.) +// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +type RawExtension struct { + // Raw is the underlying serialization of this object. + // + // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. + Raw []byte `protobuf:"bytes,1,opt,name=raw"` + // Object can hold a representation of this extension - useful for working with versioned + // structs. + Object Object `json:"-"` +} + +// Unknown allows api objects with unknown types to be passed-through. This can be used +// to deal with the API objects from a plug-in. Unknown objects still have functioning +// TypeMeta features-- kind, version, etc. +// TODO: Make this object have easy access to field based accessors and settors for +// metadata and field mutatation. +// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +type Unknown struct { + TypeMeta `json:",inline" protobuf:"bytes,1,opt,name=typeMeta"` + // Raw will hold the complete serialized object which couldn't be matched + // with a registered type. Most likely, nothing should be done with this + // except for passing it through the system. + Raw []byte `protobuf:"bytes,2,opt,name=raw"` + // ContentEncoding is encoding used to encode 'Raw' data. + // Unspecified means no encoding. + ContentEncoding string `protobuf:"bytes,3,opt,name=contentEncoding"` + // ContentType is serialization method used to serialize 'Raw'. + // Unspecified means ContentTypeJSON. + ContentType string `protobuf:"bytes,4,opt,name=contentType"` +} + +// VersionedObjects is used by Decoders to give callers a way to access all versions +// of an object during the decoding process. +type VersionedObjects struct { + // Objects is the set of objects retrieved during decoding, in order of conversion. + // The 0 index is the object as serialized on the wire. If conversion has occurred, + // other objects may be present. The right most object is the same as would be returned + // by a normal Decode call. 
+ Objects []Object +} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/types_proto.go b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/runtime/types_proto.go rename to vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go new file mode 100644 index 0000000000..b75b3ebb22 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go @@ -0,0 +1,80 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package runtime + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + reflect "reflect" +) + +// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. +func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { + return []conversion.GeneratedDeepCopyFunc{ + {Fn: DeepCopy_runtime_RawExtension, InType: reflect.TypeOf(&RawExtension{})}, + {Fn: DeepCopy_runtime_TypeMeta, InType: reflect.TypeOf(&TypeMeta{})}, + {Fn: DeepCopy_runtime_Unknown, InType: reflect.TypeOf(&Unknown{})}, + } +} + +func DeepCopy_runtime_RawExtension(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RawExtension) + out := out.(*RawExtension) + *out = *in + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) + } + // in.Object is kind 'Interface' + if in.Object != nil { + if newVal, err := c.DeepCopy(&in.Object); err != nil { + return err + } else { + out.Object = *newVal.(*Object) + } + } + return nil + } +} + +func DeepCopy_runtime_TypeMeta(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TypeMeta) + out := out.(*TypeMeta) + *out = *in + return nil + } +} + +func DeepCopy_runtime_Unknown(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Unknown) + out := out.(*Unknown) + *out = *in + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return nil + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/selection/operator.go b/vendor/k8s.io/apimachinery/pkg/selection/operator.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/selection/operator.go rename to vendor/k8s.io/apimachinery/pkg/selection/operator.go diff --git a/vendor/k8s.io/kubernetes/pkg/types/doc.go b/vendor/k8s.io/apimachinery/pkg/types/doc.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/types/doc.go rename to vendor/k8s.io/apimachinery/pkg/types/doc.go diff --git a/vendor/k8s.io/kubernetes/pkg/types/namespacedname.go b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/types/namespacedname.go rename to vendor/k8s.io/apimachinery/pkg/types/namespacedname.go diff 
--git a/vendor/k8s.io/kubernetes/pkg/types/nodename.go b/vendor/k8s.io/apimachinery/pkg/types/nodename.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/types/nodename.go rename to vendor/k8s.io/apimachinery/pkg/types/nodename.go diff --git a/vendor/k8s.io/apimachinery/pkg/types/patch.go b/vendor/k8s.io/apimachinery/pkg/types/patch.go new file mode 100644 index 0000000000..d522d1dbdc --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/patch.go @@ -0,0 +1,28 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +// Similarly to above, these are constants to support HTTP PATCH utilized by +// both the client and server that didn't make sense for a whole package to be +// dedicated to. +type PatchType string + +const ( + JSONPatchType PatchType = "application/json-patch+json" + MergePatchType PatchType = "application/merge-patch+json" + StrategicMergePatchType PatchType = "application/strategic-merge-patch+json" +) diff --git a/vendor/k8s.io/kubernetes/pkg/types/uid.go b/vendor/k8s.io/apimachinery/pkg/types/uid.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/types/uid.go rename to vendor/k8s.io/apimachinery/pkg/types/uid.go diff --git a/vendor/k8s.io/kubernetes/pkg/types/unix_user_id.go b/vendor/k8s.io/apimachinery/pkg/types/unix_user_id.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/types/unix_user_id.go rename to vendor/k8s.io/apimachinery/pkg/types/unix_user_id.go diff --git a/vendor/k8s.io/kubernetes/pkg/util/diff/diff.go b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/util/diff/diff.go rename to vendor/k8s.io/apimachinery/pkg/util/diff/diff.go index cf7b97a96c..0f730875e8 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/diff/diff.go +++ b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go @@ -27,7 +27,7 @@ import ( "github.com/davecgh/go-spew/spew" - "k8s.io/kubernetes/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/util/validation/field" ) // StringDiff diffs a and b and returns a human readable diff. 
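The PatchType constants introduced in vendor/k8s.io/apimachinery/pkg/types/patch.go above are the Content-Type values an API server inspects to decide which patch strategy to apply. As a rough, standalone sketch (not part of this diff; the URL, resource path and payload are invented for illustration), a caller would pair one of those constants with the matching patch document roughly like this:

package main

import (
	"bytes"
	"fmt"
	"net/http"

	"k8s.io/apimachinery/pkg/types"
)

func main() {
	// RFC 7386 JSON merge patch: set a single label on a (hypothetical) Pod.
	body := []byte(`{"metadata":{"labels":{"app":"demo"}}}`)

	req, err := http.NewRequest(http.MethodPatch,
		"https://example.invalid/api/v1/namespaces/default/pods/demo",
		bytes.NewReader(body))
	if err != nil {
		fmt.Println("building request:", err)
		return
	}

	// types.MergePatchType is "application/merge-patch+json"; JSONPatchType and
	// StrategicMergePatchType would be used with their respective body formats.
	req.Header.Set("Content-Type", string(types.MergePatchType))

	fmt.Println(req.Method, req.URL.Path, req.Header.Get("Content-Type"))
}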
diff --git a/vendor/k8s.io/kubernetes/pkg/util/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/util/errors/doc.go rename to vendor/k8s.io/apimachinery/pkg/util/errors/doc.go diff --git a/vendor/k8s.io/kubernetes/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/util/errors/errors.go rename to vendor/k8s.io/apimachinery/pkg/util/errors/errors.go diff --git a/vendor/k8s.io/kubernetes/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/util/framer/framer.go rename to vendor/k8s.io/apimachinery/pkg/util/framer/framer.go diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go new file mode 100644 index 0000000000..7b9c554e0e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -0,0 +1,374 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto +// DO NOT EDIT! + +/* + Package intstr is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto + + It has these top-level messages: + IntOrString +*/ +package intstr + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *IntOrString) Reset() { *m = IntOrString{} } +func (*IntOrString) ProtoMessage() {} +func (*IntOrString) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func init() { + proto.RegisterType((*IntOrString)(nil), "k8s.io.apimachinery.pkg.util.intstr.IntOrString") +} +func (m *IntOrString) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IntOrString) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Type)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.IntVal)) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.StrVal))) + i += copy(data[i:], m.StrVal) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *IntOrString) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Type)) + n += 1 + sovGenerated(uint64(m.IntVal)) + l = len(m.StrVal) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *IntOrString) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IntOrString: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IntOrString: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Type |= (Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntVal", wireType) + } + m.IntVal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.IntVal |= (int32(b) & 0x7F) << shift + if b < 
0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StrVal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StrVal = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 288 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0xf4, 0x30, + 0x1c, 0xc6, 0x93, 0xf7, 0xee, 0x3d, 0xb4, 0x82, 0x43, 0x71, 0x38, 0x1c, 0xd2, 0xa2, 0x20, 0x5d, + 0x4c, 0x56, 0x71, 0xec, 0x76, 0x20, 0x08, 0x3d, 0x71, 0x70, 0x6b, 0xef, 0x62, 0x2e, 0xf4, 0x2e, + 0x09, 0xe9, 0xbf, 0x42, 0xb7, 0xfb, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x3a, 0xde, 0xe8, 0x20, 0x87, + 0xad, 0xdf, 0xc2, 0x49, 0x9a, 0x16, 0x74, 0x4a, 0x9e, 0xe7, 0xf9, 0xfd, 0x02, 0xf1, 0x6e, 0xf2, + 0xab, 0x82, 0x4a, 0xcd, 0xf2, 0x32, 
0xe3, 0x56, 0x71, 0xe0, 0x05, 0x7b, 0xe2, 0x6a, 0xa9, 0x2d, + 0x1b, 0x86, 0xd4, 0xc8, 0x4d, 0xba, 0x58, 0x49, 0xc5, 0x6d, 0xc5, 0x4c, 0x2e, 0x58, 0x09, 0x72, + 0xcd, 0xa4, 0x82, 0x02, 0x2c, 0x13, 0x5c, 0x71, 0x9b, 0x02, 0x5f, 0x52, 0x63, 0x35, 0x68, 0xff, + 0xbc, 0x97, 0xe8, 0x5f, 0x89, 0x9a, 0x5c, 0xd0, 0x4e, 0xa2, 0xbd, 0x74, 0x7a, 0x29, 0x24, 0xac, + 0xca, 0x8c, 0x2e, 0xf4, 0x86, 0x09, 0x2d, 0x34, 0x73, 0x6e, 0x56, 0x3e, 0xba, 0xe4, 0x82, 0xbb, + 0xf5, 0x6f, 0x9e, 0xbd, 0x60, 0xef, 0x68, 0xa6, 0xe0, 0xd6, 0xce, 0xc1, 0x4a, 0x25, 0xfc, 0xc8, + 0x1b, 0x43, 0x65, 0xf8, 0x14, 0x87, 0x38, 0x1a, 0xc5, 0x27, 0xf5, 0x3e, 0x40, 0xed, 0x3e, 0x18, + 0xdf, 0x55, 0x86, 0x7f, 0x0f, 0x67, 0xe2, 0x08, 0xff, 0xc2, 0x9b, 0x48, 0x05, 0xf7, 0xe9, 0x7a, + 0xfa, 0x2f, 0xc4, 0xd1, 0xff, 0xf8, 0x78, 0x60, 0x27, 0x33, 0xd7, 0x26, 0xc3, 0xda, 0x71, 0x05, + 0xd8, 0x8e, 0x1b, 0x85, 0x38, 0x3a, 0xfc, 0xe5, 0xe6, 0xae, 0x4d, 0x86, 0xf5, 0xfa, 0xe0, 0xf5, + 0x2d, 0x40, 0xdb, 0x8f, 0x10, 0xc5, 0x51, 0xdd, 0x10, 0xb4, 0x6b, 0x08, 0x7a, 0x6f, 0x08, 0xda, + 0xb6, 0x04, 0xd7, 0x2d, 0xc1, 0xbb, 0x96, 0xe0, 0xcf, 0x96, 0xe0, 0xe7, 0x2f, 0x82, 0x1e, 0x26, + 0xfd, 0x67, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x55, 0xdf, 0x2a, 0x60, 0x01, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto new file mode 100644 index 0000000000..cccaf6f689 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto @@ -0,0 +1,43 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.util.intstr; + +// Package-wide variables from generator "generated". +option go_package = "intstr"; + +// IntOrString is a type that can hold an int32 or a string. When used in +// JSON or YAML marshalling and unmarshalling, it produces or consumes the +// inner type. This allows you to have, for example, a JSON field that can +// accept a name or number. 
+// TODO: Rename to Int32OrString +// +// +protobuf=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:openapi-gen=true +message IntOrString { + optional int64 type = 1; + + optional int32 intVal = 2; + + optional string strVal = 3; +} + diff --git a/vendor/k8s.io/kubernetes/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go similarity index 88% rename from vendor/k8s.io/kubernetes/pkg/util/intstr/intstr.go rename to vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 248a16cc43..02586b3481 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -20,12 +20,14 @@ import ( "encoding/json" "fmt" "math" + "runtime/debug" "strconv" "strings" - "k8s.io/kubernetes/pkg/genericapiserver/openapi/common" + "k8s.io/apimachinery/pkg/openapi" "github.com/go-openapi/spec" + "github.com/golang/glog" "github.com/google/gofuzz" ) @@ -57,6 +59,9 @@ const ( // than int32. // TODO: convert to (val int32) func FromInt(val int) IntOrString { + if val > math.MaxInt32 || val < math.MinInt32 { + glog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) + } return IntOrString{Type: Int, IntVal: int32(val)} } @@ -65,6 +70,16 @@ func FromString(val string) IntOrString { return IntOrString{Type: String, StrVal: val} } +// Parse the given string and try to convert it to an integer before +// setting it as a string value. +func Parse(val string) IntOrString { + i, err := strconv.Atoi(val) + if err != nil { + return FromString(val) + } + return FromInt(i) +} + // UnmarshalJSON implements the json.Unmarshaller interface. func (intstr *IntOrString) UnmarshalJSON(value []byte) error { if value[0] == '"' { @@ -105,8 +120,8 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) { } } -func (_ IntOrString) OpenAPIDefinition() common.OpenAPIDefinition { - return common.OpenAPIDefinition{ +func (_ IntOrString) OpenAPIDefinition() openapi.OpenAPIDefinition { + return openapi.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ Type: []string{"string"}, diff --git a/vendor/k8s.io/kubernetes/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/util/json/json.go rename to vendor/k8s.io/apimachinery/pkg/util/json/json.go diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go new file mode 100644 index 0000000000..b9d3e6017f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go @@ -0,0 +1,66 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mergepatch + +import ( + "errors" + "fmt" +) + +var ErrBadJSONDoc = errors.New("Invalid JSON document") +var ErrNoListOfLists = errors.New("Lists of lists are not supported") +var ErrBadPatchFormatForPrimitiveList = errors.New("Invalid patch format of primitive list") + +// IsPreconditionFailed returns true if the provided error indicates +// a precondition failed. +func IsPreconditionFailed(err error) bool { + _, ok := err.(ErrPreconditionFailed) + return ok +} + +type ErrPreconditionFailed struct { + message string +} + +func NewErrPreconditionFailed(target map[string]interface{}) ErrPreconditionFailed { + s := fmt.Sprintf("precondition failed for: %v", target) + return ErrPreconditionFailed{s} +} + +func (err ErrPreconditionFailed) Error() string { + return err.message +} + +type ErrConflict struct { + message string +} + +func NewErrConflict(patch, current string) ErrConflict { + s := fmt.Sprintf("patch:\n%s\nconflicts with changes made from original to current:\n%s\n", patch, current) + return ErrConflict{s} +} + +func (err ErrConflict) Error() string { + return err.message +} + +// IsConflict returns true if the provided error indicates +// a conflict between the patch and the current configuration. +func IsConflict(err error) bool { + _, ok := err.(ErrConflict) + return ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go new file mode 100644 index 0000000000..591884e5c8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go @@ -0,0 +1,126 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mergepatch + +import ( + "fmt" + "reflect" + + "github.com/davecgh/go-spew/spew" + "github.com/ghodss/yaml" +) + +// PreconditionFunc asserts that an incompatible change is not present within a patch. +type PreconditionFunc func(interface{}) bool + +// RequireKeyUnchanged returns a precondition function that fails if the provided key +// is present in the patch (indicating that its value has changed). +func RequireKeyUnchanged(key string) PreconditionFunc { + return func(patch interface{}) bool { + patchMap, ok := patch.(map[string]interface{}) + if !ok { + return true + } + + // The presence of key means that its value has been changed, so the test fails. + _, ok = patchMap[key] + return !ok + } +} + +// RequireMetadataKeyUnchanged creates a precondition function that fails +// if the metadata.key is present in the patch (indicating its value +// has changed). 
+func RequireMetadataKeyUnchanged(key string) PreconditionFunc { + return func(patch interface{}) bool { + patchMap, ok := patch.(map[string]interface{}) + if !ok { + return true + } + patchMap1, ok := patchMap["metadata"] + if !ok { + return true + } + patchMap2, ok := patchMap1.(map[string]interface{}) + if !ok { + return true + } + _, ok = patchMap2[key] + return !ok + } +} + +func ToYAMLOrError(v interface{}) string { + y, err := toYAML(v) + if err != nil { + return err.Error() + } + + return y +} + +func toYAML(v interface{}) (string, error) { + y, err := yaml.Marshal(v) + if err != nil { + return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, spew.Sdump(v)) + } + + return string(y), nil +} + +// HasConflicts returns true if the left and right JSON interface objects overlap with +// different values in any key. All keys are required to be strings. Since patches of the +// same Type have congruent keys, this is valid for multiple patch types. This method +// supports JSON merge patch semantics. +func HasConflicts(left, right interface{}) (bool, error) { + switch typedLeft := left.(type) { + case map[string]interface{}: + switch typedRight := right.(type) { + case map[string]interface{}: + for key, leftValue := range typedLeft { + rightValue, ok := typedRight[key] + if !ok { + return false, nil + } + return HasConflicts(leftValue, rightValue) + } + + return false, nil + default: + return true, nil + } + case []interface{}: + switch typedRight := right.(type) { + case []interface{}: + if len(typedLeft) != len(typedRight) { + return true, nil + } + + for i := range typedLeft { + return HasConflicts(typedLeft[i], typedRight[i]) + } + + return false, nil + default: + return true, nil + } + case string, float64, bool, int, int64, nil: + return !reflect.DeepEqual(left, right), nil + default: + return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go new file mode 100644 index 0000000000..c32082e931 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -0,0 +1,269 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "crypto/tls" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "strconv" + "strings" + + "github.com/golang/glog" + "golang.org/x/net/http2" +) + +// IsProbableEOF returns true if the given error resembles a connection termination +// scenario that would justify assuming that the watch is empty. +// These errors are what the Go http stack returns back to us which are general +// connection closure errors (strongly correlated) and callers that need to +// differentiate probable errors in connection behavior between normal "this is +// disconnected" should use the method. 
+func IsProbableEOF(err error) bool { + if uerr, ok := err.(*url.Error); ok { + err = uerr.Err + } + switch { + case err == io.EOF: + return true + case err.Error() == "http: can't write HTTP request on broken connection": + return true + case strings.Contains(err.Error(), "connection reset by peer"): + return true + case strings.Contains(strings.ToLower(err.Error()), "use of closed network connection"): + return true + } + return false +} + +var defaultTransport = http.DefaultTransport.(*http.Transport) + +// SetOldTransportDefaults applies the defaults from http.DefaultTransport +// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset +func SetOldTransportDefaults(t *http.Transport) *http.Transport { + if t.Proxy == nil || isDefault(t.Proxy) { + // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings + // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY + t.Proxy = NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) + } + if t.Dial == nil { + t.Dial = defaultTransport.Dial + } + if t.TLSHandshakeTimeout == 0 { + t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout + } + return t +} + +// SetTransportDefaults applies the defaults from http.DefaultTransport +// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset +func SetTransportDefaults(t *http.Transport) *http.Transport { + t = SetOldTransportDefaults(t) + // Allow clients to disable http2 if needed. + if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 { + glog.Infof("HTTP2 has been explicitly disabled") + } else { + if err := http2.ConfigureTransport(t); err != nil { + glog.Warningf("Transport failed http2 configuration: %v", err) + } + } + return t +} + +type RoundTripperWrapper interface { + http.RoundTripper + WrappedRoundTripper() http.RoundTripper +} + +type DialFunc func(net, addr string) (net.Conn, error) + +func Dialer(transport http.RoundTripper) (DialFunc, error) { + if transport == nil { + return nil, nil + } + + switch transport := transport.(type) { + case *http.Transport: + return transport.Dial, nil + case RoundTripperWrapper: + return Dialer(transport.WrappedRoundTripper()) + default: + return nil, fmt.Errorf("unknown transport type: %v", transport) + } +} + +// CloneTLSConfig returns a tls.Config with all exported fields except SessionTicketsDisabled and SessionTicketKey copied. +// This makes it safe to call CloneTLSConfig on a config in active use by a server. 
+// TODO: replace with tls.Config#Clone when we move to go1.8 +func CloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} + +type TLSClientConfigHolder interface { + TLSClientConfig() *tls.Config +} + +func TLSClientConfig(transport http.RoundTripper) (*tls.Config, error) { + if transport == nil { + return nil, nil + } + + switch transport := transport.(type) { + case *http.Transport: + return transport.TLSClientConfig, nil + case TLSClientConfigHolder: + return transport.TLSClientConfig(), nil + case RoundTripperWrapper: + return TLSClientConfig(transport.WrappedRoundTripper()) + default: + return nil, fmt.Errorf("unknown transport type: %v", transport) + } +} + +func FormatURL(scheme string, host string, port int, path string) *url.URL { + return &url.URL{ + Scheme: scheme, + Host: net.JoinHostPort(host, strconv.Itoa(port)), + Path: path, + } +} + +func GetHTTPClient(req *http.Request) string { + if userAgent, ok := req.Header["User-Agent"]; ok { + if len(userAgent) > 0 { + return userAgent[0] + } + } + return "unknown" +} + +// Extracts and returns the clients IP from the given request. +// Looks at X-Forwarded-For header, X-Real-Ip header and request.RemoteAddr in that order. +// Returns nil if none of them are set or is set to an invalid value. +func GetClientIP(req *http.Request) net.IP { + hdr := req.Header + // First check the X-Forwarded-For header for requests via proxy. + hdrForwardedFor := hdr.Get("X-Forwarded-For") + if hdrForwardedFor != "" { + // X-Forwarded-For can be a csv of IPs in case of multiple proxies. + // Use the first valid one. + parts := strings.Split(hdrForwardedFor, ",") + for _, part := range parts { + ip := net.ParseIP(strings.TrimSpace(part)) + if ip != nil { + return ip + } + } + } + + // Try the X-Real-Ip header. + hdrRealIp := hdr.Get("X-Real-Ip") + if hdrRealIp != "" { + ip := net.ParseIP(hdrRealIp) + if ip != nil { + return ip + } + } + + // Fallback to Remote Address in request, which will give the correct client IP when there is no proxy. + // Remote Address in Go's HTTP server is in the form host:port so we need to split that first. + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err == nil { + return net.ParseIP(host) + } + + // Fallback if Remote Address was just IP. 
+ return net.ParseIP(req.RemoteAddr) +} + +var defaultProxyFuncPointer = fmt.Sprintf("%p", http.ProxyFromEnvironment) + +// isDefault checks to see if the transportProxierFunc is pointing to the default one +func isDefault(transportProxier func(*http.Request) (*url.URL, error)) bool { + transportProxierPointer := fmt.Sprintf("%p", transportProxier) + return transportProxierPointer == defaultProxyFuncPointer +} + +// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if +// no matching CIDRs are found +func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { + // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it + noProxyEnv := os.Getenv("NO_PROXY") + noProxyRules := strings.Split(noProxyEnv, ",") + + cidrs := []*net.IPNet{} + for _, noProxyRule := range noProxyRules { + _, cidr, _ := net.ParseCIDR(noProxyRule) + if cidr != nil { + cidrs = append(cidrs, cidr) + } + } + + if len(cidrs) == 0 { + return delegate + } + + return func(req *http.Request) (*url.URL, error) { + host := req.URL.Host + // for some urls, the Host is already the host, not the host:port + if net.ParseIP(host) == nil { + var err error + host, _, err = net.SplitHostPort(req.URL.Host) + if err != nil { + return delegate(req) + } + } + + ip := net.ParseIP(host) + if ip == nil { + return delegate(req) + } + + for _, cidr := range cidrs { + if cidr.Contains(ip) { + return nil, nil + } + } + + return delegate(req) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/net/interface.go b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/util/net/interface.go rename to vendor/k8s.io/apimachinery/pkg/util/net/interface.go diff --git a/vendor/k8s.io/kubernetes/pkg/util/net/port_range.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/util/net/port_range.go rename to vendor/k8s.io/apimachinery/pkg/util/net/port_range.go diff --git a/vendor/k8s.io/kubernetes/pkg/util/net/port_split.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/util/net/port_split.go rename to vendor/k8s.io/apimachinery/pkg/util/net/port_split.go index 29c985edc3..c0fd4e20fe 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/net/port_split.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go @@ -19,7 +19,7 @@ package net import ( "strings" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/sets" ) var validSchemes = sets.NewString("http", "https", "") diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/util.go b/vendor/k8s.io/apimachinery/pkg/util/net/util.go new file mode 100644 index 0000000000..461144f0ba --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/net/util.go @@ -0,0 +1,46 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package net + +import ( + "net" + "reflect" + "syscall" +) + +// IPNetEqual checks if the two input IPNets are representing the same subnet. +// For example, +// 10.0.0.1/24 and 10.0.0.0/24 are the same subnet. +// 10.0.0.1/24 and 10.0.0.0/25 are not the same subnet. +func IPNetEqual(ipnet1, ipnet2 *net.IPNet) bool { + if ipnet1 == nil || ipnet2 == nil { + return false + } + if reflect.DeepEqual(ipnet1.Mask, ipnet2.Mask) && ipnet1.Contains(ipnet2.IP) && ipnet2.Contains(ipnet1.IP) { + return true + } + return false +} + +// Returns if the given err is "connection reset by peer" error. +func IsConnectionReset(err error) bool { + opErr, ok := err.(*net.OpError) + if ok && opErr.Err.Error() == syscall.ECONNRESET.Error() { + return true + } + return false +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/rand/rand.go b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/util/rand/rand.go rename to vendor/k8s.io/apimachinery/pkg/util/rand/rand.go diff --git a/vendor/k8s.io/kubernetes/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go similarity index 76% rename from vendor/k8s.io/kubernetes/pkg/util/runtime/runtime.go rename to vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index 976de49d58..748174e191 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -19,6 +19,8 @@ package runtime import ( "fmt" "runtime" + "sync" + "time" "github.com/golang/glog" ) @@ -79,7 +81,18 @@ func getCallers(r interface{}) string { // ErrorHandlers is a list of functions which will be invoked when an unreturnable // error occurs. -var ErrorHandlers = []func(error){logError} +// TODO(lavalamp): for testability, this and the below HandleError function +// should be packaged up into a testable and reusable object. +var ErrorHandlers = []func(error){ + logError, + (&rudimentaryErrorBackoff{ + lastErrorTime: time.Now(), + // 1ms was the number folks were able to stomach as a global rate limit. + // If you need to log errors more than 1000 times a second you + // should probably consider fixing your code instead. :) + minPeriod: time.Millisecond, + }).OnError, +} // HandlerError is a method to invoke when a non-user facing piece of code cannot // return an error and needs to indicate it has been ignored. Invoking this method @@ -101,6 +114,26 @@ func logError(err error) { glog.ErrorDepth(2, err) } +type rudimentaryErrorBackoff struct { + minPeriod time.Duration // immutable + // TODO(lavalamp): use the clock for testability. Need to move that + // package for that to be accessible here. + lastErrorTimeLock sync.Mutex + lastErrorTime time.Time +} + +// OnError will block if it is called more often than the embedded period time. +// This will prevent overly tight hot error loops. +func (r *rudimentaryErrorBackoff) OnError(error) { + r.lastErrorTimeLock.Lock() + defer r.lastErrorTimeLock.Unlock() + d := time.Since(r.lastErrorTime) + if d < r.minPeriod { + time.Sleep(r.minPeriod - d) + } + r.lastErrorTime = time.Now() +} + // GetCaller returns the caller of the function that calls it. 
func GetCaller() string { var pc [1]uintptr diff --git a/vendor/k8s.io/kubernetes/pkg/util/sets/byte.go b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/util/sets/byte.go rename to vendor/k8s.io/apimachinery/pkg/util/sets/byte.go index 3d6d0dfe43..a460e4b1f5 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/sets/byte.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go new file mode 100644 index 0000000000..28a6a7d5c7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by set-gen. Do not edit it manually! + +// Package sets has auto-generated set types. +package sets diff --git a/vendor/k8s.io/kubernetes/pkg/util/sets/empty.go b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/util/sets/empty.go rename to vendor/k8s.io/apimachinery/pkg/util/sets/empty.go index 5654edd773..cd22b953aa 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/sets/empty.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/kubernetes/pkg/util/sets/int.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/util/sets/int.go rename to vendor/k8s.io/apimachinery/pkg/util/sets/int.go index 6d32f84c78..0614e9fb00 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/sets/int.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/kubernetes/pkg/util/sets/int64.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/util/sets/int64.go rename to vendor/k8s.io/apimachinery/pkg/util/sets/int64.go index 1de18319b7..82e1ba7821 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/sets/int64.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/vendor/k8s.io/kubernetes/pkg/util/sets/string.go b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/util/sets/string.go rename to vendor/k8s.io/apimachinery/pkg/util/sets/string.go index da66eaf8e7..baef7a6a2b 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/sets/string.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go new file mode 100644 index 0000000000..2eb67af464 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -0,0 +1,1260 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "fmt" + "reflect" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/mergepatch" + forkedjson "k8s.io/apimachinery/third_party/forked/golang/json" +) + +// An alternate implementation of JSON Merge Patch +// (https://tools.ietf.org/html/rfc7386) which supports the ability to annotate +// certain fields with metadata that indicates whether the elements of JSON +// lists should be merged or replaced. +// +// For more information, see the PATCH section of docs/devel/api-conventions.md. +// +// Some of the content of this package was borrowed with minor adaptations from +// evanphx/json-patch and openshift/origin. + +const ( + directiveMarker = "$patch" + deleteDirective = "delete" + replaceDirective = "replace" + mergeDirective = "merge" + + deleteFromPrimitiveListDirectivePrefix = "$deleteFromPrimitiveList" +) + +// JSONMap is a representations of JSON object encoded as map[string]interface{} +// where the children can be either map[string]interface{}, []interface{} or +// primitive type). +// Operating on JSONMap representation is much faster as it doesn't require any +// json marshaling and/or unmarshaling operations. +type JSONMap map[string]interface{} + +// The following code is adapted from github.com/openshift/origin/pkg/util/jsonmerge. +// Instead of defining a Delta that holds an original, a patch and a set of preconditions, +// the reconcile method accepts a set of preconditions as an argument. + +// CreateTwoWayMergePatch creates a patch that can be passed to StrategicMergePatch from an original +// document and a modified document, which are passed to the method as json encoded content. It will +// return a patch that yields the modified document when applied to the original document, or an error +// if either of the two documents is invalid. 
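// Editor's sketch (not part of the vendored patch): minimal use of
// CreateTwoWayMergePatch, declared just below. Spec is a hypothetical stand-in
// for an API type; the vendored package is assumed importable as
// k8s.io/apimachinery/pkg/util/strategicpatch.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

type Spec struct {
	Replicas int               `json:"replicas,omitempty"`
	Labels   map[string]string `json:"labels,omitempty"`
}

func main() {
	original := []byte(`{"replicas":1,"labels":{"app":"web"}}`)
	modified := []byte(`{"replicas":3,"labels":{"app":"web"}}`)

	// Only the changed field should appear in the patch, e.g. {"replicas":3}.
	patch, err := strategicpatch.CreateTwoWayMergePatch(original, modified, Spec{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch))
}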
+func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) ([]byte, error) { + originalMap := map[string]interface{}{} + if len(original) > 0 { + if err := json.Unmarshal(original, &originalMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + modifiedMap := map[string]interface{}{} + if len(modified) > 0 { + if err := json.Unmarshal(modified, &modifiedMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + patchMap, err := CreateTwoWayMergeMapPatch(originalMap, modifiedMap, dataStruct, fns...) + if err != nil { + return nil, err + } + + return json.Marshal(patchMap) +} + +// CreateTwoWayMergeMapPatch creates a patch from an original and modified JSON objects, +// encoded JSONMap. +// The serialized version of the map can then be passed to StrategicMergeMapPatch. +func CreateTwoWayMergeMapPatch(original, modified JSONMap, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) (JSONMap, error) { + t, err := getTagStructType(dataStruct) + if err != nil { + return nil, err + } + + patchMap, err := diffMaps(original, modified, t, false, false) + if err != nil { + return nil, err + } + + // Apply the preconditions to the patch, and return an error if any of them fail. + for _, fn := range fns { + if !fn(patchMap) { + return nil, mergepatch.NewErrPreconditionFailed(patchMap) + } + } + + return patchMap, nil +} + +// Returns a (recursive) strategic merge patch that yields modified when applied to original. +func diffMaps(original, modified map[string]interface{}, t reflect.Type, ignoreChangesAndAdditions, ignoreDeletions bool) (map[string]interface{}, error) { + patch := map[string]interface{}{} + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + for key, modifiedValue := range modified { + originalValue, ok := original[key] + if !ok { + // Key was added, so add to patch + if !ignoreChangesAndAdditions { + patch[key] = modifiedValue + } + + continue + } + + // The patch has a patch directive + if key == directiveMarker { + originalString, ok := originalValue.(string) + if !ok { + return nil, fmt.Errorf("invalid value for special key: %s", directiveMarker) + } + + modifiedString, ok := modifiedValue.(string) + if !ok { + return nil, fmt.Errorf("invalid value for special key: %s", directiveMarker) + } + + if modifiedString != originalString { + patch[directiveMarker] = modifiedValue + } + + continue + } + + if reflect.TypeOf(originalValue) != reflect.TypeOf(modifiedValue) { + // Types have changed, so add to patch + if !ignoreChangesAndAdditions { + patch[key] = modifiedValue + } + + continue + } + + // Types are the same, so compare values + switch originalValueTyped := originalValue.(type) { + case map[string]interface{}: + modifiedValueTyped := modifiedValue.(map[string]interface{}) + fieldType, fieldPatchStrategy, _, err := forkedjson.LookupPatchMetadata(t, key) + if err != nil { + // We couldn't look up metadata for the field + // If the values are identical, this doesn't matter, no patch is needed + if reflect.DeepEqual(originalValue, modifiedValue) { + continue + } + // Otherwise, return the error + return nil, err + } + + if fieldPatchStrategy == replaceDirective { + if !ignoreChangesAndAdditions { + patch[key] = modifiedValue + } + continue + } + + patchValue, err := diffMaps(originalValueTyped, modifiedValueTyped, fieldType, ignoreChangesAndAdditions, ignoreDeletions) + if err != nil { + return nil, err + } + + if len(patchValue) > 0 { + patch[key] = patchValue + } + + continue + case 
[]interface{}: + modifiedValueTyped := modifiedValue.([]interface{}) + fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, key) + if err != nil { + // We couldn't look up metadata for the field + // If the values are identical, this doesn't matter, no patch is needed + if reflect.DeepEqual(originalValue, modifiedValue) { + continue + } + // Otherwise, return the error + return nil, err + } + + if fieldPatchStrategy == mergeDirective { + addList, deletionList, err := diffLists(originalValueTyped, modifiedValueTyped, fieldType.Elem(), fieldPatchMergeKey, ignoreChangesAndAdditions, ignoreDeletions) + if err != nil { + return nil, err + } + + if len(addList) > 0 { + patch[key] = addList + } + + // generate a parallel list for deletion + if len(deletionList) > 0 { + parallelDeletionListKey := fmt.Sprintf("%s/%s", deleteFromPrimitiveListDirectivePrefix, key) + patch[parallelDeletionListKey] = deletionList + } + + continue + } + } + + if !ignoreChangesAndAdditions { + if !reflect.DeepEqual(originalValue, modifiedValue) { + // Values are different, so add to patch + patch[key] = modifiedValue + } + } + } + + if !ignoreDeletions { + // Add nils for deleted values + for key := range original { + _, found := modified[key] + if !found { + patch[key] = nil + } + } + } + + return patch, nil +} + +// Returns a (recursive) strategic merge patch and a parallel deletion list if necessary. +// Only list of primitives with merge strategy will generate a parallel deletion list. +// These two lists should yield modified when applied to original, for lists with merge semantics. +func diffLists(original, modified []interface{}, t reflect.Type, mergeKey string, ignoreChangesAndAdditions, ignoreDeletions bool) ([]interface{}, []interface{}, error) { + if len(original) == 0 { + // Both slices are empty - do nothing + if len(modified) == 0 || ignoreChangesAndAdditions { + return nil, nil, nil + } + + // Old slice was empty - add all elements from the new slice + return modified, nil, nil + } + + elementType, err := sliceElementType(original, modified) + if err != nil { + return nil, nil, err + } + + switch elementType.Kind() { + case reflect.Map: + patchList, err := diffListsOfMaps(original, modified, t, mergeKey, ignoreChangesAndAdditions, ignoreDeletions) + return patchList, nil, err + case reflect.Slice: + // Lists of Lists are not permitted by the api + return nil, nil, mergepatch.ErrNoListOfLists + default: + return diffListsOfScalars(original, modified, ignoreChangesAndAdditions, ignoreDeletions) + } +} + +// diffListsOfScalars returns 2 lists, the first one is addList and the second one is deletionList. +// Argument ignoreChangesAndAdditions controls if calculate addList. true means not calculate. +// Argument ignoreDeletions controls if calculate deletionList. true means not calculate. 
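// Editor's sketch (not part of the vendored patch): the two-pointer walk that
// diffListsOfScalars (just below) performs over the sorted string forms of both
// lists, re-implemented standalone for illustration only.
package main

import (
	"fmt"
	"sort"
)

// diffSortedScalars returns the elements to add (present only in modified) and
// to delete (present only in original), mirroring the addList/deletionList
// split used by the vendored function.
func diffSortedScalars(original, modified []string) (add, del []string) {
	sort.Strings(original)
	sort.Strings(modified)
	i, j := 0, 0
	for i < len(original) || j < len(modified) {
		switch {
		case i < len(original) && j < len(modified) && original[i] == modified[j]:
			i++
			j++
		case j < len(modified) && (i >= len(original) || original[i] > modified[j]):
			add = append(add, modified[j]) // only in modified
			j++
		default:
			del = append(del, original[i]) // only in original
			i++
		}
	}
	return add, del
}

func main() {
	add, del := diffSortedScalars([]string{"a", "b", "c"}, []string{"b", "c", "d"})
	fmt.Println(add, del) // [d] [a]
}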
+func diffListsOfScalars(original, modified []interface{}, ignoreChangesAndAdditions, ignoreDeletions bool) ([]interface{}, []interface{}, error) { + // Sort the scalars for easier calculating the diff + originalScalars := sortScalars(original) + modifiedScalars := sortScalars(modified) + + originalIndex, modifiedIndex := 0, 0 + addList := []interface{}{} + deletionList := []interface{}{} + + originalInBounds := originalIndex < len(originalScalars) + modifiedInBounds := modifiedIndex < len(modifiedScalars) + bothInBounds := originalInBounds && modifiedInBounds + for originalInBounds || modifiedInBounds { + + // we need to compare the string representation of the scalar, + // because the scalar is an interface which doesn't support neither < nor < + // And that's how func sortScalars compare scalars. + var originalString, modifiedString string + if originalInBounds { + originalString = fmt.Sprintf("%v", originalScalars[originalIndex]) + } + + if modifiedInBounds { + modifiedString = fmt.Sprintf("%v", modifiedScalars[modifiedIndex]) + } + + switch { + // scalars are identical + case bothInBounds && originalString == modifiedString: + originalIndex++ + modifiedIndex++ + // only modified is in bound + case !originalInBounds: + fallthrough + // modified has additional scalar + case bothInBounds && originalString > modifiedString: + if !ignoreChangesAndAdditions { + modifiedValue := modifiedScalars[modifiedIndex] + addList = append(addList, modifiedValue) + } + modifiedIndex++ + // only original is in bound + case !modifiedInBounds: + fallthrough + // original has additional scalar + case bothInBounds && originalString < modifiedString: + if !ignoreDeletions { + originalValue := originalScalars[originalIndex] + deletionList = append(deletionList, originalValue) + } + originalIndex++ + } + + originalInBounds = originalIndex < len(originalScalars) + modifiedInBounds = modifiedIndex < len(modifiedScalars) + bothInBounds = originalInBounds && modifiedInBounds + } + + return addList, deletionList, nil +} + +var errNoMergeKeyFmt = "map: %v does not contain declared merge key: %s" +var errBadArgTypeFmt = "expected a %s, but received a %s" + +// Returns a (recursive) strategic merge patch that yields modified when applied to original, +// for a pair of lists of maps with merge semantics. 
+func diffListsOfMaps(original, modified []interface{}, t reflect.Type, mergeKey string, ignoreChangesAndAdditions, ignoreDeletions bool) ([]interface{}, error) { + patch := make([]interface{}, 0) + + originalSorted, err := sortMergeListsByNameArray(original, t, mergeKey, false) + if err != nil { + return nil, err + } + + modifiedSorted, err := sortMergeListsByNameArray(modified, t, mergeKey, false) + if err != nil { + return nil, err + } + + originalIndex, modifiedIndex := 0, 0 + +loopB: + for ; modifiedIndex < len(modifiedSorted); modifiedIndex++ { + modifiedMap, ok := modifiedSorted[modifiedIndex].(map[string]interface{}) + if !ok { + t := reflect.TypeOf(modifiedSorted[modifiedIndex]) + return nil, fmt.Errorf(errBadArgTypeFmt, "map[string]interface{}", t.Kind().String()) + } + + modifiedValue, ok := modifiedMap[mergeKey] + if !ok { + return nil, fmt.Errorf(errNoMergeKeyFmt, modifiedMap, mergeKey) + } + + for ; originalIndex < len(originalSorted); originalIndex++ { + originalMap, ok := originalSorted[originalIndex].(map[string]interface{}) + if !ok { + t := reflect.TypeOf(originalSorted[originalIndex]) + return nil, fmt.Errorf(errBadArgTypeFmt, "map[string]interface{}", t.Kind().String()) + } + + originalValue, ok := originalMap[mergeKey] + if !ok { + return nil, fmt.Errorf(errNoMergeKeyFmt, originalMap, mergeKey) + } + + // Assume that the merge key values are comparable strings + originalString := fmt.Sprintf("%v", originalValue) + modifiedString := fmt.Sprintf("%v", modifiedValue) + if originalString >= modifiedString { + if originalString == modifiedString { + // Merge key values are equal, so recurse + patchValue, err := diffMaps(originalMap, modifiedMap, t, ignoreChangesAndAdditions, ignoreDeletions) + if err != nil { + return nil, err + } + + originalIndex++ + if len(patchValue) > 0 { + patchValue[mergeKey] = modifiedValue + patch = append(patch, patchValue) + } + } else if !ignoreChangesAndAdditions { + // Item was added, so add to patch + patch = append(patch, modifiedMap) + } + + continue loopB + } + + if !ignoreDeletions { + // Item was deleted, so add delete directive + patch = append(patch, map[string]interface{}{mergeKey: originalValue, directiveMarker: deleteDirective}) + } + } + + break + } + + if !ignoreDeletions { + // Delete any remaining items found only in original + for ; originalIndex < len(originalSorted); originalIndex++ { + originalMap, ok := originalSorted[originalIndex].(map[string]interface{}) + if !ok { + t := reflect.TypeOf(originalSorted[originalIndex]) + return nil, fmt.Errorf(errBadArgTypeFmt, "map[string]interface{}", t.Kind().String()) + } + + originalValue, ok := originalMap[mergeKey] + if !ok { + return nil, fmt.Errorf(errNoMergeKeyFmt, originalMap, mergeKey) + } + + patch = append(patch, map[string]interface{}{mergeKey: originalValue, directiveMarker: deleteDirective}) + } + } + + if !ignoreChangesAndAdditions { + // Add any remaining items found only in modified + for ; modifiedIndex < len(modifiedSorted); modifiedIndex++ { + patch = append(patch, modifiedSorted[modifiedIndex]) + } + } + + return patch, nil +} + +// StrategicMergePatch applies a strategic merge patch. The patch and the original document +// must be json encoded content. A patch can be created from an original and a modified document +// by calling CreateStrategicMergePatch. 
+func StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte, error) { + if original == nil { + original = []byte("{}") + } + + if patch == nil { + patch = []byte("{}") + } + + originalMap := map[string]interface{}{} + err := json.Unmarshal(original, &originalMap) + if err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + + patchMap := map[string]interface{}{} + err = json.Unmarshal(patch, &patchMap) + if err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + + result, err := StrategicMergeMapPatch(originalMap, patchMap, dataStruct) + if err != nil { + return nil, err + } + + return json.Marshal(result) +} + +// StrategicMergePatch applies a strategic merge patch. The original and patch documents +// must be JSONMap. A patch can be created from an original and modified document by +// calling CreateTwoWayMergeMapPatch. +func StrategicMergeMapPatch(original, patch JSONMap, dataStruct interface{}) (JSONMap, error) { + t, err := getTagStructType(dataStruct) + if err != nil { + return nil, err + } + return mergeMap(original, patch, t, true, true) +} + +func getTagStructType(dataStruct interface{}) (reflect.Type, error) { + if dataStruct == nil { + return nil, fmt.Errorf(errBadArgTypeFmt, "struct", "nil") + } + + t := reflect.TypeOf(dataStruct) + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf(errBadArgTypeFmt, "struct", t.Kind().String()) + } + + return t, nil +} + +var errBadPatchTypeFmt = "unknown patch type: %s in map: %v" + +// Merge fields from a patch map into the original map. Note: This may modify +// both the original map and the patch because getting a deep copy of a map in +// golang is highly non-trivial. +// flag mergeDeleteList controls if using the parallel list to delete or keeping the list. +// If patch contains any null field (e.g. field_1: null) that is not +// present in original, then to propagate it to the end result use +// ignoreUnmatchedNulls == false. +func mergeMap(original, patch map[string]interface{}, t reflect.Type, mergeDeleteList, ignoreUnmatchedNulls bool) (map[string]interface{}, error) { + if v, ok := patch[directiveMarker]; ok { + if v == replaceDirective { + // If the patch contains "$patch: replace", don't merge it, just use the + // patch directly. Later on, we can add a single level replace that only + // affects the map that the $patch is in. + delete(patch, directiveMarker) + return patch, nil + } + + if v == deleteDirective { + // If the patch contains "$patch: delete", don't merge it, just return + // an empty map. + return map[string]interface{}{}, nil + } + + return nil, fmt.Errorf(errBadPatchTypeFmt, v, patch) + } + + // nil is an accepted value for original to simplify logic in other places. + // If original is nil, replace it with an empty map and then apply the patch. + if original == nil { + original = map[string]interface{}{} + } + + // Start merging the patch into the original. 
+ for k, patchV := range patch { + // If found a parallel list for deletion and we are going to merge the list, + // overwrite the key to the original key and set flag isDeleteList + isDeleteList := false + foundParallelListPrefix := strings.HasPrefix(k, deleteFromPrimitiveListDirectivePrefix) + if foundParallelListPrefix { + if !mergeDeleteList { + original[k] = patchV + continue + } + substrings := strings.SplitN(k, "/", 2) + if len(substrings) <= 1 { + return nil, mergepatch.ErrBadPatchFormatForPrimitiveList + } + isDeleteList = true + k = substrings[1] + } + + // If the value of this key is null, delete the key if it exists in the + // original. Otherwise, check if we want to preserve it or skip it. + // Preserving the null value is useful when we want to send an explicit + // delete to the API server. + if patchV == nil { + if _, ok := original[k]; ok { + delete(original, k) + } + + if ignoreUnmatchedNulls { + continue + } + } + + _, ok := original[k] + if !ok { + // If it's not in the original document, just take the patch value. + original[k] = patchV + continue + } + + // If the data type is a pointer, resolve the element. + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + // If they're both maps or lists, recurse into the value. + originalType := reflect.TypeOf(original[k]) + patchType := reflect.TypeOf(patchV) + if originalType == patchType { + // First find the fieldPatchStrategy and fieldPatchMergeKey. + fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, k) + if err != nil { + return nil, err + } + + if originalType.Kind() == reflect.Map && fieldPatchStrategy != replaceDirective { + typedOriginal := original[k].(map[string]interface{}) + typedPatch := patchV.(map[string]interface{}) + var err error + original[k], err = mergeMap(typedOriginal, typedPatch, fieldType, mergeDeleteList, ignoreUnmatchedNulls) + if err != nil { + return nil, err + } + + continue + } + + if originalType.Kind() == reflect.Slice && fieldPatchStrategy == mergeDirective { + elemType := fieldType.Elem() + typedOriginal := original[k].([]interface{}) + typedPatch := patchV.([]interface{}) + var err error + original[k], err = mergeSlice(typedOriginal, typedPatch, elemType, fieldPatchMergeKey, mergeDeleteList, isDeleteList, ignoreUnmatchedNulls) + if err != nil { + return nil, err + } + + continue + } + } + + // If originalType and patchType are different OR the types are both + // maps or slices but we're just supposed to replace them, just take + // the value from patch. + original[k] = patchV + } + + return original, nil +} + +// Merge two slices together. Note: This may modify both the original slice and +// the patch because getting a deep copy of a slice in golang is highly +// non-trivial. +func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey string, mergeDeleteList, isDeleteList, ignoreUnmatchedNulls bool) ([]interface{}, error) { + if len(original) == 0 && len(patch) == 0 { + return original, nil + } + + // All the values must be of the same type, but not a list. + t, err := sliceElementType(original, patch) + if err != nil { + return nil, err + } + + // If the elements are not maps, merge the slices of scalars. + if t.Kind() != reflect.Map { + if mergeDeleteList && isDeleteList { + return deleteFromSlice(original, patch), nil + } + // Maybe in the future add a "concat" mode that doesn't + // uniqify. + both := append(original, patch...) 
+ return uniqifyScalars(both), nil + } + + if mergeKey == "" { + return nil, fmt.Errorf("cannot merge lists without merge key for type %s", elemType.Kind().String()) + } + + // First look for any special $patch elements. + patchWithoutSpecialElements := []interface{}{} + replace := false + for _, v := range patch { + typedV := v.(map[string]interface{}) + patchType, ok := typedV[directiveMarker] + if ok { + if patchType == deleteDirective { + mergeValue, ok := typedV[mergeKey] + if ok { + // delete all matching entries (based on merge key) from a merging list + for { + _, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) + if err != nil { + return nil, err + } + + if !found { + break + } + // Delete the element at originalKey. + original = append(original[:originalKey], original[originalKey+1:]...) + } + } else { + return nil, fmt.Errorf("delete patch type with no merge key defined") + } + } else if patchType == replaceDirective { + replace = true + // Continue iterating through the array to prune any other $patch elements. + } else if patchType == mergeDirective { + return nil, fmt.Errorf("merging lists cannot yet be specified in the patch") + } else { + return nil, fmt.Errorf(errBadPatchTypeFmt, patchType, typedV) + } + } else { + patchWithoutSpecialElements = append(patchWithoutSpecialElements, v) + } + } + + if replace { + return patchWithoutSpecialElements, nil + } + + patch = patchWithoutSpecialElements + + // Merge patch into original. + for _, v := range patch { + // Because earlier we confirmed that all the elements are maps. + typedV := v.(map[string]interface{}) + mergeValue, ok := typedV[mergeKey] + if !ok { + return nil, fmt.Errorf(errNoMergeKeyFmt, typedV, mergeKey) + } + + // If we find a value with this merge key value in original, merge the + // maps. Otherwise append onto original. + originalMap, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) + if err != nil { + return nil, err + } + + if found { + var mergedMaps interface{} + var err error + // Merge into original. + mergedMaps, err = mergeMap(originalMap, typedV, elemType, mergeDeleteList, ignoreUnmatchedNulls) + if err != nil { + return nil, err + } + + original[originalKey] = mergedMaps + } else { + original = append(original, v) + } + } + + return original, nil +} + +// deleteFromSlice uses the parallel list to delete the items in a list of scalars +func deleteFromSlice(current, toDelete []interface{}) []interface{} { + currentScalars := uniqifyAndSortScalars(current) + toDeleteScalars := uniqifyAndSortScalars(toDelete) + + currentIndex, toDeleteIndex := 0, 0 + mergedList := []interface{}{} + + for currentIndex < len(currentScalars) && toDeleteIndex < len(toDeleteScalars) { + originalString := fmt.Sprintf("%v", currentScalars[currentIndex]) + modifiedString := fmt.Sprintf("%v", toDeleteScalars[toDeleteIndex]) + + switch { + // found an item to delete + case originalString == modifiedString: + currentIndex++ + // Request to delete an item that was not found in the current list + case originalString > modifiedString: + toDeleteIndex++ + // Found an item that was not part of the deletion list, keep it + case originalString < modifiedString: + mergedList = append(mergedList, currentScalars[currentIndex]) + currentIndex++ + } + } + return append(mergedList, currentScalars[currentIndex:]...) +} + +// This method no longer panics if any element of the slice is not a map. 
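// Editor's sketch (not part of the vendored patch): the parallel deletion list
// handled by deleteFromSlice above. A "$deleteFromPrimitiveList/<field>" key in
// the patch is assumed to remove the listed scalars from the merged field.
// Meta is a hypothetical stand-in; the field carries patchStrategy:"merge" so
// the slice takes the merge path.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

type Meta struct {
	Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge"`
}

func main() {
	original := []byte(`{"finalizers":["a","b","c"]}`)
	patch := []byte(`{"$deleteFromPrimitiveList/finalizers":["b"]}`)

	merged, err := strategicpatch.StrategicMergePatch(original, patch, Meta{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(merged)) // expect {"finalizers":["a","c"]}
}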
+func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{}) (map[string]interface{}, int, bool, error) { + for k, v := range m { + typedV, ok := v.(map[string]interface{}) + if !ok { + return nil, 0, false, fmt.Errorf("value for key %v is not a map.", k) + } + + valueToMatch, ok := typedV[key] + if ok && valueToMatch == value { + return typedV, k, true, nil + } + } + + return nil, 0, false, nil +} + +// This function takes a JSON map and sorts all the lists that should be merged +// by key. This is needed by tests because in JSON, list order is significant, +// but in Strategic Merge Patch, merge lists do not have significant order. +// Sorting the lists allows for order-insensitive comparison of patched maps. +func sortMergeListsByName(mapJSON []byte, dataStruct interface{}) ([]byte, error) { + var m map[string]interface{} + err := json.Unmarshal(mapJSON, &m) + if err != nil { + return nil, err + } + + newM, err := sortMergeListsByNameMap(m, reflect.TypeOf(dataStruct)) + if err != nil { + return nil, err + } + + return json.Marshal(newM) +} + +// Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in a map. +func sortMergeListsByNameMap(s map[string]interface{}, t reflect.Type) (map[string]interface{}, error) { + newS := map[string]interface{}{} + for k, v := range s { + if strings.HasPrefix(k, deleteFromPrimitiveListDirectivePrefix) { + typedV, ok := v.([]interface{}) + if !ok { + return nil, mergepatch.ErrBadPatchFormatForPrimitiveList + } + v = uniqifyAndSortScalars(typedV) + } else if k != directiveMarker { + fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, k) + if err != nil { + return nil, err + } + + // If v is a map or a merge slice, recurse. + if typedV, ok := v.(map[string]interface{}); ok { + var err error + v, err = sortMergeListsByNameMap(typedV, fieldType) + if err != nil { + return nil, err + } + } else if typedV, ok := v.([]interface{}); ok { + if fieldPatchStrategy == mergeDirective { + var err error + v, err = sortMergeListsByNameArray(typedV, fieldType.Elem(), fieldPatchMergeKey, true) + if err != nil { + return nil, err + } + } + } + } + + newS[k] = v + } + + return newS, nil +} + +// Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in an array. +func sortMergeListsByNameArray(s []interface{}, elemType reflect.Type, mergeKey string, recurse bool) ([]interface{}, error) { + if len(s) == 0 { + return s, nil + } + + // We don't support lists of lists yet. + t, err := sliceElementType(s) + if err != nil { + return nil, err + } + + // If the elements are not maps... + if t.Kind() != reflect.Map { + // Sort the elements, because they may have been merged out of order. + return uniqifyAndSortScalars(s), nil + } + + // Elements are maps - if one of the keys of the map is a map or a + // list, we may need to recurse into it. + newS := []interface{}{} + for _, elem := range s { + if recurse { + typedElem := elem.(map[string]interface{}) + newElem, err := sortMergeListsByNameMap(typedElem, elemType) + if err != nil { + return nil, err + } + + newS = append(newS, newElem) + } else { + newS = append(newS, elem) + } + } + + // Sort the maps. 
+ newS = sortMapsBasedOnField(newS, mergeKey) + return newS, nil +} + +func sortMapsBasedOnField(m []interface{}, fieldName string) []interface{} { + mapM := mapSliceFromSlice(m) + ss := SortableSliceOfMaps{mapM, fieldName} + sort.Sort(ss) + newS := sliceFromMapSlice(ss.s) + return newS +} + +func mapSliceFromSlice(m []interface{}) []map[string]interface{} { + newM := []map[string]interface{}{} + for _, v := range m { + vt := v.(map[string]interface{}) + newM = append(newM, vt) + } + + return newM +} + +func sliceFromMapSlice(s []map[string]interface{}) []interface{} { + newS := []interface{}{} + for _, v := range s { + newS = append(newS, v) + } + + return newS +} + +type SortableSliceOfMaps struct { + s []map[string]interface{} + k string // key to sort on +} + +func (ss SortableSliceOfMaps) Len() int { + return len(ss.s) +} + +func (ss SortableSliceOfMaps) Less(i, j int) bool { + iStr := fmt.Sprintf("%v", ss.s[i][ss.k]) + jStr := fmt.Sprintf("%v", ss.s[j][ss.k]) + return sort.StringsAreSorted([]string{iStr, jStr}) +} + +func (ss SortableSliceOfMaps) Swap(i, j int) { + tmp := ss.s[i] + ss.s[i] = ss.s[j] + ss.s[j] = tmp +} + +func uniqifyAndSortScalars(s []interface{}) []interface{} { + s = uniqifyScalars(s) + return sortScalars(s) +} + +func sortScalars(s []interface{}) []interface{} { + ss := SortableSliceOfScalars{s} + sort.Sort(ss) + return ss.s +} + +func uniqifyScalars(s []interface{}) []interface{} { + // Clever algorithm to uniqify. + length := len(s) - 1 + for i := 0; i < length; i++ { + for j := i + 1; j <= length; j++ { + if s[i] == s[j] { + s[j] = s[length] + s = s[0:length] + length-- + j-- + } + } + } + + return s +} + +type SortableSliceOfScalars struct { + s []interface{} +} + +func (ss SortableSliceOfScalars) Len() int { + return len(ss.s) +} + +func (ss SortableSliceOfScalars) Less(i, j int) bool { + iStr := fmt.Sprintf("%v", ss.s[i]) + jStr := fmt.Sprintf("%v", ss.s[j]) + return sort.StringsAreSorted([]string{iStr, jStr}) +} + +func (ss SortableSliceOfScalars) Swap(i, j int) { + tmp := ss.s[i] + ss.s[i] = ss.s[j] + ss.s[j] = tmp +} + +// Returns the type of the elements of N slice(s). If the type is different, +// another slice or undefined, returns an error. +func sliceElementType(slices ...[]interface{}) (reflect.Type, error) { + var prevType reflect.Type + for _, s := range slices { + // Go through elements of all given slices and make sure they are all the same type. + for _, v := range s { + currentType := reflect.TypeOf(v) + if prevType == nil { + prevType = currentType + // We don't support lists of lists yet. + if prevType.Kind() == reflect.Slice { + return nil, mergepatch.ErrNoListOfLists + } + } else { + if prevType != currentType { + return nil, fmt.Errorf("list element types are not identical: %v", fmt.Sprint(slices)) + } + prevType = currentType + } + } + } + + if prevType == nil { + return nil, fmt.Errorf("no elements in any of the given slices") + } + + return prevType, nil +} + +// MergingMapsHaveConflicts returns true if the left and right JSON interface +// objects overlap with different values in any key. All keys are required to be +// strings. Since patches of the same Type have congruent keys, this is valid +// for multiple patch types. This method supports strategic merge patch semantics. 
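// Editor's sketch (not part of the vendored patch): conflict detection with
// MergingMapsHaveConflicts, declared just below. Values are given as float64 to
// match what json.Unmarshal would produce; Spec is a hypothetical stand-in.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

type Spec struct {
	Replicas int    `json:"replicas,omitempty"`
	Image    string `json:"image,omitempty"`
}

func main() {
	// Two patches setting the same key to different values conflict.
	left := map[string]interface{}{"replicas": float64(2)}
	right := map[string]interface{}{"replicas": float64(5)}
	conflict, err := strategicpatch.MergingMapsHaveConflicts(left, right, Spec{})
	fmt.Println(conflict, err) // true <nil>

	// Patches touching disjoint keys do not.
	right = map[string]interface{}{"image": "app:v2"}
	conflict, err = strategicpatch.MergingMapsHaveConflicts(left, right, Spec{})
	fmt.Println(conflict, err) // false <nil>
}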
+func MergingMapsHaveConflicts(left, right map[string]interface{}, dataStruct interface{}) (bool, error) { + t, err := getTagStructType(dataStruct) + if err != nil { + return true, err + } + + return mergingMapFieldsHaveConflicts(left, right, t, "", "") +} + +func mergingMapFieldsHaveConflicts( + left, right interface{}, + fieldType reflect.Type, + fieldPatchStrategy, fieldPatchMergeKey string, +) (bool, error) { + switch leftType := left.(type) { + case map[string]interface{}: + switch rightType := right.(type) { + case map[string]interface{}: + leftMarker, okLeft := leftType[directiveMarker] + rightMarker, okRight := rightType[directiveMarker] + // if one or the other has a directive marker, + // then we need to consider that before looking at the individual keys, + // since a directive operates on the whole map. + if okLeft || okRight { + // if one has a directive marker and the other doesn't, + // then we have a conflict, since one is deleting or replacing the whole map, + // and the other is doing things to individual keys. + if okLeft != okRight { + return true, nil + } + + // if they both have markers, but they are not the same directive, + // then we have a conflict because they're doing different things to the map. + if leftMarker != rightMarker { + return true, nil + } + } + + if fieldPatchStrategy == replaceDirective { + return false, nil + } + + // Check the individual keys. + return mapsHaveConflicts(leftType, rightType, fieldType) + default: + return true, nil + } + case []interface{}: + switch rightType := right.(type) { + case []interface{}: + return slicesHaveConflicts(leftType, rightType, fieldType, fieldPatchStrategy, fieldPatchMergeKey) + default: + return true, nil + } + case string, float64, bool, int, int64, nil: + return !reflect.DeepEqual(left, right), nil + default: + return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) + } +} + +func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, structType reflect.Type) (bool, error) { + for key, leftValue := range typedLeft { + if key != directiveMarker { + if rightValue, ok := typedRight[key]; ok { + fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(structType, key) + if err != nil { + return true, err + } + + if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, + fieldType, fieldPatchStrategy, fieldPatchMergeKey); hasConflicts { + return true, err + } + } + } + } + + return false, nil +} + +func slicesHaveConflicts( + typedLeft, typedRight []interface{}, + fieldType reflect.Type, + fieldPatchStrategy, fieldPatchMergeKey string, +) (bool, error) { + elementType, err := sliceElementType(typedLeft, typedRight) + if err != nil { + return true, err + } + + valueType := fieldType.Elem() + if fieldPatchStrategy == mergeDirective { + // Merging lists of scalars have no conflicts by definition + // So we only need to check further if the elements are maps + if elementType.Kind() != reflect.Map { + return false, nil + } + + // Build a map for each slice and then compare the two maps + leftMap, err := sliceOfMapsToMapOfMaps(typedLeft, fieldPatchMergeKey) + if err != nil { + return true, err + } + + rightMap, err := sliceOfMapsToMapOfMaps(typedRight, fieldPatchMergeKey) + if err != nil { + return true, err + } + + return mapsOfMapsHaveConflicts(leftMap, rightMap, valueType) + } + + // Either we don't have type information, or these are non-merging lists + if len(typedLeft) != len(typedRight) { + return true, nil + } + + // Sort scalar slices to 
prevent ordering issues + // We have no way to sort non-merging lists of maps + if elementType.Kind() != reflect.Map { + typedLeft = uniqifyAndSortScalars(typedLeft) + typedRight = uniqifyAndSortScalars(typedRight) + } + + // Compare the slices element by element in order + // This test will fail if the slices are not sorted + for i := range typedLeft { + if hasConflicts, err := mergingMapFieldsHaveConflicts(typedLeft[i], typedRight[i], valueType, "", ""); hasConflicts { + return true, err + } + } + + return false, nil +} + +func sliceOfMapsToMapOfMaps(slice []interface{}, mergeKey string) (map[string]interface{}, error) { + result := make(map[string]interface{}, len(slice)) + for _, value := range slice { + typedValue, ok := value.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("invalid element type in merging list:%v", slice) + } + + mergeValue, ok := typedValue[mergeKey] + if !ok { + return nil, fmt.Errorf("cannot find merge key `%s` in merging list element:%v", mergeKey, typedValue) + } + + result[fmt.Sprintf("%s", mergeValue)] = typedValue + } + + return result, nil +} + +func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, structType reflect.Type) (bool, error) { + for key, leftValue := range typedLeft { + if rightValue, ok := typedRight[key]; ok { + if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, structType, "", ""); hasConflicts { + return true, err + } + } + } + + return false, nil +} + +// CreateThreeWayMergePatch reconciles a modified configuration with an original configuration, +// while preserving any changes or deletions made to the original configuration in the interim, +// and not overridden by the current configuration. All three documents must be passed to the +// method as json encoded content. It will return a strategic merge patch, or an error if any +// of the documents is invalid, or if there are any preconditions that fail against the modified +// configuration, or, if overwrite is false and there are conflicts between the modified and current +// configurations. Conflicts are defined as keys changed differently from original to modified +// than from original to current. In other words, a conflict occurs if modified changes any key +// in a way that is different from how it is changed in current (e.g., deleting it, changing its +// value). We also propagate values fields that do not exist in original but are explicitly +// defined in modified. +func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct interface{}, overwrite bool, fns ...mergepatch.PreconditionFunc) ([]byte, error) { + originalMap := map[string]interface{}{} + if len(original) > 0 { + if err := json.Unmarshal(original, &originalMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + modifiedMap := map[string]interface{}{} + if len(modified) > 0 { + if err := json.Unmarshal(modified, &modifiedMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + currentMap := map[string]interface{}{} + if len(current) > 0 { + if err := json.Unmarshal(current, &currentMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + t, err := getTagStructType(dataStruct) + if err != nil { + return nil, err + } + + // The patch is the difference from current to modified without deletions, plus deletions 
To find it, we compute deletions, which are the deletions from + // original to modified, and delta, which is the difference from current to modified without + // deletions, and then apply delta to deletions as a patch, which should be strictly additive. + deltaMap, err := diffMaps(currentMap, modifiedMap, t, false, true) + if err != nil { + return nil, err + } + + deletionsMap, err := diffMaps(originalMap, modifiedMap, t, true, false) + if err != nil { + return nil, err + } + + patchMap, err := mergeMap(deletionsMap, deltaMap, t, false, false) + if err != nil { + return nil, err + } + + // Apply the preconditions to the patch, and return an error if any of them fail. + for _, fn := range fns { + if !fn(patchMap) { + return nil, mergepatch.NewErrPreconditionFailed(patchMap) + } + } + + // If overwrite is false, and the patch contains any keys that were changed differently, + // then return a conflict error. + if !overwrite { + changedMap, err := diffMaps(originalMap, currentMap, t, false, false) + if err != nil { + return nil, err + } + + hasConflicts, err := MergingMapsHaveConflicts(patchMap, changedMap, dataStruct) + if err != nil { + return nil, err + } + + if hasConflicts { + return nil, mergepatch.NewErrConflict(mergepatch.ToYAMLOrError(patchMap), mergepatch.ToYAMLOrError(changedMap)) + } + } + + return json.Marshal(patchMap) +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/uuid/uuid.go b/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go similarity index 97% rename from vendor/k8s.io/kubernetes/pkg/util/uuid/uuid.go rename to vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go index 51f2689a68..bf478223d9 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/uuid/uuid.go +++ b/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go @@ -20,7 +20,8 @@ import ( "sync" "github.com/pborman/uuid" - "k8s.io/kubernetes/pkg/types" + + "k8s.io/apimachinery/pkg/types" ) var uuidLock sync.Mutex diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go new file mode 100644 index 0000000000..43c779a11b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -0,0 +1,254 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package field + +import ( + "fmt" + "reflect" + "strings" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Error is an implementation of the 'error' interface, which represents a +// field-level validation error. +type Error struct { + Type ErrorType + Field string + BadValue interface{} + Detail string +} + +var _ error = &Error{} + +// Error implements the error interface. +func (v *Error) Error() string { + return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody()) +} + +// ErrorBody returns the error message without the field name. This is useful +// for building nice-looking higher-level error reporting. 
+func (v *Error) ErrorBody() string { + var s string + switch v.Type { + case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal: + s = fmt.Sprintf("%s", v.Type) + default: + value := v.BadValue + valueType := reflect.TypeOf(value) + if value == nil || valueType == nil { + value = "null" + } else if valueType.Kind() == reflect.Ptr { + if reflectValue := reflect.ValueOf(value); reflectValue.IsNil() { + value = "null" + } else { + value = reflectValue.Elem().Interface() + } + } + switch t := value.(type) { + case int64, int32, float64, float32, bool: + // use simple printer for simple types + s = fmt.Sprintf("%s: %v", v.Type, value) + case string: + s = fmt.Sprintf("%s: %q", v.Type, t) + case fmt.Stringer: + // anything that defines String() is better than raw struct + s = fmt.Sprintf("%s: %s", v.Type, t.String()) + default: + // fallback to raw struct + // TODO: internal types have panic guards against json.Marshalling to prevent + // accidental use of internal types in external serialized form. For now, use + // %#v, although it would be better to show a more expressive output in the future + s = fmt.Sprintf("%s: %#v", v.Type, value) + } + } + if len(v.Detail) != 0 { + s += fmt.Sprintf(": %s", v.Detail) + } + return s +} + +// ErrorType is a machine readable value providing more detail about why +// a field is invalid. These values are expected to match 1-1 with +// CauseType in api/types.go. +type ErrorType string + +// TODO: These values are duplicated in api/types.go, but there's a circular dep. Fix it. +const ( + // ErrorTypeNotFound is used to report failure to find a requested value + // (e.g. looking up an ID). See NotFound(). + ErrorTypeNotFound ErrorType = "FieldValueNotFound" + // ErrorTypeRequired is used to report required values that are not + // provided (e.g. empty strings, null values, or empty arrays). See + // Required(). + ErrorTypeRequired ErrorType = "FieldValueRequired" + // ErrorTypeDuplicate is used to report collisions of values that must be + // unique (e.g. unique IDs). See Duplicate(). + ErrorTypeDuplicate ErrorType = "FieldValueDuplicate" + // ErrorTypeInvalid is used to report malformed values (e.g. failed regex + // match, too long, out of bounds). See Invalid(). + ErrorTypeInvalid ErrorType = "FieldValueInvalid" + // ErrorTypeNotSupported is used to report unknown values for enumerated + // fields (e.g. a list of valid values). See NotSupported(). + ErrorTypeNotSupported ErrorType = "FieldValueNotSupported" + // ErrorTypeForbidden is used to report valid (as per formatting rules) + // values which would be accepted under some conditions, but which are not + // permitted by the current conditions (such as security policy). See + // Forbidden(). + ErrorTypeForbidden ErrorType = "FieldValueForbidden" + // ErrorTypeTooLong is used to report that the given value is too long. + // This is similar to ErrorTypeInvalid, but the error will not include the + // too-long value. See TooLong(). + ErrorTypeTooLong ErrorType = "FieldValueTooLong" + // ErrorTypeInternal is used to report other errors that are not related + // to user input. See InternalError(). + ErrorTypeInternal ErrorType = "InternalError" +) + +// String converts a ErrorType into its corresponding canonical error message. 
+func (t ErrorType) String() string { + switch t { + case ErrorTypeNotFound: + return "Not found" + case ErrorTypeRequired: + return "Required value" + case ErrorTypeDuplicate: + return "Duplicate value" + case ErrorTypeInvalid: + return "Invalid value" + case ErrorTypeNotSupported: + return "Unsupported value" + case ErrorTypeForbidden: + return "Forbidden" + case ErrorTypeTooLong: + return "Too long" + case ErrorTypeInternal: + return "Internal error" + default: + panic(fmt.Sprintf("unrecognized validation error: %q", string(t))) + } +} + +// NotFound returns a *Error indicating "value not found". This is +// used to report failure to find a requested value (e.g. looking up an ID). +func NotFound(field *Path, value interface{}) *Error { + return &Error{ErrorTypeNotFound, field.String(), value, ""} +} + +// Required returns a *Error indicating "value required". This is used +// to report required values that are not provided (e.g. empty strings, null +// values, or empty arrays). +func Required(field *Path, detail string) *Error { + return &Error{ErrorTypeRequired, field.String(), "", detail} +} + +// Duplicate returns a *Error indicating "duplicate value". This is +// used to report collisions of values that must be unique (e.g. names or IDs). +func Duplicate(field *Path, value interface{}) *Error { + return &Error{ErrorTypeDuplicate, field.String(), value, ""} +} + +// Invalid returns a *Error indicating "invalid value". This is used +// to report malformed values (e.g. failed regex match, too long, out of bounds). +func Invalid(field *Path, value interface{}, detail string) *Error { + return &Error{ErrorTypeInvalid, field.String(), value, detail} +} + +// NotSupported returns a *Error indicating "unsupported value". +// This is used to report unknown values for enumerated fields (e.g. a list of +// valid values). +func NotSupported(field *Path, value interface{}, validValues []string) *Error { + detail := "" + if validValues != nil && len(validValues) > 0 { + detail = "supported values: " + strings.Join(validValues, ", ") + } + return &Error{ErrorTypeNotSupported, field.String(), value, detail} +} + +// Forbidden returns a *Error indicating "forbidden". This is used to +// report valid (as per formatting rules) values which would be accepted under +// some conditions, but which are not permitted by current conditions (e.g. +// security policy). +func Forbidden(field *Path, detail string) *Error { + return &Error{ErrorTypeForbidden, field.String(), "", detail} +} + +// TooLong returns a *Error indicating "too long". This is used to +// report that the given value is too long. This is similar to +// Invalid, but the returned error will not include the too-long +// value. +func TooLong(field *Path, value interface{}, maxLength int) *Error { + return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d characters", maxLength)} +} + +// InternalError returns a *Error indicating "internal error". This is used +// to signal that an error was found that was not directly related to user +// input. The err argument must be non-nil. +func InternalError(field *Path, err error) *Error { + return &Error{ErrorTypeInternal, field.String(), nil, err.Error()} +} + +// ErrorList holds a set of Errors. It is plausible that we might one day have +// non-field errors in this same umbrella package, but for now we don't, so +// we can keep it simple and leave ErrorList here. 
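// Editor's sketch (not part of the vendored patch): constructing and printing a
// field.Error with the Invalid constructor above. NewPath/Child/Index come from
// this package's path.go (carried over unchanged by the rename elsewhere in
// this diff); the package is assumed importable as
// k8s.io/apimachinery/pkg/util/validation/field.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	path := field.NewPath("spec").Child("ports").Index(0).Child("name")
	err := field.Invalid(path, "Bad_Name", "must be a DNS-1123 label")

	// Error() prepends the field path to ErrorBody(), e.g.:
	// spec.ports[0].name: Invalid value: "Bad_Name": must be a DNS-1123 label
	fmt.Println(err.Error())
}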
+type ErrorList []*Error + +// NewErrorTypeMatcher returns an errors.Matcher that returns true +// if the provided error is a Error and has the provided ErrorType. +func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher { + return func(err error) bool { + if e, ok := err.(*Error); ok { + return e.Type == t + } + return false + } +} + +// ToAggregate converts the ErrorList into an errors.Aggregate. +func (list ErrorList) ToAggregate() utilerrors.Aggregate { + errs := make([]error, 0, len(list)) + errorMsgs := sets.NewString() + for _, err := range list { + msg := fmt.Sprintf("%v", err) + if errorMsgs.Has(msg) { + continue + } + errorMsgs.Insert(msg) + errs = append(errs, err) + } + return utilerrors.NewAggregate(errs) +} + +func fromAggregate(agg utilerrors.Aggregate) ErrorList { + errs := agg.Errors() + list := make(ErrorList, len(errs)) + for i := range errs { + list[i] = errs[i].(*Error) + } + return list +} + +// Filter removes items from the ErrorList that match the provided fns. +func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList { + err := utilerrors.FilterOut(list.ToAggregate(), fns...) + if err == nil { + return nil + } + // FilterOut takes an Aggregate and returns an Aggregate + return fromAggregate(err.(utilerrors.Aggregate)) +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/validation/field/path.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/util/validation/field/path.go rename to vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go new file mode 100644 index 0000000000..a0afc26e73 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -0,0 +1,343 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "math" + "net" + "regexp" + "strings" +) + +const qnameCharFmt string = "[A-Za-z0-9]" +const qnameExtCharFmt string = "[-A-Za-z0-9_.]" +const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" + qnameCharFmt +const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" +const qualifiedNameMaxLength int = 63 + +var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$") + +// IsQualifiedName tests whether the value passed is what Kubernetes calls a +// "qualified name". This is a format used in various places throughout the +// system. If the value is not valid, a list of error strings is returned. +// Otherwise an empty list (or nil) is returned. 
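// Editor's sketch (not part of the vendored patch): collecting errors in an
// ErrorList and reducing them with ToAggregate and Filter, all defined above.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	spec := field.NewPath("spec")
	errs := field.ErrorList{
		field.Required(spec.Child("selector"), "selector is required"),
		field.Invalid(spec.Child("replicas"), -1, "must be greater than or equal to 0"),
		// Exact duplicate; ToAggregate de-duplicates identical messages.
		field.Invalid(spec.Child("replicas"), -1, "must be greater than or equal to 0"),
	}

	fmt.Println(errs.ToAggregate())

	// Filter drops errors matched by the given matchers, here every Required error.
	filtered := errs.Filter(field.NewErrorTypeMatcher(field.ErrorTypeRequired))
	fmt.Println(len(filtered)) // 1
}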
+func IsQualifiedName(value string) []string { + var errs []string + parts := strings.Split(value, "/") + var name string + switch len(parts) { + case 1: + name = parts[0] + case 2: + var prefix string + prefix, name = parts[0], parts[1] + if len(prefix) == 0 { + errs = append(errs, "prefix part "+EmptyError()) + } else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 { + errs = append(errs, prefixEach(msgs, "prefix part ")...) + } + default: + return append(errs, "a qualified name "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")+ + " with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')") + } + + if len(name) == 0 { + errs = append(errs, "name part "+EmptyError()) + } else if len(name) > qualifiedNameMaxLength { + errs = append(errs, "name part "+MaxLenError(qualifiedNameMaxLength)) + } + if !qualifiedNameRegexp.MatchString(name) { + errs = append(errs, "name part "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")) + } + return errs +} + +const labelValueFmt string = "(" + qualifiedNameFmt + ")?" +const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" +const LabelValueMaxLength int = 63 + +var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$") + +// IsValidLabelValue tests whether the value passed is a valid label value. If +// the value is not valid, a list of error strings is returned. Otherwise an +// empty list (or nil) is returned. +func IsValidLabelValue(value string) []string { + var errs []string + if len(value) > LabelValueMaxLength { + errs = append(errs, MaxLenError(LabelValueMaxLength)) + } + if !labelValueRegexp.MatchString(value) { + errs = append(errs, RegexError(labelValueErrMsg, labelValueFmt, "MyValue", "my_value", "12345")) + } + return errs +} + +const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" +const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" +const DNS1123LabelMaxLength int = 63 + +var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$") + +// IsDNS1123Label tests for a string that conforms to the definition of a label in +// DNS (RFC 1123). +func IsDNS1123Label(value string) []string { + var errs []string + if len(value) > DNS1123LabelMaxLength { + errs = append(errs, MaxLenError(DNS1123LabelMaxLength)) + } + if !dns1123LabelRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) + } + return errs +} + +const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" +const dns1123SubdomainErrorMsg string = "a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" +const DNS1123SubdomainMaxLength int = 253 + +var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") + +// IsDNS1123Subdomain tests for a string that conforms to the definition of a +// subdomain in DNS (RFC 1123). 
+func IsDNS1123Subdomain(value string) []string { + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !dns1123SubdomainRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1123SubdomainErrorMsg, dns1123SubdomainFmt, "example.com")) + } + return errs +} + +const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" +const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" +const DNS1035LabelMaxLength int = 63 + +var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$") + +// IsDNS1035Label tests for a string that conforms to the definition of a label in +// DNS (RFC 1035). +func IsDNS1035Label(value string) []string { + var errs []string + if len(value) > DNS1035LabelMaxLength { + errs = append(errs, MaxLenError(DNS1035LabelMaxLength)) + } + if !dns1035LabelRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1035LabelErrMsg, dns1035LabelFmt, "my-name", "abc-123")) + } + return errs +} + +// wildcard definition - RFC 1034 section 4.3.3. +// examples: +// - valid: *.bar.com, *.foo.bar.com +// - invalid: *.*.bar.com, *.foo.*.com, *bar.com, f*.bar.com, * +const wildcardDNS1123SubdomainFmt = "\\*\\." + dns1123SubdomainFmt +const wildcardDNS1123SubdomainErrMsg = "a wildcard DNS-1123 subdomain must start with '*.', followed by a valid DNS subdomain, which must consist of lower case alphanumeric characters, '-' or '.' and end with an alphanumeric character" + +// IsWildcardDNS1123Subdomain tests for a string that conforms to the definition of a +// wildcard subdomain in DNS (RFC 1034 section 4.3.3). +func IsWildcardDNS1123Subdomain(value string) []string { + wildcardDNS1123SubdomainRegexp := regexp.MustCompile("^" + wildcardDNS1123SubdomainFmt + "$") + + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !wildcardDNS1123SubdomainRegexp.MatchString(value) { + errs = append(errs, RegexError(wildcardDNS1123SubdomainErrMsg, wildcardDNS1123SubdomainFmt, "*.example.com")) + } + return errs +} + +const cIdentifierFmt string = "[A-Za-z_][A-Za-z0-9_]*" +const identifierErrMsg string = "a valid C identifier must start with alphabetic character or '_', followed by a string of alphanumeric characters or '_'" + +var cIdentifierRegexp = regexp.MustCompile("^" + cIdentifierFmt + "$") + +// IsCIdentifier tests for a string that conforms the definition of an identifier +// in C. This checks the format, but not the length. +func IsCIdentifier(value string) []string { + if !cIdentifierRegexp.MatchString(value) { + return []string{RegexError(identifierErrMsg, cIdentifierFmt, "my_name", "MY_NAME", "MyName")} + } + return nil +} + +// IsValidPortNum tests that the argument is a valid, non-zero port number. +func IsValidPortNum(port int) []string { + if 1 <= port && port <= 65535 { + return nil + } + return []string{InclusiveRangeError(1, 65535)} +} + +// Now in libcontainer UID/GID limits is 0 ~ 1<<31 - 1 +// TODO: once we have a type for UID/GID we should make these that type. +const ( + minUserID = 0 + maxUserID = math.MaxInt32 + minGroupID = 0 + maxGroupID = math.MaxInt32 +) + +// IsValidGroupId tests that the argument is a valid Unix GID. 
+func IsValidGroupId(gid int64) []string { + if minGroupID <= gid && gid <= maxGroupID { + return nil + } + return []string{InclusiveRangeError(minGroupID, maxGroupID)} +} + +// IsValidUserId tests that the argument is a valid Unix UID. +func IsValidUserId(uid int64) []string { + if minUserID <= uid && uid <= maxUserID { + return nil + } + return []string{InclusiveRangeError(minUserID, maxUserID)} +} + +var portNameCharsetRegex = regexp.MustCompile("^[-a-z0-9]+$") +var portNameOneLetterRegexp = regexp.MustCompile("[a-z]") + +// IsValidPortName check that the argument is valid syntax. It must be +// non-empty and no more than 15 characters long. It may contain only [-a-z0-9] +// and must contain at least one letter [a-z]. It must not start or end with a +// hyphen, nor contain adjacent hyphens. +// +// Note: We only allow lower-case characters, even though RFC 6335 is case +// insensitive. +func IsValidPortName(port string) []string { + var errs []string + if len(port) > 15 { + errs = append(errs, MaxLenError(15)) + } + if !portNameCharsetRegex.MatchString(port) { + errs = append(errs, "must contain only alpha-numeric characters (a-z, 0-9), and hyphens (-)") + } + if !portNameOneLetterRegexp.MatchString(port) { + errs = append(errs, "must contain at least one letter or number (a-z, 0-9)") + } + if strings.Contains(port, "--") { + errs = append(errs, "must not contain consecutive hyphens") + } + if len(port) > 0 && (port[0] == '-' || port[len(port)-1] == '-') { + errs = append(errs, "must not begin or end with a hyphen") + } + return errs +} + +// IsValidIP tests that the argument is a valid IP address. +func IsValidIP(value string) []string { + if net.ParseIP(value) == nil { + return []string{"must be a valid IP address, (e.g. 10.9.8.7)"} + } + return nil +} + +const percentFmt string = "[0-9]+%" +const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'" + +var percentRegexp = regexp.MustCompile("^" + percentFmt + "$") + +func IsValidPercent(percent string) []string { + if !percentRegexp.MatchString(percent) { + return []string{RegexError(percentErrMsg, percentFmt, "1%", "93%")} + } + return nil +} + +const httpHeaderNameFmt string = "[-A-Za-z0-9]+" +const httpHeaderNameErrMsg string = "a valid HTTP header must consist of alphanumeric characters or '-'" + +var httpHeaderNameRegexp = regexp.MustCompile("^" + httpHeaderNameFmt + "$") + +// IsHTTPHeaderName checks that a string conforms to the Go HTTP library's +// definition of a valid header field name (a stricter subset than RFC7230). +func IsHTTPHeaderName(value string) []string { + if !httpHeaderNameRegexp.MatchString(value) { + return []string{RegexError(httpHeaderNameErrMsg, httpHeaderNameFmt, "X-Header-Name")} + } + return nil +} + +const configMapKeyFmt = `[-._a-zA-Z0-9]+` +const configMapKeyErrMsg string = "a valid config key must consist of alphanumeric characters, '-', '_' or '.'" + +var configMapKeyRegexp = regexp.MustCompile("^" + configMapKeyFmt + "$") + +// IsConfigMapKey tests for a string that is a valid key for a ConfigMap or Secret +func IsConfigMapKey(value string) []string { + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !configMapKeyRegexp.MatchString(value) { + errs = append(errs, RegexError(configMapKeyErrMsg, configMapKeyFmt, "key.name", "KEY_NAME", "key-name")) + } + if value == "." { + errs = append(errs, `must not be '.'`) + } else if value == ".." 
{ + errs = append(errs, `must not be '..'`) + } else if strings.HasPrefix(value, "..") { + errs = append(errs, `must not start with '..'`) + } + return errs +} + +// MaxLenError returns a string explanation of a "string too long" validation +// failure. +func MaxLenError(length int) string { + return fmt.Sprintf("must be no more than %d characters", length) +} + +// RegexError returns a string explanation of a regex validation failure. +func RegexError(msg string, fmt string, examples ...string) string { + if len(examples) == 0 { + return msg + " (regex used for validation is '" + fmt + "')" + } + msg += " (e.g. " + for i := range examples { + if i > 0 { + msg += " or " + } + msg += "'" + examples[i] + "', " + } + msg += "regex used for validation is '" + fmt + "')" + return msg +} + +// EmptyError returns a string explanation of a "must not be empty" validation +// failure. +func EmptyError() string { + return "must be non-empty" +} + +func prefixEach(msgs []string, prefix string) []string { + for i := range msgs { + msgs[i] = prefix + msgs[i] + } + return msgs +} + +// InclusiveRangeError returns a string explanation of a numeric "must be +// between" validation failure. +func InclusiveRangeError(lo, hi int) string { + return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi) +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/wait/doc.go b/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/util/wait/doc.go rename to vendor/k8s.io/apimachinery/pkg/util/wait/doc.go diff --git a/vendor/k8s.io/kubernetes/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/util/wait/wait.go rename to vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index 34ed2301da..4704afd91b 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -21,7 +21,7 @@ import ( "math/rand" "time" - "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/runtime" ) // For any test of the style: diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go new file mode 100644 index 0000000000..6ebfaea707 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go @@ -0,0 +1,346 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package yaml + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "strings" + "unicode" + + "github.com/ghodss/yaml" + "github.com/golang/glog" +) + +// ToJSON converts a single YAML document into a JSON document +// or returns an error. If the document appears to be JSON the +// YAML decoding path is not used (so that error messages are +// JSON specific). 
+func ToJSON(data []byte) ([]byte, error) { + if hasJSONPrefix(data) { + return data, nil + } + return yaml.YAMLToJSON(data) +} + +// YAMLToJSONDecoder decodes YAML documents from an io.Reader by +// separating individual documents. It first converts the YAML +// body to JSON, then unmarshals the JSON. +type YAMLToJSONDecoder struct { + reader Reader +} + +// NewYAMLToJSONDecoder decodes YAML documents from the provided +// stream in chunks by converting each document (as defined by +// the YAML spec) into its own chunk, converting it to JSON via +// yaml.YAMLToJSON, and then passing it to json.Decoder. +func NewYAMLToJSONDecoder(r io.Reader) *YAMLToJSONDecoder { + reader := bufio.NewReader(r) + return &YAMLToJSONDecoder{ + reader: NewYAMLReader(reader), + } +} + +// Decode reads a YAML document as JSON from the stream or returns +// an error. The decoding rules match json.Unmarshal, not +// yaml.Unmarshal. +func (d *YAMLToJSONDecoder) Decode(into interface{}) error { + bytes, err := d.reader.Read() + if err != nil && err != io.EOF { + return err + } + + if len(bytes) != 0 { + err := yaml.Unmarshal(bytes, into) + if err != nil { + return YAMLSyntaxError{err} + } + } + return err +} + +// YAMLDecoder reads chunks of objects and returns ErrShortBuffer if +// the data is not sufficient. +type YAMLDecoder struct { + r io.ReadCloser + scanner *bufio.Scanner + remaining []byte +} + +// NewDocumentDecoder decodes YAML documents from the provided +// stream in chunks by converting each document (as defined by +// the YAML spec) into its own chunk. io.ErrShortBuffer will be +// returned if the entire buffer could not be read to assist +// the caller in framing the chunk. +func NewDocumentDecoder(r io.ReadCloser) io.ReadCloser { + scanner := bufio.NewScanner(r) + scanner.Split(splitYAMLDocument) + return &YAMLDecoder{ + r: r, + scanner: scanner, + } +} + +// Read reads the previous slice into the buffer, or attempts to read +// the next chunk. +// TODO: switch to readline approach. +func (d *YAMLDecoder) Read(data []byte) (n int, err error) { + left := len(d.remaining) + if left == 0 { + // return the next chunk from the stream + if !d.scanner.Scan() { + err := d.scanner.Err() + if err == nil { + err = io.EOF + } + return 0, err + } + out := d.scanner.Bytes() + d.remaining = out + left = len(out) + } + + // fits within data + if left <= len(data) { + copy(data, d.remaining) + d.remaining = nil + return len(d.remaining), nil + } + + // caller will need to reread + copy(data, d.remaining[:left]) + d.remaining = d.remaining[left:] + return len(data), io.ErrShortBuffer +} + +func (d *YAMLDecoder) Close() error { + return d.r.Close() +} + +const yamlSeparator = "\n---" +const separator = "---" + +// splitYAMLDocument is a bufio.SplitFunc for splitting YAML streams into individual documents. +func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + sep := len([]byte(yamlSeparator)) + if i := bytes.Index(data, []byte(yamlSeparator)); i >= 0 { + // We have a potential document terminator + i += sep + after := data[i:] + if len(after) == 0 { + // we can't read any more characters + if atEOF { + return len(data), data[:len(data)-sep], nil + } + return 0, nil, nil + } + if j := bytes.IndexByte(after, '\n'); j >= 0 { + return i + j + 1, data[0 : i-sep], nil + } + return 0, nil, nil + } + // If we're at EOF, we have a final, non-terminated line. Return it. 
+ if atEOF { + return len(data), data, nil + } + // Request more data. + return 0, nil, nil +} + +// decoder is a convenience interface for Decode. +type decoder interface { + Decode(into interface{}) error +} + +// YAMLOrJSONDecoder attempts to decode a stream of JSON documents or +// YAML documents by sniffing for a leading { character. +type YAMLOrJSONDecoder struct { + r io.Reader + bufferSize int + + decoder decoder + rawData []byte +} + +type JSONSyntaxError struct { + Line int + Err error +} + +func (e JSONSyntaxError) Error() string { + return fmt.Sprintf("json: line %d: %s", e.Line, e.Err.Error()) +} + +type YAMLSyntaxError struct { + err error +} + +func (e YAMLSyntaxError) Error() string { + return e.err.Error() +} + +// NewYAMLOrJSONDecoder returns a decoder that will process YAML documents +// or JSON documents from the given reader as a stream. bufferSize determines +// how far into the stream the decoder will look to figure out whether this +// is a JSON stream (has whitespace followed by an open brace). +func NewYAMLOrJSONDecoder(r io.Reader, bufferSize int) *YAMLOrJSONDecoder { + return &YAMLOrJSONDecoder{ + r: r, + bufferSize: bufferSize, + } +} + +// Decode unmarshals the next object from the underlying stream into the +// provide object, or returns an error. +func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { + if d.decoder == nil { + buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize) + if isJSON { + glog.V(4).Infof("decoding stream as JSON") + d.decoder = json.NewDecoder(buffer) + d.rawData = origData + } else { + glog.V(4).Infof("decoding stream as YAML") + d.decoder = NewYAMLToJSONDecoder(buffer) + } + } + err := d.decoder.Decode(into) + if jsonDecoder, ok := d.decoder.(*json.Decoder); ok { + if syntax, ok := err.(*json.SyntaxError); ok { + data, readErr := ioutil.ReadAll(jsonDecoder.Buffered()) + if readErr != nil { + glog.V(4).Infof("reading stream failed: %v", readErr) + } + js := string(data) + + // if contents from io.Reader are not complete, + // use the original raw data to prevent panic + if int64(len(js)) <= syntax.Offset { + js = string(d.rawData) + } + + start := strings.LastIndex(js[:syntax.Offset], "\n") + 1 + line := strings.Count(js[:start], "\n") + return JSONSyntaxError{ + Line: line, + Err: fmt.Errorf(syntax.Error()), + } + } + } + return err +} + +type Reader interface { + Read() ([]byte, error) +} + +type YAMLReader struct { + reader Reader +} + +func NewYAMLReader(r *bufio.Reader) *YAMLReader { + return &YAMLReader{ + reader: &LineReader{reader: r}, + } +} + +// Read returns a full YAML document. +func (r *YAMLReader) Read() ([]byte, error) { + var buffer bytes.Buffer + for { + line, err := r.reader.Read() + if err != nil && err != io.EOF { + return nil, err + } + + sep := len([]byte(separator)) + if i := bytes.Index(line, []byte(separator)); i == 0 { + // We have a potential document terminator + i += sep + after := line[i:] + if len(strings.TrimRightFunc(string(after), unicode.IsSpace)) == 0 { + if buffer.Len() != 0 { + return buffer.Bytes(), nil + } + if err == io.EOF { + return nil, err + } + } + } + if err == io.EOF { + if buffer.Len() != 0 { + // If we're at EOF, we have a final, non-terminated line. Return it. + return buffer.Bytes(), nil + } + return nil, err + } + buffer.Write(line) + } +} + +type LineReader struct { + reader *bufio.Reader +} + +// Read returns a single line (with '\n' ended) from the underlying reader. +// An error is returned iff there is an error with the underlying reader. 
+func (r *LineReader) Read() ([]byte, error) { + var ( + isPrefix bool = true + err error = nil + line []byte + buffer bytes.Buffer + ) + + for isPrefix && err == nil { + line, isPrefix, err = r.reader.ReadLine() + buffer.Write(line) + } + buffer.WriteByte('\n') + return buffer.Bytes(), err +} + +// GuessJSONStream scans the provided reader up to size, looking +// for an open brace indicating this is JSON. It will return the +// bufio.Reader it creates for the consumer. +func GuessJSONStream(r io.Reader, size int) (io.Reader, []byte, bool) { + buffer := bufio.NewReaderSize(r, size) + b, _ := buffer.Peek(size) + return buffer, b, hasJSONPrefix(b) +} + +var jsonPrefix = []byte("{") + +// hasJSONPrefix returns true if the provided buffer appears to start with +// a JSON open brace. +func hasJSONPrefix(buf []byte) bool { + return hasPrefix(buf, jsonPrefix) +} + +// Return true if the first non-whitespace bytes in buf is +// prefix. +func hasPrefix(buf []byte, prefix []byte) bool { + trim := bytes.TrimLeftFunc(buf, unicode.IsSpace) + return bytes.HasPrefix(trim, prefix) +} diff --git a/vendor/k8s.io/apimachinery/pkg/version/doc.go b/vendor/k8s.io/apimachinery/pkg/version/doc.go new file mode 100644 index 0000000000..00431d4899 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/version/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package version supplies the type for version information collected at build time. +// +k8s:openapi-gen=true +package version diff --git a/vendor/k8s.io/apimachinery/pkg/version/types.go b/vendor/k8s.io/apimachinery/pkg/version/types.go new file mode 100644 index 0000000000..72727b503b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/version/types.go @@ -0,0 +1,37 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +// Info contains versioning information. +// TODO: Add []string of api versions supported? It's still unclear +// how we'll want to distribute that information. +type Info struct { + Major string `json:"major"` + Minor string `json:"minor"` + GitVersion string `json:"gitVersion"` + GitCommit string `json:"gitCommit"` + GitTreeState string `json:"gitTreeState"` + BuildDate string `json:"buildDate"` + GoVersion string `json:"goVersion"` + Compiler string `json:"compiler"` + Platform string `json:"platform"` +} + +// String returns info as a human-friendly version string. 
+func (info Info) String() string { + return info.GitVersion +} diff --git a/vendor/k8s.io/kubernetes/pkg/watch/doc.go b/vendor/k8s.io/apimachinery/pkg/watch/doc.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/watch/doc.go rename to vendor/k8s.io/apimachinery/pkg/watch/doc.go diff --git a/vendor/k8s.io/kubernetes/pkg/watch/filter.go b/vendor/k8s.io/apimachinery/pkg/watch/filter.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/watch/filter.go rename to vendor/k8s.io/apimachinery/pkg/watch/filter.go diff --git a/vendor/k8s.io/kubernetes/pkg/watch/mux.go b/vendor/k8s.io/apimachinery/pkg/watch/mux.go similarity index 97% rename from vendor/k8s.io/kubernetes/pkg/watch/mux.go rename to vendor/k8s.io/apimachinery/pkg/watch/mux.go index ec6de050e5..fafccd78ef 100644 --- a/vendor/k8s.io/kubernetes/pkg/watch/mux.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/mux.go @@ -19,8 +19,8 @@ package watch import ( "sync" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // FullChannelBehavior controls how the Broadcaster reacts if a watcher's watch @@ -81,8 +81,8 @@ const internalRunFunctionMarker = "internal-do-function" // a function type we can shoehorn into the queue. type functionFakeRuntimeObject func() -func (obj functionFakeRuntimeObject) GetObjectKind() unversioned.ObjectKind { - return unversioned.EmptyObjectKind +func (obj functionFakeRuntimeObject) GetObjectKind() schema.ObjectKind { + return schema.EmptyObjectKind } // Execute f, blocking the incoming queue (and waiting for it to drain first). diff --git a/vendor/k8s.io/kubernetes/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go similarity index 96% rename from vendor/k8s.io/kubernetes/pkg/watch/streamwatcher.go rename to vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go index 26cf61d052..93bb1cdf7f 100644 --- a/vendor/k8s.io/kubernetes/pkg/watch/streamwatcher.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go @@ -21,9 +21,9 @@ import ( "sync" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/net" - utilruntime "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/net" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) // Decoder allows StreamWatcher to watch any stream for which a Decoder can be written. diff --git a/vendor/k8s.io/kubernetes/pkg/watch/until.go b/vendor/k8s.io/apimachinery/pkg/watch/until.go similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/watch/until.go rename to vendor/k8s.io/apimachinery/pkg/watch/until.go index 17970b021d..6e139de590 100644 --- a/vendor/k8s.io/kubernetes/pkg/watch/until.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/until.go @@ -17,9 +17,10 @@ limitations under the License. package watch import ( + "errors" "time" - "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/apimachinery/pkg/util/wait" ) // ConditionFunc returns true if the condition has been reached, false if it has not been reached yet, @@ -28,6 +29,9 @@ import ( // from false to true). type ConditionFunc func(event Event) (bool, error) +// errWatchClosed is returned when the watch channel is closed before timeout in Until. +var errWatchClosed = errors.New("watch closed before Until timeout") + // Until reads items from the watch until each provided condition succeeds, and then returns the last watch // encountered. 
The first condition that returns an error terminates the watch (and the event is also returned). // If no event has been received, the returned event will be nil. @@ -61,7 +65,7 @@ func Until(timeout time.Duration, watcher Interface, conditions ...ConditionFunc select { case event, ok := <-ch: if !ok { - return lastEvent, wait.ErrWaitTimeout + return lastEvent, errWatchClosed } lastEvent = &event diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go new file mode 100644 index 0000000000..dd49c41f9a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -0,0 +1,269 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "fmt" + "sync" + + "k8s.io/apimachinery/pkg/runtime" + + "github.com/golang/glog" +) + +// Interface can be implemented by anything that knows how to watch and report changes. +type Interface interface { + // Stops watching. Will close the channel returned by ResultChan(). Releases + // any resources used by the watch. + Stop() + + // Returns a chan which will receive all the events. If an error occurs + // or Stop() is called, this channel will be closed, in which case the + // watch should be completely cleaned up. + ResultChan() <-chan Event +} + +// EventType defines the possible types of events. +type EventType string + +const ( + Added EventType = "ADDED" + Modified EventType = "MODIFIED" + Deleted EventType = "DELETED" + Error EventType = "ERROR" + + DefaultChanSize int32 = 100 +) + +// Event represents a single event to a watched resource. +type Event struct { + Type EventType + + // Object is: + // * If Type is Added or Modified: the new state of the object. + // * If Type is Deleted: the state of the object immediately before deletion. + // * If Type is Error: *api.Status is recommended; other types may make sense + // depending on context. + Object runtime.Object +} + +type emptyWatch chan Event + +// NewEmptyWatch returns a watch interface that returns no results and is closed. +// May be used in certain error conditions where no information is available but +// an error is not warranted. +func NewEmptyWatch() Interface { + ch := make(chan Event) + close(ch) + return emptyWatch(ch) +} + +// Stop implements Interface +func (w emptyWatch) Stop() { +} + +// ResultChan implements Interface +func (w emptyWatch) ResultChan() <-chan Event { + return chan Event(w) +} + +// FakeWatcher lets you test anything that consumes a watch.Interface; threadsafe. +type FakeWatcher struct { + result chan Event + Stopped bool + sync.Mutex +} + +func NewFake() *FakeWatcher { + return &FakeWatcher{ + result: make(chan Event), + } +} + +func NewFakeWithChanSize(size int, blocking bool) *FakeWatcher { + return &FakeWatcher{ + result: make(chan Event, size), + } +} + +// Stop implements Interface.Stop(). 
+func (f *FakeWatcher) Stop() { + f.Lock() + defer f.Unlock() + if !f.Stopped { + glog.V(4).Infof("Stopping fake watcher.") + close(f.result) + f.Stopped = true + } +} + +func (f *FakeWatcher) IsStopped() bool { + f.Lock() + defer f.Unlock() + return f.Stopped +} + +// Reset prepares the watcher to be reused. +func (f *FakeWatcher) Reset() { + f.Lock() + defer f.Unlock() + f.Stopped = false + f.result = make(chan Event) +} + +func (f *FakeWatcher) ResultChan() <-chan Event { + return f.result +} + +// Add sends an add event. +func (f *FakeWatcher) Add(obj runtime.Object) { + f.result <- Event{Added, obj} +} + +// Modify sends a modify event. +func (f *FakeWatcher) Modify(obj runtime.Object) { + f.result <- Event{Modified, obj} +} + +// Delete sends a delete event. +func (f *FakeWatcher) Delete(lastValue runtime.Object) { + f.result <- Event{Deleted, lastValue} +} + +// Error sends an Error event. +func (f *FakeWatcher) Error(errValue runtime.Object) { + f.result <- Event{Error, errValue} +} + +// Action sends an event of the requested type, for table-based testing. +func (f *FakeWatcher) Action(action EventType, obj runtime.Object) { + f.result <- Event{action, obj} +} + +// RaceFreeFakeWatcher lets you test anything that consumes a watch.Interface; threadsafe. +type RaceFreeFakeWatcher struct { + result chan Event + Stopped bool + sync.Mutex +} + +func NewRaceFreeFake() *RaceFreeFakeWatcher { + return &RaceFreeFakeWatcher{ + result: make(chan Event, DefaultChanSize), + } +} + +// Stop implements Interface.Stop(). +func (f *RaceFreeFakeWatcher) Stop() { + f.Lock() + defer f.Unlock() + if !f.Stopped { + glog.V(4).Infof("Stopping fake watcher.") + close(f.result) + f.Stopped = true + } +} + +func (f *RaceFreeFakeWatcher) IsStopped() bool { + f.Lock() + defer f.Unlock() + return f.Stopped +} + +// Reset prepares the watcher to be reused. +func (f *RaceFreeFakeWatcher) Reset() { + f.Lock() + defer f.Unlock() + f.Stopped = false + f.result = make(chan Event, DefaultChanSize) +} + +func (f *RaceFreeFakeWatcher) ResultChan() <-chan Event { + f.Lock() + defer f.Unlock() + return f.result +} + +// Add sends an add event. +func (f *RaceFreeFakeWatcher) Add(obj runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{Added, obj}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// Modify sends a modify event. +func (f *RaceFreeFakeWatcher) Modify(obj runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{Modified, obj}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// Delete sends a delete event. +func (f *RaceFreeFakeWatcher) Delete(lastValue runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{Deleted, lastValue}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// Error sends an Error event. +func (f *RaceFreeFakeWatcher) Error(errValue runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{Error, errValue}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// Action sends an event of the requested type, for table-based testing. 
+func (f *RaceFreeFakeWatcher) Action(action EventType, obj runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{action, obj}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} diff --git a/vendor/k8s.io/kubernetes/third_party/forked/golang/json/fields.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go similarity index 100% rename from vendor/k8s.io/kubernetes/third_party/forked/golang/json/fields.go rename to vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go diff --git a/vendor/k8s.io/kubernetes/third_party/forked/golang/reflect/deep_equal.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go similarity index 100% rename from vendor/k8s.io/kubernetes/third_party/forked/golang/reflect/deep_equal.go rename to vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go diff --git a/vendor/k8s.io/kubernetes/third_party/forked/golang/reflect/type.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go similarity index 100% rename from vendor/k8s.io/kubernetes/third_party/forked/golang/reflect/type.go rename to vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go diff --git a/vendor/k8s.io/apiserver/LICENSE b/vendor/k8s.io/apiserver/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/k8s.io/apiserver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/apiserver/pkg/server/healthz/doc.go b/vendor/k8s.io/apiserver/pkg/server/healthz/doc.go new file mode 100644 index 0000000000..3ce26017c7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/healthz/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package healthz implements basic http server health checking. 
+// Usage: +// import "k8s.io/apiserver/pkg/server/healthz" +// healthz.DefaultHealthz() +package healthz diff --git a/vendor/k8s.io/kubernetes/pkg/healthz/healthz.go b/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go similarity index 86% rename from vendor/k8s.io/kubernetes/pkg/healthz/healthz.go rename to vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go index b71797d753..441500dcb5 100644 --- a/vendor/k8s.io/kubernetes/pkg/healthz/healthz.go +++ b/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go @@ -23,7 +23,7 @@ import ( "sync" ) -// HealthzChecker is a named healthz check. +// HealthzChecker is a named healthz checker. type HealthzChecker interface { Name() string Check(req *http.Request) error @@ -41,7 +41,7 @@ func DefaultHealthz(checks ...HealthzChecker) { // PingHealthz returns true automatically when checked var PingHealthz HealthzChecker = ping{} -// ping implements the simplest possible health checker. +// ping implements the simplest possible healthz checker. type ping struct{} func (ping) Name() string { @@ -53,7 +53,7 @@ func (ping) Check(_ *http.Request) error { return nil } -// NamedCheck returns a health checker for the given name and function. +// NamedCheck returns a healthz checker for the given name and function. func NamedCheck(name string, check func(r *http.Request) error) HealthzChecker { return &healthzCheck{name, check} } @@ -96,9 +96,10 @@ func handleRootHealthz(checks ...HealthzChecker) http.HandlerFunc { failed := false var verboseOut bytes.Buffer for _, check := range checks { - err := check.Check(r) - if err != nil { - fmt.Fprintf(&verboseOut, "[-]%v failed: %v\n", check.Name(), err) + if check.Check(r) != nil { + // don't include the error since this endpoint is public. If someone wants more detail + // they should have explicit permission to the detailed checks. + fmt.Fprintf(&verboseOut, "[-]%v failed: reason withheld\n", check.Name()) failed = true } else { fmt.Fprintf(&verboseOut, "[+]%v ok\n", check.Name()) @@ -125,7 +126,7 @@ func adaptCheckToHandler(c func(r *http.Request) error) http.HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { err := c(r) if err != nil { - http.Error(w, fmt.Sprintf("Internal server error: %v", err), http.StatusInternalServerError) + http.Error(w, fmt.Sprintf("internal server error: %v", err), http.StatusInternalServerError) } else { fmt.Fprint(w, "ok") } diff --git a/vendor/k8s.io/client-go/LICENSE b/vendor/k8s.io/client-go/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/k8s.io/client-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go new file mode 100644 index 0000000000..ff4c57a4f0 --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -0,0 +1,439 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "encoding/json" + "fmt" + "net/url" + "sort" + "strings" + + "github.com/emicklei/go-restful/swagger" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/api/v1" + restclient "k8s.io/client-go/rest" +) + +// defaultRetries is the number of times a resource discovery is repeated if an api group disappears on the fly (e.g. ThirdPartyResources). +const defaultRetries = 2 + +// DiscoveryInterface holds the methods that discover server-supported API groups, +// versions and resources. +type DiscoveryInterface interface { + RESTClient() restclient.Interface + ServerGroupsInterface + ServerResourcesInterface + ServerVersionInterface + SwaggerSchemaInterface +} + +// CachedDiscoveryInterface is a DiscoveryInterface with cache invalidation and freshness. +type CachedDiscoveryInterface interface { + DiscoveryInterface + // Fresh returns true if no cached data was used that had been retrieved before the instantiation. + Fresh() bool + // Invalidate enforces that no cached data is used in the future that is older than the current time. + Invalidate() +} + +// ServerGroupsInterface has methods for obtaining supported groups on the API server +type ServerGroupsInterface interface { + // ServerGroups returns the supported groups, with information like supported versions and the + // preferred version. + ServerGroups() (*metav1.APIGroupList, error) +} + +// ServerResourcesInterface has methods for obtaining supported resources on the API server +type ServerResourcesInterface interface { + // ServerResourcesForGroupVersion returns the supported resources for a group and version. + ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) + // ServerResources returns the supported resources for all groups and versions. + ServerResources() ([]*metav1.APIResourceList, error) + // ServerPreferredResources returns the supported resources with the version preferred by the + // server. + ServerPreferredResources() ([]*metav1.APIResourceList, error) + // ServerPreferredNamespacedResources returns the supported namespaced resources with the + // version preferred by the server. + ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) +} + +// ServerVersionInterface has a method for retrieving the server's version. +type ServerVersionInterface interface { + // ServerVersion retrieves and parses the server's version (git version). + ServerVersion() (*version.Info, error) +} + +// SwaggerSchemaInterface has a method to retrieve the swagger schema. +type SwaggerSchemaInterface interface { + // SwaggerSchema retrieves and parses the swagger API schema the server supports. 
+ SwaggerSchema(version schema.GroupVersion) (*swagger.ApiDeclaration, error) +} + +// DiscoveryClient implements the functions that discover server-supported API groups, +// versions and resources. +type DiscoveryClient struct { + restClient restclient.Interface + + LegacyPrefix string +} + +// Convert metav1.APIVersions to metav1.APIGroup. APIVersions is used by legacy v1, so +// group would be "". +func apiVersionsToAPIGroup(apiVersions *metav1.APIVersions) (apiGroup metav1.APIGroup) { + groupVersions := []metav1.GroupVersionForDiscovery{} + for _, version := range apiVersions.Versions { + groupVersion := metav1.GroupVersionForDiscovery{ + GroupVersion: version, + Version: version, + } + groupVersions = append(groupVersions, groupVersion) + } + apiGroup.Versions = groupVersions + // There should be only one groupVersion returned at /api + apiGroup.PreferredVersion = groupVersions[0] + return +} + +// ServerGroups returns the supported groups, with information like supported versions and the +// preferred version. +func (d *DiscoveryClient) ServerGroups() (apiGroupList *metav1.APIGroupList, err error) { + // Get the groupVersions exposed at /api + v := &metav1.APIVersions{} + err = d.restClient.Get().AbsPath(d.LegacyPrefix).Do().Into(v) + apiGroup := metav1.APIGroup{} + if err == nil && len(v.Versions) != 0 { + apiGroup = apiVersionsToAPIGroup(v) + } + if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) { + return nil, err + } + + // Get the groupVersions exposed at /apis + apiGroupList = &metav1.APIGroupList{} + err = d.restClient.Get().AbsPath("/apis").Do().Into(apiGroupList) + if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) { + return nil, err + } + // to be compatible with a v1.0 server, if it's a 403 or 404, ignore and return whatever we got from /api + if err != nil && (errors.IsNotFound(err) || errors.IsForbidden(err)) { + apiGroupList = &metav1.APIGroupList{} + } + + // append the group retrieved from /api to the list if not empty + if len(v.Versions) != 0 { + apiGroupList.Groups = append(apiGroupList.Groups, apiGroup) + } + return apiGroupList, nil +} + +// ServerResourcesForGroupVersion returns the supported resources for a group and version. +func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (resources *metav1.APIResourceList, err error) { + url := url.URL{} + if len(groupVersion) == 0 { + return nil, fmt.Errorf("groupVersion shouldn't be empty") + } + if len(d.LegacyPrefix) > 0 && groupVersion == "v1" { + url.Path = d.LegacyPrefix + "/" + groupVersion + } else { + url.Path = "/apis/" + groupVersion + } + resources = &metav1.APIResourceList{ + GroupVersion: groupVersion, + } + err = d.restClient.Get().AbsPath(url.String()).Do().Into(resources) + if err != nil { + // ignore 403 or 404 error to be compatible with an v1.0 server. + if groupVersion == "v1" && (errors.IsNotFound(err) || errors.IsForbidden(err)) { + return resources, nil + } + return nil, err + } + return resources, nil +} + +// serverResources returns the supported resources for all groups and versions. 
+func (d *DiscoveryClient) serverResources(failEarly bool) ([]*metav1.APIResourceList, error) { + apiGroups, err := d.ServerGroups() + if err != nil { + return nil, err + } + + result := []*metav1.APIResourceList{} + failedGroups := make(map[schema.GroupVersion]error) + + for _, apiGroup := range apiGroups.Groups { + for _, version := range apiGroup.Versions { + gv := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} + resources, err := d.ServerResourcesForGroupVersion(version.GroupVersion) + if err != nil { + // TODO: maybe restrict this to NotFound errors + failedGroups[gv] = err + if failEarly { + return nil, &ErrGroupDiscoveryFailed{Groups: failedGroups} + } + continue + } + + result = append(result, resources) + } + } + + if len(failedGroups) == 0 { + return result, nil + } + + return result, &ErrGroupDiscoveryFailed{Groups: failedGroups} +} + +// ServerResources returns the supported resources for all groups and versions. +func (d *DiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) { + return withRetries(defaultRetries, d.serverResources) +} + +// ErrGroupDiscoveryFailed is returned if one or more API groups fail to load. +type ErrGroupDiscoveryFailed struct { + // Groups is a list of the groups that failed to load and the error cause + Groups map[schema.GroupVersion]error +} + +// Error implements the error interface +func (e *ErrGroupDiscoveryFailed) Error() string { + var groups []string + for k, v := range e.Groups { + groups = append(groups, fmt.Sprintf("%s: %v", k, v)) + } + sort.Strings(groups) + return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(groups, ", ")) +} + +// IsGroupDiscoveryFailedError returns true if the provided error indicates the server was unable to discover +// a complete list of APIs for the client to use. +func IsGroupDiscoveryFailedError(err error) bool { + _, ok := err.(*ErrGroupDiscoveryFailed) + return err != nil && ok +} + +// serverPreferredResources returns the supported resources with the version preferred by the server. 
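A minimal usage sketch for the ServerResources/ErrGroupDiscoveryFailed behaviour vendored above; the helper name printServerResources and its output format are this example's own assumptions, not part of client-go.

package example

import (
    "fmt"

    "k8s.io/client-go/discovery"
)

// printServerResources tolerates partial discovery failures: per the code
// above, ServerResources can return usable results together with an
// *ErrGroupDiscoveryFailed error when some groups could not be discovered.
func printServerResources(dc discovery.DiscoveryInterface) error {
    lists, err := dc.ServerResources()
    if err != nil && !discovery.IsGroupDiscoveryFailedError(err) {
        return err
    }
    if discovery.IsGroupDiscoveryFailedError(err) {
        fmt.Println("warning: partial discovery:", err)
    }
    for _, rl := range lists {
        fmt.Printf("%s: %d resources\n", rl.GroupVersion, len(rl.APIResources))
    }
    return nil
}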
+func (d *DiscoveryClient) serverPreferredResources(failEarly bool) ([]*metav1.APIResourceList, error) { + serverGroupList, err := d.ServerGroups() + if err != nil { + return nil, err + } + + result := []*metav1.APIResourceList{} + failedGroups := make(map[schema.GroupVersion]error) + + grVersions := map[schema.GroupResource]string{} // selected version of a GroupResource + grApiResources := map[schema.GroupResource]*metav1.APIResource{} // selected APIResource for a GroupResource + gvApiResourceLists := map[schema.GroupVersion]*metav1.APIResourceList{} // blueprint for a APIResourceList for later grouping + + for _, apiGroup := range serverGroupList.Groups { + for _, version := range apiGroup.Versions { + groupVersion := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} + apiResourceList, err := d.ServerResourcesForGroupVersion(version.GroupVersion) + if err != nil { + // TODO: maybe restrict this to NotFound errors + failedGroups[groupVersion] = err + if failEarly { + return nil, &ErrGroupDiscoveryFailed{Groups: failedGroups} + } + continue + } + + // create empty list which is filled later in another loop + emptyApiResourceList := metav1.APIResourceList{ + GroupVersion: version.GroupVersion, + } + gvApiResourceLists[groupVersion] = &emptyApiResourceList + result = append(result, &emptyApiResourceList) + + for i := range apiResourceList.APIResources { + apiResource := &apiResourceList.APIResources[i] + if strings.Contains(apiResource.Name, "/") { + continue + } + gv := schema.GroupResource{Group: apiGroup.Name, Resource: apiResource.Name} + if _, ok := grApiResources[gv]; ok && version.Version != apiGroup.PreferredVersion.Version { + // only override with preferred version + continue + } + grVersions[gv] = version.Version + grApiResources[gv] = apiResource + } + } + } + + // group selected APIResources according to GroupVersion into APIResourceLists + for groupResource, apiResource := range grApiResources { + version := grVersions[groupResource] + groupVersion := schema.GroupVersion{Group: groupResource.Group, Version: version} + apiResourceList := gvApiResourceLists[groupVersion] + apiResourceList.APIResources = append(apiResourceList.APIResources, *apiResource) + } + + if len(failedGroups) == 0 { + return result, nil + } + + return result, &ErrGroupDiscoveryFailed{Groups: failedGroups} +} + +// ServerPreferredResources returns the supported resources with the version preferred by the +// server. +func (d *DiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { + return withRetries(defaultRetries, func(retryEarly bool) ([]*metav1.APIResourceList, error) { + return d.serverPreferredResources(retryEarly) + }) +} + +// ServerPreferredNamespacedResources returns the supported namespaced resources with the +// version preferred by the server. +func (d *DiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { + all, err := d.ServerPreferredResources() + return FilteredBy(ResourcePredicateFunc(func(groupVersion string, r *metav1.APIResource) bool { + return r.Namespaced + }), all), err +} + +// ServerVersion retrieves and parses the server's version (git version). 
+func (d *DiscoveryClient) ServerVersion() (*version.Info, error) { + body, err := d.restClient.Get().AbsPath("/version").Do().Raw() + if err != nil { + return nil, err + } + var info version.Info + err = json.Unmarshal(body, &info) + if err != nil { + return nil, fmt.Errorf("got '%s': %v", string(body), err) + } + return &info, nil +} + +// SwaggerSchema retrieves and parses the swagger API schema the server supports. +func (d *DiscoveryClient) SwaggerSchema(version schema.GroupVersion) (*swagger.ApiDeclaration, error) { + if version.Empty() { + return nil, fmt.Errorf("groupVersion cannot be empty") + } + + groupList, err := d.ServerGroups() + if err != nil { + return nil, err + } + groupVersions := metav1.ExtractGroupVersions(groupList) + // This check also takes care the case that kubectl is newer than the running endpoint + if stringDoesntExistIn(version.String(), groupVersions) { + return nil, fmt.Errorf("API version: %v is not supported by the server. Use one of: %v", version, groupVersions) + } + var path string + if len(d.LegacyPrefix) > 0 && version == v1.SchemeGroupVersion { + path = "/swaggerapi" + d.LegacyPrefix + "/" + version.Version + } else { + path = "/swaggerapi/apis/" + version.Group + "/" + version.Version + } + + body, err := d.restClient.Get().AbsPath(path).Do().Raw() + if err != nil { + return nil, err + } + var schema swagger.ApiDeclaration + err = json.Unmarshal(body, &schema) + if err != nil { + return nil, fmt.Errorf("got '%s': %v", string(body), err) + } + return &schema, nil +} + +// withRetries retries the given recovery function in case the groups supported by the server change after ServerGroup() returns. +func withRetries(maxRetries int, f func(failEarly bool) ([]*metav1.APIResourceList, error)) ([]*metav1.APIResourceList, error) { + var result []*metav1.APIResourceList + var err error + for i := 0; i < maxRetries; i++ { + failEarly := i < maxRetries-1 + result, err = f(failEarly) + if err == nil { + return result, nil + } + if _, ok := err.(*ErrGroupDiscoveryFailed); !ok { + return nil, err + } + } + return result, err +} + +func setDiscoveryDefaults(config *restclient.Config) error { + config.APIPath = "" + config.GroupVersion = nil + codec := runtime.NoopEncoder{Decoder: api.Codecs.UniversalDecoder()} + config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec}) + if len(config.UserAgent) == 0 { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + return nil +} + +// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. This client +// can be used to discover supported resources in the API server. +func NewDiscoveryClientForConfig(c *restclient.Config) (*DiscoveryClient, error) { + config := *c + if err := setDiscoveryDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.UnversionedRESTClientFor(&config) + return &DiscoveryClient{restClient: client, LegacyPrefix: "/api"}, err +} + +// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. If +// there is an error, it panics. +func NewDiscoveryClientForConfigOrDie(c *restclient.Config) *DiscoveryClient { + client, err := NewDiscoveryClientForConfig(c) + if err != nil { + panic(err) + } + return client + +} + +// New creates a new DiscoveryClient for the given RESTClient. 
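A short sketch, assuming a reachable API server, of constructing the discovery client added above from a rest.Config and querying ServerVersion and ServerGroups; the Host value is a placeholder and real programs would typically build the config from kubeconfig or in-cluster settings.

package main

import (
    "fmt"
    "log"

    "k8s.io/client-go/discovery"
    restclient "k8s.io/client-go/rest"
)

func main() {
    // Placeholder endpoint; adjust for your environment.
    cfg := &restclient.Config{Host: "https://127.0.0.1:6443"}

    dc, err := discovery.NewDiscoveryClientForConfig(cfg)
    if err != nil {
        log.Fatal(err)
    }

    info, err := dc.ServerVersion()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("server version:", info.GitVersion)

    groups, err := dc.ServerGroups()
    if err != nil {
        log.Fatal(err)
    }
    for _, g := range groups.Groups {
        fmt.Println(g.Name, "preferred:", g.PreferredVersion.GroupVersion)
    }
}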
+func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient { + return &DiscoveryClient{restClient: c, LegacyPrefix: "/api"} +} + +func stringDoesntExistIn(str string, slice []string) bool { + for _, s := range slice { + if s == str { + return false + } + } + return true +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *DiscoveryClient) RESTClient() restclient.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/discovery/fake/discovery.go b/vendor/k8s.io/client-go/discovery/fake/discovery.go new file mode 100644 index 0000000000..8fbed28d97 --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/fake/discovery.go @@ -0,0 +1,97 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "fmt" + + "github.com/emicklei/go-restful/swagger" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/pkg/api/v1" + kubeversion "k8s.io/client-go/pkg/version" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/testing" +) + +type FakeDiscovery struct { + *testing.Fake +} + +func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + action := testing.ActionImpl{ + Verb: "get", + Resource: schema.GroupVersionResource{Resource: "resource"}, + } + c.Invokes(action, nil) + for _, resourceList := range c.Resources { + if resourceList.GroupVersion == groupVersion { + return resourceList, nil + } + } + return nil, fmt.Errorf("GroupVersion %q not found", groupVersion) +} + +func (c *FakeDiscovery) ServerResources() ([]*metav1.APIResourceList, error) { + action := testing.ActionImpl{ + Verb: "get", + Resource: schema.GroupVersionResource{Resource: "resource"}, + } + c.Invokes(action, nil) + return c.Resources, nil +} + +func (c *FakeDiscovery) ServerPreferredResources() ([]*metav1.APIResourceList, error) { + return nil, nil +} + +func (c *FakeDiscovery) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { + return nil, nil +} + +func (c *FakeDiscovery) ServerGroups() (*metav1.APIGroupList, error) { + return nil, nil +} + +func (c *FakeDiscovery) ServerVersion() (*version.Info, error) { + action := testing.ActionImpl{} + action.Verb = "get" + action.Resource = schema.GroupVersionResource{Resource: "version"} + + c.Invokes(action, nil) + versionInfo := kubeversion.Get() + return &versionInfo, nil +} + +func (c *FakeDiscovery) SwaggerSchema(version schema.GroupVersion) (*swagger.ApiDeclaration, error) { + action := testing.ActionImpl{} + action.Verb = "get" + if version == v1.SchemeGroupVersion { + action.Resource = schema.GroupVersionResource{Resource: "/swaggerapi/api/" + version.Version} + } else { + action.Resource = schema.GroupVersionResource{Resource: "/swaggerapi/apis/" + version.Group + "/" + version.Version} + } + + c.Invokes(action, nil) + return 
&swagger.ApiDeclaration{}, nil +} + +func (c *FakeDiscovery) RESTClient() restclient.Interface { + return nil +} diff --git a/vendor/k8s.io/client-go/discovery/helper.go b/vendor/k8s.io/client-go/discovery/helper.go new file mode 100644 index 0000000000..f184bc9295 --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/helper.go @@ -0,0 +1,162 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + apimachineryversion "k8s.io/apimachinery/pkg/version" +) + +// MatchesServerVersion queries the server to compares the build version +// (git hash) of the client with the server's build version. It returns an error +// if it failed to contact the server or if the versions are not an exact match. +func MatchesServerVersion(clientVersion apimachineryversion.Info, client DiscoveryInterface) error { + sVer, err := client.ServerVersion() + if err != nil { + return fmt.Errorf("couldn't read version from server: %v\n", err) + } + // GitVersion includes GitCommit and GitTreeState, but best to be safe? + if clientVersion.GitVersion != sVer.GitVersion || clientVersion.GitCommit != sVer.GitCommit || clientVersion.GitTreeState != sVer.GitTreeState { + return fmt.Errorf("server version (%#v) differs from client version (%#v)!\n", sVer, clientVersion) + } + + return nil +} + +// NegotiateVersion queries the server's supported api versions to find +// a version that both client and server support. +// - If no version is provided, try registered client versions in order of +// preference. +// - If version is provided and the server does not support it, +// return an error. +// TODO negotiation should be reserved for cases where we need a version for a given group. In those cases, it should return an ordered list of +// server preferences. From that list, a separate function can match from an ordered list of client versions. +// This is not what the function has ever done before, but it makes more logical sense. +func NegotiateVersion(client DiscoveryInterface, requiredGV *schema.GroupVersion, clientRegisteredGVs []schema.GroupVersion) (*schema.GroupVersion, error) { + clientVersions := sets.String{} + for _, gv := range clientRegisteredGVs { + clientVersions.Insert(gv.String()) + } + groups, err := client.ServerGroups() + if err != nil { + // This is almost always a connection error, and higher level code should treat this as a generic error, + // not a negotiation specific error. + return nil, err + } + versions := metav1.ExtractGroupVersions(groups) + serverVersions := sets.String{} + for _, v := range versions { + serverVersions.Insert(v) + } + + // If version explicitly requested verify that both client and server support it. + // If server does not support warn, but try to negotiate a lower version. 
+ if requiredGV != nil { + if !clientVersions.Has(requiredGV.String()) { + return nil, fmt.Errorf("client does not support API version %q; client supported API versions: %v", requiredGV, clientVersions) + + } + // If the server supports no versions, then we should just use the preferredGV + // This can happen because discovery fails due to 403 Forbidden errors + if len(serverVersions) == 0 { + return requiredGV, nil + } + if serverVersions.Has(requiredGV.String()) { + return requiredGV, nil + } + // If we are using an explicit config version the server does not support, fail. + return nil, fmt.Errorf("server does not support API version %q", requiredGV) + } + + for _, clientGV := range clientRegisteredGVs { + if serverVersions.Has(clientGV.String()) { + // Version was not explicitly requested in command config (--api-version). + // Ok to fall back to a supported version with a warning. + // TODO: caesarxuchao: enable the warning message when we have + // proper fix. Please refer to issue #14895. + // if len(version) != 0 { + // glog.Warningf("Server does not support API version '%s'. Falling back to '%s'.", version, clientVersion) + // } + t := clientGV + return &t, nil + } + } + + // if we have no server versions and we have no required version, choose the first clientRegisteredVersion + if len(serverVersions) == 0 && len(clientRegisteredGVs) > 0 { + return &clientRegisteredGVs[0], nil + } + + // fall back to an empty GroupVersion. Most client commands no longer respect a GroupVersion anyway + return &schema.GroupVersion{}, nil +} + +// GroupVersionResources converts APIResourceLists to the GroupVersionResources. +func GroupVersionResources(rls []*metav1.APIResourceList) (map[schema.GroupVersionResource]struct{}, error) { + gvrs := map[schema.GroupVersionResource]struct{}{} + for _, rl := range rls { + gv, err := schema.ParseGroupVersion(rl.GroupVersion) + if err != nil { + return nil, err + } + for i := range rl.APIResources { + gvrs[schema.GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: rl.APIResources[i].Name}] = struct{}{} + } + } + return gvrs, nil +} + +// FilteredBy filters by the given predicate. Empty APIResourceLists are dropped. +func FilteredBy(pred ResourcePredicate, rls []*metav1.APIResourceList) []*metav1.APIResourceList { + result := []*metav1.APIResourceList{} + for _, rl := range rls { + filtered := *rl + filtered.APIResources = nil + for i := range rl.APIResources { + if pred.Match(rl.GroupVersion, &rl.APIResources[i]) { + filtered.APIResources = append(filtered.APIResources, rl.APIResources[i]) + } + } + if filtered.APIResources != nil { + result = append(result, &filtered) + } + } + return result +} + +type ResourcePredicate interface { + Match(groupVersion string, r *metav1.APIResource) bool +} + +type ResourcePredicateFunc func(groupVersion string, r *metav1.APIResource) bool + +func (fn ResourcePredicateFunc) Match(groupVersion string, r *metav1.APIResource) bool { + return fn(groupVersion, r) +} + +// SupportsAllVerbs is a predicate matching a resource iff all given verbs are supported. +type SupportsAllVerbs struct { + Verbs []string +} + +func (p SupportsAllVerbs) Match(groupVersion string, r *metav1.APIResource) bool { + return sets.NewString([]string(r.Verbs)...).HasAll(p.Verbs...) 
+} diff --git a/vendor/k8s.io/client-go/discovery/restmapper.go b/vendor/k8s.io/client-go/discovery/restmapper.go new file mode 100644 index 0000000000..9b0769a187 --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/restmapper.go @@ -0,0 +1,320 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "fmt" + "sync" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/golang/glog" +) + +// APIGroupResources is an API group with a mapping of versions to +// resources. +type APIGroupResources struct { + Group metav1.APIGroup + // A mapping of version string to a slice of APIResources for + // that version. + VersionedResources map[string][]metav1.APIResource +} + +// NewRESTMapper returns a PriorityRESTMapper based on the discovered +// groups and resources passed in. +func NewRESTMapper(groupResources []*APIGroupResources, versionInterfaces meta.VersionInterfacesFunc) meta.RESTMapper { + unionMapper := meta.MultiRESTMapper{} + + var groupPriority []string + // /v1 is special. It should always come first + resourcePriority := []schema.GroupVersionResource{{Group: "", Version: "v1", Resource: meta.AnyResource}} + kindPriority := []schema.GroupVersionKind{{Group: "", Version: "v1", Kind: meta.AnyKind}} + + for _, group := range groupResources { + groupPriority = append(groupPriority, group.Group.Name) + + if len(group.Group.PreferredVersion.Version) != 0 { + preferred := group.Group.PreferredVersion.Version + if _, ok := group.VersionedResources[preferred]; ok { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group.Group.Name, + Version: group.Group.PreferredVersion.Version, + Resource: meta.AnyResource, + }) + + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group.Group.Name, + Version: group.Group.PreferredVersion.Version, + Kind: meta.AnyKind, + }) + } + } + + for _, discoveryVersion := range group.Group.Versions { + resources, ok := group.VersionedResources[discoveryVersion.Version] + if !ok { + continue + } + + gv := schema.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version} + versionMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv}, versionInterfaces) + + for _, resource := range resources { + scope := meta.RESTScopeNamespace + if !resource.Namespaced { + scope = meta.RESTScopeRoot + } + versionMapper.Add(gv.WithKind(resource.Kind), scope) + // TODO only do this if it supports listing + versionMapper.Add(gv.WithKind(resource.Kind+"List"), scope) + } + // TODO why is this type not in discovery (at least for "v1") + versionMapper.Add(gv.WithKind("List"), meta.RESTScopeRoot) + unionMapper = append(unionMapper, versionMapper) + } + } + + for _, group := range groupPriority { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group, + Version: meta.AnyVersion, + Resource: 
meta.AnyResource, + }) + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group, + Version: meta.AnyVersion, + Kind: meta.AnyKind, + }) + } + + return meta.PriorityRESTMapper{ + Delegate: unionMapper, + ResourcePriority: resourcePriority, + KindPriority: kindPriority, + } +} + +// GetAPIGroupResources uses the provided discovery client to gather +// discovery information and populate a slice of APIGroupResources. +func GetAPIGroupResources(cl DiscoveryInterface) ([]*APIGroupResources, error) { + apiGroups, err := cl.ServerGroups() + if err != nil { + return nil, err + } + var result []*APIGroupResources + for _, group := range apiGroups.Groups { + groupResources := &APIGroupResources{ + Group: group, + VersionedResources: make(map[string][]metav1.APIResource), + } + for _, version := range group.Versions { + resources, err := cl.ServerResourcesForGroupVersion(version.GroupVersion) + if err != nil { + if errors.IsNotFound(err) { + continue // ignore as this can race with deletion of 3rd party APIs + } + return nil, err + } + groupResources.VersionedResources[version.Version] = resources.APIResources + } + result = append(result, groupResources) + } + return result, nil +} + +// DeferredDiscoveryRESTMapper is a RESTMapper that will defer +// initialization of the RESTMapper until the first mapping is +// requested. +type DeferredDiscoveryRESTMapper struct { + initMu sync.Mutex + delegate meta.RESTMapper + cl CachedDiscoveryInterface + versionInterface meta.VersionInterfacesFunc +} + +// NewDeferredDiscoveryRESTMapper returns a +// DeferredDiscoveryRESTMapper that will lazily query the provided +// client for discovery information to do REST mappings. +func NewDeferredDiscoveryRESTMapper(cl CachedDiscoveryInterface, versionInterface meta.VersionInterfacesFunc) *DeferredDiscoveryRESTMapper { + return &DeferredDiscoveryRESTMapper{ + cl: cl, + versionInterface: versionInterface, + } +} + +func (d *DeferredDiscoveryRESTMapper) getDelegate() (meta.RESTMapper, error) { + d.initMu.Lock() + defer d.initMu.Unlock() + + if d.delegate != nil { + return d.delegate, nil + } + + groupResources, err := GetAPIGroupResources(d.cl) + if err != nil { + return nil, err + } + + d.delegate = NewRESTMapper(groupResources, d.versionInterface) + return d.delegate, err +} + +// Reset resets the internally cached Discovery information and will +// cause the next mapping request to re-discover. +func (d *DeferredDiscoveryRESTMapper) Reset() { + glog.V(5).Info("Invalidating discovery information") + + d.initMu.Lock() + defer d.initMu.Unlock() + + d.cl.Invalidate() + d.delegate = nil +} + +// KindFor takes a partial resource and returns back the single match. +// It returns an error if there are multiple matches. +func (d *DeferredDiscoveryRESTMapper) KindFor(resource schema.GroupVersionResource) (gvk schema.GroupVersionKind, err error) { + del, err := d.getDelegate() + if err != nil { + return schema.GroupVersionKind{}, err + } + gvk, err = del.KindFor(resource) + if err != nil && !d.cl.Fresh() { + d.Reset() + gvk, err = d.KindFor(resource) + } + return +} + +// KindsFor takes a partial resource and returns back the list of +// potential kinds in priority order. 
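A sketch of feeding discovery data into the RESTMapper constructed above; the helper name kindForResource is assumed, and nil stands in for the VersionInterfacesFunc only to keep the example short (KindFor lookups do not appear to use it, but full RESTMapping calls would need a real one).

package example

import (
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/discovery"
)

// kindForResource builds a RESTMapper from live discovery data and resolves a
// partial resource name (e.g. "deployments") to a GroupVersionKind.
func kindForResource(dc discovery.DiscoveryInterface, resource string) (schema.GroupVersionKind, error) {
    groupResources, err := discovery.GetAPIGroupResources(dc)
    if err != nil {
        return schema.GroupVersionKind{}, err
    }
    // nil VersionInterfacesFunc: sufficient for the lookup below in this sketch.
    mapper := discovery.NewRESTMapper(groupResources, nil)
    return mapper.KindFor(schema.GroupVersionResource{Resource: resource})
}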
+func (d *DeferredDiscoveryRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvks []schema.GroupVersionKind, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + gvks, err = del.KindsFor(resource) + if len(gvks) == 0 && !d.cl.Fresh() { + d.Reset() + gvks, err = d.KindsFor(resource) + } + return +} + +// ResourceFor takes a partial resource and returns back the single +// match. It returns an error if there are multiple matches. +func (d *DeferredDiscoveryRESTMapper) ResourceFor(input schema.GroupVersionResource) (gvr schema.GroupVersionResource, err error) { + del, err := d.getDelegate() + if err != nil { + return schema.GroupVersionResource{}, err + } + gvr, err = del.ResourceFor(input) + if err != nil && !d.cl.Fresh() { + d.Reset() + gvr, err = d.ResourceFor(input) + } + return +} + +// ResourcesFor takes a partial resource and returns back the list of +// potential resource in priority order. +func (d *DeferredDiscoveryRESTMapper) ResourcesFor(input schema.GroupVersionResource) (gvrs []schema.GroupVersionResource, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + gvrs, err = del.ResourcesFor(input) + if len(gvrs) == 0 && !d.cl.Fresh() { + d.Reset() + gvrs, err = d.ResourcesFor(input) + } + return +} + +// RESTMapping identifies a preferred resource mapping for the +// provided group kind. +func (d *DeferredDiscoveryRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (m *meta.RESTMapping, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + m, err = del.RESTMapping(gk, versions...) + if err != nil && !d.cl.Fresh() { + d.Reset() + m, err = d.RESTMapping(gk, versions...) + } + return +} + +// RESTMappings returns the RESTMappings for the provided group kind +// in a rough internal preferred order. If no kind is found, it will +// return a NoResourceMatchError. +func (d *DeferredDiscoveryRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) (ms []*meta.RESTMapping, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + ms, err = del.RESTMappings(gk, versions...) + if len(ms) == 0 && !d.cl.Fresh() { + d.Reset() + ms, err = d.RESTMappings(gk, versions...) + } + return +} + +// AliasesForResource returns whether a resource has an alias or not. +func (d *DeferredDiscoveryRESTMapper) AliasesForResource(resource string) (as []string, ok bool) { + del, err := d.getDelegate() + if err != nil { + return nil, false + } + as, ok = del.AliasesForResource(resource) + if len(as) == 0 && !d.cl.Fresh() { + d.Reset() + as, ok = d.AliasesForResource(resource) + } + return +} + +// ResourceSingularizer converts a resource name from plural to +// singular (e.g., from pods to pod). 
+func (d *DeferredDiscoveryRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { + del, err := d.getDelegate() + if err != nil { + return resource, err + } + singular, err = del.ResourceSingularizer(resource) + if err != nil && !d.cl.Fresh() { + d.Reset() + singular, err = d.ResourceSingularizer(resource) + } + return +} + +func (d *DeferredDiscoveryRESTMapper) String() string { + del, err := d.getDelegate() + if err != nil { + return fmt.Sprintf("DeferredDiscoveryRESTMapper{%v}", err) + } + return fmt.Sprintf("DeferredDiscoveryRESTMapper{\n\t%v\n}", del) +} + +// Make sure it satisfies the interface +var _ meta.RESTMapper = &DeferredDiscoveryRESTMapper{} diff --git a/vendor/k8s.io/client-go/discovery/unstructured.go b/vendor/k8s.io/client-go/discovery/unstructured.go new file mode 100644 index 0000000000..ee72d668bc --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/unstructured.go @@ -0,0 +1,95 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// UnstructuredObjectTyper provides a runtime.ObjectTyper implmentation for +// runtime.Unstructured object based on discovery information. +type UnstructuredObjectTyper struct { + registered map[schema.GroupVersionKind]bool +} + +// NewUnstructuredObjectTyper returns a runtime.ObjectTyper for +// unstructred objects based on discovery information. +func NewUnstructuredObjectTyper(groupResources []*APIGroupResources) *UnstructuredObjectTyper { + dot := &UnstructuredObjectTyper{registered: make(map[schema.GroupVersionKind]bool)} + for _, group := range groupResources { + for _, discoveryVersion := range group.Group.Versions { + resources, ok := group.VersionedResources[discoveryVersion.Version] + if !ok { + continue + } + + gv := schema.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version} + for _, resource := range resources { + dot.registered[gv.WithKind(resource.Kind)] = true + } + } + } + return dot +} + +// ObjectKind returns the group,version,kind of the provided object, or an error +// if the object in not runtime.Unstructured or has no group,version,kind +// information. +func (d *UnstructuredObjectTyper) ObjectKind(obj runtime.Object) (schema.GroupVersionKind, error) { + if _, ok := obj.(runtime.Unstructured); !ok { + return schema.GroupVersionKind{}, fmt.Errorf("type %T is invalid for dynamic object typer", obj) + } + + return obj.GetObjectKind().GroupVersionKind(), nil +} + +// ObjectKinds returns a slice of one element with the group,version,kind of the +// provided object, or an error if the object is not runtime.Unstructured or +// has no group,version,kind information. unversionedType will always be false +// because runtime.Unstructured object should always have group,version,kind +// information set. 
+func (d *UnstructuredObjectTyper) ObjectKinds(obj runtime.Object) (gvks []schema.GroupVersionKind, unversionedType bool, err error) { + gvk, err := d.ObjectKind(obj) + if err != nil { + return nil, false, err + } + + return []schema.GroupVersionKind{gvk}, false, nil +} + +// Recognizes returns true if the provided group,version,kind was in the +// discovery information. +func (d *UnstructuredObjectTyper) Recognizes(gvk schema.GroupVersionKind) bool { + return d.registered[gvk] +} + +// IsUnversioned returns false always because runtime.Unstructured objects +// should always have group,version,kind information set. ok will be true if the +// object's group,version,kind is api.Registry. +func (d *UnstructuredObjectTyper) IsUnversioned(obj runtime.Object) (unversioned bool, ok bool) { + gvk, err := d.ObjectKind(obj) + if err != nil { + return false, false + } + + return false, d.registered[gvk] +} + +var _ runtime.ObjectTyper = &UnstructuredObjectTyper{} diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go new file mode 100644 index 0000000000..5fd0c54e8f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -0,0 +1,514 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubernetes + +import ( + glog "github.com/golang/glog" + discovery "k8s.io/client-go/discovery" + appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" + authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" + authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" + authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" + authorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" + autoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1" + autoscalingv2alpha1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1" + batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" + batchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1" + certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" + policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" + rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" + rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" + settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" + storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1" + storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + CoreV1() corev1.CoreV1Interface + // Deprecated: please explicitly pick a version if possible. + Core() corev1.CoreV1Interface + AppsV1beta1() appsv1beta1.AppsV1beta1Interface + // Deprecated: please explicitly pick a version if possible. 
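A sketch of the UnstructuredObjectTyper defined in discovery/unstructured.go above, built from GetAPIGroupResources; the helper name and the extensions/v1beta1 Deployment kind are illustrative choices, not fixed by the vendored code.

package example

import (
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/discovery"
)

// recognizesDeployments reports whether the server's discovery information
// includes the extensions/v1beta1 Deployment kind.
func recognizesDeployments(dc discovery.DiscoveryInterface) (bool, error) {
    groupResources, err := discovery.GetAPIGroupResources(dc)
    if err != nil {
        return false, err
    }
    typer := discovery.NewUnstructuredObjectTyper(groupResources)
    gvk := schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Deployment"}
    return typer.Recognizes(gvk), nil
}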
+ Apps() appsv1beta1.AppsV1beta1Interface + AuthenticationV1() authenticationv1.AuthenticationV1Interface + // Deprecated: please explicitly pick a version if possible. + Authentication() authenticationv1.AuthenticationV1Interface + AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface + AuthorizationV1() authorizationv1.AuthorizationV1Interface + // Deprecated: please explicitly pick a version if possible. + Authorization() authorizationv1.AuthorizationV1Interface + AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface + AutoscalingV1() autoscalingv1.AutoscalingV1Interface + // Deprecated: please explicitly pick a version if possible. + Autoscaling() autoscalingv1.AutoscalingV1Interface + AutoscalingV2alpha1() autoscalingv2alpha1.AutoscalingV2alpha1Interface + BatchV1() batchv1.BatchV1Interface + // Deprecated: please explicitly pick a version if possible. + Batch() batchv1.BatchV1Interface + BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface + CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Certificates() certificatesv1beta1.CertificatesV1beta1Interface + ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Extensions() extensionsv1beta1.ExtensionsV1beta1Interface + PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Policy() policyv1beta1.PolicyV1beta1Interface + RbacV1beta1() rbacv1beta1.RbacV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Rbac() rbacv1beta1.RbacV1beta1Interface + RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface + SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface + // Deprecated: please explicitly pick a version if possible. + Settings() settingsv1alpha1.SettingsV1alpha1Interface + StorageV1beta1() storagev1beta1.StorageV1beta1Interface + StorageV1() storagev1.StorageV1Interface + // Deprecated: please explicitly pick a version if possible. + Storage() storagev1.StorageV1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + *corev1.CoreV1Client + *appsv1beta1.AppsV1beta1Client + *authenticationv1.AuthenticationV1Client + *authenticationv1beta1.AuthenticationV1beta1Client + *authorizationv1.AuthorizationV1Client + *authorizationv1beta1.AuthorizationV1beta1Client + *autoscalingv1.AutoscalingV1Client + *autoscalingv2alpha1.AutoscalingV2alpha1Client + *batchv1.BatchV1Client + *batchv2alpha1.BatchV2alpha1Client + *certificatesv1beta1.CertificatesV1beta1Client + *extensionsv1beta1.ExtensionsV1beta1Client + *policyv1beta1.PolicyV1beta1Client + *rbacv1beta1.RbacV1beta1Client + *rbacv1alpha1.RbacV1alpha1Client + *settingsv1alpha1.SettingsV1alpha1Client + *storagev1beta1.StorageV1beta1Client + *storagev1.StorageV1Client +} + +// CoreV1 retrieves the CoreV1Client +func (c *Clientset) CoreV1() corev1.CoreV1Interface { + if c == nil { + return nil + } + return c.CoreV1Client +} + +// Deprecated: Core retrieves the default version of CoreClient. +// Please explicitly pick a version. 
+func (c *Clientset) Core() corev1.CoreV1Interface { + if c == nil { + return nil + } + return c.CoreV1Client +} + +// AppsV1beta1 retrieves the AppsV1beta1Client +func (c *Clientset) AppsV1beta1() appsv1beta1.AppsV1beta1Interface { + if c == nil { + return nil + } + return c.AppsV1beta1Client +} + +// Deprecated: Apps retrieves the default version of AppsClient. +// Please explicitly pick a version. +func (c *Clientset) Apps() appsv1beta1.AppsV1beta1Interface { + if c == nil { + return nil + } + return c.AppsV1beta1Client +} + +// AuthenticationV1 retrieves the AuthenticationV1Client +func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interface { + if c == nil { + return nil + } + return c.AuthenticationV1Client +} + +// Deprecated: Authentication retrieves the default version of AuthenticationClient. +// Please explicitly pick a version. +func (c *Clientset) Authentication() authenticationv1.AuthenticationV1Interface { + if c == nil { + return nil + } + return c.AuthenticationV1Client +} + +// AuthenticationV1beta1 retrieves the AuthenticationV1beta1Client +func (c *Clientset) AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface { + if c == nil { + return nil + } + return c.AuthenticationV1beta1Client +} + +// AuthorizationV1 retrieves the AuthorizationV1Client +func (c *Clientset) AuthorizationV1() authorizationv1.AuthorizationV1Interface { + if c == nil { + return nil + } + return c.AuthorizationV1Client +} + +// Deprecated: Authorization retrieves the default version of AuthorizationClient. +// Please explicitly pick a version. +func (c *Clientset) Authorization() authorizationv1.AuthorizationV1Interface { + if c == nil { + return nil + } + return c.AuthorizationV1Client +} + +// AuthorizationV1beta1 retrieves the AuthorizationV1beta1Client +func (c *Clientset) AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface { + if c == nil { + return nil + } + return c.AuthorizationV1beta1Client +} + +// AutoscalingV1 retrieves the AutoscalingV1Client +func (c *Clientset) AutoscalingV1() autoscalingv1.AutoscalingV1Interface { + if c == nil { + return nil + } + return c.AutoscalingV1Client +} + +// Deprecated: Autoscaling retrieves the default version of AutoscalingClient. +// Please explicitly pick a version. +func (c *Clientset) Autoscaling() autoscalingv1.AutoscalingV1Interface { + if c == nil { + return nil + } + return c.AutoscalingV1Client +} + +// AutoscalingV2alpha1 retrieves the AutoscalingV2alpha1Client +func (c *Clientset) AutoscalingV2alpha1() autoscalingv2alpha1.AutoscalingV2alpha1Interface { + if c == nil { + return nil + } + return c.AutoscalingV2alpha1Client +} + +// BatchV1 retrieves the BatchV1Client +func (c *Clientset) BatchV1() batchv1.BatchV1Interface { + if c == nil { + return nil + } + return c.BatchV1Client +} + +// Deprecated: Batch retrieves the default version of BatchClient. +// Please explicitly pick a version. 
+func (c *Clientset) Batch() batchv1.BatchV1Interface { + if c == nil { + return nil + } + return c.BatchV1Client +} + +// BatchV2alpha1 retrieves the BatchV2alpha1Client +func (c *Clientset) BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface { + if c == nil { + return nil + } + return c.BatchV2alpha1Client +} + +// CertificatesV1beta1 retrieves the CertificatesV1beta1Client +func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface { + if c == nil { + return nil + } + return c.CertificatesV1beta1Client +} + +// Deprecated: Certificates retrieves the default version of CertificatesClient. +// Please explicitly pick a version. +func (c *Clientset) Certificates() certificatesv1beta1.CertificatesV1beta1Interface { + if c == nil { + return nil + } + return c.CertificatesV1beta1Client +} + +// ExtensionsV1beta1 retrieves the ExtensionsV1beta1Client +func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface { + if c == nil { + return nil + } + return c.ExtensionsV1beta1Client +} + +// Deprecated: Extensions retrieves the default version of ExtensionsClient. +// Please explicitly pick a version. +func (c *Clientset) Extensions() extensionsv1beta1.ExtensionsV1beta1Interface { + if c == nil { + return nil + } + return c.ExtensionsV1beta1Client +} + +// PolicyV1beta1 retrieves the PolicyV1beta1Client +func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface { + if c == nil { + return nil + } + return c.PolicyV1beta1Client +} + +// Deprecated: Policy retrieves the default version of PolicyClient. +// Please explicitly pick a version. +func (c *Clientset) Policy() policyv1beta1.PolicyV1beta1Interface { + if c == nil { + return nil + } + return c.PolicyV1beta1Client +} + +// RbacV1beta1 retrieves the RbacV1beta1Client +func (c *Clientset) RbacV1beta1() rbacv1beta1.RbacV1beta1Interface { + if c == nil { + return nil + } + return c.RbacV1beta1Client +} + +// Deprecated: Rbac retrieves the default version of RbacClient. +// Please explicitly pick a version. +func (c *Clientset) Rbac() rbacv1beta1.RbacV1beta1Interface { + if c == nil { + return nil + } + return c.RbacV1beta1Client +} + +// RbacV1alpha1 retrieves the RbacV1alpha1Client +func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { + if c == nil { + return nil + } + return c.RbacV1alpha1Client +} + +// SettingsV1alpha1 retrieves the SettingsV1alpha1Client +func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface { + if c == nil { + return nil + } + return c.SettingsV1alpha1Client +} + +// Deprecated: Settings retrieves the default version of SettingsClient. +// Please explicitly pick a version. +func (c *Clientset) Settings() settingsv1alpha1.SettingsV1alpha1Interface { + if c == nil { + return nil + } + return c.SettingsV1alpha1Client +} + +// StorageV1beta1 retrieves the StorageV1beta1Client +func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface { + if c == nil { + return nil + } + return c.StorageV1beta1Client +} + +// StorageV1 retrieves the StorageV1Client +func (c *Clientset) StorageV1() storagev1.StorageV1Interface { + if c == nil { + return nil + } + return c.StorageV1Client +} + +// Deprecated: Storage retrieves the default version of StorageClient. +// Please explicitly pick a version. 
+func (c *Clientset) Storage() storagev1.StorageV1Interface { + if c == nil { + return nil + } + return c.StorageV1Client +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.CoreV1Client, err = corev1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.AppsV1beta1Client, err = appsv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.AuthenticationV1Client, err = authenticationv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.AuthenticationV1beta1Client, err = authenticationv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.AuthorizationV1Client, err = authorizationv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.AuthorizationV1beta1Client, err = authorizationv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.AutoscalingV1Client, err = autoscalingv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.AutoscalingV2alpha1Client, err = autoscalingv2alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.BatchV1Client, err = batchv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.BatchV2alpha1Client, err = batchv2alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.CertificatesV1beta1Client, err = certificatesv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.ExtensionsV1beta1Client, err = extensionsv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.PolicyV1beta1Client, err = policyv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.RbacV1beta1Client, err = rbacv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.RbacV1alpha1Client, err = rbacv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.SettingsV1alpha1Client, err = settingsv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.StorageV1beta1Client, err = storagev1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.StorageV1Client, err = storagev1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + glog.Errorf("failed to create the DiscoveryClient: %v", err) + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. 
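A minimal sketch of building the generated Clientset above with NewForConfig and reaching its Discovery client; the Host, QPS and Burst values are placeholder assumptions, and a real program would usually derive the config from kubeconfig or rest.InClusterConfig().

package main

import (
    "fmt"
    "log"

    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
)

func main() {
    // Placeholder config; NewForConfig wires a rate limiter from QPS/Burst.
    cfg := &rest.Config{Host: "https://127.0.0.1:6443", QPS: 50, Burst: 100}

    clientset, err := kubernetes.NewForConfig(cfg)
    if err != nil {
        log.Fatal(err)
    }

    // Discovery() is always available alongside the per-group typed clients.
    v, err := clientset.Discovery().ServerVersion()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("connected to Kubernetes", v.GitVersion)
    _ = clientset.CoreV1() // typed clients are accessed per group/version
}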
+func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.CoreV1Client = corev1.NewForConfigOrDie(c) + cs.AppsV1beta1Client = appsv1beta1.NewForConfigOrDie(c) + cs.AuthenticationV1Client = authenticationv1.NewForConfigOrDie(c) + cs.AuthenticationV1beta1Client = authenticationv1beta1.NewForConfigOrDie(c) + cs.AuthorizationV1Client = authorizationv1.NewForConfigOrDie(c) + cs.AuthorizationV1beta1Client = authorizationv1beta1.NewForConfigOrDie(c) + cs.AutoscalingV1Client = autoscalingv1.NewForConfigOrDie(c) + cs.AutoscalingV2alpha1Client = autoscalingv2alpha1.NewForConfigOrDie(c) + cs.BatchV1Client = batchv1.NewForConfigOrDie(c) + cs.BatchV2alpha1Client = batchv2alpha1.NewForConfigOrDie(c) + cs.CertificatesV1beta1Client = certificatesv1beta1.NewForConfigOrDie(c) + cs.ExtensionsV1beta1Client = extensionsv1beta1.NewForConfigOrDie(c) + cs.PolicyV1beta1Client = policyv1beta1.NewForConfigOrDie(c) + cs.RbacV1beta1Client = rbacv1beta1.NewForConfigOrDie(c) + cs.RbacV1alpha1Client = rbacv1alpha1.NewForConfigOrDie(c) + cs.SettingsV1alpha1Client = settingsv1alpha1.NewForConfigOrDie(c) + cs.StorageV1beta1Client = storagev1beta1.NewForConfigOrDie(c) + cs.StorageV1Client = storagev1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.CoreV1Client = corev1.New(c) + cs.AppsV1beta1Client = appsv1beta1.New(c) + cs.AuthenticationV1Client = authenticationv1.New(c) + cs.AuthenticationV1beta1Client = authenticationv1beta1.New(c) + cs.AuthorizationV1Client = authorizationv1.New(c) + cs.AuthorizationV1beta1Client = authorizationv1beta1.New(c) + cs.AutoscalingV1Client = autoscalingv1.New(c) + cs.AutoscalingV2alpha1Client = autoscalingv2alpha1.New(c) + cs.BatchV1Client = batchv1.New(c) + cs.BatchV2alpha1Client = batchv2alpha1.New(c) + cs.CertificatesV1beta1Client = certificatesv1beta1.New(c) + cs.ExtensionsV1beta1Client = extensionsv1beta1.New(c) + cs.PolicyV1beta1Client = policyv1beta1.New(c) + cs.RbacV1beta1Client = rbacv1beta1.New(c) + cs.RbacV1alpha1Client = rbacv1alpha1.New(c) + cs.SettingsV1alpha1Client = settingsv1alpha1.New(c) + cs.StorageV1beta1Client = storagev1beta1.New(c) + cs.StorageV1Client = storagev1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/k8s.io/client-go/kubernetes/doc.go b/vendor/k8s.io/client-go/kubernetes/doc.go new file mode 100644 index 0000000000..2af84c6699 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated clientset. 
+package kubernetes diff --git a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go new file mode 100644 index 0000000000..d8a4b83526 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go @@ -0,0 +1,245 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + kubernetes "k8s.io/client-go/kubernetes" + appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" + fakeappsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake" + authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" + fakeauthenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1/fake" + authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" + fakeauthenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake" + authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" + fakeauthorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1/fake" + authorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" + fakeauthorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake" + autoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1" + fakeautoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake" + autoscalingv2alpha1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1" + fakeautoscalingv2alpha1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake" + batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" + fakebatchv1 "k8s.io/client-go/kubernetes/typed/batch/v1/fake" + batchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1" + fakebatchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake" + certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" + fakecertificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + fakecorev1 "k8s.io/client-go/kubernetes/typed/core/v1/fake" + extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" + fakeextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake" + policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" + fakepolicyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake" + rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" + fakerbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake" + rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" + fakerbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake" + settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" + fakesettingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake" + storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1" + 
fakestoragev1 "k8s.io/client-go/kubernetes/typed/storage/v1/fake" + storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" + fakestoragev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(registry, scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + fakePtr := testing.Fake{} + fakePtr.AddReactor("*", "*", testing.ObjectReaction(o, registry.RESTMapper())) + + fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + + return &Clientset{fakePtr} +} + +// Clientset implements kubernetes.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return &fakediscovery.FakeDiscovery{Fake: &c.Fake} +} + +var _ kubernetes.Interface = &Clientset{} + +// CoreV1 retrieves the CoreV1Client +func (c *Clientset) CoreV1() corev1.CoreV1Interface { + return &fakecorev1.FakeCoreV1{Fake: &c.Fake} +} + +// Core retrieves the CoreV1Client +func (c *Clientset) Core() corev1.CoreV1Interface { + return &fakecorev1.FakeCoreV1{Fake: &c.Fake} +} + +// AppsV1beta1 retrieves the AppsV1beta1Client +func (c *Clientset) AppsV1beta1() appsv1beta1.AppsV1beta1Interface { + return &fakeappsv1beta1.FakeAppsV1beta1{Fake: &c.Fake} +} + +// Apps retrieves the AppsV1beta1Client +func (c *Clientset) Apps() appsv1beta1.AppsV1beta1Interface { + return &fakeappsv1beta1.FakeAppsV1beta1{Fake: &c.Fake} +} + +// AuthenticationV1 retrieves the AuthenticationV1Client +func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interface { + return &fakeauthenticationv1.FakeAuthenticationV1{Fake: &c.Fake} +} + +// Authentication retrieves the AuthenticationV1Client +func (c *Clientset) Authentication() authenticationv1.AuthenticationV1Interface { + return &fakeauthenticationv1.FakeAuthenticationV1{Fake: &c.Fake} +} + +// AuthenticationV1beta1 retrieves the AuthenticationV1beta1Client +func (c *Clientset) AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface { + return &fakeauthenticationv1beta1.FakeAuthenticationV1beta1{Fake: &c.Fake} +} + +// AuthorizationV1 retrieves the AuthorizationV1Client +func (c *Clientset) AuthorizationV1() authorizationv1.AuthorizationV1Interface { + return &fakeauthorizationv1.FakeAuthorizationV1{Fake: &c.Fake} +} + +// Authorization retrieves the AuthorizationV1Client +func (c *Clientset) Authorization() authorizationv1.AuthorizationV1Interface { + return &fakeauthorizationv1.FakeAuthorizationV1{Fake: &c.Fake} +} + +// AuthorizationV1beta1 retrieves the AuthorizationV1beta1Client +func (c *Clientset) AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface { + return &fakeauthorizationv1beta1.FakeAuthorizationV1beta1{Fake: &c.Fake} +} + +// AutoscalingV1 retrieves the AutoscalingV1Client +func (c *Clientset) AutoscalingV1() autoscalingv1.AutoscalingV1Interface { + 
return &fakeautoscalingv1.FakeAutoscalingV1{Fake: &c.Fake} +} + +// Autoscaling retrieves the AutoscalingV1Client +func (c *Clientset) Autoscaling() autoscalingv1.AutoscalingV1Interface { + return &fakeautoscalingv1.FakeAutoscalingV1{Fake: &c.Fake} +} + +// AutoscalingV2alpha1 retrieves the AutoscalingV2alpha1Client +func (c *Clientset) AutoscalingV2alpha1() autoscalingv2alpha1.AutoscalingV2alpha1Interface { + return &fakeautoscalingv2alpha1.FakeAutoscalingV2alpha1{Fake: &c.Fake} +} + +// BatchV1 retrieves the BatchV1Client +func (c *Clientset) BatchV1() batchv1.BatchV1Interface { + return &fakebatchv1.FakeBatchV1{Fake: &c.Fake} +} + +// Batch retrieves the BatchV1Client +func (c *Clientset) Batch() batchv1.BatchV1Interface { + return &fakebatchv1.FakeBatchV1{Fake: &c.Fake} +} + +// BatchV2alpha1 retrieves the BatchV2alpha1Client +func (c *Clientset) BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface { + return &fakebatchv2alpha1.FakeBatchV2alpha1{Fake: &c.Fake} +} + +// CertificatesV1beta1 retrieves the CertificatesV1beta1Client +func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface { + return &fakecertificatesv1beta1.FakeCertificatesV1beta1{Fake: &c.Fake} +} + +// Certificates retrieves the CertificatesV1beta1Client +func (c *Clientset) Certificates() certificatesv1beta1.CertificatesV1beta1Interface { + return &fakecertificatesv1beta1.FakeCertificatesV1beta1{Fake: &c.Fake} +} + +// ExtensionsV1beta1 retrieves the ExtensionsV1beta1Client +func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface { + return &fakeextensionsv1beta1.FakeExtensionsV1beta1{Fake: &c.Fake} +} + +// Extensions retrieves the ExtensionsV1beta1Client +func (c *Clientset) Extensions() extensionsv1beta1.ExtensionsV1beta1Interface { + return &fakeextensionsv1beta1.FakeExtensionsV1beta1{Fake: &c.Fake} +} + +// PolicyV1beta1 retrieves the PolicyV1beta1Client +func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface { + return &fakepolicyv1beta1.FakePolicyV1beta1{Fake: &c.Fake} +} + +// Policy retrieves the PolicyV1beta1Client +func (c *Clientset) Policy() policyv1beta1.PolicyV1beta1Interface { + return &fakepolicyv1beta1.FakePolicyV1beta1{Fake: &c.Fake} +} + +// RbacV1beta1 retrieves the RbacV1beta1Client +func (c *Clientset) RbacV1beta1() rbacv1beta1.RbacV1beta1Interface { + return &fakerbacv1beta1.FakeRbacV1beta1{Fake: &c.Fake} +} + +// Rbac retrieves the RbacV1beta1Client +func (c *Clientset) Rbac() rbacv1beta1.RbacV1beta1Interface { + return &fakerbacv1beta1.FakeRbacV1beta1{Fake: &c.Fake} +} + +// RbacV1alpha1 retrieves the RbacV1alpha1Client +func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { + return &fakerbacv1alpha1.FakeRbacV1alpha1{Fake: &c.Fake} +} + +// SettingsV1alpha1 retrieves the SettingsV1alpha1Client +func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface { + return &fakesettingsv1alpha1.FakeSettingsV1alpha1{Fake: &c.Fake} +} + +// Settings retrieves the SettingsV1alpha1Client +func (c *Clientset) Settings() settingsv1alpha1.SettingsV1alpha1Interface { + return &fakesettingsv1alpha1.FakeSettingsV1alpha1{Fake: &c.Fake} +} + +// StorageV1beta1 retrieves the StorageV1beta1Client +func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface { + return &fakestoragev1beta1.FakeStorageV1beta1{Fake: &c.Fake} +} + +// StorageV1 retrieves the StorageV1Client +func (c *Clientset) StorageV1() storagev1.StorageV1Interface { + return &fakestoragev1.FakeStorageV1{Fake: &c.Fake} 
+} + +// Storage retrieves the StorageV1Client +func (c *Clientset) Storage() storagev1.StorageV1Interface { + return &fakestoragev1.FakeStorageV1{Fake: &c.Fake} +} diff --git a/vendor/k8s.io/client-go/kubernetes/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/fake/doc.go new file mode 100644 index 0000000000..5f565b3c8d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated fake clientset. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/fake/register.go b/vendor/k8s.io/client-go/kubernetes/fake/register.go new file mode 100644 index 0000000000..3ad666057c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/fake/register.go @@ -0,0 +1,68 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + announced "k8s.io/apimachinery/pkg/apimachinery/announced" + registered "k8s.io/apimachinery/pkg/apimachinery/registered" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + core "k8s.io/client-go/pkg/api/install" + apps "k8s.io/client-go/pkg/apis/apps/install" + authentication "k8s.io/client-go/pkg/apis/authentication/install" + authorization "k8s.io/client-go/pkg/apis/authorization/install" + autoscaling "k8s.io/client-go/pkg/apis/autoscaling/install" + batch "k8s.io/client-go/pkg/apis/batch/install" + certificates "k8s.io/client-go/pkg/apis/certificates/install" + extensions "k8s.io/client-go/pkg/apis/extensions/install" + policy "k8s.io/client-go/pkg/apis/policy/install" + rbac "k8s.io/client-go/pkg/apis/rbac/install" + settings "k8s.io/client-go/pkg/apis/settings/install" + storage "k8s.io/client-go/pkg/apis/storage/install" + os "os" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) +var parameterCodec = runtime.NewParameterCodec(scheme) + +var registry = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS")) +var groupFactoryRegistry = make(announced.APIGroupFactoryRegistry) + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + Install(groupFactoryRegistry, registry, scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + core.Install(groupFactoryRegistry, registry, scheme) + apps.Install(groupFactoryRegistry, registry, scheme) + authentication.Install(groupFactoryRegistry, registry, scheme) + authorization.Install(groupFactoryRegistry, registry, scheme) + autoscaling.Install(groupFactoryRegistry, registry, scheme) + batch.Install(groupFactoryRegistry, registry, scheme) + certificates.Install(groupFactoryRegistry, registry, scheme) + extensions.Install(groupFactoryRegistry, registry, scheme) + policy.Install(groupFactoryRegistry, registry, scheme) + rbac.Install(groupFactoryRegistry, registry, scheme) + settings.Install(groupFactoryRegistry, registry, scheme) + storage.Install(groupFactoryRegistry, registry, scheme) + +} diff --git a/vendor/k8s.io/client-go/kubernetes/import_known_versions.go b/vendor/k8s.io/client-go/kubernetes/import_known_versions.go new file mode 100644 index 0000000000..297466b01a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/import_known_versions.go @@ -0,0 +1,42 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubernetes + +// These imports are the API groups the client will support. 
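The fake package's register above installs the same API groups into its private scheme and registry, which is what lets NewSimpleClientset decode and track the objects it is seeded with. A sketch of how the fake clientset is typically used in a unit test (the object names and assertions are illustrative):

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/pkg/api/v1"
)

func TestPodLookup(t *testing.T) {
	// Seed the object tracker with one pod; the registered reactors then
	// answer reads and writes from that in-memory state.
	cs := fake.NewSimpleClientset(&v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
	})

	pod, err := cs.CoreV1().Pods("default").Get("web", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if pod.Name != "web" {
		t.Fatalf("got pod %q, want %q", pod.Name, "web")
	}
}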
+import ( + "fmt" + + "k8s.io/client-go/pkg/api" + _ "k8s.io/client-go/pkg/api/install" + _ "k8s.io/client-go/pkg/apis/apps/install" + _ "k8s.io/client-go/pkg/apis/authentication/install" + _ "k8s.io/client-go/pkg/apis/authorization/install" + _ "k8s.io/client-go/pkg/apis/autoscaling/install" + _ "k8s.io/client-go/pkg/apis/batch/install" + _ "k8s.io/client-go/pkg/apis/certificates/install" + _ "k8s.io/client-go/pkg/apis/extensions/install" + _ "k8s.io/client-go/pkg/apis/policy/install" + _ "k8s.io/client-go/pkg/apis/rbac/install" + _ "k8s.io/client-go/pkg/apis/settings/install" + _ "k8s.io/client-go/pkg/apis/storage/install" +) + +func init() { + if missingVersions := api.Registry.ValidateEnvRequestedVersions(); len(missingVersions) != 0 { + panic(fmt.Sprintf("KUBE_API_VERSIONS contains versions that are not installed: %q.", missingVersions)) + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/doc.go b/vendor/k8s.io/client-go/kubernetes/scheme/doc.go new file mode 100644 index 0000000000..5d8ec824f0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go new file mode 100644 index 0000000000..1fe5dbe4ce --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package scheme + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + corev1 "k8s.io/client-go/pkg/api/v1" + appsv1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1" + authenticationv1 "k8s.io/client-go/pkg/apis/authentication/v1" + authenticationv1beta1 "k8s.io/client-go/pkg/apis/authentication/v1beta1" + authorizationv1 "k8s.io/client-go/pkg/apis/authorization/v1" + authorizationv1beta1 "k8s.io/client-go/pkg/apis/authorization/v1beta1" + autoscalingv1 "k8s.io/client-go/pkg/apis/autoscaling/v1" + autoscalingv2alpha1 "k8s.io/client-go/pkg/apis/autoscaling/v2alpha1" + batchv1 "k8s.io/client-go/pkg/apis/batch/v1" + batchv2alpha1 "k8s.io/client-go/pkg/apis/batch/v2alpha1" + certificatesv1beta1 "k8s.io/client-go/pkg/apis/certificates/v1beta1" + extensionsv1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + policyv1beta1 "k8s.io/client-go/pkg/apis/policy/v1beta1" + rbacv1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" + rbacv1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" + settingsv1alpha1 "k8s.io/client-go/pkg/apis/settings/v1alpha1" + storagev1 "k8s.io/client-go/pkg/apis/storage/v1" + storagev1beta1 "k8s.io/client-go/pkg/apis/storage/v1beta1" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + AddToScheme(Scheme) +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kuberentes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +func AddToScheme(scheme *runtime.Scheme) { + corev1.AddToScheme(scheme) + appsv1beta1.AddToScheme(scheme) + authenticationv1.AddToScheme(scheme) + authenticationv1beta1.AddToScheme(scheme) + authorizationv1.AddToScheme(scheme) + authorizationv1beta1.AddToScheme(scheme) + autoscalingv1.AddToScheme(scheme) + autoscalingv2alpha1.AddToScheme(scheme) + batchv1.AddToScheme(scheme) + batchv2alpha1.AddToScheme(scheme) + certificatesv1beta1.AddToScheme(scheme) + extensionsv1beta1.AddToScheme(scheme) + policyv1beta1.AddToScheme(scheme) + rbacv1beta1.AddToScheme(scheme) + rbacv1alpha1.AddToScheme(scheme) + settingsv1alpha1.AddToScheme(scheme) + storagev1beta1.AddToScheme(scheme) + storagev1.AddToScheme(scheme) + +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go new file mode 100644 index 0000000000..a520f87fce --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go @@ -0,0 +1,98 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1" + rest "k8s.io/client-go/rest" +) + +type AppsV1beta1Interface interface { + RESTClient() rest.Interface + DeploymentsGetter + ScalesGetter + StatefulSetsGetter +} + +// AppsV1beta1Client is used to interact with features provided by the apps group. +type AppsV1beta1Client struct { + restClient rest.Interface +} + +func (c *AppsV1beta1Client) Deployments(namespace string) DeploymentInterface { + return newDeployments(c, namespace) +} + +func (c *AppsV1beta1Client) Scales(namespace string) ScaleInterface { + return newScales(c, namespace) +} + +func (c *AppsV1beta1Client) StatefulSets(namespace string) StatefulSetInterface { + return newStatefulSets(c, namespace) +} + +// NewForConfig creates a new AppsV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*AppsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AppsV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new AppsV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AppsV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AppsV1beta1Client for the given RESTClient. +func New(c rest.Interface) *AppsV1beta1Client { + return &AppsV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AppsV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go new file mode 100644 index 0000000000..b30965ee78 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1" + rest "k8s.io/client-go/rest" +) + +// DeploymentsGetter has a method to return a DeploymentInterface. +// A group's client should implement this interface. +type DeploymentsGetter interface { + Deployments(namespace string) DeploymentInterface +} + +// DeploymentInterface has methods to work with Deployment resources. +type DeploymentInterface interface { + Create(*v1beta1.Deployment) (*v1beta1.Deployment, error) + Update(*v1beta1.Deployment) (*v1beta1.Deployment, error) + UpdateStatus(*v1beta1.Deployment) (*v1beta1.Deployment, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Deployment, error) + List(opts v1.ListOptions) (*v1beta1.DeploymentList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) + DeploymentExpansion +} + +// deployments implements DeploymentInterface +type deployments struct { + client rest.Interface + ns string +} + +// newDeployments returns a Deployments +func newDeployments(c *AppsV1beta1Client, namespace string) *deployments { + return &deployments{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deployments"). + Body(deployment). + Do(). + Into(result) + return +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + Body(deployment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + SubResource("status"). + Body(deployment). + Do(). + Into(result) + return +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). 
+ Error() +} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { + result = &v1beta1.DeploymentList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched deployment. +func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("deployments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go new file mode 100644 index 0000000000..11b5238972 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go new file mode 100644 index 0000000000..c6548330a0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. 
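The deployments client above is a thin wrapper over the REST verbs: each method builds a request against the namespaced "deployments" resource and decodes the response into the apps/v1beta1 types. A short illustrative sketch of driving it through the generated Clientset (the namespace argument and label selector are assumptions of the example):

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// reportDeployments lists apps/v1beta1 Deployments matching a label selector
// and prints the replica count the controller is currently tracking.
func reportDeployments(clientset *kubernetes.Clientset, namespace string) error {
	deployments := clientset.AppsV1beta1().Deployments(namespace)

	list, err := deployments.List(metav1.ListOptions{LabelSelector: "app=echo"})
	if err != nil {
		return err
	}
	for _, d := range list.Items {
		fmt.Printf("%s: %d replicas\n", d.Name, d.Status.Replicas)
	}
	return nil
}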
+ +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go new file mode 100644 index 0000000000..2a0bede6da --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeAppsV1beta1 struct { + *testing.Fake +} + +func (c *FakeAppsV1beta1) Deployments(namespace string) v1beta1.DeploymentInterface { + return &FakeDeployments{c, namespace} +} + +func (c *FakeAppsV1beta1) Scales(namespace string) v1beta1.ScaleInterface { + return &FakeScales{c, namespace} +} + +func (c *FakeAppsV1beta1) StatefulSets(namespace string) v1beta1.StatefulSetInterface { + return &FakeStatefulSets{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAppsV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go new file mode 100644 index 0000000000..a929ac8b05 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakeDeployments implements DeploymentInterface +type FakeDeployments struct { + Fake *FakeAppsV1beta1 + ns string +} + +var deploymentsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta1", Resource: "deployments"} + +func (c *FakeDeployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +func (c *FakeDeployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +func (c *FakeDeployments) UpdateStatus(deployment *v1beta1.Deployment) (*v1beta1.Deployment, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +func (c *FakeDeployments) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) + + return err +} + +func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{}) + return err +} + +func (c *FakeDeployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +func (c *FakeDeployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(deploymentsResource, c.ns, opts), &v1beta1.DeploymentList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.DeploymentList{} + for _, item := range obj.(*v1beta1.DeploymentList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *FakeDeployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched deployment. +func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, data, subresources...), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go new file mode 100644 index 0000000000..8abb429acc --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +// FakeScales implements ScaleInterface +type FakeScales struct { + Fake *FakeAppsV1beta1 + ns string +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go new file mode 100644 index 0000000000..430e4a224a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakeStatefulSets implements StatefulSetInterface +type FakeStatefulSets struct { + Fake *FakeAppsV1beta1 + ns string +} + +var statefulsetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta1", Resource: "statefulsets"} + +func (c *FakeStatefulSets) Create(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1beta1.StatefulSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.StatefulSet), err +} + +func (c *FakeStatefulSets) Update(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1beta1.StatefulSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.StatefulSet), err +} + +func (c *FakeStatefulSets) UpdateStatus(statefulSet *v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1beta1.StatefulSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.StatefulSet), err +} + +func (c *FakeStatefulSets) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewDeleteAction(statefulsetsResource, c.ns, name), &v1beta1.StatefulSet{}) + + return err +} + +func (c *FakeStatefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.StatefulSetList{}) + return err +} + +func (c *FakeStatefulSets) Get(name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &v1beta1.StatefulSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.StatefulSet), err +} + +func (c *FakeStatefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(statefulsetsResource, c.ns, opts), &v1beta1.StatefulSetList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.StatefulSetList{} + for _, item := range obj.(*v1beta1.StatefulSetList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested statefulSets. +func (c *FakeStatefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched statefulSet. +func (c *FakeStatefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, data, subresources...), &v1beta1.StatefulSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.StatefulSet), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..deca5c8663 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +type DeploymentExpansion interface{} + +type ScaleExpansion interface{} + +type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go new file mode 100644 index 0000000000..d3bf9e1032 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// ScalesGetter has a method to return a ScaleInterface. +// A group's client should implement this interface. +type ScalesGetter interface { + Scales(namespace string) ScaleInterface +} + +// ScaleInterface has methods to work with Scale resources. +type ScaleInterface interface { + ScaleExpansion +} + +// scales implements ScaleInterface +type scales struct { + client rest.Interface + ns string +} + +// newScales returns a Scales +func newScales(c *AppsV1beta1Client, namespace string) *scales { + return &scales{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go new file mode 100644 index 0000000000..8b5a7822fc --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1" + rest "k8s.io/client-go/rest" +) + +// StatefulSetsGetter has a method to return a StatefulSetInterface. +// A group's client should implement this interface. +type StatefulSetsGetter interface { + StatefulSets(namespace string) StatefulSetInterface +} + +// StatefulSetInterface has methods to work with StatefulSet resources. 
+type StatefulSetInterface interface { + Create(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) + Update(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) + UpdateStatus(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.StatefulSet, error) + List(opts v1.ListOptions) (*v1beta1.StatefulSetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) + StatefulSetExpansion +} + +// statefulSets implements StatefulSetInterface +type statefulSets struct { + client rest.Interface + ns string +} + +// newStatefulSets returns a StatefulSets +func newStatefulSets(c *AppsV1beta1Client, namespace string) *statefulSets { + return &statefulSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Create(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { + result = &v1beta1.StatefulSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("statefulsets"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Update(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { + result = &v1beta1.StatefulSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + Body(statefulSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *statefulSets) UpdateStatus(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { + result = &v1beta1.StatefulSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + SubResource("status"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. +func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. +func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { + result = &v1beta1.StatefulSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. +func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { + result = &v1beta1.StatefulSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested statefulSets. +func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched statefulSet. +func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) { + result = &v1beta1.StatefulSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("statefulsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go new file mode 100644 index 0000000000..bd53fc2498 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/apis/authentication/v1" + rest "k8s.io/client-go/rest" +) + +type AuthenticationV1Interface interface { + RESTClient() rest.Interface + TokenReviewsGetter +} + +// AuthenticationV1Client is used to interact with features provided by the authentication.k8s.io group. +type AuthenticationV1Client struct { + restClient rest.Interface +} + +func (c *AuthenticationV1Client) TokenReviews() TokenReviewInterface { + return newTokenReviews(c) +} + +// NewForConfig creates a new AuthenticationV1Client for the given config. +func NewForConfig(c *rest.Config) (*AuthenticationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AuthenticationV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AuthenticationV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AuthenticationV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuthenticationV1Client for the given RESTClient. 
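Watch on the statefulSets client above simply forces opts.Watch and returns the server's event stream, so a consumer reads typed events from the channel until the server closes it. A small illustrative sketch (the namespace and the way events are logged are assumptions of the example, not part of this change):

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/pkg/apis/apps/v1beta1"
)

// watchStatefulSets logs add/modify/delete events for StatefulSets in one
// namespace until the watch channel is closed.
func watchStatefulSets(clientset *kubernetes.Clientset, namespace string) error {
	w, err := clientset.AppsV1beta1().StatefulSets(namespace).Watch(metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()

	for event := range w.ResultChan() {
		set, ok := event.Object.(*v1beta1.StatefulSet)
		if !ok {
			// Error events carry a status object rather than a StatefulSet.
			continue
		}
		fmt.Printf("%s: statefulset %s/%s\n", event.Type, set.Namespace, set.Name)
	}
	return nil
}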
+func New(c rest.Interface) *AuthenticationV1Client { + return &AuthenticationV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AuthenticationV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go new file mode 100644 index 0000000000..54673bfa73 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go new file mode 100644 index 0000000000..c6548330a0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go new file mode 100644 index 0000000000..85eb00ddfd --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go @@ -0,0 +1,38 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/client-go/kubernetes/typed/authentication/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeAuthenticationV1 struct { + *testing.Fake +} + +func (c *FakeAuthenticationV1) TokenReviews() v1.TokenReviewInterface { + return &FakeTokenReviews{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAuthenticationV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go new file mode 100644 index 0000000000..b1c527a711 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +// FakeTokenReviews implements TokenReviewInterface +type FakeTokenReviews struct { + Fake *FakeAuthenticationV1 +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go new file mode 100644 index 0000000000..7bd5245980 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + authenticationapi "k8s.io/client-go/pkg/apis/authentication/v1" + core "k8s.io/client-go/testing" +) + +func (c *FakeTokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { + obj, err := c.Fake.Invokes(core.NewRootCreateAction(authenticationapi.SchemeGroupVersion.WithResource("tokenreviews"), tokenReview), &authenticationapi.TokenReview{}) + return obj.(*authenticationapi.TokenReview), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go new file mode 100644 index 0000000000..42e76d5e43 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go @@ -0,0 +1,17 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go new file mode 100644 index 0000000000..9cfef4e6ac --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + rest "k8s.io/client-go/rest" +) + +// TokenReviewsGetter has a method to return a TokenReviewInterface. +// A group's client should implement this interface. +type TokenReviewsGetter interface { + TokenReviews() TokenReviewInterface +} + +// TokenReviewInterface has methods to work with TokenReview resources. +type TokenReviewInterface interface { + TokenReviewExpansion +} + +// tokenReviews implements TokenReviewInterface +type tokenReviews struct { + client rest.Interface +} + +// newTokenReviews returns a TokenReviews +func newTokenReviews(c *AuthenticationV1Client) *tokenReviews { + return &tokenReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go new file mode 100644 index 0000000000..fb41782fe5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go @@ -0,0 +1,35 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + authenticationapi "k8s.io/client-go/pkg/apis/authentication/v1" +) + +type TokenReviewExpansion interface { + Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) +} + +func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { + result = &authenticationapi.TokenReview{} + err = c.client.Post(). + Resource("tokenreviews"). + Body(tokenReview). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go new file mode 100644 index 0000000000..419dc2cb67 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/authentication/v1beta1" + rest "k8s.io/client-go/rest" +) + +type AuthenticationV1beta1Interface interface { + RESTClient() rest.Interface + TokenReviewsGetter +} + +// AuthenticationV1beta1Client is used to interact with features provided by the authentication.k8s.io group. +type AuthenticationV1beta1Client struct { + restClient rest.Interface +} + +func (c *AuthenticationV1beta1Client) TokenReviews() TokenReviewInterface { + return newTokenReviews(c) +} + +// NewForConfig creates a new AuthenticationV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*AuthenticationV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AuthenticationV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new AuthenticationV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AuthenticationV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuthenticationV1beta1Client for the given RESTClient. 
+func New(c rest.Interface) *AuthenticationV1beta1Client { + return &AuthenticationV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AuthenticationV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go new file mode 100644 index 0000000000..11b5238972 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go new file mode 100644 index 0000000000..c6548330a0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go new file mode 100644 index 0000000000..8f66d8c5b1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go @@ -0,0 +1,38 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeAuthenticationV1beta1 struct { + *testing.Fake +} + +func (c *FakeAuthenticationV1beta1) TokenReviews() v1beta1.TokenReviewInterface { + return &FakeTokenReviews{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAuthenticationV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go new file mode 100644 index 0000000000..e8c57210b3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +// FakeTokenReviews implements TokenReviewInterface +type FakeTokenReviews struct { + Fake *FakeAuthenticationV1beta1 +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go new file mode 100644 index 0000000000..e27e092907 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + authenticationapi "k8s.io/client-go/pkg/apis/authentication/v1beta1" + core "k8s.io/client-go/testing" +) + +func (c *FakeTokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { + obj, err := c.Fake.Invokes(core.NewRootCreateAction(authenticationapi.SchemeGroupVersion.WithResource("tokenreviews"), tokenReview), &authenticationapi.TokenReview{}) + return obj.(*authenticationapi.TokenReview), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..2b7e8ca0bf --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go @@ -0,0 +1,17 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go new file mode 100644 index 0000000000..7f9f1e9fa0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// TokenReviewsGetter has a method to return a TokenReviewInterface. +// A group's client should implement this interface. +type TokenReviewsGetter interface { + TokenReviews() TokenReviewInterface +} + +// TokenReviewInterface has methods to work with TokenReview resources. +type TokenReviewInterface interface { + TokenReviewExpansion +} + +// tokenReviews implements TokenReviewInterface +type tokenReviews struct { + client rest.Interface +} + +// newTokenReviews returns a TokenReviews +func newTokenReviews(c *AuthenticationV1beta1Client) *tokenReviews { + return &tokenReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go new file mode 100644 index 0000000000..375b6f637c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go @@ -0,0 +1,35 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + authenticationapi "k8s.io/client-go/pkg/apis/authentication/v1beta1" +) + +type TokenReviewExpansion interface { + Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) +} + +func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { + result = &authenticationapi.TokenReview{} + err = c.client.Post(). + Resource("tokenreviews"). + Body(tokenReview). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go new file mode 100644 index 0000000000..af2924a300 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go @@ -0,0 +1,98 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/apis/authorization/v1" + rest "k8s.io/client-go/rest" +) + +type AuthorizationV1Interface interface { + RESTClient() rest.Interface + LocalSubjectAccessReviewsGetter + SelfSubjectAccessReviewsGetter + SubjectAccessReviewsGetter +} + +// AuthorizationV1Client is used to interact with features provided by the authorization.k8s.io group. +type AuthorizationV1Client struct { + restClient rest.Interface +} + +func (c *AuthorizationV1Client) LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface { + return newLocalSubjectAccessReviews(c, namespace) +} + +func (c *AuthorizationV1Client) SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface { + return newSelfSubjectAccessReviews(c) +} + +func (c *AuthorizationV1Client) SubjectAccessReviews() SubjectAccessReviewInterface { + return newSubjectAccessReviews(c) +} + +// NewForConfig creates a new AuthorizationV1Client for the given config. +func NewForConfig(c *rest.Config) (*AuthorizationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AuthorizationV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AuthorizationV1Client for the given config and +// panics if there is an error in the config. 
+func NewForConfigOrDie(c *rest.Config) *AuthorizationV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuthorizationV1Client for the given RESTClient. +func New(c rest.Interface) *AuthorizationV1Client { + return &AuthorizationV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AuthorizationV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go new file mode 100644 index 0000000000..54673bfa73 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go new file mode 100644 index 0000000000..c6548330a0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go new file mode 100644 index 0000000000..5107d5fc8b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/client-go/kubernetes/typed/authorization/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeAuthorizationV1 struct { + *testing.Fake +} + +func (c *FakeAuthorizationV1) LocalSubjectAccessReviews(namespace string) v1.LocalSubjectAccessReviewInterface { + return &FakeLocalSubjectAccessReviews{c, namespace} +} + +func (c *FakeAuthorizationV1) SelfSubjectAccessReviews() v1.SelfSubjectAccessReviewInterface { + return &FakeSelfSubjectAccessReviews{c} +} + +func (c *FakeAuthorizationV1) SubjectAccessReviews() v1.SubjectAccessReviewInterface { + return &FakeSubjectAccessReviews{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAuthorizationV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go new file mode 100644 index 0000000000..a49fc9b729 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +// FakeLocalSubjectAccessReviews implements LocalSubjectAccessReviewInterface +type FakeLocalSubjectAccessReviews struct { + Fake *FakeAuthorizationV1 + ns string +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go new file mode 100644 index 0000000000..e587d20328 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1" + core "k8s.io/client-go/testing" +) + +func (c *FakeLocalSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { + obj, err := c.Fake.Invokes(core.NewCreateAction(authorizationapi.SchemeGroupVersion.WithResource("localsubjectaccessreviews"), c.ns, sar), &authorizationapi.SubjectAccessReview{}) + return obj.(*authorizationapi.LocalSubjectAccessReview), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go new file mode 100644 index 0000000000..26d9011b4c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +// FakeSelfSubjectAccessReviews implements SelfSubjectAccessReviewInterface +type FakeSelfSubjectAccessReviews struct { + Fake *FakeAuthorizationV1 +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go new file mode 100644 index 0000000000..e0bbcb1efc --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1" + core "k8s.io/client-go/testing" +) + +func (c *FakeSelfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { + obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("selfsubjectaccessreviews"), sar), &authorizationapi.SelfSubjectAccessReview{}) + return obj.(*authorizationapi.SelfSubjectAccessReview), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go new file mode 100644 index 0000000000..778d06e593 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +// FakeSubjectAccessReviews implements SubjectAccessReviewInterface +type FakeSubjectAccessReviews struct { + Fake *FakeAuthorizationV1 +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go new file mode 100644 index 0000000000..823d1542c3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1" + core "k8s.io/client-go/testing" +) + +func (c *FakeSubjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { + obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("subjectaccessreviews"), sar), &authorizationapi.SubjectAccessReview{}) + return obj.(*authorizationapi.SubjectAccessReview), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go new file mode 100644 index 0000000000..42e76d5e43 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go @@ -0,0 +1,17 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go new file mode 100644 index 0000000000..b2085bceb2 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + rest "k8s.io/client-go/rest" +) + +// LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface. +// A group's client should implement this interface. +type LocalSubjectAccessReviewsGetter interface { + LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface +} + +// LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources. +type LocalSubjectAccessReviewInterface interface { + LocalSubjectAccessReviewExpansion +} + +// localSubjectAccessReviews implements LocalSubjectAccessReviewInterface +type localSubjectAccessReviews struct { + client rest.Interface + ns string +} + +// newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews +func newLocalSubjectAccessReviews(c *AuthorizationV1Client, namespace string) *localSubjectAccessReviews { + return &localSubjectAccessReviews{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go new file mode 100644 index 0000000000..c3b487c72c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go @@ -0,0 +1,36 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1" +) + +type LocalSubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) +} + +func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { + result = &authorizationapi.LocalSubjectAccessReview{} + err = c.client.Post(). + Namespace(c.ns). + Resource("localsubjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go new file mode 100644 index 0000000000..cfb019eaaf --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + rest "k8s.io/client-go/rest" +) + +// SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface. +// A group's client should implement this interface. +type SelfSubjectAccessReviewsGetter interface { + SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface +} + +// SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources. +type SelfSubjectAccessReviewInterface interface { + SelfSubjectAccessReviewExpansion +} + +// selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface +type selfSubjectAccessReviews struct { + client rest.Interface +} + +// newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews +func newSelfSubjectAccessReviews(c *AuthorizationV1Client) *selfSubjectAccessReviews { + return &selfSubjectAccessReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go new file mode 100644 index 0000000000..1076150801 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go @@ -0,0 +1,35 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1" +) + +type SelfSubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) +} + +func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { + result = &authorizationapi.SelfSubjectAccessReview{} + err = c.client.Post(). + Resource("selfsubjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go new file mode 100644 index 0000000000..08f6d60952 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + rest "k8s.io/client-go/rest" +) + +// SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface. +// A group's client should implement this interface. +type SubjectAccessReviewsGetter interface { + SubjectAccessReviews() SubjectAccessReviewInterface +} + +// SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources. +type SubjectAccessReviewInterface interface { + SubjectAccessReviewExpansion +} + +// subjectAccessReviews implements SubjectAccessReviewInterface +type subjectAccessReviews struct { + client rest.Interface +} + +// newSubjectAccessReviews returns a SubjectAccessReviews +func newSubjectAccessReviews(c *AuthorizationV1Client) *subjectAccessReviews { + return &subjectAccessReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go new file mode 100644 index 0000000000..dfdf6521ae --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go @@ -0,0 +1,36 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1" +) + +// The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface. 
+type SubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) +} + +func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { + result = &authorizationapi.SubjectAccessReview{} + err = c.client.Post(). + Resource("subjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go new file mode 100644 index 0000000000..b49b3b30b6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go @@ -0,0 +1,98 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/authorization/v1beta1" + rest "k8s.io/client-go/rest" +) + +type AuthorizationV1beta1Interface interface { + RESTClient() rest.Interface + LocalSubjectAccessReviewsGetter + SelfSubjectAccessReviewsGetter + SubjectAccessReviewsGetter +} + +// AuthorizationV1beta1Client is used to interact with features provided by the authorization.k8s.io group. +type AuthorizationV1beta1Client struct { + restClient rest.Interface +} + +func (c *AuthorizationV1beta1Client) LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface { + return newLocalSubjectAccessReviews(c, namespace) +} + +func (c *AuthorizationV1beta1Client) SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface { + return newSelfSubjectAccessReviews(c) +} + +func (c *AuthorizationV1beta1Client) SubjectAccessReviews() SubjectAccessReviewInterface { + return newSubjectAccessReviews(c) +} + +// NewForConfig creates a new AuthorizationV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*AuthorizationV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AuthorizationV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new AuthorizationV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AuthorizationV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuthorizationV1beta1Client for the given RESTClient. 
+func New(c rest.Interface) *AuthorizationV1beta1Client { + return &AuthorizationV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AuthorizationV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go new file mode 100644 index 0000000000..11b5238972 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go new file mode 100644 index 0000000000..c6548330a0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go new file mode 100644 index 0000000000..89b0b2d789 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeAuthorizationV1beta1 struct { + *testing.Fake +} + +func (c *FakeAuthorizationV1beta1) LocalSubjectAccessReviews(namespace string) v1beta1.LocalSubjectAccessReviewInterface { + return &FakeLocalSubjectAccessReviews{c, namespace} +} + +func (c *FakeAuthorizationV1beta1) SelfSubjectAccessReviews() v1beta1.SelfSubjectAccessReviewInterface { + return &FakeSelfSubjectAccessReviews{c} +} + +func (c *FakeAuthorizationV1beta1) SubjectAccessReviews() v1beta1.SubjectAccessReviewInterface { + return &FakeSubjectAccessReviews{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAuthorizationV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/fake_generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_generated_expansion.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/fake_generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_generated_expansion.go diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go new file mode 100644 index 0000000000..11987f1256 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +// FakeLocalSubjectAccessReviews implements LocalSubjectAccessReviewInterface +type FakeLocalSubjectAccessReviews struct { + Fake *FakeAuthorizationV1beta1 + ns string +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go new file mode 100644 index 0000000000..0db666db02 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1beta1" + core "k8s.io/client-go/testing" +) + +func (c *FakeLocalSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { + obj, err := c.Fake.Invokes(core.NewCreateAction(authorizationapi.SchemeGroupVersion.WithResource("localsubjectaccessreviews"), c.ns, sar), &authorizationapi.SubjectAccessReview{}) + return obj.(*authorizationapi.LocalSubjectAccessReview), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go new file mode 100644 index 0000000000..aeba438953 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +// FakeSelfSubjectAccessReviews implements SelfSubjectAccessReviewInterface +type FakeSelfSubjectAccessReviews struct { + Fake *FakeAuthorizationV1beta1 +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go new file mode 100644 index 0000000000..d1f7756380 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1beta1" + core "k8s.io/client-go/testing" +) + +func (c *FakeSelfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { + obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("selfsubjectaccessreviews"), sar), &authorizationapi.SelfSubjectAccessReview{}) + return obj.(*authorizationapi.SelfSubjectAccessReview), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go new file mode 100644 index 0000000000..4413c6d34d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +// FakeSubjectAccessReviews implements SubjectAccessReviewInterface +type FakeSubjectAccessReviews struct { + Fake *FakeAuthorizationV1beta1 +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go new file mode 100644 index 0000000000..58ced8eca6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1beta1" + core "k8s.io/client-go/testing" +) + +func (c *FakeSubjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { + obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("subjectaccessreviews"), sar), &authorizationapi.SubjectAccessReview{}) + return obj.(*authorizationapi.SubjectAccessReview), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..2b7e8ca0bf --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go @@ -0,0 +1,17 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go new file mode 100644 index 0000000000..9b8e103419 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface. +// A group's client should implement this interface. +type LocalSubjectAccessReviewsGetter interface { + LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface +} + +// LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources. 
+type LocalSubjectAccessReviewInterface interface { + LocalSubjectAccessReviewExpansion +} + +// localSubjectAccessReviews implements LocalSubjectAccessReviewInterface +type localSubjectAccessReviews struct { + client rest.Interface + ns string +} + +// newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews +func newLocalSubjectAccessReviews(c *AuthorizationV1beta1Client, namespace string) *localSubjectAccessReviews { + return &localSubjectAccessReviews{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go new file mode 100644 index 0000000000..d2ce4f0d7f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go @@ -0,0 +1,36 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1beta1" +) + +type LocalSubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) +} + +func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { + result = &authorizationapi.LocalSubjectAccessReview{} + err = c.client.Post(). + Namespace(c.ns). + Resource("localsubjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go new file mode 100644 index 0000000000..1ef3e49afe --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface. +// A group's client should implement this interface. +type SelfSubjectAccessReviewsGetter interface { + SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface +} + +// SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources. 
+type SelfSubjectAccessReviewInterface interface { + SelfSubjectAccessReviewExpansion +} + +// selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface +type selfSubjectAccessReviews struct { + client rest.Interface +} + +// newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews +func newSelfSubjectAccessReviews(c *AuthorizationV1beta1Client) *selfSubjectAccessReviews { + return &selfSubjectAccessReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go new file mode 100644 index 0000000000..d341eb14a5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go @@ -0,0 +1,35 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1beta1" +) + +type SelfSubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) +} + +func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { + result = &authorizationapi.SelfSubjectAccessReview{} + err = c.client.Post(). + Resource("selfsubjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go new file mode 100644 index 0000000000..cd60e9df6b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface. +// A group's client should implement this interface. +type SubjectAccessReviewsGetter interface { + SubjectAccessReviews() SubjectAccessReviewInterface +} + +// SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources. 
+type SubjectAccessReviewInterface interface { + SubjectAccessReviewExpansion +} + +// subjectAccessReviews implements SubjectAccessReviewInterface +type subjectAccessReviews struct { + client rest.Interface +} + +// newSubjectAccessReviews returns a SubjectAccessReviews +func newSubjectAccessReviews(c *AuthorizationV1beta1Client) *subjectAccessReviews { + return &subjectAccessReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go new file mode 100644 index 0000000000..8d03b0811a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go @@ -0,0 +1,36 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1beta1" +) + +// The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface. +type SubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) +} + +func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { + result = &authorizationapi.SubjectAccessReview{} + err = c.client.Post(). + Resource("subjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go new file mode 100644 index 0000000000..b235891c91 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/apis/autoscaling/v1" + rest "k8s.io/client-go/rest" +) + +type AutoscalingV1Interface interface { + RESTClient() rest.Interface + HorizontalPodAutoscalersGetter +} + +// AutoscalingV1Client is used to interact with features provided by the autoscaling group. 
+type AutoscalingV1Client struct { + restClient rest.Interface +} + +func (c *AutoscalingV1Client) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { + return newHorizontalPodAutoscalers(c, namespace) +} + +// NewForConfig creates a new AutoscalingV1Client for the given config. +func NewForConfig(c *rest.Config) (*AutoscalingV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AutoscalingV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AutoscalingV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AutoscalingV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AutoscalingV1Client for the given RESTClient. +func New(c rest.Interface) *AutoscalingV1Client { + return &AutoscalingV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AutoscalingV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go new file mode 100644 index 0000000000..54673bfa73 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go new file mode 100644 index 0000000000..c6548330a0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This package is generated by client-gen with custom arguments. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go new file mode 100644 index 0000000000..c3e0d37349 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go @@ -0,0 +1,38 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeAutoscalingV1 struct { + *testing.Fake +} + +func (c *FakeAutoscalingV1) HorizontalPodAutoscalers(namespace string) v1.HorizontalPodAutoscalerInterface { + return &FakeHorizontalPodAutoscalers{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAutoscalingV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go new file mode 100644 index 0000000000..66ef7be49a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1 "k8s.io/client-go/pkg/apis/autoscaling/v1" + testing "k8s.io/client-go/testing" +) + +// FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type FakeHorizontalPodAutoscalers struct { + Fake *FakeAutoscalingV1 + ns string +} + +var horizontalpodautoscalersResource = schema.GroupVersionResource{Group: "autoscaling", Version: "v1", Resource: "horizontalpodautoscalers"} + +func (c *FakeHorizontalPodAutoscalers) Create(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) Update(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *meta_v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(horizontalpodautoscalersResource, c.ns, name), &v1.HorizontalPodAutoscaler{}) + + return err +} + +func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.HorizontalPodAutoscalerList{}) + return err +} + +func (c *FakeHorizontalPodAutoscalers) Get(name string, options meta_v1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) List(opts meta_v1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(horizontalpodautoscalersResource, c.ns, opts), &v1.HorizontalPodAutoscalerList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.HorizontalPodAutoscalerList{} + for _, item := range obj.(*v1.HorizontalPodAutoscalerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *FakeHorizontalPodAutoscalers) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched horizontalPodAutoscaler. +func (c *FakeHorizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, data, subresources...), &v1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.HorizontalPodAutoscaler), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go new file mode 100644 index 0000000000..effefbd50b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +type HorizontalPodAutoscalerExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go new file mode 100644 index 0000000000..f9c790af22 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/apis/autoscaling/v1" + rest "k8s.io/client-go/rest" +) + +// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. +// A group's client should implement this interface. +type HorizontalPodAutoscalersGetter interface { + HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface +} + +// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. 
+type HorizontalPodAutoscalerInterface interface { + Create(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) + Update(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) + UpdateStatus(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.HorizontalPodAutoscaler, error) + List(opts meta_v1.ListOptions) (*v1.HorizontalPodAutoscalerList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) + HorizontalPodAutoscalerExpansion +} + +// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type horizontalPodAutoscalers struct { + client rest.Interface + ns string +} + +// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers +func newHorizontalPodAutoscalers(c *AutoscalingV1Client, namespace string) *horizontalPodAutoscalers { + return &horizontalPodAutoscalers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Post(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + SubResource("status"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. +func (c *horizontalPodAutoscalers) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *horizontalPodAutoscalers) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&listOptions, scheme.ParameterCodec). 
+ Body(options). + Do(). + Error() +} + +// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. +func (c *horizontalPodAutoscalers) Get(name string, options meta_v1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. +func (c *horizontalPodAutoscalers) List(opts meta_v1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { + result = &v1.HorizontalPodAutoscalerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *horizontalPodAutoscalers) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched horizontalPodAutoscaler. +func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/autoscaling_client.go new file mode 100644 index 0000000000..0d16aa3e97 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/autoscaling_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v2alpha1 "k8s.io/client-go/pkg/apis/autoscaling/v2alpha1" + rest "k8s.io/client-go/rest" +) + +type AutoscalingV2alpha1Interface interface { + RESTClient() rest.Interface + HorizontalPodAutoscalersGetter +} + +// AutoscalingV2alpha1Client is used to interact with features provided by the autoscaling group. +type AutoscalingV2alpha1Client struct { + restClient rest.Interface +} + +func (c *AutoscalingV2alpha1Client) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { + return newHorizontalPodAutoscalers(c, namespace) +} + +// NewForConfig creates a new AutoscalingV2alpha1Client for the given config. 
+func NewForConfig(c *rest.Config) (*AutoscalingV2alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AutoscalingV2alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new AutoscalingV2alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AutoscalingV2alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AutoscalingV2alpha1Client for the given RESTClient. +func New(c rest.Interface) *AutoscalingV2alpha1Client { + return &AutoscalingV2alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v2alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AutoscalingV2alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/doc.go new file mode 100644 index 0000000000..d29bd3f4e1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v2alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake/doc.go new file mode 100644 index 0000000000..c6548330a0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// Package fake has the automatically generated clients. 
+package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake/fake_autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake/fake_autoscaling_client.go new file mode 100644 index 0000000000..d9518a7180 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake/fake_autoscaling_client.go @@ -0,0 +1,38 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v2alpha1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeAutoscalingV2alpha1 struct { + *testing.Fake +} + +func (c *FakeAutoscalingV2alpha1) HorizontalPodAutoscalers(namespace string) v2alpha1.HorizontalPodAutoscalerInterface { + return &FakeHorizontalPodAutoscalers{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAutoscalingV2alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake/fake_horizontalpodautoscaler.go new file mode 100644 index 0000000000..f47662a0e4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake/fake_horizontalpodautoscaler.go @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v2alpha1 "k8s.io/client-go/pkg/apis/autoscaling/v2alpha1" + testing "k8s.io/client-go/testing" +) + +// FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type FakeHorizontalPodAutoscalers struct { + Fake *FakeAutoscalingV2alpha1 + ns string +} + +var horizontalpodautoscalersResource = schema.GroupVersionResource{Group: "autoscaling", Version: "v2alpha1", Resource: "horizontalpodautoscalers"} + +func (c *FakeHorizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2alpha1.HorizontalPodAutoscaler) (result *v2alpha1.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2alpha1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2alpha1.HorizontalPodAutoscaler) (result *v2alpha1.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2alpha1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2alpha1.HorizontalPodAutoscaler) (*v2alpha1.HorizontalPodAutoscaler, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2alpha1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(horizontalpodautoscalersResource, c.ns, name), &v2alpha1.HorizontalPodAutoscaler{}) + + return err +} + +func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v2alpha1.HorizontalPodAutoscalerList{}) + return err +} + +func (c *FakeHorizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2alpha1.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v2alpha1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2alpha1.HorizontalPodAutoscalerList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(horizontalpodautoscalersResource, c.ns, opts), &v2alpha1.HorizontalPodAutoscalerList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2alpha1.HorizontalPodAutoscalerList{} + for _, item := range obj.(*v2alpha1.HorizontalPodAutoscalerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *FakeHorizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched horizontalPodAutoscaler. +func (c *FakeHorizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, data, subresources...), &v2alpha1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.HorizontalPodAutoscaler), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/generated_expansion.go new file mode 100644 index 0000000000..e40f2c5a13 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +type HorizontalPodAutoscalerExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/horizontalpodautoscaler.go new file mode 100644 index 0000000000..c85bfd9f3c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/horizontalpodautoscaler.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v2alpha1 "k8s.io/client-go/pkg/apis/autoscaling/v2alpha1" + rest "k8s.io/client-go/rest" +) + +// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. +// A group's client should implement this interface. +type HorizontalPodAutoscalersGetter interface { + HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface +} + +// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. 
+type HorizontalPodAutoscalerInterface interface { + Create(*v2alpha1.HorizontalPodAutoscaler) (*v2alpha1.HorizontalPodAutoscaler, error) + Update(*v2alpha1.HorizontalPodAutoscaler) (*v2alpha1.HorizontalPodAutoscaler, error) + UpdateStatus(*v2alpha1.HorizontalPodAutoscaler) (*v2alpha1.HorizontalPodAutoscaler, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v2alpha1.HorizontalPodAutoscaler, error) + List(opts v1.ListOptions) (*v2alpha1.HorizontalPodAutoscalerList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.HorizontalPodAutoscaler, err error) + HorizontalPodAutoscalerExpansion +} + +// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type horizontalPodAutoscalers struct { + client rest.Interface + ns string +} + +// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers +func newHorizontalPodAutoscalers(c *AutoscalingV2alpha1Client, namespace string) *horizontalPodAutoscalers { + return &horizontalPodAutoscalers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2alpha1.HorizontalPodAutoscaler) (result *v2alpha1.HorizontalPodAutoscaler, err error) { + result = &v2alpha1.HorizontalPodAutoscaler{} + err = c.client.Post(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2alpha1.HorizontalPodAutoscaler) (result *v2alpha1.HorizontalPodAutoscaler, err error) { + result = &v2alpha1.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2alpha1.HorizontalPodAutoscaler) (result *v2alpha1.HorizontalPodAutoscaler, err error) { + result = &v2alpha1.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + SubResource("status"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. +func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). 
+ Resource("horizontalpodautoscalers"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. +func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2alpha1.HorizontalPodAutoscaler, err error) { + result = &v2alpha1.HorizontalPodAutoscaler{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. +func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2alpha1.HorizontalPodAutoscalerList, err error) { + result = &v2alpha1.HorizontalPodAutoscalerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched horizontalPodAutoscaler. +func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.HorizontalPodAutoscaler, err error) { + result = &v2alpha1.HorizontalPodAutoscaler{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go new file mode 100644 index 0000000000..c8766f5b60 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/apis/batch/v1" + rest "k8s.io/client-go/rest" +) + +type BatchV1Interface interface { + RESTClient() rest.Interface + JobsGetter +} + +// BatchV1Client is used to interact with features provided by the batch group. +type BatchV1Client struct { + restClient rest.Interface +} + +func (c *BatchV1Client) Jobs(namespace string) JobInterface { + return newJobs(c, namespace) +} + +// NewForConfig creates a new BatchV1Client for the given config. 
+func NewForConfig(c *rest.Config) (*BatchV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &BatchV1Client{client}, nil +} + +// NewForConfigOrDie creates a new BatchV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *BatchV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new BatchV1Client for the given RESTClient. +func New(c rest.Interface) *BatchV1Client { + return &BatchV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *BatchV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go new file mode 100644 index 0000000000..54673bfa73 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go new file mode 100644 index 0000000000..c6548330a0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// Package fake has the automatically generated clients. 
+package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go new file mode 100644 index 0000000000..4e2d361e80 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go @@ -0,0 +1,38 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/client-go/kubernetes/typed/batch/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeBatchV1 struct { + *testing.Fake +} + +func (c *FakeBatchV1) Jobs(namespace string) v1.JobInterface { + return &FakeJobs{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeBatchV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go new file mode 100644 index 0000000000..e193a77654 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1 "k8s.io/client-go/pkg/apis/batch/v1" + testing "k8s.io/client-go/testing" +) + +// FakeJobs implements JobInterface +type FakeJobs struct { + Fake *FakeBatchV1 + ns string +} + +var jobsResource = schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "jobs"} + +func (c *FakeJobs) Create(job *v1.Job) (result *v1.Job, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(jobsResource, c.ns, job), &v1.Job{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Job), err +} + +func (c *FakeJobs) Update(job *v1.Job) (result *v1.Job, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(jobsResource, c.ns, job), &v1.Job{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Job), err +} + +func (c *FakeJobs) UpdateStatus(job *v1.Job) (*v1.Job, error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateSubresourceAction(jobsResource, "status", c.ns, job), &v1.Job{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Job), err +} + +func (c *FakeJobs) Delete(name string, options *meta_v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(jobsResource, c.ns, name), &v1.Job{}) + + return err +} + +func (c *FakeJobs) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(jobsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.JobList{}) + return err +} + +func (c *FakeJobs) Get(name string, options meta_v1.GetOptions) (result *v1.Job, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(jobsResource, c.ns, name), &v1.Job{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Job), err +} + +func (c *FakeJobs) List(opts meta_v1.ListOptions) (result *v1.JobList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(jobsResource, c.ns, opts), &v1.JobList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.JobList{} + for _, item := range obj.(*v1.JobList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested jobs. +func (c *FakeJobs) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(jobsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched job. +func (c *FakeJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, name, data, subresources...), &v1.Job{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Job), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go new file mode 100644 index 0000000000..68d7741fa0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +type JobExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go new file mode 100644 index 0000000000..c8120f6d85 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/apis/batch/v1" + rest "k8s.io/client-go/rest" +) + +// JobsGetter has a method to return a JobInterface. +// A group's client should implement this interface. +type JobsGetter interface { + Jobs(namespace string) JobInterface +} + +// JobInterface has methods to work with Job resources. +type JobInterface interface { + Create(*v1.Job) (*v1.Job, error) + Update(*v1.Job) (*v1.Job, error) + UpdateStatus(*v1.Job) (*v1.Job, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Job, error) + List(opts meta_v1.ListOptions) (*v1.JobList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error) + JobExpansion +} + +// jobs implements JobInterface +type jobs struct { + client rest.Interface + ns string +} + +// newJobs returns a Jobs +func newJobs(c *BatchV1Client, namespace string) *jobs { + return &jobs{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. +func (c *jobs) Create(job *v1.Job) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Post(). + Namespace(c.ns). + Resource("jobs"). + Body(job). + Do(). + Into(result) + return +} + +// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. +func (c *jobs) Update(job *v1.Job) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Put(). + Namespace(c.ns). + Resource("jobs"). + Name(job.Name). + Body(job). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *jobs) UpdateStatus(job *v1.Job) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Put(). + Namespace(c.ns). + Resource("jobs"). + Name(job.Name). + SubResource("status"). + Body(job). + Do(). + Into(result) + return +} + +// Delete takes name of the job and deletes it. Returns an error if one occurs. +func (c *jobs) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("jobs"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *jobs) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). 
+ Error() +} + +// Get takes name of the job, and returns the corresponding job object, and an error if there is any. +func (c *jobs) Get(name string, options meta_v1.GetOptions) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Jobs that match those selectors. +func (c *jobs) List(opts meta_v1.ListOptions) (result *v1.JobList, err error) { + result = &v1.JobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested jobs. +func (c *jobs) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched job. +func (c *jobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("jobs"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go new file mode 100644 index 0000000000..e2b2dd5cd0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v2alpha1 "k8s.io/client-go/pkg/apis/batch/v2alpha1" + rest "k8s.io/client-go/rest" +) + +type BatchV2alpha1Interface interface { + RESTClient() rest.Interface + CronJobsGetter +} + +// BatchV2alpha1Client is used to interact with features provided by the batch group. +type BatchV2alpha1Client struct { + restClient rest.Interface +} + +func (c *BatchV2alpha1Client) CronJobs(namespace string) CronJobInterface { + return newCronJobs(c, namespace) +} + +// NewForConfig creates a new BatchV2alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*BatchV2alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &BatchV2alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new BatchV2alpha1Client for the given config and +// panics if there is an error in the config. 
+func NewForConfigOrDie(c *rest.Config) *BatchV2alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new BatchV2alpha1Client for the given RESTClient. +func New(c rest.Interface) *BatchV2alpha1Client { + return &BatchV2alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v2alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *BatchV2alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go new file mode 100644 index 0000000000..2447f2add0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v2alpha1 "k8s.io/client-go/pkg/apis/batch/v2alpha1" + rest "k8s.io/client-go/rest" +) + +// CronJobsGetter has a method to return a CronJobInterface. +// A group's client should implement this interface. +type CronJobsGetter interface { + CronJobs(namespace string) CronJobInterface +} + +// CronJobInterface has methods to work with CronJob resources. +type CronJobInterface interface { + Create(*v2alpha1.CronJob) (*v2alpha1.CronJob, error) + Update(*v2alpha1.CronJob) (*v2alpha1.CronJob, error) + UpdateStatus(*v2alpha1.CronJob) (*v2alpha1.CronJob, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v2alpha1.CronJob, error) + List(opts v1.ListOptions) (*v2alpha1.CronJobList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) + CronJobExpansion +} + +// cronJobs implements CronJobInterface +type cronJobs struct { + client rest.Interface + ns string +} + +// newCronJobs returns a CronJobs +func newCronJobs(c *BatchV2alpha1Client, namespace string) *cronJobs { + return &cronJobs{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. 
+func (c *cronJobs) Create(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { + result = &v2alpha1.CronJob{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cronjobs"). + Body(cronJob). + Do(). + Into(result) + return +} + +// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. +func (c *cronJobs) Update(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { + result = &v2alpha1.CronJob{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cronjobs"). + Name(cronJob.Name). + Body(cronJob). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *cronJobs) UpdateStatus(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { + result = &v2alpha1.CronJob{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cronjobs"). + Name(cronJob.Name). + SubResource("status"). + Body(cronJob). + Do(). + Into(result) + return +} + +// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. +func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cronjobs"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cronjobs"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. +func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v2alpha1.CronJob, err error) { + result = &v2alpha1.CronJob{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cronjobs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CronJobs that match those selectors. +func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) { + result = &v2alpha1.CronJobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cronjobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cronJobs. +func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cronjobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched cronJob. +func (c *cronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) { + result = &v2alpha1.CronJob{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cronjobs"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go new file mode 100644 index 0000000000..d29bd3f4e1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v2alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/doc.go new file mode 100644 index 0000000000..c6548330a0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_batch_client.go new file mode 100644 index 0000000000..c8f5a40de8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_batch_client.go @@ -0,0 +1,38 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeBatchV2alpha1 struct { + *testing.Fake +} + +func (c *FakeBatchV2alpha1) CronJobs(namespace string) v2alpha1.CronJobInterface { + return &FakeCronJobs{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeBatchV2alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go new file mode 100644 index 0000000000..9e570a8a45 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v2alpha1 "k8s.io/client-go/pkg/apis/batch/v2alpha1" + testing "k8s.io/client-go/testing" +) + +// FakeCronJobs implements CronJobInterface +type FakeCronJobs struct { + Fake *FakeBatchV2alpha1 + ns string +} + +var cronjobsResource = schema.GroupVersionResource{Group: "batch", Version: "v2alpha1", Resource: "cronjobs"} + +func (c *FakeCronJobs) Create(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(cronjobsResource, c.ns, cronJob), &v2alpha1.CronJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CronJob), err +} + +func (c *FakeCronJobs) Update(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(cronjobsResource, c.ns, cronJob), &v2alpha1.CronJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CronJob), err +} + +func (c *FakeCronJobs) UpdateStatus(cronJob *v2alpha1.CronJob) (*v2alpha1.CronJob, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(cronjobsResource, "status", c.ns, cronJob), &v2alpha1.CronJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CronJob), err +} + +func (c *FakeCronJobs) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(cronjobsResource, c.ns, name), &v2alpha1.CronJob{}) + + return err +} + +func (c *FakeCronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(cronjobsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v2alpha1.CronJobList{}) + return err +} + +func (c *FakeCronJobs) Get(name string, options v1.GetOptions) (result *v2alpha1.CronJob, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(cronjobsResource, c.ns, name), &v2alpha1.CronJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CronJob), err +} + +func (c *FakeCronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(cronjobsResource, c.ns, opts), &v2alpha1.CronJobList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2alpha1.CronJobList{} + for _, item := range obj.(*v2alpha1.CronJobList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested cronJobs. +func (c *FakeCronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(cronjobsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched cronJob. +func (c *FakeCronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, data, subresources...), &v2alpha1.CronJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CronJob), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go new file mode 100644 index 0000000000..078027ef49 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +type CronJobExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go new file mode 100644 index 0000000000..c9c39acb20 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/certificates/v1beta1" + rest "k8s.io/client-go/rest" +) + +type CertificatesV1beta1Interface interface { + RESTClient() rest.Interface + CertificateSigningRequestsGetter +} + +// CertificatesV1beta1Client is used to interact with features provided by the certificates.k8s.io group. 
+type CertificatesV1beta1Client struct { + restClient rest.Interface +} + +func (c *CertificatesV1beta1Client) CertificateSigningRequests() CertificateSigningRequestInterface { + return newCertificateSigningRequests(c) +} + +// NewForConfig creates a new CertificatesV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*CertificatesV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CertificatesV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new CertificatesV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CertificatesV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CertificatesV1beta1Client for the given RESTClient. +func New(c rest.Interface) *CertificatesV1beta1Client { + return &CertificatesV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CertificatesV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go new file mode 100644 index 0000000000..7407ef0688 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go @@ -0,0 +1,161 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/certificates/v1beta1" + rest "k8s.io/client-go/rest" +) + +// CertificateSigningRequestsGetter has a method to return a CertificateSigningRequestInterface. +// A group's client should implement this interface. +type CertificateSigningRequestsGetter interface { + CertificateSigningRequests() CertificateSigningRequestInterface +} + +// CertificateSigningRequestInterface has methods to work with CertificateSigningRequest resources. 
+type CertificateSigningRequestInterface interface { + Create(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) + Update(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) + UpdateStatus(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.CertificateSigningRequest, error) + List(opts v1.ListOptions) (*v1beta1.CertificateSigningRequestList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) + CertificateSigningRequestExpansion +} + +// certificateSigningRequests implements CertificateSigningRequestInterface +type certificateSigningRequests struct { + client rest.Interface +} + +// newCertificateSigningRequests returns a CertificateSigningRequests +func newCertificateSigningRequests(c *CertificatesV1beta1Client) *certificateSigningRequests { + return &certificateSigningRequests{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. +func (c *certificateSigningRequests) Create(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { + result = &v1beta1.CertificateSigningRequest{} + err = c.client.Post(). + Resource("certificatesigningrequests"). + Body(certificateSigningRequest). + Do(). + Into(result) + return +} + +// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. +func (c *certificateSigningRequests) Update(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { + result = &v1beta1.CertificateSigningRequest{} + err = c.client.Put(). + Resource("certificatesigningrequests"). + Name(certificateSigningRequest.Name). + Body(certificateSigningRequest). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *certificateSigningRequests) UpdateStatus(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { + result = &v1beta1.CertificateSigningRequest{} + err = c.client.Put(). + Resource("certificatesigningrequests"). + Name(certificateSigningRequest.Name). + SubResource("status"). + Body(certificateSigningRequest). + Do(). + Into(result) + return +} + +// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. +func (c *certificateSigningRequests) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("certificatesigningrequests"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *certificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("certificatesigningrequests"). 
+ VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. +func (c *certificateSigningRequests) Get(name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { + result = &v1beta1.CertificateSigningRequest{} + err = c.client.Get(). + Resource("certificatesigningrequests"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. +func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { + result = &v1beta1.CertificateSigningRequestList{} + err = c.client.Get(). + Resource("certificatesigningrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested certificateSigningRequests. +func (c *certificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("certificatesigningrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched certificateSigningRequest. +func (c *certificateSigningRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { + result = &v1beta1.CertificateSigningRequest{} + err = c.client.Patch(pt). + Resource("certificatesigningrequests"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go new file mode 100644 index 0000000000..4765bba8ab --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go @@ -0,0 +1,37 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + certificates "k8s.io/client-go/pkg/apis/certificates/v1beta1" +) + +type CertificateSigningRequestExpansion interface { + UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) +} + +func (c *certificateSigningRequests) UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) { + result = &certificates.CertificateSigningRequest{} + err = c.client.Put(). + Resource("certificatesigningrequests"). + Name(certificateSigningRequest.Name). + Body(certificateSigningRequest). + SubResource("approval"). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go new file mode 100644 index 0000000000..11b5238972 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go new file mode 100644 index 0000000000..c6548330a0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go new file mode 100644 index 0000000000..550c5bba15 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go @@ -0,0 +1,38 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + v1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeCertificatesV1beta1 struct { + *testing.Fake +} + +func (c *FakeCertificatesV1beta1) CertificateSigningRequests() v1beta1.CertificateSigningRequestInterface { + return &FakeCertificateSigningRequests{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeCertificatesV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go new file mode 100644 index 0000000000..3ff1fe5a15 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go @@ -0,0 +1,119 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1beta1 "k8s.io/client-go/pkg/apis/certificates/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakeCertificateSigningRequests implements CertificateSigningRequestInterface +type FakeCertificateSigningRequests struct { + Fake *FakeCertificatesV1beta1 +} + +var certificatesigningrequestsResource = schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"} + +func (c *FakeCertificateSigningRequests) Create(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CertificateSigningRequest), err +} + +func (c *FakeCertificateSigningRequests) Update(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CertificateSigningRequest), err +} + +func (c *FakeCertificateSigningRequests) UpdateStatus(certificateSigningRequest *v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "status", certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CertificateSigningRequest), err +} + +func (c *FakeCertificateSigningRequests) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(certificatesigningrequestsResource, name), &v1beta1.CertificateSigningRequest{}) + return err +} + +func (c *FakeCertificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(certificatesigningrequestsResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.CertificateSigningRequestList{}) + return err +} + +func (c *FakeCertificateSigningRequests) Get(name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(certificatesigningrequestsResource, name), &v1beta1.CertificateSigningRequest{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CertificateSigningRequest), err +} + +func (c *FakeCertificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(certificatesigningrequestsResource, opts), &v1beta1.CertificateSigningRequestList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.CertificateSigningRequestList{} + for _, item := range obj.(*v1beta1.CertificateSigningRequestList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested certificateSigningRequests. +func (c *FakeCertificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(certificatesigningrequestsResource, opts)) +} + +// Patch applies the patch and returns the patched certificateSigningRequest. +func (c *FakeCertificateSigningRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, name, data, subresources...), &v1beta1.CertificateSigningRequest{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CertificateSigningRequest), err +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/fake_certificatesigningrequest_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go similarity index 88% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/fake_certificatesigningrequest_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go index 6e426c0852..2333c0f3ec 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/fake_certificatesigningrequest_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,8 +17,8 @@ limitations under the License. package fake import ( - "k8s.io/kubernetes/pkg/apis/certificates" - "k8s.io/kubernetes/pkg/client/testing/core" + certificates "k8s.io/client-go/pkg/apis/certificates/v1beta1" + core "k8s.io/client-go/testing" ) func (c *FakeCertificateSigningRequests) UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..2b7e8ca0bf --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go @@ -0,0 +1,17 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go new file mode 100644 index 0000000000..50671976aa --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// ComponentStatusesGetter has a method to return a ComponentStatusInterface. +// A group's client should implement this interface. +type ComponentStatusesGetter interface { + ComponentStatuses() ComponentStatusInterface +} + +// ComponentStatusInterface has methods to work with ComponentStatus resources. +type ComponentStatusInterface interface { + Create(*v1.ComponentStatus) (*v1.ComponentStatus, error) + Update(*v1.ComponentStatus) (*v1.ComponentStatus, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ComponentStatus, error) + List(opts meta_v1.ListOptions) (*v1.ComponentStatusList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ComponentStatus, err error) + ComponentStatusExpansion +} + +// componentStatuses implements ComponentStatusInterface +type componentStatuses struct { + client rest.Interface +} + +// newComponentStatuses returns a ComponentStatuses +func newComponentStatuses(c *CoreV1Client) *componentStatuses { + return &componentStatuses{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. +func (c *componentStatuses) Create(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Post(). + Resource("componentstatuses"). + Body(componentStatus). + Do(). + Into(result) + return +} + +// Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. +func (c *componentStatuses) Update(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Put(). + Resource("componentstatuses"). + Name(componentStatus.Name). + Body(componentStatus). + Do(). + Into(result) + return +} + +// Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. +func (c *componentStatuses) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Resource("componentstatuses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *componentStatuses) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Resource("componentstatuses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). 
+ Error() +} + +// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. +func (c *componentStatuses) Get(name string, options meta_v1.GetOptions) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Get(). + Resource("componentstatuses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. +func (c *componentStatuses) List(opts meta_v1.ListOptions) (result *v1.ComponentStatusList, err error) { + result = &v1.ComponentStatusList{} + err = c.client.Get(). + Resource("componentstatuses"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested componentStatuses. +func (c *componentStatuses) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("componentstatuses"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched componentStatus. +func (c *componentStatuses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Patch(pt). + Resource("componentstatuses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go new file mode 100644 index 0000000000..bb21636e2d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// ConfigMapsGetter has a method to return a ConfigMapInterface. +// A group's client should implement this interface. +type ConfigMapsGetter interface { + ConfigMaps(namespace string) ConfigMapInterface +} + +// ConfigMapInterface has methods to work with ConfigMap resources. 
+type ConfigMapInterface interface { + Create(*v1.ConfigMap) (*v1.ConfigMap, error) + Update(*v1.ConfigMap) (*v1.ConfigMap, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ConfigMap, error) + List(opts meta_v1.ListOptions) (*v1.ConfigMapList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error) + ConfigMapExpansion +} + +// configMaps implements ConfigMapInterface +type configMaps struct { + client rest.Interface + ns string +} + +// newConfigMaps returns a ConfigMaps +func newConfigMaps(c *CoreV1Client, namespace string) *configMaps { + return &configMaps{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. +func (c *configMaps) Create(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Post(). + Namespace(c.ns). + Resource("configmaps"). + Body(configMap). + Do(). + Into(result) + return +} + +// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. +func (c *configMaps) Update(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Put(). + Namespace(c.ns). + Resource("configmaps"). + Name(configMap.Name). + Body(configMap). + Do(). + Into(result) + return +} + +// Delete takes name of the configMap and deletes it. Returns an error if one occurs. +func (c *configMaps) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("configmaps"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *configMaps) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. +func (c *configMaps) Get(name string, options meta_v1.GetOptions) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Get(). + Namespace(c.ns). + Resource("configmaps"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. +func (c *configMaps) List(opts meta_v1.ListOptions) (result *v1.ConfigMapList, err error) { + result = &v1.ConfigMapList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested configMaps. +func (c *configMaps) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched configMap. 
+func (c *configMaps) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("configmaps"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go new file mode 100644 index 0000000000..0972960d10 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go @@ -0,0 +1,163 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +type CoreV1Interface interface { + RESTClient() rest.Interface + ComponentStatusesGetter + ConfigMapsGetter + EndpointsGetter + EventsGetter + LimitRangesGetter + NamespacesGetter + NodesGetter + PersistentVolumesGetter + PersistentVolumeClaimsGetter + PodsGetter + PodTemplatesGetter + ReplicationControllersGetter + ResourceQuotasGetter + SecretsGetter + ServicesGetter + ServiceAccountsGetter +} + +// CoreV1Client is used to interact with features provided by the group. 
+type CoreV1Client struct { + restClient rest.Interface +} + +func (c *CoreV1Client) ComponentStatuses() ComponentStatusInterface { + return newComponentStatuses(c) +} + +func (c *CoreV1Client) ConfigMaps(namespace string) ConfigMapInterface { + return newConfigMaps(c, namespace) +} + +func (c *CoreV1Client) Endpoints(namespace string) EndpointsInterface { + return newEndpoints(c, namespace) +} + +func (c *CoreV1Client) Events(namespace string) EventInterface { + return newEvents(c, namespace) +} + +func (c *CoreV1Client) LimitRanges(namespace string) LimitRangeInterface { + return newLimitRanges(c, namespace) +} + +func (c *CoreV1Client) Namespaces() NamespaceInterface { + return newNamespaces(c) +} + +func (c *CoreV1Client) Nodes() NodeInterface { + return newNodes(c) +} + +func (c *CoreV1Client) PersistentVolumes() PersistentVolumeInterface { + return newPersistentVolumes(c) +} + +func (c *CoreV1Client) PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface { + return newPersistentVolumeClaims(c, namespace) +} + +func (c *CoreV1Client) Pods(namespace string) PodInterface { + return newPods(c, namespace) +} + +func (c *CoreV1Client) PodTemplates(namespace string) PodTemplateInterface { + return newPodTemplates(c, namespace) +} + +func (c *CoreV1Client) ReplicationControllers(namespace string) ReplicationControllerInterface { + return newReplicationControllers(c, namespace) +} + +func (c *CoreV1Client) ResourceQuotas(namespace string) ResourceQuotaInterface { + return newResourceQuotas(c, namespace) +} + +func (c *CoreV1Client) Secrets(namespace string) SecretInterface { + return newSecrets(c, namespace) +} + +func (c *CoreV1Client) Services(namespace string) ServiceInterface { + return newServices(c, namespace) +} + +func (c *CoreV1Client) ServiceAccounts(namespace string) ServiceAccountInterface { + return newServiceAccounts(c, namespace) +} + +// NewForConfig creates a new CoreV1Client for the given config. +func NewForConfig(c *rest.Config) (*CoreV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoreV1Client{client}, nil +} + +// NewForConfigOrDie creates a new CoreV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CoreV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreV1Client for the given RESTClient. +func New(c rest.Interface) *CoreV1Client { + return &CoreV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/api" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CoreV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go new file mode 100644 index 0000000000..54673bfa73 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go new file mode 100644 index 0000000000..3580742a86 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// EndpointsGetter has a method to return a EndpointsInterface. +// A group's client should implement this interface. +type EndpointsGetter interface { + Endpoints(namespace string) EndpointsInterface +} + +// EndpointsInterface has methods to work with Endpoints resources. +type EndpointsInterface interface { + Create(*v1.Endpoints) (*v1.Endpoints, error) + Update(*v1.Endpoints) (*v1.Endpoints, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Endpoints, error) + List(opts meta_v1.ListOptions) (*v1.EndpointsList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error) + EndpointsExpansion +} + +// endpoints implements EndpointsInterface +type endpoints struct { + client rest.Interface + ns string +} + +// newEndpoints returns a Endpoints +func newEndpoints(c *CoreV1Client, namespace string) *endpoints { + return &endpoints{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. +func (c *endpoints) Create(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Post(). + Namespace(c.ns). + Resource("endpoints"). + Body(endpoints). + Do(). + Into(result) + return +} + +// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. 
+func (c *endpoints) Update(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Put(). + Namespace(c.ns). + Resource("endpoints"). + Name(endpoints.Name). + Body(endpoints). + Do(). + Into(result) + return +} + +// Delete takes name of the endpoints and deletes it. Returns an error if one occurs. +func (c *endpoints) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpoints"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *endpoints) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. +func (c *endpoints) Get(name string, options meta_v1.GetOptions) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Endpoints that match those selectors. +func (c *endpoints) List(opts meta_v1.ListOptions) (result *v1.EndpointsList, err error) { + result = &v1.EndpointsList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested endpoints. +func (c *endpoints) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched endpoints. +func (c *endpoints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("endpoints"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go new file mode 100644 index 0000000000..c4ac11006c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// EventsGetter has a method to return a EventInterface. +// A group's client should implement this interface. +type EventsGetter interface { + Events(namespace string) EventInterface +} + +// EventInterface has methods to work with Event resources. +type EventInterface interface { + Create(*v1.Event) (*v1.Event, error) + Update(*v1.Event) (*v1.Event, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Event, error) + List(opts meta_v1.ListOptions) (*v1.EventList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error) + EventExpansion +} + +// events implements EventInterface +type events struct { + client rest.Interface + ns string +} + +// newEvents returns a Events +func newEvents(c *CoreV1Client, namespace string) *events { + return &events{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Create(event *v1.Event) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Post(). + Namespace(c.ns). + Resource("events"). + Body(event). + Do(). + Into(result) + return +} + +// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Update(event *v1.Event) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Put(). + Namespace(c.ns). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return +} + +// Delete takes name of the event and deletes it. Returns an error if one occurs. +func (c *events) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *events) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the event, and returns the corresponding event object, and an error if there is any. +func (c *events) Get(name string, options meta_v1.GetOptions) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Events that match those selectors. +func (c *events) List(opts meta_v1.ListOptions) (result *v1.EventList, err error) { + result = &v1.EventList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested events. 
+func (c *events) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched event. +func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("events"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go new file mode 100644 index 0000000000..9b4490ea97 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go @@ -0,0 +1,163 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/pkg/api/v1" +) + +// The EventExpansion interface allows manually adding extra methods to the EventInterface. +type EventExpansion interface { + // CreateWithEventNamespace is the same as a Create, except that it sends the request to the event.Namespace. + CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) + // UpdateWithEventNamespace is the same as a Update, except that it sends the request to the event.Namespace. + UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) + PatchWithEventNamespace(event *v1.Event, data []byte) (*v1.Event, error) + // Search finds events about the specified object + Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) + // Returns the appropriate field selector based on the API version being used to communicate with the server. + // The returned field selector can be used with List and Watch to filter desired events. + GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector +} + +// CreateWithEventNamespace makes a new event. Returns the copy of the event the server returns, +// or an error. The namespace to create the event within is deduced from the +// event; it must either match this event client's namespace, or this event +// client must have been created with the "" namespace. +func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { + if e.ns != "" && event.Namespace != e.ns { + return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + } + result := &v1.Event{} + err := e.client.Post(). + NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Body(event). + Do(). + Into(result) + return result, err +} + +// UpdateWithEventNamespace modifies an existing event. 
It returns the copy of the event that the server returns, +// or an error. The namespace and key to update the event within is deduced from the event. The +// namespace must either match this event client's namespace, or this event client must have been +// created with the "" namespace. Update also requires the ResourceVersion to be set in the event +// object. +func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { + result := &v1.Event{} + err := e.client.Put(). + NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return result, err +} + +// PatchWithEventNamespace modifies an existing event. It returns the copy of +// the event that the server returns, or an error. The namespace and name of the +// target event is deduced from the incompleteEvent. The namespace must either +// match this event client's namespace, or this event client must have been +// created with the "" namespace. +func (e *events) PatchWithEventNamespace(incompleteEvent *v1.Event, data []byte) (*v1.Event, error) { + if e.ns != "" && incompleteEvent.Namespace != e.ns { + return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", incompleteEvent.Namespace, e.ns) + } + result := &v1.Event{} + err := e.client.Patch(types.StrategicMergePatchType). + NamespaceIfScoped(incompleteEvent.Namespace, len(incompleteEvent.Namespace) > 0). + Resource("events"). + Name(incompleteEvent.Name). + Body(data). + Do(). + Into(result) + return result, err +} + +// Search finds events about the specified object. The namespace of the +// object must match this event's client namespace unless the event client +// was made with the "" namespace. +func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) { + ref, err := v1.GetReference(scheme, objOrRef) + if err != nil { + return nil, err + } + if e.ns != "" && ref.Namespace != e.ns { + return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.ns) + } + stringRefKind := string(ref.Kind) + var refKind *string + if stringRefKind != "" { + refKind = &stringRefKind + } + stringRefUID := string(ref.UID) + var refUID *string + if stringRefUID != "" { + refUID = &stringRefUID + } + fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID) + return e.List(metav1.ListOptions{FieldSelector: fieldSelector.String()}) +} + +// Returns the appropriate field selector based on the API version being used to communicate with the server. +// The returned field selector can be used with List and Watch to filter desired events. +func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { + apiVersion := e.client.APIVersion().String() + field := fields.Set{} + if involvedObjectName != nil { + field[GetInvolvedObjectNameFieldLabel(apiVersion)] = *involvedObjectName + } + if involvedObjectNamespace != nil { + field["involvedObject.namespace"] = *involvedObjectNamespace + } + if involvedObjectKind != nil { + field["involvedObject.kind"] = *involvedObjectKind + } + if involvedObjectUID != nil { + field["involvedObject.uid"] = *involvedObjectUID + } + return field.AsSelector() +} + +// Returns the appropriate field label to use for name of the involved object as per the given API version. 
+func GetInvolvedObjectNameFieldLabel(version string) string { + return "involvedObject.name" +} + +// TODO: This is a temporary arrangement and will be removed once all clients are moved to use the clientset. +type EventSinkImpl struct { + Interface EventInterface +} + +func (e *EventSinkImpl) Create(event *v1.Event) (*v1.Event, error) { + return e.Interface.CreateWithEventNamespace(event) +} + +func (e *EventSinkImpl) Update(event *v1.Event) (*v1.Event, error) { + return e.Interface.UpdateWithEventNamespace(event) +} + +func (e *EventSinkImpl) Patch(event *v1.Event, data []byte) (*v1.Event, error) { + return e.Interface.PatchWithEventNamespace(event, data) +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go new file mode 100644 index 0000000000..c6548330a0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go new file mode 100644 index 0000000000..6e21110835 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go @@ -0,0 +1,110 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1 "k8s.io/client-go/pkg/api/v1" + testing "k8s.io/client-go/testing" +) + +// FakeComponentStatuses implements ComponentStatusInterface +type FakeComponentStatuses struct { + Fake *FakeCoreV1 +} + +var componentstatusesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "componentstatuses"} + +func (c *FakeComponentStatuses) Create(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootCreateAction(componentstatusesResource, componentStatus), &v1.ComponentStatus{}) + if obj == nil { + return nil, err + } + return obj.(*v1.ComponentStatus), err +} + +func (c *FakeComponentStatuses) Update(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(componentstatusesResource, componentStatus), &v1.ComponentStatus{}) + if obj == nil { + return nil, err + } + return obj.(*v1.ComponentStatus), err +} + +func (c *FakeComponentStatuses) Delete(name string, options *meta_v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(componentstatusesResource, name), &v1.ComponentStatus{}) + return err +} + +func (c *FakeComponentStatuses) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(componentstatusesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ComponentStatusList{}) + return err +} + +func (c *FakeComponentStatuses) Get(name string, options meta_v1.GetOptions) (result *v1.ComponentStatus, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(componentstatusesResource, name), &v1.ComponentStatus{}) + if obj == nil { + return nil, err + } + return obj.(*v1.ComponentStatus), err +} + +func (c *FakeComponentStatuses) List(opts meta_v1.ListOptions) (result *v1.ComponentStatusList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(componentstatusesResource, opts), &v1.ComponentStatusList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.ComponentStatusList{} + for _, item := range obj.(*v1.ComponentStatusList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested componentStatuses. +func (c *FakeComponentStatuses) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(componentstatusesResource, opts)) +} + +// Patch applies the patch and returns the patched componentStatus. +func (c *FakeComponentStatuses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ComponentStatus, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, name, data, subresources...), &v1.ComponentStatus{}) + if obj == nil { + return nil, err + } + return obj.(*v1.ComponentStatus), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go new file mode 100644 index 0000000000..560ad7a400 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go @@ -0,0 +1,118 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1 "k8s.io/client-go/pkg/api/v1" + testing "k8s.io/client-go/testing" +) + +// FakeConfigMaps implements ConfigMapInterface +type FakeConfigMaps struct { + Fake *FakeCoreV1 + ns string +} + +var configmapsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"} + +func (c *FakeConfigMaps) Create(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(configmapsResource, c.ns, configMap), &v1.ConfigMap{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ConfigMap), err +} + +func (c *FakeConfigMaps) Update(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(configmapsResource, c.ns, configMap), &v1.ConfigMap{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ConfigMap), err +} + +func (c *FakeConfigMaps) Delete(name string, options *meta_v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(configmapsResource, c.ns, name), &v1.ConfigMap{}) + + return err +} + +func (c *FakeConfigMaps) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(configmapsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ConfigMapList{}) + return err +} + +func (c *FakeConfigMaps) Get(name string, options meta_v1.GetOptions) (result *v1.ConfigMap, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(configmapsResource, c.ns, name), &v1.ConfigMap{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ConfigMap), err +} + +func (c *FakeConfigMaps) List(opts meta_v1.ListOptions) (result *v1.ConfigMapList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(configmapsResource, c.ns, opts), &v1.ConfigMapList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.ConfigMapList{} + for _, item := range obj.(*v1.ConfigMapList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested configMaps. +func (c *FakeConfigMaps) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(configmapsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched configMap. +func (c *FakeConfigMaps) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, name, data, subresources...), &v1.ConfigMap{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ConfigMap), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go new file mode 100644 index 0000000000..0956b4c63c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go @@ -0,0 +1,98 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/client-go/kubernetes/typed/core/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeCoreV1 struct { + *testing.Fake +} + +func (c *FakeCoreV1) ComponentStatuses() v1.ComponentStatusInterface { + return &FakeComponentStatuses{c} +} + +func (c *FakeCoreV1) ConfigMaps(namespace string) v1.ConfigMapInterface { + return &FakeConfigMaps{c, namespace} +} + +func (c *FakeCoreV1) Endpoints(namespace string) v1.EndpointsInterface { + return &FakeEndpoints{c, namespace} +} + +func (c *FakeCoreV1) Events(namespace string) v1.EventInterface { + return &FakeEvents{c, namespace} +} + +func (c *FakeCoreV1) LimitRanges(namespace string) v1.LimitRangeInterface { + return &FakeLimitRanges{c, namespace} +} + +func (c *FakeCoreV1) Namespaces() v1.NamespaceInterface { + return &FakeNamespaces{c} +} + +func (c *FakeCoreV1) Nodes() v1.NodeInterface { + return &FakeNodes{c} +} + +func (c *FakeCoreV1) PersistentVolumes() v1.PersistentVolumeInterface { + return &FakePersistentVolumes{c} +} + +func (c *FakeCoreV1) PersistentVolumeClaims(namespace string) v1.PersistentVolumeClaimInterface { + return &FakePersistentVolumeClaims{c, namespace} +} + +func (c *FakeCoreV1) Pods(namespace string) v1.PodInterface { + return &FakePods{c, namespace} +} + +func (c *FakeCoreV1) PodTemplates(namespace string) v1.PodTemplateInterface { + return &FakePodTemplates{c, namespace} +} + +func (c *FakeCoreV1) ReplicationControllers(namespace string) v1.ReplicationControllerInterface { + return &FakeReplicationControllers{c, namespace} +} + +func (c *FakeCoreV1) ResourceQuotas(namespace string) v1.ResourceQuotaInterface { + return &FakeResourceQuotas{c, namespace} +} + +func (c *FakeCoreV1) Secrets(namespace string) v1.SecretInterface { + return &FakeSecrets{c, namespace} +} + +func (c *FakeCoreV1) Services(namespace string) v1.ServiceInterface { + return &FakeServices{c, namespace} +} + +func (c *FakeCoreV1) ServiceAccounts(namespace string) v1.ServiceAccountInterface { + return &FakeServiceAccounts{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeCoreV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go new file mode 100644 index 0000000000..d4b7ad0686 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go @@ -0,0 +1,118 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1 "k8s.io/client-go/pkg/api/v1" + testing "k8s.io/client-go/testing" +) + +// FakeEndpoints implements EndpointsInterface +type FakeEndpoints struct { + Fake *FakeCoreV1 + ns string +} + +var endpointsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "endpoints"} + +func (c *FakeEndpoints) Create(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Endpoints), err +} + +func (c *FakeEndpoints) Update(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Endpoints), err +} + +func (c *FakeEndpoints) Delete(name string, options *meta_v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(endpointsResource, c.ns, name), &v1.Endpoints{}) + + return err +} + +func (c *FakeEndpoints) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(endpointsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.EndpointsList{}) + return err +} + +func (c *FakeEndpoints) Get(name string, options meta_v1.GetOptions) (result *v1.Endpoints, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(endpointsResource, c.ns, name), &v1.Endpoints{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Endpoints), err +} + +func (c *FakeEndpoints) List(opts meta_v1.ListOptions) (result *v1.EndpointsList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(endpointsResource, c.ns, opts), &v1.EndpointsList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.EndpointsList{} + for _, item := range obj.(*v1.EndpointsList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested endpoints. 
+func (c *FakeEndpoints) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(endpointsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched endpoints. +func (c *FakeEndpoints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, name, data, subresources...), &v1.Endpoints{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Endpoints), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go new file mode 100644 index 0000000000..7abe81587d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go @@ -0,0 +1,118 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1 "k8s.io/client-go/pkg/api/v1" + testing "k8s.io/client-go/testing" +) + +// FakeEvents implements EventInterface +type FakeEvents struct { + Fake *FakeCoreV1 + ns string +} + +var eventsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "events"} + +func (c *FakeEvents) Create(event *v1.Event) (result *v1.Event, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &v1.Event{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Event), err +} + +func (c *FakeEvents) Update(event *v1.Event) (result *v1.Event, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &v1.Event{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Event), err +} + +func (c *FakeEvents) Delete(name string, options *meta_v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(eventsResource, c.ns, name), &v1.Event{}) + + return err +} + +func (c *FakeEvents) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.EventList{}) + return err +} + +func (c *FakeEvents) Get(name string, options meta_v1.GetOptions) (result *v1.Event, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(eventsResource, c.ns, name), &v1.Event{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Event), err +} + +func (c *FakeEvents) List(opts meta_v1.ListOptions) (result *v1.EventList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(eventsResource, c.ns, opts), &v1.EventList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.EventList{} + for _, item := range obj.(*v1.EventList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested events. +func (c *FakeEvents) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched event. +func (c *FakeEvents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, data, subresources...), &v1.Event{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Event), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go new file mode 100644 index 0000000000..b4de7bcda8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go @@ -0,0 +1,89 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/api/v1" + core "k8s.io/client-go/testing" +) + +func (c *FakeEvents) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { + action := core.NewRootCreateAction(eventsResource, event) + if c.ns != "" { + action = core.NewCreateAction(eventsResource, c.ns, event) + } + obj, err := c.Fake.Invokes(action, event) + if obj == nil { + return nil, err + } + + return obj.(*v1.Event), err +} + +// Update replaces an existing event. Returns the copy of the event the server returns, or an error. +func (c *FakeEvents) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { + action := core.NewRootUpdateAction(eventsResource, event) + if c.ns != "" { + action = core.NewUpdateAction(eventsResource, c.ns, event) + } + obj, err := c.Fake.Invokes(action, event) + if obj == nil { + return nil, err + } + + return obj.(*v1.Event), err +} + +// PatchWithEventNamespace patches an existing event. Returns the copy of the event the server returns, or an error. +func (c *FakeEvents) PatchWithEventNamespace(event *v1.Event, data []byte) (*v1.Event, error) { + action := core.NewRootPatchAction(eventsResource, event.Name, data) + if c.ns != "" { + action = core.NewPatchAction(eventsResource, c.ns, event.Name, data) + } + obj, err := c.Fake.Invokes(action, event) + if obj == nil { + return nil, err + } + + return obj.(*v1.Event), err +} + +// Search returns a list of events matching the specified object. 
+func (c *FakeEvents) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) { + action := core.NewRootListAction(eventsResource, api.ListOptions{}) + if c.ns != "" { + action = core.NewListAction(eventsResource, c.ns, api.ListOptions{}) + } + obj, err := c.Fake.Invokes(action, &v1.EventList{}) + if obj == nil { + return nil, err + } + + return obj.(*v1.EventList), err +} + +func (c *FakeEvents) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { + action := core.GenericActionImpl{} + action.Verb = "get-field-selector" + action.Resource = eventsResource + + c.Fake.Invokes(action, nil) + return fields.Everything() +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go new file mode 100644 index 0000000000..2786358382 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go @@ -0,0 +1,118 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1 "k8s.io/client-go/pkg/api/v1" + testing "k8s.io/client-go/testing" +) + +// FakeLimitRanges implements LimitRangeInterface +type FakeLimitRanges struct { + Fake *FakeCoreV1 + ns string +} + +var limitrangesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "limitranges"} + +func (c *FakeLimitRanges) Create(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(limitrangesResource, c.ns, limitRange), &v1.LimitRange{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.LimitRange), err +} + +func (c *FakeLimitRanges) Update(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(limitrangesResource, c.ns, limitRange), &v1.LimitRange{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.LimitRange), err +} + +func (c *FakeLimitRanges) Delete(name string, options *meta_v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(limitrangesResource, c.ns, name), &v1.LimitRange{}) + + return err +} + +func (c *FakeLimitRanges) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(limitrangesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.LimitRangeList{}) + return err +} + +func (c *FakeLimitRanges) Get(name string, options meta_v1.GetOptions) (result *v1.LimitRange, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewGetAction(limitrangesResource, c.ns, name), &v1.LimitRange{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.LimitRange), err +} + +func (c *FakeLimitRanges) List(opts meta_v1.ListOptions) (result *v1.LimitRangeList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(limitrangesResource, c.ns, opts), &v1.LimitRangeList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.LimitRangeList{} + for _, item := range obj.(*v1.LimitRangeList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested limitRanges. +func (c *FakeLimitRanges) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(limitrangesResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched limitRange. +func (c *FakeLimitRanges) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.LimitRange, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, name, data, subresources...), &v1.LimitRange{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.LimitRange), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go new file mode 100644 index 0000000000..2ec7376761 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go @@ -0,0 +1,119 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1 "k8s.io/client-go/pkg/api/v1" + testing "k8s.io/client-go/testing" +) + +// FakeNamespaces implements NamespaceInterface +type FakeNamespaces struct { + Fake *FakeCoreV1 +} + +var namespacesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"} + +func (c *FakeNamespaces) Create(namespace *v1.Namespace) (result *v1.Namespace, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(namespacesResource, namespace), &v1.Namespace{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Namespace), err +} + +func (c *FakeNamespaces) Update(namespace *v1.Namespace) (result *v1.Namespace, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(namespacesResource, namespace), &v1.Namespace{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Namespace), err +} + +func (c *FakeNamespaces) UpdateStatus(namespace *v1.Namespace) (*v1.Namespace, error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateSubresourceAction(namespacesResource, "status", namespace), &v1.Namespace{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Namespace), err +} + +func (c *FakeNamespaces) Delete(name string, options *meta_v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(namespacesResource, name), &v1.Namespace{}) + return err +} + +func (c *FakeNamespaces) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(namespacesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1.NamespaceList{}) + return err +} + +func (c *FakeNamespaces) Get(name string, options meta_v1.GetOptions) (result *v1.Namespace, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(namespacesResource, name), &v1.Namespace{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Namespace), err +} + +func (c *FakeNamespaces) List(opts meta_v1.ListOptions) (result *v1.NamespaceList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(namespacesResource, opts), &v1.NamespaceList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.NamespaceList{} + for _, item := range obj.(*v1.NamespaceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested namespaces. +func (c *FakeNamespaces) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(namespacesResource, opts)) +} + +// Patch applies the patch and returns the patched namespace. +func (c *FakeNamespaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, name, data, subresources...), &v1.Namespace{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Namespace), err +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_namespace_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go similarity index 81% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_namespace_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go index 41eb343e02..1ac7e75d76 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_namespace_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go @@ -17,11 +17,11 @@ limitations under the License. 
package fake import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/client-go/pkg/api/v1" + core "k8s.io/client-go/testing" ) -func (c *FakeNamespaces) Finalize(namespace *api.Namespace) (*api.Namespace, error) { +func (c *FakeNamespaces) Finalize(namespace *v1.Namespace) (*v1.Namespace, error) { action := core.CreateActionImpl{} action.Verb = "create" action.Resource = namespacesResource @@ -33,5 +33,5 @@ func (c *FakeNamespaces) Finalize(namespace *api.Namespace) (*api.Namespace, err return nil, err } - return obj.(*api.Namespace), err + return obj.(*v1.Namespace), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go new file mode 100644 index 0000000000..357297228b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go @@ -0,0 +1,119 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1 "k8s.io/client-go/pkg/api/v1" + testing "k8s.io/client-go/testing" +) + +// FakeNodes implements NodeInterface +type FakeNodes struct { + Fake *FakeCoreV1 +} + +var nodesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "nodes"} + +func (c *FakeNodes) Create(node *v1.Node) (result *v1.Node, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(nodesResource, node), &v1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Node), err +} + +func (c *FakeNodes) Update(node *v1.Node) (result *v1.Node, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(nodesResource, node), &v1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Node), err +} + +func (c *FakeNodes) UpdateStatus(node *v1.Node) (*v1.Node, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(nodesResource, "status", node), &v1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Node), err +} + +func (c *FakeNodes) Delete(name string, options *meta_v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(nodesResource, name), &v1.Node{}) + return err +} + +func (c *FakeNodes) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(nodesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1.NodeList{}) + return err +} + +func (c *FakeNodes) Get(name string, options meta_v1.GetOptions) (result *v1.Node, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootGetAction(nodesResource, name), &v1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Node), err +} + +func (c *FakeNodes) List(opts meta_v1.ListOptions) (result *v1.NodeList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(nodesResource, opts), &v1.NodeList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.NodeList{} + for _, item := range obj.(*v1.NodeList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested nodes. +func (c *FakeNodes) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(nodesResource, opts)) +} + +// Patch applies the patch and returns the patched node. +func (c *FakeNodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(nodesResource, name, data, subresources...), &v1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Node), err +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_node_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go similarity index 84% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_node_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go index 09ddd84676..597334582c 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_node_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go @@ -17,16 +17,16 @@ limitations under the License. package fake import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/client-go/pkg/api/v1" + core "k8s.io/client-go/testing" ) -func (c *FakeNodes) PatchStatus(nodeName string, data []byte) (*api.Node, error) { +func (c *FakeNodes) PatchStatus(nodeName string, data []byte) (*v1.Node, error) { obj, err := c.Fake.Invokes( - core.NewRootPatchSubresourceAction(nodesResource, nodeName, data, "status"), &api.Node{}) + core.NewRootPatchSubresourceAction(nodesResource, nodeName, data, "status"), &v1.Node{}) if obj == nil { return nil, err } - return obj.(*api.Node), err + return obj.(*v1.Node), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go new file mode 100644 index 0000000000..8c3bed76e7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go @@ -0,0 +1,119 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1 "k8s.io/client-go/pkg/api/v1" + testing "k8s.io/client-go/testing" +) + +// FakePersistentVolumes implements PersistentVolumeInterface +type FakePersistentVolumes struct { + Fake *FakeCoreV1 +} + +var persistentvolumesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumes"} + +func (c *FakePersistentVolumes) Create(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(persistentvolumesResource, persistentVolume), &v1.PersistentVolume{}) + if obj == nil { + return nil, err + } + return obj.(*v1.PersistentVolume), err +} + +func (c *FakePersistentVolumes) Update(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(persistentvolumesResource, persistentVolume), &v1.PersistentVolume{}) + if obj == nil { + return nil, err + } + return obj.(*v1.PersistentVolume), err +} + +func (c *FakePersistentVolumes) UpdateStatus(persistentVolume *v1.PersistentVolume) (*v1.PersistentVolume, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(persistentvolumesResource, "status", persistentVolume), &v1.PersistentVolume{}) + if obj == nil { + return nil, err + } + return obj.(*v1.PersistentVolume), err +} + +func (c *FakePersistentVolumes) Delete(name string, options *meta_v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(persistentvolumesResource, name), &v1.PersistentVolume{}) + return err +} + +func (c *FakePersistentVolumes) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(persistentvolumesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1.PersistentVolumeList{}) + return err +} + +func (c *FakePersistentVolumes) Get(name string, options meta_v1.GetOptions) (result *v1.PersistentVolume, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(persistentvolumesResource, name), &v1.PersistentVolume{}) + if obj == nil { + return nil, err + } + return obj.(*v1.PersistentVolume), err +} + +func (c *FakePersistentVolumes) List(opts meta_v1.ListOptions) (result *v1.PersistentVolumeList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(persistentvolumesResource, opts), &v1.PersistentVolumeList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.PersistentVolumeList{} + for _, item := range obj.(*v1.PersistentVolumeList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested persistentVolumes. +func (c *FakePersistentVolumes) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(persistentvolumesResource, opts)) +} + +// Patch applies the patch and returns the patched persistentVolume. 
+func (c *FakePersistentVolumes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolume, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, name, data, subresources...), &v1.PersistentVolume{}) + if obj == nil { + return nil, err + } + return obj.(*v1.PersistentVolume), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go new file mode 100644 index 0000000000..f909a7f08c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1 "k8s.io/client-go/pkg/api/v1" + testing "k8s.io/client-go/testing" +) + +// FakePersistentVolumeClaims implements PersistentVolumeClaimInterface +type FakePersistentVolumeClaims struct { + Fake *FakeCoreV1 + ns string +} + +var persistentvolumeclaimsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"} + +func (c *FakePersistentVolumeClaims) Create(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &v1.PersistentVolumeClaim{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.PersistentVolumeClaim), err +} + +func (c *FakePersistentVolumeClaims) Update(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &v1.PersistentVolumeClaim{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.PersistentVolumeClaim), err +} + +func (c *FakePersistentVolumeClaims) UpdateStatus(persistentVolumeClaim *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(persistentvolumeclaimsResource, "status", c.ns, persistentVolumeClaim), &v1.PersistentVolumeClaim{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.PersistentVolumeClaim), err +} + +func (c *FakePersistentVolumeClaims) Delete(name string, options *meta_v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewDeleteAction(persistentvolumeclaimsResource, c.ns, name), &v1.PersistentVolumeClaim{}) + + return err +} + +func (c *FakePersistentVolumeClaims) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(persistentvolumeclaimsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.PersistentVolumeClaimList{}) + return err +} + +func (c *FakePersistentVolumeClaims) Get(name string, options meta_v1.GetOptions) (result *v1.PersistentVolumeClaim, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(persistentvolumeclaimsResource, c.ns, name), &v1.PersistentVolumeClaim{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.PersistentVolumeClaim), err +} + +func (c *FakePersistentVolumeClaims) List(opts meta_v1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(persistentvolumeclaimsResource, c.ns, opts), &v1.PersistentVolumeClaimList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.PersistentVolumeClaimList{} + for _, item := range obj.(*v1.PersistentVolumeClaimList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested persistentVolumeClaims. +func (c *FakePersistentVolumeClaims) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(persistentvolumeclaimsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched persistentVolumeClaim. +func (c *FakePersistentVolumeClaims) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolumeClaim, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, name, data, subresources...), &v1.PersistentVolumeClaim{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.PersistentVolumeClaim), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go new file mode 100644 index 0000000000..3f921fe021 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1 "k8s.io/client-go/pkg/api/v1" + testing "k8s.io/client-go/testing" +) + +// FakePods implements PodInterface +type FakePods struct { + Fake *FakeCoreV1 + ns string +} + +var podsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} + +func (c *FakePods) Create(pod *v1.Pod) (result *v1.Pod, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(podsResource, c.ns, pod), &v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Pod), err +} + +func (c *FakePods) Update(pod *v1.Pod) (result *v1.Pod, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(podsResource, c.ns, pod), &v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Pod), err +} + +func (c *FakePods) UpdateStatus(pod *v1.Pod) (*v1.Pod, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(podsResource, "status", c.ns, pod), &v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Pod), err +} + +func (c *FakePods) Delete(name string, options *meta_v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(podsResource, c.ns, name), &v1.Pod{}) + + return err +} + +func (c *FakePods) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(podsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.PodList{}) + return err +} + +func (c *FakePods) Get(name string, options meta_v1.GetOptions) (result *v1.Pod, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(podsResource, c.ns, name), &v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Pod), err +} + +func (c *FakePods) List(opts meta_v1.ListOptions) (result *v1.PodList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(podsResource, c.ns, opts), &v1.PodList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.PodList{} + for _, item := range obj.(*v1.PodList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested pods. +func (c *FakePods) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(podsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched pod. +func (c *FakePods) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, name, data, subresources...), &v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Pod), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go new file mode 100644 index 0000000000..c9d404e162 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go @@ -0,0 +1,58 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "k8s.io/client-go/pkg/api/v1" + policy "k8s.io/client-go/pkg/apis/policy/v1beta1" + restclient "k8s.io/client-go/rest" + core "k8s.io/client-go/testing" +) + +func (c *FakePods) Bind(binding *v1.Binding) error { + action := core.CreateActionImpl{} + action.Verb = "create" + action.Resource = podsResource + action.Subresource = "bindings" + action.Object = binding + + _, err := c.Fake.Invokes(action, binding) + return err +} + +func (c *FakePods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request { + action := core.GenericActionImpl{} + action.Verb = "get" + action.Namespace = c.ns + action.Resource = podsResource + action.Subresource = "logs" + action.Value = opts + + _, _ = c.Fake.Invokes(action, &v1.Pod{}) + return &restclient.Request{} +} + +func (c *FakePods) Evict(eviction *policy.Eviction) error { + action := core.CreateActionImpl{} + action.Verb = "create" + action.Resource = podsResource + action.Subresource = "eviction" + action.Object = eviction + + _, err := c.Fake.Invokes(action, eviction) + return err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go new file mode 100644 index 0000000000..86091e4127 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go @@ -0,0 +1,118 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1 "k8s.io/client-go/pkg/api/v1" + testing "k8s.io/client-go/testing" +) + +// FakePodTemplates implements PodTemplateInterface +type FakePodTemplates struct { + Fake *FakeCoreV1 + ns string +} + +var podtemplatesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "podtemplates"} + +func (c *FakePodTemplates) Create(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(podtemplatesResource, c.ns, podTemplate), &v1.PodTemplate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.PodTemplate), err +} + +func (c *FakePodTemplates) Update(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateAction(podtemplatesResource, c.ns, podTemplate), &v1.PodTemplate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.PodTemplate), err +} + +func (c *FakePodTemplates) Delete(name string, options *meta_v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(podtemplatesResource, c.ns, name), &v1.PodTemplate{}) + + return err +} + +func (c *FakePodTemplates) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(podtemplatesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.PodTemplateList{}) + return err +} + +func (c *FakePodTemplates) Get(name string, options meta_v1.GetOptions) (result *v1.PodTemplate, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(podtemplatesResource, c.ns, name), &v1.PodTemplate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.PodTemplate), err +} + +func (c *FakePodTemplates) List(opts meta_v1.ListOptions) (result *v1.PodTemplateList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(podtemplatesResource, c.ns, opts), &v1.PodTemplateList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.PodTemplateList{} + for _, item := range obj.(*v1.PodTemplateList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested podTemplates. +func (c *FakePodTemplates) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(podtemplatesResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched podTemplate. +func (c *FakePodTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodTemplate, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, name, data, subresources...), &v1.PodTemplate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.PodTemplate), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go new file mode 100644 index 0000000000..f2bb8cef9c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1 "k8s.io/client-go/pkg/api/v1" + testing "k8s.io/client-go/testing" +) + +// FakeReplicationControllers implements ReplicationControllerInterface +type FakeReplicationControllers struct { + Fake *FakeCoreV1 + ns string +} + +var replicationcontrollersResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "replicationcontrollers"} + +func (c *FakeReplicationControllers) Create(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(replicationcontrollersResource, c.ns, replicationController), &v1.ReplicationController{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ReplicationController), err +} + +func (c *FakeReplicationControllers) Update(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(replicationcontrollersResource, c.ns, replicationController), &v1.ReplicationController{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ReplicationController), err +} + +func (c *FakeReplicationControllers) UpdateStatus(replicationController *v1.ReplicationController) (*v1.ReplicationController, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "status", c.ns, replicationController), &v1.ReplicationController{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ReplicationController), err +} + +func (c *FakeReplicationControllers) Delete(name string, options *meta_v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(replicationcontrollersResource, c.ns, name), &v1.ReplicationController{}) + + return err +} + +func (c *FakeReplicationControllers) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(replicationcontrollersResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ReplicationControllerList{}) + return err +} + +func (c *FakeReplicationControllers) Get(name string, options meta_v1.GetOptions) (result *v1.ReplicationController, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(replicationcontrollersResource, c.ns, name), &v1.ReplicationController{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ReplicationController), err +} + +func (c *FakeReplicationControllers) List(opts meta_v1.ListOptions) (result *v1.ReplicationControllerList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(replicationcontrollersResource, c.ns, opts), &v1.ReplicationControllerList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.ReplicationControllerList{} + for _, item := range obj.(*v1.ReplicationControllerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested replicationControllers. +func (c *FakeReplicationControllers) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewWatchAction(replicationcontrollersResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched replicationController. +func (c *FakeReplicationControllers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, name, data, subresources...), &v1.ReplicationController{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ReplicationController), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go new file mode 100644 index 0000000000..0389f8e879 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1 "k8s.io/client-go/pkg/api/v1" + testing "k8s.io/client-go/testing" +) + +// FakeResourceQuotas implements ResourceQuotaInterface +type FakeResourceQuotas struct { + Fake *FakeCoreV1 + ns string +} + +var resourcequotasResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "resourcequotas"} + +func (c *FakeResourceQuotas) Create(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(resourcequotasResource, c.ns, resourceQuota), &v1.ResourceQuota{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ResourceQuota), err +} + +func (c *FakeResourceQuotas) Update(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(resourcequotasResource, c.ns, resourceQuota), &v1.ResourceQuota{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ResourceQuota), err +} + +func (c *FakeResourceQuotas) UpdateStatus(resourceQuota *v1.ResourceQuota) (*v1.ResourceQuota, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(resourcequotasResource, "status", c.ns, resourceQuota), &v1.ResourceQuota{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ResourceQuota), err +} + +func (c *FakeResourceQuotas) Delete(name string, options *meta_v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewDeleteAction(resourcequotasResource, c.ns, name), &v1.ResourceQuota{}) + + return err +} + +func (c *FakeResourceQuotas) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(resourcequotasResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ResourceQuotaList{}) + return err +} + +func (c *FakeResourceQuotas) Get(name string, options meta_v1.GetOptions) (result *v1.ResourceQuota, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(resourcequotasResource, c.ns, name), &v1.ResourceQuota{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ResourceQuota), err +} + +func (c *FakeResourceQuotas) List(opts meta_v1.ListOptions) (result *v1.ResourceQuotaList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(resourcequotasResource, c.ns, opts), &v1.ResourceQuotaList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.ResourceQuotaList{} + for _, item := range obj.(*v1.ResourceQuotaList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested resourceQuotas. +func (c *FakeResourceQuotas) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(resourcequotasResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched resourceQuota. +func (c *FakeResourceQuotas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResourceQuota, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, name, data, subresources...), &v1.ResourceQuota{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ResourceQuota), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go new file mode 100644 index 0000000000..a0b305504f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go @@ -0,0 +1,118 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1 "k8s.io/client-go/pkg/api/v1" + testing "k8s.io/client-go/testing" +) + +// FakeSecrets implements SecretInterface +type FakeSecrets struct { + Fake *FakeCoreV1 + ns string +} + +var secretsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"} + +func (c *FakeSecrets) Create(secret *v1.Secret) (result *v1.Secret, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewCreateAction(secretsResource, c.ns, secret), &v1.Secret{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Secret), err +} + +func (c *FakeSecrets) Update(secret *v1.Secret) (result *v1.Secret, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(secretsResource, c.ns, secret), &v1.Secret{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Secret), err +} + +func (c *FakeSecrets) Delete(name string, options *meta_v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(secretsResource, c.ns, name), &v1.Secret{}) + + return err +} + +func (c *FakeSecrets) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(secretsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.SecretList{}) + return err +} + +func (c *FakeSecrets) Get(name string, options meta_v1.GetOptions) (result *v1.Secret, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(secretsResource, c.ns, name), &v1.Secret{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Secret), err +} + +func (c *FakeSecrets) List(opts meta_v1.ListOptions) (result *v1.SecretList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(secretsResource, c.ns, opts), &v1.SecretList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.SecretList{} + for _, item := range obj.(*v1.SecretList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested secrets. +func (c *FakeSecrets) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(secretsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched secret. +func (c *FakeSecrets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, name, data, subresources...), &v1.Secret{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Secret), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go new file mode 100644 index 0000000000..4cc8ed0e91 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1 "k8s.io/client-go/pkg/api/v1" + testing "k8s.io/client-go/testing" +) + +// FakeServices implements ServiceInterface +type FakeServices struct { + Fake *FakeCoreV1 + ns string +} + +var servicesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"} + +func (c *FakeServices) Create(service *v1.Service) (result *v1.Service, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(servicesResource, c.ns, service), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} + +func (c *FakeServices) Update(service *v1.Service) (result *v1.Service, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(servicesResource, c.ns, service), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} + +func (c *FakeServices) UpdateStatus(service *v1.Service) (*v1.Service, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} + +func (c *FakeServices) Delete(name string, options *meta_v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(servicesResource, c.ns, name), &v1.Service{}) + + return err +} + +func (c *FakeServices) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(servicesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ServiceList{}) + return err +} + +func (c *FakeServices) Get(name string, options meta_v1.GetOptions) (result *v1.Service, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(servicesResource, c.ns, name), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} + +func (c *FakeServices) List(opts meta_v1.ListOptions) (result *v1.ServiceList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(servicesResource, c.ns, opts), &v1.ServiceList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.ServiceList{} + for _, item := range obj.(*v1.ServiceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested services. +func (c *FakeServices) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(servicesResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched service. +func (c *FakeServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, name, data, subresources...), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_service_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service_expansion.go similarity index 90% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_service_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service_expansion.go index 978585f670..92e4930d71 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_service_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service_expansion.go @@ -17,8 +17,8 @@ limitations under the License. package fake import ( - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/client/testing/core" + restclient "k8s.io/client-go/rest" + core "k8s.io/client-go/testing" ) func (c *FakeServices) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go new file mode 100644 index 0000000000..c049d7dec8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go @@ -0,0 +1,118 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1 "k8s.io/client-go/pkg/api/v1" + testing "k8s.io/client-go/testing" +) + +// FakeServiceAccounts implements ServiceAccountInterface +type FakeServiceAccounts struct { + Fake *FakeCoreV1 + ns string +} + +var serviceaccountsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "serviceaccounts"} + +func (c *FakeServiceAccounts) Create(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(serviceaccountsResource, c.ns, serviceAccount), &v1.ServiceAccount{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ServiceAccount), err +} + +func (c *FakeServiceAccounts) Update(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(serviceaccountsResource, c.ns, serviceAccount), &v1.ServiceAccount{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ServiceAccount), err +} + +func (c *FakeServiceAccounts) Delete(name string, options *meta_v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewDeleteAction(serviceaccountsResource, c.ns, name), &v1.ServiceAccount{}) + + return err +} + +func (c *FakeServiceAccounts) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(serviceaccountsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ServiceAccountList{}) + return err +} + +func (c *FakeServiceAccounts) Get(name string, options meta_v1.GetOptions) (result *v1.ServiceAccount, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(serviceaccountsResource, c.ns, name), &v1.ServiceAccount{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ServiceAccount), err +} + +func (c *FakeServiceAccounts) List(opts meta_v1.ListOptions) (result *v1.ServiceAccountList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(serviceaccountsResource, c.ns, opts), &v1.ServiceAccountList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.ServiceAccountList{} + for _, item := range obj.(*v1.ServiceAccountList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested serviceAccounts. +func (c *FakeServiceAccounts) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(serviceaccountsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched serviceAccount. +func (c *FakeServiceAccounts) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceAccount, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, name, data, subresources...), &v1.ServiceAccount{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ServiceAccount), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go new file mode 100644 index 0000000000..5fe0585b41 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go @@ -0,0 +1,39 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +type ComponentStatusExpansion interface{} + +type ConfigMapExpansion interface{} + +type EndpointsExpansion interface{} + +type LimitRangeExpansion interface{} + +type PersistentVolumeExpansion interface{} + +type PersistentVolumeClaimExpansion interface{} + +type PodTemplateExpansion interface{} + +type ReplicationControllerExpansion interface{} + +type ResourceQuotaExpansion interface{} + +type SecretExpansion interface{} + +type ServiceAccountExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go new file mode 100644 index 0000000000..998f03452c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// LimitRangesGetter has a method to return a LimitRangeInterface. +// A group's client should implement this interface. +type LimitRangesGetter interface { + LimitRanges(namespace string) LimitRangeInterface +} + +// LimitRangeInterface has methods to work with LimitRange resources. +type LimitRangeInterface interface { + Create(*v1.LimitRange) (*v1.LimitRange, error) + Update(*v1.LimitRange) (*v1.LimitRange, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.LimitRange, error) + List(opts meta_v1.ListOptions) (*v1.LimitRangeList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.LimitRange, err error) + LimitRangeExpansion +} + +// limitRanges implements LimitRangeInterface +type limitRanges struct { + client rest.Interface + ns string +} + +// newLimitRanges returns a LimitRanges +func newLimitRanges(c *CoreV1Client, namespace string) *limitRanges { + return &limitRanges{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. +func (c *limitRanges) Create(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Post(). + Namespace(c.ns). + Resource("limitranges"). + Body(limitRange). + Do(). + Into(result) + return +} + +// Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. +func (c *limitRanges) Update(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Put(). 
+ Namespace(c.ns). + Resource("limitranges"). + Name(limitRange.Name). + Body(limitRange). + Do(). + Into(result) + return +} + +// Delete takes name of the limitRange and deletes it. Returns an error if one occurs. +func (c *limitRanges) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("limitranges"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *limitRanges) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. +func (c *limitRanges) Get(name string, options meta_v1.GetOptions) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Get(). + Namespace(c.ns). + Resource("limitranges"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of LimitRanges that match those selectors. +func (c *limitRanges) List(opts meta_v1.ListOptions) (result *v1.LimitRangeList, err error) { + result = &v1.LimitRangeList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested limitRanges. +func (c *limitRanges) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched limitRange. +func (c *limitRanges) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("limitranges"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go new file mode 100644 index 0000000000..3092bd8e18 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go @@ -0,0 +1,161 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// NamespacesGetter has a method to return a NamespaceInterface. +// A group's client should implement this interface. 
+type NamespacesGetter interface { + Namespaces() NamespaceInterface +} + +// NamespaceInterface has methods to work with Namespace resources. +type NamespaceInterface interface { + Create(*v1.Namespace) (*v1.Namespace, error) + Update(*v1.Namespace) (*v1.Namespace, error) + UpdateStatus(*v1.Namespace) (*v1.Namespace, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Namespace, error) + List(opts meta_v1.ListOptions) (*v1.NamespaceList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) + NamespaceExpansion +} + +// namespaces implements NamespaceInterface +type namespaces struct { + client rest.Interface +} + +// newNamespaces returns a Namespaces +func newNamespaces(c *CoreV1Client) *namespaces { + return &namespaces{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. +func (c *namespaces) Create(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Post(). + Resource("namespaces"). + Body(namespace). + Do(). + Into(result) + return +} + +// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. +func (c *namespaces) Update(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Put(). + Resource("namespaces"). + Name(namespace.Name). + Body(namespace). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *namespaces) UpdateStatus(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Put(). + Resource("namespaces"). + Name(namespace.Name). + SubResource("status"). + Body(namespace). + Do(). + Into(result) + return +} + +// Delete takes name of the namespace and deletes it. Returns an error if one occurs. +func (c *namespaces) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Resource("namespaces"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *namespaces) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Resource("namespaces"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. +func (c *namespaces) Get(name string, options meta_v1.GetOptions) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Get(). + Resource("namespaces"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Namespaces that match those selectors. +func (c *namespaces) List(opts meta_v1.ListOptions) (result *v1.NamespaceList, err error) { + result = &v1.NamespaceList{} + err = c.client.Get(). + Resource("namespaces"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested namespaces. +func (c *namespaces) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("namespaces"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched namespace. +func (c *namespaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Patch(pt). + Resource("namespaces"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go new file mode 100644 index 0000000000..2034300007 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go @@ -0,0 +1,31 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import "k8s.io/client-go/pkg/api/v1" + +// The NamespaceExpansion interface allows manually adding extra methods to the NamespaceInterface. +type NamespaceExpansion interface { + Finalize(item *v1.Namespace) (*v1.Namespace, error) +} + +// Finalize takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. +func (c *namespaces) Finalize(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Put().Resource("namespaces").Name(namespace.Name).SubResource("finalize").Body(namespace).Do().Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go new file mode 100644 index 0000000000..6b82d4fabd --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go @@ -0,0 +1,161 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// NodesGetter has a method to return a NodeInterface. +// A group's client should implement this interface. 
+type NodesGetter interface { + Nodes() NodeInterface +} + +// NodeInterface has methods to work with Node resources. +type NodeInterface interface { + Create(*v1.Node) (*v1.Node, error) + Update(*v1.Node) (*v1.Node, error) + UpdateStatus(*v1.Node) (*v1.Node, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Node, error) + List(opts meta_v1.ListOptions) (*v1.NodeList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error) + NodeExpansion +} + +// nodes implements NodeInterface +type nodes struct { + client rest.Interface +} + +// newNodes returns a Nodes +func newNodes(c *CoreV1Client) *nodes { + return &nodes{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. +func (c *nodes) Create(node *v1.Node) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Post(). + Resource("nodes"). + Body(node). + Do(). + Into(result) + return +} + +// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. +func (c *nodes) Update(node *v1.Node) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Put(). + Resource("nodes"). + Name(node.Name). + Body(node). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *nodes) UpdateStatus(node *v1.Node) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Put(). + Resource("nodes"). + Name(node.Name). + SubResource("status"). + Body(node). + Do(). + Into(result) + return +} + +// Delete takes name of the node and deletes it. Returns an error if one occurs. +func (c *nodes) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Resource("nodes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *nodes) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Resource("nodes"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the node, and returns the corresponding node object, and an error if there is any. +func (c *nodes) Get(name string, options meta_v1.GetOptions) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Get(). + Resource("nodes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Nodes that match those selectors. +func (c *nodes) List(opts meta_v1.ListOptions) (result *v1.NodeList, err error) { + result = &v1.NodeList{} + err = c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested nodes. +func (c *nodes) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Watch() +} + +// Patch applies the patch and returns the patched node. +func (c *nodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Patch(pt). + Resource("nodes"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go new file mode 100644 index 0000000000..29c12aabcd --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go @@ -0,0 +1,43 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/pkg/api/v1" +) + +// The NodeExpansion interface allows manually adding extra methods to the NodeInterface. +type NodeExpansion interface { + // PatchStatus modifies the status of an existing node. It returns the copy + // of the node that the server returns, or an error. + PatchStatus(nodeName string, data []byte) (*v1.Node, error) +} + +// PatchStatus modifies the status of an existing node. It returns the copy of +// the node that the server returns, or an error. +func (c *nodes) PatchStatus(nodeName string, data []byte) (*v1.Node, error) { + result := &v1.Node{} + err := c.client.Patch(types.StrategicMergePatchType). + Resource("nodes"). + Name(nodeName). + SubResource("status"). + Body(data). + Do(). + Into(result) + return result, err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go new file mode 100644 index 0000000000..16a4b7316f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go @@ -0,0 +1,161 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// PersistentVolumesGetter has a method to return a PersistentVolumeInterface. +// A group's client should implement this interface. 
+type PersistentVolumesGetter interface { + PersistentVolumes() PersistentVolumeInterface +} + +// PersistentVolumeInterface has methods to work with PersistentVolume resources. +type PersistentVolumeInterface interface { + Create(*v1.PersistentVolume) (*v1.PersistentVolume, error) + Update(*v1.PersistentVolume) (*v1.PersistentVolume, error) + UpdateStatus(*v1.PersistentVolume) (*v1.PersistentVolume, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.PersistentVolume, error) + List(opts meta_v1.ListOptions) (*v1.PersistentVolumeList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolume, err error) + PersistentVolumeExpansion +} + +// persistentVolumes implements PersistentVolumeInterface +type persistentVolumes struct { + client rest.Interface +} + +// newPersistentVolumes returns a PersistentVolumes +func newPersistentVolumes(c *CoreV1Client) *persistentVolumes { + return &persistentVolumes{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. +func (c *persistentVolumes) Create(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Post(). + Resource("persistentvolumes"). + Body(persistentVolume). + Do(). + Into(result) + return +} + +// Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. +func (c *persistentVolumes) Update(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Put(). + Resource("persistentvolumes"). + Name(persistentVolume.Name). + Body(persistentVolume). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *persistentVolumes) UpdateStatus(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Put(). + Resource("persistentvolumes"). + Name(persistentVolume.Name). + SubResource("status"). + Body(persistentVolume). + Do(). + Into(result) + return +} + +// Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. +func (c *persistentVolumes) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Resource("persistentvolumes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *persistentVolumes) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Resource("persistentvolumes"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. 
+func (c *persistentVolumes) Get(name string, options meta_v1.GetOptions) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Get(). + Resource("persistentvolumes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. +func (c *persistentVolumes) List(opts meta_v1.ListOptions) (result *v1.PersistentVolumeList, err error) { + result = &v1.PersistentVolumeList{} + err = c.client.Get(). + Resource("persistentvolumes"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested persistentVolumes. +func (c *persistentVolumes) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("persistentvolumes"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched persistentVolume. +func (c *persistentVolumes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Patch(pt). + Resource("persistentvolumes"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go new file mode 100644 index 0000000000..ae7cc4a4f4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// PersistentVolumeClaimsGetter has a method to return a PersistentVolumeClaimInterface. +// A group's client should implement this interface. +type PersistentVolumeClaimsGetter interface { + PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface +} + +// PersistentVolumeClaimInterface has methods to work with PersistentVolumeClaim resources. 
+type PersistentVolumeClaimInterface interface { + Create(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) + Update(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) + UpdateStatus(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.PersistentVolumeClaim, error) + List(opts meta_v1.ListOptions) (*v1.PersistentVolumeClaimList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolumeClaim, err error) + PersistentVolumeClaimExpansion +} + +// persistentVolumeClaims implements PersistentVolumeClaimInterface +type persistentVolumeClaims struct { + client rest.Interface + ns string +} + +// newPersistentVolumeClaims returns a PersistentVolumeClaims +func newPersistentVolumeClaims(c *CoreV1Client, namespace string) *persistentVolumeClaims { + return &persistentVolumeClaims{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. +func (c *persistentVolumeClaims) Create(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { + result = &v1.PersistentVolumeClaim{} + err = c.client.Post(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Body(persistentVolumeClaim). + Do(). + Into(result) + return +} + +// Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. +func (c *persistentVolumeClaims) Update(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { + result = &v1.PersistentVolumeClaim{} + err = c.client.Put(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Name(persistentVolumeClaim.Name). + Body(persistentVolumeClaim). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *persistentVolumeClaims) UpdateStatus(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { + result = &v1.PersistentVolumeClaim{} + err = c.client.Put(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Name(persistentVolumeClaim.Name). + SubResource("status"). + Body(persistentVolumeClaim). + Do(). + Into(result) + return +} + +// Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs. +func (c *persistentVolumeClaims) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *persistentVolumeClaims) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). 
+ Error() +} + +// Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. +func (c *persistentVolumeClaims) Get(name string, options meta_v1.GetOptions) (result *v1.PersistentVolumeClaim, err error) { + result = &v1.PersistentVolumeClaim{} + err = c.client.Get(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. +func (c *persistentVolumeClaims) List(opts meta_v1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { + result = &v1.PersistentVolumeClaimList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested persistentVolumeClaims. +func (c *persistentVolumeClaims) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched persistentVolumeClaim. +func (c *persistentVolumeClaims) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolumeClaim, err error) { + result = &v1.PersistentVolumeClaim{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go new file mode 100644 index 0000000000..5648750eaa --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// PodsGetter has a method to return a PodInterface. +// A group's client should implement this interface. +type PodsGetter interface { + Pods(namespace string) PodInterface +} + +// PodInterface has methods to work with Pod resources. 
+type PodInterface interface { + Create(*v1.Pod) (*v1.Pod, error) + Update(*v1.Pod) (*v1.Pod, error) + UpdateStatus(*v1.Pod) (*v1.Pod, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Pod, error) + List(opts meta_v1.ListOptions) (*v1.PodList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) + PodExpansion +} + +// pods implements PodInterface +type pods struct { + client rest.Interface + ns string +} + +// newPods returns a Pods +func newPods(c *CoreV1Client, namespace string) *pods { + return &pods{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *pods) Create(pod *v1.Pod) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Post(). + Namespace(c.ns). + Resource("pods"). + Body(pod). + Do(). + Into(result) + return +} + +// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *pods) Update(pod *v1.Pod) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pods"). + Name(pod.Name). + Body(pod). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *pods) UpdateStatus(pod *v1.Pod) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pods"). + Name(pod.Name). + SubResource("status"). + Body(pod). + Do(). + Into(result) + return +} + +// Delete takes name of the pod and deletes it. Returns an error if one occurs. +func (c *pods) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pods"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *pods) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. +func (c *pods) Get(name string, options meta_v1.GetOptions) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Pods that match those selectors. +func (c *pods) List(opts meta_v1.ListOptions) (result *v1.PodList, err error) { + result = &v1.PodList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested pods. +func (c *pods) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("pods"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched pod. +func (c *pods) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("pods"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go new file mode 100644 index 0000000000..82f3b7fba3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go @@ -0,0 +1,45 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/api/v1" + policy "k8s.io/client-go/pkg/apis/policy/v1beta1" + restclient "k8s.io/client-go/rest" +) + +// The PodExpansion interface allows manually adding extra methods to the PodInterface. +type PodExpansion interface { + Bind(binding *v1.Binding) error + Evict(eviction *policy.Eviction) error + GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request +} + +// Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored). +func (c *pods) Bind(binding *v1.Binding) error { + return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).SubResource("binding").Body(binding).Do().Error() +} + +func (c *pods) Evict(eviction *policy.Eviction) error { + return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do().Error() +} + +// Get constructs a request for getting the logs for a pod +func (c *pods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request { + return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, api.ParameterCodec) +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go new file mode 100644 index 0000000000..19c82f17b3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// PodTemplatesGetter has a method to return a PodTemplateInterface. +// A group's client should implement this interface. +type PodTemplatesGetter interface { + PodTemplates(namespace string) PodTemplateInterface +} + +// PodTemplateInterface has methods to work with PodTemplate resources. +type PodTemplateInterface interface { + Create(*v1.PodTemplate) (*v1.PodTemplate, error) + Update(*v1.PodTemplate) (*v1.PodTemplate, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.PodTemplate, error) + List(opts meta_v1.ListOptions) (*v1.PodTemplateList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodTemplate, err error) + PodTemplateExpansion +} + +// podTemplates implements PodTemplateInterface +type podTemplates struct { + client rest.Interface + ns string +} + +// newPodTemplates returns a PodTemplates +func newPodTemplates(c *CoreV1Client, namespace string) *podTemplates { + return &podTemplates{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. +func (c *podTemplates) Create(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podtemplates"). + Body(podTemplate). + Do(). + Into(result) + return +} + +// Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. +func (c *podTemplates) Update(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podtemplates"). + Name(podTemplate.Name). + Body(podTemplate). + Do(). + Into(result) + return +} + +// Delete takes name of the podTemplate and deletes it. Returns an error if one occurs. +func (c *podTemplates) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podtemplates"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podTemplates) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. +func (c *podTemplates) Get(name string, options meta_v1.GetOptions) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podtemplates"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodTemplates that match those selectors. 
+func (c *podTemplates) List(opts meta_v1.ListOptions) (result *v1.PodTemplateList, err error) { + result = &v1.PodTemplateList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podTemplates. +func (c *podTemplates) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched podTemplate. +func (c *podTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("podtemplates"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go new file mode 100644 index 0000000000..2f4f4fa9e1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// ReplicationControllersGetter has a method to return a ReplicationControllerInterface. +// A group's client should implement this interface. +type ReplicationControllersGetter interface { + ReplicationControllers(namespace string) ReplicationControllerInterface +} + +// ReplicationControllerInterface has methods to work with ReplicationController resources. 
+type ReplicationControllerInterface interface { + Create(*v1.ReplicationController) (*v1.ReplicationController, error) + Update(*v1.ReplicationController) (*v1.ReplicationController, error) + UpdateStatus(*v1.ReplicationController) (*v1.ReplicationController, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ReplicationController, error) + List(opts meta_v1.ListOptions) (*v1.ReplicationControllerList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) + ReplicationControllerExpansion +} + +// replicationControllers implements ReplicationControllerInterface +type replicationControllers struct { + client rest.Interface + ns string +} + +// newReplicationControllers returns a ReplicationControllers +func newReplicationControllers(c *CoreV1Client, namespace string) *replicationControllers { + return &replicationControllers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. +func (c *replicationControllers) Create(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Body(replicationController). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. +func (c *replicationControllers) Update(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(replicationController.Name). + Body(replicationController). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *replicationControllers) UpdateStatus(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(replicationController.Name). + SubResource("status"). + Body(replicationController). + Do(). + Into(result) + return +} + +// Delete takes name of the replicationController and deletes it. Returns an error if one occurs. +func (c *replicationControllers) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicationControllers) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). 
+ Error() +} + +// Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. +func (c *replicationControllers) Get(name string, options meta_v1.GetOptions) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. +func (c *replicationControllers) List(opts meta_v1.ListOptions) (result *v1.ReplicationControllerList, err error) { + result = &v1.ReplicationControllerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicationControllers. +func (c *replicationControllers) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched replicationController. +func (c *replicationControllers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("replicationcontrollers"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go new file mode 100644 index 0000000000..565fe1e6d6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// ResourceQuotasGetter has a method to return a ResourceQuotaInterface. +// A group's client should implement this interface. +type ResourceQuotasGetter interface { + ResourceQuotas(namespace string) ResourceQuotaInterface +} + +// ResourceQuotaInterface has methods to work with ResourceQuota resources. 
+type ResourceQuotaInterface interface { + Create(*v1.ResourceQuota) (*v1.ResourceQuota, error) + Update(*v1.ResourceQuota) (*v1.ResourceQuota, error) + UpdateStatus(*v1.ResourceQuota) (*v1.ResourceQuota, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ResourceQuota, error) + List(opts meta_v1.ListOptions) (*v1.ResourceQuotaList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResourceQuota, err error) + ResourceQuotaExpansion +} + +// resourceQuotas implements ResourceQuotaInterface +type resourceQuotas struct { + client rest.Interface + ns string +} + +// newResourceQuotas returns a ResourceQuotas +func newResourceQuotas(c *CoreV1Client, namespace string) *resourceQuotas { + return &resourceQuotas{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. +func (c *resourceQuotas) Create(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Post(). + Namespace(c.ns). + Resource("resourcequotas"). + Body(resourceQuota). + Do(). + Into(result) + return +} + +// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. +func (c *resourceQuotas) Update(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(resourceQuota.Name). + Body(resourceQuota). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *resourceQuotas) UpdateStatus(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(resourceQuota.Name). + SubResource("status"). + Body(resourceQuota). + Do(). + Into(result) + return +} + +// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. +func (c *resourceQuotas) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *resourceQuotas) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. +func (c *resourceQuotas) Get(name string, options meta_v1.GetOptions) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. +func (c *resourceQuotas) List(opts meta_v1.ListOptions) (result *v1.ResourceQuotaList, err error) { + result = &v1.ResourceQuotaList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resourceQuotas. +func (c *resourceQuotas) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched resourceQuota. +func (c *resourceQuotas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("resourcequotas"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go new file mode 100644 index 0000000000..fbcede8185 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// SecretsGetter has a method to return a SecretInterface. +// A group's client should implement this interface. +type SecretsGetter interface { + Secrets(namespace string) SecretInterface +} + +// SecretInterface has methods to work with Secret resources. +type SecretInterface interface { + Create(*v1.Secret) (*v1.Secret, error) + Update(*v1.Secret) (*v1.Secret, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Secret, error) + List(opts meta_v1.ListOptions) (*v1.SecretList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) + SecretExpansion +} + +// secrets implements SecretInterface +type secrets struct { + client rest.Interface + ns string +} + +// newSecrets returns a Secrets +func newSecrets(c *CoreV1Client, namespace string) *secrets { + return &secrets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a secret and creates it. 
Returns the server's representation of the secret, and an error, if there is any. +func (c *secrets) Create(secret *v1.Secret) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Post(). + Namespace(c.ns). + Resource("secrets"). + Body(secret). + Do(). + Into(result) + return +} + +// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. +func (c *secrets) Update(secret *v1.Secret) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Put(). + Namespace(c.ns). + Resource("secrets"). + Name(secret.Name). + Body(secret). + Do(). + Into(result) + return +} + +// Delete takes name of the secret and deletes it. Returns an error if one occurs. +func (c *secrets) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("secrets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *secrets) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. +func (c *secrets) Get(name string, options meta_v1.GetOptions) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Secrets that match those selectors. +func (c *secrets) List(opts meta_v1.ListOptions) (result *v1.SecretList, err error) { + result = &v1.SecretList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested secrets. +func (c *secrets) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched secret. +func (c *secrets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("secrets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go new file mode 100644 index 0000000000..0eccf79a80 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// ServicesGetter has a method to return a ServiceInterface. +// A group's client should implement this interface. +type ServicesGetter interface { + Services(namespace string) ServiceInterface +} + +// ServiceInterface has methods to work with Service resources. +type ServiceInterface interface { + Create(*v1.Service) (*v1.Service, error) + Update(*v1.Service) (*v1.Service, error) + UpdateStatus(*v1.Service) (*v1.Service, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Service, error) + List(opts meta_v1.ListOptions) (*v1.ServiceList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) + ServiceExpansion +} + +// services implements ServiceInterface +type services struct { + client rest.Interface + ns string +} + +// newServices returns a Services +func newServices(c *CoreV1Client, namespace string) *services { + return &services{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Create(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Post(). + Namespace(c.ns). + Resource("services"). + Body(service). + Do(). + Into(result) + return +} + +// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Update(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + Body(service). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *services) UpdateStatus(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + SubResource("status"). + Body(service). + Do(). + Into(result) + return +} + +// Delete takes name of the service and deletes it. Returns an error if one occurs. +func (c *services) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *services) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the service, and returns the corresponding service object, and an error if there is any. 
+func (c *services) Get(name string, options meta_v1.GetOptions) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Services that match those selectors. +func (c *services) List(opts meta_v1.ListOptions) (result *v1.ServiceList, err error) { + result = &v1.ServiceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested services. +func (c *services) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched service. +func (c *services) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("services"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go new file mode 100644 index 0000000000..4937fd1a39 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go @@ -0,0 +1,41 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/util/net" + restclient "k8s.io/client-go/rest" +) + +// The ServiceExpansion interface allows manually adding extra methods to the ServiceInterface. +type ServiceExpansion interface { + ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper +} + +// ProxyGet returns a response of the service by calling it through the proxy. +func (c *services) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { + request := c.client.Get(). + Namespace(c.ns). + Resource("services"). + SubResource("proxy"). + Name(net.JoinSchemeNamePort(scheme, name, port)). + Suffix(path) + for k, v := range params { + request = request.Param(k, v) + } + return request +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go new file mode 100644 index 0000000000..f71789f6ab --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// ServiceAccountsGetter has a method to return a ServiceAccountInterface. +// A group's client should implement this interface. +type ServiceAccountsGetter interface { + ServiceAccounts(namespace string) ServiceAccountInterface +} + +// ServiceAccountInterface has methods to work with ServiceAccount resources. +type ServiceAccountInterface interface { + Create(*v1.ServiceAccount) (*v1.ServiceAccount, error) + Update(*v1.ServiceAccount) (*v1.ServiceAccount, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ServiceAccount, error) + List(opts meta_v1.ListOptions) (*v1.ServiceAccountList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceAccount, err error) + ServiceAccountExpansion +} + +// serviceAccounts implements ServiceAccountInterface +type serviceAccounts struct { + client rest.Interface + ns string +} + +// newServiceAccounts returns a ServiceAccounts +func newServiceAccounts(c *CoreV1Client, namespace string) *serviceAccounts { + return &serviceAccounts{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. +func (c *serviceAccounts) Create(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Post(). + Namespace(c.ns). + Resource("serviceaccounts"). + Body(serviceAccount). + Do(). + Into(result) + return +} + +// Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. +func (c *serviceAccounts) Update(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Put(). + Namespace(c.ns). + Resource("serviceaccounts"). + Name(serviceAccount.Name). + Body(serviceAccount). + Do(). + Into(result) + return +} + +// Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. +func (c *serviceAccounts) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("serviceaccounts"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *serviceAccounts) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). 
+ Error() +} + +// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. +func (c *serviceAccounts) Get(name string, options meta_v1.GetOptions) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serviceaccounts"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. +func (c *serviceAccounts) List(opts meta_v1.ListOptions) (result *v1.ServiceAccountList, err error) { + result = &v1.ServiceAccountList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested serviceAccounts. +func (c *serviceAccounts) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched serviceAccount. +func (c *serviceAccounts) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("serviceaccounts"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go new file mode 100644 index 0000000000..8c132db9ba --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + rest "k8s.io/client-go/rest" +) + +// DaemonSetsGetter has a method to return a DaemonSetInterface. +// A group's client should implement this interface. +type DaemonSetsGetter interface { + DaemonSets(namespace string) DaemonSetInterface +} + +// DaemonSetInterface has methods to work with DaemonSet resources. 
+type DaemonSetInterface interface { + Create(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) + Update(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) + UpdateStatus(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.DaemonSet, error) + List(opts v1.ListOptions) (*v1beta1.DaemonSetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) + DaemonSetExpansion +} + +// daemonSets implements DaemonSetInterface +type daemonSets struct { + client rest.Interface + ns string +} + +// newDaemonSets returns a DaemonSets +func newDaemonSets(c *ExtensionsV1beta1Client, namespace string) *daemonSets { + return &daemonSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Create(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("daemonsets"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Update(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + Body(daemonSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *daemonSets) UpdateStatus(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + SubResource("status"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. +func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. +func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. 
+func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { + result = &v1beta1.DaemonSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested daemonSets. +func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched daemonSet. +func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("daemonsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go new file mode 100644 index 0000000000..7d0122c2ae --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + rest "k8s.io/client-go/rest" +) + +// DeploymentsGetter has a method to return a DeploymentInterface. +// A group's client should implement this interface. +type DeploymentsGetter interface { + Deployments(namespace string) DeploymentInterface +} + +// DeploymentInterface has methods to work with Deployment resources. +type DeploymentInterface interface { + Create(*v1beta1.Deployment) (*v1beta1.Deployment, error) + Update(*v1beta1.Deployment) (*v1beta1.Deployment, error) + UpdateStatus(*v1beta1.Deployment) (*v1beta1.Deployment, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Deployment, error) + List(opts v1.ListOptions) (*v1beta1.DeploymentList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) + DeploymentExpansion +} + +// deployments implements DeploymentInterface +type deployments struct { + client rest.Interface + ns string +} + +// newDeployments returns a Deployments +func newDeployments(c *ExtensionsV1beta1Client, namespace string) *deployments { + return &deployments{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a deployment and creates it. 
Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deployments"). + Body(deployment). + Do(). + Into(result) + return +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + Body(deployment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + SubResource("status"). + Body(deployment). + Do(). + Into(result) + return +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { + result = &v1beta1.DeploymentList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched deployment. +func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("deployments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go new file mode 100644 index 0000000000..e737f09ae1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go @@ -0,0 +1,29 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import "k8s.io/client-go/pkg/apis/extensions/v1beta1" + +// The DeploymentExpansion interface allows manually adding extra methods to the DeploymentInterface. +type DeploymentExpansion interface { + Rollback(*v1beta1.DeploymentRollback) error +} + +// Rollback applied the provided DeploymentRollback to the named deployment in the current namespace. +func (c *deployments) Rollback(deploymentRollback *v1beta1.DeploymentRollback) error { + return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).SubResource("rollback").Body(deploymentRollback).Do().Error() +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go new file mode 100644 index 0000000000..11b5238972 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go new file mode 100644 index 0000000000..5284346e19 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -0,0 +1,118 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + rest "k8s.io/client-go/rest" +) + +type ExtensionsV1beta1Interface interface { + RESTClient() rest.Interface + DaemonSetsGetter + DeploymentsGetter + IngressesGetter + PodSecurityPoliciesGetter + ReplicaSetsGetter + ScalesGetter + ThirdPartyResourcesGetter +} + +// ExtensionsV1beta1Client is used to interact with features provided by the extensions group. +type ExtensionsV1beta1Client struct { + restClient rest.Interface +} + +func (c *ExtensionsV1beta1Client) DaemonSets(namespace string) DaemonSetInterface { + return newDaemonSets(c, namespace) +} + +func (c *ExtensionsV1beta1Client) Deployments(namespace string) DeploymentInterface { + return newDeployments(c, namespace) +} + +func (c *ExtensionsV1beta1Client) Ingresses(namespace string) IngressInterface { + return newIngresses(c, namespace) +} + +func (c *ExtensionsV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { + return newPodSecurityPolicies(c) +} + +func (c *ExtensionsV1beta1Client) ReplicaSets(namespace string) ReplicaSetInterface { + return newReplicaSets(c, namespace) +} + +func (c *ExtensionsV1beta1Client) Scales(namespace string) ScaleInterface { + return newScales(c, namespace) +} + +func (c *ExtensionsV1beta1Client) ThirdPartyResources() ThirdPartyResourceInterface { + return newThirdPartyResources(c) +} + +// NewForConfig creates a new ExtensionsV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*ExtensionsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &ExtensionsV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new ExtensionsV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ExtensionsV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ExtensionsV1beta1Client for the given RESTClient. +func New(c rest.Interface) *ExtensionsV1beta1Client { + return &ExtensionsV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ExtensionsV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go new file mode 100644 index 0000000000..c6548330a0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go new file mode 100644 index 0000000000..e7413703b6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakeDaemonSets implements DaemonSetInterface +type FakeDaemonSets struct { + Fake *FakeExtensionsV1beta1 + ns string +} + +var daemonsetsResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "daemonsets"} + +func (c *FakeDaemonSets) Create(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.DaemonSet), err +} + +func (c *FakeDaemonSets) Update(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.DaemonSet), err +} + +func (c *FakeDaemonSets) UpdateStatus(daemonSet *v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1beta1.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.DaemonSet), err +} + +func (c *FakeDaemonSets) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewDeleteAction(daemonsetsResource, c.ns, name), &v1beta1.DaemonSet{}) + + return err +} + +func (c *FakeDaemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.DaemonSetList{}) + return err +} + +func (c *FakeDaemonSets) Get(name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &v1beta1.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.DaemonSet), err +} + +func (c *FakeDaemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(daemonsetsResource, c.ns, opts), &v1beta1.DaemonSetList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.DaemonSetList{} + for _, item := range obj.(*v1beta1.DaemonSetList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested daemonSets. +func (c *FakeDaemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched daemonSet. +func (c *FakeDaemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, data, subresources...), &v1beta1.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.DaemonSet), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go new file mode 100644 index 0000000000..e4a2a0305e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakeDeployments implements DeploymentInterface +type FakeDeployments struct { + Fake *FakeExtensionsV1beta1 + ns string +} + +var deploymentsResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "deployments"} + +func (c *FakeDeployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +func (c *FakeDeployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +func (c *FakeDeployments) UpdateStatus(deployment *v1beta1.Deployment) (*v1beta1.Deployment, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +func (c *FakeDeployments) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) + + return err +} + +func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{}) + return err +} + +func (c *FakeDeployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +func (c *FakeDeployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(deploymentsResource, c.ns, opts), &v1beta1.DeploymentList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.DeploymentList{} + for _, item := range obj.(*v1beta1.DeploymentList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *FakeDeployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched deployment. +func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, data, subresources...), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_deployment_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go similarity index 82% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_deployment_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go index 8b73f41073..f49c336664 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_deployment_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go @@ -17,11 +17,11 @@ limitations under the License. package fake import ( - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/client-go/pkg/apis/extensions/v1beta1" + core "k8s.io/client-go/testing" ) -func (c *FakeDeployments) Rollback(deploymentRollback *extensions.DeploymentRollback) error { +func (c *FakeDeployments) Rollback(deploymentRollback *v1beta1.DeploymentRollback) error { action := core.CreateActionImpl{} action.Verb = "create" action.Resource = deploymentsResource diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go new file mode 100644 index 0000000000..c0489371ac --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + v1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeExtensionsV1beta1 struct { + *testing.Fake +} + +func (c *FakeExtensionsV1beta1) DaemonSets(namespace string) v1beta1.DaemonSetInterface { + return &FakeDaemonSets{c, namespace} +} + +func (c *FakeExtensionsV1beta1) Deployments(namespace string) v1beta1.DeploymentInterface { + return &FakeDeployments{c, namespace} +} + +func (c *FakeExtensionsV1beta1) Ingresses(namespace string) v1beta1.IngressInterface { + return &FakeIngresses{c, namespace} +} + +func (c *FakeExtensionsV1beta1) PodSecurityPolicies() v1beta1.PodSecurityPolicyInterface { + return &FakePodSecurityPolicies{c} +} + +func (c *FakeExtensionsV1beta1) ReplicaSets(namespace string) v1beta1.ReplicaSetInterface { + return &FakeReplicaSets{c, namespace} +} + +func (c *FakeExtensionsV1beta1) Scales(namespace string) v1beta1.ScaleInterface { + return &FakeScales{c, namespace} +} + +func (c *FakeExtensionsV1beta1) ThirdPartyResources() v1beta1.ThirdPartyResourceInterface { + return &FakeThirdPartyResources{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeExtensionsV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go new file mode 100644 index 0000000000..7ad797e389 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakeIngresses implements IngressInterface +type FakeIngresses struct { + Fake *FakeExtensionsV1beta1 + ns string +} + +var ingressesResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "ingresses"} + +func (c *FakeIngresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Ingress), err +} + +func (c *FakeIngresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Ingress), err +} + +func (c *FakeIngresses) UpdateStatus(ingress *v1beta1.Ingress) (*v1beta1.Ingress, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1beta1.Ingress{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Ingress), err +} + +func (c *FakeIngresses) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(ingressesResource, c.ns, name), &v1beta1.Ingress{}) + + return err +} + +func (c *FakeIngresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(ingressesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.IngressList{}) + return err +} + +func (c *FakeIngresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(ingressesResource, c.ns, name), &v1beta1.Ingress{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Ingress), err +} + +func (c *FakeIngresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(ingressesResource, c.ns, opts), &v1beta1.IngressList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.IngressList{} + for _, item := range obj.(*v1beta1.IngressList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ingresses. +func (c *FakeIngresses) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(ingressesResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched ingress. +func (c *FakeIngresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, data, subresources...), &v1beta1.Ingress{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Ingress), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go new file mode 100644 index 0000000000..78b0868cb5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go @@ -0,0 +1,110 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakePodSecurityPolicies implements PodSecurityPolicyInterface +type FakePodSecurityPolicies struct { + Fake *FakeExtensionsV1beta1 +} + +var podsecuritypoliciesResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "podsecuritypolicies"} + +func (c *FakePodSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodSecurityPolicy), err +} + +func (c *FakePodSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodSecurityPolicy), err +} + +func (c *FakePodSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{}) + return err +} + +func (c *FakePodSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(podsecuritypoliciesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.PodSecurityPolicyList{}) + return err +} + +func (c *FakePodSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodSecurityPolicy), err +} + +func (c *FakePodSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(podsecuritypoliciesResource, opts), &v1beta1.PodSecurityPolicyList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.PodSecurityPolicyList{} + for _, item := range obj.(*v1beta1.PodSecurityPolicyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested podSecurityPolicies. +func (c *FakePodSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(podsecuritypoliciesResource, opts)) +} + +// Patch applies the patch and returns the patched podSecurityPolicy. +func (c *FakePodSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, name, data, subresources...), &v1beta1.PodSecurityPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodSecurityPolicy), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go new file mode 100644 index 0000000000..b5fbc6b5a9 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakeReplicaSets implements ReplicaSetInterface +type FakeReplicaSets struct { + Fake *FakeExtensionsV1beta1 + ns string +} + +var replicasetsResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "replicasets"} + +func (c *FakeReplicaSets) Create(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ReplicaSet), err +} + +func (c *FakeReplicaSets) Update(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ReplicaSet), err +} + +func (c *FakeReplicaSets) UpdateStatus(replicaSet *v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1beta1.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ReplicaSet), err +} + +func (c *FakeReplicaSets) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(replicasetsResource, c.ns, name), &v1beta1.ReplicaSet{}) + + return err +} + +func (c *FakeReplicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.ReplicaSetList{}) + return err +} + +func (c *FakeReplicaSets) Get(name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &v1beta1.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ReplicaSet), err +} + +func (c *FakeReplicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(replicasetsResource, c.ns, opts), &v1beta1.ReplicaSetList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.ReplicaSetList{} + for _, item := range obj.(*v1beta1.ReplicaSetList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested replicaSets. +func (c *FakeReplicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched replicaSet. +func (c *FakeReplicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, data, subresources...), &v1beta1.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ReplicaSet), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go new file mode 100644 index 0000000000..77f8c61a7d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +// FakeScales implements ScaleInterface +type FakeScales struct { + Fake *FakeExtensionsV1beta1 + ns string +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale_expansion.go new file mode 100644 index 0000000000..db13b4c37b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale_expansion.go @@ -0,0 +1,47 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/pkg/apis/extensions/v1beta1" + core "k8s.io/client-go/testing" +) + +func (c *FakeScales) Get(kind string, name string) (result *v1beta1.Scale, err error) { + action := core.GetActionImpl{} + action.Verb = "get" + action.Namespace = c.ns + action.Resource = schema.GroupVersionResource{Resource: kind} + action.Subresource = "scale" + action.Name = name + obj, err := c.Fake.Invokes(action, &v1beta1.Scale{}) + result = obj.(*v1beta1.Scale) + return +} + +func (c *FakeScales) Update(kind string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { + action := core.UpdateActionImpl{} + action.Verb = "update" + action.Namespace = c.ns + action.Resource = schema.GroupVersionResource{Resource: kind} + action.Subresource = "scale" + action.Object = scale + obj, err := c.Fake.Invokes(action, scale) + result = obj.(*v1beta1.Scale) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_thirdpartyresource.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_thirdpartyresource.go new file mode 100644 index 0000000000..cb625070af --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_thirdpartyresource.go @@ -0,0 +1,110 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakeThirdPartyResources implements ThirdPartyResourceInterface +type FakeThirdPartyResources struct { + Fake *FakeExtensionsV1beta1 +} + +var thirdpartyresourcesResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "thirdpartyresources"} + +func (c *FakeThirdPartyResources) Create(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(thirdpartyresourcesResource, thirdPartyResource), &v1beta1.ThirdPartyResource{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ThirdPartyResource), err +} + +func (c *FakeThirdPartyResources) Update(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(thirdpartyresourcesResource, thirdPartyResource), &v1beta1.ThirdPartyResource{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ThirdPartyResource), err +} + +func (c *FakeThirdPartyResources) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewRootDeleteAction(thirdpartyresourcesResource, name), &v1beta1.ThirdPartyResource{}) + return err +} + +func (c *FakeThirdPartyResources) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(thirdpartyresourcesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.ThirdPartyResourceList{}) + return err +} + +func (c *FakeThirdPartyResources) Get(name string, options v1.GetOptions) (result *v1beta1.ThirdPartyResource, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(thirdpartyresourcesResource, name), &v1beta1.ThirdPartyResource{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ThirdPartyResource), err +} + +func (c *FakeThirdPartyResources) List(opts v1.ListOptions) (result *v1beta1.ThirdPartyResourceList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(thirdpartyresourcesResource, opts), &v1beta1.ThirdPartyResourceList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.ThirdPartyResourceList{} + for _, item := range obj.(*v1beta1.ThirdPartyResourceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested thirdPartyResources. +func (c *FakeThirdPartyResources) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(thirdpartyresourcesResource, opts)) +} + +// Patch applies the patch and returns the patched thirdPartyResource. +func (c *FakeThirdPartyResources) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ThirdPartyResource, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(thirdpartyresourcesResource, name, data, subresources...), &v1beta1.ThirdPartyResource{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ThirdPartyResource), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..d0a3d64bcf --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +type DaemonSetExpansion interface{} + +type IngressExpansion interface{} + +type PodSecurityPolicyExpansion interface{} + +type ReplicaSetExpansion interface{} + +type ThirdPartyResourceExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go new file mode 100644 index 0000000000..86bf65b8f5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + rest "k8s.io/client-go/rest" +) + +// IngressesGetter has a method to return a IngressInterface. +// A group's client should implement this interface. +type IngressesGetter interface { + Ingresses(namespace string) IngressInterface +} + +// IngressInterface has methods to work with Ingress resources. +type IngressInterface interface { + Create(*v1beta1.Ingress) (*v1beta1.Ingress, error) + Update(*v1beta1.Ingress) (*v1beta1.Ingress, error) + UpdateStatus(*v1beta1.Ingress) (*v1beta1.Ingress, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Ingress, error) + List(opts v1.ListOptions) (*v1beta1.IngressList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) + IngressExpansion +} + +// ingresses implements IngressInterface +type ingresses struct { + client rest.Interface + ns string +} + +// newIngresses returns a Ingresses +func newIngresses(c *ExtensionsV1beta1Client, namespace string) *ingresses { + return &ingresses{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *ingresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Post(). + Namespace(c.ns). + Resource("ingresses"). + Body(ingress). + Do(). + Into(result) + return +} + +// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *ingresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ingresses"). + Name(ingress.Name). + Body(ingress). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *ingresses) UpdateStatus(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ingresses"). + Name(ingress.Name). + SubResource("status"). + Body(ingress). + Do(). + Into(result) + return +} + +// Delete takes name of the ingress and deletes it. Returns an error if one occurs. +func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("ingresses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. +func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Ingresses that match those selectors. +func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { + result = &v1beta1.IngressList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested ingresses. +func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched ingress. +func (c *ingresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("ingresses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go new file mode 100644 index 0000000000..ad57a6cabf --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + rest "k8s.io/client-go/rest" +) + +// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. +// A group's client should implement this interface. +type PodSecurityPoliciesGetter interface { + PodSecurityPolicies() PodSecurityPolicyInterface +} + +// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. +type PodSecurityPolicyInterface interface { + Create(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) + Update(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) + List(opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) + PodSecurityPolicyExpansion +} + +// podSecurityPolicies implements PodSecurityPolicyInterface +type podSecurityPolicies struct { + client rest.Interface +} + +// newPodSecurityPolicies returns a PodSecurityPolicies +func newPodSecurityPolicies(c *ExtensionsV1beta1Client) *podSecurityPolicies { + return &podSecurityPolicies{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *podSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Post(). + Resource("podsecuritypolicies"). + Body(podSecurityPolicy). + Do(). + Into(result) + return +} + +// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *podSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Put(). + Resource("podsecuritypolicies"). + Name(podSecurityPolicy.Name). + Body(podSecurityPolicy). + Do(). + Into(result) + return +} + +// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. +func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("podsecuritypolicies"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("podsecuritypolicies"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. +func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Get(). 
+ Resource("podsecuritypolicies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. +func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { + result = &v1beta1.PodSecurityPolicyList{} + err = c.client.Get(). + Resource("podsecuritypolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podSecurityPolicies. +func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("podsecuritypolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched podSecurityPolicy. +func (c *podSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Patch(pt). + Resource("podsecuritypolicies"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go new file mode 100644 index 0000000000..aa6f505a2b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + rest "k8s.io/client-go/rest" +) + +// ReplicaSetsGetter has a method to return a ReplicaSetInterface. +// A group's client should implement this interface. +type ReplicaSetsGetter interface { + ReplicaSets(namespace string) ReplicaSetInterface +} + +// ReplicaSetInterface has methods to work with ReplicaSet resources. 
+type ReplicaSetInterface interface { + Create(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) + Update(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) + UpdateStatus(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ReplicaSet, error) + List(opts v1.ListOptions) (*v1beta1.ReplicaSetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) + ReplicaSetExpansion +} + +// replicaSets implements ReplicaSetInterface +type replicaSets struct { + client rest.Interface + ns string +} + +// newReplicaSets returns a ReplicaSets +func newReplicaSets(c *ExtensionsV1beta1Client, namespace string) *replicaSets { + return &replicaSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Create(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicasets"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Update(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + Body(replicaSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *replicaSets) UpdateStatus(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + SubResource("status"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. +func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. +func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. 
+func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { + result = &v1beta1.ReplicaSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicaSets. +func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched replicaSet. +func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("replicasets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go new file mode 100644 index 0000000000..733012adee --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// ScalesGetter has a method to return a ScaleInterface. +// A group's client should implement this interface. +type ScalesGetter interface { + Scales(namespace string) ScaleInterface +} + +// ScaleInterface has methods to work with Scale resources. +type ScaleInterface interface { + ScaleExpansion +} + +// scales implements ScaleInterface +type scales struct { + client rest.Interface + ns string +} + +// newScales returns a Scales +func newScales(c *ExtensionsV1beta1Client, namespace string) *scales { + return &scales{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go new file mode 100644 index 0000000000..efeae4b01b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go @@ -0,0 +1,65 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/pkg/apis/extensions/v1beta1" +) + +// The ScaleExpansion interface allows manually adding extra methods to the ScaleInterface. +type ScaleExpansion interface { + Get(kind string, name string) (*v1beta1.Scale, error) + Update(kind string, scale *v1beta1.Scale) (*v1beta1.Scale, error) +} + +// Get takes the reference to scale subresource and returns the subresource or error, if one occurs. +func (c *scales) Get(kind string, name string) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + + // TODO this method needs to take a proper unambiguous kind + fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} + resource, _ := meta.KindToResource(fullyQualifiedKind) + + err = c.client.Get(). + Namespace(c.ns). + Resource(resource.Resource). + Name(name). + SubResource("scale"). + Do(). + Into(result) + return +} + +func (c *scales) Update(kind string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + + // TODO this method needs to take a proper unambiguous kind + fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} + resource, _ := meta.KindToResource(fullyQualifiedKind) + + err = c.client.Put(). + Namespace(scale.Namespace). + Resource(resource.Resource). + Name(scale.Name). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go new file mode 100644 index 0000000000..617c2069be --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + rest "k8s.io/client-go/rest" +) + +// ThirdPartyResourcesGetter has a method to return a ThirdPartyResourceInterface. +// A group's client should implement this interface. +type ThirdPartyResourcesGetter interface { + ThirdPartyResources() ThirdPartyResourceInterface +} + +// ThirdPartyResourceInterface has methods to work with ThirdPartyResource resources. 
+type ThirdPartyResourceInterface interface { + Create(*v1beta1.ThirdPartyResource) (*v1beta1.ThirdPartyResource, error) + Update(*v1beta1.ThirdPartyResource) (*v1beta1.ThirdPartyResource, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ThirdPartyResource, error) + List(opts v1.ListOptions) (*v1beta1.ThirdPartyResourceList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ThirdPartyResource, err error) + ThirdPartyResourceExpansion +} + +// thirdPartyResources implements ThirdPartyResourceInterface +type thirdPartyResources struct { + client rest.Interface +} + +// newThirdPartyResources returns a ThirdPartyResources +func newThirdPartyResources(c *ExtensionsV1beta1Client) *thirdPartyResources { + return &thirdPartyResources{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a thirdPartyResource and creates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. +func (c *thirdPartyResources) Create(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { + result = &v1beta1.ThirdPartyResource{} + err = c.client.Post(). + Resource("thirdpartyresources"). + Body(thirdPartyResource). + Do(). + Into(result) + return +} + +// Update takes the representation of a thirdPartyResource and updates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. +func (c *thirdPartyResources) Update(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { + result = &v1beta1.ThirdPartyResource{} + err = c.client.Put(). + Resource("thirdpartyresources"). + Name(thirdPartyResource.Name). + Body(thirdPartyResource). + Do(). + Into(result) + return +} + +// Delete takes name of the thirdPartyResource and deletes it. Returns an error if one occurs. +func (c *thirdPartyResources) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("thirdpartyresources"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *thirdPartyResources) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("thirdpartyresources"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the thirdPartyResource, and returns the corresponding thirdPartyResource object, and an error if there is any. +func (c *thirdPartyResources) Get(name string, options v1.GetOptions) (result *v1beta1.ThirdPartyResource, err error) { + result = &v1beta1.ThirdPartyResource{} + err = c.client.Get(). + Resource("thirdpartyresources"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ThirdPartyResources that match those selectors. +func (c *thirdPartyResources) List(opts v1.ListOptions) (result *v1beta1.ThirdPartyResourceList, err error) { + result = &v1beta1.ThirdPartyResourceList{} + err = c.client.Get(). + Resource("thirdpartyresources"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested thirdPartyResources. +func (c *thirdPartyResources) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("thirdpartyresources"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched thirdPartyResource. +func (c *thirdPartyResources) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ThirdPartyResource, err error) { + result = &v1beta1.ThirdPartyResource{} + err = c.client.Patch(pt). + Resource("thirdpartyresources"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go new file mode 100644 index 0000000000..11b5238972 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go new file mode 100644 index 0000000000..9c4133e369 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// EvictionsGetter has a method to return a EvictionInterface. +// A group's client should implement this interface. +type EvictionsGetter interface { + Evictions(namespace string) EvictionInterface +} + +// EvictionInterface has methods to work with Eviction resources. 
+type EvictionInterface interface { + EvictionExpansion +} + +// evictions implements EvictionInterface +type evictions struct { + client rest.Interface + ns string +} + +// newEvictions returns a Evictions +func newEvictions(c *PolicyV1beta1Client, namespace string) *evictions { + return &evictions{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go new file mode 100644 index 0000000000..bde1baca6e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + policy "k8s.io/client-go/pkg/apis/policy/v1beta1" +) + +// The EvictionExpansion interface allows manually adding extra methods to the ScaleInterface. +type EvictionExpansion interface { + Evict(eviction *policy.Eviction) error +} + +func (c *evictions) Evict(eviction *policy.Eviction) error { + return c.client.Post(). + AbsPath("/api/v1"). + Namespace(eviction.Namespace). + Resource("pods"). + Name(eviction.Name). + SubResource("eviction"). + Body(eviction). + Do(). + Error() +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go new file mode 100644 index 0000000000..c6548330a0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// Package fake has the automatically generated clients. 
+package fake diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go similarity index 90% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_eviction.go rename to vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go index 70b7ebc56a..a091d8de39 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_eviction.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,6 +18,6 @@ package fake // FakeEvictions implements EvictionInterface type FakeEvictions struct { - Fake *FakePolicy + Fake *FakePolicyV1beta1 ns string } diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go similarity index 77% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_eviction_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go index ff6093872f..35c40713c0 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_eviction_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go @@ -17,16 +17,16 @@ limitations under the License. package fake import ( - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - policy "k8s.io/kubernetes/pkg/apis/policy" - core "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/apimachinery/pkg/runtime/schema" + policy "k8s.io/client-go/pkg/apis/policy/v1beta1" + core "k8s.io/client-go/testing" ) func (c *FakeEvictions) Evict(eviction *policy.Eviction) error { action := core.GetActionImpl{} action.Verb = "post" action.Namespace = c.ns - action.Resource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "pods"} + action.Resource = schema.GroupVersionResource{Group: "", Version: "", Resource: "pods"} action.Subresource = "eviction" _, err := c.Fake.Invokes(action, eviction) return err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go new file mode 100644 index 0000000000..309686585f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1beta1 "k8s.io/client-go/pkg/apis/policy/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakePodDisruptionBudgets implements PodDisruptionBudgetInterface +type FakePodDisruptionBudgets struct { + Fake *FakePolicyV1beta1 + ns string +} + +var poddisruptionbudgetsResource = schema.GroupVersionResource{Group: "policy", Version: "v1beta1", Resource: "poddisruptionbudgets"} + +func (c *FakePodDisruptionBudgets) Create(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodDisruptionBudget), err +} + +func (c *FakePodDisruptionBudgets) Update(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodDisruptionBudget), err +} + +func (c *FakePodDisruptionBudgets) UpdateStatus(podDisruptionBudget *v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodDisruptionBudget), err +} + +func (c *FakePodDisruptionBudgets) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(poddisruptionbudgetsResource, c.ns, name), &v1beta1.PodDisruptionBudget{}) + + return err +} + +func (c *FakePodDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(poddisruptionbudgetsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.PodDisruptionBudgetList{}) + return err +} + +func (c *FakePodDisruptionBudgets) Get(name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(poddisruptionbudgetsResource, c.ns, name), &v1beta1.PodDisruptionBudget{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodDisruptionBudget), err +} + +func (c *FakePodDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(poddisruptionbudgetsResource, c.ns, opts), &v1beta1.PodDisruptionBudgetList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.PodDisruptionBudgetList{} + for _, item := range obj.(*v1beta1.PodDisruptionBudgetList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. 
+func (c *FakePodDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(poddisruptionbudgetsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched podDisruptionBudget. +func (c *FakePodDisruptionBudgets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, name, data, subresources...), &v1beta1.PodDisruptionBudget{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodDisruptionBudget), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go new file mode 100644 index 0000000000..c9039b5196 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go @@ -0,0 +1,42 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakePolicyV1beta1 struct { + *testing.Fake +} + +func (c *FakePolicyV1beta1) Evictions(namespace string) v1beta1.EvictionInterface { + return &FakeEvictions{c, namespace} +} + +func (c *FakePolicyV1beta1) PodDisruptionBudgets(namespace string) v1beta1.PodDisruptionBudgetInterface { + return &FakePodDisruptionBudgets{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakePolicyV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..511adc6ef7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +type PodDisruptionBudgetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go new file mode 100644 index 0000000000..8088dd0196 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/policy/v1beta1" + rest "k8s.io/client-go/rest" +) + +// PodDisruptionBudgetsGetter has a method to return a PodDisruptionBudgetInterface. +// A group's client should implement this interface. +type PodDisruptionBudgetsGetter interface { + PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface +} + +// PodDisruptionBudgetInterface has methods to work with PodDisruptionBudget resources. +type PodDisruptionBudgetInterface interface { + Create(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) + Update(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) + UpdateStatus(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.PodDisruptionBudget, error) + List(opts v1.ListOptions) (*v1beta1.PodDisruptionBudgetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) + PodDisruptionBudgetExpansion +} + +// podDisruptionBudgets implements PodDisruptionBudgetInterface +type podDisruptionBudgets struct { + client rest.Interface + ns string +} + +// newPodDisruptionBudgets returns a PodDisruptionBudgets +func newPodDisruptionBudgets(c *PolicyV1beta1Client, namespace string) *podDisruptionBudgets { + return &podDisruptionBudgets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. +func (c *podDisruptionBudgets) Create(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { + result = &v1beta1.PodDisruptionBudget{} + err = c.client.Post(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + Body(podDisruptionBudget). + Do(). + Into(result) + return +} + +// Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. 
+func (c *podDisruptionBudgets) Update(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { + result = &v1beta1.PodDisruptionBudget{} + err = c.client.Put(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + Name(podDisruptionBudget.Name). + Body(podDisruptionBudget). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *podDisruptionBudgets) UpdateStatus(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { + result = &v1beta1.PodDisruptionBudget{} + err = c.client.Put(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + Name(podDisruptionBudget.Name). + SubResource("status"). + Body(podDisruptionBudget). + Do(). + Into(result) + return +} + +// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. +func (c *podDisruptionBudgets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. +func (c *podDisruptionBudgets) Get(name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { + result = &v1beta1.PodDisruptionBudget{} + err = c.client.Get(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. +func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { + result = &v1beta1.PodDisruptionBudgetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. +func (c *podDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched podDisruptionBudget. +func (c *podDisruptionBudgets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { + result = &v1beta1.PodDisruptionBudget{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go new file mode 100644 index 0000000000..9d62aad04e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go @@ -0,0 +1,93 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/policy/v1beta1" + rest "k8s.io/client-go/rest" +) + +type PolicyV1beta1Interface interface { + RESTClient() rest.Interface + EvictionsGetter + PodDisruptionBudgetsGetter +} + +// PolicyV1beta1Client is used to interact with features provided by the policy group. +type PolicyV1beta1Client struct { + restClient rest.Interface +} + +func (c *PolicyV1beta1Client) Evictions(namespace string) EvictionInterface { + return newEvictions(c, namespace) +} + +func (c *PolicyV1beta1Client) PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface { + return newPodDisruptionBudgets(c, namespace) +} + +// NewForConfig creates a new PolicyV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*PolicyV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &PolicyV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new PolicyV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *PolicyV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new PolicyV1beta1Client for the given RESTClient. +func New(c rest.Interface) *PolicyV1beta1Client { + return &PolicyV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *PolicyV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go new file mode 100644 index 0000000000..241061025a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" + rest "k8s.io/client-go/rest" +) + +// ClusterRolesGetter has a method to return a ClusterRoleInterface. +// A group's client should implement this interface. +type ClusterRolesGetter interface { + ClusterRoles() ClusterRoleInterface +} + +// ClusterRoleInterface has methods to work with ClusterRole resources. +type ClusterRoleInterface interface { + Create(*v1alpha1.ClusterRole) (*v1alpha1.ClusterRole, error) + Update(*v1alpha1.ClusterRole) (*v1alpha1.ClusterRole, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.ClusterRole, error) + List(opts v1.ListOptions) (*v1alpha1.ClusterRoleList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) + ClusterRoleExpansion +} + +// clusterRoles implements ClusterRoleInterface +type clusterRoles struct { + client rest.Interface +} + +// newClusterRoles returns a ClusterRoles +func newClusterRoles(c *RbacV1alpha1Client) *clusterRoles { + return &clusterRoles{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Create(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { + result = &v1alpha1.ClusterRole{} + err = c.client.Post(). + Resource("clusterroles"). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Update(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { + result = &v1alpha1.ClusterRole{} + err = c.client.Put(). + Resource("clusterroles"). + Name(clusterRole.Name). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. +func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterroles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("clusterroles"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). 
+ Error() +} + +// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. +func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { + result = &v1alpha1.ClusterRole{} + err = c.client.Get(). + Resource("clusterroles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. +func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { + result = &v1alpha1.ClusterRoleList{} + err = c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched clusterRole. +func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) { + result = &v1alpha1.ClusterRole{} + err = c.client.Patch(pt). + Resource("clusterroles"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go new file mode 100644 index 0000000000..bc48873a59 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" + rest "k8s.io/client-go/rest" +) + +// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. +// A group's client should implement this interface. +type ClusterRoleBindingsGetter interface { + ClusterRoleBindings() ClusterRoleBindingInterface +} + +// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. 
+type ClusterRoleBindingInterface interface { + Create(*v1alpha1.ClusterRoleBinding) (*v1alpha1.ClusterRoleBinding, error) + Update(*v1alpha1.ClusterRoleBinding) (*v1alpha1.ClusterRoleBinding, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.ClusterRoleBinding, error) + List(opts v1.ListOptions) (*v1alpha1.ClusterRoleBindingList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) + ClusterRoleBindingExpansion +} + +// clusterRoleBindings implements ClusterRoleBindingInterface +type clusterRoleBindings struct { + client rest.Interface +} + +// newClusterRoleBindings returns a ClusterRoleBindings +func newClusterRoleBindings(c *RbacV1alpha1Client) *clusterRoleBindings { + return &clusterRoleBindings{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Create(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { + result = &v1alpha1.ClusterRoleBinding{} + err = c.client.Post(). + Resource("clusterrolebindings"). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Update(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { + result = &v1alpha1.ClusterRoleBinding{} + err = c.client.Put(). + Resource("clusterrolebindings"). + Name(clusterRoleBinding.Name). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. +func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. +func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { + result = &v1alpha1.ClusterRoleBinding{} + err = c.client.Get(). + Resource("clusterrolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. +func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { + result = &v1alpha1.ClusterRoleBindingList{} + err = c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. +func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched clusterRoleBinding. +func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { + result = &v1alpha1.ClusterRoleBinding{} + err = c.client.Patch(pt). + Resource("clusterrolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go new file mode 100644 index 0000000000..ba8d10d3b6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go new file mode 100644 index 0000000000..c6548330a0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go new file mode 100644 index 0000000000..fed2d4e8c0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go @@ -0,0 +1,110 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" + testing "k8s.io/client-go/testing" +) + +// FakeClusterRoles implements ClusterRoleInterface +type FakeClusterRoles struct { + Fake *FakeRbacV1alpha1 +} + +var clusterrolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Resource: "clusterroles"} + +func (c *FakeClusterRoles) Create(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterRole), err +} + +func (c *FakeClusterRoles) Update(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterRole), err +} + +func (c *FakeClusterRoles) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(clusterrolesResource, name), &v1alpha1.ClusterRole{}) + return err +} + +func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleList{}) + return err +} + +func (c *FakeClusterRoles) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1alpha1.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterRole), err +} + +func (c *FakeClusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(clusterrolesResource, opts), &v1alpha1.ClusterRoleList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ClusterRoleList{} + for _, item := range obj.(*v1alpha1.ClusterRoleList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *FakeClusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts)) +} + +// Patch applies the patch and returns the patched clusterRole. +func (c *FakeClusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, data, subresources...), &v1alpha1.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterRole), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go new file mode 100644 index 0000000000..9e577cdf04 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go @@ -0,0 +1,110 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" + testing "k8s.io/client-go/testing" +) + +// FakeClusterRoleBindings implements ClusterRoleBindingInterface +type FakeClusterRoleBindings struct { + Fake *FakeRbacV1alpha1 +} + +var clusterrolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Resource: "clusterrolebindings"} + +func (c *FakeClusterRoleBindings) Create(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterRoleBinding), err +} + +func (c *FakeClusterRoleBindings) Update(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterRoleBinding), err +} + +func (c *FakeClusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(clusterrolebindingsResource, name), &v1alpha1.ClusterRoleBinding{}) + return err +} + +func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleBindingList{}) + return err +} + +func (c *FakeClusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1alpha1.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterRoleBinding), err +} + +func (c *FakeClusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(clusterrolebindingsResource, opts), &v1alpha1.ClusterRoleBindingList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ClusterRoleBindingList{} + for _, item := range obj.(*v1alpha1.ClusterRoleBindingList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. +func (c *FakeClusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts)) +} + +// Patch applies the patch and returns the patched clusterRoleBinding. +func (c *FakeClusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, data, subresources...), &v1alpha1.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterRoleBinding), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go new file mode 100644 index 0000000000..11470696d0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go @@ -0,0 +1,50 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeRbacV1alpha1 struct { + *testing.Fake +} + +func (c *FakeRbacV1alpha1) ClusterRoles() v1alpha1.ClusterRoleInterface { + return &FakeClusterRoles{c} +} + +func (c *FakeRbacV1alpha1) ClusterRoleBindings() v1alpha1.ClusterRoleBindingInterface { + return &FakeClusterRoleBindings{c} +} + +func (c *FakeRbacV1alpha1) Roles(namespace string) v1alpha1.RoleInterface { + return &FakeRoles{c, namespace} +} + +func (c *FakeRbacV1alpha1) RoleBindings(namespace string) v1alpha1.RoleBindingInterface { + return &FakeRoleBindings{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeRbacV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go new file mode 100644 index 0000000000..511b210493 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go @@ -0,0 +1,118 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" + testing "k8s.io/client-go/testing" +) + +// FakeRoles implements RoleInterface +type FakeRoles struct { + Fake *FakeRbacV1alpha1 + ns string +} + +var rolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Resource: "roles"} + +func (c *FakeRoles) Create(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1alpha1.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Role), err +} + +func (c *FakeRoles) Update(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1alpha1.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Role), err +} + +func (c *FakeRoles) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(rolesResource, c.ns, name), &v1alpha1.Role{}) + + return err +} + +func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.RoleList{}) + return err +} + +func (c *FakeRoles) Get(name string, options v1.GetOptions) (result *v1alpha1.Role, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1alpha1.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Role), err +} + +func (c *FakeRoles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(rolesResource, c.ns, opts), &v1alpha1.RoleList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.RoleList{} + for _, item := range obj.(*v1alpha1.RoleList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested roles. 
+func (c *FakeRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched role. +func (c *FakeRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, data, subresources...), &v1alpha1.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Role), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go new file mode 100644 index 0000000000..85d6c7edb6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go @@ -0,0 +1,118 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" + testing "k8s.io/client-go/testing" +) + +// FakeRoleBindings implements RoleBindingInterface +type FakeRoleBindings struct { + Fake *FakeRbacV1alpha1 + ns string +} + +var rolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Resource: "rolebindings"} + +func (c *FakeRoleBindings) Create(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RoleBinding), err +} + +func (c *FakeRoleBindings) Update(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RoleBinding), err +} + +func (c *FakeRoleBindings) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(rolebindingsResource, c.ns, name), &v1alpha1.RoleBinding{}) + + return err +} + +func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.RoleBindingList{}) + return err +} + +func (c *FakeRoleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1alpha1.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RoleBinding), err +} + +func (c *FakeRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(rolebindingsResource, c.ns, opts), &v1alpha1.RoleBindingList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.RoleBindingList{} + for _, item := range obj.(*v1alpha1.RoleBindingList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *FakeRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched roleBinding. +func (c *FakeRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, data, subresources...), &v1alpha1.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RoleBinding), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go new file mode 100644 index 0000000000..f506fc3468 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go @@ -0,0 +1,25 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +type ClusterRoleExpansion interface{} + +type ClusterRoleBindingExpansion interface{} + +type RoleExpansion interface{} + +type RoleBindingExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go new file mode 100644 index 0000000000..67256d50f5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go @@ -0,0 +1,103 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" + rest "k8s.io/client-go/rest" +) + +type RbacV1alpha1Interface interface { + RESTClient() rest.Interface + ClusterRolesGetter + ClusterRoleBindingsGetter + RolesGetter + RoleBindingsGetter +} + +// RbacV1alpha1Client is used to interact with features provided by the rbac.authorization.k8s.io group. +type RbacV1alpha1Client struct { + restClient rest.Interface +} + +func (c *RbacV1alpha1Client) ClusterRoles() ClusterRoleInterface { + return newClusterRoles(c) +} + +func (c *RbacV1alpha1Client) ClusterRoleBindings() ClusterRoleBindingInterface { + return newClusterRoleBindings(c) +} + +func (c *RbacV1alpha1Client) Roles(namespace string) RoleInterface { + return newRoles(c, namespace) +} + +func (c *RbacV1alpha1Client) RoleBindings(namespace string) RoleBindingInterface { + return newRoleBindings(c, namespace) +} + +// NewForConfig creates a new RbacV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*RbacV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &RbacV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new RbacV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *RbacV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new RbacV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *RbacV1alpha1Client { + return &RbacV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *RbacV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go new file mode 100644 index 0000000000..9c4fa915eb --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" + rest "k8s.io/client-go/rest" +) + +// RolesGetter has a method to return a RoleInterface. +// A group's client should implement this interface. +type RolesGetter interface { + Roles(namespace string) RoleInterface +} + +// RoleInterface has methods to work with Role resources. +type RoleInterface interface { + Create(*v1alpha1.Role) (*v1alpha1.Role, error) + Update(*v1alpha1.Role) (*v1alpha1.Role, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.Role, error) + List(opts v1.ListOptions) (*v1alpha1.RoleList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) + RoleExpansion +} + +// roles implements RoleInterface +type roles struct { + client rest.Interface + ns string +} + +// newRoles returns a Roles +func newRoles(c *RbacV1alpha1Client, namespace string) *roles { + return &roles{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Create(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { + result = &v1alpha1.Role{} + err = c.client.Post(). + Namespace(c.ns). + Resource("roles"). + Body(role). + Do(). + Into(result) + return +} + +// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Update(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { + result = &v1alpha1.Role{} + err = c.client.Put(). + Namespace(c.ns). + Resource("roles"). + Name(role.Name). + Body(role). + Do(). + Into(result) + return +} + +// Delete takes name of the role and deletes it. Returns an error if one occurs. +func (c *roles) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the role, and returns the corresponding role object, and an error if there is any. +func (c *roles) Get(name string, options v1.GetOptions) (result *v1alpha1.Role, err error) { + result = &v1alpha1.Role{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Roles that match those selectors. +func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { + result = &v1alpha1.RoleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roles. 
+func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched role. +func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) { + result = &v1alpha1.Role{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("roles"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go new file mode 100644 index 0000000000..5e81967f17 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" + rest "k8s.io/client-go/rest" +) + +// RoleBindingsGetter has a method to return a RoleBindingInterface. +// A group's client should implement this interface. +type RoleBindingsGetter interface { + RoleBindings(namespace string) RoleBindingInterface +} + +// RoleBindingInterface has methods to work with RoleBinding resources. +type RoleBindingInterface interface { + Create(*v1alpha1.RoleBinding) (*v1alpha1.RoleBinding, error) + Update(*v1alpha1.RoleBinding) (*v1alpha1.RoleBinding, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.RoleBinding, error) + List(opts v1.ListOptions) (*v1alpha1.RoleBindingList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) + RoleBindingExpansion +} + +// roleBindings implements RoleBindingInterface +type roleBindings struct { + client rest.Interface + ns string +} + +// newRoleBindings returns a RoleBindings +func newRoleBindings(c *RbacV1alpha1Client, namespace string) *roleBindings { + return &roleBindings{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Create(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { + result = &v1alpha1.RoleBinding{} + err = c.client.Post(). + Namespace(c.ns). + Resource("rolebindings"). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a roleBinding and updates it. 
Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Update(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { + result = &v1alpha1.RoleBinding{} + err = c.client.Put(). + Namespace(c.ns). + Resource("rolebindings"). + Name(roleBinding.Name). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. +func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. +func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) { + result = &v1alpha1.RoleBinding{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. +func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { + result = &v1alpha1.RoleBindingList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched roleBinding. +func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) { + result = &v1alpha1.RoleBinding{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("rolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go new file mode 100644 index 0000000000..6a63571f39 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" + rest "k8s.io/client-go/rest" +) + +// ClusterRolesGetter has a method to return a ClusterRoleInterface. +// A group's client should implement this interface. +type ClusterRolesGetter interface { + ClusterRoles() ClusterRoleInterface +} + +// ClusterRoleInterface has methods to work with ClusterRole resources. +type ClusterRoleInterface interface { + Create(*v1beta1.ClusterRole) (*v1beta1.ClusterRole, error) + Update(*v1beta1.ClusterRole) (*v1beta1.ClusterRole, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ClusterRole, error) + List(opts v1.ListOptions) (*v1beta1.ClusterRoleList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) + ClusterRoleExpansion +} + +// clusterRoles implements ClusterRoleInterface +type clusterRoles struct { + client rest.Interface +} + +// newClusterRoles returns a ClusterRoles +func newClusterRoles(c *RbacV1beta1Client) *clusterRoles { + return &clusterRoles{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Create(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) { + result = &v1beta1.ClusterRole{} + err = c.client.Post(). + Resource("clusterroles"). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Update(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) { + result = &v1beta1.ClusterRole{} + err = c.client.Put(). + Resource("clusterroles"). + Name(clusterRole.Name). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. +func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterroles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("clusterroles"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. +func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) { + result = &v1beta1.ClusterRole{} + err = c.client.Get(). + Resource("clusterroles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. 
+func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { + result = &v1beta1.ClusterRoleList{} + err = c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched clusterRole. +func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) { + result = &v1beta1.ClusterRole{} + err = c.client.Patch(pt). + Resource("clusterroles"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go new file mode 100644 index 0000000000..a2f3875096 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" + rest "k8s.io/client-go/rest" +) + +// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. +// A group's client should implement this interface. +type ClusterRoleBindingsGetter interface { + ClusterRoleBindings() ClusterRoleBindingInterface +} + +// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. 
+type ClusterRoleBindingInterface interface { + Create(*v1beta1.ClusterRoleBinding) (*v1beta1.ClusterRoleBinding, error) + Update(*v1beta1.ClusterRoleBinding) (*v1beta1.ClusterRoleBinding, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ClusterRoleBinding, error) + List(opts v1.ListOptions) (*v1beta1.ClusterRoleBindingList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) + ClusterRoleBindingExpansion +} + +// clusterRoleBindings implements ClusterRoleBindingInterface +type clusterRoleBindings struct { + client rest.Interface +} + +// newClusterRoleBindings returns a ClusterRoleBindings +func newClusterRoleBindings(c *RbacV1beta1Client) *clusterRoleBindings { + return &clusterRoleBindings{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Create(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) { + result = &v1beta1.ClusterRoleBinding{} + err = c.client.Post(). + Resource("clusterrolebindings"). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Update(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) { + result = &v1beta1.ClusterRoleBinding{} + err = c.client.Put(). + Resource("clusterrolebindings"). + Name(clusterRoleBinding.Name). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. +func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. +func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) { + result = &v1beta1.ClusterRoleBinding{} + err = c.client.Get(). + Resource("clusterrolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. +func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { + result = &v1beta1.ClusterRoleBindingList{} + err = c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. +func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched clusterRoleBinding. +func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) { + result = &v1beta1.ClusterRoleBinding{} + err = c.client.Patch(pt). + Resource("clusterrolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go new file mode 100644 index 0000000000..11b5238972 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go new file mode 100644 index 0000000000..c6548330a0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go new file mode 100644 index 0000000000..25e04d15a1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go @@ -0,0 +1,110 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakeClusterRoles implements ClusterRoleInterface +type FakeClusterRoles struct { + Fake *FakeRbacV1beta1 +} + +var clusterrolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "clusterroles"} + +func (c *FakeClusterRoles) Create(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1beta1.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ClusterRole), err +} + +func (c *FakeClusterRoles) Update(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1beta1.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ClusterRole), err +} + +func (c *FakeClusterRoles) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(clusterrolesResource, name), &v1beta1.ClusterRole{}) + return err +} + +func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.ClusterRoleList{}) + return err +} + +func (c *FakeClusterRoles) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1beta1.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ClusterRole), err +} + +func (c *FakeClusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(clusterrolesResource, opts), &v1beta1.ClusterRoleList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.ClusterRoleList{} + for _, item := range obj.(*v1beta1.ClusterRoleList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *FakeClusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts)) +} + +// Patch applies the patch and returns the patched clusterRole. +func (c *FakeClusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, data, subresources...), &v1beta1.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ClusterRole), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go new file mode 100644 index 0000000000..38fb39c89d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go @@ -0,0 +1,110 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakeClusterRoleBindings implements ClusterRoleBindingInterface +type FakeClusterRoleBindings struct { + Fake *FakeRbacV1beta1 +} + +var clusterrolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "clusterrolebindings"} + +func (c *FakeClusterRoleBindings) Create(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1beta1.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ClusterRoleBinding), err +} + +func (c *FakeClusterRoleBindings) Update(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1beta1.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ClusterRoleBinding), err +} + +func (c *FakeClusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(clusterrolebindingsResource, name), &v1beta1.ClusterRoleBinding{}) + return err +} + +func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.ClusterRoleBindingList{}) + return err +} + +func (c *FakeClusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1beta1.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ClusterRoleBinding), err +} + +func (c *FakeClusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(clusterrolebindingsResource, opts), &v1beta1.ClusterRoleBindingList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.ClusterRoleBindingList{} + for _, item := range obj.(*v1beta1.ClusterRoleBindingList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. +func (c *FakeClusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts)) +} + +// Patch applies the patch and returns the patched clusterRoleBinding. +func (c *FakeClusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, data, subresources...), &v1beta1.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ClusterRoleBinding), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go new file mode 100644 index 0000000000..929d035293 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go @@ -0,0 +1,50 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeRbacV1beta1 struct { + *testing.Fake +} + +func (c *FakeRbacV1beta1) ClusterRoles() v1beta1.ClusterRoleInterface { + return &FakeClusterRoles{c} +} + +func (c *FakeRbacV1beta1) ClusterRoleBindings() v1beta1.ClusterRoleBindingInterface { + return &FakeClusterRoleBindings{c} +} + +func (c *FakeRbacV1beta1) Roles(namespace string) v1beta1.RoleInterface { + return &FakeRoles{c, namespace} +} + +func (c *FakeRbacV1beta1) RoleBindings(namespace string) v1beta1.RoleBindingInterface { + return &FakeRoleBindings{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeRbacV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go new file mode 100644 index 0000000000..fd7637613c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go @@ -0,0 +1,118 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakeRoles implements RoleInterface +type FakeRoles struct { + Fake *FakeRbacV1beta1 + ns string +} + +var rolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "roles"} + +func (c *FakeRoles) Create(role *v1beta1.Role) (result *v1beta1.Role, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1beta1.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Role), err +} + +func (c *FakeRoles) Update(role *v1beta1.Role) (result *v1beta1.Role, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1beta1.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Role), err +} + +func (c *FakeRoles) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(rolesResource, c.ns, name), &v1beta1.Role{}) + + return err +} + +func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.RoleList{}) + return err +} + +func (c *FakeRoles) Get(name string, options v1.GetOptions) (result *v1beta1.Role, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1beta1.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Role), err +} + +func (c *FakeRoles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(rolesResource, c.ns, opts), &v1beta1.RoleList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.RoleList{} + for _, item := range obj.(*v1beta1.RoleList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested roles. +func (c *FakeRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched role. +func (c *FakeRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, data, subresources...), &v1beta1.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Role), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go new file mode 100644 index 0000000000..5e55996fbb --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go @@ -0,0 +1,118 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakeRoleBindings implements RoleBindingInterface +type FakeRoleBindings struct { + Fake *FakeRbacV1beta1 + ns string +} + +var rolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "rolebindings"} + +func (c *FakeRoleBindings) Create(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1beta1.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.RoleBinding), err +} + +func (c *FakeRoleBindings) Update(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1beta1.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.RoleBinding), err +} + +func (c *FakeRoleBindings) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(rolebindingsResource, c.ns, name), &v1beta1.RoleBinding{}) + + return err +} + +func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.RoleBindingList{}) + return err +} + +func (c *FakeRoleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1beta1.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.RoleBinding), err +} + +func (c *FakeRoleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(rolebindingsResource, c.ns, opts), &v1beta1.RoleBindingList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.RoleBindingList{} + for _, item := range obj.(*v1beta1.RoleBindingList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *FakeRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched roleBinding. +func (c *FakeRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, data, subresources...), &v1beta1.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.RoleBinding), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..d7f80c0042 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go @@ -0,0 +1,25 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +type ClusterRoleExpansion interface{} + +type ClusterRoleBindingExpansion interface{} + +type RoleExpansion interface{} + +type RoleBindingExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go new file mode 100644 index 0000000000..a24ef4f9ab --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go @@ -0,0 +1,103 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" + rest "k8s.io/client-go/rest" +) + +type RbacV1beta1Interface interface { + RESTClient() rest.Interface + ClusterRolesGetter + ClusterRoleBindingsGetter + RolesGetter + RoleBindingsGetter +} + +// RbacV1beta1Client is used to interact with features provided by the rbac.authorization.k8s.io group. +type RbacV1beta1Client struct { + restClient rest.Interface +} + +func (c *RbacV1beta1Client) ClusterRoles() ClusterRoleInterface { + return newClusterRoles(c) +} + +func (c *RbacV1beta1Client) ClusterRoleBindings() ClusterRoleBindingInterface { + return newClusterRoleBindings(c) +} + +func (c *RbacV1beta1Client) Roles(namespace string) RoleInterface { + return newRoles(c, namespace) +} + +func (c *RbacV1beta1Client) RoleBindings(namespace string) RoleBindingInterface { + return newRoleBindings(c, namespace) +} + +// NewForConfig creates a new RbacV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*RbacV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &RbacV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new RbacV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *RbacV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new RbacV1beta1Client for the given RESTClient. +func New(c rest.Interface) *RbacV1beta1Client { + return &RbacV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *RbacV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go new file mode 100644 index 0000000000..aa57ac3196 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" + rest "k8s.io/client-go/rest" +) + +// RolesGetter has a method to return a RoleInterface. +// A group's client should implement this interface. +type RolesGetter interface { + Roles(namespace string) RoleInterface +} + +// RoleInterface has methods to work with Role resources. +type RoleInterface interface { + Create(*v1beta1.Role) (*v1beta1.Role, error) + Update(*v1beta1.Role) (*v1beta1.Role, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Role, error) + List(opts v1.ListOptions) (*v1beta1.RoleList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) + RoleExpansion +} + +// roles implements RoleInterface +type roles struct { + client rest.Interface + ns string +} + +// newRoles returns a Roles +func newRoles(c *RbacV1beta1Client, namespace string) *roles { + return &roles{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Create(role *v1beta1.Role) (result *v1beta1.Role, err error) { + result = &v1beta1.Role{} + err = c.client.Post(). + Namespace(c.ns). + Resource("roles"). + Body(role). + Do(). + Into(result) + return +} + +// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Update(role *v1beta1.Role) (result *v1beta1.Role, err error) { + result = &v1beta1.Role{} + err = c.client.Put(). + Namespace(c.ns). + Resource("roles"). + Name(role.Name). + Body(role). + Do(). + Into(result) + return +} + +// Delete takes name of the role and deletes it. Returns an error if one occurs. +func (c *roles) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the role, and returns the corresponding role object, and an error if there is any. +func (c *roles) Get(name string, options v1.GetOptions) (result *v1beta1.Role, err error) { + result = &v1beta1.Role{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Roles that match those selectors. +func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) { + result = &v1beta1.RoleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roles. 
+func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched role. +func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) { + result = &v1beta1.Role{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("roles"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go new file mode 100644 index 0000000000..3dee9107e1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" + rest "k8s.io/client-go/rest" +) + +// RoleBindingsGetter has a method to return a RoleBindingInterface. +// A group's client should implement this interface. +type RoleBindingsGetter interface { + RoleBindings(namespace string) RoleBindingInterface +} + +// RoleBindingInterface has methods to work with RoleBinding resources. +type RoleBindingInterface interface { + Create(*v1beta1.RoleBinding) (*v1beta1.RoleBinding, error) + Update(*v1beta1.RoleBinding) (*v1beta1.RoleBinding, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.RoleBinding, error) + List(opts v1.ListOptions) (*v1beta1.RoleBindingList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) + RoleBindingExpansion +} + +// roleBindings implements RoleBindingInterface +type roleBindings struct { + client rest.Interface + ns string +} + +// newRoleBindings returns a RoleBindings +func newRoleBindings(c *RbacV1beta1Client, namespace string) *roleBindings { + return &roleBindings{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Create(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) { + result = &v1beta1.RoleBinding{} + err = c.client.Post(). + Namespace(c.ns). + Resource("rolebindings"). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a roleBinding and updates it. 
Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Update(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) { + result = &v1beta1.RoleBinding{} + err = c.client.Put(). + Namespace(c.ns). + Resource("rolebindings"). + Name(roleBinding.Name). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. +func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. +func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) { + result = &v1beta1.RoleBinding{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. +func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { + result = &v1beta1.RoleBindingList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched roleBinding. +func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) { + result = &v1beta1.RoleBinding{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("rolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go new file mode 100644 index 0000000000..ba8d10d3b6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. 
+ +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go new file mode 100644 index 0000000000..c6548330a0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go new file mode 100644 index 0000000000..aa2fd101b4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go @@ -0,0 +1,118 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1alpha1 "k8s.io/client-go/pkg/apis/settings/v1alpha1" + testing "k8s.io/client-go/testing" +) + +// FakePodPresets implements PodPresetInterface +type FakePodPresets struct { + Fake *FakeSettingsV1alpha1 + ns string +} + +var podpresetsResource = schema.GroupVersionResource{Group: "settings.k8s.io", Version: "v1alpha1", Resource: "podpresets"} + +func (c *FakePodPresets) Create(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(podpresetsResource, c.ns, podPreset), &v1alpha1.PodPreset{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PodPreset), err +} + +func (c *FakePodPresets) Update(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(podpresetsResource, c.ns, podPreset), &v1alpha1.PodPreset{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PodPreset), err +} + +func (c *FakePodPresets) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewDeleteAction(podpresetsResource, c.ns, name), &v1alpha1.PodPreset{}) + + return err +} + +func (c *FakePodPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(podpresetsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.PodPresetList{}) + return err +} + +func (c *FakePodPresets) Get(name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(podpresetsResource, c.ns, name), &v1alpha1.PodPreset{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PodPreset), err +} + +func (c *FakePodPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(podpresetsResource, c.ns, opts), &v1alpha1.PodPresetList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.PodPresetList{} + for _, item := range obj.(*v1alpha1.PodPresetList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested podPresets. +func (c *FakePodPresets) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(podpresetsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched podPreset. +func (c *FakePodPresets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(podpresetsResource, c.ns, name, data, subresources...), &v1alpha1.PodPreset{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PodPreset), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go new file mode 100644 index 0000000000..6feb9b218f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go @@ -0,0 +1,38 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeSettingsV1alpha1 struct { + *testing.Fake +} + +func (c *FakeSettingsV1alpha1) PodPresets(namespace string) v1alpha1.PodPresetInterface { + return &FakePodPresets{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeSettingsV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go new file mode 100644 index 0000000000..d599b2935c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +type PodPresetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go new file mode 100644 index 0000000000..b8fc0a022a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1alpha1 "k8s.io/client-go/pkg/apis/settings/v1alpha1" + rest "k8s.io/client-go/rest" +) + +// PodPresetsGetter has a method to return a PodPresetInterface. +// A group's client should implement this interface. +type PodPresetsGetter interface { + PodPresets(namespace string) PodPresetInterface +} + +// PodPresetInterface has methods to work with PodPreset resources. 
+type PodPresetInterface interface { + Create(*v1alpha1.PodPreset) (*v1alpha1.PodPreset, error) + Update(*v1alpha1.PodPreset) (*v1alpha1.PodPreset, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.PodPreset, error) + List(opts v1.ListOptions) (*v1alpha1.PodPresetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) + PodPresetExpansion +} + +// podPresets implements PodPresetInterface +type podPresets struct { + client rest.Interface + ns string +} + +// newPodPresets returns a PodPresets +func newPodPresets(c *SettingsV1alpha1Client, namespace string) *podPresets { + return &podPresets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a podPreset and creates it. Returns the server's representation of the podPreset, and an error, if there is any. +func (c *podPresets) Create(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) { + result = &v1alpha1.PodPreset{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podpresets"). + Body(podPreset). + Do(). + Into(result) + return +} + +// Update takes the representation of a podPreset and updates it. Returns the server's representation of the podPreset, and an error, if there is any. +func (c *podPresets) Update(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) { + result = &v1alpha1.PodPreset{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podpresets"). + Name(podPreset.Name). + Body(podPreset). + Do(). + Into(result) + return +} + +// Delete takes name of the podPreset and deletes it. Returns an error if one occurs. +func (c *podPresets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podpresets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podpresets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the podPreset, and returns the corresponding podPreset object, and an error if there is any. +func (c *podPresets) Get(name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) { + result = &v1alpha1.PodPreset{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podpresets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodPresets that match those selectors. +func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { + result = &v1alpha1.PodPresetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podpresets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podPresets. +func (c *podPresets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podpresets"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Watch() +} + +// Patch applies the patch and returns the patched podPreset. +func (c *podPresets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) { + result = &v1alpha1.PodPreset{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("podpresets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go new file mode 100644 index 0000000000..1a90034bcd --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1alpha1 "k8s.io/client-go/pkg/apis/settings/v1alpha1" + rest "k8s.io/client-go/rest" +) + +type SettingsV1alpha1Interface interface { + RESTClient() rest.Interface + PodPresetsGetter +} + +// SettingsV1alpha1Client is used to interact with features provided by the settings.k8s.io group. +type SettingsV1alpha1Client struct { + restClient rest.Interface +} + +func (c *SettingsV1alpha1Client) PodPresets(namespace string) PodPresetInterface { + return newPodPresets(c, namespace) +} + +// NewForConfig creates a new SettingsV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*SettingsV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &SettingsV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new SettingsV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *SettingsV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new SettingsV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *SettingsV1alpha1Client { + return &SettingsV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *SettingsV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go new file mode 100644 index 0000000000..54673bfa73 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go new file mode 100644 index 0000000000..c6548330a0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go new file mode 100644 index 0000000000..97972f0f74 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go @@ -0,0 +1,38 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/client-go/kubernetes/typed/storage/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeStorageV1 struct { + *testing.Fake +} + +func (c *FakeStorageV1) StorageClasses() v1.StorageClassInterface { + return &FakeStorageClasses{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeStorageV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go new file mode 100644 index 0000000000..3f3a7c0770 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go @@ -0,0 +1,110 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1 "k8s.io/client-go/pkg/apis/storage/v1" + testing "k8s.io/client-go/testing" +) + +// FakeStorageClasses implements StorageClassInterface +type FakeStorageClasses struct { + Fake *FakeStorageV1 +} + +var storageclassesResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1", Resource: "storageclasses"} + +func (c *FakeStorageClasses) Create(storageClass *v1.StorageClass) (result *v1.StorageClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &v1.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1.StorageClass), err +} + +func (c *FakeStorageClasses) Update(storageClass *v1.StorageClass) (result *v1.StorageClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &v1.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1.StorageClass), err +} + +func (c *FakeStorageClasses) Delete(name string, options *meta_v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(storageclassesResource, name), &v1.StorageClass{}) + return err +} + +func (c *FakeStorageClasses) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(storageclassesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1.StorageClassList{}) + return err +} + +func (c *FakeStorageClasses) Get(name string, options meta_v1.GetOptions) (result *v1.StorageClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(storageclassesResource, name), &v1.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1.StorageClass), err +} + +func (c *FakeStorageClasses) List(opts meta_v1.ListOptions) (result *v1.StorageClassList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(storageclassesResource, opts), &v1.StorageClassList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.StorageClassList{} + for _, item := range obj.(*v1.StorageClassList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested storageClasses. +func (c *FakeStorageClasses) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(storageclassesResource, opts)) +} + +// Patch applies the patch and returns the patched storageClass. +func (c *FakeStorageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StorageClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, data, subresources...), &v1.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1.StorageClass), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go new file mode 100644 index 0000000000..39df9fb879 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +type StorageClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go new file mode 100644 index 0000000000..a95d30b2d7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/apis/storage/v1" + rest "k8s.io/client-go/rest" +) + +type StorageV1Interface interface { + RESTClient() rest.Interface + StorageClassesGetter +} + +// StorageV1Client is used to interact with features provided by the storage.k8s.io group. 
+type StorageV1Client struct { + restClient rest.Interface +} + +func (c *StorageV1Client) StorageClasses() StorageClassInterface { + return newStorageClasses(c) +} + +// NewForConfig creates a new StorageV1Client for the given config. +func NewForConfig(c *rest.Config) (*StorageV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &StorageV1Client{client}, nil +} + +// NewForConfigOrDie creates a new StorageV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *StorageV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new StorageV1Client for the given RESTClient. +func New(c rest.Interface) *StorageV1Client { + return &StorageV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *StorageV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go new file mode 100644 index 0000000000..0283c99709 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/apis/storage/v1" + rest "k8s.io/client-go/rest" +) + +// StorageClassesGetter has a method to return a StorageClassInterface. +// A group's client should implement this interface. +type StorageClassesGetter interface { + StorageClasses() StorageClassInterface +} + +// StorageClassInterface has methods to work with StorageClass resources. 
+type StorageClassInterface interface { + Create(*v1.StorageClass) (*v1.StorageClass, error) + Update(*v1.StorageClass) (*v1.StorageClass, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.StorageClass, error) + List(opts meta_v1.ListOptions) (*v1.StorageClassList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StorageClass, err error) + StorageClassExpansion +} + +// storageClasses implements StorageClassInterface +type storageClasses struct { + client rest.Interface +} + +// newStorageClasses returns a StorageClasses +func newStorageClasses(c *StorageV1Client) *storageClasses { + return &storageClasses{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *storageClasses) Create(storageClass *v1.StorageClass) (result *v1.StorageClass, err error) { + result = &v1.StorageClass{} + err = c.client.Post(). + Resource("storageclasses"). + Body(storageClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *storageClasses) Update(storageClass *v1.StorageClass) (result *v1.StorageClass, err error) { + result = &v1.StorageClass{} + err = c.client.Put(). + Resource("storageclasses"). + Name(storageClass.Name). + Body(storageClass). + Do(). + Into(result) + return +} + +// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. +func (c *storageClasses) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Resource("storageclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *storageClasses) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Resource("storageclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. +func (c *storageClasses) Get(name string, options meta_v1.GetOptions) (result *v1.StorageClass, err error) { + result = &v1.StorageClass{} + err = c.client.Get(). + Resource("storageclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. +func (c *storageClasses) List(opts meta_v1.ListOptions) (result *v1.StorageClassList, err error) { + result = &v1.StorageClassList{} + err = c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested storageClasses. +func (c *storageClasses) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched storageClass. 
+func (c *storageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StorageClass, err error) { + result = &v1.StorageClass{} + err = c.client.Patch(pt). + Resource("storageclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go new file mode 100644 index 0000000000..11b5238972 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go new file mode 100644 index 0000000000..c6548330a0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go new file mode 100644 index 0000000000..a178091d98 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go @@ -0,0 +1,38 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + v1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeStorageV1beta1 struct { + *testing.Fake +} + +func (c *FakeStorageV1beta1) StorageClasses() v1beta1.StorageClassInterface { + return &FakeStorageClasses{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeStorageV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go new file mode 100644 index 0000000000..b8c937d462 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go @@ -0,0 +1,110 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1beta1 "k8s.io/client-go/pkg/apis/storage/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakeStorageClasses implements StorageClassInterface +type FakeStorageClasses struct { + Fake *FakeStorageV1beta1 +} + +var storageclassesResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1beta1", Resource: "storageclasses"} + +func (c *FakeStorageClasses) Create(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.StorageClass), err +} + +func (c *FakeStorageClasses) Update(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.StorageClass), err +} + +func (c *FakeStorageClasses) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(storageclassesResource, name), &v1beta1.StorageClass{}) + return err +} + +func (c *FakeStorageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(storageclassesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.StorageClassList{}) + return err +} + +func (c *FakeStorageClasses) Get(name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootGetAction(storageclassesResource, name), &v1beta1.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.StorageClass), err +} + +func (c *FakeStorageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(storageclassesResource, opts), &v1beta1.StorageClassList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.StorageClassList{} + for _, item := range obj.(*v1beta1.StorageClassList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested storageClasses. +func (c *FakeStorageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(storageclassesResource, opts)) +} + +// Patch applies the patch and returns the patched storageClass. +func (c *FakeStorageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, data, subresources...), &v1beta1.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.StorageClass), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..6f3f0c55e6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +type StorageClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go new file mode 100644 index 0000000000..7abb99e573 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/storage/v1beta1" + rest "k8s.io/client-go/rest" +) + +type StorageV1beta1Interface interface { + RESTClient() rest.Interface + StorageClassesGetter +} + +// StorageV1beta1Client is used to interact with features provided by the storage.k8s.io group. +type StorageV1beta1Client struct { + restClient rest.Interface +} + +func (c *StorageV1beta1Client) StorageClasses() StorageClassInterface { + return newStorageClasses(c) +} + +// NewForConfig creates a new StorageV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*StorageV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &StorageV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new StorageV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *StorageV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new StorageV1beta1Client for the given RESTClient. +func New(c rest.Interface) *StorageV1beta1Client { + return &StorageV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *StorageV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go new file mode 100644 index 0000000000..6980fc78bf --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/storage/v1beta1" + rest "k8s.io/client-go/rest" +) + +// StorageClassesGetter has a method to return a StorageClassInterface. +// A group's client should implement this interface. +type StorageClassesGetter interface { + StorageClasses() StorageClassInterface +} + +// StorageClassInterface has methods to work with StorageClass resources. 
+type StorageClassInterface interface { + Create(*v1beta1.StorageClass) (*v1beta1.StorageClass, error) + Update(*v1beta1.StorageClass) (*v1beta1.StorageClass, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.StorageClass, error) + List(opts v1.ListOptions) (*v1beta1.StorageClassList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) + StorageClassExpansion +} + +// storageClasses implements StorageClassInterface +type storageClasses struct { + client rest.Interface +} + +// newStorageClasses returns a StorageClasses +func newStorageClasses(c *StorageV1beta1Client) *storageClasses { + return &storageClasses{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *storageClasses) Create(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { + result = &v1beta1.StorageClass{} + err = c.client.Post(). + Resource("storageclasses"). + Body(storageClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *storageClasses) Update(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { + result = &v1beta1.StorageClass{} + err = c.client.Put(). + Resource("storageclasses"). + Name(storageClass.Name). + Body(storageClass). + Do(). + Into(result) + return +} + +// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. +func (c *storageClasses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("storageclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *storageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("storageclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. +func (c *storageClasses) Get(name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { + result = &v1beta1.StorageClass{} + err = c.client.Get(). + Resource("storageclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. +func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { + result = &v1beta1.StorageClassList{} + err = c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested storageClasses. +func (c *storageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Watch() +} + +// Patch applies the patch and returns the patched storageClass. +func (c *storageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) { + result = &v1beta1.StorageClass{} + err = c.client.Patch(pt). + Resource("storageclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go b/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go new file mode 100644 index 0000000000..a40deded29 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go @@ -0,0 +1,68 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + api "k8s.io/client-go/pkg/api" + v1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" +) + +// ComponentStatusLister helps list ComponentStatuses. +type ComponentStatusLister interface { + // List lists all ComponentStatuses in the indexer. + List(selector labels.Selector) (ret []*v1.ComponentStatus, err error) + // Get retrieves the ComponentStatus from the index for a given name. + Get(name string) (*v1.ComponentStatus, error) + ComponentStatusListerExpansion +} + +// componentStatusLister implements the ComponentStatusLister interface. +type componentStatusLister struct { + indexer cache.Indexer +} + +// NewComponentStatusLister returns a new ComponentStatusLister. +func NewComponentStatusLister(indexer cache.Indexer) ComponentStatusLister { + return &componentStatusLister{indexer: indexer} +} + +// List lists all ComponentStatuses in the indexer. +func (s *componentStatusLister) List(selector labels.Selector) (ret []*v1.ComponentStatus, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ComponentStatus)) + }) + return ret, err +} + +// Get retrieves the ComponentStatus from the index for a given name. +func (s *componentStatusLister) Get(name string) (*v1.ComponentStatus, error) { + key := &v1.ComponentStatus{ObjectMeta: meta_v1.ObjectMeta{Name: name}} + obj, exists, err := s.indexer.Get(key) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(api.Resource("componentstatus"), name) + } + return obj.(*v1.ComponentStatus), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/configmap.go b/vendor/k8s.io/client-go/listers/core/v1/configmap.go new file mode 100644 index 0000000000..32124dedbc --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/configmap.go @@ -0,0 +1,95 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + api "k8s.io/client-go/pkg/api" + v1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" +) + +// ConfigMapLister helps list ConfigMaps. +type ConfigMapLister interface { + // List lists all ConfigMaps in the indexer. + List(selector labels.Selector) (ret []*v1.ConfigMap, err error) + // ConfigMaps returns an object that can list and get ConfigMaps. + ConfigMaps(namespace string) ConfigMapNamespaceLister + ConfigMapListerExpansion +} + +// configMapLister implements the ConfigMapLister interface. +type configMapLister struct { + indexer cache.Indexer +} + +// NewConfigMapLister returns a new ConfigMapLister. +func NewConfigMapLister(indexer cache.Indexer) ConfigMapLister { + return &configMapLister{indexer: indexer} +} + +// List lists all ConfigMaps in the indexer. +func (s *configMapLister) List(selector labels.Selector) (ret []*v1.ConfigMap, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ConfigMap)) + }) + return ret, err +} + +// ConfigMaps returns an object that can list and get ConfigMaps. +func (s *configMapLister) ConfigMaps(namespace string) ConfigMapNamespaceLister { + return configMapNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ConfigMapNamespaceLister helps list and get ConfigMaps. +type ConfigMapNamespaceLister interface { + // List lists all ConfigMaps in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ConfigMap, err error) + // Get retrieves the ConfigMap from the indexer for a given namespace and name. + Get(name string) (*v1.ConfigMap, error) + ConfigMapNamespaceListerExpansion +} + +// configMapNamespaceLister implements the ConfigMapNamespaceLister +// interface. +type configMapNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ConfigMaps in the indexer for a given namespace. +func (s configMapNamespaceLister) List(selector labels.Selector) (ret []*v1.ConfigMap, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ConfigMap)) + }) + return ret, err +} + +// Get retrieves the ConfigMap from the indexer for a given namespace and name. +func (s configMapNamespaceLister) Get(name string) (*v1.ConfigMap, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(api.Resource("configmap"), name) + } + return obj.(*v1.ConfigMap), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/endpoints.go b/vendor/k8s.io/client-go/listers/core/v1/endpoints.go new file mode 100644 index 0000000000..0092e135b4 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/endpoints.go @@ -0,0 +1,95 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + api "k8s.io/client-go/pkg/api" + v1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" +) + +// EndpointsLister helps list Endpoints. +type EndpointsLister interface { + // List lists all Endpoints in the indexer. + List(selector labels.Selector) (ret []*v1.Endpoints, err error) + // Endpoints returns an object that can list and get Endpoints. + Endpoints(namespace string) EndpointsNamespaceLister + EndpointsListerExpansion +} + +// endpointsLister implements the EndpointsLister interface. +type endpointsLister struct { + indexer cache.Indexer +} + +// NewEndpointsLister returns a new EndpointsLister. +func NewEndpointsLister(indexer cache.Indexer) EndpointsLister { + return &endpointsLister{indexer: indexer} +} + +// List lists all Endpoints in the indexer. +func (s *endpointsLister) List(selector labels.Selector) (ret []*v1.Endpoints, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Endpoints)) + }) + return ret, err +} + +// Endpoints returns an object that can list and get Endpoints. +func (s *endpointsLister) Endpoints(namespace string) EndpointsNamespaceLister { + return endpointsNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// EndpointsNamespaceLister helps list and get Endpoints. +type EndpointsNamespaceLister interface { + // List lists all Endpoints in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Endpoints, err error) + // Get retrieves the Endpoints from the indexer for a given namespace and name. + Get(name string) (*v1.Endpoints, error) + EndpointsNamespaceListerExpansion +} + +// endpointsNamespaceLister implements the EndpointsNamespaceLister +// interface. +type endpointsNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Endpoints in the indexer for a given namespace. +func (s endpointsNamespaceLister) List(selector labels.Selector) (ret []*v1.Endpoints, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Endpoints)) + }) + return ret, err +} + +// Get retrieves the Endpoints from the indexer for a given namespace and name. +func (s endpointsNamespaceLister) Get(name string) (*v1.Endpoints, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(api.Resource("endpoints"), name) + } + return obj.(*v1.Endpoints), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/event.go b/vendor/k8s.io/client-go/listers/core/v1/event.go new file mode 100644 index 0000000000..f30e033a90 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/event.go @@ -0,0 +1,95 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + api "k8s.io/client-go/pkg/api" + v1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" +) + +// EventLister helps list Events. +type EventLister interface { + // List lists all Events in the indexer. + List(selector labels.Selector) (ret []*v1.Event, err error) + // Events returns an object that can list and get Events. + Events(namespace string) EventNamespaceLister + EventListerExpansion +} + +// eventLister implements the EventLister interface. +type eventLister struct { + indexer cache.Indexer +} + +// NewEventLister returns a new EventLister. +func NewEventLister(indexer cache.Indexer) EventLister { + return &eventLister{indexer: indexer} +} + +// List lists all Events in the indexer. +func (s *eventLister) List(selector labels.Selector) (ret []*v1.Event, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Event)) + }) + return ret, err +} + +// Events returns an object that can list and get Events. +func (s *eventLister) Events(namespace string) EventNamespaceLister { + return eventNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// EventNamespaceLister helps list and get Events. +type EventNamespaceLister interface { + // List lists all Events in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Event, err error) + // Get retrieves the Event from the indexer for a given namespace and name. + Get(name string) (*v1.Event, error) + EventNamespaceListerExpansion +} + +// eventNamespaceLister implements the EventNamespaceLister +// interface. +type eventNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Events in the indexer for a given namespace. +func (s eventNamespaceLister) List(selector labels.Selector) (ret []*v1.Event, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Event)) + }) + return ret, err +} + +// Get retrieves the Event from the indexer for a given namespace and name. +func (s eventNamespaceLister) Get(name string) (*v1.Event, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(api.Resource("event"), name) + } + return obj.(*v1.Event), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/core/v1/expansion_generated.go new file mode 100644 index 0000000000..9b7fad0a56 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/expansion_generated.go @@ -0,0 +1,111 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +// ComponentStatusListerExpansion allows custom methods to be added to +// ComponentStatusLister. +type ComponentStatusListerExpansion interface{} + +// ConfigMapListerExpansion allows custom methods to be added to +// ConfigMapLister. +type ConfigMapListerExpansion interface{} + +// ConfigMapNamespaceListerExpansion allows custom methods to be added to +// ConfigMapNamespaeLister. +type ConfigMapNamespaceListerExpansion interface{} + +// EndpointsListerExpansion allows custom methods to be added to +// EndpointsLister. +type EndpointsListerExpansion interface{} + +// EndpointsNamespaceListerExpansion allows custom methods to be added to +// EndpointsNamespaeLister. +type EndpointsNamespaceListerExpansion interface{} + +// EventListerExpansion allows custom methods to be added to +// EventLister. +type EventListerExpansion interface{} + +// EventNamespaceListerExpansion allows custom methods to be added to +// EventNamespaeLister. +type EventNamespaceListerExpansion interface{} + +// LimitRangeListerExpansion allows custom methods to be added to +// LimitRangeLister. +type LimitRangeListerExpansion interface{} + +// LimitRangeNamespaceListerExpansion allows custom methods to be added to +// LimitRangeNamespaeLister. +type LimitRangeNamespaceListerExpansion interface{} + +// NamespaceListerExpansion allows custom methods to be added to +// NamespaceLister. +type NamespaceListerExpansion interface{} + +// PersistentVolumeListerExpansion allows custom methods to be added to +// PersistentVolumeLister. +type PersistentVolumeListerExpansion interface{} + +// PersistentVolumeClaimListerExpansion allows custom methods to be added to +// PersistentVolumeClaimLister. +type PersistentVolumeClaimListerExpansion interface{} + +// PersistentVolumeClaimNamespaceListerExpansion allows custom methods to be added to +// PersistentVolumeClaimNamespaeLister. +type PersistentVolumeClaimNamespaceListerExpansion interface{} + +// PodListerExpansion allows custom methods to be added to +// PodLister. +type PodListerExpansion interface{} + +// PodNamespaceListerExpansion allows custom methods to be added to +// PodNamespaeLister. +type PodNamespaceListerExpansion interface{} + +// PodTemplateListerExpansion allows custom methods to be added to +// PodTemplateLister. +type PodTemplateListerExpansion interface{} + +// PodTemplateNamespaceListerExpansion allows custom methods to be added to +// PodTemplateNamespaeLister. +type PodTemplateNamespaceListerExpansion interface{} + +// ResourceQuotaListerExpansion allows custom methods to be added to +// ResourceQuotaLister. +type ResourceQuotaListerExpansion interface{} + +// ResourceQuotaNamespaceListerExpansion allows custom methods to be added to +// ResourceQuotaNamespaeLister. +type ResourceQuotaNamespaceListerExpansion interface{} + +// SecretListerExpansion allows custom methods to be added to +// SecretLister. +type SecretListerExpansion interface{} + +// SecretNamespaceListerExpansion allows custom methods to be added to +// SecretNamespaeLister. 
+type SecretNamespaceListerExpansion interface{} + +// ServiceAccountListerExpansion allows custom methods to be added to +// ServiceAccountLister. +type ServiceAccountListerExpansion interface{} + +// ServiceAccountNamespaceListerExpansion allows custom methods to be added to +// ServiceAccountNamespaeLister. +type ServiceAccountNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/core/v1/limitrange.go b/vendor/k8s.io/client-go/listers/core/v1/limitrange.go new file mode 100644 index 0000000000..3ad2aeae4a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/limitrange.go @@ -0,0 +1,95 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + api "k8s.io/client-go/pkg/api" + v1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" +) + +// LimitRangeLister helps list LimitRanges. +type LimitRangeLister interface { + // List lists all LimitRanges in the indexer. + List(selector labels.Selector) (ret []*v1.LimitRange, err error) + // LimitRanges returns an object that can list and get LimitRanges. + LimitRanges(namespace string) LimitRangeNamespaceLister + LimitRangeListerExpansion +} + +// limitRangeLister implements the LimitRangeLister interface. +type limitRangeLister struct { + indexer cache.Indexer +} + +// NewLimitRangeLister returns a new LimitRangeLister. +func NewLimitRangeLister(indexer cache.Indexer) LimitRangeLister { + return &limitRangeLister{indexer: indexer} +} + +// List lists all LimitRanges in the indexer. +func (s *limitRangeLister) List(selector labels.Selector) (ret []*v1.LimitRange, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.LimitRange)) + }) + return ret, err +} + +// LimitRanges returns an object that can list and get LimitRanges. +func (s *limitRangeLister) LimitRanges(namespace string) LimitRangeNamespaceLister { + return limitRangeNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// LimitRangeNamespaceLister helps list and get LimitRanges. +type LimitRangeNamespaceLister interface { + // List lists all LimitRanges in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.LimitRange, err error) + // Get retrieves the LimitRange from the indexer for a given namespace and name. + Get(name string) (*v1.LimitRange, error) + LimitRangeNamespaceListerExpansion +} + +// limitRangeNamespaceLister implements the LimitRangeNamespaceLister +// interface. +type limitRangeNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all LimitRanges in the indexer for a given namespace. 
+func (s limitRangeNamespaceLister) List(selector labels.Selector) (ret []*v1.LimitRange, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.LimitRange)) + }) + return ret, err +} + +// Get retrieves the LimitRange from the indexer for a given namespace and name. +func (s limitRangeNamespaceLister) Get(name string) (*v1.LimitRange, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(api.Resource("limitrange"), name) + } + return obj.(*v1.LimitRange), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/namespace.go b/vendor/k8s.io/client-go/listers/core/v1/namespace.go new file mode 100644 index 0000000000..23f036046b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/namespace.go @@ -0,0 +1,68 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + api "k8s.io/client-go/pkg/api" + v1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" +) + +// NamespaceLister helps list Namespaces. +type NamespaceLister interface { + // List lists all Namespaces in the indexer. + List(selector labels.Selector) (ret []*v1.Namespace, err error) + // Get retrieves the Namespace from the index for a given name. + Get(name string) (*v1.Namespace, error) + NamespaceListerExpansion +} + +// namespaceLister implements the NamespaceLister interface. +type namespaceLister struct { + indexer cache.Indexer +} + +// NewNamespaceLister returns a new NamespaceLister. +func NewNamespaceLister(indexer cache.Indexer) NamespaceLister { + return &namespaceLister{indexer: indexer} +} + +// List lists all Namespaces in the indexer. +func (s *namespaceLister) List(selector labels.Selector) (ret []*v1.Namespace, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Namespace)) + }) + return ret, err +} + +// Get retrieves the Namespace from the index for a given name. +func (s *namespaceLister) Get(name string) (*v1.Namespace, error) { + key := &v1.Namespace{ObjectMeta: meta_v1.ObjectMeta{Name: name}} + obj, exists, err := s.indexer.Get(key) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(api.Resource("namespace"), name) + } + return obj.(*v1.Namespace), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/node.go b/vendor/k8s.io/client-go/listers/core/v1/node.go new file mode 100644 index 0000000000..9b729e8335 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/node.go @@ -0,0 +1,68 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + api "k8s.io/client-go/pkg/api" + v1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" +) + +// NodeLister helps list Nodes. +type NodeLister interface { + // List lists all Nodes in the indexer. + List(selector labels.Selector) (ret []*v1.Node, err error) + // Get retrieves the Node from the index for a given name. + Get(name string) (*v1.Node, error) + NodeListerExpansion +} + +// nodeLister implements the NodeLister interface. +type nodeLister struct { + indexer cache.Indexer +} + +// NewNodeLister returns a new NodeLister. +func NewNodeLister(indexer cache.Indexer) NodeLister { + return &nodeLister{indexer: indexer} +} + +// List lists all Nodes in the indexer. +func (s *nodeLister) List(selector labels.Selector) (ret []*v1.Node, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Node)) + }) + return ret, err +} + +// Get retrieves the Node from the index for a given name. +func (s *nodeLister) Get(name string) (*v1.Node, error) { + key := &v1.Node{ObjectMeta: meta_v1.ObjectMeta{Name: name}} + obj, exists, err := s.indexer.Get(key) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(api.Resource("node"), name) + } + return obj.(*v1.Node), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/node_expansion.go b/vendor/k8s.io/client-go/listers/core/v1/node_expansion.go new file mode 100644 index 0000000000..8d446b0dca --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/node_expansion.go @@ -0,0 +1,48 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/pkg/api/v1" +) + +// NodeConditionPredicate is a function that indicates whether the given node's conditions meet +// some set of criteria defined by the function. +type NodeConditionPredicate func(node *v1.Node) bool + +// NodeListerExpansion allows custom methods to be added to +// NodeLister. 
+type NodeListerExpansion interface { + ListWithPredicate(predicate NodeConditionPredicate) ([]*v1.Node, error) +} + +func (l *nodeLister) ListWithPredicate(predicate NodeConditionPredicate) ([]*v1.Node, error) { + nodes, err := l.List(labels.Everything()) + if err != nil { + return nil, err + } + + var filtered []*v1.Node + for i := range nodes { + if predicate(nodes[i]) { + filtered = append(filtered, nodes[i]) + } + } + + return filtered, nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go new file mode 100644 index 0000000000..837c736ca6 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go @@ -0,0 +1,68 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + api "k8s.io/client-go/pkg/api" + v1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" +) + +// PersistentVolumeLister helps list PersistentVolumes. +type PersistentVolumeLister interface { + // List lists all PersistentVolumes in the indexer. + List(selector labels.Selector) (ret []*v1.PersistentVolume, err error) + // Get retrieves the PersistentVolume from the index for a given name. + Get(name string) (*v1.PersistentVolume, error) + PersistentVolumeListerExpansion +} + +// persistentVolumeLister implements the PersistentVolumeLister interface. +type persistentVolumeLister struct { + indexer cache.Indexer +} + +// NewPersistentVolumeLister returns a new PersistentVolumeLister. +func NewPersistentVolumeLister(indexer cache.Indexer) PersistentVolumeLister { + return &persistentVolumeLister{indexer: indexer} +} + +// List lists all PersistentVolumes in the indexer. +func (s *persistentVolumeLister) List(selector labels.Selector) (ret []*v1.PersistentVolume, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PersistentVolume)) + }) + return ret, err +} + +// Get retrieves the PersistentVolume from the index for a given name. +func (s *persistentVolumeLister) Get(name string) (*v1.PersistentVolume, error) { + key := &v1.PersistentVolume{ObjectMeta: meta_v1.ObjectMeta{Name: name}} + obj, exists, err := s.indexer.Get(key) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(api.Resource("persistentvolume"), name) + } + return obj.(*v1.PersistentVolume), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go new file mode 100644 index 0000000000..6b8aea80d4 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go @@ -0,0 +1,95 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + api "k8s.io/client-go/pkg/api" + v1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" +) + +// PersistentVolumeClaimLister helps list PersistentVolumeClaims. +type PersistentVolumeClaimLister interface { + // List lists all PersistentVolumeClaims in the indexer. + List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) + // PersistentVolumeClaims returns an object that can list and get PersistentVolumeClaims. + PersistentVolumeClaims(namespace string) PersistentVolumeClaimNamespaceLister + PersistentVolumeClaimListerExpansion +} + +// persistentVolumeClaimLister implements the PersistentVolumeClaimLister interface. +type persistentVolumeClaimLister struct { + indexer cache.Indexer +} + +// NewPersistentVolumeClaimLister returns a new PersistentVolumeClaimLister. +func NewPersistentVolumeClaimLister(indexer cache.Indexer) PersistentVolumeClaimLister { + return &persistentVolumeClaimLister{indexer: indexer} +} + +// List lists all PersistentVolumeClaims in the indexer. +func (s *persistentVolumeClaimLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PersistentVolumeClaim)) + }) + return ret, err +} + +// PersistentVolumeClaims returns an object that can list and get PersistentVolumeClaims. +func (s *persistentVolumeClaimLister) PersistentVolumeClaims(namespace string) PersistentVolumeClaimNamespaceLister { + return persistentVolumeClaimNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PersistentVolumeClaimNamespaceLister helps list and get PersistentVolumeClaims. +type PersistentVolumeClaimNamespaceLister interface { + // List lists all PersistentVolumeClaims in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) + // Get retrieves the PersistentVolumeClaim from the indexer for a given namespace and name. + Get(name string) (*v1.PersistentVolumeClaim, error) + PersistentVolumeClaimNamespaceListerExpansion +} + +// persistentVolumeClaimNamespaceLister implements the PersistentVolumeClaimNamespaceLister +// interface. +type persistentVolumeClaimNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PersistentVolumeClaims in the indexer for a given namespace. +func (s persistentVolumeClaimNamespaceLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PersistentVolumeClaim)) + }) + return ret, err +} + +// Get retrieves the PersistentVolumeClaim from the indexer for a given namespace and name. 
+func (s persistentVolumeClaimNamespaceLister) Get(name string) (*v1.PersistentVolumeClaim, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(api.Resource("persistentvolumeclaim"), name) + } + return obj.(*v1.PersistentVolumeClaim), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/pod.go b/vendor/k8s.io/client-go/listers/core/v1/pod.go new file mode 100644 index 0000000000..d5b78ee393 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/pod.go @@ -0,0 +1,95 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + api "k8s.io/client-go/pkg/api" + v1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" +) + +// PodLister helps list Pods. +type PodLister interface { + // List lists all Pods in the indexer. + List(selector labels.Selector) (ret []*v1.Pod, err error) + // Pods returns an object that can list and get Pods. + Pods(namespace string) PodNamespaceLister + PodListerExpansion +} + +// podLister implements the PodLister interface. +type podLister struct { + indexer cache.Indexer +} + +// NewPodLister returns a new PodLister. +func NewPodLister(indexer cache.Indexer) PodLister { + return &podLister{indexer: indexer} +} + +// List lists all Pods in the indexer. +func (s *podLister) List(selector labels.Selector) (ret []*v1.Pod, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Pod)) + }) + return ret, err +} + +// Pods returns an object that can list and get Pods. +func (s *podLister) Pods(namespace string) PodNamespaceLister { + return podNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PodNamespaceLister helps list and get Pods. +type PodNamespaceLister interface { + // List lists all Pods in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Pod, err error) + // Get retrieves the Pod from the indexer for a given namespace and name. + Get(name string) (*v1.Pod, error) + PodNamespaceListerExpansion +} + +// podNamespaceLister implements the PodNamespaceLister +// interface. +type podNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Pods in the indexer for a given namespace. +func (s podNamespaceLister) List(selector labels.Selector) (ret []*v1.Pod, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Pod)) + }) + return ret, err +} + +// Get retrieves the Pod from the indexer for a given namespace and name. 
+func (s podNamespaceLister) Get(name string) (*v1.Pod, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(api.Resource("pod"), name) + } + return obj.(*v1.Pod), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go b/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go new file mode 100644 index 0000000000..fadd11fbea --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go @@ -0,0 +1,95 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + api "k8s.io/client-go/pkg/api" + v1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" +) + +// PodTemplateLister helps list PodTemplates. +type PodTemplateLister interface { + // List lists all PodTemplates in the indexer. + List(selector labels.Selector) (ret []*v1.PodTemplate, err error) + // PodTemplates returns an object that can list and get PodTemplates. + PodTemplates(namespace string) PodTemplateNamespaceLister + PodTemplateListerExpansion +} + +// podTemplateLister implements the PodTemplateLister interface. +type podTemplateLister struct { + indexer cache.Indexer +} + +// NewPodTemplateLister returns a new PodTemplateLister. +func NewPodTemplateLister(indexer cache.Indexer) PodTemplateLister { + return &podTemplateLister{indexer: indexer} +} + +// List lists all PodTemplates in the indexer. +func (s *podTemplateLister) List(selector labels.Selector) (ret []*v1.PodTemplate, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PodTemplate)) + }) + return ret, err +} + +// PodTemplates returns an object that can list and get PodTemplates. +func (s *podTemplateLister) PodTemplates(namespace string) PodTemplateNamespaceLister { + return podTemplateNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PodTemplateNamespaceLister helps list and get PodTemplates. +type PodTemplateNamespaceLister interface { + // List lists all PodTemplates in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.PodTemplate, err error) + // Get retrieves the PodTemplate from the indexer for a given namespace and name. + Get(name string) (*v1.PodTemplate, error) + PodTemplateNamespaceListerExpansion +} + +// podTemplateNamespaceLister implements the PodTemplateNamespaceLister +// interface. +type podTemplateNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PodTemplates in the indexer for a given namespace. 
+func (s podTemplateNamespaceLister) List(selector labels.Selector) (ret []*v1.PodTemplate, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PodTemplate)) + }) + return ret, err +} + +// Get retrieves the PodTemplate from the indexer for a given namespace and name. +func (s podTemplateNamespaceLister) Get(name string) (*v1.PodTemplate, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(api.Resource("podtemplate"), name) + } + return obj.(*v1.PodTemplate), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go new file mode 100644 index 0000000000..a38e5d7cc4 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go @@ -0,0 +1,95 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + api "k8s.io/client-go/pkg/api" + v1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" +) + +// ReplicationControllerLister helps list ReplicationControllers. +type ReplicationControllerLister interface { + // List lists all ReplicationControllers in the indexer. + List(selector labels.Selector) (ret []*v1.ReplicationController, err error) + // ReplicationControllers returns an object that can list and get ReplicationControllers. + ReplicationControllers(namespace string) ReplicationControllerNamespaceLister + ReplicationControllerListerExpansion +} + +// replicationControllerLister implements the ReplicationControllerLister interface. +type replicationControllerLister struct { + indexer cache.Indexer +} + +// NewReplicationControllerLister returns a new ReplicationControllerLister. +func NewReplicationControllerLister(indexer cache.Indexer) ReplicationControllerLister { + return &replicationControllerLister{indexer: indexer} +} + +// List lists all ReplicationControllers in the indexer. +func (s *replicationControllerLister) List(selector labels.Selector) (ret []*v1.ReplicationController, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ReplicationController)) + }) + return ret, err +} + +// ReplicationControllers returns an object that can list and get ReplicationControllers. +func (s *replicationControllerLister) ReplicationControllers(namespace string) ReplicationControllerNamespaceLister { + return replicationControllerNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ReplicationControllerNamespaceLister helps list and get ReplicationControllers. +type ReplicationControllerNamespaceLister interface { + // List lists all ReplicationControllers in the indexer for a given namespace. 
+ List(selector labels.Selector) (ret []*v1.ReplicationController, err error) + // Get retrieves the ReplicationController from the indexer for a given namespace and name. + Get(name string) (*v1.ReplicationController, error) + ReplicationControllerNamespaceListerExpansion +} + +// replicationControllerNamespaceLister implements the ReplicationControllerNamespaceLister +// interface. +type replicationControllerNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ReplicationControllers in the indexer for a given namespace. +func (s replicationControllerNamespaceLister) List(selector labels.Selector) (ret []*v1.ReplicationController, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ReplicationController)) + }) + return ret, err +} + +// Get retrieves the ReplicationController from the indexer for a given namespace and name. +func (s replicationControllerNamespaceLister) Get(name string) (*v1.ReplicationController, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(api.Resource("replicationcontroller"), name) + } + return obj.(*v1.ReplicationController), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller_expansion.go b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller_expansion.go new file mode 100644 index 0000000000..c0d667454b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller_expansion.go @@ -0,0 +1,66 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/pkg/api/v1" +) + +// ReplicationControllerListerExpansion allows custom methods to be added to +// ReplicationControllerLister. +type ReplicationControllerListerExpansion interface { + GetPodControllers(pod *v1.Pod) ([]*v1.ReplicationController, error) +} + +// ReplicationControllerNamespaceListerExpansion allows custom methods to be added to +// ReplicationControllerNamespaceLister. +type ReplicationControllerNamespaceListerExpansion interface{} + +// GetPodControllers returns a list of ReplicationControllers that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching ReplicationControllers are found.
+func (s *replicationControllerLister) GetPodControllers(pod *v1.Pod) ([]*v1.ReplicationController, error) { + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no controllers found for pod %v because it has no labels", pod.Name) + } + + items, err := s.ReplicationControllers(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var controllers []*v1.ReplicationController + for i := range items { + rc := items[i] + selector := labels.Set(rc.Spec.Selector).AsSelectorPreValidated() + + // If an rc with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + controllers = append(controllers, rc) + } + + if len(controllers) == 0 { + return nil, fmt.Errorf("could not find controller for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return controllers, nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go b/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go new file mode 100644 index 0000000000..1a30bc6286 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go @@ -0,0 +1,95 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + api "k8s.io/client-go/pkg/api" + v1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" +) + +// ResourceQuotaLister helps list ResourceQuotas. +type ResourceQuotaLister interface { + // List lists all ResourceQuotas in the indexer. + List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) + // ResourceQuotas returns an object that can list and get ResourceQuotas. + ResourceQuotas(namespace string) ResourceQuotaNamespaceLister + ResourceQuotaListerExpansion +} + +// resourceQuotaLister implements the ResourceQuotaLister interface. +type resourceQuotaLister struct { + indexer cache.Indexer +} + +// NewResourceQuotaLister returns a new ResourceQuotaLister. +func NewResourceQuotaLister(indexer cache.Indexer) ResourceQuotaLister { + return &resourceQuotaLister{indexer: indexer} +} + +// List lists all ResourceQuotas in the indexer. +func (s *resourceQuotaLister) List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ResourceQuota)) + }) + return ret, err +} + +// ResourceQuotas returns an object that can list and get ResourceQuotas. +func (s *resourceQuotaLister) ResourceQuotas(namespace string) ResourceQuotaNamespaceLister { + return resourceQuotaNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ResourceQuotaNamespaceLister helps list and get ResourceQuotas. +type ResourceQuotaNamespaceLister interface { + // List lists all ResourceQuotas in the indexer for a given namespace. 
+ List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) + // Get retrieves the ResourceQuota from the indexer for a given namespace and name. + Get(name string) (*v1.ResourceQuota, error) + ResourceQuotaNamespaceListerExpansion +} + +// resourceQuotaNamespaceLister implements the ResourceQuotaNamespaceLister +// interface. +type resourceQuotaNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ResourceQuotas in the indexer for a given namespace. +func (s resourceQuotaNamespaceLister) List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ResourceQuota)) + }) + return ret, err +} + +// Get retrieves the ResourceQuota from the indexer for a given namespace and name. +func (s resourceQuotaNamespaceLister) Get(name string) (*v1.ResourceQuota, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(api.Resource("resourcequota"), name) + } + return obj.(*v1.ResourceQuota), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/secret.go b/vendor/k8s.io/client-go/listers/core/v1/secret.go new file mode 100644 index 0000000000..150bd9450e --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/secret.go @@ -0,0 +1,95 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + api "k8s.io/client-go/pkg/api" + v1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" +) + +// SecretLister helps list Secrets. +type SecretLister interface { + // List lists all Secrets in the indexer. + List(selector labels.Selector) (ret []*v1.Secret, err error) + // Secrets returns an object that can list and get Secrets. + Secrets(namespace string) SecretNamespaceLister + SecretListerExpansion +} + +// secretLister implements the SecretLister interface. +type secretLister struct { + indexer cache.Indexer +} + +// NewSecretLister returns a new SecretLister. +func NewSecretLister(indexer cache.Indexer) SecretLister { + return &secretLister{indexer: indexer} +} + +// List lists all Secrets in the indexer. +func (s *secretLister) List(selector labels.Selector) (ret []*v1.Secret, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Secret)) + }) + return ret, err +} + +// Secrets returns an object that can list and get Secrets. +func (s *secretLister) Secrets(namespace string) SecretNamespaceLister { + return secretNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// SecretNamespaceLister helps list and get Secrets. +type SecretNamespaceLister interface { + // List lists all Secrets in the indexer for a given namespace. 
+ List(selector labels.Selector) (ret []*v1.Secret, err error) + // Get retrieves the Secret from the indexer for a given namespace and name. + Get(name string) (*v1.Secret, error) + SecretNamespaceListerExpansion +} + +// secretNamespaceLister implements the SecretNamespaceLister +// interface. +type secretNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Secrets in the indexer for a given namespace. +func (s secretNamespaceLister) List(selector labels.Selector) (ret []*v1.Secret, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Secret)) + }) + return ret, err +} + +// Get retrieves the Secret from the indexer for a given namespace and name. +func (s secretNamespaceLister) Get(name string) (*v1.Secret, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(api.Resource("secret"), name) + } + return obj.(*v1.Secret), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/service.go b/vendor/k8s.io/client-go/listers/core/v1/service.go new file mode 100644 index 0000000000..30d22c154b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/service.go @@ -0,0 +1,95 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + api "k8s.io/client-go/pkg/api" + v1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" +) + +// ServiceLister helps list Services. +type ServiceLister interface { + // List lists all Services in the indexer. + List(selector labels.Selector) (ret []*v1.Service, err error) + // Services returns an object that can list and get Services. + Services(namespace string) ServiceNamespaceLister + ServiceListerExpansion +} + +// serviceLister implements the ServiceLister interface. +type serviceLister struct { + indexer cache.Indexer +} + +// NewServiceLister returns a new ServiceLister. +func NewServiceLister(indexer cache.Indexer) ServiceLister { + return &serviceLister{indexer: indexer} +} + +// List lists all Services in the indexer. +func (s *serviceLister) List(selector labels.Selector) (ret []*v1.Service, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Service)) + }) + return ret, err +} + +// Services returns an object that can list and get Services. +func (s *serviceLister) Services(namespace string) ServiceNamespaceLister { + return serviceNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ServiceNamespaceLister helps list and get Services. +type ServiceNamespaceLister interface { + // List lists all Services in the indexer for a given namespace. 
+ List(selector labels.Selector) (ret []*v1.Service, err error) + // Get retrieves the Service from the indexer for a given namespace and name. + Get(name string) (*v1.Service, error) + ServiceNamespaceListerExpansion +} + +// serviceNamespaceLister implements the ServiceNamespaceLister +// interface. +type serviceNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Services in the indexer for a given namespace. +func (s serviceNamespaceLister) List(selector labels.Selector) (ret []*v1.Service, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Service)) + }) + return ret, err +} + +// Get retrieves the Service from the indexer for a given namespace and name. +func (s serviceNamespaceLister) Get(name string) (*v1.Service, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(api.Resource("service"), name) + } + return obj.(*v1.Service), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/service_expansion.go b/vendor/k8s.io/client-go/listers/core/v1/service_expansion.go new file mode 100644 index 0000000000..fd07f6d89f --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/service_expansion.go @@ -0,0 +1,56 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/pkg/api/v1" +) + +// ServiceListerExpansion allows custom methods to be added to +// ServiceLister. +type ServiceListerExpansion interface { + GetPodServices(pod *v1.Pod) ([]*v1.Service, error) +} + +// ServiceNamespaceListerExpansion allows custom methods to be added to +// ServiceNamespaceLister. +type ServiceNamespaceListerExpansion interface{} + +// TODO: Move this back to scheduler as a helper function that takes a Store, +// rather than a method of ServiceLister. +func (s *serviceLister) GetPodServices(pod *v1.Pod) ([]*v1.Service, error) { + allServices, err := s.Services(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var services []*v1.Service + for i := range allServices { + service := allServices[i] + if service.Spec.Selector == nil { + // services with nil selectors match nothing, not everything. + continue + } + selector := labels.Set(service.Spec.Selector).AsSelectorPreValidated() + if selector.Matches(labels.Set(pod.Labels)) { + services = append(services, service) + } + } + + return services, nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go new file mode 100644 index 0000000000..9c07933995 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go @@ -0,0 +1,95 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
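The GetPodServices expansion in service_expansion.go above is the piece an ingress or service controller typically uses to map a Pod back to the Services that select it. A minimal usage sketch (illustrative only; serviceIndexer is an assumed informer-backed cache.Indexer, not something defined in this diff):

package example // illustrative sketch, not part of the vendored code

import (
    "fmt"

    listersv1 "k8s.io/client-go/listers/core/v1"
    v1 "k8s.io/client-go/pkg/api/v1"
    "k8s.io/client-go/tools/cache"
)

// servicesForPod resolves the Services whose selectors match the pod's labels
// via the GetPodServices expansion shown above. serviceIndexer is assumed to
// be populated by a Service informer.
func servicesForPod(serviceIndexer cache.Indexer, pod *v1.Pod) ([]*v1.Service, error) {
    services, err := listersv1.NewServiceLister(serviceIndexer).GetPodServices(pod)
    if err != nil {
        return nil, err
    }
    for _, svc := range services {
        fmt.Printf("pod %s/%s is selected by service %s\n", pod.Namespace, pod.Name, svc.Name)
    }
    return services, nil
}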
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + api "k8s.io/client-go/pkg/api" + v1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" +) + +// ServiceAccountLister helps list ServiceAccounts. +type ServiceAccountLister interface { + // List lists all ServiceAccounts in the indexer. + List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) + // ServiceAccounts returns an object that can list and get ServiceAccounts. + ServiceAccounts(namespace string) ServiceAccountNamespaceLister + ServiceAccountListerExpansion +} + +// serviceAccountLister implements the ServiceAccountLister interface. +type serviceAccountLister struct { + indexer cache.Indexer +} + +// NewServiceAccountLister returns a new ServiceAccountLister. +func NewServiceAccountLister(indexer cache.Indexer) ServiceAccountLister { + return &serviceAccountLister{indexer: indexer} +} + +// List lists all ServiceAccounts in the indexer. +func (s *serviceAccountLister) List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ServiceAccount)) + }) + return ret, err +} + +// ServiceAccounts returns an object that can list and get ServiceAccounts. +func (s *serviceAccountLister) ServiceAccounts(namespace string) ServiceAccountNamespaceLister { + return serviceAccountNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ServiceAccountNamespaceLister helps list and get ServiceAccounts. +type ServiceAccountNamespaceLister interface { + // List lists all ServiceAccounts in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) + // Get retrieves the ServiceAccount from the indexer for a given namespace and name. + Get(name string) (*v1.ServiceAccount, error) + ServiceAccountNamespaceListerExpansion +} + +// serviceAccountNamespaceLister implements the ServiceAccountNamespaceLister +// interface. +type serviceAccountNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ServiceAccounts in the indexer for a given namespace. +func (s serviceAccountNamespaceLister) List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ServiceAccount)) + }) + return ret, err +} + +// Get retrieves the ServiceAccount from the indexer for a given namespace and name. 
+func (s serviceAccountNamespaceLister) Get(name string) (*v1.ServiceAccount, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(api.Resource("serviceaccount"), name) + } + return obj.(*v1.ServiceAccount), nil +} diff --git a/vendor/k8s.io/client-go/pkg/api/OWNERS b/vendor/k8s.io/client-go/pkg/api/OWNERS new file mode 100644 index 0000000000..3a9b0c6d15 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/OWNERS @@ -0,0 +1,44 @@ +approvers: +- erictune +- lavalamp +- smarterclayton +- thockin +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- yujuhong +- brendandburns +- derekwaynecarr +- caesarxuchao +- vishh +- mikedanese +- liggitt +- nikhiljindal +- bprashanth +- gmarek +- erictune +- davidopp +- pmorie +- sttts +- kargakis +- dchen1107 +- saad-ali +- zmerlynn +- luxas +- janetkuo +- justinsb +- pwittrock +- roberthbailey +- ncdc +- timstclair +- yifan-gu +- eparis +- mwielgus +- timothysc +- soltysh +- piosz +- jsafrane +- jbeda diff --git a/vendor/k8s.io/client-go/pkg/api/defaults.go b/vendor/k8s.io/client-go/pkg/api/defaults.go new file mode 100644 index 0000000000..baa49a8d7e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/defaults.go @@ -0,0 +1,36 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return scheme.AddDefaultingFuncs( + func(obj *ListOptions) { + if obj.LabelSelector == nil { + obj.LabelSelector = labels.Everything() + } + if obj.FieldSelector == nil { + obj.FieldSelector = fields.Everything() + } + }, + ) +} diff --git a/vendor/k8s.io/client-go/pkg/api/doc.go b/vendor/k8s.io/client-go/pkg/api/doc.go new file mode 100644 index 0000000000..a4262ab361 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package api contains the latest (or "internal") version of the +// Kubernetes API objects. This is the API objects as represented in memory. +// The contract presented to clients is located in the versioned packages, +// which are sub-directories. The first one is "v1". Those packages +// describe how a particular version is serialized to storage/network. 
+package api diff --git a/vendor/k8s.io/client-go/pkg/api/field_constants.go b/vendor/k8s.io/client-go/pkg/api/field_constants.go new file mode 100644 index 0000000000..5ead0f13fe --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/field_constants.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +// Field path constants that are specific to the internal API +// representation. +const ( + NodeUnschedulableField = "spec.unschedulable" + ObjectNameField = "metadata.name" + PodHostField = "spec.nodeName" + PodStatusField = "status.phase" + SecretTypeField = "type" + + EventReasonField = "reason" + EventSourceField = "source" + EventTypeField = "type" + EventInvolvedKindField = "involvedObject.kind" + EventInvolvedNamespaceField = "involvedObject.namespace" + EventInvolvedNameField = "involvedObject.name" + EventInvolvedUIDField = "involvedObject.uid" + EventInvolvedAPIVersionField = "involvedObject.apiVersion" + EventInvolvedResourceVersionField = "involvedObject.resourceVersion" + EventInvolvedFieldPathField = "involvedObject.fieldPath" +) diff --git a/vendor/k8s.io/client-go/pkg/api/helpers.go b/vendor/k8s.io/client-go/pkg/api/helpers.go new file mode 100644 index 0000000000..4705e11fed --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/helpers.go @@ -0,0 +1,636 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "crypto/md5" + "fmt" + "reflect" + "strings" + "time" + + "github.com/davecgh/go-spew/spew" + + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Conversion error conveniently packages up errors in conversions. +type ConversionError struct { + In, Out interface{} + Message string +} + +// Return a helpful string about the error +func (c *ConversionError) Error() string { + return spew.Sprintf( + "Conversion error: %s. (in: %v(%+v) out: %v)", + c.Message, reflect.TypeOf(c.In), c.In, reflect.TypeOf(c.Out), + ) +} + +const ( + // annotation key prefix used to identify non-convertible json paths. + NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" +) + +// NonConvertibleFields iterates over the provided map and filters out all but +// any keys with the "non-convertible.kubernetes.io" prefix. 
+func NonConvertibleFields(annotations map[string]string) map[string]string { + nonConvertibleKeys := map[string]string{} + for key, value := range annotations { + if strings.HasPrefix(key, NonConvertibleAnnotationPrefix) { + nonConvertibleKeys[key] = value + } + } + return nonConvertibleKeys +} + +// Semantic can do semantic deep equality checks for api objects. +// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true +var Semantic = conversion.EqualitiesOrDie( + func(a, b resource.Quantity) bool { + // Ignore formatting, only care that numeric value stayed the same. + // TODO: if we decide it's important, it should be safe to start comparing the format. + // + // Uninitialized quantities are equivalent to 0 quantities. + return a.Cmp(b) == 0 + }, + func(a, b metav1.Time) bool { + return a.UTC() == b.UTC() + }, + func(a, b labels.Selector) bool { + return a.String() == b.String() + }, + func(a, b fields.Selector) bool { + return a.String() == b.String() + }, +) + +var standardResourceQuotaScopes = sets.NewString( + string(ResourceQuotaScopeTerminating), + string(ResourceQuotaScopeNotTerminating), + string(ResourceQuotaScopeBestEffort), + string(ResourceQuotaScopeNotBestEffort), +) + +// IsStandardResourceQuotaScope returns true if the scope is a standard value +func IsStandardResourceQuotaScope(str string) bool { + return standardResourceQuotaScopes.Has(str) +} + +var podObjectCountQuotaResources = sets.NewString( + string(ResourcePods), +) + +var podComputeQuotaResources = sets.NewString( + string(ResourceCPU), + string(ResourceMemory), + string(ResourceLimitsCPU), + string(ResourceLimitsMemory), + string(ResourceRequestsCPU), + string(ResourceRequestsMemory), +) + +// IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope +func IsResourceQuotaScopeValidForResource(scope ResourceQuotaScope, resource string) bool { + switch scope { + case ResourceQuotaScopeTerminating, ResourceQuotaScopeNotTerminating, ResourceQuotaScopeNotBestEffort: + return podObjectCountQuotaResources.Has(resource) || podComputeQuotaResources.Has(resource) + case ResourceQuotaScopeBestEffort: + return podObjectCountQuotaResources.Has(resource) + default: + return true + } +} + +var standardContainerResources = sets.NewString( + string(ResourceCPU), + string(ResourceMemory), +) + +// IsStandardContainerResourceName returns true if the container can make a resource request +// for the specified resource +func IsStandardContainerResourceName(str string) bool { + return standardContainerResources.Has(str) +} + +// IsOpaqueIntResourceName returns true if the resource name has the opaque +// integer resource prefix. +func IsOpaqueIntResourceName(name ResourceName) bool { + return strings.HasPrefix(string(name), ResourceOpaqueIntPrefix) +} + +// OpaqueIntResourceName returns a ResourceName with the canonical opaque +// integer prefix prepended. If the argument already has the prefix, it is +// returned unmodified. 
+func OpaqueIntResourceName(name string) ResourceName { + if IsOpaqueIntResourceName(ResourceName(name)) { + return ResourceName(name) + } + return ResourceName(fmt.Sprintf("%s%s", ResourceOpaqueIntPrefix, name)) +} + +var standardLimitRangeTypes = sets.NewString( + string(LimitTypePod), + string(LimitTypeContainer), + string(LimitTypePersistentVolumeClaim), +) + +// IsStandardLimitRangeType returns true if the type is Pod or Container +func IsStandardLimitRangeType(str string) bool { + return standardLimitRangeTypes.Has(str) +} + +var standardQuotaResources = sets.NewString( + string(ResourceCPU), + string(ResourceMemory), + string(ResourceRequestsCPU), + string(ResourceRequestsMemory), + string(ResourceRequestsStorage), + string(ResourceLimitsCPU), + string(ResourceLimitsMemory), + string(ResourcePods), + string(ResourceQuotas), + string(ResourceServices), + string(ResourceReplicationControllers), + string(ResourceSecrets), + string(ResourcePersistentVolumeClaims), + string(ResourceConfigMaps), + string(ResourceServicesNodePorts), + string(ResourceServicesLoadBalancers), +) + +// IsStandardQuotaResourceName returns true if the resource is known to +// the quota tracking system +func IsStandardQuotaResourceName(str string) bool { + return standardQuotaResources.Has(str) +} + +var standardResources = sets.NewString( + string(ResourceCPU), + string(ResourceMemory), + string(ResourceRequestsCPU), + string(ResourceRequestsMemory), + string(ResourceLimitsCPU), + string(ResourceLimitsMemory), + string(ResourcePods), + string(ResourceQuotas), + string(ResourceServices), + string(ResourceReplicationControllers), + string(ResourceSecrets), + string(ResourceConfigMaps), + string(ResourcePersistentVolumeClaims), + string(ResourceStorage), + string(ResourceRequestsStorage), +) + +// IsStandardResourceName returns true if the resource is known to the system +func IsStandardResourceName(str string) bool { + return standardResources.Has(str) +} + +var integerResources = sets.NewString( + string(ResourcePods), + string(ResourceQuotas), + string(ResourceServices), + string(ResourceReplicationControllers), + string(ResourceSecrets), + string(ResourceConfigMaps), + string(ResourcePersistentVolumeClaims), + string(ResourceServicesNodePorts), + string(ResourceServicesLoadBalancers), +) + +// IsIntegerResourceName returns true if the resource is measured in integer values +func IsIntegerResourceName(str string) bool { + return integerResources.Has(str) || IsOpaqueIntResourceName(ResourceName(str)) +} + +// this function aims to check if the service's ClusterIP is set or not +// the objective is not to perform validation here +func IsServiceIPSet(service *Service) bool { + return service.Spec.ClusterIP != ClusterIPNone && service.Spec.ClusterIP != "" +} + +// this function aims to check if the service's cluster IP is requested or not +func IsServiceIPRequested(service *Service) bool { + // ExternalName services are CNAME aliases to external ones. Ignore the IP. 
+ if service.Spec.Type == ServiceTypeExternalName { + return false + } + return service.Spec.ClusterIP == "" +} + +var standardFinalizers = sets.NewString( + string(FinalizerKubernetes), + metav1.FinalizerOrphanDependents, +) + +// HasAnnotation returns a bool if passed in annotation exists +func HasAnnotation(obj ObjectMeta, ann string) bool { + _, found := obj.Annotations[ann] + return found +} + +// SetMetaDataAnnotation sets the annotation and value +func SetMetaDataAnnotation(obj *ObjectMeta, ann string, value string) { + if obj.Annotations == nil { + obj.Annotations = make(map[string]string) + } + obj.Annotations[ann] = value +} + +func IsStandardFinalizerName(str string) bool { + return standardFinalizers.Has(str) +} + +// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, +// only if they do not already exist +func AddToNodeAddresses(addresses *[]NodeAddress, addAddresses ...NodeAddress) { + for _, add := range addAddresses { + exists := false + for _, existing := range *addresses { + if existing.Address == add.Address && existing.Type == add.Type { + exists = true + break + } + } + if !exists { + *addresses = append(*addresses, add) + } + } +} + +func HashObject(obj runtime.Object, codec runtime.Codec) (string, error) { + data, err := runtime.Encode(codec, obj) + if err != nil { + return "", err + } + return fmt.Sprintf("%x", md5.Sum(data)), nil +} + +// TODO: make method on LoadBalancerStatus? +func LoadBalancerStatusEqual(l, r *LoadBalancerStatus) bool { + return ingressSliceEqual(l.Ingress, r.Ingress) +} + +func ingressSliceEqual(lhs, rhs []LoadBalancerIngress) bool { + if len(lhs) != len(rhs) { + return false + } + for i := range lhs { + if !ingressEqual(&lhs[i], &rhs[i]) { + return false + } + } + return true +} + +func ingressEqual(lhs, rhs *LoadBalancerIngress) bool { + if lhs.IP != rhs.IP { + return false + } + if lhs.Hostname != rhs.Hostname { + return false + } + return true +} + +// TODO: make method on LoadBalancerStatus? +func LoadBalancerStatusDeepCopy(lb *LoadBalancerStatus) *LoadBalancerStatus { + c := &LoadBalancerStatus{} + c.Ingress = make([]LoadBalancerIngress, len(lb.Ingress)) + for i := range lb.Ingress { + c.Ingress[i] = lb.Ingress[i] + } + return c +} + +// GetAccessModesAsString returns a string representation of an array of access modes. +// modes, when present, are always in the same order: RWO,ROX,RWX. 
+func GetAccessModesAsString(modes []PersistentVolumeAccessMode) string { + modes = removeDuplicateAccessModes(modes) + modesStr := []string{} + if containsAccessMode(modes, ReadWriteOnce) { + modesStr = append(modesStr, "RWO") + } + if containsAccessMode(modes, ReadOnlyMany) { + modesStr = append(modesStr, "ROX") + } + if containsAccessMode(modes, ReadWriteMany) { + modesStr = append(modesStr, "RWX") + } + return strings.Join(modesStr, ",") +} + +// GetAccessModesFromString returns an array of AccessModes from a string created by GetAccessModesAsString +func GetAccessModesFromString(modes string) []PersistentVolumeAccessMode { + strmodes := strings.Split(modes, ",") + accessModes := []PersistentVolumeAccessMode{} + for _, s := range strmodes { + s = strings.Trim(s, " ") + switch { + case s == "RWO": + accessModes = append(accessModes, ReadWriteOnce) + case s == "ROX": + accessModes = append(accessModes, ReadOnlyMany) + case s == "RWX": + accessModes = append(accessModes, ReadWriteMany) + } + } + return accessModes +} + +// removeDuplicateAccessModes returns an array of access modes without any duplicates +func removeDuplicateAccessModes(modes []PersistentVolumeAccessMode) []PersistentVolumeAccessMode { + accessModes := []PersistentVolumeAccessMode{} + for _, m := range modes { + if !containsAccessMode(accessModes, m) { + accessModes = append(accessModes, m) + } + } + return accessModes +} + +func containsAccessMode(modes []PersistentVolumeAccessMode, mode PersistentVolumeAccessMode) bool { + for _, m := range modes { + if m == mode { + return true + } + } + return false +} + +// ParseRFC3339 parses an RFC3339 date in either RFC3339Nano or RFC3339 format. +func ParseRFC3339(s string, nowFn func() metav1.Time) (metav1.Time, error) { + if t, timeErr := time.Parse(time.RFC3339Nano, s); timeErr == nil { + return metav1.Time{Time: t}, nil + } + t, err := time.Parse(time.RFC3339, s) + if err != nil { + return metav1.Time{}, err + } + return metav1.Time{Time: t}, nil +} + +// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements +// labels.Selector. +func NodeSelectorRequirementsAsSelector(nsm []NodeSelectorRequirement) (labels.Selector, error) { + if len(nsm) == 0 { + return labels.Nothing(), nil + } + selector := labels.NewSelector() + for _, expr := range nsm { + var op selection.Operator + switch expr.Operator { + case NodeSelectorOpIn: + op = selection.In + case NodeSelectorOpNotIn: + op = selection.NotIn + case NodeSelectorOpExists: + op = selection.Exists + case NodeSelectorOpDoesNotExist: + op = selection.DoesNotExist + case NodeSelectorOpGt: + op = selection.GreaterThan + case NodeSelectorOpLt: + op = selection.LessThan + default: + return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) + } + r, err := labels.NewRequirement(expr.Key, op, expr.Values) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + return selector, nil +} + +const ( + // SeccompPodAnnotationKey represents the key of a seccomp profile applied + // to all containers of a pod. + SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" + + // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied + // to one container of a pod. + SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" + + // CreatedByAnnotation represents the key used to store the spec(json) + // used to create the resource.
+ CreatedByAnnotation = "kubernetes.io/created-by" + + // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) + // in the Annotations of a Node. + PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" + + // SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure + // container of a pod. The annotation value is a comma separated list of sysctl_name=value + // key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by + // the kubelet. Pods with other sysctls will fail to launch. + SysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/sysctls" + + // UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure + // container of a pod. The annotation value is a comma separated list of sysctl_name=value + // key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly + // namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use + // is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet + // will fail to launch. + UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls" + + // ObjectTTLAnnotationKey represents a suggestion for kubelet for how long it can cache + // an object (e.g. secret, config map) before fetching it again from apiserver. + // This annotation can be attached to node. + ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" +) + +// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list. +// Returns true if something was updated, false otherwise. +func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error) { + podTolerations := pod.Spec.Tolerations + + var newTolerations []Toleration + updated := false + for i := range podTolerations { + if toleration.MatchToleration(&podTolerations[i]) { + if Semantic.DeepEqual(toleration, podTolerations[i]) { + return false, nil + } + newTolerations = append(newTolerations, *toleration) + updated = true + continue + } + + newTolerations = append(newTolerations, podTolerations[i]) + } + + if !updated { + newTolerations = append(newTolerations, *toleration) + } + + pod.Spec.Tolerations = newTolerations + return true, nil +} + +// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>, +// if the two tolerations have same <key,effect,operator,value> combination, regard as they match. +// TODO: uniqueness check for tolerations in api validations. +func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { + return t.Key == tolerationToMatch.Key && + t.Effect == tolerationToMatch.Effect && + t.Operator == tolerationToMatch.Operator && + t.Value == tolerationToMatch.Value +} + +// TolerationToleratesTaint checks if the toleration tolerates the taint. +func TolerationToleratesTaint(toleration *Toleration, taint *Taint) bool { + if len(toleration.Effect) != 0 && toleration.Effect != taint.Effect { + return false + } + + if toleration.Key != taint.Key { + return false + } + // TODO: Use proper defaulting when Toleration becomes a field of PodSpec + if (len(toleration.Operator) == 0 || toleration.Operator == TolerationOpEqual) && toleration.Value == taint.Value { + return true + } + if toleration.Operator == TolerationOpExists { + return true + } + return false +} + +// TaintToleratedByTolerations checks if taint is tolerated by any of the tolerations.
+func TaintToleratedByTolerations(taint *Taint, tolerations []Toleration) bool { + tolerated := false + for i := range tolerations { + if TolerationToleratesTaint(&tolerations[i], taint) { + tolerated = true + break + } + } + return tolerated +} + +// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect, +// if the two taints have same key:effect, regard as they match. +func (t *Taint) MatchTaint(taintToMatch Taint) bool { + return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect +} + +// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. +func (t *Taint) ToString() string { + if len(t.Value) == 0 { + return fmt.Sprintf("%v:%v", t.Key, t.Effect) + } + return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect) +} + +// SysctlsFromPodAnnotations parses the sysctl annotations into a slice of safe Sysctls +// and a slice of unsafe Sysctls. This is only a convenience wrapper around +// SysctlsFromPodAnnotation. +func SysctlsFromPodAnnotations(a map[string]string) ([]Sysctl, []Sysctl, error) { + safe, err := SysctlsFromPodAnnotation(a[SysctlsPodAnnotationKey]) + if err != nil { + return nil, nil, err + } + unsafe, err := SysctlsFromPodAnnotation(a[UnsafeSysctlsPodAnnotationKey]) + if err != nil { + return nil, nil, err + } + + return safe, unsafe, nil +} + +// SysctlsFromPodAnnotation parses an annotation value into a slice of Sysctls. +func SysctlsFromPodAnnotation(annotation string) ([]Sysctl, error) { + if len(annotation) == 0 { + return nil, nil + } + + kvs := strings.Split(annotation, ",") + sysctls := make([]Sysctl, len(kvs)) + for i, kv := range kvs { + cs := strings.Split(kv, "=") + if len(cs) != 2 || len(cs[0]) == 0 { + return nil, fmt.Errorf("sysctl %q not of the format sysctl_name=value", kv) + } + sysctls[i].Name = cs[0] + sysctls[i].Value = cs[1] + } + return sysctls, nil +} + +// PodAnnotationsFromSysctls creates an annotation value for a slice of Sysctls. +func PodAnnotationsFromSysctls(sysctls []Sysctl) string { + if len(sysctls) == 0 { + return "" + } + + kvs := make([]string, len(sysctls)) + for i := range sysctls { + kvs[i] = fmt.Sprintf("%s=%s", sysctls[i].Name, sysctls[i].Value) + } + return strings.Join(kvs, ",") +} + +// GetPersistentVolumeClass returns StorageClassName. +func GetPersistentVolumeClass(volume *PersistentVolume) string { + // Use beta annotation first + if class, found := volume.Annotations[BetaStorageClassAnnotation]; found { + return class + } + + return volume.Spec.StorageClassName +} + +// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was +// requested, it returns "". +func GetPersistentVolumeClaimClass(claim *PersistentVolumeClaim) string { + // Use beta annotation first + if class, found := claim.Annotations[BetaStorageClassAnnotation]; found { + return class + } + + if claim.Spec.StorageClassName != nil { + return *claim.Spec.StorageClassName + } + + return "" +} + +// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field. 
+func PersistentVolumeClaimHasClass(claim *PersistentVolumeClaim) bool { + // Use beta annotation first + if _, found := claim.Annotations[BetaStorageClassAnnotation]; found { + return true + } + + if claim.Spec.StorageClassName != nil { + return true + } + + return false +} diff --git a/vendor/k8s.io/client-go/pkg/api/install/OWNERS b/vendor/k8s.io/client-go/pkg/api/install/OWNERS new file mode 100755 index 0000000000..01d6b37021 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/install/OWNERS @@ -0,0 +1,11 @@ +reviewers: +- lavalamp +- smarterclayton +- deads2k +- caesarxuchao +- liggitt +- nikhiljindal +- dims +- krousey +- david-mcmahon +- feihujiang diff --git a/vendor/k8s.io/client-go/pkg/api/install/install.go b/vendor/k8s.io/client-go/pkg/api/install/install.go new file mode 100644 index 0000000000..1b9553b706 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/install/install.go @@ -0,0 +1,70 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the v1 monolithic api, making it available as an +// option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/api/v1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: api.GroupName, + VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/api", + AddInternalObjectsToScheme: api.AddToScheme, + RootScopedKinds: sets.NewString( + "Node", + "Namespace", + "PersistentVolume", + "ComponentStatus", + ), + IgnoredKinds: sets.NewString( + "ListOptions", + "DeleteOptions", + "Status", + "PodLogOptions", + "PodExecOptions", + "PodAttachOptions", + "PodPortForwardOptions", + "PodProxyOptions", + "NodeProxyOptions", + "ServiceProxyOptions", + "ThirdPartyResource", + "ThirdPartyResourceData", + "ThirdPartyResourceList", + ), + }, + announced.VersionToSchemeFunc{ + v1.SchemeGroupVersion.Version: v1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/api/json.go b/vendor/k8s.io/client-go/pkg/api/json.go new file mode 100644 index 0000000000..3a6e04c182 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/json.go @@ -0,0 +1,28 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
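The helpers added in pkg/api/helpers.go above are plain functions over the internal API types, so they can be exercised directly. A small hedged sketch of two of them, the access-mode string round trip and the sysctl annotation parser; the concrete values are made up for illustration and are not part of this diff:

package example // illustrative sketch, not part of the vendored code

import (
    "fmt"

    api "k8s.io/client-go/pkg/api"
)

// demoHelpers exercises two helpers defined above: access modes serialize in a
// fixed RWO,ROX,RWX order, and sysctl annotations are comma-separated
// name=value pairs.
func demoHelpers() error {
    modes := []api.PersistentVolumeAccessMode{api.ReadWriteMany, api.ReadWriteOnce}
    s := api.GetAccessModesAsString(modes) // "RWO,RWX"
    fmt.Println(s, api.GetAccessModesFromString(s))

    sysctls, err := api.SysctlsFromPodAnnotation("net.core.somaxconn=1024,kernel.shm_rmid_forced=1")
    if err != nil {
        return err
    }
    fmt.Println(api.PodAnnotationsFromSysctls(sysctls)) // round-trips to the same string
    return nil
}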
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import "encoding/json" + +// This file implements json marshaling/unmarshaling interfaces on objects that are currently marshaled into annotations +// to prevent anyone from marshaling these internal structs. + +var _ = json.Marshaler(&AvoidPods{}) +var _ = json.Unmarshaler(&AvoidPods{}) + +func (AvoidPods) MarshalJSON() ([]byte, error) { panic("do not marshal internal struct") } +func (*AvoidPods) UnmarshalJSON([]byte) error { panic("do not unmarshal to internal struct") } diff --git a/vendor/k8s.io/client-go/pkg/api/ref.go b/vendor/k8s.io/client-go/pkg/api/ref.go new file mode 100644 index 0000000000..370cf5513a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/ref.go @@ -0,0 +1,132 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "errors" + "fmt" + "net/url" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + // Errors that could be returned by GetReference. + ErrNilObject = errors.New("can't reference a nil object") + ErrNoSelfLink = errors.New("selfLink was empty, can't make reference") +) + +// GetReference returns an ObjectReference which refers to the given +// object, or an error if the object doesn't follow the conventions +// that would allow this. +// TODO: should take a meta.Interface see http://issue.k8s.io/7127 +func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference, error) { + if obj == nil { + return nil, ErrNilObject + } + if ref, ok := obj.(*ObjectReference); ok { + // Don't make a reference to a reference. 
+ return ref, nil + } + + gvk := obj.GetObjectKind().GroupVersionKind() + + // if the object referenced is actually persisted, we can just get kind from meta + // if we are building an object reference to something not yet persisted, we should fallback to scheme + kind := gvk.Kind + if len(kind) == 0 { + // TODO: this is wrong + gvks, _, err := scheme.ObjectKinds(obj) + if err != nil { + return nil, err + } + kind = gvks[0].Kind + } + + // An object that implements only List has enough metadata to build a reference + var listMeta meta.List + objectMeta, err := meta.Accessor(obj) + if err != nil { + listMeta, err = meta.ListAccessor(obj) + if err != nil { + return nil, err + } + } else { + listMeta = objectMeta + } + + // if the object referenced is actually persisted, we can also get version from meta + version := gvk.GroupVersion().String() + if len(version) == 0 { + selfLink := listMeta.GetSelfLink() + if len(selfLink) == 0 { + return nil, ErrNoSelfLink + } + selfLinkUrl, err := url.Parse(selfLink) + if err != nil { + return nil, err + } + // example paths: ///* + parts := strings.Split(selfLinkUrl.Path, "/") + if len(parts) < 3 { + return nil, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version) + } + version = parts[2] + } + + // only has list metadata + if objectMeta == nil { + return &ObjectReference{ + Kind: kind, + APIVersion: version, + ResourceVersion: listMeta.GetResourceVersion(), + }, nil + } + + return &ObjectReference{ + Kind: kind, + APIVersion: version, + Name: objectMeta.GetName(), + Namespace: objectMeta.GetNamespace(), + UID: objectMeta.GetUID(), + ResourceVersion: objectMeta.GetResourceVersion(), + }, nil +} + +// GetPartialReference is exactly like GetReference, but allows you to set the FieldPath. +func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*ObjectReference, error) { + ref, err := GetReference(scheme, obj) + if err != nil { + return nil, err + } + ref.FieldPath = fieldPath + return ref, nil +} + +// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that +// intend only to get a reference to that object. This simplifies the event recording interface. +func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} +func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/vendor/k8s.io/client-go/pkg/api/register.go b/vendor/k8s.io/client-go/pkg/api/register.go new file mode 100644 index 0000000000..bd842b182e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/register.go @@ -0,0 +1,135 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
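Editorial aside, not part of the vendored file: GetReference above is the helper that turns a typed object into an ObjectReference (event recording is the usual consumer). A minimal sketch using the PersistentVolume type vendored later in this patch; the object is made up, and TypeMeta is set explicitly because, as the code above shows, an object with neither a Kind nor a SelfLink returns ErrNoSelfLink:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	api "k8s.io/client-go/pkg/api"
	_ "k8s.io/client-go/pkg/api/install" // registers the internal types into api.Scheme
)

func main() {
	pv := &api.PersistentVolume{
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "PersistentVolume"},
		ObjectMeta: metav1.ObjectMeta{Name: "pv0001"},
	}

	ref, err := api.GetReference(api.Scheme, pv)
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.Kind, ref.APIVersion, ref.Name) // PersistentVolume v1 pv0001
}

When the object's TypeMeta is empty, GetReference instead falls back to scheme.ObjectKinds for the kind (and to the SelfLink for the version), which is why the blank install import, or an equivalent scheme registration, matters for real callers.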
+*/ + +package api + +import ( + "os" + + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +// GroupFactoryRegistry is the APIGroupFactoryRegistry (overlaps a bit with Registry, see comments in package for details) +var GroupFactoryRegistry = make(announced.APIGroupFactoryRegistry) + +// Registry is an instance of an API registry. This is an interim step to start removing the idea of a global +// API registry. +var Registry = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS")) + +// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered. +// NOTE: If you are copying this file to start a new api group, STOP! Copy the +// extensions group instead. This Scheme is special and should appear ONLY in +// the api group, unless you really know what you're doing. +// TODO(lavalamp): make the above error impossible. +var Scheme = runtime.NewScheme() + +// Codecs provides access to encoding and decoding for the scheme +var Codecs = serializer.NewCodecFactory(Scheme) + +// GroupName is the group name use in this package +const GroupName = "" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Unversioned is group version for unversioned API objects +// TODO: this should be v1 probably +var Unversioned = schema.GroupVersion{Group: "", Version: "v1"} + +// ParameterCodec handles versioning of objects that are converted to query parameters. +var ParameterCodec = runtime.NewParameterCodec(Scheme) + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { + return err + } + scheme.AddKnownTypes(SchemeGroupVersion, + &Pod{}, + &PodList{}, + &PodStatusResult{}, + &PodTemplate{}, + &PodTemplateList{}, + &ReplicationControllerList{}, + &ReplicationController{}, + &ServiceList{}, + &Service{}, + &ServiceProxyOptions{}, + &NodeList{}, + &Node{}, + &NodeProxyOptions{}, + &Endpoints{}, + &EndpointsList{}, + &Binding{}, + &Event{}, + &EventList{}, + &List{}, + &LimitRange{}, + &LimitRangeList{}, + &ResourceQuota{}, + &ResourceQuotaList{}, + &Namespace{}, + &NamespaceList{}, + &ServiceAccount{}, + &ServiceAccountList{}, + &Secret{}, + &SecretList{}, + &PersistentVolume{}, + &PersistentVolumeList{}, + &PersistentVolumeClaim{}, + &PersistentVolumeClaimList{}, + &PodAttachOptions{}, + &PodLogOptions{}, + &PodExecOptions{}, + &PodPortForwardOptions{}, + &PodProxyOptions{}, + &ComponentStatus{}, + &ComponentStatusList{}, + &SerializedReference{}, + &RangeAllocation{}, + &ConfigMap{}, + &ConfigMapList{}, + ) + + // Register Unversioned types under their own special group + scheme.AddUnversionedTypes(Unversioned, 
+ &metav1.Status{}, + &metav1.APIVersions{}, + &metav1.APIGroupList{}, + &metav1.APIGroup{}, + &metav1.APIResourceList{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/api/resource_helpers.go b/vendor/k8s.io/client-go/pkg/api/resource_helpers.go new file mode 100644 index 0000000000..88d0f80d76 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/resource_helpers.go @@ -0,0 +1,229 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "time" + + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Returns string version of ResourceName. +func (self ResourceName) String() string { + return string(self) +} + +// Returns the CPU limit if specified. +func (self *ResourceList) Cpu() *resource.Quantity { + if val, ok := (*self)[ResourceCPU]; ok { + return &val + } + return &resource.Quantity{Format: resource.DecimalSI} +} + +// Returns the Memory limit if specified. +func (self *ResourceList) Memory() *resource.Quantity { + if val, ok := (*self)[ResourceMemory]; ok { + return &val + } + return &resource.Quantity{Format: resource.BinarySI} +} + +func (self *ResourceList) Pods() *resource.Quantity { + if val, ok := (*self)[ResourcePods]; ok { + return &val + } + return &resource.Quantity{} +} + +func (self *ResourceList) NvidiaGPU() *resource.Quantity { + if val, ok := (*self)[ResourceNvidiaGPU]; ok { + return &val + } + return &resource.Quantity{} +} + +func GetContainerStatus(statuses []ContainerStatus, name string) (ContainerStatus, bool) { + for i := range statuses { + if statuses[i].Name == name { + return statuses[i], true + } + } + return ContainerStatus{}, false +} + +func GetExistingContainerStatus(statuses []ContainerStatus, name string) ContainerStatus { + for i := range statuses { + if statuses[i].Name == name { + return statuses[i] + } + } + return ContainerStatus{} +} + +// IsPodAvailable returns true if a pod is available; false otherwise. +// Precondition for an available pod is that it must be ready. On top +// of that, there are two cases when a pod can be considered available: +// 1. minReadySeconds == 0, or +// 2. LastTransitionTime (is set) + minReadySeconds < current time +func IsPodAvailable(pod *Pod, minReadySeconds int32, now metav1.Time) bool { + if !IsPodReady(pod) { + return false + } + + c := GetPodReadyCondition(pod.Status) + minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second + if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) { + return true + } + return false +} + +// IsPodReady returns true if a pod is ready; false otherwise. +func IsPodReady(pod *Pod) bool { + return IsPodReadyConditionTrue(pod.Status) +} + +// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise.
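Editorial aside, not part of the vendored file: a sketch of how the minReadySeconds logic in IsPodAvailable above plays out; the pod literal is made up and uses only fields referenced by the helpers in this file:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	api "k8s.io/client-go/pkg/api"
)

func main() {
	// A pod whose Ready condition became True ten seconds ago (made-up data).
	transition := metav1.NewTime(time.Now().Add(-10 * time.Second))
	pod := &api.Pod{
		Status: api.PodStatus{
			Conditions: []api.PodCondition{{
				Type:               api.PodReady,
				Status:             api.ConditionTrue,
				LastTransitionTime: transition,
			}},
		},
	}

	now := metav1.Now()
	fmt.Println(api.IsPodAvailable(pod, 0, now))  // true: ready and minReadySeconds == 0
	fmt.Println(api.IsPodAvailable(pod, 5, now))  // true: ready for longer than 5 seconds
	fmt.Println(api.IsPodAvailable(pod, 30, now)) // false: not yet ready for 30 seconds
}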
+func IsPodReadyConditionTrue(status PodStatus) bool { + condition := GetPodReadyCondition(status) + return condition != nil && condition.Status == ConditionTrue +} + +// Extracts the pod ready condition from the given status and returns that. +// Returns nil if the condition is not present. +func GetPodReadyCondition(status PodStatus) *PodCondition { + _, condition := GetPodCondition(&status, PodReady) + return condition +} + +// GetPodCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func GetPodCondition(status *PodStatus, conditionType PodConditionType) (int, *PodCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} + +// GetNodeCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func GetNodeCondition(status *NodeStatus, conditionType NodeConditionType) (int, *NodeCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} + +// Updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the +// status has changed. +// Returns true if pod condition has changed or has been added. +func UpdatePodCondition(status *PodStatus, condition *PodCondition) bool { + condition.LastTransitionTime = metav1.Now() + // Try to find this pod condition. + conditionIndex, oldCondition := GetPodCondition(status, condition.Type) + + if oldCondition == nil { + // We are adding new pod condition. + status.Conditions = append(status.Conditions, *condition) + return true + } else { + // We are updating an existing condition, so we need to check if it has changed. + if condition.Status == oldCondition.Status { + condition.LastTransitionTime = oldCondition.LastTransitionTime + } + + isEqual := condition.Status == oldCondition.Status && + condition.Reason == oldCondition.Reason && + condition.Message == oldCondition.Message && + condition.LastProbeTime.Equal(oldCondition.LastProbeTime) && + condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime) + + status.Conditions[conditionIndex] = *condition + // Return true if one of the fields have changed. + return !isEqual + } +} + +// IsNodeReady returns true if a node is ready; false otherwise. +func IsNodeReady(node *Node) bool { + for _, c := range node.Status.Conditions { + if c.Type == NodeReady { + return c.Status == ConditionTrue + } + } + return false +} + +// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all +// containers of the pod. 
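Editorial aside, not part of the vendored file: UpdatePodCondition above reports whether anything changed and keeps the stored LastTransitionTime when the condition's Status has not changed. A minimal sketch with made-up conditions:

package main

import (
	"fmt"

	api "k8s.io/client-go/pkg/api"
)

func main() {
	status := &api.PodStatus{}

	ready := api.PodCondition{Type: api.PodReady, Status: api.ConditionTrue}
	fmt.Println(api.UpdatePodCondition(status, &ready)) // true: the condition was added

	// Re-applying an identical condition is a no-op: LastTransitionTime is carried
	// over from the stored condition and the function reports no change.
	again := api.PodCondition{Type: api.PodReady, Status: api.ConditionTrue}
	fmt.Println(api.UpdatePodCondition(status, &again)) // false
}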
+func PodRequestsAndLimits(pod *Pod) (reqs map[ResourceName]resource.Quantity, limits map[ResourceName]resource.Quantity, err error) { + reqs, limits = map[ResourceName]resource.Quantity{}, map[ResourceName]resource.Quantity{} + for _, container := range pod.Spec.Containers { + for name, quantity := range container.Resources.Requests { + if value, ok := reqs[name]; !ok { + reqs[name] = *quantity.Copy() + } else { + value.Add(quantity) + reqs[name] = value + } + } + for name, quantity := range container.Resources.Limits { + if value, ok := limits[name]; !ok { + limits[name] = *quantity.Copy() + } else { + value.Add(quantity) + limits[name] = value + } + } + } + // init containers define the minimum of any resource + for _, container := range pod.Spec.InitContainers { + for name, quantity := range container.Resources.Requests { + value, ok := reqs[name] + if !ok { + reqs[name] = *quantity.Copy() + continue + } + if quantity.Cmp(value) > 0 { + reqs[name] = *quantity.Copy() + } + } + for name, quantity := range container.Resources.Limits { + value, ok := limits[name] + if !ok { + limits[name] = *quantity.Copy() + continue + } + if quantity.Cmp(value) > 0 { + limits[name] = *quantity.Copy() + } + } + } + return +} diff --git a/vendor/k8s.io/client-go/pkg/api/types.go b/vendor/k8s.io/client-go/pkg/api/types.go new file mode 100644 index 0000000000..4f42369508 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/types.go @@ -0,0 +1,3822 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// Common string formats +// --------------------- +// Many fields in this API have formatting requirements. The commonly used +// formats are defined here. +// +// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier" +// in the C language. This is captured by the following regex: +// [A-Za-z_][A-Za-z0-9_]* +// This defines the format, but not the length restriction, which should be +// specified at the definition of any field of this type. +// +// DNS_LABEL: This is a string, no more than 63 characters long, that conforms +// to the definition of a "label" in RFCs 1035 and 1123. This is captured +// by the following regex: +// [a-z0-9]([-a-z0-9]*[a-z0-9])? +// +// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms +// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured +// by the following regex: +// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* +// or more simply: +// DNS_LABEL(\.DNS_LABEL)* +// +// IANA_SVC_NAME: This is a string, no more than 15 characters long, that +// conforms to the definition of IANA service name in RFC 6335. 
+// It must contain at least one letter [a-z] and it must contain only [a-z0-9-]. +// Hyphens ('-') cannot be leading or trailing character of the string +// and cannot be adjacent to other hyphens. + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. +type ObjectMeta struct { + // Name is unique within a namespace. Name is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // +optional + Name string + + // GenerateName indicates that the name should be made unique by the server prior to persisting + // it. A non-empty value for the field indicates the name will be made unique (and the name + // returned to the client will be different than the name passed). The value of this field will + // be combined with a unique suffix on the server if the Name field has not been provided. + // The provided value must be valid within the rules for Name, and may be truncated by the length + // of the suffix required to make the value unique on the server. + // + // If this field is specified, and Name is not present, the server will NOT return a 409 if the + // generated name exists - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // +optional + GenerateName string + + // Namespace defines the space within which name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // +optional + Namespace string + + // SelfLink is a URL representing this object. + // +optional + SelfLink string + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // +optional + UID types.UID + + // An opaque value that represents the version of this resource. May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and values may only be valid for a particular + // resource or set of resources. Only servers will generate resource versions. + // +optional + ResourceVersion string + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. Read-only. + // +optional + Generation int64 + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. + // +optional + CreationTimestamp metav1.Time + + // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This + // field is set by the server when a graceful deletion is requested by the user, and is not + // directly settable by a client.
The resource is expected to be deleted (no longer visible + // from resource lists, and not reachable by name) after the time in this field. Once set, + // this value may not be unset or be set further into the future, although it may be shortened + // or the resource may be deleted prior to this time. For example, a user may request that + // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination + // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard + // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the + // API. In the presence of network partitions, this object may still exist after this + // timestamp, until an administrator or automated process can determine the resource is + // fully terminated. + // If not set, graceful deletion of the object has not been requested. + // + // Populated by the system when a graceful deletion is requested. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + DeletionTimestamp *metav1.Time + + // DeletionGracePeriodSeconds records the graceful deletion value set when graceful deletion + // was requested. Represents the most recent grace period, and may only be shortened once set. + // +optional + DeletionGracePeriodSeconds *int64 + + // Labels are key value pairs that may be used to scope and select individual resources. + // Label keys are of the form: + // label-key ::= prefixed-name | name + // prefixed-name ::= prefix '/' name + // prefix ::= DNS_SUBDOMAIN + // name ::= DNS_LABEL + // The prefix is optional. If the prefix is not specified, the key is assumed to be private + // to the user. Other system components that wish to use labels must specify a prefix. The + // "kubernetes.io/" prefix is reserved for use by kubernetes components. + // +optional + Labels map[string]string + + // Annotations are unstructured key value data stored with a resource that may be set by + // external tooling. They are not queryable and should be preserved when modifying + // objects. Annotation keys have the same formatting restrictions as Label keys. See the + // comments on Labels for details. + // +optional + Annotations map[string]string + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + OwnerReferences []metav1.OwnerReference + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + // +optional + Finalizers []string + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. 
+ // +optional + ClusterName string +} + +const ( + // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients + NamespaceDefault string = "default" + // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces + NamespaceAll string = "" + // NamespaceNone is the argument for a context when there is no namespace. + NamespaceNone string = "" + // NamespaceSystem is the system namespace where we place system components. + NamespaceSystem string = "kube-system" + // NamespacePublic is the namespace where we place public info (ConfigMaps) + NamespacePublic string = "kube-public" + // TerminationMessagePathDefault means the default path to capture the application termination message running in a container + TerminationMessagePathDefault string = "/dev/termination-log" +) + +// Volume represents a named volume in a pod that may be accessed by any containers in the pod. +type Volume struct { + // Required: This must be a DNS_LABEL. Each volume in a pod must have + // a unique name. + Name string + // The VolumeSource represents the location and type of a volume to mount. + // This is optional for now. If not specified, the Volume is implied to be an EmptyDir. + // This implied behavior is deprecated and will be removed in a future version. + // +optional + VolumeSource +} + +// VolumeSource represents the source location of a volume to mount. +// Only one of its members may be specified. +type VolumeSource struct { + // HostPath represents file or directory on the host machine that is + // directly exposed to the container. This is generally used for system + // agents or other privileged things that are allowed to see the host + // machine. Most containers will NOT need this. + // --- + // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + // mount host directories as read/write. + // +optional + HostPath *HostPathVolumeSource + // EmptyDir represents a temporary directory that shares a pod's lifetime. + // +optional + EmptyDir *EmptyDirVolumeSource + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource + // AWSElasticBlockStore represents an AWS EBS disk that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource + // GitRepo represents a git repository at a particular revision. + // +optional + GitRepo *GitRepoVolumeSource + // Secret represents a secret that should populate this volume. + // +optional + Secret *SecretVolumeSource + // NFS represents an NFS mount on the host that shares a pod's lifetime + // +optional + NFS *NFSVolumeSource + // ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. 
+ // +optional + ISCSI *ISCSIVolumeSource + // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime + // +optional + Glusterfs *GlusterfsVolumeSource + // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace + // +optional + PersistentVolumeClaim *PersistentVolumeClaimVolumeSource + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime + // +optional + RBD *RBDVolumeSource + + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource + + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. + // +optional + FlexVolume *FlexVolumeSource + + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // +optional + Cinder *CinderVolumeSource + + // CephFS represents a Cephfs mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSVolumeSource + + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource + + // DownwardAPI represents metadata about the pod that should populate this volume + // +optional + DownwardAPI *DownwardAPIVolumeSource + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + AzureFile *AzureFileVolumeSource + // ConfigMap represents a configMap that should populate this volume + // +optional + ConfigMap *ConfigMapVolumeSource + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + AzureDisk *AzureDiskVolumeSource + // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource + // Items for all in one resources secrets, configmaps, and downward API + Projected *ProjectedVolumeSource + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource +} + +// Similar to VolumeSource but meant for the administrator who creates PVs. +// Exactly one of its members must be set. +type PersistentVolumeSource struct { + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource + // AWSElasticBlockStore represents an AWS EBS disk that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource + // HostPath represents a directory on the host. + // Provisioned by a developer or tester. + // This is useful for single-node development and testing only! + // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. 
+ // +optional + HostPath *HostPathVolumeSource + // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod + // +optional + Glusterfs *GlusterfsVolumeSource + // NFS represents an NFS mount on the host that shares a pod's lifetime + // +optional + NFS *NFSVolumeSource + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime + // +optional + RBD *RBDVolumeSource + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource + // ISCSIVolumeSource represents an ISCSI resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + ISCSI *ISCSIVolumeSource + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. + // +optional + FlexVolume *FlexVolumeSource + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // +optional + Cinder *CinderVolumeSource + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSVolumeSource + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + AzureFile *AzureFileVolumeSource + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + AzureDisk *AzureDiskVolumeSource + // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource +} + +type PersistentVolumeClaimVolumeSource struct { + // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume + ClaimName string + // Optional: Defaults to false (read/write). ReadOnly here + // will force the ReadOnly setting in VolumeMounts + // +optional + ReadOnly bool +} + +const ( + // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. + // It's currently still used and will be held for backwards compatibility + BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" +) + +// +genclient=true +// +nonNamespaced=true + +type PersistentVolume struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + //Spec defines a persistent volume owned by the cluster + // +optional + Spec PersistentVolumeSpec + + // Status represents the current information about persistent volume. 
+ // +optional + Status PersistentVolumeStatus +} + +type PersistentVolumeSpec struct { + // Resources represents the actual resources of the volume + Capacity ResourceList + // Source represents the location and type of a volume to mount. + PersistentVolumeSource + // AccessModes contains all ways the volume can be mounted + // +optional + AccessModes []PersistentVolumeAccessMode + // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. + // ClaimRef is expected to be non-nil when bound. + // claim.VolumeName is the authoritative bind between PV and PVC. + // When set to non-nil value, PVC.Spec.Selector of the referenced PVC is + // ignored, i.e. labels of this PV do not need to match PVC selector. + // +optional + ClaimRef *ObjectReference + // Optional: what happens to a persistent volume when released from its claim. + // +optional + PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy + // Name of StorageClass to which this persistent volume belongs. Empty value + // means that this volume does not belong to any StorageClass. + // +optional + StorageClassName string +} + +// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes +type PersistentVolumeReclaimPolicy string + +const ( + // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim. + // The volume plugin must support Recycling. + PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle" + // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. + // The volume plugin must support Deletion. + PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" + // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. + // The default policy is Retain. + PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" +) + +type PersistentVolumeStatus struct { + // Phase indicates if a volume is available, bound to a claim, or released by a claim + // +optional + Phase PersistentVolumePhase + // A human-readable message indicating details about why the volume is in this state. + // +optional + Message string + // Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI + // +optional + Reason string +} + +type PersistentVolumeList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + Items []PersistentVolume +} + +// +genclient=true + +// PersistentVolumeClaim is a user's request for and claim to a persistent volume +type PersistentVolumeClaim struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the volume requested by a pod author + // +optional + Spec PersistentVolumeClaimSpec + + // Status represents the current information about a claim + // +optional + Status PersistentVolumeClaimStatus +} + +type PersistentVolumeClaimList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + Items []PersistentVolumeClaim +} + +// PersistentVolumeClaimSpec describes the common attributes of storage devices +// and allows a Source for provider-specific attributes +type PersistentVolumeClaimSpec struct { + // Contains the types of access modes required + // +optional + AccessModes []PersistentVolumeAccessMode + // A label query over volumes to consider for binding. 
This selector is + // ignored when VolumeName is set + // +optional + Selector *metav1.LabelSelector + // Resources represents the minimum resources required + // +optional + Resources ResourceRequirements + // VolumeName is the binding reference to the PersistentVolume backing this + // claim. When set to non-empty value Selector is not evaluated + // +optional + VolumeName string + // Name of the StorageClass required by the claim. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1 + // +optional + StorageClassName *string +} + +type PersistentVolumeClaimStatus struct { + // Phase represents the current phase of PersistentVolumeClaim + // +optional + Phase PersistentVolumeClaimPhase + // AccessModes contains all ways the volume backing the PVC can be mounted + // +optional + AccessModes []PersistentVolumeAccessMode + // Represents the actual resources of the underlying volume + // +optional + Capacity ResourceList +} + +type PersistentVolumeAccessMode string + +const ( + // can be mounted read/write mode to exactly 1 host + ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" + // can be mounted in read-only mode to many hosts + ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" + // can be mounted in read/write mode to many hosts + ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany" +) + +type PersistentVolumePhase string + +const ( + // used for PersistentVolumes that are not available + VolumePending PersistentVolumePhase = "Pending" + // used for PersistentVolumes that are not yet bound + // Available volumes are held by the binder and matched to PersistentVolumeClaims + VolumeAvailable PersistentVolumePhase = "Available" + // used for PersistentVolumes that are bound + VolumeBound PersistentVolumePhase = "Bound" + // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted + // released volumes must be recycled before becoming available again + // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource + VolumeReleased PersistentVolumePhase = "Released" + // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim + VolumeFailed PersistentVolumePhase = "Failed" +) + +type PersistentVolumeClaimPhase string + +const ( + // used for PersistentVolumeClaims that are not yet bound + ClaimPending PersistentVolumeClaimPhase = "Pending" + // used for PersistentVolumeClaims that are bound + ClaimBound PersistentVolumeClaimPhase = "Bound" + // used for PersistentVolumeClaims that lost their underlying + // PersistentVolume. The claim was bound to a PersistentVolume and this + // volume does not exist any longer and all data on it was lost. + ClaimLost PersistentVolumeClaimPhase = "Lost" +) + +// Represents a host path mapped into a pod. +// Host path volumes do not support ownership management or SELinux relabeling. +type HostPathVolumeSource struct { + Path string +} + +// Represents an empty directory for a pod. +// Empty directory volumes support ownership management and SELinux relabeling. +type EmptyDirVolumeSource struct { + // TODO: Longer term we want to represent the selection of underlying + // media more like a scheduling problem - user says what traits they + // need, we give them a backing store that satisfies that. For now + // this will cover the most common needs. + // Optional: what type of storage medium should back this directory. + // The default is "" which means to use the node's default medium. 
+ // +optional + Medium StorageMedium +} + +// StorageMedium defines ways that storage can be allocated to a volume. +type StorageMedium string + +const ( + StorageMediumDefault StorageMedium = "" // use whatever the default is for the node + StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) +) + +// Protocol defines network protocols supported for things like container ports. +type Protocol string + +const ( + // ProtocolTCP is the TCP protocol. + ProtocolTCP Protocol = "TCP" + // ProtocolUDP is the UDP protocol. + ProtocolUDP Protocol = "UDP" +) + +// Represents a Persistent Disk resource in Google Compute Engine. +// +// A GCE PD must exist before mounting to a container. The disk must +// also be in the same GCE project and zone as the kubelet. A GCE PD +// can only be mounted as read/write once or read-only many times. GCE +// PDs support ownership management and SELinux relabeling. +type GCEPersistentDiskVolumeSource struct { + // Unique name of the PD resource. Used to identify the disk in GCE + PDName string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Partition on the disk to mount. + // If omitted, kubelet will attempt to mount the device name. + // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. + // +optional + Partition int32 + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +type ISCSIVolumeSource struct { + // Required: iSCSI target portal + // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) + // +optional + TargetPortal string + // Required: target iSCSI Qualified Name + // +optional + IQN string + // Required: iSCSI target lun number + // +optional + Lun int32 + // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // +optional + ISCSIInterface string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // Required: list of iSCSI target portal ips for high availability. + // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) + // +optional + Portals []string +} + +// Represents a Fibre Channel volume. +// Fibre Channel volumes can only be mounted as read/write once. +// Fibre Channel volumes support ownership management and SELinux relabeling. +type FCVolumeSource struct { + // Required: FC target worldwide names (WWNs) + TargetWWNs []string + // Required: FC target lun number + Lun *int32 + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// FlexVolume represents a generic volume resource that is +// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. +type FlexVolumeSource struct { + // Driver is the name of the driver to use for this volume. + Driver string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + // +optional + FSType string + // Optional: SecretRef is reference to the secret object containing + // sensitive information to pass to the plugin scripts. This may be + // empty if no secret object is specified. If the secret object + // contains more than one secret, all secrets are passed to the plugin + // scripts. + // +optional + SecretRef *LocalObjectReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // Optional: Extra driver options if any. + // +optional + Options map[string]string +} + +// Represents a Persistent Disk resource in AWS. +// +// An AWS EBS disk must exist before mounting to a container. The disk +// must also be in the same AWS zone as the kubelet. An AWS EBS disk +// can only be mounted as read/write once. AWS EBS volumes support +// ownership management and SELinux relabeling. +type AWSElasticBlockStoreVolumeSource struct { + // Unique id of the persistent disk resource. Used to identify the disk in AWS + VolumeID string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Partition on the disk to mount. + // If omitted, kubelet will attempt to mount the device name. + // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. + // +optional + Partition int32 + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a volume that is populated with the contents of a git repository. +// Git repo volumes do not support ownership management. +// Git repo volumes support SELinux relabeling. +type GitRepoVolumeSource struct { + // Repository URL + Repository string + // Commit hash, this is optional + // +optional + Revision string + // Clone target, this is optional + // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + // git repository. Otherwise, if specified, the volume will contain the git repository in + // the subdirectory with the given name. + // +optional + Directory string + // TODO: Consider credentials here. +} + +// Adapts a Secret into a volume. +// +// The contents of the target Secret's Data field will be presented in a volume +// as files using the keys in the Data field as the file names. +// Secret volumes support ownership management and SELinux relabeling. +type SecretVolumeSource struct { + // Name of the secret in the pod's namespace to use. 
+ // +optional + SecretName string + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool +} + +// Adapts a secret into a projected volume. +// +// The contents of the target Secret's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names. +// Note that this is identical to a secret volume source without the default +// mode. +type SecretProjection struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool +} + +// Represents an NFS mount that lasts the lifetime of a pod. +// NFS volumes do not support ownership management or SELinux relabeling. +type NFSVolumeSource struct { + // Server is the hostname or IP address of the NFS server + Server string + + // Path is the exported NFS share + Path string + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the NFS export to be mounted with read-only permissions + // +optional + ReadOnly bool +} + +// Represents a Quobyte mount that lasts the lifetime of a pod. +// Quobyte volumes do not support ownership management or SELinux relabeling. +type QuobyteVolumeSource struct { + // Registry represents a single or multiple Quobyte Registry services + // specified as a string as host:port pair (multiple entries are separated with commas) + // which acts as the central registry for volumes + Registry string + + // Volume is a string that references an already created Quobyte volume by name. + Volume string + + // Defaults to false (read/write). ReadOnly here will force + // the Quobyte to be mounted with read-only permissions + // +optional + ReadOnly bool + + // User to map volume access to + // Defaults to the root user + // +optional + User string + + // Group to map volume access to + // Default is no group + // +optional + Group string +} + +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. 
+type GlusterfsVolumeSource struct { + // Required: EndpointsName is the endpoint name that details Glusterfs topology + EndpointsName string + + // Required: Path is the Glusterfs volume path + Path string + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the Glusterfs to be mounted with read-only permissions + // +optional + ReadOnly bool +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +type RBDVolumeSource struct { + // Required: CephMonitors is a collection of Ceph monitors + CephMonitors []string + // Required: RBDImage is the rados image name + RBDImage string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: RadosPool is the rados pool name,default is rbd + // +optional + RBDPool string + // Optional: RBDUser is the rados user name, default is admin + // +optional + RadosUser string + // Optional: Keyring is the path to key ring for RBDUser, default is /etc/ceph/keyring + // +optional + Keyring string + // Optional: SecretRef is name of the authentication secret for RBDUser, default is nil. + // +optional + SecretRef *LocalObjectReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a cinder volume resource in Openstack. A Cinder volume +// must exist before mounting to a container. The volume must also be +// in the same region as the kubelet. Cinder volumes support ownership +// management and SELinux relabeling. +type CinderVolumeSource struct { + // Unique id of the volume used to identify the cinder volume + VolumeID string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a Ceph Filesystem mount that lasts the lifetime of a pod +// Cephfs volumes do not support ownership management or SELinux relabeling. +type CephFSVolumeSource struct { + // Required: Monitors is a collection of Ceph monitors + Monitors []string + // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + // +optional + Path string + // Optional: User is the rados user name, default is admin + // +optional + User string + // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // +optional + SecretFile string + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // +optional + SecretRef *LocalObjectReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a Flocker volume mounted by the Flocker agent. +// One and only one of datasetName and datasetUUID should be set. +// Flocker volumes do not support ownership management or SELinux relabeling. 
+type FlockerVolumeSource struct { + // Name of the dataset stored as metadata -> name on the dataset for Flocker + // should be considered as deprecated + // +optional + DatasetName string + // UUID of the dataset. This is unique identifier of a Flocker dataset + // +optional + DatasetUUID string +} + +// Represents a volume containing downward API info. +// Downward API volumes support ownership management and SELinux relabeling. +type DownwardAPIVolumeSource struct { + // Items is a list of DownwardAPIVolume file + // +optional + Items []DownwardAPIVolumeFile + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 +} + +// Represents a single file containing information from the downward API +type DownwardAPIVolumeFile struct { + // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' + Path string + // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. + // +optional + FieldRef *ObjectFieldSelector + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // +optional + ResourceFieldRef *ResourceFieldSelector + // Optional: mode bits to use on this file, must be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + Mode *int32 +} + +// Represents downward API info for projecting into a projected volume. +// Note that this is identical to a downwardAPI volume source without the default +// mode. +type DownwardAPIProjection struct { + // Items is a list of DownwardAPIVolume file + // +optional + Items []DownwardAPIVolumeFile +} + +// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. +type AzureFileVolumeSource struct { + // the name of secret that contains Azure Storage Account Name and Key + SecretName string + // Share Name + ShareName string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a vSphere volume resource. +type VsphereVirtualDiskVolumeSource struct { + // Path that identifies vSphere volume vmdk + VolumePath string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string +} + +// Represents a Photon Controller persistent disk resource. +type PhotonPersistentDiskVolumeSource struct { + // ID that identifies Photon Controller persistent disk + PdID string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + FSType string +} + +// PortworxVolumeSource represents a Portworx volume resource. 
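The PortworxVolumeSource introduced above is defined next. A sketch of the downward API volume just described, projecting pod labels and a container CPU limit into files (illustrative paths; ObjectFieldSelector and ResourceFieldSelector appear later in this file):

    func exampleDownwardAPIVolume() DownwardAPIVolumeSource {
        mode := int32(0444)
        return DownwardAPIVolumeSource{
            DefaultMode: &mode,
            Items: []DownwardAPIVolumeFile{
                {
                    Path:     "labels",
                    FieldRef: &ObjectFieldSelector{FieldPath: "metadata.labels"},
                },
                {
                    Path: "cpu_limit",
                    ResourceFieldRef: &ResourceFieldSelector{
                        ContainerName: "client-container", // required for volumes
                        Resource:      "limits.cpu",
                        // Divisor defaults to "1" when left unset.
                    },
                },
            },
        }
    }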
+type PortworxVolumeSource struct { + // VolumeID uniquely identifies a Portworx volume + VolumeID string + // FSType represents the filesystem type to mount + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +type AzureDataDiskCachingMode string + +const ( + AzureDataDiskCachingNone AzureDataDiskCachingMode = "None" + AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly" + AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite" +) + +// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. +type AzureDiskVolumeSource struct { + // The Name of the data disk in the blob storage + DiskName string + // The URI the the data disk in the blob storage + DataDiskURI string + // Host Caching mode: None, Read Only, Read Write. + // +optional + CachingMode *AzureDataDiskCachingMode + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType *string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly *bool +} + +// ScaleIOVolumeSource represents a persistent ScaleIO volume +type ScaleIOVolumeSource struct { + // The host address of the ScaleIO API Gateway. + Gateway string + // The name of the storage system as configured in ScaleIO. + System string + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + SecretRef *LocalObjectReference + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + SSLEnabled bool + // The name of the Protection Domain for the configured storage (defaults to "default"). + // +optional + ProtectionDomain string + // The Storage Pool associated with the protection domain (defaults to "default"). + // +optional + StoragePool string + // Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). + // +optional + StorageMode string + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. + VolumeName string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Adapts a ConfigMap into a volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// volume as files using the keys in the Data field as the file names, unless +// the items element is populated with specific mappings of keys to paths. +// ConfigMap volumes support ownership management and SELinux relabeling. +type ConfigMapVolumeSource struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. 
If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 + // Specify whether the ConfigMap or it's keys must be defined + // +optional + Optional *bool +} + +// Adapts a ConfigMap into a projected volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names, +// unless the items element is populated with specific mappings of keys to paths. +// Note that this is identical to a configmap volume source without the default +// mode. +type ConfigMapProjection struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Specify whether the ConfigMap or it's keys must be defined + // +optional + Optional *bool +} + +// Represents a projected volume source +type ProjectedVolumeSource struct { + // list of volume projections + Sources []VolumeProjection + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 +} + +// Projection that may be projected along with other supported volume types +type VolumeProjection struct { + // all types below are the supported types for projection into the same volume + + // information about the secret data to project + Secret *SecretProjection + // information about the downwardAPI data to project + DownwardAPI *DownwardAPIProjection + // information about the configMap data to project + ConfigMap *ConfigMapProjection +} + +// Maps a string key to a path within a volume. +type KeyToPath struct { + // The key to project. + Key string + + // The relative path of the file to map the key to. + // May not be an absolute path. + // May not contain the path element '..'. + // May not start with the string '..'. + Path string + // Optional: mode bits to use on this file, should be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. 
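KeyToPath's optional Mode field follows below. As a sketch of how the projection types above compose, a single projected volume can merge ConfigMap, Secret and downward API data into one directory (illustrative names):

    func exampleProjectedVolume() ProjectedVolumeSource {
        mode := int32(0440)
        return ProjectedVolumeSource{
            DefaultMode: &mode,
            Sources: []VolumeProjection{
                {ConfigMap: &ConfigMapProjection{
                    LocalObjectReference: LocalObjectReference{Name: "app-config"},
                    Items:                []KeyToPath{{Key: "app.cfg", Path: "config/app.cfg"}},
                }},
                {Secret: &SecretProjection{
                    LocalObjectReference: LocalObjectReference{Name: "app-secrets"},
                }},
                {DownwardAPI: &DownwardAPIProjection{
                    Items: []DownwardAPIVolumeFile{{
                        Path:     "namespace",
                        FieldRef: &ObjectFieldSelector{FieldPath: "metadata.namespace"},
                    }},
                }},
            },
        }
    }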
+ // +optional + Mode *int32 +} + +// ContainerPort represents a network port in a single container +type ContainerPort struct { + // Optional: If specified, this must be an IANA_SVC_NAME Each named port + // in a pod must have a unique name. + // +optional + Name string + // Optional: If specified, this must be a valid port number, 0 < x < 65536. + // If HostNetwork is specified, this must match ContainerPort. + // +optional + HostPort int32 + // Required: This must be a valid port number, 0 < x < 65536. + ContainerPort int32 + // Required: Supports "TCP" and "UDP". + // +optional + Protocol Protocol + // Optional: What host IP to bind the external port to. + // +optional + HostIP string +} + +// VolumeMount describes a mounting of a Volume within a container. +type VolumeMount struct { + // Required: This must match the Name of a Volume [above]. + Name string + // Optional: Defaults to false (read-write). + // +optional + ReadOnly bool + // Required. Must not contain ':'. + MountPath string + // Path within the volume from which the container's volume should be mounted. + // Defaults to "" (volume's root). + // +optional + SubPath string +} + +// EnvVar represents an environment variable present in a Container. +type EnvVar struct { + // Required: This must be a C_IDENTIFIER. + Name string + // Optional: no more than one of the following may be specified. + // Optional: Defaults to ""; variable references $(VAR_NAME) are expanded + // using the previous defined environment variables in the container and + // any service environment variables. If a variable cannot be resolved, + // the reference in the input string will be unchanged. The $(VAR_NAME) + // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped + // references will never be expanded, regardless of whether the variable + // exists or not. + // +optional + Value string + // Optional: Specifies a source the value of this var should come from. + // +optional + ValueFrom *EnvVarSource +} + +// EnvVarSource represents a source for the value of an EnvVar. +// Only one of its fields may be set. +type EnvVarSource struct { + // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, + // spec.nodeName, spec.serviceAccountName, status.podIP. + // +optional + FieldRef *ObjectFieldSelector + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // +optional + ResourceFieldRef *ResourceFieldSelector + // Selects a key of a ConfigMap. + // +optional + ConfigMapKeyRef *ConfigMapKeySelector + // Selects a key of a secret in the pod's namespace. + // +optional + SecretKeyRef *SecretKeySelector +} + +// ObjectFieldSelector selects an APIVersioned field of an object. +type ObjectFieldSelector struct { + // Required: Version of the schema the FieldPath is written in terms of. + // If no value is specified, it will be defaulted to the APIVersion of the + // enclosing object. 
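ObjectFieldSelector's two fields follow below. A sketch of the environment-variable plumbing just defined, drawing one value from a literal, one from a Secret key, and one from the downward API (illustrative names; SecretKeySelector is defined a little further down):

    func exampleEnv() []EnvVar {
        return []EnvVar{
            {Name: "LOG_LEVEL", Value: "debug"}, // plain literal value
            {
                Name: "DB_PASSWORD",
                ValueFrom: &EnvVarSource{
                    SecretKeyRef: &SecretKeySelector{
                        LocalObjectReference: LocalObjectReference{Name: "db-credentials"},
                        Key:                  "password",
                    },
                },
            },
            {
                Name: "POD_NAMESPACE",
                ValueFrom: &EnvVarSource{
                    FieldRef: &ObjectFieldSelector{FieldPath: "metadata.namespace"},
                },
            },
        }
    }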
+ APIVersion string + // Required: Path of the field to select in the specified API version + FieldPath string +} + +// ResourceFieldSelector represents container resources (cpu, memory) and their output format +type ResourceFieldSelector struct { + // Container name: required for volumes, optional for env vars + // +optional + ContainerName string + // Required: resource to select + Resource string + // Specifies the output format of the exposed resources, defaults to "1" + // +optional + Divisor resource.Quantity +} + +// Selects a key from a ConfigMap. +type ConfigMapKeySelector struct { + // The ConfigMap to select from. + LocalObjectReference + // The key to select. + Key string + // Specify whether the ConfigMap or it's key must be defined + // +optional + Optional *bool +} + +// SecretKeySelector selects a key of a Secret. +type SecretKeySelector struct { + // The name of the secret in the pod's namespace to select from. + LocalObjectReference + // The key of the secret to select from. Must be a valid secret key. + Key string + // Specify whether the Secret or it's key must be defined + // +optional + Optional *bool +} + +// EnvFromSource represents the source of a set of ConfigMaps +type EnvFromSource struct { + // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + // +optional + Prefix string + // The ConfigMap to select from. + //+optional + ConfigMapRef *ConfigMapEnvSource + // The Secret to select from. + //+optional + SecretRef *SecretEnvSource +} + +// ConfigMapEnvSource selects a ConfigMap to populate the environment +// variables with. +// +// The contents of the target ConfigMap's Data field will represent the +// key-value pairs as environment variables. +type ConfigMapEnvSource struct { + // The ConfigMap to select from. + LocalObjectReference + // Specify whether the ConfigMap must be defined + // +optional + Optional *bool +} + +// SecretEnvSource selects a Secret to populate the environment +// variables with. +// +// The contents of the target Secret's Data field will represent the +// key-value pairs as environment variables. +type SecretEnvSource struct { + // The Secret to select from. + LocalObjectReference + // Specify whether the Secret must be defined + // +optional + Optional *bool +} + +// HTTPHeader describes a custom header to be used in HTTP probes +type HTTPHeader struct { + // The header field name + Name string + // The header field value + Value string +} + +// HTTPGetAction describes an action based on HTTP Get requests. +type HTTPGetAction struct { + // Optional: Path to access on the HTTP server. + // +optional + Path string + // Required: Name or number of the port to access on the container. + // +optional + Port intstr.IntOrString + // Optional: Host name to connect to, defaults to the pod IP. You + // probably want to set "Host" in httpHeaders instead. + // +optional + Host string + // Optional: Scheme to use for connecting to the host, defaults to HTTP. + // +optional + Scheme URIScheme + // Optional: Custom headers to set in the request. HTTP allows repeated headers. 
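The HTTPHeaders field that completes HTTPGetAction follows below. For bulk environment injection, the EnvFromSource defined above might be used like this (illustrative ConfigMap name and prefix):

    func exampleEnvFrom() []EnvFromSource {
        optional := true
        return []EnvFromSource{
            {
                Prefix: "APP_", // each ConfigMap key is exposed as APP_<key>
                ConfigMapRef: &ConfigMapEnvSource{
                    LocalObjectReference: LocalObjectReference{Name: "app-config"},
                    Optional:             &optional, // tolerate a missing ConfigMap
                },
            },
        }
    }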
+ // +optional + HTTPHeaders []HTTPHeader +} + +// URIScheme identifies the scheme used for connection to a host for Get actions +type URIScheme string + +const ( + // URISchemeHTTP means that the scheme used will be http:// + URISchemeHTTP URIScheme = "HTTP" + // URISchemeHTTPS means that the scheme used will be https:// + URISchemeHTTPS URIScheme = "HTTPS" +) + +// TCPSocketAction describes an action based on opening a socket +type TCPSocketAction struct { + // Required: Port to connect to. + // +optional + Port intstr.IntOrString +} + +// ExecAction describes a "run in container" action. +type ExecAction struct { + // Command is the command line to execute inside the container, the working directory for the + // command is root ('/') in the container's filesystem. The command is simply exec'd, it is + // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + // a shell, you need to explicitly call out to that shell. + // +optional + Command []string +} + +// Probe describes a health check to be performed against a container to determine whether it is +// alive or ready to receive traffic. +type Probe struct { + // The action taken to determine the health of a container + Handler + // Length of time before health checking is activated. In seconds. + // +optional + InitialDelaySeconds int32 + // Length of time before health checking times out. In seconds. + // +optional + TimeoutSeconds int32 + // How often (in seconds) to perform the probe. + // +optional + PeriodSeconds int32 + // Minimum consecutive successes for the probe to be considered successful after having failed. + // Must be 1 for liveness. + // +optional + SuccessThreshold int32 + // Minimum consecutive failures for the probe to be considered failed after having succeeded. + // +optional + FailureThreshold int32 +} + +// PullPolicy describes a policy for if/when to pull a container image +type PullPolicy string + +const ( + // PullAlways means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. + PullAlways PullPolicy = "Always" + // PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present + PullNever PullPolicy = "Never" + // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. + PullIfNotPresent PullPolicy = "IfNotPresent" +) + +// TerminationMessagePolicy describes how termination messages are retrieved from a container. +type TerminationMessagePolicy string + +const ( + // TerminationMessageReadFile is the default behavior and will set the container status message to + // the contents of the container's terminationMessagePath when the container exits. + TerminationMessageReadFile TerminationMessagePolicy = "File" + // TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs + // for the container status message when the container exits with an error and the + // terminationMessagePath has no contents. + TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError" +) + +// Capability represent POSIX capabilities type +type Capability string + +// Capabilities represent POSIX capabilities that can be added or removed to a running container. 
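The Capabilities struct named above is defined next. As a sketch of the probe machinery just described, a liveness probe built from an HTTPGetAction (illustrative path and port; Handler is the embedded union defined further down, and intstr matches the import this file already relies on):

    func exampleLivenessProbe() *Probe {
        return &Probe{
            Handler: Handler{
                HTTPGet: &HTTPGetAction{
                    Path:   "/healthz",
                    Port:   intstr.FromInt(10254),
                    Scheme: URISchemeHTTP,
                },
            },
            InitialDelaySeconds: 10,
            TimeoutSeconds:      1,
            PeriodSeconds:       10,
            SuccessThreshold:    1, // must be 1 for liveness probes
            FailureThreshold:    3,
        }
    }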
+type Capabilities struct { + // Added capabilities + // +optional + Add []Capability + // Removed capabilities + // +optional + Drop []Capability +} + +// ResourceRequirements describes the compute resource requirements. +type ResourceRequirements struct { + // Limits describes the maximum amount of compute resources allowed. + // +optional + Limits ResourceList + // Requests describes the minimum amount of compute resources required. + // If Request is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to an implementation-defined value + // +optional + Requests ResourceList +} + +// Container represents a single container that is expected to be run on the host. +type Container struct { + // Required: This must be a DNS_LABEL. Each container in a pod must + // have a unique name. + Name string + // Required. + Image string + // Optional: The docker image's entrypoint is used if this is not provided; cannot be updated. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // +optional + Command []string + // Optional: The docker image's cmd is used if this is not provided; cannot be updated. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // +optional + Args []string + // Optional: Defaults to Docker's default. + // +optional + WorkingDir string + // +optional + Ports []ContainerPort + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + EnvFrom []EnvFromSource + // +optional + Env []EnvVar + // Compute resource requirements. + // +optional + Resources ResourceRequirements + // +optional + VolumeMounts []VolumeMount + // +optional + LivenessProbe *Probe + // +optional + ReadinessProbe *Probe + // +optional + Lifecycle *Lifecycle + // Required. + // +optional + TerminationMessagePath string + // +optional + TerminationMessagePolicy TerminationMessagePolicy + // Required: Policy for pulling images for this container + ImagePullPolicy PullPolicy + // Optional: SecurityContext defines the security options the container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + // +optional + SecurityContext *SecurityContext + + // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) + // and shouldn't be used for general purpose containers. + // +optional + Stdin bool + // +optional + StdinOnce bool + // +optional + TTY bool +} + +// Handler defines a specific action that should be taken +// TODO: pass structured data to these actions, and document that data here. 
+type Handler struct { + // One and only one of the following should be specified. + // Exec specifies the action to take. + // +optional + Exec *ExecAction + // HTTPGet specifies the http request to perform. + // +optional + HTTPGet *HTTPGetAction + // TCPSocket specifies an action involving a TCP port. + // TODO: implement a realistic TCP lifecycle hook + // +optional + TCPSocket *TCPSocketAction +} + +// Lifecycle describes actions that the management system should take in response to container lifecycle +// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks +// until the action is complete, unless the container process fails, in which case the handler is aborted. +type Lifecycle struct { + // PostStart is called immediately after a container is created. If the handler fails, the container + // is terminated and restarted. + // +optional + PostStart *Handler + // PreStop is called immediately before a container is terminated. The reason for termination is + // passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. + // +optional + PreStop *Handler +} + +// The below types are used by kube_client and api_server. + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition; +// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +type ContainerStateWaiting struct { + // A brief CamelCase string indicating details about why the container is in waiting state. + // +optional + Reason string + // A human-readable message indicating details about why the container is in waiting state. + // +optional + Message string +} + +type ContainerStateRunning struct { + // +optional + StartedAt metav1.Time +} + +type ContainerStateTerminated struct { + ExitCode int32 + // +optional + Signal int32 + // +optional + Reason string + // +optional + Message string + // +optional + StartedAt metav1.Time + // +optional + FinishedAt metav1.Time + // +optional + ContainerID string +} + +// ContainerState holds a possible state of container. +// Only one of its members may be specified. +// If none of them is specified, the default one is ContainerStateWaiting. +type ContainerState struct { + // +optional + Waiting *ContainerStateWaiting + // +optional + Running *ContainerStateRunning + // +optional + Terminated *ContainerStateTerminated +} + +type ContainerStatus struct { + // Each container in a pod must have a unique name. + Name string + // +optional + State ContainerState + // +optional + LastTerminationState ContainerState + // Ready specifies whether the container has passed its readiness check. + Ready bool + // Note that this is calculated from dead containers. But those containers are subject to + // garbage collection. This value will get capped at 5 by GC. + RestartCount int32 + Image string + ImageID string + // +optional + ContainerID string +} + +// PodPhase is a label for the condition of a pod at the current time. +type PodPhase string + +// These are the valid statuses of pods. +const ( + // PodPending means the pod has been accepted by the system, but one or more of the containers + // has not been started. 
This includes time before being bound to a node, as well as time spent + // pulling images onto the host. + PodPending PodPhase = "Pending" + // PodRunning means the pod has been bound to a node and all of the containers have been started. + // At least one container is still running or is in the process of being restarted. + PodRunning PodPhase = "Running" + // PodSucceeded means that all containers in the pod have voluntarily terminated + // with a container exit code of 0, and the system is not going to restart any of these containers. + PodSucceeded PodPhase = "Succeeded" + // PodFailed means that all containers in the pod have terminated, and at least one container has + // terminated in a failure (exited with a non-zero exit code or was stopped by the system). + PodFailed PodPhase = "Failed" + // PodUnknown means that for some reason the state of the pod could not be obtained, typically due + // to an error in communicating with the host of the pod. + PodUnknown PodPhase = "Unknown" +) + +type PodConditionType string + +// These are valid conditions of pod. +const ( + // PodScheduled represents status of the scheduling process for this pod. + PodScheduled PodConditionType = "PodScheduled" + // PodReady means the pod is able to service requests and should be added to the + // load balancing pools of all matching services. + PodReady PodConditionType = "Ready" + // PodInitialized means that all init containers in the pod have started successfully. + PodInitialized PodConditionType = "Initialized" + // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler + // can't schedule the pod right now, for example due to insufficient resources in the cluster. + PodReasonUnschedulable = "Unschedulable" +) + +type PodCondition struct { + Type PodConditionType + Status ConditionStatus + // +optional + LastProbeTime metav1.Time + // +optional + LastTransitionTime metav1.Time + // +optional + Reason string + // +optional + Message string +} + +// RestartPolicy describes how the container should be restarted. +// Only one of the following restart policies may be specified. +// If none of the following policies is specified, the default one +// is RestartPolicyAlways. +type RestartPolicy string + +const ( + RestartPolicyAlways RestartPolicy = "Always" + RestartPolicyOnFailure RestartPolicy = "OnFailure" + RestartPolicyNever RestartPolicy = "Never" +) + +// PodList is a list of Pods. +type PodList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Pod +} + +// DNSPolicy defines how a pod's DNS will be configured. +type DNSPolicy string + +const ( + // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS + // first, if it is available, then fall back on the default + // (as determined by kubelet) DNS settings. + DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet" + + // DNSClusterFirst indicates that the pod should use cluster DNS + // first unless hostNetwork is true, if it is available, then + // fall back on the default (as determined by kubelet) DNS settings. + DNSClusterFirst DNSPolicy = "ClusterFirst" + + // DNSDefault indicates that the pod should use the default (as + // determined by kubelet) DNS settings. + DNSDefault DNSPolicy = "Default" +) + +// A node selector represents the union of the results of one or more label queries +// over a set of nodes; that is, it represents the OR of the selectors represented +// by the node selector terms. +type NodeSelector struct { + //Required. 
A list of node selector terms. The terms are ORed. + NodeSelectorTerms []NodeSelectorTerm +} + +// A null or empty node selector term matches no objects. +type NodeSelectorTerm struct { + //Required. A list of node selector requirements. The requirements are ANDed. + MatchExpressions []NodeSelectorRequirement +} + +// A node selector requirement is a selector that contains values, a key, and an operator +// that relates the key and values. +type NodeSelectorRequirement struct { + // The label key that the selector applies to. + Key string + // Represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + Operator NodeSelectorOperator + // An array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. If the operator is Gt or Lt, the values + // array must have a single element, which will be interpreted as an integer. + // This array is replaced during a strategic merge patch. + // +optional + Values []string +} + +// A node selector operator is the set of operators that can be used in +// a node selector requirement. +type NodeSelectorOperator string + +const ( + NodeSelectorOpIn NodeSelectorOperator = "In" + NodeSelectorOpNotIn NodeSelectorOperator = "NotIn" + NodeSelectorOpExists NodeSelectorOperator = "Exists" + NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist" + NodeSelectorOpGt NodeSelectorOperator = "Gt" + NodeSelectorOpLt NodeSelectorOperator = "Lt" +) + +// Affinity is a group of affinity scheduling rules. +type Affinity struct { + // Describes node affinity scheduling rules for the pod. + // +optional + NodeAffinity *NodeAffinity + // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + // +optional + PodAffinity *PodAffinity + // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + // +optional + PodAntiAffinity *PodAntiAffinity +} + +// Pod affinity is a group of inter pod affinity scheduling rules. +type PodAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. 
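The RequiredDuringSchedulingIgnoredDuringExecution field documented by the comment above follows next. A sketch of the node-selector building blocks just defined, which would plug into NodeAffinity further down (illustrative label key and value):

    func exampleLinuxNodeSelector() *NodeSelector {
        return &NodeSelector{
            NodeSelectorTerms: []NodeSelectorTerm{{ // terms are ORed
                MatchExpressions: []NodeSelectorRequirement{{ // requirements are ANDed
                    Key:      "beta.kubernetes.io/os",
                    Operator: NodeSelectorOpIn,
                    Values:   []string{"linux"},
                }},
            }},
        }
    }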
+ // +optional + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm +} + +// Pod anti affinity is a group of inter pod anti affinity scheduling rules. +type PodAntiAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm + // The scheduler will prefer to schedule pods to nodes that satisfy + // the anti-affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling anti-affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm +} + +// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +type WeightedPodAffinityTerm struct { + // weight associated with matching the corresponding podAffinityTerm, + // in the range 1-100. + Weight int32 + // Required. A pod affinity term, associated with the corresponding weight. 
+ PodAffinityTerm PodAffinityTerm +} + +// Defines a set of pods (namely those matching the labelSelector +// relative to the given namespace(s)) that this pod should be +// co-located (affinity) or not co-located (anti-affinity) with, +// where co-located is defined as running on a node whose value of +// the label with key matches that of any node on which +// a pod of the set of pods is running. +type PodAffinityTerm struct { + // A label query over a set of resources, in this case pods. + // +optional + LabelSelector *metav1.LabelSelector + // namespaces specifies which namespaces the labelSelector applies to (matches against); + // null or empty list means "this pod's namespace" + Namespaces []string + // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + // the labelSelector in the specified namespaces, where co-located is defined as running on a node + // whose value of the label with key topologyKey matches that of any node on which any of the + // selected pods is running. + // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" + // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); + // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. + // +optional + TopologyKey string +} + +// Node affinity is a group of node affinity scheduling rules. +type NodeAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // will try to eventually evict the pod from its node. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector + + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // may or may not try to eventually evict the pod from its node. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node matches the corresponding matchExpressions; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm +} + +// An empty preferred scheduling term matches all objects with implicit weight 0 +// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). +type PreferredSchedulingTerm struct { + // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. 
+ Weight int32
+ // A node selector term, associated with the corresponding weight.
+ Preference NodeSelectorTerm
+}
+
+// The node this Taint is attached to has the effect "effect" on
+// any pod that does not tolerate the Taint.
+type Taint struct {
+ // Required. The taint key to be applied to a node.
+ Key string
+ // Required. The taint value corresponding to the taint key.
+ // +optional
+ Value string
+ // Required. The effect of the taint on pods
+ // that do not tolerate the taint.
+ // Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
+ Effect TaintEffect
+ // TimeAdded represents the time at which the taint was added.
+ // It is only written for NoExecute taints.
+ // +optional
+ TimeAdded metav1.Time
+}
+
+type TaintEffect string
+
+const (
+ // Do not allow new pods to schedule onto the node unless they tolerate the taint,
+ // but allow all pods submitted to Kubelet without going through the scheduler
+ // to start, and allow all already-running pods to continue running.
+ // Enforced by the scheduler.
+ TaintEffectNoSchedule TaintEffect = "NoSchedule"
+ // Like TaintEffectNoSchedule, but the scheduler tries not to schedule
+ // new pods onto the node, rather than prohibiting new pods from scheduling
+ // onto the node entirely. Enforced by the scheduler.
+ TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // Like TaintEffectNoSchedule, but additionally do not allow pods submitted to
+ // Kubelet without going through the scheduler to start.
+ // Enforced by Kubelet and the scheduler.
+ // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
+ // Evict any already-running pods that do not tolerate the taint.
+ // Currently enforced by NodeController.
+ TaintEffectNoExecute TaintEffect = "NoExecute"
+)
+
+// The pod this Toleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
+type Toleration struct {
+ // Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ // +optional
+ Key string
+ // Operator represents a key's relationship to the value.
+ // Valid operators are Exists and Equal. Defaults to Equal.
+ // Exists is equivalent to wildcard for value, so that a pod can
+ // tolerate all taints of a particular category.
+ // +optional
+ Operator TolerationOperator
+ // Value is the taint value the toleration matches to.
+ // If the operator is Exists, the value should be empty, otherwise just a regular string.
+ // +optional
+ Value string
+ // Effect indicates the taint effect to match. Empty means match all taint effects.
+ // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ // +optional
+ Effect TaintEffect
+ // TolerationSeconds represents the period of time the toleration (which must be
+ // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ // it is not set, which means tolerate the taint forever (do not evict). Zero and
+ // negative values will be treated as 0 (evict immediately) by the system.
+ // +optional
+ TolerationSeconds *int64
+}
+
+// A toleration operator is the set of operators that can be used in a toleration.
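The TolerationOperator type and its two constants follow below. A sketch of a toleration that matches a NoExecute taint and bounds how long the pod stays on the node (illustrative taint key):

    func exampleToleration() Toleration {
        gracePeriod := int64(300) // evict five minutes after the taint appears
        return Toleration{
            Key:               "node.alpha.kubernetes.io/unreachable", // hypothetical taint key
            Operator:          TolerationOpExists,                     // match any value for this key
            Effect:            TaintEffectNoExecute,
            TolerationSeconds: &gracePeriod,
        }
    }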
+type TolerationOperator string + +const ( + TolerationOpExists TolerationOperator = "Exists" + TolerationOpEqual TolerationOperator = "Equal" +) + +// PodSpec is a description of a pod +type PodSpec struct { + Volumes []Volume + // List of initialization containers belonging to the pod. + InitContainers []Container + // List of containers belonging to the pod. + Containers []Container + // +optional + RestartPolicy RestartPolicy + // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + // Value must be non-negative integer. The value zero indicates delete immediately. + // If this value is nil, the default grace period will be used instead. + // The grace period is the duration in seconds after the processes running in the pod are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // +optional + TerminationGracePeriodSeconds *int64 + // Optional duration in seconds relative to the StartTime that the pod may be active on a node + // before the system actively tries to terminate the pod; value must be positive integer + // +optional + ActiveDeadlineSeconds *int64 + // Required: Set DNS policy. + // +optional + DNSPolicy DNSPolicy + // NodeSelector is a selector which must be true for the pod to fit on a node + // +optional + NodeSelector map[string]string + + // ServiceAccountName is the name of the ServiceAccount to use to run this pod + // The pod will be allowed to use secrets referenced by the ServiceAccount + ServiceAccountName string + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + // +optional + AutomountServiceAccountToken *bool + + // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + // the scheduler simply schedules this pod onto that node, assuming that it fits resource + // requirements. + // +optional + NodeName string + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. See type description for default values of each field. + // +optional + SecurityContext *PodSecurityContext + // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + // If specified, these secrets will be passed to individual puller implementations for them to use. For example, + // in the case of docker, only DockerConfig type secrets are honored. + // +optional + ImagePullSecrets []LocalObjectReference + // Specifies the hostname of the Pod. + // If not specified, the pod's hostname will be set to a system-defined value. + // +optional + Hostname string + // If specified, the fully qualified Pod hostname will be "...svc.". + // If not specified, the pod will not have a domainname at all. + // +optional + Subdomain string + // If specified, the pod's scheduling constraints + // +optional + Affinity *Affinity + // If specified, the pod will be dispatched by specified scheduler. + // If not specified, the pod will be dispatched by default scheduler. + // +optional + SchedulerName string + // If specified, the pod's tolerations. 
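The Tolerations field documented by the comment above follows below. Pulling several of the earlier pieces together, a minimal PodSpec might look like this (illustrative image and names; it reuses the probe and toleration sketches above and assumes everything sits in the same package):

    func examplePodSpec() PodSpec {
        grace := int64(30)
        return PodSpec{
            Containers: []Container{{
                Name:            "nginx-ingress-controller", // illustrative
                Image:           "nginx:1.11",               // illustrative
                Ports:           []ContainerPort{{Name: "http", ContainerPort: 80, Protocol: "TCP"}},
                ImagePullPolicy: PullIfNotPresent,
                LivenessProbe:   exampleLivenessProbe(), // from the probe sketch above
            }},
            RestartPolicy:                 RestartPolicyAlways,
            TerminationGracePeriodSeconds: &grace,
            DNSPolicy:                     DNSClusterFirst,
            NodeSelector:                  map[string]string{"beta.kubernetes.io/os": "linux"},
            ServiceAccountName:            "default",
            Tolerations:                   []Toleration{exampleToleration()}, // from the sketch above
        }
    }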
+ // +optional + Tolerations []Toleration +} + +// Sysctl defines a kernel parameter to be set +type Sysctl struct { + // Name of a property to set + Name string + // Value of a property to set + Value string +} + +// PodSecurityContext holds pod-level security attributes and common container settings. +// Some fields are also present in container.securityContext. Field values of +// container.securityContext take precedence over field values of PodSecurityContext. +type PodSecurityContext struct { + // Use the host's network namespace. If this option is set, the ports that will be + // used must be specified. + // Optional: Default to false + // +k8s:conversion-gen=false + // +optional + HostNetwork bool + // Use the host's pid namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostPID bool + // Use the host's ipc namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostIPC bool + // The SELinux context to be applied to all containers. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in SecurityContext. If set in + // both SecurityContext and PodSecurityContext, the value specified in SecurityContext + // takes precedence for that container. + // +optional + SELinuxOptions *SELinuxOptions + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence + // for that container. + // +optional + RunAsUser *int64 + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsNonRoot *bool + // A list of groups applied to the first process run in each container, in addition + // to the container's primary GID. If unspecified, no groups will be added to + // any container. + // +optional + SupplementalGroups []int64 + // A special supplemental group that applies to all containers in a pod. + // Some volume types allow the Kubelet to change the ownership of that volume + // to be owned by the pod: + // + // 1. The owning GID will be the FSGroup + // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + // 3. The permission bits are OR'd with rw-rw---- + // + // If unset, the Kubelet will not modify the ownership and permissions of any volume. + // +optional + FSGroup *int64 +} + +// PodQOSClass defines the supported qos classes of Pods. +type PodQOSClass string + +const ( + // PodQOSGuaranteed is the Guaranteed qos class. + PodQOSGuaranteed PodQOSClass = "Guaranteed" + // PodQOSBurstable is the Burstable qos class. + PodQOSBurstable PodQOSClass = "Burstable" + // PodQOSBestEffort is the BestEffort qos class. + PodQOSBestEffort PodQOSClass = "BestEffort" +) + +// PodStatus represents information about the status of a pod. Status may trail the actual +// state of a system. 
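The PodStatus struct described above is defined next. For the pod-level security settings just defined, a sketch that runs every container as a fixed non-root user and hands volume ownership to a shared group (illustrative IDs):

    func examplePodSecurityContext() *PodSecurityContext {
        uid := int64(1000)
        fsGroup := int64(2000)
        nonRoot := true
        return &PodSecurityContext{
            RunAsUser:    &uid,
            RunAsNonRoot: &nonRoot,
            FSGroup:      &fsGroup, // volumes that support it are chowned to this GID
        }
    }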
+type PodStatus struct { + // +optional + Phase PodPhase + // +optional + Conditions []PodCondition + // A human readable message indicating details about why the pod is in this state. + // +optional + Message string + // A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk' + // +optional + Reason string + + // +optional + HostIP string + // +optional + PodIP string + + // Date and time at which the object was acknowledged by the Kubelet. + // This is before the Kubelet pulled the container image(s) for the pod. + // +optional + StartTime *metav1.Time + // +optional + QOSClass PodQOSClass + + // The list has one entry per init container in the manifest. The most recent successful + // init container will have ready = true, the most recently started container will have + // startTime set. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses + InitContainerStatuses []ContainerStatus + // The list has one entry per container in the manifest. Each entry is + // currently the output of `docker inspect`. This output format is *not* + // final and should not be relied upon. + // TODO: Make real decisions about what our info should look like. Re-enable fuzz test + // when we have done this. + // +optional + ContainerStatuses []ContainerStatus +} + +// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded +type PodStatusResult struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + // Status represents the current information about a pod. This data may not be up + // to date. + // +optional + Status PodStatus +} + +// +genclient=true + +// Pod is a collection of containers, used as either input (create, update) or as output (list, get). +type Pod struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a pod. + // +optional + Spec PodSpec + + // Status represents the current information about a pod. This data may not be up + // to date. + // +optional + Status PodStatus +} + +// PodTemplateSpec describes the data a pod should have when created from a template +type PodTemplateSpec struct { + // Metadata of the pods created from this template. + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a pod. + // +optional + Spec PodSpec +} + +// +genclient=true + +// PodTemplate describes a template for creating copies of a predefined pod. +type PodTemplate struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Template defines the pods that will be created from this pod template + // +optional + Template PodTemplateSpec +} + +// PodTemplateList is a list of PodTemplates. +type PodTemplateList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []PodTemplate +} + +// ReplicationControllerSpec is the specification of a replication controller. +// As the internal representation of a replication controller, it may have either +// a TemplateRef or a Template set. +type ReplicationControllerSpec struct { + // Replicas is the number of desired replicas. + Replicas int32 + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 + + // Selector is a label query over pods that should match the Replicas count. 
+ Selector map[string]string + + // TemplateRef is a reference to an object that describes the pod that will be created if + // insufficient replicas are detected. This reference is ignored if a Template is set. + // Must be set before converting to a versioned API object + // +optional + //TemplateRef *ObjectReference + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. Internally, this takes precedence over a + // TemplateRef. + // +optional + Template *PodTemplateSpec +} + +// ReplicationControllerStatus represents the current status of a replication +// controller. +type ReplicationControllerStatus struct { + // Replicas is the number of actual replicas. + Replicas int32 + + // The number of pods that have labels matching the labels of the pod template of the replication controller. + // +optional + FullyLabeledReplicas int32 + + // The number of ready replicas for this replication controller. + // +optional + ReadyReplicas int32 + + // The number of available replicas (ready for at least minReadySeconds) for this replication controller. + // +optional + AvailableReplicas int32 + + // ObservedGeneration is the most recent generation observed by the controller. + // +optional + ObservedGeneration int64 + + // Represents the latest available observations of a replication controller's current state. + // +optional + Conditions []ReplicationControllerCondition +} + +type ReplicationControllerConditionType string + +// These are valid conditions of a replication controller. +const ( + // ReplicationControllerReplicaFailure is added in a replication controller when one of its pods + // fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors, + // etc. or deleted due to kubelet being down or finalizers are failing. + ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure" +) + +// ReplicationControllerCondition describes the state of a replication controller at a certain point. +type ReplicationControllerCondition struct { + // Type of replication controller condition. + Type ReplicationControllerConditionType + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time + // The reason for the condition's last transition. + // +optional + Reason string + // A human readable message indicating details about the transition. + // +optional + Message string +} + +// +genclient=true + +// ReplicationController represents the configuration of a replication controller. +type ReplicationController struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired behavior of this replication controller. + // +optional + Spec ReplicationControllerSpec + + // Status is the current status of this replication controller. This data may be + // out of date by some window of time. + // +optional + Status ReplicationControllerStatus +} + +// ReplicationControllerList is a collection of replication controllers. +type ReplicationControllerList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ReplicationController +} + +const ( + // ClusterIPNone - do not assign a cluster IP + // no proxying required and no environment variables should be created for pods + ClusterIPNone = "None" +) + +// ServiceList holds a list of services. 
+type ServiceList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Service +} + +// Session Affinity Type string +type ServiceAffinity string + +const ( + // ServiceAffinityClientIP is the Client IP based. + ServiceAffinityClientIP ServiceAffinity = "ClientIP" + + // ServiceAffinityNone - no session affinity. + ServiceAffinityNone ServiceAffinity = "None" +) + +// Service Type string describes ingress methods for a service +type ServiceType string + +const ( + // ServiceTypeClusterIP means a service will only be accessible inside the + // cluster, via the ClusterIP. + ServiceTypeClusterIP ServiceType = "ClusterIP" + + // ServiceTypeNodePort means a service will be exposed on one port of + // every node, in addition to 'ClusterIP' type. + ServiceTypeNodePort ServiceType = "NodePort" + + // ServiceTypeLoadBalancer means a service will be exposed via an + // external load balancer (if the cloud provider supports it), in addition + // to 'NodePort' type. + ServiceTypeLoadBalancer ServiceType = "LoadBalancer" + + // ServiceTypeExternalName means a service consists of only a reference to + // an external name that kubedns or equivalent will return as a CNAME + // record, with no exposing or proxying of any pods involved. + ServiceTypeExternalName ServiceType = "ExternalName" +) + +// ServiceStatus represents the current status of a service +type ServiceStatus struct { + // LoadBalancer contains the current status of the load-balancer, + // if one is present. + // +optional + LoadBalancer LoadBalancerStatus +} + +// LoadBalancerStatus represents the status of a load-balancer +type LoadBalancerStatus struct { + // Ingress is a list containing ingress points for the load-balancer; + // traffic intended for the service should be sent to these ingress points. + // +optional + Ingress []LoadBalancerIngress +} + +// LoadBalancerIngress represents the status of a load-balancer ingress point: +// traffic intended for the service should be sent to an ingress point. +type LoadBalancerIngress struct { + // IP is set for load-balancer ingress points that are IP based + // (typically GCE or OpenStack load-balancers) + // +optional + IP string + + // Hostname is set for load-balancer ingress points that are DNS based + // (typically AWS load-balancers) + // +optional + Hostname string +} + +// ServiceSpec describes the attributes that a user creates on a service +type ServiceSpec struct { + // Type determines how the Service is exposed. Defaults to ClusterIP. Valid + // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + // "ExternalName" maps to the specified externalName. + // "ClusterIP" allocates a cluster-internal IP address for load-balancing to + // endpoints. Endpoints are determined by the selector or if that is not + // specified, by manual construction of an Endpoints object. If clusterIP is + // "None", no virtual IP is allocated and the endpoints are published as a + // set of endpoints rather than a stable IP. + // "NodePort" builds on ClusterIP and allocates a port on every node which + // routes to the clusterIP. + // "LoadBalancer" builds on NodePort and creates an + // external load-balancer (if supported in the current cloud) which routes + // to the clusterIP. + // More info: http://kubernetes.io/docs/user-guide/services#overview + // +optional + Type ServiceType + + // Required: The list of ports that are exposed by this service. + Ports []ServicePort + + // Route service traffic to pods with label keys and values matching this + // selector. 
If empty or not present, the service is assumed to have an + // external process managing its endpoints, which Kubernetes will not + // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + // Ignored if type is ExternalName. + // More info: http://kubernetes.io/docs/user-guide/services#overview + Selector map[string]string + + // ClusterIP is the IP address of the service and is usually assigned + // randomly by the master. If an address is specified manually and is not in + // use by others, it will be allocated to the service; otherwise, creation + // of the service will fail. This field can not be changed through updates. + // Valid values are "None", empty string (""), or a valid IP address. "None" + // can be specified for headless services when proxying is not required. + // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if + // type is ExternalName. + // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + // +optional + ClusterIP string + + // ExternalName is the external reference that kubedns or equivalent will + // return as a CNAME record for this service. No proxying will be involved. + // Must be a valid DNS name and requires Type to be ExternalName. + ExternalName string + + // ExternalIPs are used by external load balancers, or can be set by + // users to handle external traffic that arrives at a node. + // +optional + ExternalIPs []string + + // Only applies to Service Type: LoadBalancer + // LoadBalancer will get created with the IP specified in this field. + // This feature depends on whether the underlying cloud-provider supports specifying + // the loadBalancerIP when a load balancer is created. + // This field will be ignored if the cloud-provider does not support the feature. + // +optional + LoadBalancerIP string + + // Optional: Supports "ClientIP" and "None". Used to maintain session affinity. + // +optional + SessionAffinity ServiceAffinity + + // Optional: If specified and supported by the platform, this will restrict traffic through the cloud-provider + // load-balancer will be restricted to the specified client IPs. This field will be ignored if the + // cloud-provider does not support the feature." + // +optional + LoadBalancerSourceRanges []string +} + +type ServicePort struct { + // Optional if only one ServicePort is defined on this service: The + // name of this port within the service. This must be a DNS_LABEL. + // All ports within a ServiceSpec must have unique names. This maps to + // the 'Name' field in EndpointPort objects. + Name string + + // The IP protocol for this port. Supports "TCP" and "UDP". + Protocol Protocol + + // The port that will be exposed on the service. + Port int32 + + // Optional: The target port on pods selected by this service. If this + // is a string, it will be looked up as a named port in the target + // Pod's container ports. If this is not specified, the value + // of the 'port' field is used (an identity map). + // This field is ignored for services with clusterIP=None, and should be + // omitted or set equal to the 'port' field. + TargetPort intstr.IntOrString + + // The port on each node on which this service is exposed. + // Default is to auto-allocate a port if the ServiceType of this Service requires one. 
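The TargetPort comment above describes a lookup rule rather than showing it. Below is a minimal sketch of that rule, assuming the vendored k8s.io/apimachinery/pkg/util/intstr package; the resolveTargetPort helper and the namedPorts map are illustrative and not part of the API.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// resolveTargetPort sketches the documented rule: a string value is looked up
// among the pod's named container ports, an integer value is used directly,
// and an unset value falls back to the service port itself (the identity map).
func resolveTargetPort(servicePort int32, target intstr.IntOrString, namedPorts map[string]int32) (int32, error) {
	switch target.Type {
	case intstr.String:
		if target.StrVal == "" {
			return servicePort, nil
		}
		if p, ok := namedPorts[target.StrVal]; ok {
			return p, nil
		}
		return 0, fmt.Errorf("no container port named %q", target.StrVal)
	default:
		if target.IntVal == 0 {
			return servicePort, nil
		}
		return target.IntVal, nil
	}
}

func main() {
	named := map[string]int32{"http": 8080}

	p, _ := resolveTargetPort(80, intstr.FromString("http"), named)
	fmt.Println(p) // 8080: named port resolved against the container ports

	p, _ = resolveTargetPort(80, intstr.IntOrString{}, named)
	fmt.Println(p) // 80: TargetPort unset, identity map to the service port
}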
+ NodePort int32 +} + +// +genclient=true + +// Service is a named abstraction of software service (for example, mysql) consisting of local port +// (for example 3306) that the proxy listens on, and the selector that determines which pods +// will answer requests sent through the proxy. +type Service struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a service. + // +optional + Spec ServiceSpec + + // Status represents the current status of a service. + // +optional + Status ServiceStatus +} + +// +genclient=true + +// ServiceAccount binds together: +// * a name, understood by users, and perhaps by peripheral systems, for an identity +// * a principal that can be authenticated and authorized +// * a set of secrets +type ServiceAccount struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount + Secrets []ObjectReference + + // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images + // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets + // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. + // +optional + ImagePullSecrets []LocalObjectReference + + // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. + // Can be overridden at the pod level. + // +optional + AutomountServiceAccountToken *bool +} + +// ServiceAccountList is a list of ServiceAccount objects +type ServiceAccountList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ServiceAccount +} + +// +genclient=true + +// Endpoints is a collection of endpoints that implement the actual service. Example: +// Name: "mysvc", +// Subsets: [ +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// }, +// { +// Addresses: [{"ip": "10.10.3.3"}], +// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] +// }, +// ] +type Endpoints struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // The set of all endpoints is the union of all subsets. + Subsets []EndpointSubset +} + +// EndpointSubset is a group of addresses with a common set of ports. The +// expanded set of endpoints is the Cartesian product of Addresses x Ports. +// For example, given: +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// } +// The resulting set of endpoints can be viewed as: +// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], +// b: [ 10.10.1.1:309, 10.10.2.2:309 ] +type EndpointSubset struct { + Addresses []EndpointAddress + NotReadyAddresses []EndpointAddress + Ports []EndpointPort +} + +// EndpointAddress is a tuple that describes single IP address. +type EndpointAddress struct { + // The IP of this endpoint. + // IPv6 is also accepted but not fully supported on all platforms. Also, certain + // kubernetes components, like kube-proxy, are not IPv6 ready. + // TODO: This should allow hostname or IP, see #4447. + IP string + // Optional: Hostname of this endpoint + // Meant to be used by DNS servers etc. + // +optional + Hostname string + // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. 
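The EndpointSubset doc comment above describes the Addresses x Ports expansion with a worked example; the short program below reproduces that expansion using local stand-in types (address, port, and subset are illustrative mirrors, not the vendored types).

package main

import "fmt"

// Tiny local mirrors of EndpointAddress/EndpointPort/EndpointSubset, just
// enough to reproduce the Cartesian-product expansion described above.
type address struct{ IP string }

type port struct {
	Name string
	Port int32
}

type subset struct {
	Addresses []address
	Ports     []port
}

// expand returns the endpoints keyed by port name: every address in the
// subset is paired with every port in the same subset.
func expand(s subset) map[string][]string {
	out := map[string][]string{}
	for _, p := range s.Ports {
		for _, a := range s.Addresses {
			out[p.Name] = append(out[p.Name], fmt.Sprintf("%s:%d", a.IP, p.Port))
		}
	}
	return out
}

func main() {
	s := subset{
		Addresses: []address{{IP: "10.10.1.1"}, {IP: "10.10.2.2"}},
		Ports:     []port{{Name: "a", Port: 8675}, {Name: "b", Port: 309}},
	}
	out := expand(s)
	fmt.Println("a:", out["a"]) // a: [10.10.1.1:8675 10.10.2.2:8675]
	fmt.Println("b:", out["b"]) // b: [10.10.1.1:309 10.10.2.2:309]
}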
+ // +optional + NodeName *string + // Optional: The kubernetes object related to the entry point. + TargetRef *ObjectReference +} + +// EndpointPort is a tuple that describes a single port. +type EndpointPort struct { + // The name of this port (corresponds to ServicePort.Name). Optional + // if only one port is defined. Must be a DNS_LABEL. + Name string + + // The port number. + Port int32 + + // The IP protocol for this port. + Protocol Protocol +} + +// EndpointsList is a list of endpoints. +type EndpointsList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Endpoints +} + +// NodeSpec describes the attributes that a node is created with. +type NodeSpec struct { + // PodCIDR represents the pod IP range assigned to the node + // Note: assigning IP ranges to nodes might need to be revisited when we support migratable IPs. + // +optional + PodCIDR string + + // External ID of the node assigned by some machine database (e.g. a cloud provider) + // +optional + ExternalID string + + // ID of the node assigned by the cloud provider + // Note: format is "://" + // +optional + ProviderID string + + // Unschedulable controls node schedulability of new pods. By default node is schedulable. + // +optional + Unschedulable bool + + // If specified, the node's taints. + // +optional + Taints []Taint +} + +// DaemonEndpoint contains information about a single Daemon endpoint. +type DaemonEndpoint struct { + /* + The port tag was not properly in quotes in earlier releases, so it must be + uppercased for backwards compat (since it was falling back to var name of + 'Port'). + */ + + // Port number of the given endpoint. + Port int32 +} + +// NodeDaemonEndpoints lists ports opened by daemons running on the Node. +type NodeDaemonEndpoints struct { + // Endpoint on which Kubelet is listening. + // +optional + KubeletEndpoint DaemonEndpoint +} + +// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. +type NodeSystemInfo struct { + // MachineID reported by the node. For unique machine identification + // in the cluster this field is prefered. Learn more from man(5) + // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html + MachineID string + // SystemUUID reported by the node. For unique machine identification + // MachineID is prefered. This field is specific to Red Hat hosts + // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html + SystemUUID string + // Boot ID reported by the node. + BootID string + // Kernel Version reported by the node. + KernelVersion string + // OS Image reported by the node. + OSImage string + // ContainerRuntime Version reported by the node. + ContainerRuntimeVersion string + // Kubelet Version reported by the node. + KubeletVersion string + // KubeProxy Version reported by the node. + KubeProxyVersion string + // The Operating System reported by the node + OperatingSystem string + // The Architecture reported by the node + Architecture string +} + +// NodeStatus is information about the current status of a node. +type NodeStatus struct { + // Capacity represents the total resources of a node. + // +optional + Capacity ResourceList + // Allocatable represents the resources of a node that are available for scheduling. + // +optional + Allocatable ResourceList + // NodePhase is the current lifecycle phase of the node. + // +optional + Phase NodePhase + // Conditions is an array of current node conditions. 
+ // +optional + Conditions []NodeCondition + // Queried from cloud provider, if available. + // +optional + Addresses []NodeAddress + // Endpoints of daemons running on the Node. + // +optional + DaemonEndpoints NodeDaemonEndpoints + // Set of ids/uuids to uniquely identify the node. + // +optional + NodeInfo NodeSystemInfo + // List of container images on this node + // +optional + Images []ContainerImage + // List of attachable volumes in use (mounted) by the node. + // +optional + VolumesInUse []UniqueVolumeName + // List of volumes that are attached to the node. + // +optional + VolumesAttached []AttachedVolume +} + +type UniqueVolumeName string + +// AttachedVolume describes a volume attached to a node +type AttachedVolume struct { + // Name of the attached volume + Name UniqueVolumeName + + // DevicePath represents the device path where the volume should be available + DevicePath string +} + +// AvoidPods describes pods that should avoid this node. This is the value for a +// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and +// will eventually become a field of NodeStatus. +type AvoidPods struct { + // Bounded-sized list of signatures of pods that should avoid this node, sorted + // in timestamp order from oldest to newest. Size of the slice is unspecified. + // +optional + PreferAvoidPods []PreferAvoidPodsEntry +} + +// Describes a class of pods that should avoid this node. +type PreferAvoidPodsEntry struct { + // The class of pods. + PodSignature PodSignature + // Time at which this entry was added to the list. + // +optional + EvictionTime metav1.Time + // (brief) reason why this entry was added to the list. + // +optional + Reason string + // Human readable message indicating why this entry was added to the list. + // +optional + Message string +} + +// Describes the class of pods that should avoid this node. +// Exactly one field should be set. +type PodSignature struct { + // Reference to controller whose pods should avoid this node. + // +optional + PodController *metav1.OwnerReference +} + +// Describe a container image +type ContainerImage struct { + // Names by which this image is known. + Names []string + // The size of the image in bytes. + // +optional + SizeBytes int64 +} + +type NodePhase string + +// These are the valid phases of node. +const ( + // NodePending means the node has been created/added by the system, but not configured. + NodePending NodePhase = "Pending" + // NodeRunning means the node has been configured and has Kubernetes components running. + NodeRunning NodePhase = "Running" + // NodeTerminated means the node has been removed from the cluster. + NodeTerminated NodePhase = "Terminated" +) + +type NodeConditionType string + +// These are valid conditions of node. Currently, we don't have enough information to decide +// node condition. In the future, we will add more. The proposed set of conditions are: +// NodeReady, NodeReachable +const ( + // NodeReady means kubelet is healthy and ready to accept pods. + NodeReady NodeConditionType = "Ready" + // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk + // space on the node. + NodeOutOfDisk NodeConditionType = "OutOfDisk" + // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. + NodeMemoryPressure NodeConditionType = "MemoryPressure" + // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. 
+ NodeDiskPressure NodeConditionType = "DiskPressure" + // NodeNetworkUnavailable means that network for the node is not correctly configured. + NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" +) + +type NodeCondition struct { + Type NodeConditionType + Status ConditionStatus + // +optional + LastHeartbeatTime metav1.Time + // +optional + LastTransitionTime metav1.Time + // +optional + Reason string + // +optional + Message string +} + +type NodeAddressType string + +// These are valid address types of node. NodeLegacyHostIP is used to transit +// from out-dated HostIP field to NodeAddress. +const ( + // Deprecated: NodeLegacyHostIP will be removed in 1.7. + NodeLegacyHostIP NodeAddressType = "LegacyHostIP" + NodeHostName NodeAddressType = "Hostname" + NodeExternalIP NodeAddressType = "ExternalIP" + NodeInternalIP NodeAddressType = "InternalIP" + NodeExternalDNS NodeAddressType = "ExternalDNS" + NodeInternalDNS NodeAddressType = "InternalDNS" +) + +type NodeAddress struct { + Type NodeAddressType + Address string +} + +// NodeResources is an object for conveying resource information about a node. +// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. +type NodeResources struct { + // Capacity represents the available resources of a node + // +optional + Capacity ResourceList +} + +// ResourceName is the name identifying various resources in a ResourceList. +type ResourceName string + +// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters, +// with the -, _, and . characters allowed anywhere, except the first or last character. +// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than +// camel case, separating compound words. +// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name. +const ( + // CPU, in cores. (500m = .5 cores) + ResourceCPU ResourceName = "cpu" + // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceMemory ResourceName = "memory" + // Volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) + ResourceStorage ResourceName = "storage" + // NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned. + ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu" + // Number of Pods that may be running on this Node: see ResourcePods +) + +const ( + // Namespace prefix for opaque counted resources (alpha). + ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-" +) + +// ResourceList is a set of (resource name, quantity) pairs. +type ResourceList map[ResourceName]resource.Quantity + +// +genclient=true +// +nonNamespaced=true + +// Node is a worker node in Kubernetes +// The name of the node according to etcd is in ObjectMeta.Name. +type Node struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a node. + // +optional + Spec NodeSpec + + // Status describes the current status of a Node + // +optional + Status NodeStatus +} + +// NodeList is a list of nodes. 
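A common consumer of NodeStatus.Conditions is a readiness check against the NodeReady condition defined above, with status values True/False/Unknown. The sketch below uses local stand-in types and string literals; the nodeIsReady helper is illustrative, not part of the vendored package.

package main

import "fmt"

// Local mirror of the NodeCondition fields used here; the real types are in
// the vendored api package above.
type nodeCondition struct {
	Type   string // e.g. "Ready", "OutOfDisk", "MemoryPressure"
	Status string // "True", "False" or "Unknown"
}

// nodeIsReady reports whether the "Ready" condition is present and "True",
// which is how consumers typically read NodeStatus.Conditions.
func nodeIsReady(conds []nodeCondition) bool {
	for _, c := range conds {
		if c.Type == "Ready" {
			return c.Status == "True"
		}
	}
	// A node that reports no Ready condition is treated as not ready.
	return false
}

func main() {
	conds := []nodeCondition{
		{Type: "OutOfDisk", Status: "False"},
		{Type: "Ready", Status: "True"},
	}
	fmt.Println(nodeIsReady(conds)) // true
}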
+type NodeList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Node +} + +// NamespaceSpec describes the attributes on a Namespace +type NamespaceSpec struct { + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage + Finalizers []FinalizerName +} + +// FinalizerName is the name identifying a finalizer during namespace lifecycle. +type FinalizerName string + +// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or +// in metav1. +const ( + FinalizerKubernetes FinalizerName = "kubernetes" +) + +// NamespaceStatus is information about the current status of a Namespace. +type NamespaceStatus struct { + // Phase is the current lifecycle phase of the namespace. + // +optional + Phase NamespacePhase +} + +type NamespacePhase string + +// These are the valid phases of a namespace. +const ( + // NamespaceActive means the namespace is available for use in the system + NamespaceActive NamespacePhase = "Active" + // NamespaceTerminating means the namespace is undergoing graceful termination + NamespaceTerminating NamespacePhase = "Terminating" +) + +// +genclient=true +// +nonNamespaced=true + +// A namespace provides a scope for Names. +// Use of multiple namespaces is optional +type Namespace struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of the Namespace. + // +optional + Spec NamespaceSpec + + // Status describes the current status of a Namespace + // +optional + Status NamespaceStatus +} + +// NamespaceList is a list of Namespaces. +type NamespaceList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Namespace +} + +// Binding ties one object to another - for example, a pod is bound to a node by a scheduler. +type Binding struct { + metav1.TypeMeta + // ObjectMeta describes the object that is being bound. + // +optional + metav1.ObjectMeta + + // Target is the object to bind to. + Target ObjectReference +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions struct { + // Specifies the target UID. + // +optional + UID *types.UID +} + +// DeletionPropagation decides whether and how garbage collection will be performed. +type DeletionPropagation string + +const ( + // Orphans the dependents. + DeletePropagationOrphan DeletionPropagation = "Orphan" + // Deletes the object from the key-value store, the garbage collector will delete the dependents in the background. + DeletePropagationBackground DeletionPropagation = "Background" + // The object exists in the key-value store until the garbage collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the key-value store. + // API sever will put the "DeletingDependents" finalizer on the object, and sets its deletionTimestamp. + // This policy is cascading, i.e., the dependents will be deleted with Foreground. + DeletePropagationForeground DeletionPropagation = "Foreground" +) + +// DeleteOptions may be provided when deleting an API object +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +type DeleteOptions struct { + metav1.TypeMeta + + // Optional duration in seconds before the object should be deleted. Value must be non-negative integer. + // The value zero indicates delete immediately. If this value is nil, the default grace period for the + // specified type will be used. 
+ // +optional + GracePeriodSeconds *int64 + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. + // +optional + Preconditions *Preconditions + + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. + // +optional + OrphanDependents *bool + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // +optional + PropagationPolicy *DeletionPropagation +} + +// ListOptions is the query options to a standard REST list call, and has future support for +// watch calls. +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +type ListOptions struct { + metav1.TypeMeta + + // A selector based on labels + LabelSelector labels.Selector + // A selector based on fields + FieldSelector fields.Selector + // If true, watch for changes to this list + Watch bool + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + ResourceVersion string + // Timeout for the list/watch call. + TimeoutSeconds *int64 +} + +// PodLogOptions is the query options for a Pod's logs REST call +type PodLogOptions struct { + metav1.TypeMeta + + // Container for which to return logs + Container string + // If true, follow the logs for the pod + Follow bool + // If true, return previous terminated container logs + Previous bool + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceSeconds *int64 + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceTime *metav1.Time + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. + Timestamps bool + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + TailLines *int64 + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. 
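ListOptions above carries a labels.Selector and a fields.Selector rather than raw strings. The sketch below shows how such selectors are typically built, assuming the vendored k8s.io/apimachinery/pkg/labels and k8s.io/apimachinery/pkg/fields packages; the specific selector strings are illustrative.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// A label selector equivalent to "app=nginx,tier in (frontend)",
	// suitable for ListOptions.LabelSelector.
	labelSel, err := labels.Parse("app=nginx,tier in (frontend)")
	if err != nil {
		panic(err)
	}
	fmt.Println(labelSel.Matches(labels.Set{"app": "nginx", "tier": "frontend"})) // true

	// A field selector restricting a pod list to a single node, suitable
	// for ListOptions.FieldSelector.
	fieldSel := fields.OneTermEqualSelector("spec.nodeName", "node-1")
	fmt.Println(fieldSel.String()) // spec.nodeName=node-1
}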
+ LimitBytes *int64 +} + +// PodAttachOptions is the query options to a Pod's remote attach call +// TODO: merge w/ PodExecOptions below for stdin, stdout, etc +type PodAttachOptions struct { + metav1.TypeMeta + + // Stdin if true indicates that stdin is to be redirected for the attach call + // +optional + Stdin bool + + // Stdout if true indicates that stdout is to be redirected for the attach call + // +optional + Stdout bool + + // Stderr if true indicates that stderr is to be redirected for the attach call + // +optional + Stderr bool + + // TTY if true indicates that a tty will be allocated for the attach call + // +optional + TTY bool + + // Container to attach to. + // +optional + Container string +} + +// PodExecOptions is the query options to a Pod's remote exec call +type PodExecOptions struct { + metav1.TypeMeta + + // Stdin if true indicates that stdin is to be redirected for the exec call + Stdin bool + + // Stdout if true indicates that stdout is to be redirected for the exec call + Stdout bool + + // Stderr if true indicates that stderr is to be redirected for the exec call + Stderr bool + + // TTY if true indicates that a tty will be allocated for the exec call + TTY bool + + // Container in which to execute the command. + Container string + + // Command is the remote command to execute; argv array; not executed within a shell. + Command []string +} + +// PodPortForwardOptions is the query options to a Pod's port forward call +type PodPortForwardOptions struct { + metav1.TypeMeta + + // The list of ports to forward + // +optional + Ports []int32 +} + +// PodProxyOptions is the query options to a Pod's proxy call +type PodProxyOptions struct { + metav1.TypeMeta + + // Path is the URL path to use for the current proxy request + Path string +} + +// NodeProxyOptions is the query options to a Node's proxy call +type NodeProxyOptions struct { + metav1.TypeMeta + + // Path is the URL path to use for the current proxy request + Path string +} + +// ServiceProxyOptions is the query options to a Service's proxy call. +type ServiceProxyOptions struct { + metav1.TypeMeta + + // Path is the part of URLs that include service endpoints, suffixes, + // and parameters to use for the current proxy request to service. + // For example, the whole request URL is + // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. + // Path is _search?q=user:kimchy. + Path string +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +type ObjectReference struct { + // +optional + Kind string + // +optional + Namespace string + // +optional + Name string + // +optional + UID types.UID + // +optional + APIVersion string + // +optional + ResourceVersion string + + // Optional. If referring to a piece of an object instead of an entire object, this string + // should contain information to identify the sub-object. For example, if the object + // reference is to a container within a pod, this would take on a value like: + // "spec.containers{name}" (where "name" refers to the name of the container that triggered + // the event) or if no container name is specified "spec.containers[2]" (container with + // index 2 in this pod). This syntax is chosen only to have some well-defined way of + // referencing a part of an object. + // TODO: this design is not final and this field is subject to change in the future. 
+ // +optional + FieldPath string +} + +// LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. +type LocalObjectReference struct { + //TODO: Add other useful fields. apiVersion, kind, uid? + Name string +} + +type SerializedReference struct { + metav1.TypeMeta + // +optional + Reference ObjectReference +} + +type EventSource struct { + // Component from which the event is generated. + // +optional + Component string + // Node name on which the event is generated. + // +optional + Host string +} + +// Valid values for event types (new types could be added in future) +const ( + // Information only and will not cause any problems + EventTypeNormal string = "Normal" + // These events are to warn that something might go wrong + EventTypeWarning string = "Warning" +) + +// +genclient=true + +// Event is a report of an event somewhere in the cluster. +// TODO: Decide whether to store these separately or with the object they apply to. +type Event struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Required. The object that this event is about. + // +optional + InvolvedObject ObjectReference + + // Optional; this should be a short, machine understandable string that gives the reason + // for this event being generated. For example, if the event is reporting that a container + // can't start, the Reason might be "ImageNotFound". + // TODO: provide exact specification for format. + // +optional + Reason string + + // Optional. A human-readable description of the status of this operation. + // TODO: decide on maximum length. + // +optional + Message string + + // Optional. The component reporting this event. Should be a short machine understandable string. + // +optional + Source EventSource + + // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) + // +optional + FirstTimestamp metav1.Time + + // The time at which the most recent occurrence of this event was recorded. + // +optional + LastTimestamp metav1.Time + + // The number of times this event has occurred. + // +optional + Count int32 + + // Type of this event (Normal, Warning), new types could be added in the future. + // +optional + Type string +} + +// EventList is a list of events. +type EventList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Event +} + +// List holds a list of objects, which may not be known by the server. +type List struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []runtime.Object +} + +// A type of object that is limited +type LimitType string + +const ( + // Limit that applies to all pods in a namespace + LimitTypePod LimitType = "Pod" + // Limit that applies to all containers in a namespace + LimitTypeContainer LimitType = "Container" + // Limit that applies to all persistent volume claims in a namespace + LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" +) + +// LimitRangeItem defines a min/max usage limit for any resource that matches on kind +type LimitRangeItem struct { + // Type of resource that this limit applies to + // +optional + Type LimitType + // Max usage constraints on this kind by resource name + // +optional + Max ResourceList + // Min usage constraints on this kind by resource name + // +optional + Min ResourceList + // Default resource requirement limit value by resource name. + // +optional + Default ResourceList + // DefaultRequest resource requirement request value by resource name. 
+ // +optional + DefaultRequest ResourceList + // MaxLimitRequestRatio represents the max burst value for the named resource + // +optional + MaxLimitRequestRatio ResourceList +} + +// LimitRangeSpec defines a min/max usage limit for resources that match on kind +type LimitRangeSpec struct { + // Limits is the list of LimitRangeItem objects that are enforced + Limits []LimitRangeItem +} + +// +genclient=true + +// LimitRange sets resource usage limits for each kind of resource in a Namespace +type LimitRange struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the limits enforced + // +optional + Spec LimitRangeSpec +} + +// LimitRangeList is a list of LimitRange items. +type LimitRangeList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is a list of LimitRange objects + Items []LimitRange +} + +// The following identify resource constants for Kubernetes object types +const ( + // Pods, number + ResourcePods ResourceName = "pods" + // Services, number + ResourceServices ResourceName = "services" + // ReplicationControllers, number + ResourceReplicationControllers ResourceName = "replicationcontrollers" + // ResourceQuotas, number + ResourceQuotas ResourceName = "resourcequotas" + // ResourceSecrets, number + ResourceSecrets ResourceName = "secrets" + // ResourceConfigMaps, number + ResourceConfigMaps ResourceName = "configmaps" + // ResourcePersistentVolumeClaims, number + ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" + // ResourceServicesNodePorts, number + ResourceServicesNodePorts ResourceName = "services.nodeports" + // ResourceServicesLoadBalancers, number + ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" + // CPU request, in cores. (500m = .5 cores) + ResourceRequestsCPU ResourceName = "requests.cpu" + // Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceRequestsMemory ResourceName = "requests.memory" + // Storage request, in bytes + ResourceRequestsStorage ResourceName = "requests.storage" + // CPU limit, in cores. (500m = .5 cores) + ResourceLimitsCPU ResourceName = "limits.cpu" + // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceLimitsMemory ResourceName = "limits.memory" +) + +// A ResourceQuotaScope defines a filter that must match each object tracked by a quota +type ResourceQuotaScope string + +const ( + // Match all pod objects where spec.activeDeadlineSeconds + ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" + // Match all pod objects where !spec.activeDeadlineSeconds + ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating" + // Match all pod objects that have best effort quality of service + ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" + // Match all pod objects that do not have best effort quality of service + ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" +) + +// ResourceQuotaSpec defines the desired hard limits to enforce for Quota +type ResourceQuotaSpec struct { + // Hard is the set of desired hard limits for each named resource + // +optional + Hard ResourceList + // A collection of filters that must match each object tracked by a quota. + // If not specified, the quota matches all objects. 
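ResourceQuotaStatus pairs a Hard and a Used ResourceList, and enforcement reduces to comparing resource.Quantity values per resource name. The sketch below assumes the vendored k8s.io/apimachinery/pkg/api/resource package and uses a plain map in place of ResourceList; the overHard helper is illustrative.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// overHard reports which resources in used exceed the corresponding hard
// limit. A plain map[string]resource.Quantity stands in for ResourceList.
func overHard(hard, used map[string]resource.Quantity) []string {
	var exceeded []string
	for name, limit := range hard {
		if u, ok := used[name]; ok && u.Cmp(limit) > 0 {
			exceeded = append(exceeded, name)
		}
	}
	return exceeded
}

func main() {
	hard := map[string]resource.Quantity{
		"requests.cpu":    resource.MustParse("2"),
		"requests.memory": resource.MustParse("1Gi"),
		"pods":            resource.MustParse("10"),
	}
	used := map[string]resource.Quantity{
		"requests.cpu":    resource.MustParse("2500m"), // 2.5 cores > 2
		"requests.memory": resource.MustParse("512Mi"),
		"pods":            resource.MustParse("4"),
	}
	fmt.Println(overHard(hard, used)) // [requests.cpu]
}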
+ // +optional + Scopes []ResourceQuotaScope +} + +// ResourceQuotaStatus defines the enforced hard limits and observed use +type ResourceQuotaStatus struct { + // Hard is the set of enforced hard limits for each named resource + // +optional + Hard ResourceList + // Used is the current observed total usage of the resource in the namespace + // +optional + Used ResourceList +} + +// +genclient=true + +// ResourceQuota sets aggregate quota restrictions enforced per namespace +type ResourceQuota struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired quota + // +optional + Spec ResourceQuotaSpec + + // Status defines the actual enforced quota and its current usage + // +optional + Status ResourceQuotaStatus +} + +// ResourceQuotaList is a list of ResourceQuota items +type ResourceQuotaList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is a list of ResourceQuota objects + Items []ResourceQuota +} + +// +genclient=true + +// Secret holds secret data of a certain type. The total bytes of the values in +// the Data field must be less than MaxSecretSize bytes. +type Secret struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN + // or leading dot followed by valid DNS_SUBDOMAIN. + // The serialized form of the secret data is a base64 encoded string, + // representing the arbitrary (possibly non-string) data value here. + // +optional + Data map[string][]byte + + // Used to facilitate programmatic handling of secret data. + // +optional + Type SecretType +} + +const MaxSecretSize = 1 * 1024 * 1024 + +type SecretType string + +const ( + // SecretTypeOpaque is the default; arbitrary user-defined data + SecretTypeOpaque SecretType = "Opaque" + + // SecretTypeServiceAccountToken contains a token that identifies a service account to the API + // + // Required fields: + // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies + // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies + // - Secret.Data["token"] - a token that identifies the service account to the API + SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token" + + // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountNameKey = "kubernetes.io/service-account.name" + // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountUIDKey = "kubernetes.io/service-account.uid" + // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets + ServiceAccountTokenKey = "token" + // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets + ServiceAccountKubeconfigKey = "kubernetes.kubeconfig" + // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets + ServiceAccountRootCAKey = "ca.crt" + // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls + ServiceAccountNamespaceKey = "namespace" + + // SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg + // + // Required fields: + // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file + SecretTypeDockercfg SecretType = 
"kubernetes.io/dockercfg" + + // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets + DockerConfigKey = ".dockercfg" + + // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json + // + // Required fields: + // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file + SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson" + + // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets + DockerConfigJsonKey = ".dockerconfigjson" + + // SecretTypeBasicAuth contains data needed for basic authentication. + // + // Required at least one of fields: + // - Secret.Data["username"] - username used for authentication + // - Secret.Data["password"] - password or token needed for authentication + SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth" + + // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets + BasicAuthUsernameKey = "username" + // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets + BasicAuthPasswordKey = "password" + + // SecretTypeSSHAuth contains data needed for SSH authetication. + // + // Required field: + // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication + SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" + + // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets + SSHAuthPrivateKey = "ssh-privatekey" + + // SecretTypeTLS contains information about a TLS client or server secret. It + // is primarily used with TLS termination of the Ingress resource, but may be + // used in other types. + // + // Required fields: + // - Secret.Data["tls.key"] - TLS private key. + // Secret.Data["tls.crt"] - TLS certificate. + // TODO: Consider supporting different formats, specifying CA/destinationCA. + SecretTypeTLS SecretType = "kubernetes.io/tls" + + // TLSCertKey is the key for tls certificates in a TLS secret. + TLSCertKey = "tls.crt" + // TLSPrivateKeyKey is the key for the private key field in a TLS secret. + TLSPrivateKeyKey = "tls.key" +) + +type SecretList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Secret +} + +// +genclient=true + +// ConfigMap holds configuration data for components or applications to consume. +type ConfigMap struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Data contains the configuration data. + // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot. + // +optional + Data map[string]string +} + +// ConfigMapList is a resource containing a list of ConfigMap objects. +type ConfigMapList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is the list of ConfigMaps. + Items []ConfigMap +} + +// These constants are for remote command execution and port forwarding and are +// used by both the client side and server side components. +// +// This is probably not the ideal place for them, but it didn't seem worth it +// to create pkg/exec and pkg/portforward just to contain a single file with +// constants in it. Suggestions for more appropriate alternatives are +// definitely welcome! 
+const ( + // Enable stdin for remote command execution + ExecStdinParam = "input" + // Enable stdout for remote command execution + ExecStdoutParam = "output" + // Enable stderr for remote command execution + ExecStderrParam = "error" + // Enable TTY for remote command execution + ExecTTYParam = "tty" + // Command to run for remote command execution + ExecCommandParamm = "command" + + // Name of header that specifies stream type + StreamType = "streamType" + // Value for streamType header for stdin stream + StreamTypeStdin = "stdin" + // Value for streamType header for stdout stream + StreamTypeStdout = "stdout" + // Value for streamType header for stderr stream + StreamTypeStderr = "stderr" + // Value for streamType header for data stream + StreamTypeData = "data" + // Value for streamType header for error stream + StreamTypeError = "error" + // Value for streamType header for terminal resize stream + StreamTypeResize = "resize" + + // Name of header that specifies the port being forwarded + PortHeader = "port" + // Name of header that specifies a request ID used to associate the error + // and data streams for a single forwarded connection + PortForwardRequestIDHeader = "requestID" +) + +// Type and constants for component health validation. +type ComponentConditionType string + +// These are the valid conditions for the component. +const ( + ComponentHealthy ComponentConditionType = "Healthy" +) + +type ComponentCondition struct { + Type ComponentConditionType + Status ConditionStatus + // +optional + Message string + // +optional + Error string +} + +// +genclient=true +// +nonNamespaced=true + +// ComponentStatus (and ComponentStatusList) holds the cluster validation info. +type ComponentStatus struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // +optional + Conditions []ComponentCondition +} + +type ComponentStatusList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ComponentStatus +} + +// SecurityContext holds security configuration that will be applied to a container. +// Some fields are present in both SecurityContext and PodSecurityContext. When both +// are set, the values in SecurityContext take precedence. +type SecurityContext struct { + // The capabilities to add/drop when running containers. + // Defaults to the default set of capabilities granted by the container runtime. + // +optional + Capabilities *Capabilities + // Run container in privileged mode. + // Processes in privileged containers are essentially equivalent to root on the host. + // Defaults to false. + // +optional + Privileged *bool + // The SELinux context to be applied to the container. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + SELinuxOptions *SELinuxOptions + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsUser *int64 + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. 
+ // If unset or false, no such validation will be performed. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsNonRoot *bool + // The read-only root filesystem allows you to restrict the locations that an application can write + // files to, ensuring the persistent data can only be written to mounts. + // +optional + ReadOnlyRootFilesystem *bool +} + +// SELinuxOptions are the labels to be applied to the container. +type SELinuxOptions struct { + // SELinux user label + // +optional + User string + // SELinux role label + // +optional + Role string + // SELinux type label + // +optional + Type string + // SELinux level label. + // +optional + Level string +} + +// RangeAllocation is an opaque API object (not exposed to end users) that can be persisted to record +// the global allocation state of the cluster. The schema of Range and Data generic, in that Range +// should be a string representation of the inputs to a range (for instance, for IP allocation it +// might be a CIDR) and Data is an opaque blob understood by an allocator which is typically a +// binary range. Consumers should use annotations to record additional information (schema version, +// data encoding hints). A range allocation should *ALWAYS* be recreatable at any time by observation +// of the cluster, thus the object is less strongly typed than most. +type RangeAllocation struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + // A string representing a unique label for a range of resources, such as a CIDR "10.0.0.0/8" or + // port range "10000-30000". Range is not strongly schema'd here. The Range is expected to define + // a start and end unless there is an implicit end. + Range string + // A byte array representing the serialized state of a range allocation. Additional clarifiers on + // the type or format of data should be represented with annotations. For IP allocations, this is + // represented as a bit array starting at the base IP of the CIDR in Range, with each bit representing + // a single allocated address (the fifth bit on CIDR 10.0.0.0/8 is 10.0.0.4). + Data []byte +} + +const ( + // "default-scheduler" is the name of default scheduler. + DefaultSchedulerName = "default-scheduler" + + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // When the --hard-pod-affinity-weight scheduler flag is not specified, + // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. + DefaultHardPodAffinitySymmetricWeight int = 1 + + // When the --failure-domains scheduler flag is not specified, + // DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity. 
+ DefaultFailureDomains string = metav1.LabelHostname + "," + metav1.LabelZoneFailureDomain + "," + metav1.LabelZoneRegion +) diff --git a/vendor/k8s.io/client-go/pkg/api/v1/OWNERS b/vendor/k8s.io/client-go/pkg/api/v1/OWNERS new file mode 100755 index 0000000000..fdb84b24a9 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/OWNERS @@ -0,0 +1,41 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- yujuhong +- brendandburns +- derekwaynecarr +- caesarxuchao +- vishh +- mikedanese +- liggitt +- nikhiljindal +- bprashanth +- gmarek +- erictune +- davidopp +- pmorie +- sttts +- kargakis +- dchen1107 +- saad-ali +- zmerlynn +- luxas +- janetkuo +- justinsb +- roberthbailey +- ncdc +- timstclair +- eparis +- timothysc +- piosz +- jsafrane +- dims +- errordeveloper +- madhusudancs +- krousey +- jayunit100 +- rootfs +- markturansky diff --git a/vendor/k8s.io/client-go/pkg/api/v1/conversion.go b/vendor/k8s.io/client-go/pkg/api/v1/conversion.go new file mode 100644 index 0000000000..041517819e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/conversion.go @@ -0,0 +1,785 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "fmt" + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/extensions" +) + +// This is a "fast-path" that avoids reflection for common types. It focuses on the objects that are +// converted the most in the cluster. 
+// TODO: generate one of these for every external API group - this is to prove the impact +func addFastPathConversionFuncs(scheme *runtime.Scheme) error { + scheme.AddGenericConversionFunc(func(objA, objB interface{}, s conversion.Scope) (bool, error) { + switch a := objA.(type) { + case *Pod: + switch b := objB.(type) { + case *api.Pod: + return true, Convert_v1_Pod_To_api_Pod(a, b, s) + } + case *api.Pod: + switch b := objB.(type) { + case *Pod: + return true, Convert_api_Pod_To_v1_Pod(a, b, s) + } + + case *Event: + switch b := objB.(type) { + case *api.Event: + return true, Convert_v1_Event_To_api_Event(a, b, s) + } + case *api.Event: + switch b := objB.(type) { + case *Event: + return true, Convert_api_Event_To_v1_Event(a, b, s) + } + + case *ReplicationController: + switch b := objB.(type) { + case *api.ReplicationController: + return true, Convert_v1_ReplicationController_To_api_ReplicationController(a, b, s) + } + case *api.ReplicationController: + switch b := objB.(type) { + case *ReplicationController: + return true, Convert_api_ReplicationController_To_v1_ReplicationController(a, b, s) + } + + case *Node: + switch b := objB.(type) { + case *api.Node: + return true, Convert_v1_Node_To_api_Node(a, b, s) + } + case *api.Node: + switch b := objB.(type) { + case *Node: + return true, Convert_api_Node_To_v1_Node(a, b, s) + } + + case *Namespace: + switch b := objB.(type) { + case *api.Namespace: + return true, Convert_v1_Namespace_To_api_Namespace(a, b, s) + } + case *api.Namespace: + switch b := objB.(type) { + case *Namespace: + return true, Convert_api_Namespace_To_v1_Namespace(a, b, s) + } + + case *Service: + switch b := objB.(type) { + case *api.Service: + return true, Convert_v1_Service_To_api_Service(a, b, s) + } + case *api.Service: + switch b := objB.(type) { + case *Service: + return true, Convert_api_Service_To_v1_Service(a, b, s) + } + + case *Endpoints: + switch b := objB.(type) { + case *api.Endpoints: + return true, Convert_v1_Endpoints_To_api_Endpoints(a, b, s) + } + case *api.Endpoints: + switch b := objB.(type) { + case *Endpoints: + return true, Convert_api_Endpoints_To_v1_Endpoints(a, b, s) + } + + case *metav1.WatchEvent: + switch b := objB.(type) { + case *metav1.InternalEvent: + return true, metav1.Convert_versioned_Event_to_versioned_InternalEvent(a, b, s) + } + case *metav1.InternalEvent: + switch b := objB.(type) { + case *metav1.WatchEvent: + return true, metav1.Convert_versioned_InternalEvent_to_versioned_Event(a, b, s) + } + } + return false, nil + }) + return nil +} + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + err := scheme.AddConversionFuncs( + Convert_api_Pod_To_v1_Pod, + Convert_api_PodSpec_To_v1_PodSpec, + Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, + Convert_api_ServiceSpec_To_v1_ServiceSpec, + Convert_v1_Pod_To_api_Pod, + Convert_v1_PodSpec_To_api_PodSpec, + Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec, + Convert_v1_Secret_To_api_Secret, + Convert_v1_ServiceSpec_To_api_ServiceSpec, + Convert_v1_ResourceList_To_api_ResourceList, + Convert_v1_ReplicationController_to_extensions_ReplicaSet, + Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec, + Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus, + Convert_extensions_ReplicaSet_to_v1_ReplicationController, + Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec, + Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus, + ) + if 
err != nil { + return err + } + + // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. + for _, k := range []string{ + "Endpoints", + "ResourceQuota", + "PersistentVolumeClaim", + "Service", + "ServiceAccount", + "ConfigMap", + } { + kind := k // don't close over range variables + err = scheme.AddFieldLabelConversionFunc("v1", kind, + func(label, value string) (string, string, error) { + switch label { + case "metadata.namespace", + "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label %q not supported for %q", label, kind) + } + }, + ) + if err != nil { + return err + } + } + + // Add field conversion funcs. + err = scheme.AddFieldLabelConversionFunc("v1", "Pod", + func(label, value string) (string, string, error) { + switch label { + case "metadata.annotations", + "metadata.labels", + "metadata.name", + "metadata.namespace", + "spec.nodeName", + "spec.restartPolicy", + "spec.serviceAccountName", + "status.phase", + "status.podIP": + return label, value, nil + // This is for backwards compatibility with old v1 clients which send spec.host + case "spec.host": + return "spec.nodeName", value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + err = scheme.AddFieldLabelConversionFunc("v1", "Node", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + case "spec.unschedulable": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + err = scheme.AddFieldLabelConversionFunc("v1", "ReplicationController", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", + "metadata.namespace", + "status.replicas": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) + if err != nil { + return err + } + err = scheme.AddFieldLabelConversionFunc("v1", "PersistentVolume", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + if err := AddFieldLabelConversionsForEvent(scheme); err != nil { + return err + } + if err := AddFieldLabelConversionsForNamespace(scheme); err != nil { + return err + } + if err := AddFieldLabelConversionsForSecret(scheme); err != nil { + return err + } + return nil +} + +func Convert_v1_ReplicationController_to_extensions_ReplicaSet(in *ReplicationController, out *extensions.ReplicaSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec(in *ReplicationControllerSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { + out.Replicas = *in.Replicas + if in.Selector != nil { + metav1.Convert_map_to_unversioned_LabelSelector(&in.Selector, out.Selector, s) + } + if in.Template != nil { + if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, &out.Template, s); err != nil { + 
return err + } + } + return nil +} + +func Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus(in *ReplicationControllerStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + return nil +} + +func Convert_extensions_ReplicaSet_to_v1_ReplicationController(in *extensions.ReplicaSet, out *ReplicationController, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + fieldErr, ok := err.(*field.Error) + if !ok { + return err + } + if out.Annotations == nil { + out.Annotations = make(map[string]string) + } + out.Annotations[api.NonConvertibleAnnotationPrefix+"/"+fieldErr.Field] = reflect.ValueOf(fieldErr.BadValue).String() + } + if err := Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec(in *extensions.ReplicaSetSpec, out *ReplicationControllerSpec, s conversion.Scope) error { + out.Replicas = new(int32) + *out.Replicas = in.Replicas + out.MinReadySeconds = in.MinReadySeconds + var invalidErr error + if in.Selector != nil { + invalidErr = metav1.Convert_unversioned_LabelSelector_to_map(in.Selector, &out.Selector, s) + } + out.Template = new(PodTemplateSpec) + if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, out.Template, s); err != nil { + return err + } + return invalidErr +} + +func Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus(in *extensions.ReplicaSetStatus, out *ReplicationControllerStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + return nil +} + +func Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *ReplicationControllerSpec, s conversion.Scope) error { + out.Replicas = &in.Replicas + out.MinReadySeconds = in.MinReadySeconds + out.Selector = in.Selector + if in.Template != nil { + out.Template = new(PodTemplateSpec) + if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error { + if in.Replicas != nil { + out.Replicas = *in.Replicas + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = in.Selector + if in.Template != nil { + out.Template = new(api.PodTemplateSpec) + if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, out.Template, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func Convert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error { + if err := autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in, out, s); err != nil { + return err + } + + if old := out.Annotations; old != nil { + out.Annotations = 
make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + } + if len(out.Status.InitContainerStatuses) > 0 { + if out.Annotations == nil { + out.Annotations = make(map[string]string) + } + value, err := json.Marshal(out.Status.InitContainerStatuses) + if err != nil { + return err + } + out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value) + out.Annotations[PodInitContainerStatusesBetaAnnotationKey] = string(value) + } else { + delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) + } + return nil +} + +func Convert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { + // TODO: sometime after we move init container to stable, remove these conversions + // If there is a beta annotation, copy to alpha key. + // See commit log for PR #31026 for why we do this. + if valueBeta, okBeta := in.Annotations[PodInitContainerStatusesBetaAnnotationKey]; okBeta { + in.Annotations[PodInitContainerStatusesAnnotationKey] = valueBeta + } + // Move the annotation to the internal repr. field + if value, ok := in.Annotations[PodInitContainerStatusesAnnotationKey]; ok { + var values []ContainerStatus + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + // Conversion from external to internal version exists more to + // satisfy the needs of the decoder than it does to be a general + // purpose tool. And Decode always creates an intermediate object + // to decode to. Thus the caller of UnsafeConvertToVersion is + // taking responsibility to ensure mutation of in is not exposed + // back to the caller. + in.Status.InitContainerStatuses = values + } + + if err := autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in, out, s); err != nil { + return err + } + if len(out.Annotations) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) + } + return nil +} + +func Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error { + if err := autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil { + return err + } + + // TODO: sometime after we move init container to stable, remove these conversions. + if old := out.Annotations; old != nil { + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + } + if len(out.Spec.InitContainers) > 0 { + if out.Annotations == nil { + out.Annotations = make(map[string]string) + } + value, err := json.Marshal(out.Spec.InitContainers) + if err != nil { + return err + } + out.Annotations[PodInitContainersAnnotationKey] = string(value) + out.Annotations[PodInitContainersBetaAnnotationKey] = string(value) + } else { + delete(out.Annotations, PodInitContainersAnnotationKey) + delete(out.Annotations, PodInitContainersBetaAnnotationKey) + } + return nil +} + +func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { + // TODO: sometime after we move init container to stable, remove these conversions + // If there is a beta annotation, copy to alpha key. + // See commit log for PR #31026 for why we do this. 
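[Illustrative aside, not part of the vendored diff] The PodStatusResult conversions above round-trip the typed InitContainerStatuses field through a JSON-encoded annotation: serialized on the way out to the external version, decoded and then removed on the way back in. A minimal sketch of that round-trip; the status type and annotation key here are hypothetical stand-ins:

package main

import (
    "encoding/json"
    "fmt"
)

type containerStatus struct {
    Name  string `json:"name"`
    Ready bool   `json:"ready"`
}

const initStatusesKey = "example.io/init-container-statuses" // hypothetical key

func main() {
    statuses := []containerStatus{{Name: "init-db", Ready: true}}
    annotations := map[string]string{}

    // Outbound conversion: embed the typed field as a JSON annotation.
    raw, err := json.Marshal(statuses)
    if err != nil {
        panic(err)
    }
    annotations[initStatusesKey] = string(raw)

    // Inbound conversion: restore the typed field, then drop the annotation
    // so it does not leak into the internal representation.
    var restored []containerStatus
    if v, ok := annotations[initStatusesKey]; ok {
        if err := json.Unmarshal([]byte(v), &restored); err != nil {
            panic(err)
        }
        delete(annotations, initStatusesKey)
    }
    fmt.Printf("%+v %v\n", restored, annotations)
}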
+ if valueBeta, okBeta := in.Annotations[PodInitContainersBetaAnnotationKey]; okBeta { + in.Annotations[PodInitContainersAnnotationKey] = valueBeta + } + // Move the annotation to the internal repr. field + if value, ok := in.Annotations[PodInitContainersAnnotationKey]; ok { + var values []Container + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + // Conversion from external to internal version exists more to + // satisfy the needs of the decoder than it does to be a general + // purpose tool. And Decode always creates an intermediate object + // to decode to. Thus the caller of UnsafeConvertToVersion is + // taking responsibility to ensure mutation of in is not exposed + // back to the caller. + in.Spec.InitContainers = values + + // Call defaulters explicitly until annotations are removed + for i := range in.Spec.InitContainers { + c := &in.Spec.InitContainers[i] + SetDefaults_Container(c) + } + } + + if err := autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in, out, s); err != nil { + return err + } + if len(out.Annotations) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + delete(out.Annotations, PodInitContainersAnnotationKey) + delete(out.Annotations, PodInitContainersBetaAnnotationKey) + } + return nil +} + +// The following two PodSpec conversions are done here to support ServiceAccount +// as an alias for ServiceAccountName. +func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversion.Scope) error { + if err := autoConvert_api_PodSpec_To_v1_PodSpec(in, out, s); err != nil { + return err + } + + // DeprecatedServiceAccount is an alias for ServiceAccountName. + out.DeprecatedServiceAccount = in.ServiceAccountName + + if in.SecurityContext != nil { + // the host namespace fields have to be handled here for backward compatibility + // with v1.0.0 + out.HostPID = in.SecurityContext.HostPID + out.HostNetwork = in.SecurityContext.HostNetwork + out.HostIPC = in.SecurityContext.HostIPC + } + + return nil +} + +func Convert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversion.Scope) error { + if err := autoConvert_v1_PodSpec_To_api_PodSpec(in, out, s); err != nil { + return err + } + + // We support DeprecatedServiceAccount as an alias for ServiceAccountName. + // If both are specified, ServiceAccountName (the new field) wins. 
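[Illustrative aside, not part of the vendored diff] The PodSpec conversions keep deprecatedServiceAccount as a write-only alias: it is always populated from serviceAccountName on the way out, and only consulted on the way in when serviceAccountName is empty. A tiny sketch of that precedence rule with hypothetical stand-in types:

package main

import "fmt"

type external struct{ ServiceAccountName, DeprecatedServiceAccount string }
type internal struct{ ServiceAccountName string }

func toExternal(in internal) external {
    // Mirror the new field into the legacy alias for old clients.
    return external{ServiceAccountName: in.ServiceAccountName, DeprecatedServiceAccount: in.ServiceAccountName}
}

func toInternal(in external) internal {
    name := in.ServiceAccountName
    if name == "" {
        name = in.DeprecatedServiceAccount // legacy alias only wins when the new field is unset
    }
    return internal{ServiceAccountName: name}
}

func main() {
    fmt.Println(toInternal(external{DeprecatedServiceAccount: "builder"}).ServiceAccountName)                              // builder
    fmt.Println(toInternal(external{ServiceAccountName: "app", DeprecatedServiceAccount: "builder"}).ServiceAccountName)   // app
}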
+ if in.ServiceAccountName == "" { + out.ServiceAccountName = in.DeprecatedServiceAccount + } + + // the host namespace fields have to be handled specially for backward compatibility + // with v1.0.0 + if out.SecurityContext == nil { + out.SecurityContext = new(api.PodSecurityContext) + } + out.SecurityContext.HostNetwork = in.HostNetwork + out.SecurityContext.HostPID = in.HostPID + out.SecurityContext.HostIPC = in.HostIPC + + return nil +} + +func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error { + if err := autoConvert_api_Pod_To_v1_Pod(in, out, s); err != nil { + return err + } + + // TODO: sometime after we move init container to stable, remove these conversions + if len(out.Spec.InitContainers) > 0 || len(out.Status.InitContainerStatuses) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + delete(out.Annotations, PodInitContainersAnnotationKey) + delete(out.Annotations, PodInitContainersBetaAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) + } + if len(out.Spec.InitContainers) > 0 { + value, err := json.Marshal(out.Spec.InitContainers) + if err != nil { + return err + } + out.Annotations[PodInitContainersAnnotationKey] = string(value) + out.Annotations[PodInitContainersBetaAnnotationKey] = string(value) + } + if len(out.Status.InitContainerStatuses) > 0 { + value, err := json.Marshal(out.Status.InitContainerStatuses) + if err != nil { + return err + } + out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value) + out.Annotations[PodInitContainerStatusesBetaAnnotationKey] = string(value) + } + + return nil +} + +func Convert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error { + // If there is a beta annotation, copy to alpha key. + // See commit log for PR #31026 for why we do this. + if valueBeta, okBeta := in.Annotations[PodInitContainersBetaAnnotationKey]; okBeta { + in.Annotations[PodInitContainersAnnotationKey] = valueBeta + } + // TODO: sometime after we move init container to stable, remove these conversions + // Move the annotation to the internal repr. field + if value, ok := in.Annotations[PodInitContainersAnnotationKey]; ok { + var values []Container + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + // Conversion from external to internal version exists more to + // satisfy the needs of the decoder than it does to be a general + // purpose tool. And Decode always creates an intermediate object + // to decode to. Thus the caller of UnsafeConvertToVersion is + // taking responsibility to ensure mutation of in is not exposed + // back to the caller. + in.Spec.InitContainers = values + // Call defaulters explicitly until annotations are removed + for i := range in.Spec.InitContainers { + c := &in.Spec.InitContainers[i] + SetDefaults_Container(c) + } + } + // If there is a beta annotation, copy to alpha key. + // See commit log for PR #31026 for why we do this. 
+ if valueBeta, okBeta := in.Annotations[PodInitContainerStatusesBetaAnnotationKey]; okBeta { + in.Annotations[PodInitContainerStatusesAnnotationKey] = valueBeta + } + if value, ok := in.Annotations[PodInitContainerStatusesAnnotationKey]; ok { + var values []ContainerStatus + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + // Conversion from external to internal version exists more to + // satisfy the needs of the decoder than it does to be a general + // purpose tool. And Decode always creates an intermediate object + // to decode to. Thus the caller of UnsafeConvertToVersion is + // taking responsibility to ensure mutation of in is not exposed + // back to the caller. + in.Status.InitContainerStatuses = values + } + + if err := autoConvert_v1_Pod_To_api_Pod(in, out, s); err != nil { + return err + } + if len(out.Annotations) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + delete(out.Annotations, PodInitContainersAnnotationKey) + delete(out.Annotations, PodInitContainersBetaAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) + } + return nil +} + +func Convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *ServiceSpec, s conversion.Scope) error { + if err := autoConvert_api_ServiceSpec_To_v1_ServiceSpec(in, out, s); err != nil { + return err + } + // Publish both externalIPs and deprecatedPublicIPs fields in v1. + out.DeprecatedPublicIPs = in.ExternalIPs + return nil +} + +func Convert_v1_Secret_To_api_Secret(in *Secret, out *api.Secret, s conversion.Scope) error { + if err := autoConvert_v1_Secret_To_api_Secret(in, out, s); err != nil { + return err + } + + // StringData overwrites Data + if len(in.StringData) > 0 { + if out.Data == nil { + out.Data = map[string][]byte{} + } + for k, v := range in.StringData { + out.Data[k] = []byte(v) + } + } + + return nil +} + +func Convert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error { + if err := autoConvert_v1_ServiceSpec_To_api_ServiceSpec(in, out, s); err != nil { + return err + } + // Prefer the legacy deprecatedPublicIPs field, if provided. 
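[Illustrative aside, not part of the vendored diff] The Secret conversion above byte-encodes every stringData entry and lets it overwrite any same-named key already present in data. A small self-contained sketch of that merge rule, using plain maps rather than the real Secret type:

package main

import "fmt"

func mergeStringData(data map[string][]byte, stringData map[string]string) map[string][]byte {
    if len(stringData) == 0 {
        return data
    }
    if data == nil {
        data = map[string][]byte{}
    }
    for k, v := range stringData {
        data[k] = []byte(v) // stringData wins over data on key collisions
    }
    return data
}

func main() {
    data := map[string][]byte{"password": []byte("old")}
    data = mergeStringData(data, map[string]string{"password": "new", "token": "abc"})
    fmt.Printf("%s %s\n", data["password"], data["token"]) // new abc
}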
+ if len(in.DeprecatedPublicIPs) > 0 { + out.ExternalIPs = in.DeprecatedPublicIPs + } + return nil +} + +func Convert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurityContext, out *PodSecurityContext, s conversion.Scope) error { + out.SupplementalGroups = in.SupplementalGroups + if in.SELinuxOptions != nil { + out.SELinuxOptions = new(SELinuxOptions) + if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { + return err + } + } else { + out.SELinuxOptions = nil + } + out.RunAsUser = in.RunAsUser + out.RunAsNonRoot = in.RunAsNonRoot + out.FSGroup = in.FSGroup + return nil +} + +func Convert_v1_PodSecurityContext_To_api_PodSecurityContext(in *PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error { + out.SupplementalGroups = in.SupplementalGroups + if in.SELinuxOptions != nil { + out.SELinuxOptions = new(api.SELinuxOptions) + if err := Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { + return err + } + } else { + out.SELinuxOptions = nil + } + out.RunAsUser = in.RunAsUser + out.RunAsNonRoot = in.RunAsNonRoot + out.FSGroup = in.FSGroup + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_v1_ResourceList_To_api_ResourceList(in *ResourceList, out *api.ResourceList, s conversion.Scope) error { + if *in == nil { + return nil + } + if *out == nil { + *out = make(api.ResourceList, len(*in)) + } + for key, val := range *in { + // Moved to defaults + // TODO(#18538): We round up resource values to milli scale to maintain API compatibility. + // In the future, we should instead reject values that need rounding. + // const milliScale = -3 + // val.RoundUp(milliScale) + + (*out)[api.ResourceName(key)] = val + } + return nil +} + +func AddFieldLabelConversionsForEvent(scheme *runtime.Scheme) error { + return scheme.AddFieldLabelConversionFunc("v1", "Event", + func(label, value string) (string, string, error) { + switch label { + case "involvedObject.kind", + "involvedObject.namespace", + "involvedObject.name", + "involvedObject.uid", + "involvedObject.apiVersion", + "involvedObject.resourceVersion", + "involvedObject.fieldPath", + "reason", + "source", + "type", + "metadata.namespace", + "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) +} + +func AddFieldLabelConversionsForNamespace(scheme *runtime.Scheme) error { + return scheme.AddFieldLabelConversionFunc("v1", "Namespace", + func(label, value string) (string, string, error) { + switch label { + case "status.phase", + "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) +} + +func AddFieldLabelConversionsForSecret(scheme *runtime.Scheme) error { + return scheme.AddFieldLabelConversionFunc("v1", "Secret", + func(label, value string) (string, string, error) { + switch label { + case "type", + "metadata.namespace", + "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/defaults.go b/vendor/k8s.io/client-go/pkg/api/v1/defaults.go new file mode 100644 index 0000000000..17b0deb01f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/defaults.go @@ -0,0 +1,389 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/pkg/util" + "k8s.io/client-go/pkg/util/parsers" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + RegisterDefaults(scheme) + return scheme.AddDefaultingFuncs( + SetDefaults_PodExecOptions, + SetDefaults_PodAttachOptions, + SetDefaults_ReplicationController, + SetDefaults_Volume, + SetDefaults_ContainerPort, + SetDefaults_Container, + SetDefaults_ServiceSpec, + SetDefaults_Pod, + SetDefaults_PodSpec, + SetDefaults_Probe, + SetDefaults_SecretVolumeSource, + SetDefaults_ConfigMapVolumeSource, + SetDefaults_DownwardAPIVolumeSource, + SetDefaults_ProjectedVolumeSource, + SetDefaults_Secret, + SetDefaults_PersistentVolume, + SetDefaults_PersistentVolumeClaim, + SetDefaults_ISCSIVolumeSource, + SetDefaults_Endpoints, + SetDefaults_HTTPGetAction, + SetDefaults_NamespaceStatus, + SetDefaults_Node, + SetDefaults_NodeStatus, + SetDefaults_ObjectFieldSelector, + SetDefaults_LimitRangeItem, + SetDefaults_ConfigMap, + SetDefaults_RBDVolumeSource, + SetDefaults_ResourceList, + ) +} + +func SetDefaults_ResourceList(obj *ResourceList) { + for key, val := range *obj { + // TODO(#18538): We round up resource values to milli scale to maintain API compatibility. + // In the future, we should instead reject values that need rounding. 
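[Illustrative aside, not part of the vendored diff] SetDefaults_ResourceList above rounds every quantity up to milli precision so values survive the v1 round-trip. A minimal sketch of the same call, assuming the vendored k8s.io/apimachinery/pkg/api/resource package is importable; the input value is made up for illustration:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    q := resource.MustParse("0.0001") // finer than milli scale
    const milliScale = -3
    q.RoundUp(milliScale)   // round the value up to milli scale, in place
    fmt.Println(q.String()) // now expressed at milli precision ("1m")
}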
+ const milliScale = -3 + val.RoundUp(milliScale) + + (*obj)[ResourceName(key)] = val + } +} + +func SetDefaults_PodExecOptions(obj *PodExecOptions) { + obj.Stdout = true + obj.Stderr = true +} +func SetDefaults_PodAttachOptions(obj *PodAttachOptions) { + obj.Stdout = true + obj.Stderr = true +} +func SetDefaults_ReplicationController(obj *ReplicationController) { + var labels map[string]string + if obj.Spec.Template != nil { + labels = obj.Spec.Template.Labels + } + // TODO: support templates defined elsewhere when we support them in the API + if labels != nil { + if len(obj.Spec.Selector) == 0 { + obj.Spec.Selector = labels + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } +} +func SetDefaults_Volume(obj *Volume) { + if util.AllPtrFieldsNil(&obj.VolumeSource) { + obj.VolumeSource = VolumeSource{ + EmptyDir: &EmptyDirVolumeSource{}, + } + } +} +func SetDefaults_ContainerPort(obj *ContainerPort) { + if obj.Protocol == "" { + obj.Protocol = ProtocolTCP + } +} +func SetDefaults_Container(obj *Container) { + if obj.ImagePullPolicy == "" { + // Ignore error and assume it has been validated elsewhere + _, tag, _, _ := parsers.ParseImageName(obj.Image) + + // Check image tag + if tag == "latest" { + obj.ImagePullPolicy = PullAlways + } else { + obj.ImagePullPolicy = PullIfNotPresent + } + } + if obj.TerminationMessagePath == "" { + obj.TerminationMessagePath = TerminationMessagePathDefault + } + if obj.TerminationMessagePolicy == "" { + obj.TerminationMessagePolicy = TerminationMessageReadFile + } +} +func SetDefaults_ServiceSpec(obj *ServiceSpec) { + if obj.SessionAffinity == "" { + obj.SessionAffinity = ServiceAffinityNone + } + if obj.Type == "" { + obj.Type = ServiceTypeClusterIP + } + for i := range obj.Ports { + sp := &obj.Ports[i] + if sp.Protocol == "" { + sp.Protocol = ProtocolTCP + } + if sp.TargetPort == intstr.FromInt(0) || sp.TargetPort == intstr.FromString("") { + sp.TargetPort = intstr.FromInt(int(sp.Port)) + } + } +} +func SetDefaults_Pod(obj *Pod) { + // If limits are specified, but requests are not, default requests to limits + // This is done here rather than a more specific defaulting pass on ResourceRequirements + // because we only want this defaulting semantic to take place on a Pod and not a PodTemplate + for i := range obj.Spec.Containers { + // set requests to limits if requests are not specified, but limits are + if obj.Spec.Containers[i].Resources.Limits != nil { + if obj.Spec.Containers[i].Resources.Requests == nil { + obj.Spec.Containers[i].Resources.Requests = make(ResourceList) + } + for key, value := range obj.Spec.Containers[i].Resources.Limits { + if _, exists := obj.Spec.Containers[i].Resources.Requests[key]; !exists { + obj.Spec.Containers[i].Resources.Requests[key] = *(value.Copy()) + } + } + } + } + for i := range obj.Spec.InitContainers { + if obj.Spec.InitContainers[i].Resources.Limits != nil { + if obj.Spec.InitContainers[i].Resources.Requests == nil { + obj.Spec.InitContainers[i].Resources.Requests = make(ResourceList) + } + for key, value := range obj.Spec.InitContainers[i].Resources.Limits { + if _, exists := obj.Spec.InitContainers[i].Resources.Requests[key]; !exists { + obj.Spec.InitContainers[i].Resources.Requests[key] = *(value.Copy()) + } + } + } + } +} +func SetDefaults_PodSpec(obj *PodSpec) { + if obj.DNSPolicy == "" { + obj.DNSPolicy = DNSClusterFirst + } + if obj.RestartPolicy == "" { + obj.RestartPolicy = RestartPolicyAlways + } + if 
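[Illustrative aside, not part of the vendored diff] SetDefaults_Pod above copies each container's limits into its requests for any resource the user left unset. A stripped-down sketch of that merge, using plain string-keyed maps instead of ResourceList:

package main

import "fmt"

func defaultRequestsFromLimits(limits, requests map[string]string) map[string]string {
    if limits == nil {
        return requests
    }
    if requests == nil {
        requests = map[string]string{}
    }
    for name, v := range limits {
        if _, exists := requests[name]; !exists {
            requests[name] = v // only fill in what the user did not set
        }
    }
    return requests
}

func main() {
    limits := map[string]string{"cpu": "500m", "memory": "128Mi"}
    requests := map[string]string{"cpu": "250m"}
    fmt.Println(defaultRequestsFromLimits(limits, requests))
    // map[cpu:250m memory:128Mi]
}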
obj.HostNetwork { + defaultHostNetworkPorts(&obj.Containers) + defaultHostNetworkPorts(&obj.InitContainers) + } + if obj.SecurityContext == nil { + obj.SecurityContext = &PodSecurityContext{} + } + if obj.TerminationGracePeriodSeconds == nil { + period := int64(DefaultTerminationGracePeriodSeconds) + obj.TerminationGracePeriodSeconds = &period + } + if obj.SchedulerName == "" { + obj.SchedulerName = DefaultSchedulerName + } +} +func SetDefaults_Probe(obj *Probe) { + if obj.TimeoutSeconds == 0 { + obj.TimeoutSeconds = 1 + } + if obj.PeriodSeconds == 0 { + obj.PeriodSeconds = 10 + } + if obj.SuccessThreshold == 0 { + obj.SuccessThreshold = 1 + } + if obj.FailureThreshold == 0 { + obj.FailureThreshold = 3 + } +} +func SetDefaults_SecretVolumeSource(obj *SecretVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(SecretVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} +func SetDefaults_ConfigMapVolumeSource(obj *ConfigMapVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(ConfigMapVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} +func SetDefaults_DownwardAPIVolumeSource(obj *DownwardAPIVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(DownwardAPIVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} +func SetDefaults_Secret(obj *Secret) { + if obj.Type == "" { + obj.Type = SecretTypeOpaque + } +} +func SetDefaults_ProjectedVolumeSource(obj *ProjectedVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(ProjectedVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} +func SetDefaults_PersistentVolume(obj *PersistentVolume) { + if obj.Status.Phase == "" { + obj.Status.Phase = VolumePending + } + if obj.Spec.PersistentVolumeReclaimPolicy == "" { + obj.Spec.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimRetain + } +} +func SetDefaults_PersistentVolumeClaim(obj *PersistentVolumeClaim) { + if obj.Status.Phase == "" { + obj.Status.Phase = ClaimPending + } +} +func SetDefaults_ISCSIVolumeSource(obj *ISCSIVolumeSource) { + if obj.ISCSIInterface == "" { + obj.ISCSIInterface = "default" + } +} +func SetDefaults_AzureDiskVolumeSource(obj *AzureDiskVolumeSource) { + if obj.CachingMode == nil { + obj.CachingMode = new(AzureDataDiskCachingMode) + *obj.CachingMode = AzureDataDiskCachingNone + } + if obj.FSType == nil { + obj.FSType = new(string) + *obj.FSType = "ext4" + } + if obj.ReadOnly == nil { + obj.ReadOnly = new(bool) + *obj.ReadOnly = false + } +} +func SetDefaults_Endpoints(obj *Endpoints) { + for i := range obj.Subsets { + ss := &obj.Subsets[i] + for i := range ss.Ports { + ep := &ss.Ports[i] + if ep.Protocol == "" { + ep.Protocol = ProtocolTCP + } + } + } +} +func SetDefaults_HTTPGetAction(obj *HTTPGetAction) { + if obj.Path == "" { + obj.Path = "/" + } + if obj.Scheme == "" { + obj.Scheme = URISchemeHTTP + } +} +func SetDefaults_NamespaceStatus(obj *NamespaceStatus) { + if obj.Phase == "" { + obj.Phase = NamespaceActive + } +} +func SetDefaults_Node(obj *Node) { + if obj.Spec.ExternalID == "" { + obj.Spec.ExternalID = obj.Name + } +} +func SetDefaults_NodeStatus(obj *NodeStatus) { + if obj.Allocatable == nil && obj.Capacity != nil { + obj.Allocatable = make(ResourceList, len(obj.Capacity)) + for key, value := range obj.Capacity { + obj.Allocatable[key] = *(value.Copy()) + } + obj.Allocatable = obj.Capacity + } +} +func SetDefaults_ObjectFieldSelector(obj *ObjectFieldSelector) { + if obj.APIVersion == "" { + obj.APIVersion = "v1" + } +} +func SetDefaults_LimitRangeItem(obj *LimitRangeItem) { + // for container limits, we apply 
default values + if obj.Type == LimitTypeContainer { + + if obj.Default == nil { + obj.Default = make(ResourceList) + } + if obj.DefaultRequest == nil { + obj.DefaultRequest = make(ResourceList) + } + + // If a default limit is unspecified, but the max is specified, default the limit to the max + for key, value := range obj.Max { + if _, exists := obj.Default[key]; !exists { + obj.Default[key] = *(value.Copy()) + } + } + // If a default limit is specified, but the default request is not, default request to limit + for key, value := range obj.Default { + if _, exists := obj.DefaultRequest[key]; !exists { + obj.DefaultRequest[key] = *(value.Copy()) + } + } + // If a default request is not specified, but the min is provided, default request to the min + for key, value := range obj.Min { + if _, exists := obj.DefaultRequest[key]; !exists { + obj.DefaultRequest[key] = *(value.Copy()) + } + } + } +} +func SetDefaults_ConfigMap(obj *ConfigMap) { + if obj.Data == nil { + obj.Data = make(map[string]string) + } +} + +// With host networking default all container ports to host ports. +func defaultHostNetworkPorts(containers *[]Container) { + for i := range *containers { + for j := range (*containers)[i].Ports { + if (*containers)[i].Ports[j].HostPort == 0 { + (*containers)[i].Ports[j].HostPort = (*containers)[i].Ports[j].ContainerPort + } + } + } +} + +func SetDefaults_RBDVolumeSource(obj *RBDVolumeSource) { + if obj.RBDPool == "" { + obj.RBDPool = "rbd" + } + if obj.RadosUser == "" { + obj.RadosUser = "admin" + } + if obj.Keyring == "" { + obj.Keyring = "/etc/ceph/keyring" + } +} + +func SetDefaults_ScaleIOVolumeSource(obj *ScaleIOVolumeSource) { + if obj.ProtectionDomain == "" { + obj.ProtectionDomain = "default" + } + if obj.StoragePool == "" { + obj.StoragePool = "default" + } + if obj.StorageMode == "" { + obj.StorageMode = "ThinProvisioned" + } + if obj.FSType == "" { + obj.FSType = "xfs" + } +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/doc.go b/vendor/k8s.io/client-go/pkg/api/v1/doc.go new file mode 100644 index 0000000000..0fdd87f750 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/k8s.io/client-go/pkg/api/v1/generate.go b/vendor/k8s.io/client-go/pkg/api/v1/generate.go new file mode 100644 index 0000000000..b8c44e4c7f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/generate.go @@ -0,0 +1,64 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
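[Illustrative aside, not part of the vendored diff] SetDefaults_LimitRangeItem above fills gaps in a fixed order: a missing default limit is taken from max, a missing default request from the default limit, and finally from min. A compact sketch of that cascade with plain maps standing in for ResourceList:

package main

import "fmt"

func fillIfMissing(dst, src map[string]string) {
    for k, v := range src {
        if _, ok := dst[k]; !ok {
            dst[k] = v
        }
    }
}

func main() {
    max := map[string]string{"cpu": "2"}
    min := map[string]string{"memory": "64Mi"}
    def := map[string]string{}        // default limits
    defRequest := map[string]string{} // default requests

    fillIfMissing(def, max)        // default limit <- max
    fillIfMissing(defRequest, def) // default request <- default limit
    fillIfMissing(defRequest, min) // default request <- min
    fmt.Println(def, defRequest)
    // map[cpu:2] map[cpu:2 memory:64Mi]
}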
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + utilrand "k8s.io/apimachinery/pkg/util/rand" +) + +// NameGenerator generates names for objects. Some backends may have more information +// available to guide selection of new names and this interface hides those details. +type NameGenerator interface { + // GenerateName generates a valid name from the base name, adding a random suffix to the + // the base. If base is valid, the returned name must also be valid. The generator is + // responsible for knowing the maximum valid name length. + GenerateName(base string) string +} + +// GenerateName will resolve the object name of the provided ObjectMeta to a generated version if +// necessary. It expects that validation for ObjectMeta has already completed (that Base is a +// valid name) and that the NameGenerator generates a name that is also valid. +func GenerateName(u NameGenerator, meta *ObjectMeta) { + if len(meta.GenerateName) == 0 || len(meta.Name) != 0 { + return + } + meta.Name = u.GenerateName(meta.GenerateName) +} + +// simpleNameGenerator generates random names. +type simpleNameGenerator struct{} + +// SimpleNameGenerator is a generator that returns the name plus a random suffix of five alphanumerics +// when a name is requested. The string is guaranteed to not exceed the length of a standard Kubernetes +// name (63 characters) +var SimpleNameGenerator NameGenerator = simpleNameGenerator{} + +const ( + // TODO: make this flexible for non-core resources with alternate naming rules. + maxNameLength = 63 + randomLength = 5 + maxGeneratedNameLength = maxNameLength - randomLength +) + +func (simpleNameGenerator) GenerateName(base string) string { + if len(base) > maxGeneratedNameLength { + base = base[:maxGeneratedNameLength] + } + return fmt.Sprintf("%s%s", base, utilrand.String(randomLength)) +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/generated.pb.go b/vendor/k8s.io/client-go/pkg/api/v1/generated.pb.go new file mode 100644 index 0000000000..20f632dc99 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/generated.pb.go @@ -0,0 +1,43238 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/api/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. 
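[Illustrative aside, not part of the vendored diff] simpleNameGenerator above truncates the base name so that, together with its five random trailing characters, a generated name never exceeds 63 characters. A self-contained sketch of the same idea with a local random suffix; the alphabet and helper are illustrative only (the real code uses k8s.io/apimachinery/pkg/util/rand):

package main

import (
    "fmt"
    "math/rand"
    "strings"
)

const (
    maxNameLength          = 63
    randomLength           = 5
    maxGeneratedNameLength = maxNameLength - randomLength
)

const alphanums = "bcdfghjklmnpqrstvwxz2456789" // illustrative alphabet

func generateName(base string) string {
    if len(base) > maxGeneratedNameLength {
        base = base[:maxGeneratedNameLength] // leave room for the random suffix
    }
    suffix := make([]byte, randomLength)
    for i := range suffix {
        suffix[i] = alphanums[rand.Intn(len(alphanums))]
    }
    return base + string(suffix)
}

func main() {
    fmt.Println(generateName("nginx-deployment-"))                                  // e.g. "nginx-deployment-x7k2q"
    fmt.Println(len(generateName(strings.Repeat("a", 100))) <= maxNameLength)       // true
}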
+ + It is generated from these files: + k8s.io/kubernetes/pkg/api/v1/generated.proto + + It has these top-level messages: + AWSElasticBlockStoreVolumeSource + Affinity + AttachedVolume + AvoidPods + AzureDiskVolumeSource + AzureFileVolumeSource + Binding + Capabilities + CephFSVolumeSource + CinderVolumeSource + ComponentCondition + ComponentStatus + ComponentStatusList + ConfigMap + ConfigMapEnvSource + ConfigMapKeySelector + ConfigMapList + ConfigMapProjection + ConfigMapVolumeSource + Container + ContainerImage + ContainerPort + ContainerState + ContainerStateRunning + ContainerStateTerminated + ContainerStateWaiting + ContainerStatus + DaemonEndpoint + DeleteOptions + DownwardAPIProjection + DownwardAPIVolumeFile + DownwardAPIVolumeSource + EmptyDirVolumeSource + EndpointAddress + EndpointPort + EndpointSubset + Endpoints + EndpointsList + EnvFromSource + EnvVar + EnvVarSource + Event + EventList + EventSource + ExecAction + FCVolumeSource + FlexVolumeSource + FlockerVolumeSource + GCEPersistentDiskVolumeSource + GitRepoVolumeSource + GlusterfsVolumeSource + HTTPGetAction + HTTPHeader + Handler + HostPathVolumeSource + ISCSIVolumeSource + KeyToPath + Lifecycle + LimitRange + LimitRangeItem + LimitRangeList + LimitRangeSpec + List + ListOptions + LoadBalancerIngress + LoadBalancerStatus + LocalObjectReference + NFSVolumeSource + Namespace + NamespaceList + NamespaceSpec + NamespaceStatus + Node + NodeAddress + NodeAffinity + NodeCondition + NodeDaemonEndpoints + NodeList + NodeProxyOptions + NodeResources + NodeSelector + NodeSelectorRequirement + NodeSelectorTerm + NodeSpec + NodeStatus + NodeSystemInfo + ObjectFieldSelector + ObjectMeta + ObjectReference + PersistentVolume + PersistentVolumeClaim + PersistentVolumeClaimList + PersistentVolumeClaimSpec + PersistentVolumeClaimStatus + PersistentVolumeClaimVolumeSource + PersistentVolumeList + PersistentVolumeSource + PersistentVolumeSpec + PersistentVolumeStatus + PhotonPersistentDiskVolumeSource + Pod + PodAffinity + PodAffinityTerm + PodAntiAffinity + PodAttachOptions + PodCondition + PodExecOptions + PodList + PodLogOptions + PodPortForwardOptions + PodProxyOptions + PodSecurityContext + PodSignature + PodSpec + PodStatus + PodStatusResult + PodTemplate + PodTemplateList + PodTemplateSpec + PortworxVolumeSource + Preconditions + PreferAvoidPodsEntry + PreferredSchedulingTerm + Probe + ProjectedVolumeSource + QuobyteVolumeSource + RBDVolumeSource + RangeAllocation + ReplicationController + ReplicationControllerCondition + ReplicationControllerList + ReplicationControllerSpec + ReplicationControllerStatus + ResourceFieldSelector + ResourceQuota + ResourceQuotaList + ResourceQuotaSpec + ResourceQuotaStatus + ResourceRequirements + SELinuxOptions + ScaleIOVolumeSource + Secret + SecretEnvSource + SecretKeySelector + SecretList + SecretProjection + SecretVolumeSource + SecurityContext + SerializedReference + Service + ServiceAccount + ServiceAccountList + ServiceList + ServicePort + ServiceProxyOptions + ServiceSpec + ServiceStatus + Sysctl + TCPSocketAction + Taint + Toleration + Volume + VolumeMount + VolumeProjection + VolumeSource + VsphereVirtualDiskVolumeSource + WeightedPodAffinityTerm +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" + 
+import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *AWSElasticBlockStoreVolumeSource) Reset() { *m = AWSElasticBlockStoreVolumeSource{} } +func (*AWSElasticBlockStoreVolumeSource) ProtoMessage() {} +func (*AWSElasticBlockStoreVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{0} +} + +func (m *Affinity) Reset() { *m = Affinity{} } +func (*Affinity) ProtoMessage() {} +func (*Affinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *AttachedVolume) Reset() { *m = AttachedVolume{} } +func (*AttachedVolume) ProtoMessage() {} +func (*AttachedVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *AvoidPods) Reset() { *m = AvoidPods{} } +func (*AvoidPods) ProtoMessage() {} +func (*AvoidPods) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} } +func (*AzureDiskVolumeSource) ProtoMessage() {} +func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} } +func (*AzureFileVolumeSource) ProtoMessage() {} +func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *Binding) Reset() { *m = Binding{} } +func (*Binding) ProtoMessage() {} +func (*Binding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *Capabilities) Reset() { *m = Capabilities{} } +func (*Capabilities) ProtoMessage() {} +func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } +func (*CephFSVolumeSource) ProtoMessage() {} +func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } +func (*CinderVolumeSource) ProtoMessage() {} +func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } +func (*ComponentCondition) ProtoMessage() {} +func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } +func (*ComponentStatus) ProtoMessage() {} +func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } +func (*ComponentStatusList) ProtoMessage() {} +func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func (m *ConfigMap) Reset() { *m = ConfigMap{} } +func (*ConfigMap) ProtoMessage() {} +func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } 
+func (*ConfigMapEnvSource) ProtoMessage() {} +func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + +func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } +func (*ConfigMapKeySelector) ProtoMessage() {} +func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + +func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } +func (*ConfigMapList) ProtoMessage() {} +func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } + +func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } +func (*ConfigMapProjection) ProtoMessage() {} +func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } + +func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } +func (*ConfigMapVolumeSource) ProtoMessage() {} +func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } + +func (m *Container) Reset() { *m = Container{} } +func (*Container) ProtoMessage() {} +func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } + +func (m *ContainerImage) Reset() { *m = ContainerImage{} } +func (*ContainerImage) ProtoMessage() {} +func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } + +func (m *ContainerPort) Reset() { *m = ContainerPort{} } +func (*ContainerPort) ProtoMessage() {} +func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } + +func (m *ContainerState) Reset() { *m = ContainerState{} } +func (*ContainerState) ProtoMessage() {} +func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } + +func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } +func (*ContainerStateRunning) ProtoMessage() {} +func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } + +func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } +func (*ContainerStateTerminated) ProtoMessage() {} +func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{24} +} + +func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } +func (*ContainerStateWaiting) ProtoMessage() {} +func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } + +func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } +func (*ContainerStatus) ProtoMessage() {} +func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } + +func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } +func (*DaemonEndpoint) ProtoMessage() {} +func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } + +func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } +func (*DeleteOptions) ProtoMessage() {} +func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } + +func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } +func (*DownwardAPIProjection) ProtoMessage() {} +func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } + +func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } +func (*DownwardAPIVolumeFile) ProtoMessage() {} +func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { 
return fileDescriptorGenerated, []int{30} } + +func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } +func (*DownwardAPIVolumeSource) ProtoMessage() {} +func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{31} +} + +func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } +func (*EmptyDirVolumeSource) ProtoMessage() {} +func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } + +func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } +func (*EndpointAddress) ProtoMessage() {} +func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } + +func (m *EndpointPort) Reset() { *m = EndpointPort{} } +func (*EndpointPort) ProtoMessage() {} +func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } + +func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } +func (*EndpointSubset) ProtoMessage() {} +func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } + +func (m *Endpoints) Reset() { *m = Endpoints{} } +func (*Endpoints) ProtoMessage() {} +func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } + +func (m *EndpointsList) Reset() { *m = EndpointsList{} } +func (*EndpointsList) ProtoMessage() {} +func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } + +func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } +func (*EnvFromSource) ProtoMessage() {} +func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } + +func (m *EnvVar) Reset() { *m = EnvVar{} } +func (*EnvVar) ProtoMessage() {} +func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } + +func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } +func (*EnvVarSource) ProtoMessage() {} +func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } + +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } + +func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } + +func (m *EventSource) Reset() { *m = EventSource{} } +func (*EventSource) ProtoMessage() {} +func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } + +func (m *ExecAction) Reset() { *m = ExecAction{} } +func (*ExecAction) ProtoMessage() {} +func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } + +func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } +func (*FCVolumeSource) ProtoMessage() {} +func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } + +func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } +func (*FlexVolumeSource) ProtoMessage() {} +func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } + +func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } +func (*FlockerVolumeSource) ProtoMessage() {} +func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } + +func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } +func 
(*GCEPersistentDiskVolumeSource) ProtoMessage() {} +func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{48} +} + +func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } +func (*GitRepoVolumeSource) ProtoMessage() {} +func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } + +func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } +func (*GlusterfsVolumeSource) ProtoMessage() {} +func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } + +func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } +func (*HTTPGetAction) ProtoMessage() {} +func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } + +func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } +func (*HTTPHeader) ProtoMessage() {} +func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } + +func (m *Handler) Reset() { *m = Handler{} } +func (*Handler) ProtoMessage() {} +func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } + +func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } +func (*HostPathVolumeSource) ProtoMessage() {} +func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } + +func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } +func (*ISCSIVolumeSource) ProtoMessage() {} +func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } + +func (m *KeyToPath) Reset() { *m = KeyToPath{} } +func (*KeyToPath) ProtoMessage() {} +func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } + +func (m *Lifecycle) Reset() { *m = Lifecycle{} } +func (*Lifecycle) ProtoMessage() {} +func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } + +func (m *LimitRange) Reset() { *m = LimitRange{} } +func (*LimitRange) ProtoMessage() {} +func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } + +func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } +func (*LimitRangeItem) ProtoMessage() {} +func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } + +func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } +func (*LimitRangeList) ProtoMessage() {} +func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } + +func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } +func (*LimitRangeSpec) ProtoMessage() {} +func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } + +func (m *List) Reset() { *m = List{} } +func (*List) ProtoMessage() {} +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } + +func (m *ListOptions) Reset() { *m = ListOptions{} } +func (*ListOptions) ProtoMessage() {} +func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } + +func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } +func (*LoadBalancerIngress) ProtoMessage() {} +func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } + +func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } +func (*LoadBalancerStatus) ProtoMessage() {} +func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{65} } + +func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } +func (*LocalObjectReference) ProtoMessage() {} +func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } + +func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } +func (*NFSVolumeSource) ProtoMessage() {} +func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } + +func (m *Namespace) Reset() { *m = Namespace{} } +func (*Namespace) ProtoMessage() {} +func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } + +func (m *NamespaceList) Reset() { *m = NamespaceList{} } +func (*NamespaceList) ProtoMessage() {} +func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } + +func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } +func (*NamespaceSpec) ProtoMessage() {} +func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } + +func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } +func (*NamespaceStatus) ProtoMessage() {} +func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } + +func (m *Node) Reset() { *m = Node{} } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } + +func (m *NodeAddress) Reset() { *m = NodeAddress{} } +func (*NodeAddress) ProtoMessage() {} +func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } + +func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } +func (*NodeAffinity) ProtoMessage() {} +func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } + +func (m *NodeCondition) Reset() { *m = NodeCondition{} } +func (*NodeCondition) ProtoMessage() {} +func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } + +func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } +func (*NodeDaemonEndpoints) ProtoMessage() {} +func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } + +func (m *NodeList) Reset() { *m = NodeList{} } +func (*NodeList) ProtoMessage() {} +func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } + +func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } +func (*NodeProxyOptions) ProtoMessage() {} +func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } + +func (m *NodeResources) Reset() { *m = NodeResources{} } +func (*NodeResources) ProtoMessage() {} +func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } + +func (m *NodeSelector) Reset() { *m = NodeSelector{} } +func (*NodeSelector) ProtoMessage() {} +func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } + +func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } +func (*NodeSelectorRequirement) ProtoMessage() {} +func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{81} +} + +func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } +func (*NodeSelectorTerm) ProtoMessage() {} +func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } + +func (m *NodeSpec) Reset() { *m = NodeSpec{} } +func (*NodeSpec) ProtoMessage() {} 
+func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } + +func (m *NodeStatus) Reset() { *m = NodeStatus{} } +func (*NodeStatus) ProtoMessage() {} +func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } + +func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } +func (*NodeSystemInfo) ProtoMessage() {} +func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } + +func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } +func (*ObjectFieldSelector) ProtoMessage() {} +func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } + +func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } +func (*ObjectMeta) ProtoMessage() {} +func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } + +func (m *ObjectReference) Reset() { *m = ObjectReference{} } +func (*ObjectReference) ProtoMessage() {} +func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } + +func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } +func (*PersistentVolume) ProtoMessage() {} +func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } + +func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } +func (*PersistentVolumeClaim) ProtoMessage() {} +func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } + +func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } +func (*PersistentVolumeClaimList) ProtoMessage() {} +func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{91} +} + +func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } +func (*PersistentVolumeClaimSpec) ProtoMessage() {} +func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{92} +} + +func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } +func (*PersistentVolumeClaimStatus) ProtoMessage() {} +func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{93} +} + +func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } +func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} +func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{94} +} + +func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } +func (*PersistentVolumeList) ProtoMessage() {} +func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } + +func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } +func (*PersistentVolumeSource) ProtoMessage() {} +func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } + +func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } +func (*PersistentVolumeSpec) ProtoMessage() {} +func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } + +func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } +func (*PersistentVolumeStatus) ProtoMessage() {} +func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } + +func (m *PhotonPersistentDiskVolumeSource) 
Reset() { *m = PhotonPersistentDiskVolumeSource{} } +func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} +func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{99} +} + +func (m *Pod) Reset() { *m = Pod{} } +func (*Pod) ProtoMessage() {} +func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} } + +func (m *PodAffinity) Reset() { *m = PodAffinity{} } +func (*PodAffinity) ProtoMessage() {} +func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{101} } + +func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } +func (*PodAffinityTerm) ProtoMessage() {} +func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{102} } + +func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } +func (*PodAntiAffinity) ProtoMessage() {} +func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{103} } + +func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } +func (*PodAttachOptions) ProtoMessage() {} +func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{104} } + +func (m *PodCondition) Reset() { *m = PodCondition{} } +func (*PodCondition) ProtoMessage() {} +func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{105} } + +func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } +func (*PodExecOptions) ProtoMessage() {} +func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{106} } + +func (m *PodList) Reset() { *m = PodList{} } +func (*PodList) ProtoMessage() {} +func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{107} } + +func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } +func (*PodLogOptions) ProtoMessage() {} +func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} } + +func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } +func (*PodPortForwardOptions) ProtoMessage() {} +func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{109} } + +func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } +func (*PodProxyOptions) ProtoMessage() {} +func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{110} } + +func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } +func (*PodSecurityContext) ProtoMessage() {} +func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} } + +func (m *PodSignature) Reset() { *m = PodSignature{} } +func (*PodSignature) ProtoMessage() {} +func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } + +func (m *PodSpec) Reset() { *m = PodSpec{} } +func (*PodSpec) ProtoMessage() {} +func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } + +func (m *PodStatus) Reset() { *m = PodStatus{} } +func (*PodStatus) ProtoMessage() {} +func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } + +func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } +func (*PodStatusResult) ProtoMessage() {} +func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } + +func (m *PodTemplate) Reset() { *m = PodTemplate{} } +func (*PodTemplate) ProtoMessage() {} +func (*PodTemplate) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{116} } + +func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } +func (*PodTemplateList) ProtoMessage() {} +func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } + +func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } +func (*PodTemplateSpec) ProtoMessage() {} +func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } + +func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } +func (*PortworxVolumeSource) ProtoMessage() {} +func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } + +func (m *Preconditions) Reset() { *m = Preconditions{} } +func (*Preconditions) ProtoMessage() {} +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } + +func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } +func (*PreferAvoidPodsEntry) ProtoMessage() {} +func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } + +func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } +func (*PreferredSchedulingTerm) ProtoMessage() {} +func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{122} +} + +func (m *Probe) Reset() { *m = Probe{} } +func (*Probe) ProtoMessage() {} +func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } + +func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } +func (*ProjectedVolumeSource) ProtoMessage() {} +func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } + +func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } +func (*QuobyteVolumeSource) ProtoMessage() {} +func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } + +func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } +func (*RBDVolumeSource) ProtoMessage() {} +func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } + +func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } +func (*RangeAllocation) ProtoMessage() {} +func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } + +func (m *ReplicationController) Reset() { *m = ReplicationController{} } +func (*ReplicationController) ProtoMessage() {} +func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } + +func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } +func (*ReplicationControllerCondition) ProtoMessage() {} +func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{129} +} + +func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } +func (*ReplicationControllerList) ProtoMessage() {} +func (*ReplicationControllerList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{130} +} + +func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } +func (*ReplicationControllerSpec) ProtoMessage() {} +func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{131} +} + +func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } +func (*ReplicationControllerStatus) ProtoMessage() 
{} +func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{132} +} + +func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } +func (*ResourceFieldSelector) ProtoMessage() {} +func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } + +func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } +func (*ResourceQuota) ProtoMessage() {} +func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } + +func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } +func (*ResourceQuotaList) ProtoMessage() {} +func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } + +func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } +func (*ResourceQuotaSpec) ProtoMessage() {} +func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} } + +func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } +func (*ResourceQuotaStatus) ProtoMessage() {} +func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } + +func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } +func (*ResourceRequirements) ProtoMessage() {} +func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{138} } + +func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } +func (*SELinuxOptions) ProtoMessage() {} +func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } + +func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } +func (*ScaleIOVolumeSource) ProtoMessage() {} +func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} } + +func (m *Secret) Reset() { *m = Secret{} } +func (*Secret) ProtoMessage() {} +func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } + +func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } +func (*SecretEnvSource) ProtoMessage() {} +func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{142} } + +func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } +func (*SecretKeySelector) ProtoMessage() {} +func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } + +func (m *SecretList) Reset() { *m = SecretList{} } +func (*SecretList) ProtoMessage() {} +func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{144} } + +func (m *SecretProjection) Reset() { *m = SecretProjection{} } +func (*SecretProjection) ProtoMessage() {} +func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{145} } + +func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } +func (*SecretVolumeSource) ProtoMessage() {} +func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{146} } + +func (m *SecurityContext) Reset() { *m = SecurityContext{} } +func (*SecurityContext) ProtoMessage() {} +func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{147} } + +func (m *SerializedReference) Reset() { *m = SerializedReference{} } +func (*SerializedReference) ProtoMessage() {} +func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{148} } + +func (m *Service) Reset() { *m = 
Service{} } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } + +func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } +func (*ServiceAccount) ProtoMessage() {} +func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } + +func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } +func (*ServiceAccountList) ProtoMessage() {} +func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } + +func (m *ServiceList) Reset() { *m = ServiceList{} } +func (*ServiceList) ProtoMessage() {} +func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } + +func (m *ServicePort) Reset() { *m = ServicePort{} } +func (*ServicePort) ProtoMessage() {} +func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } + +func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } +func (*ServiceProxyOptions) ProtoMessage() {} +func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } + +func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } +func (*ServiceSpec) ProtoMessage() {} +func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} } + +func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } +func (*ServiceStatus) ProtoMessage() {} +func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } + +func (m *Sysctl) Reset() { *m = Sysctl{} } +func (*Sysctl) ProtoMessage() {} +func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } + +func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } +func (*TCPSocketAction) ProtoMessage() {} +func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } + +func (m *Taint) Reset() { *m = Taint{} } +func (*Taint) ProtoMessage() {} +func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } + +func (m *Toleration) Reset() { *m = Toleration{} } +func (*Toleration) ProtoMessage() {} +func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{160} } + +func (m *Volume) Reset() { *m = Volume{} } +func (*Volume) ProtoMessage() {} +func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } + +func (m *VolumeMount) Reset() { *m = VolumeMount{} } +func (*VolumeMount) ProtoMessage() {} +func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } + +func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } +func (*VolumeProjection) ProtoMessage() {} +func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } + +func (m *VolumeSource) Reset() { *m = VolumeSource{} } +func (*VolumeSource) ProtoMessage() {} +func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } + +func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } +func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} +func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{165} +} + +func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } +func (*WeightedPodAffinityTerm) ProtoMessage() {} +func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { + return 
fileDescriptorGenerated, []int{166} +} + +func init() { + proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.AWSElasticBlockStoreVolumeSource") + proto.RegisterType((*Affinity)(nil), "k8s.io.client-go.pkg.api.v1.Affinity") + proto.RegisterType((*AttachedVolume)(nil), "k8s.io.client-go.pkg.api.v1.AttachedVolume") + proto.RegisterType((*AvoidPods)(nil), "k8s.io.client-go.pkg.api.v1.AvoidPods") + proto.RegisterType((*AzureDiskVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.AzureDiskVolumeSource") + proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.AzureFileVolumeSource") + proto.RegisterType((*Binding)(nil), "k8s.io.client-go.pkg.api.v1.Binding") + proto.RegisterType((*Capabilities)(nil), "k8s.io.client-go.pkg.api.v1.Capabilities") + proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.CephFSVolumeSource") + proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.CinderVolumeSource") + proto.RegisterType((*ComponentCondition)(nil), "k8s.io.client-go.pkg.api.v1.ComponentCondition") + proto.RegisterType((*ComponentStatus)(nil), "k8s.io.client-go.pkg.api.v1.ComponentStatus") + proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.client-go.pkg.api.v1.ComponentStatusList") + proto.RegisterType((*ConfigMap)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMap") + proto.RegisterType((*ConfigMapEnvSource)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMapEnvSource") + proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMapKeySelector") + proto.RegisterType((*ConfigMapList)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMapList") + proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMapProjection") + proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMapVolumeSource") + proto.RegisterType((*Container)(nil), "k8s.io.client-go.pkg.api.v1.Container") + proto.RegisterType((*ContainerImage)(nil), "k8s.io.client-go.pkg.api.v1.ContainerImage") + proto.RegisterType((*ContainerPort)(nil), "k8s.io.client-go.pkg.api.v1.ContainerPort") + proto.RegisterType((*ContainerState)(nil), "k8s.io.client-go.pkg.api.v1.ContainerState") + proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.client-go.pkg.api.v1.ContainerStateRunning") + proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.client-go.pkg.api.v1.ContainerStateTerminated") + proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.client-go.pkg.api.v1.ContainerStateWaiting") + proto.RegisterType((*ContainerStatus)(nil), "k8s.io.client-go.pkg.api.v1.ContainerStatus") + proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.client-go.pkg.api.v1.DaemonEndpoint") + proto.RegisterType((*DeleteOptions)(nil), "k8s.io.client-go.pkg.api.v1.DeleteOptions") + proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.client-go.pkg.api.v1.DownwardAPIProjection") + proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.client-go.pkg.api.v1.DownwardAPIVolumeFile") + proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.DownwardAPIVolumeSource") + proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.EmptyDirVolumeSource") + proto.RegisterType((*EndpointAddress)(nil), "k8s.io.client-go.pkg.api.v1.EndpointAddress") + proto.RegisterType((*EndpointPort)(nil), "k8s.io.client-go.pkg.api.v1.EndpointPort") + proto.RegisterType((*EndpointSubset)(nil), "k8s.io.client-go.pkg.api.v1.EndpointSubset") + 
proto.RegisterType((*Endpoints)(nil), "k8s.io.client-go.pkg.api.v1.Endpoints") + proto.RegisterType((*EndpointsList)(nil), "k8s.io.client-go.pkg.api.v1.EndpointsList") + proto.RegisterType((*EnvFromSource)(nil), "k8s.io.client-go.pkg.api.v1.EnvFromSource") + proto.RegisterType((*EnvVar)(nil), "k8s.io.client-go.pkg.api.v1.EnvVar") + proto.RegisterType((*EnvVarSource)(nil), "k8s.io.client-go.pkg.api.v1.EnvVarSource") + proto.RegisterType((*Event)(nil), "k8s.io.client-go.pkg.api.v1.Event") + proto.RegisterType((*EventList)(nil), "k8s.io.client-go.pkg.api.v1.EventList") + proto.RegisterType((*EventSource)(nil), "k8s.io.client-go.pkg.api.v1.EventSource") + proto.RegisterType((*ExecAction)(nil), "k8s.io.client-go.pkg.api.v1.ExecAction") + proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.FCVolumeSource") + proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.FlexVolumeSource") + proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.FlockerVolumeSource") + proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.GCEPersistentDiskVolumeSource") + proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.GitRepoVolumeSource") + proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.GlusterfsVolumeSource") + proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.client-go.pkg.api.v1.HTTPGetAction") + proto.RegisterType((*HTTPHeader)(nil), "k8s.io.client-go.pkg.api.v1.HTTPHeader") + proto.RegisterType((*Handler)(nil), "k8s.io.client-go.pkg.api.v1.Handler") + proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.HostPathVolumeSource") + proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.ISCSIVolumeSource") + proto.RegisterType((*KeyToPath)(nil), "k8s.io.client-go.pkg.api.v1.KeyToPath") + proto.RegisterType((*Lifecycle)(nil), "k8s.io.client-go.pkg.api.v1.Lifecycle") + proto.RegisterType((*LimitRange)(nil), "k8s.io.client-go.pkg.api.v1.LimitRange") + proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.client-go.pkg.api.v1.LimitRangeItem") + proto.RegisterType((*LimitRangeList)(nil), "k8s.io.client-go.pkg.api.v1.LimitRangeList") + proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.client-go.pkg.api.v1.LimitRangeSpec") + proto.RegisterType((*List)(nil), "k8s.io.client-go.pkg.api.v1.List") + proto.RegisterType((*ListOptions)(nil), "k8s.io.client-go.pkg.api.v1.ListOptions") + proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.client-go.pkg.api.v1.LoadBalancerIngress") + proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.client-go.pkg.api.v1.LoadBalancerStatus") + proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.client-go.pkg.api.v1.LocalObjectReference") + proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.NFSVolumeSource") + proto.RegisterType((*Namespace)(nil), "k8s.io.client-go.pkg.api.v1.Namespace") + proto.RegisterType((*NamespaceList)(nil), "k8s.io.client-go.pkg.api.v1.NamespaceList") + proto.RegisterType((*NamespaceSpec)(nil), "k8s.io.client-go.pkg.api.v1.NamespaceSpec") + proto.RegisterType((*NamespaceStatus)(nil), "k8s.io.client-go.pkg.api.v1.NamespaceStatus") + proto.RegisterType((*Node)(nil), "k8s.io.client-go.pkg.api.v1.Node") + proto.RegisterType((*NodeAddress)(nil), "k8s.io.client-go.pkg.api.v1.NodeAddress") + proto.RegisterType((*NodeAffinity)(nil), "k8s.io.client-go.pkg.api.v1.NodeAffinity") + proto.RegisterType((*NodeCondition)(nil), 
"k8s.io.client-go.pkg.api.v1.NodeCondition") + proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.client-go.pkg.api.v1.NodeDaemonEndpoints") + proto.RegisterType((*NodeList)(nil), "k8s.io.client-go.pkg.api.v1.NodeList") + proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.client-go.pkg.api.v1.NodeProxyOptions") + proto.RegisterType((*NodeResources)(nil), "k8s.io.client-go.pkg.api.v1.NodeResources") + proto.RegisterType((*NodeSelector)(nil), "k8s.io.client-go.pkg.api.v1.NodeSelector") + proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.client-go.pkg.api.v1.NodeSelectorRequirement") + proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.client-go.pkg.api.v1.NodeSelectorTerm") + proto.RegisterType((*NodeSpec)(nil), "k8s.io.client-go.pkg.api.v1.NodeSpec") + proto.RegisterType((*NodeStatus)(nil), "k8s.io.client-go.pkg.api.v1.NodeStatus") + proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.client-go.pkg.api.v1.NodeSystemInfo") + proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.client-go.pkg.api.v1.ObjectFieldSelector") + proto.RegisterType((*ObjectMeta)(nil), "k8s.io.client-go.pkg.api.v1.ObjectMeta") + proto.RegisterType((*ObjectReference)(nil), "k8s.io.client-go.pkg.api.v1.ObjectReference") + proto.RegisterType((*PersistentVolume)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolume") + proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeClaim") + proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeClaimList") + proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeClaimSpec") + proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeClaimStatus") + proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeClaimVolumeSource") + proto.RegisterType((*PersistentVolumeList)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeList") + proto.RegisterType((*PersistentVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeSource") + proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeSpec") + proto.RegisterType((*PersistentVolumeStatus)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeStatus") + proto.RegisterType((*PhotonPersistentDiskVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.PhotonPersistentDiskVolumeSource") + proto.RegisterType((*Pod)(nil), "k8s.io.client-go.pkg.api.v1.Pod") + proto.RegisterType((*PodAffinity)(nil), "k8s.io.client-go.pkg.api.v1.PodAffinity") + proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.client-go.pkg.api.v1.PodAffinityTerm") + proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.client-go.pkg.api.v1.PodAntiAffinity") + proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.client-go.pkg.api.v1.PodAttachOptions") + proto.RegisterType((*PodCondition)(nil), "k8s.io.client-go.pkg.api.v1.PodCondition") + proto.RegisterType((*PodExecOptions)(nil), "k8s.io.client-go.pkg.api.v1.PodExecOptions") + proto.RegisterType((*PodList)(nil), "k8s.io.client-go.pkg.api.v1.PodList") + proto.RegisterType((*PodLogOptions)(nil), "k8s.io.client-go.pkg.api.v1.PodLogOptions") + proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.client-go.pkg.api.v1.PodPortForwardOptions") + proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.client-go.pkg.api.v1.PodProxyOptions") + proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.client-go.pkg.api.v1.PodSecurityContext") + 
proto.RegisterType((*PodSignature)(nil), "k8s.io.client-go.pkg.api.v1.PodSignature") + proto.RegisterType((*PodSpec)(nil), "k8s.io.client-go.pkg.api.v1.PodSpec") + proto.RegisterType((*PodStatus)(nil), "k8s.io.client-go.pkg.api.v1.PodStatus") + proto.RegisterType((*PodStatusResult)(nil), "k8s.io.client-go.pkg.api.v1.PodStatusResult") + proto.RegisterType((*PodTemplate)(nil), "k8s.io.client-go.pkg.api.v1.PodTemplate") + proto.RegisterType((*PodTemplateList)(nil), "k8s.io.client-go.pkg.api.v1.PodTemplateList") + proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.client-go.pkg.api.v1.PodTemplateSpec") + proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.PortworxVolumeSource") + proto.RegisterType((*Preconditions)(nil), "k8s.io.client-go.pkg.api.v1.Preconditions") + proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.client-go.pkg.api.v1.PreferAvoidPodsEntry") + proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.client-go.pkg.api.v1.PreferredSchedulingTerm") + proto.RegisterType((*Probe)(nil), "k8s.io.client-go.pkg.api.v1.Probe") + proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.ProjectedVolumeSource") + proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.QuobyteVolumeSource") + proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.RBDVolumeSource") + proto.RegisterType((*RangeAllocation)(nil), "k8s.io.client-go.pkg.api.v1.RangeAllocation") + proto.RegisterType((*ReplicationController)(nil), "k8s.io.client-go.pkg.api.v1.ReplicationController") + proto.RegisterType((*ReplicationControllerCondition)(nil), "k8s.io.client-go.pkg.api.v1.ReplicationControllerCondition") + proto.RegisterType((*ReplicationControllerList)(nil), "k8s.io.client-go.pkg.api.v1.ReplicationControllerList") + proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.client-go.pkg.api.v1.ReplicationControllerSpec") + proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.client-go.pkg.api.v1.ReplicationControllerStatus") + proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.client-go.pkg.api.v1.ResourceFieldSelector") + proto.RegisterType((*ResourceQuota)(nil), "k8s.io.client-go.pkg.api.v1.ResourceQuota") + proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.client-go.pkg.api.v1.ResourceQuotaList") + proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.client-go.pkg.api.v1.ResourceQuotaSpec") + proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.client-go.pkg.api.v1.ResourceQuotaStatus") + proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.client-go.pkg.api.v1.ResourceRequirements") + proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.client-go.pkg.api.v1.SELinuxOptions") + proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.ScaleIOVolumeSource") + proto.RegisterType((*Secret)(nil), "k8s.io.client-go.pkg.api.v1.Secret") + proto.RegisterType((*SecretEnvSource)(nil), "k8s.io.client-go.pkg.api.v1.SecretEnvSource") + proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.client-go.pkg.api.v1.SecretKeySelector") + proto.RegisterType((*SecretList)(nil), "k8s.io.client-go.pkg.api.v1.SecretList") + proto.RegisterType((*SecretProjection)(nil), "k8s.io.client-go.pkg.api.v1.SecretProjection") + proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.SecretVolumeSource") + proto.RegisterType((*SecurityContext)(nil), "k8s.io.client-go.pkg.api.v1.SecurityContext") + proto.RegisterType((*SerializedReference)(nil), 
"k8s.io.client-go.pkg.api.v1.SerializedReference") + proto.RegisterType((*Service)(nil), "k8s.io.client-go.pkg.api.v1.Service") + proto.RegisterType((*ServiceAccount)(nil), "k8s.io.client-go.pkg.api.v1.ServiceAccount") + proto.RegisterType((*ServiceAccountList)(nil), "k8s.io.client-go.pkg.api.v1.ServiceAccountList") + proto.RegisterType((*ServiceList)(nil), "k8s.io.client-go.pkg.api.v1.ServiceList") + proto.RegisterType((*ServicePort)(nil), "k8s.io.client-go.pkg.api.v1.ServicePort") + proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.client-go.pkg.api.v1.ServiceProxyOptions") + proto.RegisterType((*ServiceSpec)(nil), "k8s.io.client-go.pkg.api.v1.ServiceSpec") + proto.RegisterType((*ServiceStatus)(nil), "k8s.io.client-go.pkg.api.v1.ServiceStatus") + proto.RegisterType((*Sysctl)(nil), "k8s.io.client-go.pkg.api.v1.Sysctl") + proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.client-go.pkg.api.v1.TCPSocketAction") + proto.RegisterType((*Taint)(nil), "k8s.io.client-go.pkg.api.v1.Taint") + proto.RegisterType((*Toleration)(nil), "k8s.io.client-go.pkg.api.v1.Toleration") + proto.RegisterType((*Volume)(nil), "k8s.io.client-go.pkg.api.v1.Volume") + proto.RegisterType((*VolumeMount)(nil), "k8s.io.client-go.pkg.api.v1.VolumeMount") + proto.RegisterType((*VolumeProjection)(nil), "k8s.io.client-go.pkg.api.v1.VolumeProjection") + proto.RegisterType((*VolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.VolumeSource") + proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.VsphereVirtualDiskVolumeSource") + proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.client-go.pkg.api.v1.WeightedPodAffinityTerm") +} +func (m *AWSElasticBlockStoreVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *AWSElasticBlockStoreVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.VolumeID))) + i += copy(data[i:], m.VolumeID) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Partition)) + data[i] = 0x20 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *Affinity) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Affinity) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NodeAffinity != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.NodeAffinity.Size())) + n1, err := m.NodeAffinity.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.PodAffinity != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodAffinity.Size())) + n2, err := m.PodAffinity.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.PodAntiAffinity != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodAntiAffinity.Size())) + n3, err := m.PodAntiAffinity.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} + +func (m *AttachedVolume) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := 
m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *AttachedVolume) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.DevicePath))) + i += copy(data[i:], m.DevicePath) + return i, nil +} + +func (m *AvoidPods) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *AvoidPods) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.PreferAvoidPods) > 0 { + for _, msg := range m.PreferAvoidPods { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *AzureDiskVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *AzureDiskVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.DiskName))) + i += copy(data[i:], m.DiskName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.DataDiskURI))) + i += copy(data[i:], m.DataDiskURI) + if m.CachingMode != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.CachingMode))) + i += copy(data[i:], *m.CachingMode) + } + if m.FSType != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.FSType))) + i += copy(data[i:], *m.FSType) + } + if m.ReadOnly != nil { + data[i] = 0x28 + i++ + if *m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *AzureFileVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *AzureFileVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SecretName))) + i += copy(data[i:], m.SecretName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ShareName))) + i += copy(data[i:], m.ShareName) + data[i] = 0x18 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *Binding) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Binding) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) + n5, err := m.Target.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *Capabilities) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, 
err + } + return data[:n], nil +} + +func (m *Capabilities) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Add) > 0 { + for _, s := range m.Add { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Drop) > 0 { + for _, s := range m.Drop { + data[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *CephFSVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CephFSVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.User))) + i += copy(data[i:], m.User) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SecretFile))) + i += copy(data[i:], m.SecretFile) + if m.SecretRef != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) + n6, err := m.SecretRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + } + data[i] = 0x30 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *CinderVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CinderVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.VolumeID))) + i += copy(data[i:], m.VolumeID) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x18 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *ComponentCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ComponentCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Error))) + i += copy(data[i:], m.Error) + return i, nil +} + +func (m *ComponentStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ComponentStatus) MarshalTo(data []byte) 
(int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ComponentStatusList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ComponentStatusList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n8, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ConfigMap) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ConfigMap) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + if len(m.Data) > 0 { + for k := range m.Data { + data[i] = 0x12 + i++ + v := m.Data[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + return i, nil +} + +func (m *ConfigMapEnvSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ConfigMapEnvSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) + n10, err := m.LocalObjectReference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + if m.Optional != nil { + data[i] = 0x10 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ConfigMapKeySelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ConfigMapKeySelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) + n11, err := m.LocalObjectReference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + if m.Optional != nil { + data[i] = 0x18 + i++ + if 
*m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ConfigMapList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ConfigMapList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n12, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ConfigMapProjection) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ConfigMapProjection) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) + n13, err := m.LocalObjectReference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Optional != nil { + data[i] = 0x20 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ConfigMapVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ConfigMapVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) + n14, err := m.LocalObjectReference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n14 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.DefaultMode != nil { + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.DefaultMode)) + } + if m.Optional != nil { + data[i] = 0x20 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *Container) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Container) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Image))) + i += copy(data[i:], m.Image) + if len(m.Command) > 0 { + for _, s := range m.Command { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + data[i] 
= 0x22 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.WorkingDir))) + i += copy(data[i:], m.WorkingDir) + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Env) > 0 { + for _, msg := range m.Env { + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Resources.Size())) + n15, err := m.Resources.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n15 + if len(m.VolumeMounts) > 0 { + for _, msg := range m.VolumeMounts { + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.LivenessProbe != nil { + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LivenessProbe.Size())) + n16, err := m.LivenessProbe.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.ReadinessProbe != nil { + data[i] = 0x5a + i++ + i = encodeVarintGenerated(data, i, uint64(m.ReadinessProbe.Size())) + n17, err := m.ReadinessProbe.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if m.Lifecycle != nil { + data[i] = 0x62 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Lifecycle.Size())) + n18, err := m.Lifecycle.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n18 + } + data[i] = 0x6a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.TerminationMessagePath))) + i += copy(data[i:], m.TerminationMessagePath) + data[i] = 0x72 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ImagePullPolicy))) + i += copy(data[i:], m.ImagePullPolicy) + if m.SecurityContext != nil { + data[i] = 0x7a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecurityContext.Size())) + n19, err := m.SecurityContext.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n19 + } + data[i] = 0x80 + i++ + data[i] = 0x1 + i++ + if m.Stdin { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x88 + i++ + data[i] = 0x1 + i++ + if m.StdinOnce { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x90 + i++ + data[i] = 0x1 + i++ + if m.TTY { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if len(m.EnvFrom) > 0 { + for _, msg := range m.EnvFrom { + data[i] = 0x9a + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0xa2 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.TerminationMessagePolicy))) + i += copy(data[i:], m.TerminationMessagePolicy) + return i, nil +} + +func (m *ContainerImage) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerImage) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = 
uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SizeBytes)) + return i, nil +} + +func (m *ContainerPort) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerPort) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.HostPort)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ContainerPort)) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Protocol))) + i += copy(data[i:], m.Protocol) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.HostIP))) + i += copy(data[i:], m.HostIP) + return i, nil +} + +func (m *ContainerState) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerState) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Waiting != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Waiting.Size())) + n20, err := m.Waiting.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n20 + } + if m.Running != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Running.Size())) + n21, err := m.Running.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if m.Terminated != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Terminated.Size())) + n22, err := m.Terminated.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n22 + } + return i, nil +} + +func (m *ContainerStateRunning) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerStateRunning) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.StartedAt.Size())) + n23, err := m.StartedAt.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n23 + return i, nil +} + +func (m *ContainerStateTerminated) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerStateTerminated) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ExitCode)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Signal)) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.StartedAt.Size())) + n24, err := m.StartedAt.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n24 + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FinishedAt.Size())) + n25, err := 
m.FinishedAt.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n25 + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContainerID))) + i += copy(data[i:], m.ContainerID) + return i, nil +} + +func (m *ContainerStateWaiting) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerStateWaiting) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *ContainerStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.State.Size())) + n26, err := m.State.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n26 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTerminationState.Size())) + n27, err := m.LastTerminationState.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n27 + data[i] = 0x20 + i++ + if m.Ready { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RestartCount)) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Image))) + i += copy(data[i:], m.Image) + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ImageID))) + i += copy(data[i:], m.ImageID) + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContainerID))) + i += copy(data[i:], m.ContainerID) + return i, nil +} + +func (m *DaemonEndpoint) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DaemonEndpoint) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Port)) + return i, nil +} + +func (m *DeleteOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeleteOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.GracePeriodSeconds != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.GracePeriodSeconds)) + } + if m.Preconditions != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Preconditions.Size())) + n28, err := m.Preconditions.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n28 + } + if m.OrphanDependents != nil { + data[i] = 0x18 + i++ + if *m.OrphanDependents { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + if m.PropagationPolicy != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.PropagationPolicy))) + i += copy(data[i:], *m.PropagationPolicy) + } + return i, 
nil +} + +func (m *DownwardAPIProjection) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DownwardAPIProjection) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DownwardAPIVolumeFile) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DownwardAPIVolumeFile) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + if m.FieldRef != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FieldRef.Size())) + n29, err := m.FieldRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n29 + } + if m.ResourceFieldRef != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.ResourceFieldRef.Size())) + n30, err := m.ResourceFieldRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n30 + } + if m.Mode != nil { + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Mode)) + } + return i, nil +} + +func (m *DownwardAPIVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DownwardAPIVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.DefaultMode != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.DefaultMode)) + } + return i, nil +} + +func (m *EmptyDirVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EmptyDirVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Medium))) + i += copy(data[i:], m.Medium) + return i, nil +} + +func (m *EndpointAddress) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EndpointAddress) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.IP))) + i += copy(data[i:], m.IP) + if m.TargetRef != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetRef.Size())) + n31, err := m.TargetRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n31 + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Hostname))) + i += copy(data[i:], m.Hostname) + if m.NodeName != nil { + data[i] = 0x22 + i++ + i = 
encodeVarintGenerated(data, i, uint64(len(*m.NodeName))) + i += copy(data[i:], *m.NodeName) + } + return i, nil +} + +func (m *EndpointPort) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EndpointPort) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Port)) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Protocol))) + i += copy(data[i:], m.Protocol) + return i, nil +} + +func (m *EndpointSubset) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EndpointSubset) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Addresses) > 0 { + for _, msg := range m.Addresses { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.NotReadyAddresses) > 0 { + for _, msg := range m.NotReadyAddresses { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Endpoints) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Endpoints) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n32, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n32 + if len(m.Subsets) > 0 { + for _, msg := range m.Subsets { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *EndpointsList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EndpointsList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n33, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n33 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *EnvFromSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EnvFromSource) MarshalTo(data []byte) (int, error) { + var i int 
+ _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Prefix))) + i += copy(data[i:], m.Prefix) + if m.ConfigMapRef != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ConfigMapRef.Size())) + n34, err := m.ConfigMapRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n34 + } + if m.SecretRef != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) + n35, err := m.SecretRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n35 + } + return i, nil +} + +func (m *EnvVar) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EnvVar) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Value))) + i += copy(data[i:], m.Value) + if m.ValueFrom != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.ValueFrom.Size())) + n36, err := m.ValueFrom.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n36 + } + return i, nil +} + +func (m *EnvVarSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EnvVarSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.FieldRef != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.FieldRef.Size())) + n37, err := m.FieldRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n37 + } + if m.ResourceFieldRef != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ResourceFieldRef.Size())) + n38, err := m.ResourceFieldRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n38 + } + if m.ConfigMapKeyRef != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.ConfigMapKeyRef.Size())) + n39, err := m.ConfigMapKeyRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n39 + } + if m.SecretKeyRef != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecretKeyRef.Size())) + n40, err := m.SecretKeyRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n40 + } + return i, nil +} + +func (m *Event) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Event) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n41, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n41 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.InvolvedObject.Size())) + n42, err := m.InvolvedObject.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n42 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, 
uint64(m.Source.Size())) + n43, err := m.Source.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n43 + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FirstTimestamp.Size())) + n44, err := m.FirstTimestamp.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n44 + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTimestamp.Size())) + n45, err := m.LastTimestamp.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n45 + data[i] = 0x40 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Count)) + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + return i, nil +} + +func (m *EventList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EventList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n46, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n46 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *EventSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EventSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Component))) + i += copy(data[i:], m.Component) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Host))) + i += copy(data[i:], m.Host) + return i, nil +} + +func (m *ExecAction) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ExecAction) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *FCVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *FCVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TargetWWNs) > 0 { + for _, s := range m.TargetWWNs { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if m.Lun != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Lun)) + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x20 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *FlexVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := 
m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *FlexVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Driver))) + i += copy(data[i:], m.Driver) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + if m.SecretRef != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) + n47, err := m.SecretRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n47 + } + data[i] = 0x20 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if len(m.Options) > 0 { + for k := range m.Options { + data[i] = 0x2a + i++ + v := m.Options[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + return i, nil +} + +func (m *FlockerVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *FlockerVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.DatasetName))) + i += copy(data[i:], m.DatasetName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.DatasetUUID))) + i += copy(data[i:], m.DatasetUUID) + return i, nil +} + +func (m *GCEPersistentDiskVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GCEPersistentDiskVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.PDName))) + i += copy(data[i:], m.PDName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Partition)) + data[i] = 0x20 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *GitRepoVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GitRepoVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Repository))) + i += copy(data[i:], m.Repository) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Revision))) + i += copy(data[i:], m.Revision) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Directory))) + i += copy(data[i:], m.Directory) + return i, nil +} + +func (m *GlusterfsVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GlusterfsVolumeSource) MarshalTo(data []byte) (int, error) { + 
var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.EndpointsName))) + i += copy(data[i:], m.EndpointsName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + data[i] = 0x18 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *HTTPGetAction) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HTTPGetAction) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Port.Size())) + n48, err := m.Port.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n48 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Host))) + i += copy(data[i:], m.Host) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Scheme))) + i += copy(data[i:], m.Scheme) + if len(m.HTTPHeaders) > 0 { + for _, msg := range m.HTTPHeaders { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HTTPHeader) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HTTPHeader) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Value))) + i += copy(data[i:], m.Value) + return i, nil +} + +func (m *Handler) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Handler) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Exec != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Exec.Size())) + n49, err := m.Exec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n49 + } + if m.HTTPGet != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.HTTPGet.Size())) + n50, err := m.HTTPGet.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n50 + } + if m.TCPSocket != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.TCPSocket.Size())) + n51, err := m.TCPSocket.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n51 + } + return i, nil +} + +func (m *HostPathVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HostPathVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + return i, nil +} + +func (m *ISCSIVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if 
err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ISCSIVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.TargetPortal))) + i += copy(data[i:], m.TargetPortal) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.IQN))) + i += copy(data[i:], m.IQN) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Lun)) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ISCSIInterface))) + i += copy(data[i:], m.ISCSIInterface) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x30 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if len(m.Portals) > 0 { + for _, s := range m.Portals { + data[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *KeyToPath) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *KeyToPath) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + if m.Mode != nil { + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Mode)) + } + return i, nil +} + +func (m *Lifecycle) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Lifecycle) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PostStart != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.PostStart.Size())) + n52, err := m.PostStart.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n52 + } + if m.PreStop != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PreStop.Size())) + n53, err := m.PreStop.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n53 + } + return i, nil +} + +func (m *LimitRange) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LimitRange) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n54, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n54 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n55, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n55 + return i, nil +} + +func (m *LimitRangeItem) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LimitRangeItem) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], 
m.Type) + if len(m.Max) > 0 { + for k := range m.Max { + data[i] = 0x12 + i++ + v := m.Max[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n56, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n56 + } + } + if len(m.Min) > 0 { + for k := range m.Min { + data[i] = 0x1a + i++ + v := m.Min[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n57, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n57 + } + } + if len(m.Default) > 0 { + for k := range m.Default { + data[i] = 0x22 + i++ + v := m.Default[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n58, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n58 + } + } + if len(m.DefaultRequest) > 0 { + for k := range m.DefaultRequest { + data[i] = 0x2a + i++ + v := m.DefaultRequest[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n59, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n59 + } + } + if len(m.MaxLimitRequestRatio) > 0 { + for k := range m.MaxLimitRequestRatio { + data[i] = 0x32 + i++ + v := m.MaxLimitRequestRatio[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n60, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n60 + } + } + return i, nil +} + +func (m *LimitRangeList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LimitRangeList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n61, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n61 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } 
+ } + return i, nil +} + +func (m *LimitRangeSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LimitRangeSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Limits) > 0 { + for _, msg := range m.Limits { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *List) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *List) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n62, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n62 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ListOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ListOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.LabelSelector))) + i += copy(data[i:], m.LabelSelector) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FieldSelector))) + i += copy(data[i:], m.FieldSelector) + data[i] = 0x18 + i++ + if m.Watch { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) + i += copy(data[i:], m.ResourceVersion) + if m.TimeoutSeconds != nil { + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TimeoutSeconds)) + } + return i, nil +} + +func (m *LoadBalancerIngress) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LoadBalancerIngress) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.IP))) + i += copy(data[i:], m.IP) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Hostname))) + i += copy(data[i:], m.Hostname) + return i, nil +} + +func (m *LoadBalancerStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LoadBalancerStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ingress) > 0 { + for _, msg := range m.Ingress { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LocalObjectReference) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + 
} + return data[:n], nil +} + +func (m *LocalObjectReference) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + return i, nil +} + +func (m *NFSVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NFSVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Server))) + i += copy(data[i:], m.Server) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + data[i] = 0x18 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *Namespace) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Namespace) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n63, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n63 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n64, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n64 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n65, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n65 + return i, nil +} + +func (m *NamespaceList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NamespaceList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n66, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n66 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NamespaceSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NamespaceSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *NamespaceStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NamespaceStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) + i += copy(data[i:], m.Phase) + return i, nil +} + +func (m *Node) 
Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Node) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n67, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n67 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n68, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n68 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n69, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n69 + return i, nil +} + +func (m *NodeAddress) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeAddress) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Address))) + i += copy(data[i:], m.Address) + return i, nil +} + +func (m *NodeAffinity) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeAffinity) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) + n70, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n70 + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastHeartbeatTime.Size())) + n71, err := m.LastHeartbeatTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n71 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n72, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n72 + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m 
*NodeDaemonEndpoints) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeDaemonEndpoints) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.KubeletEndpoint.Size())) + n73, err := m.KubeletEndpoint.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n73 + return i, nil +} + +func (m *NodeList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n74, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n74 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeProxyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeProxyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + return i, nil +} + +func (m *NodeResources) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeResources) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Capacity) > 0 { + for k := range m.Capacity { + data[i] = 0xa + i++ + v := m.Capacity[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n75, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n75 + } + } + return i, nil +} + +func (m *NodeSelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeSelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeSelectorTerms) > 0 { + for _, msg := range m.NodeSelectorTerms { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeSelectorRequirement) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeSelectorRequirement) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = 
encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) + i += copy(data[i:], m.Operator) + if len(m.Values) > 0 { + for _, s := range m.Values { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *NodeSelectorTerm) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeSelectorTerm) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for _, msg := range m.MatchExpressions { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.PodCIDR))) + i += copy(data[i:], m.PodCIDR) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ExternalID))) + i += copy(data[i:], m.ExternalID) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ProviderID))) + i += copy(data[i:], m.ProviderID) + data[i] = 0x20 + i++ + if m.Unschedulable { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if len(m.Taints) > 0 { + for _, msg := range m.Taints { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Capacity) > 0 { + for k := range m.Capacity { + data[i] = 0xa + i++ + v := m.Capacity[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n76, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n76 + } + } + if len(m.Allocatable) > 0 { + for k := range m.Allocatable { + data[i] = 0x12 + i++ + v := m.Allocatable[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n77, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n77 + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) + 
i += copy(data[i:], m.Phase) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Addresses) > 0 { + for _, msg := range m.Addresses { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DaemonEndpoints.Size())) + n78, err := m.DaemonEndpoints.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n78 + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.NodeInfo.Size())) + n79, err := m.NodeInfo.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n79 + if len(m.Images) > 0 { + for _, msg := range m.Images { + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.VolumesInUse) > 0 { + for _, s := range m.VolumesInUse { + data[i] = 0x4a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.VolumesAttached) > 0 { + for _, msg := range m.VolumesAttached { + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeSystemInfo) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeSystemInfo) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MachineID))) + i += copy(data[i:], m.MachineID) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SystemUUID))) + i += copy(data[i:], m.SystemUUID) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.BootID))) + i += copy(data[i:], m.BootID) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.KernelVersion))) + i += copy(data[i:], m.KernelVersion) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.OSImage))) + i += copy(data[i:], m.OSImage) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContainerRuntimeVersion))) + i += copy(data[i:], m.ContainerRuntimeVersion) + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.KubeletVersion))) + i += copy(data[i:], m.KubeletVersion) + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.KubeProxyVersion))) + i += copy(data[i:], m.KubeProxyVersion) + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.OperatingSystem))) + i += copy(data[i:], m.OperatingSystem) + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Architecture))) + i += copy(data[i:], m.Architecture) + return i, nil +} + +func (m *ObjectFieldSelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectFieldSelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + 
data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FieldPath))) + i += copy(data[i:], m.FieldPath) + return i, nil +} + +func (m *ObjectMeta) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectMeta) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.GenerateName))) + i += copy(data[i:], m.GenerateName) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) + i += copy(data[i:], m.Namespace) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SelfLink))) + i += copy(data[i:], m.SelfLink) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.UID))) + i += copy(data[i:], m.UID) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) + i += copy(data[i:], m.ResourceVersion) + data[i] = 0x38 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Generation)) + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CreationTimestamp.Size())) + n80, err := m.CreationTimestamp.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n80 + if m.DeletionTimestamp != nil { + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(m.DeletionTimestamp.Size())) + n81, err := m.DeletionTimestamp.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n81 + } + if m.DeletionGracePeriodSeconds != nil { + data[i] = 0x50 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.DeletionGracePeriodSeconds)) + } + if len(m.Labels) > 0 { + for k := range m.Labels { + data[i] = 0x5a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if len(m.Annotations) > 0 { + for k := range m.Annotations { + data[i] = 0x62 + i++ + v := m.Annotations[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if len(m.OwnerReferences) > 0 { + for _, msg := range m.OwnerReferences { + data[i] = 0x6a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + data[i] = 0x72 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x7a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ClusterName))) + i += copy(data[i:], m.ClusterName) + return i, nil +} + +func (m *ObjectReference) Marshal() (data 
[]byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectReference) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) + i += copy(data[i:], m.Namespace) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.UID))) + i += copy(data[i:], m.UID) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) + i += copy(data[i:], m.ResourceVersion) + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FieldPath))) + i += copy(data[i:], m.FieldPath) + return i, nil +} + +func (m *PersistentVolume) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolume) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n82, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n82 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n83, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n83 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n84, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n84 + return i, nil +} + +func (m *PersistentVolumeClaim) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeClaim) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n85, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n85 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n86, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n86 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n87, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n87 + return i, nil +} + +func (m *PersistentVolumeClaimList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeClaimList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n88, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n88 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := 
msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PersistentVolumeClaimSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeClaimSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Resources.Size())) + n89, err := m.Resources.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n89 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.VolumeName))) + i += copy(data[i:], m.VolumeName) + if m.Selector != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n90, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n90 + } + if m.StorageClassName != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.StorageClassName))) + i += copy(data[i:], *m.StorageClassName) + } + return i, nil +} + +func (m *PersistentVolumeClaimStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeClaimStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) + i += copy(data[i:], m.Phase) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + data[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Capacity) > 0 { + for k := range m.Capacity { + data[i] = 0x1a + i++ + v := m.Capacity[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n91, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n91 + } + } + return i, nil +} + +func (m *PersistentVolumeClaimVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeClaimVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ClaimName))) + i += copy(data[i:], m.ClaimName) + data[i] = 0x10 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *PersistentVolumeList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + 
_ = l
+	data[i] = 0xa
+	i++
+	i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+	n92, err := m.ListMeta.MarshalTo(data[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n92
+	if len(m.Items) > 0 {
+		for _, msg := range m.Items {
+			data[i] = 0x12
+			i++
+			i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(data[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *PersistentVolumeSource) Marshal() (data []byte, err error) {
+	size := m.Size()
+	data = make([]byte, size)
+	n, err := m.MarshalTo(data)
+	if err != nil {
+		return nil, err
+	}
+	return data[:n], nil
+}
+
+func (m *PersistentVolumeSource) MarshalTo(data []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.GCEPersistentDisk != nil {
+		data[i] = 0xa
+		i++
+		i = encodeVarintGenerated(data, i, uint64(m.GCEPersistentDisk.Size()))
+		n93, err := m.GCEPersistentDisk.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n93
+	}
+	if m.AWSElasticBlockStore != nil {
+		data[i] = 0x12
+		i++
+		i = encodeVarintGenerated(data, i, uint64(m.AWSElasticBlockStore.Size()))
+		n94, err := m.AWSElasticBlockStore.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n94
+	}
+	if m.HostPath != nil {
+		data[i] = 0x1a
+		i++
+		i = encodeVarintGenerated(data, i, uint64(m.HostPath.Size()))
+		n95, err := m.HostPath.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n95
+	}
+	if m.Glusterfs != nil {
+		data[i] = 0x22
+		i++
+		i = encodeVarintGenerated(data, i, uint64(m.Glusterfs.Size()))
+		n96, err := m.Glusterfs.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n96
+	}
+	if m.NFS != nil {
+		data[i] = 0x2a
+		i++
+		i = encodeVarintGenerated(data, i, uint64(m.NFS.Size()))
+		n97, err := m.NFS.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n97
+	}
+	if m.RBD != nil {
+		data[i] = 0x32
+		i++
+		i = encodeVarintGenerated(data, i, uint64(m.RBD.Size()))
+		n98, err := m.RBD.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n98
+	}
+	if m.ISCSI != nil {
+		data[i] = 0x3a
+		i++
+		i = encodeVarintGenerated(data, i, uint64(m.ISCSI.Size()))
+		n99, err := m.ISCSI.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n99
+	}
+	if m.Cinder != nil {
+		data[i] = 0x42
+		i++
+		i = encodeVarintGenerated(data, i, uint64(m.Cinder.Size()))
+		n100, err := m.Cinder.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n100
+	}
+	if m.CephFS != nil {
+		data[i] = 0x4a
+		i++
+		i = encodeVarintGenerated(data, i, uint64(m.CephFS.Size()))
+		n101, err := m.CephFS.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n101
+	}
+	if m.FC != nil {
+		data[i] = 0x52
+		i++
+		i = encodeVarintGenerated(data, i, uint64(m.FC.Size()))
+		n102, err := m.FC.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n102
+	}
+	if m.Flocker != nil {
+		data[i] = 0x5a
+		i++
+		i = encodeVarintGenerated(data, i, uint64(m.Flocker.Size()))
+		n103, err := m.Flocker.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n103
+	}
+	if m.FlexVolume != nil {
+		data[i] = 0x62
+		i++
+		i = encodeVarintGenerated(data, i, uint64(m.FlexVolume.Size()))
+		n104, err := m.FlexVolume.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n104
+	}
+	if m.AzureFile != nil {
+		data[i] = 0x6a
+		i++
+		i = encodeVarintGenerated(data, i, uint64(m.AzureFile.Size()))
+		n105, err := m.AzureFile.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n105
+	}
+	if m.VsphereVolume != nil {
+		data[i] = 0x72
+		i++
+		i = encodeVarintGenerated(data, i, uint64(m.VsphereVolume.Size()))
+		n106, err := m.VsphereVolume.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n106
+	}
+	if m.Quobyte != nil {
+		data[i] = 0x7a
+		i++
+		i = encodeVarintGenerated(data, i, uint64(m.Quobyte.Size()))
+		n107, err := m.Quobyte.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n107
+	}
+	if m.AzureDisk != nil {
+		data[i] = 0x82
+		i++
+		data[i] = 0x1
+		i++
+		i = encodeVarintGenerated(data, i, uint64(m.AzureDisk.Size()))
+		n108, err := m.AzureDisk.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n108
+	}
+	if m.PhotonPersistentDisk != nil {
+		data[i] = 0x8a
+		i++
+		data[i] = 0x1
+		i++
+		i = encodeVarintGenerated(data, i, uint64(m.PhotonPersistentDisk.Size()))
+		n109, err := m.PhotonPersistentDisk.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n109
+	}
+	if m.PortworxVolume != nil {
+		data[i] = 0x92
+		i++
+		data[i] = 0x1
+		i++
+		i = encodeVarintGenerated(data, i, uint64(m.PortworxVolume.Size()))
+		n110, err := m.PortworxVolume.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n110
+	}
+	if m.ScaleIO != nil {
+		data[i] = 0x9a
+		i++
+		data[i] = 0x1
+		i++
+		i = encodeVarintGenerated(data, i, uint64(m.ScaleIO.Size()))
+		n111, err := m.ScaleIO.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n111
+	}
+	return i, nil
+}
+
+func (m *PersistentVolumeSpec) Marshal() (data []byte, err error) {
+	size := m.Size()
+	data = make([]byte, size)
+	n, err := m.MarshalTo(data)
+	if err != nil {
+		return nil, err
+	}
+	return data[:n], nil
+}
+
+func (m *PersistentVolumeSpec) MarshalTo(data []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Capacity) > 0 {
+		for k := range m.Capacity {
+			data[i] = 0xa
+			i++
+			v := m.Capacity[k]
+			msgSize := (&v).Size()
+			mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize))
+			i = encodeVarintGenerated(data, i, uint64(mapSize))
+			data[i] = 0xa
+			i++
+			i = encodeVarintGenerated(data, i, uint64(len(k)))
+			i += copy(data[i:], k)
+			data[i] = 0x12
+			i++
+			i = encodeVarintGenerated(data, i, uint64((&v).Size()))
+			n112, err := (&v).MarshalTo(data[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n112
+		}
+	}
+	data[i] = 0x12
+	i++
+	i = encodeVarintGenerated(data, i, uint64(m.PersistentVolumeSource.Size()))
+	n113, err := m.PersistentVolumeSource.MarshalTo(data[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n113
+	if len(m.AccessModes) > 0 {
+		for _, s := range m.AccessModes {
+			data[i] = 0x1a
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				data[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			data[i] = uint8(l)
+			i++
+			i += copy(data[i:], s)
+		}
+	}
+	if m.ClaimRef != nil {
+		data[i] = 0x22
+		i++
+		i = encodeVarintGenerated(data, i, uint64(m.ClaimRef.Size()))
+		n114, err := m.ClaimRef.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n114
+	}
+	data[i] = 0x2a
+	i++
+	i = encodeVarintGenerated(data, i, uint64(len(m.PersistentVolumeReclaimPolicy)))
+	i += copy(data[i:], m.PersistentVolumeReclaimPolicy)
+	data[i] = 0x32
+	i++
+	i = encodeVarintGenerated(data, i, uint64(len(m.StorageClassName)))
+	i += copy(data[i:], m.StorageClassName)
+	return i, nil
+}
+
+func (m *PersistentVolumeStatus) Marshal() (data []byte, err error) {
+	size := m.Size()
+	data = make([]byte, size)
+	n, err := m.MarshalTo(data)
+	if err != nil {
+		return nil, err
+	}
+	return data[:n], nil
+}
+
+func (m *PersistentVolumeStatus) MarshalTo(data []byte) (int, error) {
var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) + i += copy(data[i:], m.Phase) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + return i, nil +} + +func (m *PhotonPersistentDiskVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PhotonPersistentDiskVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.PdID))) + i += copy(data[i:], m.PdID) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + return i, nil +} + +func (m *Pod) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Pod) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n115, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n115 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n116, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n116 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n117, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n117 + return i, nil +} + +func (m *PodAffinity) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodAffinity) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodAffinityTerm) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodAffinityTerm) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.LabelSelector != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LabelSelector.Size())) + n118, err := m.LabelSelector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n118 + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + data[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += 
copy(data[i:], s) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.TopologyKey))) + i += copy(data[i:], m.TopologyKey) + return i, nil +} + +func (m *PodAntiAffinity) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodAntiAffinity) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodAttachOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodAttachOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Stdin { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x10 + i++ + if m.Stdout { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x18 + i++ + if m.Stderr { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x20 + i++ + if m.TTY { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Container))) + i += copy(data[i:], m.Container) + return i, nil +} + +func (m *PodCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) + n119, err := m.LastProbeTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n119 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n120, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n120 + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *PodExecOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodExecOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Stdin { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x10 + i++ + if m.Stdout { + 
data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x18 + i++ + if m.Stderr { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x20 + i++ + if m.TTY { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Container))) + i += copy(data[i:], m.Container) + if len(m.Command) > 0 { + for _, s := range m.Command { + data[i] = 0x32 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *PodList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n121, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n121 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodLogOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodLogOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Container))) + i += copy(data[i:], m.Container) + data[i] = 0x10 + i++ + if m.Follow { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x18 + i++ + if m.Previous { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if m.SinceSeconds != nil { + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SinceTime.Size())) + n122, err := m.SinceTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n122 + } + data[i] = 0x30 + i++ + if m.Timestamps { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if m.TailLines != nil { + data[i] = 0x38 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + data[i] = 0x40 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.LimitBytes)) + } + return i, nil +} + +func (m *PodPortForwardOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodPortForwardOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for _, num := range m.Ports { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(num)) + } + } + return i, nil +} + +func (m *PodProxyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodProxyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + return i, nil +} + +func (m 
*PodSecurityContext) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodSecurityContext) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.SELinuxOptions != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size())) + n123, err := m.SELinuxOptions.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n123 + } + if m.RunAsUser != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + data[i] = 0x18 + i++ + if *m.RunAsNonRoot { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + if len(m.SupplementalGroups) > 0 { + for _, num := range m.SupplementalGroups { + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(num)) + } + } + if m.FSGroup != nil { + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.FSGroup)) + } + return i, nil +} + +func (m *PodSignature) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodSignature) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PodController != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodController.Size())) + n124, err := m.PodController.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n124 + } + return i, nil +} + +func (m *PodSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Volumes) > 0 { + for _, msg := range m.Volumes { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Containers) > 0 { + for _, msg := range m.Containers { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.RestartPolicy))) + i += copy(data[i:], m.RestartPolicy) + if m.TerminationGracePeriodSeconds != nil { + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TerminationGracePeriodSeconds)) + } + if m.ActiveDeadlineSeconds != nil { + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ActiveDeadlineSeconds)) + } + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.DNSPolicy))) + i += copy(data[i:], m.DNSPolicy) + if len(m.NodeSelector) > 0 { + for k := range m.NodeSelector { + data[i] = 0x3a + i++ + v := m.NodeSelector[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ServiceAccountName))) + i += copy(data[i:], m.ServiceAccountName) + data[i] 
= 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.DeprecatedServiceAccount))) + i += copy(data[i:], m.DeprecatedServiceAccount) + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.NodeName))) + i += copy(data[i:], m.NodeName) + data[i] = 0x58 + i++ + if m.HostNetwork { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x60 + i++ + if m.HostPID { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x68 + i++ + if m.HostIPC { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if m.SecurityContext != nil { + data[i] = 0x72 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecurityContext.Size())) + n125, err := m.SecurityContext.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n125 + } + if len(m.ImagePullSecrets) > 0 { + for _, msg := range m.ImagePullSecrets { + data[i] = 0x7a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x82 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Hostname))) + i += copy(data[i:], m.Hostname) + data[i] = 0x8a + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Subdomain))) + i += copy(data[i:], m.Subdomain) + if m.Affinity != nil { + data[i] = 0x92 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Affinity.Size())) + n126, err := m.Affinity.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n126 + } + data[i] = 0x9a + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SchedulerName))) + i += copy(data[i:], m.SchedulerName) + if len(m.InitContainers) > 0 { + for _, msg := range m.InitContainers { + data[i] = 0xa2 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.AutomountServiceAccountToken != nil { + data[i] = 0xa8 + i++ + data[i] = 0x1 + i++ + if *m.AutomountServiceAccountToken { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + if len(m.Tolerations) > 0 { + for _, msg := range m.Tolerations { + data[i] = 0xb2 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) + i += copy(data[i:], m.Phase) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.HostIP))) + i += copy(data[i:], m.HostIP) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.PodIP))) + i += copy(data[i:], m.PodIP) + if m.StartTime != nil { + 
data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size())) + n127, err := m.StartTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n127 + } + if len(m.ContainerStatuses) > 0 { + for _, msg := range m.ContainerStatuses { + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.QOSClass))) + i += copy(data[i:], m.QOSClass) + if len(m.InitContainerStatuses) > 0 { + for _, msg := range m.InitContainerStatuses { + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodStatusResult) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodStatusResult) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n128, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n128 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n129, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n129 + return i, nil +} + +func (m *PodTemplate) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodTemplate) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n130, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n130 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n131, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n131 + return i, nil +} + +func (m *PodTemplateList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodTemplateList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n132, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n132 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodTemplateSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodTemplateSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n133, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n133 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, 
uint64(m.Spec.Size())) + n134, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n134 + return i, nil +} + +func (m *PortworxVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PortworxVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.VolumeID))) + i += copy(data[i:], m.VolumeID) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x18 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *Preconditions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Preconditions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.UID != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.UID))) + i += copy(data[i:], *m.UID) + } + return i, nil +} + +func (m *PreferAvoidPodsEntry) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PreferAvoidPodsEntry) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodSignature.Size())) + n135, err := m.PodSignature.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n135 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.EvictionTime.Size())) + n136, err := m.EvictionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n136 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *PreferredSchedulingTerm) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PreferredSchedulingTerm) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Weight)) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Preference.Size())) + n137, err := m.Preference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n137 + return i, nil +} + +func (m *Probe) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Probe) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Handler.Size())) + n138, err := m.Handler.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n138 + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.InitialDelaySeconds)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TimeoutSeconds)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, 
uint64(m.PeriodSeconds)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SuccessThreshold)) + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FailureThreshold)) + return i, nil +} + +func (m *ProjectedVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ProjectedVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Sources) > 0 { + for _, msg := range m.Sources { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.DefaultMode != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.DefaultMode)) + } + return i, nil +} + +func (m *QuobyteVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *QuobyteVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Registry))) + i += copy(data[i:], m.Registry) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Volume))) + i += copy(data[i:], m.Volume) + data[i] = 0x18 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.User))) + i += copy(data[i:], m.User) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + return i, nil +} + +func (m *RBDVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RBDVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.RBDImage))) + i += copy(data[i:], m.RBDImage) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.RBDPool))) + i += copy(data[i:], m.RBDPool) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.RadosUser))) + i += copy(data[i:], m.RadosUser) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Keyring))) + i += copy(data[i:], m.Keyring) + if m.SecretRef != nil { + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) + n139, err := m.SecretRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n139 + } + data[i] = 0x40 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *RangeAllocation) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RangeAllocation) MarshalTo(data []byte) (int, error) { + 
var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n140, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n140 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Range))) + i += copy(data[i:], m.Range) + if m.Data != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Data))) + i += copy(data[i:], m.Data) + } + return i, nil +} + +func (m *ReplicationController) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicationController) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n141, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n141 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n142, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n142 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n143, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n143 + return i, nil +} + +func (m *ReplicationControllerCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicationControllerCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n144, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n144 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *ReplicationControllerList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicationControllerList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n145, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n145 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ReplicationControllerSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicationControllerSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + data[i] = 0x8 + i++ + i 
= encodeVarintGenerated(data, i, uint64(*m.Replicas)) + } + if len(m.Selector) > 0 { + for k := range m.Selector { + data[i] = 0x12 + i++ + v := m.Selector[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if m.Template != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n146, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n146 + } + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) + return i, nil +} + +func (m *ReplicationControllerStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicationControllerStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FullyLabeledReplicas)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ReadyReplicas)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ResourceFieldSelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceFieldSelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContainerName))) + i += copy(data[i:], m.ContainerName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Resource))) + i += copy(data[i:], m.Resource) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Divisor.Size())) + n147, err := m.Divisor.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n147 + return i, nil +} + +func (m *ResourceQuota) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceQuota) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n148, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n148 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n149, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n149 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n150, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n150 + return i, nil +} + +func (m *ResourceQuotaList) 
Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceQuotaList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n151, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n151 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ResourceQuotaSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceQuotaSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hard) > 0 { + for k := range m.Hard { + data[i] = 0xa + i++ + v := m.Hard[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n152, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n152 + } + } + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + data[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *ResourceQuotaStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceQuotaStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hard) > 0 { + for k := range m.Hard { + data[i] = 0xa + i++ + v := m.Hard[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n153, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n153 + } + } + if len(m.Used) > 0 { + for k := range m.Used { + data[i] = 0x12 + i++ + v := m.Used[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n154, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n154 + } + } + return i, nil +} + +func (m *ResourceRequirements) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceRequirements) MarshalTo(data []byte) (int, 
error) { + var i int + _ = i + var l int + _ = l + if len(m.Limits) > 0 { + for k := range m.Limits { + data[i] = 0xa + i++ + v := m.Limits[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n155, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n155 + } + } + if len(m.Requests) > 0 { + for k := range m.Requests { + data[i] = 0x12 + i++ + v := m.Requests[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n156, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n156 + } + } + return i, nil +} + +func (m *SELinuxOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SELinuxOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.User))) + i += copy(data[i:], m.User) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Role))) + i += copy(data[i:], m.Role) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Level))) + i += copy(data[i:], m.Level) + return i, nil +} + +func (m *ScaleIOVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScaleIOVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Gateway))) + i += copy(data[i:], m.Gateway) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.System))) + i += copy(data[i:], m.System) + if m.SecretRef != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) + n157, err := m.SecretRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n157 + } + data[i] = 0x20 + i++ + if m.SSLEnabled { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ProtectionDomain))) + i += copy(data[i:], m.ProtectionDomain) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.StoragePool))) + i += copy(data[i:], m.StoragePool) + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.StorageMode))) + i += copy(data[i:], m.StorageMode) + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.VolumeName))) + i += copy(data[i:], m.VolumeName) + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x50 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + 
+func (m *Secret) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Secret) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n158, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n158 + if len(m.Data) > 0 { + for k := range m.Data { + data[i] = 0x12 + i++ + v := m.Data[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if len(m.StringData) > 0 { + for k := range m.StringData { + data[i] = 0x22 + i++ + v := m.StringData[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + return i, nil +} + +func (m *SecretEnvSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SecretEnvSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) + n159, err := m.LocalObjectReference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n159 + if m.Optional != nil { + data[i] = 0x10 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SecretKeySelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SecretKeySelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) + n160, err := m.LocalObjectReference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n160 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + if m.Optional != nil { + data[i] = 0x18 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SecretList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SecretList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n161, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n161 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = 
encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *SecretProjection) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SecretProjection) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) + n162, err := m.LocalObjectReference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n162 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Optional != nil { + data[i] = 0x20 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SecretVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SecretVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SecretName))) + i += copy(data[i:], m.SecretName) + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.DefaultMode != nil { + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.DefaultMode)) + } + if m.Optional != nil { + data[i] = 0x20 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SecurityContext) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SecurityContext) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Capabilities != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Capabilities.Size())) + n163, err := m.Capabilities.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n163 + } + if m.Privileged != nil { + data[i] = 0x10 + i++ + if *m.Privileged { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + if m.SELinuxOptions != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size())) + n164, err := m.SELinuxOptions.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n164 + } + if m.RunAsUser != nil { + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + data[i] = 0x28 + i++ + if *m.RunAsNonRoot { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + if m.ReadOnlyRootFilesystem != nil { + data[i] = 0x30 + i++ + if *m.ReadOnlyRootFilesystem { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SerializedReference) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SerializedReference) MarshalTo(data []byte) (int, 
error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Reference.Size())) + n165, err := m.Reference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n165 + return i, nil +} + +func (m *Service) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Service) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n166, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n166 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n167, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n167 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n168, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n168 + return i, nil +} + +func (m *ServiceAccount) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServiceAccount) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n169, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n169 + if len(m.Secrets) > 0 { + for _, msg := range m.Secrets { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.ImagePullSecrets) > 0 { + for _, msg := range m.ImagePullSecrets { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.AutomountServiceAccountToken != nil { + data[i] = 0x20 + i++ + if *m.AutomountServiceAccountToken { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ServiceAccountList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServiceAccountList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n170, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n170 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ServiceList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServiceList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n171, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n171 + if len(m.Items) > 0 { + for _, msg := 
range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ServicePort) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServicePort) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Protocol))) + i += copy(data[i:], m.Protocol) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Port)) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetPort.Size())) + n172, err := m.TargetPort.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n172 + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NodePort)) + return i, nil +} + +func (m *ServiceProxyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServiceProxyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + return i, nil +} + +func (m *ServiceSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServiceSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Selector) > 0 { + for k := range m.Selector { + data[i] = 0x12 + i++ + v := m.Selector[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ClusterIP))) + i += copy(data[i:], m.ClusterIP) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if len(m.ExternalIPs) > 0 { + for _, s := range m.ExternalIPs { + data[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.DeprecatedPublicIPs) > 0 { + for _, s := range m.DeprecatedPublicIPs { + data[i] = 0x32 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SessionAffinity))) + i += copy(data[i:], m.SessionAffinity) + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.LoadBalancerIP))) + i += copy(data[i:], m.LoadBalancerIP) + if 
len(m.LoadBalancerSourceRanges) > 0 { + for _, s := range m.LoadBalancerSourceRanges { + data[i] = 0x4a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ExternalName))) + i += copy(data[i:], m.ExternalName) + return i, nil +} + +func (m *ServiceStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServiceStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LoadBalancer.Size())) + n173, err := m.LoadBalancer.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n173 + return i, nil +} + +func (m *Sysctl) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Sysctl) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Value))) + i += copy(data[i:], m.Value) + return i, nil +} + +func (m *TCPSocketAction) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *TCPSocketAction) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Port.Size())) + n174, err := m.Port.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n174 + return i, nil +} + +func (m *Taint) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Taint) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Value))) + i += copy(data[i:], m.Value) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Effect))) + i += copy(data[i:], m.Effect) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TimeAdded.Size())) + n175, err := m.TimeAdded.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n175 + return i, nil +} + +func (m *Toleration) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Toleration) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) + i += copy(data[i:], m.Operator) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Value))) + i += copy(data[i:], m.Value) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Effect))) + i += 
copy(data[i:], m.Effect) + if m.TolerationSeconds != nil { + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TolerationSeconds)) + } + return i, nil +} + +func (m *Volume) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Volume) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.VolumeSource.Size())) + n176, err := m.VolumeSource.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n176 + return i, nil +} + +func (m *VolumeMount) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *VolumeMount) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x10 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MountPath))) + i += copy(data[i:], m.MountPath) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SubPath))) + i += copy(data[i:], m.SubPath) + return i, nil +} + +func (m *VolumeProjection) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *VolumeProjection) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Secret != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Secret.Size())) + n177, err := m.Secret.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n177 + } + if m.DownwardAPI != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DownwardAPI.Size())) + n178, err := m.DownwardAPI.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n178 + } + if m.ConfigMap != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.ConfigMap.Size())) + n179, err := m.ConfigMap.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n179 + } + return i, nil +} + +func (m *VolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *VolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.HostPath != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.HostPath.Size())) + n180, err := m.HostPath.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n180 + } + if m.EmptyDir != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.EmptyDir.Size())) + n181, err := m.EmptyDir.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n181 + } + if m.GCEPersistentDisk != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.GCEPersistentDisk.Size())) + n182, err := m.GCEPersistentDisk.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n182 + } + if m.AWSElasticBlockStore != nil { + data[i] = 
0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AWSElasticBlockStore.Size())) + n183, err := m.AWSElasticBlockStore.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n183 + } + if m.GitRepo != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.GitRepo.Size())) + n184, err := m.GitRepo.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n184 + } + if m.Secret != nil { + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Secret.Size())) + n185, err := m.Secret.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n185 + } + if m.NFS != nil { + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.NFS.Size())) + n186, err := m.NFS.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n186 + } + if m.ISCSI != nil { + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ISCSI.Size())) + n187, err := m.ISCSI.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n187 + } + if m.Glusterfs != nil { + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Glusterfs.Size())) + n188, err := m.Glusterfs.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n188 + } + if m.PersistentVolumeClaim != nil { + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PersistentVolumeClaim.Size())) + n189, err := m.PersistentVolumeClaim.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n189 + } + if m.RBD != nil { + data[i] = 0x5a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RBD.Size())) + n190, err := m.RBD.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n190 + } + if m.FlexVolume != nil { + data[i] = 0x62 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FlexVolume.Size())) + n191, err := m.FlexVolume.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n191 + } + if m.Cinder != nil { + data[i] = 0x6a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Cinder.Size())) + n192, err := m.Cinder.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n192 + } + if m.CephFS != nil { + data[i] = 0x72 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CephFS.Size())) + n193, err := m.CephFS.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n193 + } + if m.Flocker != nil { + data[i] = 0x7a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Flocker.Size())) + n194, err := m.Flocker.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n194 + } + if m.DownwardAPI != nil { + data[i] = 0x82 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DownwardAPI.Size())) + n195, err := m.DownwardAPI.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n195 + } + if m.FC != nil { + data[i] = 0x8a + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FC.Size())) + n196, err := m.FC.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n196 + } + if m.AzureFile != nil { + data[i] = 0x92 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AzureFile.Size())) + n197, err := m.AzureFile.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n197 + } + if m.ConfigMap != nil { + data[i] = 0x9a + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ConfigMap.Size())) + n198, err := m.ConfigMap.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n198 + } + if m.VsphereVolume != nil { + data[i] = 0xa2 + i++ + data[i] = 0x1 + i++ + i = 
encodeVarintGenerated(data, i, uint64(m.VsphereVolume.Size())) + n199, err := m.VsphereVolume.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n199 + } + if m.Quobyte != nil { + data[i] = 0xaa + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Quobyte.Size())) + n200, err := m.Quobyte.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n200 + } + if m.AzureDisk != nil { + data[i] = 0xb2 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AzureDisk.Size())) + n201, err := m.AzureDisk.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n201 + } + if m.PhotonPersistentDisk != nil { + data[i] = 0xba + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PhotonPersistentDisk.Size())) + n202, err := m.PhotonPersistentDisk.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n202 + } + if m.PortworxVolume != nil { + data[i] = 0xc2 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PortworxVolume.Size())) + n203, err := m.PortworxVolume.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n203 + } + if m.ScaleIO != nil { + data[i] = 0xca + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ScaleIO.Size())) + n204, err := m.ScaleIO.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n204 + } + if m.Projected != nil { + data[i] = 0xd2 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Projected.Size())) + n205, err := m.Projected.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n205 + } + return i, nil +} + +func (m *VsphereVirtualDiskVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *VsphereVirtualDiskVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.VolumePath))) + i += copy(data[i:], m.VolumePath) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + return i, nil +} + +func (m *WeightedPodAffinityTerm) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *WeightedPodAffinityTerm) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Weight)) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodAffinityTerm.Size())) + n206, err := m.PodAffinityTerm.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n206 + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = 
uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *AWSElasticBlockStoreVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Partition)) + n += 2 + return n +} + +func (m *Affinity) Size() (n int) { + var l int + _ = l + if m.NodeAffinity != nil { + l = m.NodeAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PodAffinity != nil { + l = m.PodAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PodAntiAffinity != nil { + l = m.PodAntiAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AttachedVolume) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DevicePath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AvoidPods) Size() (n int) { + var l int + _ = l + if len(m.PreferAvoidPods) > 0 { + for _, e := range m.PreferAvoidPods { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *AzureDiskVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.DiskName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DataDiskURI) + n += 1 + l + sovGenerated(uint64(l)) + if m.CachingMode != nil { + l = len(*m.CachingMode) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FSType != nil { + l = len(*m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReadOnly != nil { + n += 2 + } + return n +} + +func (m *AzureFileVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ShareName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Binding) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Capabilities) Size() (n int) { + var l int + _ = l + if len(m.Add) > 0 { + for _, s := range m.Add { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Drop) > 0 { + for _, s := range m.Drop { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CephFSVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SecretFile) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *CinderVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *ComponentCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ComponentStatus) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ComponentStatusList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ConfigMap) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Data) > 0 { + for k, v := range m.Data { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ConfigMapEnvSource) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *ConfigMapKeySelector) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *ConfigMapList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ConfigMapProjection) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *ConfigMapVolumeSource) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *Container) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.WorkingDir) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.LivenessProbe != nil { + l = m.LivenessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReadinessProbe != nil { + l = m.ReadinessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Lifecycle != nil { + l = m.Lifecycle.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.TerminationMessagePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImagePullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 3 + 
n += 3 + n += 3 + if len(m.EnvFrom) > 0 { + for _, e := range m.EnvFrom { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + l = len(m.TerminationMessagePolicy) + n += 2 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerImage) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.SizeBytes)) + return n +} + +func (m *ContainerPort) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HostPort)) + n += 1 + sovGenerated(uint64(m.ContainerPort)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HostIP) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerState) Size() (n int) { + var l int + _ = l + if m.Waiting != nil { + l = m.Waiting.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Running != nil { + l = m.Running.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Terminated != nil { + l = m.Terminated.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ContainerStateRunning) Size() (n int) { + var l int + _ = l + l = m.StartedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerStateTerminated) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ExitCode)) + n += 1 + sovGenerated(uint64(m.Signal)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.StartedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FinishedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerStateWaiting) Size() (n int) { + var l int + _ = l + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerStatus) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.State.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTerminationState.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 1 + sovGenerated(uint64(m.RestartCount)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImageID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DaemonEndpoint) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Port)) + return n +} + +func (m *DeleteOptions) Size() (n int) { + var l int + _ = l + if m.GracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.GracePeriodSeconds)) + } + if m.Preconditions != nil { + l = m.Preconditions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.OrphanDependents != nil { + n += 2 + } + if m.PropagationPolicy != nil { + l = len(*m.PropagationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DownwardAPIProjection) Size() (n int) { + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DownwardAPIVolumeFile) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.FieldRef != nil { + l = m.FieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResourceFieldRef != nil { + l = 
m.ResourceFieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Mode != nil { + n += 1 + sovGenerated(uint64(*m.Mode)) + } + return n +} + +func (m *DownwardAPIVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + return n +} + +func (m *EmptyDirVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Medium) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EndpointAddress) Size() (n int) { + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetRef != nil { + l = m.TargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + if m.NodeName != nil { + l = len(*m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EndpointPort) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EndpointSubset) Size() (n int) { + var l int + _ = l + if len(m.Addresses) > 0 { + for _, e := range m.Addresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NotReadyAddresses) > 0 { + for _, e := range m.NotReadyAddresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Endpoints) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subsets) > 0 { + for _, e := range m.Subsets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EndpointsList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EnvFromSource) Size() (n int) { + var l int + _ = l + l = len(m.Prefix) + n += 1 + l + sovGenerated(uint64(l)) + if m.ConfigMapRef != nil { + l = m.ConfigMapRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EnvVar) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + if m.ValueFrom != nil { + l = m.ValueFrom.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EnvVarSource) Size() (n int) { + var l int + _ = l + if m.FieldRef != nil { + l = m.FieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResourceFieldRef != nil { + l = m.ResourceFieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ConfigMapKeyRef != nil { + l = m.ConfigMapKeyRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SecretKeyRef != nil { + l = m.SecretKeyRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Event) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.InvolvedObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + 
sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FirstTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Count)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EventList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EventSource) Size() (n int) { + var l int + _ = l + l = len(m.Component) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExecAction) Size() (n int) { + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FCVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.TargetWWNs) > 0 { + for _, s := range m.TargetWWNs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Lun != nil { + n += 1 + sovGenerated(uint64(*m.Lun)) + } + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *FlexVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *FlockerVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.DatasetName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DatasetUUID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GCEPersistentDiskVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.PDName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Partition)) + n += 2 + return n +} + +func (m *GitRepoVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Repository) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Revision) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Directory) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GlusterfsVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.EndpointsName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *HTTPGetAction) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Scheme) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.HTTPHeaders) > 0 { + for _, e := range m.HTTPHeaders { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HTTPHeader) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Handler) Size() (n int) { + var l int + _ = l + if m.Exec != nil { + l = m.Exec.Size() + 
n += 1 + l + sovGenerated(uint64(l)) + } + if m.HTTPGet != nil { + l = m.HTTPGet.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TCPSocket != nil { + l = m.TCPSocket.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *HostPathVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ISCSIVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.TargetPortal) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.IQN) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Lun)) + l = len(m.ISCSIInterface) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Portals) > 0 { + for _, s := range m.Portals { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *KeyToPath) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.Mode != nil { + n += 1 + sovGenerated(uint64(*m.Mode)) + } + return n +} + +func (m *Lifecycle) Size() (n int) { + var l int + _ = l + if m.PostStart != nil { + l = m.PostStart.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PreStop != nil { + l = m.PreStop.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *LimitRange) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LimitRangeItem) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Max) > 0 { + for k, v := range m.Max { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Min) > 0 { + for k, v := range m.Min { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Default) > 0 { + for k, v := range m.Default { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.DefaultRequest) > 0 { + for k, v := range m.DefaultRequest { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.MaxLimitRequestRatio) > 0 { + for k, v := range m.MaxLimitRequestRatio { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *LimitRangeList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LimitRangeSpec) Size() (n int) { + var l int + _ = l + if len(m.Limits) > 0 { + for _, e := range m.Limits { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *List) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ListOptions) Size() (n int) { + var l int + _ = l + l = len(m.LabelSelector) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldSelector) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + return n +} + +func (m *LoadBalancerIngress) Size() (n int) { + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LoadBalancerStatus) Size() (n int) { + var l int + _ = l + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LocalObjectReference) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NFSVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Server) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Namespace) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NamespaceList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NamespaceSpec) Size() (n int) { + var l int + _ = l + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NamespaceStatus) Size() (n int) { + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Node) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeAddress) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Address) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeAffinity) Size() (n int) { + var l int + _ = l + if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { + l = m.RequiredDuringSchedulingIgnoredDuringExecution.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastHeartbeatTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeDaemonEndpoints) Size() (n int) { + var l int + _ = l + l 
= m.KubeletEndpoint.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeProxyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeResources) Size() (n int) { + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NodeSelector) Size() (n int) { + var l int + _ = l + if len(m.NodeSelectorTerms) > 0 { + for _, e := range m.NodeSelectorTerms { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeSelectorRequirement) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeSelectorTerm) Size() (n int) { + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeSpec) Size() (n int) { + var l int + _ = l + l = len(m.PodCIDR) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ExternalID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ProviderID) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Taints) > 0 { + for _, e := range m.Taints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeStatus) Size() (n int) { + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Allocatable) > 0 { + for k, v := range m.Allocatable { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Addresses) > 0 { + for _, e := range m.Addresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.DaemonEndpoints.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.NodeInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.VolumesInUse) > 0 { + for _, s := range m.VolumesInUse { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.VolumesAttached) > 0 { + for _, e := range m.VolumesAttached { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeSystemInfo) Size() (n int) { + var l int + _ = l + l = len(m.MachineID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SystemUUID) + n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.BootID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KernelVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.OSImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerRuntimeVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeletVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeProxyVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.OperatingSystem) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Architecture) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectFieldSelector) Size() (n int) { + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectMeta) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.GenerateName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SelfLink) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + l = m.CreationTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.DeletionTimestamp != nil { + l = m.DeletionTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DeletionGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.DeletionGracePeriodSeconds)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.OwnerReferences) > 0 { + for _, e := range m.OwnerReferences { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ClusterName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectReference) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PersistentVolume) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PersistentVolumeClaim) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PersistentVolumeClaimList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 
0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PersistentVolumeClaimSpec) Size() (n int) { + var l int + _ = l + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.StorageClassName != nil { + l = len(*m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PersistentVolumeClaimStatus) Size() (n int) { + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *PersistentVolumeClaimVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.ClaimName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *PersistentVolumeList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PersistentVolumeSource) Size() (n int) { + var l int + _ = l + if m.GCEPersistentDisk != nil { + l = m.GCEPersistentDisk.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AWSElasticBlockStore != nil { + l = m.AWSElasticBlockStore.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HostPath != nil { + l = m.HostPath.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Glusterfs != nil { + l = m.Glusterfs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NFS != nil { + l = m.NFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RBD != nil { + l = m.RBD.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ISCSI != nil { + l = m.ISCSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Cinder != nil { + l = m.Cinder.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CephFS != nil { + l = m.CephFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FC != nil { + l = m.FC.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Flocker != nil { + l = m.Flocker.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FlexVolume != nil { + l = m.FlexVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AzureFile != nil { + l = m.AzureFile.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VsphereVolume != nil { + l = m.VsphereVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Quobyte != nil { + l = m.Quobyte.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AzureDisk != nil { + l = m.AzureDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PhotonPersistentDisk != nil { + l = m.PhotonPersistentDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PortworxVolume != nil { + l = m.PortworxVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ScaleIO != nil { + l = m.ScaleIO.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PersistentVolumeSpec) Size() (n int) { + var l 
int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = m.PersistentVolumeSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ClaimRef != nil { + l = m.ClaimRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.PersistentVolumeReclaimPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PersistentVolumeStatus) Size() (n int) { + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PhotonPersistentDiskVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.PdID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Pod) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodAffinity) Size() (n int) { + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodAffinityTerm) Size() (n int) { + var l int + _ = l + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.TopologyKey) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodAntiAffinity) Size() (n int) { + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodAttachOptions) Size() (n int) { + var l int + _ = l + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodExecOptions) Size() (n int) { + var l int + _ = l + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Container) + n += 1 + l + 
sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodLogOptions) Size() (n int) { + var l int + _ = l + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if m.SinceSeconds != nil { + n += 1 + sovGenerated(uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + l = m.SinceTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.TailLines != nil { + n += 1 + sovGenerated(uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + n += 1 + sovGenerated(uint64(*m.LimitBytes)) + } + return n +} + +func (m *PodPortForwardOptions) Size() (n int) { + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + n += 1 + sovGenerated(uint64(e)) + } + } + return n +} + +func (m *PodProxyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityContext) Size() (n int) { + var l int + _ = l + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUser != nil { + n += 1 + sovGenerated(uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + n += 2 + } + if len(m.SupplementalGroups) > 0 { + for _, e := range m.SupplementalGroups { + n += 1 + sovGenerated(uint64(e)) + } + } + if m.FSGroup != nil { + n += 1 + sovGenerated(uint64(*m.FSGroup)) + } + return n +} + +func (m *PodSignature) Size() (n int) { + var l int + _ = l + if m.PodController != nil { + l = m.PodController.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodSpec) Size() (n int) { + var l int + _ = l + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RestartPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.TerminationGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds)) + } + if m.ActiveDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + l = len(m.DNSPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeprecatedServiceAccount) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + n += 2 + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Hostname) + n += 2 + l + sovGenerated(uint64(l)) + l = len(m.Subdomain) + n += 2 + l + sovGenerated(uint64(l)) + if m.Affinity != nil { + l = m.Affinity.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + l = len(m.SchedulerName) + n += 2 + l + 
sovGenerated(uint64(l)) + if len(m.InitContainers) > 0 { + for _, e := range m.InitContainers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.AutomountServiceAccountToken != nil { + n += 3 + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodStatus) Size() (n int) { + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HostIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodIP) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartTime != nil { + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ContainerStatuses) > 0 { + for _, e := range m.ContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.QOSClass) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.InitContainerStatuses) > 0 { + for _, e := range m.InitContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodStatusResult) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodTemplate) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodTemplateList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodTemplateSpec) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PortworxVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Preconditions) Size() (n int) { + var l int + _ = l + if m.UID != nil { + l = len(*m.UID) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PreferAvoidPodsEntry) Size() (n int) { + var l int + _ = l + l = m.PodSignature.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.EvictionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PreferredSchedulingTerm) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Weight)) + l = m.Preference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Probe) Size() (n int) { + var l int + _ = l + l = m.Handler.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.InitialDelaySeconds)) + n += 1 + sovGenerated(uint64(m.TimeoutSeconds)) + n += 1 + sovGenerated(uint64(m.PeriodSeconds)) + n += 1 + sovGenerated(uint64(m.SuccessThreshold)) + n += 1 + sovGenerated(uint64(m.FailureThreshold)) + return n +} + +func (m *ProjectedVolumeSource) Size() (n int) { + var l int + _ = l + if 
len(m.Sources) > 0 { + for _, e := range m.Sources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + return n +} + +func (m *QuobyteVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Registry) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Volume) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RBDVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RBDImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RBDPool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RadosUser) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Keyring) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *RangeAllocation) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Range) + n += 1 + l + sovGenerated(uint64(l)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ReplicationController) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicationControllerCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicationControllerList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ReplicationControllerSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Template != nil { + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + return n +} + +func (m *ReplicationControllerStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceFieldSelector) Size() (n int) { + var l int + _ = l + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Divisor.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceQuota) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceQuotaList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceQuotaSpec) Size() (n int) { + var l int + _ = l + if len(m.Hard) > 0 { + for k, v := range m.Hard { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceQuotaStatus) Size() (n int) { + var l int + _ = l + if len(m.Hard) > 0 { + for k, v := range m.Hard { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Used) > 0 { + for k, v := range m.Used { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ResourceRequirements) Size() (n int) { + var l int + _ = l + if len(m.Limits) > 0 { + for k, v := range m.Limits { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Requests) > 0 { + for k, v := range m.Requests { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SELinuxOptions) Size() (n int) { + var l int + _ = l + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Role) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleIOVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Gateway) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.System) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + l = len(m.ProtectionDomain) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageMode) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Secret) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Data) > 0 { + for k, v := range m.Data { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + 
sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.StringData) > 0 { + for k, v := range m.StringData { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SecretEnvSource) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretKeySelector) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SecretProjection) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecurityContext) Size() (n int) { + var l int + _ = l + if m.Capabilities != nil { + l = m.Capabilities.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Privileged != nil { + n += 2 + } + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUser != nil { + n += 1 + sovGenerated(uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + n += 2 + } + if m.ReadOnlyRootFilesystem != nil { + n += 2 + } + return n +} + +func (m *SerializedReference) Size() (n int) { + var l int + _ = l + l = m.Reference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Service) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceAccount) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AutomountServiceAccountToken != nil { + n += 2 + } + return n +} + +func (m *ServiceAccountList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { 
+ l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServicePort) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Port)) + l = m.TargetPort.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.NodePort)) + return n +} + +func (m *ServiceProxyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceSpec) Size() (n int) { + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.ClusterIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ExternalIPs) > 0 { + for _, s := range m.ExternalIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.DeprecatedPublicIPs) > 0 { + for _, s := range m.DeprecatedPublicIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SessionAffinity) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.LoadBalancerIP) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.LoadBalancerSourceRanges) > 0 { + for _, s := range m.LoadBalancerSourceRanges { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ExternalName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceStatus) Size() (n int) { + var l int + _ = l + l = m.LoadBalancer.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Sysctl) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TCPSocketAction) Size() (n int) { + var l int + _ = l + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Taint) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TimeAdded.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Toleration) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + if m.TolerationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TolerationSeconds)) + } + return n +} + +func (m *Volume) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.VolumeSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeMount) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.MountPath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SubPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeProjection) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DownwardAPI != 
nil { + l = m.DownwardAPI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeSource) Size() (n int) { + var l int + _ = l + if m.HostPath != nil { + l = m.HostPath.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EmptyDir != nil { + l = m.EmptyDir.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GCEPersistentDisk != nil { + l = m.GCEPersistentDisk.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AWSElasticBlockStore != nil { + l = m.AWSElasticBlockStore.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitRepo != nil { + l = m.GitRepo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NFS != nil { + l = m.NFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ISCSI != nil { + l = m.ISCSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Glusterfs != nil { + l = m.Glusterfs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PersistentVolumeClaim != nil { + l = m.PersistentVolumeClaim.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RBD != nil { + l = m.RBD.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FlexVolume != nil { + l = m.FlexVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Cinder != nil { + l = m.Cinder.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CephFS != nil { + l = m.CephFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Flocker != nil { + l = m.Flocker.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DownwardAPI != nil { + l = m.DownwardAPI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.FC != nil { + l = m.FC.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.AzureFile != nil { + l = m.AzureFile.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.VsphereVolume != nil { + l = m.VsphereVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Quobyte != nil { + l = m.Quobyte.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.AzureDisk != nil { + l = m.AzureDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PhotonPersistentDisk != nil { + l = m.PhotonPersistentDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PortworxVolume != nil { + l = m.PortworxVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ScaleIO != nil { + l = m.ScaleIO.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Projected != nil { + l = m.Projected.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VsphereVirtualDiskVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WeightedPodAffinityTerm) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Weight)) + l = m.PodAffinityTerm.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AWSElasticBlockStoreVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AWSElasticBlockStoreVolumeSource{`, + `VolumeID:` + 
fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Affinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Affinity{`, + `NodeAffinity:` + strings.Replace(fmt.Sprintf("%v", this.NodeAffinity), "NodeAffinity", "NodeAffinity", 1) + `,`, + `PodAffinity:` + strings.Replace(fmt.Sprintf("%v", this.PodAffinity), "PodAffinity", "PodAffinity", 1) + `,`, + `PodAntiAffinity:` + strings.Replace(fmt.Sprintf("%v", this.PodAntiAffinity), "PodAntiAffinity", "PodAntiAffinity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AttachedVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AttachedVolume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, + `}`, + }, "") + return s +} +func (this *AvoidPods) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AvoidPods{`, + `PreferAvoidPods:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferAvoidPods), "PreferAvoidPodsEntry", "PreferAvoidPodsEntry", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *AzureDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureDiskVolumeSource{`, + `DiskName:` + fmt.Sprintf("%v", this.DiskName) + `,`, + `DataDiskURI:` + fmt.Sprintf("%v", this.DataDiskURI) + `,`, + `CachingMode:` + valueToStringGenerated(this.CachingMode) + `,`, + `FSType:` + valueToStringGenerated(this.FSType) + `,`, + `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *AzureFileVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureFileVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Binding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Binding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Capabilities) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Capabilities{`, + `Add:` + fmt.Sprintf("%v", this.Add) + `,`, + `Drop:` + fmt.Sprintf("%v", this.Drop) + `,`, + `}`, + }, "") + return s +} +func (this *CephFSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CephFSVolumeSource{`, + `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *CinderVolumeSource) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&CinderVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *ComponentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ComponentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *ComponentStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ComponentStatus{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ComponentCondition", "ComponentCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ComponentStatusList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ComponentStatusList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ComponentStatus", "ComponentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMap) String() string { + if this == nil { + return "nil" + } + keysForData := make([]string, 0, len(this.Data)) + for k := range this.Data { + keysForData = append(keysForData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + mapStringForData := "map[string]string{" + for _, k := range keysForData { + mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + } + mapStringForData += "}" + s := strings.Join([]string{`&ConfigMap{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + mapStringForData + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapEnvSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapEnvSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapKeySelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapKeySelector{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ConfigMap", "ConfigMap", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + 
return s +} +func (this *ConfigMapProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapProjection{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapVolumeSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *Container) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Container{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + `,`, + `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "EnvVar", 1), `&`, ``, 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeMounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeMounts), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + `,`, + `LivenessProbe:` + strings.Replace(fmt.Sprintf("%v", this.LivenessProbe), "Probe", "Probe", 1) + `,`, + `ReadinessProbe:` + strings.Replace(fmt.Sprintf("%v", this.ReadinessProbe), "Probe", "Probe", 1) + `,`, + `Lifecycle:` + strings.Replace(fmt.Sprintf("%v", this.Lifecycle), "Lifecycle", "Lifecycle", 1) + `,`, + `TerminationMessagePath:` + fmt.Sprintf("%v", this.TerminationMessagePath) + `,`, + `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, + `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "SecurityContext", "SecurityContext", 1) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + `,`, + `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerImage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerImage{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `SizeBytes:` + fmt.Sprintf("%v", this.SizeBytes) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerPort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `HostPort:` + fmt.Sprintf("%v", 
this.HostPort) + `,`, + `ContainerPort:` + fmt.Sprintf("%v", this.ContainerPort) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerState) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerState{`, + `Waiting:` + strings.Replace(fmt.Sprintf("%v", this.Waiting), "ContainerStateWaiting", "ContainerStateWaiting", 1) + `,`, + `Running:` + strings.Replace(fmt.Sprintf("%v", this.Running), "ContainerStateRunning", "ContainerStateRunning", 1) + `,`, + `Terminated:` + strings.Replace(fmt.Sprintf("%v", this.Terminated), "ContainerStateTerminated", "ContainerStateTerminated", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateRunning) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateRunning{`, + `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateTerminated) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateTerminated{`, + `ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`, + `Signal:` + fmt.Sprintf("%v", this.Signal) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `FinishedAt:` + strings.Replace(strings.Replace(this.FinishedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateWaiting) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateWaiting{`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `State:` + strings.Replace(strings.Replace(this.State.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, + `LastTerminationState:` + strings.Replace(strings.Replace(this.LastTerminationState.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, + `Ready:` + fmt.Sprintf("%v", this.Ready) + `,`, + `RestartCount:` + fmt.Sprintf("%v", this.RestartCount) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonEndpoint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonEndpoint{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *DeleteOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteOptions{`, + `GracePeriodSeconds:` + valueToStringGenerated(this.GracePeriodSeconds) + `,`, + `Preconditions:` + strings.Replace(fmt.Sprintf("%v", this.Preconditions), "Preconditions", "Preconditions", 1) + `,`, + `OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`, + 
`PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DownwardAPIProjection{`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIVolumeFile) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DownwardAPIVolumeFile{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `FieldRef:` + strings.Replace(fmt.Sprintf("%v", this.FieldRef), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, + `ResourceFieldRef:` + strings.Replace(fmt.Sprintf("%v", this.ResourceFieldRef), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, + `Mode:` + valueToStringGenerated(this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DownwardAPIVolumeSource{`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `}`, + }, "") + return s +} +func (this *EmptyDirVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EmptyDirVolumeSource{`, + `Medium:` + fmt.Sprintf("%v", this.Medium) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointAddress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `TargetRef:` + strings.Replace(fmt.Sprintf("%v", this.TargetRef), "ObjectReference", "ObjectReference", 1) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointPort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSubset) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointSubset{`, + `Addresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Addresses), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + `,`, + `NotReadyAddresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NotReadyAddresses), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + `,`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Endpoints) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Endpoints{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subsets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subsets), "EndpointSubset", "EndpointSubset", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointsList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointsList{`, + 
`ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Endpoints", "Endpoints", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EnvFromSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvFromSource{`, + `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, + `ConfigMapRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapRef), "ConfigMapEnvSource", "ConfigMapEnvSource", 1) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretEnvSource", "SecretEnvSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EnvVar) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvVar{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `ValueFrom:` + strings.Replace(fmt.Sprintf("%v", this.ValueFrom), "EnvVarSource", "EnvVarSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EnvVarSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvVarSource{`, + `FieldRef:` + strings.Replace(fmt.Sprintf("%v", this.FieldRef), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, + `ResourceFieldRef:` + strings.Replace(fmt.Sprintf("%v", this.ResourceFieldRef), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, + `ConfigMapKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapKeyRef), "ConfigMapKeySelector", "ConfigMapKeySelector", 1) + `,`, + `SecretKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretKeyRef), "SecretKeySelector", "SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Event) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Event{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `InvolvedObject:` + strings.Replace(strings.Replace(this.InvolvedObject.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "EventSource", "EventSource", 1), `&`, ``, 1) + `,`, + `FirstTimestamp:` + strings.Replace(strings.Replace(this.FirstTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTimestamp:` + strings.Replace(strings.Replace(this.LastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *EventList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Event", "Event", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EventSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSource{`, + `Component:` + fmt.Sprintf("%v", this.Component) + `,`, + `Host:` + fmt.Sprintf("%v", 
this.Host) + `,`, + `}`, + }, "") + return s +} +func (this *ExecAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecAction{`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s +} +func (this *FCVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FCVolumeSource{`, + `TargetWWNs:` + fmt.Sprintf("%v", this.TargetWWNs) + `,`, + `Lun:` + valueToStringGenerated(this.Lun) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *FlexVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k := range this.Options { + keysForOptions = append(keysForOptions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&FlexVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Options:` + mapStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *FlockerVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlockerVolumeSource{`, + `DatasetName:` + fmt.Sprintf("%v", this.DatasetName) + `,`, + `DatasetUUID:` + fmt.Sprintf("%v", this.DatasetUUID) + `,`, + `}`, + }, "") + return s +} +func (this *GCEPersistentDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GCEPersistentDiskVolumeSource{`, + `PDName:` + fmt.Sprintf("%v", this.PDName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *GitRepoVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitRepoVolumeSource{`, + `Repository:` + fmt.Sprintf("%v", this.Repository) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `Directory:` + fmt.Sprintf("%v", this.Directory) + `,`, + `}`, + }, "") + return s +} +func (this *GlusterfsVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GlusterfsVolumeSource{`, + `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPGetAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPGetAction{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Port:` + strings.Replace(strings.Replace(this.Port.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `Scheme:` + fmt.Sprintf("%v", this.Scheme) + `,`, + `HTTPHeaders:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HTTPHeaders), "HTTPHeader", "HTTPHeader", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPHeader) 
String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPHeader{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Handler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Handler{`, + `Exec:` + strings.Replace(fmt.Sprintf("%v", this.Exec), "ExecAction", "ExecAction", 1) + `,`, + `HTTPGet:` + strings.Replace(fmt.Sprintf("%v", this.HTTPGet), "HTTPGetAction", "HTTPGetAction", 1) + `,`, + `TCPSocket:` + strings.Replace(fmt.Sprintf("%v", this.TCPSocket), "TCPSocketAction", "TCPSocketAction", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HostPathVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostPathVolumeSource{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *ISCSIVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ISCSIVolumeSource{`, + `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, + `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, + `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, + `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, + `}`, + }, "") + return s +} +func (this *KeyToPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KeyToPath{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Mode:` + valueToStringGenerated(this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *Lifecycle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Lifecycle{`, + `PostStart:` + strings.Replace(fmt.Sprintf("%v", this.PostStart), "Handler", "Handler", 1) + `,`, + `PreStop:` + strings.Replace(fmt.Sprintf("%v", this.PreStop), "Handler", "Handler", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitRange{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LimitRangeSpec", "LimitRangeSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeItem) String() string { + if this == nil { + return "nil" + } + keysForMax := make([]string, 0, len(this.Max)) + for k := range this.Max { + keysForMax = append(keysForMax, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMax) + mapStringForMax := "ResourceList{" + for _, k := range keysForMax { + mapStringForMax += fmt.Sprintf("%v: %v,", k, this.Max[ResourceName(k)]) + } + mapStringForMax += "}" + keysForMin := make([]string, 0, len(this.Min)) + for k := range this.Min { + keysForMin = append(keysForMin, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMin) + mapStringForMin := "ResourceList{" + for _, k := range keysForMin { + mapStringForMin += fmt.Sprintf("%v: %v,", k, this.Min[ResourceName(k)]) + } + mapStringForMin += "}" + keysForDefault := make([]string, 0, len(this.Default)) + for k := range this.Default { + keysForDefault = append(keysForDefault, string(k)) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) + mapStringForDefault := "ResourceList{" + for _, k := range keysForDefault { + mapStringForDefault += fmt.Sprintf("%v: %v,", k, this.Default[ResourceName(k)]) + } + mapStringForDefault += "}" + keysForDefaultRequest := make([]string, 0, len(this.DefaultRequest)) + for k := range this.DefaultRequest { + keysForDefaultRequest = append(keysForDefaultRequest, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) + mapStringForDefaultRequest := "ResourceList{" + for _, k := range keysForDefaultRequest { + mapStringForDefaultRequest += fmt.Sprintf("%v: %v,", k, this.DefaultRequest[ResourceName(k)]) + } + mapStringForDefaultRequest += "}" + keysForMaxLimitRequestRatio := make([]string, 0, len(this.MaxLimitRequestRatio)) + for k := range this.MaxLimitRequestRatio { + keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) + mapStringForMaxLimitRequestRatio := "ResourceList{" + for _, k := range keysForMaxLimitRequestRatio { + mapStringForMaxLimitRequestRatio += fmt.Sprintf("%v: %v,", k, this.MaxLimitRequestRatio[ResourceName(k)]) + } + mapStringForMaxLimitRequestRatio += "}" + s := strings.Join([]string{`&LimitRangeItem{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Max:` + mapStringForMax + `,`, + `Min:` + mapStringForMin + `,`, + `Default:` + mapStringForDefault + `,`, + `DefaultRequest:` + mapStringForDefaultRequest + `,`, + `MaxLimitRequestRatio:` + mapStringForMaxLimitRequestRatio + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitRangeList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "LimitRange", "LimitRange", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitRangeSpec{`, + `Limits:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Limits), "LimitRangeItem", "LimitRangeItem", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *List) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&List{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListOptions{`, + `LabelSelector:` + fmt.Sprintf("%v", this.LabelSelector) + `,`, + `FieldSelector:` + fmt.Sprintf("%v", this.FieldSelector) + `,`, + `Watch:` + fmt.Sprintf("%v", this.Watch) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *LoadBalancerIngress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LoadBalancerIngress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostname:` + fmt.Sprintf("%v", 
this.Hostname) + `,`, + `}`, + }, "") + return s +} +func (this *LoadBalancerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LoadBalancerStatus{`, + `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "LoadBalancerIngress", "LoadBalancerIngress", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LocalObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalObjectReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *NFSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NFSVolumeSource{`, + `Server:` + fmt.Sprintf("%v", this.Server) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Namespace) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Namespace{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NamespaceSpec", "NamespaceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NamespaceStatus", "NamespaceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamespaceList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamespaceSpec{`, + `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamespaceStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `}`, + }, "") + return s +} +func (this *Node) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Node{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NodeSpec", "NodeSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NodeStatus", "NodeStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeAddress{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Address:` + fmt.Sprintf("%v", this.Address) + `,`, + `}`, + }, "") + return s +} +func (this *NodeAffinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "NodeSelector", "NodeSelector", 1) + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "PreferredSchedulingTerm", "PreferredSchedulingTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastHeartbeatTime:` + strings.Replace(strings.Replace(this.LastHeartbeatTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *NodeDaemonEndpoints) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeDaemonEndpoints{`, + `KubeletEndpoint:` + strings.Replace(strings.Replace(this.KubeletEndpoint.String(), "DaemonEndpoint", "DaemonEndpoint", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Node", "Node", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *NodeResources) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&NodeResources{`, + `Capacity:` + mapStringForCapacity + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSelector{`, + `NodeSelectorTerms:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NodeSelectorTerms), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSelectorRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelectorTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSelectorTerm{`, + `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSpec) String() string { + if this == nil { + 
return "nil" + } + s := strings.Join([]string{`&NodeSpec{`, + `PodCIDR:` + fmt.Sprintf("%v", this.PodCIDR) + `,`, + `ExternalID:` + fmt.Sprintf("%v", this.ExternalID) + `,`, + `ProviderID:` + fmt.Sprintf("%v", this.ProviderID) + `,`, + `Unschedulable:` + fmt.Sprintf("%v", this.Unschedulable) + `,`, + `Taints:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Taints), "Taint", "Taint", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeStatus) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + keysForAllocatable := make([]string, 0, len(this.Allocatable)) + for k := range this.Allocatable { + keysForAllocatable = append(keysForAllocatable, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) + mapStringForAllocatable := "ResourceList{" + for _, k := range keysForAllocatable { + mapStringForAllocatable += fmt.Sprintf("%v: %v,", k, this.Allocatable[ResourceName(k)]) + } + mapStringForAllocatable += "}" + s := strings.Join([]string{`&NodeStatus{`, + `Capacity:` + mapStringForCapacity + `,`, + `Allocatable:` + mapStringForAllocatable + `,`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "NodeCondition", "NodeCondition", 1), `&`, ``, 1) + `,`, + `Addresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Addresses), "NodeAddress", "NodeAddress", 1), `&`, ``, 1) + `,`, + `DaemonEndpoints:` + strings.Replace(strings.Replace(this.DaemonEndpoints.String(), "NodeDaemonEndpoints", "NodeDaemonEndpoints", 1), `&`, ``, 1) + `,`, + `NodeInfo:` + strings.Replace(strings.Replace(this.NodeInfo.String(), "NodeSystemInfo", "NodeSystemInfo", 1), `&`, ``, 1) + `,`, + `Images:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Images), "ContainerImage", "ContainerImage", 1), `&`, ``, 1) + `,`, + `VolumesInUse:` + fmt.Sprintf("%v", this.VolumesInUse) + `,`, + `VolumesAttached:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumesAttached), "AttachedVolume", "AttachedVolume", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSystemInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSystemInfo{`, + `MachineID:` + fmt.Sprintf("%v", this.MachineID) + `,`, + `SystemUUID:` + fmt.Sprintf("%v", this.SystemUUID) + `,`, + `BootID:` + fmt.Sprintf("%v", this.BootID) + `,`, + `KernelVersion:` + fmt.Sprintf("%v", this.KernelVersion) + `,`, + `OSImage:` + fmt.Sprintf("%v", this.OSImage) + `,`, + `ContainerRuntimeVersion:` + fmt.Sprintf("%v", this.ContainerRuntimeVersion) + `,`, + `KubeletVersion:` + fmt.Sprintf("%v", this.KubeletVersion) + `,`, + `KubeProxyVersion:` + fmt.Sprintf("%v", this.KubeProxyVersion) + `,`, + `OperatingSystem:` + fmt.Sprintf("%v", this.OperatingSystem) + `,`, + `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectFieldSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectFieldSelector{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + 
`FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMeta) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&ObjectMeta{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `GenerateName:` + fmt.Sprintf("%v", this.GenerateName) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `CreationTimestamp:` + strings.Replace(strings.Replace(this.CreationTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `DeletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.DeletionTimestamp), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `DeletionGracePeriodSeconds:` + valueToStringGenerated(this.DeletionGracePeriodSeconds) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `OwnerReferences:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OwnerReferences), "OwnerReference", "k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference", 1), `&`, ``, 1) + `,`, + `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, + `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolume{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeSpec", "PersistentVolumeSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeStatus", "PersistentVolumeStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaim) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&PersistentVolumeClaim{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeClaimSpec", "PersistentVolumeClaimSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeClaimStatus", "PersistentVolumeClaimStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PersistentVolumeClaim", "PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimSpec{`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimStatus) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&PersistentVolumeClaimStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `Capacity:` + mapStringForCapacity + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimVolumeSource{`, + `ClaimName:` + fmt.Sprintf("%v", this.ClaimName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PersistentVolume", "PersistentVolume", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeSource{`, + `GCEPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) 
+ `,`, + `AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, + `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, + `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, + `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDVolumeSource", "RBDVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIVolumeSource", "ISCSIVolumeSource", 1) + `,`, + `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, + `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSVolumeSource", "CephFSVolumeSource", 1) + `,`, + `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, + `Flocker:` + strings.Replace(fmt.Sprintf("%v", this.Flocker), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, + `FlexVolume:` + strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexVolumeSource", "FlexVolumeSource", 1) + `,`, + `AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFileVolumeSource", "AzureFileVolumeSource", 1) + `,`, + `VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, + `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, + `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, + `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, + `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, + `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeSpec) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&PersistentVolumeSpec{`, + `Capacity:` + mapStringForCapacity + `,`, + `PersistentVolumeSource:` + strings.Replace(strings.Replace(this.PersistentVolumeSource.String(), "PersistentVolumeSource", "PersistentVolumeSource", 1), `&`, ``, 1) + `,`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `ClaimRef:` + strings.Replace(fmt.Sprintf("%v", this.ClaimRef), "ObjectReference", "ObjectReference", 1) + `,`, + `PersistentVolumeReclaimPolicy:` + fmt.Sprintf("%v", this.PersistentVolumeReclaimPolicy) + `,`, + `StorageClassName:` + fmt.Sprintf("%v", this.StorageClassName) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeStatus) String() string { + if this == nil { + return "nil" + } 
+ s := strings.Join([]string{`&PersistentVolumeStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `}`, + }, "") + return s +} +func (this *PhotonPersistentDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PhotonPersistentDiskVolumeSource{`, + `PdID:` + fmt.Sprintf("%v", this.PdID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `}`, + }, "") + return s +} +func (this *Pod) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Pod{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodAffinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodAffinityTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAffinityTerm{`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, + `}`, + }, "") + return s +} +func (this *PodAntiAffinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAntiAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodAttachOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAttachOptions{`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} +func (this *PodCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), 
`&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PodExecOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodExecOptions{`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s +} +func (this *PodList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Pod", "Pod", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodLogOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodLogOptions{`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, + `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, + `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, + `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, + `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, + `}`, + }, "") + return s +} +func (this *PodPortForwardOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodPortForwardOptions{`, + `Ports:` + fmt.Sprintf("%v", this.Ports) + `,`, + `}`, + }, "") + return s +} +func (this *PodProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityContext{`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "SELinuxOptions", 1) + `,`, + `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, + `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, + `SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`, + `FSGroup:` + valueToStringGenerated(this.FSGroup) + `,`, + `}`, + }, "") + return s +} +func (this *PodSignature) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSignature{`, + `PodController:` + strings.Replace(fmt.Sprintf("%v", this.PodController), "OwnerReference", "k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSpec) String() string { + if this == nil { + return "nil" + } + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&PodSpec{`, + `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "Volume", 1), `&`, ``, 1) + `,`, + `Containers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Containers), "Container", "Container", 1), `&`, ``, 1) + `,`, + `RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`, + `TerminationGracePeriodSeconds:` + valueToStringGenerated(this.TerminationGracePeriodSeconds) + `,`, + `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, + `DNSPolicy:` + fmt.Sprintf("%v", this.DNSPolicy) + `,`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, + `DeprecatedServiceAccount:` + fmt.Sprintf("%v", this.DeprecatedServiceAccount) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, + `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, + `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, + `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "PodSecurityContext", 1) + `,`, + `ImagePullSecrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ImagePullSecrets), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, + `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "Affinity", 1) + `,`, + `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, + `InitContainers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.InitContainers), "Container", "Container", 1), `&`, ``, 1) + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `Tolerations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Tolerations), "Toleration", "Toleration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "PodCondition", "PodCondition", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `PodIP:` + fmt.Sprintf("%v", this.PodIP) + `,`, + `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `ContainerStatuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ContainerStatuses), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + `,`, + `QOSClass:` + fmt.Sprintf("%v", this.QOSClass) + `,`, + `InitContainerStatuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.InitContainerStatuses), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodStatusResult) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodStatusResult{`, + `ObjectMeta:` + 
strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplateList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodTemplateList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodTemplate", "PodTemplate", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PortworxVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortworxVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Preconditions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Preconditions{`, + `UID:` + valueToStringGenerated(this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *PreferAvoidPodsEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PreferAvoidPodsEntry{`, + `PodSignature:` + strings.Replace(strings.Replace(this.PodSignature.String(), "PodSignature", "PodSignature", 1), `&`, ``, 1) + `,`, + `EvictionTime:` + strings.Replace(strings.Replace(this.EvictionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PreferredSchedulingTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PreferredSchedulingTerm{`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `Preference:` + strings.Replace(strings.Replace(this.Preference.String(), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Probe) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Probe{`, + `Handler:` + strings.Replace(strings.Replace(this.Handler.String(), "Handler", "Handler", 1), `&`, ``, 1) + `,`, + `InitialDelaySeconds:` + fmt.Sprintf("%v", this.InitialDelaySeconds) + `,`, + `TimeoutSeconds:` + fmt.Sprintf("%v", this.TimeoutSeconds) + `,`, + `PeriodSeconds:` + fmt.Sprintf("%v", 
this.PeriodSeconds) + `,`, + `SuccessThreshold:` + fmt.Sprintf("%v", this.SuccessThreshold) + `,`, + `FailureThreshold:` + fmt.Sprintf("%v", this.FailureThreshold) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectedVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProjectedVolumeSource{`, + `Sources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Sources), "VolumeProjection", "VolumeProjection", 1), `&`, ``, 1) + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `}`, + }, "") + return s +} +func (this *QuobyteVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QuobyteVolumeSource{`, + `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, + `Volume:` + fmt.Sprintf("%v", this.Volume) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `}`, + }, "") + return s +} +func (this *RBDVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RBDVolumeSource{`, + `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, + `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, + `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, + `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *RangeAllocation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RangeAllocation{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Range:` + fmt.Sprintf("%v", this.Range) + `,`, + `Data:` + valueToStringGenerated(this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationController) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationController{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicationControllerSpec", "ReplicationControllerSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicationControllerStatus", "ReplicationControllerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationControllerCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationControllerList{`, + `ListMeta:` + 
strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicationController", "ReplicationController", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerSpec) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ReplicationControllerSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `Template:` + strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "PodTemplateSpec", 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationControllerStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicationControllerCondition", "ReplicationControllerCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceFieldSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceFieldSelector{`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Divisor:` + strings.Replace(strings.Replace(this.Divisor.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuota) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceQuota{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceQuotaSpec", "ResourceQuotaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceQuotaStatus", "ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceQuotaList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ResourceQuota", "ResourceQuota", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaSpec) String() string { + if this == nil { + return "nil" + } + keysForHard := make([]string, 0, len(this.Hard)) + 
for k := range this.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + mapStringForHard := "ResourceList{" + for _, k := range keysForHard { + mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + } + mapStringForHard += "}" + s := strings.Join([]string{`&ResourceQuotaSpec{`, + `Hard:` + mapStringForHard + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaStatus) String() string { + if this == nil { + return "nil" + } + keysForHard := make([]string, 0, len(this.Hard)) + for k := range this.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + mapStringForHard := "ResourceList{" + for _, k := range keysForHard { + mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + } + mapStringForHard += "}" + keysForUsed := make([]string, 0, len(this.Used)) + for k := range this.Used { + keysForUsed = append(keysForUsed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) + mapStringForUsed := "ResourceList{" + for _, k := range keysForUsed { + mapStringForUsed += fmt.Sprintf("%v: %v,", k, this.Used[ResourceName(k)]) + } + mapStringForUsed += "}" + s := strings.Join([]string{`&ResourceQuotaStatus{`, + `Hard:` + mapStringForHard + `,`, + `Used:` + mapStringForUsed + `,`, + `}`, + }, "") + return s +} +func (this *ResourceRequirements) String() string { + if this == nil { + return "nil" + } + keysForLimits := make([]string, 0, len(this.Limits)) + for k := range this.Limits { + keysForLimits = append(keysForLimits, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + mapStringForLimits := "ResourceList{" + for _, k := range keysForLimits { + mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)]) + } + mapStringForLimits += "}" + keysForRequests := make([]string, 0, len(this.Requests)) + for k := range this.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + mapStringForRequests := "ResourceList{" + for _, k := range keysForRequests { + mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)]) + } + mapStringForRequests += "}" + s := strings.Join([]string{`&ResourceRequirements{`, + `Limits:` + mapStringForLimits + `,`, + `Requests:` + mapStringForRequests + `,`, + `}`, + }, "") + return s +} +func (this *SELinuxOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SELinuxOptions{`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleIOVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleIOVolumeSource{`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `System:` + fmt.Sprintf("%v", this.System) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, + `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, + `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, + `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, + `VolumeName:` + fmt.Sprintf("%v", 
this.VolumeName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Secret) String() string { + if this == nil { + return "nil" + } + keysForData := make([]string, 0, len(this.Data)) + for k := range this.Data { + keysForData = append(keysForData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + mapStringForData := "map[string][]byte{" + for _, k := range keysForData { + mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + } + mapStringForData += "}" + keysForStringData := make([]string, 0, len(this.StringData)) + for k := range this.StringData { + keysForStringData = append(keysForStringData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) + mapStringForStringData := "map[string]string{" + for _, k := range keysForStringData { + mapStringForStringData += fmt.Sprintf("%v: %v,", k, this.StringData[k]) + } + mapStringForStringData += "}" + s := strings.Join([]string{`&Secret{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + mapStringForData + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `StringData:` + mapStringForStringData + `,`, + `}`, + }, "") + return s +} +func (this *SecretEnvSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretEnvSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretKeySelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretKeySelector{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Secret", "Secret", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SecretProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretProjection{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, 
+ `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecurityContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecurityContext{`, + `Capabilities:` + strings.Replace(fmt.Sprintf("%v", this.Capabilities), "Capabilities", "Capabilities", 1) + `,`, + `Privileged:` + valueToStringGenerated(this.Privileged) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "SELinuxOptions", 1) + `,`, + `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, + `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, + `ReadOnlyRootFilesystem:` + valueToStringGenerated(this.ReadOnlyRootFilesystem) + `,`, + `}`, + }, "") + return s +} +func (this *SerializedReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SerializedReference{`, + `Reference:` + strings.Replace(strings.Replace(this.Reference.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Service) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Service{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceStatus", "ServiceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccount{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Secrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Secrets), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `ImagePullSecrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ImagePullSecrets), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ServiceAccount", "ServiceAccount", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Service", "Service", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServicePort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServicePort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Port:` + fmt.Sprintf("%v", 
this.Port) + `,`, + `TargetPort:` + strings.Replace(strings.Replace(this.TargetPort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceSpec) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ServiceSpec{`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "ServicePort", "ServicePort", 1), `&`, ``, 1) + `,`, + `Selector:` + mapStringForSelector + `,`, + `ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ExternalIPs:` + fmt.Sprintf("%v", this.ExternalIPs) + `,`, + `DeprecatedPublicIPs:` + fmt.Sprintf("%v", this.DeprecatedPublicIPs) + `,`, + `SessionAffinity:` + fmt.Sprintf("%v", this.SessionAffinity) + `,`, + `LoadBalancerIP:` + fmt.Sprintf("%v", this.LoadBalancerIP) + `,`, + `LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`, + `ExternalName:` + fmt.Sprintf("%v", this.ExternalName) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Sysctl) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Sysctl{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *TCPSocketAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TCPSocketAction{`, + `Port:` + strings.Replace(strings.Replace(this.Port.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Taint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Taint{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TimeAdded:` + strings.Replace(strings.Replace(this.TimeAdded.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Toleration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Toleration{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *Volume) String() string { + if 
this == nil { + return "nil" + } + s := strings.Join([]string{`&Volume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `VolumeSource:` + strings.Replace(strings.Replace(this.VolumeSource.String(), "VolumeSource", "VolumeSource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeMount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeMount{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, + `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeProjection{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretProjection", "SecretProjection", 1) + `,`, + `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, + `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeSource{`, + `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, + `EmptyDir:` + strings.Replace(fmt.Sprintf("%v", this.EmptyDir), "EmptyDirVolumeSource", "EmptyDirVolumeSource", 1) + `,`, + `GCEPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, + `AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, + `GitRepo:` + strings.Replace(fmt.Sprintf("%v", this.GitRepo), "GitRepoVolumeSource", "GitRepoVolumeSource", 1) + `,`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretVolumeSource", "SecretVolumeSource", 1) + `,`, + `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIVolumeSource", "ISCSIVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, + `PersistentVolumeClaim:` + strings.Replace(fmt.Sprintf("%v", this.PersistentVolumeClaim), "PersistentVolumeClaimVolumeSource", "PersistentVolumeClaimVolumeSource", 1) + `,`, + `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDVolumeSource", "RBDVolumeSource", 1) + `,`, + `FlexVolume:` + strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexVolumeSource", "FlexVolumeSource", 1) + `,`, + `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, + `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSVolumeSource", "CephFSVolumeSource", 1) + `,`, + `Flocker:` + strings.Replace(fmt.Sprintf("%v", this.Flocker), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, + `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIVolumeSource", "DownwardAPIVolumeSource", 1) + `,`, + `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, + `AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), 
"AzureFileVolumeSource", "AzureFileVolumeSource", 1) + `,`, + `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapVolumeSource", "ConfigMapVolumeSource", 1) + `,`, + `VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, + `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, + `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, + `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, + `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, + `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, + `Projected:` + strings.Replace(fmt.Sprintf("%v", this.Projected), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VsphereVirtualDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VsphereVirtualDiskVolumeSource{`, + `VolumePath:` + fmt.Sprintf("%v", this.VolumePath) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `}`, + }, "") + return s +} +func (this *WeightedPodAffinityTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WeightedPodAffinityTerm{`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `PodAffinityTerm:` + strings.Replace(strings.Replace(this.PodAffinityTerm.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + 
var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + m.Partition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Partition |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Affinity) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Affinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Affinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeAffinity == nil { + m.NodeAffinity = &NodeAffinity{} + } + if err := m.NodeAffinity.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAffinity == nil { + m.PodAffinity = 
&PodAffinity{} + } + if err := m.PodAffinity.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAntiAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAntiAffinity == nil { + m.PodAntiAffinity = &PodAntiAffinity{} + } + if err := m.PodAntiAffinity.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttachedVolume) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttachedVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttachedVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = UniqueVolumeName(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DevicePath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AvoidPods) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < 
l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AvoidPods: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AvoidPods: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferAvoidPods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferAvoidPods = append(m.PreferAvoidPods, PreferAvoidPodsEntry{}) + if err := m.PreferAvoidPods[len(m.PreferAvoidPods)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureDiskVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DiskName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DiskName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataDiskURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.DataDiskURI = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CachingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AzureDataDiskCachingMode(data[iNdEx:postIndex]) + m.CachingMode = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.FSType = &s + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnly = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureFileVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureFileVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureFileVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field ShareName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShareName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Binding) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Binding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Binding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Capabilities) 
Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Add = append(m.Add, Capability(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Drop = append(m.Drop, Capability(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CephFSVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } 
+ m.Monitors = append(m.Monitors, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretFile = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CinderVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + 
stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ComponentConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ComponentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentStatusList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentStatusList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ComponentStatus{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMap) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMap: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMap: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + 
intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Data == nil { + m.Data = make(map[string]string) + } + m.Data[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapEnvSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapEnvSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapKeySelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapKeySelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ConfigMap{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapProjection) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Container) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Container: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Container: illegal 
tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WorkingDir = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, ContainerPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMounts = append(m.VolumeMounts, VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LivenessProbe == nil { + m.LivenessProbe = &Probe{} + } + if err := m.LivenessProbe.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReadinessProbe == nil { + m.ReadinessProbe = &Probe{} + } + if err := m.ReadinessProbe.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Lifecycle == nil { + m.Lifecycle = &Lifecycle{} + } + if err := m.Lifecycle.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TerminationMessagePath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePullPolicy = PullPolicy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &SecurityContext{} + } + if err := m.SecurityContext.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.StdinOnce = bool(v != 0) + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TerminationMessagePolicy = TerminationMessagePolicy(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerImage) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names 
= append(m.Names, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType) + } + m.SizeBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.SizeBytes |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerPort) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) + } + m.HostPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.HostPort |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) + } + m.ContainerPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ContainerPort |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = Protocol(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostIP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerState) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Waiting == nil { + m.Waiting = &ContainerStateWaiting{} + } + if err := m.Waiting.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Running", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Running == nil { + m.Running = &ContainerStateRunning{} + } + if err := m.Running.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminated", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Terminated == nil { + m.Terminated = 
&ContainerStateTerminated{} + } + if err := m.Terminated.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStateRunning) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateRunning: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateRunning: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartedAt.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStateTerminated) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateTerminated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateTerminated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) + } + m.ExitCode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ExitCode |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) + } + m.Signal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Signal |= (int32(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartedAt.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FinishedAt.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*ContainerStateWaiting) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateWaiting: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateWaiting: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.State.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTerminationState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTerminationState.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Ready = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartCount", wireType) + } + m.RestartCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.RestartCount |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonEndpoint) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonEndpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonEndpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Port |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.GracePeriodSeconds = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Preconditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Preconditions == nil { + m.Preconditions = &Preconditions{} + } + if err := m.Preconditions.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OrphanDependents", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OrphanDependents = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PropagationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := DeletionPropagation(data[iNdEx:postIndex]) + m.PropagationPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DownwardAPIProjection) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DownwardAPIProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DownwardAPIProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
+func (m *DownwardAPIVolumeFile) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DownwardAPIVolumeFile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DownwardAPIVolumeFile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} + } + if err := m.FieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} + } + if err := m.ResourceFieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Mode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DownwardAPIVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + 
for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EmptyDirVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EmptyDirVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EmptyDirVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Medium", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Medium = StorageMedium(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointAddress) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetRef == nil { + m.TargetRef = &ObjectReference{} + } + if err := m.TargetRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.NodeName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if 
skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointPort) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Port |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = Protocol(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSubset) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, EndpointAddress{}) + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{}) + if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, EndpointPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Endpoints) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subsets = append(m.Subsets, EndpointSubset{}) + if err := m.Subsets[len(m.Subsets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointsList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Endpoints{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvFromSource) Unmarshal(data []byte) error { + l 
:= len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Prefix = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMapRef == nil { + m.ConfigMapRef = &ConfigMapEnvSource{} + } + if err := m.ConfigMapRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretEnvSource{} + } + if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvVar) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValueFrom == nil { + m.ValueFrom = &EnvVarSource{} + } + if err := m.ValueFrom.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvVarSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} + } + if err := m.FieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} + } + if err := m.ResourceFieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMapKeyRef == nil { + m.ConfigMapKeyRef = &ConfigMapKeySelector{} + } + if err := m.ConfigMapKeyRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretKeyRef == nil { + m.SecretKeyRef = &SecretKeySelector{} + } + if err := m.SecretKeyRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Event) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InvolvedObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.InvolvedObject.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Source.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FirstTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FirstTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Count |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Event{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Component", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Component = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecAction) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + 
return fmt.Errorf("proto: ExecAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FCVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FCVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FCVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetWWNs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetWWNs = append(m.TargetWWNs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Lun = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", 
wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlexVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlexVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlexVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Options == nil { + m.Options = make(map[string]string) + } + m.Options[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlockerVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlockerVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlockerVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DatasetName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DatasetName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DatasetUUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DatasetUUID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GCEPersistentDiskVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PDName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PDName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = 
string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + m.Partition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Partition |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitRepoVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitRepoVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitRepoVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Repository = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Revision = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Directory", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) 
+ if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Directory = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GlusterfsVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GlusterfsVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GlusterfsVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndpointsName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EndpointsName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPGetAction) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + 
iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPGetAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPGetAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Port.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheme", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scheme = URIScheme(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPHeaders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HTTPHeaders = append(m.HTTPHeaders, HTTPHeader{}) + if err := m.HTTPHeaders[len(m.HTTPHeaders)-1].Unmarshal(data[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPHeader) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Handler) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Handler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Handler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exec == nil { + m.Exec = &ExecAction{} + } + if err := m.Exec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPGet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HTTPGet == nil { + m.HTTPGet = &HTTPGetAction{} + } + if err := m.HTTPGet.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TCPSocket", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TCPSocket == nil { + m.TCPSocket = &TCPSocketAction{} + } + if err := m.TCPSocket.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostPathVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostPathVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostPathVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + 
} + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ISCSIVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ISCSIVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ISCSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetPortal = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IQN = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) + } + m.Lun = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Lun |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ISCSIInterface = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Portals = append(m.Portals, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyToPath) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyToPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyToPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = 
string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Mode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Lifecycle) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Lifecycle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Lifecycle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PostStart", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PostStart == nil { + m.PostStart = &Handler{} + } + if err := m.PostStart.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreStop", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PreStop == nil { + m.PreStop = &Handler{} + } + if err := m.PreStop.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitRange) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
wireType == 4 { + return fmt.Errorf("proto: LimitRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitRangeItem) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitRangeItem: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitRangeItem: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = LimitType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex 
:= iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Max == nil { + m.Max = make(ResourceList) + } + m.Max[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Min == nil { + m.Min = make(ResourceList) + } + m.Min[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Default == nil { + m.Default = make(ResourceList) + } + m.Default[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.DefaultRequest == nil { + m.DefaultRequest = make(ResourceList) + } + m.DefaultRequest[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxLimitRequestRatio", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey 
:= ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.MaxLimitRequestRatio == nil { + m.MaxLimitRequestRatio = make(ResourceList) + } + m.MaxLimitRequestRatio[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitRangeList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitRangeList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitRangeList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, LimitRange{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitRangeSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitRangeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitRangeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Limits = append(m.Limits, LimitRangeItem{}) + if err := m.Limits[len(m.Limits)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *List) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: List: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: List: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + 
iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, k8s_io_apimachinery_pkg_runtime.RawExtension{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelSelector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldSelector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Watch", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Watch = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LoadBalancerIngress) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LoadBalancerIngress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LoadBalancerStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) 
& 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LoadBalancerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ingress = append(m.Ingress, LoadBalancerIngress{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LocalObjectReference) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NFSVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NFSVolumeSource: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Server = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Namespace) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Namespace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamespaceList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamespaceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamespaceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Namespace{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } 
+ if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamespaceSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamespaceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamespaceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Finalizers = append(m.Finalizers, FinalizerName(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamespaceStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamespaceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamespaceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = NamespacePhase(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func 
(m *Node) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Node: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Node: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeAddress) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = NodeAddressType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeAffinity) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequiredDuringSchedulingIgnoredDuringExecution == nil { + m.RequiredDuringSchedulingIgnoredDuringExecution = &NodeSelector{} + } + if err := m.RequiredDuringSchedulingIgnoredDuringExecution.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } 
+ postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, PreferredSchedulingTerm{}) + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = NodeConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastHeartbeatTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastHeartbeatTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeDaemonEndpoints) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeDaemonEndpoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeDaemonEndpoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeletEndpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.KubeletEndpoint.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Node{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeProxyOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeProxyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeResources) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Capacity 
== nil { + m.Capacity = make(ResourceList) + } + m.Capacity[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelectorTerms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeSelectorTerms = append(m.NodeSelectorTerms, NodeSelectorTerm{}) + if err := m.NodeSelectorTerms[len(m.NodeSelectorTerms)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSelectorRequirement) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 
2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = NodeSelectorOperator(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSelectorTerm) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSelectorTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSelectorTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, NodeSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodCIDR", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodCIDR = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProviderID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Unschedulable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Unschedulable = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Taints = append(m.Taints, Taint{}) + if err := m.Taints[len(m.Taints)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if 
err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Capacity == nil { + m.Capacity = make(ResourceList) + } + m.Capacity[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Allocatable == nil { + m.Allocatable = make(ResourceList) + } + m.Allocatable[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = NodePhase(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, NodeCondition{}) + if err := 
m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, NodeAddress{}) + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DaemonEndpoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DaemonEndpoints.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.NodeInfo.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, ContainerImage{}) + if err := m.Images[len(m.Images)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumesInUse", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumesInUse = append(m.VolumesInUse, UniqueVolumeName(data[iNdEx:postIndex])) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumesAttached", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumesAttached = append(m.VolumesAttached, AttachedVolume{}) + if err := m.VolumesAttached[len(m.VolumesAttached)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSystemInfo) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSystemInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSystemInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MachineID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MachineID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SystemUUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SystemUUID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BootID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BootID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + 
case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KernelVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KernelVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OSImage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OSImage = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerRuntimeVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerRuntimeVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeletVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KubeletVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeProxyVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KubeProxyVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OperatingSystem", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OperatingSystem = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Architecture = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectFieldSelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectFieldSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldPath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 
skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMeta) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GenerateName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelfLink = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Generation |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreationTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CreationTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeletionTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeletionTimestamp == nil { + m.DeletionTimestamp = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.DeletionTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeletionGracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DeletionGracePeriodSeconds = &v + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Labels == nil { + m.Labels = make(map[string]string) + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OwnerReferences = append(m.OwnerReferences, k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference{}) + if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Finalizers = append(m.Finalizers, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectReference) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldPath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolume) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaim) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex 
> l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PersistentVolumeClaim{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.StorageClassName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = PersistentVolumeClaimPhase(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = 
postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Capacity == nil { + m.Capacity = make(ResourceList) + } + m.Capacity[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClaimName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClaimName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeList) Unmarshal(data 
[]byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PersistentVolume{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GCEPersistentDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GCEPersistentDisk == 
nil { + m.GCEPersistentDisk = &GCEPersistentDiskVolumeSource{} + } + if err := m.GCEPersistentDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AWSElasticBlockStore", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AWSElasticBlockStore == nil { + m.AWSElasticBlockStore = &AWSElasticBlockStoreVolumeSource{} + } + if err := m.AWSElasticBlockStore.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HostPath == nil { + m.HostPath = &HostPathVolumeSource{} + } + if err := m.HostPath.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Glusterfs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Glusterfs == nil { + m.Glusterfs = &GlusterfsVolumeSource{} + } + if err := m.Glusterfs.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NFS == nil { + m.NFS = &NFSVolumeSource{} + } + if err := m.NFS.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBD", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RBD == nil { + m.RBD = &RBDVolumeSource{} + } + 
if err := m.RBD.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ISCSI == nil { + m.ISCSI = &ISCSIVolumeSource{} + } + if err := m.ISCSI.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cinder", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cinder == nil { + m.Cinder = &CinderVolumeSource{} + } + if err := m.Cinder.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CephFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CephFS == nil { + m.CephFS = &CephFSVolumeSource{} + } + if err := m.CephFS.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FC", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FC == nil { + m.FC = &FCVolumeSource{} + } + if err := m.FC.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flocker", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Flocker == nil { + m.Flocker = &FlockerVolumeSource{} + } + if err := m.Flocker.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field FlexVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FlexVolume == nil { + m.FlexVolume = &FlexVolumeSource{} + } + if err := m.FlexVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureFile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AzureFile == nil { + m.AzureFile = &AzureFileVolumeSource{} + } + if err := m.AzureFile.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VsphereVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VsphereVolume == nil { + m.VsphereVolume = &VsphereVirtualDiskVolumeSource{} + } + if err := m.VsphereVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quobyte", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Quobyte == nil { + m.Quobyte = &QuobyteVolumeSource{} + } + if err := m.Quobyte.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AzureDisk == nil { + m.AzureDisk = &AzureDiskVolumeSource{} + } + if err := m.AzureDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PhotonPersistentDisk", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PhotonPersistentDisk == nil { + m.PhotonPersistentDisk = &PhotonPersistentDiskVolumeSource{} + } + if err := m.PhotonPersistentDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortworxVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PortworxVolume == nil { + m.PortworxVolume = &PortworxVolumeSource{} + } + if err := m.PortworxVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleIO", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScaleIO == nil { + m.ScaleIO = &ScaleIOVolumeSource{} + } + if err := m.ScaleIO.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Capacity == nil { + m.Capacity = make(ResourceList) + } + m.Capacity[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PersistentVolumeSource.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClaimRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClaimRef == nil { + m.ClaimRef = &ObjectReference{} + } + if err := m.ClaimRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeReclaimPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageClassName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = PersistentVolumePhase(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType 
= %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PhotonPersistentDiskVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PhotonPersistentDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PhotonPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PdID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PdID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Pod) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Pod: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Pod: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAffinity) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 
3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) + if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAffinityTerm) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAffinityTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyKey = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAntiAffinity) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAntiAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) + if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
PreferredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAttachOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAttachOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAttachOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdout = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stderr = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PodConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodExecOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodExecOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodExecOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdout = bool(v != 0) + case 3: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stderr = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Pod{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodLogOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodLogOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodLogOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Follow = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Previous", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Previous = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + 
if b < 0x80 { + break + } + } + m.SinceSeconds = &v + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SinceTime == nil { + m.SinceTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.SinceTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Timestamps = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TailLines", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TailLines = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitBytes", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LimitBytes = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodPortForwardOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodPortForwardOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodPortForwardOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx 
+ skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodProxyOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodProxyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityContext) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxOptions == nil { + m.SELinuxOptions = &SELinuxOptions{} + } + if err := m.SELinuxOptions.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + m.RunAsUser = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.RunAsNonRoot = &b + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SupplementalGroups = append(m.SupplementalGroups, v) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.FSGroup = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSignature) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSignature: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSignature: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodController", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodController == nil { + m.PodController = &k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference{} + } + if err := m.PodController.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, Volume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, Container{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RestartPolicy = RestartPolicy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationGracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TerminationGracePeriodSeconds = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveDeadlineSeconds = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DNSPolicy = DNSPolicy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccountName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedServiceAccount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeprecatedServiceAccount = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostNetwork = bool(v != 0) + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostPID = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostIPC = bool(v != 0) + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &PodSecurityContext{} + } + if err := m.SecurityContext.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break 
+ } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{}) + if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subdomain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subdomain = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Affinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Affinity == nil { + m.Affinity = &Affinity{} + } + if err := m.Affinity.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchedulerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchedulerName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitContainers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InitContainers = append(m.InitContainers, 
Container{}) + if err := m.InitContainers[len(m.InitContainers)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AutomountServiceAccountToken = &b + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = PodPhase(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, PodCondition{}) + if err := 
m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostIP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodIP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartTime == nil { + m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerStatuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerStatuses = append(m.ContainerStatuses, ContainerStatus{}) + if err := m.ContainerStatuses[len(m.ContainerStatuses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QOSClass", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.QOSClass = PodQOSClass(data[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitContainerStatuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InitContainerStatuses = append(m.InitContainerStatuses, ContainerStatus{}) + if err := m.InitContainerStatuses[len(m.InitContainerStatuses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodStatusResult) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodStatusResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodStatusResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); 
err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodTemplate) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodTemplateList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + 
iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodTemplateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodTemplate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodTemplateSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortworxVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortworxVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortworxVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Preconditions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Preconditions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Preconditions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_apimachinery_pkg_types.UID(data[iNdEx:postIndex]) + m.UID = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PreferAvoidPodsEntry) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PreferAvoidPodsEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PreferAvoidPodsEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSignature", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PodSignature.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvictionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EvictionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PreferredSchedulingTerm) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PreferredSchedulingTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PreferredSchedulingTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + m.Weight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Weight |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Preference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Preference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF 
+ } + return nil +} +func (m *Probe) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Probe: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Probe: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Handler", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Handler.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialDelaySeconds", wireType) + } + m.InitialDelaySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.InitialDelaySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + m.TimeoutSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.TimeoutSeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PeriodSeconds", wireType) + } + m.PeriodSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.PeriodSeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessThreshold", wireType) + } + m.SuccessThreshold = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.SuccessThreshold |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureThreshold", wireType) + } + m.FailureThreshold = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.FailureThreshold |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectedVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectedVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectedVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sources = append(m.Sources, VolumeProjection{}) + if err := m.Sources[len(m.Sources)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QuobyteVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QuobyteVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QuobyteVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Registry", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Registry = string(data[iNdEx:postIndex]) + iNdEx = postIndex + 
case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volume = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RBDVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RBDVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RBDVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CephMonitors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CephMonitors = append(m.CephMonitors, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBDImage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RBDImage = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBDPool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RBDPool = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RadosUser", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RadosUser = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyring", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyring = 
string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RangeAllocation) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeAllocation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeAllocation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Range = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationController) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationController: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationController: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } 
+ if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ReplicationControllerConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ReplicationController{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + 
iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Selector == nil { + m.Selector = make(map[string]string) + } + m.Selector[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { 
+ break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Template == nil { + m.Template = &PodTemplateSpec{} + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ReplicationControllerCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceFieldSelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceFieldSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Divisor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF 
+ } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Divisor.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceQuota) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuota: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuota: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*ResourceQuotaList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuotaList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuotaList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceQuota{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceQuotaSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuotaSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuotaSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Hard == nil { + m.Hard = make(ResourceList) + } + m.Hard[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scopes = append(m.Scopes, ResourceQuotaScope(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceQuotaStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuotaStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuotaStatus: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Hard == nil { + m.Hard = make(ResourceList) + } + m.Hard[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Used", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Used == nil { + m.Used = make(ResourceList) + } + m.Used[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceRequirements) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceRequirements: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceRequirements: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return 
ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Limits == nil { + m.Limits = make(ResourceList) + } + m.Limits[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := 
mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Requests == nil { + m.Requests = make(ResourceList) + } + m.Requests[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SELinuxOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SELinuxOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SELinuxOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleIOVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleIOVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gateway = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field System", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.System = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SSLEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + m.SSLEnabled = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProtectionDomain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProtectionDomain = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoragePool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoragePool = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageMode = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] 
+ iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Secret) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Secret: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + mapbyteLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthGenerated + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := make([]byte, mapbyteLen) + copy(mapvalue, data[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + if m.Data == nil { + m.Data = make(map[string][]byte) + } + m.Data[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = SecretType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringData", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.StringData == nil { + m.StringData = make(map[string]string) + } + m.StringData[mapkey] = mapvalue + iNdEx = 
postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretEnvSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretEnvSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretKeySelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretKeySelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Secret{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, 
err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretProjection) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretVolumeSource: wiretype 
end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecurityContext) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecurityContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capabilities", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capabilities == nil { + m.Capabilities = &Capabilities{} + } + if err := m.Capabilities.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Privileged = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxOptions == nil { + m.SELinuxOptions = &SELinuxOptions{} + } + if err := m.SELinuxOptions.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RunAsUser = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.RunAsNonRoot = &b + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnlyRootFilesystem = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SerializedReference) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SerializedReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SerializedReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Reference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Service) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Service: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } 
+ postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccount) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = append(m.Secrets, ObjectReference{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{}) + if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AutomountServiceAccountToken = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ServiceAccount{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Service{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServicePort) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServicePort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServicePort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = Protocol(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Port |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetPort.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodePort", wireType) + } + m.NodePort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.NodePort |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceProxyOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceProxyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, ServicePort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Selector == nil { + m.Selector = make(map[string]string) + } + m.Selector[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ClusterIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterIP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ServiceType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalIPs = append(m.ExternalIPs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedPublicIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeprecatedPublicIPs = append(m.DeprecatedPublicIPs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinity", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionAffinity = ServiceAffinity(data[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LoadBalancerIP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LoadBalancer.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sysctl) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sysctl: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sysctl: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TCPSocketAction) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TCPSocketAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TCPSocketAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.Port.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Taint) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Taint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Taint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Effect = TaintEffect(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeAdded", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.TimeAdded.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Toleration) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Toleration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Toleration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = TolerationOperator(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Effect = TaintEffect(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TolerationSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TolerationSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Volume) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Volume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Volume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.VolumeSource.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeMount) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: VolumeMount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeMount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountPath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubPath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeProjection) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &SecretProjection{} + } + if err := m.Secret.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DownwardAPI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DownwardAPI == nil { + m.DownwardAPI = &DownwardAPIProjection{} + } + if err := m.DownwardAPI.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMap == nil { + m.ConfigMap = &ConfigMapProjection{} + } + if err := m.ConfigMap.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.HostPath == nil { + m.HostPath = &HostPathVolumeSource{} + } + if err := m.HostPath.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EmptyDir", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EmptyDir == nil { + m.EmptyDir = &EmptyDirVolumeSource{} + } + if err := m.EmptyDir.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GCEPersistentDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GCEPersistentDisk == nil { + m.GCEPersistentDisk = &GCEPersistentDiskVolumeSource{} + } + if err := m.GCEPersistentDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AWSElasticBlockStore", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AWSElasticBlockStore == nil { + m.AWSElasticBlockStore = &AWSElasticBlockStoreVolumeSource{} + } + if err := m.AWSElasticBlockStore.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitRepo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GitRepo == nil { + m.GitRepo = &GitRepoVolumeSource{} + } + if err := m.GitRepo.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &SecretVolumeSource{} + } + if err := m.Secret.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NFS == nil { + m.NFS = &NFSVolumeSource{} + } + if err := m.NFS.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ISCSI == nil { + m.ISCSI = &ISCSIVolumeSource{} + } + if err := m.ISCSI.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Glusterfs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Glusterfs == nil { + m.Glusterfs = &GlusterfsVolumeSource{} + } + if err := m.Glusterfs.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeClaim", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PersistentVolumeClaim == nil { + m.PersistentVolumeClaim = &PersistentVolumeClaimVolumeSource{} + } + if err := m.PersistentVolumeClaim.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBD", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RBD == nil { + m.RBD = &RBDVolumeSource{} + } + 
if err := m.RBD.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FlexVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FlexVolume == nil { + m.FlexVolume = &FlexVolumeSource{} + } + if err := m.FlexVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cinder", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cinder == nil { + m.Cinder = &CinderVolumeSource{} + } + if err := m.Cinder.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CephFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CephFS == nil { + m.CephFS = &CephFSVolumeSource{} + } + if err := m.CephFS.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flocker", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Flocker == nil { + m.Flocker = &FlockerVolumeSource{} + } + if err := m.Flocker.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DownwardAPI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DownwardAPI == nil { + m.DownwardAPI = &DownwardAPIVolumeSource{} + } + if err := m.DownwardAPI.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex 
+ case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FC", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FC == nil { + m.FC = &FCVolumeSource{} + } + if err := m.FC.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureFile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AzureFile == nil { + m.AzureFile = &AzureFileVolumeSource{} + } + if err := m.AzureFile.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMap == nil { + m.ConfigMap = &ConfigMapVolumeSource{} + } + if err := m.ConfigMap.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VsphereVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VsphereVolume == nil { + m.VsphereVolume = &VsphereVirtualDiskVolumeSource{} + } + if err := m.VsphereVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quobyte", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Quobyte == nil { + m.Quobyte = &QuobyteVolumeSource{} + } + if err := m.Quobyte.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
AzureDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AzureDisk == nil { + m.AzureDisk = &AzureDiskVolumeSource{} + } + if err := m.AzureDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PhotonPersistentDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PhotonPersistentDisk == nil { + m.PhotonPersistentDisk = &PhotonPersistentDiskVolumeSource{} + } + if err := m.PhotonPersistentDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortworxVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PortworxVolume == nil { + m.PortworxVolume = &PortworxVolumeSource{} + } + if err := m.PortworxVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleIO", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScaleIO == nil { + m.ScaleIO = &ScaleIOVolumeSource{} + } + if err := m.ScaleIO.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Projected", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Projected == nil { + m.Projected = &ProjectedVolumeSource{} + } + if err := m.Projected.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + 
return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VsphereVirtualDiskVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VsphereVirtualDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VsphereVirtualDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumePath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WeightedPodAffinityTerm) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WeightedPodAffinityTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WeightedPodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + m.Weight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := data[iNdEx] + iNdEx++ + m.Weight |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAffinityTerm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PodAffinityTerm.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 11000 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0xbd, 0x7d, 0x70, 0x24, 0xc7, + 0x75, 0x18, 0xae, 0xd9, 0xc5, 0xd7, 0x3e, 0x7c, 0x37, 0x70, 0x47, 0x10, 0x22, 0x0f, 0xc7, 0xa1, + 0x48, 0x1d, 0xc9, 0x23, 0x20, 0x1e, 0x49, 0xf1, 0x24, 0xea, 0x47, 0x09, 0xc0, 0x02, 0x77, 0xd0, + 0x7d, 0x2d, 0x7b, 0x71, 0x77, 0x14, 0xc5, 0x9f, 0xc8, 0xb9, 0x9d, 0x06, 0x30, 0xbc, 0xc1, 0xcc, + 0x72, 0x66, 0x16, 0x77, 0x90, 0xa2, 0x2a, 0x5b, 0x51, 
0xc9, 0x49, 0x59, 0x49, 0xe4, 0x72, 0x54, + 0x95, 0x72, 0x52, 0xa5, 0x94, 0xab, 0xe2, 0x28, 0x9f, 0x8e, 0xa2, 0xb2, 0x24, 0x97, 0xe5, 0xa4, + 0xe2, 0x58, 0x8e, 0x5c, 0x95, 0x38, 0xaa, 0x72, 0x25, 0x76, 0xca, 0x15, 0xd8, 0x82, 0x2a, 0xfe, + 0x23, 0x7f, 0xe4, 0x8f, 0xf8, 0x3f, 0x24, 0x95, 0x4a, 0xf5, 0xe7, 0x74, 0xcf, 0xee, 0x62, 0x66, + 0xc1, 0x03, 0x7c, 0x52, 0xe5, 0xbf, 0xdd, 0x7e, 0xaf, 0x5f, 0x7f, 0x4c, 0xf7, 0xeb, 0xf7, 0x5e, + 0xbf, 0xf7, 0x1a, 0xce, 0xdf, 0xbd, 0x18, 0xcf, 0x7b, 0xe1, 0xc2, 0xdd, 0xd6, 0x1d, 0x12, 0x05, + 0x24, 0x21, 0xf1, 0x42, 0xf3, 0xee, 0xe6, 0x82, 0xd3, 0xf4, 0x16, 0x76, 0x5e, 0x58, 0xd8, 0x24, + 0x01, 0x89, 0x9c, 0x84, 0xb8, 0xf3, 0xcd, 0x28, 0x4c, 0x42, 0xf4, 0x18, 0xc7, 0x9e, 0x4f, 0xb1, + 0xe7, 0x9b, 0x77, 0x37, 0xe7, 0x9d, 0xa6, 0x37, 0xbf, 0xf3, 0xc2, 0xec, 0xf3, 0x9b, 0x5e, 0xb2, + 0xd5, 0xba, 0x33, 0xdf, 0x08, 0xb7, 0x17, 0x36, 0xc3, 0xcd, 0x70, 0x81, 0x55, 0xba, 0xd3, 0xda, + 0x60, 0xff, 0xd8, 0x1f, 0xf6, 0x8b, 0x13, 0x9b, 0x7d, 0x49, 0x34, 0xed, 0x34, 0xbd, 0x6d, 0xa7, + 0xb1, 0xe5, 0x05, 0x24, 0xda, 0x55, 0x8d, 0x47, 0x24, 0x0e, 0x5b, 0x51, 0x83, 0x64, 0xbb, 0x70, + 0x68, 0xad, 0x78, 0x61, 0x9b, 0x24, 0x4e, 0x87, 0x8e, 0xcf, 0x2e, 0x74, 0xab, 0x15, 0xb5, 0x82, + 0xc4, 0xdb, 0x6e, 0x6f, 0xe6, 0xa3, 0x79, 0x15, 0xe2, 0xc6, 0x16, 0xd9, 0x76, 0xda, 0xea, 0xbd, + 0xd8, 0xad, 0x5e, 0x2b, 0xf1, 0xfc, 0x05, 0x2f, 0x48, 0xe2, 0x24, 0x3a, 0x6c, 0x4c, 0x31, 0x89, + 0x76, 0x48, 0x94, 0x0e, 0x88, 0xdc, 0x77, 0xb6, 0x9b, 0x3e, 0xe9, 0x30, 0x26, 0xfb, 0x8f, 0x2c, + 0x38, 0xbb, 0x78, 0xbb, 0xbe, 0xe2, 0x3b, 0x71, 0xe2, 0x35, 0x96, 0xfc, 0xb0, 0x71, 0xb7, 0x9e, + 0x84, 0x11, 0xb9, 0x15, 0xfa, 0xad, 0x6d, 0x52, 0x67, 0xd3, 0x87, 0xce, 0xc3, 0xd0, 0x0e, 0xfb, + 0xbf, 0x56, 0x9d, 0xb1, 0xce, 0x5a, 0xe7, 0x2a, 0x4b, 0x13, 0x3f, 0xdc, 0x9b, 0xfb, 0xc0, 0xfe, + 0xde, 0xdc, 0xd0, 0x2d, 0x51, 0x8e, 0x15, 0x06, 0x7a, 0x1a, 0x06, 0x36, 0xe2, 0xf5, 0xdd, 0x26, + 0x99, 0x29, 0x31, 0xdc, 0x31, 0x81, 0x3b, 0xb0, 0x5a, 0xa7, 0xa5, 0x58, 0x40, 0xd1, 0x02, 0x54, + 0x9a, 0x4e, 0x94, 0x78, 0x89, 0x17, 0x06, 0x33, 0xe5, 0xb3, 0xd6, 0xb9, 0xfe, 0xa5, 0x49, 0x81, + 0x5a, 0xa9, 0x49, 0x00, 0x4e, 0x71, 0x68, 0x37, 0x22, 0xe2, 0xb8, 0x37, 0x02, 0x7f, 0x77, 0xa6, + 0xef, 0xac, 0x75, 0x6e, 0x28, 0xed, 0x06, 0x16, 0xe5, 0x58, 0x61, 0xd8, 0xdf, 0x2b, 0xc1, 0xd0, + 0xe2, 0xc6, 0x86, 0x17, 0x78, 0xc9, 0x2e, 0x7a, 0x07, 0x46, 0x82, 0xd0, 0x25, 0xf2, 0x3f, 0x1b, + 0xc5, 0xf0, 0x85, 0x67, 0xe7, 0x0f, 0x5b, 0x8a, 0xf3, 0xd7, 0xb5, 0x1a, 0x4b, 0x13, 0xfb, 0x7b, + 0x73, 0x23, 0x7a, 0x09, 0x36, 0x28, 0xa2, 0xb7, 0x60, 0xb8, 0x19, 0xba, 0xaa, 0x81, 0x12, 0x6b, + 0xe0, 0x99, 0xc3, 0x1b, 0xa8, 0xa5, 0x15, 0x96, 0xc6, 0xf7, 0xf7, 0xe6, 0x86, 0xb5, 0x02, 0xac, + 0x93, 0x43, 0x3e, 0x8c, 0xd3, 0xbf, 0x41, 0xe2, 0xa9, 0x16, 0xca, 0xac, 0x85, 0xe7, 0xf3, 0x5b, + 0xd0, 0x2a, 0x2d, 0x4d, 0xed, 0xef, 0xcd, 0x8d, 0x67, 0x0a, 0x71, 0x96, 0xb4, 0xfd, 0x79, 0x18, + 0x5b, 0x4c, 0x12, 0xa7, 0xb1, 0x45, 0x5c, 0xfe, 0x7d, 0xd1, 0x4b, 0xd0, 0x17, 0x38, 0xdb, 0x44, + 0x7c, 0xfd, 0xb3, 0x62, 0xda, 0xfb, 0xae, 0x3b, 0xdb, 0xe4, 0x60, 0x6f, 0x6e, 0xe2, 0x66, 0xe0, + 0xbd, 0xd7, 0x12, 0x6b, 0x86, 0x96, 0x61, 0x86, 0x8d, 0x2e, 0x00, 0xb8, 0x64, 0xc7, 0x6b, 0x90, + 0x9a, 0x93, 0x6c, 0x89, 0xd5, 0x80, 0x44, 0x5d, 0xa8, 0x2a, 0x08, 0xd6, 0xb0, 0xec, 0x2f, 0x59, + 0x50, 0x59, 0xdc, 0x09, 0x3d, 0xb7, 0x16, 0xba, 0x31, 0x6a, 0xc1, 0x78, 0x33, 0x22, 0x1b, 0x24, + 0x52, 0x45, 0x33, 0xd6, 0xd9, 0xf2, 0xb9, 0xe1, 0x0b, 0x17, 0x72, 0xc6, 0x6d, 0x56, 0x5a, 0x09, + 0x92, 0x68, 0x77, 0xe9, 0x11, 0xd1, 0xf4, 0x78, 0x06, 0x8a, 0xb3, 0x6d, 0xd8, 
0xbf, 0x54, 0x82, + 0x53, 0x8b, 0x9f, 0x6f, 0x45, 0xa4, 0xea, 0xc5, 0x77, 0xb3, 0x5b, 0xc1, 0xf5, 0xe2, 0xbb, 0xd7, + 0xd3, 0xc9, 0x50, 0x6b, 0xb0, 0x2a, 0xca, 0xb1, 0xc2, 0x40, 0xcf, 0xc3, 0x20, 0xfd, 0x7d, 0x13, + 0xaf, 0x89, 0xd1, 0x4f, 0x09, 0xe4, 0xe1, 0xaa, 0x93, 0x38, 0x55, 0x0e, 0xc2, 0x12, 0x07, 0x5d, + 0x83, 0xe1, 0x06, 0xdb, 0xef, 0x9b, 0xd7, 0x42, 0x97, 0xb0, 0x2f, 0x5c, 0x59, 0x7a, 0x8e, 0xa2, + 0x2f, 0xa7, 0xc5, 0x07, 0x7b, 0x73, 0x33, 0xbc, 0x6f, 0x82, 0x84, 0x06, 0xc3, 0x7a, 0x7d, 0x64, + 0xab, 0x8d, 0xd8, 0xc7, 0x28, 0x41, 0x87, 0x4d, 0x78, 0x4e, 0xdb, 0x53, 0xfd, 0x6c, 0x4f, 0x8d, + 0x74, 0xd9, 0x4f, 0xff, 0xd8, 0x12, 0x73, 0xb2, 0xea, 0xf9, 0x26, 0x7b, 0xb8, 0x00, 0x10, 0x93, + 0x46, 0x44, 0x12, 0x6d, 0x56, 0xd4, 0x67, 0xae, 0x2b, 0x08, 0xd6, 0xb0, 0xe8, 0xe6, 0x8f, 0xb7, + 0x9c, 0x88, 0xad, 0x16, 0x31, 0x37, 0x6a, 0xf3, 0xd7, 0x25, 0x00, 0xa7, 0x38, 0xc6, 0xe6, 0x2f, + 0xe7, 0x6e, 0xfe, 0xdf, 0xb1, 0x60, 0x70, 0xc9, 0x0b, 0x5c, 0x2f, 0xd8, 0x44, 0xef, 0xc0, 0x10, + 0xe5, 0xe8, 0xae, 0x93, 0x38, 0x62, 0xdf, 0x7f, 0x44, 0x2e, 0x1e, 0x9d, 0xc1, 0xca, 0xe5, 0x13, + 0xcf, 0x53, 0x6c, 0xba, 0x88, 0x6e, 0xdc, 0x79, 0x97, 0x34, 0x92, 0x6b, 0x24, 0x71, 0xd2, 0xe1, + 0xa4, 0x65, 0x58, 0x51, 0x45, 0x37, 0x61, 0x20, 0x71, 0xa2, 0x4d, 0x92, 0x88, 0x6d, 0x9f, 0xb3, + 0x29, 0x39, 0x0d, 0x4c, 0x97, 0x1c, 0x09, 0x1a, 0x24, 0x65, 0x90, 0xeb, 0x8c, 0x08, 0x16, 0xc4, + 0xec, 0x06, 0x8c, 0x2c, 0x3b, 0x4d, 0xe7, 0x8e, 0xe7, 0x7b, 0x89, 0x47, 0x62, 0xf4, 0x61, 0x28, + 0x3b, 0xae, 0xcb, 0x36, 0x40, 0x65, 0xe9, 0xd4, 0xfe, 0xde, 0x5c, 0x79, 0xd1, 0x75, 0x0f, 0xf6, + 0xe6, 0x40, 0x61, 0xed, 0x62, 0x8a, 0x81, 0x9e, 0x85, 0x3e, 0x37, 0x0a, 0x9b, 0x33, 0x25, 0x86, + 0x79, 0x9a, 0xee, 0xd4, 0x6a, 0x14, 0x36, 0x33, 0xa8, 0x0c, 0xc7, 0xfe, 0x41, 0x09, 0xd0, 0x32, + 0x69, 0x6e, 0xad, 0xd6, 0x8d, 0x6f, 0x7a, 0x0e, 0x86, 0xb6, 0xc3, 0xc0, 0x4b, 0xc2, 0x28, 0x16, + 0x0d, 0xb2, 0x75, 0x71, 0x4d, 0x94, 0x61, 0x05, 0x45, 0x67, 0xa1, 0xaf, 0x99, 0x6e, 0xef, 0x11, + 0xc9, 0x1a, 0xd8, 0xc6, 0x66, 0x10, 0x8a, 0xd1, 0x8a, 0x49, 0x24, 0xd6, 0xb3, 0xc2, 0xb8, 0x19, + 0x93, 0x08, 0x33, 0x48, 0xba, 0x82, 0xe8, 0xda, 0x12, 0xab, 0x35, 0xb3, 0x82, 0x28, 0x04, 0x6b, + 0x58, 0xe8, 0x6d, 0xa8, 0xf0, 0x7f, 0x98, 0x6c, 0xb0, 0xa5, 0x9b, 0xcb, 0x14, 0xae, 0x86, 0x0d, + 0xc7, 0xcf, 0x4e, 0xfe, 0x28, 0x5b, 0x71, 0x92, 0x10, 0x4e, 0x69, 0x1a, 0x2b, 0x6e, 0x20, 0x77, + 0xc5, 0xfd, 0x1d, 0x0b, 0xd0, 0xb2, 0x17, 0xb8, 0x24, 0x3a, 0x81, 0xa3, 0xb3, 0xb7, 0xcd, 0xf0, + 0x27, 0xb4, 0x6b, 0xe1, 0x76, 0x33, 0x0c, 0x48, 0x90, 0x2c, 0x87, 0x81, 0xcb, 0x8f, 0xd3, 0x8f, + 0x43, 0x5f, 0x42, 0x9b, 0xe2, 0xdd, 0x7a, 0x5a, 0x7e, 0x16, 0xda, 0xc0, 0xc1, 0xde, 0xdc, 0xe9, + 0xf6, 0x1a, 0xac, 0x0b, 0xac, 0x0e, 0xfa, 0x18, 0x0c, 0xc4, 0x89, 0x93, 0xb4, 0x62, 0xd1, 0xd1, + 0x27, 0x64, 0x47, 0xeb, 0xac, 0xf4, 0x60, 0x6f, 0x6e, 0x5c, 0x55, 0xe3, 0x45, 0x58, 0x54, 0x40, + 0xcf, 0xc0, 0xe0, 0x36, 0x89, 0x63, 0x67, 0x53, 0x32, 0xb8, 0x71, 0x51, 0x77, 0xf0, 0x1a, 0x2f, + 0xc6, 0x12, 0x8e, 0x9e, 0x84, 0x7e, 0x12, 0x45, 0x61, 0x24, 0x56, 0xc4, 0xa8, 0x40, 0xec, 0x5f, + 0xa1, 0x85, 0x98, 0xc3, 0xec, 0xff, 0x62, 0xc1, 0xb8, 0xea, 0x2b, 0x6f, 0xeb, 0x04, 0xb6, 0xbc, + 0x0b, 0xd0, 0x90, 0x03, 0x8c, 0xd9, 0x46, 0xd3, 0xda, 0xe8, 0xbc, 0xfc, 0xda, 0x27, 0x34, 0x6d, + 0x43, 0x15, 0xc5, 0x58, 0xa3, 0x6b, 0xff, 0x3b, 0x0b, 0xa6, 0x32, 0x63, 0xbb, 0xea, 0xc5, 0x09, + 0x7a, 0xab, 0x6d, 0x7c, 0xf3, 0xc5, 0xc6, 0x47, 0x6b, 0xb3, 0xd1, 0xa9, 0xf5, 0x22, 0x4b, 0xb4, + 0xb1, 0x61, 0xe8, 0xf7, 0x12, 0xb2, 0x2d, 0x87, 0xf5, 0x7c, 0xc1, 0x61, 0xf1, 0xfe, 0xa5, 0x5f, + 0x69, 
0x8d, 0xd2, 0xc0, 0x9c, 0x94, 0xfd, 0xbf, 0x2c, 0xa8, 0x2c, 0x87, 0xc1, 0x86, 0xb7, 0x79, + 0xcd, 0x69, 0x9e, 0xc0, 0xf7, 0xa9, 0x43, 0x1f, 0xa3, 0xce, 0x87, 0xf0, 0x42, 0xde, 0x10, 0x44, + 0xc7, 0xe6, 0xe9, 0x99, 0xca, 0x85, 0x05, 0xc5, 0xa6, 0x68, 0x11, 0x66, 0xc4, 0x66, 0x5f, 0x81, + 0x8a, 0x42, 0x40, 0x13, 0x50, 0xbe, 0x4b, 0xb8, 0x24, 0x59, 0xc1, 0xf4, 0x27, 0x9a, 0x86, 0xfe, + 0x1d, 0xc7, 0x6f, 0x89, 0xcd, 0x8b, 0xf9, 0x9f, 0x8f, 0x97, 0x2e, 0x5a, 0xf6, 0x0f, 0xd8, 0x0e, + 0x14, 0x8d, 0xac, 0x04, 0x3b, 0x82, 0x39, 0x7c, 0xd9, 0x82, 0x69, 0xbf, 0x03, 0x53, 0x12, 0x73, + 0x72, 0x14, 0x76, 0xf6, 0x98, 0xe8, 0xf6, 0x74, 0x27, 0x28, 0xee, 0xd8, 0x1a, 0xe5, 0xf5, 0x61, + 0x93, 0x2e, 0x38, 0xc7, 0x67, 0x5d, 0x17, 0x32, 0xc0, 0x0d, 0x51, 0x86, 0x15, 0xd4, 0xfe, 0x73, + 0x0b, 0xa6, 0xd5, 0x38, 0xae, 0x90, 0xdd, 0x3a, 0xf1, 0x49, 0x23, 0x09, 0xa3, 0x87, 0x65, 0x24, + 0x8f, 0xf3, 0x6f, 0xc2, 0x79, 0xd2, 0xb0, 0x20, 0x50, 0xbe, 0x42, 0x76, 0xf9, 0x07, 0xd2, 0x07, + 0x5a, 0x3e, 0x74, 0xa0, 0xbf, 0x65, 0xc1, 0xa8, 0x1a, 0xe8, 0x09, 0x6c, 0xb9, 0xab, 0xe6, 0x96, + 0xfb, 0x70, 0xc1, 0xf5, 0xda, 0x65, 0xb3, 0xfd, 0xed, 0x12, 0x65, 0x1b, 0x02, 0xa7, 0x16, 0x85, + 0x74, 0x92, 0x28, 0xc7, 0x7f, 0x48, 0xbe, 0x52, 0x6f, 0x83, 0xbd, 0x42, 0x76, 0xd7, 0x43, 0x2a, + 0x4d, 0x74, 0x1e, 0xac, 0xf1, 0x51, 0xfb, 0x0e, 0xfd, 0xa8, 0xbf, 0x5f, 0x82, 0x53, 0x6a, 0x5a, + 0x8c, 0x53, 0xfa, 0x67, 0x72, 0x62, 0x5e, 0x80, 0x61, 0x97, 0x6c, 0x38, 0x2d, 0x3f, 0x51, 0xda, + 0x44, 0x3f, 0x57, 0x33, 0xab, 0x69, 0x31, 0xd6, 0x71, 0x7a, 0x98, 0xcb, 0x6f, 0x0c, 0x33, 0x7e, + 0x9e, 0x38, 0x74, 0xd5, 0x53, 0x09, 0x4f, 0x53, 0x0f, 0x47, 0x74, 0xf5, 0x50, 0xa8, 0x82, 0x4f, + 0x42, 0xbf, 0xb7, 0x4d, 0xcf, 0xfc, 0x92, 0x79, 0x94, 0xaf, 0xd1, 0x42, 0xcc, 0x61, 0xe8, 0x29, + 0x18, 0x6c, 0x84, 0xdb, 0xdb, 0x4e, 0xe0, 0xce, 0x94, 0x99, 0xcc, 0x39, 0x4c, 0xc5, 0x82, 0x65, + 0x5e, 0x84, 0x25, 0x0c, 0x3d, 0x06, 0x7d, 0x4e, 0xb4, 0x19, 0xcf, 0xf4, 0x31, 0x9c, 0x21, 0xda, + 0xd2, 0x62, 0xb4, 0x19, 0x63, 0x56, 0x4a, 0x65, 0xc9, 0x7b, 0x61, 0x74, 0xd7, 0x0b, 0x36, 0xab, + 0x5e, 0xc4, 0x04, 0x43, 0x4d, 0x96, 0xbc, 0xad, 0x20, 0x58, 0xc3, 0x42, 0x35, 0xe8, 0x6f, 0x86, + 0x51, 0x12, 0xcf, 0x0c, 0xb0, 0x89, 0x7f, 0x2e, 0x77, 0xfb, 0xf1, 0x71, 0xd7, 0xc2, 0x28, 0x49, + 0x87, 0x42, 0xff, 0xc5, 0x98, 0x13, 0x42, 0xcb, 0x50, 0x26, 0xc1, 0xce, 0xcc, 0x20, 0xa3, 0xf7, + 0xa1, 0xc3, 0xe9, 0xad, 0x04, 0x3b, 0xb7, 0x9c, 0x28, 0xe5, 0x57, 0x2b, 0xc1, 0x0e, 0xa6, 0xb5, + 0x51, 0x03, 0x2a, 0xd2, 0x84, 0x15, 0xcf, 0x0c, 0x15, 0x59, 0x8a, 0x58, 0xa0, 0x63, 0xf2, 0x5e, + 0xcb, 0x8b, 0xc8, 0x36, 0x09, 0x92, 0x38, 0x55, 0xac, 0x24, 0x34, 0xc6, 0x29, 0x5d, 0xd4, 0x80, + 0x11, 0x2e, 0x7f, 0x5e, 0x0b, 0x5b, 0x41, 0x12, 0xcf, 0x54, 0x58, 0x97, 0x73, 0x2c, 0x17, 0xb7, + 0xd2, 0x1a, 0x4b, 0xd3, 0x82, 0xfc, 0x88, 0x56, 0x18, 0x63, 0x83, 0x28, 0x7a, 0x0b, 0x46, 0x7d, + 0x6f, 0x87, 0x04, 0x24, 0x8e, 0x6b, 0x51, 0x78, 0x87, 0xcc, 0x00, 0x1b, 0xcd, 0x93, 0x79, 0x5a, + 0x7c, 0x78, 0x87, 0x2c, 0x4d, 0xee, 0xef, 0xcd, 0x8d, 0x5e, 0xd5, 0x6b, 0x63, 0x93, 0x18, 0x7a, + 0x1b, 0xc6, 0xa8, 0xb0, 0xeb, 0xa5, 0xe4, 0x87, 0x8b, 0x93, 0x47, 0xfb, 0x7b, 0x73, 0x63, 0xd8, + 0xa8, 0x8e, 0x33, 0xe4, 0xd0, 0x3a, 0x54, 0x7c, 0x6f, 0x83, 0x34, 0x76, 0x1b, 0x3e, 0x99, 0x19, + 0x61, 0xb4, 0x73, 0x36, 0xe7, 0x55, 0x89, 0xce, 0x15, 0x0c, 0xf5, 0x17, 0xa7, 0x84, 0xd0, 0x2d, + 0x38, 0x9d, 0x90, 0x68, 0xdb, 0x0b, 0x1c, 0xba, 0xa9, 0x84, 0xf4, 0xcb, 0x4c, 0x25, 0xa3, 0x6c, + 0xd5, 0x9e, 0x11, 0x13, 0x7b, 0x7a, 0xbd, 0x23, 0x16, 0xee, 0x52, 0x1b, 0xdd, 0x80, 0x71, 0xb6, + 0x9f, 0x6a, 0x2d, 0xdf, 0xaf, 
0x85, 0xbe, 0xd7, 0xd8, 0x9d, 0x19, 0x63, 0x04, 0x9f, 0x92, 0x06, + 0x90, 0x35, 0x13, 0x4c, 0x15, 0xc3, 0xf4, 0x1f, 0xce, 0xd6, 0x46, 0x3e, 0x8c, 0xc7, 0xa4, 0xd1, + 0x8a, 0xbc, 0x64, 0x97, 0xae, 0x7d, 0x72, 0x3f, 0x99, 0x19, 0x2f, 0xa2, 0xe8, 0xd6, 0xcd, 0x4a, + 0xdc, 0xfa, 0x94, 0x29, 0xc4, 0x59, 0xd2, 0x94, 0x55, 0xc4, 0x89, 0xeb, 0x05, 0x33, 0x13, 0x8c, + 0x03, 0xa9, 0xfd, 0x55, 0xa7, 0x85, 0x98, 0xc3, 0x98, 0xfd, 0x80, 0xfe, 0xb8, 0x41, 0xb9, 0xf4, + 0x24, 0x43, 0x4c, 0xed, 0x07, 0x12, 0x80, 0x53, 0x1c, 0x2a, 0x1a, 0x24, 0xc9, 0xee, 0x0c, 0x62, + 0xa8, 0x6a, 0xab, 0xad, 0xaf, 0x7f, 0x06, 0xd3, 0x72, 0x74, 0x0b, 0x06, 0x49, 0xb0, 0xb3, 0x1a, + 0x85, 0xdb, 0x33, 0x53, 0x45, 0x78, 0xc0, 0x0a, 0x47, 0xe6, 0xe7, 0x47, 0xaa, 0xc2, 0x88, 0x62, + 0x2c, 0x89, 0xa1, 0xfb, 0x30, 0xd3, 0xe1, 0x2b, 0xf1, 0x8f, 0x32, 0xcd, 0x3e, 0xca, 0x27, 0x44, + 0xdd, 0x99, 0xf5, 0x2e, 0x78, 0x07, 0x87, 0xc0, 0x70, 0x57, 0xea, 0xf6, 0x1d, 0x18, 0x53, 0x8c, + 0x8a, 0x7d, 0x6f, 0x34, 0x07, 0xfd, 0x94, 0x17, 0x4b, 0x85, 0xbe, 0x42, 0x27, 0x95, 0xb2, 0xe8, + 0x18, 0xf3, 0x72, 0x36, 0xa9, 0xde, 0xe7, 0xc9, 0xd2, 0x6e, 0x42, 0xb8, 0x62, 0x57, 0xd6, 0x26, + 0x55, 0x02, 0x70, 0x8a, 0x63, 0xff, 0x1f, 0x2e, 0x26, 0xa5, 0xdc, 0xb0, 0xc0, 0x49, 0x70, 0x1e, + 0x86, 0xb6, 0xc2, 0x38, 0xa1, 0xd8, 0xac, 0x8d, 0xfe, 0x54, 0x30, 0xba, 0x2c, 0xca, 0xb1, 0xc2, + 0x40, 0xaf, 0xc2, 0x68, 0x43, 0x6f, 0x40, 0x1c, 0x63, 0xa7, 0x44, 0x15, 0xb3, 0x75, 0x6c, 0xe2, + 0xa2, 0x8b, 0x30, 0xc4, 0xac, 0xdc, 0x8d, 0xd0, 0x17, 0x2a, 0xa4, 0x3c, 0x95, 0x87, 0x6a, 0xa2, + 0xfc, 0x40, 0xfb, 0x8d, 0x15, 0x36, 0x55, 0xc4, 0x69, 0x17, 0xd6, 0x6a, 0xe2, 0x00, 0x51, 0x8a, + 0xf8, 0x65, 0x56, 0x8a, 0x05, 0xd4, 0xfe, 0x17, 0x25, 0x6d, 0x96, 0xa9, 0x02, 0x44, 0xd0, 0x9b, + 0x30, 0x78, 0xcf, 0xf1, 0x12, 0x2f, 0xd8, 0x14, 0xd2, 0xc3, 0x8b, 0x05, 0x4f, 0x13, 0x56, 0xfd, + 0x36, 0xaf, 0xca, 0x4f, 0x3e, 0xf1, 0x07, 0x4b, 0x82, 0x94, 0x76, 0xd4, 0x0a, 0x02, 0x4a, 0xbb, + 0xd4, 0x3b, 0x6d, 0xcc, 0xab, 0x72, 0xda, 0xe2, 0x0f, 0x96, 0x04, 0xd1, 0x06, 0x80, 0x5c, 0x4b, + 0xc4, 0x15, 0xd6, 0xe5, 0x8f, 0xf6, 0x42, 0x7e, 0x5d, 0xd5, 0x5e, 0x1a, 0xa3, 0x67, 0x6d, 0xfa, + 0x1f, 0x6b, 0x94, 0xed, 0x84, 0x09, 0x61, 0xed, 0xdd, 0x42, 0x9f, 0xa5, 0x5b, 0xda, 0x89, 0x12, + 0xe2, 0x2e, 0x26, 0x59, 0x03, 0xfd, 0xe1, 0x22, 0xf6, 0xba, 0xb7, 0x4d, 0xf4, 0xed, 0x2f, 0x88, + 0xe0, 0x94, 0x9e, 0xfd, 0xdd, 0x32, 0xcc, 0x74, 0xeb, 0x2e, 0x5d, 0x92, 0xe4, 0xbe, 0x97, 0x2c, + 0x53, 0x31, 0xc9, 0x32, 0x97, 0xe4, 0x8a, 0x28, 0xc7, 0x0a, 0x83, 0xae, 0x8d, 0xd8, 0xdb, 0x94, + 0xca, 0x52, 0x7f, 0xba, 0x36, 0xea, 0xac, 0x14, 0x0b, 0x28, 0xc5, 0x8b, 0x88, 0x13, 0x8b, 0xcb, + 0x0d, 0x6d, 0x0d, 0x61, 0x56, 0x8a, 0x05, 0x54, 0x37, 0x88, 0xf4, 0xe5, 0x18, 0x44, 0x8c, 0x29, + 0xea, 0x7f, 0xb0, 0x53, 0x84, 0x3e, 0x07, 0xb0, 0xe1, 0x05, 0x5e, 0xbc, 0xc5, 0xa8, 0x0f, 0xf4, + 0x4c, 0x5d, 0x09, 0x59, 0xab, 0x8a, 0x0a, 0xd6, 0x28, 0xa2, 0x97, 0x61, 0x58, 0x6d, 0xcf, 0xb5, + 0xea, 0xcc, 0xa0, 0x69, 0x10, 0x4f, 0x79, 0x55, 0x15, 0xeb, 0x78, 0xf6, 0xbb, 0xd9, 0xf5, 0x22, + 0x76, 0x85, 0x36, 0xbf, 0x56, 0xd1, 0xf9, 0x2d, 0x1d, 0x3e, 0xbf, 0xf6, 0x7f, 0x2e, 0xc3, 0xb8, + 0xd1, 0x58, 0x2b, 0x2e, 0xc0, 0xd1, 0x5e, 0xa7, 0x07, 0x96, 0x93, 0x10, 0xb1, 0x27, 0xcf, 0xf7, + 0xb2, 0x69, 0xf4, 0xe3, 0x8d, 0xee, 0x05, 0x4e, 0x09, 0x6d, 0x41, 0xc5, 0x77, 0x62, 0x66, 0x52, + 0x21, 0x62, 0x2f, 0xf6, 0x46, 0x36, 0x55, 0x3f, 0x9c, 0x38, 0xd1, 0x4e, 0x0f, 0xde, 0x4a, 0x4a, + 0x9c, 0x9e, 0xb6, 0x54, 0xd8, 0x91, 0x37, 0x6a, 0xaa, 0x3b, 0x54, 0x22, 0xda, 0xc5, 0x1c, 0x86, + 0x2e, 0xc2, 0x48, 0x44, 0xd8, 0x4a, 0x59, 0xa6, 0xf2, 
0x1c, 0x5b, 0x7a, 0xfd, 0xa9, 0xe0, 0x87, + 0x35, 0x18, 0x36, 0x30, 0x53, 0xb9, 0x7f, 0xe0, 0x10, 0xb9, 0xff, 0x19, 0x18, 0x64, 0x3f, 0xd4, + 0xaa, 0x50, 0x5f, 0x68, 0x8d, 0x17, 0x63, 0x09, 0xcf, 0x2e, 0xa2, 0xa1, 0x82, 0x8b, 0xe8, 0x59, + 0x18, 0xab, 0x3a, 0x64, 0x3b, 0x0c, 0x56, 0x02, 0xb7, 0x19, 0x7a, 0x41, 0x82, 0x66, 0xa0, 0x8f, + 0x9d, 0x27, 0x7c, 0xbf, 0xf7, 0x51, 0x0a, 0xb8, 0x8f, 0xca, 0xee, 0xf6, 0x9f, 0x94, 0x60, 0xb4, + 0x4a, 0x7c, 0x92, 0x10, 0xae, 0xf7, 0xc4, 0x68, 0x15, 0xd0, 0x66, 0xe4, 0x34, 0x48, 0x8d, 0x44, + 0x5e, 0xe8, 0xd6, 0x49, 0x23, 0x0c, 0xd8, 0x45, 0x14, 0x3d, 0x20, 0x4f, 0xef, 0xef, 0xcd, 0xa1, + 0x4b, 0x6d, 0x50, 0xdc, 0xa1, 0x06, 0x72, 0x61, 0xb4, 0x19, 0x11, 0xc3, 0x6e, 0x68, 0xe5, 0x8b, + 0x1a, 0x35, 0xbd, 0x0a, 0x97, 0x86, 0x8d, 0x22, 0x6c, 0x12, 0x45, 0x9f, 0x82, 0x89, 0x30, 0x6a, + 0x6e, 0x39, 0x41, 0x95, 0x34, 0x49, 0xe0, 0x52, 0x15, 0x40, 0x58, 0x3b, 0xa6, 0xf7, 0xf7, 0xe6, + 0x26, 0x6e, 0x64, 0x60, 0xb8, 0x0d, 0x1b, 0xbd, 0x09, 0x93, 0xcd, 0x28, 0x6c, 0x3a, 0x9b, 0x6c, + 0xc9, 0x08, 0x69, 0x85, 0xf3, 0xa6, 0xf3, 0xfb, 0x7b, 0x73, 0x93, 0xb5, 0x2c, 0xf0, 0x60, 0x6f, + 0x6e, 0x8a, 0x4d, 0x19, 0x2d, 0x49, 0x81, 0xb8, 0x9d, 0x8c, 0xfd, 0x1e, 0x9c, 0xaa, 0x86, 0xf7, + 0x82, 0x7b, 0x4e, 0xe4, 0x2e, 0xd6, 0xd6, 0x34, 0xe3, 0xc4, 0x1b, 0x52, 0xf9, 0xe5, 0x17, 0x7c, + 0x39, 0x27, 0x9b, 0x46, 0x83, 0xab, 0x1d, 0xab, 0x9e, 0x4f, 0xba, 0x98, 0x43, 0xfe, 0x49, 0xc9, + 0x68, 0x33, 0xc5, 0x57, 0x77, 0x17, 0x56, 0xd7, 0xbb, 0x8b, 0xcf, 0xc2, 0xd0, 0x86, 0x47, 0x7c, + 0x17, 0x93, 0x0d, 0xf1, 0xb5, 0x5e, 0x28, 0x72, 0xb9, 0xb3, 0x4a, 0xeb, 0x48, 0xeb, 0x18, 0x57, + 0xa2, 0x57, 0x05, 0x19, 0xac, 0x08, 0xa2, 0x16, 0x4c, 0x48, 0x3d, 0x4c, 0x42, 0xc5, 0x66, 0x7f, + 0xb1, 0x98, 0x9a, 0x67, 0x36, 0xc3, 0x3e, 0x2f, 0xce, 0x10, 0xc4, 0x6d, 0x4d, 0x50, 0xfd, 0x79, + 0x9b, 0x1e, 0x75, 0x7d, 0x6c, 0xe9, 0x33, 0xfd, 0x99, 0x99, 0x02, 0x58, 0xa9, 0xfd, 0x6b, 0x16, + 0x3c, 0xd2, 0x36, 0x5b, 0xc2, 0x4e, 0x72, 0x6c, 0xdf, 0x28, 0x6b, 0xac, 0x28, 0xe5, 0x1b, 0x2b, + 0xec, 0x1b, 0x30, 0xbd, 0xb2, 0xdd, 0x4c, 0x76, 0xab, 0x9e, 0x79, 0xe5, 0xf2, 0x0a, 0x0c, 0x6c, + 0x13, 0xd7, 0x6b, 0x6d, 0x8b, 0xcf, 0x3a, 0x27, 0xcf, 0x85, 0x6b, 0xac, 0xf4, 0x60, 0x6f, 0x6e, + 0xb4, 0x9e, 0x84, 0x91, 0xb3, 0x49, 0x78, 0x01, 0x16, 0xe8, 0xf6, 0x8f, 0x2d, 0x18, 0x97, 0xfc, + 0x61, 0xd1, 0x75, 0x23, 0x12, 0xc7, 0x68, 0x16, 0x4a, 0x5e, 0x53, 0x10, 0x02, 0x41, 0xa8, 0xb4, + 0x56, 0xc3, 0x25, 0xaf, 0x89, 0xde, 0x84, 0x0a, 0xbf, 0xa9, 0x4b, 0x17, 0x47, 0x8f, 0x37, 0x7f, + 0x4c, 0x37, 0x5c, 0x97, 0x34, 0x70, 0x4a, 0x4e, 0x4a, 0xc9, 0xec, 0xe4, 0x29, 0x9b, 0xf7, 0x46, + 0x97, 0x45, 0x39, 0x56, 0x18, 0xe8, 0x1c, 0x0c, 0x05, 0xa1, 0xcb, 0x2f, 0x53, 0xf9, 0x3e, 0x65, + 0x4b, 0xee, 0xba, 0x28, 0xc3, 0x0a, 0x6a, 0x7f, 0xd5, 0x82, 0x11, 0x39, 0xc6, 0x82, 0x02, 0x3b, + 0xdd, 0x24, 0xa9, 0xb0, 0x9e, 0x6e, 0x12, 0x2a, 0x70, 0x33, 0x88, 0x21, 0x67, 0x97, 0x7b, 0x91, + 0xb3, 0xed, 0xdf, 0x2c, 0xc1, 0x98, 0xec, 0x4e, 0xbd, 0x75, 0x27, 0x26, 0x54, 0x0c, 0xa9, 0x38, + 0x7c, 0xf2, 0x89, 0x5c, 0x67, 0xcf, 0xe7, 0xe9, 0x62, 0xc6, 0x37, 0x4b, 0xc5, 0x9c, 0x45, 0x49, + 0x07, 0xa7, 0x24, 0xd1, 0x0e, 0x4c, 0x06, 0x61, 0xc2, 0x8e, 0x37, 0x05, 0x2f, 0x76, 0xd3, 0x91, + 0x6d, 0xe7, 0x51, 0xd1, 0xce, 0xe4, 0xf5, 0x2c, 0x3d, 0xdc, 0xde, 0x04, 0xba, 0x21, 0x6d, 0x4c, + 0x65, 0xd6, 0xd6, 0xb3, 0xc5, 0xda, 0xea, 0x6e, 0x62, 0xb2, 0x7f, 0xcf, 0x82, 0x8a, 0x44, 0x3b, + 0x89, 0x2b, 0xaf, 0xdb, 0x30, 0x18, 0xb3, 0x4f, 0x24, 0xa7, 0xeb, 0x7c, 0xb1, 0x21, 0xf0, 0xef, + 0x9a, 0x9e, 0xe9, 0xfc, 0x7f, 0x8c, 0x25, 0x35, 0x66, 0x6c, 0x57, 0x03, 0x79, 
0xe8, 0x8c, 0xed, + 0xaa, 0x67, 0xdd, 0x6f, 0xb6, 0x46, 0x0d, 0x6b, 0x00, 0x15, 0x4c, 0x9b, 0x11, 0xd9, 0xf0, 0xee, + 0x67, 0x05, 0xd3, 0x1a, 0x2b, 0xc5, 0x02, 0x8a, 0x36, 0x60, 0xa4, 0x21, 0xcd, 0xd1, 0x29, 0x0b, + 0xf9, 0x48, 0x41, 0xdb, 0xbf, 0xba, 0x46, 0xe2, 0xae, 0x49, 0xcb, 0x1a, 0x25, 0x6c, 0xd0, 0xa5, + 0x7c, 0x2a, 0xbd, 0x29, 0x2f, 0x17, 0x34, 0xdc, 0x44, 0x24, 0x49, 0x5b, 0xe8, 0x7a, 0x49, 0x6e, + 0x7f, 0xd3, 0x82, 0x01, 0x6e, 0xbf, 0x2c, 0x66, 0x04, 0xd6, 0x2e, 0xc8, 0xd2, 0xf9, 0xbc, 0x45, + 0x0b, 0xc5, 0x7d, 0x19, 0xba, 0x0d, 0x15, 0xf6, 0x83, 0xd9, 0x62, 0xca, 0x45, 0xfc, 0xb4, 0x78, + 0xfb, 0x7a, 0x57, 0x6f, 0x49, 0x02, 0x38, 0xa5, 0x65, 0x7f, 0xbf, 0x4c, 0x59, 0x5f, 0x8a, 0x6a, + 0x9c, 0xed, 0xd6, 0x49, 0x9c, 0xed, 0xa5, 0xe3, 0x3f, 0xdb, 0xdf, 0x83, 0xf1, 0x86, 0x76, 0x41, + 0x97, 0x7e, 0xf1, 0x0b, 0x05, 0x97, 0x95, 0x76, 0xab, 0xc7, 0xed, 0x75, 0xcb, 0x26, 0x39, 0x9c, + 0xa5, 0x8f, 0x08, 0x8c, 0xf0, 0xf5, 0x20, 0xda, 0xeb, 0x63, 0xed, 0x2d, 0x14, 0x59, 0x61, 0x7a, + 0x63, 0x6c, 0x15, 0xd7, 0x35, 0x42, 0xd8, 0x20, 0x6b, 0xff, 0x4a, 0x3f, 0xf4, 0xaf, 0xec, 0x90, + 0x20, 0x39, 0x01, 0x56, 0xb7, 0x0d, 0x63, 0x5e, 0xb0, 0x13, 0xfa, 0x3b, 0xc4, 0xe5, 0xf0, 0xa3, + 0x1d, 0xef, 0xa7, 0x45, 0x23, 0x63, 0x6b, 0x06, 0x31, 0x9c, 0x21, 0x7e, 0x1c, 0x96, 0x82, 0xd7, + 0x61, 0x80, 0xaf, 0x0c, 0x61, 0x26, 0xc8, 0xb1, 0xe7, 0xb3, 0x89, 0x15, 0x3b, 0x28, 0xb5, 0x67, + 0xf0, 0xab, 0x04, 0x41, 0x08, 0xbd, 0x0b, 0x63, 0x1b, 0x5e, 0x14, 0x27, 0x54, 0xd9, 0x8f, 0x13, + 0x67, 0xbb, 0x79, 0x04, 0x1b, 0x81, 0x9a, 0x91, 0x55, 0x83, 0x12, 0xce, 0x50, 0x46, 0x9b, 0x30, + 0x4a, 0x55, 0xd4, 0xb4, 0xa9, 0xc1, 0x9e, 0x9b, 0x52, 0x26, 0xc2, 0xab, 0x3a, 0x21, 0x6c, 0xd2, + 0xa5, 0x2c, 0xa9, 0xc1, 0x54, 0xda, 0x21, 0x26, 0xdd, 0x28, 0x96, 0xc4, 0x75, 0x59, 0x0e, 0xa3, + 0x9c, 0x8d, 0x79, 0xca, 0x54, 0x4c, 0xce, 0x96, 0xfa, 0xc3, 0xd8, 0xdf, 0xa6, 0x67, 0x31, 0x9d, + 0xc3, 0x13, 0x38, 0xbe, 0x2e, 0x9b, 0xc7, 0xd7, 0x93, 0x05, 0xbe, 0x6c, 0x97, 0xa3, 0xeb, 0x1d, + 0x18, 0xd6, 0x3e, 0x3c, 0x5a, 0x80, 0x4a, 0x43, 0x3a, 0x73, 0x08, 0x2e, 0xae, 0x44, 0x29, 0xe5, + 0xe5, 0x81, 0x53, 0x1c, 0x3a, 0x2f, 0x54, 0x04, 0xcd, 0xba, 0x7e, 0x51, 0x01, 0x15, 0x33, 0x88, + 0xfd, 0x22, 0xc0, 0xca, 0x7d, 0xd2, 0x58, 0xe4, 0x2a, 0x9e, 0x76, 0xbf, 0x67, 0x75, 0xbf, 0xdf, + 0xb3, 0xbf, 0x65, 0xc1, 0xd8, 0xea, 0xb2, 0x21, 0xd3, 0xcf, 0x03, 0x70, 0xd9, 0xf8, 0xf6, 0xed, + 0xeb, 0xd2, 0x7e, 0xcd, 0x8d, 0x8c, 0xaa, 0x14, 0x6b, 0x18, 0xe8, 0x51, 0x28, 0xfb, 0xad, 0x40, + 0x88, 0xac, 0x83, 0xfb, 0x7b, 0x73, 0xe5, 0xab, 0xad, 0x00, 0xd3, 0x32, 0xcd, 0xc7, 0xaa, 0x5c, + 0xd8, 0xc7, 0x2a, 0xdf, 0xdb, 0xf8, 0xeb, 0x65, 0x98, 0x58, 0xf5, 0xc9, 0x7d, 0xa3, 0xd7, 0x4f, + 0xc3, 0x80, 0x1b, 0x79, 0x3b, 0x24, 0xca, 0x0a, 0x02, 0x55, 0x56, 0x8a, 0x05, 0xb4, 0xb0, 0xdb, + 0xd7, 0xdb, 0xed, 0x07, 0xf9, 0xf1, 0xb9, 0xbc, 0xe5, 0x8e, 0x19, 0x6d, 0xc0, 0x20, 0xbf, 0x0f, + 0x8e, 0x67, 0xfa, 0xd9, 0x52, 0x7c, 0xf5, 0xf0, 0xce, 0x64, 0xe7, 0x67, 0x5e, 0xd8, 0x57, 0xb8, + 0xc3, 0x8d, 0xe2, 0x65, 0xa2, 0x14, 0x4b, 0xe2, 0xb3, 0x1f, 0x87, 0x11, 0x1d, 0xb3, 0x27, 0xcf, + 0x9b, 0xbf, 0x6a, 0xc1, 0xd4, 0xaa, 0x1f, 0x36, 0xee, 0x66, 0xfc, 0xf2, 0x5e, 0x86, 0x61, 0xba, + 0x99, 0x62, 0xc3, 0x69, 0xd5, 0xf0, 0xce, 0x15, 0x20, 0xac, 0xe3, 0x69, 0xd5, 0x6e, 0xde, 0x5c, + 0xab, 0x76, 0x72, 0xea, 0x15, 0x20, 0xac, 0xe3, 0xd9, 0x7f, 0x60, 0xc1, 0xe3, 0x97, 0x96, 0x57, + 0x6a, 0x24, 0x8a, 0xbd, 0x38, 0x21, 0x41, 0xd2, 0xe6, 0x57, 0x4c, 0x65, 0x46, 0x57, 0xeb, 0x4a, + 0x2a, 0x33, 0x56, 0x59, 0x2f, 0x04, 0xf4, 0x61, 0x71, 0xae, 0xff, 0xa6, 0x05, 0x53, 0x97, 0xbc, + 0x04, 
0x93, 0x66, 0x98, 0x75, 0x05, 0x8e, 0x48, 0x33, 0x8c, 0xbd, 0x24, 0x8c, 0x76, 0xb3, 0xae, + 0xc0, 0x58, 0x41, 0xb0, 0x86, 0xc5, 0x5b, 0xde, 0xf1, 0x62, 0xda, 0xd3, 0x92, 0xa9, 0xea, 0x62, + 0x51, 0x8e, 0x15, 0x06, 0x1d, 0x98, 0xeb, 0x45, 0x4c, 0x64, 0xd8, 0x15, 0x3b, 0x58, 0x0d, 0xac, + 0x2a, 0x01, 0x38, 0xc5, 0xb1, 0xff, 0x9e, 0x05, 0xa7, 0x2e, 0xf9, 0xad, 0x38, 0x21, 0xd1, 0x46, + 0x6c, 0x74, 0xf6, 0x45, 0xa8, 0x10, 0x29, 0xdc, 0x8b, 0xbe, 0xaa, 0x43, 0x43, 0x49, 0xfd, 0xdc, + 0x0f, 0x59, 0xe1, 0x15, 0x70, 0x77, 0xed, 0xcd, 0x39, 0xf3, 0xb7, 0x4b, 0x30, 0x7a, 0x79, 0x7d, + 0xbd, 0x76, 0x89, 0x24, 0x82, 0x4b, 0xe6, 0x1b, 0xa5, 0xb0, 0xa6, 0x91, 0x1f, 0x26, 0xfc, 0xb4, + 0x12, 0xcf, 0x9f, 0xe7, 0xe1, 0x22, 0xf3, 0x6b, 0x41, 0x72, 0x23, 0xaa, 0x27, 0x91, 0x17, 0x6c, + 0x76, 0xd4, 0xe1, 0x25, 0x2f, 0x2f, 0x77, 0xe3, 0xe5, 0xe8, 0x45, 0x18, 0x60, 0xf1, 0x2a, 0x52, + 0xf8, 0xf8, 0xa0, 0x92, 0x13, 0x58, 0xe9, 0xc1, 0xde, 0x5c, 0xe5, 0x26, 0x5e, 0xe3, 0x7f, 0xb0, + 0x40, 0x45, 0x6f, 0xc3, 0xf0, 0x56, 0x92, 0x34, 0x2f, 0x13, 0xc7, 0x25, 0x91, 0xe4, 0x13, 0xe7, + 0x0e, 0xe7, 0x13, 0x74, 0x3a, 0x78, 0x85, 0x74, 0x6b, 0xa5, 0x65, 0x31, 0xd6, 0x29, 0xda, 0x75, + 0x80, 0x14, 0xf6, 0x80, 0x74, 0x10, 0xfb, 0xe7, 0x4b, 0x30, 0x78, 0xd9, 0x09, 0x5c, 0x9f, 0x44, + 0x68, 0x15, 0xfa, 0xc8, 0x7d, 0xd2, 0x10, 0x07, 0x79, 0x4e, 0xd7, 0xd3, 0xc3, 0x8e, 0xdb, 0xd5, + 0xe8, 0x7f, 0xcc, 0xea, 0x23, 0x0c, 0x83, 0xb4, 0xdf, 0x97, 0x94, 0x97, 0xf8, 0x73, 0xf9, 0xb3, + 0xa0, 0x16, 0x05, 0x3f, 0x29, 0x45, 0x11, 0x96, 0x84, 0x98, 0x05, 0xaa, 0xd1, 0xac, 0x53, 0xf6, + 0x96, 0x14, 0xd3, 0xec, 0xd6, 0x97, 0x6b, 0x1c, 0x5d, 0xd0, 0xe5, 0x16, 0x28, 0x59, 0x88, 0x53, + 0x72, 0xf6, 0x45, 0x98, 0x66, 0xf7, 0xb1, 0x4e, 0xb2, 0x65, 0xec, 0x9a, 0xdc, 0xe5, 0x69, 0xff, + 0xb0, 0x04, 0x93, 0x6b, 0xf5, 0xe5, 0xba, 0x69, 0x3b, 0xbc, 0x08, 0x23, 0xfc, 0x80, 0xa6, 0x8b, + 0xce, 0xf1, 0x45, 0x7d, 0x75, 0x87, 0xb0, 0xae, 0xc1, 0xb0, 0x81, 0x89, 0x1e, 0x87, 0xb2, 0xf7, + 0x5e, 0x90, 0xf5, 0xea, 0x5b, 0x7b, 0xfd, 0x3a, 0xa6, 0xe5, 0x14, 0x4c, 0xcf, 0x7a, 0xce, 0xe4, + 0x14, 0x58, 0x9d, 0xf7, 0xaf, 0xc1, 0x98, 0x17, 0x37, 0x62, 0x6f, 0x2d, 0xa0, 0x1c, 0xc0, 0x69, + 0xc8, 0xe5, 0x9b, 0x0a, 0xe7, 0xb4, 0xab, 0x0a, 0x8a, 0x33, 0xd8, 0x1a, 0xc7, 0xed, 0x2f, 0x2c, + 0x2f, 0xe4, 0xba, 0x8b, 0x53, 0x51, 0xa8, 0xc9, 0x46, 0x17, 0x33, 0x1f, 0x21, 0x21, 0x0a, 0xf1, + 0x01, 0xc7, 0x58, 0xc2, 0xec, 0x77, 0xa1, 0xa2, 0xdc, 0xbc, 0xa4, 0x77, 0xa3, 0xd5, 0xc5, 0xbb, + 0x31, 0x9f, 0x33, 0x49, 0xc3, 0x6f, 0xb9, 0xa3, 0xe1, 0xf7, 0x9f, 0x59, 0x90, 0xfa, 0xa9, 0x20, + 0x0c, 0x95, 0x66, 0xc8, 0x2e, 0x89, 0x22, 0x79, 0x1b, 0xfb, 0x54, 0xce, 0x82, 0xe5, 0x1b, 0x86, + 0x2f, 0xa9, 0x9a, 0xac, 0x8b, 0x53, 0x32, 0xe8, 0x2a, 0x0c, 0x36, 0x23, 0x52, 0x4f, 0x58, 0x68, + 0x42, 0x0f, 0x14, 0xf9, 0xdc, 0xf0, 0x9a, 0x58, 0x92, 0xb0, 0xff, 0x95, 0x05, 0x70, 0xd5, 0xdb, + 0xf6, 0x12, 0xec, 0x04, 0x9b, 0xe4, 0x04, 0xb4, 0xc2, 0xeb, 0xd0, 0x17, 0x37, 0x49, 0xa3, 0xd8, + 0x35, 0x5f, 0xda, 0xb3, 0x7a, 0x93, 0x34, 0xd2, 0xcf, 0x41, 0xff, 0x61, 0x46, 0xc7, 0xfe, 0x1e, + 0xc0, 0x58, 0x8a, 0x46, 0x25, 0x73, 0xf4, 0xbc, 0xe1, 0x93, 0xff, 0x68, 0xc6, 0x27, 0xbf, 0xc2, + 0xb0, 0x35, 0x37, 0xfc, 0x04, 0xca, 0xdb, 0xce, 0x7d, 0xa1, 0x08, 0xbc, 0x5c, 0xb4, 0x43, 0xb4, + 0xa5, 0xf9, 0x6b, 0xce, 0x7d, 0x2e, 0x77, 0x3d, 0x27, 0x17, 0xd2, 0x35, 0xe7, 0xfe, 0x01, 0xbf, + 0xcc, 0x63, 0x1b, 0x96, 0x6a, 0x1e, 0x5f, 0xfa, 0xd3, 0xf4, 0x3f, 0xe3, 0xa1, 0xb4, 0x39, 0xd6, + 0xaa, 0x17, 0x08, 0x3b, 0x66, 0x8f, 0xad, 0x7a, 0x41, 0xb6, 0x55, 0x2f, 0x28, 0xd0, 0xaa, 0xc7, + 0x9c, 0x57, 0x07, 0x85, 0xf9, 
0x9f, 0x79, 0xfe, 0x0d, 0x5f, 0xf8, 0x58, 0x4f, 0x4d, 0x8b, 0x7b, + 0x04, 0xde, 0xfc, 0x82, 0x14, 0x36, 0x45, 0x69, 0x6e, 0x17, 0x64, 0xd3, 0xe8, 0xef, 0x5b, 0x30, + 0x26, 0x7e, 0x63, 0xf2, 0x5e, 0x8b, 0xc4, 0x89, 0x38, 0xd4, 0x3e, 0x75, 0x94, 0xde, 0x08, 0x12, + 0xbc, 0x53, 0x1f, 0x95, 0x1c, 0xc9, 0x04, 0xe6, 0xf6, 0x2d, 0xd3, 0x1f, 0xf4, 0x3d, 0x0b, 0xa6, + 0xb7, 0x9d, 0xfb, 0xbc, 0x45, 0x5e, 0x86, 0x9d, 0xc4, 0x0b, 0x85, 0x77, 0xe3, 0x6a, 0xaf, 0xeb, + 0xa4, 0x8d, 0x10, 0xef, 0xae, 0x74, 0x5c, 0x9a, 0xee, 0x84, 0x92, 0xdb, 0xe9, 0x8e, 0x3d, 0x9c, + 0xdd, 0x80, 0x21, 0xb9, 0x30, 0x3b, 0x88, 0xf9, 0x55, 0xfd, 0xec, 0xce, 0x51, 0xaa, 0xe7, 0xa5, + 0x69, 0x6c, 0xfe, 0xf5, 0x96, 0x13, 0x24, 0x5e, 0xb2, 0xab, 0xa9, 0x05, 0xac, 0x1d, 0xb1, 0x14, + 0x8f, 0xb5, 0x9d, 0x77, 0x61, 0x44, 0x5f, 0x77, 0xc7, 0xda, 0xd6, 0x7b, 0x30, 0xd5, 0x61, 0x55, + 0x1d, 0x6b, 0x93, 0xf7, 0xe0, 0xd1, 0xae, 0xeb, 0xe3, 0x38, 0x1b, 0xb6, 0x7f, 0xdb, 0xd2, 0x59, + 0xe7, 0x09, 0x18, 0x5d, 0xae, 0x99, 0x46, 0x97, 0x73, 0x45, 0xf7, 0x50, 0x17, 0xcb, 0xcb, 0x86, + 0xde, 0x7d, 0x7a, 0x24, 0xa0, 0x75, 0x18, 0xf0, 0x69, 0x89, 0xbc, 0xf3, 0x3a, 0xdf, 0xcb, 0x2e, + 0x4d, 0x85, 0x12, 0x56, 0x1e, 0x63, 0x41, 0xcb, 0xfe, 0x9e, 0x05, 0x7d, 0x7f, 0x89, 0x11, 0x43, + 0x6d, 0xa4, 0x45, 0xe0, 0xfb, 0x3c, 0x76, 0xee, 0xad, 0xdc, 0x4f, 0x48, 0x10, 0x33, 0x19, 0xb4, + 0xdb, 0xad, 0xfd, 0x30, 0x6d, 0x4a, 0x3a, 0x61, 0xbc, 0x0a, 0xa3, 0xbe, 0x73, 0x87, 0xf8, 0xd2, + 0x60, 0x9c, 0xd5, 0xd8, 0xae, 0xea, 0x40, 0x6c, 0xe2, 0xd2, 0xca, 0x1b, 0xba, 0x3d, 0x5d, 0x08, + 0x49, 0xaa, 0xb2, 0x61, 0x6c, 0xc7, 0x26, 0x2e, 0x55, 0x19, 0xee, 0x39, 0x49, 0x63, 0x4b, 0x68, + 0x73, 0xaa, 0xbb, 0xb7, 0x69, 0x21, 0xe6, 0x30, 0xb4, 0x08, 0xe3, 0x72, 0xc5, 0xde, 0xa2, 0x6a, + 0x7e, 0x18, 0x08, 0x39, 0x53, 0x45, 0x1d, 0x63, 0x13, 0x8c, 0xb3, 0xf8, 0xe8, 0xe3, 0x30, 0x46, + 0x27, 0x27, 0x6c, 0x25, 0xd2, 0xc5, 0xa4, 0x9f, 0xb9, 0x98, 0x30, 0x0f, 0xe5, 0x75, 0x03, 0x82, + 0x33, 0x98, 0xf6, 0xdb, 0x30, 0x75, 0x35, 0x74, 0xdc, 0x25, 0xc7, 0x77, 0x82, 0x06, 0x89, 0xd6, + 0x82, 0xcd, 0xdc, 0xeb, 0x6b, 0xfd, 0x8a, 0xb9, 0x94, 0x77, 0xc5, 0x6c, 0x47, 0x80, 0xf4, 0x06, + 0x84, 0x73, 0xd4, 0x5b, 0x30, 0xe8, 0xf1, 0xa6, 0xc4, 0xb2, 0x7d, 0x21, 0xcf, 0x1e, 0xd5, 0xd6, + 0x47, 0xcd, 0xd9, 0x87, 0x17, 0x60, 0x49, 0x92, 0xaa, 0x20, 0x9d, 0x0c, 0x58, 0xf9, 0x5a, 0x9e, + 0xfd, 0xd7, 0x2d, 0x18, 0xbf, 0x9e, 0x09, 0x69, 0x7d, 0x1a, 0x06, 0x78, 0x62, 0x84, 0xac, 0x89, + 0xa5, 0xce, 0x4a, 0xb1, 0x80, 0x3e, 0x70, 0x0d, 0xff, 0x97, 0x4b, 0x50, 0x61, 0x6e, 0xb6, 0x4d, + 0xaa, 0x4e, 0x1c, 0xbf, 0x98, 0x7a, 0xcd, 0x10, 0x53, 0x73, 0xb4, 0x4c, 0xd5, 0xb1, 0x6e, 0x52, + 0x2a, 0xba, 0xa9, 0x42, 0x3d, 0x0b, 0x29, 0x98, 0x29, 0x41, 0x1e, 0x0e, 0x38, 0x66, 0x46, 0x86, + 0xca, 0x30, 0x50, 0x76, 0xe9, 0xab, 0x70, 0x1f, 0xba, 0x4b, 0x5f, 0xd5, 0xb3, 0x2e, 0xcc, 0xa9, + 0xa6, 0x75, 0x9e, 0xb1, 0xef, 0x4f, 0x32, 0xe7, 0x49, 0xc7, 0xf7, 0x3e, 0x4f, 0x54, 0xc4, 0xf4, + 0x9c, 0x70, 0x86, 0x14, 0xa5, 0x07, 0x8c, 0xcf, 0x88, 0x7f, 0x3c, 0x20, 0x3e, 0xad, 0x62, 0x5f, + 0x86, 0xf1, 0xcc, 0xd4, 0xa1, 0x97, 0xa1, 0xbf, 0xb9, 0xe5, 0xc4, 0x24, 0xe3, 0xc7, 0xd2, 0x5f, + 0xa3, 0x85, 0x07, 0x7b, 0x73, 0x63, 0xaa, 0x02, 0x2b, 0xc1, 0x1c, 0xdb, 0xfe, 0x72, 0x09, 0xfa, + 0xae, 0x87, 0xee, 0x49, 0x2c, 0xb5, 0xcb, 0xc6, 0x52, 0x7b, 0x3a, 0x3f, 0x9d, 0x46, 0xd7, 0x55, + 0x56, 0xcb, 0xac, 0xb2, 0x73, 0x05, 0x68, 0x1d, 0xbe, 0xc0, 0xb6, 0x61, 0x98, 0xa5, 0xeb, 0x10, + 0x8e, 0x3c, 0x2f, 0x1a, 0x9a, 0xd5, 0x5c, 0x46, 0xb3, 0x1a, 0xd7, 0x50, 0x35, 0xfd, 0xea, 0x19, + 0x18, 0x14, 0x8e, 0x23, 0x59, 0xd7, 0x51, 0x81, 0x8b, 
0x25, 0xdc, 0xfe, 0x97, 0x65, 0x30, 0xd2, + 0x83, 0xa0, 0xdf, 0xb3, 0x60, 0x3e, 0xe2, 0x61, 0x38, 0x6e, 0xb5, 0x15, 0x79, 0xc1, 0x66, 0xbd, + 0xb1, 0x45, 0xdc, 0x96, 0xef, 0x05, 0x9b, 0x6b, 0x9b, 0x41, 0xa8, 0x8a, 0x57, 0xee, 0x93, 0x46, + 0x8b, 0xd9, 0x69, 0x0b, 0x67, 0x25, 0x51, 0x97, 0xa6, 0x17, 0xf6, 0xf7, 0xe6, 0xe6, 0x71, 0x4f, + 0xad, 0xe0, 0x1e, 0x7b, 0x85, 0xfe, 0xd8, 0x82, 0x05, 0x9e, 0x20, 0xa3, 0xf8, 0x48, 0x0a, 0x69, + 0xa4, 0x35, 0x49, 0x34, 0x25, 0xb7, 0x4e, 0xa2, 0xed, 0xa5, 0x57, 0xc4, 0x24, 0x2f, 0xd4, 0x7a, + 0x6b, 0x15, 0xf7, 0xda, 0x4d, 0xfb, 0xdf, 0x94, 0x61, 0x94, 0xce, 0x67, 0x1a, 0x14, 0xff, 0xb2, + 0xb1, 0x4c, 0x9e, 0xc8, 0x2c, 0x93, 0x49, 0x03, 0xf9, 0xc1, 0xc4, 0xc3, 0xc7, 0x30, 0xe9, 0x3b, + 0x71, 0x72, 0x99, 0x38, 0x51, 0x72, 0x87, 0x38, 0xec, 0x6e, 0x32, 0xeb, 0xf7, 0x50, 0xe0, 0xba, + 0x53, 0x39, 0x23, 0x5d, 0xcd, 0x12, 0xc3, 0xed, 0xf4, 0xd1, 0x0e, 0x20, 0x76, 0x0f, 0x1a, 0x39, + 0x41, 0xcc, 0xc7, 0xe2, 0x09, 0xbb, 0x6e, 0x6f, 0xad, 0xce, 0x8a, 0x56, 0xd1, 0xd5, 0x36, 0x6a, + 0xb8, 0x43, 0x0b, 0xda, 0x4d, 0x77, 0x7f, 0xd1, 0x9b, 0xee, 0x81, 0x1c, 0x9f, 0xed, 0xaf, 0x58, + 0x30, 0x45, 0x3f, 0x8b, 0xe9, 0xdf, 0x1b, 0xa3, 0x10, 0xc6, 0xe9, 0xb2, 0xf3, 0x49, 0x22, 0xcb, + 0xc4, 0xfe, 0xca, 0x91, 0xac, 0x4d, 0x3a, 0xa9, 0xf8, 0x76, 0xc5, 0x24, 0x86, 0xb3, 0xd4, 0xed, + 0x6f, 0x59, 0xc0, 0x3c, 0xee, 0x4e, 0xe0, 0x30, 0xbb, 0x64, 0x1e, 0x66, 0x76, 0x3e, 0xc7, 0xe8, + 0x72, 0x8e, 0xbd, 0x04, 0x13, 0x14, 0x5a, 0x8b, 0xc2, 0xfb, 0xbb, 0x52, 0xd0, 0xce, 0x37, 0xf0, + 0x7e, 0xa5, 0xc4, 0xb7, 0x8d, 0x8a, 0x27, 0x44, 0xbf, 0x60, 0xc1, 0x50, 0xc3, 0x69, 0x3a, 0x0d, + 0x9e, 0x5c, 0xa9, 0x80, 0x75, 0xc6, 0xa8, 0x3f, 0xbf, 0x2c, 0xea, 0x72, 0xcb, 0xc2, 0x47, 0xe4, + 0xd0, 0x65, 0x71, 0xae, 0x35, 0x41, 0x35, 0x3e, 0x7b, 0x17, 0x46, 0x0d, 0x62, 0xc7, 0xaa, 0x86, + 0xfe, 0x82, 0xc5, 0x99, 0xbe, 0x52, 0x15, 0xee, 0xc1, 0x64, 0xa0, 0xfd, 0xa7, 0xec, 0x4c, 0x4a, + 0xc6, 0xf3, 0xc5, 0xd9, 0x3a, 0xe3, 0x82, 0x9a, 0x77, 0x61, 0x86, 0x20, 0x6e, 0x6f, 0xc3, 0xfe, + 0x55, 0x0b, 0x1e, 0xd1, 0x11, 0xb5, 0x00, 0xd0, 0x3c, 0xbb, 0x71, 0x15, 0x86, 0xc2, 0x26, 0x89, + 0x9c, 0x54, 0x2d, 0x3a, 0x27, 0xe7, 0xff, 0x86, 0x28, 0x3f, 0xd8, 0x9b, 0x9b, 0xd6, 0xa9, 0xcb, + 0x72, 0xac, 0x6a, 0x22, 0x1b, 0x06, 0xd8, 0xbc, 0xc4, 0x22, 0x74, 0x97, 0x25, 0x1b, 0x62, 0x97, + 0x2a, 0x31, 0x16, 0x10, 0xfb, 0x6f, 0x59, 0x7c, 0xb9, 0xe9, 0x5d, 0x47, 0x5f, 0x80, 0x89, 0x6d, + 0xaa, 0x41, 0xad, 0xdc, 0x6f, 0xd2, 0x83, 0x94, 0x5d, 0x27, 0x5b, 0x45, 0x8e, 0x8f, 0x2e, 0xc3, + 0x5d, 0x9a, 0x11, 0xbd, 0x9f, 0xb8, 0x96, 0x21, 0x8b, 0xdb, 0x1a, 0xb2, 0xff, 0x61, 0x89, 0xef, + 0x59, 0x26, 0xc3, 0x3d, 0x03, 0x83, 0xcd, 0xd0, 0x5d, 0x5e, 0xab, 0x62, 0x31, 0x57, 0x8a, 0xe9, + 0xd4, 0x78, 0x31, 0x96, 0x70, 0x74, 0x01, 0x80, 0xdc, 0x4f, 0x48, 0x14, 0x38, 0xbe, 0xba, 0x06, + 0x56, 0xa2, 0xd2, 0x8a, 0x82, 0x60, 0x0d, 0x8b, 0xd6, 0x69, 0x46, 0xe1, 0x8e, 0xe7, 0xb2, 0xc8, + 0x85, 0xb2, 0x59, 0xa7, 0xa6, 0x20, 0x58, 0xc3, 0xa2, 0x7a, 0x6b, 0x2b, 0x88, 0xf9, 0x31, 0xe6, + 0xdc, 0x11, 0xb9, 0x71, 0x86, 0x52, 0xbd, 0xf5, 0xa6, 0x0e, 0xc4, 0x26, 0x2e, 0xba, 0x02, 0x03, + 0x89, 0xc3, 0x2e, 0x37, 0xfb, 0x8b, 0x78, 0x8a, 0xac, 0x53, 0x5c, 0x3d, 0x19, 0x11, 0xad, 0x8a, + 0x05, 0x09, 0xfb, 0x3f, 0x55, 0x00, 0x52, 0xa9, 0x0b, 0x7d, 0xb9, 0x7d, 0xc3, 0x7f, 0xb4, 0xa8, + 0xc8, 0xf6, 0xe0, 0x76, 0x3b, 0xfa, 0x9a, 0x05, 0xc3, 0x8e, 0xef, 0x87, 0x0d, 0x27, 0x61, 0xd3, + 0x53, 0x2a, 0xca, 0x7a, 0x44, 0x4f, 0x16, 0xd3, 0xba, 0xbc, 0x33, 0x2f, 0xca, 0x0b, 0x47, 0x0d, + 0x92, 0xdb, 0x1f, 0xbd, 0x0b, 0xe8, 0x23, 0x52, 0x6a, 0xe7, 0x5f, 0x78, 0x36, 
0x2b, 0xb5, 0x57, + 0x18, 0xc3, 0xd5, 0x04, 0x76, 0xf4, 0xb6, 0x91, 0x4b, 0xa6, 0xaf, 0x48, 0xf8, 0xa9, 0x21, 0x87, + 0xe4, 0xa5, 0x91, 0x41, 0x6f, 0xea, 0x2e, 0xd5, 0xfd, 0x45, 0xe2, 0xbb, 0x35, 0x71, 0x38, 0xc7, + 0x9d, 0x3a, 0x81, 0x71, 0xd7, 0x3c, 0x79, 0x85, 0x5b, 0xd8, 0x0b, 0xf9, 0x2d, 0x64, 0x8e, 0xec, + 0xf4, 0xac, 0xcd, 0x00, 0x70, 0xb6, 0x09, 0xf4, 0x26, 0x77, 0x78, 0x5f, 0x0b, 0x36, 0x42, 0xe1, + 0x1a, 0x76, 0xbe, 0xc0, 0x37, 0xdf, 0x8d, 0x13, 0xb2, 0x4d, 0xeb, 0xa4, 0x87, 0xeb, 0x75, 0x41, + 0x05, 0x2b, 0x7a, 0x68, 0x1d, 0x06, 0x58, 0xb4, 0x51, 0x3c, 0x33, 0x54, 0xc4, 0x12, 0x67, 0x06, + 0xd9, 0xa6, 0xfb, 0x87, 0xfd, 0x8d, 0xb1, 0xa0, 0x85, 0x2e, 0xcb, 0x30, 0xfb, 0x78, 0x2d, 0xb8, + 0x19, 0x13, 0x16, 0x66, 0x5f, 0x59, 0xfa, 0x50, 0x1a, 0x37, 0xcf, 0xcb, 0x3b, 0x66, 0xd3, 0x33, + 0x6a, 0x52, 0xc1, 0x46, 0xfc, 0x97, 0x49, 0xfa, 0x66, 0xa0, 0x48, 0x47, 0xcd, 0x94, 0x7e, 0xe9, + 0x64, 0xdf, 0x32, 0x89, 0xe1, 0x2c, 0xf5, 0x13, 0x3d, 0x52, 0x67, 0x03, 0x98, 0xc8, 0x6e, 0xca, + 0x63, 0x3d, 0xc2, 0x7f, 0xd2, 0x07, 0x63, 0xe6, 0xe2, 0x40, 0x0b, 0x50, 0x11, 0x44, 0x54, 0xd2, + 0x2e, 0xb5, 0x07, 0xae, 0x49, 0x00, 0x4e, 0x71, 0x58, 0xfa, 0x32, 0x56, 0x5d, 0x73, 0x0a, 0x4a, + 0xd3, 0x97, 0x29, 0x08, 0xd6, 0xb0, 0xa8, 0x24, 0x7c, 0x27, 0x0c, 0x13, 0x75, 0x12, 0xa8, 0x75, + 0xb3, 0xc4, 0x4a, 0xb1, 0x80, 0xd2, 0x13, 0xe0, 0x2e, 0xfd, 0x98, 0xbe, 0x69, 0x55, 0x54, 0x27, + 0xc0, 0x15, 0x1d, 0x88, 0x4d, 0x5c, 0x7a, 0xa2, 0x85, 0x31, 0x5b, 0x88, 0x42, 0xde, 0x4e, 0x9d, + 0xac, 0xea, 0x3c, 0x02, 0x4f, 0xc2, 0xd1, 0x67, 0xe0, 0x11, 0x15, 0x30, 0x87, 0xb9, 0x95, 0x56, + 0xb6, 0x38, 0x60, 0xa8, 0xcc, 0x8f, 0x2c, 0x77, 0x46, 0xc3, 0xdd, 0xea, 0xa3, 0xd7, 0x60, 0x4c, + 0xc8, 0xca, 0x92, 0xe2, 0xa0, 0x79, 0x03, 0x7f, 0xc5, 0x80, 0xe2, 0x0c, 0x36, 0xaa, 0xc2, 0x04, + 0x2d, 0x61, 0x42, 0xaa, 0xa4, 0xc0, 0x03, 0xff, 0xd4, 0x51, 0x7f, 0x25, 0x03, 0xc7, 0x6d, 0x35, + 0xd0, 0x22, 0x8c, 0x73, 0x61, 0x85, 0x2a, 0x86, 0xec, 0x3b, 0x08, 0x7f, 0x4e, 0xb5, 0x11, 0x6e, + 0x98, 0x60, 0x9c, 0xc5, 0x47, 0x17, 0x61, 0xc4, 0x89, 0x1a, 0x5b, 0x5e, 0x42, 0x1a, 0x49, 0x2b, + 0xe2, 0x49, 0x2c, 0x34, 0x17, 0x86, 0x45, 0x0d, 0x86, 0x0d, 0x4c, 0xfb, 0xf3, 0x30, 0xd5, 0xc1, + 0x79, 0x9c, 0x2e, 0x1c, 0xa7, 0xe9, 0xc9, 0x31, 0x65, 0xdc, 0xa5, 0x16, 0x6b, 0x6b, 0x72, 0x34, + 0x1a, 0x16, 0x5d, 0x9d, 0xcc, 0x3c, 0xad, 0xe5, 0xd4, 0x54, 0xab, 0x73, 0x55, 0x02, 0x70, 0x8a, + 0x63, 0xff, 0x45, 0x05, 0x34, 0xeb, 0x4d, 0x01, 0x17, 0x99, 0x8b, 0x30, 0x22, 0xd3, 0xc4, 0x6a, + 0xe9, 0x19, 0xd5, 0x30, 0x2f, 0x69, 0x30, 0x6c, 0x60, 0xd2, 0xbe, 0x05, 0xd2, 0x26, 0x95, 0x75, + 0xce, 0x52, 0xc6, 0x2a, 0x9c, 0xe2, 0xa0, 0xf3, 0x30, 0x14, 0x13, 0x7f, 0xe3, 0xaa, 0x17, 0xdc, + 0x15, 0x0b, 0x5b, 0x71, 0xe6, 0xba, 0x28, 0xc7, 0x0a, 0x03, 0x2d, 0x41, 0xb9, 0xe5, 0xb9, 0x62, + 0x29, 0x4b, 0xb1, 0xa1, 0x7c, 0x73, 0xad, 0x7a, 0xb0, 0x37, 0xf7, 0x44, 0xb7, 0x9c, 0xb9, 0x54, + 0x3f, 0x8f, 0xe7, 0xe9, 0xf6, 0xa3, 0x95, 0x3b, 0xd9, 0xe9, 0x07, 0x7a, 0xb4, 0xd3, 0x5f, 0x00, + 0x10, 0xa3, 0x96, 0x6b, 0xb9, 0x9c, 0x7e, 0xb5, 0x4b, 0x0a, 0x82, 0x35, 0x2c, 0xaa, 0xe5, 0x37, + 0x22, 0xe2, 0x48, 0x45, 0x98, 0x3b, 0x35, 0x0f, 0x1d, 0x5d, 0xcb, 0x5f, 0xce, 0x12, 0xc3, 0xed, + 0xf4, 0x51, 0x08, 0x93, 0xae, 0x88, 0xca, 0x4c, 0x1b, 0xad, 0xf4, 0xee, 0x49, 0x4d, 0x1b, 0xac, + 0x66, 0x09, 0xe1, 0x76, 0xda, 0xe8, 0x73, 0x30, 0x2b, 0x0b, 0xdb, 0x43, 0x62, 0xd9, 0x76, 0x29, + 0x2f, 0x9d, 0xd9, 0xdf, 0x9b, 0x9b, 0xad, 0x76, 0xc5, 0xc2, 0x87, 0x50, 0x40, 0x6f, 0xc1, 0x00, + 0xbb, 0xd7, 0x89, 0x67, 0x86, 0xd9, 0x89, 0xf7, 0x52, 0x11, 0x7f, 0x7c, 0xba, 0xea, 0xe7, 0xd9, + 0xed, 
0x90, 0xf0, 0x34, 0x4d, 0x2f, 0xcb, 0x58, 0x21, 0x16, 0x34, 0x51, 0x13, 0x86, 0x9d, 0x20, + 0x08, 0x13, 0x87, 0x0b, 0x62, 0x23, 0x45, 0x64, 0x49, 0xad, 0x89, 0xc5, 0xb4, 0x2e, 0x6f, 0x47, + 0x39, 0xaf, 0x69, 0x10, 0xac, 0x37, 0x81, 0xee, 0xc1, 0x78, 0x78, 0x8f, 0x32, 0x4c, 0x79, 0xb5, + 0x11, 0xcf, 0x8c, 0x9a, 0x03, 0xcb, 0x31, 0xd4, 0x1a, 0x95, 0x35, 0x4e, 0x66, 0x12, 0xc5, 0xd9, + 0x56, 0xd0, 0xbc, 0x61, 0xae, 0x1e, 0x4b, 0xfd, 0xa9, 0x53, 0x73, 0xb5, 0x6e, 0x9d, 0x66, 0x61, + 0xd7, 0xdc, 0x87, 0x92, 0x71, 0x84, 0xf1, 0x4c, 0xd8, 0x75, 0x0a, 0xc2, 0x3a, 0xde, 0xec, 0xc7, + 0x60, 0x58, 0x9b, 0xf8, 0x5e, 0x1c, 0x77, 0x67, 0x5f, 0x83, 0x89, 0xec, 0x84, 0xf6, 0xe4, 0xf8, + 0xfb, 0x3f, 0x4b, 0x30, 0xde, 0xe1, 0xde, 0xe8, 0xae, 0xc7, 0x9c, 0xcf, 0x0d, 0xd6, 0x77, 0xc5, + 0x0b, 0x5c, 0xcc, 0x20, 0x26, 0x03, 0x2b, 0x15, 0x60, 0x60, 0x92, 0x9b, 0x96, 0xbb, 0x72, 0x53, + 0xc1, 0xb4, 0xfa, 0xde, 0x0f, 0xd3, 0x32, 0xcf, 0x89, 0xfe, 0x42, 0xe7, 0xc4, 0x03, 0x60, 0x74, + 0xc6, 0x51, 0x33, 0x58, 0xe0, 0xa8, 0xf9, 0x66, 0x09, 0x26, 0x52, 0x27, 0x67, 0x91, 0x3b, 0xfa, + 0xf8, 0xaf, 0x21, 0xd6, 0x8d, 0x6b, 0x88, 0xbc, 0xd4, 0xd0, 0x99, 0xfe, 0x75, 0xbd, 0x92, 0x78, + 0x2b, 0x73, 0x25, 0xf1, 0x52, 0x8f, 0x74, 0x0f, 0xbf, 0x9e, 0xf8, 0x6e, 0x09, 0x4e, 0x65, 0xab, + 0x2c, 0xfb, 0x8e, 0xb7, 0x7d, 0x02, 0xf3, 0xf5, 0x19, 0x63, 0xbe, 0x5e, 0xe9, 0x6d, 0x5c, 0xac, + 0x93, 0x5d, 0x27, 0xcd, 0xc9, 0x4c, 0xda, 0xc7, 0x8e, 0x42, 0xfc, 0xf0, 0x99, 0xfb, 0x43, 0x0b, + 0x1e, 0xed, 0x58, 0xef, 0x04, 0x0c, 0xaf, 0x6f, 0x98, 0x86, 0xd7, 0x17, 0x8f, 0x30, 0xba, 0x2e, + 0x96, 0xd8, 0x5f, 0x2b, 0x77, 0x19, 0x15, 0x33, 0x4d, 0xdd, 0x80, 0x61, 0xa7, 0xd1, 0x20, 0x71, + 0x7c, 0x2d, 0x74, 0x55, 0x02, 0xa7, 0xe7, 0xd9, 0xd9, 0x92, 0x16, 0x1f, 0xec, 0xcd, 0xcd, 0x66, + 0x49, 0xa4, 0x60, 0xac, 0x53, 0x30, 0x53, 0xcb, 0x95, 0x8e, 0x29, 0xb5, 0xdc, 0x05, 0x80, 0x1d, + 0xa5, 0xc5, 0x66, 0x2d, 0x5e, 0x9a, 0x7e, 0xab, 0x61, 0xa1, 0xff, 0x9f, 0x49, 0x84, 0xdc, 0x49, + 0xa3, 0xcf, 0x8c, 0x97, 0xcc, 0xf9, 0x7e, 0xba, 0xc3, 0x07, 0x0f, 0xcb, 0x54, 0xd6, 0x41, 0x45, + 0x12, 0x7d, 0x0a, 0x26, 0x62, 0x1e, 0xfc, 0xbf, 0xec, 0x3b, 0x31, 0xf3, 0xee, 0x17, 0xfc, 0x94, + 0x45, 0x58, 0xd6, 0x33, 0x30, 0xdc, 0x86, 0x6d, 0x7f, 0xa7, 0x0c, 0x1f, 0x3c, 0x64, 0xd9, 0xa2, + 0x45, 0xf3, 0xd6, 0xf6, 0xb9, 0xac, 0xfd, 0x67, 0xb6, 0x63, 0x65, 0xc3, 0x20, 0x94, 0xf9, 0xda, + 0xa5, 0xf7, 0xfd, 0xb5, 0xbf, 0xae, 0x5b, 0xeb, 0xb8, 0xdf, 0xe6, 0xa5, 0x23, 0x6f, 0xcc, 0x9f, + 0x56, 0x63, 0xfd, 0x97, 0x2c, 0x78, 0xa2, 0xe3, 0xb0, 0x0c, 0x2f, 0x91, 0x05, 0xa8, 0x34, 0x68, + 0xa1, 0x16, 0x8b, 0x93, 0x06, 0xc1, 0x49, 0x00, 0x4e, 0x71, 0x0c, 0x67, 0x90, 0x52, 0xae, 0x33, + 0xc8, 0xef, 0x5b, 0x30, 0x9d, 0xed, 0xc4, 0x09, 0xf0, 0xad, 0xba, 0xc9, 0xb7, 0xe6, 0x7b, 0xfb, + 0xf8, 0x5d, 0x58, 0xd6, 0x7f, 0x1f, 0x83, 0xd3, 0x6d, 0xa7, 0x1e, 0x9f, 0xc5, 0x9f, 0xb3, 0x60, + 0x72, 0x93, 0x49, 0xef, 0x5a, 0xc0, 0x93, 0x18, 0x57, 0x4e, 0x94, 0xd8, 0xa1, 0x71, 0x52, 0x5c, + 0x17, 0x69, 0x43, 0xc1, 0xed, 0x8d, 0xa1, 0xaf, 0x5a, 0x30, 0xed, 0xdc, 0x8b, 0xdb, 0x5e, 0x36, + 0x11, 0x0b, 0xe9, 0xb5, 0x1c, 0x63, 0x59, 0xce, 0x9b, 0x28, 0x4b, 0x33, 0xfb, 0x7b, 0x73, 0xd3, + 0x9d, 0xb0, 0x70, 0xc7, 0x56, 0xe9, 0xf7, 0xdd, 0x12, 0xe1, 0x14, 0xc5, 0x42, 0xf7, 0x3a, 0x05, + 0x5f, 0x70, 0xb6, 0x26, 0x21, 0x58, 0x51, 0x44, 0xef, 0x40, 0x65, 0x53, 0xc6, 0x38, 0x65, 0xd9, + 0x66, 0x97, 0x69, 0xee, 0x14, 0x12, 0xc5, 0x7d, 0xf7, 0x15, 0x08, 0xa7, 0x44, 0xd1, 0x65, 0x28, + 0x07, 0x1b, 0xb1, 0x88, 0x26, 0xce, 0xf3, 0x01, 0x32, 0x3d, 0xaf, 0x78, 0x00, 0xe6, 0xf5, 0xd5, + 0x3a, 0xa6, 0x24, 0x28, 0xa5, 
0xe8, 0x8e, 0x2b, 0xac, 0xc4, 0x39, 0x94, 0xf0, 0x52, 0xb5, 0x9d, + 0x12, 0x5e, 0xaa, 0x62, 0x4a, 0x02, 0xd5, 0xa0, 0x9f, 0x05, 0x6b, 0x08, 0x13, 0x70, 0x4e, 0xc8, + 0x79, 0x5b, 0x48, 0x0a, 0xcf, 0x80, 0xc8, 0x8a, 0x31, 0x27, 0x84, 0xd6, 0x61, 0xa0, 0xc1, 0x92, + 0xf8, 0x0b, 0xdd, 0x3c, 0x2f, 0x19, 0x43, 0x5b, 0xc2, 0x7f, 0x7e, 0xef, 0xc5, 0xcb, 0xb1, 0xa0, + 0xc5, 0xa8, 0x92, 0xe6, 0xd6, 0x46, 0x2c, 0x94, 0xef, 0x3c, 0xaa, 0x6d, 0xcf, 0x31, 0x08, 0xaa, + 0xac, 0x1c, 0x0b, 0x5a, 0xa8, 0x0a, 0xa5, 0x8d, 0x86, 0x48, 0xa4, 0x9a, 0x63, 0xfa, 0x35, 0xa3, + 0x69, 0x97, 0x06, 0xf6, 0xf7, 0xe6, 0x4a, 0xab, 0xcb, 0xb8, 0xb4, 0xd1, 0x40, 0x6f, 0xc0, 0xe0, + 0x06, 0x8f, 0x8f, 0x14, 0x49, 0x53, 0x5f, 0xc8, 0x0b, 0xe2, 0x6c, 0x0b, 0xa6, 0xe4, 0xf1, 0x19, + 0x02, 0x80, 0x25, 0x39, 0x96, 0x4f, 0x4e, 0x45, 0x7c, 0x8a, 0xac, 0xa9, 0xf3, 0xbd, 0x45, 0x88, + 0x0a, 0x9d, 0x54, 0x95, 0x62, 0x8d, 0x22, 0x5d, 0xf3, 0x8e, 0x7c, 0x8f, 0x84, 0x65, 0x4c, 0xcd, + 0x5d, 0xf3, 0x1d, 0x9f, 0x2f, 0xe1, 0x6b, 0x5e, 0x81, 0x70, 0x4a, 0x14, 0xb5, 0x60, 0x74, 0x27, + 0x6e, 0x6e, 0x11, 0xb9, 0xf5, 0x59, 0x1a, 0xd5, 0xe1, 0x0b, 0x9f, 0xc8, 0xc9, 0x8d, 0x2b, 0xaa, + 0x78, 0x51, 0xd2, 0x72, 0xfc, 0x36, 0x0e, 0xc6, 0x12, 0x78, 0xdd, 0xd2, 0xc9, 0x62, 0xb3, 0x15, + 0xfa, 0x49, 0xde, 0x6b, 0x85, 0x77, 0x76, 0x13, 0x22, 0xd2, 0xac, 0xe6, 0x7c, 0x92, 0xd7, 0x39, + 0x72, 0xfb, 0x27, 0x11, 0x00, 0x2c, 0xc9, 0xa9, 0x29, 0x63, 0xdc, 0x78, 0xa2, 0xf0, 0x94, 0xb5, + 0x8d, 0x21, 0x9d, 0x32, 0xc6, 0x7d, 0x53, 0xa2, 0x8c, 0xeb, 0x36, 0xb7, 0xc2, 0x24, 0x0c, 0x32, + 0xbc, 0x7f, 0xb2, 0x08, 0xd7, 0xad, 0x75, 0xa8, 0xd9, 0xce, 0x75, 0x3b, 0x61, 0xe1, 0x8e, 0xad, + 0xa2, 0x00, 0xc6, 0x9a, 0x61, 0x94, 0xdc, 0x0b, 0x23, 0xb9, 0x0e, 0x51, 0x21, 0x1d, 0xd1, 0xa8, + 0x23, 0xda, 0x66, 0x6e, 0xb8, 0x26, 0x04, 0x67, 0xa8, 0xd3, 0x4f, 0x17, 0x37, 0x1c, 0x9f, 0xac, + 0xdd, 0x98, 0x99, 0x2a, 0xf2, 0xe9, 0xea, 0x1c, 0xb9, 0xfd, 0xd3, 0x09, 0x00, 0x96, 0xe4, 0xec, + 0x5f, 0x1d, 0x68, 0x17, 0x1c, 0x98, 0x6a, 0xf0, 0x37, 0xdb, 0x6f, 0x62, 0x3f, 0xd5, 0xbb, 0x06, + 0xfc, 0x00, 0xef, 0x64, 0xbf, 0x6a, 0xc1, 0xe9, 0x66, 0x47, 0xb1, 0x40, 0x1c, 0xbd, 0xbd, 0x2a, + 0xd2, 0x7c, 0x5a, 0x54, 0x36, 0xe4, 0xce, 0x70, 0xdc, 0xa5, 0xcd, 0xac, 0x30, 0x5d, 0x7e, 0xdf, + 0xc2, 0xf4, 0x6d, 0x18, 0x62, 0xd2, 0x5f, 0x9a, 0xeb, 0xa4, 0xc7, 0xb4, 0x20, 0xec, 0x10, 0x5f, + 0x16, 0x24, 0xb0, 0x22, 0x46, 0x27, 0xee, 0xf1, 0xec, 0x20, 0x30, 0x61, 0x60, 0x91, 0x83, 0x8f, + 0x6b, 0x2a, 0xab, 0x62, 0x26, 0x1e, 0xaf, 0x1d, 0x86, 0x7c, 0x90, 0x87, 0x80, 0x0f, 0x6f, 0x0c, + 0x55, 0x3b, 0xa8, 0x4a, 0x03, 0xe6, 0xb5, 0x4b, 0xbe, 0xba, 0x74, 0xb2, 0x22, 0xfe, 0x3f, 0xb2, + 0x3a, 0x48, 0xa4, 0x5c, 0x2d, 0xfb, 0x84, 0xa9, 0x96, 0x3d, 0x9d, 0x55, 0xcb, 0xda, 0x8c, 0x31, + 0x86, 0x46, 0x56, 0x3c, 0x87, 0x68, 0xd1, 0x64, 0x2e, 0xb6, 0x0f, 0x67, 0xf3, 0xd8, 0x1d, 0x73, + 0xc5, 0x72, 0xd5, 0x25, 0x64, 0xea, 0x8a, 0xe5, 0xae, 0x55, 0x31, 0x83, 0x14, 0xcd, 0x07, 0x60, + 0xff, 0x7c, 0x09, 0xca, 0xb5, 0xd0, 0x3d, 0x01, 0xe3, 0xd2, 0x25, 0xc3, 0xb8, 0xf4, 0x54, 0xee, + 0xfb, 0x74, 0x5d, 0x4d, 0x49, 0x37, 0x32, 0xa6, 0xa4, 0x0f, 0xe7, 0x93, 0x3a, 0xdc, 0x70, 0xf4, + 0xbd, 0x32, 0xe8, 0x2f, 0xec, 0xa1, 0xff, 0x70, 0x14, 0x0f, 0xdd, 0x72, 0xb1, 0x47, 0xf7, 0x44, + 0x1b, 0xcc, 0x93, 0x4b, 0xc6, 0xf5, 0xfd, 0xd4, 0x3a, 0xea, 0xde, 0x26, 0xde, 0xe6, 0x56, 0x42, + 0xdc, 0xec, 0xc0, 0x4e, 0xce, 0x51, 0xf7, 0xcf, 0x2d, 0x18, 0xcf, 0xb4, 0x8e, 0xfc, 0x4e, 0x01, + 0x41, 0x47, 0x34, 0x17, 0x4d, 0xe6, 0x46, 0x10, 0xcd, 0x03, 0x28, 0xab, 0xbf, 0x34, 0xc9, 0x30, + 0xe9, 0x54, 0x5d, 0x0b, 0xc4, 0x58, 0xc3, 0x40, 0x2f, 
0xc3, 0x70, 0x12, 0x36, 0x43, 0x3f, 0xdc, + 0xdc, 0xbd, 0x42, 0x64, 0xa6, 0x0a, 0x75, 0x63, 0xb2, 0x9e, 0x82, 0xb0, 0x8e, 0x67, 0x7f, 0xbf, + 0x0c, 0xd9, 0xf7, 0x19, 0xff, 0xdf, 0x3a, 0xfd, 0xe9, 0x59, 0xa7, 0x7f, 0x64, 0xc1, 0x04, 0x6d, + 0x9d, 0xb9, 0xce, 0x48, 0x87, 0x5a, 0xf5, 0xa0, 0x81, 0x75, 0xc8, 0x83, 0x06, 0x4f, 0x53, 0x6e, + 0xe7, 0x86, 0xad, 0x44, 0x18, 0x91, 0x34, 0x26, 0x46, 0x4b, 0xb1, 0x80, 0x0a, 0x3c, 0x12, 0x45, + 0x22, 0xf2, 0x48, 0xc7, 0x23, 0x51, 0x84, 0x05, 0x54, 0xbe, 0x77, 0xd0, 0xd7, 0xe5, 0xbd, 0x03, + 0x96, 0xeb, 0x49, 0xb8, 0x6b, 0x08, 0xb1, 0x42, 0xcb, 0xf5, 0x24, 0xfd, 0x38, 0x52, 0x1c, 0xfb, + 0xdb, 0x65, 0x18, 0xa9, 0x85, 0x6e, 0xea, 0x29, 0xff, 0x92, 0xe1, 0x29, 0x7f, 0x36, 0xe3, 0x29, + 0x3f, 0xa1, 0xe3, 0x3e, 0x18, 0x47, 0x79, 0x91, 0x13, 0x8c, 0xbd, 0xc8, 0x71, 0x44, 0x27, 0x79, + 0x23, 0x27, 0x98, 0x22, 0x84, 0x4d, 0xba, 0x3f, 0x4b, 0xce, 0xf1, 0xff, 0xdb, 0x82, 0xb1, 0x5a, + 0xe8, 0xd2, 0x05, 0xfa, 0xb3, 0xb4, 0x1a, 0xf5, 0x4c, 0x62, 0x03, 0x87, 0x64, 0x12, 0xfb, 0x75, + 0x0b, 0x06, 0x6b, 0xa1, 0x7b, 0x02, 0x06, 0xd6, 0x55, 0xd3, 0xc0, 0xfa, 0x44, 0x2e, 0xe7, 0xed, + 0x62, 0x53, 0xfd, 0x4e, 0x19, 0x46, 0x69, 0x8f, 0xc3, 0x4d, 0xf9, 0xbd, 0x8c, 0xb9, 0xb1, 0x0a, + 0xcc, 0x0d, 0x15, 0x09, 0x43, 0xdf, 0x0f, 0xef, 0x65, 0xbf, 0xdd, 0x2a, 0x2b, 0xc5, 0x02, 0x8a, + 0xce, 0xc3, 0x50, 0x33, 0x22, 0x3b, 0x5e, 0xd8, 0x8a, 0xb3, 0x51, 0x8c, 0x35, 0x51, 0x8e, 0x15, + 0x06, 0x7a, 0x09, 0x46, 0x62, 0x2f, 0x68, 0x10, 0xe9, 0xcc, 0xd1, 0xc7, 0x9c, 0x39, 0x78, 0xd2, + 0x46, 0xad, 0x1c, 0x1b, 0x58, 0xe8, 0x36, 0x54, 0xd8, 0x7f, 0xb6, 0x83, 0x7a, 0x7f, 0xb0, 0x80, + 0x67, 0x2a, 0x93, 0x04, 0x70, 0x4a, 0x0b, 0x5d, 0x00, 0x48, 0xa4, 0xdb, 0x49, 0x2c, 0xf2, 0xad, + 0x28, 0xb9, 0x54, 0x39, 0xa4, 0xc4, 0x58, 0xc3, 0x42, 0xcf, 0x41, 0x25, 0x71, 0x3c, 0xff, 0xaa, + 0x17, 0x90, 0x58, 0xb8, 0xed, 0x88, 0x04, 0xcc, 0xa2, 0x10, 0xa7, 0x70, 0x7a, 0xde, 0xb3, 0x18, + 0x6a, 0xfe, 0x18, 0xca, 0x10, 0xc3, 0x66, 0xe7, 0xfd, 0x55, 0x55, 0x8a, 0x35, 0x0c, 0xfb, 0x22, + 0x9c, 0xaa, 0x85, 0x6e, 0x2d, 0x8c, 0x92, 0xd5, 0x30, 0xba, 0xe7, 0x44, 0xae, 0xfc, 0x7e, 0x73, + 0x32, 0xef, 0x2f, 0x3d, 0x93, 0xfb, 0xb9, 0xcd, 0xd1, 0xc8, 0xe3, 0xfb, 0x22, 0x3b, 0xf1, 0x7b, + 0x0c, 0xc1, 0xf8, 0x51, 0x09, 0x50, 0x8d, 0x39, 0xc6, 0x18, 0x6f, 0xe7, 0x6c, 0xc1, 0x58, 0x4c, + 0xae, 0x7a, 0x41, 0xeb, 0xbe, 0x20, 0x55, 0x2c, 0xe6, 0xa5, 0xbe, 0xa2, 0xd7, 0xe1, 0x96, 0x0e, + 0xb3, 0x0c, 0x67, 0xe8, 0xd2, 0xc9, 0x8c, 0x5a, 0xc1, 0x62, 0x7c, 0x33, 0x26, 0x91, 0x78, 0x2b, + 0x86, 0x4d, 0x26, 0x96, 0x85, 0x38, 0x85, 0xd3, 0xc5, 0xc3, 0xfe, 0x5c, 0x0f, 0x03, 0x1c, 0x86, + 0x89, 0x5c, 0x6e, 0xec, 0xed, 0x00, 0xad, 0x1c, 0x1b, 0x58, 0x68, 0x15, 0x50, 0xdc, 0x6a, 0x36, + 0x7d, 0x76, 0xd7, 0xe8, 0xf8, 0x97, 0xa2, 0xb0, 0xd5, 0xe4, 0xfe, 0xd1, 0x22, 0xed, 0x7e, 0xbd, + 0x0d, 0x8a, 0x3b, 0xd4, 0xa0, 0xcc, 0x62, 0x23, 0x66, 0xbf, 0x45, 0x40, 0x35, 0xb7, 0x57, 0xd6, + 0x59, 0x11, 0x96, 0x30, 0xfb, 0x8b, 0xec, 0x80, 0x63, 0x8f, 0x78, 0x24, 0xad, 0x88, 0xa0, 0x6d, + 0x18, 0x6d, 0xb2, 0x43, 0x2c, 0x89, 0x42, 0xdf, 0x27, 0x52, 0xbe, 0x3c, 0x9a, 0x6b, 0x0e, 0x4f, + 0xdb, 0xaf, 0x93, 0xc3, 0x26, 0x75, 0xfb, 0x17, 0xc7, 0x18, 0xaf, 0x12, 0xd7, 0xbd, 0x83, 0xc2, + 0x09, 0x57, 0x48, 0x72, 0x1f, 0x2a, 0xf2, 0x1c, 0x57, 0x7a, 0x0e, 0x08, 0x97, 0x5e, 0x2c, 0xa9, + 0xa0, 0xcf, 0x32, 0x17, 0x73, 0xce, 0x20, 0x8a, 0x3f, 0x32, 0xc8, 0xf1, 0x0d, 0xf7, 0x72, 0x41, + 0x02, 0x6b, 0xe4, 0xd0, 0x55, 0x18, 0x15, 0x6f, 0x3e, 0x08, 0x33, 0x45, 0xd9, 0x50, 0xb1, 0x47, + 0xb1, 0x0e, 0x3c, 0xc8, 0x16, 0x60, 0xb3, 0x32, 0xda, 0x84, 0xc7, 0xb5, 0x37, 
0x8d, 0x3a, 0xb8, + 0x91, 0x71, 0xce, 0xf3, 0xc4, 0xfe, 0xde, 0xdc, 0xe3, 0xeb, 0x87, 0x21, 0xe2, 0xc3, 0xe9, 0xa0, + 0x1b, 0x70, 0xca, 0x69, 0x24, 0xde, 0x0e, 0xa9, 0x12, 0xc7, 0xf5, 0xbd, 0x80, 0x98, 0x51, 0xf7, + 0x8f, 0xee, 0xef, 0xcd, 0x9d, 0x5a, 0xec, 0x84, 0x80, 0x3b, 0xd7, 0x43, 0x9f, 0x80, 0x8a, 0x1b, + 0xc4, 0x62, 0x0e, 0x06, 0x8c, 0x27, 0xbc, 0x2a, 0xd5, 0xeb, 0x75, 0x35, 0xfe, 0xf4, 0x0f, 0x4e, + 0x2b, 0xa0, 0xf7, 0xf8, 0x13, 0xf5, 0x4a, 0x9b, 0xe1, 0x4f, 0xc7, 0xbd, 0x52, 0x48, 0x7f, 0x36, + 0x62, 0x61, 0xb8, 0x05, 0x4f, 0xb9, 0x6b, 0x1a, 0x61, 0x32, 0x46, 0x13, 0xe8, 0xd3, 0x80, 0x62, + 0x12, 0xed, 0x78, 0x0d, 0xb2, 0xd8, 0x60, 0x99, 0x4e, 0x99, 0x8d, 0x67, 0xc8, 0x88, 0x5b, 0x40, + 0xf5, 0x36, 0x0c, 0xdc, 0xa1, 0x16, 0xba, 0x4c, 0x39, 0x8f, 0x5e, 0x2a, 0xbc, 0x6b, 0xa5, 0x60, + 0x38, 0x53, 0x25, 0xcd, 0x88, 0x34, 0x9c, 0x84, 0xb8, 0x26, 0x45, 0x9c, 0xa9, 0x47, 0xcf, 0x25, + 0x95, 0xcc, 0x1e, 0x4c, 0x9f, 0xd0, 0xf6, 0x84, 0xf6, 0x54, 0xcf, 0xda, 0x0a, 0xe3, 0xe4, 0x3a, + 0x49, 0xee, 0x85, 0xd1, 0x5d, 0x76, 0x87, 0x31, 0xa4, 0xa5, 0x8d, 0x4b, 0x41, 0x58, 0xc7, 0xa3, + 0x32, 0x14, 0xbb, 0x3c, 0x5b, 0xab, 0xb2, 0x9b, 0x89, 0xa1, 0x74, 0xef, 0x5c, 0xe6, 0xc5, 0x58, + 0xc2, 0x25, 0xea, 0x5a, 0x6d, 0x99, 0xdd, 0x32, 0x64, 0x50, 0xd7, 0x6a, 0xcb, 0x58, 0xc2, 0x51, + 0xd8, 0xfe, 0x50, 0xda, 0x58, 0x91, 0x1b, 0x9f, 0x76, 0x4e, 0x5e, 0xf0, 0xad, 0xb4, 0xfb, 0x30, + 0xa1, 0x1e, 0x6b, 0xe3, 0x19, 0x3d, 0xe3, 0x99, 0xf1, 0x22, 0x0f, 0xe4, 0x77, 0x4c, 0x0c, 0xaa, + 0xec, 0x7a, 0x6b, 0x19, 0x9a, 0xb8, 0xad, 0x15, 0x23, 0x7b, 0xc4, 0x44, 0xee, 0x03, 0x05, 0x0b, + 0x50, 0x89, 0x5b, 0x77, 0xdc, 0x70, 0xdb, 0xf1, 0x02, 0x76, 0x15, 0xa0, 0x3f, 0xf7, 0x2e, 0x01, + 0x38, 0xc5, 0x41, 0x35, 0x18, 0x72, 0x84, 0x0a, 0x27, 0x4c, 0xf6, 0x39, 0xd1, 0xe5, 0x52, 0xe1, + 0xe3, 0xd6, 0x55, 0xf9, 0x0f, 0x2b, 0x2a, 0xe8, 0x55, 0x18, 0x15, 0xc1, 0x51, 0xc2, 0x89, 0x71, + 0xca, 0x74, 0xa4, 0xaf, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0x4d, 0x18, 0xa3, 0x54, 0x52, 0x06, 0x38, + 0x33, 0xdd, 0x1b, 0x0f, 0xd5, 0x52, 0x41, 0xeb, 0x64, 0x70, 0x86, 0x2c, 0x72, 0xe1, 0x31, 0xa7, + 0x95, 0x84, 0xdb, 0x74, 0x27, 0x98, 0xfb, 0x64, 0x3d, 0xbc, 0x4b, 0x82, 0x99, 0x53, 0x6c, 0x05, + 0x9e, 0xdd, 0xdf, 0x9b, 0x7b, 0x6c, 0xf1, 0x10, 0x3c, 0x7c, 0x28, 0x15, 0xf4, 0x36, 0x0c, 0x27, + 0xa1, 0x2f, 0x7c, 0x93, 0xe3, 0x99, 0xd3, 0x45, 0x72, 0xda, 0xac, 0xab, 0x0a, 0xba, 0x19, 0x43, + 0x11, 0xc1, 0x3a, 0xc5, 0xd9, 0x4f, 0xc2, 0x64, 0x1b, 0x4b, 0xea, 0xc9, 0x7d, 0xf3, 0x3f, 0xf6, + 0x43, 0x45, 0x59, 0xf4, 0xd0, 0x82, 0x69, 0xbc, 0x7d, 0x34, 0x6b, 0xbc, 0x1d, 0xa2, 0x02, 0x94, + 0x6e, 0xaf, 0xfd, 0x5c, 0x87, 0xe7, 0xb9, 0x9f, 0xcd, 0xdd, 0x83, 0xc5, 0x23, 0xaa, 0x7a, 0x78, + 0xc4, 0x3c, 0xd5, 0xea, 0xfa, 0x0e, 0xd5, 0xea, 0x0a, 0x3e, 0x39, 0x47, 0xf5, 0xb7, 0x66, 0xe8, + 0xae, 0xd5, 0xb2, 0x2f, 0x2a, 0xd5, 0x68, 0x21, 0xe6, 0x30, 0x26, 0x77, 0xd3, 0x33, 0x95, 0xc9, + 0xdd, 0x83, 0x47, 0x94, 0xbb, 0x25, 0x01, 0x9c, 0xd2, 0x42, 0x3b, 0x30, 0xd9, 0x30, 0x1f, 0xc8, + 0x52, 0x71, 0x52, 0xcf, 0xf7, 0xf0, 0x40, 0x55, 0x4b, 0x7b, 0x3d, 0x63, 0x39, 0x4b, 0x0f, 0xb7, + 0x37, 0x81, 0x5e, 0x85, 0xa1, 0xf7, 0xc2, 0x98, 0x5d, 0x2b, 0x88, 0x83, 0x45, 0xc6, 0xa3, 0x0c, + 0xbd, 0x7e, 0xa3, 0xce, 0xca, 0x0f, 0xf6, 0xe6, 0x86, 0x6b, 0xa1, 0x2b, 0xff, 0x62, 0x55, 0x01, + 0x7d, 0xc9, 0x82, 0x53, 0xc6, 0x3e, 0x53, 0x3d, 0x87, 0xa3, 0xf4, 0xfc, 0x71, 0xd1, 0xf2, 0xa9, + 0xb5, 0x4e, 0x34, 0x71, 0xe7, 0xa6, 0xec, 0xdf, 0xe5, 0x26, 0x4c, 0x61, 0xd4, 0x20, 0x71, 0xcb, + 0x3f, 0x89, 0x4c, 0xf6, 0x37, 0x0c, 0x7b, 0xcb, 0x03, 0x30, 0xa2, 0xff, 0x7b, 0x8b, 0x19, 0xd1, + 0xd7, 
0xc9, 0x76, 0xd3, 0x77, 0x92, 0x93, 0xf0, 0xee, 0xfd, 0x2c, 0x0c, 0x25, 0xa2, 0xb5, 0x62, + 0x69, 0xf8, 0xb5, 0xee, 0xb1, 0xcb, 0x05, 0x75, 0x30, 0xc9, 0x52, 0xac, 0x08, 0xda, 0xff, 0x9a, + 0x7f, 0x15, 0x09, 0x39, 0x01, 0x4b, 0xc1, 0x75, 0xd3, 0x52, 0xf0, 0x4c, 0xe1, 0xb1, 0x74, 0xb1, + 0x18, 0x7c, 0xdf, 0x1c, 0x01, 0xd3, 0x1f, 0x7e, 0x7a, 0x6e, 0x79, 0xec, 0x5f, 0xb1, 0x60, 0xba, + 0xd3, 0x75, 0x3b, 0x15, 0x30, 0xb8, 0xf6, 0xa2, 0xee, 0xbf, 0xd4, 0xac, 0xde, 0x12, 0xe5, 0x58, + 0x61, 0x14, 0xce, 0x8b, 0xdd, 0x5b, 0xea, 0xa6, 0x1b, 0x60, 0x3e, 0xb5, 0x86, 0x5e, 0xe3, 0xce, + 0xfc, 0x96, 0x7a, 0x0b, 0xad, 0x37, 0x47, 0x7e, 0xfb, 0x37, 0x4a, 0x30, 0xcd, 0x8d, 0xd0, 0x8b, + 0x3b, 0xa1, 0xe7, 0xd6, 0x42, 0x57, 0x84, 0x36, 0xb8, 0x30, 0xd2, 0xd4, 0x94, 0xcf, 0x62, 0xa9, + 0x60, 0x74, 0x75, 0x35, 0x15, 0xf8, 0xf5, 0x52, 0x6c, 0x50, 0xa5, 0xad, 0x90, 0x1d, 0xaf, 0xa1, + 0x6c, 0x9a, 0xa5, 0x9e, 0x4f, 0x06, 0xd5, 0xca, 0x8a, 0x46, 0x07, 0x1b, 0x54, 0x8f, 0xe1, 0x39, + 0x0b, 0xfb, 0x1f, 0x58, 0xf0, 0x48, 0x97, 0x74, 0x31, 0xb4, 0xb9, 0x7b, 0xcc, 0xf0, 0x2f, 0xde, + 0xf2, 0x53, 0xcd, 0xf1, 0xeb, 0x00, 0x2c, 0xa0, 0xe8, 0x0e, 0x00, 0x37, 0xe7, 0xb3, 0x97, 0xdd, + 0x4b, 0x45, 0xfc, 0x91, 0xda, 0x92, 0x32, 0x68, 0xf1, 0xfa, 0xea, 0x2d, 0x77, 0x8d, 0xaa, 0xfd, + 0xad, 0x32, 0xf4, 0xf3, 0x27, 0xa3, 0x6b, 0x30, 0xb8, 0xc5, 0xd3, 0xd7, 0xf6, 0x96, 0x3d, 0x37, + 0x55, 0x2e, 0x78, 0x01, 0x96, 0x64, 0xd0, 0x35, 0x98, 0xa2, 0x27, 0x8b, 0xe7, 0xf8, 0x55, 0xe2, + 0x3b, 0xbb, 0x52, 0x5b, 0xe5, 0x6f, 0x1c, 0xc8, 0x64, 0xdc, 0x53, 0x6b, 0xed, 0x28, 0xb8, 0x53, + 0x3d, 0xf4, 0x5a, 0x5b, 0xb6, 0x39, 0x9e, 0x16, 0x58, 0x49, 0xaa, 0x87, 0x67, 0x9c, 0xa3, 0xf2, + 0x74, 0xb3, 0x4d, 0x2f, 0xd7, 0x5e, 0xe6, 0x35, 0x75, 0x71, 0x13, 0x97, 0xf9, 0x16, 0xb4, 0x98, + 0x4f, 0xc5, 0xfa, 0x56, 0x44, 0xe2, 0xad, 0xd0, 0x77, 0xc5, 0xa3, 0x92, 0xa9, 0x6f, 0x41, 0x06, + 0x8e, 0xdb, 0x6a, 0x50, 0x2a, 0x1b, 0x8e, 0xe7, 0xb7, 0x22, 0x92, 0x52, 0x19, 0x30, 0xa9, 0xac, + 0x66, 0xe0, 0xb8, 0xad, 0x06, 0x5d, 0x5b, 0xa7, 0xc4, 0x3b, 0x84, 0x32, 0x38, 0x5a, 0xb0, 0xa0, + 0xcf, 0xc0, 0xa0, 0x74, 0x91, 0x2f, 0x94, 0xc3, 0x43, 0x38, 0x0e, 0xa8, 0x37, 0x0d, 0xb5, 0x37, + 0xaf, 0x84, 0x73, 0xbc, 0xa4, 0x77, 0x94, 0xf7, 0xee, 0xfe, 0xcc, 0x82, 0xa9, 0x0e, 0xae, 0x5e, + 0x9c, 0xa5, 0x6d, 0x7a, 0x71, 0xa2, 0x32, 0xee, 0x6b, 0x2c, 0x8d, 0x97, 0x63, 0x85, 0x41, 0x77, + 0x0b, 0x67, 0x9a, 0x59, 0x46, 0x29, 0x5c, 0x40, 0x04, 0xb4, 0x37, 0x46, 0x89, 0xce, 0x42, 0x5f, + 0x2b, 0x26, 0x91, 0x7c, 0x7c, 0x4e, 0xf2, 0x79, 0x66, 0x07, 0x64, 0x10, 0x2a, 0xb6, 0x6e, 0x2a, + 0x13, 0x9c, 0x26, 0xb6, 0x72, 0x23, 0x1c, 0x87, 0xd9, 0x5f, 0x2f, 0xc3, 0x78, 0xc6, 0xe5, 0x93, + 0x76, 0x64, 0x3b, 0x0c, 0xbc, 0x24, 0x54, 0x79, 0xd5, 0xf8, 0x7b, 0x57, 0xa4, 0xb9, 0x75, 0x4d, + 0x94, 0x63, 0x85, 0x81, 0x9e, 0x96, 0xef, 0x8d, 0x66, 0x5f, 0x12, 0x58, 0xaa, 0x1a, 0x4f, 0x8e, + 0x16, 0x7d, 0x05, 0xe4, 0x49, 0xe8, 0x6b, 0x86, 0xea, 0xf9, 0x68, 0xf5, 0x3d, 0xf1, 0x52, 0xb5, + 0x16, 0x86, 0x3e, 0x66, 0x40, 0xf4, 0x94, 0x18, 0x7d, 0xe6, 0xe6, 0x02, 0x3b, 0x6e, 0x18, 0x6b, + 0x53, 0xf0, 0x0c, 0x0c, 0xde, 0x25, 0xbb, 0x91, 0x17, 0x6c, 0x66, 0xef, 0x6d, 0xae, 0xf0, 0x62, + 0x2c, 0xe1, 0xe6, 0x4b, 0x1f, 0x83, 0xc7, 0xfc, 0xd2, 0xc7, 0x50, 0xee, 0x39, 0xf8, 0x1d, 0x0b, + 0xc6, 0x59, 0xb2, 0x51, 0x11, 0x9a, 0xef, 0x85, 0xc1, 0x09, 0xc8, 0x18, 0x4f, 0x42, 0x7f, 0x44, + 0x1b, 0xcd, 0xa6, 0xea, 0x67, 0x3d, 0xc1, 0x1c, 0x86, 0x1e, 0x83, 0x3e, 0xd6, 0x05, 0xfa, 0x19, + 0x47, 0x78, 0x4e, 0xf3, 0xaa, 0x93, 0x38, 0x98, 0x95, 0xb2, 0x28, 0x2b, 0x4c, 0x9a, 0xbe, 0xc7, + 0x3b, 0x9d, 0x9a, 0x5b, 0x1f, 
0xb6, 0x28, 0xab, 0x8e, 0x9d, 0x7c, 0x50, 0x51, 0x56, 0x9d, 0x89, + 0x1f, 0x2e, 0xe7, 0xff, 0x8f, 0x12, 0x9c, 0xe9, 0x58, 0x2f, 0xbd, 0x01, 0x5e, 0x35, 0x6e, 0x80, + 0x2f, 0x64, 0x6e, 0x80, 0xed, 0xc3, 0x6b, 0x3f, 0x98, 0x3b, 0xe1, 0xce, 0x57, 0xb5, 0xe5, 0x13, + 0xbc, 0xaa, 0xed, 0x2b, 0x2a, 0xe2, 0xf4, 0xe7, 0x88, 0x38, 0x7f, 0x68, 0xc1, 0xa3, 0x1d, 0xa7, + 0xec, 0xa1, 0x0b, 0x6b, 0xeb, 0xd8, 0xcb, 0x2e, 0xda, 0xc9, 0x2f, 0x97, 0xbb, 0x8c, 0x8a, 0xe9, + 0x29, 0xe7, 0x28, 0x17, 0x62, 0xc0, 0x58, 0x08, 0x6f, 0x23, 0x9c, 0x03, 0xf1, 0x32, 0xac, 0xa0, + 0x28, 0xd6, 0xc2, 0xc2, 0x78, 0x27, 0x57, 0x8e, 0xb8, 0xa1, 0xe6, 0x4d, 0x3b, 0xb9, 0x9e, 0x6f, + 0x20, 0x1b, 0x2c, 0x76, 0x5b, 0xd3, 0x3c, 0xcb, 0x47, 0xd1, 0x3c, 0x47, 0x3a, 0x6b, 0x9d, 0x68, + 0x11, 0xc6, 0xb7, 0xbd, 0x80, 0x3d, 0x10, 0x6a, 0x4a, 0x4f, 0x2a, 0x36, 0xf7, 0x9a, 0x09, 0xc6, + 0x59, 0xfc, 0xd9, 0x57, 0x61, 0xf4, 0xe8, 0xd6, 0xb5, 0x1f, 0x97, 0xe1, 0x83, 0x87, 0x30, 0x05, + 0x7e, 0x3a, 0x18, 0xdf, 0x45, 0x3b, 0x1d, 0xda, 0xbe, 0x4d, 0x0d, 0xa6, 0x37, 0x5a, 0xbe, 0xbf, + 0xcb, 0xfc, 0xa7, 0x88, 0x2b, 0x31, 0x84, 0x50, 0xa3, 0x5e, 0x22, 0x5f, 0xed, 0x80, 0x83, 0x3b, + 0xd6, 0x44, 0x9f, 0x06, 0x14, 0xde, 0x61, 0xe9, 0x78, 0xdd, 0x34, 0x9f, 0x02, 0xfb, 0x04, 0xe5, + 0x74, 0xab, 0xde, 0x68, 0xc3, 0xc0, 0x1d, 0x6a, 0x51, 0x39, 0x95, 0x3d, 0x62, 0xae, 0xba, 0x95, + 0x91, 0x53, 0xb1, 0x0e, 0xc4, 0x26, 0x2e, 0xba, 0x04, 0x93, 0xce, 0x8e, 0xe3, 0xf1, 0xf4, 0x5a, + 0x92, 0x00, 0x17, 0x54, 0x95, 0xfd, 0x6a, 0x31, 0x8b, 0x80, 0xdb, 0xeb, 0xa0, 0xa6, 0x61, 0x90, + 0xe4, 0x89, 0xf8, 0x3f, 0x71, 0x84, 0x15, 0x5c, 0xd8, 0x44, 0x69, 0xff, 0x57, 0x8b, 0x1e, 0x7d, + 0x1d, 0xde, 0x92, 0xa4, 0x33, 0xa2, 0x0c, 0x6c, 0x5a, 0x98, 0x9b, 0x9a, 0x91, 0x65, 0x1d, 0x88, + 0x4d, 0x5c, 0xbe, 0x34, 0xe2, 0xd4, 0x9d, 0xdb, 0x90, 0x36, 0x45, 0x84, 0xa8, 0xc2, 0xa0, 0x12, + 0xb4, 0xeb, 0xed, 0x78, 0x71, 0x18, 0x89, 0x0d, 0xd4, 0xa3, 0x73, 0x6f, 0xca, 0x2f, 0xab, 0x9c, + 0x0c, 0x96, 0xf4, 0xec, 0x6f, 0x94, 0x60, 0x54, 0xb6, 0xf8, 0x7a, 0x2b, 0x4c, 0x9c, 0x13, 0x38, + 0xd2, 0x5f, 0x37, 0x8e, 0xf4, 0x85, 0x62, 0x01, 0xb3, 0xac, 0x73, 0x5d, 0x8f, 0xf2, 0xcf, 0x64, + 0x8e, 0xf2, 0x17, 0x7a, 0x21, 0x7a, 0xf8, 0x11, 0xfe, 0x6f, 0x2d, 0x98, 0x34, 0xf0, 0x4f, 0xe0, + 0x24, 0xa9, 0x99, 0x27, 0xc9, 0x73, 0x3d, 0x8c, 0xa6, 0xcb, 0x09, 0xf2, 0xed, 0x52, 0x66, 0x14, + 0xec, 0xe4, 0xf8, 0x02, 0xf4, 0x6d, 0x39, 0x91, 0x5b, 0x2c, 0xd7, 0x64, 0x5b, 0xf5, 0xf9, 0xcb, + 0x4e, 0xe4, 0x72, 0xfe, 0x7f, 0x5e, 0xbd, 0x74, 0xe5, 0x44, 0x6e, 0x6e, 0x94, 0x03, 0x6b, 0x14, + 0x5d, 0x84, 0x81, 0xb8, 0x11, 0x36, 0x95, 0x1f, 0xe8, 0x59, 0xfe, 0x0a, 0x16, 0x2d, 0x39, 0xd8, + 0x9b, 0x43, 0x66, 0x73, 0xb4, 0x18, 0x0b, 0xfc, 0xd9, 0x4d, 0xa8, 0xa8, 0xa6, 0x8f, 0xd5, 0x13, + 0xfe, 0xbf, 0x95, 0x61, 0xaa, 0xc3, 0x5a, 0x41, 0x5f, 0x34, 0xe6, 0xed, 0xd5, 0x9e, 0x17, 0xdb, + 0xfb, 0x9c, 0xb9, 0x2f, 0x32, 0x4d, 0xc9, 0x15, 0xab, 0xe3, 0x08, 0xcd, 0xdf, 0x8c, 0x49, 0xb6, + 0x79, 0x5a, 0x94, 0xdf, 0x3c, 0x6d, 0xf6, 0xc4, 0xa6, 0x9f, 0x36, 0xa4, 0x7a, 0x7a, 0xac, 0xdf, + 0xf9, 0xaf, 0xf5, 0xc1, 0x74, 0xa7, 0xc8, 0x7c, 0xf4, 0x15, 0x2b, 0xf3, 0xa0, 0xc4, 0x6b, 0xbd, + 0x87, 0xf7, 0xf3, 0x57, 0x26, 0x44, 0x36, 0x9b, 0x79, 0xf3, 0x89, 0x89, 0xdc, 0x19, 0x17, 0xad, + 0xb3, 0xf8, 0xa4, 0x88, 0x3f, 0x0e, 0x22, 0xb9, 0xc2, 0xa7, 0x8e, 0xd0, 0x15, 0xf1, 0xbe, 0x48, + 0x9c, 0x89, 0x4f, 0x92, 0xc5, 0xf9, 0xf1, 0x49, 0xb2, 0x0f, 0xb3, 0x1e, 0x0c, 0x6b, 0xe3, 0x3a, + 0xd6, 0x65, 0x70, 0x97, 0x1e, 0x51, 0x5a, 0xbf, 0x8f, 0x75, 0x29, 0xfc, 0x5d, 0x0b, 0x32, 0x4e, + 0x5b, 0xca, 0x2c, 0x63, 0x75, 0x35, 0xcb, 0x9c, 0x85, 
0xbe, 0x28, 0xf4, 0x49, 0xf6, 0xb1, 0x03, + 0x1c, 0xfa, 0x04, 0x33, 0x88, 0x7a, 0xfc, 0xb6, 0xdc, 0xed, 0xf1, 0x5b, 0xaa, 0xa7, 0xfb, 0x64, + 0x87, 0x48, 0x23, 0x89, 0x62, 0xe3, 0x57, 0x69, 0x21, 0xe6, 0x30, 0xfb, 0xb7, 0xfa, 0x60, 0xaa, + 0x43, 0xb4, 0x1b, 0xd5, 0x90, 0x36, 0x9d, 0x84, 0xdc, 0x73, 0x76, 0xb3, 0x49, 0x57, 0x2f, 0xf1, + 0x62, 0x2c, 0xe1, 0xcc, 0xd9, 0x94, 0x27, 0x6e, 0xcb, 0x98, 0xae, 0x44, 0xbe, 0x36, 0x01, 0x3d, + 0xfe, 0x67, 0x52, 0x2f, 0x00, 0xc4, 0xb1, 0xbf, 0x12, 0x50, 0x09, 0xcf, 0x15, 0x4e, 0xad, 0x69, + 0xbe, 0xbf, 0xfa, 0x55, 0x01, 0xc1, 0x1a, 0x16, 0xaa, 0xc2, 0x44, 0x33, 0x0a, 0x13, 0x6e, 0x18, + 0xac, 0x72, 0x47, 0x88, 0x7e, 0x33, 0x9a, 0xaa, 0x96, 0x81, 0xe3, 0xb6, 0x1a, 0xe8, 0x65, 0x18, + 0x16, 0x11, 0x56, 0xb5, 0x30, 0xf4, 0x85, 0x19, 0x49, 0x5d, 0xc7, 0xd7, 0x53, 0x10, 0xd6, 0xf1, + 0xb4, 0x6a, 0xcc, 0xda, 0x38, 0xd8, 0xb1, 0x1a, 0xb7, 0x38, 0x6a, 0x78, 0x99, 0xfc, 0x1d, 0x43, + 0x85, 0xf2, 0x77, 0xa4, 0x86, 0xb5, 0x4a, 0xe1, 0x8b, 0x18, 0xc8, 0x35, 0x40, 0xfd, 0x41, 0x19, + 0x06, 0xf8, 0xa7, 0x38, 0x01, 0x29, 0xaf, 0x26, 0x4c, 0x4a, 0x85, 0x72, 0x25, 0xf0, 0x5e, 0xcd, + 0x57, 0x9d, 0xc4, 0xe1, 0xac, 0x49, 0xed, 0x90, 0xd4, 0x0c, 0x85, 0xe6, 0x8d, 0x3d, 0x34, 0x9b, + 0xb1, 0x94, 0x00, 0xa7, 0xa1, 0xed, 0xa8, 0x2d, 0x80, 0x98, 0x3d, 0xd5, 0x49, 0x69, 0x88, 0x8c, + 0xb0, 0x2f, 0x15, 0xea, 0x47, 0x5d, 0x55, 0xe3, 0xbd, 0x49, 0x97, 0xa5, 0x02, 0x60, 0x8d, 0xf6, + 0xec, 0x2b, 0x50, 0x51, 0xc8, 0x79, 0x2a, 0xe4, 0x88, 0xce, 0xda, 0xfe, 0x3f, 0x18, 0xcf, 0xb4, + 0xd5, 0x93, 0x06, 0xfa, 0x3b, 0x16, 0x8c, 0xf3, 0x2e, 0xaf, 0x04, 0x3b, 0x82, 0x15, 0x7c, 0xd9, + 0x82, 0x69, 0xbf, 0xc3, 0x4e, 0x14, 0x9f, 0xf9, 0x28, 0x7b, 0x58, 0x29, 0x9f, 0x9d, 0xa0, 0xb8, + 0x63, 0x6b, 0xe8, 0x1c, 0x0c, 0xf1, 0x97, 0x87, 0x1d, 0x5f, 0x78, 0x50, 0x8f, 0xf0, 0x5c, 0xd8, + 0xbc, 0x0c, 0x2b, 0xa8, 0xfd, 0x13, 0x0b, 0x26, 0xdb, 0x1e, 0xb2, 0x7f, 0x58, 0x86, 0x21, 0xb2, + 0x7e, 0x97, 0xba, 0x64, 0xfd, 0xd6, 0x47, 0x59, 0x3e, 0x74, 0x94, 0xbf, 0x61, 0x81, 0x58, 0xa1, + 0x27, 0xa0, 0x3f, 0xac, 0x99, 0xfa, 0xc3, 0x87, 0x8a, 0x2c, 0xfa, 0x2e, 0x8a, 0xc3, 0x2f, 0x95, + 0x60, 0x82, 0x23, 0xa4, 0x37, 0x32, 0x0f, 0xcb, 0xc7, 0xe9, 0xed, 0x35, 0x1a, 0xf5, 0x04, 0x68, + 0xe7, 0x91, 0x1a, 0xdf, 0xb2, 0xef, 0xd0, 0x6f, 0xf9, 0x17, 0x16, 0x20, 0x3e, 0x27, 0xd9, 0x67, + 0x9b, 0xf9, 0xe9, 0xa6, 0x99, 0x03, 0x52, 0xce, 0xa1, 0x20, 0x58, 0xc3, 0x7a, 0xc0, 0x43, 0xc8, + 0xdc, 0x87, 0x95, 0xf3, 0xef, 0xc3, 0x7a, 0x18, 0xf5, 0xef, 0x96, 0x21, 0xeb, 0x4a, 0x89, 0xde, + 0x81, 0x91, 0x86, 0xd3, 0x74, 0xee, 0x78, 0xbe, 0x97, 0x78, 0x24, 0x2e, 0x76, 0xe1, 0xbe, 0xac, + 0xd5, 0x10, 0xd7, 0x50, 0x5a, 0x09, 0x36, 0x28, 0xa2, 0x79, 0x80, 0x66, 0xe4, 0xed, 0x78, 0x3e, + 0xd9, 0x64, 0x1a, 0x0f, 0x8b, 0xc5, 0xe0, 0x77, 0xc7, 0xb2, 0x14, 0x6b, 0x18, 0x1d, 0x7c, 0xf7, + 0xcb, 0x27, 0xe1, 0xbb, 0xdf, 0xd7, 0xa3, 0xef, 0x7e, 0x7f, 0x21, 0xdf, 0x7d, 0x0c, 0xa7, 0xe5, + 0xe1, 0x4d, 0xff, 0xaf, 0x7a, 0x3e, 0x11, 0xb2, 0x1b, 0x8f, 0xd5, 0x98, 0xdd, 0xdf, 0x9b, 0x3b, + 0x8d, 0x3b, 0x62, 0xe0, 0x2e, 0x35, 0xed, 0x16, 0x4c, 0xd5, 0x49, 0xe4, 0xb1, 0x9c, 0x94, 0x6e, + 0xba, 0x97, 0x3e, 0x07, 0x95, 0x28, 0xb3, 0x8d, 0x7b, 0x0c, 0xc8, 0xd7, 0xb2, 0x98, 0xc9, 0x6d, + 0x9b, 0x92, 0xb4, 0xff, 0x46, 0x09, 0x06, 0x85, 0x13, 0xe5, 0x09, 0x08, 0x1f, 0x57, 0x0c, 0x13, + 0xd3, 0x33, 0x79, 0xfc, 0x8f, 0x75, 0xab, 0xab, 0x71, 0xa9, 0x9e, 0x31, 0x2e, 0x3d, 0x57, 0x8c, + 0xdc, 0xe1, 0x66, 0xa5, 0x7f, 0x5a, 0x86, 0x31, 0xd3, 0xa9, 0xf4, 0x04, 0xa6, 0xe5, 0x0d, 0x18, + 0x8c, 0x85, 0x7f, 0x73, 0xa9, 0x88, 0xcf, 0x5e, 0xf6, 0x13, 0xa7, 0x37, 0xf1, 
0xc2, 0xa3, 0x59, + 0x92, 0xeb, 0xe8, 0x42, 0x5d, 0x3e, 0x11, 0x17, 0xea, 0x3c, 0x5f, 0xdf, 0xbe, 0x07, 0xe1, 0xeb, + 0x6b, 0xff, 0x80, 0xb1, 0x7c, 0xbd, 0xfc, 0x04, 0x8e, 0xf1, 0xd7, 0xcd, 0xc3, 0xe1, 0x7c, 0xa1, + 0x75, 0x27, 0xba, 0xd7, 0xe5, 0x38, 0xff, 0xae, 0x05, 0xc3, 0x02, 0xf1, 0x04, 0x06, 0xf0, 0x69, + 0x73, 0x00, 0x4f, 0x15, 0x1a, 0x40, 0x97, 0x9e, 0x7f, 0xa3, 0xa4, 0x7a, 0x5e, 0x13, 0x4f, 0xed, + 0xe7, 0x66, 0xe0, 0x1e, 0xa2, 0xaa, 0x5f, 0xd8, 0x08, 0x7d, 0x21, 0xc0, 0x3d, 0x96, 0x86, 0xe6, + 0xf1, 0xf2, 0x03, 0xed, 0x37, 0x56, 0xd8, 0x2c, 0x72, 0x2c, 0x8c, 0x12, 0x71, 0x80, 0x76, 0x7a, + 0xe8, 0xdf, 0x05, 0x48, 0x5f, 0x57, 0x17, 0x51, 0xad, 0xdd, 0x77, 0x6b, 0x2b, 0xf1, 0xfc, 0x79, + 0x2f, 0x48, 0xe2, 0x24, 0x9a, 0x5f, 0x0b, 0x92, 0x1b, 0x11, 0x17, 0xfa, 0xb5, 0x58, 0x3b, 0x45, + 0x0b, 0x6b, 0x74, 0x65, 0x10, 0x07, 0x6b, 0xa3, 0xdf, 0xbc, 0x41, 0xba, 0x2e, 0xca, 0xb1, 0xc2, + 0xb0, 0x5f, 0x61, 0x9c, 0x9d, 0x4d, 0x50, 0x6f, 0x61, 0x70, 0xbf, 0x38, 0xa0, 0xa6, 0x96, 0x99, + 0x85, 0xaf, 0xeb, 0xc1, 0x76, 0x45, 0xd9, 0x27, 0xed, 0x82, 0xee, 0x47, 0x9d, 0xc6, 0xe6, 0x21, + 0xd2, 0x76, 0xed, 0xf8, 0x4a, 0x61, 0x8e, 0xdc, 0xc3, 0x45, 0x23, 0x4b, 0x3a, 0xc8, 0x32, 0xad, + 0xad, 0xd5, 0xb2, 0x79, 0xd3, 0x97, 0x25, 0x00, 0xa7, 0x38, 0x68, 0x41, 0x28, 0x94, 0xdc, 0xe2, + 0xf2, 0xc1, 0x8c, 0x42, 0x29, 0xa7, 0x44, 0xd3, 0x28, 0x5f, 0x80, 0x61, 0xf5, 0x14, 0x4d, 0x8d, + 0x3f, 0x02, 0x52, 0xe1, 0xf2, 0xd5, 0x4a, 0x5a, 0x8c, 0x75, 0x1c, 0xb4, 0x06, 0x53, 0xae, 0x8a, + 0xd9, 0xa9, 0xb5, 0xee, 0xf8, 0x5e, 0x83, 0x56, 0xe5, 0xf1, 0xb6, 0x8f, 0xec, 0xef, 0xcd, 0x4d, + 0x55, 0xdb, 0xc1, 0xb8, 0x53, 0x1d, 0xb4, 0x0e, 0xe3, 0x31, 0x7f, 0x72, 0x47, 0x06, 0x66, 0x08, + 0x1b, 0xc4, 0xb3, 0xf2, 0xbe, 0xb3, 0x6e, 0x82, 0x0f, 0x58, 0x11, 0xe7, 0x0a, 0x32, 0x94, 0x23, + 0x4b, 0x02, 0xbd, 0x06, 0x63, 0xbe, 0xfe, 0x9e, 0x68, 0x4d, 0x98, 0x28, 0x94, 0x07, 0x9b, 0xf1, + 0xda, 0x68, 0x0d, 0x67, 0xb0, 0xd1, 0x1b, 0x30, 0xa3, 0x97, 0x88, 0x3c, 0x42, 0x4e, 0xb0, 0x49, + 0x62, 0xf1, 0x3c, 0xc7, 0x63, 0xfb, 0x7b, 0x73, 0x33, 0x57, 0xbb, 0xe0, 0xe0, 0xae, 0xb5, 0xd1, + 0x45, 0x18, 0x91, 0x33, 0xa9, 0x85, 0x31, 0xa5, 0xbe, 0x93, 0x1a, 0x0c, 0x1b, 0x98, 0xef, 0xef, + 0x5a, 0xf7, 0x0b, 0xb4, 0xb2, 0x76, 0x84, 0xa3, 0x77, 0x61, 0x44, 0xef, 0x63, 0xf6, 0x6c, 0xce, + 0x7f, 0xa3, 0x55, 0x88, 0x02, 0xaa, 0xe7, 0x3a, 0x0c, 0x1b, 0xb4, 0xed, 0x1b, 0x30, 0x50, 0xdf, + 0x8d, 0x1b, 0x89, 0x5f, 0x80, 0xbf, 0x3d, 0x69, 0x0c, 0x21, 0xdd, 0x7b, 0xec, 0xbd, 0x28, 0x31, + 0x22, 0x9b, 0xc0, 0xf8, 0xfa, 0x72, 0xad, 0x1e, 0x36, 0xee, 0x92, 0x64, 0x91, 0x6b, 0x6f, 0x58, + 0x70, 0x37, 0xeb, 0x88, 0x5c, 0xab, 0x03, 0x3f, 0xb4, 0xff, 0xd4, 0x82, 0x7e, 0xf6, 0xd6, 0x51, + 0xde, 0x3b, 0x59, 0x45, 0x3a, 0x8d, 0x5e, 0x86, 0x01, 0xb2, 0xb1, 0x41, 0x1a, 0x89, 0xd8, 0xc6, + 0x32, 0x56, 0x60, 0x60, 0x85, 0x95, 0xd2, 0xcd, 0xc9, 0x1a, 0xe3, 0x7f, 0xb1, 0x40, 0x46, 0x9f, + 0x85, 0x4a, 0xe2, 0x6d, 0x93, 0x45, 0xd7, 0x15, 0x06, 0xc5, 0xde, 0xdc, 0x57, 0x14, 0xb3, 0x58, + 0x97, 0x44, 0x70, 0x4a, 0xcf, 0xfe, 0x5a, 0x09, 0x20, 0x8d, 0xd4, 0xc9, 0x1b, 0xe6, 0x52, 0xdb, + 0x73, 0x60, 0x4f, 0x77, 0x78, 0x0e, 0x0c, 0xa5, 0x04, 0x3b, 0x3c, 0x06, 0xa6, 0xa6, 0xaa, 0x5c, + 0x68, 0xaa, 0xfa, 0x7a, 0x99, 0xaa, 0x65, 0x98, 0x4c, 0x23, 0x8d, 0xcc, 0x90, 0x4d, 0x96, 0x1c, + 0x74, 0x3d, 0x0b, 0xc4, 0xed, 0xf8, 0xf6, 0xd7, 0x2c, 0x10, 0x0e, 0x8f, 0x05, 0x56, 0xab, 0x2b, + 0x9f, 0xee, 0x31, 0xb2, 0x98, 0x3d, 0x5b, 0xc4, 0x17, 0x54, 0xe4, 0x2e, 0x53, 0xfb, 0xc7, 0xc8, + 0x58, 0x66, 0x50, 0xb5, 0x7f, 0xd3, 0x82, 0x61, 0x0e, 0xbe, 0xc6, 0x64, 0xea, 0xfc, 0x7e, 0xf5, + 0x94, 
0x79, 0x96, 0xbd, 0x6a, 0x43, 0x09, 0xab, 0x0c, 0xa4, 0xfa, 0xab, 0x36, 0x12, 0x80, 0x53, + 0x1c, 0xf4, 0x0c, 0x0c, 0xc6, 0xad, 0x3b, 0x0c, 0x3d, 0xe3, 0xfd, 0x58, 0xe7, 0xc5, 0x58, 0xc2, + 0xed, 0x7f, 0x5e, 0x82, 0x89, 0xac, 0xf3, 0x2b, 0xc2, 0x30, 0xc0, 0x65, 0xec, 0xac, 0x78, 0x76, + 0x98, 0x2d, 0x47, 0x73, 0x9e, 0x05, 0xfe, 0x36, 0x33, 0x33, 0xba, 0x0b, 0x4a, 0x68, 0x03, 0x86, + 0xdd, 0xf0, 0x5e, 0x70, 0xcf, 0x89, 0xdc, 0xc5, 0xda, 0x9a, 0xf8, 0x12, 0x39, 0xee, 0x4a, 0xd5, + 0xb4, 0x82, 0xee, 0x9a, 0xcb, 0x6c, 0x0b, 0x29, 0x08, 0xeb, 0x84, 0xa9, 0x4e, 0xd9, 0x08, 0x83, + 0x0d, 0x6f, 0xf3, 0x9a, 0xd3, 0x2c, 0x76, 0x31, 0xbf, 0x2c, 0xd1, 0xb5, 0x36, 0x46, 0x45, 0x8e, + 0x06, 0x0e, 0xc0, 0x29, 0x49, 0xfb, 0xd7, 0xa7, 0xc1, 0x58, 0x0b, 0x46, 0x7a, 0x58, 0xeb, 0x81, + 0xa7, 0x87, 0x7d, 0x0b, 0x86, 0xc8, 0x76, 0x33, 0xd9, 0xad, 0x7a, 0x51, 0xb1, 0x64, 0xdf, 0x2b, + 0x02, 0xbb, 0x9d, 0xba, 0x84, 0x60, 0x45, 0xb1, 0x4b, 0xb2, 0xdf, 0xf2, 0x43, 0x91, 0xec, 0xb7, + 0xef, 0x2f, 0x25, 0xd9, 0xef, 0x1b, 0x30, 0xb8, 0xe9, 0x25, 0x98, 0x34, 0x43, 0x91, 0xf4, 0x22, + 0x67, 0xf1, 0x5c, 0xe2, 0xc8, 0xed, 0x69, 0x20, 0x05, 0x00, 0x4b, 0x72, 0x68, 0x5d, 0x6d, 0xaa, + 0x81, 0x22, 0x67, 0x79, 0xbb, 0xad, 0xaf, 0xe3, 0xb6, 0x12, 0xc9, 0x7d, 0x07, 0xdf, 0x7f, 0x72, + 0x5f, 0x95, 0x92, 0x77, 0xe8, 0x41, 0xa5, 0xe4, 0x35, 0x52, 0x1b, 0x57, 0x8e, 0x23, 0xb5, 0xf1, + 0xd7, 0x2c, 0x38, 0xd5, 0xec, 0x94, 0x18, 0x5c, 0x24, 0xd7, 0xfd, 0xe4, 0x11, 0x52, 0xa5, 0x1b, + 0x4d, 0xb3, 0x54, 0x02, 0x1d, 0xd1, 0x70, 0xe7, 0x86, 0x65, 0x8e, 0xe4, 0xe1, 0xf7, 0x9f, 0x23, + 0xf9, 0xb8, 0xb3, 0xf0, 0xa6, 0x19, 0x93, 0x47, 0x8f, 0x25, 0x63, 0xf2, 0xd8, 0x03, 0xcc, 0x98, + 0xac, 0xe5, 0x3a, 0x1e, 0x7f, 0xb0, 0xb9, 0x8e, 0xb7, 0xcc, 0x73, 0x89, 0xa7, 0xd6, 0x7d, 0xb9, + 0xf0, 0xb9, 0x64, 0xb4, 0x70, 0xf8, 0xc9, 0xc4, 0xb3, 0x3e, 0x4f, 0xbe, 0xcf, 0xac, 0xcf, 0x46, + 0xee, 0x64, 0x74, 0x1c, 0xb9, 0x93, 0xdf, 0xd1, 0x4f, 0xd0, 0xa9, 0x22, 0x2d, 0xa8, 0x83, 0xb2, + 0xbd, 0x85, 0x4e, 0x67, 0x68, 0x7b, 0x76, 0xe6, 0xe9, 0x93, 0xce, 0xce, 0x7c, 0xea, 0x18, 0xb3, + 0x33, 0x9f, 0x3e, 0xd1, 0xec, 0xcc, 0x8f, 0x3c, 0x24, 0xd9, 0x99, 0x67, 0x4e, 0x2a, 0x3b, 0xf3, + 0xa3, 0x0f, 0x34, 0x3b, 0x33, 0xfd, 0x74, 0x4d, 0x19, 0x42, 0x36, 0x33, 0x5b, 0xe4, 0xd3, 0x75, + 0x8c, 0x38, 0xe3, 0x9f, 0x4e, 0x81, 0x70, 0x4a, 0xd4, 0xfe, 0x2b, 0x70, 0xe6, 0xf0, 0xa5, 0x9b, + 0x7a, 0x6b, 0xd4, 0x52, 0x9b, 0x59, 0xc6, 0x5b, 0x83, 0x89, 0x85, 0x1a, 0x56, 0xe1, 0xf4, 0xb1, + 0xdf, 0xb6, 0xe0, 0x91, 0x2e, 0xd9, 0x15, 0x0b, 0xc7, 0x5f, 0x36, 0x61, 0xbc, 0x69, 0x56, 0x2d, + 0x1c, 0xce, 0x6d, 0x64, 0x73, 0x54, 0x3e, 0xf2, 0x19, 0x00, 0xce, 0x92, 0x5f, 0xfa, 0xd0, 0x0f, + 0x7f, 0x7c, 0xe6, 0x03, 0x3f, 0xfa, 0xf1, 0x99, 0x0f, 0xfc, 0xf1, 0x8f, 0xcf, 0x7c, 0xe0, 0xe7, + 0xf6, 0xcf, 0x58, 0x3f, 0xdc, 0x3f, 0x63, 0xfd, 0x68, 0xff, 0x8c, 0xf5, 0x67, 0xfb, 0x67, 0xac, + 0xaf, 0xfd, 0xe4, 0xcc, 0x07, 0xde, 0x2c, 0xed, 0xbc, 0xf0, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, + 0x5f, 0x32, 0x87, 0xa4, 0xbe, 0xc8, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/generated.proto b/vendor/k8s.io/client-go/pkg/api/v1/generated.proto new file mode 100644 index 0000000000..9c48148aaf --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/generated.proto @@ -0,0 +1,3922 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.api.v1; + +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// Represents a Persistent Disk resource in AWS. +// +// An AWS EBS disk must exist before mounting to a container. The disk +// must also be in the same AWS zone as the kubelet. An AWS EBS disk +// can only be mounted as read/write once. AWS EBS volumes support +// ownership management and SELinux relabeling. +message AWSElasticBlockStoreVolumeSource { + // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). + // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + optional string volumeID = 1; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 2; + + // The partition in the volume that you want to mount. + // If omitted, the default is to mount by volume name. + // Examples: For volume /dev/sda1, you specify the partition as "1". + // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + // +optional + optional int32 partition = 3; + + // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". + // If omitted, the default is "false". + // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // +optional + optional bool readOnly = 4; +} + +// Affinity is a group of affinity scheduling rules. +message Affinity { + // Describes node affinity scheduling rules for the pod. + // +optional + optional NodeAffinity nodeAffinity = 1; + + // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + // +optional + optional PodAffinity podAffinity = 2; + + // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + // +optional + optional PodAntiAffinity podAntiAffinity = 3; +} + +// AttachedVolume describes a volume attached to a node +message AttachedVolume { + // Name of the attached volume + optional string name = 1; + + // DevicePath represents the device path where the volume should be available + optional string devicePath = 2; +} + +// AvoidPods describes pods that should avoid this node. 
This is the value for a +// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and +// will eventually become a field of NodeStatus. +message AvoidPods { + // Bounded-sized list of signatures of pods that should avoid this node, sorted + // in timestamp order from oldest to newest. Size of the slice is unspecified. + // +optional + repeated PreferAvoidPodsEntry preferAvoidPods = 1; +} + +// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. +message AzureDiskVolumeSource { + // The Name of the data disk in the blob storage + optional string diskName = 1; + + // The URI the data disk in the blob storage + optional string diskURI = 2; + + // Host Caching mode: None, Read Only, Read Write. + // +optional + optional string cachingMode = 3; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + optional string fsType = 4; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 5; +} + +// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. +message AzureFileVolumeSource { + // the name of secret that contains Azure Storage Account Name and Key + optional string secretName = 1; + + // Share Name + optional string shareName = 2; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 3; +} + +// Binding ties one object to another. +// For example, a pod is bound to a node by a scheduler. +message Binding { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The target object that you want to bind to the standard object. + optional ObjectReference target = 2; +} + +// Adds and removes POSIX capabilities from running containers. +message Capabilities { + // Added capabilities + // +optional + repeated string add = 1; + + // Removed capabilities + // +optional + repeated string drop = 2; +} + +// Represents a Ceph Filesystem mount that lasts the lifetime of a pod +// Cephfs volumes do not support ownership management or SELinux relabeling. +message CephFSVolumeSource { + // Required: Monitors is a collection of Ceph monitors + // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + repeated string monitors = 1; + + // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + // +optional + optional string path = 2; + + // Optional: User is the rados user name, default is admin + // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + optional string user = 3; + + // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + optional string secretFile = 4; + + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + optional LocalObjectReference secretRef = 5; + + // Optional: Defaults to false (read/write). 
ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + optional bool readOnly = 6; +} + +// Represents a cinder volume resource in Openstack. +// A Cinder volume must exist before mounting to a container. +// The volume must also be in the same region as the kubelet. +// Cinder volumes support ownership management and SELinux relabeling. +message CinderVolumeSource { + // volume id used to identify the volume in cinder + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + optional string volumeID = 1; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + optional string fsType = 2; + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + optional bool readOnly = 3; +} + +// Information about the condition of a component. +message ComponentCondition { + // Type of condition for a component. + // Valid value: "Healthy" + optional string type = 1; + + // Status of the condition for a component. + // Valid values for "Healthy": "True", "False", or "Unknown". + optional string status = 2; + + // Message about the condition for a component. + // For example, information about a health check. + // +optional + optional string message = 3; + + // Condition error code for a component. + // For example, a health check error code. + // +optional + optional string error = 4; +} + +// ComponentStatus (and ComponentStatusList) holds the cluster validation info. +message ComponentStatus { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // List of component conditions observed + // +optional + repeated ComponentCondition conditions = 2; +} + +// Status of all the conditions for the component as a list of ComponentStatus objects. +message ComponentStatusList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ComponentStatus objects. + repeated ComponentStatus items = 2; +} + +// ConfigMap holds configuration data for pods to consume. +message ConfigMap { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Data contains the configuration data. + // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot. + // +optional + map<string, string> data = 2; +} + +// ConfigMapEnvSource selects a ConfigMap to populate the environment +// variables with. +// +// The contents of the target ConfigMap's Data field will represent the +// key-value pairs as environment variables. +message ConfigMapEnvSource { + // The ConfigMap to select from.
+ optional LocalObjectReference localObjectReference = 1; + + // Specify whether the ConfigMap must be defined + // +optional + optional bool optional = 2; +} + +// Selects a key from a ConfigMap. +message ConfigMapKeySelector { + // The ConfigMap to select from. + optional LocalObjectReference localObjectReference = 1; + + // The key to select. + optional string key = 2; + + // Specify whether the ConfigMap or it's key must be defined + // +optional + optional bool optional = 3; +} + +// ConfigMapList is a resource containing a list of ConfigMap objects. +message ConfigMapList { + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of ConfigMaps. + repeated ConfigMap items = 2; +} + +// Adapts a ConfigMap into a projected volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names, +// unless the items element is populated with specific mappings of keys to paths. +// Note that this is identical to a configmap volume source without the default +// mode. +message ConfigMapProjection { + optional LocalObjectReference localObjectReference = 1; + + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + repeated KeyToPath items = 2; + + // Specify whether the ConfigMap or it's keys must be defined + // +optional + optional bool optional = 4; +} + +// Adapts a ConfigMap into a volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// volume as files using the keys in the Data field as the file names, unless +// the items element is populated with specific mappings of keys to paths. +// ConfigMap volumes support ownership management and SELinux relabeling. +message ConfigMapVolumeSource { + optional LocalObjectReference localObjectReference = 1; + + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + repeated KeyToPath items = 2; + + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. 
+ // +optional + optional int32 defaultMode = 3; + + // Specify whether the ConfigMap or it's keys must be defined + // +optional + optional bool optional = 4; +} + +// A single application container that you want to run within a pod. +message Container { + // Name of the container specified as a DNS_LABEL. + // Each container in a pod must have a unique name (DNS_LABEL). + // Cannot be updated. + optional string name = 1; + + // Docker image name. + // More info: http://kubernetes.io/docs/user-guide/images + // +optional + optional string image = 2; + + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands + // +optional + repeated string command = 3; + + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands + // +optional + repeated string args = 4; + + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + optional string workingDir = 5; + + // List of ports to expose from the container. Exposing a port here gives + // the system additional information about the network connections a + // container uses, but is primarily informational. Not specifying a port here + // DOES NOT prevent that port from being exposed. Any port which is + // listening on the default "0.0.0.0" address inside a container will be + // accessible from the network. + // Cannot be updated. + // +optional + repeated ContainerPort ports = 6; + + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + repeated EnvFromSource envFrom = 19; + + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + repeated EnvVar env = 7; + + // Compute Resources required by this container. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources + // +optional + optional ResourceRequirements resources = 8; + + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + repeated VolumeMount volumeMounts = 9; + + // Periodic probe of container liveness. 
+ // Container will be restarted if the probe fails. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // +optional + optional Probe livenessProbe = 10; + + // Periodic probe of container service readiness. + // Container will be removed from service endpoints if the probe fails. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // +optional + optional Probe readinessProbe = 11; + + // Actions that the management system should take in response to container lifecycle events. + // Cannot be updated. + // +optional + optional Lifecycle lifecycle = 12; + + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + optional string terminationMessagePath = 13; + + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + optional string terminationMessagePolicy = 20; + + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/images#updating-images + // +optional + optional string imagePullPolicy = 14; + + // Security options the pod should run with. + // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md + // +optional + optional SecurityContext securityContext = 15; + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + optional bool stdin = 16; + + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container processes that reads from stdin will never receive an EOF. + // Default is false + // +optional + optional bool stdinOnce = 17; + + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. + // +optional + optional bool tty = 18; +} + +// Describe a container image +message ContainerImage { + // Names by which this image is known. + // e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] + repeated string names = 1; + + // The size of the image in bytes. 
+ // +optional + optional int64 sizeBytes = 2; +} + +// ContainerPort represents a network port in a single container. +message ContainerPort { + // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + // named port in a pod must have a unique name. Name for the port that can be + // referred to by services. + // +optional + optional string name = 1; + + // Number of port to expose on the host. + // If specified, this must be a valid port number, 0 < x < 65536. + // If HostNetwork is specified, this must match ContainerPort. + // Most containers do not need this. + // +optional + optional int32 hostPort = 2; + + // Number of port to expose on the pod's IP address. + // This must be a valid port number, 0 < x < 65536. + optional int32 containerPort = 3; + + // Protocol for port. Must be UDP or TCP. + // Defaults to "TCP". + // +optional + optional string protocol = 4; + + // What host IP to bind the external port to. + // +optional + optional string hostIP = 5; +} + +// ContainerState holds a possible state of container. +// Only one of its members may be specified. +// If none of them is specified, the default one is ContainerStateWaiting. +message ContainerState { + // Details about a waiting container + // +optional + optional ContainerStateWaiting waiting = 1; + + // Details about a running container + // +optional + optional ContainerStateRunning running = 2; + + // Details about a terminated container + // +optional + optional ContainerStateTerminated terminated = 3; +} + +// ContainerStateRunning is a running state of a container. +message ContainerStateRunning { + // Time at which the container was last (re-)started + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 1; +} + +// ContainerStateTerminated is a terminated state of a container. +message ContainerStateTerminated { + // Exit status from the last termination of the container + optional int32 exitCode = 1; + + // Signal from the last termination of the container + // +optional + optional int32 signal = 2; + + // (brief) reason from the last termination of the container + // +optional + optional string reason = 3; + + // Message regarding the last termination of the container + // +optional + optional string message = 4; + + // Time at which previous execution of the container started + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 5; + + // Time at which the container last terminated + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 6; + + // Container's ID in the format 'docker://' + // +optional + optional string containerID = 7; +} + +// ContainerStateWaiting is a waiting state of a container. +message ContainerStateWaiting { + // (brief) reason the container is not yet running. + // +optional + optional string reason = 1; + + // Message regarding why the container is not yet running. + // +optional + optional string message = 2; +} + +// ContainerStatus contains details for the current status of this container. +message ContainerStatus { + // This must be a DNS_LABEL. Each container in a pod must have a unique name. + // Cannot be updated. + optional string name = 1; + + // Details about the container's current condition. + // +optional + optional ContainerState state = 2; + + // Details about the container's last termination condition. + // +optional + optional ContainerState lastState = 3; + + // Specifies whether the container has passed its readiness probe. 
+ optional bool ready = 4; + + // The number of times the container has been restarted, currently based on + // the number of dead containers that have not yet been removed. + // Note that this is calculated from dead containers. But those containers are subject to + // garbage collection. This value will get capped at 5 by GC. + optional int32 restartCount = 5; + + // The image the container is running. + // More info: http://kubernetes.io/docs/user-guide/images + // TODO(dchen1107): Which image the container is running with? + optional string image = 6; + + // ImageID of the container's image. + optional string imageID = 7; + + // Container's ID in the format 'docker://'. + // More info: http://kubernetes.io/docs/user-guide/container-environment#container-information + // +optional + optional string containerID = 8; +} + +// DaemonEndpoint contains information about a single Daemon endpoint. +message DaemonEndpoint { + // Port number of the given endpoint. + optional int32 Port = 1; +} + +// DeleteOptions may be provided when deleting an API object +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +// +k8s:openapi-gen=false +message DeleteOptions { + // The duration in seconds before the object should be deleted. Value must be non-negative integer. + // The value zero indicates delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + // Defaults to a per object value if not specified. zero means delete immediately. + // +optional + optional int64 gracePeriodSeconds = 1; + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. + // +optional + optional Preconditions preconditions = 2; + + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. + // +optional + optional bool orphanDependents = 3; + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // +optional + optional string propagationPolicy = 4; +} + +// Represents downward API info for projecting into a projected volume. +// Note that this is identical to a downwardAPI volume source without the default +// mode. +message DownwardAPIProjection { + // Items is a list of DownwardAPIVolume file + // +optional + repeated DownwardAPIVolumeFile items = 1; +} + +// DownwardAPIVolumeFile represents information to create the file containing the pod field +message DownwardAPIVolumeFile { + // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' + optional string path = 1; + + // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. + // +optional + optional ObjectFieldSelector fieldRef = 2; + + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+ // +optional + optional ResourceFieldSelector resourceFieldRef = 3; + + // Optional: mode bits to use on this file, must be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + optional int32 mode = 4; +} + +// DownwardAPIVolumeSource represents a volume containing downward API info. +// Downward API volumes support ownership management and SELinux relabeling. +message DownwardAPIVolumeSource { + // Items is a list of downward API volume file + // +optional + repeated DownwardAPIVolumeFile items = 1; + + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + optional int32 defaultMode = 2; +} + +// Represents an empty directory for a pod. +// Empty directory volumes support ownership management and SELinux relabeling. +message EmptyDirVolumeSource { + // What type of storage medium should back this directory. + // The default is "" which means to use the node's default medium. + // Must be an empty string (default) or Memory. + // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // +optional + optional string medium = 1; +} + +// EndpointAddress is a tuple that describes single IP address. +message EndpointAddress { + // The IP of this endpoint. + // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), + // or link-local multicast ((224.0.0.0/24). + // IPv6 is also accepted but not fully supported on all platforms. Also, certain + // kubernetes components, like kube-proxy, are not IPv6 ready. + // TODO: This should allow hostname or IP, See #4447. + optional string ip = 1; + + // The Hostname of this endpoint + // +optional + optional string hostname = 3; + + // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. + // +optional + optional string nodeName = 4; + + // Reference to object providing the endpoint. + // +optional + optional ObjectReference targetRef = 2; +} + +// EndpointPort is a tuple that describes a single port. +message EndpointPort { + // The name of this port (corresponds to ServicePort.Name). + // Must be a DNS_LABEL. + // Optional only if one port is defined. + // +optional + optional string name = 1; + + // The port number of the endpoint. + optional int32 port = 2; + + // The IP protocol for this port. + // Must be UDP or TCP. + // Default is TCP. + // +optional + optional string protocol = 3; +} + +// EndpointSubset is a group of addresses with a common set of ports. The +// expanded set of endpoints is the Cartesian product of Addresses x Ports. +// For example, given: +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// } +// The resulting set of endpoints can be viewed as: +// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], +// b: [ 10.10.1.1:309, 10.10.2.2:309 ] +message EndpointSubset { + // IP addresses which offer the related ports that are marked as ready. These endpoints + // should be considered safe for load balancers and clients to utilize. 
+ // +optional + repeated EndpointAddress addresses = 1; + + // IP addresses which offer the related ports but are not currently marked as ready + // because they have not yet finished starting, have recently failed a readiness check, + // or have recently failed a liveness check. + // +optional + repeated EndpointAddress notReadyAddresses = 2; + + // Port numbers available on the related IP addresses. + // +optional + repeated EndpointPort ports = 3; +} + +// Endpoints is a collection of endpoints that implement the actual service. Example: +// Name: "mysvc", +// Subsets: [ +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// }, +// { +// Addresses: [{"ip": "10.10.3.3"}], +// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] +// }, +// ] +message Endpoints { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The set of all endpoints is the union of all subsets. Addresses are placed into + // subsets according to the IPs they share. A single address with multiple ports, + // some of which are ready and some of which are not (because they come from + // different containers) will result in the address being displayed in different + // subsets for the different ports. No address will appear in both Addresses and + // NotReadyAddresses in the same subset. + // Sets of addresses and ports that comprise a service. + repeated EndpointSubset subsets = 2; +} + +// EndpointsList is a list of endpoints. +message EndpointsList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of endpoints. + repeated Endpoints items = 2; +} + +// EnvFromSource represents the source of a set of ConfigMaps +message EnvFromSource { + // An optional identifer to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + // +optional + optional string prefix = 1; + + // The ConfigMap to select from + // +optional + optional ConfigMapEnvSource configMapRef = 2; + + // The Secret to select from + // +optional + optional SecretEnvSource secretRef = 3; +} + +// EnvVar represents an environment variable present in a Container. +message EnvVar { + // Name of the environment variable. Must be a C_IDENTIFIER. + optional string name = 1; + + // Variable references $(VAR_NAME) are expanded + // using the previous defined environment variables in the container and + // any service environment variables. If a variable cannot be resolved, + // the reference in the input string will be unchanged. The $(VAR_NAME) + // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped + // references will never be expanded, regardless of whether the variable + // exists or not. + // Defaults to "". + // +optional + optional string value = 2; + + // Source for the environment variable's value. Cannot be used if value is not empty. + // +optional + optional EnvVarSource valueFrom = 3; +} + +// EnvVarSource represents a source for the value of an EnvVar. +message EnvVarSource { + // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, + // spec.nodeName, spec.serviceAccountName, status.podIP. 
+ // +optional + optional ObjectFieldSelector fieldRef = 1; + + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // +optional + optional ResourceFieldSelector resourceFieldRef = 2; + + // Selects a key of a ConfigMap. + // +optional + optional ConfigMapKeySelector configMapKeyRef = 3; + + // Selects a key of a secret in the pod's namespace + // +optional + optional SecretKeySelector secretKeyRef = 4; +} + +// Event is a report of an event somewhere in the cluster. +// TODO: Decide whether to store these separately or with the object they apply to. +message Event { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The object that this event is about. + optional ObjectReference involvedObject = 2; + + // This should be a short, machine understandable string that gives the reason + // for the transition into the object's current status. + // TODO: provide exact specification for format. + // +optional + optional string reason = 3; + + // A human-readable description of the status of this operation. + // TODO: decide on maximum length. + // +optional + optional string message = 4; + + // The component reporting this event. Should be a short machine understandable string. + // +optional + optional EventSource source = 5; + + // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time firstTimestamp = 6; + + // The time at which the most recent occurrence of this event was recorded. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTimestamp = 7; + + // The number of times this event has occurred. + // +optional + optional int32 count = 8; + + // Type of this event (Normal, Warning), new types could be added in the future + // +optional + optional string type = 9; +} + +// EventList is a list of events. +message EventList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of events + repeated Event items = 2; +} + +// EventSource contains information for an event. +message EventSource { + // Component from which the event is generated. + // +optional + optional string component = 1; + + // Node name on which the event is generated. + // +optional + optional string host = 2; +} + +// ExecAction describes a "run in container" action. +message ExecAction { + // Command is the command line to execute inside the container, the working directory for the + // command is root ('/') in the container's filesystem. The command is simply exec'd, it is + // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + // a shell, you need to explicitly call out to that shell. + // Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + // +optional + repeated string command = 1; +} + +// Represents a Fibre Channel volume. +// Fibre Channel volumes can only be mounted as read/write once. +// Fibre Channel volumes support ownership management and SELinux relabeling. 
+message FCVolumeSource { + // Required: FC target worldwide names (WWNs) + repeated string targetWWNs = 1; + + // Required: FC target lun number + optional int32 lun = 2; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 3; + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 4; +} + +// FlexVolume represents a generic volume resource that is +// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. +message FlexVolumeSource { + // Driver is the name of the driver to use for this volume. + optional string driver = 1; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + // +optional + optional string fsType = 2; + + // Optional: SecretRef is reference to the secret object containing + // sensitive information to pass to the plugin scripts. This may be + // empty if no secret object is specified. If the secret object + // contains more than one secret, all secrets are passed to the plugin + // scripts. + // +optional + optional LocalObjectReference secretRef = 3; + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 4; + + // Optional: Extra command options if any. + // +optional + map<string, string> options = 5; +} + +// Represents a Flocker volume mounted by the Flocker agent. +// One and only one of datasetName and datasetUUID should be set. +// Flocker volumes do not support ownership management or SELinux relabeling. +message FlockerVolumeSource { + // Name of the dataset stored as metadata -> name on the dataset for Flocker + // should be considered as deprecated + // +optional + optional string datasetName = 1; + + // UUID of the dataset. This is unique identifier of a Flocker dataset + // +optional + optional string datasetUUID = 2; +} + +// Represents a Persistent Disk resource in Google Compute Engine. +// +// A GCE PD must exist before mounting to a container. The disk must +// also be in the same GCE project and zone as the kubelet. A GCE PD +// can only be mounted as read/write once or read-only many times. GCE +// PDs support ownership management and SELinux relabeling. +message GCEPersistentDiskVolumeSource { + // Unique name of the PD resource in GCE. Used to identify the disk in GCE. + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + optional string pdName = 1; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 2; + + // The partition in the volume that you want to mount. + // If omitted, the default is to mount by volume name. + // Examples: For volume /dev/sda1, you specify the partition as "1".
+ // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // +optional + optional int32 partition = 3; + + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // +optional + optional bool readOnly = 4; +} + +// Represents a volume that is populated with the contents of a git repository. +// Git repo volumes do not support ownership management. +// Git repo volumes support SELinux relabeling. +message GitRepoVolumeSource { + // Repository URL + optional string repository = 1; + + // Commit hash for the specified revision. + // +optional + optional string revision = 2; + + // Target directory name. + // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + // git repository. Otherwise, if specified, the volume will contain the git repository in + // the subdirectory with the given name. + // +optional + optional string directory = 3; +} + +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +message GlusterfsVolumeSource { + // EndpointsName is the endpoint name that details Glusterfs topology. + // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + optional string endpoints = 1; + + // Path is the Glusterfs volume path. + // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + optional string path = 2; + + // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // Defaults to false. + // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + optional bool readOnly = 3; +} + +// HTTPGetAction describes an action based on HTTP Get requests. +message HTTPGetAction { + // Path to access on the HTTP server. + // +optional + optional string path = 1; + + // Name or number of the port to access on the container. + // Number must be in the range 1 to 65535. + // Name must be an IANA_SVC_NAME. + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; + + // Host name to connect to, defaults to the pod IP. You probably want to set + // "Host" in httpHeaders instead. + // +optional + optional string host = 3; + + // Scheme to use for connecting to the host. + // Defaults to HTTP. + // +optional + optional string scheme = 4; + + // Custom headers to set in the request. HTTP allows repeated headers. + // +optional + repeated HTTPHeader httpHeaders = 5; +} + +// HTTPHeader describes a custom header to be used in HTTP probes +message HTTPHeader { + // The header field name + optional string name = 1; + + // The header field value + optional string value = 2; +} + +// Handler defines a specific action that should be taken +// TODO: pass structured data to these actions, and document that data here. +message Handler { + // One and only one of the following should be specified. + // Exec specifies the action to take. + // +optional + optional ExecAction exec = 1; + + // HTTPGet specifies the http request to perform. + // +optional + optional HTTPGetAction httpGet = 2; + + // TCPSocket specifies an action involving a TCP port. 
+ // TCP hooks not yet supported + // TODO: implement a realistic TCP lifecycle hook + // +optional + optional TCPSocketAction tcpSocket = 3; +} + +// Represents a host path mapped into a pod. +// Host path volumes do not support ownership management or SELinux relabeling. +message HostPathVolumeSource { + // Path of the directory on the host. + // More info: http://kubernetes.io/docs/user-guide/volumes#hostpath + optional string path = 1; +} + +// Represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +message ISCSIVolumeSource { + // iSCSI target portal. The portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + optional string targetPortal = 1; + + // Target iSCSI Qualified Name. + optional string iqn = 2; + + // iSCSI target lun number. + optional int32 lun = 3; + + // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // +optional + optional string iscsiInterface = 4; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: http://kubernetes.io/docs/user-guide/volumes#iscsi + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 5; + + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // +optional + optional bool readOnly = 6; + + // iSCSI target portal List. The portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + // +optional + repeated string portals = 7; +} + +// Maps a string key to a path within a volume. +message KeyToPath { + // The key to project. + optional string key = 1; + + // The relative path of the file to map the key to. + // May not be an absolute path. + // May not contain the path element '..'. + // May not start with the string '..'. + optional string path = 2; + + // Optional: mode bits to use on this file, must be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + optional int32 mode = 3; +} + +// Lifecycle describes actions that the management system should take in response to container lifecycle +// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks +// until the action is complete, unless the container process fails, in which case the handler is aborted. +message Lifecycle { + // PostStart is called immediately after a container is created. If the handler fails, + // the container is terminated and restarted according to its restart policy. + // Other management of the container blocks until the hook completes. + // More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details + // +optional + optional Handler postStart = 1; + + // PreStop is called immediately before a container is terminated. + // The container is terminated after the handler completes. + // The reason for termination is passed to the handler. + // Regardless of the outcome of the handler, the container is eventually terminated. 
+ // Other management of the container blocks until the hook completes. + // More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details + // +optional + optional Handler preStop = 2; +} + +// LimitRange sets resource usage limits for each kind of resource in a Namespace. +message LimitRange { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the limits enforced. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional LimitRangeSpec spec = 2; +} + +// LimitRangeItem defines a min/max usage limit for any resource that matches on kind. +message LimitRangeItem { + // Type of resource that this limit applies to. + // +optional + optional string type = 1; + + // Max usage constraints on this kind by resource name. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> max = 2; + + // Min usage constraints on this kind by resource name. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> min = 3; + + // Default resource requirement limit value by resource name if resource limit is omitted. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> default = 4; + + // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> defaultRequest = 5; + + // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> maxLimitRequestRatio = 6; +} + +// LimitRangeList is a list of LimitRange items. +message LimitRangeList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of LimitRange objects. + // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_limit_range.md + repeated LimitRange items = 2; +} + +// LimitRangeSpec defines a min/max usage limit for resources that match on kind. +message LimitRangeSpec { + // Limits is the list of LimitRangeItem objects that are enforced. + repeated LimitRangeItem limits = 1; +} + +// List holds a list of objects, which may not be known by the server. +message List { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of objects + repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; +} + +// ListOptions is the query options to a standard REST list call. +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +// +k8s:openapi-gen=false +message ListOptions { + // A selector to restrict the list of returned objects by their labels. + // Defaults to everything. + // +optional + optional string labelSelector = 1; + + // A selector to restrict the list of returned objects by their fields. + // Defaults to everything. + // +optional + optional string fieldSelector = 2; + + // Watch for changes to the described resources and return them as a stream of + // add, update, and remove notifications. Specify resourceVersion. + // +optional + optional bool watch = 3; + + // When specified with a watch call, shows changes that occur after that particular version of a resource.
+ // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + // +optional + optional string resourceVersion = 4; + + // Timeout for the list/watch call. + // +optional + optional int64 timeoutSeconds = 5; +} + +// LoadBalancerIngress represents the status of a load-balancer ingress point: +// traffic intended for the service should be sent to an ingress point. +message LoadBalancerIngress { + // IP is set for load-balancer ingress points that are IP based + // (typically GCE or OpenStack load-balancers) + // +optional + optional string ip = 1; + + // Hostname is set for load-balancer ingress points that are DNS based + // (typically AWS load-balancers) + // +optional + optional string hostname = 2; +} + +// LoadBalancerStatus represents the status of a load-balancer. +message LoadBalancerStatus { + // Ingress is a list containing ingress points for the load-balancer. + // Traffic intended for the service should be sent to these ingress points. + // +optional + repeated LoadBalancerIngress ingress = 1; +} + +// LocalObjectReference contains enough information to let you locate the +// referenced object inside the same namespace. +message LocalObjectReference { + // Name of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // TODO: Add other useful fields. apiVersion, kind, uid? + // +optional + optional string name = 1; +} + +// Represents an NFS mount that lasts the lifetime of a pod. +// NFS volumes do not support ownership management or SELinux relabeling. +message NFSVolumeSource { + // Server is the hostname or IP address of the NFS server. + // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + optional string server = 1; + + // Path that is exported by the NFS server. + // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + optional string path = 2; + + // ReadOnly here will force + // the NFS export to be mounted with read-only permissions. + // Defaults to false. + // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // +optional + optional bool readOnly = 3; +} + +// Namespace provides a scope for Names. +// Use of multiple namespaces is optional. +message Namespace { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the behavior of the Namespace. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional NamespaceSpec spec = 2; + + // Status describes the current status of a Namespace. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional NamespaceStatus status = 3; +} + +// NamespaceList is a list of Namespaces. +message NamespaceList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Namespace objects in the list. 
+ // More info: http://kubernetes.io/docs/user-guide/namespaces + repeated Namespace items = 2; +} + +// NamespaceSpec describes the attributes on a Namespace. +message NamespaceSpec { + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. + // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers + // +optional + repeated string finalizers = 1; +} + +// NamespaceStatus is information about the current status of a Namespace. +message NamespaceStatus { + // Phase is the current lifecycle phase of the namespace. + // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#phases + // +optional + optional string phase = 1; +} + +// Node is a worker node in Kubernetes. +// Each node will have a unique identifier in the cache (i.e. in etcd). +message Node { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the behavior of a node. + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional NodeSpec spec = 2; + + // Most recently observed status of the node. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional NodeStatus status = 3; +} + +// NodeAddress contains information for the node's address. +message NodeAddress { + // Node address type, one of Hostname, ExternalIP or InternalIP. + optional string type = 1; + + // The node address. + optional string address = 2; +} + +// Node affinity is a group of node affinity scheduling rules. +message NodeAffinity { + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // may or may not try to eventually evict the pod from its node. + // +optional + optional NodeSelector requiredDuringSchedulingIgnoredDuringExecution = 1; + + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node matches the corresponding matchExpressions; the + // node(s) with the highest sum are the most preferred. + // +optional + repeated PreferredSchedulingTerm preferredDuringSchedulingIgnoredDuringExecution = 2; +} + +// NodeCondition contains condition information for a node. +message NodeCondition { + // Type of node condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time we got an update on a given condition. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastHeartbeatTime = 3; + + // Last time the condition transit from one status to another. 
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // (brief) reason for the condition's last transition. + // +optional + optional string reason = 5; + + // Human readable message indicating details about last transition. + // +optional + optional string message = 6; +} + +// NodeDaemonEndpoints lists ports opened by daemons running on the Node. +message NodeDaemonEndpoints { + // Endpoint on which Kubelet is listening. + // +optional + optional DaemonEndpoint kubeletEndpoint = 1; +} + +// NodeList is the whole list of all Nodes which have been registered with master. +message NodeList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of nodes + repeated Node items = 2; +} + +// NodeProxyOptions is the query options to a Node's proxy call. +message NodeProxyOptions { + // Path is the URL path to use for the current proxy request to node. + // +optional + optional string path = 1; +} + +// NodeResources is an object for conveying resource information about a node. +// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. +message NodeResources { + // Capacity represents the available resources of a node + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1; +} + +// A node selector represents the union of the results of one or more label queries +// over a set of nodes; that is, it represents the OR of the selectors represented +// by the node selector terms. +message NodeSelector { + // Required. A list of node selector terms. The terms are ORed. + repeated NodeSelectorTerm nodeSelectorTerms = 1; +} + +// A node selector requirement is a selector that contains values, a key, and an operator +// that relates the key and values. +message NodeSelectorRequirement { + // The label key that the selector applies to. + optional string key = 1; + + // Represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + optional string operator = 2; + + // An array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. If the operator is Gt or Lt, the values + // array must have a single element, which will be interpreted as an integer. + // This array is replaced during a strategic merge patch. + // +optional + repeated string values = 3; +} + +// A null or empty node selector term matches no objects. +message NodeSelectorTerm { + // Required. A list of node selector requirements. The requirements are ANDed. + repeated NodeSelectorRequirement matchExpressions = 1; +} + +// NodeSpec describes the attributes that a node is created with. +message NodeSpec { + // PodCIDR represents the pod IP range assigned to the node. + // +optional + optional string podCIDR = 1; + + // External ID of the node assigned by some machine database (e.g. a cloud provider). + // Deprecated. + // +optional + optional string externalID = 2; + + // ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID> + // +optional + optional string providerID = 3; + + // Unschedulable controls node schedulability of new pods. By default, node is schedulable. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration + // +optional + optional bool unschedulable = 4; + + // If specified, the node's taints.
+ // +optional + repeated Taint taints = 5; +} + +// NodeStatus is information about the current status of a node. +message NodeStatus { + // Capacity represents the total resources of a node. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity for more details. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1; + + // Allocatable represents the resources of a node that are available for scheduling. + // Defaults to Capacity. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> allocatable = 2; + + // NodePhase is the recently observed lifecycle phase of the node. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-phase + // The field is never populated, and now is deprecated. + // +optional + optional string phase = 3; + + // Conditions is an array of current observed node conditions. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-condition + // +optional + repeated NodeCondition conditions = 4; + + // List of addresses reachable to the node. + // Queried from cloud provider, if available. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-addresses + // +optional + repeated NodeAddress addresses = 5; + + // Endpoints of daemons running on the Node. + // +optional + optional NodeDaemonEndpoints daemonEndpoints = 6; + + // Set of ids/uuids to uniquely identify the node. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info + // +optional + optional NodeSystemInfo nodeInfo = 7; + + // List of container images on this node + // +optional + repeated ContainerImage images = 8; + + // List of attachable volumes in use (mounted) by the node. + // +optional + repeated string volumesInUse = 9; + + // List of volumes that are attached to the node. + // +optional + repeated AttachedVolume volumesAttached = 10; +} + +// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. +message NodeSystemInfo { + // MachineID reported by the node. For unique machine identification + // in the cluster this field is preferred. Learn more from man(5) + // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html + optional string machineID = 1; + + // SystemUUID reported by the node. For unique machine identification + // MachineID is preferred. This field is specific to Red Hat hosts + // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html + optional string systemUUID = 2; + + // Boot ID reported by the node. + optional string bootID = 3; + + // Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64). + optional string kernelVersion = 4; + + // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). + optional string osImage = 5; + + // ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0). + optional string containerRuntimeVersion = 6; + + // Kubelet Version reported by the node. + optional string kubeletVersion = 7; + + // KubeProxy Version reported by the node. + optional string kubeProxyVersion = 8; + + // The Operating System reported by the node + optional string operatingSystem = 9; + + // The Architecture reported by the node + optional string architecture = 10; +} + +// ObjectFieldSelector selects an APIVersioned field of an object. +message ObjectFieldSelector { + // Version of the schema the FieldPath is written in terms of, defaults to "v1". + // +optional + optional string apiVersion = 1; + + // Path of the field to select in the specified API version.
+ optional string fieldPath = 2; +} + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. +// +k8s:openapi-gen=false +message ObjectMeta { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + optional string name = 1; + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // than the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. + // + // If this field is specified and the generated name exists, the server will + // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // + // Applied only if Name is not specified. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency + // +optional + optional string generateName = 2; + + // Namespace defines the space within each name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // + // Must be a DNS_LABEL. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + optional string namespace = 3; + + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // +optional + optional string selfLink = 4; + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // + // Populated by the system. + // Read-only. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + optional string uid = 5; + + // An opaque value that represents the internal version of this object that can + // be used by clients to determine when objects have changed. May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and passed unmodified back to the server. + // They may only be valid for a particular resource or set of resources. + // + // Populated by the system. + // Read-only. + // Value must be treated as opaque by clients and . + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + optional string resourceVersion = 6; + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. 
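The GenerateName semantics described above (a prefix honoured only when Name is empty, with a unique suffix appended by the server) are easiest to see through the apimachinery ObjectMeta that this deprecated message points to. A minimal sketch with a hypothetical "demo-" prefix, assuming present-day k8s.io/api and k8s.io/apimachinery import paths:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// GenerateName is used only because Name is left empty; on creation the
	// server would append a random suffix to produce the final object name.
	cm := corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "demo-", // hypothetical prefix
			Namespace:    "default",
			Labels:       map[string]string{"app": "demo"}, // hypothetical label
		},
		Data: map[string]string{"key": "value"},
	}
	fmt.Println(cm.GenerateName, cm.Labels["app"])
}
```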
Read-only. + // +optional + optional int64 generation = 7; + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. + // + // Populated by the system. + // Read-only. + // Null for lists. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time creationTimestamp = 8; + + // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This + // field is set by the server when a graceful deletion is requested by the user, and is not + // directly settable by a client. The resource is expected to be deleted (no longer visible + // from resource lists, and not reachable by name) after the time in this field. Once set, + // this value may not be unset or be set further into the future, although it may be shortened + // or the resource may be deleted prior to this time. For example, a user may request that + // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination + // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard + // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the + // API. In the presence of network partitions, this object may still exist after this + // timestamp, until an administrator or automated process can determine the resource is + // fully terminated. + // If not set, graceful deletion of the object has not been requested. + // + // Populated by the system when a graceful deletion is requested. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deletionTimestamp = 9; + + // Number of seconds allowed for this object to gracefully terminate before + // it will be removed from the system. Only set when deletionTimestamp is also set. + // May only be shortened. + // Read-only. + // +optional + optional int64 deletionGracePeriodSeconds = 10; + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: http://kubernetes.io/docs/user-guide/labels + // +optional + map labels = 11; + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + map annotations = 12; + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + repeated k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference ownerReferences = 13; + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. 
If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + // +optional + repeated string finalizers = 14; + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + // +optional + optional string clusterName = 15; +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +message ObjectReference { + // Kind of the referent. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional string kind = 1; + + // Namespace of the referent. + // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + optional string namespace = 2; + + // Name of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + optional string name = 3; + + // UID of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + optional string uid = 4; + + // API version of the referent. + // +optional + optional string apiVersion = 5; + + // Specific resourceVersion to which this reference is made, if any. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + optional string resourceVersion = 6; + + // If referring to a piece of an object instead of an entire object, this string + // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + // For example, if the object reference is to a container within a pod, this would take on a value like: + // "spec.containers{name}" (where "name" refers to the name of the container that triggered + // the event) or if no container name is specified "spec.containers[2]" (container with + // index 2 in this pod). This syntax is chosen only to have some well-defined way of + // referencing a part of an object. + // TODO: this design is not final and this field is subject to change in the future. + // +optional + optional string fieldPath = 7; +} + +// PersistentVolume (PV) is a storage resource provisioned by an administrator. +// It is analogous to a node. +// More info: http://kubernetes.io/docs/user-guide/persistent-volumes +message PersistentVolume { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines a specification of a persistent volume owned by the cluster. + // Provisioned by an administrator. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes + // +optional + optional PersistentVolumeSpec spec = 2; + + // Status represents the current information/status for the persistent volume. + // Populated by the system. + // Read-only. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes + // +optional + optional PersistentVolumeStatus status = 3; +} + +// PersistentVolumeClaim is a user's request for and claim to a persistent volume +message PersistentVolumeClaim { + // Standard object's metadata. 
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the desired characteristics of a volume requested by a pod author. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // +optional + optional PersistentVolumeClaimSpec spec = 2; + + // Status represents the current information/status of a persistent volume claim. + // Read-only. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // +optional + optional PersistentVolumeClaimStatus status = 3; +} + +// PersistentVolumeClaimList is a list of PersistentVolumeClaim items. +message PersistentVolumeClaimList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // A list of persistent volume claims. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + repeated PersistentVolumeClaim items = 2; +} + +// PersistentVolumeClaimSpec describes the common attributes of storage devices +// and allows a Source for provider-specific attributes +message PersistentVolumeClaimSpec { + // AccessModes contains the desired access modes the volume should have. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 + // +optional + repeated string accessModes = 1; + + // A label query over volumes to consider for binding. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + + // Resources represents the minimum resources the volume should have. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources + // +optional + optional ResourceRequirements resources = 2; + + // VolumeName is the binding reference to the PersistentVolume backing this claim. + // +optional + optional string volumeName = 3; + + // Name of the StorageClass required by the claim. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1 + // +optional + optional string storageClassName = 5; +} + +// PersistentVolumeClaimStatus is the current status of a persistent volume claim. +message PersistentVolumeClaimStatus { + // Phase represents the current phase of PersistentVolumeClaim. + // +optional + optional string phase = 1; + + // AccessModes contains the actual access modes the volume backing the PVC has. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 + // +optional + repeated string accessModes = 2; + + // Represents the actual resources of the underlying volume. + // +optional + map capacity = 3; +} + +// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. +// This volume finds the bound PV and mounts that volume for the pod. A +// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another +// type of volume that is owned by someone else (the system). +message PersistentVolumeClaimVolumeSource { + // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + optional string claimName = 1; + + // Will force the ReadOnly setting in VolumeMounts. + // Default false. 
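Taken together, the PersistentVolumeClaimSpec fields above are what a claim author fills in: access modes, a storage request, and optionally a StorageClass name. A minimal sketch, assuming a core/v1 Go package contemporary with this proto (where the resources field is still a ResourceRequirements, as declared above) and a hypothetical StorageClass name:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	storageClass := "standard" // hypothetical StorageClass name

	spec := corev1.PersistentVolumeClaimSpec{
		AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
		Resources: corev1.ResourceRequirements{
			// Minimum amount of storage the claim asks for.
			Requests: corev1.ResourceList{
				corev1.ResourceStorage: resource.MustParse("1Gi"),
			},
		},
		StorageClassName: &storageClass,
	}

	req := spec.Resources.Requests[corev1.ResourceStorage]
	fmt.Println(spec.AccessModes[0], req.String())
}
```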
+ // +optional + optional bool readOnly = 2; +} + +// PersistentVolumeList is a list of PersistentVolume items. +message PersistentVolumeList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of persistent volumes. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes + repeated PersistentVolume items = 2; +} + +// PersistentVolumeSource is similar to VolumeSource but meant for the +// administrator who creates PVs. Exactly one of its members must be set. +message PersistentVolumeSource { + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. Provisioned by an admin. + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // +optional + optional GCEPersistentDiskVolumeSource gcePersistentDisk = 1; + + // AWSElasticBlockStore represents an AWS Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // +optional + optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 2; + + // HostPath represents a directory on the host. + // Provisioned by a developer or tester. + // This is useful for single-node development and testing only! + // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. + // More info: http://kubernetes.io/docs/user-guide/volumes#hostpath + // +optional + optional HostPathVolumeSource hostPath = 3; + + // Glusterfs represents a Glusterfs volume that is attached to a host and + // exposed to the pod. Provisioned by an admin. + // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // +optional + optional GlusterfsVolumeSource glusterfs = 4; + + // NFS represents an NFS mount on the host. Provisioned by an admin. + // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // +optional + optional NFSVolumeSource nfs = 5; + + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // +optional + optional RBDVolumeSource rbd = 6; + + // ISCSI represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. Provisioned by an admin. + // +optional + optional ISCSIVolumeSource iscsi = 7; + + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + optional CinderVolumeSource cinder = 8; + + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + optional CephFSVolumeSource cephfs = 9; + + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + optional FCVolumeSource fc = 10; + + // Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running + // +optional + optional FlockerVolumeSource flocker = 11; + + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. This is an + // alpha feature and may change in future. 
+ // +optional + optional FlexVolumeSource flexVolume = 12; + + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + optional AzureFileVolumeSource azureFile = 13; + + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + optional VsphereVirtualDiskVolumeSource vsphereVolume = 14; + + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + optional QuobyteVolumeSource quobyte = 15; + + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + optional AzureDiskVolumeSource azureDisk = 16; + + // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 17; + + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + optional PortworxVolumeSource portworxVolume = 18; + + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + optional ScaleIOVolumeSource scaleIO = 19; +} + +// PersistentVolumeSpec is the specification of a persistent volume. +message PersistentVolumeSpec { + // A description of the persistent volume's resources and capacity. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity + // +optional + map capacity = 1; + + // The actual volume backing the persistent volume. + optional PersistentVolumeSource persistentVolumeSource = 2; + + // AccessModes contains all ways the volume can be mounted. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes + // +optional + repeated string accessModes = 3; + + // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. + // Expected to be non-nil when bound. + // claim.VolumeName is the authoritative bind between PV and PVC. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#binding + // +optional + optional ObjectReference claimRef = 4; + + // What happens to a persistent volume when released from its claim. + // Valid options are Retain (default) and Recycle. + // Recycling must be supported by the volume plugin underlying this persistent volume. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy + // +optional + optional string persistentVolumeReclaimPolicy = 5; + + // Name of StorageClass to which this persistent volume belongs. Empty value + // means that this volume does not belong to any StorageClass. + // +optional + optional string storageClassName = 6; +} + +// PersistentVolumeStatus is the current status of a persistent volume. +message PersistentVolumeStatus { + // Phase indicates if a volume is available, bound to a claim, or released by a claim. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#phase + // +optional + optional string phase = 1; + + // A human-readable message indicating details about why the volume is in this state. + // +optional + optional string message = 2; + + // Reason is a brief CamelCase string that describes any failure and is meant + // for machine parsing and tidy display in the CLI. + // +optional + optional string reason = 3; +} + +// Represents a Photon Controller persistent disk resource. 
+message PhotonPersistentDiskVolumeSource { + // ID that identifies Photon Controller persistent disk + optional string pdID = 1; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + optional string fsType = 2; +} + +// Pod is a collection of containers that can run on a host. This resource is created +// by clients and scheduled onto hosts. +message Pod { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the pod. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional PodSpec spec = 2; + + // Most recently observed status of the pod. + // This data may not be up to date. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional PodStatus status = 3; +} + +// Pod affinity is a group of inter pod affinity scheduling rules. +message PodAffinity { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1; + + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. 
+ // +optional + repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2; +} + +// Defines a set of pods (namely those matching the labelSelector +// relative to the given namespace(s)) that this pod should be +// co-located (affinity) or not co-located (anti-affinity) with, +// where co-located is defined as running on a node whose value of +// the label with key tches that of any node on which +// a pod of the set of pods is running +message PodAffinityTerm { + // A label query over a set of resources, in this case pods. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1; + + // namespaces specifies which namespaces the labelSelector applies to (matches against); + // null or empty list means "this pod's namespace" + repeated string namespaces = 2; + + // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + // the labelSelector in the specified namespaces, where co-located is defined as running on a node + // whose value of the label with key topologyKey matches that of any node on which any of the + // selected pods is running. + // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" + // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); + // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. + // +optional + optional string topologyKey = 3; +} + +// Pod anti affinity is a group of inter pod anti affinity scheduling rules. +message PodAntiAffinity { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1; + + // The scheduler will prefer to schedule pods to nodes that satisfy + // the anti-affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. 
+ // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling anti-affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + // +optional + repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2; +} + +// PodAttachOptions is the query options to a Pod's remote attach call. +// --- +// TODO: merge w/ PodExecOptions below for stdin, stdout, etc +// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY +message PodAttachOptions { + // Stdin if true, redirects the standard input stream of the pod for this call. + // Defaults to false. + // +optional + optional bool stdin = 1; + + // Stdout if true indicates that stdout is to be redirected for the attach call. + // Defaults to true. + // +optional + optional bool stdout = 2; + + // Stderr if true indicates that stderr is to be redirected for the attach call. + // Defaults to true. + // +optional + optional bool stderr = 3; + + // TTY if true indicates that a tty will be allocated for the attach call. + // This is passed through the container runtime so the tty + // is allocated on the worker node by the container runtime. + // Defaults to false. + // +optional + optional bool tty = 4; + + // The container in which to execute the command. + // Defaults to only container if there is only one container in the pod. + // +optional + optional string container = 5; +} + +// PodCondition contains details for the current condition of this pod. +message PodCondition { + // Type is the type of the condition. + // Currently only Ready. + // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions + optional string type = 1; + + // Status is the status of the condition. + // Can be True, False, Unknown. + // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions + optional string status = 2; + + // Last time we probed the condition. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // Unique, one-word, CamelCase reason for the condition's last transition. + // +optional + optional string reason = 5; + + // Human-readable message indicating details about last transition. + // +optional + optional string message = 6; +} + +// PodExecOptions is the query options to a Pod's remote exec call. +// --- +// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging +// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY +message PodExecOptions { + // Redirect the standard input stream of the pod for this call. + // Defaults to false. + // +optional + optional bool stdin = 1; + + // Redirect the standard output stream of the pod for this call. + // Defaults to true. + // +optional + optional bool stdout = 2; + + // Redirect the standard error stream of the pod for this call. + // Defaults to true. + // +optional + optional bool stderr = 3; + + // TTY if true indicates that a tty will be allocated for the exec call. + // Defaults to false. 
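The PodAffinityTerm and PodAntiAffinity messages above combine into the common "spread these pods across nodes" pattern: a required anti-affinity term keyed on the node hostname label. A minimal sketch with a hypothetical app=web label, assuming present-day k8s.io/api and k8s.io/apimachinery import paths:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Required anti-affinity: do not co-schedule two pods labelled app=web
	// onto nodes that share the same kubernetes.io/hostname value.
	antiAffinity := corev1.PodAntiAffinity{
		RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
			{
				LabelSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"app": "web"}, // hypothetical label
				},
				// Namespaces left nil: "this pod's namespace", per the comment above.
				TopologyKey: "kubernetes.io/hostname",
			},
		},
	}
	fmt.Println(antiAffinity.RequiredDuringSchedulingIgnoredDuringExecution[0].TopologyKey)
}
```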
+ // +optional + optional bool tty = 4; + + // Container in which to execute the command. + // Defaults to only container if there is only one container in the pod. + // +optional + optional string container = 5; + + // Command is the remote command to execute. argv array. Not executed within a shell. + repeated string command = 6; +} + +// PodList is a list of Pods. +message PodList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of pods. + // More info: http://kubernetes.io/docs/user-guide/pods + repeated Pod items = 2; +} + +// PodLogOptions is the query options for a Pod's logs REST call. +message PodLogOptions { + // The container for which to stream logs. Defaults to only container if there is one container in the pod. + // +optional + optional string container = 1; + + // Follow the log stream of the pod. Defaults to false. + // +optional + optional bool follow = 2; + + // Return previous terminated container logs. Defaults to false. + // +optional + optional bool previous = 3; + + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + // +optional + optional int64 sinceSeconds = 4; + + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5; + + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. Defaults to false. + // +optional + optional bool timestamps = 6; + + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + // +optional + optional int64 tailLines = 7; + + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + // +optional + optional int64 limitBytes = 8; +} + +// PodPortForwardOptions is the query options to a Pod's port forward call +// when using WebSockets. +// The `port` query parameter must specify the port or +// ports (comma separated) to forward over. +// Port forwarding over SPDY does not use these options. It requires the port +// to be passed in the `port` header as part of request. +message PodPortForwardOptions { + // List of ports to forward + // Required when using WebSockets + // +optional + repeated int32 ports = 1; +} + +// PodProxyOptions is the query options to a Pod's proxy call. +message PodProxyOptions { + // Path is the URL path to use for the current proxy request to pod. + // +optional + optional string path = 1; +} + +// PodSecurityContext holds pod-level security attributes and common container settings. +// Some fields are also present in container.securityContext. Field values of +// container.securityContext take precedence over field values of PodSecurityContext. 
+message PodSecurityContext { + // The SELinux context to be applied to all containers. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in SecurityContext. If set in + // both SecurityContext and PodSecurityContext, the value specified in SecurityContext + // takes precedence for that container. + // +optional + optional SELinuxOptions seLinuxOptions = 1; + + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence + // for that container. + // +optional + optional int64 runAsUser = 2; + + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional bool runAsNonRoot = 3; + + // A list of groups applied to the first process run in each container, in addition + // to the container's primary GID. If unspecified, no groups will be added to + // any container. + // +optional + repeated int64 supplementalGroups = 4; + + // A special supplemental group that applies to all containers in a pod. + // Some volume types allow the Kubelet to change the ownership of that volume + // to be owned by the pod: + // + // 1. The owning GID will be the FSGroup + // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + // 3. The permission bits are OR'd with rw-rw---- + // + // If unset, the Kubelet will not modify the ownership and permissions of any volume. + // +optional + optional int64 fsGroup = 5; +} + +// Describes the class of pods that should avoid this node. +// Exactly one field should be set. +message PodSignature { + // Reference to controller whose pods should avoid this node. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference podController = 1; +} + +// PodSpec is a description of a pod. +message PodSpec { + // List of volumes that can be mounted by containers belonging to the pod. + // More info: http://kubernetes.io/docs/user-guide/volumes + // +optional + repeated Volume volumes = 1; + + // List of initialization containers belonging to the pod. + // Init containers are executed in order prior to containers being started. If any + // init container fails, the pod is considered to have failed and is handled according + // to its restartPolicy. The name for an init container or normal container must be + // unique among all containers. + // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. + // The resourceRequirements of an init container are taken into account during scheduling + // by finding the highest request/limit for each resource type, and then using the max of + // of that value or the sum of the normal containers. Limits are applied to init containers + // in a similar fashion. + // Init containers cannot currently be added or removed. + // Cannot be updated. 
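The precedence rule spelled out in PodSecurityContext above (a container's own SecurityContext wins for that container) is usually exercised by setting pod-wide defaults such as runAsUser, runAsNonRoot and fsGroup. A minimal sketch with hypothetical UID/GID values, assuming the present-day k8s.io/api/core/v1 import path:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	runAsUser := int64(1000) // hypothetical UID
	fsGroup := int64(2000)   // hypothetical GID
	nonRoot := true

	// Pod-level defaults; a container's own SecurityContext, if set,
	// takes precedence for that container.
	sc := corev1.PodSecurityContext{
		RunAsUser:          &runAsUser,
		RunAsNonRoot:       &nonRoot,
		FSGroup:            &fsGroup,
		SupplementalGroups: []int64{3000}, // hypothetical extra group
	}
	fmt.Println(*sc.RunAsUser, *sc.FSGroup)
}
```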
+ // More info: http://kubernetes.io/docs/user-guide/containers + repeated Container initContainers = 20; + + // List of containers belonging to the pod. + // Containers cannot currently be added or removed. + // There must be at least one container in a Pod. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/containers + repeated Container containers = 2; + + // Restart policy for all containers within the pod. + // One of Always, OnFailure, Never. + // Default to Always. + // More info: http://kubernetes.io/docs/user-guide/pod-states#restartpolicy + // +optional + optional string restartPolicy = 3; + + // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + // Value must be non-negative integer. The value zero indicates delete immediately. + // If this value is nil, the default grace period will be used instead. + // The grace period is the duration in seconds after the processes running in the pod are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // Defaults to 30 seconds. + // +optional + optional int64 terminationGracePeriodSeconds = 4; + + // Optional duration in seconds the pod may be active on the node relative to + // StartTime before the system will actively try to mark it failed and kill associated containers. + // Value must be a positive integer. + // +optional + optional int64 activeDeadlineSeconds = 5; + + // Set DNS policy for containers within the pod. + // One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. + // Defaults to "ClusterFirst". + // To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + // +optional + optional string dnsPolicy = 6; + + // NodeSelector is a selector which must be true for the pod to fit on a node. + // Selector which must match a node's labels for the pod to be scheduled on that node. + // More info: http://kubernetes.io/docs/user-guide/node-selection/README + // +optional + map nodeSelector = 7; + + // ServiceAccountName is the name of the ServiceAccount to use to run this pod. + // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md + // +optional + optional string serviceAccountName = 8; + + // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. + // Deprecated: Use serviceAccountName instead. + // +k8s:conversion-gen=false + // +optional + optional string serviceAccount = 9; + + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + // +optional + optional bool automountServiceAccountToken = 21; + + // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + // the scheduler simply schedules this pod onto that node, assuming that it fits resource + // requirements. + // +optional + optional string nodeName = 10; + + // Host networking requested for this pod. Use the host's network namespace. + // If this option is set, the ports that will be used must be specified. + // Default to false. + // +k8s:conversion-gen=false + // +optional + optional bool hostNetwork = 11; + + // Use the host's pid namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + optional bool hostPID = 12; + + // Use the host's ipc namespace. + // Optional: Default to false. 
+ // +k8s:conversion-gen=false + // +optional + optional bool hostIPC = 13; + + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. See type description for default values of each field. + // +optional + optional PodSecurityContext securityContext = 14; + + // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + // If specified, these secrets will be passed to individual puller implementations for them to use. For example, + // in the case of docker, only DockerConfig type secrets are honored. + // More info: http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + // +optional + repeated LocalObjectReference imagePullSecrets = 15; + + // Specifies the hostname of the Pod + // If not specified, the pod's hostname will be set to a system-defined value. + // +optional + optional string hostname = 16; + + // If specified, the fully qualified Pod hostname will be "...svc.". + // If not specified, the pod will not have a domainname at all. + // +optional + optional string subdomain = 17; + + // If specified, the pod's scheduling constraints + // +optional + optional Affinity affinity = 18; + + // If specified, the pod will be dispatched by specified scheduler. + // If not specified, the pod will be dispatched by default scheduler. + // +optional + optional string schedulerName = 19; + + // If specified, the pod's tolerations. + // +optional + repeated Toleration tolerations = 22; +} + +// PodStatus represents information about the status of a pod. Status may trail the actual +// state of a system. +message PodStatus { + // Current condition of the pod. + // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-phase + // +optional + optional string phase = 1; + + // Current service state of pod. + // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions + // +optional + repeated PodCondition conditions = 2; + + // A human readable message indicating details about why the pod is in this condition. + // +optional + optional string message = 3; + + // A brief CamelCase message indicating details about why the pod is in this state. + // e.g. 'OutOfDisk' + // +optional + optional string reason = 4; + + // IP address of the host to which the pod is assigned. Empty if not yet scheduled. + // +optional + optional string hostIP = 5; + + // IP address allocated to the pod. Routable at least within the cluster. + // Empty if not yet allocated. + // +optional + optional string podIP = 6; + + // RFC 3339 date and time at which the object was acknowledged by the Kubelet. + // This is before the Kubelet pulled the container image(s) for the pod. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7; + + // The list has one entry per init container in the manifest. The most recent successful + // init container will have ready = true, the most recently started container will have + // startTime set. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses + repeated ContainerStatus initContainerStatuses = 10; + + // The list has one entry per container in the manifest. Each entry is currently the output + // of `docker inspect`. 
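Most of the PodSpec fields above are optional; containers is the only list that must be non-empty. A minimal sketch of the corresponding Go struct with a hypothetical container name, image and node label, assuming the present-day k8s.io/api/core/v1 import path:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	grace := int64(30) // matches the documented 30-second default

	spec := corev1.PodSpec{
		Containers: []corev1.Container{
			{
				Name:  "web",        // hypothetical container name
				Image: "nginx:1.11", // hypothetical image
			},
		},
		RestartPolicy:                 corev1.RestartPolicyAlways,
		TerminationGracePeriodSeconds: &grace,
		DNSPolicy:                     corev1.DNSClusterFirst,
		NodeSelector:                  map[string]string{"disktype": "ssd"}, // hypothetical label
	}
	fmt.Println(len(spec.Containers), spec.RestartPolicy)
}
```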
+ // More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses + // +optional + repeated ContainerStatus containerStatuses = 8; + + // The Quality of Service (QOS) classification assigned to the pod based on resource requirements + // See PodQOSClass type for available QOS classes + // More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md + // +optional + optional string qosClass = 9; +} + +// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded +message PodStatusResult { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Most recently observed status of the pod. + // This data may not be up to date. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional PodStatus status = 2; +} + +// PodTemplate describes a template for creating copies of a predefined pod. +message PodTemplate { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Template defines the pods that will be created from this pod template. + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional PodTemplateSpec template = 2; +} + +// PodTemplateList is a list of PodTemplates. +message PodTemplateList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of pod templates + repeated PodTemplate items = 2; +} + +// PodTemplateSpec describes the data a pod should have when created from a template +message PodTemplateSpec { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the pod. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional PodSpec spec = 2; +} + +// PortworxVolumeSource represents a Portworx volume resource. +message PortworxVolumeSource { + // VolumeID uniquely identifies a Portworx volume + optional string volumeID = 1; + + // FSType represents the filesystem type to mount + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + optional string fsType = 2; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 3; +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +// +k8s:openapi-gen=false +message Preconditions { + // Specifies the target UID. + // +optional + optional string uid = 1; +} + +// Describes a class of pods that should avoid this node. +message PreferAvoidPodsEntry { + // The class of pods. + optional PodSignature podSignature = 1; + + // Time at which this entry was added to the list. 
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time evictionTime = 2; + + // (brief) reason why this entry was added to the list. + // +optional + optional string reason = 3; + + // Human readable message indicating why this entry was added to the list. + // +optional + optional string message = 4; +} + +// An empty preferred scheduling term matches all objects with implicit weight 0 +// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). +message PreferredSchedulingTerm { + // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + optional int32 weight = 1; + + // A node selector term, associated with the corresponding weight. + optional NodeSelectorTerm preference = 2; +} + +// Probe describes a health check to be performed against a container to determine whether it is +// alive or ready to receive traffic. +message Probe { + // The action taken to determine the health of a container + optional Handler handler = 1; + + // Number of seconds after the container has started before liveness probes are initiated. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // +optional + optional int32 initialDelaySeconds = 2; + + // Number of seconds after which the probe times out. + // Defaults to 1 second. Minimum value is 1. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // +optional + optional int32 timeoutSeconds = 3; + + // How often (in seconds) to perform the probe. + // Default to 10 seconds. Minimum value is 1. + // +optional + optional int32 periodSeconds = 4; + + // Minimum consecutive successes for the probe to be considered successful after having failed. + // Defaults to 1. Must be 1 for liveness. Minimum value is 1. + // +optional + optional int32 successThreshold = 5; + + // Minimum consecutive failures for the probe to be considered failed after having succeeded. + // Defaults to 3. Minimum value is 1. + // +optional + optional int32 failureThreshold = 6; +} + +// Represents a projected volume source +message ProjectedVolumeSource { + // list of volume projections + repeated VolumeProjection sources = 1; + + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + optional int32 defaultMode = 2; +} + +// Represents a Quobyte mount that lasts the lifetime of a pod. +// Quobyte volumes do not support ownership management or SELinux relabeling. +message QuobyteVolumeSource { + // Registry represents a single or multiple Quobyte Registry services + // specified as a string as host:port pair (multiple entries are separated with commas) + // which acts as the central registry for volumes + optional string registry = 1; + + // Volume is a string that references an already created Quobyte volume by name. + optional string volume = 2; + + // ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. + // Defaults to false. 
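The Probe message above separates the action (its Handler) from the timing and threshold knobs. A minimal HTTP liveness sketch with a hypothetical /healthz endpoint, assuming a core/v1 Go package contemporary with this proto, where the embedded action field is still named Handler (later releases renamed it to ProbeHandler):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	probe := corev1.Probe{
		Handler: corev1.Handler{
			HTTPGet: &corev1.HTTPGetAction{
				Path: "/healthz", // hypothetical health endpoint
				Port: intstr.FromInt(8080),
			},
		},
		// Timing and thresholds mirror the defaults documented above.
		InitialDelaySeconds: 5,
		TimeoutSeconds:      1,
		PeriodSeconds:       10,
		SuccessThreshold:    1,
		FailureThreshold:    3,
	}
	fmt.Println(probe.Handler.HTTPGet.Path, probe.PeriodSeconds)
}
```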
+ // +optional + optional bool readOnly = 3; + + // User to map volume access to + // Defaults to serivceaccount user + // +optional + optional string user = 4; + + // Group to map volume access to + // Default is no group + // +optional + optional string group = 5; +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +message RBDVolumeSource { + // A collection of Ceph monitors. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + repeated string monitors = 1; + + // The rados image name. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + optional string image = 2; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: http://kubernetes.io/docs/user-guide/volumes#rbd + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 3; + + // The rados pool name. + // Default is rbd. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it. + // +optional + optional string pool = 4; + + // The rados user name. + // Default is admin. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional string user = 5; + + // Keyring is the path to key ring for RBDUser. + // Default is /etc/ceph/keyring. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional string keyring = 6; + + // SecretRef is name of the authentication secret for RBDUser. If provided + // overrides keyring. + // Default is nil. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional LocalObjectReference secretRef = 7; + + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional bool readOnly = 8; +} + +// RangeAllocation is not a public type. +message RangeAllocation { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Range is string that identifies the range represented by 'data'. + optional string range = 2; + + // Data is a bit array containing all allocated addresses in the previous segment. + optional bytes data = 3; +} + +// ReplicationController represents the configuration of a replication controller. +message ReplicationController { + // If the Labels of a ReplicationController are empty, they are defaulted to + // be the same as the Pod(s) that the replication controller manages. + // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the specification of the desired behavior of the replication controller. 
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional ReplicationControllerSpec spec = 2; + + // Status is the most recently observed status of the replication controller. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional ReplicationControllerStatus status = 3; +} + +// ReplicationControllerCondition describes the state of a replication controller at a certain point. +message ReplicationControllerCondition { + // Type of replication controller condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// ReplicationControllerList is a collection of replication controllers. +message ReplicationControllerList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of replication controllers. + // More info: http://kubernetes.io/docs/user-guide/replication-controller + repeated ReplicationController items = 2; +} + +// ReplicationControllerSpec is the specification of a replication controller. +message ReplicationControllerSpec { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + // +optional + optional int32 replicas = 1; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 4; + + // Selector is a label query over pods that should match the Replicas count. + // If Selector is empty, it is defaulted to the labels present on the Pod template. + // Label keys and values that must match in order to be controlled by this replication + // controller, if empty defaulted to labels on Pod template. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + map selector = 2; + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. This takes precedence over a TemplateRef. + // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template + // +optional + optional PodTemplateSpec template = 3; +} + +// ReplicationControllerStatus represents the current status of a replication +// controller. +message ReplicationControllerStatus { + // Replicas is the most recently oberved number of replicas. 
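ReplicationControllerSpec above makes replicas a pointer precisely so that an explicit zero can be told apart from "unset, default to 1". A minimal sketch with hypothetical labels and image, assuming present-day k8s.io/api and k8s.io/apimachinery import paths:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	replicas := int32(3)
	labels := map[string]string{"app": "web"} // hypothetical labels

	spec := corev1.ReplicationControllerSpec{
		Replicas: &replicas, // pointer keeps explicit zero distinct from unset
		Selector: labels,
		Template: &corev1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{Labels: labels},
			Spec: corev1.PodSpec{
				Containers: []corev1.Container{{Name: "web", Image: "nginx:1.11"}}, // hypothetical image
			},
		},
	}
	fmt.Println(*spec.Replicas, spec.Selector["app"])
}
```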
+ // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + optional int32 replicas = 1; + + // The number of pods that have labels matching the labels of the pod template of the replication controller. + // +optional + optional int32 fullyLabeledReplicas = 2; + + // The number of ready replicas for this replication controller. + // +optional + optional int32 readyReplicas = 4; + + // The number of available replicas (ready for at least minReadySeconds) for this replication controller. + // +optional + optional int32 availableReplicas = 5; + + // ObservedGeneration reflects the generation of the most recently observed replication controller. + // +optional + optional int64 observedGeneration = 3; + + // Represents the latest available observations of a replication controller's current state. + // +optional + repeated ReplicationControllerCondition conditions = 6; +} + +// ResourceFieldSelector represents container resources (cpu, memory) and their output format +message ResourceFieldSelector { + // Container name: required for volumes, optional for env vars + // +optional + optional string containerName = 1; + + // Required: resource to select + optional string resource = 2; + + // Specifies the output format of the exposed resources, defaults to "1" + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity divisor = 3; +} + +// ResourceQuota sets aggregate quota restrictions enforced per namespace +message ResourceQuota { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the desired quota. + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional ResourceQuotaSpec spec = 2; + + // Status defines the actual enforced quota and its current usage. + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional ResourceQuotaStatus status = 3; +} + +// ResourceQuotaList is a list of ResourceQuota items. +message ResourceQuotaList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ResourceQuota objects. + // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + repeated ResourceQuota items = 2; +} + +// ResourceQuotaSpec defines the desired hard limits to enforce for Quota. +message ResourceQuotaSpec { + // Hard is the set of desired hard limits for each named resource. + // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // +optional + map hard = 1; + + // A collection of filters that must match each object tracked by a quota. + // If not specified, the quota matches all objects. + // +optional + repeated string scopes = 2; +} + +// ResourceQuotaStatus defines the enforced hard limits and observed use. +message ResourceQuotaStatus { + // Hard is the set of enforced hard limits for each named resource. + // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // +optional + map hard = 1; + + // Used is the current observed total usage of the resource in the namespace. 
+ // +optional + map used = 2; +} + +// ResourceRequirements describes the compute resource requirements. +message ResourceRequirements { + // Limits describes the maximum amount of compute resources allowed. + // More info: http://kubernetes.io/docs/user-guide/compute-resources/ + // +optional + map limits = 1; + + // Requests describes the minimum amount of compute resources required. + // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to an implementation-defined value. + // More info: http://kubernetes.io/docs/user-guide/compute-resources/ + // +optional + map requests = 2; +} + +// SELinuxOptions are the labels to be applied to the container +message SELinuxOptions { + // User is a SELinux user label that applies to the container. + // +optional + optional string user = 1; + + // Role is a SELinux role label that applies to the container. + // +optional + optional string role = 2; + + // Type is a SELinux type label that applies to the container. + // +optional + optional string type = 3; + + // Level is SELinux level label that applies to the container. + // +optional + optional string level = 4; +} + +// ScaleIOVolumeSource represents a persistent ScaleIO volume +message ScaleIOVolumeSource { + // The host address of the ScaleIO API Gateway. + optional string gateway = 1; + + // The name of the storage system as configured in ScaleIO. + optional string system = 2; + + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + optional LocalObjectReference secretRef = 3; + + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + optional bool sslEnabled = 4; + + // The name of the Protection Domain for the configured storage (defaults to "default"). + // +optional + optional string protectionDomain = 5; + + // The Storage Pool associated with the protection domain (defaults to "default"). + // +optional + optional string storagePool = 6; + + // Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). + // +optional + optional string storageMode = 7; + + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. + optional string volumeName = 8; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + optional string fsType = 9; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 10; +} + +// Secret holds secret data of a certain type. The total bytes of the values in +// the Data field must be less than MaxSecretSize bytes. +message Secret { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN + // or leading dot followed by valid DNS_SUBDOMAIN. + // The serialized form of the secret data is a base64 encoded string, + // representing the arbitrary (possibly non-string) data value here. + // Described in https://tools.ietf.org/html/rfc4648#section-4 + // +optional + map data = 2; + + // stringData allows specifying non-binary secret data in string form. 
+ // It is provided as a write-only convenience method. + // All keys and values are merged into the data field on write, overwriting any existing values. + // It is never output when reading from the API. + // +k8s:conversion-gen=false + // +optional + map stringData = 4; + + // Used to facilitate programmatic handling of secret data. + // +optional + optional string type = 3; +} + +// SecretEnvSource selects a Secret to populate the environment +// variables with. +// +// The contents of the target Secret's Data field will represent the +// key-value pairs as environment variables. +message SecretEnvSource { + // The Secret to select from. + optional LocalObjectReference localObjectReference = 1; + + // Specify whether the Secret must be defined + // +optional + optional bool optional = 2; +} + +// SecretKeySelector selects a key of a Secret. +message SecretKeySelector { + // The name of the secret in the pod's namespace to select from. + optional LocalObjectReference localObjectReference = 1; + + // The key of the secret to select from. Must be a valid secret key. + optional string key = 2; + + // Specify whether the Secret or it's key must be defined + // +optional + optional bool optional = 3; +} + +// SecretList is a list of Secret. +message SecretList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of secret objects. + // More info: http://kubernetes.io/docs/user-guide/secrets + repeated Secret items = 2; +} + +// Adapts a secret into a projected volume. +// +// The contents of the target Secret's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names. +// Note that this is identical to a secret volume source without the default +// mode. +message SecretProjection { + optional LocalObjectReference localObjectReference = 1; + + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + repeated KeyToPath items = 2; + + // Specify whether the Secret or its key must be defined + // +optional + optional bool optional = 4; +} + +// Adapts a Secret into a volume. +// +// The contents of the target Secret's Data field will be presented in a volume +// as files using the keys in the Data field as the file names. +// Secret volumes support ownership management and SELinux relabeling. +message SecretVolumeSource { + // Name of the secret in the pod's namespace to use. + // More info: http://kubernetes.io/docs/user-guide/volumes#secrets + // +optional + optional string secretName = 1; + + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. 
If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + repeated KeyToPath items = 2; + + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + optional int32 defaultMode = 3; + + // Specify whether the Secret or it's keys must be defined + // +optional + optional bool optional = 4; +} + +// SecurityContext holds security configuration that will be applied to a container. +// Some fields are present in both SecurityContext and PodSecurityContext. When both +// are set, the values in SecurityContext take precedence. +message SecurityContext { + // The capabilities to add/drop when running containers. + // Defaults to the default set of capabilities granted by the container runtime. + // +optional + optional Capabilities capabilities = 1; + + // Run container in privileged mode. + // Processes in privileged containers are essentially equivalent to root on the host. + // Defaults to false. + // +optional + optional bool privileged = 2; + + // The SELinux context to be applied to the container. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional SELinuxOptions seLinuxOptions = 3; + + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional int64 runAsUser = 4; + + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional bool runAsNonRoot = 5; + + // Whether this container has a read-only root filesystem. + // Default is false. + // +optional + optional bool readOnlyRootFilesystem = 6; +} + +// SerializedReference is a reference to serialized object. +message SerializedReference { + // The reference to an object in the system. + // +optional + optional ObjectReference reference = 1; +} + +// Service is a named abstraction of software service (for example, mysql) consisting of local port +// (for example 3306) that the proxy listens on, and the selector that determines which pods +// will answer requests sent through the proxy. +message Service { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the behavior of a service. 
+ // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional ServiceSpec spec = 2; + + // Most recently observed status of the service. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional ServiceStatus status = 3; +} + +// ServiceAccount binds together: +// * a name, understood by users, and perhaps by peripheral systems, for an identity +// * a principal that can be authenticated and authorized +// * a set of secrets +message ServiceAccount { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. + // More info: http://kubernetes.io/docs/user-guide/secrets + // +optional + repeated ObjectReference secrets = 2; + + // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images + // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets + // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. + // More info: http://kubernetes.io/docs/user-guide/secrets#manually-specifying-an-imagepullsecret + // +optional + repeated LocalObjectReference imagePullSecrets = 3; + + // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. + // Can be overridden at the pod level. + // +optional + optional bool automountServiceAccountToken = 4; +} + +// ServiceAccountList is a list of ServiceAccount objects +message ServiceAccountList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ServiceAccounts. + // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts + repeated ServiceAccount items = 2; +} + +// ServiceList holds a list of services. +message ServiceList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of services + repeated Service items = 2; +} + +// ServicePort contains information on service's port. +message ServicePort { + // The name of this port within the service. This must be a DNS_LABEL. + // All ports within a ServiceSpec must have unique names. This maps to + // the 'Name' field in EndpointPort objects. + // Optional if only one ServicePort is defined on this service. + // +optional + optional string name = 1; + + // The IP protocol for this port. Supports "TCP" and "UDP". + // Default is TCP. + // +optional + optional string protocol = 2; + + // The port that will be exposed by this service. + optional int32 port = 3; + + // Number or name of the port to access on the pods targeted by the service. + // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + // If this is a string, it will be looked up as a named port in the + // target Pod's container ports. If this is not specified, the value + // of the 'port' field is used (an identity map). 
+ // This field is ignored for services with clusterIP=None, and should be + // omitted or set equal to the 'port' field. + // More info: http://kubernetes.io/docs/user-guide/services#defining-a-service + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4; + + // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. + // Usually assigned by the system. If specified, it will be allocated to the service + // if unused or else creation of the service will fail. + // Default is to auto-allocate a port if the ServiceType of this Service requires one. + // More info: http://kubernetes.io/docs/user-guide/services#type--nodeport + // +optional + optional int32 nodePort = 5; +} + +// ServiceProxyOptions is the query options to a Service's proxy call. +message ServiceProxyOptions { + // Path is the part of URLs that include service endpoints, suffixes, + // and parameters to use for the current proxy request to service. + // For example, the whole request URL is + // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. + // Path is _search?q=user:kimchy. + // +optional + optional string path = 1; +} + +// ServiceSpec describes the attributes that a user creates on a service. +message ServiceSpec { + // The list of ports that are exposed by this service. + // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + repeated ServicePort ports = 1; + + // Route service traffic to pods with label keys and values matching this + // selector. If empty or not present, the service is assumed to have an + // external process managing its endpoints, which Kubernetes will not + // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + // Ignored if type is ExternalName. + // More info: http://kubernetes.io/docs/user-guide/services#overview + // +optional + map selector = 2; + + // clusterIP is the IP address of the service and is usually assigned + // randomly by the master. If an address is specified manually and is not in + // use by others, it will be allocated to the service; otherwise, creation + // of the service will fail. This field can not be changed through updates. + // Valid values are "None", empty string (""), or a valid IP address. "None" + // can be specified for headless services when proxying is not required. + // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if + // type is ExternalName. + // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + // +optional + optional string clusterIP = 3; + + // type determines how the Service is exposed. Defaults to ClusterIP. Valid + // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + // "ExternalName" maps to the specified externalName. + // "ClusterIP" allocates a cluster-internal IP address for load-balancing to + // endpoints. Endpoints are determined by the selector or if that is not + // specified, by manual construction of an Endpoints object. If clusterIP is + // "None", no virtual IP is allocated and the endpoints are published as a + // set of endpoints rather than a stable IP. + // "NodePort" builds on ClusterIP and allocates a port on every node which + // routes to the clusterIP. + // "LoadBalancer" builds on NodePort and creates an + // external load-balancer (if supported in the current cloud) which routes + // to the clusterIP. 
+ // More info: http://kubernetes.io/docs/user-guide/services#overview + // +optional + optional string type = 4; + + // externalIPs is a list of IP addresses for which nodes in the cluster + // will also accept traffic for this service. These IPs are not managed by + // Kubernetes. The user is responsible for ensuring that traffic arrives + // at a node with this IP. A common example is external load-balancers + // that are not part of the Kubernetes system. A previous form of this + // functionality exists as the deprecatedPublicIPs field. When using this + // field, callers should also clear the deprecatedPublicIPs field. + // +optional + repeated string externalIPs = 5; + + // deprecatedPublicIPs is deprecated and replaced by the externalIPs field + // with almost the exact same semantics. This field is retained in the v1 + // API for compatibility until at least 8/20/2016. It will be removed from + // any new API revisions. If both deprecatedPublicIPs *and* externalIPs are + // set, deprecatedPublicIPs is used. + // +k8s:conversion-gen=false + // +optional + repeated string deprecatedPublicIPs = 6; + + // Supports "ClientIP" and "None". Used to maintain session affinity. + // Enable client IP based session affinity. + // Must be ClientIP or None. + // Defaults to None. + // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + // +optional + optional string sessionAffinity = 7; + + // Only applies to Service Type: LoadBalancer + // LoadBalancer will get created with the IP specified in this field. + // This feature depends on whether the underlying cloud-provider supports specifying + // the loadBalancerIP when a load balancer is created. + // This field will be ignored if the cloud-provider does not support the feature. + // +optional + optional string loadBalancerIP = 8; + + // If specified and supported by the platform, traffic through the cloud-provider + // load-balancer will be restricted to the specified client IPs. This field will be ignored if the + // cloud-provider does not support the feature. + // More info: http://kubernetes.io/docs/user-guide/services-firewalls + // +optional + repeated string loadBalancerSourceRanges = 9; + + // externalName is the external reference that kubedns or equivalent will + // return as a CNAME record for this service. No proxying will be involved. + // Must be a valid DNS name and requires Type to be ExternalName. + // +optional + optional string externalName = 10; +} + +// ServiceStatus represents the current status of a service. +message ServiceStatus { + // LoadBalancer contains the current status of the load-balancer, + // if one is present. + // +optional + optional LoadBalancerStatus loadBalancer = 1; +} + +message Sysctl { + optional string name = 1; + + optional string value = 2; +} + +// TCPSocketAction describes an action based on opening a socket +message TCPSocketAction { + // Number or name of the port to access on the container. + // Number must be in the range 1 to 65535. + // Name must be an IANA_SVC_NAME. + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 1; +} + +// The node this Taint is attached to has the effect "effect" on +// any pod that does not tolerate the Taint. +message Taint { + // Required. The taint key to be applied to a node. + optional string key = 1; + + // Required. The taint value corresponding to the taint key. + // +optional + optional string value = 2; + + // Required.
The effect of the taint on pods + // that do not tolerate the taint. + // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + optional string effect = 3; + + // TimeAdded represents the time at which the taint was added. + // It is only written for NoExecute taints. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4; +} + +// The pod this Toleration is attached to tolerates any taint that matches +// the triple <key,value,effect> using the matching operator <operator>. +message Toleration { + // Key is the taint key that the toleration applies to. Empty means match all taint keys. + // If the key is empty, operator must be Exists; this combination means to match all values and all keys. + // +optional + optional string key = 1; + + // Operator represents a key's relationship to the value. + // Valid operators are Exists and Equal. Defaults to Equal. + // Exists is equivalent to wildcard for value, so that a pod can + // tolerate all taints of a particular category. + // +optional + optional string operator = 2; + + // Value is the taint value the toleration matches to. + // If the operator is Exists, the value should be empty, otherwise just a regular string. + // +optional + optional string value = 3; + + // Effect indicates the taint effect to match. Empty means match all taint effects. + // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + // +optional + optional string effect = 4; + + // TolerationSeconds represents the period of time the toleration (which must be + // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + // it is not set, which means tolerate the taint forever (do not evict). Zero and + // negative values will be treated as 0 (evict immediately) by the system. + // +optional + optional int64 tolerationSeconds = 5; +} + +// Volume represents a named volume in a pod that may be accessed by any container in the pod. +message Volume { + // Volume's name. + // Must be a DNS_LABEL and unique within the pod. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + optional string name = 1; + + // VolumeSource represents the location and type of the mounted volume. + // If not specified, the Volume is implied to be an EmptyDir. + // This implied behavior is deprecated and will be removed in a future version. + optional VolumeSource volumeSource = 2; +} + +// VolumeMount describes a mounting of a Volume within a container. +message VolumeMount { + // This must match the Name of a Volume. + optional string name = 1; + + // Mounted read-only if true, read-write otherwise (false or unspecified). + // Defaults to false. + // +optional + optional bool readOnly = 2; + + // Path within the container at which the volume should be mounted. Must + // not contain ':'. + optional string mountPath = 3; + + // Path within the volume from which the container's volume should be mounted. + // Defaults to "" (volume's root). + // +optional + optional string subPath = 4; +} + +// Projection that may be projected along with other supported volume types +message VolumeProjection { + // information about the secret data to project + optional SecretProjection secret = 1; + + // information about the downwardAPI data to project + optional DownwardAPIProjection downwardAPI = 2; + + // information about the configMap data to project + optional ConfigMapProjection configMap = 3; +} + +// Represents the source of a volume to mount. +// Only one of its members may be specified.
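For illustration, a minimal Go sketch of the "only one member set" convention described above for Volume and VolumeSource. This snippet is separate from the vendored files; it assumes the generated Go types are importable from the vendored k8s.io/client-go/pkg/api/v1 package and that VolumeSource is embedded in Volume, as in that package.

```go
package main

import (
	"fmt"

	"k8s.io/client-go/pkg/api/v1"
)

func main() {
	// Exactly one VolumeSource member is set; every other member stays nil.
	vol := v1.Volume{
		Name: "scratch",
		VolumeSource: v1.VolumeSource{
			EmptyDir: &v1.EmptyDirVolumeSource{},
		},
	}
	fmt.Println(vol.Name, vol.EmptyDir != nil) // scratch true
}
```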
+message VolumeSource { + // HostPath represents a pre-existing file or directory on the host + // machine that is directly exposed to the container. This is generally + // used for system agents or other privileged things that are allowed + // to see the host machine. Most containers will NOT need this. + // More info: http://kubernetes.io/docs/user-guide/volumes#hostpath + // --- + // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + // mount host directories as read/write. + // +optional + optional HostPathVolumeSource hostPath = 1; + + // EmptyDir represents a temporary directory that shares a pod's lifetime. + // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // +optional + optional EmptyDirVolumeSource emptyDir = 2; + + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // +optional + optional GCEPersistentDiskVolumeSource gcePersistentDisk = 3; + + // AWSElasticBlockStore represents an AWS Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // +optional + optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4; + + // GitRepo represents a git repository at a particular revision. + // +optional + optional GitRepoVolumeSource gitRepo = 5; + + // Secret represents a secret that should populate this volume. + // More info: http://kubernetes.io/docs/user-guide/volumes#secrets + // +optional + optional SecretVolumeSource secret = 6; + + // NFS represents an NFS mount on the host that shares a pod's lifetime + // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // +optional + optional NFSVolumeSource nfs = 7; + + // ISCSI represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: http://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md + // +optional + optional ISCSIVolumeSource iscsi = 8; + + // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // +optional + optional GlusterfsVolumeSource glusterfs = 9; + + // PersistentVolumeClaimVolumeSource represents a reference to a + // PersistentVolumeClaim in the same namespace. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // +optional + optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10; + + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // +optional + optional RBDVolumeSource rbd = 11; + + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. This is an + // alpha feature and may change in future. 
+ // +optional + optional FlexVolumeSource flexVolume = 12; + + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + optional CinderVolumeSource cinder = 13; + + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + optional CephFSVolumeSource cephfs = 14; + + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + optional FlockerVolumeSource flocker = 15; + + // DownwardAPI represents downward API about the pod that should populate this volume + // +optional + optional DownwardAPIVolumeSource downwardAPI = 16; + + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + optional FCVolumeSource fc = 17; + + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + optional AzureFileVolumeSource azureFile = 18; + + // ConfigMap represents a configMap that should populate this volume + // +optional + optional ConfigMapVolumeSource configMap = 19; + + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + optional VsphereVirtualDiskVolumeSource vsphereVolume = 20; + + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + optional QuobyteVolumeSource quobyte = 21; + + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + optional AzureDiskVolumeSource azureDisk = 22; + + // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 23; + + // Items for all in one resources secrets, configmaps, and downward API + optional ProjectedVolumeSource projected = 26; + + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + optional PortworxVolumeSource portworxVolume = 24; + + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + optional ScaleIOVolumeSource scaleIO = 25; +} + +// Represents a vSphere volume resource. +message VsphereVirtualDiskVolumeSource { + // Path that identifies vSphere volume vmdk + optional string volumePath = 1; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + optional string fsType = 2; +} + +// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +message WeightedPodAffinityTerm { + // weight associated with matching the corresponding podAffinityTerm, + // in the range 1-100. + optional int32 weight = 1; + + // Required. A pod affinity term, associated with the corresponding weight. + optional PodAffinityTerm podAffinityTerm = 2; +} + diff --git a/vendor/k8s.io/client-go/pkg/api/v1/helpers.go b/vendor/k8s.io/client-go/pkg/api/v1/helpers.go new file mode 100644 index 0000000000..01f4ef4706 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/helpers.go @@ -0,0 +1,632 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "fmt" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + + "k8s.io/client-go/pkg/api" +) + +// IsOpaqueIntResourceName returns true if the resource name has the opaque +// integer resource prefix. +func IsOpaqueIntResourceName(name ResourceName) bool { + return strings.HasPrefix(string(name), ResourceOpaqueIntPrefix) +} + +// OpaqueIntResourceName returns a ResourceName with the canonical opaque +// integer prefix prepended. If the argument already has the prefix, it is +// returned unmodified. +func OpaqueIntResourceName(name string) ResourceName { + if IsOpaqueIntResourceName(ResourceName(name)) { + return ResourceName(name) + } + return ResourceName(fmt.Sprintf("%s%s", api.ResourceOpaqueIntPrefix, name)) +} + +// NewDeleteOptions returns a DeleteOptions indicating the resource should +// be deleted within the specified grace period. Use zero to indicate +// immediate deletion. If you would prefer to use the default grace period, +// use &metav1.DeleteOptions{} directly. +func NewDeleteOptions(grace int64) *DeleteOptions { + return &DeleteOptions{GracePeriodSeconds: &grace} +} + +// NewPreconditionDeleteOptions returns a DeleteOptions with a UID precondition set. +func NewPreconditionDeleteOptions(uid string) *DeleteOptions { + u := types.UID(uid) + p := Preconditions{UID: &u} + return &DeleteOptions{Preconditions: &p} +} + +// NewUIDPreconditions returns a Preconditions with UID set. +func NewUIDPreconditions(uid string) *Preconditions { + u := types.UID(uid) + return &Preconditions{UID: &u} +} + +// this function aims to check if the service's ClusterIP is set or not +// the objective is not to perform validation here +func IsServiceIPSet(service *Service) bool { + return service.Spec.ClusterIP != ClusterIPNone && service.Spec.ClusterIP != "" +} + +// this function aims to check if the service's cluster IP is requested or not +func IsServiceIPRequested(service *Service) bool { + // ExternalName services are CNAME aliases to external ones. Ignore the IP. 
+ if service.Spec.Type == ServiceTypeExternalName { + return false + } + return service.Spec.ClusterIP == "" +} + +var standardFinalizers = sets.NewString( + string(FinalizerKubernetes), + metav1.FinalizerOrphanDependents, +) + +func IsStandardFinalizerName(str string) bool { + return standardFinalizers.Has(str) +} + +// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, +// only if they do not already exist +func AddToNodeAddresses(addresses *[]NodeAddress, addAddresses ...NodeAddress) { + for _, add := range addAddresses { + exists := false + for _, existing := range *addresses { + if existing.Address == add.Address && existing.Type == add.Type { + exists = true + break + } + } + if !exists { + *addresses = append(*addresses, add) + } + } +} + +// TODO: make method on LoadBalancerStatus? +func LoadBalancerStatusEqual(l, r *LoadBalancerStatus) bool { + return ingressSliceEqual(l.Ingress, r.Ingress) +} + +func ingressSliceEqual(lhs, rhs []LoadBalancerIngress) bool { + if len(lhs) != len(rhs) { + return false + } + for i := range lhs { + if !ingressEqual(&lhs[i], &rhs[i]) { + return false + } + } + return true +} + +func ingressEqual(lhs, rhs *LoadBalancerIngress) bool { + if lhs.IP != rhs.IP { + return false + } + if lhs.Hostname != rhs.Hostname { + return false + } + return true +} + +// TODO: make method on LoadBalancerStatus? +func LoadBalancerStatusDeepCopy(lb *LoadBalancerStatus) *LoadBalancerStatus { + c := &LoadBalancerStatus{} + c.Ingress = make([]LoadBalancerIngress, len(lb.Ingress)) + for i := range lb.Ingress { + c.Ingress[i] = lb.Ingress[i] + } + return c +} + +// GetAccessModesAsString returns a string representation of an array of access modes. +// modes, when present, are always in the same order: RWO,ROX,RWX. +func GetAccessModesAsString(modes []PersistentVolumeAccessMode) string { + modes = removeDuplicateAccessModes(modes) + modesStr := []string{} + if containsAccessMode(modes, ReadWriteOnce) { + modesStr = append(modesStr, "RWO") + } + if containsAccessMode(modes, ReadOnlyMany) { + modesStr = append(modesStr, "ROX") + } + if containsAccessMode(modes, ReadWriteMany) { + modesStr = append(modesStr, "RWX") + } + return strings.Join(modesStr, ",") +} + +// GetAccessModesAsString returns an array of AccessModes from a string created by GetAccessModesAsString +func GetAccessModesFromString(modes string) []PersistentVolumeAccessMode { + strmodes := strings.Split(modes, ",") + accessModes := []PersistentVolumeAccessMode{} + for _, s := range strmodes { + s = strings.Trim(s, " ") + switch { + case s == "RWO": + accessModes = append(accessModes, ReadWriteOnce) + case s == "ROX": + accessModes = append(accessModes, ReadOnlyMany) + case s == "RWX": + accessModes = append(accessModes, ReadWriteMany) + } + } + return accessModes +} + +// removeDuplicateAccessModes returns an array of access modes without any duplicates +func removeDuplicateAccessModes(modes []PersistentVolumeAccessMode) []PersistentVolumeAccessMode { + accessModes := []PersistentVolumeAccessMode{} + for _, m := range modes { + if !containsAccessMode(accessModes, m) { + accessModes = append(accessModes, m) + } + } + return accessModes +} + +func containsAccessMode(modes []PersistentVolumeAccessMode, mode PersistentVolumeAccessMode) bool { + for _, m := range modes { + if m == mode { + return true + } + } + return false +} + +// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements +// labels.Selector. 
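For illustration, a minimal sketch of the access-mode helpers defined just above (GetAccessModesAsString and GetAccessModesFromString). It is not part of the vendored file and assumes the PersistentVolumeAccessMode constants (ReadWriteOnce, ReadOnlyMany) from the same v1 package, which are not shown in this hunk.

```go
package main

import (
	"fmt"

	"k8s.io/client-go/pkg/api/v1"
)

func main() {
	modes := []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany, v1.ReadWriteOnce, v1.ReadWriteOnce}

	// Duplicates are dropped and the output is always ordered RWO,ROX,RWX.
	s := v1.GetAccessModesAsString(modes)
	fmt.Println(s) // RWO,ROX

	// The abbreviated string parses back into access-mode constants.
	fmt.Println(v1.GetAccessModesFromString(s))
}
```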
+func NodeSelectorRequirementsAsSelector(nsm []NodeSelectorRequirement) (labels.Selector, error) { + if len(nsm) == 0 { + return labels.Nothing(), nil + } + selector := labels.NewSelector() + for _, expr := range nsm { + var op selection.Operator + switch expr.Operator { + case NodeSelectorOpIn: + op = selection.In + case NodeSelectorOpNotIn: + op = selection.NotIn + case NodeSelectorOpExists: + op = selection.Exists + case NodeSelectorOpDoesNotExist: + op = selection.DoesNotExist + case NodeSelectorOpGt: + op = selection.GreaterThan + case NodeSelectorOpLt: + op = selection.LessThan + default: + return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) + } + r, err := labels.NewRequirement(expr.Key, op, expr.Values) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + return selector, nil +} + +const ( + // SeccompPodAnnotationKey represents the key of a seccomp profile applied + // to all containers of a pod. + SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" + + // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied + // to one container of a pod. + SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" + + // CreatedByAnnotation represents the key used to store the spec(json) + // used to create the resource. + CreatedByAnnotation = "kubernetes.io/created-by" + + // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) + // in the Annotations of a Node. + PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" + + // SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure + // container of a pod. The annotation value is a comma separated list of sysctl_name=value + // key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by + // the kubelet. Pods with other sysctls will fail to launch. + SysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/sysctls" + + // UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure + // container of a pod. The annotation value is a comma separated list of sysctl_name=value + // key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly + // namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use + // is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet + // will fail to launch. + UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls" + + // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache + // an object (e.g. secret, config map) before fetching it again from apiserver. + // This annotation can be attached to node. + ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" + + // AffinityAnnotationKey represents the key of affinity data (json serialized) + // in the Annotations of a Pod. + // TODO: remove when alpha support for affinity is removed + AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity" +) + +// Tries to add a toleration to annotations list. Returns true if something was updated +// false otherwise. 
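For illustration, a minimal sketch of how NodeSelectorRequirementsAsSelector (defined above) converts API node-selector requirements into a labels.Selector. This is not part of the vendored file; the label key and zone values are made up for the example.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/pkg/api/v1"
)

func main() {
	reqs := []v1.NodeSelectorRequirement{{
		Key:      "failure-domain.beta.kubernetes.io/zone",
		Operator: v1.NodeSelectorOpIn,
		Values:   []string{"zone-a", "zone-b"},
	}}

	// Convert the requirements and match them against a node's labels.
	sel, err := v1.NodeSelectorRequirementsAsSelector(reqs)
	if err != nil {
		panic(err)
	}
	fmt.Println(sel.Matches(labels.Set{"failure-domain.beta.kubernetes.io/zone": "zone-a"})) // true
}
```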
+func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error) { + podTolerations := pod.Spec.Tolerations + + var newTolerations []Toleration + updated := false + for i := range podTolerations { + if toleration.MatchToleration(&podTolerations[i]) { + if api.Semantic.DeepEqual(toleration, podTolerations[i]) { + return false, nil + } + newTolerations = append(newTolerations, *toleration) + updated = true + continue + } + + newTolerations = append(newTolerations, podTolerations[i]) + } + + if !updated { + newTolerations = append(newTolerations, *toleration) + } + + pod.Spec.Tolerations = newTolerations + return true, nil +} + +// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>, +// if the two tolerations have same <key,effect,operator,value> combination, regard as they match. +// TODO: uniqueness check for tolerations in api validations. +func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { + return t.Key == tolerationToMatch.Key && + t.Effect == tolerationToMatch.Effect && + t.Operator == tolerationToMatch.Operator && + t.Value == tolerationToMatch.Value +} + +// ToleratesTaint checks if the toleration tolerates the taint. +// The matching follows the rules below: +// (1) Empty toleration.effect means to match all taint effects, +// otherwise taint effect must equal to toleration.effect. +// (2) If toleration.operator is 'Exists', it means to match all taint values. +// (3) Empty toleration.key means to match all taint keys. +// If toleration.key is empty, toleration.operator must be 'Exists'; +// this combination means to match all taint values and all taint keys. +func (t *Toleration) ToleratesTaint(taint *Taint) bool { + if len(t.Effect) > 0 && t.Effect != taint.Effect { + return false + } + + if len(t.Key) > 0 && t.Key != taint.Key { + return false + } + + // TODO: Use proper defaulting when Toleration becomes a field of PodSpec + switch t.Operator { + // empty operator means Equal + case "", TolerationOpEqual: + return t.Value == taint.Value + case TolerationOpExists: + return true + default: + return false + } +} + +// TolerationsTolerateTaint checks if taint is tolerated by any of the tolerations. +func TolerationsTolerateTaint(tolerations []Toleration, taint *Taint) bool { + for i := range tolerations { + if tolerations[i].ToleratesTaint(taint) { + return true + } + } + return false +} + +type taintsFilterFunc func(*Taint) bool + +// TolerationsTolerateTaintsWithFilter checks if the given tolerations tolerate +// all the taints that apply to the filter in the given taint list. +func TolerationsTolerateTaintsWithFilter(tolerations []Toleration, taints []Taint, applyFilter taintsFilterFunc) bool { + if len(taints) == 0 { + return true + } + + for i := range taints { + if applyFilter != nil && !applyFilter(&taints[i]) { + continue + } + + if !TolerationsTolerateTaint(tolerations, &taints[i]) { + return false + } + } + + return true +} + +// DeleteTaintsByKey removes all the taints that have the same key as the given taintKey +func DeleteTaintsByKey(taints []Taint, taintKey string) ([]Taint, bool) { + newTaints := []Taint{} + deleted := false + for i := range taints { + if taintKey == taints[i].Key { + deleted = true + continue + } + newTaints = append(newTaints, taints[i]) + } + return newTaints, deleted +} + +// DeleteTaint removes all the taints that have the same key and effect as the given taintToDelete.
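For illustration, a minimal sketch of the matching rules documented on ToleratesTaint above. It is not part of the vendored file; the TaintEffectNoSchedule and TolerationOp* constants are assumed to come from the same v1 package, and the taint key and value are invented for the example.

```go
package main

import (
	"fmt"

	"k8s.io/client-go/pkg/api/v1"
)

func main() {
	taint := v1.Taint{Key: "dedicated", Value: "gpu", Effect: v1.TaintEffectNoSchedule}

	// Equal operator: key, value and effect must all line up.
	tol := v1.Toleration{
		Key:      "dedicated",
		Operator: v1.TolerationOpEqual,
		Value:    "gpu",
		Effect:   v1.TaintEffectNoSchedule,
	}
	fmt.Println(tol.ToleratesTaint(&taint)) // true

	// Exists with an empty key tolerates every taint.
	wildcard := v1.Toleration{Operator: v1.TolerationOpExists}
	fmt.Println(v1.TolerationsTolerateTaint([]v1.Toleration{wildcard}, &taint)) // true
}
```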
+func DeleteTaint(taints []Taint, taintToDelete *Taint) ([]Taint, bool) { + newTaints := []Taint{} + deleted := false + for i := range taints { + if taintToDelete.MatchTaint(&taints[i]) { + deleted = true + continue + } + newTaints = append(newTaints, taints[i]) + } + return newTaints, deleted +} + +// Returns true and list of Tolerations matching all Taints if all are tolerated, or false otherwise. +func GetMatchingTolerations(taints []Taint, tolerations []Toleration) (bool, []Toleration) { + if len(taints) == 0 { + return true, []Toleration{} + } + if len(tolerations) == 0 && len(taints) > 0 { + return false, []Toleration{} + } + result := []Toleration{} + for i := range taints { + tolerated := false + for j := range tolerations { + if tolerations[j].ToleratesTaint(&taints[i]) { + result = append(result, tolerations[j]) + tolerated = true + break + } + } + if !tolerated { + return false, []Toleration{} + } + } + return true, result +} + +// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect, +// if the two taints have same key:effect, regard as they match. +func (t *Taint) MatchTaint(taintToMatch *Taint) bool { + return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect +} + +// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. +func (t *Taint) ToString() string { + if len(t.Value) == 0 { + return fmt.Sprintf("%v:%v", t.Key, t.Effect) + } + return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect) +} + +func GetAvoidPodsFromNodeAnnotations(annotations map[string]string) (AvoidPods, error) { + var avoidPods AvoidPods + if len(annotations) > 0 && annotations[PreferAvoidPodsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[PreferAvoidPodsAnnotationKey]), &avoidPods) + if err != nil { + return avoidPods, err + } + } + return avoidPods, nil +} + +// SysctlsFromPodAnnotations parses the sysctl annotations into a slice of safe Sysctls +// and a slice of unsafe Sysctls. This is only a convenience wrapper around +// SysctlsFromPodAnnotation. +func SysctlsFromPodAnnotations(a map[string]string) ([]Sysctl, []Sysctl, error) { + safe, err := SysctlsFromPodAnnotation(a[SysctlsPodAnnotationKey]) + if err != nil { + return nil, nil, err + } + unsafe, err := SysctlsFromPodAnnotation(a[UnsafeSysctlsPodAnnotationKey]) + if err != nil { + return nil, nil, err + } + + return safe, unsafe, nil +} + +// SysctlsFromPodAnnotation parses an annotation value into a slice of Sysctls. +func SysctlsFromPodAnnotation(annotation string) ([]Sysctl, error) { + if len(annotation) == 0 { + return nil, nil + } + + kvs := strings.Split(annotation, ",") + sysctls := make([]Sysctl, len(kvs)) + for i, kv := range kvs { + cs := strings.Split(kv, "=") + if len(cs) != 2 || len(cs[0]) == 0 { + return nil, fmt.Errorf("sysctl %q not of the format sysctl_name=value", kv) + } + sysctls[i].Name = cs[0] + sysctls[i].Value = cs[1] + } + return sysctls, nil +} + +// PodAnnotationsFromSysctls creates an annotation value for a slice of Sysctls. +func PodAnnotationsFromSysctls(sysctls []Sysctl) string { + if len(sysctls) == 0 { + return "" + } + + kvs := make([]string, len(sysctls)) + for i := range sysctls { + kvs[i] = fmt.Sprintf("%s=%s", sysctls[i].Name, sysctls[i].Value) + } + return strings.Join(kvs, ",") +} + +type Sysctl struct { + Name string `protobuf:"bytes,1,opt,name=name"` + Value string `protobuf:"bytes,2,opt,name=value"` +} + +// NodeResources is an object for conveying resource information about a node. 
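For illustration, a minimal sketch of the comma separated sysctl_name=value format handled by the sysctl helpers above. It is not part of the vendored file; the sysctl names are arbitrary examples.

```go
package main

import (
	"fmt"

	"k8s.io/client-go/pkg/api/v1"
)

func main() {
	// Parse the comma separated sysctl_name=value annotation value.
	sysctls, err := v1.SysctlsFromPodAnnotation("net.core.somaxconn=1024,kernel.shm_rmid_forced=1")
	if err != nil {
		panic(err)
	}
	for _, s := range sysctls {
		fmt.Printf("%s = %s\n", s.Name, s.Value)
	}

	// Serializing back yields the same comma separated form.
	fmt.Println(v1.PodAnnotationsFromSysctls(sysctls))
}
```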
+// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. +type NodeResources struct { + // Capacity represents the available resources of a node + Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` +} + +// Tries to add a taint to annotations list. Returns a new copy of updated Node and true if something was updated +// false otherwise. +func AddOrUpdateTaint(node *Node, taint *Taint) (*Node, bool, error) { + objCopy, err := api.Scheme.DeepCopy(node) + if err != nil { + return nil, false, err + } + newNode := objCopy.(*Node) + nodeTaints := newNode.Spec.Taints + + var newTaints []Taint + updated := false + for i := range nodeTaints { + if taint.MatchTaint(&nodeTaints[i]) { + if api.Semantic.DeepEqual(taint, nodeTaints[i]) { + return newNode, false, nil + } + newTaints = append(newTaints, *taint) + updated = true + continue + } + + newTaints = append(newTaints, nodeTaints[i]) + } + + if !updated { + newTaints = append(newTaints, *taint) + } + + newNode.Spec.Taints = newTaints + return newNode, true, nil +} + +func TaintExists(taints []Taint, taintToFind *Taint) bool { + for _, taint := range taints { + if taint.MatchTaint(taintToFind) { + return true + } + } + return false +} + +// Tries to remove a taint from annotations list. Returns a new copy of updated Node and true if something was updated +// false otherwise. +func RemoveTaint(node *Node, taint *Taint) (*Node, bool, error) { + objCopy, err := api.Scheme.DeepCopy(node) + if err != nil { + return nil, false, err + } + newNode := objCopy.(*Node) + nodeTaints := newNode.Spec.Taints + if len(nodeTaints) == 0 { + return newNode, false, nil + } + + if !TaintExists(nodeTaints, taint) { + return newNode, false, nil + } + + newTaints, _ := DeleteTaint(nodeTaints, taint) + newNode.Spec.Taints = newTaints + return newNode, true, nil +} + +// GetAffinityFromPodAnnotations gets the json serialized affinity data from Pod.Annotations +// and converts it to the Affinity type in api. +// TODO: remove when alpha support for affinity is removed +func GetAffinityFromPodAnnotations(annotations map[string]string) (*Affinity, error) { + if len(annotations) > 0 && annotations[AffinityAnnotationKey] != "" { + var affinity Affinity + err := json.Unmarshal([]byte(annotations[AffinityAnnotationKey]), &affinity) + if err != nil { + return nil, err + } + return &affinity, nil + } + return nil, nil +} + +// GetPersistentVolumeClass returns StorageClassName. +func GetPersistentVolumeClass(volume *PersistentVolume) string { + // Use beta annotation first + if class, found := volume.Annotations[BetaStorageClassAnnotation]; found { + return class + } + + return volume.Spec.StorageClassName +} + +// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was +// requested, it returns "". +func GetPersistentVolumeClaimClass(claim *PersistentVolumeClaim) string { + // Use beta annotation first + if class, found := claim.Annotations[BetaStorageClassAnnotation]; found { + return class + } + + if claim.Spec.StorageClassName != nil { + return *claim.Spec.StorageClassName + } + + return "" +} + +// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field. 
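For illustration, a minimal sketch of GetPersistentVolumeClaimClass and PersistentVolumeClaimHasClass (declared above), which prefer the beta storage-class annotation over the spec field. It is not part of the vendored file; the BetaStorageClassAnnotation constant is assumed to be defined elsewhere in this v1 package, and the class names are invented.

```go
package main

import (
	"fmt"

	"k8s.io/client-go/pkg/api/v1"
)

func main() {
	claim := &v1.PersistentVolumeClaim{}
	class := "fast"
	claim.Spec.StorageClassName = &class

	// The beta annotation, when present, wins over Spec.StorageClassName.
	claim.Annotations = map[string]string{v1.BetaStorageClassAnnotation: "slow"}

	fmt.Println(v1.GetPersistentVolumeClaimClass(claim)) // slow
	fmt.Println(v1.PersistentVolumeClaimHasClass(claim)) // true
}
```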
+func PersistentVolumeClaimHasClass(claim *PersistentVolumeClaim) bool { + // Use beta annotation first + if _, found := claim.Annotations[BetaStorageClassAnnotation]; found { + return true + } + + if claim.Spec.StorageClassName != nil { + return true + } + + return false +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/meta.go b/vendor/k8s.io/client-go/pkg/api/v1/meta.go new file mode 100644 index 0000000000..bb1ae2ff79 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/meta.go @@ -0,0 +1,98 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +func (obj *ObjectMeta) GetObjectMeta() metav1.Object { return obj } + +// Namespace implements metav1.Object for any object with an ObjectMeta typed field. Allows +// fast, direct access to metadata fields for API objects. +func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace } +func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace } +func (meta *ObjectMeta) GetName() string { return meta.Name } +func (meta *ObjectMeta) SetName(name string) { meta.Name = name } +func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName } +func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName } +func (meta *ObjectMeta) GetUID() types.UID { return meta.UID } +func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid } +func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion } +func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } +func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink } +func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } +func (meta *ObjectMeta) GetCreationTimestamp() metav1.Time { return meta.CreationTimestamp } +func (meta *ObjectMeta) SetCreationTimestamp(creationTimestamp metav1.Time) { + meta.CreationTimestamp = creationTimestamp +} +func (meta *ObjectMeta) GetDeletionTimestamp() *metav1.Time { return meta.DeletionTimestamp } +func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *metav1.Time) { + meta.DeletionTimestamp = deletionTimestamp +} +func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels } +func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels } +func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations } +func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations } +func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } +func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } + +func (meta *ObjectMeta) GetOwnerReferences() []metav1.OwnerReference { + ret := make([]metav1.OwnerReference, len(meta.OwnerReferences)) + for i := 0; i < len(meta.OwnerReferences); i++ { + ret[i].Kind = 
meta.OwnerReferences[i].Kind + ret[i].Name = meta.OwnerReferences[i].Name + ret[i].UID = meta.OwnerReferences[i].UID + ret[i].APIVersion = meta.OwnerReferences[i].APIVersion + if meta.OwnerReferences[i].Controller != nil { + value := *meta.OwnerReferences[i].Controller + ret[i].Controller = &value + } + if meta.OwnerReferences[i].BlockOwnerDeletion != nil { + value := *meta.OwnerReferences[i].BlockOwnerDeletion + ret[i].BlockOwnerDeletion = &value + } + } + return ret +} + +func (meta *ObjectMeta) SetOwnerReferences(references []metav1.OwnerReference) { + newReferences := make([]metav1.OwnerReference, len(references)) + for i := 0; i < len(references); i++ { + newReferences[i].Kind = references[i].Kind + newReferences[i].Name = references[i].Name + newReferences[i].UID = references[i].UID + newReferences[i].APIVersion = references[i].APIVersion + if references[i].Controller != nil { + value := *references[i].Controller + newReferences[i].Controller = &value + } + if references[i].BlockOwnerDeletion != nil { + value := *references[i].BlockOwnerDeletion + newReferences[i].BlockOwnerDeletion = &value + } + } + meta.OwnerReferences = newReferences +} + +func (meta *ObjectMeta) GetClusterName() string { + return meta.ClusterName +} +func (meta *ObjectMeta) SetClusterName(clusterName string) { + meta.ClusterName = clusterName +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/ref.go b/vendor/k8s.io/client-go/pkg/api/v1/ref.go new file mode 100644 index 0000000000..5d33719fef --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/ref.go @@ -0,0 +1,133 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "errors" + "fmt" + "net/url" + "strings" + + "k8s.io/apimachinery/pkg/runtime/schema" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" +) + +var ( + // Errors that could be returned by GetReference. + ErrNilObject = errors.New("can't reference a nil object") + ErrNoSelfLink = errors.New("selfLink was empty, can't make reference") +) + +// GetReference returns an ObjectReference which refers to the given +// object, or an error if the object doesn't follow the conventions +// that would allow this. +// TODO: should take a meta.Interface see http://issue.k8s.io/7127 +func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference, error) { + if obj == nil { + return nil, ErrNilObject + } + if ref, ok := obj.(*ObjectReference); ok { + // Don't make a reference to a reference. 
+ return ref, nil + } + + gvk := obj.GetObjectKind().GroupVersionKind() + + // if the object referenced is actually persisted, we can just get kind from meta + // if we are building an object reference to something not yet persisted, we should fallback to scheme + kind := gvk.Kind + if len(kind) == 0 { + // TODO: this is wrong + gvks, _, err := scheme.ObjectKinds(obj) + if err != nil { + return nil, err + } + kind = gvks[0].Kind + } + + // An object that implements only List has enough metadata to build a reference + var listMeta meta.List + objectMeta, err := meta.Accessor(obj) + if err != nil { + listMeta, err = meta.ListAccessor(obj) + if err != nil { + return nil, err + } + } else { + listMeta = objectMeta + } + + // if the object referenced is actually persisted, we can also get version from meta + version := gvk.GroupVersion().String() + if len(version) == 0 { + selfLink := listMeta.GetSelfLink() + if len(selfLink) == 0 { + return nil, ErrNoSelfLink + } + selfLinkUrl, err := url.Parse(selfLink) + if err != nil { + return nil, err + } + // example paths: ///* + parts := strings.Split(selfLinkUrl.Path, "/") + if len(parts) < 3 { + return nil, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version) + } + version = parts[2] + } + + // only has list metadata + if objectMeta == nil { + return &ObjectReference{ + Kind: kind, + APIVersion: version, + ResourceVersion: listMeta.GetResourceVersion(), + }, nil + } + + return &ObjectReference{ + Kind: kind, + APIVersion: version, + Name: objectMeta.GetName(), + Namespace: objectMeta.GetNamespace(), + UID: objectMeta.GetUID(), + ResourceVersion: objectMeta.GetResourceVersion(), + }, nil +} + +// GetPartialReference is exactly like GetReference, but allows you to set the FieldPath. +func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*ObjectReference, error) { + ref, err := GetReference(scheme, obj) + if err != nil { + return nil, err + } + ref.FieldPath = fieldPath + return ref, nil +} + +// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that +// intend only to get a reference to that object. This simplifies the event recording interface. +func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} +func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/vendor/k8s.io/client-go/pkg/api/v1/register.go b/vendor/k8s.io/client-go/pkg/api/v1/register.go new file mode 100644 index 0000000000..5c2dfddd11 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/register.go @@ -0,0 +1,96 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs, addFastPathConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Pod{}, + &PodList{}, + &PodStatusResult{}, + &PodTemplate{}, + &PodTemplateList{}, + &ReplicationController{}, + &ReplicationControllerList{}, + &Service{}, + &ServiceProxyOptions{}, + &ServiceList{}, + &Endpoints{}, + &EndpointsList{}, + &Node{}, + &NodeList{}, + &NodeProxyOptions{}, + &Binding{}, + &Event{}, + &EventList{}, + &List{}, + &LimitRange{}, + &LimitRangeList{}, + &ResourceQuota{}, + &ResourceQuotaList{}, + &Namespace{}, + &NamespaceList{}, + &Secret{}, + &SecretList{}, + &ServiceAccount{}, + &ServiceAccountList{}, + &PersistentVolume{}, + &PersistentVolumeList{}, + &PersistentVolumeClaim{}, + &PersistentVolumeClaimList{}, + &PodAttachOptions{}, + &PodLogOptions{}, + &PodExecOptions{}, + &PodPortForwardOptions{}, + &PodProxyOptions{}, + &ComponentStatus{}, + &ComponentStatusList{}, + &SerializedReference{}, + &RangeAllocation{}, + &ConfigMap{}, + &ConfigMapList{}, + ) + + // Add common types + scheme.AddKnownTypes(SchemeGroupVersion, &metav1.Status{}) + + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/resource_helpers.go b/vendor/k8s.io/client-go/pkg/api/v1/resource_helpers.go new file mode 100644 index 0000000000..ec84232762 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/resource_helpers.go @@ -0,0 +1,257 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Returns string version of ResourceName. +func (self ResourceName) String() string { + return string(self) +} + +// Returns the CPU limit if specified. +func (self *ResourceList) Cpu() *resource.Quantity { + if val, ok := (*self)[ResourceCPU]; ok { + return &val + } + return &resource.Quantity{Format: resource.DecimalSI} +} + +// Returns the Memory limit if specified. 
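// Editorial aside, not part of the vendored patch: a short sketch, assuming the vendored
// import paths below resolve as written, of (a) AddToScheme registering the v1 types
// listed in addKnownTypes above into a fresh runtime.Scheme and (b) the ResourceList
// Cpu() accessor defined just above. Quantities are illustrative only.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/runtime"
	v1 "k8s.io/client-go/pkg/api/v1"
)

func main() {
	// Register Pod, Service, Node, etc. with a scheme of our own.
	scheme := runtime.NewScheme()
	if err := v1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	fmt.Println(scheme.Recognizes(v1.SchemeGroupVersion.WithKind("Pod"))) // true

	// Cpu() returns the stored quantity, or a zero DecimalSI quantity when absent.
	limits := v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m")}
	fmt.Println(limits.Cpu().MilliValue()) // 250
}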
+func (self *ResourceList) Memory() *resource.Quantity { + if val, ok := (*self)[ResourceMemory]; ok { + return &val + } + return &resource.Quantity{Format: resource.BinarySI} +} + +func (self *ResourceList) Pods() *resource.Quantity { + if val, ok := (*self)[ResourcePods]; ok { + return &val + } + return &resource.Quantity{} +} + +func (self *ResourceList) NvidiaGPU() *resource.Quantity { + if val, ok := (*self)[ResourceNvidiaGPU]; ok { + return &val + } + return &resource.Quantity{} +} + +func GetContainerStatus(statuses []ContainerStatus, name string) (ContainerStatus, bool) { + for i := range statuses { + if statuses[i].Name == name { + return statuses[i], true + } + } + return ContainerStatus{}, false +} + +func GetExistingContainerStatus(statuses []ContainerStatus, name string) ContainerStatus { + for i := range statuses { + if statuses[i].Name == name { + return statuses[i] + } + } + return ContainerStatus{} +} + +// IsPodAvailable returns true if a pod is available; false otherwise. +// Precondition for an available pod is that it must be ready. On top +// of that, there are two cases when a pod can be considered available: +// 1. minReadySeconds == 0, or +// 2. LastTransitionTime (is set) + minReadySeconds < current time +func IsPodAvailable(pod *Pod, minReadySeconds int32, now metav1.Time) bool { + if !IsPodReady(pod) { + return false + } + + c := GetPodReadyCondition(pod.Status) + minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second + if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) { + return true + } + return false +} + +// IsPodReady returns true if a pod is ready; false otherwise. +func IsPodReady(pod *Pod) bool { + return IsPodReadyConditionTrue(pod.Status) +} + +// IsPodReady retruns true if a pod is ready; false otherwise. +func IsPodReadyConditionTrue(status PodStatus) bool { + condition := GetPodReadyCondition(status) + return condition != nil && condition.Status == ConditionTrue +} + +// Extracts the pod ready condition from the given status and returns that. +// Returns nil if the condition is not present. +func GetPodReadyCondition(status PodStatus) *PodCondition { + _, condition := GetPodCondition(&status, PodReady) + return condition +} + +// GetPodCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func GetPodCondition(status *PodStatus, conditionType PodConditionType) (int, *PodCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} + +// GetNodeCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func GetNodeCondition(status *NodeStatus, conditionType NodeConditionType) (int, *NodeCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} + +// Updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the +// status has changed. +// Returns true if pod condition has changed or has been added. 
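// Editorial aside, not part of the vendored patch: a minimal sketch of the pod-condition
// helpers above (IsPodReady, GetPodCondition, IsPodAvailable), assuming the vendored
// import paths below; the pod status is hand-built purely for illustration.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1 "k8s.io/client-go/pkg/api/v1"
)

func main() {
	pod := &v1.Pod{}
	pod.Status.Conditions = []v1.PodCondition{{
		Type:               v1.PodReady,
		Status:             v1.ConditionTrue,
		LastTransitionTime: metav1.Now(),
	}}

	// IsPodReady walks the conditions looking for PodReady == ConditionTrue.
	fmt.Println(v1.IsPodReady(pod)) // true

	// GetPodCondition also reports the index of the located condition.
	idx, ready := v1.GetPodCondition(&pod.Status, v1.PodReady)
	fmt.Println(idx, ready.Status) // 0 True

	// IsPodAvailable additionally enforces minReadySeconds; with 0 it only
	// requires readiness.
	fmt.Println(v1.IsPodAvailable(pod, 0, metav1.Now())) // true
}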
+func UpdatePodCondition(status *PodStatus, condition *PodCondition) bool { + condition.LastTransitionTime = metav1.Now() + // Try to find this pod condition. + conditionIndex, oldCondition := GetPodCondition(status, condition.Type) + + if oldCondition == nil { + // We are adding new pod condition. + status.Conditions = append(status.Conditions, *condition) + return true + } else { + // We are updating an existing condition, so we need to check if it has changed. + if condition.Status == oldCondition.Status { + condition.LastTransitionTime = oldCondition.LastTransitionTime + } + + isEqual := condition.Status == oldCondition.Status && + condition.Reason == oldCondition.Reason && + condition.Message == oldCondition.Message && + condition.LastProbeTime.Equal(oldCondition.LastProbeTime) && + condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime) + + status.Conditions[conditionIndex] = *condition + // Return true if one of the fields have changed. + return !isEqual + } +} + +// IsNodeReady returns true if a node is ready; false otherwise. +func IsNodeReady(node *Node) bool { + for _, c := range node.Status.Conditions { + if c.Type == NodeReady { + return c.Status == ConditionTrue + } + } + return false +} + +// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all +// containers of the pod. +func PodRequestsAndLimits(pod *Pod) (reqs map[ResourceName]resource.Quantity, limits map[ResourceName]resource.Quantity, err error) { + reqs, limits = map[ResourceName]resource.Quantity{}, map[ResourceName]resource.Quantity{} + for _, container := range pod.Spec.Containers { + for name, quantity := range container.Resources.Requests { + if value, ok := reqs[name]; !ok { + reqs[name] = *quantity.Copy() + } else { + value.Add(quantity) + reqs[name] = value + } + } + for name, quantity := range container.Resources.Limits { + if value, ok := limits[name]; !ok { + limits[name] = *quantity.Copy() + } else { + value.Add(quantity) + limits[name] = value + } + } + } + // init containers define the minimum of any resource + for _, container := range pod.Spec.InitContainers { + for name, quantity := range container.Resources.Requests { + value, ok := reqs[name] + if !ok { + reqs[name] = *quantity.Copy() + continue + } + if quantity.Cmp(value) > 0 { + reqs[name] = *quantity.Copy() + } + } + for name, quantity := range container.Resources.Limits { + value, ok := limits[name] + if !ok { + limits[name] = *quantity.Copy() + continue + } + if quantity.Cmp(value) > 0 { + limits[name] = *quantity.Copy() + } + } + } + return +} + +// finds and returns the request for a specific resource. 
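// Editorial aside, not part of the vendored patch: a small sketch of PodRequestsAndLimits
// (above) and GetResourceRequest (which follows this comment), assuming the vendored
// import paths below; container names and quantities are illustrative only.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	v1 "k8s.io/client-go/pkg/api/v1"
)

func main() {
	pod := &v1.Pod{}
	pod.Spec.Containers = []v1.Container{{
		Name: "app",
		Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("100m"),
				v1.ResourceMemory: resource.MustParse("64Mi"),
			},
		},
	}}

	// PodRequestsAndLimits sums requests/limits across containers and folds in
	// init containers as a per-resource maximum.
	reqs, limits, err := v1.PodRequestsAndLimits(pod)
	if err != nil {
		panic(err)
	}
	cpu := reqs[v1.ResourceCPU]
	fmt.Println(cpu.MilliValue(), len(limits)) // 100 0

	// GetResourceRequest reports CPU in millicores and other resources in their base unit.
	fmt.Println(v1.GetResourceRequest(pod, v1.ResourceCPU)) // 100
}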
+func GetResourceRequest(pod *Pod, resource ResourceName) int64 { + if resource == ResourcePods { + return 1 + } + totalResources := int64(0) + for _, container := range pod.Spec.Containers { + if rQuantity, ok := container.Resources.Requests[resource]; ok { + if resource == ResourceCPU { + totalResources += rQuantity.MilliValue() + } else { + totalResources += rQuantity.Value() + } + } + } + // take max_resource(sum_pod, any_init_container) + for _, container := range pod.Spec.InitContainers { + if rQuantity, ok := container.Resources.Requests[resource]; ok { + if resource == ResourceCPU && rQuantity.MilliValue() > totalResources { + totalResources = rQuantity.MilliValue() + } else if rQuantity.Value() > totalResources { + totalResources = rQuantity.Value() + } + } + } + return totalResources +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/types.generated.go b/vendor/k8s.io/client-go/pkg/api/v1/types.generated.go new file mode 100644 index 0000000000..c6fd805aa5 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/types.generated.go @@ -0,0 +1,73800 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package v1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg3_resource "k8s.io/apimachinery/pkg/api/resource" + pkg2_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg5_runtime "k8s.io/apimachinery/pkg/runtime" + pkg1_types "k8s.io/apimachinery/pkg/types" + pkg4_intstr "k8s.io/apimachinery/pkg/util/intstr" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg3_resource.Quantity + var v1 pkg2_v1.Time + var v2 pkg5_runtime.RawExtension + var v3 pkg1_types.UID + var v4 pkg4_intstr.IntOrString + var v5 time.Time + _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 + } +} + +func (x *ObjectMeta) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [15]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = x.GenerateName != "" + yyq2[2] = x.Namespace != "" + yyq2[3] = x.SelfLink != "" + yyq2[4] = x.UID != "" + yyq2[5] = x.ResourceVersion != "" + yyq2[6] = x.Generation != 0 + yyq2[7] = true + yyq2[8] = x.DeletionTimestamp != nil + yyq2[9] = x.DeletionGracePeriodSeconds != nil + yyq2[10] = len(x.Labels) != 0 + yyq2[11] = len(x.Annotations) != 0 + yyq2[12] = len(x.OwnerReferences) != 0 + yyq2[13] = len(x.Finalizers) != 0 + yyq2[14] = x.ClusterName != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(15) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("generateName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, 
string(x.SelfLink)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selfLink")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(x.UID) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("uid")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.EncExt(x.UID) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeInt(int64(x.Generation)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("generation")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeInt(int64(x.Generation)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yy25 := &x.CreationTimestamp + yym26 := z.EncBinary() + _ = yym26 + if false { + } else if z.HasExtensions() && z.EncExt(yy25) { + } else if yym26 { + z.EncBinaryMarshal(yy25) + } else if !yym26 && z.IsJSONHandle() { + z.EncJSONMarshal(yy25) + } else { + z.EncFallback(yy25) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("creationTimestamp")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy27 := &x.CreationTimestamp + yym28 := z.EncBinary() + _ = yym28 + if false { + } else if z.HasExtensions() && z.EncExt(yy27) { + } else if yym28 { + z.EncBinaryMarshal(yy27) + } else if !yym28 && z.IsJSONHandle() { + z.EncJSONMarshal(yy27) + } else { + z.EncFallback(yy27) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.DeletionTimestamp == nil { + r.EncodeNil() + } else { + yym30 := z.EncBinary() + _ = yym30 + if false { + } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { + } else if yym30 { + 
z.EncBinaryMarshal(x.DeletionTimestamp) + } else if !yym30 && z.IsJSONHandle() { + z.EncJSONMarshal(x.DeletionTimestamp) + } else { + z.EncFallback(x.DeletionTimestamp) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("deletionTimestamp")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DeletionTimestamp == nil { + r.EncodeNil() + } else { + yym31 := z.EncBinary() + _ = yym31 + if false { + } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { + } else if yym31 { + z.EncBinaryMarshal(x.DeletionTimestamp) + } else if !yym31 && z.IsJSONHandle() { + z.EncJSONMarshal(x.DeletionTimestamp) + } else { + z.EncFallback(x.DeletionTimestamp) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.DeletionGracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy33 := *x.DeletionGracePeriodSeconds + yym34 := z.EncBinary() + _ = yym34 + if false { + } else { + r.EncodeInt(int64(yy33)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("deletionGracePeriodSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DeletionGracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy35 := *x.DeletionGracePeriodSeconds + yym36 := z.EncBinary() + _ = yym36 + if false { + } else { + r.EncodeInt(int64(yy35)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.Labels == nil { + r.EncodeNil() + } else { + yym38 := z.EncBinary() + _ = yym38 + if false { + } else { + z.F.EncMapStringStringV(x.Labels, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("labels")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Labels == nil { + r.EncodeNil() + } else { + yym39 := z.EncBinary() + _ = yym39 + if false { + } else { + z.F.EncMapStringStringV(x.Labels, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + if x.Annotations == nil { + r.EncodeNil() + } else { + yym41 := z.EncBinary() + _ = yym41 + if false { + } else { + z.F.EncMapStringStringV(x.Annotations, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("annotations")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Annotations == nil { + r.EncodeNil() + } else { + yym42 := z.EncBinary() + _ = yym42 + if false { + } else { + z.F.EncMapStringStringV(x.Annotations, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + if x.OwnerReferences == nil { + r.EncodeNil() + } else { + yym44 := z.EncBinary() + _ = yym44 + if false { + } else { + h.encSlicev1_OwnerReference(([]pkg2_v1.OwnerReference)(x.OwnerReferences), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ownerReferences")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.OwnerReferences == nil { + 
r.EncodeNil() + } else { + yym45 := z.EncBinary() + _ = yym45 + if false { + } else { + h.encSlicev1_OwnerReference(([]pkg2_v1.OwnerReference)(x.OwnerReferences), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + if x.Finalizers == nil { + r.EncodeNil() + } else { + yym47 := z.EncBinary() + _ = yym47 + if false { + } else { + z.F.EncSliceStringV(x.Finalizers, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("finalizers")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Finalizers == nil { + r.EncodeNil() + } else { + yym48 := z.EncBinary() + _ = yym48 + if false { + } else { + z.F.EncSliceStringV(x.Finalizers, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[14] { + yym50 := z.EncBinary() + _ = yym50 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[14] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("clusterName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym51 := z.EncBinary() + _ = yym51 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ObjectMeta) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ObjectMeta) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "generateName": + if r.TryDecodeAsNil() { + x.GenerateName = "" + } else { + yyv6 := &x.GenerateName + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv8 := &x.Namespace + yym9 
:= z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "selfLink": + if r.TryDecodeAsNil() { + x.SelfLink = "" + } else { + yyv10 := &x.SelfLink + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "uid": + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv12 := &x.UID + yym13 := z.DecBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.DecExt(yyv12) { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "resourceVersion": + if r.TryDecodeAsNil() { + x.ResourceVersion = "" + } else { + yyv14 := &x.ResourceVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "generation": + if r.TryDecodeAsNil() { + x.Generation = 0 + } else { + yyv16 := &x.Generation + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*int64)(yyv16)) = int64(r.DecodeInt(64)) + } + } + case "creationTimestamp": + if r.TryDecodeAsNil() { + x.CreationTimestamp = pkg2_v1.Time{} + } else { + yyv18 := &x.CreationTimestamp + yym19 := z.DecBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.DecExt(yyv18) { + } else if yym19 { + z.DecBinaryUnmarshal(yyv18) + } else if !yym19 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv18) + } else { + z.DecFallback(yyv18, false) + } + } + case "deletionTimestamp": + if r.TryDecodeAsNil() { + if x.DeletionTimestamp != nil { + x.DeletionTimestamp = nil + } + } else { + if x.DeletionTimestamp == nil { + x.DeletionTimestamp = new(pkg2_v1.Time) + } + yym21 := z.DecBinary() + _ = yym21 + if false { + } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { + } else if yym21 { + z.DecBinaryUnmarshal(x.DeletionTimestamp) + } else if !yym21 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.DeletionTimestamp) + } else { + z.DecFallback(x.DeletionTimestamp, false) + } + } + case "deletionGracePeriodSeconds": + if r.TryDecodeAsNil() { + if x.DeletionGracePeriodSeconds != nil { + x.DeletionGracePeriodSeconds = nil + } + } else { + if x.DeletionGracePeriodSeconds == nil { + x.DeletionGracePeriodSeconds = new(int64) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) + } + } + case "labels": + if r.TryDecodeAsNil() { + x.Labels = nil + } else { + yyv24 := &x.Labels + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + z.F.DecMapStringStringX(yyv24, false, d) + } + } + case "annotations": + if r.TryDecodeAsNil() { + x.Annotations = nil + } else { + yyv26 := &x.Annotations + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + z.F.DecMapStringStringX(yyv26, false, d) + } + } + case "ownerReferences": + if r.TryDecodeAsNil() { + x.OwnerReferences = nil + } else { + yyv28 := &x.OwnerReferences + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + h.decSlicev1_OwnerReference((*[]pkg2_v1.OwnerReference)(yyv28), d) + } + } + case "finalizers": + if r.TryDecodeAsNil() { + x.Finalizers = nil + } else { + yyv30 := &x.Finalizers + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + z.F.DecSliceStringX(yyv30, false, d) + } + } + case "clusterName": + if r.TryDecodeAsNil() { + x.ClusterName = "" + } else { + yyv32 := &x.ClusterName + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + *((*string)(yyv32)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj34 int + var yyb34 bool + var yyhl34 bool = l >= 0 + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv35 := &x.Name + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*string)(yyv35)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.GenerateName = "" + } else { + yyv37 := &x.GenerateName + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*string)(yyv37)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv39 := &x.Namespace + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*string)(yyv39)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SelfLink = "" + } else { + yyv41 := &x.SelfLink + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*string)(yyv41)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv43 := &x.UID + yym44 := z.DecBinary() + _ = yym44 + if false { + } else if z.HasExtensions() && z.DecExt(yyv43) { + } else { + *((*string)(yyv43)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ResourceVersion = "" + } else { + yyv45 := &x.ResourceVersion + yym46 := z.DecBinary() + _ = yym46 + if false { + } else { + *((*string)(yyv45)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Generation = 0 + } else { + yyv47 := &x.Generation + yym48 := z.DecBinary() + _ = yym48 + if false { + } else { + *((*int64)(yyv47)) = int64(r.DecodeInt(64)) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { 
+ x.CreationTimestamp = pkg2_v1.Time{} + } else { + yyv49 := &x.CreationTimestamp + yym50 := z.DecBinary() + _ = yym50 + if false { + } else if z.HasExtensions() && z.DecExt(yyv49) { + } else if yym50 { + z.DecBinaryUnmarshal(yyv49) + } else if !yym50 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv49) + } else { + z.DecFallback(yyv49, false) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DeletionTimestamp != nil { + x.DeletionTimestamp = nil + } + } else { + if x.DeletionTimestamp == nil { + x.DeletionTimestamp = new(pkg2_v1.Time) + } + yym52 := z.DecBinary() + _ = yym52 + if false { + } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { + } else if yym52 { + z.DecBinaryUnmarshal(x.DeletionTimestamp) + } else if !yym52 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.DeletionTimestamp) + } else { + z.DecFallback(x.DeletionTimestamp, false) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DeletionGracePeriodSeconds != nil { + x.DeletionGracePeriodSeconds = nil + } + } else { + if x.DeletionGracePeriodSeconds == nil { + x.DeletionGracePeriodSeconds = new(int64) + } + yym54 := z.DecBinary() + _ = yym54 + if false { + } else { + *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Labels = nil + } else { + yyv55 := &x.Labels + yym56 := z.DecBinary() + _ = yym56 + if false { + } else { + z.F.DecMapStringStringX(yyv55, false, d) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Annotations = nil + } else { + yyv57 := &x.Annotations + yym58 := z.DecBinary() + _ = yym58 + if false { + } else { + z.F.DecMapStringStringX(yyv57, false, d) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.OwnerReferences = nil + } else { + yyv59 := &x.OwnerReferences + yym60 := z.DecBinary() + _ = yym60 + if false { + } else { + h.decSlicev1_OwnerReference((*[]pkg2_v1.OwnerReference)(yyv59), d) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Finalizers = nil + } else { + yyv61 := &x.Finalizers + yym62 := z.DecBinary() + _ = yym62 + if false { + } else { + z.F.DecSliceStringX(yyv61, false, d) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ClusterName = "" + } else { + yyv63 := &x.ClusterName + yym64 := z.DecBinary() + _ = yym64 + if false { + } else { + *((*string)(yyv63)) = r.DecodeString() + } + } + for { + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj34-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [27]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.VolumeSource.HostPath != nil && x.HostPath != nil + yyq2[2] = x.VolumeSource.EmptyDir != nil && x.EmptyDir != nil + yyq2[3] = x.VolumeSource.GCEPersistentDisk != nil && x.GCEPersistentDisk != nil + yyq2[4] = x.VolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil + yyq2[5] = x.VolumeSource.GitRepo != nil && x.GitRepo != nil + yyq2[6] = x.VolumeSource.Secret != nil && x.Secret != nil + yyq2[7] = x.VolumeSource.NFS != nil && x.NFS != nil + yyq2[8] = x.VolumeSource.ISCSI != nil && x.ISCSI != nil + yyq2[9] = x.VolumeSource.Glusterfs != nil && x.Glusterfs != nil + yyq2[10] = x.VolumeSource.PersistentVolumeClaim != nil && x.PersistentVolumeClaim != nil + yyq2[11] = x.VolumeSource.RBD != nil && x.RBD != nil + yyq2[12] = x.VolumeSource.FlexVolume != nil && x.FlexVolume != nil + yyq2[13] = x.VolumeSource.Cinder != nil && x.Cinder != nil + yyq2[14] = x.VolumeSource.CephFS != nil && x.CephFS != nil + yyq2[15] = x.VolumeSource.Flocker != nil && x.Flocker != nil + yyq2[16] = x.VolumeSource.DownwardAPI != nil && x.DownwardAPI != nil + yyq2[17] = x.VolumeSource.FC != nil && x.FC != nil + yyq2[18] = x.VolumeSource.AzureFile != nil && x.AzureFile != nil + yyq2[19] = x.VolumeSource.ConfigMap != nil && x.ConfigMap != nil + yyq2[20] = x.VolumeSource.VsphereVolume != nil && x.VsphereVolume != nil + yyq2[21] = x.VolumeSource.Quobyte != nil && x.Quobyte != nil + yyq2[22] = x.VolumeSource.AzureDisk != nil && x.AzureDisk != nil + yyq2[23] = x.VolumeSource.PhotonPersistentDisk != nil && x.PhotonPersistentDisk != nil + yyq2[24] = x.VolumeSource.Projected != nil && x.Projected != nil + yyq2[25] = x.VolumeSource.PortworxVolume != nil && x.PortworxVolume != nil + yyq2[26] = x.VolumeSource.ScaleIO != nil && x.ScaleIO != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(27) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + var yyn6 bool + if 
x.VolumeSource.HostPath == nil { + yyn6 = true + goto LABEL6 + } + LABEL6: + if yyr2 || yy2arr2 { + if yyn6 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn6 { + r.EncodeNil() + } else { + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } + } + } + var yyn9 bool + if x.VolumeSource.EmptyDir == nil { + yyn9 = true + goto LABEL9 + } + LABEL9: + if yyr2 || yy2arr2 { + if yyn9 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.EmptyDir == nil { + r.EncodeNil() + } else { + x.EmptyDir.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("emptyDir")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn9 { + r.EncodeNil() + } else { + if x.EmptyDir == nil { + r.EncodeNil() + } else { + x.EmptyDir.CodecEncodeSelf(e) + } + } + } + } + var yyn12 bool + if x.VolumeSource.GCEPersistentDisk == nil { + yyn12 = true + goto LABEL12 + } + LABEL12: + if yyr2 || yy2arr2 { + if yyn12 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn12 { + r.EncodeNil() + } else { + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } + } + } + var yyn15 bool + if x.VolumeSource.AWSElasticBlockStore == nil { + yyn15 = true + goto LABEL15 + } + LABEL15: + if yyr2 || yy2arr2 { + if yyn15 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn15 { + r.EncodeNil() + } else { + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } + } + } + var yyn18 bool + if x.VolumeSource.GitRepo == nil { + yyn18 = true + goto LABEL18 + } + LABEL18: + if yyr2 || yy2arr2 { + if yyn18 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.GitRepo == nil { + r.EncodeNil() + } else { + x.GitRepo.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gitRepo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn18 { + r.EncodeNil() + } else { + if x.GitRepo == 
nil { + r.EncodeNil() + } else { + x.GitRepo.CodecEncodeSelf(e) + } + } + } + } + var yyn21 bool + if x.VolumeSource.Secret == nil { + yyn21 = true + goto LABEL21 + } + LABEL21: + if yyr2 || yy2arr2 { + if yyn21 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.Secret == nil { + r.EncodeNil() + } else { + x.Secret.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secret")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn21 { + r.EncodeNil() + } else { + if x.Secret == nil { + r.EncodeNil() + } else { + x.Secret.CodecEncodeSelf(e) + } + } + } + } + var yyn24 bool + if x.VolumeSource.NFS == nil { + yyn24 = true + goto LABEL24 + } + LABEL24: + if yyr2 || yy2arr2 { + if yyn24 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn24 { + r.EncodeNil() + } else { + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } + } + } + var yyn27 bool + if x.VolumeSource.ISCSI == nil { + yyn27 = true + goto LABEL27 + } + LABEL27: + if yyr2 || yy2arr2 { + if yyn27 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("iscsi")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn27 { + r.EncodeNil() + } else { + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } + } + } + var yyn30 bool + if x.VolumeSource.Glusterfs == nil { + yyn30 = true + goto LABEL30 + } + LABEL30: + if yyr2 || yy2arr2 { + if yyn30 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn30 { + r.EncodeNil() + } else { + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } + } + } + var yyn33 bool + if x.VolumeSource.PersistentVolumeClaim == nil { + yyn33 = true + goto LABEL33 + } + LABEL33: + if yyr2 || yy2arr2 { + if yyn33 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.PersistentVolumeClaim == nil { + r.EncodeNil() + } else { + x.PersistentVolumeClaim.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeClaim")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn33 { + r.EncodeNil() + } else { + if x.PersistentVolumeClaim 
== nil { + r.EncodeNil() + } else { + x.PersistentVolumeClaim.CodecEncodeSelf(e) + } + } + } + } + var yyn36 bool + if x.VolumeSource.RBD == nil { + yyn36 = true + goto LABEL36 + } + LABEL36: + if yyr2 || yy2arr2 { + if yyn36 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rbd")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn36 { + r.EncodeNil() + } else { + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } + } + } + var yyn39 bool + if x.VolumeSource.FlexVolume == nil { + yyn39 = true + goto LABEL39 + } + LABEL39: + if yyr2 || yy2arr2 { + if yyn39 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn39 { + r.EncodeNil() + } else { + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } + } + } + var yyn42 bool + if x.VolumeSource.Cinder == nil { + yyn42 = true + goto LABEL42 + } + LABEL42: + if yyr2 || yy2arr2 { + if yyn42 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cinder")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn42 { + r.EncodeNil() + } else { + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } + } + } + var yyn45 bool + if x.VolumeSource.CephFS == nil { + yyn45 = true + goto LABEL45 + } + LABEL45: + if yyr2 || yy2arr2 { + if yyn45 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[14] { + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[14] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cephfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn45 { + r.EncodeNil() + } else { + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } + } + } + var yyn48 bool + if x.VolumeSource.Flocker == nil { + yyn48 = true + goto LABEL48 + } + LABEL48: + if yyr2 || yy2arr2 { + if yyn48 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[15] { + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[15] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flocker")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn48 { + r.EncodeNil() + } else { + if x.Flocker == nil { + r.EncodeNil() + } else 
{ + x.Flocker.CodecEncodeSelf(e) + } + } + } + } + var yyn51 bool + if x.VolumeSource.DownwardAPI == nil { + yyn51 = true + goto LABEL51 + } + LABEL51: + if yyr2 || yy2arr2 { + if yyn51 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[16] { + if x.DownwardAPI == nil { + r.EncodeNil() + } else { + x.DownwardAPI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[16] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("downwardAPI")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn51 { + r.EncodeNil() + } else { + if x.DownwardAPI == nil { + r.EncodeNil() + } else { + x.DownwardAPI.CodecEncodeSelf(e) + } + } + } + } + var yyn54 bool + if x.VolumeSource.FC == nil { + yyn54 = true + goto LABEL54 + } + LABEL54: + if yyr2 || yy2arr2 { + if yyn54 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[17] { + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[17] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fc")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn54 { + r.EncodeNil() + } else { + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } + } + } + var yyn57 bool + if x.VolumeSource.AzureFile == nil { + yyn57 = true + goto LABEL57 + } + LABEL57: + if yyr2 || yy2arr2 { + if yyn57 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[18] { + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[18] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureFile")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn57 { + r.EncodeNil() + } else { + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } + } + } + var yyn60 bool + if x.VolumeSource.ConfigMap == nil { + yyn60 = true + goto LABEL60 + } + LABEL60: + if yyr2 || yy2arr2 { + if yyn60 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[19] { + if x.ConfigMap == nil { + r.EncodeNil() + } else { + x.ConfigMap.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[19] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("configMap")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn60 { + r.EncodeNil() + } else { + if x.ConfigMap == nil { + r.EncodeNil() + } else { + x.ConfigMap.CodecEncodeSelf(e) + } + } + } + } + var yyn63 bool + if x.VolumeSource.VsphereVolume == nil { + yyn63 = true + goto LABEL63 + } + LABEL63: + if yyr2 || yy2arr2 { + if yyn63 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[20] { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[20] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn63 { + r.EncodeNil() + } else { + if x.VsphereVolume == nil { + 
r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } + } + } + var yyn66 bool + if x.VolumeSource.Quobyte == nil { + yyn66 = true + goto LABEL66 + } + LABEL66: + if yyr2 || yy2arr2 { + if yyn66 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[21] { + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[21] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("quobyte")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn66 { + r.EncodeNil() + } else { + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } + } + } + var yyn69 bool + if x.VolumeSource.AzureDisk == nil { + yyn69 = true + goto LABEL69 + } + LABEL69: + if yyr2 || yy2arr2 { + if yyn69 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[22] { + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[22] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn69 { + r.EncodeNil() + } else { + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } + } + } + var yyn72 bool + if x.VolumeSource.PhotonPersistentDisk == nil { + yyn72 = true + goto LABEL72 + } + LABEL72: + if yyr2 || yy2arr2 { + if yyn72 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[23] { + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[23] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn72 { + r.EncodeNil() + } else { + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } + } + } + var yyn75 bool + if x.VolumeSource.Projected == nil { + yyn75 = true + goto LABEL75 + } + LABEL75: + if yyr2 || yy2arr2 { + if yyn75 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[24] { + if x.Projected == nil { + r.EncodeNil() + } else { + x.Projected.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[24] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("projected")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn75 { + r.EncodeNil() + } else { + if x.Projected == nil { + r.EncodeNil() + } else { + x.Projected.CodecEncodeSelf(e) + } + } + } + } + var yyn78 bool + if x.VolumeSource.PortworxVolume == nil { + yyn78 = true + goto LABEL78 + } + LABEL78: + if yyr2 || yy2arr2 { + if yyn78 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[25] { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[25] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("portworxVolume")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn78 { + r.EncodeNil() + } else { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } + } + } + var yyn81 bool + if x.VolumeSource.ScaleIO == nil { + yyn81 = true + goto LABEL81 + } + LABEL81: + if yyr2 || yy2arr2 { + if yyn81 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[26] { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[26] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleIO")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn81 { + r.EncodeNil() + } else { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Volume) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Volume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "hostPath": + if x.VolumeSource.HostPath == nil { + x.VolumeSource.HostPath = new(HostPathVolumeSource) + } + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + case "emptyDir": + if x.VolumeSource.EmptyDir == nil { + x.VolumeSource.EmptyDir = new(EmptyDirVolumeSource) + } + if r.TryDecodeAsNil() { + if x.EmptyDir != nil { + x.EmptyDir = nil + } + } else { + if x.EmptyDir == nil { + x.EmptyDir = new(EmptyDirVolumeSource) + } + x.EmptyDir.CodecDecodeSelf(d) + } + case "gcePersistentDisk": + if x.VolumeSource.GCEPersistentDisk == nil { + x.VolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + 
x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + case "awsElasticBlockStore": + if x.VolumeSource.AWSElasticBlockStore == nil { + x.VolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + case "gitRepo": + if x.VolumeSource.GitRepo == nil { + x.VolumeSource.GitRepo = new(GitRepoVolumeSource) + } + if r.TryDecodeAsNil() { + if x.GitRepo != nil { + x.GitRepo = nil + } + } else { + if x.GitRepo == nil { + x.GitRepo = new(GitRepoVolumeSource) + } + x.GitRepo.CodecDecodeSelf(d) + } + case "secret": + if x.VolumeSource.Secret == nil { + x.VolumeSource.Secret = new(SecretVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Secret != nil { + x.Secret = nil + } + } else { + if x.Secret == nil { + x.Secret = new(SecretVolumeSource) + } + x.Secret.CodecDecodeSelf(d) + } + case "nfs": + if x.VolumeSource.NFS == nil { + x.VolumeSource.NFS = new(NFSVolumeSource) + } + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + case "iscsi": + if x.VolumeSource.ISCSI == nil { + x.VolumeSource.ISCSI = new(ISCSIVolumeSource) + } + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + case "glusterfs": + if x.VolumeSource.Glusterfs == nil { + x.VolumeSource.Glusterfs = new(GlusterfsVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + case "persistentVolumeClaim": + if x.VolumeSource.PersistentVolumeClaim == nil { + x.VolumeSource.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) + } + if r.TryDecodeAsNil() { + if x.PersistentVolumeClaim != nil { + x.PersistentVolumeClaim = nil + } + } else { + if x.PersistentVolumeClaim == nil { + x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) + } + x.PersistentVolumeClaim.CodecDecodeSelf(d) + } + case "rbd": + if x.VolumeSource.RBD == nil { + x.VolumeSource.RBD = new(RBDVolumeSource) + } + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + case "flexVolume": + if x.VolumeSource.FlexVolume == nil { + x.VolumeSource.FlexVolume = new(FlexVolumeSource) + } + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + case "cinder": + if x.VolumeSource.Cinder == nil { + x.VolumeSource.Cinder = new(CinderVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + case "cephfs": + if x.VolumeSource.CephFS == nil { + x.VolumeSource.CephFS = new(CephFSVolumeSource) + } + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + case 
"flocker": + if x.VolumeSource.Flocker == nil { + x.VolumeSource.Flocker = new(FlockerVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + case "downwardAPI": + if x.VolumeSource.DownwardAPI == nil { + x.VolumeSource.DownwardAPI = new(DownwardAPIVolumeSource) + } + if r.TryDecodeAsNil() { + if x.DownwardAPI != nil { + x.DownwardAPI = nil + } + } else { + if x.DownwardAPI == nil { + x.DownwardAPI = new(DownwardAPIVolumeSource) + } + x.DownwardAPI.CodecDecodeSelf(d) + } + case "fc": + if x.VolumeSource.FC == nil { + x.VolumeSource.FC = new(FCVolumeSource) + } + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + case "azureFile": + if x.VolumeSource.AzureFile == nil { + x.VolumeSource.AzureFile = new(AzureFileVolumeSource) + } + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + case "configMap": + if x.VolumeSource.ConfigMap == nil { + x.VolumeSource.ConfigMap = new(ConfigMapVolumeSource) + } + if r.TryDecodeAsNil() { + if x.ConfigMap != nil { + x.ConfigMap = nil + } + } else { + if x.ConfigMap == nil { + x.ConfigMap = new(ConfigMapVolumeSource) + } + x.ConfigMap.CodecDecodeSelf(d) + } + case "vsphereVolume": + if x.VolumeSource.VsphereVolume == nil { + x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + case "quobyte": + if x.VolumeSource.Quobyte == nil { + x.VolumeSource.Quobyte = new(QuobyteVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + case "azureDisk": + if x.VolumeSource.AzureDisk == nil { + x.VolumeSource.AzureDisk = new(AzureDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + case "photonPersistentDisk": + if x.VolumeSource.PhotonPersistentDisk == nil { + x.VolumeSource.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + case "projected": + if x.VolumeSource.Projected == nil { + x.VolumeSource.Projected = new(ProjectedVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Projected != nil { + x.Projected = nil + } + } else { + if x.Projected == nil { + x.Projected = new(ProjectedVolumeSource) + } + x.Projected.CodecDecodeSelf(d) + } + case "portworxVolume": + if x.VolumeSource.PortworxVolume == nil { + x.VolumeSource.PortworxVolume = new(PortworxVolumeSource) + } + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + 
x.PortworxVolume.CodecDecodeSelf(d) + } + case "scaleIO": + if x.VolumeSource.ScaleIO == nil { + x.VolumeSource.ScaleIO = new(ScaleIOVolumeSource) + } + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj32 int + var yyb32 bool + var yyhl32 bool = l >= 0 + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv33 := &x.Name + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*string)(yyv33)) = r.DecodeString() + } + } + if x.VolumeSource.HostPath == nil { + x.VolumeSource.HostPath = new(HostPathVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + if x.VolumeSource.EmptyDir == nil { + x.VolumeSource.EmptyDir = new(EmptyDirVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.EmptyDir != nil { + x.EmptyDir = nil + } + } else { + if x.EmptyDir == nil { + x.EmptyDir = new(EmptyDirVolumeSource) + } + x.EmptyDir.CodecDecodeSelf(d) + } + if x.VolumeSource.GCEPersistentDisk == nil { + x.VolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + if x.VolumeSource.AWSElasticBlockStore == nil { + x.VolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + if x.VolumeSource.GitRepo == nil { + x.VolumeSource.GitRepo = new(GitRepoVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = 
r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.GitRepo != nil { + x.GitRepo = nil + } + } else { + if x.GitRepo == nil { + x.GitRepo = new(GitRepoVolumeSource) + } + x.GitRepo.CodecDecodeSelf(d) + } + if x.VolumeSource.Secret == nil { + x.VolumeSource.Secret = new(SecretVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Secret != nil { + x.Secret = nil + } + } else { + if x.Secret == nil { + x.Secret = new(SecretVolumeSource) + } + x.Secret.CodecDecodeSelf(d) + } + if x.VolumeSource.NFS == nil { + x.VolumeSource.NFS = new(NFSVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + if x.VolumeSource.ISCSI == nil { + x.VolumeSource.ISCSI = new(ISCSIVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + if x.VolumeSource.Glusterfs == nil { + x.VolumeSource.Glusterfs = new(GlusterfsVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + if x.VolumeSource.PersistentVolumeClaim == nil { + x.VolumeSource.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PersistentVolumeClaim != nil { + x.PersistentVolumeClaim = nil + } + } else { + if x.PersistentVolumeClaim == nil { + x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) + } + x.PersistentVolumeClaim.CodecDecodeSelf(d) + } + if x.VolumeSource.RBD == nil { + x.VolumeSource.RBD = new(RBDVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + if x.VolumeSource.FlexVolume == nil { + x.VolumeSource.FlexVolume = 
new(FlexVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + if x.VolumeSource.Cinder == nil { + x.VolumeSource.Cinder = new(CinderVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + if x.VolumeSource.CephFS == nil { + x.VolumeSource.CephFS = new(CephFSVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + if x.VolumeSource.Flocker == nil { + x.VolumeSource.Flocker = new(FlockerVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + if x.VolumeSource.DownwardAPI == nil { + x.VolumeSource.DownwardAPI = new(DownwardAPIVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DownwardAPI != nil { + x.DownwardAPI = nil + } + } else { + if x.DownwardAPI == nil { + x.DownwardAPI = new(DownwardAPIVolumeSource) + } + x.DownwardAPI.CodecDecodeSelf(d) + } + if x.VolumeSource.FC == nil { + x.VolumeSource.FC = new(FCVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + if x.VolumeSource.AzureFile == nil { + x.VolumeSource.AzureFile = new(AzureFileVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + if x.VolumeSource.ConfigMap == nil { + 
x.VolumeSource.ConfigMap = new(ConfigMapVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ConfigMap != nil { + x.ConfigMap = nil + } + } else { + if x.ConfigMap == nil { + x.ConfigMap = new(ConfigMapVolumeSource) + } + x.ConfigMap.CodecDecodeSelf(d) + } + if x.VolumeSource.VsphereVolume == nil { + x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + if x.VolumeSource.Quobyte == nil { + x.VolumeSource.Quobyte = new(QuobyteVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + if x.VolumeSource.AzureDisk == nil { + x.VolumeSource.AzureDisk = new(AzureDiskVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + if x.VolumeSource.PhotonPersistentDisk == nil { + x.VolumeSource.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + if x.VolumeSource.Projected == nil { + x.VolumeSource.Projected = new(ProjectedVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Projected != nil { + x.Projected = nil + } + } else { + if x.Projected == nil { + x.Projected = new(ProjectedVolumeSource) + } + x.Projected.CodecDecodeSelf(d) + } + if x.VolumeSource.PortworxVolume == nil { + x.VolumeSource.PortworxVolume = new(PortworxVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + if x.VolumeSource.ScaleIO == nil { + x.VolumeSource.ScaleIO = new(ScaleIOVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + for { + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj32-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [26]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.HostPath != nil + yyq2[1] = x.EmptyDir != nil + yyq2[2] = x.GCEPersistentDisk != nil + yyq2[3] = x.AWSElasticBlockStore != nil + yyq2[4] = x.GitRepo != nil + yyq2[5] = x.Secret != nil + yyq2[6] = x.NFS != nil + yyq2[7] = x.ISCSI != nil + yyq2[8] = x.Glusterfs != nil + yyq2[9] = x.PersistentVolumeClaim != nil + yyq2[10] = x.RBD != nil + yyq2[11] = x.FlexVolume != nil + yyq2[12] = x.Cinder != nil + yyq2[13] = x.CephFS != nil + yyq2[14] = x.Flocker != nil + yyq2[15] = x.DownwardAPI != nil + yyq2[16] = x.FC != nil + yyq2[17] = x.AzureFile != nil + yyq2[18] = x.ConfigMap != nil + yyq2[19] = x.VsphereVolume != nil + yyq2[20] = x.Quobyte != nil + yyq2[21] = x.AzureDisk != nil + yyq2[22] = x.PhotonPersistentDisk != nil + yyq2[23] = x.Projected != nil + yyq2[24] = x.PortworxVolume != nil + yyq2[25] = x.ScaleIO != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(26) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.EmptyDir == nil { + r.EncodeNil() + } else { + x.EmptyDir.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("emptyDir")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.EmptyDir == nil { + r.EncodeNil() + } else { + 
x.EmptyDir.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.GitRepo == nil { + r.EncodeNil() + } else { + x.GitRepo.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gitRepo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.GitRepo == nil { + r.EncodeNil() + } else { + x.GitRepo.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.Secret == nil { + r.EncodeNil() + } else { + x.Secret.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secret")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Secret == nil { + r.EncodeNil() + } else { + x.Secret.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("iscsi")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.PersistentVolumeClaim == nil { + r.EncodeNil() + } else { + x.PersistentVolumeClaim.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeClaim")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PersistentVolumeClaim == nil { + r.EncodeNil() + } else { + x.PersistentVolumeClaim.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rbd")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cinder")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cephfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[14] { + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[14] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flocker")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[15] { + if x.DownwardAPI == nil { + r.EncodeNil() + } else { + 
x.DownwardAPI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[15] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("downwardAPI")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DownwardAPI == nil { + r.EncodeNil() + } else { + x.DownwardAPI.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[16] { + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[16] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fc")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[17] { + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[17] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureFile")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[18] { + if x.ConfigMap == nil { + r.EncodeNil() + } else { + x.ConfigMap.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[18] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("configMap")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ConfigMap == nil { + r.EncodeNil() + } else { + x.ConfigMap.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[19] { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[19] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[20] { + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[20] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("quobyte")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[21] { + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[21] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[22] { + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[22] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[23] { + if x.Projected == nil { + r.EncodeNil() + } else { + x.Projected.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[23] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("projected")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Projected == nil { + r.EncodeNil() + } else { + x.Projected.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[24] { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[24] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("portworxVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[25] { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[25] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleIO")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *VolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *VolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + 
yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "hostPath": + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + case "emptyDir": + if r.TryDecodeAsNil() { + if x.EmptyDir != nil { + x.EmptyDir = nil + } + } else { + if x.EmptyDir == nil { + x.EmptyDir = new(EmptyDirVolumeSource) + } + x.EmptyDir.CodecDecodeSelf(d) + } + case "gcePersistentDisk": + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + case "awsElasticBlockStore": + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + case "gitRepo": + if r.TryDecodeAsNil() { + if x.GitRepo != nil { + x.GitRepo = nil + } + } else { + if x.GitRepo == nil { + x.GitRepo = new(GitRepoVolumeSource) + } + x.GitRepo.CodecDecodeSelf(d) + } + case "secret": + if r.TryDecodeAsNil() { + if x.Secret != nil { + x.Secret = nil + } + } else { + if x.Secret == nil { + x.Secret = new(SecretVolumeSource) + } + x.Secret.CodecDecodeSelf(d) + } + case "nfs": + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + case "iscsi": + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + case "glusterfs": + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + case "persistentVolumeClaim": + if r.TryDecodeAsNil() { + if x.PersistentVolumeClaim != nil { + x.PersistentVolumeClaim = nil + } + } else { + if x.PersistentVolumeClaim == nil { + x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) + } + x.PersistentVolumeClaim.CodecDecodeSelf(d) + } + case "rbd": + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + case "flexVolume": + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + case "cinder": + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + case "cephfs": + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + case "flocker": + if r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + case "downwardAPI": + if r.TryDecodeAsNil() { + if x.DownwardAPI != nil { + x.DownwardAPI = nil + } + } else { + if x.DownwardAPI == nil { + 
x.DownwardAPI = new(DownwardAPIVolumeSource) + } + x.DownwardAPI.CodecDecodeSelf(d) + } + case "fc": + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + case "azureFile": + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + case "configMap": + if r.TryDecodeAsNil() { + if x.ConfigMap != nil { + x.ConfigMap = nil + } + } else { + if x.ConfigMap == nil { + x.ConfigMap = new(ConfigMapVolumeSource) + } + x.ConfigMap.CodecDecodeSelf(d) + } + case "vsphereVolume": + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + case "quobyte": + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + case "azureDisk": + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + case "photonPersistentDisk": + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + case "projected": + if r.TryDecodeAsNil() { + if x.Projected != nil { + x.Projected = nil + } + } else { + if x.Projected == nil { + x.Projected = new(ProjectedVolumeSource) + } + x.Projected.CodecDecodeSelf(d) + } + case "portworxVolume": + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + case "scaleIO": + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj30 int + var yyb30 bool + var yyhl30 bool = l >= 0 + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.EmptyDir != nil { + x.EmptyDir = nil + } + } else { + if x.EmptyDir == nil { + x.EmptyDir = new(EmptyDirVolumeSource) + } + x.EmptyDir.CodecDecodeSelf(d) + } + 
yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.GitRepo != nil { + x.GitRepo = nil + } + } else { + if x.GitRepo == nil { + x.GitRepo = new(GitRepoVolumeSource) + } + x.GitRepo.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Secret != nil { + x.Secret = nil + } + } else { + if x.Secret == nil { + x.Secret = new(SecretVolumeSource) + } + x.Secret.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PersistentVolumeClaim != nil { + x.PersistentVolumeClaim = nil + } + } else { + if x.PersistentVolumeClaim == nil { + x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) + } + x.PersistentVolumeClaim.CodecDecodeSelf(d) + } + yyj30++ + if 
yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DownwardAPI != nil { + x.DownwardAPI = nil + } + } else { + if x.DownwardAPI == nil { + x.DownwardAPI = new(DownwardAPIVolumeSource) + } + x.DownwardAPI.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + if x.ConfigMap != nil { + x.ConfigMap = nil + } + } else { + if x.ConfigMap == nil { + x.ConfigMap = new(ConfigMapVolumeSource) + } + x.ConfigMap.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Projected != nil { + x.Projected = nil + } + } else { + if x.Projected == nil { + x.Projected = new(ProjectedVolumeSource) + } + x.Projected.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + for { + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj30-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeClaimVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClaimName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("claimName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClaimName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeClaimVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "claimName": + if r.TryDecodeAsNil() { + x.ClaimName = "" + } else { + yyv4 := &x.ClaimName + yym5 := z.DecBinary() + _ = yym5 + if false { + 
} else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv6 := &x.ReadOnly + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ClaimName = "" + } else { + yyv9 := &x.ClaimName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv11 := &x.ReadOnly + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [19]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.GCEPersistentDisk != nil + yyq2[1] = x.AWSElasticBlockStore != nil + yyq2[2] = x.HostPath != nil + yyq2[3] = x.Glusterfs != nil + yyq2[4] = x.NFS != nil + yyq2[5] = x.RBD != nil + yyq2[6] = x.ISCSI != nil + yyq2[7] = x.Cinder != nil + yyq2[8] = x.CephFS != nil + yyq2[9] = x.FC != nil + yyq2[10] = x.Flocker != nil + yyq2[11] = x.FlexVolume != nil + yyq2[12] = x.AzureFile != nil + yyq2[13] = x.VsphereVolume != nil + yyq2[14] = x.Quobyte != nil + yyq2[15] = x.AzureDisk != nil + yyq2[16] = x.PhotonPersistentDisk != nil + yyq2[17] = x.PortworxVolume != nil + yyq2[18] = x.ScaleIO != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(19) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + 
x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rbd")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("iscsi")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cinder")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cephfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fc")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flocker")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureFile")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[14] { + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[14] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("quobyte")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[15] { + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[15] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[16] { + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[16] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[17] { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[17] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("portworxVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[18] { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[18] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleIO")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "gcePersistentDisk": + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + case "awsElasticBlockStore": + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + case "hostPath": + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + case "glusterfs": + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + case "nfs": + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + case "rbd": + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + case "iscsi": + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + case "cinder": + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + case "cephfs": + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + case "fc": + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + case "flocker": + if r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + case "flexVolume": + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + case "azureFile": + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + case "vsphereVolume": + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + case "quobyte": + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil 
+ } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + case "azureDisk": + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + case "photonPersistentDisk": + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + case "portworxVolume": + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + case "scaleIO": + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj23 int + var yyb23 bool + var yyhl23 bool = l >= 0 + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + 
if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + for { + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj23-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolume) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + 
yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolume) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := 
r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = PersistentVolumeSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = PersistentVolumeStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = 
r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = PersistentVolumeSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = PersistentVolumeStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [24]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Capacity) != 0 + yyq2[1] = x.PersistentVolumeSource.GCEPersistentDisk != nil && x.GCEPersistentDisk != nil + yyq2[2] = x.PersistentVolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil + yyq2[3] = x.PersistentVolumeSource.HostPath != nil && x.HostPath != nil + yyq2[4] = x.PersistentVolumeSource.Glusterfs != nil && x.Glusterfs != nil + yyq2[5] = x.PersistentVolumeSource.NFS != nil && x.NFS != nil + yyq2[6] = x.PersistentVolumeSource.RBD != nil && x.RBD != nil + yyq2[7] = x.PersistentVolumeSource.ISCSI != nil && x.ISCSI != nil + yyq2[8] = x.PersistentVolumeSource.Cinder != nil && x.Cinder != nil + yyq2[9] = x.PersistentVolumeSource.CephFS != nil && x.CephFS != nil + yyq2[10] = x.PersistentVolumeSource.FC != nil && x.FC != nil + yyq2[11] = x.PersistentVolumeSource.Flocker != nil && x.Flocker != nil + yyq2[12] = x.PersistentVolumeSource.FlexVolume != nil && x.FlexVolume != nil + yyq2[13] = x.PersistentVolumeSource.AzureFile != nil && x.AzureFile != nil + yyq2[14] = x.PersistentVolumeSource.VsphereVolume != nil && x.VsphereVolume != nil + yyq2[15] = x.PersistentVolumeSource.Quobyte != nil && x.Quobyte != nil + yyq2[16] = x.PersistentVolumeSource.AzureDisk != nil && x.AzureDisk != nil + yyq2[17] = x.PersistentVolumeSource.PhotonPersistentDisk != nil && x.PhotonPersistentDisk != nil + yyq2[18] = x.PersistentVolumeSource.PortworxVolume != nil && x.PortworxVolume != nil + yyq2[19] = x.PersistentVolumeSource.ScaleIO != nil && x.ScaleIO != nil + yyq2[20] = len(x.AccessModes) != 0 + yyq2[21] = x.ClaimRef != nil + yyq2[22] = x.PersistentVolumeReclaimPolicy != "" + yyq2[23] = x.StorageClassName != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(24) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Capacity == nil { + r.EncodeNil() + } else { + x.Capacity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("capacity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Capacity == nil { + r.EncodeNil() + } else { + x.Capacity.CodecEncodeSelf(e) + } + } + } + var yyn6 bool + if x.PersistentVolumeSource.GCEPersistentDisk == nil { + yyn6 = true + goto LABEL6 + } + LABEL6: + if yyr2 || yy2arr2 { + if yyn6 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn6 { + r.EncodeNil() + } else { + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } + } + } + var yyn9 bool + if x.PersistentVolumeSource.AWSElasticBlockStore == nil { + yyn9 = true + goto LABEL9 + } + LABEL9: + if yyr2 || yy2arr2 { + if yyn9 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn9 { + r.EncodeNil() + } else { + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } + } + } + var yyn12 bool + if x.PersistentVolumeSource.HostPath == nil { + yyn12 = true + goto LABEL12 + } + LABEL12: + if yyr2 || yy2arr2 { + if yyn12 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn12 { + r.EncodeNil() + } else { + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } + } + } + var yyn15 bool + if x.PersistentVolumeSource.Glusterfs == nil { + yyn15 = true + goto LABEL15 + } + LABEL15: + if yyr2 || yy2arr2 { + if yyn15 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn15 { + r.EncodeNil() + } else { + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } + } + } + var yyn18 bool + if x.PersistentVolumeSource.NFS == nil { + yyn18 = true + goto LABEL18 + } + LABEL18: + if yyr2 || yy2arr2 { + if yyn18 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } else { + 
r.EncodeNil() + } + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn18 { + r.EncodeNil() + } else { + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } + } + } + var yyn21 bool + if x.PersistentVolumeSource.RBD == nil { + yyn21 = true + goto LABEL21 + } + LABEL21: + if yyr2 || yy2arr2 { + if yyn21 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rbd")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn21 { + r.EncodeNil() + } else { + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } + } + } + var yyn24 bool + if x.PersistentVolumeSource.ISCSI == nil { + yyn24 = true + goto LABEL24 + } + LABEL24: + if yyr2 || yy2arr2 { + if yyn24 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("iscsi")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn24 { + r.EncodeNil() + } else { + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } + } + } + var yyn27 bool + if x.PersistentVolumeSource.Cinder == nil { + yyn27 = true + goto LABEL27 + } + LABEL27: + if yyr2 || yy2arr2 { + if yyn27 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cinder")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn27 { + r.EncodeNil() + } else { + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } + } + } + var yyn30 bool + if x.PersistentVolumeSource.CephFS == nil { + yyn30 = true + goto LABEL30 + } + LABEL30: + if yyr2 || yy2arr2 { + if yyn30 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cephfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn30 { + r.EncodeNil() + } else { + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } + } + } + var yyn33 bool + if x.PersistentVolumeSource.FC == nil { + yyn33 = true + goto LABEL33 + } + LABEL33: + if yyr2 || yy2arr2 { + if yyn33 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[10] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fc")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn33 { + r.EncodeNil() + } else { + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } + } + } + var yyn36 bool + if x.PersistentVolumeSource.Flocker == nil { + yyn36 = true + goto LABEL36 + } + LABEL36: + if yyr2 || yy2arr2 { + if yyn36 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flocker")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn36 { + r.EncodeNil() + } else { + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } + } + } + var yyn39 bool + if x.PersistentVolumeSource.FlexVolume == nil { + yyn39 = true + goto LABEL39 + } + LABEL39: + if yyr2 || yy2arr2 { + if yyn39 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn39 { + r.EncodeNil() + } else { + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } + } + } + var yyn42 bool + if x.PersistentVolumeSource.AzureFile == nil { + yyn42 = true + goto LABEL42 + } + LABEL42: + if yyr2 || yy2arr2 { + if yyn42 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureFile")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn42 { + r.EncodeNil() + } else { + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } + } + } + var yyn45 bool + if x.PersistentVolumeSource.VsphereVolume == nil { + yyn45 = true + goto LABEL45 + } + LABEL45: + if yyr2 || yy2arr2 { + if yyn45 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[14] { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[14] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn45 { + r.EncodeNil() + } else { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } + } + } + var yyn48 bool + if x.PersistentVolumeSource.Quobyte == nil { + yyn48 = true + goto LABEL48 + } + LABEL48: + if yyr2 || yy2arr2 { + if yyn48 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[15] { + if x.Quobyte == nil { + r.EncodeNil() + } else { + 
x.Quobyte.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[15] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("quobyte")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn48 { + r.EncodeNil() + } else { + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } + } + } + var yyn51 bool + if x.PersistentVolumeSource.AzureDisk == nil { + yyn51 = true + goto LABEL51 + } + LABEL51: + if yyr2 || yy2arr2 { + if yyn51 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[16] { + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[16] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn51 { + r.EncodeNil() + } else { + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } + } + } + var yyn54 bool + if x.PersistentVolumeSource.PhotonPersistentDisk == nil { + yyn54 = true + goto LABEL54 + } + LABEL54: + if yyr2 || yy2arr2 { + if yyn54 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[17] { + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[17] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn54 { + r.EncodeNil() + } else { + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } + } + } + var yyn57 bool + if x.PersistentVolumeSource.PortworxVolume == nil { + yyn57 = true + goto LABEL57 + } + LABEL57: + if yyr2 || yy2arr2 { + if yyn57 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[18] { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[18] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("portworxVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn57 { + r.EncodeNil() + } else { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } + } + } + var yyn60 bool + if x.PersistentVolumeSource.ScaleIO == nil { + yyn60 = true + goto LABEL60 + } + LABEL60: + if yyr2 || yy2arr2 { + if yyn60 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[19] { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[19] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleIO")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn60 { + r.EncodeNil() + } else { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[20] { + if x.AccessModes == nil { + r.EncodeNil() + } 
else { + yym64 := z.EncBinary() + _ = yym64 + if false { + } else { + h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[20] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("accessModes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AccessModes == nil { + r.EncodeNil() + } else { + yym65 := z.EncBinary() + _ = yym65 + if false { + } else { + h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[21] { + if x.ClaimRef == nil { + r.EncodeNil() + } else { + x.ClaimRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[21] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("claimRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ClaimRef == nil { + r.EncodeNil() + } else { + x.ClaimRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[22] { + x.PersistentVolumeReclaimPolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[22] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeReclaimPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.PersistentVolumeReclaimPolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[23] { + yym73 := z.EncBinary() + _ = yym73 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StorageClassName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[23] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("storageClassName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym74 := z.EncBinary() + _ = yym74 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StorageClassName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; 
yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "capacity": + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv4 := &x.Capacity + yyv4.CodecDecodeSelf(d) + } + case "gcePersistentDisk": + if x.PersistentVolumeSource.GCEPersistentDisk == nil { + x.PersistentVolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + case "awsElasticBlockStore": + if x.PersistentVolumeSource.AWSElasticBlockStore == nil { + x.PersistentVolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + case "hostPath": + if x.PersistentVolumeSource.HostPath == nil { + x.PersistentVolumeSource.HostPath = new(HostPathVolumeSource) + } + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + case "glusterfs": + if x.PersistentVolumeSource.Glusterfs == nil { + x.PersistentVolumeSource.Glusterfs = new(GlusterfsVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + case "nfs": + if x.PersistentVolumeSource.NFS == nil { + x.PersistentVolumeSource.NFS = new(NFSVolumeSource) + } + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + case "rbd": + if x.PersistentVolumeSource.RBD == nil { + x.PersistentVolumeSource.RBD = new(RBDVolumeSource) + } + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + case "iscsi": + if x.PersistentVolumeSource.ISCSI == nil { + x.PersistentVolumeSource.ISCSI = new(ISCSIVolumeSource) + } + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + case "cinder": + if x.PersistentVolumeSource.Cinder == nil { + x.PersistentVolumeSource.Cinder = new(CinderVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + case "cephfs": + if x.PersistentVolumeSource.CephFS == nil { + x.PersistentVolumeSource.CephFS = new(CephFSVolumeSource) + } + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + case "fc": + if x.PersistentVolumeSource.FC == nil { + x.PersistentVolumeSource.FC = new(FCVolumeSource) + } + if r.TryDecodeAsNil() 
{ + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + case "flocker": + if x.PersistentVolumeSource.Flocker == nil { + x.PersistentVolumeSource.Flocker = new(FlockerVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + case "flexVolume": + if x.PersistentVolumeSource.FlexVolume == nil { + x.PersistentVolumeSource.FlexVolume = new(FlexVolumeSource) + } + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + case "azureFile": + if x.PersistentVolumeSource.AzureFile == nil { + x.PersistentVolumeSource.AzureFile = new(AzureFileVolumeSource) + } + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + case "vsphereVolume": + if x.PersistentVolumeSource.VsphereVolume == nil { + x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + case "quobyte": + if x.PersistentVolumeSource.Quobyte == nil { + x.PersistentVolumeSource.Quobyte = new(QuobyteVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + case "azureDisk": + if x.PersistentVolumeSource.AzureDisk == nil { + x.PersistentVolumeSource.AzureDisk = new(AzureDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + case "photonPersistentDisk": + if x.PersistentVolumeSource.PhotonPersistentDisk == nil { + x.PersistentVolumeSource.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + case "portworxVolume": + if x.PersistentVolumeSource.PortworxVolume == nil { + x.PersistentVolumeSource.PortworxVolume = new(PortworxVolumeSource) + } + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + case "scaleIO": + if x.PersistentVolumeSource.ScaleIO == nil { + x.PersistentVolumeSource.ScaleIO = new(ScaleIOVolumeSource) + } + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + case "accessModes": + if r.TryDecodeAsNil() { + x.AccessModes = nil + } else { + yyv24 := &x.AccessModes + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv24), d) + } + } + 
case "claimRef": + if r.TryDecodeAsNil() { + if x.ClaimRef != nil { + x.ClaimRef = nil + } + } else { + if x.ClaimRef == nil { + x.ClaimRef = new(ObjectReference) + } + x.ClaimRef.CodecDecodeSelf(d) + } + case "persistentVolumeReclaimPolicy": + if r.TryDecodeAsNil() { + x.PersistentVolumeReclaimPolicy = "" + } else { + yyv27 := &x.PersistentVolumeReclaimPolicy + yyv27.CodecDecodeSelf(d) + } + case "storageClassName": + if r.TryDecodeAsNil() { + x.StorageClassName = "" + } else { + yyv28 := &x.StorageClassName + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*string)(yyv28)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj30 int + var yyb30 bool + var yyhl30 bool = l >= 0 + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv31 := &x.Capacity + yyv31.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.GCEPersistentDisk == nil { + x.PersistentVolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.AWSElasticBlockStore == nil { + x.PersistentVolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.HostPath == nil { + x.PersistentVolumeSource.HostPath = new(HostPathVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.Glusterfs == nil { + x.PersistentVolumeSource.Glusterfs = new(GlusterfsVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if 
x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.NFS == nil { + x.PersistentVolumeSource.NFS = new(NFSVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.RBD == nil { + x.PersistentVolumeSource.RBD = new(RBDVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.ISCSI == nil { + x.PersistentVolumeSource.ISCSI = new(ISCSIVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.Cinder == nil { + x.PersistentVolumeSource.Cinder = new(CinderVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.CephFS == nil { + x.PersistentVolumeSource.CephFS = new(CephFSVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.FC == nil { + x.PersistentVolumeSource.FC = new(FCVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.Flocker == nil { + x.PersistentVolumeSource.Flocker = new(FlockerVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.FlexVolume == nil { + x.PersistentVolumeSource.FlexVolume = new(FlexVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.AzureFile == nil { + x.PersistentVolumeSource.AzureFile = new(AzureFileVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.VsphereVolume == nil { + x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.Quobyte == nil { + x.PersistentVolumeSource.Quobyte = new(QuobyteVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.AzureDisk == nil { + x.PersistentVolumeSource.AzureDisk = new(AzureDiskVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.PhotonPersistentDisk == nil { + x.PersistentVolumeSource.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + 
x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.PortworxVolume == nil { + x.PersistentVolumeSource.PortworxVolume = new(PortworxVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.ScaleIO == nil { + x.PersistentVolumeSource.ScaleIO = new(ScaleIOVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AccessModes = nil + } else { + yyv51 := &x.AccessModes + yym52 := z.DecBinary() + _ = yym52 + if false { + } else { + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv51), d) + } + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ClaimRef != nil { + x.ClaimRef = nil + } + } else { + if x.ClaimRef == nil { + x.ClaimRef = new(ObjectReference) + } + x.ClaimRef.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PersistentVolumeReclaimPolicy = "" + } else { + yyv54 := &x.PersistentVolumeReclaimPolicy + yyv54.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StorageClassName = "" + } else { + yyv55 := &x.StorageClassName + yym56 := z.DecBinary() + _ = yym56 + if false { + } else { + *((*string)(yyv55)) = r.DecodeString() + } + } + for { + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj30-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x PersistentVolumeReclaimPolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PersistentVolumeReclaimPolicy) CodecDecodeSelf(d 
*codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *PersistentVolumeStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Phase != "" + yyq2[1] = x.Message != "" + yyq2[2] = x.Reason != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Phase.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("phase")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Phase.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "phase": + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv4 := &x.Phase + yyv4.CodecDecodeSelf(d) + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv5 := &x.Message + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv7 := &x.Reason + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv10 := &x.Phase + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv11 := &x.Message + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv13 := &x.Reason + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 
int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePersistentVolume(([]PersistentVolume)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePersistentVolume(([]PersistentVolume)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + 
x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePersistentVolume((*[]PersistentVolume)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePersistentVolume((*[]PersistentVolume)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeClaim) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) 
+ } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeClaim) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeClaim) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = PersistentVolumeClaimSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = PersistentVolumeClaimStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeClaim) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = PersistentVolumeClaimSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = PersistentVolumeClaimStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeClaimList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePersistentVolumeClaim(([]PersistentVolumeClaim)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePersistentVolumeClaim(([]PersistentVolumeClaim)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeClaimList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeClaimList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } 
else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeClaimList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeClaimSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() 
&& z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.AccessModes) != 0 + yyq2[1] = x.Selector != nil + yyq2[2] = true + yyq2[3] = x.VolumeName != "" + yyq2[4] = x.StorageClassName != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.AccessModes == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("accessModes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AccessModes == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.Resources + yy10.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resources")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.Resources + yy12.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.StorageClassName == nil { + r.EncodeNil() + } else { + yy18 := *x.StorageClassName + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy18)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("storageClassName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.StorageClassName == nil { + r.EncodeNil() + } else { + yy20 := *x.StorageClassName + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy20)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeClaimSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "accessModes": + if r.TryDecodeAsNil() { + x.AccessModes = nil + } else { + yyv4 := &x.AccessModes + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv4), d) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg2_v1.LabelSelector) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + case "resources": + if r.TryDecodeAsNil() { + x.Resources = ResourceRequirements{} + } else { + yyv8 := &x.Resources + yyv8.CodecDecodeSelf(d) + } + case "volumeName": + if r.TryDecodeAsNil() { + x.VolumeName = "" + } else { + yyv9 := &x.VolumeName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + case "storageClassName": + if r.TryDecodeAsNil() { + if x.StorageClassName != nil { + x.StorageClassName = nil + } + } else { + if x.StorageClassName == nil { + x.StorageClassName = new(string) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(x.StorageClassName)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + 
var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AccessModes = nil + } else { + yyv14 := &x.AccessModes + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv14), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg2_v1.LabelSelector) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Resources = ResourceRequirements{} + } else { + yyv18 := &x.Resources + yyv18.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeName = "" + } else { + yyv19 := &x.VolumeName + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.StorageClassName != nil { + x.StorageClassName = nil + } + } else { + if x.StorageClassName == nil { + x.StorageClassName = new(string) + } + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(x.StorageClassName)) = r.DecodeString() + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeClaimStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Phase != "" + yyq2[1] = len(x.AccessModes) != 0 + yyq2[2] = len(x.Capacity) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Phase.CodecEncodeSelf(e) + } else { + 
r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("phase")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Phase.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.AccessModes == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("accessModes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AccessModes == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Capacity == nil { + r.EncodeNil() + } else { + x.Capacity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("capacity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Capacity == nil { + r.EncodeNil() + } else { + x.Capacity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeClaimStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "phase": + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv4 := &x.Phase + yyv4.CodecDecodeSelf(d) + } + case "accessModes": + if r.TryDecodeAsNil() { + x.AccessModes = nil + } else { + yyv5 := &x.AccessModes + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv5), d) + } + } + case "capacity": + if 
r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv7 := &x.Capacity + yyv7.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv9 := &x.Phase + yyv9.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AccessModes = nil + } else { + yyv10 := &x.AccessModes + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv10), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv12 := &x.Capacity + yyv12.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x PersistentVolumeAccessMode) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PersistentVolumeAccessMode) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x PersistentVolumePhase) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PersistentVolumePhase) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x PersistentVolumeClaimPhase) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PersistentVolumeClaimPhase) 
CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *HostPathVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HostPathVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HostPathVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HostPathVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool 
= l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv7 := &x.Path + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EmptyDirVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Medium != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Medium.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("medium")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Medium.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EmptyDirVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EmptyDirVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "medium": + if r.TryDecodeAsNil() { + x.Medium = "" + } else { + yyv4 := &x.Medium + yyv4.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end 
for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EmptyDirVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Medium = "" + } else { + yyv6 := &x.Medium + yyv6.CodecDecodeSelf(d) + } + for { + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj5-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *GlusterfsVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.EndpointsName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("endpoints")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.EndpointsName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *GlusterfsVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if 
false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *GlusterfsVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "endpoints": + if r.TryDecodeAsNil() { + x.EndpointsName = "" + } else { + yyv4 := &x.EndpointsName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv6 := &x.Path + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *GlusterfsVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.EndpointsName = "" + } else { + yyv11 := &x.EndpointsName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv13 := &x.Path + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if 
yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RBDVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.FSType != "" + yyq2[3] = x.RBDPool != "" + yyq2[4] = x.RadosUser != "" + yyq2[5] = x.Keyring != "" + yyq2[6] = x.SecretRef != nil + yyq2[7] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(8) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.CephMonitors == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.CephMonitors, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("monitors")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CephMonitors == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.CephMonitors, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.RBDImage)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("image")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.RBDImage)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.RBDPool)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("pool")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.RBDPool)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.RadosUser)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("user")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.RadosUser)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Keyring)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("keyring")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Keyring)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RBDVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RBDVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + 
yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "monitors": + if r.TryDecodeAsNil() { + x.CephMonitors = nil + } else { + yyv4 := &x.CephMonitors + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + case "image": + if r.TryDecodeAsNil() { + x.RBDImage = "" + } else { + yyv6 := &x.RBDImage + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv8 := &x.FSType + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "pool": + if r.TryDecodeAsNil() { + x.RBDPool = "" + } else { + yyv10 := &x.RBDPool + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "user": + if r.TryDecodeAsNil() { + x.RadosUser = "" + } else { + yyv12 := &x.RadosUser + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "keyring": + if r.TryDecodeAsNil() { + x.Keyring = "" + } else { + yyv14 := &x.Keyring + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "secretRef": + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv17 := &x.ReadOnly + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*bool)(yyv17)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RBDVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj19 int + var yyb19 bool + var yyhl19 bool = l >= 0 + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CephMonitors = nil + } else { + yyv20 := &x.CephMonitors + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + z.F.DecSliceStringX(yyv20, false, d) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RBDImage = "" + } else { + yyv22 := &x.RBDImage + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*string)(yyv22)) = r.DecodeString() + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv24 := &x.FSType + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + *((*string)(yyv24)) = r.DecodeString() + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RBDPool = "" + } else { + yyv26 := &x.RBDPool + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*string)(yyv26)) = r.DecodeString() + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RadosUser = "" + } else { + yyv28 := &x.RadosUser + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*string)(yyv28)) = r.DecodeString() + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Keyring = "" + } else { + yyv30 := &x.Keyring + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*string)(yyv30)) = r.DecodeString() + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv33 := &x.ReadOnly + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*bool)(yyv33)) = r.DecodeBool() + } + } + for { + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj19-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CinderVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + yyq2[2] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CinderVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CinderVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "volumeID": + if r.TryDecodeAsNil() { + x.VolumeID = "" + } else { + yyv4 := &x.VolumeID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CinderVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeID = "" + } else { + yyv11 := &x.VolumeID + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv13 := &x.FSType + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CephFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Path != "" + yyq2[2] = x.User != "" + yyq2[3] = x.SecretFile != "" + yyq2[4] = x.SecretRef != nil + yyq2[5] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Monitors == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.Monitors, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("monitors")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Monitors == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.Monitors, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + 
yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("user")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretFile)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretFile")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretFile)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CephFSVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CephFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, 
r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "monitors": + if r.TryDecodeAsNil() { + x.Monitors = nil + } else { + yyv4 := &x.Monitors + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv6 := &x.Path + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "user": + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv8 := &x.User + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "secretFile": + if r.TryDecodeAsNil() { + x.SecretFile = "" + } else { + yyv10 := &x.SecretFile + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "secretRef": + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv13 := &x.ReadOnly + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*bool)(yyv13)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CephFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj15 int + var yyb15 bool + var yyhl15 bool = l >= 0 + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Monitors = nil + } else { + yyv16 := &x.Monitors + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + z.F.DecSliceStringX(yyv16, false, d) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv18 := &x.Path + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv20 := &x.User + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(yyv20)) = r.DecodeString() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + 
x.SecretFile = "" + } else { + yyv22 := &x.SecretFile + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*string)(yyv22)) = r.DecodeString() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv25 := &x.ReadOnly + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*bool)(yyv25)) = r.DecodeBool() + } + } + for { + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj15-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *FlockerVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.DatasetName != "" + yyq2[1] = x.DatasetUUID != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DatasetName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("datasetName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DatasetName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DatasetUUID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("datasetUUID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DatasetUUID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *FlockerVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *FlockerVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "datasetName": + if r.TryDecodeAsNil() { + x.DatasetName = "" + } else { + yyv4 := &x.DatasetName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "datasetUUID": + if r.TryDecodeAsNil() { + x.DatasetUUID = "" + } else { + yyv6 := &x.DatasetUUID + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *FlockerVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DatasetName = "" + } else { + yyv9 := &x.DatasetName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DatasetUUID = "" + } else { + yyv11 := &x.DatasetUUID + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x StorageMedium) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *StorageMedium) 
CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x Protocol) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *Protocol) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *GCEPersistentDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + yyq2[2] = x.Partition != 0 + yyq2[3] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PDName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("pdName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PDName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.Partition)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("partition")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.Partition)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] 
{ + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *GCEPersistentDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "pdName": + if r.TryDecodeAsNil() { + x.PDName = "" + } else { + yyv4 := &x.PDName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "partition": + if r.TryDecodeAsNil() { + x.Partition = 0 + } else { + yyv8 := &x.Partition + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv10 := &x.ReadOnly + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PDName = "" + } else { + yyv13 := &x.PDName + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = 
r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv15 := &x.FSType + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Partition = 0 + } else { + yyv17 := &x.Partition + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv19 := &x.ReadOnly + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *QuobyteVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.ReadOnly != false + yyq2[3] = x.User != "" + yyq2[4] = x.Group != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Registry)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("registry")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Registry)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Volume)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Volume)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("user")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Group)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("group")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Group)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *QuobyteVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *QuobyteVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "registry": + if r.TryDecodeAsNil() { + x.Registry = "" + } else { + yyv4 := &x.Registry + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "volume": + if r.TryDecodeAsNil() { + x.Volume = "" + } else { + yyv6 := &x.Volume + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv8 := 
&x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + case "user": + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv10 := &x.User + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "group": + if r.TryDecodeAsNil() { + x.Group = "" + } else { + yyv12 := &x.Group + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *QuobyteVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Registry = "" + } else { + yyv15 := &x.Registry + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Volume = "" + } else { + yyv17 := &x.Volume + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv19 := &x.ReadOnly + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv21 := &x.User + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Group = "" + } else { + yyv23 := &x.Group + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *FlexVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && 
z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + yyq2[2] = x.SecretRef != nil + yyq2[3] = x.ReadOnly != false + yyq2[4] = len(x.Options) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Driver)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("driver")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Driver)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Options == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncMapStringStringV(x.Options, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("options")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Options == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + z.F.EncMapStringStringV(x.Options, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x 
*FlexVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *FlexVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "driver": + if r.TryDecodeAsNil() { + x.Driver = "" + } else { + yyv4 := &x.Driver + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "secretRef": + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv9 := &x.ReadOnly + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*bool)(yyv9)) = r.DecodeBool() + } + } + case "options": + if r.TryDecodeAsNil() { + x.Options = nil + } else { + yyv11 := &x.Options + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + z.F.DecMapStringStringX(yyv11, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *FlexVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Driver = "" + } else { + yyv14 := &x.Driver + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv16 := &x.FSType + yym17 := z.DecBinary() + _ = yym17 + if false { + } 
else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv19 := &x.ReadOnly + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Options = nil + } else { + yyv21 := &x.Options + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + z.F.DecMapStringStringX(yyv21, false, d) + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *AWSElasticBlockStoreVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + yyq2[2] = x.Partition != 0 + yyq2[3] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.Partition)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("partition")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.Partition)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *AWSElasticBlockStoreVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "volumeID": + if r.TryDecodeAsNil() { + x.VolumeID = "" + } else { + yyv4 := &x.VolumeID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "partition": + if r.TryDecodeAsNil() { + x.Partition = 0 + } else { + yyv8 := &x.Partition + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv10 := &x.ReadOnly + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for 
yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeID = "" + } else { + yyv13 := &x.VolumeID + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv15 := &x.FSType + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Partition = 0 + } else { + yyv17 := &x.Partition + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv19 := &x.ReadOnly + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *GitRepoVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Revision != "" + yyq2[2] = x.Directory != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Repository)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("repository")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Repository)) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Revision)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("revision")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Revision)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Directory)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("directory")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Directory)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *GitRepoVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *GitRepoVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "repository": + if r.TryDecodeAsNil() { + x.Repository = "" + } else { + yyv4 := &x.Repository + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "revision": + if r.TryDecodeAsNil() { + x.Revision = "" + } else { + yyv6 := &x.Revision + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "directory": + if r.TryDecodeAsNil() { + x.Directory = "" + } else { + yyv8 := &x.Directory + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + 
+func (x *GitRepoVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Repository = "" + } else { + yyv11 := &x.Repository + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Revision = "" + } else { + yyv13 := &x.Revision + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Directory = "" + } else { + yyv15 := &x.Directory + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SecretVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.SecretName != "" + yyq2[1] = len(x.Items) != 0 + yyq2[2] = x.DefaultMode != nil + yyq2[3] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Items == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy10 := *x.DefaultMode + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("defaultMode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy12 := *x.DefaultMode + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy15 := *x.Optional + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(yy15)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy17 := *x.Optional + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeBool(bool(yy17)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SecretVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SecretVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "secretName": + if r.TryDecodeAsNil() { + x.SecretName = "" + } else { + yyv4 := &x.SecretName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = 
nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv6), d) + } + } + case "defaultMode": + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SecretVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SecretName = "" + } else { + yyv13 := &x.SecretName + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv15 := &x.Items + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv15), d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SecretProjection) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + 
yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = len(x.Items) != 0 + yyq2[2] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Items == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy10 := *x.Optional + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy12 := *x.Optional + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SecretProjection) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SecretProjection) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + 
var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv6), d) + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SecretProjection) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv11 := &x.Name + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } 
else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Server)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("server")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Server)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NFSVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "server": + if r.TryDecodeAsNil() { + x.Server = "" + } else { + yyv4 := &x.Server + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv6 := &x.Path + yym7 := z.DecBinary() + _ 
= yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Server = "" + } else { + yyv11 := &x.Server + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv13 := &x.Path + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ISCSIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[3] = x.ISCSIInterface != "" + yyq2[4] = x.FSType != "" + yyq2[5] = x.ReadOnly != false + yyq2[6] = len(x.Portals) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TargetPortal)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetPortal")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TargetPortal)) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.IQN)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("iqn")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.IQN)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.Lun)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lun")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.Lun)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ISCSIInterface)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("iscsiInterface")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ISCSIInterface)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.Portals == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + z.F.EncSliceStringV(x.Portals, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("portals")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Portals == nil { + r.EncodeNil() + } else { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + z.F.EncSliceStringV(x.Portals, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ISCSIVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ISCSIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "targetPortal": + if r.TryDecodeAsNil() { + x.TargetPortal = "" + } else { + yyv4 := &x.TargetPortal + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "iqn": + if r.TryDecodeAsNil() { + x.IQN = "" + } else { + yyv6 := &x.IQN + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "lun": + if r.TryDecodeAsNil() { + x.Lun = 0 + } else { + yyv8 := &x.Lun + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "iscsiInterface": + if r.TryDecodeAsNil() { + x.ISCSIInterface = "" + } else { + yyv10 := &x.ISCSIInterface + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv12 := &x.FSType + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv14 := &x.ReadOnly + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + case "portals": + if r.TryDecodeAsNil() { + x.Portals = nil + } else { + yyv16 := &x.Portals + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + z.F.DecSliceStringX(yyv16, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetPortal = "" + 
} else { + yyv19 := &x.TargetPortal + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.IQN = "" + } else { + yyv21 := &x.IQN + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Lun = 0 + } else { + yyv23 := &x.Lun + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ISCSIInterface = "" + } else { + yyv25 := &x.ISCSIInterface + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv27 := &x.FSType + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv29 := &x.ReadOnly + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Portals = nil + } else { + yyv31 := &x.Portals + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + z.F.DecSliceStringX(yyv31, false, d) + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *FCVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.FSType != "" + yyq2[3] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, 
b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.TargetWWNs == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.TargetWWNs, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetWWNs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TargetWWNs == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.TargetWWNs, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Lun == nil { + r.EncodeNil() + } else { + yy7 := *x.Lun + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lun")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Lun == nil { + r.EncodeNil() + } else { + yy9 := *x.Lun + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *FCVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *FCVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, 
_ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "targetWWNs": + if r.TryDecodeAsNil() { + x.TargetWWNs = nil + } else { + yyv4 := &x.TargetWWNs + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + case "lun": + if r.TryDecodeAsNil() { + if x.Lun != nil { + x.Lun = nil + } + } else { + if x.Lun == nil { + x.Lun = new(int32) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(x.Lun)) = int32(r.DecodeInt(32)) + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv8 := &x.FSType + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv10 := &x.ReadOnly + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *FCVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetWWNs = nil + } else { + yyv13 := &x.TargetWWNs + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecSliceStringX(yyv13, false, d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Lun != nil { + x.Lun = nil + } + } else { + if x.Lun == nil { + x.Lun = new(int32) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(x.Lun)) = int32(r.DecodeInt(32)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv17 := &x.FSType + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv19 := &x.ReadOnly + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *AzureFileVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ShareName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("shareName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ShareName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *AzureFileVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *AzureFileVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) 
+ _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "secretName": + if r.TryDecodeAsNil() { + x.SecretName = "" + } else { + yyv4 := &x.SecretName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "shareName": + if r.TryDecodeAsNil() { + x.ShareName = "" + } else { + yyv6 := &x.ShareName + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *AzureFileVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SecretName = "" + } else { + yyv11 := &x.SecretName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ShareName = "" + } else { + yyv13 := &x.ShareName + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *VsphereVirtualDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + var yynn2 int + if yyr2 || yy2arr2 { + 
r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumePath)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumePath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumePath)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *VsphereVirtualDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "volumePath": + if r.TryDecodeAsNil() { + x.VolumePath = "" + } else { + yyv4 := &x.VolumePath + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, 
r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumePath = "" + } else { + yyv9 := &x.VolumePath + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv11 := &x.FSType + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PhotonPersistentDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PdID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("pdID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PdID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PhotonPersistentDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := 
r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PhotonPersistentDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "pdID": + if r.TryDecodeAsNil() { + x.PdID = "" + } else { + yyv4 := &x.PdID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PhotonPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PdID = "" + } else { + yyv9 := &x.PdID + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv11 := &x.FSType + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x AzureDataDiskCachingMode) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *AzureDataDiskCachingMode) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = 
r.DecodeString() + } +} + +func (x *AzureDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.CachingMode != nil + yyq2[3] = x.FSType != nil + yyq2[4] = x.ReadOnly != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DiskName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("diskName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DiskName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DataDiskURI)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("diskURI")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DataDiskURI)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.CachingMode == nil { + r.EncodeNil() + } else { + yy10 := *x.CachingMode + yy10.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cachingMode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CachingMode == nil { + r.EncodeNil() + } else { + yy12 := *x.CachingMode + yy12.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.FSType == nil { + r.EncodeNil() + } else { + yy15 := *x.FSType + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy15)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FSType == nil { + r.EncodeNil() + } else { + yy17 := *x.FSType + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy17)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.ReadOnly == nil { + r.EncodeNil() + } else { + yy20 := *x.ReadOnly + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeBool(bool(yy20)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ReadOnly == nil { + r.EncodeNil() + } else { + yy22 := *x.ReadOnly + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeBool(bool(yy22)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *AzureDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *AzureDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "diskName": + if r.TryDecodeAsNil() { + x.DiskName = "" + } else { + yyv4 := &x.DiskName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "diskURI": + if r.TryDecodeAsNil() { + x.DataDiskURI = "" + } else { + yyv6 := &x.DataDiskURI + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "cachingMode": + if r.TryDecodeAsNil() { + if x.CachingMode != nil { + x.CachingMode = nil + } + } else { + if x.CachingMode == nil { + x.CachingMode = new(AzureDataDiskCachingMode) + } + x.CachingMode.CodecDecodeSelf(d) + } + case "fsType": + if r.TryDecodeAsNil() { + if x.FSType != nil { + x.FSType = nil + } + } else { + if x.FSType == nil { + x.FSType = new(string) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(x.FSType)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + if x.ReadOnly != nil { + x.ReadOnly = nil + } + } else { + if x.ReadOnly == nil { + x.ReadOnly = new(bool) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(x.ReadOnly)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *AzureDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } 
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DiskName = "" + } else { + yyv14 := &x.DiskName + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DataDiskURI = "" + } else { + yyv16 := &x.DataDiskURI + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CachingMode != nil { + x.CachingMode = nil + } + } else { + if x.CachingMode == nil { + x.CachingMode = new(AzureDataDiskCachingMode) + } + x.CachingMode.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FSType != nil { + x.FSType = nil + } + } else { + if x.FSType == nil { + x.FSType = new(string) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(x.FSType)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ReadOnly != nil { + x.ReadOnly = nil + } + } else { + if x.ReadOnly == nil { + x.ReadOnly = new(bool) + } + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(x.ReadOnly)) = r.DecodeBool() + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PortworxVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + yyq2[2] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PortworxVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PortworxVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "volumeID": + if r.TryDecodeAsNil() { + x.VolumeID = "" + } else { + yyv4 := &x.VolumeID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x 
*PortworxVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeID = "" + } else { + yyv11 := &x.VolumeID + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv13 := &x.FSType + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ScaleIOVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[3] = x.SSLEnabled != false + yyq2[4] = x.ProtectionDomain != "" + yyq2[5] = x.StoragePool != "" + yyq2[6] = x.StorageMode != "" + yyq2[7] = x.VolumeName != "" + yyq2[8] = x.FSType != "" + yyq2[9] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Gateway)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gateway")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Gateway)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.System)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("system")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + 
yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.System)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.SSLEnabled)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("sslEnabled")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.SSLEnabled)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ProtectionDomain)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("protectionDomain")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ProtectionDomain)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StoragePool)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("storagePool")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StoragePool)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StorageMode)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("storageMode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StorageMode)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := 
z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScaleIOVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScaleIOVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "gateway": + if r.TryDecodeAsNil() { + x.Gateway = "" + } else { + yyv4 := &x.Gateway + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "system": + if r.TryDecodeAsNil() { + x.System = "" + } else { + yyv6 := &x.System + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "secretRef": + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + case "sslEnabled": + if r.TryDecodeAsNil() { + x.SSLEnabled = false + } else { + yyv9 := 
&x.SSLEnabled + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*bool)(yyv9)) = r.DecodeBool() + } + } + case "protectionDomain": + if r.TryDecodeAsNil() { + x.ProtectionDomain = "" + } else { + yyv11 := &x.ProtectionDomain + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + case "storagePool": + if r.TryDecodeAsNil() { + x.StoragePool = "" + } else { + yyv13 := &x.StoragePool + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + case "storageMode": + if r.TryDecodeAsNil() { + x.StorageMode = "" + } else { + yyv15 := &x.StorageMode + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + case "volumeName": + if r.TryDecodeAsNil() { + x.VolumeName = "" + } else { + yyv17 := &x.VolumeName + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv19 := &x.FSType + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv21 := &x.ReadOnly + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(yyv21)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScaleIOVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj23 int + var yyb23 bool + var yyhl23 bool = l >= 0 + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Gateway = "" + } else { + yyv24 := &x.Gateway + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + *((*string)(yyv24)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.System = "" + } else { + yyv26 := &x.System + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*string)(yyv26)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SSLEnabled = false + } else { + yyv29 := &x.SSLEnabled + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = 
r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ProtectionDomain = "" + } else { + yyv31 := &x.ProtectionDomain + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StoragePool = "" + } else { + yyv33 := &x.StoragePool + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*string)(yyv33)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StorageMode = "" + } else { + yyv35 := &x.StorageMode + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*string)(yyv35)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeName = "" + } else { + yyv37 := &x.VolumeName + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*string)(yyv37)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv39 := &x.FSType + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*string)(yyv39)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv41 := &x.ReadOnly + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*bool)(yyv41)) = r.DecodeBool() + } + } + for { + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj23-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = len(x.Items) != 0 + yyq2[2] = x.DefaultMode != nil + yyq2[3] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || 
yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Items == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy10 := *x.DefaultMode + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("defaultMode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy12 := *x.DefaultMode + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy15 := *x.Optional + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(yy15)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy17 := *x.Optional + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeBool(bool(yy17)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ConfigMapVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ConfigMapVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv6), d) + } + } + case "defaultMode": + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ConfigMapVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv13 := &x.Name + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv15 := &x.Items + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv15), d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ConfigMapProjection) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = len(x.Items) != 0 + yyq2[2] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Items == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy10 := *x.Optional + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy12 := *x.Optional + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ConfigMapProjection) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + 
z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ConfigMapProjection) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv6), d) + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ConfigMapProjection) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv11 := &x.Name + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym16 := 
z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ProjectedVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.DefaultMode != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Sources == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceVolumeProjection(([]VolumeProjection)(x.Sources), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("sources")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Sources == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceVolumeProjection(([]VolumeProjection)(x.Sources), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy7 := *x.DefaultMode + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("defaultMode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy9 := *x.DefaultMode + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ProjectedVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ProjectedVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = 
z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "sources": + if r.TryDecodeAsNil() { + x.Sources = nil + } else { + yyv4 := &x.Sources + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceVolumeProjection((*[]VolumeProjection)(yyv4), d) + } + } + case "defaultMode": + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ProjectedVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Sources = nil + } else { + yyv9 := &x.Sources + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceVolumeProjection((*[]VolumeProjection)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *VolumeProjection) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Secret != nil + yyq2[1] = x.DownwardAPI != nil + yyq2[2] = x.ConfigMap != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Secret == nil { + r.EncodeNil() + } else { + x.Secret.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secret")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Secret == nil { + r.EncodeNil() + } else { + x.Secret.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.DownwardAPI == nil { + r.EncodeNil() + } else { + x.DownwardAPI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("downwardAPI")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DownwardAPI == nil { + r.EncodeNil() + } else { + x.DownwardAPI.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ConfigMap == nil { + r.EncodeNil() + } else { + x.ConfigMap.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("configMap")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ConfigMap == nil { + r.EncodeNil() + } else { + x.ConfigMap.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *VolumeProjection) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *VolumeProjection) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "secret": + if r.TryDecodeAsNil() { + if x.Secret != nil { + x.Secret = nil + } + } else { + if x.Secret == nil { + x.Secret = new(SecretProjection) + } + x.Secret.CodecDecodeSelf(d) + } + case "downwardAPI": + if r.TryDecodeAsNil() { + if x.DownwardAPI != nil { + x.DownwardAPI = nil + } + } else { + if x.DownwardAPI == nil { + x.DownwardAPI = new(DownwardAPIProjection) + } + x.DownwardAPI.CodecDecodeSelf(d) + } + case "configMap": + if r.TryDecodeAsNil() { + if x.ConfigMap != nil { + x.ConfigMap = nil + } + } else { + if x.ConfigMap == nil { + x.ConfigMap = new(ConfigMapProjection) + } + x.ConfigMap.CodecDecodeSelf(d) + } + default: + 
z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *VolumeProjection) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Secret != nil { + x.Secret = nil + } + } else { + if x.Secret == nil { + x.Secret = new(SecretProjection) + } + x.Secret.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DownwardAPI != nil { + x.DownwardAPI = nil + } + } else { + if x.DownwardAPI == nil { + x.DownwardAPI = new(DownwardAPIProjection) + } + x.DownwardAPI.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ConfigMap != nil { + x.ConfigMap = nil + } + } else { + if x.ConfigMap == nil { + x.ConfigMap = new(ConfigMapProjection) + } + x.ConfigMap.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *KeyToPath) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.Mode != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Mode == nil { + r.EncodeNil() + } else { + yy10 := *x.Mode + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("mode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Mode == nil { + r.EncodeNil() + } else { + yy12 := *x.Mode + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *KeyToPath) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *KeyToPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv4 := &x.Key + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv6 := &x.Path + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "mode": + if r.TryDecodeAsNil() { + if x.Mode != nil { + x.Mode = nil + } + } else { + if x.Mode == nil { + x.Mode = new(int32) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *KeyToPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else 
{ + yyv11 := &x.Key + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv13 := &x.Path + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Mode != nil { + x.Mode = nil + } + } else { + if x.Mode == nil { + x.Mode = new(int32) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ContainerPort) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = x.HostPort != 0 + yyq2[3] = x.Protocol != "" + yyq2[4] = x.HostIP != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.HostPort)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPort")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.HostPort)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.ContainerPort)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containerPort")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.ContainerPort)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + x.Protocol.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("protocol")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Protocol.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostIP")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerPort) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "hostPort": + if r.TryDecodeAsNil() { + x.HostPort = 0 + } else { + yyv6 := &x.HostPort + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "containerPort": + if r.TryDecodeAsNil() { + x.ContainerPort = 0 + } else { + yyv8 := &x.ContainerPort + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "protocol": + if r.TryDecodeAsNil() { + x.Protocol = "" + } else { + yyv10 := &x.Protocol + yyv10.CodecDecodeSelf(d) + } + case "hostIP": + if r.TryDecodeAsNil() 
{ + x.HostIP = "" + } else { + yyv11 := &x.HostIP + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ContainerPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv14 := &x.Name + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostPort = 0 + } else { + yyv16 := &x.HostPort + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*int32)(yyv16)) = int32(r.DecodeInt(32)) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContainerPort = 0 + } else { + yyv18 := &x.ContainerPort + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*int32)(yyv18)) = int32(r.DecodeInt(32)) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Protocol = "" + } else { + yyv20 := &x.Protocol + yyv20.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostIP = "" + } else { + yyv21 := &x.HostIP + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *VolumeMount) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.ReadOnly != false + yyq2[3] = x.SubPath != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + 
if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MountPath)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("mountPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MountPath)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SubPath)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SubPath)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *VolumeMount) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *VolumeMount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + 
z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv6 := &x.ReadOnly + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } + } + case "mountPath": + if r.TryDecodeAsNil() { + x.MountPath = "" + } else { + yyv8 := &x.MountPath + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "subPath": + if r.TryDecodeAsNil() { + x.SubPath = "" + } else { + yyv10 := &x.SubPath + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *VolumeMount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv13 := &x.Name + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MountPath = "" + } else { + yyv17 := &x.MountPath + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SubPath = "" + } else { + yyv19 := &x.SubPath + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EnvVar) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := 
z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Value != "" + yyq2[2] = x.ValueFrom != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ValueFrom == nil { + r.EncodeNil() + } else { + x.ValueFrom.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("valueFrom")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ValueFrom == nil { + r.EncodeNil() + } else { + x.ValueFrom.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EnvVar) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EnvVar) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, 
true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv6 := &x.Value + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "valueFrom": + if r.TryDecodeAsNil() { + if x.ValueFrom != nil { + x.ValueFrom = nil + } + } else { + if x.ValueFrom == nil { + x.ValueFrom = new(EnvVarSource) + } + x.ValueFrom.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EnvVar) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv10 := &x.Name + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv12 := &x.Value + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ValueFrom != nil { + x.ValueFrom = nil + } + } else { + if x.ValueFrom == nil { + x.ValueFrom = new(EnvVarSource) + } + x.ValueFrom.CodecDecodeSelf(d) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EnvVarSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.FieldRef != nil + yyq2[1] = x.ResourceFieldRef != nil + yyq2[2] = x.ConfigMapKeyRef != nil + yyq2[3] = x.SecretKeyRef != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.FieldRef == nil { + r.EncodeNil() + } else { + x.FieldRef.CodecEncodeSelf(e) + } + } 
else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fieldRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FieldRef == nil { + r.EncodeNil() + } else { + x.FieldRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.ResourceFieldRef == nil { + r.EncodeNil() + } else { + x.ResourceFieldRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceFieldRef == nil { + r.EncodeNil() + } else { + x.ResourceFieldRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ConfigMapKeyRef == nil { + r.EncodeNil() + } else { + x.ConfigMapKeyRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("configMapKeyRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ConfigMapKeyRef == nil { + r.EncodeNil() + } else { + x.ConfigMapKeyRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.SecretKeyRef == nil { + r.EncodeNil() + } else { + x.SecretKeyRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretKeyRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretKeyRef == nil { + r.EncodeNil() + } else { + x.SecretKeyRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EnvVarSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EnvVarSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "fieldRef": + if 
r.TryDecodeAsNil() { + if x.FieldRef != nil { + x.FieldRef = nil + } + } else { + if x.FieldRef == nil { + x.FieldRef = new(ObjectFieldSelector) + } + x.FieldRef.CodecDecodeSelf(d) + } + case "resourceFieldRef": + if r.TryDecodeAsNil() { + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } + } else { + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) + } + case "configMapKeyRef": + if r.TryDecodeAsNil() { + if x.ConfigMapKeyRef != nil { + x.ConfigMapKeyRef = nil + } + } else { + if x.ConfigMapKeyRef == nil { + x.ConfigMapKeyRef = new(ConfigMapKeySelector) + } + x.ConfigMapKeyRef.CodecDecodeSelf(d) + } + case "secretKeyRef": + if r.TryDecodeAsNil() { + if x.SecretKeyRef != nil { + x.SecretKeyRef = nil + } + } else { + if x.SecretKeyRef == nil { + x.SecretKeyRef = new(SecretKeySelector) + } + x.SecretKeyRef.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EnvVarSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FieldRef != nil { + x.FieldRef = nil + } + } else { + if x.FieldRef == nil { + x.FieldRef = new(ObjectFieldSelector) + } + x.FieldRef.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } + } else { + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ConfigMapKeyRef != nil { + x.ConfigMapKeyRef = nil + } + } else { + if x.ConfigMapKeyRef == nil { + x.ConfigMapKeyRef = new(ConfigMapKeySelector) + } + x.ConfigMapKeyRef.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretKeyRef != nil { + x.SecretKeyRef = nil + } + } else { + if x.SecretKeyRef == nil { + x.SecretKeyRef = new(SecretKeySelector) + } + x.SecretKeyRef.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ObjectFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 
:= z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fieldPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ObjectFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ObjectFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv4 := &x.APIVersion + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fieldPath": + if r.TryDecodeAsNil() { + x.FieldPath = "" + } else { + yyv6 := &x.FieldPath + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + 
*((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ObjectFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv9 := &x.APIVersion + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FieldPath = "" + } else { + yyv11 := &x.FieldPath + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ContainerName != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containerName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.Divisor + yym11 := z.EncBinary() + _ = yym11 
+ if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("divisor")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.Divisor + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "containerName": + if r.TryDecodeAsNil() { + x.ContainerName = "" + } else { + yyv4 := &x.ContainerName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "resource": + if r.TryDecodeAsNil() { + x.Resource = "" + } else { + yyv6 := &x.Resource + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "divisor": + if r.TryDecodeAsNil() { + x.Divisor = pkg3_resource.Quantity{} + } else { + yyv8 := &x.Divisor + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContainerName = "" + } else { + yyv11 := &x.ContainerName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Resource = "" + } else { + yyv13 := &x.Resource + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Divisor = pkg3_resource.Quantity{} + } else { + yyv15 := &x.Divisor + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv15) + } else { + z.DecFallback(yyv15, false) + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[2] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy10 := *x.Optional + yym11 := z.EncBinary() + _ = yym11 + if false { 
+ } else { + r.EncodeBool(bool(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy12 := *x.Optional + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ConfigMapKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ConfigMapKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ConfigMapKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv11 := &x.Name + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + 
if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv13 := &x.Key + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SecretKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[2] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy10 := *x.Optional + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy12 := *x.Optional + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + 
r.EncodeBool(bool(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SecretKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SecretKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SecretKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv11 := &x.Name + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv13 := &x.Key + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EnvFromSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Prefix != "" + yyq2[1] = x.ConfigMapRef != nil + yyq2[2] = x.SecretRef != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Prefix)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("prefix")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Prefix)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.ConfigMapRef == nil { + r.EncodeNil() + } else { + x.ConfigMapRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("configMapRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ConfigMapRef == nil { + r.EncodeNil() + } else { + x.ConfigMapRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EnvFromSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == 
codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EnvFromSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "configMapRef": + if r.TryDecodeAsNil() { + if x.ConfigMapRef != nil { + x.ConfigMapRef = nil + } + } else { + if x.ConfigMapRef == nil { + x.ConfigMapRef = new(ConfigMapEnvSource) + } + x.ConfigMapRef.CodecDecodeSelf(d) + } + case "secretRef": + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(SecretEnvSource) + } + x.SecretRef.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EnvFromSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv9 := &x.Prefix + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ConfigMapRef != nil { + x.ConfigMapRef = nil + } + } else { + if x.ConfigMapRef == nil { + x.ConfigMapRef = new(ConfigMapEnvSource) + } + x.ConfigMapRef.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(SecretEnvSource) + } + x.SecretRef.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + 
z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ConfigMapEnvSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy7 := *x.Optional + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy9 := *x.Optional + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ConfigMapEnvSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ConfigMapEnvSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := 
string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ConfigMapEnvSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv9 := &x.Name + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SecretEnvSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy7 := 
*x.Optional + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy9 := *x.Optional + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SecretEnvSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SecretEnvSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SecretEnvSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv9 := &x.Name + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HTTPHeader) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HTTPHeader) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HTTPHeader) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + 
z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv6 := &x.Value + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HTTPHeader) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv9 := &x.Name + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv11 := &x.Value + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HTTPGetAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Path != "" + yyq2[2] = x.Host != "" + yyq2[3] = x.Scheme != "" + yyq2[4] = len(x.HTTPHeaders) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.Port + yym8 := z.EncBinary() + _ = yym8 + 
if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("port")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.Port + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Host)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("host")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Host)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + x.Scheme.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scheme")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Scheme.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.HTTPHeaders == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + h.encSliceHTTPHeader(([]HTTPHeader)(x.HTTPHeaders), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("httpHeaders")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.HTTPHeaders == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encSliceHTTPHeader(([]HTTPHeader)(x.HTTPHeaders), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HTTPGetAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HTTPGetAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; 
; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "port": + if r.TryDecodeAsNil() { + x.Port = pkg4_intstr.IntOrString{} + } else { + yyv6 := &x.Port + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "host": + if r.TryDecodeAsNil() { + x.Host = "" + } else { + yyv8 := &x.Host + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "scheme": + if r.TryDecodeAsNil() { + x.Scheme = "" + } else { + yyv10 := &x.Scheme + yyv10.CodecDecodeSelf(d) + } + case "httpHeaders": + if r.TryDecodeAsNil() { + x.HTTPHeaders = nil + } else { + yyv11 := &x.HTTPHeaders + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceHTTPHeader((*[]HTTPHeader)(yyv11), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HTTPGetAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv14 := &x.Path + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Port = pkg4_intstr.IntOrString{} + } else { + yyv16 := &x.Port + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else if !yym17 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv16) + } else { + z.DecFallback(yyv16, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Host = "" + } else { + yyv18 := &x.Host + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Scheme = "" + } else { + yyv20 := &x.Scheme + yyv20.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HTTPHeaders = nil + } else { + yyv21 := &x.HTTPHeaders + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + h.decSliceHTTPHeader((*[]HTTPHeader)(yyv21), d) + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x URIScheme) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *URIScheme) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *TCPSocketAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.Port + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(yy4) + } else { + z.EncFallback(yy4) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("port")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.Port + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(yy6) + } else { + z.EncFallback(yy6) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *TCPSocketAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *TCPSocketAction) 
codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "port": + if r.TryDecodeAsNil() { + x.Port = pkg4_intstr.IntOrString{} + } else { + yyv4 := &x.Port + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv4) + } else { + z.DecFallback(yyv4, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *TCPSocketAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Port = pkg4_intstr.IntOrString{} + } else { + yyv7 := &x.Port + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, false) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ExecAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Command) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Command == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.Command, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("command")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Command == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.Command, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ExecAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ExecAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "command": + if r.TryDecodeAsNil() { + x.Command = nil + } else { + yyv4 := &x.Command + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ExecAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Command = nil + } else { + yyv7 := &x.Command + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + z.F.DecSliceStringX(yyv7, false, d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Probe) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Handler.Exec != nil && x.Exec != nil + yyq2[1] = x.Handler.HTTPGet != nil && x.HTTPGet != nil + yyq2[2] = x.Handler.TCPSocket != nil && x.TCPSocket != nil + yyq2[3] = x.InitialDelaySeconds != 0 + yyq2[4] = x.TimeoutSeconds != 0 + yyq2[5] = x.PeriodSeconds != 0 + yyq2[6] = x.SuccessThreshold != 0 + yyq2[7] = x.FailureThreshold != 0 + var yynn2 int + if yyr2 || yy2arr2 { + 
r.EncodeArrayStart(8) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + var yyn3 bool + if x.Handler.Exec == nil { + yyn3 = true + goto LABEL3 + } + LABEL3: + if yyr2 || yy2arr2 { + if yyn3 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Exec == nil { + r.EncodeNil() + } else { + x.Exec.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("exec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn3 { + r.EncodeNil() + } else { + if x.Exec == nil { + r.EncodeNil() + } else { + x.Exec.CodecEncodeSelf(e) + } + } + } + } + var yyn6 bool + if x.Handler.HTTPGet == nil { + yyn6 = true + goto LABEL6 + } + LABEL6: + if yyr2 || yy2arr2 { + if yyn6 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.HTTPGet == nil { + r.EncodeNil() + } else { + x.HTTPGet.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("httpGet")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn6 { + r.EncodeNil() + } else { + if x.HTTPGet == nil { + r.EncodeNil() + } else { + x.HTTPGet.CodecEncodeSelf(e) + } + } + } + } + var yyn9 bool + if x.Handler.TCPSocket == nil { + yyn9 = true + goto LABEL9 + } + LABEL9: + if yyr2 || yy2arr2 { + if yyn9 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.TCPSocket == nil { + r.EncodeNil() + } else { + x.TCPSocket.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tcpSocket")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn9 { + r.EncodeNil() + } else { + if x.TCPSocket == nil { + r.EncodeNil() + } else { + x.TCPSocket.CodecEncodeSelf(e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.InitialDelaySeconds)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("initialDelaySeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.InitialDelaySeconds)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.TimeoutSeconds)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("timeoutSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.TimeoutSeconds)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(x.PeriodSeconds)) + } + } 
else { + r.EncodeInt(0) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("periodSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.PeriodSeconds)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeInt(int64(x.SuccessThreshold)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("successThreshold")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeInt(int64(x.SuccessThreshold)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeInt(int64(x.FailureThreshold)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("failureThreshold")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeInt(int64(x.FailureThreshold)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Probe) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Probe) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "exec": + if x.Handler.Exec == nil { + x.Handler.Exec = new(ExecAction) + } + if r.TryDecodeAsNil() { + if x.Exec != nil { + x.Exec = nil + } + } else { + if x.Exec == nil { + x.Exec = new(ExecAction) + } + x.Exec.CodecDecodeSelf(d) + } + case "httpGet": + if x.Handler.HTTPGet == nil { + x.Handler.HTTPGet = new(HTTPGetAction) + } + if r.TryDecodeAsNil() { + if x.HTTPGet != nil { + x.HTTPGet = nil + } + } else { + if x.HTTPGet == nil { + x.HTTPGet = new(HTTPGetAction) + } + x.HTTPGet.CodecDecodeSelf(d) + } + case "tcpSocket": + if 
x.Handler.TCPSocket == nil { + x.Handler.TCPSocket = new(TCPSocketAction) + } + if r.TryDecodeAsNil() { + if x.TCPSocket != nil { + x.TCPSocket = nil + } + } else { + if x.TCPSocket == nil { + x.TCPSocket = new(TCPSocketAction) + } + x.TCPSocket.CodecDecodeSelf(d) + } + case "initialDelaySeconds": + if r.TryDecodeAsNil() { + x.InitialDelaySeconds = 0 + } else { + yyv7 := &x.InitialDelaySeconds + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } + } + case "timeoutSeconds": + if r.TryDecodeAsNil() { + x.TimeoutSeconds = 0 + } else { + yyv9 := &x.TimeoutSeconds + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*int32)(yyv9)) = int32(r.DecodeInt(32)) + } + } + case "periodSeconds": + if r.TryDecodeAsNil() { + x.PeriodSeconds = 0 + } else { + yyv11 := &x.PeriodSeconds + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(yyv11)) = int32(r.DecodeInt(32)) + } + } + case "successThreshold": + if r.TryDecodeAsNil() { + x.SuccessThreshold = 0 + } else { + yyv13 := &x.SuccessThreshold + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*int32)(yyv13)) = int32(r.DecodeInt(32)) + } + } + case "failureThreshold": + if r.TryDecodeAsNil() { + x.FailureThreshold = 0 + } else { + yyv15 := &x.FailureThreshold + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(yyv15)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj17 int + var yyb17 bool + var yyhl17 bool = l >= 0 + if x.Handler.Exec == nil { + x.Handler.Exec = new(ExecAction) + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Exec != nil { + x.Exec = nil + } + } else { + if x.Exec == nil { + x.Exec = new(ExecAction) + } + x.Exec.CodecDecodeSelf(d) + } + if x.Handler.HTTPGet == nil { + x.Handler.HTTPGet = new(HTTPGetAction) + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HTTPGet != nil { + x.HTTPGet = nil + } + } else { + if x.HTTPGet == nil { + x.HTTPGet = new(HTTPGetAction) + } + x.HTTPGet.CodecDecodeSelf(d) + } + if x.Handler.TCPSocket == nil { + x.Handler.TCPSocket = new(TCPSocketAction) + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TCPSocket != nil { + x.TCPSocket = nil + } + } else { + if x.TCPSocket == nil { + x.TCPSocket = new(TCPSocketAction) + } + x.TCPSocket.CodecDecodeSelf(d) + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.InitialDelaySeconds = 0 + } 
else { + yyv21 := &x.InitialDelaySeconds + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TimeoutSeconds = 0 + } else { + yyv23 := &x.TimeoutSeconds + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PeriodSeconds = 0 + } else { + yyv25 := &x.PeriodSeconds + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SuccessThreshold = 0 + } else { + yyv27 := &x.SuccessThreshold + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(yyv27)) = int32(r.DecodeInt(32)) + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FailureThreshold = 0 + } else { + yyv29 := &x.FailureThreshold + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*int32)(yyv29)) = int32(r.DecodeInt(32)) + } + } + for { + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj17-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x PullPolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PullPolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x TerminationMessagePolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *TerminationMessagePolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x Capability) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *Capability) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *Capabilities) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Add) != 0 + yyq2[1] = len(x.Drop) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Add == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceCapability(([]Capability)(x.Add), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("add")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Add == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceCapability(([]Capability)(x.Add), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Drop == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceCapability(([]Capability)(x.Drop), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("drop")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Drop == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceCapability(([]Capability)(x.Drop), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Capabilities) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Capabilities) codecDecodeSelfFromMap(l int, d 
*codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "add": + if r.TryDecodeAsNil() { + x.Add = nil + } else { + yyv4 := &x.Add + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceCapability((*[]Capability)(yyv4), d) + } + } + case "drop": + if r.TryDecodeAsNil() { + x.Drop = nil + } else { + yyv6 := &x.Drop + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceCapability((*[]Capability)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Capabilities) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Add = nil + } else { + yyv9 := &x.Add + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceCapability((*[]Capability)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Drop = nil + } else { + yyv11 := &x.Drop + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceCapability((*[]Capability)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceRequirements) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Limits) != 0 + yyq2[1] = len(x.Requests) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Limits == nil { + r.EncodeNil() + } else { + x.Limits.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("limits")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + 
if x.Limits == nil { + r.EncodeNil() + } else { + x.Limits.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Requests == nil { + r.EncodeNil() + } else { + x.Requests.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("requests")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Requests == nil { + r.EncodeNil() + } else { + x.Requests.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceRequirements) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceRequirements) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "limits": + if r.TryDecodeAsNil() { + x.Limits = nil + } else { + yyv4 := &x.Limits + yyv4.CodecDecodeSelf(d) + } + case "requests": + if r.TryDecodeAsNil() { + x.Requests = nil + } else { + yyv5 := &x.Requests + yyv5.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceRequirements) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Limits = nil + } else { + yyv7 := &x.Limits + yyv7.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Requests = nil + } else { + yyv8 := &x.Requests + yyv8.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + 
} + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [20]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Image != "" + yyq2[2] = len(x.Command) != 0 + yyq2[3] = len(x.Args) != 0 + yyq2[4] = x.WorkingDir != "" + yyq2[5] = len(x.Ports) != 0 + yyq2[6] = len(x.EnvFrom) != 0 + yyq2[7] = len(x.Env) != 0 + yyq2[8] = true + yyq2[9] = len(x.VolumeMounts) != 0 + yyq2[10] = x.LivenessProbe != nil + yyq2[11] = x.ReadinessProbe != nil + yyq2[12] = x.Lifecycle != nil + yyq2[13] = x.TerminationMessagePath != "" + yyq2[14] = x.TerminationMessagePolicy != "" + yyq2[15] = x.ImagePullPolicy != "" + yyq2[16] = x.SecurityContext != nil + yyq2[17] = x.Stdin != false + yyq2[18] = x.StdinOnce != false + yyq2[19] = x.TTY != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(20) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Image)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("image")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Image)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Command == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + z.F.EncSliceStringV(x.Command, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("command")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Command == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + z.F.EncSliceStringV(x.Command, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Args == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + z.F.EncSliceStringV(x.Args, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if 
yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("args")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Args == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + z.F.EncSliceStringV(x.Args, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.WorkingDir)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("workingDir")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.WorkingDir)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.Ports == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encSliceContainerPort(([]ContainerPort)(x.Ports), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ports")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ports == nil { + r.EncodeNil() + } else { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + h.encSliceContainerPort(([]ContainerPort)(x.Ports), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.EnvFrom == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + h.encSliceEnvFromSource(([]EnvFromSource)(x.EnvFrom), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("envFrom")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.EnvFrom == nil { + r.EncodeNil() + } else { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + h.encSliceEnvFromSource(([]EnvFromSource)(x.EnvFrom), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.Env == nil { + r.EncodeNil() + } else { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + h.encSliceEnvVar(([]EnvVar)(x.Env), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("env")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Env == nil { + r.EncodeNil() + } else { + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + h.encSliceEnvVar(([]EnvVar)(x.Env), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + yy28 := &x.Resources + yy28.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resources")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy30 := &x.Resources + yy30.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.VolumeMounts == nil { + r.EncodeNil() + } else { + yym33 := z.EncBinary() + _ = yym33 + if false { + } else { + h.encSliceVolumeMount(([]VolumeMount)(x.VolumeMounts), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeMounts")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VolumeMounts == nil { + r.EncodeNil() + } else { + yym34 := z.EncBinary() + _ = yym34 + if false { + } else { + h.encSliceVolumeMount(([]VolumeMount)(x.VolumeMounts), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.LivenessProbe == nil { + r.EncodeNil() + } else { + x.LivenessProbe.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("livenessProbe")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LivenessProbe == nil { + r.EncodeNil() + } else { + x.LivenessProbe.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + if x.ReadinessProbe == nil { + r.EncodeNil() + } else { + x.ReadinessProbe.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readinessProbe")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ReadinessProbe == nil { + r.EncodeNil() + } else { + x.ReadinessProbe.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + if x.Lifecycle == nil { + r.EncodeNil() + } else { + x.Lifecycle.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lifecycle")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Lifecycle == nil { + r.EncodeNil() + } else { + x.Lifecycle.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + yym45 := z.EncBinary() + _ = yym45 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TerminationMessagePath)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("terminationMessagePath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym46 := z.EncBinary() + _ = yym46 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TerminationMessagePath)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[14] { + x.TerminationMessagePolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[14] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("terminationMessagePolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.TerminationMessagePolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[15] { + x.ImagePullPolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[15] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("imagePullPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.ImagePullPolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[16] { + if x.SecurityContext == nil { + r.EncodeNil() + } else { + x.SecurityContext.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[16] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("securityContext")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecurityContext == nil { + r.EncodeNil() + } else { + x.SecurityContext.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[17] { + yym57 := z.EncBinary() + _ = yym57 + if false { + } else { + r.EncodeBool(bool(x.Stdin)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[17] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stdin")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym58 := z.EncBinary() + _ = yym58 + if false { + } else { + r.EncodeBool(bool(x.Stdin)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[18] { + yym60 := z.EncBinary() + _ = yym60 + if false { + } else { + r.EncodeBool(bool(x.StdinOnce)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[18] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stdinOnce")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym61 := z.EncBinary() + _ = yym61 + if false { + } else { + r.EncodeBool(bool(x.StdinOnce)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[19] { + yym63 := z.EncBinary() + _ = yym63 + if false { + } else { + r.EncodeBool(bool(x.TTY)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[19] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tty")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym64 := z.EncBinary() + _ = yym64 + if false { + } else { + r.EncodeBool(bool(x.TTY)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Container) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + 
} +} + +func (x *Container) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "image": + if r.TryDecodeAsNil() { + x.Image = "" + } else { + yyv6 := &x.Image + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "command": + if r.TryDecodeAsNil() { + x.Command = nil + } else { + yyv8 := &x.Command + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceStringX(yyv8, false, d) + } + } + case "args": + if r.TryDecodeAsNil() { + x.Args = nil + } else { + yyv10 := &x.Args + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + z.F.DecSliceStringX(yyv10, false, d) + } + } + case "workingDir": + if r.TryDecodeAsNil() { + x.WorkingDir = "" + } else { + yyv12 := &x.WorkingDir + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "ports": + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv14 := &x.Ports + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + h.decSliceContainerPort((*[]ContainerPort)(yyv14), d) + } + } + case "envFrom": + if r.TryDecodeAsNil() { + x.EnvFrom = nil + } else { + yyv16 := &x.EnvFrom + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + h.decSliceEnvFromSource((*[]EnvFromSource)(yyv16), d) + } + } + case "env": + if r.TryDecodeAsNil() { + x.Env = nil + } else { + yyv18 := &x.Env + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + h.decSliceEnvVar((*[]EnvVar)(yyv18), d) + } + } + case "resources": + if r.TryDecodeAsNil() { + x.Resources = ResourceRequirements{} + } else { + yyv20 := &x.Resources + yyv20.CodecDecodeSelf(d) + } + case "volumeMounts": + if r.TryDecodeAsNil() { + x.VolumeMounts = nil + } else { + yyv21 := &x.VolumeMounts + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + h.decSliceVolumeMount((*[]VolumeMount)(yyv21), d) + } + } + case "livenessProbe": + if r.TryDecodeAsNil() { + if x.LivenessProbe != nil { + x.LivenessProbe = nil + } + } else { + if x.LivenessProbe == nil { + x.LivenessProbe = new(Probe) + } + x.LivenessProbe.CodecDecodeSelf(d) + } + case "readinessProbe": + if r.TryDecodeAsNil() { + if x.ReadinessProbe != nil { + x.ReadinessProbe = nil + } + } else { + if x.ReadinessProbe == nil { + x.ReadinessProbe = new(Probe) + } + x.ReadinessProbe.CodecDecodeSelf(d) + } + case "lifecycle": + if r.TryDecodeAsNil() { + if x.Lifecycle != nil { + x.Lifecycle = nil + } + } else { + if x.Lifecycle == nil { + x.Lifecycle = new(Lifecycle) + } + x.Lifecycle.CodecDecodeSelf(d) + } + case "terminationMessagePath": + if r.TryDecodeAsNil() { + x.TerminationMessagePath = "" + } else { + yyv26 := &x.TerminationMessagePath + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*string)(yyv26)) = r.DecodeString() + } + } + case "terminationMessagePolicy": + if 
r.TryDecodeAsNil() { + x.TerminationMessagePolicy = "" + } else { + yyv28 := &x.TerminationMessagePolicy + yyv28.CodecDecodeSelf(d) + } + case "imagePullPolicy": + if r.TryDecodeAsNil() { + x.ImagePullPolicy = "" + } else { + yyv29 := &x.ImagePullPolicy + yyv29.CodecDecodeSelf(d) + } + case "securityContext": + if r.TryDecodeAsNil() { + if x.SecurityContext != nil { + x.SecurityContext = nil + } + } else { + if x.SecurityContext == nil { + x.SecurityContext = new(SecurityContext) + } + x.SecurityContext.CodecDecodeSelf(d) + } + case "stdin": + if r.TryDecodeAsNil() { + x.Stdin = false + } else { + yyv31 := &x.Stdin + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*bool)(yyv31)) = r.DecodeBool() + } + } + case "stdinOnce": + if r.TryDecodeAsNil() { + x.StdinOnce = false + } else { + yyv33 := &x.StdinOnce + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*bool)(yyv33)) = r.DecodeBool() + } + } + case "tty": + if r.TryDecodeAsNil() { + x.TTY = false + } else { + yyv35 := &x.TTY + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*bool)(yyv35)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj37 int + var yyb37 bool + var yyhl37 bool = l >= 0 + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv38 := &x.Name + yym39 := z.DecBinary() + _ = yym39 + if false { + } else { + *((*string)(yyv38)) = r.DecodeString() + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Image = "" + } else { + yyv40 := &x.Image + yym41 := z.DecBinary() + _ = yym41 + if false { + } else { + *((*string)(yyv40)) = r.DecodeString() + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Command = nil + } else { + yyv42 := &x.Command + yym43 := z.DecBinary() + _ = yym43 + if false { + } else { + z.F.DecSliceStringX(yyv42, false, d) + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Args = nil + } else { + yyv44 := &x.Args + yym45 := z.DecBinary() + _ = yym45 + if false { + } else { + z.F.DecSliceStringX(yyv44, false, d) + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.WorkingDir = "" + } else { + yyv46 := &x.WorkingDir + yym47 := z.DecBinary() + _ = yym47 + if false { + } else { + 
*((*string)(yyv46)) = r.DecodeString() + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv48 := &x.Ports + yym49 := z.DecBinary() + _ = yym49 + if false { + } else { + h.decSliceContainerPort((*[]ContainerPort)(yyv48), d) + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.EnvFrom = nil + } else { + yyv50 := &x.EnvFrom + yym51 := z.DecBinary() + _ = yym51 + if false { + } else { + h.decSliceEnvFromSource((*[]EnvFromSource)(yyv50), d) + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Env = nil + } else { + yyv52 := &x.Env + yym53 := z.DecBinary() + _ = yym53 + if false { + } else { + h.decSliceEnvVar((*[]EnvVar)(yyv52), d) + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Resources = ResourceRequirements{} + } else { + yyv54 := &x.Resources + yyv54.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeMounts = nil + } else { + yyv55 := &x.VolumeMounts + yym56 := z.DecBinary() + _ = yym56 + if false { + } else { + h.decSliceVolumeMount((*[]VolumeMount)(yyv55), d) + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LivenessProbe != nil { + x.LivenessProbe = nil + } + } else { + if x.LivenessProbe == nil { + x.LivenessProbe = new(Probe) + } + x.LivenessProbe.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ReadinessProbe != nil { + x.ReadinessProbe = nil + } + } else { + if x.ReadinessProbe == nil { + x.ReadinessProbe = new(Probe) + } + x.ReadinessProbe.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Lifecycle != nil { + x.Lifecycle = nil + } + } else { + if x.Lifecycle == nil { + x.Lifecycle = new(Lifecycle) + } + x.Lifecycle.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TerminationMessagePath = "" + } else { + yyv60 := &x.TerminationMessagePath + yym61 := z.DecBinary() + _ = yym61 + if false { + } else { + *((*string)(yyv60)) = r.DecodeString() + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TerminationMessagePolicy = "" + } else { + yyv62 := &x.TerminationMessagePolicy + yyv62.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ImagePullPolicy = "" + } else { + yyv63 := &x.ImagePullPolicy + yyv63.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecurityContext != nil { + x.SecurityContext = nil + } + } else { + if x.SecurityContext == nil { + x.SecurityContext = new(SecurityContext) + } + x.SecurityContext.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Stdin = false + } else { + yyv65 := &x.Stdin + yym66 := z.DecBinary() + _ = yym66 + if false { + } else { + *((*bool)(yyv65)) = r.DecodeBool() + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StdinOnce = false + } else { + yyv67 := &x.StdinOnce + yym68 := z.DecBinary() + _ = yym68 + if false { + } else { + *((*bool)(yyv67)) = r.DecodeBool() + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TTY = false + } else { + yyv69 := &x.TTY + yym70 := z.DecBinary() + _ = yym70 + if false { + } else { + *((*bool)(yyv69)) = r.DecodeBool() + } + } + for { + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj37-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Handler) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Exec != nil + yyq2[1] = 
x.HTTPGet != nil + yyq2[2] = x.TCPSocket != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Exec == nil { + r.EncodeNil() + } else { + x.Exec.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("exec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Exec == nil { + r.EncodeNil() + } else { + x.Exec.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.HTTPGet == nil { + r.EncodeNil() + } else { + x.HTTPGet.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("httpGet")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.HTTPGet == nil { + r.EncodeNil() + } else { + x.HTTPGet.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.TCPSocket == nil { + r.EncodeNil() + } else { + x.TCPSocket.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tcpSocket")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TCPSocket == nil { + r.EncodeNil() + } else { + x.TCPSocket.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Handler) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Handler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "exec": + if r.TryDecodeAsNil() { + if x.Exec != nil { + x.Exec = nil + } + } else { + if x.Exec == nil { + x.Exec = new(ExecAction) + } + x.Exec.CodecDecodeSelf(d) + } + case "httpGet": + if r.TryDecodeAsNil() { + if x.HTTPGet != nil { + 
x.HTTPGet = nil + } + } else { + if x.HTTPGet == nil { + x.HTTPGet = new(HTTPGetAction) + } + x.HTTPGet.CodecDecodeSelf(d) + } + case "tcpSocket": + if r.TryDecodeAsNil() { + if x.TCPSocket != nil { + x.TCPSocket = nil + } + } else { + if x.TCPSocket == nil { + x.TCPSocket = new(TCPSocketAction) + } + x.TCPSocket.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Handler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Exec != nil { + x.Exec = nil + } + } else { + if x.Exec == nil { + x.Exec = new(ExecAction) + } + x.Exec.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HTTPGet != nil { + x.HTTPGet = nil + } + } else { + if x.HTTPGet == nil { + x.HTTPGet = new(HTTPGetAction) + } + x.HTTPGet.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TCPSocket != nil { + x.TCPSocket = nil + } + } else { + if x.TCPSocket == nil { + x.TCPSocket = new(TCPSocketAction) + } + x.TCPSocket.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Lifecycle) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.PostStart != nil + yyq2[1] = x.PreStop != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.PostStart == nil { + r.EncodeNil() + } else { + x.PostStart.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("postStart")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PostStart == nil { + r.EncodeNil() + } else { + x.PostStart.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PreStop == nil { + 
r.EncodeNil() + } else { + x.PreStop.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preStop")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreStop == nil { + r.EncodeNil() + } else { + x.PreStop.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Lifecycle) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Lifecycle) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "postStart": + if r.TryDecodeAsNil() { + if x.PostStart != nil { + x.PostStart = nil + } + } else { + if x.PostStart == nil { + x.PostStart = new(Handler) + } + x.PostStart.CodecDecodeSelf(d) + } + case "preStop": + if r.TryDecodeAsNil() { + if x.PreStop != nil { + x.PreStop = nil + } + } else { + if x.PreStop == nil { + x.PreStop = new(Handler) + } + x.PreStop.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Lifecycle) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PostStart != nil { + x.PostStart = nil + } + } else { + if x.PostStart == nil { + x.PostStart = new(Handler) + } + x.PostStart.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PreStop != nil { + x.PreStop = nil + } + } else { + if x.PreStop == nil { + x.PreStop = new(Handler) + } + x.PreStop.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + 
} else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ConditionStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ConditionStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *ContainerStateWaiting) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Reason != "" + yyq2[1] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerStateWaiting) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerStateWaiting) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv4 := &x.Reason + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv6 := &x.Message + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ContainerStateWaiting) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv9 := &x.Reason + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv11 := &x.Message + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ContainerStateRunning) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.StartedAt + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && 
z.EncExt(yy4) { + } else if yym5 { + z.EncBinaryMarshal(yy4) + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(yy4) + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("startedAt")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.StartedAt + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else if yym7 { + z.EncBinaryMarshal(yy6) + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(yy6) + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerStateRunning) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerStateRunning) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "startedAt": + if r.TryDecodeAsNil() { + x.StartedAt = pkg2_v1.Time{} + } else { + yyv4 := &x.StartedAt + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if yym5 { + z.DecBinaryUnmarshal(yyv4) + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv4) + } else { + z.DecFallback(yyv4, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ContainerStateRunning) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StartedAt = pkg2_v1.Time{} + } else { + yyv7 := &x.StartedAt + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if yym8 { + z.DecBinaryUnmarshal(yyv7) + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, 
false) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ContainerStateTerminated) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Signal != 0 + yyq2[2] = x.Reason != "" + yyq2[3] = x.Message != "" + yyq2[4] = true + yyq2[5] = true + yyq2[6] = x.ContainerID != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.ExitCode)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("exitCode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.ExitCode)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.Signal)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("signal")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.Signal)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy16 := &x.StartedAt + yym17 := z.EncBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.EncExt(yy16) { + } 
else if yym17 { + z.EncBinaryMarshal(yy16) + } else if !yym17 && z.IsJSONHandle() { + z.EncJSONMarshal(yy16) + } else { + z.EncFallback(yy16) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("startedAt")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy18 := &x.StartedAt + yym19 := z.EncBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.EncExt(yy18) { + } else if yym19 { + z.EncBinaryMarshal(yy18) + } else if !yym19 && z.IsJSONHandle() { + z.EncJSONMarshal(yy18) + } else { + z.EncFallback(yy18) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yy21 := &x.FinishedAt + yym22 := z.EncBinary() + _ = yym22 + if false { + } else if z.HasExtensions() && z.EncExt(yy21) { + } else if yym22 { + z.EncBinaryMarshal(yy21) + } else if !yym22 && z.IsJSONHandle() { + z.EncJSONMarshal(yy21) + } else { + z.EncFallback(yy21) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("finishedAt")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy23 := &x.FinishedAt + yym24 := z.EncBinary() + _ = yym24 + if false { + } else if z.HasExtensions() && z.EncExt(yy23) { + } else if yym24 { + z.EncBinaryMarshal(yy23) + } else if !yym24 && z.IsJSONHandle() { + z.EncJSONMarshal(yy23) + } else { + z.EncFallback(yy23) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containerID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym27 := z.EncBinary() + _ = yym27 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerStateTerminated) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerStateTerminated) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + 
z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "exitCode": + if r.TryDecodeAsNil() { + x.ExitCode = 0 + } else { + yyv4 := &x.ExitCode + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "signal": + if r.TryDecodeAsNil() { + x.Signal = 0 + } else { + yyv6 := &x.Signal + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv8 := &x.Reason + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv10 := &x.Message + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "startedAt": + if r.TryDecodeAsNil() { + x.StartedAt = pkg2_v1.Time{} + } else { + yyv12 := &x.StartedAt + yym13 := z.DecBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.DecExt(yyv12) { + } else if yym13 { + z.DecBinaryUnmarshal(yyv12) + } else if !yym13 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv12) + } else { + z.DecFallback(yyv12, false) + } + } + case "finishedAt": + if r.TryDecodeAsNil() { + x.FinishedAt = pkg2_v1.Time{} + } else { + yyv14 := &x.FinishedAt + yym15 := z.DecBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.DecExt(yyv14) { + } else if yym15 { + z.DecBinaryUnmarshal(yyv14) + } else if !yym15 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv14) + } else { + z.DecFallback(yyv14, false) + } + } + case "containerID": + if r.TryDecodeAsNil() { + x.ContainerID = "" + } else { + yyv16 := &x.ContainerID + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ContainerStateTerminated) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ExitCode = 0 + } else { + yyv19 := &x.ExitCode + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int32)(yyv19)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Signal = 0 + } else { + yyv21 := &x.Signal + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv23 := &x.Reason + yym24 := 
z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv25 := &x.Message + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StartedAt = pkg2_v1.Time{} + } else { + yyv27 := &x.StartedAt + yym28 := z.DecBinary() + _ = yym28 + if false { + } else if z.HasExtensions() && z.DecExt(yyv27) { + } else if yym28 { + z.DecBinaryUnmarshal(yyv27) + } else if !yym28 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv27) + } else { + z.DecFallback(yyv27, false) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FinishedAt = pkg2_v1.Time{} + } else { + yyv29 := &x.FinishedAt + yym30 := z.DecBinary() + _ = yym30 + if false { + } else if z.HasExtensions() && z.DecExt(yyv29) { + } else if yym30 { + z.DecBinaryUnmarshal(yyv29) + } else if !yym30 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv29) + } else { + z.DecFallback(yyv29, false) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContainerID = "" + } else { + yyv31 := &x.ContainerID + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ContainerState) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Waiting != nil + yyq2[1] = x.Running != nil + yyq2[2] = x.Terminated != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Waiting == nil { + r.EncodeNil() + } else { + x.Waiting.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("waiting")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Waiting == nil { + r.EncodeNil() + } else { + x.Waiting.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Running == nil { + r.EncodeNil() + } else { + x.Running.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("running")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Running == nil { + r.EncodeNil() + } else { + x.Running.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Terminated == nil { + r.EncodeNil() + } else { + x.Terminated.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("terminated")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Terminated == nil { + r.EncodeNil() + } else { + x.Terminated.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerState) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerState) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "waiting": + if r.TryDecodeAsNil() { + if x.Waiting != nil { + x.Waiting = nil + } + } else { + if x.Waiting == nil { + x.Waiting = new(ContainerStateWaiting) + } + x.Waiting.CodecDecodeSelf(d) + } + case "running": + if r.TryDecodeAsNil() { + if x.Running != nil { + x.Running = nil + } + } else { + if x.Running == nil { + x.Running = new(ContainerStateRunning) + } + x.Running.CodecDecodeSelf(d) + } + case "terminated": + if r.TryDecodeAsNil() { + if x.Terminated != nil { + x.Terminated = nil + } + } else { + if x.Terminated == nil { + x.Terminated = new(ContainerStateTerminated) + } + x.Terminated.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x 
*ContainerState) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Waiting != nil { + x.Waiting = nil + } + } else { + if x.Waiting == nil { + x.Waiting = new(ContainerStateWaiting) + } + x.Waiting.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Running != nil { + x.Running = nil + } + } else { + if x.Running == nil { + x.Running = new(ContainerStateRunning) + } + x.Running.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Terminated != nil { + x.Terminated = nil + } + } else { + if x.Terminated == nil { + x.Terminated = new(ContainerStateTerminated) + } + x.Terminated.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ContainerStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = true + yyq2[2] = true + yyq2[7] = x.ContainerID != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(8) + } else { + yynn2 = 5 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy7 := &x.State + yy7.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("state")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.State + yy9.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy12 := &x.LastTerminationState + 
yy12.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastState")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.LastTerminationState + yy14.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Ready)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ready")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeBool(bool(x.Ready)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.RestartCount)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("restartCount")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeInt(int64(x.RestartCount)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Image)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("image")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Image)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ImageID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("imageID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym27 := z.EncBinary() + _ = yym27 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ImageID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containerID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym30 := z.EncBinary() + _ = yym30 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "state": + if r.TryDecodeAsNil() { + x.State = ContainerState{} + } else { + yyv6 := &x.State + yyv6.CodecDecodeSelf(d) + } + case "lastState": + if r.TryDecodeAsNil() { + x.LastTerminationState = ContainerState{} + } else { + yyv7 := &x.LastTerminationState + yyv7.CodecDecodeSelf(d) + } + case "ready": + if r.TryDecodeAsNil() { + x.Ready = false + } else { + yyv8 := &x.Ready + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + case "restartCount": + if r.TryDecodeAsNil() { + x.RestartCount = 0 + } else { + yyv10 := &x.RestartCount + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "image": + if r.TryDecodeAsNil() { + x.Image = "" + } else { + yyv12 := &x.Image + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "imageID": + if r.TryDecodeAsNil() { + x.ImageID = "" + } else { + yyv14 := &x.ImageID + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "containerID": + if r.TryDecodeAsNil() { + x.ContainerID = "" + } else { + yyv16 := &x.ContainerID + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ContainerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv19 := &x.Name + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + 
if r.TryDecodeAsNil() { + x.State = ContainerState{} + } else { + yyv21 := &x.State + yyv21.CodecDecodeSelf(d) + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTerminationState = ContainerState{} + } else { + yyv22 := &x.LastTerminationState + yyv22.CodecDecodeSelf(d) + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ready = false + } else { + yyv23 := &x.Ready + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(yyv23)) = r.DecodeBool() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RestartCount = 0 + } else { + yyv25 := &x.RestartCount + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Image = "" + } else { + yyv27 := &x.Image + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ImageID = "" + } else { + yyv29 := &x.ImageID + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContainerID = "" + } else { + yyv31 := &x.ContainerID + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x PodPhase) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PodPhase) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x PodConditionType) CodecEncodeSelf(e 
*codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PodConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *PodCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Status.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Status.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastProbeTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastProbeTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.LastTransitionTime + yym18 := 
z.EncBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } else if !yym18 && z.IsJSONHandle() { + z.EncJSONMarshal(yy17) + } else { + z.EncFallback(yy17) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "lastProbeTime": + if r.TryDecodeAsNil() { + x.LastProbeTime = pkg2_v1.Time{} + } else { + yyv6 := &x.LastProbeTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && 
z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "lastTransitionTime": + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_v1.Time{} + } else { + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv10 := &x.Reason + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv12 := &x.Message + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv15 := &x.Type + yyv15.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv16 := &x.Status + yyv16.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastProbeTime = pkg2_v1.Time{} + } else { + yyv17 := &x.LastProbeTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_v1.Time{} + } else { + yyv19 := &x.LastTransitionTime + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if yym20 { + z.DecBinaryUnmarshal(yyv19) + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) + } else { + z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv21 := &x.Reason + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = 
r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv23 := &x.Message + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x RestartPolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *RestartPolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x DNSPolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *DNSPolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *NodeSelector) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.NodeSelectorTerms == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceNodeSelectorTerm(([]NodeSelectorTerm)(x.NodeSelectorTerms), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodeSelectorTerms")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NodeSelectorTerms == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceNodeSelectorTerm(([]NodeSelectorTerm)(x.NodeSelectorTerms), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeSelector) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "nodeSelectorTerms": + if r.TryDecodeAsNil() { + x.NodeSelectorTerms = nil + } else { + yyv4 := &x.NodeSelectorTerms + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NodeSelectorTerms = nil + } else { + yyv7 := &x.NodeSelectorTerms + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeSelectorTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.MatchExpressions == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + 
h.encSliceNodeSelectorRequirement(([]NodeSelectorRequirement)(x.MatchExpressions), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("matchExpressions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MatchExpressions == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceNodeSelectorRequirement(([]NodeSelectorRequirement)(x.MatchExpressions), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeSelectorTerm) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeSelectorTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "matchExpressions": + if r.TryDecodeAsNil() { + x.MatchExpressions = nil + } else { + yyv4 := &x.MatchExpressions + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeSelectorTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MatchExpressions = nil + } else { + yyv7 := &x.MatchExpressions + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeSelectorRequirement) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + 
z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = len(x.Values) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Operator.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("operator")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Operator.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Values == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + z.F.EncSliceStringV(x.Values, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("values")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Values == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + z.F.EncSliceStringV(x.Values, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeSelectorRequirement) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeSelectorRequirement) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) 
+ yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv4 := &x.Key + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "operator": + if r.TryDecodeAsNil() { + x.Operator = "" + } else { + yyv6 := &x.Operator + yyv6.CodecDecodeSelf(d) + } + case "values": + if r.TryDecodeAsNil() { + x.Values = nil + } else { + yyv7 := &x.Values + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + z.F.DecSliceStringX(yyv7, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeSelectorRequirement) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv10 := &x.Key + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Operator = "" + } else { + yyv12 := &x.Operator + yyv12.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Values = nil + } else { + yyv13 := &x.Values + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecSliceStringX(yyv13, false, d) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x NodeSelectorOperator) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *NodeSelectorOperator) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.NodeAffinity != nil + yyq2[1] = 
x.PodAffinity != nil + yyq2[2] = x.PodAntiAffinity != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.NodeAffinity == nil { + r.EncodeNil() + } else { + x.NodeAffinity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodeAffinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NodeAffinity == nil { + r.EncodeNil() + } else { + x.NodeAffinity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PodAffinity == nil { + r.EncodeNil() + } else { + x.PodAffinity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podAffinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PodAffinity == nil { + r.EncodeNil() + } else { + x.PodAffinity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.PodAntiAffinity == nil { + r.EncodeNil() + } else { + x.PodAntiAffinity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podAntiAffinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PodAntiAffinity == nil { + r.EncodeNil() + } else { + x.PodAntiAffinity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Affinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Affinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "nodeAffinity": + if r.TryDecodeAsNil() { + if x.NodeAffinity != nil { + x.NodeAffinity = nil + } + } else { + if x.NodeAffinity == nil { 
+ x.NodeAffinity = new(NodeAffinity) + } + x.NodeAffinity.CodecDecodeSelf(d) + } + case "podAffinity": + if r.TryDecodeAsNil() { + if x.PodAffinity != nil { + x.PodAffinity = nil + } + } else { + if x.PodAffinity == nil { + x.PodAffinity = new(PodAffinity) + } + x.PodAffinity.CodecDecodeSelf(d) + } + case "podAntiAffinity": + if r.TryDecodeAsNil() { + if x.PodAntiAffinity != nil { + x.PodAntiAffinity = nil + } + } else { + if x.PodAntiAffinity == nil { + x.PodAntiAffinity = new(PodAntiAffinity) + } + x.PodAntiAffinity.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Affinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NodeAffinity != nil { + x.NodeAffinity = nil + } + } else { + if x.NodeAffinity == nil { + x.NodeAffinity = new(NodeAffinity) + } + x.NodeAffinity.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PodAffinity != nil { + x.PodAffinity = nil + } + } else { + if x.PodAffinity == nil { + x.PodAffinity = new(PodAffinity) + } + x.PodAffinity.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PodAntiAffinity != nil { + x.PodAntiAffinity = nil + } + } else { + if x.PodAntiAffinity == nil { + x.PodAntiAffinity = new(PodAntiAffinity) + } + x.PodAntiAffinity.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAffinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 + yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + 
h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodAffinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "requiredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) + } + } + case "preferredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + 
x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAntiAffinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 + yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + 
h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodAntiAffinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAntiAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "requiredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) + } + } + case "preferredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAntiAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *WeightedPodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("weight")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.PodAffinityTerm + yy7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podAffinityTerm")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.PodAffinityTerm + yy9.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *WeightedPodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *WeightedPodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "weight": + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + yyv4 := &x.Weight + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "podAffinityTerm": + if r.TryDecodeAsNil() { + x.PodAffinityTerm = PodAffinityTerm{} + } else { + yyv6 := &x.PodAffinityTerm + yyv6.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *WeightedPodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + yyv8 := &x.Weight + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodAffinityTerm = PodAffinityTerm{} + } else { + yyv10 := &x.PodAffinityTerm + yyv10.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.LabelSelector != nil + yyq2[1] = len(x.Namespaces) != 0 + yyq2[2] = x.TopologyKey != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } 
+ if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.LabelSelector == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { + } else { + z.EncFallback(x.LabelSelector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("labelSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LabelSelector == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { + } else { + z.EncFallback(x.LabelSelector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Namespaces == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncSliceStringV(x.Namespaces, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespaces")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Namespaces == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncSliceStringV(x.Namespaces, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("topologyKey")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 
:= string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "labelSelector": + if r.TryDecodeAsNil() { + if x.LabelSelector != nil { + x.LabelSelector = nil + } + } else { + if x.LabelSelector == nil { + x.LabelSelector = new(pkg2_v1.LabelSelector) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { + } else { + z.DecFallback(x.LabelSelector, false) + } + } + case "namespaces": + if r.TryDecodeAsNil() { + x.Namespaces = nil + } else { + yyv6 := &x.Namespaces + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecSliceStringX(yyv6, false, d) + } + } + case "topologyKey": + if r.TryDecodeAsNil() { + x.TopologyKey = "" + } else { + yyv8 := &x.TopologyKey + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LabelSelector != nil { + x.LabelSelector = nil + } + } else { + if x.LabelSelector == nil { + x.LabelSelector = new(pkg2_v1.LabelSelector) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { + } else { + z.DecFallback(x.LabelSelector, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespaces = nil + } else { + yyv13 := &x.Namespaces + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecSliceStringX(yyv13, false, d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TopologyKey = "" + } else { + yyv15 := &x.TopologyKey + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.RequiredDuringSchedulingIgnoredDuringExecution != nil + yyq2[1] = 
len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeAffinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "requiredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + if 
x.RequiredDuringSchedulingIgnoredDuringExecution != nil { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + } else { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) + } + x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) + } + case "preferredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv5 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv5), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RequiredDuringSchedulingIgnoredDuringExecution != nil { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + } else { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) + } + x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv9 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv9), d) + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PreferredSchedulingTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("weight")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + 
r.EncodeInt(int64(x.Weight)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.Preference + yy7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preference")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.Preference + yy9.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PreferredSchedulingTerm) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PreferredSchedulingTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "weight": + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + yyv4 := &x.Weight + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "preference": + if r.TryDecodeAsNil() { + x.Preference = NodeSelectorTerm{} + } else { + yyv6 := &x.Preference + yyv6.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PreferredSchedulingTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + yyv8 := &x.Weight + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Preference = NodeSelectorTerm{} + } else { + yyv10 := &x.Preference + yyv10.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = 
r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Taint) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Value != "" + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Effect.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("effect")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Effect.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy13 := &x.TimeAdded + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(yy13) { + } else if yym14 { + z.EncBinaryMarshal(yy13) + } else if !yym14 && z.IsJSONHandle() { + z.EncJSONMarshal(yy13) + } else { + z.EncFallback(yy13) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("timeAdded")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy15 := &x.TimeAdded + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Taint) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := 
z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Taint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv4 := &x.Key + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv6 := &x.Value + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "effect": + if r.TryDecodeAsNil() { + x.Effect = "" + } else { + yyv8 := &x.Effect + yyv8.CodecDecodeSelf(d) + } + case "timeAdded": + if r.TryDecodeAsNil() { + x.TimeAdded = pkg2_v1.Time{} + } else { + yyv9 := &x.TimeAdded + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if yym10 { + z.DecBinaryUnmarshal(yyv9) + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv9) + } else { + z.DecFallback(yyv9, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Taint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv12 := &x.Key + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv14 := &x.Value + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Effect 
= "" + } else { + yyv16 := &x.Effect + yyv16.CodecDecodeSelf(d) + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TimeAdded = pkg2_v1.Time{} + } else { + yyv17 := &x.TimeAdded + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x TaintEffect) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *TaintEffect) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *Toleration) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Key != "" + yyq2[1] = x.Operator != "" + yyq2[2] = x.Value != "" + yyq2[3] = x.Effect != "" + yyq2[4] = x.TolerationSeconds != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + x.Operator.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("operator")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Operator.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + x.Effect.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("effect")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Effect.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.TolerationSeconds == nil { + r.EncodeNil() + } else { + yy16 := *x.TolerationSeconds + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(yy16)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tolerationSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TolerationSeconds == nil { + r.EncodeNil() + } else { + yy18 := *x.TolerationSeconds + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(yy18)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Toleration) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Toleration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv4 := &x.Key + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "operator": + if r.TryDecodeAsNil() { + x.Operator = "" + } else { + yyv6 := &x.Operator + yyv6.CodecDecodeSelf(d) + } + case "value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv7 := &x.Value + 
yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + case "effect": + if r.TryDecodeAsNil() { + x.Effect = "" + } else { + yyv9 := &x.Effect + yyv9.CodecDecodeSelf(d) + } + case "tolerationSeconds": + if r.TryDecodeAsNil() { + if x.TolerationSeconds != nil { + x.TolerationSeconds = nil + } + } else { + if x.TolerationSeconds == nil { + x.TolerationSeconds = new(int64) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int64)(x.TolerationSeconds)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Toleration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv13 := &x.Key + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Operator = "" + } else { + yyv15 := &x.Operator + yyv15.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv16 := &x.Value + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Effect = "" + } else { + yyv18 := &x.Effect + yyv18.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TolerationSeconds != nil { + x.TolerationSeconds = nil + } + } else { + if x.TolerationSeconds == nil { + x.TolerationSeconds = new(int64) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int64)(x.TolerationSeconds)) = int64(r.DecodeInt(64)) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x TolerationOperator) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *TolerationOperator) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [22]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Volumes) != 0 + yyq2[1] = len(x.InitContainers) != 0 + yyq2[3] = x.RestartPolicy != "" + yyq2[4] = x.TerminationGracePeriodSeconds != nil + yyq2[5] = x.ActiveDeadlineSeconds != nil + yyq2[6] = x.DNSPolicy != "" + yyq2[7] = len(x.NodeSelector) != 0 + yyq2[8] = x.ServiceAccountName != "" + yyq2[9] = x.DeprecatedServiceAccount != "" + yyq2[10] = x.AutomountServiceAccountToken != nil + yyq2[11] = x.NodeName != "" + yyq2[12] = x.HostNetwork != false + yyq2[13] = x.HostPID != false + yyq2[14] = x.HostIPC != false + yyq2[15] = x.SecurityContext != nil + yyq2[16] = len(x.ImagePullSecrets) != 0 + yyq2[17] = x.Hostname != "" + yyq2[18] = x.Subdomain != "" + yyq2[19] = x.Affinity != nil + yyq2[20] = x.SchedulerName != "" + yyq2[21] = len(x.Tolerations) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(22) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Volumes == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceVolume(([]Volume)(x.Volumes), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Volumes == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceVolume(([]Volume)(x.Volumes), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.InitContainers == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceContainer(([]Container)(x.InitContainers), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("initContainers")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.InitContainers == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceContainer(([]Container)(x.InitContainers), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Containers == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceContainer(([]Container)(x.Containers), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containers")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Containers == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + h.encSliceContainer(([]Container)(x.Containers), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + x.RestartPolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("restartPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.RestartPolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.TerminationGracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy16 := *x.TerminationGracePeriodSeconds + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(yy16)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("terminationGracePeriodSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TerminationGracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy18 := *x.TerminationGracePeriodSeconds + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(yy18)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.ActiveDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy21 := *x.ActiveDeadlineSeconds + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeInt(int64(yy21)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ActiveDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy23 := *x.ActiveDeadlineSeconds + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeInt(int64(yy23)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + x.DNSPolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("dnsPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.DNSPolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.NodeSelector == nil { + r.EncodeNil() + } else { + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + z.F.EncMapStringStringV(x.NodeSelector, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodeSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NodeSelector == nil { + r.EncodeNil() + } else { + yym30 := z.EncBinary() + _ = yym30 + if false { + } else { + z.F.EncMapStringStringV(x.NodeSelector, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("serviceAccountName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym33 := z.EncBinary() + _ = yym33 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + yym35 := z.EncBinary() + _ = yym35 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DeprecatedServiceAccount)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("serviceAccount")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym36 := z.EncBinary() + _ = yym36 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DeprecatedServiceAccount)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.AutomountServiceAccountToken == nil { + r.EncodeNil() + } else { + yy38 := *x.AutomountServiceAccountToken + yym39 := z.EncBinary() + _ = yym39 + if false { + } else { + r.EncodeBool(bool(yy38)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("automountServiceAccountToken")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AutomountServiceAccountToken == nil { + r.EncodeNil() + } else { + yy40 := *x.AutomountServiceAccountToken + yym41 := z.EncBinary() + _ = yym41 + if false { + } else { + r.EncodeBool(bool(yy40)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + yym43 := z.EncBinary() + _ = yym43 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.NodeName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodeName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym44 := z.EncBinary() + _ = yym44 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.NodeName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + yym46 := z.EncBinary() + _ = yym46 + if false { + } else { + r.EncodeBool(bool(x.HostNetwork)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostNetwork")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym47 := z.EncBinary() + _ = yym47 + if false { + } else { + r.EncodeBool(bool(x.HostNetwork)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + yym49 := z.EncBinary() + _ = yym49 + if false { + } else { + r.EncodeBool(bool(x.HostPID)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPID")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym50 := z.EncBinary() + _ = yym50 + if false { + } else { + r.EncodeBool(bool(x.HostPID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[14] { + yym52 := z.EncBinary() + _ = yym52 + if false { + } else { + r.EncodeBool(bool(x.HostIPC)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[14] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostIPC")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym53 := z.EncBinary() + _ = yym53 + if false { + } else { + r.EncodeBool(bool(x.HostIPC)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[15] { + if x.SecurityContext == nil { + r.EncodeNil() + } else { + x.SecurityContext.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[15] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("securityContext")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecurityContext == nil { + r.EncodeNil() + } else { + x.SecurityContext.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[16] { + if x.ImagePullSecrets == nil { + r.EncodeNil() + } else { + yym58 := z.EncBinary() + _ = yym58 + if false { + } else { + h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[16] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("imagePullSecrets")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ImagePullSecrets == nil { + r.EncodeNil() + } else { + yym59 := z.EncBinary() + _ = yym59 + if false { + } else { + h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[17] { + yym61 := z.EncBinary() + _ = yym61 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[17] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostname")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym62 := z.EncBinary() + _ = yym62 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[18] { + yym64 := z.EncBinary() + _ = yym64 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[18] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subdomain")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym65 := z.EncBinary() + _ = yym65 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[19] { + if x.Affinity == nil { + r.EncodeNil() + } else { + x.Affinity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[19] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("affinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Affinity == nil { + r.EncodeNil() + } else { + x.Affinity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[20] { + yym70 := z.EncBinary() + _ = yym70 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[20] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("schedulerName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym71 := z.EncBinary() + _ = yym71 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[21] { + if x.Tolerations == nil { + r.EncodeNil() + } else { + yym73 := z.EncBinary() + _ = yym73 + if false { + } else { + h.encSliceToleration(([]Toleration)(x.Tolerations), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[21] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tolerations")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Tolerations == nil { + r.EncodeNil() + } else { + yym74 := z.EncBinary() + _ = yym74 + if false { + } else { + h.encSliceToleration(([]Toleration)(x.Tolerations), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "volumes": + if r.TryDecodeAsNil() { + x.Volumes = nil + } else { + yyv4 := &x.Volumes + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceVolume((*[]Volume)(yyv4), d) + } + } + case "initContainers": + if r.TryDecodeAsNil() { + x.InitContainers = nil + } else { + yyv6 := &x.InitContainers + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + 
h.decSliceContainer((*[]Container)(yyv6), d) + } + } + case "containers": + if r.TryDecodeAsNil() { + x.Containers = nil + } else { + yyv8 := &x.Containers + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + h.decSliceContainer((*[]Container)(yyv8), d) + } + } + case "restartPolicy": + if r.TryDecodeAsNil() { + x.RestartPolicy = "" + } else { + yyv10 := &x.RestartPolicy + yyv10.CodecDecodeSelf(d) + } + case "terminationGracePeriodSeconds": + if r.TryDecodeAsNil() { + if x.TerminationGracePeriodSeconds != nil { + x.TerminationGracePeriodSeconds = nil + } + } else { + if x.TerminationGracePeriodSeconds == nil { + x.TerminationGracePeriodSeconds = new(int64) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64)) + } + } + case "activeDeadlineSeconds": + if r.TryDecodeAsNil() { + if x.ActiveDeadlineSeconds != nil { + x.ActiveDeadlineSeconds = nil + } + } else { + if x.ActiveDeadlineSeconds == nil { + x.ActiveDeadlineSeconds = new(int64) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + case "dnsPolicy": + if r.TryDecodeAsNil() { + x.DNSPolicy = "" + } else { + yyv15 := &x.DNSPolicy + yyv15.CodecDecodeSelf(d) + } + case "nodeSelector": + if r.TryDecodeAsNil() { + x.NodeSelector = nil + } else { + yyv16 := &x.NodeSelector + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + z.F.DecMapStringStringX(yyv16, false, d) + } + } + case "serviceAccountName": + if r.TryDecodeAsNil() { + x.ServiceAccountName = "" + } else { + yyv18 := &x.ServiceAccountName + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + case "serviceAccount": + if r.TryDecodeAsNil() { + x.DeprecatedServiceAccount = "" + } else { + yyv20 := &x.DeprecatedServiceAccount + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(yyv20)) = r.DecodeString() + } + } + case "automountServiceAccountToken": + if r.TryDecodeAsNil() { + if x.AutomountServiceAccountToken != nil { + x.AutomountServiceAccountToken = nil + } + } else { + if x.AutomountServiceAccountToken == nil { + x.AutomountServiceAccountToken = new(bool) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*bool)(x.AutomountServiceAccountToken)) = r.DecodeBool() + } + } + case "nodeName": + if r.TryDecodeAsNil() { + x.NodeName = "" + } else { + yyv24 := &x.NodeName + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + *((*string)(yyv24)) = r.DecodeString() + } + } + case "hostNetwork": + if r.TryDecodeAsNil() { + x.HostNetwork = false + } else { + yyv26 := &x.HostNetwork + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*bool)(yyv26)) = r.DecodeBool() + } + } + case "hostPID": + if r.TryDecodeAsNil() { + x.HostPID = false + } else { + yyv28 := &x.HostPID + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*bool)(yyv28)) = r.DecodeBool() + } + } + case "hostIPC": + if r.TryDecodeAsNil() { + x.HostIPC = false + } else { + yyv30 := &x.HostIPC + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*bool)(yyv30)) = r.DecodeBool() + } + } + case "securityContext": + if r.TryDecodeAsNil() { + if x.SecurityContext != nil { + x.SecurityContext = nil + } + } else { + if x.SecurityContext == nil { + x.SecurityContext = new(PodSecurityContext) + } + x.SecurityContext.CodecDecodeSelf(d) + } + case "imagePullSecrets": + if r.TryDecodeAsNil() { + x.ImagePullSecrets = 
nil + } else { + yyv33 := &x.ImagePullSecrets + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv33), d) + } + } + case "hostname": + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + yyv35 := &x.Hostname + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*string)(yyv35)) = r.DecodeString() + } + } + case "subdomain": + if r.TryDecodeAsNil() { + x.Subdomain = "" + } else { + yyv37 := &x.Subdomain + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*string)(yyv37)) = r.DecodeString() + } + } + case "affinity": + if r.TryDecodeAsNil() { + if x.Affinity != nil { + x.Affinity = nil + } + } else { + if x.Affinity == nil { + x.Affinity = new(Affinity) + } + x.Affinity.CodecDecodeSelf(d) + } + case "schedulerName": + if r.TryDecodeAsNil() { + x.SchedulerName = "" + } else { + yyv40 := &x.SchedulerName + yym41 := z.DecBinary() + _ = yym41 + if false { + } else { + *((*string)(yyv40)) = r.DecodeString() + } + } + case "tolerations": + if r.TryDecodeAsNil() { + x.Tolerations = nil + } else { + yyv42 := &x.Tolerations + yym43 := z.DecBinary() + _ = yym43 + if false { + } else { + h.decSliceToleration((*[]Toleration)(yyv42), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj44 int + var yyb44 bool + var yyhl44 bool = l >= 0 + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Volumes = nil + } else { + yyv45 := &x.Volumes + yym46 := z.DecBinary() + _ = yym46 + if false { + } else { + h.decSliceVolume((*[]Volume)(yyv45), d) + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.InitContainers = nil + } else { + yyv47 := &x.InitContainers + yym48 := z.DecBinary() + _ = yym48 + if false { + } else { + h.decSliceContainer((*[]Container)(yyv47), d) + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Containers = nil + } else { + yyv49 := &x.Containers + yym50 := z.DecBinary() + _ = yym50 + if false { + } else { + h.decSliceContainer((*[]Container)(yyv49), d) + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RestartPolicy = "" + } else { + yyv51 := &x.RestartPolicy + yyv51.CodecDecodeSelf(d) + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if 
x.TerminationGracePeriodSeconds != nil { + x.TerminationGracePeriodSeconds = nil + } + } else { + if x.TerminationGracePeriodSeconds == nil { + x.TerminationGracePeriodSeconds = new(int64) + } + yym53 := z.DecBinary() + _ = yym53 + if false { + } else { + *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ActiveDeadlineSeconds != nil { + x.ActiveDeadlineSeconds = nil + } + } else { + if x.ActiveDeadlineSeconds == nil { + x.ActiveDeadlineSeconds = new(int64) + } + yym55 := z.DecBinary() + _ = yym55 + if false { + } else { + *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DNSPolicy = "" + } else { + yyv56 := &x.DNSPolicy + yyv56.CodecDecodeSelf(d) + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NodeSelector = nil + } else { + yyv57 := &x.NodeSelector + yym58 := z.DecBinary() + _ = yym58 + if false { + } else { + z.F.DecMapStringStringX(yyv57, false, d) + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ServiceAccountName = "" + } else { + yyv59 := &x.ServiceAccountName + yym60 := z.DecBinary() + _ = yym60 + if false { + } else { + *((*string)(yyv59)) = r.DecodeString() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DeprecatedServiceAccount = "" + } else { + yyv61 := &x.DeprecatedServiceAccount + yym62 := z.DecBinary() + _ = yym62 + if false { + } else { + *((*string)(yyv61)) = r.DecodeString() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AutomountServiceAccountToken != nil { + x.AutomountServiceAccountToken = nil + } + } else { + if x.AutomountServiceAccountToken == nil { + x.AutomountServiceAccountToken = new(bool) + } + yym64 := z.DecBinary() + _ = yym64 + if false { + } else { + *((*bool)(x.AutomountServiceAccountToken)) = r.DecodeBool() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NodeName = "" + } else { + yyv65 := &x.NodeName + yym66 := z.DecBinary() + _ = yym66 + if false { + } else { + *((*string)(yyv65)) = 
r.DecodeString() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostNetwork = false + } else { + yyv67 := &x.HostNetwork + yym68 := z.DecBinary() + _ = yym68 + if false { + } else { + *((*bool)(yyv67)) = r.DecodeBool() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostPID = false + } else { + yyv69 := &x.HostPID + yym70 := z.DecBinary() + _ = yym70 + if false { + } else { + *((*bool)(yyv69)) = r.DecodeBool() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostIPC = false + } else { + yyv71 := &x.HostIPC + yym72 := z.DecBinary() + _ = yym72 + if false { + } else { + *((*bool)(yyv71)) = r.DecodeBool() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecurityContext != nil { + x.SecurityContext = nil + } + } else { + if x.SecurityContext == nil { + x.SecurityContext = new(PodSecurityContext) + } + x.SecurityContext.CodecDecodeSelf(d) + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ImagePullSecrets = nil + } else { + yyv74 := &x.ImagePullSecrets + yym75 := z.DecBinary() + _ = yym75 + if false { + } else { + h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv74), d) + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + yyv76 := &x.Hostname + yym77 := z.DecBinary() + _ = yym77 + if false { + } else { + *((*string)(yyv76)) = r.DecodeString() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subdomain = "" + } else { + yyv78 := &x.Subdomain + yym79 := z.DecBinary() + _ = yym79 + if false { + } else { + *((*string)(yyv78)) = r.DecodeString() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Affinity != nil { + x.Affinity = nil + } + } else { + if x.Affinity == nil { + x.Affinity = new(Affinity) + } + x.Affinity.CodecDecodeSelf(d) + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + 
if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SchedulerName = "" + } else { + yyv81 := &x.SchedulerName + yym82 := z.DecBinary() + _ = yym82 + if false { + } else { + *((*string)(yyv81)) = r.DecodeString() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Tolerations = nil + } else { + yyv83 := &x.Tolerations + yym84 := z.DecBinary() + _ = yym84 + if false { + } else { + h.decSliceToleration((*[]Toleration)(yyv83), d) + } + } + for { + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj44-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodSecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.SELinuxOptions != nil + yyq2[1] = x.RunAsUser != nil + yyq2[2] = x.RunAsNonRoot != nil + yyq2[3] = len(x.SupplementalGroups) != 0 + yyq2[4] = x.FSGroup != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.SELinuxOptions == nil { + r.EncodeNil() + } else { + x.SELinuxOptions.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SELinuxOptions == nil { + r.EncodeNil() + } else { + x.SELinuxOptions.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.RunAsUser == nil { + r.EncodeNil() + } else { + yy7 := *x.RunAsUser + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RunAsUser == nil { + r.EncodeNil() + } else { + yy9 := *x.RunAsUser + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.RunAsNonRoot == nil { + r.EncodeNil() + } else { + yy12 := *x.RunAsNonRoot + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(yy12)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("runAsNonRoot")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RunAsNonRoot == nil { + r.EncodeNil() + } else { + yy14 := *x.RunAsNonRoot + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeBool(bool(yy14)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.SupplementalGroups == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + z.F.EncSliceInt64V(x.SupplementalGroups, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("supplementalGroups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SupplementalGroups == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + z.F.EncSliceInt64V(x.SupplementalGroups, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.FSGroup == nil { + r.EncodeNil() + } else { + yy20 := *x.FSGroup + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeInt(int64(yy20)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsGroup")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FSGroup == nil { + r.EncodeNil() + } else { + yy22 := *x.FSGroup + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeInt(int64(yy22)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodSecurityContext) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodSecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "seLinuxOptions": + if r.TryDecodeAsNil() { + if x.SELinuxOptions != nil { + x.SELinuxOptions = nil + } + } else { + if x.SELinuxOptions == nil { + x.SELinuxOptions = new(SELinuxOptions) + } + x.SELinuxOptions.CodecDecodeSelf(d) + } + case "runAsUser": + if r.TryDecodeAsNil() { + if x.RunAsUser != nil { + 
x.RunAsUser = nil + } + } else { + if x.RunAsUser == nil { + x.RunAsUser = new(int64) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) + } + } + case "runAsNonRoot": + if r.TryDecodeAsNil() { + if x.RunAsNonRoot != nil { + x.RunAsNonRoot = nil + } + } else { + if x.RunAsNonRoot == nil { + x.RunAsNonRoot = new(bool) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() + } + } + case "supplementalGroups": + if r.TryDecodeAsNil() { + x.SupplementalGroups = nil + } else { + yyv9 := &x.SupplementalGroups + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + z.F.DecSliceInt64X(yyv9, false, d) + } + } + case "fsGroup": + if r.TryDecodeAsNil() { + if x.FSGroup != nil { + x.FSGroup = nil + } + } else { + if x.FSGroup == nil { + x.FSGroup = new(int64) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int64)(x.FSGroup)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodSecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SELinuxOptions != nil { + x.SELinuxOptions = nil + } + } else { + if x.SELinuxOptions == nil { + x.SELinuxOptions = new(SELinuxOptions) + } + x.SELinuxOptions.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RunAsUser != nil { + x.RunAsUser = nil + } + } else { + if x.RunAsUser == nil { + x.RunAsUser = new(int64) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RunAsNonRoot != nil { + x.RunAsNonRoot = nil + } + } else { + if x.RunAsNonRoot == nil { + x.RunAsNonRoot = new(bool) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SupplementalGroups = nil + } else { + yyv19 := &x.SupplementalGroups + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + z.F.DecSliceInt64X(yyv19, false, d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FSGroup 
!= nil { + x.FSGroup = nil + } + } else { + if x.FSGroup == nil { + x.FSGroup = new(int64) + } + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int64)(x.FSGroup)) = int64(r.DecodeInt(64)) + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x PodQOSClass) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PodQOSClass) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *PodStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Phase != "" + yyq2[1] = len(x.Conditions) != 0 + yyq2[2] = x.Message != "" + yyq2[3] = x.Reason != "" + yyq2[4] = x.HostIP != "" + yyq2[5] = x.PodIP != "" + yyq2[6] = x.StartTime != nil + yyq2[7] = len(x.InitContainerStatuses) != 0 + yyq2[8] = len(x.ContainerStatuses) != 0 + yyq2[9] = x.QOSClass != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Phase.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("phase")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Phase.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSlicePodCondition(([]PodCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSlicePodCondition(([]PodCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostIP")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PodIP)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podIP")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PodIP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.StartTime == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else if z.HasExtensions() && z.EncExt(x.StartTime) { + } else if yym22 { + z.EncBinaryMarshal(x.StartTime) + } else if !yym22 && z.IsJSONHandle() { + z.EncJSONMarshal(x.StartTime) + } else { + z.EncFallback(x.StartTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("startTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.StartTime == nil { + r.EncodeNil() + } else { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else if z.HasExtensions() && z.EncExt(x.StartTime) { + } else if yym23 { + z.EncBinaryMarshal(x.StartTime) + } else if !yym23 && z.IsJSONHandle() { + z.EncJSONMarshal(x.StartTime) + } else { + z.EncFallback(x.StartTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.InitContainerStatuses == nil { + r.EncodeNil() + } else { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + h.encSliceContainerStatus(([]ContainerStatus)(x.InitContainerStatuses), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("initContainerStatuses")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.InitContainerStatuses == nil { + r.EncodeNil() + } else { + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + h.encSliceContainerStatus(([]ContainerStatus)(x.InitContainerStatuses), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.ContainerStatuses == nil { + r.EncodeNil() + } else { + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + h.encSliceContainerStatus(([]ContainerStatus)(x.ContainerStatuses), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containerStatuses")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ContainerStatuses == nil { + r.EncodeNil() + } else { + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + h.encSliceContainerStatus(([]ContainerStatus)(x.ContainerStatuses), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + x.QOSClass.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("qosClass")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.QOSClass.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "phase": + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv4 := &x.Phase + yyv4.CodecDecodeSelf(d) + } + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv5 := &x.Conditions + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSlicePodCondition((*[]PodCondition)(yyv5), d) + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else 
{ + yyv7 := &x.Message + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv9 := &x.Reason + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + case "hostIP": + if r.TryDecodeAsNil() { + x.HostIP = "" + } else { + yyv11 := &x.HostIP + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + case "podIP": + if r.TryDecodeAsNil() { + x.PodIP = "" + } else { + yyv13 := &x.PodIP + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + case "startTime": + if r.TryDecodeAsNil() { + if x.StartTime != nil { + x.StartTime = nil + } + } else { + if x.StartTime == nil { + x.StartTime = new(pkg2_v1.Time) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(x.StartTime) { + } else if yym16 { + z.DecBinaryUnmarshal(x.StartTime) + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.StartTime) + } else { + z.DecFallback(x.StartTime, false) + } + } + case "initContainerStatuses": + if r.TryDecodeAsNil() { + x.InitContainerStatuses = nil + } else { + yyv17 := &x.InitContainerStatuses + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + h.decSliceContainerStatus((*[]ContainerStatus)(yyv17), d) + } + } + case "containerStatuses": + if r.TryDecodeAsNil() { + x.ContainerStatuses = nil + } else { + yyv19 := &x.ContainerStatuses + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceContainerStatus((*[]ContainerStatus)(yyv19), d) + } + } + case "qosClass": + if r.TryDecodeAsNil() { + x.QOSClass = "" + } else { + yyv21 := &x.QOSClass + yyv21.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj22 int + var yyb22 bool + var yyhl22 bool = l >= 0 + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv23 := &x.Phase + yyv23.CodecDecodeSelf(d) + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv24 := &x.Conditions + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + h.decSlicePodCondition((*[]PodCondition)(yyv24), d) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv26 := &x.Message + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*string)(yyv26)) = r.DecodeString() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv28 := &x.Reason + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*string)(yyv28)) = r.DecodeString() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostIP = "" + } else { + yyv30 := &x.HostIP + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*string)(yyv30)) = r.DecodeString() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodIP = "" + } else { + yyv32 := &x.PodIP + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + *((*string)(yyv32)) = r.DecodeString() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.StartTime != nil { + x.StartTime = nil + } + } else { + if x.StartTime == nil { + x.StartTime = new(pkg2_v1.Time) + } + yym35 := z.DecBinary() + _ = yym35 + if false { + } else if z.HasExtensions() && z.DecExt(x.StartTime) { + } else if yym35 { + z.DecBinaryUnmarshal(x.StartTime) + } else if !yym35 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.StartTime) + } else { + z.DecFallback(x.StartTime, false) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.InitContainerStatuses = nil + } else { + yyv36 := &x.InitContainerStatuses + yym37 := z.DecBinary() + _ = yym37 + if false { + } else { + h.decSliceContainerStatus((*[]ContainerStatus)(yyv36), d) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContainerStatuses = nil + } else { + yyv38 := &x.ContainerStatuses + yym39 := z.DecBinary() + _ = yym39 + if false { + } else { + h.decSliceContainerStatus((*[]ContainerStatus)(yyv38), d) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.QOSClass = "" + } else { + yyv40 := &x.QOSClass + yyv40.CodecDecodeSelf(d) + } + for { + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj22-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodStatusResult) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + 
r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Status + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Status + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodStatusResult) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else 
if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodStatusResult) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "status": + if r.TryDecodeAsNil() { + x.Status = PodStatus{} + } else { + yyv10 := &x.Status + yyv10.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodStatusResult) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } 
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = PodStatus{} + } else { + yyv18 := &x.Status + yyv18.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Pod) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Pod) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Pod) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = PodSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = PodStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Pod) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + 
_ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = PodSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = PodStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") 
+ } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePod(([]Pod)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePod(([]Pod)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if 
r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePod((*[]Pod)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePod((*[]Pod)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = PodSpec{} + } else { + yyv6 := &x.Spec + yyv6.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = PodSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodTemplate) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || 
yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Template + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Template + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodTemplate) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = PodTemplateSpec{} + } else { + yyv10 := &x.Template + yyv10.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = 
r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = PodTemplateSpec{} + } else { + yyv18 := &x.Template + yyv18.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodTemplateList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePodTemplate(([]PodTemplate)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePodTemplate(([]PodTemplate)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodTemplateList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodTemplateList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items 
+ yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePodTemplate((*[]PodTemplate)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodTemplateList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePodTemplate((*[]PodTemplate)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicationControllerSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != nil + yyq2[1] = x.MinReadySeconds != 0 + yyq2[2] = len(x.Selector) != 0 + yyq2[3] = x.Template != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Replicas == nil { + r.EncodeNil() + } else { + yy4 := *x.Replicas + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + 
r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Replicas == nil { + r.EncodeNil() + } else { + yy6 := *x.Replicas + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + z.F.EncMapStringStringV(x.Selector, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + z.F.EncMapStringStringV(x.Selector, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Template == nil { + r.EncodeNil() + } else { + x.Template.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Template == nil { + r.EncodeNil() + } else { + x.Template.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicationControllerSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicationControllerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + 
break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + case "minReadySeconds": + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv6 := &x.MinReadySeconds + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "selector": + if r.TryDecodeAsNil() { + x.Selector = nil + } else { + yyv8 := &x.Selector + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecMapStringStringX(yyv8, false, d) + } + } + case "template": + if r.TryDecodeAsNil() { + if x.Template != nil { + x.Template = nil + } + } else { + if x.Template == nil { + x.Template = new(PodTemplateSpec) + } + x.Template.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicationControllerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv14 := &x.MinReadySeconds + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Selector = nil + } else { + yyv16 := &x.Selector + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + z.F.DecMapStringStringX(yyv16, false, d) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Template != nil { + x.Template = nil + } + } else { + if x.Template == nil { + x.Template = new(PodTemplateSpec) + } + x.Template.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FullyLabeledReplicas != 0 + yyq2[2] = x.ReadyReplicas != 0 + yyq2[3] = x.AvailableReplicas != 0 + yyq2[4] = x.ObservedGeneration != 0 + yyq2[5] = len(x.Conditions) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.FullyLabeledReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.FullyLabeledReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.ReadyReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readyReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.ReadyReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = 
yym17 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encSliceReplicationControllerCondition(([]ReplicationControllerCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + h.encSliceReplicationControllerCondition(([]ReplicationControllerCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicationControllerStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicationControllerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "fullyLabeledReplicas": + if r.TryDecodeAsNil() { + x.FullyLabeledReplicas = 0 + } else { + yyv6 := &x.FullyLabeledReplicas + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "readyReplicas": + if r.TryDecodeAsNil() { + x.ReadyReplicas = 0 + } else { + yyv8 := &x.ReadyReplicas + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "availableReplicas": + if r.TryDecodeAsNil() { + x.AvailableReplicas = 0 + } else { + yyv10 := &x.AvailableReplicas + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "observedGeneration": + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv12 := &x.ObservedGeneration + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + 
*((*int64)(yyv12)) = int64(r.DecodeInt(64)) + } + } + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv14 := &x.Conditions + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + h.decSliceReplicationControllerCondition((*[]ReplicationControllerCondition)(yyv14), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv17 := &x.Replicas + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FullyLabeledReplicas = 0 + } else { + yyv19 := &x.FullyLabeledReplicas + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int32)(yyv19)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadyReplicas = 0 + } else { + yyv21 := &x.ReadyReplicas + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AvailableReplicas = 0 + } else { + yyv23 := &x.AvailableReplicas + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv25 := &x.ObservedGeneration + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int64)(yyv25)) = int64(r.DecodeInt(64)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv27 := &x.Conditions + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + h.decSliceReplicationControllerCondition((*[]ReplicationControllerCondition)(yyv27), d) + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj16-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ReplicationControllerConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ReplicationControllerConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *ReplicationControllerCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = x.Reason != "" + yyq2[4] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Status.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Status.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastTransitionTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastTransitionTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicationControllerCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicationControllerCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "lastTransitionTime": + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_v1.Time{} + } else { + yyv6 := &x.LastTransitionTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv8 := &x.Reason + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv10 := &x.Message + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicationControllerCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv13 := &x.Type + yyv13.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv14 := &x.Status + yyv14.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_v1.Time{} + } else { + yyv15 := &x.LastTransitionTime + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else if yym16 { + z.DecBinaryUnmarshal(yyv15) + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv15) + } else { + z.DecFallback(yyv15, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv17 := &x.Reason + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv19 := &x.Message + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicationController) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if 
yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicationController) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } 
else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicationController) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ReplicationControllerSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ReplicationControllerStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicationController) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ReplicationControllerSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ReplicationControllerStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicationControllerList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == 
nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceReplicationController(([]ReplicationController)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceReplicationController(([]ReplicationController)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicationControllerList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicationControllerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceReplicationController((*[]ReplicationController)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicationControllerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceReplicationController((*[]ReplicationController)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ServiceAffinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ServiceAffinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x ServiceType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ServiceType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *ServiceStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + 
var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.LoadBalancer + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("loadBalancer")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.LoadBalancer + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServiceStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ServiceStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "loadBalancer": + if r.TryDecodeAsNil() { + x.LoadBalancer = LoadBalancerStatus{} + } else { + yyv4 := &x.LoadBalancer + yyv4.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServiceStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LoadBalancer = LoadBalancerStatus{} + } else { + yyv6 := &x.LoadBalancer + yyv6.CodecDecodeSelf(d) + } + for { + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj5-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LoadBalancerStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Ingress) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Ingress == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceLoadBalancerIngress(([]LoadBalancerIngress)(x.Ingress), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ingress")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ingress == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceLoadBalancerIngress(([]LoadBalancerIngress)(x.Ingress), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LoadBalancerStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LoadBalancerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "ingress": + if r.TryDecodeAsNil() { + x.Ingress = nil + } else { + yyv4 := &x.Ingress + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceLoadBalancerIngress((*[]LoadBalancerIngress)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LoadBalancerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ingress = nil + } else { + yyv7 := &x.Ingress + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceLoadBalancerIngress((*[]LoadBalancerIngress)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LoadBalancerIngress) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.IP != "" + yyq2[1] = x.Hostname != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.IP)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ip")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.IP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostname")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LoadBalancerIngress) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LoadBalancerIngress) codecDecodeSelfFromMap(l int, d 
*codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "ip": + if r.TryDecodeAsNil() { + x.IP = "" + } else { + yyv4 := &x.IP + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "hostname": + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + yyv6 := &x.Hostname + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LoadBalancerIngress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.IP = "" + } else { + yyv9 := &x.IP + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + yyv11 := &x.Hostname + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Ports) != 0 + yyq2[1] = len(x.Selector) != 0 + yyq2[2] = x.ClusterIP != "" + yyq2[3] = x.Type != "" + yyq2[4] = len(x.ExternalIPs) != 0 + yyq2[5] = len(x.DeprecatedPublicIPs) != 0 + yyq2[6] = x.SessionAffinity != "" + yyq2[7] = x.LoadBalancerIP != "" + yyq2[8] = len(x.LoadBalancerSourceRanges) != 0 + yyq2[9] = x.ExternalName != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Ports == nil { + r.EncodeNil() + } else { + yym4 := 
z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceServicePort(([]ServicePort)(x.Ports), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ports")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ports == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceServicePort(([]ServicePort)(x.Ports), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncMapStringStringV(x.Selector, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncMapStringStringV(x.Selector, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterIP)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("clusterIP")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterIP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + x.Type.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.ExternalIPs == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncSliceStringV(x.ExternalIPs, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("externalIPs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ExternalIPs == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + z.F.EncSliceStringV(x.ExternalIPs, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.DeprecatedPublicIPs == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + z.F.EncSliceStringV(x.DeprecatedPublicIPs, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("deprecatedPublicIPs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DeprecatedPublicIPs == nil { + r.EncodeNil() + } else { + yym20 := 
z.EncBinary() + _ = yym20 + if false { + } else { + z.F.EncSliceStringV(x.DeprecatedPublicIPs, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + x.SessionAffinity.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("sessionAffinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.SessionAffinity.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.LoadBalancerIP)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("loadBalancerIP")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.LoadBalancerIP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.LoadBalancerSourceRanges == nil { + r.EncodeNil() + } else { + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("loadBalancerSourceRanges")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LoadBalancerSourceRanges == nil { + r.EncodeNil() + } else { + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ExternalName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("externalName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ExternalName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServiceSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + 
+func (x *ServiceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "ports": + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv4 := &x.Ports + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceServicePort((*[]ServicePort)(yyv4), d) + } + } + case "selector": + if r.TryDecodeAsNil() { + x.Selector = nil + } else { + yyv6 := &x.Selector + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecMapStringStringX(yyv6, false, d) + } + } + case "clusterIP": + if r.TryDecodeAsNil() { + x.ClusterIP = "" + } else { + yyv8 := &x.ClusterIP + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv10 := &x.Type + yyv10.CodecDecodeSelf(d) + } + case "externalIPs": + if r.TryDecodeAsNil() { + x.ExternalIPs = nil + } else { + yyv11 := &x.ExternalIPs + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + z.F.DecSliceStringX(yyv11, false, d) + } + } + case "deprecatedPublicIPs": + if r.TryDecodeAsNil() { + x.DeprecatedPublicIPs = nil + } else { + yyv13 := &x.DeprecatedPublicIPs + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecSliceStringX(yyv13, false, d) + } + } + case "sessionAffinity": + if r.TryDecodeAsNil() { + x.SessionAffinity = "" + } else { + yyv15 := &x.SessionAffinity + yyv15.CodecDecodeSelf(d) + } + case "loadBalancerIP": + if r.TryDecodeAsNil() { + x.LoadBalancerIP = "" + } else { + yyv16 := &x.LoadBalancerIP + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + case "loadBalancerSourceRanges": + if r.TryDecodeAsNil() { + x.LoadBalancerSourceRanges = nil + } else { + yyv18 := &x.LoadBalancerSourceRanges + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + z.F.DecSliceStringX(yyv18, false, d) + } + } + case "externalName": + if r.TryDecodeAsNil() { + x.ExternalName = "" + } else { + yyv20 := &x.ExternalName + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(yyv20)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj22 int + var yyb22 bool + var yyhl22 bool = l >= 0 + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv23 := &x.Ports + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + h.decSliceServicePort((*[]ServicePort)(yyv23), d) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Selector = nil + } else { + yyv25 := &x.Selector + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + z.F.DecMapStringStringX(yyv25, false, d) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ClusterIP = "" + } else { + yyv27 := &x.ClusterIP + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv29 := &x.Type + yyv29.CodecDecodeSelf(d) + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ExternalIPs = nil + } else { + yyv30 := &x.ExternalIPs + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + z.F.DecSliceStringX(yyv30, false, d) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DeprecatedPublicIPs = nil + } else { + yyv32 := &x.DeprecatedPublicIPs + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + z.F.DecSliceStringX(yyv32, false, d) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SessionAffinity = "" + } else { + yyv34 := &x.SessionAffinity + yyv34.CodecDecodeSelf(d) + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LoadBalancerIP = "" + } else { + yyv35 := &x.LoadBalancerIP + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*string)(yyv35)) = r.DecodeString() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LoadBalancerSourceRanges = nil + } else { + yyv37 := &x.LoadBalancerSourceRanges + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + z.F.DecSliceStringX(yyv37, false, d) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ExternalName = "" + } else { + yyv39 := &x.ExternalName + yym40 := z.DecBinary() + _ = yym40 + if 
false { + } else { + *((*string)(yyv39)) = r.DecodeString() + } + } + for { + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj22-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ServicePort) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = x.Protocol != "" + yyq2[3] = true + yyq2[4] = x.NodePort != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + x.Protocol.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("protocol")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Protocol.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.Port)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("port")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.Port)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy13 := &x.TargetPort + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(yy13) { + } else if !yym14 && z.IsJSONHandle() { + z.EncJSONMarshal(yy13) + } else { + z.EncFallback(yy13) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetPort")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy15 := &x.TargetPort + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else 
{ + r.EncodeInt(int64(x.NodePort)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodePort")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(x.NodePort)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServicePort) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ServicePort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "protocol": + if r.TryDecodeAsNil() { + x.Protocol = "" + } else { + yyv6 := &x.Protocol + yyv6.CodecDecodeSelf(d) + } + case "port": + if r.TryDecodeAsNil() { + x.Port = 0 + } else { + yyv7 := &x.Port + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } + } + case "targetPort": + if r.TryDecodeAsNil() { + x.TargetPort = pkg4_intstr.IntOrString{} + } else { + yyv9 := &x.TargetPort + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv9) + } else { + z.DecFallback(yyv9, false) + } + } + case "nodePort": + if r.TryDecodeAsNil() { + x.NodePort = 0 + } else { + yyv11 := &x.NodePort + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(yyv11)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServicePort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv14 := &x.Name + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Protocol = "" + } else { + yyv16 := &x.Protocol + yyv16.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Port = 0 + } else { + yyv17 := &x.Port + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetPort = pkg4_intstr.IntOrString{} + } else { + yyv19 := &x.TargetPort + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) + } else { + z.DecFallback(yyv19, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NodePort = 0 + } else { + yyv21 := &x.NodePort + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Service) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Service) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Service) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + 
z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ServiceSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ServiceStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Service) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ServiceSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ServiceStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 
{ + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ServiceList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceService(([]Service)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceService(([]Service)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { 
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServiceList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ServiceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceService((*[]Service)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServiceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 
= yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceService((*[]Service)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ServiceAccount) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = len(x.Secrets) != 0 + yyq2[4] = len(x.ImagePullSecrets) != 0 + yyq2[5] = x.AutomountServiceAccountToken != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Secrets == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceObjectReference(([]ObjectReference)(x.Secrets), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secrets")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Secrets == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceObjectReference(([]ObjectReference)(x.Secrets), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.ImagePullSecrets == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("imagePullSecrets")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ImagePullSecrets == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.AutomountServiceAccountToken == nil { + r.EncodeNil() + } else { + yy21 := *x.AutomountServiceAccountToken + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeBool(bool(yy21)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("automountServiceAccountToken")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AutomountServiceAccountToken == nil { + r.EncodeNil() + } else { + yy23 := *x.AutomountServiceAccountToken + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeBool(bool(yy23)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServiceAccount) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } 
+ } +} + +func (x *ServiceAccount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "secrets": + if r.TryDecodeAsNil() { + x.Secrets = nil + } else { + yyv10 := &x.Secrets + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceObjectReference((*[]ObjectReference)(yyv10), d) + } + } + case "imagePullSecrets": + if r.TryDecodeAsNil() { + x.ImagePullSecrets = nil + } else { + yyv12 := &x.ImagePullSecrets + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv12), d) + } + } + case "automountServiceAccountToken": + if r.TryDecodeAsNil() { + if x.AutomountServiceAccountToken != nil { + x.AutomountServiceAccountToken = nil + } + } else { + if x.AutomountServiceAccountToken == nil { + x.AutomountServiceAccountToken = new(bool) + } + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(x.AutomountServiceAccountToken)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServiceAccount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv17 := &x.Kind + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv19 := &x.APIVersion + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv21 := &x.ObjectMeta + yym22 := z.DecBinary() + _ = yym22 + if false { + } else if z.HasExtensions() && z.DecExt(yyv21) { + } else { + z.DecFallback(yyv21, false) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Secrets = nil + } else { + yyv23 := &x.Secrets + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + h.decSliceObjectReference((*[]ObjectReference)(yyv23), d) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ImagePullSecrets = nil + } else { + yyv25 := &x.ImagePullSecrets + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv25), d) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AutomountServiceAccountToken != nil { + x.AutomountServiceAccountToken = nil + } + } else { + if x.AutomountServiceAccountToken == nil { + x.AutomountServiceAccountToken = new(bool) + } + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(x.AutomountServiceAccountToken)) = r.DecodeBool() + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj16-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ServiceAccountList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + 
} else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceServiceAccount(([]ServiceAccount)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceServiceAccount(([]ServiceAccount)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServiceAccountList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ServiceAccountList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if 
r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceServiceAccount((*[]ServiceAccount)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServiceAccountList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceServiceAccount((*[]ServiceAccount)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Endpoints) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + 
yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Subsets == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceEndpointSubset(([]EndpointSubset)(x.Subsets), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subsets")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Subsets == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceEndpointSubset(([]EndpointSubset)(x.Subsets), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Endpoints) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Endpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "subsets": + if r.TryDecodeAsNil() { + x.Subsets = nil + } else { + yyv10 := &x.Subsets + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceEndpointSubset((*[]EndpointSubset)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Endpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subsets = nil + } else { + yyv19 := &x.Subsets + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceEndpointSubset((*[]EndpointSubset)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EndpointSubset) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Addresses) != 0 + yyq2[1] = len(x.NotReadyAddresses) != 0 + yyq2[2] = len(x.Ports) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Addresses == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceEndpointAddress(([]EndpointAddress)(x.Addresses), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("addresses")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Addresses == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceEndpointAddress(([]EndpointAddress)(x.Addresses), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.NotReadyAddresses == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceEndpointAddress(([]EndpointAddress)(x.NotReadyAddresses), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("notReadyAddresses")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NotReadyAddresses == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceEndpointAddress(([]EndpointAddress)(x.NotReadyAddresses), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Ports == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceEndpointPort(([]EndpointPort)(x.Ports), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ports")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ports == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + h.encSliceEndpointPort(([]EndpointPort)(x.Ports), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EndpointSubset) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EndpointSubset) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "addresses": + if r.TryDecodeAsNil() { + x.Addresses = nil + } else { + yyv4 := &x.Addresses + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceEndpointAddress((*[]EndpointAddress)(yyv4), d) + } + } + case "notReadyAddresses": + if r.TryDecodeAsNil() { + x.NotReadyAddresses = nil + } else { + yyv6 := &x.NotReadyAddresses + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceEndpointAddress((*[]EndpointAddress)(yyv6), d) + } + } + case "ports": + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv8 := &x.Ports + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + h.decSliceEndpointPort((*[]EndpointPort)(yyv8), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EndpointSubset) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Addresses = nil + } else { + yyv11 := &x.Addresses + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceEndpointAddress((*[]EndpointAddress)(yyv11), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NotReadyAddresses = nil + } else { + yyv13 := &x.NotReadyAddresses + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceEndpointAddress((*[]EndpointAddress)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv15 := &x.Ports + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + h.decSliceEndpointPort((*[]EndpointPort)(yyv15), d) + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EndpointAddress) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Hostname != "" + yyq2[2] = x.NodeName != nil + yyq2[3] = x.TargetRef != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.IP)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ip")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.IP)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostname")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.NodeName == nil { + r.EncodeNil() + } else { + yy10 := *x.NodeName + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodeName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NodeName == nil { + r.EncodeNil() + } else { + yy12 := *x.NodeName + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.TargetRef == nil { + r.EncodeNil() + } else { + x.TargetRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("targetRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TargetRef == nil { + r.EncodeNil() + } else { + x.TargetRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EndpointAddress) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EndpointAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "ip": + if r.TryDecodeAsNil() { + x.IP = "" + } else { + yyv4 := &x.IP + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "hostname": + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + yyv6 := &x.Hostname + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "nodeName": + if r.TryDecodeAsNil() { + if x.NodeName != nil { + x.NodeName = nil + } + } else { + if x.NodeName == nil { + x.NodeName = new(string) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(x.NodeName)) = r.DecodeString() + } + } + case "targetRef": + if r.TryDecodeAsNil() { + if x.TargetRef != nil { + x.TargetRef = nil + } + } else { + if x.TargetRef == nil { + x.TargetRef = new(ObjectReference) + } + x.TargetRef.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EndpointAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.IP = "" + } else { + yyv12 := &x.IP + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + 
return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + yyv14 := &x.Hostname + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NodeName != nil { + x.NodeName = nil + } + } else { + if x.NodeName == nil { + x.NodeName = new(string) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(x.NodeName)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TargetRef != nil { + x.TargetRef = nil + } + } else { + if x.TargetRef == nil { + x.TargetRef = new(ObjectReference) + } + x.TargetRef.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EndpointPort) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[2] = x.Protocol != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.Port)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("port")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.Port)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + x.Protocol.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("protocol")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Protocol.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EndpointPort) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EndpointPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "port": + if r.TryDecodeAsNil() { + x.Port = 0 + } else { + yyv6 := &x.Port + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "protocol": + if r.TryDecodeAsNil() { + x.Protocol = "" + } else { + yyv8 := &x.Protocol + yyv8.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EndpointPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv10 := &x.Name + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Port = 0 + } else { + yyv12 := &x.Port + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(yyv12)) = int32(r.DecodeInt(32)) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Protocol = "" + } else { + yyv14 := &x.Protocol + yyv14.CodecDecodeSelf(d) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EndpointsList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceEndpoints(([]Endpoints)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := 
z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceEndpoints(([]Endpoints)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EndpointsList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EndpointsList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceEndpoints((*[]Endpoints)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EndpointsList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceEndpoints((*[]Endpoints)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.PodCIDR != "" + yyq2[1] = x.ExternalID != "" + yyq2[2] = x.ProviderID != "" + yyq2[3] = x.Unschedulable != false + yyq2[4] = len(x.Taints) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podCIDR")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ExternalID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("externalID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ExternalID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + 
} else { + r.EncodeString(codecSelferC_UTF81234, string(x.ProviderID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("providerID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ProviderID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Unschedulable)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("unschedulable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Unschedulable)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Taints == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceTaint(([]Taint)(x.Taints), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("taints")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Taints == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encSliceTaint(([]Taint)(x.Taints), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "podCIDR": + if r.TryDecodeAsNil() { + x.PodCIDR = "" + } else { + yyv4 := &x.PodCIDR + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "externalID": + if r.TryDecodeAsNil() { + x.ExternalID = "" + } else { + yyv6 := &x.ExternalID + yym7 := 
z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "providerID": + if r.TryDecodeAsNil() { + x.ProviderID = "" + } else { + yyv8 := &x.ProviderID + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "unschedulable": + if r.TryDecodeAsNil() { + x.Unschedulable = false + } else { + yyv10 := &x.Unschedulable + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + case "taints": + if r.TryDecodeAsNil() { + x.Taints = nil + } else { + yyv12 := &x.Taints + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + h.decSliceTaint((*[]Taint)(yyv12), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodCIDR = "" + } else { + yyv15 := &x.PodCIDR + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ExternalID = "" + } else { + yyv17 := &x.ExternalID + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ProviderID = "" + } else { + yyv19 := &x.ProviderID + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Unschedulable = false + } else { + yyv21 := &x.Unschedulable + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(yyv21)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Taints = nil + } else { + yyv23 := &x.Taints + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + h.decSliceTaint((*[]Taint)(yyv23), d) + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DaemonEndpoint) CodecEncodeSelf(e 
*codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Port)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("Port")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Port)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DaemonEndpoint) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DaemonEndpoint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "Port": + if r.TryDecodeAsNil() { + x.Port = 0 + } else { + yyv4 := &x.Port + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DaemonEndpoint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Port = 0 + } else { + yyv7 := &x.Port + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = 
int32(r.DecodeInt(32)) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeDaemonEndpoints) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.KubeletEndpoint + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kubeletEndpoint")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.KubeletEndpoint + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeDaemonEndpoints) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeDaemonEndpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kubeletEndpoint": + if r.TryDecodeAsNil() { + x.KubeletEndpoint = DaemonEndpoint{} + } else { + yyv4 := &x.KubeletEndpoint + yyv4.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeDaemonEndpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + 
yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.KubeletEndpoint = DaemonEndpoint{} + } else { + yyv6 := &x.KubeletEndpoint + yyv6.CodecDecodeSelf(d) + } + for { + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj5-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) + } else { + yynn2 = 10 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MachineID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("machineID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MachineID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SystemUUID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("systemUUID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SystemUUID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.BootID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("bootID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.BootID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.KernelVersion)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kernelVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.KernelVersion)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.OSImage)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("osImage")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.OSImage)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntimeVersion)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containerRuntimeVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntimeVersion)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.KubeletVersion)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kubeletVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.KubeletVersion)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.KubeProxyVersion)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kubeProxyVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.KubeProxyVersion)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("operatingSystem")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Architecture)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("architecture")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Architecture)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeSystemInfo) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && 
z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeSystemInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "machineID": + if r.TryDecodeAsNil() { + x.MachineID = "" + } else { + yyv4 := &x.MachineID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "systemUUID": + if r.TryDecodeAsNil() { + x.SystemUUID = "" + } else { + yyv6 := &x.SystemUUID + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "bootID": + if r.TryDecodeAsNil() { + x.BootID = "" + } else { + yyv8 := &x.BootID + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "kernelVersion": + if r.TryDecodeAsNil() { + x.KernelVersion = "" + } else { + yyv10 := &x.KernelVersion + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "osImage": + if r.TryDecodeAsNil() { + x.OSImage = "" + } else { + yyv12 := &x.OSImage + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "containerRuntimeVersion": + if r.TryDecodeAsNil() { + x.ContainerRuntimeVersion = "" + } else { + yyv14 := &x.ContainerRuntimeVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "kubeletVersion": + if r.TryDecodeAsNil() { + x.KubeletVersion = "" + } else { + yyv16 := &x.KubeletVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + case "kubeProxyVersion": + if r.TryDecodeAsNil() { + x.KubeProxyVersion = "" + } else { + yyv18 := &x.KubeProxyVersion + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + case "operatingSystem": + if r.TryDecodeAsNil() { + x.OperatingSystem = "" + } else { + yyv20 := &x.OperatingSystem + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(yyv20)) = r.DecodeString() + } + } + case "architecture": + if r.TryDecodeAsNil() { + x.Architecture = "" + } else { + yyv22 := &x.Architecture + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*string)(yyv22)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d 
*codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj24 int + var yyb24 bool + var yyhl24 bool = l >= 0 + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MachineID = "" + } else { + yyv25 := &x.MachineID + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SystemUUID = "" + } else { + yyv27 := &x.SystemUUID + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.BootID = "" + } else { + yyv29 := &x.BootID + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.KernelVersion = "" + } else { + yyv31 := &x.KernelVersion + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.OSImage = "" + } else { + yyv33 := &x.OSImage + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*string)(yyv33)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContainerRuntimeVersion = "" + } else { + yyv35 := &x.ContainerRuntimeVersion + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*string)(yyv35)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.KubeletVersion = "" + } else { + yyv37 := &x.KubeletVersion + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*string)(yyv37)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.KubeProxyVersion = "" + } else { + yyv39 := &x.KubeProxyVersion + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + 
*((*string)(yyv39)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.OperatingSystem = "" + } else { + yyv41 := &x.OperatingSystem + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*string)(yyv41)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Architecture = "" + } else { + yyv43 := &x.Architecture + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*string)(yyv43)) = r.DecodeString() + } + } + for { + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj24-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Capacity) != 0 + yyq2[1] = len(x.Allocatable) != 0 + yyq2[2] = x.Phase != "" + yyq2[3] = len(x.Conditions) != 0 + yyq2[4] = len(x.Addresses) != 0 + yyq2[5] = true + yyq2[6] = true + yyq2[7] = len(x.Images) != 0 + yyq2[8] = len(x.VolumesInUse) != 0 + yyq2[9] = len(x.VolumesAttached) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Capacity == nil { + r.EncodeNil() + } else { + x.Capacity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("capacity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Capacity == nil { + r.EncodeNil() + } else { + x.Capacity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Allocatable == nil { + r.EncodeNil() + } else { + x.Allocatable.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("allocatable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Allocatable == nil { + r.EncodeNil() + } else { + x.Allocatable.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + x.Phase.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("phase")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Phase.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + h.encSliceNodeCondition(([]NodeCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + h.encSliceNodeCondition(([]NodeCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Addresses == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceNodeAddress(([]NodeAddress)(x.Addresses), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("addresses")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Addresses == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encSliceNodeAddress(([]NodeAddress)(x.Addresses), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yy19 := &x.DaemonEndpoints + yy19.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("daemonEndpoints")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy21 := &x.DaemonEndpoints + yy21.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yy24 := &x.NodeInfo + yy24.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodeInfo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy26 := &x.NodeInfo + yy26.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.Images == nil { + r.EncodeNil() + } else { + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + h.encSliceContainerImage(([]ContainerImage)(x.Images), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("images")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Images == nil { + r.EncodeNil() + } else { + yym30 := z.EncBinary() + _ = yym30 + if false { + } else { + h.encSliceContainerImage(([]ContainerImage)(x.Images), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.VolumesInUse == nil { + r.EncodeNil() + } else { + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("volumesInUse")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VolumesInUse == nil { + r.EncodeNil() + } else { + yym33 := z.EncBinary() + _ = yym33 + if false { + } else { + h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.VolumesAttached == nil { + r.EncodeNil() + } else { + yym35 := z.EncBinary() + _ = yym35 + if false { + } else { + h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumesAttached")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VolumesAttached == nil { + r.EncodeNil() + } else { + yym36 := z.EncBinary() + _ = yym36 + if false { + } else { + h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "capacity": + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv4 := &x.Capacity + yyv4.CodecDecodeSelf(d) + } + case "allocatable": + if r.TryDecodeAsNil() { + x.Allocatable = nil + } else { + yyv5 := &x.Allocatable + yyv5.CodecDecodeSelf(d) + } + case "phase": + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv6 := &x.Phase + yyv6.CodecDecodeSelf(d) + } + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv7 := &x.Conditions + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceNodeCondition((*[]NodeCondition)(yyv7), d) + } + } + case "addresses": + if r.TryDecodeAsNil() { + x.Addresses = nil + } else { + yyv9 := &x.Addresses + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceNodeAddress((*[]NodeAddress)(yyv9), d) + } + } + case "daemonEndpoints": + if r.TryDecodeAsNil() { + x.DaemonEndpoints = NodeDaemonEndpoints{} + } else { + yyv11 := 
&x.DaemonEndpoints + yyv11.CodecDecodeSelf(d) + } + case "nodeInfo": + if r.TryDecodeAsNil() { + x.NodeInfo = NodeSystemInfo{} + } else { + yyv12 := &x.NodeInfo + yyv12.CodecDecodeSelf(d) + } + case "images": + if r.TryDecodeAsNil() { + x.Images = nil + } else { + yyv13 := &x.Images + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceContainerImage((*[]ContainerImage)(yyv13), d) + } + } + case "volumesInUse": + if r.TryDecodeAsNil() { + x.VolumesInUse = nil + } else { + yyv15 := &x.VolumesInUse + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv15), d) + } + } + case "volumesAttached": + if r.TryDecodeAsNil() { + x.VolumesAttached = nil + } else { + yyv17 := &x.VolumesAttached + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + h.decSliceAttachedVolume((*[]AttachedVolume)(yyv17), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj19 int + var yyb19 bool + var yyhl19 bool = l >= 0 + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv20 := &x.Capacity + yyv20.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Allocatable = nil + } else { + yyv21 := &x.Allocatable + yyv21.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv22 := &x.Phase + yyv22.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv23 := &x.Conditions + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + h.decSliceNodeCondition((*[]NodeCondition)(yyv23), d) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Addresses = nil + } else { + yyv25 := &x.Addresses + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + h.decSliceNodeAddress((*[]NodeAddress)(yyv25), d) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DaemonEndpoints = NodeDaemonEndpoints{} + } else { + yyv27 := &x.DaemonEndpoints + yyv27.CodecDecodeSelf(d) + } 
+ yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NodeInfo = NodeSystemInfo{} + } else { + yyv28 := &x.NodeInfo + yyv28.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Images = nil + } else { + yyv29 := &x.Images + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + h.decSliceContainerImage((*[]ContainerImage)(yyv29), d) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumesInUse = nil + } else { + yyv31 := &x.VolumesInUse + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv31), d) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumesAttached = nil + } else { + yyv33 := &x.VolumesAttached + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + h.decSliceAttachedVolume((*[]AttachedVolume)(yyv33), d) + } + } + for { + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj19-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x UniqueVolumeName) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *UniqueVolumeName) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *AttachedVolume) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Name.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + 
x.Name.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("devicePath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *AttachedVolume) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *AttachedVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yyv4.CodecDecodeSelf(d) + } + case "devicePath": + if r.TryDecodeAsNil() { + x.DevicePath = "" + } else { + yyv5 := &x.DevicePath + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *AttachedVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv8 := &x.Name + yyv8.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DevicePath = "" + } else { + yyv9 := &x.DevicePath + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } 
+ for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *AvoidPods) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.PreferAvoidPods) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.PreferAvoidPods == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePreferAvoidPodsEntry(([]PreferAvoidPodsEntry)(x.PreferAvoidPods), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preferAvoidPods")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreferAvoidPods == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePreferAvoidPodsEntry(([]PreferAvoidPodsEntry)(x.PreferAvoidPods), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *AvoidPods) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *AvoidPods) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "preferAvoidPods": + if r.TryDecodeAsNil() { + x.PreferAvoidPods = nil + } else { + yyv4 := &x.PreferAvoidPods + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePreferAvoidPodsEntry((*[]PreferAvoidPodsEntry)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, 
yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *AvoidPods) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PreferAvoidPods = nil + } else { + yyv7 := &x.PreferAvoidPods + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSlicePreferAvoidPodsEntry((*[]PreferAvoidPodsEntry)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PreferAvoidPodsEntry) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = true + yyq2[2] = x.Reason != "" + yyq2[3] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.PodSignature + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podSignature")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.PodSignature + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.EvictionTime + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if yym10 { + z.EncBinaryMarshal(yy9) + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("evictionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.EvictionTime + yym12 := z.EncBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.EncExt(yy11) { + } else if yym12 { + z.EncBinaryMarshal(yy11) + } else if !yym12 && z.IsJSONHandle() { + z.EncJSONMarshal(yy11) + } else { + z.EncFallback(yy11) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym15 
:= z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PreferAvoidPodsEntry) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PreferAvoidPodsEntry) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "podSignature": + if r.TryDecodeAsNil() { + x.PodSignature = PodSignature{} + } else { + yyv4 := &x.PodSignature + yyv4.CodecDecodeSelf(d) + } + case "evictionTime": + if r.TryDecodeAsNil() { + x.EvictionTime = pkg2_v1.Time{} + } else { + yyv5 := &x.EvictionTime + yym6 := z.DecBinary() + _ = yym6 + if false { + } else if z.HasExtensions() && z.DecExt(yyv5) { + } else if yym6 { + z.DecBinaryUnmarshal(yyv5) + } else if !yym6 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv5) + } else { + z.DecFallback(yyv5, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv7 := &x.Reason + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv9 := &x.Message + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PreferAvoidPodsEntry) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, 
z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodSignature = PodSignature{} + } else { + yyv12 := &x.PodSignature + yyv12.CodecDecodeSelf(d) + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.EvictionTime = pkg2_v1.Time{} + } else { + yyv13 := &x.EvictionTime + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(yyv13) { + } else if yym14 { + z.DecBinaryUnmarshal(yyv13) + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) + } else { + z.DecFallback(yyv13, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv15 := &x.Reason + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv17 := &x.Message + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodSignature) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.PodController != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.PodController == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.PodController) { + } else { + z.EncFallback(x.PodController) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podController")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PodController == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.PodController) { + } else { 
+ z.EncFallback(x.PodController) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodSignature) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodSignature) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "podController": + if r.TryDecodeAsNil() { + if x.PodController != nil { + x.PodController = nil + } + } else { + if x.PodController == nil { + x.PodController = new(pkg2_v1.OwnerReference) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.PodController) { + } else { + z.DecFallback(x.PodController, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodSignature) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PodController != nil { + x.PodController = nil + } + } else { + if x.PodController == nil { + x.PodController = new(pkg2_v1.OwnerReference) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(x.PodController) { + } else { + z.DecFallback(x.PodController, false) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ContainerImage) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := 
z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.SizeBytes != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Names == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.Names, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("names")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Names == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.Names, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.SizeBytes)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("sizeBytes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.SizeBytes)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerImage) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerImage) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "names": + if r.TryDecodeAsNil() { + x.Names = nil + } else { + yyv4 := &x.Names + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + case "sizeBytes": + if r.TryDecodeAsNil() { + x.SizeBytes = 0 + } else { + yyv6 := &x.SizeBytes + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int64)(yyv6)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ContainerImage) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Names = nil + } else { + yyv9 := &x.Names + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + z.F.DecSliceStringX(yyv9, false, d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SizeBytes = 0 + } else { + yyv11 := &x.SizeBytes + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int64)(yyv11)) = int64(r.DecodeInt(64)) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x NodePhase) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *NodePhase) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x NodeConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *NodeConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *NodeCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Status.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Status.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastHeartbeatTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastHeartbeatTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastHeartbeatTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.LastTransitionTime + yym18 := z.EncBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } else if !yym18 && z.IsJSONHandle() { + z.EncJSONMarshal(yy17) + } else { + z.EncFallback(yy17) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + 
} + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "lastHeartbeatTime": + if r.TryDecodeAsNil() { + x.LastHeartbeatTime = pkg2_v1.Time{} + } else { + yyv6 := &x.LastHeartbeatTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "lastTransitionTime": + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_v1.Time{} + } else { + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv10 := &x.Reason + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv12 := &x.Message + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv15 := &x.Type + yyv15.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv16 := &x.Status + yyv16.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastHeartbeatTime = pkg2_v1.Time{} + } else { + yyv17 := &x.LastHeartbeatTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_v1.Time{} + } else { + yyv19 := &x.LastTransitionTime + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if yym20 { + z.DecBinaryUnmarshal(yyv19) + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) + } else { + z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv21 := &x.Reason + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv23 := &x.Message + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x NodeAddressType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *NodeAddressType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *NodeAddress) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + 
z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Address)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("address")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Address)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeAddress) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "address": + if r.TryDecodeAsNil() { + x.Address = "" + } else { + yyv5 := &x.Address + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, 
_, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv8 := &x.Type + yyv8.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Address = "" + } else { + yyv9 := &x.Address + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ResourceName) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ResourceName) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x ResourceList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + h.encResourceList((ResourceList)(x), e) + } + } +} + +func (x *ResourceList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decResourceList((*ResourceList)(x), d) + } +} + +func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 
:= 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = NodeSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = NodeStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = NodeSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = 
NodeStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceNode(([]Node)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceNode(([]Node)(x.Items), e) + } + } + } + 
if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceNode((*[]Node)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + 
*((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceNode((*[]Node)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x FinalizerName) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *FinalizerName) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *NamespaceSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Finalizers) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Finalizers == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceFinalizerName(([]FinalizerName)(x.Finalizers), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("finalizers")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Finalizers == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceFinalizerName(([]FinalizerName)(x.Finalizers), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NamespaceSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := 
z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NamespaceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "finalizers": + if r.TryDecodeAsNil() { + x.Finalizers = nil + } else { + yyv4 := &x.Finalizers + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceFinalizerName((*[]FinalizerName)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NamespaceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Finalizers = nil + } else { + yyv7 := &x.Finalizers + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceFinalizerName((*[]FinalizerName)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NamespaceStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Phase != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Phase.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("phase")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Phase.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NamespaceStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NamespaceStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "phase": + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv4 := &x.Phase + yyv4.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NamespaceStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv6 := &x.Phase + yyv6.CodecDecodeSelf(d) + } + for { + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj5-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x NamespacePhase) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *NamespacePhase) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *Namespace) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else 
{ + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Namespace) CodecDecodeSelf(d *codec1978.Decoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Namespace) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = NamespaceSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = NamespaceStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Namespace) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = NamespaceSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = NamespaceStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NamespaceList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + 
z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceNamespace(([]Namespace)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceNamespace(([]Namespace)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NamespaceList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NamespaceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceNamespace((*[]Namespace)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NamespaceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceNamespace((*[]Namespace)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Binding) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.Target + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("target")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Target + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Binding) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Binding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion 
+ yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "target": + if r.TryDecodeAsNil() { + x.Target = ObjectReference{} + } else { + yyv10 := &x.Target + yyv10.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Binding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Target = ObjectReference{} + } else { + yyv18 := &x.Target + yyv18.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Preconditions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.UID != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.UID == nil { + r.EncodeNil() + } else { + yy4 := *x.UID + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("uid")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.UID == nil { + r.EncodeNil() + } else { + yy6 := *x.UID + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Preconditions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Preconditions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "uid": + if r.TryDecodeAsNil() { + if x.UID != nil { + x.UID = nil + } + } else { + if x.UID == nil { + x.UID = new(pkg1_types.UID) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.UID) { + } else { + *((*string)(x.UID)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Preconditions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.UID != nil { + x.UID = nil + } + } else { + if x.UID == nil { + x.UID = new(pkg1_types.UID) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(x.UID) { + } else { + *((*string)(x.UID)) = r.DecodeString() + } + } + for { + yyj6++ + if yyhl6 { + 
yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x DeletionPropagation) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *DeletionPropagation) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.GracePeriodSeconds != nil + yyq2[3] = x.Preconditions != nil + yyq2[4] = x.OrphanDependents != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.GracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy10 := *x.GracePeriodSeconds + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gracePeriodSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.GracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy12 := *x.GracePeriodSeconds + yym13 := 
z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Preconditions == nil { + r.EncodeNil() + } else { + x.Preconditions.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preconditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Preconditions == nil { + r.EncodeNil() + } else { + x.Preconditions.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.OrphanDependents == nil { + r.EncodeNil() + } else { + yy18 := *x.OrphanDependents + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(yy18)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("orphanDependents")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.OrphanDependents == nil { + r.EncodeNil() + } else { + yy20 := *x.OrphanDependents + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeBool(bool(yy20)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.PropagationPolicy == nil { + r.EncodeNil() + } else { + yy23 := *x.PropagationPolicy + yy23.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("PropagationPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PropagationPolicy == nil { + r.EncodeNil() + } else { + yy25 := *x.PropagationPolicy + yy25.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeleteOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ 
= yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "gracePeriodSeconds": + if r.TryDecodeAsNil() { + if x.GracePeriodSeconds != nil { + x.GracePeriodSeconds = nil + } + } else { + if x.GracePeriodSeconds == nil { + x.GracePeriodSeconds = new(int64) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64)) + } + } + case "preconditions": + if r.TryDecodeAsNil() { + if x.Preconditions != nil { + x.Preconditions = nil + } + } else { + if x.Preconditions == nil { + x.Preconditions = new(Preconditions) + } + x.Preconditions.CodecDecodeSelf(d) + } + case "orphanDependents": + if r.TryDecodeAsNil() { + if x.OrphanDependents != nil { + x.OrphanDependents = nil + } + } else { + if x.OrphanDependents == nil { + x.OrphanDependents = new(bool) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(x.OrphanDependents)) = r.DecodeBool() + } + } + case "PropagationPolicy": + if r.TryDecodeAsNil() { + if x.PropagationPolicy != nil { + x.PropagationPolicy = nil + } + } else { + if x.PropagationPolicy == nil { + x.PropagationPolicy = new(DeletionPropagation) + } + x.PropagationPolicy.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv15 := &x.Kind + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv17 := &x.APIVersion + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.GracePeriodSeconds != nil { + x.GracePeriodSeconds = nil + } + } else { + if x.GracePeriodSeconds == nil { + x.GracePeriodSeconds = new(int64) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Preconditions != nil { + x.Preconditions = nil + } + } else { + if x.Preconditions == nil 
{ + x.Preconditions = new(Preconditions) + } + x.Preconditions.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.OrphanDependents != nil { + x.OrphanDependents = nil + } + } else { + if x.OrphanDependents == nil { + x.OrphanDependents = new(bool) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*bool)(x.OrphanDependents)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PropagationPolicy != nil { + x.PropagationPolicy = nil + } + } else { + if x.PropagationPolicy == nil { + x.PropagationPolicy = new(DeletionPropagation) + } + x.PropagationPolicy.CodecDecodeSelf(d) + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ListOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.LabelSelector != "" + yyq2[3] = x.FieldSelector != "" + yyq2[4] = x.Watch != false + yyq2[5] = x.ResourceVersion != "" + yyq2[6] = x.TimeoutSeconds != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + 
yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.LabelSelector)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("labelSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.LabelSelector)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FieldSelector)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fieldSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FieldSelector)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Watch)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("watch")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Watch)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.TimeoutSeconds == nil { + r.EncodeNil() + } else { + yy22 := *x.TimeoutSeconds + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeInt(int64(yy22)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("timeoutSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TimeoutSeconds == nil { + r.EncodeNil() + } else { + yy24 := *x.TimeoutSeconds + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeInt(int64(yy24)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ListOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == 
codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ListOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "labelSelector": + if r.TryDecodeAsNil() { + x.LabelSelector = "" + } else { + yyv8 := &x.LabelSelector + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "fieldSelector": + if r.TryDecodeAsNil() { + x.FieldSelector = "" + } else { + yyv10 := &x.FieldSelector + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "watch": + if r.TryDecodeAsNil() { + x.Watch = false + } else { + yyv12 := &x.Watch + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + case "resourceVersion": + if r.TryDecodeAsNil() { + x.ResourceVersion = "" + } else { + yyv14 := &x.ResourceVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "timeoutSeconds": + if r.TryDecodeAsNil() { + if x.TimeoutSeconds != nil { + x.TimeoutSeconds = nil + } + } else { + if x.TimeoutSeconds == nil { + x.TimeoutSeconds = new(int64) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ListOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv19 := &x.Kind + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv21 := &x.APIVersion + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LabelSelector = "" + } else { + yyv23 := &x.LabelSelector + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FieldSelector = "" + } else { + yyv25 := &x.FieldSelector + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Watch = false + } else { + yyv27 := &x.Watch + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(yyv27)) = r.DecodeBool() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ResourceVersion = "" + } else { + yyv29 := &x.ResourceVersion + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TimeoutSeconds != nil { + x.TimeoutSeconds = nil + } + } else { + if x.TimeoutSeconds == nil { + x.TimeoutSeconds = new(int64) + } + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64)) + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodLogOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Container != "" + yyq2[3] = x.Follow != false + yyq2[4] = x.Previous != false + yyq2[5] = x.SinceSeconds != nil + yyq2[6] = x.SinceTime != nil + yyq2[7] = x.Timestamps != false + 
yyq2[8] = x.TailLines != nil + yyq2[9] = x.LimitBytes != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Container)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("container")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Container)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Follow)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("follow")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Follow)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Previous)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("previous")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Previous)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.SinceSeconds == nil { + r.EncodeNil() + } else { + yy19 := *x.SinceSeconds + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(yy19)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("sinceSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SinceSeconds == nil { + r.EncodeNil() + } else { + yy21 := *x.SinceSeconds + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeInt(int64(yy21)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.SinceTime == nil { + r.EncodeNil() + } else { + yym24 := z.EncBinary() + _ = yym24 + if false { + } else if z.HasExtensions() && z.EncExt(x.SinceTime) { + } else if yym24 { + z.EncBinaryMarshal(x.SinceTime) + } else if !yym24 && z.IsJSONHandle() { + z.EncJSONMarshal(x.SinceTime) + } else { + z.EncFallback(x.SinceTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("sinceTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SinceTime == nil { + r.EncodeNil() + } else { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else if z.HasExtensions() && z.EncExt(x.SinceTime) { + } else if yym25 { + z.EncBinaryMarshal(x.SinceTime) + } else if !yym25 && z.IsJSONHandle() { + z.EncJSONMarshal(x.SinceTime) + } else { + z.EncFallback(x.SinceTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym27 := z.EncBinary() + _ = yym27 + if false { + } else { + r.EncodeBool(bool(x.Timestamps)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("timestamps")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeBool(bool(x.Timestamps)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.TailLines == nil { + r.EncodeNil() + } else { + yy30 := *x.TailLines + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeInt(int64(yy30)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tailLines")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TailLines == nil { + r.EncodeNil() + } else { + yy32 := *x.TailLines + yym33 := z.EncBinary() + _ = yym33 + if false { + } else { + r.EncodeInt(int64(yy32)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.LimitBytes == nil { + r.EncodeNil() + } else { + yy35 := *x.LimitBytes + yym36 := z.EncBinary() + _ = yym36 + if false { + } else { + r.EncodeInt(int64(yy35)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("limitBytes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LimitBytes == nil { + r.EncodeNil() + } else { + yy37 := *x.LimitBytes + yym38 := z.EncBinary() + _ = yym38 + if false { + } else { + r.EncodeInt(int64(yy37)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodLogOptions) CodecDecodeSelf(d 
*codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodLogOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "container": + if r.TryDecodeAsNil() { + x.Container = "" + } else { + yyv8 := &x.Container + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "follow": + if r.TryDecodeAsNil() { + x.Follow = false + } else { + yyv10 := &x.Follow + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + case "previous": + if r.TryDecodeAsNil() { + x.Previous = false + } else { + yyv12 := &x.Previous + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + case "sinceSeconds": + if r.TryDecodeAsNil() { + if x.SinceSeconds != nil { + x.SinceSeconds = nil + } + } else { + if x.SinceSeconds == nil { + x.SinceSeconds = new(int64) + } + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int64)(x.SinceSeconds)) = int64(r.DecodeInt(64)) + } + } + case "sinceTime": + if r.TryDecodeAsNil() { + if x.SinceTime != nil { + x.SinceTime = nil + } + } else { + if x.SinceTime == nil { + x.SinceTime = new(pkg2_v1.Time) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(x.SinceTime) { + } else if yym17 { + z.DecBinaryUnmarshal(x.SinceTime) + } else if !yym17 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.SinceTime) + } else { + z.DecFallback(x.SinceTime, false) + } + } + case "timestamps": + if r.TryDecodeAsNil() { + x.Timestamps = false + } else { + yyv18 := &x.Timestamps + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*bool)(yyv18)) = r.DecodeBool() + } + } + case "tailLines": + if r.TryDecodeAsNil() { + if x.TailLines != nil { + x.TailLines = nil + } + } else { + if x.TailLines == nil { + x.TailLines = new(int64) + } + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + 
*((*int64)(x.TailLines)) = int64(r.DecodeInt(64)) + } + } + case "limitBytes": + if r.TryDecodeAsNil() { + if x.LimitBytes != nil { + x.LimitBytes = nil + } + } else { + if x.LimitBytes == nil { + x.LimitBytes = new(int64) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*int64)(x.LimitBytes)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodLogOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj24 int + var yyb24 bool + var yyhl24 bool = l >= 0 + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv25 := &x.Kind + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv27 := &x.APIVersion + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Container = "" + } else { + yyv29 := &x.Container + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Follow = false + } else { + yyv31 := &x.Follow + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*bool)(yyv31)) = r.DecodeBool() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Previous = false + } else { + yyv33 := &x.Previous + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*bool)(yyv33)) = r.DecodeBool() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SinceSeconds != nil { + x.SinceSeconds = nil + } + } else { + if x.SinceSeconds == nil { + x.SinceSeconds = new(int64) + } + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*int64)(x.SinceSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SinceTime != nil { + x.SinceTime = nil + } + } else { + if x.SinceTime == nil { + x.SinceTime = new(pkg2_v1.Time) + } + yym38 := z.DecBinary() + _ = yym38 + if false { + } else if z.HasExtensions() && z.DecExt(x.SinceTime) { + } else if yym38 { + z.DecBinaryUnmarshal(x.SinceTime) + } else if !yym38 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.SinceTime) + } else { + z.DecFallback(x.SinceTime, false) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Timestamps = false + } else { + yyv39 := &x.Timestamps + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*bool)(yyv39)) = r.DecodeBool() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TailLines != nil { + x.TailLines = nil + } + } else { + if x.TailLines == nil { + x.TailLines = new(int64) + } + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*int64)(x.TailLines)) = int64(r.DecodeInt(64)) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LimitBytes != nil { + x.LimitBytes = nil + } + } else { + if x.LimitBytes == nil { + x.LimitBytes = new(int64) + } + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*int64)(x.LimitBytes)) = int64(r.DecodeInt(64)) + } + } + for { + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj24-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAttachOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Stdin != false + yyq2[3] = x.Stdout != false + yyq2[4] = x.Stderr != false + yyq2[5] = x.TTY != false + yyq2[6] = x.Container != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := 
z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.Stdin)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stdin")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.Stdin)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Stdout)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stdout")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Stdout)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Stderr)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stderr")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Stderr)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.TTY)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tty")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.TTY)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Container)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("container")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Container)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodAttachOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAttachOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "stdin": + if r.TryDecodeAsNil() { + x.Stdin = false + } else { + yyv8 := &x.Stdin + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + case "stdout": + if r.TryDecodeAsNil() { + x.Stdout = false + } else { + yyv10 := &x.Stdout + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + case "stderr": + if r.TryDecodeAsNil() { + x.Stderr = false + } else { + yyv12 := &x.Stderr + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + case "tty": + if r.TryDecodeAsNil() { + x.TTY = false + } else { + yyv14 := &x.TTY + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + case "container": + if r.TryDecodeAsNil() { + x.Container = "" + } else { + yyv16 := &x.Container + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAttachOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv19 := &x.Kind + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv21 := &x.APIVersion + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Stdin = false + } else { + yyv23 := &x.Stdin + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(yyv23)) = r.DecodeBool() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Stdout = false + } else { + yyv25 := &x.Stdout + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*bool)(yyv25)) = r.DecodeBool() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Stderr = false + } else { + yyv27 := &x.Stderr + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(yyv27)) = r.DecodeBool() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TTY = false + } else { + yyv29 := &x.TTY + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Container = "" + } else { + yyv31 := &x.Container + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodExecOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Stdin != false + yyq2[3] = x.Stdout != false + yyq2[4] = x.Stderr 
!= false + yyq2[5] = x.TTY != false + yyq2[6] = x.Container != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(8) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.Stdin)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stdin")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.Stdin)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Stdout)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stdout")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Stdout)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Stderr)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stderr")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Stderr)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.TTY)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tty")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := 
z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.TTY)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Container)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("container")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Container)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Command == nil { + r.EncodeNil() + } else { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + z.F.EncSliceStringV(x.Command, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("command")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Command == nil { + r.EncodeNil() + } else { + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + z.F.EncSliceStringV(x.Command, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodExecOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodExecOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "stdin": + if r.TryDecodeAsNil() { + x.Stdin = false + } else { + yyv8 := &x.Stdin + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + case "stdout": + if r.TryDecodeAsNil() { + x.Stdout = false + } else { + yyv10 := 
&x.Stdout + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + case "stderr": + if r.TryDecodeAsNil() { + x.Stderr = false + } else { + yyv12 := &x.Stderr + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + case "tty": + if r.TryDecodeAsNil() { + x.TTY = false + } else { + yyv14 := &x.TTY + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + case "container": + if r.TryDecodeAsNil() { + x.Container = "" + } else { + yyv16 := &x.Container + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + case "command": + if r.TryDecodeAsNil() { + x.Command = nil + } else { + yyv18 := &x.Command + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + z.F.DecSliceStringX(yyv18, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodExecOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj20 int + var yyb20 bool + var yyhl20 bool = l >= 0 + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv21 := &x.Kind + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv23 := &x.APIVersion + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Stdin = false + } else { + yyv25 := &x.Stdin + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*bool)(yyv25)) = r.DecodeBool() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Stdout = false + } else { + yyv27 := &x.Stdout + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(yyv27)) = r.DecodeBool() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Stderr = false + } else { + yyv29 := &x.Stderr + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + 
} + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TTY = false + } else { + yyv31 := &x.TTY + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*bool)(yyv31)) = r.DecodeBool() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Container = "" + } else { + yyv33 := &x.Container + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*string)(yyv33)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Command = nil + } else { + yyv35 := &x.Command + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + z.F.DecSliceStringX(yyv35, false, d) + } + } + for { + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj20-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodPortForwardOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = len(x.Ports) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Ports == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + 
z.F.EncSliceInt32V(x.Ports, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ports")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ports == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + z.F.EncSliceInt32V(x.Ports, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodPortForwardOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodPortForwardOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "ports": + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv8 := &x.Ports + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceInt32X(yyv8, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodPortForwardOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv13 := &x.APIVersion + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv15 := &x.Ports + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + z.F.DecSliceInt32X(yyv15, false, d) + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Path != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv8 := &x.Path + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv13 := &x.APIVersion + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv15 := &x.Path + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Path != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if 
z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv8 := &x.Path + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv13 := &x.APIVersion + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv15 := &x.Path + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Path != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServiceProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } 
else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv8 := &x.Path + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServiceProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv13 := &x.APIVersion + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv15 := &x.Path + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + 
yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.Namespace != "" + yyq2[2] = x.Name != "" + yyq2[3] = x.UID != "" + yyq2[4] = x.APIVersion != "" + yyq2[5] = x.ResourceVersion != "" + yyq2[6] = x.FieldPath != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(x.UID) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("uid")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(x.UID) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fieldPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv6 := &x.Namespace + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { 
+ *((*string)(yyv8)) = r.DecodeString() + } + } + case "uid": + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv10 := &x.UID + yym11 := z.DecBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.DecExt(yyv10) { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv12 := &x.APIVersion + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "resourceVersion": + if r.TryDecodeAsNil() { + x.ResourceVersion = "" + } else { + yyv14 := &x.ResourceVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "fieldPath": + if r.TryDecodeAsNil() { + x.FieldPath = "" + } else { + yyv16 := &x.FieldPath + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv19 := &x.Kind + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv21 := &x.Namespace + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv23 := &x.Name + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv25 := &x.UID + yym26 := z.DecBinary() + _ = yym26 + if false { + } else if z.HasExtensions() && z.DecExt(yyv25) { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv27 := &x.APIVersion + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 
{ + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ResourceVersion = "" + } else { + yyv29 := &x.ResourceVersion + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FieldPath = "" + } else { + yyv31 := &x.FieldPath + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LocalObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LocalObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LocalObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 
+ for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LocalObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv7 := &x.Name + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SerializedReference) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { 
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.Reference + yy10.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reference")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.Reference + yy12.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SerializedReference) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SerializedReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "reference": + if r.TryDecodeAsNil() { + x.Reference = ObjectReference{} + } else { + yyv8 := &x.Reference + yyv8.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SerializedReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv10 := &x.Kind + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv12 := &x.APIVersion + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reference = ObjectReference{} + } else { + yyv14 := &x.Reference + yyv14.CodecDecodeSelf(d) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EventSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Component != "" + yyq2[1] = x.Host != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Component)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("component")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Component)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Host)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("host")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Host)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EventSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } 
else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EventSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "component": + if r.TryDecodeAsNil() { + x.Component = "" + } else { + yyv4 := &x.Component + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "host": + if r.TryDecodeAsNil() { + x.Host = "" + } else { + yyv6 := &x.Host + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EventSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Component = "" + } else { + yyv9 := &x.Component + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Host = "" + } else { + yyv11 := &x.Host + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Event) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [11]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + yyq2[6] = true + yyq2[7] = true + yyq2[8] = true + yyq2[9] = x.Count != 0 + yyq2[10] = x.Type != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(11) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := 
z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.InvolvedObject + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("involvedObject")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.InvolvedObject + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yy26 := &x.Source + yy26.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if 
yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("source")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy28 := &x.Source + yy28.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yy31 := &x.FirstTimestamp + yym32 := z.EncBinary() + _ = yym32 + if false { + } else if z.HasExtensions() && z.EncExt(yy31) { + } else if yym32 { + z.EncBinaryMarshal(yy31) + } else if !yym32 && z.IsJSONHandle() { + z.EncJSONMarshal(yy31) + } else { + z.EncFallback(yy31) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("firstTimestamp")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy33 := &x.FirstTimestamp + yym34 := z.EncBinary() + _ = yym34 + if false { + } else if z.HasExtensions() && z.EncExt(yy33) { + } else if yym34 { + z.EncBinaryMarshal(yy33) + } else if !yym34 && z.IsJSONHandle() { + z.EncJSONMarshal(yy33) + } else { + z.EncFallback(yy33) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + yy36 := &x.LastTimestamp + yym37 := z.EncBinary() + _ = yym37 + if false { + } else if z.HasExtensions() && z.EncExt(yy36) { + } else if yym37 { + z.EncBinaryMarshal(yy36) + } else if !yym37 && z.IsJSONHandle() { + z.EncJSONMarshal(yy36) + } else { + z.EncFallback(yy36) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTimestamp")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy38 := &x.LastTimestamp + yym39 := z.EncBinary() + _ = yym39 + if false { + } else if z.HasExtensions() && z.EncExt(yy38) { + } else if yym39 { + z.EncBinaryMarshal(yy38) + } else if !yym39 && z.IsJSONHandle() { + z.EncJSONMarshal(yy38) + } else { + z.EncFallback(yy38) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + yym41 := z.EncBinary() + _ = yym41 + if false { + } else { + r.EncodeInt(int64(x.Count)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("count")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym42 := z.EncBinary() + _ = yym42 + if false { + } else { + r.EncodeInt(int64(x.Count)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + yym44 := z.EncBinary() + _ = yym44 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Type)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym45 := z.EncBinary() + _ = yym45 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Type)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Event) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if 
false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Event) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "involvedObject": + if r.TryDecodeAsNil() { + x.InvolvedObject = ObjectReference{} + } else { + yyv10 := &x.InvolvedObject + yyv10.CodecDecodeSelf(d) + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv11 := &x.Reason + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv13 := &x.Message + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + case "source": + if r.TryDecodeAsNil() { + x.Source = EventSource{} + } else { + yyv15 := &x.Source + yyv15.CodecDecodeSelf(d) + } + case "firstTimestamp": + if r.TryDecodeAsNil() { + x.FirstTimestamp = pkg2_v1.Time{} + } else { + yyv16 := &x.FirstTimestamp + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else if yym17 { + z.DecBinaryUnmarshal(yyv16) + } else if !yym17 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv16) + } else { + z.DecFallback(yyv16, false) + } + } + case "lastTimestamp": + if r.TryDecodeAsNil() { + x.LastTimestamp = pkg2_v1.Time{} + } else { + yyv18 := &x.LastTimestamp + yym19 := z.DecBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.DecExt(yyv18) { + } else if yym19 { + z.DecBinaryUnmarshal(yyv18) + } else if !yym19 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv18) + } else { + z.DecFallback(yyv18, false) + } + } + case "count": + if r.TryDecodeAsNil() { + x.Count = 0 + } else { + yyv20 := &x.Count + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*int32)(yyv20)) = int32(r.DecodeInt(32)) + } + } + case "type": + if 
r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv22 := &x.Type + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*string)(yyv22)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Event) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj24 int + var yyb24 bool + var yyhl24 bool = l >= 0 + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv25 := &x.Kind + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv27 := &x.APIVersion + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv29 := &x.ObjectMeta + yym30 := z.DecBinary() + _ = yym30 + if false { + } else if z.HasExtensions() && z.DecExt(yyv29) { + } else { + z.DecFallback(yyv29, false) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.InvolvedObject = ObjectReference{} + } else { + yyv31 := &x.InvolvedObject + yyv31.CodecDecodeSelf(d) + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv32 := &x.Reason + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + *((*string)(yyv32)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv34 := &x.Message + yym35 := z.DecBinary() + _ = yym35 + if false { + } else { + *((*string)(yyv34)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Source = EventSource{} + } else { + yyv36 := &x.Source + yyv36.CodecDecodeSelf(d) + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FirstTimestamp = pkg2_v1.Time{} + } else { + yyv37 := &x.FirstTimestamp + yym38 := z.DecBinary() + _ = yym38 + if false { + } else if z.HasExtensions() && z.DecExt(yyv37) { + } else if yym38 { + z.DecBinaryUnmarshal(yyv37) + } else if !yym38 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv37) + } else { + z.DecFallback(yyv37, false) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTimestamp = pkg2_v1.Time{} + } else { + yyv39 := &x.LastTimestamp + yym40 := z.DecBinary() + _ = yym40 + if false { + } else if z.HasExtensions() && z.DecExt(yyv39) { + } else if yym40 { + z.DecBinaryUnmarshal(yyv39) + } else if !yym40 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv39) + } else { + z.DecFallback(yyv39, false) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Count = 0 + } else { + yyv41 := &x.Count + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*int32)(yyv41)) = int32(r.DecodeInt(32)) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv43 := &x.Type + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*string)(yyv43)) = r.DecodeString() + } + } + for { + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj24-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EventList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || 
yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceEvent(([]Event)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceEvent(([]Event)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EventList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EventList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + 
*((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceEvent((*[]Event)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EventList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceEvent((*[]Event)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *List) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + 
yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceruntime_RawExtension(([]pkg5_runtime.RawExtension)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceruntime_RawExtension(([]pkg5_runtime.RawExtension)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *List) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 
{ + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *List) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceruntime_RawExtension((*[]pkg5_runtime.RawExtension)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *List) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceruntime_RawExtension((*[]pkg5_runtime.RawExtension)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x LimitType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *LimitType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *LimitRangeItem) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Type != "" + yyq2[1] = len(x.Max) != 0 + yyq2[2] = len(x.Min) != 0 + yyq2[3] = len(x.Default) != 0 + yyq2[4] = len(x.DefaultRequest) != 0 + yyq2[5] = len(x.MaxLimitRequestRatio) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Type.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Max == nil { + r.EncodeNil() + } else { + x.Max.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("max")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Max == nil { + r.EncodeNil() + } else { + x.Max.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Min == nil { + r.EncodeNil() + } else { + x.Min.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("min")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Min == nil { + r.EncodeNil() + } else { + x.Min.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Default == nil { + 
r.EncodeNil() + } else { + x.Default.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("default")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Default == nil { + r.EncodeNil() + } else { + x.Default.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.DefaultRequest == nil { + r.EncodeNil() + } else { + x.DefaultRequest.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("defaultRequest")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DefaultRequest == nil { + r.EncodeNil() + } else { + x.DefaultRequest.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.MaxLimitRequestRatio == nil { + r.EncodeNil() + } else { + x.MaxLimitRequestRatio.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("maxLimitRequestRatio")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MaxLimitRequestRatio == nil { + r.EncodeNil() + } else { + x.MaxLimitRequestRatio.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LimitRangeItem) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LimitRangeItem) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "max": + if r.TryDecodeAsNil() { + x.Max = nil + } else { + yyv5 := &x.Max + yyv5.CodecDecodeSelf(d) + } + case "min": + if r.TryDecodeAsNil() { + x.Min = nil + } else { + yyv6 := &x.Min + yyv6.CodecDecodeSelf(d) + } + case "default": + if r.TryDecodeAsNil() { + x.Default = nil + } else { + yyv7 := &x.Default + yyv7.CodecDecodeSelf(d) + } + case 
"defaultRequest": + if r.TryDecodeAsNil() { + x.DefaultRequest = nil + } else { + yyv8 := &x.DefaultRequest + yyv8.CodecDecodeSelf(d) + } + case "maxLimitRequestRatio": + if r.TryDecodeAsNil() { + x.MaxLimitRequestRatio = nil + } else { + yyv9 := &x.MaxLimitRequestRatio + yyv9.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LimitRangeItem) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv11 := &x.Type + yyv11.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Max = nil + } else { + yyv12 := &x.Max + yyv12.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Min = nil + } else { + yyv13 := &x.Min + yyv13.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Default = nil + } else { + yyv14 := &x.Default + yyv14.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DefaultRequest = nil + } else { + yyv15 := &x.DefaultRequest + yyv15.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MaxLimitRequestRatio = nil + } else { + yyv16 := &x.MaxLimitRequestRatio + yyv16.CodecDecodeSelf(d) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LimitRangeSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } 
else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Limits == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceLimitRangeItem(([]LimitRangeItem)(x.Limits), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("limits")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Limits == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceLimitRangeItem(([]LimitRangeItem)(x.Limits), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LimitRangeSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LimitRangeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "limits": + if r.TryDecodeAsNil() { + x.Limits = nil + } else { + yyv4 := &x.Limits + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LimitRangeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Limits = nil + } else { + yyv7 := &x.Limits + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LimitRange) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LimitRange) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := 
r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LimitRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = LimitRangeSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LimitRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } 
else { + z.DecFallback(yyv16, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = LimitRangeSpec{} + } else { + yyv18 := &x.Spec + yyv18.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LimitRangeList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceLimitRange(([]LimitRange)(x.Items), e) + } + } + } else { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceLimitRange(([]LimitRange)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LimitRangeList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LimitRangeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceLimitRange((*[]LimitRange)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LimitRangeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + 
} + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceLimitRange((*[]LimitRange)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ResourceQuotaScope) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ResourceQuotaScope) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *ResourceQuotaSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Hard) != 0 + yyq2[1] = len(x.Scopes) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Hard == nil { + r.EncodeNil() + } else { + x.Hard.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hard")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Hard == nil { + r.EncodeNil() + } else { + x.Hard.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Scopes 
== nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scopes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Scopes == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceQuotaSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceQuotaSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "hard": + if r.TryDecodeAsNil() { + x.Hard = nil + } else { + yyv4 := &x.Hard + yyv4.CodecDecodeSelf(d) + } + case "scopes": + if r.TryDecodeAsNil() { + x.Scopes = nil + } else { + yyv5 := &x.Scopes + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv5), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceQuotaSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hard = nil + } else { + yyv8 := &x.Hard + yyv8.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Scopes = nil + } else { + yyv9 := &x.Scopes + yym10 := z.DecBinary() + _ = yym10 + if false { + } 
else { + h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv9), d) + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceQuotaStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Hard) != 0 + yyq2[1] = len(x.Used) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Hard == nil { + r.EncodeNil() + } else { + x.Hard.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hard")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Hard == nil { + r.EncodeNil() + } else { + x.Hard.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Used == nil { + r.EncodeNil() + } else { + x.Used.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("used")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Used == nil { + r.EncodeNil() + } else { + x.Used.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceQuotaStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceQuotaStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + 
z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "hard": + if r.TryDecodeAsNil() { + x.Hard = nil + } else { + yyv4 := &x.Hard + yyv4.CodecDecodeSelf(d) + } + case "used": + if r.TryDecodeAsNil() { + x.Used = nil + } else { + yyv5 := &x.Used + yyv5.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceQuotaStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hard = nil + } else { + yyv7 := &x.Hard + yyv7.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Used = nil + } else { + yyv8 := &x.Used + yyv8.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceQuota) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceQuota) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceQuota) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + 
case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ResourceQuotaSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ResourceQuotaStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceQuota) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ResourceQuotaSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ResourceQuotaStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceQuotaList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := 
!z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceResourceQuota(([]ResourceQuota)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceResourceQuota(([]ResourceQuota)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceQuotaList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + 
x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceQuotaList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceResourceQuota((*[]ResourceQuota)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceQuotaList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } 
else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceResourceQuota((*[]ResourceQuota)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Secret) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = len(x.Data) != 0 + yyq2[4] = len(x.StringData) != 0 + yyq2[5] = x.Type != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Data == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false 
{ + } else { + h.encMapstringSliceuint8((map[string][]uint8)(x.Data), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("data")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Data == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encMapstringSliceuint8((map[string][]uint8)(x.Data), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.StringData == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + z.F.EncMapStringStringV(x.StringData, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stringData")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.StringData == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + z.F.EncMapStringStringV(x.StringData, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + x.Type.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Secret) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Secret) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + 
case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "data": + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv10 := &x.Data + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decMapstringSliceuint8((*map[string][]uint8)(yyv10), d) + } + } + case "stringData": + if r.TryDecodeAsNil() { + x.StringData = nil + } else { + yyv12 := &x.StringData + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + z.F.DecMapStringStringX(yyv12, false, d) + } + } + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv14 := &x.Type + yyv14.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Secret) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj15 int + var yyb15 bool + var yyhl15 bool = l >= 0 + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv16 := &x.Kind + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv18 := &x.APIVersion + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv20 := &x.ObjectMeta + yym21 := z.DecBinary() + _ = yym21 + if false { + } else if z.HasExtensions() && z.DecExt(yyv20) { + } else { + z.DecFallback(yyv20, false) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv22 := &x.Data + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + h.decMapstringSliceuint8((*map[string][]uint8)(yyv22), d) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StringData = nil + } else { + yyv24 := &x.StringData + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + z.F.DecMapStringStringX(yyv24, false, d) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv26 := &x.Type + yyv26.CodecDecodeSelf(d) + } + for { + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj15-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x SecretType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *SecretType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *SecretList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if 
false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceSecret(([]Secret)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceSecret(([]Secret)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SecretList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SecretList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceSecret((*[]Secret)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SecretList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = 
r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceSecret((*[]Secret)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ConfigMap) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = len(x.Data) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + 
r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Data == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + z.F.EncMapStringStringV(x.Data, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("data")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Data == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncMapStringStringV(x.Data, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ConfigMap) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ConfigMap) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 
:= z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "data": + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv10 := &x.Data + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + z.F.DecMapStringStringX(yyv10, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ConfigMap) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv19 := &x.Data + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + z.F.DecMapStringStringX(yyv19, false, d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ConfigMapList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } 
else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceConfigMap(([]ConfigMap)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceConfigMap(([]ConfigMap)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ConfigMapList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) 
+ } + } +} + +func (x *ConfigMapList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceConfigMap((*[]ConfigMap)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ConfigMapList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + 
h.decSliceConfigMap((*[]ConfigMap)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ComponentConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ComponentConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *ComponentCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.Message != "" + yyq2[3] = x.Error != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Status.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Status.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Error)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("error")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, 
string(x.Error)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ComponentCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ComponentCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv6 := &x.Message + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "error": + if r.TryDecodeAsNil() { + x.Error = "" + } else { + yyv8 := &x.Error + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ComponentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv11 := &x.Type + yyv11.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv12 := &x.Status + yyv12.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else 
{ + yyv13 := &x.Message + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Error = "" + } else { + yyv15 := &x.Error + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ComponentStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = len(x.Conditions) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceComponentCondition(([]ComponentCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceComponentCondition(([]ComponentCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ComponentStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ComponentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv10 := &x.Conditions + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceComponentCondition((*[]ComponentCondition)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ComponentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + 
yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv19 := &x.Conditions + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceComponentCondition((*[]ComponentCondition)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ComponentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceComponentStatus(([]ComponentStatus)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceComponentStatus(([]ComponentStatus)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ComponentStatusList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ComponentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if 
r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceComponentStatus((*[]ComponentStatus)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ComponentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceComponentStatus((*[]ComponentStatus)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DownwardAPIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] 
= len(x.Items) != 0 + yyq2[1] = x.DefaultMode != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Items == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy7 := *x.DefaultMode + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("defaultMode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy9 := *x.DefaultMode + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DownwardAPIVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DownwardAPIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv4 := &x.Items + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv4), d) + } + } + case "defaultMode": + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { 
+ if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DownwardAPIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv9 := &x.Items + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DownwardAPIVolumeFile) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FieldRef != nil + yyq2[2] = x.ResourceFieldRef != nil + yyq2[3] = x.Mode != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.FieldRef == nil { + r.EncodeNil() + } else { + x.FieldRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fieldRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FieldRef == nil { + r.EncodeNil() + } else { + x.FieldRef.CodecEncodeSelf(e) + } + 
} + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ResourceFieldRef == nil { + r.EncodeNil() + } else { + x.ResourceFieldRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceFieldRef == nil { + r.EncodeNil() + } else { + x.ResourceFieldRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Mode == nil { + r.EncodeNil() + } else { + yy13 := *x.Mode + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(yy13)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("mode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Mode == nil { + r.EncodeNil() + } else { + yy15 := *x.Mode + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(yy15)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DownwardAPIVolumeFile) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DownwardAPIVolumeFile) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fieldRef": + if r.TryDecodeAsNil() { + if x.FieldRef != nil { + x.FieldRef = nil + } + } else { + if x.FieldRef == nil { + x.FieldRef = new(ObjectFieldSelector) + } + x.FieldRef.CodecDecodeSelf(d) + } + case "resourceFieldRef": + if r.TryDecodeAsNil() { + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } + } else { + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) + } + case "mode": + if r.TryDecodeAsNil() { + if x.Mode != nil { + x.Mode = nil + } + } else { + if 
x.Mode == nil { + x.Mode = new(int32) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DownwardAPIVolumeFile) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv11 := &x.Path + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FieldRef != nil { + x.FieldRef = nil + } + } else { + if x.FieldRef == nil { + x.FieldRef = new(ObjectFieldSelector) + } + x.FieldRef.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } + } else { + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Mode != nil { + x.Mode = nil + } + } else { + if x.Mode == nil { + x.Mode = new(int32) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DownwardAPIProjection) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Items) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Items == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), 
e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DownwardAPIProjection) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DownwardAPIProjection) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv4 := &x.Items + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DownwardAPIProjection) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv7 := &x.Items + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + 
_ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Capabilities != nil + yyq2[1] = x.Privileged != nil + yyq2[2] = x.SELinuxOptions != nil + yyq2[3] = x.RunAsUser != nil + yyq2[4] = x.RunAsNonRoot != nil + yyq2[5] = x.ReadOnlyRootFilesystem != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Capabilities == nil { + r.EncodeNil() + } else { + x.Capabilities.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("capabilities")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Capabilities == nil { + r.EncodeNil() + } else { + x.Capabilities.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Privileged == nil { + r.EncodeNil() + } else { + yy7 := *x.Privileged + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("privileged")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Privileged == nil { + r.EncodeNil() + } else { + yy9 := *x.Privileged + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.SELinuxOptions == nil { + r.EncodeNil() + } else { + x.SELinuxOptions.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SELinuxOptions == nil { + r.EncodeNil() + } else { + x.SELinuxOptions.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.RunAsUser == nil { + r.EncodeNil() + } else { + yy15 := *x.RunAsUser + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(yy15)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RunAsUser == nil { + r.EncodeNil() + } else { + yy17 := *x.RunAsUser + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeInt(int64(yy17)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.RunAsNonRoot == nil { + r.EncodeNil() + } else { + yy20 := *x.RunAsNonRoot + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeBool(bool(yy20)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("runAsNonRoot")) 
+ z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RunAsNonRoot == nil { + r.EncodeNil() + } else { + yy22 := *x.RunAsNonRoot + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeBool(bool(yy22)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.ReadOnlyRootFilesystem == nil { + r.EncodeNil() + } else { + yy25 := *x.ReadOnlyRootFilesystem + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeBool(bool(yy25)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnlyRootFilesystem")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ReadOnlyRootFilesystem == nil { + r.EncodeNil() + } else { + yy27 := *x.ReadOnlyRootFilesystem + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeBool(bool(yy27)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SecurityContext) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "capabilities": + if r.TryDecodeAsNil() { + if x.Capabilities != nil { + x.Capabilities = nil + } + } else { + if x.Capabilities == nil { + x.Capabilities = new(Capabilities) + } + x.Capabilities.CodecDecodeSelf(d) + } + case "privileged": + if r.TryDecodeAsNil() { + if x.Privileged != nil { + x.Privileged = nil + } + } else { + if x.Privileged == nil { + x.Privileged = new(bool) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*bool)(x.Privileged)) = r.DecodeBool() + } + } + case "seLinuxOptions": + if r.TryDecodeAsNil() { + if x.SELinuxOptions != nil { + x.SELinuxOptions = nil + } + } else { + if x.SELinuxOptions == nil { + x.SELinuxOptions = new(SELinuxOptions) + } + x.SELinuxOptions.CodecDecodeSelf(d) + } + case "runAsUser": + if r.TryDecodeAsNil() { + if x.RunAsUser != nil { + x.RunAsUser = nil + } + } else { + if x.RunAsUser == nil { + x.RunAsUser = new(int64) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int64)(x.RunAsUser)) = 
int64(r.DecodeInt(64)) + } + } + case "runAsNonRoot": + if r.TryDecodeAsNil() { + if x.RunAsNonRoot != nil { + x.RunAsNonRoot = nil + } + } else { + if x.RunAsNonRoot == nil { + x.RunAsNonRoot = new(bool) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() + } + } + case "readOnlyRootFilesystem": + if r.TryDecodeAsNil() { + if x.ReadOnlyRootFilesystem != nil { + x.ReadOnlyRootFilesystem = nil + } + } else { + if x.ReadOnlyRootFilesystem == nil { + x.ReadOnlyRootFilesystem = new(bool) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(x.ReadOnlyRootFilesystem)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Capabilities != nil { + x.Capabilities = nil + } + } else { + if x.Capabilities == nil { + x.Capabilities = new(Capabilities) + } + x.Capabilities.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Privileged != nil { + x.Privileged = nil + } + } else { + if x.Privileged == nil { + x.Privileged = new(bool) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*bool)(x.Privileged)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SELinuxOptions != nil { + x.SELinuxOptions = nil + } + } else { + if x.SELinuxOptions == nil { + x.SELinuxOptions = new(SELinuxOptions) + } + x.SELinuxOptions.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RunAsUser != nil { + x.RunAsUser = nil + } + } else { + if x.RunAsUser == nil { + x.RunAsUser = new(int64) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RunAsNonRoot != nil { + x.RunAsNonRoot = nil + } + } else { + if x.RunAsNonRoot == nil { + x.RunAsNonRoot = new(bool) + } + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ReadOnlyRootFilesystem != nil { + x.ReadOnlyRootFilesystem = nil + } + } else { + if x.ReadOnlyRootFilesystem == nil { + x.ReadOnlyRootFilesystem = new(bool) + } + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(x.ReadOnlyRootFilesystem)) = r.DecodeBool() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SELinuxOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.User != "" + yyq2[1] = x.Role != "" + yyq2[2] = x.Type != "" + yyq2[3] = x.Level != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("user")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Role)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("role")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Role)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Type)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Type)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Level)) + } + } else { + 
r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("level")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Level)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SELinuxOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SELinuxOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "user": + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv4 := &x.User + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "role": + if r.TryDecodeAsNil() { + x.Role = "" + } else { + yyv6 := &x.Role + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv8 := &x.Type + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "level": + if r.TryDecodeAsNil() { + x.Level = "" + } else { + yyv10 := &x.Level + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SELinuxOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv13 := &x.User + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 
= r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Role = "" + } else { + yyv15 := &x.Role + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv17 := &x.Type + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Level = "" + } else { + yyv19 := &x.Level + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RangeAllocation) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() 
+ _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Range)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("range")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Range)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Data == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("data")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Data == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RangeAllocation) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RangeAllocation) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if 
false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "range": + if r.TryDecodeAsNil() { + x.Range = "" + } else { + yyv10 := &x.Range + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "data": + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv12 := &x.Data + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *yyv12 = r.DecodeBytes(*(*[]byte)(yyv12), false, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RangeAllocation) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv15 := &x.Kind + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv17 := &x.APIVersion + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv19 := &x.ObjectMeta + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else { + z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Range = "" + } else { + yyv21 := &x.Range + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv23 := &x.Data + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *yyv23 = r.DecodeBytes(*(*[]byte)(yyv23), false, false) + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSlicev1_OwnerReference(v []pkg2_v1.OwnerReference, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yym3 := z.EncBinary() + _ = yym3 + if false { + } else if z.HasExtensions() && z.EncExt(yy2) { + } else { + z.EncFallback(yy2) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicev1_OwnerReference(v *[]pkg2_v1.OwnerReference, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg2_v1.OwnerReference{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 80) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]pkg2_v1.OwnerReference, yyrl1) + } + } else { + yyv1 = make([]pkg2_v1.OwnerReference, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg2_v1.OwnerReference{} + } else { + yyv2 := &yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else if z.HasExtensions() && z.DecExt(yyv2) { + } else { + z.DecFallback(yyv2, false) + } + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, pkg2_v1.OwnerReference{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg2_v1.OwnerReference{} + } else { + yyv4 := &yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, pkg2_v1.OwnerReference{}) // var yyz1 pkg2_v1.OwnerReference + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg2_v1.OwnerReference{} + } else { + yyv6 := &yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else { + z.DecFallback(yyv6, false) + } + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg2_v1.OwnerReference{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePersistentVolumeAccessMode(v []PersistentVolumeAccessMode, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yyv1.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePersistentVolumeAccessMode(v *[]PersistentVolumeAccessMode, d *codec1978.Decoder) 
{ + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PersistentVolumeAccessMode{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PersistentVolumeAccessMode, yyrl1) + } + } else { + yyv1 = make([]PersistentVolumeAccessMode, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 PersistentVolumeAccessMode + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PersistentVolumeAccessMode{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePersistentVolume(v []PersistentVolume, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePersistentVolume(v *[]PersistentVolume, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PersistentVolume{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 528) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PersistentVolume, yyrl1) + } + } else { + yyv1 = make([]PersistentVolume, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PersistentVolume{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PersistentVolume{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + 
yyv1[yyj1] = PersistentVolume{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PersistentVolume{}) // var yyz1 PersistentVolume + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PersistentVolume{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PersistentVolume{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePersistentVolumeClaim(v []PersistentVolumeClaim, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePersistentVolumeClaim(v *[]PersistentVolumeClaim, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PersistentVolumeClaim{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 376) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PersistentVolumeClaim, yyrl1) + } + } else { + yyv1 = make([]PersistentVolumeClaim, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PersistentVolumeClaim{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PersistentVolumeClaim{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PersistentVolumeClaim{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PersistentVolumeClaim{}) // var yyz1 PersistentVolumeClaim + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PersistentVolumeClaim{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PersistentVolumeClaim{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceKeyToPath(v []KeyToPath, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceKeyToPath(v *[]KeyToPath, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []KeyToPath{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]KeyToPath, yyrl1) + } + } else { + yyv1 = make([]KeyToPath, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = KeyToPath{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, KeyToPath{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = KeyToPath{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, KeyToPath{}) // var yyz1 KeyToPath + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = KeyToPath{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []KeyToPath{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceVolumeProjection(v []VolumeProjection, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceVolumeProjection(v *[]VolumeProjection, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []VolumeProjection{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]VolumeProjection, yyrl1) + } + } else { + yyv1 = make([]VolumeProjection, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = VolumeProjection{} + } else { + yyv2 := &yyv1[yyj1] + 
yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, VolumeProjection{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = VolumeProjection{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, VolumeProjection{}) // var yyz1 VolumeProjection + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = VolumeProjection{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []VolumeProjection{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceHTTPHeader(v []HTTPHeader, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceHTTPHeader(v *[]HTTPHeader, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []HTTPHeader{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]HTTPHeader, yyrl1) + } + } else { + yyv1 = make([]HTTPHeader, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HTTPHeader{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, HTTPHeader{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HTTPHeader{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, HTTPHeader{}) // var yyz1 HTTPHeader + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = HTTPHeader{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []HTTPHeader{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceCapability(v []Capability, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yyv1.CodecEncodeSelf(e) + } + 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCapability(v *[]Capability, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Capability{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Capability, yyrl1) + } + } else { + yyv1 = make([]Capability, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 Capability + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Capability{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceContainerPort(v []ContainerPort, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceContainerPort(v *[]ContainerPort, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ContainerPort{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ContainerPort, yyrl1) + } + } else { + yyv1 = make([]ContainerPort, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerPort{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ContainerPort{}) + 
yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerPort{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ContainerPort{}) // var yyz1 ContainerPort + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerPort{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ContainerPort{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEnvFromSource(v []EnvFromSource, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceEnvFromSource(v *[]EnvFromSource, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []EnvFromSource{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]EnvFromSource, yyrl1) + } + } else { + yyv1 = make([]EnvFromSource, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EnvFromSource{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, EnvFromSource{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EnvFromSource{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, EnvFromSource{}) // var yyz1 EnvFromSource + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = EnvFromSource{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []EnvFromSource{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEnvVar(v []EnvVar, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) 
decSliceEnvVar(v *[]EnvVar, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []EnvVar{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]EnvVar, yyrl1) + } + } else { + yyv1 = make([]EnvVar, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EnvVar{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, EnvVar{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EnvVar{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, EnvVar{}) // var yyz1 EnvVar + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = EnvVar{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []EnvVar{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceVolumeMount(v []VolumeMount, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceVolumeMount(v *[]VolumeMount, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []VolumeMount{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]VolumeMount, yyrl1) + } + } else { + yyv1 = make([]VolumeMount, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = VolumeMount{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, VolumeMount{}) + yyh1.ElemContainerState(yyj1) + if 
r.TryDecodeAsNil() { + yyv1[yyj1] = VolumeMount{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, VolumeMount{}) // var yyz1 VolumeMount + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = VolumeMount{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []VolumeMount{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNodeSelectorTerm(v []NodeSelectorTerm, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNodeSelectorTerm(v *[]NodeSelectorTerm, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NodeSelectorTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]NodeSelectorTerm, yyrl1) + } + } else { + yyv1 = make([]NodeSelectorTerm, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeSelectorTerm{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NodeSelectorTerm{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeSelectorTerm{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NodeSelectorTerm{}) // var yyz1 NodeSelectorTerm + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeSelectorTerm{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NodeSelectorTerm{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNodeSelectorRequirement(v []NodeSelectorRequirement, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x 
codecSelfer1234) decSliceNodeSelectorRequirement(v *[]NodeSelectorRequirement, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NodeSelectorRequirement{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]NodeSelectorRequirement, yyrl1) + } + } else { + yyv1 = make([]NodeSelectorRequirement, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeSelectorRequirement{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NodeSelectorRequirement{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeSelectorRequirement{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NodeSelectorRequirement{}) // var yyz1 NodeSelectorRequirement + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeSelectorRequirement{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NodeSelectorRequirement{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePodAffinityTerm(v []PodAffinityTerm, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePodAffinityTerm(v *[]PodAffinityTerm, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodAffinityTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PodAffinityTerm, yyrl1) + } + } else { + yyv1 = make([]PodAffinityTerm, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if 
r.TryDecodeAsNil() { + yyv1[yyj1] = PodAffinityTerm{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodAffinityTerm{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodAffinityTerm{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodAffinityTerm{}) // var yyz1 PodAffinityTerm + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodAffinityTerm{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodAffinityTerm{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceWeightedPodAffinityTerm(v []WeightedPodAffinityTerm, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceWeightedPodAffinityTerm(v *[]WeightedPodAffinityTerm, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []WeightedPodAffinityTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]WeightedPodAffinityTerm, yyrl1) + } + } else { + yyv1 = make([]WeightedPodAffinityTerm, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = WeightedPodAffinityTerm{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, WeightedPodAffinityTerm{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = WeightedPodAffinityTerm{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, WeightedPodAffinityTerm{}) // var yyz1 WeightedPodAffinityTerm + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = WeightedPodAffinityTerm{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []WeightedPodAffinityTerm{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePreferredSchedulingTerm(v 
[]PreferredSchedulingTerm, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePreferredSchedulingTerm(v *[]PreferredSchedulingTerm, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PreferredSchedulingTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PreferredSchedulingTerm, yyrl1) + } + } else { + yyv1 = make([]PreferredSchedulingTerm, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PreferredSchedulingTerm{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PreferredSchedulingTerm{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PreferredSchedulingTerm{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PreferredSchedulingTerm{}) // var yyz1 PreferredSchedulingTerm + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PreferredSchedulingTerm{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PreferredSchedulingTerm{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceVolume(v []Volume, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceVolume(v *[]Volume, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Volume{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 224) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } 
else { + yyv1 = make([]Volume, yyrl1) + } + } else { + yyv1 = make([]Volume, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Volume{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Volume{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Volume{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Volume{}) // var yyz1 Volume + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Volume{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Volume{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceContainer(v []Container, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceContainer(v *[]Container, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Container{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Container, yyrl1) + } + } else { + yyv1 = make([]Container, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Container{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Container{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Container{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Container{}) // var yyz1 Container + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Container{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Container{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x 
codecSelfer1234) encSliceLocalObjectReference(v []LocalObjectReference, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceLocalObjectReference(v *[]LocalObjectReference, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []LocalObjectReference{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]LocalObjectReference, yyrl1) + } + } else { + yyv1 = make([]LocalObjectReference, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LocalObjectReference{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, LocalObjectReference{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LocalObjectReference{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, LocalObjectReference{}) // var yyz1 LocalObjectReference + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = LocalObjectReference{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []LocalObjectReference{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceToleration(v []Toleration, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceToleration(v *[]Toleration, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Toleration{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { 
+ yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Toleration, yyrl1) + } + } else { + yyv1 = make([]Toleration, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Toleration{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Toleration{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Toleration{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Toleration{}) // var yyz1 Toleration + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Toleration{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Toleration{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePodCondition(v []PodCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePodCondition(v *[]PodCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PodCondition, yyrl1) + } + } else { + yyv1 = make([]PodCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodCondition{}) // var yyz1 PodCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = 
[]PodCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceContainerStatus(v []ContainerStatus, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceContainerStatus(v *[]ContainerStatus, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ContainerStatus{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 120) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ContainerStatus, yyrl1) + } + } else { + yyv1 = make([]ContainerStatus, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerStatus{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ContainerStatus{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerStatus{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ContainerStatus{}) // var yyz1 ContainerStatus + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerStatus{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ContainerStatus{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePod(v []Pod, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePod(v *[]Pod, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Pod{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 736) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = 
yyv1[:yyrl1] + } else { + yyv1 = make([]Pod, yyrl1) + } + } else { + yyv1 = make([]Pod, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Pod{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Pod{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Pod{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Pod{}) // var yyz1 Pod + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Pod{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Pod{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePodTemplate(v []PodTemplate, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePodTemplate(v *[]PodTemplate, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodTemplate{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 784) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PodTemplate, yyrl1) + } + } else { + yyv1 = make([]PodTemplate, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodTemplate{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodTemplate{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodTemplate{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodTemplate{}) // var yyz1 PodTemplate + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodTemplate{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodTemplate{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + 
+func (x codecSelfer1234) encSliceReplicationControllerCondition(v []ReplicationControllerCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceReplicationControllerCondition(v *[]ReplicationControllerCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ReplicationControllerCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 88) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ReplicationControllerCondition, yyrl1) + } + } else { + yyv1 = make([]ReplicationControllerCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicationControllerCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ReplicationControllerCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicationControllerCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ReplicationControllerCondition{}) // var yyz1 ReplicationControllerCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicationControllerCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ReplicationControllerCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceReplicationController(v []ReplicationController, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceReplicationController(v *[]ReplicationController, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ReplicationController{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + 
yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 336) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ReplicationController, yyrl1) + } + } else { + yyv1 = make([]ReplicationController, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicationController{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ReplicationController{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicationController{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ReplicationController{}) // var yyz1 ReplicationController + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicationController{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ReplicationController{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceLoadBalancerIngress(v []LoadBalancerIngress, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceLoadBalancerIngress(v *[]LoadBalancerIngress, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []LoadBalancerIngress{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]LoadBalancerIngress, yyrl1) + } + } else { + yyv1 = make([]LoadBalancerIngress, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LoadBalancerIngress{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, LoadBalancerIngress{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LoadBalancerIngress{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, 
LoadBalancerIngress{}) // var yyz1 LoadBalancerIngress + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = LoadBalancerIngress{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []LoadBalancerIngress{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceServicePort(v []ServicePort, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceServicePort(v *[]ServicePort, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ServicePort{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 80) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ServicePort, yyrl1) + } + } else { + yyv1 = make([]ServicePort, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServicePort{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ServicePort{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServicePort{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ServicePort{}) // var yyz1 ServicePort + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServicePort{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ServicePort{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceService(v []Service, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceService(v *[]Service, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Service{} + yyc1 
= true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 464) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Service, yyrl1) + } + } else { + yyv1 = make([]Service, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Service{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Service{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Service{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Service{}) // var yyz1 Service + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Service{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Service{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceObjectReference(v []ObjectReference, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceObjectReference(v *[]ObjectReference, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ObjectReference{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ObjectReference, yyrl1) + } + } else { + yyv1 = make([]ObjectReference, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ObjectReference{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ObjectReference{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ObjectReference{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, 
ObjectReference{}) // var yyz1 ObjectReference + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ObjectReference{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ObjectReference{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceServiceAccount(v []ServiceAccount, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceServiceAccount(v *[]ServiceAccount, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ServiceAccount{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 312) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ServiceAccount, yyrl1) + } + } else { + yyv1 = make([]ServiceAccount, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServiceAccount{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ServiceAccount{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServiceAccount{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ServiceAccount{}) // var yyz1 ServiceAccount + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServiceAccount{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ServiceAccount{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEndpointSubset(v []EndpointSubset, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceEndpointSubset(v *[]EndpointSubset, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 
== 0 { + if yyv1 == nil { + yyv1 = []EndpointSubset{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]EndpointSubset, yyrl1) + } + } else { + yyv1 = make([]EndpointSubset, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointSubset{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, EndpointSubset{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointSubset{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, EndpointSubset{}) // var yyz1 EndpointSubset + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointSubset{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []EndpointSubset{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEndpointAddress(v []EndpointAddress, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceEndpointAddress(v *[]EndpointAddress, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []EndpointAddress{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]EndpointAddress, yyrl1) + } + } else { + yyv1 = make([]EndpointAddress, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointAddress{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, EndpointAddress{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointAddress{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } 
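// NOTE: the fixed-length path above allocates in two phases: DecInferLen bounds
// the initial make() using MaxInitLen and the per-element byte-size literal, any
// previously held elements are copied across, and if the inferred length was
// truncated (yyrt1) the remaining elements are appended one at a time. The
// `else` branch that follows handles indefinite-length ("streamed") arrays,
// consulting r.CheckBreak() before each element instead of a declared length.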
+ + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, EndpointAddress{}) // var yyz1 EndpointAddress + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointAddress{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []EndpointAddress{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEndpointPort(v []EndpointPort, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceEndpointPort(v *[]EndpointPort, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []EndpointPort{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]EndpointPort, yyrl1) + } + } else { + yyv1 = make([]EndpointPort, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointPort{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, EndpointPort{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointPort{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, EndpointPort{}) // var yyz1 EndpointPort + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointPort{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []EndpointPort{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEndpoints(v []Endpoints, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceEndpoints(v *[]Endpoints, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := 
z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Endpoints{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Endpoints, yyrl1) + } + } else { + yyv1 = make([]Endpoints, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Endpoints{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Endpoints{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Endpoints{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Endpoints{}) // var yyz1 Endpoints + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Endpoints{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Endpoints{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceTaint(v []Taint, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceTaint(v *[]Taint, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Taint{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Taint, yyrl1) + } + } else { + yyv1 = make([]Taint, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Taint{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Taint{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Taint{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + 
yyv1 = append(yyv1, Taint{}) // var yyz1 Taint + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Taint{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Taint{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNodeCondition(v []NodeCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNodeCondition(v *[]NodeCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NodeCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]NodeCondition, yyrl1) + } + } else { + yyv1 = make([]NodeCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NodeCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NodeCondition{}) // var yyz1 NodeCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NodeCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNodeAddress(v []NodeAddress, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNodeAddress(v *[]NodeAddress, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = 
[]NodeAddress{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]NodeAddress, yyrl1) + } + } else { + yyv1 = make([]NodeAddress, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeAddress{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NodeAddress{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeAddress{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NodeAddress{}) // var yyz1 NodeAddress + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeAddress{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NodeAddress{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceContainerImage(v []ContainerImage, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceContainerImage(v *[]ContainerImage, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ContainerImage{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ContainerImage, yyrl1) + } + } else { + yyv1 = make([]ContainerImage, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerImage{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ContainerImage{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerImage{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= 
len(yyv1) { + yyv1 = append(yyv1, ContainerImage{}) // var yyz1 ContainerImage + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerImage{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ContainerImage{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceUniqueVolumeName(v []UniqueVolumeName, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yyv1.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceUniqueVolumeName(v *[]UniqueVolumeName, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []UniqueVolumeName{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]UniqueVolumeName, yyrl1) + } + } else { + yyv1 = make([]UniqueVolumeName, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 UniqueVolumeName + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []UniqueVolumeName{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceAttachedVolume(v []AttachedVolume, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceAttachedVolume(v *[]AttachedVolume, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []AttachedVolume{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] 
+ yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]AttachedVolume, yyrl1) + } + } else { + yyv1 = make([]AttachedVolume, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = AttachedVolume{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, AttachedVolume{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = AttachedVolume{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, AttachedVolume{}) // var yyz1 AttachedVolume + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = AttachedVolume{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []AttachedVolume{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePreferAvoidPodsEntry(v []PreferAvoidPodsEntry, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePreferAvoidPodsEntry(v *[]PreferAvoidPodsEntry, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PreferAvoidPodsEntry{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PreferAvoidPodsEntry, yyrl1) + } + } else { + yyv1 = make([]PreferAvoidPodsEntry, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PreferAvoidPodsEntry{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PreferAvoidPodsEntry{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PreferAvoidPodsEntry{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + 
if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PreferAvoidPodsEntry{}) // var yyz1 PreferAvoidPodsEntry + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PreferAvoidPodsEntry{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PreferAvoidPodsEntry{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encResourceList(v ResourceList, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yyk1.CodecEncodeSelf(e) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy3 := &yyv1 + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(yy3) { + } else if !yym4 && z.IsJSONHandle() { + z.EncJSONMarshal(yy3) + } else { + z.EncFallback(yy3) + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decResourceList(v *ResourceList, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 72) + yyv1 = make(map[ResourceName]pkg3_resource.Quantity, yyrl1) + *v = yyv1 + } + var yymk1 ResourceName + var yymv1 pkg3_resource.Quantity + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yyv2.CodecDecodeSelf(d) + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = pkg3_resource.Quantity{} + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = pkg3_resource.Quantity{} + } else { + yyv3 := &yymv1 + yym4 := z.DecBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.DecExt(yyv3) { + } else if !yym4 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv3) + } else { + z.DecFallback(yyv3, false) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv5 := &yymk1 + yyv5.CodecDecodeSelf(d) + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = pkg3_resource.Quantity{} + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = pkg3_resource.Quantity{} + } else { + yyv6 := &yymv1 + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? 
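// NOTE: when the incoming map length is zero, neither loop above executes, so a
// non-nil destination ResourceList keeps whatever entries it already held (the
// upstream TODO above); only the map-end container state is sent below. With
// MapValueReset set on the handle, each value is first read back from the map
// and decoded over in place; otherwise decoding starts from a zero Quantity.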
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encSliceNode(v []Node, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNode(v *[]Node, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Node{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 656) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Node, yyrl1) + } + } else { + yyv1 = make([]Node, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Node{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Node{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Node{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Node{}) // var yyz1 Node + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Node{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Node{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceFinalizerName(v []FinalizerName, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yyv1.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceFinalizerName(v *[]FinalizerName, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []FinalizerName{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]FinalizerName, yyrl1) + } + } else { + yyv1 = make([]FinalizerName, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != 
len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 FinalizerName + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []FinalizerName{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNamespace(v []Namespace, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNamespace(v *[]Namespace, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Namespace{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Namespace, yyrl1) + } + } else { + yyv1 = make([]Namespace, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Namespace{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Namespace{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Namespace{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Namespace{}) // var yyz1 Namespace + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Namespace{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Namespace{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEvent(v []Event, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v 
{ + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceEvent(v *[]Event, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Event{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 504) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Event, yyrl1) + } + } else { + yyv1 = make([]Event, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Event{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Event{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Event{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Event{}) // var yyz1 Event + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Event{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Event{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceruntime_RawExtension(v []pkg5_runtime.RawExtension, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yym3 := z.EncBinary() + _ = yym3 + if false { + } else if z.HasExtensions() && z.EncExt(yy2) { + } else if !yym3 && z.IsJSONHandle() { + z.EncJSONMarshal(yy2) + } else { + z.EncFallback(yy2) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceruntime_RawExtension(v *[]pkg5_runtime.RawExtension, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg5_runtime.RawExtension{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]pkg5_runtime.RawExtension, yyrl1) + } + } else { + yyv1 = make([]pkg5_runtime.RawExtension, yyrl1) + } + yyc1 = true + yyrr1 = 
len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg5_runtime.RawExtension{} + } else { + yyv2 := &yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else if z.HasExtensions() && z.DecExt(yyv2) { + } else if !yym3 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv2) + } else { + z.DecFallback(yyv2, false) + } + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, pkg5_runtime.RawExtension{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg5_runtime.RawExtension{} + } else { + yyv4 := &yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv4) + } else { + z.DecFallback(yyv4, false) + } + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, pkg5_runtime.RawExtension{}) // var yyz1 pkg5_runtime.RawExtension + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg5_runtime.RawExtension{} + } else { + yyv6 := &yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg5_runtime.RawExtension{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceLimitRangeItem(v []LimitRangeItem, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceLimitRangeItem(v *[]LimitRangeItem, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []LimitRangeItem{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]LimitRangeItem, yyrl1) + } + } else { + yyv1 = make([]LimitRangeItem, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LimitRangeItem{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, LimitRangeItem{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LimitRangeItem{} + } else { + yyv3 := &yyv1[yyj1] + 
yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, LimitRangeItem{}) // var yyz1 LimitRangeItem + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = LimitRangeItem{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []LimitRangeItem{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceLimitRange(v []LimitRange, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceLimitRange(v *[]LimitRange, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []LimitRange{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]LimitRange, yyrl1) + } + } else { + yyv1 = make([]LimitRange, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LimitRange{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, LimitRange{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LimitRange{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, LimitRange{}) // var yyz1 LimitRange + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = LimitRange{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []LimitRange{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceResourceQuotaScope(v []ResourceQuotaScope, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yyv1.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceResourceQuotaScope(v *[]ResourceQuotaScope, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + 
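// NOTE: every generated helper begins with this boilerplate: GenHelperEncoder /
// GenHelperDecoder expose the stable surface codecgen is allowed to call, and
// the blank assignment `_, _, _ = h, z, r` keeps the compiler from rejecting
// helpers that do not use all three values.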
yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ResourceQuotaScope{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ResourceQuotaScope, yyrl1) + } + } else { + yyv1 = make([]ResourceQuotaScope, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 ResourceQuotaScope + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ResourceQuotaScope{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceResourceQuota(v []ResourceQuota, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceResourceQuota(v *[]ResourceQuota, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ResourceQuota{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 304) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ResourceQuota, yyrl1) + } + } else { + yyv1 = make([]ResourceQuota, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ResourceQuota{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ResourceQuota{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ResourceQuota{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { 
+ + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ResourceQuota{}) // var yyz1 ResourceQuota + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ResourceQuota{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ResourceQuota{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encMapstringSliceuint8(v map[string][]uint8, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyv1 == nil { + r.EncodeNil() + } else { + yym3 := z.EncBinary() + _ = yym3 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(yyv1)) + } + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decMapstringSliceuint8(v *map[string][]uint8, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string][]uint8, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 []uint8 + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv4 := &yymv1 + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *yyv4 = r.DecodeBytes(*(*[]byte)(yyv4), false, false) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv6 := &yymk1 + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv8 := &yymv1 + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *yyv8 = r.DecodeBytes(*(*[]byte)(yyv8), false, false) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? 
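// NOTE: this map[string][]uint8 decoder serves byte-map fields such as
// Secret.Data. Keys are decoded as UTF-8 strings; values go through
// r.DecodeBytes, seeded with the current slice so its backing array can be
// reused when large enough. As in decResourceList, a zero-length map on the
// wire leaves existing entries untouched.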
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encSliceuint8(v []uint8, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(v)) +} + +func (x codecSelfer1234) decSliceuint8(v *[]uint8, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + *v = r.DecodeBytes(*((*[]byte)(v)), false, false) +} + +func (x codecSelfer1234) encSliceSecret(v []Secret, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceSecret(v *[]Secret, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Secret{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 288) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Secret, yyrl1) + } + } else { + yyv1 = make([]Secret, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Secret{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Secret{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Secret{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Secret{}) // var yyz1 Secret + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Secret{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Secret{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceConfigMap(v []ConfigMap, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceConfigMap(v *[]ConfigMap, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ConfigMap{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] 
+ yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ConfigMap, yyrl1) + } + } else { + yyv1 = make([]ConfigMap, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ConfigMap{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ConfigMap{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ConfigMap{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ConfigMap{}) // var yyz1 ConfigMap + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ConfigMap{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ConfigMap{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceComponentCondition(v []ComponentCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceComponentCondition(v *[]ComponentCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ComponentCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ComponentCondition, yyrl1) + } + } else { + yyv1 = make([]ComponentCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ComponentCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ComponentCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ComponentCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, 
ComponentCondition{}) // var yyz1 ComponentCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ComponentCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ComponentCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceComponentStatus(v []ComponentStatus, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceComponentStatus(v *[]ComponentStatus, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ComponentStatus{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ComponentStatus, yyrl1) + } + } else { + yyv1 = make([]ComponentStatus, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ComponentStatus{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ComponentStatus{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ComponentStatus{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ComponentStatus{}) // var yyz1 ComponentStatus + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ComponentStatus{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ComponentStatus{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceDownwardAPIVolumeFile(v []DownwardAPIVolumeFile, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceDownwardAPIVolumeFile(v *[]DownwardAPIVolumeFile, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := 
z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []DownwardAPIVolumeFile{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]DownwardAPIVolumeFile, yyrl1) + } + } else { + yyv1 = make([]DownwardAPIVolumeFile, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = DownwardAPIVolumeFile{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, DownwardAPIVolumeFile{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = DownwardAPIVolumeFile{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, DownwardAPIVolumeFile{}) // var yyz1 DownwardAPIVolumeFile + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = DownwardAPIVolumeFile{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []DownwardAPIVolumeFile{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/types.go b/vendor/k8s.io/client-go/pkg/api/v1/types.go new file mode 100644 index 0000000000..a75a1d0f06 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/types.go @@ -0,0 +1,4380 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// The comments for the structs and fields can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored and not exported to the SwaggerAPI. 
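Aside: the encSliceSecret/decSliceSecret-style helpers in the generated file above come from codecgen (github.com/ugorji/go/codec); they emit per-type encode/decode methods so the codec runtime can skip reflection. A minimal sketch of how a caller drives such generated code through the codec package is below; the Item type is a hypothetical stand-in for generated API types such as Secret or ConfigMap, and the handle choice is illustrative.

package main

import (
	"fmt"

	"github.com/ugorji/go/codec" // the library codecgen targets (assumed available alongside the generated file)
)

// Item stands in for a generated API type; real generated types also implement
// CodecEncodeSelf/CodecDecodeSelf, which the codec package uses instead of reflection.
type Item struct {
	Name string
	Data []byte
}

func main() {
	var h codec.JsonHandle // any codec.Handle works the same way

	in := []Item{{Name: "a", Data: []byte("x")}, {Name: "b"}}

	// Encode the slice into a byte buffer.
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &h).Encode(in); err != nil {
		panic(err)
	}

	// Decode back into a (possibly pre-allocated) slice; like the generated decSliceXxx
	// helpers, the destination is reused when capacity allows and grown otherwise.
	out := make([]Item, 0, 2)
	if err := codec.NewDecoderBytes(buf, &h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out)
}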
+// +// The aforementioned methods can be generated by hack/update-generated-swagger-docs.sh + +// Common string formats +// --------------------- +// Many fields in this API have formatting requirements. The commonly used +// formats are defined here. +// +// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier" +// in the C language. This is captured by the following regex: +// [A-Za-z_][A-Za-z0-9_]* +// This defines the format, but not the length restriction, which should be +// specified at the definition of any field of this type. +// +// DNS_LABEL: This is a string, no more than 63 characters long, that conforms +// to the definition of a "label" in RFCs 1035 and 1123. This is captured +// by the following regex: +// [a-z0-9]([-a-z0-9]*[a-z0-9])? +// +// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms +// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured +// by the following regex: +// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* +// or more simply: +// DNS_LABEL(\.DNS_LABEL)* +// +// IANA_SVC_NAME: This is a string, no more than 15 characters long, that +// conforms to the definition of IANA service name in RFC 6335. +// It must contains at least one letter [a-z] and it must contains only [a-z0-9-]. +// Hypens ('-') cannot be leading or trailing character of the string +// and cannot be adjacent to other hyphens. + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. +// +k8s:openapi-gen=false +type ObjectMeta struct { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // than the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. + // + // If this field is specified and the generated name exists, the server will + // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // + // Applied only if Name is not specified. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency + // +optional + GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` + + // Namespace defines the space within each name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. 
+ // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // + // Must be a DNS_LABEL. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` + + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // +optional + SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"` + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // + // Populated by the system. + // Read-only. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + + // An opaque value that represents the internal version of this object that can + // be used by clients to determine when objects have changed. May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and passed unmodified back to the server. + // They may only be valid for a particular resource or set of resources. + // + // Populated by the system. + // Read-only. + // Value must be treated as opaque by clients and . + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. Read-only. + // +optional + Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"` + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. + // + // Populated by the system. + // Read-only. + // Null for lists. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + CreationTimestamp metav1.Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"` + + // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This + // field is set by the server when a graceful deletion is requested by the user, and is not + // directly settable by a client. The resource is expected to be deleted (no longer visible + // from resource lists, and not reachable by name) after the time in this field. Once set, + // this value may not be unset or be set further into the future, although it may be shortened + // or the resource may be deleted prior to this time. For example, a user may request that + // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination + // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard + // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the + // API. 
In the presence of network partitions, this object may still exist after this + // timestamp, until an administrator or automated process can determine the resource is + // fully terminated. + // If not set, graceful deletion of the object has not been requested. + // + // Populated by the system when a graceful deletion is requested. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + DeletionTimestamp *metav1.Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` + + // Number of seconds allowed for this object to gracefully terminate before + // it will be removed from the system. Only set when deletionTimestamp is also set. + // May only be shortened. + // Read-only. + // +optional + DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: http://kubernetes.io/docs/user-guide/labels + // +optional + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + OwnerReferences []metav1.OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + // +optional + Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"` + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + // +optional + ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"` +} + +const ( + // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients + NamespaceDefault string = "default" + // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces + NamespaceAll string = "" +) + +// Volume represents a named volume in a pod that may be accessed by any container in the pod. +type Volume struct { + // Volume's name. + // Must be a DNS_LABEL and unique within the pod. 
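Before continuing with the volume types, a short illustration of the Name/GenerateName distinction described in ObjectMeta above, written against the apimachinery ObjectMeta that this deprecated type mirrors. This is a hedged sketch; the names, namespace, and annotation key are made up.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Fixed name: creation is idempotent, and a second create with the same name conflicts (409).
	fixed := metav1.ObjectMeta{
		Name:      "ingress-controller-config",
		Namespace: "kube-system",
		Labels:    map[string]string{"app": "ingress"},
	}

	// GenerateName: the server appends a unique suffix, so repeated creates yield new objects
	// instead of conflicts; useful when only a prefix matters.
	generated := metav1.ObjectMeta{
		GenerateName: "ingress-controller-",
		Namespace:    "kube-system",
		Annotations:  map[string]string{"example.com/owner": "demo"}, // hypothetical annotation key
	}

	fmt.Println(fixed.Name, generated.GenerateName)
}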
+ // More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // VolumeSource represents the location and type of the mounted volume. + // If not specified, the Volume is implied to be an EmptyDir. + // This implied behavior is deprecated and will be removed in a future version. + VolumeSource `json:",inline" protobuf:"bytes,2,opt,name=volumeSource"` +} + +// Represents the source of a volume to mount. +// Only one of its members may be specified. +type VolumeSource struct { + // HostPath represents a pre-existing file or directory on the host + // machine that is directly exposed to the container. This is generally + // used for system agents or other privileged things that are allowed + // to see the host machine. Most containers will NOT need this. + // More info: http://kubernetes.io/docs/user-guide/volumes#hostpath + // --- + // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + // mount host directories as read/write. + // +optional + HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"` + // EmptyDir represents a temporary directory that shares a pod's lifetime. + // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // +optional + EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"` + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"` + // AWSElasticBlockStore represents an AWS Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"` + // GitRepo represents a git repository at a particular revision. + // +optional + GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"` + // Secret represents a secret that should populate this volume. + // More info: http://kubernetes.io/docs/user-guide/volumes#secrets + // +optional + Secret *SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"` + // NFS represents an NFS mount on the host that shares a pod's lifetime + // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // +optional + NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"` + // ISCSI represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: http://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md + // +optional + ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"` + // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // +optional + Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"` + // PersistentVolumeClaimVolumeSource represents a reference to a + // PersistentVolumeClaim in the same namespace. 
+ // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // +optional + PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"` + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // +optional + RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"` + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. This is an + // alpha feature and may change in future. + // +optional + FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"` + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"` + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"` + // DownwardAPI represents downward API about the pod that should populate this volume + // +optional + DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty" protobuf:"bytes,16,opt,name=downwardAPI"` + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"` + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"` + // ConfigMap represents a configMap that should populate this volume + // +optional + ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"` + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"` + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"` + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. 
+ // +optional + AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"` + // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"` + // Items for all in one resources secrets, configmaps, and downward API + Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"` + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"` + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"` +} + +// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. +// This volume finds the bound PV and mounts that volume for the pod. A +// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another +// type of volume that is owned by someone else (the system). +type PersistentVolumeClaimVolumeSource struct { + // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + ClaimName string `json:"claimName" protobuf:"bytes,1,opt,name=claimName"` + // Will force the ReadOnly setting in VolumeMounts. + // Default false. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` +} + +// PersistentVolumeSource is similar to VolumeSource but meant for the +// administrator who creates PVs. Exactly one of its members must be set. +type PersistentVolumeSource struct { + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. Provisioned by an admin. + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"` + // AWSElasticBlockStore represents an AWS Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"` + // HostPath represents a directory on the host. + // Provisioned by a developer or tester. + // This is useful for single-node development and testing only! + // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. + // More info: http://kubernetes.io/docs/user-guide/volumes#hostpath + // +optional + HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"` + // Glusterfs represents a Glusterfs volume that is attached to a host and + // exposed to the pod. Provisioned by an admin. 
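With Volume, VolumeSource, and PersistentVolumeClaimVolumeSource all defined above, here is a small sketch of how they compose: each Volume embeds exactly one populated VolumeSource member. The import path matches the vendored location in this diff; volume and claim names are illustrative.

package main

import (
	"fmt"

	v1 "k8s.io/client-go/pkg/api/v1" // path as vendored in this diff
)

func main() {
	volumes := []v1.Volume{
		{
			// Scratch space that lives and dies with the pod.
			Name:         "scratch",
			VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
		},
		{
			// Mount an existing claim; only one VolumeSource member is set per volume.
			Name: "data",
			VolumeSource: v1.VolumeSource{
				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
					ClaimName: "data-claim", // hypothetical claim name
					ReadOnly:  false,
				},
			},
		},
	}
	fmt.Println(len(volumes), "volumes defined")
}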
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // +optional + Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` + // NFS represents an NFS mount on the host. Provisioned by an admin. + // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // +optional + NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"` + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // +optional + RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"` + // ISCSI represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. Provisioned by an admin. + // +optional + ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"` + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"` + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"` + // Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"` + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. This is an + // alpha feature and may change in future. + // +optional + FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"` + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"` + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"` + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. 
+ // +optional + AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"` + // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"` + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"` + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"` +} + +const ( + // AlphaStorageClassAnnotation represents the previous alpha storage class + // annotation. It's currently still used and will be held for backwards + // compatibility + AlphaStorageClassAnnotation = "volume.alpha.kubernetes.io/storage-class" + + // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. + // It's currently still used and will be held for backwards compatibility + BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" +) + +// +genclient=true +// +nonNamespaced=true + +// PersistentVolume (PV) is a storage resource provisioned by an administrator. +// It is analogous to a node. +// More info: http://kubernetes.io/docs/user-guide/persistent-volumes +type PersistentVolume struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines a specification of a persistent volume owned by the cluster. + // Provisioned by an administrator. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes + // +optional + Spec PersistentVolumeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status represents the current information/status for the persistent volume. + // Populated by the system. + // Read-only. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes + // +optional + Status PersistentVolumeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PersistentVolumeSpec is the specification of a persistent volume. +type PersistentVolumeSpec struct { + // A description of the persistent volume's resources and capacity. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity + // +optional + Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` + // The actual volume backing the persistent volume. + PersistentVolumeSource `json:",inline" protobuf:"bytes,2,opt,name=persistentVolumeSource"` + // AccessModes contains all ways the volume can be mounted. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes + // +optional + AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` + // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. + // Expected to be non-nil when bound. + // claim.VolumeName is the authoritative bind between PV and PVC. 
+ // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#binding + // +optional + ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"` + // What happens to a persistent volume when released from its claim. + // Valid options are Retain (default) and Recycle. + // Recycling must be supported by the volume plugin underlying this persistent volume. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy + // +optional + PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"` + // Name of StorageClass to which this persistent volume belongs. Empty value + // means that this volume does not belong to any StorageClass. + // +optional + StorageClassName string `json:"storageClassName,omitempty" protobuf:"bytes,6,opt,name=storageClassName"` +} + +// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes. +type PersistentVolumeReclaimPolicy string + +const ( + // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim. + // The volume plugin must support Recycling. + PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle" + // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. + // The volume plugin must support Deletion. + PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" + // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. + // The default policy is Retain. + PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" +) + +// PersistentVolumeStatus is the current status of a persistent volume. +type PersistentVolumeStatus struct { + // Phase indicates if a volume is available, bound to a claim, or released by a claim. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#phase + // +optional + Phase PersistentVolumePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumePhase"` + // A human-readable message indicating details about why the volume is in this state. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // Reason is a brief CamelCase string that describes any failure and is meant + // for machine parsing and tidy display in the CLI. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` +} + +// PersistentVolumeList is a list of PersistentVolume items. +type PersistentVolumeList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of persistent volumes. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes + Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true + +// PersistentVolumeClaim is a user's request for and claim to a persistent volume +type PersistentVolumeClaim struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
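The PersistentVolume pieces above (capacity, inline PersistentVolumeSource, access modes, reclaim policy) combine as in the following hedged sketch of an admin-provisioned PV. HostPath is only suitable for single-node testing, as the field comment notes; ResourceList and ResourceStorage are defined further down in this file, and the name, size, and class are illustrative.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1 "k8s.io/client-go/pkg/api/v1" // path as vendored in this diff
)

func main() {
	pv := v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "test-pv"},
		Spec: v1.PersistentVolumeSpec{
			Capacity: v1.ResourceList{
				v1.ResourceStorage: resource.MustParse("1Gi"),
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				HostPath: &v1.HostPathVolumeSource{Path: "/tmp/test-pv"}, // single-node testing only
			},
			AccessModes:                   []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimRetain,
			StorageClassName:              "manual", // hypothetical StorageClass name
		},
	}
	fmt.Printf("%s: %s, %v\n", pv.Name, pv.Spec.PersistentVolumeReclaimPolicy, pv.Spec.AccessModes)
}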
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired characteristics of a volume requested by a pod author. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // +optional + Spec PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status represents the current information/status of a persistent volume claim. + // Read-only. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // +optional + Status PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PersistentVolumeClaimList is a list of PersistentVolumeClaim items. +type PersistentVolumeClaimList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // A list of persistent volume claims. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// PersistentVolumeClaimSpec describes the common attributes of storage devices +// and allows a Source for provider-specific attributes +type PersistentVolumeClaimSpec struct { + // AccessModes contains the desired access modes the volume should have. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 + // +optional + AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` + // A label query over volumes to consider for binding. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` + // Resources represents the minimum resources the volume should have. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources + // +optional + Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"` + // VolumeName is the binding reference to the PersistentVolume backing this claim. + // +optional + VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"` + // Name of the StorageClass required by the claim. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1 + // +optional + StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"` +} + +// PersistentVolumeClaimStatus is the current status of a persistent volume claim. +type PersistentVolumeClaimStatus struct { + // Phase represents the current phase of PersistentVolumeClaim. + // +optional + Phase PersistentVolumeClaimPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumeClaimPhase"` + // AccessModes contains the actual access modes the volume backing the PVC has. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 + // +optional + AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` + // Represents the actual resources of the underlying volume. 
+ // +optional + Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` +} + +type PersistentVolumeAccessMode string + +const ( + // can be mounted read/write mode to exactly 1 host + ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" + // can be mounted in read-only mode to many hosts + ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" + // can be mounted in read/write mode to many hosts + ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany" +) + +type PersistentVolumePhase string + +const ( + // used for PersistentVolumes that are not available + VolumePending PersistentVolumePhase = "Pending" + // used for PersistentVolumes that are not yet bound + // Available volumes are held by the binder and matched to PersistentVolumeClaims + VolumeAvailable PersistentVolumePhase = "Available" + // used for PersistentVolumes that are bound + VolumeBound PersistentVolumePhase = "Bound" + // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted + // released volumes must be recycled before becoming available again + // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource + VolumeReleased PersistentVolumePhase = "Released" + // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim + VolumeFailed PersistentVolumePhase = "Failed" +) + +type PersistentVolumeClaimPhase string + +const ( + // used for PersistentVolumeClaims that are not yet bound + ClaimPending PersistentVolumeClaimPhase = "Pending" + // used for PersistentVolumeClaims that are bound + ClaimBound PersistentVolumeClaimPhase = "Bound" + // used for PersistentVolumeClaims that lost their underlying + // PersistentVolume. The claim was bound to a PersistentVolume and this + // volume does not exist any longer and all data on it was lost. + ClaimLost PersistentVolumeClaimPhase = "Lost" +) + +// Represents a host path mapped into a pod. +// Host path volumes do not support ownership management or SELinux relabeling. +type HostPathVolumeSource struct { + // Path of the directory on the host. + // More info: http://kubernetes.io/docs/user-guide/volumes#hostpath + Path string `json:"path" protobuf:"bytes,1,opt,name=path"` +} + +// Represents an empty directory for a pod. +// Empty directory volumes support ownership management and SELinux relabeling. +type EmptyDirVolumeSource struct { + // What type of storage medium should back this directory. + // The default is "" which means to use the node's default medium. + // Must be an empty string (default) or Memory. + // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // +optional + Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"` +} + +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +type GlusterfsVolumeSource struct { + // EndpointsName is the endpoint name that details Glusterfs topology. + // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` + + // Path is the Glusterfs volume path. 
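On the claim side, PersistentVolumeClaimSpec pairs desired access modes with a resource request; once bound, the claim's phase moves from ClaimPending to ClaimBound. A hedged sketch of a 1Gi ReadWriteOnce claim follows; ResourceRequirements and ResourceStorage are defined elsewhere in this file, and the names and class are illustrative.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1 "k8s.io/client-go/pkg/api/v1" // path as vendored in this diff
)

func main() {
	class := "standard" // hypothetical StorageClass; StorageClassName is a *string and may be left unset
	pvc := v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "data-claim", Namespace: "default"},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Gi")},
			},
			StorageClassName: &class,
		},
	}
	// Until a controller binds it, Status.Phase would report ClaimPending rather than ClaimBound.
	fmt.Println(pvc.Name, pvc.Spec.AccessModes)
}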
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + Path string `json:"path" protobuf:"bytes,2,opt,name=path"` + + // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // Defaults to false. + // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +type RBDVolumeSource struct { + // A collection of Ceph monitors. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` + // The rados image name. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: http://kubernetes.io/docs/user-guide/volumes#rbd + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` + // The rados pool name. + // Default is rbd. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it. + // +optional + RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` + // The rados user name. + // Default is admin. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` + // Keyring is the path to key ring for RBDUser. + // Default is /etc/ceph/keyring. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` + // SecretRef is name of the authentication secret for RBDUser. If provided + // overrides keyring. + // Default is nil. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` +} + +// Represents a cinder volume resource in Openstack. +// A Cinder volume must exist before mounting to a container. +// The volume must also be in the same region as the kubelet. +// Cinder volumes support ownership management and SELinux relabeling. +type CinderVolumeSource struct { + // volume id used to identify the volume in cinder + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + +// Represents a Ceph Filesystem mount that lasts the lifetime of a pod +// Cephfs volumes do not support ownership management or SELinux relabeling. +type CephFSVolumeSource struct { + // Required: Monitors is a collection of Ceph monitors + // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` + // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` + // Optional: User is the rados user name, default is admin + // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` + // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` +} + +// Represents a Flocker volume mounted by the Flocker agent. +// One and only one of datasetName and datasetUUID should be set. +// Flocker volumes do not support ownership management or SELinux relabeling. +type FlockerVolumeSource struct { + // Name of the dataset stored as metadata -> name on the dataset for Flocker + // should be considered as deprecated + // +optional + DatasetName string `json:"datasetName,omitempty" protobuf:"bytes,1,opt,name=datasetName"` + // UUID of the dataset. This is unique identifier of a Flocker dataset + // +optional + DatasetUUID string `json:"datasetUUID,omitempty" protobuf:"bytes,2,opt,name=datasetUUID"` +} + +// StorageMedium defines ways that storage can be allocated to a volume. +type StorageMedium string + +const ( + StorageMediumDefault StorageMedium = "" // use whatever the default is for the node + StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) +) + +// Protocol defines network protocols supported for things like container ports. +type Protocol string + +const ( + // ProtocolTCP is the TCP protocol. + ProtocolTCP Protocol = "TCP" + // ProtocolUDP is the UDP protocol. + ProtocolUDP Protocol = "UDP" +) + +// Represents a Persistent Disk resource in Google Compute Engine. +// +// A GCE PD must exist before mounting to a container. 
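A brief aside on StorageMedium, defined just above: EmptyDirVolumeSource.Medium selects the node's default storage or Memory for a tmpfs-backed directory whose contents vanish with the pod. A small sketch against the vendored v1 package; the volume name is illustrative.

package main

import (
	"fmt"

	v1 "k8s.io/client-go/pkg/api/v1" // path as vendored in this diff
)

func main() {
	// tmpfs-backed scratch volume: data is kept in memory and is lost when the pod goes away.
	vol := v1.Volume{
		Name: "memory-scratch",
		VolumeSource: v1.VolumeSource{
			EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
		},
	}
	fmt.Println(vol.Name, vol.EmptyDir.Medium)
}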
The disk must +// also be in the same GCE project and zone as the kubelet. A GCE PD +// can only be mounted as read/write once or read-only many times. GCE +// PDs support ownership management and SELinux relabeling. +type GCEPersistentDiskVolumeSource struct { + // Unique name of the PD resource in GCE. Used to identify the disk in GCE. + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + PDName string `json:"pdName" protobuf:"bytes,1,opt,name=pdName"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // The partition in the volume that you want to mount. + // If omitted, the default is to mount by volume name. + // Examples: For volume /dev/sda1, you specify the partition as "1". + // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // +optional + Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"` + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` +} + +// Represents a Quobyte mount that lasts the lifetime of a pod. +// Quobyte volumes do not support ownership management or SELinux relabeling. +type QuobyteVolumeSource struct { + // Registry represents a single or multiple Quobyte Registry services + // specified as a string as host:port pair (multiple entries are separated with commas) + // which acts as the central registry for volumes + Registry string `json:"registry" protobuf:"bytes,1,opt,name=registry"` + + // Volume is a string that references an already created Quobyte volume by name. + Volume string `json:"volume" protobuf:"bytes,2,opt,name=volume"` + + // ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. + // Defaults to false. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` + + // User to map volume access to + // Defaults to serivceaccount user + // +optional + User string `json:"user,omitempty" protobuf:"bytes,4,opt,name=user"` + + // Group to map volume access to + // Default is no group + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"` +} + +// FlexVolume represents a generic volume resource that is +// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. +type FlexVolumeSource struct { + // Driver is the name of the driver to use for this volume. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. 
+ // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // Optional: SecretRef is reference to the secret object containing + // sensitive information to pass to the plugin scripts. This may be + // empty if no secret object is specified. If the secret object + // contains more than one secret, all secrets are passed to the plugin + // scripts. + // +optional + SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` + // Optional: Extra command options if any. + // +optional + Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"` +} + +// Represents a Persistent Disk resource in AWS. +// +// An AWS EBS disk must exist before mounting to a container. The disk +// must also be in the same AWS zone as the kubelet. An AWS EBS disk +// can only be mounted as read/write once. AWS EBS volumes support +// ownership management and SELinux relabeling. +type AWSElasticBlockStoreVolumeSource struct { + // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). + // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // The partition in the volume that you want to mount. + // If omitted, the default is to mount by volume name. + // Examples: For volume /dev/sda1, you specify the partition as "1". + // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + // +optional + Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"` + // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". + // If omitted, the default is "false". + // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` +} + +// Represents a volume that is populated with the contents of a git repository. +// Git repo volumes do not support ownership management. +// Git repo volumes support SELinux relabeling. +type GitRepoVolumeSource struct { + // Repository URL + Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"` + // Commit hash for the specified revision. + // +optional + Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"` + // Target directory name. + // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + // git repository. Otherwise, if specified, the volume will contain the git repository in + // the subdirectory with the given name. + // +optional + Directory string `json:"directory,omitempty" protobuf:"bytes,3,opt,name=directory"` +} + +// Adapts a Secret into a volume. 
+// +// The contents of the target Secret's Data field will be presented in a volume +// as files using the keys in the Data field as the file names. +// Secret volumes support ownership management and SELinux relabeling. +type SecretVolumeSource struct { + // Name of the secret in the pod's namespace to use. + // More info: http://kubernetes.io/docs/user-guide/volumes#secrets + // +optional + SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"` + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"` + // Specify whether the Secret or it's keys must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` +} + +const ( + SecretVolumeSourceDefaultMode int32 = 0644 +) + +// Adapts a secret into a projected volume. +// +// The contents of the target Secret's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names. +// Note that this is identical to a secret volume source without the default +// mode. +type SecretProjection struct { + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` +} + +// Represents an NFS mount that lasts the lifetime of a pod. +// NFS volumes do not support ownership management or SELinux relabeling. +type NFSVolumeSource struct { + // Server is the hostname or IP address of the NFS server. + // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + Server string `json:"server" protobuf:"bytes,1,opt,name=server"` + + // Path that is exported by the NFS server. 
+ // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + Path string `json:"path" protobuf:"bytes,2,opt,name=path"` + + // ReadOnly here will force + // the NFS export to be mounted with read-only permissions. + // Defaults to false. + // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + +// Represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +type ISCSIVolumeSource struct { + // iSCSI target portal. The portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"` + // Target iSCSI Qualified Name. + IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"` + // iSCSI target lun number. + Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"` + // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // +optional + ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: http://kubernetes.io/docs/user-guide/volumes#iscsi + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"` + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` + // iSCSI target portal List. The portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + // +optional + Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"` +} + +// Represents a Fibre Channel volume. +// Fibre Channel volumes can only be mounted as read/write once. +// Fibre Channel volumes support ownership management and SELinux relabeling. +type FCVolumeSource struct { + // Required: FC target worldwide names (WWNs) + TargetWWNs []string `json:"targetWWNs" protobuf:"bytes,1,rep,name=targetWWNs"` + // Required: FC target lun number + Lun *int32 `json:"lun" protobuf:"varint,2,opt,name=lun"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` +} + +// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. 
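// A minimal usage sketch for the NFS and FC sources defined above, assuming it
// sits in the same package as these types; the server, path and WWN values are
// placeholders, not taken from any real cluster.
func exampleNFSAndFCSources() (NFSVolumeSource, FCVolumeSource) {
	nfs := NFSVolumeSource{
		Server:   "nfs.example.com", // hostname or IP of the NFS server
		Path:     "/exports/data",   // path exported by that server
		ReadOnly: true,
	}
	lun := int32(0) // Lun is a *int32, so it needs an addressable value
	fc := FCVolumeSource{
		TargetWWNs: []string{"500a0981891b8dc5"},
		Lun:        &lun,
		FSType:     "ext4",
	}
	return nfs, fc
}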
+type AzureFileVolumeSource struct { + // the name of secret that contains Azure Storage Account Name and Key + SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"` + // Share Name + ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + +// Represents a vSphere volume resource. +type VsphereVirtualDiskVolumeSource struct { + // Path that identifies vSphere volume vmdk + VolumePath string `json:"volumePath" protobuf:"bytes,1,opt,name=volumePath"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` +} + +// Represents a Photon Controller persistent disk resource. +type PhotonPersistentDiskVolumeSource struct { + // ID that identifies Photon Controller persistent disk + PdID string `json:"pdID" protobuf:"bytes,1,opt,name=pdID"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` +} + +type AzureDataDiskCachingMode string + +const ( + AzureDataDiskCachingNone AzureDataDiskCachingMode = "None" + AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly" + AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite" +) + +// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. +type AzureDiskVolumeSource struct { + // The Name of the data disk in the blob storage + DiskName string `json:"diskName" protobuf:"bytes,1,opt,name=diskName"` + // The URI the data disk in the blob storage + DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"` + // Host Caching mode: None, Read Only, Read Write. + // +optional + CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"` +} + +// PortworxVolumeSource represents a Portworx volume resource. +type PortworxVolumeSource struct { + // VolumeID uniquely identifies a Portworx volume + VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` + // FSType represents the filesystem type to mount + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. 
+ // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + +// ScaleIOVolumeSource represents a persistent ScaleIO volume +type ScaleIOVolumeSource struct { + // The host address of the ScaleIO API Gateway. + Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"` + // The name of the storage system as configured in ScaleIO. + System string `json:"system" protobuf:"bytes,2,opt,name=system"` + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + SecretRef *LocalObjectReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"` + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"` + // The name of the Protection Domain for the configured storage (defaults to "default"). + // +optional + ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"` + // The Storage Pool associated with the protection domain (defaults to "default"). + // +optional + StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"` + // Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). + // +optional + StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. + VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"` +} + +// Adapts a ConfigMap into a volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// volume as files using the keys in the Data field as the file names, unless +// the items element is populated with specific mappings of keys to paths. +// ConfigMap volumes support ownership management and SELinux relabeling. +type ConfigMapVolumeSource struct { + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. 
+ // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"` + // Specify whether the ConfigMap or it's keys must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` +} + +const ( + ConfigMapVolumeSourceDefaultMode int32 = 0644 +) + +// Adapts a ConfigMap into a projected volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names, +// unless the items element is populated with specific mappings of keys to paths. +// Note that this is identical to a configmap volume source without the default +// mode. +type ConfigMapProjection struct { + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + // Specify whether the ConfigMap or it's keys must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` +} + +// Represents a projected volume source +type ProjectedVolumeSource struct { + // list of volume projections + Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"` + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"` +} + +// Projection that may be projected along with other supported volume types +type VolumeProjection struct { + // all types below are the supported types for projection into the same volume + + // information about the secret data to project + Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"` + // information about the downwardAPI data to project + DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" protobuf:"bytes,2,opt,name=downwardAPI"` + // information about the configMap data to project + ConfigMap *ConfigMapProjection `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"` +} + +const ( + ProjectedVolumeSourceDefaultMode int32 = 0644 +) + +// Maps a string key to a path within a volume. +type KeyToPath struct { + // The key to project. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + + // The relative path of the file to map the key to. + // May not be an absolute path. + // May not contain the path element '..'. + // May not start with the string '..'. 
+ Path string `json:"path" protobuf:"bytes,2,opt,name=path"` + // Optional: mode bits to use on this file, must be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"` +} + +// ContainerPort represents a network port in a single container. +type ContainerPort struct { + // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + // named port in a pod must have a unique name. Name for the port that can be + // referred to by services. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // Number of port to expose on the host. + // If specified, this must be a valid port number, 0 < x < 65536. + // If HostNetwork is specified, this must match ContainerPort. + // Most containers do not need this. + // +optional + HostPort int32 `json:"hostPort,omitempty" protobuf:"varint,2,opt,name=hostPort"` + // Number of port to expose on the pod's IP address. + // This must be a valid port number, 0 < x < 65536. + ContainerPort int32 `json:"containerPort" protobuf:"varint,3,opt,name=containerPort"` + // Protocol for port. Must be UDP or TCP. + // Defaults to "TCP". + // +optional + Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"` + // What host IP to bind the external port to. + // +optional + HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"` +} + +// VolumeMount describes a mounting of a Volume within a container. +type VolumeMount struct { + // This must match the Name of a Volume. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Mounted read-only if true, read-write otherwise (false or unspecified). + // Defaults to false. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` + // Path within the container at which the volume should be mounted. Must + // not contain ':'. + MountPath string `json:"mountPath" protobuf:"bytes,3,opt,name=mountPath"` + // Path within the volume from which the container's volume should be mounted. + // Defaults to "" (volume's root). + // +optional + SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"` +} + +// EnvVar represents an environment variable present in a Container. +type EnvVar struct { + // Name of the environment variable. Must be a C_IDENTIFIER. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Optional: no more than one of the following may be specified. + + // Variable references $(VAR_NAME) are expanded + // using the previous defined environment variables in the container and + // any service environment variables. If a variable cannot be resolved, + // the reference in the input string will be unchanged. The $(VAR_NAME) + // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped + // references will never be expanded, regardless of whether the variable + // exists or not. + // Defaults to "". + // +optional + Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // Source for the environment variable's value. Cannot be used if value is not empty. + // +optional + ValueFrom *EnvVarSource `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"` +} + +// EnvVarSource represents a source for the value of an EnvVar. 
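// A minimal sketch of the two ways an EnvVar can be populated, assuming it sits
// in the same package as these types; the variable names are placeholders.
func examplePodEnv() []EnvVar {
	return []EnvVar{
		// Plain literal value.
		{Name: "LOG_LEVEL", Value: "debug"},
		// Resolved from pod metadata via the downward API (fieldRef).
		{
			Name: "MY_POD_NAME",
			ValueFrom: &EnvVarSource{
				FieldRef: &ObjectFieldSelector{FieldPath: "metadata.name"},
			},
		},
	}
}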
+type EnvVarSource struct { + // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, + // spec.nodeName, spec.serviceAccountName, status.podIP. + // +optional + FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"` + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // +optional + ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,2,opt,name=resourceFieldRef"` + // Selects a key of a ConfigMap. + // +optional + ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,3,opt,name=configMapKeyRef"` + // Selects a key of a secret in the pod's namespace + // +optional + SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"` +} + +// ObjectFieldSelector selects an APIVersioned field of an object. +type ObjectFieldSelector struct { + // Version of the schema the FieldPath is written in terms of, defaults to "v1". + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"` + // Path of the field to select in the specified API version. + FieldPath string `json:"fieldPath" protobuf:"bytes,2,opt,name=fieldPath"` +} + +// ResourceFieldSelector represents container resources (cpu, memory) and their output format +type ResourceFieldSelector struct { + // Container name: required for volumes, optional for env vars + // +optional + ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"` + // Required: resource to select + Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"` + // Specifies the output format of the exposed resources, defaults to "1" + // +optional + Divisor resource.Quantity `json:"divisor,omitempty" protobuf:"bytes,3,opt,name=divisor"` +} + +// Selects a key from a ConfigMap. +type ConfigMapKeySelector struct { + // The ConfigMap to select from. + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // The key to select. + Key string `json:"key" protobuf:"bytes,2,opt,name=key"` + // Specify whether the ConfigMap or it's key must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` +} + +// SecretKeySelector selects a key of a Secret. +type SecretKeySelector struct { + // The name of the secret in the pod's namespace to select from. + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // The key of the secret to select from. Must be a valid secret key. + Key string `json:"key" protobuf:"bytes,2,opt,name=key"` + // Specify whether the Secret or it's key must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` +} + +// EnvFromSource represents the source of a set of ConfigMaps +type EnvFromSource struct { + // An optional identifer to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. 
+ // +optional + Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"` + // The ConfigMap to select from + // +optional + ConfigMapRef *ConfigMapEnvSource `json:"configMapRef,omitempty" protobuf:"bytes,2,opt,name=configMapRef"` + // The Secret to select from + // +optional + SecretRef *SecretEnvSource `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"` +} + +// ConfigMapEnvSource selects a ConfigMap to populate the environment +// variables with. +// +// The contents of the target ConfigMap's Data field will represent the +// key-value pairs as environment variables. +type ConfigMapEnvSource struct { + // The ConfigMap to select from. + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // Specify whether the ConfigMap must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"` +} + +// SecretEnvSource selects a Secret to populate the environment +// variables with. +// +// The contents of the target Secret's Data field will represent the +// key-value pairs as environment variables. +type SecretEnvSource struct { + // The Secret to select from. + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // Specify whether the Secret must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"` +} + +// HTTPHeader describes a custom header to be used in HTTP probes +type HTTPHeader struct { + // The header field name + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // The header field value + Value string `json:"value" protobuf:"bytes,2,opt,name=value"` +} + +// HTTPGetAction describes an action based on HTTP Get requests. +type HTTPGetAction struct { + // Path to access on the HTTP server. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` + // Name or number of the port to access on the container. + // Number must be in the range 1 to 65535. + // Name must be an IANA_SVC_NAME. + Port intstr.IntOrString `json:"port" protobuf:"bytes,2,opt,name=port"` + // Host name to connect to, defaults to the pod IP. You probably want to set + // "Host" in httpHeaders instead. + // +optional + Host string `json:"host,omitempty" protobuf:"bytes,3,opt,name=host"` + // Scheme to use for connecting to the host. + // Defaults to HTTP. + // +optional + Scheme URIScheme `json:"scheme,omitempty" protobuf:"bytes,4,opt,name=scheme,casttype=URIScheme"` + // Custom headers to set in the request. HTTP allows repeated headers. + // +optional + HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty" protobuf:"bytes,5,rep,name=httpHeaders"` +} + +// URIScheme identifies the scheme used for connection to a host for Get actions +type URIScheme string + +const ( + // URISchemeHTTP means that the scheme used will be http:// + URISchemeHTTP URIScheme = "HTTP" + // URISchemeHTTPS means that the scheme used will be https:// + URISchemeHTTPS URIScheme = "HTTPS" +) + +// TCPSocketAction describes an action based on opening a socket +type TCPSocketAction struct { + // Number or name of the port to access on the container. + // Number must be in the range 1 to 65535. + // Name must be an IANA_SVC_NAME. + Port intstr.IntOrString `json:"port" protobuf:"bytes,1,opt,name=port"` +} + +// ExecAction describes a "run in container" action. 
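// A minimal sketch of an HTTP readiness probe built from the Handler and
// HTTPGetAction types in this file, assuming it sits in the same package
// (which already imports the intstr helpers used by Port); the path and port
// are placeholders.
func exampleReadinessProbe() *Probe {
	return &Probe{
		Handler: Handler{
			HTTPGet: &HTTPGetAction{
				Path:   "/healthz",
				Port:   intstr.FromInt(8080),
				Scheme: URISchemeHTTP,
			},
		},
		InitialDelaySeconds: 5,  // wait 5s after container start
		PeriodSeconds:       10, // then probe every 10s
		FailureThreshold:    3,  // mark unready after 3 consecutive failures
	}
}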
+type ExecAction struct { + // Command is the command line to execute inside the container, the working directory for the + // command is root ('/') in the container's filesystem. The command is simply exec'd, it is + // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + // a shell, you need to explicitly call out to that shell. + // Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + // +optional + Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"` +} + +// Probe describes a health check to be performed against a container to determine whether it is +// alive or ready to receive traffic. +type Probe struct { + // The action taken to determine the health of a container + Handler `json:",inline" protobuf:"bytes,1,opt,name=handler"` + // Number of seconds after the container has started before liveness probes are initiated. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // +optional + InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty" protobuf:"varint,2,opt,name=initialDelaySeconds"` + // Number of seconds after which the probe times out. + // Defaults to 1 second. Minimum value is 1. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // +optional + TimeoutSeconds int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"` + // How often (in seconds) to perform the probe. + // Default to 10 seconds. Minimum value is 1. + // +optional + PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"` + // Minimum consecutive successes for the probe to be considered successful after having failed. + // Defaults to 1. Must be 1 for liveness. Minimum value is 1. + // +optional + SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"` + // Minimum consecutive failures for the probe to be considered failed after having succeeded. + // Defaults to 3. Minimum value is 1. + // +optional + FailureThreshold int32 `json:"failureThreshold,omitempty" protobuf:"varint,6,opt,name=failureThreshold"` +} + +// PullPolicy describes a policy for if/when to pull a container image +type PullPolicy string + +const ( + // PullAlways means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. + PullAlways PullPolicy = "Always" + // PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present + PullNever PullPolicy = "Never" + // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. + PullIfNotPresent PullPolicy = "IfNotPresent" +) + +// TerminationMessagePolicy describes how termination messages are retrieved from a container. +type TerminationMessagePolicy string + +const ( + // TerminationMessageReadFile is the default behavior and will set the container status message to + // the contents of the container's terminationMessagePath when the container exits. + TerminationMessageReadFile TerminationMessagePolicy = "File" + // TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs + // for the container status message when the container exits with an error and the + // terminationMessagePath has no contents. 
+ TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError" +) + +// Capability represent POSIX capabilities type +type Capability string + +// Adds and removes POSIX capabilities from running containers. +type Capabilities struct { + // Added capabilities + // +optional + Add []Capability `json:"add,omitempty" protobuf:"bytes,1,rep,name=add,casttype=Capability"` + // Removed capabilities + // +optional + Drop []Capability `json:"drop,omitempty" protobuf:"bytes,2,rep,name=drop,casttype=Capability"` +} + +// ResourceRequirements describes the compute resource requirements. +type ResourceRequirements struct { + // Limits describes the maximum amount of compute resources allowed. + // More info: http://kubernetes.io/docs/user-guide/compute-resources/ + // +optional + Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"` + // Requests describes the minimum amount of compute resources required. + // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to an implementation-defined value. + // More info: http://kubernetes.io/docs/user-guide/compute-resources/ + // +optional + Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"` +} + +const ( + // TerminationMessagePathDefault means the default path to capture the application termination message running in a container + TerminationMessagePathDefault string = "/dev/termination-log" +) + +// A single application container that you want to run within a pod. +type Container struct { + // Name of the container specified as a DNS_LABEL. + // Each container in a pod must have a unique name (DNS_LABEL). + // Cannot be updated. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Docker image name. + // More info: http://kubernetes.io/docs/user-guide/images + // +optional + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands + // +optional + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands + // +optional + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. 
+ // Cannot be updated. + // +optional + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` + // List of ports to expose from the container. Exposing a port here gives + // the system additional information about the network connections a + // container uses, but is primarily informational. Not specifying a port here + // DOES NOT prevent that port from being exposed. Any port which is + // listening on the default "0.0.0.0" address inside a container will be + // accessible from the network. + // Cannot be updated. + // +optional + Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Compute Resources required by this container. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources + // +optional + Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // Periodic probe of container liveness. + // Container will be restarted if the probe fails. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // +optional + LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` + // Periodic probe of container service readiness. + // Container will be removed from service endpoints if the probe fails. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // +optional + ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` + // Actions that the management system should take in response to container lifecycle events. + // Cannot be updated. + // +optional + Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` + // Indicate how the termination message should be populated. 
File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/images#updating-images + // +optional + ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // Security options the pod should run with. + // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md + // +optional + SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` + + // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) + // and shouldn't be used for general purpose containers. + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container processes that reads from stdin will never receive an EOF. + // Default is false + // +optional + StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. + // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` +} + +// Handler defines a specific action that should be taken +// TODO: pass structured data to these actions, and document that data here. +type Handler struct { + // One and only one of the following should be specified. + // Exec specifies the action to take. + // +optional + Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"` + // HTTPGet specifies the http request to perform. + // +optional + HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"` + // TCPSocket specifies an action involving a TCP port. + // TCP hooks not yet supported + // TODO: implement a realistic TCP lifecycle hook + // +optional + TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` +} + +// Lifecycle describes actions that the management system should take in response to container lifecycle +// events. 
For the PostStart and PreStop lifecycle handlers, management of the container blocks +// until the action is complete, unless the container process fails, in which case the handler is aborted. +type Lifecycle struct { + // PostStart is called immediately after a container is created. If the handler fails, + // the container is terminated and restarted according to its restart policy. + // Other management of the container blocks until the hook completes. + // More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details + // +optional + PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"` + // PreStop is called immediately before a container is terminated. + // The container is terminated after the handler completes. + // The reason for termination is passed to the handler. + // Regardless of the outcome of the handler, the container is eventually terminated. + // Other management of the container blocks until the hook completes. + // More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details + // +optional + PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"` +} + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// ContainerStateWaiting is a waiting state of a container. +type ContainerStateWaiting struct { + // (brief) reason the container is not yet running. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason"` + // Message regarding why the container is not yet running. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` +} + +// ContainerStateRunning is a running state of a container. +type ContainerStateRunning struct { + // Time at which the container was last (re-)started + // +optional + StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,1,opt,name=startedAt"` +} + +// ContainerStateTerminated is a terminated state of a container. 
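// A minimal sketch of a PreStop hook wired through the Handler type above,
// assuming it sits in the same package as these types; the nginx command is a
// placeholder for whatever graceful-shutdown step a container needs.
func exampleGracefulPreStop() *Lifecycle {
	return &Lifecycle{
		PreStop: &Handler{
			Exec: &ExecAction{
				// Exec'd directly by the runtime, not run through a shell.
				Command: []string{"/usr/sbin/nginx", "-s", "quit"},
			},
		},
	}
}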
+type ContainerStateTerminated struct { + // Exit status from the last termination of the container + ExitCode int32 `json:"exitCode" protobuf:"varint,1,opt,name=exitCode"` + // Signal from the last termination of the container + // +optional + Signal int32 `json:"signal,omitempty" protobuf:"varint,2,opt,name=signal"` + // (brief) reason from the last termination of the container + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // Message regarding the last termination of the container + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` + // Time at which previous execution of the container started + // +optional + StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,5,opt,name=startedAt"` + // Time at which the container last terminated + // +optional + FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,6,opt,name=finishedAt"` + // Container's ID in the format 'docker://<container_id>' + // +optional + ContainerID string `json:"containerID,omitempty" protobuf:"bytes,7,opt,name=containerID"` +} + +// ContainerState holds a possible state of container. +// Only one of its members may be specified. +// If none of them is specified, the default one is ContainerStateWaiting. +type ContainerState struct { + // Details about a waiting container + // +optional + Waiting *ContainerStateWaiting `json:"waiting,omitempty" protobuf:"bytes,1,opt,name=waiting"` + // Details about a running container + // +optional + Running *ContainerStateRunning `json:"running,omitempty" protobuf:"bytes,2,opt,name=running"` + // Details about a terminated container + // +optional + Terminated *ContainerStateTerminated `json:"terminated,omitempty" protobuf:"bytes,3,opt,name=terminated"` +} + +// ContainerStatus contains details for the current status of this container. +type ContainerStatus struct { + // This must be a DNS_LABEL. Each container in a pod must have a unique name. + // Cannot be updated. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Details about the container's current condition. + // +optional + State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"` + // Details about the container's last termination condition. + // +optional + LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"` + // Specifies whether the container has passed its readiness probe. + Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"` + // The number of times the container has been restarted, currently based on + // the number of dead containers that have not yet been removed. + // Note that this is calculated from dead containers. But those containers are subject to + // garbage collection. This value will get capped at 5 by GC. + RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"` + // The image the container is running. + // More info: http://kubernetes.io/docs/user-guide/images + // TODO(dchen1107): Which image the container is running with? + Image string `json:"image" protobuf:"bytes,6,opt,name=image"` + // ImageID of the container's image. + ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"` + // Container's ID in the format 'docker://<container_id>'.
+ // More info: http://kubernetes.io/docs/user-guide/container-environment#container-information + // +optional + ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"` +} + +// PodPhase is a label for the condition of a pod at the current time. +type PodPhase string + +// These are the valid statuses of pods. +const ( + // PodPending means the pod has been accepted by the system, but one or more of the containers + // has not been started. This includes time before being bound to a node, as well as time spent + // pulling images onto the host. + PodPending PodPhase = "Pending" + // PodRunning means the pod has been bound to a node and all of the containers have been started. + // At least one container is still running or is in the process of being restarted. + PodRunning PodPhase = "Running" + // PodSucceeded means that all containers in the pod have voluntarily terminated + // with a container exit code of 0, and the system is not going to restart any of these containers. + PodSucceeded PodPhase = "Succeeded" + // PodFailed means that all containers in the pod have terminated, and at least one container has + // terminated in a failure (exited with a non-zero exit code or was stopped by the system). + PodFailed PodPhase = "Failed" + // PodUnknown means that for some reason the state of the pod could not be obtained, typically due + // to an error in communicating with the host of the pod. + PodUnknown PodPhase = "Unknown" +) + +// PodConditionType is a valid value for PodCondition.Type +type PodConditionType string + +// These are valid conditions of pod. +const ( + // PodScheduled represents status of the scheduling process for this pod. + PodScheduled PodConditionType = "PodScheduled" + // PodReady means the pod is able to service requests and should be added to the + // load balancing pools of all matching services. + PodReady PodConditionType = "Ready" + // PodInitialized means that all init containers in the pod have started successfully. + PodInitialized PodConditionType = "Initialized" + // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler + // can't schedule the pod right now, for example due to insufficient resources in the cluster. + PodReasonUnschedulable = "Unschedulable" +) + +// PodCondition contains details for the current condition of this pod. +type PodCondition struct { + // Type is the type of the condition. + // Currently only Ready. + // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions + Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"` + // Status is the status of the condition. + // Can be True, False, Unknown. + // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // Last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // Unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human-readable message indicating details about last transition. 
+ // +optional + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + +// RestartPolicy describes how the container should be restarted. +// Only one of the following restart policies may be specified. +// If none of the following policies is specified, the default one +// is RestartPolicyAlways. +type RestartPolicy string + +const ( + RestartPolicyAlways RestartPolicy = "Always" + RestartPolicyOnFailure RestartPolicy = "OnFailure" + RestartPolicyNever RestartPolicy = "Never" +) + +// DNSPolicy defines how a pod's DNS will be configured. +type DNSPolicy string + +const ( + // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS + // first, if it is available, then fall back on the default + // (as determined by kubelet) DNS settings. + DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet" + + // DNSClusterFirst indicates that the pod should use cluster DNS + // first unless hostNetwork is true, if it is available, then + // fall back on the default (as determined by kubelet) DNS settings. + DNSClusterFirst DNSPolicy = "ClusterFirst" + + // DNSDefault indicates that the pod should use the default (as + // determined by kubelet) DNS settings. + DNSDefault DNSPolicy = "Default" + + DefaultTerminationGracePeriodSeconds = 30 +) + +// A node selector represents the union of the results of one or more label queries +// over a set of nodes; that is, it represents the OR of the selectors represented +// by the node selector terms. +type NodeSelector struct { + //Required. A list of node selector terms. The terms are ORed. + NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"` +} + +// A null or empty node selector term matches no objects. +type NodeSelectorTerm struct { + //Required. A list of node selector requirements. The requirements are ANDed. + MatchExpressions []NodeSelectorRequirement `json:"matchExpressions" protobuf:"bytes,1,rep,name=matchExpressions"` +} + +// A node selector requirement is a selector that contains values, a key, and an operator +// that relates the key and values. +type NodeSelectorRequirement struct { + // The label key that the selector applies to. + Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + // Represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + Operator NodeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=NodeSelectorOperator"` + // An array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. If the operator is Gt or Lt, the values + // array must have a single element, which will be interpreted as an integer. + // This array is replaced during a strategic merge patch. + // +optional + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` +} + +// A node selector operator is the set of operators that can be used in +// a node selector requirement. 
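// A minimal sketch of a NodeSelector built from the terms and requirements
// above, assuming it sits in the same package as these types; the zone label
// key and values are placeholders.
func exampleZoneNodeSelector() *NodeSelector {
	return &NodeSelector{
		// Terms are ORed; requirements inside a term are ANDed.
		NodeSelectorTerms: []NodeSelectorTerm{{
			MatchExpressions: []NodeSelectorRequirement{{
				Key:      "failure-domain.beta.kubernetes.io/zone",
				Operator: NodeSelectorOpIn,
				Values:   []string{"us-central1-a", "us-central1-b"},
			}},
		}},
	}
}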
+type NodeSelectorOperator string + +const ( + NodeSelectorOpIn NodeSelectorOperator = "In" + NodeSelectorOpNotIn NodeSelectorOperator = "NotIn" + NodeSelectorOpExists NodeSelectorOperator = "Exists" + NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist" + NodeSelectorOpGt NodeSelectorOperator = "Gt" + NodeSelectorOpLt NodeSelectorOperator = "Lt" +) + +// Affinity is a group of affinity scheduling rules. +type Affinity struct { + // Describes node affinity scheduling rules for the pod. + // +optional + NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAffinity"` + // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + // +optional + PodAffinity *PodAffinity `json:"podAffinity,omitempty" protobuf:"bytes,2,opt,name=podAffinity"` + // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + // +optional + PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty" protobuf:"bytes,3,opt,name=podAntiAffinity"` +} + +// Pod affinity is a group of inter pod affinity scheduling rules. +type PodAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"` + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. 
+ // +optional + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` +} + +// Pod anti affinity is a group of inter pod anti affinity scheduling rules. +type PodAntiAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"` + // The scheduler will prefer to schedule pods to nodes that satisfy + // the anti-affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling anti-affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` +} + +// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +type WeightedPodAffinityTerm struct { + // weight associated with matching the corresponding podAffinityTerm, + // in the range 1-100. + Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"` + // Required. A pod affinity term, associated with the corresponding weight. 
+ PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm" protobuf:"bytes,2,opt,name=podAffinityTerm"` +} + +// Defines a set of pods (namely those matching the labelSelector +// relative to the given namespace(s)) that this pod should be +// co-located (affinity) or not co-located (anti-affinity) with, +// where co-located is defined as running on a node whose value of +// the label with key <topologyKey> matches that of any node on which +// a pod of the set of pods is running +type PodAffinityTerm struct { + // A label query over a set of resources, in this case pods. + // +optional + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` + // namespaces specifies which namespaces the labelSelector applies to (matches against); + // null or empty list means "this pod's namespace" + Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,2,rep,name=namespaces"` + // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + // the labelSelector in the specified namespaces, where co-located is defined as running on a node + // whose value of the label with key topologyKey matches that of any node on which any of the + // selected pods is running. + // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" + // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); + // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. + // +optional + TopologyKey string `json:"topologyKey,omitempty" protobuf:"bytes,3,opt,name=topologyKey"` +} + +// Node affinity is a group of node affinity scheduling rules. +type NodeAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // will try to eventually evict the pod from its node. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // may or may not try to eventually evict the pod from its node. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,opt,name=requiredDuringSchedulingIgnoredDuringExecution"` + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node matches the corresponding matchExpressions; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` +} + +// An empty preferred scheduling term matches all objects with implicit weight 0 +// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). +type PreferredSchedulingTerm struct { + // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"` + // A node selector term, associated with the corresponding weight. + Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"` +} + +// The node this Taint is attached to has the effect "effect" on +// any pod that does not tolerate the Taint. +type Taint struct { + // Required. The taint key to be applied to a node. + Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + // Required. The taint value corresponding to the taint key. + // +optional + Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // Required. The effect of the taint on pods + // that do not tolerate the taint. + // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"` + // TimeAdded represents the time at which the taint was added. + // It is only written for NoExecute taints. + // +optional + TimeAdded metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"` +} + +type TaintEffect string + +const ( + // Do not allow new pods to schedule onto the node unless they tolerate the taint, + // but allow all pods submitted to Kubelet without going through the scheduler + // to start, and allow all already-running pods to continue running. + // Enforced by the scheduler. + TaintEffectNoSchedule TaintEffect = "NoSchedule" + // Like TaintEffectNoSchedule, but the scheduler tries not to schedule + // new pods onto the node, rather than prohibiting new pods from scheduling + // onto the node entirely. Enforced by the scheduler. + TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule" + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // Like TaintEffectNoSchedule, but additionally do not allow pods submitted to + // Kubelet without going through the scheduler to start. + // Enforced by Kubelet and the scheduler. + // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit" + // Evict any already-running pods that do not tolerate the taint. + // Currently enforced by NodeController. + TaintEffectNoExecute TaintEffect = "NoExecute" +) + +// The pod this Toleration is attached to tolerates any taint that matches +// the triple <key,value,effect> using the matching operator <operator>. +type Toleration struct { + // Key is the taint key that the toleration applies to. Empty means match all taint keys. + // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
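Illustrative sketch, not part of the vendored file or of this patch: the node-affinity types defined above compose as nested struct literals. The v1 import path is an assumption about where this types.go is vendored (shown as k8s.io/client-go/pkg/api/v1; adjust to this repo's actual vendor layout), and the label keys are example values only.

package example

import (
	v1 "k8s.io/client-go/pkg/api/v1" // assumed vendor path for the types in this file
)

// requireSSDPreferZoneA hard-requires nodes labelled disktype=ssd and
// soft-prefers (weight 10) nodes in an example zone.
func requireSSDPreferZoneA() *v1.Affinity {
	return &v1.Affinity{
		NodeAffinity: &v1.NodeAffinity{
			// Hard requirement: terms are ORed, the expressions inside a term are ANDed.
			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{{
					MatchExpressions: []v1.NodeSelectorRequirement{{
						Key:      "disktype",
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{"ssd"},
					}},
				}},
			},
			// Soft preference: weight must be in the range 1-100.
			PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{{
				Weight: 10,
				Preference: v1.NodeSelectorTerm{
					MatchExpressions: []v1.NodeSelectorRequirement{{
						Key:      "failure-domain.beta.kubernetes.io/zone",
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{"us-central1-a"},
					}},
				},
			}},
		},
	}
}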
+ // +optional + Key string `json:"key,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + // Operator represents a key's relationship to the value. + // Valid operators are Exists and Equal. Defaults to Equal. + // Exists is equivalent to wildcard for value, so that a pod can + // tolerate all taints of a particular category. + // +optional + Operator TolerationOperator `json:"operator,omitempty" protobuf:"bytes,2,opt,name=operator,casttype=TolerationOperator"` + // Value is the taint value the toleration matches to. + // If the operator is Exists, the value should be empty, otherwise just a regular string. + // +optional + Value string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"` + // Effect indicates the taint effect to match. Empty means match all taint effects. + // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + // +optional + Effect TaintEffect `json:"effect,omitempty" protobuf:"bytes,4,opt,name=effect,casttype=TaintEffect"` + // TolerationSeconds represents the period of time the toleration (which must be + // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + // it is not set, which means tolerate the taint forever (do not evict). Zero and + // negative values will be treated as 0 (evict immediately) by the system. + // +optional + TolerationSeconds *int64 `json:"tolerationSeconds,omitempty" protobuf:"varint,5,opt,name=tolerationSeconds"` +} + +// A toleration operator is the set of operators that can be used in a toleration. +type TolerationOperator string + +const ( + TolerationOpExists TolerationOperator = "Exists" + TolerationOpEqual TolerationOperator = "Equal" +) + +const ( + // This annotation key will be used to contain an array of v1 JSON encoded Containers + // for init containers. The annotation will be placed into the internal type and cleared. + // This key is only recognized by version >= 1.4. + PodInitContainersBetaAnnotationKey = "pod.beta.kubernetes.io/init-containers" + // This annotation key will be used to contain an array of v1 JSON encoded Containers + // for init containers. The annotation will be placed into the internal type and cleared. + // This key is recognized by version >= 1.3. For version 1.4 code, this key + // will have its value copied to the beta key. + PodInitContainersAnnotationKey = "pod.alpha.kubernetes.io/init-containers" + // This annotation key will be used to contain an array of v1 JSON encoded + // ContainerStatuses for init containers. The annotation will be placed into the internal + // type and cleared. This key is only recognized by version >= 1.4. + PodInitContainerStatusesBetaAnnotationKey = "pod.beta.kubernetes.io/init-container-statuses" + // This annotation key will be used to contain an array of v1 JSON encoded + // ContainerStatuses for init containers. The annotation will be placed into the internal + // type and cleared. This key is recognized by version >= 1.3. For version 1.4 code, + // this key will have its value copied to the beta key. + PodInitContainerStatusesAnnotationKey = "pod.alpha.kubernetes.io/init-container-statuses" +) + +// PodSpec is a description of a pod. +type PodSpec struct { + // List of volumes that can be mounted by containers belonging to the pod. 
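Another illustrative sketch (again assuming the vendored import path k8s.io/client-go/pkg/api/v1): a NoExecute Taint that a node might carry and a Toleration that matches it on key, value and effect, with TolerationSeconds bounding how long an already-running pod stays on the node after the taint is added.

package example

import (
	v1 "k8s.io/client-go/pkg/api/v1" // assumed vendor path for the types in this file
)

// dedicatedTaint evicts pods that do not tolerate it.
var dedicatedTaint = v1.Taint{
	Key:    "dedicated",
	Value:  "ingress",
	Effect: v1.TaintEffectNoExecute,
}

var tolerationSeconds int64 = 300

// tolerateDedicated matches dedicatedTaint: Equal operator on the same
// key/value and the same effect; the pod is evicted 300s after the taint appears.
var tolerateDedicated = v1.Toleration{
	Key:               "dedicated",
	Operator:          v1.TolerationOpEqual,
	Value:             "ingress",
	Effect:            v1.TaintEffectNoExecute,
	TolerationSeconds: &tolerationSeconds,
}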
+ // More info: http://kubernetes.io/docs/user-guide/volumes + // +optional + Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"` + // List of initialization containers belonging to the pod. + // Init containers are executed in order prior to containers being started. If any + // init container fails, the pod is considered to have failed and is handled according + // to its restartPolicy. The name for an init container or normal container must be + // unique among all containers. + // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. + // The resourceRequirements of an init container are taken into account during scheduling + // by finding the highest request/limit for each resource type, and then using the max of + // of that value or the sum of the normal containers. Limits are applied to init containers + // in a similar fashion. + // Init containers cannot currently be added or removed. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/containers + InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"` + // List of containers belonging to the pod. + // Containers cannot currently be added or removed. + // There must be at least one container in a Pod. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/containers + Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"` + // Restart policy for all containers within the pod. + // One of Always, OnFailure, Never. + // Default to Always. + // More info: http://kubernetes.io/docs/user-guide/pod-states#restartpolicy + // +optional + RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"` + // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + // Value must be non-negative integer. The value zero indicates delete immediately. + // If this value is nil, the default grace period will be used instead. + // The grace period is the duration in seconds after the processes running in the pod are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // Defaults to 30 seconds. + // +optional + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"` + // Optional duration in seconds the pod may be active on the node relative to + // StartTime before the system will actively try to mark it failed and kill associated containers. + // Value must be a positive integer. + // +optional + ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"` + // Set DNS policy for containers within the pod. + // One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. + // Defaults to "ClusterFirst". + // To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + // +optional + DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"` + // NodeSelector is a selector which must be true for the pod to fit on a node. 
+ // Selector which must match a node's labels for the pod to be scheduled on that node. + // More info: http://kubernetes.io/docs/user-guide/node-selection/README + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"` + + // ServiceAccountName is the name of the ServiceAccount to use to run this pod. + // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"` + // DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + // Deprecated: Use serviceAccountName instead. + // +k8s:conversion-gen=false + // +optional + DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"` + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + // +optional + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"` + + // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + // the scheduler simply schedules this pod onto that node, assuming that it fits resource + // requirements. + // +optional + NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"` + // Host networking requested for this pod. Use the host's network namespace. + // If this option is set, the ports that will be used must be specified. + // Default to false. + // +k8s:conversion-gen=false + // +optional + HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"` + // Use the host's pid namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"` + // Use the host's ipc namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"` + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. See type description for default values of each field. + // +optional + SecurityContext *PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"` + // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + // If specified, these secrets will be passed to individual puller implementations for them to use. For example, + // in the case of docker, only DockerConfig type secrets are honored. + // More info: http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + // +optional + ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"` + // Specifies the hostname of the Pod + // If not specified, the pod's hostname will be set to a system-defined value. + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"` + // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". + // If not specified, the pod will not have a domain name at all.
+ // +optional + Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"` + // If specified, the pod's scheduling constraints + // +optional + Affinity *Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"` + // If specified, the pod will be dispatched by specified scheduler. + // If not specified, the pod will be dispatched by default scheduler. + // +optional + SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"` + // If specified, the pod's tolerations. + // +optional + Tolerations []Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"` +} + +// PodSecurityContext holds pod-level security attributes and common container settings. +// Some fields are also present in container.securityContext. Field values of +// container.securityContext take precedence over field values of PodSecurityContext. +type PodSecurityContext struct { + // The SELinux context to be applied to all containers. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in SecurityContext. If set in + // both SecurityContext and PodSecurityContext, the value specified in SecurityContext + // takes precedence for that container. + // +optional + SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"` + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence + // for that container. + // +optional + RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,2,opt,name=runAsUser"` + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"` + // A list of groups applied to the first process run in each container, in addition + // to the container's primary GID. If unspecified, no groups will be added to + // any container. + // +optional + SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"` + // A special supplemental group that applies to all containers in a pod. + // Some volume types allow the Kubelet to change the ownership of that volume + // to be owned by the pod: + // + // 1. The owning GID will be the FSGroup + // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + // 3. The permission bits are OR'd with rw-rw---- + // + // If unset, the Kubelet will not modify the ownership and permissions of any volume. + // +optional + FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"` +} + +// PodQOSClass defines the supported qos classes of Pods. +type PodQOSClass string + +const ( + // PodQOSGuaranteed is the Guaranteed qos class. + PodQOSGuaranteed PodQOSClass = "Guaranteed" + // PodQOSBurstable is the Burstable qos class. 
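Illustrative sketch of a minimal PodSpec tying several of the fields above together (assumed import path k8s.io/client-go/pkg/api/v1; the Container type is defined earlier in this file, and only its Name and Image fields are used here).

package example

import (
	v1 "k8s.io/client-go/pkg/api/v1" // assumed vendor path for the types in this file
)

// minimalPodSpec runs a single container with cluster-first DNS, the default
// termination grace period, a node selector, a non-root security context and
// one toleration.
func minimalPodSpec() v1.PodSpec {
	grace := int64(v1.DefaultTerminationGracePeriodSeconds)
	nonRoot := true
	return v1.PodSpec{
		Containers: []v1.Container{{
			Name:  "nginx",
			Image: "nginx:1.11",
		}},
		RestartPolicy:                 v1.RestartPolicyAlways,
		DNSPolicy:                     v1.DNSClusterFirst,
		TerminationGracePeriodSeconds: &grace,
		NodeSelector:                  map[string]string{"disktype": "ssd"},
		SecurityContext: &v1.PodSecurityContext{
			RunAsNonRoot: &nonRoot,
		},
		Tolerations: []v1.Toleration{{
			Key:      "dedicated",
			Operator: v1.TolerationOpEqual,
			Value:    "ingress",
			Effect:   v1.TaintEffectNoSchedule,
		}},
	}
}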
+ PodQOSBurstable PodQOSClass = "Burstable" + // PodQOSBestEffort is the BestEffort qos class. + PodQOSBestEffort PodQOSClass = "BestEffort" +) + +// PodStatus represents information about the status of a pod. Status may trail the actual +// state of a system. +type PodStatus struct { + // Current condition of the pod. + // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-phase + // +optional + Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"` + // Current service state of pod. + // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions + // +optional + Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` + // A human readable message indicating details about why the pod is in this condition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` + // A brief CamelCase message indicating details about why the pod is in this state. + // e.g. 'OutOfDisk' + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + + // IP address of the host to which the pod is assigned. Empty if not yet scheduled. + // +optional + HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"` + // IP address allocated to the pod. Routable at least within the cluster. + // Empty if not yet allocated. + // +optional + PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"` + + // RFC 3339 date and time at which the object was acknowledged by the Kubelet. + // This is before the Kubelet pulled the container image(s) for the pod. + // +optional + StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"` + + // The list has one entry per init container in the manifest. The most recent successful + // init container will have ready = true, the most recently started container will have + // startTime set. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses + InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"` + + // The list has one entry per container in the manifest. Each entry is currently the output + // of `docker inspect`. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses + // +optional + ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"` + // The Quality of Service (QOS) classification assigned to the pod based on resource requirements + // See PodQOSClass type for available QOS classes + // More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md + // +optional + QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` +} + +// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded +type PodStatusResult struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Most recently observed status of the pod. + // This data may not be up to date. + // Populated by the system. + // Read-only. 
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// +genclient=true + +// Pod is a collection of containers that can run on a host. This resource is created +// by clients and scheduled onto hosts. +type Pod struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the pod. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the pod. + // This data may not be up to date. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PodList is a list of Pods. +type PodList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of pods. + // More info: http://kubernetes.io/docs/user-guide/pods + Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// PodTemplateSpec describes the data a pod should have when created from a template +type PodTemplateSpec struct { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the pod. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +genclient=true + +// PodTemplate describes a template for creating copies of a predefined pod. +type PodTemplate struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Template defines the pods that will be created from this pod template. + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` +} + +// PodTemplateList is a list of PodTemplates. +type PodTemplateList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of pod templates + Items []PodTemplate `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ReplicationControllerSpec is the specification of a replication controller. +type ReplicationControllerSpec struct { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. 
+ // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + + // Selector is a label query over pods that should match the Replicas count. + // If Selector is empty, it is defaulted to the labels present on the Pod template. + // Label keys and values that must match in order to be controlled by this replication + // controller, if empty defaulted to labels on Pod template. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` + + // TemplateRef is a reference to an object that describes the pod that will be created if + // insufficient replicas are detected. + // Reference to an object that describes the pod that will be created if insufficient replicas are detected. + // +optional + // TemplateRef *ObjectReference `json:"templateRef,omitempty"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. This takes precedence over a TemplateRef. + // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template + // +optional + Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` +} + +// ReplicationControllerStatus represents the current status of a replication +// controller. +type ReplicationControllerStatus struct { + // Replicas is the most recently observed number of replicas. + // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // The number of pods that have labels matching the labels of the pod template of the replication controller. + // +optional + FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` + + // The number of ready replicas for this replication controller. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"` + + // The number of available replicas (ready for at least minReadySeconds) for this replication controller. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"` + + // ObservedGeneration reflects the generation of the most recently observed replication controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` + + // Represents the latest available observations of a replication controller's current state. + // +optional + Conditions []ReplicationControllerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` +} + +type ReplicationControllerConditionType string + +// These are valid conditions of a replication controller.
+const ( + // ReplicationControllerReplicaFailure is added in a replication controller when one of its pods + // fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors, + // etc. or deleted due to kubelet being down or finalizers are failing. + ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure" +) + +// ReplicationControllerCondition describes the state of a replication controller at a certain point. +type ReplicationControllerCondition struct { + // Type of replication controller condition. + Type ReplicationControllerConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicationControllerConditionType"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient=true + +// ReplicationController represents the configuration of a replication controller. +type ReplicationController struct { + metav1.TypeMeta `json:",inline"` + + // If the Labels of a ReplicationController are empty, they are defaulted to + // be the same as the Pod(s) that the replication controller manages. + // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the specification of the desired behavior of the replication controller. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the most recently observed status of the replication controller. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ReplicationControllerList is a collection of replication controllers. +type ReplicationControllerList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of replication controllers. + // More info: http://kubernetes.io/docs/user-guide/replication-controller + Items []ReplicationController `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Session Affinity Type string +type ServiceAffinity string + +const ( + // ServiceAffinityClientIP is the Client IP based. + ServiceAffinityClientIP ServiceAffinity = "ClientIP" + + // ServiceAffinityNone - no session affinity. 
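Illustrative sketch of a ReplicationControllerSpec (assumed import paths k8s.io/client-go/pkg/api/v1 and k8s.io/apimachinery/pkg/apis/meta/v1, matching this repo's assumed vendor layout): the selector must match the labels on the pod template, and Replicas is a pointer so that an explicit 0 can be distinguished from the unset default of 1.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // assumed vendor path
	v1 "k8s.io/client-go/pkg/api/v1"              // assumed vendor path for the types in this file
)

// threeNginxReplicas keeps three pods labelled app=nginx running.
func threeNginxReplicas() v1.ReplicationControllerSpec {
	replicas := int32(3)
	labels := map[string]string{"app": "nginx"}
	return v1.ReplicationControllerSpec{
		Replicas: &replicas, // explicit pointer: distinguishes 0 from "use the default"
		Selector: labels,    // must match the pod template labels below
		Template: &v1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{Labels: labels},
			Spec: v1.PodSpec{
				Containers: []v1.Container{{Name: "nginx", Image: "nginx:1.11"}},
			},
		},
	}
}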
+ ServiceAffinityNone ServiceAffinity = "None" +) + +// Service Type string describes ingress methods for a service +type ServiceType string + +const ( + // ServiceTypeClusterIP means a service will only be accessible inside the + // cluster, via the cluster IP. + ServiceTypeClusterIP ServiceType = "ClusterIP" + + // ServiceTypeNodePort means a service will be exposed on one port of + // every node, in addition to 'ClusterIP' type. + ServiceTypeNodePort ServiceType = "NodePort" + + // ServiceTypeLoadBalancer means a service will be exposed via an + // external load balancer (if the cloud provider supports it), in addition + // to 'NodePort' type. + ServiceTypeLoadBalancer ServiceType = "LoadBalancer" + + // ServiceTypeExternalName means a service consists of only a reference to + // an external name that kubedns or equivalent will return as a CNAME + // record, with no exposing or proxying of any pods involved. + ServiceTypeExternalName ServiceType = "ExternalName" +) + +// ServiceStatus represents the current status of a service. +type ServiceStatus struct { + // LoadBalancer contains the current status of the load-balancer, + // if one is present. + // +optional + LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` +} + +// LoadBalancerStatus represents the status of a load-balancer. +type LoadBalancerStatus struct { + // Ingress is a list containing ingress points for the load-balancer. + // Traffic intended for the service should be sent to these ingress points. + // +optional + Ingress []LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` +} + +// LoadBalancerIngress represents the status of a load-balancer ingress point: +// traffic intended for the service should be sent to an ingress point. +type LoadBalancerIngress struct { + // IP is set for load-balancer ingress points that are IP based + // (typically GCE or OpenStack load-balancers) + // +optional + IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + + // Hostname is set for load-balancer ingress points that are DNS based + // (typically AWS load-balancers) + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` +} + +// ServiceSpec describes the attributes that a user creates on a service. +type ServiceSpec struct { + // The list of ports that are exposed by this service. + // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"` + + // Route service traffic to pods with label keys and values matching this + // selector. If empty or not present, the service is assumed to have an + // external process managing its endpoints, which Kubernetes will not + // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + // Ignored if type is ExternalName. + // More info: http://kubernetes.io/docs/user-guide/services#overview + // +optional + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` + + // clusterIP is the IP address of the service and is usually assigned + // randomly by the master. If an address is specified manually and is not in + // use by others, it will be allocated to the service; otherwise, creation + // of the service will fail. This field can not be changed through updates. + // Valid values are "None", empty string (""), or a valid IP address. 
"None" + // can be specified for headless services when proxying is not required. + // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if + // type is ExternalName. + // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + // +optional + ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"` + + // type determines how the Service is exposed. Defaults to ClusterIP. Valid + // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + // "ExternalName" maps to the specified externalName. + // "ClusterIP" allocates a cluster-internal IP address for load-balancing to + // endpoints. Endpoints are determined by the selector or if that is not + // specified, by manual construction of an Endpoints object. If clusterIP is + // "None", no virtual IP is allocated and the endpoints are published as a + // set of endpoints rather than a stable IP. + // "NodePort" builds on ClusterIP and allocates a port on every node which + // routes to the clusterIP. + // "LoadBalancer" builds on NodePort and creates an + // external load-balancer (if supported in the current cloud) which routes + // to the clusterIP. + // More info: http://kubernetes.io/docs/user-guide/services#overview + // +optional + Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"` + + // externalIPs is a list of IP addresses for which nodes in the cluster + // will also accept traffic for this service. These IPs are not managed by + // Kubernetes. The user is responsible for ensuring that traffic arrives + // at a node with this IP. A common example is external load-balancers + // that are not part of the Kubernetes system. A previous form of this + // functionality exists as the deprecatedPublicIPs field. When using this + // field, callers should also clear the deprecatedPublicIPs field. + // +optional + ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"` + + // deprecatedPublicIPs is deprecated and replaced by the externalIPs field + // with almost the exact same semantics. This field is retained in the v1 + // API for compatibility until at least 8/20/2016. It will be removed from + // any new API revisions. If both deprecatedPublicIPs *and* externalIPs are + // set, deprecatedPublicIPs is used. + // +k8s:conversion-gen=false + // +optional + DeprecatedPublicIPs []string `json:"deprecatedPublicIPs,omitempty" protobuf:"bytes,6,rep,name=deprecatedPublicIPs"` + + // Supports "ClientIP" and "None". Used to maintain session affinity. + // Enable client IP based session affinity. + // Must be ClientIP or None. + // Defaults to None. + // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + // +optional + SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" protobuf:"bytes,7,opt,name=sessionAffinity,casttype=ServiceAffinity"` + + // Only applies to Service Type: LoadBalancer + // LoadBalancer will get created with the IP specified in this field. + // This feature depends on whether the underlying cloud-provider supports specifying + // the loadBalancerIP when a load balancer is created. + // This field will be ignored if the cloud-provider does not support the feature. 
+ // +optional + LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"` + + // If specified and supported by the platform, traffic through the cloud-provider + // load-balancer will be restricted to the specified client IPs. This field will be ignored if the + // cloud-provider does not support the feature. + // More info: http://kubernetes.io/docs/user-guide/services-firewalls + // +optional + LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"` + + // externalName is the external reference that kubedns or equivalent will + // return as a CNAME record for this service. No proxying will be involved. + // Must be a valid DNS name and requires Type to be ExternalName. + // +optional + ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"` +} + +// ServicePort contains information on service's port. +type ServicePort struct { + // The name of this port within the service. This must be a DNS_LABEL. + // All ports within a ServiceSpec must have unique names. This maps to + // the 'Name' field in EndpointPort objects. + // Optional if only one ServicePort is defined on this service. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // The IP protocol for this port. Supports "TCP" and "UDP". + // Default is TCP. + // +optional + Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` + + // The port that will be exposed by this service. + Port int32 `json:"port" protobuf:"varint,3,opt,name=port"` + + // Number or name of the port to access on the pods targeted by the service. + // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + // If this is a string, it will be looked up as a named port in the + // target Pod's container ports. If this is not specified, the value + // of the 'port' field is used (an identity map). + // This field is ignored for services with clusterIP=None, and should be + // omitted or set equal to the 'port' field. + // More info: http://kubernetes.io/docs/user-guide/services#defining-a-service + // +optional + TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"` + + // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. + // Usually assigned by the system. If specified, it will be allocated to the service + // if unused or else creation of the service will fail. + // Default is to auto-allocate a port if the ServiceType of this Service requires one. + // More info: http://kubernetes.io/docs/user-guide/services#type--nodeport + // +optional + NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"` +} + +// +genclient=true + +// Service is a named abstraction of software service (for example, mysql) consisting of local port +// (for example 3306) that the proxy listens on, and the selector that determines which pods +// will answer requests sent through the proxy. +type Service struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the behavior of a service.
+ // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the service. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +const ( + // ClusterIPNone - do not assign a cluster IP + // no proxying required and no environment variables should be created for pods + ClusterIPNone = "None" +) + +// ServiceList holds a list of services. +type ServiceList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of services + Items []Service `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true + +// ServiceAccount binds together: +// * a name, understood by users, and perhaps by peripheral systems, for an identity +// * a principal that can be authenticated and authorized +// * a set of secrets +type ServiceAccount struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. + // More info: http://kubernetes.io/docs/user-guide/secrets + // +optional + Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"` + + // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images + // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets + // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. + // More info: http://kubernetes.io/docs/user-guide/secrets#manually-specifying-an-imagepullsecret + // +optional + ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"` + + // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. + // Can be overridden at the pod level. + // +optional + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,4,opt,name=automountServiceAccountToken"` +} + +// ServiceAccountList is a list of ServiceAccount objects +type ServiceAccountList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of ServiceAccounts. + // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts + Items []ServiceAccount `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true + +// Endpoints is a collection of endpoints that implement the actual service. 
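Illustrative sketch of a ClusterIP Service built from the types above (assumed import paths as in the earlier sketches; Protocol and its ProtocolTCP constant are defined earlier in this file): port 80 on the service forwards to the named container port "http" on pods labelled app=nginx.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // assumed vendor path
	"k8s.io/apimachinery/pkg/util/intstr"         // assumed vendor path
	v1 "k8s.io/client-go/pkg/api/v1"              // assumed vendor path for the types in this file
)

// nginxService exposes pods labelled app=nginx inside the cluster on port 80.
var nginxService = v1.Service{
	ObjectMeta: metav1.ObjectMeta{Name: "nginx"},
	Spec: v1.ServiceSpec{
		Type:     v1.ServiceTypeClusterIP,
		Selector: map[string]string{"app": "nginx"},
		Ports: []v1.ServicePort{{
			Name:       "http",
			Protocol:   v1.ProtocolTCP,
			Port:       80,
			TargetPort: intstr.FromString("http"), // resolved against the pod's named container ports
		}},
		SessionAffinity: v1.ServiceAffinityNone,
	},
}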
Example: +// Name: "mysvc", +// Subsets: [ +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// }, +// { +// Addresses: [{"ip": "10.10.3.3"}], +// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] +// }, +// ] +type Endpoints struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The set of all endpoints is the union of all subsets. Addresses are placed into + // subsets according to the IPs they share. A single address with multiple ports, + // some of which are ready and some of which are not (because they come from + // different containers) will result in the address being displayed in different + // subsets for the different ports. No address will appear in both Addresses and + // NotReadyAddresses in the same subset. + // Sets of addresses and ports that comprise a service. + Subsets []EndpointSubset `json:"subsets" protobuf:"bytes,2,rep,name=subsets"` +} + +// EndpointSubset is a group of addresses with a common set of ports. The +// expanded set of endpoints is the Cartesian product of Addresses x Ports. +// For example, given: +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// } +// The resulting set of endpoints can be viewed as: +// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], +// b: [ 10.10.1.1:309, 10.10.2.2:309 ] +type EndpointSubset struct { + // IP addresses which offer the related ports that are marked as ready. These endpoints + // should be considered safe for load balancers and clients to utilize. + // +optional + Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"` + // IP addresses which offer the related ports but are not currently marked as ready + // because they have not yet finished starting, have recently failed a readiness check, + // or have recently failed a liveness check. + // +optional + NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty" protobuf:"bytes,2,rep,name=notReadyAddresses"` + // Port numbers available on the related IP addresses. + // +optional + Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"` +} + +// EndpointAddress is a tuple that describes single IP address. +type EndpointAddress struct { + // The IP of this endpoint. + // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), + // or link-local multicast ((224.0.0.0/24). + // IPv6 is also accepted but not fully supported on all platforms. Also, certain + // kubernetes components, like kube-proxy, are not IPv6 ready. + // TODO: This should allow hostname or IP, See #4447. + IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` + // The Hostname of this endpoint + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. + // +optional + NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,4,opt,name=nodeName"` + // Reference to object providing the endpoint. + // +optional + TargetRef *ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,2,opt,name=targetRef"` +} + +// EndpointPort is a tuple that describes a single port. 
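Illustrative sketch mirroring the "mysvc" example from the Endpoints doc comment above (assumed import paths as before): each subset expands to the Cartesian product of its addresses and ports, so the first subset yields a:[10.10.1.1:8675, 10.10.2.2:8675] and b:[10.10.1.1:309, 10.10.2.2:309].

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // assumed vendor path
	v1 "k8s.io/client-go/pkg/api/v1"              // assumed vendor path for the types in this file
)

// mysvcEndpoints reproduces the two subsets shown in the Endpoints doc comment.
var mysvcEndpoints = v1.Endpoints{
	ObjectMeta: metav1.ObjectMeta{Name: "mysvc"},
	Subsets: []v1.EndpointSubset{
		{
			Addresses: []v1.EndpointAddress{{IP: "10.10.1.1"}, {IP: "10.10.2.2"}},
			Ports:     []v1.EndpointPort{{Name: "a", Port: 8675}, {Name: "b", Port: 309}},
		},
		{
			Addresses: []v1.EndpointAddress{{IP: "10.10.3.3"}},
			Ports:     []v1.EndpointPort{{Name: "a", Port: 93}, {Name: "b", Port: 76}},
		},
	},
}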
+type EndpointPort struct { + // The name of this port (corresponds to ServicePort.Name). + // Must be a DNS_LABEL. + // Optional only if one port is defined. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // The port number of the endpoint. + Port int32 `json:"port" protobuf:"varint,2,opt,name=port"` + + // The IP protocol for this port. + // Must be UDP or TCP. + // Default is TCP. + // +optional + Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"` +} + +// EndpointsList is a list of endpoints. +type EndpointsList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of endpoints. + Items []Endpoints `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// NodeSpec describes the attributes that a node is created with. +type NodeSpec struct { + // PodCIDR represents the pod IP range assigned to the node. + // +optional + PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"` + // External ID of the node assigned by some machine database (e.g. a cloud provider). + // Deprecated. + // +optional + ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"` + // ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID> + // +optional + ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"` + // Unschedulable controls node schedulability of new pods. By default, node is schedulable. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration + // +optional + Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"` + // If specified, the node's taints. + // +optional + Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"` +} + +// DaemonEndpoint contains information about a single Daemon endpoint. +type DaemonEndpoint struct { + /* + The port tag was not properly in quotes in earlier releases, so it must be + uppercased for backwards compat (since it was falling back to var name of + 'Port'). + */ + + // Port number of the given endpoint. + Port int32 `json:"Port" protobuf:"varint,1,opt,name=Port"` +} + +// NodeDaemonEndpoints lists ports opened by daemons running on the Node. +type NodeDaemonEndpoints struct { + // Endpoint on which Kubelet is listening. + // +optional + KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"` +} + +// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. +type NodeSystemInfo struct { + // MachineID reported by the node. For unique machine identification + // in the cluster this field is preferred. Learn more from man(5) + // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html + MachineID string `json:"machineID" protobuf:"bytes,1,opt,name=machineID"` + // SystemUUID reported by the node. For unique machine identification + // MachineID is preferred. This field is specific to Red Hat hosts + // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html + SystemUUID string `json:"systemUUID" protobuf:"bytes,2,opt,name=systemUUID"` + // Boot ID reported by the node.
+ BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"` + // Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64). + KernelVersion string `json:"kernelVersion" protobuf:"bytes,4,opt,name=kernelVersion"` + // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). + OSImage string `json:"osImage" protobuf:"bytes,5,opt,name=osImage"` + // ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0). + ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"` + // Kubelet Version reported by the node. + KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"` + // KubeProxy Version reported by the node. + KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"` + // The Operating System reported by the node + OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"` + // The Architecture reported by the node + Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"` +} + +// NodeStatus is information about the current status of a node. +type NodeStatus struct { + // Capacity represents the total resources of a node. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity for more details. + // +optional + Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` + // Allocatable represents the resources of a node that are available for scheduling. + // Defaults to Capacity. + // +optional + Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,2,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"` + // NodePhase is the recently observed lifecycle phase of the node. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-phase + // The field is never populated, and now is deprecated. + // +optional + Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"` + // Conditions is an array of current observed node conditions. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-condition + // +optional + Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` + // List of addresses reachable to the node. + // Queried from cloud provider, if available. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-addresses + // +optional + Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"` + // Endpoints of daemons running on the Node. + // +optional + DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"` + // Set of ids/uuids to uniquely identify the node. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info + // +optional + NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"` + // List of container images on this node + // +optional + Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"` + // List of attachable volumes in use (mounted) by the node. 
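// Illustrative sketch: reading NodeStatus as described above — checking the
// NodeReady condition and the allocatable CPU. Assumes v1.ConditionTrue is the
// "True" ConditionStatus constant defined elsewhere in this package.
package example

import (
	v1 "k8s.io/client-go/pkg/api/v1"
)

// nodeIsReady reports whether the NodeReady condition is currently True.
func nodeIsReady(st v1.NodeStatus) bool {
	for _, c := range st.Conditions {
		if c.Type == v1.NodeReady {
			return c.Status == v1.ConditionTrue
		}
	}
	return false
}

// allocatableCPUMillis returns the node's schedulable CPU in millicores.
func allocatableCPUMillis(st v1.NodeStatus) int64 {
	q := st.Allocatable[v1.ResourceCPU]
	return q.MilliValue()
}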
+ // +optional + VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"` + // List of volumes that are attached to the node. + // +optional + VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"` +} + +type UniqueVolumeName string + +// AttachedVolume describes a volume attached to a node +type AttachedVolume struct { + // Name of the attached volume + Name UniqueVolumeName `json:"name" protobuf:"bytes,1,rep,name=name"` + + // DevicePath represents the device path where the volume should be available + DevicePath string `json:"devicePath" protobuf:"bytes,2,rep,name=devicePath"` +} + +// AvoidPods describes pods that should avoid this node. This is the value for a +// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and +// will eventually become a field of NodeStatus. +type AvoidPods struct { + // Bounded-sized list of signatures of pods that should avoid this node, sorted + // in timestamp order from oldest to newest. Size of the slice is unspecified. + // +optional + PreferAvoidPods []PreferAvoidPodsEntry `json:"preferAvoidPods,omitempty" protobuf:"bytes,1,rep,name=preferAvoidPods"` +} + +// Describes a class of pods that should avoid this node. +type PreferAvoidPodsEntry struct { + // The class of pods. + PodSignature PodSignature `json:"podSignature" protobuf:"bytes,1,opt,name=podSignature"` + // Time at which this entry was added to the list. + // +optional + EvictionTime metav1.Time `json:"evictionTime,omitempty" protobuf:"bytes,2,opt,name=evictionTime"` + // (brief) reason why this entry was added to the list. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // Human readable message indicating why this entry was added to the list. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` +} + +// Describes the class of pods that should avoid this node. +// Exactly one field should be set. +type PodSignature struct { + // Reference to controller whose pods should avoid this node. + // +optional + PodController *metav1.OwnerReference `json:"podController,omitempty" protobuf:"bytes,1,opt,name=podController"` +} + +// Describe a container image +type ContainerImage struct { + // Names by which this image is known. + // e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] + Names []string `json:"names" protobuf:"bytes,1,rep,name=names"` + // The size of the image in bytes. + // +optional + SizeBytes int64 `json:"sizeBytes,omitempty" protobuf:"varint,2,opt,name=sizeBytes"` +} + +type NodePhase string + +// These are the valid phases of node. +const ( + // NodePending means the node has been created/added by the system, but not configured. + NodePending NodePhase = "Pending" + // NodeRunning means the node has been configured and has Kubernetes components running. + NodeRunning NodePhase = "Running" + // NodeTerminated means the node has been removed from the cluster. + NodeTerminated NodePhase = "Terminated" +) + +type NodeConditionType string + +// These are valid conditions of node. Currently, we don't have enough information to decide +// node condition. In the future, we will add more. The proposed set of conditions are: +// NodeReachable, NodeLive, NodeReady, NodeSchedulable, NodeRunnable. +const ( + // NodeReady means kubelet is healthy and ready to accept pods. 
+ NodeReady NodeConditionType = "Ready" + // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk + // space on the node. + NodeOutOfDisk NodeConditionType = "OutOfDisk" + // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. + NodeMemoryPressure NodeConditionType = "MemoryPressure" + // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. + NodeDiskPressure NodeConditionType = "DiskPressure" + // NodeNetworkUnavailable means that network for the node is not correctly configured. + NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" + // NodeInodePressure means the kubelet is under pressure due to insufficient available inodes. + NodeInodePressure NodeConditionType = "InodePressure" +) + +// NodeCondition contains condition information for a node. +type NodeCondition struct { + // Type of node condition. + Type NodeConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeConditionType"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // Last time we got an update on a given condition. + // +optional + LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty" protobuf:"bytes,3,opt,name=lastHeartbeatTime"` + // Last time the condition transit from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // (brief) reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + +type NodeAddressType string + +// These are valid address type of node. +const ( + // Deprecated: NodeLegacyHostIP will be removed in 1.7. + NodeLegacyHostIP NodeAddressType = "LegacyHostIP" + NodeHostName NodeAddressType = "Hostname" + NodeExternalIP NodeAddressType = "ExternalIP" + NodeInternalIP NodeAddressType = "InternalIP" + NodeExternalDNS NodeAddressType = "ExternalDNS" + NodeInternalDNS NodeAddressType = "InternalDNS" +) + +// NodeAddress contains information for the node's address. +type NodeAddress struct { + // Node address type, one of Hostname, ExternalIP or InternalIP. + Type NodeAddressType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeAddressType"` + // The node address. + Address string `json:"address" protobuf:"bytes,2,opt,name=address"` +} + +// ResourceName is the name identifying various resources in a ResourceList. +type ResourceName string + +// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters, +// with the -, _, and . characters allowed anywhere, except the first or last character. +// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than +// camel case, separating compound words. +// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name. +const ( + // CPU, in cores. (500m = .5 cores) + ResourceCPU ResourceName = "cpu" + // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceMemory ResourceName = "memory" + // Volume size, in bytes (e,g. 
5Gi = 5GiB = 5 * 1024 * 1024 * 1024) + ResourceStorage ResourceName = "storage" + // NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned. + ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu" + // Number of Pods that may be running on this Node: see ResourcePods +) + +const ( + // Namespace prefix for opaque counted resources (alpha). + ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-" +) + +// ResourceList is a set of (resource name, quantity) pairs. +type ResourceList map[ResourceName]resource.Quantity + +// +genclient=true +// +nonNamespaced=true + +// Node is a worker node in Kubernetes. +// Each node will have a unique identifier in the cache (i.e. in etcd). +type Node struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the behavior of a node. + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the node. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// NodeList is the whole list of all Nodes which have been registered with master. +type NodeList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of nodes + Items []Node `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// FinalizerName is the name identifying a finalizer during namespace lifecycle. +type FinalizerName string + +// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or +// in metav1. +const ( + FinalizerKubernetes FinalizerName = "kubernetes" +) + +// NamespaceSpec describes the attributes on a Namespace. +type NamespaceSpec struct { + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. + // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers + // +optional + Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"` +} + +// NamespaceStatus is information about the current status of a Namespace. +type NamespaceStatus struct { + // Phase is the current lifecycle phase of the namespace. + // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#phases + // +optional + Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"` +} + +type NamespacePhase string + +// These are the valid phases of a namespace. +const ( + // NamespaceActive means the namespace is available for use in the system + NamespaceActive NamespacePhase = "Active" + // NamespaceTerminating means the namespace is undergoing graceful termination + NamespaceTerminating NamespacePhase = "Terminating" +) + +// +genclient=true +// +nonNamespaced=true + +// Namespace provides a scope for Names. 
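// Illustrative sketch: building a ResourceList — the (resource name, quantity)
// map defined above — using resource.MustParse from
// k8s.io/apimachinery/pkg/api/resource. The values are arbitrary examples.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	v1 "k8s.io/client-go/pkg/api/v1"
)

func main() {
	capacity := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("4"),    // 4 cores
		v1.ResourceMemory: resource.MustParse("16Gi"), // 16 GiB
		v1.ResourcePods:   resource.MustParse("110"),  // max pods on the node
	}
	for name, qty := range capacity {
		fmt.Printf("%s = %s\n", name, qty.String())
	}
}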
+// Use of multiple namespaces is optional. +type Namespace struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the behavior of the Namespace. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status describes the current status of a Namespace. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// NamespaceList is a list of Namespaces. +type NamespaceList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Namespace objects in the list. + // More info: http://kubernetes.io/docs/user-guide/namespaces + Items []Namespace `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Binding ties one object to another. +// For example, a pod is bound to a node by a scheduler. +type Binding struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The target object that you want to bind to the standard object. + Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"` +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +// +k8s:openapi-gen=false +type Preconditions struct { + // Specifies the target UID. + // +optional + UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` +} + +// DeletionPropagation decides if a deletion will propagate to the dependents of the object, and how the garbage collector will handle the propagation. +type DeletionPropagation string + +const ( + // Orphans the dependents. + DeletePropagationOrphan DeletionPropagation = "Orphan" + // Deletes the object from the key-value store, the garbage collector will delete the dependents in the background. + DeletePropagationBackground DeletionPropagation = "Background" + // The object exists in the key-value store until the garbage collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the key-value store. + // API sever will put the "DeletingDependents" finalizer on the object, and sets its deletionTimestamp. + // This policy is cascading, i.e., the dependents will be deleted with Foreground. + DeletePropagationForeground DeletionPropagation = "Foreground" +) + +// DeleteOptions may be provided when deleting an API object +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +// +k8s:openapi-gen=false +type DeleteOptions struct { + metav1.TypeMeta `json:",inline"` + + // The duration in seconds before the object should be deleted. Value must be non-negative integer. + // The value zero indicates delete immediately. If this value is nil, the default grace period for the + // specified type will be used. 
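// Illustrative sketch: the Binding object a scheduler might submit to bind a
// pod to a node, per the Binding type above. Assumes metav1 is
// k8s.io/apimachinery/pkg/apis/meta/v1; the function name is illustrative.
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1 "k8s.io/client-go/pkg/api/v1"
)

// bindingFor returns a Binding that assigns the named pod to the named node.
func bindingFor(namespace, podName, nodeName string) *v1.Binding {
	return &v1.Binding{
		ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: podName},
		Target: v1.ObjectReference{
			Kind: "Node",
			Name: nodeName,
		},
	}
}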
+ // Defaults to a per object value if not specified. zero means delete immediately. + // +optional + GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=gracePeriodSeconds"` + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. + // +optional + Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"` + + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. + // +optional + OrphanDependents *bool `json:"orphanDependents,omitempty" protobuf:"varint,3,opt,name=orphanDependents"` + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // +optional + PropagationPolicy *DeletionPropagation `protobuf:"bytes,4,opt,name=propagationPolicy,casttype=DeletionPropagation"` +} + +// ListOptions is the query options to a standard REST list call. +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +// +k8s:openapi-gen=false +type ListOptions struct { + metav1.TypeMeta `json:",inline"` + + // A selector to restrict the list of returned objects by their labels. + // Defaults to everything. + // +optional + LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` + // A selector to restrict the list of returned objects by their fields. + // Defaults to everything. + // +optional + FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"` + // Watch for changes to the described resources and return them as a stream of + // add, update, and remove notifications. Specify resourceVersion. + // +optional + Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"` + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"` + // Timeout for the list/watch call. + // +optional + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"` +} + +// PodLogOptions is the query options for a Pod's logs REST call. +type PodLogOptions struct { + metav1.TypeMeta `json:",inline"` + + // The container for which to stream logs. Defaults to only container if there is one container in the pod. + // +optional + Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"` + // Follow the log stream of the pod. Defaults to false. + // +optional + Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"` + // Return previous terminated container logs. Defaults to false. 
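// Illustrative sketch: a ListOptions value restricting a list to a label and
// field selector and optionally watching from a known resourceVersion, as the
// field comments above describe. (The type is noted above as deprecated in
// favor of its meta/v1 equivalent.) Selector strings are arbitrary examples.
package example

import (
	v1 "k8s.io/client-go/pkg/api/v1"
)

func runningPodListOptions(resourceVersion string) v1.ListOptions {
	return v1.ListOptions{
		LabelSelector:   "app=nginx-ingress-controller",
		FieldSelector:   "status.phase=Running",
		Watch:           resourceVersion != "",
		ResourceVersion: resourceVersion,
	}
}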
+ // +optional + Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"` + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + // +optional + SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"` + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + // +optional + SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"` + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. Defaults to false. + // +optional + Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"` + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + // +optional + TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"` + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + // +optional + LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"` +} + +// PodAttachOptions is the query options to a Pod's remote attach call. +// --- +// TODO: merge w/ PodExecOptions below for stdin, stdout, etc +// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY +type PodAttachOptions struct { + metav1.TypeMeta `json:",inline"` + + // Stdin if true, redirects the standard input stream of the pod for this call. + // Defaults to false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"` + + // Stdout if true indicates that stdout is to be redirected for the attach call. + // Defaults to true. + // +optional + Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"` + + // Stderr if true indicates that stderr is to be redirected for the attach call. + // Defaults to true. + // +optional + Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"` + + // TTY if true indicates that a tty will be allocated for the attach call. + // This is passed through the container runtime so the tty + // is allocated on the worker node by the container runtime. + // Defaults to false. + // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"` + + // The container in which to execute the command. + // Defaults to only container if there is only one container in the pod. + // +optional + Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"` +} + +// PodExecOptions is the query options to a Pod's remote exec call. 
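// Illustrative sketch: PodLogOptions for following the last 100 lines of a
// container's log with timestamps, per the fields above. The int64 pointer
// helper is local to this example.
package example

import (
	v1 "k8s.io/client-go/pkg/api/v1"
)

func int64Ptr(i int64) *int64 { return &i }

func tailLogOptions(container string) *v1.PodLogOptions {
	return &v1.PodLogOptions{
		Container:  container,
		Follow:     true,
		Timestamps: true,
		TailLines:  int64Ptr(100),
	}
}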
+// --- +// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging +// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY +type PodExecOptions struct { + metav1.TypeMeta `json:",inline"` + + // Redirect the standard input stream of the pod for this call. + // Defaults to false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"` + + // Redirect the standard output stream of the pod for this call. + // Defaults to true. + // +optional + Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"` + + // Redirect the standard error stream of the pod for this call. + // Defaults to true. + // +optional + Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"` + + // TTY if true indicates that a tty will be allocated for the exec call. + // Defaults to false. + // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"` + + // Container in which to execute the command. + // Defaults to only container if there is only one container in the pod. + // +optional + Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"` + + // Command is the remote command to execute. argv array. Not executed within a shell. + Command []string `json:"command" protobuf:"bytes,6,rep,name=command"` +} + +// PodPortForwardOptions is the query options to a Pod's port forward call +// when using WebSockets. +// The `port` query parameter must specify the port or +// ports (comma separated) to forward over. +// Port forwarding over SPDY does not use these options. It requires the port +// to be passed in the `port` header as part of request. +type PodPortForwardOptions struct { + metav1.TypeMeta `json:",inline"` + + // List of ports to forward + // Required when using WebSockets + // +optional + Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"` +} + +// PodProxyOptions is the query options to a Pod's proxy call. +type PodProxyOptions struct { + metav1.TypeMeta `json:",inline"` + + // Path is the URL path to use for the current proxy request to pod. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` +} + +// NodeProxyOptions is the query options to a Node's proxy call. +type NodeProxyOptions struct { + metav1.TypeMeta `json:",inline"` + + // Path is the URL path to use for the current proxy request to node. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` +} + +// ServiceProxyOptions is the query options to a Service's proxy call. +type ServiceProxyOptions struct { + metav1.TypeMeta `json:",inline"` + + // Path is the part of URLs that include service endpoints, suffixes, + // and parameters to use for the current proxy request to service. + // For example, the whole request URL is + // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. + // Path is _search?q=user:kimchy. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +type ObjectReference struct { + // Kind of the referent. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` + // Namespace of the referent. 
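// Illustrative sketch: PodExecOptions for running a one-shot command in a
// named container and capturing stdout and stderr, per the fields above.
package example

import (
	v1 "k8s.io/client-go/pkg/api/v1"
)

func execOptions(container string, command ...string) *v1.PodExecOptions {
	return &v1.PodExecOptions{
		Container: container,
		Command:   command, // argv form; not run through a shell
		Stdout:    true,
		Stderr:    true,
	}
}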
+ // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` + // Name of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"` + // UID of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + // API version of the referent. + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"` + // Specific resourceVersion to which this reference is made, if any. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` + + // If referring to a piece of an object instead of an entire object, this string + // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + // For example, if the object reference is to a container within a pod, this would take on a value like: + // "spec.containers{name}" (where "name" refers to the name of the container that triggered + // the event) or if no container name is specified "spec.containers[2]" (container with + // index 2 in this pod). This syntax is chosen only to have some well-defined way of + // referencing a part of an object. + // TODO: this design is not final and this field is subject to change in the future. + // +optional + FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,7,opt,name=fieldPath"` +} + +// LocalObjectReference contains enough information to let you locate the +// referenced object inside the same namespace. +type LocalObjectReference struct { + // Name of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // TODO: Add other useful fields. apiVersion, kind, uid? + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` +} + +// SerializedReference is a reference to serialized object. +type SerializedReference struct { + metav1.TypeMeta `json:",inline"` + // The reference to an object in the system. + // +optional + Reference ObjectReference `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"` +} + +// EventSource contains information for an event. +type EventSource struct { + // Component from which the event is generated. + // +optional + Component string `json:"component,omitempty" protobuf:"bytes,1,opt,name=component"` + // Node name on which the event is generated. + // +optional + Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"` +} + +// Valid values for event types (new types could be added in future) +const ( + // Information only and will not cause any problems + EventTypeNormal string = "Normal" + // These events are to warn that something might go wrong + EventTypeWarning string = "Warning" +) + +// +genclient=true + +// Event is a report of an event somewhere in the cluster. +// TODO: Decide whether to store these separately or with the object they apply to. +type Event struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
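// Illustrative sketch: an ObjectReference pointing at a pod, the shape carried
// by fields such as Event.InvolvedObject below; the UID would normally be
// copied from the live object's metadata. Assumes types is
// k8s.io/apimachinery/pkg/types.
package example

import (
	"k8s.io/apimachinery/pkg/types"
	v1 "k8s.io/client-go/pkg/api/v1"
)

func podReference(namespace, name string, uid types.UID) v1.ObjectReference {
	return v1.ObjectReference{
		Kind:       "Pod",
		APIVersion: "v1",
		Namespace:  namespace,
		Name:       name,
		UID:        uid,
	}
}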
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + + // The object that this event is about. + InvolvedObject ObjectReference `json:"involvedObject" protobuf:"bytes,2,opt,name=involvedObject"` + + // This should be a short, machine understandable string that gives the reason + // for the transition into the object's current status. + // TODO: provide exact specification for format. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + + // A human-readable description of the status of this operation. + // TODO: decide on maximum length. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` + + // The component reporting this event. Should be a short machine understandable string. + // +optional + Source EventSource `json:"source,omitempty" protobuf:"bytes,5,opt,name=source"` + + // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) + // +optional + FirstTimestamp metav1.Time `json:"firstTimestamp,omitempty" protobuf:"bytes,6,opt,name=firstTimestamp"` + + // The time at which the most recent occurrence of this event was recorded. + // +optional + LastTimestamp metav1.Time `json:"lastTimestamp,omitempty" protobuf:"bytes,7,opt,name=lastTimestamp"` + + // The number of times this event has occurred. + // +optional + Count int32 `json:"count,omitempty" protobuf:"varint,8,opt,name=count"` + + // Type of this event (Normal, Warning), new types could be added in the future + // +optional + Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"` +} + +// EventList is a list of events. +type EventList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of events + Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// List holds a list of objects, which may not be known by the server. +type List struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of objects + Items []runtime.RawExtension `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// LimitType is a type of object that is limited +type LimitType string + +const ( + // Limit that applies to all pods in a namespace + LimitTypePod LimitType = "Pod" + // Limit that applies to all containers in a namespace + LimitTypeContainer LimitType = "Container" + // Limit that applies to all persistent volume claims in a namespace + LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" +) + +// LimitRangeItem defines a min/max usage limit for any resource that matches on kind. +type LimitRangeItem struct { + // Type of resource that this limit applies to. + // +optional + Type LimitType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=LimitType"` + // Max usage constraints on this kind by resource name. + // +optional + Max ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max,casttype=ResourceList,castkey=ResourceName"` + // Min usage constraints on this kind by resource name. 
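// Illustrative sketch: filtering an EventList down to Warning events for a
// given object, using the Event fields and EventTypeWarning constant above.
package example

import (
	v1 "k8s.io/client-go/pkg/api/v1"
)

func warningsFor(list v1.EventList, involvedName string) []v1.Event {
	var out []v1.Event
	for _, e := range list.Items {
		if e.Type == v1.EventTypeWarning && e.InvolvedObject.Name == involvedName {
			out = append(out, e)
		}
	}
	return out
}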
+ // +optional + Min ResourceList `json:"min,omitempty" protobuf:"bytes,3,rep,name=min,casttype=ResourceList,castkey=ResourceName"` + // Default resource requirement limit value by resource name if resource limit is omitted. + // +optional + Default ResourceList `json:"default,omitempty" protobuf:"bytes,4,rep,name=default,casttype=ResourceList,castkey=ResourceName"` + // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. + // +optional + DefaultRequest ResourceList `json:"defaultRequest,omitempty" protobuf:"bytes,5,rep,name=defaultRequest,casttype=ResourceList,castkey=ResourceName"` + // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource. + // +optional + MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty" protobuf:"bytes,6,rep,name=maxLimitRequestRatio,casttype=ResourceList,castkey=ResourceName"` +} + +// LimitRangeSpec defines a min/max usage limit for resources that match on kind. +type LimitRangeSpec struct { + // Limits is the list of LimitRangeItem objects that are enforced. + Limits []LimitRangeItem `json:"limits" protobuf:"bytes,1,rep,name=limits"` +} + +// +genclient=true + +// LimitRange sets resource usage limits for each kind of resource in a Namespace. +type LimitRange struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the limits enforced. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// LimitRangeList is a list of LimitRange items. +type LimitRangeList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of LimitRange objects. + // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_limit_range.md + Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// The following identify resource constants for Kubernetes object types +const ( + // Pods, number + ResourcePods ResourceName = "pods" + // Services, number + ResourceServices ResourceName = "services" + // ReplicationControllers, number + ResourceReplicationControllers ResourceName = "replicationcontrollers" + // ResourceQuotas, number + ResourceQuotas ResourceName = "resourcequotas" + // ResourceSecrets, number + ResourceSecrets ResourceName = "secrets" + // ResourceConfigMaps, number + ResourceConfigMaps ResourceName = "configmaps" + // ResourcePersistentVolumeClaims, number + ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" + // ResourceServicesNodePorts, number + ResourceServicesNodePorts ResourceName = "services.nodeports" + // ResourceServicesLoadBalancers, number + ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" + // CPU request, in cores. (500m = .5 cores) + ResourceRequestsCPU ResourceName = "requests.cpu" + // Memory request, in bytes. 
(500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceRequestsMemory ResourceName = "requests.memory" + // Storage request, in bytes + ResourceRequestsStorage ResourceName = "requests.storage" + // CPU limit, in cores. (500m = .5 cores) + ResourceLimitsCPU ResourceName = "limits.cpu" + // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceLimitsMemory ResourceName = "limits.memory" +) + +// A ResourceQuotaScope defines a filter that must match each object tracked by a quota +type ResourceQuotaScope string + +const ( + // Match all pod objects where spec.activeDeadlineSeconds + ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" + // Match all pod objects where !spec.activeDeadlineSeconds + ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating" + // Match all pod objects that have best effort quality of service + ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" + // Match all pod objects that do not have best effort quality of service + ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" +) + +// ResourceQuotaSpec defines the desired hard limits to enforce for Quota. +type ResourceQuotaSpec struct { + // Hard is the set of desired hard limits for each named resource. + // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // +optional + Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` + // A collection of filters that must match each object tracked by a quota. + // If not specified, the quota matches all objects. + // +optional + Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"` +} + +// ResourceQuotaStatus defines the enforced hard limits and observed use. +type ResourceQuotaStatus struct { + // Hard is the set of enforced hard limits for each named resource. + // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // +optional + Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` + // Used is the current observed total usage of the resource in the namespace. + // +optional + Used ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"` +} + +// +genclient=true + +// ResourceQuota sets aggregate quota restrictions enforced per namespace +type ResourceQuota struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired quota. + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status defines the actual enforced quota and its current usage. + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ResourceQuotaList is a list of ResourceQuota items. +type ResourceQuotaList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
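// Illustrative sketch: a ResourceQuota that caps pod count and aggregate
// requests/limits in a namespace, scoped to non-BestEffort pods, using the
// constants and types above. Assumes metav1 is
// k8s.io/apimachinery/pkg/apis/meta/v1; the quota name and values are
// arbitrary examples.
package example

import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1 "k8s.io/client-go/pkg/api/v1"
)

func namespaceQuota(namespace string) *v1.ResourceQuota {
	return &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: "compute-quota"},
		Spec: v1.ResourceQuotaSpec{
			Hard: v1.ResourceList{
				v1.ResourcePods:         resource.MustParse("50"),
				v1.ResourceRequestsCPU:  resource.MustParse("10"),
				v1.ResourceLimitsMemory: resource.MustParse("32Gi"),
			},
			Scopes: []v1.ResourceQuotaScope{v1.ResourceQuotaScopeNotBestEffort},
		},
	}
}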
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ResourceQuota objects. + // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true + +// Secret holds secret data of a certain type. The total bytes of the values in +// the Data field must be less than MaxSecretSize bytes. +type Secret struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN + // or leading dot followed by valid DNS_SUBDOMAIN. + // The serialized form of the secret data is a base64 encoded string, + // representing the arbitrary (possibly non-string) data value here. + // Described in https://tools.ietf.org/html/rfc4648#section-4 + // +optional + Data map[string][]byte `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"` + + // stringData allows specifying non-binary secret data in string form. + // It is provided as a write-only convenience method. + // All keys and values are merged into the data field on write, overwriting any existing values. + // It is never output when reading from the API. + // +k8s:conversion-gen=false + // +optional + StringData map[string]string `json:"stringData,omitempty" protobuf:"bytes,4,rep,name=stringData"` + + // Used to facilitate programmatic handling of secret data. + // +optional + Type SecretType `json:"type,omitempty" protobuf:"bytes,3,opt,name=type,casttype=SecretType"` +} + +const MaxSecretSize = 1 * 1024 * 1024 + +type SecretType string + +const ( + // SecretTypeOpaque is the default. 
Arbitrary user-defined data + SecretTypeOpaque SecretType = "Opaque" + + // SecretTypeServiceAccountToken contains a token that identifies a service account to the API + // + // Required fields: + // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies + // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies + // - Secret.Data["token"] - a token that identifies the service account to the API + SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token" + + // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountNameKey = "kubernetes.io/service-account.name" + // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountUIDKey = "kubernetes.io/service-account.uid" + // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets + ServiceAccountTokenKey = "token" + // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets + ServiceAccountKubeconfigKey = "kubernetes.kubeconfig" + // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets + ServiceAccountRootCAKey = "ca.crt" + // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls + ServiceAccountNamespaceKey = "namespace" + + // SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg + // + // Required fields: + // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file + SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg" + + // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets + DockerConfigKey = ".dockercfg" + + // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json + // + // Required fields: + // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file + SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson" + + // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets + DockerConfigJsonKey = ".dockerconfigjson" + + // SecretTypeBasicAuth contains data needed for basic authentication. + // + // Required at least one of fields: + // - Secret.Data["username"] - username used for authentication + // - Secret.Data["password"] - password or token needed for authentication + SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth" + + // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets + BasicAuthUsernameKey = "username" + // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets + BasicAuthPasswordKey = "password" + + // SecretTypeSSHAuth contains data needed for SSH authetication. + // + // Required field: + // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication + SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" + + // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets + SSHAuthPrivateKey = "ssh-privatekey" + // SecretTypeTLS contains information about a TLS client or server secret. It + // is primarily used with TLS termination of the Ingress resource, but may be + // used in other types. 
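// Illustrative sketch: building a basic-auth Secret with the required keys
// listed above, supplying the values through StringData so they are merged
// into Data on write as described earlier. Assumes metav1 is
// k8s.io/apimachinery/pkg/apis/meta/v1.
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1 "k8s.io/client-go/pkg/api/v1"
)

func basicAuthSecret(namespace, name, username, password string) *v1.Secret {
	return &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name},
		Type:       v1.SecretTypeBasicAuth,
		StringData: map[string]string{
			v1.BasicAuthUsernameKey: username,
			v1.BasicAuthPasswordKey: password,
		},
	}
}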
+ // + // Required fields: + // - Secret.Data["tls.key"] - TLS private key. + // Secret.Data["tls.crt"] - TLS certificate. + // TODO: Consider supporting different formats, specifying CA/destinationCA. + SecretTypeTLS SecretType = "kubernetes.io/tls" + + // TLSCertKey is the key for tls certificates in a TLS secert. + TLSCertKey = "tls.crt" + // TLSPrivateKeyKey is the key for the private key field in a TLS secret. + TLSPrivateKeyKey = "tls.key" +) + +// SecretList is a list of Secret. +type SecretList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of secret objects. + // More info: http://kubernetes.io/docs/user-guide/secrets + Items []Secret `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true + +// ConfigMap holds configuration data for pods to consume. +type ConfigMap struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Data contains the configuration data. + // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot. + // +optional + Data map[string]string `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"` +} + +// ConfigMapList is a resource containing a list of ConfigMap objects. +type ConfigMapList struct { + metav1.TypeMeta `json:",inline"` + + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of ConfigMaps. + Items []ConfigMap `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Type and constants for component health validation. +type ComponentConditionType string + +// These are the valid conditions for the component. +const ( + ComponentHealthy ComponentConditionType = "Healthy" +) + +// Information about the condition of a component. +type ComponentCondition struct { + // Type of condition for a component. + // Valid value: "Healthy" + Type ComponentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ComponentConditionType"` + // Status of the condition for a component. + // Valid values for "Healthy": "True", "False", or "Unknown". + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // Message about the condition for a component. + // For example, information about a health check. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` + // Condition error code for a component. + // For example, a health check error code. + // +optional + Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"` +} + +// +genclient=true +// +nonNamespaced=true + +// ComponentStatus (and ComponentStatusList) holds the cluster validation info. +type ComponentStatus struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
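// Illustrative sketch: a ConfigMap carrying plain string configuration data
// via the Data field above; the name, keys, and values here are arbitrary
// examples, not a documented configuration schema.
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1 "k8s.io/client-go/pkg/api/v1"
)

func controllerConfigMap(namespace string) *v1.ConfigMap {
	return &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: "nginx-ingress-config"},
		Data: map[string]string{
			"log-format":      "json",
			"proxy-body-size": "8m",
		},
	}
}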
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of component conditions observed + // +optional + Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` +} + +// Status of all the conditions for the component as a list of ComponentStatus objects. +type ComponentStatusList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of ComponentStatus objects. + Items []ComponentStatus `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// DownwardAPIVolumeSource represents a volume containing downward API info. +// Downward API volumes support ownership management and SELinux relabeling. +type DownwardAPIVolumeSource struct { + // Items is a list of downward API volume file + // +optional + Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"` + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"` +} + +const ( + DownwardAPIVolumeSourceDefaultMode int32 = 0644 +) + +// DownwardAPIVolumeFile represents information to create the file containing the pod field +type DownwardAPIVolumeFile struct { + // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' + Path string `json:"path" protobuf:"bytes,1,opt,name=path"` + // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. + // +optional + FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"` + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // +optional + ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,3,opt,name=resourceFieldRef"` + // Optional: mode bits to use on this file, must be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + Mode *int32 `json:"mode,omitempty" protobuf:"varint,4,opt,name=mode"` +} + +// Represents downward API info for projecting into a projected volume. +// Note that this is identical to a downwardAPI volume source without the default +// mode. +type DownwardAPIProjection struct { + // Items is a list of DownwardAPIVolume file + // +optional + Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"` +} + +// SecurityContext holds security configuration that will be applied to a container. +// Some fields are present in both SecurityContext and PodSecurityContext. 
When both +// are set, the values in SecurityContext take precedence. +type SecurityContext struct { + // The capabilities to add/drop when running containers. + // Defaults to the default set of capabilities granted by the container runtime. + // +optional + Capabilities *Capabilities `json:"capabilities,omitempty" protobuf:"bytes,1,opt,name=capabilities"` + // Run container in privileged mode. + // Processes in privileged containers are essentially equivalent to root on the host. + // Defaults to false. + // +optional + Privileged *bool `json:"privileged,omitempty" protobuf:"varint,2,opt,name=privileged"` + // The SELinux context to be applied to the container. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"` + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,4,opt,name=runAsUser"` + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,5,opt,name=runAsNonRoot"` + // Whether this container has a read-only root filesystem. + // Default is false. + // +optional + ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"` +} + +// SELinuxOptions are the labels to be applied to the container +type SELinuxOptions struct { + // User is a SELinux user label that applies to the container. + // +optional + User string `json:"user,omitempty" protobuf:"bytes,1,opt,name=user"` + // Role is a SELinux role label that applies to the container. + // +optional + Role string `json:"role,omitempty" protobuf:"bytes,2,opt,name=role"` + // Type is a SELinux type label that applies to the container. + // +optional + Type string `json:"type,omitempty" protobuf:"bytes,3,opt,name=type"` + // Level is SELinux level label that applies to the container. + // +optional + Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"` +} + +// RangeAllocation is not a public type. +type RangeAllocation struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Range is string that identifies the range represented by 'data'. + Range string `json:"range" protobuf:"bytes,2,opt,name=range"` + // Data is a bit array containing all allocated addresses in the previous segment. 
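// Illustrative sketch: a restrictive SecurityContext built from the pointer
// fields above; the small pointer helpers are local to this example.
package example

import (
	v1 "k8s.io/client-go/pkg/api/v1"
)

func boolPtr(b bool) *bool    { return &b }
func int64Ptr(i int64) *int64 { return &i }

// restrictedSecurityContext drops root and makes the root filesystem read-only.
func restrictedSecurityContext() *v1.SecurityContext {
	return &v1.SecurityContext{
		RunAsUser:              int64Ptr(1000),
		RunAsNonRoot:           boolPtr(true),
		ReadOnlyRootFilesystem: boolPtr(true),
		Privileged:             boolPtr(false),
	}
}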
+ Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"` +} + +const ( + // "default-scheduler" is the name of default scheduler. + DefaultSchedulerName = "default-scheduler" + + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // When the --hard-pod-affinity-weight scheduler flag is not specified, + // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. + DefaultHardPodAffinitySymmetricWeight int = 1 + + // When the --failure-domains scheduler flag is not specified, + // DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity. + DefaultFailureDomains string = metav1.LabelHostname + "," + metav1.LabelZoneFailureDomain + "," + metav1.LabelZoneRegion +) diff --git a/vendor/k8s.io/client-go/pkg/api/v1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/api/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..75416d59a2 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/types_swagger_doc_generated.go @@ -0,0 +1,1960 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_AWSElasticBlockStoreVolumeSource = map[string]string{ + "": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.", + "volumeID": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", + "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". 
Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", + "readOnly": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", +} + +func (AWSElasticBlockStoreVolumeSource) SwaggerDoc() map[string]string { + return map_AWSElasticBlockStoreVolumeSource +} + +var map_Affinity = map[string]string{ + "": "Affinity is a group of affinity scheduling rules.", + "nodeAffinity": "Describes node affinity scheduling rules for the pod.", + "podAffinity": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).", + "podAntiAffinity": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).", +} + +func (Affinity) SwaggerDoc() map[string]string { + return map_Affinity +} + +var map_AttachedVolume = map[string]string{ + "": "AttachedVolume describes a volume attached to a node", + "name": "Name of the attached volume", + "devicePath": "DevicePath represents the device path where the volume should be available", +} + +func (AttachedVolume) SwaggerDoc() map[string]string { + return map_AttachedVolume +} + +var map_AvoidPods = map[string]string{ + "": "AvoidPods describes pods that should avoid this node. This is the value for a Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and will eventually become a field of NodeStatus.", + "preferAvoidPods": "Bounded-sized list of signatures of pods that should avoid this node, sorted in timestamp order from oldest to newest. Size of the slice is unspecified.", +} + +func (AvoidPods) SwaggerDoc() map[string]string { + return map_AvoidPods +} + +var map_AzureDiskVolumeSource = map[string]string{ + "": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "diskName": "The Name of the data disk in the blob storage", + "diskURI": "The URI the data disk in the blob storage", + "cachingMode": "Host Caching mode: None, Read Only, Read Write.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", +} + +func (AzureDiskVolumeSource) SwaggerDoc() map[string]string { + return map_AzureDiskVolumeSource +} + +var map_AzureFileVolumeSource = map[string]string{ + "": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "secretName": "the name of secret that contains Azure Storage Account Name and Key", + "shareName": "Share Name", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", +} + +func (AzureFileVolumeSource) SwaggerDoc() map[string]string { + return map_AzureFileVolumeSource +} + +var map_Binding = map[string]string{ + "": "Binding ties one object to another. For example, a pod is bound to a node by a scheduler.", + "metadata": "Standard object's metadata. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "target": "The target object that you want to bind to the standard object.", +} + +func (Binding) SwaggerDoc() map[string]string { + return map_Binding +} + +var map_Capabilities = map[string]string{ + "": "Adds and removes POSIX capabilities from running containers.", + "add": "Added capabilities", + "drop": "Removed capabilities", +} + +func (Capabilities) SwaggerDoc() map[string]string { + return map_Capabilities +} + +var map_CephFSVolumeSource = map[string]string{ + "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", + "monitors": "Required: Monitors is a collection of Ceph monitors More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", + "user": "Optional: User is the rados user name, default is admin More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", +} + +func (CephFSVolumeSource) SwaggerDoc() map[string]string { + return map_CephFSVolumeSource +} + +var map_CinderVolumeSource = map[string]string{ + "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", + "volumeID": "volume id used to identify the volume in cinder More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", +} + +func (CinderVolumeSource) SwaggerDoc() map[string]string { + return map_CinderVolumeSource +} + +var map_ComponentCondition = map[string]string{ + "": "Information about the condition of a component.", + "type": "Type of condition for a component. Valid value: \"Healthy\"", + "status": "Status of the condition for a component. Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\".", + "message": "Message about the condition for a component. For example, information about a health check.", + "error": "Condition error code for a component. 
For example, a health check error code.", +} + +func (ComponentCondition) SwaggerDoc() map[string]string { + return map_ComponentCondition +} + +var map_ComponentStatus = map[string]string{ + "": "ComponentStatus (and ComponentStatusList) holds the cluster validation info.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "conditions": "List of component conditions observed", +} + +func (ComponentStatus) SwaggerDoc() map[string]string { + return map_ComponentStatus +} + +var map_ComponentStatusList = map[string]string{ + "": "Status of all the conditions for the component as a list of ComponentStatus objects.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of ComponentStatus objects.", +} + +func (ComponentStatusList) SwaggerDoc() map[string]string { + return map_ComponentStatusList +} + +var map_ConfigMap = map[string]string{ + "": "ConfigMap holds configuration data for pods to consume.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "data": "Data contains the configuration data. Each key must be a valid DNS_SUBDOMAIN with an optional leading dot.", +} + +func (ConfigMap) SwaggerDoc() map[string]string { + return map_ConfigMap +} + +var map_ConfigMapEnvSource = map[string]string{ + "": "ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.", + "optional": "Specify whether the ConfigMap must be defined", +} + +func (ConfigMapEnvSource) SwaggerDoc() map[string]string { + return map_ConfigMapEnvSource +} + +var map_ConfigMapKeySelector = map[string]string{ + "": "Selects a key from a ConfigMap.", + "key": "The key to select.", + "optional": "Specify whether the ConfigMap or it's key must be defined", +} + +func (ConfigMapKeySelector) SwaggerDoc() map[string]string { + return map_ConfigMapKeySelector +} + +var map_ConfigMapList = map[string]string{ + "": "ConfigMapList is a resource containing a list of ConfigMap objects.", + "metadata": "More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is the list of ConfigMaps.", +} + +func (ConfigMapList) SwaggerDoc() map[string]string { + return map_ConfigMapList +} + +var map_ConfigMapProjection = map[string]string{ + "": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.", + "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.", + "optional": "Specify whether the ConfigMap or it's keys must be defined", +} + +func (ConfigMapProjection) SwaggerDoc() map[string]string { + return map_ConfigMapProjection +} + +var map_ConfigMapVolumeSource = map[string]string{ + "": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.", + "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "optional": "Specify whether the ConfigMap or it's keys must be defined", +} + +func (ConfigMapVolumeSource) SwaggerDoc() map[string]string { + return map_ConfigMapVolumeSource +} + +var map_Container = map[string]string{ + "": "A single application container that you want to run within a pod.", + "name": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + "image": "Docker image name. More info: http://kubernetes.io/docs/user-guide/images", + "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands", + "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands", + "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "ports": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. 
Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "env": "List of environment variables to set in the container. Cannot be updated.", + "resources": "Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources", + "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes", + "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes", + "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", + "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/images#updating-images", + "securityContext": "Security options the pod should run with. More info: http://releases.k8s.io/HEAD/docs/design/security_context.md", + "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. 
Default is false", + "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", +} + +func (Container) SwaggerDoc() map[string]string { + return map_Container +} + +var map_ContainerImage = map[string]string{ + "": "Describe a container image", + "names": "Names by which this image is known. e.g. [\"gcr.io/google_containers/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]", + "sizeBytes": "The size of the image in bytes.", +} + +func (ContainerImage) SwaggerDoc() map[string]string { + return map_ContainerImage +} + +var map_ContainerPort = map[string]string{ + "": "ContainerPort represents a network port in a single container.", + "name": "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.", + "hostPort": "Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.", + "containerPort": "Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.", + "protocol": "Protocol for port. Must be UDP or TCP. Defaults to \"TCP\".", + "hostIP": "What host IP to bind the external port to.", +} + +func (ContainerPort) SwaggerDoc() map[string]string { + return map_ContainerPort +} + +var map_ContainerState = map[string]string{ + "": "ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.", + "waiting": "Details about a waiting container", + "running": "Details about a running container", + "terminated": "Details about a terminated container", +} + +func (ContainerState) SwaggerDoc() map[string]string { + return map_ContainerState +} + +var map_ContainerStateRunning = map[string]string{ + "": "ContainerStateRunning is a running state of a container.", + "startedAt": "Time at which the container was last (re-)started", +} + +func (ContainerStateRunning) SwaggerDoc() map[string]string { + return map_ContainerStateRunning +} + +var map_ContainerStateTerminated = map[string]string{ + "": "ContainerStateTerminated is a terminated state of a container.", + "exitCode": "Exit status from the last termination of the container", + "signal": "Signal from the last termination of the container", + "reason": "(brief) reason from the last termination of the container", + "message": "Message regarding the last termination of the container", + "startedAt": "Time at which previous execution of the container started", + "finishedAt": "Time at which the container last terminated", + "containerID": "Container's ID in the format 'docker://'", +} + +func (ContainerStateTerminated) SwaggerDoc() map[string]string { + return map_ContainerStateTerminated +} + +var map_ContainerStateWaiting = map[string]string{ + "": "ContainerStateWaiting is a waiting state of a container.", + "reason": "(brief) reason the container is not yet running.", + "message": "Message regarding why the container is not yet running.", +} + +func (ContainerStateWaiting) SwaggerDoc() map[string]string { + return map_ContainerStateWaiting +} + +var map_ContainerStatus = map[string]string{ + "": "ContainerStatus contains details for the current status of this container.", + "name": "This must be a DNS_LABEL. Each container in a pod must have a unique name. 
Cannot be updated.", + "state": "Details about the container's current condition.", + "lastState": "Details about the container's last termination condition.", + "ready": "Specifies whether the container has passed its readiness probe.", + "restartCount": "The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.", + "image": "The image the container is running. More info: http://kubernetes.io/docs/user-guide/images", + "imageID": "ImageID of the container's image.", + "containerID": "Container's ID in the format 'docker://'. More info: http://kubernetes.io/docs/user-guide/container-environment#container-information", +} + +func (ContainerStatus) SwaggerDoc() map[string]string { + return map_ContainerStatus +} + +var map_DaemonEndpoint = map[string]string{ + "": "DaemonEndpoint contains information about a single Daemon endpoint.", + "Port": "Port number of the given endpoint.", +} + +func (DaemonEndpoint) SwaggerDoc() map[string]string { + return map_DaemonEndpoint +} + +var map_DeleteOptions = map[string]string{ + "": "DeleteOptions may be provided when deleting an API object DEPRECATED: This type has been moved to meta/v1 and will be removed soon.", + "gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", + "preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", + "orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + "PropagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.", +} + +func (DeleteOptions) SwaggerDoc() map[string]string { + return map_DeleteOptions +} + +var map_DownwardAPIProjection = map[string]string{ + "": "Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.", + "items": "Items is a list of DownwardAPIVolume file", +} + +func (DownwardAPIProjection) SwaggerDoc() map[string]string { + return map_DownwardAPIProjection +} + +var map_DownwardAPIVolumeFile = map[string]string{ + "": "DownwardAPIVolumeFile represents information to create the file containing the pod field", + "path": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. 
The first item of the relative path must not start with '..'", + "fieldRef": "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.", + "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", + "mode": "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", +} + +func (DownwardAPIVolumeFile) SwaggerDoc() map[string]string { + return map_DownwardAPIVolumeFile +} + +var map_DownwardAPIVolumeSource = map[string]string{ + "": "DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.", + "items": "Items is a list of downward API volume file", + "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", +} + +func (DownwardAPIVolumeSource) SwaggerDoc() map[string]string { + return map_DownwardAPIVolumeSource +} + +var map_EmptyDirVolumeSource = map[string]string{ + "": "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.", + "medium": "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir", +} + +func (EmptyDirVolumeSource) SwaggerDoc() map[string]string { + return map_EmptyDirVolumeSource +} + +var map_EndpointAddress = map[string]string{ + "": "EndpointAddress is a tuple that describes single IP address.", + "ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready.", + "hostname": "The Hostname of this endpoint", + "nodeName": "Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.", + "targetRef": "Reference to object providing the endpoint.", +} + +func (EndpointAddress) SwaggerDoc() map[string]string { + return map_EndpointAddress +} + +var map_EndpointPort = map[string]string{ + "": "EndpointPort is a tuple that describes a single port.", + "name": "The name of this port (corresponds to ServicePort.Name). Must be a DNS_LABEL. Optional only if one port is defined.", + "port": "The port number of the endpoint.", + "protocol": "The IP protocol for this port. Must be UDP or TCP. Default is TCP.", +} + +func (EndpointPort) SwaggerDoc() map[string]string { + return map_EndpointPort +} + +var map_EndpointSubset = map[string]string{ + "": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. 
For example, given:\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n }\nThe resulting set of endpoints can be viewed as:\n a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n b: [ 10.10.1.1:309, 10.10.2.2:309 ]", + "addresses": "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.", + "notReadyAddresses": "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.", + "ports": "Port numbers available on the related IP addresses.", +} + +func (EndpointSubset) SwaggerDoc() map[string]string { + return map_EndpointSubset +} + +var map_Endpoints = map[string]string{ + "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "subsets": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.", +} + +func (Endpoints) SwaggerDoc() map[string]string { + return map_Endpoints +} + +var map_EndpointsList = map[string]string{ + "": "EndpointsList is a list of endpoints.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of endpoints.", +} + +func (EndpointsList) SwaggerDoc() map[string]string { + return map_EndpointsList +} + +var map_EnvFromSource = map[string]string{ + "": "EnvFromSource represents the source of a set of ConfigMaps", + "prefix": "An optional identifer to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.", + "configMapRef": "The ConfigMap to select from", + "secretRef": "The Secret to select from", +} + +func (EnvFromSource) SwaggerDoc() map[string]string { + return map_EnvFromSource +} + +var map_EnvVar = map[string]string{ + "": "EnvVar represents an environment variable present in a Container.", + "name": "Name of the environment variable. Must be a C_IDENTIFIER.", + "value": "Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".", + "valueFrom": "Source for the environment variable's value. 
Cannot be used if value is not empty.", +} + +func (EnvVar) SwaggerDoc() map[string]string { + return map_EnvVar +} + +var map_EnvVarSource = map[string]string{ + "": "EnvVarSource represents a source for the value of an EnvVar.", + "fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.podIP.", + "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", + "configMapKeyRef": "Selects a key of a ConfigMap.", + "secretKeyRef": "Selects a key of a secret in the pod's namespace", +} + +func (EnvVarSource) SwaggerDoc() map[string]string { + return map_EnvVarSource +} + +var map_Event = map[string]string{ + "": "Event is a report of an event somewhere in the cluster.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "involvedObject": "The object that this event is about.", + "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", + "message": "A human-readable description of the status of this operation.", + "source": "The component reporting this event. Should be a short machine understandable string.", + "firstTimestamp": "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)", + "lastTimestamp": "The time at which the most recent occurrence of this event was recorded.", + "count": "The number of times this event has occurred.", + "type": "Type of this event (Normal, Warning), new types could be added in the future", +} + +func (Event) SwaggerDoc() map[string]string { + return map_Event +} + +var map_EventList = map[string]string{ + "": "EventList is a list of events.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of events", +} + +func (EventList) SwaggerDoc() map[string]string { + return map_EventList +} + +var map_EventSource = map[string]string{ + "": "EventSource contains information for an event.", + "component": "Component from which the event is generated.", + "host": "Node name on which the event is generated.", +} + +func (EventSource) SwaggerDoc() map[string]string { + return map_EventSource +} + +var map_ExecAction = map[string]string{ + "": "ExecAction describes a \"run in container\" action.", + "command": "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.", +} + +func (ExecAction) SwaggerDoc() map[string]string { + return map_ExecAction +} + +var map_FCVolumeSource = map[string]string{ + "": "Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.", + "targetWWNs": "Required: FC target worldwide names (WWNs)", + "lun": "Required: FC target lun number", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", +} + +func (FCVolumeSource) SwaggerDoc() map[string]string { + return map_FCVolumeSource +} + +var map_FlexVolumeSource = map[string]string{ + "": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + "driver": "Driver is the name of the driver to use for this volume.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", + "secretRef": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "options": "Optional: Extra command options if any.", +} + +func (FlexVolumeSource) SwaggerDoc() map[string]string { + return map_FlexVolumeSource +} + +var map_FlockerVolumeSource = map[string]string{ + "": "Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.", + "datasetName": "Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated", + "datasetUUID": "UUID of the dataset. This is unique identifier of a Flocker dataset", +} + +func (FlockerVolumeSource) SwaggerDoc() map[string]string { + return map_FlockerVolumeSource +} + +var map_GCEPersistentDiskVolumeSource = map[string]string{ + "": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.", + "pdName": "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", + "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", +} + +func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string { + return map_GCEPersistentDiskVolumeSource +} + +var map_GitRepoVolumeSource = map[string]string{ + "": "Represents a volume that is populated with the contents of a git repository. 
Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.", + "repository": "Repository URL", + "revision": "Commit hash for the specified revision.", + "directory": "Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.", +} + +func (GitRepoVolumeSource) SwaggerDoc() map[string]string { + return map_GitRepoVolumeSource +} + +var map_GlusterfsVolumeSource = map[string]string{ + "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", + "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "path": "Path is the Glusterfs volume path. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", +} + +func (GlusterfsVolumeSource) SwaggerDoc() map[string]string { + return map_GlusterfsVolumeSource +} + +var map_HTTPGetAction = map[string]string{ + "": "HTTPGetAction describes an action based on HTTP Get requests.", + "path": "Path to access on the HTTP server.", + "port": "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.", + "host": "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.", + "scheme": "Scheme to use for connecting to the host. Defaults to HTTP.", + "httpHeaders": "Custom headers to set in the request. HTTP allows repeated headers.", +} + +func (HTTPGetAction) SwaggerDoc() map[string]string { + return map_HTTPGetAction +} + +var map_HTTPHeader = map[string]string{ + "": "HTTPHeader describes a custom header to be used in HTTP probes", + "name": "The header field name", + "value": "The header field value", +} + +func (HTTPHeader) SwaggerDoc() map[string]string { + return map_HTTPHeader +} + +var map_Handler = map[string]string{ + "": "Handler defines a specific action that should be taken", + "exec": "One and only one of the following should be specified. Exec specifies the action to take.", + "httpGet": "HTTPGet specifies the http request to perform.", + "tcpSocket": "TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported", +} + +func (Handler) SwaggerDoc() map[string]string { + return map_Handler +} + +var map_HostPathVolumeSource = map[string]string{ + "": "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.", + "path": "Path of the directory on the host. More info: http://kubernetes.io/docs/user-guide/volumes#hostpath", +} + +func (HostPathVolumeSource) SwaggerDoc() map[string]string { + return map_HostPathVolumeSource +} + +var map_ISCSIVolumeSource = map[string]string{ + "": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", + "targetPortal": "iSCSI target portal. 
The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "iqn": "Target iSCSI Qualified Name.", + "lun": "iSCSI target lun number.", + "iscsiInterface": "Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#iscsi", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", + "portals": "iSCSI target portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", +} + +func (ISCSIVolumeSource) SwaggerDoc() map[string]string { + return map_ISCSIVolumeSource +} + +var map_KeyToPath = map[string]string{ + "": "Maps a string key to a path within a volume.", + "key": "The key to project.", + "path": "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.", + "mode": "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", +} + +func (KeyToPath) SwaggerDoc() map[string]string { + return map_KeyToPath +} + +var map_Lifecycle = map[string]string{ + "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", + "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details", + "preStop": "PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details", +} + +func (Lifecycle) SwaggerDoc() map[string]string { + return map_Lifecycle +} + +var map_LimitRange = map[string]string{ + "": "LimitRange sets resource usage limits for each kind of resource in a Namespace.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the limits enforced. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (LimitRange) SwaggerDoc() map[string]string { + return map_LimitRange +} + +var map_LimitRangeItem = map[string]string{ + "": "LimitRangeItem defines a min/max usage limit for any resource that matches on kind.", + "type": "Type of resource that this limit applies to.", + "max": "Max usage constraints on this kind by resource name.", + "min": "Min usage constraints on this kind by resource name.", + "default": "Default resource requirement limit value by resource name if resource limit is omitted.", + "defaultRequest": "DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.", + "maxLimitRequestRatio": "MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.", +} + +func (LimitRangeItem) SwaggerDoc() map[string]string { + return map_LimitRangeItem +} + +var map_LimitRangeList = map[string]string{ + "": "LimitRangeList is a list of LimitRange items.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "Items is a list of LimitRange objects. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_limit_range.md", +} + +func (LimitRangeList) SwaggerDoc() map[string]string { + return map_LimitRangeList +} + +var map_LimitRangeSpec = map[string]string{ + "": "LimitRangeSpec defines a min/max usage limit for resources that match on kind.", + "limits": "Limits is the list of LimitRangeItem objects that are enforced.", +} + +func (LimitRangeSpec) SwaggerDoc() map[string]string { + return map_LimitRangeSpec +} + +var map_List = map[string]string{ + "": "List holds a list of objects, which may not be known by the server.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of objects", +} + +func (List) SwaggerDoc() map[string]string { + return map_List +} + +var map_ListOptions = map[string]string{ + "": "ListOptions is the query options to a standard REST list call. DEPRECATED: This type has been moved to meta/v1 and will be removed soon.", + "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "timeoutSeconds": "Timeout for the list/watch call.", +} + +func (ListOptions) SwaggerDoc() map[string]string { + return map_ListOptions +} + +var map_LoadBalancerIngress = map[string]string{ + "": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", + "ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)", + "hostname": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)", +} + +func (LoadBalancerIngress) SwaggerDoc() map[string]string { + return map_LoadBalancerIngress +} + +var map_LoadBalancerStatus = map[string]string{ + "": "LoadBalancerStatus represents the status of a load-balancer.", + "ingress": "Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points.", +} + +func (LoadBalancerStatus) SwaggerDoc() map[string]string { + return map_LoadBalancerStatus +} + +var map_LocalObjectReference = map[string]string{ + "": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", + "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", +} + +func (LocalObjectReference) SwaggerDoc() map[string]string { + return map_LocalObjectReference +} + +var map_NFSVolumeSource = map[string]string{ + "": "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.", + "server": "Server is the hostname or IP address of the NFS server. More info: http://kubernetes.io/docs/user-guide/volumes#nfs", + "path": "Path that is exported by the NFS server. More info: http://kubernetes.io/docs/user-guide/volumes#nfs", + "readOnly": "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: http://kubernetes.io/docs/user-guide/volumes#nfs", +} + +func (NFSVolumeSource) SwaggerDoc() map[string]string { + return map_NFSVolumeSource +} + +var map_Namespace = map[string]string{ + "": "Namespace provides a scope for Names. Use of multiple namespaces is optional.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the behavior of the Namespace. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status describes the current status of a Namespace. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (Namespace) SwaggerDoc() map[string]string { + return map_Namespace +} + +var map_NamespaceList = map[string]string{ + "": "NamespaceList is a list of Namespaces.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "Items is the list of Namespace objects in the list. 
More info: http://kubernetes.io/docs/user-guide/namespaces", +} + +func (NamespaceList) SwaggerDoc() map[string]string { + return map_NamespaceList +} + +var map_NamespaceSpec = map[string]string{ + "": "NamespaceSpec describes the attributes on a Namespace.", + "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers", +} + +func (NamespaceSpec) SwaggerDoc() map[string]string { + return map_NamespaceSpec +} + +var map_NamespaceStatus = map[string]string{ + "": "NamespaceStatus is information about the current status of a Namespace.", + "phase": "Phase is the current lifecycle phase of the namespace. More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#phases", +} + +func (NamespaceStatus) SwaggerDoc() map[string]string { + return map_NamespaceStatus +} + +var map_Node = map[string]string{ + "": "Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd).", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a node. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the node. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (Node) SwaggerDoc() map[string]string { + return map_Node +} + +var map_NodeAddress = map[string]string{ + "": "NodeAddress contains information for the node's address.", + "type": "Node address type, one of Hostname, ExternalIP or InternalIP.", + "address": "The node address.", +} + +func (NodeAddress) SwaggerDoc() map[string]string { + return map_NodeAddress +} + +var map_NodeAffinity = map[string]string{ + "": "Node affinity is a group of node affinity scheduling rules.", + "requiredDuringSchedulingIgnoredDuringExecution": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.", + "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.", +} + +func (NodeAffinity) SwaggerDoc() map[string]string { + return map_NodeAffinity +} + +var map_NodeCondition = map[string]string{ + "": "NodeCondition contains condition information for a node.", + "type": "Type of node condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastHeartbeatTime": "Last time we got an update on a given condition.", + "lastTransitionTime": "Last time the condition transit from one status to another.", + "reason": "(brief) reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (NodeCondition) SwaggerDoc() map[string]string { + return map_NodeCondition +} + +var map_NodeDaemonEndpoints = map[string]string{ + "": "NodeDaemonEndpoints lists ports opened by daemons running on the Node.", + "kubeletEndpoint": "Endpoint on which Kubelet is listening.", +} + +func (NodeDaemonEndpoints) SwaggerDoc() map[string]string { + return map_NodeDaemonEndpoints +} + +var map_NodeList = map[string]string{ + "": "NodeList is the whole list of all Nodes which have been registered with master.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of nodes", +} + +func (NodeList) SwaggerDoc() map[string]string { + return map_NodeList +} + +var map_NodeProxyOptions = map[string]string{ + "": "NodeProxyOptions is the query options to a Node's proxy call.", + "path": "Path is the URL path to use for the current proxy request to node.", +} + +func (NodeProxyOptions) SwaggerDoc() map[string]string { + return map_NodeProxyOptions +} + +var map_NodeSelector = map[string]string{ + "": "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.", + "nodeSelectorTerms": "Required. A list of node selector terms. The terms are ORed.", +} + +func (NodeSelector) SwaggerDoc() map[string]string { + return map_NodeSelector +} + +var map_NodeSelectorRequirement = map[string]string{ + "": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", + "key": "The label key that the selector applies to.", + "operator": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.", + "values": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.", +} + +func (NodeSelectorRequirement) SwaggerDoc() map[string]string { + return map_NodeSelectorRequirement +} + +var map_NodeSelectorTerm = map[string]string{ + "": "A null or empty node selector term matches no objects.", + "matchExpressions": "Required. A list of node selector requirements. 
The requirements are ANDed.", +} + +func (NodeSelectorTerm) SwaggerDoc() map[string]string { + return map_NodeSelectorTerm +} + +var map_NodeSpec = map[string]string{ + "": "NodeSpec describes the attributes that a node is created with.", + "podCIDR": "PodCIDR represents the pod IP range assigned to the node.", + "externalID": "External ID of the node assigned by some machine database (e.g. a cloud provider). Deprecated.", + "providerID": "ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>", + "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration", + "taints": "If specified, the node's taints.", +} + +func (NodeSpec) SwaggerDoc() map[string]string { + return map_NodeSpec +} + +var map_NodeStatus = map[string]string{ + "": "NodeStatus is information about the current status of a node.", + "capacity": "Capacity represents the total resources of a node. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity for more details.", + "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", + "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-phase The field is never populated, and now is deprecated.", + "conditions": "Conditions is an array of current observed node conditions. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-condition", + "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-addresses", + "daemonEndpoints": "Endpoints of daemons running on the Node.", + "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info", + "images": "List of container images on this node", + "volumesInUse": "List of attachable volumes in use (mounted) by the node.", + "volumesAttached": "List of volumes that are attached to the node.", +} + +func (NodeStatus) SwaggerDoc() map[string]string { + return map_NodeStatus +} + +var map_NodeSystemInfo = map[string]string{ + "": "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.", + "machineID": "MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html", + "systemUUID": "SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html", + "bootID": "Boot ID reported by the node.", + "kernelVersion": "Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).", + "osImage": "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).", + "containerRuntimeVersion": "ContainerRuntime Version reported by the node through runtime remote API (e.g. 
docker://1.5.0).", + "kubeletVersion": "Kubelet Version reported by the node.", + "kubeProxyVersion": "KubeProxy Version reported by the node.", + "operatingSystem": "The Operating System reported by the node", + "architecture": "The Architecture reported by the node", +} + +func (NodeSystemInfo) SwaggerDoc() map[string]string { + return map_NodeSystemInfo +} + +var map_ObjectFieldSelector = map[string]string{ + "": "ObjectFieldSelector selects an APIVersioned field of an object.", + "apiVersion": "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".", + "fieldPath": "Path of the field to select in the specified API version.", +} + +func (ObjectFieldSelector) SwaggerDoc() map[string]string { + return map_ObjectFieldSelector +} + +var map_ObjectMeta = map[string]string{ + "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon.", + "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency", + "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", + "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", + "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency", + "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", + "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", + "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", + "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", + "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", + "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", + "clusterName": "The name of the cluster which the object belongs to. 
This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", +} + +func (ObjectMeta) SwaggerDoc() map[string]string { + return map_ObjectMeta +} + +var map_ObjectReference = map[string]string{ + "": "ObjectReference contains enough information to let you inspect or modify the referred object.", + "kind": "Kind of the referent. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "namespace": "Namespace of the referent. More info: http://kubernetes.io/docs/user-guide/namespaces", + "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "uid": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "apiVersion": "API version of the referent.", + "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency", + "fieldPath": "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.", +} + +func (ObjectReference) SwaggerDoc() map[string]string { + return map_ObjectReference +} + +var map_PersistentVolume = map[string]string{ + "": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: http://kubernetes.io/docs/user-guide/persistent-volumes", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes", + "status": "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes", +} + +func (PersistentVolume) SwaggerDoc() map[string]string { + return map_PersistentVolume +} + +var map_PersistentVolumeClaim = map[string]string{ + "": "PersistentVolumeClaim is a user's request for and claim to a persistent volume", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the desired characteristics of a volume requested by a pod author. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims", + "status": "Status represents the current information/status of a persistent volume claim. Read-only. 
More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims", +} + +func (PersistentVolumeClaim) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaim +} + +var map_PersistentVolumeClaimList = map[string]string{ + "": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "A list of persistent volume claims. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims", +} + +func (PersistentVolumeClaimList) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimList +} + +var map_PersistentVolumeClaimSpec = map[string]string{ + "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", + "accessModes": "AccessModes contains the desired access modes the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1", + "selector": "A label query over volumes to consider for binding.", + "resources": "Resources represents the minimum resources the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources", + "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", + "storageClassName": "Name of the StorageClass required by the claim. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1", +} + +func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimSpec +} + +var map_PersistentVolumeClaimStatus = map[string]string{ + "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", + "phase": "Phase represents the current phase of PersistentVolumeClaim.", + "accessModes": "AccessModes contains the actual access modes the volume backing the PVC has. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1", + "capacity": "Represents the actual resources of the underlying volume.", +} + +func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimStatus +} + +var map_PersistentVolumeClaimVolumeSource = map[string]string{ + "": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).", + "claimName": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims", + "readOnly": "Will force the ReadOnly setting in VolumeMounts. Default false.", +} + +func (PersistentVolumeClaimVolumeSource) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimVolumeSource +} + +var map_PersistentVolumeList = map[string]string{ + "": "PersistentVolumeList is a list of PersistentVolume items.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of persistent volumes. 
More info: http://kubernetes.io/docs/user-guide/persistent-volumes", +} + +func (PersistentVolumeList) SwaggerDoc() map[string]string { + return map_PersistentVolumeList +} + +var map_PersistentVolumeSource = map[string]string{ + "": "PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. Exactly one of its members must be set.", + "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", + "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", + "hostPath": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: http://kubernetes.io/docs/user-guide/volumes#hostpath", + "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", + "nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: http://kubernetes.io/docs/user-guide/volumes#nfs", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", + "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running", + "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. 
This is an alpha feature and may change in future.", + "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", +} + +func (PersistentVolumeSource) SwaggerDoc() map[string]string { + return map_PersistentVolumeSource +} + +var map_PersistentVolumeSpec = map[string]string{ + "": "PersistentVolumeSpec is the specification of a persistent volume.", + "capacity": "A description of the persistent volume's resources and capacity. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity", + "accessModes": "AccessModes contains all ways the volume can be mounted. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes", + "claimRef": "ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#binding", + "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy", + "storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.", +} + +func (PersistentVolumeSpec) SwaggerDoc() map[string]string { + return map_PersistentVolumeSpec +} + +var map_PersistentVolumeStatus = map[string]string{ + "": "PersistentVolumeStatus is the current status of a persistent volume.", + "phase": "Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#phase", + "message": "A human-readable message indicating details about why the volume is in this state.", + "reason": "Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", +} + +func (PersistentVolumeStatus) SwaggerDoc() map[string]string { + return map_PersistentVolumeStatus +} + +var map_PhotonPersistentDiskVolumeSource = map[string]string{ + "": "Represents a Photon Controller persistent disk resource.", + "pdID": "ID that identifies Photon Controller persistent disk", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", +} + +func (PhotonPersistentDiskVolumeSource) SwaggerDoc() map[string]string { + return map_PhotonPersistentDiskVolumeSource +} + +var map_Pod = map[string]string{ + "": "Pod is a collection of containers that can run on a host. 
This resource is created by clients and scheduled onto hosts.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (Pod) SwaggerDoc() map[string]string { + return map_Pod +} + +var map_PodAffinity = map[string]string{ + "": "Pod affinity is a group of inter pod affinity scheduling rules.", + "requiredDuringSchedulingIgnoredDuringExecution": "NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system will try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:\"requiredDuringSchedulingRequiredDuringExecution,omitempty\"` If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", +} + +func (PodAffinity) SwaggerDoc() map[string]string { + return map_PodAffinity +} + +var map_PodAffinityTerm = map[string]string{ + "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running", + "labelSelector": "A label query over a set of resources, in this case pods.", + "namespaces": "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"", + "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as \"all topologies\" (\"all topologies\" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.", +} + +func (PodAffinityTerm) SwaggerDoc() map[string]string { + return map_PodAffinityTerm +} + +var map_PodAntiAffinity = map[string]string{ + "": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.", + "requiredDuringSchedulingIgnoredDuringExecution": "NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system will try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:\"requiredDuringSchedulingRequiredDuringExecution,omitempty\"` If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", +} + +func (PodAntiAffinity) SwaggerDoc() map[string]string { + return map_PodAntiAffinity +} + +var map_PodAttachOptions = map[string]string{ + "": "PodAttachOptions is the query options to a Pod's remote attach call.", + "stdin": "Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.", + "stdout": "Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true.", + "stderr": "Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true.", + "tty": "TTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false.", + "container": "The container in which to execute the command. Defaults to only container if there is only one container in the pod.", +} + +func (PodAttachOptions) SwaggerDoc() map[string]string { + return map_PodAttachOptions +} + +var map_PodCondition = map[string]string{ + "": "PodCondition contains details for the current condition of this pod.", + "type": "Type is the type of the condition. Currently only Ready. More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions", + "status": "Status is the status of the condition. Can be True, False, Unknown. More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions", + "lastProbeTime": "Last time we probed the condition.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "Unique, one-word, CamelCase reason for the condition's last transition.", + "message": "Human-readable message indicating details about last transition.", +} + +func (PodCondition) SwaggerDoc() map[string]string { + return map_PodCondition +} + +var map_PodExecOptions = map[string]string{ + "": "PodExecOptions is the query options to a Pod's remote exec call.", + "stdin": "Redirect the standard input stream of the pod for this call. Defaults to false.", + "stdout": "Redirect the standard output stream of the pod for this call. Defaults to true.", + "stderr": "Redirect the standard error stream of the pod for this call. Defaults to true.", + "tty": "TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.", + "container": "Container in which to execute the command. Defaults to only container if there is only one container in the pod.", + "command": "Command is the remote command to execute. argv array. Not executed within a shell.", +} + +func (PodExecOptions) SwaggerDoc() map[string]string { + return map_PodExecOptions +} + +var map_PodList = map[string]string{ + "": "PodList is a list of Pods.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of pods. More info: http://kubernetes.io/docs/user-guide/pods", +} + +func (PodList) SwaggerDoc() map[string]string { + return map_PodList +} + +var map_PodLogOptions = map[string]string{ + "": "PodLogOptions is the query options for a Pod's logs REST call.", + "container": "The container for which to stream logs. 
Defaults to only container if there is one container in the pod.", + "follow": "Follow the log stream of the pod. Defaults to false.", + "previous": "Return previous terminated container logs. Defaults to false.", + "sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", + "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", + "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", +} + +func (PodLogOptions) SwaggerDoc() map[string]string { + return map_PodLogOptions +} + +var map_PodPortForwardOptions = map[string]string{ + "": "PodPortForwardOptions is the query options to a Pod's port forward call when using WebSockets. The `port` query parameter must specify the port or ports (comma separated) to forward over. Port forwarding over SPDY does not use these options. It requires the port to be passed in the `port` header as part of request.", + "ports": "List of ports to forward Required when using WebSockets", +} + +func (PodPortForwardOptions) SwaggerDoc() map[string]string { + return map_PodPortForwardOptions +} + +var map_PodProxyOptions = map[string]string{ + "": "PodProxyOptions is the query options to a Pod's proxy call.", + "path": "Path is the URL path to use for the current proxy request to pod.", +} + +func (PodProxyOptions) SwaggerDoc() map[string]string { + return map_PodProxyOptions +} + +var map_PodSecurityContext = map[string]string{ + "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", + "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.", + "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----", +} + +func (PodSecurityContext) SwaggerDoc() map[string]string { + return map_PodSecurityContext +} + +var map_PodSignature = map[string]string{ + "": "Describes the class of pods that should avoid this node. Exactly one field should be set.", + "podController": "Reference to controller whose pods should avoid this node.", +} + +func (PodSignature) SwaggerDoc() map[string]string { + return map_PodSignature +} + +var map_PodSpec = map[string]string{ + "": "PodSpec is a description of a pod.", + "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: http://kubernetes.io/docs/user-guide/volumes", + "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers", + "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers", + "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: http://kubernetes.io/docs/user-guide/pod-states#restartpolicy", + "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", + "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", + "dnsPolicy": "Set DNS policy for containers within the pod. One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. 
Defaults to \"ClusterFirst\". To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", + "nodeSelector": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: http://kubernetes.io/docs/user-guide/node-selection/README", + "serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md", + "serviceAccount": "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", + "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.", + "nodeName": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.", + "hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", + "hostPID": "Use the host's pid namespace. Optional: Default to false.", + "hostIPC": "Use the host's ipc namespace. Optional: Default to false.", + "securityContext": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", + "imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod", + "hostname": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.", + "subdomain": "If specified, the fully qualified Pod hostname will be \"...svc.\". If not specified, the pod will not have a domainname at all.", + "affinity": "If specified, the pod's scheduling constraints", + "schedulerName": "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", + "tolerations": "If specified, the pod's tolerations.", +} + +func (PodSpec) SwaggerDoc() map[string]string { + return map_PodSpec +} + +var map_PodStatus = map[string]string{ + "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system.", + "phase": "Current condition of the pod. More info: http://kubernetes.io/docs/user-guide/pod-states#pod-phase", + "conditions": "Current service state of pod. More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions", + "message": "A human readable message indicating details about why the pod is in this condition.", + "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'", + "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", + "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", + "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. 
This is before the Kubelet pulled the container image(s) for the pod.", + "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses", + "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses", + "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md", +} + +func (PodStatus) SwaggerDoc() map[string]string { + return map_PodStatus +} + +var map_PodStatusResult = map[string]string{ + "": "PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (PodStatusResult) SwaggerDoc() map[string]string { + return map_PodStatusResult +} + +var map_PodTemplate = map[string]string{ + "": "PodTemplate describes a template for creating copies of a predefined pod.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "template": "Template defines the pods that will be created from this pod template. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (PodTemplate) SwaggerDoc() map[string]string { + return map_PodTemplate +} + +var map_PodTemplateList = map[string]string{ + "": "PodTemplateList is a list of PodTemplates.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of pod templates", +} + +func (PodTemplateList) SwaggerDoc() map[string]string { + return map_PodTemplateList +} + +var map_PodTemplateSpec = map[string]string{ + "": "PodTemplateSpec describes the data a pod should have when created from a template", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (PodTemplateSpec) SwaggerDoc() map[string]string { + return map_PodTemplateSpec +} + +var map_PortworxVolumeSource = map[string]string{ + "": "PortworxVolumeSource represents a Portworx volume resource.", + "volumeID": "VolumeID uniquely identifies a Portworx volume", + "fsType": "FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", +} + +func (PortworxVolumeSource) SwaggerDoc() map[string]string { + return map_PortworxVolumeSource +} + +var map_Preconditions = map[string]string{ + "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", + "uid": "Specifies the target UID.", +} + +func (Preconditions) SwaggerDoc() map[string]string { + return map_Preconditions +} + +var map_PreferAvoidPodsEntry = map[string]string{ + "": "Describes a class of pods that should avoid this node.", + "podSignature": "The class of pods.", + "evictionTime": "Time at which this entry was added to the list.", + "reason": "(brief) reason why this entry was added to the list.", + "message": "Human readable message indicating why this entry was added to the list.", +} + +func (PreferAvoidPodsEntry) SwaggerDoc() map[string]string { + return map_PreferAvoidPodsEntry +} + +var map_PreferredSchedulingTerm = map[string]string{ + "": "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).", + "weight": "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.", + "preference": "A node selector term, associated with the corresponding weight.", +} + +func (PreferredSchedulingTerm) SwaggerDoc() map[string]string { + return map_PreferredSchedulingTerm +} + +var map_Probe = map[string]string{ + "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", + "initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes", + "timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes", + "periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", + "successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.", + "failureThreshold": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", +} + +func (Probe) SwaggerDoc() map[string]string { + return map_Probe +} + +var map_ProjectedVolumeSource = map[string]string{ + "": "Represents a projected volume source", + "sources": "list of volume projections", + "defaultMode": "Mode bits to use on created files by default. Must be a value between 0 and 0777. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", +} + +func (ProjectedVolumeSource) SwaggerDoc() map[string]string { + return map_ProjectedVolumeSource +} + +var map_QuobyteVolumeSource = map[string]string{ + "": "Represents a Quobyte mount that lasts the lifetime of a pod. 
Quobyte volumes do not support ownership management or SELinux relabeling.", + "registry": "Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes", + "volume": "Volume is a string that references an already created Quobyte volume by name.", + "readOnly": "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.", + "user": "User to map volume access to Defaults to serivceaccount user", + "group": "Group to map volume access to Default is no group", +} + +func (QuobyteVolumeSource) SwaggerDoc() map[string]string { + return map_QuobyteVolumeSource +} + +var map_RBDVolumeSource = map[string]string{ + "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", + "monitors": "A collection of Ceph monitors. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "image": "The rados image name. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#rbd", + "pool": "The rados pool name. Default is rbd. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it.", + "user": "The rados user name. Default is admin. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", +} + +func (RBDVolumeSource) SwaggerDoc() map[string]string { + return map_RBDVolumeSource +} + +var map_RangeAllocation = map[string]string{ + "": "RangeAllocation is not a public type.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "range": "Range is string that identifies the range represented by 'data'.", + "data": "Data is a bit array containing all allocated addresses in the previous segment.", +} + +func (RangeAllocation) SwaggerDoc() map[string]string { + return map_RangeAllocation +} + +var map_ReplicationController = map[string]string{ + "": "ReplicationController represents the configuration of a replication controller.", + "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the replication controller. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (ReplicationController) SwaggerDoc() map[string]string { + return map_ReplicationController +} + +var map_ReplicationControllerCondition = map[string]string{ + "": "ReplicationControllerCondition describes the state of a replication controller at a certain point.", + "type": "Type of replication controller condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "The last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (ReplicationControllerCondition) SwaggerDoc() map[string]string { + return map_ReplicationControllerCondition +} + +var map_ReplicationControllerList = map[string]string{ + "": "ReplicationControllerList is a collection of replication controllers.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of replication controllers. More info: http://kubernetes.io/docs/user-guide/replication-controller", +} + +func (ReplicationControllerList) SwaggerDoc() map[string]string { + return map_ReplicationControllerList +} + +var map_ReplicationControllerSpec = map[string]string{ + "": "ReplicationControllerSpec is the specification of a replication controller.", + "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "selector": "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template", +} + +func (ReplicationControllerSpec) SwaggerDoc() map[string]string { + return map_ReplicationControllerSpec +} + +var map_ReplicationControllerStatus = map[string]string{ + "": "ReplicationControllerStatus represents the current status of a replication controller.", + "replicas": "Replicas is the most recently oberved number of replicas. 
More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller", + "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replication controller.", + "readyReplicas": "The number of ready replicas for this replication controller.", + "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replication controller.", + "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed replication controller.", + "conditions": "Represents the latest available observations of a replication controller's current state.", +} + +func (ReplicationControllerStatus) SwaggerDoc() map[string]string { + return map_ReplicationControllerStatus +} + +var map_ResourceFieldSelector = map[string]string{ + "": "ResourceFieldSelector represents container resources (cpu, memory) and their output format", + "containerName": "Container name: required for volumes, optional for env vars", + "resource": "Required: resource to select", + "divisor": "Specifies the output format of the exposed resources, defaults to \"1\"", +} + +func (ResourceFieldSelector) SwaggerDoc() map[string]string { + return map_ResourceFieldSelector +} + +var map_ResourceQuota = map[string]string{ + "": "ResourceQuota sets aggregate quota restrictions enforced per namespace", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the desired quota. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status defines the actual enforced quota and its current usage. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (ResourceQuota) SwaggerDoc() map[string]string { + return map_ResourceQuota +} + +var map_ResourceQuotaList = map[string]string{ + "": "ResourceQuotaList is a list of ResourceQuota items.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "Items is a list of ResourceQuota objects. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota", +} + +func (ResourceQuotaList) SwaggerDoc() map[string]string { + return map_ResourceQuotaList +} + +var map_ResourceQuotaSpec = map[string]string{ + "": "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.", + "hard": "Hard is the set of desired hard limits for each named resource. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota", + "scopes": "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.", +} + +func (ResourceQuotaSpec) SwaggerDoc() map[string]string { + return map_ResourceQuotaSpec +} + +var map_ResourceQuotaStatus = map[string]string{ + "": "ResourceQuotaStatus defines the enforced hard limits and observed use.", + "hard": "Hard is the set of enforced hard limits for each named resource. 
More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota", + "used": "Used is the current observed total usage of the resource in the namespace.", +} + +func (ResourceQuotaStatus) SwaggerDoc() map[string]string { + return map_ResourceQuotaStatus +} + +var map_ResourceRequirements = map[string]string{ + "": "ResourceRequirements describes the compute resource requirements.", + "limits": "Limits describes the maximum amount of compute resources allowed. More info: http://kubernetes.io/docs/user-guide/compute-resources/", + "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: http://kubernetes.io/docs/user-guide/compute-resources/", +} + +func (ResourceRequirements) SwaggerDoc() map[string]string { + return map_ResourceRequirements +} + +var map_SELinuxOptions = map[string]string{ + "": "SELinuxOptions are the labels to be applied to the container", + "user": "User is a SELinux user label that applies to the container.", + "role": "Role is a SELinux role label that applies to the container.", + "type": "Type is a SELinux type label that applies to the container.", + "level": "Level is SELinux level label that applies to the container.", +} + +func (SELinuxOptions) SwaggerDoc() map[string]string { + return map_SELinuxOptions +} + +var map_ScaleIOVolumeSource = map[string]string{ + "": "ScaleIOVolumeSource represents a persistent ScaleIO volume", + "gateway": "The host address of the ScaleIO API Gateway.", + "system": "The name of the storage system as configured in ScaleIO.", + "secretRef": "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.", + "sslEnabled": "Flag to enable/disable SSL communication with Gateway, default false", + "protectionDomain": "The name of the Protection Domain for the configured storage (defaults to \"default\").", + "storagePool": "The Storage Pool associated with the protection domain (defaults to \"default\").", + "storageMode": "Indicates whether the storage for a volume should be thick or thin (defaults to \"thin\").", + "volumeName": "The name of a volume already created in the ScaleIO system that is associated with this volume source.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", +} + +func (ScaleIOVolumeSource) SwaggerDoc() map[string]string { + return map_ScaleIOVolumeSource +} + +var map_Secret = map[string]string{ + "": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "data": "Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN or leading dot followed by valid DNS_SUBDOMAIN. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4", + "stringData": "stringData allows specifying non-binary secret data in string form. 
It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. It is never output when reading from the API.", + "type": "Used to facilitate programmatic handling of secret data.", +} + +func (Secret) SwaggerDoc() map[string]string { + return map_Secret +} + +var map_SecretEnvSource = map[string]string{ + "": "SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables.", + "optional": "Specify whether the Secret must be defined", +} + +func (SecretEnvSource) SwaggerDoc() map[string]string { + return map_SecretEnvSource +} + +var map_SecretKeySelector = map[string]string{ + "": "SecretKeySelector selects a key of a Secret.", + "key": "The key of the secret to select from. Must be a valid secret key.", + "optional": "Specify whether the Secret or it's key must be defined", +} + +func (SecretKeySelector) SwaggerDoc() map[string]string { + return map_SecretKeySelector +} + +var map_SecretList = map[string]string{ + "": "SecretList is a list of Secret.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "Items is a list of secret objects. More info: http://kubernetes.io/docs/user-guide/secrets", +} + +func (SecretList) SwaggerDoc() map[string]string { + return map_SecretList +} + +var map_SecretProjection = map[string]string{ + "": "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.", + "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "optional": "Specify whether the Secret or its key must be defined", +} + +func (SecretProjection) SwaggerDoc() map[string]string { + return map_SecretProjection +} + +var map_SecretVolumeSource = map[string]string{ + "": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.", + "secretName": "Name of the secret in the pod's namespace to use. More info: http://kubernetes.io/docs/user-guide/volumes#secrets", + "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. 
Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "optional": "Specify whether the Secret or it's keys must be defined", +} + +func (SecretVolumeSource) SwaggerDoc() map[string]string { + return map_SecretVolumeSource +} + +var map_SecurityContext = map[string]string{ + "": "SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.", + "capabilities": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.", + "privileged": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.", + "seLinuxOptions": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. Default is false.", +} + +func (SecurityContext) SwaggerDoc() map[string]string { + return map_SecurityContext +} + +var map_SerializedReference = map[string]string{ + "": "SerializedReference is a reference to serialized object.", + "reference": "The reference to an object in the system.", +} + +func (SerializedReference) SwaggerDoc() map[string]string { + return map_SerializedReference +} + +var map_Service = map[string]string{ + "": "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a service. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (Service) SwaggerDoc() map[string]string { + return map_Service +} + +var map_ServiceAccount = map[string]string{ + "": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", + "metadata": "Standard object's metadata. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: http://kubernetes.io/docs/user-guide/secrets", + "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: http://kubernetes.io/docs/user-guide/secrets#manually-specifying-an-imagepullsecret", + "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.", +} + +func (ServiceAccount) SwaggerDoc() map[string]string { + return map_ServiceAccount +} + +var map_ServiceAccountList = map[string]string{ + "": "ServiceAccountList is a list of ServiceAccount objects", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of ServiceAccounts. More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts", +} + +func (ServiceAccountList) SwaggerDoc() map[string]string { + return map_ServiceAccountList +} + +var map_ServiceList = map[string]string{ + "": "ServiceList holds a list of services.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of services", +} + +func (ServiceList) SwaggerDoc() map[string]string { + return map_ServiceList +} + +var map_ServicePort = map[string]string{ + "": "ServicePort contains information on service's port.", + "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.", + "protocol": "The IP protocol for this port. Supports \"TCP\" and \"UDP\". Default is TCP.", + "port": "The port that will be exposed by this service.", + "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: http://kubernetes.io/docs/user-guide/services#defining-a-service", + "nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: http://kubernetes.io/docs/user-guide/services#type--nodeport", +} + +func (ServicePort) SwaggerDoc() map[string]string { + return map_ServicePort +} + +var map_ServiceProxyOptions = map[string]string{ + "": "ServiceProxyOptions is the query options to a Service's proxy call.", + "path": "Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. 
For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.", +} + +func (ServiceProxyOptions) SwaggerDoc() map[string]string { + return map_ServiceProxyOptions +} + +var map_ServiceSpec = map[string]string{ + "": "ServiceSpec describes the attributes that a user creates on a service.", + "ports": "The list of ports that are exposed by this service. More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies", + "selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: http://kubernetes.io/docs/user-guide/services#overview", + "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies", + "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: http://kubernetes.io/docs/user-guide/services#overview", + "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system. A previous form of this functionality exists as the deprecatedPublicIPs field. When using this field, callers should also clear the deprecatedPublicIPs field.", + "deprecatedPublicIPs": "deprecatedPublicIPs is deprecated and replaced by the externalIPs field with almost the exact same semantics. This field is retained in the v1 API for compatibility until at least 8/20/2016. It will be removed from any new API revisions. If both deprecatedPublicIPs *and* externalIPs are set, deprecatedPublicIPs is used.", + "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. 
More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies", + "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", + "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: http://kubernetes.io/docs/user-guide/services-firewalls", + "externalName": "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid DNS name and requires Type to be ExternalName.", +} + +func (ServiceSpec) SwaggerDoc() map[string]string { + return map_ServiceSpec +} + +var map_ServiceStatus = map[string]string{ + "": "ServiceStatus represents the current status of a service.", + "loadBalancer": "LoadBalancer contains the current status of the load-balancer, if one is present.", +} + +func (ServiceStatus) SwaggerDoc() map[string]string { + return map_ServiceStatus +} + +var map_TCPSocketAction = map[string]string{ + "": "TCPSocketAction describes an action based on opening a socket", + "port": "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.", +} + +func (TCPSocketAction) SwaggerDoc() map[string]string { + return map_TCPSocketAction +} + +var map_Taint = map[string]string{ + "": "The node this Taint is attached to has the effect \"effect\" on any pod that that does not tolerate the Taint.", + "key": "Required. The taint key to be applied to a node.", + "value": "Required. The taint value corresponding to the taint key.", + "effect": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.", + "timeAdded": "TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints.", +} + +func (Taint) SwaggerDoc() map[string]string { + return map_Taint +} + +var map_Toleration = map[string]string{ + "": "The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator .", + "key": "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.", + "operator": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", + "value": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.", + "effect": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.", + "tolerationSeconds": "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. 
By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.", +} + +func (Toleration) SwaggerDoc() map[string]string { + return map_Toleration +} + +var map_Volume = map[string]string{ + "": "Volume represents a named volume in a pod that may be accessed by any container in the pod.", + "name": "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: http://kubernetes.io/docs/user-guide/identifiers#names", +} + +func (Volume) SwaggerDoc() map[string]string { + return map_Volume +} + +var map_VolumeMount = map[string]string{ + "": "VolumeMount describes a mounting of a Volume within a container.", + "name": "This must match the Name of a Volume.", + "readOnly": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", + "mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.", + "subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", +} + +func (VolumeMount) SwaggerDoc() map[string]string { + return map_VolumeMount +} + +var map_VolumeProjection = map[string]string{ + "": "Projection that may be projected along with other supported volume types", + "secret": "information about the secret data to project", + "downwardAPI": "information about the downwardAPI data to project", + "configMap": "information about the configMap data to project", +} + +func (VolumeProjection) SwaggerDoc() map[string]string { + return map_VolumeProjection +} + +var map_VolumeSource = map[string]string{ + "": "Represents the source of a volume to mount. Only one of its members may be specified.", + "hostPath": "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: http://kubernetes.io/docs/user-guide/volumes#hostpath", + "emptyDir": "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir", + "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", + "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", + "gitRepo": "GitRepo represents a git repository at a particular revision.", + "secret": "Secret represents a secret that should populate this volume. More info: http://kubernetes.io/docs/user-guide/volumes#secrets", + "nfs": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: http://kubernetes.io/docs/user-guide/volumes#nfs", + "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md", + "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. 
More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", + "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", + "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", + "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", + "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "configMap": "ConfigMap represents a configMap that should populate this volume", + "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "projected": "Items for all in one resources secrets, configmaps, and downward API", + "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", +} + +func (VolumeSource) SwaggerDoc() map[string]string { + return map_VolumeSource +} + +var map_VsphereVirtualDiskVolumeSource = map[string]string{ + "": "Represents a vSphere volume resource.", + "volumePath": "Path that identifies vSphere volume vmdk", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", +} + +func (VsphereVirtualDiskVolumeSource) SwaggerDoc() map[string]string { + return map_VsphereVirtualDiskVolumeSource +} + +var map_WeightedPodAffinityTerm = map[string]string{ + "": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", + "weight": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", + "podAffinityTerm": "Required. 
A pod affinity term, associated with the corresponding weight.", +} + +func (WeightedPodAffinityTerm) SwaggerDoc() map[string]string { + return map_WeightedPodAffinityTerm +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.conversion.go new file mode 100644 index 0000000000..c57c8cc252 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.conversion.go @@ -0,0 +1,4614 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + api "k8s.io/client-go/pkg/api" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource, + Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource, + Convert_v1_Affinity_To_api_Affinity, + Convert_api_Affinity_To_v1_Affinity, + Convert_v1_AttachedVolume_To_api_AttachedVolume, + Convert_api_AttachedVolume_To_v1_AttachedVolume, + Convert_v1_AvoidPods_To_api_AvoidPods, + Convert_api_AvoidPods_To_v1_AvoidPods, + Convert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource, + Convert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource, + Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource, + Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource, + Convert_v1_Binding_To_api_Binding, + Convert_api_Binding_To_v1_Binding, + Convert_v1_Capabilities_To_api_Capabilities, + Convert_api_Capabilities_To_v1_Capabilities, + Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource, + Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource, + Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource, + Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource, + Convert_v1_ComponentCondition_To_api_ComponentCondition, + Convert_api_ComponentCondition_To_v1_ComponentCondition, + Convert_v1_ComponentStatus_To_api_ComponentStatus, + Convert_api_ComponentStatus_To_v1_ComponentStatus, + Convert_v1_ComponentStatusList_To_api_ComponentStatusList, + Convert_api_ComponentStatusList_To_v1_ComponentStatusList, + Convert_v1_ConfigMap_To_api_ConfigMap, + Convert_api_ConfigMap_To_v1_ConfigMap, + Convert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource, + Convert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource, + Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector, + Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector, + 
Convert_v1_ConfigMapList_To_api_ConfigMapList, + Convert_api_ConfigMapList_To_v1_ConfigMapList, + Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection, + Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection, + Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource, + Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource, + Convert_v1_Container_To_api_Container, + Convert_api_Container_To_v1_Container, + Convert_v1_ContainerImage_To_api_ContainerImage, + Convert_api_ContainerImage_To_v1_ContainerImage, + Convert_v1_ContainerPort_To_api_ContainerPort, + Convert_api_ContainerPort_To_v1_ContainerPort, + Convert_v1_ContainerState_To_api_ContainerState, + Convert_api_ContainerState_To_v1_ContainerState, + Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning, + Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning, + Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated, + Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated, + Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting, + Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting, + Convert_v1_ContainerStatus_To_api_ContainerStatus, + Convert_api_ContainerStatus_To_v1_ContainerStatus, + Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint, + Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint, + Convert_v1_DeleteOptions_To_api_DeleteOptions, + Convert_api_DeleteOptions_To_v1_DeleteOptions, + Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection, + Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection, + Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile, + Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile, + Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource, + Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource, + Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource, + Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource, + Convert_v1_EndpointAddress_To_api_EndpointAddress, + Convert_api_EndpointAddress_To_v1_EndpointAddress, + Convert_v1_EndpointPort_To_api_EndpointPort, + Convert_api_EndpointPort_To_v1_EndpointPort, + Convert_v1_EndpointSubset_To_api_EndpointSubset, + Convert_api_EndpointSubset_To_v1_EndpointSubset, + Convert_v1_Endpoints_To_api_Endpoints, + Convert_api_Endpoints_To_v1_Endpoints, + Convert_v1_EndpointsList_To_api_EndpointsList, + Convert_api_EndpointsList_To_v1_EndpointsList, + Convert_v1_EnvFromSource_To_api_EnvFromSource, + Convert_api_EnvFromSource_To_v1_EnvFromSource, + Convert_v1_EnvVar_To_api_EnvVar, + Convert_api_EnvVar_To_v1_EnvVar, + Convert_v1_EnvVarSource_To_api_EnvVarSource, + Convert_api_EnvVarSource_To_v1_EnvVarSource, + Convert_v1_Event_To_api_Event, + Convert_api_Event_To_v1_Event, + Convert_v1_EventList_To_api_EventList, + Convert_api_EventList_To_v1_EventList, + Convert_v1_EventSource_To_api_EventSource, + Convert_api_EventSource_To_v1_EventSource, + Convert_v1_ExecAction_To_api_ExecAction, + Convert_api_ExecAction_To_v1_ExecAction, + Convert_v1_FCVolumeSource_To_api_FCVolumeSource, + Convert_api_FCVolumeSource_To_v1_FCVolumeSource, + Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource, + Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource, + Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource, + Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource, + Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource, + Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource, + 
Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource, + Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource, + Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource, + Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource, + Convert_v1_HTTPGetAction_To_api_HTTPGetAction, + Convert_api_HTTPGetAction_To_v1_HTTPGetAction, + Convert_v1_HTTPHeader_To_api_HTTPHeader, + Convert_api_HTTPHeader_To_v1_HTTPHeader, + Convert_v1_Handler_To_api_Handler, + Convert_api_Handler_To_v1_Handler, + Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource, + Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource, + Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource, + Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource, + Convert_v1_KeyToPath_To_api_KeyToPath, + Convert_api_KeyToPath_To_v1_KeyToPath, + Convert_v1_Lifecycle_To_api_Lifecycle, + Convert_api_Lifecycle_To_v1_Lifecycle, + Convert_v1_LimitRange_To_api_LimitRange, + Convert_api_LimitRange_To_v1_LimitRange, + Convert_v1_LimitRangeItem_To_api_LimitRangeItem, + Convert_api_LimitRangeItem_To_v1_LimitRangeItem, + Convert_v1_LimitRangeList_To_api_LimitRangeList, + Convert_api_LimitRangeList_To_v1_LimitRangeList, + Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec, + Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec, + Convert_v1_List_To_api_List, + Convert_api_List_To_v1_List, + Convert_v1_ListOptions_To_api_ListOptions, + Convert_api_ListOptions_To_v1_ListOptions, + Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress, + Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress, + Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus, + Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus, + Convert_v1_LocalObjectReference_To_api_LocalObjectReference, + Convert_api_LocalObjectReference_To_v1_LocalObjectReference, + Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource, + Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource, + Convert_v1_Namespace_To_api_Namespace, + Convert_api_Namespace_To_v1_Namespace, + Convert_v1_NamespaceList_To_api_NamespaceList, + Convert_api_NamespaceList_To_v1_NamespaceList, + Convert_v1_NamespaceSpec_To_api_NamespaceSpec, + Convert_api_NamespaceSpec_To_v1_NamespaceSpec, + Convert_v1_NamespaceStatus_To_api_NamespaceStatus, + Convert_api_NamespaceStatus_To_v1_NamespaceStatus, + Convert_v1_Node_To_api_Node, + Convert_api_Node_To_v1_Node, + Convert_v1_NodeAddress_To_api_NodeAddress, + Convert_api_NodeAddress_To_v1_NodeAddress, + Convert_v1_NodeAffinity_To_api_NodeAffinity, + Convert_api_NodeAffinity_To_v1_NodeAffinity, + Convert_v1_NodeCondition_To_api_NodeCondition, + Convert_api_NodeCondition_To_v1_NodeCondition, + Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints, + Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints, + Convert_v1_NodeList_To_api_NodeList, + Convert_api_NodeList_To_v1_NodeList, + Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions, + Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions, + Convert_v1_NodeResources_To_api_NodeResources, + Convert_api_NodeResources_To_v1_NodeResources, + Convert_v1_NodeSelector_To_api_NodeSelector, + Convert_api_NodeSelector_To_v1_NodeSelector, + Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement, + Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement, + Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm, + Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm, + Convert_v1_NodeSpec_To_api_NodeSpec, + Convert_api_NodeSpec_To_v1_NodeSpec, + Convert_v1_NodeStatus_To_api_NodeStatus, + 
Convert_api_NodeStatus_To_v1_NodeStatus, + Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo, + Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo, + Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector, + Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector, + Convert_v1_ObjectMeta_To_api_ObjectMeta, + Convert_api_ObjectMeta_To_v1_ObjectMeta, + Convert_v1_ObjectReference_To_api_ObjectReference, + Convert_api_ObjectReference_To_v1_ObjectReference, + Convert_v1_PersistentVolume_To_api_PersistentVolume, + Convert_api_PersistentVolume_To_v1_PersistentVolume, + Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim, + Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim, + Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList, + Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList, + Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec, + Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec, + Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus, + Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus, + Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource, + Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource, + Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList, + Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList, + Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource, + Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource, + Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec, + Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec, + Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus, + Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus, + Convert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource, + Convert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource, + Convert_v1_Pod_To_api_Pod, + Convert_api_Pod_To_v1_Pod, + Convert_v1_PodAffinity_To_api_PodAffinity, + Convert_api_PodAffinity_To_v1_PodAffinity, + Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm, + Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm, + Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity, + Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity, + Convert_v1_PodAttachOptions_To_api_PodAttachOptions, + Convert_api_PodAttachOptions_To_v1_PodAttachOptions, + Convert_v1_PodCondition_To_api_PodCondition, + Convert_api_PodCondition_To_v1_PodCondition, + Convert_v1_PodExecOptions_To_api_PodExecOptions, + Convert_api_PodExecOptions_To_v1_PodExecOptions, + Convert_v1_PodList_To_api_PodList, + Convert_api_PodList_To_v1_PodList, + Convert_v1_PodLogOptions_To_api_PodLogOptions, + Convert_api_PodLogOptions_To_v1_PodLogOptions, + Convert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions, + Convert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions, + Convert_v1_PodProxyOptions_To_api_PodProxyOptions, + Convert_api_PodProxyOptions_To_v1_PodProxyOptions, + Convert_v1_PodSecurityContext_To_api_PodSecurityContext, + Convert_api_PodSecurityContext_To_v1_PodSecurityContext, + Convert_v1_PodSignature_To_api_PodSignature, + Convert_api_PodSignature_To_v1_PodSignature, + Convert_v1_PodSpec_To_api_PodSpec, + Convert_api_PodSpec_To_v1_PodSpec, + Convert_v1_PodStatus_To_api_PodStatus, + Convert_api_PodStatus_To_v1_PodStatus, + Convert_v1_PodStatusResult_To_api_PodStatusResult, + 
Convert_api_PodStatusResult_To_v1_PodStatusResult, + Convert_v1_PodTemplate_To_api_PodTemplate, + Convert_api_PodTemplate_To_v1_PodTemplate, + Convert_v1_PodTemplateList_To_api_PodTemplateList, + Convert_api_PodTemplateList_To_v1_PodTemplateList, + Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec, + Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec, + Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource, + Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource, + Convert_v1_Preconditions_To_api_Preconditions, + Convert_api_Preconditions_To_v1_Preconditions, + Convert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry, + Convert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry, + Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm, + Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm, + Convert_v1_Probe_To_api_Probe, + Convert_api_Probe_To_v1_Probe, + Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource, + Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource, + Convert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource, + Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource, + Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource, + Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource, + Convert_v1_RangeAllocation_To_api_RangeAllocation, + Convert_api_RangeAllocation_To_v1_RangeAllocation, + Convert_v1_ReplicationController_To_api_ReplicationController, + Convert_api_ReplicationController_To_v1_ReplicationController, + Convert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition, + Convert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition, + Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList, + Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList, + Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec, + Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, + Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus, + Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus, + Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector, + Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector, + Convert_v1_ResourceQuota_To_api_ResourceQuota, + Convert_api_ResourceQuota_To_v1_ResourceQuota, + Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList, + Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList, + Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec, + Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec, + Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus, + Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus, + Convert_v1_ResourceRequirements_To_api_ResourceRequirements, + Convert_api_ResourceRequirements_To_v1_ResourceRequirements, + Convert_v1_SELinuxOptions_To_api_SELinuxOptions, + Convert_api_SELinuxOptions_To_v1_SELinuxOptions, + Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource, + Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource, + Convert_v1_Secret_To_api_Secret, + Convert_api_Secret_To_v1_Secret, + Convert_v1_SecretEnvSource_To_api_SecretEnvSource, + Convert_api_SecretEnvSource_To_v1_SecretEnvSource, + Convert_v1_SecretKeySelector_To_api_SecretKeySelector, + Convert_api_SecretKeySelector_To_v1_SecretKeySelector, + Convert_v1_SecretList_To_api_SecretList, + Convert_api_SecretList_To_v1_SecretList, + Convert_v1_SecretProjection_To_api_SecretProjection, + Convert_api_SecretProjection_To_v1_SecretProjection, + 
Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource, + Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource, + Convert_v1_SecurityContext_To_api_SecurityContext, + Convert_api_SecurityContext_To_v1_SecurityContext, + Convert_v1_SerializedReference_To_api_SerializedReference, + Convert_api_SerializedReference_To_v1_SerializedReference, + Convert_v1_Service_To_api_Service, + Convert_api_Service_To_v1_Service, + Convert_v1_ServiceAccount_To_api_ServiceAccount, + Convert_api_ServiceAccount_To_v1_ServiceAccount, + Convert_v1_ServiceAccountList_To_api_ServiceAccountList, + Convert_api_ServiceAccountList_To_v1_ServiceAccountList, + Convert_v1_ServiceList_To_api_ServiceList, + Convert_api_ServiceList_To_v1_ServiceList, + Convert_v1_ServicePort_To_api_ServicePort, + Convert_api_ServicePort_To_v1_ServicePort, + Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions, + Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions, + Convert_v1_ServiceSpec_To_api_ServiceSpec, + Convert_api_ServiceSpec_To_v1_ServiceSpec, + Convert_v1_ServiceStatus_To_api_ServiceStatus, + Convert_api_ServiceStatus_To_v1_ServiceStatus, + Convert_v1_Sysctl_To_api_Sysctl, + Convert_api_Sysctl_To_v1_Sysctl, + Convert_v1_TCPSocketAction_To_api_TCPSocketAction, + Convert_api_TCPSocketAction_To_v1_TCPSocketAction, + Convert_v1_Taint_To_api_Taint, + Convert_api_Taint_To_v1_Taint, + Convert_v1_Toleration_To_api_Toleration, + Convert_api_Toleration_To_v1_Toleration, + Convert_v1_Volume_To_api_Volume, + Convert_api_Volume_To_v1_Volume, + Convert_v1_VolumeMount_To_api_VolumeMount, + Convert_api_VolumeMount_To_v1_VolumeMount, + Convert_v1_VolumeProjection_To_api_VolumeProjection, + Convert_api_VolumeProjection_To_v1_VolumeProjection, + Convert_v1_VolumeSource_To_api_VolumeSource, + Convert_api_VolumeSource_To_v1_VolumeSource, + Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource, + Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource, + Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm, + Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm, + ) +} + +func autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in, out, s) +} + +func autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + return autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in, out, s) +} + +func autoConvert_v1_Affinity_To_api_Affinity(in *Affinity, out *api.Affinity, s conversion.Scope) error { + out.NodeAffinity 
= (*api.NodeAffinity)(unsafe.Pointer(in.NodeAffinity)) + out.PodAffinity = (*api.PodAffinity)(unsafe.Pointer(in.PodAffinity)) + out.PodAntiAffinity = (*api.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity)) + return nil +} + +func Convert_v1_Affinity_To_api_Affinity(in *Affinity, out *api.Affinity, s conversion.Scope) error { + return autoConvert_v1_Affinity_To_api_Affinity(in, out, s) +} + +func autoConvert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *Affinity, s conversion.Scope) error { + out.NodeAffinity = (*NodeAffinity)(unsafe.Pointer(in.NodeAffinity)) + out.PodAffinity = (*PodAffinity)(unsafe.Pointer(in.PodAffinity)) + out.PodAntiAffinity = (*PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity)) + return nil +} + +func Convert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *Affinity, s conversion.Scope) error { + return autoConvert_api_Affinity_To_v1_Affinity(in, out, s) +} + +func autoConvert_v1_AttachedVolume_To_api_AttachedVolume(in *AttachedVolume, out *api.AttachedVolume, s conversion.Scope) error { + out.Name = api.UniqueVolumeName(in.Name) + out.DevicePath = in.DevicePath + return nil +} + +func Convert_v1_AttachedVolume_To_api_AttachedVolume(in *AttachedVolume, out *api.AttachedVolume, s conversion.Scope) error { + return autoConvert_v1_AttachedVolume_To_api_AttachedVolume(in, out, s) +} + +func autoConvert_api_AttachedVolume_To_v1_AttachedVolume(in *api.AttachedVolume, out *AttachedVolume, s conversion.Scope) error { + out.Name = UniqueVolumeName(in.Name) + out.DevicePath = in.DevicePath + return nil +} + +func Convert_api_AttachedVolume_To_v1_AttachedVolume(in *api.AttachedVolume, out *AttachedVolume, s conversion.Scope) error { + return autoConvert_api_AttachedVolume_To_v1_AttachedVolume(in, out, s) +} + +func autoConvert_v1_AvoidPods_To_api_AvoidPods(in *AvoidPods, out *api.AvoidPods, s conversion.Scope) error { + out.PreferAvoidPods = *(*[]api.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods)) + return nil +} + +func Convert_v1_AvoidPods_To_api_AvoidPods(in *AvoidPods, out *api.AvoidPods, s conversion.Scope) error { + return autoConvert_v1_AvoidPods_To_api_AvoidPods(in, out, s) +} + +func autoConvert_api_AvoidPods_To_v1_AvoidPods(in *api.AvoidPods, out *AvoidPods, s conversion.Scope) error { + out.PreferAvoidPods = *(*[]PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods)) + return nil +} + +func Convert_api_AvoidPods_To_v1_AvoidPods(in *api.AvoidPods, out *AvoidPods, s conversion.Scope) error { + return autoConvert_api_AvoidPods_To_v1_AvoidPods(in, out, s) +} + +func autoConvert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in *AzureDiskVolumeSource, out *api.AzureDiskVolumeSource, s conversion.Scope) error { + out.DiskName = in.DiskName + out.DataDiskURI = in.DataDiskURI + out.CachingMode = (*api.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode)) + out.FSType = (*string)(unsafe.Pointer(in.FSType)) + out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly)) + return nil +} + +func Convert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in *AzureDiskVolumeSource, out *api.AzureDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in, out, s) +} + +func autoConvert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *api.AzureDiskVolumeSource, out *AzureDiskVolumeSource, s conversion.Scope) error { + out.DiskName = in.DiskName + out.DataDiskURI = in.DataDiskURI + out.CachingMode = (*AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode)) + out.FSType = 
(*string)(unsafe.Pointer(in.FSType)) + out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly)) + return nil +} + +func Convert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *api.AzureDiskVolumeSource, out *AzureDiskVolumeSource, s conversion.Scope) error { + return autoConvert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.ShareName = in.ShareName + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in, out, s) +} + +func autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *AzureFileVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.ShareName = in.ShareName + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *AzureFileVolumeSource, s conversion.Scope) error { + return autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in, out, s) +} + +func autoConvert_v1_Binding_To_api_Binding(in *Binding, out *api.Binding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + return nil +} + +func Convert_v1_Binding_To_api_Binding(in *Binding, out *api.Binding, s conversion.Scope) error { + return autoConvert_v1_Binding_To_api_Binding(in, out, s) +} + +func autoConvert_api_Binding_To_v1_Binding(in *api.Binding, out *Binding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + return nil +} + +func Convert_api_Binding_To_v1_Binding(in *api.Binding, out *Binding, s conversion.Scope) error { + return autoConvert_api_Binding_To_v1_Binding(in, out, s) +} + +func autoConvert_v1_Capabilities_To_api_Capabilities(in *Capabilities, out *api.Capabilities, s conversion.Scope) error { + out.Add = *(*[]api.Capability)(unsafe.Pointer(&in.Add)) + out.Drop = *(*[]api.Capability)(unsafe.Pointer(&in.Drop)) + return nil +} + +func Convert_v1_Capabilities_To_api_Capabilities(in *Capabilities, out *api.Capabilities, s conversion.Scope) error { + return autoConvert_v1_Capabilities_To_api_Capabilities(in, out, s) +} + +func autoConvert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *Capabilities, s conversion.Scope) error { + out.Add = *(*[]Capability)(unsafe.Pointer(&in.Add)) + out.Drop = *(*[]Capability)(unsafe.Pointer(&in.Drop)) + return nil +} + +func Convert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *Capabilities, s conversion.Scope) error { + return autoConvert_api_Capabilities_To_v1_Capabilities(in, out, s) +} + +func autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error { + out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) + out.Path = in.Path + out.User = in.User + out.SecretFile = in.SecretFile + out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + 
return nil +} + +func Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error { + return autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in, out, s) +} + +func autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *CephFSVolumeSource, s conversion.Scope) error { + out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) + out.Path = in.Path + out.User = in.User + out.SecretFile = in.SecretFile + out.SecretRef = (*LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *CephFSVolumeSource, s conversion.Scope) error { + return autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in, out, s) +} + +func autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error { + return autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in, out, s) +} + +func autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *CinderVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *CinderVolumeSource, s conversion.Scope) error { + return autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s) +} + +func autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in *ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error { + out.Type = api.ComponentConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + out.Message = in.Message + out.Error = in.Error + return nil +} + +func Convert_v1_ComponentCondition_To_api_ComponentCondition(in *ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error { + return autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in, out, s) +} + +func autoConvert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *ComponentCondition, s conversion.Scope) error { + out.Type = ComponentConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + out.Message = in.Message + out.Error = in.Error + return nil +} + +func Convert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *ComponentCondition, s conversion.Scope) error { + return autoConvert_api_ComponentCondition_To_v1_ComponentCondition(in, out, s) +} + +func autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in *ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Conditions = *(*[]api.ComponentCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +func Convert_v1_ComponentStatus_To_api_ComponentStatus(in *ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error { + return autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in, out, s) +} + +func autoConvert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, out *ComponentStatus, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + 
out.Conditions = *(*[]ComponentCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +func Convert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, out *ComponentStatus, s conversion.Scope) error { + return autoConvert_api_ComponentStatus_To_v1_ComponentStatus(in, out, s) +} + +func autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in *ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.ComponentStatus)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_ComponentStatusList_To_api_ComponentStatusList(in *ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error { + return autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in, out, s) +} + +func autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *ComponentStatusList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]ComponentStatus)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *ComponentStatusList, s conversion.Scope) error { + return autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in, out, s) +} + +func autoConvert_v1_ConfigMap_To_api_ConfigMap(in *ConfigMap, out *api.ConfigMap, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) + return nil +} + +func Convert_v1_ConfigMap_To_api_ConfigMap(in *ConfigMap, out *api.ConfigMap, s conversion.Scope) error { + return autoConvert_v1_ConfigMap_To_api_ConfigMap(in, out, s) +} + +func autoConvert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *ConfigMap, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) + return nil +} + +func Convert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *ConfigMap, s conversion.Scope) error { + return autoConvert_api_ConfigMap_To_v1_ConfigMap(in, out, s) +} + +func autoConvert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in *ConfigMapEnvSource, out *api.ConfigMapEnvSource, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in *ConfigMapEnvSource, out *api.ConfigMapEnvSource, s conversion.Scope) error { + return autoConvert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in, out, s) +} + +func autoConvert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *api.ConfigMapEnvSource, out *ConfigMapEnvSource, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *api.ConfigMapEnvSource, out *ConfigMapEnvSource, s conversion.Scope) error { + return autoConvert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in, out, s) +} + +func autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error { + if err := 
Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error { + return autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in, out, s) +} + +func autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *ConfigMapKeySelector, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *ConfigMapKeySelector, s conversion.Scope) error { + return autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in, out, s) +} + +func autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in *ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.ConfigMap)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_ConfigMapList_To_api_ConfigMapList(in *ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error { + return autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in, out, s) +} + +func autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *ConfigMapList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]ConfigMap)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *ConfigMapList, s conversion.Scope) error { + return autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in, out, s) +} + +func autoConvert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in *ConfigMapProjection, out *api.ConfigMapProjection, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in *ConfigMapProjection, out *api.ConfigMapProjection, s conversion.Scope) error { + return autoConvert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in, out, s) +} + +func autoConvert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in *api.ConfigMapProjection, out *ConfigMapProjection, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in *api.ConfigMapProjection, out *ConfigMapProjection, s conversion.Scope) error { + return autoConvert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in, out, s) +} + +func autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error { + if err := 
Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in, out, s) +} + +func autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *ConfigMapVolumeSource, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *ConfigMapVolumeSource, s conversion.Scope) error { + return autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in, out, s) +} + +func autoConvert_v1_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error { + out.Name = in.Name + out.Image = in.Image + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) + out.WorkingDir = in.WorkingDir + out.Ports = *(*[]api.ContainerPort)(unsafe.Pointer(&in.Ports)) + out.EnvFrom = *(*[]api.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + out.Env = *(*[]api.EnvVar)(unsafe.Pointer(&in.Env)) + if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeMounts = *(*[]api.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + out.LivenessProbe = (*api.Probe)(unsafe.Pointer(in.LivenessProbe)) + out.ReadinessProbe = (*api.Probe)(unsafe.Pointer(in.ReadinessProbe)) + out.Lifecycle = (*api.Lifecycle)(unsafe.Pointer(in.Lifecycle)) + out.TerminationMessagePath = in.TerminationMessagePath + out.TerminationMessagePolicy = api.TerminationMessagePolicy(in.TerminationMessagePolicy) + out.ImagePullPolicy = api.PullPolicy(in.ImagePullPolicy) + out.SecurityContext = (*api.SecurityContext)(unsafe.Pointer(in.SecurityContext)) + out.Stdin = in.Stdin + out.StdinOnce = in.StdinOnce + out.TTY = in.TTY + return nil +} + +func Convert_v1_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error { + return autoConvert_v1_Container_To_api_Container(in, out, s) +} + +func autoConvert_api_Container_To_v1_Container(in *api.Container, out *Container, s conversion.Scope) error { + out.Name = in.Name + out.Image = in.Image + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) + out.WorkingDir = in.WorkingDir + out.Ports = *(*[]ContainerPort)(unsafe.Pointer(&in.Ports)) + out.EnvFrom = *(*[]EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + out.Env = *(*[]EnvVar)(unsafe.Pointer(&in.Env)) + if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeMounts = *(*[]VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + out.LivenessProbe = (*Probe)(unsafe.Pointer(in.LivenessProbe)) + 
out.ReadinessProbe = (*Probe)(unsafe.Pointer(in.ReadinessProbe)) + out.Lifecycle = (*Lifecycle)(unsafe.Pointer(in.Lifecycle)) + out.TerminationMessagePath = in.TerminationMessagePath + out.TerminationMessagePolicy = TerminationMessagePolicy(in.TerminationMessagePolicy) + out.ImagePullPolicy = PullPolicy(in.ImagePullPolicy) + out.SecurityContext = (*SecurityContext)(unsafe.Pointer(in.SecurityContext)) + out.Stdin = in.Stdin + out.StdinOnce = in.StdinOnce + out.TTY = in.TTY + return nil +} + +func Convert_api_Container_To_v1_Container(in *api.Container, out *Container, s conversion.Scope) error { + return autoConvert_api_Container_To_v1_Container(in, out, s) +} + +func autoConvert_v1_ContainerImage_To_api_ContainerImage(in *ContainerImage, out *api.ContainerImage, s conversion.Scope) error { + out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) + out.SizeBytes = in.SizeBytes + return nil +} + +func Convert_v1_ContainerImage_To_api_ContainerImage(in *ContainerImage, out *api.ContainerImage, s conversion.Scope) error { + return autoConvert_v1_ContainerImage_To_api_ContainerImage(in, out, s) +} + +func autoConvert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out *ContainerImage, s conversion.Scope) error { + out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) + out.SizeBytes = in.SizeBytes + return nil +} + +func Convert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out *ContainerImage, s conversion.Scope) error { + return autoConvert_api_ContainerImage_To_v1_ContainerImage(in, out, s) +} + +func autoConvert_v1_ContainerPort_To_api_ContainerPort(in *ContainerPort, out *api.ContainerPort, s conversion.Scope) error { + out.Name = in.Name + out.HostPort = in.HostPort + out.ContainerPort = in.ContainerPort + out.Protocol = api.Protocol(in.Protocol) + out.HostIP = in.HostIP + return nil +} + +func Convert_v1_ContainerPort_To_api_ContainerPort(in *ContainerPort, out *api.ContainerPort, s conversion.Scope) error { + return autoConvert_v1_ContainerPort_To_api_ContainerPort(in, out, s) +} + +func autoConvert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *ContainerPort, s conversion.Scope) error { + out.Name = in.Name + out.HostPort = in.HostPort + out.ContainerPort = in.ContainerPort + out.Protocol = Protocol(in.Protocol) + out.HostIP = in.HostIP + return nil +} + +func Convert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *ContainerPort, s conversion.Scope) error { + return autoConvert_api_ContainerPort_To_v1_ContainerPort(in, out, s) +} + +func autoConvert_v1_ContainerState_To_api_ContainerState(in *ContainerState, out *api.ContainerState, s conversion.Scope) error { + out.Waiting = (*api.ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) + out.Running = (*api.ContainerStateRunning)(unsafe.Pointer(in.Running)) + out.Terminated = (*api.ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) + return nil +} + +func Convert_v1_ContainerState_To_api_ContainerState(in *ContainerState, out *api.ContainerState, s conversion.Scope) error { + return autoConvert_v1_ContainerState_To_api_ContainerState(in, out, s) +} + +func autoConvert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out *ContainerState, s conversion.Scope) error { + out.Waiting = (*ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) + out.Running = (*ContainerStateRunning)(unsafe.Pointer(in.Running)) + out.Terminated = (*ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) + return nil +} + +func Convert_api_ContainerState_To_v1_ContainerState(in 
*api.ContainerState, out *ContainerState, s conversion.Scope) error { + return autoConvert_api_ContainerState_To_v1_ContainerState(in, out, s) +} + +func autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error { + out.StartedAt = in.StartedAt + return nil +} + +func Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error { + return autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in, out, s) +} + +func autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.ContainerStateRunning, out *ContainerStateRunning, s conversion.Scope) error { + out.StartedAt = in.StartedAt + return nil +} + +func Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.ContainerStateRunning, out *ContainerStateRunning, s conversion.Scope) error { + return autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in, out, s) +} + +func autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error { + out.ExitCode = in.ExitCode + out.Signal = in.Signal + out.Reason = in.Reason + out.Message = in.Message + out.StartedAt = in.StartedAt + out.FinishedAt = in.FinishedAt + out.ContainerID = in.ContainerID + return nil +} + +func Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error { + return autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in, out, s) +} + +func autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api.ContainerStateTerminated, out *ContainerStateTerminated, s conversion.Scope) error { + out.ExitCode = in.ExitCode + out.Signal = in.Signal + out.Reason = in.Reason + out.Message = in.Message + out.StartedAt = in.StartedAt + out.FinishedAt = in.FinishedAt + out.ContainerID = in.ContainerID + return nil +} + +func Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api.ContainerStateTerminated, out *ContainerStateTerminated, s conversion.Scope) error { + return autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in, out, s) +} + +func autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error { + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error { + return autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in, out, s) +} + +func autoConvert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.ContainerStateWaiting, out *ContainerStateWaiting, s conversion.Scope) error { + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.ContainerStateWaiting, out *ContainerStateWaiting, s conversion.Scope) error { + return autoConvert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in, out, s) +} + +func autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in *ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_v1_ContainerState_To_api_ContainerState(&in.State, 
&out.State, s); err != nil { + return err + } + if err := Convert_v1_ContainerState_To_api_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { + return err + } + out.Ready = in.Ready + out.RestartCount = in.RestartCount + out.Image = in.Image + out.ImageID = in.ImageID + out.ContainerID = in.ContainerID + return nil +} + +func Convert_v1_ContainerStatus_To_api_ContainerStatus(in *ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error { + return autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in, out, s) +} + +func autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, out *ContainerStatus, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_api_ContainerState_To_v1_ContainerState(&in.State, &out.State, s); err != nil { + return err + } + if err := Convert_api_ContainerState_To_v1_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { + return err + } + out.Ready = in.Ready + out.RestartCount = in.RestartCount + out.Image = in.Image + out.ImageID = in.ImageID + out.ContainerID = in.ContainerID + return nil +} + +func Convert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, out *ContainerStatus, s conversion.Scope) error { + return autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in, out, s) +} + +func autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error { + out.Port = in.Port + return nil +} + +func Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error { + return autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in, out, s) +} + +func autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *DaemonEndpoint, s conversion.Scope) error { + out.Port = in.Port + return nil +} + +func Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *DaemonEndpoint, s conversion.Scope) error { + return autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in, out, s) +} + +func autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in *DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error { + out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) + out.Preconditions = (*api.Preconditions)(unsafe.Pointer(in.Preconditions)) + out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) + out.PropagationPolicy = (*api.DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) + return nil +} + +func Convert_v1_DeleteOptions_To_api_DeleteOptions(in *DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error { + return autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in, out, s) +} + +func autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *DeleteOptions, s conversion.Scope) error { + out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) + out.Preconditions = (*Preconditions)(unsafe.Pointer(in.Preconditions)) + out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) + out.PropagationPolicy = (*DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) + return nil +} + +func Convert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *DeleteOptions, s conversion.Scope) error { + return autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in, out, s) +} + +func autoConvert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in *DownwardAPIProjection, out *api.DownwardAPIProjection, s 
conversion.Scope) error { + out.Items = *(*[]api.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in *DownwardAPIProjection, out *api.DownwardAPIProjection, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in, out, s) +} + +func autoConvert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *api.DownwardAPIProjection, out *DownwardAPIProjection, s conversion.Scope) error { + out.Items = *(*[]DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *api.DownwardAPIProjection, out *DownwardAPIProjection, s conversion.Scope) error { + return autoConvert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in, out, s) +} + +func autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error { + out.Path = in.Path + out.FieldRef = (*api.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*api.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +func Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in, out, s) +} + +func autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, s conversion.Scope) error { + out.Path = in.Path + out.FieldRef = (*ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +func Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, s conversion.Scope) error { + return autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in, out, s) +} + +func autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error { + out.Items = *(*[]api.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +func Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in, out, s) +} + +func autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *DownwardAPIVolumeSource, s conversion.Scope) error { + out.Items = *(*[]DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +func Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *DownwardAPIVolumeSource, s conversion.Scope) error { + return autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in, out, s) +} + +func autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error { + out.Medium = api.StorageMedium(in.Medium) + return nil +} + +func 
Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error { + return autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in, out, s) +} + +func autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *EmptyDirVolumeSource, s conversion.Scope) error { + out.Medium = StorageMedium(in.Medium) + return nil +} + +func Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *EmptyDirVolumeSource, s conversion.Scope) error { + return autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in, out, s) +} + +func autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in *EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) + out.TargetRef = (*api.ObjectReference)(unsafe.Pointer(in.TargetRef)) + return nil +} + +func Convert_v1_EndpointAddress_To_api_EndpointAddress(in *EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error { + return autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in, out, s) +} + +func autoConvert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, out *EndpointAddress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) + out.TargetRef = (*ObjectReference)(unsafe.Pointer(in.TargetRef)) + return nil +} + +func Convert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, out *EndpointAddress, s conversion.Scope) error { + return autoConvert_api_EndpointAddress_To_v1_EndpointAddress(in, out, s) +} + +func autoConvert_v1_EndpointPort_To_api_EndpointPort(in *EndpointPort, out *api.EndpointPort, s conversion.Scope) error { + out.Name = in.Name + out.Port = in.Port + out.Protocol = api.Protocol(in.Protocol) + return nil +} + +func Convert_v1_EndpointPort_To_api_EndpointPort(in *EndpointPort, out *api.EndpointPort, s conversion.Scope) error { + return autoConvert_v1_EndpointPort_To_api_EndpointPort(in, out, s) +} + +func autoConvert_api_EndpointPort_To_v1_EndpointPort(in *api.EndpointPort, out *EndpointPort, s conversion.Scope) error { + out.Name = in.Name + out.Port = in.Port + out.Protocol = Protocol(in.Protocol) + return nil +} + +func Convert_api_EndpointPort_To_v1_EndpointPort(in *api.EndpointPort, out *EndpointPort, s conversion.Scope) error { + return autoConvert_api_EndpointPort_To_v1_EndpointPort(in, out, s) +} + +func autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in *EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error { + out.Addresses = *(*[]api.EndpointAddress)(unsafe.Pointer(&in.Addresses)) + out.NotReadyAddresses = *(*[]api.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) + out.Ports = *(*[]api.EndpointPort)(unsafe.Pointer(&in.Ports)) + return nil +} + +func Convert_v1_EndpointSubset_To_api_EndpointSubset(in *EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error { + return autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in, out, s) +} + +func autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out *EndpointSubset, s conversion.Scope) error { + out.Addresses = *(*[]EndpointAddress)(unsafe.Pointer(&in.Addresses)) + out.NotReadyAddresses = *(*[]EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) + out.Ports = *(*[]EndpointPort)(unsafe.Pointer(&in.Ports)) + return nil +} + +func 
Convert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out *EndpointSubset, s conversion.Scope) error { + return autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in, out, s) +} + +func autoConvert_v1_Endpoints_To_api_Endpoints(in *Endpoints, out *api.Endpoints, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Subsets = *(*[]api.EndpointSubset)(unsafe.Pointer(&in.Subsets)) + return nil +} + +func Convert_v1_Endpoints_To_api_Endpoints(in *Endpoints, out *api.Endpoints, s conversion.Scope) error { + return autoConvert_v1_Endpoints_To_api_Endpoints(in, out, s) +} + +func autoConvert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *Endpoints, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Subsets = *(*[]EndpointSubset)(unsafe.Pointer(&in.Subsets)) + return nil +} + +func Convert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *Endpoints, s conversion.Scope) error { + return autoConvert_api_Endpoints_To_v1_Endpoints(in, out, s) +} + +func autoConvert_v1_EndpointsList_To_api_EndpointsList(in *EndpointsList, out *api.EndpointsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.Endpoints)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_EndpointsList_To_api_EndpointsList(in *EndpointsList, out *api.EndpointsList, s conversion.Scope) error { + return autoConvert_v1_EndpointsList_To_api_EndpointsList(in, out, s) +} + +func autoConvert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *EndpointsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]Endpoints)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *EndpointsList, s conversion.Scope) error { + return autoConvert_api_EndpointsList_To_v1_EndpointsList(in, out, s) +} + +func autoConvert_v1_EnvFromSource_To_api_EnvFromSource(in *EnvFromSource, out *api.EnvFromSource, s conversion.Scope) error { + out.Prefix = in.Prefix + out.ConfigMapRef = (*api.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) + out.SecretRef = (*api.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) + return nil +} + +func Convert_v1_EnvFromSource_To_api_EnvFromSource(in *EnvFromSource, out *api.EnvFromSource, s conversion.Scope) error { + return autoConvert_v1_EnvFromSource_To_api_EnvFromSource(in, out, s) +} + +func autoConvert_api_EnvFromSource_To_v1_EnvFromSource(in *api.EnvFromSource, out *EnvFromSource, s conversion.Scope) error { + out.Prefix = in.Prefix + out.ConfigMapRef = (*ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) + out.SecretRef = (*SecretEnvSource)(unsafe.Pointer(in.SecretRef)) + return nil +} + +func Convert_api_EnvFromSource_To_v1_EnvFromSource(in *api.EnvFromSource, out *EnvFromSource, s conversion.Scope) error { + return autoConvert_api_EnvFromSource_To_v1_EnvFromSource(in, out, s) +} + +func autoConvert_v1_EnvVar_To_api_EnvVar(in *EnvVar, out *api.EnvVar, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + out.ValueFrom = (*api.EnvVarSource)(unsafe.Pointer(in.ValueFrom)) + return nil +} + +func Convert_v1_EnvVar_To_api_EnvVar(in *EnvVar, out *api.EnvVar, s conversion.Scope) error { + return autoConvert_v1_EnvVar_To_api_EnvVar(in, out, s) +} + +func autoConvert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *EnvVar, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + out.ValueFrom = (*EnvVarSource)(unsafe.Pointer(in.ValueFrom)) + return nil +} + +func 
Convert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *EnvVar, s conversion.Scope) error { + return autoConvert_api_EnvVar_To_v1_EnvVar(in, out, s) +} + +func autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in *EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error { + out.FieldRef = (*api.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*api.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.ConfigMapKeyRef = (*api.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) + out.SecretKeyRef = (*api.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) + return nil +} + +func Convert_v1_EnvVarSource_To_api_EnvVarSource(in *EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error { + return autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in, out, s) +} + +func autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *EnvVarSource, s conversion.Scope) error { + out.FieldRef = (*ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.ConfigMapKeyRef = (*ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) + out.SecretKeyRef = (*SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) + return nil +} + +func Convert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *EnvVarSource, s conversion.Scope) error { + return autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in, out, s) +} + +func autoConvert_v1_Event_To_api_Event(in *Event, out *api.Event, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + if err := Convert_v1_EventSource_To_api_EventSource(&in.Source, &out.Source, s); err != nil { + return err + } + out.FirstTimestamp = in.FirstTimestamp + out.LastTimestamp = in.LastTimestamp + out.Count = in.Count + out.Type = in.Type + return nil +} + +func Convert_v1_Event_To_api_Event(in *Event, out *api.Event, s conversion.Scope) error { + return autoConvert_v1_Event_To_api_Event(in, out, s) +} + +func autoConvert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + if err := Convert_api_EventSource_To_v1_EventSource(&in.Source, &out.Source, s); err != nil { + return err + } + out.FirstTimestamp = in.FirstTimestamp + out.LastTimestamp = in.LastTimestamp + out.Count = in.Count + out.Type = in.Type + return nil +} + +func Convert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.Scope) error { + return autoConvert_api_Event_To_v1_Event(in, out, s) +} + +func autoConvert_v1_EventList_To_api_EventList(in *EventList, out *api.EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.Event)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_EventList_To_api_EventList(in *EventList, out *api.EventList, s conversion.Scope) error { + return autoConvert_v1_EventList_To_api_EventList(in, out, s) +} + +func autoConvert_api_EventList_To_v1_EventList(in *api.EventList, out *EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]Event)(unsafe.Pointer(&in.Items)) + return nil +} + +func 
Convert_api_EventList_To_v1_EventList(in *api.EventList, out *EventList, s conversion.Scope) error { + return autoConvert_api_EventList_To_v1_EventList(in, out, s) +} + +func autoConvert_v1_EventSource_To_api_EventSource(in *EventSource, out *api.EventSource, s conversion.Scope) error { + out.Component = in.Component + out.Host = in.Host + return nil +} + +func Convert_v1_EventSource_To_api_EventSource(in *EventSource, out *api.EventSource, s conversion.Scope) error { + return autoConvert_v1_EventSource_To_api_EventSource(in, out, s) +} + +func autoConvert_api_EventSource_To_v1_EventSource(in *api.EventSource, out *EventSource, s conversion.Scope) error { + out.Component = in.Component + out.Host = in.Host + return nil +} + +func Convert_api_EventSource_To_v1_EventSource(in *api.EventSource, out *EventSource, s conversion.Scope) error { + return autoConvert_api_EventSource_To_v1_EventSource(in, out, s) +} + +func autoConvert_v1_ExecAction_To_api_ExecAction(in *ExecAction, out *api.ExecAction, s conversion.Scope) error { + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +func Convert_v1_ExecAction_To_api_ExecAction(in *ExecAction, out *api.ExecAction, s conversion.Scope) error { + return autoConvert_v1_ExecAction_To_api_ExecAction(in, out, s) +} + +func autoConvert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *ExecAction, s conversion.Scope) error { + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +func Convert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *ExecAction, s conversion.Scope) error { + return autoConvert_api_ExecAction_To_v1_ExecAction(in, out, s) +} + +func autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in *FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error { + out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) + out.Lun = (*int32)(unsafe.Pointer(in.Lun)) + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_FCVolumeSource_To_api_FCVolumeSource(in *FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in, out, s) +} + +func autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *FCVolumeSource, s conversion.Scope) error { + out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) + out.Lun = (*int32)(unsafe.Pointer(in.Lun)) + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *FCVolumeSource, s conversion.Scope) error { + return autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in, out, s) +} + +func autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.FSType = in.FSType + out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) + return nil +} + +func Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in, out, s) +} + +func autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *FlexVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.FSType = in.FSType + out.SecretRef = 
(*LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) + return nil +} + +func Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *FlexVolumeSource, s conversion.Scope) error { + return autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in, out, s) +} + +func autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error { + out.DatasetName = in.DatasetName + out.DatasetUUID = in.DatasetUUID + return nil +} + +func Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in, out, s) +} + +func autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *FlockerVolumeSource, s conversion.Scope) error { + out.DatasetName = in.DatasetName + out.DatasetUUID = in.DatasetUUID + return nil +} + +func Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *FlockerVolumeSource, s conversion.Scope) error { + return autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in, out, s) +} + +func autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + out.PDName = in.PDName + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, s conversion.Scope) error { + out.PDName = in.PDName + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error { + out.Repository = in.Repository + out.Revision = in.Revision + out.Directory = in.Directory + return nil +} + +func Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in, out, s) +} + +func autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *GitRepoVolumeSource, s conversion.Scope) error { + out.Repository = in.Repository + out.Revision = in.Revision + out.Directory = in.Directory + return nil +} + +func Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *GitRepoVolumeSource, s conversion.Scope) error { + return autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in, out, s) +} + 
+func autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error { + out.EndpointsName = in.EndpointsName + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in, out, s) +} + +func autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *GlusterfsVolumeSource, s conversion.Scope) error { + out.EndpointsName = in.EndpointsName + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *GlusterfsVolumeSource, s conversion.Scope) error { + return autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in, out, s) +} + +func autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in *HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error { + out.Path = in.Path + out.Port = in.Port + out.Host = in.Host + out.Scheme = api.URIScheme(in.Scheme) + out.HTTPHeaders = *(*[]api.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) + return nil +} + +func Convert_v1_HTTPGetAction_To_api_HTTPGetAction(in *HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error { + return autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in, out, s) +} + +func autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *HTTPGetAction, s conversion.Scope) error { + out.Path = in.Path + out.Port = in.Port + out.Host = in.Host + out.Scheme = URIScheme(in.Scheme) + out.HTTPHeaders = *(*[]HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) + return nil +} + +func Convert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *HTTPGetAction, s conversion.Scope) error { + return autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in, out, s) +} + +func autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in *HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +func Convert_v1_HTTPHeader_To_api_HTTPHeader(in *HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error { + return autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in, out, s) +} + +func autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *HTTPHeader, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +func Convert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *HTTPHeader, s conversion.Scope) error { + return autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in, out, s) +} + +func autoConvert_v1_Handler_To_api_Handler(in *Handler, out *api.Handler, s conversion.Scope) error { + out.Exec = (*api.ExecAction)(unsafe.Pointer(in.Exec)) + out.HTTPGet = (*api.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) + out.TCPSocket = (*api.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) + return nil +} + +func Convert_v1_Handler_To_api_Handler(in *Handler, out *api.Handler, s conversion.Scope) error { + return autoConvert_v1_Handler_To_api_Handler(in, out, s) +} + +func autoConvert_api_Handler_To_v1_Handler(in *api.Handler, out *Handler, s conversion.Scope) error { + out.Exec = (*ExecAction)(unsafe.Pointer(in.Exec)) + out.HTTPGet = (*HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) + out.TCPSocket = (*TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) + return nil +} + 
+func Convert_api_Handler_To_v1_Handler(in *api.Handler, out *Handler, s conversion.Scope) error { + return autoConvert_api_Handler_To_v1_Handler(in, out, s) +} + +func autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +func Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error { + return autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in, out, s) +} + +func autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *HostPathVolumeSource, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +func Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *HostPathVolumeSource, s conversion.Scope) error { + return autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in, out, s) +} + +func autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + return nil +} + +func Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in, out, s) +} + +func autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *ISCSIVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + return nil +} + +func Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *ISCSIVolumeSource, s conversion.Scope) error { + return autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in, out, s) +} + +func autoConvert_v1_KeyToPath_To_api_KeyToPath(in *KeyToPath, out *api.KeyToPath, s conversion.Scope) error { + out.Key = in.Key + out.Path = in.Path + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +func Convert_v1_KeyToPath_To_api_KeyToPath(in *KeyToPath, out *api.KeyToPath, s conversion.Scope) error { + return autoConvert_v1_KeyToPath_To_api_KeyToPath(in, out, s) +} + +func autoConvert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *KeyToPath, s conversion.Scope) error { + out.Key = in.Key + out.Path = in.Path + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +func Convert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *KeyToPath, s conversion.Scope) error { + return autoConvert_api_KeyToPath_To_v1_KeyToPath(in, out, s) +} + +func autoConvert_v1_Lifecycle_To_api_Lifecycle(in *Lifecycle, out *api.Lifecycle, s conversion.Scope) error { + out.PostStart = (*api.Handler)(unsafe.Pointer(in.PostStart)) + out.PreStop = (*api.Handler)(unsafe.Pointer(in.PreStop)) + return nil +} + +func Convert_v1_Lifecycle_To_api_Lifecycle(in *Lifecycle, out *api.Lifecycle, s conversion.Scope) error { + return autoConvert_v1_Lifecycle_To_api_Lifecycle(in, out, s) +} + +func autoConvert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *Lifecycle, s 
conversion.Scope) error { + out.PostStart = (*Handler)(unsafe.Pointer(in.PostStart)) + out.PreStop = (*Handler)(unsafe.Pointer(in.PreStop)) + return nil +} + +func Convert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *Lifecycle, s conversion.Scope) error { + return autoConvert_api_Lifecycle_To_v1_Lifecycle(in, out, s) +} + +func autoConvert_v1_LimitRange_To_api_LimitRange(in *LimitRange, out *api.LimitRange, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_v1_LimitRange_To_api_LimitRange(in *LimitRange, out *api.LimitRange, s conversion.Scope) error { + return autoConvert_v1_LimitRange_To_api_LimitRange(in, out, s) +} + +func autoConvert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *LimitRange, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *LimitRange, s conversion.Scope) error { + return autoConvert_api_LimitRange_To_v1_LimitRange(in, out, s) +} + +func autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error { + out.Type = api.LimitType(in.Type) + out.Max = *(*api.ResourceList)(unsafe.Pointer(&in.Max)) + out.Min = *(*api.ResourceList)(unsafe.Pointer(&in.Min)) + out.Default = *(*api.ResourceList)(unsafe.Pointer(&in.Default)) + out.DefaultRequest = *(*api.ResourceList)(unsafe.Pointer(&in.DefaultRequest)) + out.MaxLimitRequestRatio = *(*api.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) + return nil +} + +func Convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error { + return autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in, out, s) +} + +func autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out *LimitRangeItem, s conversion.Scope) error { + out.Type = LimitType(in.Type) + out.Max = *(*ResourceList)(unsafe.Pointer(&in.Max)) + out.Min = *(*ResourceList)(unsafe.Pointer(&in.Min)) + out.Default = *(*ResourceList)(unsafe.Pointer(&in.Default)) + out.DefaultRequest = *(*ResourceList)(unsafe.Pointer(&in.DefaultRequest)) + out.MaxLimitRequestRatio = *(*ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) + return nil +} + +func Convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out *LimitRangeItem, s conversion.Scope) error { + return autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem(in, out, s) +} + +func autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in *LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.LimitRange)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_LimitRangeList_To_api_LimitRangeList(in *LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error { + return autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in, out, s) +} + +func autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *LimitRangeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]LimitRange)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *LimitRangeList, s conversion.Scope) error { + return 
autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in, out, s) +} + +func autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error { + out.Limits = *(*[]api.LimitRangeItem)(unsafe.Pointer(&in.Limits)) + return nil +} + +func Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error { + return autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in, out, s) +} + +func autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *LimitRangeSpec, s conversion.Scope) error { + out.Limits = *(*[]LimitRangeItem)(unsafe.Pointer(&in.Limits)) + return nil +} + +func Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *LimitRangeSpec, s conversion.Scope) error { + return autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in, out, s) +} + +func autoConvert_v1_List_To_api_List(in *List, out *api.List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_List_To_api_List(in *List, out *api.List, s conversion.Scope) error { + return autoConvert_v1_List_To_api_List(in, out, s) +} + +func autoConvert_api_List_To_v1_List(in *api.List, out *List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_api_List_To_v1_List(in *api.List, out *List, s conversion.Scope) error { + return autoConvert_api_List_To_v1_List(in, out, s) +} + +func autoConvert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error { + if err := meta_v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + if err := meta_v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + out.Watch = in.Watch + out.ResourceVersion = in.ResourceVersion + out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) + return nil +} + +func Convert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error { + return autoConvert_v1_ListOptions_To_api_ListOptions(in, out, s) +} + +func autoConvert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *ListOptions, s conversion.Scope) error { + if err := meta_v1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + if err := meta_v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + out.Watch = in.Watch + out.ResourceVersion = in.ResourceVersion + out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) + return nil +} + +func Convert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *ListOptions, s conversion.Scope) error { + return autoConvert_api_ListOptions_To_v1_ListOptions(in, out, s) +} + +func autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in 
*LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + return nil +} + +func Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error { + return autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in, out, s) +} + +func autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *LoadBalancerIngress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + return nil +} + +func Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *LoadBalancerIngress, s conversion.Scope) error { + return autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in, out, s) +} + +func autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error { + out.Ingress = *(*[]api.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) + return nil +} + +func Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error { + return autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in, out, s) +} + +func autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *LoadBalancerStatus, s conversion.Scope) error { + out.Ingress = *(*[]LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) + return nil +} + +func Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *LoadBalancerStatus, s conversion.Scope) error { + return autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in, out, s) +} + +func autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in *LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +func Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in *LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error { + return autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in, out, s) +} + +func autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *LocalObjectReference, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +func Convert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *LocalObjectReference, s conversion.Scope) error { + return autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in, out, s) +} + +func autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error { + out.Server = in.Server + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error { + return autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in, out, s) +} + +func autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *NFSVolumeSource, s conversion.Scope) error { + out.Server = in.Server + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *NFSVolumeSource, s conversion.Scope) error { + return autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in, out, s) +} + +func 
autoConvert_v1_Namespace_To_api_Namespace(in *Namespace, out *api.Namespace, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_NamespaceSpec_To_api_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_NamespaceStatus_To_api_NamespaceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_Namespace_To_api_Namespace(in *Namespace, out *api.Namespace, s conversion.Scope) error { + return autoConvert_v1_Namespace_To_api_Namespace(in, out, s) +} + +func autoConvert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *Namespace, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_NamespaceSpec_To_v1_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_NamespaceStatus_To_v1_NamespaceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *Namespace, s conversion.Scope) error { + return autoConvert_api_Namespace_To_v1_Namespace(in, out, s) +} + +func autoConvert_v1_NamespaceList_To_api_NamespaceList(in *NamespaceList, out *api.NamespaceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.Namespace)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_NamespaceList_To_api_NamespaceList(in *NamespaceList, out *api.NamespaceList, s conversion.Scope) error { + return autoConvert_v1_NamespaceList_To_api_NamespaceList(in, out, s) +} + +func autoConvert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *NamespaceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]Namespace)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *NamespaceList, s conversion.Scope) error { + return autoConvert_api_NamespaceList_To_v1_NamespaceList(in, out, s) +} + +func autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in *NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error { + out.Finalizers = *(*[]api.FinalizerName)(unsafe.Pointer(&in.Finalizers)) + return nil +} + +func Convert_v1_NamespaceSpec_To_api_NamespaceSpec(in *NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error { + return autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in, out, s) +} + +func autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *NamespaceSpec, s conversion.Scope) error { + out.Finalizers = *(*[]FinalizerName)(unsafe.Pointer(&in.Finalizers)) + return nil +} + +func Convert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *NamespaceSpec, s conversion.Scope) error { + return autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in, out, s) +} + +func autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in *NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error { + out.Phase = api.NamespacePhase(in.Phase) + return nil +} + +func Convert_v1_NamespaceStatus_To_api_NamespaceStatus(in *NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error { + return autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in, out, s) +} + +func autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, out *NamespaceStatus, s conversion.Scope) error { + out.Phase = NamespacePhase(in.Phase) + return nil +} + +func Convert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, out *NamespaceStatus, s 
conversion.Scope) error { + return autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus(in, out, s) +} + +func autoConvert_v1_Node_To_api_Node(in *Node, out *api.Node, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_NodeSpec_To_api_NodeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_NodeStatus_To_api_NodeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_Node_To_api_Node(in *Node, out *api.Node, s conversion.Scope) error { + return autoConvert_v1_Node_To_api_Node(in, out, s) +} + +func autoConvert_api_Node_To_v1_Node(in *api.Node, out *Node, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_NodeSpec_To_v1_NodeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_NodeStatus_To_v1_NodeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_api_Node_To_v1_Node(in *api.Node, out *Node, s conversion.Scope) error { + return autoConvert_api_Node_To_v1_Node(in, out, s) +} + +func autoConvert_v1_NodeAddress_To_api_NodeAddress(in *NodeAddress, out *api.NodeAddress, s conversion.Scope) error { + out.Type = api.NodeAddressType(in.Type) + out.Address = in.Address + return nil +} + +func Convert_v1_NodeAddress_To_api_NodeAddress(in *NodeAddress, out *api.NodeAddress, s conversion.Scope) error { + return autoConvert_v1_NodeAddress_To_api_NodeAddress(in, out, s) +} + +func autoConvert_api_NodeAddress_To_v1_NodeAddress(in *api.NodeAddress, out *NodeAddress, s conversion.Scope) error { + out.Type = NodeAddressType(in.Type) + out.Address = in.Address + return nil +} + +func Convert_api_NodeAddress_To_v1_NodeAddress(in *api.NodeAddress, out *NodeAddress, s conversion.Scope) error { + return autoConvert_api_NodeAddress_To_v1_NodeAddress(in, out, s) +} + +func autoConvert_v1_NodeAffinity_To_api_NodeAffinity(in *NodeAffinity, out *api.NodeAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = (*api.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]api.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +func Convert_v1_NodeAffinity_To_api_NodeAffinity(in *NodeAffinity, out *api.NodeAffinity, s conversion.Scope) error { + return autoConvert_v1_NodeAffinity_To_api_NodeAffinity(in, out, s) +} + +func autoConvert_api_NodeAffinity_To_v1_NodeAffinity(in *api.NodeAffinity, out *NodeAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = (*NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +func Convert_api_NodeAffinity_To_v1_NodeAffinity(in *api.NodeAffinity, out *NodeAffinity, s conversion.Scope) error { + return autoConvert_api_NodeAffinity_To_v1_NodeAffinity(in, out, s) +} + +func autoConvert_v1_NodeCondition_To_api_NodeCondition(in *NodeCondition, out *api.NodeCondition, s conversion.Scope) error { + out.Type = api.NodeConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + out.LastHeartbeatTime = in.LastHeartbeatTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func 
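
Each type gets a generated autoConvert_* function holding the field-by-field body and a thin exported Convert_* wrapper that simply delegates to it, which lets hand-written conversion code elsewhere in the package supplement the wrapper without touching the generated body; named string types such as ConditionStatus convert with a plain type cast. A rough sketch of that pairing, with hypothetical types in place of the real ones:

package main

import "fmt"

// Hypothetical named string types mirroring e.g. v1.ConditionStatus
// and its internal counterpart.
type v1ConditionStatus string
type apiConditionStatus string

type v1Condition struct{ Status v1ConditionStatus }
type apiCondition struct{ Status apiConditionStatus }

// Generated-style body: field by field, casting named types directly.
func autoConvertCondition(in *v1Condition, out *apiCondition) error {
	out.Status = apiConditionStatus(in.Status)
	return nil
}

// Exported wrapper that only delegates, like the Convert_* functions above.
func convertCondition(in *v1Condition, out *apiCondition) error {
	return autoConvertCondition(in, out)
}

func main() {
	in := v1Condition{Status: "True"}
	var out apiCondition
	if err := convertCondition(&in, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Status)
}
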
Convert_v1_NodeCondition_To_api_NodeCondition(in *NodeCondition, out *api.NodeCondition, s conversion.Scope) error { + return autoConvert_v1_NodeCondition_To_api_NodeCondition(in, out, s) +} + +func autoConvert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *NodeCondition, s conversion.Scope) error { + out.Type = NodeConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + out.LastHeartbeatTime = in.LastHeartbeatTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *NodeCondition, s conversion.Scope) error { + return autoConvert_api_NodeCondition_To_v1_NodeCondition(in, out, s) +} + +func autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error { + if err := Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { + return err + } + return nil +} + +func Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error { + return autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in, out, s) +} + +func autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *api.NodeDaemonEndpoints, out *NodeDaemonEndpoints, s conversion.Scope) error { + if err := Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { + return err + } + return nil +} + +func Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *api.NodeDaemonEndpoints, out *NodeDaemonEndpoints, s conversion.Scope) error { + return autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in, out, s) +} + +func autoConvert_v1_NodeList_To_api_NodeList(in *NodeList, out *api.NodeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.Node)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_NodeList_To_api_NodeList(in *NodeList, out *api.NodeList, s conversion.Scope) error { + return autoConvert_v1_NodeList_To_api_NodeList(in, out, s) +} + +func autoConvert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *NodeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]Node)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *NodeList, s conversion.Scope) error { + return autoConvert_api_NodeList_To_v1_NodeList(in, out, s) +} + +func autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *NodeProxyOptions, out *api.NodeProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +func Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *NodeProxyOptions, out *api.NodeProxyOptions, s conversion.Scope) error { + return autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in, out, s) +} + +func autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in *api.NodeProxyOptions, out *NodeProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +func Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in *api.NodeProxyOptions, out *NodeProxyOptions, s conversion.Scope) error { + return autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in, out, s) +} + +func autoConvert_v1_NodeResources_To_api_NodeResources(in *NodeResources, out *api.NodeResources, s conversion.Scope) error { + out.Capacity = 
*(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +func Convert_v1_NodeResources_To_api_NodeResources(in *NodeResources, out *api.NodeResources, s conversion.Scope) error { + return autoConvert_v1_NodeResources_To_api_NodeResources(in, out, s) +} + +func autoConvert_api_NodeResources_To_v1_NodeResources(in *api.NodeResources, out *NodeResources, s conversion.Scope) error { + out.Capacity = *(*ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +func Convert_api_NodeResources_To_v1_NodeResources(in *api.NodeResources, out *NodeResources, s conversion.Scope) error { + return autoConvert_api_NodeResources_To_v1_NodeResources(in, out, s) +} + +func autoConvert_v1_NodeSelector_To_api_NodeSelector(in *NodeSelector, out *api.NodeSelector, s conversion.Scope) error { + out.NodeSelectorTerms = *(*[]api.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) + return nil +} + +func Convert_v1_NodeSelector_To_api_NodeSelector(in *NodeSelector, out *api.NodeSelector, s conversion.Scope) error { + return autoConvert_v1_NodeSelector_To_api_NodeSelector(in, out, s) +} + +func autoConvert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *NodeSelector, s conversion.Scope) error { + out.NodeSelectorTerms = *(*[]NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) + return nil +} + +func Convert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *NodeSelector, s conversion.Scope) error { + return autoConvert_api_NodeSelector_To_v1_NodeSelector(in, out, s) +} + +func autoConvert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in *NodeSelectorRequirement, out *api.NodeSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = api.NodeSelectorOperator(in.Operator) + out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) + return nil +} + +func Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in *NodeSelectorRequirement, out *api.NodeSelectorRequirement, s conversion.Scope) error { + return autoConvert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in, out, s) +} + +func autoConvert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *api.NodeSelectorRequirement, out *NodeSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = NodeSelectorOperator(in.Operator) + out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) + return nil +} + +func Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *api.NodeSelectorRequirement, out *NodeSelectorRequirement, s conversion.Scope) error { + return autoConvert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in, out, s) +} + +func autoConvert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *NodeSelectorTerm, out *api.NodeSelectorTerm, s conversion.Scope) error { + out.MatchExpressions = *(*[]api.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) + return nil +} + +func Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *NodeSelectorTerm, out *api.NodeSelectorTerm, s conversion.Scope) error { + return autoConvert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in, out, s) +} + +func autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *NodeSelectorTerm, s conversion.Scope) error { + out.MatchExpressions = *(*[]NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) + return nil +} + +func Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *NodeSelectorTerm, s conversion.Scope) error { + return 
autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in, out, s) +} + +func autoConvert_v1_NodeSpec_To_api_NodeSpec(in *NodeSpec, out *api.NodeSpec, s conversion.Scope) error { + out.PodCIDR = in.PodCIDR + out.ExternalID = in.ExternalID + out.ProviderID = in.ProviderID + out.Unschedulable = in.Unschedulable + out.Taints = *(*[]api.Taint)(unsafe.Pointer(&in.Taints)) + return nil +} + +func Convert_v1_NodeSpec_To_api_NodeSpec(in *NodeSpec, out *api.NodeSpec, s conversion.Scope) error { + return autoConvert_v1_NodeSpec_To_api_NodeSpec(in, out, s) +} + +func autoConvert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *NodeSpec, s conversion.Scope) error { + out.PodCIDR = in.PodCIDR + out.ExternalID = in.ExternalID + out.ProviderID = in.ProviderID + out.Unschedulable = in.Unschedulable + out.Taints = *(*[]Taint)(unsafe.Pointer(&in.Taints)) + return nil +} + +func Convert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *NodeSpec, s conversion.Scope) error { + return autoConvert_api_NodeSpec_To_v1_NodeSpec(in, out, s) +} + +func autoConvert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeStatus, s conversion.Scope) error { + out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Allocatable = *(*api.ResourceList)(unsafe.Pointer(&in.Allocatable)) + out.Phase = api.NodePhase(in.Phase) + out.Conditions = *(*[]api.NodeCondition)(unsafe.Pointer(&in.Conditions)) + out.Addresses = *(*[]api.NodeAddress)(unsafe.Pointer(&in.Addresses)) + if err := Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { + return err + } + if err := Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { + return err + } + out.Images = *(*[]api.ContainerImage)(unsafe.Pointer(&in.Images)) + out.VolumesInUse = *(*[]api.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) + out.VolumesAttached = *(*[]api.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) + return nil +} + +func Convert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeStatus, s conversion.Scope) error { + return autoConvert_v1_NodeStatus_To_api_NodeStatus(in, out, s) +} + +func autoConvert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeStatus, s conversion.Scope) error { + out.Capacity = *(*ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Allocatable = *(*ResourceList)(unsafe.Pointer(&in.Allocatable)) + out.Phase = NodePhase(in.Phase) + out.Conditions = *(*[]NodeCondition)(unsafe.Pointer(&in.Conditions)) + out.Addresses = *(*[]NodeAddress)(unsafe.Pointer(&in.Addresses)) + if err := Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { + return err + } + if err := Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { + return err + } + out.Images = *(*[]ContainerImage)(unsafe.Pointer(&in.Images)) + out.VolumesInUse = *(*[]UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) + out.VolumesAttached = *(*[]AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) + return nil +} + +func Convert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeStatus, s conversion.Scope) error { + return autoConvert_api_NodeStatus_To_v1_NodeStatus(in, out, s) +} + +func autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *NodeSystemInfo, out *api.NodeSystemInfo, s conversion.Scope) error { + out.MachineID = in.MachineID + out.SystemUUID = in.SystemUUID + out.BootID = in.BootID + out.KernelVersion = 
in.KernelVersion + out.OSImage = in.OSImage + out.ContainerRuntimeVersion = in.ContainerRuntimeVersion + out.KubeletVersion = in.KubeletVersion + out.KubeProxyVersion = in.KubeProxyVersion + out.OperatingSystem = in.OperatingSystem + out.Architecture = in.Architecture + return nil +} + +func Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *NodeSystemInfo, out *api.NodeSystemInfo, s conversion.Scope) error { + return autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in, out, s) +} + +func autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *NodeSystemInfo, s conversion.Scope) error { + out.MachineID = in.MachineID + out.SystemUUID = in.SystemUUID + out.BootID = in.BootID + out.KernelVersion = in.KernelVersion + out.OSImage = in.OSImage + out.ContainerRuntimeVersion = in.ContainerRuntimeVersion + out.KubeletVersion = in.KubeletVersion + out.KubeProxyVersion = in.KubeProxyVersion + out.OperatingSystem = in.OperatingSystem + out.Architecture = in.Architecture + return nil +} + +func Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *NodeSystemInfo, s conversion.Scope) error { + return autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in, out, s) +} + +func autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error { + out.APIVersion = in.APIVersion + out.FieldPath = in.FieldPath + return nil +} + +func Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error { + return autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in, out, s) +} + +func autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *ObjectFieldSelector, s conversion.Scope) error { + out.APIVersion = in.APIVersion + out.FieldPath = in.FieldPath + return nil +} + +func Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *ObjectFieldSelector, s conversion.Scope) error { + return autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in, out, s) +} + +func autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in *ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { + out.Name = in.Name + out.GenerateName = in.GenerateName + out.Namespace = in.Namespace + out.SelfLink = in.SelfLink + out.UID = types.UID(in.UID) + out.ResourceVersion = in.ResourceVersion + out.Generation = in.Generation + out.CreationTimestamp = in.CreationTimestamp + out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) + out.DeletionGracePeriodSeconds = (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) + out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) + out.ClusterName = in.ClusterName + return nil +} + +func Convert_v1_ObjectMeta_To_api_ObjectMeta(in *ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { + return autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in, out, s) +} + +func autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *ObjectMeta, s conversion.Scope) error { + out.Name = in.Name + out.GenerateName = in.GenerateName + out.Namespace = in.Namespace + out.SelfLink = in.SelfLink + out.UID = types.UID(in.UID) + out.ResourceVersion = in.ResourceVersion + 
out.Generation = in.Generation + out.CreationTimestamp = in.CreationTimestamp + out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) + out.DeletionGracePeriodSeconds = (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) + out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) + out.ClusterName = in.ClusterName + return nil +} + +func Convert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *ObjectMeta, s conversion.Scope) error { + return autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in, out, s) +} + +func autoConvert_v1_ObjectReference_To_api_ObjectReference(in *ObjectReference, out *api.ObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.FieldPath = in.FieldPath + return nil +} + +func Convert_v1_ObjectReference_To_api_ObjectReference(in *ObjectReference, out *api.ObjectReference, s conversion.Scope) error { + return autoConvert_v1_ObjectReference_To_api_ObjectReference(in, out, s) +} + +func autoConvert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *ObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.FieldPath = in.FieldPath + return nil +} + +func Convert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *ObjectReference, s conversion.Scope) error { + return autoConvert_api_ObjectReference_To_v1_ObjectReference(in, out, s) +} + +func autoConvert_v1_PersistentVolume_To_api_PersistentVolume(in *PersistentVolume, out *api.PersistentVolume, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_PersistentVolume_To_api_PersistentVolume(in *PersistentVolume, out *api.PersistentVolume, s conversion.Scope) error { + return autoConvert_v1_PersistentVolume_To_api_PersistentVolume(in, out, s) +} + +func autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *PersistentVolume, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *PersistentVolume, s conversion.Scope) error { + return autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *PersistentVolumeClaim, out *api.PersistentVolumeClaim, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(&in.Spec, 
&out.Spec, s); err != nil { + return err + } + if err := Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *PersistentVolumeClaim, out *api.PersistentVolumeClaim, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in, out, s) +} + +func autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *PersistentVolumeClaim, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *PersistentVolumeClaim, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *PersistentVolumeClaimList, out *api.PersistentVolumeClaimList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *PersistentVolumeClaimList, out *api.PersistentVolumeClaimList, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in, out, s) +} + +func autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *PersistentVolumeClaimList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *PersistentVolumeClaimList, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error { + out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeName = in.VolumeName + out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) + return nil +} + +func Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in, out, s) +} + +func autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, s conversion.Scope) error { + out.AccessModes = *(*[]PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := 
Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeName = in.VolumeName + out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) + return nil +} + +func Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in *PersistentVolumeClaimStatus, out *api.PersistentVolumeClaimStatus, s conversion.Scope) error { + out.Phase = api.PersistentVolumeClaimPhase(in.Phase) + out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +func Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in *PersistentVolumeClaimStatus, out *api.PersistentVolumeClaimStatus, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in, out, s) +} + +func autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, s conversion.Scope) error { + out.Phase = PersistentVolumeClaimPhase(in.Phase) + out.AccessModes = *(*[]PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Capacity = *(*ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +func Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + out.ClaimName = in.ClaimName + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in, out, s) +} + +func autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + out.ClaimName = in.ClaimName + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in, out, s) +} + +func autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.PersistentVolume, len(*in)) + for i := range *in { + if err := Convert_v1_PersistentVolume_To_api_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } 
else { + out.Items = nil + } + return nil +} + +func Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in, out, s) +} + +func autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *PersistentVolumeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolume, len(*in)) + for i := range *in { + if err := Convert_api_PersistentVolume_To_v1_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *PersistentVolumeList, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in, out, s) +} + +func autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *PersistentVolumeSource, out *api.PersistentVolumeSource, s conversion.Scope) error { + out.GCEPersistentDisk = (*api.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*api.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.HostPath = (*api.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.Glusterfs = (*api.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.NFS = (*api.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.RBD = (*api.RBDVolumeSource)(unsafe.Pointer(in.RBD)) + out.ISCSI = (*api.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Cinder = (*api.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*api.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) + out.FC = (*api.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.Flocker = (*api.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.FlexVolume = (*api.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.AzureFile = (*api.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.VsphereVolume = (*api.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.Quobyte = (*api.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.AzureDisk = (*api.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*api.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.PortworxVolume = (*api.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*api.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) + return nil +} + +func Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *PersistentVolumeSource, out *api.PersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in, out, s) +} + +func autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *PersistentVolumeSource, s conversion.Scope) error { + out.GCEPersistentDisk = (*GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.HostPath = (*HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.Glusterfs = (*GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.NFS = (*NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.RBD = (*RBDVolumeSource)(unsafe.Pointer(in.RBD)) + out.Quobyte = 
(*QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.ISCSI = (*ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.FlexVolume = (*FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = (*CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) + out.FC = (*FCVolumeSource)(unsafe.Pointer(in.FC)) + out.Flocker = (*FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.AzureFile = (*AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.VsphereVolume = (*VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.AzureDisk = (*AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.PortworxVolume = (*PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) + return nil +} + +func Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *PersistentVolumeSource, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *PersistentVolumeSpec, out *api.PersistentVolumeSpec, s conversion.Scope) error { + out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) + if err := Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { + return err + } + out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.ClaimRef = (*api.ObjectReference)(unsafe.Pointer(in.ClaimRef)) + out.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) + out.StorageClassName = in.StorageClassName + return nil +} + +func Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *PersistentVolumeSpec, out *api.PersistentVolumeSpec, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in, out, s) +} + +func autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *PersistentVolumeSpec, s conversion.Scope) error { + out.Capacity = *(*ResourceList)(unsafe.Pointer(&in.Capacity)) + if err := Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { + return err + } + out.AccessModes = *(*[]PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.ClaimRef = (*ObjectReference)(unsafe.Pointer(in.ClaimRef)) + out.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) + out.StorageClassName = in.StorageClassName + return nil +} + +func Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *PersistentVolumeSpec, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in, out, s) +} + +func autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *PersistentVolumeStatus, out *api.PersistentVolumeStatus, s conversion.Scope) error { + out.Phase = api.PersistentVolumePhase(in.Phase) + out.Message = in.Message + out.Reason = in.Reason + return nil +} + +func Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *PersistentVolumeStatus, out *api.PersistentVolumeStatus, s conversion.Scope) error { + return 
autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in, out, s) +} + +func autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *PersistentVolumeStatus, s conversion.Scope) error { + out.Phase = PersistentVolumePhase(in.Phase) + out.Message = in.Message + out.Reason = in.Reason + return nil +} + +func Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *PersistentVolumeStatus, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in, out, s) +} + +func autoConvert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in *PhotonPersistentDiskVolumeSource, out *api.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + out.PdID = in.PdID + out.FSType = in.FSType + return nil +} + +func Convert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in *PhotonPersistentDiskVolumeSource, out *api.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *api.PhotonPersistentDiskVolumeSource, out *PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + out.PdID = in.PdID + out.FSType = in.FSType + return nil +} + +func Convert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *api.PhotonPersistentDiskVolumeSource, out *PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_PodAffinity_To_api_PodAffinity(in *PodAffinity, out *api.PodAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]api.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]api.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +func Convert_v1_PodAffinity_To_api_PodAffinity(in *PodAffinity, out *api.PodAffinity, s conversion.Scope) error { + return autoConvert_v1_PodAffinity_To_api_PodAffinity(in, out, s) +} + +func autoConvert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *PodAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +func 
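
Pod cannot be converted with an unsafe cast because its spec and status need real per-field conversion, so list types that embed it (PodList below, PersistentVolumeList earlier in this file) fall back to allocating a fresh slice and converting item by item, returning the first error encountered. A small sketch of that fallback loop, with hypothetical item types:

package main

import "fmt"

// Hypothetical item types whose layouts differ, so a zero-copy cast is
// not possible and each element has to be converted explicitly.
type v1Pod struct{ Name string }
type apiPod struct {
	Name     string
	Resolved bool
}

func convertPod(in *v1Pod, out *apiPod) error {
	out.Name = in.Name
	out.Resolved = true
	return nil
}

// Mirrors the shape of the generated list loops: preserve nil, otherwise
// make a same-length slice and convert element by element.
func convertPodItems(in []v1Pod) ([]apiPod, error) {
	if in == nil {
		return nil, nil
	}
	out := make([]apiPod, len(in))
	for i := range in {
		if err := convertPod(&in[i], &out[i]); err != nil {
			return nil, err
		}
	}
	return out, nil
}

func main() {
	items, err := convertPodItems([]v1Pod{{Name: "web-0"}})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", items[0])
}
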
Convert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *PodAffinity, s conversion.Scope) error { + return autoConvert_api_PodAffinity_To_v1_PodAffinity(in, out, s) +} + +func autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error { + out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.TopologyKey = in.TopologyKey + return nil +} + +func Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error { + return autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in, out, s) +} + +func autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *PodAffinityTerm, s conversion.Scope) error { + out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.TopologyKey = in.TopologyKey + return nil +} + +func Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *PodAffinityTerm, s conversion.Scope) error { + return autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in, out, s) +} + +func autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]api.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]api.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +func Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error { + return autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in, out, s) +} + +func autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *PodAntiAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +func Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *PodAntiAffinity, s conversion.Scope) error { + return autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in, out, s) +} + +func autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in *PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + return nil +} + +func Convert_v1_PodAttachOptions_To_api_PodAttachOptions(in *PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { + return autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in, out, s) +} + +func autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *PodAttachOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + return nil +} + +func Convert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *PodAttachOptions, s conversion.Scope) error { + return 
autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in, out, s) +} + +func autoConvert_v1_PodCondition_To_api_PodCondition(in *PodCondition, out *api.PodCondition, s conversion.Scope) error { + out.Type = api.PodConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v1_PodCondition_To_api_PodCondition(in *PodCondition, out *api.PodCondition, s conversion.Scope) error { + return autoConvert_v1_PodCondition_To_api_PodCondition(in, out, s) +} + +func autoConvert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *PodCondition, s conversion.Scope) error { + out.Type = PodConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *PodCondition, s conversion.Scope) error { + return autoConvert_api_PodCondition_To_v1_PodCondition(in, out, s) +} + +func autoConvert_v1_PodExecOptions_To_api_PodExecOptions(in *PodExecOptions, out *api.PodExecOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +func Convert_v1_PodExecOptions_To_api_PodExecOptions(in *PodExecOptions, out *api.PodExecOptions, s conversion.Scope) error { + return autoConvert_v1_PodExecOptions_To_api_PodExecOptions(in, out, s) +} + +func autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *PodExecOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +func Convert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *PodExecOptions, s conversion.Scope) error { + return autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in, out, s) +} + +func autoConvert_v1_PodList_To_api_PodList(in *PodList, out *api.PodList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.Pod, len(*in)) + for i := range *in { + if err := Convert_v1_Pod_To_api_Pod(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_PodList_To_api_PodList(in *PodList, out *api.PodList, s conversion.Scope) error { + return autoConvert_v1_PodList_To_api_PodList(in, out, s) +} + +func autoConvert_api_PodList_To_v1_PodList(in *api.PodList, out *PodList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Pod, len(*in)) + for i := range *in { + if err := Convert_api_Pod_To_v1_Pod(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_api_PodList_To_v1_PodList(in *api.PodList, out *PodList, s conversion.Scope) error { + return autoConvert_api_PodList_To_v1_PodList(in, out, s) +} + +func autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in *PodLogOptions, out *api.PodLogOptions, s conversion.Scope) error { + out.Container = in.Container + out.Follow = 
in.Follow + out.Previous = in.Previous + out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) + out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) + out.Timestamps = in.Timestamps + out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) + out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) + return nil +} + +func Convert_v1_PodLogOptions_To_api_PodLogOptions(in *PodLogOptions, out *api.PodLogOptions, s conversion.Scope) error { + return autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in, out, s) +} + +func autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *PodLogOptions, s conversion.Scope) error { + out.Container = in.Container + out.Follow = in.Follow + out.Previous = in.Previous + out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) + out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) + out.Timestamps = in.Timestamps + out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) + out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) + return nil +} + +func Convert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *PodLogOptions, s conversion.Scope) error { + return autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in, out, s) +} + +func autoConvert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in *PodPortForwardOptions, out *api.PodPortForwardOptions, s conversion.Scope) error { + out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) + return nil +} + +func Convert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in *PodPortForwardOptions, out *api.PodPortForwardOptions, s conversion.Scope) error { + return autoConvert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in, out, s) +} + +func autoConvert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *api.PodPortForwardOptions, out *PodPortForwardOptions, s conversion.Scope) error { + out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) + return nil +} + +func Convert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *api.PodPortForwardOptions, out *PodPortForwardOptions, s conversion.Scope) error { + return autoConvert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in, out, s) +} + +func autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions(in *PodProxyOptions, out *api.PodProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +func Convert_v1_PodProxyOptions_To_api_PodProxyOptions(in *PodProxyOptions, out *api.PodProxyOptions, s conversion.Scope) error { + return autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions(in, out, s) +} + +func autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *PodProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +func Convert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *PodProxyOptions, s conversion.Scope) error { + return autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in, out, s) +} + +func autoConvert_v1_PodSecurityContext_To_api_PodSecurityContext(in *PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error { + out.SELinuxOptions = (*api.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) + out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup)) + return nil +} + +func autoConvert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurityContext, out 
*PodSecurityContext, s conversion.Scope) error { + // INFO: in.HostNetwork opted out of conversion generation + // INFO: in.HostPID opted out of conversion generation + // INFO: in.HostIPC opted out of conversion generation + out.SELinuxOptions = (*SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) + out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup)) + return nil +} + +func autoConvert_v1_PodSignature_To_api_PodSignature(in *PodSignature, out *api.PodSignature, s conversion.Scope) error { + out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController)) + return nil +} + +func Convert_v1_PodSignature_To_api_PodSignature(in *PodSignature, out *api.PodSignature, s conversion.Scope) error { + return autoConvert_v1_PodSignature_To_api_PodSignature(in, out, s) +} + +func autoConvert_api_PodSignature_To_v1_PodSignature(in *api.PodSignature, out *PodSignature, s conversion.Scope) error { + out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController)) + return nil +} + +func Convert_api_PodSignature_To_v1_PodSignature(in *api.PodSignature, out *PodSignature, s conversion.Scope) error { + return autoConvert_api_PodSignature_To_v1_PodSignature(in, out, s) +} + +func autoConvert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversion.Scope) error { + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]api.Volume, len(*in)) + for i := range *in { + if err := Convert_v1_Volume_To_api_Volume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + out.InitContainers = *(*[]api.Container)(unsafe.Pointer(&in.InitContainers)) + out.Containers = *(*[]api.Container)(unsafe.Pointer(&in.Containers)) + out.RestartPolicy = api.RestartPolicy(in.RestartPolicy) + out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) + out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) + out.DNSPolicy = api.DNSPolicy(in.DNSPolicy) + out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) + out.ServiceAccountName = in.ServiceAccountName + // INFO: in.DeprecatedServiceAccount opted out of conversion generation + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + out.NodeName = in.NodeName + // INFO: in.HostNetwork opted out of conversion generation + // INFO: in.HostPID opted out of conversion generation + // INFO: in.HostIPC opted out of conversion generation + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(api.PodSecurityContext) + if err := Convert_v1_PodSecurityContext_To_api_PodSecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.ImagePullSecrets = *(*[]api.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.Hostname = in.Hostname + out.Subdomain = in.Subdomain + out.Affinity = (*api.Affinity)(unsafe.Pointer(in.Affinity)) + out.SchedulerName = in.SchedulerName + out.Tolerations = *(*[]api.Toleration)(unsafe.Pointer(&in.Tolerations)) + return nil +} + +func autoConvert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversion.Scope) error { + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]Volume, len(*in)) + for i := range *in { + 
if err := Convert_api_Volume_To_v1_Volume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + out.InitContainers = *(*[]Container)(unsafe.Pointer(&in.InitContainers)) + out.Containers = *(*[]Container)(unsafe.Pointer(&in.Containers)) + out.RestartPolicy = RestartPolicy(in.RestartPolicy) + out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) + out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) + out.DNSPolicy = DNSPolicy(in.DNSPolicy) + out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) + out.ServiceAccountName = in.ServiceAccountName + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + out.NodeName = in.NodeName + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(PodSecurityContext) + if err := Convert_api_PodSecurityContext_To_v1_PodSecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.ImagePullSecrets = *(*[]LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.Hostname = in.Hostname + out.Subdomain = in.Subdomain + out.Affinity = (*Affinity)(unsafe.Pointer(in.Affinity)) + out.SchedulerName = in.SchedulerName + out.Tolerations = *(*[]Toleration)(unsafe.Pointer(&in.Tolerations)) + return nil +} + +func autoConvert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus, s conversion.Scope) error { + out.Phase = api.PodPhase(in.Phase) + out.Conditions = *(*[]api.PodCondition)(unsafe.Pointer(&in.Conditions)) + out.Message = in.Message + out.Reason = in.Reason + out.HostIP = in.HostIP + out.PodIP = in.PodIP + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) + out.InitContainerStatuses = *(*[]api.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) + out.ContainerStatuses = *(*[]api.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) + out.QOSClass = api.PodQOSClass(in.QOSClass) + return nil +} + +func Convert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus, s conversion.Scope) error { + return autoConvert_v1_PodStatus_To_api_PodStatus(in, out, s) +} + +func autoConvert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus, s conversion.Scope) error { + out.Phase = PodPhase(in.Phase) + out.Conditions = *(*[]PodCondition)(unsafe.Pointer(&in.Conditions)) + out.Message = in.Message + out.Reason = in.Reason + out.HostIP = in.HostIP + out.PodIP = in.PodIP + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) + out.QOSClass = PodQOSClass(in.QOSClass) + out.InitContainerStatuses = *(*[]ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) + out.ContainerStatuses = *(*[]ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) + return nil +} + +func Convert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus, s conversion.Scope) error { + return autoConvert_api_PodStatus_To_v1_PodStatus(in, out, s) +} + +func autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, 
&out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemplate, s conversion.Scope) error { + return autoConvert_v1_PodTemplate_To_api_PodTemplate(in, out, s) +} + +func autoConvert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *PodTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *PodTemplate, s conversion.Scope) error { + return autoConvert_api_PodTemplate_To_v1_PodTemplate(in, out, s) +} + +func autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in *PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.PodTemplate, len(*in)) + for i := range *in { + if err := Convert_v1_PodTemplate_To_api_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_PodTemplateList_To_api_PodTemplateList(in *PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error { + return autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in, out, s) +} + +func autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *PodTemplateList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodTemplate, len(*in)) + for i := range *in { + if err := Convert_api_PodTemplate_To_v1_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *PodTemplateList, s conversion.Scope) error { + return autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in, out, s) +} + +func autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in *PortworxVolumeSource, out *api.PortworxVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in *PortworxVolumeSource, out *api.PortworxVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in, out, s) +} + +func autoConvert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *api.PortworxVolumeSource, out *PortworxVolumeSource, s 
conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *api.PortworxVolumeSource, out *PortworxVolumeSource, s conversion.Scope) error { + return autoConvert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in, out, s) +} + +func autoConvert_v1_Preconditions_To_api_Preconditions(in *Preconditions, out *api.Preconditions, s conversion.Scope) error { + out.UID = (*types.UID)(unsafe.Pointer(in.UID)) + return nil +} + +func Convert_v1_Preconditions_To_api_Preconditions(in *Preconditions, out *api.Preconditions, s conversion.Scope) error { + return autoConvert_v1_Preconditions_To_api_Preconditions(in, out, s) +} + +func autoConvert_api_Preconditions_To_v1_Preconditions(in *api.Preconditions, out *Preconditions, s conversion.Scope) error { + out.UID = (*types.UID)(unsafe.Pointer(in.UID)) + return nil +} + +func Convert_api_Preconditions_To_v1_Preconditions(in *api.Preconditions, out *Preconditions, s conversion.Scope) error { + return autoConvert_api_Preconditions_To_v1_Preconditions(in, out, s) +} + +func autoConvert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry(in *PreferAvoidPodsEntry, out *api.PreferAvoidPodsEntry, s conversion.Scope) error { + if err := Convert_v1_PodSignature_To_api_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { + return err + } + out.EvictionTime = in.EvictionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry(in *PreferAvoidPodsEntry, out *api.PreferAvoidPodsEntry, s conversion.Scope) error { + return autoConvert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry(in, out, s) +} + +func autoConvert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *api.PreferAvoidPodsEntry, out *PreferAvoidPodsEntry, s conversion.Scope) error { + if err := Convert_api_PodSignature_To_v1_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { + return err + } + out.EvictionTime = in.EvictionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *api.PreferAvoidPodsEntry, out *PreferAvoidPodsEntry, s conversion.Scope) error { + return autoConvert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in, out, s) +} + +func autoConvert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in *PreferredSchedulingTerm, out *api.PreferredSchedulingTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { + return err + } + return nil +} + +func Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in *PreferredSchedulingTerm, out *api.PreferredSchedulingTerm, s conversion.Scope) error { + return autoConvert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in, out, s) +} + +func autoConvert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *api.PreferredSchedulingTerm, out *PreferredSchedulingTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { + return err + } + return nil +} + +func Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *api.PreferredSchedulingTerm, out *PreferredSchedulingTerm, s conversion.Scope) error { + return 
autoConvert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in, out, s) +} + +func autoConvert_v1_Probe_To_api_Probe(in *Probe, out *api.Probe, s conversion.Scope) error { + if err := Convert_v1_Handler_To_api_Handler(&in.Handler, &out.Handler, s); err != nil { + return err + } + out.InitialDelaySeconds = in.InitialDelaySeconds + out.TimeoutSeconds = in.TimeoutSeconds + out.PeriodSeconds = in.PeriodSeconds + out.SuccessThreshold = in.SuccessThreshold + out.FailureThreshold = in.FailureThreshold + return nil +} + +func Convert_v1_Probe_To_api_Probe(in *Probe, out *api.Probe, s conversion.Scope) error { + return autoConvert_v1_Probe_To_api_Probe(in, out, s) +} + +func autoConvert_api_Probe_To_v1_Probe(in *api.Probe, out *Probe, s conversion.Scope) error { + if err := Convert_api_Handler_To_v1_Handler(&in.Handler, &out.Handler, s); err != nil { + return err + } + out.InitialDelaySeconds = in.InitialDelaySeconds + out.TimeoutSeconds = in.TimeoutSeconds + out.PeriodSeconds = in.PeriodSeconds + out.SuccessThreshold = in.SuccessThreshold + out.FailureThreshold = in.FailureThreshold + return nil +} + +func Convert_api_Probe_To_v1_Probe(in *api.Probe, out *Probe, s conversion.Scope) error { + return autoConvert_api_Probe_To_v1_Probe(in, out, s) +} + +func autoConvert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in *ProjectedVolumeSource, out *api.ProjectedVolumeSource, s conversion.Scope) error { + out.Sources = *(*[]api.VolumeProjection)(unsafe.Pointer(&in.Sources)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +func Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in *ProjectedVolumeSource, out *api.ProjectedVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in, out, s) +} + +func autoConvert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *api.ProjectedVolumeSource, out *ProjectedVolumeSource, s conversion.Scope) error { + out.Sources = *(*[]VolumeProjection)(unsafe.Pointer(&in.Sources)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +func Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *api.ProjectedVolumeSource, out *ProjectedVolumeSource, s conversion.Scope) error { + return autoConvert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in, out, s) +} + +func autoConvert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in *QuobyteVolumeSource, out *api.QuobyteVolumeSource, s conversion.Scope) error { + out.Registry = in.Registry + out.Volume = in.Volume + out.ReadOnly = in.ReadOnly + out.User = in.User + out.Group = in.Group + return nil +} + +func Convert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in *QuobyteVolumeSource, out *api.QuobyteVolumeSource, s conversion.Scope) error { + return autoConvert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in, out, s) +} + +func autoConvert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *api.QuobyteVolumeSource, out *QuobyteVolumeSource, s conversion.Scope) error { + out.Registry = in.Registry + out.Volume = in.Volume + out.ReadOnly = in.ReadOnly + out.User = in.User + out.Group = in.Group + return nil +} + +func Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *api.QuobyteVolumeSource, out *QuobyteVolumeSource, s conversion.Scope) error { + return autoConvert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in, out, s) +} + +func autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, out *api.RBDVolumeSource, s 
conversion.Scope) error { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error { + return autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in, out, s) +} + +func autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *RBDVolumeSource, s conversion.Scope) error { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *RBDVolumeSource, s conversion.Scope) error { + return autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in, out, s) +} + +func autoConvert_v1_RangeAllocation_To_api_RangeAllocation(in *RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Range = in.Range + out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) + return nil +} + +func Convert_v1_RangeAllocation_To_api_RangeAllocation(in *RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error { + return autoConvert_v1_RangeAllocation_To_api_RangeAllocation(in, out, s) +} + +func autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *RangeAllocation, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Range = in.Range + out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) + return nil +} + +func Convert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *RangeAllocation, s conversion.Scope) error { + return autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in, out, s) +} + +func autoConvert_v1_ReplicationController_To_api_ReplicationController(in *ReplicationController, out *api.ReplicationController, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_ReplicationController_To_api_ReplicationController(in *ReplicationController, out *api.ReplicationController, s conversion.Scope) error { + return autoConvert_v1_ReplicationController_To_api_ReplicationController(in, out, s) +} + +func autoConvert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *ReplicationController, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *ReplicationController, s 
conversion.Scope) error { + return autoConvert_api_ReplicationController_To_v1_ReplicationController(in, out, s) +} + +func autoConvert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition(in *ReplicationControllerCondition, out *api.ReplicationControllerCondition, s conversion.Scope) error { + out.Type = api.ReplicationControllerConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition(in *ReplicationControllerCondition, out *api.ReplicationControllerCondition, s conversion.Scope) error { + return autoConvert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition(in, out, s) +} + +func autoConvert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *api.ReplicationControllerCondition, out *ReplicationControllerCondition, s conversion.Scope) error { + out.Type = ReplicationControllerConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *api.ReplicationControllerCondition, out *ReplicationControllerCondition, s conversion.Scope) error { + return autoConvert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in, out, s) +} + +func autoConvert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in *ReplicationControllerList, out *api.ReplicationControllerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.ReplicationController, len(*in)) + for i := range *in { + if err := Convert_v1_ReplicationController_To_api_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in *ReplicationControllerList, out *api.ReplicationControllerList, s conversion.Scope) error { + return autoConvert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in, out, s) +} + +func autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *ReplicationControllerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicationController, len(*in)) + for i := range *in { + if err := Convert_api_ReplicationController_To_v1_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *ReplicationControllerList, s conversion.Scope) error { + return autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in, out, s) +} + +func autoConvert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error { + if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + if 
in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(api.PodTemplateSpec) + if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(*in, *out, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func autoConvert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *ReplicationControllerSpec, s conversion.Scope) error { + if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(PodTemplateSpec) + if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(*in, *out, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in *ReplicationControllerStatus, out *api.ReplicationControllerStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + out.Conditions = *(*[]api.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +func Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in *ReplicationControllerStatus, out *api.ReplicationControllerStatus, s conversion.Scope) error { + return autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in, out, s) +} + +func autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *ReplicationControllerStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + out.Conditions = *(*[]ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +func Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *ReplicationControllerStatus, s conversion.Scope) error { + return autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in, out, s) +} + +func autoConvert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in *ResourceFieldSelector, out *api.ResourceFieldSelector, s conversion.Scope) error { + out.ContainerName = in.ContainerName + out.Resource = in.Resource + out.Divisor = in.Divisor + return nil +} + +func Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in *ResourceFieldSelector, out *api.ResourceFieldSelector, s conversion.Scope) error { + return autoConvert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in, out, s) +} + +func autoConvert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *api.ResourceFieldSelector, out *ResourceFieldSelector, s conversion.Scope) error { + out.ContainerName = in.ContainerName + out.Resource = in.Resource + out.Divisor = in.Divisor + return nil +} + +func Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *api.ResourceFieldSelector, out *ResourceFieldSelector, s conversion.Scope) error { + return autoConvert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in, out, s) +} + +func 
autoConvert_v1_ResourceQuota_To_api_ResourceQuota(in *ResourceQuota, out *api.ResourceQuota, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_ResourceQuota_To_api_ResourceQuota(in *ResourceQuota, out *api.ResourceQuota, s conversion.Scope) error { + return autoConvert_v1_ResourceQuota_To_api_ResourceQuota(in, out, s) +} + +func autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *ResourceQuota, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *ResourceQuota, s conversion.Scope) error { + return autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in, out, s) +} + +func autoConvert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *ResourceQuotaList, out *api.ResourceQuotaList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.ResourceQuota)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *ResourceQuotaList, out *api.ResourceQuotaList, s conversion.Scope) error { + return autoConvert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in, out, s) +} + +func autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *ResourceQuotaList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]ResourceQuota)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *ResourceQuotaList, s conversion.Scope) error { + return autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in, out, s) +} + +func autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *ResourceQuotaSpec, out *api.ResourceQuotaSpec, s conversion.Scope) error { + out.Hard = *(*api.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Scopes = *(*[]api.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) + return nil +} + +func Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *ResourceQuotaSpec, out *api.ResourceQuotaSpec, s conversion.Scope) error { + return autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in, out, s) +} + +func autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *ResourceQuotaSpec, s conversion.Scope) error { + out.Hard = *(*ResourceList)(unsafe.Pointer(&in.Hard)) + out.Scopes = *(*[]ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) + return nil +} + +func Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *ResourceQuotaSpec, s conversion.Scope) error { + return autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in, out, s) +} + +func autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuotaStatus, out *api.ResourceQuotaStatus, s conversion.Scope) error { + out.Hard = *(*api.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Used = *(*api.ResourceList)(unsafe.Pointer(&in.Used)) + return nil +} + +func 
Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuotaStatus, out *api.ResourceQuotaStatus, s conversion.Scope) error { + return autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in, out, s) +} + +func autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *ResourceQuotaStatus, s conversion.Scope) error { + out.Hard = *(*ResourceList)(unsafe.Pointer(&in.Hard)) + out.Used = *(*ResourceList)(unsafe.Pointer(&in.Used)) + return nil +} + +func Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *ResourceQuotaStatus, s conversion.Scope) error { + return autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in, out, s) +} + +func autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error { + out.Limits = *(*api.ResourceList)(unsafe.Pointer(&in.Limits)) + out.Requests = *(*api.ResourceList)(unsafe.Pointer(&in.Requests)) + return nil +} + +func Convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error { + return autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in, out, s) +} + +func autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error { + out.Limits = *(*ResourceList)(unsafe.Pointer(&in.Limits)) + out.Requests = *(*ResourceList)(unsafe.Pointer(&in.Requests)) + return nil +} + +func Convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error { + return autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in, out, s) +} + +func autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in *SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error { + out.User = in.User + out.Role = in.Role + out.Type = in.Type + out.Level = in.Level + return nil +} + +func Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in *SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error { + return autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in, out, s) +} + +func autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *SELinuxOptions, s conversion.Scope) error { + out.User = in.User + out.Role = in.Role + out.Type = in.Type + out.Level = in.Level + return nil +} + +func Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *SELinuxOptions, s conversion.Scope) error { + return autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in, out, s) +} + +func autoConvert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in *ScaleIOVolumeSource, out *api.ScaleIOVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in *ScaleIOVolumeSource, out *api.ScaleIOVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in, out, s) +} + +func autoConvert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in 
*api.ScaleIOVolumeSource, out *ScaleIOVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *api.ScaleIOVolumeSource, out *ScaleIOVolumeSource, s conversion.Scope) error { + return autoConvert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in, out, s) +} + +func autoConvert_v1_Secret_To_api_Secret(in *Secret, out *api.Secret, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) + // INFO: in.StringData opted out of conversion generation + out.Type = api.SecretType(in.Type) + return nil +} + +func autoConvert_api_Secret_To_v1_Secret(in *api.Secret, out *Secret, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) + out.Type = SecretType(in.Type) + return nil +} + +func Convert_api_Secret_To_v1_Secret(in *api.Secret, out *Secret, s conversion.Scope) error { + return autoConvert_api_Secret_To_v1_Secret(in, out, s) +} + +func autoConvert_v1_SecretEnvSource_To_api_SecretEnvSource(in *SecretEnvSource, out *api.SecretEnvSource, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_v1_SecretEnvSource_To_api_SecretEnvSource(in *SecretEnvSource, out *api.SecretEnvSource, s conversion.Scope) error { + return autoConvert_v1_SecretEnvSource_To_api_SecretEnvSource(in, out, s) +} + +func autoConvert_api_SecretEnvSource_To_v1_SecretEnvSource(in *api.SecretEnvSource, out *SecretEnvSource, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_api_SecretEnvSource_To_v1_SecretEnvSource(in *api.SecretEnvSource, out *SecretEnvSource, s conversion.Scope) error { + return autoConvert_api_SecretEnvSource_To_v1_SecretEnvSource(in, out, s) +} + +func autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in *SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_v1_SecretKeySelector_To_api_SecretKeySelector(in *SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error { + return autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in, out, s) +} + +func autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *SecretKeySelector, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return 
nil +} + +func Convert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *SecretKeySelector, s conversion.Scope) error { + return autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in, out, s) +} + +func autoConvert_v1_SecretList_To_api_SecretList(in *SecretList, out *api.SecretList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.Secret, len(*in)) + for i := range *in { + if err := Convert_v1_Secret_To_api_Secret(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_SecretList_To_api_SecretList(in *SecretList, out *api.SecretList, s conversion.Scope) error { + return autoConvert_v1_SecretList_To_api_SecretList(in, out, s) +} + +func autoConvert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *SecretList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Secret, len(*in)) + for i := range *in { + if err := Convert_api_Secret_To_v1_Secret(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *SecretList, s conversion.Scope) error { + return autoConvert_api_SecretList_To_v1_SecretList(in, out, s) +} + +func autoConvert_v1_SecretProjection_To_api_SecretProjection(in *SecretProjection, out *api.SecretProjection, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_v1_SecretProjection_To_api_SecretProjection(in *SecretProjection, out *api.SecretProjection, s conversion.Scope) error { + return autoConvert_v1_SecretProjection_To_api_SecretProjection(in, out, s) +} + +func autoConvert_api_SecretProjection_To_v1_SecretProjection(in *api.SecretProjection, out *SecretProjection, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_api_SecretProjection_To_v1_SecretProjection(in *api.SecretProjection, out *SecretProjection, s conversion.Scope) error { + return autoConvert_api_SecretProjection_To_v1_SecretProjection(in, out, s) +} + +func autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error { + return autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in, out, s) +} + +func autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *SecretVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.Items = 
*(*[]KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *SecretVolumeSource, s conversion.Scope) error { + return autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in, out, s) +} + +func autoConvert_v1_SecurityContext_To_api_SecurityContext(in *SecurityContext, out *api.SecurityContext, s conversion.Scope) error { + out.Capabilities = (*api.Capabilities)(unsafe.Pointer(in.Capabilities)) + out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) + out.SELinuxOptions = (*api.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) + return nil +} + +func Convert_v1_SecurityContext_To_api_SecurityContext(in *SecurityContext, out *api.SecurityContext, s conversion.Scope) error { + return autoConvert_v1_SecurityContext_To_api_SecurityContext(in, out, s) +} + +func autoConvert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *SecurityContext, s conversion.Scope) error { + out.Capabilities = (*Capabilities)(unsafe.Pointer(in.Capabilities)) + out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) + out.SELinuxOptions = (*SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) + return nil +} + +func Convert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *SecurityContext, s conversion.Scope) error { + return autoConvert_api_SecurityContext_To_v1_SecurityContext(in, out, s) +} + +func autoConvert_v1_SerializedReference_To_api_SerializedReference(in *SerializedReference, out *api.SerializedReference, s conversion.Scope) error { + if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Reference, &out.Reference, s); err != nil { + return err + } + return nil +} + +func Convert_v1_SerializedReference_To_api_SerializedReference(in *SerializedReference, out *api.SerializedReference, s conversion.Scope) error { + return autoConvert_v1_SerializedReference_To_api_SerializedReference(in, out, s) +} + +func autoConvert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *SerializedReference, s conversion.Scope) error { + if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Reference, &out.Reference, s); err != nil { + return err + } + return nil +} + +func Convert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *SerializedReference, s conversion.Scope) error { + return autoConvert_api_SerializedReference_To_v1_SerializedReference(in, out, s) +} + +func autoConvert_v1_Service_To_api_Service(in *Service, out *api.Service, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ServiceSpec_To_api_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ServiceStatus_To_api_ServiceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_Service_To_api_Service(in *Service, out *api.Service, s conversion.Scope) error { + return autoConvert_v1_Service_To_api_Service(in, out, s) +} 
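// --- Illustrative aside, not part of the generated file or this vendor update ---
// The generated Convert_* wrappers above are plain exported functions, so a small
// standalone sketch can exercise one of them directly. This assumes the vendored
// k8s.io/client-go/pkg/api and k8s.io/client-go/pkg/api/v1 packages added by this PR,
// and it assumes a nil conversion.Scope is acceptable here because none of the field
// conversions reached from Convert_v1_Service_To_api_Service consult the scope.
//
//	package main
//
//	import (
//		"fmt"
//
//		"k8s.io/client-go/pkg/api"
//		v1 "k8s.io/client-go/pkg/api/v1"
//	)
//
//	func main() {
//		in := &v1.Service{}
//		in.Name = "example"                    // hypothetical object, for illustration only
//		in.Spec.Type = v1.ServiceTypeClusterIP // exercised by the ServiceSpec conversion
//
//		out := &api.Service{}
//		// Dispatches through autoConvert_v1_Service_To_api_Service defined above.
//		if err := v1.Convert_v1_Service_To_api_Service(in, out, nil); err != nil {
//			fmt.Println("conversion failed:", err)
//			return
//		}
//		fmt.Println(out.Name, out.Spec.Type) // "example ClusterIP"
//	}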
+ +func autoConvert_api_Service_To_v1_Service(in *api.Service, out *Service, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_ServiceSpec_To_v1_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_ServiceStatus_To_v1_ServiceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_api_Service_To_v1_Service(in *api.Service, out *Service, s conversion.Scope) error { + return autoConvert_api_Service_To_v1_Service(in, out, s) +} + +func autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in *ServiceAccount, out *api.ServiceAccount, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Secrets = *(*[]api.ObjectReference)(unsafe.Pointer(&in.Secrets)) + out.ImagePullSecrets = *(*[]api.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + return nil +} + +func Convert_v1_ServiceAccount_To_api_ServiceAccount(in *ServiceAccount, out *api.ServiceAccount, s conversion.Scope) error { + return autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in, out, s) +} + +func autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *ServiceAccount, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Secrets = *(*[]ObjectReference)(unsafe.Pointer(&in.Secrets)) + out.ImagePullSecrets = *(*[]LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + return nil +} + +func Convert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *ServiceAccount, s conversion.Scope) error { + return autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in, out, s) +} + +func autoConvert_v1_ServiceAccountList_To_api_ServiceAccountList(in *ServiceAccountList, out *api.ServiceAccountList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.ServiceAccount)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_ServiceAccountList_To_api_ServiceAccountList(in *ServiceAccountList, out *api.ServiceAccountList, s conversion.Scope) error { + return autoConvert_v1_ServiceAccountList_To_api_ServiceAccountList(in, out, s) +} + +func autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *ServiceAccountList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]ServiceAccount)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *ServiceAccountList, s conversion.Scope) error { + return autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in, out, s) +} + +func autoConvert_v1_ServiceList_To_api_ServiceList(in *ServiceList, out *api.ServiceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.Service, len(*in)) + for i := range *in { + if err := Convert_v1_Service_To_api_Service(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_ServiceList_To_api_ServiceList(in *ServiceList, out *api.ServiceList, s conversion.Scope) error { + return autoConvert_v1_ServiceList_To_api_ServiceList(in, out, s) +} + +func autoConvert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *ServiceList, s conversion.Scope) error { + out.ListMeta = 
in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + if err := Convert_api_Service_To_v1_Service(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *ServiceList, s conversion.Scope) error { + return autoConvert_api_ServiceList_To_v1_ServiceList(in, out, s) +} + +func autoConvert_v1_ServicePort_To_api_ServicePort(in *ServicePort, out *api.ServicePort, s conversion.Scope) error { + out.Name = in.Name + out.Protocol = api.Protocol(in.Protocol) + out.Port = in.Port + out.TargetPort = in.TargetPort + out.NodePort = in.NodePort + return nil +} + +func Convert_v1_ServicePort_To_api_ServicePort(in *ServicePort, out *api.ServicePort, s conversion.Scope) error { + return autoConvert_v1_ServicePort_To_api_ServicePort(in, out, s) +} + +func autoConvert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *ServicePort, s conversion.Scope) error { + out.Name = in.Name + out.Protocol = Protocol(in.Protocol) + out.Port = in.Port + out.TargetPort = in.TargetPort + out.NodePort = in.NodePort + return nil +} + +func Convert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *ServicePort, s conversion.Scope) error { + return autoConvert_api_ServicePort_To_v1_ServicePort(in, out, s) +} + +func autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in *ServiceProxyOptions, out *api.ServiceProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +func Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in *ServiceProxyOptions, out *api.ServiceProxyOptions, s conversion.Scope) error { + return autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in, out, s) +} + +func autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *ServiceProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +func Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *ServiceProxyOptions, s conversion.Scope) error { + return autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in, out, s) +} + +func autoConvert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error { + out.Ports = *(*[]api.ServicePort)(unsafe.Pointer(&in.Ports)) + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + out.ClusterIP = in.ClusterIP + out.Type = api.ServiceType(in.Type) + out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) + // INFO: in.DeprecatedPublicIPs opted out of conversion generation + out.SessionAffinity = api.ServiceAffinity(in.SessionAffinity) + out.LoadBalancerIP = in.LoadBalancerIP + out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) + out.ExternalName = in.ExternalName + return nil +} + +func autoConvert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *ServiceSpec, s conversion.Scope) error { + out.Type = ServiceType(in.Type) + out.Ports = *(*[]ServicePort)(unsafe.Pointer(&in.Ports)) + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + out.ClusterIP = in.ClusterIP + out.ExternalName = in.ExternalName + out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) + out.LoadBalancerIP = in.LoadBalancerIP + out.SessionAffinity = ServiceAffinity(in.SessionAffinity) + out.LoadBalancerSourceRanges = 
*(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) + return nil +} + +func autoConvert_v1_ServiceStatus_To_api_ServiceStatus(in *ServiceStatus, out *api.ServiceStatus, s conversion.Scope) error { + if err := Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { + return err + } + return nil +} + +func Convert_v1_ServiceStatus_To_api_ServiceStatus(in *ServiceStatus, out *api.ServiceStatus, s conversion.Scope) error { + return autoConvert_v1_ServiceStatus_To_api_ServiceStatus(in, out, s) +} + +func autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *ServiceStatus, s conversion.Scope) error { + if err := Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { + return err + } + return nil +} + +func Convert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *ServiceStatus, s conversion.Scope) error { + return autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in, out, s) +} + +func autoConvert_v1_Sysctl_To_api_Sysctl(in *Sysctl, out *api.Sysctl, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +func Convert_v1_Sysctl_To_api_Sysctl(in *Sysctl, out *api.Sysctl, s conversion.Scope) error { + return autoConvert_v1_Sysctl_To_api_Sysctl(in, out, s) +} + +func autoConvert_api_Sysctl_To_v1_Sysctl(in *api.Sysctl, out *Sysctl, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +func Convert_api_Sysctl_To_v1_Sysctl(in *api.Sysctl, out *Sysctl, s conversion.Scope) error { + return autoConvert_api_Sysctl_To_v1_Sysctl(in, out, s) +} + +func autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in *TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error { + out.Port = in.Port + return nil +} + +func Convert_v1_TCPSocketAction_To_api_TCPSocketAction(in *TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error { + return autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in, out, s) +} + +func autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *TCPSocketAction, s conversion.Scope) error { + out.Port = in.Port + return nil +} + +func Convert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *TCPSocketAction, s conversion.Scope) error { + return autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in, out, s) +} + +func autoConvert_v1_Taint_To_api_Taint(in *Taint, out *api.Taint, s conversion.Scope) error { + out.Key = in.Key + out.Value = in.Value + out.Effect = api.TaintEffect(in.Effect) + out.TimeAdded = in.TimeAdded + return nil +} + +func Convert_v1_Taint_To_api_Taint(in *Taint, out *api.Taint, s conversion.Scope) error { + return autoConvert_v1_Taint_To_api_Taint(in, out, s) +} + +func autoConvert_api_Taint_To_v1_Taint(in *api.Taint, out *Taint, s conversion.Scope) error { + out.Key = in.Key + out.Value = in.Value + out.Effect = TaintEffect(in.Effect) + out.TimeAdded = in.TimeAdded + return nil +} + +func Convert_api_Taint_To_v1_Taint(in *api.Taint, out *Taint, s conversion.Scope) error { + return autoConvert_api_Taint_To_v1_Taint(in, out, s) +} + +func autoConvert_v1_Toleration_To_api_Toleration(in *Toleration, out *api.Toleration, s conversion.Scope) error { + out.Key = in.Key + out.Operator = api.TolerationOperator(in.Operator) + out.Value = in.Value + out.Effect = api.TaintEffect(in.Effect) + out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) + return nil +} + +func 
Convert_v1_Toleration_To_api_Toleration(in *Toleration, out *api.Toleration, s conversion.Scope) error { + return autoConvert_v1_Toleration_To_api_Toleration(in, out, s) +} + +func autoConvert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *Toleration, s conversion.Scope) error { + out.Key = in.Key + out.Operator = TolerationOperator(in.Operator) + out.Value = in.Value + out.Effect = TaintEffect(in.Effect) + out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) + return nil +} + +func Convert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *Toleration, s conversion.Scope) error { + return autoConvert_api_Toleration_To_v1_Toleration(in, out, s) +} + +func autoConvert_v1_Volume_To_api_Volume(in *Volume, out *api.Volume, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_v1_VolumeSource_To_api_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { + return err + } + return nil +} + +func Convert_v1_Volume_To_api_Volume(in *Volume, out *api.Volume, s conversion.Scope) error { + return autoConvert_v1_Volume_To_api_Volume(in, out, s) +} + +func autoConvert_api_Volume_To_v1_Volume(in *api.Volume, out *Volume, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_api_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { + return err + } + return nil +} + +func Convert_api_Volume_To_v1_Volume(in *api.Volume, out *Volume, s conversion.Scope) error { + return autoConvert_api_Volume_To_v1_Volume(in, out, s) +} + +func autoConvert_v1_VolumeMount_To_api_VolumeMount(in *VolumeMount, out *api.VolumeMount, s conversion.Scope) error { + out.Name = in.Name + out.ReadOnly = in.ReadOnly + out.MountPath = in.MountPath + out.SubPath = in.SubPath + return nil +} + +func Convert_v1_VolumeMount_To_api_VolumeMount(in *VolumeMount, out *api.VolumeMount, s conversion.Scope) error { + return autoConvert_v1_VolumeMount_To_api_VolumeMount(in, out, s) +} + +func autoConvert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *VolumeMount, s conversion.Scope) error { + out.Name = in.Name + out.ReadOnly = in.ReadOnly + out.MountPath = in.MountPath + out.SubPath = in.SubPath + return nil +} + +func Convert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *VolumeMount, s conversion.Scope) error { + return autoConvert_api_VolumeMount_To_v1_VolumeMount(in, out, s) +} + +func autoConvert_v1_VolumeProjection_To_api_VolumeProjection(in *VolumeProjection, out *api.VolumeProjection, s conversion.Scope) error { + out.Secret = (*api.SecretProjection)(unsafe.Pointer(in.Secret)) + out.DownwardAPI = (*api.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) + out.ConfigMap = (*api.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) + return nil +} + +func Convert_v1_VolumeProjection_To_api_VolumeProjection(in *VolumeProjection, out *api.VolumeProjection, s conversion.Scope) error { + return autoConvert_v1_VolumeProjection_To_api_VolumeProjection(in, out, s) +} + +func autoConvert_api_VolumeProjection_To_v1_VolumeProjection(in *api.VolumeProjection, out *VolumeProjection, s conversion.Scope) error { + out.Secret = (*SecretProjection)(unsafe.Pointer(in.Secret)) + out.DownwardAPI = (*DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) + out.ConfigMap = (*ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) + return nil +} + +func Convert_api_VolumeProjection_To_v1_VolumeProjection(in *api.VolumeProjection, out *VolumeProjection, s conversion.Scope) error { + return 
autoConvert_api_VolumeProjection_To_v1_VolumeProjection(in, out, s) +} + +func autoConvert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.VolumeSource, s conversion.Scope) error { + out.HostPath = (*api.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.EmptyDir = (*api.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) + out.GCEPersistentDisk = (*api.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*api.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.GitRepo = (*api.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) + out.Secret = (*api.SecretVolumeSource)(unsafe.Pointer(in.Secret)) + out.NFS = (*api.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.ISCSI = (*api.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Glusterfs = (*api.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.PersistentVolumeClaim = (*api.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) + out.RBD = (*api.RBDVolumeSource)(unsafe.Pointer(in.RBD)) + out.FlexVolume = (*api.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = (*api.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*api.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) + out.Flocker = (*api.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.DownwardAPI = (*api.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) + out.FC = (*api.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.AzureFile = (*api.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.ConfigMap = (*api.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) + out.VsphereVolume = (*api.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.Quobyte = (*api.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.AzureDisk = (*api.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*api.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.Projected = (*api.ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) + out.PortworxVolume = (*api.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*api.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) + return nil +} + +func Convert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.VolumeSource, s conversion.Scope) error { + return autoConvert_v1_VolumeSource_To_api_VolumeSource(in, out, s) +} + +func autoConvert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *VolumeSource, s conversion.Scope) error { + out.HostPath = (*HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.EmptyDir = (*EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) + out.GCEPersistentDisk = (*GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.GitRepo = (*GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) + out.Secret = (*SecretVolumeSource)(unsafe.Pointer(in.Secret)) + out.NFS = (*NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.ISCSI = (*ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Glusterfs = (*GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.PersistentVolumeClaim = (*PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) + out.RBD = (*RBDVolumeSource)(unsafe.Pointer(in.RBD)) + out.Quobyte = (*QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.FlexVolume = (*FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = 
(*CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) + out.Flocker = (*FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.DownwardAPI = (*DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) + out.FC = (*FCVolumeSource)(unsafe.Pointer(in.FC)) + out.AzureFile = (*AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.ConfigMap = (*ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) + out.VsphereVolume = (*VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.AzureDisk = (*AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.Projected = (*ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) + out.PortworxVolume = (*PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) + return nil +} + +func Convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *VolumeSource, s conversion.Scope) error { + return autoConvert_api_VolumeSource_To_v1_VolumeSource(in, out, s) +} + +func autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + out.VolumePath = in.VolumePath + out.FSType = in.FSType + return nil +} + +func Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in, out, s) +} + +func autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + out.VolumePath = in.VolumePath + out.FSType = in.FSType + return nil +} + +func Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + return autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { + return err + } + return nil +} + +func Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error { + return autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in, out, s) +} + +func autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { + return err + } + return nil +} + +func Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, s conversion.Scope) error { + return autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in, out, s) +} diff --git 
a/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..463e946800 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.deepcopy.go @@ -0,0 +1,3500 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_AWSElasticBlockStoreVolumeSource, InType: reflect.TypeOf(&AWSElasticBlockStoreVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Affinity, InType: reflect.TypeOf(&Affinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_AttachedVolume, InType: reflect.TypeOf(&AttachedVolume{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_AvoidPods, InType: reflect.TypeOf(&AvoidPods{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_AzureDiskVolumeSource, InType: reflect.TypeOf(&AzureDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_AzureFileVolumeSource, InType: reflect.TypeOf(&AzureFileVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Binding, InType: reflect.TypeOf(&Binding{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Capabilities, InType: reflect.TypeOf(&Capabilities{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_CephFSVolumeSource, InType: reflect.TypeOf(&CephFSVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_CinderVolumeSource, InType: reflect.TypeOf(&CinderVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ComponentCondition, InType: reflect.TypeOf(&ComponentCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ComponentStatus, InType: reflect.TypeOf(&ComponentStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ComponentStatusList, InType: reflect.TypeOf(&ComponentStatusList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMap, InType: reflect.TypeOf(&ConfigMap{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapEnvSource, InType: reflect.TypeOf(&ConfigMapEnvSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapKeySelector, InType: reflect.TypeOf(&ConfigMapKeySelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapList, InType: reflect.TypeOf(&ConfigMapList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapProjection, InType: 
reflect.TypeOf(&ConfigMapProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapVolumeSource, InType: reflect.TypeOf(&ConfigMapVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Container, InType: reflect.TypeOf(&Container{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerImage, InType: reflect.TypeOf(&ContainerImage{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerPort, InType: reflect.TypeOf(&ContainerPort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerState, InType: reflect.TypeOf(&ContainerState{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerStateRunning, InType: reflect.TypeOf(&ContainerStateRunning{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerStateTerminated, InType: reflect.TypeOf(&ContainerStateTerminated{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerStateWaiting, InType: reflect.TypeOf(&ContainerStateWaiting{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerStatus, InType: reflect.TypeOf(&ContainerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DaemonEndpoint, InType: reflect.TypeOf(&DaemonEndpoint{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DeleteOptions, InType: reflect.TypeOf(&DeleteOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DownwardAPIProjection, InType: reflect.TypeOf(&DownwardAPIProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DownwardAPIVolumeFile, InType: reflect.TypeOf(&DownwardAPIVolumeFile{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DownwardAPIVolumeSource, InType: reflect.TypeOf(&DownwardAPIVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EmptyDirVolumeSource, InType: reflect.TypeOf(&EmptyDirVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EndpointAddress, InType: reflect.TypeOf(&EndpointAddress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EndpointPort, InType: reflect.TypeOf(&EndpointPort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EndpointSubset, InType: reflect.TypeOf(&EndpointSubset{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Endpoints, InType: reflect.TypeOf(&Endpoints{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EndpointsList, InType: reflect.TypeOf(&EndpointsList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EnvFromSource, InType: reflect.TypeOf(&EnvFromSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EnvVar, InType: reflect.TypeOf(&EnvVar{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EnvVarSource, InType: reflect.TypeOf(&EnvVarSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Event, InType: reflect.TypeOf(&Event{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EventList, InType: reflect.TypeOf(&EventList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EventSource, InType: reflect.TypeOf(&EventSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ExecAction, InType: reflect.TypeOf(&ExecAction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_FCVolumeSource, InType: reflect.TypeOf(&FCVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_FlexVolumeSource, InType: reflect.TypeOf(&FlexVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_FlockerVolumeSource, InType: reflect.TypeOf(&FlockerVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_GCEPersistentDiskVolumeSource, InType: 
reflect.TypeOf(&GCEPersistentDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_GitRepoVolumeSource, InType: reflect.TypeOf(&GitRepoVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_GlusterfsVolumeSource, InType: reflect.TypeOf(&GlusterfsVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HTTPGetAction, InType: reflect.TypeOf(&HTTPGetAction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HTTPHeader, InType: reflect.TypeOf(&HTTPHeader{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Handler, InType: reflect.TypeOf(&Handler{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HostPathVolumeSource, InType: reflect.TypeOf(&HostPathVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ISCSIVolumeSource, InType: reflect.TypeOf(&ISCSIVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_KeyToPath, InType: reflect.TypeOf(&KeyToPath{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Lifecycle, InType: reflect.TypeOf(&Lifecycle{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LimitRange, InType: reflect.TypeOf(&LimitRange{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LimitRangeItem, InType: reflect.TypeOf(&LimitRangeItem{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LimitRangeList, InType: reflect.TypeOf(&LimitRangeList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LimitRangeSpec, InType: reflect.TypeOf(&LimitRangeSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_List, InType: reflect.TypeOf(&List{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ListOptions, InType: reflect.TypeOf(&ListOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LoadBalancerIngress, InType: reflect.TypeOf(&LoadBalancerIngress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LoadBalancerStatus, InType: reflect.TypeOf(&LoadBalancerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LocalObjectReference, InType: reflect.TypeOf(&LocalObjectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NFSVolumeSource, InType: reflect.TypeOf(&NFSVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Namespace, InType: reflect.TypeOf(&Namespace{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NamespaceList, InType: reflect.TypeOf(&NamespaceList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NamespaceSpec, InType: reflect.TypeOf(&NamespaceSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NamespaceStatus, InType: reflect.TypeOf(&NamespaceStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Node, InType: reflect.TypeOf(&Node{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeAddress, InType: reflect.TypeOf(&NodeAddress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeAffinity, InType: reflect.TypeOf(&NodeAffinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeCondition, InType: reflect.TypeOf(&NodeCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeDaemonEndpoints, InType: reflect.TypeOf(&NodeDaemonEndpoints{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeList, InType: reflect.TypeOf(&NodeList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeProxyOptions, InType: reflect.TypeOf(&NodeProxyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeResources, InType: reflect.TypeOf(&NodeResources{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSelector, InType: reflect.TypeOf(&NodeSelector{})}, + 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSelectorRequirement, InType: reflect.TypeOf(&NodeSelectorRequirement{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSelectorTerm, InType: reflect.TypeOf(&NodeSelectorTerm{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSpec, InType: reflect.TypeOf(&NodeSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeStatus, InType: reflect.TypeOf(&NodeStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSystemInfo, InType: reflect.TypeOf(&NodeSystemInfo{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectFieldSelector, InType: reflect.TypeOf(&ObjectFieldSelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectMeta, InType: reflect.TypeOf(&ObjectMeta{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectReference, InType: reflect.TypeOf(&ObjectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolume, InType: reflect.TypeOf(&PersistentVolume{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeClaim, InType: reflect.TypeOf(&PersistentVolumeClaim{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeClaimList, InType: reflect.TypeOf(&PersistentVolumeClaimList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeClaimSpec, InType: reflect.TypeOf(&PersistentVolumeClaimSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeClaimStatus, InType: reflect.TypeOf(&PersistentVolumeClaimStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeClaimVolumeSource, InType: reflect.TypeOf(&PersistentVolumeClaimVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeList, InType: reflect.TypeOf(&PersistentVolumeList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeSource, InType: reflect.TypeOf(&PersistentVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeSpec, InType: reflect.TypeOf(&PersistentVolumeSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeStatus, InType: reflect.TypeOf(&PersistentVolumeStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PhotonPersistentDiskVolumeSource, InType: reflect.TypeOf(&PhotonPersistentDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Pod, InType: reflect.TypeOf(&Pod{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodAffinity, InType: reflect.TypeOf(&PodAffinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodAffinityTerm, InType: reflect.TypeOf(&PodAffinityTerm{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodAntiAffinity, InType: reflect.TypeOf(&PodAntiAffinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodAttachOptions, InType: reflect.TypeOf(&PodAttachOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodCondition, InType: reflect.TypeOf(&PodCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodExecOptions, InType: reflect.TypeOf(&PodExecOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodList, InType: reflect.TypeOf(&PodList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodLogOptions, InType: reflect.TypeOf(&PodLogOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodPortForwardOptions, InType: reflect.TypeOf(&PodPortForwardOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodProxyOptions, InType: reflect.TypeOf(&PodProxyOptions{})}, + 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodSecurityContext, InType: reflect.TypeOf(&PodSecurityContext{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodSignature, InType: reflect.TypeOf(&PodSignature{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodSpec, InType: reflect.TypeOf(&PodSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodStatus, InType: reflect.TypeOf(&PodStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodStatusResult, InType: reflect.TypeOf(&PodStatusResult{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodTemplate, InType: reflect.TypeOf(&PodTemplate{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodTemplateList, InType: reflect.TypeOf(&PodTemplateList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodTemplateSpec, InType: reflect.TypeOf(&PodTemplateSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PortworxVolumeSource, InType: reflect.TypeOf(&PortworxVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Preconditions, InType: reflect.TypeOf(&Preconditions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PreferAvoidPodsEntry, InType: reflect.TypeOf(&PreferAvoidPodsEntry{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PreferredSchedulingTerm, InType: reflect.TypeOf(&PreferredSchedulingTerm{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Probe, InType: reflect.TypeOf(&Probe{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ProjectedVolumeSource, InType: reflect.TypeOf(&ProjectedVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_QuobyteVolumeSource, InType: reflect.TypeOf(&QuobyteVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_RBDVolumeSource, InType: reflect.TypeOf(&RBDVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_RangeAllocation, InType: reflect.TypeOf(&RangeAllocation{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ReplicationController, InType: reflect.TypeOf(&ReplicationController{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ReplicationControllerCondition, InType: reflect.TypeOf(&ReplicationControllerCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ReplicationControllerList, InType: reflect.TypeOf(&ReplicationControllerList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ReplicationControllerSpec, InType: reflect.TypeOf(&ReplicationControllerSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ReplicationControllerStatus, InType: reflect.TypeOf(&ReplicationControllerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceFieldSelector, InType: reflect.TypeOf(&ResourceFieldSelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceQuota, InType: reflect.TypeOf(&ResourceQuota{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceQuotaList, InType: reflect.TypeOf(&ResourceQuotaList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceQuotaSpec, InType: reflect.TypeOf(&ResourceQuotaSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceQuotaStatus, InType: reflect.TypeOf(&ResourceQuotaStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceRequirements, InType: reflect.TypeOf(&ResourceRequirements{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SELinuxOptions, InType: reflect.TypeOf(&SELinuxOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ScaleIOVolumeSource, InType: reflect.TypeOf(&ScaleIOVolumeSource{})}, + 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Secret, InType: reflect.TypeOf(&Secret{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretEnvSource, InType: reflect.TypeOf(&SecretEnvSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretKeySelector, InType: reflect.TypeOf(&SecretKeySelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretList, InType: reflect.TypeOf(&SecretList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretProjection, InType: reflect.TypeOf(&SecretProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretVolumeSource, InType: reflect.TypeOf(&SecretVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecurityContext, InType: reflect.TypeOf(&SecurityContext{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SerializedReference, InType: reflect.TypeOf(&SerializedReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Service, InType: reflect.TypeOf(&Service{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceAccount, InType: reflect.TypeOf(&ServiceAccount{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceAccountList, InType: reflect.TypeOf(&ServiceAccountList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceList, InType: reflect.TypeOf(&ServiceList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServicePort, InType: reflect.TypeOf(&ServicePort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceProxyOptions, InType: reflect.TypeOf(&ServiceProxyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceSpec, InType: reflect.TypeOf(&ServiceSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceStatus, InType: reflect.TypeOf(&ServiceStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Sysctl, InType: reflect.TypeOf(&Sysctl{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_TCPSocketAction, InType: reflect.TypeOf(&TCPSocketAction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Taint, InType: reflect.TypeOf(&Taint{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Toleration, InType: reflect.TypeOf(&Toleration{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Volume, InType: reflect.TypeOf(&Volume{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VolumeMount, InType: reflect.TypeOf(&VolumeMount{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VolumeProjection, InType: reflect.TypeOf(&VolumeProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VolumeSource, InType: reflect.TypeOf(&VolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VsphereVirtualDiskVolumeSource, InType: reflect.TypeOf(&VsphereVirtualDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_WeightedPodAffinityTerm, InType: reflect.TypeOf(&WeightedPodAffinityTerm{})}, + ) +} + +func DeepCopy_v1_AWSElasticBlockStoreVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AWSElasticBlockStoreVolumeSource) + out := out.(*AWSElasticBlockStoreVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_Affinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Affinity) + out := out.(*Affinity) + *out = *in + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + *out = new(NodeAffinity) + if err := DeepCopy_v1_NodeAffinity(*in, *out, c); err != nil { + return err + } + } + if in.PodAffinity != nil { + in, out := &in.PodAffinity, &out.PodAffinity + *out = 
new(PodAffinity) + if err := DeepCopy_v1_PodAffinity(*in, *out, c); err != nil { + return err + } + } + if in.PodAntiAffinity != nil { + in, out := &in.PodAntiAffinity, &out.PodAntiAffinity + *out = new(PodAntiAffinity) + if err := DeepCopy_v1_PodAntiAffinity(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_AttachedVolume(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AttachedVolume) + out := out.(*AttachedVolume) + *out = *in + return nil + } +} + +func DeepCopy_v1_AvoidPods(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AvoidPods) + out := out.(*AvoidPods) + *out = *in + if in.PreferAvoidPods != nil { + in, out := &in.PreferAvoidPods, &out.PreferAvoidPods + *out = make([]PreferAvoidPodsEntry, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PreferAvoidPodsEntry(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_AzureDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AzureDiskVolumeSource) + out := out.(*AzureDiskVolumeSource) + *out = *in + if in.CachingMode != nil { + in, out := &in.CachingMode, &out.CachingMode + *out = new(AzureDataDiskCachingMode) + **out = **in + } + if in.FSType != nil { + in, out := &in.FSType, &out.FSType + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_AzureFileVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AzureFileVolumeSource) + out := out.(*AzureFileVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_Binding(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Binding) + out := out.(*Binding) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + return nil + } +} + +func DeepCopy_v1_Capabilities(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Capabilities) + out := out.(*Capabilities) + *out = *in + if in.Add != nil { + in, out := &in.Add, &out.Add + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + if in.Drop != nil { + in, out := &in.Drop, &out.Drop + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_CephFSVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CephFSVolumeSource) + out := out.(*CephFSVolumeSource) + *out = *in + if in.Monitors != nil { + in, out := &in.Monitors, &out.Monitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_CinderVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CinderVolumeSource) + out := out.(*CinderVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_ComponentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ComponentCondition) + out := out.(*ComponentCondition) + *out = *in + return nil + } +} + +func DeepCopy_v1_ComponentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ComponentStatus) + out := out.(*ComponentStatus) + *out = *in + if newVal, err := 
c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ComponentCondition, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_ComponentStatusList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ComponentStatusList) + out := out.(*ComponentStatusList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ComponentStatus, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ComponentStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_ConfigMap(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMap) + out := out.(*ConfigMap) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_v1_ConfigMapEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapEnvSource) + out := out.(*ConfigMapEnvSource) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_ConfigMapKeySelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapKeySelector) + out := out.(*ConfigMapKeySelector) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_ConfigMapList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapList) + out := out.(*ConfigMapList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConfigMap, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ConfigMap(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_ConfigMapProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapProjection) + out := out.(*ConfigMapProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_v1_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_ConfigMapVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapVolumeSource) + out := out.(*ConfigMapVolumeSource) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_v1_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_Container(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Container) + out := out.(*Container) + 
*out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]EnvFromSource, len(*in)) + for i := range *in { + if err := DeepCopy_v1_EnvFromSource(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + for i := range *in { + if err := DeepCopy_v1_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if err := DeepCopy_v1_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { + return err + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMount, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(Probe) + if err := DeepCopy_v1_Probe(*in, *out, c); err != nil { + return err + } + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(Probe) + if err := DeepCopy_v1_Probe(*in, *out, c); err != nil { + return err + } + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(Lifecycle) + if err := DeepCopy_v1_Lifecycle(*in, *out, c); err != nil { + return err + } + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(SecurityContext) + if err := DeepCopy_v1_SecurityContext(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_ContainerImage(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerImage) + out := out.(*ContainerImage) + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_ContainerPort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerPort) + out := out.(*ContainerPort) + *out = *in + return nil + } +} + +func DeepCopy_v1_ContainerState(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerState) + out := out.(*ContainerState) + *out = *in + if in.Waiting != nil { + in, out := &in.Waiting, &out.Waiting + *out = new(ContainerStateWaiting) + **out = **in + } + if in.Running != nil { + in, out := &in.Running, &out.Running + *out = new(ContainerStateRunning) + if err := DeepCopy_v1_ContainerStateRunning(*in, *out, c); err != nil { + return err + } + } + if in.Terminated != nil { + in, out := &in.Terminated, &out.Terminated + *out = new(ContainerStateTerminated) + if err := DeepCopy_v1_ContainerStateTerminated(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_ContainerStateRunning(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStateRunning) + out := out.(*ContainerStateRunning) + *out = *in + out.StartedAt = in.StartedAt.DeepCopy() + return nil + } +} + +func DeepCopy_v1_ContainerStateTerminated(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStateTerminated) + out := out.(*ContainerStateTerminated) + *out = *in + out.StartedAt = in.StartedAt.DeepCopy() + 
out.FinishedAt = in.FinishedAt.DeepCopy() + return nil + } +} + +func DeepCopy_v1_ContainerStateWaiting(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStateWaiting) + out := out.(*ContainerStateWaiting) + *out = *in + return nil + } +} + +func DeepCopy_v1_ContainerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStatus) + out := out.(*ContainerStatus) + *out = *in + if err := DeepCopy_v1_ContainerState(&in.State, &out.State, c); err != nil { + return err + } + if err := DeepCopy_v1_ContainerState(&in.LastTerminationState, &out.LastTerminationState, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_DaemonEndpoint(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonEndpoint) + out := out.(*DaemonEndpoint) + *out = *in + return nil + } +} + +func DeepCopy_v1_DeleteOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeleteOptions) + out := out.(*DeleteOptions) + *out = *in + if in.GracePeriodSeconds != nil { + in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Preconditions != nil { + in, out := &in.Preconditions, &out.Preconditions + *out = new(Preconditions) + if err := DeepCopy_v1_Preconditions(*in, *out, c); err != nil { + return err + } + } + if in.OrphanDependents != nil { + in, out := &in.OrphanDependents, &out.OrphanDependents + *out = new(bool) + **out = **in + } + if in.PropagationPolicy != nil { + in, out := &in.PropagationPolicy, &out.PropagationPolicy + *out = new(DeletionPropagation) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_DownwardAPIProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIProjection) + out := out.(*DownwardAPIProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + if err := DeepCopy_v1_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_DownwardAPIVolumeFile(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIVolumeFile) + out := out.(*DownwardAPIVolumeFile) + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + **out = **in + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + if err := DeepCopy_v1_ResourceFieldSelector(*in, *out, c); err != nil { + return err + } + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_DownwardAPIVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIVolumeSource) + out := out.(*DownwardAPIVolumeSource) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + if err := DeepCopy_v1_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_EmptyDirVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EmptyDirVolumeSource) + out := 
out.(*EmptyDirVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_EndpointAddress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointAddress) + out := out.(*EndpointAddress) + *out = *in + if in.NodeName != nil { + in, out := &in.NodeName, &out.NodeName + *out = new(string) + **out = **in + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(ObjectReference) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_EndpointPort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointPort) + out := out.(*EndpointPort) + *out = *in + return nil + } +} + +func DeepCopy_v1_EndpointSubset(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointSubset) + out := out.(*EndpointSubset) + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + if err := DeepCopy_v1_EndpointAddress(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.NotReadyAddresses != nil { + in, out := &in.NotReadyAddresses, &out.NotReadyAddresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + if err := DeepCopy_v1_EndpointAddress(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_Endpoints(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Endpoints) + out := out.(*Endpoints) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if in.Subsets != nil { + in, out := &in.Subsets, &out.Subsets + *out = make([]EndpointSubset, len(*in)) + for i := range *in { + if err := DeepCopy_v1_EndpointSubset(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_EndpointsList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointsList) + out := out.(*EndpointsList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Endpoints, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Endpoints(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_EnvFromSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvFromSource) + out := out.(*EnvFromSource) + *out = *in + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + *out = new(ConfigMapEnvSource) + if err := DeepCopy_v1_ConfigMapEnvSource(*in, *out, c); err != nil { + return err + } + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretEnvSource) + if err := DeepCopy_v1_SecretEnvSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_EnvVar(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvVar) + out := out.(*EnvVar) + *out = *in + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + *out = new(EnvVarSource) + if err := DeepCopy_v1_EnvVarSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_EnvVarSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvVarSource) + out := out.(*EnvVarSource) + 
*out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + **out = **in + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + if err := DeepCopy_v1_ResourceFieldSelector(*in, *out, c); err != nil { + return err + } + } + if in.ConfigMapKeyRef != nil { + in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef + *out = new(ConfigMapKeySelector) + if err := DeepCopy_v1_ConfigMapKeySelector(*in, *out, c); err != nil { + return err + } + } + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(SecretKeySelector) + if err := DeepCopy_v1_SecretKeySelector(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_Event(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Event) + out := out.(*Event) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + out.FirstTimestamp = in.FirstTimestamp.DeepCopy() + out.LastTimestamp = in.LastTimestamp.DeepCopy() + return nil + } +} + +func DeepCopy_v1_EventList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EventList) + out := out.(*EventList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Event(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_EventSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EventSource) + out := out.(*EventSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_ExecAction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExecAction) + out := out.(*ExecAction) + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_FCVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FCVolumeSource) + out := out.(*FCVolumeSource) + *out = *in + if in.TargetWWNs != nil { + in, out := &in.TargetWWNs, &out.TargetWWNs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_FlexVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FlexVolumeSource) + out := out.(*FlexVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_v1_FlockerVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FlockerVolumeSource) + out := out.(*FlockerVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_GCEPersistentDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GCEPersistentDiskVolumeSource) + out := out.(*GCEPersistentDiskVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_GitRepoVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := 
in.(*GitRepoVolumeSource) + out := out.(*GitRepoVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_GlusterfsVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GlusterfsVolumeSource) + out := out.(*GlusterfsVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_HTTPGetAction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPGetAction) + out := out.(*HTTPGetAction) + *out = *in + if in.HTTPHeaders != nil { + in, out := &in.HTTPHeaders, &out.HTTPHeaders + *out = make([]HTTPHeader, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_HTTPHeader(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPHeader) + out := out.(*HTTPHeader) + *out = *in + return nil + } +} + +func DeepCopy_v1_Handler(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Handler) + out := out.(*Handler) + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecAction) + if err := DeepCopy_v1_ExecAction(*in, *out, c); err != nil { + return err + } + } + if in.HTTPGet != nil { + in, out := &in.HTTPGet, &out.HTTPGet + *out = new(HTTPGetAction) + if err := DeepCopy_v1_HTTPGetAction(*in, *out, c); err != nil { + return err + } + } + if in.TCPSocket != nil { + in, out := &in.TCPSocket, &out.TCPSocket + *out = new(TCPSocketAction) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_HostPathVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HostPathVolumeSource) + out := out.(*HostPathVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_ISCSIVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ISCSIVolumeSource) + out := out.(*ISCSIVolumeSource) + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_KeyToPath(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*KeyToPath) + out := out.(*KeyToPath) + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_Lifecycle(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Lifecycle) + out := out.(*Lifecycle) + *out = *in + if in.PostStart != nil { + in, out := &in.PostStart, &out.PostStart + *out = new(Handler) + if err := DeepCopy_v1_Handler(*in, *out, c); err != nil { + return err + } + } + if in.PreStop != nil { + in, out := &in.PreStop, &out.PreStop + *out = new(Handler) + if err := DeepCopy_v1_Handler(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_LimitRange(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRange) + out := out.(*LimitRange) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_LimitRangeSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_LimitRangeItem(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRangeItem) + out := out.(*LimitRangeItem) + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Min != nil { + in, 
out := &in.Min, &out.Min + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.DefaultRequest != nil { + in, out := &in.DefaultRequest, &out.DefaultRequest + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.MaxLimitRequestRatio != nil { + in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +func DeepCopy_v1_LimitRangeList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRangeList) + out := out.(*LimitRangeList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LimitRange, len(*in)) + for i := range *in { + if err := DeepCopy_v1_LimitRange(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_LimitRangeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRangeSpec) + out := out.(*LimitRangeSpec) + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make([]LimitRangeItem, len(*in)) + for i := range *in { + if err := DeepCopy_v1_LimitRangeItem(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_List(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*List) + out := out.(*List) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*runtime.RawExtension) + } + } + } + return nil + } +} + +func DeepCopy_v1_ListOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ListOptions) + out := out.(*ListOptions) + *out = *in + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_LoadBalancerIngress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LoadBalancerIngress) + out := out.(*LoadBalancerIngress) + *out = *in + return nil + } +} + +func DeepCopy_v1_LoadBalancerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LoadBalancerStatus) + out := out.(*LoadBalancerStatus) + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]LoadBalancerIngress, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_LocalObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LocalObjectReference) + out := out.(*LocalObjectReference) + *out = *in + return nil + } +} + +func DeepCopy_v1_NFSVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NFSVolumeSource) + out := out.(*NFSVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_Namespace(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Namespace) + out := out.(*Namespace) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := 
DeepCopy_v1_NamespaceSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_NamespaceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NamespaceList) + out := out.(*NamespaceList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Namespace, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Namespace(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_NamespaceSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NamespaceSpec) + out := out.(*NamespaceSpec) + *out = *in + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]FinalizerName, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_NamespaceStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NamespaceStatus) + out := out.(*NamespaceStatus) + *out = *in + return nil + } +} + +func DeepCopy_v1_Node(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Node) + out := out.(*Node) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_NodeSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_NodeStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_NodeAddress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeAddress) + out := out.(*NodeAddress) + *out = *in + return nil + } +} + +func DeepCopy_v1_NodeAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeAffinity) + out := out.(*NodeAffinity) + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = new(NodeSelector) + if err := DeepCopy_v1_NodeSelector(*in, *out, c); err != nil { + return err + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]PreferredSchedulingTerm, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PreferredSchedulingTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_NodeCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeCondition) + out := out.(*NodeCondition) + *out = *in + out.LastHeartbeatTime = in.LastHeartbeatTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_v1_NodeDaemonEndpoints(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeDaemonEndpoints) + out := out.(*NodeDaemonEndpoints) + *out = *in + return nil + } +} + +func DeepCopy_v1_NodeList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeList) + out := out.(*NodeList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Node, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Node(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_NodeProxyOptions(in interface{}, out interface{}, c 
*conversion.Cloner) error { + { + in := in.(*NodeProxyOptions) + out := out.(*NodeProxyOptions) + *out = *in + return nil + } +} + +func DeepCopy_v1_NodeResources(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeResources) + out := out.(*NodeResources) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +func DeepCopy_v1_NodeSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSelector) + out := out.(*NodeSelector) + *out = *in + if in.NodeSelectorTerms != nil { + in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms + *out = make([]NodeSelectorTerm, len(*in)) + for i := range *in { + if err := DeepCopy_v1_NodeSelectorTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_NodeSelectorRequirement(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSelectorRequirement) + out := out.(*NodeSelectorRequirement) + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_NodeSelectorTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSelectorTerm) + out := out.(*NodeSelectorTerm) + *out = *in + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]NodeSelectorRequirement, len(*in)) + for i := range *in { + if err := DeepCopy_v1_NodeSelectorRequirement(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_NodeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSpec) + out := out.(*NodeSpec) + *out = *in + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]Taint, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Taint(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_NodeStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeStatus) + out := out.(*NodeStatus) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]NodeCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1_NodeCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]NodeAddress, len(*in)) + copy(*out, *in) + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ContainerImage, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ContainerImage(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.VolumesInUse != nil { + in, out := &in.VolumesInUse, &out.VolumesInUse + *out = make([]UniqueVolumeName, len(*in)) + copy(*out, *in) + } + if in.VolumesAttached != nil { + in, out := &in.VolumesAttached, &out.VolumesAttached + *out = make([]AttachedVolume, len(*in)) + copy(*out, *in) + } + return 
nil + } +} + +func DeepCopy_v1_NodeSystemInfo(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSystemInfo) + out := out.(*NodeSystemInfo) + *out = *in + return nil + } +} + +func DeepCopy_v1_ObjectFieldSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectFieldSelector) + out := out.(*ObjectFieldSelector) + *out = *in + return nil + } +} + +func DeepCopy_v1_ObjectMeta(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMeta) + out := out.(*ObjectMeta) + *out = *in + out.CreationTimestamp = in.CreationTimestamp.DeepCopy() + if in.DeletionTimestamp != nil { + in, out := &in.DeletionTimestamp, &out.DeletionTimestamp + *out = new(meta_v1.Time) + **out = (*in).DeepCopy() + } + if in.DeletionGracePeriodSeconds != nil { + in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]meta_v1.OwnerReference, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*meta_v1.OwnerReference) + } + } + } + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_ObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectReference) + out := out.(*ObjectReference) + *out = *in + return nil + } +} + +func DeepCopy_v1_PersistentVolume(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolume) + out := out.(*PersistentVolume) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_PersistentVolumeClaim(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaim) + out := out.(*PersistentVolumeClaim) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_PersistentVolumeClaimStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_PersistentVolumeClaimList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimList) + out := out.(*PersistentVolumeClaimList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PersistentVolumeClaim(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_PersistentVolumeClaimSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimSpec) + out 
:= out.(*PersistentVolumeClaimSpec) + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*meta_v1.LabelSelector) + } + } + if err := DeepCopy_v1_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { + return err + } + if in.StorageClassName != nil { + in, out := &in.StorageClassName, &out.StorageClassName + *out = new(string) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_PersistentVolumeClaimStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimStatus) + out := out.(*PersistentVolumeClaimStatus) + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +func DeepCopy_v1_PersistentVolumeClaimVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimVolumeSource) + out := out.(*PersistentVolumeClaimVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_PersistentVolumeList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeList) + out := out.(*PersistentVolumeList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolume, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PersistentVolume(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_PersistentVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeSource) + out := out.(*PersistentVolumeSource) + *out = *in + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + **out = **in + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsVolumeSource) + **out = **in + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSVolumeSource) + **out = **in + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + *out = new(RBDVolumeSource) + if err := DeepCopy_v1_RBDVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(ISCSIVolumeSource) + if err := DeepCopy_v1_ISCSIVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(CinderVolumeSource) + **out = **in + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(CephFSVolumeSource) + if err := DeepCopy_v1_CephFSVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.FC != nil { + in, out := &in.FC, &out.FC + *out = new(FCVolumeSource) + if err := DeepCopy_v1_FCVolumeSource(*in, *out, c); 
err != nil { + return err + } + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(FlockerVolumeSource) + **out = **in + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(FlexVolumeSource) + if err := DeepCopy_v1_FlexVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(AzureFileVolumeSource) + **out = **in + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + *out = new(QuobyteVolumeSource) + **out = **in + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + *out = new(AzureDiskVolumeSource) + if err := DeepCopy_v1_AzureDiskVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOVolumeSource) + if err := DeepCopy_v1_ScaleIOVolumeSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_PersistentVolumeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeSpec) + out := out.(*PersistentVolumeSpec) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if err := DeepCopy_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, c); err != nil { + return err + } + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.ClaimRef != nil { + in, out := &in.ClaimRef, &out.ClaimRef + *out = new(ObjectReference) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_PersistentVolumeStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeStatus) + out := out.(*PersistentVolumeStatus) + *out = *in + return nil + } +} + +func DeepCopy_v1_PhotonPersistentDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PhotonPersistentDiskVolumeSource) + out := out.(*PhotonPersistentDiskVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_Pod(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Pod) + out := out.(*Pod) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_PodSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_PodStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_PodAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAffinity) + out := out.(*PodAffinity) + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = 
make([]PodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_v1_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_PodAffinityTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAffinityTerm) + out := out.(*PodAffinityTerm) + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*meta_v1.LabelSelector) + } + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_PodAntiAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAntiAffinity) + out := out.(*PodAntiAffinity) + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_v1_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_PodAttachOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAttachOptions) + out := out.(*PodAttachOptions) + *out = *in + return nil + } +} + +func DeepCopy_v1_PodCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodCondition) + out := out.(*PodCondition) + *out = *in + out.LastProbeTime = in.LastProbeTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_v1_PodExecOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodExecOptions) + out := out.(*PodExecOptions) + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_PodList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodList) + out := out.(*PodList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Pod, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Pod(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_PodLogOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodLogOptions) + out := out.(*PodLogOptions) + *out = *in + if in.SinceSeconds != nil { + in, out := &in.SinceSeconds, &out.SinceSeconds + *out = new(int64) + **out = **in + } + if in.SinceTime != nil { + in, out := &in.SinceTime, 
&out.SinceTime + *out = new(meta_v1.Time) + **out = (*in).DeepCopy() + } + if in.TailLines != nil { + in, out := &in.TailLines, &out.TailLines + *out = new(int64) + **out = **in + } + if in.LimitBytes != nil { + in, out := &in.LimitBytes, &out.LimitBytes + *out = new(int64) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_PodPortForwardOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPortForwardOptions) + out := out.(*PodPortForwardOptions) + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]int32, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_PodProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodProxyOptions) + out := out.(*PodProxyOptions) + *out = *in + return nil + } +} + +func DeepCopy_v1_PodSecurityContext(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityContext) + out := out.(*PodSecurityContext) + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + **out = **in + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(int64) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.SupplementalGroups != nil { + in, out := &in.SupplementalGroups, &out.SupplementalGroups + *out = make([]int64, len(*in)) + copy(*out, *in) + } + if in.FSGroup != nil { + in, out := &in.FSGroup, &out.FSGroup + *out = new(int64) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_PodSignature(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSignature) + out := out.(*PodSignature) + *out = *in + if in.PodController != nil { + in, out := &in.PodController, &out.PodController + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*meta_v1.OwnerReference) + } + } + return nil + } +} + +func DeepCopy_v1_PodSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSpec) + out := out.(*PodSpec) + *out = *in + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]Volume, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Volume(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]Container, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Container(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]Container, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Container(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + 
if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(PodSecurityContext) + if err := DeepCopy_v1_PodSecurityContext(*in, *out, c); err != nil { + return err + } + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(Affinity) + if err := DeepCopy_v1_Affinity(*in, *out, c); err != nil { + return err + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]Toleration, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Toleration(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_PodStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodStatus) + out := out.(*PodStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PodCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PodCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(meta_v1.Time) + **out = (*in).DeepCopy() + } + if in.InitContainerStatuses != nil { + in, out := &in.InitContainerStatuses, &out.InitContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ContainerStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.ContainerStatuses != nil { + in, out := &in.ContainerStatuses, &out.ContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ContainerStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_PodStatusResult(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodStatusResult) + out := out.(*PodStatusResult) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_PodStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_PodTemplate(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodTemplate) + out := out.(*PodTemplate) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_PodTemplateList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodTemplateList) + out := out.(*PodTemplateList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodTemplate, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PodTemplate(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_PodTemplateSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodTemplateSpec) + out := out.(*PodTemplateSpec) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_PodSpec(&in.Spec, &out.Spec, c); err != nil { + 
return err + } + return nil + } +} + +func DeepCopy_v1_PortworxVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PortworxVolumeSource) + out := out.(*PortworxVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_Preconditions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Preconditions) + out := out.(*Preconditions) + *out = *in + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(types.UID) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_PreferAvoidPodsEntry(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PreferAvoidPodsEntry) + out := out.(*PreferAvoidPodsEntry) + *out = *in + if err := DeepCopy_v1_PodSignature(&in.PodSignature, &out.PodSignature, c); err != nil { + return err + } + out.EvictionTime = in.EvictionTime.DeepCopy() + return nil + } +} + +func DeepCopy_v1_PreferredSchedulingTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PreferredSchedulingTerm) + out := out.(*PreferredSchedulingTerm) + *out = *in + if err := DeepCopy_v1_NodeSelectorTerm(&in.Preference, &out.Preference, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_Probe(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Probe) + out := out.(*Probe) + *out = *in + if err := DeepCopy_v1_Handler(&in.Handler, &out.Handler, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_ProjectedVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ProjectedVolumeSource) + out := out.(*ProjectedVolumeSource) + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]VolumeProjection, len(*in)) + for i := range *in { + if err := DeepCopy_v1_VolumeProjection(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_QuobyteVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*QuobyteVolumeSource) + out := out.(*QuobyteVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_RBDVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RBDVolumeSource) + out := out.(*RBDVolumeSource) + *out = *in + if in.CephMonitors != nil { + in, out := &in.CephMonitors, &out.CephMonitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_RangeAllocation(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RangeAllocation) + out := out.(*RangeAllocation) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_ReplicationController(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationController) + out := out.(*ReplicationController) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := 
DeepCopy_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_ReplicationControllerStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_ReplicationControllerCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerCondition) + out := out.(*ReplicationControllerCondition) + *out = *in + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_v1_ReplicationControllerList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerList) + out := out.(*ReplicationControllerList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicationController, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ReplicationController(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_ReplicationControllerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerSpec) + out := out.(*ReplicationControllerSpec) + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(PodTemplateSpec) + if err := DeepCopy_v1_PodTemplateSpec(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_ReplicationControllerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerStatus) + out := out.(*ReplicationControllerStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicationControllerCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ReplicationControllerCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_ResourceFieldSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceFieldSelector) + out := out.(*ResourceFieldSelector) + *out = *in + out.Divisor = in.Divisor.DeepCopy() + return nil + } +} + +func DeepCopy_v1_ResourceQuota(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuota) + out := out.(*ResourceQuota) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_ResourceQuotaStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_ResourceQuotaList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuotaList) + out := out.(*ResourceQuotaList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceQuota, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ResourceQuota(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_ResourceQuotaSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuotaSpec) + out := 
out.(*ResourceQuotaSpec) + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ResourceQuotaScope, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_ResourceQuotaStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuotaStatus) + out := out.(*ResourceQuotaStatus) + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Used != nil { + in, out := &in.Used, &out.Used + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +func DeepCopy_v1_ResourceRequirements(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceRequirements) + out := out.(*ResourceRequirements) + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +func DeepCopy_v1_SELinuxOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SELinuxOptions) + out := out.(*SELinuxOptions) + *out = *in + return nil + } +} + +func DeepCopy_v1_ScaleIOVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleIOVolumeSource) + out := out.(*ScaleIOVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_Secret(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Secret) + out := out.(*Secret) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string][]byte) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*[]byte) + } + } + } + if in.StringData != nil { + in, out := &in.StringData, &out.StringData + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_v1_SecretEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretEnvSource) + out := out.(*SecretEnvSource) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_SecretKeySelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretKeySelector) + out := out.(*SecretKeySelector) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_SecretList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretList) + out := out.(*SecretList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Secret, len(*in)) + for i := range *in { + if err := 
DeepCopy_v1_Secret(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_SecretProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretProjection) + out := out.(*SecretProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_v1_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_SecretVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretVolumeSource) + out := out.(*SecretVolumeSource) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_v1_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_SecurityContext(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecurityContext) + out := out.(*SecurityContext) + *out = *in + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = new(Capabilities) + if err := DeepCopy_v1_Capabilities(*in, *out, c); err != nil { + return err + } + } + if in.Privileged != nil { + in, out := &in.Privileged, &out.Privileged + *out = new(bool) + **out = **in + } + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + **out = **in + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(int64) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.ReadOnlyRootFilesystem != nil { + in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_SerializedReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SerializedReference) + out := out.(*SerializedReference) + *out = *in + return nil + } +} + +func DeepCopy_v1_Service(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Service) + out := out.(*Service) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_ServiceSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_ServiceStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_ServiceAccount(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceAccount) + out := out.(*ServiceAccount) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = 
make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_ServiceAccountList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceAccountList) + out := out.(*ServiceAccountList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceAccount, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ServiceAccount(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_ServiceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceList) + out := out.(*ServiceList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Service(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_ServicePort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServicePort) + out := out.(*ServicePort) + *out = *in + return nil + } +} + +func DeepCopy_v1_ServiceProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceProxyOptions) + out := out.(*ServiceProxyOptions) + *out = *in + return nil + } +} + +func DeepCopy_v1_ServiceSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceSpec) + out := out.(*ServiceSpec) + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ServicePort, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExternalIPs != nil { + in, out := &in.ExternalIPs, &out.ExternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DeprecatedPublicIPs != nil { + in, out := &in.DeprecatedPublicIPs, &out.DeprecatedPublicIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LoadBalancerSourceRanges != nil { + in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_ServiceStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceStatus) + out := out.(*ServiceStatus) + *out = *in + if err := DeepCopy_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_Sysctl(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Sysctl) + out := out.(*Sysctl) + *out = *in + return nil + } +} + +func DeepCopy_v1_TCPSocketAction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TCPSocketAction) + out := out.(*TCPSocketAction) + *out = *in + return nil + } +} + +func DeepCopy_v1_Taint(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Taint) + out := out.(*Taint) + *out = *in + out.TimeAdded = in.TimeAdded.DeepCopy() + return nil + } +} + +func DeepCopy_v1_Toleration(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Toleration) + out := out.(*Toleration) + *out = *in + if in.TolerationSeconds != nil { + in, out := &in.TolerationSeconds, &out.TolerationSeconds + *out = 
new(int64) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_Volume(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Volume) + out := out.(*Volume) + *out = *in + if err := DeepCopy_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_VolumeMount(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeMount) + out := out.(*VolumeMount) + *out = *in + return nil + } +} + +func DeepCopy_v1_VolumeProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeProjection) + out := out.(*VolumeProjection) + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretProjection) + if err := DeepCopy_v1_SecretProjection(*in, *out, c); err != nil { + return err + } + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIProjection) + if err := DeepCopy_v1_DownwardAPIProjection(*in, *out, c); err != nil { + return err + } + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapProjection) + if err := DeepCopy_v1_ConfigMapProjection(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_VolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeSource) + out := out.(*VolumeSource) + *out = *in + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + **out = **in + } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(EmptyDirVolumeSource) + **out = **in + } + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + if in.GitRepo != nil { + in, out := &in.GitRepo, &out.GitRepo + *out = new(GitRepoVolumeSource) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretVolumeSource) + if err := DeepCopy_v1_SecretVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSVolumeSource) + **out = **in + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(ISCSIVolumeSource) + if err := DeepCopy_v1_ISCSIVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsVolumeSource) + **out = **in + } + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(PersistentVolumeClaimVolumeSource) + **out = **in + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + *out = new(RBDVolumeSource) + if err := DeepCopy_v1_RBDVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(FlexVolumeSource) + if err := DeepCopy_v1_FlexVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(CinderVolumeSource) + **out = **in + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(CephFSVolumeSource) + if err := DeepCopy_v1_CephFSVolumeSource(*in, *out, c); 
err != nil { + return err + } + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(FlockerVolumeSource) + **out = **in + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIVolumeSource) + if err := DeepCopy_v1_DownwardAPIVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.FC != nil { + in, out := &in.FC, &out.FC + *out = new(FCVolumeSource) + if err := DeepCopy_v1_FCVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(AzureFileVolumeSource) + **out = **in + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapVolumeSource) + if err := DeepCopy_v1_ConfigMapVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + *out = new(QuobyteVolumeSource) + **out = **in + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + *out = new(AzureDiskVolumeSource) + if err := DeepCopy_v1_AzureDiskVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + *out = new(ProjectedVolumeSource) + if err := DeepCopy_v1_ProjectedVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOVolumeSource) + if err := DeepCopy_v1_ScaleIOVolumeSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_VsphereVirtualDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VsphereVirtualDiskVolumeSource) + out := out.(*VsphereVirtualDiskVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_WeightedPodAffinityTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*WeightedPodAffinityTerm) + out := out.(*WeightedPodAffinityTerm) + *out = *in + if err := DeepCopy_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, c); err != nil { + return err + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.defaults.go new file mode 100644 index 0000000000..121b39185c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.defaults.go @@ -0,0 +1,631 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&ConfigMap{}, func(obj interface{}) { SetObjectDefaults_ConfigMap(obj.(*ConfigMap)) }) + scheme.AddTypeDefaultingFunc(&ConfigMapList{}, func(obj interface{}) { SetObjectDefaults_ConfigMapList(obj.(*ConfigMapList)) }) + scheme.AddTypeDefaultingFunc(&Endpoints{}, func(obj interface{}) { SetObjectDefaults_Endpoints(obj.(*Endpoints)) }) + scheme.AddTypeDefaultingFunc(&EndpointsList{}, func(obj interface{}) { SetObjectDefaults_EndpointsList(obj.(*EndpointsList)) }) + scheme.AddTypeDefaultingFunc(&LimitRange{}, func(obj interface{}) { SetObjectDefaults_LimitRange(obj.(*LimitRange)) }) + scheme.AddTypeDefaultingFunc(&LimitRangeList{}, func(obj interface{}) { SetObjectDefaults_LimitRangeList(obj.(*LimitRangeList)) }) + scheme.AddTypeDefaultingFunc(&Namespace{}, func(obj interface{}) { SetObjectDefaults_Namespace(obj.(*Namespace)) }) + scheme.AddTypeDefaultingFunc(&NamespaceList{}, func(obj interface{}) { SetObjectDefaults_NamespaceList(obj.(*NamespaceList)) }) + scheme.AddTypeDefaultingFunc(&Node{}, func(obj interface{}) { SetObjectDefaults_Node(obj.(*Node)) }) + scheme.AddTypeDefaultingFunc(&NodeList{}, func(obj interface{}) { SetObjectDefaults_NodeList(obj.(*NodeList)) }) + scheme.AddTypeDefaultingFunc(&PersistentVolume{}, func(obj interface{}) { SetObjectDefaults_PersistentVolume(obj.(*PersistentVolume)) }) + scheme.AddTypeDefaultingFunc(&PersistentVolumeClaim{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeClaim(obj.(*PersistentVolumeClaim)) }) + scheme.AddTypeDefaultingFunc(&PersistentVolumeClaimList{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeClaimList(obj.(*PersistentVolumeClaimList)) }) + scheme.AddTypeDefaultingFunc(&PersistentVolumeList{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeList(obj.(*PersistentVolumeList)) }) + scheme.AddTypeDefaultingFunc(&Pod{}, func(obj interface{}) { SetObjectDefaults_Pod(obj.(*Pod)) }) + scheme.AddTypeDefaultingFunc(&PodAttachOptions{}, func(obj interface{}) { SetObjectDefaults_PodAttachOptions(obj.(*PodAttachOptions)) }) + scheme.AddTypeDefaultingFunc(&PodExecOptions{}, func(obj interface{}) { SetObjectDefaults_PodExecOptions(obj.(*PodExecOptions)) }) + scheme.AddTypeDefaultingFunc(&PodList{}, func(obj interface{}) { SetObjectDefaults_PodList(obj.(*PodList)) }) + scheme.AddTypeDefaultingFunc(&PodTemplate{}, func(obj interface{}) { SetObjectDefaults_PodTemplate(obj.(*PodTemplate)) }) + scheme.AddTypeDefaultingFunc(&PodTemplateList{}, func(obj interface{}) { SetObjectDefaults_PodTemplateList(obj.(*PodTemplateList)) }) + scheme.AddTypeDefaultingFunc(&ReplicationController{}, func(obj interface{}) { SetObjectDefaults_ReplicationController(obj.(*ReplicationController)) }) + scheme.AddTypeDefaultingFunc(&ReplicationControllerList{}, func(obj interface{}) { SetObjectDefaults_ReplicationControllerList(obj.(*ReplicationControllerList)) }) + scheme.AddTypeDefaultingFunc(&ResourceQuota{}, func(obj interface{}) { SetObjectDefaults_ResourceQuota(obj.(*ResourceQuota)) }) + scheme.AddTypeDefaultingFunc(&ResourceQuotaList{}, func(obj interface{}) { 
SetObjectDefaults_ResourceQuotaList(obj.(*ResourceQuotaList)) }) + scheme.AddTypeDefaultingFunc(&Secret{}, func(obj interface{}) { SetObjectDefaults_Secret(obj.(*Secret)) }) + scheme.AddTypeDefaultingFunc(&SecretList{}, func(obj interface{}) { SetObjectDefaults_SecretList(obj.(*SecretList)) }) + scheme.AddTypeDefaultingFunc(&Service{}, func(obj interface{}) { SetObjectDefaults_Service(obj.(*Service)) }) + scheme.AddTypeDefaultingFunc(&ServiceList{}, func(obj interface{}) { SetObjectDefaults_ServiceList(obj.(*ServiceList)) }) + return nil +} + +func SetObjectDefaults_ConfigMap(in *ConfigMap) { + SetDefaults_ConfigMap(in) +} + +func SetObjectDefaults_ConfigMapList(in *ConfigMapList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ConfigMap(a) + } +} + +func SetObjectDefaults_Endpoints(in *Endpoints) { + SetDefaults_Endpoints(in) +} + +func SetObjectDefaults_EndpointsList(in *EndpointsList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Endpoints(a) + } +} + +func SetObjectDefaults_LimitRange(in *LimitRange) { + for i := range in.Spec.Limits { + a := &in.Spec.Limits[i] + SetDefaults_LimitRangeItem(a) + SetDefaults_ResourceList(&a.Max) + SetDefaults_ResourceList(&a.Min) + SetDefaults_ResourceList(&a.Default) + SetDefaults_ResourceList(&a.DefaultRequest) + SetDefaults_ResourceList(&a.MaxLimitRequestRatio) + } +} + +func SetObjectDefaults_LimitRangeList(in *LimitRangeList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_LimitRange(a) + } +} + +func SetObjectDefaults_Namespace(in *Namespace) { + SetDefaults_NamespaceStatus(&in.Status) +} + +func SetObjectDefaults_NamespaceList(in *NamespaceList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Namespace(a) + } +} + +func SetObjectDefaults_Node(in *Node) { + SetDefaults_Node(in) + SetDefaults_NodeStatus(&in.Status) + SetDefaults_ResourceList(&in.Status.Capacity) + SetDefaults_ResourceList(&in.Status.Allocatable) +} + +func SetObjectDefaults_NodeList(in *NodeList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Node(a) + } +} + +func SetObjectDefaults_PersistentVolume(in *PersistentVolume) { + SetDefaults_PersistentVolume(in) + SetDefaults_ResourceList(&in.Spec.Capacity) + if in.Spec.PersistentVolumeSource.RBD != nil { + SetDefaults_RBDVolumeSource(in.Spec.PersistentVolumeSource.RBD) + } + if in.Spec.PersistentVolumeSource.ISCSI != nil { + SetDefaults_ISCSIVolumeSource(in.Spec.PersistentVolumeSource.ISCSI) + } + if in.Spec.PersistentVolumeSource.AzureDisk != nil { + SetDefaults_AzureDiskVolumeSource(in.Spec.PersistentVolumeSource.AzureDisk) + } + if in.Spec.PersistentVolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(in.Spec.PersistentVolumeSource.ScaleIO) + } +} + +func SetObjectDefaults_PersistentVolumeClaim(in *PersistentVolumeClaim) { + SetDefaults_PersistentVolumeClaim(in) + SetDefaults_ResourceList(&in.Spec.Resources.Limits) + SetDefaults_ResourceList(&in.Spec.Resources.Requests) + SetDefaults_ResourceList(&in.Status.Capacity) +} + +func SetObjectDefaults_PersistentVolumeClaimList(in *PersistentVolumeClaimList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_PersistentVolumeClaim(a) + } +} + +func SetObjectDefaults_PersistentVolumeList(in *PersistentVolumeList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_PersistentVolume(a) + } +} + +func SetObjectDefaults_Pod(in *Pod) { + SetDefaults_Pod(in) + SetDefaults_PodSpec(&in.Spec) + for i := range in.Spec.Volumes { + a := 
&in.Spec.Volumes[i] + SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.InitContainers { + a := &in.Spec.InitContainers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Containers { + a := &in.Spec.Containers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + 
SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_PodAttachOptions(in *PodAttachOptions) { + SetDefaults_PodAttachOptions(in) +} + +func SetObjectDefaults_PodExecOptions(in *PodExecOptions) { + SetDefaults_PodExecOptions(in) +} + +func SetObjectDefaults_PodList(in *PodList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Pod(a) + } +} + +func SetObjectDefaults_PodTemplate(in *PodTemplate) { + SetDefaults_PodSpec(&in.Template.Spec) + for i := range in.Template.Spec.Volumes { + a := &in.Template.Spec.Volumes[i] + SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Template.Spec.InitContainers { + a := &in.Template.Spec.InitContainers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Template.Spec.Containers { + a := &in.Template.Spec.Containers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + 
SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_PodTemplateList(in *PodTemplateList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_PodTemplate(a) + } +} + +func SetObjectDefaults_ReplicationController(in *ReplicationController) { + SetDefaults_ReplicationController(in) + if in.Spec.Template != nil { + SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + 
} + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + } +} + +func SetObjectDefaults_ReplicationControllerList(in *ReplicationControllerList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ReplicationController(a) + } +} + +func SetObjectDefaults_ResourceQuota(in *ResourceQuota) { + SetDefaults_ResourceList(&in.Spec.Hard) + SetDefaults_ResourceList(&in.Status.Hard) + SetDefaults_ResourceList(&in.Status.Used) +} + +func SetObjectDefaults_ResourceQuotaList(in *ResourceQuotaList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ResourceQuota(a) + } +} + +func SetObjectDefaults_Secret(in *Secret) { + SetDefaults_Secret(in) +} + +func SetObjectDefaults_SecretList(in *SecretList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Secret(a) + } +} + +func SetObjectDefaults_Service(in *Service) { + SetDefaults_ServiceSpec(&in.Spec) +} + +func SetObjectDefaults_ServiceList(in *ServiceList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Service(a) + } +} diff --git a/vendor/k8s.io/client-go/pkg/api/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/api/zz_generated.deepcopy.go new file mode 100644 index 0000000000..c018bcc4ee --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/zz_generated.deepcopy.go @@ -0,0 +1,3527 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package api + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + fields "k8s.io/apimachinery/pkg/fields" + labels "k8s.io/apimachinery/pkg/labels" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AWSElasticBlockStoreVolumeSource, InType: reflect.TypeOf(&AWSElasticBlockStoreVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Affinity, InType: reflect.TypeOf(&Affinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AttachedVolume, InType: reflect.TypeOf(&AttachedVolume{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AvoidPods, InType: reflect.TypeOf(&AvoidPods{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AzureDiskVolumeSource, InType: reflect.TypeOf(&AzureDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AzureFileVolumeSource, InType: reflect.TypeOf(&AzureFileVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Binding, InType: reflect.TypeOf(&Binding{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Capabilities, InType: reflect.TypeOf(&Capabilities{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_CephFSVolumeSource, InType: reflect.TypeOf(&CephFSVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_CinderVolumeSource, InType: reflect.TypeOf(&CinderVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ComponentCondition, InType: reflect.TypeOf(&ComponentCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ComponentStatus, InType: reflect.TypeOf(&ComponentStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ComponentStatusList, InType: reflect.TypeOf(&ComponentStatusList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMap, InType: reflect.TypeOf(&ConfigMap{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapEnvSource, InType: reflect.TypeOf(&ConfigMapEnvSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapKeySelector, InType: reflect.TypeOf(&ConfigMapKeySelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapList, InType: reflect.TypeOf(&ConfigMapList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapProjection, InType: reflect.TypeOf(&ConfigMapProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapVolumeSource, InType: reflect.TypeOf(&ConfigMapVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Container, InType: reflect.TypeOf(&Container{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerImage, InType: reflect.TypeOf(&ContainerImage{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerPort, InType: reflect.TypeOf(&ContainerPort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerState, InType: reflect.TypeOf(&ContainerState{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerStateRunning, InType: reflect.TypeOf(&ContainerStateRunning{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerStateTerminated, InType: reflect.TypeOf(&ContainerStateTerminated{})}, + conversion.GeneratedDeepCopyFunc{Fn: 
DeepCopy_api_ContainerStateWaiting, InType: reflect.TypeOf(&ContainerStateWaiting{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerStatus, InType: reflect.TypeOf(&ContainerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConversionError, InType: reflect.TypeOf(&ConversionError{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DaemonEndpoint, InType: reflect.TypeOf(&DaemonEndpoint{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeleteOptions, InType: reflect.TypeOf(&DeleteOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DownwardAPIProjection, InType: reflect.TypeOf(&DownwardAPIProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DownwardAPIVolumeFile, InType: reflect.TypeOf(&DownwardAPIVolumeFile{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DownwardAPIVolumeSource, InType: reflect.TypeOf(&DownwardAPIVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EmptyDirVolumeSource, InType: reflect.TypeOf(&EmptyDirVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointAddress, InType: reflect.TypeOf(&EndpointAddress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointPort, InType: reflect.TypeOf(&EndpointPort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointSubset, InType: reflect.TypeOf(&EndpointSubset{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Endpoints, InType: reflect.TypeOf(&Endpoints{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointsList, InType: reflect.TypeOf(&EndpointsList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EnvFromSource, InType: reflect.TypeOf(&EnvFromSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EnvVar, InType: reflect.TypeOf(&EnvVar{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EnvVarSource, InType: reflect.TypeOf(&EnvVarSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Event, InType: reflect.TypeOf(&Event{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EventList, InType: reflect.TypeOf(&EventList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EventSource, InType: reflect.TypeOf(&EventSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ExecAction, InType: reflect.TypeOf(&ExecAction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_FCVolumeSource, InType: reflect.TypeOf(&FCVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_FlexVolumeSource, InType: reflect.TypeOf(&FlexVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_FlockerVolumeSource, InType: reflect.TypeOf(&FlockerVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_GCEPersistentDiskVolumeSource, InType: reflect.TypeOf(&GCEPersistentDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_GitRepoVolumeSource, InType: reflect.TypeOf(&GitRepoVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_GlusterfsVolumeSource, InType: reflect.TypeOf(&GlusterfsVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_HTTPGetAction, InType: reflect.TypeOf(&HTTPGetAction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_HTTPHeader, InType: reflect.TypeOf(&HTTPHeader{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Handler, InType: reflect.TypeOf(&Handler{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_HostPathVolumeSource, InType: reflect.TypeOf(&HostPathVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: 
DeepCopy_api_ISCSIVolumeSource, InType: reflect.TypeOf(&ISCSIVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_KeyToPath, InType: reflect.TypeOf(&KeyToPath{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Lifecycle, InType: reflect.TypeOf(&Lifecycle{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LimitRange, InType: reflect.TypeOf(&LimitRange{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LimitRangeItem, InType: reflect.TypeOf(&LimitRangeItem{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LimitRangeList, InType: reflect.TypeOf(&LimitRangeList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LimitRangeSpec, InType: reflect.TypeOf(&LimitRangeSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_List, InType: reflect.TypeOf(&List{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ListOptions, InType: reflect.TypeOf(&ListOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LoadBalancerIngress, InType: reflect.TypeOf(&LoadBalancerIngress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LoadBalancerStatus, InType: reflect.TypeOf(&LoadBalancerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LocalObjectReference, InType: reflect.TypeOf(&LocalObjectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NFSVolumeSource, InType: reflect.TypeOf(&NFSVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Namespace, InType: reflect.TypeOf(&Namespace{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NamespaceList, InType: reflect.TypeOf(&NamespaceList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NamespaceSpec, InType: reflect.TypeOf(&NamespaceSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NamespaceStatus, InType: reflect.TypeOf(&NamespaceStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Node, InType: reflect.TypeOf(&Node{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeAddress, InType: reflect.TypeOf(&NodeAddress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeAffinity, InType: reflect.TypeOf(&NodeAffinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeCondition, InType: reflect.TypeOf(&NodeCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeDaemonEndpoints, InType: reflect.TypeOf(&NodeDaemonEndpoints{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeList, InType: reflect.TypeOf(&NodeList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeProxyOptions, InType: reflect.TypeOf(&NodeProxyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeResources, InType: reflect.TypeOf(&NodeResources{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSelector, InType: reflect.TypeOf(&NodeSelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSelectorRequirement, InType: reflect.TypeOf(&NodeSelectorRequirement{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSelectorTerm, InType: reflect.TypeOf(&NodeSelectorTerm{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSpec, InType: reflect.TypeOf(&NodeSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeStatus, InType: reflect.TypeOf(&NodeStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSystemInfo, InType: reflect.TypeOf(&NodeSystemInfo{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ObjectFieldSelector, InType: reflect.TypeOf(&ObjectFieldSelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: 
DeepCopy_api_ObjectMeta, InType: reflect.TypeOf(&ObjectMeta{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ObjectReference, InType: reflect.TypeOf(&ObjectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolume, InType: reflect.TypeOf(&PersistentVolume{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaim, InType: reflect.TypeOf(&PersistentVolumeClaim{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaimList, InType: reflect.TypeOf(&PersistentVolumeClaimList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaimSpec, InType: reflect.TypeOf(&PersistentVolumeClaimSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaimStatus, InType: reflect.TypeOf(&PersistentVolumeClaimStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaimVolumeSource, InType: reflect.TypeOf(&PersistentVolumeClaimVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeList, InType: reflect.TypeOf(&PersistentVolumeList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeSource, InType: reflect.TypeOf(&PersistentVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeSpec, InType: reflect.TypeOf(&PersistentVolumeSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeStatus, InType: reflect.TypeOf(&PersistentVolumeStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PhotonPersistentDiskVolumeSource, InType: reflect.TypeOf(&PhotonPersistentDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Pod, InType: reflect.TypeOf(&Pod{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAffinity, InType: reflect.TypeOf(&PodAffinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAffinityTerm, InType: reflect.TypeOf(&PodAffinityTerm{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAntiAffinity, InType: reflect.TypeOf(&PodAntiAffinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAttachOptions, InType: reflect.TypeOf(&PodAttachOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodCondition, InType: reflect.TypeOf(&PodCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodExecOptions, InType: reflect.TypeOf(&PodExecOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodList, InType: reflect.TypeOf(&PodList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodLogOptions, InType: reflect.TypeOf(&PodLogOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodPortForwardOptions, InType: reflect.TypeOf(&PodPortForwardOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodProxyOptions, InType: reflect.TypeOf(&PodProxyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodSecurityContext, InType: reflect.TypeOf(&PodSecurityContext{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodSignature, InType: reflect.TypeOf(&PodSignature{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodSpec, InType: reflect.TypeOf(&PodSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodStatus, InType: reflect.TypeOf(&PodStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodStatusResult, InType: reflect.TypeOf(&PodStatusResult{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodTemplate, InType: reflect.TypeOf(&PodTemplate{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodTemplateList, 
InType: reflect.TypeOf(&PodTemplateList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodTemplateSpec, InType: reflect.TypeOf(&PodTemplateSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PortworxVolumeSource, InType: reflect.TypeOf(&PortworxVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Preconditions, InType: reflect.TypeOf(&Preconditions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PreferAvoidPodsEntry, InType: reflect.TypeOf(&PreferAvoidPodsEntry{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PreferredSchedulingTerm, InType: reflect.TypeOf(&PreferredSchedulingTerm{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Probe, InType: reflect.TypeOf(&Probe{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ProjectedVolumeSource, InType: reflect.TypeOf(&ProjectedVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_QuobyteVolumeSource, InType: reflect.TypeOf(&QuobyteVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_RBDVolumeSource, InType: reflect.TypeOf(&RBDVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_RangeAllocation, InType: reflect.TypeOf(&RangeAllocation{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationController, InType: reflect.TypeOf(&ReplicationController{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationControllerCondition, InType: reflect.TypeOf(&ReplicationControllerCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationControllerList, InType: reflect.TypeOf(&ReplicationControllerList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationControllerSpec, InType: reflect.TypeOf(&ReplicationControllerSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationControllerStatus, InType: reflect.TypeOf(&ReplicationControllerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceFieldSelector, InType: reflect.TypeOf(&ResourceFieldSelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceQuota, InType: reflect.TypeOf(&ResourceQuota{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceQuotaList, InType: reflect.TypeOf(&ResourceQuotaList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceQuotaSpec, InType: reflect.TypeOf(&ResourceQuotaSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceQuotaStatus, InType: reflect.TypeOf(&ResourceQuotaStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceRequirements, InType: reflect.TypeOf(&ResourceRequirements{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SELinuxOptions, InType: reflect.TypeOf(&SELinuxOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ScaleIOVolumeSource, InType: reflect.TypeOf(&ScaleIOVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Secret, InType: reflect.TypeOf(&Secret{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretEnvSource, InType: reflect.TypeOf(&SecretEnvSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretKeySelector, InType: reflect.TypeOf(&SecretKeySelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretList, InType: reflect.TypeOf(&SecretList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretProjection, InType: reflect.TypeOf(&SecretProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretVolumeSource, InType: reflect.TypeOf(&SecretVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: 
DeepCopy_api_SecurityContext, InType: reflect.TypeOf(&SecurityContext{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SerializedReference, InType: reflect.TypeOf(&SerializedReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Service, InType: reflect.TypeOf(&Service{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceAccount, InType: reflect.TypeOf(&ServiceAccount{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceAccountList, InType: reflect.TypeOf(&ServiceAccountList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceList, InType: reflect.TypeOf(&ServiceList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServicePort, InType: reflect.TypeOf(&ServicePort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceProxyOptions, InType: reflect.TypeOf(&ServiceProxyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceSpec, InType: reflect.TypeOf(&ServiceSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceStatus, InType: reflect.TypeOf(&ServiceStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Sysctl, InType: reflect.TypeOf(&Sysctl{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_TCPSocketAction, InType: reflect.TypeOf(&TCPSocketAction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Taint, InType: reflect.TypeOf(&Taint{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Toleration, InType: reflect.TypeOf(&Toleration{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Volume, InType: reflect.TypeOf(&Volume{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VolumeMount, InType: reflect.TypeOf(&VolumeMount{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VolumeProjection, InType: reflect.TypeOf(&VolumeProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VolumeSource, InType: reflect.TypeOf(&VolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VsphereVirtualDiskVolumeSource, InType: reflect.TypeOf(&VsphereVirtualDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_WeightedPodAffinityTerm, InType: reflect.TypeOf(&WeightedPodAffinityTerm{})}, + ) +} + +func DeepCopy_api_AWSElasticBlockStoreVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AWSElasticBlockStoreVolumeSource) + out := out.(*AWSElasticBlockStoreVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_Affinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Affinity) + out := out.(*Affinity) + *out = *in + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + *out = new(NodeAffinity) + if err := DeepCopy_api_NodeAffinity(*in, *out, c); err != nil { + return err + } + } + if in.PodAffinity != nil { + in, out := &in.PodAffinity, &out.PodAffinity + *out = new(PodAffinity) + if err := DeepCopy_api_PodAffinity(*in, *out, c); err != nil { + return err + } + } + if in.PodAntiAffinity != nil { + in, out := &in.PodAntiAffinity, &out.PodAntiAffinity + *out = new(PodAntiAffinity) + if err := DeepCopy_api_PodAntiAffinity(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_AttachedVolume(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AttachedVolume) + out := out.(*AttachedVolume) + *out = *in + return nil + } +} + +func DeepCopy_api_AvoidPods(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AvoidPods) + out := out.(*AvoidPods) + 
*out = *in + if in.PreferAvoidPods != nil { + in, out := &in.PreferAvoidPods, &out.PreferAvoidPods + *out = make([]PreferAvoidPodsEntry, len(*in)) + for i := range *in { + if err := DeepCopy_api_PreferAvoidPodsEntry(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_AzureDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AzureDiskVolumeSource) + out := out.(*AzureDiskVolumeSource) + *out = *in + if in.CachingMode != nil { + in, out := &in.CachingMode, &out.CachingMode + *out = new(AzureDataDiskCachingMode) + **out = **in + } + if in.FSType != nil { + in, out := &in.FSType, &out.FSType + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_AzureFileVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AzureFileVolumeSource) + out := out.(*AzureFileVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_Binding(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Binding) + out := out.(*Binding) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + return nil + } +} + +func DeepCopy_api_Capabilities(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Capabilities) + out := out.(*Capabilities) + *out = *in + if in.Add != nil { + in, out := &in.Add, &out.Add + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + if in.Drop != nil { + in, out := &in.Drop, &out.Drop + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_CephFSVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CephFSVolumeSource) + out := out.(*CephFSVolumeSource) + *out = *in + if in.Monitors != nil { + in, out := &in.Monitors, &out.Monitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +func DeepCopy_api_CinderVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CinderVolumeSource) + out := out.(*CinderVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_ComponentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ComponentCondition) + out := out.(*ComponentCondition) + *out = *in + return nil + } +} + +func DeepCopy_api_ComponentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ComponentStatus) + out := out.(*ComponentStatus) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ComponentCondition, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_ComponentStatusList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ComponentStatusList) + out := out.(*ComponentStatusList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ComponentStatus, len(*in)) + for i := range *in { + if err := DeepCopy_api_ComponentStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + 
} + } + } + return nil + } +} + +func DeepCopy_api_ConfigMap(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMap) + out := out.(*ConfigMap) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_api_ConfigMapEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapEnvSource) + out := out.(*ConfigMapEnvSource) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_ConfigMapKeySelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapKeySelector) + out := out.(*ConfigMapKeySelector) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_ConfigMapList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapList) + out := out.(*ConfigMapList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConfigMap, len(*in)) + for i := range *in { + if err := DeepCopy_api_ConfigMap(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_ConfigMapProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapProjection) + out := out.(*ConfigMapProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_ConfigMapVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapVolumeSource) + out := out.(*ConfigMapVolumeSource) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_Container(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Container) + out := out.(*Container) + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]EnvFromSource, len(*in)) + for i := range *in { + if err := DeepCopy_api_EnvFromSource(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + 
for i := range *in { + if err := DeepCopy_api_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if err := DeepCopy_api_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { + return err + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMount, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(Probe) + if err := DeepCopy_api_Probe(*in, *out, c); err != nil { + return err + } + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(Probe) + if err := DeepCopy_api_Probe(*in, *out, c); err != nil { + return err + } + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(Lifecycle) + if err := DeepCopy_api_Lifecycle(*in, *out, c); err != nil { + return err + } + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(SecurityContext) + if err := DeepCopy_api_SecurityContext(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_ContainerImage(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerImage) + out := out.(*ContainerImage) + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_ContainerPort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerPort) + out := out.(*ContainerPort) + *out = *in + return nil + } +} + +func DeepCopy_api_ContainerState(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerState) + out := out.(*ContainerState) + *out = *in + if in.Waiting != nil { + in, out := &in.Waiting, &out.Waiting + *out = new(ContainerStateWaiting) + **out = **in + } + if in.Running != nil { + in, out := &in.Running, &out.Running + *out = new(ContainerStateRunning) + if err := DeepCopy_api_ContainerStateRunning(*in, *out, c); err != nil { + return err + } + } + if in.Terminated != nil { + in, out := &in.Terminated, &out.Terminated + *out = new(ContainerStateTerminated) + if err := DeepCopy_api_ContainerStateTerminated(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_ContainerStateRunning(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStateRunning) + out := out.(*ContainerStateRunning) + *out = *in + out.StartedAt = in.StartedAt.DeepCopy() + return nil + } +} + +func DeepCopy_api_ContainerStateTerminated(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStateTerminated) + out := out.(*ContainerStateTerminated) + *out = *in + out.StartedAt = in.StartedAt.DeepCopy() + out.FinishedAt = in.FinishedAt.DeepCopy() + return nil + } +} + +func DeepCopy_api_ContainerStateWaiting(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStateWaiting) + out := out.(*ContainerStateWaiting) + *out = *in + return nil + } +} + +func DeepCopy_api_ContainerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStatus) + out := out.(*ContainerStatus) + *out = *in + if err := DeepCopy_api_ContainerState(&in.State, &out.State, c); err != nil { + return err + } + if err := DeepCopy_api_ContainerState(&in.LastTerminationState, &out.LastTerminationState, c); err != nil { + return err 
+ } + return nil + } +} + +func DeepCopy_api_ConversionError(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConversionError) + out := out.(*ConversionError) + *out = *in + // in.In is kind 'Interface' + if in.In != nil { + if newVal, err := c.DeepCopy(&in.In); err != nil { + return err + } else { + out.In = *newVal.(*interface{}) + } + } + // in.Out is kind 'Interface' + if in.Out != nil { + if newVal, err := c.DeepCopy(&in.Out); err != nil { + return err + } else { + out.Out = *newVal.(*interface{}) + } + } + return nil + } +} + +func DeepCopy_api_DaemonEndpoint(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonEndpoint) + out := out.(*DaemonEndpoint) + *out = *in + return nil + } +} + +func DeepCopy_api_DeleteOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeleteOptions) + out := out.(*DeleteOptions) + *out = *in + if in.GracePeriodSeconds != nil { + in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Preconditions != nil { + in, out := &in.Preconditions, &out.Preconditions + *out = new(Preconditions) + if err := DeepCopy_api_Preconditions(*in, *out, c); err != nil { + return err + } + } + if in.OrphanDependents != nil { + in, out := &in.OrphanDependents, &out.OrphanDependents + *out = new(bool) + **out = **in + } + if in.PropagationPolicy != nil { + in, out := &in.PropagationPolicy, &out.PropagationPolicy + *out = new(DeletionPropagation) + **out = **in + } + return nil + } +} + +func DeepCopy_api_DownwardAPIProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIProjection) + out := out.(*DownwardAPIProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + if err := DeepCopy_api_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_DownwardAPIVolumeFile(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIVolumeFile) + out := out.(*DownwardAPIVolumeFile) + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + **out = **in + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + if err := DeepCopy_api_ResourceFieldSelector(*in, *out, c); err != nil { + return err + } + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_api_DownwardAPIVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIVolumeSource) + out := out.(*DownwardAPIVolumeSource) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + if err := DeepCopy_api_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_api_EmptyDirVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EmptyDirVolumeSource) + out := out.(*EmptyDirVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_EndpointAddress(in interface{}, out interface{}, c 
*conversion.Cloner) error { + { + in := in.(*EndpointAddress) + out := out.(*EndpointAddress) + *out = *in + if in.NodeName != nil { + in, out := &in.NodeName, &out.NodeName + *out = new(string) + **out = **in + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(ObjectReference) + **out = **in + } + return nil + } +} + +func DeepCopy_api_EndpointPort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointPort) + out := out.(*EndpointPort) + *out = *in + return nil + } +} + +func DeepCopy_api_EndpointSubset(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointSubset) + out := out.(*EndpointSubset) + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + if err := DeepCopy_api_EndpointAddress(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.NotReadyAddresses != nil { + in, out := &in.NotReadyAddresses, &out.NotReadyAddresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + if err := DeepCopy_api_EndpointAddress(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_Endpoints(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Endpoints) + out := out.(*Endpoints) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Subsets != nil { + in, out := &in.Subsets, &out.Subsets + *out = make([]EndpointSubset, len(*in)) + for i := range *in { + if err := DeepCopy_api_EndpointSubset(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_EndpointsList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointsList) + out := out.(*EndpointsList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Endpoints, len(*in)) + for i := range *in { + if err := DeepCopy_api_Endpoints(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_EnvFromSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvFromSource) + out := out.(*EnvFromSource) + *out = *in + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + *out = new(ConfigMapEnvSource) + if err := DeepCopy_api_ConfigMapEnvSource(*in, *out, c); err != nil { + return err + } + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretEnvSource) + if err := DeepCopy_api_SecretEnvSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_EnvVar(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvVar) + out := out.(*EnvVar) + *out = *in + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + *out = new(EnvVarSource) + if err := DeepCopy_api_EnvVarSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_EnvVarSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvVarSource) + out := out.(*EnvVarSource) + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + **out = 
**in + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + if err := DeepCopy_api_ResourceFieldSelector(*in, *out, c); err != nil { + return err + } + } + if in.ConfigMapKeyRef != nil { + in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef + *out = new(ConfigMapKeySelector) + if err := DeepCopy_api_ConfigMapKeySelector(*in, *out, c); err != nil { + return err + } + } + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(SecretKeySelector) + if err := DeepCopy_api_SecretKeySelector(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_Event(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Event) + out := out.(*Event) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + out.FirstTimestamp = in.FirstTimestamp.DeepCopy() + out.LastTimestamp = in.LastTimestamp.DeepCopy() + return nil + } +} + +func DeepCopy_api_EventList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EventList) + out := out.(*EventList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + if err := DeepCopy_api_Event(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_EventSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EventSource) + out := out.(*EventSource) + *out = *in + return nil + } +} + +func DeepCopy_api_ExecAction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExecAction) + out := out.(*ExecAction) + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_FCVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FCVolumeSource) + out := out.(*FCVolumeSource) + *out = *in + if in.TargetWWNs != nil { + in, out := &in.TargetWWNs, &out.TargetWWNs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_api_FlexVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FlexVolumeSource) + out := out.(*FlexVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_api_FlockerVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FlockerVolumeSource) + out := out.(*FlockerVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_GCEPersistentDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GCEPersistentDiskVolumeSource) + out := out.(*GCEPersistentDiskVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_GitRepoVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GitRepoVolumeSource) + out := out.(*GitRepoVolumeSource) + *out = *in + return nil + } +} + +func 
DeepCopy_api_GlusterfsVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GlusterfsVolumeSource) + out := out.(*GlusterfsVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_HTTPGetAction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPGetAction) + out := out.(*HTTPGetAction) + *out = *in + if in.HTTPHeaders != nil { + in, out := &in.HTTPHeaders, &out.HTTPHeaders + *out = make([]HTTPHeader, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_HTTPHeader(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPHeader) + out := out.(*HTTPHeader) + *out = *in + return nil + } +} + +func DeepCopy_api_Handler(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Handler) + out := out.(*Handler) + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecAction) + if err := DeepCopy_api_ExecAction(*in, *out, c); err != nil { + return err + } + } + if in.HTTPGet != nil { + in, out := &in.HTTPGet, &out.HTTPGet + *out = new(HTTPGetAction) + if err := DeepCopy_api_HTTPGetAction(*in, *out, c); err != nil { + return err + } + } + if in.TCPSocket != nil { + in, out := &in.TCPSocket, &out.TCPSocket + *out = new(TCPSocketAction) + **out = **in + } + return nil + } +} + +func DeepCopy_api_HostPathVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HostPathVolumeSource) + out := out.(*HostPathVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_ISCSIVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ISCSIVolumeSource) + out := out.(*ISCSIVolumeSource) + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_KeyToPath(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*KeyToPath) + out := out.(*KeyToPath) + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_api_Lifecycle(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Lifecycle) + out := out.(*Lifecycle) + *out = *in + if in.PostStart != nil { + in, out := &in.PostStart, &out.PostStart + *out = new(Handler) + if err := DeepCopy_api_Handler(*in, *out, c); err != nil { + return err + } + } + if in.PreStop != nil { + in, out := &in.PreStop, &out.PreStop + *out = new(Handler) + if err := DeepCopy_api_Handler(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_LimitRange(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRange) + out := out.(*LimitRange) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_LimitRangeSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_LimitRangeItem(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRangeItem) + out := out.(*LimitRangeItem) + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = make(ResourceList) + for key, val := range *in { + 
(*out)[key] = val.DeepCopy() + } + } + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.DefaultRequest != nil { + in, out := &in.DefaultRequest, &out.DefaultRequest + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.MaxLimitRequestRatio != nil { + in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +func DeepCopy_api_LimitRangeList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRangeList) + out := out.(*LimitRangeList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LimitRange, len(*in)) + for i := range *in { + if err := DeepCopy_api_LimitRange(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_LimitRangeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRangeSpec) + out := out.(*LimitRangeSpec) + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make([]LimitRangeItem, len(*in)) + for i := range *in { + if err := DeepCopy_api_LimitRangeItem(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_List(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*List) + out := out.(*List) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*runtime.Object) + } + } + } + return nil + } +} + +func DeepCopy_api_ListOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ListOptions) + out := out.(*ListOptions) + *out = *in + // in.LabelSelector is kind 'Interface' + if in.LabelSelector != nil { + if newVal, err := c.DeepCopy(&in.LabelSelector); err != nil { + return err + } else { + out.LabelSelector = *newVal.(*labels.Selector) + } + } + // in.FieldSelector is kind 'Interface' + if in.FieldSelector != nil { + if newVal, err := c.DeepCopy(&in.FieldSelector); err != nil { + return err + } else { + out.FieldSelector = *newVal.(*fields.Selector) + } + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + return nil + } +} + +func DeepCopy_api_LoadBalancerIngress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LoadBalancerIngress) + out := out.(*LoadBalancerIngress) + *out = *in + return nil + } +} + +func DeepCopy_api_LoadBalancerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LoadBalancerStatus) + out := out.(*LoadBalancerStatus) + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]LoadBalancerIngress, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_LocalObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LocalObjectReference) + out := out.(*LocalObjectReference) + *out = *in + return nil + } +} + +func DeepCopy_api_NFSVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NFSVolumeSource) + out := out.(*NFSVolumeSource) + *out = 
*in + return nil + } +} + +func DeepCopy_api_Namespace(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Namespace) + out := out.(*Namespace) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_NamespaceSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_NamespaceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NamespaceList) + out := out.(*NamespaceList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Namespace, len(*in)) + for i := range *in { + if err := DeepCopy_api_Namespace(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_NamespaceSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NamespaceSpec) + out := out.(*NamespaceSpec) + *out = *in + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]FinalizerName, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_NamespaceStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NamespaceStatus) + out := out.(*NamespaceStatus) + *out = *in + return nil + } +} + +func DeepCopy_api_Node(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Node) + out := out.(*Node) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_NodeSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_NodeStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_NodeAddress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeAddress) + out := out.(*NodeAddress) + *out = *in + return nil + } +} + +func DeepCopy_api_NodeAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeAffinity) + out := out.(*NodeAffinity) + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = new(NodeSelector) + if err := DeepCopy_api_NodeSelector(*in, *out, c); err != nil { + return err + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]PreferredSchedulingTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_PreferredSchedulingTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_NodeCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeCondition) + out := out.(*NodeCondition) + *out = *in + out.LastHeartbeatTime = in.LastHeartbeatTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_api_NodeDaemonEndpoints(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeDaemonEndpoints) + out := out.(*NodeDaemonEndpoints) + *out = *in + return nil + } +} + +func DeepCopy_api_NodeList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeList) + out := 
out.(*NodeList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Node, len(*in)) + for i := range *in { + if err := DeepCopy_api_Node(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_NodeProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeProxyOptions) + out := out.(*NodeProxyOptions) + *out = *in + return nil + } +} + +func DeepCopy_api_NodeResources(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeResources) + out := out.(*NodeResources) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +func DeepCopy_api_NodeSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSelector) + out := out.(*NodeSelector) + *out = *in + if in.NodeSelectorTerms != nil { + in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms + *out = make([]NodeSelectorTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_NodeSelectorTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_NodeSelectorRequirement(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSelectorRequirement) + out := out.(*NodeSelectorRequirement) + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_NodeSelectorTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSelectorTerm) + out := out.(*NodeSelectorTerm) + *out = *in + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]NodeSelectorRequirement, len(*in)) + for i := range *in { + if err := DeepCopy_api_NodeSelectorRequirement(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_NodeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSpec) + out := out.(*NodeSpec) + *out = *in + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]Taint, len(*in)) + for i := range *in { + if err := DeepCopy_api_Taint(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_NodeStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeStatus) + out := out.(*NodeStatus) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]NodeCondition, len(*in)) + for i := range *in { + if err := DeepCopy_api_NodeCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]NodeAddress, len(*in)) + copy(*out, *in) + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ContainerImage, len(*in)) + for i := range *in { + if err := DeepCopy_api_ContainerImage(&(*in)[i], &(*out)[i], c); 
err != nil { + return err + } + } + } + if in.VolumesInUse != nil { + in, out := &in.VolumesInUse, &out.VolumesInUse + *out = make([]UniqueVolumeName, len(*in)) + copy(*out, *in) + } + if in.VolumesAttached != nil { + in, out := &in.VolumesAttached, &out.VolumesAttached + *out = make([]AttachedVolume, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_NodeSystemInfo(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSystemInfo) + out := out.(*NodeSystemInfo) + *out = *in + return nil + } +} + +func DeepCopy_api_ObjectFieldSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectFieldSelector) + out := out.(*ObjectFieldSelector) + *out = *in + return nil + } +} + +func DeepCopy_api_ObjectMeta(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMeta) + out := out.(*ObjectMeta) + *out = *in + out.CreationTimestamp = in.CreationTimestamp.DeepCopy() + if in.DeletionTimestamp != nil { + in, out := &in.DeletionTimestamp, &out.DeletionTimestamp + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + if in.DeletionGracePeriodSeconds != nil { + in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]v1.OwnerReference, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*v1.OwnerReference) + } + } + } + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_ObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectReference) + out := out.(*ObjectReference) + *out = *in + return nil + } +} + +func DeepCopy_api_PersistentVolume(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolume) + out := out.(*PersistentVolume) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PersistentVolumeSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_PersistentVolumeClaim(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaim) + out := out.(*PersistentVolumeClaim) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_PersistentVolumeClaimStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_PersistentVolumeClaimList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimList) + out := out.(*PersistentVolumeClaimList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = 
make([]PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := DeepCopy_api_PersistentVolumeClaim(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_PersistentVolumeClaimSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimSpec) + out := out.(*PersistentVolumeClaimSpec) + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := DeepCopy_api_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { + return err + } + if in.StorageClassName != nil { + in, out := &in.StorageClassName, &out.StorageClassName + *out = new(string) + **out = **in + } + return nil + } +} + +func DeepCopy_api_PersistentVolumeClaimStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimStatus) + out := out.(*PersistentVolumeClaimStatus) + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +func DeepCopy_api_PersistentVolumeClaimVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimVolumeSource) + out := out.(*PersistentVolumeClaimVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_PersistentVolumeList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeList) + out := out.(*PersistentVolumeList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolume, len(*in)) + for i := range *in { + if err := DeepCopy_api_PersistentVolume(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_PersistentVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeSource) + out := out.(*PersistentVolumeSource) + *out = *in + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + **out = **in + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsVolumeSource) + **out = **in + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSVolumeSource) + **out = **in + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + *out = new(RBDVolumeSource) + if err := DeepCopy_api_RBDVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + *out = new(QuobyteVolumeSource) + **out = **in + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(ISCSIVolumeSource) + if err := DeepCopy_api_ISCSIVolumeSource(*in, 
*out, c); err != nil { + return err + } + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(FlexVolumeSource) + if err := DeepCopy_api_FlexVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(CinderVolumeSource) + **out = **in + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(CephFSVolumeSource) + if err := DeepCopy_api_CephFSVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.FC != nil { + in, out := &in.FC, &out.FC + *out = new(FCVolumeSource) + if err := DeepCopy_api_FCVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(FlockerVolumeSource) + **out = **in + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(AzureFileVolumeSource) + **out = **in + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + *out = new(AzureDiskVolumeSource) + if err := DeepCopy_api_AzureDiskVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOVolumeSource) + if err := DeepCopy_api_ScaleIOVolumeSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_PersistentVolumeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeSpec) + out := out.(*PersistentVolumeSpec) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if err := DeepCopy_api_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, c); err != nil { + return err + } + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.ClaimRef != nil { + in, out := &in.ClaimRef, &out.ClaimRef + *out = new(ObjectReference) + **out = **in + } + return nil + } +} + +func DeepCopy_api_PersistentVolumeStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeStatus) + out := out.(*PersistentVolumeStatus) + *out = *in + return nil + } +} + +func DeepCopy_api_PhotonPersistentDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PhotonPersistentDiskVolumeSource) + out := out.(*PhotonPersistentDiskVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_Pod(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Pod) + out := out.(*Pod) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PodSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_PodStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + 
+func DeepCopy_api_PodAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAffinity) + out := out.(*PodAffinity) + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_PodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_PodAffinityTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAffinityTerm) + out := out.(*PodAffinityTerm) + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_PodAntiAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAntiAffinity) + out := out.(*PodAntiAffinity) + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_PodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_PodAttachOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAttachOptions) + out := out.(*PodAttachOptions) + *out = *in + return nil + } +} + +func DeepCopy_api_PodCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodCondition) + out := out.(*PodCondition) + *out = *in + out.LastProbeTime = in.LastProbeTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_api_PodExecOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodExecOptions) + out := out.(*PodExecOptions) + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_PodList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodList) + out := out.(*PodList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Pod, len(*in)) + for i := range *in { + if err := DeepCopy_api_Pod(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil 
+ } +} + +func DeepCopy_api_PodLogOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodLogOptions) + out := out.(*PodLogOptions) + *out = *in + if in.SinceSeconds != nil { + in, out := &in.SinceSeconds, &out.SinceSeconds + *out = new(int64) + **out = **in + } + if in.SinceTime != nil { + in, out := &in.SinceTime, &out.SinceTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + if in.TailLines != nil { + in, out := &in.TailLines, &out.TailLines + *out = new(int64) + **out = **in + } + if in.LimitBytes != nil { + in, out := &in.LimitBytes, &out.LimitBytes + *out = new(int64) + **out = **in + } + return nil + } +} + +func DeepCopy_api_PodPortForwardOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPortForwardOptions) + out := out.(*PodPortForwardOptions) + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]int32, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_PodProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodProxyOptions) + out := out.(*PodProxyOptions) + *out = *in + return nil + } +} + +func DeepCopy_api_PodSecurityContext(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityContext) + out := out.(*PodSecurityContext) + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + **out = **in + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(int64) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.SupplementalGroups != nil { + in, out := &in.SupplementalGroups, &out.SupplementalGroups + *out = make([]int64, len(*in)) + copy(*out, *in) + } + if in.FSGroup != nil { + in, out := &in.FSGroup, &out.FSGroup + *out = new(int64) + **out = **in + } + return nil + } +} + +func DeepCopy_api_PodSignature(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSignature) + out := out.(*PodSignature) + *out = *in + if in.PodController != nil { + in, out := &in.PodController, &out.PodController + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.OwnerReference) + } + } + return nil + } +} + +func DeepCopy_api_PodSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSpec) + out := out.(*PodSpec) + *out = *in + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]Volume, len(*in)) + for i := range *in { + if err := DeepCopy_api_Volume(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]Container, len(*in)) + for i := range *in { + if err := DeepCopy_api_Container(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]Container, len(*in)) + for i := range *in { + if err := DeepCopy_api_Container(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + 
**out = **in + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(PodSecurityContext) + if err := DeepCopy_api_PodSecurityContext(*in, *out, c); err != nil { + return err + } + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(Affinity) + if err := DeepCopy_api_Affinity(*in, *out, c); err != nil { + return err + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]Toleration, len(*in)) + for i := range *in { + if err := DeepCopy_api_Toleration(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_PodStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodStatus) + out := out.(*PodStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PodCondition, len(*in)) + for i := range *in { + if err := DeepCopy_api_PodCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + if in.InitContainerStatuses != nil { + in, out := &in.InitContainerStatuses, &out.InitContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + if err := DeepCopy_api_ContainerStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.ContainerStatuses != nil { + in, out := &in.ContainerStatuses, &out.ContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + if err := DeepCopy_api_ContainerStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_PodStatusResult(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodStatusResult) + out := out.(*PodStatusResult) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PodStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_PodTemplate(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodTemplate) + out := out.(*PodTemplate) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_PodTemplateList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodTemplateList) + out := out.(*PodTemplateList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodTemplate, len(*in)) + for i := range *in { + if err := DeepCopy_api_PodTemplate(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func 
DeepCopy_api_PodTemplateSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodTemplateSpec) + out := out.(*PodTemplateSpec) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PodSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_PortworxVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PortworxVolumeSource) + out := out.(*PortworxVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_Preconditions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Preconditions) + out := out.(*Preconditions) + *out = *in + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(types.UID) + **out = **in + } + return nil + } +} + +func DeepCopy_api_PreferAvoidPodsEntry(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PreferAvoidPodsEntry) + out := out.(*PreferAvoidPodsEntry) + *out = *in + if err := DeepCopy_api_PodSignature(&in.PodSignature, &out.PodSignature, c); err != nil { + return err + } + out.EvictionTime = in.EvictionTime.DeepCopy() + return nil + } +} + +func DeepCopy_api_PreferredSchedulingTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PreferredSchedulingTerm) + out := out.(*PreferredSchedulingTerm) + *out = *in + if err := DeepCopy_api_NodeSelectorTerm(&in.Preference, &out.Preference, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_Probe(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Probe) + out := out.(*Probe) + *out = *in + if err := DeepCopy_api_Handler(&in.Handler, &out.Handler, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_ProjectedVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ProjectedVolumeSource) + out := out.(*ProjectedVolumeSource) + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]VolumeProjection, len(*in)) + for i := range *in { + if err := DeepCopy_api_VolumeProjection(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_api_QuobyteVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*QuobyteVolumeSource) + out := out.(*QuobyteVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_RBDVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RBDVolumeSource) + out := out.(*RBDVolumeSource) + *out = *in + if in.CephMonitors != nil { + in, out := &in.CephMonitors, &out.CephMonitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +func DeepCopy_api_RangeAllocation(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RangeAllocation) + out := out.(*RangeAllocation) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return nil + 
} +} + +func DeepCopy_api_ReplicationController(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationController) + out := out.(*ReplicationController) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_ReplicationControllerSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_ReplicationControllerStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_ReplicationControllerCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerCondition) + out := out.(*ReplicationControllerCondition) + *out = *in + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_api_ReplicationControllerList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerList) + out := out.(*ReplicationControllerList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicationController, len(*in)) + for i := range *in { + if err := DeepCopy_api_ReplicationController(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_ReplicationControllerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerSpec) + out := out.(*ReplicationControllerSpec) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(PodTemplateSpec) + if err := DeepCopy_api_PodTemplateSpec(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_ReplicationControllerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerStatus) + out := out.(*ReplicationControllerStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicationControllerCondition, len(*in)) + for i := range *in { + if err := DeepCopy_api_ReplicationControllerCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_ResourceFieldSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceFieldSelector) + out := out.(*ResourceFieldSelector) + *out = *in + out.Divisor = in.Divisor.DeepCopy() + return nil + } +} + +func DeepCopy_api_ResourceQuota(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuota) + out := out.(*ResourceQuota) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_ResourceQuotaSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_ResourceQuotaStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_ResourceQuotaList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuotaList) + out := out.(*ResourceQuotaList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceQuota, len(*in)) + for i := range *in { + if err := 
DeepCopy_api_ResourceQuota(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_ResourceQuotaSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuotaSpec) + out := out.(*ResourceQuotaSpec) + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ResourceQuotaScope, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_ResourceQuotaStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuotaStatus) + out := out.(*ResourceQuotaStatus) + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Used != nil { + in, out := &in.Used, &out.Used + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +func DeepCopy_api_ResourceRequirements(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceRequirements) + out := out.(*ResourceRequirements) + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +func DeepCopy_api_SELinuxOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SELinuxOptions) + out := out.(*SELinuxOptions) + *out = *in + return nil + } +} + +func DeepCopy_api_ScaleIOVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleIOVolumeSource) + out := out.(*ScaleIOVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +func DeepCopy_api_Secret(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Secret) + out := out.(*Secret) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string][]byte) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*[]byte) + } + } + } + return nil + } +} + +func DeepCopy_api_SecretEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretEnvSource) + out := out.(*SecretEnvSource) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_SecretKeySelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretKeySelector) + out := out.(*SecretKeySelector) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_SecretList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretList) + out := out.(*SecretList) + *out = *in + if in.Items != nil { + in, out := &in.Items, 
&out.Items + *out = make([]Secret, len(*in)) + for i := range *in { + if err := DeepCopy_api_Secret(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_SecretProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretProjection) + out := out.(*SecretProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_SecretVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretVolumeSource) + out := out.(*SecretVolumeSource) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_SecurityContext(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecurityContext) + out := out.(*SecurityContext) + *out = *in + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = new(Capabilities) + if err := DeepCopy_api_Capabilities(*in, *out, c); err != nil { + return err + } + } + if in.Privileged != nil { + in, out := &in.Privileged, &out.Privileged + *out = new(bool) + **out = **in + } + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + **out = **in + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(int64) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.ReadOnlyRootFilesystem != nil { + in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_SerializedReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SerializedReference) + out := out.(*SerializedReference) + *out = *in + return nil + } +} + +func DeepCopy_api_Service(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Service) + out := out.(*Service) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_ServiceSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_ServiceStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_ServiceAccount(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceAccount) + out := out.(*ServiceAccount) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + if in.ImagePullSecrets != nil { + in, 
out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_ServiceAccountList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceAccountList) + out := out.(*ServiceAccountList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceAccount, len(*in)) + for i := range *in { + if err := DeepCopy_api_ServiceAccount(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_ServiceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceList) + out := out.(*ServiceList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + if err := DeepCopy_api_Service(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_ServicePort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServicePort) + out := out.(*ServicePort) + *out = *in + return nil + } +} + +func DeepCopy_api_ServiceProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceProxyOptions) + out := out.(*ServiceProxyOptions) + *out = *in + return nil + } +} + +func DeepCopy_api_ServiceSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceSpec) + out := out.(*ServiceSpec) + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ServicePort, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExternalIPs != nil { + in, out := &in.ExternalIPs, &out.ExternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LoadBalancerSourceRanges != nil { + in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_ServiceStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceStatus) + out := out.(*ServiceStatus) + *out = *in + if err := DeepCopy_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_Sysctl(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Sysctl) + out := out.(*Sysctl) + *out = *in + return nil + } +} + +func DeepCopy_api_TCPSocketAction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TCPSocketAction) + out := out.(*TCPSocketAction) + *out = *in + return nil + } +} + +func DeepCopy_api_Taint(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Taint) + out := out.(*Taint) + *out = *in + out.TimeAdded = in.TimeAdded.DeepCopy() + return nil + } +} + +func DeepCopy_api_Toleration(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Toleration) + out := out.(*Toleration) + *out = *in + if in.TolerationSeconds != nil { + in, out := &in.TolerationSeconds, &out.TolerationSeconds + *out = new(int64) + **out = **in + } + return nil + } +} + +func DeepCopy_api_Volume(in 
interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Volume) + out := out.(*Volume) + *out = *in + if err := DeepCopy_api_VolumeSource(&in.VolumeSource, &out.VolumeSource, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_VolumeMount(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeMount) + out := out.(*VolumeMount) + *out = *in + return nil + } +} + +func DeepCopy_api_VolumeProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeProjection) + out := out.(*VolumeProjection) + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretProjection) + if err := DeepCopy_api_SecretProjection(*in, *out, c); err != nil { + return err + } + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIProjection) + if err := DeepCopy_api_DownwardAPIProjection(*in, *out, c); err != nil { + return err + } + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapProjection) + if err := DeepCopy_api_ConfigMapProjection(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_VolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeSource) + out := out.(*VolumeSource) + *out = *in + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + **out = **in + } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(EmptyDirVolumeSource) + **out = **in + } + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + if in.GitRepo != nil { + in, out := &in.GitRepo, &out.GitRepo + *out = new(GitRepoVolumeSource) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretVolumeSource) + if err := DeepCopy_api_SecretVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSVolumeSource) + **out = **in + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(ISCSIVolumeSource) + if err := DeepCopy_api_ISCSIVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsVolumeSource) + **out = **in + } + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(PersistentVolumeClaimVolumeSource) + **out = **in + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + *out = new(RBDVolumeSource) + if err := DeepCopy_api_RBDVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + *out = new(QuobyteVolumeSource) + **out = **in + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(FlexVolumeSource) + if err := DeepCopy_api_FlexVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(CinderVolumeSource) + **out = **in + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(CephFSVolumeSource) + if err := 
DeepCopy_api_CephFSVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(FlockerVolumeSource) + **out = **in + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIVolumeSource) + if err := DeepCopy_api_DownwardAPIVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.FC != nil { + in, out := &in.FC, &out.FC + *out = new(FCVolumeSource) + if err := DeepCopy_api_FCVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(AzureFileVolumeSource) + **out = **in + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapVolumeSource) + if err := DeepCopy_api_ConfigMapVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + *out = new(AzureDiskVolumeSource) + if err := DeepCopy_api_AzureDiskVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + *out = new(ProjectedVolumeSource) + if err := DeepCopy_api_ProjectedVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOVolumeSource) + if err := DeepCopy_api_ScaleIOVolumeSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_VsphereVirtualDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VsphereVirtualDiskVolumeSource) + out := out.(*VsphereVirtualDiskVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_WeightedPodAffinityTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*WeightedPodAffinityTerm) + out := out.(*WeightedPodAffinityTerm) + *out = *in + if err := DeepCopy_api_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, c); err != nil { + return err + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/OWNERS b/vendor/k8s.io/client-go/pkg/apis/apps/OWNERS new file mode 100755 index 0000000000..1cdc56eec0 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/OWNERS @@ -0,0 +1,21 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- deads2k +- caesarxuchao +- bprashanth +- pmorie +- sttts +- saad-ali +- ncdc +- timstclair +- timothysc +- dims +- errordeveloper +- mml +- m1093782566 +- mbohlool +- david-mcmahon +- kevin-wangzefeng +- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/doc.go b/vendor/k8s.io/client-go/pkg/apis/apps/doc.go new file mode 100644 index 0000000000..d27cee51c2 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apps diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/install/install.go b/vendor/k8s.io/client-go/pkg/apis/apps/install/install.go new file mode 100644 index 0000000000..ca50f3ea44 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/install/install.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the apps API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/apps" + "k8s.io/client-go/pkg/apis/apps/v1beta1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: apps.GroupName, + VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/apps", + AddInternalObjectsToScheme: apps.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/register.go b/vendor/k8s.io/client-go/pkg/apis/apps/register.go new file mode 100644 index 0000000000..d1d3bab26a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apps + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/pkg/apis/extensions" +) + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// GroupName is the group name use in this package +const GroupName = "apps" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + // TODO this will get cleaned up with the scheme types are fixed + scheme.AddKnownTypes(SchemeGroupVersion, + &extensions.Deployment{}, + &extensions.DeploymentList{}, + &extensions.DeploymentRollback{}, + &extensions.Scale{}, + &StatefulSet{}, + &StatefulSetList{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/types.go b/vendor/k8s.io/client-go/pkg/apis/apps/types.go new file mode 100644 index 0000000000..cd5ce8284c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/types.go @@ -0,0 +1,103 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apps + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api" +) + +// +genclient=true + +// StatefulSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. +type StatefulSet struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired identities of pods in this set. + // +optional + Spec StatefulSetSpec + + // Status is the current status of Pods in this StatefulSet. This data + // may be out of date by some window of time. + // +optional + Status StatefulSetStatus +} + +// A StatefulSetSpec is the specification of a StatefulSet. +type StatefulSetSpec struct { + // Replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + // +optional + Replicas int32 + + // Selector is a label query over pods that should match the replica count. + // If empty, defaulted to labels on the pod template. 
+ // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector *metav1.LabelSelector + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the StatefulSet + // will fulfill this Template, but have a unique identity from the rest + // of the StatefulSet. + Template api.PodTemplateSpec + + // VolumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + // +optional + VolumeClaimTemplates []api.PersistentVolumeClaim + + // ServiceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. + ServiceName string +} + +// StatefulSetStatus represents the current state of a StatefulSet. +type StatefulSetStatus struct { + // most recent generation observed by this StatefulSet. + // +optional + ObservedGeneration *int64 + + // Replicas is the number of actual replicas. + Replicas int32 +} + +// StatefulSetList is a collection of StatefulSets. +type StatefulSetList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + Items []StatefulSet +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/conversion.go new file mode 100644 index 0000000000..96d3330f8d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/conversion.go @@ -0,0 +1,297 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/pkg/apis/apps" + "k8s.io/client-go/pkg/apis/extensions" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions to handle the *int32 -> int32 + // conversion. A pointer is useful in the versioned type so we can default + // it, but a plain int32 is more convenient in the internal type. These + // functions are the same as the autogenerated ones in every other way. 
+ err := scheme.AddConversionFuncs( + Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec, + Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec, + // extensions + // TODO: below conversions should be dropped in favor of auto-generated + // ones, see https://github.com/kubernetes/kubernetes/issues/39865 + Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, + Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, + Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, + Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, + Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy, + Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy, + Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, + Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, + ) + if err != nil { + return err + } + + // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. + err = scheme.AddFieldLabelConversionFunc("apps/v1beta1", "StatefulSet", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", "metadata.namespace", "status.successful": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported for StatefulSet: %s", label) + } + }) + if err != nil { + return err + } + err = api.Scheme.AddFieldLabelConversionFunc("apps/v1beta1", "Deployment", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", "metadata.namespace": + return label, value, nil + default: + return "", "", fmt.Errorf("field label %q not supported for Deployment", label) + } + }) + if err != nil { + return err + } + + return nil +} + +func Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error { + if in.Replicas != nil { + out.Replicas = *in.Replicas + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + if err := s.Convert(*in, *out, 0); err != nil { + return err + } + } else { + out.Selector = nil + } + if err := s.Convert(&in.Template, &out.Template, 0); err != nil { + return err + } + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]api.PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { + return err + } + } + } else { + out.VolumeClaimTemplates = nil + } + out.ServiceName = in.ServiceName + return nil +} + +func Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.StatefulSetSpec, out *StatefulSetSpec, s conversion.Scope) error { + out.Replicas = new(int32) + *out.Replicas = in.Replicas + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + if err := s.Convert(*in, *out, 0); err != nil { + return err + } + } else { + out.Selector = nil + } + if err := s.Convert(&in.Template, &out.Template, 0); err != nil { + return err + } + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]v1.PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { + return err + } + } + } else { + out.VolumeClaimTemplates = nil + } + out.ServiceName = in.ServiceName + return nil +} + +func Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out
*ScaleStatus, s conversion.Scope) error { + out.Replicas = int32(in.Replicas) + + out.Selector = nil + out.TargetSelector = "" + if in.Selector != nil { + if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { + out.Selector = in.Selector.MatchLabels + } + + selector, err := metav1.LabelSelectorAsSelector(in.Selector) + if err != nil { + return fmt.Errorf("invalid label selector: %v", err) + } + out.TargetSelector = selector.String() + } + return nil +} + +func Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + + // Normally when 2 fields map to the same internal value we favor the old field, since + // old clients can't be expected to know about new fields but clients that know about the + // new field can be expected to know about the old field (though that's not quite true, due + // to kubectl apply). However, these fields are readonly, so any non-nil value should work. + if in.TargetSelector != "" { + labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) + if err != nil { + out.Selector = nil + return fmt.Errorf("failed to parse target selector: %v", err) + } + out.Selector = labelSelector + } else if in.Selector != nil { + out.Selector = new(metav1.LabelSelector) + selector := make(map[string]string) + for key, val := range in.Selector { + selector[key] = val + } + out.Selector.MatchLabels = selector + } else { + out.Selector = nil + } + return nil +} + +func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { + if in.Replicas != nil { + out.Replicas = *in.Replicas + } + out.Selector = in.Selector + if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + return err + } + out.RevisionHistoryLimit = in.RevisionHistoryLimit + out.MinReadySeconds = in.MinReadySeconds + out.Paused = in.Paused + if in.RollbackTo != nil { + out.RollbackTo = new(extensions.RollbackConfig) + out.RollbackTo.Revision = in.RollbackTo.Revision + } else { + out.RollbackTo = nil + } + if in.ProgressDeadlineSeconds != nil { + out.ProgressDeadlineSeconds = new(int32) + *out.ProgressDeadlineSeconds = *in.ProgressDeadlineSeconds + } + return nil +} + +func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error { + out.Replicas = &in.Replicas + out.Selector = in.Selector + if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + return err + } + if in.RevisionHistoryLimit != nil { + out.RevisionHistoryLimit = new(int32) + *out.RevisionHistoryLimit = int32(*in.RevisionHistoryLimit) + } + out.MinReadySeconds = int32(in.MinReadySeconds) + out.Paused = in.Paused + if in.RollbackTo != nil { + out.RollbackTo = new(RollbackConfig) + out.RollbackTo.Revision = int64(in.RollbackTo.Revision) + } else { + out.RollbackTo = nil + } + if in.ProgressDeadlineSeconds != nil { + out.ProgressDeadlineSeconds = new(int32) + *out.ProgressDeadlineSeconds = *in.ProgressDeadlineSeconds + } + return nil +} + +func 
Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error { + out.Type = DeploymentStrategyType(in.Type) + if in.RollingUpdate != nil { + out.RollingUpdate = new(RollingUpdateDeployment) + if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { + out.Type = extensions.DeploymentStrategyType(in.Type) + if in.RollingUpdate != nil { + out.RollingUpdate = new(extensions.RollingUpdateDeployment) + if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { + if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { + return err + } + if err := s.Convert(in.MaxSurge, &out.MaxSurge, 0); err != nil { + return err + } + return nil +} + +func Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error { + if out.MaxUnavailable == nil { + out.MaxUnavailable = &intstr.IntOrString{} + } + if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil { + return err + } + if out.MaxSurge == nil { + out.MaxSurge = &intstr.IntOrString{} + } + if err := s.Convert(&in.MaxSurge, out.MaxSurge, 0); err != nil { + return err + } + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/defaults.go new file mode 100644 index 0000000000..004cecd3f0 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/defaults.go @@ -0,0 +1,103 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + RegisterDefaults(scheme) + return scheme.AddDefaultingFuncs( + SetDefaults_StatefulSet, + SetDefaults_Deployment, + ) +} + +func SetDefaults_StatefulSet(obj *StatefulSet) { + labels := obj.Spec.Template.Labels + if labels != nil { + if obj.Spec.Selector == nil { + obj.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: labels, + } + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } +} + +// SetDefaults_Deployment sets additional defaults compared to its counterpart +// in extensions. These addons are: +// - MaxUnavailable during rolling update set to 25% (1 in extensions) +// - MaxSurge value during rolling update set to 25% (1 in extensions) +// - RevisionHistoryLimit set to 2 (not set in extensions) +// - ProgressDeadlineSeconds set to 600s (not set in extensions) +func SetDefaults_Deployment(obj *Deployment) { + // Default labels and selector to labels from pod template spec. + labels := obj.Spec.Template.Labels + + if labels != nil { + if obj.Spec.Selector == nil { + obj.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels} + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } + // Set DeploymentSpec.Replicas to 1 if it is not set. + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } + strategy := &obj.Spec.Strategy + // Set default DeploymentStrategyType as RollingUpdate. + if strategy.Type == "" { + strategy.Type = RollingUpdateDeploymentStrategyType + } + if strategy.Type == RollingUpdateDeploymentStrategyType { + if strategy.RollingUpdate == nil { + rollingUpdate := RollingUpdateDeployment{} + strategy.RollingUpdate = &rollingUpdate + } + if strategy.RollingUpdate.MaxUnavailable == nil { + // Set default MaxUnavailable as 25% by default. + maxUnavailable := intstr.FromString("25%") + strategy.RollingUpdate.MaxUnavailable = &maxUnavailable + } + if strategy.RollingUpdate.MaxSurge == nil { + // Set default MaxSurge as 25% by default. + maxSurge := intstr.FromString("25%") + strategy.RollingUpdate.MaxSurge = &maxSurge + } + } + if obj.Spec.RevisionHistoryLimit == nil { + obj.Spec.RevisionHistoryLimit = new(int32) + *obj.Spec.RevisionHistoryLimit = 2 + } + if obj.Spec.ProgressDeadlineSeconds == nil { + obj.Spec.ProgressDeadlineSeconds = new(int32) + *obj.Spec.ProgressDeadlineSeconds = 600 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/doc.go new file mode 100644 index 0000000000..a397b30e92 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/generated.pb.go new file mode 100644 index 0000000000..3e215241ba --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/generated.pb.go @@ -0,0 +1,3939 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/apps/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/apps/v1beta1/generated.proto + + It has these top-level messages: + Deployment + DeploymentCondition + DeploymentList + DeploymentRollback + DeploymentSpec + DeploymentStatus + DeploymentStrategy + RollbackConfig + RollingUpdateDeployment + Scale + ScaleSpec + ScaleStatus + StatefulSet + StatefulSetList + StatefulSetSpec + StatefulSetStatus +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/pkg/api/v1" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } +func (*DeploymentRollback) ProtoMessage() {} +func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } +func (*RollbackConfig) ProtoMessage() {} +func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } +func (*RollingUpdateDeployment) ProtoMessage() {} +func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + +func init() { + proto.RegisterType((*Deployment)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.Deployment") + proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.DeploymentCondition") + 
proto.RegisterType((*DeploymentList)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.DeploymentList") + proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.DeploymentRollback") + proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.DeploymentSpec") + proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.DeploymentStatus") + proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.DeploymentStrategy") + proto.RegisterType((*RollbackConfig)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.RollbackConfig") + proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.RollingUpdateDeployment") + proto.RegisterType((*Scale)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.ScaleStatus") + proto.RegisterType((*StatefulSet)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.StatefulSet") + proto.RegisterType((*StatefulSetList)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.StatefulSetList") + proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.StatefulSetSpec") + proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.StatefulSetStatus") +} +func (m *Deployment) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Deployment) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *DeploymentCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastUpdateTime.Size())) + n4, err := m.LastUpdateTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n5, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *DeploymentList) Marshal() (data []byte, err error) { + size := 
m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DeploymentRollback) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentRollback) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + if len(m.UpdatedAnnotations) > 0 { + for k := range m.UpdatedAnnotations { + data[i] = 0x12 + i++ + v := m.UpdatedAnnotations[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size())) + n7, err := m.RollbackTo.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + return i, nil +} + +func (m *DeploymentSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n8, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n9, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Strategy.Size())) + n10, err := m.Strategy.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.RevisionHistoryLimit)) + } + data[i] = 0x38 + i++ + if m.Paused { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if m.RollbackTo != nil { + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size())) + n11, err := m.RollbackTo.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.ProgressDeadlineSeconds != nil { + data[i] = 0x48 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ProgressDeadlineSeconds)) + } + return i, nil +} + +func (m *DeploymentStatus) Marshal() (data []byte, err error) { + size := m.Size() + 
data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.UpdatedReplicas)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AvailableReplicas)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x38 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ReadyReplicas)) + return i, nil +} + +func (m *DeploymentStrategy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentStrategy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.RollingUpdate != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RollingUpdate.Size())) + n12, err := m.RollingUpdate.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} + +func (m *RollbackConfig) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RollbackConfig) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Revision)) + return i, nil +} + +func (m *RollingUpdateDeployment) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RollingUpdateDeployment) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MaxUnavailable != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxUnavailable.Size())) + n13, err := m.MaxUnavailable.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + } + if m.MaxSurge != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxSurge.Size())) + n14, err := m.MaxSurge.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n14 + } + return i, nil +} + +func (m *Scale) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Scale) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n15, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n15 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n16, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n16 + data[i] = 0x1a + 
i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n17, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n17 + return i, nil +} + +func (m *ScaleSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScaleSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + return i, nil +} + +func (m *ScaleStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScaleStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k := range m.Selector { + data[i] = 0x12 + i++ + v := m.Selector[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.TargetSelector))) + i += copy(data[i:], m.TargetSelector) + return i, nil +} + +func (m *StatefulSet) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StatefulSet) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n18, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n18 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n19, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n19 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n20, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n20 + return i, nil +} + +func (m *StatefulSetList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StatefulSetList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n21, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n21 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StatefulSetSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StatefulSetSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + data[i] = 0x8 + i++ + i = 
encodeVarintGenerated(data, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n22, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n22 + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n23, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n23 + if len(m.VolumeClaimTemplates) > 0 { + for _, msg := range m.VolumeClaimTemplates { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ServiceName))) + i += copy(data[i:], m.ServiceName) + return i, nil +} + +func (m *StatefulSetStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StatefulSetStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ObservedGeneration != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration)) + } + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *Deployment) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentRollback) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UpdatedAnnotations) > 0 { + for k, v := range m.UpdatedAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = 
m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + if m.RollbackTo != nil { + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ProgressDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + } + return n +} + +func (m *DeploymentStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + return n +} + +func (m *DeploymentStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollbackConfig) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Revision)) + return n +} + +func (m *RollingUpdateDeployment) Size() (n int) { + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Scale) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleSpec) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + return n +} + +func (m *ScaleStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.TargetSelector) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StatefulSetSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n 
+= 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeClaimTemplates) > 0 { + for _, e := range m.VolumeClaimTemplates { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetStatus) Size() (n int) { + var l int + _ = l + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + n += 1 + sovGenerated(uint64(m.Replicas)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Deployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Deployment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentRollback) String() string { + if this == nil { + return "nil" + } + keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) + for k := range this.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + mapStringForUpdatedAnnotations := "map[string]string{" + for _, k := range keysForUpdatedAnnotations { + mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) + } + mapStringForUpdatedAnnotations += "}" + s := strings.Join([]string{`&DeploymentRollback{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, + `RollbackTo:` + strings.Replace(strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + 
`Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `RollbackTo:` + strings.Replace(fmt.Sprintf("%v", this.RollbackTo), "RollbackConfig", "RollbackConfig", 1) + `,`, + `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollbackConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollbackConfig{`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDeployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDeployment{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Scale) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Scale{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleSpec{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleStatus) String() string { + if this == nil { + return "nil" + } + keysForSelector := 
make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_kubernetes_pkg_api_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetStatus{`, + `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Deployment) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
Deployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = 
DeploymentConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx 
> l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Deployment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentRollback) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.UpdatedAnnotations == nil { + m.UpdatedAnnotations = make(map[string]string) + } + m.UpdatedAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*DeploymentSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Strategy.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", 
wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollbackTo == nil { + m.RollbackTo = &RollbackConfig{} + } + if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressDeadlineSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= 
(int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStrategy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentStrategyType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDeployment{} + } + if err := m.RollingUpdate.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollbackConfig) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxSurge == nil { + m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scale) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scale: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } 
+ } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ 
+ m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Selector == nil { + m.Selector = make(map[string]string) + } + m.Selector[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetSelector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSet) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { 
+ preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StatefulSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_kubernetes_pkg_api_v1.PersistentVolumeClaim{}) + if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ObservedGeneration = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + 
iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 1525 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe4, 0x58, 0xcb, 0x6f, 0x5b, 0xc5, + 0x17, 0xce, 0x4d, 0xec, 0xc4, 0x99, 0x34, 0x4e, 0x33, 0xc9, 0xaf, 0xf1, 0x2f, 0x45, 0x4e, 0xe5, + 0x45, 0x1f, 0xa8, 0xbd, 0xa6, 0x69, 0xa1, 0x8f, 0x40, 0x45, 0xdc, 0x96, 0x52, 0x94, 0xd0, 0x6a, + 0xec, 0x54, 0xb4, 0x14, 0x89, 0xb1, 0x3d, 0xbd, 0x9d, 0xfa, 0xbe, 0x74, 0x67, 0x6c, 0xc5, 0x3b, + 0x36, 0x2c, 0x90, 0x58, 0xb0, 0x62, 0x87, 0xd8, 0x23, 0x24, 0x76, 0xfc, 0x0d, 0x11, 0x6c, 0xba, + 0x44, 0x2c, 0x22, 0xe2, 0xfe, 0x17, 0x5d, 0xa1, 0x99, 0x3b, 0xf7, 0xe5, 0x6b, 0x27, 0x8e, 0x11, + 0xdd, 0xb0, 0xf3, 0x9d, 0x39, 0xdf, 0x77, 0xce, 0xcc, 0x7c, 0xe7, 0xcc, 0x19, 0x83, 0x6b, 0xad, + 0xeb, 0x4c, 0xa7, 0x4e, 0xb9, 0xd5, 0xae, 0x13, 0xcf, 0x26, 0x9c, 0xb0, 0xb2, 0xdb, 0x32, 0xca, + 0xd8, 0xa5, 0xac, 0x8c, 0x5d, 0x97, 0x95, 0x3b, 0x97, 0xeb, 0x84, 0xe3, 0xcb, 0x65, 0x83, 0xd8, + 0xc4, 0xc3, 0x9c, 0x34, 0x75, 0xd7, 0x73, 0xb8, 0x03, 0xcf, 0xf9, 0x40, 0x3d, 0x02, 0xea, 0x6e, + 0xcb, 0xd0, 0x05, 0x50, 0x17, 0x40, 0x5d, 0x01, 0x57, 0x2f, 0x19, 0x94, 0x3f, 0x6f, 0xd7, 0xf5, + 0x86, 0x63, 0x95, 0x0d, 
0xc7, 0x70, 0xca, 0x12, 0x5f, 0x6f, 0x3f, 0x93, 0x5f, 0xf2, 0x43, 0xfe, + 0xf2, 0x79, 0x57, 0xaf, 0xaa, 0x80, 0xb0, 0x4b, 0x2d, 0xdc, 0x78, 0x4e, 0x6d, 0xe2, 0x75, 0xa3, + 0x90, 0x2c, 0xc2, 0x71, 0xb9, 0x93, 0x8a, 0x66, 0xb5, 0x3c, 0x0c, 0xe5, 0xb5, 0x6d, 0x4e, 0x2d, + 0x92, 0x02, 0xbc, 0x77, 0x14, 0x80, 0x35, 0x9e, 0x13, 0x0b, 0xa7, 0x70, 0x57, 0x86, 0xe1, 0xda, + 0x9c, 0x9a, 0x65, 0x6a, 0x73, 0xc6, 0xbd, 0x14, 0x28, 0xb6, 0x26, 0x46, 0xbc, 0x0e, 0xf1, 0xa2, + 0x05, 0x91, 0x5d, 0x6c, 0xb9, 0x26, 0x19, 0xb4, 0xa6, 0x8b, 0x43, 0x8f, 0x66, 0x90, 0xf5, 0x07, + 0x87, 0x1c, 0x24, 0xd9, 0xe5, 0xc4, 0x66, 0xd4, 0xb1, 0x87, 0x1e, 0x67, 0xe9, 0xe7, 0x49, 0x00, + 0xee, 0x10, 0xd7, 0x74, 0xba, 0x16, 0xb1, 0x39, 0xfc, 0x12, 0xe4, 0xc4, 0x56, 0x37, 0x31, 0xc7, + 0x05, 0xed, 0x8c, 0x76, 0x7e, 0x6e, 0xfd, 0x1d, 0x5d, 0x1d, 0x78, 0x7c, 0xe5, 0xd1, 0x91, 0x0b, + 0x6b, 0xbd, 0x73, 0x59, 0x7f, 0x50, 0x7f, 0x41, 0x1a, 0x7c, 0x9b, 0x70, 0x5c, 0x81, 0x7b, 0xfb, + 0x6b, 0x13, 0xbd, 0xfd, 0x35, 0x10, 0x8d, 0xa1, 0x90, 0x15, 0x3e, 0x06, 0x19, 0xe6, 0x92, 0x46, + 0x61, 0x52, 0xb2, 0x5f, 0xd3, 0x47, 0x94, 0x93, 0x1e, 0x05, 0x59, 0x75, 0x49, 0xa3, 0x72, 0x42, + 0x39, 0xc9, 0x88, 0x2f, 0x24, 0x29, 0x21, 0x06, 0xd3, 0x8c, 0x63, 0xde, 0x66, 0x85, 0x29, 0x49, + 0x7e, 0x63, 0x1c, 0x72, 0x49, 0x50, 0xc9, 0x2b, 0xfa, 0x69, 0xff, 0x1b, 0x29, 0xe2, 0xd2, 0xc1, + 0x14, 0x58, 0x8a, 0x8c, 0x6f, 0x3b, 0x76, 0x93, 0x72, 0xea, 0xd8, 0x70, 0x03, 0x64, 0x78, 0xd7, + 0x25, 0x72, 0xcf, 0x66, 0x2b, 0xe7, 0x82, 0xe0, 0x6a, 0x5d, 0x97, 0xbc, 0xde, 0x5f, 0x5b, 0x19, + 0x00, 0x11, 0x53, 0x48, 0x82, 0xe0, 0xa3, 0x30, 0xee, 0x49, 0x09, 0xbf, 0x95, 0x74, 0xfe, 0x7a, + 0x7f, 0xed, 0x50, 0x49, 0xe8, 0x21, 0x67, 0x32, 0x58, 0x78, 0x16, 0x4c, 0x7b, 0x04, 0x33, 0xc7, + 0x2e, 0x64, 0x24, 0x6f, 0xb8, 0x28, 0x24, 0x47, 0x91, 0x9a, 0x85, 0x17, 0xc0, 0x8c, 0x45, 0x18, + 0xc3, 0x06, 0x29, 0x64, 0xa5, 0xe1, 0x82, 0x32, 0x9c, 0xd9, 0xf6, 0x87, 0x51, 0x30, 0x0f, 0x5f, + 0x80, 0xbc, 0x89, 0x19, 0xdf, 0x71, 0x9b, 0x98, 0x93, 0x1a, 0xb5, 0x48, 0x61, 0x5a, 0x6e, 0xf5, + 0xdb, 0xa3, 0xa9, 0x44, 0x20, 0x2a, 0xa7, 0x14, 0x7b, 0x7e, 0x2b, 0xc1, 0x84, 0xfa, 0x98, 0x61, + 0x07, 0x40, 0x31, 0x52, 0xf3, 0xb0, 0xcd, 0xfc, 0x2d, 0x13, 0xfe, 0x66, 0x8e, 0xed, 0x6f, 0x55, + 0xf9, 0x83, 0x5b, 0x29, 0x36, 0x34, 0xc0, 0x43, 0x69, 0x4f, 0x03, 0xf9, 0xe8, 0xc0, 0xb6, 0x28, + 0xe3, 0xf0, 0x69, 0x2a, 0x2d, 0xf4, 0xd1, 0x02, 0x10, 0x68, 0x99, 0x14, 0x27, 0x55, 0x10, 0xb9, + 0x60, 0x24, 0x96, 0x12, 0x9f, 0x81, 0x2c, 0xe5, 0xc4, 0x12, 0xc7, 0x3f, 0x75, 0x7e, 0x6e, 0xfd, + 0xca, 0x18, 0xb2, 0xad, 0xcc, 0x2b, 0xfe, 0xec, 0x7d, 0xc1, 0x84, 0x7c, 0xc2, 0xd2, 0xb7, 0x53, + 0x00, 0x46, 0x46, 0xc8, 0x31, 0xcd, 0x3a, 0x6e, 0xb4, 0xe0, 0x19, 0x90, 0xb1, 0xb1, 0x15, 0xa8, + 0x35, 0x4c, 0xa5, 0x4f, 0xb1, 0x45, 0x90, 0x9c, 0x81, 0x3f, 0x6a, 0x00, 0xb6, 0xe5, 0x51, 0x34, + 0x37, 0x6d, 0xdb, 0xe1, 0x58, 0xec, 0x4e, 0x10, 0x60, 0x75, 0x8c, 0x00, 0x03, 0xdf, 0xfa, 0x4e, + 0x8a, 0xf5, 0xae, 0xcd, 0xbd, 0x6e, 0x74, 0x4a, 0x69, 0x03, 0x34, 0x20, 0x14, 0xd8, 0x02, 0xc0, + 0x53, 0x9c, 0x35, 0x47, 0x25, 0xfc, 0xe8, 0xd5, 0x24, 0x08, 0xe7, 0xb6, 0x63, 0x3f, 0xa3, 0x46, + 0x54, 0xb2, 0x50, 0x48, 0x89, 0x62, 0xf4, 0xab, 0x77, 0xc1, 0xca, 0x90, 0xb8, 0xe1, 0x49, 0x30, + 0xd5, 0x22, 0x5d, 0x7f, 0x2b, 0x91, 0xf8, 0x09, 0x97, 0x41, 0xb6, 0x83, 0xcd, 0x36, 0xf1, 0xb3, + 0x19, 0xf9, 0x1f, 0x37, 0x27, 0xaf, 0x6b, 0xa5, 0x3f, 0xb3, 0x71, 0x65, 0x89, 0xca, 0x05, 0xcf, + 0x83, 0x9c, 0x47, 0x5c, 0x93, 0x36, 0x30, 0x93, 0x1c, 0xd9, 0xca, 0x09, 0xa1, 0x12, 0xa4, 0xc6, + 0x50, 0x38, 0x0b, 0xbf, 0x00, 0x39, 0x46, 0x4c, 
0xd2, 0xe0, 0x8e, 0xa7, 0x8a, 0xe7, 0x95, 0x11, + 0x35, 0x88, 0xeb, 0xc4, 0xac, 0x2a, 0xa8, 0x4f, 0x1f, 0x7c, 0xa1, 0x90, 0x12, 0x7e, 0x0e, 0x72, + 0x9c, 0x58, 0xae, 0x89, 0x39, 0x51, 0xbb, 0x79, 0x69, 0xf8, 0x6e, 0x0a, 0xda, 0x87, 0x4e, 0xb3, + 0xa6, 0x00, 0xb2, 0x22, 0x87, 0x0a, 0x0f, 0x46, 0x51, 0x48, 0x08, 0x29, 0xc8, 0x31, 0x2e, 0xae, + 0x1d, 0xa3, 0x2b, 0x6b, 0xd1, 0xdc, 0xfa, 0xc6, 0x58, 0xb5, 0xd9, 0xa7, 0x88, 0x5c, 0x05, 0x23, + 0x28, 0xa4, 0x87, 0x9b, 0x60, 0xc1, 0xa2, 0x36, 0x22, 0xb8, 0xd9, 0xad, 0x92, 0x86, 0x63, 0x37, + 0x99, 0x2c, 0x6a, 0xd9, 0xca, 0x8a, 0x02, 0x2d, 0x6c, 0x27, 0xa7, 0x51, 0xbf, 0x3d, 0xdc, 0x02, + 0xcb, 0x1e, 0xe9, 0x50, 0x71, 0x71, 0x7e, 0x4c, 0x19, 0x77, 0xbc, 0xee, 0x16, 0xb5, 0x28, 0x97, + 0xa5, 0x2e, 0x5b, 0x29, 0xf4, 0xf6, 0xd7, 0x96, 0xd1, 0x80, 0x79, 0x34, 0x10, 0x25, 0xaa, 0xb0, + 0x8b, 0xdb, 0x8c, 0x34, 0x65, 0xe9, 0xca, 0x45, 0x55, 0xf8, 0xa1, 0x1c, 0x45, 0x6a, 0x16, 0x1a, + 0x09, 0x41, 0xe7, 0xfe, 0x99, 0xa0, 0xf3, 0xc3, 0xc5, 0x0c, 0x77, 0xc0, 0x8a, 0xeb, 0x39, 0x86, + 0x47, 0x18, 0xbb, 0x43, 0x70, 0xd3, 0xa4, 0x36, 0x09, 0x76, 0x6a, 0x56, 0xae, 0xf0, 0x74, 0x6f, + 0x7f, 0x6d, 0xe5, 0xe1, 0x60, 0x13, 0x34, 0x0c, 0x5b, 0xfa, 0x3e, 0x03, 0x4e, 0xf6, 0xdf, 0xa3, + 0xf0, 0x13, 0x00, 0x9d, 0xba, 0xec, 0x7d, 0x9a, 0xf7, 0xfc, 0xce, 0x83, 0x3a, 0xb6, 0x14, 0xfa, + 0x54, 0x94, 0xf1, 0x0f, 0x52, 0x16, 0x68, 0x00, 0x0a, 0x5e, 0x8c, 0xa5, 0xca, 0xa4, 0x0c, 0x34, + 0xd4, 0xc1, 0x80, 0x74, 0xd9, 0x04, 0x0b, 0xaa, 0x6a, 0x04, 0x93, 0x52, 0xd6, 0x31, 0x1d, 0xec, + 0x24, 0xa7, 0x51, 0xbf, 0x3d, 0xbc, 0x07, 0x16, 0x71, 0x07, 0x53, 0x13, 0xd7, 0x4d, 0x12, 0x92, + 0x64, 0x24, 0xc9, 0xff, 0x15, 0xc9, 0xe2, 0x66, 0xbf, 0x01, 0x4a, 0x63, 0xe0, 0x36, 0x58, 0x6a, + 0xdb, 0x69, 0x2a, 0x5f, 0x97, 0xa7, 0x15, 0xd5, 0xd2, 0x4e, 0xda, 0x04, 0x0d, 0xc2, 0x41, 0x17, + 0x80, 0x46, 0x70, 0xe5, 0xb3, 0xc2, 0xb4, 0xac, 0xc9, 0xef, 0x8f, 0x91, 0x4f, 0x61, 0xdf, 0x10, + 0xd5, 0xbf, 0x70, 0x88, 0xa1, 0x98, 0x0f, 0xb8, 0x01, 0xe6, 0x3d, 0x91, 0x21, 0x61, 0xe8, 0x33, + 0x32, 0xf4, 0xff, 0x29, 0xd8, 0x3c, 0x8a, 0x4f, 0xa2, 0xa4, 0x6d, 0xe9, 0x77, 0x2d, 0x7e, 0x09, + 0x05, 0x29, 0x0b, 0x6f, 0x26, 0x5a, 0xa6, 0xb3, 0x7d, 0x2d, 0xd3, 0xa9, 0x34, 0x22, 0xd6, 0x31, + 0x75, 0xc1, 0xbc, 0x10, 0x34, 0xb5, 0x0d, 0xff, 0x10, 0x55, 0x41, 0xfc, 0xf0, 0x58, 0xe9, 0x12, + 0xa2, 0x63, 0xd7, 0xe8, 0xa2, 0x5c, 0x4d, 0x7c, 0x12, 0x25, 0x3d, 0x95, 0x6e, 0x81, 0x7c, 0x32, + 0xd7, 0x7c, 0x5d, 0xfa, 0x89, 0xaf, 0x94, 0x1d, 0xd3, 0xa5, 0x3f, 0x8e, 0x42, 0x8b, 0xd2, 0x2b, + 0x0d, 0xac, 0x0c, 0xf1, 0x0e, 0x4d, 0x90, 0xb7, 0xf0, 0x6e, 0x4c, 0x07, 0x47, 0xf6, 0xe0, 0xe2, + 0xf5, 0xa1, 0xfb, 0xaf, 0x0f, 0xfd, 0xbe, 0xcd, 0x1f, 0x78, 0x55, 0xee, 0x51, 0xdb, 0xa8, 0x40, + 0xd1, 0x5f, 0x6d, 0x27, 0xb8, 0x50, 0x1f, 0x37, 0x7c, 0x02, 0x72, 0x16, 0xde, 0xad, 0xb6, 0x3d, + 0x23, 0xd8, 0xbf, 0xe3, 0xfb, 0x91, 0xb7, 0xc9, 0xb6, 0x62, 0x41, 0x21, 0x5f, 0xe9, 0x87, 0x49, + 0x90, 0xad, 0x36, 0xb0, 0x49, 0xde, 0xc0, 0x8b, 0xa2, 0x96, 0x78, 0x51, 0xac, 0x8f, 0xac, 0x01, + 0x19, 0xdf, 0xd0, 0xc7, 0xc4, 0xd3, 0xbe, 0xc7, 0xc4, 0xd5, 0x63, 0xf2, 0x1e, 0xfe, 0x8e, 0xb8, + 0x01, 0x66, 0x43, 0xf7, 0x89, 0xc2, 0xa6, 0x1d, 0x55, 0xd8, 0x4a, 0x3f, 0x4d, 0x82, 0xb9, 0x98, + 0x8b, 0xe3, 0xa1, 0xa1, 0x9b, 0xe8, 0x22, 0x44, 0xe5, 0xa8, 0x8c, 0xb3, 0x30, 0x3d, 0xe8, 0x20, + 0xfc, 0xe6, 0x2d, 0xba, 0x90, 0xd3, 0x8d, 0xc5, 0x2d, 0x90, 0xe7, 0xd8, 0x33, 0x08, 0x0f, 0xe6, + 0xe4, 0x86, 0xce, 0x46, 0xcf, 0x80, 0x5a, 0x62, 0x16, 0xf5, 0x59, 0xaf, 0x6e, 0x80, 0xf9, 0x84, + 0xb3, 0x63, 0x75, 0x5c, 0xbf, 0x88, 0xcd, 0xe2, 0x98, 0x93, 0x67, 0x6d, 
0xb3, 0x4a, 0xde, 0xc4, + 0xfb, 0xf6, 0x49, 0x42, 0x8d, 0xd7, 0x47, 0xdf, 0xdc, 0x28, 0xca, 0xa1, 0x9a, 0xac, 0xf7, 0x69, + 0xf2, 0xe6, 0x58, 0xec, 0x87, 0x2b, 0xf3, 0x37, 0x0d, 0x2c, 0xc4, 0xac, 0xdf, 0xc0, 0xf3, 0xe7, + 0x71, 0xf2, 0xf9, 0x73, 0x75, 0x9c, 0x45, 0x0d, 0x79, 0xff, 0xfc, 0x3a, 0x95, 0x58, 0xcc, 0x7f, + 0xa8, 0xe3, 0xfe, 0x5a, 0x03, 0xcb, 0x1d, 0xc7, 0x6c, 0x5b, 0xe4, 0xb6, 0x89, 0xa9, 0x15, 0x58, + 0x88, 0xfe, 0xe5, 0x88, 0x37, 0xa6, 0xf4, 0x44, 0x3c, 0x46, 0x19, 0x27, 0x36, 0x7f, 0x14, 0x71, + 0x54, 0xde, 0x52, 0xfe, 0x96, 0x1f, 0x0d, 0x20, 0x46, 0x03, 0xdd, 0xc1, 0x77, 0xc1, 0x9c, 0x68, + 0xe4, 0x68, 0x83, 0x88, 0xd7, 0xa5, 0xfa, 0x7f, 0x61, 0x49, 0x11, 0xcd, 0x55, 0xa3, 0x29, 0x14, + 0xb7, 0x2b, 0x7d, 0xa3, 0x81, 0xc5, 0x94, 0x66, 0xe1, 0x47, 0x87, 0x74, 0x93, 0xa7, 0xfe, 0xad, + 0x4e, 0xb2, 0x72, 0x61, 0xef, 0xa0, 0x38, 0xf1, 0xf2, 0xa0, 0x38, 0xf1, 0xc7, 0x41, 0x71, 0xe2, + 0xab, 0x5e, 0x51, 0xdb, 0xeb, 0x15, 0xb5, 0x97, 0xbd, 0xa2, 0xf6, 0x57, 0xaf, 0xa8, 0x7d, 0xf7, + 0xaa, 0x38, 0xf1, 0x64, 0x46, 0x29, 0xf2, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd6, 0xb9, 0xde, + 0x1a, 0x56, 0x15, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/generated.proto new file mode 100644 index 0000000000..8ca77b2e6a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/generated.proto @@ -0,0 +1,342 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.apps.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// Deployment enables declarative updates for Pods and ReplicaSets. +message Deployment { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the Deployment. + // +optional + optional DeploymentSpec spec = 2; + + // Most recently observed status of the Deployment. + // +optional + optional DeploymentStatus status = 3; +} + +// DeploymentCondition describes the state of a deployment at a certain point. +message DeploymentCondition { + // Type of deployment condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. 
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + + // Last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + + // The reason for the condition's last transition. + optional string reason = 4; + + // A human readable message indicating details about the transition. + optional string message = 5; +} + +// DeploymentList is a list of Deployments. +message DeploymentList { + // Standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Deployments. + repeated Deployment items = 2; +} + +// DeploymentRollback stores the information required to rollback a deployment. +message DeploymentRollback { + // Required: This must match the Name of a deployment. + optional string name = 1; + + // The annotations to be updated to a deployment + // +optional + map<string, string> updatedAnnotations = 2; + + // The config of this deployment rollback. + optional RollbackConfig rollbackTo = 3; +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +message DeploymentSpec { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + optional int32 replicas = 1; + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template describes the pods that will be created. + optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + optional DeploymentStrategy strategy = 4; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 5; + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 2. + // +optional + optional int32 revisionHistoryLimit = 6; + + // Indicates that the deployment is paused. + // +optional + optional bool paused = 7; + + // The config this deployment is rolling back to. Will be cleared after rollback is done. + // +optional + optional RollbackConfig rollbackTo = 8; + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Once autoRollback is + // implemented, the deployment controller will automatically rollback failed + // deployments. Note that progress will not be estimated during the time a + // deployment is paused. Defaults to 600s. + optional int32 progressDeadlineSeconds = 9; +} + +// DeploymentStatus is the most recently observed status of the Deployment. +message DeploymentStatus { + // The generation observed by the deployment controller. + // +optional + optional int64 observedGeneration = 1; + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+ // +optional + optional int32 replicas = 2; + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + optional int32 updatedReplicas = 3; + + // Total number of ready pods targeted by this deployment. + // +optional + optional int32 readyReplicas = 7; + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + optional int32 availableReplicas = 4; + + // Total number of unavailable pods targeted by this deployment. + // +optional + optional int32 unavailableReplicas = 5; + + // Represents the latest available observations of a deployment's current state. + repeated DeploymentCondition conditions = 6; +} + +// DeploymentStrategy describes how to replace existing pods with new ones. +message DeploymentStrategy { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + optional string type = 1; + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + optional RollingUpdateDeployment rollingUpdate = 2; +} + +message RollbackConfig { + // The revision to rollback to. If set to 0, rollbck to the last revision. + // +optional + optional int64 revision = 1; +} + +// Spec to control the desired behavior of rolling update. +message RollingUpdateDeployment { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 25%. + // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // Defaults to 25%. + // Example: when this is set to 30%, the new RC can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is atmost 130% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; +} + +// Scale represents a scaling request for a resource. +message Scale { + // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + optional ScaleSpec spec = 2; + + // current status of the scale. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // +optional + optional ScaleStatus status = 3; +} + +// ScaleSpec describes the attributes of a scale subresource +message ScaleSpec { + // desired number of instances for the scaled object. + // +optional + optional int32 replicas = 1; +} + +// ScaleStatus represents the current status of a scale subresource. +message ScaleStatus { + // actual number of observed instances of the scaled object. + optional int32 replicas = 1; + + // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + map<string, string> selector = 2; + + // label selector for pods that should match the replicas count. This is a serialized + // version of both map-based and more expressive set-based selectors. This is done to + // avoid introspection in the clients. The string will be in the same format as the + // query-param syntax. If the target type only supports map-based selectors, both this + // field and map-based selector field are populated. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + optional string targetSelector = 3; +} + +// StatefulSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. +message StatefulSet { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the desired identities of pods in this set. + // +optional + optional StatefulSetSpec spec = 2; + + // Status is the current status of Pods in this StatefulSet. This data + // may be out of date by some window of time. + // +optional + optional StatefulSetStatus status = 3; +} + +// StatefulSetList is a collection of StatefulSets. +message StatefulSetList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated StatefulSet items = 2; +} + +// A StatefulSetSpec is the specification of a StatefulSet. +message StatefulSetSpec { + // Replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + // +optional + optional int32 replicas = 1; + + // Selector is a label query over pods that should match the replica count. + // If empty, defaulted to labels on the pod template. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the StatefulSet + // will fulfill this Template, but have a unique identity from the rest + // of the StatefulSet. + optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; + + // VolumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod.
Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + // +optional + repeated k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + + // ServiceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. + optional string serviceName = 5; +} + +// StatefulSetStatus represents the current state of a StatefulSet. +message StatefulSetStatus { + // most recent generation observed by this StatefulSet. + // +optional + optional int64 observedGeneration = 1; + + // Replicas is the number of actual replicas. + optional int32 replicas = 2; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/register.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/register.go new file mode 100644 index 0000000000..6e618e1d8f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "apps" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Deployment{}, + &DeploymentList{}, + &DeploymentRollback{}, + &Scale{}, + &StatefulSet{}, + &StatefulSetList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types.generated.go new file mode 100644 index 0000000000..4095041204 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types.generated.go @@ -0,0 +1,6485 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package v1beta1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg4_resource "k8s.io/apimachinery/pkg/api/resource" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + pkg5_intstr "k8s.io/apimachinery/pkg/util/intstr" + pkg3_v1 "k8s.io/client-go/pkg/api/v1" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg4_resource.Quantity + var v1 pkg1_v1.TypeMeta + var v2 pkg2_types.UID + var v3 pkg5_intstr.IntOrString + var v4 pkg3_v1.PodTemplateSpec + var v5 time.Time + _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 + } +} + +func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv7 := &x.Replicas + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = len(x.Selector) != 0 + yyq2[2] = x.TargetSelector != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncMapStringStringV(x.Selector, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncMapStringStringV(x.Selector, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TargetSelector)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TargetSelector)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) { 
+ var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "selector": + if r.TryDecodeAsNil() { + x.Selector = nil + } else { + yyv6 := &x.Selector + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecMapStringStringX(yyv6, false, d) + } + } + case "targetSelector": + if r.TryDecodeAsNil() { + x.TargetSelector = "" + } else { + yyv8 := &x.TargetSelector + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv11 := &x.Replicas + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(yyv11)) = int32(r.DecodeInt(32)) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Selector = nil + } else { + yyv13 := &x.Selector + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecMapStringStringX(yyv13, false, d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetSelector = "" + } else { + yyv15 := &x.TargetSelector + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + 
*((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + 
r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ScaleSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ScaleStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > 
l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ScaleSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ScaleStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *StatefulSet) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StatefulSet) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StatefulSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = 
r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = StatefulSetSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = StatefulSetStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StatefulSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = StatefulSetSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = StatefulSetStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *StatefulSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + 
z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != nil + yyq2[1] = x.Selector != nil + yyq2[3] = len(x.VolumeClaimTemplates) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Replicas == nil { + r.EncodeNil() + } else { + yy4 := *x.Replicas + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Replicas == nil { + r.EncodeNil() + } else { + yy6 := *x.Replicas + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.Template + yy12.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.Template + yy14.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.VolumeClaimTemplates == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encSlicev1_PersistentVolumeClaim(([]pkg3_v1.PersistentVolumeClaim)(x.VolumeClaimTemplates), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeClaimTemplates")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VolumeClaimTemplates == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + h.encSlicev1_PersistentVolumeClaim(([]pkg3_v1.PersistentVolumeClaim)(x.VolumeClaimTemplates), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) + } + } else { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("serviceName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StatefulSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StatefulSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = pkg3_v1.PodTemplateSpec{} + } else { + yyv8 := &x.Template + yyv8.CodecDecodeSelf(d) + } + case "volumeClaimTemplates": + if r.TryDecodeAsNil() { + x.VolumeClaimTemplates = nil + } else { + yyv9 := &x.VolumeClaimTemplates + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicev1_PersistentVolumeClaim((*[]pkg3_v1.PersistentVolumeClaim)(yyv9), d) + } + } + case "serviceName": + if r.TryDecodeAsNil() { + x.ServiceName = "" + } else { + yyv11 := &x.ServiceName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StatefulSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } 
else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = pkg3_v1.PodTemplateSpec{} + } else { + yyv18 := &x.Template + yyv18.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeClaimTemplates = nil + } else { + yyv19 := &x.VolumeClaimTemplates + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicev1_PersistentVolumeClaim((*[]pkg3_v1.PersistentVolumeClaim)(yyv19), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ServiceName = "" + } else { + yyv21 := &x.ServiceName + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *StatefulSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ObservedGeneration != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy4 := *x.ObservedGeneration + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + 
} + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy6 := *x.ObservedGeneration + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StatefulSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StatefulSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "observedGeneration": + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv6 := &x.Replicas + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StatefulSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv11 := &x.Replicas + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(yyv11)) = int32(r.DecodeInt(32)) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *StatefulSetList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + 
_ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceStatefulSet(([]StatefulSet)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceStatefulSet(([]StatefulSet)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StatefulSetList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StatefulSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceStatefulSet((*[]StatefulSet)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StatefulSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if 
yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceStatefulSet((*[]StatefulSet)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Deployment) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Deployment) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Deployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if 
r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = DeploymentSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = DeploymentStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = DeploymentSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = DeploymentStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [9]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != nil + yyq2[1] = x.Selector != nil + yyq2[3] = true + yyq2[4] = x.MinReadySeconds != 0 + yyq2[5] = x.RevisionHistoryLimit != nil + yyq2[6] = x.Paused != false + yyq2[7] = x.RollbackTo != nil + yyq2[8] = x.ProgressDeadlineSeconds != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(9) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Replicas == nil { + r.EncodeNil() + } else { + yy4 := *x.Replicas + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Replicas == nil { + r.EncodeNil() + } else { + yy6 := *x.Replicas + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.Template + yy12.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.Template + yy14.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy17 := &x.Strategy + yy17.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("strategy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy19 := &x.Strategy + yy19.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.RevisionHistoryLimit == nil { + r.EncodeNil() + } else { + yy25 := *x.RevisionHistoryLimit + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeInt(int64(yy25)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("revisionHistoryLimit")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RevisionHistoryLimit == nil { + r.EncodeNil() + } else { + yy27 := *x.RevisionHistoryLimit + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeInt(int64(yy27)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym30 := z.EncBinary() + _ = yym30 + if false { + } else { + r.EncodeBool(bool(x.Paused)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("paused")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeBool(bool(x.Paused)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.RollbackTo == nil { + r.EncodeNil() + } else { + x.RollbackTo.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RollbackTo == nil { + r.EncodeNil() + } else { + x.RollbackTo.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.ProgressDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy36 := *x.ProgressDeadlineSeconds + yym37 := z.EncBinary() + _ = yym37 + if false { + } else { + r.EncodeInt(int64(yy36)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("progressDeadlineSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ProgressDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy38 := *x.ProgressDeadlineSeconds + yym39 := z.EncBinary() + _ = yym39 + if false { + } else { + r.EncodeInt(int64(yy38)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + 
if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = pkg3_v1.PodTemplateSpec{} + } else { + yyv8 := &x.Template + yyv8.CodecDecodeSelf(d) + } + case "strategy": + if r.TryDecodeAsNil() { + x.Strategy = DeploymentStrategy{} + } else { + yyv9 := &x.Strategy + yyv9.CodecDecodeSelf(d) + } + case "minReadySeconds": + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv10 := &x.MinReadySeconds + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "revisionHistoryLimit": + if r.TryDecodeAsNil() { + if x.RevisionHistoryLimit != nil { + x.RevisionHistoryLimit = nil + } + } else { + if x.RevisionHistoryLimit == nil { + x.RevisionHistoryLimit = new(int32) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + case "paused": + if r.TryDecodeAsNil() { + x.Paused = false + } else { + yyv14 := &x.Paused + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + case "rollbackTo": + if r.TryDecodeAsNil() { + if x.RollbackTo != nil { + x.RollbackTo = nil + } + } else { + if x.RollbackTo == nil { + x.RollbackTo = new(RollbackConfig) + } + x.RollbackTo.CodecDecodeSelf(d) + } + case "progressDeadlineSeconds": + if r.TryDecodeAsNil() { + if x.ProgressDeadlineSeconds != nil { + x.ProgressDeadlineSeconds = nil + } + } else { + if x.ProgressDeadlineSeconds == nil { + x.ProgressDeadlineSeconds = new(int32) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(x.ProgressDeadlineSeconds)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj19 int + var yyb19 bool + var yyhl19 bool = l >= 0 + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if 
yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = pkg3_v1.PodTemplateSpec{} + } else { + yyv24 := &x.Template + yyv24.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Strategy = DeploymentStrategy{} + } else { + yyv25 := &x.Strategy + yyv25.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv26 := &x.MinReadySeconds + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*int32)(yyv26)) = int32(r.DecodeInt(32)) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RevisionHistoryLimit != nil { + x.RevisionHistoryLimit = nil + } + } else { + if x.RevisionHistoryLimit == nil { + x.RevisionHistoryLimit = new(int32) + } + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Paused = false + } else { + yyv30 := &x.Paused + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*bool)(yyv30)) = r.DecodeBool() + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RollbackTo != nil { + x.RollbackTo = nil + } + } else { + if x.RollbackTo == nil { + x.RollbackTo = new(RollbackConfig) + } + x.RollbackTo.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 
> l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ProgressDeadlineSeconds != nil { + x.ProgressDeadlineSeconds = nil + } + } else { + if x.ProgressDeadlineSeconds == nil { + x.ProgressDeadlineSeconds = new(int32) + } + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*int32)(x.ProgressDeadlineSeconds)) = int32(r.DecodeInt(32)) + } + } + for { + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj19-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentRollback) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[3] = len(x.UpdatedAnnotations) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.UpdatedAnnotations == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + 
z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("updatedAnnotations")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.UpdatedAnnotations == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy16 := &x.RollbackTo + yy16.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy18 := &x.RollbackTo + yy18.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentRollback) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentRollback) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "updatedAnnotations": + if r.TryDecodeAsNil() { + x.UpdatedAnnotations = nil + } else { + yyv10 := &x.UpdatedAnnotations + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + z.F.DecMapStringStringX(yyv10, false, d) + } + } + case "rollbackTo": + if r.TryDecodeAsNil() { + x.RollbackTo = RollbackConfig{} + } else { + yyv12 := &x.RollbackTo + yyv12.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentRollback) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv14 := &x.Kind + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv16 := &x.APIVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv18 := &x.Name + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UpdatedAnnotations = nil + } else { + yyv20 := &x.UpdatedAnnotations + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + z.F.DecMapStringStringX(yyv20, false, d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RollbackTo = RollbackConfig{} + } else { + yyv22 := &x.RollbackTo + yyv22.CodecDecodeSelf(d) + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RollbackConfig) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Revision != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Revision)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[0] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("revision")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Revision)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RollbackConfig) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RollbackConfig) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "revision": + if r.TryDecodeAsNil() { + x.Revision = 0 + } else { + yyv4 := &x.Revision + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RollbackConfig) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Revision = 0 + } else { + yyv7 := &x.Revision + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int64)(yyv7)) = int64(r.DecodeInt(64)) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentStrategy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 
bool = false + yyq2[0] = x.Type != "" + yyq2[1] = x.RollingUpdate != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Type.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.RollingUpdate == nil { + r.EncodeNil() + } else { + x.RollingUpdate.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rollingUpdate")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RollingUpdate == nil { + r.EncodeNil() + } else { + x.RollingUpdate.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentStrategy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "rollingUpdate": + if r.TryDecodeAsNil() { + if x.RollingUpdate != nil { + x.RollingUpdate = nil + } + } else { + if x.RollingUpdate == nil { + x.RollingUpdate = new(RollingUpdateDeployment) + } + x.RollingUpdate.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentStrategy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = 
yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv7 := &x.Type + yyv7.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RollingUpdate != nil { + x.RollingUpdate = nil + } + } else { + if x.RollingUpdate == nil { + x.RollingUpdate = new(RollingUpdateDeployment) + } + x.RollingUpdate.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x DeploymentStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *DeploymentStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *RollingUpdateDeployment) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.MaxUnavailable != nil + yyq2[1] = x.MaxSurge != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.MaxUnavailable == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { + } else if !yym4 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxUnavailable) + } else { + z.EncFallback(x.MaxUnavailable) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("maxUnavailable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MaxUnavailable == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxUnavailable) + } else { + z.EncFallback(x.MaxUnavailable) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.MaxSurge == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && 
z.EncExt(x.MaxSurge) { + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxSurge) + } else { + z.EncFallback(x.MaxSurge) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("maxSurge")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MaxSurge == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxSurge) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxSurge) + } else { + z.EncFallback(x.MaxSurge) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RollingUpdateDeployment) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RollingUpdateDeployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "maxUnavailable": + if r.TryDecodeAsNil() { + if x.MaxUnavailable != nil { + x.MaxUnavailable = nil + } + } else { + if x.MaxUnavailable == nil { + x.MaxUnavailable = new(pkg5_intstr.IntOrString) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxUnavailable) + } else { + z.DecFallback(x.MaxUnavailable, false) + } + } + case "maxSurge": + if r.TryDecodeAsNil() { + if x.MaxSurge != nil { + x.MaxSurge = nil + } + } else { + if x.MaxSurge == nil { + x.MaxSurge = new(pkg5_intstr.IntOrString) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxSurge) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxSurge) + } else { + z.DecFallback(x.MaxSurge, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RollingUpdateDeployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + 
} + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.MaxUnavailable != nil { + x.MaxUnavailable = nil + } + } else { + if x.MaxUnavailable == nil { + x.MaxUnavailable = new(pkg5_intstr.IntOrString) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxUnavailable) + } else { + z.DecFallback(x.MaxUnavailable, false) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.MaxSurge != nil { + x.MaxSurge = nil + } + } else { + if x.MaxSurge == nil { + x.MaxSurge = new(pkg5_intstr.IntOrString) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxSurge) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxSurge) + } else { + z.DecFallback(x.MaxSurge, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ObservedGeneration != 0 + yyq2[1] = x.Replicas != 0 + yyq2[2] = x.UpdatedReplicas != 0 + yyq2[3] = x.ReadyReplicas != 0 + yyq2[4] = x.AvailableReplicas != 0 + yyq2[5] = x.UnavailableReplicas != 0 + yyq2[6] = len(x.Conditions) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.UpdatedReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("updatedReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.UpdatedReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.ReadyReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readyReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.ReadyReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(x.UnavailableReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("unavailableReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.UnavailableReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + h.encSliceDeploymentCondition(([]DeploymentCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + h.encSliceDeploymentCondition(([]DeploymentCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "observedGeneration": + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv4 := &x.ObservedGeneration + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } + } + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv6 := &x.Replicas + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "updatedReplicas": + if r.TryDecodeAsNil() { + x.UpdatedReplicas = 0 + } else { + yyv8 := &x.UpdatedReplicas + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "readyReplicas": + if r.TryDecodeAsNil() { + x.ReadyReplicas = 0 + } else { + yyv10 := &x.ReadyReplicas + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "availableReplicas": + if r.TryDecodeAsNil() { + x.AvailableReplicas = 0 + } else { + yyv12 := &x.AvailableReplicas + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(yyv12)) = int32(r.DecodeInt(32)) + } + } + case "unavailableReplicas": + if r.TryDecodeAsNil() { + x.UnavailableReplicas = 0 + } else { + yyv14 := &x.UnavailableReplicas + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } + } + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv16 := &x.Conditions + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + h.decSliceDeploymentCondition((*[]DeploymentCondition)(yyv16), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv19 := &x.ObservedGeneration + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int64)(yyv19)) = int64(r.DecodeInt(64)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv21 := &x.Replicas + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UpdatedReplicas = 0 + } else { + yyv23 := &x.UpdatedReplicas + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadyReplicas = 0 + } else { + yyv25 := &x.ReadyReplicas + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AvailableReplicas = 0 + } else { + yyv27 := &x.AvailableReplicas + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(yyv27)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UnavailableReplicas = 0 + } else { + yyv29 := &x.UnavailableReplicas + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*int32)(yyv29)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv31 := &x.Conditions + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + h.decSliceDeploymentCondition((*[]DeploymentCondition)(yyv31), d) + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x DeploymentConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *DeploymentConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *DeploymentCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf7 := &x.Status + yysf7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf8 := &x.Status + yysf8.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastUpdateTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastUpdateTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastUpdateTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.LastTransitionTime + yym18 := z.EncBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } else if !yym18 && z.IsJSONHandle() { + z.EncJSONMarshal(yy17) + } else { + z.EncFallback(yy17) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "lastUpdateTime": + if r.TryDecodeAsNil() { + x.LastUpdateTime = pkg1_v1.Time{} + } else { + yyv6 := &x.LastUpdateTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "lastTransitionTime": + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg1_v1.Time{} + } else { + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv10 := &x.Reason + yym11 := 
z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv12 := &x.Message + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv15 := &x.Type + yyv15.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv16 := &x.Status + yyv16.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastUpdateTime = pkg1_v1.Time{} + } else { + yyv17 := &x.LastUpdateTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg1_v1.Time{} + } else { + yyv19 := &x.LastTransitionTime + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if yym20 { + z.DecBinaryUnmarshal(yyv19) + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) + } else { + z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv21 := &x.Reason + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv23 := &x.Message + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceDeployment(([]Deployment)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceDeployment(([]Deployment)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} 
+ +func (x *DeploymentList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceDeployment((*[]Deployment)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if 
yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceDeployment((*[]Deployment)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSlicev1_PersistentVolumeClaim(v []pkg3_v1.PersistentVolumeClaim, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicev1_PersistentVolumeClaim(v *[]pkg3_v1.PersistentVolumeClaim, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg3_v1.PersistentVolumeClaim{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 376) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]pkg3_v1.PersistentVolumeClaim, yyrl1) + } + } else { + yyv1 = make([]pkg3_v1.PersistentVolumeClaim, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg3_v1.PersistentVolumeClaim{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, pkg3_v1.PersistentVolumeClaim{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg3_v1.PersistentVolumeClaim{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, pkg3_v1.PersistentVolumeClaim{}) // var yyz1 pkg3_v1.PersistentVolumeClaim + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg3_v1.PersistentVolumeClaim{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true 
+ } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg3_v1.PersistentVolumeClaim{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceStatefulSet(v []StatefulSet, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceStatefulSet(v *[]StatefulSet, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []StatefulSet{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 856) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]StatefulSet, yyrl1) + } + } else { + yyv1 = make([]StatefulSet, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = StatefulSet{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, StatefulSet{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = StatefulSet{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, StatefulSet{}) // var yyz1 StatefulSet + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = StatefulSet{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []StatefulSet{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceDeploymentCondition(v []DeploymentCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceDeploymentCondition(v *[]DeploymentCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []DeploymentCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = 
z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]DeploymentCondition, yyrl1) + } + } else { + yyv1 = make([]DeploymentCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = DeploymentCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, DeploymentCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = DeploymentCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, DeploymentCondition{}) // var yyz1 DeploymentCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = DeploymentCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []DeploymentCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceDeployment(v []Deployment, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Deployment{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 920) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Deployment, yyrl1) + } + } else { + yyv1 = make([]Deployment, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Deployment{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Deployment{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Deployment{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Deployment{}) // var yyz1 Deployment + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Deployment{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) 
+				}
+
+			} else {
+				z.DecSwallow()
+			}
+
+		}
+		if yyj1 < len(yyv1) {
+			yyv1 = yyv1[:yyj1]
+			yyc1 = true
+		} else if yyj1 == 0 && yyv1 == nil {
+			yyv1 = []Deployment{}
+			yyc1 = true
+		}
+	}
+	yyh1.End()
+	if yyc1 {
+		*v = yyv1
+	}
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types.go
new file mode 100644
index 0000000000..a5675d5d30
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types.go
@@ -0,0 +1,375 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/client-go/pkg/api/v1"
+)
+
+const (
+	// StatefulSetInitAnnotation if present, and set to false, indicates that a Pod's readiness should be ignored.
+	StatefulSetInitAnnotation = "pod.alpha.kubernetes.io/initialized"
+)
+
+// ScaleSpec describes the attributes of a scale subresource
+type ScaleSpec struct {
+	// desired number of instances for the scaled object.
+	// +optional
+	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+}
+
+// ScaleStatus represents the current status of a scale subresource.
+type ScaleStatus struct {
+	// actual number of observed instances of the scaled object.
+	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
+
+	// label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
+	// +optional
+	Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
+
+	// label selector for pods that should match the replicas count. This is a serializated
+	// version of both map-based and more expressive set-based selectors. This is done to
+	// avoid introspection in the clients. The string will be in the same format as the
+	// query-param syntax. If the target type only supports map-based selectors, both this
+	// field and map-based selector field are populated.
+	// More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
+	// +optional
+	TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"`
+}
+
+// +genclient=true
+// +noMethods=true
+
+// Scale represents a scaling request for a resource.
+type Scale struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
+	// +optional
+	Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.
+ // +optional + Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +genclient=true + +// StatefulSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. +type StatefulSet struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired identities of pods in this set. + // +optional + Spec StatefulSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the current status of Pods in this StatefulSet. This data + // may be out of date by some window of time. + // +optional + Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// A StatefulSetSpec is the specification of a StatefulSet. +type StatefulSetSpec struct { + // Replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Selector is a label query over pods that should match the replica count. + // If empty, defaulted to labels on the pod template. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the StatefulSet + // will fulfill this Template, but have a unique identity from the rest + // of the StatefulSet. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // VolumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + // +optional + VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` + + // ServiceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. + ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"` +} + +// StatefulSetStatus represents the current state of a StatefulSet. +type StatefulSetStatus struct { + // most recent generation observed by this StatefulSet. 
+ // +optional + ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // Replicas is the number of actual replicas. + Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` +} + +// StatefulSetList is a collection of StatefulSets. +type StatefulSetList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true + +// Deployment enables declarative updates for Pods and ReplicaSets. +type Deployment struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the Deployment. + // +optional + Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the Deployment. + // +optional + Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +type DeploymentSpec struct { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + + // Template describes the pods that will be created. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 2. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` + + // Indicates that the deployment is paused. + // +optional + Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` + + // The config this deployment is rolling back to. Will be cleared after rollback is done. + // +optional + RollbackTo *RollbackConfig `json:"rollbackTo,omitempty" protobuf:"bytes,8,opt,name=rollbackTo"` + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Once autoRollback is + // implemented, the deployment controller will automatically rollback failed + // deployments. 
Note that progress will not be estimated during the time a + // deployment is paused. Defaults to 600s. + ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` +} + +// DeploymentRollback stores the information required to rollback a deployment. +type DeploymentRollback struct { + metav1.TypeMeta `json:",inline"` + // Required: This must match the Name of a deployment. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // The annotations to be updated to a deployment + // +optional + UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"` + // The config of this deployment rollback. + RollbackTo RollbackConfig `json:"rollbackTo" protobuf:"bytes,3,opt,name=rollbackTo"` +} + +type RollbackConfig struct { + // The revision to rollback to. If set to 0, rollbck to the last revision. + // +optional + Revision int64 `json:"revision,omitempty" protobuf:"varint,1,opt,name=revision"` +} + +const ( + // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added + // to existing RCs (and label key that is added to its pods) to prevent the existing RCs + // to select new pods (and old pods being select by new RC). + DefaultDeploymentUniqueLabelKey string = "pod-template-hash" +) + +// DeploymentStrategy describes how to replace existing pods with new ones. +type DeploymentStrategy struct { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +type DeploymentStrategyType string + +const ( + // Kill all existing pods before creating new ones. + RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" + + // Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one. + RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" +) + +// Spec to control the desired behavior of rolling update. +type RollingUpdateDeployment struct { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 25%. + // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. 
+ // Absolute number is calculated from percentage by rounding up. + // Defaults to 25%. + // Example: when this is set to 30%, the new RC can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is atmost 130% of desired pods. + // +optional + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` +} + +// DeploymentStatus is the most recently observed status of the Deployment. +type DeploymentStatus struct { + // The generation observed by the deployment controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` + + // Total number of ready pods targeted by this deployment. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` + + // Total number of unavailable pods targeted by this deployment. + // +optional + UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` + + // Represents the latest available observations of a deployment's current state. + Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` +} + +type DeploymentConditionType string + +// These are valid conditions of a deployment. +const ( + // Available means the deployment is available, ie. at least the minimum available + // replicas required are up and running for at least minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // Progressing means the deployment is progressing. Progress for a deployment is + // considered when a new replica set is created or adopted, and when new pods scale + // up or old pods scale down. Progress is not estimated for paused deployments or + // when progressDeadlineSeconds is not specified. + DeploymentProgressing DeploymentConditionType = "Progressing" + // ReplicaFailure is added in a deployment when one of its pods fails to be created + // or deleted. + DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + // The last time this condition was updated. 
+	LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"`
+	// Last time the condition transitioned from one status to another.
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"`
+	// The reason for the condition's last transition.
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// A human readable message indicating details about the transition.
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// DeploymentList is a list of Deployments.
+type DeploymentList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of Deployments.
+	Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 0000000000..44e9f3e45e
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,208 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Deployment = map[string]string{ + "": "Deployment enables declarative updates for Pods and ReplicaSets.", + "metadata": "Standard object metadata.", + "spec": "Specification of the desired behavior of the Deployment.", + "status": "Most recently observed status of the Deployment.", +} + +func (Deployment) SwaggerDoc() map[string]string { + return map_Deployment +} + +var map_DeploymentCondition = map[string]string{ + "": "DeploymentCondition describes the state of a deployment at a certain point.", + "type": "Type of deployment condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DeploymentCondition) SwaggerDoc() map[string]string { + return map_DeploymentCondition +} + +var map_DeploymentList = map[string]string{ + "": "DeploymentList is a list of Deployments.", + "metadata": "Standard list metadata.", + "items": "Items is the list of Deployments.", +} + +func (DeploymentList) SwaggerDoc() map[string]string { + return map_DeploymentList +} + +var map_DeploymentRollback = map[string]string{ + "": "DeploymentRollback stores the information required to rollback a deployment.", + "name": "Required: This must match the Name of a deployment.", + "updatedAnnotations": "The annotations to be updated to a deployment", + "rollbackTo": "The config of this deployment rollback.", +} + +func (DeploymentRollback) SwaggerDoc() map[string]string { + return map_DeploymentRollback +} + +var map_DeploymentSpec = map[string]string{ + "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", + "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", + "template": "Template describes the pods that will be created.", + "strategy": "The deployment strategy to use to replace existing pods with new ones.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 2.", + "paused": "Indicates that the deployment is paused.", + "rollbackTo": "The config this deployment is rolling back to. Will be cleared after rollback is done.", + "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Once autoRollback is implemented, the deployment controller will automatically rollback failed deployments. Note that progress will not be estimated during the time a deployment is paused. 
Defaults to 600s.", +} + +func (DeploymentSpec) SwaggerDoc() map[string]string { + return map_DeploymentSpec +} + +var map_DeploymentStatus = map[string]string{ + "": "DeploymentStatus is the most recently observed status of the Deployment.", + "observedGeneration": "The generation observed by the deployment controller.", + "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", + "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "readyReplicas": "Total number of ready pods targeted by this deployment.", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", + "unavailableReplicas": "Total number of unavailable pods targeted by this deployment.", + "conditions": "Represents the latest available observations of a deployment's current state.", +} + +func (DeploymentStatus) SwaggerDoc() map[string]string { + return map_DeploymentStatus +} + +var map_DeploymentStrategy = map[string]string{ + "": "DeploymentStrategy describes how to replace existing pods with new ones.", + "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", + "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", +} + +func (DeploymentStrategy) SwaggerDoc() map[string]string { + return map_DeploymentStrategy +} + +var map_RollbackConfig = map[string]string{ + "revision": "The revision to rollback to. If set to 0, rollbck to the last revision.", +} + +func (RollbackConfig) SwaggerDoc() map[string]string { + return map_RollbackConfig +} + +var map_RollingUpdateDeployment = map[string]string{ + "": "Spec to control the desired behavior of rolling update.", + "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", +} + +func (RollingUpdateDeployment) SwaggerDoc() map[string]string { + return map_RollingUpdateDeployment +} + +var map_Scale = map[string]string{ + "": "Scale represents a scaling request for a resource.", + "metadata": "Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.", +} + +func (Scale) SwaggerDoc() map[string]string { + return map_Scale +} + +var map_ScaleSpec = map[string]string{ + "": "ScaleSpec describes the attributes of a scale subresource", + "replicas": "desired number of instances for the scaled object.", +} + +func (ScaleSpec) SwaggerDoc() map[string]string { + return map_ScaleSpec +} + +var map_ScaleStatus = map[string]string{ + "": "ScaleStatus represents the current status of a scale subresource.", + "replicas": "actual number of observed instances of the scaled object.", + "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "targetSelector": "label selector for pods that should match the replicas count. This is a serialized version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", +} + +func (ScaleStatus) SwaggerDoc() map[string]string { + return map_ScaleStatus +} + +var map_StatefulSet = map[string]string{ + "": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "spec": "Spec defines the desired identities of pods in this set.", + "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", +} + +func (StatefulSet) SwaggerDoc() map[string]string { + return map_StatefulSet +} + +var map_StatefulSetList = map[string]string{ + "": "StatefulSetList is a collection of StatefulSets.", +} + +func (StatefulSetList) SwaggerDoc() map[string]string { + return map_StatefulSetList +} + +var map_StatefulSetSpec = map[string]string{ + "": "A StatefulSetSpec is the specification of a StatefulSet.", + "replicas": "Replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + "selector": "Selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "volumeClaimTemplates": "VolumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. 
A claim in this list takes precedence over any volumes in the template, with the same name.", + "serviceName": "ServiceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", +} + +func (StatefulSetSpec) SwaggerDoc() map[string]string { + return map_StatefulSetSpec +} + +var map_StatefulSetStatus = map[string]string{ + "": "StatefulSetStatus represents the current state of a StatefulSet.", + "observedGeneration": "most recent generation observed by this StatefulSet.", + "replicas": "Replicas is the number of actual replicas.", +} + +func (StatefulSetStatus) SwaggerDoc() map[string]string { + return map_StatefulSetStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.conversion.go new file mode 100644 index 0000000000..069ed93925 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.conversion.go @@ -0,0 +1,166 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/client-go/pkg/api" + api_v1 "k8s.io/client-go/pkg/api/v1" + apps "k8s.io/client-go/pkg/apis/apps" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
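
Aside: the RollingUpdateDeployment field documentation above states that a percentage maxUnavailable is turned into an absolute pod count by rounding down, while a percentage maxSurge rounds up. The short standalone sketch below (not part of the vendored tree; the replica count and percentages are hypothetical) works through that arithmetic for 10 desired replicas with both fields set to 25%:

    package main

    import (
        "fmt"
        "math"
    )

    // maxUnavailableFromPercent follows the documented maxUnavailable rule:
    // the absolute number is calculated from the percentage by rounding down.
    func maxUnavailableFromPercent(desiredReplicas, percent int) int {
        return int(math.Floor(float64(desiredReplicas) * float64(percent) / 100))
    }

    // maxSurgeFromPercent follows the documented maxSurge rule:
    // the absolute number is calculated from the percentage by rounding up.
    func maxSurgeFromPercent(desiredReplicas, percent int) int {
        return int(math.Ceil(float64(desiredReplicas) * float64(percent) / 100))
    }

    func main() {
        desired := 10
        unavailable := maxUnavailableFromPercent(desired, 25) // 2: at least 8 pods stay available
        surge := maxSurgeFromPercent(desired, 25)             // 3: at most 13 pods run at once
        fmt.Println(unavailable, surge)
    }

With the 25% defaults, then, the controller may take down at most 2 old pods at a time and create at most 3 pods above the desired count during a rolling update.
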
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_StatefulSet_To_apps_StatefulSet, + Convert_apps_StatefulSet_To_v1beta1_StatefulSet, + Convert_v1beta1_StatefulSetList_To_apps_StatefulSetList, + Convert_apps_StatefulSetList_To_v1beta1_StatefulSetList, + Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec, + Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec, + Convert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus, + Convert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus, + ) +} + +func autoConvert_v1beta1_StatefulSet_To_apps_StatefulSet(in *StatefulSet, out *apps.StatefulSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_StatefulSet_To_apps_StatefulSet(in *StatefulSet, out *apps.StatefulSet, s conversion.Scope) error { + return autoConvert_v1beta1_StatefulSet_To_apps_StatefulSet(in, out, s) +} + +func autoConvert_apps_StatefulSet_To_v1beta1_StatefulSet(in *apps.StatefulSet, out *StatefulSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_apps_StatefulSet_To_v1beta1_StatefulSet(in *apps.StatefulSet, out *StatefulSet, s conversion.Scope) error { + return autoConvert_apps_StatefulSet_To_v1beta1_StatefulSet(in, out, s) +} + +func autoConvert_v1beta1_StatefulSetList_To_apps_StatefulSetList(in *StatefulSetList, out *apps.StatefulSetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]apps.StatefulSet, len(*in)) + for i := range *in { + if err := Convert_v1beta1_StatefulSet_To_apps_StatefulSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1beta1_StatefulSetList_To_apps_StatefulSetList(in *StatefulSetList, out *apps.StatefulSetList, s conversion.Scope) error { + return autoConvert_v1beta1_StatefulSetList_To_apps_StatefulSetList(in, out, s) +} + +func autoConvert_apps_StatefulSetList_To_v1beta1_StatefulSetList(in *apps.StatefulSetList, out *StatefulSetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StatefulSet, len(*in)) + for i := range *in { + if err := Convert_apps_StatefulSet_To_v1beta1_StatefulSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_apps_StatefulSetList_To_v1beta1_StatefulSetList(in *apps.StatefulSetList, out *StatefulSetList, s conversion.Scope) error { + return autoConvert_apps_StatefulSetList_To_v1beta1_StatefulSetList(in, out, s) +} + +func autoConvert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error { + if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := 
api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + out.VolumeClaimTemplates = *(*[]api.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates)) + out.ServiceName = in.ServiceName + return nil +} + +func autoConvert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.StatefulSetSpec, out *StatefulSetSpec, s conversion.Scope) error { + if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + out.VolumeClaimTemplates = *(*[]api_v1.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates)) + out.ServiceName = in.ServiceName + return nil +} + +func autoConvert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus(in *StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error { + out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) + out.Replicas = in.Replicas + return nil +} + +func Convert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus(in *StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error { + return autoConvert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus(in, out, s) +} + +func autoConvert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus(in *apps.StatefulSetStatus, out *StatefulSetStatus, s conversion.Scope) error { + out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) + out.Replicas = in.Replicas + return nil +} + +func Convert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus(in *apps.StatefulSetStatus, out *StatefulSetStatus, s conversion.Scope) error { + return autoConvert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..d9b9d3d3c6 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,355 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" + api_v1 "k8s.io/client-go/pkg/api/v1" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
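
Aside: the generated Convert_* functions earlier in this hunk are registered into a scheme by RegisterConversions (via the init() above), and callers normally convert through that scheme rather than invoking the functions directly. A rough sketch, assuming this vendored client-go tree also provides api.Scheme and an apps install package (neither appears in this hunk):

    package main

    import (
        "fmt"

        "k8s.io/client-go/pkg/api"
        "k8s.io/client-go/pkg/apis/apps"
        appsv1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1"

        // Assumed import path: the install package registers the apps group,
        // including the generated conversions, into api.Scheme.
        _ "k8s.io/client-go/pkg/apis/apps/install"
    )

    func main() {
        versioned := &appsv1beta1.StatefulSet{}
        versioned.Name = "web"

        // Convert the versioned object into the internal apps.StatefulSet
        // using the conversions registered by RegisterConversions.
        internal := &apps.StatefulSet{}
        if err := api.Scheme.Convert(versioned, internal, nil); err != nil {
            fmt.Println("conversion failed:", err)
            return
        }
        fmt.Println(internal.Name)
    }
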
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Deployment, InType: reflect.TypeOf(&Deployment{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentCondition, InType: reflect.TypeOf(&DeploymentCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentList, InType: reflect.TypeOf(&DeploymentList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentRollback, InType: reflect.TypeOf(&DeploymentRollback{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentSpec, InType: reflect.TypeOf(&DeploymentSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentStatus, InType: reflect.TypeOf(&DeploymentStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentStrategy, InType: reflect.TypeOf(&DeploymentStrategy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollbackConfig, InType: reflect.TypeOf(&RollbackConfig{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollingUpdateDeployment, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Scale, InType: reflect.TypeOf(&Scale{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StatefulSet, InType: reflect.TypeOf(&StatefulSet{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StatefulSetList, InType: reflect.TypeOf(&StatefulSetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StatefulSetSpec, InType: reflect.TypeOf(&StatefulSetSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StatefulSetStatus, InType: reflect.TypeOf(&StatefulSetStatus{})}, + ) +} + +func DeepCopy_v1beta1_Deployment(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Deployment) + out := out.(*Deployment) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_DeploymentStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentCondition) + out := out.(*DeploymentCondition) + *out = *in + out.LastUpdateTime = in.LastUpdateTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_v1beta1_DeploymentList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentList) + out := out.(*DeploymentList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_Deployment(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentRollback(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentRollback) + out := out.(*DeploymentRollback) + *out = *in + if in.UpdatedAnnotations != nil { + in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations + *out = 
make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentSpec) + out := out.(*DeploymentSpec) + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, c); err != nil { + return err + } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.RollbackTo != nil { + in, out := &in.RollbackTo, &out.RollbackTo + *out = new(RollbackConfig) + **out = **in + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentStatus) + out := out.(*DeploymentStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_DeploymentCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentStrategy) + out := out.(*DeploymentStrategy) + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDeployment) + if err := DeepCopy_v1beta1_RollingUpdateDeployment(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1beta1_RollbackConfig(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollbackConfig) + out := out.(*RollbackConfig) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_RollingUpdateDeployment(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollingUpdateDeployment) + out := out.(*RollingUpdateDeployment) + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in + } + return nil + } +} + +func DeepCopy_v1beta1_Scale(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Scale) + out := out.(*Scale) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_ScaleStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_ScaleSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleSpec) + out := out.(*ScaleSpec) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_ScaleStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleStatus) + out := 
out.(*ScaleStatus) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_v1beta1_StatefulSet(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatefulSet) + out := out.(*StatefulSet) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_StatefulSetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_StatefulSetStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_StatefulSetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatefulSetList) + out := out.(*StatefulSetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StatefulSet, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_StatefulSet(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_StatefulSetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatefulSetSpec) + out := out.(*StatefulSetSpec) + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]api_v1.PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := api_v1.DeepCopy_v1_PersistentVolumeClaim(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_StatefulSetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatefulSetStatus) + out := out.(*StatefulSetStatus) + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.defaults.go new file mode 100644 index 0000000000..9b822c84cb --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.defaults.go @@ -0,0 +1,326 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! 
+ +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/client-go/pkg/api/v1" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&Deployment{}, func(obj interface{}) { SetObjectDefaults_Deployment(obj.(*Deployment)) }) + scheme.AddTypeDefaultingFunc(&DeploymentList{}, func(obj interface{}) { SetObjectDefaults_DeploymentList(obj.(*DeploymentList)) }) + scheme.AddTypeDefaultingFunc(&StatefulSet{}, func(obj interface{}) { SetObjectDefaults_StatefulSet(obj.(*StatefulSet)) }) + scheme.AddTypeDefaultingFunc(&StatefulSetList{}, func(obj interface{}) { SetObjectDefaults_StatefulSetList(obj.(*StatefulSetList)) }) + return nil +} + +func SetObjectDefaults_Deployment(in *Deployment) { + SetDefaults_Deployment(in) + v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + v1.SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet 
!= nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_DeploymentList(in *DeploymentList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Deployment(a) + } +} + +func SetObjectDefaults_StatefulSet(in *StatefulSet) { + SetDefaults_StatefulSet(in) + v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + v1.SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if 
a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.VolumeClaimTemplates { + a := &in.Spec.VolumeClaimTemplates[i] + v1.SetDefaults_PersistentVolumeClaim(a) + v1.SetDefaults_ResourceList(&a.Spec.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Spec.Resources.Requests) + v1.SetDefaults_ResourceList(&a.Status.Capacity) + } +} + +func SetObjectDefaults_StatefulSetList(in *StatefulSetList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_StatefulSet(a) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/apps/zz_generated.deepcopy.go new file mode 100644 index 0000000000..5048531ca8 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/zz_generated.deepcopy.go @@ -0,0 +1,125 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package apps + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/client-go/pkg/api" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_apps_StatefulSet, InType: reflect.TypeOf(&StatefulSet{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_apps_StatefulSetList, InType: reflect.TypeOf(&StatefulSetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_apps_StatefulSetSpec, InType: reflect.TypeOf(&StatefulSetSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_apps_StatefulSetStatus, InType: reflect.TypeOf(&StatefulSetStatus{})}, + ) +} + +func DeepCopy_apps_StatefulSet(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatefulSet) + out := out.(*StatefulSet) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_apps_StatefulSetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_apps_StatefulSetStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_apps_StatefulSetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatefulSetList) + out := out.(*StatefulSetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StatefulSet, len(*in)) + for i := range *in { + if err := DeepCopy_apps_StatefulSet(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_apps_StatefulSetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatefulSetSpec) + out := out.(*StatefulSetSpec) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]api.PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := api.DeepCopy_api_PersistentVolumeClaim(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_apps_StatefulSetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatefulSetStatus) + out := out.(*StatefulSetStatus) + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/OWNERS b/vendor/k8s.io/client-go/pkg/apis/authentication/OWNERS new file mode 100755 index 0000000000..4135522b21 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/OWNERS @@ -0,0 +1,9 @@ +reviewers: +- liggitt +- lavalamp +- wojtek-t +- deads2k +- sttts +- timothysc +- mbohlool +- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/doc.go b/vendor/k8s.io/client-go/pkg/apis/authentication/doc.go new file mode 
100644 index 0000000000..194de434d8 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=authentication.k8s.io +package authentication diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/install/install.go b/vendor/k8s.io/client-go/pkg/apis/authentication/install/install.go new file mode 100644 index 0000000000..b8a9521a6b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/install/install.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/authentication" + "k8s.io/client-go/pkg/apis/authentication/v1" + "k8s.io/client-go/pkg/apis/authentication/v1beta1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: authentication.GroupName, + VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version, v1beta1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/authentication", + RootScopedKinds: sets.NewString("TokenReview"), + AddInternalObjectsToScheme: authentication.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, + v1.SchemeGroupVersion.Version: v1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/register.go b/vendor/k8s.io/client-go/pkg/apis/authentication/register.go new file mode 100644 index 0000000000..b0ac3c28bf --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "authentication.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &TokenReview{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/types.go b/vendor/k8s.io/client-go/pkg/apis/authentication/types.go new file mode 100644 index 0000000000..9c1e66b7bb --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/types.go @@ -0,0 +1,89 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // ImpersonateUserHeader is used to impersonate a particular user during an API server request + ImpersonateUserHeader = "Impersonate-User" + + // ImpersonateGroupHeader is used to impersonate a particular group during an API server request. + // It can be repeated multiplied times for multiple groups. + ImpersonateGroupHeader = "Impersonate-Group" + + // ImpersonateUserExtraHeaderPrefix is a prefix for any header used to impersonate an entry in the + // extra map[string][]string for user.Info. The key will be every after the prefix. + // It can be repeated multiplied times for multiple map keys and the same key can be repeated multiple + // times to have multiple elements in the slice under a single key + ImpersonateUserExtraHeaderPrefix = "Impersonate-Extra-" +) + +// +genclient=true +// +nonNamespaced=true +// +noMethods=true + +// TokenReview attempts to authenticate a token to a known user. 
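
Aside: the Impersonate-* constants above name the HTTP headers an authorized client sends to act as another identity; the group header and the extra-header prefix may be repeated for multiple values. A minimal sketch using only net/http (the server URL and identities are made up, and a real client would additionally present the impersonating user's own credentials):

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        // Hypothetical API server endpoint; any request made "as" another user works the same way.
        req, err := http.NewRequest("GET", "https://kubernetes.example.com/api/v1/namespaces", nil)
        if err != nil {
            fmt.Println(err)
            return
        }

        // Header names correspond to ImpersonateUserHeader, ImpersonateGroupHeader
        // and ImpersonateUserExtraHeaderPrefix defined above.
        req.Header.Set("Impersonate-User", "jane@example.com")
        req.Header.Add("Impersonate-Group", "developers")
        req.Header.Add("Impersonate-Group", "qa")
        req.Header.Set("Impersonate-Extra-scopes", "view")

        fmt.Println(req.Header)
    }
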
+type TokenReview struct { + metav1.TypeMeta + // ObjectMeta fulfills the metav1.ObjectMetaAccessor interface so that the stock + // REST handler paths work + metav1.ObjectMeta + + // Spec holds information about the request being evaluated + Spec TokenReviewSpec + + // Status is filled in by the server and indicates whether the request can be authenticated. + Status TokenReviewStatus +} + +// TokenReviewSpec is a description of the token authentication request. +type TokenReviewSpec struct { + // Token is the opaque bearer token. + Token string +} + +// TokenReviewStatus is the result of the token authentication request. +// This type mirrors the authentication.Token interface +type TokenReviewStatus struct { + // Authenticated indicates that the token was associated with a known user. + Authenticated bool + // User is the UserInfo associated with the provided token. + User UserInfo + // Error indicates that the token couldn't be checked + Error string +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +type UserInfo struct { + // The name that uniquely identifies this user among all active users. + Username string + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + UID string + // The names of groups this user is a part of. + Groups []string + // Any additional information provided by the authenticator. + Extra map[string]ExtraValue +} + +// ExtraValue masks the value so protobuf can generate +type ExtraValue []string diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/conversion.go new file mode 100644 index 0000000000..2ff5732d6d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/conversion.go @@ -0,0 +1,26 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + return scheme.AddConversionFuncs() +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/defaults.go new file mode 100644 index 0000000000..d63d917543 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/defaults.go @@ -0,0 +1,25 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return scheme.AddDefaultingFuncs() +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/doc.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/doc.go new file mode 100644 index 0000000000..8140e47c59 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=authentication.k8s.io +package v1 diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.pb.go new file mode 100644 index 0000000000..e3dfb9d3ad --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.pb.go @@ -0,0 +1,1281 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/authentication/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/authentication/v1/generated.proto + + It has these top-level messages: + ExtraValue + TokenReview + TokenReviewSpec + TokenReviewStatus + UserInfo +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *TokenReview) Reset() { *m = TokenReview{} } +func (*TokenReview) ProtoMessage() {} +func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } +func (*TokenReviewSpec) ProtoMessage() {} +func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } +func (*TokenReviewStatus) ProtoMessage() {} +func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *UserInfo) Reset() { *m = UserInfo{} } +func (*UserInfo) ProtoMessage() {} +func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func init() { + proto.RegisterType((*ExtraValue)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.ExtraValue") + proto.RegisterType((*TokenReview)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.TokenReview") + proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.TokenReviewSpec") + proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.TokenReviewStatus") + proto.RegisterType((*UserInfo)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.UserInfo") +} +func (m ExtraValue) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m ExtraValue) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *TokenReview) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *TokenReview) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *TokenReviewSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *TokenReviewSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Token))) + i += copy(data[i:], m.Token) + return i, nil +} + +func (m *TokenReviewStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil 
{ + return nil, err + } + return data[:n], nil +} + +func (m *TokenReviewStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Authenticated { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.User.Size())) + n4, err := m.User.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Error))) + i += copy(data[i:], m.Error) + return i, nil +} + +func (m *UserInfo) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *UserInfo) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Username))) + i += copy(data[i:], m.Username) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.UID))) + i += copy(data[i:], m.UID) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Extra) > 0 { + for k := range m.Extra { + data[i] = 0x22 + i++ + v := m.Extra[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n5, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + } + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m ExtraValue) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TokenReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TokenReviewSpec) Size() (n int) { + var l int + _ = l + l = len(m.Token) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TokenReviewStatus) Size() (n int) { + var l int + _ = l + n += 2 + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *UserInfo) Size() (n int) { + var l int + _ = l + l = len(m.Username) + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *TokenReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TokenReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReviewSpec{`, + `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `}`, + }, "") + return s +} +func (this *TokenReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReviewStatus{`, + `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, + `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *UserInfo) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&UserInfo{`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ExtraValue) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) 
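The length prefixes decoded here, and the sovGenerated/encodeVarintGenerated helpers above, are ordinary protobuf base-128 varints. A minimal standalone sketch of that encoding (illustration only, not part of the vendored file):

package main

import "fmt"

// putVarint mirrors encodeVarintGenerated: emit 7 payload bits per byte and
// set the continuation bit on every byte except the last.
func putVarint(buf []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return offset + 1
}

// varintLen mirrors sovGenerated: how many bytes putVarint will write.
func varintLen(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

func main() {
	buf := make([]byte, varintLen(300))
	putVarint(buf, 0, 300)
	fmt.Printf("% x\n", buf) // ac 02: low 7 bits with the continuation bit, then 2
}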
+ } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReview) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReviewSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReviewStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Authenticated", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Authenticated = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.User.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserInfo) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + m.Extra[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 655 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x53, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0xb5, 0xf3, 0x53, 0x25, 0x93, 0xaf, 0x1f, 0x65, 0x24, 0xa4, 0x28, 0x12, 0x4e, 0x14, 0x58, + 0x74, 0x51, 0xc6, 0xa4, 0xa0, 0x52, 0x15, 0x10, 0xaa, 0x45, 0x85, 0xba, 0x00, 0xa4, 0x81, 0x22, + 0xc4, 0x06, 0x26, 0xce, 0xad, 0x63, 0x52, 0xff, 0x68, 0x3c, 0x36, 0xed, 0xae, 0x8f, 0xc0, 0x92, + 0x25, 0xaf, 0xc1, 0x1b, 0x74, 0x47, 0x77, 0xb0, 0x40, 0x15, 0x0d, 0x2f, 0x82, 0x66, 0x3c, 0xd4, + 0x2e, 0x69, 0x85, 0xda, 0xdd, 0xcc, 0x99, 0x7b, 0xce, 0xbd, 0xe7, 0xde, 0xb9, 0xe8, 0xc1, 0x64, + 0x35, 0x21, 0x7e, 0x64, 0x4f, 0xd2, 0x21, 0xf0, 0x10, 0x04, 0x24, 0x76, 0x3c, 0xf1, 0x6c, 0x16, + 0xfb, 0x89, 0xcd, 0x52, 0x31, 0x86, 0x50, 0xf8, 0x2e, 0x13, 0x7e, 0x14, 0xda, 0xd9, 0xc0, 0xf6, + 0x20, 0x04, 0xce, 0x04, 0x8c, 0x48, 0xcc, 0x23, 0x11, 0xe1, 0xa5, 0x9c, 0x4d, 0x0a, 0x36, 0x89, + 0x27, 0x1e, 0x91, 0x6c, 0x72, 0x9a, 0x4d, 0xb2, 0x41, 0xe7, 0x96, 0xe7, 0x8b, 0x71, 0x3a, 0x24, + 0x6e, 0x14, 0xd8, 0x5e, 0xe4, 0x45, 0xb6, 0x12, 0x19, 0xa6, 0xdb, 0xea, 0xa6, 0x2e, 0xea, 0x94, + 0x8b, 0x77, 0xee, 0xea, 0xd2, 0x58, 0xec, 0x07, 0xcc, 0x1d, 0xfb, 0x21, 0xf0, 0xbd, 0xa2, 0xb8, + 0x00, 0x04, 0x3b, 0xa3, 0xa4, 0x8e, 0x7d, 0x1e, 0x8b, 0xa7, 0xa1, 0xf0, 0x03, 0x98, 0x21, 0xac, + 0xfc, 0x8b, 0x90, 0xb8, 0x63, 0x08, 0xd8, 0x0c, 0xef, 0xce, 0x79, 0xbc, 0x54, 0xf8, 0x3b, 0xb6, + 0x1f, 0x8a, 0x44, 0xf0, 0x19, 0x52, 0xc9, 0x53, 0x02, 0x3c, 0x03, 0x5e, 0x18, 0x82, 0x5d, 0x16, + 0xc4, 0x3b, 0x70, 0x86, 0xa7, 0xfe, 0x3d, 0x84, 0x36, 0x76, 0x05, 0x67, 0xaf, 0xd8, 0x4e, 0x0a, + 0xb8, 0x8b, 0xea, 0xbe, 0x80, 0x20, 0x69, 0x9b, 0xbd, 0xea, 0x62, 0xd3, 0x69, 0x4e, 0x8f, 0xba, + 0xf5, 0x4d, 0x09, 0xd0, 0x1c, 0x5f, 0x6b, 0x7c, 0xfa, 0xdc, 0x35, 0xf6, 0x7f, 0xf4, 0x8c, 0xfe, + 0x97, 0x0a, 0x6a, 0xbd, 0x8c, 0x26, 0x10, 0x52, 0xc8, 0x7c, 0xf8, 0x80, 0xdf, 0xa1, 0x86, 0xec, + 0xdb, 0x88, 0x09, 0xd6, 0x36, 0x7b, 0xe6, 0x62, 0x6b, 0xf9, 0x36, 0xd1, 0x23, 0x2c, 0xdb, 0x28, + 0x86, 0x28, 0xa3, 0x49, 0x36, 0x20, 0xcf, 0x87, 0xef, 0xc1, 0x15, 0x4f, 0x41, 0x30, 0x07, 0x1f, + 0x1c, 0x75, 0x8d, 0xe9, 0x51, 0x17, 0x15, 0x18, 0x3d, 0x51, 0xc5, 0x6f, 0x51, 0x2d, 0x89, 0xc1, + 0x6d, 0x57, 0x94, 0xfa, 0x43, 0x72, 0x91, 0x0f, 0x42, 0x4a, 0xa5, 0xbe, 0x88, 0xc1, 0x75, 0xfe, + 0xd3, 0xa9, 0x6a, 0xf2, 0x46, 0x95, 0x30, 0xf6, 0xd0, 0x5c, 0x22, 0x98, 0x48, 0x93, 0x76, 0x55, + 0xa5, 
0x78, 0x74, 0xf9, 0x14, 0x4a, 0xc6, 0xf9, 0x5f, 0x27, 0x99, 0xcb, 0xef, 0x54, 0xcb, 0xf7, + 0x57, 0xd0, 0x95, 0xbf, 0xea, 0xc1, 0x37, 0x50, 0x5d, 0x48, 0x48, 0xf5, 0xae, 0xe9, 0xcc, 0x6b, + 0x66, 0x3d, 0x8f, 0xcb, 0xdf, 0xfa, 0x5f, 0x4d, 0x74, 0x75, 0x26, 0x0b, 0xbe, 0x8f, 0xe6, 0x4b, + 0xc5, 0xc0, 0x48, 0x49, 0x34, 0x9c, 0x6b, 0x5a, 0x62, 0x7e, 0xbd, 0xfc, 0x48, 0x4f, 0xc7, 0xe2, + 0xd7, 0xa8, 0x96, 0x26, 0xc0, 0x75, 0x53, 0x57, 0x2e, 0xe6, 0x78, 0x2b, 0x01, 0xbe, 0x19, 0x6e, + 0x47, 0x45, 0x37, 0x25, 0x42, 0x95, 0xa2, 0x74, 0x04, 0x9c, 0x47, 0x5c, 0x35, 0xb3, 0xe4, 0x68, + 0x43, 0x82, 0x34, 0x7f, 0xeb, 0x7f, 0xab, 0xa0, 0xc6, 0x1f, 0x15, 0xbc, 0x84, 0x1a, 0x92, 0x19, + 0xb2, 0x00, 0x74, 0x1b, 0x16, 0x34, 0x49, 0xc5, 0x48, 0x9c, 0x9e, 0x44, 0xe0, 0xeb, 0xa8, 0x9a, + 0xfa, 0x23, 0x55, 0x78, 0xd3, 0x69, 0xe9, 0xc0, 0xea, 0xd6, 0xe6, 0x63, 0x2a, 0x71, 0xdc, 0x47, + 0x73, 0x1e, 0x8f, 0xd2, 0x58, 0x0e, 0x53, 0xfe, 0x65, 0x24, 0xe7, 0xf0, 0x44, 0x21, 0x54, 0xbf, + 0xe0, 0x6d, 0x54, 0x07, 0xf9, 0xf9, 0xdb, 0xb5, 0x5e, 0x75, 0xb1, 0xb5, 0xbc, 0x7e, 0x39, 0xf7, + 0x44, 0x2d, 0xd0, 0x46, 0x28, 0xf8, 0x5e, 0xc9, 0xa5, 0xc4, 0x68, 0x2e, 0xdf, 0xe1, 0x7a, 0xc9, + 0x54, 0x0c, 0x5e, 0x40, 0xd5, 0x09, 0xec, 0xe5, 0x0e, 0xa9, 0x3c, 0xe2, 0x67, 0xa8, 0x9e, 0xc9, + 0xfd, 0xd3, 0x53, 0x58, 0xbd, 0x58, 0x1d, 0xc5, 0xfe, 0xd2, 0x5c, 0x66, 0xad, 0xb2, 0x6a, 0x3a, + 0x37, 0x0f, 0x8e, 0x2d, 0xe3, 0xf0, 0xd8, 0x32, 0xbe, 0x1f, 0x5b, 0xc6, 0xfe, 0xd4, 0x32, 0x0f, + 0xa6, 0x96, 0x79, 0x38, 0xb5, 0xcc, 0x9f, 0x53, 0xcb, 0xfc, 0xf8, 0xcb, 0x32, 0xde, 0x54, 0xb2, + 0xc1, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbb, 0x6b, 0x11, 0x20, 0xa4, 0x05, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.proto new file mode 100644 index 0000000000..ea5203d37b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.proto @@ -0,0 +1,100 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.authentication.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message ExtraValue { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// TokenReview attempts to authenticate a token to a known user. +// Note: TokenReview requests may be cached by the webhook token authenticator +// plugin in the kube-apiserver. 
+message TokenReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated + optional TokenReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request can be authenticated. + // +optional + optional TokenReviewStatus status = 3; +} + +// TokenReviewSpec is a description of the token authentication request. +message TokenReviewSpec { + // Token is the opaque bearer token. + // +optional + optional string token = 1; +} + +// TokenReviewStatus is the result of the token authentication request. +message TokenReviewStatus { + // Authenticated indicates that the token was associated with a known user. + // +optional + optional bool authenticated = 1; + + // User is the UserInfo associated with the provided token. + // +optional + optional UserInfo user = 2; + + // Error indicates that the token couldn't be checked + // +optional + optional string error = 3; +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +message UserInfo { + // The name that uniquely identifies this user among all active users. + // +optional + optional string username = 1; + + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + // +optional + optional string uid = 2; + + // The names of groups this user is a part of. + // +optional + repeated string groups = 3; + + // Any additional information provided by the authenticator. + // +optional + map<string, ExtraValue> extra = 4; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/register.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/register.go new file mode 100644 index 0000000000..8661169af4 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/register.go @@ -0,0 +1,48 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "authentication.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme.
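register.go exposes the SchemeBuilder and AddToScheme hooks that consumers use to wire this group/version into a runtime.Scheme. A minimal sketch of that registration, assuming the vendored import paths added in this patch (illustration only):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	authv1 "k8s.io/client-go/pkg/apis/authentication/v1"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme runs every function collected by the SchemeBuilder:
	// addKnownTypes plus the generated conversion, defaulting and deep-copy registrations.
	if err := authv1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	gvk := authv1.SchemeGroupVersion.WithKind("TokenReview")
	fmt.Println(scheme.Recognizes(gvk)) // true once TokenReview is registered
}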
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &TokenReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types.go new file mode 100644 index 0000000000..e6ff587056 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types.go @@ -0,0 +1,91 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient=true +// +nonNamespaced=true +// +noMethods=true + +// TokenReview attempts to authenticate a token to a known user. +// Note: TokenReview requests may be cached by the webhook token authenticator +// plugin in the kube-apiserver. +type TokenReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated + Spec TokenReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request can be authenticated. + // +optional + Status TokenReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// TokenReviewSpec is a description of the token authentication request. +type TokenReviewSpec struct { + // Token is the opaque bearer token. + // +optional + Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` +} + +// TokenReviewStatus is the result of the token authentication request. +type TokenReviewStatus struct { + // Authenticated indicates that the token was associated with a known user. + // +optional + Authenticated bool `json:"authenticated,omitempty" protobuf:"varint,1,opt,name=authenticated"` + // User is the UserInfo associated with the provided token. + // +optional + User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // Error indicates that the token couldn't be checked + // +optional + Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +type UserInfo struct { + // The name that uniquely identifies this user among all active users. + // +optional + Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"` + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + // +optional + UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"` + // The names of groups this user is a part of. + // +optional + Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` + // Any additional information provided by the authenticator. 
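These are the wire types exchanged with the TokenReview API: a caller sets Spec.Token, and the server answers by filling in Status. A minimal round-trip sketch using the vendored types (the token and user values below are placeholders, illustration only):

package main

import (
	"encoding/json"
	"fmt"

	authv1 "k8s.io/client-go/pkg/apis/authentication/v1"
)

func main() {
	// Request half: only Spec.Token is set by the caller.
	review := authv1.TokenReview{
		Spec: authv1.TokenReviewSpec{Token: "opaque-bearer-token"},
	}
	body, _ := json.Marshal(review)
	fmt.Println(string(body)) // the JSON body a client would submit for review

	// Response half: the server populates Status before returning the object.
	review.Status = authv1.TokenReviewStatus{
		Authenticated: true,
		User: authv1.UserInfo{
			Username: "jane",
			Groups:   []string{"system:authenticated"},
			Extra:    map[string]authv1.ExtraValue{"scopes": {"openid"}},
		},
	}
	fmt.Println(review.Status.User.Username, review.Status.Authenticated)
}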
+ // +optional + Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,4,rep,name=extra"` +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type ExtraValue []string + +func (t ExtraValue) String() string { + return fmt.Sprintf("%v", []string(t)) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..bb235e4eaa --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types_swagger_doc_generated.go @@ -0,0 +1,72 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_TokenReview = map[string]string{ + "": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", + "spec": "Spec holds information about the request being evaluated", + "status": "Status is filled in by the server and indicates whether the request can be authenticated.", +} + +func (TokenReview) SwaggerDoc() map[string]string { + return map_TokenReview +} + +var map_TokenReviewSpec = map[string]string{ + "": "TokenReviewSpec is a description of the token authentication request.", + "token": "Token is the opaque bearer token.", +} + +func (TokenReviewSpec) SwaggerDoc() map[string]string { + return map_TokenReviewSpec +} + +var map_TokenReviewStatus = map[string]string{ + "": "TokenReviewStatus is the result of the token authentication request.", + "authenticated": "Authenticated indicates that the token was associated with a known user.", + "user": "User is the UserInfo associated with the provided token.", + "error": "Error indicates that the token couldn't be checked", +} + +func (TokenReviewStatus) SwaggerDoc() map[string]string { + return map_TokenReviewStatus +} + +var map_UserInfo = map[string]string{ + "": "UserInfo holds the information about the user needed to implement the user.Info interface.", + "username": "The name that uniquely identifies this user among all active users.", + "uid": "A unique value that identifies this user across time. 
If this user is deleted and another user by the same name is added, they will have different UIDs.", + "groups": "The names of groups this user is a part of.", + "extra": "Any additional information provided by the authenticator.", +} + +func (UserInfo) SwaggerDoc() map[string]string { + return map_UserInfo +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.conversion.go new file mode 100644 index 0000000000..9c1335e915 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.conversion.go @@ -0,0 +1,145 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + authentication "k8s.io/client-go/pkg/apis/authentication" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_TokenReview_To_authentication_TokenReview, + Convert_authentication_TokenReview_To_v1_TokenReview, + Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec, + Convert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec, + Convert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus, + Convert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus, + Convert_v1_UserInfo_To_authentication_UserInfo, + Convert_authentication_UserInfo_To_v1_UserInfo, + ) +} + +func autoConvert_v1_TokenReview_To_authentication_TokenReview(in *TokenReview, out *authentication.TokenReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_TokenReview_To_authentication_TokenReview(in *TokenReview, out *authentication.TokenReview, s conversion.Scope) error { + return autoConvert_v1_TokenReview_To_authentication_TokenReview(in, out, s) +} + +func autoConvert_authentication_TokenReview_To_v1_TokenReview(in *authentication.TokenReview, out *TokenReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authentication_TokenReview_To_v1_TokenReview(in 
*authentication.TokenReview, out *TokenReview, s conversion.Scope) error { + return autoConvert_authentication_TokenReview_To_v1_TokenReview(in, out, s) +} + +func autoConvert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error { + out.Token = in.Token + return nil +} + +func Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error { + return autoConvert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in, out, s) +} + +func autoConvert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { + out.Token = in.Token + return nil +} + +func Convert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { + return autoConvert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(in, out, s) +} + +func autoConvert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *TokenReviewStatus, out *authentication.TokenReviewStatus, s conversion.Scope) error { + out.Authenticated = in.Authenticated + if err := Convert_v1_UserInfo_To_authentication_UserInfo(&in.User, &out.User, s); err != nil { + return err + } + out.Error = in.Error + return nil +} + +func Convert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *TokenReviewStatus, out *authentication.TokenReviewStatus, s conversion.Scope) error { + return autoConvert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(in, out, s) +} + +func autoConvert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(in *authentication.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { + out.Authenticated = in.Authenticated + if err := Convert_authentication_UserInfo_To_v1_UserInfo(&in.User, &out.User, s); err != nil { + return err + } + out.Error = in.Error + return nil +} + +func Convert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(in *authentication.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { + return autoConvert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(in, out, s) +} + +func autoConvert_v1_UserInfo_To_authentication_UserInfo(in *UserInfo, out *authentication.UserInfo, s conversion.Scope) error { + out.Username = in.Username + out.UID = in.UID + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]authentication.ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_v1_UserInfo_To_authentication_UserInfo(in *UserInfo, out *authentication.UserInfo, s conversion.Scope) error { + return autoConvert_v1_UserInfo_To_authentication_UserInfo(in, out, s) +} + +func autoConvert_authentication_UserInfo_To_v1_UserInfo(in *authentication.UserInfo, out *UserInfo, s conversion.Scope) error { + out.Username = in.Username + out.UID = in.UID + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_authentication_UserInfo_To_v1_UserInfo(in *authentication.UserInfo, out *UserInfo, s conversion.Scope) error { + return autoConvert_authentication_UserInfo_To_v1_UserInfo(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..0bc5640671 --- 
/dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.deepcopy.go @@ -0,0 +1,106 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_TokenReview, InType: reflect.TypeOf(&TokenReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_TokenReviewSpec, InType: reflect.TypeOf(&TokenReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_TokenReviewStatus, InType: reflect.TypeOf(&TokenReviewStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_UserInfo, InType: reflect.TypeOf(&UserInfo{})}, + ) +} + +func DeepCopy_v1_TokenReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TokenReview) + out := out.(*TokenReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_TokenReviewStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_TokenReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TokenReviewSpec) + out := out.(*TokenReviewSpec) + *out = *in + return nil + } +} + +func DeepCopy_v1_TokenReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TokenReviewStatus) + out := out.(*TokenReviewStatus) + *out = *in + if err := DeepCopy_v1_UserInfo(&in.User, &out.User, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_UserInfo(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*UserInfo) + out := out.(*UserInfo) + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*ExtraValue) + } + } + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.defaults.go new file mode 100644 index 0000000000..6df448eb9f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* 
+Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/conversion.go new file mode 100644 index 0000000000..51f3adfc71 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/conversion.go @@ -0,0 +1,26 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + return scheme.AddConversionFuncs() +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/defaults.go new file mode 100644 index 0000000000..1a4566479f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/defaults.go @@ -0,0 +1,25 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return scheme.AddDefaultingFuncs() +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/doc.go new file mode 100644 index 0000000000..342f201268 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=authentication.k8s.io +package v1beta1 diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/generated.pb.go new file mode 100644 index 0000000000..760416e637 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/generated.pb.go @@ -0,0 +1,1282 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/authentication/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/authentication/v1beta1/generated.proto + + It has these top-level messages: + ExtraValue + TokenReview + TokenReviewSpec + TokenReviewStatus + UserInfo +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
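The generated Convert_* functions for v1 shown earlier are plain field copies (with unsafe casts for the identical Groups/Extra fields) and never consult the conversion.Scope argument, so a direct call with a nil scope works. A minimal sketch, assuming the vendored import paths in this patch (illustration only):

package main

import (
	"fmt"

	auth "k8s.io/client-go/pkg/apis/authentication"
	authv1 "k8s.io/client-go/pkg/apis/authentication/v1"
)

func main() {
	in := &authv1.TokenReview{Spec: authv1.TokenReviewSpec{Token: "abc"}}
	out := &auth.TokenReview{}
	// Field-by-field copy generated by conversion-gen; the Scope parameter is unused here.
	if err := authv1.Convert_v1_TokenReview_To_authentication_TokenReview(in, out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.Spec.Token) // abc
}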
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *TokenReview) Reset() { *m = TokenReview{} } +func (*TokenReview) ProtoMessage() {} +func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } +func (*TokenReviewSpec) ProtoMessage() {} +func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } +func (*TokenReviewStatus) ProtoMessage() {} +func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *UserInfo) Reset() { *m = UserInfo{} } +func (*UserInfo) ProtoMessage() {} +func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func init() { + proto.RegisterType((*ExtraValue)(nil), "k8s.io.client-go.pkg.apis.authentication.v1beta1.ExtraValue") + proto.RegisterType((*TokenReview)(nil), "k8s.io.client-go.pkg.apis.authentication.v1beta1.TokenReview") + proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.client-go.pkg.apis.authentication.v1beta1.TokenReviewSpec") + proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.client-go.pkg.apis.authentication.v1beta1.TokenReviewStatus") + proto.RegisterType((*UserInfo)(nil), "k8s.io.client-go.pkg.apis.authentication.v1beta1.UserInfo") +} +func (m ExtraValue) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m ExtraValue) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *TokenReview) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *TokenReview) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *TokenReviewSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *TokenReviewSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Token))) + i += copy(data[i:], m.Token) + return i, nil +} + +func (m *TokenReviewStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := 
m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *TokenReviewStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Authenticated { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.User.Size())) + n4, err := m.User.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Error))) + i += copy(data[i:], m.Error) + return i, nil +} + +func (m *UserInfo) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *UserInfo) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Username))) + i += copy(data[i:], m.Username) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.UID))) + i += copy(data[i:], m.UID) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Extra) > 0 { + for k := range m.Extra { + data[i] = 0x22 + i++ + v := m.Extra[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n5, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + } + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m ExtraValue) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TokenReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TokenReviewSpec) Size() (n int) { + var l int + _ = l + l = len(m.Token) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TokenReviewStatus) Size() (n int) { + var l int + _ = l + n += 2 + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *UserInfo) Size() (n int) { + var l int + _ = l + l = len(m.Username) 
+ n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *TokenReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TokenReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReviewSpec{`, + `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `}`, + }, "") + return s +} +func (this *TokenReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReviewStatus{`, + `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, + `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *UserInfo) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&UserInfo{`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ExtraValue) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReview) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReviewSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReviewStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Authenticated", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Authenticated = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.User.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserInfo) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + m.Extra[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 668 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x53, 0x4d, 0x6f, 0xd3, 0x4a, + 0x14, 0x8d, 0xf3, 0xd1, 0x97, 0x4c, 0x5e, 0xdf, 0xeb, 0x1b, 0xe9, 0x49, 0x51, 0x24, 0x9c, 0x28, + 0x6c, 0x8a, 0x54, 0xc6, 0xa4, 0xa0, 0x52, 0xb5, 0x62, 0x51, 0xab, 0x05, 0x75, 0x81, 0x90, 0xa6, + 0x94, 0x05, 0x12, 0x12, 0x13, 0xe7, 0xd6, 0x31, 0x8e, 0x3f, 0x34, 0x1e, 0xa7, 0xed, 0xae, 0x3f, + 0x81, 0x25, 0x4b, 0xfe, 0x0b, 0x9b, 0x2e, 0xbb, 0x60, 0xc1, 0x02, 0x55, 0x24, 0xfc, 0x11, 0x34, + 0xe3, 0xa1, 0x76, 0x49, 0x2b, 0x44, 0xbb, 0xf3, 0x9c, 0x7b, 0xcf, 0xb9, 0xf7, 0xdc, 0xeb, 0x8b, + 0xb6, 0xfc, 0xf5, 0x84, 0x78, 0x91, 0xe5, 0xa7, 0x03, 0xe0, 0x21, 0x08, 0x48, 0xac, 0xd8, 0x77, + 0x2d, 0x16, 0x7b, 0x89, 0xc5, 0x52, 0x31, 0x82, 0x50, 0x78, 0x0e, 0x13, 0x5e, 0x14, 0x5a, 0x93, + 0xfe, 0x00, 0x04, 0xeb, 0x5b, 0x2e, 0x84, 0xc0, 0x99, 0x80, 0x21, 0x89, 0x79, 0x24, 0x22, 0xdc, + 0xcf, 0x24, 0x48, 0x2e, 0x41, 0x62, 0xdf, 0x25, 0x52, 0x82, 0x5c, 0x96, 0x20, 0x5a, 0xa2, 0x7d, + 0xdf, 0xf5, 0xc4, 0x28, 0x1d, 0x10, 0x27, 0x0a, 0x2c, 0x37, 0x72, 0x23, 0x4b, 0x29, 0x0d, 0xd2, + 0x03, 0xf5, 0x52, 0x0f, 0xf5, 0x95, 0x55, 0x68, 0x3f, 0xd2, 0x4d, 0xb2, 0xd8, 0x0b, 0x98, 0x33, + 0xf2, 0x42, 0xe0, 0xc7, 0x79, 0x9b, 0x01, 0x08, 0x66, 0x4d, 0xe6, 0xfa, 0x6a, 0x5b, 0xd7, 0xb1, + 0x78, 0x1a, 0x0a, 0x2f, 0x80, 0x39, 0xc2, 0xda, 0xef, 0x08, 0x89, 0x33, 0x82, 0x80, 0xcd, 0xf1, + 0x1e, 0x5e, 0xc7, 0x4b, 0x85, 0x37, 0xb6, 0xbc, 0x50, 0x24, 0x82, 0xcf, 0x91, 0x0a, 0x9e, 0x12, + 0xe0, 0x13, 0xe0, 0xb9, 0x21, 0x38, 0x62, 0x41, 0x3c, 0x86, 0xab, 0x3c, 0xad, 0x5c, 0xbb, 0xae, + 0x2b, 0xb2, 0x7b, 0x8f, 0x11, 0xda, 0x39, 0x12, 0x9c, 0xbd, 0x62, 0xe3, 0x14, 0x70, 0x07, 0xd5, + 0x3c, 0x01, 0x41, 0xd2, 0x32, 0xba, 0x95, 0xe5, 0x86, 0xdd, 0x98, 0x9d, 0x77, 0x6a, 0xbb, 0x12, + 0xa0, 0x19, 0xbe, 0x51, 0xff, 0xf0, 0xb1, 0x53, 0x3a, 0xf9, 0xda, 0x2d, 0xf5, 0x3e, 0x95, 0x51, + 0xf3, 0x65, 0xe4, 0x43, 0x48, 0x61, 0xe2, 0xc1, 0x21, 0x7e, 0x8b, 0xea, 0x72, 0xca, 0x43, 0x26, + 0x58, 0xcb, 0xe8, 0x1a, 0xcb, 0xcd, 0xd5, 0x07, 0x44, 0x6f, 0xbd, 0x68, 0x3a, 0xdf, 0xbb, 0xcc, + 0x26, 0x93, 0x3e, 0x79, 0x31, 0x78, 0x07, 0x8e, 0x78, 0x0e, 0x82, 0xd9, 0xf8, 0xf4, 0xbc, 0x53, + 0x9a, 0x9d, 0x77, 0x50, 0x8e, 0xd1, 0x0b, 0x55, 0x3c, 0x44, 0xd5, 0x24, 0x06, 0xa7, 0x55, 0x56, + 0xea, 0x36, 0xf9, 0xe3, 0x7f, 0x8a, 0x14, 0xfa, 0xdd, 0x8b, 0xc1, 0xb1, 0xff, 0xd6, 0xf5, 0xaa, + 0xf2, 
0x45, 0x95, 0x3a, 0x1e, 0xa3, 0x85, 0x44, 0x30, 0x91, 0x26, 0xad, 0x8a, 0xaa, 0xb3, 0x7d, + 0xcb, 0x3a, 0x4a, 0xcb, 0xfe, 0x47, 0x57, 0x5a, 0xc8, 0xde, 0x54, 0xd7, 0xe8, 0xad, 0xa1, 0x7f, + 0x7f, 0x69, 0x0a, 0xdf, 0x45, 0x35, 0x21, 0x21, 0x35, 0xc5, 0x86, 0xbd, 0xa8, 0x99, 0xb5, 0x2c, + 0x2f, 0x8b, 0xf5, 0x3e, 0x1b, 0xe8, 0xbf, 0xb9, 0x2a, 0x78, 0x13, 0x2d, 0x16, 0x3a, 0x82, 0xa1, + 0x92, 0xa8, 0xdb, 0xff, 0x6b, 0x89, 0xc5, 0xad, 0x62, 0x90, 0x5e, 0xce, 0xc5, 0x6f, 0x50, 0x35, + 0x4d, 0x80, 0xeb, 0xf1, 0x6e, 0xde, 0xc0, 0xf6, 0x7e, 0x02, 0x7c, 0x37, 0x3c, 0x88, 0xf2, 0xb9, + 0x4a, 0x84, 0x2a, 0x59, 0x69, 0x0b, 0x38, 0x8f, 0xb8, 0x1a, 0x6b, 0xc1, 0xd6, 0x8e, 0x04, 0x69, + 0x16, 0xeb, 0x4d, 0xcb, 0xa8, 0xfe, 0x53, 0x05, 0xaf, 0xa0, 0xba, 0x64, 0x86, 0x2c, 0x00, 0x3d, + 0x8b, 0x25, 0x4d, 0x52, 0x39, 0x12, 0xa7, 0x17, 0x19, 0xf8, 0x0e, 0xaa, 0xa4, 0xde, 0x50, 0x75, + 0xdf, 0xb0, 0x9b, 0x3a, 0xb1, 0xb2, 0xbf, 0xbb, 0x4d, 0x25, 0x8e, 0x7b, 0x68, 0xc1, 0xe5, 0x51, + 0x1a, 0xcb, 0xb5, 0xca, 0x5f, 0x1b, 0xc9, 0x65, 0x3c, 0x53, 0x08, 0xd5, 0x11, 0xec, 0xa3, 0x1a, + 0xc8, 0x5b, 0x68, 0x55, 0xbb, 0x95, 0xe5, 0xe6, 0xea, 0xd3, 0x5b, 0x8c, 0x80, 0xa8, 0xa3, 0xda, + 0x09, 0x05, 0x3f, 0x2e, 0x58, 0x95, 0x18, 0xcd, 0x6a, 0xb4, 0x0f, 0xf5, 0xe1, 0xa9, 0x1c, 0xbc, + 0x84, 0x2a, 0x3e, 0x1c, 0x67, 0x36, 0xa9, 0xfc, 0xc4, 0x7b, 0xa8, 0x36, 0x91, 0x37, 0xa9, 0xf7, + 0xf1, 0xe4, 0x06, 0xcd, 0xe4, 0x87, 0x4d, 0x33, 0xad, 0x8d, 0xf2, 0xba, 0x61, 0xdf, 0x3b, 0x9d, + 0x9a, 0xa5, 0xb3, 0xa9, 0x59, 0xfa, 0x32, 0x35, 0x4b, 0x27, 0x33, 0xd3, 0x38, 0x9d, 0x99, 0xc6, + 0xd9, 0xcc, 0x34, 0xbe, 0xcd, 0x4c, 0xe3, 0xfd, 0x77, 0xb3, 0xf4, 0xfa, 0x2f, 0x2d, 0xf0, 0x23, + 0x00, 0x00, 0xff, 0xff, 0xb9, 0x87, 0xc6, 0x94, 0xfa, 0x05, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/generated.proto new file mode 100644 index 0000000000..cbc050970a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/generated.proto @@ -0,0 +1,101 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.authentication.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". 
+option go_package = "v1beta1"; + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message ExtraValue { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// TokenReview attempts to authenticate a token to a known user. +// Note: TokenReview requests may be cached by the webhook token authenticator +// plugin in the kube-apiserver. +message TokenReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated + optional TokenReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request can be authenticated. + // +optional + optional TokenReviewStatus status = 3; +} + +// TokenReviewSpec is a description of the token authentication request. +message TokenReviewSpec { + // Token is the opaque bearer token. + // +optional + optional string token = 1; +} + +// TokenReviewStatus is the result of the token authentication request. +message TokenReviewStatus { + // Authenticated indicates that the token was associated with a known user. + // +optional + optional bool authenticated = 1; + + // User is the UserInfo associated with the provided token. + // +optional + optional UserInfo user = 2; + + // Error indicates that the token couldn't be checked + // +optional + optional string error = 3; +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +message UserInfo { + // The name that uniquely identifies this user among all active users. + // +optional + optional string username = 1; + + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + // +optional + optional string uid = 2; + + // The names of groups this user is a part of. + // +optional + repeated string groups = 3; + + // Any additional information provided by the authenticator. + // +optional + map<string, ExtraValue> extra = 4; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/register.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/register.go new file mode 100644 index 0000000000..ddaa197021 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/register.go @@ -0,0 +1,48 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "authentication.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &TokenReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types.generated.go new file mode 100644 index 0000000000..b8990af180 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types.generated.go @@ -0,0 +1,1568 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package v1beta1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 + } +} + +func (x *TokenReview) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *TokenReview) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *TokenReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = TokenReviewSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = TokenReviewStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *TokenReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = TokenReviewSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = TokenReviewStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *TokenReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Token != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Token)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("token")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Token)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *TokenReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *TokenReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "token": + if r.TryDecodeAsNil() { + x.Token = "" + } else { + yyv4 := &x.Token + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *TokenReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Token = "" + } else { + yyv7 := &x.Token + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *TokenReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Authenticated != false + yyq2[1] = true + yyq2[2] = x.Error != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.Authenticated)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("authenticated")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeBool(bool(x.Authenticated)) + } + } + } + if yyr2 || 
yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy7 := &x.User + yy7.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("user")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.User + yy9.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Error)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("error")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Error)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *TokenReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *TokenReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "authenticated": + if r.TryDecodeAsNil() { + x.Authenticated = false + } else { + yyv4 := &x.Authenticated + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*bool)(yyv4)) = r.DecodeBool() + } + } + case "user": + if r.TryDecodeAsNil() { + x.User = UserInfo{} + } else { + yyv6 := &x.User + yyv6.CodecDecodeSelf(d) + } + case "error": + if r.TryDecodeAsNil() { + x.Error = "" + } else { + yyv7 := &x.Error + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *TokenReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = 
r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Authenticated = false + } else { + yyv10 := &x.Authenticated + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.User = UserInfo{} + } else { + yyv12 := &x.User + yyv12.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Error = "" + } else { + yyv13 := &x.Error + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *UserInfo) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Username != "" + yyq2[1] = x.UID != "" + yyq2[2] = len(x.Groups) != 0 + yyq2[3] = len(x.Extra) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Username)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("username")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Username)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("uid")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Groups == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { 
+ } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("groups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Groups == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Extra == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("extra")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Extra == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *UserInfo) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *UserInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "username": + if r.TryDecodeAsNil() { + x.Username = "" + } else { + yyv4 := &x.Username + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "uid": + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv6 := &x.UID + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "groups": + if r.TryDecodeAsNil() { + x.Groups = nil + } else { + yyv8 := &x.Groups + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceStringX(yyv8, false, d) + } + } + case "extra": + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv10 := &x.Extra + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv10), d) + } + } + default: + 
z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *UserInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Username = "" + } else { + yyv13 := &x.Username + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv15 := &x.UID + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Groups = nil + } else { + yyv17 := &x.Groups + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + z.F.DecSliceStringX(yyv17, false, d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv19 := &x.Extra + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ExtraValue) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + h.encExtraValue((ExtraValue)(x), e) + } + } +} + +func (x *ExtraValue) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decExtraValue((*ExtraValue)(x), d) + } +} + +func (x codecSelfer1234) encMapstringExtraValue(v map[string]ExtraValue, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decMapstringExtraValue(v *map[string]ExtraValue, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string]ExtraValue, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 ExtraValue + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv4 := &yymv1 + yyv4.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv5 := &yymk1 + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv7 := &yymv1 + yyv7.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encExtraValue(v ExtraValue, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyv1)) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decExtraValue(v *ExtraValue, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]string, yyrl1) + } + } else { + yyv1 = make([]string, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + 
yyv4 := &yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 string + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv6 := &yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types.go new file mode 100644 index 0000000000..57c96e3bc4 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types.go @@ -0,0 +1,91 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient=true +// +nonNamespaced=true +// +noMethods=true + +// TokenReview attempts to authenticate a token to a known user. +// Note: TokenReview requests may be cached by the webhook token authenticator +// plugin in the kube-apiserver. +type TokenReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated + Spec TokenReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request can be authenticated. + // +optional + Status TokenReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// TokenReviewSpec is a description of the token authentication request. +type TokenReviewSpec struct { + // Token is the opaque bearer token. + // +optional + Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` +} + +// TokenReviewStatus is the result of the token authentication request. +type TokenReviewStatus struct { + // Authenticated indicates that the token was associated with a known user. + // +optional + Authenticated bool `json:"authenticated,omitempty" protobuf:"varint,1,opt,name=authenticated"` + // User is the UserInfo associated with the provided token. + // +optional + User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // Error indicates that the token couldn't be checked + // +optional + Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +type UserInfo struct { + // The name that uniquely identifies this user among all active users. 
+ // +optional + Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"` + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + // +optional + UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"` + // The names of groups this user is a part of. + // +optional + Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` + // Any additional information provided by the authenticator. + // +optional + Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,4,rep,name=extra"` +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type ExtraValue []string + +func (t ExtraValue) String() string { + return fmt.Sprintf("%v", []string(t)) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..f910bea6fc --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,72 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_TokenReview = map[string]string{ + "": "TokenReview attempts to authenticate a token to a known user. 
Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", + "spec": "Spec holds information about the request being evaluated", + "status": "Status is filled in by the server and indicates whether the request can be authenticated.", +} + +func (TokenReview) SwaggerDoc() map[string]string { + return map_TokenReview +} + +var map_TokenReviewSpec = map[string]string{ + "": "TokenReviewSpec is a description of the token authentication request.", + "token": "Token is the opaque bearer token.", +} + +func (TokenReviewSpec) SwaggerDoc() map[string]string { + return map_TokenReviewSpec +} + +var map_TokenReviewStatus = map[string]string{ + "": "TokenReviewStatus is the result of the token authentication request.", + "authenticated": "Authenticated indicates that the token was associated with a known user.", + "user": "User is the UserInfo associated with the provided token.", + "error": "Error indicates that the token couldn't be checked", +} + +func (TokenReviewStatus) SwaggerDoc() map[string]string { + return map_TokenReviewStatus +} + +var map_UserInfo = map[string]string{ + "": "UserInfo holds the information about the user needed to implement the user.Info interface.", + "username": "The name that uniquely identifies this user among all active users.", + "uid": "A unique value that identifies this user across time. If this user is deleted and another user by the same name is added, they will have different UIDs.", + "groups": "The names of groups this user is a part of.", + "extra": "Any additional information provided by the authenticator.", +} + +func (UserInfo) SwaggerDoc() map[string]string { + return map_UserInfo +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.conversion.go new file mode 100644 index 0000000000..5fc83362f8 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.conversion.go @@ -0,0 +1,145 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1beta1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + authentication "k8s.io/client-go/pkg/apis/authentication" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_TokenReview_To_authentication_TokenReview, + Convert_authentication_TokenReview_To_v1beta1_TokenReview, + Convert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec, + Convert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec, + Convert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus, + Convert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus, + Convert_v1beta1_UserInfo_To_authentication_UserInfo, + Convert_authentication_UserInfo_To_v1beta1_UserInfo, + ) +} + +func autoConvert_v1beta1_TokenReview_To_authentication_TokenReview(in *TokenReview, out *authentication.TokenReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_TokenReview_To_authentication_TokenReview(in *TokenReview, out *authentication.TokenReview, s conversion.Scope) error { + return autoConvert_v1beta1_TokenReview_To_authentication_TokenReview(in, out, s) +} + +func autoConvert_authentication_TokenReview_To_v1beta1_TokenReview(in *authentication.TokenReview, out *TokenReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authentication_TokenReview_To_v1beta1_TokenReview(in *authentication.TokenReview, out *TokenReview, s conversion.Scope) error { + return autoConvert_authentication_TokenReview_To_v1beta1_TokenReview(in, out, s) +} + +func autoConvert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error { + out.Token = in.Token + return nil +} + +func Convert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error { + return autoConvert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(in, out, s) +} + +func autoConvert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { + out.Token = in.Token + return nil +} + +func Convert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { + return autoConvert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in, out, s) +} + +func autoConvert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *TokenReviewStatus, out *authentication.TokenReviewStatus, s conversion.Scope) error { + out.Authenticated = in.Authenticated + if err := Convert_v1beta1_UserInfo_To_authentication_UserInfo(&in.User, &out.User, s); err != nil { + return err + } + out.Error = in.Error + return nil +} + +func Convert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *TokenReviewStatus, out *authentication.TokenReviewStatus, s conversion.Scope) error { + return 
autoConvert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus(in, out, s) +} + +func autoConvert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in *authentication.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { + out.Authenticated = in.Authenticated + if err := Convert_authentication_UserInfo_To_v1beta1_UserInfo(&in.User, &out.User, s); err != nil { + return err + } + out.Error = in.Error + return nil +} + +func Convert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in *authentication.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { + return autoConvert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in, out, s) +} + +func autoConvert_v1beta1_UserInfo_To_authentication_UserInfo(in *UserInfo, out *authentication.UserInfo, s conversion.Scope) error { + out.Username = in.Username + out.UID = in.UID + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]authentication.ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_v1beta1_UserInfo_To_authentication_UserInfo(in *UserInfo, out *authentication.UserInfo, s conversion.Scope) error { + return autoConvert_v1beta1_UserInfo_To_authentication_UserInfo(in, out, s) +} + +func autoConvert_authentication_UserInfo_To_v1beta1_UserInfo(in *authentication.UserInfo, out *UserInfo, s conversion.Scope) error { + out.Username = in.Username + out.UID = in.UID + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_authentication_UserInfo_To_v1beta1_UserInfo(in *authentication.UserInfo, out *UserInfo, s conversion.Scope) error { + return autoConvert_authentication_UserInfo_To_v1beta1_UserInfo(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..01260cc133 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,106 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_TokenReview, InType: reflect.TypeOf(&TokenReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_TokenReviewSpec, InType: reflect.TypeOf(&TokenReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_TokenReviewStatus, InType: reflect.TypeOf(&TokenReviewStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_UserInfo, InType: reflect.TypeOf(&UserInfo{})}, + ) +} + +func DeepCopy_v1beta1_TokenReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TokenReview) + out := out.(*TokenReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_TokenReviewStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_TokenReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TokenReviewSpec) + out := out.(*TokenReviewSpec) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_TokenReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TokenReviewStatus) + out := out.(*TokenReviewStatus) + *out = *in + if err := DeepCopy_v1beta1_UserInfo(&in.User, &out.User, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_UserInfo(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*UserInfo) + out := out.(*UserInfo) + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*ExtraValue) + } + } + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.defaults.go new file mode 100644 index 0000000000..e24e70be38 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/authentication/zz_generated.deepcopy.go new file mode 100644 index 0000000000..ec322c5f88 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/zz_generated.deepcopy.go @@ -0,0 +1,106 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package authentication + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authentication_TokenReview, InType: reflect.TypeOf(&TokenReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authentication_TokenReviewSpec, InType: reflect.TypeOf(&TokenReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authentication_TokenReviewStatus, InType: reflect.TypeOf(&TokenReviewStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authentication_UserInfo, InType: reflect.TypeOf(&UserInfo{})}, + ) +} + +func DeepCopy_authentication_TokenReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TokenReview) + out := out.(*TokenReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_authentication_TokenReviewStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_authentication_TokenReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TokenReviewSpec) + out := out.(*TokenReviewSpec) + *out = *in + return nil + } +} + +func DeepCopy_authentication_TokenReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TokenReviewStatus) + out := out.(*TokenReviewStatus) + *out = *in + if err := DeepCopy_authentication_UserInfo(&in.User, &out.User, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_authentication_UserInfo(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*UserInfo) + out := out.(*UserInfo) + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*ExtraValue) + 
} + } + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/OWNERS b/vendor/k8s.io/client-go/pkg/apis/authorization/OWNERS new file mode 100755 index 0000000000..2fef50443b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/OWNERS @@ -0,0 +1,17 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- liggitt +- nikhiljindal +- erictune +- sttts +- ncdc +- timothysc +- dims +- mml +- mbohlool +- david-mcmahon +- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/doc.go b/vendor/k8s.io/client-go/pkg/apis/authorization/doc.go new file mode 100644 index 0000000000..91344f6746 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=authorization.k8s.io +package authorization diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/install/install.go b/vendor/k8s.io/client-go/pkg/apis/authorization/install/install.go new file mode 100644 index 0000000000..33eee6618c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/install/install.go @@ -0,0 +1,53 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. 
+package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/authorization" + "k8s.io/client-go/pkg/apis/authorization/v1" + "k8s.io/client-go/pkg/apis/authorization/v1beta1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: authorization.GroupName, + VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version, v1beta1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/authorization", + RootScopedKinds: sets.NewString("SubjectAccessReview", "SelfSubjectAccessReview"), + AddInternalObjectsToScheme: authorization.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, + v1.SchemeGroupVersion.Version: v1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/register.go b/vendor/k8s.io/client-go/pkg/apis/authorization/register.go new file mode 100644 index 0000000000..5693885e4e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package authorization + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &SelfSubjectAccessReview{}, + &SubjectAccessReview{}, + &LocalSubjectAccessReview{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/types.go b/vendor/k8s.io/client-go/pkg/apis/authorization/types.go new file mode 100644 index 0000000000..d8ccfaf358 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/types.go @@ -0,0 +1,146 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authorization + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient=true +// +nonNamespaced=true +// +noMethods=true + +// SubjectAccessReview checks whether or not a user or group can perform an action. Not filling in a +// spec.namespace means "in all namespaces". +type SubjectAccessReview struct { + metav1.TypeMeta + metav1.ObjectMeta + + // Spec holds information about the request being evaluated + Spec SubjectAccessReviewSpec + + // Status is filled in by the server and indicates whether the request is allowed or not + Status SubjectAccessReviewStatus +} + +// +genclient=true +// +nonNamespaced=true +// +noMethods=true + +// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a +// spec.namespace means "in all namespaces". Self is a special case, because users should always be able +// to check whether they can perform an action +type SelfSubjectAccessReview struct { + metav1.TypeMeta + metav1.ObjectMeta + + // Spec holds information about the request being evaluated. + Spec SelfSubjectAccessReviewSpec + + // Status is filled in by the server and indicates whether the request is allowed or not + Status SubjectAccessReviewStatus +} + +// +genclient=true +// +noMethods=true + +// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. +// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions +// checking. 
+type LocalSubjectAccessReview struct { + metav1.TypeMeta + metav1.ObjectMeta + + // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace + // you made the request against. If empty, it is defaulted. + Spec SubjectAccessReviewSpec + + // Status is filled in by the server and indicates whether the request is allowed or not + Status SubjectAccessReviewStatus +} + +// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface +type ResourceAttributes struct { + // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces + // "" (empty) is defaulted for LocalSubjectAccessReviews + // "" (empty) is empty for cluster-scoped resources + // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + Namespace string + // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. + Verb string + // Group is the API Group of the Resource. "*" means all. + Group string + // Version is the API Version of the Resource. "*" means all. + Version string + // Resource is one of the existing resource types. "*" means all. + Resource string + // Subresource is one of the existing resource types. "" means none. + Subresource string + // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + Name string +} + +// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface +type NonResourceAttributes struct { + // Path is the URL path of the request + Path string + // Verb is the standard HTTP verb + Verb string +} + +// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAttributes +// and NonResourceAttributes must be set +type SubjectAccessReviewSpec struct { + // ResourceAttributes describes information for a resource access request + ResourceAttributes *ResourceAttributes + // NonResourceAttributes describes information for a non-resource access request + NonResourceAttributes *NonResourceAttributes + + // User is the user you're testing for. + // If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups + User string + // Groups is the groups you're testing for. + Groups []string + // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer + // it needs a reflection here. + Extra map[string]ExtraValue +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +type ExtraValue []string + +// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAttributes +// and NonResourceAttributes must be set +type SelfSubjectAccessReviewSpec struct { + // ResourceAttributes describes information for a resource access request + ResourceAttributes *ResourceAttributes + // NonResourceAttributes describes information for a non-resource access request + NonResourceAttributes *NonResourceAttributes +} + +// SubjectAccessReviewStatus +type SubjectAccessReviewStatus struct { + // Allowed is required. True if the action would be allowed, false otherwise. + Allowed bool + // Reason is optional. It indicates why a request was allowed or denied. 
+ Reason string + // EvaluationError is an indication that some error occurred during the authorization check. + // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. + // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request. + EvaluationError string +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/conversion.go new file mode 100644 index 0000000000..2ff5732d6d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/conversion.go @@ -0,0 +1,26 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + return scheme.AddConversionFuncs() +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/defaults.go new file mode 100644 index 0000000000..d63d917543 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/defaults.go @@ -0,0 +1,25 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return scheme.AddDefaultingFuncs() +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/doc.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/doc.go new file mode 100644 index 0000000000..41741dd537 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +groupName=authorization.k8s.io +package v1 diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/generated.pb.go new file mode 100644 index 0000000000..7bacc51693 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/generated.pb.go @@ -0,0 +1,2344 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/authorization/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/authorization/v1/generated.proto + + It has these top-level messages: + ExtraValue + LocalSubjectAccessReview + NonResourceAttributes + ResourceAttributes + SelfSubjectAccessReview + SelfSubjectAccessReviewSpec + SubjectAccessReview + SubjectAccessReviewSpec + SubjectAccessReviewStatus +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } +func (*LocalSubjectAccessReview) ProtoMessage() {} +func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } +func (*NonResourceAttributes) ProtoMessage() {} +func (*NonResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } +func (*ResourceAttributes) ProtoMessage() {} +func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } +func (*SelfSubjectAccessReview) ProtoMessage() {} +func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } +func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} +func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{5} +} + +func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } +func (*SubjectAccessReview) ProtoMessage() {} +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } +func (*SubjectAccessReviewSpec) ProtoMessage() {} +func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } +func (*SubjectAccessReviewStatus) ProtoMessage() {} +func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{8} +} + +func init() { + proto.RegisterType((*ExtraValue)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.ExtraValue") + proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.LocalSubjectAccessReview") + proto.RegisterType((*NonResourceAttributes)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.NonResourceAttributes") + proto.RegisterType((*ResourceAttributes)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.ResourceAttributes") + proto.RegisterType((*SelfSubjectAccessReview)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.SelfSubjectAccessReview") + proto.RegisterType((*SelfSubjectAccessReviewSpec)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.SelfSubjectAccessReviewSpec") + proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.SubjectAccessReview") + proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.SubjectAccessReviewSpec") + proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.SubjectAccessReviewStatus") +} +func (m ExtraValue) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m ExtraValue) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for 
_, s := range m { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *LocalSubjectAccessReview) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LocalSubjectAccessReview) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *NonResourceAttributes) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NonResourceAttributes) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Verb))) + i += copy(data[i:], m.Verb) + return i, nil +} + +func (m *ResourceAttributes) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceAttributes) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) + i += copy(data[i:], m.Namespace) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Verb))) + i += copy(data[i:], m.Verb) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Version))) + i += copy(data[i:], m.Version) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Resource))) + i += copy(data[i:], m.Resource) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Subresource))) + i += copy(data[i:], m.Subresource) + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + return i, nil +} + +func (m *SelfSubjectAccessReview) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SelfSubjectAccessReview) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + data[i] = 0x1a + i++ + i = 
encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n6, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + return i, nil +} + +func (m *SelfSubjectAccessReviewSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SelfSubjectAccessReviewSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ResourceAttributes != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ResourceAttributes.Size())) + n7, err := m.ResourceAttributes.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.NonResourceAttributes != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NonResourceAttributes.Size())) + n8, err := m.NonResourceAttributes.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func (m *SubjectAccessReview) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SubjectAccessReview) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n10, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n11, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + return i, nil +} + +func (m *SubjectAccessReviewSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SubjectAccessReviewSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ResourceAttributes != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ResourceAttributes.Size())) + n12, err := m.ResourceAttributes.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.NonResourceAttributes != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NonResourceAttributes.Size())) + n13, err := m.NonResourceAttributes.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.User))) + i += copy(data[i:], m.User) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + data[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Extra) > 0 { + for k := range m.Extra { + data[i] = 0x2a + i++ + v := m.Extra[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n14, err := (&v).MarshalTo(data[i:]) + if err != nil { 
+ return 0, err + } + i += n14 + } + } + return i, nil +} + +func (m *SubjectAccessReviewStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SubjectAccessReviewStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Allowed { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.EvaluationError))) + i += copy(data[i:], m.EvaluationError) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m ExtraValue) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LocalSubjectAccessReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NonResourceAttributes) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceAttributes) Size() (n int) { + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subresource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectAccessReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectAccessReviewSpec) Size() (n int) { + var l int + _ = l + if m.ResourceAttributes != nil { + l = m.ResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NonResourceAttributes != nil { + l = m.NonResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SubjectAccessReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m 
*SubjectAccessReviewSpec) Size() (n int) { + var l int + _ = l + if m.ResourceAttributes != nil { + l = m.ResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NonResourceAttributes != nil { + l = m.NonResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SubjectAccessReviewStatus) Size() (n int) { + var l int + _ = l + n += 2 + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.EvaluationError) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LocalSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalSubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NonResourceAttributes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NonResourceAttributes{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceAttributes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceAttributes{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectAccessReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, + `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", 
"ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReviewSpec) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&SubjectAccessReviewSpec{`, + `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReviewStatus{`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ExtraValue) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LocalSubjectAccessReview) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourceAttributes) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourceAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAttributes) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + 
var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresource = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectAccessReview) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectAccessReviewSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceAttributes == nil { + m.ResourceAttributes = &ResourceAttributes{} + } + if err := m.ResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NonResourceAttributes == nil { + m.NonResourceAttributes = &NonResourceAttributes{} + } + if err := m.NonResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReview) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReviewSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceAttributes == nil { + m.ResourceAttributes = &ResourceAttributes{} + } + if err := m.ResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NonResourceAttributes == nil { + m.NonResourceAttributes = &NonResourceAttributes{} + } + if err := m.NonResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { 
+ return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + m.Extra[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReviewStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := 
len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 904 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x56, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0xfa, 0x5f, 0xec, 0x09, 0x90, 0x32, 0x55, 0xc9, 0x36, 0x48, 0xb6, 0x65, 0x10, 0x0a, + 0xa2, 0xec, 0x92, 0xf2, 0xa7, 0x55, 0x39, 0xa0, 0xac, 0x08, 0x7f, 0x24, 0xda, 0xa2, 0x89, 0xc8, + 0x01, 0x2e, 0x8c, 0x37, 0x2f, 0xf6, 0xd6, 0xde, 0x9d, 0x65, 0x66, 0x76, 0xdb, 0x70, 0xea, 0x8d, + 0x2b, 0x12, 0x17, 0x8e, 0x7c, 0x05, 0x3e, 0x00, 0x9c, 0x73, 0xa3, 0x07, 0x24, 0x38, 0x20, 0x8b, + 0x2c, 0x17, 0x3e, 0x06, 0x9a, 0xd9, 0x89, 0x37, 0xc6, 0x6b, 0x2a, 0x43, 0x25, 0x7a, 0xe8, 0x6d, + 0xe7, 0xbd, 0xdf, 0xef, 0xbd, 0xdf, 0xbc, 0x79, 0xb3, 0x6f, 0xd0, 0xdb, 0xe3, 0xeb, 0xc2, 0x09, + 0x98, 0x3b, 0x4e, 0x06, 0xc0, 0x23, 0x90, 0x20, 0xdc, 0x78, 0x3c, 0x74, 0x69, 0x1c, 0x08, 0x97, + 0x26, 0x72, 0xc4, 0x78, 0xf0, 0x25, 0x95, 0x01, 0x8b, 0xdc, 0x74, 0xc7, 0x1d, 0x42, 0x04, 0x9c, + 0x4a, 0x38, 0x74, 0x62, 0xce, 0x24, 0xc3, 0xaf, 0xe4, 0x64, 0xa7, 0x20, 0x3b, 0xf1, 0x78, 0xe8, + 0x28, 0xb2, 0x33, 0x47, 0x76, 0xd2, 0x9d, 0xad, 0x57, 0x87, 0x81, 0x1c, 0x25, 0x03, 0xc7, 0x67, + 0xa1, 0x3b, 0x64, 0x43, 0xe6, 0xea, 0x18, 0x83, 0xe4, 0x48, 0xaf, 0xf4, 0x42, 0x7f, 0xe5, 0xb1, + 0xb7, 0xde, 0x30, 0xc2, 0x68, 0x1c, 0x84, 0xd4, 0x1f, 0x05, 0x11, 0xf0, 0xe3, 0x42, 0x5a, 0x08, + 0x92, 0x96, 0x28, 0xda, 0x72, 0x97, 0xb1, 0x78, 0x12, 0xc9, 0x20, 0x84, 0x05, 0xc2, 0x5b, 0x0f, + 0x23, 0x08, 0x7f, 0x04, 0x21, 0x5d, 0xe0, 0xbd, 0xbe, 0x8c, 0x97, 0xc8, 0x60, 0xe2, 0x06, 0x91, + 0x14, 0x92, 0x2f, 0x90, 0xce, 0xed, 0x49, 0x00, 0x4f, 0x81, 0x17, 0x1b, 0x82, 0x7b, 0x34, 0x8c, + 
0x27, 0x50, 0xb6, 0xa7, 0x2b, 0x4b, 0x8f, 0xa8, 0x04, 0xdd, 0xbf, 0x86, 0xd0, 0xde, 0x3d, 0xc9, + 0xe9, 0x01, 0x9d, 0x24, 0x80, 0xbb, 0xa8, 0x11, 0x48, 0x08, 0x85, 0x6d, 0xf5, 0x6a, 0xdb, 0x6d, + 0xaf, 0x9d, 0x4d, 0xbb, 0x8d, 0x0f, 0x95, 0x81, 0xe4, 0xf6, 0x1b, 0xad, 0x6f, 0xbf, 0xeb, 0x56, + 0xee, 0xff, 0xd6, 0xab, 0xf4, 0x7f, 0xae, 0x22, 0xfb, 0x23, 0xe6, 0xd3, 0xc9, 0x7e, 0x32, 0xb8, + 0x03, 0xbe, 0xdc, 0xf5, 0x7d, 0x10, 0x82, 0x40, 0x1a, 0xc0, 0x5d, 0xfc, 0x39, 0x6a, 0xa9, 0x92, + 0x1f, 0x52, 0x49, 0x6d, 0xab, 0x67, 0x6d, 0xaf, 0x5f, 0x7d, 0xcd, 0x31, 0x87, 0x7f, 0xbe, 0x02, + 0xc5, 0xf1, 0x2b, 0xb4, 0x93, 0xee, 0x38, 0xb7, 0x75, 0xac, 0x9b, 0x20, 0xa9, 0x87, 0x4f, 0xa6, + 0xdd, 0x4a, 0x36, 0xed, 0xa2, 0xc2, 0x46, 0x66, 0x51, 0xf1, 0x11, 0xaa, 0x8b, 0x18, 0x7c, 0xbb, + 0xaa, 0xa3, 0xbf, 0xeb, 0xac, 0xd0, 0x5a, 0x4e, 0x89, 0xe2, 0xfd, 0x18, 0x7c, 0xef, 0x29, 0x93, + 0xb1, 0xae, 0x56, 0x44, 0xc7, 0xc7, 0x11, 0x6a, 0x0a, 0x49, 0x65, 0x22, 0xec, 0x9a, 0xce, 0xf4, + 0xde, 0x7f, 0xce, 0xa4, 0xa3, 0x79, 0xcf, 0x98, 0x5c, 0xcd, 0x7c, 0x4d, 0x4c, 0x96, 0xfe, 0x67, + 0xe8, 0xd2, 0x2d, 0x16, 0x11, 0x10, 0x2c, 0xe1, 0x3e, 0xec, 0x4a, 0xc9, 0x83, 0x41, 0x22, 0x41, + 0xe0, 0x1e, 0xaa, 0xc7, 0x54, 0x8e, 0x74, 0x39, 0xdb, 0x85, 0xd4, 0x8f, 0xa9, 0x1c, 0x11, 0xed, + 0x51, 0x88, 0x14, 0xf8, 0x40, 0x97, 0xe4, 0x1c, 0xe2, 0x00, 0xf8, 0x80, 0x68, 0x4f, 0xff, 0xc7, + 0x2a, 0xc2, 0x25, 0xa1, 0x5d, 0xd4, 0x8e, 0x68, 0x08, 0x22, 0xa6, 0x3e, 0x98, 0xf8, 0xcf, 0x1a, + 0x76, 0xfb, 0xd6, 0x99, 0x83, 0x14, 0x98, 0x87, 0x67, 0xc2, 0x2f, 0xa0, 0xc6, 0x90, 0xb3, 0x24, + 0xd6, 0x55, 0x6b, 0x7b, 0x4f, 0x1b, 0x48, 0xe3, 0x7d, 0x65, 0x24, 0xb9, 0x0f, 0xbf, 0x8c, 0xd6, + 0x52, 0xe0, 0x22, 0x60, 0x91, 0x5d, 0xd7, 0xb0, 0x0d, 0x03, 0x5b, 0x3b, 0xc8, 0xcd, 0xe4, 0xcc, + 0x8f, 0xaf, 0xa0, 0x16, 0x37, 0xc2, 0xed, 0x86, 0xc6, 0x5e, 0x30, 0xd8, 0xd6, 0xd9, 0x86, 0xc8, + 0x0c, 0x81, 0xdf, 0x44, 0xeb, 0x22, 0x19, 0xcc, 0x08, 0x4d, 0x4d, 0xb8, 0x68, 0x08, 0xeb, 0xfb, + 0x85, 0x8b, 0x9c, 0xc7, 0xa9, 0x6d, 0xa9, 0x3d, 0xda, 0x6b, 0xf3, 0xdb, 0x52, 0x25, 0x20, 0xda, + 0xd3, 0xff, 0xa5, 0x8a, 0x36, 0xf7, 0x61, 0x72, 0xf4, 0xff, 0xf4, 0xfc, 0x9d, 0xb9, 0x9e, 0xff, + 0x60, 0xb5, 0x4e, 0x2c, 0x57, 0xfd, 0xd8, 0xf4, 0xfd, 0x0f, 0x55, 0xf4, 0xfc, 0x3f, 0x68, 0xc4, + 0x5f, 0x59, 0x08, 0xf3, 0x85, 0xd6, 0x35, 0x85, 0x7e, 0x67, 0x25, 0x71, 0x8b, 0x37, 0xc0, 0x7b, + 0x2e, 0x9b, 0x76, 0x4b, 0x6e, 0x06, 0x29, 0x49, 0x89, 0xbf, 0xb1, 0xd0, 0xa5, 0xa8, 0xec, 0x8a, + 0x9a, 0x73, 0xf1, 0x56, 0x12, 0x53, 0x7a, 0xd9, 0xbd, 0xcb, 0xd9, 0xb4, 0x5b, 0xfe, 0x1f, 0x20, + 0xe5, 0xb9, 0xfb, 0x3f, 0x55, 0xd1, 0xc5, 0x27, 0x7f, 0xe2, 0x47, 0xd9, 0x91, 0x7f, 0xd6, 0xd1, + 0xe6, 0x93, 0x6e, 0xfc, 0x57, 0xdd, 0x38, 0x1b, 0x10, 0xb5, 0xf9, 0x3f, 0xe9, 0x27, 0x02, 0xb8, + 0x19, 0x10, 0x7d, 0xd4, 0xd4, 0x43, 0x40, 0xd8, 0x75, 0xfd, 0xd4, 0x40, 0xea, 0x04, 0xf4, 0x74, + 0x10, 0xc4, 0x78, 0xb0, 0x44, 0x0d, 0x50, 0x6f, 0x13, 0xbb, 0xd1, 0xab, 0x6d, 0xaf, 0x5f, 0xbd, + 0xfd, 0x28, 0x5a, 0xcb, 0xd1, 0xaf, 0x9d, 0xbd, 0x48, 0xf2, 0xe3, 0x62, 0x2a, 0x69, 0x1b, 0xc9, + 0x93, 0x6d, 0x7d, 0x61, 0x5e, 0x44, 0x1a, 0x83, 0x2f, 0xa0, 0xda, 0x18, 0x8e, 0xf3, 0xa9, 0x48, + 0xd4, 0x27, 0xbe, 0x89, 0x1a, 0xa9, 0x7a, 0x2c, 0x99, 0x02, 0x5f, 0x5b, 0x49, 0x55, 0xf1, 0xd6, + 0x22, 0x79, 0x94, 0x1b, 0xd5, 0xeb, 0x56, 0xff, 0x7b, 0x0b, 0x5d, 0x5e, 0xda, 0xa0, 0x6a, 0x4c, + 0xd2, 0xc9, 0x84, 0xdd, 0x85, 0x43, 0x2d, 0xa3, 0x55, 0x8c, 0xc9, 0xdd, 0xdc, 0x4c, 0xce, 0xfc, + 0xf8, 0x25, 0xd4, 0xe4, 0x40, 0x05, 0x8b, 0xcc, 0x68, 0x9e, 0xf5, 0x36, 0xd1, 0x56, 0x62, 0xbc, + 0x78, 0x17, 0x6d, 0x80, 
0x4a, 0xaf, 0x75, 0xed, 0x71, 0xce, 0xb8, 0x39, 0xaa, 0x4d, 0x43, 0xd8, + 0xd8, 0x9b, 0x77, 0x93, 0xbf, 0xe3, 0xbd, 0x17, 0x4f, 0x4e, 0x3b, 0x95, 0x07, 0xa7, 0x9d, 0xca, + 0xaf, 0xa7, 0x9d, 0xca, 0xfd, 0xac, 0x63, 0x9d, 0x64, 0x1d, 0xeb, 0x41, 0xd6, 0xb1, 0x7e, 0xcf, + 0x3a, 0xd6, 0xd7, 0x7f, 0x74, 0x2a, 0x9f, 0x56, 0xd3, 0x9d, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, + 0x8a, 0xc4, 0x1f, 0xd5, 0x30, 0x0c, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/generated.proto new file mode 100644 index 0000000000..7036b8eb33 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/generated.proto @@ -0,0 +1,185 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.authorization.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message ExtraValue { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. +// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions +// checking. +message LocalSubjectAccessReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace + // you made the request against. If empty, it is defaulted. + optional SubjectAccessReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + optional SubjectAccessReviewStatus status = 3; +} + +// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface +message NonResourceAttributes { + // Path is the URL path of the request + // +optional + optional string path = 1; + + // Verb is the standard HTTP verb + // +optional + optional string verb = 2; +} + +// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface +message ResourceAttributes { + // Namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces + // "" (empty) is defaulted for LocalSubjectAccessReviews + // "" (empty) is empty for cluster-scoped resources + // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + // +optional + optional string namespace = 1; + + // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. + // +optional + optional string verb = 2; + + // Group is the API Group of the Resource. "*" means all. + // +optional + optional string group = 3; + + // Version is the API Version of the Resource. "*" means all. + // +optional + optional string version = 4; + + // Resource is one of the existing resource types. "*" means all. + // +optional + optional string resource = 5; + + // Subresource is one of the existing resource types. "" means none. + // +optional + optional string subresource = 6; + + // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + // +optional + optional string name = 7; +} + +// SelfSubjectAccessReview checks whether or not the current user can perform an action. Not filling in a +// spec.namespace means "in all namespaces". Self is a special case, because users should always be able +// to check whether they can perform an action. +message SelfSubjectAccessReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated. user and groups must be empty + optional SelfSubjectAccessReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + optional SubjectAccessReviewStatus status = 3; +} + +// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +message SelfSubjectAccessReviewSpec { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + optional ResourceAttributes resourceAttributes = 1; + + // NonResourceAttributes describes information for a non-resource access request + // +optional + optional NonResourceAttributes nonResourceAttributes = 2; +} + +// SubjectAccessReview checks whether or not a user or group can perform an action. +message SubjectAccessReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated + optional SubjectAccessReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + optional SubjectAccessReviewStatus status = 3; +} + +// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +message SubjectAccessReviewSpec { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + optional ResourceAttributes resourceAttributes = 1; + + // NonResourceAttributes describes information for a non-resource access request + // +optional + optional NonResourceAttributes nonResourceAttributes = 2; + + // User is the user you're testing for. 
+ // If you specify "User" but not "Groups", then is it interpreted as "What if User were not a member of any groups + // +optional + optional string verb = 3; + + // Groups is the groups you're testing for. + // +optional + repeated string groups = 4; + + // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer + // it needs a reflection here. + // +optional + map extra = 5; +} + +// SubjectAccessReviewStatus +message SubjectAccessReviewStatus { + // Allowed is required. True if the action would be allowed, false otherwise. + optional bool allowed = 1; + + // Reason is optional. It indicates why a request was allowed or denied. + // +optional + optional string reason = 2; + + // EvaluationError is an indication that some error occurred during the authorization check. + // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. + // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request. + // +optional + optional string evaluationError = 3; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/register.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/register.go new file mode 100644 index 0000000000..909bc0a7db --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &SelfSubjectAccessReview{}, + &SubjectAccessReview{}, + &LocalSubjectAccessReview{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} + +func (obj *LocalSubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } +func (obj *SubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } +func (obj *SelfSubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types.generated.go new file mode 100644 index 0000000000..2528afa8e8 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types.generated.go @@ -0,0 +1,3233 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package v1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 + } +} + +func (x *SubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = SubjectAccessReviewSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = SubjectAccessReviewSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SelfSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SelfSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SelfSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { 
+ yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = SelfSubjectAccessReviewSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SelfSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = SelfSubjectAccessReviewSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LocalSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, 
_, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LocalSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LocalSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = SubjectAccessReviewSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LocalSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 
= r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = SubjectAccessReviewSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Namespace != "" + yyq2[1] = x.Verb != "" + yyq2[2] = x.Group != "" + yyq2[3] = x.Version != "" + yyq2[4] = x.Resource != "" + yyq2[5] = x.Subresource != "" + yyq2[6] = x.Name != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("verb")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Group)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("group")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Group)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Version)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("version")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Version)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subresource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == 
codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv4 := &x.Namespace + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "verb": + if r.TryDecodeAsNil() { + x.Verb = "" + } else { + yyv6 := &x.Verb + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "group": + if r.TryDecodeAsNil() { + x.Group = "" + } else { + yyv8 := &x.Group + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "version": + if r.TryDecodeAsNil() { + x.Version = "" + } else { + yyv10 := &x.Version + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "resource": + if r.TryDecodeAsNil() { + x.Resource = "" + } else { + yyv12 := &x.Resource + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "subresource": + if r.TryDecodeAsNil() { + x.Subresource = "" + } else { + yyv14 := &x.Subresource + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv16 := &x.Name + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv19 := &x.Namespace + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.Verb = "" + } else { + yyv21 := &x.Verb + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Group = "" + } else { + yyv23 := &x.Group + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Version = "" + } else { + yyv25 := &x.Version + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Resource = "" + } else { + yyv27 := &x.Resource + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subresource = "" + } else { + yyv29 := &x.Subresource + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv31 := &x.Name + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NonResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Path != "" + yyq2[1] = x.Verb != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("verb")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NonResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NonResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "verb": + if r.TryDecodeAsNil() { + x.Verb = "" + } else { + yyv6 := &x.Verb + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NonResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv9 := &x.Path + yym10 := 
z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Verb = "" + } else { + yyv11 := &x.Verb + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ResourceAttributes != nil + yyq2[1] = x.NonResourceAttributes != nil + yyq2[2] = x.User != "" + yyq2[3] = len(x.Groups) != 0 + yyq2[4] = len(x.Extra) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.ResourceAttributes == nil { + r.EncodeNil() + } else { + x.ResourceAttributes.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceAttributes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceAttributes == nil { + r.EncodeNil() + } else { + x.ResourceAttributes.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.NonResourceAttributes == nil { + r.EncodeNil() + } else { + x.NonResourceAttributes.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nonResourceAttributes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NonResourceAttributes == nil { + r.EncodeNil() + } else { + x.NonResourceAttributes.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("user")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Groups == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + 
if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("groups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Groups == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Extra == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("extra")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Extra == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "resourceAttributes": + if r.TryDecodeAsNil() { + if x.ResourceAttributes != nil { + x.ResourceAttributes = nil + } + } else { + if x.ResourceAttributes == nil { + x.ResourceAttributes = new(ResourceAttributes) + } + x.ResourceAttributes.CodecDecodeSelf(d) + } + case "nonResourceAttributes": + if r.TryDecodeAsNil() { + if x.NonResourceAttributes != nil { + x.NonResourceAttributes = nil + } + } else { + if x.NonResourceAttributes == nil { + x.NonResourceAttributes = new(NonResourceAttributes) + } + x.NonResourceAttributes.CodecDecodeSelf(d) + } + case "user": + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv6 := &x.User + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "groups": + if r.TryDecodeAsNil() { + x.Groups = 
nil + } else { + yyv8 := &x.Groups + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceStringX(yyv8, false, d) + } + } + case "extra": + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv10 := &x.Extra + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ResourceAttributes != nil { + x.ResourceAttributes = nil + } + } else { + if x.ResourceAttributes == nil { + x.ResourceAttributes = new(ResourceAttributes) + } + x.ResourceAttributes.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NonResourceAttributes != nil { + x.NonResourceAttributes = nil + } + } else { + if x.NonResourceAttributes == nil { + x.NonResourceAttributes = new(NonResourceAttributes) + } + x.NonResourceAttributes.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv15 := &x.User + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Groups = nil + } else { + yyv17 := &x.Groups + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + z.F.DecSliceStringX(yyv17, false, d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv19 := &x.Extra + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ExtraValue) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + 
} else { + h.encExtraValue((ExtraValue)(x), e) + } + } +} + +func (x *ExtraValue) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decExtraValue((*ExtraValue)(x), d) + } +} + +func (x *SelfSubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ResourceAttributes != nil + yyq2[1] = x.NonResourceAttributes != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.ResourceAttributes == nil { + r.EncodeNil() + } else { + x.ResourceAttributes.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceAttributes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceAttributes == nil { + r.EncodeNil() + } else { + x.ResourceAttributes.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.NonResourceAttributes == nil { + r.EncodeNil() + } else { + x.NonResourceAttributes.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nonResourceAttributes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NonResourceAttributes == nil { + r.EncodeNil() + } else { + x.NonResourceAttributes.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SelfSubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + 
break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "resourceAttributes": + if r.TryDecodeAsNil() { + if x.ResourceAttributes != nil { + x.ResourceAttributes = nil + } + } else { + if x.ResourceAttributes == nil { + x.ResourceAttributes = new(ResourceAttributes) + } + x.ResourceAttributes.CodecDecodeSelf(d) + } + case "nonResourceAttributes": + if r.TryDecodeAsNil() { + if x.NonResourceAttributes != nil { + x.NonResourceAttributes = nil + } + } else { + if x.NonResourceAttributes == nil { + x.NonResourceAttributes = new(NonResourceAttributes) + } + x.NonResourceAttributes.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ResourceAttributes != nil { + x.ResourceAttributes = nil + } + } else { + if x.ResourceAttributes == nil { + x.ResourceAttributes = new(ResourceAttributes) + } + x.ResourceAttributes.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NonResourceAttributes != nil { + x.NonResourceAttributes = nil + } + } else { + if x.NonResourceAttributes == nil { + x.NonResourceAttributes = new(NonResourceAttributes) + } + x.NonResourceAttributes.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SubjectAccessReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Reason != "" + yyq2[2] = x.EvaluationError != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.Allowed)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("allowed")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = 
yym5 + if false { + } else { + r.EncodeBool(bool(x.Allowed)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.EvaluationError)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("evaluationError")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.EvaluationError)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SubjectAccessReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SubjectAccessReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "allowed": + if r.TryDecodeAsNil() { + x.Allowed = false + } else { + yyv4 := &x.Allowed + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*bool)(yyv4)) = r.DecodeBool() + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv6 := &x.Reason + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "evaluationError": + if r.TryDecodeAsNil() { + x.EvaluationError = "" + } else { + yyv8 := &x.EvaluationError + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + 
z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SubjectAccessReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Allowed = false + } else { + yyv11 := &x.Allowed + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv13 := &x.Reason + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.EvaluationError = "" + } else { + yyv15 := &x.EvaluationError + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encMapstringExtraValue(v map[string]ExtraValue, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decMapstringExtraValue(v *map[string]ExtraValue, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string]ExtraValue, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 ExtraValue + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv4 := &yymv1 + 
yyv4.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv5 := &yymk1 + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv7 := &yymv1 + yyv7.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encExtraValue(v ExtraValue, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyv1)) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decExtraValue(v *ExtraValue, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]string, yyrl1) + } + } else { + yyv1 = make([]string, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 string + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv6 := &yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types.go new file mode 100644 index 0000000000..38c314ffcf --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types.go @@ -0,0 +1,176 @@ +/* 
+Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient=true +// +nonNamespaced=true +// +noMethods=true + +// SubjectAccessReview checks whether or not a user or group can perform an action. +type SubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated + Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +genclient=true +// +nonNamespaced=true +// +noMethods=true + +// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a +// spec.namespace means "in all namespaces". Self is a special case, because users should always be able +// to check whether they can perform an action +type SelfSubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated. user and groups must be empty + Spec SelfSubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +genclient=true +// +noMethods=true + +// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. +// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions +// checking. +type LocalSubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace + // you made the request against. If empty, it is defaulted. + Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface +type ResourceAttributes struct { + // Namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces + // "" (empty) is defaulted for LocalSubjectAccessReviews + // "" (empty) is empty for cluster-scoped resources + // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` + // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. + // +optional + Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"` + // Group is the API Group of the Resource. "*" means all. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"` + // Version is the API Version of the Resource. "*" means all. + // +optional + Version string `json:"version,omitempty" protobuf:"bytes,4,opt,name=version"` + // Resource is one of the existing resource types. "*" means all. + // +optional + Resource string `json:"resource,omitempty" protobuf:"bytes,5,opt,name=resource"` + // Subresource is one of the existing resource types. "" means none. + // +optional + Subresource string `json:"subresource,omitempty" protobuf:"bytes,6,opt,name=subresource"` + // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"` +} + +// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface +type NonResourceAttributes struct { + // Path is the URL path of the request + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` + // Verb is the standard HTTP verb + // +optional + Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"` +} + +// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +type SubjectAccessReviewSpec struct { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"` + // NonResourceAttributes describes information for a non-resource access request + // +optional + NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"` + + // User is the user you're testing for. + // If you specify "User" but not "Groups", then is it interpreted as "What if User were not a member of any groups + // +optional + User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=verb"` + // Groups is the groups you're testing for. + // +optional + Groups []string `json:"groups,omitempty" protobuf:"bytes,4,rep,name=groups"` + // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer + // it needs a reflection here. + // +optional + Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,5,rep,name=extra"` +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type ExtraValue []string + +func (t ExtraValue) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +// SelfSubjectAccessReviewSpec is a description of the access request. 
Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +type SelfSubjectAccessReviewSpec struct { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"` + // NonResourceAttributes describes information for a non-resource access request + // +optional + NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"` +} + +// SubjectAccessReviewStatus +type SubjectAccessReviewStatus struct { + // Allowed is required. True if the action would be allowed, false otherwise. + Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` + // Reason is optional. It indicates why a request was allowed or denied. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` + // EvaluationError is an indication that some error occurred during the authorization check. + // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. + // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request. + // +optional + EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,3,opt,name=evaluationError"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..33c0035b48 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types_swagger_doc_generated.go @@ -0,0 +1,119 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_LocalSubjectAccessReview = map[string]string{ + "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", + "spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. 
If empty, it is defaulted.", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (LocalSubjectAccessReview) SwaggerDoc() map[string]string { + return map_LocalSubjectAccessReview +} + +var map_NonResourceAttributes = map[string]string{ + "": "NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface", + "path": "Path is the URL path of the request", + "verb": "Verb is the standard HTTP verb", +} + +func (NonResourceAttributes) SwaggerDoc() map[string]string { + return map_NonResourceAttributes +} + +var map_ResourceAttributes = map[string]string{ + "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", + "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", + "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "group": "Group is the API Group of the Resource. \"*\" means all.", + "version": "Version is the API Version of the Resource. \"*\" means all.", + "resource": "Resource is one of the existing resource types. \"*\" means all.", + "subresource": "Subresource is one of the existing resource types. \"\" means none.", + "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", +} + +func (ResourceAttributes) SwaggerDoc() map[string]string { + return map_ResourceAttributes +} + +var map_SelfSubjectAccessReview = map[string]string{ + "": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action", + "spec": "Spec holds information about the request being evaluated. user and groups must be empty", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (SelfSubjectAccessReview) SwaggerDoc() map[string]string { + return map_SelfSubjectAccessReview +} + +var map_SelfSubjectAccessReviewSpec = map[string]string{ + "": "SelfSubjectAccessReviewSpec is a description of the access request. 
Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", + "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", + "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", +} + +func (SelfSubjectAccessReviewSpec) SwaggerDoc() map[string]string { + return map_SelfSubjectAccessReviewSpec +} + +var map_SubjectAccessReview = map[string]string{ + "": "SubjectAccessReview checks whether or not a user or group can perform an action.", + "spec": "Spec holds information about the request being evaluated", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (SubjectAccessReview) SwaggerDoc() map[string]string { + return map_SubjectAccessReview +} + +var map_SubjectAccessReviewSpec = map[string]string{ + "": "SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", + "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", + "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", + "user": "User is the user you're testing for. If you specify \"User\" but not \"Groups\", then is it interpreted as \"What if User were not a member of any groups", + "groups": "Groups is the groups you're testing for.", + "extra": "Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.", +} + +func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { + return map_SubjectAccessReviewSpec +} + +var map_SubjectAccessReviewStatus = map[string]string{ + "": "SubjectAccessReviewStatus", + "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "reason": "Reason is optional. It indicates why a request was allowed or denied.", + "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.", +} + +func (SubjectAccessReviewStatus) SwaggerDoc() map[string]string { + return map_SubjectAccessReviewStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.conversion.go new file mode 100644 index 0000000000..92d1308440 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.conversion.go @@ -0,0 +1,263 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
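The swagger docs above restate the semantics from types.go: a review names either resource attributes or non-resource attributes, and the API server answers in Status. As a minimal sketch, not a definitive usage pattern, and assuming only the vendored authorization/v1 package from this diff, a caller could build a SelfSubjectAccessReview as below; the namespace, verb and resource values are illustrative, and in a real program the object is submitted to the API server, which fills in Status.

package main

import (
	"fmt"

	authzv1 "k8s.io/client-go/pkg/apis/authorization/v1"
)

func main() {
	// Ask "can the current user list services in the default namespace?".
	// Exactly one of ResourceAttributes or NonResourceAttributes is set.
	review := &authzv1.SelfSubjectAccessReview{
		Spec: authzv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authzv1.ResourceAttributes{
				Namespace: "default", // illustrative values
				Verb:      "list",
				Resource:  "services",
			},
		},
	}
	// The API server fills in Status when the review is submitted;
	// before that, Allowed is simply the zero value.
	fmt.Println(review.Status.Allowed, review.Status.Reason)
}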
+*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + authorization "k8s.io/client-go/pkg/apis/authorization" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview, + Convert_authorization_LocalSubjectAccessReview_To_v1_LocalSubjectAccessReview, + Convert_v1_NonResourceAttributes_To_authorization_NonResourceAttributes, + Convert_authorization_NonResourceAttributes_To_v1_NonResourceAttributes, + Convert_v1_ResourceAttributes_To_authorization_ResourceAttributes, + Convert_authorization_ResourceAttributes_To_v1_ResourceAttributes, + Convert_v1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview, + Convert_authorization_SelfSubjectAccessReview_To_v1_SelfSubjectAccessReview, + Convert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec, + Convert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec, + Convert_v1_SubjectAccessReview_To_authorization_SubjectAccessReview, + Convert_authorization_SubjectAccessReview_To_v1_SubjectAccessReview, + Convert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec, + Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec, + Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus, + Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus, + ) +} + +func autoConvert_v1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error { + return autoConvert_v1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in, out, s) +} + +func autoConvert_authorization_LocalSubjectAccessReview_To_v1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authorization_LocalSubjectAccessReview_To_v1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error { + return autoConvert_authorization_LocalSubjectAccessReview_To_v1_LocalSubjectAccessReview(in, out, s) +} + +func 
autoConvert_v1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error { + out.Path = in.Path + out.Verb = in.Verb + return nil +} + +func Convert_v1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error { + return autoConvert_v1_NonResourceAttributes_To_authorization_NonResourceAttributes(in, out, s) +} + +func autoConvert_authorization_NonResourceAttributes_To_v1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error { + out.Path = in.Path + out.Verb = in.Verb + return nil +} + +func Convert_authorization_NonResourceAttributes_To_v1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error { + return autoConvert_authorization_NonResourceAttributes_To_v1_NonResourceAttributes(in, out, s) +} + +func autoConvert_v1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Verb = in.Verb + out.Group = in.Group + out.Version = in.Version + out.Resource = in.Resource + out.Subresource = in.Subresource + out.Name = in.Name + return nil +} + +func Convert_v1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error { + return autoConvert_v1_ResourceAttributes_To_authorization_ResourceAttributes(in, out, s) +} + +func autoConvert_authorization_ResourceAttributes_To_v1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Verb = in.Verb + out.Group = in.Group + out.Version = in.Version + out.Resource = in.Resource + out.Subresource = in.Subresource + out.Name = in.Name + return nil +} + +func Convert_authorization_ResourceAttributes_To_v1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s conversion.Scope) error { + return autoConvert_authorization_ResourceAttributes_To_v1_ResourceAttributes(in, out, s) +} + +func autoConvert_v1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error { + return autoConvert_v1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in, out, s) +} + +func autoConvert_authorization_SelfSubjectAccessReview_To_v1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *SelfSubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := 
Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authorization_SelfSubjectAccessReview_To_v1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *SelfSubjectAccessReview, s conversion.Scope) error { + return autoConvert_authorization_SelfSubjectAccessReview_To_v1_SelfSubjectAccessReview(in, out, s) +} + +func autoConvert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error { + out.ResourceAttributes = (*authorization.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) + out.NonResourceAttributes = (*authorization.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) + return nil +} + +func Convert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error { + out.ResourceAttributes = (*ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) + out.NonResourceAttributes = (*NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) + return nil +} + +func Convert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_v1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error { + return autoConvert_v1_SubjectAccessReview_To_authorization_SubjectAccessReview(in, out, s) +} + +func autoConvert_authorization_SubjectAccessReview_To_v1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authorization_SubjectAccessReview_To_v1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error { + return autoConvert_authorization_SubjectAccessReview_To_v1_SubjectAccessReview(in, out, s) +} + +func 
autoConvert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error { + out.ResourceAttributes = (*authorization.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) + out.NonResourceAttributes = (*authorization.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) + out.User = in.User + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]authorization.ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error { + out.ResourceAttributes = (*ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) + out.NonResourceAttributes = (*NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) + out.User = in.User + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error { + out.Allowed = in.Allowed + out.Reason = in.Reason + out.EvaluationError = in.EvaluationError + return nil +} + +func Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error { + return autoConvert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in, out, s) +} + +func autoConvert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error { + out.Allowed = in.Allowed + out.Reason = in.Reason + out.EvaluationError = in.EvaluationError + return nil +} + +func Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error { + return autoConvert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..1f3199a358 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.deepcopy.go @@ -0,0 +1,179 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LocalSubjectAccessReview, InType: reflect.TypeOf(&LocalSubjectAccessReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NonResourceAttributes, InType: reflect.TypeOf(&NonResourceAttributes{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceAttributes, InType: reflect.TypeOf(&ResourceAttributes{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SelfSubjectAccessReview, InType: reflect.TypeOf(&SelfSubjectAccessReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SelfSubjectAccessReviewSpec, InType: reflect.TypeOf(&SelfSubjectAccessReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SubjectAccessReview, InType: reflect.TypeOf(&SubjectAccessReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SubjectAccessReviewSpec, InType: reflect.TypeOf(&SubjectAccessReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SubjectAccessReviewStatus, InType: reflect.TypeOf(&SubjectAccessReviewStatus{})}, + ) +} + +func DeepCopy_v1_LocalSubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LocalSubjectAccessReview) + out := out.(*LocalSubjectAccessReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_NonResourceAttributes(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NonResourceAttributes) + out := out.(*NonResourceAttributes) + *out = *in + return nil + } +} + +func DeepCopy_v1_ResourceAttributes(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceAttributes) + out := out.(*ResourceAttributes) + *out = *in + return nil + } +} + +func DeepCopy_v1_SelfSubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SelfSubjectAccessReview) + out := out.(*SelfSubjectAccessReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_SelfSubjectAccessReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SelfSubjectAccessReviewSpec) + out := out.(*SelfSubjectAccessReviewSpec) + *out = 
*in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + **out = **in + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_SubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectAccessReview) + out := out.(*SubjectAccessReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_SubjectAccessReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectAccessReviewSpec) + out := out.(*SubjectAccessReviewSpec) + *out = *in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + **out = **in + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + **out = **in + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*ExtraValue) + } + } + } + return nil + } +} + +func DeepCopy_v1_SubjectAccessReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectAccessReviewStatus) + out := out.(*SubjectAccessReviewStatus) + *out = *in + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.defaults.go new file mode 100644 index 0000000000..6df448eb9f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
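One detail worth noting in the deep-copy functions above: pointer fields such as ResourceAttributes can be cloned with **out = **in because they contain only strings, while Extra is copied entry by entry, since ExtraValue is a slice and a plain map copy would leave both maps sharing the same backing arrays. A small standalone sketch of that aliasing, using a local ExtraValue alias and an example key chosen here rather than the vendored package:

package main

import "fmt"

// ExtraValue mirrors the vendored type for this illustration only.
type ExtraValue []string

func main() {
	orig := map[string]ExtraValue{"scopes": {"read"}}

	// Copying entries by assignment creates a new map, but the slice
	// values still share their backing arrays with the original.
	shallow := make(map[string]ExtraValue, len(orig))
	for k, v := range orig {
		shallow[k] = v
	}

	// Cloning each value as well is roughly what the generated
	// DeepCopy_v1_SubjectAccessReviewSpec achieves for Extra.
	deep := make(map[string]ExtraValue, len(orig))
	for k, v := range orig {
		deep[k] = append(ExtraValue(nil), v...)
	}

	orig["scopes"][0] = "write"
	fmt.Println(shallow["scopes"][0]) // "write": aliased to the original
	fmt.Println(deep["scopes"][0])    // "read": independent copy
}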
+func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/conversion.go new file mode 100644 index 0000000000..c401383656 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/conversion.go @@ -0,0 +1,26 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + return scheme.AddConversionFuncs() +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/defaults.go new file mode 100644 index 0000000000..cb49b06ac2 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/defaults.go @@ -0,0 +1,25 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return scheme.AddDefaultingFuncs() +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/doc.go new file mode 100644 index 0000000000..738b0b6d29 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=authorization.k8s.io +package v1beta1 diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/generated.pb.go new file mode 100644 index 0000000000..65877a2ad7 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/generated.pb.go @@ -0,0 +1,2344 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/authorization/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/authorization/v1beta1/generated.proto + + It has these top-level messages: + ExtraValue + LocalSubjectAccessReview + NonResourceAttributes + ResourceAttributes + SelfSubjectAccessReview + SelfSubjectAccessReviewSpec + SubjectAccessReview + SubjectAccessReviewSpec + SubjectAccessReviewStatus +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } +func (*LocalSubjectAccessReview) ProtoMessage() {} +func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } +func (*NonResourceAttributes) ProtoMessage() {} +func (*NonResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } +func (*ResourceAttributes) ProtoMessage() {} +func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } +func (*SelfSubjectAccessReview) ProtoMessage() {} +func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } +func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} +func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{5} +} + +func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } +func (*SubjectAccessReview) ProtoMessage() {} +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } +func (*SubjectAccessReviewSpec) ProtoMessage() {} +func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *SubjectAccessReviewStatus) 
Reset() { *m = SubjectAccessReviewStatus{} } +func (*SubjectAccessReviewStatus) ProtoMessage() {} +func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{8} +} + +func init() { + proto.RegisterType((*ExtraValue)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.ExtraValue") + proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.LocalSubjectAccessReview") + proto.RegisterType((*NonResourceAttributes)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.NonResourceAttributes") + proto.RegisterType((*ResourceAttributes)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.ResourceAttributes") + proto.RegisterType((*SelfSubjectAccessReview)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.SelfSubjectAccessReview") + proto.RegisterType((*SelfSubjectAccessReviewSpec)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.SelfSubjectAccessReviewSpec") + proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.SubjectAccessReview") + proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.SubjectAccessReviewSpec") + proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.SubjectAccessReviewStatus") +} +func (m ExtraValue) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m ExtraValue) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *LocalSubjectAccessReview) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LocalSubjectAccessReview) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *NonResourceAttributes) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NonResourceAttributes) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Verb))) + i += copy(data[i:], m.Verb) + return i, nil +} + +func (m *ResourceAttributes) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} 
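The hand-written marshalers above follow the protobuf wire format directly: each field starts with a tag byte (field number shifted left by three and OR'd with the wire type, so 0xa is field 1 and 0x12 is field 2 for length-delimited data), followed by a varint-encoded length and the raw bytes. A small standalone sketch of the base-128 varint scheme that encodeVarintGenerated, defined later in this file, implements; putUvarint is a name chosen here for illustration, and encoding/binary.PutUvarint in the standard library produces the same bytes.

package main

import "fmt"

// putUvarint writes v in base-128 varint form: seven bits per byte,
// least-significant group first, high bit set on every byte but the last.
func putUvarint(buf []byte, v uint64) int {
	i := 0
	for v >= 1<<7 {
		buf[i] = uint8(v&0x7f | 0x80)
		v >>= 7
		i++
	}
	buf[i] = uint8(v)
	return i + 1
}

func main() {
	buf := make([]byte, 10)
	n := putUvarint(buf, 300)
	fmt.Printf("% x\n", buf[:n]) // prints "ac 02", the classic varint example
}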
+ +func (m *ResourceAttributes) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) + i += copy(data[i:], m.Namespace) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Verb))) + i += copy(data[i:], m.Verb) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Version))) + i += copy(data[i:], m.Version) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Resource))) + i += copy(data[i:], m.Resource) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Subresource))) + i += copy(data[i:], m.Subresource) + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + return i, nil +} + +func (m *SelfSubjectAccessReview) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SelfSubjectAccessReview) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n6, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + return i, nil +} + +func (m *SelfSubjectAccessReviewSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SelfSubjectAccessReviewSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ResourceAttributes != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ResourceAttributes.Size())) + n7, err := m.ResourceAttributes.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.NonResourceAttributes != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NonResourceAttributes.Size())) + n8, err := m.NonResourceAttributes.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func (m *SubjectAccessReview) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SubjectAccessReview) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n10, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n11, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + return i, nil +} + +func (m 
*SubjectAccessReviewSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SubjectAccessReviewSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ResourceAttributes != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ResourceAttributes.Size())) + n12, err := m.ResourceAttributes.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.NonResourceAttributes != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NonResourceAttributes.Size())) + n13, err := m.NonResourceAttributes.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.User))) + i += copy(data[i:], m.User) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + data[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Extra) > 0 { + for k := range m.Extra { + data[i] = 0x2a + i++ + v := m.Extra[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n14, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n14 + } + } + return i, nil +} + +func (m *SubjectAccessReviewStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SubjectAccessReviewStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Allowed { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.EvaluationError))) + i += copy(data[i:], m.EvaluationError) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m ExtraValue) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LocalSubjectAccessReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NonResourceAttributes) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceAttributes) Size() (n int) { + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subresource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectAccessReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectAccessReviewSpec) Size() (n int) { + var l int + _ = l + if m.ResourceAttributes != nil { + l = m.ResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NonResourceAttributes != nil { + l = m.NonResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SubjectAccessReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SubjectAccessReviewSpec) Size() (n int) { + var l int + _ = l + if m.ResourceAttributes != nil { + l = m.ResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NonResourceAttributes != nil { + l = m.NonResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SubjectAccessReviewStatus) Size() (n int) { + var l int + _ = l + n += 2 + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.EvaluationError) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LocalSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalSubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NonResourceAttributes) String() string { + if this == 
nil { + return "nil" + } + s := strings.Join([]string{`&NonResourceAttributes{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceAttributes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceAttributes{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectAccessReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, + `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReviewSpec) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&SubjectAccessReviewSpec{`, + `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReviewStatus) String() string { + if this 
== nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReviewStatus{`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ExtraValue) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LocalSubjectAccessReview) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourceAttributes) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourceAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err 
+ } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAttributes) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresource = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectAccessReview) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectAccessReviewSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceAttributes == nil { + m.ResourceAttributes = &ResourceAttributes{} + } + if err := m.ResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NonResourceAttributes == nil { + m.NonResourceAttributes = 
&NonResourceAttributes{} + } + if err := m.NonResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReview) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReviewSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceAttributes == nil { + m.ResourceAttributes = &ResourceAttributes{} + } + if err := m.ResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NonResourceAttributes == nil { + m.NonResourceAttributes = &NonResourceAttributes{} + } + if err := m.NonResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + m.Extra[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReviewStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + 
case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer 
overflow") +) + +var fileDescriptorGenerated = []byte{ + // 904 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x56, 0x4f, 0x6f, 0xdc, 0x44, + 0x14, 0x5f, 0xef, 0x9f, 0x64, 0x77, 0x02, 0xa4, 0x4c, 0x55, 0xe2, 0x06, 0x69, 0x77, 0xb5, 0x48, + 0x28, 0x95, 0x8a, 0xdd, 0x94, 0x7f, 0x55, 0xc5, 0x81, 0x58, 0x44, 0x55, 0x05, 0x2d, 0x68, 0x02, + 0x39, 0xc0, 0x85, 0xb1, 0xf3, 0xba, 0x6b, 0x76, 0xed, 0xb1, 0x66, 0xc6, 0x6e, 0xc3, 0xa9, 0x1f, + 0x80, 0x03, 0xc7, 0x1e, 0xf9, 0x0a, 0x7c, 0x01, 0xae, 0xe4, 0xd8, 0x23, 0x48, 0x68, 0x45, 0xcc, + 0xb7, 0xe0, 0x84, 0x66, 0x3c, 0xbb, 0xce, 0xb2, 0x0e, 0xd5, 0x42, 0x11, 0x1c, 0x72, 0xb3, 0xdf, + 0xfb, 0xbd, 0xf7, 0x7e, 0xef, 0xcd, 0x9b, 0x79, 0x0f, 0xbd, 0x3f, 0xbe, 0x25, 0x9c, 0x90, 0xb9, + 0xe3, 0xd4, 0x07, 0x1e, 0x83, 0x04, 0xe1, 0x26, 0xe3, 0xa1, 0x4b, 0x93, 0x50, 0xb8, 0x34, 0x95, + 0x23, 0xc6, 0xc3, 0xaf, 0xa9, 0x0c, 0x59, 0xec, 0x66, 0xbb, 0x3e, 0x48, 0xba, 0xeb, 0x0e, 0x21, + 0x06, 0x4e, 0x25, 0x1c, 0x39, 0x09, 0x67, 0x92, 0xe1, 0x1b, 0x85, 0x07, 0xa7, 0xf4, 0xe0, 0x24, + 0xe3, 0xa1, 0xa3, 0x3c, 0x38, 0x0b, 0x1e, 0x1c, 0xe3, 0x61, 0xfb, 0x8d, 0x61, 0x28, 0x47, 0xa9, + 0xef, 0x04, 0x2c, 0x72, 0x87, 0x6c, 0xc8, 0x5c, 0xed, 0xc8, 0x4f, 0x1f, 0xe8, 0x3f, 0xfd, 0xa3, + 0xbf, 0x8a, 0x00, 0xdb, 0x6f, 0x19, 0x8a, 0x34, 0x09, 0x23, 0x1a, 0x8c, 0xc2, 0x18, 0xf8, 0x71, + 0x49, 0x32, 0x02, 0x49, 0xdd, 0x6c, 0x89, 0xd6, 0xb6, 0x7b, 0x9e, 0x15, 0x4f, 0x63, 0x19, 0x46, + 0xb0, 0x64, 0xf0, 0xce, 0xb3, 0x0c, 0x44, 0x30, 0x82, 0x88, 0x2e, 0xd9, 0xbd, 0x79, 0x9e, 0x5d, + 0x2a, 0xc3, 0x89, 0x1b, 0xc6, 0x52, 0x48, 0xbe, 0x64, 0x74, 0x26, 0x27, 0x01, 0x3c, 0x03, 0x5e, + 0x26, 0x04, 0x8f, 0x68, 0x94, 0x4c, 0xa0, 0x2a, 0xa7, 0xeb, 0xe7, 0x1e, 0x56, 0x05, 0x7a, 0xf0, + 0x2e, 0x42, 0xfb, 0x8f, 0x24, 0xa7, 0x87, 0x74, 0x92, 0x02, 0xee, 0xa1, 0x56, 0x28, 0x21, 0x12, + 0xb6, 0xd5, 0x6f, 0xec, 0x74, 0xbc, 0x4e, 0x3e, 0xed, 0xb5, 0xee, 0x2a, 0x01, 0x29, 0xe4, 0xb7, + 0xdb, 0x4f, 0xbe, 0xeb, 0xd5, 0x1e, 0xff, 0xd2, 0xaf, 0x0d, 0xa6, 0x75, 0x64, 0x7f, 0xc4, 0x02, + 0x3a, 0x39, 0x48, 0xfd, 0xaf, 0x20, 0x90, 0x7b, 0x41, 0x00, 0x42, 0x10, 0xc8, 0x42, 0x78, 0x88, + 0xbf, 0x44, 0x6d, 0x55, 0xf2, 0x23, 0x2a, 0xa9, 0x6d, 0xf5, 0xad, 0x9d, 0x8d, 0x9b, 0x37, 0x1c, + 0xd3, 0x01, 0x67, 0x2b, 0x50, 0xf6, 0x80, 0x42, 0x3b, 0xd9, 0xae, 0xf3, 0xb1, 0xf6, 0x75, 0x0f, + 0x24, 0xf5, 0xf0, 0xc9, 0xb4, 0x57, 0xcb, 0xa7, 0x3d, 0x54, 0xca, 0xc8, 0xdc, 0x2b, 0x1e, 0xa3, + 0xa6, 0x48, 0x20, 0xb0, 0xeb, 0xda, 0xfb, 0x5d, 0x67, 0xd5, 0xfe, 0x72, 0x2a, 0x68, 0x1f, 0x24, + 0x10, 0x78, 0x2f, 0x98, 0xb0, 0x4d, 0xf5, 0x47, 0x74, 0x10, 0x2c, 0xd0, 0x9a, 0x90, 0x54, 0xa6, + 0xc2, 0x6e, 0xe8, 0x70, 0x1f, 0x3e, 0x9f, 0x70, 0xda, 0xa5, 0xf7, 0x92, 0x09, 0xb8, 0x56, 0xfc, + 0x13, 0x13, 0x6a, 0xf0, 0x05, 0xba, 0x72, 0x9f, 0xc5, 0x04, 0x04, 0x4b, 0x79, 0x00, 0x7b, 0x52, + 0xf2, 0xd0, 0x4f, 0x25, 0x08, 0xdc, 0x47, 0xcd, 0x84, 0xca, 0x91, 0x2e, 0x6c, 0xa7, 0xe4, 0xfb, + 0x09, 0x95, 0x23, 0xa2, 0x35, 0x0a, 0x91, 0x01, 0xf7, 0x75, 0x71, 0xce, 0x20, 0x0e, 0x81, 0xfb, + 0x44, 0x6b, 0x06, 0x3f, 0xd4, 0x11, 0xae, 0x70, 0xed, 0xa2, 0x4e, 0x4c, 0x23, 0x10, 0x09, 0x0d, + 0xc0, 0xf8, 0x7f, 0xd9, 0x58, 0x77, 0xee, 0xcf, 0x14, 0xa4, 0xc4, 0x3c, 0x3b, 0x12, 0x7e, 0x0d, + 0xb5, 0x86, 0x9c, 0xa5, 0x89, 0x2e, 0x5d, 0xc7, 0x7b, 0xd1, 0x40, 0x5a, 0x77, 0x94, 0x90, 0x14, + 0x3a, 0x7c, 0x0d, 0xad, 0x67, 0xc0, 0x45, 0xc8, 0x62, 0xbb, 0xa9, 0x61, 0x9b, 0x06, 0xb6, 0x7e, + 0x58, 0x88, 0xc9, 0x4c, 0x8f, 0xaf, 0xa3, 0x36, 0x37, 0xc4, 0xed, 0x96, 0xc6, 0x5e, 0x32, 0xd8, + 0xf6, 0x2c, 0x21, 
0x32, 0x47, 0xe0, 0xb7, 0xd1, 0x86, 0x48, 0xfd, 0xb9, 0xc1, 0x9a, 0x36, 0xb8, + 0x6c, 0x0c, 0x36, 0x0e, 0x4a, 0x15, 0x39, 0x8b, 0x53, 0x69, 0xa9, 0x1c, 0xed, 0xf5, 0xc5, 0xb4, + 0x54, 0x09, 0x88, 0xd6, 0x0c, 0x4e, 0xeb, 0x68, 0xeb, 0x00, 0x26, 0x0f, 0xfe, 0x9b, 0xee, 0x67, + 0x0b, 0xdd, 0x7f, 0xef, 0x6f, 0xb4, 0x63, 0x35, 0xf5, 0xff, 0xd7, 0x0d, 0xf8, 0xb1, 0x8e, 0x5e, + 0xfd, 0x0b, 0xa2, 0xf8, 0x1b, 0x0b, 0x61, 0xbe, 0xd4, 0xc4, 0xa6, 0xe4, 0x1f, 0xac, 0xce, 0x70, + 0xf9, 0x42, 0x78, 0xaf, 0xe4, 0xd3, 0x5e, 0xc5, 0x45, 0x21, 0x15, 0x71, 0xf1, 0x13, 0x0b, 0x5d, + 0x89, 0xab, 0x6e, 0xac, 0x39, 0xa6, 0x3b, 0xab, 0x33, 0xaa, 0x7c, 0x00, 0xbc, 0xab, 0xf9, 0xb4, + 0x57, 0xfd, 0x36, 0x90, 0x6a, 0x02, 0x83, 0x9f, 0xeb, 0xe8, 0xf2, 0xc5, 0x3b, 0xfd, 0xef, 0x74, + 0xe9, 0xef, 0x4d, 0xb4, 0x75, 0xd1, 0xa1, 0xff, 0xb0, 0x43, 0xe7, 0x83, 0xa4, 0xb1, 0xf8, 0xe2, + 0x7e, 0x26, 0x80, 0x9b, 0x41, 0xd2, 0x9f, 0x0d, 0x92, 0xa6, 0xde, 0x4d, 0x90, 0x3a, 0x0a, 0x3d, + 0x44, 0xc4, 0x6c, 0x8a, 0x1c, 0xa3, 0x16, 0xa8, 0x5d, 0xc6, 0x6e, 0xf5, 0x1b, 0x3b, 0x1b, 0x37, + 0x3f, 0x7d, 0x6e, 0xcd, 0xe6, 0xe8, 0x15, 0x69, 0x3f, 0x96, 0xfc, 0xb8, 0x1c, 0x60, 0x5a, 0x46, + 0x8a, 0x88, 0xdb, 0x99, 0x59, 0xa3, 0x34, 0x06, 0x5f, 0x42, 0x8d, 0x31, 0x1c, 0x17, 0x03, 0x94, + 0xa8, 0x4f, 0x4c, 0x50, 0x2b, 0x53, 0x1b, 0x96, 0x29, 0xf4, 0x7b, 0xab, 0x53, 0x2b, 0xb7, 0x34, + 0x52, 0xb8, 0xba, 0x5d, 0xbf, 0x65, 0x0d, 0xbe, 0xb7, 0xd0, 0xd5, 0x73, 0x5b, 0x56, 0x8d, 0x55, + 0x3a, 0x99, 0xb0, 0x87, 0x70, 0xa4, 0xb9, 0xb4, 0xcb, 0xb1, 0xba, 0x57, 0x88, 0xc9, 0x4c, 0x8f, + 0x5f, 0x47, 0x6b, 0x1c, 0xa8, 0x60, 0xb1, 0x19, 0xe5, 0xf3, 0x6e, 0x27, 0x5a, 0x4a, 0x8c, 0x16, + 0xef, 0xa1, 0x4d, 0x50, 0xe1, 0x35, 0xb9, 0x7d, 0xce, 0x19, 0x37, 0x47, 0xb6, 0x65, 0x0c, 0x36, + 0xf7, 0x17, 0xd5, 0xe4, 0xcf, 0x78, 0xef, 0xda, 0xc9, 0x69, 0xb7, 0xf6, 0xf4, 0xb4, 0x5b, 0xfb, + 0xe9, 0xb4, 0x5b, 0x7b, 0x9c, 0x77, 0xad, 0x93, 0xbc, 0x6b, 0x3d, 0xcd, 0xbb, 0xd6, 0xaf, 0x79, + 0xd7, 0xfa, 0xf6, 0xb7, 0x6e, 0xed, 0xf3, 0x75, 0x93, 0xf4, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x38, 0xbb, 0x77, 0xc4, 0x79, 0x0c, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/generated.proto new file mode 100644 index 0000000000..6b77db4623 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/generated.proto @@ -0,0 +1,185 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
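
The generated.proto that follows documents the access-review types these generated files implement. As a hedged usage sketch (not part of the vendored diff: it only uses the SelfSubjectAccessReview, SelfSubjectAccessReviewSpec and ResourceAttributes types plus the Marshal method from generated.pb.go above; the import alias and the "kube-system"/"pods"/"list" values are illustrative), this is how a "can the current user list pods in kube-system?" request object is built:

package main

import (
	"fmt"

	authorizationv1beta1 "k8s.io/client-go/pkg/apis/authorization/v1beta1"
)

func main() {
	// The question is phrased with the ResourceAttributes fields
	// documented in the proto below.
	sar := &authorizationv1beta1.SelfSubjectAccessReview{
		Spec: authorizationv1beta1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1beta1.ResourceAttributes{
				Namespace: "kube-system",
				Verb:      "list",
				Group:     "", // "" is the core API group
				Resource:  "pods",
			},
		},
	}

	// The Marshal/Size methods from generated.pb.go produce the protobuf
	// wire bytes; Status.Allowed is only filled in once the API server
	// has evaluated the review.
	wire, err := sar.Spec.ResourceAttributes.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d wire bytes, allowed (unset until the server answers): %v\n", len(wire), sar.Status.Allowed)
}
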
+
+syntax = 'proto2';
+
+package k8s.io.kubernetes.pkg.apis.authorization.v1beta1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
+import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto";
+import "k8s.io/kubernetes/pkg/api/v1/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta1";
+
+// ExtraValue masks the value so protobuf can generate
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message ExtraValue {
+  // items, if empty, will result in an empty slice
+
+  repeated string items = 1;
+}
+
+// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace.
+// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions
+// checking.
+message LocalSubjectAccessReview {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace
+  // you made the request against. If empty, it is defaulted.
+  optional SubjectAccessReviewSpec spec = 2;
+
+  // Status is filled in by the server and indicates whether the request is allowed or not
+  // +optional
+  optional SubjectAccessReviewStatus status = 3;
+}
+
+// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface
+message NonResourceAttributes {
+  // Path is the URL path of the request
+  // +optional
+  optional string path = 1;
+
+  // Verb is the standard HTTP verb
+  // +optional
+  optional string verb = 2;
+}
+
+// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface
+message ResourceAttributes {
+  // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces
+  // "" (empty) is defaulted for LocalSubjectAccessReviews
+  // "" (empty) is empty for cluster-scoped resources
+  // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview
+  // +optional
+  optional string namespace = 1;
+
+  // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all.
+  // +optional
+  optional string verb = 2;
+
+  // Group is the API Group of the Resource. "*" means all.
+  // +optional
+  optional string group = 3;
+
+  // Version is the API Version of the Resource. "*" means all.
+  // +optional
+  optional string version = 4;
+
+  // Resource is one of the existing resource types. "*" means all.
+  // +optional
+  optional string resource = 5;
+
+  // Subresource is one of the existing resource types. "" means none.
+  // +optional
+  optional string subresource = 6;
+
+  // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all.
+  // +optional
+  optional string name = 7;
+}
+
+// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a
+// spec.namespace means "in all namespaces". Self is a special case, because users should always be able
+// to check whether they can perform an action
+message SelfSubjectAccessReview {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec holds information about the request being evaluated. user and groups must be empty
+  optional SelfSubjectAccessReviewSpec spec = 2;
+
+  // Status is filled in by the server and indicates whether the request is allowed or not
+  // +optional
+  optional SubjectAccessReviewStatus status = 3;
+}
+
+// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes
+// and NonResourceAuthorizationAttributes must be set
+message SelfSubjectAccessReviewSpec {
+  // ResourceAuthorizationAttributes describes information for a resource access request
+  // +optional
+  optional ResourceAttributes resourceAttributes = 1;
+
+  // NonResourceAttributes describes information for a non-resource access request
+  // +optional
+  optional NonResourceAttributes nonResourceAttributes = 2;
+}
+
+// SubjectAccessReview checks whether or not a user or group can perform an action.
+message SubjectAccessReview {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec holds information about the request being evaluated
+  optional SubjectAccessReviewSpec spec = 2;
+
+  // Status is filled in by the server and indicates whether the request is allowed or not
+  // +optional
+  optional SubjectAccessReviewStatus status = 3;
+}
+
+// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes
+// and NonResourceAuthorizationAttributes must be set
+message SubjectAccessReviewSpec {
+  // ResourceAuthorizationAttributes describes information for a resource access request
+  // +optional
+  optional ResourceAttributes resourceAttributes = 1;
+
+  // NonResourceAttributes describes information for a non-resource access request
+  // +optional
+  optional NonResourceAttributes nonResourceAttributes = 2;
+
+  // User is the user you're testing for.
+  // If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups
+  // +optional
+  optional string verb = 3;
+
+  // Groups is the groups you're testing for.
+  // +optional
+  repeated string group = 4;
+
+  // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer
+  // it needs a reflection here.
+  // +optional
+  map<string, ExtraValue> extra = 5;
+}
+
+// SubjectAccessReviewStatus
+message SubjectAccessReviewStatus {
+  // Allowed is required. True if the action would be allowed, false otherwise.
+  optional bool allowed = 1;
+
+  // Reason is optional. It indicates why a request was allowed or denied.
+  // +optional
+  optional string reason = 2;
+
+  // EvaluationError is an indication that some error occurred during the authorization check.
+  // It is entirely possible to get an error and be able to continue determine authorization status in spite of it.
+  // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.
+  // +optional
+  optional string evaluationError = 3;
+}
+
diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/register.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/register.go
new file mode 100644
index 0000000000..66549ed832
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/register.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name use in this package
+const GroupName = "authorization.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs)
+	AddToScheme   = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&SelfSubjectAccessReview{},
+		&SubjectAccessReview{},
+		&LocalSubjectAccessReview{},
+	)
+
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
+
+func (obj *LocalSubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta }
+func (obj *SubjectAccessReview) GetObjectKind() schema.ObjectKind      { return &obj.TypeMeta }
+func (obj *SelfSubjectAccessReview) GetObjectKind() schema.ObjectKind  { return &obj.TypeMeta }
diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types.generated.go
new file mode 100644
index 0000000000..939603c58a
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types.generated.go
@@ -0,0 +1,3233 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************ + +package v1beta1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 + } +} + +func (x *SubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } 
else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = SubjectAccessReviewSpec{} + } else { + 
yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = SubjectAccessReviewSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SelfSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for 
_, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SelfSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SelfSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = SelfSubjectAccessReviewSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SelfSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + 
if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = SelfSubjectAccessReviewSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LocalSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } 
+ } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LocalSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LocalSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = SubjectAccessReviewSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LocalSubjectAccessReview) codecDecodeSelfFromArray(l int, d 
*codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = SubjectAccessReviewSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Namespace != "" + yyq2[1] = x.Verb != "" + yyq2[2] = x.Group != "" + yyq2[3] = x.Version != "" + yyq2[4] = x.Resource != "" + yyq2[5] = x.Subresource != "" + yyq2[6] = x.Name != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + 
} else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("verb")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Group)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("group")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Group)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Version)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("version")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Version)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subresource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv4 := &x.Namespace + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "verb": + if r.TryDecodeAsNil() { + x.Verb = "" + } else { + yyv6 := &x.Verb + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "group": + if r.TryDecodeAsNil() { + x.Group = "" + } else { + yyv8 := &x.Group + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "version": + if r.TryDecodeAsNil() { + x.Version = "" + } else { + yyv10 := &x.Version + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "resource": + if r.TryDecodeAsNil() { + x.Resource = "" + } else { + yyv12 := &x.Resource + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "subresource": + if r.TryDecodeAsNil() { + x.Subresource = "" + } else { + yyv14 := &x.Subresource + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv16 := &x.Name + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } 
+ default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv19 := &x.Namespace + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Verb = "" + } else { + yyv21 := &x.Verb + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Group = "" + } else { + yyv23 := &x.Group + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Version = "" + } else { + yyv25 := &x.Version + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Resource = "" + } else { + yyv27 := &x.Resource + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subresource = "" + } else { + yyv29 := &x.Subresource + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv31 := &x.Name + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NonResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Path != "" + yyq2[1] = x.Verb != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("verb")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NonResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NonResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "path": + if 
r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "verb": + if r.TryDecodeAsNil() { + x.Verb = "" + } else { + yyv6 := &x.Verb + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NonResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv9 := &x.Path + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Verb = "" + } else { + yyv11 := &x.Verb + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ResourceAttributes != nil + yyq2[1] = x.NonResourceAttributes != nil + yyq2[2] = x.User != "" + yyq2[3] = len(x.Groups) != 0 + yyq2[4] = len(x.Extra) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.ResourceAttributes == nil { + r.EncodeNil() + } else { + x.ResourceAttributes.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceAttributes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceAttributes == nil { + r.EncodeNil() + } else { + x.ResourceAttributes.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.NonResourceAttributes == nil { + r.EncodeNil() + } else { + x.NonResourceAttributes.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("nonResourceAttributes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NonResourceAttributes == nil { + r.EncodeNil() + } else { + x.NonResourceAttributes.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("user")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Groups == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("group")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Groups == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Extra == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("extra")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Extra == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { 
+ if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "resourceAttributes": + if r.TryDecodeAsNil() { + if x.ResourceAttributes != nil { + x.ResourceAttributes = nil + } + } else { + if x.ResourceAttributes == nil { + x.ResourceAttributes = new(ResourceAttributes) + } + x.ResourceAttributes.CodecDecodeSelf(d) + } + case "nonResourceAttributes": + if r.TryDecodeAsNil() { + if x.NonResourceAttributes != nil { + x.NonResourceAttributes = nil + } + } else { + if x.NonResourceAttributes == nil { + x.NonResourceAttributes = new(NonResourceAttributes) + } + x.NonResourceAttributes.CodecDecodeSelf(d) + } + case "user": + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv6 := &x.User + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "group": + if r.TryDecodeAsNil() { + x.Groups = nil + } else { + yyv8 := &x.Groups + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceStringX(yyv8, false, d) + } + } + case "extra": + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv10 := &x.Extra + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ResourceAttributes != nil { + x.ResourceAttributes = nil + } + } else { + if x.ResourceAttributes == nil { + x.ResourceAttributes = new(ResourceAttributes) + } + x.ResourceAttributes.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NonResourceAttributes != nil { + x.NonResourceAttributes = nil + } + } else { + if x.NonResourceAttributes == nil { + x.NonResourceAttributes = new(NonResourceAttributes) + } + x.NonResourceAttributes.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv15 := &x.User + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Groups = nil + } else { + yyv17 := &x.Groups + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + 
z.F.DecSliceStringX(yyv17, false, d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv19 := &x.Extra + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ExtraValue) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + h.encExtraValue((ExtraValue)(x), e) + } + } +} + +func (x *ExtraValue) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decExtraValue((*ExtraValue)(x), d) + } +} + +func (x *SelfSubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ResourceAttributes != nil + yyq2[1] = x.NonResourceAttributes != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.ResourceAttributes == nil { + r.EncodeNil() + } else { + x.ResourceAttributes.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceAttributes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceAttributes == nil { + r.EncodeNil() + } else { + x.ResourceAttributes.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.NonResourceAttributes == nil { + r.EncodeNil() + } else { + x.NonResourceAttributes.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nonResourceAttributes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NonResourceAttributes == nil { + r.EncodeNil() + } else { + x.NonResourceAttributes.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SelfSubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { + 
var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "resourceAttributes": + if r.TryDecodeAsNil() { + if x.ResourceAttributes != nil { + x.ResourceAttributes = nil + } + } else { + if x.ResourceAttributes == nil { + x.ResourceAttributes = new(ResourceAttributes) + } + x.ResourceAttributes.CodecDecodeSelf(d) + } + case "nonResourceAttributes": + if r.TryDecodeAsNil() { + if x.NonResourceAttributes != nil { + x.NonResourceAttributes = nil + } + } else { + if x.NonResourceAttributes == nil { + x.NonResourceAttributes = new(NonResourceAttributes) + } + x.NonResourceAttributes.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ResourceAttributes != nil { + x.ResourceAttributes = nil + } + } else { + if x.ResourceAttributes == nil { + x.ResourceAttributes = new(ResourceAttributes) + } + x.ResourceAttributes.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NonResourceAttributes != nil { + x.NonResourceAttributes = nil + } + } else { + if x.NonResourceAttributes == nil { + x.NonResourceAttributes = new(NonResourceAttributes) + } + x.NonResourceAttributes.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SubjectAccessReviewStatus) 
CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Reason != "" + yyq2[2] = x.EvaluationError != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.Allowed)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("allowed")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeBool(bool(x.Allowed)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.EvaluationError)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("evaluationError")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.EvaluationError)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SubjectAccessReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SubjectAccessReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = 
z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "allowed": + if r.TryDecodeAsNil() { + x.Allowed = false + } else { + yyv4 := &x.Allowed + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*bool)(yyv4)) = r.DecodeBool() + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv6 := &x.Reason + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "evaluationError": + if r.TryDecodeAsNil() { + x.EvaluationError = "" + } else { + yyv8 := &x.EvaluationError + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SubjectAccessReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Allowed = false + } else { + yyv11 := &x.Allowed + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv13 := &x.Reason + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.EvaluationError = "" + } else { + yyv15 := &x.EvaluationError + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encMapstringExtraValue(v map[string]ExtraValue, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decMapstringExtraValue(v *map[string]ExtraValue, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string]ExtraValue, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 ExtraValue + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv4 := &yymv1 + yyv4.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv5 := &yymk1 + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv7 := &yymv1 + yyv7.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encExtraValue(v ExtraValue, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyv1)) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decExtraValue(v *ExtraValue, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]string, yyrl1) + } + } else { + yyv1 = make([]string, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + 
yyv4 := &yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 string + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv6 := &yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types.go new file mode 100644 index 0000000000..8a1727423b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types.go @@ -0,0 +1,176 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient=true +// +nonNamespaced=true +// +noMethods=true + +// SubjectAccessReview checks whether or not a user or group can perform an action. +type SubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated + Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +genclient=true +// +nonNamespaced=true +// +noMethods=true + +// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a +// spec.namespace means "in all namespaces". Self is a special case, because users should always be able +// to check whether they can perform an action +type SelfSubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated. user and groups must be empty + Spec SelfSubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +genclient=true +// +noMethods=true + +// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. 
+// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions +// checking. +type LocalSubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace + // you made the request against. If empty, it is defaulted. + Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface +type ResourceAttributes struct { + // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces + // "" (empty) is defaulted for LocalSubjectAccessReviews + // "" (empty) is empty for cluster-scoped resources + // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` + // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. + // +optional + Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"` + // Group is the API Group of the Resource. "*" means all. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"` + // Version is the API Version of the Resource. "*" means all. + // +optional + Version string `json:"version,omitempty" protobuf:"bytes,4,opt,name=version"` + // Resource is one of the existing resource types. "*" means all. + // +optional + Resource string `json:"resource,omitempty" protobuf:"bytes,5,opt,name=resource"` + // Subresource is one of the existing resource types. "" means none. + // +optional + Subresource string `json:"subresource,omitempty" protobuf:"bytes,6,opt,name=subresource"` + // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"` +} + +// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface +type NonResourceAttributes struct { + // Path is the URL path of the request + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` + // Verb is the standard HTTP verb + // +optional + Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"` +} + +// SubjectAccessReviewSpec is a description of the access request. 
Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +type SubjectAccessReviewSpec struct { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"` + // NonResourceAttributes describes information for a non-resource access request + // +optional + NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"` + + // User is the user you're testing for. + // If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups + // +optional + User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=verb"` + // Groups is the groups you're testing for. + // +optional + Groups []string `json:"group,omitempty" protobuf:"bytes,4,rep,name=group"` + // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer + // it needs a reflection here. + // +optional + Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,5,rep,name=extra"` +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type ExtraValue []string + +func (t ExtraValue) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +type SelfSubjectAccessReviewSpec struct { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"` + // NonResourceAttributes describes information for a non-resource access request + // +optional + NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"` +} + +// SubjectAccessReviewStatus +type SubjectAccessReviewStatus struct { + // Allowed is required. True if the action would be allowed, false otherwise. + Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` + // Reason is optional. It indicates why a request was allowed or denied. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` + // EvaluationError is an indication that some error occurred during the authorization check. + // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. + // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request. + // +optional + EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,3,opt,name=evaluationError"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..8e521ba16f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,119 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_LocalSubjectAccessReview = map[string]string{ + "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", + "spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. If empty, it is defaulted.", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (LocalSubjectAccessReview) SwaggerDoc() map[string]string { + return map_LocalSubjectAccessReview +} + +var map_NonResourceAttributes = map[string]string{ + "": "NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface", + "path": "Path is the URL path of the request", + "verb": "Verb is the standard HTTP verb", +} + +func (NonResourceAttributes) SwaggerDoc() map[string]string { + return map_NonResourceAttributes +} + +var map_ResourceAttributes = map[string]string{ + "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", + "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", + "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "group": "Group is the API Group of the Resource. \"*\" means all.", + "version": "Version is the API Version of the Resource. \"*\" means all.", + "resource": "Resource is one of the existing resource types. \"*\" means all.", + "subresource": "Subresource is one of the existing resource types. \"\" means none.", + "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". 
\"\" (empty) means all.", +} + +func (ResourceAttributes) SwaggerDoc() map[string]string { + return map_ResourceAttributes +} + +var map_SelfSubjectAccessReview = map[string]string{ + "": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action", + "spec": "Spec holds information about the request being evaluated. user and groups must be empty", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (SelfSubjectAccessReview) SwaggerDoc() map[string]string { + return map_SelfSubjectAccessReview +} + +var map_SelfSubjectAccessReviewSpec = map[string]string{ + "": "SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", + "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", + "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", +} + +func (SelfSubjectAccessReviewSpec) SwaggerDoc() map[string]string { + return map_SelfSubjectAccessReviewSpec +} + +var map_SubjectAccessReview = map[string]string{ + "": "SubjectAccessReview checks whether or not a user or group can perform an action.", + "spec": "Spec holds information about the request being evaluated", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (SubjectAccessReview) SwaggerDoc() map[string]string { + return map_SubjectAccessReview +} + +var map_SubjectAccessReviewSpec = map[string]string{ + "": "SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", + "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", + "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", + "user": "User is the user you're testing for. If you specify \"User\" but not \"Group\", then is it interpreted as \"What if User were not a member of any groups", + "group": "Groups is the groups you're testing for.", + "extra": "Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.", +} + +func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { + return map_SubjectAccessReviewSpec +} + +var map_SubjectAccessReviewStatus = map[string]string{ + "": "SubjectAccessReviewStatus", + "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "reason": "Reason is optional. It indicates why a request was allowed or denied.", + "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. 
For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.", +} + +func (SubjectAccessReviewStatus) SwaggerDoc() map[string]string { + return map_SubjectAccessReviewStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.conversion.go new file mode 100644 index 0000000000..7702201d56 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.conversion.go @@ -0,0 +1,263 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1beta1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + authorization "k8s.io/client-go/pkg/apis/authorization" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview, + Convert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview, + Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes, + Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes, + Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes, + Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes, + Convert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview, + Convert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview, + Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec, + Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec, + Convert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview, + Convert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview, + Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec, + Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec, + Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus, + Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus, + ) +} + +func autoConvert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := 
Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error { + return autoConvert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in, out, s) +} + +func autoConvert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error { + return autoConvert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in, out, s) +} + +func autoConvert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error { + out.Path = in.Path + out.Verb = in.Verb + return nil +} + +func Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error { + return autoConvert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in, out, s) +} + +func autoConvert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error { + out.Path = in.Path + out.Verb = in.Verb + return nil +} + +func Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error { + return autoConvert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in, out, s) +} + +func autoConvert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Verb = in.Verb + out.Group = in.Group + out.Version = in.Version + out.Resource = in.Resource + out.Subresource = in.Subresource + out.Name = in.Name + return nil +} + +func Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error { + return autoConvert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in, out, s) +} + +func autoConvert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Verb = in.Verb + out.Group = in.Group + out.Version = in.Version + out.Resource = in.Resource + out.Subresource = in.Subresource + out.Name = in.Name + return nil +} + +func Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s 
conversion.Scope) error { + return autoConvert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in, out, s) +} + +func autoConvert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error { + return autoConvert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in, out, s) +} + +func autoConvert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *SelfSubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *SelfSubjectAccessReview, s conversion.Scope) error { + return autoConvert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in, out, s) +} + +func autoConvert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error { + out.ResourceAttributes = (*authorization.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) + out.NonResourceAttributes = (*authorization.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) + return nil +} + +func Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error { + out.ResourceAttributes = (*ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) + out.NonResourceAttributes = (*NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) + return nil +} + +func Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := 
Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error { + return autoConvert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in, out, s) +} + +func autoConvert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error { + return autoConvert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in, out, s) +} + +func autoConvert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error { + out.ResourceAttributes = (*authorization.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) + out.NonResourceAttributes = (*authorization.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) + out.User = in.User + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]authorization.ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error { + out.ResourceAttributes = (*ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) + out.NonResourceAttributes = (*NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) + out.User = in.User + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error { + out.Allowed = in.Allowed + out.Reason = in.Reason + out.EvaluationError = in.EvaluationError + return nil +} + +func Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out 
*authorization.SubjectAccessReviewStatus, s conversion.Scope) error { + return autoConvert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in, out, s) +} + +func autoConvert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error { + out.Allowed = in.Allowed + out.Reason = in.Reason + out.EvaluationError = in.EvaluationError + return nil +} + +func Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error { + return autoConvert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..2bbd414ad2 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,179 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_LocalSubjectAccessReview, InType: reflect.TypeOf(&LocalSubjectAccessReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NonResourceAttributes, InType: reflect.TypeOf(&NonResourceAttributes{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ResourceAttributes, InType: reflect.TypeOf(&ResourceAttributes{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SelfSubjectAccessReview, InType: reflect.TypeOf(&SelfSubjectAccessReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SelfSubjectAccessReviewSpec, InType: reflect.TypeOf(&SelfSubjectAccessReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SubjectAccessReview, InType: reflect.TypeOf(&SubjectAccessReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SubjectAccessReviewSpec, InType: reflect.TypeOf(&SubjectAccessReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SubjectAccessReviewStatus, InType: reflect.TypeOf(&SubjectAccessReviewStatus{})}, + ) +} + +func DeepCopy_v1beta1_LocalSubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LocalSubjectAccessReview) + out := out.(*LocalSubjectAccessReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_NonResourceAttributes(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NonResourceAttributes) + out := out.(*NonResourceAttributes) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_ResourceAttributes(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceAttributes) + out := out.(*ResourceAttributes) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_SelfSubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SelfSubjectAccessReview) + out := out.(*SelfSubjectAccessReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_SelfSubjectAccessReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SelfSubjectAccessReviewSpec) + out := out.(*SelfSubjectAccessReviewSpec) + *out = *in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + **out = **in + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + **out = **in + } + return nil + } +} + +func DeepCopy_v1beta1_SubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectAccessReview) + out := out.(*SubjectAccessReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + 
+func DeepCopy_v1beta1_SubjectAccessReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectAccessReviewSpec) + out := out.(*SubjectAccessReviewSpec) + *out = *in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + **out = **in + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + **out = **in + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*ExtraValue) + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_SubjectAccessReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectAccessReviewStatus) + out := out.(*SubjectAccessReviewStatus) + *out = *in + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.defaults.go new file mode 100644 index 0000000000..e24e70be38 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/authorization/zz_generated.deepcopy.go new file mode 100644 index 0000000000..19ccebdaaa --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/zz_generated.deepcopy.go @@ -0,0 +1,179 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package authorization + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_LocalSubjectAccessReview, InType: reflect.TypeOf(&LocalSubjectAccessReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_NonResourceAttributes, InType: reflect.TypeOf(&NonResourceAttributes{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_ResourceAttributes, InType: reflect.TypeOf(&ResourceAttributes{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_SelfSubjectAccessReview, InType: reflect.TypeOf(&SelfSubjectAccessReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_SelfSubjectAccessReviewSpec, InType: reflect.TypeOf(&SelfSubjectAccessReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_SubjectAccessReview, InType: reflect.TypeOf(&SubjectAccessReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_SubjectAccessReviewSpec, InType: reflect.TypeOf(&SubjectAccessReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_SubjectAccessReviewStatus, InType: reflect.TypeOf(&SubjectAccessReviewStatus{})}, + ) +} + +func DeepCopy_authorization_LocalSubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LocalSubjectAccessReview) + out := out.(*LocalSubjectAccessReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_authorization_NonResourceAttributes(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NonResourceAttributes) + out := out.(*NonResourceAttributes) + *out = *in + return nil + } +} + +func DeepCopy_authorization_ResourceAttributes(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceAttributes) + out := out.(*ResourceAttributes) + *out = *in + return nil + } +} + +func DeepCopy_authorization_SelfSubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SelfSubjectAccessReview) + out := out.(*SelfSubjectAccessReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_authorization_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_authorization_SelfSubjectAccessReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SelfSubjectAccessReviewSpec) + out := out.(*SelfSubjectAccessReviewSpec) + *out = *in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + **out = **in + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + **out = **in + } + 
return nil + } +} + +func DeepCopy_authorization_SubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectAccessReview) + out := out.(*SubjectAccessReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_authorization_SubjectAccessReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectAccessReviewSpec) + out := out.(*SubjectAccessReviewSpec) + *out = *in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + **out = **in + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + **out = **in + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*ExtraValue) + } + } + } + return nil + } +} + +func DeepCopy_authorization_SubjectAccessReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectAccessReviewStatus) + out := out.(*SubjectAccessReviewStatus) + *out = *in + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/OWNERS b/vendor/k8s.io/client-go/pkg/apis/autoscaling/OWNERS new file mode 100755 index 0000000000..9fbc54e93a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/OWNERS @@ -0,0 +1,20 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- caesarxuchao +- erictune +- sttts +- ncdc +- timothysc +- piosz +- dims +- errordeveloper +- madhusudancs +- krousey +- mml +- mbohlool +- david-mcmahon +- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/annotations.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/annotations.go new file mode 100644 index 0000000000..4c377561bb --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/annotations.go @@ -0,0 +1,30 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package autoscaling + +// MetricSpecsAnnotation is the annotation which holds non-CPU-utilization HPA metric +// specs when converting the `Metrics` field from autoscaling/v2alpha1 +const MetricSpecsAnnotation = "autoscaling.alpha.kubernetes.io/metrics" + +// MetricStatusesAnnotation is the annotation which holds non-CPU-utilization HPA metric +// statuses when converting the `CurrentMetrics` field from autoscaling/v2alpha1 +const MetricStatusesAnnotation = "autoscaling.alpha.kubernetes.io/current-metrics" + +// DefaultCPUUtilization is the default value for CPU utilization, provided no other +// metrics are present. This is here because it's used by both the v2alpha1 defaulting +// logic, and the pseudo-defaulting done in v1 conversion. +const DefaultCPUUtilization = 80 diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/doc.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/doc.go new file mode 100644 index 0000000000..83ac827674 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaling diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/install/install.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/install/install.go new file mode 100644 index 0000000000..0ecc0af9b7 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/install/install.go @@ -0,0 +1,51 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. 
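
> Reviewer note on the annotation constants added above in annotations.go: when a HorizontalPodAutoscaler with v2alpha1-style metrics is converted down to autoscaling/v1, any metric specs that v1 cannot express are JSON-encoded into these annotations and restored on the way back (see conversion.go further down in this diff). Below is a minimal, standalone sketch of that round trip under simplified assumptions: `MetricSpec`, `packMetrics`, and `unpackMetrics` are illustrative stand-ins, not identifiers from the vendored package.

```go
// Sketch only: shows the annotation round trip for non-CPU metric specs.
// The types and function names are simplified stand-ins for illustration.
package main

import (
	"encoding/json"
	"fmt"
)

const metricSpecsAnnotation = "autoscaling.alpha.kubernetes.io/metrics"

// MetricSpec is a simplified stand-in for the real autoscaling type.
type MetricSpec struct {
	Type       string `json:"type"`
	MetricName string `json:"metricName,omitempty"`
}

// packMetrics stores non-CPU metric specs in the object's annotations.
func packMetrics(annotations map[string]string, specs []MetricSpec) (map[string]string, error) {
	if len(specs) == 0 {
		return annotations, nil
	}
	enc, err := json.Marshal(specs)
	if err != nil {
		return nil, err
	}
	if annotations == nil {
		annotations = map[string]string{}
	}
	annotations[metricSpecsAnnotation] = string(enc)
	return annotations, nil
}

// unpackMetrics restores the specs and removes the bookkeeping annotation.
func unpackMetrics(annotations map[string]string) ([]MetricSpec, error) {
	enc, ok := annotations[metricSpecsAnnotation]
	if !ok {
		return nil, nil
	}
	var specs []MetricSpec
	if err := json.Unmarshal([]byte(enc), &specs); err != nil {
		return nil, err
	}
	delete(annotations, metricSpecsAnnotation)
	return specs, nil
}

func main() {
	ann, _ := packMetrics(nil, []MetricSpec{{Type: "Pods", MetricName: "qps"}})
	specs, _ := unpackMetrics(ann)
	fmt.Println(specs) // [{Pods qps}]
}
```

> The vendored conversion in this diff does the same thing with the real types, and additionally preserves any pre-existing annotations before adding the key and deletes it again after decoding.
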
+package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/autoscaling" + "k8s.io/client-go/pkg/apis/autoscaling/v1" + "k8s.io/client-go/pkg/apis/autoscaling/v2alpha1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: autoscaling.GroupName, + VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version, v2alpha1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/autoscaling", + AddInternalObjectsToScheme: autoscaling.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1.SchemeGroupVersion.Version: v1.AddToScheme, + v2alpha1.SchemeGroupVersion.Version: v2alpha1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/register.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/register.go new file mode 100644 index 0000000000..2bcea84b9b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaling + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "autoscaling" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Scale{}, + &HorizontalPodAutoscaler{}, + &HorizontalPodAutoscalerList{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/types.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/types.go new file mode 100644 index 0000000000..39f206b1f1 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/types.go @@ -0,0 +1,305 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaling + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api" +) + +// Scale represents a scaling request for a resource. +type Scale struct { + metav1.TypeMeta + // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta + + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + Spec ScaleSpec + + // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // +optional + Status ScaleStatus +} + +// ScaleSpec describes the attributes of a scale subresource. +type ScaleSpec struct { + // desired number of instances for the scaled object. + // +optional + Replicas int32 +} + +// ScaleStatus represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 + + // label query over pods that should match the replicas count. This is same + // as the label selector but in the string format to avoid introspection + // by clients. The string will be in the same format as the query-param syntax. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector string +} + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +type CrossVersionObjectReference struct { + // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" + Kind string + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string + // API version of the referent + // +optional + APIVersion string +} + +// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. +type HorizontalPodAutoscalerSpec struct { + // ScaleTargetRef points to the target resource to scale, and is used to the pods for which metrics + // should be collected, as well as to actually change the replica count. + ScaleTargetRef CrossVersionObjectReference + // MinReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. + // It defaults to 1 pod. + // +optional + MinReplicas *int32 + // MaxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. + // It cannot be less that minReplicas. + MaxReplicas int32 + // Metrics contains the specifications for which to use to calculate the + // desired replica count (the maximum replica count across all metrics will + // be used). The desired replica count is calculated multiplying the + // ratio between the target value and the current value by the current + // number of pods. Ergo, metrics used must decrease as the pod count is + // increased, and vice-versa. 
See the individual metric source types for + // more information about how each type of metric must respond. + // +optional + Metrics []MetricSpec +} + +// MetricSourceType indicates the type of metric. +type MetricSourceType string + +var ( + // ObjectMetricSourceType is a metric describing a kubernetes object + // (for example, hits-per-second on an Ingress object). + ObjectMetricSourceType MetricSourceType = "Object" + // PodsMetricSourceType is a metric describing each pod in the current scale + // target (for example, transactions-processed-per-second). The values + // will be averaged together before being compared to the target value. + PodsMetricSourceType MetricSourceType = "Pods" + // ResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ResourceMetricSourceType MetricSourceType = "Resource" +) + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +type MetricSpec struct { + // Type is the type of metric source. It should match one of the fields below. + Type MetricSourceType + + // Object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricSource + // Pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricSource + // Resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricSource +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricSource struct { + // Target is the described Kubernetes object. + Target CrossVersionObjectReference + + // MetricName is the name of the metric in question. + MetricName string + // TargetValue is the target value of the metric (as a quantity). + TargetValue resource.Quantity +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +type PodsMetricSource struct { + // MetricName is the name of the metric in question + MetricName string + // TargetAverageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + TargetAverageValue resource.Quantity +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. 
Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ResourceMetricSource struct { + // Name is the name of the resource in question. + Name api.ResourceName + // TargetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + TargetAverageUtilization *int32 + // TargetAverageValue is the the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + TargetAverageValue *resource.Quantity +} + +// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. +type HorizontalPodAutoscalerStatus struct { + // ObservedGeneration is the most recent generation observed by this autoscaler. + // +optional + ObservedGeneration *int64 + + // LastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, + // used by the autoscaler to control how often the number of pods is changed. + // +optional + LastScaleTime *metav1.Time + + // CurrentReplicas is current number of replicas of pods managed by this autoscaler, + // as last seen by the autoscaler. + CurrentReplicas int32 + + // DesiredReplicas is the desired number of replicas of pods managed by this autoscaler, + // as last calculated by the autoscaler. + DesiredReplicas int32 + + // CurrentMetrics is the last read state of the metrics used by this autoscaler. + CurrentMetrics []MetricStatus +} + +// MetricStatus describes the last-read state of a single metric. +type MetricStatus struct { + // Type is the type of metric source. It will match one of the fields below. + Type MetricSourceType + + // Object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricStatus + // Pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricStatus + // Resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricStatus +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricStatus struct { + // Target is the described Kubernetes object. + Target CrossVersionObjectReference + + // MetricName is the name of the metric in question. + MetricName string + // CurrentValue is the current value of the metric (as a quantity). + CurrentValue resource.Quantity +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). 
+type PodsMetricStatus struct { + // MetricName is the name of the metric in question + MetricName string + // CurrentAverageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + CurrentAverageValue resource.Quantity +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ResourceMetricStatus struct { + // Name is the name of the resource in question. + Name api.ResourceName + // CurrentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + CurrentAverageUtilization *int32 + // CurrentAverageValue is the the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + CurrentAverageValue resource.Quantity +} + +// +genclient=true + +// HorizontalPodAutoscaler is the configuration for a horizontal pod +// autoscaler, which automatically manages the replica count of any resource +// implementing the scale subresource based on the metrics specified. +type HorizontalPodAutoscaler struct { + metav1.TypeMeta + // Metadata is the standard object metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // Spec is the specification for the behaviour of the autoscaler. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + Spec HorizontalPodAutoscalerSpec + + // Status is the current information about the autoscaler. + // +optional + Status HorizontalPodAutoscalerStatus +} + +// HorizontalPodAutoscaler is a list of horizontal pod autoscaler objects. +type HorizontalPodAutoscalerList struct { + metav1.TypeMeta + // Metadata is the standard list metadata. + // +optional + metav1.ListMeta + + // Items is the list of horizontal pod autoscaler objects. + Items []HorizontalPodAutoscaler +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/conversion.go new file mode 100644 index 0000000000..6b8306922f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/conversion.go @@ -0,0 +1,244 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "encoding/json" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/autoscaling" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + err := scheme.AddConversionFuncs( + Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler, + Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler, + Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec, + Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec, + Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus, + Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus, + ) + if err != nil { + return err + } + + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { + if err := autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in, out, s); err != nil { + return err + } + + otherMetrics := make([]MetricSpec, 0, len(in.Spec.Metrics)) + for _, metric := range in.Spec.Metrics { + if metric.Type == autoscaling.ResourceMetricSourceType && metric.Resource != nil && metric.Resource.Name == api.ResourceCPU && metric.Resource.TargetAverageUtilization != nil { + continue + } + + convMetric := MetricSpec{} + if err := Convert_autoscaling_MetricSpec_To_v1_MetricSpec(&metric, &convMetric, s); err != nil { + return err + } + otherMetrics = append(otherMetrics, convMetric) + } + + // NB: we need to save the status even if it maps to a CPU utilization status in order to save the raw value as well + currentMetrics := make([]MetricStatus, len(in.Status.CurrentMetrics)) + for i, currentMetric := range in.Status.CurrentMetrics { + if err := Convert_autoscaling_MetricStatus_To_v1_MetricStatus(¤tMetric, ¤tMetrics[i], s); err != nil { + return err + } + } + + if len(otherMetrics) > 0 || len(in.Status.CurrentMetrics) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)+2) + if old != nil { + for k, v := range old { + out.Annotations[k] = v + } + } + } + + if len(otherMetrics) > 0 { + otherMetricsEnc, err := json.Marshal(otherMetrics) + if err != nil { + return err + } + out.Annotations[autoscaling.MetricSpecsAnnotation] = string(otherMetricsEnc) + } + + if len(in.Status.CurrentMetrics) > 0 { + currentMetricsEnc, err := json.Marshal(currentMetrics) + if err != nil { + return err + } + out.Annotations[autoscaling.MetricStatusesAnnotation] = string(currentMetricsEnc) + } + + return nil +} + +func Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { + if err := autoConvert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s); err != nil { + return err + } + + if otherMetricsEnc, hasOtherMetrics := out.Annotations[autoscaling.MetricSpecsAnnotation]; hasOtherMetrics { + var otherMetrics []MetricSpec + if err := json.Unmarshal([]byte(otherMetricsEnc), &otherMetrics); err != nil { + return err + } + + // the normal Spec conversion could have populated out.Spec.Metrics with a single element, so deal with that + outMetrics := make([]autoscaling.MetricSpec, len(otherMetrics)+len(out.Spec.Metrics)) + for i, metric := range 
otherMetrics { + if err := Convert_v1_MetricSpec_To_autoscaling_MetricSpec(&metric, &outMetrics[i], s); err != nil { + return err + } + } + if out.Spec.Metrics != nil { + outMetrics[len(otherMetrics)] = out.Spec.Metrics[0] + } + out.Spec.Metrics = outMetrics + delete(out.Annotations, autoscaling.MetricSpecsAnnotation) + } + + if currentMetricsEnc, hasCurrentMetrics := out.Annotations[autoscaling.MetricStatusesAnnotation]; hasCurrentMetrics { + // ignore any existing status values -- the ones here have more information + var currentMetrics []MetricStatus + if err := json.Unmarshal([]byte(currentMetricsEnc), ¤tMetrics); err != nil { + return err + } + + out.Status.CurrentMetrics = make([]autoscaling.MetricStatus, len(currentMetrics)) + for i, currentMetric := range currentMetrics { + if err := Convert_v1_MetricStatus_To_autoscaling_MetricStatus(¤tMetric, &out.Status.CurrentMetrics[i], s); err != nil { + return err + } + } + delete(out.Annotations, autoscaling.MetricStatusesAnnotation) + } + + // autoscaling/v1 formerly had an implicit default applied in the controller. In v2alpha1, we apply it explicitly. + // We apply it here, explicitly, since we have access to the full set of metrics from the annotation. + if len(out.Spec.Metrics) == 0 { + // no other metrics, no explicit CPU value set + out.Spec.Metrics = []autoscaling.MetricSpec{ + { + Type: autoscaling.ResourceMetricSourceType, + Resource: &autoscaling.ResourceMetricSource{ + Name: api.ResourceCPU, + }, + }, + } + out.Spec.Metrics[0].Resource.TargetAverageUtilization = new(int32) + *out.Spec.Metrics[0].Resource.TargetAverageUtilization = autoscaling.DefaultCPUUtilization + } + + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { + return err + } + + out.MinReplicas = in.MinReplicas + out.MaxReplicas = in.MaxReplicas + + for _, metric := range in.Metrics { + if metric.Type == autoscaling.ResourceMetricSourceType && metric.Resource != nil && metric.Resource.Name == api.ResourceCPU { + if metric.Resource.TargetAverageUtilization != nil { + out.TargetCPUUtilizationPercentage = new(int32) + *out.TargetCPUUtilizationPercentage = *metric.Resource.TargetAverageUtilization + } + break + } + } + + return nil +} + +func Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { + return err + } + + out.MinReplicas = in.MinReplicas + out.MaxReplicas = in.MaxReplicas + + if in.TargetCPUUtilizationPercentage != nil { + out.Metrics = []autoscaling.MetricSpec{ + { + Type: autoscaling.ResourceMetricSourceType, + Resource: &autoscaling.ResourceMetricSource{ + Name: api.ResourceCPU, + }, + }, + } + out.Metrics[0].Resource.TargetAverageUtilization = new(int32) + *out.Metrics[0].Resource.TargetAverageUtilization = *in.TargetCPUUtilizationPercentage + } + + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { + 
out.ObservedGeneration = in.ObservedGeneration + out.LastScaleTime = in.LastScaleTime + + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + + for _, metric := range in.CurrentMetrics { + if metric.Type == autoscaling.ResourceMetricSourceType && metric.Resource != nil && metric.Resource.Name == api.ResourceCPU { + if metric.Resource.CurrentAverageUtilization != nil { + + out.CurrentCPUUtilizationPercentage = new(int32) + *out.CurrentCPUUtilizationPercentage = *metric.Resource.CurrentAverageUtilization + } + } + } + return nil +} + +func Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.LastScaleTime = in.LastScaleTime + + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + + if in.CurrentCPUUtilizationPercentage != nil { + out.CurrentMetrics = []autoscaling.MetricStatus{ + { + Type: autoscaling.ResourceMetricSourceType, + Resource: &autoscaling.ResourceMetricStatus{ + Name: api.ResourceCPU, + }, + }, + } + out.CurrentMetrics[0].Resource.CurrentAverageUtilization = new(int32) + *out.CurrentMetrics[0].Resource.CurrentAverageUtilization = *in.CurrentCPUUtilizationPercentage + } + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/defaults.go new file mode 100644 index 0000000000..d423ad1253 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/defaults.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + RegisterDefaults(scheme) + return scheme.AddDefaultingFuncs( + SetDefaults_HorizontalPodAutoscaler, + ) +} + +func SetDefaults_HorizontalPodAutoscaler(obj *HorizontalPodAutoscaler) { + if obj.Spec.MinReplicas == nil { + minReplicas := int32(1) + obj.Spec.MinReplicas = &minReplicas + } + + // NB: we apply a default for CPU utilization in conversion because + // we need access to the annotations to properly apply the default. +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/doc.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/doc.go new file mode 100644 index 0000000000..c7be42d5a1 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/generated.pb.go new file mode 100644 index 0000000000..96b8335f8f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/generated.pb.go @@ -0,0 +1,3498 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto + + It has these top-level messages: + CrossVersionObjectReference + HorizontalPodAutoscaler + HorizontalPodAutoscalerList + HorizontalPodAutoscalerSpec + HorizontalPodAutoscalerStatus + MetricSpec + MetricStatus + ObjectMetricSource + ObjectMetricStatus + PodsMetricSource + PodsMetricStatus + ResourceMetricSource + ResourceMetricStatus + Scale + ScaleSpec + ScaleStatus +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/pkg/api/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } +func (*CrossVersionObjectReference) ProtoMessage() {} +func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{0} +} + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } +func (*HorizontalPodAutoscalerList) ProtoMessage() {} +func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + +func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } +func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} +func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{3} +} + +func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } +func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} +func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{4} +} + +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + 
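
> Reviewer note on the generated marshalling code that follows: every Marshal/Size method pair below bottoms out in the base-128 varint helpers encodeVarintGenerated and sovGenerated defined later in this file. A standalone sketch of that encoding scheme is shown here; `putUvarint` and `uvarintSize` are illustrative names, not part of the generated file.

```go
// Sketch of protobuf base-128 varint encoding: 7 bits of the value per byte,
// high bit set on every byte except the last.
package main

import "fmt"

// putUvarint appends v to buf in protobuf varint form and returns buf.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// uvarintSize returns the number of bytes putUvarint would emit for v,
// mirroring what the generated size helper computes when sizing a message.
func uvarintSize(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

func main() {
	for _, v := range []uint64{1, 127, 128, 300} {
		enc := putUvarint(nil, v)
		fmt.Printf("%d -> % x (predicted size %d)\n", v, enc, uvarintSize(v))
	}
}
```

> This is why each generated Size method predicts an exact byte count first: Marshal allocates the output buffer once with that size and then writes varint-prefixed fields into it directly.
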
+func init() { + proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.CrossVersionObjectReference") + proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.HorizontalPodAutoscaler") + proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerList") + proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerSpec") + proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerStatus") + proto.RegisterType((*MetricSpec)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.MetricSpec") + proto.RegisterType((*MetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.MetricStatus") + proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.ObjectMetricSource") + proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.ObjectMetricStatus") + proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.PodsMetricSource") + proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.PodsMetricStatus") + proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.ResourceMetricSource") + proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.ResourceMetricStatus") + proto.RegisterType((*Scale)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.ScaleStatus") +} +func (m *CrossVersionObjectReference) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CrossVersionObjectReference) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + return i, nil +} + +func (m *HorizontalPodAutoscaler) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscaler) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *HorizontalPodAutoscalerList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err 
!= nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HorizontalPodAutoscalerSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ScaleTargetRef.Size())) + n5, err := m.ScaleTargetRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + if m.MinReplicas != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.MinReplicas)) + } + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxReplicas)) + if m.TargetCPUUtilizationPercentage != nil { + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TargetCPUUtilizationPercentage)) + } + return i, nil +} + +func (m *HorizontalPodAutoscalerStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ObservedGeneration != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastScaleTime.Size())) + n6, err := m.LastScaleTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + } + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentReplicas)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DesiredReplicas)) + if m.CurrentCPUUtilizationPercentage != nil { + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.CurrentCPUUtilizationPercentage)) + } + return i, nil +} + +func (m *MetricSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *MetricSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.Object != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Object.Size())) + n7, err := m.Object.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.Pods != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Pods.Size())) + n8, err := m.Pods.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.Resource != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Resource.Size())) + n9, err := m.Resource.MarshalTo(data[i:]) + if err != nil { + return 0, err + } 
+ i += n9 + } + return i, nil +} + +func (m *MetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *MetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.Object != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Object.Size())) + n10, err := m.Object.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if m.Pods != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Pods.Size())) + n11, err := m.Pods.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.Resource != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Resource.Size())) + n12, err := m.Resource.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} + +func (m *ObjectMetricSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectMetricSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) + n13, err := m.Target.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetValue.Size())) + n14, err := m.TargetValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n14 + return i, nil +} + +func (m *ObjectMetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectMetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) + n15, err := m.Target.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n15 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentValue.Size())) + n16, err := m.CurrentValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n16 + return i, nil +} + +func (m *PodsMetricSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodsMetricSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetAverageValue.Size())) + n17, err := m.TargetAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n17 + return i, nil +} + +func (m *PodsMetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err 
!= nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodsMetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentAverageValue.Size())) + n18, err := m.CurrentAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n18 + return i, nil +} + +func (m *ResourceMetricSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceMetricSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + if m.TargetAverageUtilization != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetAverageValue.Size())) + n19, err := m.TargetAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n19 + } + return i, nil +} + +func (m *ResourceMetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceMetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + if m.CurrentAverageUtilization != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.CurrentAverageUtilization)) + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentAverageValue.Size())) + n20, err := m.CurrentAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n20 + return i, nil +} + +func (m *Scale) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Scale) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n21, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n21 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n22, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n22 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n23, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n23 + return i, nil +} + +func (m *ScaleSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScaleSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + return i, nil +} + +func (m *ScaleStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil 
{ + return nil, err + } + return data[:n], nil +} + +func (m *ScaleStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Selector))) + i += copy(data[i:], m.Selector) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *CrossVersionObjectReference) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscaler) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscalerList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + var l int + _ = l + l = m.ScaleTargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.MinReplicas != nil { + n += 1 + sovGenerated(uint64(*m.MinReplicas)) + } + n += 1 + sovGenerated(uint64(m.MaxReplicas)) + if m.TargetCPUUtilizationPercentage != nil { + n += 1 + sovGenerated(uint64(*m.TargetCPUUtilizationPercentage)) + } + return n +} + +func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + var l int + _ = l + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + l = m.LastScaleTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.DesiredReplicas)) + if m.CurrentCPUUtilizationPercentage != nil { + n += 1 + sovGenerated(uint64(*m.CurrentCPUUtilizationPercentage)) + } + return n +} + +func (m *MetricSpec) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ObjectMetricSource) Size() (n int) { + var l int + _ = l + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TargetValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectMetricStatus) Size() (n int) { + var l int + _ = l + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodsMetricSource) Size() (n int) { + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodsMetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceMetricSource) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ResourceMetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.CurrentAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.CurrentAverageUtilization)) + } + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Scale) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleSpec) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + return n +} + +func (m *ScaleStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + l = len(m.Selector) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CrossVersionObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CrossVersionObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscaler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscaler{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, 
"") + return s +} +func (this *HorizontalPodAutoscalerList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, + `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, + `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, + `TargetCPUUtilizationPercentage:` + valueToStringGenerated(this.TargetCPUUtilizationPercentage) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, + `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, + `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, + `CurrentCPUUtilizationPercentage:` + valueToStringGenerated(this.CurrentCPUUtilizationPercentage) + `,`, + `}`, + }, "") + return s +} +func (this *MetricSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricSpec{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricStatus{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectMetricSource{`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricStatus) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&ObjectMetricStatus{`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricSource{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `TargetAverageValue:` + strings.Replace(strings.Replace(this.TargetAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricStatus{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Scale) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Scale{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleSpec{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + 
for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, HorizontalPodAutoscaler{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ScaleTargetRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MinReplicas = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) + } + m.MaxReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MaxReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetCPUUtilizationPercentage", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
m.TargetCPUUtilizationPercentage = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ObservedGeneration = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScaleTime == nil { + m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.LastScaleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.CurrentReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) + } + m.DesiredReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.DesiredReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentCPUUtilizationPercentage", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrentCPUUtilizationPercentage = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err 
!= nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricSource{} + } + if err := m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricSource{} + } + if err := m.Pods.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricSource{} + } + if err := m.Resource.Unmarshal(data[iNdEx:postIndex]); err 
!= nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricStatus{} + } + if err := m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricStatus{} + } + if err := m.Pods.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricStatus{} + } + if err := m.Resource.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMetricSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMetricStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex 
:= iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodsMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodsMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_kubernetes_pkg_api_v1.ResourceName(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetAverageValue == nil { + m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + } + if err := m.TargetAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_kubernetes_pkg_api_v1.ResourceName(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrentAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scale) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scale: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: ScaleStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 1279 
bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x57, 0xcd, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xda, 0x4e, 0x94, 0xce, 0xa6, 0x1f, 0x4c, 0xaa, 0xd4, 0x4d, 0xa8, 0x37, 0x5a, 0x38, + 0xb4, 0xa8, 0xec, 0x12, 0x13, 0x2a, 0x22, 0x84, 0x50, 0x6c, 0x54, 0x5a, 0x51, 0xb7, 0x61, 0xe2, + 0x46, 0x7c, 0x09, 0x31, 0x59, 0x4f, 0x9d, 0x69, 0xbc, 0x1f, 0x9a, 0x1d, 0x5b, 0x4d, 0x24, 0x24, + 0x4e, 0x9c, 0xb9, 0x70, 0x46, 0xf0, 0x4f, 0x70, 0x2e, 0x12, 0x52, 0x8e, 0xbd, 0xc1, 0xc9, 0x22, + 0x0b, 0x37, 0xc4, 0x3f, 0x50, 0x71, 0x40, 0x3b, 0x3b, 0x5e, 0xef, 0xda, 0x5e, 0x27, 0x4e, 0xd3, + 0x22, 0x6e, 0xbb, 0x33, 0xef, 0xfd, 0x7e, 0xef, 0xfd, 0xe6, 0xcd, 0x9b, 0x19, 0xb0, 0xb6, 0xfb, + 0xb6, 0x6f, 0x50, 0xd7, 0xdc, 0x6d, 0x6f, 0x13, 0xe6, 0x10, 0x4e, 0x7c, 0xd3, 0xdb, 0x6d, 0x9a, + 0xd8, 0xa3, 0xbe, 0x89, 0xdb, 0xdc, 0xf5, 0x2d, 0xdc, 0xa2, 0x4e, 0xd3, 0xec, 0xac, 0x98, 0x4d, + 0xe2, 0x10, 0x86, 0x39, 0x69, 0x18, 0x1e, 0x73, 0xb9, 0x0b, 0xaf, 0x45, 0xae, 0x46, 0xdf, 0xd5, + 0xf0, 0x76, 0x9b, 0x46, 0xe8, 0x6a, 0x24, 0x5c, 0x8d, 0xce, 0xca, 0xe2, 0xeb, 0x4d, 0xca, 0x77, + 0xda, 0xdb, 0x86, 0xe5, 0xda, 0x66, 0xd3, 0x6d, 0xba, 0xa6, 0x40, 0xd8, 0x6e, 0x3f, 0x10, 0x7f, + 0xe2, 0x47, 0x7c, 0x45, 0xc8, 0x8b, 0xab, 0x32, 0x28, 0xec, 0x51, 0x1b, 0x5b, 0x3b, 0xd4, 0x21, + 0x6c, 0xaf, 0x17, 0x96, 0xc9, 0x88, 0xef, 0xb6, 0x99, 0x45, 0x06, 0xe3, 0x19, 0xeb, 0xe5, 0x9b, + 0x36, 0xe1, 0x78, 0x44, 0x16, 0x8b, 0x66, 0x96, 0x17, 0x6b, 0x3b, 0x9c, 0xda, 0xc3, 0x34, 0x37, + 0x8e, 0x72, 0xf0, 0xad, 0x1d, 0x62, 0xe3, 0x21, 0xbf, 0x37, 0xb3, 0xfc, 0xda, 0x9c, 0xb6, 0x4c, + 0xea, 0x70, 0x9f, 0xb3, 0x71, 0x39, 0xf9, 0x84, 0x75, 0x08, 0xeb, 0x27, 0x44, 0x1e, 0x61, 0xdb, + 0x6b, 0x91, 0x51, 0x39, 0x5d, 0xcf, 0x5c, 0xd4, 0x11, 0xd6, 0xfa, 0x77, 0x0a, 0x58, 0xaa, 0x32, + 0xd7, 0xf7, 0xb7, 0x08, 0xf3, 0xa9, 0xeb, 0xdc, 0xdb, 0x7e, 0x48, 0x2c, 0x8e, 0xc8, 0x03, 0xc2, + 0x88, 0x63, 0x11, 0xb8, 0x0c, 0x0a, 0xbb, 0xd4, 0x69, 0x14, 0x95, 0x65, 0xe5, 0xea, 0x99, 0xca, + 0xdc, 0x41, 0x57, 0x9b, 0x0a, 0xba, 0x5a, 0xe1, 0x43, 0xea, 0x34, 0x90, 0x98, 0x09, 0x2d, 0x1c, + 0x6c, 0x93, 0x62, 0x2e, 0x6d, 0x71, 0x17, 0xdb, 0x04, 0x89, 0x19, 0x58, 0x06, 0x00, 0x7b, 0x54, + 0x12, 0x14, 0xf3, 0xc2, 0x0e, 0x4a, 0x3b, 0xb0, 0xbe, 0x71, 0x5b, 0xce, 0xa0, 0x84, 0x95, 0xfe, + 0x6b, 0x0e, 0x5c, 0xba, 0xe5, 0x32, 0xba, 0xef, 0x3a, 0x1c, 0xb7, 0x36, 0xdc, 0xc6, 0xba, 0x2c, + 0x2a, 0xc2, 0xe0, 0x97, 0x60, 0x36, 0x5c, 0xd0, 0x06, 0xe6, 0x58, 0xc4, 0xa5, 0x96, 0xdf, 0x30, + 0x64, 0x39, 0x26, 0xf5, 0xed, 0x17, 0x64, 0x68, 0x6d, 0x74, 0x56, 0x8c, 0x28, 0xb9, 0x1a, 0xe1, + 0xb8, 0xcf, 0xdf, 0x1f, 0x43, 0x31, 0x2a, 0xdc, 0x01, 0x05, 0xdf, 0x23, 0x96, 0xc8, 0x49, 0x2d, + 0xdf, 0x34, 0x8e, 0x5d, 0xec, 0x46, 0x46, 0xcc, 0x9b, 0x1e, 0xb1, 0xfa, 0xda, 0x84, 0x7f, 0x48, + 0x30, 0x40, 0x0f, 0xcc, 0xf8, 0x1c, 0xf3, 0xb6, 0x2f, 0x74, 0x51, 0xcb, 0xb7, 0x4e, 0x81, 0x4b, + 0xe0, 0x55, 0xce, 0x49, 0xb6, 0x99, 0xe8, 0x1f, 0x49, 0x1e, 0xfd, 0x4f, 0x05, 0x2c, 0x65, 0x78, + 0xde, 0xa1, 0x3e, 0x87, 0x9f, 0x0f, 0xa9, 0x6b, 0x1c, 0x4f, 0xdd, 0xd0, 0x5b, 0x68, 0x7b, 0x41, + 0x32, 0xcf, 0xf6, 0x46, 0x12, 0xca, 0x36, 0xc1, 0x34, 0xe5, 0xc4, 0xf6, 0x8b, 0xb9, 0xe5, 0xfc, + 0x55, 0xb5, 0x5c, 0x79, 0xf6, 0x74, 0x2b, 0x67, 0x25, 0xdd, 0xf4, 0xed, 0x10, 0x18, 0x45, 0xf8, + 0xfa, 0x3f, 0xb9, 0xcc, 0x34, 0x43, 0xf9, 0xe1, 0x37, 0x0a, 0x38, 0x27, 0x7e, 0xeb, 0x98, 0x35, + 0x49, 0x58, 0xf1, 0x32, 0xdb, 0x49, 0x56, 0x7b, 0xcc, 0xce, 0xa9, 0x2c, 0xc8, 0xb0, 0xce, 0x6d, + 0xa6, 0x58, 0xd0, 0x00, 0x2b, 0x5c, 0x01, 0xaa, 0x4d, 0x1d, 0x44, 0xbc, 0x16, 0xb5, 
0xb0, 0x2f, + 0x4a, 0x6e, 0xba, 0x72, 0x3e, 0xe8, 0x6a, 0x6a, 0xad, 0x3f, 0x8c, 0x92, 0x36, 0xf0, 0x2d, 0xa0, + 0xda, 0xf8, 0x51, 0xec, 0x92, 0x17, 0x2e, 0xf3, 0x92, 0x4f, 0xad, 0xf5, 0xa7, 0x50, 0xd2, 0x0e, + 0x3e, 0x04, 0x25, 0x2e, 0x68, 0xab, 0x1b, 0xf7, 0xef, 0x73, 0xda, 0xa2, 0xfb, 0x98, 0x53, 0xd7, + 0xd9, 0x20, 0xcc, 0x22, 0x0e, 0xc7, 0x4d, 0x52, 0x2c, 0x08, 0x24, 0x3d, 0xe8, 0x6a, 0xa5, 0xfa, + 0x58, 0x4b, 0x74, 0x04, 0x92, 0xfe, 0x38, 0x0f, 0xae, 0x8c, 0xad, 0x4f, 0x78, 0x13, 0x40, 0x77, + 0x5b, 0xf4, 0xb5, 0xc6, 0x07, 0x51, 0x53, 0x0a, 0xbb, 0x43, 0xb8, 0x06, 0xf9, 0xca, 0x42, 0xd0, + 0xd5, 0xe0, 0xbd, 0xa1, 0x59, 0x34, 0xc2, 0x03, 0x5a, 0xe0, 0x6c, 0x0b, 0xfb, 0x3c, 0x52, 0x99, + 0xca, 0x46, 0xa4, 0x96, 0x5f, 0x3b, 0x5e, 0xd1, 0x86, 0x1e, 0x95, 0x97, 0x82, 0xae, 0x76, 0xf6, + 0x4e, 0x12, 0x04, 0xa5, 0x31, 0xe1, 0x3a, 0x38, 0x6f, 0xb5, 0x19, 0x23, 0x0e, 0x1f, 0x50, 0xfd, + 0x92, 0x54, 0xfd, 0x7c, 0x35, 0x3d, 0x8d, 0x06, 0xed, 0x43, 0x88, 0x06, 0xf1, 0x29, 0x23, 0x8d, + 0x18, 0xa2, 0x90, 0x86, 0x78, 0x3f, 0x3d, 0x8d, 0x06, 0xed, 0xa1, 0x0d, 0x34, 0x89, 0x9a, 0xb9, + 0x82, 0xd3, 0x02, 0xf2, 0x95, 0xa0, 0xab, 0x69, 0xd5, 0xf1, 0xa6, 0xe8, 0x28, 0x2c, 0xfd, 0xaf, + 0x1c, 0x00, 0x35, 0xc2, 0x19, 0xb5, 0xc4, 0x8e, 0x59, 0x05, 0x05, 0xbe, 0xe7, 0x11, 0x79, 0x14, + 0x2c, 0xf7, 0x9a, 0x59, 0x7d, 0xcf, 0x23, 0x4f, 0xbb, 0xda, 0x05, 0x69, 0x29, 0x8e, 0xe7, 0x70, + 0x0c, 0x09, 0x6b, 0x88, 0xc1, 0x8c, 0x2b, 0x76, 0x86, 0x5c, 0x97, 0x77, 0x27, 0xd8, 0x5e, 0x71, + 0x6f, 0x8e, 0x81, 0x2b, 0x20, 0xec, 0x68, 0x72, 0xab, 0x49, 0x60, 0xf8, 0x09, 0x28, 0x78, 0x6e, + 0xa3, 0xd7, 0x41, 0xdf, 0x99, 0x80, 0x60, 0xc3, 0x6d, 0xf8, 0x29, 0xf8, 0xd9, 0x30, 0xa3, 0x70, + 0x14, 0x09, 0x48, 0x48, 0xc1, 0x6c, 0xef, 0xca, 0x21, 0x56, 0x4b, 0x2d, 0xbf, 0x37, 0x01, 0x3c, + 0x92, 0xae, 0x29, 0x8a, 0xb9, 0xb0, 0x33, 0xf6, 0x66, 0x50, 0x0c, 0xaf, 0xff, 0x9d, 0x03, 0x73, + 0xd2, 0x30, 0xda, 0x20, 0xff, 0xb1, 0xde, 0xd1, 0x29, 0xf2, 0xdc, 0xf4, 0x8e, 0xe0, 0x9f, 0xab, + 0xde, 0x11, 0x45, 0x96, 0xde, 0xdf, 0xe7, 0x00, 0x1c, 0x2e, 0x30, 0xe8, 0x80, 0x99, 0xa8, 0xb5, + 0x9d, 0xf2, 0x71, 0x10, 0x1f, 0xc7, 0xb2, 0xf3, 0x4b, 0x96, 0xf0, 0x72, 0x64, 0x0b, 0xfe, 0xbb, + 0xfd, 0x4b, 0x54, 0x7c, 0x39, 0xa9, 0xc5, 0x33, 0x28, 0x61, 0x05, 0x09, 0x50, 0x23, 0xef, 0x2d, + 0xdc, 0x6a, 0x13, 0xb9, 0x0e, 0x63, 0x4f, 0x69, 0xa3, 0x97, 0xb6, 0xf1, 0x51, 0x1b, 0x3b, 0x9c, + 0xf2, 0xbd, 0xfe, 0x79, 0x51, 0xef, 0x43, 0xa1, 0x24, 0xae, 0xfe, 0xe3, 0xa0, 0x42, 0x51, 0x5d, + 0xfe, 0x1f, 0x14, 0xda, 0x01, 0x73, 0xb2, 0xbb, 0x3d, 0x8b, 0x44, 0x17, 0x25, 0xcb, 0x5c, 0x35, + 0x81, 0x85, 0x52, 0xc8, 0xfa, 0xcf, 0x0a, 0xb8, 0x30, 0xd8, 0x46, 0x06, 0x42, 0x56, 0x8e, 0x15, + 0xf2, 0x3e, 0x80, 0x51, 0xc2, 0xeb, 0x1d, 0xc2, 0x70, 0x93, 0x44, 0x81, 0xe7, 0x4e, 0x14, 0xf8, + 0xa2, 0xe4, 0x82, 0xf5, 0x21, 0x44, 0x34, 0x82, 0x45, 0xff, 0x25, 0x9d, 0x44, 0xb4, 0xce, 0x27, + 0x49, 0xe2, 0x2b, 0x30, 0x2f, 0xd5, 0x39, 0x85, 0x2c, 0x96, 0x24, 0xd9, 0x7c, 0x75, 0x18, 0x12, + 0x8d, 0xe2, 0xd1, 0x7f, 0xca, 0x81, 0x8b, 0xa3, 0x9a, 0x2e, 0xac, 0xc9, 0x47, 0x4a, 0x94, 0xc5, + 0x5a, 0xf2, 0x91, 0xf2, 0xb4, 0xab, 0x5d, 0x1b, 0xf7, 0x64, 0x8a, 0xbb, 0x4a, 0xe2, 0x45, 0xf3, + 0x31, 0x28, 0xa6, 0x54, 0x4c, 0x9c, 0x9f, 0xf2, 0x02, 0xf7, 0x72, 0xd0, 0xd5, 0x8a, 0xf5, 0x0c, + 0x1b, 0x94, 0xe9, 0x0d, 0x3b, 0x23, 0xab, 0xe0, 0x64, 0xe5, 0xbb, 0x30, 0x41, 0x05, 0x3c, 0x1e, + 0x56, 0x2e, 0xaa, 0x82, 0x53, 0x56, 0xee, 0x33, 0x70, 0x39, 0xbd, 0x70, 0xc3, 0xd2, 0x5d, 0x09, + 0xba, 0xda, 0xe5, 0x6a, 0x96, 0x11, 0xca, 0xf6, 0xcf, 0xaa, 0xbe, 0xfc, 0x0b, 0xaa, 0xbe, 0x1f, + 0x72, 0x60, 
0x5a, 0x5c, 0x19, 0x5f, 0xc0, 0x0b, 0x75, 0x2b, 0xf5, 0x42, 0x5d, 0x9d, 0xa0, 0x05, + 0x8b, 0x08, 0x33, 0xdf, 0xa3, 0x5f, 0x0c, 0xbc, 0x47, 0x6f, 0x4c, 0x8c, 0x3c, 0xfe, 0xf5, 0xb9, + 0x06, 0xce, 0xc4, 0x01, 0xc0, 0xeb, 0xe1, 0x69, 0x2f, 0xef, 0xc2, 0x8a, 0x58, 0xfb, 0xf8, 0xe9, + 0x18, 0x5f, 0x82, 0x63, 0x0b, 0x9d, 0x02, 0x35, 0xc1, 0x30, 0x99, 0x73, 0x68, 0xed, 0x93, 0x16, + 0xb1, 0xb8, 0xcb, 0xe4, 0x11, 0x12, 0x5b, 0x6f, 0xca, 0x71, 0x14, 0x5b, 0x54, 0x5e, 0x3d, 0x38, + 0x2c, 0x4d, 0x3d, 0x39, 0x2c, 0x4d, 0xfd, 0x76, 0x58, 0x9a, 0xfa, 0x3a, 0x28, 0x29, 0x07, 0x41, + 0x49, 0x79, 0x12, 0x94, 0x94, 0xdf, 0x83, 0x92, 0xf2, 0xed, 0x1f, 0xa5, 0xa9, 0x4f, 0x73, 0x9d, + 0x95, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x61, 0xfc, 0xd4, 0x31, 0x3f, 0x13, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/generated.proto new file mode 100644 index 0000000000..4953624af6 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/generated.proto @@ -0,0 +1,298 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.autoscaling.v1; + +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +message CrossVersionObjectReference { + // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" + optional string kind = 1; + + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + optional string name = 2; + + // API version of the referent + // +optional + optional string apiVersion = 3; +} + +// configuration of a horizontal pod autoscaler. +message HorizontalPodAutoscaler { + // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + optional HorizontalPodAutoscalerSpec spec = 2; + + // current information about the autoscaler. + // +optional + optional HorizontalPodAutoscalerStatus status = 3; +} + +// list of horizontal pod autoscaler objects. +message HorizontalPodAutoscalerList { + // Standard list metadata. 
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // list of horizontal pod autoscaler objects. + repeated HorizontalPodAutoscaler items = 2; +} + +// specification of a horizontal pod autoscaler. +message HorizontalPodAutoscalerSpec { + // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption + // and will set the desired number of pods by using its Scale subresource. + optional CrossVersionObjectReference scaleTargetRef = 1; + + // lower limit for the number of pods that can be set by the autoscaler, default 1. + // +optional + optional int32 minReplicas = 2; + + // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. + optional int32 maxReplicas = 3; + + // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; + // if not specified the default autoscaling policy will be used. + // +optional + optional int32 targetCPUUtilizationPercentage = 4; +} + +// current status of a horizontal pod autoscaler +message HorizontalPodAutoscalerStatus { + // most recent generation observed by this autoscaler. + // +optional + optional int64 observedGeneration = 1; + + // last time the HorizontalPodAutoscaler scaled the number of pods; + // used by the autoscaler to control how often the number of pods is changed. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + + // current number of replicas of pods managed by this autoscaler. + optional int32 currentReplicas = 3; + + // desired number of replicas of pods managed by this autoscaler. + optional int32 desiredReplicas = 4; + + // current average CPU utilization over all pods, represented as a percentage of requested CPU, + // e.g. 70 means that an average pod is using now 70% of its requested CPU. + // +optional + optional int32 currentCPUUtilizationPercentage = 5; +} + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +message MetricSpec { + // type is the type of metric source. It should match one of the fields below. + optional string type = 1; + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + optional ObjectMetricSource object = 2; + + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + optional PodsMetricSource pods = 3; + + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ResourceMetricSource resource = 4; +} + +// MetricStatus describes the last-read state of a single metric. +message MetricStatus { + // type is the type of metric source. It will match one of the fields below. + optional string type = 1; + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). 
+ // +optional + optional ObjectMetricStatus object = 2; + + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + optional PodsMetricStatus pods = 3; + + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ResourceMetricStatus resource = 4; +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +message ObjectMetricSource { + // target is the described Kubernetes object. + optional CrossVersionObjectReference target = 1; + + // metricName is the name of the metric in question. + optional string metricName = 2; + + // targetValue is the target value of the metric (as a quantity). + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +message ObjectMetricStatus { + // target is the described Kubernetes object. + optional CrossVersionObjectReference target = 1; + + // metricName is the name of the metric in question. + optional string metricName = 2; + + // currentValue is the current value of the metric (as a quantity). + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +message PodsMetricSource { + // metricName is the name of the metric in question + optional string metricName = 1; + + // targetAverageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +message PodsMetricStatus { + // metricName is the name of the metric in question + optional string metricName = 1; + + // currentAverageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +message ResourceMetricSource { + // name is the name of the resource in question. 
+ optional string name = 1; + + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + optional int32 targetAverageUtilization = 2; + + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +message ResourceMetricStatus { + // name is the name of the resource in question. + optional string name = 1; + + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + optional int32 currentAverageUtilization = 2; + + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; +} + +// Scale represents a scaling request for a resource. +message Scale { + // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + optional ScaleSpec spec = 2; + + // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // +optional + optional ScaleStatus status = 3; +} + +// ScaleSpec describes the attributes of a scale subresource. +message ScaleSpec { + // desired number of instances for the scaled object. + // +optional + optional int32 replicas = 1; +} + +// ScaleStatus represents the current status of a scale subresource. +message ScaleStatus { + // actual number of observed instances of the scaled object. + optional int32 replicas = 1; + + // label query over pods that should match the replicas count. This is the same + // as the label selector but in the string format to avoid introspection + // by clients. The string will be in the same format as the query-param syntax. 
+ // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + optional string selector = 2; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/register.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/register.go new file mode 100644 index 0000000000..aaa225261c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "autoscaling" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &HorizontalPodAutoscaler{}, + &HorizontalPodAutoscalerList{}, + &Scale{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types.generated.go new file mode 100644 index 0000000000..aea5e38319 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types.generated.go @@ -0,0 +1,5216 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +package v1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg3_resource "k8s.io/apimachinery/pkg/api/resource" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + pkg4_v1 "k8s.io/client-go/pkg/api/v1" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg3_resource.Quantity + var v1 pkg1_v1.Time + var v2 pkg2_types.UID + var v3 pkg4_v1.ResourceName + var v4 time.Time + _, _, _, _, _ = v0, v1, v2, v3, v4 + } +} + +func (x *CrossVersionObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CrossVersionObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CrossVersionObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv6 := &x.Name + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv8 := &x.APIVersion + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CrossVersionObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name 
= "" + } else { + yyv13 := &x.Name + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.MinReplicas != nil + yyq2[3] = x.TargetCPUUtilizationPercentage != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.ScaleTargetRef + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleTargetRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ScaleTargetRef + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.MinReplicas == nil { + r.EncodeNil() + } else { + yy9 := *x.MinReplicas + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MinReplicas == nil { + r.EncodeNil() + } else { + yy11 := *x.MinReplicas + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeInt(int64(yy11)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.MaxReplicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("maxReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeInt(int64(x.MaxReplicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.TargetCPUUtilizationPercentage == nil { + r.EncodeNil() + } else { + yy17 := *x.TargetCPUUtilizationPercentage + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeInt(int64(yy17)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetCPUUtilizationPercentage")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TargetCPUUtilizationPercentage == nil { + r.EncodeNil() + } else { + yy19 := *x.TargetCPUUtilizationPercentage + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(yy19)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscalerSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "scaleTargetRef": + if r.TryDecodeAsNil() { + x.ScaleTargetRef = CrossVersionObjectReference{} + } else { + yyv4 := &x.ScaleTargetRef + yyv4.CodecDecodeSelf(d) + } + case "minReplicas": + if r.TryDecodeAsNil() { + if x.MinReplicas != nil { + x.MinReplicas = nil + } + } else { + if x.MinReplicas == nil { + x.MinReplicas = new(int32) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) + } + } + case "maxReplicas": + if r.TryDecodeAsNil() { + x.MaxReplicas = 0 + } else { + yyv7 := &x.MaxReplicas + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } + } + case "targetCPUUtilizationPercentage": + if r.TryDecodeAsNil() { + if x.TargetCPUUtilizationPercentage != nil { + x.TargetCPUUtilizationPercentage = nil + } + } else { + if x.TargetCPUUtilizationPercentage == nil { + x.TargetCPUUtilizationPercentage = new(int32) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = 
r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ScaleTargetRef = CrossVersionObjectReference{} + } else { + yyv12 := &x.ScaleTargetRef + yyv12.CodecDecodeSelf(d) + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.MinReplicas != nil { + x.MinReplicas = nil + } + } else { + if x.MinReplicas == nil { + x.MinReplicas = new(int32) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MaxReplicas = 0 + } else { + yyv15 := &x.MaxReplicas + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(yyv15)) = int32(r.DecodeInt(32)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TargetCPUUtilizationPercentage != nil { + x.TargetCPUUtilizationPercentage = nil + } + } else { + if x.TargetCPUUtilizationPercentage == nil { + x.TargetCPUUtilizationPercentage = new(int32) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) + } + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ObservedGeneration != nil + yyq2[1] = x.LastScaleTime != nil + yyq2[4] = x.CurrentCPUUtilizationPercentage != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy4 := *x.ObservedGeneration + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy6 := *x.ObservedGeneration + 
yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.LastScaleTime == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { + } else if yym9 { + z.EncBinaryMarshal(x.LastScaleTime) + } else if !yym9 && z.IsJSONHandle() { + z.EncJSONMarshal(x.LastScaleTime) + } else { + z.EncFallback(x.LastScaleTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastScaleTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LastScaleTime == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { + } else if yym10 { + z.EncBinaryMarshal(x.LastScaleTime) + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(x.LastScaleTime) + } else { + z.EncFallback(x.LastScaleTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeInt(int64(x.CurrentReplicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.CurrentReplicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeInt(int64(x.DesiredReplicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("desiredReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.DesiredReplicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.CurrentCPUUtilizationPercentage == nil { + r.EncodeNil() + } else { + yy18 := *x.CurrentCPUUtilizationPercentage + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(yy18)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentCPUUtilizationPercentage")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CurrentCPUUtilizationPercentage == nil { + r.EncodeNil() + } else { + yy20 := *x.CurrentCPUUtilizationPercentage + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeInt(int64(yy20)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscalerStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "observedGeneration": + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + case "lastScaleTime": + if r.TryDecodeAsNil() { + if x.LastScaleTime != nil { + x.LastScaleTime = nil + } + } else { + if x.LastScaleTime == nil { + x.LastScaleTime = new(pkg1_v1.Time) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { + } else if yym7 { + z.DecBinaryUnmarshal(x.LastScaleTime) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.LastScaleTime) + } else { + z.DecFallback(x.LastScaleTime, false) + } + } + case "currentReplicas": + if r.TryDecodeAsNil() { + x.CurrentReplicas = 0 + } else { + yyv8 := &x.CurrentReplicas + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "desiredReplicas": + if r.TryDecodeAsNil() { + x.DesiredReplicas = 0 + } else { + yyv10 := &x.DesiredReplicas + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "currentCPUUtilizationPercentage": + if r.TryDecodeAsNil() { + if x.CurrentCPUUtilizationPercentage != nil { + x.CurrentCPUUtilizationPercentage = nil + } + } else { + if x.CurrentCPUUtilizationPercentage == nil { + x.CurrentCPUUtilizationPercentage = new(int32) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym16 := 
z.DecBinary() + _ = yym16 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LastScaleTime != nil { + x.LastScaleTime = nil + } + } else { + if x.LastScaleTime == nil { + x.LastScaleTime = new(pkg1_v1.Time) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { + } else if yym18 { + z.DecBinaryUnmarshal(x.LastScaleTime) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.LastScaleTime) + } else { + z.DecFallback(x.LastScaleTime, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentReplicas = 0 + } else { + yyv19 := &x.CurrentReplicas + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int32)(yyv19)) = int32(r.DecodeInt(32)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DesiredReplicas = 0 + } else { + yyv21 := &x.DesiredReplicas + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CurrentCPUUtilizationPercentage != nil { + x.CurrentCPUUtilizationPercentage = nil + } + } else { + if x.CurrentCPUUtilizationPercentage == nil { + x.CurrentCPUUtilizationPercentage = new(int32) + } + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, 
string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscaler) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscaler) 
codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = HorizontalPodAutoscalerSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = HorizontalPodAutoscalerStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = HorizontalPodAutoscalerSpec{} + } else { 
+ yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = HorizontalPodAutoscalerStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + 
h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscalerList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = 
r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ScaleSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ScaleStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ScaleSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ScaleStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) 
+ _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv7 := &x.Replicas + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + 
*((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Selector != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Selector)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Selector)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + 
z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "selector": + if r.TryDecodeAsNil() { + x.Selector = "" + } else { + yyv6 := &x.Selector + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv9 := &x.Replicas + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*int32)(yyv9)) = int32(r.DecodeInt(32)) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Selector = "" + } else { + yyv11 := &x.Selector + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x MetricSourceType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *MetricSourceType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *MetricSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Object != nil + yyq2[2] = x.Pods != nil + yyq2[3] = x.Resource != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("object")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("pods")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *MetricSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *MetricSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "object": + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricSource) + } + x.Object.CodecDecodeSelf(d) + } + case "pods": + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } 
+ } else { + if x.Pods == nil { + x.Pods = new(PodsMetricSource) + } + x.Pods.CodecDecodeSelf(d) + } + case "resource": + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricSource) + } + x.Resource.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *MetricSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv9 := &x.Type + yyv9.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricSource) + } + x.Object.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricSource) + } + x.Pods.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricSource) + } + x.Resource.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ObjectMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.Target + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("target")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.Target + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.TargetValue + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.TargetValue + yym15 := z.EncBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.EncExt(yy14) { + } else if !yym15 && z.IsJSONHandle() { + z.EncJSONMarshal(yy14) + } else { + z.EncFallback(yy14) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ObjectMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ObjectMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "target": + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv4 := &x.Target + yyv4.CodecDecodeSelf(d) + } + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv5 := &x.MetricName + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "targetValue": + if r.TryDecodeAsNil() { + x.TargetValue = pkg3_resource.Quantity{} + } else { + yyv7 := &x.TargetValue + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // 
end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ObjectMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv10 := &x.Target + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv11 := &x.MetricName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetValue = pkg3_resource.Quantity{} + } else { + yyv13 := &x.TargetValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(yyv13) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) + } else { + z.DecFallback(yyv13, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodsMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.TargetAverageValue + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("targetAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.TargetAverageValue + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodsMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodsMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv4 := &x.MetricName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "targetAverageValue": + if r.TryDecodeAsNil() { + x.TargetAverageValue = pkg3_resource.Quantity{} + } else { + yyv6 := &x.TargetAverageValue + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodsMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv9 := &x.MetricName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetAverageValue = pkg3_resource.Quantity{} + } 
else { + yyv11 := &x.TargetAverageValue + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) + } else { + z.DecFallback(yyv11, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.TargetAverageUtilization != nil + yyq2[2] = x.TargetAverageValue != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf4 := &x.Name + yysf4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf5 := &x.Name + yysf5.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.TargetAverageUtilization == nil { + r.EncodeNil() + } else { + yy7 := *x.TargetAverageUtilization + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetAverageUtilization")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TargetAverageUtilization == nil { + r.EncodeNil() + } else { + yy9 := *x.TargetAverageUtilization + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.TargetAverageValue == nil { + r.EncodeNil() + } else { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.EncExt(x.TargetAverageValue) { + } else if !yym12 && z.IsJSONHandle() { + z.EncJSONMarshal(x.TargetAverageValue) + } else { + z.EncFallback(x.TargetAverageValue) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TargetAverageValue == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(x.TargetAverageValue) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(x.TargetAverageValue) + } else { + z.EncFallback(x.TargetAverageValue) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + 
+func (x *ResourceMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yyv4.CodecDecodeSelf(d) + } + case "targetAverageUtilization": + if r.TryDecodeAsNil() { + if x.TargetAverageUtilization != nil { + x.TargetAverageUtilization = nil + } + } else { + if x.TargetAverageUtilization == nil { + x.TargetAverageUtilization = new(int32) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int32)(x.TargetAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + case "targetAverageValue": + if r.TryDecodeAsNil() { + if x.TargetAverageValue != nil { + x.TargetAverageValue = nil + } + } else { + if x.TargetAverageValue == nil { + x.TargetAverageValue = new(pkg3_resource.Quantity) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(x.TargetAverageValue) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.TargetAverageValue) + } else { + z.DecFallback(x.TargetAverageValue, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv10 := &x.Name + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TargetAverageUtilization != nil { + x.TargetAverageUtilization = nil + } + } else { + if x.TargetAverageUtilization == nil { + x.TargetAverageUtilization = new(int32) + } + yym12 := z.DecBinary() + _ = yym12 + if false 
{ + } else { + *((*int32)(x.TargetAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TargetAverageValue != nil { + x.TargetAverageValue = nil + } + } else { + if x.TargetAverageValue == nil { + x.TargetAverageValue = new(pkg3_resource.Quantity) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(x.TargetAverageValue) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.TargetAverageValue) + } else { + z.DecFallback(x.TargetAverageValue, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *MetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Object != nil + yyq2[2] = x.Pods != nil + yyq2[3] = x.Resource != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("object")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("pods")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if 
x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *MetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *MetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "object": + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricStatus) + } + x.Object.CodecDecodeSelf(d) + } + case "pods": + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricStatus) + } + x.Pods.CodecDecodeSelf(d) + } + case "resource": + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricStatus) + } + x.Resource.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *MetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv9 := &x.Type + yyv9.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricStatus) + } + x.Object.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricStatus) + } + x.Pods.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricStatus) + } + x.Resource.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ObjectMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.Target + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("target")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.Target + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.CurrentValue + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.CurrentValue + yym15 := z.EncBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.EncExt(yy14) { + } else if !yym15 && z.IsJSONHandle() { + z.EncJSONMarshal(yy14) + } else { + z.EncFallback(yy14) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ObjectMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ObjectMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "target": + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv4 := &x.Target + yyv4.CodecDecodeSelf(d) + } + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv5 := &x.MetricName + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "currentValue": + if r.TryDecodeAsNil() { + x.CurrentValue = pkg3_resource.Quantity{} + } else { + yyv7 := &x.CurrentValue + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ObjectMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv10 := &x.Target + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv11 := &x.MetricName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentValue = pkg3_resource.Quantity{} + } else { + yyv13 := &x.CurrentValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() 
&& z.DecExt(yyv13) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) + } else { + z.DecFallback(yyv13, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodsMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.CurrentAverageValue + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.CurrentAverageValue + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodsMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodsMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool 
= l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv4 := &x.MetricName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "currentAverageValue": + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg3_resource.Quantity{} + } else { + yyv6 := &x.CurrentAverageValue + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodsMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv9 := &x.MetricName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg3_resource.Quantity{} + } else { + yyv11 := &x.CurrentAverageValue + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) + } else { + z.DecFallback(yyv11, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.CurrentAverageUtilization != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf4 := &x.Name + yysf4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf5 := &x.Name + yysf5.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.CurrentAverageUtilization == nil { + r.EncodeNil() + } else { + yy7 := *x.CurrentAverageUtilization + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentAverageUtilization")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CurrentAverageUtilization == nil { + r.EncodeNil() + } else { + yy9 := *x.CurrentAverageUtilization + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.CurrentAverageValue + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.CurrentAverageValue + yym15 := z.EncBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.EncExt(yy14) { + } else if !yym15 && z.IsJSONHandle() { + z.EncJSONMarshal(yy14) + } else { + z.EncFallback(yy14) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yyv4.CodecDecodeSelf(d) + } + case "currentAverageUtilization": + if r.TryDecodeAsNil() { + if x.CurrentAverageUtilization != nil { + x.CurrentAverageUtilization = nil + } + } else { + if x.CurrentAverageUtilization == nil { + x.CurrentAverageUtilization = new(int32) + } 
+ yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int32)(x.CurrentAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + case "currentAverageValue": + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg3_resource.Quantity{} + } else { + yyv7 := &x.CurrentAverageValue + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv10 := &x.Name + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CurrentAverageUtilization != nil { + x.CurrentAverageUtilization = nil + } + } else { + if x.CurrentAverageUtilization == nil { + x.CurrentAverageUtilization = new(int32) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(x.CurrentAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg3_resource.Quantity{} + } else { + yyv13 := &x.CurrentAverageValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(yyv13) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) + } else { + z.DecFallback(yyv13, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutoscaler, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutoscaler, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []HorizontalPodAutoscaler{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, 
_ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 360) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]HorizontalPodAutoscaler, yyrl1) + } + } else { + yyv1 = make([]HorizontalPodAutoscaler, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HorizontalPodAutoscaler{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, HorizontalPodAutoscaler{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HorizontalPodAutoscaler{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, HorizontalPodAutoscaler{}) // var yyz1 HorizontalPodAutoscaler + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = HorizontalPodAutoscaler{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []HorizontalPodAutoscaler{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types.go new file mode 100644 index 0000000000..47fd31e273 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types.go @@ -0,0 +1,296 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api/v1" +) + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +type CrossVersionObjectReference struct { + // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + // API version of the referent + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` +} + +// specification of a horizontal pod autoscaler. +type HorizontalPodAutoscalerSpec struct { + // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption + // and will set the desired number of pods by using its Scale subresource. 
+ ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` + // lower limit for the number of pods that can be set by the autoscaler, default 1. + // +optional + MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` + // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. + MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` + // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; + // if not specified the default autoscaling policy will be used. + // +optional + TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty" protobuf:"varint,4,opt,name=targetCPUUtilizationPercentage"` +} + +// current status of a horizontal pod autoscaler +type HorizontalPodAutoscalerStatus struct { + // most recent generation observed by this autoscaler. + // +optional + ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // last time the HorizontalPodAutoscaler scaled the number of pods; + // used by the autoscaler to control how often the number of pods is changed. + // +optional + LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` + + // current number of replicas of pods managed by this autoscaler. + CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` + + // desired number of replicas of pods managed by this autoscaler. + DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` + + // current average CPU utilization over all pods, represented as a percentage of requested CPU, + // e.g. 70 means that an average pod is using now 70% of its requested CPU. + // +optional + CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty" protobuf:"varint,5,opt,name=currentCPUUtilizationPercentage"` +} + +// +genclient=true + +// configuration of a horizontal pod autoscaler. +type HorizontalPodAutoscaler struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // current information about the autoscaler. + // +optional + Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// list of horizontal pod autoscaler objects. +type HorizontalPodAutoscalerList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // list of horizontal pod autoscaler objects. + Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Scale represents a scaling request for a resource. +type Scale struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. 
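Editor's note: the v1 HorizontalPodAutoscaler, spec, and status types vendored above are plain Go structs. As a rough illustration only (not part of the vendored files), the sketch below populates them for a hypothetical Deployment named "web" in the "default" namespace, using the import paths introduced in this diff; the APIVersion value is just an example.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	autoscalingv1 "k8s.io/client-go/pkg/apis/autoscaling/v1"
)

func main() {
	minReplicas := int32(2)
	targetCPU := int32(80)

	// Scale the hypothetical "web" Deployment between 2 and 10 replicas,
	// targeting 80% average CPU utilization across its pods.
	hpa := &autoscalingv1.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
		Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
			ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
				Kind:       "Deployment",
				Name:       "web",
				APIVersion: "extensions/v1beta1", // example value
			},
			MinReplicas:                    &minReplicas,
			MaxReplicas:                    10,
			TargetCPUUtilizationPercentage: &targetCPU,
		},
	}
	fmt.Printf("%s/%s: up to %d replicas\n", hpa.Namespace, hpa.Name, hpa.Spec.MaxReplicas)
}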
+ // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // +optional + Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ScaleSpec describes the attributes of a scale subresource. +type ScaleSpec struct { + // desired number of instances for the scaled object. + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` +} + +// ScaleStatus represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // label query over pods that should match the replicas count. This is same + // as the label selector but in the string format to avoid introspection + // by clients. The string will be in the same format as the query-param syntax. + // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector string `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` +} + +// the types below are used in the alpha metrics annotation + +// MetricSourceType indicates the type of metric. +type MetricSourceType string + +var ( + // ObjectMetricSourceType is a metric describing a kubernetes object + // (for example, hits-per-second on an Ingress object). + ObjectMetricSourceType MetricSourceType = "Object" + // PodsMetricSourceType is a metric describing each pod in the current scale + // target (for example, transactions-processed-per-second). The values + // will be averaged together before being compared to the target value. + PodsMetricSourceType MetricSourceType = "Pods" + // ResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ResourceMetricSourceType MetricSourceType = "Resource" +) + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +type MetricSpec struct { + // type is the type of metric source. It should match one of the fields below. + Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). 
Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricSource struct { + // target is the described Kubernetes object. + Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` + + // metricName is the name of the metric in question. + MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // targetValue is the target value of the metric (as a quantity). + TargetValue resource.Quantity `json:"targetValue" protobuf:"bytes,3,name=targetValue"` +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +type PodsMetricSource struct { + // metricName is the name of the metric in question + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // targetAverageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + TargetAverageValue resource.Quantity `json:"targetAverageValue" protobuf:"bytes,2,name=targetAverageValue"` +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ResourceMetricSource struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` +} + +// MetricStatus describes the last-read state of a single metric. +type MetricStatus struct { + // type is the type of metric source. It will match one of the fields below. + Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). 
The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricStatus struct { + // target is the described Kubernetes object. + Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` + + // metricName is the name of the metric in question. + MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // currentValue is the current value of the metric (as a quantity). + CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +type PodsMetricStatus struct { + // metricName is the name of the metric in question + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // currentAverageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,2,name=currentAverageValue"` +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ResourceMetricStatus struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. 
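Editor's note: as the comment above states, the MetricSpec/MetricStatus family exists in this API version only to back the alpha metrics annotation. A plausible illustration (not taken from this diff; the exact annotation key is not shown here) is to JSON-encode a []MetricSpec, setting `type` plus exactly one matching source field as the field comments require. apiv1.ResourceCPU is assumed to be the usual "cpu" resource-name constant from the vendored core v1 package.

package main

import (
	"encoding/json"
	"fmt"

	apiv1 "k8s.io/client-go/pkg/api/v1"
	autoscalingv1 "k8s.io/client-go/pkg/apis/autoscaling/v1"
)

func main() {
	target := int32(50)

	// One resource-based metric: average CPU utilization of 50%.
	// Per the MetricSpec comment, only Type and the matching field
	// (Resource) are set.
	metrics := []autoscalingv1.MetricSpec{{
		Type: autoscalingv1.ResourceMetricSourceType,
		Resource: &autoscalingv1.ResourceMetricSource{
			Name:                     apiv1.ResourceCPU,
			TargetAverageUtilization: &target,
		},
	}}

	raw, err := json.Marshal(metrics)
	if err != nil {
		panic(err)
	}
	// This JSON payload is what would be carried in the HPA's alpha
	// metrics annotation (annotation key not shown in this diff).
	fmt.Println(string(raw))
}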
+ CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..01d205f87e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go @@ -0,0 +1,205 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_CrossVersionObjectReference = map[string]string{ + "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", + "kind": "Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds\"", + "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "apiVersion": "API version of the referent", +} + +func (CrossVersionObjectReference) SwaggerDoc() map[string]string { + return map_CrossVersionObjectReference +} + +var map_HorizontalPodAutoscaler = map[string]string{ + "": "configuration of a horizontal pod autoscaler.", + "metadata": "Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "behaviour of autoscaler. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", + "status": "current information about the autoscaler.", +} + +func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscaler +} + +var map_HorizontalPodAutoscalerList = map[string]string{ + "": "list of horizontal pod autoscaler objects.", + "metadata": "Standard list metadata.", + "items": "list of horizontal pod autoscaler objects.", +} + +func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerList +} + +var map_HorizontalPodAutoscalerSpec = map[string]string{ + "": "specification of a horizontal pod autoscaler.", + "scaleTargetRef": "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.", + "minReplicas": "lower limit for the number of pods that can be set by the autoscaler, default 1.", + "maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", + "targetCPUUtilizationPercentage": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", +} + +func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerSpec +} + +var map_HorizontalPodAutoscalerStatus = map[string]string{ + "": "current status of a horizontal pod autoscaler", + "observedGeneration": "most recent generation observed by this autoscaler.", + "lastScaleTime": "last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.", + "currentReplicas": "current number of replicas of pods managed by this autoscaler.", + "desiredReplicas": "desired number of replicas of pods managed by this autoscaler.", + "currentCPUUtilizationPercentage": "current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.", +} + +func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerStatus +} + +var map_MetricSpec = map[string]string{ + "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "type": "type is the type of metric source. It should match one of the fields below.", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", +} + +func (MetricSpec) SwaggerDoc() map[string]string { + return map_MetricSpec +} + +var map_MetricStatus = map[string]string{ + "": "MetricStatus describes the last-read state of a single metric.", + "type": "type is the type of metric source. 
It will match one of the fields below.", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", +} + +func (MetricStatus) SwaggerDoc() map[string]string { + return map_MetricStatus +} + +var map_ObjectMetricSource = map[string]string{ + "": "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "target": "target is the described Kubernetes object.", + "metricName": "metricName is the name of the metric in question.", + "targetValue": "targetValue is the target value of the metric (as a quantity).", +} + +func (ObjectMetricSource) SwaggerDoc() map[string]string { + return map_ObjectMetricSource +} + +var map_ObjectMetricStatus = map[string]string{ + "": "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "target": "target is the described Kubernetes object.", + "metricName": "metricName is the name of the metric in question.", + "currentValue": "currentValue is the current value of the metric (as a quantity).", +} + +func (ObjectMetricStatus) SwaggerDoc() map[string]string { + return map_ObjectMetricStatus +} + +var map_PodsMetricSource = map[string]string{ + "": "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "metricName": "metricName is the name of the metric in question", + "targetAverageValue": "targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity)", +} + +func (PodsMetricSource) SwaggerDoc() map[string]string { + return map_PodsMetricSource +} + +var map_PodsMetricStatus = map[string]string{ + "": "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).", + "metricName": "metricName is the name of the metric in question", + "currentAverageValue": "currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)", +} + +func (PodsMetricStatus) SwaggerDoc() map[string]string { + return map_PodsMetricStatus +} + +var map_ResourceMetricSource = map[string]string{ + "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. 
Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "targetAverageValue": "targetAverageValue is the the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", +} + +func (ResourceMetricSource) SwaggerDoc() map[string]string { + return map_ResourceMetricSource +} + +var map_ResourceMetricStatus = map[string]string{ + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", + "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", + "currentAverageValue": "currentAverageValue is the the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", +} + +func (ResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ResourceMetricStatus +} + +var map_Scale = map[string]string{ + "": "Scale represents a scaling request for a resource.", + "metadata": "Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.", +} + +func (Scale) SwaggerDoc() map[string]string { + return map_Scale +} + +var map_ScaleSpec = map[string]string{ + "": "ScaleSpec describes the attributes of a scale subresource.", + "replicas": "desired number of instances for the scaled object.", +} + +func (ScaleSpec) SwaggerDoc() map[string]string { + return map_ScaleSpec +} + +var map_ScaleStatus = map[string]string{ + "": "ScaleStatus represents the current status of a scale subresource.", + "replicas": "actual number of observed instances of the scaled object.", + "selector": "label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. 
More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors", +} + +func (ScaleStatus) SwaggerDoc() map[string]string { + return map_ScaleStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.conversion.go new file mode 100644 index 0000000000..923d793d89 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.conversion.go @@ -0,0 +1,449 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1 + +import ( + resource "k8s.io/apimachinery/pkg/api/resource" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/client-go/pkg/api" + api_v1 "k8s.io/client-go/pkg/api/v1" + autoscaling "k8s.io/client-go/pkg/apis/autoscaling" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
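Editor's note: RegisterConversions below is deliberately public so callers can build their own schemes, in addition to the package-level SchemeBuilder wiring in init. A minimal sketch (assuming the vendored apimachinery and client-go paths in this diff, and that the registered Convert_* wrappers cover HorizontalPodAutoscaler) converts a v1 object to the internal autoscaling type via runtime.Scheme.Convert:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/pkg/apis/autoscaling"
	autoscalingv1 "k8s.io/client-go/pkg/apis/autoscaling/v1"
)

func main() {
	// Build a scheme that knows only about these generated conversions.
	scheme := runtime.NewScheme()
	if err := autoscalingv1.RegisterConversions(scheme); err != nil {
		panic(err)
	}

	in := &autoscalingv1.HorizontalPodAutoscaler{}
	in.Name = "web" // hypothetical object name
	out := &autoscaling.HorizontalPodAutoscaler{}

	// Convert looks up the registered function for the (v1, internal)
	// type pair and invokes it.
	if err := scheme.Convert(in, out, nil); err != nil {
		panic(err)
	}
	fmt.Println("converted:", out.Name)
}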
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference, + Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference, + Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler, + Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler, + Convert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList, + Convert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList, + Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec, + Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec, + Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus, + Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus, + Convert_v1_MetricSpec_To_autoscaling_MetricSpec, + Convert_autoscaling_MetricSpec_To_v1_MetricSpec, + Convert_v1_MetricStatus_To_autoscaling_MetricStatus, + Convert_autoscaling_MetricStatus_To_v1_MetricStatus, + Convert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource, + Convert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource, + Convert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus, + Convert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus, + Convert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource, + Convert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource, + Convert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus, + Convert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus, + Convert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource, + Convert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource, + Convert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus, + Convert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus, + Convert_v1_Scale_To_autoscaling_Scale, + Convert_autoscaling_Scale_To_v1_Scale, + Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec, + Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec, + Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus, + Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus, + ) +} + +func autoConvert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Name = in.Name + out.APIVersion = in.APIVersion + return nil +} + +func Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { + return autoConvert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in, out, s) +} + +func autoConvert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *CrossVersionObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Name = in.Name + out.APIVersion = in.APIVersion + return nil +} + +func Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *CrossVersionObjectReference, s conversion.Scope) error { + return autoConvert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in, out, s) +} + +func autoConvert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in 
*HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]autoscaling.HorizontalPodAutoscaler, len(*in)) + for i := range *in { + if err := Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { + return autoConvert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + if err := Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in, out, s) +} + +func autoConvert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { + return err + } + out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) + out.MaxReplicas = in.MaxReplicas + // WARNING: in.TargetCPUUtilizationPercentage requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := 
Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { + return err + } + out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) + out.MaxReplicas = in.MaxReplicas + // WARNING: in.Metrics requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { + out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) + out.LastScaleTime = (*meta_v1.Time)(unsafe.Pointer(in.LastScaleTime)) + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + // WARNING: in.CurrentCPUUtilizationPercentage requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { + out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) + out.LastScaleTime = (*meta_v1.Time)(unsafe.Pointer(in.LastScaleTime)) + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + // WARNING: in.CurrentMetrics requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_v1_MetricSpec_To_autoscaling_MetricSpec(in *MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error { + out.Type = autoscaling.MetricSourceType(in.Type) + out.Object = (*autoscaling.ObjectMetricSource)(unsafe.Pointer(in.Object)) + out.Pods = (*autoscaling.PodsMetricSource)(unsafe.Pointer(in.Pods)) + out.Resource = (*autoscaling.ResourceMetricSource)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_v1_MetricSpec_To_autoscaling_MetricSpec(in *MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error { + return autoConvert_v1_MetricSpec_To_autoscaling_MetricSpec(in, out, s) +} + +func autoConvert_autoscaling_MetricSpec_To_v1_MetricSpec(in *autoscaling.MetricSpec, out *MetricSpec, s conversion.Scope) error { + out.Type = MetricSourceType(in.Type) + out.Object = (*ObjectMetricSource)(unsafe.Pointer(in.Object)) + out.Pods = (*PodsMetricSource)(unsafe.Pointer(in.Pods)) + out.Resource = (*ResourceMetricSource)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_autoscaling_MetricSpec_To_v1_MetricSpec(in *autoscaling.MetricSpec, out *MetricSpec, s conversion.Scope) error { + return autoConvert_autoscaling_MetricSpec_To_v1_MetricSpec(in, out, s) +} + +func autoConvert_v1_MetricStatus_To_autoscaling_MetricStatus(in *MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error { + out.Type = autoscaling.MetricSourceType(in.Type) + out.Object = (*autoscaling.ObjectMetricStatus)(unsafe.Pointer(in.Object)) + out.Pods = (*autoscaling.PodsMetricStatus)(unsafe.Pointer(in.Pods)) + out.Resource = (*autoscaling.ResourceMetricStatus)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_v1_MetricStatus_To_autoscaling_MetricStatus(in *MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error { + return autoConvert_v1_MetricStatus_To_autoscaling_MetricStatus(in, out, s) +} + +func autoConvert_autoscaling_MetricStatus_To_v1_MetricStatus(in *autoscaling.MetricStatus, out *MetricStatus, s conversion.Scope) error { + out.Type = MetricSourceType(in.Type) + out.Object = 
(*ObjectMetricStatus)(unsafe.Pointer(in.Object)) + out.Pods = (*PodsMetricStatus)(unsafe.Pointer(in.Pods)) + out.Resource = (*ResourceMetricStatus)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_autoscaling_MetricStatus_To_v1_MetricStatus(in *autoscaling.MetricStatus, out *MetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_MetricStatus_To_v1_MetricStatus(in, out, s) +} + +func autoConvert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error { + if err := Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.TargetValue = in.TargetValue + return nil +} + +func Convert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error { + return autoConvert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in, out, s) +} + +func autoConvert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *ObjectMetricSource, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.TargetValue = in.TargetValue + return nil +} + +func Convert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *ObjectMetricSource, s conversion.Scope) error { + return autoConvert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource(in, out, s) +} + +func autoConvert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error { + if err := Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.CurrentValue = in.CurrentValue + return nil +} + +func Convert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error { + return autoConvert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *ObjectMetricStatus, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.CurrentValue = in.CurrentValue + return nil +} + +func Convert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *ObjectMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus(in, out, s) +} + +func autoConvert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error { + out.MetricName = in.MetricName + out.TargetAverageValue = in.TargetAverageValue + return nil +} + +func Convert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error { + return autoConvert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource(in, out, s) +} + +func 
autoConvert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *PodsMetricSource, s conversion.Scope) error { + out.MetricName = in.MetricName + out.TargetAverageValue = in.TargetAverageValue + return nil +} + +func Convert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *PodsMetricSource, s conversion.Scope) error { + return autoConvert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource(in, out, s) +} + +func autoConvert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error { + out.MetricName = in.MetricName + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error { + return autoConvert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *PodsMetricStatus, s conversion.Scope) error { + out.MetricName = in.MetricName + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *PodsMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus(in, out, s) +} + +func autoConvert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error { + out.Name = api.ResourceName(in.Name) + out.TargetAverageUtilization = (*int32)(unsafe.Pointer(in.TargetAverageUtilization)) + out.TargetAverageValue = (*resource.Quantity)(unsafe.Pointer(in.TargetAverageValue)) + return nil +} + +func Convert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error { + return autoConvert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in, out, s) +} + +func autoConvert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *ResourceMetricSource, s conversion.Scope) error { + out.Name = api_v1.ResourceName(in.Name) + out.TargetAverageUtilization = (*int32)(unsafe.Pointer(in.TargetAverageUtilization)) + out.TargetAverageValue = (*resource.Quantity)(unsafe.Pointer(in.TargetAverageValue)) + return nil +} + +func Convert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *ResourceMetricSource, s conversion.Scope) error { + return autoConvert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource(in, out, s) +} + +func autoConvert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error { + out.Name = api.ResourceName(in.Name) + out.CurrentAverageUtilization = (*int32)(unsafe.Pointer(in.CurrentAverageUtilization)) + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error { + return autoConvert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus(in 
*autoscaling.ResourceMetricStatus, out *ResourceMetricStatus, s conversion.Scope) error { + out.Name = api_v1.ResourceName(in.Name) + out.CurrentAverageUtilization = (*int32)(unsafe.Pointer(in.CurrentAverageUtilization)) + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *ResourceMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus(in, out, s) +} + +func autoConvert_v1_Scale_To_autoscaling_Scale(in *Scale, out *autoscaling.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_Scale_To_autoscaling_Scale(in *Scale, out *autoscaling.Scale, s conversion.Scope) error { + return autoConvert_v1_Scale_To_autoscaling_Scale(in, out, s) +} + +func autoConvert_autoscaling_Scale_To_v1_Scale(in *autoscaling.Scale, out *Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_autoscaling_Scale_To_v1_Scale(in *autoscaling.Scale, out *Scale, s conversion.Scope) error { + return autoConvert_autoscaling_Scale_To_v1_Scale(in, out, s) +} + +func autoConvert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in *ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +func Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in *ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error { + return autoConvert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in, out, s) +} + +func autoConvert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in *autoscaling.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +func Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in *autoscaling.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { + return autoConvert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in, out, s) +} + +func autoConvert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in *ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.Selector = in.Selector + return nil +} + +func Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in *ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error { + return autoConvert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in, out, s) +} + +func autoConvert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in *autoscaling.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.Selector = in.Selector + return nil +} + +func Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in *autoscaling.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { + return autoConvert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..07047c2b06 --- /dev/null +++ 
b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go @@ -0,0 +1,312 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + resource "k8s.io/apimachinery/pkg/api/resource" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_CrossVersionObjectReference, InType: reflect.TypeOf(&CrossVersionObjectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HorizontalPodAutoscaler, InType: reflect.TypeOf(&HorizontalPodAutoscaler{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HorizontalPodAutoscalerList, InType: reflect.TypeOf(&HorizontalPodAutoscalerList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HorizontalPodAutoscalerSpec, InType: reflect.TypeOf(&HorizontalPodAutoscalerSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HorizontalPodAutoscalerStatus, InType: reflect.TypeOf(&HorizontalPodAutoscalerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_MetricSpec, InType: reflect.TypeOf(&MetricSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_MetricStatus, InType: reflect.TypeOf(&MetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectMetricSource, InType: reflect.TypeOf(&ObjectMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectMetricStatus, InType: reflect.TypeOf(&ObjectMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodsMetricSource, InType: reflect.TypeOf(&PodsMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodsMetricStatus, InType: reflect.TypeOf(&PodsMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceMetricSource, InType: reflect.TypeOf(&ResourceMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceMetricStatus, InType: reflect.TypeOf(&ResourceMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Scale, InType: reflect.TypeOf(&Scale{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, + ) +} + +func DeepCopy_v1_CrossVersionObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CrossVersionObjectReference) + out := out.(*CrossVersionObjectReference) + *out = *in + return nil + } +} + +func DeepCopy_v1_HorizontalPodAutoscaler(in interface{}, out interface{}, c 
*conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscaler) + out := out.(*HorizontalPodAutoscaler) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_HorizontalPodAutoscalerList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscalerList) + out := out.(*HorizontalPodAutoscalerList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + if err := DeepCopy_v1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_HorizontalPodAutoscalerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscalerSpec) + out := out.(*HorizontalPodAutoscalerSpec) + *out = *in + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = **in + } + if in.TargetCPUUtilizationPercentage != nil { + in, out := &in.TargetCPUUtilizationPercentage, &out.TargetCPUUtilizationPercentage + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_HorizontalPodAutoscalerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscalerStatus) + out := out.(*HorizontalPodAutoscalerStatus) + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.LastScaleTime != nil { + in, out := &in.LastScaleTime, &out.LastScaleTime + *out = new(meta_v1.Time) + **out = (*in).DeepCopy() + } + if in.CurrentCPUUtilizationPercentage != nil { + in, out := &in.CurrentCPUUtilizationPercentage, &out.CurrentCPUUtilizationPercentage + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_MetricSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*MetricSpec) + out := out.(*MetricSpec) + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricSource) + if err := DeepCopy_v1_ObjectMetricSource(*in, *out, c); err != nil { + return err + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricSource) + if err := DeepCopy_v1_PodsMetricSource(*in, *out, c); err != nil { + return err + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricSource) + if err := DeepCopy_v1_ResourceMetricSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_MetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*MetricStatus) + out := out.(*MetricStatus) + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricStatus) + if err := DeepCopy_v1_ObjectMetricStatus(*in, *out, c); err != nil { + return err + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricStatus) + if err := DeepCopy_v1_PodsMetricStatus(*in, *out, c); err != nil { + return err + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = 
new(ResourceMetricStatus) + if err := DeepCopy_v1_ResourceMetricStatus(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_ObjectMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMetricSource) + out := out.(*ObjectMetricSource) + *out = *in + out.TargetValue = in.TargetValue.DeepCopy() + return nil + } +} + +func DeepCopy_v1_ObjectMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMetricStatus) + out := out.(*ObjectMetricStatus) + *out = *in + out.CurrentValue = in.CurrentValue.DeepCopy() + return nil + } +} + +func DeepCopy_v1_PodsMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodsMetricSource) + out := out.(*PodsMetricSource) + *out = *in + out.TargetAverageValue = in.TargetAverageValue.DeepCopy() + return nil + } +} + +func DeepCopy_v1_PodsMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodsMetricStatus) + out := out.(*PodsMetricStatus) + *out = *in + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return nil + } +} + +func DeepCopy_v1_ResourceMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceMetricSource) + out := out.(*ResourceMetricSource) + *out = *in + if in.TargetAverageUtilization != nil { + in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization + *out = new(int32) + **out = **in + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + *out = new(resource.Quantity) + **out = (*in).DeepCopy() + } + return nil + } +} + +func DeepCopy_v1_ResourceMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceMetricStatus) + out := out.(*ResourceMetricStatus) + *out = *in + if in.CurrentAverageUtilization != nil { + in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization + *out = new(int32) + **out = **in + } + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return nil + } +} + +func DeepCopy_v1_Scale(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Scale) + out := out.(*Scale) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + return nil + } +} + +func DeepCopy_v1_ScaleSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleSpec) + out := out.(*ScaleSpec) + *out = *in + return nil + } +} + +func DeepCopy_v1_ScaleStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleStatus) + out := out.(*ScaleStatus) + *out = *in + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.defaults.go new file mode 100644 index 0000000000..af20e98849 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.defaults.go @@ -0,0 +1,47 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&HorizontalPodAutoscaler{}, func(obj interface{}) { SetObjectDefaults_HorizontalPodAutoscaler(obj.(*HorizontalPodAutoscaler)) }) + scheme.AddTypeDefaultingFunc(&HorizontalPodAutoscalerList{}, func(obj interface{}) { + SetObjectDefaults_HorizontalPodAutoscalerList(obj.(*HorizontalPodAutoscalerList)) + }) + return nil +} + +func SetObjectDefaults_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler) { + SetDefaults_HorizontalPodAutoscaler(in) +} + +func SetObjectDefaults_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_HorizontalPodAutoscaler(a) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/defaults.go new file mode 100644 index 0000000000..6cc60d298a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/defaults.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/pkg/apis/autoscaling" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return scheme.AddDefaultingFuncs( + SetDefaults_HorizontalPodAutoscaler, + ) +} + +func SetDefaults_HorizontalPodAutoscaler(obj *HorizontalPodAutoscaler) { + if obj.Spec.MinReplicas == nil { + minReplicas := int32(1) + obj.Spec.MinReplicas = &minReplicas + } + + if len(obj.Spec.Metrics) == 0 { + utilizationDefaultVal := int32(autoscaling.DefaultCPUUtilization) + obj.Spec.Metrics = []MetricSpec{ + { + Type: ResourceMetricSourceType, + Resource: &ResourceMetricSource{ + Name: v1.ResourceCPU, + TargetAverageUtilization: &utilizationDefaultVal, + }, + }, + } + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/doc.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/doc.go new file mode 100644 index 0000000000..a9fe60b1c7 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.pb.go new file mode 100644 index 0000000000..69e71edd99 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.pb.go @@ -0,0 +1,3062 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v2alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.proto + + It has these top-level messages: + CrossVersionObjectReference + HorizontalPodAutoscaler + HorizontalPodAutoscalerList + HorizontalPodAutoscalerSpec + HorizontalPodAutoscalerStatus + MetricSpec + MetricStatus + ObjectMetricSource + ObjectMetricStatus + PodsMetricSource + PodsMetricStatus + ResourceMetricSource + ResourceMetricStatus +*/ +package v2alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/pkg/api/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } +func (*CrossVersionObjectReference) ProtoMessage() {} +func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{0} +} + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } +func (*HorizontalPodAutoscalerList) ProtoMessage() {} +func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + +func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } +func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} +func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{3} +} + +func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } +func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} +func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{4} +} + +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func init() { + proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.CrossVersionObjectReference") + proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscaler") + proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerList") + proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), 
"k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerSpec") + proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerStatus") + proto.RegisterType((*MetricSpec)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.MetricSpec") + proto.RegisterType((*MetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.MetricStatus") + proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.ObjectMetricSource") + proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.ObjectMetricStatus") + proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.PodsMetricSource") + proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.PodsMetricStatus") + proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.ResourceMetricSource") + proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.ResourceMetricStatus") +} +func (m *CrossVersionObjectReference) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CrossVersionObjectReference) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + return i, nil +} + +func (m *HorizontalPodAutoscaler) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscaler) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *HorizontalPodAutoscalerList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HorizontalPodAutoscalerSpec) Marshal() (data []byte, err 
error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ScaleTargetRef.Size())) + n5, err := m.ScaleTargetRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + if m.MinReplicas != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.MinReplicas)) + } + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxReplicas)) + if len(m.Metrics) > 0 { + for _, msg := range m.Metrics { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HorizontalPodAutoscalerStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ObservedGeneration != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastScaleTime.Size())) + n6, err := m.LastScaleTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + } + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentReplicas)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DesiredReplicas)) + if len(m.CurrentMetrics) > 0 { + for _, msg := range m.CurrentMetrics { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MetricSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *MetricSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.Object != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Object.Size())) + n7, err := m.Object.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.Pods != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Pods.Size())) + n8, err := m.Pods.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.Resource != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Resource.Size())) + n9, err := m.Resource.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} + +func (m *MetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *MetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.Object != nil { + data[i] = 0x12 + i++ + i 
= encodeVarintGenerated(data, i, uint64(m.Object.Size())) + n10, err := m.Object.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if m.Pods != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Pods.Size())) + n11, err := m.Pods.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.Resource != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Resource.Size())) + n12, err := m.Resource.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} + +func (m *ObjectMetricSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectMetricSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) + n13, err := m.Target.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetValue.Size())) + n14, err := m.TargetValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n14 + return i, nil +} + +func (m *ObjectMetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectMetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) + n15, err := m.Target.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n15 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentValue.Size())) + n16, err := m.CurrentValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n16 + return i, nil +} + +func (m *PodsMetricSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodsMetricSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetAverageValue.Size())) + n17, err := m.TargetAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n17 + return i, nil +} + +func (m *PodsMetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodsMetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentAverageValue.Size())) + n18, err := m.CurrentAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n18 + return i, 
nil +} + +func (m *ResourceMetricSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceMetricSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + if m.TargetAverageUtilization != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetAverageValue.Size())) + n19, err := m.TargetAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n19 + } + return i, nil +} + +func (m *ResourceMetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceMetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + if m.CurrentAverageUtilization != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.CurrentAverageUtilization)) + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentAverageValue.Size())) + n20, err := m.CurrentAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n20 + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *CrossVersionObjectReference) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscaler) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscalerList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + var l int + _ = l + l = m.ScaleTargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.MinReplicas != nil { + n += 1 + sovGenerated(uint64(*m.MinReplicas)) + } + n += 1 + sovGenerated(uint64(m.MaxReplicas)) + if len(m.Metrics) > 0 { + for _, e := range 
m.Metrics { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + var l int + _ = l + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + l = m.LastScaleTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.DesiredReplicas)) + if len(m.CurrentMetrics) > 0 { + for _, e := range m.CurrentMetrics { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MetricSpec) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ObjectMetricSource) Size() (n int) { + var l int + _ = l + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TargetValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectMetricStatus) Size() (n int) { + var l int + _ = l + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodsMetricSource) Size() (n int) { + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodsMetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceMetricSource) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ResourceMetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.CurrentAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.CurrentAverageUtilization)) + } + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CrossVersionObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CrossVersionObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + 
`APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscaler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscaler{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, + `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, + `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, + `Metrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Metrics), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, + `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, + `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, + `CurrentMetrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentMetrics), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricSpec{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricStatus{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + 
strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectMetricSource{`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectMetricStatus{`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricSource{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `TargetAverageValue:` + strings.Replace(strings.Replace(this.TargetAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricStatus{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, HorizontalPodAutoscaler{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ScaleTargetRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MinReplicas = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) + } + m.MaxReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MaxReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metrics = append(m.Metrics, MetricSpec{}) + if err := m.Metrics[len(m.Metrics)-1].Unmarshal(data[iNdEx:postIndex]); err 
!= nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ObservedGeneration = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScaleTime == nil { + m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.LastScaleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.CurrentReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) + } + m.DesiredReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.DesiredReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.CurrentMetrics = append(m.CurrentMetrics, MetricStatus{}) + if err := m.CurrentMetrics[len(m.CurrentMetrics)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricSource{} + } + if err := m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricSource{} + } + if err := m.Pods.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricSource{} + } + if err := m.Resource.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricStatus{} + } + if err := m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricStatus{} + } + if err := m.Pods.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricStatus{} + } + if err := m.Resource.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMetricSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMetricStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + 
if wireType == 4 { + return fmt.Errorf("proto: PodsMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodsMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_kubernetes_pkg_api_v1.ResourceName(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetAverageValue == nil { + m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + } + if err := m.TargetAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricStatus) Unmarshal(data []byte) error { + l := len(data) 
+ iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_kubernetes_pkg_api_v1.ResourceName(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrentAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 1208 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x57, 0x5b, 0x6f, 0x1b, 0x45, + 0x14, 0x8e, 0x2f, 0x49, 0xc3, 0x38, 0x37, 0x26, 0x55, 0xea, 0x26, 0xd4, 0x8e, 0xf6, 0xa9, 0x54, + 0xb0, 0x4b, 0x4c, 0x41, 0x54, 0x08, 0xa1, 0xd8, 0x5c, 0x5a, 0x11, 0xa7, 0x61, 0x1a, 0x2a, 0x04, + 0x48, 0x30, 0x59, 0x4f, 0x9c, 0x21, 0xde, 0x8b, 0x76, 0x66, 0xad, 0x26, 0x52, 0x25, 0x7e, 0x00, + 0x0f, 0xbc, 0xf0, 0x13, 0x90, 0xf8, 0x07, 0x3c, 0x83, 0x84, 0x94, 0xc7, 0xf2, 0xc6, 0x93, 0x45, + 0xdc, 0x37, 0x7e, 0x42, 0x25, 0x2e, 0xda, 0x99, 0xf1, 0x5e, 0xbc, 0x5e, 0x13, 0x87, 0xb4, 0x82, + 0x37, 0xef, 0xcc, 0x39, 0xdf, 0x77, 0xce, 0xf9, 0xce, 0x9c, 0x19, 0x83, 0xb7, 0x0f, 0xdf, 0x60, + 0x3a, 0x75, 0x8c, 0x43, 0x7f, 0x8f, 0x78, 0x36, 0xe1, 0x84, 0x19, 0xee, 0x61, 0xdb, 0xc0, 0x2e, + 0x65, 0x06, 0xf6, 0xb9, 0xc3, 0x4c, 0xdc, 0xa1, 0x76, 0xdb, 0xe8, 0xd6, 0x70, 0xc7, 0x3d, 0xc0, + 0x1b, 0x46, 0x9b, 0xd8, 0xc4, 0xc3, 0x9c, 0xb4, 0x74, 0xd7, 0x73, 0xb8, 0x03, 0x0d, 0x09, 0xa0, + 0x47, 0x00, 0xba, 0x7b, 0xd8, 0xd6, 0x03, 0x00, 0x3d, 0x06, 0xa0, 0x0f, 0x00, 0x56, 0x5f, 0x6e, + 0x53, 0x7e, 0xe0, 0xef, 0xe9, 0xa6, 0x63, 0x19, 0x6d, 0xa7, 0xed, 0x18, 0x02, 0x67, 0xcf, 0xdf, + 0x17, 0x5f, 0xe2, 0x43, 0xfc, 0x92, 0xf8, 0xab, 0x37, 0x55, 0x80, 0xd8, 0xa5, 0x16, 0x36, 0x0f, + 0xa8, 0x4d, 0xbc, 0xa3, 0x41, 0x88, 0x86, 0x47, 0x98, 0xe3, 0x7b, 0x26, 0x19, 0x8e, 0x6a, 0xac, + 0x17, 0x33, 0x2c, 0xc2, 0xb1, 0xd1, 0x4d, 0xe5, 0xb2, 0x6a, 0x64, 0x79, 0x79, 0xbe, 0xcd, 0xa9, + 0x95, 0xa6, 0x79, 0xfd, 0x9f, 0x1c, 0x98, 0x79, 0x40, 0x2c, 0x9c, 0xf2, 0x7b, 0x35, 0xcb, 0xcf, + 0xe7, 0xb4, 0x63, 0x50, 0x9b, 0x33, 0xee, 0x8d, 0xcb, 0x89, 0x11, 0xaf, 0x4b, 0xbc, 0x28, 0x21, + 0xf2, 0x00, 0x5b, 0x6e, 0x87, 0x8c, 0xca, 0xe9, 0xa5, 0x4c, 0x81, 0x47, 0x59, 0xdf, 0x3a, 0x6b, + 0x3b, 0xa4, 0x5c, 0xb5, 0x6f, 0x73, 0x60, 0xad, 0xe1, 0x39, 0x8c, 0xdd, 0x27, 0x1e, 0xa3, 0x8e, + 0x7d, 0x77, 0xef, 0x4b, 0x62, 0x72, 0x44, 0xf6, 0x89, 0x47, 0x6c, 0x93, 0xc0, 0x75, 0x50, 0x3c, + 0xa4, 0x76, 0xab, 0x9c, 0x5b, 0xcf, 0x5d, 0x7f, 0xae, 0x3e, 0x77, 0xd2, 0xab, 0x4e, 0xf5, 0x7b, + 0xd5, 0xe2, 0x07, 0xd4, 0x6e, 0x21, 0xb1, 0x13, 0x58, 0xd8, 0xd8, 0x22, 0xe5, 0x7c, 0xd2, 0x62, + 0x1b, 0x5b, 0x04, 0x89, 0x1d, 0x58, 0x03, 0x00, 0xbb, 0x54, 0x11, 0x94, 0x0b, 0xc2, 0x0e, 0x2a, + 0x3b, 0xb0, 0xb9, 0x73, 0x47, 0xed, 0xa0, 0x98, 0x95, 0xf6, 0x38, 0x0f, 0xae, 0xdc, 0x76, 0x3c, + 0x7a, 0xec, 
0xd8, 0x1c, 0x77, 0x76, 0x9c, 0xd6, 0xa6, 0xca, 0x83, 0x78, 0xf0, 0x0b, 0x30, 0x1b, + 0xf4, 0x42, 0x0b, 0x73, 0x2c, 0xe2, 0x2a, 0xd5, 0x5e, 0xd1, 0x55, 0x3f, 0xc7, 0xa5, 0x89, 0x3a, + 0x3a, 0xb0, 0xd6, 0xbb, 0x1b, 0xba, 0x4c, 0xae, 0x49, 0x38, 0x8e, 0xf8, 0xa3, 0x35, 0x14, 0xa2, + 0x42, 0x1b, 0x14, 0x99, 0x4b, 0x4c, 0x91, 0x53, 0xa9, 0xb6, 0xa5, 0x4f, 0x78, 0x5a, 0xf4, 0x8c, + 0xc8, 0xef, 0xb9, 0xc4, 0x8c, 0x2a, 0x14, 0x7c, 0x21, 0xc1, 0x03, 0xbb, 0x60, 0x86, 0x71, 0xcc, + 0x7d, 0x26, 0xaa, 0x53, 0xaa, 0x6d, 0x5f, 0x18, 0xa3, 0x40, 0xad, 0x2f, 0x28, 0xce, 0x19, 0xf9, + 0x8d, 0x14, 0x9b, 0xf6, 0x7b, 0x0e, 0xac, 0x65, 0x78, 0x6e, 0x51, 0xc6, 0xe1, 0x67, 0xa9, 0x4a, + 0xeb, 0x67, 0xab, 0x74, 0xe0, 0x2d, 0xea, 0xbc, 0xa4, 0x98, 0x67, 0x07, 0x2b, 0xb1, 0x2a, 0x5b, + 0x60, 0x9a, 0x72, 0x62, 0xb1, 0x72, 0x7e, 0xbd, 0x70, 0xbd, 0x54, 0xbb, 0x7d, 0x51, 0x49, 0xd7, + 0xe7, 0x15, 0xe9, 0xf4, 0x9d, 0x00, 0x1e, 0x49, 0x16, 0xed, 0xcf, 0x7c, 0x66, 0xb2, 0x81, 0x14, + 0xf0, 0xeb, 0x1c, 0x58, 0x10, 0x9f, 0xbb, 0xd8, 0x6b, 0x93, 0xe0, 0x0c, 0xa8, 0x9c, 0x27, 0xd7, + 0x7f, 0xcc, 0x89, 0xaa, 0xaf, 0xa8, 0xe0, 0x16, 0xee, 0x25, 0xb8, 0xd0, 0x10, 0x37, 0xdc, 0x00, + 0x25, 0x8b, 0xda, 0x88, 0xb8, 0x1d, 0x6a, 0x62, 0x26, 0x5a, 0x71, 0xba, 0xbe, 0xd8, 0xef, 0x55, + 0x4b, 0xcd, 0x68, 0x19, 0xc5, 0x6d, 0xe0, 0x6b, 0xa0, 0x64, 0xe1, 0x07, 0xa1, 0x4b, 0x41, 0xb8, + 0x2c, 0x2b, 0xbe, 0x52, 0x33, 0xda, 0x42, 0x71, 0x3b, 0xb8, 0x0f, 0x2e, 0x59, 0x84, 0x7b, 0xd4, + 0x64, 0xe5, 0xa2, 0x50, 0xe2, 0xcd, 0x89, 0x13, 0x6e, 0x0a, 0x7f, 0xd1, 0xdf, 0x8b, 0x8a, 0xef, + 0x92, 0x5c, 0x63, 0x68, 0x00, 0xae, 0xfd, 0x52, 0x00, 0xd7, 0xc6, 0xf6, 0x29, 0x7c, 0x0f, 0x40, + 0x67, 0x4f, 0x8c, 0xc9, 0xd6, 0xfb, 0x72, 0x50, 0x05, 0x13, 0x23, 0x50, 0xa1, 0x50, 0x5f, 0xe9, + 0xf7, 0xaa, 0xf0, 0x6e, 0x6a, 0x17, 0x8d, 0xf0, 0x80, 0x26, 0x98, 0xef, 0x60, 0xc6, 0x65, 0x85, + 0xa9, 0x1a, 0x4e, 0xa5, 0xda, 0x8d, 0xb3, 0x35, 0x6f, 0xe0, 0x51, 0x7f, 0xbe, 0xdf, 0xab, 0xce, + 0x6f, 0xc5, 0x41, 0x50, 0x12, 0x13, 0x6e, 0x82, 0x45, 0xd3, 0xf7, 0x3c, 0x62, 0xf3, 0xa1, 0x8a, + 0x5f, 0x51, 0x15, 0x58, 0x6c, 0x24, 0xb7, 0xd1, 0xb0, 0x7d, 0x00, 0xd1, 0x22, 0x8c, 0x7a, 0xa4, + 0x15, 0x42, 0x14, 0x93, 0x10, 0xef, 0x24, 0xb7, 0xd1, 0xb0, 0x3d, 0x7c, 0x08, 0x16, 0x14, 0xaa, + 0xaa, 0x77, 0x79, 0x5a, 0x68, 0xf8, 0xd6, 0x79, 0x35, 0x94, 0x13, 0x23, 0xec, 0xd2, 0x46, 0x02, + 0x1c, 0x0d, 0x91, 0x69, 0x7f, 0xe4, 0x01, 0x88, 0xc4, 0x87, 0x37, 0x41, 0x91, 0x1f, 0xb9, 0x44, + 0x5d, 0x17, 0xeb, 0x83, 0x51, 0xb7, 0x7b, 0xe4, 0x92, 0x27, 0xbd, 0xea, 0x92, 0xb2, 0x14, 0xb7, + 0x7f, 0xb0, 0x86, 0x84, 0x35, 0x6c, 0x83, 0x19, 0x47, 0x9c, 0x12, 0xa5, 0x53, 0x63, 0xe2, 0xd8, + 0xc3, 0x29, 0x1e, 0xc2, 0xd7, 0x41, 0x30, 0xef, 0xd4, 0xe1, 0x53, 0xf0, 0xf0, 0x73, 0x50, 0x74, + 0x9d, 0xd6, 0x60, 0xca, 0x6e, 0x4e, 0x4c, 0xb3, 0xe3, 0xb4, 0x58, 0x82, 0x64, 0x36, 0xc8, 0x2e, + 0x58, 0x45, 0x02, 0x18, 0x3a, 0x60, 0x76, 0xf0, 0xba, 0x11, 0x4a, 0x96, 0x6a, 0xef, 0x4e, 0x4c, + 0x82, 0x14, 0x40, 0x82, 0x68, 0x2e, 0x98, 0xa1, 0x83, 0x1d, 0x14, 0x92, 0x68, 0x7f, 0xe5, 0xc1, + 0x5c, 0x5c, 0xb8, 0xff, 0x86, 0x02, 0xb2, 0x87, 0x9e, 0xb2, 0x02, 0x92, 0xe4, 0x19, 0x28, 0x20, + 0x89, 0xb2, 0x14, 0xf8, 0x2e, 0x0f, 0x60, 0xba, 0xfd, 0x20, 0x07, 0x33, 0x5c, 0xcc, 0xf2, 0xa7, + 0x72, 0x89, 0x84, 0x17, 0xba, 0xba, 0x2f, 0x14, 0x57, 0xf0, 0xd4, 0x92, 0xd3, 0x76, 0x3b, 0x7a, + 0x92, 0x85, 0x4f, 0x9d, 0x66, 0xb8, 0x83, 0x62, 0x56, 0x90, 0x80, 0x92, 0xf4, 0xbe, 0x8f, 0x3b, + 0x3e, 0x51, 0xca, 0x8c, 0xbd, 0xe7, 0xf5, 0x41, 0xf2, 0xfa, 0x87, 0x3e, 0xb6, 0x39, 0xe5, 0x47, + 0xd1, 0x2d, 0xb3, 0x1b, 0x41, 0xa1, 
0x38, 0xae, 0xf6, 0xfd, 0x70, 0x9d, 0x64, 0xbf, 0xfe, 0x7f, + 0xea, 0x74, 0x00, 0xe6, 0xd4, 0xf0, 0xfb, 0x37, 0x85, 0xba, 0xac, 0x58, 0xe6, 0x1a, 0x31, 0x2c, + 0x94, 0x40, 0xd6, 0x7e, 0xca, 0x81, 0xa5, 0xe1, 0x51, 0x33, 0x14, 0x72, 0xee, 0x4c, 0x21, 0x1f, + 0x03, 0x28, 0x13, 0xde, 0xec, 0x12, 0x0f, 0xb7, 0x89, 0x0c, 0x3c, 0x7f, 0xae, 0xc0, 0x57, 0x15, + 0x17, 0xdc, 0x4d, 0x21, 0xa2, 0x11, 0x2c, 0xda, 0xcf, 0xc9, 0x24, 0xa4, 0xda, 0xe7, 0x49, 0xe2, + 0x21, 0x58, 0x56, 0xd5, 0xb9, 0x80, 0x2c, 0xd6, 0x14, 0xd9, 0x72, 0x23, 0x0d, 0x89, 0x46, 0xf1, + 0x68, 0x3f, 0xe4, 0xc1, 0xe5, 0x51, 0x23, 0x19, 0x36, 0xd5, 0x1f, 0x1f, 0x99, 0xc5, 0xad, 0xf8, + 0x1f, 0x9f, 0x27, 0xbd, 0xea, 0x8b, 0xe3, 0xfe, 0xc1, 0x85, 0x13, 0x26, 0xf6, 0x2f, 0xe9, 0x63, + 0x50, 0x4e, 0x54, 0xf1, 0x23, 0x4e, 0x3b, 0xf4, 0x58, 0xbe, 0x80, 0xe4, 0xe3, 0xef, 0x85, 0x7e, + 0xaf, 0x5a, 0xde, 0xcd, 0xb0, 0x41, 0x99, 0xde, 0xb0, 0x3b, 0xb2, 0x0b, 0xce, 0xd7, 0xbe, 0x2b, + 0x13, 0x74, 0xc0, 0x8f, 0xe9, 0xca, 0xc9, 0x2e, 0xb8, 0xe0, 0xca, 0x7d, 0x0a, 0xae, 0x26, 0x85, + 0x4b, 0x97, 0xee, 0x5a, 0xbf, 0x57, 0xbd, 0xda, 0xc8, 0x32, 0x42, 0xd9, 0xfe, 0x59, 0xdd, 0x57, + 0x78, 0x36, 0xdd, 0x57, 0xbf, 0x71, 0x72, 0x5a, 0x99, 0x7a, 0x74, 0x5a, 0x99, 0xfa, 0xf5, 0xb4, + 0x32, 0xf5, 0x55, 0xbf, 0x92, 0x3b, 0xe9, 0x57, 0x72, 0x8f, 0xfa, 0x95, 0xdc, 0x6f, 0xfd, 0x4a, + 0xee, 0x9b, 0xc7, 0x95, 0xa9, 0x4f, 0x66, 0x07, 0x83, 0xf0, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x95, 0xf2, 0xec, 0x8a, 0x16, 0x12, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.proto new file mode 100644 index 0000000000..27b99c2242 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.proto @@ -0,0 +1,275 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.autoscaling.v2alpha1; + +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v2alpha1"; + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. 
+message CrossVersionObjectReference { + // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" + optional string kind = 1; + + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + optional string name = 2; + + // API version of the referent + // +optional + optional string apiVersion = 3; +} + +// HorizontalPodAutoscaler is the configuration for a horizontal pod +// autoscaler, which automatically manages the replica count of any resource +// implementing the scale subresource based on the metrics specified. +message HorizontalPodAutoscaler { + // metadata is the standard object metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the specification for the behaviour of the autoscaler. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + optional HorizontalPodAutoscalerSpec spec = 2; + + // status is the current information about the autoscaler. + // +optional + optional HorizontalPodAutoscalerStatus status = 3; +} + +// HorizontalPodAutoscaler is a list of horizontal pod autoscaler objects. +message HorizontalPodAutoscalerList { + // metadata is the standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of horizontal pod autoscaler objects. + repeated HorizontalPodAutoscaler items = 2; +} + +// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. +message HorizontalPodAutoscalerSpec { + // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics + // should be collected, as well as to actually change the replica count. + optional CrossVersionObjectReference scaleTargetRef = 1; + + // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. + // It defaults to 1 pod. + // +optional + optional int32 minReplicas = 2; + + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. + // It cannot be less that minReplicas. + optional int32 maxReplicas = 3; + + // metrics contains the specifications for which to use to calculate the + // desired replica count (the maximum replica count across all metrics will + // be used). The desired replica count is calculated multiplying the + // ratio between the target value and the current value by the current + // number of pods. Ergo, metrics used must decrease as the pod count is + // increased, and vice-versa. See the individual metric source types for + // more information about how each type of metric must respond. + // +optional + repeated MetricSpec metrics = 4; +} + +// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. +message HorizontalPodAutoscalerStatus { + // observedGeneration is the most recent generation observed by this autoscaler. + // +optional + optional int64 observedGeneration = 1; + + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, + // used by the autoscaler to control how often the number of pods is changed. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + + // currentReplicas is current number of replicas of pods managed by this autoscaler, + // as last seen by the autoscaler. 
+ optional int32 currentReplicas = 3; + + // desiredReplicas is the desired number of replicas of pods managed by this autoscaler, + // as last calculated by the autoscaler. + optional int32 desiredReplicas = 4; + + // currentMetrics is the last read state of the metrics used by this autoscaler. + repeated MetricStatus currentMetrics = 5; +} + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +message MetricSpec { + // type is the type of metric source. It should match one of the fields below. + optional string type = 1; + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + optional ObjectMetricSource object = 2; + + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + optional PodsMetricSource pods = 3; + + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ResourceMetricSource resource = 4; +} + +// MetricStatus describes the last-read state of a single metric. +message MetricStatus { + // type is the type of metric source. It will match one of the fields below. + optional string type = 1; + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + optional ObjectMetricStatus object = 2; + + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + optional PodsMetricStatus pods = 3; + + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ResourceMetricStatus resource = 4; +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +message ObjectMetricSource { + // target is the described Kubernetes object. + optional CrossVersionObjectReference target = 1; + + // metricName is the name of the metric in question. + optional string metricName = 2; + + // targetValue is the target value of the metric (as a quantity). + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +message ObjectMetricStatus { + // target is the described Kubernetes object. + optional CrossVersionObjectReference target = 1; + + // metricName is the name of the metric in question. + optional string metricName = 2; + + // currentValue is the current value of the metric (as a quantity). 
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +message PodsMetricSource { + // metricName is the name of the metric in question + optional string metricName = 1; + + // targetAverageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +message PodsMetricStatus { + // metricName is the name of the metric in question + optional string metricName = 1; + + // currentAverageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +message ResourceMetricSource { + // name is the name of the resource in question. + optional string name = 1; + + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + optional int32 targetAverageUtilization = 2; + + // targetAverageValue is the the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +message ResourceMetricStatus { + // name is the name of the resource in question. + optional string name = 1; + + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + optional int32 currentAverageUtilization = 2; + + // currentAverageValue is the the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. 
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/register.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/register.go new file mode 100644 index 0000000000..2fc437f939 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/register.go @@ -0,0 +1,44 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "autoscaling" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2alpha1"} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &HorizontalPodAutoscaler{}, + &HorizontalPodAutoscalerList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.generated.go new file mode 100644 index 0000000000..9eb6919a36 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.generated.go @@ -0,0 +1,4621 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +package v2alpha1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_resource "k8s.io/apimachinery/pkg/api/resource" + pkg3_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg4_types "k8s.io/apimachinery/pkg/types" + pkg2_v1 "k8s.io/client-go/pkg/api/v1" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_resource.Quantity + var v1 pkg3_v1.Time + var v2 pkg4_types.UID + var v3 pkg2_v1.ResourceName + var v4 time.Time + _, _, _, _, _ = v0, v1, v2, v3, v4 + } +} + +func (x *CrossVersionObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CrossVersionObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CrossVersionObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv6 := &x.Name + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv8 := &x.APIVersion + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CrossVersionObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name 
= "" + } else { + yyv13 := &x.Name + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.MinReplicas != nil + yyq2[3] = len(x.Metrics) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.ScaleTargetRef + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleTargetRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ScaleTargetRef + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.MinReplicas == nil { + r.EncodeNil() + } else { + yy9 := *x.MinReplicas + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MinReplicas == nil { + r.EncodeNil() + } else { + yy11 := *x.MinReplicas + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeInt(int64(yy11)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.MaxReplicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("maxReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeInt(int64(x.MaxReplicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Metrics == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encSliceMetricSpec(([]MetricSpec)(x.Metrics), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("metrics")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Metrics == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + h.encSliceMetricSpec(([]MetricSpec)(x.Metrics), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscalerSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "scaleTargetRef": + if r.TryDecodeAsNil() { + x.ScaleTargetRef = CrossVersionObjectReference{} + } else { + yyv4 := &x.ScaleTargetRef + yyv4.CodecDecodeSelf(d) + } + case "minReplicas": + if r.TryDecodeAsNil() { + if x.MinReplicas != nil { + x.MinReplicas = nil + } + } else { + if x.MinReplicas == nil { + x.MinReplicas = new(int32) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) + } + } + case "maxReplicas": + if r.TryDecodeAsNil() { + x.MaxReplicas = 0 + } else { + yyv7 := &x.MaxReplicas + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } + } + case "metrics": + if r.TryDecodeAsNil() { + x.Metrics = nil + } else { + yyv9 := &x.Metrics + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceMetricSpec((*[]MetricSpec)(yyv9), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ScaleTargetRef = CrossVersionObjectReference{} + } else { + yyv12 := &x.ScaleTargetRef + yyv12.CodecDecodeSelf(d) + } + yyj11++ + 
if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.MinReplicas != nil { + x.MinReplicas = nil + } + } else { + if x.MinReplicas == nil { + x.MinReplicas = new(int32) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MaxReplicas = 0 + } else { + yyv15 := &x.MaxReplicas + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(yyv15)) = int32(r.DecodeInt(32)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Metrics = nil + } else { + yyv17 := &x.Metrics + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + h.decSliceMetricSpec((*[]MetricSpec)(yyv17), d) + } + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x MetricSourceType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *MetricSourceType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *MetricSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Object != nil + yyq2[2] = x.Pods != nil + yyq2[3] = x.Resource != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("object")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("pods")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *MetricSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *MetricSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "object": + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricSource) + } + x.Object.CodecDecodeSelf(d) + } + case "pods": + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricSource) + } + x.Pods.CodecDecodeSelf(d) + } + case "resource": + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricSource) + } + x.Resource.CodecDecodeSelf(d) + } + default: + 
z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *MetricSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv9 := &x.Type + yyv9.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricSource) + } + x.Object.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricSource) + } + x.Pods.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricSource) + } + x.Resource.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ObjectMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.Target + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("target")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.Target + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.TargetValue + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.TargetValue + yym15 := z.EncBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.EncExt(yy14) { + } else if !yym15 && z.IsJSONHandle() { + z.EncJSONMarshal(yy14) + } else { + z.EncFallback(yy14) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ObjectMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ObjectMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "target": + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv4 := &x.Target + yyv4.CodecDecodeSelf(d) + } + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv5 := &x.MetricName + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "targetValue": + if r.TryDecodeAsNil() { + x.TargetValue = pkg1_resource.Quantity{} + } else { + yyv7 := &x.TargetValue + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ObjectMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool 
= l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv10 := &x.Target + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv11 := &x.MetricName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetValue = pkg1_resource.Quantity{} + } else { + yyv13 := &x.TargetValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(yyv13) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) + } else { + z.DecFallback(yyv13, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodsMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.TargetAverageValue + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.TargetAverageValue + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + if yyr2 || 
yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodsMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodsMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv4 := &x.MetricName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "targetAverageValue": + if r.TryDecodeAsNil() { + x.TargetAverageValue = pkg1_resource.Quantity{} + } else { + yyv6 := &x.TargetAverageValue + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodsMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv9 := &x.MetricName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetAverageValue = pkg1_resource.Quantity{} + } else { + yyv11 := &x.TargetAverageValue + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) + } else { + z.DecFallback(yyv11, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() 
+ } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.TargetAverageUtilization != nil + yyq2[2] = x.TargetAverageValue != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf4 := &x.Name + yysf4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf5 := &x.Name + yysf5.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.TargetAverageUtilization == nil { + r.EncodeNil() + } else { + yy7 := *x.TargetAverageUtilization + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetAverageUtilization")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TargetAverageUtilization == nil { + r.EncodeNil() + } else { + yy9 := *x.TargetAverageUtilization + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.TargetAverageValue == nil { + r.EncodeNil() + } else { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.EncExt(x.TargetAverageValue) { + } else if !yym12 && z.IsJSONHandle() { + z.EncJSONMarshal(x.TargetAverageValue) + } else { + z.EncFallback(x.TargetAverageValue) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TargetAverageValue == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(x.TargetAverageValue) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(x.TargetAverageValue) + } else { + z.EncFallback(x.TargetAverageValue) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 
:= r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yyv4.CodecDecodeSelf(d) + } + case "targetAverageUtilization": + if r.TryDecodeAsNil() { + if x.TargetAverageUtilization != nil { + x.TargetAverageUtilization = nil + } + } else { + if x.TargetAverageUtilization == nil { + x.TargetAverageUtilization = new(int32) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int32)(x.TargetAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + case "targetAverageValue": + if r.TryDecodeAsNil() { + if x.TargetAverageValue != nil { + x.TargetAverageValue = nil + } + } else { + if x.TargetAverageValue == nil { + x.TargetAverageValue = new(pkg1_resource.Quantity) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(x.TargetAverageValue) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.TargetAverageValue) + } else { + z.DecFallback(x.TargetAverageValue, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv10 := &x.Name + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TargetAverageUtilization != nil { + x.TargetAverageUtilization = nil + } + } else { + if x.TargetAverageUtilization == nil { + x.TargetAverageUtilization = new(int32) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(x.TargetAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + 
if x.TargetAverageValue != nil { + x.TargetAverageValue = nil + } + } else { + if x.TargetAverageValue == nil { + x.TargetAverageValue = new(pkg1_resource.Quantity) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(x.TargetAverageValue) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.TargetAverageValue) + } else { + z.DecFallback(x.TargetAverageValue, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ObservedGeneration != nil + yyq2[1] = x.LastScaleTime != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy4 := *x.ObservedGeneration + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy6 := *x.ObservedGeneration + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.LastScaleTime == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { + } else if yym9 { + z.EncBinaryMarshal(x.LastScaleTime) + } else if !yym9 && z.IsJSONHandle() { + z.EncJSONMarshal(x.LastScaleTime) + } else { + z.EncFallback(x.LastScaleTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastScaleTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LastScaleTime == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { + } else if yym10 { + z.EncBinaryMarshal(x.LastScaleTime) + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(x.LastScaleTime) + } else { + z.EncFallback(x.LastScaleTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeInt(int64(x.CurrentReplicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentReplicas")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.CurrentReplicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeInt(int64(x.DesiredReplicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("desiredReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.DesiredReplicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.CurrentMetrics == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + h.encSliceMetricStatus(([]MetricStatus)(x.CurrentMetrics), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentMetrics")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CurrentMetrics == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encSliceMetricStatus(([]MetricStatus)(x.CurrentMetrics), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscalerStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "observedGeneration": + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + case "lastScaleTime": + if r.TryDecodeAsNil() { + if x.LastScaleTime != nil { + x.LastScaleTime = nil + } + } else { + if x.LastScaleTime == nil { + x.LastScaleTime = new(pkg3_v1.Time) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { + } 
else if yym7 { + z.DecBinaryUnmarshal(x.LastScaleTime) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.LastScaleTime) + } else { + z.DecFallback(x.LastScaleTime, false) + } + } + case "currentReplicas": + if r.TryDecodeAsNil() { + x.CurrentReplicas = 0 + } else { + yyv8 := &x.CurrentReplicas + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "desiredReplicas": + if r.TryDecodeAsNil() { + x.DesiredReplicas = 0 + } else { + yyv10 := &x.DesiredReplicas + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "currentMetrics": + if r.TryDecodeAsNil() { + x.CurrentMetrics = nil + } else { + yyv12 := &x.CurrentMetrics + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + h.decSliceMetricStatus((*[]MetricStatus)(yyv12), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LastScaleTime != nil { + x.LastScaleTime = nil + } + } else { + if x.LastScaleTime == nil { + x.LastScaleTime = new(pkg3_v1.Time) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { + } else if yym18 { + z.DecBinaryUnmarshal(x.LastScaleTime) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.LastScaleTime) + } else { + z.DecFallback(x.LastScaleTime, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentReplicas = 0 + } else { + yyv19 := &x.CurrentReplicas + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int32)(yyv19)) = int32(r.DecodeInt(32)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DesiredReplicas = 0 + } else { + yyv21 := &x.DesiredReplicas + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentMetrics = nil + } else { + yyv23 := &x.CurrentMetrics + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + h.decSliceMetricStatus((*[]MetricStatus)(yyv23), d) + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *MetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Object != nil + yyq2[2] = x.Pods != nil + yyq2[3] = x.Resource != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("object")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("pods")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *MetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := 
z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *MetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "object": + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricStatus) + } + x.Object.CodecDecodeSelf(d) + } + case "pods": + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricStatus) + } + x.Pods.CodecDecodeSelf(d) + } + case "resource": + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricStatus) + } + x.Resource.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *MetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv9 := &x.Type + yyv9.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricStatus) + } + x.Object.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricStatus) + } + x.Pods.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricStatus) + } + x.Resource.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ObjectMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.Target + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("target")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.Target + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.CurrentValue + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.CurrentValue + yym15 := z.EncBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.EncExt(yy14) { + } else if !yym15 && z.IsJSONHandle() { + z.EncJSONMarshal(yy14) + } else { + z.EncFallback(yy14) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ObjectMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else 
if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ObjectMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "target": + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv4 := &x.Target + yyv4.CodecDecodeSelf(d) + } + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv5 := &x.MetricName + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "currentValue": + if r.TryDecodeAsNil() { + x.CurrentValue = pkg1_resource.Quantity{} + } else { + yyv7 := &x.CurrentValue + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ObjectMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv10 := &x.Target + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv11 := &x.MetricName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentValue = pkg1_resource.Quantity{} + } else { + yyv13 := &x.CurrentValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(yyv13) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) + } else { + z.DecFallback(yyv13, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodsMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.CurrentAverageValue + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.CurrentAverageValue + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodsMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodsMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case 
"metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv4 := &x.MetricName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "currentAverageValue": + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg1_resource.Quantity{} + } else { + yyv6 := &x.CurrentAverageValue + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodsMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv9 := &x.MetricName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg1_resource.Quantity{} + } else { + yyv11 := &x.CurrentAverageValue + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) + } else { + z.DecFallback(yyv11, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.CurrentAverageUtilization != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf4 := &x.Name + yysf4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf5 := &x.Name + yysf5.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.CurrentAverageUtilization == nil { + r.EncodeNil() + } else { + yy7 := *x.CurrentAverageUtilization + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + 
r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentAverageUtilization")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CurrentAverageUtilization == nil { + r.EncodeNil() + } else { + yy9 := *x.CurrentAverageUtilization + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.CurrentAverageValue + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.CurrentAverageValue + yym15 := z.EncBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.EncExt(yy14) { + } else if !yym15 && z.IsJSONHandle() { + z.EncJSONMarshal(yy14) + } else { + z.EncFallback(yy14) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yyv4.CodecDecodeSelf(d) + } + case "currentAverageUtilization": + if r.TryDecodeAsNil() { + if x.CurrentAverageUtilization != nil { + x.CurrentAverageUtilization = nil + } + } else { + if x.CurrentAverageUtilization == nil { + x.CurrentAverageUtilization = new(int32) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int32)(x.CurrentAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + case "currentAverageValue": + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg1_resource.Quantity{} + } else { + yyv7 := &x.CurrentAverageValue + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && 
z.DecExt(yyv7) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv10 := &x.Name + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CurrentAverageUtilization != nil { + x.CurrentAverageUtilization = nil + } + } else { + if x.CurrentAverageUtilization == nil { + x.CurrentAverageUtilization = new(int32) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(x.CurrentAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg1_resource.Quantity{} + } else { + yyv13 := &x.CurrentAverageValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(yyv13) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) + } else { + z.DecFallback(yyv13, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { 
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscaler) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscaler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if 
r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg3_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = HorizontalPodAutoscalerSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = HorizontalPodAutoscalerStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg3_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = HorizontalPodAutoscalerSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = 
HorizontalPodAutoscalerStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + 
if false { + } else { + h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscalerList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg3_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } 
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg3_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceMetricSpec(v []MetricSpec, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceMetricSpec(v *[]MetricSpec, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []MetricSpec{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]MetricSpec, yyrl1) + } + } else { + yyv1 = make([]MetricSpec, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = MetricSpec{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, MetricSpec{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = MetricSpec{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, MetricSpec{}) // var yyz1 MetricSpec + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = MetricSpec{} + } else { + yyv4 := 
&yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []MetricSpec{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceMetricStatus(v []MetricStatus, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceMetricStatus(v *[]MetricStatus, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []MetricStatus{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]MetricStatus, yyrl1) + } + } else { + yyv1 = make([]MetricStatus, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = MetricStatus{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, MetricStatus{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = MetricStatus{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, MetricStatus{}) // var yyz1 MetricStatus + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = MetricStatus{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []MetricStatus{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutoscaler, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutoscaler, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []HorizontalPodAutoscaler{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var 
yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 392) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]HorizontalPodAutoscaler, yyrl1) + } + } else { + yyv1 = make([]HorizontalPodAutoscaler, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HorizontalPodAutoscaler{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, HorizontalPodAutoscaler{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HorizontalPodAutoscaler{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, HorizontalPodAutoscaler{}) // var yyz1 HorizontalPodAutoscaler + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = HorizontalPodAutoscaler{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []HorizontalPodAutoscaler{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.go new file mode 100644 index 0000000000..4df8ae4142 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.go @@ -0,0 +1,269 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api/v1" +) + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +type CrossVersionObjectReference struct { + // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + // API version of the referent + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` +} + +// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. 
+type HorizontalPodAutoscalerSpec struct {
+	// scaleTargetRef points to the target resource to scale, and is used to identify the pods for which
+	// metrics should be collected, as well as to actually change the replica count.
+	ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"`
+	// minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down.
+	// It defaults to 1 pod.
+	// +optional
+	MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"`
+	// maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
+	// It cannot be less than minReplicas.
+	MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"`
+	// metrics contains the specifications for the metrics to use to calculate the
+	// desired replica count (the maximum replica count across all metrics will
+	// be used). The desired replica count is calculated by multiplying the
+	// ratio between the target value and the current value by the current
+	// number of pods. Ergo, metrics used must decrease as the pod count is
+	// increased, and vice-versa. See the individual metric source types for
+	// more information about how each type of metric must respond.
+	// +optional
+	Metrics []MetricSpec `json:"metrics,omitempty" protobuf:"bytes,4,rep,name=metrics"`
+}
+
+// MetricSourceType indicates the type of metric.
+type MetricSourceType string
+
+var (
+	// ObjectMetricSourceType is a metric describing a kubernetes object
+	// (for example, hits-per-second on an Ingress object).
+	ObjectMetricSourceType MetricSourceType = "Object"
+	// PodsMetricSourceType is a metric describing each pod in the current scale
+	// target (for example, transactions-processed-per-second). The values
+	// will be averaged together before being compared to the target value.
+	PodsMetricSourceType MetricSourceType = "Pods"
+	// ResourceMetricSourceType is a resource metric known to Kubernetes, as
+	// specified in requests and limits, describing each pod in the current
+	// scale target (e.g. CPU or memory). Such metrics are built in to
+	// Kubernetes, and have special scaling options on top of those available
+	// to normal per-pod metrics (the "pods" source).
+	ResourceMetricSourceType MetricSourceType = "Resource"
+)
+
+// MetricSpec specifies how to scale based on a single metric
+// (only `type` and one other matching field should be set at once).
+type MetricSpec struct {
+	// type is the type of metric source. It should match one of the fields below.
+	Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"`
+
+	// object refers to a metric describing a single kubernetes object
+	// (for example, hits-per-second on an Ingress object).
+	// +optional
+	Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"`
+	// pods refers to a metric describing each pod in the current scale target
+	// (for example, transactions-processed-per-second). The values will be
+	// averaged together before being compared to the target value.
+	// +optional
+	Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"`
+	// resource refers to a resource metric (such as those specified in
+	// requests and limits) known to Kubernetes describing each pod in the
+	// current scale target (e.g. CPU or memory). Such metrics are built in to
+	// Kubernetes, and have special scaling options on top of those available
+	// to normal per-pod metrics using the "pods" source.
+	// +optional
+	Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"`
+}
+
+// ObjectMetricSource indicates how to scale on a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
+type ObjectMetricSource struct {
+	// target is the described Kubernetes object.
+	Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"`
+
+	// metricName is the name of the metric in question.
+	MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"`
+	// targetValue is the target value of the metric (as a quantity).
+	TargetValue resource.Quantity `json:"targetValue" protobuf:"bytes,3,name=targetValue"`
+}
+
+// PodsMetricSource indicates how to scale on a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
+// The values will be averaged together before being compared to the target
+// value.
+type PodsMetricSource struct {
+	// metricName is the name of the metric in question
+	MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"`
+	// targetAverageValue is the target value of the average of the
+	// metric across all relevant pods (as a quantity)
+	TargetAverageValue resource.Quantity `json:"targetAverageValue" protobuf:"bytes,2,name=targetAverageValue"`
+}
+
+// ResourceMetricSource indicates how to scale on a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory). The values will be averaged
+// together before being compared to the target. Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source. Only one "target" type
+// should be set.
+type ResourceMetricSource struct {
+	// name is the name of the resource in question.
+	Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"`
+	// targetAverageUtilization is the target value of the average of the
+	// resource metric across all relevant pods, represented as a percentage of
+	// the requested value of the resource for the pods.
+	// +optional
+	TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"`
+	// targetAverageValue is the target value of the average of the
+	// resource metric across all relevant pods, as a raw value (instead of as
+	// a percentage of the request), similar to the "pods" metric source type.
+	// +optional
+	TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"`
+}
+
+// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.
+type HorizontalPodAutoscalerStatus struct {
+	// observedGeneration is the most recent generation observed by this autoscaler.
+	// +optional
+	ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+	// lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods,
+	// used by the autoscaler to control how often the number of pods is changed.
+	// +optional
+	LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"`
+
+	// currentReplicas is the current number of replicas of pods managed by this autoscaler,
+	// as last seen by the autoscaler.
+	CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"`
+
+	// desiredReplicas is the desired number of replicas of pods managed by this autoscaler,
+	// as last calculated by the autoscaler.
+	DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"`
+
+	// currentMetrics is the last read state of the metrics used by this autoscaler.
+	CurrentMetrics []MetricStatus `json:"currentMetrics" protobuf:"bytes,5,rep,name=currentMetrics"`
+}
+
+// MetricStatus describes the last-read state of a single metric.
+type MetricStatus struct {
+	// type is the type of metric source. It will match one of the fields below.
+	Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"`
+
+	// object refers to a metric describing a single kubernetes object
+	// (for example, hits-per-second on an Ingress object).
+	// +optional
+	Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"`
+	// pods refers to a metric describing each pod in the current scale target
+	// (for example, transactions-processed-per-second). The values will be
+	// averaged together before being compared to the target value.
+	// +optional
+	Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"`
+	// resource refers to a resource metric (such as those specified in
+	// requests and limits) known to Kubernetes describing each pod in the
+	// current scale target (e.g. CPU or memory). Such metrics are built in to
+	// Kubernetes, and have special scaling options on top of those available
+	// to normal per-pod metrics using the "pods" source.
+	// +optional
+	Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"`
+}
+
+// ObjectMetricStatus indicates the current value of a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
+type ObjectMetricStatus struct {
+	// target is the described Kubernetes object.
+	Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"`
+
+	// metricName is the name of the metric in question.
+	MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"`
+	// currentValue is the current value of the metric (as a quantity).
+	CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"`
+}
+
+// PodsMetricStatus indicates the current value of a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
+type PodsMetricStatus struct {
+	// metricName is the name of the metric in question
+	MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"`
+	// currentAverageValue is the current value of the average of the
+	// metric across all relevant pods (as a quantity)
+	CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,2,name=currentAverageValue"`
+}
+
+// ResourceMetricStatus indicates the current value of a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory). Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.
+type ResourceMetricStatus struct {
+	// name is the name of the resource in question.
+	Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"`
+	// currentAverageUtilization is the current value of the average of the
+	// resource metric across all relevant pods, represented as a percentage of
+	// the requested value of the resource for the pods. It will only be
+	// present if `targetAverageValue` was set in the corresponding metric
+	// specification.
+	// +optional
+	CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"`
+	// currentAverageValue is the current value of the average of the
+	// resource metric across all relevant pods, as a raw value (instead of as
+	// a percentage of the request), similar to the "pods" metric source type.
+	// It will always be set, regardless of the corresponding metric specification.
+	CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"`
+}
+
+// +genclient=true
+
+// HorizontalPodAutoscaler is the configuration for a horizontal pod
+// autoscaler, which automatically manages the replica count of any resource
+// implementing the scale subresource based on the metrics specified.
+type HorizontalPodAutoscaler struct {
+	metav1.TypeMeta `json:",inline"`
+	// metadata is the standard object metadata.
+	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// spec is the specification for the behaviour of the autoscaler.
+	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
+	// +optional
+	Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// status is the current information about the autoscaler.
+	// +optional
+	Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.
+type HorizontalPodAutoscalerList struct {
+	metav1.TypeMeta `json:",inline"`
+	// metadata is the standard list metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is the list of horizontal pod autoscaler objects.
+	Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types_swagger_doc_generated.go
new file mode 100644
index 0000000000..6030e2dc33
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types_swagger_doc_generated.go
@@ -0,0 +1,175 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_CrossVersionObjectReference = map[string]string{ + "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", + "kind": "Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds\"", + "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "apiVersion": "API version of the referent", +} + +func (CrossVersionObjectReference) SwaggerDoc() map[string]string { + return map_CrossVersionObjectReference +} + +var map_HorizontalPodAutoscaler = map[string]string{ + "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", + "metadata": "metadata is the standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "spec is the specification for the behaviour of the autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", + "status": "status is the current information about the autoscaler.", +} + +func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscaler +} + +var map_HorizontalPodAutoscalerList = map[string]string{ + "": "HorizontalPodAutoscaler is a list of horizontal pod autoscaler objects.", + "metadata": "metadata is the standard list metadata.", + "items": "items is the list of horizontal pod autoscaler objects.", +} + +func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerList +} + +var map_HorizontalPodAutoscalerSpec = map[string]string{ + "": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", + "scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod.", + "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.", + "metrics": "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. 
See the individual metric source types for more information about how each type of metric must respond.", +} + +func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerSpec +} + +var map_HorizontalPodAutoscalerStatus = map[string]string{ + "": "HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.", + "observedGeneration": "observedGeneration is the most recent generation observed by this autoscaler.", + "lastScaleTime": "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, used by the autoscaler to control how often the number of pods is changed.", + "currentReplicas": "currentReplicas is current number of replicas of pods managed by this autoscaler, as last seen by the autoscaler.", + "desiredReplicas": "desiredReplicas is the desired number of replicas of pods managed by this autoscaler, as last calculated by the autoscaler.", + "currentMetrics": "currentMetrics is the last read state of the metrics used by this autoscaler.", +} + +func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerStatus +} + +var map_MetricSpec = map[string]string{ + "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "type": "type is the type of metric source. It should match one of the fields below.", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", +} + +func (MetricSpec) SwaggerDoc() map[string]string { + return map_MetricSpec +} + +var map_MetricStatus = map[string]string{ + "": "MetricStatus describes the last-read state of a single metric.", + "type": "type is the type of metric source. It will match one of the fields below.", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", +} + +func (MetricStatus) SwaggerDoc() map[string]string { + return map_MetricStatus +} + +var map_ObjectMetricSource = map[string]string{ + "": "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "target": "target is the described Kubernetes object.", + "metricName": "metricName is the name of the metric in question.", + "targetValue": "targetValue is the target value of the metric (as a quantity).", +} + +func (ObjectMetricSource) SwaggerDoc() map[string]string { + return map_ObjectMetricSource +} + +var map_ObjectMetricStatus = map[string]string{ + "": "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "target": "target is the described Kubernetes object.", + "metricName": "metricName is the name of the metric in question.", + "currentValue": "currentValue is the current value of the metric (as a quantity).", +} + +func (ObjectMetricStatus) SwaggerDoc() map[string]string { + return map_ObjectMetricStatus +} + +var map_PodsMetricSource = map[string]string{ + "": "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "metricName": "metricName is the name of the metric in question", + "targetAverageValue": "targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity)", +} + +func (PodsMetricSource) SwaggerDoc() map[string]string { + return map_PodsMetricSource +} + +var map_PodsMetricStatus = map[string]string{ + "": "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).", + "metricName": "metricName is the name of the metric in question", + "currentAverageValue": "currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)", +} + +func (PodsMetricStatus) SwaggerDoc() map[string]string { + return map_PodsMetricStatus +} + +var map_ResourceMetricSource = map[string]string{ + "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. 
Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", +} + +func (ResourceMetricSource) SwaggerDoc() map[string]string { + return map_ResourceMetricSource +} + +var map_ResourceMetricStatus = map[string]string{ + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", + "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", + "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", +} + +func (ResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ResourceMetricStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.conversion.go new file mode 100644 index 0000000000..e39667fefc --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.conversion.go @@ -0,0 +1,379 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v2alpha1 + +import ( + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/client-go/pkg/api" + api_v1 "k8s.io/client-go/pkg/api/v1" + autoscaling "k8s.io/client-go/pkg/apis/autoscaling" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes.
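[Editorial aside, not part of the vendored diff] The generated conversion functions registered in this file are easier to review with a concrete object in hand. Below is a minimal, hypothetical Go sketch showing how a caller might build a v2alpha1 HorizontalPodAutoscaler that scales on average CPU utilization and convert it to the internal autoscaling type. The object/field values ("frontend", "default", 80%) are made up for illustration, the import paths assume the vendored client-go layout used in this diff, and a nil conversion.Scope is passed only because these particular generated conversions never consult the scope.

// Example sketch (illustration only; not part of the vendored files).
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/pkg/apis/autoscaling"
	"k8s.io/client-go/pkg/apis/autoscaling/v2alpha1"
)

func main() {
	minReplicas := int32(2)
	targetCPU := int32(80)

	in := v2alpha1.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{Name: "frontend", Namespace: "default"},
		Spec: v2alpha1.HorizontalPodAutoscalerSpec{
			ScaleTargetRef: v2alpha1.CrossVersionObjectReference{
				Kind:       "Deployment",
				Name:       "frontend",
				APIVersion: "extensions/v1beta1",
			},
			MinReplicas: &minReplicas,
			MaxReplicas: 10,
			Metrics: []v2alpha1.MetricSpec{{
				// "Resource" selects the ResourceMetricSource branch of MetricSpec.
				Type: v2alpha1.MetricSourceType("Resource"),
				Resource: &v2alpha1.ResourceMetricSource{
					Name:                     "cpu",
					TargetAverageUtilization: &targetCPU,
				},
			}},
		},
	}

	// Convert to the internal representation using the generated function.
	// The scope argument is unused by these generated conversions, so nil is
	// passed here purely for illustration.
	var out autoscaling.HorizontalPodAutoscaler
	if err := v2alpha1.Convert_v2alpha1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(&in, &out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.Spec.MaxReplicas, *out.Spec.Metrics[0].Resource.TargetAverageUtilization) // 10 80
}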
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference, + Convert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference, + Convert_v2alpha1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler, + Convert_autoscaling_HorizontalPodAutoscaler_To_v2alpha1_HorizontalPodAutoscaler, + Convert_v2alpha1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList, + Convert_autoscaling_HorizontalPodAutoscalerList_To_v2alpha1_HorizontalPodAutoscalerList, + Convert_v2alpha1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec, + Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2alpha1_HorizontalPodAutoscalerSpec, + Convert_v2alpha1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus, + Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2alpha1_HorizontalPodAutoscalerStatus, + Convert_v2alpha1_MetricSpec_To_autoscaling_MetricSpec, + Convert_autoscaling_MetricSpec_To_v2alpha1_MetricSpec, + Convert_v2alpha1_MetricStatus_To_autoscaling_MetricStatus, + Convert_autoscaling_MetricStatus_To_v2alpha1_MetricStatus, + Convert_v2alpha1_ObjectMetricSource_To_autoscaling_ObjectMetricSource, + Convert_autoscaling_ObjectMetricSource_To_v2alpha1_ObjectMetricSource, + Convert_v2alpha1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus, + Convert_autoscaling_ObjectMetricStatus_To_v2alpha1_ObjectMetricStatus, + Convert_v2alpha1_PodsMetricSource_To_autoscaling_PodsMetricSource, + Convert_autoscaling_PodsMetricSource_To_v2alpha1_PodsMetricSource, + Convert_v2alpha1_PodsMetricStatus_To_autoscaling_PodsMetricStatus, + Convert_autoscaling_PodsMetricStatus_To_v2alpha1_PodsMetricStatus, + Convert_v2alpha1_ResourceMetricSource_To_autoscaling_ResourceMetricSource, + Convert_autoscaling_ResourceMetricSource_To_v2alpha1_ResourceMetricSource, + Convert_v2alpha1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus, + Convert_autoscaling_ResourceMetricStatus_To_v2alpha1_ResourceMetricStatus, + ) +} + +func autoConvert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Name = in.Name + out.APIVersion = in.APIVersion + return nil +} + +func Convert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { + return autoConvert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in, out, s) +} + +func autoConvert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *CrossVersionObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Name = in.Name + out.APIVersion = in.APIVersion + return nil +} + +func Convert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *CrossVersionObjectReference, s conversion.Scope) error { + return autoConvert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(in, out, s) +} + +func autoConvert_v2alpha1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { + 
out.ObjectMeta = in.ObjectMeta + if err := Convert_v2alpha1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v2alpha1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v2alpha1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { + return autoConvert_v2alpha1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2alpha1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2alpha1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2alpha1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscaler_To_v2alpha1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2alpha1_HorizontalPodAutoscaler(in, out, s) +} + +func autoConvert_v2alpha1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]autoscaling.HorizontalPodAutoscaler)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v2alpha1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { + return autoConvert_v2alpha1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v2alpha1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]HorizontalPodAutoscaler)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscalerList_To_v2alpha1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v2alpha1_HorizontalPodAutoscalerList(in, out, s) +} + +func autoConvert_v2alpha1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { + return err + } + out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) + out.MaxReplicas = in.MaxReplicas + out.Metrics = *(*[]autoscaling.MetricSpec)(unsafe.Pointer(&in.Metrics)) + return nil +} + +func Convert_v2alpha1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s 
conversion.Scope) error { + return autoConvert_v2alpha1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v2alpha1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { + return err + } + out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) + out.MaxReplicas = in.MaxReplicas + out.Metrics = *(*[]MetricSpec)(unsafe.Pointer(&in.Metrics)) + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2alpha1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v2alpha1_HorizontalPodAutoscalerSpec(in, out, s) +} + +func autoConvert_v2alpha1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { + out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) + out.LastScaleTime = (*v1.Time)(unsafe.Pointer(in.LastScaleTime)) + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + out.CurrentMetrics = *(*[]autoscaling.MetricStatus)(unsafe.Pointer(&in.CurrentMetrics)) + return nil +} + +func Convert_v2alpha1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { + return autoConvert_v2alpha1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v2alpha1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { + out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) + out.LastScaleTime = (*v1.Time)(unsafe.Pointer(in.LastScaleTime)) + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + out.CurrentMetrics = *(*[]MetricStatus)(unsafe.Pointer(&in.CurrentMetrics)) + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2alpha1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v2alpha1_HorizontalPodAutoscalerStatus(in, out, s) +} + +func autoConvert_v2alpha1_MetricSpec_To_autoscaling_MetricSpec(in *MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error { + out.Type = autoscaling.MetricSourceType(in.Type) + out.Object = (*autoscaling.ObjectMetricSource)(unsafe.Pointer(in.Object)) + out.Pods = (*autoscaling.PodsMetricSource)(unsafe.Pointer(in.Pods)) + out.Resource = (*autoscaling.ResourceMetricSource)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_v2alpha1_MetricSpec_To_autoscaling_MetricSpec(in *MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error { + return autoConvert_v2alpha1_MetricSpec_To_autoscaling_MetricSpec(in, out, s) +} + +func autoConvert_autoscaling_MetricSpec_To_v2alpha1_MetricSpec(in *autoscaling.MetricSpec, out *MetricSpec, s conversion.Scope) error { 
+ out.Type = MetricSourceType(in.Type) + out.Object = (*ObjectMetricSource)(unsafe.Pointer(in.Object)) + out.Pods = (*PodsMetricSource)(unsafe.Pointer(in.Pods)) + out.Resource = (*ResourceMetricSource)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_autoscaling_MetricSpec_To_v2alpha1_MetricSpec(in *autoscaling.MetricSpec, out *MetricSpec, s conversion.Scope) error { + return autoConvert_autoscaling_MetricSpec_To_v2alpha1_MetricSpec(in, out, s) +} + +func autoConvert_v2alpha1_MetricStatus_To_autoscaling_MetricStatus(in *MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error { + out.Type = autoscaling.MetricSourceType(in.Type) + out.Object = (*autoscaling.ObjectMetricStatus)(unsafe.Pointer(in.Object)) + out.Pods = (*autoscaling.PodsMetricStatus)(unsafe.Pointer(in.Pods)) + out.Resource = (*autoscaling.ResourceMetricStatus)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_v2alpha1_MetricStatus_To_autoscaling_MetricStatus(in *MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error { + return autoConvert_v2alpha1_MetricStatus_To_autoscaling_MetricStatus(in, out, s) +} + +func autoConvert_autoscaling_MetricStatus_To_v2alpha1_MetricStatus(in *autoscaling.MetricStatus, out *MetricStatus, s conversion.Scope) error { + out.Type = MetricSourceType(in.Type) + out.Object = (*ObjectMetricStatus)(unsafe.Pointer(in.Object)) + out.Pods = (*PodsMetricStatus)(unsafe.Pointer(in.Pods)) + out.Resource = (*ResourceMetricStatus)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_autoscaling_MetricStatus_To_v2alpha1_MetricStatus(in *autoscaling.MetricStatus, out *MetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_MetricStatus_To_v2alpha1_MetricStatus(in, out, s) +} + +func autoConvert_v2alpha1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error { + if err := Convert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.TargetValue = in.TargetValue + return nil +} + +func Convert_v2alpha1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error { + return autoConvert_v2alpha1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in, out, s) +} + +func autoConvert_autoscaling_ObjectMetricSource_To_v2alpha1_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *ObjectMetricSource, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.TargetValue = in.TargetValue + return nil +} + +func Convert_autoscaling_ObjectMetricSource_To_v2alpha1_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *ObjectMetricSource, s conversion.Scope) error { + return autoConvert_autoscaling_ObjectMetricSource_To_v2alpha1_ObjectMetricSource(in, out, s) +} + +func autoConvert_v2alpha1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error { + if err := Convert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.CurrentValue = in.CurrentValue + return nil +} + +func 
Convert_v2alpha1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error { + return autoConvert_v2alpha1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_ObjectMetricStatus_To_v2alpha1_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *ObjectMetricStatus, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.CurrentValue = in.CurrentValue + return nil +} + +func Convert_autoscaling_ObjectMetricStatus_To_v2alpha1_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *ObjectMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_ObjectMetricStatus_To_v2alpha1_ObjectMetricStatus(in, out, s) +} + +func autoConvert_v2alpha1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error { + out.MetricName = in.MetricName + out.TargetAverageValue = in.TargetAverageValue + return nil +} + +func Convert_v2alpha1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error { + return autoConvert_v2alpha1_PodsMetricSource_To_autoscaling_PodsMetricSource(in, out, s) +} + +func autoConvert_autoscaling_PodsMetricSource_To_v2alpha1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *PodsMetricSource, s conversion.Scope) error { + out.MetricName = in.MetricName + out.TargetAverageValue = in.TargetAverageValue + return nil +} + +func Convert_autoscaling_PodsMetricSource_To_v2alpha1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *PodsMetricSource, s conversion.Scope) error { + return autoConvert_autoscaling_PodsMetricSource_To_v2alpha1_PodsMetricSource(in, out, s) +} + +func autoConvert_v2alpha1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error { + out.MetricName = in.MetricName + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_v2alpha1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error { + return autoConvert_v2alpha1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_PodsMetricStatus_To_v2alpha1_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *PodsMetricStatus, s conversion.Scope) error { + out.MetricName = in.MetricName + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_autoscaling_PodsMetricStatus_To_v2alpha1_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *PodsMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_PodsMetricStatus_To_v2alpha1_PodsMetricStatus(in, out, s) +} + +func autoConvert_v2alpha1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error { + out.Name = api.ResourceName(in.Name) + out.TargetAverageUtilization = (*int32)(unsafe.Pointer(in.TargetAverageUtilization)) + out.TargetAverageValue = (*resource.Quantity)(unsafe.Pointer(in.TargetAverageValue)) + return nil +} + +func Convert_v2alpha1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *ResourceMetricSource, out *autoscaling.ResourceMetricSource, s 
conversion.Scope) error { + return autoConvert_v2alpha1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in, out, s) +} + +func autoConvert_autoscaling_ResourceMetricSource_To_v2alpha1_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *ResourceMetricSource, s conversion.Scope) error { + out.Name = api_v1.ResourceName(in.Name) + out.TargetAverageUtilization = (*int32)(unsafe.Pointer(in.TargetAverageUtilization)) + out.TargetAverageValue = (*resource.Quantity)(unsafe.Pointer(in.TargetAverageValue)) + return nil +} + +func Convert_autoscaling_ResourceMetricSource_To_v2alpha1_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *ResourceMetricSource, s conversion.Scope) error { + return autoConvert_autoscaling_ResourceMetricSource_To_v2alpha1_ResourceMetricSource(in, out, s) +} + +func autoConvert_v2alpha1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error { + out.Name = api.ResourceName(in.Name) + out.CurrentAverageUtilization = (*int32)(unsafe.Pointer(in.CurrentAverageUtilization)) + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_v2alpha1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error { + return autoConvert_v2alpha1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_ResourceMetricStatus_To_v2alpha1_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *ResourceMetricStatus, s conversion.Scope) error { + out.Name = api_v1.ResourceName(in.Name) + out.CurrentAverageUtilization = (*int32)(unsafe.Pointer(in.CurrentAverageUtilization)) + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_autoscaling_ResourceMetricStatus_To_v2alpha1_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *ResourceMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_ResourceMetricStatus_To_v2alpha1_ResourceMetricStatus(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..1953eaea66 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.deepcopy.go @@ -0,0 +1,285 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v2alpha1 + +import ( + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. 
Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_CrossVersionObjectReference, InType: reflect.TypeOf(&CrossVersionObjectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_HorizontalPodAutoscaler, InType: reflect.TypeOf(&HorizontalPodAutoscaler{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_HorizontalPodAutoscalerList, InType: reflect.TypeOf(&HorizontalPodAutoscalerList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_HorizontalPodAutoscalerSpec, InType: reflect.TypeOf(&HorizontalPodAutoscalerSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_HorizontalPodAutoscalerStatus, InType: reflect.TypeOf(&HorizontalPodAutoscalerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_MetricSpec, InType: reflect.TypeOf(&MetricSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_MetricStatus, InType: reflect.TypeOf(&MetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_ObjectMetricSource, InType: reflect.TypeOf(&ObjectMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_ObjectMetricStatus, InType: reflect.TypeOf(&ObjectMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_PodsMetricSource, InType: reflect.TypeOf(&PodsMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_PodsMetricStatus, InType: reflect.TypeOf(&PodsMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_ResourceMetricSource, InType: reflect.TypeOf(&ResourceMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_ResourceMetricStatus, InType: reflect.TypeOf(&ResourceMetricStatus{})}, + ) +} + +func DeepCopy_v2alpha1_CrossVersionObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CrossVersionObjectReference) + out := out.(*CrossVersionObjectReference) + *out = *in + return nil + } +} + +func DeepCopy_v2alpha1_HorizontalPodAutoscaler(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscaler) + out := out.(*HorizontalPodAutoscaler) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v2alpha1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v2alpha1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v2alpha1_HorizontalPodAutoscalerList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscalerList) + out := out.(*HorizontalPodAutoscalerList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + if err := DeepCopy_v2alpha1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v2alpha1_HorizontalPodAutoscalerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscalerSpec) + out := out.(*HorizontalPodAutoscalerSpec) + *out = *in + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = **in + } + if in.Metrics != nil { + in, out := &in.Metrics, 
&out.Metrics + *out = make([]MetricSpec, len(*in)) + for i := range *in { + if err := DeepCopy_v2alpha1_MetricSpec(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v2alpha1_HorizontalPodAutoscalerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscalerStatus) + out := out.(*HorizontalPodAutoscalerStatus) + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.LastScaleTime != nil { + in, out := &in.LastScaleTime, &out.LastScaleTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + if in.CurrentMetrics != nil { + in, out := &in.CurrentMetrics, &out.CurrentMetrics + *out = make([]MetricStatus, len(*in)) + for i := range *in { + if err := DeepCopy_v2alpha1_MetricStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v2alpha1_MetricSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*MetricSpec) + out := out.(*MetricSpec) + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricSource) + if err := DeepCopy_v2alpha1_ObjectMetricSource(*in, *out, c); err != nil { + return err + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricSource) + if err := DeepCopy_v2alpha1_PodsMetricSource(*in, *out, c); err != nil { + return err + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricSource) + if err := DeepCopy_v2alpha1_ResourceMetricSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v2alpha1_MetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*MetricStatus) + out := out.(*MetricStatus) + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricStatus) + if err := DeepCopy_v2alpha1_ObjectMetricStatus(*in, *out, c); err != nil { + return err + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricStatus) + if err := DeepCopy_v2alpha1_PodsMetricStatus(*in, *out, c); err != nil { + return err + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricStatus) + if err := DeepCopy_v2alpha1_ResourceMetricStatus(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v2alpha1_ObjectMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMetricSource) + out := out.(*ObjectMetricSource) + *out = *in + out.TargetValue = in.TargetValue.DeepCopy() + return nil + } +} + +func DeepCopy_v2alpha1_ObjectMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMetricStatus) + out := out.(*ObjectMetricStatus) + *out = *in + out.CurrentValue = in.CurrentValue.DeepCopy() + return nil + } +} + +func DeepCopy_v2alpha1_PodsMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodsMetricSource) + out := out.(*PodsMetricSource) + *out = *in + out.TargetAverageValue = in.TargetAverageValue.DeepCopy() + return nil + } +} + +func DeepCopy_v2alpha1_PodsMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodsMetricStatus) + out := out.(*PodsMetricStatus) + *out = *in + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return 
nil + } +} + +func DeepCopy_v2alpha1_ResourceMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceMetricSource) + out := out.(*ResourceMetricSource) + *out = *in + if in.TargetAverageUtilization != nil { + in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization + *out = new(int32) + **out = **in + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + *out = new(resource.Quantity) + **out = (*in).DeepCopy() + } + return nil + } +} + +func DeepCopy_v2alpha1_ResourceMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceMetricStatus) + out := out.(*ResourceMetricStatus) + *out = *in + if in.CurrentAverageUtilization != nil { + in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization + *out = new(int32) + **out = **in + } + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/zz_generated.deepcopy.go new file mode 100644 index 0000000000..8639502a97 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/zz_generated.deepcopy.go @@ -0,0 +1,320 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package autoscaling + +import ( + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
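[Editorial aside, not part of the vendored diff] For orientation on the generated deep-copy helpers registered by RegisterDeepCopies below, here is a minimal, hypothetical sketch that copies an internal HorizontalPodAutoscalerSpec by calling the generated function directly. Real callers normally go through the scheme; a nil Cloner is passed here only because, for the Spec subtree in this generated code, the Cloner is never consulted (it is used only for ObjectMeta at the object level). Field values are invented for illustration.

// Example sketch (illustration only; not part of the vendored files).
package main

import (
	"fmt"

	"k8s.io/client-go/pkg/apis/autoscaling"
)

func main() {
	min := int32(1)
	util := int32(50)

	in := autoscaling.HorizontalPodAutoscalerSpec{
		ScaleTargetRef: autoscaling.CrossVersionObjectReference{Kind: "Deployment", Name: "frontend"},
		MinReplicas:    &min,
		MaxReplicas:    5,
		Metrics: []autoscaling.MetricSpec{{
			Type:     autoscaling.MetricSourceType("Resource"),
			Resource: &autoscaling.ResourceMetricSource{Name: "cpu", TargetAverageUtilization: &util},
		}},
	}

	// nil Cloner: the Spec subtree of these generated helpers never uses it.
	var out autoscaling.HorizontalPodAutoscalerSpec
	if err := autoscaling.DeepCopy_autoscaling_HorizontalPodAutoscalerSpec(&in, &out, nil); err != nil {
		panic(err)
	}

	// Mutating the copy leaves the original untouched.
	*out.MinReplicas = 3
	*out.Metrics[0].Resource.TargetAverageUtilization = 90
	fmt.Println(*in.MinReplicas, *in.Metrics[0].Resource.TargetAverageUtilization)   // 1 50
	fmt.Println(*out.MinReplicas, *out.Metrics[0].Resource.TargetAverageUtilization) // 3 90
}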
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_CrossVersionObjectReference, InType: reflect.TypeOf(&CrossVersionObjectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_HorizontalPodAutoscaler, InType: reflect.TypeOf(&HorizontalPodAutoscaler{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_HorizontalPodAutoscalerList, InType: reflect.TypeOf(&HorizontalPodAutoscalerList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_HorizontalPodAutoscalerSpec, InType: reflect.TypeOf(&HorizontalPodAutoscalerSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_HorizontalPodAutoscalerStatus, InType: reflect.TypeOf(&HorizontalPodAutoscalerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_MetricSpec, InType: reflect.TypeOf(&MetricSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_MetricStatus, InType: reflect.TypeOf(&MetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ObjectMetricSource, InType: reflect.TypeOf(&ObjectMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ObjectMetricStatus, InType: reflect.TypeOf(&ObjectMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_PodsMetricSource, InType: reflect.TypeOf(&PodsMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_PodsMetricStatus, InType: reflect.TypeOf(&PodsMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ResourceMetricSource, InType: reflect.TypeOf(&ResourceMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ResourceMetricStatus, InType: reflect.TypeOf(&ResourceMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_Scale, InType: reflect.TypeOf(&Scale{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, + ) +} + +func DeepCopy_autoscaling_CrossVersionObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CrossVersionObjectReference) + out := out.(*CrossVersionObjectReference) + *out = *in + return nil + } +} + +func DeepCopy_autoscaling_HorizontalPodAutoscaler(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscaler) + out := out.(*HorizontalPodAutoscaler) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_autoscaling_HorizontalPodAutoscalerList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscalerList) + out := out.(*HorizontalPodAutoscalerList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + if err := DeepCopy_autoscaling_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func 
DeepCopy_autoscaling_HorizontalPodAutoscalerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscalerSpec) + out := out.(*HorizontalPodAutoscalerSpec) + *out = *in + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = **in + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]MetricSpec, len(*in)) + for i := range *in { + if err := DeepCopy_autoscaling_MetricSpec(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_autoscaling_HorizontalPodAutoscalerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscalerStatus) + out := out.(*HorizontalPodAutoscalerStatus) + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.LastScaleTime != nil { + in, out := &in.LastScaleTime, &out.LastScaleTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + if in.CurrentMetrics != nil { + in, out := &in.CurrentMetrics, &out.CurrentMetrics + *out = make([]MetricStatus, len(*in)) + for i := range *in { + if err := DeepCopy_autoscaling_MetricStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_autoscaling_MetricSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*MetricSpec) + out := out.(*MetricSpec) + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricSource) + if err := DeepCopy_autoscaling_ObjectMetricSource(*in, *out, c); err != nil { + return err + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricSource) + if err := DeepCopy_autoscaling_PodsMetricSource(*in, *out, c); err != nil { + return err + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricSource) + if err := DeepCopy_autoscaling_ResourceMetricSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_autoscaling_MetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*MetricStatus) + out := out.(*MetricStatus) + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricStatus) + if err := DeepCopy_autoscaling_ObjectMetricStatus(*in, *out, c); err != nil { + return err + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricStatus) + if err := DeepCopy_autoscaling_PodsMetricStatus(*in, *out, c); err != nil { + return err + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricStatus) + if err := DeepCopy_autoscaling_ResourceMetricStatus(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_autoscaling_ObjectMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMetricSource) + out := out.(*ObjectMetricSource) + *out = *in + out.TargetValue = in.TargetValue.DeepCopy() + return nil + } +} + +func DeepCopy_autoscaling_ObjectMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMetricStatus) + out := out.(*ObjectMetricStatus) + *out = *in + out.CurrentValue = in.CurrentValue.DeepCopy() + return nil + } +} + +func DeepCopy_autoscaling_PodsMetricSource(in interface{}, out interface{}, c *conversion.Cloner) 
error { + { + in := in.(*PodsMetricSource) + out := out.(*PodsMetricSource) + *out = *in + out.TargetAverageValue = in.TargetAverageValue.DeepCopy() + return nil + } +} + +func DeepCopy_autoscaling_PodsMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodsMetricStatus) + out := out.(*PodsMetricStatus) + *out = *in + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return nil + } +} + +func DeepCopy_autoscaling_ResourceMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceMetricSource) + out := out.(*ResourceMetricSource) + *out = *in + if in.TargetAverageUtilization != nil { + in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization + *out = new(int32) + **out = **in + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + *out = new(resource.Quantity) + **out = (*in).DeepCopy() + } + return nil + } +} + +func DeepCopy_autoscaling_ResourceMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceMetricStatus) + out := out.(*ResourceMetricStatus) + *out = *in + if in.CurrentAverageUtilization != nil { + in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization + *out = new(int32) + **out = **in + } + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return nil + } +} + +func DeepCopy_autoscaling_Scale(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Scale) + out := out.(*Scale) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + return nil + } +} + +func DeepCopy_autoscaling_ScaleSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleSpec) + out := out.(*ScaleSpec) + *out = *in + return nil + } +} + +func DeepCopy_autoscaling_ScaleStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleStatus) + out := out.(*ScaleStatus) + *out = *in + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/OWNERS b/vendor/k8s.io/client-go/pkg/apis/batch/OWNERS new file mode 100755 index 0000000000..502f907711 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/OWNERS @@ -0,0 +1,19 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- caesarxuchao +- erictune +- sttts +- saad-ali +- ncdc +- timothysc +- soltysh +- dims +- errordeveloper +- mml +- mbohlool +- david-mcmahon +- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/doc.go b/vendor/k8s.io/client-go/pkg/apis/batch/doc.go new file mode 100644 index 0000000000..6fe9522260 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package batch diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/install/install.go b/vendor/k8s.io/client-go/pkg/apis/batch/install/install.go new file mode 100644 index 0000000000..d37f31820d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/install/install.go @@ -0,0 +1,51 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the batch API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/batch" + "k8s.io/client-go/pkg/apis/batch/v1" + "k8s.io/client-go/pkg/apis/batch/v2alpha1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: batch.GroupName, + VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version, v2alpha1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/batch", + AddInternalObjectsToScheme: batch.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1.SchemeGroupVersion.Version: v1.AddToScheme, + v2alpha1.SchemeGroupVersion.Version: v2alpha1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/register.go b/vendor/k8s.io/client-go/pkg/apis/batch/register.go new file mode 100644 index 0000000000..4601ca4ec1 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/register.go @@ -0,0 +1,57 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package batch + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "batch" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Job{}, + &JobList{}, + &JobTemplate{}, + &CronJob{}, + &CronJobList{}, + ) + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJob"), &CronJob{}) + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJobList"), &CronJobList{}) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/types.go b/vendor/k8s.io/client-go/pkg/apis/batch/types.go new file mode 100644 index 0000000000..7a2ad011fe --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/types.go @@ -0,0 +1,286 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package batch + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api" +) + +// +genclient=true + +// Job represents the configuration of a single job. +type Job struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // Spec is a structure defining the expected behavior of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec JobSpec + + // Status is a structure describing current status of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status JobStatus +} + +// JobList is a collection of jobs. +type JobList struct { + metav1.TypeMeta + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta + + // Items is the list of Job. + Items []Job +} + +// JobTemplate describes a template for creating copies of a predefined pod. +type JobTemplate struct { + metav1.TypeMeta + // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // Template defines jobs that will be created from this template + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Template JobTemplateSpec +} + +// JobTemplateSpec describes the data a Job should have when created from a template +type JobTemplateSpec struct { + // Standard object's metadata of the jobs created from this template. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // Specification of the desired behavior of the job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec JobSpec +} + +// JobSpec describes how the job execution will look like. +type JobSpec struct { + + // Parallelism specifies the maximum desired number of pods the job should + // run at any given time. The actual number of pods running in steady state will + // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), + // i.e. when the work left to do is less than max parallelism. + // +optional + Parallelism *int32 + + // Completions specifies the desired number of successfully finished pods the + // job should be run with. Setting to nil means that the success of any + // pod signals the success of all pods, and allows parallelism to have any positive + // value. Setting to 1 means that parallelism is limited to 1 and the success of that + // pod signals the success of the job. + // +optional + Completions *int32 + + // Optional duration in seconds relative to the startTime that the job may be active + // before the system tries to terminate it; value must be positive integer + // +optional + ActiveDeadlineSeconds *int64 + + // Selector is a label query over pods that should match the pod count. + // Normally, the system sets this field for you. + // +optional + Selector *metav1.LabelSelector + + // ManualSelector controls generation of pod labels and pod selectors. + // Leave `manualSelector` unset unless you are certain what you are doing. + // When false or unset, the system pick labels unique to this job + // and appends those labels to the pod template. When true, + // the user is responsible for picking unique labels and specifying + // the selector. Failure to pick a unique label may cause this + // and other jobs to not function correctly. However, You may see + // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` + // API. + // +optional + ManualSelector *bool + + // Template is the object that describes the pod that will be created when + // executing a job. + Template api.PodTemplateSpec +} + +// JobStatus represents the current state of a Job. +type JobStatus struct { + + // Conditions represent the latest available observations of an object's current state. + // +optional + Conditions []JobCondition + + // StartTime represents time when the job was acknowledged by the Job Manager. + // It is not guaranteed to be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + // +optional + StartTime *metav1.Time + + // CompletionTime represents time when the job was completed. It is not guaranteed to + // be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. 
+ // +optional + CompletionTime *metav1.Time + + // Active is the number of actively running pods. + // +optional + Active int32 + + // Succeeded is the number of pods which reached Phase Succeeded. + // +optional + Succeeded int32 + + // Failed is the number of pods which reached Phase Failed. + // +optional + Failed int32 +} + +type JobConditionType string + +// These are valid conditions of a job. +const ( + // JobComplete means the job has completed its execution. + JobComplete JobConditionType = "Complete" + // JobFailed means the job has failed its execution. + JobFailed JobConditionType = "Failed" +) + +// JobCondition describes current state of a job. +type JobCondition struct { + // Type of job condition, Complete or Failed. + Type JobConditionType + // Status of the condition, one of True, False, Unknown. + Status api.ConditionStatus + // Last time the condition was checked. + // +optional + LastProbeTime metav1.Time + // Last time the condition transit from one status to another. + // +optional + LastTransitionTime metav1.Time + // (brief) reason for the condition's last transition. + // +optional + Reason string + // Human readable message indicating details about last transition. + // +optional + Message string +} + +// +genclient=true + +// CronJob represents the configuration of a single cron job. +type CronJob struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // Spec is a structure defining the expected behavior of a job, including the schedule. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec CronJobSpec + + // Status is a structure describing current status of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status CronJobStatus +} + +// CronJobList is a collection of cron jobs. +type CronJobList struct { + metav1.TypeMeta + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta + + // Items is the list of CronJob. + Items []CronJob +} + +// CronJobSpec describes how the job execution will look like and when it will actually run. +type CronJobSpec struct { + + // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. + Schedule string + + // Optional deadline in seconds for starting the job if it misses scheduled + // time for any reason. Missed jobs executions will be counted as failed ones. + // +optional + StartingDeadlineSeconds *int64 + + // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. + // +optional + ConcurrencyPolicy ConcurrencyPolicy + + // Suspend flag tells the controller to suspend subsequent executions, it does + // not apply to already started executions. Defaults to false. + // +optional + Suspend *bool + + // JobTemplate is the object that describes the job that will be created when + // executing a CronJob. + JobTemplate JobTemplateSpec + + // The number of successful finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + SuccessfulJobsHistoryLimit *int32 + + // The number of failed finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. 
+ // +optional + FailedJobsHistoryLimit *int32 +} + +// ConcurrencyPolicy describes how the job will be handled. +// Only one of the following concurrent policies may be specified. +// If none of the following policies is specified, the default one +// is AllowConcurrent. +type ConcurrencyPolicy string + +const ( + // AllowConcurrent allows CronJobs to run concurrently. + AllowConcurrent ConcurrencyPolicy = "Allow" + + // ForbidConcurrent forbids concurrent runs, skipping next run if previous + // hasn't finished yet. + ForbidConcurrent ConcurrencyPolicy = "Forbid" + + // ReplaceConcurrent cancels currently running job and replaces it with a new one. + ReplaceConcurrent ConcurrencyPolicy = "Replace" +) + +// CronJobStatus represents the current state of a cron job. +type CronJobStatus struct { + // Active holds pointers to currently running jobs. + // +optional + Active []api.ObjectReference + + // LastScheduleTime keeps information of when was the last time the job was successfully scheduled. + // +optional + LastScheduleTime *metav1.Time +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/conversion.go new file mode 100644 index 0000000000..7f2bc9d157 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/conversion.go @@ -0,0 +1,84 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/pkg/apis/batch" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + err := scheme.AddConversionFuncs( + Convert_batch_JobSpec_To_v1_JobSpec, + Convert_v1_JobSpec_To_batch_JobSpec, + ) + if err != nil { + return err + } + + return scheme.AddFieldLabelConversionFunc("batch/v1", "Job", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", "metadata.namespace", "status.successful": + return label, value, nil + default: + return "", "", fmt.Errorf("field label %q not supported for Job", label) + } + }, + ) +} + +func Convert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds + out.Selector = in.Selector + if in.ManualSelector != nil { + out.ManualSelector = new(bool) + *out.ManualSelector = *in.ManualSelector + } else { + out.ManualSelector = nil + } + + if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_v1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds + out.Selector = in.Selector + if in.ManualSelector != nil { + out.ManualSelector = new(bool) + *out.ManualSelector = *in.ManualSelector + } else { + out.ManualSelector = nil + } + + if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/defaults.go new file mode 100644 index 0000000000..3603247f24 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/defaults.go @@ -0,0 +1,47 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + RegisterDefaults(scheme) + return scheme.AddDefaultingFuncs( + SetDefaults_Job, + ) +} + +func SetDefaults_Job(obj *Job) { + // For a non-parallel job, you can leave both `.spec.completions` and + // `.spec.parallelism` unset. When both are unset, both are defaulted to 1. 
+ if obj.Spec.Completions == nil && obj.Spec.Parallelism == nil { + obj.Spec.Completions = new(int32) + *obj.Spec.Completions = 1 + obj.Spec.Parallelism = new(int32) + *obj.Spec.Parallelism = 1 + } + if obj.Spec.Parallelism == nil { + obj.Spec.Parallelism = new(int32) + *obj.Spec.Parallelism = 1 + } + labels := obj.Spec.Template.Labels + if labels != nil && len(obj.Labels) == 0 { + obj.Labels = labels + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/doc.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/doc.go new file mode 100644 index 0000000000..c7be42d5a1 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/generated.pb.go new file mode 100644 index 0000000000..0ebc07be72 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/generated.pb.go @@ -0,0 +1,1580 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto + + It has these top-level messages: + Job + JobCondition + JobList + JobSpec + JobStatus +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/pkg/api/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *Job) Reset() { *m = Job{} } +func (*Job) ProtoMessage() {} +func (*Job) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *JobCondition) Reset() { *m = JobCondition{} } +func (*JobCondition) ProtoMessage() {} +func (*JobCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *JobList) Reset() { *m = JobList{} } +func (*JobList) ProtoMessage() {} +func (*JobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *JobSpec) Reset() { *m = JobSpec{} } +func (*JobSpec) ProtoMessage() {} +func (*JobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *JobStatus) Reset() { *m = JobStatus{} } +func (*JobStatus) ProtoMessage() {} +func (*JobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func init() { + proto.RegisterType((*Job)(nil), "k8s.io.client-go.pkg.apis.batch.v1.Job") + proto.RegisterType((*JobCondition)(nil), "k8s.io.client-go.pkg.apis.batch.v1.JobCondition") + proto.RegisterType((*JobList)(nil), "k8s.io.client-go.pkg.apis.batch.v1.JobList") + proto.RegisterType((*JobSpec)(nil), "k8s.io.client-go.pkg.apis.batch.v1.JobSpec") + proto.RegisterType((*JobStatus)(nil), "k8s.io.client-go.pkg.apis.batch.v1.JobStatus") +} +func (m *Job) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Job) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *JobCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) + n4, err := m.LastProbeTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n5, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *JobList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err 
+ } + return data[:n], nil +} + +func (m *JobList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *JobSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Parallelism != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Parallelism)) + } + if m.Completions != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Completions)) + } + if m.ActiveDeadlineSeconds != nil { + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ActiveDeadlineSeconds)) + } + if m.Selector != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n7, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.ManualSelector != nil { + data[i] = 0x28 + i++ + if *m.ManualSelector { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n8, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + return i, nil +} + +func (m *JobStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.StartTime != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size())) + n9, err := m.StartTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.CompletionTime != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CompletionTime.Size())) + n10, err := m.CompletionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + } + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Active)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Succeeded)) + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Failed)) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + 
return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *Job) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *JobSpec) Size() (n int) { + var l int + _ = l + if m.Parallelism != nil { + n += 1 + sovGenerated(uint64(*m.Parallelism)) + } + if m.Completions != nil { + n += 1 + sovGenerated(uint64(*m.Completions)) + } + if m.ActiveDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ManualSelector != nil { + n += 2 + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobStatus) Size() (n int) { + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.StartTime != nil { + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CompletionTime != nil { + l = m.CompletionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Active)) + n += 1 + sovGenerated(uint64(m.Succeeded)) + n += 1 + sovGenerated(uint64(m.Failed)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Job) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Job{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "JobSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "JobStatus", "JobStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *JobCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JobCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + 
fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *JobList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JobList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Job", "Job", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *JobSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JobSpec{`, + `Parallelism:` + valueToStringGenerated(this.Parallelism) + `,`, + `Completions:` + valueToStringGenerated(this.Completions) + `,`, + `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `ManualSelector:` + valueToStringGenerated(this.ManualSelector) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *JobStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JobStatus{`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "JobCondition", "JobCondition", 1), `&`, ``, 1) + `,`, + `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `CompletionTime:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `Active:` + fmt.Sprintf("%v", this.Active) + `,`, + `Succeeded:` + fmt.Sprintf("%v", this.Succeeded) + `,`, + `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Job) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Job: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Job: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = JobConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Job{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Parallelism = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Completions", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Completions = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveDeadlineSeconds = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } 
+ var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ManualSelector", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ManualSelector = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, JobCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartTime == nil { + m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompletionTime == nil { + m.CompletionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.CompletionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + } + m.Active = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Active |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) + } + m.Succeeded = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Succeeded |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) + } + m.Failed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Failed |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 885 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x54, 0xdd, 0x6e, 0xe3, 0x44, + 0x14, 0xce, 0x4f, 0xd3, 0x26, 0x93, 0xb6, 0xbb, 0x8c, 0x54, 0x29, 0xf4, 0x22, 0x59, 0x05, 0x84, + 0x0a, 0xda, 0xb5, 0x49, 0xbb, 0x42, 0x88, 0x0b, 0x24, 0x5c, 0x84, 0x44, 0xd5, 0xb2, 0xd5, 0xa4, + 0x02, 0x89, 0x1f, 0x89, 0xb1, 0x7d, 0x9a, 0x0e, 0xb5, 0x3d, 0x96, 0x67, 0x12, 0xd1, 0x3b, 0xde, + 0x00, 0x1e, 0x06, 0x21, 0x1e, 0xa1, 0x97, 0xbd, 0xe4, 0x2a, 0xa2, 0xe6, 0x2d, 0xf6, 0x0a, 0xcd, + 0x78, 0xfc, 0x93, 0x4d, 0x0a, 0xd9, 0xbd, 0xb3, 0xcf, 0x7c, 0xdf, 0x37, 0x67, 0xce, 0xf9, 0xce, + 0x41, 0x47, 0xd7, 0x1f, 0x0b, 0x8b, 0x71, 0xfb, 0x7a, 0xea, 0x42, 0x12, 0x81, 0x04, 0x61, 0xc7, + 0xd7, 0x13, 0x9b, 0xc6, 0x4c, 0xd8, 0x2e, 0x95, 0xde, 0x95, 0x3d, 0x1b, 0xd9, 0x13, 0x88, 0x20, + 0xa1, 0x12, 0x7c, 0x2b, 0x4e, 0xb8, 0xe4, 0xf8, 0x9d, 0x8c, 0x64, 0x95, 0x24, 0x2b, 0xbe, 0x9e, + 0x58, 0x8a, 0x64, 0x69, 0x92, 0x35, 0x1b, 0xed, 0x3f, 0x9b, 0x30, 0x79, 0x35, 0x75, 0x2d, 0x8f, + 0x87, 0xf6, 0x84, 0x4f, 0xb8, 0xad, 0xb9, 0xee, 0xf4, 0x52, 0xff, 0xe9, 0x1f, 0xfd, 0x95, 0x69, + 0xee, 0x3f, 0x37, 0x89, 0xd0, 0x98, 0x85, 0xd4, 0xbb, 0x62, 0x11, 0x24, 0x37, 0x65, 0x2a, 0x21, + 0x48, 0xba, 0x22, 0x93, 0x7d, 0xfb, 0x21, 0x56, 0x32, 0x8d, 0x24, 0x0b, 0x61, 0x89, 0xf0, 0xd1, + 0xff, 0x11, 0x84, 0x77, 0x05, 0x21, 0x5d, 0xe2, 0x1d, 0x3d, 0xc4, 0x9b, 0x4a, 0x16, 0xd8, 0x2c, + 0x92, 0x42, 0x26, 0x4b, 0xa4, 0xca, 0x9b, 0x04, 0x24, 0x33, 0x48, 0xca, 0x07, 0xc1, 0xcf, 0x34, + 0x8c, 0x03, 0x58, 0xf5, 0xa6, 0xa7, 0x0f, 0xb6, 0x64, 0x05, 0x7a, 0xf8, 0x6b, 0x03, 0x35, 0x4f, + 0xb8, 0x8b, 0x7f, 0x44, 0x6d, 0x55, 0x24, 0x9f, 0x4a, 0xda, 0xab, 0x3f, 0xa9, 0x1f, 0x74, 0x0f, + 0x3f, 0xb4, 0x4c, 0x9b, 0xaa, 0x39, 0x97, 0x8d, 0x52, 0x68, 0x6b, 0x36, 0xb2, 0x5e, 0xb8, 0x3f, + 0x81, 0x27, 0xcf, 0x40, 0x52, 0x07, 0xdf, 0xce, 0x07, 0xb5, 0x74, 0x3e, 0x40, 0x65, 0x8c, 0x14, + 0xaa, 0xf8, 0x2b, 0xb4, 0x21, 0x62, 0xf0, 0x7a, 0x0d, 0xad, 0xfe, 0xd4, 0x5a, 0xc3, 0x04, 0xd6, + 0x09, 0x77, 0xc7, 0x31, 0x78, 0xce, 0xb6, 0x51, 0xde, 0x50, 0x7f, 0x44, 0xeb, 0xe0, 0xaf, 0xd1, + 0xa6, 0x90, 0x54, 0x4e, 0x45, 0xaf, 0xa9, 0x15, 0xad, 0xb5, 0x15, 0x35, 0xcb, 0xd9, 0x35, 0x9a, + 0x9b, 0xd9, 0x3f, 0x31, 0x6a, 0xc3, 0xbb, 0x26, 0xda, 0x3e, 0xe1, 0xee, 0x31, 0x8f, 0x7c, 0x26, + 0x19, 0x8f, 0xf0, 0x73, 0xb4, 0x21, 0x6f, 0x62, 0xd0, 0x65, 0xe9, 0x38, 0x4f, 0xf2, 0x54, 0x2e, + 
0x6e, 0x62, 0x78, 0x39, 0x1f, 0x3c, 0xae, 0x62, 0x55, 0x8c, 0x68, 0x74, 0x25, 0xbd, 0x86, 0xe6, + 0x7d, 0xba, 0x78, 0xdd, 0xcb, 0xf9, 0xe0, 0x3f, 0x1b, 0x65, 0x15, 0x9a, 0x8b, 0xe9, 0xe1, 0x09, + 0xda, 0x09, 0xa8, 0x90, 0xe7, 0x09, 0x77, 0xe1, 0x82, 0x85, 0x60, 0x5e, 0xff, 0xc1, 0x7a, 0xdd, + 0x52, 0x0c, 0x67, 0xcf, 0xa4, 0xb2, 0x73, 0x5a, 0x15, 0x22, 0x8b, 0xba, 0x78, 0x86, 0xb0, 0x0a, + 0x5c, 0x24, 0x34, 0x12, 0xd9, 0xe3, 0xd4, 0x6d, 0x1b, 0xaf, 0x7d, 0xdb, 0xbe, 0xb9, 0x0d, 0x9f, + 0x2e, 0xa9, 0x91, 0x15, 0x37, 0xe0, 0xf7, 0xd0, 0x66, 0x02, 0x54, 0xf0, 0xa8, 0xd7, 0xd2, 0x85, + 0x2b, 0xfa, 0x44, 0x74, 0x94, 0x98, 0x53, 0xfc, 0x3e, 0xda, 0x0a, 0x41, 0x08, 0x3a, 0x81, 0xde, + 0xa6, 0x06, 0x3e, 0x32, 0xc0, 0xad, 0xb3, 0x2c, 0x4c, 0xf2, 0xf3, 0xe1, 0x1f, 0x75, 0xb4, 0x75, + 0xc2, 0xdd, 0x53, 0x26, 0x24, 0xfe, 0x7e, 0xc9, 0xe8, 0xd6, 0x7a, 0x8f, 0x51, 0x6c, 0x6d, 0xf3, + 0xc7, 0xe6, 0x9e, 0x76, 0x1e, 0xa9, 0x98, 0xfc, 0x0c, 0xb5, 0x98, 0x84, 0x50, 0x35, 0xbd, 0x79, + 0xd0, 0x3d, 0x3c, 0x58, 0xd7, 0x93, 0xce, 0x8e, 0x11, 0x6d, 0x7d, 0xa9, 0xe8, 0x24, 0x53, 0x19, + 0xfe, 0xd9, 0xd4, 0x89, 0x2b, 0xd7, 0xe3, 0x11, 0xea, 0xc6, 0x34, 0xa1, 0x41, 0x00, 0x01, 0x13, + 0xa1, 0xce, 0xbd, 0xe5, 0x3c, 0x4a, 0xe7, 0x83, 0xee, 0x79, 0x19, 0x26, 0x55, 0x8c, 0xa2, 0x78, + 0x5c, 0xed, 0x09, 0x55, 0xdc, 0xcc, 0x88, 0x86, 0x72, 0x5c, 0x86, 0x49, 0x15, 0x83, 0x5f, 0xa0, + 0x3d, 0xea, 0x49, 0x36, 0x83, 0xcf, 0x81, 0xfa, 0x01, 0x8b, 0x60, 0x0c, 0x1e, 0x8f, 0xfc, 0x6c, + 0xc8, 0x9a, 0xce, 0xdb, 0xe9, 0x7c, 0xb0, 0xf7, 0xd9, 0x2a, 0x00, 0x59, 0xcd, 0xc3, 0x3f, 0xa0, + 0xb6, 0x80, 0x00, 0x3c, 0xc9, 0x13, 0x63, 0x9e, 0xa3, 0x35, 0xeb, 0x4d, 0x5d, 0x08, 0xc6, 0x86, + 0xea, 0x6c, 0xab, 0x82, 0xe7, 0x7f, 0xa4, 0x90, 0xc4, 0x9f, 0xa0, 0xdd, 0x90, 0x46, 0x53, 0x5a, + 0x20, 0xb5, 0x6b, 0xda, 0x0e, 0x4e, 0xe7, 0x83, 0xdd, 0xb3, 0x85, 0x13, 0xf2, 0x0a, 0x12, 0x7f, + 0x87, 0xda, 0x12, 0xc2, 0x38, 0xa0, 0x32, 0xb3, 0x50, 0xf7, 0xf0, 0xd9, 0xc3, 0xfd, 0x52, 0x29, + 0x9d, 0x73, 0xff, 0xc2, 0x10, 0xf4, 0x5a, 0x2a, 0x9c, 0x90, 0x47, 0x49, 0x21, 0x38, 0xfc, 0xbd, + 0x89, 0x3a, 0xc5, 0xb2, 0xc1, 0x80, 0x90, 0x97, 0x0f, 0xb4, 0xe8, 0xd5, 0xb5, 0x39, 0x46, 0xeb, + 0x9a, 0xa3, 0x58, 0x05, 0xe5, 0x86, 0x2d, 0x42, 0x82, 0x54, 0x84, 0xf1, 0x37, 0xa8, 0x23, 0x24, + 0x4d, 0xa4, 0x1e, 0xd5, 0xc6, 0x6b, 0x8f, 0xea, 0x4e, 0x3a, 0x1f, 0x74, 0xc6, 0xb9, 0x00, 0x29, + 0xb5, 0xf0, 0x25, 0xda, 0x2d, 0x5d, 0xf2, 0x86, 0x6b, 0x47, 0xb7, 0xe4, 0x78, 0x41, 0x85, 0xbc, + 0xa2, 0xaa, 0x86, 0x3f, 0xb3, 0x91, 0xf6, 0x4a, 0xab, 0x1c, 0xfe, 0xcc, 0x73, 0xc4, 0x9c, 0x62, + 0x1b, 0x75, 0xc4, 0xd4, 0xf3, 0x00, 0x7c, 0xf0, 0x75, 0xc7, 0x5b, 0xce, 0x5b, 0x06, 0xda, 0x19, + 0xe7, 0x07, 0xa4, 0xc4, 0x28, 0xe1, 0x4b, 0xca, 0x02, 0xf0, 0x75, 0xa7, 0x2b, 0xc2, 0x5f, 0xe8, + 0x28, 0x31, 0xa7, 0xce, 0xbb, 0xb7, 0xf7, 0xfd, 0xda, 0xdd, 0x7d, 0xbf, 0xf6, 0xd7, 0x7d, 0xbf, + 0xf6, 0x4b, 0xda, 0xaf, 0xdf, 0xa6, 0xfd, 0xfa, 0x5d, 0xda, 0xaf, 0xff, 0x9d, 0xf6, 0xeb, 0xbf, + 0xfd, 0xd3, 0xaf, 0x7d, 0xdb, 0x98, 0x8d, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xe7, 0x0a, + 0x8d, 0xf7, 0x08, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/batch/v1/generated.proto new file mode 100644 index 0000000000..283da1c8dc --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/generated.proto @@ -0,0 +1,168 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.batch.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// Job represents the configuration of a single job. +message Job { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec is a structure defining the expected behavior of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional JobSpec spec = 2; + + // Status is a structure describing current status of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional JobStatus status = 3; +} + +// JobCondition describes current state of a job. +message JobCondition { + // Type of job condition, Complete or Failed. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition was checked. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + + // Last time the condition transit from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // (brief) reason for the condition's last transition. + // +optional + optional string reason = 5; + + // Human readable message indicating details about last transition. + // +optional + optional string message = 6; +} + +// JobList is a collection of jobs. +message JobList { + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Job. + repeated Job items = 2; +} + +// JobSpec describes how the job execution will look like. +message JobSpec { + // Parallelism specifies the maximum desired number of pods the job should + // run at any given time. The actual number of pods running in steady state will + // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), + // i.e. when the work left to do is less than max parallelism. + // More info: http://kubernetes.io/docs/user-guide/jobs + // +optional + optional int32 parallelism = 1; + + // Completions specifies the desired number of successfully finished pods the + // job should be run with. Setting to nil means that the success of any + // pod signals the success of all pods, and allows parallelism to have any positive + // value. 
Setting to 1 means that parallelism is limited to 1 and the success of that + // pod signals the success of the job. + // More info: http://kubernetes.io/docs/user-guide/jobs + // +optional + optional int32 completions = 2; + + // Optional duration in seconds relative to the startTime that the job may be active + // before the system tries to terminate it; value must be positive integer + // +optional + optional int64 activeDeadlineSeconds = 3; + + // Selector is a label query over pods that should match the pod count. + // Normally, the system sets this field for you. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + + // ManualSelector controls generation of pod labels and pod selectors. + // Leave `manualSelector` unset unless you are certain what you are doing. + // When false or unset, the system pick labels unique to this job + // and appends those labels to the pod template. When true, + // the user is responsible for picking unique labels and specifying + // the selector. Failure to pick a unique label may cause this + // and other jobs to not function correctly. However, You may see + // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` + // API. + // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md + // +optional + optional bool manualSelector = 5; + + // Template is the object that describes the pod that will be created when + // executing a job. + // More info: http://kubernetes.io/docs/user-guide/jobs + optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6; +} + +// JobStatus represents the current state of a Job. +message JobStatus { + // Conditions represent the latest available observations of an object's current state. + // More info: http://kubernetes.io/docs/user-guide/jobs + // +optional + repeated JobCondition conditions = 1; + + // StartTime represents time when the job was acknowledged by the Job Manager. + // It is not guaranteed to be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2; + + // CompletionTime represents time when the job was completed. It is not guaranteed to + // be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3; + + // Active is the number of actively running pods. + // +optional + optional int32 active = 4; + + // Succeeded is the number of pods which reached Phase Succeeded. + // +optional + optional int32 succeeded = 5; + + // Failed is the number of pods which reached Phase Failed. + // +optional + optional int32 failed = 6; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/register.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/register.go new file mode 100644 index 0000000000..4ba570d1b5 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/register.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "batch" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Job{}, + &JobList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/types.generated.go new file mode 100644 index 0000000000..49dab1f886 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/types.generated.go @@ -0,0 +1,2681 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +package v1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg4_resource "k8s.io/apimachinery/pkg/api/resource" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + pkg5_intstr "k8s.io/apimachinery/pkg/util/intstr" + pkg3_v1 "k8s.io/client-go/pkg/api/v1" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg4_resource.Quantity + var v1 pkg1_v1.TypeMeta + var v2 pkg2_types.UID + var v3 pkg5_intstr.IntOrString + var v4 pkg3_v1.PodTemplateSpec + var v5 time.Time + _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 + } +} + +func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, 
string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Job) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = 
pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = JobSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = JobStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = JobSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = JobStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 
bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceJob(([]Job)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceJob(([]Job)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceJob((*[]Job)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceJob((*[]Job)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Parallelism != nil + yyq2[1] = x.Completions != nil + yyq2[2] = x.ActiveDeadlineSeconds != nil + yyq2[3] = x.Selector != nil + yyq2[4] = x.ManualSelector != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Parallelism == nil { + r.EncodeNil() + } else { + yy4 := *x.Parallelism + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("parallelism")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Parallelism == nil { + r.EncodeNil() + } else { + yy6 := *x.Parallelism + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Completions == nil { + r.EncodeNil() + } else { + yy9 := *x.Completions + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("completions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Completions == nil { + r.EncodeNil() + } else { + yy11 := *x.Completions + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeInt(int64(yy11)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ActiveDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy14 := *x.ActiveDeadlineSeconds + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeInt(int64(yy14)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ActiveDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy16 := *x.ActiveDeadlineSeconds + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(yy16)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + 
if yyq2[3] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.ManualSelector == nil { + r.EncodeNil() + } else { + yy22 := *x.ManualSelector + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeBool(bool(yy22)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("manualSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ManualSelector == nil { + r.EncodeNil() + } else { + yy24 := *x.ManualSelector + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeBool(bool(yy24)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy27 := &x.Template + yy27.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy29 := &x.Template + yy29.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "parallelism": + if r.TryDecodeAsNil() { + if x.Parallelism != nil { + x.Parallelism = nil + } + } else { + if x.Parallelism == nil { + x.Parallelism = new(int32) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(x.Parallelism)) 
= int32(r.DecodeInt(32)) + } + } + case "completions": + if r.TryDecodeAsNil() { + if x.Completions != nil { + x.Completions = nil + } + } else { + if x.Completions == nil { + x.Completions = new(int32) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) + } + } + case "activeDeadlineSeconds": + if r.TryDecodeAsNil() { + if x.ActiveDeadlineSeconds != nil { + x.ActiveDeadlineSeconds = nil + } + } else { + if x.ActiveDeadlineSeconds == nil { + x.ActiveDeadlineSeconds = new(int64) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + case "manualSelector": + if r.TryDecodeAsNil() { + if x.ManualSelector != nil { + x.ManualSelector = nil + } + } else { + if x.ManualSelector == nil { + x.ManualSelector = new(bool) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(x.ManualSelector)) = r.DecodeBool() + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = pkg3_v1.PodTemplateSpec{} + } else { + yyv14 := &x.Template + yyv14.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj15 int + var yyb15 bool + var yyhl15 bool = l >= 0 + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Parallelism != nil { + x.Parallelism = nil + } + } else { + if x.Parallelism == nil { + x.Parallelism = new(int32) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Completions != nil { + x.Completions = nil + } + } else { + if x.Completions == nil { + x.Completions = new(int32) + } + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ActiveDeadlineSeconds != nil { + x.ActiveDeadlineSeconds = nil + } + } else { + if x.ActiveDeadlineSeconds == nil { + x.ActiveDeadlineSeconds = new(int64) + } + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ManualSelector != nil { + x.ManualSelector = nil + } + } else { + if x.ManualSelector == nil { + x.ManualSelector = new(bool) + } + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + *((*bool)(x.ManualSelector)) = r.DecodeBool() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = pkg3_v1.PodTemplateSpec{} + } else { + yyv26 := &x.Template + yyv26.CodecDecodeSelf(d) + } + for { + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj15-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Conditions) != 0 + yyq2[1] = x.StartTime != nil + yyq2[2] = x.CompletionTime != nil + yyq2[3] = x.Active != 0 + yyq2[4] = x.Succeeded != 0 + yyq2[5] = x.Failed != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.StartTime == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.StartTime) { + } else if yym7 { + z.EncBinaryMarshal(x.StartTime) + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(x.StartTime) + } else { + 
z.EncFallback(x.StartTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("startTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.StartTime == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.StartTime) { + } else if yym8 { + z.EncBinaryMarshal(x.StartTime) + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(x.StartTime) + } else { + z.EncFallback(x.StartTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.CompletionTime == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { + } else if yym10 { + z.EncBinaryMarshal(x.CompletionTime) + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(x.CompletionTime) + } else { + z.EncFallback(x.CompletionTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("completionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CompletionTime == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { + } else if yym11 { + z.EncBinaryMarshal(x.CompletionTime) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(x.CompletionTime) + } else { + z.EncFallback(x.CompletionTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.Active)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("active")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.Active)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.Succeeded)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("succeeded")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.Succeeded)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(x.Failed)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("failed")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.Failed)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv4 := &x.Conditions + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceJobCondition((*[]JobCondition)(yyv4), d) + } + } + case "startTime": + if r.TryDecodeAsNil() { + if x.StartTime != nil { + x.StartTime = nil + } + } else { + if x.StartTime == nil { + x.StartTime = new(pkg1_v1.Time) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.StartTime) { + } else if yym7 { + z.DecBinaryUnmarshal(x.StartTime) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.StartTime) + } else { + z.DecFallback(x.StartTime, false) + } + } + case "completionTime": + if r.TryDecodeAsNil() { + if x.CompletionTime != nil { + x.CompletionTime = nil + } + } else { + if x.CompletionTime == nil { + x.CompletionTime = new(pkg1_v1.Time) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { + } else if yym9 { + z.DecBinaryUnmarshal(x.CompletionTime) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.CompletionTime) + } else { + z.DecFallback(x.CompletionTime, false) + } + } + case "active": + if r.TryDecodeAsNil() { + x.Active = 0 + } else { + yyv10 := &x.Active + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "succeeded": + if r.TryDecodeAsNil() { + x.Succeeded = 0 + } else { + yyv12 := &x.Succeeded + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(yyv12)) = int32(r.DecodeInt(32)) + } + } + case "failed": + if r.TryDecodeAsNil() { + x.Failed = 0 + } else { + yyv14 := &x.Failed + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = 
r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv17 := &x.Conditions + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + h.decSliceJobCondition((*[]JobCondition)(yyv17), d) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.StartTime != nil { + x.StartTime = nil + } + } else { + if x.StartTime == nil { + x.StartTime = new(pkg1_v1.Time) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(x.StartTime) { + } else if yym20 { + z.DecBinaryUnmarshal(x.StartTime) + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.StartTime) + } else { + z.DecFallback(x.StartTime, false) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CompletionTime != nil { + x.CompletionTime = nil + } + } else { + if x.CompletionTime == nil { + x.CompletionTime = new(pkg1_v1.Time) + } + yym22 := z.DecBinary() + _ = yym22 + if false { + } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { + } else if yym22 { + z.DecBinaryUnmarshal(x.CompletionTime) + } else if !yym22 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.CompletionTime) + } else { + z.DecFallback(x.CompletionTime, false) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Active = 0 + } else { + yyv23 := &x.Active + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Succeeded = 0 + } else { + yyv25 := &x.Succeeded + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Failed = 0 + } else { + yyv27 := &x.Failed + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(yyv27)) = int32(r.DecodeInt(32)) + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj16-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x JobConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() 
&& z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *JobConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf7 := &x.Status + yysf7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf8 := &x.Status + yysf8.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastProbeTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastProbeTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.LastTransitionTime + yym18 := z.EncBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } else if !yym18 && 
z.IsJSONHandle() { + z.EncJSONMarshal(yy17) + } else { + z.EncFallback(yy17) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "lastProbeTime": + if r.TryDecodeAsNil() { + x.LastProbeTime = pkg1_v1.Time{} + } else { + yyv6 := &x.LastProbeTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "lastTransitionTime": + if r.TryDecodeAsNil() { + 
x.LastTransitionTime = pkg1_v1.Time{} + } else { + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv10 := &x.Reason + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv12 := &x.Message + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv15 := &x.Type + yyv15.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv16 := &x.Status + yyv16.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastProbeTime = pkg1_v1.Time{} + } else { + yyv17 := &x.LastProbeTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg1_v1.Time{} + } else { + yyv19 := &x.LastTransitionTime + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if yym20 { + z.DecBinaryUnmarshal(yyv19) + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) + } else { + z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv21 := &x.Reason + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv23 := &x.Message + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Job{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 880) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Job, yyrl1) + } + } else { + yyv1 = make([]Job, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Job{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Job{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Job{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Job{}) // var yyz1 Job + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Job{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Job{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + 
yyv1 = []JobCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]JobCondition, yyrl1) + } + } else { + yyv1 = make([]JobCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = JobCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, JobCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = JobCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, JobCondition{}) // var yyz1 JobCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = JobCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []JobCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/types.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/types.go new file mode 100644 index 0000000000..734e6204d4 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/types.go @@ -0,0 +1,168 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api/v1" +) + +// +genclient=true + +// Job represents the configuration of a single job. +type Job struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec is a structure defining the expected behavior of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is a structure describing current status of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// JobList is a collection of jobs. 
+type JobList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Job. + Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// JobSpec describes how the job execution will look like. +type JobSpec struct { + + // Parallelism specifies the maximum desired number of pods the job should + // run at any given time. The actual number of pods running in steady state will + // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), + // i.e. when the work left to do is less than max parallelism. + // More info: http://kubernetes.io/docs/user-guide/jobs + // +optional + Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` + + // Completions specifies the desired number of successfully finished pods the + // job should be run with. Setting to nil means that the success of any + // pod signals the success of all pods, and allows parallelism to have any positive + // value. Setting to 1 means that parallelism is limited to 1 and the success of that + // pod signals the success of the job. + // More info: http://kubernetes.io/docs/user-guide/jobs + // +optional + Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"` + + // Optional duration in seconds relative to the startTime that the job may be active + // before the system tries to terminate it; value must be positive integer + // +optional + ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"` + + // Selector is a label query over pods that should match the pod count. + // Normally, the system sets this field for you. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` + + // ManualSelector controls generation of pod labels and pod selectors. + // Leave `manualSelector` unset unless you are certain what you are doing. + // When false or unset, the system pick labels unique to this job + // and appends those labels to the pod template. When true, + // the user is responsible for picking unique labels and specifying + // the selector. Failure to pick a unique label may cause this + // and other jobs to not function correctly. However, You may see + // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` + // API. + // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md + // +optional + ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` + + // Template is the object that describes the pod that will be created when + // executing a job. + // More info: http://kubernetes.io/docs/user-guide/jobs + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` +} + +// JobStatus represents the current state of a Job. +type JobStatus struct { + + // Conditions represent the latest available observations of an object's current state. 
+ // More info: http://kubernetes.io/docs/user-guide/jobs + // +optional + Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + + // StartTime represents time when the job was acknowledged by the Job Manager. + // It is not guaranteed to be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + // +optional + StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` + + // CompletionTime represents time when the job was completed. It is not guaranteed to + // be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + // +optional + CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` + + // Active is the number of actively running pods. + // +optional + Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` + + // Succeeded is the number of pods which reached Phase Succeeded. + // +optional + Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"` + + // Failed is the number of pods which reached Phase Failed. + // +optional + Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` +} + +type JobConditionType string + +// These are valid conditions of a job. +const ( + // JobComplete means the job has completed its execution. + JobComplete JobConditionType = "Complete" + // JobFailed means the job has failed its execution. + JobFailed JobConditionType = "Failed" +) + +// JobCondition describes current state of a job. +type JobCondition struct { + // Type of job condition, Complete or Failed. + Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + // Last time the condition was checked. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` + // Last time the condition transit from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // (brief) reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..3d224e703a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/types_swagger_doc_generated.go @@ -0,0 +1,93 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Job = map[string]string{ + "": "Job represents the configuration of a single job.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (Job) SwaggerDoc() map[string]string { + return map_Job +} + +var map_JobCondition = map[string]string{ + "": "JobCondition describes current state of a job.", + "type": "Type of job condition, Complete or Failed.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastProbeTime": "Last time the condition was checked.", + "lastTransitionTime": "Last time the condition transit from one status to another.", + "reason": "(brief) reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (JobCondition) SwaggerDoc() map[string]string { + return map_JobCondition +} + +var map_JobList = map[string]string{ + "": "JobList is a collection of jobs.", + "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is the list of Job.", +} + +func (JobList) SwaggerDoc() map[string]string { + return map_JobList +} + +var map_JobSpec = map[string]string{ + "": "JobSpec describes how the job execution will look like.", + "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://kubernetes.io/docs/user-guide/jobs", + "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://kubernetes.io/docs/user-guide/jobs", + "activeDeadlineSeconds": "Optional duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer", + "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. 
More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "manualSelector": "ManualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md", + "template": "Template is the object that describes the pod that will be created when executing a job. More info: http://kubernetes.io/docs/user-guide/jobs", +} + +func (JobSpec) SwaggerDoc() map[string]string { + return map_JobSpec +} + +var map_JobStatus = map[string]string{ + "": "JobStatus represents the current state of a Job.", + "conditions": "Conditions represent the latest available observations of an object's current state. More info: http://kubernetes.io/docs/user-guide/jobs", + "startTime": "StartTime represents time when the job was acknowledged by the Job Manager. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", + "completionTime": "CompletionTime represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", + "active": "Active is the number of actively running pods.", + "succeeded": "Succeeded is the number of pods which reached Phase Succeeded.", + "failed": "Failed is the number of pods which reached Phase Failed.", +} + +func (JobStatus) SwaggerDoc() map[string]string { + return map_JobStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.conversion.go new file mode 100644 index 0000000000..718037076f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.conversion.go @@ -0,0 +1,202 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/client-go/pkg/api" + api_v1 "k8s.io/client-go/pkg/api/v1" + batch "k8s.io/client-go/pkg/apis/batch" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_Job_To_batch_Job, + Convert_batch_Job_To_v1_Job, + Convert_v1_JobCondition_To_batch_JobCondition, + Convert_batch_JobCondition_To_v1_JobCondition, + Convert_v1_JobList_To_batch_JobList, + Convert_batch_JobList_To_v1_JobList, + Convert_v1_JobSpec_To_batch_JobSpec, + Convert_batch_JobSpec_To_v1_JobSpec, + Convert_v1_JobStatus_To_batch_JobStatus, + Convert_batch_JobStatus_To_v1_JobStatus, + ) +} + +func autoConvert_v1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_JobStatus_To_batch_JobStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { + return autoConvert_v1_Job_To_batch_Job(in, out, s) +} + +func autoConvert_batch_Job_To_v1_Job(in *batch.Job, out *Job, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_batch_JobSpec_To_v1_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_batch_JobStatus_To_v1_JobStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_batch_Job_To_v1_Job(in *batch.Job, out *Job, s conversion.Scope) error { + return autoConvert_batch_Job_To_v1_Job(in, out, s) +} + +func autoConvert_v1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { + out.Type = batch.JobConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { + return autoConvert_v1_JobCondition_To_batch_JobCondition(in, out, s) +} + +func autoConvert_batch_JobCondition_To_v1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { + out.Type = JobConditionType(in.Type) + out.Status = api_v1.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_batch_JobCondition_To_v1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { + return autoConvert_batch_JobCondition_To_v1_JobCondition(in, out, s) +} + +func autoConvert_v1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]batch.Job, len(*in)) + for i := range *in { + if err := Convert_v1_Job_To_batch_Job(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { + return autoConvert_v1_JobList_To_batch_JobList(in, out, s) +} + +func autoConvert_batch_JobList_To_v1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Job, len(*in)) + for i := range *in { + if err := Convert_batch_Job_To_v1_Job(&(*in)[i], &(*out)[i], s); 
err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_batch_JobList_To_v1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { + return autoConvert_batch_JobList_To_v1_JobList(in, out, s) +} + +func autoConvert_v1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { + out.Parallelism = (*int32)(unsafe.Pointer(in.Parallelism)) + out.Completions = (*int32)(unsafe.Pointer(in.Completions)) + out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + out.ManualSelector = (*bool)(unsafe.Pointer(in.ManualSelector)) + if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func autoConvert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { + out.Parallelism = (*int32)(unsafe.Pointer(in.Parallelism)) + out.Completions = (*int32)(unsafe.Pointer(in.Completions)) + out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + out.ManualSelector = (*bool)(unsafe.Pointer(in.ManualSelector)) + if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { + out.Conditions = *(*[]batch.JobCondition)(unsafe.Pointer(&in.Conditions)) + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) + out.CompletionTime = (*meta_v1.Time)(unsafe.Pointer(in.CompletionTime)) + out.Active = in.Active + out.Succeeded = in.Succeeded + out.Failed = in.Failed + return nil +} + +func Convert_v1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { + return autoConvert_v1_JobStatus_To_batch_JobStatus(in, out, s) +} + +func autoConvert_batch_JobStatus_To_v1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { + out.Conditions = *(*[]JobCondition)(unsafe.Pointer(&in.Conditions)) + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) + out.CompletionTime = (*meta_v1.Time)(unsafe.Pointer(in.CompletionTime)) + out.Active = in.Active + out.Succeeded = in.Succeeded + out.Failed = in.Failed + return nil +} + +func Convert_batch_JobStatus_To_v1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { + return autoConvert_batch_JobStatus_To_v1_JobStatus(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..5e31d5964b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.deepcopy.go @@ -0,0 +1,162 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api_v1 "k8s.io/client-go/pkg/api/v1" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Job, InType: reflect.TypeOf(&Job{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_JobCondition, InType: reflect.TypeOf(&JobCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_JobList, InType: reflect.TypeOf(&JobList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_JobSpec, InType: reflect.TypeOf(&JobSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_JobStatus, InType: reflect.TypeOf(&JobStatus{})}, + ) +} + +func DeepCopy_v1_Job(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Job) + out := out.(*Job) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_JobSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_JobStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_JobCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobCondition) + out := out.(*JobCondition) + *out = *in + out.LastProbeTime = in.LastProbeTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_v1_JobList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobList) + out := out.(*JobList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Job, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Job(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_JobSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobSpec) + out := out.(*JobSpec) + *out = *in + if in.Parallelism != nil { + in, out := &in.Parallelism, &out.Parallelism + *out = new(int32) + **out = **in + } + if in.Completions != nil { + in, out := &in.Completions, &out.Completions + *out = new(int32) + **out = **in + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*meta_v1.LabelSelector) + } + } + if in.ManualSelector != nil { + in, out := &in.ManualSelector, &out.ManualSelector + *out = new(bool) + **out = **in + } + if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_JobStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobStatus) + out := out.(*JobStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, 
&out.Conditions + *out = make([]JobCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1_JobCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(meta_v1.Time) + **out = (*in).DeepCopy() + } + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + *out = new(meta_v1.Time) + **out = (*in).DeepCopy() + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.defaults.go new file mode 100644 index 0000000000..417024f62d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.defaults.go @@ -0,0 +1,176 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + api_v1 "k8s.io/client-go/pkg/api/v1" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&Job{}, func(obj interface{}) { SetObjectDefaults_Job(obj.(*Job)) }) + scheme.AddTypeDefaultingFunc(&JobList{}, func(obj interface{}) { SetObjectDefaults_JobList(obj.(*JobList)) }) + return nil +} + +func SetObjectDefaults_Job(in *Job) { + SetDefaults_Job(in) + api_v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + api_v1.SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + api_v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + api_v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + api_v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + api_v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + api_v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + api_v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + api_v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + api_v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + api_v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { 
+ api_v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + api_v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + api_v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + api_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + api_v1.SetDefaults_ResourceList(&a.Resources.Limits) + api_v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + api_v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + api_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + api_v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + api_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + api_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + api_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + api_v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + api_v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + api_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + api_v1.SetDefaults_ResourceList(&a.Resources.Limits) + api_v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + api_v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + api_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + api_v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + api_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + api_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + api_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_JobList(in *JobList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Job(a) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/conversion.go new file mode 100644 index 0000000000..2393fdca97 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/conversion.go @@ -0,0 +1,44 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + var err error + // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. + for _, k := range []string{"Job", "JobTemplate", "CronJob"} { + kind := k // don't close over range variables + err = scheme.AddFieldLabelConversionFunc("batch/v2alpha1", kind, + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", "metadata.namespace", "status.successful": + return label, value, nil + default: + return "", "", fmt.Errorf("field label %q not supported for %q", label, kind) + } + }) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/defaults.go new file mode 100644 index 0000000000..6da07cc7d2 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/defaults.go @@ -0,0 +1,37 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + RegisterDefaults(scheme) + return scheme.AddDefaultingFuncs( + SetDefaults_CronJob, + ) +} + +func SetDefaults_CronJob(obj *CronJob) { + if obj.Spec.ConcurrencyPolicy == "" { + obj.Spec.ConcurrencyPolicy = AllowConcurrent + } + if obj.Spec.Suspend == nil { + obj.Spec.Suspend = new(bool) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/doc.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/doc.go new file mode 100644 index 0000000000..a9fe60b1c7 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.pb.go new file mode 100644 index 0000000000..81dab99089 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.pb.go @@ -0,0 +1,1505 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v2alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto + + It has these top-level messages: + CronJob + CronJobList + CronJobSpec + CronJobStatus + JobTemplate + JobTemplateSpec +*/ +package v2alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/pkg/api/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *CronJob) Reset() { *m = CronJob{} } +func (*CronJob) ProtoMessage() {} +func (*CronJob) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *CronJobList) Reset() { *m = CronJobList{} } +func (*CronJobList) ProtoMessage() {} +func (*CronJobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } +func (*CronJobSpec) ProtoMessage() {} +func (*CronJobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } +func (*CronJobStatus) ProtoMessage() {} +func (*CronJobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *JobTemplate) Reset() { *m = JobTemplate{} } +func (*JobTemplate) ProtoMessage() {} +func (*JobTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } +func (*JobTemplateSpec) ProtoMessage() {} +func (*JobTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func init() { + proto.RegisterType((*CronJob)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.CronJob") + proto.RegisterType((*CronJobList)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.CronJobList") + proto.RegisterType((*CronJobSpec)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.CronJobSpec") + proto.RegisterType((*CronJobStatus)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.CronJobStatus") + proto.RegisterType((*JobTemplate)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.JobTemplate") + proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.JobTemplateSpec") +} +func (m *CronJob) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CronJob) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa 
+ i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *CronJobList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CronJobList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CronJobSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CronJobSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Schedule))) + i += copy(data[i:], m.Schedule) + if m.StartingDeadlineSeconds != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.StartingDeadlineSeconds)) + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ConcurrencyPolicy))) + i += copy(data[i:], m.ConcurrencyPolicy) + if m.Suspend != nil { + data[i] = 0x20 + i++ + if *m.Suspend { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.JobTemplate.Size())) + n5, err := m.JobTemplate.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + if m.SuccessfulJobsHistoryLimit != nil { + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.SuccessfulJobsHistoryLimit)) + } + if m.FailedJobsHistoryLimit != nil { + data[i] = 0x38 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.FailedJobsHistoryLimit)) + } + return i, nil +} + +func (m *CronJobStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CronJobStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Active) > 0 { + for _, msg := range m.Active { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.LastScheduleTime != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastScheduleTime.Size())) + n6, err := m.LastScheduleTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} + +func (m *JobTemplate) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return 
data[:n], nil +} + +func (m *JobTemplate) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n8, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + return i, nil +} + +func (m *JobTemplateSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobTemplateSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n10, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *CronJob) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CronJobList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CronJobSpec) Size() (n int) { + var l int + _ = l + l = len(m.Schedule) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartingDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.StartingDeadlineSeconds)) + } + l = len(m.ConcurrencyPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.Suspend != nil { + n += 2 + } + l = m.JobTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.SuccessfulJobsHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.SuccessfulJobsHistoryLimit)) + } + if m.FailedJobsHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.FailedJobsHistoryLimit)) + } + return n +} + +func (m *CronJobStatus) Size() (n int) { + var l int + _ = l + if len(m.Active) > 0 { + for _, e := range m.Active { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.LastScheduleTime != nil { + l = m.LastScheduleTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *JobTemplate) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = 
m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobTemplateSpec) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CronJob) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CronJob{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronJobSpec", "CronJobSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CronJobStatus", "CronJobStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CronJobList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CronJobList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CronJob", "CronJob", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CronJobSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CronJobSpec{`, + `Schedule:` + fmt.Sprintf("%v", this.Schedule) + `,`, + `StartingDeadlineSeconds:` + valueToStringGenerated(this.StartingDeadlineSeconds) + `,`, + `ConcurrencyPolicy:` + fmt.Sprintf("%v", this.ConcurrencyPolicy) + `,`, + `Suspend:` + valueToStringGenerated(this.Suspend) + `,`, + `JobTemplate:` + strings.Replace(strings.Replace(this.JobTemplate.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, + `SuccessfulJobsHistoryLimit:` + valueToStringGenerated(this.SuccessfulJobsHistoryLimit) + `,`, + `FailedJobsHistoryLimit:` + valueToStringGenerated(this.FailedJobsHistoryLimit) + `,`, + `}`, + }, "") + return s +} +func (this *CronJobStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CronJobStatus{`, + `Active:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Active), "ObjectReference", "k8s_io_kubernetes_pkg_api_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `}`, + }, "") + return s +} +func (this *JobTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JobTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *JobTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JobTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", 
"k8s_io_kubernetes_pkg_apis_batch_v1.JobSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CronJob) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJob: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJob: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronJobList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire 
>> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJobList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJobList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CronJob{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronJobSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJobSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJobSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schedule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schedule = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartingDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + m.StartingDeadlineSeconds = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConcurrencyPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConcurrencyPolicy = ConcurrencyPolicy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Suspend = &b + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobTemplate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.JobTemplate.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessfulJobsHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SuccessfulJobsHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailedJobsHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.FailedJobsHistoryLimit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronJobStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJobStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJobStatus: illegal tag 
%d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Active = append(m.Active, k8s_io_kubernetes_pkg_api_v1.ObjectReference{}) + if err := m.Active[len(m.Active)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScheduleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScheduleTime == nil { + m.LastScheduleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.LastScheduleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobTemplate) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobTemplateSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 799 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x4f, 0xe3, 0x46, + 0x18, 0xc7, 0xe3, 0x90, 0x37, 0x26, 0xa5, 0x05, 0xb7, 0x82, 0x28, 0x95, 0x9c, 0x28, 0x52, 0xa5, + 0x14, 0x81, 0x5d, 0x42, 0x85, 0x68, 0x6f, 0x35, 0x55, 0xd5, 0x22, 0xfa, 0x22, 0x07, 0xd4, 0xaa, + 0x42, 0x15, 0x63, 0xe7, 0x49, 0x32, 0xc4, 0x6f, 0xf5, 0x8c, 0xa3, 0xe6, 0xd6, 0x8f, 0xd0, 0x6f, + 0xd1, 0x6f, 0xb1, 0x97, 0xdd, 0x03, 0x47, 0x0e, 0x7b, 0xd8, 0xbd, 0x44, 0x8b, 0xf7, 0x5b, 0xec, + 0x69, 0xe5, 0x89, 0x13, 0x07, 0x1c, 0x2f, 0x61, 0x57, 0xe2, 0xe6, 0x19, 0x3f, 0xff, 0xdf, 0x3c, + 0xcf, 0xf3, 0x7f, 0x66, 0xd0, 0x37, 0x83, 0x43, 0x2a, 0x13, 0x47, 0x19, 0xf8, 0x3a, 0x78, 0x36, + 0x30, 0xa0, 0x8a, 0x3b, 0xe8, 0x29, 0xd8, 0x25, 0x54, 0xd1, 0x31, 0x33, 0xfa, 0xca, 0xb0, 0x85, + 0x4d, 0xb7, 0x8f, 0xf7, 0x94, 0x1e, 0xd8, 0xe0, 0x61, 0x06, 0x1d, 0xd9, 0xf5, 0x1c, 0xe6, 0x88, + 0x5f, 0x4e, 0xa4, 0x72, 0x2c, 0x95, 0xdd, 0x41, 0x4f, 0x0e, 0xa5, 0x32, 0x97, 0xca, 0x53, 0x69, + 0x75, 0xb7, 0x47, 0x58, 0xdf, 0xd7, 0x65, 0xc3, 0xb1, 0x94, 0x9e, 0xd3, 0x73, 0x14, 0x4e, 0xd0, + 0xfd, 0x2e, 0x5f, 0xf1, 0x05, 0xff, 0x9a, 0x90, 0xab, 0x5f, 0x47, 0x49, 0x61, 0x97, 0x58, 0xd8, + 0xe8, 0x13, 0x1b, 0xbc, 0x51, 0x9c, 0x96, 0x05, 0x0c, 0x2b, 0xc3, 0x44, 0x3e, 0x55, 0x25, 0x4d, + 0xe5, 0xf9, 0x36, 0x23, 0x16, 0x24, 0x04, 0x07, 0xf7, 0x09, 0xa8, 0xd1, 0x07, 0x0b, 0x27, 0x74, + 0xfb, 0x69, 0x3a, 0x9f, 0x11, 0x53, 0x21, 0x36, 0xa3, 0xcc, 0x4b, 0x88, 0xe6, 0x6a, 0xa2, 0xe0, + 0x0d, 0xc1, 0x8b, 0x0b, 0x82, 0x7f, 0xb0, 0xe5, 0x9a, 0xb0, 0xa8, 0xa6, 0x9d, 0x54, 0x7b, 0x16, + 0x45, 0xef, 0xdf, 0x6f, 0x66, 0x42, 0xd4, 0xf8, 0x3f, 0x8b, 0x8a, 0x47, 0x9e, 0x63, 0x1f, 0x3b, + 0xba, 0x78, 0x81, 0x4a, 0x61, 0x77, 0x3b, 0x98, 0xe1, 0x8a, 0x50, 0x17, 0x9a, 0xe5, 0xd6, 0x57, + 0x72, 0xe4, 0xf2, 0x7c, 0xb1, 0xb1, 0xcf, 0x61, 0xb4, 0x3c, 0xdc, 0x93, 0x7f, 0xd5, 0x2f, 0xc1, + 0x60, 0x3f, 0x03, 0xc3, 0xaa, 0x78, 0x35, 0xae, 0x65, 0x82, 0x71, 0x0d, 0xc5, 0x7b, 0xda, 0x8c, + 0x2a, 0xfe, 0x81, 0x72, 0xd4, 0x05, 0xa3, 0x92, 0xe5, 0xf4, 0x03, 0x79, 0xe9, 0x19, 0x92, 0xa3, + 0x1c, 0xdb, 0x2e, 0x18, 0xea, 0x47, 0xd1, 0x19, 0xb9, 0x70, 0xa5, 0x71, 0xa2, 0x78, 0x81, 0x0a, + 0x94, 0x61, 0xe6, 0xd3, 0xca, 0x0a, 0x67, 0x1f, 0xbe, 0x07, 0x9b, 0xeb, 0xd5, 0x8f, 0x23, 0x7a, + 0x61, 
0xb2, 0xd6, 0x22, 0x6e, 0xe3, 0x99, 0x80, 0xca, 0x51, 0xe4, 0x09, 0xa1, 0x4c, 0x3c, 0x4f, + 0x74, 0x4b, 0x5e, 0xae, 0x5b, 0xa1, 0x9a, 0xf7, 0x6a, 0x3d, 0x3a, 0xa9, 0x34, 0xdd, 0x99, 0xeb, + 0xd4, 0xef, 0x28, 0x4f, 0x18, 0x58, 0xb4, 0x92, 0xad, 0xaf, 0x34, 0xcb, 0xad, 0xd6, 0xc3, 0xcb, + 0x51, 0xd7, 0x22, 0x7c, 0xfe, 0xa7, 0x10, 0xa4, 0x4d, 0x78, 0x8d, 0x27, 0xb9, 0x59, 0x19, 0x61, + 0xfb, 0xc4, 0x1d, 0x54, 0x0a, 0x07, 0xbd, 0xe3, 0x9b, 0xc0, 0xcb, 0x58, 0x8d, 0xd3, 0x6a, 0x47, + 0xfb, 0xda, 0x2c, 0x42, 0x3c, 0x43, 0x5b, 0x94, 0x61, 0x8f, 0x11, 0xbb, 0xf7, 0x3d, 0xe0, 0x8e, + 0x49, 0x6c, 0x68, 0x83, 0xe1, 0xd8, 0x1d, 0xca, 0x3d, 0x5d, 0x51, 0x3f, 0x0f, 0xc6, 0xb5, 0xad, + 0xf6, 0xe2, 0x10, 0x2d, 0x4d, 0x2b, 0x9e, 0xa3, 0x0d, 0xc3, 0xb1, 0x0d, 0xdf, 0xf3, 0xc0, 0x36, + 0x46, 0xbf, 0x39, 0x26, 0x31, 0x46, 0xdc, 0xc8, 0x55, 0x55, 0x8e, 0xb2, 0xd9, 0x38, 0xba, 0x1b, + 0xf0, 0x66, 0xd1, 0xa6, 0x96, 0x04, 0x89, 0x5f, 0xa0, 0x22, 0xf5, 0xa9, 0x0b, 0x76, 0xa7, 0x92, + 0xab, 0x0b, 0xcd, 0x92, 0x5a, 0x0e, 0xc6, 0xb5, 0x62, 0x7b, 0xb2, 0xa5, 0x4d, 0xff, 0x89, 0x7f, + 0xa3, 0xf2, 0xa5, 0xa3, 0x9f, 0x82, 0xe5, 0x9a, 0x98, 0x41, 0x25, 0xcf, 0x3d, 0xfd, 0xf6, 0x01, + 0x8d, 0x3f, 0x8e, 0xd5, 0x7c, 0x4e, 0x3f, 0x8d, 0x52, 0x2f, 0xcf, 0xfd, 0xd0, 0xe6, 0xcf, 0x10, + 0xff, 0x42, 0x55, 0xea, 0x1b, 0x06, 0x50, 0xda, 0xf5, 0xcd, 0x63, 0x47, 0xa7, 0x3f, 0x12, 0xca, + 0x1c, 0x6f, 0x74, 0x42, 0x2c, 0xc2, 0x2a, 0x85, 0xba, 0xd0, 0xcc, 0xab, 0x52, 0x30, 0xae, 0x55, + 0xdb, 0xa9, 0x51, 0xda, 0x3b, 0x08, 0xa2, 0x86, 0x36, 0xbb, 0x98, 0x98, 0xd0, 0x49, 0xb0, 0x8b, + 0x9c, 0x5d, 0x0d, 0xc6, 0xb5, 0xcd, 0x1f, 0x16, 0x46, 0x68, 0x29, 0xca, 0xc6, 0x73, 0x01, 0xad, + 0xdd, 0xba, 0x31, 0xe2, 0x19, 0x2a, 0x60, 0x83, 0x91, 0x61, 0x38, 0x40, 0xe1, 0xb0, 0xee, 0xa6, + 0xf7, 0x2c, 0x7e, 0x2d, 0x34, 0xe8, 0x42, 0x68, 0x12, 0xc4, 0x17, 0xee, 0x3b, 0x0e, 0xd1, 0x22, + 0x98, 0x68, 0xa2, 0x75, 0x13, 0x53, 0x36, 0x9d, 0xc2, 0x53, 0x62, 0x01, 0xf7, 0xaf, 0xdc, 0xda, + 0x5e, 0xee, 0xa2, 0x85, 0x0a, 0xf5, 0xb3, 0x60, 0x5c, 0x5b, 0x3f, 0xb9, 0xc3, 0xd1, 0x12, 0xe4, + 0xc6, 0x4b, 0x01, 0xcd, 0xfb, 0xf4, 0x08, 0x8f, 0x61, 0x1f, 0x95, 0xd8, 0x74, 0xd8, 0xb2, 0x1f, + 0x3c, 0x6c, 0xb3, 0x5b, 0x3b, 0x9b, 0xb4, 0x19, 0xbd, 0xf1, 0x54, 0x40, 0x9f, 0xdc, 0x89, 0x7f, + 0x84, 0xfa, 0x7e, 0xb9, 0xf5, 0xd8, 0xef, 0x2c, 0x51, 0x1b, 0xaf, 0x2a, 0xed, 0x89, 0x57, 0xb7, + 0xaf, 0x6e, 0xa4, 0xcc, 0xf5, 0x8d, 0x94, 0x79, 0x71, 0x23, 0x65, 0xfe, 0x0d, 0x24, 0xe1, 0x2a, + 0x90, 0x84, 0xeb, 0x40, 0x12, 0x5e, 0x05, 0x92, 0xf0, 0xdf, 0x6b, 0x29, 0xf3, 0x67, 0x69, 0xda, + 0x9d, 0xb7, 0x01, 0x00, 0x00, 0xff, 0xff, 0x32, 0x5e, 0xac, 0x56, 0xd9, 0x08, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.proto new file mode 100644 index 0000000000..4f51d6161a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.proto @@ -0,0 +1,134 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.batch.v2alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v2alpha1"; + +// CronJob represents the configuration of a single cron job. +message CronJob { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec is a structure defining the expected behavior of a job, including the schedule. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional CronJobSpec spec = 2; + + // Status is a structure describing current status of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional CronJobStatus status = 3; +} + +// CronJobList is a collection of cron jobs. +message CronJobList { + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of CronJob. + repeated CronJob items = 2; +} + +// CronJobSpec describes how the job execution will look like and when it will actually run. +message CronJobSpec { + // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. + optional string schedule = 1; + + // Optional deadline in seconds for starting the job if it misses scheduled + // time for any reason. Missed jobs executions will be counted as failed ones. + // +optional + optional int64 startingDeadlineSeconds = 2; + + // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. + // +optional + optional string concurrencyPolicy = 3; + + // Suspend flag tells the controller to suspend subsequent executions, it does + // not apply to already started executions. Defaults to false. + // +optional + optional bool suspend = 4; + + // JobTemplate is the object that describes the job that will be created when + // executing a CronJob. + optional JobTemplateSpec jobTemplate = 5; + + // The number of successful finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + optional int32 successfulJobsHistoryLimit = 6; + + // The number of failed finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + optional int32 failedJobsHistoryLimit = 7; +} + +// CronJobStatus represents the current state of a cron job. +message CronJobStatus { + // Active holds pointers to currently running jobs. + // +optional + repeated k8s.io.kubernetes.pkg.api.v1.ObjectReference active = 1; + + // LastScheduleTime keeps information of when was the last time the job was successfully scheduled. 
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; +} + +// JobTemplate describes a template for creating copies of a predefined pod. +message JobTemplate { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Template defines jobs that will be created from this template + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional JobTemplateSpec template = 2; +} + +// JobTemplateSpec describes the data a Job should have when created from a template +message JobTemplateSpec { + // Standard object's metadata of the jobs created from this template. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional k8s.io.kubernetes.pkg.apis.batch.v1.JobSpec spec = 2; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/register.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/register.go new file mode 100644 index 0000000000..5286ca4a08 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "batch" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &JobTemplate{}, + &CronJob{}, + &CronJobList{}, + ) + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJob"), &CronJob{}) + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJobList"), &CronJobList{}) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.generated.go new file mode 100644 index 0000000000..8dd93175ff --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.generated.go @@ -0,0 +1,2525 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package v2alpha1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg5_resource "k8s.io/apimachinery/pkg/api/resource" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + pkg6_intstr "k8s.io/apimachinery/pkg/util/intstr" + pkg4_v1 "k8s.io/client-go/pkg/api/v1" + pkg3_v1 "k8s.io/client-go/pkg/apis/batch/v1" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg5_resource.Quantity + var v1 pkg1_v1.TypeMeta + var v2 pkg2_types.UID + var v3 pkg6_intstr.IntOrString + var v4 pkg4_v1.PodTemplateSpec + var v5 pkg3_v1.JobSpec + var v6 time.Time + _, _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5, v6 + } +} + +func (x *JobTemplate) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Template + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Template + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else 
{ + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobTemplate) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = JobTemplateSpec{} + } else { + yyv10 := &x.Template + yyv10.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + 
} + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = JobTemplateSpec{} + } else { + yyv18 := &x.Template + yyv18.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *JobTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = pkg3_v1.JobSpec{} + } else { + yyv6 := &x.Spec + yyv6.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = pkg3_v1.JobSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CronJob) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { 
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CronJob) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x 
*CronJob) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = CronJobSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = CronJobStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CronJob) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = CronJobSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + 
yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = CronJobStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CronJobList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceCronJob(([]CronJob)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) 
+ r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceCronJob(([]CronJob)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CronJobList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CronJobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceCronJob((*[]CronJob)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CronJobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = 
r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceCronJob((*[]CronJob)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CronJobSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.StartingDeadlineSeconds != nil + yyq2[2] = x.ConcurrencyPolicy != "" + yyq2[3] = x.Suspend != nil + yyq2[5] = x.SuccessfulJobsHistoryLimit != nil + yyq2[6] = x.FailedJobsHistoryLimit != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Schedule)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("schedule")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Schedule)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.StartingDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy7 := *x.StartingDeadlineSeconds + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("startingDeadlineSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.StartingDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy9 := 
*x.StartingDeadlineSeconds + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + x.ConcurrencyPolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("concurrencyPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.ConcurrencyPolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Suspend == nil { + r.EncodeNil() + } else { + yy15 := *x.Suspend + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(yy15)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("suspend")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Suspend == nil { + r.EncodeNil() + } else { + yy17 := *x.Suspend + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeBool(bool(yy17)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy20 := &x.JobTemplate + yy20.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("jobTemplate")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.JobTemplate + yy22.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.SuccessfulJobsHistoryLimit == nil { + r.EncodeNil() + } else { + yy25 := *x.SuccessfulJobsHistoryLimit + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeInt(int64(yy25)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("successfulJobsHistoryLimit")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SuccessfulJobsHistoryLimit == nil { + r.EncodeNil() + } else { + yy27 := *x.SuccessfulJobsHistoryLimit + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeInt(int64(yy27)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.FailedJobsHistoryLimit == nil { + r.EncodeNil() + } else { + yy30 := *x.FailedJobsHistoryLimit + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeInt(int64(yy30)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("failedJobsHistoryLimit")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FailedJobsHistoryLimit == nil { + r.EncodeNil() + } else { + yy32 := *x.FailedJobsHistoryLimit + yym33 := z.EncBinary() + _ = yym33 + if false { + } else { + r.EncodeInt(int64(yy32)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CronJobSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { 
+ } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CronJobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "schedule": + if r.TryDecodeAsNil() { + x.Schedule = "" + } else { + yyv4 := &x.Schedule + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "startingDeadlineSeconds": + if r.TryDecodeAsNil() { + if x.StartingDeadlineSeconds != nil { + x.StartingDeadlineSeconds = nil + } + } else { + if x.StartingDeadlineSeconds == nil { + x.StartingDeadlineSeconds = new(int64) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + case "concurrencyPolicy": + if r.TryDecodeAsNil() { + x.ConcurrencyPolicy = "" + } else { + yyv8 := &x.ConcurrencyPolicy + yyv8.CodecDecodeSelf(d) + } + case "suspend": + if r.TryDecodeAsNil() { + if x.Suspend != nil { + x.Suspend = nil + } + } else { + if x.Suspend == nil { + x.Suspend = new(bool) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*bool)(x.Suspend)) = r.DecodeBool() + } + } + case "jobTemplate": + if r.TryDecodeAsNil() { + x.JobTemplate = JobTemplateSpec{} + } else { + yyv11 := &x.JobTemplate + yyv11.CodecDecodeSelf(d) + } + case "successfulJobsHistoryLimit": + if r.TryDecodeAsNil() { + if x.SuccessfulJobsHistoryLimit != nil { + x.SuccessfulJobsHistoryLimit = nil + } + } else { + if x.SuccessfulJobsHistoryLimit == nil { + x.SuccessfulJobsHistoryLimit = new(int32) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(x.SuccessfulJobsHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + case "failedJobsHistoryLimit": + if r.TryDecodeAsNil() { + if x.FailedJobsHistoryLimit != nil { + x.FailedJobsHistoryLimit = nil + } + } else { + if x.FailedJobsHistoryLimit == nil { + x.FailedJobsHistoryLimit = new(int32) + } + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(x.FailedJobsHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CronJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Schedule = "" + } else { + yyv17 := &x.Schedule + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.StartingDeadlineSeconds != nil { + x.StartingDeadlineSeconds = nil + } + } else { + if x.StartingDeadlineSeconds == nil { + x.StartingDeadlineSeconds = new(int64) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ConcurrencyPolicy = "" + } else { + yyv21 := &x.ConcurrencyPolicy + yyv21.CodecDecodeSelf(d) + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Suspend != nil { + x.Suspend = nil + } + } else { + if x.Suspend == nil { + x.Suspend = new(bool) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*bool)(x.Suspend)) = r.DecodeBool() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.JobTemplate = JobTemplateSpec{} + } else { + yyv24 := &x.JobTemplate + yyv24.CodecDecodeSelf(d) + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SuccessfulJobsHistoryLimit != nil { + x.SuccessfulJobsHistoryLimit = nil + } + } else { + if x.SuccessfulJobsHistoryLimit == nil { + x.SuccessfulJobsHistoryLimit = new(int32) + } + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(x.SuccessfulJobsHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FailedJobsHistoryLimit != nil { + x.FailedJobsHistoryLimit = nil + } + } else { + if x.FailedJobsHistoryLimit == nil { + x.FailedJobsHistoryLimit = new(int32) + } + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(x.FailedJobsHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj16-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ConcurrencyPolicy) 
CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ConcurrencyPolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *CronJobStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Active) != 0 + yyq2[1] = x.LastScheduleTime != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Active == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicev1_ObjectReference(([]pkg4_v1.ObjectReference)(x.Active), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("active")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Active == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicev1_ObjectReference(([]pkg4_v1.ObjectReference)(x.Active), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.LastScheduleTime == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) { + } else if yym7 { + z.EncBinaryMarshal(x.LastScheduleTime) + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(x.LastScheduleTime) + } else { + z.EncFallback(x.LastScheduleTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastScheduleTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LastScheduleTime == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) { + } else if yym8 { + z.EncBinaryMarshal(x.LastScheduleTime) + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(x.LastScheduleTime) + } else { + z.EncFallback(x.LastScheduleTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CronJobStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := 
r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CronJobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "active": + if r.TryDecodeAsNil() { + x.Active = nil + } else { + yyv4 := &x.Active + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicev1_ObjectReference((*[]pkg4_v1.ObjectReference)(yyv4), d) + } + } + case "lastScheduleTime": + if r.TryDecodeAsNil() { + if x.LastScheduleTime != nil { + x.LastScheduleTime = nil + } + } else { + if x.LastScheduleTime == nil { + x.LastScheduleTime = new(pkg1_v1.Time) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) { + } else if yym7 { + z.DecBinaryUnmarshal(x.LastScheduleTime) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.LastScheduleTime) + } else { + z.DecFallback(x.LastScheduleTime, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CronJobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Active = nil + } else { + yyv9 := &x.Active + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicev1_ObjectReference((*[]pkg4_v1.ObjectReference)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LastScheduleTime != nil { + x.LastScheduleTime = nil + } + } else { + if x.LastScheduleTime == nil { + x.LastScheduleTime = new(pkg1_v1.Time) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) { + } else if yym12 { + z.DecBinaryUnmarshal(x.LastScheduleTime) + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.LastScheduleTime) + } else { + z.DecFallback(x.LastScheduleTime, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceCronJob(v []CronJob, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCronJob(v *[]CronJob, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []CronJob{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 1144) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]CronJob, yyrl1) + } + } else { + yyv1 = make([]CronJob, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CronJob{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, CronJob{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CronJob{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, CronJob{}) // var yyz1 CronJob + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = CronJob{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []CronJob{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicev1_ObjectReference(v []pkg4_v1.ObjectReference, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicev1_ObjectReference(v *[]pkg4_v1.ObjectReference, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg4_v1.ObjectReference{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, 
z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]pkg4_v1.ObjectReference, yyrl1) + } + } else { + yyv1 = make([]pkg4_v1.ObjectReference, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg4_v1.ObjectReference{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, pkg4_v1.ObjectReference{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg4_v1.ObjectReference{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, pkg4_v1.ObjectReference{}) // var yyz1 pkg4_v1.ObjectReference + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg4_v1.ObjectReference{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg4_v1.ObjectReference{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.go new file mode 100644 index 0000000000..67f1c95e40 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.go @@ -0,0 +1,147 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api/v1" + batchv1 "k8s.io/client-go/pkg/apis/batch/v1" +) + +// JobTemplate describes a template for creating copies of a predefined pod. +type JobTemplate struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Template defines jobs that will be created from this template + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` +} + +// JobTemplateSpec describes the data a Job should have when created from a template +type JobTemplateSpec struct { + // Standard object's metadata of the jobs created from this template. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the job. 
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +genclient=true + +// CronJob represents the configuration of a single cron job. +type CronJob struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec is a structure defining the expected behavior of a job, including the schedule. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is a structure describing current status of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// CronJobList is a collection of cron jobs. +type CronJobList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of CronJob. + Items []CronJob `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// CronJobSpec describes how the job execution will look like and when it will actually run. +type CronJobSpec struct { + + // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. + Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"` + + // Optional deadline in seconds for starting the job if it misses scheduled + // time for any reason. Missed jobs executions will be counted as failed ones. + // +optional + StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"` + + // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. + // +optional + ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` + + // Suspend flag tells the controller to suspend subsequent executions, it does + // not apply to already started executions. Defaults to false. + // +optional + Suspend *bool `json:"suspend,omitempty" protobuf:"varint,4,opt,name=suspend"` + + // JobTemplate is the object that describes the job that will be created when + // executing a CronJob. + JobTemplate JobTemplateSpec `json:"jobTemplate" protobuf:"bytes,5,opt,name=jobTemplate"` + + // The number of successful finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty" protobuf:"varint,6,opt,name=successfulJobsHistoryLimit"` + + // The number of failed finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty" protobuf:"varint,7,opt,name=failedJobsHistoryLimit"` +} + +// ConcurrencyPolicy describes how the job will be handled. +// Only one of the following concurrent policies may be specified. 
+// If none of the following policies is specified, the default one +// is AllowConcurrent. +type ConcurrencyPolicy string + +const ( + // AllowConcurrent allows CronJobs to run concurrently. + AllowConcurrent ConcurrencyPolicy = "Allow" + + // ForbidConcurrent forbids concurrent runs, skipping next run if previous + // hasn't finished yet. + ForbidConcurrent ConcurrencyPolicy = "Forbid" + + // ReplaceConcurrent cancels currently running job and replaces it with a new one. + ReplaceConcurrent ConcurrencyPolicy = "Replace" +) + +// CronJobStatus represents the current state of a cron job. +type CronJobStatus struct { + // Active holds pointers to currently running jobs. + // +optional + Active []v1.ObjectReference `json:"active,omitempty" protobuf:"bytes,1,rep,name=active"` + + // LastScheduleTime keeps information of when was the last time the job was successfully scheduled. + // +optional + LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty" protobuf:"bytes,4,opt,name=lastScheduleTime"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..c0b53b8e02 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go @@ -0,0 +1,96 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_CronJob = map[string]string{ + "": "CronJob represents the configuration of a single cron job.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec is a structure defining the expected behavior of a job, including the schedule. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is a structure describing current status of a job. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (CronJob) SwaggerDoc() map[string]string { + return map_CronJob +} + +var map_CronJobList = map[string]string{ + "": "CronJobList is a collection of cron jobs.", + "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is the list of CronJob.", +} + +func (CronJobList) SwaggerDoc() map[string]string { + return map_CronJobList +} + +var map_CronJobSpec = map[string]string{ + "": "CronJobSpec describes how the job execution will look like and when it will actually run.", + "schedule": "Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", + "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", + "concurrencyPolicy": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.", + "suspend": "Suspend flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", + "jobTemplate": "JobTemplate is the object that describes the job that will be created when executing a CronJob.", + "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.", + "failedJobsHistoryLimit": "The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.", +} + +func (CronJobSpec) SwaggerDoc() map[string]string { + return map_CronJobSpec +} + +var map_CronJobStatus = map[string]string{ + "": "CronJobStatus represents the current state of a cron job.", + "active": "Active holds pointers to currently running jobs.", + "lastScheduleTime": "LastScheduleTime keeps information of when was the last time the job was successfully scheduled.", +} + +func (CronJobStatus) SwaggerDoc() map[string]string { + return map_CronJobStatus +} + +var map_JobTemplate = map[string]string{ + "": "JobTemplate describes a template for creating copies of a predefined pod.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "template": "Template defines jobs that will be created from this template http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (JobTemplate) SwaggerDoc() map[string]string { + return map_JobTemplate +} + +var map_JobTemplateSpec = map[string]string{ + "": "JobTemplateSpec describes the data a Job should have when created from a template", + "metadata": "Standard object's metadata of the jobs created from this template. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the job. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (JobTemplateSpec) SwaggerDoc() map[string]string { + return map_JobTemplateSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.conversion.go new file mode 100644 index 0000000000..d3d04d3f00 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.conversion.go @@ -0,0 +1,227 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v2alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/client-go/pkg/api" + api_v1 "k8s.io/client-go/pkg/api/v1" + batch "k8s.io/client-go/pkg/apis/batch" + batch_v1 "k8s.io/client-go/pkg/apis/batch/v1" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v2alpha1_CronJob_To_batch_CronJob, + Convert_batch_CronJob_To_v2alpha1_CronJob, + Convert_v2alpha1_CronJobList_To_batch_CronJobList, + Convert_batch_CronJobList_To_v2alpha1_CronJobList, + Convert_v2alpha1_CronJobSpec_To_batch_CronJobSpec, + Convert_batch_CronJobSpec_To_v2alpha1_CronJobSpec, + Convert_v2alpha1_CronJobStatus_To_batch_CronJobStatus, + Convert_batch_CronJobStatus_To_v2alpha1_CronJobStatus, + Convert_v2alpha1_JobTemplate_To_batch_JobTemplate, + Convert_batch_JobTemplate_To_v2alpha1_JobTemplate, + Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec, + Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec, + ) +} + +func autoConvert_v2alpha1_CronJob_To_batch_CronJob(in *CronJob, out *batch.CronJob, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v2alpha1_CronJobSpec_To_batch_CronJobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v2alpha1_CronJobStatus_To_batch_CronJobStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v2alpha1_CronJob_To_batch_CronJob(in *CronJob, out *batch.CronJob, s conversion.Scope) error { + return autoConvert_v2alpha1_CronJob_To_batch_CronJob(in, out, s) +} + +func autoConvert_batch_CronJob_To_v2alpha1_CronJob(in *batch.CronJob, out *CronJob, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_batch_CronJobSpec_To_v2alpha1_CronJobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_batch_CronJobStatus_To_v2alpha1_CronJobStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_batch_CronJob_To_v2alpha1_CronJob(in *batch.CronJob, out *CronJob, s conversion.Scope) error { + return autoConvert_batch_CronJob_To_v2alpha1_CronJob(in, out, s) +} + +func autoConvert_v2alpha1_CronJobList_To_batch_CronJobList(in *CronJobList, out *batch.CronJobList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]batch.CronJob, len(*in)) + for i := range *in { + if err := Convert_v2alpha1_CronJob_To_batch_CronJob(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v2alpha1_CronJobList_To_batch_CronJobList(in *CronJobList, out *batch.CronJobList, s conversion.Scope) error { + return autoConvert_v2alpha1_CronJobList_To_batch_CronJobList(in, out, s) +} + +func autoConvert_batch_CronJobList_To_v2alpha1_CronJobList(in *batch.CronJobList, out *CronJobList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CronJob, len(*in)) + for i := range *in { + if err := Convert_batch_CronJob_To_v2alpha1_CronJob(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_batch_CronJobList_To_v2alpha1_CronJobList(in *batch.CronJobList, out *CronJobList, s conversion.Scope) error { + return autoConvert_batch_CronJobList_To_v2alpha1_CronJobList(in, out, s) +} + +func autoConvert_v2alpha1_CronJobSpec_To_batch_CronJobSpec(in *CronJobSpec, out *batch.CronJobSpec, s conversion.Scope) error { + out.Schedule = in.Schedule + out.StartingDeadlineSeconds = (*int64)(unsafe.Pointer(in.StartingDeadlineSeconds)) + out.ConcurrencyPolicy = batch.ConcurrencyPolicy(in.ConcurrencyPolicy) + out.Suspend 
= (*bool)(unsafe.Pointer(in.Suspend)) + if err := Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil { + return err + } + out.SuccessfulJobsHistoryLimit = (*int32)(unsafe.Pointer(in.SuccessfulJobsHistoryLimit)) + out.FailedJobsHistoryLimit = (*int32)(unsafe.Pointer(in.FailedJobsHistoryLimit)) + return nil +} + +func Convert_v2alpha1_CronJobSpec_To_batch_CronJobSpec(in *CronJobSpec, out *batch.CronJobSpec, s conversion.Scope) error { + return autoConvert_v2alpha1_CronJobSpec_To_batch_CronJobSpec(in, out, s) +} + +func autoConvert_batch_CronJobSpec_To_v2alpha1_CronJobSpec(in *batch.CronJobSpec, out *CronJobSpec, s conversion.Scope) error { + out.Schedule = in.Schedule + out.StartingDeadlineSeconds = (*int64)(unsafe.Pointer(in.StartingDeadlineSeconds)) + out.ConcurrencyPolicy = ConcurrencyPolicy(in.ConcurrencyPolicy) + out.Suspend = (*bool)(unsafe.Pointer(in.Suspend)) + if err := Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil { + return err + } + out.SuccessfulJobsHistoryLimit = (*int32)(unsafe.Pointer(in.SuccessfulJobsHistoryLimit)) + out.FailedJobsHistoryLimit = (*int32)(unsafe.Pointer(in.FailedJobsHistoryLimit)) + return nil +} + +func Convert_batch_CronJobSpec_To_v2alpha1_CronJobSpec(in *batch.CronJobSpec, out *CronJobSpec, s conversion.Scope) error { + return autoConvert_batch_CronJobSpec_To_v2alpha1_CronJobSpec(in, out, s) +} + +func autoConvert_v2alpha1_CronJobStatus_To_batch_CronJobStatus(in *CronJobStatus, out *batch.CronJobStatus, s conversion.Scope) error { + out.Active = *(*[]api.ObjectReference)(unsafe.Pointer(&in.Active)) + out.LastScheduleTime = (*v1.Time)(unsafe.Pointer(in.LastScheduleTime)) + return nil +} + +func Convert_v2alpha1_CronJobStatus_To_batch_CronJobStatus(in *CronJobStatus, out *batch.CronJobStatus, s conversion.Scope) error { + return autoConvert_v2alpha1_CronJobStatus_To_batch_CronJobStatus(in, out, s) +} + +func autoConvert_batch_CronJobStatus_To_v2alpha1_CronJobStatus(in *batch.CronJobStatus, out *CronJobStatus, s conversion.Scope) error { + out.Active = *(*[]api_v1.ObjectReference)(unsafe.Pointer(&in.Active)) + out.LastScheduleTime = (*v1.Time)(unsafe.Pointer(in.LastScheduleTime)) + return nil +} + +func Convert_batch_CronJobStatus_To_v2alpha1_CronJobStatus(in *batch.CronJobStatus, out *CronJobStatus, s conversion.Scope) error { + return autoConvert_batch_CronJobStatus_To_v2alpha1_CronJobStatus(in, out, s) +} + +func autoConvert_v2alpha1_JobTemplate_To_batch_JobTemplate(in *JobTemplate, out *batch.JobTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_v2alpha1_JobTemplate_To_batch_JobTemplate(in *JobTemplate, out *batch.JobTemplate, s conversion.Scope) error { + return autoConvert_v2alpha1_JobTemplate_To_batch_JobTemplate(in, out, s) +} + +func autoConvert_batch_JobTemplate_To_v2alpha1_JobTemplate(in *batch.JobTemplate, out *JobTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_batch_JobTemplate_To_v2alpha1_JobTemplate(in *batch.JobTemplate, out *JobTemplate, s conversion.Scope) error { + return autoConvert_batch_JobTemplate_To_v2alpha1_JobTemplate(in, out, s) +} + +func 
autoConvert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in *JobTemplateSpec, out *batch.JobTemplateSpec, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := batch_v1.Convert_v1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in *JobTemplateSpec, out *batch.JobTemplateSpec, s conversion.Scope) error { + return autoConvert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in, out, s) +} + +func autoConvert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in *batch.JobTemplateSpec, out *JobTemplateSpec, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := batch_v1.Convert_batch_JobSpec_To_v1_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in *batch.JobTemplateSpec, out *JobTemplateSpec, s conversion.Scope) error { + return autoConvert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..8c9010d9f2 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.deepcopy.go @@ -0,0 +1,170 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v2alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api_v1 "k8s.io/client-go/pkg/api/v1" + batch_v1 "k8s.io/client-go/pkg/apis/batch/v1" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_CronJob, InType: reflect.TypeOf(&CronJob{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_CronJobList, InType: reflect.TypeOf(&CronJobList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_CronJobSpec, InType: reflect.TypeOf(&CronJobSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_CronJobStatus, InType: reflect.TypeOf(&CronJobStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_JobTemplate, InType: reflect.TypeOf(&JobTemplate{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_JobTemplateSpec, InType: reflect.TypeOf(&JobTemplateSpec{})}, + ) +} + +func DeepCopy_v2alpha1_CronJob(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CronJob) + out := out.(*CronJob) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v2alpha1_CronJobSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v2alpha1_CronJobStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v2alpha1_CronJobList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CronJobList) + out := out.(*CronJobList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CronJob, len(*in)) + for i := range *in { + if err := DeepCopy_v2alpha1_CronJob(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v2alpha1_CronJobSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CronJobSpec) + out := out.(*CronJobSpec) + *out = *in + if in.StartingDeadlineSeconds != nil { + in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.Suspend != nil { + in, out := &in.Suspend, &out.Suspend + *out = new(bool) + **out = **in + } + if err := DeepCopy_v2alpha1_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, c); err != nil { + return err + } + if in.SuccessfulJobsHistoryLimit != nil { + in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit + *out = new(int32) + **out = **in + } + if in.FailedJobsHistoryLimit != nil { + in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_v2alpha1_CronJobStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CronJobStatus) + out := out.(*CronJobStatus) + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = make([]api_v1.ObjectReference, len(*in)) + copy(*out, *in) + } + if in.LastScheduleTime != nil { + in, out := &in.LastScheduleTime, &out.LastScheduleTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + return nil + } +} + +func DeepCopy_v2alpha1_JobTemplate(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobTemplate) + out := out.(*JobTemplate) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v2alpha1_JobTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v2alpha1_JobTemplateSpec(in interface{}, out interface{}, c 
*conversion.Cloner) error { + { + in := in.(*JobTemplateSpec) + out := out.(*JobTemplateSpec) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := batch_v1.DeepCopy_v1_JobSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.defaults.go new file mode 100644 index 0000000000..1c0bd0ee0a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.defaults.go @@ -0,0 +1,310 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v2alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/client-go/pkg/api/v1" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&CronJob{}, func(obj interface{}) { SetObjectDefaults_CronJob(obj.(*CronJob)) }) + scheme.AddTypeDefaultingFunc(&CronJobList{}, func(obj interface{}) { SetObjectDefaults_CronJobList(obj.(*CronJobList)) }) + scheme.AddTypeDefaultingFunc(&JobTemplate{}, func(obj interface{}) { SetObjectDefaults_JobTemplate(obj.(*JobTemplate)) }) + return nil +} + +func SetObjectDefaults_CronJob(in *CronJob) { + SetDefaults_CronJob(in) + v1.SetDefaults_PodSpec(&in.Spec.JobTemplate.Spec.Template.Spec) + for i := range in.Spec.JobTemplate.Spec.Template.Spec.Volumes { + a := &in.Spec.JobTemplate.Spec.Template.Spec.Volumes[i] + v1.SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + 
v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.JobTemplate.Spec.Template.Spec.InitContainers { + a := &in.Spec.JobTemplate.Spec.Template.Spec.InitContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.JobTemplate.Spec.Template.Spec.Containers { + a := &in.Spec.JobTemplate.Spec.Template.Spec.Containers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_CronJobList(in *CronJobList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_CronJob(a) + } +} + +func SetObjectDefaults_JobTemplate(in *JobTemplate) { + v1.SetDefaults_PodSpec(&in.Template.Spec.Template.Spec) + for i := range in.Template.Spec.Template.Spec.Volumes { + a := &in.Template.Spec.Template.Spec.Volumes[i] + v1.SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + 
v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Template.Spec.Template.Spec.InitContainers { + a := &in.Template.Spec.Template.Spec.InitContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Template.Spec.Template.Spec.Containers { + a := &in.Template.Spec.Template.Spec.Containers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/batch/zz_generated.deepcopy.go new file mode 100644 index 0000000000..6e75ecdc09 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/zz_generated.deepcopy.go @@ -0,0 +1,291 @@ +// +build !ignore_autogenerated + +/* 
+Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package batch + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/client-go/pkg/api" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_CronJob, InType: reflect.TypeOf(&CronJob{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_CronJobList, InType: reflect.TypeOf(&CronJobList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_CronJobSpec, InType: reflect.TypeOf(&CronJobSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_CronJobStatus, InType: reflect.TypeOf(&CronJobStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_Job, InType: reflect.TypeOf(&Job{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_JobCondition, InType: reflect.TypeOf(&JobCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_JobList, InType: reflect.TypeOf(&JobList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_JobSpec, InType: reflect.TypeOf(&JobSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_JobStatus, InType: reflect.TypeOf(&JobStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_JobTemplate, InType: reflect.TypeOf(&JobTemplate{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_JobTemplateSpec, InType: reflect.TypeOf(&JobTemplateSpec{})}, + ) +} + +func DeepCopy_batch_CronJob(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CronJob) + out := out.(*CronJob) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_batch_CronJobSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_batch_CronJobStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_batch_CronJobList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CronJobList) + out := out.(*CronJobList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CronJob, len(*in)) + for i := range *in { + if err := DeepCopy_batch_CronJob(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_batch_CronJobSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CronJobSpec) + out := out.(*CronJobSpec) + *out = *in + if in.StartingDeadlineSeconds != nil { + in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds + *out = new(int64) + **out = **in + } 
+ if in.Suspend != nil { + in, out := &in.Suspend, &out.Suspend + *out = new(bool) + **out = **in + } + if err := DeepCopy_batch_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, c); err != nil { + return err + } + if in.SuccessfulJobsHistoryLimit != nil { + in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit + *out = new(int32) + **out = **in + } + if in.FailedJobsHistoryLimit != nil { + in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_batch_CronJobStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CronJobStatus) + out := out.(*CronJobStatus) + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = make([]api.ObjectReference, len(*in)) + copy(*out, *in) + } + if in.LastScheduleTime != nil { + in, out := &in.LastScheduleTime, &out.LastScheduleTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + return nil + } +} + +func DeepCopy_batch_Job(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Job) + out := out.(*Job) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_batch_JobSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_batch_JobStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_batch_JobCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobCondition) + out := out.(*JobCondition) + *out = *in + out.LastProbeTime = in.LastProbeTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_batch_JobList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobList) + out := out.(*JobList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Job, len(*in)) + for i := range *in { + if err := DeepCopy_batch_Job(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_batch_JobSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobSpec) + out := out.(*JobSpec) + *out = *in + if in.Parallelism != nil { + in, out := &in.Parallelism, &out.Parallelism + *out = new(int32) + **out = **in + } + if in.Completions != nil { + in, out := &in.Completions, &out.Completions + *out = new(int32) + **out = **in + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if in.ManualSelector != nil { + in, out := &in.ManualSelector, &out.ManualSelector + *out = new(bool) + **out = **in + } + if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_batch_JobStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobStatus) + out := out.(*JobStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]JobCondition, len(*in)) + for i := range *in { + if err := DeepCopy_batch_JobCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } 
+ if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + return nil + } +} + +func DeepCopy_batch_JobTemplate(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobTemplate) + out := out.(*JobTemplate) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_batch_JobTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_batch_JobTemplateSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobTemplateSpec) + out := out.(*JobTemplateSpec) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_batch_JobSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/OWNERS b/vendor/k8s.io/client-go/pkg/apis/certificates/OWNERS new file mode 100755 index 0000000000..c67bd1172a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/OWNERS @@ -0,0 +1,14 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- deads2k +- caesarxuchao +- liggitt +- sttts +- timothysc +- dims +- errordeveloper +- mbohlool +- david-mcmahon +- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/doc.go b/vendor/k8s.io/client-go/pkg/apis/certificates/doc.go new file mode 100644 index 0000000000..a101774691 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=certificates.k8s.io +package certificates diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/helpers.go b/vendor/k8s.io/client-go/pkg/apis/certificates/helpers.go new file mode 100644 index 0000000000..2608e40762 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/helpers.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certificates + +import ( + "crypto/x509" + "encoding/pem" + "errors" +) + +// ParseCSR extracts the CSR from the API object and decodes it. 
+func ParseCSR(obj *CertificateSigningRequest) (*x509.CertificateRequest, error) { + // extract PEM from request object + pemBytes := obj.Spec.Request + block, _ := pem.Decode(pemBytes) + if block == nil || block.Type != "CERTIFICATE REQUEST" { + return nil, errors.New("PEM block type must be CERTIFICATE REQUEST") + } + csr, err := x509.ParseCertificateRequest(block.Bytes) + if err != nil { + return nil, err + } + return csr, nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/install/install.go b/vendor/k8s.io/client-go/pkg/apis/certificates/install/install.go new file mode 100644 index 0000000000..8850e07aa0 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/install/install.go @@ -0,0 +1,51 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the certificates API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/certificates" + "k8s.io/client-go/pkg/apis/certificates/v1beta1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: certificates.GroupName, + VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/certificates", + RootScopedKinds: sets.NewString("CertificateSigningRequest"), + AddInternalObjectsToScheme: certificates.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/register.go b/vendor/k8s.io/client-go/pkg/apis/certificates/register.go new file mode 100644 index 0000000000..f9d228d00e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package certificates + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// GroupName is the group name use in this package +const GroupName = "certificates.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CertificateSigningRequest{}, + &CertificateSigningRequestList{}, + ) + return nil +} + +func (obj *CertificateSigningRequest) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } +func (obj *CertificateSigningRequestList) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/types.go b/vendor/k8s.io/client-go/pkg/apis/certificates/types.go new file mode 100644 index 0000000000..4a7884a1ea --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/types.go @@ -0,0 +1,143 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certificates + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient=true +// +nonNamespaced=true + +// Describes a certificate signing request +type CertificateSigningRequest struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // The certificate request itself and any additional information. + // +optional + Spec CertificateSigningRequestSpec + + // Derived information about the request. + // +optional + Status CertificateSigningRequestStatus +} + +// This information is immutable after the request is created. Only the Request +// and Usages fields can be set on creation, other fields are derived by +// Kubernetes and cannot be modified by users. +type CertificateSigningRequestSpec struct { + // Base64-encoded PKCS#10 CSR data + Request []byte + + // usages specifies a set of usage contexts the key will be + // valid for. + // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + Usages []KeyUsage + + // Information about the requesting user. + // See user.Info interface for details. + // +optional + Username string + // UID information about the requesting user. + // See user.Info interface for details. + // +optional + UID string + // Group information about the requesting user. + // See user.Info interface for details. 
+ // +optional + Groups []string + // Extra information about the requesting user. + // See user.Info interface for details. + // +optional + Extra map[string]ExtraValue +} + +// ExtraValue masks the value so protobuf can generate +type ExtraValue []string + +type CertificateSigningRequestStatus struct { + // Conditions applied to the request, such as approval or denial. + // +optional + Conditions []CertificateSigningRequestCondition + + // If request was approved, the controller will place the issued certificate here. + // +optional + Certificate []byte +} + +type RequestConditionType string + +// These are the possible conditions for a certificate request. +const ( + CertificateApproved RequestConditionType = "Approved" + CertificateDenied RequestConditionType = "Denied" +) + +type CertificateSigningRequestCondition struct { + // request approval state, currently Approved or Denied. + Type RequestConditionType + // brief reason for the request state + // +optional + Reason string + // human readable message with details about the request state + // +optional + Message string + // timestamp for the last update to this condition + // +optional + LastUpdateTime metav1.Time +} + +type CertificateSigningRequestList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // +optional + Items []CertificateSigningRequest +} + +// KeyUsages specifies valid usage contexts for keys. +// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 +// https://tools.ietf.org/html/rfc5280#section-4.2.1.12 +type KeyUsage string + +const ( + UsageSigning KeyUsage = "signing" + UsageDigitalSignature KeyUsage = "digital signature" + UsageContentCommittment KeyUsage = "content committment" + UsageKeyEncipherment KeyUsage = "key encipherment" + UsageKeyAgreement KeyUsage = "key agreement" + UsageDataEncipherment KeyUsage = "data encipherment" + UsageCertSign KeyUsage = "cert sign" + UsageCRLSign KeyUsage = "crl sign" + UsageEncipherOnly KeyUsage = "encipher only" + UsageDecipherOnly KeyUsage = "decipher only" + UsageAny KeyUsage = "any" + UsageServerAuth KeyUsage = "server auth" + UsageClientAuth KeyUsage = "client auth" + UsageCodeSigning KeyUsage = "code signing" + UsageEmailProtection KeyUsage = "email protection" + UsageSMIME KeyUsage = "s/mime" + UsageIPsecEndSystem KeyUsage = "ipsec end system" + UsageIPsecTunnel KeyUsage = "ipsec tunnel" + UsageIPsecUser KeyUsage = "ipsec user" + UsageTimestamping KeyUsage = "timestamping" + UsageOCSPSigning KeyUsage = "ocsp signing" + UsageMicrosoftSGC KeyUsage = "microsoft sgc" + UsageNetscapSGC KeyUsage = "netscape sgc" +) diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/conversion.go new file mode 100644 index 0000000000..b9cf0b0163 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/conversion.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions here. Currently there are none. + + return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.String(), "CertificateSigningRequest", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/defaults.go new file mode 100644 index 0000000000..cd6a29d339 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/defaults.go @@ -0,0 +1,31 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import "k8s.io/apimachinery/pkg/runtime" + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + RegisterDefaults(scheme) + return scheme.AddDefaultingFuncs( + SetDefaults_CertificateSigningRequestSpec, + ) +} +func SetDefaults_CertificateSigningRequestSpec(obj *CertificateSigningRequestSpec) { + if obj.Usages == nil { + obj.Usages = []KeyUsage{UsageDigitalSignature, UsageKeyEncipherment} + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/doc.go new file mode 100644 index 0000000000..6f257909de --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=certificates.k8s.io +package v1beta1 diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/generated.pb.go new file mode 100644 index 0000000000..11833465da --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/generated.pb.go @@ -0,0 +1,1674 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/certificates/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/certificates/v1beta1/generated.proto + + It has these top-level messages: + CertificateSigningRequest + CertificateSigningRequestCondition + CertificateSigningRequestList + CertificateSigningRequestSpec + CertificateSigningRequestStatus + ExtraValue +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} } +func (*CertificateSigningRequest) ProtoMessage() {} +func (*CertificateSigningRequest) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{0} +} + +func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} } +func (*CertificateSigningRequestCondition) ProtoMessage() {} +func (*CertificateSigningRequestCondition) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} } +func (*CertificateSigningRequestList) ProtoMessage() {} +func (*CertificateSigningRequestList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + +func (m *CertificateSigningRequestSpec) Reset() { *m = CertificateSigningRequestSpec{} } +func (*CertificateSigningRequestSpec) ProtoMessage() {} +func (*CertificateSigningRequestSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{3} +} + +func (m *CertificateSigningRequestStatus) Reset() { *m = CertificateSigningRequestStatus{} } +func (*CertificateSigningRequestStatus) ProtoMessage() {} +func (*CertificateSigningRequestStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{4} +} + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func init() { + proto.RegisterType((*CertificateSigningRequest)(nil), "k8s.io.client-go.pkg.apis.certificates.v1beta1.CertificateSigningRequest") + proto.RegisterType((*CertificateSigningRequestCondition)(nil), "k8s.io.client-go.pkg.apis.certificates.v1beta1.CertificateSigningRequestCondition") + proto.RegisterType((*CertificateSigningRequestList)(nil), "k8s.io.client-go.pkg.apis.certificates.v1beta1.CertificateSigningRequestList") + proto.RegisterType((*CertificateSigningRequestSpec)(nil), 
"k8s.io.client-go.pkg.apis.certificates.v1beta1.CertificateSigningRequestSpec") + proto.RegisterType((*CertificateSigningRequestStatus)(nil), "k8s.io.client-go.pkg.apis.certificates.v1beta1.CertificateSigningRequestStatus") + proto.RegisterType((*ExtraValue)(nil), "k8s.io.client-go.pkg.apis.certificates.v1beta1.ExtraValue") +} +func (m *CertificateSigningRequest) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CertificateSigningRequest) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *CertificateSigningRequestCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CertificateSigningRequestCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastUpdateTime.Size())) + n4, err := m.LastUpdateTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + return i, nil +} + +func (m *CertificateSigningRequestList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CertificateSigningRequestList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CertificateSigningRequestSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CertificateSigningRequestSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Request != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Request))) + i += copy(data[i:], m.Request) + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Username))) + i += copy(data[i:], m.Username) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, 
uint64(len(m.UID))) + i += copy(data[i:], m.UID) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + data[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Usages) > 0 { + for _, s := range m.Usages { + data[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Extra) > 0 { + for k := range m.Extra { + data[i] = 0x32 + i++ + v := m.Extra[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n6, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + } + } + return i, nil +} + +func (m *CertificateSigningRequestStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CertificateSigningRequestStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Certificate != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Certificate))) + i += copy(data[i:], m.Certificate) + } + return i, nil +} + +func (m ExtraValue) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m ExtraValue) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *CertificateSigningRequest) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CertificateSigningRequestCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CertificateSigningRequestList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CertificateSigningRequestSpec) Size() (n int) { + var l int + _ = l + if m.Request != nil { + l = len(m.Request) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Username) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Usages) > 0 { + for _, s := range m.Usages { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *CertificateSigningRequestStatus) Size() (n int) { + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Certificate != nil { + l = len(m.Certificate) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m ExtraValue) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CertificateSigningRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CertificateSigningRequest{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CertificateSigningRequestSpec", "CertificateSigningRequestSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CertificateSigningRequestStatus", "CertificateSigningRequestStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CertificateSigningRequestCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CertificateSigningRequestCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CertificateSigningRequestList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CertificateSigningRequestList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.Items), "CertificateSigningRequest", "CertificateSigningRequest", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CertificateSigningRequestSpec) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&CertificateSigningRequestSpec{`, + `Request:` + valueToStringGenerated(this.Request) + `,`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Usages:` + fmt.Sprintf("%v", this.Usages) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func (this *CertificateSigningRequestStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CertificateSigningRequestStatus{`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "CertificateSigningRequestCondition", "CertificateSigningRequestCondition", 1), `&`, ``, 1) + `,`, + `Certificate:` + valueToStringGenerated(this.Certificate) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CertificateSigningRequest) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateSigningRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateSigningRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil 
{ + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CertificateSigningRequestCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateSigningRequestCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateSigningRequestCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = RequestConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CertificateSigningRequestList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateSigningRequestList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateSigningRequestList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CertificateSigningRequest{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex 
:= iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateSigningRequestSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateSigningRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Request = append(m.Request[:0], data[iNdEx:postIndex]...) + if m.Request == nil { + m.Request = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usages", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } 
+ } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Usages = append(m.Usages, KeyUsage(data[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + m.Extra[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CertificateSigningRequestStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateSigningRequestStatus: wiretype end group 
for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateSigningRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, CertificateSigningRequestCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Certificate = append(m.Certificate[:0], data[iNdEx:postIndex]...) + if m.Certificate == nil { + m.Certificate = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExtraValue) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func 
skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 839 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x54, 0xcf, 0x8f, 0xdb, 0x44, + 0x14, 0x8e, 0xf3, 0x6b, 0x93, 0xc9, 0xb2, 0xad, 0x46, 0xa8, 0x32, 0x2b, 0xd5, 0x5e, 0x59, 0x80, + 0xb6, 0xa8, 0xd8, 0x64, 0x41, 0xb0, 0x2a, 0x07, 0x24, 0x97, 0x0a, 0x15, 0x5a, 0x7e, 0xcc, 0x36, + 0x48, 0x20, 0x0e, 0x4c, 0x9c, 0x57, 0xef, 0x34, 0xf1, 0x0f, 0x3c, 0xe3, 0x68, 0x73, 0x41, 0xbd, + 0x71, 0xe5, 0xc8, 0x05, 0x89, 0x3f, 0x67, 0x8f, 0x3d, 0x72, 0x40, 0x11, 0x1b, 0x4e, 0x5c, 0xf8, + 0x03, 0x7a, 0x42, 0x33, 0x9e, 0xc4, 0x66, 0xd3, 0xd0, 0x56, 0xca, 0xcd, 0xf3, 0xcd, 0xf7, 0xbe, + 0xf7, 0xde, 0xf7, 0x9e, 0x07, 0x7d, 0x34, 0x3e, 0xe6, 0x2e, 0x4b, 0xbc, 0x71, 0x3e, 0x84, 0x2c, + 0x06, 0x01, 0xdc, 0x4b, 0xc7, 0xa1, 0x47, 0x53, 0xc6, 0xbd, 0x00, 0x32, 0xc1, 0x1e, 0xb2, 0x80, + 0x4a, 0x74, 0xda, 0x1f, 0x82, 0xa0, 0x7d, 0x2f, 0x84, 0x18, 0x32, 0x2a, 0x60, 0xe4, 0xa6, 0x59, + 0x22, 0x12, 0xec, 0x15, 0x02, 0x6e, 0x29, 0xe0, 0xa6, 0xe3, 0xd0, 0x95, 0x02, 0x6e, 0x55, 0xc0, + 0xd5, 0x02, 0xfb, 0x6f, 0x87, 0x4c, 0x9c, 0xe6, 0x43, 0x37, 0x48, 0x22, 0x2f, 0x4c, 0xc2, 0xc4, + 0x53, 0x3a, 0xc3, 0xfc, 0xa1, 0x3a, 0xa9, 0x83, 0xfa, 0x2a, 0xf4, 0xf7, 0xdf, 0xd3, 0x05, 0xd2, + 0x94, 0x45, 0x34, 0x38, 0x65, 0x31, 0x64, 0xb3, 0xb2, 0xc4, 0x08, 0x04, 0xf5, 0xa6, 0x6b, 0x55, + 0xed, 0x7b, 0x9b, 0xa2, 0xb2, 0x3c, 0x16, 0x2c, 0x82, 0xb5, 0x80, 0xf7, 0x9f, 0x17, 0xc0, 0x83, + 0x53, 0x88, 0xe8, 0x5a, 0xdc, 0xbb, 0x9b, 0xe2, 0x72, 0xc1, 0x26, 0x1e, 0x8b, 0x05, 0x17, 0xd9, + 0x5a, 0x50, 0xa5, 0x27, 0x0e, 0xd9, 0x14, 
0xb2, 0xb2, 0x21, 0x38, 0xa3, 0x51, 0x3a, 0x81, 0x67, + 0xf5, 0x74, 0x73, 0xe3, 0xa8, 0x9e, 0xc1, 0x76, 0xfe, 0xae, 0xa3, 0xd7, 0x6e, 0x97, 0xfe, 0x9f, + 0xb0, 0x30, 0x66, 0x71, 0x48, 0xe0, 0x87, 0x1c, 0xb8, 0xc0, 0xdf, 0xa3, 0x8e, 0xb4, 0x6e, 0x44, + 0x05, 0x35, 0x8d, 0x03, 0xe3, 0xb0, 0x77, 0xf4, 0x8e, 0xab, 0x07, 0x59, 0xed, 0xa4, 0x1c, 0xa5, + 0x64, 0xbb, 0xd3, 0xbe, 0xfb, 0xc5, 0xf0, 0x11, 0x04, 0xe2, 0x3e, 0x08, 0xea, 0xe3, 0xf3, 0xb9, + 0x5d, 0x5b, 0xcc, 0x6d, 0x54, 0x62, 0x64, 0xa5, 0x8a, 0x53, 0xd4, 0xe4, 0x29, 0x04, 0x66, 0x5d, + 0xa9, 0x7f, 0xee, 0xbe, 0xe4, 0x9a, 0xb8, 0x1b, 0x6b, 0x3f, 0x49, 0x21, 0xf0, 0x77, 0x75, 0xee, + 0xa6, 0x3c, 0x11, 0x95, 0x09, 0x9f, 0xa1, 0x36, 0x17, 0x54, 0xe4, 0xdc, 0x6c, 0xa8, 0x9c, 0x5f, + 0x6e, 0x31, 0xa7, 0xd2, 0xf5, 0xf7, 0x74, 0xd6, 0x76, 0x71, 0x26, 0x3a, 0x9f, 0xf3, 0x6b, 0x1d, + 0x39, 0x1b, 0x63, 0x6f, 0x27, 0xf1, 0x88, 0x09, 0x96, 0xc4, 0xf8, 0x18, 0x35, 0xc5, 0x2c, 0x05, + 0x65, 0x78, 0xd7, 0x7f, 0x7d, 0xd9, 0xc2, 0x83, 0x59, 0x0a, 0x4f, 0xe7, 0xf6, 0xab, 0x97, 0xf9, + 0x12, 0x27, 0x2a, 0x02, 0xbf, 0x89, 0xda, 0x19, 0x50, 0x9e, 0xc4, 0xca, 0xce, 0x6e, 0x59, 0x08, + 0x51, 0x28, 0xd1, 0xb7, 0xf8, 0x06, 0xda, 0x89, 0x80, 0x73, 0x1a, 0x82, 0xf2, 0xa0, 0xeb, 0x5f, + 0xd1, 0xc4, 0x9d, 0xfb, 0x05, 0x4c, 0x96, 0xf7, 0xf8, 0x11, 0xda, 0x9b, 0x50, 0x2e, 0x06, 0xe9, + 0x88, 0x0a, 0x78, 0xc0, 0x22, 0x30, 0x9b, 0xca, 0xb5, 0xb7, 0x5e, 0x6c, 0x0f, 0x64, 0x84, 0x7f, + 0x4d, 0xab, 0xef, 0xdd, 0xfb, 0x8f, 0x12, 0xb9, 0xa4, 0xec, 0xfc, 0x63, 0xa0, 0xeb, 0x1b, 0xfd, + 0xb9, 0xc7, 0xb8, 0xc0, 0xdf, 0xad, 0xed, 0xa3, 0xfb, 0x62, 0x75, 0xc8, 0x68, 0xb5, 0x8d, 0x57, + 0x75, 0x2d, 0x9d, 0x25, 0x52, 0xd9, 0xc5, 0x04, 0xb5, 0x98, 0x80, 0x88, 0x9b, 0xf5, 0x83, 0xc6, + 0x61, 0xef, 0xe8, 0xd3, 0xed, 0x2d, 0x86, 0xff, 0x8a, 0x4e, 0xdb, 0xba, 0x2b, 0x13, 0x90, 0x22, + 0x8f, 0xb3, 0x68, 0xfc, 0x4f, 0xc3, 0x72, 0x65, 0xf1, 0x1b, 0x68, 0x27, 0x2b, 0x8e, 0xaa, 0xdf, + 0x5d, 0xbf, 0x27, 0xa7, 0xa4, 0x19, 0x64, 0x79, 0x87, 0x6f, 0xa2, 0x4e, 0xce, 0x21, 0x8b, 0x69, + 0x04, 0x7a, 0xf4, 0xab, 0x3e, 0x07, 0x1a, 0x27, 0x2b, 0x06, 0xbe, 0x8e, 0x1a, 0x39, 0x1b, 0xe9, + 0xd1, 0xf7, 0x34, 0xb1, 0x31, 0xb8, 0xfb, 0x31, 0x91, 0x38, 0x76, 0x50, 0x3b, 0xcc, 0x92, 0x3c, + 0xe5, 0x66, 0xf3, 0xa0, 0x71, 0xd8, 0xf5, 0x91, 0xdc, 0xa0, 0x4f, 0x14, 0x42, 0xf4, 0x0d, 0x3e, + 0x42, 0x9d, 0x31, 0xcc, 0x06, 0x6a, 0x85, 0x5a, 0x8a, 0x75, 0x4d, 0xb2, 0x14, 0xc0, 0x9f, 0xce, + 0xed, 0xce, 0x67, 0xfa, 0x96, 0xac, 0x78, 0xf8, 0x47, 0xd4, 0x82, 0x33, 0x91, 0x51, 0xb3, 0xad, + 0xec, 0xfd, 0x66, 0xbb, 0xff, 0xba, 0x7b, 0x47, 0x6a, 0xdf, 0x89, 0x45, 0x36, 0x2b, 0xdd, 0x56, + 0x18, 0x29, 0xd2, 0xee, 0xe7, 0x08, 0x95, 0x1c, 0x7c, 0x15, 0x35, 0xc6, 0x30, 0x2b, 0x7e, 0x32, + 0x22, 0x3f, 0xf1, 0x57, 0xa8, 0x35, 0xa5, 0x93, 0x1c, 0xf4, 0x5b, 0xf4, 0xe1, 0x4b, 0xd7, 0xa7, + 0xd4, 0xbf, 0x96, 0x12, 0xa4, 0x50, 0xba, 0x55, 0x3f, 0x36, 0x9c, 0xb9, 0x81, 0xec, 0xe7, 0xbc, + 0x18, 0xf8, 0x27, 0x03, 0xa1, 0x60, 0xf9, 0x43, 0x73, 0xd3, 0x50, 0x06, 0x9d, 0x6c, 0xcf, 0xa0, + 0xd5, 0x63, 0x51, 0xbe, 0xc6, 0x2b, 0x88, 0x93, 0x4a, 0x6a, 0xdc, 0x47, 0xbd, 0x8a, 0xb4, 0xb2, + 0x62, 0xd7, 0xbf, 0xb2, 0x98, 0xdb, 0xbd, 0x8a, 0x38, 0xa9, 0x72, 0x9c, 0x0f, 0xb4, 0xaf, 0xaa, + 0x73, 0x6c, 0x2f, 0x7f, 0x22, 0x43, 0xad, 0x45, 0xf7, 0xf2, 0xd2, 0xdf, 0xea, 0xfc, 0xf2, 0x9b, + 0x5d, 0x7b, 0xfc, 0xc7, 0x41, 0xcd, 0xbf, 0x71, 0x7e, 0x61, 0xd5, 0x9e, 0x5c, 0x58, 0xb5, 0xdf, + 0x2f, 0xac, 0xda, 0xe3, 0x85, 0x65, 0x9c, 0x2f, 0x2c, 0xe3, 0xc9, 0xc2, 0x32, 0xfe, 0x5c, 0x58, + 0xc6, 0xcf, 0x7f, 0x59, 0xb5, 0x6f, 0x77, 0x74, 0x67, 0xff, 0x06, 
0x00, 0x00, 0xff, 0xff, 0x07, + 0x0c, 0x3b, 0x3a, 0x80, 0x08, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/generated.proto new file mode 100644 index 0000000000..215dc39a42 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/generated.proto @@ -0,0 +1,124 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.certificates.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// Describes a certificate signing request +message CertificateSigningRequest { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The certificate request itself and any additional information. + // +optional + optional CertificateSigningRequestSpec spec = 2; + + // Derived information about the request. + // +optional + optional CertificateSigningRequestStatus status = 3; +} + +message CertificateSigningRequestCondition { + // request approval state, currently Approved or Denied. + optional string type = 1; + + // brief reason for the request state + // +optional + optional string reason = 2; + + // human readable message with details about the request state + // +optional + optional string message = 3; + + // timestamp for the last update to this condition + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; +} + +message CertificateSigningRequestList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated CertificateSigningRequest items = 2; +} + +// This information is immutable after the request is created. Only the Request +// and Usages fields can be set on creation, other fields are derived by +// Kubernetes and cannot be modified by users. +message CertificateSigningRequestSpec { + // Base64-encoded PKCS#10 CSR data + optional bytes request = 1; + + // allowedUsages specifies a set of usage contexts the key will be + // valid for. + // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + repeated string keyUsage = 5; + + // Information about the requesting user. + // See user.Info interface for details. + // +optional + optional string username = 2; + + // UID information about the requesting user. + // See user.Info interface for details. 
+ // +optional + optional string uid = 3; + + // Group information about the requesting user. + // See user.Info interface for details. + // +optional + repeated string groups = 4; + + // Extra information about the requesting user. + // See user.Info interface for details. + // +optional + map extra = 6; +} + +message CertificateSigningRequestStatus { + // Conditions applied to the request, such as approval or denial. + // +optional + repeated CertificateSigningRequestCondition conditions = 1; + + // If request was approved, the controller will place the issued certificate here. + // +optional + optional bytes certificate = 2; +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message ExtraValue { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/helpers.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/helpers.go new file mode 100644 index 0000000000..1375063c10 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/helpers.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "crypto/x509" + "encoding/pem" + "errors" +) + +// ParseCSR extracts the CSR from the API object and decodes it. +func ParseCSR(obj *CertificateSigningRequest) (*x509.CertificateRequest, error) { + // extract PEM from request object + pemBytes := obj.Spec.Request + block, _ := pem.Decode(pemBytes) + if block == nil || block.Type != "CERTIFICATE REQUEST" { + return nil, errors.New("PEM block type must be CERTIFICATE REQUEST") + } + csr, err := x509.ParseCertificateRequest(block.Bytes) + if err != nil { + return nil, err + } + return csr, nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/register.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/register.go new file mode 100644 index 0000000000..6574de971b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/register.go @@ -0,0 +1,59 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
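Below is a minimal usage sketch (not part of the vendored diff) for the ParseCSR helper added in helpers.go above. The package name csrutil, the inspectCSR function, and its obj argument are illustrative assumptions; only ParseCSR and the CertificateSigningRequest type come from the vendored v1beta1 package.

package csrutil

import (
	"fmt"

	certsv1beta1 "k8s.io/client-go/pkg/apis/certificates/v1beta1"
)

// inspectCSR PEM-decodes the PKCS#10 request carried in spec.request via the
// vendored ParseCSR helper and prints its subject and requested DNS names.
// obj is assumed to be a CertificateSigningRequest already retrieved from the API.
func inspectCSR(obj *certsv1beta1.CertificateSigningRequest) error {
	req, err := certsv1beta1.ParseCSR(obj)
	if err != nil {
		return fmt.Errorf("parsing spec.request: %v", err)
	}
	fmt.Printf("CSR subject=%q dnsNames=%v\n", req.Subject.CommonName, req.DNSNames)
	return nil
}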
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "certificates.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addConversionFuncs, addDefaultingFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CertificateSigningRequest{}, + &CertificateSigningRequestList{}, + ) + + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} + +func (obj *CertificateSigningRequest) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } +func (obj *CertificateSigningRequestList) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types.generated.go new file mode 100644 index 0000000000..50d908bd7c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types.generated.go @@ -0,0 +1,2624 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
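A short sketch (not part of the vendored diff) of how the AddToScheme helper from register.go above is typically consumed. The main function and the log output are illustrative assumptions; runtime.NewScheme and AddToScheme are the only pieces taken from the vendored packages.

package main

import (
	"log"

	"k8s.io/apimachinery/pkg/runtime"
	certsv1beta1 "k8s.io/client-go/pkg/apis/certificates/v1beta1"
)

func main() {
	// Build an empty scheme and register CertificateSigningRequest and
	// CertificateSigningRequestList under certificates.k8s.io/v1beta1,
	// along with the shared watch/list meta types.
	scheme := runtime.NewScheme()
	if err := certsv1beta1.AddToScheme(scheme); err != nil {
		log.Fatalf("registering certificates/v1beta1: %v", err)
	}
	log.Printf("known kinds after registration: %d", len(scheme.AllKnownTypes()))
}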
+// ************************************************************ + +package v1beta1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 + } +} + +func (x *CertificateSigningRequest) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && 
z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CertificateSigningRequest) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CertificateSigningRequest) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } 
+ } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = CertificateSigningRequestSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = CertificateSigningRequestStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CertificateSigningRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = CertificateSigningRequestSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = CertificateSigningRequestStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CertificateSigningRequestSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = len(x.Usages) != 0 + yyq2[2] = 
x.Username != "" + yyq2[3] = x.UID != "" + yyq2[4] = len(x.Groups) != 0 + yyq2[5] = len(x.Extra) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Request == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Request)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("request")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Request == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Request)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Usages == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceKeyUsage(([]KeyUsage)(x.Usages), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("usages")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Usages == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceKeyUsage(([]KeyUsage)(x.Usages), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Username)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("username")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Username)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("uid")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Groups == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("groups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Groups == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.Extra == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("extra")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Extra == nil { + r.EncodeNil() + } else { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CertificateSigningRequestSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CertificateSigningRequestSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "request": + if r.TryDecodeAsNil() { + x.Request = nil + } else { + yyv4 := &x.Request + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *yyv4 = r.DecodeBytes(*(*[]byte)(yyv4), false, false) + } + } + case "usages": + if r.TryDecodeAsNil() { + x.Usages = nil + } else { + yyv6 := &x.Usages + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceKeyUsage((*[]KeyUsage)(yyv6), d) + } + } + case "username": + if r.TryDecodeAsNil() { + x.Username = "" + } else { + yyv8 := &x.Username + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "uid": + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv10 := &x.UID + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "groups": + if r.TryDecodeAsNil() { + x.Groups = nil + } else { + yyv12 := &x.Groups + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + z.F.DecSliceStringX(yyv12, false, d) + } + } + case "extra": + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv14 := &x.Extra + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv14), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end 
switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CertificateSigningRequestSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Request = nil + } else { + yyv17 := &x.Request + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *yyv17 = r.DecodeBytes(*(*[]byte)(yyv17), false, false) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Usages = nil + } else { + yyv19 := &x.Usages + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceKeyUsage((*[]KeyUsage)(yyv19), d) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Username = "" + } else { + yyv21 := &x.Username + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv23 := &x.UID + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Groups = nil + } else { + yyv25 := &x.Groups + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + z.F.DecSliceStringX(yyv25, false, d) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv27 := &x.Extra + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv27), d) + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj16-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ExtraValue) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + h.encExtraValue((ExtraValue)(x), e) + } + } +} + +func (x *ExtraValue) 
CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decExtraValue((*ExtraValue)(x), d) + } +} + +func (x *CertificateSigningRequestStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Conditions) != 0 + yyq2[1] = len(x.Certificate) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceCertificateSigningRequestCondition(([]CertificateSigningRequestCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceCertificateSigningRequestCondition(([]CertificateSigningRequestCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Certificate == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Certificate)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("certificate")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Certificate == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Certificate)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CertificateSigningRequestStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CertificateSigningRequestStatus) codecDecodeSelfFromMap(l 
int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv4 := &x.Conditions + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceCertificateSigningRequestCondition((*[]CertificateSigningRequestCondition)(yyv4), d) + } + } + case "certificate": + if r.TryDecodeAsNil() { + x.Certificate = nil + } else { + yyv6 := &x.Certificate + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *yyv6 = r.DecodeBytes(*(*[]byte)(yyv6), false, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CertificateSigningRequestStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv9 := &x.Conditions + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceCertificateSigningRequestCondition((*[]CertificateSigningRequestCondition)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Certificate = nil + } else { + yyv11 := &x.Certificate + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *yyv11 = r.DecodeBytes(*(*[]byte)(yyv11), false, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x RequestConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *RequestConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *CertificateSigningRequestCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if 
z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Reason != "" + yyq2[2] = x.Message != "" + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy13 := &x.LastUpdateTime + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(yy13) { + } else if yym14 { + z.EncBinaryMarshal(yy13) + } else if !yym14 && z.IsJSONHandle() { + z.EncJSONMarshal(yy13) + } else { + z.EncFallback(yy13) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastUpdateTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy15 := &x.LastUpdateTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CertificateSigningRequestCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } 
else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CertificateSigningRequestCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv5 := &x.Reason + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv7 := &x.Message + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + case "lastUpdateTime": + if r.TryDecodeAsNil() { + x.LastUpdateTime = pkg1_v1.Time{} + } else { + yyv9 := &x.LastUpdateTime + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if yym10 { + z.DecBinaryUnmarshal(yyv9) + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv9) + } else { + z.DecFallback(yyv9, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CertificateSigningRequestCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv12 := &x.Type + yyv12.CodecDecodeSelf(d) + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv13 := &x.Reason + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv15 := &x.Message + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastUpdateTime = pkg1_v1.Time{} + } else { + yyv17 := &x.LastUpdateTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CertificateSigningRequestList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := 
z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceCertificateSigningRequest(([]CertificateSigningRequest)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceCertificateSigningRequest(([]CertificateSigningRequest)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CertificateSigningRequestList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CertificateSigningRequestList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceCertificateSigningRequest((*[]CertificateSigningRequest)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CertificateSigningRequestList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceCertificateSigningRequest((*[]CertificateSigningRequest)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x KeyUsage) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *KeyUsage) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x codecSelfer1234) encSliceKeyUsage(v []KeyUsage, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yyv1.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceKeyUsage(v *[]KeyUsage, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []KeyUsage{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + 
if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]KeyUsage, yyrl1) + } + } else { + yyv1 = make([]KeyUsage, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 KeyUsage + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []KeyUsage{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encMapstringExtraValue(v map[string]ExtraValue, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decMapstringExtraValue(v *map[string]ExtraValue, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string]ExtraValue, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 ExtraValue + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv4 := &yymv1 + yyv4.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv5 := &yymk1 + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv7 := &yymv1 + yyv7.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? 
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encExtraValue(v ExtraValue, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyv1)) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decExtraValue(v *ExtraValue, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]string, yyrl1) + } + } else { + yyv1 = make([]string, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 string + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv6 := &yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceCertificateSigningRequestCondition(v []CertificateSigningRequestCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCertificateSigningRequestCondition(v *[]CertificateSigningRequestCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []CertificateSigningRequestCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + 
yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]CertificateSigningRequestCondition, yyrl1) + } + } else { + yyv1 = make([]CertificateSigningRequestCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CertificateSigningRequestCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, CertificateSigningRequestCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CertificateSigningRequestCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, CertificateSigningRequestCondition{}) // var yyz1 CertificateSigningRequestCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = CertificateSigningRequestCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []CertificateSigningRequestCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceCertificateSigningRequest(v []CertificateSigningRequest, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCertificateSigningRequest(v *[]CertificateSigningRequest, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []CertificateSigningRequest{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 416) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]CertificateSigningRequest, yyrl1) + } + } else { + yyv1 = make([]CertificateSigningRequest, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CertificateSigningRequest{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, CertificateSigningRequest{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CertificateSigningRequest{} + } else { 
+ yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, CertificateSigningRequest{}) // var yyz1 CertificateSigningRequest + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = CertificateSigningRequest{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []CertificateSigningRequest{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types.go new file mode 100644 index 0000000000..a9149ba8df --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types.go @@ -0,0 +1,152 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient=true +// +nonNamespaced=true + +// Describes a certificate signing request +type CertificateSigningRequest struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The certificate request itself and any additional information. + // +optional + Spec CertificateSigningRequestSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Derived information about the request. + // +optional + Status CertificateSigningRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// This information is immutable after the request is created. Only the Request +// and Usages fields can be set on creation, other fields are derived by +// Kubernetes and cannot be modified by users. +type CertificateSigningRequestSpec struct { + // Base64-encoded PKCS#10 CSR data + Request []byte `json:"request" protobuf:"bytes,1,opt,name=request"` + + // allowedUsages specifies a set of usage contexts the key will be + // valid for. + // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + Usages []KeyUsage `json:"usages,omitempty" protobuf:"bytes,5,opt,name=keyUsage"` + + // Information about the requesting user. + // See user.Info interface for details. + // +optional + Username string `json:"username,omitempty" protobuf:"bytes,2,opt,name=username"` + // UID information about the requesting user. + // See user.Info interface for details. + // +optional + UID string `json:"uid,omitempty" protobuf:"bytes,3,opt,name=uid"` + // Group information about the requesting user. + // See user.Info interface for details. + // +optional + Groups []string `json:"groups,omitempty" protobuf:"bytes,4,rep,name=groups"` + // Extra information about the requesting user. + // See user.Info interface for details. 
+ // +optional + Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,6,rep,name=extra"` +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type ExtraValue []string + +func (t ExtraValue) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +type CertificateSigningRequestStatus struct { + // Conditions applied to the request, such as approval or denial. + // +optional + Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` + + // If request was approved, the controller will place the issued certificate here. + // +optional + Certificate []byte `json:"certificate,omitempty" protobuf:"bytes,2,opt,name=certificate"` +} + +type RequestConditionType string + +// These are the possible conditions for a certificate request. +const ( + CertificateApproved RequestConditionType = "Approved" + CertificateDenied RequestConditionType = "Denied" +) + +type CertificateSigningRequestCondition struct { + // request approval state, currently Approved or Denied. + Type RequestConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=RequestConditionType"` + // brief reason for the request state + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` + // human readable message with details about the request state + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` + // timestamp for the last update to this condition + // +optional + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,4,opt,name=lastUpdateTime"` +} + +type CertificateSigningRequestList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []CertificateSigningRequest `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// KeyUsages specifies valid usage contexts for keys. 
+// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 +// https://tools.ietf.org/html/rfc5280#section-4.2.1.12 +type KeyUsage string + +const ( + UsageSigning KeyUsage = "signing" + UsageDigitalSignature KeyUsage = "digital signature" + UsageContentCommittment KeyUsage = "content committment" + UsageKeyEncipherment KeyUsage = "key encipherment" + UsageKeyAgreement KeyUsage = "key agreement" + UsageDataEncipherment KeyUsage = "data encipherment" + UsageCertSign KeyUsage = "cert sign" + UsageCRLSign KeyUsage = "crl sign" + UsageEncipherOnly KeyUsage = "encipher only" + UsageDecipherOnly KeyUsage = "decipher only" + UsageAny KeyUsage = "any" + UsageServerAuth KeyUsage = "server auth" + UsageClientAuth KeyUsage = "client auth" + UsageCodeSigning KeyUsage = "code signing" + UsageEmailProtection KeyUsage = "email protection" + UsageSMIME KeyUsage = "s/mime" + UsageIPsecEndSystem KeyUsage = "ipsec end system" + UsageIPsecTunnel KeyUsage = "ipsec tunnel" + UsageIPsecUser KeyUsage = "ipsec user" + UsageTimestamping KeyUsage = "timestamping" + UsageOCSPSigning KeyUsage = "ocsp signing" + UsageMicrosoftSGC KeyUsage = "microsoft sgc" + UsageNetscapSGC KeyUsage = "netscape sgc" +) diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..4fd91df061 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,74 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_CertificateSigningRequest = map[string]string{ + "": "Describes a certificate signing request", + "spec": "The certificate request itself and any additional information.", + "status": "Derived information about the request.", +} + +func (CertificateSigningRequest) SwaggerDoc() map[string]string { + return map_CertificateSigningRequest +} + +var map_CertificateSigningRequestCondition = map[string]string{ + "type": "request approval state, currently Approved or Denied.", + "reason": "brief reason for the request state", + "message": "human readable message with details about the request state", + "lastUpdateTime": "timestamp for the last update to this condition", +} + +func (CertificateSigningRequestCondition) SwaggerDoc() map[string]string { + return map_CertificateSigningRequestCondition +} + +var map_CertificateSigningRequestSpec = map[string]string{ + "": "This information is immutable after the request is created. Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users.", + "request": "Base64-encoded PKCS#10 CSR data", + "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12", + "username": "Information about the requesting user. See user.Info interface for details.", + "uid": "UID information about the requesting user. See user.Info interface for details.", + "groups": "Group information about the requesting user. See user.Info interface for details.", + "extra": "Extra information about the requesting user. See user.Info interface for details.", +} + +func (CertificateSigningRequestSpec) SwaggerDoc() map[string]string { + return map_CertificateSigningRequestSpec +} + +var map_CertificateSigningRequestStatus = map[string]string{ + "conditions": "Conditions applied to the request, such as approval or denial.", + "certificate": "If request was approved, the controller will place the issued certificate here.", +} + +func (CertificateSigningRequestStatus) SwaggerDoc() map[string]string { + return map_CertificateSigningRequestStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.conversion.go new file mode 100644 index 0000000000..bd90770412 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.conversion.go @@ -0,0 +1,171 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! 
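For reference, a minimal sketch of how the generated SwaggerDoc() maps above can be consumed; the main package, the import alias, and the assumption that the vendored import path matches this tree are all illustrative, not part of the patch:

```go
package main

import (
	"fmt"

	certificatesv1beta1 "k8s.io/client-go/pkg/apis/certificates/v1beta1"
)

func main() {
	// SwaggerDoc returns the generated per-field documentation; the empty key
	// holds the type-level description used for swagger output.
	docs := certificatesv1beta1.CertificateSigningRequestSpec{}.SwaggerDoc()
	fmt.Println(docs[""])       // type description
	fmt.Println(docs["usages"]) // field description for Usages
}
```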
+ +package v1beta1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + certificates "k8s.io/client-go/pkg/apis/certificates" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest, + Convert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest, + Convert_v1beta1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition, + Convert_certificates_CertificateSigningRequestCondition_To_v1beta1_CertificateSigningRequestCondition, + Convert_v1beta1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList, + Convert_certificates_CertificateSigningRequestList_To_v1beta1_CertificateSigningRequestList, + Convert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec, + Convert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec, + Convert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus, + Convert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus, + ) +} + +func autoConvert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in *CertificateSigningRequest, out *certificates.CertificateSigningRequest, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in *CertificateSigningRequest, out *certificates.CertificateSigningRequest, s conversion.Scope) error { + return autoConvert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in, out, s) +} + +func autoConvert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest(in *certificates.CertificateSigningRequest, out *CertificateSigningRequest, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest(in *certificates.CertificateSigningRequest, out *CertificateSigningRequest, s conversion.Scope) error { + return autoConvert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest(in, out, s) +} + +func autoConvert_v1beta1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in *CertificateSigningRequestCondition, out *certificates.CertificateSigningRequestCondition, s conversion.Scope) error { + out.Type = certificates.RequestConditionType(in.Type) + out.Reason = in.Reason + 
out.Message = in.Message + out.LastUpdateTime = in.LastUpdateTime + return nil +} + +func Convert_v1beta1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in *CertificateSigningRequestCondition, out *certificates.CertificateSigningRequestCondition, s conversion.Scope) error { + return autoConvert_v1beta1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in, out, s) +} + +func autoConvert_certificates_CertificateSigningRequestCondition_To_v1beta1_CertificateSigningRequestCondition(in *certificates.CertificateSigningRequestCondition, out *CertificateSigningRequestCondition, s conversion.Scope) error { + out.Type = RequestConditionType(in.Type) + out.Reason = in.Reason + out.Message = in.Message + out.LastUpdateTime = in.LastUpdateTime + return nil +} + +func Convert_certificates_CertificateSigningRequestCondition_To_v1beta1_CertificateSigningRequestCondition(in *certificates.CertificateSigningRequestCondition, out *CertificateSigningRequestCondition, s conversion.Scope) error { + return autoConvert_certificates_CertificateSigningRequestCondition_To_v1beta1_CertificateSigningRequestCondition(in, out, s) +} + +func autoConvert_v1beta1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in *CertificateSigningRequestList, out *certificates.CertificateSigningRequestList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]certificates.CertificateSigningRequest)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in *CertificateSigningRequestList, out *certificates.CertificateSigningRequestList, s conversion.Scope) error { + return autoConvert_v1beta1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in, out, s) +} + +func autoConvert_certificates_CertificateSigningRequestList_To_v1beta1_CertificateSigningRequestList(in *certificates.CertificateSigningRequestList, out *CertificateSigningRequestList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]CertificateSigningRequest)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_certificates_CertificateSigningRequestList_To_v1beta1_CertificateSigningRequestList(in *certificates.CertificateSigningRequestList, out *CertificateSigningRequestList, s conversion.Scope) error { + return autoConvert_certificates_CertificateSigningRequestList_To_v1beta1_CertificateSigningRequestList(in, out, s) +} + +func autoConvert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in *CertificateSigningRequestSpec, out *certificates.CertificateSigningRequestSpec, s conversion.Scope) error { + out.Request = *(*[]byte)(unsafe.Pointer(&in.Request)) + out.Usages = *(*[]certificates.KeyUsage)(unsafe.Pointer(&in.Usages)) + out.Username = in.Username + out.UID = in.UID + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]certificates.ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in *CertificateSigningRequestSpec, out *certificates.CertificateSigningRequestSpec, s conversion.Scope) error { + return autoConvert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in, out, s) +} + +func autoConvert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec(in 
*certificates.CertificateSigningRequestSpec, out *CertificateSigningRequestSpec, s conversion.Scope) error { + out.Request = *(*[]byte)(unsafe.Pointer(&in.Request)) + out.Usages = *(*[]KeyUsage)(unsafe.Pointer(&in.Usages)) + out.Username = in.Username + out.UID = in.UID + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec(in *certificates.CertificateSigningRequestSpec, out *CertificateSigningRequestSpec, s conversion.Scope) error { + return autoConvert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec(in, out, s) +} + +func autoConvert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in *CertificateSigningRequestStatus, out *certificates.CertificateSigningRequestStatus, s conversion.Scope) error { + out.Conditions = *(*[]certificates.CertificateSigningRequestCondition)(unsafe.Pointer(&in.Conditions)) + out.Certificate = *(*[]byte)(unsafe.Pointer(&in.Certificate)) + return nil +} + +func Convert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in *CertificateSigningRequestStatus, out *certificates.CertificateSigningRequestStatus, s conversion.Scope) error { + return autoConvert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in, out, s) +} + +func autoConvert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus(in *certificates.CertificateSigningRequestStatus, out *CertificateSigningRequestStatus, s conversion.Scope) error { + out.Conditions = *(*[]CertificateSigningRequestCondition)(unsafe.Pointer(&in.Conditions)) + out.Certificate = *(*[]byte)(unsafe.Pointer(&in.Certificate)) + return nil +} + +func Convert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus(in *certificates.CertificateSigningRequestStatus, out *CertificateSigningRequestStatus, s conversion.Scope) error { + return autoConvert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..800cdee477 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,150 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
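The generated conversions above avoid per-element copies by reinterpreting slices and maps through unsafe.Pointer, which is only valid because the v1beta1 and internal types share identical memory layouts. A minimal, self-contained sketch of that pattern, using hypothetical externalUsage/internalUsage types in place of the real ones:

```go
package main

import (
	"fmt"
	"unsafe"
)

// Two string-based types with identical memory layout, standing in for a
// versioned type and its internal counterpart.
type externalUsage string
type internalUsage string

func main() {
	in := []externalUsage{"signing", "server auth"}
	// Reinterpret the backing array in place instead of copying element by
	// element; safe only while the two element types share the same layout.
	out := *(*[]internalUsage)(unsafe.Pointer(&in))
	fmt.Println(out)
}
```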
+ +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CertificateSigningRequest, InType: reflect.TypeOf(&CertificateSigningRequest{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CertificateSigningRequestCondition, InType: reflect.TypeOf(&CertificateSigningRequestCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CertificateSigningRequestList, InType: reflect.TypeOf(&CertificateSigningRequestList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CertificateSigningRequestSpec, InType: reflect.TypeOf(&CertificateSigningRequestSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CertificateSigningRequestStatus, InType: reflect.TypeOf(&CertificateSigningRequestStatus{})}, + ) +} + +func DeepCopy_v1beta1_CertificateSigningRequest(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequest) + out := out.(*CertificateSigningRequest) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_CertificateSigningRequestSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_CertificateSigningRequestStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_CertificateSigningRequestCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequestCondition) + out := out.(*CertificateSigningRequestCondition) + *out = *in + out.LastUpdateTime = in.LastUpdateTime.DeepCopy() + return nil + } +} + +func DeepCopy_v1beta1_CertificateSigningRequestList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequestList) + out := out.(*CertificateSigningRequestList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CertificateSigningRequest, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_CertificateSigningRequest(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_CertificateSigningRequestSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequestSpec) + out := out.(*CertificateSigningRequestSpec) + *out = *in + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Usages != nil { + in, out := &in.Usages, &out.Usages + *out = make([]KeyUsage, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*ExtraValue) + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_CertificateSigningRequestStatus(in interface{}, 
out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequestStatus) + out := out.(*CertificateSigningRequestStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]CertificateSigningRequestCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_CertificateSigningRequestCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.defaults.go new file mode 100644 index 0000000000..3c5ae03627 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.defaults.go @@ -0,0 +1,47 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&CertificateSigningRequest{}, func(obj interface{}) { SetObjectDefaults_CertificateSigningRequest(obj.(*CertificateSigningRequest)) }) + scheme.AddTypeDefaultingFunc(&CertificateSigningRequestList{}, func(obj interface{}) { + SetObjectDefaults_CertificateSigningRequestList(obj.(*CertificateSigningRequestList)) + }) + return nil +} + +func SetObjectDefaults_CertificateSigningRequest(in *CertificateSigningRequest) { + SetDefaults_CertificateSigningRequestSpec(&in.Spec) +} + +func SetObjectDefaults_CertificateSigningRequestList(in *CertificateSigningRequestList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_CertificateSigningRequest(a) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/certificates/zz_generated.deepcopy.go new file mode 100644 index 0000000000..8769028913 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/zz_generated.deepcopy.go @@ -0,0 +1,150 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package certificates + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_certificates_CertificateSigningRequest, InType: reflect.TypeOf(&CertificateSigningRequest{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_certificates_CertificateSigningRequestCondition, InType: reflect.TypeOf(&CertificateSigningRequestCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_certificates_CertificateSigningRequestList, InType: reflect.TypeOf(&CertificateSigningRequestList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_certificates_CertificateSigningRequestSpec, InType: reflect.TypeOf(&CertificateSigningRequestSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_certificates_CertificateSigningRequestStatus, InType: reflect.TypeOf(&CertificateSigningRequestStatus{})}, + ) +} + +func DeepCopy_certificates_CertificateSigningRequest(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequest) + out := out.(*CertificateSigningRequest) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_certificates_CertificateSigningRequestSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_certificates_CertificateSigningRequestStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_certificates_CertificateSigningRequestCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequestCondition) + out := out.(*CertificateSigningRequestCondition) + *out = *in + out.LastUpdateTime = in.LastUpdateTime.DeepCopy() + return nil + } +} + +func DeepCopy_certificates_CertificateSigningRequestList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequestList) + out := out.(*CertificateSigningRequestList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CertificateSigningRequest, len(*in)) + for i := range *in { + if err := DeepCopy_certificates_CertificateSigningRequest(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_certificates_CertificateSigningRequestSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequestSpec) + out := out.(*CertificateSigningRequestSpec) + *out = *in + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Usages != nil { + in, out := &in.Usages, &out.Usages + *out = make([]KeyUsage, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue) + 
for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*ExtraValue) + } + } + } + return nil + } +} + +func DeepCopy_certificates_CertificateSigningRequestStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequestStatus) + out := out.(*CertificateSigningRequestStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]CertificateSigningRequestCondition, len(*in)) + for i := range *in { + if err := DeepCopy_certificates_CertificateSigningRequestCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/OWNERS b/vendor/k8s.io/client-go/pkg/apis/extensions/OWNERS new file mode 100755 index 0000000000..494763a693 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/OWNERS @@ -0,0 +1,41 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- nikhiljindal +- bprashanth +- erictune +- pmorie +- sttts +- kargakis +- saad-ali +- janetkuo +- justinsb +- ncdc +- timstclair +- mwielgus +- timothysc +- soltysh +- piosz +- dims +- errordeveloper +- madhusudancs +- rootfs +- jszczepkowski +- mml +- resouer +- mbohlool +- david-mcmahon +- therc +- pweil- +- tmrts +- mqliang +- lukaszo +- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/doc.go b/vendor/k8s.io/client-go/pkg/apis/extensions/doc.go new file mode 100644 index 0000000000..87edee41cb --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package extensions diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/helpers.go b/vendor/k8s.io/client-go/pkg/apis/extensions/helpers.go new file mode 100644 index 0000000000..27d3e23add --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/helpers.go @@ -0,0 +1,37 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package extensions + +import ( + "strings" +) + +// SysctlsFromPodSecurityPolicyAnnotation parses an annotation value of the key +// SysctlsSecurityPolocyAnnotationKey into a slice of sysctls. 
An empty slice +// is returned if annotation is the empty string. +func SysctlsFromPodSecurityPolicyAnnotation(annotation string) ([]string, error) { + if len(annotation) == 0 { + return []string{}, nil + } + + return strings.Split(annotation, ","), nil +} + +// PodAnnotationsFromSysctls creates an annotation value for a slice of Sysctls. +func PodAnnotationsFromSysctls(sysctls []string) string { + return strings.Join(sysctls, ",") +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/install/install.go b/vendor/k8s.io/client-go/pkg/apis/extensions/install/install.go new file mode 100644 index 0000000000..1f968e861c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/install/install.go @@ -0,0 +1,51 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/extensions" + "k8s.io/client-go/pkg/apis/extensions/v1beta1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: extensions.GroupName, + VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/extensions", + RootScopedKinds: sets.NewString("PodSecurityPolicy", "ThirdPartyResource"), + AddInternalObjectsToScheme: extensions.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/register.go b/vendor/k8s.io/client-go/pkg/apis/extensions/register.go new file mode 100644 index 0000000000..5983636c22 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/register.go @@ -0,0 +1,70 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package extensions + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "extensions" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + // TODO this gets cleaned up when the types are fixed + scheme.AddKnownTypes(SchemeGroupVersion, + &Deployment{}, + &DeploymentList{}, + &DeploymentRollback{}, + &ReplicationControllerDummy{}, + &Scale{}, + &ThirdPartyResource{}, + &ThirdPartyResourceList{}, + &DaemonSetList{}, + &DaemonSet{}, + &ThirdPartyResourceData{}, + &ThirdPartyResourceDataList{}, + &Ingress{}, + &IngressList{}, + &ReplicaSet{}, + &ReplicaSetList{}, + &PodSecurityPolicy{}, + &PodSecurityPolicyList{}, + &NetworkPolicy{}, + &NetworkPolicyList{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/types.go b/vendor/k8s.io/client-go/pkg/apis/extensions/types.go new file mode 100644 index 0000000000..945c5fa2b1 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/types.go @@ -0,0 +1,1124 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +This file (together with pkg/apis/extensions/v1beta1/types.go) contain the experimental +types in kubernetes. These API objects are experimental, meaning that the +APIs may be broken at any time by the kubernetes team. + +DISCLAIMER: The implementation of the experimental API group itself is +a temporary one meant as a stopgap solution until kubernetes has proper +support for multiple API groups. The transition may require changes +beyond registration differences. In other words, experimental API group +support is experimental. +*/ + +package extensions + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/pkg/api" +) + +const ( + // SysctlsPodSecurityPolicyAnnotationKey represents the key of a whitelist of + // allowed safe and unsafe sysctls in a pod spec. It's a comma-separated list of plain sysctl + // names or sysctl patterns (which end in *). The string "*" matches all sysctls. 
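The comma-separated whitelist format described here pairs with the SysctlsFromPodSecurityPolicyAnnotation and PodAnnotationsFromSysctls helpers added in helpers.go earlier in this diff. A minimal round-trip sketch, assuming the vendored import path k8s.io/client-go/pkg/apis/extensions from this changeset (the sysctl names are only illustrative):

package main

import (
	"fmt"

	"k8s.io/client-go/pkg/apis/extensions"
)

func main() {
	// Join a whitelist into the annotation value: "kernel.shm_rmid_forced,net.*".
	value := extensions.PodAnnotationsFromSysctls([]string{"kernel.shm_rmid_forced", "net.*"})

	// Parse it back; an empty annotation yields an empty slice, not an error.
	sysctls, err := extensions.SysctlsFromPodSecurityPolicyAnnotation(value)
	if err != nil {
		panic(err)
	}
	fmt.Println(sysctls) // [kernel.shm_rmid_forced net.*]
}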
+ SysctlsPodSecurityPolicyAnnotationKey string = "security.alpha.kubernetes.io/sysctls" +) + +// describes the attributes of a scale subresource +type ScaleSpec struct { + // desired number of instances for the scaled object. + // +optional + Replicas int32 +} + +// represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 + + // label query over pods that should match the replicas count. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector *metav1.LabelSelector +} + +// +genclient=true +// +noMethods=true + +// represents a scaling request for a resource. +type Scale struct { + metav1.TypeMeta + // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta + + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + Spec ScaleSpec + + // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // +optional + Status ScaleStatus +} + +// Dummy definition +type ReplicationControllerDummy struct { + metav1.TypeMeta +} + +// Alpha-level support for Custom Metrics in HPA (as annotations). +type CustomMetricTarget struct { + // Custom Metric name. + Name string + // Custom Metric value (average). + TargetValue resource.Quantity +} + +type CustomMetricTargetList struct { + Items []CustomMetricTarget +} + +type CustomMetricCurrentStatus struct { + // Custom Metric name. + Name string + // Custom Metric value (average). + CurrentValue resource.Quantity +} + +type CustomMetricCurrentStatusList struct { + Items []CustomMetricCurrentStatus +} + +// +genclient=true +// +nonNamespaced=true + +// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource +// types to the API. It consists of one or more Versions of the api. +type ThirdPartyResource struct { + metav1.TypeMeta + + // Standard object metadata + // +optional + metav1.ObjectMeta + + // Description is the description of this object. + // +optional + Description string + + // Versions are versions for this third party object + Versions []APIVersion +} + +type ThirdPartyResourceList struct { + metav1.TypeMeta + + // Standard list metadata. + // +optional + metav1.ListMeta + + // Items is the list of horizontal pod autoscalers. + Items []ThirdPartyResource +} + +// An APIVersion represents a single concrete version of an object model. +// TODO: we should consider merge this struct with GroupVersion in metav1.go +type APIVersion struct { + // Name of this version (e.g. 'v1'). + Name string +} + +// An internal object, used for versioned storage in etcd. Not exposed to the end user. +type ThirdPartyResourceData struct { + metav1.TypeMeta + // Standard object metadata. + // +optional + metav1.ObjectMeta + + // Data is the raw JSON data for this data. + // +optional + Data []byte +} + +// +genclient=true + +type Deployment struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Specification of the desired behavior of the Deployment. + // +optional + Spec DeploymentSpec + + // Most recently observed status of the Deployment. + // +optional + Status DeploymentStatus +} + +type DeploymentSpec struct { + // Number of desired pods. 
This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + Replicas int32 + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // +optional + Selector *metav1.LabelSelector + + // Template describes the pods that will be created. + Template api.PodTemplateSpec + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + Strategy DeploymentStrategy + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + RevisionHistoryLimit *int32 + + // Indicates that the deployment is paused and will not be processed by the + // deployment controller. + // +optional + Paused bool + + // The config this deployment is rolling back to. Will be cleared after rollback is done. + // +optional + RollbackTo *RollbackConfig + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Once autoRollback is + // implemented, the deployment controller will automatically rollback failed + // deployments. Note that progress will not be estimated during the time a + // deployment is paused. This is not set by default. + ProgressDeadlineSeconds *int32 +} + +// DeploymentRollback stores the information required to rollback a deployment. +type DeploymentRollback struct { + metav1.TypeMeta + // Required: This must match the Name of a deployment. + Name string + // The annotations to be updated to a deployment + // +optional + UpdatedAnnotations map[string]string + // The config of this deployment rollback. + RollbackTo RollbackConfig +} + +type RollbackConfig struct { + // The revision to rollback to. If set to 0, rollbck to the last revision. + // +optional + Revision int64 +} + +const ( + // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added + // to existing RCs (and label key that is added to its pods) to prevent the existing RCs + // to select new pods (and old pods being select by new RC). + DefaultDeploymentUniqueLabelKey string = "pod-template-hash" +) + +type DeploymentStrategy struct { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + Type DeploymentStrategyType + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + RollingUpdate *RollingUpdateDeployment +} + +type DeploymentStrategyType string + +const ( + // Kill all existing pods before creating new ones. + RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" + + // Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one. + RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" +) + +// Spec to control the desired behavior of rolling update. 
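The RollingUpdateDeployment type just below accepts MaxUnavailable and MaxSurge as either absolute counts or percentages, with percentages rounded down for unavailability and up for surge. A self-contained sketch of that arithmetic only (this is not the deployment controller's own helper; the function name and values are made up for illustration):

package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

// resolve turns "30%" into a pod count relative to total, or parses a plain
// integer; roundUp selects ceiling (surge) versus floor (unavailability).
func resolve(value string, total int, roundUp bool) (int, error) {
	if strings.HasSuffix(value, "%") {
		pct, err := strconv.Atoi(strings.TrimSuffix(value, "%"))
		if err != nil {
			return 0, err
		}
		f := float64(total) * float64(pct) / 100
		if roundUp {
			return int(math.Ceil(f)), nil
		}
		return int(math.Floor(f)), nil
	}
	return strconv.Atoi(value)
}

func main() {
	// With 10 pods and both fields at 30%: at most 3 pods may be unavailable
	// and at most 3 extra pods may be created, i.e. at least 70% of the
	// original pods stay available and at most 130% run at any time.
	unavailable, _ := resolve("30%", 10, false)
	surge, _ := resolve("30%", 10, true)
	fmt.Println(unavailable, surge) // 3 3
}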
+type RollingUpdateDeployment struct { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // By default, a fixed value of 1 is used. + // Example: when this is set to 30%, the old RC can be scaled down by 30% + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that at least 70% of original number of pods are available at all times + // during the update. + // +optional + MaxUnavailable intstr.IntOrString + + // The maximum number of pods that can be scheduled above the original number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of total pods at + // the start of the update (ex: 10%). This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // By default, a value of 1 is used. + // Example: when this is set to 30%, the new RC can be scaled up by 30% + // immediately when the rolling update starts. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is atmost 130% of original pods. + // +optional + MaxSurge intstr.IntOrString +} + +type DeploymentStatus struct { + // The generation observed by the deployment controller. + // +optional + ObservedGeneration int64 + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + Replicas int32 + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + UpdatedReplicas int32 + + // Total number of ready pods targeted by this deployment. + // +optional + ReadyReplicas int32 + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + AvailableReplicas int32 + + // Total number of unavailable pods targeted by this deployment. + // +optional + UnavailableReplicas int32 + + // Represents the latest available observations of a deployment's current state. + Conditions []DeploymentCondition +} + +type DeploymentConditionType string + +// These are valid conditions of a deployment. +const ( + // Available means the deployment is available, ie. at least the minimum available + // replicas required are up and running for at least minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // Progressing means the deployment is progressing. Progress for a deployment is + // considered when a new replica set is created or adopted, and when new pods scale + // up or old pods scale down. Progress is not estimated for paused deployments or + // when progressDeadlineSeconds is not specified. + DeploymentProgressing DeploymentConditionType = "Progressing" + // ReplicaFailure is added in a deployment when one of its pods fails to be created + // or deleted. + DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType + // Status of the condition, one of True, False, Unknown. 
+ Status api.ConditionStatus + // The last time this condition was updated. + LastUpdateTime metav1.Time + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time + // The reason for the condition's last transition. + Reason string + // A human readable message indicating details about the transition. + Message string +} + +type DeploymentList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is the list of deployments. + Items []Deployment +} + +type DaemonSetUpdateStrategy struct { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". + // Default is OnDelete. + // +optional + Type DaemonSetUpdateStrategyType + + // Rolling update config params. Present only if type = "RollingUpdate". + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as DeploymentStrategy.RollingUpdate. + // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + RollingUpdate *RollingUpdateDaemonSet +} + +type DaemonSetUpdateStrategyType string + +const ( + // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. + RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" + + // Replace the old daemons only when it's killed + OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete" +) + +// Spec to control the desired behavior of daemon set rolling update. +type RollingUpdateDaemonSet struct { + // The maximum number of DaemonSet pods that can be unavailable during the + // update. Value can be an absolute number (ex: 5) or a percentage of total + // number of DaemonSet pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // This cannot be 0. + // Default value is 1. + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + MaxUnavailable intstr.IntOrString +} + +// DaemonSetSpec is the specification of a daemon set. +type DaemonSetSpec struct { + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // If empty, defaulted to labels on Pod template. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector *metav1.LabelSelector + + // An object that describes the pod that will be created. + // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). + // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template + Template api.PodTemplateSpec + + // An update strategy to replace existing DaemonSet pods with new pods. + // +optional + UpdateStrategy DaemonSetUpdateStrategy + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. 
Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + MinReadySeconds int32 + + // A sequence number representing a specific generation of the template. + // Populated by the system. It can be set only during the creation. + // +optional + TemplateGeneration int64 +} + +// DaemonSetStatus represents the current status of a daemon set. +type DaemonSetStatus struct { + // The number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. + CurrentNumberScheduled int32 + + // The number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. + NumberMisscheduled int32 + + // The total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). + DesiredNumberScheduled int32 + + // The number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running and ready. + NumberReady int32 + + // The most recent generation observed by the daemon set controller. + // +optional + ObservedGeneration int64 + + // The total number of nodes that are running updated daemon pod + // +optional + UpdatedNumberScheduled int32 + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + NumberAvailable int32 + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + NumberUnavailable int32 +} + +// +genclient=true + +// DaemonSet represents the configuration of a daemon set. +type DaemonSet struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // The desired behavior of this daemon set. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec DaemonSetSpec + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status DaemonSetStatus +} + +const ( + // DaemonSetTemplateGenerationKey is the key of the labels that is added + // to daemon set pods to distinguish between old and new pod templates + // during DaemonSet template update. + DaemonSetTemplateGenerationKey string = "pod-template-generation" +) + +// DaemonSetList is a collection of daemon sets. +type DaemonSetList struct { + metav1.TypeMeta + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta + + // A list of daemon sets. + Items []DaemonSet +} + +type ThirdPartyResourceDataList struct { + metav1.TypeMeta + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta + // Items is a list of third party objects + Items []ThirdPartyResourceData +} + +// +genclient=true + +// Ingress is a collection of rules that allow inbound connections to reach the +// endpoints defined by a backend. An Ingress can be configured to give services +// externally-reachable urls, load balance traffic, terminate SSL, offer name +// based virtual hosting etc. 
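A hedged sketch of building an Ingress in memory with the IngressSpec, IngressRule, and IngressBackend types defined below (the names "example", "web", and port 8080 are hypothetical):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/pkg/apis/extensions"
)

func main() {
	// One host rule sending example.com/web to the "web" service on port 8080.
	ing := extensions.Ingress{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
		Spec: extensions.IngressSpec{
			Rules: []extensions.IngressRule{{
				Host: "example.com",
				IngressRuleValue: extensions.IngressRuleValue{
					HTTP: &extensions.HTTPIngressRuleValue{
						Paths: []extensions.HTTPIngressPath{{
							Path: "/web",
							Backend: extensions.IngressBackend{
								ServiceName: "web",
								ServicePort: intstr.FromInt(8080),
							},
						}},
					},
				},
			}},
		},
	}
	fmt.Println(ing.Spec.Rules[0].Host)
}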
+type Ingress struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // Spec is the desired state of the Ingress. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec IngressSpec + + // Status is the current state of the Ingress. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status IngressStatus +} + +// IngressList is a collection of Ingress. +type IngressList struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta + + // Items is the list of Ingress. + Items []Ingress +} + +// IngressSpec describes the Ingress the user wishes to exist. +type IngressSpec struct { + // A default backend capable of servicing requests that don't match any + // rule. At least one of 'backend' or 'rules' must be specified. This field + // is optional to allow the loadbalancer controller or defaulting logic to + // specify a global default. + // +optional + Backend *IngressBackend + + // TLS configuration. Currently the Ingress only supports a single TLS + // port, 443. If multiple members of this list specify different hosts, they + // will be multiplexed on the same port according to the hostname specified + // through the SNI TLS extension, if the ingress controller fulfilling the + // ingress supports SNI. + // +optional + TLS []IngressTLS + + // A list of host rules used to configure the Ingress. If unspecified, or + // no rule matches, all traffic is sent to the default backend. + // +optional + Rules []IngressRule + // TODO: Add the ability to specify load-balancer IP through claims +} + +// IngressTLS describes the transport layer security associated with an Ingress. +type IngressTLS struct { + // Hosts are a list of hosts included in the TLS certificate. The values in + // this list must match the name/s used in the tlsSecret. Defaults to the + // wildcard host setting for the loadbalancer controller fulfilling this + // Ingress, if left unspecified. + // +optional + Hosts []string + // SecretName is the name of the secret used to terminate SSL traffic on 443. + // Field is left optional to allow SSL routing based on SNI hostname alone. + // If the SNI host in a listener conflicts with the "Host" header field used + // by an IngressRule, the SNI host is used for termination and value of the + // Host header is used for routing. + // +optional + SecretName string + // TODO: Consider specifying different modes of termination, protocols etc. +} + +// IngressStatus describe the current state of the Ingress. +type IngressStatus struct { + // LoadBalancer contains the current status of the load-balancer. + // +optional + LoadBalancer api.LoadBalancerStatus +} + +// IngressRule represents the rules mapping the paths under a specified host to +// the related backend services. Incoming requests are first evaluated for a host +// match, then routed to the backend associated with the matching IngressRuleValue. +type IngressRule struct { + // Host is the fully qualified domain name of a network host, as defined + // by RFC 3986. Note the following deviations from the "host" part of the + // URI as defined in the RFC: + // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the + // IP in the Spec of the parent Ingress. + // 2. 
The `:` delimiter is not respected because ports are not allowed. + // Currently the port of an Ingress is implicitly :80 for http and + // :443 for https. + // Both these may change in the future. + // Incoming requests are matched against the host before the IngressRuleValue. + // If the host is unspecified, the Ingress routes all traffic based on the + // specified IngressRuleValue. + // +optional + Host string + // IngressRuleValue represents a rule to route requests for this IngressRule. + // If unspecified, the rule defaults to a http catch-all. Whether that sends + // just traffic matching the host to the default backend or all traffic to the + // default backend, is left to the controller fulfilling the Ingress. Http is + // currently the only supported IngressRuleValue. + // +optional + IngressRuleValue +} + +// IngressRuleValue represents a rule to apply against incoming requests. If the +// rule is satisfied, the request is routed to the specified backend. Currently +// mixing different types of rules in a single Ingress is disallowed, so exactly +// one of the following must be set. +type IngressRuleValue struct { + //TODO: + // 1. Consider renaming this resource and the associated rules so they + // aren't tied to Ingress. They can be used to route intra-cluster traffic. + // 2. Consider adding fields for ingress-type specific global options + // usable by a loadbalancer, like http keep-alive. + + // +optional + HTTP *HTTPIngressRuleValue +} + +// HTTPIngressRuleValue is a list of http selectors pointing to backends. +// In the example: http:///? -> backend where +// where parts of the url correspond to RFC 3986, this resource will be used +// to match against everything after the last '/' and before the first '?' +// or '#'. +type HTTPIngressRuleValue struct { + // A collection of paths that map requests to backends. + Paths []HTTPIngressPath + // TODO: Consider adding fields for ingress-type specific global + // options usable by a loadbalancer, like http keep-alive. +} + +// HTTPIngressPath associates a path regex with a backend. Incoming urls matching +// the path are forwarded to the backend. +type HTTPIngressPath struct { + // Path is an extended POSIX regex as defined by IEEE Std 1003.1, + // (i.e this follows the egrep/unix syntax, not the perl syntax) + // matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" + // part of a URL as defined by RFC 3986. Paths must begin with + // a '/'. If unspecified, the path defaults to a catch all sending + // traffic to the backend. + // +optional + Path string + + // Backend defines the referenced service endpoint to which the traffic + // will be forwarded to. + Backend IngressBackend +} + +// IngressBackend describes all endpoints for a given service and port. +type IngressBackend struct { + // Specifies the name of the referenced service. + ServiceName string + + // Specifies the port of the referenced service. + ServicePort intstr.IntOrString +} + +// +genclient=true + +// ReplicaSet represents the configuration of a replica set. +type ReplicaSet struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired behavior of this ReplicaSet. + // +optional + Spec ReplicaSetSpec + + // Status is the current status of this ReplicaSet. This data may be + // out of date by some window of time. + // +optional + Status ReplicaSetStatus +} + +// ReplicaSetList is a collection of ReplicaSets. 
+type ReplicaSetList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ReplicaSet +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +// As the internal representation of a ReplicaSet, it must have +// a Template set. +type ReplicaSetSpec struct { + // Replicas is the number of desired replicas. + Replicas int32 + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 + + // Selector is a label query over pods that should match the replica count. + // Must match in order to be controlled. + // If empty, defaulted to labels on pod template. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector *metav1.LabelSelector + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // +optional + Template api.PodTemplateSpec +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +type ReplicaSetStatus struct { + // Replicas is the number of actual replicas. + Replicas int32 + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + FullyLabeledReplicas int32 + + // The number of ready replicas for this replica set. + // +optional + ReadyReplicas int32 + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // +optional + AvailableReplicas int32 + + // ObservedGeneration is the most recent generation observed by the controller. + // +optional + ObservedGeneration int64 + + // Represents the latest available observations of a replica set's current state. + // +optional + Conditions []ReplicaSetCondition +} + +type ReplicaSetConditionType string + +// These are valid conditions of a replica set. +const ( + // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created + // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted + // due to kubelet being down or finalizers are failing. + ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure" +) + +// ReplicaSetCondition describes the state of a replica set at a certain point. +type ReplicaSetCondition struct { + // Type of replica set condition. + Type ReplicaSetConditionType + // Status of the condition, one of True, False, Unknown. + Status api.ConditionStatus + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time + // The reason for the condition's last transition. + // +optional + Reason string + // A human readable message indicating details about the transition. + // +optional + Message string +} + +// +genclient=true +// +nonNamespaced=true + +// PodSecurityPolicy governs the ability to make requests that affect the SecurityContext +// that will be applied to a pod and container. +type PodSecurityPolicy struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the policy enforced. + // +optional + Spec PodSecurityPolicySpec +} + +// PodSecurityPolicySpec defines the policy enforced. +type PodSecurityPolicySpec struct { + // Privileged determines if a pod can request to be run as privileged. 
+ // +optional + Privileged bool + // DefaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. You may not list a capability in both + // DefaultAddCapabilities and RequiredDropCapabilities. + // +optional + DefaultAddCapabilities []api.Capability + // RequiredDropCapabilities are the capabilities that will be dropped from the container. These + // are required to be dropped and cannot be added. + // +optional + RequiredDropCapabilities []api.Capability + // AllowedCapabilities is a list of capabilities that can be requested to add to the container. + // Capabilities in this field may be added at the pod author's discretion. + // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. + // +optional + AllowedCapabilities []api.Capability + // Volumes is a white list of allowed volume plugins. Empty indicates that all plugins + // may be used. + // +optional + Volumes []FSType + // HostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + // +optional + HostNetwork bool + // HostPorts determines which host port ranges are allowed to be exposed. + // +optional + HostPorts []HostPortRange + // HostPID determines if the policy allows the use of HostPID in the pod spec. + // +optional + HostPID bool + // HostIPC determines if the policy allows the use of HostIPC in the pod spec. + // +optional + HostIPC bool + // SELinux is the strategy that will dictate the allowable labels that may be set. + SELinux SELinuxStrategyOptions + // RunAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. + RunAsUser RunAsUserStrategyOptions + // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + SupplementalGroups SupplementalGroupsStrategyOptions + // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. + FSGroup FSGroupStrategyOptions + // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file + // system. If the container specifically requests to run with a non-read only root file system + // the PSP should deny the pod. + // If set to false the container may run with a read only root file system if it wishes but it + // will not be forced to. + // +optional + ReadOnlyRootFilesystem bool +} + +// HostPortRange defines a range of host ports that will be enabled by a policy +// for pods to use. It requires both the start and end to be defined. +type HostPortRange struct { + // Min is the start of the range, inclusive. + Min int + // Max is the end of the range, inclusive. + Max int +} + +// FSType gives strong typing to different file systems that are used by volumes. 
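A hedged sketch of a restrictive PodSecurityPolicySpec using the FSType values and strategy option types defined just below (the volume whitelist and port range are hypothetical):

package main

import (
	"fmt"

	"k8s.io/client-go/pkg/apis/extensions"
)

func main() {
	// No privileged pods, a small volume whitelist, one allowed host-port range,
	// non-root users, and a read-only root filesystem.
	spec := extensions.PodSecurityPolicySpec{
		Privileged: false,
		Volumes: []extensions.FSType{
			extensions.EmptyDir,
			extensions.Secret,
			extensions.ConfigMap,
		},
		HostPorts:              []extensions.HostPortRange{{Min: 8000, Max: 8080}},
		SELinux:                extensions.SELinuxStrategyOptions{Rule: extensions.SELinuxStrategyRunAsAny},
		RunAsUser:              extensions.RunAsUserStrategyOptions{Rule: extensions.RunAsUserStrategyMustRunAsNonRoot},
		SupplementalGroups:     extensions.SupplementalGroupsStrategyOptions{Rule: extensions.SupplementalGroupsStrategyRunAsAny},
		FSGroup:                extensions.FSGroupStrategyOptions{Rule: extensions.FSGroupStrategyRunAsAny},
		ReadOnlyRootFilesystem: true,
	}
	fmt.Println(len(spec.Volumes))
}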
+type FSType string + +var ( + AzureFile FSType = "azureFile" + Flocker FSType = "flocker" + FlexVolume FSType = "flexVolume" + HostPath FSType = "hostPath" + EmptyDir FSType = "emptyDir" + GCEPersistentDisk FSType = "gcePersistentDisk" + AWSElasticBlockStore FSType = "awsElasticBlockStore" + GitRepo FSType = "gitRepo" + Secret FSType = "secret" + NFS FSType = "nfs" + ISCSI FSType = "iscsi" + Glusterfs FSType = "glusterfs" + PersistentVolumeClaim FSType = "persistentVolumeClaim" + RBD FSType = "rbd" + Cinder FSType = "cinder" + CephFS FSType = "cephFS" + DownwardAPI FSType = "downwardAPI" + FC FSType = "fc" + ConfigMap FSType = "configMap" + VsphereVolume FSType = "vsphereVolume" + Quobyte FSType = "quobyte" + AzureDisk FSType = "azureDisk" + PhotonPersistentDisk FSType = "photonPersistentDisk" + Projected FSType = "projected" + PortworxVolume FSType = "portworxVolume" + ScaleIO FSType = "scaleIO" + All FSType = "*" +) + +// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. +type SELinuxStrategyOptions struct { + // Rule is the strategy that will dictate the allowable labels that may be set. + Rule SELinuxStrategy + // seLinuxOptions required to run as; required for MustRunAs + // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context + // +optional + SELinuxOptions *api.SELinuxOptions +} + +// SELinuxStrategy denotes strategy types for generating SELinux options for a +// Security. +type SELinuxStrategy string + +const ( + // container must have SELinux labels of X applied. + SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" + // container may make requests for any SELinux context labels. + SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" +) + +// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. +type RunAsUserStrategyOptions struct { + // Rule is the strategy that will dictate the allowable RunAsUser values that may be set. + Rule RunAsUserStrategy + // Ranges are the allowed ranges of uids that may be used. + // +optional + Ranges []IDRange +} + +// IDRange provides a min/max of an allowed range of IDs. +type IDRange struct { + // Min is the start of the range, inclusive. + Min int64 + // Max is the end of the range, inclusive. + Max int64 +} + +// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a +// SecurityContext. +type RunAsUserStrategy string + +const ( + // container must run as a particular uid. + RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" + // container must run as a non-root uid + RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" + // container may make requests for any uid. + RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" +) + +// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. +type FSGroupStrategyOptions struct { + // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext. + // +optional + Rule FSGroupStrategyType + // Ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. + // +optional + Ranges []IDRange +} + +// FSGroupStrategyType denotes strategy types for generating FSGroup values for a +// SecurityContext +type FSGroupStrategyType string + +const ( + // container must have FSGroup of X applied. 
+ FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" + // container may make requests for any FSGroup labels. + FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" +) + +// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +type SupplementalGroupsStrategyOptions struct { + // Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. + // +optional + Rule SupplementalGroupsStrategyType + // Ranges are the allowed ranges of supplemental groups. If you would like to force a single + // supplemental group then supply a single range with the same start and end. + // +optional + Ranges []IDRange +} + +// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental +// groups for a SecurityContext. +type SupplementalGroupsStrategyType string + +const ( + // container must run as a particular gid. + SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" + // container may make requests for any gid. + SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" +) + +// PodSecurityPolicyList is a list of PodSecurityPolicy objects. +type PodSecurityPolicyList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []PodSecurityPolicy +} + +// +genclient=true + +type NetworkPolicy struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Specification of the desired behavior for this NetworkPolicy. + // +optional + Spec NetworkPolicySpec +} + +type NetworkPolicySpec struct { + // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules + // is applied to any pods selected by this field. Multiple network policies can select the + // same set of pods. In this case, the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. + PodSelector metav1.LabelSelector + + // List of ingress rules to be applied to the selected pods. + // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, + // OR if the traffic source is the pod's local node, + // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy + // objects whose podSelector matches the pod. + // If this field is empty then this NetworkPolicy does not affect ingress isolation. + // If this field is present and contains at least one rule, this policy allows any traffic + // which matches at least one of the ingress rules in this list. + // +optional + Ingress []NetworkPolicyIngressRule +} + +// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. +type NetworkPolicyIngressRule struct { + // List of ports which should be made accessible on the pods selected for this rule. + // Each item in this list is combined using a logical OR. + // If this field is not provided, this rule matches all ports (traffic not restricted by port). + // If this field is empty, this rule matches no ports (no traffic matches). + // If this field is present and contains at least one item, then this rule allows traffic + // only if the traffic matches at least one port in the list. + // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. 
+ // +optional + Ports []NetworkPolicyPort + + // List of sources which should be able to access the pods selected for this rule. + // Items in this list are combined using a logical OR operation. + // If this field is not provided, this rule matches all sources (traffic not restricted by source). + // If this field is empty, this rule matches no sources (no traffic matches). + // If this field is present and contains at least on item, this rule allows traffic only if the + // traffic matches at least one item in the from list. + // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. + // +optional + From []NetworkPolicyPeer +} + +type NetworkPolicyPort struct { + // Optional. The protocol (TCP or UDP) which traffic must match. + // If not specified, this field defaults to TCP. + // +optional + Protocol *api.Protocol + + // If specified, the port on the given protocol. This can + // either be a numerical or named port on a pod. If this field is not provided, + // this matches all port names and numbers. + // If present, only traffic on the specified protocol AND port + // will be matched. + // +optional + Port *intstr.IntOrString +} + +type NetworkPolicyPeer struct { + // Exactly one of the following must be specified. + + // This is a label selector which selects Pods in this namespace. + // This field follows standard label selector semantics. + // If not provided, this selector selects no pods. + // If present but empty, this selector selects all pods in this namespace. + // +optional + PodSelector *metav1.LabelSelector + + // Selects Namespaces using cluster scoped-labels. This + // matches all pods in all namespaces selected by this label selector. + // This field follows standard label selector semantics. + // If omitted, this selector selects no namespaces. + // If present but empty, this selector selects all namespaces. + // +optional + NamespaceSelector *metav1.LabelSelector +} + +// NetworkPolicyList is a list of NetworkPolicy objects. +type NetworkPolicyList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []NetworkPolicy +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/conversion.go new file mode 100644 index 0000000000..d53dbc47ae --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/conversion.go @@ -0,0 +1,262 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + v1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/pkg/apis/extensions" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + err := scheme.AddConversionFuncs( + Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, + Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, + Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, + Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, + Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy, + Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy, + Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, + Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, + Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet, + Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet, + Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec, + Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec, + ) + if err != nil { + return err + } + + // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. + for _, k := range []string{"DaemonSet", "Deployment", "Ingress"} { + kind := k // don't close over range variables + err = scheme.AddFieldLabelConversionFunc("extensions/v1beta1", kind, + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", "metadata.namespace": + return label, value, nil + default: + return "", "", fmt.Errorf("field label %q not supported for %q", label, kind) + } + }, + ) + if err != nil { + return err + } + } + + return nil +} + +func Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { + out.Replicas = int32(in.Replicas) + + out.Selector = nil + out.TargetSelector = "" + if in.Selector != nil { + if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { + out.Selector = in.Selector.MatchLabels + } + + selector, err := metav1.LabelSelectorAsSelector(in.Selector) + if err != nil { + return fmt.Errorf("invalid label selector: %v", err) + } + out.TargetSelector = selector.String() + } + return nil +} + +func Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + + // Normally when 2 fields map to the same internal value we favor the old field, since + // old clients can't be expected to know about new fields but clients that know about the + // new field can be expected to know about the old field (though that's not quite true, due + // to kubectl apply). However, these fields are readonly, so any non-nil value should work. 
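	// Illustrative note (values are hypothetical): a v1beta1 ScaleStatus whose
	// TargetSelector is `app=web,env in (prod,canary)` is parsed into a full,
	// set-based LabelSelector by the branch below, whereas the legacy map form
	// {"app": "web"} can only populate MatchLabels; when both fields are set,
	// the TargetSelector branch runs first and therefore wins.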
+ if in.TargetSelector != "" { + labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) + if err != nil { + out.Selector = nil + return fmt.Errorf("failed to parse target selector: %v", err) + } + out.Selector = labelSelector + } else if in.Selector != nil { + out.Selector = new(metav1.LabelSelector) + selector := make(map[string]string) + for key, val := range in.Selector { + selector[key] = val + } + out.Selector.MatchLabels = selector + } else { + out.Selector = nil + } + return nil +} + +func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error { + out.Replicas = &in.Replicas + out.Selector = in.Selector + if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + return err + } + if in.RevisionHistoryLimit != nil { + out.RevisionHistoryLimit = new(int32) + *out.RevisionHistoryLimit = int32(*in.RevisionHistoryLimit) + } + out.MinReadySeconds = int32(in.MinReadySeconds) + out.Paused = in.Paused + if in.RollbackTo != nil { + out.RollbackTo = new(RollbackConfig) + out.RollbackTo.Revision = int64(in.RollbackTo.Revision) + } else { + out.RollbackTo = nil + } + if in.ProgressDeadlineSeconds != nil { + out.ProgressDeadlineSeconds = new(int32) + *out.ProgressDeadlineSeconds = *in.ProgressDeadlineSeconds + } + return nil +} + +func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { + if in.Replicas != nil { + out.Replicas = *in.Replicas + } + out.Selector = in.Selector + if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + return err + } + out.RevisionHistoryLimit = in.RevisionHistoryLimit + out.MinReadySeconds = in.MinReadySeconds + out.Paused = in.Paused + if in.RollbackTo != nil { + out.RollbackTo = new(extensions.RollbackConfig) + out.RollbackTo.Revision = in.RollbackTo.Revision + } else { + out.RollbackTo = nil + } + if in.ProgressDeadlineSeconds != nil { + out.ProgressDeadlineSeconds = new(int32) + *out.ProgressDeadlineSeconds = *in.ProgressDeadlineSeconds + } + return nil +} + +func Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error { + out.Type = DeploymentStrategyType(in.Type) + if in.RollingUpdate != nil { + out.RollingUpdate = new(RollingUpdateDeployment) + if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { + out.Type = extensions.DeploymentStrategyType(in.Type) + if in.RollingUpdate != nil { + out.RollingUpdate = new(extensions.RollingUpdateDeployment) + if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func 
Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error { + if out.MaxUnavailable == nil { + out.MaxUnavailable = &intstr.IntOrString{} + } + if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil { + return err + } + if out.MaxSurge == nil { + out.MaxSurge = &intstr.IntOrString{} + } + if err := s.Convert(&in.MaxSurge, out.MaxSurge, 0); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { + if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { + return err + } + if err := s.Convert(in.MaxSurge, &out.MaxSurge, 0); err != nil { + return err + } + return nil +} + +func Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *RollingUpdateDaemonSet, s conversion.Scope) error { + if out.MaxUnavailable == nil { + out.MaxUnavailable = &intstr.IntOrString{} + } + if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error { + if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { + return err + } + return nil +} + +func Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *ReplicaSetSpec, s conversion.Scope) error { + out.Replicas = new(int32) + *out.Replicas = int32(in.Replicas) + out.MinReadySeconds = in.MinReadySeconds + out.Selector = in.Selector + if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { + if in.Replicas != nil { + out.Replicas = *in.Replicas + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = in.Selector + if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/defaults.go new file mode 100644 index 0000000000..298c568db3 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/defaults.go @@ -0,0 +1,138 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/pkg/api/v1" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + RegisterDefaults(scheme) + return scheme.AddDefaultingFuncs( + SetDefaults_DaemonSet, + SetDefaults_Deployment, + SetDefaults_ReplicaSet, + SetDefaults_NetworkPolicy, + ) +} + +func SetDefaults_DaemonSet(obj *DaemonSet) { + labels := obj.Spec.Template.Labels + + // TODO: support templates defined elsewhere when we support them in the API + if labels != nil { + if obj.Spec.Selector == nil { + obj.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: labels, + } + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } + updateStrategy := &obj.Spec.UpdateStrategy + if updateStrategy.Type == "" { + updateStrategy.Type = OnDeleteDaemonSetStrategyType + } + if updateStrategy.Type == RollingUpdateDaemonSetStrategyType { + if updateStrategy.RollingUpdate == nil { + rollingUpdate := RollingUpdateDaemonSet{} + updateStrategy.RollingUpdate = &rollingUpdate + } + if updateStrategy.RollingUpdate.MaxUnavailable == nil { + // Set default MaxUnavailable as 1 by default. + maxUnavailable := intstr.FromInt(1) + updateStrategy.RollingUpdate.MaxUnavailable = &maxUnavailable + } + } +} + +func SetDefaults_Deployment(obj *Deployment) { + // Default labels and selector to labels from pod template spec. + labels := obj.Spec.Template.Labels + + if labels != nil { + if obj.Spec.Selector == nil { + obj.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels} + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } + // Set DeploymentSpec.Replicas to 1 if it is not set. + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } + strategy := &obj.Spec.Strategy + // Set default DeploymentStrategyType as RollingUpdate. + if strategy.Type == "" { + strategy.Type = RollingUpdateDeploymentStrategyType + } + if strategy.Type == RollingUpdateDeploymentStrategyType || strategy.RollingUpdate != nil { + if strategy.RollingUpdate == nil { + rollingUpdate := RollingUpdateDeployment{} + strategy.RollingUpdate = &rollingUpdate + } + if strategy.RollingUpdate.MaxUnavailable == nil { + // Set default MaxUnavailable as 1 by default. + maxUnavailable := intstr.FromInt(1) + strategy.RollingUpdate.MaxUnavailable = &maxUnavailable + } + if strategy.RollingUpdate.MaxSurge == nil { + // Set default MaxSurge as 1 by default. + maxSurge := intstr.FromInt(1) + strategy.RollingUpdate.MaxSurge = &maxSurge + } + } +} + +func SetDefaults_ReplicaSet(obj *ReplicaSet) { + labels := obj.Spec.Template.Labels + + // TODO: support templates defined elsewhere when we support them in the API + if labels != nil { + if obj.Spec.Selector == nil { + obj.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: labels, + } + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } +} + +func SetDefaults_NetworkPolicy(obj *NetworkPolicy) { + // Default any undefined Protocol fields to TCP. + for _, i := range obj.Spec.Ingress { + // TODO: Update Ports to be a pointer to slice as soon as auto-generation supports it. 
+		for _, p := range i.Ports {
+			if p.Protocol == nil {
+				proto := v1.ProtocolTCP
+				p.Protocol = &proto
+			}
+		}
+	}
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/doc.go
new file mode 100644
index 0000000000..a397b30e92
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/doc.go
@@ -0,0 +1,17 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/generated.pb.go
new file mode 100644
index 0000000000..1c72f73116
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/generated.pb.go
@@ -0,0 +1,11993 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo.
+// source: k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto
+// DO NOT EDIT!
+
+/*
+	Package v1beta1 is a generated protocol buffer package.
+ + It is generated from these files: + k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto + + It has these top-level messages: + APIVersion + CustomMetricCurrentStatus + CustomMetricCurrentStatusList + CustomMetricTarget + CustomMetricTargetList + DaemonSet + DaemonSetList + DaemonSetSpec + DaemonSetStatus + DaemonSetUpdateStrategy + Deployment + DeploymentCondition + DeploymentList + DeploymentRollback + DeploymentSpec + DeploymentStatus + DeploymentStrategy + FSGroupStrategyOptions + HTTPIngressPath + HTTPIngressRuleValue + HostPortRange + IDRange + Ingress + IngressBackend + IngressList + IngressRule + IngressRuleValue + IngressSpec + IngressStatus + IngressTLS + NetworkPolicy + NetworkPolicyIngressRule + NetworkPolicyList + NetworkPolicyPeer + NetworkPolicyPort + NetworkPolicySpec + PodSecurityPolicy + PodSecurityPolicyList + PodSecurityPolicySpec + ReplicaSet + ReplicaSetCondition + ReplicaSetList + ReplicaSetSpec + ReplicaSetStatus + ReplicationControllerDummy + RollbackConfig + RollingUpdateDaemonSet + RollingUpdateDeployment + RunAsUserStrategyOptions + SELinuxStrategyOptions + Scale + ScaleSpec + ScaleStatus + SupplementalGroupsStrategyOptions + ThirdPartyResource + ThirdPartyResourceData + ThirdPartyResourceDataList + ThirdPartyResourceList +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/pkg/api/v1" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *APIVersion) Reset() { *m = APIVersion{} } +func (*APIVersion) ProtoMessage() {} +func (*APIVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *CustomMetricCurrentStatus) Reset() { *m = CustomMetricCurrentStatus{} } +func (*CustomMetricCurrentStatus) ProtoMessage() {} +func (*CustomMetricCurrentStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *CustomMetricCurrentStatusList) Reset() { *m = CustomMetricCurrentStatusList{} } +func (*CustomMetricCurrentStatusList) ProtoMessage() {} +func (*CustomMetricCurrentStatusList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + +func (m *CustomMetricTarget) Reset() { *m = CustomMetricTarget{} } +func (*CustomMetricTarget) ProtoMessage() {} +func (*CustomMetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *CustomMetricTargetList) Reset() { *m = CustomMetricTargetList{} } +func (*CustomMetricTargetList) ProtoMessage() {} +func (*CustomMetricTargetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } +func (*DeploymentRollback) ProtoMessage() {} +func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func 
(*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } + +func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } +func (*FSGroupStrategyOptions) ProtoMessage() {} +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } + +func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } +func (*HTTPIngressPath) ProtoMessage() {} +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } + +func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } +func (*HTTPIngressRuleValue) ProtoMessage() {} +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } + +func (m *HostPortRange) Reset() { *m = HostPortRange{} } +func (*HostPortRange) ProtoMessage() {} +func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } + +func (m *IDRange) Reset() { *m = IDRange{} } +func (*IDRange) ProtoMessage() {} +func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } + +func (m *Ingress) Reset() { *m = Ingress{} } +func (*Ingress) ProtoMessage() {} +func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } + +func (m *IngressBackend) Reset() { *m = IngressBackend{} } +func (*IngressBackend) ProtoMessage() {} +func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } + +func (m *IngressList) Reset() { *m = IngressList{} } +func (*IngressList) ProtoMessage() {} +func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } + +func (m *IngressRule) Reset() { *m = IngressRule{} } +func (*IngressRule) ProtoMessage() {} +func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } + +func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } +func (*IngressRuleValue) ProtoMessage() {} +func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } + +func (m *IngressSpec) Reset() { *m = IngressSpec{} } +func (*IngressSpec) ProtoMessage() {} +func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } + +func (m *IngressStatus) Reset() { *m = IngressStatus{} } +func (*IngressStatus) ProtoMessage() {} +func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } + +func (m *IngressTLS) Reset() { *m = IngressTLS{} } +func (*IngressTLS) ProtoMessage() {} +func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } + +func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } +func (*NetworkPolicy) ProtoMessage() {} +func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } + +func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } +func (*NetworkPolicyIngressRule) ProtoMessage() {} +func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{31} +} + +func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } +func (*NetworkPolicyList) ProtoMessage() {} +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } + +func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } +func (*NetworkPolicyPeer) ProtoMessage() {} +func (*NetworkPolicyPeer) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{33} } + +func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } +func (*NetworkPolicyPort) ProtoMessage() {} +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } + +func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } +func (*NetworkPolicySpec) ProtoMessage() {} +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } + +func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } +func (*PodSecurityPolicy) ProtoMessage() {} +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } + +func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } +func (*PodSecurityPolicyList) ProtoMessage() {} +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } + +func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } +func (*PodSecurityPolicySpec) ProtoMessage() {} +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } + +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } + +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } + +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } + +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } + +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } + +func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} } +func (*ReplicationControllerDummy) ProtoMessage() {} +func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{44} +} + +func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } +func (*RollbackConfig) ProtoMessage() {} +func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } + +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } + +func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } +func (*RollingUpdateDeployment) ProtoMessage() {} +func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{47} +} + +func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } +func (*RunAsUserStrategyOptions) ProtoMessage() {} +func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{48} +} + +func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } +func (*SELinuxStrategyOptions) ProtoMessage() {} +func (*SELinuxStrategyOptions) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{49} } + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } + +func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } +func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} +func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{53} +} + +func (m *ThirdPartyResource) Reset() { *m = ThirdPartyResource{} } +func (*ThirdPartyResource) ProtoMessage() {} +func (*ThirdPartyResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } + +func (m *ThirdPartyResourceData) Reset() { *m = ThirdPartyResourceData{} } +func (*ThirdPartyResourceData) ProtoMessage() {} +func (*ThirdPartyResourceData) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } + +func (m *ThirdPartyResourceDataList) Reset() { *m = ThirdPartyResourceDataList{} } +func (*ThirdPartyResourceDataList) ProtoMessage() {} +func (*ThirdPartyResourceDataList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{56} +} + +func (m *ThirdPartyResourceList) Reset() { *m = ThirdPartyResourceList{} } +func (*ThirdPartyResourceList) ProtoMessage() {} +func (*ThirdPartyResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } + +func init() { + proto.RegisterType((*APIVersion)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.APIVersion") + proto.RegisterType((*CustomMetricCurrentStatus)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.CustomMetricCurrentStatus") + proto.RegisterType((*CustomMetricCurrentStatusList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.CustomMetricCurrentStatusList") + proto.RegisterType((*CustomMetricTarget)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.CustomMetricTarget") + proto.RegisterType((*CustomMetricTargetList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.CustomMetricTargetList") + proto.RegisterType((*DaemonSet)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DaemonSet") + proto.RegisterType((*DaemonSetList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DaemonSetList") + proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DaemonSetSpec") + proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DaemonSetStatus") + proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DaemonSetUpdateStrategy") + proto.RegisterType((*Deployment)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.Deployment") + proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DeploymentCondition") + proto.RegisterType((*DeploymentList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DeploymentList") + proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DeploymentRollback") + proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DeploymentSpec") + 
proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DeploymentStatus") + proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DeploymentStrategy") + proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.FSGroupStrategyOptions") + proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.HTTPIngressPath") + proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.HTTPIngressRuleValue") + proto.RegisterType((*HostPortRange)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.HostPortRange") + proto.RegisterType((*IDRange)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IDRange") + proto.RegisterType((*Ingress)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.Ingress") + proto.RegisterType((*IngressBackend)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IngressBackend") + proto.RegisterType((*IngressList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IngressList") + proto.RegisterType((*IngressRule)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IngressRule") + proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IngressRuleValue") + proto.RegisterType((*IngressSpec)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IngressSpec") + proto.RegisterType((*IngressStatus)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IngressStatus") + proto.RegisterType((*IngressTLS)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IngressTLS") + proto.RegisterType((*NetworkPolicy)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.NetworkPolicy") + proto.RegisterType((*NetworkPolicyIngressRule)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.NetworkPolicyIngressRule") + proto.RegisterType((*NetworkPolicyList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.NetworkPolicyList") + proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.NetworkPolicyPeer") + proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.NetworkPolicyPort") + proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.NetworkPolicySpec") + proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.PodSecurityPolicy") + proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.PodSecurityPolicyList") + proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.PodSecurityPolicySpec") + proto.RegisterType((*ReplicaSet)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ReplicaSet") + proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ReplicaSetCondition") + proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ReplicaSetList") + proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ReplicaSetSpec") + proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ReplicaSetStatus") + proto.RegisterType((*ReplicationControllerDummy)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ReplicationControllerDummy") + proto.RegisterType((*RollbackConfig)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.RollbackConfig") + proto.RegisterType((*RollingUpdateDaemonSet)(nil), 
"k8s.io.client-go.pkg.apis.extensions.v1beta1.RollingUpdateDaemonSet") + proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.RollingUpdateDeployment") + proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.RunAsUserStrategyOptions") + proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.SELinuxStrategyOptions") + proto.RegisterType((*Scale)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ScaleStatus") + proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.SupplementalGroupsStrategyOptions") + proto.RegisterType((*ThirdPartyResource)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ThirdPartyResource") + proto.RegisterType((*ThirdPartyResourceData)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ThirdPartyResourceData") + proto.RegisterType((*ThirdPartyResourceDataList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ThirdPartyResourceDataList") + proto.RegisterType((*ThirdPartyResourceList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ThirdPartyResourceList") +} +func (m *APIVersion) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *APIVersion) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + return i, nil +} + +func (m *CustomMetricCurrentStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CustomMetricCurrentStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentValue.Size())) + n1, err := m.CurrentValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + return i, nil +} + +func (m *CustomMetricCurrentStatusList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CustomMetricCurrentStatusList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CustomMetricTarget) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CustomMetricTarget) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, 
uint64(m.TargetValue.Size())) + n2, err := m.TargetValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func (m *CustomMetricTargetList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CustomMetricTargetList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DaemonSet) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DaemonSet) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n3, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n4, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n5, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *DaemonSetList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DaemonSetList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DaemonSetSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DaemonSetSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Selector != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n7, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n8, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.UpdateStrategy.Size())) + n9, err := m.UpdateStrategy.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TemplateGeneration)) + return i, nil +} + +func (m *DaemonSetStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + 
return nil, err + } + return data[:n], nil +} + +func (m *DaemonSetStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentNumberScheduled)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NumberMisscheduled)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DesiredNumberScheduled)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NumberReady)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(m.UpdatedNumberScheduled)) + data[i] = 0x38 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NumberAvailable)) + data[i] = 0x40 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NumberUnavailable)) + return i, nil +} + +func (m *DaemonSetUpdateStrategy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DaemonSetUpdateStrategy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.RollingUpdate != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RollingUpdate.Size())) + n10, err := m.RollingUpdate.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + } + return i, nil +} + +func (m *Deployment) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Deployment) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n11, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n12, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n13, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + return i, nil +} + +func (m *DeploymentCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastUpdateTime.Size())) + n14, err := m.LastUpdateTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n14 + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n15, err := 
m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n15 + return i, nil +} + +func (m *DeploymentList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n16, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n16 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DeploymentRollback) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentRollback) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + if len(m.UpdatedAnnotations) > 0 { + for k := range m.UpdatedAnnotations { + data[i] = 0x12 + i++ + v := m.UpdatedAnnotations[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size())) + n17, err := m.RollbackTo.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n17 + return i, nil +} + +func (m *DeploymentSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n18, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n18 + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n19, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n19 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Strategy.Size())) + n20, err := m.Strategy.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n20 + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.RevisionHistoryLimit)) + } + data[i] = 0x38 + i++ + if m.Paused { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if m.RollbackTo != nil { + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size())) + n21, err := m.RollbackTo.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if m.ProgressDeadlineSeconds != nil { + data[i] = 0x48 
+ i++ + i = encodeVarintGenerated(data, i, uint64(*m.ProgressDeadlineSeconds)) + } + return i, nil +} + +func (m *DeploymentStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.UpdatedReplicas)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AvailableReplicas)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x38 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ReadyReplicas)) + return i, nil +} + +func (m *DeploymentStrategy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentStrategy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.RollingUpdate != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RollingUpdate.Size())) + n22, err := m.RollingUpdate.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n22 + } + return i, nil +} + +func (m *FSGroupStrategyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *FSGroupStrategyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) + i += copy(data[i:], m.Rule) + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HTTPIngressPath) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HTTPIngressPath) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Backend.Size())) + n23, err := m.Backend.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n23 + return i, nil +} + +func (m *HTTPIngressRuleValue) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HTTPIngressRuleValue) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Paths) > 0 { + for _, msg := 
range m.Paths { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HostPortRange) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HostPortRange) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Min)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Max)) + return i, nil +} + +func (m *IDRange) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IDRange) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Min)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Max)) + return i, nil +} + +func (m *Ingress) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Ingress) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n24, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n24 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n25, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n25 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n26, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n26 + return i, nil +} + +func (m *IngressBackend) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IngressBackend) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ServiceName))) + i += copy(data[i:], m.ServiceName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ServicePort.Size())) + n27, err := m.ServicePort.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n27 + return i, nil +} + +func (m *IngressList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IngressList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n28, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n28 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *IngressRule) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, 
err + } + return data[:n], nil +} + +func (m *IngressRule) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Host))) + i += copy(data[i:], m.Host) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.IngressRuleValue.Size())) + n29, err := m.IngressRuleValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n29 + return i, nil +} + +func (m *IngressRuleValue) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IngressRuleValue) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.HTTP != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.HTTP.Size())) + n30, err := m.HTTP.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n30 + } + return i, nil +} + +func (m *IngressSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IngressSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Backend != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Backend.Size())) + n31, err := m.Backend.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n31 + } + if len(m.TLS) > 0 { + for _, msg := range m.TLS { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *IngressStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IngressStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LoadBalancer.Size())) + n32, err := m.LoadBalancer.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n32 + return i, nil +} + +func (m *IngressTLS) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IngressTLS) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SecretName))) + i += copy(data[i:], m.SecretName) + return i, nil +} + +func (m *NetworkPolicy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NetworkPolicy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, 
uint64(m.ObjectMeta.Size())) + n33, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n33 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n34, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n34 + return i, nil +} + +func (m *NetworkPolicyIngressRule) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NetworkPolicyIngressRule) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.From) > 0 { + for _, msg := range m.From { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NetworkPolicyList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NetworkPolicyList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n35, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n35 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NetworkPolicyPeer) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NetworkPolicyPeer) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PodSelector != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodSelector.Size())) + n36, err := m.PodSelector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n36 + } + if m.NamespaceSelector != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NamespaceSelector.Size())) + n37, err := m.NamespaceSelector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n37 + } + return i, nil +} + +func (m *NetworkPolicyPort) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NetworkPolicyPort) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Protocol != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.Protocol))) + i += copy(data[i:], *m.Protocol) + } + if m.Port != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Port.Size())) + n38, err := m.Port.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n38 + } + return i, nil +} + +func (m *NetworkPolicySpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil 
+} + +func (m *NetworkPolicySpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodSelector.Size())) + n39, err := m.PodSelector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n39 + if len(m.Ingress) > 0 { + for _, msg := range m.Ingress { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodSecurityPolicy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodSecurityPolicy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n40, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n40 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n41, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n41 + return i, nil +} + +func (m *PodSecurityPolicyList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodSecurityPolicyList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n42, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n42 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodSecurityPolicySpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodSecurityPolicySpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Privileged { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if len(m.DefaultAddCapabilities) > 0 { + for _, s := range m.DefaultAddCapabilities { + data[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.RequiredDropCapabilities) > 0 { + for _, s := range m.RequiredDropCapabilities { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.AllowedCapabilities) > 0 { + for _, s := range m.AllowedCapabilities { + data[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + data[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x30 + i++ + if m.HostNetwork { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if len(m.HostPorts) > 
0 { + for _, msg := range m.HostPorts { + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x40 + i++ + if m.HostPID { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x48 + i++ + if m.HostIPC { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SELinux.Size())) + n43, err := m.SELinux.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n43 + data[i] = 0x5a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RunAsUser.Size())) + n44, err := m.RunAsUser.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n44 + data[i] = 0x62 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SupplementalGroups.Size())) + n45, err := m.SupplementalGroups.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n45 + data[i] = 0x6a + i++ + i = encodeVarintGenerated(data, i, uint64(m.FSGroup.Size())) + n46, err := m.FSGroup.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n46 + data[i] = 0x70 + i++ + if m.ReadOnlyRootFilesystem { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *ReplicaSet) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicaSet) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n47, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n47 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n48, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n48 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n49, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n49 + return i, nil +} + +func (m *ReplicaSetCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicaSetCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n50, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n50 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *ReplicaSetList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicaSetList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n51, err := 
m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n51 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ReplicaSetSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicaSetSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n52, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n52 + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n53, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n53 + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) + return i, nil +} + +func (m *ReplicaSetStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicaSetStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FullyLabeledReplicas)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ReadyReplicas)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ReplicationControllerDummy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicationControllerDummy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *RollbackConfig) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RollbackConfig) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Revision)) + return i, nil +} + +func (m *RollingUpdateDaemonSet) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RollingUpdateDaemonSet) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MaxUnavailable != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxUnavailable.Size())) + n54, err := m.MaxUnavailable.MarshalTo(data[i:]) + if err != nil { + return 
0, err + } + i += n54 + } + return i, nil +} + +func (m *RollingUpdateDeployment) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RollingUpdateDeployment) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MaxUnavailable != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxUnavailable.Size())) + n55, err := m.MaxUnavailable.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n55 + } + if m.MaxSurge != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxSurge.Size())) + n56, err := m.MaxSurge.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n56 + } + return i, nil +} + +func (m *RunAsUserStrategyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RunAsUserStrategyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) + i += copy(data[i:], m.Rule) + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *SELinuxStrategyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SELinuxStrategyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) + i += copy(data[i:], m.Rule) + if m.SELinuxOptions != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size())) + n57, err := m.SELinuxOptions.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n57 + } + return i, nil +} + +func (m *Scale) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Scale) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n58, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n58 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n59, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n59 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n60, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n60 + return i, nil +} + +func (m *ScaleSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScaleSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + return i, nil +} + +func (m *ScaleStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = 
make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScaleStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k := range m.Selector { + data[i] = 0x12 + i++ + v := m.Selector[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.TargetSelector))) + i += copy(data[i:], m.TargetSelector) + return i, nil +} + +func (m *SupplementalGroupsStrategyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SupplementalGroupsStrategyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) + i += copy(data[i:], m.Rule) + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ThirdPartyResource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ThirdPartyResource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n61, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n61 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Description))) + i += copy(data[i:], m.Description) + if len(m.Versions) > 0 { + for _, msg := range m.Versions { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ThirdPartyResourceData) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ThirdPartyResourceData) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n62, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n62 + if m.Data != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Data))) + i += copy(data[i:], m.Data) + } + return i, nil +} + +func (m *ThirdPartyResourceDataList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ThirdPartyResourceDataList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = 
encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n63, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n63 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ThirdPartyResourceList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ThirdPartyResourceList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n64, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n64 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *APIVersion) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomMetricCurrentStatus) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomMetricCurrentStatusList) Size() (n int) { + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CustomMetricTarget) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TargetValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomMetricTargetList) Size() (n int) { + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DaemonSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DaemonSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DaemonSetSpec) Size() (n int) { + var l int + _ = l + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + n += 1 + sovGenerated(uint64(m.TemplateGeneration)) + return n +} + +func (m *DaemonSetStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) + n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberReady)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberAvailable)) + n += 1 + sovGenerated(uint64(m.NumberUnavailable)) + return n +} + +func (m *DaemonSetUpdateStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Deployment) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentRollback) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UpdatedAnnotations) > 0 { + for k, v := range m.UpdatedAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + if m.RollbackTo != nil { + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ProgressDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + } + return n +} + +func (m *DeploymentStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if 
len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + return n +} + +func (m *DeploymentStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *FSGroupStrategyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HTTPIngressPath) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HTTPIngressRuleValue) Size() (n int) { + var l int + _ = l + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HostPortRange) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Min)) + n += 1 + sovGenerated(uint64(m.Max)) + return n +} + +func (m *IDRange) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Min)) + n += 1 + sovGenerated(uint64(m.Max)) + return n +} + +func (m *Ingress) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressBackend) Size() (n int) { + var l int + _ = l + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ServicePort.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IngressRule) Size() (n int) { + var l int + _ = l + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = m.IngressRuleValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressRuleValue) Size() (n int) { + var l int + _ = l + if m.HTTP != nil { + l = m.HTTP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *IngressSpec) Size() (n int) { + var l int + _ = l + if m.Backend != nil { + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.TLS) > 0 { + for _, e := range m.TLS { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IngressStatus) Size() (n int) { + var l int + _ = l + l = m.LoadBalancer.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressTLS) Size() (n int) { + var l int + _ = l + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NetworkPolicy) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NetworkPolicyIngressRule) Size() (n int) { + var l int + _ = l + 
if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.From) > 0 { + for _, e := range m.From { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyPeer) Size() (n int) { + var l int + _ = l + if m.PodSelector != nil { + l = m.PodSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NetworkPolicyPort) Size() (n int) { + var l int + _ = l + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NetworkPolicySpec) Size() (n int) { + var l int + _ = l + l = m.PodSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSecurityPolicy) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityPolicyList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSecurityPolicySpec) Size() (n int) { + var l int + _ = l + n += 2 + if len(m.DefaultAddCapabilities) > 0 { + for _, s := range m.DefaultAddCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.RequiredDropCapabilities) > 0 { + for _, s := range m.RequiredDropCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedCapabilities) > 0 { + for _, s := range m.AllowedCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if len(m.HostPorts) > 0 { + for _, e := range m.HostPorts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + n += 2 + l = m.SELinux.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.RunAsUser.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.SupplementalGroups.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FSGroup.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *ReplicaSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetList) Size() (n 
int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ReplicaSetSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + return n +} + +func (m *ReplicaSetStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ReplicationControllerDummy) Size() (n int) { + var l int + _ = l + return n +} + +func (m *RollbackConfig) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Revision)) + return n +} + +func (m *RollingUpdateDaemonSet) Size() (n int) { + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollingUpdateDeployment) Size() (n int) { + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RunAsUserStrategyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SELinuxStrategyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Scale) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleSpec) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + return n +} + +func (m *ScaleStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.TargetSelector) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SupplementalGroupsStrategyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ThirdPartyResource) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Versions) > 0 { + 
for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ThirdPartyResourceData) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ThirdPartyResourceDataList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ThirdPartyResourceList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *APIVersion) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIVersion{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *CustomMetricCurrentStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomMetricCurrentStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomMetricCurrentStatusList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomMetricCurrentStatusList{`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CustomMetricCurrentStatus", "CustomMetricCurrentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomMetricTarget) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomMetricTarget{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomMetricTargetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomMetricTargetList{`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CustomMetricTarget", "CustomMetricTarget", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", 
"k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetSpec{`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetStatus{`, + `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, + `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, + `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, + `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`, + `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, + `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetUpdateStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Deployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Deployment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentList) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&DeploymentList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentRollback) String() string { + if this == nil { + return "nil" + } + keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) + for k := range this.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + mapStringForUpdatedAnnotations := "map[string]string{" + for _, k := range keysForUpdatedAnnotations { + mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) + } + mapStringForUpdatedAnnotations += "}" + s := strings.Join([]string{`&DeploymentRollback{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, + `RollbackTo:` + strings.Replace(strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `RollbackTo:` + strings.Replace(fmt.Sprintf("%v", this.RollbackTo), "RollbackConfig", "RollbackConfig", 1) + `,`, + `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `}`, + }, "") + return s +} +func (this *FSGroupStrategyOptions) String() string { + if this == nil { + return 
"nil" + } + s := strings.Join([]string{`&FSGroupStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPIngressPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPIngressPath{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPIngressRuleValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPIngressRuleValue{`, + `Paths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Paths), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HostPortRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostPortRange{`, + `Min:` + fmt.Sprintf("%v", this.Min) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `}`, + }, "") + return s +} +func (this *IDRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IDRange{`, + `Min:` + fmt.Sprintf("%v", this.Min) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `}`, + }, "") + return s +} +func (this *Ingress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Ingress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressBackend) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressBackend{`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `ServicePort:` + strings.Replace(strings.Replace(this.ServicePort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Ingress", "Ingress", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressRule{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressRuleValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressRuleValue{`, + `HTTP:` + strings.Replace(fmt.Sprintf("%v", this.HTTP), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressSpec) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&IngressSpec{`, + `Backend:` + strings.Replace(fmt.Sprintf("%v", this.Backend), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TLS), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "IngressRule", "IngressRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "k8s_io_kubernetes_pkg_api_v1.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressTLS) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressTLS{`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyIngressRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicyIngressRule{`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyPeer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicyPeer{`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicyPort{`, + `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicySpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicySpec{`, + `PodSelector:` + strings.Replace(strings.Replace(this.PodSelector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 
1), `&`, ``, 1) + `,`, + `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicyList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicySpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicySpec{`, + `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, + `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, + `RequiredDropCapabilities:` + fmt.Sprintf("%v", this.RequiredDropCapabilities) + `,`, + `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, + `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, + `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, + `HostPorts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostPorts), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + `,`, + `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, + `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, + `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, + `RunAsUser:` + strings.Replace(strings.Replace(this.RunAsUser.String(), "RunAsUserStrategyOptions", "RunAsUserStrategyOptions", 1), `&`, ``, 1) + `,`, + `SupplementalGroups:` + strings.Replace(strings.Replace(this.SupplementalGroups.String(), "SupplementalGroupsStrategyOptions", "SupplementalGroupsStrategyOptions", 1), `&`, ``, 1) + `,`, + `FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`, + `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + 
`LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerDummy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationControllerDummy{`, + `}`, + }, "") + return s +} +func (this *RollbackConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollbackConfig{`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDaemonSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDaemonSet{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDeployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDeployment{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RunAsUserStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RunAsUserStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), 
`&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SELinuxStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SELinuxStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "k8s_io_kubernetes_pkg_api_v1.SELinuxOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Scale) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Scale{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleSpec{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleStatus) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, + `}`, + }, "") + return s +} +func (this *SupplementalGroupsStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ThirdPartyResource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ThirdPartyResource{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "APIVersion", "APIVersion", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ThirdPartyResourceData) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ThirdPartyResourceData{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + valueToStringGenerated(this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *ThirdPartyResourceDataList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ThirdPartyResourceDataList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ThirdPartyResourceData", "ThirdPartyResourceData", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ThirdPartyResourceList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ThirdPartyResourceList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ThirdPartyResource", "ThirdPartyResource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *APIVersion) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomMetricCurrentStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomMetricCurrentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomMetricCurrentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 
0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomMetricCurrentStatusList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomMetricCurrentStatusList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomMetricCurrentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CustomMetricCurrentStatus{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomMetricTarget) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomMetricTarget: wiretype end group for non-group") + } + if fieldNum <= 0 { + 
return fmt.Errorf("proto: CustomMetricTarget: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomMetricTargetList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomMetricTargetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomMetricTargetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CustomMetricTarget{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSet) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DaemonSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
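// Editor's note (illustration only, not part of the vendored diff): repeated message
// fields such as DaemonSetList.Items are decoded above by reading a varint length,
// bounds-checking it, appending a zero value to the slice, and recursing into the
// element's Unmarshal on the sub-slice. A sketch under the assumption of a
// hypothetical Item type whose Unmarshal simply records its bytes:
package main

import (
	"errors"
	"fmt"
)

type Item struct{ Raw []byte }

func (it *Item) Unmarshal(data []byte) error {
	it.Raw = append([]byte(nil), data...) // stand-in for real field decoding
	return nil
}

type ItemList struct{ Items []Item }

// decodeRepeated mirrors the generated pattern for one length-delimited,
// repeated message field: bounds check, append zero value, recurse.
func (l *ItemList) decodeRepeated(data []byte, i, msglen int) (int, error) {
	post := i + msglen
	if msglen < 0 || post > len(data) {
		return 0, errors.New("invalid length")
	}
	l.Items = append(l.Items, Item{})
	if err := l.Items[len(l.Items)-1].Unmarshal(data[i:post]); err != nil {
		return 0, err
	}
	return post, nil
}

func main() {
	var l ItemList
	next, _ := l.decodeRepeated([]byte{'a', 'b', 'c'}, 0, 3)
	fmt.Println(len(l.Items), string(l.Items[0].Raw), next)
}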
fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UpdateStrategy.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateGeneration", wireType) + } + m.TemplateGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.TemplateGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) + } + m.CurrentNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) + } + m.NumberMisscheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.NumberMisscheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) + } + m.DesiredNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) + } + m.NumberReady = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.NumberReady |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) + } + m.UpdatedNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) + } + m.NumberAvailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.NumberAvailable |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) + } + m.NumberUnavailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.NumberUnavailable |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetUpdateStrategy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DaemonSetUpdateStrategyType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDaemonSet{} + } + if err := m.RollingUpdate.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Deployment) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
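// Editor's note (illustration only, not part of the vendored diff): two smaller
// patterns recur in the Unmarshal code above -- "enum-like" named string types are
// filled by converting the raw bytes directly (m.Type = DaemonSetUpdateStrategyType(...)),
// and optional sub-messages are allocated lazily only when their tag appears on the
// wire (if m.RollingUpdate == nil { ... }). A sketch with hypothetical types:
package main

import "fmt"

type StrategyType string

type RollingUpdate struct{ MaxUnavailable int32 }

type UpdateStrategy struct {
	Type          StrategyType
	RollingUpdate *RollingUpdate // stays nil unless the field is present on the wire
}

// setType mirrors m.Type = DaemonSetUpdateStrategyType(data[iNdEx:postIndex]).
func (s *UpdateStrategy) setType(raw []byte) { s.Type = StrategyType(raw) }

// ensureRollingUpdate mirrors the lazy "if m.RollingUpdate == nil" allocation.
func (s *UpdateStrategy) ensureRollingUpdate() *RollingUpdate {
	if s.RollingUpdate == nil {
		s.RollingUpdate = &RollingUpdate{}
	}
	return s.RollingUpdate
}

func main() {
	var s UpdateStrategy
	s.setType([]byte("RollingUpdate"))
	s.ensureRollingUpdate().MaxUnavailable = 1
	fmt.Println(s.Type, *s.RollingUpdate)
}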
data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Deployment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentRollback) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.UpdatedAnnotations == nil { + m.UpdatedAnnotations = make(map[string]string) + } + m.UpdatedAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + 
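// Editor's note (illustration only, not part of the vendored diff): map fields such
// as DeploymentRollback.UpdatedAnnotations are encoded as repeated key/value entries;
// the generated code above reads a tag and length for the key string, then a tag and
// length for the value string, and allocates the Go map only on first use. A
// compressed sketch of decoding one such entry (decodeStringEntry is hypothetical):
package main

import (
	"errors"
	"fmt"
)

// decodeStringEntry reads one "tag, length, bytes" string starting at data[i].
func decodeStringEntry(data []byte, i int) (string, int, error) {
	readVarint := func(i int) (uint64, int, error) {
		var v uint64
		for shift := uint(0); ; shift += 7 {
			if i >= len(data) {
				return 0, 0, errors.New("unexpected EOF")
			}
			b := data[i]
			i++
			v |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				return v, i, nil
			}
		}
	}
	if _, next, err := readVarint(i); err == nil { // skip the key/value tag
		i = next
	} else {
		return "", 0, err
	}
	n, next, err := readVarint(i)
	if err != nil || next+int(n) > len(data) {
		return "", 0, errors.New("invalid length")
	}
	return string(data[next : next+int(n)]), next + int(n), nil
}

func main() {
	// One map entry: key "k" (field 1, tag 0x0A) then value "v" (field 2, tag 0x12).
	entry := []byte{0x0A, 0x01, 'k', 0x12, 0x01, 'v'}
	m := map[string]string{} // allocated lazily in the generated code
	key, i, _ := decodeStringEntry(entry, 0)
	val, _, _ := decodeStringEntry(entry, i)
	m[key] = val
	fmt.Println(m)
}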
m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Strategy.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollbackTo == nil { + m.RollbackTo = &RollbackConfig{} + } + if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressDeadlineSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStrategy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentStrategyType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDeployment{} + } + if err := m.RollingUpdate.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FSGroupStrategyOptions) Unmarshal(data []byte) error { + l := len(data) 
+ iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = FSGroupStrategyType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPIngressPath) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = 
string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Backend.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPIngressRuleValue) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, HTTPIngressPath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostPortRange) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + m.Min = 0 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Min |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Max |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IDRange) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IDRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + m.Min = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Min |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Max |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Ingress) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
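// Editor's note (illustration only, not part of the vendored diff): every default:
// branch above calls skipGenerated to hop over fields this version of the struct
// does not recognize, which is what lets newer encoders interoperate with older
// decoders. A simplified sketch that handles only varint (wire type 0) and
// length-delimited (wire type 2) fields, with a one-byte length for brevity:
package main

import (
	"errors"
	"fmt"
)

// skipField returns the offset just past one unknown field starting at data[i],
// where key is the already-decoded fieldNumber<<3|wireType tag.
func skipField(data []byte, i int, key uint64) (int, error) {
	switch key & 0x7 {
	case 0: // varint: consume bytes until the high bit is clear
		for {
			if i >= len(data) {
				return 0, errors.New("unexpected EOF")
			}
			b := data[i]
			i++
			if b < 0x80 {
				return i, nil
			}
		}
	case 2: // length-delimited: read the length, then jump past the payload
		if i >= len(data) {
			return 0, errors.New("unexpected EOF")
		}
		n := int(data[i]) // the real skipGenerated decodes a full varint length
		i++
		if i+n > len(data) {
			return 0, errors.New("invalid length")
		}
		return i + n, nil
	default:
		return 0, fmt.Errorf("wire type %d not handled in this sketch", key&0x7)
	}
}

func main() {
	// Field 9, wire type 2 (tag byte 0x4A), length 3, payload "xyz".
	data := []byte{0x03, 'x', 'y', 'z'}
	next, _ := skipField(data, 0, 0x4A)
	fmt.Println("resume at offset", next)
}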
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressBackend) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServicePort.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Ingress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressRule) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.IngressRuleValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressRuleValue) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HTTP == nil { + m.HTTP = &HTTPIngressRuleValue{} + } + if err := m.HTTP.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Backend == nil { + m.Backend = &IngressBackend{} + } + if err := m.Backend.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TLS = append(m.TLS, IngressTLS{}) + if err := m.TLS[len(m.TLS)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, IngressRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 
1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LoadBalancer.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressTLS) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts = append(m.Hosts, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ 
+ wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyIngressRule) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.From = append(m.From, NetworkPolicyPeer{}) + if err := m.From[len(m.From)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, NetworkPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyPeer) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) 
+ if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodSelector == nil { + m.PodSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.PodSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyPort) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_kubernetes_pkg_api_v1.Protocol(data[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Port == nil { + m.Port = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.Port.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicySpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PodSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicyList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodSecurityPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Privileged = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_kubernetes_pkg_api_v1.Capability(data[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDropCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_kubernetes_pkg_api_v1.Capability(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_kubernetes_pkg_api_v1.Capability(data[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, FSType(data[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostNetwork = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPorts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostPorts = append(m.HostPorts, HostPortRange{}) + if err := m.HostPorts[len(m.HostPorts)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostPID = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostIPC = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinux", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + if err := m.SELinux.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RunAsUser.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SupplementalGroups.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FSGroup.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnlyRootFilesystem = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSet) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen 
int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ReplicaSetConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ReplicaSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + 
m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ReplicaSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerDummy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerDummy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerDummy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollbackConfig) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy 
< 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDaemonSet) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + } + 
var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxSurge == nil { + m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunAsUserStrategyOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = RunAsUserStrategy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SELinuxStrategyOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SELinuxStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SELinuxStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = SELinuxStrategy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxOptions == nil { + m.SELinuxOptions = &k8s_io_kubernetes_pkg_api_v1.SELinuxOptions{} + } + if err := m.SELinuxOptions.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scale) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scale: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Selector == nil { + m.Selector = make(map[string]string) + } + m.Selector[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetSelector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SupplementalGroupsStrategyOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = SupplementalGroupsStrategyType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ThirdPartyResource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ThirdPartyResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ThirdPartyResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, APIVersion{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ThirdPartyResourceData) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ThirdPartyResourceData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ThirdPartyResourceData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ThirdPartyResourceDataList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ThirdPartyResourceDataList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ThirdPartyResourceDataList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ThirdPartyResourceData{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ThirdPartyResourceList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 
3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ThirdPartyResourceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ThirdPartyResourceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ThirdPartyResource{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + 
case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 3369 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe4, 0x5b, 0xdb, 0x6f, 0x1b, 0xc7, + 0xd5, 0xf7, 0x8a, 0xa4, 0x45, 0x1d, 0x59, 0x92, 0x35, 0x72, 0x64, 0x46, 0x49, 0x44, 0x67, 0x3f, + 0x7c, 0x89, 0xf3, 0x7d, 0x09, 0xf5, 0xc5, 0xf9, 0x9c, 0x26, 0x4e, 0xe2, 0x44, 0x94, 0x7c, 0x51, + 0x21, 0xc9, 0xcc, 0x90, 0x32, 0x1a, 0xe7, 0xd6, 0x15, 0x39, 0xa2, 0xd6, 0xde, 0x5b, 0x76, 0x67, + 0x15, 0x11, 0x41, 0xdb, 0x00, 0x45, 0xf3, 0x58, 0xb4, 0x2f, 0x45, 0x0a, 0xa4, 0x8f, 0x7d, 0xe8, + 0x4b, 0x9b, 0x3c, 0xb4, 0x69, 0xff, 0x82, 0xfa, 0xa1, 0x28, 0x52, 0xa0, 0x05, 0x8a, 0x22, 0x15, + 0x6a, 0x05, 0xcd, 0x3f, 0xd0, 0xe6, 0xc5, 0x4f, 0xc5, 0xcc, 0xce, 0xde, 0x77, 0x65, 0x93, 0xb2, + 0x89, 0x02, 0x7d, 0xe3, 0xce, 0x9c, 0xf3, 0x3b, 0x97, 0x39, 0x73, 0xe6, 0xcc, 0x85, 0xf0, 0xd2, + 0x8d, 0xe7, 0x9c, 0x9a, 0x6a, 0x2e, 0xdc, 0x70, 0x37, 0x89, 0x6d, 0x10, 0x4a, 0x9c, 0x05, 0xeb, + 0x46, 0x77, 0x41, 0xb1, 0x54, 0x67, 0x81, 0xec, 0x52, 0x62, 0x38, 0xaa, 0x69, 0x38, 0x0b, 0x3b, + 0x4f, 0x6f, 0x12, 0xaa, 0x3c, 0xbd, 0xd0, 0x25, 0x06, 0xb1, 0x15, 0x4a, 0x3a, 0x35, 0xcb, 0x36, + 0xa9, 0x89, 0x9e, 0xf2, 0xd8, 0x6b, 0x21, 0x7b, 0xcd, 0xba, 0xd1, 0xad, 0x31, 0xf6, 0x5a, 0xc8, + 0x5e, 0x13, 0xec, 0x73, 0x4f, 0x75, 0x55, 0xba, 0xed, 0x6e, 0xd6, 0xda, 0xa6, 0xbe, 0xd0, 0x35, + 0xbb, 0xe6, 0x02, 0x47, 0xd9, 0x74, 0xb7, 0xf8, 0x17, 0xff, 0xe0, 0xbf, 0x3c, 0xf4, 0xb9, 0xff, + 0x17, 0xca, 0x29, 0x96, 0xaa, 0x2b, 0xed, 0x6d, 0xd5, 0x20, 0x76, 0xcf, 0x57, 0x6f, 0xc1, 0x26, + 0x8e, 0xe9, 0xda, 0x6d, 0x92, 0xd4, 0xe9, 0x40, 0x2e, 0x67, 0x41, 0x27, 0x54, 0x59, 0xd8, 0x49, + 0x59, 0x32, 0xb7, 0x90, 0xc7, 0x65, 0xbb, 0x06, 0x55, 0xf5, 0xb4, 0x98, 0x67, 0xef, 0xc4, 0xe0, + 0xb4, 0xb7, 0x89, 0xae, 0xa4, 0xf8, 0x9e, 0xc9, 0xe3, 0x73, 0xa9, 0xaa, 0x2d, 0xa8, 0x06, 0x75, + 0xa8, 0x7d, 0x90, 0x4d, 0x0e, 0xb1, 0x77, 0x88, 0x1d, 0x1d, 0x25, 0x45, 0xb7, 0x34, 0x92, 0x65, + 0xd3, 0x93, 0xb9, 0x83, 0x9b, 0x41, 0x2d, 0xd7, 0x00, 0x16, 0x1b, 0x2b, 0x57, 0x89, 0xcd, 0xc6, + 0x0c, 0x9d, 0x82, 0xa2, 0xa1, 0xe8, 0xa4, 0x22, 0x9d, 0x92, 0x4e, 0x8f, 0xd5, 0x8f, 0xdd, 0xdc, + 0xab, 0x1e, 0xd9, 0xdf, 0xab, 0x16, 0xd7, 0x15, 0x9d, 0x60, 0xde, 0x23, 0xff, 0x58, 0x82, 0x07, + 0x97, 0x5c, 0x87, 0x9a, 0xfa, 0x1a, 0xa1, 0xb6, 0xda, 0x5e, 0x72, 0x6d, 0x9b, 0x18, 0xb4, 0x49, + 0x15, 0xea, 0x3a, 0x77, 0xe6, 0x47, 0xd7, 0xa0, 0xb4, 0xa3, 0x68, 0x2e, 0xa9, 0x8c, 0x9c, 0x92, + 0x4e, 0x8f, 0x9f, 0xa9, 0xd5, 0x44, 0x2c, 0x45, 0x1d, 0xe3, 0x47, 0x53, 0xcd, 0x1f, 0xed, 0xda, + 0xab, 0xae, 0x62, 0x50, 0x95, 0xf6, 0xea, 0x27, 0x04, 0xe4, 0x31, 0x21, 0xf7, 0x2a, 0xc3, 0xc2, + 0x1e, 0xa4, 0xfc, 0x7d, 0x09, 0x1e, 0xc9, 0xd5, 0x6d, 0x55, 0x75, 0x28, 0xd2, 0xa1, 0xa4, 0x52, + 0xa2, 0x3b, 0x15, 0xe9, 0x54, 0xe1, 0xf4, 0xf8, 0x99, 0xcb, 0xb5, 0xbe, 0x22, 0xb9, 0x96, 0x0b, + 0x5e, 0x9f, 0x10, 0x7a, 0x95, 0x56, 0x18, 0x3c, 0xf6, 0xa4, 0xc8, 0x3f, 0x94, 0x00, 0x45, 0x79, + 0x5a, 0x8a, 0xdd, 0x25, 0xf4, 0x2e, 0xbc, 0xf4, 0xda, 0xe1, 0xbc, 0x34, 0x23, 0x20, 0xc7, 0x3d, + 0x81, 0x31, 0x27, 0xbd, 0x2f, 0xc1, 0x6c, 0x5a, 0x27, 0xee, 0x9d, 0xad, 0xb8, 0x77, 0x16, 0x0f, + 0xe1, 0x1d, 0x0f, 0x35, 0xc7, 0x2d, 0xbf, 0x1c, 0x81, 0xb1, 0x65, 0x85, 0xe8, 0xa6, 0xd1, 0x24, + 0x14, 
0x7d, 0x13, 0xca, 0x6c, 0x7a, 0x76, 0x14, 0xaa, 0x70, 0x8f, 0x8c, 0x9f, 0xf9, 0xbf, 0x83, + 0xcc, 0x75, 0x6a, 0x8c, 0xba, 0xb6, 0xf3, 0x74, 0xed, 0xca, 0xe6, 0x75, 0xd2, 0xa6, 0x6b, 0x84, + 0x2a, 0x75, 0x24, 0xe4, 0x40, 0xd8, 0x86, 0x03, 0x54, 0xf4, 0x16, 0x14, 0x1d, 0x8b, 0xb4, 0x85, + 0x33, 0x5f, 0xec, 0xd3, 0xac, 0x40, 0xd3, 0xa6, 0x45, 0xda, 0xe1, 0x68, 0xb1, 0x2f, 0xcc, 0x71, + 0xd1, 0x16, 0x1c, 0x75, 0x78, 0x18, 0x54, 0x0a, 0x5c, 0xc2, 0xf9, 0x81, 0x25, 0x78, 0xc1, 0x34, + 0x29, 0x64, 0x1c, 0xf5, 0xbe, 0xb1, 0x40, 0x97, 0x7f, 0x27, 0xc1, 0x44, 0x40, 0xcb, 0x47, 0xec, + 0x8d, 0x94, 0xef, 0x6a, 0x77, 0xe7, 0x3b, 0xc6, 0xcd, 0x3d, 0x77, 0x5c, 0xc8, 0x2a, 0xfb, 0x2d, + 0x11, 0xbf, 0xbd, 0xe9, 0xc7, 0xc3, 0x08, 0x8f, 0x87, 0xe7, 0x06, 0x35, 0x2b, 0x27, 0x0c, 0xbe, + 0x28, 0x44, 0xcc, 0x61, 0xee, 0x44, 0x6f, 0x42, 0xd9, 0x21, 0x1a, 0x69, 0x53, 0xd3, 0x16, 0xe6, + 0x3c, 0x73, 0x97, 0xe6, 0x28, 0x9b, 0x44, 0x6b, 0x0a, 0xd6, 0xfa, 0x31, 0x66, 0x8f, 0xff, 0x85, + 0x03, 0x48, 0xf4, 0x3a, 0x94, 0x29, 0xd1, 0x2d, 0x4d, 0xa1, 0xfe, 0xc4, 0x7a, 0x2a, 0xdf, 0x24, + 0x06, 0xdb, 0x30, 0x3b, 0x2d, 0xc1, 0xc0, 0x07, 0x3f, 0x70, 0x96, 0xdf, 0x8a, 0x03, 0x40, 0xf4, + 0x81, 0x04, 0x93, 0xae, 0xd5, 0x61, 0xa4, 0x94, 0x25, 0xd8, 0x6e, 0x4f, 0x44, 0xc3, 0xc5, 0x41, + 0xdd, 0xb6, 0x11, 0x43, 0xab, 0xcf, 0x0a, 0xe1, 0x93, 0xf1, 0x76, 0x9c, 0x90, 0x8a, 0x16, 0x61, + 0x4a, 0x57, 0x0d, 0x4c, 0x94, 0x4e, 0xaf, 0x49, 0xda, 0xa6, 0xd1, 0x71, 0x2a, 0xc5, 0x53, 0xd2, + 0xe9, 0x52, 0xfd, 0xa4, 0x00, 0x98, 0x5a, 0x8b, 0x77, 0xe3, 0x24, 0x3d, 0xfa, 0x3a, 0x20, 0xdf, + 0xae, 0x4b, 0xde, 0x7a, 0xa1, 0x9a, 0x46, 0xa5, 0x74, 0x4a, 0x3a, 0x5d, 0xa8, 0xcf, 0x09, 0x14, + 0xd4, 0x4a, 0x51, 0xe0, 0x0c, 0x2e, 0xf9, 0x9f, 0x45, 0x98, 0x4a, 0x04, 0x38, 0xba, 0x0a, 0xb3, + 0x6d, 0x2f, 0x7d, 0xae, 0xbb, 0xfa, 0x26, 0xb1, 0x9b, 0xed, 0x6d, 0xd2, 0x71, 0x35, 0xd2, 0xe1, + 0xa3, 0x5e, 0xaa, 0xcf, 0x0b, 0x19, 0xb3, 0x4b, 0x99, 0x54, 0x38, 0x87, 0x9b, 0xe9, 0x6d, 0xf0, + 0xa6, 0x35, 0xd5, 0x71, 0x02, 0xcc, 0x11, 0x8e, 0x19, 0xe8, 0xbd, 0x9e, 0xa2, 0xc0, 0x19, 0x5c, + 0x4c, 0xc7, 0x0e, 0x71, 0x54, 0x9b, 0x74, 0x92, 0x3a, 0x16, 0xe2, 0x3a, 0x2e, 0x67, 0x52, 0xe1, + 0x1c, 0x6e, 0x74, 0x16, 0xc6, 0x3d, 0x69, 0xdc, 0xe3, 0x62, 0x68, 0x82, 0x84, 0xbd, 0x1e, 0x76, + 0xe1, 0x28, 0x1d, 0x33, 0xcd, 0xdc, 0xe4, 0x55, 0x40, 0x27, 0x7f, 0x48, 0xae, 0xa4, 0x28, 0x70, + 0x06, 0x17, 0x33, 0xcd, 0x8b, 0x99, 0x94, 0x69, 0x47, 0xe3, 0xa6, 0x6d, 0x64, 0x52, 0xe1, 0x1c, + 0x6e, 0x16, 0x79, 0x9e, 0xca, 0x8b, 0x3b, 0x8a, 0xaa, 0x29, 0x9b, 0x1a, 0xa9, 0x8c, 0xc6, 0x23, + 0x6f, 0x3d, 0xde, 0x8d, 0x93, 0xf4, 0xe8, 0x12, 0x4c, 0x7b, 0x4d, 0x1b, 0x86, 0x12, 0x80, 0x94, + 0x39, 0xc8, 0x83, 0x02, 0x64, 0x7a, 0x3d, 0x49, 0x80, 0xd3, 0x3c, 0xf2, 0x5f, 0x24, 0x38, 0x99, + 0x33, 0x93, 0xd0, 0xcb, 0x50, 0xa4, 0x3d, 0xcb, 0x5f, 0x7f, 0xff, 0xd7, 0xcf, 0xe8, 0xad, 0x9e, + 0x45, 0x6e, 0xef, 0x55, 0x1f, 0xca, 0x61, 0x63, 0xdd, 0x98, 0x33, 0xa2, 0x6f, 0xc3, 0x84, 0x6d, + 0x6a, 0x9a, 0x6a, 0x74, 0x3d, 0x12, 0x91, 0x4d, 0x2e, 0xf4, 0x39, 0xd3, 0x71, 0x14, 0x23, 0xcc, + 0x96, 0xd3, 0xfb, 0x7b, 0xd5, 0x89, 0x58, 0x1f, 0x8e, 0x8b, 0x93, 0x7f, 0x3d, 0x02, 0xb0, 0x4c, + 0x2c, 0xcd, 0xec, 0xe9, 0xc4, 0x18, 0xc6, 0x0a, 0xfa, 0x76, 0x6c, 0x05, 0x7d, 0xa9, 0xdf, 0x8c, + 0x16, 0xa8, 0x9a, 0xbb, 0x84, 0x76, 0x13, 0x4b, 0xe8, 0xcb, 0x83, 0x8b, 0x38, 0x78, 0x0d, 0xbd, + 0x55, 0x80, 0x99, 0x90, 0x78, 0xc9, 0x34, 0x3a, 0x2a, 0x9f, 0x13, 0x2f, 0xc4, 0x62, 0xe2, 0xf1, + 0x44, 0x4c, 0x9c, 0xcc, 0x60, 0x89, 0xc4, 0xc3, 0xd5, 0x40, 0xfb, 0x11, 0xce, 0x7e, 0x3e, 0x2e, + 0xfc, 0xf6, 0x5e, 0xf5, 0xc0, 
0xa2, 0xbc, 0x16, 0x60, 0xc6, 0x95, 0x45, 0x8f, 0xc1, 0x51, 0x9b, + 0x28, 0x8e, 0x69, 0xf0, 0x34, 0x31, 0x16, 0x1a, 0x85, 0x79, 0x2b, 0x16, 0xbd, 0xe8, 0x09, 0x18, + 0xd5, 0x89, 0xe3, 0x28, 0x5d, 0xc2, 0x33, 0xc2, 0x58, 0x7d, 0x4a, 0x10, 0x8e, 0xae, 0x79, 0xcd, + 0xd8, 0xef, 0x47, 0xd7, 0x61, 0x52, 0x53, 0x1c, 0x11, 0xda, 0x2d, 0x55, 0x27, 0x7c, 0xce, 0x8f, + 0x9f, 0xf9, 0x9f, 0xbb, 0x8b, 0x18, 0xc6, 0x11, 0xae, 0x44, 0xab, 0x31, 0x24, 0x9c, 0x40, 0x46, + 0x3b, 0x80, 0x58, 0x4b, 0xcb, 0x56, 0x0c, 0xc7, 0x73, 0x19, 0x93, 0x37, 0xda, 0xb7, 0xbc, 0x20, + 0xbf, 0xad, 0xa6, 0xd0, 0x70, 0x86, 0x04, 0xf9, 0xf7, 0x12, 0x4c, 0x86, 0x03, 0x36, 0x84, 0x42, + 0xe9, 0xad, 0x78, 0xa1, 0xf4, 0xfc, 0xc0, 0xc1, 0x9b, 0x53, 0x29, 0x7d, 0x58, 0x00, 0x14, 0x12, + 0xb1, 0xd4, 0xb0, 0xa9, 0xb4, 0x6f, 0xdc, 0xc5, 0x3e, 0xe2, 0xa7, 0x12, 0x20, 0x91, 0xac, 0x17, + 0x0d, 0xc3, 0xa4, 0x3c, 0xff, 0xfb, 0x6a, 0xbe, 0x36, 0xb0, 0x9a, 0xbe, 0x06, 0xb5, 0x8d, 0x14, + 0xf6, 0x05, 0x83, 0xda, 0xbd, 0x70, 0xc4, 0xd2, 0x04, 0x38, 0x43, 0x21, 0xf4, 0x0e, 0x80, 0x2d, + 0x30, 0x5b, 0xa6, 0x48, 0x01, 0x2f, 0x0d, 0x90, 0x4d, 0x19, 0xc0, 0x92, 0x69, 0x6c, 0xa9, 0xdd, + 0x30, 0xa1, 0xe1, 0x00, 0x18, 0x47, 0x84, 0xcc, 0x5d, 0x80, 0x93, 0x39, 0xda, 0xa3, 0xe3, 0x50, + 0xb8, 0x41, 0x7a, 0x9e, 0x5b, 0x31, 0xfb, 0x89, 0x4e, 0x44, 0xf7, 0x63, 0x63, 0x62, 0x2b, 0x75, + 0x6e, 0xe4, 0x39, 0x49, 0xfe, 0xb2, 0x14, 0x8d, 0x35, 0x5e, 0xc5, 0x9e, 0x86, 0xb2, 0x4d, 0x2c, + 0x4d, 0x6d, 0x2b, 0x8e, 0xa8, 0x67, 0x78, 0x41, 0x8a, 0x45, 0x1b, 0x0e, 0x7a, 0x63, 0xf5, 0xee, + 0xc8, 0xfd, 0xad, 0x77, 0x0b, 0xf7, 0xba, 0xde, 0x35, 0xa1, 0xec, 0xf8, 0x85, 0x6e, 0x91, 0x83, + 0x2f, 0x1e, 0x22, 0x67, 0x8b, 0x1a, 0x37, 0x10, 0x18, 0x54, 0xb7, 0x81, 0x90, 0xac, 0xba, 0xb6, + 0xd4, 0x67, 0x5d, 0xbb, 0x0a, 0x27, 0x6c, 0xb2, 0xa3, 0x32, 0x35, 0x2e, 0xab, 0x0e, 0x35, 0xed, + 0xde, 0xaa, 0xaa, 0xab, 0x54, 0x94, 0x3d, 0x95, 0xfd, 0xbd, 0xea, 0x09, 0x9c, 0xd1, 0x8f, 0x33, + 0xb9, 0x58, 0x76, 0xb6, 0x14, 0xd7, 0x21, 0x1d, 0x9e, 0xd2, 0xca, 0x61, 0x76, 0x6e, 0xf0, 0x56, + 0x2c, 0x7a, 0x91, 0x1e, 0x0b, 0xee, 0xf2, 0xbd, 0x08, 0xee, 0xc9, 0xfc, 0xc0, 0x46, 0x1b, 0x70, + 0xd2, 0xb2, 0xcd, 0xae, 0x4d, 0x1c, 0x67, 0x99, 0x28, 0x1d, 0x4d, 0x35, 0x88, 0xef, 0xaf, 0x31, + 0x6e, 0xe7, 0x43, 0xfb, 0x7b, 0xd5, 0x93, 0x8d, 0x6c, 0x12, 0x9c, 0xc7, 0x2b, 0x7f, 0x54, 0x84, + 0xe3, 0xc9, 0x55, 0x36, 0xa7, 0x2a, 0x95, 0x06, 0xaa, 0x4a, 0x9f, 0x8c, 0x4c, 0x1b, 0xaf, 0x64, + 0x0f, 0xa2, 0x21, 0x63, 0xea, 0x2c, 0xc2, 0x94, 0xc8, 0x23, 0x7e, 0xa7, 0xa8, 0xcb, 0x83, 0x68, + 0xd8, 0x88, 0x77, 0xe3, 0x24, 0x3d, 0xab, 0x35, 0xc3, 0x12, 0xd2, 0x07, 0x29, 0xc6, 0x6b, 0xcd, + 0xc5, 0x24, 0x01, 0x4e, 0xf3, 0xa0, 0x35, 0x98, 0x71, 0x8d, 0x34, 0x94, 0x17, 0x9d, 0x0f, 0x09, + 0xa8, 0x99, 0x8d, 0x34, 0x09, 0xce, 0xe2, 0x43, 0x3b, 0x00, 0x6d, 0xbf, 0x20, 0x70, 0x2a, 0x47, + 0x79, 0xae, 0xae, 0x0f, 0x3c, 0xb7, 0x82, 0xda, 0x22, 0xcc, 0x88, 0x41, 0x93, 0x83, 0x23, 0x92, + 0xd0, 0x0b, 0x30, 0x61, 0xf3, 0x8d, 0x87, 0x6f, 0x80, 0x57, 0xbc, 0x3f, 0x20, 0xd8, 0x26, 0x70, + 0xb4, 0x13, 0xc7, 0x69, 0xe5, 0x3f, 0x48, 0xd1, 0x25, 0x2a, 0x28, 0xb5, 0xcf, 0xc5, 0xca, 0xaa, + 0xc7, 0x12, 0x65, 0xd5, 0x6c, 0x9a, 0x23, 0x52, 0x55, 0x7d, 0x27, 0xbb, 0xca, 0xbe, 0x78, 0xa8, + 0x2a, 0x3b, 0x5c, 0x6a, 0xef, 0x5c, 0x66, 0x7f, 0x22, 0xc1, 0xec, 0xc5, 0xe6, 0x25, 0xdb, 0x74, + 0x2d, 0x5f, 0xbd, 0x2b, 0x96, 0xe7, 0xab, 0xaf, 0x41, 0xd1, 0x76, 0x35, 0xdf, 0xae, 0xff, 0xf2, + 0xed, 0xc2, 0xae, 0xc6, 0xec, 0x9a, 0x49, 0x70, 0x79, 0x46, 0x31, 0x06, 0xf4, 0x16, 0x1c, 0xb5, + 0x15, 0xa3, 0x4b, 0xfc, 0x45, 0xf8, 0xd9, 0x3e, 0xad, 
0x59, 0x59, 0xc6, 0x8c, 0x3d, 0x52, 0x0a, + 0x72, 0x34, 0x2c, 0x50, 0xe5, 0x9f, 0x48, 0x30, 0x75, 0xb9, 0xd5, 0x6a, 0xac, 0x18, 0x7c, 0x16, + 0x37, 0x14, 0xba, 0xcd, 0xea, 0x04, 0x4b, 0xa1, 0xdb, 0xc9, 0x3a, 0x81, 0xf5, 0x61, 0xde, 0x83, + 0xb6, 0x61, 0x94, 0x65, 0x0f, 0x62, 0x74, 0x06, 0x2c, 0xf1, 0x85, 0xb8, 0xba, 0x07, 0x12, 0xd6, + 0x9f, 0xa2, 0x01, 0xfb, 0xf0, 0xf2, 0x7b, 0x70, 0x22, 0xa2, 0x1e, 0xf3, 0x17, 0x3f, 0x9d, 0x44, + 0x6d, 0x28, 0x31, 0x4d, 0xfc, 0xb3, 0xc7, 0x7e, 0x8f, 0xd0, 0x12, 0x26, 0x87, 0x75, 0x14, 0xfb, + 0x72, 0xb0, 0x87, 0x2d, 0xaf, 0xc1, 0xc4, 0x65, 0xd3, 0xa1, 0x0d, 0xd3, 0xa6, 0xdc, 0x6d, 0xe8, + 0x11, 0x28, 0xe8, 0xaa, 0x21, 0x56, 0xe9, 0x71, 0xc1, 0x53, 0x60, 0xeb, 0x08, 0x6b, 0xe7, 0xdd, + 0xca, 0xae, 0xc8, 0x46, 0x61, 0xb7, 0xb2, 0x8b, 0x59, 0xbb, 0x7c, 0x09, 0x46, 0xc5, 0x70, 0x44, + 0x81, 0x0a, 0x07, 0x03, 0x15, 0x32, 0x80, 0x7e, 0x31, 0x02, 0xa3, 0x42, 0xfb, 0x21, 0x6c, 0xe6, + 0xde, 0x88, 0x6d, 0xe6, 0xce, 0x0d, 0x36, 0xd2, 0xb9, 0x3b, 0xb9, 0x4e, 0x62, 0x27, 0xf7, 0xe2, + 0x80, 0xf8, 0x07, 0x6f, 0xe3, 0x3e, 0x96, 0x60, 0x32, 0x1e, 0x73, 0xe8, 0x2c, 0x8c, 0xb3, 0x35, + 0x45, 0x6d, 0x93, 0xf5, 0xb0, 0x28, 0x0e, 0x0e, 0x56, 0x9a, 0x61, 0x17, 0x8e, 0xd2, 0xa1, 0x6e, + 0xc0, 0xc6, 0xc2, 0x42, 0x38, 0x25, 0xdf, 0xe5, 0x2e, 0x55, 0xb5, 0x9a, 0x77, 0x5f, 0x53, 0x5b, + 0x31, 0xe8, 0x15, 0xbb, 0x49, 0x6d, 0xd5, 0xe8, 0xa6, 0x04, 0xf1, 0x18, 0x8b, 0x22, 0xcb, 0x37, + 0x25, 0x18, 0x17, 0x2a, 0x0f, 0x61, 0x4b, 0xf2, 0x7a, 0x7c, 0x4b, 0xf2, 0xec, 0x80, 0xf3, 0x39, + 0x7b, 0x3f, 0xf2, 0x69, 0x68, 0x0a, 0x9b, 0xc1, 0x2c, 0xc1, 0x6c, 0x9b, 0x0e, 0x4d, 0x26, 0x18, + 0x36, 0xd7, 0x30, 0xef, 0x41, 0xdf, 0x93, 0xe0, 0xb8, 0x9a, 0x98, 0xf3, 0xc2, 0xd7, 0x2f, 0x0f, + 0xa6, 0x5a, 0x00, 0x53, 0xaf, 0x08, 0x79, 0xc7, 0x93, 0x3d, 0x38, 0x25, 0x52, 0x76, 0x21, 0x45, + 0x85, 0x14, 0x28, 0x6e, 0x53, 0x6a, 0x89, 0x41, 0x58, 0x1a, 0x3c, 0xf3, 0x84, 0x2a, 0x95, 0xb9, + 0xf9, 0xad, 0x56, 0x03, 0x73, 0x68, 0xf9, 0xe7, 0x23, 0x81, 0xc3, 0x9a, 0xde, 0x24, 0x09, 0xf2, + 0xad, 0x74, 0x2f, 0xf2, 0xed, 0x78, 0x56, 0xae, 0x45, 0xdf, 0x80, 0x02, 0xd5, 0x06, 0xdd, 0x94, + 0x0a, 0x09, 0xad, 0xd5, 0x66, 0x98, 0xb0, 0x5a, 0xab, 0x4d, 0xcc, 0x20, 0xd1, 0xdb, 0x50, 0x62, + 0xab, 0x19, 0x9b, 0xe3, 0x85, 0xc1, 0x73, 0x08, 0xf3, 0x57, 0x18, 0x61, 0xec, 0xcb, 0xc1, 0x1e, + 0xae, 0xfc, 0x1e, 0x4c, 0xc4, 0x12, 0x01, 0xba, 0x0e, 0xc7, 0x34, 0x53, 0xe9, 0xd4, 0x15, 0x4d, + 0x31, 0xda, 0xc4, 0x4e, 0xa6, 0xc6, 0xec, 0xfd, 0xcc, 0x6a, 0x84, 0x43, 0x24, 0x94, 0xe0, 0x02, + 0x31, 0xda, 0x87, 0x63, 0xd8, 0xb2, 0x02, 0x10, 0x5a, 0x8f, 0xaa, 0x50, 0x62, 0x21, 0xec, 0xad, + 0x4c, 0x63, 0xf5, 0x31, 0xa6, 0x2b, 0x8b, 0x6c, 0x07, 0x7b, 0xed, 0xe8, 0x0c, 0x80, 0x43, 0xda, + 0x36, 0xa1, 0x3c, 0xef, 0x78, 0x27, 0x40, 0x41, 0x06, 0x6e, 0x06, 0x3d, 0x38, 0x42, 0x25, 0xff, + 0x49, 0x82, 0x89, 0x75, 0x42, 0xdf, 0x35, 0xed, 0x1b, 0x0d, 0x53, 0x53, 0xdb, 0xbd, 0x21, 0xe4, + 0xfd, 0xcd, 0x58, 0xde, 0x7f, 0xa5, 0xcf, 0x31, 0x8b, 0x69, 0x9b, 0x97, 0xfd, 0xe5, 0xbf, 0x4b, + 0x50, 0x89, 0x51, 0x46, 0xd3, 0x04, 0x81, 0x92, 0x65, 0xda, 0xd4, 0x5f, 0xe3, 0x0f, 0xa5, 0x01, + 0x4b, 0xa9, 0x91, 0x55, 0x9e, 0xc1, 0x62, 0x0f, 0x9d, 0xd9, 0xb9, 0x65, 0x9b, 0xba, 0x88, 0xfb, + 0xc3, 0x49, 0x21, 0xc4, 0x0e, 0xed, 0xbc, 0x68, 0x9b, 0x3a, 0xe6, 0xd8, 0xf2, 0x1f, 0x25, 0x98, + 0x8e, 0x51, 0x0e, 0x21, 0xa5, 0x2b, 0xf1, 0x94, 0xfe, 0xe2, 0x61, 0x0c, 0xcb, 0x49, 0xec, 0x5f, + 0x25, 0xcd, 0x62, 0x0e, 0x40, 0x5b, 0x30, 0x6e, 0x99, 0x9d, 0xe6, 0x3d, 0xb8, 0x99, 0x9b, 0x62, + 0x2b, 0x64, 0x23, 0xc4, 0xc2, 0x51, 0x60, 0xb4, 0x0b, 0xd3, 0x86, 0xa2, 0x13, 
0xc7, 0x52, 0xda, + 0xa4, 0x79, 0x0f, 0xce, 0x45, 0x1e, 0xe0, 0xb7, 0x05, 0x49, 0x44, 0x9c, 0x16, 0x22, 0xff, 0x2a, + 0x65, 0xb7, 0x69, 0x53, 0xf4, 0x2a, 0x94, 0xf9, 0x23, 0x89, 0xb6, 0xa9, 0x89, 0xa5, 0xed, 0x2c, + 0x1b, 0x9a, 0x86, 0x68, 0xbb, 0xbd, 0x57, 0xfd, 0xef, 0x03, 0x8f, 0x75, 0x7d, 0x42, 0x1c, 0xc0, + 0xa0, 0x75, 0x28, 0x5a, 0x87, 0x29, 0x33, 0xf8, 0xc2, 0xc2, 0x6b, 0x0b, 0x8e, 0x23, 0xff, 0x23, + 0xa9, 0x38, 0x5f, 0x5e, 0xae, 0xdf, 0xb3, 0x01, 0x0b, 0xca, 0x9a, 0xdc, 0x41, 0xb3, 0x61, 0x54, + 0xac, 0xb2, 0x22, 0x2e, 0x2f, 0x1d, 0x26, 0x2e, 0xa3, 0x2b, 0x43, 0xb0, 0x89, 0xf0, 0x1b, 0x7d, + 0x41, 0xf2, 0x5f, 0x25, 0x98, 0xe6, 0x0a, 0xb5, 0x5d, 0x5b, 0xa5, 0xbd, 0xa1, 0x65, 0xd0, 0xad, + 0x58, 0x06, 0x5d, 0xee, 0xd3, 0xd0, 0x94, 0xc6, 0xb9, 0x59, 0xf4, 0x73, 0x09, 0x1e, 0x48, 0x51, + 0x0f, 0x21, 0xc3, 0x90, 0x78, 0x86, 0x79, 0xe5, 0xb0, 0x06, 0xe6, 0x64, 0x99, 0x9b, 0x90, 0x61, + 0x1e, 0x0f, 0xdc, 0x33, 0x00, 0x96, 0xad, 0xee, 0xa8, 0x1a, 0xe9, 0x8a, 0xcb, 0xe0, 0x72, 0x38, + 0x24, 0x8d, 0xa0, 0x07, 0x47, 0xa8, 0xd0, 0xb7, 0x60, 0xb6, 0x43, 0xb6, 0x14, 0x57, 0xa3, 0x8b, + 0x9d, 0xce, 0x92, 0x62, 0x29, 0x9b, 0xaa, 0xa6, 0x52, 0x55, 0xec, 0xb0, 0xc7, 0xea, 0x17, 0xbc, + 0x4b, 0xda, 0x2c, 0x8a, 0xdb, 0x7b, 0xd5, 0xc7, 0x0f, 0xbe, 0x98, 0xf1, 0x89, 0x7b, 0x38, 0x47, + 0x08, 0xfa, 0xae, 0x04, 0x15, 0x9b, 0xbc, 0xe3, 0xaa, 0x36, 0xe9, 0x2c, 0xdb, 0xa6, 0x15, 0xd3, + 0xa0, 0xc0, 0x35, 0xb8, 0xb4, 0xbf, 0x57, 0xad, 0xe0, 0x1c, 0x9a, 0x7e, 0x74, 0xc8, 0x15, 0x84, + 0x28, 0xcc, 0x28, 0x9a, 0x66, 0xbe, 0x4b, 0xe2, 0x1e, 0x28, 0x72, 0xf9, 0xf5, 0xfd, 0xbd, 0xea, + 0xcc, 0x62, 0xba, 0xbb, 0x1f, 0xd1, 0x59, 0xf0, 0x68, 0x01, 0x46, 0x77, 0x4c, 0xcd, 0xd5, 0x89, + 0x53, 0x29, 0x71, 0x49, 0x2c, 0xe3, 0x8e, 0x5e, 0xf5, 0x9a, 0x6e, 0xef, 0x55, 0x8f, 0x5e, 0x6c, + 0xf2, 0xa3, 0x0f, 0x9f, 0x8a, 0xed, 0xd1, 0x58, 0xcd, 0x24, 0xa6, 0x3c, 0x3f, 0x77, 0x2d, 0x87, + 0x39, 0xe6, 0x72, 0xd8, 0x85, 0xa3, 0x74, 0x48, 0x87, 0xb1, 0x6d, 0xb1, 0x6f, 0x77, 0x2a, 0xa3, + 0x03, 0xad, 0x7e, 0xb1, 0x7d, 0x7f, 0x7d, 0x5a, 0x88, 0x1c, 0xf3, 0x9b, 0x1d, 0x1c, 0x4a, 0x40, + 0x4f, 0xc0, 0x28, 0xff, 0x58, 0x59, 0xe6, 0xa7, 0xb5, 0xe5, 0x30, 0x13, 0x5d, 0xf6, 0x9a, 0xb1, + 0xdf, 0xef, 0x93, 0xae, 0x34, 0x96, 0xf8, 0xe1, 0x6a, 0x82, 0x74, 0xa5, 0xb1, 0x84, 0xfd, 0x7e, + 0x64, 0xc1, 0xa8, 0x43, 0x56, 0x55, 0xc3, 0xdd, 0xad, 0xc0, 0x40, 0xd7, 0xc5, 0xcd, 0x0b, 0x9c, + 0x3b, 0x71, 0x14, 0x15, 0x4a, 0x14, 0xfd, 0xd8, 0x17, 0x83, 0x76, 0x61, 0xcc, 0x76, 0x8d, 0x45, + 0x67, 0xc3, 0x21, 0x76, 0x65, 0x9c, 0xcb, 0xec, 0x37, 0x39, 0x63, 0x9f, 0x3f, 0x29, 0x35, 0xf0, + 0x60, 0x40, 0x81, 0x43, 0x61, 0xe8, 0x23, 0x09, 0x90, 0xe3, 0x5a, 0x96, 0x46, 0x74, 0x62, 0x50, + 0x45, 0xe3, 0xa7, 0x61, 0x4e, 0xe5, 0x18, 0xd7, 0xa1, 0xd1, 0xaf, 0xdd, 0x29, 0xa0, 0xa4, 0x32, + 0xc1, 0x51, 0x73, 0x9a, 0x14, 0x67, 0xe8, 0xc1, 0x86, 0x62, 0xcb, 0xe1, 0xbf, 0x2b, 0x13, 0x03, + 0x0d, 0x45, 0xf6, 0xa9, 0x60, 0x38, 0x14, 0xa2, 0x1f, 0xfb, 0x62, 0xd0, 0x55, 0x98, 0xb5, 0x89, + 0xd2, 0xb9, 0x62, 0x68, 0x3d, 0x6c, 0x9a, 0xf4, 0xa2, 0xaa, 0x11, 0xa7, 0xe7, 0x50, 0xa2, 0x57, + 0x26, 0x79, 0xd8, 0x04, 0x4f, 0x2e, 0x70, 0x26, 0x15, 0xce, 0xe1, 0xe6, 0x2f, 0x01, 0xc4, 0x19, + 0xec, 0x70, 0xde, 0xd2, 0x1d, 0xee, 0x25, 0x40, 0xa8, 0xea, 0x7d, 0x7b, 0x09, 0x10, 0x11, 0x71, + 0xf0, 0x11, 0xd2, 0x57, 0x23, 0x30, 0x13, 0x12, 0xdf, 0xf5, 0x4b, 0x80, 0x0c, 0x96, 0x21, 0xbc, + 0x04, 0xc8, 0xbe, 0x4a, 0x2f, 0xdc, 0xef, 0xab, 0xf4, 0xfb, 0xf0, 0x02, 0x81, 0xdf, 0xce, 0x87, + 0x4e, 0xfc, 0xf7, 0xbf, 0x9d, 0x0f, 0x75, 0xcd, 0x29, 0x67, 0x7e, 0x33, 0x12, 0x35, 0xe8, 0x3f, + 0xe8, 
0x0a, 0xf8, 0xf0, 0x2f, 0x0d, 0xe5, 0xcf, 0x0b, 0x70, 0x3c, 0x39, 0x63, 0x63, 0x37, 0x81, + 0xd2, 0x1d, 0x6f, 0x02, 0x1b, 0x70, 0x62, 0xcb, 0xd5, 0xb4, 0x1e, 0x77, 0x48, 0xe4, 0x3a, 0xd0, + 0x3b, 0xb5, 0x7f, 0x58, 0x70, 0x9e, 0xb8, 0x98, 0x41, 0x83, 0x33, 0x39, 0x73, 0x6e, 0x35, 0x0b, + 0x03, 0xdd, 0x6a, 0xa6, 0x2e, 0xd5, 0x8a, 0x77, 0x7f, 0xa9, 0x96, 0x7d, 0x43, 0x59, 0x1a, 0xe0, + 0x86, 0xf2, 0x5e, 0x5c, 0x29, 0x66, 0x24, 0xbe, 0x3b, 0x5d, 0x29, 0xca, 0x0f, 0xc3, 0x9c, 0x60, + 0x63, 0xdf, 0x4b, 0xa6, 0x41, 0x6d, 0x53, 0xd3, 0x88, 0xbd, 0xec, 0xea, 0x7a, 0x4f, 0x3e, 0x0f, + 0x93, 0xf1, 0x7b, 0x6d, 0x6f, 0xe4, 0xbd, 0xab, 0x76, 0x71, 0x97, 0x12, 0x19, 0x79, 0xaf, 0x1d, + 0x07, 0x14, 0xf2, 0x07, 0x12, 0xcc, 0x66, 0xbf, 0xa1, 0x43, 0x1a, 0x4c, 0xea, 0xca, 0x6e, 0xf4, + 0x11, 0xa1, 0x34, 0xe0, 0x8e, 0x1b, 0xed, 0xef, 0x55, 0x27, 0xd7, 0x62, 0x58, 0x38, 0x81, 0x2d, + 0x7f, 0x21, 0xc1, 0xc9, 0x9c, 0x6b, 0xc6, 0xe1, 0x6a, 0x82, 0xae, 0x41, 0x59, 0x57, 0x76, 0x9b, + 0xae, 0xdd, 0x25, 0x03, 0x9f, 0x31, 0xf0, 0x5c, 0xb2, 0x26, 0x50, 0x70, 0x80, 0x27, 0x7f, 0x22, + 0x41, 0x25, 0xaf, 0x1e, 0x44, 0x67, 0x63, 0x17, 0xa2, 0x8f, 0x26, 0x2e, 0x44, 0xa7, 0x53, 0x7c, + 0x43, 0xba, 0x0e, 0xfd, 0x54, 0x82, 0xd9, 0xec, 0xba, 0x19, 0x3d, 0x13, 0xd3, 0xb8, 0x9a, 0xd0, + 0x78, 0x2a, 0xc1, 0x25, 0xf4, 0xdd, 0x86, 0x49, 0x51, 0x5d, 0x0b, 0x18, 0xe1, 0xe5, 0x27, 0x0f, + 0xce, 0xaa, 0x02, 0xcc, 0xaf, 0x13, 0xf9, 0x48, 0xc6, 0xdb, 0x70, 0x02, 0x57, 0xfe, 0xd9, 0x08, + 0x94, 0x9a, 0x6d, 0x45, 0x23, 0x43, 0x28, 0xea, 0xae, 0xc5, 0x8a, 0xba, 0x7e, 0xdf, 0xf9, 0x73, + 0x2d, 0x73, 0xeb, 0xb9, 0xcd, 0x44, 0x3d, 0x77, 0x6e, 0x20, 0xf4, 0x83, 0x4b, 0xb9, 0xe7, 0x61, + 0x2c, 0x50, 0xa2, 0xbf, 0xd5, 0x43, 0xfe, 0x78, 0x04, 0xc6, 0x23, 0x22, 0xfa, 0x5c, 0x7b, 0x76, + 0x62, 0xab, 0xf7, 0x20, 0x7f, 0x29, 0x8a, 0xc8, 0xae, 0xf9, 0xeb, 0xb7, 0xf7, 0x86, 0x2e, 0x7c, + 0x0b, 0x95, 0x5e, 0xd6, 0xcf, 0xc3, 0x24, 0xe5, 0xff, 0xb0, 0x09, 0xce, 0xf8, 0x0a, 0x3c, 0x8a, + 0x83, 0x97, 0x99, 0xad, 0x58, 0x2f, 0x4e, 0x50, 0xcf, 0xbd, 0x00, 0x13, 0x31, 0x61, 0x7d, 0x3d, + 0x79, 0xfb, 0xad, 0x04, 0x8f, 0xde, 0x71, 0x4f, 0x86, 0xea, 0xb1, 0xe9, 0x55, 0x4b, 0x4c, 0xaf, + 0xf9, 0x7c, 0x80, 0x21, 0x3e, 0x96, 0xf8, 0xd1, 0x08, 0xa0, 0xd6, 0xb6, 0x6a, 0x77, 0x1a, 0x8a, + 0x4d, 0x7b, 0x58, 0xfc, 0x8f, 0x6a, 0x08, 0x13, 0xee, 0x2c, 0x8c, 0x77, 0x88, 0xd3, 0xb6, 0x55, + 0xee, 0x2c, 0xb1, 0x57, 0x08, 0xce, 0x41, 0x96, 0xc3, 0x2e, 0x1c, 0xa5, 0x43, 0x5d, 0x28, 0xef, + 0x78, 0xff, 0xd4, 0xf3, 0x6f, 0xde, 0xfa, 0x2d, 0x66, 0xc3, 0xff, 0xfa, 0x85, 0xf1, 0x25, 0x1a, + 0x1c, 0x1c, 0x80, 0xcb, 0x1f, 0x4a, 0x30, 0x9b, 0x76, 0xcc, 0x32, 0x53, 0xfd, 0xfe, 0x3b, 0xe7, + 0x61, 0x28, 0x72, 0x74, 0xe6, 0x95, 0x63, 0xde, 0x89, 0x37, 0x93, 0x8c, 0x79, 0xab, 0xfc, 0xa5, + 0x04, 0x73, 0xd9, 0xaa, 0x0d, 0x61, 0x2b, 0x71, 0x3d, 0xbe, 0x95, 0xe8, 0xf7, 0xd8, 0x20, 0x5b, + 0xef, 0x9c, 0x6d, 0xc5, 0x5e, 0xe6, 0x18, 0x0c, 0xc1, 0xc8, 0xad, 0xb8, 0x91, 0x8b, 0x87, 0x36, + 0x32, 0xdb, 0xc0, 0xfa, 0x13, 0x37, 0x6f, 0xcd, 0x1f, 0xf9, 0xec, 0xd6, 0xfc, 0x91, 0x3f, 0xdf, + 0x9a, 0x3f, 0xf2, 0xfe, 0xfe, 0xbc, 0x74, 0x73, 0x7f, 0x5e, 0xfa, 0x6c, 0x7f, 0x5e, 0xfa, 0xdb, + 0xfe, 0xbc, 0xf4, 0x83, 0x2f, 0xe6, 0x8f, 0x5c, 0x1b, 0x15, 0x98, 0xff, 0x0a, 0x00, 0x00, 0xff, + 0xff, 0x56, 0x18, 0xbd, 0xf5, 0xb1, 0x3c, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/generated.proto new file mode 100644 index 0000000000..8ba21bc9b4 --- /dev/null +++ 
b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/generated.proto @@ -0,0 +1,1005 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.extensions.v1beta1; + +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// An APIVersion represents a single concrete version of an object model. +message APIVersion { + // Name of this version (e.g. 'v1'). + // +optional + optional string name = 1; +} + +message CustomMetricCurrentStatus { + // Custom Metric name. + optional string name = 1; + + // Custom Metric value (average). + optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; +} + +message CustomMetricCurrentStatusList { + repeated CustomMetricCurrentStatus items = 1; +} + +// Alpha-level support for Custom Metrics in HPA (as annotations). +message CustomMetricTarget { + // Custom Metric name. + optional string name = 1; + + // Custom Metric value (average). + optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; +} + +message CustomMetricTargetList { + repeated CustomMetricTarget items = 1; +} + +// DaemonSet represents the configuration of a daemon set. +message DaemonSet { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The desired behavior of this daemon set. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional DaemonSetSpec spec = 2; + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional DaemonSetStatus status = 3; +} + +// DaemonSetList is a collection of daemon sets. +message DaemonSetList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // A list of daemon sets. + repeated DaemonSet items = 2; +} + +// DaemonSetSpec is the specification of a daemon set. +message DaemonSetSpec { + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // If empty, defaulted to labels on Pod template. 
+ // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + + // An object that describes the pod that will be created. + // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). + // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template + optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 2; + + // An update strategy to replace existing DaemonSet pods with new pods. + // +optional + optional DaemonSetUpdateStrategy updateStrategy = 3; + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + optional int32 minReadySeconds = 4; + + // A sequence number representing a specific generation of the template. + // Populated by the system. It can be set only during the creation. + // +optional + optional int64 templateGeneration = 5; +} + +// DaemonSetStatus represents the current status of a daemon set. +message DaemonSetStatus { + // The number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. + // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + optional int32 currentNumberScheduled = 1; + + // The number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. + // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + optional int32 numberMisscheduled = 2; + + // The total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). + // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + optional int32 desiredNumberScheduled = 3; + + // The number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running and ready. + optional int32 numberReady = 4; + + // The most recent generation observed by the daemon set controller. + // +optional + optional int64 observedGeneration = 5; + + // The total number of nodes that are running updated daemon pod + // +optional + optional int32 updatedNumberScheduled = 6; + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + optional int32 numberAvailable = 7; + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + optional int32 numberUnavailable = 8; +} + +message DaemonSetUpdateStrategy { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". + // Default is OnDelete. + // +optional + optional string type = 1; + + // Rolling update config params. Present only if type = "RollingUpdate". + // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as DeploymentStrategy.RollingUpdate. + // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + optional RollingUpdateDaemonSet rollingUpdate = 2; +} + +// Deployment enables declarative updates for Pods and ReplicaSets. +message Deployment { + // Standard object metadata. 
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the Deployment. + // +optional + optional DeploymentSpec spec = 2; + + // Most recently observed status of the Deployment. + // +optional + optional DeploymentStatus status = 3; +} + +// DeploymentCondition describes the state of a deployment at a certain point. +message DeploymentCondition { + // Type of deployment condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + + // Last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + + // The reason for the condition's last transition. + optional string reason = 4; + + // A human readable message indicating details about the transition. + optional string message = 5; +} + +// DeploymentList is a list of Deployments. +message DeploymentList { + // Standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Deployments. + repeated Deployment items = 2; +} + +// DeploymentRollback stores the information required to rollback a deployment. +message DeploymentRollback { + // Required: This must match the Name of a deployment. + optional string name = 1; + + // The annotations to be updated to a deployment + // +optional + map updatedAnnotations = 2; + + // The config of this deployment rollback. + optional RollbackConfig rollbackTo = 3; +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +message DeploymentSpec { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + optional int32 replicas = 1; + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template describes the pods that will be created. + optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + optional DeploymentStrategy strategy = 4; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 5; + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + optional int32 revisionHistoryLimit = 6; + + // Indicates that the deployment is paused and will not be processed by the + // deployment controller. + // +optional + optional bool paused = 7; + + // The config this deployment is rolling back to. Will be cleared after rollback is done. + // +optional + optional RollbackConfig rollbackTo = 8; + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. 
Once autoRollback is + // implemented, the deployment controller will automatically rollback failed + // deployments. Note that progress will not be estimated during the time a + // deployment is paused. This is not set by default. + optional int32 progressDeadlineSeconds = 9; +} + +// DeploymentStatus is the most recently observed status of the Deployment. +message DeploymentStatus { + // The generation observed by the deployment controller. + // +optional + optional int64 observedGeneration = 1; + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + optional int32 replicas = 2; + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + optional int32 updatedReplicas = 3; + + // Total number of ready pods targeted by this deployment. + // +optional + optional int32 readyReplicas = 7; + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + optional int32 availableReplicas = 4; + + // Total number of unavailable pods targeted by this deployment. + // +optional + optional int32 unavailableReplicas = 5; + + // Represents the latest available observations of a deployment's current state. + repeated DeploymentCondition conditions = 6; +} + +// DeploymentStrategy describes how to replace existing pods with new ones. +message DeploymentStrategy { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + optional string type = 1; + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + optional RollingUpdateDeployment rollingUpdate = 2; +} + +// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. +message FSGroupStrategyOptions { + // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext. + // +optional + optional string rule = 1; + + // Ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. + // +optional + repeated IDRange ranges = 2; +} + +// HTTPIngressPath associates a path regex with a backend. Incoming urls matching +// the path are forwarded to the backend. +message HTTPIngressPath { + // Path is an extended POSIX regex as defined by IEEE Std 1003.1, + // (i.e this follows the egrep/unix syntax, not the perl syntax) + // matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" + // part of a URL as defined by RFC 3986. Paths must begin with + // a '/'. If unspecified, the path defaults to a catch all sending + // traffic to the backend. + // +optional + optional string path = 1; + + // Backend defines the referenced service endpoint to which the traffic + // will be forwarded to. + optional IngressBackend backend = 2; +} + +// HTTPIngressRuleValue is a list of http selectors pointing to backends. +// In the example: http:///? -> backend where +// where parts of the url correspond to RFC 3986, this resource will be used +// to match against everything after the last '/' and before the first '?' +// or '#'. +message HTTPIngressRuleValue { + // A collection of paths that map requests to backends. 
+ repeated HTTPIngressPath paths = 1; +} + +// Host Port Range defines a range of host ports that will be enabled by a policy +// for pods to use. It requires both the start and end to be defined. +message HostPortRange { + // min is the start of the range, inclusive. + optional int32 min = 1; + + // max is the end of the range, inclusive. + optional int32 max = 2; +} + +// ID Range provides a min/max of an allowed range of IDs. +message IDRange { + // Min is the start of the range, inclusive. + optional int64 min = 1; + + // Max is the end of the range, inclusive. + optional int64 max = 2; +} + +// Ingress is a collection of rules that allow inbound connections to reach the +// endpoints defined by a backend. An Ingress can be configured to give services +// externally-reachable urls, load balance traffic, terminate SSL, offer name +// based virtual hosting etc. +message Ingress { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec is the desired state of the Ingress. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional IngressSpec spec = 2; + + // Status is the current state of the Ingress. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional IngressStatus status = 3; +} + +// IngressBackend describes all endpoints for a given service and port. +message IngressBackend { + // Specifies the name of the referenced service. + optional string serviceName = 1; + + // Specifies the port of the referenced service. + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; +} + +// IngressList is a collection of Ingress. +message IngressList { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Ingress. + repeated Ingress items = 2; +} + +// IngressRule represents the rules mapping the paths under a specified host to +// the related backend services. Incoming requests are first evaluated for a host +// match, then routed to the backend associated with the matching IngressRuleValue. +message IngressRule { + // Host is the fully qualified domain name of a network host, as defined + // by RFC 3986. Note the following deviations from the "host" part of the + // URI as defined in the RFC: + // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the + // IP in the Spec of the parent Ingress. + // 2. The `:` delimiter is not respected because ports are not allowed. + // Currently the port of an Ingress is implicitly :80 for http and + // :443 for https. + // Both these may change in the future. + // Incoming requests are matched against the host before the IngressRuleValue. + // If the host is unspecified, the Ingress routes all traffic based on the + // specified IngressRuleValue. + // +optional + optional string host = 1; + + // IngressRuleValue represents a rule to route requests for this IngressRule. + // If unspecified, the rule defaults to a http catch-all. Whether that sends + // just traffic matching the host to the default backend or all traffic to the + // default backend, is left to the controller fulfilling the Ingress. Http is + // currently the only supported IngressRuleValue. 
+ // +optional + optional IngressRuleValue ingressRuleValue = 2; +} + +// IngressRuleValue represents a rule to apply against incoming requests. If the +// rule is satisfied, the request is routed to the specified backend. Currently +// mixing different types of rules in a single Ingress is disallowed, so exactly +// one of the following must be set. +message IngressRuleValue { + // +optional + optional HTTPIngressRuleValue http = 1; +} + +// IngressSpec describes the Ingress the user wishes to exist. +message IngressSpec { + // A default backend capable of servicing requests that don't match any + // rule. At least one of 'backend' or 'rules' must be specified. This field + // is optional to allow the loadbalancer controller or defaulting logic to + // specify a global default. + // +optional + optional IngressBackend backend = 1; + + // TLS configuration. Currently the Ingress only supports a single TLS + // port, 443. If multiple members of this list specify different hosts, they + // will be multiplexed on the same port according to the hostname specified + // through the SNI TLS extension, if the ingress controller fulfilling the + // ingress supports SNI. + // +optional + repeated IngressTLS tls = 2; + + // A list of host rules used to configure the Ingress. If unspecified, or + // no rule matches, all traffic is sent to the default backend. + // +optional + repeated IngressRule rules = 3; +} + +// IngressStatus describe the current state of the Ingress. +message IngressStatus { + // LoadBalancer contains the current status of the load-balancer. + // +optional + optional k8s.io.kubernetes.pkg.api.v1.LoadBalancerStatus loadBalancer = 1; +} + +// IngressTLS describes the transport layer security associated with an Ingress. +message IngressTLS { + // Hosts are a list of hosts included in the TLS certificate. The values in + // this list must match the name/s used in the tlsSecret. Defaults to the + // wildcard host setting for the loadbalancer controller fulfilling this + // Ingress, if left unspecified. + // +optional + repeated string hosts = 1; + + // SecretName is the name of the secret used to terminate SSL traffic on 443. + // Field is left optional to allow SSL routing based on SNI hostname alone. + // If the SNI host in a listener conflicts with the "Host" header field used + // by an IngressRule, the SNI host is used for termination and value of the + // Host header is used for routing. + // +optional + optional string secretName = 2; +} + +message NetworkPolicy { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior for this NetworkPolicy. + // +optional + optional NetworkPolicySpec spec = 2; +} + +// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. +message NetworkPolicyIngressRule { + // List of ports which should be made accessible on the pods selected for this rule. + // Each item in this list is combined using a logical OR. + // If this field is not provided, this rule matches all ports (traffic not restricted by port). + // If this field is empty, this rule matches no ports (no traffic matches). + // If this field is present and contains at least one item, then this rule allows traffic + // only if the traffic matches at least one port in the list. 
+ // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. + // +optional + repeated NetworkPolicyPort ports = 1; + + // List of sources which should be able to access the pods selected for this rule. + // Items in this list are combined using a logical OR operation. + // If this field is not provided, this rule matches all sources (traffic not restricted by source). + // If this field is empty, this rule matches no sources (no traffic matches). + // If this field is present and contains at least on item, this rule allows traffic only if the + // traffic matches at least one item in the from list. + // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. + // +optional + repeated NetworkPolicyPeer from = 2; +} + +// Network Policy List is a list of NetworkPolicy objects. +message NetworkPolicyList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated NetworkPolicy items = 2; +} + +message NetworkPolicyPeer { + // This is a label selector which selects Pods in this namespace. + // This field follows standard label selector semantics. + // If not provided, this selector selects no pods. + // If present but empty, this selector selects all pods in this namespace. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + + // Selects Namespaces using cluster scoped-labels. This + // matches all pods in all namespaces selected by this label selector. + // This field follows standard label selector semantics. + // If omitted, this selector selects no namespaces. + // If present but empty, this selector selects all namespaces. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; +} + +message NetworkPolicyPort { + // Optional. The protocol (TCP or UDP) which traffic must match. + // If not specified, this field defaults to TCP. + // +optional + optional string protocol = 1; + + // If specified, the port on the given protocol. This can + // either be a numerical or named port on a pod. If this field is not provided, + // this matches all port names and numbers. + // If present, only traffic on the specified protocol AND port + // will be matched. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; +} + +message NetworkPolicySpec { + // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules + // is applied to any pods selected by this field. Multiple network policies can select the + // same set of pods. In this case, the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + + // List of ingress rules to be applied to the selected pods. + // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, + // OR if the traffic source is the pod's local node, + // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy + // objects whose podSelector matches the pod. + // If this field is empty then this NetworkPolicy does not affect ingress isolation. 
+ // If this field is present and contains at least one rule, this policy allows any traffic + // which matches at least one of the ingress rules in this list. + // +optional + repeated NetworkPolicyIngressRule ingress = 2; +} + +// Pod Security Policy governs the ability to make requests that affect the Security Context +// that will be applied to a pod and container. +message PodSecurityPolicy { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec defines the policy enforced. + // +optional + optional PodSecurityPolicySpec spec = 2; +} + +// Pod Security Policy List is a list of PodSecurityPolicy objects. +message PodSecurityPolicyList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated PodSecurityPolicy items = 2; +} + +// Pod Security Policy Spec defines the policy enforced. +message PodSecurityPolicySpec { + // privileged determines if a pod can request to be run as privileged. + // +optional + optional bool privileged = 1; + + // DefaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. You may not list a capabiility in both + // DefaultAddCapabilities and RequiredDropCapabilities. + // +optional + repeated string defaultAddCapabilities = 2; + + // RequiredDropCapabilities are the capabilities that will be dropped from the container. These + // are required to be dropped and cannot be added. + // +optional + repeated string requiredDropCapabilities = 3; + + // AllowedCapabilities is a list of capabilities that can be requested to add to the container. + // Capabilities in this field may be added at the pod author's discretion. + // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. + // +optional + repeated string allowedCapabilities = 4; + + // volumes is a white list of allowed volume plugins. Empty indicates that all plugins + // may be used. + // +optional + repeated string volumes = 5; + + // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + // +optional + optional bool hostNetwork = 6; + + // hostPorts determines which host port ranges are allowed to be exposed. + // +optional + repeated HostPortRange hostPorts = 7; + + // hostPID determines if the policy allows the use of HostPID in the pod spec. + // +optional + optional bool hostPID = 8; + + // hostIPC determines if the policy allows the use of HostIPC in the pod spec. + // +optional + optional bool hostIPC = 9; + + // seLinux is the strategy that will dictate the allowable labels that may be set. + optional SELinuxStrategyOptions seLinux = 10; + + // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. + optional RunAsUserStrategyOptions runAsUser = 11; + + // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + optional SupplementalGroupsStrategyOptions supplementalGroups = 12; + + // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. 
+ optional FSGroupStrategyOptions fsGroup = 13; + + // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file + // system. If the container specifically requests to run with a non-read only root file system + // the PSP should deny the pod. + // If set to false the container may run with a read only root file system if it wishes but it + // will not be forced to. + // +optional + optional bool readOnlyRootFilesystem = 14; +} + +// ReplicaSet represents the configuration of a ReplicaSet. +message ReplicaSet { + // If the Labels of a ReplicaSet are empty, they are defaulted to + // be the same as the Pod(s) that the ReplicaSet manages. + // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the specification of the desired behavior of the ReplicaSet. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional ReplicaSetSpec spec = 2; + + // Status is the most recently observed status of the ReplicaSet. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional ReplicaSetStatus status = 3; +} + +// ReplicaSetCondition describes the state of a replica set at a certain point. +message ReplicaSetCondition { + // Type of replica set condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// ReplicaSetList is a collection of ReplicaSets. +message ReplicaSetList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ReplicaSets. + // More info: http://kubernetes.io/docs/user-guide/replication-controller + repeated ReplicaSet items = 2; +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +message ReplicaSetSpec { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + // +optional + optional int32 replicas = 1; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 4; + + // Selector is a label query over pods that should match the replica count. + // If the selector is empty, it is defaulted to the labels present on the pod template. + // Label keys and values that must match in order to be controlled by this replica set. 
+ // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template + // +optional + optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +message ReplicaSetStatus { + // Replicas is the most recently oberved number of replicas. + // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + optional int32 replicas = 1; + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + optional int32 fullyLabeledReplicas = 2; + + // The number of ready replicas for this replica set. + // +optional + optional int32 readyReplicas = 4; + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // +optional + optional int32 availableReplicas = 5; + + // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. + // +optional + optional int64 observedGeneration = 3; + + // Represents the latest available observations of a replica set's current state. + // +optional + repeated ReplicaSetCondition conditions = 6; +} + +// Dummy definition +message ReplicationControllerDummy { +} + +message RollbackConfig { + // The revision to rollback to. If set to 0, rollbck to the last revision. + // +optional + optional int64 revision = 1; +} + +// Spec to control the desired behavior of daemon set rolling update. +message RollingUpdateDaemonSet { + // The maximum number of DaemonSet pods that can be unavailable during the + // update. Value can be an absolute number (ex: 5) or a percentage of total + // number of DaemonSet pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // This cannot be 0. + // Default value is 1. + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; +} + +// Spec to control the desired behavior of rolling update. +message RollingUpdateDeployment { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // By default, a fixed value of 1 is used. + // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods + // immediately when the rolling update starts. 
Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // By default, a value of 1 is used. + // Example: when this is set to 30%, the new RC can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is atmost 130% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; +} + +// Run A sUser Strategy Options defines the strategy type and any options used to create the strategy. +message RunAsUserStrategyOptions { + // Rule is the strategy that will dictate the allowable RunAsUser values that may be set. + optional string rule = 1; + + // Ranges are the allowed ranges of uids that may be used. + // +optional + repeated IDRange ranges = 2; +} + +// SELinux Strategy Options defines the strategy type and any options used to create the strategy. +message SELinuxStrategyOptions { + // type is the strategy that will dictate the allowable labels that may be set. + optional string rule = 1; + + // seLinuxOptions required to run as; required for MustRunAs + // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context + // +optional + optional k8s.io.kubernetes.pkg.api.v1.SELinuxOptions seLinuxOptions = 2; +} + +// represents a scaling request for a resource. +message Scale { + // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + optional ScaleSpec spec = 2; + + // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // +optional + optional ScaleStatus status = 3; +} + +// describes the attributes of a scale subresource +message ScaleSpec { + // desired number of instances for the scaled object. + // +optional + optional int32 replicas = 1; +} + +// represents the current status of a scale subresource. +message ScaleStatus { + // actual number of observed instances of the scaled object. + optional int32 replicas = 1; + + // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + map selector = 2; + + // label selector for pods that should match the replicas count. This is a serializated + // version of both map-based and more expressive set-based selectors. This is done to + // avoid introspection in the clients. The string will be in the same format as the + // query-param syntax. 
If the target type only supports map-based selectors, both this + // field and map-based selector field are populated. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + optional string targetSelector = 3; +} + +// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +message SupplementalGroupsStrategyOptions { + // Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. + // +optional + optional string rule = 1; + + // Ranges are the allowed ranges of supplemental groups. If you would like to force a single + // supplemental group then supply a single range with the same start and end. + // +optional + repeated IDRange ranges = 2; +} + +// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource +// types to the API. It consists of one or more Versions of the api. +message ThirdPartyResource { + // Standard object metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Description is the description of this object. + // +optional + optional string description = 2; + + // Versions are versions for this third party object + // +optional + repeated APIVersion versions = 3; +} + +// An internal object, used for versioned storage in etcd. Not exposed to the end user. +message ThirdPartyResourceData { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Data is the raw JSON data for this data. + // +optional + optional bytes data = 2; +} + +// ThirdPartyResrouceDataList is a list of ThirdPartyResourceData. +message ThirdPartyResourceDataList { + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of ThirdpartyResourceData. + repeated ThirdPartyResourceData items = 2; +} + +// ThirdPartyResourceList is a list of ThirdPartyResources. +message ThirdPartyResourceList { + // Standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of ThirdPartyResources. + repeated ThirdPartyResource items = 2; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/register.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/register.go new file mode 100644 index 0000000000..fa5b2e1b24 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/register.go @@ -0,0 +1,67 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "extensions" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Deployment{}, + &DeploymentList{}, + &DeploymentRollback{}, + &ReplicationControllerDummy{}, + &Scale{}, + &ThirdPartyResource{}, + &ThirdPartyResourceList{}, + &DaemonSetList{}, + &DaemonSet{}, + &ThirdPartyResourceData{}, + &ThirdPartyResourceDataList{}, + &Ingress{}, + &IngressList{}, + &ReplicaSet{}, + &ReplicaSetList{}, + &PodSecurityPolicy{}, + &PodSecurityPolicyList{}, + &NetworkPolicy{}, + &NetworkPolicyList{}, + ) + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types.generated.go new file mode 100644 index 0000000000..4f179e34f5 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types.generated.go @@ -0,0 +1,21745 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +package v1beta1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg3_resource "k8s.io/apimachinery/pkg/api/resource" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + pkg5_intstr "k8s.io/apimachinery/pkg/util/intstr" + pkg4_v1 "k8s.io/client-go/pkg/api/v1" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg3_resource.Quantity + var v1 pkg1_v1.TypeMeta + var v2 pkg2_types.UID + var v3 pkg5_intstr.IntOrString + var v4 pkg4_v1.PodTemplateSpec + var v5 time.Time + _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 + } +} + +func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := 
r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv7 := &x.Replicas + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = len(x.Selector) != 0 + yyq2[2] = x.TargetSelector != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncMapStringStringV(x.Selector, false, e) + } + } + } else { + 
r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncMapStringStringV(x.Selector, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TargetSelector)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TargetSelector)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "selector": + if r.TryDecodeAsNil() { + x.Selector = nil + } else { + yyv6 := &x.Selector + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecMapStringStringX(yyv6, false, d) + } + } + case "targetSelector": + if r.TryDecodeAsNil() { + x.TargetSelector = "" + } else { + yyv8 := &x.TargetSelector + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + 
var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv11 := &x.Replicas + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(yyv11)) = int32(r.DecodeInt(32)) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Selector = nil + } else { + yyv13 := &x.Selector + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecMapStringStringX(yyv13, false, d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetSelector = "" + } else { + yyv15 := &x.TargetSelector + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, 
string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta 
= pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ScaleSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ScaleStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ScaleSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ScaleStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicationControllerDummy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = 
yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicationControllerDummy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicationControllerDummy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end 
switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicationControllerDummy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv9 := &x.Kind + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv11 := &x.APIVersion + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CustomMetricTarget) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.TargetValue + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.TargetValue + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CustomMetricTarget) CodecDecodeSelf(d *codec1978.Decoder) { + 
var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CustomMetricTarget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "value": + if r.TryDecodeAsNil() { + x.TargetValue = pkg3_resource.Quantity{} + } else { + yyv6 := &x.TargetValue + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CustomMetricTarget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv9 := &x.Name + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetValue = pkg3_resource.Quantity{} + } else { + yyv11 := &x.TargetValue + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) + } else { + z.DecFallback(yyv11, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CustomMetricTargetList) CodecEncodeSelf(e *codec1978.Encoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CustomMetricTargetList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CustomMetricTargetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv4 := &x.Items + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CustomMetricTargetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv7 := &x.Items + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CustomMetricCurrentStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.CurrentValue + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.CurrentValue + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CustomMetricCurrentStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CustomMetricCurrentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = 
h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "value": + if r.TryDecodeAsNil() { + x.CurrentValue = pkg3_resource.Quantity{} + } else { + yyv6 := &x.CurrentValue + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CustomMetricCurrentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv9 := &x.Name + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentValue = pkg3_resource.Quantity{} + } else { + yyv11 := &x.CurrentValue + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) + } else { + z.DecFallback(yyv11, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CustomMetricCurrentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e) + } + } + } else { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CustomMetricCurrentStatusList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv4 := &x.Items + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv7 := &x.Items + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ThirdPartyResource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + 
} else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = x.Description != "" + yyq2[4] = len(x.Versions) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Description)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("description")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Description)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Versions == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + h.encSliceAPIVersion(([]APIVersion)(x.Versions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("versions")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Versions == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encSliceAPIVersion(([]APIVersion)(x.Versions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ThirdPartyResource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ThirdPartyResource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "description": + if r.TryDecodeAsNil() { + x.Description = "" + } else { + yyv10 := &x.Description + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "versions": + if r.TryDecodeAsNil() { + x.Versions = nil + } else { + yyv12 := &x.Versions + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + h.decSliceAPIVersion((*[]APIVersion)(yyv12), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ThirdPartyResource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + 
} else { + yyv15 := &x.Kind + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv17 := &x.APIVersion + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv19 := &x.ObjectMeta + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else { + z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Description = "" + } else { + yyv21 := &x.Description + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Versions = nil + } else { + yyv23 := &x.Versions + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + h.decSliceAPIVersion((*[]APIVersion)(yyv23), d) + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ThirdPartyResourceList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ThirdPartyResourceList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ThirdPartyResourceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := 
&x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ThirdPartyResourceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *APIVersion) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := 
!z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *APIVersion) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *APIVersion) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *APIVersion) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv7 := &x.Name + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if 
yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ThirdPartyResourceData) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = len(x.Data) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Data == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("data")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Data == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) + } + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ThirdPartyResourceData) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ThirdPartyResourceData) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "data": + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv10 := &x.Data + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *yyv10 = r.DecodeBytes(*(*[]byte)(yyv10), false, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ThirdPartyResourceData) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := 
z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv19 := &x.Data + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *yyv19 = r.DecodeBytes(*(*[]byte)(yyv19), false, false) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Deployment) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + 
r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Deployment) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Deployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = DeploymentSpec{} + } else { + yyv10 := 
&x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = DeploymentStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = DeploymentSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = DeploymentStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [9]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != nil + yyq2[1] = x.Selector != nil + yyq2[3] = true + yyq2[4] = x.MinReadySeconds != 0 + yyq2[5] = x.RevisionHistoryLimit != nil + yyq2[6] = x.Paused != false + yyq2[7] = x.RollbackTo != nil + yyq2[8] = 
x.ProgressDeadlineSeconds != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(9) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Replicas == nil { + r.EncodeNil() + } else { + yy4 := *x.Replicas + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Replicas == nil { + r.EncodeNil() + } else { + yy6 := *x.Replicas + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.Template + yy12.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.Template + yy14.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy17 := &x.Strategy + yy17.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("strategy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy19 := &x.Strategy + yy19.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.RevisionHistoryLimit == nil { + r.EncodeNil() + } else { + yy25 := *x.RevisionHistoryLimit + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeInt(int64(yy25)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("revisionHistoryLimit")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RevisionHistoryLimit == nil { + r.EncodeNil() + } else { + yy27 := *x.RevisionHistoryLimit + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeInt(int64(yy27)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym30 := z.EncBinary() + _ = yym30 + if false { + } else { + r.EncodeBool(bool(x.Paused)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("paused")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeBool(bool(x.Paused)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.RollbackTo == nil { + r.EncodeNil() + } else { + x.RollbackTo.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RollbackTo == nil { + r.EncodeNil() + } else { + x.RollbackTo.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.ProgressDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy36 := *x.ProgressDeadlineSeconds + yym37 := z.EncBinary() + _ = yym37 + if false { + } else { + r.EncodeInt(int64(yy36)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("progressDeadlineSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ProgressDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy38 := *x.ProgressDeadlineSeconds + yym39 := z.EncBinary() + _ = yym39 + if false { + } else { + r.EncodeInt(int64(yy38)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := 
string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = pkg4_v1.PodTemplateSpec{} + } else { + yyv8 := &x.Template + yyv8.CodecDecodeSelf(d) + } + case "strategy": + if r.TryDecodeAsNil() { + x.Strategy = DeploymentStrategy{} + } else { + yyv9 := &x.Strategy + yyv9.CodecDecodeSelf(d) + } + case "minReadySeconds": + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv10 := &x.MinReadySeconds + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "revisionHistoryLimit": + if r.TryDecodeAsNil() { + if x.RevisionHistoryLimit != nil { + x.RevisionHistoryLimit = nil + } + } else { + if x.RevisionHistoryLimit == nil { + x.RevisionHistoryLimit = new(int32) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + case "paused": + if r.TryDecodeAsNil() { + x.Paused = false + } else { + yyv14 := &x.Paused + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + case "rollbackTo": + if r.TryDecodeAsNil() { + if x.RollbackTo != nil { + x.RollbackTo = nil + } + } else { + if x.RollbackTo == nil { + x.RollbackTo = new(RollbackConfig) + } + x.RollbackTo.CodecDecodeSelf(d) + } + case "progressDeadlineSeconds": + if r.TryDecodeAsNil() { + if x.ProgressDeadlineSeconds != nil { + x.ProgressDeadlineSeconds = nil + } + } else { + if x.ProgressDeadlineSeconds == nil { + x.ProgressDeadlineSeconds = new(int32) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(x.ProgressDeadlineSeconds)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj19 int + var yyb19 bool + var yyhl19 bool = l >= 0 + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else 
{ + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = pkg4_v1.PodTemplateSpec{} + } else { + yyv24 := &x.Template + yyv24.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Strategy = DeploymentStrategy{} + } else { + yyv25 := &x.Strategy + yyv25.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv26 := &x.MinReadySeconds + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*int32)(yyv26)) = int32(r.DecodeInt(32)) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RevisionHistoryLimit != nil { + x.RevisionHistoryLimit = nil + } + } else { + if x.RevisionHistoryLimit == nil { + x.RevisionHistoryLimit = new(int32) + } + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Paused = false + } else { + yyv30 := &x.Paused + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*bool)(yyv30)) = r.DecodeBool() + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RollbackTo != nil { + x.RollbackTo = nil + } + } else { + if x.RollbackTo == nil { + x.RollbackTo = new(RollbackConfig) + } + x.RollbackTo.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ProgressDeadlineSeconds != nil { + x.ProgressDeadlineSeconds = nil + } + } else { + if x.ProgressDeadlineSeconds == nil { + x.ProgressDeadlineSeconds = new(int32) + } + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*int32)(x.ProgressDeadlineSeconds)) = int32(r.DecodeInt(32)) + } + } + for { + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + 
z.DecStructFieldNotFound(yyj19-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentRollback) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[3] = len(x.UpdatedAnnotations) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.UpdatedAnnotations == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("updatedAnnotations")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.UpdatedAnnotations == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy16 := &x.RollbackTo + yy16.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy18 := &x.RollbackTo + yy18.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentRollback) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentRollback) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "updatedAnnotations": + if r.TryDecodeAsNil() { + x.UpdatedAnnotations = nil + } else { + yyv10 := &x.UpdatedAnnotations + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + z.F.DecMapStringStringX(yyv10, false, d) + } + } + case "rollbackTo": + if r.TryDecodeAsNil() { + x.RollbackTo = RollbackConfig{} + } else { + yyv12 := &x.RollbackTo + yyv12.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentRollback) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv14 := &x.Kind + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + 
} else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv16 := &x.APIVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv18 := &x.Name + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UpdatedAnnotations = nil + } else { + yyv20 := &x.UpdatedAnnotations + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + z.F.DecMapStringStringX(yyv20, false, d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RollbackTo = RollbackConfig{} + } else { + yyv22 := &x.RollbackTo + yyv22.CodecDecodeSelf(d) + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RollbackConfig) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Revision != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Revision)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("revision")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Revision)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RollbackConfig) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == 
codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RollbackConfig) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "revision": + if r.TryDecodeAsNil() { + x.Revision = 0 + } else { + yyv4 := &x.Revision + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RollbackConfig) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Revision = 0 + } else { + yyv7 := &x.Revision + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int64)(yyv7)) = int64(r.DecodeInt(64)) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentStrategy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Type != "" + yyq2[1] = x.RollingUpdate != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Type.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.RollingUpdate == nil { + r.EncodeNil() + } else { + x.RollingUpdate.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rollingUpdate")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RollingUpdate == nil { + r.EncodeNil() + } else { + x.RollingUpdate.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentStrategy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "rollingUpdate": + if r.TryDecodeAsNil() { + if x.RollingUpdate != nil { + x.RollingUpdate = nil + } + } else { + if x.RollingUpdate == nil { + x.RollingUpdate = new(RollingUpdateDeployment) + } + x.RollingUpdate.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentStrategy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv7 := &x.Type + yyv7.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RollingUpdate != nil { + x.RollingUpdate = nil + } + } else { + if x.RollingUpdate == nil { + x.RollingUpdate = new(RollingUpdateDeployment) + } + 
x.RollingUpdate.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x DeploymentStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *DeploymentStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *RollingUpdateDeployment) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.MaxUnavailable != nil + yyq2[1] = x.MaxSurge != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.MaxUnavailable == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { + } else if !yym4 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxUnavailable) + } else { + z.EncFallback(x.MaxUnavailable) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("maxUnavailable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MaxUnavailable == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxUnavailable) + } else { + z.EncFallback(x.MaxUnavailable) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.MaxSurge == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxSurge) { + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxSurge) + } else { + z.EncFallback(x.MaxSurge) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("maxSurge")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MaxSurge == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxSurge) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxSurge) + } else { + z.EncFallback(x.MaxSurge) + } + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RollingUpdateDeployment) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RollingUpdateDeployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "maxUnavailable": + if r.TryDecodeAsNil() { + if x.MaxUnavailable != nil { + x.MaxUnavailable = nil + } + } else { + if x.MaxUnavailable == nil { + x.MaxUnavailable = new(pkg5_intstr.IntOrString) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxUnavailable) + } else { + z.DecFallback(x.MaxUnavailable, false) + } + } + case "maxSurge": + if r.TryDecodeAsNil() { + if x.MaxSurge != nil { + x.MaxSurge = nil + } + } else { + if x.MaxSurge == nil { + x.MaxSurge = new(pkg5_intstr.IntOrString) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxSurge) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxSurge) + } else { + z.DecFallback(x.MaxSurge, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RollingUpdateDeployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.MaxUnavailable != nil { + x.MaxUnavailable = nil + } + } else { + if x.MaxUnavailable == nil { + x.MaxUnavailable = new(pkg5_intstr.IntOrString) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxUnavailable) + } else { + z.DecFallback(x.MaxUnavailable, false) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.MaxSurge != nil { + x.MaxSurge = nil + } + } else { + if x.MaxSurge == nil { + x.MaxSurge = new(pkg5_intstr.IntOrString) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxSurge) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxSurge) + } else { + z.DecFallback(x.MaxSurge, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ObservedGeneration != 0 + yyq2[1] = x.Replicas != 0 + yyq2[2] = x.UpdatedReplicas != 0 + yyq2[3] = x.ReadyReplicas != 0 + yyq2[4] = x.AvailableReplicas != 0 + yyq2[5] = x.UnavailableReplicas != 0 + yyq2[6] = len(x.Conditions) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.UpdatedReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("updatedReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.UpdatedReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + 
r.EncodeInt(int64(x.ReadyReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readyReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.ReadyReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(x.UnavailableReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("unavailableReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.UnavailableReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + h.encSliceDeploymentCondition(([]DeploymentCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + h.encSliceDeploymentCondition(([]DeploymentCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 
>= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "observedGeneration": + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv4 := &x.ObservedGeneration + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } + } + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv6 := &x.Replicas + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "updatedReplicas": + if r.TryDecodeAsNil() { + x.UpdatedReplicas = 0 + } else { + yyv8 := &x.UpdatedReplicas + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "readyReplicas": + if r.TryDecodeAsNil() { + x.ReadyReplicas = 0 + } else { + yyv10 := &x.ReadyReplicas + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "availableReplicas": + if r.TryDecodeAsNil() { + x.AvailableReplicas = 0 + } else { + yyv12 := &x.AvailableReplicas + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(yyv12)) = int32(r.DecodeInt(32)) + } + } + case "unavailableReplicas": + if r.TryDecodeAsNil() { + x.UnavailableReplicas = 0 + } else { + yyv14 := &x.UnavailableReplicas + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } + } + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv16 := &x.Conditions + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + h.decSliceDeploymentCondition((*[]DeploymentCondition)(yyv16), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv19 := &x.ObservedGeneration + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int64)(yyv19)) = int64(r.DecodeInt(64)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv21 := &x.Replicas + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UpdatedReplicas = 0 + } else { + yyv23 := &x.UpdatedReplicas + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + 
*((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadyReplicas = 0 + } else { + yyv25 := &x.ReadyReplicas + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AvailableReplicas = 0 + } else { + yyv27 := &x.AvailableReplicas + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(yyv27)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UnavailableReplicas = 0 + } else { + yyv29 := &x.UnavailableReplicas + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*int32)(yyv29)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv31 := &x.Conditions + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + h.decSliceDeploymentCondition((*[]DeploymentCondition)(yyv31), d) + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x DeploymentConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *DeploymentConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *DeploymentCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf7 := &x.Status + yysf7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf8 := &x.Status + yysf8.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastUpdateTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastUpdateTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastUpdateTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.LastTransitionTime + yym18 := z.EncBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } else if !yym18 && z.IsJSONHandle() { + z.EncJSONMarshal(yy17) + } else { + z.EncFallback(yy17) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "lastUpdateTime": + if r.TryDecodeAsNil() { + x.LastUpdateTime = pkg1_v1.Time{} + } else { + yyv6 := &x.LastUpdateTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "lastTransitionTime": + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg1_v1.Time{} + } else { + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv10 := &x.Reason + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv12 := &x.Message + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l 
>= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv15 := &x.Type + yyv15.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv16 := &x.Status + yyv16.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastUpdateTime = pkg1_v1.Time{} + } else { + yyv17 := &x.LastUpdateTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg1_v1.Time{} + } else { + yyv19 := &x.LastTransitionTime + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if yym20 { + z.DecBinaryUnmarshal(yyv19) + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) + } else { + z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv21 := &x.Reason + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv23 := &x.Message + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 
int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceDeployment(([]Deployment)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceDeployment(([]Deployment)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } 
else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceDeployment((*[]Deployment)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 
:= z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceDeployment((*[]Deployment)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DaemonSetUpdateStrategy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Type != "" + yyq2[1] = x.RollingUpdate != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Type.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.RollingUpdate == nil { + r.EncodeNil() + } else { + x.RollingUpdate.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rollingUpdate")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RollingUpdate == nil { + r.EncodeNil() + } else { + x.RollingUpdate.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DaemonSetUpdateStrategy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DaemonSetUpdateStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := 
string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "rollingUpdate": + if r.TryDecodeAsNil() { + if x.RollingUpdate != nil { + x.RollingUpdate = nil + } + } else { + if x.RollingUpdate == nil { + x.RollingUpdate = new(RollingUpdateDaemonSet) + } + x.RollingUpdate.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DaemonSetUpdateStrategy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv7 := &x.Type + yyv7.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RollingUpdate != nil { + x.RollingUpdate = nil + } + } else { + if x.RollingUpdate == nil { + x.RollingUpdate = new(RollingUpdateDaemonSet) + } + x.RollingUpdate.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x DaemonSetUpdateStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *DaemonSetUpdateStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *RollingUpdateDaemonSet) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.MaxUnavailable != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.MaxUnavailable == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { + } else if !yym4 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxUnavailable) 
+ } else { + z.EncFallback(x.MaxUnavailable) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("maxUnavailable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MaxUnavailable == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxUnavailable) + } else { + z.EncFallback(x.MaxUnavailable) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RollingUpdateDaemonSet) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RollingUpdateDaemonSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "maxUnavailable": + if r.TryDecodeAsNil() { + if x.MaxUnavailable != nil { + x.MaxUnavailable = nil + } + } else { + if x.MaxUnavailable == nil { + x.MaxUnavailable = new(pkg5_intstr.IntOrString) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxUnavailable) + } else { + z.DecFallback(x.MaxUnavailable, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RollingUpdateDaemonSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.MaxUnavailable != nil { + x.MaxUnavailable = nil + } + } else { + if x.MaxUnavailable == nil { + x.MaxUnavailable = new(pkg5_intstr.IntOrString) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { + } else if !yym8 && 
z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxUnavailable) + } else { + z.DecFallback(x.MaxUnavailable, false) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DaemonSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Selector != nil + yyq2[2] = true + yyq2[3] = x.MinReadySeconds != 0 + yyq2[4] = x.TemplateGeneration != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.Template + yy7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.Template + yy9.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy12 := &x.UpdateStrategy + yy12.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("updateStrategy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.UpdateStrategy + yy14.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.TemplateGeneration)) + } + } else { + r.EncodeInt(0) + } + } else { + if 
yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("templateGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeInt(int64(x.TemplateGeneration)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DaemonSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DaemonSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = pkg4_v1.PodTemplateSpec{} + } else { + yyv6 := &x.Template + yyv6.CodecDecodeSelf(d) + } + case "updateStrategy": + if r.TryDecodeAsNil() { + x.UpdateStrategy = DaemonSetUpdateStrategy{} + } else { + yyv7 := &x.UpdateStrategy + yyv7.CodecDecodeSelf(d) + } + case "minReadySeconds": + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv8 := &x.MinReadySeconds + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "templateGeneration": + if r.TryDecodeAsNil() { + x.TemplateGeneration = 0 + } else { + yyv10 := &x.TemplateGeneration + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int64)(yyv10)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DaemonSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = pkg4_v1.PodTemplateSpec{} + } else { + yyv15 := &x.Template + yyv15.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UpdateStrategy = DaemonSetUpdateStrategy{} + } else { + yyv16 := &x.UpdateStrategy + yyv16.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv17 := &x.MinReadySeconds + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TemplateGeneration = 0 + } else { + yyv19 := &x.TemplateGeneration + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int64)(yyv19)) = int64(r.DecodeInt(64)) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DaemonSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[4] = x.ObservedGeneration != 0 + yyq2[5] = x.UpdatedNumberScheduled != 0 + yyq2[6] = x.NumberAvailable != 0 + yyq2[7] = x.NumberUnavailable != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(8) + } else { + yynn2 = 4 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.CurrentNumberScheduled)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentNumberScheduled")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + 
r.EncodeInt(int64(x.CurrentNumberScheduled)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.NumberMisscheduled)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("numberMisscheduled")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.NumberMisscheduled)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.DesiredNumberScheduled)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("desiredNumberScheduled")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.DesiredNumberScheduled)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.NumberReady)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("numberReady")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.NumberReady)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(x.UpdatedNumberScheduled)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("updatedNumberScheduled")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.UpdatedNumberScheduled)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeInt(int64(x.NumberAvailable)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("numberAvailable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeInt(int64(x.NumberAvailable)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeInt(int64(x.NumberUnavailable)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[7] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("numberUnavailable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeInt(int64(x.NumberUnavailable)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DaemonSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DaemonSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "currentNumberScheduled": + if r.TryDecodeAsNil() { + x.CurrentNumberScheduled = 0 + } else { + yyv4 := &x.CurrentNumberScheduled + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "numberMisscheduled": + if r.TryDecodeAsNil() { + x.NumberMisscheduled = 0 + } else { + yyv6 := &x.NumberMisscheduled + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "desiredNumberScheduled": + if r.TryDecodeAsNil() { + x.DesiredNumberScheduled = 0 + } else { + yyv8 := &x.DesiredNumberScheduled + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "numberReady": + if r.TryDecodeAsNil() { + x.NumberReady = 0 + } else { + yyv10 := &x.NumberReady + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "observedGeneration": + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv12 := &x.ObservedGeneration + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int64)(yyv12)) = int64(r.DecodeInt(64)) + } + } + case "updatedNumberScheduled": + if r.TryDecodeAsNil() { + x.UpdatedNumberScheduled = 0 + } else { + yyv14 := &x.UpdatedNumberScheduled + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } + } + case "numberAvailable": + if r.TryDecodeAsNil() { + x.NumberAvailable = 0 + } else { + yyv16 := &x.NumberAvailable + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*int32)(yyv16)) = int32(r.DecodeInt(32)) + } + } + case 
"numberUnavailable": + if r.TryDecodeAsNil() { + x.NumberUnavailable = 0 + } else { + yyv18 := &x.NumberUnavailable + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*int32)(yyv18)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DaemonSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj20 int + var yyb20 bool + var yyhl20 bool = l >= 0 + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentNumberScheduled = 0 + } else { + yyv21 := &x.CurrentNumberScheduled + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NumberMisscheduled = 0 + } else { + yyv23 := &x.NumberMisscheduled + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DesiredNumberScheduled = 0 + } else { + yyv25 := &x.DesiredNumberScheduled + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NumberReady = 0 + } else { + yyv27 := &x.NumberReady + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(yyv27)) = int32(r.DecodeInt(32)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv29 := &x.ObservedGeneration + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*int64)(yyv29)) = int64(r.DecodeInt(64)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UpdatedNumberScheduled = 0 + } else { + yyv31 := &x.UpdatedNumberScheduled + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*int32)(yyv31)) = int32(r.DecodeInt(32)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + 
x.NumberAvailable = 0 + } else { + yyv33 := &x.NumberAvailable + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*int32)(yyv33)) = int32(r.DecodeInt(32)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NumberUnavailable = 0 + } else { + yyv35 := &x.NumberUnavailable + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*int32)(yyv35)) = int32(r.DecodeInt(32)) + } + } + for { + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj20-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DaemonSet) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 
|| yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DaemonSet) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DaemonSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = DaemonSetSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = DaemonSetStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DaemonSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { 
+ var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = DaemonSetSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = DaemonSetStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DaemonSetList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceDaemonSet(([]DaemonSet)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceDaemonSet(([]DaemonSet)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DaemonSetList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DaemonSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = 
r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceDaemonSet((*[]DaemonSet)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DaemonSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceDaemonSet((*[]DaemonSet)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ThirdPartyResourceDataList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + 
_, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ThirdPartyResourceDataList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && 
z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ThirdPartyResourceDataList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ThirdPartyResourceDataList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = 
pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Ingress) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + 
z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Ingress) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Ingress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = IngressSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = IngressStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Ingress) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = IngressSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = IngressStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *IngressList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceIngress(([]Ingress)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceIngress(([]Ingress)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *IngressList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *IngressList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + 
z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceIngress((*[]Ingress)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *IngressList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceIngress((*[]Ingress)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *IngressSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 
+ z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Backend != nil + yyq2[1] = len(x.TLS) != 0 + yyq2[2] = len(x.Rules) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Backend == nil { + r.EncodeNil() + } else { + x.Backend.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("backend")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Backend == nil { + r.EncodeNil() + } else { + x.Backend.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.TLS == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tls")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TLS == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Rules == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceIngressRule(([]IngressRule)(x.Rules), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rules")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + h.encSliceIngressRule(([]IngressRule)(x.Rules), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *IngressSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *IngressSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "backend": + if r.TryDecodeAsNil() { + if x.Backend != nil { + x.Backend = nil + } + } else { + if x.Backend == nil { + x.Backend = new(IngressBackend) + } + x.Backend.CodecDecodeSelf(d) + } + case "tls": + if r.TryDecodeAsNil() { + x.TLS = nil + } else { + yyv5 := &x.TLS + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSliceIngressTLS((*[]IngressTLS)(yyv5), d) + } + } + case "rules": + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv7 := &x.Rules + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceIngressRule((*[]IngressRule)(yyv7), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *IngressSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Backend != nil { + x.Backend = nil + } + } else { + if x.Backend == nil { + x.Backend = new(IngressBackend) + } + x.Backend.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TLS = nil + } else { + yyv11 := &x.TLS + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceIngressTLS((*[]IngressTLS)(yyv11), d) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv13 := &x.Rules + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceIngressRule((*[]IngressRule)(yyv13), d) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *IngressTLS) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Hosts) != 0 + yyq2[1] = x.SecretName != "" + var yynn2 int + if yyr2 || yy2arr2 { + 
r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Hosts == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.Hosts, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hosts")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Hosts == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.Hosts, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *IngressTLS) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *IngressTLS) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "hosts": + if r.TryDecodeAsNil() { + x.Hosts = nil + } else { + yyv4 := &x.Hosts + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + case "secretName": + if r.TryDecodeAsNil() { + x.SecretName = "" + } else { + yyv6 := &x.SecretName + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *IngressTLS) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hosts = nil + } else { + yyv9 := &x.Hosts + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + z.F.DecSliceStringX(yyv9, false, d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SecretName = "" + } else { + yyv11 := &x.SecretName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *IngressStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.LoadBalancer + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("loadBalancer")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.LoadBalancer + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *IngressStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *IngressStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // 
default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "loadBalancer": + if r.TryDecodeAsNil() { + x.LoadBalancer = pkg4_v1.LoadBalancerStatus{} + } else { + yyv4 := &x.LoadBalancer + yyv4.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *IngressStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LoadBalancer = pkg4_v1.LoadBalancerStatus{} + } else { + yyv6 := &x.LoadBalancer + yyv6.CodecDecodeSelf(d) + } + for { + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj5-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *IngressRule) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Host != "" + yyq2[1] = x.IngressRuleValue.HTTP != nil && x.HTTP != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Host)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("host")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Host)) + } + } + } + var yyn6 bool + if x.IngressRuleValue.HTTP == nil { + yyn6 = true + goto LABEL6 + } + LABEL6: + if yyr2 || yy2arr2 { + if yyn6 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.HTTP == nil { + r.EncodeNil() + } else { + x.HTTP.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("http")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn6 { + r.EncodeNil() + } else { + if x.HTTP == nil { + r.EncodeNil() + } else { + 
x.HTTP.CodecEncodeSelf(e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *IngressRule) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *IngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "host": + if r.TryDecodeAsNil() { + x.Host = "" + } else { + yyv4 := &x.Host + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "http": + if x.IngressRuleValue.HTTP == nil { + x.IngressRuleValue.HTTP = new(HTTPIngressRuleValue) + } + if r.TryDecodeAsNil() { + if x.HTTP != nil { + x.HTTP = nil + } + } else { + if x.HTTP == nil { + x.HTTP = new(HTTPIngressRuleValue) + } + x.HTTP.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *IngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Host = "" + } else { + yyv8 := &x.Host + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + if x.IngressRuleValue.HTTP == nil { + x.IngressRuleValue.HTTP = new(HTTPIngressRuleValue) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HTTP != nil { + x.HTTP = nil + } + } else { + if x.HTTP == nil { + x.HTTP = new(HTTPIngressRuleValue) + } + x.HTTP.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *IngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.HTTP != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.HTTP == nil { + r.EncodeNil() + } else { + x.HTTP.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("http")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.HTTP == nil { + r.EncodeNil() + } else { + x.HTTP.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *IngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *IngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "http": + if r.TryDecodeAsNil() { + if x.HTTP != nil { + x.HTTP = nil + } + } else { + if x.HTTP == nil { + x.HTTP = new(HTTPIngressRuleValue) + } + x.HTTP.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *IngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HTTP != nil { + x.HTTP = nil + } + } else { + if x.HTTP == nil { + x.HTTP = new(HTTPIngressRuleValue) + } + x.HTTP.CodecDecodeSelf(d) + } + for { + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj5-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HTTPIngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Paths == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("paths")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Paths == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HTTPIngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HTTPIngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "paths": + if r.TryDecodeAsNil() { + x.Paths = nil + } else { + yyv4 := &x.Paths + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv4), d) + } + 
} + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HTTPIngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Paths = nil + } else { + yyv7 := &x.Paths + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HTTPIngressPath) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Path != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.Backend + yy7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("backend")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.Backend + yy9.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HTTPIngressPath) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { 
+ x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HTTPIngressPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "backend": + if r.TryDecodeAsNil() { + x.Backend = IngressBackend{} + } else { + yyv6 := &x.Backend + yyv6.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HTTPIngressPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv8 := &x.Path + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Backend = IngressBackend{} + } else { + yyv10 := &x.Backend + yyv10.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *IngressBackend) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("serviceName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } 
else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.ServicePort + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("servicePort")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.ServicePort + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *IngressBackend) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *IngressBackend) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "serviceName": + if r.TryDecodeAsNil() { + x.ServiceName = "" + } else { + yyv4 := &x.ServiceName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "servicePort": + if r.TryDecodeAsNil() { + x.ServicePort = pkg5_intstr.IntOrString{} + } else { + yyv6 := &x.ServicePort + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *IngressBackend) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ServiceName 
= "" + } else { + yyv9 := &x.ServiceName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ServicePort = pkg5_intstr.IntOrString{} + } else { + yyv11 := &x.ServicePort + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) + } else { + z.DecFallback(yyv11, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicaSet) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() 
&& z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicaSet) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicaSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ReplicaSetSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ReplicaSetStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} 
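(Illustrative aside, not part of the generated file above: every `CodecEncodeSelf` in this file branches on `yy2arr2 := z.EncBasicHandle().StructToArray`, writing either a map keyed by the JSON field names or a positional array. A minimal sketch of that handle option using the real github.com/ugorji/go/codec package follows; the `backend` struct is a made-up stand-in, not a type from this change.)

    package main

    import (
        "fmt"

        "github.com/ugorji/go/codec"
    )

    // backend is a hypothetical two-field struct, shaped loosely like the
    // serviceName/servicePort pair handled by the generated code above.
    type backend struct {
        ServiceName string `codec:"serviceName"`
        ServicePort int    `codec:"servicePort"`
    }

    func main() {
        var jh codec.JsonHandle

        // Default: structs encode as maps keyed by field name
        // (the non-yy2arr2 branch in the generated encoders).
        var asMap []byte
        if err := codec.NewEncoderBytes(&asMap, &jh).Encode(backend{"web", 80}); err != nil {
            panic(err)
        }
        fmt.Println(string(asMap)) // e.g. {"serviceName":"web","servicePort":80}

        // StructToArray switches the same struct to a positional array
        // (the yy2arr2 branch), which is why both code paths are generated.
        jh.StructToArray = true
        var asArray []byte
        if err := codec.NewEncoderBytes(&asArray, &jh).Encode(backend{"web", 80}); err != nil {
            panic(err)
        }
        fmt.Println(string(asArray)) // e.g. ["web",80]
    }

(The matching `codecDecodeSelfFromMap`/`codecDecodeSelfFromArray` pairs in this file mirror the same split on the decode side.)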
+ +func (x *ReplicaSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ReplicaSetSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ReplicaSetStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicaSetList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicaSetList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicaSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + 
z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceReplicaSet((*[]ReplicaSet)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicaSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceReplicaSet((*[]ReplicaSet)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicaSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var 
h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != nil + yyq2[1] = x.MinReadySeconds != 0 + yyq2[2] = x.Selector != nil + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Replicas == nil { + r.EncodeNil() + } else { + yy4 := *x.Replicas + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Replicas == nil { + r.EncodeNil() + } else { + yy6 := *x.Replicas + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Template + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Template + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicaSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := 
r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicaSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + case "minReadySeconds": + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv6 := &x.MinReadySeconds + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = pkg4_v1.PodTemplateSpec{} + } else { + yyv10 := &x.Template + yyv10.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicaSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv14 := &x.MinReadySeconds + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = pkg4_v1.PodTemplateSpec{} + } else { + yyv18 := &x.Template + yyv18.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FullyLabeledReplicas != 0 + yyq2[2] = x.ReadyReplicas != 0 + yyq2[3] = x.AvailableReplicas != 0 + yyq2[4] = x.ObservedGeneration != 0 + yyq2[5] = len(x.Conditions) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.FullyLabeledReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.FullyLabeledReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.ReadyReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readyReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.ReadyReplicas)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encSliceReplicaSetCondition(([]ReplicaSetCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + h.encSliceReplicaSetCondition(([]ReplicaSetCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicaSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicaSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + 
} else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "fullyLabeledReplicas": + if r.TryDecodeAsNil() { + x.FullyLabeledReplicas = 0 + } else { + yyv6 := &x.FullyLabeledReplicas + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "readyReplicas": + if r.TryDecodeAsNil() { + x.ReadyReplicas = 0 + } else { + yyv8 := &x.ReadyReplicas + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "availableReplicas": + if r.TryDecodeAsNil() { + x.AvailableReplicas = 0 + } else { + yyv10 := &x.AvailableReplicas + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "observedGeneration": + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv12 := &x.ObservedGeneration + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int64)(yyv12)) = int64(r.DecodeInt(64)) + } + } + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv14 := &x.Conditions + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + h.decSliceReplicaSetCondition((*[]ReplicaSetCondition)(yyv14), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv17 := &x.Replicas + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FullyLabeledReplicas = 0 + } else { + yyv19 := &x.FullyLabeledReplicas + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int32)(yyv19)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadyReplicas = 0 + } else { + yyv21 := &x.ReadyReplicas + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AvailableReplicas = 0 + } else { + yyv23 := &x.AvailableReplicas + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv25 := &x.ObservedGeneration + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int64)(yyv25)) = int64(r.DecodeInt(64)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv27 := &x.Conditions + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + h.decSliceReplicaSetCondition((*[]ReplicaSetCondition)(yyv27), d) + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj16-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ReplicaSetConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ReplicaSetConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *ReplicaSetCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = x.Reason != "" + yyq2[4] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf7 := &x.Status + yysf7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf8 := &x.Status + yysf8.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastTransitionTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastTransitionTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicaSetCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicaSetCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "lastTransitionTime": + if 
r.TryDecodeAsNil() { + x.LastTransitionTime = pkg1_v1.Time{} + } else { + yyv6 := &x.LastTransitionTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv8 := &x.Reason + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv10 := &x.Message + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicaSetCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv13 := &x.Type + yyv13.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv14 := &x.Status + yyv14.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg1_v1.Time{} + } else { + yyv15 := &x.LastTransitionTime + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else if yym16 { + z.DecBinaryUnmarshal(yyv15) + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv15) + } else { + z.DecFallback(yyv15, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv17 := &x.Reason + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv19 := &x.Message + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodSecurityPolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodSecurityPolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + 
yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodSecurityPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = PodSecurityPolicySpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodSecurityPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if 
z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = PodSecurityPolicySpec{} + } else { + yyv18 := &x.Spec + yyv18.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [14]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Privileged != false + yyq2[1] = len(x.DefaultAddCapabilities) != 0 + yyq2[2] = len(x.RequiredDropCapabilities) != 0 + yyq2[3] = len(x.AllowedCapabilities) != 0 + yyq2[4] = len(x.Volumes) != 0 + yyq2[5] = x.HostNetwork != false + yyq2[6] = len(x.HostPorts) != 0 + yyq2[7] = x.HostPID != false + yyq2[8] = x.HostIPC != false + yyq2[13] = x.ReadOnlyRootFilesystem != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(14) + } else { + yynn2 = 4 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.Privileged)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("privileged")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeBool(bool(x.Privileged)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.DefaultAddCapabilities == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.DefaultAddCapabilities), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("defaultAddCapabilities")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DefaultAddCapabilities == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.DefaultAddCapabilities), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.RequiredDropCapabilities == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.RequiredDropCapabilities), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("requiredDropCapabilities")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RequiredDropCapabilities == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.RequiredDropCapabilities), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.AllowedCapabilities == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.AllowedCapabilities), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("allowedCapabilities")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AllowedCapabilities == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.AllowedCapabilities), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Volumes == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceFSType(([]FSType)(x.Volumes), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Volumes == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encSliceFSType(([]FSType)(x.Volumes), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.HostNetwork)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostNetwork")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.HostNetwork)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.HostPorts == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPorts")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.HostPorts == nil { + r.EncodeNil() + } else { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeBool(bool(x.HostPID)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := z.EncBinary() + _ = yym26 + 
if false { + } else { + r.EncodeBool(bool(x.HostPID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeBool(bool(x.HostIPC)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostIPC")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeBool(bool(x.HostIPC)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy31 := &x.SELinux + yy31.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("seLinux")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy33 := &x.SELinux + yy33.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy36 := &x.RunAsUser + yy36.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy38 := &x.RunAsUser + yy38.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy41 := &x.SupplementalGroups + yy41.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("supplementalGroups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy43 := &x.SupplementalGroups + yy43.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy46 := &x.FSGroup + yy46.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsGroup")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy48 := &x.FSGroup + yy48.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + yym51 := z.EncBinary() + _ = yym51 + if false { + } else { + r.EncodeBool(bool(x.ReadOnlyRootFilesystem)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnlyRootFilesystem")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym52 := z.EncBinary() + _ = yym52 + if false { + } else { + r.EncodeBool(bool(x.ReadOnlyRootFilesystem)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodSecurityPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + 
} else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodSecurityPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "privileged": + if r.TryDecodeAsNil() { + x.Privileged = false + } else { + yyv4 := &x.Privileged + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*bool)(yyv4)) = r.DecodeBool() + } + } + case "defaultAddCapabilities": + if r.TryDecodeAsNil() { + x.DefaultAddCapabilities = nil + } else { + yyv6 := &x.DefaultAddCapabilities + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv6), d) + } + } + case "requiredDropCapabilities": + if r.TryDecodeAsNil() { + x.RequiredDropCapabilities = nil + } else { + yyv8 := &x.RequiredDropCapabilities + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv8), d) + } + } + case "allowedCapabilities": + if r.TryDecodeAsNil() { + x.AllowedCapabilities = nil + } else { + yyv10 := &x.AllowedCapabilities + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv10), d) + } + } + case "volumes": + if r.TryDecodeAsNil() { + x.Volumes = nil + } else { + yyv12 := &x.Volumes + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + h.decSliceFSType((*[]FSType)(yyv12), d) + } + } + case "hostNetwork": + if r.TryDecodeAsNil() { + x.HostNetwork = false + } else { + yyv14 := &x.HostNetwork + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + case "hostPorts": + if r.TryDecodeAsNil() { + x.HostPorts = nil + } else { + yyv16 := &x.HostPorts + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + h.decSliceHostPortRange((*[]HostPortRange)(yyv16), d) + } + } + case "hostPID": + if r.TryDecodeAsNil() { + x.HostPID = false + } else { + yyv18 := &x.HostPID + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*bool)(yyv18)) = r.DecodeBool() + } + } + case "hostIPC": + if r.TryDecodeAsNil() { + x.HostIPC = false + } else { + yyv20 := &x.HostIPC + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*bool)(yyv20)) = r.DecodeBool() + } + } + case "seLinux": + if r.TryDecodeAsNil() { + x.SELinux = SELinuxStrategyOptions{} + } else { + yyv22 := &x.SELinux + yyv22.CodecDecodeSelf(d) + } + case "runAsUser": + if r.TryDecodeAsNil() { + x.RunAsUser = RunAsUserStrategyOptions{} + } else { + yyv23 := &x.RunAsUser + yyv23.CodecDecodeSelf(d) + } + case "supplementalGroups": + if r.TryDecodeAsNil() { + x.SupplementalGroups = SupplementalGroupsStrategyOptions{} + } else { + yyv24 := &x.SupplementalGroups + yyv24.CodecDecodeSelf(d) + } + case "fsGroup": + if r.TryDecodeAsNil() { + x.FSGroup = FSGroupStrategyOptions{} + } else { + yyv25 := &x.FSGroup + yyv25.CodecDecodeSelf(d) + } + case "readOnlyRootFilesystem": + if r.TryDecodeAsNil() { + x.ReadOnlyRootFilesystem = false + } else { + yyv26 := 
&x.ReadOnlyRootFilesystem + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*bool)(yyv26)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj28 int + var yyb28 bool + var yyhl28 bool = l >= 0 + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Privileged = false + } else { + yyv29 := &x.Privileged + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DefaultAddCapabilities = nil + } else { + yyv31 := &x.DefaultAddCapabilities + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv31), d) + } + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RequiredDropCapabilities = nil + } else { + yyv33 := &x.RequiredDropCapabilities + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv33), d) + } + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AllowedCapabilities = nil + } else { + yyv35 := &x.AllowedCapabilities + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv35), d) + } + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Volumes = nil + } else { + yyv37 := &x.Volumes + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + h.decSliceFSType((*[]FSType)(yyv37), d) + } + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostNetwork = false + } else { + yyv39 := &x.HostNetwork + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*bool)(yyv39)) = r.DecodeBool() + } + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostPorts = nil + } else { + yyv41 := &x.HostPorts + yym42 := z.DecBinary() + _ = yym42 + if 
false { + } else { + h.decSliceHostPortRange((*[]HostPortRange)(yyv41), d) + } + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostPID = false + } else { + yyv43 := &x.HostPID + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*bool)(yyv43)) = r.DecodeBool() + } + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostIPC = false + } else { + yyv45 := &x.HostIPC + yym46 := z.DecBinary() + _ = yym46 + if false { + } else { + *((*bool)(yyv45)) = r.DecodeBool() + } + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SELinux = SELinuxStrategyOptions{} + } else { + yyv47 := &x.SELinux + yyv47.CodecDecodeSelf(d) + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RunAsUser = RunAsUserStrategyOptions{} + } else { + yyv48 := &x.RunAsUser + yyv48.CodecDecodeSelf(d) + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SupplementalGroups = SupplementalGroupsStrategyOptions{} + } else { + yyv49 := &x.SupplementalGroups + yyv49.CodecDecodeSelf(d) + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSGroup = FSGroupStrategyOptions{} + } else { + yyv50 := &x.FSGroup + yyv50.CodecDecodeSelf(d) + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnlyRootFilesystem = false + } else { + yyv51 := &x.ReadOnlyRootFilesystem + yym52 := z.DecBinary() + _ = yym52 + if false { + } else { + *((*bool)(yyv51)) = r.DecodeBool() + } + } + for { + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj28-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x FSType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *FSType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *HostPortRange) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Min)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("min")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Min)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.Max)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("max")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.Max)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HostPortRange) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HostPortRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "min": + if r.TryDecodeAsNil() { + x.Min = 0 + } else { + yyv4 := &x.Min + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "max": + if r.TryDecodeAsNil() { + x.Max = 0 + } else { + 
yyv6 := &x.Max + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HostPortRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Min = 0 + } else { + yyv9 := &x.Min + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*int32)(yyv9)) = int32(r.DecodeInt(32)) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Max = 0 + } else { + yyv11 := &x.Max + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(yyv11)) = int32(r.DecodeInt(32)) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SELinuxStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.SELinuxOptions != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Rule.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rule")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Rule.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.SELinuxOptions == nil { + r.EncodeNil() + } else { + x.SELinuxOptions.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SELinuxOptions == nil { + r.EncodeNil() + } else { + x.SELinuxOptions.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SELinuxStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if 
z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SELinuxStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "rule": + if r.TryDecodeAsNil() { + x.Rule = "" + } else { + yyv4 := &x.Rule + yyv4.CodecDecodeSelf(d) + } + case "seLinuxOptions": + if r.TryDecodeAsNil() { + if x.SELinuxOptions != nil { + x.SELinuxOptions = nil + } + } else { + if x.SELinuxOptions == nil { + x.SELinuxOptions = new(pkg4_v1.SELinuxOptions) + } + x.SELinuxOptions.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SELinuxStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Rule = "" + } else { + yyv7 := &x.Rule + yyv7.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SELinuxOptions != nil { + x.SELinuxOptions = nil + } + } else { + if x.SELinuxOptions == nil { + x.SELinuxOptions = new(pkg4_v1.SELinuxOptions) + } + x.SELinuxOptions.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x SELinuxStrategy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *SELinuxStrategy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { 
+ *((*string)(x)) = r.DecodeString() + } +} + +func (x *RunAsUserStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = len(x.Ranges) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Rule.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rule")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Rule.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Ranges == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceIDRange(([]IDRange)(x.Ranges), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ranges")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ranges == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceIDRange(([]IDRange)(x.Ranges), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RunAsUserStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RunAsUserStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "rule": + if r.TryDecodeAsNil() { + x.Rule = "" + } else { + yyv4 := &x.Rule + yyv4.CodecDecodeSelf(d) + } + case "ranges": + if r.TryDecodeAsNil() { + x.Ranges = nil + } else { + yyv5 := &x.Ranges + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + 
h.decSliceIDRange((*[]IDRange)(yyv5), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RunAsUserStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Rule = "" + } else { + yyv8 := &x.Rule + yyv8.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ranges = nil + } else { + yyv9 := &x.Ranges + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceIDRange((*[]IDRange)(yyv9), d) + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *IDRange) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Min)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("min")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Min)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.Max)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("max")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.Max)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *IDRange) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *IDRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "min": + if r.TryDecodeAsNil() { + x.Min = 0 + } else { + yyv4 := &x.Min + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } + } + case "max": + if r.TryDecodeAsNil() { + x.Max = 0 + } else { + yyv6 := &x.Max + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int64)(yyv6)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *IDRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Min = 0 + } else { + yyv9 := &x.Min + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*int64)(yyv9)) = int64(r.DecodeInt(64)) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Max = 0 + } else { + yyv11 := &x.Max + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int64)(yyv11)) = int64(r.DecodeInt(64)) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x RunAsUserStrategy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *RunAsUserStrategy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *FSGroupStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Rule != "" + yyq2[1] = len(x.Ranges) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Rule.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rule")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Rule.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Ranges == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceIDRange(([]IDRange)(x.Ranges), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ranges")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ranges == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceIDRange(([]IDRange)(x.Ranges), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *FSGroupStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *FSGroupStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "rule": + if r.TryDecodeAsNil() { + x.Rule = "" + } else { + yyv4 := &x.Rule + yyv4.CodecDecodeSelf(d) + } + case "ranges": + if r.TryDecodeAsNil() { + x.Ranges = nil + } else { + yyv5 := &x.Ranges + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + 
h.decSliceIDRange((*[]IDRange)(yyv5), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *FSGroupStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Rule = "" + } else { + yyv8 := &x.Rule + yyv8.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ranges = nil + } else { + yyv9 := &x.Ranges + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceIDRange((*[]IDRange)(yyv9), d) + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x FSGroupStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *FSGroupStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *SupplementalGroupsStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Rule != "" + yyq2[1] = len(x.Ranges) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Rule.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rule")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Rule.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Ranges == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceIDRange(([]IDRange)(x.Ranges), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ranges")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ranges == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceIDRange(([]IDRange)(x.Ranges), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SupplementalGroupsStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "rule": + if r.TryDecodeAsNil() { + x.Rule = "" + } else { + yyv4 := &x.Rule + yyv4.CodecDecodeSelf(d) + } + case "ranges": + if r.TryDecodeAsNil() { + x.Ranges = nil + } else { + yyv5 := &x.Ranges + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSliceIDRange((*[]IDRange)(yyv5), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Rule = "" + } else { + yyv8 := &x.Rule + yyv8.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ranges = nil + } else { + yyv9 := &x.Ranges + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceIDRange((*[]IDRange)(yyv9), d) + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x SupplementalGroupsStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *SupplementalGroupsStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *PodSecurityPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } 
else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodSecurityPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodSecurityPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodSecurityPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NetworkPolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NetworkPolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NetworkPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && 
z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = NetworkPolicySpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NetworkPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = NetworkPolicySpec{} + } else { + yyv18 := &x.Spec + yyv18.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NetworkPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = len(x.Ingress) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.PodSelector + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("podSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.PodSelector + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Ingress == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ingress")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ingress == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NetworkPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NetworkPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "podSelector": + if r.TryDecodeAsNil() { + x.PodSelector = pkg1_v1.LabelSelector{} + } else { + yyv4 := &x.PodSelector + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "ingress": + if r.TryDecodeAsNil() { + x.Ingress = nil + } else { + yyv6 := &x.Ingress + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NetworkPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + 
if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodSelector = pkg1_v1.LabelSelector{} + } else { + yyv9 := &x.PodSelector + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else { + z.DecFallback(yyv9, false) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ingress = nil + } else { + yyv11 := &x.Ingress + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NetworkPolicyIngressRule) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Ports) != 0 + yyq2[1] = len(x.From) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Ports == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ports")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ports == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.From == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("from")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.From == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NetworkPolicyIngressRule) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, 
_ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NetworkPolicyIngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "ports": + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv4 := &x.Ports + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv4), d) + } + } + case "from": + if r.TryDecodeAsNil() { + x.From = nil + } else { + yyv6 := &x.From + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NetworkPolicyIngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv9 := &x.Ports + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.From = nil + } else { + yyv11 := &x.From + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NetworkPolicyPort) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 
:= z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Protocol != nil + yyq2[1] = x.Port != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Protocol == nil { + r.EncodeNil() + } else { + yy4 := *x.Protocol + yysf5 := &yy4 + yysf5.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("protocol")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Protocol == nil { + r.EncodeNil() + } else { + yy6 := *x.Protocol + yysf7 := &yy6 + yysf7.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Port == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.Port) { + } else if !yym9 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Port) + } else { + z.EncFallback(x.Port) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("port")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Port == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.Port) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Port) + } else { + z.EncFallback(x.Port) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NetworkPolicyPort) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NetworkPolicyPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "protocol": + if r.TryDecodeAsNil() { + if x.Protocol != nil { + x.Protocol = nil + } + } else { + if x.Protocol == nil { + x.Protocol = new(pkg4_v1.Protocol) + } + x.Protocol.CodecDecodeSelf(d) 
+ } + case "port": + if r.TryDecodeAsNil() { + if x.Port != nil { + x.Port = nil + } + } else { + if x.Port == nil { + x.Port = new(pkg5_intstr.IntOrString) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else if z.HasExtensions() && z.DecExt(x.Port) { + } else if !yym6 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Port) + } else { + z.DecFallback(x.Port, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NetworkPolicyPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Protocol != nil { + x.Protocol = nil + } + } else { + if x.Protocol == nil { + x.Protocol = new(pkg4_v1.Protocol) + } + x.Protocol.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Port != nil { + x.Port = nil + } + } else { + if x.Port == nil { + x.Port = new(pkg5_intstr.IntOrString) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(x.Port) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Port) + } else { + z.DecFallback(x.Port, false) + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NetworkPolicyPeer) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.PodSelector != nil + yyq2[1] = x.NamespaceSelector != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.PodSelector == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.PodSelector) { + } else { + z.EncFallback(x.PodSelector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PodSelector == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.PodSelector) { + } else { + z.EncFallback(x.PodSelector) + } + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.NamespaceSelector == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.NamespaceSelector) { + } else { + z.EncFallback(x.NamespaceSelector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespaceSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NamespaceSelector == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.NamespaceSelector) { + } else { + z.EncFallback(x.NamespaceSelector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NetworkPolicyPeer) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NetworkPolicyPeer) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "podSelector": + if r.TryDecodeAsNil() { + if x.PodSelector != nil { + x.PodSelector = nil + } + } else { + if x.PodSelector == nil { + x.PodSelector = new(pkg1_v1.LabelSelector) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.PodSelector) { + } else { + z.DecFallback(x.PodSelector, false) + } + } + case "namespaceSelector": + if r.TryDecodeAsNil() { + if x.NamespaceSelector != nil { + x.NamespaceSelector = nil + } + } else { + if x.NamespaceSelector == nil { + x.NamespaceSelector = new(pkg1_v1.LabelSelector) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.NamespaceSelector) { + } else { + z.DecFallback(x.NamespaceSelector, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NetworkPolicyPeer) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + 
if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PodSelector != nil { + x.PodSelector = nil + } + } else { + if x.PodSelector == nil { + x.PodSelector = new(pkg1_v1.LabelSelector) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(x.PodSelector) { + } else { + z.DecFallback(x.PodSelector, false) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NamespaceSelector != nil { + x.NamespaceSelector = nil + } + } else { + if x.NamespaceSelector == nil { + x.NamespaceSelector = new(pkg1_v1.LabelSelector) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(x.NamespaceSelector) { + } else { + z.DecFallback(x.NamespaceSelector, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NetworkPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) 
+ } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NetworkPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NetworkPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NetworkPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceCustomMetricTarget(v []CustomMetricTarget, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []CustomMetricTarget{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]CustomMetricTarget, yyrl1) + } + } else { + yyv1 = 
make([]CustomMetricTarget, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CustomMetricTarget{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, CustomMetricTarget{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CustomMetricTarget{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, CustomMetricTarget{}) // var yyz1 CustomMetricTarget + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = CustomMetricTarget{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []CustomMetricTarget{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceCustomMetricCurrentStatus(v []CustomMetricCurrentStatus, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurrentStatus, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []CustomMetricCurrentStatus{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]CustomMetricCurrentStatus, yyrl1) + } + } else { + yyv1 = make([]CustomMetricCurrentStatus, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CustomMetricCurrentStatus{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, CustomMetricCurrentStatus{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CustomMetricCurrentStatus{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, CustomMetricCurrentStatus{}) // var yyz1 CustomMetricCurrentStatus + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = CustomMetricCurrentStatus{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } 
else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []CustomMetricCurrentStatus{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceAPIVersion(v []APIVersion, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []APIVersion{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]APIVersion, yyrl1) + } + } else { + yyv1 = make([]APIVersion, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = APIVersion{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, APIVersion{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = APIVersion{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, APIVersion{}) // var yyz1 APIVersion + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = APIVersion{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []APIVersion{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceThirdPartyResource(v []ThirdPartyResource, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ThirdPartyResource{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + 
yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ThirdPartyResource, yyrl1) + } + } else { + yyv1 = make([]ThirdPartyResource, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ThirdPartyResource{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ThirdPartyResource{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ThirdPartyResource{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ThirdPartyResource{}) // var yyz1 ThirdPartyResource + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ThirdPartyResource{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ThirdPartyResource{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceDeploymentCondition(v []DeploymentCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceDeploymentCondition(v *[]DeploymentCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []DeploymentCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]DeploymentCondition, yyrl1) + } + } else { + yyv1 = make([]DeploymentCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = DeploymentCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, DeploymentCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = DeploymentCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, DeploymentCondition{}) // var yyz1 DeploymentCondition + yyc1 = true + } + 
yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = DeploymentCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []DeploymentCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceDeployment(v []Deployment, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Deployment{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 920) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Deployment, yyrl1) + } + } else { + yyv1 = make([]Deployment, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Deployment{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Deployment{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Deployment{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Deployment{}) // var yyz1 Deployment + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Deployment{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Deployment{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceDaemonSet(v []DaemonSet, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []DaemonSet{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } 
else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 872) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]DaemonSet, yyrl1) + } + } else { + yyv1 = make([]DaemonSet, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = DaemonSet{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, DaemonSet{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = DaemonSet{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, DaemonSet{}) // var yyz1 DaemonSet + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = DaemonSet{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []DaemonSet{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceThirdPartyResourceData(v []ThirdPartyResourceData, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceData, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ThirdPartyResourceData{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ThirdPartyResourceData, yyrl1) + } + } else { + yyv1 = make([]ThirdPartyResourceData, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ThirdPartyResourceData{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ThirdPartyResourceData{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ThirdPartyResourceData{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, 
ThirdPartyResourceData{}) // var yyz1 ThirdPartyResourceData + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ThirdPartyResourceData{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ThirdPartyResourceData{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceIngress(v []Ingress, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Ingress{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 336) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Ingress, yyrl1) + } + } else { + yyv1 = make([]Ingress, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Ingress{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Ingress{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Ingress{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Ingress{}) // var yyz1 Ingress + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Ingress{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Ingress{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceIngressTLS(v []IngressTLS, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []IngressTLS{} + yyc1 = true + } else if len(yyv1) 
!= 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]IngressTLS, yyrl1) + } + } else { + yyv1 = make([]IngressTLS, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = IngressTLS{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, IngressTLS{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = IngressTLS{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, IngressTLS{}) // var yyz1 IngressTLS + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = IngressTLS{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []IngressTLS{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceIngressRule(v []IngressRule, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []IngressRule{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]IngressRule, yyrl1) + } + } else { + yyv1 = make([]IngressRule, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = IngressRule{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, IngressRule{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = IngressRule{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, IngressRule{}) // var yyz1 IngressRule + yyc1 = true + } 
+ yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = IngressRule{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []IngressRule{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceHTTPIngressPath(v []HTTPIngressPath, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []HTTPIngressPath{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]HTTPIngressPath, yyrl1) + } + } else { + yyv1 = make([]HTTPIngressPath, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HTTPIngressPath{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, HTTPIngressPath{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HTTPIngressPath{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, HTTPIngressPath{}) // var yyz1 HTTPIngressPath + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = HTTPIngressPath{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []HTTPIngressPath{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceReplicaSet(v []ReplicaSet, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ReplicaSet{} + yyc1 = true + } else if 
len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 856) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ReplicaSet, yyrl1) + } + } else { + yyv1 = make([]ReplicaSet, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicaSet{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ReplicaSet{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicaSet{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ReplicaSet{}) // var yyz1 ReplicaSet + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicaSet{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ReplicaSet{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceReplicaSetCondition(v []ReplicaSetCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceReplicaSetCondition(v *[]ReplicaSetCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ReplicaSetCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 88) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ReplicaSetCondition, yyrl1) + } + } else { + yyv1 = make([]ReplicaSetCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicaSetCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ReplicaSetCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicaSetCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= 
len(yyv1) { + yyv1 = append(yyv1, ReplicaSetCondition{}) // var yyz1 ReplicaSetCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicaSetCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ReplicaSetCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicev1_Capability(v []pkg4_v1.Capability, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf2 := &yyv1 + yysf2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicev1_Capability(v *[]pkg4_v1.Capability, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg4_v1.Capability{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]pkg4_v1.Capability, yyrl1) + } + } else { + yyv1 = make([]pkg4_v1.Capability, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 pkg4_v1.Capability + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg4_v1.Capability{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceFSType(v []FSType, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yyv1.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []FSType{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 
= true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]FSType, yyrl1) + } + } else { + yyv1 = make([]FSType, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 FSType + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []FSType{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceHostPortRange(v []HostPortRange, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []HostPortRange{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]HostPortRange, yyrl1) + } + } else { + yyv1 = make([]HostPortRange, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HostPortRange{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, HostPortRange{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HostPortRange{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, HostPortRange{}) // var yyz1 HostPortRange + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = HostPortRange{} + } else { + yyv4 := &yyv1[yyj1] + 
yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []HostPortRange{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceIDRange(v []IDRange, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []IDRange{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]IDRange, yyrl1) + } + } else { + yyv1 = make([]IDRange, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = IDRange{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, IDRange{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = IDRange{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, IDRange{}) // var yyz1 IDRange + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = IDRange{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []IDRange{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePodSecurityPolicy(v []PodSecurityPolicy, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodSecurityPolicy{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + 
yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 552) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PodSecurityPolicy, yyrl1) + } + } else { + yyv1 = make([]PodSecurityPolicy, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodSecurityPolicy{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodSecurityPolicy{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodSecurityPolicy{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodSecurityPolicy{}) // var yyz1 PodSecurityPolicy + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodSecurityPolicy{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodSecurityPolicy{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNetworkPolicyIngressRule(v []NetworkPolicyIngressRule, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNetworkPolicyIngressRule(v *[]NetworkPolicyIngressRule, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NetworkPolicyIngressRule{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]NetworkPolicyIngressRule, yyrl1) + } + } else { + yyv1 = make([]NetworkPolicyIngressRule, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicyIngressRule{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NetworkPolicyIngressRule{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicyIngressRule{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NetworkPolicyIngressRule{}) // var yyz1 NetworkPolicyIngressRule + yyc1 
= true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicyIngressRule{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NetworkPolicyIngressRule{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNetworkPolicyPort(v []NetworkPolicyPort, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNetworkPolicyPort(v *[]NetworkPolicyPort, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NetworkPolicyPort{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]NetworkPolicyPort, yyrl1) + } + } else { + yyv1 = make([]NetworkPolicyPort, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicyPort{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NetworkPolicyPort{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicyPort{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NetworkPolicyPort{}) // var yyz1 NetworkPolicyPort + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicyPort{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NetworkPolicyPort{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNetworkPolicyPeer(v []NetworkPolicyPeer, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNetworkPolicyPeer(v *[]NetworkPolicyPeer, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ 
= yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NetworkPolicyPeer{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]NetworkPolicyPeer, yyrl1) + } + } else { + yyv1 = make([]NetworkPolicyPeer, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicyPeer{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NetworkPolicyPeer{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicyPeer{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NetworkPolicyPeer{}) // var yyz1 NetworkPolicyPeer + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicyPeer{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NetworkPolicyPeer{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNetworkPolicy(v []NetworkPolicy, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNetworkPolicy(v *[]NetworkPolicy, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NetworkPolicy{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 312) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]NetworkPolicy, yyrl1) + } + } else { + yyv1 = make([]NetworkPolicy, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicy{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NetworkPolicy{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicy{} + } else { + yyv3 := &yyv1[yyj1] + 
yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NetworkPolicy{}) // var yyz1 NetworkPolicy + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicy{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NetworkPolicy{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types.go new file mode 100644 index 0000000000..43dd2a9b02 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types.go @@ -0,0 +1,1147 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/pkg/api/v1" +) + +// describes the attributes of a scale subresource +type ScaleSpec struct { + // desired number of instances for the scaled object. + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` +} + +// represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` + + // label selector for pods that should match the replicas count. This is a serializated + // version of both map-based and more expressive set-based selectors. This is done to + // avoid introspection in the clients. The string will be in the same format as the + // query-param syntax. If the target type only supports map-based selectors, both this + // field and map-based selector field are populated. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` +} + +// +genclient=true +// +noMethods=true + +// represents a scaling request for a resource. +type Scale struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. 
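Aside (not part of the diff): the generated encSlice*/decSlice* helpers that close out types.generated.go above all follow one pattern — reuse the destination slice's backing array when the declared length fits, grow it (via DecInferLen and a per-type element-size hint) when it does not, and fall back to append-per-element when the stream length is unknown (the CheckBreak branch), writing back through the pointer only if the slice header changed. A rough hand-written sketch of that shape, reduced to a plain int slice and a hypothetical next() token source rather than the real codec Decoder, might look like this:

package main

import "fmt"

// decodeInts mirrors the shape of the generated slice decoders: reuse the
// existing backing array, append while tokens remain, and only write back
// through dst when the slice actually changed.
func decodeInts(dst *[]int, next func() (int, bool)) {
	v := (*dst)[:0]           // keep the current capacity
	changed := len(*dst) != 0 // truncating a non-empty slice counts as a change
	for {
		x, ok := next() // stand-in for the codec Decoder
		if !ok {
			break
		}
		v = append(v, x)
		changed = true
	}
	if changed {
		*dst = v
	}
}

func main() {
	src := []int{1, 2, 3}
	i := 0
	next := func() (int, bool) {
		if i == len(src) {
			return 0, false
		}
		i++
		return src[i-1], true
	}
	out := make([]int, 0, 8)
	decodeInts(&out, next)
	fmt.Println(out, cap(out)) // [1 2 3] 8 — the original capacity is reused
}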
+ // +optional + Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // +optional + Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// Dummy definition +type ReplicationControllerDummy struct { + metav1.TypeMeta `json:",inline"` +} + +// Alpha-level support for Custom Metrics in HPA (as annotations). +type CustomMetricTarget struct { + // Custom Metric name. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Custom Metric value (average). + TargetValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"` +} + +type CustomMetricTargetList struct { + Items []CustomMetricTarget `json:"items" protobuf:"bytes,1,rep,name=items"` +} + +type CustomMetricCurrentStatus struct { + // Custom Metric name. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Custom Metric value (average). + CurrentValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"` +} + +type CustomMetricCurrentStatusList struct { + Items []CustomMetricCurrentStatus `json:"items" protobuf:"bytes,1,rep,name=items"` +} + +// +genclient=true +// +nonNamespaced=true + +// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource +// types to the API. It consists of one or more Versions of the api. +type ThirdPartyResource struct { + metav1.TypeMeta `json:",inline"` + + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Description is the description of this object. + // +optional + Description string `json:"description,omitempty" protobuf:"bytes,2,opt,name=description"` + + // Versions are versions for this third party object + // +optional + Versions []APIVersion `json:"versions,omitempty" protobuf:"bytes,3,rep,name=versions"` +} + +// ThirdPartyResourceList is a list of ThirdPartyResources. +type ThirdPartyResourceList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of ThirdPartyResources. + Items []ThirdPartyResource `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// An APIVersion represents a single concrete version of an object model. +type APIVersion struct { + // Name of this version (e.g. 'v1'). + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` +} + +// An internal object, used for versioned storage in etcd. Not exposed to the end user. +type ThirdPartyResourceData struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Data is the raw JSON data for this data. + // +optional + Data []byte `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` +} + +// +genclient=true + +// Deployment enables declarative updates for Pods and ReplicaSets. +type Deployment struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the Deployment. 
+ // +optional + Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the Deployment. + // +optional + Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +type DeploymentSpec struct { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + + // Template describes the pods that will be created. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` + + // Indicates that the deployment is paused and will not be processed by the + // deployment controller. + // +optional + Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` + + // The config this deployment is rolling back to. Will be cleared after rollback is done. + // +optional + RollbackTo *RollbackConfig `json:"rollbackTo,omitempty" protobuf:"bytes,8,opt,name=rollbackTo"` + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Once autoRollback is + // implemented, the deployment controller will automatically rollback failed + // deployments. Note that progress will not be estimated during the time a + // deployment is paused. This is not set by default. + ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` +} + +// DeploymentRollback stores the information required to rollback a deployment. +type DeploymentRollback struct { + metav1.TypeMeta `json:",inline"` + // Required: This must match the Name of a deployment. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // The annotations to be updated to a deployment + // +optional + UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"` + // The config of this deployment rollback. + RollbackTo RollbackConfig `json:"rollbackTo" protobuf:"bytes,3,opt,name=rollbackTo"` +} + +type RollbackConfig struct { + // The revision to rollback to. 
If set to 0, rollbck to the last revision. + // +optional + Revision int64 `json:"revision,omitempty" protobuf:"varint,1,opt,name=revision"` +} + +const ( + // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added + // to existing RCs (and label key that is added to its pods) to prevent the existing RCs + // to select new pods (and old pods being select by new RC). + DefaultDeploymentUniqueLabelKey string = "pod-template-hash" +) + +// DeploymentStrategy describes how to replace existing pods with new ones. +type DeploymentStrategy struct { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +type DeploymentStrategyType string + +const ( + // Kill all existing pods before creating new ones. + RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" + + // Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one. + RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" +) + +// Spec to control the desired behavior of rolling update. +type RollingUpdateDeployment struct { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // By default, a fixed value of 1 is used. + // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // By default, a value of 1 is used. + // Example: when this is set to 30%, the new RC can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is atmost 130% of desired pods. + // +optional + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` +} + +// DeploymentStatus is the most recently observed status of the Deployment. +type DeploymentStatus struct { + // The generation observed by the deployment controller. 
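Aside (not part of the vendored file): to make the rounding rules in the RollingUpdateDeployment comments concrete, with 10 desired replicas and both fields set to 30%, maxUnavailable rounds down to 3 (so at least 7 pods, 70%, stay available) while maxSurge rounds up to 3 (so at most 13 pods, 130%, exist at once). A tiny self-contained sketch of that arithmetic follows; the helper names are illustrative and not part of this package:

package main

import "fmt"

// Illustrative only: the rounding described in the RollingUpdateDeployment
// comments — maxUnavailable rounds down, maxSurge rounds up.
func roundDownPercent(desired, pct int) int { return desired * pct / 100 }
func roundUpPercent(desired, pct int) int   { return (desired*pct + 99) / 100 }

func main() {
	desired := 10
	maxUnavailable := roundDownPercent(desired, 30) // 3 pods may be unavailable
	maxSurge := roundUpPercent(desired, 30)         // up to 3 extra pods may be created

	fmt.Println("minimum available during update:", desired-maxUnavailable) // 7 (70% of 10)
	fmt.Println("maximum total during update:    ", desired+maxSurge)       // 13 (130% of 10)
}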
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` + + // Total number of ready pods targeted by this deployment. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` + + // Total number of unavailable pods targeted by this deployment. + // +optional + UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` + + // Represents the latest available observations of a deployment's current state. + Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` +} + +type DeploymentConditionType string + +// These are valid conditions of a deployment. +const ( + // Available means the deployment is available, ie. at least the minimum available + // replicas required are up and running for at least minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // Progressing means the deployment is progressing. Progress for a deployment is + // considered when a new replica set is created or adopted, and when new pods scale + // up or old pods scale down. Progress is not estimated for paused deployments or + // when progressDeadlineSeconds is not specified. + DeploymentProgressing DeploymentConditionType = "Progressing" + // ReplicaFailure is added in a deployment when one of its pods fails to be created + // or deleted. + DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + // The last time this condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// DeploymentList is a list of Deployments. +type DeploymentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
+ // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Deployments. + Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +type DaemonSetUpdateStrategy struct { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". + // Default is OnDelete. + // +optional + Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + + // Rolling update config params. Present only if type = "RollingUpdate". + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as DeploymentStrategy.RollingUpdate. + // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +type DaemonSetUpdateStrategyType string + +const ( + // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. + RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" + + // Replace the old daemons only when it's killed + OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete" +) + +// Spec to control the desired behavior of daemon set rolling update. +type RollingUpdateDaemonSet struct { + // The maximum number of DaemonSet pods that can be unavailable during the + // update. Value can be an absolute number (ex: 5) or a percentage of total + // number of DaemonSet pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // This cannot be 0. + // Default value is 1. + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` +} + +// DaemonSetSpec is the specification of a daemon set. +type DaemonSetSpec struct { + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // If empty, defaulted to labels on Pod template. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` + + // An object that describes the pod that will be created. + // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). + // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` + + // An update strategy to replace existing DaemonSet pods with new pods. 
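Aside (not part of the vendored file): as a usage sketch, a DaemonSetUpdateStrategy requesting the RollingUpdate behavior described above could be populated with the types defined in this file plus intstr.FromString; the 10% figure is only an example value.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/pkg/apis/extensions/v1beta1"
)

func main() {
	// Roll DaemonSet pods node by node, keeping at least 90% of them available.
	maxUnavailable := intstr.FromString("10%")
	strategy := v1beta1.DaemonSetUpdateStrategy{
		Type: v1beta1.RollingUpdateDaemonSetStrategyType,
		RollingUpdate: &v1beta1.RollingUpdateDaemonSet{
			MaxUnavailable: &maxUnavailable,
		},
	}
	fmt.Printf("%+v\n", strategy)
}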
+ // +optional + UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"` + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + + // A sequence number representing a specific generation of the template. + // Populated by the system. It can be set only during the creation. + // +optional + TemplateGeneration int64 `json:"templateGeneration,omitempty" protobuf:"varint,5,opt,name=templateGeneration"` +} + +// DaemonSetStatus represents the current status of a daemon set. +type DaemonSetStatus struct { + // The number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. + // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"` + + // The number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. + // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"` + + // The total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). + // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"` + + // The number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running and ready. + NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"` + + // The most recent generation observed by the daemon set controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"` + + // The total number of nodes that are running updated daemon pod + // +optional + UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"` + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"` + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"` +} + +// +genclient=true + +// DaemonSet represents the configuration of a daemon set. +type DaemonSet struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The desired behavior of this daemon set. 
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +const ( + // DaemonSetTemplateGenerationKey is the key of the labels that is added + // to daemon set pods to distinguish between old and new pod templates + // during DaemonSet template update. + DaemonSetTemplateGenerationKey string = "pod-template-generation" +) + +// DaemonSetList is a collection of daemon sets. +type DaemonSetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // A list of daemon sets. + Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ThirdPartyResourceDataList is a list of ThirdPartyResourceData. +type ThirdPartyResourceDataList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of ThirdPartyResourceData. + Items []ThirdPartyResourceData `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true + +// Ingress is a collection of rules that allow inbound connections to reach the +// endpoints defined by a backend. An Ingress can be configured to give services +// externally-reachable urls, load balance traffic, terminate SSL, offer name +// based virtual hosting etc. +type Ingress struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec is the desired state of the Ingress. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the current state of the Ingress. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// IngressList is a collection of Ingress. +type IngressList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Ingress. + Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// IngressSpec describes the Ingress the user wishes to exist. +type IngressSpec struct { + // A default backend capable of servicing requests that don't match any + // rule. At least one of 'backend' or 'rules' must be specified. This field + // is optional to allow the loadbalancer controller or defaulting logic to + // specify a global default. 
+ // +optional + Backend *IngressBackend `json:"backend,omitempty" protobuf:"bytes,1,opt,name=backend"` + + // TLS configuration. Currently the Ingress only supports a single TLS + // port, 443. If multiple members of this list specify different hosts, they + // will be multiplexed on the same port according to the hostname specified + // through the SNI TLS extension, if the ingress controller fulfilling the + // ingress supports SNI. + // +optional + TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` + + // A list of host rules used to configure the Ingress. If unspecified, or + // no rule matches, all traffic is sent to the default backend. + // +optional + Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + // TODO: Add the ability to specify load-balancer IP through claims +} + +// IngressTLS describes the transport layer security associated with an Ingress. +type IngressTLS struct { + // Hosts are a list of hosts included in the TLS certificate. The values in + // this list must match the name/s used in the tlsSecret. Defaults to the + // wildcard host setting for the loadbalancer controller fulfilling this + // Ingress, if left unspecified. + // +optional + Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` + // SecretName is the name of the secret used to terminate SSL traffic on 443. + // Field is left optional to allow SSL routing based on SNI hostname alone. + // If the SNI host in a listener conflicts with the "Host" header field used + // by an IngressRule, the SNI host is used for termination and value of the + // Host header is used for routing. + // +optional + SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"` + // TODO: Consider specifying different modes of termination, protocols etc. +} + +// IngressStatus describe the current state of the Ingress. +type IngressStatus struct { + // LoadBalancer contains the current status of the load-balancer. + // +optional + LoadBalancer v1.LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` +} + +// IngressRule represents the rules mapping the paths under a specified host to +// the related backend services. Incoming requests are first evaluated for a host +// match, then routed to the backend associated with the matching IngressRuleValue. +type IngressRule struct { + // Host is the fully qualified domain name of a network host, as defined + // by RFC 3986. Note the following deviations from the "host" part of the + // URI as defined in the RFC: + // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the + // IP in the Spec of the parent Ingress. + // 2. The `:` delimiter is not respected because ports are not allowed. + // Currently the port of an Ingress is implicitly :80 for http and + // :443 for https. + // Both these may change in the future. + // Incoming requests are matched against the host before the IngressRuleValue. + // If the host is unspecified, the Ingress routes all traffic based on the + // specified IngressRuleValue. + // +optional + Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` + // IngressRuleValue represents a rule to route requests for this IngressRule. + // If unspecified, the rule defaults to a http catch-all. Whether that sends + // just traffic matching the host to the default backend or all traffic to the + // default backend, is left to the controller fulfilling the Ingress. 
Http is + // currently the only supported IngressRuleValue. + // +optional + IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` +} + +// IngressRuleValue represents a rule to apply against incoming requests. If the +// rule is satisfied, the request is routed to the specified backend. Currently +// mixing different types of rules in a single Ingress is disallowed, so exactly +// one of the following must be set. +type IngressRuleValue struct { + //TODO: + // 1. Consider renaming this resource and the associated rules so they + // aren't tied to Ingress. They can be used to route intra-cluster traffic. + // 2. Consider adding fields for ingress-type specific global options + // usable by a loadbalancer, like http keep-alive. + + // +optional + HTTP *HTTPIngressRuleValue `json:"http,omitempty" protobuf:"bytes,1,opt,name=http"` +} + +// HTTPIngressRuleValue is a list of http selectors pointing to backends. +// In the example: http:///? -> backend where +// where parts of the url correspond to RFC 3986, this resource will be used +// to match against everything after the last '/' and before the first '?' +// or '#'. +type HTTPIngressRuleValue struct { + // A collection of paths that map requests to backends. + Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` + // TODO: Consider adding fields for ingress-type specific global + // options usable by a loadbalancer, like http keep-alive. +} + +// HTTPIngressPath associates a path regex with a backend. Incoming urls matching +// the path are forwarded to the backend. +type HTTPIngressPath struct { + // Path is an extended POSIX regex as defined by IEEE Std 1003.1, + // (i.e this follows the egrep/unix syntax, not the perl syntax) + // matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" + // part of a URL as defined by RFC 3986. Paths must begin with + // a '/'. If unspecified, the path defaults to a catch all sending + // traffic to the backend. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` + + // Backend defines the referenced service endpoint to which the traffic + // will be forwarded to. + Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` +} + +// IngressBackend describes all endpoints for a given service and port. +type IngressBackend struct { + // Specifies the name of the referenced service. + ServiceName string `json:"serviceName" protobuf:"bytes,1,opt,name=serviceName"` + + // Specifies the port of the referenced service. + ServicePort intstr.IntOrString `json:"servicePort" protobuf:"bytes,2,opt,name=servicePort"` +} + +// +genclient=true + +// ReplicaSet represents the configuration of a ReplicaSet. +type ReplicaSet struct { + metav1.TypeMeta `json:",inline"` + + // If the Labels of a ReplicaSet are empty, they are defaulted to + // be the same as the Pod(s) that the ReplicaSet manages. + // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the specification of the desired behavior of the ReplicaSet. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the most recently observed status of the ReplicaSet. 
+ // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ReplicaSetList is a collection of ReplicaSets. +type ReplicaSetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of ReplicaSets. + // More info: http://kubernetes.io/docs/user-guide/replication-controller + Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +type ReplicaSetSpec struct { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + + // Selector is a label query over pods that should match the replica count. + // If the selector is empty, it is defaulted to the labels present on the pod template. + // Label keys and values that must match in order to be controlled by this replica set. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template + // +optional + Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +type ReplicaSetStatus struct { + // Replicas is the most recently observed number of replicas. + // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` + + // The number of ready replicas for this replica set. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"` + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"` + + // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. 
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` + + // Represents the latest available observations of a replica set's current state. + // +optional + Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` +} + +type ReplicaSetConditionType string + +// These are valid conditions of a replica set. +const ( + // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created + // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted + // due to kubelet being down or finalizers are failing. + ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure" +) + +// ReplicaSetCondition describes the state of a replica set at a certain point. +type ReplicaSetCondition struct { + // Type of replica set condition. + Type ReplicaSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicaSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient=true +// +nonNamespaced=true + +// Pod Security Policy governs the ability to make requests that affect the Security Context +// that will be applied to a pod and container. +type PodSecurityPolicy struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec defines the policy enforced. + // +optional + Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// Pod Security Policy Spec defines the policy enforced. +type PodSecurityPolicySpec struct { + // privileged determines if a pod can request to be run as privileged. + // +optional + Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` + // DefaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. You may not list a capability in both + // DefaultAddCapabilities and RequiredDropCapabilities. + // +optional + DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/kubernetes/pkg/api/v1.Capability"` + // RequiredDropCapabilities are the capabilities that will be dropped from the container. These + // are required to be dropped and cannot be added. 
+ // +optional + RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/kubernetes/pkg/api/v1.Capability"` + // AllowedCapabilities is a list of capabilities that can be requested to add to the container. + // Capabilities in this field may be added at the pod author's discretion. + // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. + // +optional + AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/kubernetes/pkg/api/v1.Capability"` + // volumes is a white list of allowed volume plugins. Empty indicates that all plugins + // may be used. + // +optional + Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"` + // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + // +optional + HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"` + // hostPorts determines which host port ranges are allowed to be exposed. + // +optional + HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"` + // hostPID determines if the policy allows the use of HostPID in the pod spec. + // +optional + HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"` + // hostIPC determines if the policy allows the use of HostIPC in the pod spec. + // +optional + HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"` + // seLinux is the strategy that will dictate the allowable labels that may be set. + SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` + // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. + RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` + // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` + // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. + FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"` + // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file + // system. If the container specifically requests to run with a non-read only root file system + // the PSP should deny the pod. + // If set to false the container may run with a read only root file system if it wishes but it + // will not be forced to. + // +optional + ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"` +} + +// FS Type gives strong typing to different file systems that are used by volumes. 
+type FSType string + +var ( + AzureFile FSType = "azureFile" + Flocker FSType = "flocker" + FlexVolume FSType = "flexVolume" + HostPath FSType = "hostPath" + EmptyDir FSType = "emptyDir" + GCEPersistentDisk FSType = "gcePersistentDisk" + AWSElasticBlockStore FSType = "awsElasticBlockStore" + GitRepo FSType = "gitRepo" + Secret FSType = "secret" + NFS FSType = "nfs" + ISCSI FSType = "iscsi" + Glusterfs FSType = "glusterfs" + PersistentVolumeClaim FSType = "persistentVolumeClaim" + RBD FSType = "rbd" + Cinder FSType = "cinder" + CephFS FSType = "cephFS" + DownwardAPI FSType = "downwardAPI" + FC FSType = "fc" + ConfigMap FSType = "configMap" + Quobyte FSType = "quobyte" + AzureDisk FSType = "azureDisk" + All FSType = "*" +) + +// Host Port Range defines a range of host ports that will be enabled by a policy +// for pods to use. It requires both the start and end to be defined. +type HostPortRange struct { + // min is the start of the range, inclusive. + Min int32 `json:"min" protobuf:"varint,1,opt,name=min"` + // max is the end of the range, inclusive. + Max int32 `json:"max" protobuf:"varint,2,opt,name=max"` +} + +// SELinux Strategy Options defines the strategy type and any options used to create the strategy. +type SELinuxStrategyOptions struct { + // type is the strategy that will dictate the allowable labels that may be set. + Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` + // seLinuxOptions required to run as; required for MustRunAs + // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context + // +optional + SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` +} + +// SELinuxStrategy denotes strategy types for generating SELinux options for a +// Security Context. +type SELinuxStrategy string + +const ( + // container must have SELinux labels of X applied. + SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" + // container may make requests for any SELinux context labels. + SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" +) + +// RunAsUser Strategy Options defines the strategy type and any options used to create the strategy. +type RunAsUserStrategyOptions struct { + // Rule is the strategy that will dictate the allowable RunAsUser values that may be set. + Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"` + // Ranges are the allowed ranges of uids that may be used. + // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// ID Range provides a min/max of an allowed range of IDs. +type IDRange struct { + // Min is the start of the range, inclusive. + Min int64 `json:"min" protobuf:"varint,1,opt,name=min"` + // Max is the end of the range, inclusive. + Max int64 `json:"max" protobuf:"varint,2,opt,name=max"` +} + +// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a +// Security Context. +type RunAsUserStrategy string + +const ( + // container must run as a particular uid. + RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" + // container must run as a non-root uid + RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" + // container may make requests for any uid. + RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" +) + +// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. 
+type FSGroupStrategyOptions struct { + // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext. + // +optional + Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"` + // Ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. + // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// FSGroupStrategyType denotes strategy types for generating FSGroup values for a +// SecurityContext +type FSGroupStrategyType string + +const ( + // container must have FSGroup of X applied. + FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" + // container may make requests for any FSGroup labels. + FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" +) + +// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +type SupplementalGroupsStrategyOptions struct { + // Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. + // +optional + Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"` + // Ranges are the allowed ranges of supplemental groups. If you would like to force a single + // supplemental group then supply a single range with the same start and end. + // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental +// groups for a SecurityContext. +type SupplementalGroupsStrategyType string + +const ( + // container must run as a particular gid. + SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" + // container may make requests for any gid. + SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" +) + +// Pod Security Policy List is a list of PodSecurityPolicy objects. +type PodSecurityPolicyList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +type NetworkPolicy struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior for this NetworkPolicy. + // +optional + Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +type NetworkPolicySpec struct { + // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules + // is applied to any pods selected by this field. Multiple network policies can select the + // same set of pods. In this case, the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. + PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"` + + // List of ingress rules to be applied to the selected pods. 
+ // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, + // OR if the traffic source is the pod's local node, + // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy + // objects whose podSelector matches the pod. + // If this field is empty then this NetworkPolicy does not affect ingress isolation. + // If this field is present and contains at least one rule, this policy allows any traffic + // which matches at least one of the ingress rules in this list. + // +optional + Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"` +} + +// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. +type NetworkPolicyIngressRule struct { + // List of ports which should be made accessible on the pods selected for this rule. + // Each item in this list is combined using a logical OR. + // If this field is not provided, this rule matches all ports (traffic not restricted by port). + // If this field is empty, this rule matches no ports (no traffic matches). + // If this field is present and contains at least one item, then this rule allows traffic + // only if the traffic matches at least one port in the list. + // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. + // +optional + Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` + + // List of sources which should be able to access the pods selected for this rule. + // Items in this list are combined using a logical OR operation. + // If this field is not provided, this rule matches all sources (traffic not restricted by source). + // If this field is empty, this rule matches no sources (no traffic matches). + // If this field is present and contains at least one item, this rule allows traffic only if the + // traffic matches at least one item in the from list. + // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. + // +optional + From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` +} + +type NetworkPolicyPort struct { + // Optional. The protocol (TCP or UDP) which traffic must match. + // If not specified, this field defaults to TCP. + // +optional + Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/kubernetes/pkg/api/v1.Protocol"` + + // If specified, the port on the given protocol. This can + // either be a numerical or named port on a pod. If this field is not provided, + // this matches all port names and numbers. + // If present, only traffic on the specified protocol AND port + // will be matched. + // +optional + Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` +} + +type NetworkPolicyPeer struct { + // Exactly one of the following must be specified. + + // This is a label selector which selects Pods in this namespace. + // This field follows standard label selector semantics. + // If not provided, this selector selects no pods. + // If present but empty, this selector selects all pods in this namespace. + // +optional + PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"` + + // Selects Namespaces using cluster scoped-labels. This + // matches all pods in all namespaces selected by this label selector. + // This field follows standard label selector semantics. 
+ // If omitted, this selector selects no namespaces. + // If present but empty, this selector selects all namespaces. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"` +} + +// Network Policy List is a list of NetworkPolicy objects. +type NetworkPolicyList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..587615590e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,627 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_APIVersion = map[string]string{ + "": "An APIVersion represents a single concrete version of an object model.", + "name": "Name of this version (e.g. 'v1').", +} + +func (APIVersion) SwaggerDoc() map[string]string { + return map_APIVersion +} + +var map_CustomMetricCurrentStatus = map[string]string{ + "name": "Custom Metric name.", + "value": "Custom Metric value (average).", +} + +func (CustomMetricCurrentStatus) SwaggerDoc() map[string]string { + return map_CustomMetricCurrentStatus +} + +var map_CustomMetricTarget = map[string]string{ + "": "Alpha-level support for Custom Metrics in HPA (as annotations).", + "name": "Custom Metric name.", + "value": "Custom Metric value (average).", +} + +func (CustomMetricTarget) SwaggerDoc() map[string]string { + return map_CustomMetricTarget +} + +var map_DaemonSet = map[string]string{ + "": "DaemonSet represents the configuration of a daemon set.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "The desired behavior of this daemon set. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. 
This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (DaemonSet) SwaggerDoc() map[string]string { + return map_DaemonSet +} + +var map_DaemonSetList = map[string]string{ + "": "DaemonSetList is a collection of daemon sets.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "A list of daemon sets.", +} + +func (DaemonSetList) SwaggerDoc() map[string]string { + return map_DaemonSetList +} + +var map_DaemonSetSpec = map[string]string{ + "": "DaemonSetSpec is the specification of a daemon set.", + "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template", + "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", + "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", + "templateGeneration": "A sequence number representing a specific generation of the template. Populated by the system. It can be set only during the creation.", +} + +func (DaemonSetSpec) SwaggerDoc() map[string]string { + return map_DaemonSetSpec +} + +var map_DaemonSetStatus = map[string]string{ + "": "DaemonSetStatus represents the current status of a daemon set.", + "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md", + "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md", + "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md", + "numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", + "observedGeneration": "The most recent generation observed by the daemon set controller.", + "updatedNumberScheduled": "The total number of nodes that are running updated daemon pod", + "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", + "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", +} + +func (DaemonSetStatus) SwaggerDoc() map[string]string { + return map_DaemonSetStatus +} + +var map_DaemonSetUpdateStrategy = map[string]string{ + "type": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". 
Default is OnDelete.", + "rollingUpdate": "Rolling update config params. Present only if type = \"RollingUpdate\".", +} + +func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string { + return map_DaemonSetUpdateStrategy +} + +var map_Deployment = map[string]string{ + "": "Deployment enables declarative updates for Pods and ReplicaSets.", + "metadata": "Standard object metadata.", + "spec": "Specification of the desired behavior of the Deployment.", + "status": "Most recently observed status of the Deployment.", +} + +func (Deployment) SwaggerDoc() map[string]string { + return map_Deployment +} + +var map_DeploymentCondition = map[string]string{ + "": "DeploymentCondition describes the state of a deployment at a certain point.", + "type": "Type of deployment condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DeploymentCondition) SwaggerDoc() map[string]string { + return map_DeploymentCondition +} + +var map_DeploymentList = map[string]string{ + "": "DeploymentList is a list of Deployments.", + "metadata": "Standard list metadata.", + "items": "Items is the list of Deployments.", +} + +func (DeploymentList) SwaggerDoc() map[string]string { + return map_DeploymentList +} + +var map_DeploymentRollback = map[string]string{ + "": "DeploymentRollback stores the information required to rollback a deployment.", + "name": "Required: This must match the Name of a deployment.", + "updatedAnnotations": "The annotations to be updated to a deployment", + "rollbackTo": "The config of this deployment rollback.", +} + +func (DeploymentRollback) SwaggerDoc() map[string]string { + return map_DeploymentRollback +} + +var map_DeploymentSpec = map[string]string{ + "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", + "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", + "template": "Template describes the pods that will be created.", + "strategy": "The deployment strategy to use to replace existing pods with new ones.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified.", + "paused": "Indicates that the deployment is paused and will not be processed by the deployment controller.", + "rollbackTo": "The config this deployment is rolling back to. Will be cleared after rollback is done.", + "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. 
Once autoRollback is implemented, the deployment controller will automatically rollback failed deployments. Note that progress will not be estimated during the time a deployment is paused. This is not set by default.", +} + +func (DeploymentSpec) SwaggerDoc() map[string]string { + return map_DeploymentSpec +} + +var map_DeploymentStatus = map[string]string{ + "": "DeploymentStatus is the most recently observed status of the Deployment.", + "observedGeneration": "The generation observed by the deployment controller.", + "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", + "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "readyReplicas": "Total number of ready pods targeted by this deployment.", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", + "unavailableReplicas": "Total number of unavailable pods targeted by this deployment.", + "conditions": "Represents the latest available observations of a deployment's current state.", +} + +func (DeploymentStatus) SwaggerDoc() map[string]string { + return map_DeploymentStatus +} + +var map_DeploymentStrategy = map[string]string{ + "": "DeploymentStrategy describes how to replace existing pods with new ones.", + "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", + "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", +} + +func (DeploymentStrategy) SwaggerDoc() map[string]string { + return map_DeploymentStrategy +} + +var map_FSGroupStrategyOptions = map[string]string{ + "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.", + "rule": "Rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", + "ranges": "Ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end.", +} + +func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { + return map_FSGroupStrategyOptions +} + +var map_HTTPIngressPath = map[string]string{ + "": "HTTPIngressPath associates a path regex with a backend. Incoming urls matching the path are forwarded to the backend.", + "path": "Path is an extended POSIX regex as defined by IEEE Std 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. If unspecified, the path defaults to a catch all sending traffic to the backend.", + "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", +} + +func (HTTPIngressPath) SwaggerDoc() map[string]string { + return map_HTTPIngressPath +} + +var map_HTTPIngressRuleValue = map[string]string{ + "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' 
or '#'.", + "paths": "A collection of paths that map requests to backends.", +} + +func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { + return map_HTTPIngressRuleValue +} + +var map_HostPortRange = map[string]string{ + "": "Host Port Range defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined.", + "min": "min is the start of the range, inclusive.", + "max": "max is the end of the range, inclusive.", +} + +func (HostPortRange) SwaggerDoc() map[string]string { + return map_HostPortRange +} + +var map_IDRange = map[string]string{ + "": "ID Range provides a min/max of an allowed range of IDs.", + "min": "Min is the start of the range, inclusive.", + "max": "Max is the end of the range, inclusive.", +} + +func (IDRange) SwaggerDoc() map[string]string { + return map_IDRange +} + +var map_Ingress = map[string]string{ + "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec is the desired state of the Ingress. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is the current state of the Ingress. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (Ingress) SwaggerDoc() map[string]string { + return map_Ingress +} + +var map_IngressBackend = map[string]string{ + "": "IngressBackend describes all endpoints for a given service and port.", + "serviceName": "Specifies the name of the referenced service.", + "servicePort": "Specifies the port of the referenced service.", +} + +func (IngressBackend) SwaggerDoc() map[string]string { + return map_IngressBackend +} + +var map_IngressList = map[string]string{ + "": "IngressList is a collection of Ingress.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is the list of Ingress.", +} + +func (IngressList) SwaggerDoc() map[string]string { + return map_IngressList +} + +var map_IngressRule = map[string]string{ + "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", + "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in the RFC: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the\n\t IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.", +} + +func (IngressRule) SwaggerDoc() map[string]string { + return map_IngressRule +} + +var map_IngressRuleValue = map[string]string{ + "": "IngressRuleValue represents a rule to apply against incoming requests. 
If the rule is satisfied, the request is routed to the specified backend. Currently mixing different types of rules in a single Ingress is disallowed, so exactly one of the following must be set.", +} + +func (IngressRuleValue) SwaggerDoc() map[string]string { + return map_IngressRuleValue +} + +var map_IngressSpec = map[string]string{ + "": "IngressSpec describes the Ingress the user wishes to exist.", + "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", + "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", +} + +func (IngressSpec) SwaggerDoc() map[string]string { + return map_IngressSpec +} + +var map_IngressStatus = map[string]string{ + "": "IngressStatus describe the current state of the Ingress.", + "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", +} + +func (IngressStatus) SwaggerDoc() map[string]string { + return map_IngressStatus +} + +var map_IngressTLS = map[string]string{ + "": "IngressTLS describes the transport layer security associated with an Ingress.", + "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", + "secretName": "SecretName is the name of the secret used to terminate SSL traffic on 443. Field is left optional to allow SSL routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", +} + +func (IngressTLS) SwaggerDoc() map[string]string { + return map_IngressTLS +} + +var map_NetworkPolicy = map[string]string{ + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior for this NetworkPolicy.", +} + +func (NetworkPolicy) SwaggerDoc() map[string]string { + return map_NetworkPolicy +} + +var map_NetworkPolicyIngressRule = map[string]string{ + "": "This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.", + "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is not provided, this rule matches all ports (traffic not restricted by port). If this field is empty, this rule matches no ports (no traffic matches). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is not provided, this rule matches all sources (traffic not restricted by source). 
If this field is empty, this rule matches no sources (no traffic matches). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", +} + +func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { + return map_NetworkPolicyIngressRule +} + +var map_NetworkPolicyList = map[string]string{ + "": "Network Policy List is a list of NetworkPolicy objects.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (NetworkPolicyList) SwaggerDoc() map[string]string { + return map_NetworkPolicyList +} + +var map_NetworkPolicyPeer = map[string]string{ + "podSelector": "This is a label selector which selects Pods in this namespace. This field follows standard label selector semantics. If not provided, this selector selects no pods. If present but empty, this selector selects all pods in this namespace.", + "namespaceSelector": "Selects Namespaces using cluster scoped-labels. This matches all pods in all namespaces selected by this label selector. This field follows standard label selector semantics. If omitted, this selector selects no namespaces. If present but empty, this selector selects all namespaces.", +} + +func (NetworkPolicyPeer) SwaggerDoc() map[string]string { + return map_NetworkPolicyPeer +} + +var map_NetworkPolicyPort = map[string]string{ + "protocol": "Optional. The protocol (TCP or UDP) which traffic must match. If not specified, this field defaults to TCP.", + "port": "If specified, the port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", +} + +func (NetworkPolicyPort) SwaggerDoc() map[string]string { + return map_NetworkPolicyPort +} + +var map_NetworkPolicySpec = map[string]string{ + "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", + "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not affect ingress isolation. If this field is present and contains at least one rule, this policy allows any traffic which matches at least one of the ingress rules in this list.", +} + +func (NetworkPolicySpec) SwaggerDoc() map[string]string { + return map_NetworkPolicySpec +} + +var map_PodSecurityPolicy = map[string]string{ + "": "Pod Security Policy governs the ability to make requests that affect the Security Context that will be applied to a pod and container.", + "metadata": "Standard object's metadata. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "spec defines the policy enforced.", +} + +func (PodSecurityPolicy) SwaggerDoc() map[string]string { + return map_PodSecurityPolicy +} + +var map_PodSecurityPolicyList = map[string]string{ + "": "Pod Security Policy List is a list of PodSecurityPolicy objects.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (PodSecurityPolicyList) SwaggerDoc() map[string]string { + return map_PodSecurityPolicyList +} + +var map_PodSecurityPolicySpec = map[string]string{ + "": "Pod Security Policy Spec defines the policy enforced.", + "privileged": "privileged determines if a pod can request to be run as privileged.", + "defaultAddCapabilities": "DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both DefaultAddCapabilities and RequiredDropCapabilities.", + "requiredDropCapabilities": "RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", + "allowedCapabilities": "AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.", + "volumes": "volumes is a white list of allowed volume plugins. Empty indicates that all plugins may be used.", + "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", + "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", + "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", + "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", + "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", + "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", + "supplementalGroups": "SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", + "fsGroup": "FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.", + "readOnlyRootFilesystem": "ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", +} + +func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { + return map_PodSecurityPolicySpec +} + +var map_ReplicaSet = map[string]string{ + "": "ReplicaSet represents the configuration of a ReplicaSet.", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (ReplicaSet) SwaggerDoc() map[string]string { + return map_ReplicaSet +} + +var map_ReplicaSetCondition = map[string]string{ + "": "ReplicaSetCondition describes the state of a replica set at a certain point.", + "type": "Type of replica set condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "The last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (ReplicaSetCondition) SwaggerDoc() map[string]string { + return map_ReplicaSetCondition +} + +var map_ReplicaSetList = map[string]string{ + "": "ReplicaSetList is a collection of ReplicaSets.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of ReplicaSets. More info: http://kubernetes.io/docs/user-guide/replication-controller", +} + +func (ReplicaSetList) SwaggerDoc() map[string]string { + return map_ReplicaSetList +} + +var map_ReplicaSetSpec = map[string]string{ + "": "ReplicaSetSpec is the specification of a ReplicaSet.", + "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its containers crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "selector": "Selector is a label query over pods that should match the replica count. If the selector is empty, it is defaulted to the labels present on the pod template. Label keys and values that must match in order to be controlled by this replica set. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template", +} + +func (ReplicaSetSpec) SwaggerDoc() map[string]string { + return map_ReplicaSetSpec +} + +var map_ReplicaSetStatus = map[string]string{ + "": "ReplicaSetStatus represents the current status of a ReplicaSet.", + "replicas": "Replicas is the most recently observed number of replicas. 
More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller", + "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", + "readyReplicas": "The number of ready replicas for this replica set.", + "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", + "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", + "conditions": "Represents the latest available observations of a replica set's current state.", +} + +func (ReplicaSetStatus) SwaggerDoc() map[string]string { + return map_ReplicaSetStatus +} + +var map_ReplicationControllerDummy = map[string]string{ + "": "Dummy definition", +} + +func (ReplicationControllerDummy) SwaggerDoc() map[string]string { + return map_ReplicationControllerDummy +} + +var map_RollbackConfig = map[string]string{ + "revision": "The revision to rollback to. If set to 0, rollback to the last revision.", +} + +func (RollbackConfig) SwaggerDoc() map[string]string { + return map_RollbackConfig +} + +var map_RollingUpdateDaemonSet = map[string]string{ + "": "Spec to control the desired behavior of daemon set rolling update.", + "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", +} + +func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { + return map_RollingUpdateDaemonSet +} + +var map_RollingUpdateDeployment = map[string]string{ + "": "Spec to control the desired behavior of rolling update.", + "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. By default, a fixed value of 1 is used. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. By default, a value of 1 is used. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. 
Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", +} + +func (RollingUpdateDeployment) SwaggerDoc() map[string]string { + return map_RollingUpdateDeployment +} + +var map_RunAsUserStrategyOptions = map[string]string{ + "": "RunAsUser Strategy Options defines the strategy type and any options used to create the strategy.", + "rule": "Rule is the strategy that will dictate the allowable RunAsUser values that may be set.", + "ranges": "Ranges are the allowed ranges of uids that may be used.", +} + +func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { + return map_RunAsUserStrategyOptions +} + +var map_SELinuxStrategyOptions = map[string]string{ + "": "SELinux Strategy Options defines the strategy type and any options used to create the strategy.", + "rule": "type is the strategy that will dictate the allowable labels that may be set.", + "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs. More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context", +} + +func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { + return map_SELinuxStrategyOptions +} + +var map_Scale = map[string]string{ + "": "represents a scaling request for a resource.", + "metadata": "Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.", +} + +func (Scale) SwaggerDoc() map[string]string { + return map_Scale +} + +var map_ScaleSpec = map[string]string{ + "": "describes the attributes of a scale subresource", + "replicas": "desired number of instances for the scaled object.", +} + +func (ScaleSpec) SwaggerDoc() map[string]string { + return map_ScaleSpec +} + +var map_ScaleStatus = map[string]string{ + "": "represents the current status of a scale subresource.", + "replicas": "actual number of observed instances of the scaled object.", + "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "targetSelector": "label selector for pods that should match the replicas count. This is a serialized version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", +} + +func (ScaleStatus) SwaggerDoc() map[string]string { + return map_ScaleStatus +} + +var map_SupplementalGroupsStrategyOptions = map[string]string{ + "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.", + "rule": "Rule is the strategy that will dictate what supplemental groups are used in the SecurityContext.", + "ranges": "Ranges are the allowed ranges of supplemental groups. 
If you would like to force a single supplemental group then supply a single range with the same start and end.", +} + +func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { + return map_SupplementalGroupsStrategyOptions +} + +var map_ThirdPartyResource = map[string]string{ + "": "A ThirdPartyResource is a generic representation of a resource; it is used by add-ons and plugins to add new resource types to the API. It consists of one or more Versions of the api.", + "metadata": "Standard object metadata", + "description": "Description is the description of this object.", + "versions": "Versions are versions for this third party object", +} + +func (ThirdPartyResource) SwaggerDoc() map[string]string { + return map_ThirdPartyResource +} + +var map_ThirdPartyResourceData = map[string]string{ + "": "An internal object, used for versioned storage in etcd. Not exposed to the end user.", + "metadata": "Standard object metadata.", + "data": "Data is the raw JSON data for this data.", +} + +func (ThirdPartyResourceData) SwaggerDoc() map[string]string { + return map_ThirdPartyResourceData +} + +var map_ThirdPartyResourceDataList = map[string]string{ + "": "ThirdPartyResourceDataList is a list of ThirdPartyResourceData.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is the list of ThirdPartyResourceData.", +} + +func (ThirdPartyResourceDataList) SwaggerDoc() map[string]string { + return map_ThirdPartyResourceDataList +} + +var map_ThirdPartyResourceList = map[string]string{ + "": "ThirdPartyResourceList is a list of ThirdPartyResources.", + "metadata": "Standard list metadata.", + "items": "Items is the list of ThirdPartyResources.", +} + +func (ThirdPartyResourceList) SwaggerDoc() map[string]string { + return map_ThirdPartyResourceList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.conversion.go new file mode 100644 index 0000000000..017feeb6c1 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.conversion.go @@ -0,0 +1,1600 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" + api "k8s.io/client-go/pkg/api" + api_v1 "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_APIVersion_To_extensions_APIVersion, + Convert_extensions_APIVersion_To_v1beta1_APIVersion, + Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus, + Convert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus, + Convert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList, + Convert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList, + Convert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget, + Convert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget, + Convert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList, + Convert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList, + Convert_v1beta1_DaemonSet_To_extensions_DaemonSet, + Convert_extensions_DaemonSet_To_v1beta1_DaemonSet, + Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList, + Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList, + Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec, + Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec, + Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus, + Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus, + Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy, + Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy, + Convert_v1beta1_Deployment_To_extensions_Deployment, + Convert_extensions_Deployment_To_v1beta1_Deployment, + Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition, + Convert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition, + Convert_v1beta1_DeploymentList_To_extensions_DeploymentList, + Convert_extensions_DeploymentList_To_v1beta1_DeploymentList, + Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback, + Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback, + Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, + Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, + Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus, + Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus, + Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy, + Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy, + Convert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions, + Convert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions, + Convert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath, + Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath, + Convert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue, + Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue, + Convert_v1beta1_HostPortRange_To_extensions_HostPortRange, + Convert_extensions_HostPortRange_To_v1beta1_HostPortRange, + Convert_v1beta1_IDRange_To_extensions_IDRange, + Convert_extensions_IDRange_To_v1beta1_IDRange, + Convert_v1beta1_Ingress_To_extensions_Ingress, + Convert_extensions_Ingress_To_v1beta1_Ingress, + Convert_v1beta1_IngressBackend_To_extensions_IngressBackend, + Convert_extensions_IngressBackend_To_v1beta1_IngressBackend, + Convert_v1beta1_IngressList_To_extensions_IngressList, + Convert_extensions_IngressList_To_v1beta1_IngressList, + Convert_v1beta1_IngressRule_To_extensions_IngressRule, + Convert_extensions_IngressRule_To_v1beta1_IngressRule, + 
Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue, + Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue, + Convert_v1beta1_IngressSpec_To_extensions_IngressSpec, + Convert_extensions_IngressSpec_To_v1beta1_IngressSpec, + Convert_v1beta1_IngressStatus_To_extensions_IngressStatus, + Convert_extensions_IngressStatus_To_v1beta1_IngressStatus, + Convert_v1beta1_IngressTLS_To_extensions_IngressTLS, + Convert_extensions_IngressTLS_To_v1beta1_IngressTLS, + Convert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy, + Convert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy, + Convert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule, + Convert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule, + Convert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList, + Convert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList, + Convert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer, + Convert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer, + Convert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort, + Convert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort, + Convert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec, + Convert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec, + Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy, + Convert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy, + Convert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList, + Convert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList, + Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec, + Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec, + Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet, + Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet, + Convert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition, + Convert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition, + Convert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList, + Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList, + Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec, + Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec, + Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus, + Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus, + Convert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy, + Convert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy, + Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig, + Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig, + Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet, + Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet, + Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, + Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, + Convert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions, + Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions, + Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions, + Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions, + Convert_v1beta1_Scale_To_extensions_Scale, + Convert_extensions_Scale_To_v1beta1_Scale, + Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec, + Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec, + 
Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, + Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, + Convert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions, + Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions, + Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource, + Convert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource, + Convert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData, + Convert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData, + Convert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList, + Convert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList, + Convert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList, + Convert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList, + ) +} + +func autoConvert_v1beta1_APIVersion_To_extensions_APIVersion(in *APIVersion, out *extensions.APIVersion, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +func Convert_v1beta1_APIVersion_To_extensions_APIVersion(in *APIVersion, out *extensions.APIVersion, s conversion.Scope) error { + return autoConvert_v1beta1_APIVersion_To_extensions_APIVersion(in, out, s) +} + +func autoConvert_extensions_APIVersion_To_v1beta1_APIVersion(in *extensions.APIVersion, out *APIVersion, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +func Convert_extensions_APIVersion_To_v1beta1_APIVersion(in *extensions.APIVersion, out *APIVersion, s conversion.Scope) error { + return autoConvert_extensions_APIVersion_To_v1beta1_APIVersion(in, out, s) +} + +func autoConvert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in *CustomMetricCurrentStatus, out *extensions.CustomMetricCurrentStatus, s conversion.Scope) error { + out.Name = in.Name + out.CurrentValue = in.CurrentValue + return nil +} + +func Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in *CustomMetricCurrentStatus, out *extensions.CustomMetricCurrentStatus, s conversion.Scope) error { + return autoConvert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in, out, s) +} + +func autoConvert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in *extensions.CustomMetricCurrentStatus, out *CustomMetricCurrentStatus, s conversion.Scope) error { + out.Name = in.Name + out.CurrentValue = in.CurrentValue + return nil +} + +func Convert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in *extensions.CustomMetricCurrentStatus, out *CustomMetricCurrentStatus, s conversion.Scope) error { + return autoConvert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in, out, s) +} + +func autoConvert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in *CustomMetricCurrentStatusList, out *extensions.CustomMetricCurrentStatusList, s conversion.Scope) error { + out.Items = *(*[]extensions.CustomMetricCurrentStatus)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in *CustomMetricCurrentStatusList, out *extensions.CustomMetricCurrentStatusList, s conversion.Scope) error { + return autoConvert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in, out, s) +} + +func 
autoConvert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in *extensions.CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, s conversion.Scope) error { + out.Items = *(*[]CustomMetricCurrentStatus)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in *extensions.CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, s conversion.Scope) error { + return autoConvert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in, out, s) +} + +func autoConvert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in *CustomMetricTarget, out *extensions.CustomMetricTarget, s conversion.Scope) error { + out.Name = in.Name + out.TargetValue = in.TargetValue + return nil +} + +func Convert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in *CustomMetricTarget, out *extensions.CustomMetricTarget, s conversion.Scope) error { + return autoConvert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in, out, s) +} + +func autoConvert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in *extensions.CustomMetricTarget, out *CustomMetricTarget, s conversion.Scope) error { + out.Name = in.Name + out.TargetValue = in.TargetValue + return nil +} + +func Convert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in *extensions.CustomMetricTarget, out *CustomMetricTarget, s conversion.Scope) error { + return autoConvert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in, out, s) +} + +func autoConvert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in *CustomMetricTargetList, out *extensions.CustomMetricTargetList, s conversion.Scope) error { + out.Items = *(*[]extensions.CustomMetricTarget)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in *CustomMetricTargetList, out *extensions.CustomMetricTargetList, s conversion.Scope) error { + return autoConvert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in, out, s) +} + +func autoConvert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in *extensions.CustomMetricTargetList, out *CustomMetricTargetList, s conversion.Scope) error { + out.Items = *(*[]CustomMetricTarget)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in *extensions.CustomMetricTargetList, out *CustomMetricTargetList, s conversion.Scope) error { + return autoConvert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in, out, s) +} + +func autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet(in, out, s) +} + +func autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, out *DaemonSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := 
Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, out *DaemonSet, s conversion.Scope) error { + return autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in, out, s) +} + +func autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]extensions.DaemonSet, len(*in)) + for i := range *in { + if err := Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in, out, s) +} + +func autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.DaemonSetList, out *DaemonSetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DaemonSet, len(*in)) + for i := range *in { + if err := Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.DaemonSetList, out *DaemonSetList, s conversion.Scope) error { + return autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in, out, s) +} + +func autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.TemplateGeneration = in.TemplateGeneration + return nil +} + +func Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in, out, s) +} + +func autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *DaemonSetSpec, s conversion.Scope) error { + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.TemplateGeneration = in.TemplateGeneration + return nil +} + +func Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *DaemonSetSpec, s conversion.Scope) error { + return 
autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in, out, s) +} + +func autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { + out.CurrentNumberScheduled = in.CurrentNumberScheduled + out.NumberMisscheduled = in.NumberMisscheduled + out.DesiredNumberScheduled = in.DesiredNumberScheduled + out.NumberReady = in.NumberReady + out.ObservedGeneration = in.ObservedGeneration + out.UpdatedNumberScheduled = in.UpdatedNumberScheduled + out.NumberAvailable = in.NumberAvailable + out.NumberUnavailable = in.NumberUnavailable + return nil +} + +func Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in, out, s) +} + +func autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *DaemonSetStatus, s conversion.Scope) error { + out.CurrentNumberScheduled = in.CurrentNumberScheduled + out.NumberMisscheduled = in.NumberMisscheduled + out.DesiredNumberScheduled = in.DesiredNumberScheduled + out.NumberReady = in.NumberReady + out.ObservedGeneration = in.ObservedGeneration + out.UpdatedNumberScheduled = in.UpdatedNumberScheduled + out.NumberAvailable = in.NumberAvailable + out.NumberUnavailable = in.NumberUnavailable + return nil +} + +func Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *DaemonSetStatus, s conversion.Scope) error { + return autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in, out, s) +} + +func autoConvert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { + out.Type = extensions.DaemonSetUpdateStrategyType(in.Type) + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(extensions.RollingUpdateDaemonSet) + if err := Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in, out, s) +} + +func autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *DaemonSetUpdateStrategy, s conversion.Scope) error { + out.Type = DaemonSetUpdateStrategyType(in.Type) + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDaemonSet) + if err := Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *DaemonSetUpdateStrategy, s conversion.Scope) error { + return autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in, out, s) +} + +func autoConvert_v1beta1_Deployment_To_extensions_Deployment(in *Deployment, out *extensions.Deployment, s conversion.Scope) error { + out.ObjectMeta = 
in.ObjectMeta + if err := Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_Deployment_To_extensions_Deployment(in *Deployment, out *extensions.Deployment, s conversion.Scope) error { + return autoConvert_v1beta1_Deployment_To_extensions_Deployment(in, out, s) +} + +func autoConvert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *Deployment, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *Deployment, s conversion.Scope) error { + return autoConvert_extensions_Deployment_To_v1beta1_Deployment(in, out, s) +} + +func autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in *DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { + out.Type = extensions.DeploymentConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + out.LastUpdateTime = in.LastUpdateTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in *DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in, out, s) +} + +func autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in *extensions.DeploymentCondition, out *DeploymentCondition, s conversion.Scope) error { + out.Type = DeploymentConditionType(in.Type) + out.Status = api_v1.ConditionStatus(in.Status) + out.LastUpdateTime = in.LastUpdateTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in *extensions.DeploymentCondition, out *DeploymentCondition, s conversion.Scope) error { + return autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in, out, s) +} + +func autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]extensions.Deployment, len(*in)) + for i := range *in { + if err := Convert_v1beta1_Deployment_To_extensions_Deployment(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in, out, s) +} + +func autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *DeploymentList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + 
for i := range *in { + if err := Convert_extensions_Deployment_To_v1beta1_Deployment(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *DeploymentList, s conversion.Scope) error { + return autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in, out, s) +} + +func autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error { + out.Name = in.Name + out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations)) + if err := Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in, out, s) +} + +func autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *DeploymentRollback, s conversion.Scope) error { + out.Name = in.Name + out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations)) + if err := Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *DeploymentRollback, s conversion.Scope) error { + return autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in, out, s) +} + +func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { + if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit)) + out.Paused = in.Paused + out.RollbackTo = (*extensions.RollbackConfig)(unsafe.Pointer(in.RollbackTo)) + out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds)) + return nil +} + +func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error { + if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit)) + out.Paused = in.Paused + out.RollbackTo = 
(*RollbackConfig)(unsafe.Pointer(in.RollbackTo)) + out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds)) + return nil +} + +func autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.Replicas = in.Replicas + out.UpdatedReplicas = in.UpdatedReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.UnavailableReplicas = in.UnavailableReplicas + out.Conditions = *(*[]extensions.DeploymentCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +func Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in, out, s) +} + +func autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *DeploymentStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.Replicas = in.Replicas + out.UpdatedReplicas = in.UpdatedReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.UnavailableReplicas = in.UnavailableReplicas + out.Conditions = *(*[]DeploymentCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +func Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *DeploymentStatus, s conversion.Scope) error { + return autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in, out, s) +} + +func autoConvert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { + out.Type = extensions.DeploymentStrategyType(in.Type) + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(extensions.RollingUpdateDeployment) + if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func autoConvert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error { + out.Type = DeploymentStrategyType(in.Type) + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDeployment) + if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func autoConvert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(in *FSGroupStrategyOptions, out *extensions.FSGroupStrategyOptions, s conversion.Scope) error { + out.Rule = extensions.FSGroupStrategyType(in.Rule) + out.Ranges = *(*[]extensions.IDRange)(unsafe.Pointer(&in.Ranges)) + return nil +} + +func Convert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(in *FSGroupStrategyOptions, out *extensions.FSGroupStrategyOptions, s conversion.Scope) error { + return autoConvert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(in, out, s) +} + +func autoConvert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(in *extensions.FSGroupStrategyOptions, out *FSGroupStrategyOptions, s conversion.Scope) error { + out.Rule = 
FSGroupStrategyType(in.Rule) + out.Ranges = *(*[]IDRange)(unsafe.Pointer(&in.Ranges)) + return nil +} + +func Convert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(in *extensions.FSGroupStrategyOptions, out *FSGroupStrategyOptions, s conversion.Scope) error { + return autoConvert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(in, out, s) +} + +func autoConvert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in *HTTPIngressPath, out *extensions.HTTPIngressPath, s conversion.Scope) error { + out.Path = in.Path + if err := Convert_v1beta1_IngressBackend_To_extensions_IngressBackend(&in.Backend, &out.Backend, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in *HTTPIngressPath, out *extensions.HTTPIngressPath, s conversion.Scope) error { + return autoConvert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in, out, s) +} + +func autoConvert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in *extensions.HTTPIngressPath, out *HTTPIngressPath, s conversion.Scope) error { + out.Path = in.Path + if err := Convert_extensions_IngressBackend_To_v1beta1_IngressBackend(&in.Backend, &out.Backend, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in *extensions.HTTPIngressPath, out *HTTPIngressPath, s conversion.Scope) error { + return autoConvert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in, out, s) +} + +func autoConvert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in *HTTPIngressRuleValue, out *extensions.HTTPIngressRuleValue, s conversion.Scope) error { + out.Paths = *(*[]extensions.HTTPIngressPath)(unsafe.Pointer(&in.Paths)) + return nil +} + +func Convert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in *HTTPIngressRuleValue, out *extensions.HTTPIngressRuleValue, s conversion.Scope) error { + return autoConvert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in, out, s) +} + +func autoConvert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in *extensions.HTTPIngressRuleValue, out *HTTPIngressRuleValue, s conversion.Scope) error { + out.Paths = *(*[]HTTPIngressPath)(unsafe.Pointer(&in.Paths)) + return nil +} + +func Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in *extensions.HTTPIngressRuleValue, out *HTTPIngressRuleValue, s conversion.Scope) error { + return autoConvert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in, out, s) +} + +func autoConvert_v1beta1_HostPortRange_To_extensions_HostPortRange(in *HostPortRange, out *extensions.HostPortRange, s conversion.Scope) error { + out.Min = int(in.Min) + out.Max = int(in.Max) + return nil +} + +func Convert_v1beta1_HostPortRange_To_extensions_HostPortRange(in *HostPortRange, out *extensions.HostPortRange, s conversion.Scope) error { + return autoConvert_v1beta1_HostPortRange_To_extensions_HostPortRange(in, out, s) +} + +func autoConvert_extensions_HostPortRange_To_v1beta1_HostPortRange(in *extensions.HostPortRange, out *HostPortRange, s conversion.Scope) error { + out.Min = int32(in.Min) + out.Max = int32(in.Max) + return nil +} + +func Convert_extensions_HostPortRange_To_v1beta1_HostPortRange(in *extensions.HostPortRange, out *HostPortRange, s conversion.Scope) error { + return autoConvert_extensions_HostPortRange_To_v1beta1_HostPortRange(in, out, s) +} + +func autoConvert_v1beta1_IDRange_To_extensions_IDRange(in *IDRange, out 
*extensions.IDRange, s conversion.Scope) error { + out.Min = in.Min + out.Max = in.Max + return nil +} + +func Convert_v1beta1_IDRange_To_extensions_IDRange(in *IDRange, out *extensions.IDRange, s conversion.Scope) error { + return autoConvert_v1beta1_IDRange_To_extensions_IDRange(in, out, s) +} + +func autoConvert_extensions_IDRange_To_v1beta1_IDRange(in *extensions.IDRange, out *IDRange, s conversion.Scope) error { + out.Min = in.Min + out.Max = in.Max + return nil +} + +func Convert_extensions_IDRange_To_v1beta1_IDRange(in *extensions.IDRange, out *IDRange, s conversion.Scope) error { + return autoConvert_extensions_IDRange_To_v1beta1_IDRange(in, out, s) +} + +func autoConvert_v1beta1_Ingress_To_extensions_Ingress(in *Ingress, out *extensions.Ingress, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_IngressSpec_To_extensions_IngressSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_IngressStatus_To_extensions_IngressStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_Ingress_To_extensions_Ingress(in *Ingress, out *extensions.Ingress, s conversion.Scope) error { + return autoConvert_v1beta1_Ingress_To_extensions_Ingress(in, out, s) +} + +func autoConvert_extensions_Ingress_To_v1beta1_Ingress(in *extensions.Ingress, out *Ingress, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_extensions_IngressSpec_To_v1beta1_IngressSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_extensions_IngressStatus_To_v1beta1_IngressStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_Ingress_To_v1beta1_Ingress(in *extensions.Ingress, out *Ingress, s conversion.Scope) error { + return autoConvert_extensions_Ingress_To_v1beta1_Ingress(in, out, s) +} + +func autoConvert_v1beta1_IngressBackend_To_extensions_IngressBackend(in *IngressBackend, out *extensions.IngressBackend, s conversion.Scope) error { + out.ServiceName = in.ServiceName + out.ServicePort = in.ServicePort + return nil +} + +func Convert_v1beta1_IngressBackend_To_extensions_IngressBackend(in *IngressBackend, out *extensions.IngressBackend, s conversion.Scope) error { + return autoConvert_v1beta1_IngressBackend_To_extensions_IngressBackend(in, out, s) +} + +func autoConvert_extensions_IngressBackend_To_v1beta1_IngressBackend(in *extensions.IngressBackend, out *IngressBackend, s conversion.Scope) error { + out.ServiceName = in.ServiceName + out.ServicePort = in.ServicePort + return nil +} + +func Convert_extensions_IngressBackend_To_v1beta1_IngressBackend(in *extensions.IngressBackend, out *IngressBackend, s conversion.Scope) error { + return autoConvert_extensions_IngressBackend_To_v1beta1_IngressBackend(in, out, s) +} + +func autoConvert_v1beta1_IngressList_To_extensions_IngressList(in *IngressList, out *extensions.IngressList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]extensions.Ingress)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_IngressList_To_extensions_IngressList(in *IngressList, out *extensions.IngressList, s conversion.Scope) error { + return autoConvert_v1beta1_IngressList_To_extensions_IngressList(in, out, s) +} + +func autoConvert_extensions_IngressList_To_v1beta1_IngressList(in *extensions.IngressList, out *IngressList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]Ingress)(unsafe.Pointer(&in.Items)) + return 
nil +} + +func Convert_extensions_IngressList_To_v1beta1_IngressList(in *extensions.IngressList, out *IngressList, s conversion.Scope) error { + return autoConvert_extensions_IngressList_To_v1beta1_IngressList(in, out, s) +} + +func autoConvert_v1beta1_IngressRule_To_extensions_IngressRule(in *IngressRule, out *extensions.IngressRule, s conversion.Scope) error { + out.Host = in.Host + if err := Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_IngressRule_To_extensions_IngressRule(in *IngressRule, out *extensions.IngressRule, s conversion.Scope) error { + return autoConvert_v1beta1_IngressRule_To_extensions_IngressRule(in, out, s) +} + +func autoConvert_extensions_IngressRule_To_v1beta1_IngressRule(in *extensions.IngressRule, out *IngressRule, s conversion.Scope) error { + out.Host = in.Host + if err := Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_IngressRule_To_v1beta1_IngressRule(in *extensions.IngressRule, out *IngressRule, s conversion.Scope) error { + return autoConvert_extensions_IngressRule_To_v1beta1_IngressRule(in, out, s) +} + +func autoConvert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in *IngressRuleValue, out *extensions.IngressRuleValue, s conversion.Scope) error { + out.HTTP = (*extensions.HTTPIngressRuleValue)(unsafe.Pointer(in.HTTP)) + return nil +} + +func Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in *IngressRuleValue, out *extensions.IngressRuleValue, s conversion.Scope) error { + return autoConvert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in, out, s) +} + +func autoConvert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in *extensions.IngressRuleValue, out *IngressRuleValue, s conversion.Scope) error { + out.HTTP = (*HTTPIngressRuleValue)(unsafe.Pointer(in.HTTP)) + return nil +} + +func Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in *extensions.IngressRuleValue, out *IngressRuleValue, s conversion.Scope) error { + return autoConvert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in, out, s) +} + +func autoConvert_v1beta1_IngressSpec_To_extensions_IngressSpec(in *IngressSpec, out *extensions.IngressSpec, s conversion.Scope) error { + out.Backend = (*extensions.IngressBackend)(unsafe.Pointer(in.Backend)) + out.TLS = *(*[]extensions.IngressTLS)(unsafe.Pointer(&in.TLS)) + out.Rules = *(*[]extensions.IngressRule)(unsafe.Pointer(&in.Rules)) + return nil +} + +func Convert_v1beta1_IngressSpec_To_extensions_IngressSpec(in *IngressSpec, out *extensions.IngressSpec, s conversion.Scope) error { + return autoConvert_v1beta1_IngressSpec_To_extensions_IngressSpec(in, out, s) +} + +func autoConvert_extensions_IngressSpec_To_v1beta1_IngressSpec(in *extensions.IngressSpec, out *IngressSpec, s conversion.Scope) error { + out.Backend = (*IngressBackend)(unsafe.Pointer(in.Backend)) + out.TLS = *(*[]IngressTLS)(unsafe.Pointer(&in.TLS)) + out.Rules = *(*[]IngressRule)(unsafe.Pointer(&in.Rules)) + return nil +} + +func Convert_extensions_IngressSpec_To_v1beta1_IngressSpec(in *extensions.IngressSpec, out *IngressSpec, s conversion.Scope) error { + return autoConvert_extensions_IngressSpec_To_v1beta1_IngressSpec(in, out, s) +} + +func autoConvert_v1beta1_IngressStatus_To_extensions_IngressStatus(in *IngressStatus, out 
*extensions.IngressStatus, s conversion.Scope) error { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.LoadBalancer, &out.LoadBalancer, 0); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_IngressStatus_To_extensions_IngressStatus(in *IngressStatus, out *extensions.IngressStatus, s conversion.Scope) error { + return autoConvert_v1beta1_IngressStatus_To_extensions_IngressStatus(in, out, s) +} + +func autoConvert_extensions_IngressStatus_To_v1beta1_IngressStatus(in *extensions.IngressStatus, out *IngressStatus, s conversion.Scope) error { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.LoadBalancer, &out.LoadBalancer, 0); err != nil { + return err + } + return nil +} + +func Convert_extensions_IngressStatus_To_v1beta1_IngressStatus(in *extensions.IngressStatus, out *IngressStatus, s conversion.Scope) error { + return autoConvert_extensions_IngressStatus_To_v1beta1_IngressStatus(in, out, s) +} + +func autoConvert_v1beta1_IngressTLS_To_extensions_IngressTLS(in *IngressTLS, out *extensions.IngressTLS, s conversion.Scope) error { + out.Hosts = *(*[]string)(unsafe.Pointer(&in.Hosts)) + out.SecretName = in.SecretName + return nil +} + +func Convert_v1beta1_IngressTLS_To_extensions_IngressTLS(in *IngressTLS, out *extensions.IngressTLS, s conversion.Scope) error { + return autoConvert_v1beta1_IngressTLS_To_extensions_IngressTLS(in, out, s) +} + +func autoConvert_extensions_IngressTLS_To_v1beta1_IngressTLS(in *extensions.IngressTLS, out *IngressTLS, s conversion.Scope) error { + out.Hosts = *(*[]string)(unsafe.Pointer(&in.Hosts)) + out.SecretName = in.SecretName + return nil +} + +func Convert_extensions_IngressTLS_To_v1beta1_IngressTLS(in *extensions.IngressTLS, out *IngressTLS, s conversion.Scope) error { + return autoConvert_extensions_IngressTLS_To_v1beta1_IngressTLS(in, out, s) +} + +func autoConvert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in *NetworkPolicy, out *extensions.NetworkPolicy, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in *NetworkPolicy, out *extensions.NetworkPolicy, s conversion.Scope) error { + return autoConvert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in, out, s) +} + +func autoConvert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in *extensions.NetworkPolicy, out *NetworkPolicy, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in *extensions.NetworkPolicy, out *NetworkPolicy, s conversion.Scope) error { + return autoConvert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in, out, s) +} + +func autoConvert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule(in *NetworkPolicyIngressRule, out *extensions.NetworkPolicyIngressRule, s conversion.Scope) error { + out.Ports = *(*[]extensions.NetworkPolicyPort)(unsafe.Pointer(&in.Ports)) + out.From = *(*[]extensions.NetworkPolicyPeer)(unsafe.Pointer(&in.From)) + return nil +} + +func Convert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule(in *NetworkPolicyIngressRule, out *extensions.NetworkPolicyIngressRule, s conversion.Scope) 
error { + return autoConvert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule(in, out, s) +} + +func autoConvert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in *extensions.NetworkPolicyIngressRule, out *NetworkPolicyIngressRule, s conversion.Scope) error { + out.Ports = *(*[]NetworkPolicyPort)(unsafe.Pointer(&in.Ports)) + out.From = *(*[]NetworkPolicyPeer)(unsafe.Pointer(&in.From)) + return nil +} + +func Convert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in *extensions.NetworkPolicyIngressRule, out *NetworkPolicyIngressRule, s conversion.Scope) error { + return autoConvert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in, out, s) +} + +func autoConvert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList(in *NetworkPolicyList, out *extensions.NetworkPolicyList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]extensions.NetworkPolicy)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList(in *NetworkPolicyList, out *extensions.NetworkPolicyList, s conversion.Scope) error { + return autoConvert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList(in, out, s) +} + +func autoConvert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in *extensions.NetworkPolicyList, out *NetworkPolicyList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NetworkPolicy)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in *extensions.NetworkPolicyList, out *NetworkPolicyList, s conversion.Scope) error { + return autoConvert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in, out, s) +} + +func autoConvert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(in *NetworkPolicyPeer, out *extensions.NetworkPolicyPeer, s conversion.Scope) error { + out.PodSelector = (*v1.LabelSelector)(unsafe.Pointer(in.PodSelector)) + out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) + return nil +} + +func Convert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(in *NetworkPolicyPeer, out *extensions.NetworkPolicyPeer, s conversion.Scope) error { + return autoConvert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(in, out, s) +} + +func autoConvert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in *extensions.NetworkPolicyPeer, out *NetworkPolicyPeer, s conversion.Scope) error { + out.PodSelector = (*v1.LabelSelector)(unsafe.Pointer(in.PodSelector)) + out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) + return nil +} + +func Convert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in *extensions.NetworkPolicyPeer, out *NetworkPolicyPeer, s conversion.Scope) error { + return autoConvert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in, out, s) +} + +func autoConvert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(in *NetworkPolicyPort, out *extensions.NetworkPolicyPort, s conversion.Scope) error { + out.Protocol = (*api.Protocol)(unsafe.Pointer(in.Protocol)) + out.Port = (*intstr.IntOrString)(unsafe.Pointer(in.Port)) + return nil +} + +func Convert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(in *NetworkPolicyPort, out *extensions.NetworkPolicyPort, s conversion.Scope) error { + return autoConvert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(in, out, s) +} + +func 
autoConvert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in *extensions.NetworkPolicyPort, out *NetworkPolicyPort, s conversion.Scope) error { + out.Protocol = (*api_v1.Protocol)(unsafe.Pointer(in.Protocol)) + out.Port = (*intstr.IntOrString)(unsafe.Pointer(in.Port)) + return nil +} + +func Convert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in *extensions.NetworkPolicyPort, out *NetworkPolicyPort, s conversion.Scope) error { + return autoConvert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in, out, s) +} + +func autoConvert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(in *NetworkPolicySpec, out *extensions.NetworkPolicySpec, s conversion.Scope) error { + out.PodSelector = in.PodSelector + out.Ingress = *(*[]extensions.NetworkPolicyIngressRule)(unsafe.Pointer(&in.Ingress)) + return nil +} + +func Convert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(in *NetworkPolicySpec, out *extensions.NetworkPolicySpec, s conversion.Scope) error { + return autoConvert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(in, out, s) +} + +func autoConvert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in *extensions.NetworkPolicySpec, out *NetworkPolicySpec, s conversion.Scope) error { + out.PodSelector = in.PodSelector + out.Ingress = *(*[]NetworkPolicyIngressRule)(unsafe.Pointer(&in.Ingress)) + return nil +} + +func Convert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in *extensions.NetworkPolicySpec, out *NetworkPolicySpec, s conversion.Scope) error { + return autoConvert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in, out, s) +} + +func autoConvert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in *PodSecurityPolicy, out *extensions.PodSecurityPolicy, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in *PodSecurityPolicy, out *extensions.PodSecurityPolicy, s conversion.Scope) error { + return autoConvert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in, out, s) +} + +func autoConvert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in *extensions.PodSecurityPolicy, out *PodSecurityPolicy, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in *extensions.PodSecurityPolicy, out *PodSecurityPolicy, s conversion.Scope) error { + return autoConvert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in, out, s) +} + +func autoConvert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList(in *PodSecurityPolicyList, out *extensions.PodSecurityPolicyList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]extensions.PodSecurityPolicy, len(*in)) + for i := range *in { + if err := Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList(in *PodSecurityPolicyList, out *extensions.PodSecurityPolicyList, s 
conversion.Scope) error { + return autoConvert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList(in, out, s) +} + +func autoConvert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(in *extensions.PodSecurityPolicyList, out *PodSecurityPolicyList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodSecurityPolicy, len(*in)) + for i := range *in { + if err := Convert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(in *extensions.PodSecurityPolicyList, out *PodSecurityPolicyList, s conversion.Scope) error { + return autoConvert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(in, out, s) +} + +func autoConvert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in *PodSecurityPolicySpec, out *extensions.PodSecurityPolicySpec, s conversion.Scope) error { + out.Privileged = in.Privileged + out.DefaultAddCapabilities = *(*[]api.Capability)(unsafe.Pointer(&in.DefaultAddCapabilities)) + out.RequiredDropCapabilities = *(*[]api.Capability)(unsafe.Pointer(&in.RequiredDropCapabilities)) + out.AllowedCapabilities = *(*[]api.Capability)(unsafe.Pointer(&in.AllowedCapabilities)) + out.Volumes = *(*[]extensions.FSType)(unsafe.Pointer(&in.Volumes)) + out.HostNetwork = in.HostNetwork + if in.HostPorts != nil { + in, out := &in.HostPorts, &out.HostPorts + *out = make([]extensions.HostPortRange, len(*in)) + for i := range *in { + if err := Convert_v1beta1_HostPortRange_To_extensions_HostPortRange(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.HostPorts = nil + } + out.HostPID = in.HostPID + out.HostIPC = in.HostIPC + if err := Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, s); err != nil { + return err + } + if err := Convert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil { + return err + } + if err := Convert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil { + return err + } + if err := Convert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(&in.FSGroup, &out.FSGroup, s); err != nil { + return err + } + out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem + return nil +} + +func Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in *PodSecurityPolicySpec, out *extensions.PodSecurityPolicySpec, s conversion.Scope) error { + return autoConvert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in, out, s) +} + +func autoConvert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in *extensions.PodSecurityPolicySpec, out *PodSecurityPolicySpec, s conversion.Scope) error { + out.Privileged = in.Privileged + out.DefaultAddCapabilities = *(*[]api_v1.Capability)(unsafe.Pointer(&in.DefaultAddCapabilities)) + out.RequiredDropCapabilities = *(*[]api_v1.Capability)(unsafe.Pointer(&in.RequiredDropCapabilities)) + out.AllowedCapabilities = *(*[]api_v1.Capability)(unsafe.Pointer(&in.AllowedCapabilities)) + out.Volumes = *(*[]FSType)(unsafe.Pointer(&in.Volumes)) + out.HostNetwork = in.HostNetwork + if in.HostPorts != nil { + in, out := &in.HostPorts, 
&out.HostPorts + *out = make([]HostPortRange, len(*in)) + for i := range *in { + if err := Convert_extensions_HostPortRange_To_v1beta1_HostPortRange(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.HostPorts = nil + } + out.HostPID = in.HostPID + out.HostIPC = in.HostIPC + if err := Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, s); err != nil { + return err + } + if err := Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil { + return err + } + if err := Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil { + return err + } + if err := Convert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(&in.FSGroup, &out.FSGroup, s); err != nil { + return err + } + out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem + return nil +} + +func Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in *extensions.PodSecurityPolicySpec, out *PodSecurityPolicySpec, s conversion.Scope) error { + return autoConvert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in, out, s) +} + +func autoConvert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in *ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in *ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { + return autoConvert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in, out, s) +} + +func autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaSet, out *ReplicaSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaSet, out *ReplicaSet, s conversion.Scope) error { + return autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in, out, s) +} + +func autoConvert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { + out.Type = extensions.ReplicaSetConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { + return autoConvert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in, out, s) +} + +func autoConvert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *ReplicaSetCondition, s conversion.Scope) error { + out.Type = ReplicaSetConditionType(in.Type) + out.Status = api_v1.ConditionStatus(in.Status) + 
out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *ReplicaSetCondition, s conversion.Scope) error { + return autoConvert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in, out, s) +} + +func autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in *ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]extensions.ReplicaSet, len(*in)) + for i := range *in { + if err := Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in *ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { + return autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in, out, s) +} + +func autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions.ReplicaSetList, out *ReplicaSetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicaSet, len(*in)) + for i := range *in { + if err := Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions.ReplicaSetList, out *ReplicaSetList, s conversion.Scope) error { + return autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in, out, s) +} + +func autoConvert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { + if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func autoConvert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *ReplicaSetSpec, s conversion.Scope) error { + if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + out.Conditions = *(*[]extensions.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +func Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { + return 
autoConvert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in, out, s) +} + +func autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *ReplicaSetStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + out.Conditions = *(*[]ReplicaSetCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +func Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *ReplicaSetStatus, s conversion.Scope) error { + return autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in, out, s) +} + +func autoConvert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy(in *ReplicationControllerDummy, out *extensions.ReplicationControllerDummy, s conversion.Scope) error { + return nil +} + +func Convert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy(in *ReplicationControllerDummy, out *extensions.ReplicationControllerDummy, s conversion.Scope) error { + return autoConvert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy(in, out, s) +} + +func autoConvert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in *extensions.ReplicationControllerDummy, out *ReplicationControllerDummy, s conversion.Scope) error { + return nil +} + +func Convert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in *extensions.ReplicationControllerDummy, out *ReplicationControllerDummy, s conversion.Scope) error { + return autoConvert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in, out, s) +} + +func autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error { + out.Revision = in.Revision + return nil +} + +func Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error { + return autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in, out, s) +} + +func autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *RollbackConfig, s conversion.Scope) error { + out.Revision = in.Revision + return nil +} + +func Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *RollbackConfig, s conversion.Scope) error { + return autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in, out, s) +} + +func autoConvert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error { + // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) + return nil +} + +func autoConvert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *RollingUpdateDaemonSet, s conversion.Scope) error { + // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) + return nil +} + +func 
autoConvert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { + // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) + // WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) + return nil +} + +func autoConvert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error { + // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) + // WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) + return nil +} + +func autoConvert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(in *RunAsUserStrategyOptions, out *extensions.RunAsUserStrategyOptions, s conversion.Scope) error { + out.Rule = extensions.RunAsUserStrategy(in.Rule) + out.Ranges = *(*[]extensions.IDRange)(unsafe.Pointer(&in.Ranges)) + return nil +} + +func Convert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(in *RunAsUserStrategyOptions, out *extensions.RunAsUserStrategyOptions, s conversion.Scope) error { + return autoConvert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(in, out, s) +} + +func autoConvert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(in *extensions.RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, s conversion.Scope) error { + out.Rule = RunAsUserStrategy(in.Rule) + out.Ranges = *(*[]IDRange)(unsafe.Pointer(&in.Ranges)) + return nil +} + +func Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(in *extensions.RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, s conversion.Scope) error { + return autoConvert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(in, out, s) +} + +func autoConvert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(in *SELinuxStrategyOptions, out *extensions.SELinuxStrategyOptions, s conversion.Scope) error { + out.Rule = extensions.SELinuxStrategy(in.Rule) + out.SELinuxOptions = (*api.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + return nil +} + +func Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(in *SELinuxStrategyOptions, out *extensions.SELinuxStrategyOptions, s conversion.Scope) error { + return autoConvert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(in, out, s) +} + +func autoConvert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in *extensions.SELinuxStrategyOptions, out *SELinuxStrategyOptions, s conversion.Scope) error { + out.Rule = SELinuxStrategy(in.Rule) + out.SELinuxOptions = (*api_v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + return nil +} + +func Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in *extensions.SELinuxStrategyOptions, out *SELinuxStrategyOptions, s conversion.Scope) error { + return autoConvert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in, out, s) 
+} + +func autoConvert_v1beta1_Scale_To_extensions_Scale(in *Scale, out *extensions.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_Scale_To_extensions_Scale(in *Scale, out *extensions.Scale, s conversion.Scope) error { + return autoConvert_v1beta1_Scale_To_extensions_Scale(in, out, s) +} + +func autoConvert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *Scale, s conversion.Scope) error { + return autoConvert_extensions_Scale_To_v1beta1_Scale(in, out, s) +} + +func autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in *ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +func Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in *ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error { + return autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in, out, s) +} + +func autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +func Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { + return autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s) +} + +func autoConvert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector) + // WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string) + return nil +} + +func autoConvert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(in *SupplementalGroupsStrategyOptions, out *extensions.SupplementalGroupsStrategyOptions, s conversion.Scope) error { + out.Rule = extensions.SupplementalGroupsStrategyType(in.Rule) + out.Ranges = *(*[]extensions.IDRange)(unsafe.Pointer(&in.Ranges)) + return nil +} + +func Convert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(in *SupplementalGroupsStrategyOptions, out *extensions.SupplementalGroupsStrategyOptions, s conversion.Scope) error { + return autoConvert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(in, out, s) +} + +func autoConvert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(in 
*extensions.SupplementalGroupsStrategyOptions, out *SupplementalGroupsStrategyOptions, s conversion.Scope) error { + out.Rule = SupplementalGroupsStrategyType(in.Rule) + out.Ranges = *(*[]IDRange)(unsafe.Pointer(&in.Ranges)) + return nil +} + +func Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(in *extensions.SupplementalGroupsStrategyOptions, out *SupplementalGroupsStrategyOptions, s conversion.Scope) error { + return autoConvert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(in, out, s) +} + +func autoConvert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in *ThirdPartyResource, out *extensions.ThirdPartyResource, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Description = in.Description + out.Versions = *(*[]extensions.APIVersion)(unsafe.Pointer(&in.Versions)) + return nil +} + +func Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in *ThirdPartyResource, out *extensions.ThirdPartyResource, s conversion.Scope) error { + return autoConvert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in, out, s) +} + +func autoConvert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in *extensions.ThirdPartyResource, out *ThirdPartyResource, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Description = in.Description + out.Versions = *(*[]APIVersion)(unsafe.Pointer(&in.Versions)) + return nil +} + +func Convert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in *extensions.ThirdPartyResource, out *ThirdPartyResource, s conversion.Scope) error { + return autoConvert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in, out, s) +} + +func autoConvert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in *ThirdPartyResourceData, out *extensions.ThirdPartyResourceData, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) + return nil +} + +func Convert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in *ThirdPartyResourceData, out *extensions.ThirdPartyResourceData, s conversion.Scope) error { + return autoConvert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in, out, s) +} + +func autoConvert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in *extensions.ThirdPartyResourceData, out *ThirdPartyResourceData, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) + return nil +} + +func Convert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in *extensions.ThirdPartyResourceData, out *ThirdPartyResourceData, s conversion.Scope) error { + return autoConvert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in, out, s) +} + +func autoConvert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList(in *ThirdPartyResourceDataList, out *extensions.ThirdPartyResourceDataList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]extensions.ThirdPartyResourceData)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList(in *ThirdPartyResourceDataList, out *extensions.ThirdPartyResourceDataList, s conversion.Scope) error { + return autoConvert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList(in, out, s) +} + +func 
autoConvert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in *extensions.ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, s conversion.Scope) error {
+	out.ListMeta = in.ListMeta
+	out.Items = *(*[]ThirdPartyResourceData)(unsafe.Pointer(&in.Items))
+	return nil
+}
+
+func Convert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in *extensions.ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, s conversion.Scope) error {
+	return autoConvert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in, out, s)
+}
+
+func autoConvert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList(in *ThirdPartyResourceList, out *extensions.ThirdPartyResourceList, s conversion.Scope) error {
+	out.ListMeta = in.ListMeta
+	out.Items = *(*[]extensions.ThirdPartyResource)(unsafe.Pointer(&in.Items))
+	return nil
+}
+
+func Convert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList(in *ThirdPartyResourceList, out *extensions.ThirdPartyResourceList, s conversion.Scope) error {
+	return autoConvert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList(in, out, s)
+}
+
+func autoConvert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in *extensions.ThirdPartyResourceList, out *ThirdPartyResourceList, s conversion.Scope) error {
+	out.ListMeta = in.ListMeta
+	out.Items = *(*[]ThirdPartyResource)(unsafe.Pointer(&in.Items))
+	return nil
+}
+
+func Convert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in *extensions.ThirdPartyResourceList, out *ThirdPartyResourceList, s conversion.Scope) error {
+	return autoConvert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in, out, s)
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..a8088c8fb5
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,1087 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package v1beta1
+
+import (
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	intstr "k8s.io/apimachinery/pkg/util/intstr"
+	api_v1 "k8s.io/client-go/pkg/api/v1"
+	reflect "reflect"
+)
+
+func init() {
+	SchemeBuilder.Register(RegisterDeepCopies)
+}
+
+// RegisterDeepCopies adds deep-copy functions to the given scheme. Public
+// to allow building arbitrary schemes.
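The init block above hands RegisterDeepCopies to the package's SchemeBuilder, so the generated deep-copy functions are installed on whichever scheme the package is later added to. As an illustrative sketch only, not part of the vendored patch, the same public entry point can also be wired into a standalone scheme by hand; the import alias extv1beta1 and the main wrapper are hypothetical, and only runtime.NewScheme plus the RegisterDeepCopies function defined just below are relied on:

package main

import (
	runtime "k8s.io/apimachinery/pkg/runtime"

	extv1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1"
)

func main() {
	// Build a private scheme and install the generated deep-copy functions
	// on it, mirroring what SchemeBuilder arranges for the package's own
	// AddToScheme path.
	scheme := runtime.NewScheme()
	if err := extv1beta1.RegisterDeepCopies(scheme); err != nil {
		panic(err)
	}
}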
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_APIVersion, InType: reflect.TypeOf(&APIVersion{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CustomMetricCurrentStatus, InType: reflect.TypeOf(&CustomMetricCurrentStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CustomMetricCurrentStatusList, InType: reflect.TypeOf(&CustomMetricCurrentStatusList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CustomMetricTarget, InType: reflect.TypeOf(&CustomMetricTarget{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CustomMetricTargetList, InType: reflect.TypeOf(&CustomMetricTargetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSet, InType: reflect.TypeOf(&DaemonSet{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSetList, InType: reflect.TypeOf(&DaemonSetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSetSpec, InType: reflect.TypeOf(&DaemonSetSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSetStatus, InType: reflect.TypeOf(&DaemonSetStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSetUpdateStrategy, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Deployment, InType: reflect.TypeOf(&Deployment{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentCondition, InType: reflect.TypeOf(&DeploymentCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentList, InType: reflect.TypeOf(&DeploymentList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentRollback, InType: reflect.TypeOf(&DeploymentRollback{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentSpec, InType: reflect.TypeOf(&DeploymentSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentStatus, InType: reflect.TypeOf(&DeploymentStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentStrategy, InType: reflect.TypeOf(&DeploymentStrategy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_FSGroupStrategyOptions, InType: reflect.TypeOf(&FSGroupStrategyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HTTPIngressPath, InType: reflect.TypeOf(&HTTPIngressPath{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HTTPIngressRuleValue, InType: reflect.TypeOf(&HTTPIngressRuleValue{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HostPortRange, InType: reflect.TypeOf(&HostPortRange{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IDRange, InType: reflect.TypeOf(&IDRange{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Ingress, InType: reflect.TypeOf(&Ingress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressBackend, InType: reflect.TypeOf(&IngressBackend{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressList, InType: reflect.TypeOf(&IngressList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressRule, InType: reflect.TypeOf(&IngressRule{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressRuleValue, InType: reflect.TypeOf(&IngressRuleValue{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressSpec, InType: reflect.TypeOf(&IngressSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressStatus, InType: reflect.TypeOf(&IngressStatus{})}, + 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressTLS, InType: reflect.TypeOf(&IngressTLS{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NetworkPolicy, InType: reflect.TypeOf(&NetworkPolicy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NetworkPolicyIngressRule, InType: reflect.TypeOf(&NetworkPolicyIngressRule{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NetworkPolicyList, InType: reflect.TypeOf(&NetworkPolicyList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NetworkPolicyPeer, InType: reflect.TypeOf(&NetworkPolicyPeer{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NetworkPolicyPort, InType: reflect.TypeOf(&NetworkPolicyPort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NetworkPolicySpec, InType: reflect.TypeOf(&NetworkPolicySpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PodSecurityPolicy, InType: reflect.TypeOf(&PodSecurityPolicy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PodSecurityPolicyList, InType: reflect.TypeOf(&PodSecurityPolicyList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PodSecurityPolicySpec, InType: reflect.TypeOf(&PodSecurityPolicySpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicaSet, InType: reflect.TypeOf(&ReplicaSet{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicaSetCondition, InType: reflect.TypeOf(&ReplicaSetCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicaSetList, InType: reflect.TypeOf(&ReplicaSetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicaSetSpec, InType: reflect.TypeOf(&ReplicaSetSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicaSetStatus, InType: reflect.TypeOf(&ReplicaSetStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicationControllerDummy, InType: reflect.TypeOf(&ReplicationControllerDummy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollbackConfig, InType: reflect.TypeOf(&RollbackConfig{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollingUpdateDaemonSet, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollingUpdateDeployment, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RunAsUserStrategyOptions, InType: reflect.TypeOf(&RunAsUserStrategyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SELinuxStrategyOptions, InType: reflect.TypeOf(&SELinuxStrategyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Scale, InType: reflect.TypeOf(&Scale{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SupplementalGroupsStrategyOptions, InType: reflect.TypeOf(&SupplementalGroupsStrategyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ThirdPartyResource, InType: reflect.TypeOf(&ThirdPartyResource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ThirdPartyResourceData, InType: reflect.TypeOf(&ThirdPartyResourceData{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ThirdPartyResourceDataList, InType: reflect.TypeOf(&ThirdPartyResourceDataList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ThirdPartyResourceList, InType: 
reflect.TypeOf(&ThirdPartyResourceList{})}, + ) +} + +func DeepCopy_v1beta1_APIVersion(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*APIVersion) + out := out.(*APIVersion) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_CustomMetricCurrentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CustomMetricCurrentStatus) + out := out.(*CustomMetricCurrentStatus) + *out = *in + out.CurrentValue = in.CurrentValue.DeepCopy() + return nil + } +} + +func DeepCopy_v1beta1_CustomMetricCurrentStatusList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CustomMetricCurrentStatusList) + out := out.(*CustomMetricCurrentStatusList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomMetricCurrentStatus, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_CustomMetricCurrentStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_CustomMetricTarget(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CustomMetricTarget) + out := out.(*CustomMetricTarget) + *out = *in + out.TargetValue = in.TargetValue.DeepCopy() + return nil + } +} + +func DeepCopy_v1beta1_CustomMetricTargetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CustomMetricTargetList) + out := out.(*CustomMetricTargetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomMetricTarget, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_CustomMetricTarget(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_DaemonSet(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSet) + out := out.(*DaemonSet) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_DaemonSetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_DaemonSetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSetList) + out := out.(*DaemonSetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DaemonSet, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_DaemonSet(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_DaemonSetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSetSpec) + out := out.(*DaemonSetSpec) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_DaemonSetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSetStatus) + out := out.(*DaemonSetStatus) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_DaemonSetUpdateStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSetUpdateStrategy) + 
out := out.(*DaemonSetUpdateStrategy) + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDaemonSet) + if err := DeepCopy_v1beta1_RollingUpdateDaemonSet(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1beta1_Deployment(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Deployment) + out := out.(*Deployment) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_DeploymentStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentCondition) + out := out.(*DeploymentCondition) + *out = *in + out.LastUpdateTime = in.LastUpdateTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_v1beta1_DeploymentList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentList) + out := out.(*DeploymentList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_Deployment(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentRollback(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentRollback) + out := out.(*DeploymentRollback) + *out = *in + if in.UpdatedAnnotations != nil { + in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentSpec) + out := out.(*DeploymentSpec) + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, c); err != nil { + return err + } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.RollbackTo != nil { + in, out := &in.RollbackTo, &out.RollbackTo + *out = new(RollbackConfig) + **out = **in + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentStatus) + out := out.(*DeploymentStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_DeploymentCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } 
+} + +func DeepCopy_v1beta1_DeploymentStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentStrategy) + out := out.(*DeploymentStrategy) + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDeployment) + if err := DeepCopy_v1beta1_RollingUpdateDeployment(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1beta1_FSGroupStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FSGroupStrategyOptions) + out := out.(*FSGroupStrategyOptions) + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_HTTPIngressPath(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPIngressPath) + out := out.(*HTTPIngressPath) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_HTTPIngressRuleValue(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPIngressRuleValue) + out := out.(*HTTPIngressRuleValue) + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]HTTPIngressPath, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_HostPortRange(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HostPortRange) + out := out.(*HostPortRange) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_IDRange(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IDRange) + out := out.(*IDRange) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_Ingress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Ingress) + out := out.(*Ingress) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_IngressSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_IngressStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_IngressBackend(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressBackend) + out := out.(*IngressBackend) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_IngressList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressList) + out := out.(*IngressList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Ingress, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_Ingress(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_IngressRule(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressRule) + out := out.(*IngressRule) + *out = *in + if err := DeepCopy_v1beta1_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_IngressRuleValue(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressRuleValue) + out := out.(*IngressRuleValue) + *out = *in + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(HTTPIngressRuleValue) + if err := DeepCopy_v1beta1_HTTPIngressRuleValue(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func 
DeepCopy_v1beta1_IngressSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressSpec) + out := out.(*IngressSpec) + *out = *in + if in.Backend != nil { + in, out := &in.Backend, &out.Backend + *out = new(IngressBackend) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]IngressTLS, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_IngressTLS(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]IngressRule, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_IngressRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_IngressStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressStatus) + out := out.(*IngressStatus) + *out = *in + if err := api_v1.DeepCopy_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_IngressTLS(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressTLS) + out := out.(*IngressTLS) + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_NetworkPolicy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicy) + out := out.(*NetworkPolicy) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_NetworkPolicySpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_NetworkPolicyIngressRule(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicyIngressRule) + out := out.(*NetworkPolicyIngressRule) + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]NetworkPolicyPort, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_NetworkPolicyPort(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.From != nil { + in, out := &in.From, &out.From + *out = make([]NetworkPolicyPeer, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_NetworkPolicyPeer(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_NetworkPolicyList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicyList) + out := out.(*NetworkPolicyList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NetworkPolicy, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_NetworkPolicy(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_NetworkPolicyPeer(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicyPeer) + out := out.(*NetworkPolicyPeer) + *out = *in + if in.PodSelector != nil { + in, out := &in.PodSelector, &out.PodSelector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + return nil + } +} + 
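The deep-copy bodies above follow a small set of idioms: value fields ride along on the initial *out = *in, optional pointer scalars are re-allocated with new and copied by double dereference (DeploymentSpec.Replicas above, NetworkPolicyPort.Port just below), and fields without a generated helper, such as ObjectMeta and LabelSelector pointers, are routed through the Cloner via c.DeepCopy. A self-contained sketch of the pointer re-allocation idiom, using a hypothetical Config type and plain Go only, shows why the fresh allocation matters:

package main

import "fmt"

// Config stands in for a generated type with an optional (pointer) field.
type Config struct {
	Replicas *int32
}

// deepCopyConfig copies Config the way deepcopy-gen does: shallow-copy the
// value first, then re-allocate every pointer field so the copy shares no
// memory with the original.
func deepCopyConfig(in, out *Config) {
	*out = *in // copies the pointer itself, so the field is still aliased
	if in.Replicas != nil {
		in, out := &in.Replicas, &out.Replicas
		*out = new(int32) // fresh allocation breaks the aliasing
		**out = **in
	}
}

func main() {
	n := int32(3)
	a := Config{Replicas: &n}
	var b Config
	deepCopyConfig(&a, &b)
	*b.Replicas = 5
	fmt.Println(*a.Replicas, *b.Replicas) // prints "3 5": b no longer aliases a
}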
+func DeepCopy_v1beta1_NetworkPolicyPort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicyPort) + out := out.(*NetworkPolicyPort) + *out = *in + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(api_v1.Protocol) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(intstr.IntOrString) + **out = **in + } + return nil + } +} + +func DeepCopy_v1beta1_NetworkPolicySpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicySpec) + out := out.(*NetworkPolicySpec) + *out = *in + if newVal, err := c.DeepCopy(&in.PodSelector); err != nil { + return err + } else { + out.PodSelector = *newVal.(*v1.LabelSelector) + } + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]NetworkPolicyIngressRule, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_NetworkPolicyIngressRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_PodSecurityPolicy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityPolicy) + out := out.(*PodSecurityPolicy) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_PodSecurityPolicySpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_PodSecurityPolicyList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityPolicyList) + out := out.(*PodSecurityPolicyList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodSecurityPolicy, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_PodSecurityPolicy(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_PodSecurityPolicySpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityPolicySpec) + out := out.(*PodSecurityPolicySpec) + *out = *in + if in.DefaultAddCapabilities != nil { + in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities + *out = make([]api_v1.Capability, len(*in)) + copy(*out, *in) + } + if in.RequiredDropCapabilities != nil { + in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities + *out = make([]api_v1.Capability, len(*in)) + copy(*out, *in) + } + if in.AllowedCapabilities != nil { + in, out := &in.AllowedCapabilities, &out.AllowedCapabilities + *out = make([]api_v1.Capability, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]FSType, len(*in)) + copy(*out, *in) + } + if in.HostPorts != nil { + in, out := &in.HostPorts, &out.HostPorts + *out = make([]HostPortRange, len(*in)) + copy(*out, *in) + } + if err := DeepCopy_v1beta1_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_FSGroupStrategyOptions(&in.FSGroup, &out.FSGroup, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_ReplicaSet(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := 
in.(*ReplicaSet) + out := out.(*ReplicaSet) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_ReplicaSetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_ReplicaSetStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_ReplicaSetCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSetCondition) + out := out.(*ReplicaSetCondition) + *out = *in + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_v1beta1_ReplicaSetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSetList) + out := out.(*ReplicaSetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicaSet, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_ReplicaSet(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_ReplicaSetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSetSpec) + out := out.(*ReplicaSetSpec) + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_ReplicaSetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSetStatus) + out := out.(*ReplicaSetStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicaSetCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_ReplicaSetCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_ReplicationControllerDummy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerDummy) + out := out.(*ReplicationControllerDummy) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_RollbackConfig(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollbackConfig) + out := out.(*RollbackConfig) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_RollingUpdateDaemonSet(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollingUpdateDaemonSet) + out := out.(*RollingUpdateDaemonSet) + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + return nil + } +} + +func DeepCopy_v1beta1_RollingUpdateDeployment(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollingUpdateDeployment) + out := out.(*RollingUpdateDeployment) + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in + } + return nil + } +} + +func DeepCopy_v1beta1_RunAsUserStrategyOptions(in interface{}, out interface{}, c 
*conversion.Cloner) error { + { + in := in.(*RunAsUserStrategyOptions) + out := out.(*RunAsUserStrategyOptions) + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_SELinuxStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SELinuxStrategyOptions) + out := out.(*SELinuxStrategyOptions) + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(api_v1.SELinuxOptions) + **out = **in + } + return nil + } +} + +func DeepCopy_v1beta1_Scale(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Scale) + out := out.(*Scale) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_ScaleStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_ScaleSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleSpec) + out := out.(*ScaleSpec) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_ScaleStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleStatus) + out := out.(*ScaleStatus) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_v1beta1_SupplementalGroupsStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SupplementalGroupsStrategyOptions) + out := out.(*SupplementalGroupsStrategyOptions) + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_ThirdPartyResource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ThirdPartyResource) + out := out.(*ThirdPartyResource) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]APIVersion, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_ThirdPartyResourceData(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ThirdPartyResourceData) + out := out.(*ThirdPartyResourceData) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_ThirdPartyResourceDataList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ThirdPartyResourceDataList) + out := out.(*ThirdPartyResourceDataList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ThirdPartyResourceData, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_ThirdPartyResourceData(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_ThirdPartyResourceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ThirdPartyResourceList) + out := out.(*ThirdPartyResourceList) 
+ *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ThirdPartyResource, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_ThirdPartyResource(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.defaults.go new file mode 100644 index 0000000000..770faa513f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.defaults.go @@ -0,0 +1,475 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/client-go/pkg/api/v1" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&DaemonSet{}, func(obj interface{}) { SetObjectDefaults_DaemonSet(obj.(*DaemonSet)) }) + scheme.AddTypeDefaultingFunc(&DaemonSetList{}, func(obj interface{}) { SetObjectDefaults_DaemonSetList(obj.(*DaemonSetList)) }) + scheme.AddTypeDefaultingFunc(&Deployment{}, func(obj interface{}) { SetObjectDefaults_Deployment(obj.(*Deployment)) }) + scheme.AddTypeDefaultingFunc(&DeploymentList{}, func(obj interface{}) { SetObjectDefaults_DeploymentList(obj.(*DeploymentList)) }) + scheme.AddTypeDefaultingFunc(&NetworkPolicy{}, func(obj interface{}) { SetObjectDefaults_NetworkPolicy(obj.(*NetworkPolicy)) }) + scheme.AddTypeDefaultingFunc(&NetworkPolicyList{}, func(obj interface{}) { SetObjectDefaults_NetworkPolicyList(obj.(*NetworkPolicyList)) }) + scheme.AddTypeDefaultingFunc(&ReplicaSet{}, func(obj interface{}) { SetObjectDefaults_ReplicaSet(obj.(*ReplicaSet)) }) + scheme.AddTypeDefaultingFunc(&ReplicaSetList{}, func(obj interface{}) { SetObjectDefaults_ReplicaSetList(obj.(*ReplicaSetList)) }) + return nil +} + +func SetObjectDefaults_DaemonSet(in *DaemonSet) { + SetDefaults_DaemonSet(in) + v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + v1.SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if 
a.VolumeSource.ConfigMap != nil { + v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_DaemonSetList(in *DaemonSetList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_DaemonSet(a) + } +} + +func SetObjectDefaults_Deployment(in *Deployment) { + SetDefaults_Deployment(in) + v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + v1.SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + 
v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if 
a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_DeploymentList(in *DeploymentList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Deployment(a) + } +} + +func SetObjectDefaults_NetworkPolicy(in *NetworkPolicy) { + SetDefaults_NetworkPolicy(in) +} + +func SetObjectDefaults_NetworkPolicyList(in *NetworkPolicyList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_NetworkPolicy(a) + } +} + +func SetObjectDefaults_ReplicaSet(in *ReplicaSet) { + SetDefaults_ReplicaSet(in) + v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + v1.SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if 
b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_ReplicaSetList(in *ReplicaSetList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ReplicaSet(a) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/extensions/zz_generated.deepcopy.go new file mode 100644 index 0000000000..93409cb610 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/zz_generated.deepcopy.go @@ -0,0 +1,1059 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package extensions + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" + api "k8s.io/client-go/pkg/api" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_APIVersion, InType: reflect.TypeOf(&APIVersion{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_CustomMetricCurrentStatus, InType: reflect.TypeOf(&CustomMetricCurrentStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_CustomMetricCurrentStatusList, InType: reflect.TypeOf(&CustomMetricCurrentStatusList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_CustomMetricTarget, InType: reflect.TypeOf(&CustomMetricTarget{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_CustomMetricTargetList, InType: reflect.TypeOf(&CustomMetricTargetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSet, InType: reflect.TypeOf(&DaemonSet{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetList, InType: reflect.TypeOf(&DaemonSetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetSpec, InType: reflect.TypeOf(&DaemonSetSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetStatus, InType: reflect.TypeOf(&DaemonSetStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetUpdateStrategy, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_Deployment, InType: reflect.TypeOf(&Deployment{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentCondition, InType: reflect.TypeOf(&DeploymentCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentList, InType: reflect.TypeOf(&DeploymentList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentRollback, InType: reflect.TypeOf(&DeploymentRollback{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentSpec, InType: reflect.TypeOf(&DeploymentSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentStatus, InType: reflect.TypeOf(&DeploymentStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentStrategy, InType: reflect.TypeOf(&DeploymentStrategy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_FSGroupStrategyOptions, InType: reflect.TypeOf(&FSGroupStrategyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_HTTPIngressPath, InType: reflect.TypeOf(&HTTPIngressPath{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_HTTPIngressRuleValue, InType: reflect.TypeOf(&HTTPIngressRuleValue{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_HostPortRange, InType: reflect.TypeOf(&HostPortRange{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IDRange, InType: reflect.TypeOf(&IDRange{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_Ingress, InType: reflect.TypeOf(&Ingress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressBackend, InType: reflect.TypeOf(&IngressBackend{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressList, InType: reflect.TypeOf(&IngressList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressRule, InType: reflect.TypeOf(&IngressRule{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressRuleValue, InType: reflect.TypeOf(&IngressRuleValue{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressSpec, InType: reflect.TypeOf(&IngressSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: 
DeepCopy_extensions_IngressStatus, InType: reflect.TypeOf(&IngressStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressTLS, InType: reflect.TypeOf(&IngressTLS{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicy, InType: reflect.TypeOf(&NetworkPolicy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicyIngressRule, InType: reflect.TypeOf(&NetworkPolicyIngressRule{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicyList, InType: reflect.TypeOf(&NetworkPolicyList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicyPeer, InType: reflect.TypeOf(&NetworkPolicyPeer{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicyPort, InType: reflect.TypeOf(&NetworkPolicyPort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicySpec, InType: reflect.TypeOf(&NetworkPolicySpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_PodSecurityPolicy, InType: reflect.TypeOf(&PodSecurityPolicy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_PodSecurityPolicyList, InType: reflect.TypeOf(&PodSecurityPolicyList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_PodSecurityPolicySpec, InType: reflect.TypeOf(&PodSecurityPolicySpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSet, InType: reflect.TypeOf(&ReplicaSet{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSetCondition, InType: reflect.TypeOf(&ReplicaSetCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSetList, InType: reflect.TypeOf(&ReplicaSetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSetSpec, InType: reflect.TypeOf(&ReplicaSetSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSetStatus, InType: reflect.TypeOf(&ReplicaSetStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicationControllerDummy, InType: reflect.TypeOf(&ReplicationControllerDummy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RollbackConfig, InType: reflect.TypeOf(&RollbackConfig{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RollingUpdateDaemonSet, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RollingUpdateDeployment, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RunAsUserStrategyOptions, InType: reflect.TypeOf(&RunAsUserStrategyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_SELinuxStrategyOptions, InType: reflect.TypeOf(&SELinuxStrategyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_Scale, InType: reflect.TypeOf(&Scale{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_SupplementalGroupsStrategyOptions, InType: reflect.TypeOf(&SupplementalGroupsStrategyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ThirdPartyResource, InType: reflect.TypeOf(&ThirdPartyResource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ThirdPartyResourceData, InType: reflect.TypeOf(&ThirdPartyResourceData{})}, + conversion.GeneratedDeepCopyFunc{Fn: 
DeepCopy_extensions_ThirdPartyResourceDataList, InType: reflect.TypeOf(&ThirdPartyResourceDataList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ThirdPartyResourceList, InType: reflect.TypeOf(&ThirdPartyResourceList{})}, + ) +} + +func DeepCopy_extensions_APIVersion(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*APIVersion) + out := out.(*APIVersion) + *out = *in + return nil + } +} + +func DeepCopy_extensions_CustomMetricCurrentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CustomMetricCurrentStatus) + out := out.(*CustomMetricCurrentStatus) + *out = *in + out.CurrentValue = in.CurrentValue.DeepCopy() + return nil + } +} + +func DeepCopy_extensions_CustomMetricCurrentStatusList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CustomMetricCurrentStatusList) + out := out.(*CustomMetricCurrentStatusList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomMetricCurrentStatus, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_CustomMetricCurrentStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_CustomMetricTarget(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CustomMetricTarget) + out := out.(*CustomMetricTarget) + *out = *in + out.TargetValue = in.TargetValue.DeepCopy() + return nil + } +} + +func DeepCopy_extensions_CustomMetricTargetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CustomMetricTargetList) + out := out.(*CustomMetricTargetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomMetricTarget, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_CustomMetricTarget(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_DaemonSet(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSet) + out := out.(*DaemonSet) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_extensions_DaemonSetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_DaemonSetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSetList) + out := out.(*DaemonSetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DaemonSet, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_DaemonSet(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_DaemonSetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSetSpec) + out := out.(*DaemonSetSpec) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + if err := DeepCopy_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_DaemonSetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := 
in.(*DaemonSetStatus) + out := out.(*DaemonSetStatus) + *out = *in + return nil + } +} + +func DeepCopy_extensions_DaemonSetUpdateStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSetUpdateStrategy) + out := out.(*DaemonSetUpdateStrategy) + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDaemonSet) + **out = **in + } + return nil + } +} + +func DeepCopy_extensions_Deployment(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Deployment) + out := out.(*Deployment) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_extensions_DeploymentSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_extensions_DeploymentStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_DeploymentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentCondition) + out := out.(*DeploymentCondition) + *out = *in + out.LastUpdateTime = in.LastUpdateTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_extensions_DeploymentList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentList) + out := out.(*DeploymentList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_Deployment(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_DeploymentRollback(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentRollback) + out := out.(*DeploymentRollback) + *out = *in + if in.UpdatedAnnotations != nil { + in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_extensions_DeploymentSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentSpec) + out := out.(*DeploymentSpec) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + if err := DeepCopy_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, c); err != nil { + return err + } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.RollbackTo != nil { + in, out := &in.RollbackTo, &out.RollbackTo + *out = new(RollbackConfig) + **out = **in + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_extensions_DeploymentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentStatus) + out := out.(*DeploymentStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + if err := 
DeepCopy_extensions_DeploymentCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_DeploymentStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentStrategy) + out := out.(*DeploymentStrategy) + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDeployment) + **out = **in + } + return nil + } +} + +func DeepCopy_extensions_FSGroupStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FSGroupStrategyOptions) + out := out.(*FSGroupStrategyOptions) + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_extensions_HTTPIngressPath(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPIngressPath) + out := out.(*HTTPIngressPath) + *out = *in + return nil + } +} + +func DeepCopy_extensions_HTTPIngressRuleValue(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPIngressRuleValue) + out := out.(*HTTPIngressRuleValue) + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]HTTPIngressPath, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_extensions_HostPortRange(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HostPortRange) + out := out.(*HostPortRange) + *out = *in + return nil + } +} + +func DeepCopy_extensions_IDRange(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IDRange) + out := out.(*IDRange) + *out = *in + return nil + } +} + +func DeepCopy_extensions_Ingress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Ingress) + out := out.(*Ingress) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_extensions_IngressSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_extensions_IngressStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_IngressBackend(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressBackend) + out := out.(*IngressBackend) + *out = *in + return nil + } +} + +func DeepCopy_extensions_IngressList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressList) + out := out.(*IngressList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Ingress, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_Ingress(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_IngressRule(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressRule) + out := out.(*IngressRule) + *out = *in + if err := DeepCopy_extensions_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_IngressRuleValue(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressRuleValue) + out := out.(*IngressRuleValue) + *out = *in + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(HTTPIngressRuleValue) + if err := DeepCopy_extensions_HTTPIngressRuleValue(*in, 
*out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_extensions_IngressSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressSpec) + out := out.(*IngressSpec) + *out = *in + if in.Backend != nil { + in, out := &in.Backend, &out.Backend + *out = new(IngressBackend) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]IngressTLS, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_IngressTLS(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]IngressRule, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_IngressRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_IngressStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressStatus) + out := out.(*IngressStatus) + *out = *in + if err := api.DeepCopy_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_IngressTLS(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressTLS) + out := out.(*IngressTLS) + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_extensions_NetworkPolicy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicy) + out := out.(*NetworkPolicy) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_extensions_NetworkPolicySpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_NetworkPolicyIngressRule(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicyIngressRule) + out := out.(*NetworkPolicyIngressRule) + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]NetworkPolicyPort, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_NetworkPolicyPort(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.From != nil { + in, out := &in.From, &out.From + *out = make([]NetworkPolicyPeer, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_NetworkPolicyPeer(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_NetworkPolicyList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicyList) + out := out.(*NetworkPolicyList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NetworkPolicy, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_NetworkPolicy(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_NetworkPolicyPeer(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicyPeer) + out := out.(*NetworkPolicyPeer) + *out = *in + if in.PodSelector != nil { + in, out := &in.PodSelector, &out.PodSelector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + if newVal, err := 
c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + return nil + } +} + +func DeepCopy_extensions_NetworkPolicyPort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicyPort) + out := out.(*NetworkPolicyPort) + *out = *in + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(api.Protocol) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(intstr.IntOrString) + **out = **in + } + return nil + } +} + +func DeepCopy_extensions_NetworkPolicySpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicySpec) + out := out.(*NetworkPolicySpec) + *out = *in + if newVal, err := c.DeepCopy(&in.PodSelector); err != nil { + return err + } else { + out.PodSelector = *newVal.(*v1.LabelSelector) + } + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]NetworkPolicyIngressRule, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_NetworkPolicyIngressRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_PodSecurityPolicy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityPolicy) + out := out.(*PodSecurityPolicy) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_extensions_PodSecurityPolicySpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_PodSecurityPolicyList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityPolicyList) + out := out.(*PodSecurityPolicyList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodSecurityPolicy, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_PodSecurityPolicy(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_PodSecurityPolicySpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityPolicySpec) + out := out.(*PodSecurityPolicySpec) + *out = *in + if in.DefaultAddCapabilities != nil { + in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities + *out = make([]api.Capability, len(*in)) + copy(*out, *in) + } + if in.RequiredDropCapabilities != nil { + in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities + *out = make([]api.Capability, len(*in)) + copy(*out, *in) + } + if in.AllowedCapabilities != nil { + in, out := &in.AllowedCapabilities, &out.AllowedCapabilities + *out = make([]api.Capability, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]FSType, len(*in)) + copy(*out, *in) + } + if in.HostPorts != nil { + in, out := &in.HostPorts, &out.HostPorts + *out = make([]HostPortRange, len(*in)) + copy(*out, *in) + } + if err := DeepCopy_extensions_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, c); err != nil { + return err + } + if err := DeepCopy_extensions_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, c); err != nil { + return err + } + if err := DeepCopy_extensions_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, c); err != nil { + return err + } + if err := DeepCopy_extensions_FSGroupStrategyOptions(&in.FSGroup, &out.FSGroup, c); err != nil { + return err + 
} + return nil + } +} + +func DeepCopy_extensions_ReplicaSet(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSet) + out := out.(*ReplicaSet) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_extensions_ReplicaSetStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_ReplicaSetCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSetCondition) + out := out.(*ReplicaSetCondition) + *out = *in + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_extensions_ReplicaSetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSetList) + out := out.(*ReplicaSetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicaSet, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_ReplicaSet(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_ReplicaSetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSetSpec) + out := out.(*ReplicaSetSpec) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_ReplicaSetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSetStatus) + out := out.(*ReplicaSetStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicaSetCondition, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_ReplicaSetCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_ReplicationControllerDummy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerDummy) + out := out.(*ReplicationControllerDummy) + *out = *in + return nil + } +} + +func DeepCopy_extensions_RollbackConfig(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollbackConfig) + out := out.(*RollbackConfig) + *out = *in + return nil + } +} + +func DeepCopy_extensions_RollingUpdateDaemonSet(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollingUpdateDaemonSet) + out := out.(*RollingUpdateDaemonSet) + *out = *in + return nil + } +} + +func DeepCopy_extensions_RollingUpdateDeployment(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollingUpdateDeployment) + out := out.(*RollingUpdateDeployment) + *out = *in + return nil + } +} + +func DeepCopy_extensions_RunAsUserStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RunAsUserStrategyOptions) + out := out.(*RunAsUserStrategyOptions) + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_extensions_SELinuxStrategyOptions(in interface{}, out 
interface{}, c *conversion.Cloner) error { + { + in := in.(*SELinuxStrategyOptions) + out := out.(*SELinuxStrategyOptions) + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(api.SELinuxOptions) + **out = **in + } + return nil + } +} + +func DeepCopy_extensions_Scale(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Scale) + out := out.(*Scale) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_extensions_ScaleStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_ScaleSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleSpec) + out := out.(*ScaleSpec) + *out = *in + return nil + } +} + +func DeepCopy_extensions_ScaleStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleStatus) + out := out.(*ScaleStatus) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + return nil + } +} + +func DeepCopy_extensions_SupplementalGroupsStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SupplementalGroupsStrategyOptions) + out := out.(*SupplementalGroupsStrategyOptions) + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_extensions_ThirdPartyResource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ThirdPartyResource) + out := out.(*ThirdPartyResource) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]APIVersion, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_extensions_ThirdPartyResourceData(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ThirdPartyResourceData) + out := out.(*ThirdPartyResourceData) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_extensions_ThirdPartyResourceDataList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ThirdPartyResourceDataList) + out := out.(*ThirdPartyResourceDataList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ThirdPartyResourceData, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_ThirdPartyResourceData(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_ThirdPartyResourceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ThirdPartyResourceList) + out := out.(*ThirdPartyResourceList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ThirdPartyResource, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_ThirdPartyResource(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} 
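Reviewer note (illustrative only, not part of the vendored patch): the generated entry points in the two files above, RegisterDefaults in zz_generated.defaults.go and RegisterDeepCopies in zz_generated.deepcopy.go, are what callers wire into a runtime.Scheme; the covering SetObjectDefaults_* helpers can also be invoked directly on a single object. The sketch below shows one plausible way that wiring is used. It is a hedged example, not code from this patch: the extv1beta1 alias and the throwaway main package are assumptions, and it presumes runtime.NewScheme and Scheme.Default behave as in the apimachinery revision this patch vendors.

    package main

    import (
        "fmt"

        runtime "k8s.io/apimachinery/pkg/runtime"
        extv1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1"
    )

    func main() {
        // Assumed wiring: a fresh scheme with the generated defaulters added.
        // RegisterDefaults installs one defaulting func per top-level type via
        // scheme.AddTypeDefaultingFunc, as seen in zz_generated.defaults.go.
        scheme := runtime.NewScheme()
        if err := extv1beta1.RegisterDefaults(scheme); err != nil {
            panic(err)
        }

        d := &extv1beta1.Deployment{}

        // Dispatch through the scheme by concrete type ...
        scheme.Default(d)
        // ... or call the covering defaulter for the type directly; it also
        // defaults the nested pod template (volumes, containers, probes, ...).
        extv1beta1.SetObjectDefaults_Deployment(d)

        fmt.Printf("defaulted deployment spec: %+v\n", d.Spec)
    }

The deep-copy side follows the same pattern: the generated deepcopy files register RegisterDeepCopies on their package SchemeBuilder (see the init() in the internal extensions file above), so the per-type DeepCopy_* functions become available to the scheme's cloner once the group's AddToScheme runs.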
diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/OWNERS b/vendor/k8s.io/client-go/pkg/apis/policy/OWNERS new file mode 100755 index 0000000000..8d0eea5163 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/OWNERS @@ -0,0 +1,14 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- deads2k +- caesarxuchao +- sttts +- ncdc +- timothysc +- dims +- mml +- mbohlool +- david-mcmahon +- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/doc.go b/vendor/k8s.io/client-go/pkg/apis/policy/doc.go new file mode 100644 index 0000000000..8feeef8e41 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/install/install.go b/vendor/k8s.io/client-go/pkg/apis/policy/install/install.go new file mode 100644 index 0000000000..6253d5bc29 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/install/install.go @@ -0,0 +1,49 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. 
+package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/policy" + "k8s.io/client-go/pkg/apis/policy/v1beta1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: policy.GroupName, + VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/policy", + AddInternalObjectsToScheme: policy.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/register.go b/vendor/k8s.io/client-go/pkg/apis/policy/register.go new file mode 100644 index 0000000000..5aadc3f1b8 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/register.go @@ -0,0 +1,54 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "policy" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + // TODO this gets cleaned up when the types are fixed + scheme.AddKnownTypes(SchemeGroupVersion, + &PodDisruptionBudget{}, + &PodDisruptionBudgetList{}, + &Eviction{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/types.go b/vendor/k8s.io/client-go/pkg/apis/policy/types.go new file mode 100644 index 0000000000..ef2ff713ad --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/types.go @@ -0,0 +1,113 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. +type PodDisruptionBudgetSpec struct { + // An eviction is allowed if at least "minAvailable" pods selected by + // "selector" will still be available after the eviction, i.e. even in the + // absence of the evicted pod. So for example you can prevent all voluntary + // evictions by specifying "100%". + // +optional + MinAvailable intstr.IntOrString + + // Label query over pods whose evictions are managed by the disruption + // budget. + // +optional + Selector *metav1.LabelSelector +} + +// PodDisruptionBudgetStatus represents information about the status of a +// PodDisruptionBudget. Status may trail the actual state of a system. +type PodDisruptionBudgetStatus struct { + // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other + // status informatio is valid only if observedGeneration equals to PDB's object generation. + // +optional + ObservedGeneration int64 + + // DisruptedPods contains information about pods whose eviction was + // processed by the API server eviction subresource handler but has not + // yet been observed by the PodDisruptionBudget controller. + // A pod will be in this map from the time when the API server processed the + // eviction request to the time when the pod is seen by PDB controller + // as having been marked for deletion (or after a timeout). The key in the map is the name of the pod + // and the value is the time when the API server processed the eviction request. If + // the deletion didn't occur and a pod is still there it will be removed from + // the list automatically by PodDisruptionBudget controller after some time. + // If everything goes smooth this map should be empty for the most of the time. + // Large number of entries in the map may indicate problems with pod deletions. + DisruptedPods map[string]metav1.Time + + // Number of pod disruptions that are currently allowed. + PodDisruptionsAllowed int32 + + // current number of healthy pods + CurrentHealthy int32 + + // minimum desired number of healthy pods + DesiredHealthy int32 + + // total number of pods counted by this disruption budget + ExpectedPods int32 +} + +// +genclient=true + +// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods +type PodDisruptionBudget struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Specification of the desired behavior of the PodDisruptionBudget. + // +optional + Spec PodDisruptionBudgetSpec + // Most recently observed status of the PodDisruptionBudget. + // +optional + Status PodDisruptionBudgetStatus +} + +// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. +type PodDisruptionBudgetList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + Items []PodDisruptionBudget +} + +// +genclient=true +// +noMethods=true + +// Eviction evicts a pod from its node subject to certain policies and safety constraints. +// This is a subresource of Pod. 
A request to cause such an eviction is +// created by POSTing to .../pods//eviction. +type Eviction struct { + metav1.TypeMeta + + // ObjectMeta describes the pod that is being evicted. + // +optional + metav1.ObjectMeta + + // DeleteOptions may be provided + // +optional + DeleteOptions *metav1.DeleteOptions +} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/doc.go new file mode 100644 index 0000000000..aadbe14164 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package policy is for any kind of policy object. Suitable examples, even if +// they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, +// NetworkPolicy, etc. +package v1beta1 diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.pb.go new file mode 100644 index 0000000000..3b68f56fac --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.pb.go @@ -0,0 +1,1375 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/policy/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/policy/v1beta1/generated.proto + + It has these top-level messages: + Eviction + PodDisruptionBudget + PodDisruptionBudgetList + PodDisruptionBudgetSpec + PodDisruptionBudgetStatus +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *Eviction) Reset() { *m = Eviction{} } +func (*Eviction) ProtoMessage() {} +func (*Eviction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } +func (*PodDisruptionBudget) ProtoMessage() {} +func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } +func (*PodDisruptionBudgetList) ProtoMessage() {} +func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } +func (*PodDisruptionBudgetSpec) ProtoMessage() {} +func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } +func (*PodDisruptionBudgetStatus) ProtoMessage() {} +func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{4} +} + +func init() { + proto.RegisterType((*Eviction)(nil), "k8s.io.client-go.pkg.apis.policy.v1beta1.Eviction") + proto.RegisterType((*PodDisruptionBudget)(nil), "k8s.io.client-go.pkg.apis.policy.v1beta1.PodDisruptionBudget") + proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.client-go.pkg.apis.policy.v1beta1.PodDisruptionBudgetList") + proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.client-go.pkg.apis.policy.v1beta1.PodDisruptionBudgetSpec") + proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.client-go.pkg.apis.policy.v1beta1.PodDisruptionBudgetStatus") +} +func (m *Eviction) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Eviction) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + if m.DeleteOptions != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DeleteOptions.Size())) + n2, err := m.DeleteOptions.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *PodDisruptionBudget) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodDisruptionBudget) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n3, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n4, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n5, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *PodDisruptionBudgetList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + 
return data[:n], nil +} + +func (m *PodDisruptionBudgetList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodDisruptionBudgetSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodDisruptionBudgetSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.MinAvailable.Size())) + n7, err := m.MinAvailable.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + if m.Selector != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n8, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func (m *PodDisruptionBudgetStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodDisruptionBudgetStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) + if len(m.DisruptedPods) > 0 { + for k := range m.DisruptedPods { + data[i] = 0x12 + i++ + v := m.DisruptedPods[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n9, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + } + } + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodDisruptionsAllowed)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentHealthy)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DesiredHealthy)) + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ExpectedPods)) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *Eviction) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + if m.DeleteOptions != nil { + l = m.DeleteOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodDisruptionBudget) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodDisruptionBudgetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodDisruptionBudgetSpec) Size() (n int) { + var l int + _ = l + l = m.MinAvailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodDisruptionBudgetStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if len(m.DisruptedPods) > 0 { + for k, v := range m.DisruptedPods { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + n += 1 + sovGenerated(uint64(m.PodDisruptionsAllowed)) + n += 1 + sovGenerated(uint64(m.CurrentHealthy)) + n += 1 + sovGenerated(uint64(m.DesiredHealthy)) + n += 1 + sovGenerated(uint64(m.ExpectedPods)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Eviction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Eviction{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudget) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDisruptionBudget{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodDisruptionBudgetSpec", "PodDisruptionBudgetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodDisruptionBudgetStatus", "PodDisruptionBudgetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDisruptionBudgetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDisruptionBudgetSpec{`, + `MinAvailable:` + 
strings.Replace(strings.Replace(this.MinAvailable.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetStatus) String() string { + if this == nil { + return "nil" + } + keysForDisruptedPods := make([]string, 0, len(this.DisruptedPods)) + for k := range this.DisruptedPods { + keysForDisruptedPods = append(keysForDisruptedPods, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) + mapStringForDisruptedPods := "map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time{" + for _, k := range keysForDisruptedPods { + mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k]) + } + mapStringForDisruptedPods += "}" + s := strings.Join([]string{`&PodDisruptionBudgetStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `DisruptedPods:` + mapStringForDisruptedPods + `,`, + `PodDisruptionsAllowed:` + fmt.Sprintf("%v", this.PodDisruptionsAllowed) + `,`, + `CurrentHealthy:` + fmt.Sprintf("%v", this.CurrentHealthy) + `,`, + `DesiredHealthy:` + fmt.Sprintf("%v", this.DesiredHealthy) + `,`, + `ExpectedPods:` + fmt.Sprintf("%v", this.ExpectedPods) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Eviction) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Eviction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Eviction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeleteOptions == nil { + m.DeleteOptions = &k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions{} + } + if err := 
m.DeleteOptions.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudget) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudget: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudget: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudgetList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudgetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudgetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodDisruptionBudget{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudgetSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudgetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudgetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MinAvailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MinAvailable.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudgetStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudgetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudgetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DisruptedPods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.DisruptedPods == nil { + m.DisruptedPods = make(map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time) + } + m.DisruptedPods[mapkey] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PodDisruptionsAllowed", wireType) + } + m.PodDisruptionsAllowed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.PodDisruptionsAllowed |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentHealthy", wireType) + } + m.CurrentHealthy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.CurrentHealthy |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredHealthy", wireType) + } + m.DesiredHealthy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.DesiredHealthy |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPods", wireType) + } + m.ExpectedPods = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ExpectedPods |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] 
< 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 773 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x94, 0xcb, 0x6e, 0xf3, 0x44, + 0x18, 0x86, 0xe3, 0x26, 0x29, 0x61, 0x9a, 0x54, 0x65, 0xa0, 0x10, 0x22, 0xe1, 0xa2, 0xac, 0x5a, + 0x04, 0x63, 0xda, 0x22, 0x54, 0x58, 0x54, 0xd4, 0xa4, 0x82, 0xa2, 0x56, 0xa9, 0x5c, 0x24, 0x24, + 0x04, 0x12, 0x63, 0xfb, 0xc3, 0x99, 0xc6, 0x27, 0x8d, 0xc7, 0xa1, 0xd9, 0x71, 0x09, 0x2c, 0xb8, + 0xa8, 0x4a, 0x6c, 0xba, 0x44, 0x08, 0x55, 0x34, 0x70, 0x0b, 0xec, 0x91, 0xc7, 0x93, 0x83, 0x9b, + 0x44, 0x0d, 0xea, 0xaf, 0x7f, 0xe7, 0x39, 0x3c, 0xef, 0xfb, 0x9d, 0xc6, 0xe8, 0x93, 0xfe, 0x51, + 0x42, 0x58, 0x64, 0xf4, 0x53, 0x1b, 0x78, 0x08, 0x02, 0x12, 0x23, 0xee, 0x7b, 0x06, 0x8d, 0x59, + 0x62, 0xc4, 0x91, 0xcf, 0x9c, 0xa1, 0x31, 0xd8, 0xb7, 0x41, 0xd0, 0x7d, 0xc3, 0x83, 0x10, 0x38, + 0x15, 0xe0, 0x92, 0x98, 0x47, 0x22, 0xc2, 0x7b, 0x39, 0x4a, 0xa6, 0x28, 0x89, 0xfb, 0x1e, 0xc9, + 0x50, 0x92, 0xa3, 0x44, 0xa1, 0xad, 0x0f, 0x3c, 0x26, 0x7a, 0xa9, 0x4d, 0x9c, 0x28, 0x30, 0xbc, + 0xc8, 0x8b, 0x0c, 0xa9, 0x60, 0xa7, 0x3f, 0xca, 0x95, 0x5c, 0xc8, 0xaf, 0x5c, 0xb9, 0xf5, 0x91, + 0x0a, 0x8a, 0xc6, 0x2c, 0xa0, 0x4e, 0x8f, 0x85, 0xc0, 0x87, 0xd3, 0xb0, 0x02, 0x10, 0xd4, 0x18, + 0xcc, 0xc5, 0xd3, 0x32, 0x96, 0x51, 0x3c, 0x0d, 0x05, 0x0b, 0x60, 0x0e, 0xf8, 0xf8, 0x29, 0x20, + 0x71, 0x7a, 0x10, 0xd0, 0x39, 0xee, 0x70, 0x19, 0x97, 0x0a, 0xe6, 0x1b, 0x2c, 0x14, 0x89, 0xe0, + 0x73, 0xd0, 0x4c, 0x4e, 0x09, 0xf0, 0x01, 0xf0, 0x69, 0x42, 0x70, 0x43, 0x83, 0xd8, 0x87, 0x45, + 0x39, 0xbd, 0xbf, 0xb4, 0x3d, 0x0b, 0x6e, 0xb7, 0xff, 0xd0, 0x50, 0xed, 0x74, 0xc0, 0x1c, 0xc1, + 0xa2, 0x10, 0xff, 0x80, 0x6a, 0x59, 0xa5, 0x5c, 0x2a, 0x68, 0x53, 0x7b, 0x57, 0xdb, 0xdd, 0x38, + 0xf8, 0x90, 0xa8, 0x8e, 0xcd, 0x06, 0x3e, 0xed, 0x59, 0x76, 0x9b, 0x0c, 0xf6, 0x49, 0xd7, 0xbe, + 0x06, 0x47, 0x5c, 0x80, 0xa0, 0x26, 0xbe, 0xbd, 0xdf, 0x29, 0x8d, 0xee, 0x77, 0xd0, 0x74, 0xcf, + 0x9a, 0xa8, 0x62, 0x1f, 0x35, 0x5c, 0xf0, 0x41, 0x40, 0x37, 0xce, 0x1c, 0x93, 0xe6, 0x9a, 0xb4, + 0x39, 0x5c, 0xcd, 0xa6, 0x33, 0x8b, 0x9a, 0xaf, 0x8d, 0xee, 0x77, 0x1a, 0x85, 0x2d, 0xab, 0x28, + 0xde, 0xfe, 0x6d, 0x0d, 0xbd, 0x7e, 0x19, 0xb9, 0x1d, 0x96, 
0xf0, 0x54, 0x6e, 0x99, 0xa9, 0xeb, + 0x81, 0x78, 0x09, 0x79, 0xba, 0xa8, 0x92, 0xc4, 0xe0, 0xa8, 0xf4, 0x4c, 0xb2, 0xf2, 0xdc, 0x93, + 0x05, 0xf1, 0x5e, 0xc5, 0xe0, 0x98, 0x75, 0xe5, 0x57, 0xc9, 0x56, 0x96, 0x54, 0xc7, 0x3e, 0x5a, + 0x4f, 0x04, 0x15, 0x69, 0xd2, 0x2c, 0x4b, 0x9f, 0xce, 0x33, 0x7d, 0xa4, 0x96, 0xb9, 0xa9, 0x9c, + 0xd6, 0xf3, 0xb5, 0xa5, 0x3c, 0xda, 0x7f, 0x6a, 0xe8, 0xad, 0x05, 0xd4, 0x39, 0x4b, 0x04, 0xfe, + 0x6e, 0xae, 0xa2, 0x64, 0xb5, 0x8a, 0x66, 0xb4, 0xac, 0xe7, 0x96, 0x72, 0xad, 0x8d, 0x77, 0x66, + 0xaa, 0xe9, 0xa0, 0x2a, 0x13, 0x10, 0x64, 0xd3, 0x52, 0xde, 0xdd, 0x38, 0x38, 0x7e, 0x5e, 0x9a, + 0x66, 0x43, 0x59, 0x55, 0xcf, 0x32, 0x51, 0x2b, 0xd7, 0x6e, 0xff, 0xb3, 0x38, 0xbd, 0xac, 0xdc, + 0xf8, 0x1a, 0xd5, 0x03, 0x16, 0x9e, 0x0c, 0x28, 0xf3, 0xa9, 0xed, 0xc3, 0x93, 0x43, 0x93, 0xbd, + 0x6a, 0x92, 0xbf, 0x6a, 0x72, 0x16, 0x8a, 0x2e, 0xbf, 0x12, 0x9c, 0x85, 0x9e, 0xf9, 0x86, 0x72, + 0xae, 0x5f, 0xcc, 0xa8, 0x59, 0x05, 0x6d, 0xfc, 0x3d, 0xaa, 0x25, 0xe0, 0x83, 0x23, 0x22, 0xfe, + 0xff, 0x5e, 0xc7, 0x39, 0xb5, 0xc1, 0xbf, 0x52, 0xa8, 0x59, 0xcf, 0x6a, 0x39, 0x5e, 0x59, 0x13, + 0xc9, 0xf6, 0xbf, 0x15, 0xf4, 0xf6, 0xd2, 0xde, 0xe3, 0xaf, 0x10, 0x8e, 0x6c, 0xf9, 0xb3, 0x71, + 0xbf, 0xc8, 0xff, 0x14, 0x2c, 0x0a, 0x65, 0xba, 0x65, 0xb3, 0xa5, 0x82, 0xc7, 0xdd, 0xb9, 0x1b, + 0xd6, 0x02, 0x0a, 0xff, 0xaa, 0xa1, 0x86, 0x9b, 0xdb, 0x80, 0x7b, 0x19, 0xb9, 0xe3, 0xf6, 0x7d, + 0xf3, 0x22, 0xa6, 0x94, 0x74, 0x66, 0x95, 0x4f, 0x43, 0xc1, 0x87, 0xe6, 0xb6, 0x0a, 0xb0, 0x51, + 0x38, 0xb3, 0x8a, 0x41, 0xe0, 0x0b, 0x84, 0xdd, 0x89, 0x64, 0x72, 0xe2, 0xfb, 0xd1, 0x4f, 0xe0, + 0xca, 0x07, 0x54, 0x35, 0xdf, 0x51, 0x0a, 0xdb, 0x05, 0xdf, 0xf1, 0x25, 0x6b, 0x01, 0x88, 0x8f, + 0xd1, 0xa6, 0x93, 0x72, 0x0e, 0xa1, 0xf8, 0x12, 0xa8, 0x2f, 0x7a, 0xc3, 0x66, 0x45, 0x4a, 0xbd, + 0xa9, 0xa4, 0x36, 0x3f, 0x2f, 0x9c, 0x5a, 0x8f, 0x6e, 0x67, 0xbc, 0x0b, 0x09, 0xe3, 0xe0, 0x8e, + 0xf9, 0x6a, 0x91, 0xef, 0x14, 0x4e, 0xad, 0x47, 0xb7, 0xf1, 0x11, 0xaa, 0xc3, 0x4d, 0x0c, 0xce, + 0xb8, 0xc6, 0xeb, 0x92, 0x9e, 0x0c, 0xda, 0xe9, 0xcc, 0x99, 0x55, 0xb8, 0xd9, 0xf2, 0x11, 0x9e, + 0x2f, 0x22, 0xde, 0x42, 0xe5, 0x3e, 0x0c, 0x65, 0xcb, 0x5f, 0xb5, 0xb2, 0x4f, 0xfc, 0x19, 0xaa, + 0x0e, 0xa8, 0x9f, 0x82, 0x9a, 0xc6, 0xf7, 0x56, 0x9b, 0xc6, 0xaf, 0x59, 0x00, 0x56, 0x0e, 0x7e, + 0xba, 0x76, 0xa4, 0x99, 0x7b, 0xb7, 0x0f, 0x7a, 0xe9, 0xee, 0x41, 0x2f, 0xfd, 0xfe, 0xa0, 0x97, + 0x7e, 0x1e, 0xe9, 0xda, 0xed, 0x48, 0xd7, 0xee, 0x46, 0xba, 0xf6, 0xd7, 0x48, 0xd7, 0x7e, 0xf9, + 0x5b, 0x2f, 0x7d, 0xfb, 0x8a, 0x6a, 0xfa, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x1b, 0xe9, 0x2f, + 0xf1, 0x61, 0x08, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.proto new file mode 100644 index 0000000000..d14d5de30e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.proto @@ -0,0 +1,109 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. 
Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.policy.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// Eviction evicts a pod from its node subject to certain policies and safety constraints. +// This is a subresource of Pod. A request to cause such an eviction is +// created by POSTing to .../pods//evictions. +message Eviction { + // ObjectMeta describes the pod that is being evicted. + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // DeleteOptions may be provided + optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; +} + +// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods +message PodDisruptionBudget { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the PodDisruptionBudget. + optional PodDisruptionBudgetSpec spec = 2; + + // Most recently observed status of the PodDisruptionBudget. + optional PodDisruptionBudgetStatus status = 3; +} + +// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. +message PodDisruptionBudgetList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated PodDisruptionBudget items = 2; +} + +// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. +message PodDisruptionBudgetSpec { + // An eviction is allowed if at least "minAvailable" pods selected by + // "selector" will still be available after the eviction, i.e. even in the + // absence of the evicted pod. So for example you can prevent all voluntary + // evictions by specifying "100%". + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; + + // Label query over pods whose evictions are managed by the disruption + // budget. + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; +} + +// PodDisruptionBudgetStatus represents information about the status of a +// PodDisruptionBudget. Status may trail the actual state of a system. +message PodDisruptionBudgetStatus { + // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other + // status informatio is valid only if observedGeneration equals to PDB's object generation. + // +optional + optional int64 observedGeneration = 1; + + // DisruptedPods contains information about pods whose eviction was + // processed by the API server eviction subresource handler but has not + // yet been observed by the PodDisruptionBudget controller. + // A pod will be in this map from the time when the API server processed the + // eviction request to the time when the pod is seen by PDB controller + // as having been marked for deletion (or after a timeout). The key in the map is the name of the pod + // and the value is the time when the API server processed the eviction request. If + // the deletion didn't occur and a pod is still there it will be removed from + // the list automatically by PodDisruptionBudget controller after some time. 
+ // If everything goes smooth this map should be empty for the most of the time. + // Large number of entries in the map may indicate problems with pod deletions. + map disruptedPods = 2; + + // Number of pod disruptions that are currently allowed. + optional int32 disruptionsAllowed = 3; + + // current number of healthy pods + optional int32 currentHealthy = 4; + + // minimum desired number of healthy pods + optional int32 desiredHealthy = 5; + + // total number of pods counted by this disruption budget + optional int32 expectedPods = 6; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/register.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/register.go new file mode 100644 index 0000000000..52bd65c8ba --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "policy" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PodDisruptionBudget{}, + &PodDisruptionBudgetList{}, + &Eviction{}, + ) + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.generated.go new file mode 100644 index 0000000000..049bfe8e69 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.generated.go @@ -0,0 +1,2203 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +package v1beta1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg2_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg3_types "k8s.io/apimachinery/pkg/types" + pkg1_intstr "k8s.io/apimachinery/pkg/util/intstr" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg2_v1.LabelSelector + var v1 pkg3_types.UID + var v2 pkg1_intstr.IntOrString + var v3 time.Time + _, _, _, _ = v0, v1, v2, v3 + } +} + +func (x *PodDisruptionBudgetSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = x.Selector != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.MinAvailable + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(yy4) + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minAvailable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.MinAvailable + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(yy6) + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym10 := 
z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodDisruptionBudgetSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "minAvailable": + if r.TryDecodeAsNil() { + x.MinAvailable = pkg1_intstr.IntOrString{} + } else { + yyv4 := &x.MinAvailable + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv4) + } else { + z.DecFallback(yyv4, false) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg2_v1.LabelSelector) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MinAvailable = pkg1_intstr.IntOrString{} + } else { + yyv9 := &x.MinAvailable + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv9) + } else { + z.DecFallback(yyv9, false) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg2_v1.LabelSelector) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodDisruptionBudgetStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ObservedGeneration != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 5 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.DisruptedPods == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encMapstringv1_Time((map[string]pkg2_v1.Time)(x.DisruptedPods), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("disruptedPods")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DisruptedPods == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encMapstringv1_Time((map[string]pkg2_v1.Time)(x.DisruptedPods), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.PodDisruptionsAllowed)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("disruptionsAllowed")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.PodDisruptionsAllowed)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.CurrentHealthy)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentHealthy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + 
r.EncodeInt(int64(x.CurrentHealthy)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.DesiredHealthy)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("desiredHealthy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.DesiredHealthy)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(x.ExpectedPods)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("expectedPods")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.ExpectedPods)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodDisruptionBudgetStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "observedGeneration": + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv4 := &x.ObservedGeneration + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } + } + case "disruptedPods": + if r.TryDecodeAsNil() { + x.DisruptedPods = nil + } else { + yyv6 := &x.DisruptedPods + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decMapstringv1_Time((*map[string]pkg2_v1.Time)(yyv6), d) + } + } + case "disruptionsAllowed": + if r.TryDecodeAsNil() { + x.PodDisruptionsAllowed = 0 + } else { + yyv8 := &x.PodDisruptionsAllowed + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "currentHealthy": + if r.TryDecodeAsNil() { + x.CurrentHealthy = 0 + } else { + yyv10 := &x.CurrentHealthy + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case 
"desiredHealthy": + if r.TryDecodeAsNil() { + x.DesiredHealthy = 0 + } else { + yyv12 := &x.DesiredHealthy + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(yyv12)) = int32(r.DecodeInt(32)) + } + } + case "expectedPods": + if r.TryDecodeAsNil() { + x.ExpectedPods = 0 + } else { + yyv14 := &x.ExpectedPods + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv17 := &x.ObservedGeneration + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int64)(yyv17)) = int64(r.DecodeInt(64)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DisruptedPods = nil + } else { + yyv19 := &x.DisruptedPods + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decMapstringv1_Time((*map[string]pkg2_v1.Time)(yyv19), d) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodDisruptionsAllowed = 0 + } else { + yyv21 := &x.PodDisruptionsAllowed + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentHealthy = 0 + } else { + yyv23 := &x.CurrentHealthy + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DesiredHealthy = 0 + } else { + yyv25 := &x.DesiredHealthy + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ExpectedPods = 0 + } else { + yyv27 := &x.ExpectedPods + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(yyv27)) = int32(r.DecodeInt(32)) + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else 
{ + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj16-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodDisruptionBudget) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodDisruptionBudget) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodDisruptionBudget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = PodDisruptionBudgetSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = PodDisruptionBudgetStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodDisruptionBudget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if 
yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = PodDisruptionBudgetSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = PodDisruptionBudgetStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodDisruptionBudgetList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodDisruptionBudgetList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodDisruptionBudgetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + 
*((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodDisruptionBudgetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Eviction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = x.DeleteOptions != nil + var yynn2 int + if yyr2 || 
yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.DeleteOptions == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.EncExt(x.DeleteOptions) { + } else { + z.EncFallback(x.DeleteOptions) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("deleteOptions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DeleteOptions == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(x.DeleteOptions) { + } else { + z.EncFallback(x.DeleteOptions) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Eviction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := 
r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Eviction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "deleteOptions": + if r.TryDecodeAsNil() { + if x.DeleteOptions != nil { + x.DeleteOptions = nil + } + } else { + if x.DeleteOptions == nil { + x.DeleteOptions = new(pkg2_v1.DeleteOptions) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.DecExt(x.DeleteOptions) { + } else { + z.DecFallback(x.DeleteOptions, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Eviction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } 
+ yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DeleteOptions != nil { + x.DeleteOptions = nil + } + } else { + if x.DeleteOptions == nil { + x.DeleteOptions = new(pkg2_v1.DeleteOptions) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(x.DeleteOptions) { + } else { + z.DecFallback(x.DeleteOptions, false) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encMapstringv1_Time(v map[string]pkg2_v1.Time, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy3 := &yyv1 + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(yy3) { + } else if yym4 { + z.EncBinaryMarshal(yy3) + } else if !yym4 && z.IsJSONHandle() { + z.EncJSONMarshal(yy3) + } else { + z.EncFallback(yy3) + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decMapstringv1_Time(v *map[string]pkg2_v1.Time, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string]pkg2_v1.Time, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 pkg2_v1.Time + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = pkg2_v1.Time{} + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = pkg2_v1.Time{} + } else { + yyv4 := &yymv1 + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if yym5 { + z.DecBinaryUnmarshal(yyv4) + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv4) + } else { + z.DecFallback(yyv4, false) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv6 := &yymk1 + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = pkg2_v1.Time{} + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = pkg2_v1.Time{} + } else { + yyv8 := &yymv1 + yym9 := z.DecBinary() + _ = yym9 + if false { + } 
else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encSlicePodDisruptionBudget(v []PodDisruptionBudget, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePodDisruptionBudget(v *[]PodDisruptionBudget, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodDisruptionBudget{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 328) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PodDisruptionBudget, yyrl1) + } + } else { + yyv1 = make([]PodDisruptionBudget, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodDisruptionBudget{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodDisruptionBudget{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodDisruptionBudget{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodDisruptionBudget{}) // var yyz1 PodDisruptionBudget + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodDisruptionBudget{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodDisruptionBudget{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.go new file mode 100644 index 0000000000..ca5ea37306 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.go @@ -0,0 +1,105 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. +type PodDisruptionBudgetSpec struct { + // An eviction is allowed if at least "minAvailable" pods selected by + // "selector" will still be available after the eviction, i.e. even in the + // absence of the evicted pod. So for example you can prevent all voluntary + // evictions by specifying "100%". + MinAvailable intstr.IntOrString `json:"minAvailable,omitempty" protobuf:"bytes,1,opt,name=minAvailable"` + + // Label query over pods whose evictions are managed by the disruption + // budget. + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` +} + +// PodDisruptionBudgetStatus represents information about the status of a +// PodDisruptionBudget. Status may trail the actual state of a system. +type PodDisruptionBudgetStatus struct { + // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other + // status informatio is valid only if observedGeneration equals to PDB's object generation. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // DisruptedPods contains information about pods whose eviction was + // processed by the API server eviction subresource handler but has not + // yet been observed by the PodDisruptionBudget controller. + // A pod will be in this map from the time when the API server processed the + // eviction request to the time when the pod is seen by PDB controller + // as having been marked for deletion (or after a timeout). The key in the map is the name of the pod + // and the value is the time when the API server processed the eviction request. If + // the deletion didn't occur and a pod is still there it will be removed from + // the list automatically by PodDisruptionBudget controller after some time. + // If everything goes smooth this map should be empty for the most of the time. + // Large number of entries in the map may indicate problems with pod deletions. + DisruptedPods map[string]metav1.Time `json:"disruptedPods" protobuf:"bytes,2,rep,name=disruptedPods"` + + // Number of pod disruptions that are currently allowed. 
+ PodDisruptionsAllowed int32 `json:"disruptionsAllowed" protobuf:"varint,3,opt,name=disruptionsAllowed"` + + // current number of healthy pods + CurrentHealthy int32 `json:"currentHealthy" protobuf:"varint,4,opt,name=currentHealthy"` + + // minimum desired number of healthy pods + DesiredHealthy int32 `json:"desiredHealthy" protobuf:"varint,5,opt,name=desiredHealthy"` + + // total number of pods counted by this disruption budget + ExpectedPods int32 `json:"expectedPods" protobuf:"varint,6,opt,name=expectedPods"` +} + +// +genclient=true + +// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods +type PodDisruptionBudget struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the PodDisruptionBudget. + Spec PodDisruptionBudgetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // Most recently observed status of the PodDisruptionBudget. + Status PodDisruptionBudgetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. +type PodDisruptionBudgetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true +// +noMethods=true + +// Eviction evicts a pod from its node subject to certain policies and safety constraints. +// This is a subresource of Pod. A request to cause such an eviction is +// created by POSTing to .../pods//evictions. +type Eviction struct { + metav1.TypeMeta `json:",inline"` + + // ObjectMeta describes the pod that is being evicted. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // DeleteOptions may be provided + DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..d919856b25 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,82 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
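Editorial aside, not part of the vendored diff: a minimal sketch of how the policy/v1beta1 kinds introduced above in register.go and types.go might be used, assuming only the import paths that appear in this diff. The scheme registration also pulls in the generated conversion functions, since zz_generated.conversion.go (later in this diff) hooks RegisterConversions into the SchemeBuilder from an init(). The names "nginx-pdb" and the app=nginx labels are hypothetical.

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/util/intstr"
        policyv1beta1 "k8s.io/client-go/pkg/apis/policy/v1beta1"
    )

    func main() {
        // Register the kinds added by register.go (PodDisruptionBudget,
        // PodDisruptionBudgetList, Eviction) into a fresh scheme.
        scheme := runtime.NewScheme()
        if err := policyv1beta1.AddToScheme(scheme); err != nil {
            panic(err)
        }

        // A budget that keeps every selected pod available, i.e. it blocks all
        // voluntary evictions, mirroring the "100%" example in the minAvailable
        // field comment above. The selector labels are hypothetical.
        pdb := policyv1beta1.PodDisruptionBudget{
            ObjectMeta: metav1.ObjectMeta{Name: "nginx-pdb", Namespace: "default"},
            Spec: policyv1beta1.PodDisruptionBudgetSpec{
                MinAvailable: intstr.FromString("100%"),
                Selector: &metav1.LabelSelector{
                    MatchLabels: map[string]string{"app": "nginx"},
                },
            },
        }

        gvk := policyv1beta1.SchemeGroupVersion.WithKind("PodDisruptionBudget")
        fmt.Println(pdb.Spec.MinAvailable.String(), scheme.Recognizes(gvk))
    }

The Eviction kind registered alongside these is the payload POSTed to a pod's eviction subresource; per the type definition above it only carries ObjectMeta naming the target pod plus optional DeleteOptions.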
+// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Eviction = map[string]string{ + "": "Eviction evicts a pod from its node subject to certain policies and safety constraints. This is a subresource of Pod. A request to cause such an eviction is created by POSTing to .../pods//evictions.", + "metadata": "ObjectMeta describes the pod that is being evicted.", + "deleteOptions": "DeleteOptions may be provided", +} + +func (Eviction) SwaggerDoc() map[string]string { + return map_Eviction +} + +var map_PodDisruptionBudget = map[string]string{ + "": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", + "spec": "Specification of the desired behavior of the PodDisruptionBudget.", + "status": "Most recently observed status of the PodDisruptionBudget.", +} + +func (PodDisruptionBudget) SwaggerDoc() map[string]string { + return map_PodDisruptionBudget +} + +var map_PodDisruptionBudgetList = map[string]string{ + "": "PodDisruptionBudgetList is a collection of PodDisruptionBudgets.", +} + +func (PodDisruptionBudgetList) SwaggerDoc() map[string]string { + return map_PodDisruptionBudgetList +} + +var map_PodDisruptionBudgetSpec = map[string]string{ + "": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.", + "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".", + "selector": "Label query over pods whose evictions are managed by the disruption budget.", +} + +func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { + return map_PodDisruptionBudgetSpec +} + +var map_PodDisruptionBudgetStatus = map[string]string{ + "": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.", + "observedGeneration": "Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other status informatio is valid only if observedGeneration equals to PDB's object generation.", + "disruptedPods": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. 
Large number of entries in the map may indicate problems with pod deletions.", + "disruptionsAllowed": "Number of pod disruptions that are currently allowed.", + "currentHealthy": "current number of healthy pods", + "desiredHealthy": "minimum desired number of healthy pods", + "expectedPods": "total number of pods counted by this disruption budget", +} + +func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string { + return map_PodDisruptionBudgetStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.conversion.go new file mode 100644 index 0000000000..cb4cbb6d65 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.conversion.go @@ -0,0 +1,168 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + policy "k8s.io/client-go/pkg/apis/policy" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
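Editorial aside, not part of the vendored diff: a sketch of calling one of the generated conversion functions below directly. The internal policy package import path is the one this file itself imports; passing nil for the conversion.Scope is purely illustrative and relies on the generated autoConvert bodies not consulting it.

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/intstr"
        policy "k8s.io/client-go/pkg/apis/policy"
        policyv1beta1 "k8s.io/client-go/pkg/apis/policy/v1beta1"
    )

    func main() {
        in := policyv1beta1.PodDisruptionBudgetSpec{MinAvailable: intstr.FromInt(2)}
        var out policy.PodDisruptionBudgetSpec

        // The generated body copies MinAvailable and aliases Selector through
        // unsafe.Pointer; the scope argument is unused, so nil suffices here.
        err := policyv1beta1.Convert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(&in, &out, nil)
        if err != nil {
            panic(err)
        }
        fmt.Println(out.MinAvailable.String()) // prints "2"
    }

In normal use these functions are reached through runtime.Scheme.Convert after AddToScheme has applied RegisterConversions, rather than being called by hand as above.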
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_Eviction_To_policy_Eviction, + Convert_policy_Eviction_To_v1beta1_Eviction, + Convert_v1beta1_PodDisruptionBudget_To_policy_PodDisruptionBudget, + Convert_policy_PodDisruptionBudget_To_v1beta1_PodDisruptionBudget, + Convert_v1beta1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList, + Convert_policy_PodDisruptionBudgetList_To_v1beta1_PodDisruptionBudgetList, + Convert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec, + Convert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec, + Convert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus, + Convert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus, + ) +} + +func autoConvert_v1beta1_Eviction_To_policy_Eviction(in *Eviction, out *policy.Eviction, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.DeleteOptions = (*v1.DeleteOptions)(unsafe.Pointer(in.DeleteOptions)) + return nil +} + +func Convert_v1beta1_Eviction_To_policy_Eviction(in *Eviction, out *policy.Eviction, s conversion.Scope) error { + return autoConvert_v1beta1_Eviction_To_policy_Eviction(in, out, s) +} + +func autoConvert_policy_Eviction_To_v1beta1_Eviction(in *policy.Eviction, out *Eviction, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.DeleteOptions = (*v1.DeleteOptions)(unsafe.Pointer(in.DeleteOptions)) + return nil +} + +func Convert_policy_Eviction_To_v1beta1_Eviction(in *policy.Eviction, out *Eviction, s conversion.Scope) error { + return autoConvert_policy_Eviction_To_v1beta1_Eviction(in, out, s) +} + +func autoConvert_v1beta1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in *PodDisruptionBudget, out *policy.PodDisruptionBudget, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in *PodDisruptionBudget, out *policy.PodDisruptionBudget, s conversion.Scope) error { + return autoConvert_v1beta1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in, out, s) +} + +func autoConvert_policy_PodDisruptionBudget_To_v1beta1_PodDisruptionBudget(in *policy.PodDisruptionBudget, out *PodDisruptionBudget, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_policy_PodDisruptionBudget_To_v1beta1_PodDisruptionBudget(in *policy.PodDisruptionBudget, out *PodDisruptionBudget, s conversion.Scope) error { + return autoConvert_policy_PodDisruptionBudget_To_v1beta1_PodDisruptionBudget(in, out, s) +} + +func autoConvert_v1beta1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in *PodDisruptionBudgetList, out *policy.PodDisruptionBudgetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]policy.PodDisruptionBudget)(unsafe.Pointer(&in.Items)) + return nil +} + +func 
Convert_v1beta1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in *PodDisruptionBudgetList, out *policy.PodDisruptionBudgetList, s conversion.Scope) error { + return autoConvert_v1beta1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in, out, s) +} + +func autoConvert_policy_PodDisruptionBudgetList_To_v1beta1_PodDisruptionBudgetList(in *policy.PodDisruptionBudgetList, out *PodDisruptionBudgetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]PodDisruptionBudget)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_policy_PodDisruptionBudgetList_To_v1beta1_PodDisruptionBudgetList(in *policy.PodDisruptionBudgetList, out *PodDisruptionBudgetList, s conversion.Scope) error { + return autoConvert_policy_PodDisruptionBudgetList_To_v1beta1_PodDisruptionBudgetList(in, out, s) +} + +func autoConvert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in *PodDisruptionBudgetSpec, out *policy.PodDisruptionBudgetSpec, s conversion.Scope) error { + out.MinAvailable = in.MinAvailable + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + return nil +} + +func Convert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in *PodDisruptionBudgetSpec, out *policy.PodDisruptionBudgetSpec, s conversion.Scope) error { + return autoConvert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in, out, s) +} + +func autoConvert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec(in *policy.PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, s conversion.Scope) error { + out.MinAvailable = in.MinAvailable + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + return nil +} + +func Convert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec(in *policy.PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, s conversion.Scope) error { + return autoConvert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec(in, out, s) +} + +func autoConvert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in *PodDisruptionBudgetStatus, out *policy.PodDisruptionBudgetStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.DisruptedPods = *(*map[string]v1.Time)(unsafe.Pointer(&in.DisruptedPods)) + out.PodDisruptionsAllowed = in.PodDisruptionsAllowed + out.CurrentHealthy = in.CurrentHealthy + out.DesiredHealthy = in.DesiredHealthy + out.ExpectedPods = in.ExpectedPods + return nil +} + +func Convert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in *PodDisruptionBudgetStatus, out *policy.PodDisruptionBudgetStatus, s conversion.Scope) error { + return autoConvert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in, out, s) +} + +func autoConvert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus(in *policy.PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.DisruptedPods = *(*map[string]v1.Time)(unsafe.Pointer(&in.DisruptedPods)) + out.PodDisruptionsAllowed = in.PodDisruptionsAllowed + out.CurrentHealthy = in.CurrentHealthy + out.DesiredHealthy = in.DesiredHealthy + out.ExpectedPods = in.ExpectedPods + return nil +} + +func Convert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus(in *policy.PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, s conversion.Scope) error { + return 
autoConvert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..395e6689e3 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,137 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Eviction, InType: reflect.TypeOf(&Eviction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PodDisruptionBudget, InType: reflect.TypeOf(&PodDisruptionBudget{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PodDisruptionBudgetList, InType: reflect.TypeOf(&PodDisruptionBudgetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PodDisruptionBudgetSpec, InType: reflect.TypeOf(&PodDisruptionBudgetSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PodDisruptionBudgetStatus, InType: reflect.TypeOf(&PodDisruptionBudgetStatus{})}, + ) +} + +func DeepCopy_v1beta1_Eviction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Eviction) + out := out.(*Eviction) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.DeleteOptions != nil { + in, out := &in.DeleteOptions, &out.DeleteOptions + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.DeleteOptions) + } + } + return nil + } +} + +func DeepCopy_v1beta1_PodDisruptionBudget(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodDisruptionBudget) + out := out.(*PodDisruptionBudget) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_PodDisruptionBudgetStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_PodDisruptionBudgetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodDisruptionBudgetList) + out := out.(*PodDisruptionBudgetList) + *out = *in + if in.Items != nil { + in, 
out := &in.Items, &out.Items + *out = make([]PodDisruptionBudget, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_PodDisruptionBudget(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_PodDisruptionBudgetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodDisruptionBudgetSpec) + out := out.(*PodDisruptionBudgetSpec) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + return nil + } +} + +func DeepCopy_v1beta1_PodDisruptionBudgetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodDisruptionBudgetStatus) + out := out.(*PodDisruptionBudgetStatus) + *out = *in + if in.DisruptedPods != nil { + in, out := &in.DisruptedPods, &out.DisruptedPods + *out = make(map[string]v1.Time) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/policy/zz_generated.deepcopy.go new file mode 100644 index 0000000000..298ec5fdab --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/zz_generated.deepcopy.go @@ -0,0 +1,137 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package policy + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_policy_Eviction, InType: reflect.TypeOf(&Eviction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_policy_PodDisruptionBudget, InType: reflect.TypeOf(&PodDisruptionBudget{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_policy_PodDisruptionBudgetList, InType: reflect.TypeOf(&PodDisruptionBudgetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_policy_PodDisruptionBudgetSpec, InType: reflect.TypeOf(&PodDisruptionBudgetSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_policy_PodDisruptionBudgetStatus, InType: reflect.TypeOf(&PodDisruptionBudgetStatus{})}, + ) +} + +func DeepCopy_policy_Eviction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Eviction) + out := out.(*Eviction) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.DeleteOptions != nil { + in, out := &in.DeleteOptions, &out.DeleteOptions + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.DeleteOptions) + } + } + return nil + } +} + +func DeepCopy_policy_PodDisruptionBudget(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodDisruptionBudget) + out := out.(*PodDisruptionBudget) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_policy_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_policy_PodDisruptionBudgetStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_policy_PodDisruptionBudgetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodDisruptionBudgetList) + out := out.(*PodDisruptionBudgetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodDisruptionBudget, len(*in)) + for i := range *in { + if err := DeepCopy_policy_PodDisruptionBudget(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_policy_PodDisruptionBudgetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodDisruptionBudgetSpec) + out := out.(*PodDisruptionBudgetSpec) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + return nil + } +} + +func DeepCopy_policy_PodDisruptionBudgetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodDisruptionBudgetStatus) + out := out.(*PodDisruptionBudgetStatus) + *out = *in + if in.DisruptedPods != nil { + in, out := &in.DisruptedPods, &out.DisruptedPods + *out = make(map[string]v1.Time) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/OWNERS b/vendor/k8s.io/client-go/pkg/apis/rbac/OWNERS new file mode 100755 index 0000000000..a35477b920 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/OWNERS @@ -0,0 +1,17 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- deads2k +- sttts +- ncdc +- timothysc +- dims +- krousey +- mml +- mbohlool +- david-mcmahon +- ericchiang +- lixiaobing10051267 +- jianhuiz +- 
liggitt diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/doc.go b/vendor/k8s.io/client-go/pkg/apis/rbac/doc.go new file mode 100644 index 0000000000..c5f057484c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=rbac.authorization.k8s.io +package rbac diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/helpers.go b/vendor/k8s.io/client-go/pkg/apis/rbac/helpers.go new file mode 100644 index 0000000000..a2ec9e8a53 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/helpers.go @@ -0,0 +1,340 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rbac + +import ( + "fmt" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +func RoleRefGroupKind(roleRef RoleRef) schema.GroupKind { + return schema.GroupKind{Group: roleRef.APIGroup, Kind: roleRef.Kind} +} + +func VerbMatches(rule PolicyRule, requestedVerb string) bool { + for _, ruleVerb := range rule.Verbs { + if ruleVerb == VerbAll { + return true + } + if ruleVerb == requestedVerb { + return true + } + } + + return false +} + +func APIGroupMatches(rule PolicyRule, requestedGroup string) bool { + for _, ruleGroup := range rule.APIGroups { + if ruleGroup == APIGroupAll { + return true + } + if ruleGroup == requestedGroup { + return true + } + } + + return false +} + +func ResourceMatches(rule PolicyRule, requestedResource string) bool { + for _, ruleResource := range rule.Resources { + if ruleResource == ResourceAll { + return true + } + if ruleResource == requestedResource { + return true + } + } + + return false +} + +func ResourceNameMatches(rule PolicyRule, requestedName string) bool { + if len(rule.ResourceNames) == 0 { + return true + } + + for _, ruleName := range rule.ResourceNames { + if ruleName == requestedName { + return true + } + } + + return false +} + +func NonResourceURLMatches(rule PolicyRule, requestedURL string) bool { + for _, ruleURL := range rule.NonResourceURLs { + if ruleURL == NonResourceAll { + return true + } + if ruleURL == requestedURL { + return true + } + if strings.HasSuffix(ruleURL, "*") && strings.HasPrefix(requestedURL, strings.TrimRight(ruleURL, "*")) { + return true + } + } + + return false +} + +// subjectsStrings returns users, groups, serviceaccounts, unknown for display purposes. 
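The matching helpers above are applied field by field when a request is checked against a PolicyRule; in particular, a trailing "*" in NonResourceURLs acts as a prefix wildcard. A small illustrative sketch (the exampleHealthzMatch name is hypothetical):

    func exampleHealthzMatch() bool {
        rule := PolicyRule{
            Verbs:           []string{"get"},
            NonResourceURLs: []string{"/healthz", "/healthz/*"},
        }
        // true: the verb matches and "/healthz/*" prefix-matches "/healthz/ready".
        return VerbMatches(rule, "get") && NonResourceURLMatches(rule, "/healthz/ready")
    }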
+func SubjectsStrings(subjects []Subject) ([]string, []string, []string, []string) { + users := []string{} + groups := []string{} + sas := []string{} + others := []string{} + + for _, subject := range subjects { + switch subject.Kind { + case ServiceAccountKind: + sas = append(sas, fmt.Sprintf("%s/%s", subject.Namespace, subject.Name)) + + case UserKind: + users = append(users, subject.Name) + + case GroupKind: + groups = append(groups, subject.Name) + + default: + others = append(others, fmt.Sprintf("%s/%s/%s", subject.Kind, subject.Namespace, subject.Name)) + } + } + + return users, groups, sas, others +} + +// PolicyRuleBuilder lets us attach methods. A no-no for API types. +// We use it to construct rules in code. It's more compact than trying to write them +// out in a literal and allows us to perform some basic checking during construction +type PolicyRuleBuilder struct { + PolicyRule PolicyRule +} + +func NewRule(verbs ...string) *PolicyRuleBuilder { + return &PolicyRuleBuilder{ + PolicyRule: PolicyRule{Verbs: sets.NewString(verbs...).List()}, + } +} + +func (r *PolicyRuleBuilder) Groups(groups ...string) *PolicyRuleBuilder { + r.PolicyRule.APIGroups = combine(r.PolicyRule.APIGroups, groups) + return r +} + +func (r *PolicyRuleBuilder) Resources(resources ...string) *PolicyRuleBuilder { + r.PolicyRule.Resources = combine(r.PolicyRule.Resources, resources) + return r +} + +func (r *PolicyRuleBuilder) Names(names ...string) *PolicyRuleBuilder { + r.PolicyRule.ResourceNames = combine(r.PolicyRule.ResourceNames, names) + return r +} + +func (r *PolicyRuleBuilder) URLs(urls ...string) *PolicyRuleBuilder { + r.PolicyRule.NonResourceURLs = combine(r.PolicyRule.NonResourceURLs, urls) + return r +} + +func (r *PolicyRuleBuilder) RuleOrDie() PolicyRule { + ret, err := r.Rule() + if err != nil { + panic(err) + } + return ret +} + +func combine(s1, s2 []string) []string { + s := sets.NewString(s1...) + s.Insert(s2...) + return s.List() +} + +func (r *PolicyRuleBuilder) Rule() (PolicyRule, error) { + if len(r.PolicyRule.Verbs) == 0 { + return PolicyRule{}, fmt.Errorf("verbs are required: %#v", r.PolicyRule) + } + + switch { + case len(r.PolicyRule.NonResourceURLs) > 0: + if len(r.PolicyRule.APIGroups) != 0 || len(r.PolicyRule.Resources) != 0 || len(r.PolicyRule.ResourceNames) != 0 { + return PolicyRule{}, fmt.Errorf("non-resource rule may not have apiGroups, resources, or resourceNames: %#v", r.PolicyRule) + } + case len(r.PolicyRule.Resources) > 0: + if len(r.PolicyRule.NonResourceURLs) != 0 { + return PolicyRule{}, fmt.Errorf("resource rule may not have nonResourceURLs: %#v", r.PolicyRule) + } + if len(r.PolicyRule.APIGroups) == 0 { + // this is a common bug + return PolicyRule{}, fmt.Errorf("resource rule must have apiGroups: %#v", r.PolicyRule) + } + default: + return PolicyRule{}, fmt.Errorf("a rule must have either nonResourceURLs or resources: %#v", r.PolicyRule) + } + + return r.PolicyRule, nil +} + +// ClusterRoleBindingBuilder lets us attach methods. A no-no for API types. +// We use it to construct bindings in code. It's more compact than trying to write them +// out in a literal.
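As the comments above note, these builders exist so that rules and bindings can be constructed compactly in code, with some basic validation along the way. A hedged sketch of typical chaining (the exampleBootstrapPolicy name and the concrete role/group names are illustrative only):

    func exampleBootstrapPolicy() (PolicyRule, ClusterRoleBinding) {
        // RuleOrDie and BindingOrDie panic on invalid input, which is the usual
        // trade-off for statically defined policy.
        rule := NewRule("get", "list", "watch").Groups("").Resources("pods").RuleOrDie()
        binding := NewClusterBinding("cluster-admin").Groups("system:masters").BindingOrDie()
        return rule, binding
    }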
+type ClusterRoleBindingBuilder struct { + ClusterRoleBinding ClusterRoleBinding +} + +func NewClusterBinding(clusterRoleName string) *ClusterRoleBindingBuilder { + return &ClusterRoleBindingBuilder{ + ClusterRoleBinding: ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: clusterRoleName}, + RoleRef: RoleRef{ + APIGroup: GroupName, + Kind: "ClusterRole", + Name: clusterRoleName, + }, + }, + } +} + +func (r *ClusterRoleBindingBuilder) Groups(groups ...string) *ClusterRoleBindingBuilder { + for _, group := range groups { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: GroupKind, APIGroup: GroupName, Name: group}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) Users(users ...string) *ClusterRoleBindingBuilder { + for _, user := range users { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: UserKind, APIGroup: GroupName, Name: user}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *ClusterRoleBindingBuilder { + for _, saName := range serviceAccountNames { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: ServiceAccountKind, Namespace: namespace, Name: saName}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) BindingOrDie() ClusterRoleBinding { + ret, err := r.Binding() + if err != nil { + panic(err) + } + return ret +} + +func (r *ClusterRoleBindingBuilder) Binding() (ClusterRoleBinding, error) { + if len(r.ClusterRoleBinding.Subjects) == 0 { + return ClusterRoleBinding{}, fmt.Errorf("subjects are required: %#v", r.ClusterRoleBinding) + } + + return r.ClusterRoleBinding, nil +} + +// RoleBindingBuilder lets us attach methods. It is similar to +// ClusterRoleBindingBuilder above. +type RoleBindingBuilder struct { + RoleBinding RoleBinding +} + +// NewRoleBinding creates a RoleBinding builder that can be used +// to define the subjects of a role binding. At least one of +// the `Groups`, `Users` or `SAs` methods must be called before +// calling the `Binding*` methods. +func NewRoleBinding(roleName, namespace string) *RoleBindingBuilder { + return &RoleBindingBuilder{ + RoleBinding: RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + Namespace: namespace, + }, + RoleRef: RoleRef{ + APIGroup: GroupName, + Kind: "Role", + Name: roleName, + }, + }, + } +} + +func NewRoleBindingForClusterRole(roleName, namespace string) *RoleBindingBuilder { + return &RoleBindingBuilder{ + RoleBinding: RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + Namespace: namespace, + }, + RoleRef: RoleRef{ + APIGroup: GroupName, + Kind: "ClusterRole", + Name: roleName, + }, + }, + } +} + +// Groups adds the specified groups as the subjects of the RoleBinding. +func (r *RoleBindingBuilder) Groups(groups ...string) *RoleBindingBuilder { + for _, group := range groups { + r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, Subject{Kind: GroupKind, Name: group}) + } + return r +} + +// Users adds the specified users as the subjects of the RoleBinding. +func (r *RoleBindingBuilder) Users(users ...string) *RoleBindingBuilder { + for _, user := range users { + r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, Subject{Kind: UserKind, Name: user}) + } + return r +} + +// SAs adds the specified service accounts as the subjects of the +// RoleBinding.
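RoleBindingBuilder follows the same pattern at namespace scope; for instance (a hypothetical sketch, with made-up namespace and service account names):

    func exampleNamespacedBinding() RoleBinding {
        // Bind the ClusterRole "view" to a service account within the "staging"
        // namespace; at least one subject must be added before calling Binding*.
        return NewRoleBindingForClusterRole("view", "staging").
            SAs("staging", "ci-bot").
            BindingOrDie()
    }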
+func (r *RoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *RoleBindingBuilder { + for _, saName := range serviceAccountNames { + r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, Subject{Kind: ServiceAccountKind, Namespace: namespace, Name: saName}) + } + return r +} + +// BindingOrDie calls the binding method and panics if there is an error. +func (r *RoleBindingBuilder) BindingOrDie() RoleBinding { + ret, err := r.Binding() + if err != nil { + panic(err) + } + return ret +} + +// Binding builds and returns the RoleBinding API object from the builder +// object. +func (r *RoleBindingBuilder) Binding() (RoleBinding, error) { + if len(r.RoleBinding.Subjects) == 0 { + return RoleBinding{}, fmt.Errorf("subjects are required: %#v", r.RoleBinding) + } + + return r.RoleBinding, nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/install/install.go b/vendor/k8s.io/client-go/pkg/apis/rbac/install/install.go new file mode 100644 index 0000000000..f92c11e06c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/install/install.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the rbac API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/rbac" + "k8s.io/client-go/pkg/apis/rbac/v1alpha1" + "k8s.io/client-go/pkg/apis/rbac/v1beta1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: rbac.GroupName, + VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version, v1alpha1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/rbac", + RootScopedKinds: sets.NewString("ClusterRole", "ClusterRoleBinding"), + AddInternalObjectsToScheme: rbac.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, + v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/register.go b/vendor/k8s.io/client-go/pkg/apis/rbac/register.go new file mode 100644 index 0000000000..f4a838bd89 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rbac + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "rbac.authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Role{}, + &RoleBinding{}, + &RoleBindingList{}, + &RoleList{}, + + &ClusterRole{}, + &ClusterRoleBinding{}, + &ClusterRoleBindingList{}, + &ClusterRoleList{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/types.go b/vendor/k8s.io/client-go/pkg/apis/rbac/types.go new file mode 100644 index 0000000000..ddc2456a02 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/types.go @@ -0,0 +1,188 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rbac + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Authorization is calculated against +// 1. evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + + GroupKind = "Group" + ServiceAccountKind = "ServiceAccount" + UserKind = "User" + + // AutoUpdateAnnotationKey is the name of an annotation which prevents reconciliation if set to "false" + AutoUpdateAnnotationKey = "rbac.authorization.kubernetes.io/autoupdate" +) + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +type PolicyRule struct { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + Verbs []string + + // APIGroups is the name of the APIGroup that contains the resources. 
+ // If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. + APIGroups []string + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + Resources []string + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + ResourceNames []string + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // If an action is not a resource API request, then the URL is split on '/' and is checked against the NonResourceURLs to look for a match. + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. + NonResourceURLs []string +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +type Subject struct { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognize the kind value, the Authorizer should report an error. + Kind string + // APIGroup holds the API group of the referenced subject. + // Defaults to "" for ServiceAccount subjects. + // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. + APIGroup string + // Name of the object being referenced. + Name string + // Namespace of the referenced object. If the object kind is non-namespaced, such as "User" or "Group", and this value is not empty, + // the Authorizer should report an error. + Namespace string +} + +// RoleRef contains information that points to the role being used +type RoleRef struct { + // APIGroup is the group for the resource being referenced + APIGroup string + // Kind is the type of resource being referenced + Kind string + // Name is the name of resource being referenced + Name string +} + +// +genclient=true + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +type Role struct { + metav1.TypeMeta + // Standard object's metadata. + metav1.ObjectMeta + + // Rules holds all the PolicyRules for this Role + Rules []PolicyRule +} + +// +genclient=true + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. +type RoleBinding struct { + metav1.TypeMeta + metav1.ObjectMeta + + // Subjects holds references to the objects the role applies to. + Subjects []Subject + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef RoleRef +} + +// RoleBindingList is a collection of RoleBindings +type RoleBindingList struct { + metav1.TypeMeta + // Standard object's metadata.
+ metav1.ListMeta + + // Items is a list of roleBindings + Items []RoleBinding +} + +// RoleList is a collection of Roles +type RoleList struct { + metav1.TypeMeta + // Standard object's metadata. + metav1.ListMeta + + // Items is a list of roles + Items []Role +} + +// +genclient=true +// +nonNamespaced=true + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +type ClusterRole struct { + metav1.TypeMeta + // Standard object's metadata. + metav1.ObjectMeta + + // Rules holds all the PolicyRules for this ClusterRole + Rules []PolicyRule +} + +// +genclient=true +// +nonNamespaced=true + +// ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. +type ClusterRoleBinding struct { + metav1.TypeMeta + // Standard object's metadata. + metav1.ObjectMeta + + // Subjects holds references to the objects the role applies to. + Subjects []Subject + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef RoleRef +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +type ClusterRoleBindingList struct { + metav1.TypeMeta + // Standard object's metadata. + metav1.ListMeta + + // Items is a list of ClusterRoleBindings + Items []ClusterRoleBinding +} + +// ClusterRoleList is a collection of ClusterRoles +type ClusterRoleList struct { + metav1.TypeMeta + // Standard object's metadata. + metav1.ListMeta + + // Items is a list of ClusterRoles + Items []ClusterRole +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/conversion.go new file mode 100644 index 0000000000..22b3c40767 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/conversion.go @@ -0,0 +1,81 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime/schema" + api "k8s.io/client-go/pkg/apis/rbac" +) + +// allAuthenticated matches k8s.io/apiserver/pkg/authentication/user.AllAuthenticated, +// but we don't want a client library (which must include types) depending on a server library +const allAuthenticated = "system:authenticated" + +func Convert_v1alpha1_Subject_To_rbac_Subject(in *Subject, out *api.Subject, s conversion.Scope) error { + if err := autoConvert_v1alpha1_Subject_To_rbac_Subject(in, out, s); err != nil { + return err + } + + // specifically set the APIGroup for the three subjects recognized in v1alpha1 + switch { + case in.Kind == ServiceAccountKind: + out.APIGroup = "" + case in.Kind == UserKind: + out.APIGroup = GroupName + case in.Kind == GroupKind: + out.APIGroup = GroupName + default: + // For unrecognized kinds, use the group portion of the APIVersion if we can get it + if gv, err := schema.ParseGroupVersion(in.APIVersion); err == nil { + out.APIGroup = gv.Group + } + } + + // User * in v1alpha1 will only match all authenticated users + // This is only for compatibility with old RBAC bindings + // Special treatment for * should not be included in v1beta1 + if out.Kind == UserKind && out.APIGroup == GroupName && out.Name == "*" { + out.Kind = GroupKind + out.Name = allAuthenticated + } + + return nil +} + +func Convert_rbac_Subject_To_v1alpha1_Subject(in *api.Subject, out *Subject, s conversion.Scope) error { + if err := autoConvert_rbac_Subject_To_v1alpha1_Subject(in, out, s); err != nil { + return err + } + + switch { + case in.Kind == ServiceAccountKind && in.APIGroup == "": + // Make service accounts v1 + out.APIVersion = "v1" + case in.Kind == UserKind && in.APIGroup == GroupName: + // users in the rbac API group get v1alpha + out.APIVersion = SchemeGroupVersion.String() + case in.Kind == GroupKind && in.APIGroup == GroupName: + // groups in the rbac API group get v1alpha + out.APIVersion = SchemeGroupVersion.String() + default: + // otherwise, they get an unspecified version of a group + out.APIVersion = schema.GroupVersion{Group: in.APIGroup}.String() + } + + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/defaults.go new file mode 100644 index 0000000000..49e934916e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/defaults.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + RegisterDefaults(scheme) + return scheme.AddDefaultingFuncs( + SetDefaults_ClusterRoleBinding, + SetDefaults_RoleBinding, + SetDefaults_Subject, + ) +} + +func SetDefaults_ClusterRoleBinding(obj *ClusterRoleBinding) { + if len(obj.RoleRef.APIGroup) == 0 { + obj.RoleRef.APIGroup = GroupName + } +} +func SetDefaults_RoleBinding(obj *RoleBinding) { + if len(obj.RoleRef.APIGroup) == 0 { + obj.RoleRef.APIGroup = GroupName + } +} +func SetDefaults_Subject(obj *Subject) { + if len(obj.APIVersion) == 0 { + switch obj.Kind { + case ServiceAccountKind: + obj.APIVersion = "v1" + case UserKind: + obj.APIVersion = SchemeGroupVersion.String() + case GroupKind: + obj.APIVersion = SchemeGroupVersion.String() + } + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/doc.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/doc.go new file mode 100644 index 0000000000..f43f7bc084 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=rbac.authorization.k8s.io +package v1alpha1 diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/generated.pb.go new file mode 100644 index 0000000000..406c12c175 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/generated.pb.go @@ -0,0 +1,2817 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto + + It has these top-level messages: + ClusterRole + ClusterRoleBinding + ClusterRoleBindingBuilder + ClusterRoleBindingList + ClusterRoleList + PolicyRule + PolicyRuleBuilder + Role + RoleBinding + RoleBindingList + RoleList + RoleRef + Subject +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *ClusterRoleBindingBuilder) Reset() { *m = ClusterRoleBindingBuilder{} } +func (*ClusterRoleBindingBuilder) ProtoMessage() {} +func (*ClusterRoleBindingBuilder) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *PolicyRuleBuilder) Reset() { *m = PolicyRuleBuilder{} } +func (*PolicyRuleBuilder) ProtoMessage() {} +func (*PolicyRuleBuilder) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *RoleRef) Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func init() { + proto.RegisterType((*ClusterRole)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.ClusterRole") + proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.ClusterRoleBinding") + proto.RegisterType((*ClusterRoleBindingBuilder)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.ClusterRoleBindingBuilder") + proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.ClusterRoleBindingList") + proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.ClusterRoleList") + proto.RegisterType((*PolicyRule)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.PolicyRule") + proto.RegisterType((*PolicyRuleBuilder)(nil), 
"k8s.io.client-go.pkg.apis.rbac.v1alpha1.PolicyRuleBuilder") + proto.RegisterType((*Role)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.Role") + proto.RegisterType((*RoleBinding)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.RoleBinding") + proto.RegisterType((*RoleBindingList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.RoleBindingList") + proto.RegisterType((*RoleList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.RoleList") + proto.RegisterType((*RoleRef)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.RoleRef") + proto.RegisterType((*Subject)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.Subject") +} +func (m *ClusterRole) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRole) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ClusterRoleBinding) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleBinding) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n2, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size())) + n3, err := m.RoleRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *ClusterRoleBindingBuilder) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleBindingBuilder) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ClusterRoleBinding.Size())) + n4, err := m.ClusterRoleBinding.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + return i, nil +} + +func (m *ClusterRoleBindingList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleBindingList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + 
} + i += n + } + } + return i, nil +} + +func (m *ClusterRoleList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PolicyRule) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PolicyRule) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + data[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + data[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + data[i] = 0x32 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *PolicyRuleBuilder) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PolicyRuleBuilder) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.PolicyRule.Size())) + n7, err := m.PolicyRule.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + return i, nil +} + +func (m *Role) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Role) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n8, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m 
*RoleBinding) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleBinding) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size())) + n10, err := m.RoleRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + return i, nil +} + +func (m *RoleBindingList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleBindingList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n11, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RoleList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n12, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RoleRef) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleRef) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIGroup))) + i += copy(data[i:], m.APIGroup) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + return i, nil +} + +func (m *Subject) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Subject) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += 
copy(data[i:], m.APIVersion) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) + i += copy(data[i:], m.Namespace) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *ClusterRole) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRoleBinding) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterRoleBindingBuilder) Size() (n int) { + var l int + _ = l + l = m.ClusterRoleBinding.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterRoleBindingList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRoleList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRule) Size() (n int) { + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRuleBuilder) Size() (n int) { + var l int + _ = l + l = m.PolicyRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Role) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleBinding) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + 
if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RoleBindingList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleRef) Size() (n int) { + var l int + _ = l + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Subject) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterRole) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRole{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBindingBuilder) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleBindingBuilder{`, + `ClusterRoleBinding:` + strings.Replace(strings.Replace(this.ClusterRoleBinding.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBindingList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", 
"k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRuleBuilder) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PolicyRuleBuilder{`, + `PolicyRule:` + strings.Replace(strings.Replace(this.PolicyRule.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Role) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Role{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleBindingList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleRef{`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Subject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Subject{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func 
valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterRole) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBinding) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { 
+ break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBindingBuilder) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBindingBuilder: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBindingBuilder: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleBinding", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClusterRoleBinding.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBindingList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > 
l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRole{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRule) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + 
if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRuleBuilder) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRuleBuilder: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRuleBuilder: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PolicyRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PolicyRule.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return 
nil +} +func (m *Role) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Role: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBinding) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err 
!= nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RoleBinding{}) + if 
err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Role{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleRef) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Subject) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + 
if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 847 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x54, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xce, 0xb4, 0x09, 0x4d, 0x5e, 0xa9, 0x4a, 0x07, 0x09, 0x99, 0x1e, 0x9c, 0xca, 0xa7, 0x0a, + 0x16, 0x9b, 0x96, 0x05, 0xf6, 0x00, 0x87, 0x35, 0x07, 0x54, 0xb1, 0x94, 0x6a, 0x56, 0xac, 0xc4, + 0x6a, 0x25, 0x98, 0x38, 0xb3, 0xc9, 0x10, 0xff, 0xd2, 0x8c, 0x1d, 0xb1, 0x42, 0x48, 0x1c, 0x39, + 0xf2, 0x57, 0x70, 0xe4, 0x80, 0xc4, 0x91, 0x13, 0x97, 0x0a, 0x2e, 0x3d, 0xc2, 0x25, 0xa2, 0xe6, + 0x0f, 0x01, 0x79, 0x3c, 0xfe, 0x51, 0x9c, 0xaa, 0x3f, 0x90, 0x22, 0x21, 0x71, 0x4a, 0xfc, 0xde, + 0xf7, 0x7d, 0xf3, 0xbe, 0x79, 0xf6, 0x07, 0xf7, 0x66, 0xf7, 0xa4, 0xcd, 0x23, 0x67, 0x96, 0x8e, + 0x98, 0x08, 0x59, 0xc2, 0xa4, 0x13, 0xcf, 0x26, 0x0e, 0x8d, 0xb9, 0x74, 0xc4, 0x88, 0x7a, 0xce, + 0xfc, 0x80, 0xfa, 0xf1, 0x94, 0x1e, 0x38, 0x13, 0x16, 0x32, 0x41, 0x13, 0x36, 0xb6, 0x63, 0x11, + 0x25, 0x11, 0xde, 0x2f, 0x98, 0x76, 0xcd, 0xb4, 0xe3, 0xd9, 0xc4, 0xce, 0x99, 0x76, 0xce, 0xb4, + 0x4b, 0xe6, 0xee, 0x6b, 0x13, 0x9e, 0x4c, 0xd3, 0x91, 0xed, 0x45, 0x81, 0x33, 0x89, 0x26, 0x91, + 0xa3, 0x04, 0x46, 0xe9, 0x53, 0xf5, 0xa4, 0x1e, 0xd4, 0xbf, 0x42, 0x78, 0xf7, 0xae, 0x1e, 0x89, + 0xc6, 0x3c, 0xa0, 0xde, 0x94, 0x87, 0x4c, 0x3c, 0xab, 0x87, 0x0a, 0x58, 0x42, 0x9d, 0x79, 0x6b, + 0x9c, 0x5d, 0xe7, 0x32, 0x96, 0x48, 0xc3, 0x84, 0x07, 0xac, 0x45, 0x78, 0xeb, 0x2a, 0x82, 0xf4, + 0xa6, 0x2c, 0xa0, 0x2d, 0xde, 0x1b, 0x97, 0xf1, 0xd2, 0x84, 0xfb, 0x0e, 0x0f, 0x13, 0x99, 0x88, + 0x16, 0xa9, 0xe1, 0x49, 0x32, 0x31, 0x67, 0xa2, 0x36, 0xc4, 0xbe, 0xa0, 0x41, 0xec, 0xb3, 0x65, + 0x9e, 0xee, 0x5c, 0xba, 0x9c, 0x25, 0x68, 0xeb, 0x17, 0x04, 0x9b, 0xef, 0xf9, 0xa9, 0x4c, 0x98, + 0x20, 0x91, 0xcf, 0xf0, 0x67, 0xd0, 0xcf, 0x2f, 0x6b, 0x4c, 0x13, 0x6a, 0xa0, 0x3d, 0xb4, 0xbf, + 0x79, 0xf8, 0xba, 0xad, 0x77, 0xd6, 0x9c, 0xbd, 0xde, 0x5a, 0x8e, 0xb6, 0xe7, 0x07, 0xf6, 0x47, + 0xa3, 0xcf, 0x99, 0x97, 0x7c, 0xc8, 0x12, 0xea, 0xe2, 0xd3, 0xc5, 0xb0, 0x93, 0x2d, 0x86, 0x50, + 0xd7, 0x48, 0xa5, 0x8a, 0x3f, 0x81, 0x9e, 0x48, 0x7d, 0x26, 0x8d, 0xb5, 0xbd, 0xf5, 0xfd, 0xcd, + 0xc3, 0xbb, 0xf6, 0x75, 0x5f, 0x09, 0xfb, 0x24, 0xf2, 0xb9, 0xf7, 0x8c, 0xa4, 0x3e, 0x73, 0xb7, + 0xf4, 0x11, 0xbd, 0xfc, 0x49, 0x92, 0x42, 0xd1, 0xfa, 0x71, 0x0d, 0x70, 0xc3, 0x8c, 0xcb, 0xc3, + 0x31, 0x0f, 0x27, 0x2b, 0xf0, 0xf4, 0x29, 0xf4, 0x65, 0xaa, 0x1a, 0xa5, 0xad, 0x83, 0xeb, 0xdb, + 0x7a, 0x58, 0x30, 0xdd, 0x17, 0xf4, 0x11, 0x7d, 0x5d, 0x90, 0xa4, 0x12, 0xc5, 0x4f, 0x60, 0x43, + 0x44, 0x3e, 0x23, 0xec, 0xa9, 0xb1, 0xae, 0x1c, 0xdc, 0x40, 0x9f, 0x14, 0x44, 0x77, 0x5b, 0xeb, + 0x6f, 0xe8, 0x02, 0x29, 0x25, 0xad, 0xef, 0x10, 0xbc, 0xdc, 0xbe, 0x37, 0x37, 0xe5, 0xfe, 0x98, + 0x09, 0xfc, 0x0d, 0x02, 0xec, 0xb5, 0xba, 0xfa, 0x26, 0xdf, 0xb9, 0xfe, 0x1c, 0x4b, 0x4e, 0xd8, + 0xd5, 0x23, 0x2d, 0xd9, 0x1a, 0x59, 0x72, 0xa6, 0xf5, 0x3b, 0x82, 0x97, 0xda, 0xd0, 0x07, 0x5c, + 0x26, 0xf8, 0x49, 0x6b, 0xc9, 0xf6, 0xf5, 0x96, 0x9c, 0xb3, 0xd5, 0x8a, 0xab, 0xfb, 0x2f, 0x2b, + 0x8d, 0x05, 0x53, 0xe8, 0xf1, 0x84, 0x05, 0xe5, 0x76, 0xff, 0x9d, 0xeb, 0xea, 0xe5, 
0x3d, 0xca, + 0x25, 0x49, 0xa1, 0x6c, 0xfd, 0x8a, 0x60, 0xbb, 0x01, 0x5e, 0x81, 0xa9, 0xc7, 0x17, 0x4d, 0xbd, + 0x79, 0x3b, 0x53, 0xcb, 0xdd, 0xfc, 0x85, 0x00, 0xea, 0xef, 0x15, 0x0f, 0xa1, 0x37, 0x67, 0x62, + 0x24, 0x0d, 0xb4, 0xb7, 0xbe, 0x3f, 0x70, 0x07, 0x39, 0xfe, 0x51, 0x5e, 0x20, 0x45, 0x1d, 0xbf, + 0x0a, 0x03, 0x1a, 0xf3, 0xf7, 0x45, 0x94, 0xc6, 0xd2, 0x58, 0x57, 0xa0, 0xad, 0x6c, 0x31, 0x1c, + 0xdc, 0x3f, 0x39, 0x2a, 0x8a, 0xa4, 0xee, 0xe7, 0x60, 0xc1, 0x64, 0x94, 0x0a, 0x8f, 0x49, 0xa3, + 0x5b, 0x83, 0x49, 0x59, 0x24, 0x75, 0x1f, 0xbf, 0x0d, 0x5b, 0xe5, 0xc3, 0x31, 0x0d, 0x98, 0x34, + 0x7a, 0x8a, 0xb0, 0x93, 0x2d, 0x86, 0x5b, 0xa4, 0xd9, 0x20, 0x17, 0x71, 0xf8, 0x5d, 0xd8, 0x0e, + 0xa3, 0xb0, 0x84, 0x7c, 0x4c, 0x1e, 0x48, 0xe3, 0x39, 0x45, 0x7d, 0x31, 0x5b, 0x0c, 0xb7, 0x8f, + 0x2f, 0xb6, 0xc8, 0x3f, 0xb1, 0xd6, 0x57, 0xb0, 0xd3, 0x08, 0x2c, 0xfd, 0x2d, 0x4d, 0x01, 0xe2, + 0xaa, 0xa8, 0x57, 0x7a, 0xbb, 0x04, 0xac, 0x02, 0xa9, 0xae, 0x91, 0x86, 0xb6, 0xf5, 0x33, 0x82, + 0xee, 0x7f, 0x3f, 0xd1, 0xbf, 0x5f, 0x83, 0xcd, 0xff, 0xa3, 0xfc, 0x06, 0x51, 0x9e, 0xa7, 0xc8, + 0x6a, 0xa3, 0xf1, 0xf6, 0x29, 0x72, 0x75, 0x26, 0xfe, 0x84, 0xa0, 0xbf, 0xa2, 0x30, 0x7c, 0x78, + 0xd1, 0x86, 0x7d, 0x43, 0x1b, 0xcb, 0xe7, 0xff, 0x12, 0xca, 0x0d, 0xe1, 0x3b, 0xd0, 0x2f, 0x03, + 0x4c, 0x4d, 0x3f, 0xa8, 0xa7, 0x29, 0x33, 0x8e, 0x54, 0x08, 0xbc, 0x07, 0xdd, 0x19, 0x0f, 0xc7, + 0xc6, 0x9a, 0x42, 0x3e, 0xaf, 0x91, 0xdd, 0x0f, 0x78, 0x38, 0x26, 0xaa, 0x93, 0x23, 0x42, 0x1a, + 0x30, 0xf5, 0x0e, 0x35, 0x10, 0x79, 0x74, 0x11, 0xd5, 0xb1, 0x7e, 0x40, 0xb0, 0xa1, 0xdf, 0xbf, + 0x4a, 0x0f, 0x5d, 0xaa, 0x77, 0x08, 0x40, 0x63, 0xfe, 0x88, 0x09, 0xc9, 0xa3, 0x50, 0x9f, 0x5b, + 0x7d, 0x29, 0xf7, 0x4f, 0x8e, 0x74, 0x87, 0x34, 0x50, 0x57, 0xcf, 0x80, 0x1d, 0x18, 0xe4, 0xbf, + 0x32, 0xa6, 0x1e, 0x33, 0xba, 0x0a, 0xb6, 0xa3, 0x61, 0x83, 0xe3, 0xb2, 0x41, 0x6a, 0x8c, 0xfb, + 0xca, 0xe9, 0xb9, 0xd9, 0x39, 0x3b, 0x37, 0x3b, 0xbf, 0x9d, 0x9b, 0x9d, 0xaf, 0x33, 0x13, 0x9d, + 0x66, 0x26, 0x3a, 0xcb, 0x4c, 0xf4, 0x47, 0x66, 0xa2, 0x6f, 0xff, 0x34, 0x3b, 0x8f, 0xfb, 0xe5, + 0xc5, 0xff, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x53, 0x64, 0x1d, 0x0e, 0x87, 0x0c, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/generated.proto new file mode 100644 index 0000000000..5a75fb240b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/generated.proto @@ -0,0 +1,202 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+ +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.rbac.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +message ClusterRole { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this ClusterRole + repeated PolicyRule rules = 2; +} + +// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. +message ClusterRoleBinding { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Subjects holds references to the objects the role applies to. + repeated Subject subjects = 2; + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + optional RoleRef roleRef = 3; +} + +// +k8s:deepcopy-gen=false +// ClusterRoleBindingBuilder let's us attach methods. A no-no for API types. +// We use it to construct bindings in code. It's more compact than trying to write them +// out in a literal. +message ClusterRoleBindingBuilder { + optional ClusterRoleBinding clusterRoleBinding = 1; +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +message ClusterRoleBindingList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ClusterRoleBindings + repeated ClusterRoleBinding items = 2; +} + +// ClusterRoleList is a collection of ClusterRoles +message ClusterRoleList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ClusterRoles + repeated ClusterRole items = 2; +} + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +message PolicyRule { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + repeated string verbs = 1; + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. + // +optional + repeated string apiGroups = 3; + + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // +optional + repeated string resources = 4; + + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +optional + repeated string resourceNames = 5; + + // NonResourceURLs is a set of partial urls that a user should have access to. 
*s are allowed, but only as the full, final step in the path + // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. + // +optional + repeated string nonResourceURLs = 6; +} + +// +k8s:deepcopy-gen=false +// PolicyRuleBuilder let's us attach methods. A no-no for API types. +// We use it to construct rules in code. It's more compact than trying to write them +// out in a literal and allows us to perform some basic checking during construction +message PolicyRuleBuilder { + optional PolicyRule policyRule = 1; +} + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +message Role { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this Role + repeated PolicyRule rules = 2; +} + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. +message RoleBinding { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Subjects holds references to the objects the role applies to. + repeated Subject subjects = 2; + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + optional RoleRef roleRef = 3; +} + +// RoleBindingList is a collection of RoleBindings +message RoleBindingList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of RoleBindings + repeated RoleBinding items = 2; +} + +// RoleList is a collection of Roles +message RoleList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of Roles + repeated Role items = 2; +} + +// RoleRef contains information that points to the role being used +message RoleRef { + // APIGroup is the group for the resource being referenced + optional string apiGroup = 1; + + // Kind is the type of resource being referenced + optional string kind = 2; + + // Name is the name of resource being referenced + optional string name = 3; +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +message Subject { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognized the kind value, the Authorizer should report an error. + optional string kind = 1; + + // APIVersion holds the API group and version of the referenced subject. + // Defaults to "v1" for ServiceAccount subjects. + // Defaults to "rbac.authorization.k8s.io/v1alpha1" for User and Group subjects. 
+ // +k8s:conversion-gen=false + // +optional + optional string apiVersion = 2; + + // Name of the object being referenced. + optional string name = 3; + + // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty + // the Authorizer should report an error. + // +optional + optional string namespace = 4; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/helpers.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/helpers.go new file mode 100644 index 0000000000..f417f3be05 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/helpers.go @@ -0,0 +1,146 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PolicyRuleBuilder let's us attach methods. A no-no for API types. +// We use it to construct rules in code. It's more compact than trying to write them +// out in a literal and allows us to perform some basic checking during construction +type PolicyRuleBuilder struct { + PolicyRule PolicyRule `protobuf:"bytes,1,opt,name=policyRule"` +} + +func NewRule(verbs ...string) *PolicyRuleBuilder { + return &PolicyRuleBuilder{ + PolicyRule: PolicyRule{Verbs: verbs}, + } +} + +func (r *PolicyRuleBuilder) Groups(groups ...string) *PolicyRuleBuilder { + r.PolicyRule.APIGroups = append(r.PolicyRule.APIGroups, groups...) + return r +} + +func (r *PolicyRuleBuilder) Resources(resources ...string) *PolicyRuleBuilder { + r.PolicyRule.Resources = append(r.PolicyRule.Resources, resources...) + return r +} + +func (r *PolicyRuleBuilder) Names(names ...string) *PolicyRuleBuilder { + r.PolicyRule.ResourceNames = append(r.PolicyRule.ResourceNames, names...) + return r +} + +func (r *PolicyRuleBuilder) URLs(urls ...string) *PolicyRuleBuilder { + r.PolicyRule.NonResourceURLs = append(r.PolicyRule.NonResourceURLs, urls...) 
+ return r +} + +func (r *PolicyRuleBuilder) RuleOrDie() PolicyRule { + ret, err := r.Rule() + if err != nil { + panic(err) + } + return ret +} + +func (r *PolicyRuleBuilder) Rule() (PolicyRule, error) { + if len(r.PolicyRule.Verbs) == 0 { + return PolicyRule{}, fmt.Errorf("verbs are required: %#v", r.PolicyRule) + } + + switch { + case len(r.PolicyRule.NonResourceURLs) > 0: + if len(r.PolicyRule.APIGroups) != 0 || len(r.PolicyRule.Resources) != 0 || len(r.PolicyRule.ResourceNames) != 0 { + return PolicyRule{}, fmt.Errorf("non-resource rule may not have apiGroups, resources, or resourceNames: %#v", r.PolicyRule) + } + case len(r.PolicyRule.Resources) > 0: + if len(r.PolicyRule.NonResourceURLs) != 0 { + return PolicyRule{}, fmt.Errorf("resource rule may not have nonResourceURLs: %#v", r.PolicyRule) + } + if len(r.PolicyRule.APIGroups) == 0 { + // this a common bug + return PolicyRule{}, fmt.Errorf("resource rule must have apiGroups: %#v", r.PolicyRule) + } + default: + return PolicyRule{}, fmt.Errorf("a rule must have either nonResourceURLs or resources: %#v", r.PolicyRule) + } + + return r.PolicyRule, nil +} + +// ClusterRoleBindingBuilder let's us attach methods. A no-no for API types. +// We use it to construct bindings in code. It's more compact than trying to write them +// out in a literal. +type ClusterRoleBindingBuilder struct { + ClusterRoleBinding ClusterRoleBinding `protobuf:"bytes,1,opt,name=clusterRoleBinding"` +} + +func NewClusterBinding(clusterRoleName string) *ClusterRoleBindingBuilder { + return &ClusterRoleBindingBuilder{ + ClusterRoleBinding: ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: clusterRoleName}, + RoleRef: RoleRef{ + APIGroup: GroupName, + Kind: "ClusterRole", + Name: clusterRoleName, + }, + }, + } +} + +func (r *ClusterRoleBindingBuilder) Groups(groups ...string) *ClusterRoleBindingBuilder { + for _, group := range groups { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: GroupKind, Name: group}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) Users(users ...string) *ClusterRoleBindingBuilder { + for _, user := range users { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: UserKind, Name: user}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *ClusterRoleBindingBuilder { + for _, saName := range serviceAccountNames { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: ServiceAccountKind, Namespace: namespace, Name: saName}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) BindingOrDie() ClusterRoleBinding { + ret, err := r.Binding() + if err != nil { + panic(err) + } + return ret +} + +func (r *ClusterRoleBindingBuilder) Binding() (ClusterRoleBinding, error) { + if len(r.ClusterRoleBinding.Subjects) == 0 { + return ClusterRoleBinding{}, fmt.Errorf("subjects are required: %#v", r.ClusterRoleBinding) + } + + return r.ClusterRoleBinding, nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/register.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/register.go new file mode 100644 index 0000000000..3977e99c51 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "rbac.authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Role{}, + &RoleBinding{}, + &RoleBindingList{}, + &RoleList{}, + + &ClusterRole{}, + &ClusterRoleBinding{}, + &ClusterRoleBindingList{}, + &ClusterRoleList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types.generated.go new file mode 100644 index 0000000000..64ba53a9be --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types.generated.go @@ -0,0 +1,4879 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +package v1alpha1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 + } +} + +func (x *PolicyRule) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = len(x.APIGroups) != 0 + yyq2[2] = len(x.Resources) != 0 + yyq2[3] = len(x.ResourceNames) != 0 + yyq2[4] = len(x.NonResourceURLs) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Verbs == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.Verbs, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("verbs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Verbs == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.Verbs, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.APIGroups == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncSliceStringV(x.APIGroups, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiGroups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.APIGroups == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncSliceStringV(x.APIGroups, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Resources == nil { + r.EncodeNil() + } 
else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + z.F.EncSliceStringV(x.Resources, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resources")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Resources == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + z.F.EncSliceStringV(x.Resources, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.ResourceNames == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + z.F.EncSliceStringV(x.ResourceNames, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceNames")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceNames == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + z.F.EncSliceStringV(x.ResourceNames, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.NonResourceURLs == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncSliceStringV(x.NonResourceURLs, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nonResourceURLs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NonResourceURLs == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + z.F.EncSliceStringV(x.NonResourceURLs, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PolicyRule) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PolicyRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "verbs": + if r.TryDecodeAsNil() { + x.Verbs = nil + } else { + 
yyv4 := &x.Verbs + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + case "apiGroups": + if r.TryDecodeAsNil() { + x.APIGroups = nil + } else { + yyv6 := &x.APIGroups + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecSliceStringX(yyv6, false, d) + } + } + case "resources": + if r.TryDecodeAsNil() { + x.Resources = nil + } else { + yyv8 := &x.Resources + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceStringX(yyv8, false, d) + } + } + case "resourceNames": + if r.TryDecodeAsNil() { + x.ResourceNames = nil + } else { + yyv10 := &x.ResourceNames + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + z.F.DecSliceStringX(yyv10, false, d) + } + } + case "nonResourceURLs": + if r.TryDecodeAsNil() { + x.NonResourceURLs = nil + } else { + yyv12 := &x.NonResourceURLs + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + z.F.DecSliceStringX(yyv12, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PolicyRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Verbs = nil + } else { + yyv15 := &x.Verbs + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + z.F.DecSliceStringX(yyv15, false, d) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIGroups = nil + } else { + yyv17 := &x.APIGroups + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + z.F.DecSliceStringX(yyv17, false, d) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Resources = nil + } else { + yyv19 := &x.Resources + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + z.F.DecSliceStringX(yyv19, false, d) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ResourceNames = nil + } else { + yyv21 := &x.ResourceNames + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + z.F.DecSliceStringX(yyv21, false, d) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NonResourceURLs = nil + } else { + yyv23 := &x.NonResourceURLs + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + z.F.DecSliceStringX(yyv23, false, d) + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = 
r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Subject) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.APIVersion != "" + yyq2[3] = x.Namespace != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Subject) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } 
else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Subject) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv10 := &x.Namespace + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Subject) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv17 := &x.Name + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + 
*((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv19 := &x.Namespace + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RoleRef) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiGroup")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleRef) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + 
x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleRef) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "apiGroup": + if r.TryDecodeAsNil() { + x.APIGroup = "" + } else { + yyv4 := &x.APIGroup + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv6 := &x.Kind + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RoleRef) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIGroup = "" + } else { + yyv11 := &x.APIGroup + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv15 := &x.Name + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Role) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, 
z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rules")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Role) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == 
codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Role) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "rules": + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv10 := &x.Rules + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Role) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if 
z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv19 := &x.Rules + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym15 := 
z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subjects")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy18 := &x.RoleRef + yy18.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("roleRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy20 := &x.RoleRef + yy20.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleBinding) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "subjects": + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv10 := &x.Subjects + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv10), d) + } + } + case "roleRef": + if r.TryDecodeAsNil() { + x.RoleRef = RoleRef{} + } else { + yyv12 := &x.RoleRef + yyv12.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func 
(x *RoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv14 := &x.Kind + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv16 := &x.APIVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv18 := &x.ObjectMeta + yym19 := z.DecBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.DecExt(yyv18) { + } else { + z.DecFallback(yyv18, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv20 := &x.Subjects + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv20), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RoleRef = RoleRef{} + } else { + yyv22 := &x.RoleRef + yyv22.CodecDecodeSelf(d) + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + 
r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceRoleBinding(([]RoleBinding)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceRoleBinding(([]RoleBinding)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleBindingList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 
>= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceRoleBinding((*[]RoleBinding)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceRoleBinding((*[]RoleBinding)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) 
+} + +func (x *RoleList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceRole(([]Role)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceRole(([]Role)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { 
+ } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceRole((*[]Role)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := 
&x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceRole((*[]Role)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRole) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + 
if x.Rules == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rules")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRole) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRole) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "rules": + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv10 := &x.Rules + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRole) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + 
if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv19 := &x.Rules + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subjects")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy18 := &x.RoleRef + yy18.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("roleRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy20 := &x.RoleRef + yy20.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRoleBinding) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + 
*((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "subjects": + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv10 := &x.Subjects + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv10), d) + } + } + case "roleRef": + if r.TryDecodeAsNil() { + x.RoleRef = RoleRef{} + } else { + yyv12 := &x.RoleRef + yyv12.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv14 := &x.Kind + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv16 := &x.APIVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv18 := &x.ObjectMeta + yym19 := z.DecBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.DecExt(yyv18) { + } else { + z.DecFallback(yyv18, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv20 := &x.Subjects + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv20), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RoleRef = RoleRef{} + } else { + yyv22 := &x.RoleRef + yyv22.CodecDecodeSelf(d) + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, 
"") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRoleBindingList) CodecDecodeSelf(d 
*codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRoleList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := 
&x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceClusterRole(([]ClusterRole)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceClusterRole(([]ClusterRole)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRoleList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceClusterRole((*[]ClusterRole)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var 
yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceClusterRole((*[]ClusterRole)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSlicePolicyRule(v []PolicyRule, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePolicyRule(v *[]PolicyRule, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PolicyRule{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 120) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PolicyRule, yyrl1) + } + } else { + yyv1 = make([]PolicyRule, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PolicyRule{} + } else { + yyv2 := 
&yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PolicyRule{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PolicyRule{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PolicyRule{}) // var yyz1 PolicyRule + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PolicyRule{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PolicyRule{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceSubject(v []Subject, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceSubject(v *[]Subject, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Subject{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Subject, yyrl1) + } + } else { + yyv1 = make([]Subject, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Subject{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Subject{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Subject{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Subject{}) // var yyz1 Subject + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Subject{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Subject{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceRoleBinding(v []RoleBinding, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + 
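The generated CodecEncodeSelf/CodecDecodeSelf methods above are not meant to be called directly; they satisfy the codec Selfer interface, so the ugorji codec Encoder and Decoder dispatch to them automatically. A minimal round-trip sketch (not part of the diff), assuming the vendored codec package is importable as github.com/ugorji/go/codec and the RBAC types at k8s.io/client-go/pkg/apis/rbac/v1alpha1; the subject and role names are made up:

```go
// Round-trips a RoleBindingList through the generated codec selfers.
// Because the generated types implement the Selfer interface, the
// Encoder/Decoder below call CodecEncodeSelf/CodecDecodeSelf for us.
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
	rbacv1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1"
)

func main() {
	in := rbacv1alpha1.RoleBindingList{
		Items: []rbacv1alpha1.RoleBinding{{
			Subjects: []rbacv1alpha1.Subject{{Kind: "User", Name: "jane"}},
			RoleRef: rbacv1alpha1.RoleRef{
				APIGroup: "rbac.authorization.k8s.io",
				Kind:     "Role",
				Name:     "pod-reader",
			},
		}},
	}

	var buf []byte
	h := &codec.JsonHandle{} // any handle works; JSON keeps the sketch readable
	if err := codec.NewEncoderBytes(&buf, h).Encode(&in); err != nil {
		panic(err)
	}

	var out rbacv1alpha1.RoleBindingList
	if err := codec.NewDecoderBytes(buf, h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(string(buf), len(out.Items))
}
```

With a non-binary handle and StructToArray left false, the map branches of the generated encoders are taken, which is why optional fields such as kind and apiVersion are omitted from the output when they are empty.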
+func (x codecSelfer1234) decSliceRoleBinding(v *[]RoleBinding, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []RoleBinding{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 328) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]RoleBinding, yyrl1) + } + } else { + yyv1 = make([]RoleBinding, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = RoleBinding{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, RoleBinding{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = RoleBinding{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, RoleBinding{}) // var yyz1 RoleBinding + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = RoleBinding{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []RoleBinding{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceRole(v []Role, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceRole(v *[]Role, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Role{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Role, yyrl1) + } + } else { + yyv1 = make([]Role, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Role{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Role{}) + 
yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Role{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Role{}) // var yyz1 Role + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Role{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Role{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceClusterRoleBinding(v []ClusterRoleBinding, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceClusterRoleBinding(v *[]ClusterRoleBinding, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ClusterRoleBinding{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 328) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ClusterRoleBinding, yyrl1) + } + } else { + yyv1 = make([]ClusterRoleBinding, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRoleBinding{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ClusterRoleBinding{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRoleBinding{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ClusterRoleBinding{}) // var yyz1 ClusterRoleBinding + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRoleBinding{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ClusterRoleBinding{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceClusterRole(v []ClusterRole, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + 
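The slice helpers in this block encode and decode the ClusterRole and ClusterRoleBinding values defined in the new types.go added further down in this diff. As a rough illustration of those structures (a sketch only; names such as "pod-reader" and "auditors" are invented, and the import paths assume the vendored packages shown in this diff):

```go
// Builds a ClusterRole granting read access to pods and a ClusterRoleBinding
// that grants it to a group, using the types added by this diff.
package rbacexample

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	rbacv1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1"
)

func exampleObjects() (rbacv1alpha1.ClusterRole, rbacv1alpha1.ClusterRoleBinding) {
	role := rbacv1alpha1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "pod-reader"},
		Rules: []rbacv1alpha1.PolicyRule{{
			Verbs:     []string{"get", "list", "watch"},
			APIGroups: []string{""}, // the core API group
			Resources: []string{"pods"},
		}},
	}
	binding := rbacv1alpha1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "read-pods-global"},
		Subjects: []rbacv1alpha1.Subject{{
			Kind: rbacv1alpha1.GroupKind, // "Group"
			Name: "auditors",
		}},
		RoleRef: rbacv1alpha1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
			Name:     role.Name,
		},
	}
	return role, binding
}
```

RoleRef points at the role only by APIGroup, Kind, and Name; per the comments in types.go below, a RoleRef that cannot be resolved must be reported as an error by the Authorizer rather than being silently ignored.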
+func (x codecSelfer1234) decSliceClusterRole(v *[]ClusterRole, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ClusterRole{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ClusterRole, yyrl1) + } + } else { + yyv1 = make([]ClusterRole, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRole{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ClusterRole{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRole{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ClusterRole{}) // var yyz1 ClusterRole + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRole{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ClusterRole{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types.go new file mode 100644 index 0000000000..e9f8efb3b4 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types.go @@ -0,0 +1,209 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Authorization is calculated against +// 1. evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + + GroupKind = "Group" + ServiceAccountKind = "ServiceAccount" + UserKind = "User" + + // AutoUpdateAnnotationKey is the name of an annotation which prevents reconciliation if set to "false" + AutoUpdateAnnotationKey = "rbac.authorization.kubernetes.io/autoupdate" +) + +// Authorization is calculated against +// 1. 
evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +type PolicyRule struct { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. + // +optional + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,3,rep,name=apiGroups"` + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // +optional + Resources []string `json:"resources,omitempty" protobuf:"bytes,4,rep,name=resources"` + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"` + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. + // +optional + NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,6,rep,name=nonResourceURLs"` +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +type Subject struct { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognized the kind value, the Authorizer should report an error. + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // APIVersion holds the API group and version of the referenced subject. + // Defaults to "v1" for ServiceAccount subjects. + // Defaults to "rbac.authorization.k8s.io/v1alpha1" for User and Group subjects. + // +k8s:conversion-gen=false + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt.name=apiVersion"` + // Name of the object being referenced. + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` + // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty + // the Authorizer should report an error. 
+	// +optional
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"`
+}
+
+// RoleRef contains information that points to the role being used
+type RoleRef struct {
+	// APIGroup is the group for the resource being referenced
+	APIGroup string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"`
+	// Kind is the type of resource being referenced
+	Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
+	// Name is the name of resource being referenced
+	Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
+}
+
+// +genclient=true
+
+// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
+type Role struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Rules holds all the PolicyRules for this Role
+	Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"`
+}
+
+// +genclient=true
+
+// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace.
+// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given
+// namespace only have effect in that namespace.
+type RoleBinding struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Subjects holds references to the objects the role applies to.
+	Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"`
+
+	// RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace.
+	// If the RoleRef cannot be resolved, the Authorizer must return an error.
+	RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"`
+}
+
+// RoleBindingList is a collection of RoleBindings
+type RoleBindingList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of RoleBindings
+	Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// RoleList is a collection of Roles
+type RoleList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of Roles
+	Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient=true
+// +nonNamespaced=true
+
+// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
+type ClusterRole struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Rules holds all the PolicyRules for this ClusterRole
+	Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"`
+}
+
+// +genclient=true
+// +nonNamespaced=true
+
+// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace,
+// and adds who information via Subject.
+type ClusterRoleBinding struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+ // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Subjects holds references to the objects the role applies to. + Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"` + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +type ClusterRoleBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoleBindings + Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ClusterRoleList is a collection of ClusterRoles +type ClusterRoleList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoles + Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..d58a722af1 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,148 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_ClusterRole = map[string]string{ + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", +} + +func (ClusterRole) SwaggerDoc() map[string]string { + return map_ClusterRole +} + +var map_ClusterRoleBinding = map[string]string{ + "": "ClusterRoleBinding references a ClusterRole, but not contain it. 
It can reference a ClusterRole in the global namespace, and adds who information via Subject.", + "metadata": "Standard object's metadata.", + "subjects": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (ClusterRoleBinding) SwaggerDoc() map[string]string { + return map_ClusterRoleBinding +} + +var map_ClusterRoleBindingList = map[string]string{ + "": "ClusterRoleBindingList is a collection of ClusterRoleBindings", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoleBindings", +} + +func (ClusterRoleBindingList) SwaggerDoc() map[string]string { + return map_ClusterRoleBindingList +} + +var map_ClusterRoleList = map[string]string{ + "": "ClusterRoleList is a collection of ClusterRoles", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoles", +} + +func (ClusterRoleList) SwaggerDoc() map[string]string { + return map_ClusterRoleList +} + +var map_PolicyRule = map[string]string{ + "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", + "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", + "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", +} + +func (PolicyRule) SwaggerDoc() map[string]string { + return map_PolicyRule +} + +var map_Role = map[string]string{ + "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this Role", +} + +func (Role) SwaggerDoc() map[string]string { + return map_Role +} + +var map_RoleBinding = map[string]string{ + "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", + "metadata": "Standard object's metadata.", + "subjects": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. 
If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (RoleBinding) SwaggerDoc() map[string]string { + return map_RoleBinding +} + +var map_RoleBindingList = map[string]string{ + "": "RoleBindingList is a collection of RoleBindings", + "metadata": "Standard object's metadata.", + "items": "Items is a list of RoleBindings", +} + +func (RoleBindingList) SwaggerDoc() map[string]string { + return map_RoleBindingList +} + +var map_RoleList = map[string]string{ + "": "RoleList is a collection of Roles", + "metadata": "Standard object's metadata.", + "items": "Items is a list of Roles", +} + +func (RoleList) SwaggerDoc() map[string]string { + return map_RoleList +} + +var map_RoleRef = map[string]string{ + "": "RoleRef contains information that points to the role being used", + "apiGroup": "APIGroup is the group for the resource being referenced", + "kind": "Kind is the type of resource being referenced", + "name": "Name is the name of resource being referenced", +} + +func (RoleRef) SwaggerDoc() map[string]string { + return map_RoleRef +} + +var map_Subject = map[string]string{ + "": "Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names.", + "kind": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.", + "apiVersion": "APIVersion holds the API group and version of the referenced subject. Defaults to \"v1\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io/v1alpha1\" for User and Group subjects.", + "name": "Name of the object being referenced.", + "namespace": "Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.", +} + +func (Subject) SwaggerDoc() map[string]string { + return map_Subject +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go new file mode 100644 index 0000000000..b90d2195ed --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,425 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1alpha1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + rbac "k8s.io/client-go/pkg/apis/rbac" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole, + Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole, + Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding, + Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding, + Convert_v1alpha1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder, + Convert_rbac_ClusterRoleBindingBuilder_To_v1alpha1_ClusterRoleBindingBuilder, + Convert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList, + Convert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList, + Convert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList, + Convert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList, + Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule, + Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule, + Convert_v1alpha1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder, + Convert_rbac_PolicyRuleBuilder_To_v1alpha1_PolicyRuleBuilder, + Convert_v1alpha1_Role_To_rbac_Role, + Convert_rbac_Role_To_v1alpha1_Role, + Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding, + Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding, + Convert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList, + Convert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList, + Convert_v1alpha1_RoleList_To_rbac_RoleList, + Convert_rbac_RoleList_To_v1alpha1_RoleList, + Convert_v1alpha1_RoleRef_To_rbac_RoleRef, + Convert_rbac_RoleRef_To_v1alpha1_RoleRef, + Convert_v1alpha1_Subject_To_rbac_Subject, + Convert_rbac_Subject_To_v1alpha1_Subject, + ) +} + +func autoConvert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules)) + return nil +} + +func Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in, out, s) +} + +func autoConvert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) + return nil +} + +func Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error { + return autoConvert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in, out, s) +} + +func autoConvert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]rbac.Subject, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_Subject_To_rbac_Subject(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + if err := Convert_v1alpha1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in, out, s) +} + +func autoConvert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + 
*out = make([]Subject, len(*in)) + for i := range *in { + if err := Convert_rbac_Subject_To_v1alpha1_Subject(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + if err := Convert_rbac_RoleRef_To_v1alpha1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { + return err + } + return nil +} + +func Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in, out, s) +} + +func autoConvert_v1alpha1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in *ClusterRoleBindingBuilder, out *rbac.ClusterRoleBindingBuilder, s conversion.Scope) error { + if err := Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(&in.ClusterRoleBinding, &out.ClusterRoleBinding, s); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in *ClusterRoleBindingBuilder, out *rbac.ClusterRoleBindingBuilder, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in, out, s) +} + +func autoConvert_rbac_ClusterRoleBindingBuilder_To_v1alpha1_ClusterRoleBindingBuilder(in *rbac.ClusterRoleBindingBuilder, out *ClusterRoleBindingBuilder, s conversion.Scope) error { + if err := Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(&in.ClusterRoleBinding, &out.ClusterRoleBinding, s); err != nil { + return err + } + return nil +} + +func Convert_rbac_ClusterRoleBindingBuilder_To_v1alpha1_ClusterRoleBindingBuilder(in *rbac.ClusterRoleBindingBuilder, out *ClusterRoleBindingBuilder, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleBindingBuilder_To_v1alpha1_ClusterRoleBindingBuilder(in, out, s) +} + +func autoConvert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]rbac.ClusterRoleBinding, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in, out, s) +} + +func autoConvert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(*in)) + for i := range *in { + if err := Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in, out, s) +} + +func autoConvert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out 
*rbac.ClusterRoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]rbac.ClusterRole)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in, out, s) +} + +func autoConvert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]ClusterRole)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in, out, s) +} + +func autoConvert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error { + out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) + out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) + out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) + return nil +} + +func Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error { + return autoConvert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in, out, s) +} + +func autoConvert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error { + out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) + out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) + out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) + return nil +} + +func Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error { + return autoConvert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in, out, s) +} + +func autoConvert_v1alpha1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in *PolicyRuleBuilder, out *rbac.PolicyRuleBuilder, s conversion.Scope) error { + if err := Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(&in.PolicyRule, &out.PolicyRule, s); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in *PolicyRuleBuilder, out *rbac.PolicyRuleBuilder, s conversion.Scope) error { + return autoConvert_v1alpha1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in, out, s) +} + +func autoConvert_rbac_PolicyRuleBuilder_To_v1alpha1_PolicyRuleBuilder(in *rbac.PolicyRuleBuilder, out *PolicyRuleBuilder, s conversion.Scope) error { + if err := Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(&in.PolicyRule, &out.PolicyRule, s); err != nil { + return err + } + return nil +} + +func Convert_rbac_PolicyRuleBuilder_To_v1alpha1_PolicyRuleBuilder(in *rbac.PolicyRuleBuilder, out *PolicyRuleBuilder, s conversion.Scope) error { + return autoConvert_rbac_PolicyRuleBuilder_To_v1alpha1_PolicyRuleBuilder(in, out, s) +} + +func autoConvert_v1alpha1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules)) + return nil +} + +func Convert_v1alpha1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { 
+ return autoConvert_v1alpha1_Role_To_rbac_Role(in, out, s) +} + +func autoConvert_rbac_Role_To_v1alpha1_Role(in *rbac.Role, out *Role, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) + return nil +} + +func Convert_rbac_Role_To_v1alpha1_Role(in *rbac.Role, out *Role, s conversion.Scope) error { + return autoConvert_rbac_Role_To_v1alpha1_Role(in, out, s) +} + +func autoConvert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]rbac.Subject, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_Subject_To_rbac_Subject(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + if err := Convert_v1alpha1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error { + return autoConvert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in, out, s) +} + +func autoConvert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + for i := range *in { + if err := Convert_rbac_Subject_To_v1alpha1_Subject(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + if err := Convert_rbac_RoleRef_To_v1alpha1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { + return err + } + return nil +} + +func Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error { + return autoConvert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in, out, s) +} + +func autoConvert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]rbac.RoleBinding, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error { + return autoConvert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in, out, s) +} + +func autoConvert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + if err := Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error { + return autoConvert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in, out, s) +} + +func autoConvert_v1alpha1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = 
*(*[]rbac.Role)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1alpha1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error { + return autoConvert_v1alpha1_RoleList_To_rbac_RoleList(in, out, s) +} + +func autoConvert_rbac_RoleList_To_v1alpha1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]Role)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_rbac_RoleList_To_v1alpha1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error { + return autoConvert_rbac_RoleList_To_v1alpha1_RoleList(in, out, s) +} + +func autoConvert_v1alpha1_RoleRef_To_rbac_RoleRef(in *RoleRef, out *rbac.RoleRef, s conversion.Scope) error { + out.APIGroup = in.APIGroup + out.Kind = in.Kind + out.Name = in.Name + return nil +} + +func Convert_v1alpha1_RoleRef_To_rbac_RoleRef(in *RoleRef, out *rbac.RoleRef, s conversion.Scope) error { + return autoConvert_v1alpha1_RoleRef_To_rbac_RoleRef(in, out, s) +} + +func autoConvert_rbac_RoleRef_To_v1alpha1_RoleRef(in *rbac.RoleRef, out *RoleRef, s conversion.Scope) error { + out.APIGroup = in.APIGroup + out.Kind = in.Kind + out.Name = in.Name + return nil +} + +func Convert_rbac_RoleRef_To_v1alpha1_RoleRef(in *rbac.RoleRef, out *RoleRef, s conversion.Scope) error { + return autoConvert_rbac_RoleRef_To_v1alpha1_RoleRef(in, out, s) +} + +func autoConvert_v1alpha1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error { + out.Kind = in.Kind + // INFO: in.APIVersion opted out of conversion generation + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +func autoConvert_rbac_Subject_To_v1alpha1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error { + out.Kind = in.Kind + // WARNING: in.APIGroup requires manual conversion: does not exist in peer-type + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..f4dfd3ca7c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,258 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_ClusterRole, InType: reflect.TypeOf(&ClusterRole{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_ClusterRoleBinding, InType: reflect.TypeOf(&ClusterRoleBinding{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_ClusterRoleBindingList, InType: reflect.TypeOf(&ClusterRoleBindingList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_ClusterRoleList, InType: reflect.TypeOf(&ClusterRoleList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_PolicyRule, InType: reflect.TypeOf(&PolicyRule{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_Role, InType: reflect.TypeOf(&Role{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_RoleBinding, InType: reflect.TypeOf(&RoleBinding{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_RoleBindingList, InType: reflect.TypeOf(&RoleBindingList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_RoleList, InType: reflect.TypeOf(&RoleList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_RoleRef, InType: reflect.TypeOf(&RoleRef{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_Subject, InType: reflect.TypeOf(&Subject{})}, + ) +} + +func DeepCopy_v1alpha1_ClusterRole(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRole) + out := out.(*ClusterRole) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + if err := DeepCopy_v1alpha1_PolicyRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1alpha1_ClusterRoleBinding(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRoleBinding) + out := out.(*ClusterRoleBinding) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1alpha1_ClusterRoleBindingList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRoleBindingList) + out := out.(*ClusterRoleBindingList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(*in)) + for i := range *in { + if err := DeepCopy_v1alpha1_ClusterRoleBinding(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1alpha1_ClusterRoleList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRoleList) + out := out.(*ClusterRoleList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRole, len(*in)) + for i := range *in { + if err := DeepCopy_v1alpha1_ClusterRole(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1alpha1_PolicyRule(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PolicyRule) + out := out.(*PolicyRule) + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if 
in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1alpha1_Role(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Role) + out := out.(*Role) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + if err := DeepCopy_v1alpha1_PolicyRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1alpha1_RoleBinding(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleBinding) + out := out.(*RoleBinding) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1alpha1_RoleBindingList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleBindingList) + out := out.(*RoleBindingList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + if err := DeepCopy_v1alpha1_RoleBinding(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1alpha1_RoleList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleList) + out := out.(*RoleList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Role, len(*in)) + for i := range *in { + if err := DeepCopy_v1alpha1_Role(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1alpha1_RoleRef(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleRef) + out := out.(*RoleRef) + *out = *in + return nil + } +} + +func DeepCopy_v1alpha1_Subject(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Subject) + out := out.(*Subject) + *out = *in + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.defaults.go new file mode 100644 index 0000000000..1a5749be30 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,66 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&ClusterRoleBinding{}, func(obj interface{}) { SetObjectDefaults_ClusterRoleBinding(obj.(*ClusterRoleBinding)) }) + scheme.AddTypeDefaultingFunc(&ClusterRoleBindingList{}, func(obj interface{}) { SetObjectDefaults_ClusterRoleBindingList(obj.(*ClusterRoleBindingList)) }) + scheme.AddTypeDefaultingFunc(&RoleBinding{}, func(obj interface{}) { SetObjectDefaults_RoleBinding(obj.(*RoleBinding)) }) + scheme.AddTypeDefaultingFunc(&RoleBindingList{}, func(obj interface{}) { SetObjectDefaults_RoleBindingList(obj.(*RoleBindingList)) }) + return nil +} + +func SetObjectDefaults_ClusterRoleBinding(in *ClusterRoleBinding) { + SetDefaults_ClusterRoleBinding(in) + for i := range in.Subjects { + a := &in.Subjects[i] + SetDefaults_Subject(a) + } +} + +func SetObjectDefaults_ClusterRoleBindingList(in *ClusterRoleBindingList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ClusterRoleBinding(a) + } +} + +func SetObjectDefaults_RoleBinding(in *RoleBinding) { + SetDefaults_RoleBinding(in) + for i := range in.Subjects { + a := &in.Subjects[i] + SetDefaults_Subject(a) + } +} + +func SetObjectDefaults_RoleBindingList(in *RoleBindingList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_RoleBinding(a) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/defaults.go new file mode 100644 index 0000000000..6c29ae500e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/defaults.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + RegisterDefaults(scheme) + return scheme.AddDefaultingFuncs( + SetDefaults_ClusterRoleBinding, + SetDefaults_RoleBinding, + SetDefaults_Subject, + ) +} + +func SetDefaults_ClusterRoleBinding(obj *ClusterRoleBinding) { + if len(obj.RoleRef.APIGroup) == 0 { + obj.RoleRef.APIGroup = GroupName + } +} +func SetDefaults_RoleBinding(obj *RoleBinding) { + if len(obj.RoleRef.APIGroup) == 0 { + obj.RoleRef.APIGroup = GroupName + } +} +func SetDefaults_Subject(obj *Subject) { + if len(obj.APIGroup) == 0 { + switch obj.Kind { + case ServiceAccountKind: + obj.APIGroup = "" + case UserKind: + obj.APIGroup = GroupName + case GroupKind: + obj.APIGroup = GroupName + } + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/doc.go new file mode 100644 index 0000000000..a9ad4296ed --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=rbac.authorization.k8s.io +package v1beta1 diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/generated.pb.go new file mode 100644 index 0000000000..5f553a57eb --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/generated.pb.go @@ -0,0 +1,2816 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/rbac/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/rbac/v1beta1/generated.proto + + It has these top-level messages: + ClusterRole + ClusterRoleBinding + ClusterRoleBindingBuilder + ClusterRoleBindingList + ClusterRoleList + PolicyRule + PolicyRuleBuilder + Role + RoleBinding + RoleBindingList + RoleList + RoleRef + Subject +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *ClusterRoleBindingBuilder) Reset() { *m = ClusterRoleBindingBuilder{} } +func (*ClusterRoleBindingBuilder) ProtoMessage() {} +func (*ClusterRoleBindingBuilder) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *PolicyRuleBuilder) Reset() { *m = PolicyRuleBuilder{} } +func (*PolicyRuleBuilder) ProtoMessage() {} +func (*PolicyRuleBuilder) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *RoleRef) Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func init() { + proto.RegisterType((*ClusterRole)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.ClusterRole") + proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.ClusterRoleBinding") + proto.RegisterType((*ClusterRoleBindingBuilder)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.ClusterRoleBindingBuilder") + proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.ClusterRoleBindingList") + proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.ClusterRoleList") + proto.RegisterType((*PolicyRule)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.PolicyRule") + proto.RegisterType((*PolicyRuleBuilder)(nil), 
"k8s.io.client-go.pkg.apis.rbac.v1beta1.PolicyRuleBuilder") + proto.RegisterType((*Role)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.Role") + proto.RegisterType((*RoleBinding)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.RoleBinding") + proto.RegisterType((*RoleBindingList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.RoleBindingList") + proto.RegisterType((*RoleList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.RoleList") + proto.RegisterType((*RoleRef)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.RoleRef") + proto.RegisterType((*Subject)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.Subject") +} +func (m *ClusterRole) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRole) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ClusterRoleBinding) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleBinding) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n2, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size())) + n3, err := m.RoleRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *ClusterRoleBindingBuilder) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleBindingBuilder) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ClusterRoleBinding.Size())) + n4, err := m.ClusterRoleBinding.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + return i, nil +} + +func (m *ClusterRoleBindingList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleBindingList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i 
+= n + } + } + return i, nil +} + +func (m *ClusterRoleList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PolicyRule) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PolicyRule) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + data[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + data[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + data[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *PolicyRuleBuilder) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PolicyRuleBuilder) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.PolicyRule.Size())) + n7, err := m.PolicyRule.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + return i, nil +} + +func (m *Role) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Role) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n8, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m 
*RoleBinding) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleBinding) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size())) + n10, err := m.RoleRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + return i, nil +} + +func (m *RoleBindingList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleBindingList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n11, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RoleList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n12, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RoleRef) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleRef) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIGroup))) + i += copy(data[i:], m.APIGroup) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + return i, nil +} + +func (m *Subject) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Subject) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIGroup))) + i += 
copy(data[i:], m.APIGroup) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) + i += copy(data[i:], m.Namespace) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *ClusterRole) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRoleBinding) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterRoleBindingBuilder) Size() (n int) { + var l int + _ = l + l = m.ClusterRoleBinding.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterRoleBindingList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRoleList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRule) Size() (n int) { + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRuleBuilder) Size() (n int) { + var l int + _ = l + l = m.PolicyRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Role) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleBinding) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + 
if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RoleBindingList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleRef) Size() (n int) { + var l int + _ = l + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Subject) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterRole) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRole{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBindingBuilder) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleBindingBuilder{`, + `ClusterRoleBinding:` + strings.Replace(strings.Replace(this.ClusterRoleBinding.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBindingList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", 
"k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRuleBuilder) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PolicyRuleBuilder{`, + `PolicyRule:` + strings.Replace(strings.Replace(this.PolicyRule.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Role) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Role{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleBindingList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleRef{`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Subject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Subject{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v 
interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterRole) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBinding) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBindingBuilder) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBindingBuilder: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBindingBuilder: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleBinding", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClusterRoleBinding.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} +func (m *ClusterRoleBindingList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRole{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRule) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRuleBuilder) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRuleBuilder: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRuleBuilder: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PolicyRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PolicyRule.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil 
+} +func (m *Role) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Role: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBinding) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RoleBinding{}) + if err 
:= m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Role{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleRef) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Subject) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if 
err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 830 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x54, 0xbf, 0x8f, 0xe3, 0x44, + 0x14, 0xce, 0x64, 0x13, 0x6d, 0x3c, 0xcb, 0x2a, 0xec, 0x20, 0x21, 0x93, 0xc2, 0x89, 0xdc, 0xb0, + 0x88, 0x3b, 0xfb, 0xf6, 0xee, 0xc4, 0x21, 0x21, 0x0a, 0x4c, 0x81, 0x4e, 0x1c, 0xcb, 0x69, 0x10, + 0x88, 0x5f, 0x42, 0x37, 0x71, 0xe6, 0xbc, 0x43, 0xfc, 0x4b, 0x33, 0xe3, 0x88, 0x13, 0x14, 0x74, + 0xb4, 0xfc, 0x13, 0x74, 0xd7, 0xd1, 0x52, 0x51, 0x2d, 0x54, 0x57, 0x6e, 0x15, 0xb1, 0xe6, 0x0f, + 0x01, 0xd9, 0x1e, 0xff, 0x08, 0x4e, 0xd8, 0xb0, 0x48, 0x91, 0x90, 0xa8, 0x92, 0x79, 0xef, 0xfb, + 0xde, 0xbc, 0xef, 0xbd, 0xf1, 0x07, 0xef, 0xcd, 0x5f, 0x17, 0x16, 0x8b, 0xec, 0x79, 0x32, 0xa5, + 0x3c, 0xa4, 0x92, 0x0a, 0x3b, 0x9e, 0x7b, 0x36, 0x89, 0x99, 0xb0, 0xf9, 0x94, 0xb8, 0xf6, 0xe2, + 0x64, 0x4a, 0x25, 0x39, 0xb1, 0x3d, 0x1a, 0x52, 0x4e, 0x24, 0x9d, 0x59, 0x31, 0x8f, 0x64, 0x84, + 0x5e, 0x2e, 0x88, 0x56, 0x4d, 0xb4, 0xe2, 0xb9, 0x67, 0x65, 0x44, 0x2b, 0x23, 0x5a, 0x8a, 0x38, + 0xba, 0xe9, 0x31, 0x79, 0x96, 0x4c, 0x2d, 0x37, 0x0a, 0x6c, 0x2f, 0xf2, 0x22, 0x3b, 0xe7, 0x4f, + 0x93, 0xc7, 0xf9, 0x29, 0x3f, 0xe4, 0xff, 0x8a, 0xba, 0xa3, 0xbb, 0xaa, 0x21, 0x12, 0xb3, 0x80, + 0xb8, 0x67, 0x2c, 0xa4, 0xfc, 0x49, 0xdd, 0x52, 0x40, 0x25, 0xb1, 0x17, 0xad, 0x6e, 0x46, 0xf6, + 0x26, 0x16, 0x4f, 0x42, 0xc9, 0x02, 0xda, 0x22, 0xbc, 0x76, 0x15, 0x41, 0xb8, 0x67, 0x34, 0x20, + 0x2d, 0xde, 0x9d, 0x4d, 0xbc, 0x44, 0x32, 0xdf, 0x66, 0xa1, 0x14, 0x92, 0xb7, 0x48, 0x0d, 0x4d, + 0x82, 0xf2, 0x05, 0xe5, 0xb5, 0x20, 0xfa, 0x15, 0x09, 0x62, 0x9f, 0xae, 0xd3, 0x74, 0x63, 0xe3, + 0x6a, 0xd6, 0xa0, 0xcd, 0x5f, 0x00, 0x3c, 0x78, 0xdb, 0x4f, 0x84, 0xa4, 0x1c, 0x47, 0x3e, 0x45, + 0x8f, 0xe0, 0x20, 0x1b, 0xd6, 0x8c, 0x48, 0xa2, 0x83, 0x09, 0x38, 0x3e, 0xb8, 0x7d, 0xcb, 0x52, + 0x2b, 0x6b, 0xf6, 0x5e, 0x2f, 0x2d, 0x43, 0x5b, 0x8b, 0x13, 0xeb, 0xfd, 0xe9, 0x97, 0xd4, 0x95, + 0xef, 0x51, 0x49, 0x1c, 0x74, 0xbe, 0x1c, 0x77, 0xd2, 0xe5, 0x18, 0xd6, 0x31, 0x5c, 0x55, 0x45, + 0x1f, 0xc3, 0x3e, 0x4f, 0x7c, 0x2a, 0xf4, 0xee, 0x64, 0xef, 0xf8, 0xe0, 0xf6, 0x1d, 0x6b, 0xcb, + 0x17, 0x61, 0x3d, 0x8c, 0x7c, 0xe6, 0x3e, 0xc1, 0x89, 0x4f, 0x9d, 0x43, 0x75, 0x43, 0x3f, 0x3b, + 0x09, 0x5c, 0x14, 0x34, 0x7f, 0xec, 0x42, 0xd4, 0xd0, 0xe2, 0xb0, 0x70, 0xc6, 0x42, 0x6f, 0x07, + 0x92, 0xbe, 0x80, 0x03, 0x91, 0xe4, 0x89, 0x52, 0xd5, 0xad, 0xad, 0x55, 0x7d, 0x50, 0x10, 0x9d, + 0xe7, 0xd5, 0x0d, 0x03, 0x15, 0x10, 0xb8, 0xaa, 0x89, 0x3e, 0x83, 0xfb, 0x3c, 0xf2, 0x29, 0xa6, + 0x8f, 0xf5, 0xbd, 0x55, 0x01, 0x57, 0x96, 0xc7, 0x05, 0xcf, 0x19, 0xaa, 0xf2, 0xfb, 0x2a, 0x80, + 0xcb, 0x8a, 0xe6, 0x0f, 0x00, 0xbe, 0xd4, 0x9e, 0x9a, 0x93, 0x30, 0x7f, 0x46, 0x39, 0xfa, 0x0e, + 0x40, 0xe4, 0xb6, 0xb2, 0x6a, 0x8e, 0x6f, 0x6c, 0xdd, 0xc6, 0x9a, 0x0b, 0x46, 0xaa, 0xa3, 0x35, + 0x2b, 0xc3, 0x6b, 0xae, 0x34, 0x2f, 0x00, 0x7c, 0xb1, 0x0d, 0x7d, 0xc0, 0x84, 0x44, 0x9f, 0xb7, + 0x36, 0x6c, 0x6d, 0xb7, 0xe1, 0x8c, 0x9d, 0xef, 0xb7, 0x9a, 0x7e, 0x19, 0x69, 0x6c, 0xf7, 0x11, + 0xec, 0x33, 0x49, 0x83, 0x72, 0xb5, 0xff, 0x4a, 0x74, 0xf5, 0x70, 0xef, 0x67, 0x15, 0x71, 
0x51, + 0xd8, 0xfc, 0x15, 0xc0, 0x61, 0x03, 0xbc, 0x03, 0x4d, 0x9f, 0xac, 0x6a, 0xba, 0x7b, 0x2d, 0x4d, + 0xeb, 0xc5, 0xfc, 0x01, 0x20, 0xac, 0x3f, 0x55, 0x34, 0x86, 0xfd, 0x05, 0xe5, 0x53, 0xa1, 0x83, + 0xc9, 0xde, 0xb1, 0xe6, 0x68, 0x19, 0xfe, 0xa3, 0x2c, 0x80, 0x8b, 0x38, 0x7a, 0x15, 0x6a, 0x24, + 0x66, 0xef, 0xf0, 0x28, 0x89, 0x8b, 0x76, 0x34, 0xe7, 0x30, 0x5d, 0x8e, 0xb5, 0xb7, 0x1e, 0xde, + 0x2f, 0x82, 0xb8, 0xce, 0x67, 0x60, 0x4e, 0x45, 0x94, 0x70, 0x97, 0x0a, 0x7d, 0xaf, 0x06, 0xe3, + 0x32, 0x88, 0xeb, 0x3c, 0xba, 0x07, 0x0f, 0xcb, 0xc3, 0x29, 0x09, 0xa8, 0xd0, 0x7b, 0x39, 0xe1, + 0x28, 0x5d, 0x8e, 0x0f, 0x71, 0x33, 0x81, 0x57, 0x71, 0xe8, 0x4d, 0x38, 0x0c, 0xa3, 0xb0, 0x84, + 0x7c, 0x88, 0x1f, 0x08, 0xbd, 0x9f, 0x53, 0x5f, 0x48, 0x97, 0xe3, 0xe1, 0xe9, 0x6a, 0x0a, 0xff, + 0x15, 0x6b, 0x7e, 0x03, 0x8f, 0x1a, 0x5e, 0xa5, 0x3e, 0x24, 0x0f, 0xc2, 0xb8, 0x0a, 0xaa, 0x8d, + 0x5e, 0xcb, 0xfb, 0x2a, 0x2b, 0xaa, 0x63, 0xb8, 0x51, 0xda, 0xfc, 0x19, 0xc0, 0xde, 0x7f, 0xde, + 0xca, 0x9f, 0x76, 0xe1, 0xc1, 0xff, 0x1e, 0xbe, 0xb5, 0x87, 0x67, 0x06, 0xb2, 0x5b, 0x53, 0xbc, + 0xb6, 0x81, 0x5c, 0xed, 0x86, 0x3f, 0x01, 0x38, 0xd8, 0x91, 0x0d, 0xe2, 0x55, 0x15, 0x37, 0xff, + 0x99, 0x8a, 0xf5, 0xed, 0x7f, 0x0d, 0xcb, 0xfd, 0xa0, 0x1b, 0x70, 0x50, 0x5a, 0x57, 0xde, 0xbc, + 0x56, 0x37, 0x53, 0xba, 0x1b, 0xae, 0x10, 0x68, 0x02, 0x7b, 0x73, 0x16, 0xce, 0xf4, 0x6e, 0x8e, + 0x7c, 0x4e, 0x21, 0x7b, 0xef, 0xb2, 0x70, 0x86, 0xf3, 0x4c, 0x86, 0x08, 0x49, 0x40, 0xf3, 0x07, + 0xd4, 0x40, 0x64, 0xa6, 0x85, 0xf3, 0x8c, 0xf9, 0x14, 0xc0, 0x7d, 0xf5, 0xf8, 0xaa, 0x7a, 0x60, + 0x63, 0xbd, 0x66, 0x7f, 0xdd, 0x6d, 0xfa, 0xfb, 0xfb, 0xdb, 0x91, 0x0d, 0xb5, 0xec, 0x57, 0xc4, + 0xc4, 0xa5, 0x7a, 0x2f, 0x87, 0x1d, 0x29, 0x98, 0x76, 0x5a, 0x26, 0x70, 0x8d, 0x71, 0x5e, 0x39, + 0xbf, 0x34, 0x3a, 0xcf, 0x2e, 0x8d, 0xce, 0xc5, 0xa5, 0xd1, 0xf9, 0x36, 0x35, 0xc0, 0x79, 0x6a, + 0x80, 0x67, 0xa9, 0x01, 0x7e, 0x4b, 0x0d, 0xf0, 0xfd, 0xef, 0x46, 0xe7, 0xd3, 0x7d, 0x35, 0xf1, + 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x96, 0xa1, 0xd4, 0x72, 0x0c, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/generated.proto new file mode 100644 index 0000000000..542e2b0276 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/generated.proto @@ -0,0 +1,200 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.rbac.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". 
+option go_package = "v1beta1"; + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +message ClusterRole { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this ClusterRole + repeated PolicyRule rules = 2; +} + +// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. +message ClusterRoleBinding { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Subjects holds references to the objects the role applies to. + repeated Subject subjects = 2; + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + optional RoleRef roleRef = 3; +} + +// +k8s:deepcopy-gen=false +// ClusterRoleBindingBuilder let's us attach methods. A no-no for API types. +// We use it to construct bindings in code. It's more compact than trying to write them +// out in a literal. +message ClusterRoleBindingBuilder { + optional ClusterRoleBinding clusterRoleBinding = 1; +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +message ClusterRoleBindingList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ClusterRoleBindings + repeated ClusterRoleBinding items = 2; +} + +// ClusterRoleList is a collection of ClusterRoles +message ClusterRoleList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ClusterRoles + repeated ClusterRole items = 2; +} + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +message PolicyRule { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + repeated string verbs = 1; + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. + // +optional + repeated string apiGroups = 2; + + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // +optional + repeated string resources = 3; + + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +optional + repeated string resourceNames = 4; + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. + // +optional + repeated string nonResourceURLs = 5; +} + +// +k8s:deepcopy-gen=false +// PolicyRuleBuilder let's us attach methods. A no-no for API types. +// We use it to construct rules in code. 
It's more compact than trying to write them +// out in a literal and allows us to perform some basic checking during construction +message PolicyRuleBuilder { + optional PolicyRule policyRule = 1; +} + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +message Role { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this Role + repeated PolicyRule rules = 2; +} + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. +message RoleBinding { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Subjects holds references to the objects the role applies to. + repeated Subject subjects = 2; + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + optional RoleRef roleRef = 3; +} + +// RoleBindingList is a collection of RoleBindings +message RoleBindingList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of RoleBindings + repeated RoleBinding items = 2; +} + +// RoleList is a collection of Roles +message RoleList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of Roles + repeated Role items = 2; +} + +// RoleRef contains information that points to the role being used +message RoleRef { + // APIGroup is the group for the resource being referenced + optional string apiGroup = 1; + + // Kind is the type of resource being referenced + optional string kind = 2; + + // Name is the name of resource being referenced + optional string name = 3; +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +message Subject { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognized the kind value, the Authorizer should report an error. + optional string kind = 1; + + // APIGroup holds the API group of the referenced subject. + // Defaults to "" for ServiceAccount subjects. + // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. + // +optional + optional string apiGroup = 2; + + // Name of the object being referenced. + optional string name = 3; + + // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty + // the Authorizer should report an error. + // +optional + optional string namespace = 4; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/helpers.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/helpers.go new file mode 100644 index 0000000000..063b9ed3f6 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/helpers.go @@ -0,0 +1,146 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PolicyRuleBuilder lets us attach methods. A no-no for API types. +// We use it to construct rules in code. It's more compact than trying to write them +// out in a literal and allows us to perform some basic checking during construction +type PolicyRuleBuilder struct { + PolicyRule PolicyRule `protobuf:"bytes,1,opt,name=policyRule"` +} + +func NewRule(verbs ...string) *PolicyRuleBuilder { + return &PolicyRuleBuilder{ + PolicyRule: PolicyRule{Verbs: verbs}, + } +} + +func (r *PolicyRuleBuilder) Groups(groups ...string) *PolicyRuleBuilder { + r.PolicyRule.APIGroups = append(r.PolicyRule.APIGroups, groups...) + return r +} + +func (r *PolicyRuleBuilder) Resources(resources ...string) *PolicyRuleBuilder { + r.PolicyRule.Resources = append(r.PolicyRule.Resources, resources...) + return r +} + +func (r *PolicyRuleBuilder) Names(names ...string) *PolicyRuleBuilder { + r.PolicyRule.ResourceNames = append(r.PolicyRule.ResourceNames, names...) + return r +} + +func (r *PolicyRuleBuilder) URLs(urls ...string) *PolicyRuleBuilder { + r.PolicyRule.NonResourceURLs = append(r.PolicyRule.NonResourceURLs, urls...) + return r +} + +func (r *PolicyRuleBuilder) RuleOrDie() PolicyRule { + ret, err := r.Rule() + if err != nil { + panic(err) + } + return ret +} + +func (r *PolicyRuleBuilder) Rule() (PolicyRule, error) { + if len(r.PolicyRule.Verbs) == 0 { + return PolicyRule{}, fmt.Errorf("verbs are required: %#v", r.PolicyRule) + } + + switch { + case len(r.PolicyRule.NonResourceURLs) > 0: + if len(r.PolicyRule.APIGroups) != 0 || len(r.PolicyRule.Resources) != 0 || len(r.PolicyRule.ResourceNames) != 0 { + return PolicyRule{}, fmt.Errorf("non-resource rule may not have apiGroups, resources, or resourceNames: %#v", r.PolicyRule) + } + case len(r.PolicyRule.Resources) > 0: + if len(r.PolicyRule.NonResourceURLs) != 0 { + return PolicyRule{}, fmt.Errorf("resource rule may not have nonResourceURLs: %#v", r.PolicyRule) + } + if len(r.PolicyRule.APIGroups) == 0 { + // this is a common bug + return PolicyRule{}, fmt.Errorf("resource rule must have apiGroups: %#v", r.PolicyRule) + } + default: + return PolicyRule{}, fmt.Errorf("a rule must have either nonResourceURLs or resources: %#v", r.PolicyRule) + } + + return r.PolicyRule, nil +} + +// ClusterRoleBindingBuilder lets us attach methods. A no-no for API types. +// We use it to construct bindings in code. It's more compact than trying to write them +// out in a literal.
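As a quick, hedged illustration (not part of the vendored file) of how the two builders defined in this helpers.go are chained from consumer code: the "node-reader" role, namespace, and service-account names below are invented for the example, and the import alias is an assumption.

    package main

    import (
        "fmt"

        rbacv1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1"
    )

    func main() {
        // A resource rule: verbs are required, and a resource rule must name at least
        // one API group ("" selects the core group) or Rule()/RuleOrDie() rejects it.
        rule := rbacv1beta1.NewRule("get", "list", "watch").
            Groups("").
            Resources("nodes").
            RuleOrDie()

        // A cluster-scoped binding to a hypothetical "node-reader" ClusterRole;
        // Binding()/BindingOrDie() fails if no subjects have been added.
        binding := rbacv1beta1.NewClusterBinding("node-reader").
            SAs("kube-system", "node-controller").
            BindingOrDie()

        fmt.Println(rule.Verbs, binding.RoleRef.Name)
    }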
+type ClusterRoleBindingBuilder struct { + ClusterRoleBinding ClusterRoleBinding `protobuf:"bytes,1,opt,name=clusterRoleBinding"` +} + +func NewClusterBinding(clusterRoleName string) *ClusterRoleBindingBuilder { + return &ClusterRoleBindingBuilder{ + ClusterRoleBinding: ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: clusterRoleName}, + RoleRef: RoleRef{ + APIGroup: GroupName, + Kind: "ClusterRole", + Name: clusterRoleName, + }, + }, + } +} + +func (r *ClusterRoleBindingBuilder) Groups(groups ...string) *ClusterRoleBindingBuilder { + for _, group := range groups { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: GroupKind, Name: group}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) Users(users ...string) *ClusterRoleBindingBuilder { + for _, user := range users { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: UserKind, Name: user}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *ClusterRoleBindingBuilder { + for _, saName := range serviceAccountNames { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: ServiceAccountKind, Namespace: namespace, Name: saName}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) BindingOrDie() ClusterRoleBinding { + ret, err := r.Binding() + if err != nil { + panic(err) + } + return ret +} + +func (r *ClusterRoleBindingBuilder) Binding() (ClusterRoleBinding, error) { + if len(r.ClusterRoleBinding.Subjects) == 0 { + return ClusterRoleBinding{}, fmt.Errorf("subjects are required: %#v", r.ClusterRoleBinding) + } + + return r.ClusterRoleBinding, nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/register.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/register.go new file mode 100644 index 0000000000..cab2e77e4b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "rbac.authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. 
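For context, the registration wiring in this register.go is consumed roughly as in the sketch below (a hedged, illustrative example, not part of the vendored file; the variable names and the printed resource are assumptions): a fresh runtime.Scheme is populated via AddToScheme, and Resource() group-qualifies an unversioned resource name.

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/runtime"
        rbacv1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1"
    )

    func main() {
        // AddToScheme runs the SchemeBuilder functions (addKnownTypes plus the
        // defaulting funcs) against the target scheme.
        scheme := runtime.NewScheme()
        if err := rbacv1beta1.AddToScheme(scheme); err != nil {
            panic(err)
        }

        // Resource() returns a GroupResource qualified with the RBAC group name.
        fmt.Println(rbacv1beta1.Resource("clusterroles"))
        // Expected output: clusterroles.rbac.authorization.k8s.io
    }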
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Role{}, + &RoleBinding{}, + &RoleBindingList{}, + &RoleList{}, + + &ClusterRole{}, + &ClusterRoleBinding{}, + &ClusterRoleBindingList{}, + &ClusterRoleList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types.generated.go new file mode 100644 index 0000000000..56de3e3e7b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types.generated.go @@ -0,0 +1,4879 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package v1beta1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 + } +} + +func (x *PolicyRule) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = len(x.APIGroups) != 0 + yyq2[2] = len(x.Resources) != 0 + yyq2[3] = len(x.ResourceNames) != 0 + yyq2[4] = len(x.NonResourceURLs) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Verbs == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.Verbs, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("verbs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Verbs == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.Verbs, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.APIGroups == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncSliceStringV(x.APIGroups, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiGroups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.APIGroups == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncSliceStringV(x.APIGroups, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Resources == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + z.F.EncSliceStringV(x.Resources, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resources")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Resources == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + z.F.EncSliceStringV(x.Resources, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.ResourceNames == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + z.F.EncSliceStringV(x.ResourceNames, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceNames")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceNames == nil { + r.EncodeNil() + } else { + yym14 
:= z.EncBinary() + _ = yym14 + if false { + } else { + z.F.EncSliceStringV(x.ResourceNames, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.NonResourceURLs == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncSliceStringV(x.NonResourceURLs, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nonResourceURLs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NonResourceURLs == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + z.F.EncSliceStringV(x.NonResourceURLs, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PolicyRule) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PolicyRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "verbs": + if r.TryDecodeAsNil() { + x.Verbs = nil + } else { + yyv4 := &x.Verbs + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + case "apiGroups": + if r.TryDecodeAsNil() { + x.APIGroups = nil + } else { + yyv6 := &x.APIGroups + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecSliceStringX(yyv6, false, d) + } + } + case "resources": + if r.TryDecodeAsNil() { + x.Resources = nil + } else { + yyv8 := &x.Resources + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceStringX(yyv8, false, d) + } + } + case "resourceNames": + if r.TryDecodeAsNil() { + x.ResourceNames = nil + } else { + yyv10 := &x.ResourceNames + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + z.F.DecSliceStringX(yyv10, false, d) + } + } + case "nonResourceURLs": + if r.TryDecodeAsNil() { + x.NonResourceURLs = nil + } else { + yyv12 := &x.NonResourceURLs + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + z.F.DecSliceStringX(yyv12, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PolicyRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Verbs = nil + } else { + yyv15 := &x.Verbs + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + z.F.DecSliceStringX(yyv15, false, d) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIGroups = nil + } else { + yyv17 := &x.APIGroups + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + z.F.DecSliceStringX(yyv17, false, d) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Resources = nil + } else { + yyv19 := &x.Resources + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + z.F.DecSliceStringX(yyv19, false, d) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ResourceNames = nil + } else { + yyv21 := &x.ResourceNames + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + z.F.DecSliceStringX(yyv21, false, d) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NonResourceURLs = nil + } else { + yyv23 := &x.NonResourceURLs + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + z.F.DecSliceStringX(yyv23, false, d) + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Subject) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.APIGroup != "" + yyq2[3] = x.Namespace != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiGroup")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Subject) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Subject) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, 
true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiGroup": + if r.TryDecodeAsNil() { + x.APIGroup = "" + } else { + yyv6 := &x.APIGroup + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv10 := &x.Namespace + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Subject) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIGroup = "" + } else { + yyv15 := &x.APIGroup + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv17 := &x.Name + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv19 := &x.Namespace + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RoleRef) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := 
!z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiGroup")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleRef) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleRef) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "apiGroup": + if r.TryDecodeAsNil() { + x.APIGroup = "" + } else { + yyv4 := &x.APIGroup + yym5 := z.DecBinary() + _ = yym5 + if 
false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv6 := &x.Kind + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RoleRef) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIGroup = "" + } else { + yyv11 := &x.APIGroup + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv15 := &x.Name + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Role) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := 
z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rules")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Role) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Role) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + 
case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "rules": + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv10 := &x.Rules + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Role) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv19 := &x.Rules + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && 
z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subjects")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy18 := &x.RoleRef + yy18.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("roleRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy20 := &x.RoleRef + yy20.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleBinding) CodecDecodeSelf(d *codec1978.Decoder) 
{ + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "subjects": + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv10 := &x.Subjects + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv10), d) + } + } + case "roleRef": + if r.TryDecodeAsNil() { + x.RoleRef = RoleRef{} + } else { + yyv12 := &x.RoleRef + yyv12.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv14 := &x.Kind + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv16 := &x.APIVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 
{ + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv18 := &x.ObjectMeta + yym19 := z.DecBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.DecExt(yyv18) { + } else { + z.DecFallback(yyv18, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv20 := &x.Subjects + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv20), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RoleRef = RoleRef{} + } else { + yyv22 := &x.RoleRef + yyv22.CodecDecodeSelf(d) + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if 
yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceRoleBinding(([]RoleBinding)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceRoleBinding(([]RoleBinding)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleBindingList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + 
h.decSliceRoleBinding((*[]RoleBinding)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceRoleBinding((*[]RoleBinding)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RoleList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceRole(([]Role)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceRole(([]Role)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) 
+ yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceRole((*[]Role)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceRole((*[]Role)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRole) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := 
z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rules")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRole) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 
0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRole) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "rules": + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv10 := &x.Rules + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRole) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + 
z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv19 := &x.Rules + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else 
{ + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subjects")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy18 := &x.RoleRef + yy18.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("roleRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy20 := &x.RoleRef + yy20.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRoleBinding) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "subjects": + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv10 := &x.Subjects + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv10), d) + } + } + case "roleRef": + if r.TryDecodeAsNil() { + x.RoleRef = RoleRef{} + } else { + yyv12 := &x.RoleRef + yyv12.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRoleBinding) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv14 := &x.Kind + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv16 := &x.APIVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv18 := &x.ObjectMeta + yym19 := z.DecBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.DecExt(yyv18) { + } else { + z.DecFallback(yyv18, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv20 := &x.Subjects + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv20), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RoleRef = RoleRef{} + } else { + yyv22 := &x.RoleRef + yyv22.CodecDecodeSelf(d) + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + 
r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRoleBindingList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for 
yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + 
z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRoleList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceClusterRole(([]ClusterRole)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceClusterRole(([]ClusterRole)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRoleList) CodecDecodeSelf(d 
*codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceClusterRole((*[]ClusterRole)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceClusterRole((*[]ClusterRole)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSlicePolicyRule(v []PolicyRule, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePolicyRule(v *[]PolicyRule, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PolicyRule{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 120) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PolicyRule, yyrl1) + } + } else { + yyv1 = make([]PolicyRule, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PolicyRule{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PolicyRule{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PolicyRule{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PolicyRule{}) // var yyz1 PolicyRule + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PolicyRule{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PolicyRule{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceSubject(v []Subject, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceSubject(v *[]Subject, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Subject{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Subject, yyrl1) + } + } else { + yyv1 = make([]Subject, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Subject{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Subject{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Subject{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Subject{}) // var yyz1 Subject + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Subject{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Subject{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceRoleBinding(v []RoleBinding, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceRoleBinding(v *[]RoleBinding, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []RoleBinding{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 328) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]RoleBinding, yyrl1) + } + } else { + yyv1 = make([]RoleBinding, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + 
for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = RoleBinding{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, RoleBinding{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = RoleBinding{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, RoleBinding{}) // var yyz1 RoleBinding + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = RoleBinding{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []RoleBinding{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceRole(v []Role, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceRole(v *[]Role, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Role{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Role, yyrl1) + } + } else { + yyv1 = make([]Role, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Role{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Role{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Role{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Role{}) // var yyz1 Role + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Role{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Role{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceClusterRoleBinding(v []ClusterRoleBinding, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceClusterRoleBinding(v *[]ClusterRoleBinding, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ClusterRoleBinding{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 328) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ClusterRoleBinding, yyrl1) + } + } else { + yyv1 = make([]ClusterRoleBinding, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRoleBinding{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ClusterRoleBinding{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRoleBinding{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ClusterRoleBinding{}) // var yyz1 ClusterRoleBinding + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRoleBinding{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ClusterRoleBinding{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceClusterRole(v []ClusterRole, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceClusterRole(v *[]ClusterRole, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ClusterRole{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ClusterRole, yyrl1) + } + } else { + yyv1 = make([]ClusterRole, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 
= true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRole{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ClusterRole{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRole{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ClusterRole{}) // var yyz1 ClusterRole + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRole{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ClusterRole{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types.go new file mode 100644 index 0000000000..89a47db9e3 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types.go @@ -0,0 +1,207 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Authorization is calculated against +// 1. evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + + GroupKind = "Group" + ServiceAccountKind = "ServiceAccount" + UserKind = "User" + + // AutoUpdateAnnotationKey is the name of an annotation which prevents reconciliation if set to "false" + AutoUpdateAnnotationKey = "rbac.authorization.kubernetes.io/autoupdate" +) + +// Authorization is calculated against +// 1. evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +type PolicyRule struct { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. + // +optional + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` + // Resources is a list of resources this rule applies to. 
ResourceAll represents all resources. + // +optional + Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. + // +optional + NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,5,rep,name=nonResourceURLs"` +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +type Subject struct { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognized the kind value, the Authorizer should report an error. + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // APIGroup holds the API group of the referenced subject. + // Defaults to "" for ServiceAccount subjects. + // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. + // +optional + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt.name=apiGroup"` + // Name of the object being referenced. + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` + // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty + // the Authorizer should report an error. + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` +} + +// RoleRef contains information that points to the role being used +type RoleRef struct { + // APIGroup is the group for the resource being referenced + APIGroup string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"` + // Kind is the type of resource being referenced + Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` + // Name is the name of resource being referenced + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` +} + +// +genclient=true + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +type Role struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules holds all the PolicyRules for this Role + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` +} + +// +genclient=true + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. +type RoleBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Subjects holds references to the objects the role applies to. + Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"` + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` +} + +// RoleBindingList is a collection of RoleBindings +type RoleBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of RoleBindings + Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// RoleList is a collection of Roles +type RoleList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of Roles + Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true +// +nonNamespaced=true + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +type ClusterRole struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules holds all the PolicyRules for this ClusterRole + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` +} + +// +genclient=true +// +nonNamespaced=true + +// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. +type ClusterRoleBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Subjects holds references to the objects the role applies to. + Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"` + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +type ClusterRoleBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoleBindings + Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ClusterRoleList is a collection of ClusterRoles +type ClusterRoleList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoles + Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..1463d8feac --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,148 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_ClusterRole = map[string]string{ + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", +} + +func (ClusterRole) SwaggerDoc() map[string]string { + return map_ClusterRole +} + +var map_ClusterRoleBinding = map[string]string{ + "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", + "metadata": "Standard object's metadata.", + "subjects": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. 
If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (ClusterRoleBinding) SwaggerDoc() map[string]string { + return map_ClusterRoleBinding +} + +var map_ClusterRoleBindingList = map[string]string{ + "": "ClusterRoleBindingList is a collection of ClusterRoleBindings", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoleBindings", +} + +func (ClusterRoleBindingList) SwaggerDoc() map[string]string { + return map_ClusterRoleBindingList +} + +var map_ClusterRoleList = map[string]string{ + "": "ClusterRoleList is a collection of ClusterRoles", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoles", +} + +func (ClusterRoleList) SwaggerDoc() map[string]string { + return map_ClusterRoleList +} + +var map_PolicyRule = map[string]string{ + "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", + "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", + "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", +} + +func (PolicyRule) SwaggerDoc() map[string]string { + return map_PolicyRule +} + +var map_Role = map[string]string{ + "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this Role", +} + +func (Role) SwaggerDoc() map[string]string { + return map_Role +} + +var map_RoleBinding = map[string]string{ + "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", + "metadata": "Standard object's metadata.", + "subjects": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. 
If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (RoleBinding) SwaggerDoc() map[string]string { + return map_RoleBinding +} + +var map_RoleBindingList = map[string]string{ + "": "RoleBindingList is a collection of RoleBindings", + "metadata": "Standard object's metadata.", + "items": "Items is a list of RoleBindings", +} + +func (RoleBindingList) SwaggerDoc() map[string]string { + return map_RoleBindingList +} + +var map_RoleList = map[string]string{ + "": "RoleList is a collection of Roles", + "metadata": "Standard object's metadata.", + "items": "Items is a list of Roles", +} + +func (RoleList) SwaggerDoc() map[string]string { + return map_RoleList +} + +var map_RoleRef = map[string]string{ + "": "RoleRef contains information that points to the role being used", + "apiGroup": "APIGroup is the group for the resource being referenced", + "kind": "Kind is the type of resource being referenced", + "name": "Name is the name of resource being referenced", +} + +func (RoleRef) SwaggerDoc() map[string]string { + return map_RoleRef +} + +var map_Subject = map[string]string{ + "": "Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names.", + "kind": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.", + "apiGroup": "APIGroup holds the API group of the referenced subject. Defaults to \"\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io\" for User and Group subjects.", + "name": "Name of the object being referenced.", + "namespace": "Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.", +} + +func (Subject) SwaggerDoc() map[string]string { + return map_Subject +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.conversion.go new file mode 100644 index 0000000000..0f495534b4 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.conversion.go @@ -0,0 +1,353 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1beta1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + rbac "k8s.io/client-go/pkg/apis/rbac" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_ClusterRole_To_rbac_ClusterRole, + Convert_rbac_ClusterRole_To_v1beta1_ClusterRole, + Convert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding, + Convert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding, + Convert_v1beta1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder, + Convert_rbac_ClusterRoleBindingBuilder_To_v1beta1_ClusterRoleBindingBuilder, + Convert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList, + Convert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList, + Convert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList, + Convert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList, + Convert_v1beta1_PolicyRule_To_rbac_PolicyRule, + Convert_rbac_PolicyRule_To_v1beta1_PolicyRule, + Convert_v1beta1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder, + Convert_rbac_PolicyRuleBuilder_To_v1beta1_PolicyRuleBuilder, + Convert_v1beta1_Role_To_rbac_Role, + Convert_rbac_Role_To_v1beta1_Role, + Convert_v1beta1_RoleBinding_To_rbac_RoleBinding, + Convert_rbac_RoleBinding_To_v1beta1_RoleBinding, + Convert_v1beta1_RoleBindingList_To_rbac_RoleBindingList, + Convert_rbac_RoleBindingList_To_v1beta1_RoleBindingList, + Convert_v1beta1_RoleList_To_rbac_RoleList, + Convert_rbac_RoleList_To_v1beta1_RoleList, + Convert_v1beta1_RoleRef_To_rbac_RoleRef, + Convert_rbac_RoleRef_To_v1beta1_RoleRef, + Convert_v1beta1_Subject_To_rbac_Subject, + Convert_rbac_Subject_To_v1beta1_Subject, + ) +} + +func autoConvert_v1beta1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules)) + return nil +} + +func Convert_v1beta1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterRole_To_rbac_ClusterRole(in, out, s) +} + +func autoConvert_rbac_ClusterRole_To_v1beta1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) + return nil +} + +func Convert_rbac_ClusterRole_To_v1beta1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error { + return autoConvert_rbac_ClusterRole_To_v1beta1_ClusterRole(in, out, s) +} + +func autoConvert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Subjects = *(*[]rbac.Subject)(unsafe.Pointer(&in.Subjects)) + if err := Convert_v1beta1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in, out, s) +} + +func autoConvert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Subjects = *(*[]Subject)(unsafe.Pointer(&in.Subjects)) + if err := Convert_rbac_RoleRef_To_v1beta1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { + return err + } + return nil +} + +func Convert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s 
conversion.Scope) error { + return autoConvert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(in, out, s) +} + +func autoConvert_v1beta1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in *ClusterRoleBindingBuilder, out *rbac.ClusterRoleBindingBuilder, s conversion.Scope) error { + if err := Convert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(&in.ClusterRoleBinding, &out.ClusterRoleBinding, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in *ClusterRoleBindingBuilder, out *rbac.ClusterRoleBindingBuilder, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in, out, s) +} + +func autoConvert_rbac_ClusterRoleBindingBuilder_To_v1beta1_ClusterRoleBindingBuilder(in *rbac.ClusterRoleBindingBuilder, out *ClusterRoleBindingBuilder, s conversion.Scope) error { + if err := Convert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(&in.ClusterRoleBinding, &out.ClusterRoleBinding, s); err != nil { + return err + } + return nil +} + +func Convert_rbac_ClusterRoleBindingBuilder_To_v1beta1_ClusterRoleBindingBuilder(in *rbac.ClusterRoleBindingBuilder, out *ClusterRoleBindingBuilder, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleBindingBuilder_To_v1beta1_ClusterRoleBindingBuilder(in, out, s) +} + +func autoConvert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]rbac.ClusterRoleBinding)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in, out, s) +} + +func autoConvert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]ClusterRoleBinding)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList(in, out, s) +} + +func autoConvert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]rbac.ClusterRole)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList(in, out, s) +} + +func autoConvert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]ClusterRole)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList(in, out, s) +} + +func autoConvert_v1beta1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s 
conversion.Scope) error { + out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) + out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) + out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) + return nil +} + +func Convert_v1beta1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error { + return autoConvert_v1beta1_PolicyRule_To_rbac_PolicyRule(in, out, s) +} + +func autoConvert_rbac_PolicyRule_To_v1beta1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error { + out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) + out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) + out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) + return nil +} + +func Convert_rbac_PolicyRule_To_v1beta1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error { + return autoConvert_rbac_PolicyRule_To_v1beta1_PolicyRule(in, out, s) +} + +func autoConvert_v1beta1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in *PolicyRuleBuilder, out *rbac.PolicyRuleBuilder, s conversion.Scope) error { + if err := Convert_v1beta1_PolicyRule_To_rbac_PolicyRule(&in.PolicyRule, &out.PolicyRule, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in *PolicyRuleBuilder, out *rbac.PolicyRuleBuilder, s conversion.Scope) error { + return autoConvert_v1beta1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in, out, s) +} + +func autoConvert_rbac_PolicyRuleBuilder_To_v1beta1_PolicyRuleBuilder(in *rbac.PolicyRuleBuilder, out *PolicyRuleBuilder, s conversion.Scope) error { + if err := Convert_rbac_PolicyRule_To_v1beta1_PolicyRule(&in.PolicyRule, &out.PolicyRule, s); err != nil { + return err + } + return nil +} + +func Convert_rbac_PolicyRuleBuilder_To_v1beta1_PolicyRuleBuilder(in *rbac.PolicyRuleBuilder, out *PolicyRuleBuilder, s conversion.Scope) error { + return autoConvert_rbac_PolicyRuleBuilder_To_v1beta1_PolicyRuleBuilder(in, out, s) +} + +func autoConvert_v1beta1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules)) + return nil +} + +func Convert_v1beta1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { + return autoConvert_v1beta1_Role_To_rbac_Role(in, out, s) +} + +func autoConvert_rbac_Role_To_v1beta1_Role(in *rbac.Role, out *Role, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) + return nil +} + +func Convert_rbac_Role_To_v1beta1_Role(in *rbac.Role, out *Role, s conversion.Scope) error { + return autoConvert_rbac_Role_To_v1beta1_Role(in, out, s) +} + +func autoConvert_v1beta1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Subjects = *(*[]rbac.Subject)(unsafe.Pointer(&in.Subjects)) + if err := Convert_v1beta1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error { + return autoConvert_v1beta1_RoleBinding_To_rbac_RoleBinding(in, 
out, s) +} + +func autoConvert_rbac_RoleBinding_To_v1beta1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Subjects = *(*[]Subject)(unsafe.Pointer(&in.Subjects)) + if err := Convert_rbac_RoleRef_To_v1beta1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { + return err + } + return nil +} + +func Convert_rbac_RoleBinding_To_v1beta1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error { + return autoConvert_rbac_RoleBinding_To_v1beta1_RoleBinding(in, out, s) +} + +func autoConvert_v1beta1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]rbac.RoleBinding)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error { + return autoConvert_v1beta1_RoleBindingList_To_rbac_RoleBindingList(in, out, s) +} + +func autoConvert_rbac_RoleBindingList_To_v1beta1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]RoleBinding)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_rbac_RoleBindingList_To_v1beta1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error { + return autoConvert_rbac_RoleBindingList_To_v1beta1_RoleBindingList(in, out, s) +} + +func autoConvert_v1beta1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]rbac.Role)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error { + return autoConvert_v1beta1_RoleList_To_rbac_RoleList(in, out, s) +} + +func autoConvert_rbac_RoleList_To_v1beta1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]Role)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_rbac_RoleList_To_v1beta1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error { + return autoConvert_rbac_RoleList_To_v1beta1_RoleList(in, out, s) +} + +func autoConvert_v1beta1_RoleRef_To_rbac_RoleRef(in *RoleRef, out *rbac.RoleRef, s conversion.Scope) error { + out.APIGroup = in.APIGroup + out.Kind = in.Kind + out.Name = in.Name + return nil +} + +func Convert_v1beta1_RoleRef_To_rbac_RoleRef(in *RoleRef, out *rbac.RoleRef, s conversion.Scope) error { + return autoConvert_v1beta1_RoleRef_To_rbac_RoleRef(in, out, s) +} + +func autoConvert_rbac_RoleRef_To_v1beta1_RoleRef(in *rbac.RoleRef, out *RoleRef, s conversion.Scope) error { + out.APIGroup = in.APIGroup + out.Kind = in.Kind + out.Name = in.Name + return nil +} + +func Convert_rbac_RoleRef_To_v1beta1_RoleRef(in *rbac.RoleRef, out *RoleRef, s conversion.Scope) error { + return autoConvert_rbac_RoleRef_To_v1beta1_RoleRef(in, out, s) +} + +func autoConvert_v1beta1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error { + out.Kind = in.Kind + out.APIGroup = in.APIGroup + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +func Convert_v1beta1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error { + return autoConvert_v1beta1_Subject_To_rbac_Subject(in, out, s) +} + +func autoConvert_rbac_Subject_To_v1beta1_Subject(in *rbac.Subject, out *Subject, s 
conversion.Scope) error { + out.Kind = in.Kind + out.APIGroup = in.APIGroup + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +func Convert_rbac_Subject_To_v1beta1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error { + return autoConvert_rbac_Subject_To_v1beta1_Subject(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..dccae9b4f5 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,258 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterRole, InType: reflect.TypeOf(&ClusterRole{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterRoleBinding, InType: reflect.TypeOf(&ClusterRoleBinding{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterRoleBindingList, InType: reflect.TypeOf(&ClusterRoleBindingList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterRoleList, InType: reflect.TypeOf(&ClusterRoleList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PolicyRule, InType: reflect.TypeOf(&PolicyRule{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Role, InType: reflect.TypeOf(&Role{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RoleBinding, InType: reflect.TypeOf(&RoleBinding{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RoleBindingList, InType: reflect.TypeOf(&RoleBindingList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RoleList, InType: reflect.TypeOf(&RoleList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RoleRef, InType: reflect.TypeOf(&RoleRef{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Subject, InType: reflect.TypeOf(&Subject{})}, + ) +} + +func DeepCopy_v1beta1_ClusterRole(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRole) + out := out.(*ClusterRole) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_PolicyRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + 
} + } + } + return nil + } +} + +func DeepCopy_v1beta1_ClusterRoleBinding(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRoleBinding) + out := out.(*ClusterRoleBinding) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_ClusterRoleBindingList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRoleBindingList) + out := out.(*ClusterRoleBindingList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_ClusterRoleBinding(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_ClusterRoleList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRoleList) + out := out.(*ClusterRoleList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRole, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_ClusterRole(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_PolicyRule(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PolicyRule) + out := out.(*PolicyRule) + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_Role(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Role) + out := out.(*Role) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_PolicyRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_RoleBinding(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleBinding) + out := out.(*RoleBinding) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_RoleBindingList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleBindingList) + out := out.(*RoleBindingList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + if err := 
DeepCopy_v1beta1_RoleBinding(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_RoleList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleList) + out := out.(*RoleList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Role, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_Role(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_RoleRef(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleRef) + out := out.(*RoleRef) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_Subject(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Subject) + out := out.(*Subject) + *out = *in + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.defaults.go new file mode 100644 index 0000000000..fa5bfb6abb --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.defaults.go @@ -0,0 +1,66 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&ClusterRoleBinding{}, func(obj interface{}) { SetObjectDefaults_ClusterRoleBinding(obj.(*ClusterRoleBinding)) }) + scheme.AddTypeDefaultingFunc(&ClusterRoleBindingList{}, func(obj interface{}) { SetObjectDefaults_ClusterRoleBindingList(obj.(*ClusterRoleBindingList)) }) + scheme.AddTypeDefaultingFunc(&RoleBinding{}, func(obj interface{}) { SetObjectDefaults_RoleBinding(obj.(*RoleBinding)) }) + scheme.AddTypeDefaultingFunc(&RoleBindingList{}, func(obj interface{}) { SetObjectDefaults_RoleBindingList(obj.(*RoleBindingList)) }) + return nil +} + +func SetObjectDefaults_ClusterRoleBinding(in *ClusterRoleBinding) { + SetDefaults_ClusterRoleBinding(in) + for i := range in.Subjects { + a := &in.Subjects[i] + SetDefaults_Subject(a) + } +} + +func SetObjectDefaults_ClusterRoleBindingList(in *ClusterRoleBindingList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ClusterRoleBinding(a) + } +} + +func SetObjectDefaults_RoleBinding(in *RoleBinding) { + SetDefaults_RoleBinding(in) + for i := range in.Subjects { + a := &in.Subjects[i] + SetDefaults_Subject(a) + } +} + +func SetObjectDefaults_RoleBindingList(in *RoleBindingList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_RoleBinding(a) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/rbac/zz_generated.deepcopy.go new file mode 100644 index 0000000000..db5d08cdf3 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/zz_generated.deepcopy.go @@ -0,0 +1,258 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package rbac + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_ClusterRole, InType: reflect.TypeOf(&ClusterRole{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_ClusterRoleBinding, InType: reflect.TypeOf(&ClusterRoleBinding{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_ClusterRoleBindingList, InType: reflect.TypeOf(&ClusterRoleBindingList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_ClusterRoleList, InType: reflect.TypeOf(&ClusterRoleList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_PolicyRule, InType: reflect.TypeOf(&PolicyRule{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_Role, InType: reflect.TypeOf(&Role{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_RoleBinding, InType: reflect.TypeOf(&RoleBinding{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_RoleBindingList, InType: reflect.TypeOf(&RoleBindingList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_RoleList, InType: reflect.TypeOf(&RoleList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_RoleRef, InType: reflect.TypeOf(&RoleRef{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_Subject, InType: reflect.TypeOf(&Subject{})}, + ) +} + +func DeepCopy_rbac_ClusterRole(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRole) + out := out.(*ClusterRole) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + if err := DeepCopy_rbac_PolicyRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_rbac_ClusterRoleBinding(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRoleBinding) + out := out.(*ClusterRoleBinding) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_rbac_ClusterRoleBindingList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRoleBindingList) + out := out.(*ClusterRoleBindingList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(*in)) + for i := range *in { + if err := DeepCopy_rbac_ClusterRoleBinding(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_rbac_ClusterRoleList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRoleList) + out := out.(*ClusterRoleList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRole, len(*in)) + for i := range *in { + if err := DeepCopy_rbac_ClusterRole(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_rbac_PolicyRule(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PolicyRule) + out := out.(*PolicyRule) + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = 
make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_rbac_Role(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Role) + out := out.(*Role) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + if err := DeepCopy_rbac_PolicyRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_rbac_RoleBinding(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleBinding) + out := out.(*RoleBinding) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_rbac_RoleBindingList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleBindingList) + out := out.(*RoleBindingList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + if err := DeepCopy_rbac_RoleBinding(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_rbac_RoleList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleList) + out := out.(*RoleList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Role, len(*in)) + for i := range *in { + if err := DeepCopy_rbac_Role(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_rbac_RoleRef(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleRef) + out := out.(*RoleRef) + *out = *in + return nil + } +} + +func DeepCopy_rbac_Subject(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Subject) + out := out.(*Subject) + *out = *in + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/doc.go b/vendor/k8s.io/client-go/pkg/apis/settings/doc.go new file mode 100644 index 0000000000..90ccf882a2 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +groupName=settings.k8s.io +package settings diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/install/install.go b/vendor/k8s.io/client-go/pkg/apis/settings/install/install.go new file mode 100644 index 0000000000..ccdc70400a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/install/install.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the settings API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/settings" + "k8s.io/client-go/pkg/apis/settings/v1alpha1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: settings.GroupName, + VersionPreferenceOrder: []string{v1alpha1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/settings", + AddInternalObjectsToScheme: settings.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/register.go b/vendor/k8s.io/client-go/pkg/apis/settings/register.go new file mode 100644 index 0000000000..8584701279 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package settings + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// GroupName is the group name use in this package +const GroupName = "settings.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PodPreset{}, + &PodPresetList{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/types.go b/vendor/k8s.io/client-go/pkg/apis/settings/types.go new file mode 100644 index 0000000000..f89a16d25c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/types.go @@ -0,0 +1,63 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package settings + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api" +) + +// +genclient=true + +// PodPreset is a policy resource that defines additional runtime +// requirements for a Pod. +type PodPreset struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // +optional + Spec PodPresetSpec +} + +// PodPresetSpec is a description of a pod injection policy. +type PodPresetSpec struct { + // Selector is a label query over a set of resources, in this case pods. + // Required. + Selector metav1.LabelSelector + // Env defines the collection of EnvVar to inject into containers. + // +optional + Env []api.EnvVar + // EnvFrom defines the collection of EnvFromSource to inject into containers. + // +optional + EnvFrom []api.EnvFromSource + // Volumes defines the collection of Volume to inject into the pod. + // +optional + Volumes []api.Volume + // VolumeMounts defines the collection of VolumeMount to inject into containers. + // +optional + VolumeMounts []api.VolumeMount +} + +// PodPresetList is a list of PodPreset objects. +type PodPresetList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []PodPreset +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/doc.go b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/doc.go new file mode 100644 index 0000000000..86390b5237 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=settings.k8s.io +package v1alpha1 diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/generated.pb.go new file mode 100644 index 0000000000..88f85877a9 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/generated.pb.go @@ -0,0 +1,924 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/settings/v1alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/settings/v1alpha1/generated.proto + + It has these top-level messages: + PodPreset + PodPresetList + PodPresetSpec +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/pkg/api/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *PodPreset) Reset() { *m = PodPreset{} } +func (*PodPreset) ProtoMessage() {} +func (*PodPreset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *PodPresetList) Reset() { *m = PodPresetList{} } +func (*PodPresetList) ProtoMessage() {} +func (*PodPresetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *PodPresetSpec) Reset() { *m = PodPresetSpec{} } +func (*PodPresetSpec) ProtoMessage() {} +func (*PodPresetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func init() { + proto.RegisterType((*PodPreset)(nil), "k8s.io.client-go.pkg.apis.settings.v1alpha1.PodPreset") + proto.RegisterType((*PodPresetList)(nil), "k8s.io.client-go.pkg.apis.settings.v1alpha1.PodPresetList") + proto.RegisterType((*PodPresetSpec)(nil), "k8s.io.client-go.pkg.apis.settings.v1alpha1.PodPresetSpec") +} +func (m *PodPreset) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodPreset) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func (m *PodPresetList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodPresetList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n3, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodPresetSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodPresetSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n4, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Env) > 0 { + for _, msg := range m.Env { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.EnvFrom) > 0 { + for _, msg := range m.EnvFrom { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Volumes) > 0 { + for _, msg := range m.Volumes { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if 
len(m.VolumeMounts) > 0 { + for _, msg := range m.VolumeMounts { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *PodPreset) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodPresetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodPresetSpec) Size() (n int) { + var l int + _ = l + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.EnvFrom) > 0 { + for _, e := range m.EnvFrom { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PodPreset) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodPreset{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodPresetSpec", "PodPresetSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodPresetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodPresetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodPreset", "PodPreset", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodPresetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodPresetSpec{`, + `Selector:` + strings.Replace(strings.Replace(this.Selector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Env:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "k8s_io_kubernetes_pkg_api_v1.EnvVar", 1), `&`, ``, 1) + `,`, + `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "k8s_io_kubernetes_pkg_api_v1.EnvFromSource", 1), `&`, ``, 1) + `,`, + `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "k8s_io_kubernetes_pkg_api_v1.Volume", 1), `&`, ``, 1) + `,`, + `VolumeMounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeMounts), "VolumeMount", "k8s_io_kubernetes_pkg_api_v1.VolumeMount", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PodPreset) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodPreset: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodPreset: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodPresetList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { 
+ return fmt.Errorf("proto: PodPresetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodPresetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodPreset{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodPresetSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodPresetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodPresetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, k8s_io_kubernetes_pkg_api_v1.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EnvFrom = append(m.EnvFrom, k8s_io_kubernetes_pkg_api_v1.EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, k8s_io_kubernetes_pkg_api_v1.Volume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMounts = append(m.VolumeMounts, k8s_io_kubernetes_pkg_api_v1.VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 550 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x9b, 0x6d, 0x4b, 0xeb, 0xb4, 0x8b, 0x12, 0x3c, 0x84, 0x1e, 0xb2, 0x4b, 0xf1, 0xb0, + 0xea, 0x3a, 0xb1, 0xab, 0xa8, 0xa0, 0xa7, 0xc8, 0x0a, 0x82, 0xcb, 0x2e, 0x29, 0xf4, 0x20, 0x2b, + 0x38, 0x4d, 0x9f, 0x69, 0x6c, 0x93, 0x09, 0x33, 0x93, 0xa0, 0x37, 0x3f, 0x82, 0x5f, 0x4a, 0x28, + 0xe8, 0x61, 0x8f, 0x9e, 0x16, 0x5b, 0xbf, 0x88, 0xcc, 0x74, 0xd2, 0x44, 0xba, 0x65, 0xab, 0xb7, + 0x79, 0x8f, 0xf7, 0xff, 0xbd, 0xff, 0x3f, 0x2f, 0xe8, 0xc5, 0xe4, 0x19, 0xc7, 0x21, 0x75, 0x26, + 0xe9, 0x10, 0x58, 0x0c, 0x02, 0xb8, 0x93, 0x4c, 0x02, 0x87, 0x24, 0x21, 0x77, 0x38, 0x08, 0x11, + 0xc6, 0x01, 0x77, 0xb2, 0x1e, 0x99, 0x26, 0x63, 0xd2, 0x73, 0x02, 0x88, 0x81, 0x11, 0x01, 0x23, + 0x9c, 0x30, 0x2a, 0xa8, 0x79, 0xb8, 0x54, 0xe3, 0x42, 0x8d, 0x93, 0x49, 0x80, 0xa5, 0x1a, 0xe7, + 0x6a, 0x9c, 0xab, 0x3b, 0x0f, 0x82, 0x50, 0x8c, 0xd3, 0x21, 0xf6, 0x69, 0xe4, 0x04, 0x34, 0xa0, + 0x8e, 0x82, 0x0c, 0xd3, 0x0f, 0xaa, 0x52, 0x85, 0x7a, 0x2d, 0xe1, 0x9d, 0xc7, 0xda, 0x1a, 0x49, + 0xc2, 0x88, 0xf8, 0xe3, 0x30, 0x06, 0xf6, 0xb9, 0x30, 0x17, 0x81, 0x20, 0x4e, 0xb6, 0x66, 0xa9, + 0xe3, 0x6c, 0x52, 0xb1, 0x34, 0x16, 0x61, 0x04, 0x6b, 0x82, 0x27, 0xd7, 0x09, 0xb8, 0x3f, 0x86, + 0x88, 0xac, 0xe9, 0x4a, 0xf6, 0x38, 0xb0, 0x0c, 0x58, 0xe1, 0x0d, 0x3e, 0x91, 0x28, 0x99, 0xc2, + 0x55, 0xf6, 0x0e, 0x37, 0x7e, 0xef, 0x2b, 0xa6, 0xbb, 0x3f, 0x0c, 0x74, 0xe3, 0x8c, 0x8e, 0xce, + 0x18, 0x70, 0x10, 0xe6, 0x7b, 0xd4, 0x94, 0xa9, 0x47, 0x44, 0x10, 0xcb, 0xd8, 0x37, 0x0e, 0x5a, + 0x47, 0x0f, 0xb1, 0x3e, 0x40, 0xd9, 0x7c, 0x71, 0x02, 0x39, 0x8d, 0xb3, 0x1e, 0x3e, 0x1d, 0x7e, + 0x04, 0x5f, 0x9c, 0x80, 0x20, 0xae, 0x39, 0xbb, 0xdc, 0xab, 0x2c, 0x2e, 0xf7, 0x50, 0xd1, 0xf3, + 0x56, 0x54, 0xf3, 0x1d, 0xaa, 0xf1, 0x04, 0x7c, 0x6b, 0x47, 0xd1, 0x9f, 0xe3, 0x7f, 0x39, 0x2f, + 0x5e, 0x19, 0xed, 0x27, 0xe0, 0xbb, 0x6d, 0xbd, 0xa8, 0x26, 0x2b, 0x4f, 0x61, 0xbb, 0xdf, 0x0d, + 0xb4, 0xbb, 0x9a, 0x7a, 0x13, 0x72, 0x61, 0x9e, 0xaf, 0x45, 0xc2, 0xdb, 0x45, 0x92, 0x6a, 0x15, + 0xe8, 0x96, 0xde, 0xd3, 0xcc, 0x3b, 0xa5, 0x38, 0xe7, 0xa8, 0x1e, 0x0a, 0x88, 0xb8, 0xb5, 0xb3, + 0x5f, 0x3d, 0x68, 0x1d, 0x3d, 0xfd, 0xcf, 0x3c, 0xee, 0xae, 0xde, 0x51, 0x7f, 0x2d, 0x69, 0xde, + 0x12, 0xda, 0xfd, 0x56, 
0x2d, 0xa5, 0x91, 0x29, 0x4d, 0x82, 0x9a, 0x1c, 0xa6, 0xe0, 0x0b, 0xca, + 0x74, 0x9a, 0x47, 0x5b, 0xa6, 0x21, 0x43, 0x98, 0xf6, 0xb5, 0xb4, 0x88, 0x94, 0x77, 0xbc, 0x15, + 0xd6, 0x7c, 0x89, 0xaa, 0x10, 0x67, 0x3a, 0xd0, 0x9d, 0xcd, 0x81, 0x24, 0xf5, 0x38, 0xce, 0x06, + 0x84, 0xb9, 0x2d, 0x8d, 0xab, 0x1e, 0xc7, 0x99, 0x27, 0xd5, 0xe6, 0x00, 0x35, 0x20, 0xce, 0x5e, + 0x31, 0x1a, 0x59, 0x55, 0x05, 0xba, 0x7f, 0x2d, 0x48, 0x0e, 0xf7, 0x69, 0xca, 0x7c, 0x70, 0x6f, + 0x6a, 0x5e, 0x43, 0xb7, 0xbd, 0x1c, 0x66, 0x9e, 0xa2, 0x46, 0x46, 0xa7, 0x69, 0x04, 0xdc, 0xaa, + 0x6d, 0x63, 0x70, 0xa0, 0x86, 0x0b, 0xe0, 0xb2, 0xe6, 0x5e, 0x4e, 0x31, 0x7d, 0xd4, 0x5e, 0x3e, + 0x4f, 0x68, 0x1a, 0x0b, 0x6e, 0xd5, 0x15, 0xf5, 0xee, 0x36, 0x54, 0xa5, 0x70, 0x6f, 0x6b, 0x74, + 0xbb, 0xd4, 0xe4, 0xde, 0x5f, 0x50, 0xf7, 0xde, 0x6c, 0x6e, 0x57, 0x2e, 0xe6, 0x76, 0xe5, 0xe7, + 0xdc, 0xae, 0x7c, 0x59, 0xd8, 0xc6, 0x6c, 0x61, 0x1b, 0x17, 0x0b, 0xdb, 0xf8, 0xb5, 0xb0, 0x8d, + 0xaf, 0xbf, 0xed, 0xca, 0xdb, 0x66, 0xfe, 0x4f, 0xfc, 0x09, 0x00, 0x00, 0xff, 0xff, 0x91, 0x84, + 0x03, 0x10, 0x2f, 0x05, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/generated.proto new file mode 100644 index 0000000000..b5a80fa637 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/generated.proto @@ -0,0 +1,76 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.settings.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// PodPreset is a policy resource that defines additional runtime +// requirements for a Pod. +message PodPreset { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // +optional + optional PodPresetSpec spec = 2; +} + +// PodPresetList is a list of PodPreset objects. +message PodPresetList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated PodPreset items = 2; +} + +// PodPresetSpec is a description of a pod injection policy. +message PodPresetSpec { + // Selector is a label query over a set of resources, in this case pods. + // Required. + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + + // Env defines the collection of EnvVar to inject into containers. 
+ // +optional + repeated k8s.io.kubernetes.pkg.api.v1.EnvVar env = 2; + + // EnvFrom defines the collection of EnvFromSource to inject into containers. + // +optional + repeated k8s.io.kubernetes.pkg.api.v1.EnvFromSource envFrom = 3; + + // Volumes defines the collection of Volume to inject into the pod. + // +optional + repeated k8s.io.kubernetes.pkg.api.v1.Volume volumes = 4; + + // VolumeMounts defines the collection of VolumeMount to inject into containers. + // +optional + repeated k8s.io.kubernetes.pkg.api.v1.VolumeMount volumeMounts = 5; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/register.go b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/register.go new file mode 100644 index 0000000000..45afb50ca9 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/register.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "settings.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PodPreset{}, + &PodPresetList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/types.go new file mode 100644 index 0000000000..5b0c5d19f8 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/types.go @@ -0,0 +1,67 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api/v1" +) + +// +genclient=true + +// PodPreset is a policy resource that defines additional runtime +// requirements for a Pod. 
+type PodPreset struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // +optional + Spec PodPresetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// PodPresetSpec is a description of a pod injection policy. +type PodPresetSpec struct { + // Selector is a label query over a set of resources, in this case pods. + // Required. + Selector metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` + + // Env defines the collection of EnvVar to inject into containers. + // +optional + Env []v1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"` + // EnvFrom defines the collection of EnvFromSource to inject into containers. + // +optional + EnvFrom []v1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,3,rep,name=envFrom"` + // Volumes defines the collection of Volume to inject into the pod. + // +optional + Volumes []v1.Volume `json:"volumes,omitempty" protobuf:"bytes,4,rep,name=volumes"` + // VolumeMounts defines the collection of VolumeMount to inject into containers. + // +optional + VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty" protobuf:"bytes,5,rep,name=volumeMounts"` +} + +// PodPresetList is a list of PodPreset objects. +type PodPresetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []PodPreset `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..5b4dc665de --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,61 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_PodPreset = map[string]string{ + "": "PodPreset is a policy resource that defines additional runtime requirements for a Pod.", +} + +func (PodPreset) SwaggerDoc() map[string]string { + return map_PodPreset +} + +var map_PodPresetList = map[string]string{ + "": "PodPresetList is a list of PodPreset objects.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (PodPresetList) SwaggerDoc() map[string]string { + return map_PodPresetList +} + +var map_PodPresetSpec = map[string]string{ + "": "PodPresetSpec is a description of a pod injection policy.", + "selector": "Selector is a label query over a set of resources, in this case pods. Required.", + "env": "Env defines the collection of EnvVar to inject into containers.", + "envFrom": "EnvFrom defines the collection of EnvFromSource to inject into containers.", + "volumes": "Volumes defines the collection of Volume to inject into the pod.", + "volumeMounts": "VolumeMounts defines the collection of VolumeMount to inject into containers.", +} + +func (PodPresetSpec) SwaggerDoc() map[string]string { + return map_PodPresetSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.conversion.go new file mode 100644 index 0000000000..c140797bad --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,159 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1alpha1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/client-go/pkg/api" + v1 "k8s.io/client-go/pkg/api/v1" + settings "k8s.io/client-go/pkg/apis/settings" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1alpha1_PodPreset_To_settings_PodPreset, + Convert_settings_PodPreset_To_v1alpha1_PodPreset, + Convert_v1alpha1_PodPresetList_To_settings_PodPresetList, + Convert_settings_PodPresetList_To_v1alpha1_PodPresetList, + Convert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec, + Convert_settings_PodPresetSpec_To_v1alpha1_PodPresetSpec, + ) +} + +func autoConvert_v1alpha1_PodPreset_To_settings_PodPreset(in *PodPreset, out *settings.PodPreset, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_PodPreset_To_settings_PodPreset(in *PodPreset, out *settings.PodPreset, s conversion.Scope) error { + return autoConvert_v1alpha1_PodPreset_To_settings_PodPreset(in, out, s) +} + +func autoConvert_settings_PodPreset_To_v1alpha1_PodPreset(in *settings.PodPreset, out *PodPreset, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_settings_PodPresetSpec_To_v1alpha1_PodPresetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_settings_PodPreset_To_v1alpha1_PodPreset(in *settings.PodPreset, out *PodPreset, s conversion.Scope) error { + return autoConvert_settings_PodPreset_To_v1alpha1_PodPreset(in, out, s) +} + +func autoConvert_v1alpha1_PodPresetList_To_settings_PodPresetList(in *PodPresetList, out *settings.PodPresetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]settings.PodPreset, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_PodPreset_To_settings_PodPreset(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1alpha1_PodPresetList_To_settings_PodPresetList(in *PodPresetList, out *settings.PodPresetList, s conversion.Scope) error { + return autoConvert_v1alpha1_PodPresetList_To_settings_PodPresetList(in, out, s) +} + +func autoConvert_settings_PodPresetList_To_v1alpha1_PodPresetList(in *settings.PodPresetList, out *PodPresetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodPreset, len(*in)) + for i := range *in { + if err := Convert_settings_PodPreset_To_v1alpha1_PodPreset(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_settings_PodPresetList_To_v1alpha1_PodPresetList(in *settings.PodPresetList, out *PodPresetList, s conversion.Scope) error { + return autoConvert_settings_PodPresetList_To_v1alpha1_PodPresetList(in, out, s) +} + +func autoConvert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec(in *PodPresetSpec, out *settings.PodPresetSpec, s conversion.Scope) error { + out.Selector = in.Selector + out.Env = *(*[]api.EnvVar)(unsafe.Pointer(&in.Env)) + out.EnvFrom = *(*[]api.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]api.Volume, len(*in)) + for i := range *in { + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + out.VolumeMounts = *(*[]api.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + return nil +} + +func Convert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec(in *PodPresetSpec, out *settings.PodPresetSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec(in, out, s) +} + +func autoConvert_settings_PodPresetSpec_To_v1alpha1_PodPresetSpec(in *settings.PodPresetSpec, out *PodPresetSpec, s conversion.Scope) error { + out.Selector = in.Selector + out.Env = *(*[]v1.EnvVar)(unsafe.Pointer(&in.Env)) + out.EnvFrom = *(*[]v1.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + out.VolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + return nil +} + +func Convert_settings_PodPresetSpec_To_v1alpha1_PodPresetSpec(in *settings.PodPresetSpec, out *PodPresetSpec, s conversion.Scope) error { + return autoConvert_settings_PodPresetSpec_To_v1alpha1_PodPresetSpec(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..017d8ccb06 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,124 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api_v1 "k8s.io/client-go/pkg/api/v1" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_PodPreset, InType: reflect.TypeOf(&PodPreset{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_PodPresetList, InType: reflect.TypeOf(&PodPresetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_PodPresetSpec, InType: reflect.TypeOf(&PodPresetSpec{})}, + ) +} + +func DeepCopy_v1alpha1_PodPreset(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPreset) + out := out.(*PodPreset) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1alpha1_PodPresetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1alpha1_PodPresetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPresetList) + out := out.(*PodPresetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodPreset, len(*in)) + for i := range *in { + if err := DeepCopy_v1alpha1_PodPreset(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1alpha1_PodPresetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPresetSpec) + out := out.(*PodPresetSpec) + *out = *in + if newVal, err := c.DeepCopy(&in.Selector); err != nil { + return err + } else { + out.Selector = *newVal.(*v1.LabelSelector) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]api_v1.EnvVar, len(*in)) + for i := range *in { + if err := api_v1.DeepCopy_v1_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]api_v1.EnvFromSource, len(*in)) + for i := range *in { + if err := api_v1.DeepCopy_v1_EnvFromSource(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]api_v1.Volume, len(*in)) + for i := range *in { + if err := api_v1.DeepCopy_v1_Volume(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]api_v1.VolumeMount, len(*in)) + copy(*out, *in) + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.defaults.go new file mode 100644 index 0000000000..c178a3072c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,98 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! 
+ +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/client-go/pkg/api/v1" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&PodPreset{}, func(obj interface{}) { SetObjectDefaults_PodPreset(obj.(*PodPreset)) }) + scheme.AddTypeDefaultingFunc(&PodPresetList{}, func(obj interface{}) { SetObjectDefaults_PodPresetList(obj.(*PodPresetList)) }) + return nil +} + +func SetObjectDefaults_PodPreset(in *PodPreset) { + for i := range in.Spec.Env { + a := &in.Spec.Env[i] + if a.ValueFrom != nil { + if a.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(a.ValueFrom.FieldRef) + } + } + } + for i := range in.Spec.Volumes { + a := &in.Spec.Volumes[i] + v1.SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } +} + +func SetObjectDefaults_PodPresetList(in *PodPresetList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_PodPreset(a) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/settings/zz_generated.deepcopy.go new file mode 100644 index 0000000000..eb109b553c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/zz_generated.deepcopy.go @@ -0,0 +1,124 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package settings + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/client-go/pkg/api" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_settings_PodPreset, InType: reflect.TypeOf(&PodPreset{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_settings_PodPresetList, InType: reflect.TypeOf(&PodPresetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_settings_PodPresetSpec, InType: reflect.TypeOf(&PodPresetSpec{})}, + ) +} + +func DeepCopy_settings_PodPreset(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPreset) + out := out.(*PodPreset) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_settings_PodPresetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_settings_PodPresetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPresetList) + out := out.(*PodPresetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodPreset, len(*in)) + for i := range *in { + if err := DeepCopy_settings_PodPreset(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_settings_PodPresetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPresetSpec) + out := out.(*PodPresetSpec) + *out = *in + if newVal, err := c.DeepCopy(&in.Selector); err != nil { + return err + } else { + out.Selector = *newVal.(*v1.LabelSelector) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]api.EnvVar, len(*in)) + for i := range *in { + if err := api.DeepCopy_api_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]api.EnvFromSource, len(*in)) + for i := range *in { + if err := api.DeepCopy_api_EnvFromSource(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]api.Volume, len(*in)) + for i := range *in { + if err := api.DeepCopy_api_Volume(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]api.VolumeMount, len(*in)) + copy(*out, *in) + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/OWNERS b/vendor/k8s.io/client-go/pkg/apis/storage/OWNERS new file mode 100755 index 0000000000..d59ed6e1d8 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/OWNERS @@ -0,0 +1,3 @@ +reviewers: +- deads2k +- mbohlool diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/doc.go b/vendor/k8s.io/client-go/pkg/apis/storage/doc.go new file mode 100644 index 0000000000..ef6d2a9bad --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=storage.k8s.io +package storage diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/install/install.go b/vendor/k8s.io/client-go/pkg/apis/storage/install/install.go new file mode 100644 index 0000000000..fb590fecd6 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/install/install.go @@ -0,0 +1,54 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/storage" + "k8s.io/client-go/pkg/apis/storage/v1" + "k8s.io/client-go/pkg/apis/storage/v1beta1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: storage.GroupName, + // TODO: change the order when GKE supports v1 + VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version, v1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/storage", + RootScopedKinds: sets.NewString("StorageClass"), + AddInternalObjectsToScheme: storage.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1.SchemeGroupVersion.Version: v1.AddToScheme, + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/register.go b/vendor/k8s.io/client-go/pkg/apis/storage/register.go new file mode 100644 index 0000000000..aaa619b4d9 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "storage.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &StorageClass{}, + &StorageClassList{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/types.go b/vendor/k8s.io/client-go/pkg/apis/storage/types.go new file mode 100644 index 0000000000..3d589de5c1 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/types.go @@ -0,0 +1,60 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient=true +// +nonNamespaced=true + +// StorageClass describes a named "class" of storage offered in a cluster. +// Different classes might map to quality-of-service levels, or to backup policies, +// or to arbitrary policies determined by the cluster administrators. Kubernetes +// itself is unopinionated about what classes represent. This concept is sometimes +// called "profiles" in other storage systems. +// The name of a StorageClass object is significant, and is how users can request a particular class. +type StorageClass struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // provisioner is the driver expected to handle this StorageClass. + // This is an optionally-prefixed name, like a label key. + // For example: "kubernetes.io/gce-pd" or "kubernetes.io/aws-ebs". + // This value may not be empty. + Provisioner string + + // parameters holds parameters for the provisioner. + // These values are opaque to the system and are passed directly + // to the provisioner. The only validation done on keys is that they are + // not empty. The maximum number of parameters is + // 512, with a cumulative max size of 256K + // +optional + Parameters map[string]string +} + +// StorageClassList is a collection of storage classes. 
+type StorageClassList struct { + metav1.TypeMeta + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta + + // Items is the list of StorageClasses + Items []StorageClass +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/doc.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1/doc.go new file mode 100644 index 0000000000..2b8d05cfd5 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=storage.k8s.io +package v1 diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1/generated.pb.go new file mode 100644 index 0000000000..531282ba68 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1/generated.pb.go @@ -0,0 +1,730 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/storage/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/storage/v1/generated.proto + + It has these top-level messages: + StorageClass + StorageClassList +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *StorageClass) Reset() { *m = StorageClass{} } +func (*StorageClass) ProtoMessage() {} +func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *StorageClassList) Reset() { *m = StorageClassList{} } +func (*StorageClassList) ProtoMessage() {} +func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func init() { + proto.RegisterType((*StorageClass)(nil), "k8s.io.client-go.pkg.apis.storage.v1.StorageClass") + proto.RegisterType((*StorageClassList)(nil), "k8s.io.client-go.pkg.apis.storage.v1.StorageClassList") +} +func (m *StorageClass) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StorageClass) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Provisioner))) + i += copy(data[i:], m.Provisioner) + if len(m.Parameters) > 0 { + for k := range m.Parameters { + data[i] = 0x1a + i++ + v := m.Parameters[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + return i, nil +} + +func (m *StorageClassList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StorageClassList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *StorageClass) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Provisioner) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k + 
_ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *StorageClassList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *StorageClass) String() string { + if this == nil { + return "nil" + } + keysForParameters := make([]string, 0, len(this.Parameters)) + for k := range this.Parameters { + keysForParameters = append(keysForParameters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + mapStringForParameters := "map[string]string{" + for _, k := range keysForParameters { + mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) + } + mapStringForParameters += "}" + s := strings.Join([]string{`&StorageClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, + `Parameters:` + mapStringForParameters + `,`, + `}`, + }, "") + return s +} +func (this *StorageClassList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *StorageClass) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = 
%d for field Provisioner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provisioner = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageClassList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StorageClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err 
:= skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 474 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x92, 0x4f, 0x6f, 0xd3, 0x30, + 0x18, 0xc6, 0xe3, 0x54, 0x95, 0x36, 0x17, 0x44, 0x15, 0x38, 0x54, 0x3d, 0x64, 0xd5, 0x04, 0x52, + 0x2f, 0xd8, 0x74, 0x63, 0x68, 0x42, 0xe2, 0xd2, 0x89, 0x03, 0x12, 0x88, 0x29, 0x5c, 0x10, 0xe2, + 0x80, 0xdb, 0xbd, 0xa4, 0x26, 0x4d, 0x1c, 0xd9, 0x6f, 0x02, 0xbd, 0xf1, 0x11, 0xf8, 0x58, 0x15, + 0xa7, 0x1d, 0x39, 0x0d, 0x1a, 0xbe, 0x08, 0xca, 0x1f, 0x96, 0x88, 0x6c, 0xa2, 0xda, 0x2d, 0xaf, + 0xed, 0xdf, 0xe3, 0xe7, 0x79, 0x1c, 0x7a, 0x14, 0x1c, 0x1b, 0x26, 0x15, 0x0f, 0x92, 0x19, 0xe8, + 0x08, 0x10, 0x0c, 0x8f, 0x03, 0x9f, 0x8b, 0x58, 0x1a, 0x6e, 0x50, 0x69, 0xe1, 0x03, 0x4f, 0x27, + 0xdc, 0x87, 0x08, 0xb4, 0x40, 0x38, 0x63, 0xb1, 0x56, 0xa8, 0x9c, 0x07, 0x25, 0xc6, 0x6a, 0x8c, + 0xc5, 0x81, 0xcf, 0x72, 0x8c, 0x55, 0x18, 0x4b, 0x27, 0xc3, 0x87, 0xbe, 0xc4, 0x45, 0x32, 0x63, + 0x73, 0x15, 0x72, 0x5f, 0xf9, 0x8a, 0x17, 0xf4, 0x2c, 0xf9, 0x58, 0x4c, 0xc5, 0x50, 0x7c, 0x95, + 0xaa, 0xc3, 0xc7, 0x95, 0x19, 0x11, 0xcb, 0x50, 0xcc, 0x17, 0x32, 0x02, 0xbd, 0xaa, 0xed, 0x84, + 0x80, 0xe2, 0x0a, 0x2f, 0x43, 0x7e, 0x1d, 0xa5, 0x93, 0x08, 0x65, 0x08, 0x2d, 0xe0, 0xc9, 0xff, + 0x00, 0x33, 0x5f, 0x40, 0x28, 0x5a, 0xdc, 0xe1, 0x75, 0x5c, 0x82, 0x72, 0xc9, 0x65, 0x84, 0x06, + 0x75, 0x0b, 0x6a, 0x64, 0x32, 0xa0, 0x53, 0xd0, 0x75, 0x20, 0xf8, 0x22, 0xc2, 0x78, 0x79, 0x55, + 0xbf, 0xfb, 0x3f, 0x6d, 0x7a, 0xeb, 0x4d, 0xd9, 0xe3, 0xc9, 0x52, 0x18, 0xe3, 0x7c, 0xa0, 0x3b, + 0x79, 0xfe, 0x33, 0x81, 0x62, 0x40, 0x46, 0x64, 0xdc, 0x3b, 0x78, 0xc4, 0xaa, 0x37, 0x68, 0xda, + 0xa9, 0x5f, 0x21, 0x3f, 0xcd, 0xd2, 0x09, 0x7b, 0x3d, 0xfb, 0x04, 0x73, 0x7c, 0x05, 0x28, 0xa6, + 0xce, 0xfa, 0x62, 0xcf, 0xca, 0x2e, 0xf6, 0x68, 0xbd, 0xe6, 0x5d, 0xaa, 0x3a, 0x47, 0xb4, 0x17, + 0x6b, 0x95, 0x4a, 0x23, 0x55, 0x04, 0x7a, 0x60, 0x8f, 0xc8, 0x78, 0x77, 0x7a, 0xb7, 0x42, 0x7a, + 0xa7, 0xf5, 0x96, 0xd7, 0x3c, 0xe7, 0x7c, 0xa6, 0x34, 0x16, 0x5a, 0x84, 0x80, 0xa0, 0xcd, 0xa0, + 0x33, 0xea, 0x8c, 0x7b, 0x07, 0x27, 0x6c, 0xab, 0xdf, 0x83, 0x35, 0x13, 0xb2, 0xd3, 0x4b, 0x95, + 0xe7, 0x11, 0xea, 0x55, 0xed, 0xb6, 0xde, 0xf0, 0x1a, 0x57, 0x0d, 0x9f, 0xd1, 0x3b, 0xff, 0x20, + 0x4e, 0x9f, 0x76, 0x02, 0x58, 0x15, 0xfd, 0xec, 0x7a, 0xf9, 0xa7, 0x73, 0x8f, 0x76, 0x53, 0xb1, + 0x4c, 0xa0, 0x8c, 0xe3, 0x95, 0xc3, 0x53, 0xfb, 0x98, 0xec, 0x7f, 0x27, 0xb4, 0xdf, 0xbc, 0xff, + 0xa5, 0x34, 0xe8, 0xbc, 0x6f, 0xb5, 0xcc, 0xb6, 0x6b, 0x39, 0xa7, 0x8b, 0x8e, 0xfb, 0x95, 0xeb, + 0x9d, 0xbf, 0x2b, 0x8d, 0x86, 0xdf, 0xd2, 0xae, 0x44, 0x08, 0xcd, 0xc0, 0x2e, 0x5a, 0x3a, 0xbc, + 0x41, 0x4b, 0xd3, 0xdb, 0x95, 0x7e, 0xf7, 0x45, 0xae, 0xe4, 0x95, 0x82, 0xd3, 0xfb, 0xeb, 0x8d, + 0x6b, 0x9d, 0x6f, 0x5c, 0xeb, 0xc7, 0xc6, 0xb5, 0xbe, 0x66, 0x2e, 0x59, 0x67, 0x2e, 0x39, 0xcf, + 0x5c, 0xf2, 0x2b, 0x73, 0xc9, 0xb7, 0xdf, 0xae, 0xf5, 0xce, 0x4e, 0x27, 0x7f, 0x02, 0x00, 0x00, + 0xff, 0xff, 0xe8, 0xe1, 0xb9, 0x93, 0xec, 0x03, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/generated.proto 
b/vendor/k8s.io/client-go/pkg/apis/storage/v1/generated.proto new file mode 100644 index 0000000000..92e18cf9e9 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1/generated.proto @@ -0,0 +1,63 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.storage.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. +// +// StorageClasses are non-namespaced; the name of the storage class +// according to etcd is in ObjectMeta.Name. +message StorageClass { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Provisioner indicates the type of the provisioner. + optional string provisioner = 2; + + // Parameters holds the parameters for the provisioner that should + // create volumes of this storage class. + // +optional + map<string, string> parameters = 3; +} + +// StorageClassList is a collection of storage classes. +message StorageClassList { + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of StorageClasses + repeated StorageClass items = 2; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/register.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1/register.go new file mode 100644 index 0000000000..24d6bfa7d3 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "storage.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &StorageClass{}, + &StorageClassList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/types.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1/types.go new file mode 100644 index 0000000000..0591b397cd --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1/types.go @@ -0,0 +1,57 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient=true +// +nonNamespaced=true + +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. +// +// StorageClasses are non-namespaced; the name of the storage class +// according to etcd is in ObjectMeta.Name. +type StorageClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Provisioner indicates the type of the provisioner. + Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` + + // Parameters holds the parameters for the provisioner that should + // create volumes of this storage class. + // +optional + Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` +} + +// StorageClassList is a collection of storage classes. 
+type StorageClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of StorageClasses + Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..6f27177085 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1/types_swagger_doc_generated.go @@ -0,0 +1,51 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_StorageClass = map[string]string{ + "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "provisioner": "Provisioner indicates the type of the provisioner.", + "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", +} + +func (StorageClass) SwaggerDoc() map[string]string { + return map_StorageClass +} + +var map_StorageClassList = map[string]string{ + "": "StorageClassList is a collection of storage classes.", + "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is the list of StorageClasses", +} + +func (StorageClassList) SwaggerDoc() map[string]string { + return map_StorageClassList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.conversion.go new file mode 100644 index 0000000000..d26f1c75ec --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.conversion.go @@ -0,0 +1,85 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + storage "k8s.io/client-go/pkg/apis/storage" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_StorageClass_To_storage_StorageClass, + Convert_storage_StorageClass_To_v1_StorageClass, + Convert_v1_StorageClassList_To_storage_StorageClassList, + Convert_storage_StorageClassList_To_v1_StorageClassList, + ) +} + +func autoConvert_v1_StorageClass_To_storage_StorageClass(in *StorageClass, out *storage.StorageClass, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Provisioner = in.Provisioner + out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters)) + return nil +} + +func Convert_v1_StorageClass_To_storage_StorageClass(in *StorageClass, out *storage.StorageClass, s conversion.Scope) error { + return autoConvert_v1_StorageClass_To_storage_StorageClass(in, out, s) +} + +func autoConvert_storage_StorageClass_To_v1_StorageClass(in *storage.StorageClass, out *StorageClass, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Provisioner = in.Provisioner + out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters)) + return nil +} + +func Convert_storage_StorageClass_To_v1_StorageClass(in *storage.StorageClass, out *StorageClass, s conversion.Scope) error { + return autoConvert_storage_StorageClass_To_v1_StorageClass(in, out, s) +} + +func autoConvert_v1_StorageClassList_To_storage_StorageClassList(in *StorageClassList, out *storage.StorageClassList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]storage.StorageClass)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_StorageClassList_To_storage_StorageClassList(in *StorageClassList, out *storage.StorageClassList, s conversion.Scope) error { + return autoConvert_v1_StorageClassList_To_storage_StorageClassList(in, out, s) +} + +func autoConvert_storage_StorageClassList_To_v1_StorageClassList(in *storage.StorageClassList, out *StorageClassList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]StorageClass)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_storage_StorageClassList_To_v1_StorageClassList(in *storage.StorageClassList, out *StorageClassList, s conversion.Scope) error { + return autoConvert_storage_StorageClassList_To_v1_StorageClassList(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..8b3786ab19 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.deepcopy.go @@ -0,0 +1,80 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_StorageClass, InType: reflect.TypeOf(&StorageClass{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_StorageClassList, InType: reflect.TypeOf(&StorageClassList{})}, + ) +} + +func DeepCopy_v1_StorageClass(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClass) + out := out.(*StorageClass) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_v1_StorageClassList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClassList) + out := out.(*StorageClassList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageClass, len(*in)) + for i := range *in { + if err := DeepCopy_v1_StorageClass(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.defaults.go new file mode 100644 index 0000000000..6df448eb9f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/doc.go new file mode 100644 index 0000000000..3643217106 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=storage.k8s.io +package v1beta1 diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.pb.go new file mode 100644 index 0000000000..f1130cf37a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.pb.go @@ -0,0 +1,731 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.proto + + It has these top-level messages: + StorageClass + StorageClassList +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *StorageClass) Reset() { *m = StorageClass{} } +func (*StorageClass) ProtoMessage() {} +func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *StorageClassList) Reset() { *m = StorageClassList{} } +func (*StorageClassList) ProtoMessage() {} +func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func init() { + proto.RegisterType((*StorageClass)(nil), "k8s.io.client-go.pkg.apis.storage.v1beta1.StorageClass") + proto.RegisterType((*StorageClassList)(nil), "k8s.io.client-go.pkg.apis.storage.v1beta1.StorageClassList") +} +func (m *StorageClass) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StorageClass) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Provisioner))) + i += copy(data[i:], m.Provisioner) + if len(m.Parameters) > 0 { + for k := range m.Parameters { + data[i] = 0x1a + i++ + v := m.Parameters[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + return i, nil +} + +func (m *StorageClassList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StorageClassList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *StorageClass) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Provisioner) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { 
+ _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *StorageClassList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *StorageClass) String() string { + if this == nil { + return "nil" + } + keysForParameters := make([]string, 0, len(this.Parameters)) + for k := range this.Parameters { + keysForParameters = append(keysForParameters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + mapStringForParameters := "map[string]string{" + for _, k := range keysForParameters { + mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) + } + mapStringForParameters += "}" + s := strings.Join([]string{`&StorageClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, + `Parameters:` + mapStringForParameters + `,`, + `}`, + }, "") + return s +} +func (this *StorageClassList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *StorageClass) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Provisioner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provisioner = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageClassList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StorageClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + 
next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 486 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x92, 0xcf, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x33, 0x5d, 0x8a, 0xbb, 0x53, 0xc5, 0x12, 0x3d, 0x94, 0x1e, 0xb2, 0x65, 0x4f, 0x55, + 0x74, 0xc6, 0xae, 0x3f, 0x28, 0x0b, 0x5e, 0x2a, 0x82, 0x82, 0xe2, 0x12, 0x6f, 0xa2, 0xe0, 0xa4, + 0xfb, 0x4c, 0xc7, 0x34, 0x99, 0x30, 0xf3, 0x12, 0x2c, 0x78, 0xf0, 0x4f, 0xf0, 0xcf, 0xea, 0xcd, + 0x3d, 0x7a, 0x5a, 0x6c, 0xf4, 0x0f, 0x91, 0xfc, 0x70, 0x13, 0xcc, 0x16, 0x17, 0x6f, 0x99, 0x99, + 0xf7, 0xf9, 0xbe, 0xef, 0xfb, 0xbe, 0xd0, 0xa3, 0x60, 0x6a, 0x98, 0x54, 0x3c, 0x48, 0x3c, 0xd0, + 0x11, 0x20, 0x18, 0x1e, 0x07, 0x3e, 0x17, 0xb1, 0x34, 0xdc, 0xa0, 0xd2, 0xc2, 0x07, 0x9e, 0x4e, + 0x3c, 0x40, 0x31, 0xe1, 0x3e, 0x44, 0xa0, 0x05, 0xc2, 0x09, 0x8b, 0xb5, 0x42, 0x65, 0xdf, 0x2e, + 0x59, 0x56, 0xb3, 0x2c, 0x0e, 0x7c, 0x96, 0xb3, 0xac, 0x62, 0x59, 0xc5, 0x0e, 0xef, 0xfa, 0x12, + 0x17, 0x89, 0xc7, 0xe6, 0x2a, 0xe4, 0xbe, 0xf2, 0x15, 0x2f, 0x24, 0xbc, 0xe4, 0x43, 0x71, 0x2a, + 0x0e, 0xc5, 0x57, 0x29, 0x3d, 0x7c, 0x50, 0xd9, 0x12, 0xb1, 0x0c, 0xc5, 0x7c, 0x21, 0x23, 0xd0, + 0xab, 0xda, 0x58, 0x08, 0x28, 0x78, 0xda, 0x32, 0x34, 0xe4, 0xdb, 0x28, 0x9d, 0x44, 0x28, 0x43, + 0x68, 0x01, 0x8f, 0xfe, 0x05, 0x98, 0xf9, 0x02, 0x42, 0xd1, 0xe2, 0xee, 0x6f, 0xe3, 0x12, 0x94, + 0x4b, 0x2e, 0x23, 0x34, 0xa8, 0x5b, 0x50, 0x63, 0x26, 0x03, 0x3a, 0x05, 0x5d, 0x0f, 0x04, 0x9f, + 0x44, 0x18, 0x2f, 0xe1, 0xa2, 0x99, 0xee, 0x6c, 0x5d, 0xd0, 0x05, 0xd5, 0x07, 0xbf, 0x3a, 0xf4, + 0xea, 0xeb, 0x32, 0xfa, 0x27, 0x4b, 0x61, 0x8c, 0xfd, 0x9e, 0xee, 0xe6, 0x69, 0x9d, 0x08, 0x14, + 0x03, 0x32, 0x22, 0xe3, 0xde, 0xe1, 0x3d, 0x56, 0xad, 0xad, 0x69, 0xbe, 0x5e, 0x5c, 0x5e, 0xcd, + 0xd2, 0x09, 0x7b, 0xe5, 0x7d, 0x84, 0x39, 0xbe, 0x04, 0x14, 0x33, 0x7b, 0x7d, 0xb6, 0x6f, 0x65, + 0x67, 0xfb, 0xb4, 0xbe, 0x73, 0xcf, 0x55, 0xed, 0x87, 0xb4, 0x17, 0x6b, 0x95, 0x4a, 0x23, 0x55, + 0x04, 0x7a, 0xd0, 0x19, 0x91, 0xf1, 0xde, 0xec, 0x46, 0x85, 0xf4, 0x8e, 0xeb, 0x27, 0xb7, 0x59, + 0x67, 0x7f, 0xa6, 0x34, 0x16, 0x5a, 0x84, 0x80, 0xa0, 0xcd, 0x60, 0x67, 0xb4, 0x33, 0xee, 0x1d, + 0x3e, 0x63, 0x97, 0xff, 0xa3, 0x58, 0x73, 0x4c, 0x76, 0x7c, 0x2e, 0xf5, 0x34, 0x42, 0xbd, 0xaa, + 0x2d, 0xd7, 0x0f, 0x6e, 0xa3, 0xdf, 0xf0, 0x31, 0xbd, 0xfe, 0x17, 0x62, 0xf7, 0xe9, 0x4e, 0x00, + 0xab, 0x22, 0xa4, 0x3d, 0x37, 0xff, 0xb4, 0x6f, 0xd2, 0x6e, 0x2a, 0x96, 0x09, 0x94, 0x33, 0xb9, + 0xe5, 0xe1, 0xa8, 0x33, 0x25, 0x07, 0xdf, 0x08, 0xed, 0x37, 0xfb, 0xbf, 0x90, 0x06, 0xed, 0xb7, + 0xad, 0xa8, 0xd9, 0xe5, 0xa2, 0xce, 0xe9, 0x22, 0xe8, 0x7e, 0xe5, 0x7a, 0xf7, 0xcf, 0x4d, 0x23, + 0xe6, 0x77, 0xb4, 0x2b, 0x11, 0x42, 0x33, 0xe8, 0x14, 0x51, 0x4d, 0xff, 0x37, 0xaa, 0xd9, 0xb5, + 0xaa, 0x49, 0xf7, 0x79, 0x2e, 0xe7, 0x96, 0xaa, 0xb3, 0x5b, 0xeb, 0x8d, 0x63, 0x9d, 0x6e, 0x1c, + 0xeb, 0xfb, 0xc6, 0xb1, 0xbe, 0x64, 0x0e, 0x59, 0x67, 0x0e, 0x39, 0xcd, 0x1c, 0xf2, 0x23, 0x73, + 0xc8, 0xd7, 0x9f, 0x8e, 0xf5, 0xe6, 0x4a, 0xa5, 0xf6, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x99, 0xdf, + 0x66, 0x94, 0x33, 0x04, 0x00, 0x00, +} diff 
--git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.proto new file mode 100644 index 0000000000..0b9dbaeada --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.proto @@ -0,0 +1,64 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.storage.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. +// +// StorageClasses are non-namespaced; the name of the storage class +// according to etcd is in ObjectMeta.Name. +message StorageClass { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Provisioner indicates the type of the provisioner. + optional string provisioner = 2; + + // Parameters holds the parameters for the provisioner that should + // create volumes of this storage class. + // +optional + map<string, string> parameters = 3; +} + +// StorageClassList is a collection of storage classes. +message StorageClassList { + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of StorageClasses + repeated StorageClass items = 2; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/register.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/register.go new file mode 100644 index 0000000000..70087f3797 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "storage.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &StorageClass{}, + &StorageClassList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.generated.go new file mode 100644 index 0000000000..ddc516b28f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.generated.go @@ -0,0 +1,985 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package v1beta1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 + } +} + +func (x *StorageClass) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = len(x.Parameters) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Provisioner)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("provisioner")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Provisioner)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Parameters == nil { 
+ r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + z.F.EncMapStringStringV(x.Parameters, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("parameters")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Parameters == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + z.F.EncMapStringStringV(x.Parameters, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StorageClass) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StorageClass) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "provisioner": + if r.TryDecodeAsNil() { + x.Provisioner = "" + } else { + yyv10 := &x.Provisioner + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "parameters": + if r.TryDecodeAsNil() { + x.Parameters = nil + } else { + yyv12 := &x.Parameters + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + z.F.DecMapStringStringX(yyv12, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StorageClass) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 
bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv15 := &x.Kind + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv17 := &x.APIVersion + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv19 := &x.ObjectMeta + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else { + z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Provisioner = "" + } else { + yyv21 := &x.Provisioner + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Parameters = nil + } else { + yyv23 := &x.Parameters + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + z.F.DecMapStringStringX(yyv23, false, d) + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *StorageClassList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + 
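// (Editorial comment, not generated output: yyq2 records which optional fields are
// set; in map form only those fields are emitted, here as a "kind" key followed by
// its value.)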
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceStorageClass(([]StorageClass)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceStorageClass(([]StorageClass)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StorageClassList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StorageClassList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } 
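// (Editorial comment, not generated output: each iteration signals the map-key
// container state, decodes the key into the scratch buffer, signals the map-value
// state, and then dispatches on the field name in the switch that follows.)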
+ z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceStorageClass((*[]StorageClass)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StorageClassList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceStorageClass((*[]StorageClass)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceStorageClass(v 
[]StorageClass, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceStorageClass(v *[]StorageClass, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []StorageClass{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]StorageClass, yyrl1) + } + } else { + yyv1 = make([]StorageClass, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = StorageClass{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, StorageClass{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = StorageClass{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, StorageClass{}) // var yyz1 StorageClass + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = StorageClass{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []StorageClass{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.go new file mode 100644 index 0000000000..afbd5bcdbf --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.go @@ -0,0 +1,57 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient=true +// +nonNamespaced=true + +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. 
+// +// StorageClasses are non-namespaced; the name of the storage class +// according to etcd is in ObjectMeta.Name. +type StorageClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Provisioner indicates the type of the provisioner. + Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` + + // Parameters holds the parameters for the provisioner that should + // create volumes of this storage class. + // +optional + Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` +} + +// StorageClassList is a collection of storage classes. +type StorageClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of StorageClasses + Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..e8362e381f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,51 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_StorageClass = map[string]string{ + "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", + "metadata": "Standard object's metadata. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "provisioner": "Provisioner indicates the type of the provisioner.", + "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", +} + +func (StorageClass) SwaggerDoc() map[string]string { + return map_StorageClass +} + +var map_StorageClassList = map[string]string{ + "": "StorageClassList is a collection of storage classes.", + "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is the list of StorageClasses", +} + +func (StorageClassList) SwaggerDoc() map[string]string { + return map_StorageClassList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.conversion.go new file mode 100644 index 0000000000..867a528dd0 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.conversion.go @@ -0,0 +1,85 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1beta1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + storage "k8s.io/client-go/pkg/apis/storage" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
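// Editorial illustration, not part of the generated file: callers normally rely on
// the scheme for conversions, but the generated functions can also be invoked
// directly. The helper below is a hypothetical sketch; a nil conversion.Scope is
// passed only because the generated conversions in this file never consult it.
func exampleStorageClassToInternal(in *StorageClass) (*storage.StorageClass, error) {
	out := &storage.StorageClass{}
	if err := Convert_v1beta1_StorageClass_To_storage_StorageClass(in, out, nil); err != nil {
		return nil, err
	}
	return out, nil
}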
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_StorageClass_To_storage_StorageClass, + Convert_storage_StorageClass_To_v1beta1_StorageClass, + Convert_v1beta1_StorageClassList_To_storage_StorageClassList, + Convert_storage_StorageClassList_To_v1beta1_StorageClassList, + ) +} + +func autoConvert_v1beta1_StorageClass_To_storage_StorageClass(in *StorageClass, out *storage.StorageClass, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Provisioner = in.Provisioner + out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters)) + return nil +} + +func Convert_v1beta1_StorageClass_To_storage_StorageClass(in *StorageClass, out *storage.StorageClass, s conversion.Scope) error { + return autoConvert_v1beta1_StorageClass_To_storage_StorageClass(in, out, s) +} + +func autoConvert_storage_StorageClass_To_v1beta1_StorageClass(in *storage.StorageClass, out *StorageClass, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Provisioner = in.Provisioner + out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters)) + return nil +} + +func Convert_storage_StorageClass_To_v1beta1_StorageClass(in *storage.StorageClass, out *StorageClass, s conversion.Scope) error { + return autoConvert_storage_StorageClass_To_v1beta1_StorageClass(in, out, s) +} + +func autoConvert_v1beta1_StorageClassList_To_storage_StorageClassList(in *StorageClassList, out *storage.StorageClassList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]storage.StorageClass)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_StorageClassList_To_storage_StorageClassList(in *StorageClassList, out *storage.StorageClassList, s conversion.Scope) error { + return autoConvert_v1beta1_StorageClassList_To_storage_StorageClassList(in, out, s) +} + +func autoConvert_storage_StorageClassList_To_v1beta1_StorageClassList(in *storage.StorageClassList, out *StorageClassList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]StorageClass)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_storage_StorageClassList_To_v1beta1_StorageClassList(in *storage.StorageClassList, out *StorageClassList, s conversion.Scope) error { + return autoConvert_storage_StorageClassList_To_v1beta1_StorageClassList(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..f2ea4f2fb1 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,80 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
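// Editorial note on the generated code below (not part of the upstream file): the
// deep-copy functions perform value-for-value copies; the Parameters map and the
// Items slice are reallocated so that mutating a copy never aliases the original.
// The init function registers RegisterDeepCopies with the package's SchemeBuilder,
// so the copy functions are added to a scheme when AddToScheme is invoked.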
+ +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StorageClass, InType: reflect.TypeOf(&StorageClass{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StorageClassList, InType: reflect.TypeOf(&StorageClassList{})}, + ) +} + +func DeepCopy_v1beta1_StorageClass(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClass) + out := out.(*StorageClass) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_v1beta1_StorageClassList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClassList) + out := out.(*StorageClassList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageClass, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_StorageClass(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.defaults.go new file mode 100644 index 0000000000..e24e70be38 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/storage/zz_generated.deepcopy.go new file mode 100644 index 0000000000..2ef9f1767f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/zz_generated.deepcopy.go @@ -0,0 +1,80 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package storage + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_storage_StorageClass, InType: reflect.TypeOf(&StorageClass{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_storage_StorageClassList, InType: reflect.TypeOf(&StorageClassList{})}, + ) +} + +func DeepCopy_storage_StorageClass(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClass) + out := out.(*StorageClass) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_storage_StorageClassList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClassList) + out := out.(*StorageClassList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageClass, len(*in)) + for i := range *in { + if err := DeepCopy_storage_StorageClass(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/util/doc.go b/vendor/k8s.io/client-go/pkg/util/doc.go new file mode 100644 index 0000000000..1747db5506 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/util/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package util implements various utility functions used in both testing and implementation +// of Kubernetes. Package util may not depend on any other package in the Kubernetes +// package tree. +package util diff --git a/vendor/k8s.io/client-go/pkg/util/parsers/parsers.go b/vendor/k8s.io/client-go/pkg/util/parsers/parsers.go new file mode 100644 index 0000000000..4e70cc6827 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/util/parsers/parsers.go @@ -0,0 +1,54 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package parsers + +import ( + "fmt" + + dockerref "github.com/docker/distribution/reference" +) + +const ( + DefaultImageTag = "latest" +) + +// ParseImageName parses a docker image string into three parts: repo, tag and digest. +// If both tag and digest are empty, a default image tag will be returned. +func ParseImageName(image string) (string, string, string, error) { + named, err := dockerref.ParseNamed(image) + if err != nil { + return "", "", "", fmt.Errorf("couldn't parse image name: %v", err) + } + + repoToPull := named.Name() + var tag, digest string + + tagged, ok := named.(dockerref.Tagged) + if ok { + tag = tagged.Tag() + } + + digested, ok := named.(dockerref.Digested) + if ok { + digest = digested.Digest().String() + } + // If no tag was specified, use the default "latest". + if len(tag) == 0 && len(digest) == 0 { + tag = DefaultImageTag + } + return repoToPull, tag, digest, nil +} diff --git a/vendor/k8s.io/client-go/pkg/util/template.go b/vendor/k8s.io/client-go/pkg/util/template.go new file mode 100644 index 0000000000..d09d7dc867 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/util/template.go @@ -0,0 +1,48 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "bytes" + "go/doc" + "io" + "strings" + "text/template" +) + +func wrap(indent string, s string) string { + var buf bytes.Buffer + doc.ToText(&buf, s, indent, indent+" ", 80-len(indent)) + return buf.String() +} + +// ExecuteTemplate executes templateText with data and output written to w. +func ExecuteTemplate(w io.Writer, templateText string, data interface{}) error { + t := template.New("top") + t.Funcs(template.FuncMap{ + "trim": strings.TrimSpace, + "wrap": wrap, + }) + template.Must(t.Parse(templateText)) + return t.Execute(w, data) +} + +func ExecuteTemplateToString(templateText string, data interface{}) (string, error) { + b := bytes.Buffer{} + err := ExecuteTemplate(&b, templateText, data) + return b.String(), err +} diff --git a/vendor/k8s.io/client-go/pkg/util/umask.go b/vendor/k8s.io/client-go/pkg/util/umask.go new file mode 100644 index 0000000000..35ccce50bd --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/util/umask.go @@ -0,0 +1,27 @@ +// +build !windows + +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "syscall" +) + +func Umask(mask int) (old int, err error) { + return syscall.Umask(mask), nil +} diff --git a/vendor/k8s.io/client-go/pkg/util/umask_windows.go b/vendor/k8s.io/client-go/pkg/util/umask_windows.go new file mode 100644 index 0000000000..7a1ba15386 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/util/umask_windows.go @@ -0,0 +1,27 @@ +// +build windows + +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "errors" +) + +func Umask(mask int) (int, error) { + return 0, errors.New("platform and architecture is not supported") +} diff --git a/vendor/k8s.io/client-go/pkg/util/util.go b/vendor/k8s.io/client-go/pkg/util/util.go new file mode 100644 index 0000000000..356b295a3e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/util/util.go @@ -0,0 +1,131 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + "os" + "reflect" + "regexp" +) + +// Takes a list of strings and compiles them into a list of regular expressions +func CompileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) { + regexps := []*regexp.Regexp{} + for _, regexpStr := range regexpStrings { + r, err := regexp.Compile(regexpStr) + if err != nil { + return []*regexp.Regexp{}, err + } + regexps = append(regexps, r) + } + return regexps, nil +} + +// Detects if using systemd as the init system +// Please note that simply reading /proc/1/cmdline can be misleading because +// some installation of various init programs can automatically make /sbin/init +// a symlink or even a renamed version of their main program. +// TODO(dchen1107): realiably detects the init system using on the system: +// systemd, upstart, initd, etc. +func UsingSystemdInitSystem() bool { + if _, err := os.Stat("/run/systemd/system"); err == nil { + return true + } + + return false +} + +// Tests whether all pointer fields in a struct are nil. This is useful when, +// for example, an API struct is handled by plugins which need to distinguish +// "no plugin accepted this spec" from "this spec is empty". 
+// +// This function is only valid for structs and pointers to structs. Any other +// type will cause a panic. Passing a typed nil pointer will return true. +func AllPtrFieldsNil(obj interface{}) bool { + v := reflect.ValueOf(obj) + if !v.IsValid() { + panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj)) + } + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return true + } + v = v.Elem() + } + for i := 0; i < v.NumField(); i++ { + if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() { + return false + } + } + return true +} + +func FileExists(filename string) (bool, error) { + if _, err := os.Stat(filename); os.IsNotExist(err) { + return false, nil + } else if err != nil { + return false, err + } + return true, nil +} + +// ReadDirNoStat returns a string of files/directories contained +// in dirname without calling lstat on them. +func ReadDirNoStat(dirname string) ([]string, error) { + if dirname == "" { + dirname = "." + } + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + return f.Readdirnames(-1) +} + +// IntPtr returns a pointer to an int +func IntPtr(i int) *int { + o := i + return &o +} + +// Int32Ptr returns a pointer to an int32 +func Int32Ptr(i int32) *int32 { + o := i + return &o +} + +// IntPtrDerefOr dereference the int ptr and returns it i not nil, +// else returns def. +func IntPtrDerefOr(ptr *int, def int) int { + if ptr != nil { + return *ptr + } + return def +} + +// Int32PtrDerefOr dereference the int32 ptr and returns it i not nil, +// else returns def. +func Int32PtrDerefOr(ptr *int32, def int32) int32 { + if ptr != nil { + return *ptr + } + return def +} diff --git a/vendor/k8s.io/kubernetes/pkg/version/.gitattributes b/vendor/k8s.io/client-go/pkg/version/.gitattributes similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/version/.gitattributes rename to vendor/k8s.io/client-go/pkg/version/.gitattributes diff --git a/vendor/k8s.io/kubernetes/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go similarity index 90% rename from vendor/k8s.io/kubernetes/pkg/version/base.go rename to vendor/k8s.io/client-go/pkg/version/base.go index 6c8af1c70c..c377705fe8 100644 --- a/vendor/k8s.io/kubernetes/pkg/version/base.go +++ b/vendor/k8s.io/client-go/pkg/version/base.go @@ -29,7 +29,7 @@ package version // works for GitHub tar downloads. // // When releasing a new Kubernetes version, this file is updated by -// build-tools/mark_new_version.sh to reflect the new version, and then a +// build/mark_new_version.sh to reflect the new version, and then a // git annotated tag (using format vX.Y where X == Major version and Y // == Minor version) is created to point to the commit that updates // pkg/version/base.go @@ -39,8 +39,8 @@ var ( // them irrelevant. (Next we'll take it out, which may muck with // scripts consuming the kubectl version output - but most of // these should be looking at gitVersion already anyways.) - gitMajor string = "1" // major version, always numeric - gitMinor string = "5" // minor version, numeric possibly followed by "+" + gitMajor string = "" // major version, always numeric + gitMinor string = "" // minor version, numeric possibly followed by "+" // semantic version, derived by build scripts (see // https://github.com/kubernetes/kubernetes/blob/master/docs/design/versioning.md @@ -51,7 +51,7 @@ var ( // semantic version is a git hash, but the version itself is no // longer the direct output of "git describe", but a slight // translation to be semver compliant. 
- gitVersion string = "v1.5.1+$Format:%h$" + gitVersion string = "v0.0.0-master+$Format:%h$" gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty" diff --git a/vendor/k8s.io/client-go/pkg/version/doc.go b/vendor/k8s.io/client-go/pkg/version/doc.go new file mode 100644 index 0000000000..ccedec76f6 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/version/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package version supplies version information collected at build time to +// kubernetes components. +package version diff --git a/vendor/k8s.io/client-go/pkg/version/version.go b/vendor/k8s.io/client-go/pkg/version/version.go new file mode 100644 index 0000000000..8c8350d13b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/version/version.go @@ -0,0 +1,42 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "fmt" + "runtime" + + apimachineryversion "k8s.io/apimachinery/pkg/version" +) + +// Get returns the overall codebase version. It's for detecting +// what code a binary was built from. +func Get() apimachineryversion.Info { + // These variables typically come from -ldflags settings and in + // their absence fallback to the settings in pkg/version/base.go + return apimachineryversion.Info{ + Major: gitMajor, + Minor: gitMinor, + GitVersion: gitVersion, + GitCommit: gitCommit, + GitTreeState: gitTreeState, + BuildDate: buildDate, + GoVersion: runtime.Version(), + Compiler: runtime.Compiler, + Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), + } +} diff --git a/vendor/k8s.io/client-go/rest/OWNERS b/vendor/k8s.io/client-go/rest/OWNERS new file mode 100755 index 0000000000..8d97da007d --- /dev/null +++ b/vendor/k8s.io/client-go/rest/OWNERS @@ -0,0 +1,24 @@ +reviewers: +- thockin +- smarterclayton +- caesarxuchao +- wojtek-t +- deads2k +- brendandburns +- liggitt +- nikhiljindal +- gmarek +- erictune +- sttts +- luxas +- dims +- errordeveloper +- hongchaodeng +- krousey +- resouer +- cjcullen +- rmmh +- lixiaobing10051267 +- asalkeld +- juanvallejo +- lojies diff --git a/vendor/k8s.io/client-go/rest/client.go b/vendor/k8s.io/client-go/rest/client.go new file mode 100644 index 0000000000..524e0d8eb9 --- /dev/null +++ b/vendor/k8s.io/client-go/rest/client.go @@ -0,0 +1,258 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "fmt" + "mime" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/flowcontrol" +) + +const ( + // Environment variables: Note that the duration should be long enough that the backoff + // persists for some reasonable time (i.e. 120 seconds). The typical base might be "1". + envBackoffBase = "KUBE_CLIENT_BACKOFF_BASE" + envBackoffDuration = "KUBE_CLIENT_BACKOFF_DURATION" +) + +// Interface captures the set of operations for generically interacting with Kubernetes REST apis. +type Interface interface { + GetRateLimiter() flowcontrol.RateLimiter + Verb(verb string) *Request + Post() *Request + Put() *Request + Patch(pt types.PatchType) *Request + Get() *Request + Delete() *Request + APIVersion() schema.GroupVersion +} + +// RESTClient imposes common Kubernetes API conventions on a set of resource paths. +// The baseURL is expected to point to an HTTP or HTTPS path that is the parent +// of one or more resources. The server should return a decodable API resource +// object, or an api.Status object which contains information about the reason for +// any failure. +// +// Most consumers should use client.New() to get a Kubernetes API client. +type RESTClient struct { + // base is the root URL for all invocations of the client + base *url.URL + // versionedAPIPath is a path segment connecting the base URL to the resource root + versionedAPIPath string + + // contentConfig is the information used to communicate with the server. + contentConfig ContentConfig + + // serializers contain all serializers for underlying content type. + serializers Serializers + + // creates BackoffManager that is passed to requests. + createBackoffMgr func() BackoffManager + + // TODO extract this into a wrapper interface via the RESTClient interface in kubectl. + Throttle flowcontrol.RateLimiter + + // Set specific behavior of the client. If not set http.DefaultClient will be used. + Client *http.Client +} + +type Serializers struct { + Encoder runtime.Encoder + Decoder runtime.Decoder + StreamingSerializer runtime.Serializer + Framer runtime.Framer + RenegotiatedDecoder func(contentType string, params map[string]string) (runtime.Decoder, error) +} + +// NewRESTClient creates a new RESTClient. This client performs generic REST functions +// such as Get, Put, Post, and Delete on specified paths. Codec controls encoding and +// decoding of responses from the server. 
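// Editorial illustration, not part of the upstream client-go source: the sketch
// below shows one way a caller might wire NewRESTClient together, assuming a
// parsed base URL and a ContentConfig whose NegotiatedSerializer is already
// populated. The helper name, the versioned API path, and the QPS/burst numbers
// are hypothetical examples, not values used anywhere in this repository.
func exampleNewRESTClient(base *url.URL, config ContentConfig) (*RESTClient, error) {
	// Passing a nil rate limiter lets NewRESTClient construct a token-bucket
	// limiter from the maxQPS/maxBurst arguments (5 QPS, burst of 10 here).
	return NewRESTClient(base, "/apis/storage.k8s.io/v1beta1", config, 5, 10, nil, http.DefaultClient)
}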
+func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConfig, maxQPS float32, maxBurst int, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) { + base := *baseURL + if !strings.HasSuffix(base.Path, "/") { + base.Path += "/" + } + base.RawQuery = "" + base.Fragment = "" + + if config.GroupVersion == nil { + config.GroupVersion = &schema.GroupVersion{} + } + if len(config.ContentType) == 0 { + config.ContentType = "application/json" + } + serializers, err := createSerializers(config) + if err != nil { + return nil, err + } + + var throttle flowcontrol.RateLimiter + if maxQPS > 0 && rateLimiter == nil { + throttle = flowcontrol.NewTokenBucketRateLimiter(maxQPS, maxBurst) + } else if rateLimiter != nil { + throttle = rateLimiter + } + return &RESTClient{ + base: &base, + versionedAPIPath: versionedAPIPath, + contentConfig: config, + serializers: *serializers, + createBackoffMgr: readExpBackoffConfig, + Throttle: throttle, + Client: client, + }, nil +} + +// GetRateLimiter returns rate limier for a given client, or nil if it's called on a nil client +func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter { + if c == nil { + return nil + } + return c.Throttle +} + +// readExpBackoffConfig handles the internal logic of determining what the +// backoff policy is. By default if no information is available, NoBackoff. +// TODO Generalize this see #17727 . +func readExpBackoffConfig() BackoffManager { + backoffBase := os.Getenv(envBackoffBase) + backoffDuration := os.Getenv(envBackoffDuration) + + backoffBaseInt, errBase := strconv.ParseInt(backoffBase, 10, 64) + backoffDurationInt, errDuration := strconv.ParseInt(backoffDuration, 10, 64) + if errBase != nil || errDuration != nil { + return &NoBackoff{} + } + return &URLBackoff{ + Backoff: flowcontrol.NewBackOff( + time.Duration(backoffBaseInt)*time.Second, + time.Duration(backoffDurationInt)*time.Second)} +} + +// createSerializers creates all necessary serializers for given contentType. +// TODO: the negotiated serializer passed to this method should probably return +// serializers that control decoding and versioning without this package +// being aware of the types. Depends on whether RESTClient must deal with +// generic infrastructure. 
+func createSerializers(config ContentConfig) (*Serializers, error) { + mediaTypes := config.NegotiatedSerializer.SupportedMediaTypes() + contentType := config.ContentType + mediaType, _, err := mime.ParseMediaType(contentType) + if err != nil { + return nil, fmt.Errorf("the content type specified in the client configuration is not recognized: %v", err) + } + info, ok := runtime.SerializerInfoForMediaType(mediaTypes, mediaType) + if !ok { + if len(contentType) != 0 || len(mediaTypes) == 0 { + return nil, fmt.Errorf("no serializers registered for %s", contentType) + } + info = mediaTypes[0] + } + + internalGV := schema.GroupVersions{ + { + Group: config.GroupVersion.Group, + Version: runtime.APIVersionInternal, + }, + // always include the legacy group as a decoding target to handle non-error `Status` return types + { + Group: "", + Version: runtime.APIVersionInternal, + }, + } + + s := &Serializers{ + Encoder: config.NegotiatedSerializer.EncoderForVersion(info.Serializer, *config.GroupVersion), + Decoder: config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV), + + RenegotiatedDecoder: func(contentType string, params map[string]string) (runtime.Decoder, error) { + info, ok := runtime.SerializerInfoForMediaType(mediaTypes, contentType) + if !ok { + return nil, fmt.Errorf("serializer for %s not registered", contentType) + } + return config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV), nil + }, + } + if info.StreamSerializer != nil { + s.StreamingSerializer = info.StreamSerializer.Serializer + s.Framer = info.StreamSerializer.Framer + } + + return s, nil +} + +// Verb begins a request with a verb (GET, POST, PUT, DELETE). +// +// Example usage of RESTClient's request building interface: +// c, err := NewRESTClient(...) +// if err != nil { ... } +// resp, err := c.Verb("GET"). +// Path("pods"). +// SelectorParam("labels", "area=staging"). +// Timeout(10*time.Second). +// Do() +// if err != nil { ... } +// list, ok := resp.(*api.PodList) +// +func (c *RESTClient) Verb(verb string) *Request { + backoff := c.createBackoffMgr() + + if c.Client == nil { + return NewRequest(nil, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle) + } + return NewRequest(c.Client, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle) +} + +// Post begins a POST request. Short for c.Verb("POST"). +func (c *RESTClient) Post() *Request { + return c.Verb("POST") +} + +// Put begins a PUT request. Short for c.Verb("PUT"). +func (c *RESTClient) Put() *Request { + return c.Verb("PUT") +} + +// Patch begins a PATCH request. Short for c.Verb("Patch"). +func (c *RESTClient) Patch(pt types.PatchType) *Request { + return c.Verb("PATCH").SetHeader("Content-Type", string(pt)) +} + +// Get begins a GET request. Short for c.Verb("GET"). +func (c *RESTClient) Get() *Request { + return c.Verb("GET") +} + +// Delete begins a DELETE request. Short for c.Verb("DELETE"). +func (c *RESTClient) Delete() *Request { + return c.Verb("DELETE") +} + +// APIVersion returns the APIVersion this RESTClient is expected to use. +func (c *RESTClient) APIVersion() schema.GroupVersion { + return *c.contentConfig.GroupVersion +} diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go new file mode 100644 index 0000000000..2a2c03dff1 --- /dev/null +++ b/vendor/k8s.io/client-go/rest/config.go @@ -0,0 +1,384 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "path" + gruntime "runtime" + "strings" + "time" + + "github.com/golang/glog" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/version" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + certutil "k8s.io/client-go/util/cert" + "k8s.io/client-go/util/flowcontrol" +) + +const ( + DefaultQPS float32 = 5.0 + DefaultBurst int = 10 +) + +// Config holds the common attributes that can be passed to a Kubernetes client on +// initialization. +type Config struct { + // Host must be a host string, a host:port pair, or a URL to the base of the apiserver. + // If a URL is given then the (optional) Path of that URL represents a prefix that must + // be appended to all request URIs used to access the apiserver. This allows a frontend + // proxy to easily relocate all of the apiserver endpoints. + Host string + // APIPath is a sub-path that points to an API root. + APIPath string + // Prefix is the sub path of the server. If not specified, the client will set + // a default value. Use "/" to indicate the server root should be used + Prefix string + + // ContentConfig contains settings that affect how objects are transformed when + // sent to the server. + ContentConfig + + // Server requires Basic authentication + Username string + Password string + + // Server requires Bearer authentication. This client will not attempt to use + // refresh tokens for an OAuth2 flow. + // TODO: demonstrate an OAuth2 compatible client. + BearerToken string + + // Impersonate is the configuration that RESTClient will use for impersonation. + Impersonate ImpersonationConfig + + // Server requires plugin-specified authentication. + AuthProvider *clientcmdapi.AuthProviderConfig + + // Callback to persist config for AuthProvider. + AuthConfigPersister AuthProviderConfigPersister + + // TLSClientConfig contains settings to enable transport layer security + TLSClientConfig + + // UserAgent is an optional field that specifies the caller of this request. + UserAgent string + + // Transport may be used for custom HTTP behavior. This attribute may not + // be specified with the TLS client certificate options. Use WrapTransport + // for most client level operations. + Transport http.RoundTripper + // WrapTransport will be invoked for custom HTTP behavior after the underlying + // transport is initialized (either the transport created from TLSClientConfig, + // Transport, or http.DefaultTransport). The config may layer other RoundTrippers + // on top of the returned RoundTripper. + WrapTransport func(rt http.RoundTripper) http.RoundTripper + + // QPS indicates the maximum QPS to the master from this client. + // If it's zero, the created RESTClient will use DefaultQPS: 5 + QPS float32 + + // Maximum burst for throttle. + // If it's zero, the created RESTClient will use DefaultBurst: 10. 
+ Burst int + + // Rate limiter for limiting connections to the master from this client. If present overwrites QPS/Burst + RateLimiter flowcontrol.RateLimiter + + // The maximum length of time to wait before giving up on a server request. A value of zero means no timeout. + Timeout time.Duration + + // Version forces a specific version to be used (if registered) + // Do we need this? + // Version string +} + +// ImpersonationConfig has all the available impersonation options +type ImpersonationConfig struct { + // UserName is the username to impersonate on each request. + UserName string + // Groups are the groups to impersonate on each request. + Groups []string + // Extra is a free-form field which can be used to link some authentication information + // to authorization information. This field allows you to impersonate it. + Extra map[string][]string +} + +// TLSClientConfig contains settings to enable transport layer security +type TLSClientConfig struct { + // Server should be accessed without verifying the TLS certificate. For testing only. + Insecure bool + // ServerName is passed to the server for SNI and is used in the client to check server + // ceritificates against. If ServerName is empty, the hostname used to contact the + // server is used. + ServerName string + + // Server requires TLS client certificate authentication + CertFile string + // Server requires TLS client certificate authentication + KeyFile string + // Trusted root certificates for server + CAFile string + + // CertData holds PEM-encoded bytes (typically read from a client certificate file). + // CertData takes precedence over CertFile + CertData []byte + // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). + // KeyData takes precedence over KeyFile + KeyData []byte + // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). + // CAData takes precedence over CAFile + CAData []byte +} + +type ContentConfig struct { + // AcceptContentTypes specifies the types the client will accept and is optional. + // If not set, ContentType will be used to define the Accept header + AcceptContentTypes string + // ContentType specifies the wire format used to communicate with the server. + // This value will be set as the Accept header on requests made to the server, and + // as the default content type on any object sent to the server. If not set, + // "application/json" is used. + ContentType string + // GroupVersion is the API version to talk to. Must be provided when initializing + // a RESTClient directly. When initializing a Client, will be set with the default + // code version. + GroupVersion *schema.GroupVersion + // NegotiatedSerializer is used for obtaining encoders and decoders for multiple + // supported media types. + NegotiatedSerializer runtime.NegotiatedSerializer +} + +// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config +// object. Note that a RESTClient may require fields that are optional when initializing a Client. +// A RESTClient created by this method is generic - it expects to operate on an API that follows +// the Kubernetes conventions, but may not be the Kubernetes API. 
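A construction sketch under stated assumptions: api.Codecs from k8s.io/client-go/pkg/api is assumed to satisfy runtime.NegotiatedSerializer, and v1.SchemeGroupVersion to name the core group, roughly mirroring what generated clientsets set up before calling RESTClientFor. Host and token are illustrative inputs.

package example

import (
	"k8s.io/client-go/pkg/api"
	"k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/rest"
)

// newCoreV1Client wires the required ContentConfig fields by hand.
func newCoreV1Client(host, token string) (*rest.RESTClient, error) {
	cfg := &rest.Config{
		Host:        host,
		APIPath:     "/api", // core API root; named groups use "/apis"
		BearerToken: token,
		ContentConfig: rest.ContentConfig{
			GroupVersion:         &v1.SchemeGroupVersion,
			NegotiatedSerializer: api.Codecs, // assumption: exported codec factory in the vendored pkg/api
		},
	}
	return rest.RESTClientFor(cfg)
}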
+func RESTClientFor(config *Config) (*RESTClient, error) { + if config.GroupVersion == nil { + return nil, fmt.Errorf("GroupVersion is required when initializing a RESTClient") + } + if config.NegotiatedSerializer == nil { + return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") + } + qps := config.QPS + if config.QPS == 0.0 { + qps = DefaultQPS + } + burst := config.Burst + if config.Burst == 0 { + burst = DefaultBurst + } + + baseURL, versionedAPIPath, err := defaultServerUrlFor(config) + if err != nil { + return nil, err + } + + transport, err := TransportFor(config) + if err != nil { + return nil, err + } + + var httpClient *http.Client + if transport != http.DefaultTransport { + httpClient = &http.Client{Transport: transport} + if config.Timeout > 0 { + httpClient.Timeout = config.Timeout + } + } + + return NewRESTClient(baseURL, versionedAPIPath, config.ContentConfig, qps, burst, config.RateLimiter, httpClient) +} + +// UnversionedRESTClientFor is the same as RESTClientFor, except that it allows +// the config.Version to be empty. +func UnversionedRESTClientFor(config *Config) (*RESTClient, error) { + if config.NegotiatedSerializer == nil { + return nil, fmt.Errorf("NeogitatedSerializer is required when initializing a RESTClient") + } + + baseURL, versionedAPIPath, err := defaultServerUrlFor(config) + if err != nil { + return nil, err + } + + transport, err := TransportFor(config) + if err != nil { + return nil, err + } + + var httpClient *http.Client + if transport != http.DefaultTransport { + httpClient = &http.Client{Transport: transport} + if config.Timeout > 0 { + httpClient.Timeout = config.Timeout + } + } + + versionConfig := config.ContentConfig + if versionConfig.GroupVersion == nil { + v := metav1.SchemeGroupVersion + versionConfig.GroupVersion = &v + } + + return NewRESTClient(baseURL, versionedAPIPath, versionConfig, config.QPS, config.Burst, config.RateLimiter, httpClient) +} + +// SetKubernetesDefaults sets default values on the provided client config for accessing the +// Kubernetes API or returns an error if any of the defaults are impossible or invalid. +func SetKubernetesDefaults(config *Config) error { + if len(config.UserAgent) == 0 { + config.UserAgent = DefaultKubernetesUserAgent() + } + return nil +} + +// DefaultKubernetesUserAgent returns the default user agent that clients can use. +func DefaultKubernetesUserAgent() string { + commit := version.Get().GitCommit + if len(commit) > 7 { + commit = commit[:7] + } + if len(commit) == 0 { + commit = "unknown" + } + version := version.Get().GitVersion + seg := strings.SplitN(version, "-", 2) + version = seg[0] + return fmt.Sprintf("%s/%s (%s/%s) kubernetes/%s", path.Base(os.Args[0]), version, gruntime.GOOS, gruntime.GOARCH, commit) +} + +// InClusterConfig returns a config object which uses the service account +// kubernetes gives to pods. It's intended for clients that expect to be +// running inside a pod running on kubernetes. It will return an error if +// called from a process not running in a kubernetes environment. 
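A minimal in-cluster sketch: the returned Config already carries the API server address, service-account bearer token and CA file, but still needs ContentConfig (or a generated clientset) before requests can be issued.

package main

import (
	"github.com/golang/glog"

	"k8s.io/client-go/rest"
)

func main() {
	// Succeeds only inside a pod with a mounted service account.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		glog.Fatalf("not running inside a cluster: %v", err)
	}
	glog.Infof("talking to %s", cfg.Host)
}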
+func InClusterConfig() (*Config, error) { + host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT") + if len(host) == 0 || len(port) == 0 { + return nil, fmt.Errorf("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined") + } + + token, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/" + api.ServiceAccountTokenKey) + if err != nil { + return nil, err + } + tlsClientConfig := TLSClientConfig{} + rootCAFile := "/var/run/secrets/kubernetes.io/serviceaccount/" + api.ServiceAccountRootCAKey + if _, err := certutil.NewPool(rootCAFile); err != nil { + glog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) + } else { + tlsClientConfig.CAFile = rootCAFile + } + + return &Config{ + // TODO: switch to using cluster DNS. + Host: "https://" + net.JoinHostPort(host, port), + BearerToken: string(token), + TLSClientConfig: tlsClientConfig, + }, nil +} + +// IsConfigTransportTLS returns true if and only if the provided +// config will result in a protected connection to the server when it +// is passed to restclient.RESTClientFor(). Use to determine when to +// send credentials over the wire. +// +// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are +// still possible. +func IsConfigTransportTLS(config Config) bool { + baseURL, _, err := defaultServerUrlFor(&config) + if err != nil { + return false + } + return baseURL.Scheme == "https" +} + +// LoadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData, +// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are +// either populated or were empty to start. +func LoadTLSFiles(c *Config) error { + var err error + c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile) + if err != nil { + return err + } + + c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile) + if err != nil { + return err + } + + c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile) + if err != nil { + return err + } + return nil +} + +// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, +// or an error if an error occurred reading the file +func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { + if len(data) > 0 { + return data, nil + } + if len(file) > 0 { + fileData, err := ioutil.ReadFile(file) + if err != nil { + return []byte{}, err + } + return fileData, nil + } + return nil, nil +} + +func AddUserAgent(config *Config, userAgent string) *Config { + fullUserAgent := DefaultKubernetesUserAgent() + "/" + userAgent + config.UserAgent = fullUserAgent + return config +} + +// AnonymousClientConfig returns a copy of the given config with all user credentials (cert/key, bearer token, and username/password) removed +func AnonymousClientConfig(config *Config) *Config { + // copy only known safe fields + return &Config{ + Host: config.Host, + APIPath: config.APIPath, + Prefix: config.Prefix, + ContentConfig: config.ContentConfig, + TLSClientConfig: TLSClientConfig{ + Insecure: config.Insecure, + ServerName: config.ServerName, + CAFile: config.TLSClientConfig.CAFile, + CAData: config.TLSClientConfig.CAData, + }, + RateLimiter: config.RateLimiter, + UserAgent: config.UserAgent, + Transport: config.Transport, + WrapTransport: config.WrapTransport, + QPS: config.QPS, + Burst: config.Burst, + Timeout: config.Timeout, + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/plugin.go 
b/vendor/k8s.io/client-go/rest/plugin.go similarity index 96% rename from vendor/k8s.io/kubernetes/pkg/client/restclient/plugin.go rename to vendor/k8s.io/client-go/rest/plugin.go index 06ac3cce16..cf8fbabfdf 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/restclient/plugin.go +++ b/vendor/k8s.io/client-go/rest/plugin.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restclient +package rest import ( "fmt" @@ -23,7 +23,7 @@ import ( "github.com/golang/glog" - clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) type AuthProvider interface { diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/request.go b/vendor/k8s.io/client-go/rest/request.go similarity index 86% rename from vendor/k8s.io/kubernetes/pkg/client/restclient/request.go rename to vendor/k8s.io/client-go/rest/request.go index 0f17d0badc..b87ddaff51 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/restclient/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -14,10 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restclient +package rest import ( "bytes" + "context" "encoding/hex" "fmt" "io" @@ -32,20 +33,20 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - pathvalidation "k8s.io/kubernetes/pkg/api/validation/path" - "k8s.io/kubernetes/pkg/client/metrics" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer/streaming" - "k8s.io/kubernetes/pkg/util/flowcontrol" - "k8s.io/kubernetes/pkg/util/net" - "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/watch" - "k8s.io/kubernetes/pkg/watch/versioned" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/streaming" + "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/pkg/api/v1" + restclientwatch "k8s.io/client-go/rest/watch" + "k8s.io/client-go/tools/metrics" + "k8s.io/client-go/util/flowcontrol" ) var ( @@ -104,16 +105,14 @@ type Request struct { resource string resourceName string subresource string - selector labels.Selector timeout time.Duration // output err error body io.Reader - // The constructed request and the response - req *http.Request - resp *http.Response + // This is only used for per-request timeouts, deadlines, and cancellations. 
+ ctx context.Context backoffMgr BackoffManager throttle flowcontrol.RateLimiter @@ -179,7 +178,7 @@ func (r *Request) Resource(resource string) *Request { r.err = fmt.Errorf("resource already set to %q, cannot change to %q", r.resource, resource) return r } - if msgs := pathvalidation.IsValidPathSegmentName(resource); len(msgs) != 0 { + if msgs := IsValidPathSegmentName(resource); len(msgs) != 0 { r.err = fmt.Errorf("invalid resource %q: %v", resource, msgs) return r } @@ -199,7 +198,7 @@ func (r *Request) SubResource(subresources ...string) *Request { return r } for _, s := range subresources { - if msgs := pathvalidation.IsValidPathSegmentName(s); len(msgs) != 0 { + if msgs := IsValidPathSegmentName(s); len(msgs) != 0 { r.err = fmt.Errorf("invalid subresource %q: %v", s, msgs) return r } @@ -221,7 +220,7 @@ func (r *Request) Name(resourceName string) *Request { r.err = fmt.Errorf("resource name already set to %q, cannot change to %q", r.resourceName, resourceName) return r } - if msgs := pathvalidation.IsValidPathSegmentName(resourceName); len(msgs) != 0 { + if msgs := IsValidPathSegmentName(resourceName); len(msgs) != 0 { r.err = fmt.Errorf("invalid resource name %q: %v", resourceName, msgs) return r } @@ -238,7 +237,7 @@ func (r *Request) Namespace(namespace string) *Request { r.err = fmt.Errorf("namespace already set to %q, cannot change to %q", r.namespace, namespace) return r } - if msgs := pathvalidation.IsValidPathSegmentName(namespace); len(msgs) != 0 { + if msgs := IsValidPathSegmentName(namespace); len(msgs) != 0 { r.err = fmt.Errorf("invalid namespace %q: %v", namespace, msgs) return r } @@ -333,10 +332,10 @@ func (r resourceTypeToFieldMapping) filterField(resourceType, field, value strin return fMapping.filterField(field, value) } -type versionToResourceToFieldMapping map[unversioned.GroupVersion]resourceTypeToFieldMapping +type versionToResourceToFieldMapping map[schema.GroupVersion]resourceTypeToFieldMapping // filterField transforms the given field/value selector for the given groupVersion and resource -func (v versionToResourceToFieldMapping) filterField(groupVersion *unversioned.GroupVersion, resourceType, field, value string) (newField, newValue string, err error) { +func (v versionToResourceToFieldMapping) filterField(groupVersion *schema.GroupVersion, resourceType, field, value string) (newField, newValue string, err error) { rMapping, ok := v[*groupVersion] if !ok { // no groupVersion overrides registered, default to identity mapping @@ -404,7 +403,7 @@ func (r *Request) FieldsSelectorParam(s fields.Selector) *Request { r.err = err return r } - return r.setParam(unversioned.FieldSelectorQueryParam(r.content.GroupVersion.String()), s2.String()) + return r.setParam(metav1.FieldSelectorQueryParam(r.content.GroupVersion.String()), s2.String()) } // LabelsSelectorParam adds the given selector as a query parameter @@ -418,7 +417,7 @@ func (r *Request) LabelsSelectorParam(s labels.Selector) *Request { if s.Empty() { return r } - return r.setParam(unversioned.LabelSelectorQueryParam(r.content.GroupVersion.String()), s.String()) + return r.setParam(metav1.LabelSelectorQueryParam(r.content.GroupVersion.String()), s.String()) } // UintParam creates a query parameter with the given value. @@ -453,14 +452,14 @@ func (r *Request) VersionedParams(obj runtime.Object, codec runtime.ParameterCod for _, value := range v { // TODO: Move it to setParam method, once we get rid of // FieldSelectorParam & LabelSelectorParam methods. 
- if k == unversioned.LabelSelectorQueryParam(r.content.GroupVersion.String()) && value == "" { + if k == metav1.LabelSelectorQueryParam(r.content.GroupVersion.String()) && value == "" { // Don't set an empty selector for backward compatibility. // Since there is no way to get the difference between empty // and unspecified string, we don't set it to avoid having // labelSelector= param in every request. continue } - if k == unversioned.FieldSelectorQueryParam(r.content.GroupVersion.String()) { + if k == metav1.FieldSelectorQueryParam(r.content.GroupVersion.String()) { if len(value) == 0 { // Don't set an empty selector for backward compatibility. // Since there is no way to get the difference between empty @@ -539,10 +538,10 @@ func (r *Request) Body(obj interface{}) *Request { r.err = err return r } - glog.V(8).Infof("Request Body: %#v", string(data)) + glogBody("Request Body", data) r.body = bytes.NewReader(data) case []byte: - glog.V(8).Infof("Request Body: %#v", string(t)) + glogBody("Request Body", t) r.body = bytes.NewReader(t) case io.Reader: r.body = t @@ -556,7 +555,7 @@ func (r *Request) Body(obj interface{}) *Request { r.err = err return r } - glog.V(8).Infof("Request Body: %#v", string(data)) + glogBody("Request Body", data) r.body = bytes.NewReader(data) r.SetHeader("Content-Type", r.content.ContentType) default: @@ -565,6 +564,13 @@ func (r *Request) Body(obj interface{}) *Request { return r } +// Context adds a context to the request. Contexts are only used for +// timeouts, deadlines, and cancellations. +func (r *Request) Context(ctx context.Context) *Request { + r.ctx = ctx + return r +} + // URL returns the current working URL. func (r *Request) URL() *url.URL { p := r.pathPrefix @@ -650,6 +656,9 @@ func (r *Request) Watch() (watch.Interface, error) { if err != nil { return nil, err } + if r.ctx != nil { + req = req.WithContext(r.ctx) + } req.Header = r.headers client := r.client if client == nil { @@ -682,7 +691,7 @@ func (r *Request) Watch() (watch.Interface, error) { } framer := r.serializers.Framer.NewFrameReader(resp.Body) decoder := streaming.NewDecoder(framer, r.serializers.StreamingSerializer) - return watch.NewStreamWatcher(versioned.NewDecoder(decoder, r.serializers.Decoder)), nil + return watch.NewStreamWatcher(restclientwatch.NewDecoder(decoder, r.serializers.Decoder)), nil } // updateURLMetrics is a convenience function for pushing metrics. @@ -693,9 +702,10 @@ func updateURLMetrics(req *Request, resp *http.Response, err error) { url = req.baseURL.Host } - // If we have an error (i.e. apiserver down) we report that as a metric label. + // Errors can be arbitrary strings. Unbound label cardinality is not suitable for a metric + // system so we just report them as ``. 
if err != nil { - metrics.RequestResult.Increment(err.Error(), req.verb, url) + metrics.RequestResult.Increment("", req.verb, url) } else { //Metrics for failure codes metrics.RequestResult.Increment(strconv.Itoa(resp.StatusCode), req.verb, url) @@ -718,6 +728,9 @@ func (r *Request) Stream() (io.ReadCloser, error) { if err != nil { return nil, err } + if r.ctx != nil { + req = req.WithContext(r.ctx) + } req.Header = r.headers client := r.client if client == nil { @@ -746,10 +759,11 @@ func (r *Request) Stream() (io.ReadCloser, error) { defer resp.Body.Close() result := r.transformResponse(resp, req) - if result.err != nil { - return nil, result.err + err := result.Error() + if err == nil { + err = fmt.Errorf("%d while accessing %v: %s", result.statusCode, url, string(result.body)) } - return nil, fmt.Errorf("%d while accessing %v: %s", result.statusCode, url, string(result.body)) + return nil, err } } @@ -792,6 +806,9 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { if err != nil { return err } + if r.ctx != nil { + req = req.WithContext(r.ctx) + } req.Header = r.headers r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL())) @@ -809,7 +826,20 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode) } if err != nil { - return err + // "Connection reset by peer" is usually a transient error. + // Thus in case of "GET" operations, we simply retry it. + // We are not automatically retrying "write" operations, as + // they are not idempotent. + if !net.IsConnectionReset(err) || r.verb != "GET" { + return err + } + // For the purpose of retry, we set the artificial "retry-after" response. + // TODO: Should we clean the original response if it exists? + resp = &http.Response{ + StatusCode: http.StatusInternalServerError, + Header: http.Header{"Retry-After": []string{"1"}}, + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } } done := func() bool { @@ -876,6 +906,7 @@ func (r *Request) DoRaw() ([]byte, error) { var result Result err := r.request(func(req *http.Request, resp *http.Response) { result.body, result.err = ioutil.ReadAll(resp.Body) + glogBody("Response Body", result.body) if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent { result.err = r.transformUnstructuredResponseError(resp, req, result.body) } @@ -895,15 +926,7 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu } } - if glog.V(8) { - if bytes.IndexFunc(body, func(r rune) bool { - return r < 0x0a - }) != -1 { - glog.Infof("Response Body:\n%s", hex.Dump(body)) - } else { - glog.Infof("Response Body: %s", string(body)) - } - } + glogBody("Response Body", body) // verify the content type is accurate contentType := resp.Header.Get("Content-Type") @@ -955,6 +978,21 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu } } +// glogBody logs a body output that could be either JSON or protobuf. It explicitly guards against +// allocating a new string for the body output unless necessary. Uses a simple heuristic to determine +// whether the body is printable. +func glogBody(prefix string, body []byte) { + if glog.V(8) { + if bytes.IndexFunc(body, func(r rune) bool { + return r < 0x0a + }) != -1 { + glog.Infof("%s:\n%s", prefix, hex.Dump(body)) + } else { + glog.Infof("%s: %s", prefix, string(body)) + } + } +} + // maxUnstructuredResponseTextBytes is an upper bound on how much output to include in the unstructured error. 
const maxUnstructuredResponseTextBytes = 2048 @@ -992,19 +1030,20 @@ func (r *Request) newUnstructuredResponseError(body []byte, isTextResponse bool, if len(body) > maxUnstructuredResponseTextBytes { body = body[:maxUnstructuredResponseTextBytes] } - glog.V(8).Infof("Response Body: %#v", string(body)) message := "unknown" if isTextResponse { message = strings.TrimSpace(string(body)) } + var groupResource schema.GroupResource + if len(r.resource) > 0 { + groupResource.Group = r.content.GroupVersion.Group + groupResource.Resource = r.resource + } return errors.NewGenericServerResponse( statusCode, method, - unversioned.GroupResource{ - Group: r.content.GroupVersion.Group, - Resource: r.resource, - }, + groupResource, r.resourceName, message, retryAfter, @@ -1082,9 +1121,9 @@ func (r Result) Get() (runtime.Object, error) { return nil, err } switch t := out.(type) { - case *unversioned.Status: + case *metav1.Status: // any status besides StatusSuccess is considered an error. - if t.Status != unversioned.StatusSuccess { + if t.Status != metav1.StatusSuccess { return nil, errors.FromObject(t) } } @@ -1117,9 +1156,9 @@ func (r Result) Into(obj runtime.Object) error { // if a different object is returned, see if it is Status and avoid double decoding // the object. switch t := out.(type) { - case *unversioned.Status: + case *metav1.Status: // any status besides StatusSuccess is considered an error. - if t.Status != unversioned.StatusSuccess { + if t.Status != metav1.StatusSuccess { return errors.FromObject(t) } } @@ -1146,17 +1185,63 @@ func (r Result) Error() error { // attempt to convert the body into a Status object // to be backwards compatible with old servers that do not return a version, default to "v1" - out, _, err := r.decoder.Decode(r.body, &unversioned.GroupVersionKind{Version: "v1"}, nil) + out, _, err := r.decoder.Decode(r.body, &schema.GroupVersionKind{Version: "v1"}, nil) if err != nil { glog.V(5).Infof("body was not decodable (unable to check for Status): %v", err) return r.err } switch t := out.(type) { - case *unversioned.Status: + case *metav1.Status: // because we default the kind, we *must* check for StatusFailure - if t.Status == unversioned.StatusFailure { + if t.Status == metav1.StatusFailure { return errors.FromObject(t) } } return r.err } + +// NameMayNotBe specifies strings that cannot be used as names specified as path segments (like the REST API or etcd store) +var NameMayNotBe = []string{".", ".."} + +// NameMayNotContain specifies substrings that cannot be used in names specified as path segments (like the REST API or etcd store) +var NameMayNotContain = []string{"/", "%"} + +// IsValidPathSegmentName validates the name can be safely encoded as a path segment +func IsValidPathSegmentName(name string) []string { + for _, illegalName := range NameMayNotBe { + if name == illegalName { + return []string{fmt.Sprintf(`may not be '%s'`, illegalName)} + } + } + + var errors []string + for _, illegalContent := range NameMayNotContain { + if strings.Contains(name, illegalContent) { + errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent)) + } + } + + return errors +} + +// IsValidPathSegmentPrefix validates the name can be used as a prefix for a name which will be encoded as a path segment +// It does not check for exact matches with disallowed names, since an arbitrary suffix might make the name valid +func IsValidPathSegmentPrefix(name string) []string { + var errors []string + for _, illegalContent := range NameMayNotContain { + if 
strings.Contains(name, illegalContent) { + errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent)) + } + } + + return errors +} + +// ValidatePathSegmentName validates the name can be safely encoded as a path segment +func ValidatePathSegmentName(name string, prefix bool) []string { + if prefix { + return IsValidPathSegmentPrefix(name) + } else { + return IsValidPathSegmentName(name) + } +} diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go new file mode 100644 index 0000000000..ba43752bc9 --- /dev/null +++ b/vendor/k8s.io/client-go/rest/transport.go @@ -0,0 +1,99 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "crypto/tls" + "net/http" + + "k8s.io/client-go/transport" +) + +// TLSConfigFor returns a tls.Config that will provide the transport level security defined +// by the provided Config. Will return nil if no transport level security is requested. +func TLSConfigFor(config *Config) (*tls.Config, error) { + cfg, err := config.TransportConfig() + if err != nil { + return nil, err + } + return transport.TLSConfigFor(cfg) +} + +// TransportFor returns an http.RoundTripper that will provide the authentication +// or transport level security defined by the provided Config. Will return the +// default http.DefaultTransport if no special case behavior is needed. +func TransportFor(config *Config) (http.RoundTripper, error) { + cfg, err := config.TransportConfig() + if err != nil { + return nil, err + } + return transport.New(cfg) +} + +// HTTPWrappersForConfig wraps a round tripper with any relevant layered behavior from the +// config. Exposed to allow more clients that need HTTP-like behavior but then must hijack +// the underlying connection (like WebSocket or HTTP2 clients). Pure HTTP clients should use +// the higher level TransportFor or RESTClientFor methods. +func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) { + cfg, err := config.TransportConfig() + if err != nil { + return nil, err + } + return transport.HTTPWrappersForConfig(cfg, rt) +} + +// TransportConfig converts a client config to an appropriate transport config. 
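A small sketch of reusing the transport stack directly, assuming cfg is a populated *rest.Config (for example from InClusterConfig earlier in this file set): TransportFor applies the TLS, bearer-token and impersonation settings to a RoundTripper usable from plain net/http.

package example

import (
	"net/http"

	"k8s.io/client-go/rest"
)

// httpClientFor builds a plain *http.Client that authenticates the same way
// a RESTClient built from cfg would.
func httpClientFor(cfg *rest.Config) (*http.Client, error) {
	rt, err := rest.TransportFor(cfg)
	if err != nil {
		return nil, err
	}
	return &http.Client{Transport: rt, Timeout: cfg.Timeout}, nil
}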
+func (c *Config) TransportConfig() (*transport.Config, error) { + wt := c.WrapTransport + if c.AuthProvider != nil { + provider, err := GetAuthProvider(c.Host, c.AuthProvider, c.AuthConfigPersister) + if err != nil { + return nil, err + } + if wt != nil { + previousWT := wt + wt = func(rt http.RoundTripper) http.RoundTripper { + return provider.WrapTransport(previousWT(rt)) + } + } else { + wt = provider.WrapTransport + } + } + return &transport.Config{ + UserAgent: c.UserAgent, + Transport: c.Transport, + WrapTransport: wt, + TLS: transport.TLSConfig{ + Insecure: c.Insecure, + ServerName: c.ServerName, + CAFile: c.CAFile, + CAData: c.CAData, + CertFile: c.CertFile, + CertData: c.CertData, + KeyFile: c.KeyFile, + KeyData: c.KeyData, + }, + Username: c.Username, + Password: c.Password, + BearerToken: c.BearerToken, + Impersonate: transport.ImpersonationConfig{ + UserName: c.Impersonate.UserName, + Groups: c.Impersonate.Groups, + Extra: c.Impersonate.Extra, + }, + }, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/url_utils.go b/vendor/k8s.io/client-go/rest/url_utils.go similarity index 88% rename from vendor/k8s.io/kubernetes/pkg/client/restclient/url_utils.go rename to vendor/k8s.io/client-go/rest/url_utils.go index 81f16d63ea..14f94650a9 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/restclient/url_utils.go +++ b/vendor/k8s.io/client-go/rest/url_utils.go @@ -14,29 +14,26 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restclient +package rest import ( "fmt" "net/url" "path" - "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/apimachinery/pkg/runtime/schema" ) // DefaultServerURL converts a host, host:port, or URL string to the default base server API path // to use with a Client at a given API version following the standard conventions for a // Kubernetes API. -func DefaultServerURL(host, apiPath string, groupVersion unversioned.GroupVersion, defaultTLS bool) (*url.URL, string, error) { +func DefaultServerURL(host, apiPath string, groupVersion schema.GroupVersion, defaultTLS bool) (*url.URL, string, error) { if host == "" { return nil, "", fmt.Errorf("host must be a URL or a host:port pair") } base := host hostURL, err := url.Parse(base) - if err != nil { - return nil, "", err - } - if hostURL.Scheme == "" || hostURL.Host == "" { + if err != nil || hostURL.Scheme == "" || hostURL.Host == "" { scheme := "http://" if defaultTLS { scheme = "https://" @@ -89,5 +86,5 @@ func defaultServerUrlFor(config *Config) (*url.URL, string, error) { if config.GroupVersion != nil { return DefaultServerURL(host, config.APIPath, *config.GroupVersion, defaultTLS) } - return DefaultServerURL(host, config.APIPath, unversioned.GroupVersion{}, defaultTLS) + return DefaultServerURL(host, config.APIPath, schema.GroupVersion{}, defaultTLS) } diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/urlbackoff.go b/vendor/k8s.io/client-go/rest/urlbackoff.go similarity index 97% rename from vendor/k8s.io/kubernetes/pkg/client/restclient/urlbackoff.go rename to vendor/k8s.io/client-go/rest/urlbackoff.go index 24a89ed975..eff848abc1 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/restclient/urlbackoff.go +++ b/vendor/k8s.io/client-go/rest/urlbackoff.go @@ -14,15 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package restclient +package rest import ( "net/url" "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/util/flowcontrol" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/util/flowcontrol" ) // Set of resp. Codes that we backoff for. diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/versions.go b/vendor/k8s.io/client-go/rest/versions.go similarity index 94% rename from vendor/k8s.io/kubernetes/pkg/client/restclient/versions.go rename to vendor/k8s.io/client-go/rest/versions.go index 3376434474..9d41812f26 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/restclient/versions.go +++ b/vendor/k8s.io/client-go/rest/versions.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restclient +package rest import ( "encoding/json" @@ -22,7 +22,7 @@ import ( "net/http" "path" - "k8s.io/kubernetes/pkg/api/unversioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( @@ -57,7 +57,7 @@ func ServerAPIVersions(c *Config) (groupVersions []string, err error) { if err != nil { return nil, err } - var v unversioned.APIVersions + var v metav1.APIVersions defer resp.Body.Close() err = json.NewDecoder(resp.Body).Decode(&v) if err != nil { @@ -71,7 +71,7 @@ func ServerAPIVersions(c *Config) (groupVersions []string, err error) { if err != nil { return nil, err } - var apiGroupList unversioned.APIGroupList + var apiGroupList metav1.APIGroupList defer resp2.Body.Close() err = json.NewDecoder(resp2.Body).Decode(&apiGroupList) if err != nil { diff --git a/vendor/k8s.io/client-go/rest/watch/decoder.go b/vendor/k8s.io/client-go/rest/watch/decoder.go new file mode 100644 index 0000000000..73bb63addf --- /dev/null +++ b/vendor/k8s.io/client-go/rest/watch/decoder.go @@ -0,0 +1,72 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package versioned + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/streaming" + "k8s.io/apimachinery/pkg/watch" +) + +// Decoder implements the watch.Decoder interface for io.ReadClosers that +// have contents which consist of a series of watchEvent objects encoded +// with the given streaming decoder. The internal objects will be then +// decoded by the embedded decoder. +type Decoder struct { + decoder streaming.Decoder + embeddedDecoder runtime.Decoder +} + +// NewDecoder creates an Decoder for the given writer and codec. +func NewDecoder(decoder streaming.Decoder, embeddedDecoder runtime.Decoder) *Decoder { + return &Decoder{ + decoder: decoder, + embeddedDecoder: embeddedDecoder, + } +} + +// Decode blocks until it can return the next object in the reader. Returns an error +// if the reader is closed or an object can't be decoded. 
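A consumer-side sketch of where this Decoder ends up: Request.Watch in the rest package frames the response stream through it and hands back a watch.Interface. The client value and the "watch" query parameter below are assumptions about how the legacy watch endpoint is addressed.

package example

import (
	"fmt"

	"k8s.io/client-go/rest"
)

// watchPods starts a watch and drains the decoded events.
func watchPods(c *rest.RESTClient, namespace string) error {
	w, err := c.Get().
		Namespace(namespace).
		Resource("pods").
		Param("watch", "true"). // query-parameter form of starting a watch
		Watch()
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Printf("%s %T\n", ev.Type, ev.Object)
	}
	return nil
}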
+func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) { + var got metav1.WatchEvent + res, _, err := d.decoder.Decode(nil, &got) + if err != nil { + return "", nil, err + } + if res != &got { + return "", nil, fmt.Errorf("unable to decode to metav1.Event") + } + switch got.Type { + case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error): + default: + return "", nil, fmt.Errorf("got invalid watch event type: %v", got.Type) + } + + obj, err := runtime.Decode(d.embeddedDecoder, got.Object.Raw) + if err != nil { + return "", nil, fmt.Errorf("unable to decode watch event: %v", err) + } + return watch.EventType(got.Type), obj, nil +} + +// Close closes the underlying r. +func (d *Decoder) Close() { + d.decoder.Close() +} diff --git a/vendor/k8s.io/kubernetes/pkg/watch/versioned/encoder.go b/vendor/k8s.io/client-go/rest/watch/encoder.go similarity index 79% rename from vendor/k8s.io/kubernetes/pkg/watch/versioned/encoder.go rename to vendor/k8s.io/client-go/rest/watch/encoder.go index df23e0bd16..e55aa12d9b 100644 --- a/vendor/k8s.io/kubernetes/pkg/watch/versioned/encoder.go +++ b/vendor/k8s.io/client-go/rest/watch/encoder.go @@ -19,14 +19,16 @@ package versioned import ( "encoding/json" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer/streaming" - "k8s.io/kubernetes/pkg/watch" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/streaming" + "k8s.io/apimachinery/pkg/watch" ) // Encoder serializes watch.Events into io.Writer. The internal objects // are encoded using embedded encoder, and the outer Event is serialized // using encoder. +// TODO: this type is only used by tests type Encoder struct { encoder streaming.Encoder embeddedEncoder runtime.Encoder @@ -47,5 +49,8 @@ func (e *Encoder) Encode(event *watch.Event) error { return err } // FIXME: get rid of json.RawMessage. - return e.encoder.Encode(&Event{string(event.Type), runtime.RawExtension{Raw: json.RawMessage(data)}}) + return e.encoder.Encode(&metav1.WatchEvent{ + Type: string(event.Type), + Object: runtime.RawExtension{Raw: json.RawMessage(data)}, + }) } diff --git a/vendor/k8s.io/client-go/testing/actions.go b/vendor/k8s.io/client-go/testing/actions.go new file mode 100644 index 0000000000..fdcebc9bdc --- /dev/null +++ b/vendor/k8s.io/client-go/testing/actions.go @@ -0,0 +1,471 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testing + +import ( + "fmt" + "path" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func NewRootGetAction(resource schema.GroupVersionResource, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Name = name + + return action +} + +func NewGetAction(resource schema.GroupVersionResource, namespace, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewRootListAction(resource schema.GroupVersionResource, opts interface{}) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewListAction(resource schema.GroupVersionResource, namespace string, opts interface{}) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + action.Namespace = namespace + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewRootCreateAction(resource schema.GroupVersionResource, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Object = object + + return action +} + +func NewCreateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootUpdateAction(resource schema.GroupVersionResource, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Object = object + + return action +} + +func NewUpdateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootPatchAction(resource schema.GroupVersionResource, name string, patch []byte) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Name = name + action.Patch = patch + + return action +} + +func NewPatchAction(resource schema.GroupVersionResource, namespace string, name string, patch []byte) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Namespace = namespace + action.Name = name + action.Patch = patch + + return action +} + +func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name string, patch []byte, subresources ...string) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Subresource = path.Join(subresources...) 
+ action.Name = name + action.Patch = patch + + return action +} + +func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, name string, patch []byte, subresources ...string) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Subresource = path.Join(subresources...) + action.Namespace = namespace + action.Name = name + action.Patch = patch + + return action +} + +func NewRootUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Subresource = subresource + action.Object = object + + return action +} +func NewUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, namespace string, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Subresource = subresource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootDeleteAction(resource schema.GroupVersionResource, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Name = name + + return action +} + +func NewDeleteAction(resource schema.GroupVersionResource, namespace, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewRootDeleteCollectionAction(resource schema.GroupVersionResource, opts interface{}) DeleteCollectionActionImpl { + action := DeleteCollectionActionImpl{} + action.Verb = "delete-collection" + action.Resource = resource + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewDeleteCollectionAction(resource schema.GroupVersionResource, namespace string, opts interface{}) DeleteCollectionActionImpl { + action := DeleteCollectionActionImpl{} + action.Verb = "delete-collection" + action.Resource = resource + action.Namespace = namespace + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewRootWatchAction(resource schema.GroupVersionResource, opts interface{}) WatchActionImpl { + action := WatchActionImpl{} + action.Verb = "watch" + action.Resource = resource + labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) + action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} + + return action +} + +func ExtractFromListOptions(opts interface{}) (labelSelector labels.Selector, fieldSelector fields.Selector, resourceVersion string) { + var err error + switch t := opts.(type) { + case metav1.ListOptions: + labelSelector, err = labels.Parse(t.LabelSelector) + if err != nil { + panic(fmt.Errorf("invalid selector %q: %v", t.LabelSelector, err)) + } + fieldSelector, err = fields.ParseSelector(t.FieldSelector) + if err != nil { + panic(fmt.Errorf("invalid selector %q: %v", t.FieldSelector, err)) + } + resourceVersion = t.ResourceVersion + default: + panic(fmt.Errorf("expect a ListOptions %T", opts)) + } + if labelSelector == nil { + labelSelector = labels.Everything() + } + if fieldSelector == nil { + fieldSelector = fields.Everything() + } + return 
labelSelector, fieldSelector, resourceVersion +} + +func NewWatchAction(resource schema.GroupVersionResource, namespace string, opts interface{}) WatchActionImpl { + action := WatchActionImpl{} + action.Verb = "watch" + action.Resource = resource + action.Namespace = namespace + labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) + action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} + + return action +} + +func NewProxyGetAction(resource schema.GroupVersionResource, namespace, scheme, name, port, path string, params map[string]string) ProxyGetActionImpl { + action := ProxyGetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Namespace = namespace + action.Scheme = scheme + action.Name = name + action.Port = port + action.Path = path + action.Params = params + return action +} + +type ListRestrictions struct { + Labels labels.Selector + Fields fields.Selector +} +type WatchRestrictions struct { + Labels labels.Selector + Fields fields.Selector + ResourceVersion string +} + +type Action interface { + GetNamespace() string + GetVerb() string + GetResource() schema.GroupVersionResource + GetSubresource() string + Matches(verb, resource string) bool +} + +type GenericAction interface { + Action + GetValue() interface{} +} + +type GetAction interface { + Action + GetName() string +} + +type ListAction interface { + Action + GetListRestrictions() ListRestrictions +} + +type CreateAction interface { + Action + GetObject() runtime.Object +} + +type UpdateAction interface { + Action + GetObject() runtime.Object +} + +type DeleteAction interface { + Action + GetName() string +} + +type WatchAction interface { + Action + GetWatchRestrictions() WatchRestrictions +} + +type ProxyGetAction interface { + Action + GetScheme() string + GetName() string + GetPort() string + GetPath() string + GetParams() map[string]string +} + +type ActionImpl struct { + Namespace string + Verb string + Resource schema.GroupVersionResource + Subresource string +} + +func (a ActionImpl) GetNamespace() string { + return a.Namespace +} +func (a ActionImpl) GetVerb() string { + return a.Verb +} +func (a ActionImpl) GetResource() schema.GroupVersionResource { + return a.Resource +} +func (a ActionImpl) GetSubresource() string { + return a.Subresource +} +func (a ActionImpl) Matches(verb, resource string) bool { + return strings.ToLower(verb) == strings.ToLower(a.Verb) && + strings.ToLower(resource) == strings.ToLower(a.Resource.Resource) +} + +type GenericActionImpl struct { + ActionImpl + Value interface{} +} + +func (a GenericActionImpl) GetValue() interface{} { + return a.Value +} + +type GetActionImpl struct { + ActionImpl + Name string +} + +func (a GetActionImpl) GetName() string { + return a.Name +} + +type ListActionImpl struct { + ActionImpl + ListRestrictions ListRestrictions +} + +func (a ListActionImpl) GetListRestrictions() ListRestrictions { + return a.ListRestrictions +} + +type CreateActionImpl struct { + ActionImpl + Object runtime.Object +} + +func (a CreateActionImpl) GetObject() runtime.Object { + return a.Object +} + +type UpdateActionImpl struct { + ActionImpl + Object runtime.Object +} + +func (a UpdateActionImpl) GetObject() runtime.Object { + return a.Object +} + +type PatchActionImpl struct { + ActionImpl + Name string + Patch []byte +} + +func (a PatchActionImpl) GetName() string { + return a.Name +} + +func (a PatchActionImpl) GetPatch() []byte { + return a.Patch +} + +type DeleteActionImpl struct { + ActionImpl + 
Name string +} + +func (a DeleteActionImpl) GetName() string { + return a.Name +} + +type DeleteCollectionActionImpl struct { + ActionImpl + ListRestrictions ListRestrictions +} + +func (a DeleteCollectionActionImpl) GetListRestrictions() ListRestrictions { + return a.ListRestrictions +} + +type WatchActionImpl struct { + ActionImpl + WatchRestrictions WatchRestrictions +} + +func (a WatchActionImpl) GetWatchRestrictions() WatchRestrictions { + return a.WatchRestrictions +} + +type ProxyGetActionImpl struct { + ActionImpl + Scheme string + Name string + Port string + Path string + Params map[string]string +} + +func (a ProxyGetActionImpl) GetScheme() string { + return a.Scheme +} + +func (a ProxyGetActionImpl) GetName() string { + return a.Name +} + +func (a ProxyGetActionImpl) GetPort() string { + return a.Port +} + +func (a ProxyGetActionImpl) GetPath() string { + return a.Path +} + +func (a ProxyGetActionImpl) GetParams() map[string]string { + return a.Params +} diff --git a/vendor/k8s.io/client-go/testing/fake.go b/vendor/k8s.io/client-go/testing/fake.go new file mode 100644 index 0000000000..da47b23b95 --- /dev/null +++ b/vendor/k8s.io/client-go/testing/fake.go @@ -0,0 +1,259 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + "sync" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/version" + "k8s.io/apimachinery/pkg/watch" + kubeversion "k8s.io/client-go/pkg/version" + restclient "k8s.io/client-go/rest" +) + +// Fake implements client.Interface. Meant to be embedded into a struct to get +// a default implementation. This makes faking out just the method you want to +// test easier. +type Fake struct { + sync.RWMutex + actions []Action // these may be castable to other types, but "Action" is the minimum + + // ReactionChain is the list of reactors that will be attempted for every + // request in the order they are tried. + ReactionChain []Reactor + // WatchReactionChain is the list of watch reactors that will be attempted + // for every request in the order they are tried. + WatchReactionChain []WatchReactor + // ProxyReactionChain is the list of proxy reactors that will be attempted + // for every request in the order they are tried. + ProxyReactionChain []ProxyReactor + + Resources []*metav1.APIResourceList +} + +// Reactor is an interface to allow the composition of reaction functions. +type Reactor interface { + // Handles indicates whether or not this Reactor deals with a given + // action. + Handles(action Action) bool + // React handles the action and returns results. It may choose to + // delegate by indicated handled=false. + React(action Action) (handled bool, ret runtime.Object, err error) +} + +// WatchReactor is an interface to allow the composition of watch functions. +type WatchReactor interface { + // Handles indicates whether or not this Reactor deals with a given + // action. 
+ Handles(action Action) bool + // React handles a watch action and returns results. It may choose to + // delegate by indicating handled=false. + React(action Action) (handled bool, ret watch.Interface, err error) +} + +// ProxyReactor is an interface to allow the composition of proxy get +// functions. +type ProxyReactor interface { + // Handles indicates whether or not this Reactor deals with a given + // action. + Handles(action Action) bool + // React handles a watch action and returns results. It may choose to + // delegate by indicating handled=false. + React(action Action) (handled bool, ret restclient.ResponseWrapper, err error) +} + +// ReactionFunc is a function that returns an object or error for a given +// Action. If "handled" is false, then the test client will ignore the +// results and continue to the next ReactionFunc. A ReactionFunc can describe +// reactions on subresources by testing the result of the action's +// GetSubresource() method. +type ReactionFunc func(action Action) (handled bool, ret runtime.Object, err error) + +// WatchReactionFunc is a function that returns a watch interface. If +// "handled" is false, then the test client will ignore the results and +// continue to the next ReactionFunc. +type WatchReactionFunc func(action Action) (handled bool, ret watch.Interface, err error) + +// ProxyReactionFunc is a function that returns a ResponseWrapper interface +// for a given Action. If "handled" is false, then the test client will +// ignore the results and continue to the next ProxyReactionFunc. +type ProxyReactionFunc func(action Action) (handled bool, ret restclient.ResponseWrapper, err error) + +// AddReactor appends a reactor to the end of the chain. +func (c *Fake) AddReactor(verb, resource string, reaction ReactionFunc) { + c.ReactionChain = append(c.ReactionChain, &SimpleReactor{verb, resource, reaction}) +} + +// PrependReactor adds a reactor to the beginning of the chain. +func (c *Fake) PrependReactor(verb, resource string, reaction ReactionFunc) { + c.ReactionChain = append([]Reactor{&SimpleReactor{verb, resource, reaction}}, c.ReactionChain...) +} + +// AddWatchReactor appends a reactor to the end of the chain. +func (c *Fake) AddWatchReactor(resource string, reaction WatchReactionFunc) { + c.WatchReactionChain = append(c.WatchReactionChain, &SimpleWatchReactor{resource, reaction}) +} + +// PrependWatchReactor adds a reactor to the beginning of the chain. +func (c *Fake) PrependWatchReactor(resource string, reaction WatchReactionFunc) { + c.WatchReactionChain = append([]WatchReactor{&SimpleWatchReactor{resource, reaction}}, c.WatchReactionChain...) +} + +// AddProxyReactor appends a reactor to the end of the chain. +func (c *Fake) AddProxyReactor(resource string, reaction ProxyReactionFunc) { + c.ProxyReactionChain = append(c.ProxyReactionChain, &SimpleProxyReactor{resource, reaction}) +} + +// PrependProxyReactor adds a reactor to the beginning of the chain. +func (c *Fake) PrependProxyReactor(resource string, reaction ProxyReactionFunc) { + c.ProxyReactionChain = append([]ProxyReactor{&SimpleProxyReactor{resource, reaction}}, c.ProxyReactionChain...) +} + +// Invokes records the provided Action and then invokes the ReactionFunc that +// handles the action if one exists. defaultReturnObj is expected to be of the +// same type a normal call would return. 
+func (c *Fake) Invokes(action Action, defaultReturnObj runtime.Object) (runtime.Object, error) { + c.Lock() + defer c.Unlock() + + c.actions = append(c.actions, action) + for _, reactor := range c.ReactionChain { + if !reactor.Handles(action) { + continue + } + + handled, ret, err := reactor.React(action) + if !handled { + continue + } + + return ret, err + } + + return defaultReturnObj, nil +} + +// InvokesWatch records the provided Action and then invokes the ReactionFunc +// that handles the action if one exists. +func (c *Fake) InvokesWatch(action Action) (watch.Interface, error) { + c.Lock() + defer c.Unlock() + + c.actions = append(c.actions, action) + for _, reactor := range c.WatchReactionChain { + if !reactor.Handles(action) { + continue + } + + handled, ret, err := reactor.React(action) + if !handled { + continue + } + + return ret, err + } + + return nil, fmt.Errorf("unhandled watch: %#v", action) +} + +// InvokesProxy records the provided Action and then invokes the ReactionFunc +// that handles the action if one exists. +func (c *Fake) InvokesProxy(action Action) restclient.ResponseWrapper { + c.Lock() + defer c.Unlock() + + c.actions = append(c.actions, action) + for _, reactor := range c.ProxyReactionChain { + if !reactor.Handles(action) { + continue + } + + handled, ret, err := reactor.React(action) + if !handled || err != nil { + continue + } + + return ret + } + + return nil +} + +// ClearActions clears the history of actions called on the fake client. +func (c *Fake) ClearActions() { + c.Lock() + defer c.Unlock() + + c.actions = make([]Action, 0) +} + +// Actions returns a chronologically ordered slice fake actions called on the +// fake client. +func (c *Fake) Actions() []Action { + c.RLock() + defer c.RUnlock() + fa := make([]Action, len(c.actions)) + copy(fa, c.actions) + return fa +} + +// TODO: this probably should be moved to somewhere else. +type FakeDiscovery struct { + *Fake +} + +func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + action := ActionImpl{ + Verb: "get", + Resource: schema.GroupVersionResource{Resource: "resource"}, + } + c.Invokes(action, nil) + for _, rl := range c.Resources { + if rl.GroupVersion == groupVersion { + return rl, nil + } + } + + return nil, fmt.Errorf("GroupVersion %q not found", groupVersion) +} + +func (c *FakeDiscovery) ServerResources() ([]*metav1.APIResourceList, error) { + action := ActionImpl{ + Verb: "get", + Resource: schema.GroupVersionResource{Resource: "resource"}, + } + c.Invokes(action, nil) + return c.Resources, nil +} + +func (c *FakeDiscovery) ServerGroups() (*metav1.APIGroupList, error) { + return nil, nil +} + +func (c *FakeDiscovery) ServerVersion() (*version.Info, error) { + action := ActionImpl{} + action.Verb = "get" + action.Resource = schema.GroupVersionResource{Resource: "version"} + + c.Invokes(action, nil) + versionInfo := kubeversion.Get() + return &versionInfo, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/testing/core/fixture.go b/vendor/k8s.io/client-go/testing/fixture.go similarity index 77% rename from vendor/k8s.io/kubernetes/pkg/client/testing/core/fixture.go rename to vendor/k8s.io/client-go/testing/fixture.go index 9bc6a27807..f685853574 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/testing/core/fixture.go +++ b/vendor/k8s.io/client-go/testing/fixture.go @@ -14,19 +14,20 @@ See the License for the specific language governing permissions and limitations under the License. 
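// Illustrative sketch (not part of the vendored files): one plausible way a unit
// test could drive the Fake client and reactor chain defined in fake.go above.
// The package alias, GroupVersionResource, namespace and object names are
// hypothetical placeholders, not taken from this repository.
package exampletest

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	clienttesting "k8s.io/client-go/testing"
)

func exampleFakeReactor() error {
	f := &clienttesting.Fake{}
	gvr := schema.GroupVersionResource{Version: "v1", Resource: "pods"}

	// Handle every "delete" on "pods" by returning a canned error, so the code
	// under test exercises its failure path.
	f.AddReactor("delete", "pods", func(a clienttesting.Action) (bool, runtime.Object, error) {
		return true, nil, fmt.Errorf("simulated failure for verb %q", a.GetVerb())
	})

	// Invokes records the action and runs the first reactor that handles it.
	_, err := f.Invokes(clienttesting.NewDeleteAction(gvr, "default", "my-pod"), nil)

	// f.Actions() now holds the recorded delete and can be asserted on.
	_ = f.Actions()
	return err
}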
*/ -package core +package testing import ( "fmt" "sync" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/watch" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apimachinery/registered" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" ) // ObjectTracker keeps track of objects. It is intended to be used to @@ -38,19 +39,22 @@ type ObjectTracker interface { Add(obj runtime.Object) error // Get retrieves the object by its kind, namespace and name. - Get(gvk unversioned.GroupVersionKind, ns, name string) (runtime.Object, error) + Get(gvk schema.GroupVersionKind, ns, name string) (runtime.Object, error) - // Update updates an existing object in the tracker. - Update(obj runtime.Object) error + // Create adds an object to the tracker in the specified namespace. + Create(obj runtime.Object, ns string) error + + // Update updates an existing object in the tracker in the specified namespace. + Update(obj runtime.Object, ns string) error // List retrieves all objects of a given kind in the given // namespace. Only non-List kinds are accepted. - List(gvk unversioned.GroupVersionKind, ns string) (runtime.Object, error) + List(gvk schema.GroupVersionKind, ns string) (runtime.Object, error) // Delete deletes an existing object from the tracker. If object // didn't exist in the tracker prior to deletion, Delete returns // no error. - Delete(gvk unversioned.GroupVersionKind, ns, name string) error + Delete(gvk schema.GroupVersionKind, ns, name string) error } // ObjectScheme abstracts the implementation of common operations on objects. @@ -101,12 +105,12 @@ func ObjectReaction(tracker ObjectTracker, mapper meta.RESTMapper) ReactionFunc return true, nil, err } if action.GetSubresource() == "" { - err = tracker.Add(action.GetObject()) + err = tracker.Create(action.GetObject(), ns) } else { // TODO: Currently we're handling subresource creation as an update // on the enclosing resource. This works for some subresources but // might not be generic enough. - err = tracker.Update(action.GetObject()) + err = tracker.Update(action.GetObject(), ns) } if err != nil { return true, nil, err @@ -119,7 +123,7 @@ func ObjectReaction(tracker ObjectTracker, mapper meta.RESTMapper) ReactionFunc if err != nil { return true, nil, err } - err = tracker.Update(action.GetObject()) + err = tracker.Update(action.GetObject(), ns) if err != nil { return true, nil, err } @@ -140,25 +144,27 @@ func ObjectReaction(tracker ObjectTracker, mapper meta.RESTMapper) ReactionFunc } type tracker struct { - scheme ObjectScheme - decoder runtime.Decoder - lock sync.RWMutex - objects map[unversioned.GroupVersionKind][]runtime.Object + registry *registered.APIRegistrationManager + scheme ObjectScheme + decoder runtime.Decoder + lock sync.RWMutex + objects map[schema.GroupVersionKind][]runtime.Object } var _ ObjectTracker = &tracker{} // NewObjectTracker returns an ObjectTracker that can be used to keep track // of objects for the fake clientset. Mostly useful for unit tests. 
-func NewObjectTracker(scheme ObjectScheme, decoder runtime.Decoder) ObjectTracker { +func NewObjectTracker(registry *registered.APIRegistrationManager, scheme ObjectScheme, decoder runtime.Decoder) ObjectTracker { return &tracker{ - scheme: scheme, - decoder: decoder, - objects: make(map[unversioned.GroupVersionKind][]runtime.Object), + registry: registry, + scheme: scheme, + decoder: decoder, + objects: make(map[schema.GroupVersionKind][]runtime.Object), } } -func (t *tracker) List(gvk unversioned.GroupVersionKind, ns string) (runtime.Object, error) { +func (t *tracker) List(gvk schema.GroupVersionKind, ns string) (runtime.Object, error) { // Heuristic for list kind: original kind + List suffix. Might // not always be true but this tracker has a pretty limited // understanding of the actual API model. @@ -195,12 +201,12 @@ func (t *tracker) List(gvk unversioned.GroupVersionKind, ns string) (runtime.Obj return list, nil } -func (t *tracker) Get(gvk unversioned.GroupVersionKind, ns, name string) (runtime.Object, error) { - if err := checkNamespace(gvk, ns); err != nil { +func (t *tracker) Get(gvk schema.GroupVersionKind, ns, name string) (runtime.Object, error) { + if err := checkNamespace(t.registry, gvk, ns); err != nil { return nil, err } - errNotFound := errors.NewNotFound(unversioned.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, name) + errNotFound := errors.NewNotFound(schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, name) t.lock.RLock() defer t.lock.RUnlock() @@ -229,11 +235,11 @@ func (t *tracker) Get(gvk unversioned.GroupVersionKind, ns, name string) (runtim return nil, err } - if status, ok := obj.(*unversioned.Status); ok { + if status, ok := obj.(*metav1.Status); ok { if status.Details != nil { status.Details.Kind = gvk.Kind } - if status.Status != unversioned.StatusSuccess { + if status.Status != metav1.StatusSuccess { return nil, &errors.StatusError{ErrStatus: *status} } } @@ -242,18 +248,26 @@ func (t *tracker) Get(gvk unversioned.GroupVersionKind, ns, name string) (runtim } func (t *tracker) Add(obj runtime.Object) error { - return t.add(obj, false) + if meta.IsListType(obj) { + return t.addList(obj, false) + } + + objMeta, err := meta.Accessor(obj) + if err != nil { + return err + } + return t.add(obj, objMeta.GetNamespace(), false) } -func (t *tracker) Update(obj runtime.Object) error { - return t.add(obj, true) +func (t *tracker) Create(obj runtime.Object, ns string) error { + return t.add(obj, ns, false) } -func (t *tracker) add(obj runtime.Object, replaceExisting bool) error { - if meta.IsListType(obj) { - return t.addList(obj, replaceExisting) - } +func (t *tracker) Update(obj runtime.Object, ns string) error { + return t.add(obj, ns, true) +} +func (t *tracker) add(obj runtime.Object, ns string, replaceExisting bool) error { gvks, _, err := t.scheme.ObjectKinds(obj) if err != nil { return err @@ -266,7 +280,7 @@ func (t *tracker) add(obj runtime.Object, replaceExisting bool) error { defer t.lock.Unlock() for _, gvk := range gvks { - gr := unversioned.GroupResource{Group: gvk.Group, Resource: gvk.Kind} + gr := schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind} // To avoid the object from being accidentally modified by caller // after it's been added to the tracker, we always store the deep @@ -276,7 +290,7 @@ func (t *tracker) add(obj runtime.Object, replaceExisting bool) error { return err } - if status, ok := obj.(*unversioned.Status); ok && status.Details != nil { + if status, ok := obj.(*metav1.Status); ok && status.Details != nil { 
gvk.Kind = status.Details.Kind } @@ -285,7 +299,17 @@ func (t *tracker) add(obj runtime.Object, replaceExisting bool) error { return err } - if err := checkNamespace(gvk, newMeta.GetNamespace()); err != nil { + // Propagate namespace to the new object if hasn't already been set. + if len(newMeta.GetNamespace()) == 0 { + newMeta.SetNamespace(ns) + } + + if ns != newMeta.GetNamespace() { + msg := fmt.Sprintf("request namespace does not match object namespace, request: %q object: %q", ns, newMeta.GetNamespace()) + return errors.NewBadRequest(msg) + } + + if err := checkNamespace(t.registry, gvk, newMeta.GetNamespace()); err != nil { return err } @@ -324,7 +348,11 @@ func (t *tracker) addList(obj runtime.Object, replaceExisting bool) error { return errs[0] } for _, obj := range list { - err := t.add(obj, replaceExisting) + objMeta, err := meta.Accessor(obj) + if err != nil { + return err + } + err = t.add(obj, objMeta.GetNamespace(), replaceExisting) if err != nil { return err } @@ -332,8 +360,8 @@ func (t *tracker) addList(obj runtime.Object, replaceExisting bool) error { return nil } -func (t *tracker) Delete(gvk unversioned.GroupVersionKind, ns, name string) error { - if err := checkNamespace(gvk, ns); err != nil { +func (t *tracker) Delete(gvk schema.GroupVersionKind, ns, name string) error { + if err := checkNamespace(t.registry, gvk, ns); err != nil { return err } @@ -358,7 +386,7 @@ func (t *tracker) Delete(gvk unversioned.GroupVersionKind, ns, name string) erro return nil } - return errors.NewNotFound(unversioned.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, name) + return errors.NewNotFound(schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, name) } // filterByNamespaceAndName returns all objects in the collection that @@ -387,8 +415,8 @@ func filterByNamespaceAndName(objs []runtime.Object, ns, name string) ([]runtime // checkNamespace makes sure that the scope of gvk matches ns. It // returns an error if namespace is empty but gvk is a namespaced // kind, or if ns is non-empty and gvk is a namespaced kind. -func checkNamespace(gvk unversioned.GroupVersionKind, ns string) error { - group, err := registered.Group(gvk.Group) +func checkNamespace(registry *registered.APIRegistrationManager, gvk schema.GroupVersionKind, ns string) error { + group, err := registry.Group(gvk.Group) if err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/auth/clientauth.go b/vendor/k8s.io/client-go/tools/auth/clientauth.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/client/unversioned/auth/clientauth.go rename to vendor/k8s.io/client-go/tools/auth/clientauth.go index 128597f93a..2213b98781 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/auth/clientauth.go +++ b/vendor/k8s.io/client-go/tools/auth/clientauth.go @@ -68,7 +68,7 @@ import ( "io/ioutil" "os" - "k8s.io/kubernetes/pkg/client/restclient" + restclient "k8s.io/client-go/rest" ) // Info holds Kubernetes API authorization config. 
It is intended diff --git a/vendor/k8s.io/client-go/tools/cache/OWNERS b/vendor/k8s.io/client-go/tools/cache/OWNERS new file mode 100755 index 0000000000..e923c77092 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/OWNERS @@ -0,0 +1,41 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- nikhiljindal +- bprashanth +- erictune +- davidopp +- pmorie +- kargakis +- janetkuo +- justinsb +- eparis +- soltysh +- jsafrane +- dims +- madhusudancs +- hongchaodeng +- krousey +- markturansky +- fgrzadkowski +- xiang90 +- mml +- ingvagabund +- resouer +- jessfraz +- david-mcmahon +- mfojtik +- '249043822' +- lixiaobing10051267 +- ddysher +- mqliang +- feihujiang +- sdminonne diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go similarity index 88% rename from vendor/k8s.io/kubernetes/pkg/client/cache/controller.go rename to vendor/k8s.io/client-go/tools/cache/controller.go index 5ab84b8dd8..06bc04f85e 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -20,9 +20,10 @@ import ( "sync" "time" - "k8s.io/kubernetes/pkg/runtime" - utilruntime "k8s.io/kubernetes/pkg/util/runtime" - "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/clock" ) // Config contains all the settings for a Controller. @@ -50,6 +51,11 @@ type Config struct { // queue. FullResyncPeriod time.Duration + // ShouldResync, if specified, is invoked when the controller's reflector determines the next + // periodic sync should occur. If this returns true, it means the reflector should proceed with + // the resync. + ShouldResync ShouldResyncFunc + // If true, when Process() returns an error, re-enqueue the object. // TODO: add interface to let you inject a delay/backoff or drop // the object completely if desired. Pass the object in @@ -57,26 +63,33 @@ type Config struct { RetryOnError bool } +// ShouldResyncFunc is a type of function that indicates if a reflector should perform a +// resync or not. It can be used by a shared informer to support multiple event handlers with custom +// resync periods. +type ShouldResyncFunc func() bool + // ProcessFunc processes a single object. type ProcessFunc func(obj interface{}) error // Controller is a generic controller framework. -type Controller struct { +type controller struct { config Config reflector *Reflector reflectorMutex sync.RWMutex + clock clock.Clock } -// TODO make the "Controller" private, and convert all references to use ControllerInterface instead -type ControllerInterface interface { +type Controller interface { Run(stopCh <-chan struct{}) HasSynced() bool + LastSyncResourceVersion() string } // New makes a new Controller from the given Config. -func New(c *Config) *Controller { - ctlr := &Controller{ +func New(c *Config) Controller { + ctlr := &controller{ config: *c, + clock: &clock.RealClock{}, } return ctlr } @@ -84,14 +97,20 @@ func New(c *Config) *Controller { // Run begins processing items, and will continue until a value is sent down stopCh. // It's an error to call Run more than once. // Run blocks; call via go. 
-func (c *Controller) Run(stopCh <-chan struct{}) { +func (c *controller) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() + go func() { + <-stopCh + c.config.Queue.Close() + }() r := NewReflector( c.config.ListerWatcher, c.config.ObjectType, c.config.Queue, c.config.FullResyncPeriod, ) + r.ShouldResync = c.config.ShouldResync + r.clock = c.clock c.reflectorMutex.Lock() c.reflector = r @@ -103,18 +122,15 @@ func (c *Controller) Run(stopCh <-chan struct{}) { } // Returns true once this controller has completed an initial resource listing -func (c *Controller) HasSynced() bool { +func (c *controller) HasSynced() bool { return c.config.Queue.HasSynced() } -// Requeue adds the provided object back into the queue if it does not already exist. -func (c *Controller) Requeue(obj interface{}) error { - return c.config.Queue.AddIfNotPresent(Deltas{ - Delta{ - Type: Sync, - Object: obj, - }, - }) +func (c *controller) LastSyncResourceVersion() string { + if c.reflector == nil { + return "" + } + return c.reflector.LastSyncResourceVersion() } // processLoop drains the work queue. @@ -126,10 +142,13 @@ func (c *Controller) Requeue(obj interface{}) error { // actually exit when the controller is stopped. Or just give up on this stuff // ever being stoppable. Converting this whole package to use Context would // also be helpful. -func (c *Controller) processLoop() { +func (c *controller) processLoop() { for { obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process)) if err != nil { + if err == FIFOClosedError { + return + } if c.config.RetryOnError { // This is the safe way to re-enqueue. c.config.Queue.AddIfNotPresent(obj) @@ -218,7 +237,7 @@ func NewInformer( objType runtime.Object, resyncPeriod time.Duration, h ResourceEventHandler, -) (Store, *Controller) { +) (Store, Controller) { // This will hold the client state, as we know it. clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc) @@ -284,7 +303,7 @@ func NewIndexerInformer( resyncPeriod time.Duration, h ResourceEventHandler, indexers Indexers, -) (Indexer, *Controller) { +) (Indexer, Controller) { // This will hold the client state, as we know it. clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go similarity index 94% rename from vendor/k8s.io/kubernetes/pkg/client/cache/delta_fifo.go rename to vendor/k8s.io/client-go/tools/cache/delta_fifo.go index 1d26dcde14..a71db60488 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -21,7 +21,7 @@ import ( "fmt" "sync" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/sets" "github.com/golang/glog" ) @@ -118,6 +118,12 @@ type DeltaFIFO struct { // purpose of figuring out which items have been deleted // when Replace() or Delete() is called. knownObjects KeyListerGetter + + // Indication the queue is closed. + // Used to indicate a queue is closed so a control loop can exit when a queue is empty. + // Currently, not used to gate any of CRED operations. + closed bool + closedLock sync.Mutex } var ( @@ -132,6 +138,14 @@ var ( ErrZeroLengthDeltasObject = errors.New("0 length Deltas object; can't get key") ) +// Close the queue. 
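// Illustrative sketch (not part of the vendored files): one way the reworked
// NewInformer, which now returns the Controller interface rather than
// *Controller, might be driven. The resource name, namespace and handler bodies
// are placeholders; ResourceEventHandlerFuncs is assumed from the same package.
package examplecache

import (
	"time"

	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/cache"
)

func exampleInformer(c cache.Getter, objType runtime.Object, stopCh <-chan struct{}) cache.Store {
	// List and watch "pods" in the "default" namespace via the typed REST client.
	lw := cache.NewListWatchFromClient(c, "pods", "default", fields.Everything())

	store, controller := cache.NewInformer(lw, objType, 30*time.Second,
		cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { /* react to adds */ },
			DeleteFunc: func(obj interface{}) { /* react to deletes */ },
		})

	go controller.Run(stopCh) // Run blocks until stopCh is closed
	return store              // the Store mirrors the objects seen by the informer
}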
+func (f *DeltaFIFO) Close() { + f.closedLock.Lock() + defer f.closedLock.Unlock() + f.closed = true + f.cond.Broadcast() +} + // KeyOf exposes f's keyFunc, but also detects the key of a Deltas object or // DeletedFinalStateUnknown objects. func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) { @@ -387,6 +401,16 @@ func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err err return d, exists, nil } +// Checks if the queue is closed +func (f *DeltaFIFO) IsClosed() bool { + f.closedLock.Lock() + defer f.closedLock.Unlock() + if f.closed { + return true + } + return false +} + // Pop blocks until an item is added to the queue, and then returns it. If // multiple items are ready, they are returned in the order in which they were // added/updated. The item is removed from the queue (and the store) before it @@ -404,6 +428,13 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { defer f.lock.Unlock() for { for len(f.queue) == 0 { + // When the queue is empty, invocation of Pop() is blocked until new item is enqueued. + // When Close() is called, the f.closed is set and the condition is broadcasted. + // Which causes this loop to continue and return from the Pop(). + if f.IsClosed() { + return nil, FIFOClosedError + } + f.cond.Wait() } id := f.queue[0] @@ -505,14 +536,12 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { // Resync will send a sync event for each item func (f *DeltaFIFO) Resync() error { - var keys []string - func() { - f.lock.RLock() - defer f.lock.RUnlock() - keys = f.knownObjects.ListKeys() - }() + f.lock.Lock() + defer f.lock.Unlock() + + keys := f.knownObjects.ListKeys() for _, k := range keys { - if err := f.syncKey(k); err != nil { + if err := f.syncKeyLocked(k); err != nil { return err } } @@ -522,6 +551,11 @@ func (f *DeltaFIFO) Resync() error { func (f *DeltaFIFO) syncKey(key string) error { f.lock.Lock() defer f.lock.Unlock() + + return f.syncKeyLocked(key) +} + +func (f *DeltaFIFO) syncKeyLocked(key string) error { obj, exists, err := f.knownObjects.GetByKey(key) if err != nil { glog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key) diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/doc.go b/vendor/k8s.io/client-go/tools/cache/doc.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/client/cache/doc.go rename to vendor/k8s.io/client-go/tools/cache/doc.go diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache.go rename to vendor/k8s.io/client-go/tools/cache/expiration_cache.go index 88ef89b3ee..befac1c758 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache.go +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go @@ -21,7 +21,7 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/util/clock" + "k8s.io/client-go/util/clock" ) // ExpirationCache implements the store interface diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache_fakes.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache_fakes.go rename to vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go index da180b77f1..ab2f57687e 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache_fakes.go +++ 
b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go @@ -17,8 +17,8 @@ limitations under the License. package cache import ( - "k8s.io/kubernetes/pkg/util/clock" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/util/clock" ) type fakeThreadSafeMap struct { diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/fake_custom_store.go b/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/client/cache/fake_custom_store.go rename to vendor/k8s.io/client-go/tools/cache/fake_custom_store.go diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go similarity index 90% rename from vendor/k8s.io/kubernetes/pkg/client/cache/fifo.go rename to vendor/k8s.io/client-go/tools/cache/fifo.go index a6d5e0ab83..3f6e2a9480 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -17,9 +17,10 @@ limitations under the License. package cache import ( + "errors" "sync" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/sets" ) // PopProcessFunc is passed to Pop() method of Queue interface. @@ -33,6 +34,8 @@ type ErrRequeue struct { Err error } +var FIFOClosedError error = errors.New("DeltaFIFO: manipulating with closed queue") + func (e ErrRequeue) Error() string { if e.Err == nil { return "the popped item should be requeued without returning an error" @@ -58,6 +61,9 @@ type Queue interface { // Return true if the first batch of items has been popped HasSynced() bool + + // Close queue + Close() } // Helper function for popping from Queue. @@ -100,12 +106,26 @@ type FIFO struct { // keyFunc is used to make the key used for queued item insertion and retrieval, and // should be deterministic. keyFunc KeyFunc + + // Indication the queue is closed. + // Used to indicate a queue is closed so a control loop can exit when a queue is empty. + // Currently, not used to gate any of CRED operations. + closed bool + closedLock sync.Mutex } var ( _ = Queue(&FIFO{}) // FIFO is a Queue ) +// Close the queue. +func (f *FIFO) Close() { + f.closedLock.Lock() + defer f.closedLock.Unlock() + f.closed = true + f.cond.Broadcast() +} + // Return true if an Add/Update/Delete/AddIfNotPresent are called first, // or an Update called first but the first batch of items inserted by Replace() has been popped func (f *FIFO) HasSynced() bool { @@ -222,6 +242,16 @@ func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) { return item, exists, nil } +// Checks if the queue is closed +func (f *FIFO) IsClosed() bool { + f.closedLock.Lock() + defer f.closedLock.Unlock() + if f.closed { + return true + } + return false +} + // Pop waits until an item is ready and processes it. If multiple items are // ready, they are returned in the order in which they were added/updated. // The item is removed from the queue (and the store) before it is processed, @@ -233,6 +263,13 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { defer f.lock.Unlock() for { for len(f.queue) == 0 { + // When the queue is empty, invocation of Pop() is blocked until new item is enqueued. + // When Close() is called, the f.closed is set and the condition is broadcasted. + // Which causes this loop to continue and return from the Pop(). 
+ if f.IsClosed() { + return nil, FIFOClosedError + } + f.cond.Wait() } id := f.queue[0] diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/index.go b/vendor/k8s.io/client-go/tools/cache/index.go similarity index 97% rename from vendor/k8s.io/kubernetes/pkg/client/cache/index.go rename to vendor/k8s.io/client-go/tools/cache/index.go index 218f3c8a53..42fc6c7a22 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/index.go +++ b/vendor/k8s.io/client-go/tools/cache/index.go @@ -19,8 +19,8 @@ package cache import ( "fmt" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/util/sets" ) // Indexer is a storage interface that lets you list objects using multiple indexing functions diff --git a/vendor/k8s.io/client-go/tools/cache/listers.go b/vendor/k8s.io/client-go/tools/cache/listers.go new file mode 100644 index 0000000000..27d51a6b38 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/listers.go @@ -0,0 +1,160 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// AppendFunc is used to add a matching item to whatever list the caller is using +type AppendFunc func(interface{}) + +func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { + for _, m := range store.List() { + metadata, err := meta.Accessor(m) + if err != nil { + return err + } + if selector.Matches(labels.Set(metadata.GetLabels())) { + appendFn(m) + } + } + return nil +} + +func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error { + if namespace == metav1.NamespaceAll { + for _, m := range indexer.List() { + metadata, err := meta.Accessor(m) + if err != nil { + return err + } + if selector.Matches(labels.Set(metadata.GetLabels())) { + appendFn(m) + } + } + return nil + } + + items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace}) + if err != nil { + // Ignore error; do slow search without index. 
+ glog.Warningf("can not retrieve list of objects using index : %v", err) + for _, m := range indexer.List() { + metadata, err := meta.Accessor(m) + if err != nil { + return err + } + if metadata.GetNamespace() == namespace && selector.Matches(labels.Set(metadata.GetLabels())) { + appendFn(m) + } + + } + return nil + } + for _, m := range items { + metadata, err := meta.Accessor(m) + if err != nil { + return err + } + if selector.Matches(labels.Set(metadata.GetLabels())) { + appendFn(m) + } + } + + return nil +} + +// GenericLister is a lister skin on a generic Indexer +type GenericLister interface { + // List will return all objects across namespaces + List(selector labels.Selector) (ret []runtime.Object, err error) + // Get will attempt to retrieve assuming that name==key + Get(name string) (runtime.Object, error) + // ByNamespace will give you a GenericNamespaceLister for one namespace + ByNamespace(namespace string) GenericNamespaceLister +} + +// GenericNamespaceLister is a lister skin on a generic Indexer +type GenericNamespaceLister interface { + // List will return all objects in this namespace + List(selector labels.Selector) (ret []runtime.Object, err error) + // Get will attempt to retrieve by namespace and name + Get(name string) (runtime.Object, error) +} + +func NewGenericLister(indexer Indexer, resource schema.GroupResource) GenericLister { + return &genericLister{indexer: indexer, resource: resource} +} + +type genericLister struct { + indexer Indexer + resource schema.GroupResource +} + +func (s *genericLister) List(selector labels.Selector) (ret []runtime.Object, err error) { + err = ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(runtime.Object)) + }) + return ret, err +} + +func (s *genericLister) ByNamespace(namespace string) GenericNamespaceLister { + return &genericNamespaceLister{indexer: s.indexer, namespace: namespace, resource: s.resource} +} + +func (s *genericLister) Get(name string) (runtime.Object, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(s.resource, name) + } + return obj.(runtime.Object), nil +} + +type genericNamespaceLister struct { + indexer Indexer + namespace string + resource schema.GroupResource +} + +func (s *genericNamespaceLister) List(selector labels.Selector) (ret []runtime.Object, err error) { + err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(runtime.Object)) + }) + return ret, err +} + +func (s *genericNamespaceLister) Get(name string) (runtime.Object, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(s.resource, name) + } + return obj.(runtime.Object), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/listwatch.go b/vendor/k8s.io/client-go/tools/cache/listwatch.go similarity index 77% rename from vendor/k8s.io/kubernetes/pkg/client/cache/listwatch.go rename to vendor/k8s.io/client-go/tools/cache/listwatch.go index 3956ccb1e2..af01d47457 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/listwatch.go +++ b/vendor/k8s.io/client-go/tools/cache/listwatch.go @@ -19,28 +19,28 @@ package cache import ( "time" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/watch" + 
"k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" ) // ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource. type ListerWatcher interface { // List should return a list type object; the Items field will be extracted, and the // ResourceVersion field will be used to start the watch in the right place. - List(options api.ListOptions) (runtime.Object, error) + List(options metav1.ListOptions) (runtime.Object, error) // Watch should begin a watch at the specified version. - Watch(options api.ListOptions) (watch.Interface, error) + Watch(options metav1.ListOptions) (watch.Interface, error) } // ListFunc knows how to list resources -type ListFunc func(options api.ListOptions) (runtime.Object, error) +type ListFunc func(options metav1.ListOptions) (runtime.Object, error) // WatchFunc knows how to watch resources -type WatchFunc func(options api.ListOptions) (watch.Interface, error) +type WatchFunc func(options metav1.ListOptions) (watch.Interface, error) // ListWatch knows how to list and watch a set of apiserver resources. It satisfies the ListerWatcher interface. // It is a convenience function for users of NewReflector, etc. @@ -57,28 +57,28 @@ type Getter interface { // NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace and field selector. func NewListWatchFromClient(c Getter, resource string, namespace string, fieldSelector fields.Selector) *ListWatch { - listFunc := func(options api.ListOptions) (runtime.Object, error) { + listFunc := func(options metav1.ListOptions) (runtime.Object, error) { return c.Get(). Namespace(namespace). Resource(resource). - VersionedParams(&options, api.ParameterCodec). + VersionedParams(&options, metav1.ParameterCodec). FieldsSelectorParam(fieldSelector). Do(). Get() } - watchFunc := func(options api.ListOptions) (watch.Interface, error) { + watchFunc := func(options metav1.ListOptions) (watch.Interface, error) { + options.Watch = true return c.Get(). - Prefix("watch"). Namespace(namespace). Resource(resource). - VersionedParams(&options, api.ParameterCodec). + VersionedParams(&options, metav1.ParameterCodec). FieldsSelectorParam(fieldSelector). 
Watch() } return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc} } -func timeoutFromListOptions(options api.ListOptions) time.Duration { +func timeoutFromListOptions(options metav1.ListOptions) time.Duration { if options.TimeoutSeconds != nil { return time.Duration(*options.TimeoutSeconds) * time.Second } @@ -86,12 +86,12 @@ func timeoutFromListOptions(options api.ListOptions) time.Duration { } // List a set of apiserver resources -func (lw *ListWatch) List(options api.ListOptions) (runtime.Object, error) { +func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) { return lw.ListFunc(options) } // Watch a set of apiserver resources -func (lw *ListWatch) Watch(options api.ListOptions) (watch.Interface, error) { +func (lw *ListWatch) Watch(options metav1.ListOptions) (watch.Interface, error) { return lw.WatchFunc(options) } @@ -101,7 +101,7 @@ func ListWatchUntil(timeout time.Duration, lw ListerWatcher, conditions ...watch return nil, nil } - list, err := lw.List(api.ListOptions{}) + list, err := lw.List(metav1.ListOptions{}) if err != nil { return nil, err } @@ -153,7 +153,7 @@ func ListWatchUntil(timeout time.Duration, lw ListerWatcher, conditions ...watch } currResourceVersion := metaObj.GetResourceVersion() - watchInterface, err := lw.Watch(api.ListOptions{ResourceVersion: currResourceVersion}) + watchInterface, err := lw.Watch(metav1.ListOptions{ResourceVersion: currResourceVersion}) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/mutation_detector.go b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go similarity index 97% rename from vendor/k8s.io/kubernetes/pkg/client/cache/mutation_detector.go rename to vendor/k8s.io/client-go/tools/cache/mutation_detector.go index 11d0e6ffa2..cc7399114f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/mutation_detector.go +++ b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go @@ -24,9 +24,9 @@ import ( "sync" "time" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/diff" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/client-go/pkg/api" ) var mutationDetectionEnabled = false diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go similarity index 89% rename from vendor/k8s.io/kubernetes/pkg/client/cache/reflector.go rename to vendor/k8s.io/client-go/tools/cache/reflector.go index b4cd417f12..b43f43a5e7 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -34,13 +34,14 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - apierrs "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/runtime" - utilruntime "k8s.io/kubernetes/pkg/util/runtime" - "k8s.io/kubernetes/pkg/util/wait" - "k8s.io/kubernetes/pkg/watch" + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/util/clock" ) // Reflector watches a specified resource and causes all changes to be reflected in the given store. @@ -58,8 +59,9 @@ type Reflector struct { // the beginning of the next one. 
period time.Duration resyncPeriod time.Duration - // now() returns current time - exposed for testing purposes - now func() time.Time + ShouldResync func() bool + // clock allows tests to manipulate time + clock clock.Clock // lastSyncResourceVersion is the resource version token last // observed when doing a sync with the underlying store // it is thread safe, but not synchronized with the underlying store @@ -103,14 +105,14 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, expectedType: reflect.TypeOf(expectedType), period: time.Second, resyncPeriod: resyncPeriod, - now: time.Now, + clock: &clock.RealClock{}, } return r } // internalPackages are packages that ignored when creating a default reflector name. These packages are in the common // call chains to NewReflector, so they'd be low entropy names for reflectors -var internalPackages = []string{"kubernetes/pkg/client/cache/", "/runtime/asm_"} +var internalPackages = []string{"client-go/tools/cache/", "/runtime/asm_"} // getDefaultReflectorName walks back through the call stack until we find a caller from outside of the ignoredPackages // it returns back a shortpath/filename:line to aid in identification of this reflector when it starts logging @@ -149,8 +151,8 @@ func hasPackage(file string, ignoredPackages []string) bool { // trimPackagePrefix reduces duplicate values off the front of a package name. func trimPackagePrefix(file string) string { - if l := strings.LastIndex(file, "k8s.io/kubernetes/pkg/"); l >= 0 { - return file[l+len("k8s.io/kubernetes/"):] + if l := strings.LastIndex(file, "k8s.io/client-go/pkg/"); l >= 0 { + return file[l+len("k8s.io/client-go/"):] } if l := strings.LastIndex(file, "/src/"); l >= 0 { return file[l+5:] @@ -223,8 +225,8 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) { // always fail so we end up listing frequently. Then, if we don't // manually stop the timer, we could end up with many timers active // concurrently. - t := time.NewTimer(r.resyncPeriod) - return t.C, t.Stop + t := r.clock.NewTimer(r.resyncPeriod) + return t.C(), t.Stop } // ListAndWatch first lists all items and get the resource version at the moment of call, @@ -239,7 +241,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { // Explicitly set "0" as resource version - it's fine for the List() // to be served from cache and potentially be delayed relative to // etcd contents. Reflector framework will catch up via Watch() eventually. - options := api.ListOptions{ResourceVersion: "0"} + options := metav1.ListOptions{ResourceVersion: "0"} list, err := r.listerWatcher.List(options) if err != nil { return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err) @@ -270,10 +272,12 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { case <-cancelCh: return } - glog.V(4).Infof("%s: forcing resync", r.name) - if err := r.store.Resync(); err != nil { - resyncerrc <- err - return + if r.ShouldResync == nil || r.ShouldResync() { + glog.V(4).Infof("%s: forcing resync", r.name) + if err := r.store.Resync(); err != nil { + resyncerrc <- err + return + } } cleanup() resyncCh, cleanup = r.resyncChan() @@ -282,7 +286,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { for { timemoutseconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) - options = api.ListOptions{ + options = metav1.ListOptions{ ResourceVersion: resourceVersion, // We want to avoid situations of hanging watchers. 
Stop any watchers that do not + // receive any events within the timeout window. @@ -334,7 +338,7 @@ func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) err // watchHandler watches w and keeps *resourceVersion up to date. func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error { - start := time.Now() + start := r.clock.Now() eventCount := 0 // Stopping the watcher should be idempotent and if we return from this function there's no way @@ -367,14 +371,23 @@ loop: newResourceVersion := meta.GetResourceVersion() switch event.Type { case watch.Added: - r.store.Add(event.Object) + err := r.store.Add(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to add watch event object (%#v) to store: %v", r.name, event.Object, err)) + } case watch.Modified: - r.store.Update(event.Object) + err := r.store.Update(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to update watch event object (%#v) to store: %v", r.name, event.Object, err)) + } case watch.Deleted: // TODO: Will any consumers need access to the "last known // state", which is passed in event.Object? If so, may need // to change this. - r.store.Delete(event.Object) + err := r.store.Delete(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err)) + } default: utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event)) } @@ -384,7 +397,7 @@ loop: } } - watchDuration := time.Now().Sub(start) + watchDuration := r.clock.Now().Sub(start) if watchDuration < 1*time.Second && eventCount == 0 { glog.V(4).Infof("%s: Unexpected watch close - watch lasted less than a second and no items received", r.name) return errors.New("very short watch") diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go new file mode 100644 index 0000000000..1349f335d0 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -0,0 +1,581 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/clock" + + "github.com/golang/glog" +) + +// SharedInformer has a shared data cache and is capable of distributing notifications for changes +// to the cache to multiple listeners who registered via AddEventHandler. If you use this, there is +// one behavior change compared to a standard Informer. When you receive a notification, the cache +// will be AT LEAST as fresh as the notification, but it MAY be more fresh. You should NOT depend +// on the contents of the cache exactly matching the notification you've received in handler +// functions.
If there was a create, followed by a delete, the cache may NOT have your item. This +// has advantages over the broadcaster since it allows us to share a common cache across many +// controllers. Extending the broadcaster would have required us keep duplicate caches for each +// watch. +type SharedInformer interface { + // AddEventHandler adds an event handler to the shared informer using the shared informer's resync + // period. Events to a single handler are delivered sequentially, but there is no coordination + // between different handlers. + AddEventHandler(handler ResourceEventHandler) + // AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the + // specified resync period. Events to a single handler are delivered sequentially, but there is + // no coordination between different handlers. + AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) + // GetStore returns the Store. + GetStore() Store + // GetController gives back a synthetic interface that "votes" to start the informer + GetController() Controller + // Run starts the shared informer, which will be stopped when stopCh is closed. + Run(stopCh <-chan struct{}) + // HasSynced returns true if the shared informer's store has synced. + HasSynced() bool + // LastSyncResourceVersion is the resource version observed when last synced with the underlying + // store. The value returned is not synchronized with access to the underlying store and is not + // thread-safe. + LastSyncResourceVersion() string +} + +type SharedIndexInformer interface { + SharedInformer + // AddIndexers add indexers to the informer before it starts. + AddIndexers(indexers Indexers) error + GetIndexer() Indexer +} + +// NewSharedInformer creates a new instance for the listwatcher. +func NewSharedInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer { + return NewSharedIndexInformer(lw, objType, resyncPeriod, Indexers{}) +} + +// NewSharedIndexInformer creates a new instance for the listwatcher. +func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer { + realClock := &clock.RealClock{} + sharedIndexInformer := &sharedIndexInformer{ + processor: &sharedProcessor{clock: realClock}, + indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers), + listerWatcher: lw, + objectType: objType, + resyncCheckPeriod: defaultEventHandlerResyncPeriod, + defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod, + cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", objType)), + clock: realClock, + } + return sharedIndexInformer +} + +// InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced. +type InformerSynced func() bool + +// syncedPollPeriod controls how often you look at the status of your sync funcs +const syncedPollPeriod = 100 * time.Millisecond + +// WaitForCacheSync waits for caches to populate. 
It returns true if it was successful, false +// if the contoller should shutdown +func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { + err := wait.PollUntil(syncedPollPeriod, + func() (bool, error) { + for _, syncFunc := range cacheSyncs { + if !syncFunc() { + return false, nil + } + } + return true, nil + }, + stopCh) + if err != nil { + glog.V(2).Infof("stop requested") + return false + } + + glog.V(4).Infof("caches populated") + return true +} + +type sharedIndexInformer struct { + indexer Indexer + controller Controller + + processor *sharedProcessor + cacheMutationDetector CacheMutationDetector + + // This block is tracked to handle late initialization of the controller + listerWatcher ListerWatcher + objectType runtime.Object + + // resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call + // shouldResync to check if any of our listeners need a resync. + resyncCheckPeriod time.Duration + // defaultEventHandlerResyncPeriod is the default resync period for any handlers added via + // AddEventHandler (i.e. they don't specify one and just want to use the shared informer's default + // value). + defaultEventHandlerResyncPeriod time.Duration + // clock allows for testability + clock clock.Clock + + started bool + startedLock sync.Mutex + + // blockDeltas gives a way to stop all event distribution so that a late event handler + // can safely join the shared informer. + blockDeltas sync.Mutex + // stopCh is the channel used to stop the main Run process. We have to track it so that + // late joiners can have a proper stop + stopCh <-chan struct{} +} + +// dummyController hides the fact that a SharedInformer is different from a dedicated one +// where a caller can `Run`. The run method is disonnected in this case, because higher +// level logic will decide when to start the SharedInformer and related controller. +// Because returning information back is always asynchronous, the legacy callers shouldn't +// notice any change in behavior. 
+type dummyController struct { + informer *sharedIndexInformer +} + +func (v *dummyController) Run(stopCh <-chan struct{}) { +} + +func (v *dummyController) HasSynced() bool { + return v.informer.HasSynced() +} + +func (c *dummyController) LastSyncResourceVersion() string { + return "" +} + +type updateNotification struct { + oldObj interface{} + newObj interface{} +} + +type addNotification struct { + newObj interface{} +} + +type deleteNotification struct { + oldObj interface{} +} + +func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, s.indexer) + + cfg := &Config{ + Queue: fifo, + ListerWatcher: s.listerWatcher, + ObjectType: s.objectType, + FullResyncPeriod: s.resyncCheckPeriod, + RetryOnError: false, + ShouldResync: s.processor.shouldResync, + + Process: s.HandleDeltas, + } + + func() { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + s.controller = New(cfg) + s.controller.(*controller).clock = s.clock + s.started = true + }() + + s.stopCh = stopCh + s.cacheMutationDetector.Run(stopCh) + s.processor.run(stopCh) + s.controller.Run(stopCh) +} + +func (s *sharedIndexInformer) isStarted() bool { + s.startedLock.Lock() + defer s.startedLock.Unlock() + return s.started +} + +func (s *sharedIndexInformer) HasSynced() bool { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.controller == nil { + return false + } + return s.controller.HasSynced() +} + +func (s *sharedIndexInformer) LastSyncResourceVersion() string { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.controller == nil { + return "" + } + return s.controller.LastSyncResourceVersion() +} + +func (s *sharedIndexInformer) GetStore() Store { + return s.indexer +} + +func (s *sharedIndexInformer) GetIndexer() Indexer { + return s.indexer +} + +func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.started { + return fmt.Errorf("informer has already started") + } + + return s.indexer.AddIndexers(indexers) +} + +func (s *sharedIndexInformer) GetController() Controller { + return &dummyController{informer: s} +} + +func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) { + s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod) +} + +func determineResyncPeriod(desired, check time.Duration) time.Duration { + if desired == 0 { + return desired + } + if check == 0 { + glog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired) + return 0 + } + if desired < check { + glog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check) + return check + } + return desired +} + +const minimumResyncPeriod = 1 * time.Second + +func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if resyncPeriod > 0 { + if resyncPeriod < minimumResyncPeriod { + glog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod) + resyncPeriod = minimumResyncPeriod + } + + if resyncPeriod < s.resyncCheckPeriod { + if s.started { + glog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. 
Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) + resyncPeriod = s.resyncCheckPeriod + } else { + // if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update + // resyncCheckPeriod to match resyncPeriod and adjust the resync periods of all the listeners + // accordingly + s.resyncCheckPeriod = resyncPeriod + s.processor.resyncCheckPeriodChanged(resyncPeriod) + } + } + } + + listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now()) + + if !s.started { + s.processor.addListener(listener) + return + } + + // in order to safely join, we have to + // 1. stop sending add/update/delete notifications + // 2. do a list against the store + // 3. send synthetic "Add" events to the new handler + // 4. unblock + s.blockDeltas.Lock() + defer s.blockDeltas.Unlock() + + s.processor.addListener(listener) + + go listener.run(s.stopCh) + go listener.pop(s.stopCh) + + items := s.indexer.List() + for i := range items { + listener.add(addNotification{newObj: items[i]}) + } +} + +func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error { + s.blockDeltas.Lock() + defer s.blockDeltas.Unlock() + + // from oldest to newest + for _, d := range obj.(Deltas) { + switch d.Type { + case Sync, Added, Updated: + isSync := d.Type == Sync + s.cacheMutationDetector.AddObject(d.Object) + if old, exists, err := s.indexer.Get(d.Object); err == nil && exists { + if err := s.indexer.Update(d.Object); err != nil { + return err + } + s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object}, isSync) + } else { + if err := s.indexer.Add(d.Object); err != nil { + return err + } + s.processor.distribute(addNotification{newObj: d.Object}, isSync) + } + case Deleted: + if err := s.indexer.Delete(d.Object); err != nil { + return err + } + s.processor.distribute(deleteNotification{oldObj: d.Object}, false) + } + } + return nil +} + +type sharedProcessor struct { + listenersLock sync.RWMutex + listeners []*processorListener + syncingListeners []*processorListener + clock clock.Clock +} + +func (p *sharedProcessor) addListener(listener *processorListener) { + p.listenersLock.Lock() + defer p.listenersLock.Unlock() + + p.listeners = append(p.listeners, listener) + p.syncingListeners = append(p.syncingListeners, listener) +} + +func (p *sharedProcessor) distribute(obj interface{}, sync bool) { + p.listenersLock.RLock() + defer p.listenersLock.RUnlock() + + if sync { + for _, listener := range p.syncingListeners { + listener.add(obj) + } + } else { + for _, listener := range p.listeners { + listener.add(obj) + } + } +} + +func (p *sharedProcessor) run(stopCh <-chan struct{}) { + p.listenersLock.RLock() + defer p.listenersLock.RUnlock() + + for _, listener := range p.listeners { + go listener.run(stopCh) + go listener.pop(stopCh) + } +} + +// shouldResync queries every listener to determine if any of them need a resync, based on each +// listener's resyncPeriod. +func (p *sharedProcessor) shouldResync() bool { + p.listenersLock.Lock() + defer p.listenersLock.Unlock() + + p.syncingListeners = []*processorListener{} + + resyncNeeded := false + now := p.clock.Now() + for _, listener := range p.listeners { + // need to loop through all the listeners to see if they need to resync so we can prepare any + // listeners that are going to be resyncing. 
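A short sketch (not vendored code) of how the resync clamping above plays out when two handlers with different requested periods share one informer; the 10-second and zero values are illustrative only.

package informersketch

import (
	"time"

	"k8s.io/client-go/tools/cache"
)

// addHandlers registers two handlers with different resync requests on the
// same shared informer.
func addHandlers(informer cache.SharedIndexInformer, fast, never cache.ResourceEventHandler) {
	// A 10s request below the informer's resyncCheckPeriod is raised to that
	// period if the informer has already started; before start, it lowers the
	// shared resyncCheckPeriod instead (see AddEventHandlerWithResyncPeriod above).
	informer.AddEventHandlerWithResyncPeriod(fast, 10*time.Second)

	// A zero period opts this handler out of resyncs entirely.
	informer.AddEventHandlerWithResyncPeriod(never, 0)
}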
+ if listener.shouldResync(now) { + resyncNeeded = true + p.syncingListeners = append(p.syncingListeners, listener) + listener.determineNextResync(now) + } + } + return resyncNeeded +} + +func (p *sharedProcessor) resyncCheckPeriodChanged(resyncCheckPeriod time.Duration) { + p.listenersLock.RLock() + defer p.listenersLock.RUnlock() + + for _, listener := range p.listeners { + resyncPeriod := determineResyncPeriod(listener.requestedResyncPeriod, resyncCheckPeriod) + listener.setResyncPeriod(resyncPeriod) + } +} + +type processorListener struct { + // lock/cond protects access to 'pendingNotifications'. + lock sync.RWMutex + cond sync.Cond + + // pendingNotifications is an unbounded slice that holds all notifications not yet distributed + // there is one per listener, but a failing/stalled listener will have infinite pendingNotifications + // added until we OOM. + // TODO This is no worse that before, since reflectors were backed by unbounded DeltaFIFOs, but + // we should try to do something better + pendingNotifications []interface{} + + nextCh chan interface{} + + handler ResourceEventHandler + + // requestedResyncPeriod is how frequently the listener wants a full resync from the shared informer + requestedResyncPeriod time.Duration + // resyncPeriod is how frequently the listener wants a full resync from the shared informer. This + // value may differ from requestedResyncPeriod if the shared informer adjusts it to align with the + // informer's overall resync check period. + resyncPeriod time.Duration + // nextResync is the earliest time the listener should get a full resync + nextResync time.Time + // resyncLock guards access to resyncPeriod and nextResync + resyncLock sync.Mutex +} + +func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time) *processorListener { + ret := &processorListener{ + pendingNotifications: []interface{}{}, + nextCh: make(chan interface{}), + handler: handler, + requestedResyncPeriod: requestedResyncPeriod, + resyncPeriod: resyncPeriod, + } + + ret.cond.L = &ret.lock + + ret.determineNextResync(now) + + return ret +} + +func (p *processorListener) add(notification interface{}) { + p.lock.Lock() + defer p.lock.Unlock() + + p.pendingNotifications = append(p.pendingNotifications, notification) + p.cond.Broadcast() +} + +func (p *processorListener) pop(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + for { + blockingGet := func() (interface{}, bool) { + p.lock.Lock() + defer p.lock.Unlock() + + for len(p.pendingNotifications) == 0 { + // check if we're shutdown + select { + case <-stopCh: + return nil, true + default: + } + p.cond.Wait() + } + + nt := p.pendingNotifications[0] + p.pendingNotifications = p.pendingNotifications[1:] + return nt, false + } + + notification, stopped := blockingGet() + if stopped { + return + } + + select { + case <-stopCh: + return + case p.nextCh <- notification: + } + } +} + +func (p *processorListener) run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + for { + var next interface{} + select { + case <-stopCh: + func() { + p.lock.Lock() + defer p.lock.Unlock() + p.cond.Broadcast() + }() + return + case next = <-p.nextCh: + } + + switch notification := next.(type) { + case updateNotification: + p.handler.OnUpdate(notification.oldObj, notification.newObj) + case addNotification: + p.handler.OnAdd(notification.newObj) + case deleteNotification: + p.handler.OnDelete(notification.oldObj) + default: + utilruntime.HandleError(fmt.Errorf("unrecognized 
notification: %#v", next)) + } + } +} + +// shouldResync deterimines if the listener needs a resync. If the listener's resyncPeriod is 0, +// this always returns false. +func (p *processorListener) shouldResync(now time.Time) bool { + p.resyncLock.Lock() + defer p.resyncLock.Unlock() + + if p.resyncPeriod == 0 { + return false + } + + return now.After(p.nextResync) || now.Equal(p.nextResync) +} + +func (p *processorListener) determineNextResync(now time.Time) { + p.resyncLock.Lock() + defer p.resyncLock.Unlock() + + p.nextResync = now.Add(p.resyncPeriod) +} + +func (p *processorListener) setResyncPeriod(resyncPeriod time.Duration) { + p.resyncLock.Lock() + defer p.resyncLock.Unlock() + + p.resyncPeriod = resyncPeriod +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/store.go b/vendor/k8s.io/client-go/tools/cache/store.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/client/cache/store.go rename to vendor/k8s.io/client-go/tools/cache/store.go index 4cd2479bdc..ee6b3bd467 100755 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/store.go +++ b/vendor/k8s.io/client-go/tools/cache/store.go @@ -20,7 +20,7 @@ import ( "fmt" "strings" - "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/meta" ) // Store is a generic object storage interface. Reflector knows how to watch a server diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/thread_safe_store.go b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/client/cache/thread_safe_store.go rename to vendor/k8s.io/client-go/tools/cache/thread_safe_store.go index 74fe03807f..4d6a012976 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/thread_safe_store.go +++ b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go @@ -20,7 +20,7 @@ import ( "fmt" "sync" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/sets" ) // ThreadSafeStore is an interface that allows concurrent access to a storage backend. diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/undelta_store.go b/vendor/k8s.io/client-go/tools/cache/undelta_store.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/client/cache/undelta_store.go rename to vendor/k8s.io/client-go/tools/cache/undelta_store.go diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/helpers.go b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/helpers.go rename to vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest/latest.go b/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go similarity index 80% rename from vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest/latest.go rename to vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go index 40a08a434e..5fbbe3f13a 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest/latest.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go @@ -17,18 +17,18 @@ limitations under the License. 
package latest import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" - "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer/json" - "k8s.io/kubernetes/pkg/runtime/serializer/versioning" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/apimachinery/pkg/runtime/serializer/versioning" + "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/client-go/tools/clientcmd/api/v1" ) // Version is the string that represents the current external default version. const Version = "v1" -var ExternalVersion = unversioned.GroupVersion{Group: "", Version: "v1"} +var ExternalVersion = schema.GroupVersion{Group: "", Version: "v1"} // OldestVersion is the string that represents the oldest server version supported, // for client code that wants to hardcode the lowest common denominator. @@ -60,7 +60,7 @@ func init() { Scheme, yamlSerializer, yamlSerializer, - unversioned.GroupVersion{Version: Version}, + schema.GroupVersion{Version: Version}, runtime.InternalGroupVersioner, ) } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/register.go b/vendor/k8s.io/client-go/tools/clientcmd/api/register.go new file mode 100644 index 0000000000..2eec3881cd --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/register.go @@ -0,0 +1,46 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +// TODO this should be in the "kubeconfig" group +var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Config{}, + ) + return nil +} + +func (obj *Config) GetObjectKind() schema.ObjectKind { return obj } +func (obj *Config) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} +func (obj *Config) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go new file mode 100644 index 0000000000..fa9d40a9bb --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go @@ -0,0 +1,178 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
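As a quick illustration (not part of the vendored sources) of the register.go shown above: *Config acts as its own ObjectKind, so setting a GroupVersionKind simply fills the legacy APIVersion/Kind string fields.

package clientcmdsketch

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

// tagConfig stamps a Config with its external group/version/kind.
func tagConfig() {
	cfg := clientcmdapi.NewConfig()
	cfg.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{Version: "v1", Kind: "Config"})
	fmt.Println(cfg.APIVersion, cfg.Kind) // prints: v1 Config
}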
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// Where possible, json tags match the cli argument names. +// Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted. + +// Config holds the information needed to build connect to remote kubernetes clusters as a given user +// IMPORTANT if you add fields to this struct, please update IsConfigEmpty() +type Config struct { + // Legacy field from pkg/api/types.go TypeMeta. + // TODO(jlowdermilk): remove this after eliminating downstream dependencies. + // +optional + Kind string `json:"kind,omitempty"` + // DEPRECATED: APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). + // Because a cluster can run multiple API groups and potentially multiple versions of each, it no longer makes sense to specify + // a single value for the cluster version. + // This field isn't really needed anyway, so we are deprecating it without replacement. + // It will be ignored if it is present. + // +optional + APIVersion string `json:"apiVersion,omitempty"` + // Preferences holds general information to be use for cli interactions + Preferences Preferences `json:"preferences"` + // Clusters is a map of referencable names to cluster configs + Clusters map[string]*Cluster `json:"clusters"` + // AuthInfos is a map of referencable names to user configs + AuthInfos map[string]*AuthInfo `json:"users"` + // Contexts is a map of referencable names to context configs + Contexts map[string]*Context `json:"contexts"` + // CurrentContext is the name of the context that you would like to use by default + CurrentContext string `json:"current-context"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// IMPORTANT if you add fields to this struct, please update IsConfigEmpty() +type Preferences struct { + // +optional + Colors bool `json:"colors,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// Cluster contains information about how to communicate with a kubernetes cluster +type Cluster struct { + // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + LocationOfOrigin string + // Server is the address of the kubernetes cluster (https://hostname:port). + Server string `json:"server"` + // APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). + // +optional + APIVersion string `json:"api-version,omitempty"` + // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. 
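A minimal sketch (not vendored code) of populating the internal, map-keyed api.Config defined above; the cluster name, server URL, and token are placeholders.

package clientcmdsketch

import (
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

// exampleConfig builds an in-memory kubeconfig with one cluster, user, and context.
func exampleConfig() *clientcmdapi.Config {
	cfg := clientcmdapi.NewConfig()
	cfg.Clusters["example"] = &clientcmdapi.Cluster{Server: "https://example.invalid:6443"}
	cfg.AuthInfos["example-user"] = &clientcmdapi.AuthInfo{Token: "placeholder-token"}
	cfg.Contexts["example"] = &clientcmdapi.Context{
		Cluster:   "example",
		AuthInfo:  "example-user",
		Namespace: "default",
	}
	cfg.CurrentContext = "example"
	return cfg
}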
+ // +optional + InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` + // CertificateAuthority is the path to a cert file for the certificate authority. + // +optional + CertificateAuthority string `json:"certificate-authority,omitempty"` + // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority + // +optional + CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. +type AuthInfo struct { + // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + LocationOfOrigin string + // ClientCertificate is the path to a client cert file for TLS. + // +optional + ClientCertificate string `json:"client-certificate,omitempty"` + // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate + // +optional + ClientCertificateData []byte `json:"client-certificate-data,omitempty"` + // ClientKey is the path to a client key file for TLS. + // +optional + ClientKey string `json:"client-key,omitempty"` + // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey + // +optional + ClientKeyData []byte `json:"client-key-data,omitempty"` + // Token is the bearer token for authentication to the kubernetes cluster. + // +optional + Token string `json:"token,omitempty"` + // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence. + // +optional + TokenFile string `json:"tokenFile,omitempty"` + // Impersonate is the username to act-as. + // +optional + Impersonate string `json:"act-as,omitempty"` + // Username is the username for basic authentication to the kubernetes cluster. + // +optional + Username string `json:"username,omitempty"` + // Password is the password for basic authentication to the kubernetes cluster. + // +optional + Password string `json:"password,omitempty"` + // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. + // +optional + AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) +type Context struct { + // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + LocationOfOrigin string + // Cluster is the name of the cluster for this context + Cluster string `json:"cluster"` + // AuthInfo is the name of the authInfo for this context + AuthInfo string `json:"user"` + // Namespace is the default namespace to use on unspecified requests + // +optional + Namespace string `json:"namespace,omitempty"` + // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// AuthProviderConfig holds the configuration for a specified auth provider. +type AuthProviderConfig struct { + Name string `json:"name"` + // +optional + Config map[string]string `json:"config,omitempty"` +} + +// NewConfig is a convenience function that returns a new Config object with non-nil maps +func NewConfig() *Config { + return &Config{ + Preferences: *NewPreferences(), + Clusters: make(map[string]*Cluster), + AuthInfos: make(map[string]*AuthInfo), + Contexts: make(map[string]*Context), + Extensions: make(map[string]runtime.Object), + } +} + +// NewConfig is a convenience function that returns a new Config object with non-nil maps +func NewContext() *Context { + return &Context{Extensions: make(map[string]runtime.Object)} +} + +// NewConfig is a convenience function that returns a new Config object with non-nil maps +func NewCluster() *Cluster { + return &Cluster{Extensions: make(map[string]runtime.Object)} +} + +// NewConfig is a convenience function that returns a new Config object with non-nil maps +func NewAuthInfo() *AuthInfo { + return &AuthInfo{Extensions: make(map[string]runtime.Object)} +} + +// NewConfig is a convenience function that returns a new Config object with non-nil maps +func NewPreferences() *Preferences { + return &Preferences{Extensions: make(map[string]runtime.Object)} +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go new file mode 100644 index 0000000000..b47bfbca23 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go @@ -0,0 +1,227 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "sort" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/clientcmd/api" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + return scheme.AddConversionFuncs( + func(in *Cluster, out *api.Cluster, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *api.Cluster, out *Cluster, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *Preferences, out *api.Preferences, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *api.Preferences, out *Preferences, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *AuthInfo, out *api.AuthInfo, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *api.AuthInfo, out *AuthInfo, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *Context, out *api.Context, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *api.Context, out *Context, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + + func(in *Config, out *api.Config, s conversion.Scope) error { + out.CurrentContext = in.CurrentContext + if err := s.Convert(&in.Preferences, &out.Preferences, 0); err != nil { + return err + } + + out.Clusters = make(map[string]*api.Cluster) + if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil { + return err + } + out.AuthInfos = make(map[string]*api.AuthInfo) + if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil { + return err + } + out.Contexts = make(map[string]*api.Context) + if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil { + return err + } + out.Extensions = make(map[string]runtime.Object) + if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil { + return err + } + return nil + }, + func(in *api.Config, out *Config, s conversion.Scope) error { + out.CurrentContext = in.CurrentContext + if err := s.Convert(&in.Preferences, &out.Preferences, 0); err != nil { + return err + } + + out.Clusters = make([]NamedCluster, 0, 0) + if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil { + return err + } + out.AuthInfos = make([]NamedAuthInfo, 0, 0) + if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil { + return err + } + out.Contexts = make([]NamedContext, 0, 0) + if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil { + return err + } + out.Extensions = make([]NamedExtension, 0, 0) + if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil { + return err + } + return nil + }, + func(in *[]NamedCluster, out *map[string]*api.Cluster, s conversion.Scope) error { + for _, curr := range *in { + newCluster := api.NewCluster() + if err := s.Convert(&curr.Cluster, newCluster, 0); err != nil { + return err + } + (*out)[curr.Name] = newCluster + } + + return nil + }, + func(in *map[string]*api.Cluster, out *[]NamedCluster, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newCluster := (*in)[key] + oldCluster := &Cluster{} + if err := s.Convert(newCluster, oldCluster, 0); err != nil { + return err + } + + 
namedCluster := NamedCluster{key, *oldCluster} + *out = append(*out, namedCluster) + } + + return nil + }, + func(in *[]NamedAuthInfo, out *map[string]*api.AuthInfo, s conversion.Scope) error { + for _, curr := range *in { + newAuthInfo := api.NewAuthInfo() + if err := s.Convert(&curr.AuthInfo, newAuthInfo, 0); err != nil { + return err + } + (*out)[curr.Name] = newAuthInfo + } + + return nil + }, + func(in *map[string]*api.AuthInfo, out *[]NamedAuthInfo, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newAuthInfo := (*in)[key] + oldAuthInfo := &AuthInfo{} + if err := s.Convert(newAuthInfo, oldAuthInfo, 0); err != nil { + return err + } + + namedAuthInfo := NamedAuthInfo{key, *oldAuthInfo} + *out = append(*out, namedAuthInfo) + } + + return nil + }, + func(in *[]NamedContext, out *map[string]*api.Context, s conversion.Scope) error { + for _, curr := range *in { + newContext := api.NewContext() + if err := s.Convert(&curr.Context, newContext, 0); err != nil { + return err + } + (*out)[curr.Name] = newContext + } + + return nil + }, + func(in *map[string]*api.Context, out *[]NamedContext, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newContext := (*in)[key] + oldContext := &Context{} + if err := s.Convert(newContext, oldContext, 0); err != nil { + return err + } + + namedContext := NamedContext{key, *oldContext} + *out = append(*out, namedContext) + } + + return nil + }, + func(in *[]NamedExtension, out *map[string]runtime.Object, s conversion.Scope) error { + for _, curr := range *in { + var newExtension runtime.Object + if err := s.Convert(&curr.Extension, &newExtension, 0); err != nil { + return err + } + (*out)[curr.Name] = newExtension + } + + return nil + }, + func(in *map[string]runtime.Object, out *[]NamedExtension, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newExtension := (*in)[key] + oldExtension := &runtime.RawExtension{} + if err := s.Convert(newExtension, oldExtension, 0); err != nil { + return err + } + + namedExtension := NamedExtension{key, *oldExtension} + *out = append(*out, namedExtension) + } + + return nil + }, + ) +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go new file mode 100644 index 0000000000..bc87706db3 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go @@ -0,0 +1,46 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
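The conversion functions above walk the internal maps in sorted key order so that the external Named* slices, and therefore serialized kubeconfigs, come out deterministic. A standalone sketch of that idea (not vendored code; only the Server field is copied, for brevity):

package clientcmdsketch

import (
	"sort"

	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	clientcmdv1 "k8s.io/client-go/tools/clientcmd/api/v1"
)

// namedClusters flattens the internal cluster map into a sorted, named list.
func namedClusters(in map[string]*clientcmdapi.Cluster) []clientcmdv1.NamedCluster {
	keys := make([]string, 0, len(in))
	for k := range in {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	out := make([]clientcmdv1.NamedCluster, 0, len(in))
	for _, k := range keys {
		out = append(out, clientcmdv1.NamedCluster{
			Name:    k,
			Cluster: clientcmdv1.Cluster{Server: in[k].Server},
		})
	}
	return out
}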
+*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +// TODO this should be in the "kubeconfig" group +var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Config{}, + ) + return nil +} + +func (obj *Config) GetObjectKind() schema.ObjectKind { return obj } +func (obj *Config) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} +func (obj *Config) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go new file mode 100644 index 0000000000..a0777ba53e --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go @@ -0,0 +1,170 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// Where possible, json tags match the cli argument names. +// Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted. + +// Config holds the information needed to build connect to remote kubernetes clusters as a given user +type Config struct { + // Legacy field from pkg/api/types.go TypeMeta. + // TODO(jlowdermilk): remove this after eliminating downstream dependencies. + // +optional + Kind string `json:"kind,omitempty"` + // DEPRECATED: APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). + // Because a cluster can run multiple API groups and potentially multiple versions of each, it no longer makes sense to specify + // a single value for the cluster version. + // This field isn't really needed anyway, so we are deprecating it without replacement. + // It will be ignored if it is present. + // +optional + APIVersion string `json:"apiVersion,omitempty"` + // Preferences holds general information to be use for cli interactions + Preferences Preferences `json:"preferences"` + // Clusters is a map of referencable names to cluster configs + Clusters []NamedCluster `json:"clusters"` + // AuthInfos is a map of referencable names to user configs + AuthInfos []NamedAuthInfo `json:"users"` + // Contexts is a map of referencable names to context configs + Contexts []NamedContext `json:"contexts"` + // CurrentContext is the name of the context that you would like to use by default + CurrentContext string `json:"current-context"` + // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions []NamedExtension `json:"extensions,omitempty"` +} + +type Preferences struct { + // +optional + Colors bool `json:"colors,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions []NamedExtension `json:"extensions,omitempty"` +} + +// Cluster contains information about how to communicate with a kubernetes cluster +type Cluster struct { + // Server is the address of the kubernetes cluster (https://hostname:port). + Server string `json:"server"` + // APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). + // +optional + APIVersion string `json:"api-version,omitempty"` + // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. + // +optional + InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` + // CertificateAuthority is the path to a cert file for the certificate authority. + // +optional + CertificateAuthority string `json:"certificate-authority,omitempty"` + // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority + // +optional + CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions []NamedExtension `json:"extensions,omitempty"` +} + +// AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. +type AuthInfo struct { + // ClientCertificate is the path to a client cert file for TLS. + // +optional + ClientCertificate string `json:"client-certificate,omitempty"` + // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate + // +optional + ClientCertificateData []byte `json:"client-certificate-data,omitempty"` + // ClientKey is the path to a client key file for TLS. + // +optional + ClientKey string `json:"client-key,omitempty"` + // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey + // +optional + ClientKeyData []byte `json:"client-key-data,omitempty"` + // Token is the bearer token for authentication to the kubernetes cluster. + // +optional + Token string `json:"token,omitempty"` + // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence. + // +optional + TokenFile string `json:"tokenFile,omitempty"` + // Impersonate is the username to imperonate. The name matches the flag. + // +optional + Impersonate string `json:"as,omitempty"` + // Username is the username for basic authentication to the kubernetes cluster. + // +optional + Username string `json:"username,omitempty"` + // Password is the password for basic authentication to the kubernetes cluster. + // +optional + Password string `json:"password,omitempty"` + // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. + // +optional + AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` + // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions []NamedExtension `json:"extensions,omitempty"` +} + +// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) +type Context struct { + // Cluster is the name of the cluster for this context + Cluster string `json:"cluster"` + // AuthInfo is the name of the authInfo for this context + AuthInfo string `json:"user"` + // Namespace is the default namespace to use on unspecified requests + // +optional + Namespace string `json:"namespace,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions []NamedExtension `json:"extensions,omitempty"` +} + +// NamedCluster relates nicknames to cluster information +type NamedCluster struct { + // Name is the nickname for this Cluster + Name string `json:"name"` + // Cluster holds the cluster information + Cluster Cluster `json:"cluster"` +} + +// NamedContext relates nicknames to context information +type NamedContext struct { + // Name is the nickname for this Context + Name string `json:"name"` + // Context holds the context information + Context Context `json:"context"` +} + +// NamedAuthInfo relates nicknames to auth information +type NamedAuthInfo struct { + // Name is the nickname for this AuthInfo + Name string `json:"name"` + // AuthInfo holds the auth information + AuthInfo AuthInfo `json:"user"` +} + +// NamedExtension relates nicknames to extension information +type NamedExtension struct { + // Name is the nickname for this Extension + Name string `json:"name"` + // Extension holds the extension information + Extension runtime.RawExtension `json:"extension"` +} + +// AuthProviderConfig holds the configuration for a specified auth provider. +type AuthProviderConfig struct { + Name string `json:"name"` + Config map[string]string `json:"config"` +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/auth_loaders.go b/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/auth_loaders.go rename to vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go index 1e7c1159e4..12331f6e65 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/auth_loaders.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go @@ -24,7 +24,7 @@ import ( "os" "github.com/howeyc/gopass" - clientauth "k8s.io/kubernetes/pkg/client/unversioned/auth" + clientauth "k8s.io/client-go/tools/auth" ) // AuthLoaders are used to build clientauth.Info objects. 
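For contrast with the internal map form, a sketch (not vendored code) of the external v1.Config shape defined above, which is what a kubeconfig file deserializes into before conversion; all names and credentials are placeholders.

package clientcmdsketch

import (
	clientcmdv1 "k8s.io/client-go/tools/clientcmd/api/v1"
)

// externalConfig mirrors the on-disk kubeconfig layout using named lists.
func externalConfig() clientcmdv1.Config {
	return clientcmdv1.Config{
		CurrentContext: "example",
		Clusters: []clientcmdv1.NamedCluster{
			{Name: "example", Cluster: clientcmdv1.Cluster{Server: "https://example.invalid:6443"}},
		},
		AuthInfos: []clientcmdv1.NamedAuthInfo{
			{Name: "example-user", AuthInfo: clientcmdv1.AuthInfo{Token: "placeholder-token"}},
		},
		Contexts: []clientcmdv1.NamedContext{
			{Name: "example", Context: clientcmdv1.Context{Cluster: "example", AuthInfo: "example-user"}},
		},
	}
}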
diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go similarity index 83% rename from vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go rename to vendor/k8s.io/client-go/tools/clientcmd/client_config.go index aa9996d8b8..0411e6235c 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go @@ -27,13 +27,10 @@ import ( "github.com/golang/glog" "github.com/imdario/mergo" - "strconv" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/restclient" - clientauth "k8s.io/kubernetes/pkg/client/unversioned/auth" - clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" + "k8s.io/client-go/pkg/api" + restclient "k8s.io/client-go/rest" + clientauth "k8s.io/client-go/tools/auth" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) var ( @@ -44,7 +41,7 @@ var ( // DEPRECATED will be replace DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{ ClusterDefaults: ClusterDefaults, - }, nil, NewDefaultClientConfigLoadingRules()} + }, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}} ) // getDefaultServer returns a default setting for DefaultClientConfig @@ -72,6 +69,11 @@ type ClientConfig interface { type PersistAuthProviderConfigForUser func(user string) restclient.AuthProviderConfigPersister +type promptedCredentials struct { + username string + password string +} + // DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information type DirectClientConfig struct { config clientcmdapi.Config @@ -79,21 +81,23 @@ type DirectClientConfig struct { overrides *ConfigOverrides fallbackReader io.Reader configAccess ConfigAccess + // promptedCredentials store the credentials input by the user + promptedCredentials promptedCredentials } // NewDefaultClientConfig creates a DirectClientConfig using the config.CurrentContext as the context name func NewDefaultClientConfig(config clientcmdapi.Config, overrides *ConfigOverrides) ClientConfig { - return &DirectClientConfig{config, config.CurrentContext, overrides, nil, NewDefaultClientConfigLoadingRules()} + return &DirectClientConfig{config, config.CurrentContext, overrides, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}} } // NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information func NewNonInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, configAccess ConfigAccess) ClientConfig { - return &DirectClientConfig{config, contextName, overrides, nil, configAccess} + return &DirectClientConfig{config, contextName, overrides, nil, configAccess, promptedCredentials{}} } // NewInteractiveClientConfig creates a DirectClientConfig using the passed context name and a reader in case auth information is not provided via files or flags func NewInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, fallbackReader io.Reader, configAccess ConfigAccess) ClientConfig { - return &DirectClientConfig{config, contextName, overrides, fallbackReader, configAccess} + return &DirectClientConfig{config, contextName, overrides, fallbackReader, configAccess, promptedCredentials{}} } func (config 
*DirectClientConfig) RawConfig() (clientcmdapi.Config, error) { @@ -129,13 +133,11 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { clientConfig.Host = configClusterInfo.Server if len(config.overrides.Timeout) > 0 { - if i, err := strconv.ParseInt(config.overrides.Timeout, 10, 64); err == nil && i >= 0 { - clientConfig.Timeout = time.Duration(i) * time.Second - } else if requestTimeout, err := time.ParseDuration(config.overrides.Timeout); err == nil { - clientConfig.Timeout = requestTimeout - } else { - return nil, fmt.Errorf("Invalid value for option '--request-timeout'. Value must be a single integer, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h)") + timeout, err := ParseTimeout(config.overrides.Timeout) + if err != nil { + return nil, err } + clientConfig.Timeout = timeout } if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 { @@ -144,7 +146,7 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { clientConfig.Host = u.String() } if len(configAuthInfo.Impersonate) > 0 { - clientConfig.Impersonate = configAuthInfo.Impersonate + clientConfig.Impersonate = restclient.ImpersonationConfig{UserName: configAuthInfo.Impersonate} } // only try to read the auth information if we are secure @@ -159,7 +161,7 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { authInfoName, _ := config.getAuthInfoName() persister = PersisterForUser(config.configAccess, authInfoName) } - userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister) + userAuthPartialConfig, err := config.getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister) if err != nil { return nil, err } @@ -201,7 +203,7 @@ func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, // 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) // 3. if there is not enough information to idenfity the user, load try the ~/.kubernetes_auth file // 4. 
if there is not enough information to identify the user, prompt if possible -func getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister) (*restclient.Config, error) { +func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister) (*restclient.Config, error) { mergedConfig := &restclient.Config{} // blindly overwrite existing values based on precedence @@ -215,7 +217,7 @@ func getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fa mergedConfig.BearerToken = string(tokenBytes) } if len(configAuthInfo.Impersonate) > 0 { - mergedConfig.Impersonate = configAuthInfo.Impersonate + mergedConfig.Impersonate = restclient.ImpersonationConfig{UserName: configAuthInfo.Impersonate} } if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 { mergedConfig.CertFile = configAuthInfo.ClientCertificate @@ -234,6 +236,11 @@ func getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fa // if there still isn't enough information to authenticate the user, try prompting if !canIdentifyUser(*mergedConfig) && (fallbackReader != nil) { + if len(config.promptedCredentials.username) > 0 && len(config.promptedCredentials.password) > 0 { + mergedConfig.Username = config.promptedCredentials.username + mergedConfig.Password = config.promptedCredentials.password + return mergedConfig, nil + } prompter := NewPromptingAuthLoader(fallbackReader) promptedAuthInfo, err := prompter.Prompt() if err != nil { @@ -244,6 +251,8 @@ func getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fa mergedConfig = &restclient.Config{} mergo.Merge(mergedConfig, promptedConfig) mergo.Merge(mergedConfig, previouslyMergedConfig) + config.promptedCredentials.username = mergedConfig.Username + config.promptedCredentials.password = mergedConfig.Password } return mergedConfig, nil @@ -377,15 +386,15 @@ func (config *DirectClientConfig) getContext() (clientcmdapi.Context, error) { contexts := config.config.Contexts contextName, required := config.getContextName() - var mergedContext clientcmdapi.Context + mergedContext := clientcmdapi.NewContext() if configContext, exists := contexts[contextName]; exists { - mergo.Merge(&mergedContext, configContext) + mergo.Merge(mergedContext, configContext) } else if required { return clientcmdapi.Context{}, fmt.Errorf("context %q does not exist", contextName) } - mergo.Merge(&mergedContext, config.overrides.Context) + mergo.Merge(mergedContext, config.overrides.Context) - return mergedContext, nil + return *mergedContext, nil } // getAuthInfo returns the clientcmdapi.AuthInfo, or an error if a required auth info is not found. 
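A minimal sketch (not vendored code) of the typical consumer path through the client_config.go changes above: merge the default kubeconfig loading rules with empty overrides and resolve a *restclient.Config, which ends up in the DirectClientConfig.ClientConfig path shown here.

package clientcmdsketch

import (
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// loadRESTConfig resolves a REST client config from the default kubeconfig
// locations, with no command-line overrides applied.
func loadRESTConfig() (*restclient.Config, error) {
	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
	clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{})
	return clientConfig.ClientConfig()
}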
@@ -393,15 +402,15 @@ func (config *DirectClientConfig) getAuthInfo() (clientcmdapi.AuthInfo, error) { authInfos := config.config.AuthInfos authInfoName, required := config.getAuthInfoName() - var mergedAuthInfo clientcmdapi.AuthInfo + mergedAuthInfo := clientcmdapi.NewAuthInfo() if configAuthInfo, exists := authInfos[authInfoName]; exists { - mergo.Merge(&mergedAuthInfo, configAuthInfo) + mergo.Merge(mergedAuthInfo, configAuthInfo) } else if required { return clientcmdapi.AuthInfo{}, fmt.Errorf("auth info %q does not exist", authInfoName) } - mergo.Merge(&mergedAuthInfo, config.overrides.AuthInfo) + mergo.Merge(mergedAuthInfo, config.overrides.AuthInfo) - return mergedAuthInfo, nil + return *mergedAuthInfo, nil } // getCluster returns the clientcmdapi.Cluster, or an error if a required cluster is not found. @@ -409,14 +418,14 @@ func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) { clusterInfos := config.config.Clusters clusterInfoName, required := config.getClusterName() - var mergedClusterInfo clientcmdapi.Cluster - mergo.Merge(&mergedClusterInfo, config.overrides.ClusterDefaults) + mergedClusterInfo := clientcmdapi.NewCluster() + mergo.Merge(mergedClusterInfo, config.overrides.ClusterDefaults) if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists { - mergo.Merge(&mergedClusterInfo, configClusterInfo) + mergo.Merge(mergedClusterInfo, configClusterInfo) } else if required { return clientcmdapi.Cluster{}, fmt.Errorf("cluster %q does not exist", clusterInfoName) } - mergo.Merge(&mergedClusterInfo, config.overrides.ClusterInfo) + mergo.Merge(mergedClusterInfo, config.overrides.ClusterInfo) // An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data // otherwise, a kubeconfig containing a CA reference would return an error that "CA and insecure-skip-tls-verify couldn't both be set" caLen := len(config.overrides.ClusterInfo.CertificateAuthority) @@ -426,23 +435,50 @@ func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) { mergedClusterInfo.CertificateAuthorityData = nil } - return mergedClusterInfo, nil + return *mergedClusterInfo, nil } // inClusterClientConfig makes a config that will work from within a kubernetes cluster container environment. -type inClusterClientConfig struct{} +// Can take options overrides for flags explicitly provided to the command inside the cluster container. 
+type inClusterClientConfig struct { + overrides *ConfigOverrides + inClusterConfigProvider func() (*restclient.Config, error) +} -var _ ClientConfig = inClusterClientConfig{} +var _ ClientConfig = &inClusterClientConfig{} -func (inClusterClientConfig) RawConfig() (clientcmdapi.Config, error) { +func (config *inClusterClientConfig) RawConfig() (clientcmdapi.Config, error) { return clientcmdapi.Config{}, fmt.Errorf("inCluster environment config doesn't support multiple clusters") } -func (inClusterClientConfig) ClientConfig() (*restclient.Config, error) { - return restclient.InClusterConfig() +func (config *inClusterClientConfig) ClientConfig() (*restclient.Config, error) { + if config.inClusterConfigProvider == nil { + config.inClusterConfigProvider = restclient.InClusterConfig + } + + icc, err := config.inClusterConfigProvider() + if err != nil { + return nil, err + } + + // in-cluster configs only takes a host, token, or CA file + // if any of them were individually provided, ovewrite anything else + if config.overrides != nil { + if server := config.overrides.ClusterInfo.Server; len(server) > 0 { + icc.Host = server + } + if token := config.overrides.AuthInfo.Token; len(token) > 0 { + icc.BearerToken = token + } + if certificateAuthorityFile := config.overrides.ClusterInfo.CertificateAuthority; len(certificateAuthorityFile) > 0 { + icc.TLSClientConfig.CAFile = certificateAuthorityFile + } + } + + return icc, err } -func (inClusterClientConfig) Namespace() (string, bool, error) { +func (config *inClusterClientConfig) Namespace() (string, bool, error) { // This way assumes you've set the POD_NAMESPACE environment variable using the downward API. // This check has to be done first for backwards compatibility with the way InClusterConfig was originally set up if ns := os.Getenv("POD_NAMESPACE"); ns != "" { @@ -459,12 +495,12 @@ func (inClusterClientConfig) Namespace() (string, bool, error) { return "default", false, nil } -func (inClusterClientConfig) ConfigAccess() ConfigAccess { +func (config *inClusterClientConfig) ConfigAccess() ConfigAccess { return NewDefaultClientConfigLoadingRules() } // Possible returns true if loading an inside-kubernetes-cluster is possible. -func (inClusterClientConfig) Possible() bool { +func (config *inClusterClientConfig) Possible() bool { fi, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount/token") return os.Getenv("KUBERNETES_SERVICE_HOST") != "" && os.Getenv("KUBERNETES_SERVICE_PORT") != "" && diff --git a/vendor/k8s.io/client-go/tools/clientcmd/config.go b/vendor/k8s.io/client-go/tools/clientcmd/config.go new file mode 100644 index 0000000000..16ccdaf20a --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/config.go @@ -0,0 +1,472 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
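A sketch (not vendored code) of the plain in-cluster path that the reworked inClusterClientConfig above builds on: restclient.InClusterConfig for the connection, plus the POD_NAMESPACE downward-API variable that Namespace() checks first.

package clientcmdsketch

import (
	"os"

	restclient "k8s.io/client-go/rest"
)

// inClusterSetup returns the in-cluster REST config and the pod's namespace,
// falling back to "default" when POD_NAMESPACE is not injected.
func inClusterSetup() (*restclient.Config, string, error) {
	cfg, err := restclient.InClusterConfig()
	if err != nil {
		return nil, "", err
	}
	ns := os.Getenv("POD_NAMESPACE")
	if ns == "" {
		ns = "default"
	}
	return cfg, ns, nil
}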
+*/ + +package clientcmd + +import ( + "errors" + "os" + "path" + "path/filepath" + "reflect" + "sort" + + "github.com/golang/glog" + + restclient "k8s.io/client-go/rest" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +// ConfigAccess is used by subcommands and methods in this package to load and modify the appropriate config files +type ConfigAccess interface { + // GetLoadingPrecedence returns the slice of files that should be used for loading and inspecting the config + GetLoadingPrecedence() []string + // GetStartingConfig returns the config that subcommands should being operating against. It may or may not be merged depending on loading rules + GetStartingConfig() (*clientcmdapi.Config, error) + // GetDefaultFilename returns the name of the file you should write into (create if necessary), if you're trying to create a new stanza as opposed to updating an existing one. + GetDefaultFilename() string + // IsExplicitFile indicates whether or not this command is interested in exactly one file. This implementation only ever does that via a flag, but implementations that handle local, global, and flags may have more + IsExplicitFile() bool + // GetExplicitFile returns the particular file this command is operating against. This implementation only ever has one, but implementations that handle local, global, and flags may have more + GetExplicitFile() string +} + +type PathOptions struct { + // GlobalFile is the full path to the file to load as the global (final) option + GlobalFile string + // EnvVar is the env var name that points to the list of kubeconfig files to load + EnvVar string + // ExplicitFileFlag is the name of the flag to use for prompting for the kubeconfig file + ExplicitFileFlag string + + // GlobalFileSubpath is an optional value used for displaying help + GlobalFileSubpath string + + LoadingRules *ClientConfigLoadingRules +} + +func (o *PathOptions) GetEnvVarFiles() []string { + if len(o.EnvVar) == 0 { + return []string{} + } + + envVarValue := os.Getenv(o.EnvVar) + if len(envVarValue) == 0 { + return []string{} + } + + return filepath.SplitList(envVarValue) +} + +func (o *PathOptions) GetLoadingPrecedence() []string { + if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 { + return envVarFiles + } + + return []string{o.GlobalFile} +} + +func (o *PathOptions) GetStartingConfig() (*clientcmdapi.Config, error) { + // don't mutate the original + loadingRules := *o.LoadingRules + loadingRules.Precedence = o.GetLoadingPrecedence() + + clientConfig := NewNonInteractiveDeferredLoadingClientConfig(&loadingRules, &ConfigOverrides{}) + rawConfig, err := clientConfig.RawConfig() + if os.IsNotExist(err) { + return clientcmdapi.NewConfig(), nil + } + if err != nil { + return nil, err + } + + return &rawConfig, nil +} + +func (o *PathOptions) GetDefaultFilename() string { + if o.IsExplicitFile() { + return o.GetExplicitFile() + } + + if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 { + if len(envVarFiles) == 1 { + return envVarFiles[0] + } + + // if any of the envvar files already exists, return it + for _, envVarFile := range envVarFiles { + if _, err := os.Stat(envVarFile); err == nil { + return envVarFile + } + } + + // otherwise, return the last one in the list + return envVarFiles[len(envVarFiles)-1] + } + + return o.GlobalFile +} + +func (o *PathOptions) IsExplicitFile() bool { + if len(o.LoadingRules.ExplicitPath) > 0 { + return true + } + + return false +} + +func (o *PathOptions) GetExplicitFile() string { + return o.LoadingRules.ExplicitPath +} + 
+func NewDefaultPathOptions() *PathOptions { + ret := &PathOptions{ + GlobalFile: RecommendedHomeFile, + EnvVar: RecommendedConfigPathEnvVar, + ExplicitFileFlag: RecommendedConfigPathFlag, + + GlobalFileSubpath: path.Join(RecommendedHomeDir, RecommendedFileName), + + LoadingRules: NewDefaultClientConfigLoadingRules(), + } + ret.LoadingRules.DoNotResolvePaths = true + + return ret +} + +// ModifyConfig takes a Config object, iterates through Clusters, AuthInfos, and Contexts, uses the LocationOfOrigin if specified or +// uses the default destination file to write the results into. This results in multiple file reads, but it's very easy to follow. +// Preferences and CurrentContext should always be set in the default destination file. Since we can't distinguish between empty and missing values +// (no nil strings), we're forced have separate handling for them. In the kubeconfig cases, newConfig should have at most one difference, +// that means that this code will only write into a single file. If you want to relativizePaths, you must provide a fully qualified path in any +// modified element. +func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config, relativizePaths bool) error { + possibleSources := configAccess.GetLoadingPrecedence() + // sort the possible kubeconfig files so we always "lock" in the same order + // to avoid deadlock (note: this can fail w/ symlinks, but... come on). + sort.Strings(possibleSources) + for _, filename := range possibleSources { + if err := lockFile(filename); err != nil { + return err + } + defer unlockFile(filename) + } + + startingConfig, err := configAccess.GetStartingConfig() + if err != nil { + return err + } + + // We need to find all differences, locate their original files, read a partial config to modify only that stanza and write out the file. + // Special case the test for current context and preferences since those always write to the default file. + if reflect.DeepEqual(*startingConfig, newConfig) { + // nothing to do + return nil + } + + if startingConfig.CurrentContext != newConfig.CurrentContext { + if err := writeCurrentContext(configAccess, newConfig.CurrentContext); err != nil { + return err + } + } + + if !reflect.DeepEqual(startingConfig.Preferences, newConfig.Preferences) { + if err := writePreferences(configAccess, newConfig.Preferences); err != nil { + return err + } + } + + // Search every cluster, authInfo, and context. 
First from new to old for differences, then from old to new for deletions + for key, cluster := range newConfig.Clusters { + startingCluster, exists := startingConfig.Clusters[key] + if !reflect.DeepEqual(cluster, startingCluster) || !exists { + destinationFile := cluster.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite, err := getConfigFromFile(destinationFile) + if err != nil { + return err + } + t := *cluster + + configToWrite.Clusters[key] = &t + configToWrite.Clusters[key].LocationOfOrigin = destinationFile + if relativizePaths { + if err := RelativizeClusterLocalPaths(configToWrite.Clusters[key]); err != nil { + return err + } + } + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, context := range newConfig.Contexts { + startingContext, exists := startingConfig.Contexts[key] + if !reflect.DeepEqual(context, startingContext) || !exists { + destinationFile := context.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite, err := getConfigFromFile(destinationFile) + if err != nil { + return err + } + configToWrite.Contexts[key] = context + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, authInfo := range newConfig.AuthInfos { + startingAuthInfo, exists := startingConfig.AuthInfos[key] + if !reflect.DeepEqual(authInfo, startingAuthInfo) || !exists { + destinationFile := authInfo.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite, err := getConfigFromFile(destinationFile) + if err != nil { + return err + } + t := *authInfo + configToWrite.AuthInfos[key] = &t + configToWrite.AuthInfos[key].LocationOfOrigin = destinationFile + if relativizePaths { + if err := RelativizeAuthInfoLocalPaths(configToWrite.AuthInfos[key]); err != nil { + return err + } + } + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, cluster := range startingConfig.Clusters { + if _, exists := newConfig.Clusters[key]; !exists { + destinationFile := cluster.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite, err := getConfigFromFile(destinationFile) + if err != nil { + return err + } + delete(configToWrite.Clusters, key) + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, context := range startingConfig.Contexts { + if _, exists := newConfig.Contexts[key]; !exists { + destinationFile := context.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite, err := getConfigFromFile(destinationFile) + if err != nil { + return err + } + delete(configToWrite.Contexts, key) + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, authInfo := range startingConfig.AuthInfos { + if _, exists := newConfig.AuthInfos[key]; !exists { + destinationFile := authInfo.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite, err := getConfigFromFile(destinationFile) + if err != nil { + return err + } + delete(configToWrite.AuthInfos, key) + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + 
} + } + + return nil +} + +func PersisterForUser(configAccess ConfigAccess, user string) restclient.AuthProviderConfigPersister { + return &persister{configAccess, user} +} + +type persister struct { + configAccess ConfigAccess + user string +} + +func (p *persister) Persist(config map[string]string) error { + newConfig, err := p.configAccess.GetStartingConfig() + if err != nil { + return err + } + authInfo, ok := newConfig.AuthInfos[p.user] + if ok && authInfo.AuthProvider != nil { + authInfo.AuthProvider.Config = config + ModifyConfig(p.configAccess, *newConfig, false) + } + return nil +} + +// writeCurrentContext takes three possible paths. +// If newCurrentContext is the same as the startingConfig's current context, then we exit. +// If newCurrentContext has a value, then that value is written into the default destination file. +// If newCurrentContext is empty, then we find the config file that is setting the CurrentContext and clear the value from that file +func writeCurrentContext(configAccess ConfigAccess, newCurrentContext string) error { + if startingConfig, err := configAccess.GetStartingConfig(); err != nil { + return err + } else if startingConfig.CurrentContext == newCurrentContext { + return nil + } + + if configAccess.IsExplicitFile() { + file := configAccess.GetExplicitFile() + currConfig, err := getConfigFromFile(file) + if err != nil { + return err + } + currConfig.CurrentContext = newCurrentContext + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + + if len(newCurrentContext) > 0 { + destinationFile := configAccess.GetDefaultFilename() + config, err := getConfigFromFile(destinationFile) + if err != nil { + return err + } + config.CurrentContext = newCurrentContext + + if err := WriteToFile(*config, destinationFile); err != nil { + return err + } + + return nil + } + + // we're supposed to be clearing the current context. We need to find the first spot in the chain that is setting it and clear it + for _, file := range configAccess.GetLoadingPrecedence() { + if _, err := os.Stat(file); err == nil { + currConfig, err := getConfigFromFile(file) + if err != nil { + return err + } + + if len(currConfig.CurrentContext) > 0 { + currConfig.CurrentContext = newCurrentContext + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + } + } + + return errors.New("no config found to write context") +} + +func writePreferences(configAccess ConfigAccess, newPrefs clientcmdapi.Preferences) error { + if startingConfig, err := configAccess.GetStartingConfig(); err != nil { + return err + } else if reflect.DeepEqual(startingConfig.Preferences, newPrefs) { + return nil + } + + if configAccess.IsExplicitFile() { + file := configAccess.GetExplicitFile() + currConfig, err := getConfigFromFile(file) + if err != nil { + return err + } + currConfig.Preferences = newPrefs + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + + for _, file := range configAccess.GetLoadingPrecedence() { + currConfig, err := getConfigFromFile(file) + if err != nil { + return err + } + + if !reflect.DeepEqual(currConfig.Preferences, newPrefs) { + currConfig.Preferences = newPrefs + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + } + + return errors.New("no config found to write preferences") +} + +// getConfigFromFile tries to read a kubeconfig file and if it can't, returns an error. One exception, missing files result in empty configs, not an error. 
+func getConfigFromFile(filename string) (*clientcmdapi.Config, error) { + config, err := LoadFromFile(filename) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + if config == nil { + config = clientcmdapi.NewConfig() + } + return config, nil +} + +// GetConfigFromFileOrDie tries to read a kubeconfig file and if it can't, it calls exit. One exception, missing files result in empty configs, not an exit +func GetConfigFromFileOrDie(filename string) *clientcmdapi.Config { + config, err := getConfigFromFile(filename) + if err != nil { + glog.FatalDepth(1, err) + } + + return config +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/doc.go new file mode 100644 index 0000000000..c07ace6a58 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/doc.go @@ -0,0 +1,37 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package clientcmd provides one stop shopping for building a working client from a fixed config, +from a .kubeconfig file, from command line flags, or from any merged combination. + +Sample usage from merged .kubeconfig files (local directory, home directory) + + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + // if you want to change the loading rules (which files in which order), you can do so here + + configOverrides := &clientcmd.ConfigOverrides{} + // if you want to change override values or bind them to flags, there are methods to help you + + kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) + config, err := kubeConfig.ClientConfig() + if err != nil { + // Do something + } + client, err := metav1.New(config) + // ... +*/ +package clientcmd diff --git a/vendor/k8s.io/client-go/tools/clientcmd/helpers.go b/vendor/k8s.io/client-go/tools/clientcmd/helpers.go new file mode 100644 index 0000000000..b609d1a766 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/helpers.go @@ -0,0 +1,35 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "fmt" + "strconv" + "time" +) + +// ParseTimeout returns a parsed duration from a string +// A duration string value must be a positive integer, optionally followed by a corresponding time unit (s|m|h). 
+func ParseTimeout(duration string) (time.Duration, error) { + if i, err := strconv.ParseInt(duration, 10, 64); err == nil && i >= 0 { + return (time.Duration(i) * time.Second), nil + } + if requestTimeout, err := time.ParseDuration(duration); err == nil { + return requestTimeout, nil + } + return 0, fmt.Errorf("Invalid timeout value. Timeout must be a single integer in seconds, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h)") +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go b/vendor/k8s.io/client-go/tools/clientcmd/loader.go similarity index 97% rename from vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go rename to vendor/k8s.io/client-go/tools/clientcmd/loader.go index 654dfe98d0..1fcc510382 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/loader.go @@ -30,13 +30,13 @@ import ( "github.com/golang/glog" "github.com/imdario/mergo" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/client/restclient" - clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" - clientcmdlatest "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest" - "k8s.io/kubernetes/pkg/runtime" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/homedir" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + restclient "k8s.io/client-go/rest" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest" + "k8s.io/client-go/util/homedir" ) const ( @@ -388,7 +388,7 @@ func Load(data []byte) (*clientcmdapi.Config, error) { if len(data) == 0 { return config, nil } - decoded, _, err := clientcmdlatest.Codec.Decode(data, &unversioned.GroupVersionKind{Version: clientcmdlatest.Version, Kind: "Config"}, config) + decoded, _, err := clientcmdlatest.Codec.Decode(data, &schema.GroupVersionKind{Version: clientcmdlatest.Version, Kind: "Config"}, config) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go rename to vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go index 4c3645121b..92c1a5a006 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go @@ -22,8 +22,8 @@ import ( "github.com/golang/glog" - "k8s.io/kubernetes/pkg/client/restclient" - clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" + restclient "k8s.io/client-go/rest" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) // DeferredLoadingClientConfig is a ClientConfig interface that is backed by a client config loader. 
@@ -51,12 +51,12 @@ type InClusterConfig interface { // NewNonInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name func NewNonInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides) ClientConfig { - return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: inClusterClientConfig{}} + return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}} } // NewInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name and the fallback auth reader func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig { - return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: inClusterClientConfig{}, fallbackReader: fallbackReader} + return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}, fallbackReader: fallbackReader} } func (config *DeferredLoadingClientConfig) createClientConfig() (ClientConfig, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go b/vendor/k8s.io/client-go/tools/clientcmd/overrides.go similarity index 92% rename from vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go rename to vendor/k8s.io/client-go/tools/clientcmd/overrides.go index 626cdeaae4..f18aa6a4ae 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/overrides.go @@ -21,7 +21,7 @@ import ( "github.com/spf13/pflag" - clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) // ConfigOverrides holds values that should override whatever information is pulled from the actual Config object. You can't @@ -85,16 +85,23 @@ type FlagInfo struct { Description string } +// AddSecretAnnotation add secret flag to Annotation. +func (f FlagInfo) AddSecretAnnotation(flags *pflag.FlagSet) FlagInfo { + flags.SetAnnotation(f.LongName, "classified", []string{"true"}) + return f +} + // BindStringFlag binds the flag based on the provided info. If LongName == "", nothing is registered -func (f FlagInfo) BindStringFlag(flags *pflag.FlagSet, target *string) { +func (f FlagInfo) BindStringFlag(flags *pflag.FlagSet, target *string) FlagInfo { // you can't register a flag without a long name if len(f.LongName) > 0 { flags.StringVarP(target, f.LongName, f.ShortName, f.Default, f.Description) } + return f } // BindBoolFlag binds the flag based on the provided info. If LongName == "", nothing is registered -func (f FlagInfo) BindBoolFlag(flags *pflag.FlagSet, target *bool) { +func (f FlagInfo) BindBoolFlag(flags *pflag.FlagSet, target *bool) FlagInfo { // you can't register a flag without a long name if len(f.LongName) > 0 { // try to parse Default as a bool. 
If it fails, assume false @@ -105,6 +112,7 @@ func (f FlagInfo) BindBoolFlag(flags *pflag.FlagSet, target *bool) { flags.BoolVarP(target, f.LongName, f.ShortName, boolVal, f.Description) } + return f } const ( @@ -126,6 +134,18 @@ const ( FlagTimeout = "request-timeout" ) +// RecommendedConfigOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing +func RecommendedConfigOverrideFlags(prefix string) ConfigOverrideFlags { + return ConfigOverrideFlags{ + AuthOverrideFlags: RecommendedAuthOverrideFlags(prefix), + ClusterOverrideFlags: RecommendedClusterOverrideFlags(prefix), + ContextOverrideFlags: RecommendedContextOverrideFlags(prefix), + + CurrentContext: FlagInfo{prefix + FlagContext, "", "", "The name of the kubeconfig context to use"}, + Timeout: FlagInfo{prefix + FlagTimeout, "", "0", "The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests."}, + } +} + // RecommendedAuthOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing func RecommendedAuthOverrideFlags(prefix string) AuthOverrideFlags { return AuthOverrideFlags{ @@ -148,18 +168,6 @@ func RecommendedClusterOverrideFlags(prefix string) ClusterOverrideFlags { } } -// RecommendedConfigOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing -func RecommendedConfigOverrideFlags(prefix string) ConfigOverrideFlags { - return ConfigOverrideFlags{ - AuthOverrideFlags: RecommendedAuthOverrideFlags(prefix), - ClusterOverrideFlags: RecommendedClusterOverrideFlags(prefix), - ContextOverrideFlags: RecommendedContextOverrideFlags(prefix), - - CurrentContext: FlagInfo{prefix + FlagContext, "", "", "The name of the kubeconfig context to use"}, - Timeout: FlagInfo{prefix + FlagTimeout, "", "0", "The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). 
A value of zero means don't timeout requests."}, - } -} - // RecommendedContextOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing func RecommendedContextOverrideFlags(prefix string) ContextOverrideFlags { return ContextOverrideFlags{ @@ -169,14 +177,23 @@ func RecommendedContextOverrideFlags(prefix string) ContextOverrideFlags { } } +// BindOverrideFlags is a convenience method to bind the specified flags to their associated variables +func BindOverrideFlags(overrides *ConfigOverrides, flags *pflag.FlagSet, flagNames ConfigOverrideFlags) { + BindAuthInfoFlags(&overrides.AuthInfo, flags, flagNames.AuthOverrideFlags) + BindClusterFlags(&overrides.ClusterInfo, flags, flagNames.ClusterOverrideFlags) + BindContextFlags(&overrides.Context, flags, flagNames.ContextOverrideFlags) + flagNames.CurrentContext.BindStringFlag(flags, &overrides.CurrentContext) + flagNames.Timeout.BindStringFlag(flags, &overrides.Timeout) +} + // BindAuthInfoFlags is a convenience method to bind the specified flags to their associated variables func BindAuthInfoFlags(authInfo *clientcmdapi.AuthInfo, flags *pflag.FlagSet, flagNames AuthOverrideFlags) { - flagNames.ClientCertificate.BindStringFlag(flags, &authInfo.ClientCertificate) - flagNames.ClientKey.BindStringFlag(flags, &authInfo.ClientKey) - flagNames.Token.BindStringFlag(flags, &authInfo.Token) - flagNames.Impersonate.BindStringFlag(flags, &authInfo.Impersonate) - flagNames.Username.BindStringFlag(flags, &authInfo.Username) - flagNames.Password.BindStringFlag(flags, &authInfo.Password) + flagNames.ClientCertificate.BindStringFlag(flags, &authInfo.ClientCertificate).AddSecretAnnotation(flags) + flagNames.ClientKey.BindStringFlag(flags, &authInfo.ClientKey).AddSecretAnnotation(flags) + flagNames.Token.BindStringFlag(flags, &authInfo.Token).AddSecretAnnotation(flags) + flagNames.Impersonate.BindStringFlag(flags, &authInfo.Impersonate).AddSecretAnnotation(flags) + flagNames.Username.BindStringFlag(flags, &authInfo.Username).AddSecretAnnotation(flags) + flagNames.Password.BindStringFlag(flags, &authInfo.Password).AddSecretAnnotation(flags) } // BindClusterFlags is a convenience method to bind the specified flags to their associated variables @@ -189,15 +206,6 @@ func BindClusterFlags(clusterInfo *clientcmdapi.Cluster, flags *pflag.FlagSet, f flagNames.InsecureSkipTLSVerify.BindBoolFlag(flags, &clusterInfo.InsecureSkipTLSVerify) } -// BindOverrideFlags is a convenience method to bind the specified flags to their associated variables -func BindOverrideFlags(overrides *ConfigOverrides, flags *pflag.FlagSet, flagNames ConfigOverrideFlags) { - BindAuthInfoFlags(&overrides.AuthInfo, flags, flagNames.AuthOverrideFlags) - BindClusterFlags(&overrides.ClusterInfo, flags, flagNames.ClusterOverrideFlags) - BindContextFlags(&overrides.Context, flags, flagNames.ContextOverrideFlags) - flagNames.CurrentContext.BindStringFlag(flags, &overrides.CurrentContext) - flagNames.Timeout.BindStringFlag(flags, &overrides.Timeout) -} - // BindFlags is a convenience method to bind the specified flags to their associated variables func BindContextFlags(contextInfo *clientcmdapi.Context, flags *pflag.FlagSet, flagNames ContextOverrideFlags) { flagNames.ClusterName.BindStringFlag(flags, &contextInfo.Cluster) diff --git a/vendor/k8s.io/client-go/tools/clientcmd/validation.go b/vendor/k8s.io/client-go/tools/clientcmd/validation.go new file mode 100644 index 0000000000..ceeeb042e8 --- /dev/null +++ 
b/vendor/k8s.io/client-go/tools/clientcmd/validation.go @@ -0,0 +1,270 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "errors" + "fmt" + "os" + "reflect" + "strings" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/validation" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +var ( + ErrNoContext = errors.New("no context chosen") + ErrEmptyConfig = errors.New("no configuration has been provided") + // message is for consistency with old behavior + ErrEmptyCluster = errors.New("cluster has no server defined") +) + +type errContextNotFound struct { + ContextName string +} + +func (e *errContextNotFound) Error() string { + return fmt.Sprintf("context was not found for specified context: %v", e.ContextName) +} + +// IsContextNotFound returns a boolean indicating whether the error is known to +// report that a context was not found +func IsContextNotFound(err error) bool { + if err == nil { + return false + } + if _, ok := err.(*errContextNotFound); ok || err == ErrNoContext { + return true + } + return strings.Contains(err.Error(), "context was not found for specified context") +} + +// IsEmptyConfig returns true if the provided error indicates the provided configuration +// is empty. +func IsEmptyConfig(err error) bool { + switch t := err.(type) { + case errConfigurationInvalid: + return len(t) == 1 && t[0] == ErrEmptyConfig + } + return err == ErrEmptyConfig +} + +// errConfigurationInvalid is a set of errors indicating the configuration is invalid. +type errConfigurationInvalid []error + +// errConfigurationInvalid implements error and Aggregate +var _ error = errConfigurationInvalid{} +var _ utilerrors.Aggregate = errConfigurationInvalid{} + +func newErrConfigurationInvalid(errs []error) error { + switch len(errs) { + case 0: + return nil + default: + return errConfigurationInvalid(errs) + } +} + +// Error implements the error interface +func (e errConfigurationInvalid) Error() string { + return fmt.Sprintf("invalid configuration: %v", utilerrors.NewAggregate(e).Error()) +} + +// Errors implements the AggregateError interface +func (e errConfigurationInvalid) Errors() []error { + return e +} + +// IsConfigurationInvalid returns true if the provided error indicates the configuration is invalid. +func IsConfigurationInvalid(err error) bool { + switch err.(type) { + case *errContextNotFound, errConfigurationInvalid: + return true + } + return IsContextNotFound(err) +} + +// Validate checks for errors in the Config. It does not return early so that it can find as many errors as possible. 
+func Validate(config clientcmdapi.Config) error { + validationErrors := make([]error, 0) + + if clientcmdapi.IsConfigEmpty(&config) { + return newErrConfigurationInvalid([]error{ErrEmptyConfig}) + } + + if len(config.CurrentContext) != 0 { + if _, exists := config.Contexts[config.CurrentContext]; !exists { + validationErrors = append(validationErrors, &errContextNotFound{config.CurrentContext}) + } + } + + for contextName, context := range config.Contexts { + validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) + } + + for authInfoName, authInfo := range config.AuthInfos { + validationErrors = append(validationErrors, validateAuthInfo(authInfoName, *authInfo)...) + } + + for clusterName, clusterInfo := range config.Clusters { + validationErrors = append(validationErrors, validateClusterInfo(clusterName, *clusterInfo)...) + } + + return newErrConfigurationInvalid(validationErrors) +} + +// ConfirmUsable looks a particular context and determines if that particular part of the config is useable. There might still be errors in the config, +// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible. +func ConfirmUsable(config clientcmdapi.Config, passedContextName string) error { + validationErrors := make([]error, 0) + + if clientcmdapi.IsConfigEmpty(&config) { + return newErrConfigurationInvalid([]error{ErrEmptyConfig}) + } + + var contextName string + if len(passedContextName) != 0 { + contextName = passedContextName + } else { + contextName = config.CurrentContext + } + + if len(contextName) == 0 { + return ErrNoContext + } + + context, exists := config.Contexts[contextName] + if !exists { + validationErrors = append(validationErrors, &errContextNotFound{contextName}) + } + + if exists { + validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) + validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *config.AuthInfos[context.AuthInfo])...) + validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *config.Clusters[context.Cluster])...) + } + + return newErrConfigurationInvalid(validationErrors) +} + +// validateClusterInfo looks for conflicts and errors in the cluster info +func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) []error { + validationErrors := make([]error, 0) + + if reflect.DeepEqual(clientcmdapi.Cluster{}, clusterInfo) { + return []error{ErrEmptyCluster} + } + + if len(clusterInfo.Server) == 0 { + if len(clusterName) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("default cluster has no server defined")) + } else { + validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName)) + } + } + // Make sure CA data and CA file aren't both specified + if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. 
certificate-authority-data will override.", clusterName)) + } + if len(clusterInfo.CertificateAuthority) != 0 { + clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) + defer clientCertCA.Close() + if err != nil { + validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) + } + } + + return validationErrors +} + +// validateAuthInfo looks for conflicts and errors in the auth info +func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []error { + validationErrors := make([]error, 0) + + usingAuthPath := false + methods := make([]string, 0, 3) + if len(authInfo.Token) != 0 { + methods = append(methods, "token") + } + if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 { + methods = append(methods, "basicAuth") + } + + if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { + // Make sure cert data and file aren't both specified + if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override.", authInfoName)) + } + // Make sure key data and file aren't both specified + if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) + } + // Make sure a key is specified + if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method.", authInfoName)) + } + + if len(authInfo.ClientCertificate) != 0 { + clientCertFile, err := os.Open(authInfo.ClientCertificate) + defer clientCertFile.Close() + if err != nil { + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) + } + } + if len(authInfo.ClientKey) != 0 { + clientKeyFile, err := os.Open(authInfo.ClientKey) + defer clientKeyFile.Close() + if err != nil { + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) + } + } + } + + // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case + if (len(methods) > 1) && (!usingAuthPath) { + validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) + } + + return validationErrors +} + +// validateContext looks for errors in the context. 
It is not transitive, so errors in the reference authInfo or cluster configs are not included in this return +func validateContext(contextName string, context clientcmdapi.Context, config clientcmdapi.Config) []error { + validationErrors := make([]error, 0) + + if len(context.AuthInfo) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("user was not specified for context %q", contextName)) + } else if _, exists := config.AuthInfos[context.AuthInfo]; !exists { + validationErrors = append(validationErrors, fmt.Errorf("user %q was not found for context %q", context.AuthInfo, contextName)) + } + + if len(context.Cluster) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("cluster was not specified for context %q", contextName)) + } else if _, exists := config.Clusters[context.Cluster]; !exists { + validationErrors = append(validationErrors, fmt.Errorf("cluster %q was not found for context %q", context.Cluster, contextName)) + } + + if len(context.Namespace) != 0 { + if len(validation.IsDNS1123Label(context.Namespace)) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("namespace %q for context %q does not conform to the kubernetes DNS_LABEL rules", context.Namespace, contextName)) + } + } + + return validationErrors +} diff --git a/vendor/k8s.io/client-go/tools/metrics/OWNERS b/vendor/k8s.io/client-go/tools/metrics/OWNERS new file mode 100755 index 0000000000..ff51798071 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/metrics/OWNERS @@ -0,0 +1,7 @@ +reviewers: +- wojtek-t +- eparis +- krousey +- jayunit100 +- fgrzadkowski +- tmrts diff --git a/vendor/k8s.io/kubernetes/pkg/client/metrics/metrics.go b/vendor/k8s.io/client-go/tools/metrics/metrics.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/client/metrics/metrics.go rename to vendor/k8s.io/client-go/tools/metrics/metrics.go diff --git a/vendor/k8s.io/client-go/tools/record/OWNERS b/vendor/k8s.io/client-go/tools/record/OWNERS new file mode 100755 index 0000000000..4dd54bbce9 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/OWNERS @@ -0,0 +1,27 @@ +reviewers: +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- derekwaynecarr +- caesarxuchao +- vishh +- mikedanese +- liggitt +- nikhiljindal +- erictune +- pmorie +- dchen1107 +- saad-ali +- luxas +- yifan-gu +- eparis +- mwielgus +- timothysc +- jsafrane +- dims +- krousey +- a-robinson +- aveshagarwal +- resouer +- cjcullen diff --git a/vendor/k8s.io/kubernetes/pkg/client/record/doc.go b/vendor/k8s.io/client-go/tools/record/doc.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/client/record/doc.go rename to vendor/k8s.io/client-go/tools/record/doc.go diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go new file mode 100644 index 0000000000..26e036be36 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -0,0 +1,317 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package record + +import ( + "fmt" + "math/rand" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/pkg/api/v1" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/util/clock" + + "net/http" + + "github.com/golang/glog" +) + +const maxTriesPerEvent = 12 + +var defaultSleepDuration = 10 * time.Second + +const maxQueuedEvents = 1000 + +// EventSink knows how to store events (client.Client implements it.) +// EventSink must respect the namespace that will be embedded in 'event'. +// It is assumed that EventSink will return the same sorts of errors as +// pkg/client's REST client. +type EventSink interface { + Create(event *v1.Event) (*v1.Event, error) + Update(event *v1.Event) (*v1.Event, error) + Patch(oldEvent *v1.Event, data []byte) (*v1.Event, error) +} + +// EventRecorder knows how to record events on behalf of an EventSource. +type EventRecorder interface { + // Event constructs an event from the given information and puts it in the queue for sending. + // 'object' is the object this event is about. Event will make a reference-- or you may also + // pass a reference to the object directly. + // 'type' of this event, and can be one of Normal, Warning. New types could be added in future + // 'reason' is the reason this event is generated. 'reason' should be short and unique; it + // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used + // to automate handling of events, so imagine people writing switch statements to handle them. + // You want to make that easy. + // 'message' is intended to be human readable. + // + // The resulting event will be created in the same namespace as the reference object. + Event(object runtime.Object, eventtype, reason, message string) + + // Eventf is just like Event, but with Sprintf for the message field. + Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) + + // PastEventf is just like Eventf, but with an option to specify the event's 'timestamp' field. + PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) +} + +// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log. +type EventBroadcaster interface { + // StartEventWatcher starts sending events received from this EventBroadcaster to the given + // event handler function. The return value can be ignored or used to stop recording, if + // desired. + StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface + + // StartRecordingToSink starts sending events received from this EventBroadcaster to the given + // sink. The return value can be ignored or used to stop recording, if desired. + StartRecordingToSink(sink EventSink) watch.Interface + + // StartLogging starts sending events received from this EventBroadcaster to the given logging + // function. The return value can be ignored or used to stop recording, if desired. + StartLogging(logf func(format string, args ...interface{})) watch.Interface + + // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster + // with the event source set to the given event source. + NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder +} + +// Creates a new event broadcaster. 
+func NewBroadcaster() EventBroadcaster { + return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration} +} + +func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster { + return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration} +} + +type eventBroadcasterImpl struct { + *watch.Broadcaster + sleepDuration time.Duration +} + +// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. +// The return value can be ignored or used to stop recording, if desired. +// TODO: make me an object with parameterizable queue length and retry interval +func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface { + // The default math/rand package functions aren't thread safe, so create a + // new Rand object for each StartRecording call. + randGen := rand.New(rand.NewSource(time.Now().UnixNano())) + eventCorrelator := NewEventCorrelator(clock.RealClock{}) + return eventBroadcaster.StartEventWatcher( + func(event *v1.Event) { + recordToSink(sink, event, eventCorrelator, randGen, eventBroadcaster.sleepDuration) + }) +} + +func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, randGen *rand.Rand, sleepDuration time.Duration) { + // Make a copy before modification, because there could be multiple listeners. + // Events are safe to copy like this. + eventCopy := *event + event = &eventCopy + result, err := eventCorrelator.EventCorrelate(event) + if err != nil { + utilruntime.HandleError(err) + } + if result.Skip { + return + } + tries := 0 + for { + if recordEvent(sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) { + break + } + tries++ + if tries >= maxTriesPerEvent { + glog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) + break + } + // Randomize the first sleep so that various clients won't all be + // synced up if the master goes down. + if tries == 1 { + time.Sleep(time.Duration(float64(sleepDuration) * randGen.Float64())) + } else { + time.Sleep(sleepDuration) + } + } +} + +func isKeyNotFoundError(err error) bool { + statusErr, _ := err.(*errors.StatusError) + + if statusErr != nil && statusErr.Status().Code == http.StatusNotFound { + return true + } + + return false +} + +// recordEvent attempts to write event to a sink. It returns true if the event +// was successfully recorded or discarded, false if it should be retried. +// If updateExistingEvent is false, it creates a new event, otherwise it updates +// existing event. +func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool { + var newEvent *v1.Event + var err error + if updateExistingEvent { + newEvent, err = sink.Patch(event, patch) + } + // Update can fail because the event may have been removed and it no longer exists. + if !updateExistingEvent || (updateExistingEvent && isKeyNotFoundError(err)) { + // Making sure that ResourceVersion is empty on creation + event.ResourceVersion = "" + newEvent, err = sink.Create(event) + } + if err == nil { + // we need to update our event correlator with the server returned state to handle name/resourceversion + eventCorrelator.UpdateState(newEvent) + return true + } + + // If we can't contact the server, then hold everything while we keep trying. + // Otherwise, something about the event is malformed and we should abandon it. 
+ switch err.(type) { + case *restclient.RequestConstructionError: + // We will construct the request the same next time, so don't keep trying. + glog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) + return true + case *errors.StatusError: + if errors.IsAlreadyExists(err) { + glog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) + } else { + glog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) + } + return true + case *errors.UnexpectedObjectError: + // We don't expect this; it implies the server's response didn't match a + // known pattern. Go ahead and retry. + default: + // This case includes actual http transport errors. Go ahead and retry. + } + glog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) + return false +} + +// StartLogging starts sending events received from this EventBroadcaster to the given logging function. +// The return value can be ignored or used to stop recording, if desired. +func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface { + return eventBroadcaster.StartEventWatcher( + func(e *v1.Event) { + logf("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message) + }) +} + +// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function. +// The return value can be ignored or used to stop recording, if desired. +func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface { + watcher := eventBroadcaster.Watch() + go func() { + defer utilruntime.HandleCrash() + for { + watchEvent, open := <-watcher.ResultChan() + if !open { + return + } + event, ok := watchEvent.Object.(*v1.Event) + if !ok { + // This is all local, so there's no reason this should + // ever happen. + continue + } + eventHandler(event) + } + }() + return watcher +} + +// NewRecorder returns an EventRecorder that records events with the given event source. +func (eventBroadcaster *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder { + return &recorderImpl{scheme, source, eventBroadcaster.Broadcaster, clock.RealClock{}} +} + +type recorderImpl struct { + scheme *runtime.Scheme + source v1.EventSource + *watch.Broadcaster + clock clock.Clock +} + +func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp metav1.Time, eventtype, reason, message string) { + ref, err := v1.GetReference(recorder.scheme, object) + if err != nil { + glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. 
Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) + return + } + + if !validateEventType(eventtype) { + glog.Errorf("Unsupported event type: '%v'", eventtype) + return + } + + event := recorder.makeEvent(ref, eventtype, reason, message) + event.Source = recorder.source + + go func() { + // NOTE: events should be a non-blocking operation + defer utilruntime.HandleCrash() + recorder.Action(watch.Added, event) + }() +} + +func validateEventType(eventtype string) bool { + switch eventtype { + case v1.EventTypeNormal, v1.EventTypeWarning: + return true + } + return false +} + +func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) { + recorder.generateEvent(object, metav1.Now(), eventtype, reason, message) +} + +func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder *recorderImpl) PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.generateEvent(object, timestamp, eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, eventtype, reason, message string) *v1.Event { + t := metav1.Time{Time: recorder.clock.Now()} + namespace := ref.Namespace + if namespace == "" { + namespace = metav1.NamespaceDefault + } + return &v1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), + Namespace: namespace, + }, + InvolvedObject: *ref, + Reason: reason, + Message: message, + FirstTimestamp: t, + LastTimestamp: t, + Count: 1, + Type: eventtype, + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/record/events_cache.go b/vendor/k8s.io/client-go/tools/record/events_cache.go similarity index 88% rename from vendor/k8s.io/kubernetes/pkg/client/record/events_cache.go rename to vendor/k8s.io/client-go/tools/record/events_cache.go index 8ff65776cb..d5246df5b6 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/record/events_cache.go +++ b/vendor/k8s.io/client-go/tools/record/events_cache.go @@ -25,11 +25,11 @@ import ( "github.com/golang/groupcache/lru" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/util/clock" - "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/util/strategicpatch" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/util/clock" ) const ( @@ -42,7 +42,7 @@ const ( ) // getEventKey builds unique event key based on source, involvedObject, reason, message -func getEventKey(event *api.Event) string { +func getEventKey(event *v1.Event) string { return strings.Join([]string{ event.Source.Component, event.Source.Host, @@ -59,10 +59,10 @@ func getEventKey(event *api.Event) string { } // EventFilterFunc is a function that returns true if the event should be skipped -type EventFilterFunc func(event *api.Event) bool +type EventFilterFunc func(event *v1.Event) bool // DefaultEventFilterFunc returns false for all incoming events -func DefaultEventFilterFunc(event *api.Event) bool { +func DefaultEventFilterFunc(event *v1.Event) bool { return false } @@ -70,10 +70,10 @@ func DefaultEventFilterFunc(event *api.Event) bool { // It returns a tuple of the following: // aggregateKey - key the identifies the aggregate 
group to bucket this event // localKey - key that makes this event in the local group -type EventAggregatorKeyFunc func(event *api.Event) (aggregateKey string, localKey string) +type EventAggregatorKeyFunc func(event *v1.Event) (aggregateKey string, localKey string) // EventAggregatorByReasonFunc aggregates events by exact match on event.Source, event.InvolvedObject, event.Type and event.Reason -func EventAggregatorByReasonFunc(event *api.Event) (string, string) { +func EventAggregatorByReasonFunc(event *v1.Event) (string, string) { return strings.Join([]string{ event.Source.Component, event.Source.Host, @@ -89,10 +89,10 @@ func EventAggregatorByReasonFunc(event *api.Event) (string, string) { } // EventAggregatorMessageFunc is responsible for producing an aggregation message -type EventAggregatorMessageFunc func(event *api.Event) string +type EventAggregatorMessageFunc func(event *v1.Event) string // EventAggregratorByReasonMessageFunc returns an aggregate message by prefixing the incoming message -func EventAggregatorByReasonMessageFunc(event *api.Event) string { +func EventAggregatorByReasonMessageFunc(event *v1.Event) string { return "(events with common reason combined)" } @@ -138,13 +138,13 @@ type aggregateRecord struct { // if the size of this set exceeds the max, we know we need to aggregate localKeys sets.String // The last time at which the aggregate was recorded - lastTimestamp unversioned.Time + lastTimestamp metav1.Time } // EventAggregate identifies similar events and groups into a common event if required -func (e *EventAggregator) EventAggregate(newEvent *api.Event) (*api.Event, error) { +func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, error) { aggregateKey, localKey := e.keyFunc(newEvent) - now := unversioned.NewTime(e.clock.Now()) + now := metav1.NewTime(e.clock.Now()) record := aggregateRecord{localKeys: sets.NewString(), lastTimestamp: now} e.Lock() defer e.Unlock() @@ -171,8 +171,8 @@ func (e *EventAggregator) EventAggregate(newEvent *api.Event) (*api.Event, error record.localKeys.PopAny() // create a new aggregate event - eventCopy := &api.Event{ - ObjectMeta: api.ObjectMeta{ + eventCopy := &v1.Event{ + ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%v.%x", newEvent.InvolvedObject.Name, now.UnixNano()), Namespace: newEvent.Namespace, }, @@ -194,7 +194,7 @@ type eventLog struct { count int // The time at which the event was first recorded. 
- firstTimestamp unversioned.Time + firstTimestamp metav1.Time // The unique name of the first occurrence of this event name string @@ -216,7 +216,7 @@ func newEventLogger(lruCacheEntries int, clock clock.Clock) *eventLogger { } // eventObserve records the event, and determines if its frequency should update -func (e *eventLogger) eventObserve(newEvent *api.Event) (*api.Event, []byte, error) { +func (e *eventLogger) eventObserve(newEvent *v1.Event) (*v1.Event, []byte, error) { var ( patch []byte err error @@ -240,11 +240,11 @@ func (e *eventLogger) eventObserve(newEvent *api.Event) (*api.Event, []byte, err eventCopy2 := *event eventCopy2.Count = 0 - eventCopy2.LastTimestamp = unversioned.NewTime(time.Unix(0, 0)) + eventCopy2.LastTimestamp = metav1.NewTime(time.Unix(0, 0)) newData, _ := json.Marshal(event) oldData, _ := json.Marshal(eventCopy2) - patch, err = strategicpatch.CreateStrategicMergePatch(oldData, newData, event) + patch, err = strategicpatch.CreateTwoWayMergePatch(oldData, newData, event) } // record our new observation @@ -261,7 +261,7 @@ func (e *eventLogger) eventObserve(newEvent *api.Event) (*api.Event, []byte, err } // updateState updates its internal tracking information based on latest server state -func (e *eventLogger) updateState(event *api.Event) { +func (e *eventLogger) updateState(event *v1.Event) { key := getEventKey(event) e.Lock() defer e.Unlock() @@ -305,7 +305,7 @@ type EventCorrelator struct { // EventCorrelateResult is the result of a Correlate type EventCorrelateResult struct { // the event after correlation - Event *api.Event + Event *v1.Event // if provided, perform a strategic patch when updating the record on the server Patch []byte // if true, do no further processing of the event @@ -342,7 +342,7 @@ func NewEventCorrelator(clock clock.Clock) *EventCorrelator { } // EventCorrelate filters, aggregates, counts, and de-duplicates all incoming events -func (c *EventCorrelator) EventCorrelate(newEvent *api.Event) (*EventCorrelateResult, error) { +func (c *EventCorrelator) EventCorrelate(newEvent *v1.Event) (*EventCorrelateResult, error) { if c.filterFunc(newEvent) { return &EventCorrelateResult{Skip: true}, nil } @@ -355,6 +355,6 @@ func (c *EventCorrelator) EventCorrelate(newEvent *api.Event) (*EventCorrelateRe } // UpdateState based on the latest observed state from server -func (c *EventCorrelator) UpdateState(event *api.Event) { +func (c *EventCorrelator) UpdateState(event *v1.Event) { c.logger.updateState(event) } diff --git a/vendor/k8s.io/client-go/tools/record/fake.go b/vendor/k8s.io/client-go/tools/record/fake.go new file mode 100644 index 0000000000..c0e8eedbb7 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/fake.go @@ -0,0 +1,54 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package record + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// FakeRecorder is used as a fake during tests. It is thread safe. 
It is usable +// when created manually and not by NewFakeRecorder, however all events may be +// thrown away in this case. +type FakeRecorder struct { + Events chan string +} + +func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) { + if f.Events != nil { + f.Events <- fmt.Sprintf("%s %s %s", eventtype, reason, message) + } +} + +func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + if f.Events != nil { + f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) + } +} + +func (f *FakeRecorder) PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) { +} + +// NewFakeRecorder creates new fake event recorder with event channel with +// buffer of given size. +func NewFakeRecorder(bufferSize int) *FakeRecorder { + return &FakeRecorder{ + Events: make(chan string, bufferSize), + } +} diff --git a/vendor/k8s.io/client-go/transport/OWNERS b/vendor/k8s.io/client-go/transport/OWNERS new file mode 100755 index 0000000000..bf0ba5b9f9 --- /dev/null +++ b/vendor/k8s.io/client-go/transport/OWNERS @@ -0,0 +1,7 @@ +reviewers: +- smarterclayton +- wojtek-t +- deads2k +- liggitt +- krousey +- caesarxuchao diff --git a/vendor/k8s.io/kubernetes/pkg/client/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/client/transport/cache.go rename to vendor/k8s.io/client-go/transport/cache.go index eedfd3d78b..8d76def345 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/transport/cache.go +++ b/vendor/k8s.io/client-go/transport/cache.go @@ -23,7 +23,7 @@ import ( "sync" "time" - utilnet "k8s.io/kubernetes/pkg/util/net" + utilnet "k8s.io/apimachinery/pkg/util/net" ) // TlsTransportCache caches TLS http.RoundTrippers different configurations. The diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go new file mode 100644 index 0000000000..820594ba35 --- /dev/null +++ b/vendor/k8s.io/client-go/transport/config.go @@ -0,0 +1,95 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +import "net/http" + +// Config holds various options for establishing a transport. +type Config struct { + // UserAgent is an optional field that specifies the caller of this + // request. + UserAgent string + + // The base TLS configuration for this transport. + TLS TLSConfig + + // Username and password for basic authentication + Username string + Password string + + // Bearer token for authentication + BearerToken string + + // Impersonate is the config that this Config will impersonate using + Impersonate ImpersonationConfig + + // Transport may be used for custom HTTP behavior. This attribute may + // not be specified with the TLS client certificate options. Use + // WrapTransport for most client level operations. 
+ Transport http.RoundTripper + + // WrapTransport will be invoked for custom HTTP behavior after the + // underlying transport is initialized (either the transport created + // from TLSClientConfig, Transport, or http.DefaultTransport). The + // config may layer other RoundTrippers on top of the returned + // RoundTripper. + WrapTransport func(rt http.RoundTripper) http.RoundTripper +} + +// ImpersonationConfig has all the available impersonation options +type ImpersonationConfig struct { + // UserName matches user.Info.GetName() + UserName string + // Groups matches user.Info.GetGroups() + Groups []string + // Extra matches user.Info.GetExtra() + Extra map[string][]string +} + +// HasCA returns whether the configuration has a certificate authority or not. +func (c *Config) HasCA() bool { + return len(c.TLS.CAData) > 0 || len(c.TLS.CAFile) > 0 +} + +// HasBasicAuth returns whether the configuration has basic authentication or not. +func (c *Config) HasBasicAuth() bool { + return len(c.Username) != 0 +} + +// HasTokenAuth returns whether the configuration has token authentication or not. +func (c *Config) HasTokenAuth() bool { + return len(c.BearerToken) != 0 +} + +// HasCertAuth returns whether the configuration has certificate authentication or not. +func (c *Config) HasCertAuth() bool { + return len(c.TLS.CertData) != 0 || len(c.TLS.CertFile) != 0 +} + +// TLSConfig holds the information needed to set up a TLS transport. +type TLSConfig struct { + CAFile string // Path of the PEM-encoded server trusted root certificates. + CertFile string // Path of the PEM-encoded client certificate. + KeyFile string // Path of the PEM-encoded client key. + + Insecure bool // Server should be accessed without verifying the certificate. For testing only. + ServerName string // Override for the server name passed to the server for SNI and used to verify certificates. + + CAData []byte // Bytes of the PEM-encoded server trusted root certificates. Supercedes CAFile. + CertData []byte // Bytes of the PEM-encoded client certificate. Supercedes CertFile. + KeyData []byte // Bytes of the PEM-encoded client key. Supercedes KeyFile. +} diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go new file mode 100644 index 0000000000..a6f396fbb0 --- /dev/null +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -0,0 +1,436 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +import ( + "fmt" + "net/http" + "strings" + "time" + + "github.com/golang/glog" +) + +// HTTPWrappersForConfig wraps a round tripper with any relevant layered +// behavior from the config. Exposed to allow more clients that need HTTP-like +// behavior but then must hijack the underlying connection (like WebSocket or +// HTTP2 clients). Pure HTTP clients should use the RoundTripper returned from +// New. 
+func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) { + if config.WrapTransport != nil { + rt = config.WrapTransport(rt) + } + + rt = DebugWrappers(rt) + + // Set authentication wrappers + switch { + case config.HasBasicAuth() && config.HasTokenAuth(): + return nil, fmt.Errorf("username/password or bearer token may be set, but not both") + case config.HasTokenAuth(): + rt = NewBearerAuthRoundTripper(config.BearerToken, rt) + case config.HasBasicAuth(): + rt = NewBasicAuthRoundTripper(config.Username, config.Password, rt) + } + if len(config.UserAgent) > 0 { + rt = NewUserAgentRoundTripper(config.UserAgent, rt) + } + if len(config.Impersonate.UserName) > 0 || + len(config.Impersonate.Groups) > 0 || + len(config.Impersonate.Extra) > 0 { + rt = NewImpersonatingRoundTripper(config.Impersonate, rt) + } + return rt, nil +} + +// DebugWrappers wraps a round tripper and logs based on the current log level. +func DebugWrappers(rt http.RoundTripper) http.RoundTripper { + switch { + case bool(glog.V(9)): + rt = newDebuggingRoundTripper(rt, debugCurlCommand, debugURLTiming, debugResponseHeaders) + case bool(glog.V(8)): + rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus, debugResponseHeaders) + case bool(glog.V(7)): + rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus) + case bool(glog.V(6)): + rt = newDebuggingRoundTripper(rt, debugURLTiming) + } + + return rt +} + +type requestCanceler interface { + CancelRequest(*http.Request) +} + +type authProxyRoundTripper struct { + username string + groups []string + extra map[string][]string + + rt http.RoundTripper +} + +// NewAuthProxyRoundTripper provides a roundtripper which will add auth proxy fields to requests for +// authentication terminating proxy cases +// assuming you pull the user from the context: +// username is the user.Info.GetName() of the user +// groups is the user.Info.GetGroups() of the user +// extra is the user.Info.GetExtra() of the user +// extra can contain any additional information that the authenticator +// thought was interesting, for example authorization scopes. +// In order to faithfully round-trip through an impersonation flow, these keys +// MUST be lowercase. +func NewAuthProxyRoundTripper(username string, groups []string, extra map[string][]string, rt http.RoundTripper) http.RoundTripper { + return &authProxyRoundTripper{ + username: username, + groups: groups, + extra: extra, + rt: rt, + } +} + +func (rt *authProxyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + req = cloneRequest(req) + SetAuthProxyHeaders(req, rt.username, rt.groups, rt.extra) + + return rt.rt.RoundTrip(req) +} + +// SetAuthProxyHeaders stomps the auth proxy header fields. It mutates its argument. 
+func SetAuthProxyHeaders(req *http.Request, username string, groups []string, extra map[string][]string) { + req.Header.Del("X-Remote-User") + req.Header.Del("X-Remote-Group") + for key := range req.Header { + if strings.HasPrefix(strings.ToLower(key), strings.ToLower("X-Remote-Extra-")) { + req.Header.Del(key) + } + } + + req.Header.Set("X-Remote-User", username) + for _, group := range groups { + req.Header.Add("X-Remote-Group", group) + } + for key, values := range extra { + for _, value := range values { + req.Header.Add("X-Remote-Extra-"+key, value) + } + } +} + +func (rt *authProxyRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.rt.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *authProxyRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } + +type userAgentRoundTripper struct { + agent string + rt http.RoundTripper +} + +func NewUserAgentRoundTripper(agent string, rt http.RoundTripper) http.RoundTripper { + return &userAgentRoundTripper{agent, rt} +} + +func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("User-Agent")) != 0 { + return rt.rt.RoundTrip(req) + } + req = cloneRequest(req) + req.Header.Set("User-Agent", rt.agent) + return rt.rt.RoundTrip(req) +} + +func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.rt.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *userAgentRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } + +type basicAuthRoundTripper struct { + username string + password string + rt http.RoundTripper +} + +// NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a +// request unless it has already been set. +func NewBasicAuthRoundTripper(username, password string, rt http.RoundTripper) http.RoundTripper { + return &basicAuthRoundTripper{username, password, rt} +} + +func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("Authorization")) != 0 { + return rt.rt.RoundTrip(req) + } + req = cloneRequest(req) + req.SetBasicAuth(rt.username, rt.password) + return rt.rt.RoundTrip(req) +} + +func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.rt.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *basicAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } + +// These correspond to the headers used in pkg/apis/authentication. We don't want the package dependency, +// but you must not change the values. +const ( + // ImpersonateUserHeader is used to impersonate a particular user during an API server request + ImpersonateUserHeader = "Impersonate-User" + + // ImpersonateGroupHeader is used to impersonate a particular group during an API server request. + // It can be repeated multiplied times for multiple groups. + ImpersonateGroupHeader = "Impersonate-Group" + + // ImpersonateUserExtraHeaderPrefix is a prefix for a header used to impersonate an entry in the + // extra map[string][]string for user.Info. The key for the `extra` map is suffix. + // The same key can be repeated multiple times to have multiple elements in the slice under a single key. 
+ // For instance: + // Impersonate-Extra-Foo: one + // Impersonate-Extra-Foo: two + // results in extra["Foo"] = []string{"one", "two"} + ImpersonateUserExtraHeaderPrefix = "Impersonate-Extra-" +) + +type impersonatingRoundTripper struct { + impersonate ImpersonationConfig + delegate http.RoundTripper +} + +// NewImpersonatingRoundTripper will add an Act-As header to a request unless it has already been set. +func NewImpersonatingRoundTripper(impersonate ImpersonationConfig, delegate http.RoundTripper) http.RoundTripper { + return &impersonatingRoundTripper{impersonate, delegate} +} + +func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + // use the user header as marker for the rest. + if len(req.Header.Get(ImpersonateUserHeader)) != 0 { + return rt.delegate.RoundTrip(req) + } + req = cloneRequest(req) + req.Header.Set(ImpersonateUserHeader, rt.impersonate.UserName) + + for _, group := range rt.impersonate.Groups { + req.Header.Add(ImpersonateGroupHeader, group) + } + for k, vv := range rt.impersonate.Extra { + for _, v := range vv { + req.Header.Add(ImpersonateUserExtraHeaderPrefix+k, v) + } + } + + return rt.delegate.RoundTrip(req) +} + +func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.delegate.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.delegate } + +type bearerAuthRoundTripper struct { + bearer string + rt http.RoundTripper +} + +// NewBearerAuthRoundTripper adds the provided bearer token to a request +// unless the authorization header has already been set. +func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTripper { + return &bearerAuthRoundTripper{bearer, rt} +} + +func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("Authorization")) != 0 { + return rt.rt.RoundTrip(req) + } + + req = cloneRequest(req) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", rt.bearer)) + return rt.rt.RoundTrip(req) +} + +func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.rt.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *bearerAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. 
+func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} + +// requestInfo keeps track of information about a request/response combination +type requestInfo struct { + RequestHeaders http.Header + RequestVerb string + RequestURL string + + ResponseStatus string + ResponseHeaders http.Header + ResponseErr error + + Duration time.Duration +} + +// newRequestInfo creates a new RequestInfo based on an http request +func newRequestInfo(req *http.Request) *requestInfo { + return &requestInfo{ + RequestURL: req.URL.String(), + RequestVerb: req.Method, + RequestHeaders: req.Header, + } +} + +// complete adds information about the response to the requestInfo +func (r *requestInfo) complete(response *http.Response, err error) { + if err != nil { + r.ResponseErr = err + return + } + r.ResponseStatus = response.Status + r.ResponseHeaders = response.Header +} + +// toCurl returns a string that can be run as a command in a terminal (minus the body) +func (r *requestInfo) toCurl() string { + headers := "" + for key, values := range r.RequestHeaders { + for _, value := range values { + headers += fmt.Sprintf(` -H %q`, fmt.Sprintf("%s: %s", key, value)) + } + } + + return fmt.Sprintf("curl -k -v -X%s %s %s", r.RequestVerb, headers, r.RequestURL) +} + +// debuggingRoundTripper will display information about the requests passing +// through it based on what is configured +type debuggingRoundTripper struct { + delegatedRoundTripper http.RoundTripper + + levels map[debugLevel]bool +} + +type debugLevel int + +const ( + debugJustURL debugLevel = iota + debugURLTiming + debugCurlCommand + debugRequestHeaders + debugResponseStatus + debugResponseHeaders +) + +func newDebuggingRoundTripper(rt http.RoundTripper, levels ...debugLevel) *debuggingRoundTripper { + drt := &debuggingRoundTripper{ + delegatedRoundTripper: rt, + levels: make(map[debugLevel]bool, len(levels)), + } + for _, v := range levels { + drt.levels[v] = true + } + return drt +} + +func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.delegatedRoundTripper.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + reqInfo := newRequestInfo(req) + + if rt.levels[debugJustURL] { + glog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL) + } + if rt.levels[debugCurlCommand] { + glog.Infof("%s", reqInfo.toCurl()) + + } + if rt.levels[debugRequestHeaders] { + glog.Infof("Request Headers:") + for key, values := range reqInfo.RequestHeaders { + for _, value := range values { + glog.Infof(" %s: %s", key, value) + } + } + } + + startTime := time.Now() + response, err := rt.delegatedRoundTripper.RoundTrip(req) + reqInfo.Duration = time.Since(startTime) + + reqInfo.complete(response, err) + + if rt.levels[debugURLTiming] { + glog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + } + if rt.levels[debugResponseStatus] { + glog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + } + if rt.levels[debugResponseHeaders] { + glog.Infof("Response Headers:") + for key, values := range 
reqInfo.ResponseHeaders { + for _, value := range values { + glog.Infof(" %s: %s", key, value) + } + } + } + + return response, err +} + +func (rt *debuggingRoundTripper) WrappedRoundTripper() http.RoundTripper { + return rt.delegatedRoundTripper +} diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go new file mode 100644 index 0000000000..15be0a3e6b --- /dev/null +++ b/vendor/k8s.io/client-go/transport/transport.go @@ -0,0 +1,141 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" +) + +// New returns an http.RoundTripper that will provide the authentication +// or transport level security defined by the provided Config. +func New(config *Config) (http.RoundTripper, error) { + // Set transport level security + if config.Transport != nil && (config.HasCA() || config.HasCertAuth() || config.TLS.Insecure) { + return nil, fmt.Errorf("using a custom transport with TLS certificate options or the insecure flag is not allowed") + } + + var ( + rt http.RoundTripper + err error + ) + + if config.Transport != nil { + rt = config.Transport + } else { + rt, err = tlsCache.get(config) + if err != nil { + return nil, err + } + } + + return HTTPWrappersForConfig(config, rt) +} + +// TLSConfigFor returns a tls.Config that will provide the transport level security defined +// by the provided Config. Will return nil if no transport level security is requested. +func TLSConfigFor(c *Config) (*tls.Config, error) { + if !(c.HasCA() || c.HasCertAuth() || c.TLS.Insecure) { + return nil, nil + } + if c.HasCA() && c.TLS.Insecure { + return nil, fmt.Errorf("specifying a root certificates file with the insecure flag is not allowed") + } + if err := loadTLSFiles(c); err != nil { + return nil, err + } + + tlsConfig := &tls.Config{ + // Can't use SSLv3 because of POODLE and BEAST + // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher + // Can't use TLSv1.1 because of RC4 cipher usage + MinVersion: tls.VersionTLS12, + InsecureSkipVerify: c.TLS.Insecure, + ServerName: c.TLS.ServerName, + } + + if c.HasCA() { + tlsConfig.RootCAs = rootCertPool(c.TLS.CAData) + } + + if c.HasCertAuth() { + cert, err := tls.X509KeyPair(c.TLS.CertData, c.TLS.KeyData) + if err != nil { + return nil, err + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + + return tlsConfig, nil +} + +// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData, +// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are +// either populated or were empty to start. 
+func loadTLSFiles(c *Config) error { + var err error + c.TLS.CAData, err = dataFromSliceOrFile(c.TLS.CAData, c.TLS.CAFile) + if err != nil { + return err + } + + c.TLS.CertData, err = dataFromSliceOrFile(c.TLS.CertData, c.TLS.CertFile) + if err != nil { + return err + } + + c.TLS.KeyData, err = dataFromSliceOrFile(c.TLS.KeyData, c.TLS.KeyFile) + if err != nil { + return err + } + return nil +} + +// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, +// or an error if an error occurred reading the file +func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { + if len(data) > 0 { + return data, nil + } + if len(file) > 0 { + fileData, err := ioutil.ReadFile(file) + if err != nil { + return []byte{}, err + } + return fileData, nil + } + return nil, nil +} + +// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs". +// When caData is not empty, it will be the ONLY information used in the CertPool. +func rootCertPool(caData []byte) *x509.CertPool { + // What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go + // code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values + // It doesn't allow trusting either/or, but hopefully that won't be an issue + if len(caData) == 0 { + return nil + } + + // if we have caData, use it + certPool := x509.NewCertPool() + certPool.AppendCertsFromPEM(caData) + return certPool +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/util/cert/cert.go rename to vendor/k8s.io/client-go/util/cert/cert.go index fff5b38d63..6854d4152f 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/cert/cert.go +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -25,6 +25,7 @@ import ( "crypto/x509" "crypto/x509/pkix" "encoding/pem" + "errors" "fmt" "math" "math/big" @@ -42,6 +43,7 @@ type Config struct { CommonName string Organization []string AltNames AltNames + Usages []x509.ExtKeyUsage } // AltNames contains the domain names and IP addresses that will be added @@ -86,11 +88,17 @@ func NewSignedCert(cfg Config, key *rsa.PrivateKey, caCert *x509.Certificate, ca if err != nil { return nil, err } + if len(cfg.CommonName) == 0 { + return nil, errors.New("must specify a CommonName") + } + if len(cfg.Usages) == 0 { + return nil, errors.New("must specify at least one ExtKeyUsage") + } certTmpl := x509.Certificate{ Subject: pkix.Name{ CommonName: cfg.CommonName, - Organization: caCert.Subject.Organization, + Organization: cfg.Organization, }, DNSNames: cfg.AltNames.DNSNames, IPAddresses: cfg.AltNames.IPs, @@ -98,7 +106,7 @@ func NewSignedCert(cfg Config, key *rsa.PrivateKey, caCert *x509.Certificate, ca NotBefore: caCert.NotBefore, NotAfter: time.Now().Add(duration365d).UTC(), KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + ExtKeyUsage: cfg.Usages, } certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &certTmpl, caCert, key.Public(), caKey) if err != nil { @@ -120,7 +128,7 @@ func MakeEllipticPrivateKeyPEM() ([]byte, error) { } privateKeyPemBlock := &pem.Block{ - Type: "EC PRIVATE KEY", + Type: ECPrivateKeyBlockType, Bytes: derBytes, } return pem.EncodeToMemory(privateKeyPemBlock), nil @@ -165,13 +173,13 @@ func GenerateSelfSignedCertKey(host string, 
alternateIPs []net.IP, alternateDNS // Generate cert certBuffer := bytes.Buffer{} - if err := pem.Encode(&certBuffer, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { + if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: derBytes}); err != nil { return nil, nil, err } // Generate key keyBuffer := bytes.Buffer{} - if err := pem.Encode(&keyBuffer, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil { + if err := pem.Encode(&keyBuffer, &pem.Block{Type: RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil { return nil, nil, err } diff --git a/vendor/k8s.io/client-go/util/cert/csr.go b/vendor/k8s.io/client-go/util/cert/csr.go new file mode 100644 index 0000000000..280d249fea --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/csr.go @@ -0,0 +1,75 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cert + +import ( + cryptorand "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "net" +) + +// MakeCSR generates a PEM-encoded CSR using the supplied private key, subject, and SANs. +// All key types that are implemented via crypto.Signer are supported (This includes *rsa.PrivateKey and *ecdsa.PrivateKey.) +func MakeCSR(privateKey interface{}, subject *pkix.Name, dnsSANs []string, ipSANs []net.IP) (csr []byte, err error) { + template := &x509.CertificateRequest{ + Subject: *subject, + DNSNames: dnsSANs, + IPAddresses: ipSANs, + } + + return MakeCSRFromTemplate(privateKey, template) +} + +// MakeCSRFromTemplate generates a PEM-encoded CSR using the supplied private +// key and certificate request as a template. All key types that are +// implemented via crypto.Signer are supported (This includes *rsa.PrivateKey +// and *ecdsa.PrivateKey.) +func MakeCSRFromTemplate(privateKey interface{}, template *x509.CertificateRequest) ([]byte, error) { + t := *template + t.SignatureAlgorithm = sigType(privateKey) + + csrDER, err := x509.CreateCertificateRequest(cryptorand.Reader, &t, privateKey) + if err != nil { + return nil, err + } + + csrPemBlock := &pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: csrDER, + } + + return pem.EncodeToMemory(csrPemBlock), nil +} + +func sigType(privateKey interface{}) x509.SignatureAlgorithm { + // Customize the signature for RSA keys, depending on the key size + if privateKey, ok := privateKey.(*rsa.PrivateKey); ok { + keySize := privateKey.N.BitLen() + switch { + case keySize >= 4096: + return x509.SHA512WithRSA + case keySize >= 3072: + return x509.SHA384WithRSA + default: + return x509.SHA256WithRSA + } + } + return x509.UnknownSignatureAlgorithm +} diff --git a/vendor/k8s.io/client-go/util/cert/io.go b/vendor/k8s.io/client-go/util/cert/io.go new file mode 100644 index 0000000000..b6b6903835 --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/io.go @@ -0,0 +1,150 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cert + +import ( + "crypto/x509" + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +// CanReadCertAndKey returns true if the certificate and key files already exists, +// otherwise returns false. If lost one of cert and key, returns error. +func CanReadCertAndKey(certPath, keyPath string) (bool, error) { + certReadable := canReadFile(certPath) + keyReadable := canReadFile(keyPath) + + if certReadable == false && keyReadable == false { + return false, nil + } + + if certReadable == false { + return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", certPath) + } + + if keyReadable == false { + return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", keyPath) + } + + return true, nil +} + +// If the file represented by path exists and +// readable, returns true otherwise returns false. +func canReadFile(path string) bool { + f, err := os.Open(path) + if err != nil { + return false + } + + defer f.Close() + + return true +} + +// WriteCert writes the pem-encoded certificate data to certPath. +// The certificate file will be created with file mode 0644. +// If the certificate file already exists, it will be overwritten. +// The parent directory of the certPath will be created as needed with file mode 0755. +func WriteCert(certPath string, data []byte) error { + if err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil { + return err + } + if err := ioutil.WriteFile(certPath, data, os.FileMode(0644)); err != nil { + return err + } + return nil +} + +// WriteKey writes the pem-encoded key data to keyPath. +// The key file will be created with file mode 0600. +// If the key file already exists, it will be overwritten. +// The parent directory of the keyPath will be created as needed with file mode 0755. +func WriteKey(keyPath string, data []byte) error { + if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil { + return err + } + if err := ioutil.WriteFile(keyPath, data, os.FileMode(0600)); err != nil { + return err + } + return nil +} + +// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it +// can't find one, it will generate a new key and store it there. +func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) { + loadedData, err := ioutil.ReadFile(keyPath) + if err == nil { + return loadedData, false, err + } + if !os.IsNotExist(err) { + return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err) + } + + generatedData, err := MakeEllipticPrivateKeyPEM() + if err != nil { + return nil, false, fmt.Errorf("error generating key: %v", err) + } + if err := WriteKey(keyPath, generatedData); err != nil { + return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err) + } + return generatedData, true, nil +} + +// NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file. 
+// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates +func NewPool(filename string) (*x509.CertPool, error) { + certs, err := CertsFromFile(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} + +// CertsFromFile returns the x509.Certificates contained in the given PEM-encoded file. +// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates +func CertsFromFile(file string) ([]*x509.Certificate, error) { + pemBlock, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + certs, err := ParseCertsPEM(pemBlock) + if err != nil { + return nil, fmt.Errorf("error reading %s: %s", file, err) + } + return certs, nil +} + +// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file. +// Returns an error if the file could not be read or if the private key could not be parsed. +func PrivateKeyFromFile(file string) (interface{}, error) { + pemBlock, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + key, err := ParsePrivateKeyPEM(pemBlock) + if err != nil { + return nil, fmt.Errorf("error reading %s: %v", file, err) + } + return key, nil +} diff --git a/vendor/k8s.io/client-go/util/cert/pem.go b/vendor/k8s.io/client-go/util/cert/pem.go new file mode 100644 index 0000000000..899845857c --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/pem.go @@ -0,0 +1,138 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cert + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" +) + +const ( + // ECPrivateKeyBlockType is a possible value for pem.Block.Type. + ECPrivateKeyBlockType = "EC PRIVATE KEY" + // RSAPrivateKeyBlockType is a possible value for pem.Block.Type. + RSAPrivateKeyBlockType = "RSA PRIVATE KEY" + // CertificateBlockType is a possible value for pem.Block.Type. + CertificateBlockType = "CERTIFICATE" + // CertificateRequestBlockType is a possible value for pem.Block.Type. + CertificateRequestBlockType = "CERTIFICATE REQUEST" + // PrivateKeyBlockType is a possible value for pem.Block.Type. + PrivateKeyBlockType = "PRIVATE KEY" + // PublicKeyBlockType is a possible value for pem.Block.Type. 
+ PublicKeyBlockType = "PUBLIC KEY" +) + +// EncodePublicKeyPEM returns PEM-endcode public data +func EncodePublicKeyPEM(key *rsa.PublicKey) ([]byte, error) { + der, err := x509.MarshalPKIXPublicKey(key) + if err != nil { + return []byte{}, err + } + block := pem.Block{ + Type: PublicKeyBlockType, + Bytes: der, + } + return pem.EncodeToMemory(&block), nil +} + +// EncodePrivateKeyPEM returns PEM-encoded private key data +func EncodePrivateKeyPEM(key *rsa.PrivateKey) []byte { + block := pem.Block{ + Type: RSAPrivateKeyBlockType, + Bytes: x509.MarshalPKCS1PrivateKey(key), + } + return pem.EncodeToMemory(&block) +} + +// EncodeCertPEM returns PEM-endcoded certificate data +func EncodeCertPEM(cert *x509.Certificate) []byte { + block := pem.Block{ + Type: CertificateBlockType, + Bytes: cert.Raw, + } + return pem.EncodeToMemory(&block) +} + +// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data. +// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY" +func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) { + var privateKeyPemBlock *pem.Block + for { + privateKeyPemBlock, keyData = pem.Decode(keyData) + if privateKeyPemBlock == nil { + break + } + + switch privateKeyPemBlock.Type { + case ECPrivateKeyBlockType: + // ECDSA Private Key in ASN.1 format + if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil { + return key, nil + } + case RSAPrivateKeyBlockType: + // RSA Private Key in PKCS#1 format + if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil { + return key, nil + } + case PrivateKeyBlockType: + // RSA or ECDSA Private Key in unencrypted PKCS#8 format + if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil { + return key, nil + } + } + + // tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks + // originally, only the first PEM block was parsed and expected to be a key block + } + + // we read all the PEM blocks and didn't recognize one + return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key") +} + +// ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array +// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates +func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { + ok := false + certs := []*x509.Certificate{} + for len(pemCerts) > 0 { + var block *pem.Block + block, pemCerts = pem.Decode(pemCerts) + if block == nil { + break + } + // Only use PEM "CERTIFICATE" blocks without extra headers + if block.Type != CertificateBlockType || len(block.Headers) != 0 { + continue + } + + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return certs, err + } + + certs = append(certs, cert) + ok = true + } + + if !ok { + return certs, errors.New("could not read any certificates") + } + return certs, nil +} diff --git a/vendor/k8s.io/client-go/util/clock/clock.go b/vendor/k8s.io/client-go/util/clock/clock.go new file mode 100644 index 0000000000..c303a212a0 --- /dev/null +++ b/vendor/k8s.io/client-go/util/clock/clock.go @@ -0,0 +1,327 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clock + +import ( + "sync" + "time" +) + +// Clock allows for injecting fake or real clocks into code that +// needs to do arbitrary things based on time. +type Clock interface { + Now() time.Time + Since(time.Time) time.Duration + After(d time.Duration) <-chan time.Time + NewTimer(d time.Duration) Timer + Sleep(d time.Duration) + Tick(d time.Duration) <-chan time.Time +} + +var ( + _ = Clock(RealClock{}) + _ = Clock(&FakeClock{}) + _ = Clock(&IntervalClock{}) +) + +// RealClock really calls time.Now() +type RealClock struct{} + +// Now returns the current time. +func (RealClock) Now() time.Time { + return time.Now() +} + +// Since returns time since the specified timestamp. +func (RealClock) Since(ts time.Time) time.Duration { + return time.Since(ts) +} + +// Same as time.After(d). +func (RealClock) After(d time.Duration) <-chan time.Time { + return time.After(d) +} + +func (RealClock) NewTimer(d time.Duration) Timer { + return &realTimer{ + timer: time.NewTimer(d), + } +} + +func (RealClock) Tick(d time.Duration) <-chan time.Time { + return time.Tick(d) +} + +func (RealClock) Sleep(d time.Duration) { + time.Sleep(d) +} + +// FakeClock implements Clock, but returns an arbitrary time. +type FakeClock struct { + lock sync.RWMutex + time time.Time + + // waiters are waiting for the fake time to pass their specified time + waiters []fakeClockWaiter +} + +type fakeClockWaiter struct { + targetTime time.Time + stepInterval time.Duration + skipIfBlocked bool + destChan chan time.Time + fired bool +} + +func NewFakeClock(t time.Time) *FakeClock { + return &FakeClock{ + time: t, + } +} + +// Now returns f's time. +func (f *FakeClock) Now() time.Time { + f.lock.RLock() + defer f.lock.RUnlock() + return f.time +} + +// Since returns time since the time in f. +func (f *FakeClock) Since(ts time.Time) time.Duration { + f.lock.RLock() + defer f.lock.RUnlock() + return f.time.Sub(ts) +} + +// Fake version of time.After(d). +func (f *FakeClock) After(d time.Duration) <-chan time.Time { + f.lock.Lock() + defer f.lock.Unlock() + stopTime := f.time.Add(d) + ch := make(chan time.Time, 1) // Don't block! + f.waiters = append(f.waiters, fakeClockWaiter{ + targetTime: stopTime, + destChan: ch, + }) + return ch +} + +// Fake version of time.NewTimer(d). +func (f *FakeClock) NewTimer(d time.Duration) Timer { + f.lock.Lock() + defer f.lock.Unlock() + stopTime := f.time.Add(d) + ch := make(chan time.Time, 1) // Don't block! 
+ timer := &fakeTimer{ + fakeClock: f, + waiter: fakeClockWaiter{ + targetTime: stopTime, + destChan: ch, + }, + } + f.waiters = append(f.waiters, timer.waiter) + return timer +} + +func (f *FakeClock) Tick(d time.Duration) <-chan time.Time { + f.lock.Lock() + defer f.lock.Unlock() + tickTime := f.time.Add(d) + ch := make(chan time.Time, 1) // hold one tick + f.waiters = append(f.waiters, fakeClockWaiter{ + targetTime: tickTime, + stepInterval: d, + skipIfBlocked: true, + destChan: ch, + }) + + return ch +} + +// Move clock by Duration, notify anyone that's called After, Tick, or NewTimer +func (f *FakeClock) Step(d time.Duration) { + f.lock.Lock() + defer f.lock.Unlock() + f.setTimeLocked(f.time.Add(d)) +} + +// Sets the time. +func (f *FakeClock) SetTime(t time.Time) { + f.lock.Lock() + defer f.lock.Unlock() + f.setTimeLocked(t) +} + +// Actually changes the time and checks any waiters. f must be write-locked. +func (f *FakeClock) setTimeLocked(t time.Time) { + f.time = t + newWaiters := make([]fakeClockWaiter, 0, len(f.waiters)) + for i := range f.waiters { + w := &f.waiters[i] + if !w.targetTime.After(t) { + + if w.skipIfBlocked { + select { + case w.destChan <- t: + w.fired = true + default: + } + } else { + w.destChan <- t + w.fired = true + } + + if w.stepInterval > 0 { + for !w.targetTime.After(t) { + w.targetTime = w.targetTime.Add(w.stepInterval) + } + newWaiters = append(newWaiters, *w) + } + + } else { + newWaiters = append(newWaiters, f.waiters[i]) + } + } + f.waiters = newWaiters +} + +// Returns true if After has been called on f but not yet satisfied (so you can +// write race-free tests). +func (f *FakeClock) HasWaiters() bool { + f.lock.RLock() + defer f.lock.RUnlock() + return len(f.waiters) > 0 +} + +func (f *FakeClock) Sleep(d time.Duration) { + f.Step(d) +} + +// IntervalClock implements Clock, but each invocation of Now steps the clock forward the specified duration +type IntervalClock struct { + Time time.Time + Duration time.Duration +} + +// Now returns i's time. +func (i *IntervalClock) Now() time.Time { + i.Time = i.Time.Add(i.Duration) + return i.Time +} + +// Since returns time since the time in i. +func (i *IntervalClock) Since(ts time.Time) time.Duration { + return i.Time.Sub(ts) +} + +// Unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) After(d time.Duration) <-chan time.Time { + panic("IntervalClock doesn't implement After") +} + +// Unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) NewTimer(d time.Duration) Timer { + panic("IntervalClock doesn't implement NewTimer") +} + +// Unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) Tick(d time.Duration) <-chan time.Time { + panic("IntervalClock doesn't implement Tick") +} + +func (*IntervalClock) Sleep(d time.Duration) { + panic("IntervalClock doesn't implement Sleep") +} + +// Timer allows for injecting fake or real timers into code that +// needs to do arbitrary things based on time. +type Timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +var ( + _ = Timer(&realTimer{}) + _ = Timer(&fakeTimer{}) +) + +// realTimer is backed by an actual time.Timer. +type realTimer struct { + timer *time.Timer +} + +// C returns the underlying timer's channel. +func (r *realTimer) C() <-chan time.Time { + return r.timer.C +} + +// Stop calls Stop() on the underlying timer. 
+func (r *realTimer) Stop() bool { + return r.timer.Stop() +} + +// Reset calls Reset() on the underlying timer. +func (r *realTimer) Reset(d time.Duration) bool { + return r.timer.Reset(d) +} + +// fakeTimer implements Timer based on a FakeClock. +type fakeTimer struct { + fakeClock *FakeClock + waiter fakeClockWaiter +} + +// C returns the channel that notifies when this timer has fired. +func (f *fakeTimer) C() <-chan time.Time { + return f.waiter.destChan +} + +// Stop stops the timer and returns true if the timer has not yet fired, or false otherwise. +func (f *fakeTimer) Stop() bool { + f.fakeClock.lock.Lock() + defer f.fakeClock.lock.Unlock() + + newWaiters := make([]fakeClockWaiter, 0, len(f.fakeClock.waiters)) + for i := range f.fakeClock.waiters { + w := &f.fakeClock.waiters[i] + if w != &f.waiter { + newWaiters = append(newWaiters, *w) + } + } + + f.fakeClock.waiters = newWaiters + + return !f.waiter.fired +} + +// Reset resets the timer to the fake clock's "now" + d. It returns true if the timer has not yet +// fired, or false otherwise. +func (f *fakeTimer) Reset(d time.Duration) bool { + f.fakeClock.lock.Lock() + defer f.fakeClock.lock.Unlock() + + active := !f.waiter.fired + + f.waiter.fired = false + f.waiter.targetTime = f.fakeClock.time.Add(d) + + return active +} diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go new file mode 100644 index 0000000000..030b15a560 --- /dev/null +++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go @@ -0,0 +1,149 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package flowcontrol + +import ( + "sync" + "time" + + "k8s.io/client-go/util/clock" + "k8s.io/client-go/util/integer" +) + +type backoffEntry struct { + backoff time.Duration + lastUpdate time.Time +} + +type Backoff struct { + sync.Mutex + Clock clock.Clock + defaultDuration time.Duration + maxDuration time.Duration + perItemBackoff map[string]*backoffEntry +} + +func NewFakeBackOff(initial, max time.Duration, tc *clock.FakeClock) *Backoff { + return &Backoff{ + perItemBackoff: map[string]*backoffEntry{}, + Clock: tc, + defaultDuration: initial, + maxDuration: max, + } +} + +func NewBackOff(initial, max time.Duration) *Backoff { + return &Backoff{ + perItemBackoff: map[string]*backoffEntry{}, + Clock: clock.RealClock{}, + defaultDuration: initial, + maxDuration: max, + } +} + +// Get the current backoff Duration +func (p *Backoff) Get(id string) time.Duration { + p.Lock() + defer p.Unlock() + var delay time.Duration + entry, ok := p.perItemBackoff[id] + if ok { + delay = entry.backoff + } + return delay +} + +// move backoff to the next mark, capping at maxDuration +func (p *Backoff) Next(id string, eventTime time.Time) { + p.Lock() + defer p.Unlock() + entry, ok := p.perItemBackoff[id] + if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { + entry = p.initEntryUnsafe(id) + } else { + delay := entry.backoff * 2 // exponential + entry.backoff = time.Duration(integer.Int64Min(int64(delay), int64(p.maxDuration))) + } + entry.lastUpdate = p.Clock.Now() +} + +// Reset forces clearing of all backoff data for a given key. +func (p *Backoff) Reset(id string) { + p.Lock() + defer p.Unlock() + delete(p.perItemBackoff, id) +} + +// Returns True if the elapsed time since eventTime is smaller than the current backoff window +func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool { + p.Lock() + defer p.Unlock() + entry, ok := p.perItemBackoff[id] + if !ok { + return false + } + if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { + return false + } + return p.Clock.Now().Sub(eventTime) < entry.backoff +} + +// Returns True if time since lastupdate is less than the current backoff window. +func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool { + p.Lock() + defer p.Unlock() + entry, ok := p.perItemBackoff[id] + if !ok { + return false + } + if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { + return false + } + return eventTime.Sub(entry.lastUpdate) < entry.backoff +} + +// Garbage collect records that have aged past maxDuration. Backoff users are expected +// to invoke this periodically. 
+func (p *Backoff) GC() { + p.Lock() + defer p.Unlock() + now := p.Clock.Now() + for id, entry := range p.perItemBackoff { + if now.Sub(entry.lastUpdate) > p.maxDuration*2 { + // GC when entry has not been updated for 2*maxDuration + delete(p.perItemBackoff, id) + } + } +} + +func (p *Backoff) DeleteEntry(id string) { + p.Lock() + defer p.Unlock() + delete(p.perItemBackoff, id) +} + +// Take a lock on *Backoff, before calling initEntryUnsafe +func (p *Backoff) initEntryUnsafe(id string) *backoffEntry { + entry := &backoffEntry{backoff: p.defaultDuration} + p.perItemBackoff[id] = entry + return entry +} + +// After 2*maxDuration we restart the backoff factor to the beginning +func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool { + return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/throttle.go b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/util/flowcontrol/throttle.go rename to vendor/k8s.io/client-go/util/flowcontrol/throttle.go diff --git a/vendor/k8s.io/kubernetes/pkg/util/homedir/homedir.go b/vendor/k8s.io/client-go/util/homedir/homedir.go similarity index 87% rename from vendor/k8s.io/kubernetes/pkg/util/homedir/homedir.go rename to vendor/k8s.io/client-go/util/homedir/homedir.go index 403475491a..816db57f59 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/homedir/homedir.go +++ b/vendor/k8s.io/client-go/util/homedir/homedir.go @@ -24,6 +24,13 @@ import ( // HomeDir returns the home directory for the current user func HomeDir() string { if runtime.GOOS == "windows" { + + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); len(home) > 0 { + if _, err := os.Stat(home); err == nil { + return home + } + } if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 { homeDir := homeDrive + homePath if _, err := os.Stat(homeDir); err == nil { diff --git a/vendor/k8s.io/kubernetes/pkg/util/integer/integer.go b/vendor/k8s.io/client-go/util/integer/integer.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/util/integer/integer.go rename to vendor/k8s.io/client-go/util/integer/integer.go diff --git a/vendor/k8s.io/kubernetes/pkg/util/workqueue/default_rate_limiters.go b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/util/workqueue/default_rate_limiters.go rename to vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go diff --git a/vendor/k8s.io/kubernetes/pkg/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/util/workqueue/delaying_queue.go rename to vendor/k8s.io/client-go/util/workqueue/delaying_queue.go index eaa6b5bb43..593ad9ad41 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/workqueue/delaying_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -20,8 +20,8 @@ import ( "sort" "time" - "k8s.io/kubernetes/pkg/util/clock" - utilruntime "k8s.io/kubernetes/pkg/util/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/util/clock" ) // DelayingInterface is an Interface that can Add an item at a later time. 
This makes it easier to diff --git a/vendor/k8s.io/kubernetes/pkg/util/workqueue/doc.go b/vendor/k8s.io/client-go/util/workqueue/doc.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/util/workqueue/doc.go rename to vendor/k8s.io/client-go/util/workqueue/doc.go diff --git a/vendor/k8s.io/kubernetes/pkg/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/util/workqueue/metrics.go rename to vendor/k8s.io/client-go/util/workqueue/metrics.go diff --git a/vendor/k8s.io/kubernetes/pkg/util/workqueue/parallelizer.go b/vendor/k8s.io/client-go/util/workqueue/parallelizer.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/util/workqueue/parallelizer.go rename to vendor/k8s.io/client-go/util/workqueue/parallelizer.go index b66cebe698..be668c4233 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/workqueue/parallelizer.go +++ b/vendor/k8s.io/client-go/util/workqueue/parallelizer.go @@ -19,7 +19,7 @@ package workqueue import ( "sync" - utilruntime "k8s.io/kubernetes/pkg/util/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) type DoWorkPieceFunc func(piece int) diff --git a/vendor/k8s.io/kubernetes/pkg/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/util/workqueue/queue.go rename to vendor/k8s.io/client-go/util/workqueue/queue.go diff --git a/vendor/k8s.io/kubernetes/pkg/util/workqueue/rate_limitting_queue.go b/vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/util/workqueue/rate_limitting_queue.go rename to vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go diff --git a/vendor/k8s.io/kubernetes/pkg/util/workqueue/timed_queue.go b/vendor/k8s.io/client-go/util/workqueue/timed_queue.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/util/workqueue/timed_queue.go rename to vendor/k8s.io/client-go/util/workqueue/timed_queue.go diff --git a/vendor/k8s.io/kubernetes/LICENSE b/vendor/k8s.io/kubernetes/LICENSE index 00b2401109..d645695673 100644 --- a/vendor/k8s.io/kubernetes/LICENSE +++ b/vendor/k8s.io/kubernetes/LICENSE @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2014 The Kubernetes Authors. + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
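// --- Illustrative sketch, not part of the vendored diff. The hunks above add
// k8s.io/client-go/util/clock and k8s.io/client-go/util/flowcontrol (and rename the
// throttle/workqueue helpers to the same tree). This is a minimal, assumed usage
// example of the relocated Backoff driven by the fake clock, exactly as vendored;
// the item id and durations are arbitrary.
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/clock"
	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	fc := clock.NewFakeClock(time.Now())
	b := flowcontrol.NewFakeBackOff(1*time.Second, 8*time.Second, fc)

	id := "item-1"
	start := fc.Now()

	b.Next(id, start)                          // first failure: per-item backoff starts at 1s
	fmt.Println(b.Get(id))                     // 1s
	fmt.Println(b.IsInBackOffSince(id, start)) // true: no fake time has passed yet

	fc.Step(2 * time.Second)                   // advance the fake clock past the 1s window
	fmt.Println(b.IsInBackOffSince(id, start)) // false: 2s elapsed, backoff window was 1s

	b.Next(id, fc.Now())                       // second failure doubles the delay, capped at 8s
	fmt.Println(b.Get(id))                     // 2s
}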
diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/BUILD b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/BUILD deleted file mode 100644 index 52e63444b4..0000000000 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/BUILD +++ /dev/null @@ -1,27 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "env.go", - "register.go", - "types.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/runtime:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/doc.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/doc.go deleted file mode 100644 index 973b2e8c7b..0000000000 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +groupName=kubeadm.k8s.io -package kubeadm diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/env.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/env.go deleted file mode 100644 index 687e89edc7..0000000000 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/env.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubeadm - -import ( - "fmt" - "os" - "runtime" - "strings" -) - -var GlobalEnvParams = SetEnvParams() - -// TODO(phase2) use componentconfig -// we need some params for testing etc, let's keep these hidden for now -func SetEnvParams() *EnvParams { - - envParams := map[string]string{ - // TODO(phase1+): Mode prefix and host_pki_path to another place as constants, and use them everywhere - // Right now they're used here and there, but not consequently - "kubernetes_dir": "/etc/kubernetes", - "host_pki_path": "/etc/kubernetes/pki", - "host_etcd_path": "/var/lib/etcd", - "hyperkube_image": "", - "discovery_image": fmt.Sprintf("gcr.io/google_containers/kube-discovery-%s:%s", runtime.GOARCH, "1.0"), - "etcd_image": "", - "component_loglevel": "--v=2", - } - - for k := range envParams { - if v := os.Getenv(fmt.Sprintf("KUBE_%s", strings.ToUpper(k))); v != "" { - envParams[k] = v - } - } - - return &EnvParams{ - KubernetesDir: envParams["kubernetes_dir"], - HostPKIPath: envParams["host_pki_path"], - HostEtcdPath: envParams["host_etcd_path"], - HyperkubeImage: envParams["hyperkube_image"], - DiscoveryImage: envParams["discovery_image"], - EtcdImage: envParams["etcd_image"], - ComponentLoglevel: envParams["component_loglevel"], - } -} diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/install/BUILD b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/install/BUILD deleted file mode 100644 index 256c30e79c..0000000000 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/install/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "install.go", - ], - tags = ["automanaged"], - deps = [ - "//cmd/kubeadm/app/apis/kubeadm:go_default_library", - "//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library", - "//pkg/apimachinery/announced:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/install/doc.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/install/doc.go deleted file mode 100644 index 0ca2c59db7..0000000000 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/install/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package install diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/install/install.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/install/install.go deleted file mode 100644 index e27b629117..0000000000 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/install/install.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package install - -import ( - "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" - "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1" - "k8s.io/kubernetes/pkg/apimachinery/announced" -) - -func init() { - if err := announced.NewGroupMetaFactory( - &announced.GroupMetaFactoryArgs{ - GroupName: kubeadm.GroupName, - VersionPreferenceOrder: []string{v1alpha1.SchemeGroupVersion.Version}, - ImportPrefix: "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm", - AddInternalObjectsToScheme: kubeadm.AddToScheme, - }, - announced.VersionToSchemeFunc{ - v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme, - }, - ).Announce().RegisterAndEnable(); err != nil { - panic(err) - } -} diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/register.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/register.go deleted file mode 100644 index 77211e9c0e..0000000000 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/register.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubeadm - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// GroupName is the group name use in this package -const GroupName = "kubeadm.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &MasterConfiguration{}, - &NodeConfiguration{}, - &ClusterInfo{}, - &api.ListOptions{}, - &api.DeleteOptions{}, - &api.ExportOptions{}, - ) - return nil -} - -func (obj *MasterConfiguration) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *NodeConfiguration) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *ClusterInfo) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/types.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/types.go deleted file mode 100644 index 22ffbafb7d..0000000000 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/types.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubeadm - -import "k8s.io/kubernetes/pkg/api/unversioned" - -type EnvParams struct { - KubernetesDir string - HostPKIPath string - HostEtcdPath string - HyperkubeImage string - DiscoveryImage string - EtcdImage string - ComponentLoglevel string -} - -type MasterConfiguration struct { - unversioned.TypeMeta - - Secrets Secrets - API API - Discovery Discovery - Etcd Etcd - Networking Networking - KubernetesVersion string - CloudProvider string -} - -type API struct { - AdvertiseAddresses []string - ExternalDNSNames []string - BindPort int32 -} - -type Discovery struct { - BindPort int32 -} - -type Networking struct { - ServiceSubnet string - PodSubnet string - DNSDomain string -} - -type Etcd struct { - Endpoints []string - CAFile string - CertFile string - KeyFile string -} - -type Secrets struct { - GivenToken string // dot-separated `.` set by the user - TokenID string // optional on master side, will be generated if not specified - Token []byte // optional on master side, will be generated if not specified - BearerToken string // set based on Token -} - -type NodeConfiguration struct { - unversioned.TypeMeta - - MasterAddresses []string - Secrets Secrets - APIPort int32 - DiscoveryPort int32 -} - -// ClusterInfo TODO add description -type ClusterInfo struct { - unversioned.TypeMeta - // TODO(phase1+) this may become simply `api.Config` - CertificateAuthorities []string `json:"certificateAuthorities"` - Endpoints []string `json:"endpoints"` -} diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/BUILD deleted file mode 100644 index 0ce0637bf5..0000000000 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/BUILD +++ /dev/null @@ -1,28 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "defaults.go", - "doc.go", - "register.go", - "types.go", - "zz_generated.defaults.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/runtime:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go deleted file mode 100644 index 1c44d2dbb9..0000000000 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "k8s.io/kubernetes/pkg/runtime" -) - -const ( - DefaultServiceDNSDomain = "cluster.local" - DefaultServicesSubnet = "10.96.0.0/12" - DefaultKubernetesVersion = "v1.4.4" - DefaultAPIBindPort = 6443 - DefaultDiscoveryBindPort = 9898 -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - RegisterDefaults(scheme) - return scheme.AddDefaultingFuncs( - SetDefaults_MasterConfiguration, - SetDefaults_NodeConfiguration, - ) -} - -func SetDefaults_MasterConfiguration(obj *MasterConfiguration) { - if obj.KubernetesVersion == "" { - obj.KubernetesVersion = DefaultKubernetesVersion - } - - if obj.API.BindPort == 0 { - obj.API.BindPort = DefaultAPIBindPort - } - - if obj.Discovery.BindPort == 0 { - obj.Discovery.BindPort = DefaultDiscoveryBindPort - } - - if obj.Networking.ServiceSubnet == "" { - obj.Networking.ServiceSubnet = DefaultServicesSubnet - } - - if obj.Networking.DNSDomain == "" { - obj.Networking.DNSDomain = DefaultServiceDNSDomain - } -} - -func SetDefaults_NodeConfiguration(obj *NodeConfiguration) { - if obj.APIPort == 0 { - obj.APIPort = DefaultAPIBindPort - } - - if obj.DiscoveryPort == 0 { - obj.DiscoveryPort = DefaultDiscoveryBindPort - } -} diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/doc.go deleted file mode 100644 index 7ca9c7c2e1..0000000000 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:defaulter-gen=TypeMeta -// +groupName=kubeadm.k8s.io -package v1alpha1 diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/register.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/register.go deleted file mode 100644 index 72af07e938..0000000000 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/register.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" -) - -// GroupName is the group name use in this package -const GroupName = "kubeadm.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1alpha1"} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &MasterConfiguration{}, - &NodeConfiguration{}, - &ClusterInfo{}, - &v1.ListOptions{}, - &v1.DeleteOptions{}, - &v1.ExportOptions{}, - ) - return nil -} - -func (obj *MasterConfiguration) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *NodeConfiguration) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *ClusterInfo) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/types.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/types.go deleted file mode 100644 index 59c53e4b45..0000000000 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/types.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import "k8s.io/kubernetes/pkg/api/unversioned" - -type MasterConfiguration struct { - unversioned.TypeMeta `json:",inline"` - - Secrets Secrets `json:"secrets"` - API API `json:"api"` - Etcd Etcd `json:"etcd"` - Discovery Discovery `json:"discovery"` - Networking Networking `json:"networking"` - KubernetesVersion string `json:"kubernetesVersion"` - CloudProvider string `json:"cloudProvider"` -} - -type API struct { - AdvertiseAddresses []string `json:"advertiseAddresses"` - ExternalDNSNames []string `json:"externalDNSNames"` - BindPort int32 `json:"bindPort"` -} - -type Discovery struct { - BindPort int32 `json:"bindPort"` -} - -type Networking struct { - ServiceSubnet string `json:"serviceSubnet"` - PodSubnet string `json:"podSubnet"` - DNSDomain string `json:"dnsDomain"` -} - -type Etcd struct { - Endpoints []string `json:"endpoints"` - CAFile string `json:"caFile"` - CertFile string `json:"certFile"` - KeyFile string `json:"keyFile"` -} - -type Secrets struct { - GivenToken string `json:"givenToken"` // dot-separated `.` set by the user - TokenID string `json:"tokenID"` // optional on master side, will be generated if not specified - Token []byte `json:"token"` // optional on master side, will be generated if not specified - BearerToken string `json:"bearerToken"` // set based on Token -} - -type NodeConfiguration struct { - unversioned.TypeMeta `json:",inline"` - - MasterAddresses []string `json:"masterAddresses"` - Secrets Secrets `json:"secrets"` - APIPort int32 `json:"apiPort"` - DiscoveryPort int32 `json:"discoveryPort"` -} - -// ClusterInfo TODO add description -type ClusterInfo struct { - unversioned.TypeMeta `json:",inline"` - // TODO(phase1+) this may become simply `api.Config` - CertificateAuthorities []string `json:"certificateAuthorities"` - Endpoints []string `json:"endpoints"` -} diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.defaults.go deleted file mode 100644 index ead6c7e92b..0000000000 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.defaults.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by defaulter-gen. Do not edit it manually! - -package v1alpha1 - -import ( - runtime "k8s.io/kubernetes/pkg/runtime" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. 
-func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&MasterConfiguration{}, func(obj interface{}) { SetObjectDefaults_MasterConfiguration(obj.(*MasterConfiguration)) }) - scheme.AddTypeDefaultingFunc(&NodeConfiguration{}, func(obj interface{}) { SetObjectDefaults_NodeConfiguration(obj.(*NodeConfiguration)) }) - return nil -} - -func SetObjectDefaults_MasterConfiguration(in *MasterConfiguration) { - SetDefaults_MasterConfiguration(in) -} - -func SetObjectDefaults_NodeConfiguration(in *NodeConfiguration) { - SetDefaults_NodeConfiguration(in) -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/BUILD b/vendor/k8s.io/kubernetes/federation/apis/federation/BUILD deleted file mode 100644 index d12e144d53..0000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/BUILD +++ /dev/null @@ -1,31 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.generated.go", - "types.go", - "zz_generated.deepcopy.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//vendor:github.com/ugorji/go/codec", - ], -) diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/doc.go b/vendor/k8s.io/kubernetes/federation/apis/federation/doc.go deleted file mode 100644 index 7a45fb7bb8..0000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// +k8s:deepcopy-gen=package,register - -package federation diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/install/BUILD b/vendor/k8s.io/kubernetes/federation/apis/federation/install/BUILD deleted file mode 100644 index 75c93828ae..0000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/install/BUILD +++ /dev/null @@ -1,44 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["install.go"], - tags = ["automanaged"], - deps = [ - "//federation/apis/federation:go_default_library", - "//federation/apis/federation/v1beta1:go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apimachinery:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/sets:go_default_library", - "//vendor:github.com/golang/glog", - ], -) - -go_test( - name = "go_default_test", - srcs = ["install_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//federation/apis/federation:go_default_library", - "//federation/apis/federation/v1beta1:go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/runtime:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/install/install.go b/vendor/k8s.io/kubernetes/federation/apis/federation/install/install.go deleted file mode 100644 index e55897e79f..0000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/install/install.go +++ /dev/null @@ -1,134 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package install - -import ( - "fmt" - - "github.com/golang/glog" - - "k8s.io/kubernetes/federation/apis/federation" - "k8s.io/kubernetes/federation/apis/federation/v1beta1" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/sets" -) - -const importPrefix = "k8s.io/kubernetes/federation/apis/federation" - -var accessor = meta.NewAccessor() - -// availableVersions lists all known external versions for this group from most preferred to least preferred -var availableVersions = []unversioned.GroupVersion{v1beta1.SchemeGroupVersion} - -func init() { - registered.RegisterVersions(availableVersions) - externalVersions := []unversioned.GroupVersion{} - for _, v := range availableVersions { - if registered.IsAllowedVersion(v) { - externalVersions = append(externalVersions, v) - } - } - if len(externalVersions) == 0 { - glog.V(4).Infof("No version is registered for group %v", federation.GroupName) - return - } - - if err := registered.EnableVersions(externalVersions...); err != nil { - glog.V(4).Infof("%v", err) - return - } - if err := enableVersions(externalVersions); err != nil { - glog.V(4).Infof("%v", err) - return - } -} - -// TODO: enableVersions should be centralized rather than spread in each API -// group. -// We can combine registered.RegisterVersions, registered.EnableVersions and -// registered.RegisterGroup once we have moved enableVersions there. -func enableVersions(externalVersions []unversioned.GroupVersion) error { - addVersionsToScheme(externalVersions...) - preferredExternalVersion := externalVersions[0] - - groupMeta := apimachinery.GroupMeta{ - GroupVersion: preferredExternalVersion, - GroupVersions: externalVersions, - RESTMapper: newRESTMapper(externalVersions), - SelfLinker: runtime.SelfLinker(accessor), - InterfacesFor: interfacesFor, - } - - if err := registered.RegisterGroup(groupMeta); err != nil { - return err - } - return nil -} - -func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { - // the list of kinds that are scoped at the root of the api hierarchy - // if a kind is not enumerated here, it is assumed to have a namespace scope - rootScoped := sets.NewString( - "Cluster", - ) - - ignoredKinds := sets.NewString() - - return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) -} - -// interfacesFor returns the default Codec and ResourceVersioner for a given version -// string, or an error if the version is not known. 
-func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { - switch version { - case v1beta1.SchemeGroupVersion: - return &meta.VersionInterfaces{ - ObjectConvertor: api.Scheme, - MetadataAccessor: accessor, - }, nil - default: - g, _ := registered.Group(federation.GroupName) - return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) - } -} - -func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { - // add the internal version to Scheme - if err := federation.AddToScheme(api.Scheme); err != nil { - // Programmer error, detect immediately - panic(err) - } - // add the enabled external versions to Scheme - for _, v := range externalVersions { - if !registered.IsEnabledVersion(v) { - glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) - continue - } - switch v { - case v1beta1.SchemeGroupVersion: - if err := v1beta1.AddToScheme(api.Scheme); err != nil { - // Programmer error, detect immediately - panic(err) - } - } - } -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/register.go b/vendor/k8s.io/kubernetes/federation/apis/federation/register.go deleted file mode 100644 index b854279250..0000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/register.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package federation - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// GroupName is the group name use in this package -const GroupName = "federation" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Cluster{}, - &ClusterList{}, - &api.ListOptions{}, - &api.DeleteOptions{}, - ) - return nil -} - -func (obj *Cluster) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *ClusterList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/types.generated.go b/vendor/k8s.io/kubernetes/federation/apis/federation/types.generated.go deleted file mode 100644 index b154773ea4..0000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/types.generated.go +++ /dev/null @@ -1,2954 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package federation - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg1_api "k8s.io/kubernetes/pkg/api" - pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg3_types "k8s.io/kubernetes/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_api.LocalObjectReference - var v1 pkg2_unversioned.Time - var v2 pkg3_types.UID - var v3 time.Time - _, _, _, _ = v0, v1, v2, v3 - } -} - -func (x *ServerAddressByClientCIDR) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClientCIDR)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clientCIDR")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClientCIDR)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServerAddress)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serverAddress")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServerAddress)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServerAddressByClientCIDR) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct10 := r.ContainerType() - if yyct10 == codecSelferValueTypeMap1234 { - yyl10 := r.ReadMapStart() - if yyl10 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl10, d) - } - } else if yyct10 == codecSelferValueTypeArray1234 { - yyl10 := r.ReadArrayStart() - if yyl10 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl10, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServerAddressByClientCIDR) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys11Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys11Slc - var yyhl11 bool = l >= 0 - for yyj11 := 0; ; yyj11++ { - if yyhl11 { - if yyj11 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys11Slc = r.DecodeBytes(yys11Slc, true, true) - yys11 := string(yys11Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys11 { - case "clientCIDR": - if r.TryDecodeAsNil() { - x.ClientCIDR = "" - } else { - x.ClientCIDR = string(r.DecodeString()) - } - case "serverAddress": - if r.TryDecodeAsNil() { - x.ServerAddress = "" - } else { - x.ServerAddress = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys11) - } // end switch yys11 - } // end for yyj11 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServerAddressByClientCIDR) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClientCIDR = "" - } else { - x.ClientCIDR = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServerAddress = "" - } else { - x.ServerAddress = string(r.DecodeString()) - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep18 := !z.EncBinary() - yy2arr18 := z.EncBasicHandle().StructToArray - var yyq18 [2]bool - _, _, _ = yysep18, yyq18, yy2arr18 - const yyr18 bool = false - yyq18[1] = x.SecretRef != nil - var yynn18 int - if yyr18 || yy2arr18 { - r.EncodeArrayStart(2) - } else { - yynn18 = 1 - for _, b := range yyq18 { - if b { - yynn18++ - } - } - r.EncodeMapStart(yynn18) - yynn18 = 0 - } - if yyr18 || yy2arr18 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.ServerAddressByClientCIDRs == nil { - r.EncodeNil() - } else { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - h.encSliceServerAddressByClientCIDR(([]ServerAddressByClientCIDR)(x.ServerAddressByClientCIDRs), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serverAddressByClientCIDRs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ServerAddressByClientCIDRs == nil { - r.EncodeNil() - } else { - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - h.encSliceServerAddressByClientCIDR(([]ServerAddressByClientCIDR)(x.ServerAddressByClientCIDRs), e) - } - } - } - if yyr18 || yy2arr18 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq18[1] { - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq18[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretRef")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } - } - if yyr18 || yy2arr18 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym23 := z.DecBinary() - _ = yym23 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct24 := r.ContainerType() - if yyct24 == codecSelferValueTypeMap1234 { - yyl24 := r.ReadMapStart() - if yyl24 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl24, d) - } - } else if yyct24 == codecSelferValueTypeArray1234 { - yyl24 := r.ReadArrayStart() - if yyl24 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl24, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys25Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys25Slc - var yyhl25 bool = l >= 0 - for yyj25 := 0; ; yyj25++ { - if yyhl25 { - if yyj25 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys25Slc = r.DecodeBytes(yys25Slc, true, true) - yys25 := string(yys25Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys25 { - case "serverAddressByClientCIDRs": - if r.TryDecodeAsNil() { - x.ServerAddressByClientCIDRs = nil - } else { - yyv26 := &x.ServerAddressByClientCIDRs - yym27 := z.DecBinary() - _ = yym27 - if false { - } else { - h.decSliceServerAddressByClientCIDR((*[]ServerAddressByClientCIDR)(yyv26), d) - } - } - case "secretRef": - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(pkg1_api.LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys25) - } // end switch yys25 - } // end for yyj25 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj29 int - var yyb29 bool - var yyhl29 bool = l >= 0 - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServerAddressByClientCIDRs = nil - } else { - yyv30 := &x.ServerAddressByClientCIDRs - yym31 := z.DecBinary() - _ = yym31 - if false { - } else { - h.decSliceServerAddressByClientCIDR((*[]ServerAddressByClientCIDR)(yyv30), d) - } - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(pkg1_api.LocalObjectReference) - } - 
x.SecretRef.CodecDecodeSelf(d) - } - for { - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj29-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ClusterConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym33 := z.EncBinary() - _ = yym33 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ClusterConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym34 := z.DecBinary() - _ = yym34 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ClusterCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym35 := z.EncBinary() - _ = yym35 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep36 := !z.EncBinary() - yy2arr36 := z.EncBasicHandle().StructToArray - var yyq36 [6]bool - _, _, _ = yysep36, yyq36, yy2arr36 - const yyr36 bool = false - yyq36[2] = true - yyq36[3] = true - yyq36[4] = x.Reason != "" - yyq36[5] = x.Message != "" - var yynn36 int - if yyr36 || yy2arr36 { - r.EncodeArrayStart(6) - } else { - yynn36 = 2 - for _, b := range yyq36 { - if b { - yynn36++ - } - } - r.EncodeMapStart(yynn36) - yynn36 = 0 - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym39 := z.EncBinary() - _ = yym39 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym40 := z.EncBinary() - _ = yym40 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq36[2] { - yy42 := &x.LastProbeTime - yym43 := z.EncBinary() - _ = yym43 - if false { - } else if z.HasExtensions() && z.EncExt(yy42) { - } else if yym43 { - z.EncBinaryMarshal(yy42) - } else if !yym43 && z.IsJSONHandle() { - z.EncJSONMarshal(yy42) - } else { - z.EncFallback(yy42) - } - } else { - r.EncodeNil() - } - } else { - if yyq36[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy44 := &x.LastProbeTime - yym45 := z.EncBinary() - _ = yym45 - if false { - } else if z.HasExtensions() && z.EncExt(yy44) { - } else if yym45 { - z.EncBinaryMarshal(yy44) - } else if !yym45 && z.IsJSONHandle() { - z.EncJSONMarshal(yy44) - } else { - 
z.EncFallback(yy44) - } - } - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq36[3] { - yy47 := &x.LastTransitionTime - yym48 := z.EncBinary() - _ = yym48 - if false { - } else if z.HasExtensions() && z.EncExt(yy47) { - } else if yym48 { - z.EncBinaryMarshal(yy47) - } else if !yym48 && z.IsJSONHandle() { - z.EncJSONMarshal(yy47) - } else { - z.EncFallback(yy47) - } - } else { - r.EncodeNil() - } - } else { - if yyq36[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy49 := &x.LastTransitionTime - yym50 := z.EncBinary() - _ = yym50 - if false { - } else if z.HasExtensions() && z.EncExt(yy49) { - } else if yym50 { - z.EncBinaryMarshal(yy49) - } else if !yym50 && z.IsJSONHandle() { - z.EncJSONMarshal(yy49) - } else { - z.EncFallback(yy49) - } - } - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq36[4] { - yym52 := z.EncBinary() - _ = yym52 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq36[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym53 := z.EncBinary() - _ = yym53 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq36[5] { - yym55 := z.EncBinary() - _ = yym55 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq36[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym56 := z.EncBinary() - _ = yym56 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym57 := z.DecBinary() - _ = yym57 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct58 := r.ContainerType() - if yyct58 == codecSelferValueTypeMap1234 { - yyl58 := r.ReadMapStart() - if yyl58 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl58, d) - } - } else if yyct58 == codecSelferValueTypeArray1234 { - yyl58 := r.ReadArrayStart() - if yyl58 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl58, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys59Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys59Slc - var yyhl59 bool = l >= 0 - for yyj59 := 0; ; yyj59++ { - if yyhl59 { - if yyj59 >= l { - break - } - } else { - if r.CheckBreak() { - break 
- } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys59Slc = r.DecodeBytes(yys59Slc, true, true) - yys59 := string(yys59Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys59 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ClusterConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg1_api.ConditionStatus(r.DecodeString()) - } - case "lastProbeTime": - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_unversioned.Time{} - } else { - yyv62 := &x.LastProbeTime - yym63 := z.DecBinary() - _ = yym63 - if false { - } else if z.HasExtensions() && z.DecExt(yyv62) { - } else if yym63 { - z.DecBinaryUnmarshal(yyv62) - } else if !yym63 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv62) - } else { - z.DecFallback(yyv62, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv64 := &x.LastTransitionTime - yym65 := z.DecBinary() - _ = yym65 - if false { - } else if z.HasExtensions() && z.DecExt(yyv64) { - } else if yym65 { - z.DecBinaryUnmarshal(yyv64) - } else if !yym65 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv64) - } else { - z.DecFallback(yyv64, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys59) - } // end switch yys59 - } // end for yyj59 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj68 int - var yyb68 bool - var yyhl68 bool = l >= 0 - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ClusterConditionType(r.DecodeString()) - } - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg1_api.ConditionStatus(r.DecodeString()) - } - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_unversioned.Time{} - } else { - yyv71 := &x.LastProbeTime - yym72 := z.DecBinary() - _ = yym72 - if false { - } else if z.HasExtensions() && z.DecExt(yyv71) { - } else if yym72 { - z.DecBinaryUnmarshal(yyv71) - } else if !yym72 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv71) - } else { - z.DecFallback(yyv71, false) - } - } - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv73 := &x.LastTransitionTime - 
yym74 := z.DecBinary() - _ = yym74 - if false { - } else if z.HasExtensions() && z.DecExt(yyv73) { - } else if yym74 { - z.DecBinaryUnmarshal(yyv73) - } else if !yym74 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv73) - } else { - z.DecFallback(yyv73, false) - } - } - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj68-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym77 := z.EncBinary() - _ = yym77 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep78 := !z.EncBinary() - yy2arr78 := z.EncBasicHandle().StructToArray - var yyq78 [3]bool - _, _, _ = yysep78, yyq78, yy2arr78 - const yyr78 bool = false - yyq78[0] = len(x.Conditions) != 0 - yyq78[1] = len(x.Zones) != 0 - yyq78[2] = x.Region != "" - var yynn78 int - if yyr78 || yy2arr78 { - r.EncodeArrayStart(3) - } else { - yynn78 = 0 - for _, b := range yyq78 { - if b { - yynn78++ - } - } - r.EncodeMapStart(yynn78) - yynn78 = 0 - } - if yyr78 || yy2arr78 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq78[0] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym80 := z.EncBinary() - _ = yym80 - if false { - } else { - h.encSliceClusterCondition(([]ClusterCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq78[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym81 := z.EncBinary() - _ = yym81 - if false { - } else { - h.encSliceClusterCondition(([]ClusterCondition)(x.Conditions), e) - } - } - } - } - if yyr78 || yy2arr78 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq78[1] { - if x.Zones == nil { - r.EncodeNil() - } else { - yym83 := z.EncBinary() - _ = yym83 - if false { - } else { - z.F.EncSliceStringV(x.Zones, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq78[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("zones")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Zones == nil { - r.EncodeNil() - } else { - yym84 := z.EncBinary() - _ = yym84 - if false { - } else { - z.F.EncSliceStringV(x.Zones, false, e) - } - } - } - } - if yyr78 || yy2arr78 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq78[2] { - yym86 := z.EncBinary() - _ = yym86 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Region)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq78[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("region")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym87 := z.EncBinary() - _ = yym87 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Region)) - } - } - } - if yyr78 || yy2arr78 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym88 := z.DecBinary() - _ = yym88 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct89 := r.ContainerType() - if yyct89 == codecSelferValueTypeMap1234 { - yyl89 := r.ReadMapStart() - if yyl89 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl89, d) - } - } else if yyct89 == codecSelferValueTypeArray1234 { - yyl89 := r.ReadArrayStart() - if yyl89 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl89, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys90Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys90Slc - var yyhl90 bool = l >= 0 - for yyj90 := 0; ; yyj90++ { - if yyhl90 { - if yyj90 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys90Slc = r.DecodeBytes(yys90Slc, true, true) - yys90 := string(yys90Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys90 { - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv91 := &x.Conditions - yym92 := z.DecBinary() - _ = yym92 - if false { - } else { - h.decSliceClusterCondition((*[]ClusterCondition)(yyv91), d) - } - } - case "zones": - if r.TryDecodeAsNil() { - x.Zones = nil - } else { - yyv93 := &x.Zones - yym94 := z.DecBinary() - _ = yym94 - if false { - } else { - z.F.DecSliceStringX(yyv93, false, d) - } - } - case "region": - if r.TryDecodeAsNil() { - x.Region = "" - } else { - x.Region = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys90) - } // end switch yys90 - } // end for yyj90 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj96 int - var yyb96 bool - var yyhl96 bool = l >= 0 - yyj96++ - if yyhl96 { - yyb96 = yyj96 > l - } else { - yyb96 = r.CheckBreak() - } - if yyb96 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv97 := &x.Conditions - yym98 := z.DecBinary() - _ = yym98 - if false { - } else { - h.decSliceClusterCondition((*[]ClusterCondition)(yyv97), d) - } - } - yyj96++ - if yyhl96 { - yyb96 = yyj96 > l - } else { - yyb96 = r.CheckBreak() - } - if yyb96 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Zones = nil - } else { - yyv99 := &x.Zones - yym100 := z.DecBinary() - _ = yym100 - if false { - } else { - z.F.DecSliceStringX(yyv99, false, d) - } - } - yyj96++ - if yyhl96 { - yyb96 = yyj96 > l - } else { - yyb96 = r.CheckBreak() - } - if yyb96 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Region = "" - } else { - x.Region = string(r.DecodeString()) - } - for { - yyj96++ - if yyhl96 { - yyb96 = yyj96 > l - } else { - yyb96 = r.CheckBreak() - } - if yyb96 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj96-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Cluster) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym102 := z.EncBinary() - _ = yym102 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep103 := !z.EncBinary() - yy2arr103 := z.EncBasicHandle().StructToArray - var yyq103 [5]bool - _, _, _ = yysep103, yyq103, yy2arr103 - const yyr103 bool = false - yyq103[0] = x.Kind != "" - yyq103[1] = x.APIVersion != "" - yyq103[2] = true - yyq103[3] = true - yyq103[4] = true - var yynn103 int - if yyr103 || yy2arr103 { - r.EncodeArrayStart(5) - } else { - yynn103 = 0 - for _, b := range yyq103 { - if b { - yynn103++ - } - } - r.EncodeMapStart(yynn103) - yynn103 = 0 - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq103[0] { - yym105 := z.EncBinary() - _ = yym105 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq103[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym106 := z.EncBinary() - _ = yym106 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq103[1] { - yym108 := z.EncBinary() - _ = yym108 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq103[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym109 := z.EncBinary() - _ = yym109 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq103[2] { - yy111 := &x.ObjectMeta - yy111.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq103[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy112 := &x.ObjectMeta - yy112.CodecEncodeSelf(e) - } - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq103[3] { - yy114 := 
&x.Spec - yy114.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq103[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy115 := &x.Spec - yy115.CodecEncodeSelf(e) - } - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq103[4] { - yy117 := &x.Status - yy117.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq103[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy118 := &x.Status - yy118.CodecEncodeSelf(e) - } - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Cluster) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym119 := z.DecBinary() - _ = yym119 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct120 := r.ContainerType() - if yyct120 == codecSelferValueTypeMap1234 { - yyl120 := r.ReadMapStart() - if yyl120 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl120, d) - } - } else if yyct120 == codecSelferValueTypeArray1234 { - yyl120 := r.ReadArrayStart() - if yyl120 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl120, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Cluster) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys121Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys121Slc - var yyhl121 bool = l >= 0 - for yyj121 := 0; ; yyj121++ { - if yyhl121 { - if yyj121 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys121Slc = r.DecodeBytes(yys121Slc, true, true) - yys121 := string(yys121Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys121 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_api.ObjectMeta{} - } else { - yyv124 := &x.ObjectMeta - yyv124.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ClusterSpec{} - } else { - yyv125 := &x.Spec - yyv125.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ClusterStatus{} - } else { - yyv126 := &x.Status - yyv126.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys121) - } // end switch yys121 - } // end for yyj121 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Cluster) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj127 int - var yyb127 bool - var yyhl127 bool = l >= 0 - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - 
return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_api.ObjectMeta{} - } else { - yyv130 := &x.ObjectMeta - yyv130.CodecDecodeSelf(d) - } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ClusterSpec{} - } else { - yyv131 := &x.Spec - yyv131.CodecDecodeSelf(d) - } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ClusterStatus{} - } else { - yyv132 := &x.Status - yyv132.CodecDecodeSelf(d) - } - for { - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj127-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym133 := z.EncBinary() - _ = yym133 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep134 := !z.EncBinary() - yy2arr134 := z.EncBasicHandle().StructToArray - var yyq134 [4]bool - _, _, _ = yysep134, yyq134, yy2arr134 - const yyr134 bool = false - yyq134[0] = x.Kind != "" - yyq134[1] = x.APIVersion != "" - yyq134[2] = true - var yynn134 int - if yyr134 || yy2arr134 { - r.EncodeArrayStart(4) - } else { - yynn134 = 1 - for _, b := range yyq134 { - if b { - yynn134++ - } - } - r.EncodeMapStart(yynn134) - yynn134 = 0 - } - if yyr134 || yy2arr134 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq134[0] { - yym136 := z.EncBinary() - _ = yym136 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq134[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym137 := z.EncBinary() - _ = yym137 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr134 || yy2arr134 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq134[1] { - yym139 := z.EncBinary() - _ = yym139 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } 
else { - if yyq134[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym140 := z.EncBinary() - _ = yym140 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr134 || yy2arr134 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq134[2] { - yy142 := &x.ListMeta - yym143 := z.EncBinary() - _ = yym143 - if false { - } else if z.HasExtensions() && z.EncExt(yy142) { - } else { - z.EncFallback(yy142) - } - } else { - r.EncodeNil() - } - } else { - if yyq134[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy144 := &x.ListMeta - yym145 := z.EncBinary() - _ = yym145 - if false { - } else if z.HasExtensions() && z.EncExt(yy144) { - } else { - z.EncFallback(yy144) - } - } - } - if yyr134 || yy2arr134 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym147 := z.EncBinary() - _ = yym147 - if false { - } else { - h.encSliceCluster(([]Cluster)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym148 := z.EncBinary() - _ = yym148 - if false { - } else { - h.encSliceCluster(([]Cluster)(x.Items), e) - } - } - } - if yyr134 || yy2arr134 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym149 := z.DecBinary() - _ = yym149 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct150 := r.ContainerType() - if yyct150 == codecSelferValueTypeMap1234 { - yyl150 := r.ReadMapStart() - if yyl150 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl150, d) - } - } else if yyct150 == codecSelferValueTypeArray1234 { - yyl150 := r.ReadArrayStart() - if yyl150 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl150, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys151Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys151Slc - var yyhl151 bool = l >= 0 - for yyj151 := 0; ; yyj151++ { - if yyhl151 { - if yyj151 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys151Slc = r.DecodeBytes(yys151Slc, true, true) - yys151 := string(yys151Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys151 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else 
{ - yyv154 := &x.ListMeta - yym155 := z.DecBinary() - _ = yym155 - if false { - } else if z.HasExtensions() && z.DecExt(yyv154) { - } else { - z.DecFallback(yyv154, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv156 := &x.Items - yym157 := z.DecBinary() - _ = yym157 - if false { - } else { - h.decSliceCluster((*[]Cluster)(yyv156), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys151) - } // end switch yys151 - } // end for yyj151 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj158 int - var yyb158 bool - var yyhl158 bool = l >= 0 - yyj158++ - if yyhl158 { - yyb158 = yyj158 > l - } else { - yyb158 = r.CheckBreak() - } - if yyb158 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj158++ - if yyhl158 { - yyb158 = yyj158 > l - } else { - yyb158 = r.CheckBreak() - } - if yyb158 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj158++ - if yyhl158 { - yyb158 = yyj158 > l - } else { - yyb158 = r.CheckBreak() - } - if yyb158 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv161 := &x.ListMeta - yym162 := z.DecBinary() - _ = yym162 - if false { - } else if z.HasExtensions() && z.DecExt(yyv161) { - } else { - z.DecFallback(yyv161, false) - } - } - yyj158++ - if yyhl158 { - yyb158 = yyj158 > l - } else { - yyb158 = r.CheckBreak() - } - if yyb158 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv163 := &x.Items - yym164 := z.DecBinary() - _ = yym164 - if false { - } else { - h.decSliceCluster((*[]Cluster)(yyv163), d) - } - } - for { - yyj158++ - if yyhl158 { - yyb158 = yyj158 > l - } else { - yyb158 = r.CheckBreak() - } - if yyb158 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj158-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *FederatedReplicaSetPreferences) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym165 := z.EncBinary() - _ = yym165 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep166 := !z.EncBinary() - yy2arr166 := z.EncBasicHandle().StructToArray - var yyq166 [2]bool - _, _, _ = yysep166, yyq166, yy2arr166 - const yyr166 bool = false - yyq166[0] = x.Rebalance != false - yyq166[1] = len(x.Clusters) != 0 - var yynn166 int - if yyr166 || yy2arr166 { - r.EncodeArrayStart(2) - } else { - yynn166 = 0 - for _, b := range yyq166 { - if b { - yynn166++ - } - } - r.EncodeMapStart(yynn166) - yynn166 = 0 - } - if yyr166 || yy2arr166 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq166[0] { - yym168 := z.EncBinary() - 
_ = yym168 - if false { - } else { - r.EncodeBool(bool(x.Rebalance)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq166[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rebalance")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym169 := z.EncBinary() - _ = yym169 - if false { - } else { - r.EncodeBool(bool(x.Rebalance)) - } - } - } - if yyr166 || yy2arr166 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq166[1] { - if x.Clusters == nil { - r.EncodeNil() - } else { - yym171 := z.EncBinary() - _ = yym171 - if false { - } else { - h.encMapstringClusterReplicaSetPreferences((map[string]ClusterReplicaSetPreferences)(x.Clusters), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq166[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusters")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Clusters == nil { - r.EncodeNil() - } else { - yym172 := z.EncBinary() - _ = yym172 - if false { - } else { - h.encMapstringClusterReplicaSetPreferences((map[string]ClusterReplicaSetPreferences)(x.Clusters), e) - } - } - } - } - if yyr166 || yy2arr166 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *FederatedReplicaSetPreferences) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym173 := z.DecBinary() - _ = yym173 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct174 := r.ContainerType() - if yyct174 == codecSelferValueTypeMap1234 { - yyl174 := r.ReadMapStart() - if yyl174 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl174, d) - } - } else if yyct174 == codecSelferValueTypeArray1234 { - yyl174 := r.ReadArrayStart() - if yyl174 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl174, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *FederatedReplicaSetPreferences) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys175Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys175Slc - var yyhl175 bool = l >= 0 - for yyj175 := 0; ; yyj175++ { - if yyhl175 { - if yyj175 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys175Slc = r.DecodeBytes(yys175Slc, true, true) - yys175 := string(yys175Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys175 { - case "rebalance": - if r.TryDecodeAsNil() { - x.Rebalance = false - } else { - x.Rebalance = bool(r.DecodeBool()) - } - case "clusters": - if r.TryDecodeAsNil() { - x.Clusters = nil - } else { - yyv177 := &x.Clusters - yym178 := z.DecBinary() - _ = yym178 - if false { - } else { - h.decMapstringClusterReplicaSetPreferences((*map[string]ClusterReplicaSetPreferences)(yyv177), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys175) - } // end switch yys175 - } // end for yyj175 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *FederatedReplicaSetPreferences) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj179 int - var yyb179 bool - var yyhl179 bool = l >= 0 - yyj179++ - if yyhl179 { - yyb179 = yyj179 > l - } else { - yyb179 = r.CheckBreak() - } - if yyb179 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rebalance = false - } else { - x.Rebalance = bool(r.DecodeBool()) - } - yyj179++ - if yyhl179 { - yyb179 = yyj179 > l - } else { - yyb179 = r.CheckBreak() - } - if yyb179 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Clusters = nil - } else { - yyv181 := &x.Clusters - yym182 := z.DecBinary() - _ = yym182 - if false { - } else { - h.decMapstringClusterReplicaSetPreferences((*map[string]ClusterReplicaSetPreferences)(yyv181), d) - } - } - for { - yyj179++ - if yyhl179 { - yyb179 = yyj179 > l - } else { - yyb179 = r.CheckBreak() - } - if yyb179 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj179-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterReplicaSetPreferences) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym183 := z.EncBinary() - _ = yym183 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep184 := !z.EncBinary() - yy2arr184 := z.EncBasicHandle().StructToArray - var yyq184 [3]bool - _, _, _ = yysep184, yyq184, yy2arr184 - const yyr184 bool = false - yyq184[0] = x.MinReplicas != 0 - yyq184[1] = x.MaxReplicas != nil - var yynn184 int - if yyr184 || yy2arr184 { - r.EncodeArrayStart(3) - } else { - yynn184 = 1 - for _, b := range yyq184 { - if b { - yynn184++ - } - } - r.EncodeMapStart(yynn184) - yynn184 = 0 - } - if yyr184 || yy2arr184 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq184[0] { - yym186 := z.EncBinary() - _ = yym186 - if false { - } else { - r.EncodeInt(int64(x.MinReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq184[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym187 := z.EncBinary() - _ = yym187 - if false { - } else { - r.EncodeInt(int64(x.MinReplicas)) - } - } - } - if yyr184 || yy2arr184 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq184[1] { - if x.MaxReplicas == nil { - r.EncodeNil() - } else { - yy189 := *x.MaxReplicas - yym190 := z.EncBinary() - _ = yym190 - if false { - } else { - r.EncodeInt(int64(yy189)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq184[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MaxReplicas == nil { - r.EncodeNil() - } else { - yy191 := *x.MaxReplicas - yym192 := z.EncBinary() - _ = yym192 - if false { - } else { - r.EncodeInt(int64(yy191)) - } - } - } - } - if yyr184 || yy2arr184 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym194 := z.EncBinary() - _ = yym194 - if false { - } else { - r.EncodeInt(int64(x.Weight)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("Weight")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym195 := z.EncBinary() - _ = yym195 - if false { - } else { - r.EncodeInt(int64(x.Weight)) - } - } - if yyr184 || yy2arr184 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterReplicaSetPreferences) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym196 := z.DecBinary() - _ = yym196 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct197 := r.ContainerType() - if yyct197 == codecSelferValueTypeMap1234 { - yyl197 := r.ReadMapStart() - if yyl197 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl197, d) - } - } else if yyct197 == codecSelferValueTypeArray1234 { - yyl197 := r.ReadArrayStart() - if yyl197 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl197, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterReplicaSetPreferences) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys198Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys198Slc - var yyhl198 bool = l >= 0 - for yyj198 := 0; ; yyj198++ { - if yyhl198 { - if yyj198 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys198Slc = r.DecodeBytes(yys198Slc, true, true) - yys198 := string(yys198Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys198 { - case "minReplicas": - if r.TryDecodeAsNil() { - x.MinReplicas = 0 - } else { - x.MinReplicas = int64(r.DecodeInt(64)) - } - case "maxReplicas": - if r.TryDecodeAsNil() { - if x.MaxReplicas != nil { - x.MaxReplicas = nil - } - } else { - if x.MaxReplicas == nil { - x.MaxReplicas = new(int64) - } - yym201 := z.DecBinary() - _ = yym201 - if false { - } else { - *((*int64)(x.MaxReplicas)) = int64(r.DecodeInt(64)) - } - } - case "Weight": - if r.TryDecodeAsNil() { - x.Weight = 0 - } else { - x.Weight = int64(r.DecodeInt(64)) - } - default: - z.DecStructFieldNotFound(-1, yys198) - } // end switch yys198 - } // end for yyj198 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterReplicaSetPreferences) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj203 int - var yyb203 bool - var yyhl203 bool = l >= 0 - yyj203++ - if yyhl203 { - yyb203 = yyj203 > l - } else { - yyb203 = r.CheckBreak() - } - if yyb203 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinReplicas = 0 - } else { - x.MinReplicas = int64(r.DecodeInt(64)) - } - yyj203++ - if yyhl203 { - yyb203 = yyj203 > l - } else { - yyb203 = r.CheckBreak() - } - if yyb203 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.MaxReplicas != nil { - x.MaxReplicas = nil - } - } else { - if x.MaxReplicas == nil { - x.MaxReplicas = new(int64) - } - yym206 := z.DecBinary() - _ = yym206 - 
if false { - } else { - *((*int64)(x.MaxReplicas)) = int64(r.DecodeInt(64)) - } - } - yyj203++ - if yyhl203 { - yyb203 = yyj203 > l - } else { - yyb203 = r.CheckBreak() - } - if yyb203 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Weight = 0 - } else { - x.Weight = int64(r.DecodeInt(64)) - } - for { - yyj203++ - if yyhl203 { - yyb203 = yyj203 > l - } else { - yyb203 = r.CheckBreak() - } - if yyb203 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj203-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceServerAddressByClientCIDR(v []ServerAddressByClientCIDR, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv208 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy209 := &yyv208 - yy209.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceServerAddressByClientCIDR(v *[]ServerAddressByClientCIDR, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv210 := *v - yyh210, yyl210 := z.DecSliceHelperStart() - var yyc210 bool - if yyl210 == 0 { - if yyv210 == nil { - yyv210 = []ServerAddressByClientCIDR{} - yyc210 = true - } else if len(yyv210) != 0 { - yyv210 = yyv210[:0] - yyc210 = true - } - } else if yyl210 > 0 { - var yyrr210, yyrl210 int - var yyrt210 bool - if yyl210 > cap(yyv210) { - - yyrg210 := len(yyv210) > 0 - yyv2210 := yyv210 - yyrl210, yyrt210 = z.DecInferLen(yyl210, z.DecBasicHandle().MaxInitLen, 32) - if yyrt210 { - if yyrl210 <= cap(yyv210) { - yyv210 = yyv210[:yyrl210] - } else { - yyv210 = make([]ServerAddressByClientCIDR, yyrl210) - } - } else { - yyv210 = make([]ServerAddressByClientCIDR, yyrl210) - } - yyc210 = true - yyrr210 = len(yyv210) - if yyrg210 { - copy(yyv210, yyv2210) - } - } else if yyl210 != len(yyv210) { - yyv210 = yyv210[:yyl210] - yyc210 = true - } - yyj210 := 0 - for ; yyj210 < yyrr210; yyj210++ { - yyh210.ElemContainerState(yyj210) - if r.TryDecodeAsNil() { - yyv210[yyj210] = ServerAddressByClientCIDR{} - } else { - yyv211 := &yyv210[yyj210] - yyv211.CodecDecodeSelf(d) - } - - } - if yyrt210 { - for ; yyj210 < yyl210; yyj210++ { - yyv210 = append(yyv210, ServerAddressByClientCIDR{}) - yyh210.ElemContainerState(yyj210) - if r.TryDecodeAsNil() { - yyv210[yyj210] = ServerAddressByClientCIDR{} - } else { - yyv212 := &yyv210[yyj210] - yyv212.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj210 := 0 - for ; !r.CheckBreak(); yyj210++ { - - if yyj210 >= len(yyv210) { - yyv210 = append(yyv210, ServerAddressByClientCIDR{}) // var yyz210 ServerAddressByClientCIDR - yyc210 = true - } - yyh210.ElemContainerState(yyj210) - if yyj210 < len(yyv210) { - if r.TryDecodeAsNil() { - yyv210[yyj210] = ServerAddressByClientCIDR{} - } else { - yyv213 := &yyv210[yyj210] - yyv213.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj210 < len(yyv210) { - yyv210 = yyv210[:yyj210] - yyc210 = true - } else if yyj210 == 0 && yyv210 == nil { - yyv210 = []ServerAddressByClientCIDR{} - yyc210 = true - } - } - yyh210.End() - if yyc210 { - *v = yyv210 - } -} - -func (x codecSelfer1234) encSliceClusterCondition(v []ClusterCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv214 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy215 := &yyv214 - yy215.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceClusterCondition(v *[]ClusterCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv216 := *v - yyh216, yyl216 := z.DecSliceHelperStart() - var yyc216 bool - if yyl216 == 0 { - if yyv216 == nil { - yyv216 = []ClusterCondition{} - yyc216 = true - } else if len(yyv216) != 0 { - yyv216 = yyv216[:0] - yyc216 = true - } - } else if yyl216 > 0 { - var yyrr216, yyrl216 int - var yyrt216 bool - if yyl216 > cap(yyv216) { - - yyrg216 := len(yyv216) > 0 - yyv2216 := yyv216 - yyrl216, yyrt216 = z.DecInferLen(yyl216, z.DecBasicHandle().MaxInitLen, 112) - if yyrt216 { - if yyrl216 <= cap(yyv216) { - yyv216 = yyv216[:yyrl216] - } else { - yyv216 = make([]ClusterCondition, yyrl216) - } - } else { - yyv216 = make([]ClusterCondition, yyrl216) - } - yyc216 = true - yyrr216 = len(yyv216) - if yyrg216 { - copy(yyv216, yyv2216) - } - } else if yyl216 != len(yyv216) { - yyv216 = yyv216[:yyl216] - yyc216 = true - } - yyj216 := 0 - for ; yyj216 < yyrr216; yyj216++ { - yyh216.ElemContainerState(yyj216) - if r.TryDecodeAsNil() { - yyv216[yyj216] = ClusterCondition{} - } else { - yyv217 := &yyv216[yyj216] - yyv217.CodecDecodeSelf(d) - } - - } - if yyrt216 { - for ; yyj216 < yyl216; yyj216++ { - yyv216 = append(yyv216, ClusterCondition{}) - yyh216.ElemContainerState(yyj216) - if r.TryDecodeAsNil() { - yyv216[yyj216] = ClusterCondition{} - } else { - yyv218 := &yyv216[yyj216] - yyv218.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj216 := 0 - for ; !r.CheckBreak(); yyj216++ { - - if yyj216 >= len(yyv216) { - yyv216 = append(yyv216, ClusterCondition{}) // var yyz216 ClusterCondition - yyc216 = true - } - yyh216.ElemContainerState(yyj216) - if yyj216 < len(yyv216) { - if r.TryDecodeAsNil() { - yyv216[yyj216] = ClusterCondition{} - } else { - yyv219 := &yyv216[yyj216] - yyv219.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj216 < len(yyv216) { - yyv216 = yyv216[:yyj216] - yyc216 = true - } else if yyj216 == 0 && yyv216 == nil { - yyv216 = []ClusterCondition{} - yyc216 = true - } - } - yyh216.End() - if yyc216 { - *v = yyv216 - } -} - -func (x codecSelfer1234) encSliceCluster(v []Cluster, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv220 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy221 := &yyv220 - yy221.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCluster(v *[]Cluster, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv222 := *v - yyh222, yyl222 := z.DecSliceHelperStart() - var yyc222 bool - if yyl222 == 0 { - if yyv222 == nil { - yyv222 = []Cluster{} - yyc222 = true - } else if len(yyv222) != 0 { - yyv222 = yyv222[:0] - yyc222 = true - } - } else if yyl222 > 0 { - var yyrr222, yyrl222 int - var yyrt222 bool - if yyl222 > cap(yyv222) { - - yyrg222 := len(yyv222) > 0 - yyv2222 := yyv222 - yyrl222, yyrt222 = z.DecInferLen(yyl222, z.DecBasicHandle().MaxInitLen, 352) - if yyrt222 { - if yyrl222 <= cap(yyv222) { - yyv222 = yyv222[:yyrl222] - } 
else { - yyv222 = make([]Cluster, yyrl222) - } - } else { - yyv222 = make([]Cluster, yyrl222) - } - yyc222 = true - yyrr222 = len(yyv222) - if yyrg222 { - copy(yyv222, yyv2222) - } - } else if yyl222 != len(yyv222) { - yyv222 = yyv222[:yyl222] - yyc222 = true - } - yyj222 := 0 - for ; yyj222 < yyrr222; yyj222++ { - yyh222.ElemContainerState(yyj222) - if r.TryDecodeAsNil() { - yyv222[yyj222] = Cluster{} - } else { - yyv223 := &yyv222[yyj222] - yyv223.CodecDecodeSelf(d) - } - - } - if yyrt222 { - for ; yyj222 < yyl222; yyj222++ { - yyv222 = append(yyv222, Cluster{}) - yyh222.ElemContainerState(yyj222) - if r.TryDecodeAsNil() { - yyv222[yyj222] = Cluster{} - } else { - yyv224 := &yyv222[yyj222] - yyv224.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj222 := 0 - for ; !r.CheckBreak(); yyj222++ { - - if yyj222 >= len(yyv222) { - yyv222 = append(yyv222, Cluster{}) // var yyz222 Cluster - yyc222 = true - } - yyh222.ElemContainerState(yyj222) - if yyj222 < len(yyv222) { - if r.TryDecodeAsNil() { - yyv222[yyj222] = Cluster{} - } else { - yyv225 := &yyv222[yyj222] - yyv225.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj222 < len(yyv222) { - yyv222 = yyv222[:yyj222] - yyc222 = true - } else if yyj222 == 0 && yyv222 == nil { - yyv222 = []Cluster{} - yyc222 = true - } - } - yyh222.End() - if yyc222 { - *v = yyv222 - } -} - -func (x codecSelfer1234) encMapstringClusterReplicaSetPreferences(v map[string]ClusterReplicaSetPreferences, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk226, yyv226 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym227 := z.EncBinary() - _ = yym227 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyk226)) - } - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy228 := &yyv226 - yy228.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decMapstringClusterReplicaSetPreferences(v *map[string]ClusterReplicaSetPreferences, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv229 := *v - yyl229 := r.ReadMapStart() - yybh229 := z.DecBasicHandle() - if yyv229 == nil { - yyrl229, _ := z.DecInferLen(yyl229, yybh229.MaxInitLen, 40) - yyv229 = make(map[string]ClusterReplicaSetPreferences, yyrl229) - *v = yyv229 - } - var yymk229 string - var yymv229 ClusterReplicaSetPreferences - var yymg229 bool - if yybh229.MapValueReset { - yymg229 = true - } - if yyl229 > 0 { - for yyj229 := 0; yyj229 < yyl229; yyj229++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk229 = "" - } else { - yymk229 = string(r.DecodeString()) - } - - if yymg229 { - yymv229 = yyv229[yymk229] - } else { - yymv229 = ClusterReplicaSetPreferences{} - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv229 = ClusterReplicaSetPreferences{} - } else { - yyv231 := &yymv229 - yyv231.CodecDecodeSelf(d) - } - - if yyv229 != nil { - yyv229[yymk229] = yymv229 - } - } - } else if yyl229 < 0 { - for yyj229 := 0; !r.CheckBreak(); yyj229++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk229 = "" - } else { - yymk229 = string(r.DecodeString()) - } - - if yymg229 { - yymv229 = yyv229[yymk229] - } else { - yymv229 = ClusterReplicaSetPreferences{} - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - 
if r.TryDecodeAsNil() { - yymv229 = ClusterReplicaSetPreferences{} - } else { - yyv233 := &yymv229 - yyv233.CodecDecodeSelf(d) - } - - if yyv229 != nil { - yyv229[yymk229] = yymv229 - } - } - } // else len==0: TODO: Should we clear map entries? - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/types.go b/vendor/k8s.io/kubernetes/federation/apis/federation/types.go deleted file mode 100644 index ebd44bc102..0000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/types.go +++ /dev/null @@ -1,155 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package federation - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" -) - -// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. -type ServerAddressByClientCIDR struct { - // The CIDR with which clients can match their IP to figure out the server address that they should use. - ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"` - // Address of this server, suitable for a client that matches the above CIDR. - // This can be a hostname, hostname:port, IP or IP:port. - ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"` -} - -// ClusterSpec describes the attributes of a kubernetes cluster. -type ClusterSpec struct { - // A map of client CIDR to server address. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" patchStrategy:"merge" patchMergeKey:"clientCIDR"` - // Name of the secret containing kubeconfig to access this cluster. - // The secret is read from the kubernetes cluster that is hosting federation control plane. - // Admin needs to ensure that the required secret exists. Secret should be in the same namespace where federation control plane is hosted and it should have kubeconfig in its data with key "kubeconfig". - // This will later be changed to a reference to secret in federation control plane when the federation control plane supports secrets. - // This can be left empty if the cluster allows insecure access. - // +optional - SecretRef *api.LocalObjectReference `json:"secretRef,omitempty"` -} - -type ClusterConditionType string - -// These are valid conditions of a cluster. -const ( - // ClusterReady means the cluster is ready to accept workloads. - ClusterReady ClusterConditionType = "Ready" - // ClusterOffline means the cluster is temporarily down or not reachable - ClusterOffline ClusterConditionType = "Offline" -) - -// ClusterCondition describes current state of a cluster. 
-type ClusterCondition struct { - // Type of cluster condition, Complete or Failed. - Type ClusterConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status api.ConditionStatus `json:"status"` - // Last time the condition was checked. - // +optional - LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty"` - // Last time the condition transit from one status to another. - // +optional - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"` - // (brief) reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // Human readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty"` -} - -// ClusterStatus is information about the current status of a cluster updated by cluster controller peridocally. -type ClusterStatus struct { - // Conditions is an array of current cluster conditions. - // +optional - Conditions []ClusterCondition `json:"conditions,omitempty"` - // Zones is the list of availability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. - // These will always be in the same region. - // +optional - Zones []string `json:"zones,omitempty"` - // Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'. - // +optional - Region string `json:"region,omitempty"` -} - -// +genclient=true -// +nonNamespaced=true - -// Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation. -type Cluster struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - api.ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the behavior of the Cluster. - // +optional - Spec ClusterSpec `json:"spec,omitempty"` - // Status describes the current status of a Cluster - // +optional - Status ClusterStatus `json:"status,omitempty"` -} - -// A list of all the kubernetes clusters registered to the federation -type ClusterList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - // +optional - unversioned.ListMeta `json:"metadata,omitempty"` - - // List of Cluster objects. - Items []Cluster `json:"items"` -} - -// Temporary/alpha structures to support custom replica assignments within FederatedReplicaSet. - -// A set of preferences that can be added to federated version of ReplicaSet as a json-serialized annotation. -// The preferences allow the user to express in which culsters he wants to put his replicas within the -// mentiond FederatedReplicaSet. -type FederatedReplicaSetPreferences struct { - // If set to true then already scheduled and running replicas may be moved to other clusters to - // in order to bring cluster replicasets towards a desired state. Otherwise, if set to false, - // up and running replicas will not be moved. - // +optional - Rebalance bool `json:"rebalance,omitempty"` - - // A mapping between cluster names and preferences regarding local ReplicaSet in these clusters. - // "*" (if provided) applies to all clusters if an explicit mapping is not provided. If there is no - // "*" that clusters without explicit preferences should not have any replicas scheduled. 
- // +optional - Clusters map[string]ClusterReplicaSetPreferences `json:"clusters,omitempty"` -} - -// Preferences regarding number of replicas assigned to a cluster replicaset within a federated replicaset. -type ClusterReplicaSetPreferences struct { - // Minimum number of replicas that should be assigned to this Local ReplicaSet. 0 by default. - // +optional - MinReplicas int64 `json:"minReplicas,omitempty"` - - // Maximum number of replicas that should be assigned to this Local ReplicaSet. Unbounded if no value provided (default). - // +optional - MaxReplicas *int64 `json:"maxReplicas,omitempty"` - - // A number expressing the preference to put an additional replica to this LocalReplicaSet. 0 by default. - Weight int64 -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/BUILD b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/BUILD deleted file mode 100644 index 81d64b75af..0000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/BUILD +++ /dev/null @@ -1,40 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - ], - tags = ["automanaged"], - deps = [ - "//federation/apis/federation:go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//pkg/watch/versioned:go_default_library", - "//vendor:github.com/gogo/protobuf/proto", - "//vendor:github.com/ugorji/go/codec", - ], -) diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/conversion.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/conversion.go deleted file mode 100644 index 5881109c6a..0000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/conversion.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/runtime" -) - -func addConversionFuncs(scheme *runtime.Scheme) error { - return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.String(), "Cluster", - func(label, value string) (string, string, error) { - switch label { - case "metadata.name": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }, - ) -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/defaults.go deleted file mode 100644 index 5704712048..0000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/defaults.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/kubernetes/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return nil -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/doc.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/doc.go deleted file mode 100644 index 2110288a0c..0000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=k8s.io/kubernetes/federation/apis/federation -// +k8s:openapi-gen=true -// +k8s:defaulter-gen=TypeMeta -package v1beta1 diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.pb.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.pb.go deleted file mode 100644 index 0c275262ca..0000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.pb.go +++ /dev/null @@ -1,1541 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. 
-// source: k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto -// DO NOT EDIT! - -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto - - It has these top-level messages: - Cluster - ClusterCondition - ClusterList - ClusterSpec - ClusterStatus - ServerAddressByClientCIDR -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 - -func (m *Cluster) Reset() { *m = Cluster{} } -func (*Cluster) ProtoMessage() {} -func (*Cluster) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *ClusterCondition) Reset() { *m = ClusterCondition{} } -func (*ClusterCondition) ProtoMessage() {} -func (*ClusterCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *ClusterList) Reset() { *m = ClusterList{} } -func (*ClusterList) ProtoMessage() {} -func (*ClusterList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *ClusterSpec) Reset() { *m = ClusterSpec{} } -func (*ClusterSpec) ProtoMessage() {} -func (*ClusterSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *ClusterStatus) Reset() { *m = ClusterStatus{} } -func (*ClusterStatus) ProtoMessage() {} -func (*ClusterStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } -func (*ServerAddressByClientCIDR) ProtoMessage() {} -func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} -} - -func init() { - proto.RegisterType((*Cluster)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.Cluster") - proto.RegisterType((*ClusterCondition)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.ClusterCondition") - proto.RegisterType((*ClusterList)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.ClusterList") - proto.RegisterType((*ClusterSpec)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.ClusterSpec") - proto.RegisterType((*ClusterStatus)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.ClusterStatus") - proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.ServerAddressByClientCIDR") -} -func (m *Cluster) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Cluster) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i 
+= n2 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *ClusterCondition) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ClusterCondition) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i += copy(data[i:], m.Status) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) - n4, err := m.LastProbeTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n4 - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n5, err := m.LastTransitionTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n5 - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - return i, nil -} - -func (m *ClusterList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ClusterList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ClusterSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ClusterSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, msg := range m.ServerAddressByClientCIDRs { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.SecretRef != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) - n7, err := m.SecretRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n7 - } - return i, nil -} - -func (m *ClusterStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ClusterStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Zones) > 0 { - for _, s := range m.Zones { - data[i] = 0x2a - i++ - l = 
len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Region))) - i += copy(data[i:], m.Region) - return i, nil -} - -func (m *ServerAddressByClientCIDR) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ServerAddressByClientCIDR) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ClientCIDR))) - i += copy(data[i:], m.ClientCIDR) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ServerAddress))) - i += copy(data[i:], m.ServerAddress) - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *Cluster) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ClusterCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastProbeTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ClusterList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ClusterSpec) Size() (n int) { - var l int - _ = l - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, e := range m.ServerAddressByClientCIDRs { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ClusterStatus) Size() (n int) { - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Zones) > 0 { - for _, s := range m.Zones { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.Region) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ServerAddressByClientCIDR) Size() (n int) { - var l int - _ = l - l = len(m.ClientCIDR) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ServerAddress) - n += 1 + l + sovGenerated(uint64(l)) - return n -} 
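
For readers skimming this removal, the deleted helpers above (encodeVarintGenerated, and the Size methods that feed sovGenerated just below) implement standard protobuf base-128 varint framing: each byte carries 7 bits of the value, and the high bit marks a continuation byte. The following is a minimal, self-contained Go sketch of that encoding; it is illustrative only, the putUvarint name is my own, and nothing in it belongs to the vendored file or to this diff.

package main

import "fmt"

// putUvarint appends v to buf using protobuf/base-128 varint encoding:
// 7 payload bits per byte, with the most significant bit set on every
// byte except the last. This mirrors the loop in encodeVarintGenerated.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

func main() {
	// 300 (binary 1_0010_1100) encodes as the two bytes 0xac 0x02.
	fmt.Printf("% x\n", putUvarint(nil, 300))
}

The sovGenerated function that follows simply counts how many such 7-bit groups a value needs, which is how the generated Size methods pre-compute buffer lengths before MarshalTo writes into them.
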
- -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Cluster) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Cluster{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterSpec", "ClusterSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ClusterStatus", "ClusterStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Cluster", "Cluster", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterSpec{`, - `ServerAddressByClientCIDRs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServerAddressByClientCIDRs), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "k8s_io_kubernetes_pkg_api_v1.LocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterStatus{`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ClusterCondition", "ClusterCondition", 1), `&`, ``, 1) + `,`, - `Zones:` + fmt.Sprintf("%v", this.Zones) + `,`, - `Region:` + fmt.Sprintf("%v", this.Region) + `,`, - `}`, - }, "") - return s -} -func (this *ServerAddressByClientCIDR) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServerAddressByClientCIDR{`, - `ClientCIDR:` + fmt.Sprintf("%v", this.ClientCIDR) + `,`, - `ServerAddress:` + fmt.Sprintf("%v", this.ServerAddress) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Cluster) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Cluster: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Cluster: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterCondition) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = ClusterConditionType(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Cluster{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) - } - var 
msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) - if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &k8s_io_kubernetes_pkg_api_v1.LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, ClusterCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Zones = append(m.Zones, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Region = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServerAddressByClientCIDR) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServerAddressByClientCIDR: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServerAddressByClientCIDR: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientCIDR", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientCIDR = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerAddress", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServerAddress = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := 
len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -var fileDescriptorGenerated = []byte{ - // 787 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x54, 0xdd, 0x6a, 0xe3, 0x46, - 0x14, 0xb6, 0xfc, 0x1b, 0x4f, 0xea, 0x36, 0x0c, 0x2d, 0xb8, 0xbe, 0x90, 0x83, 0x29, 0xc5, 0xa1, - 0x8d, 0x84, 0x4d, 0x0b, 0x81, 0xd2, 0x42, 0xe4, 0x50, 0x08, 0x38, 0xa4, 0x4c, 0x42, 0x29, 0x81, - 0x52, 0x64, 0xf9, 0x58, 0x51, 0x6d, 0x4b, 0x62, 0x66, 0xe4, 0x92, 0x5c, 0xf5, 0x01, 0x7a, 0xd1, - 0x87, 0xd8, 0x37, 0x58, 0xf6, 0x1d, 0x72, 0x99, 0x8b, 0xbd, 0x58, 0xf6, 0xc2, 0x6c, 0xbc, 0x6f, - 0x91, 0xab, 0x65, 0x46, 0x63, 0xd9, 0x8a, 0x63, 0xb3, 0x9b, 0xdc, 0xe9, 0x1c, 0x9d, 0xf3, 0x7d, - 0xdf, 0x9c, 0x3f, 0x74, 0x34, 0x3c, 0x60, 0x86, 0x17, 0x98, 0xc3, 0xa8, 0x07, 0xd4, 0x07, 0x0e, - 0xcc, 0x1c, 0x40, 0x1f, 0xa8, 0xcd, 0xbd, 0xc0, 0x37, 0xed, 0xd0, 0x4b, 0xd9, 0x93, 0x56, 0x0f, - 0xb8, 0xdd, 0x32, 0x5d, 0xf0, 0x85, 0x0b, 0xfa, 0x46, 0x48, 0x03, 0x1e, 0xe0, 0x1f, 0x62, 0x14, - 0x63, 0x81, 0x62, 0x2c, 0xb2, 0x0c, 0x81, 0xb2, 0x6c, 0x2b, 0x94, 0xda, 0xbe, 0xeb, 0xf1, 0xcb, - 0xa8, 0x67, 0x38, 0xc1, 0xd8, 0x74, 0x03, 0x37, 0x30, 0x25, 0x58, 0x2f, 0x1a, 0x48, 0x4b, 0x1a, - 0xf2, 0x2b, 0x26, 0xa9, 0xb5, 0x57, 0xa5, 0x86, 0x43, 0x57, 0x68, 0x34, 0x29, 0xb0, 0x20, 0xa2, - 0x0e, 0x3c, 0x14, 0x56, 0xfb, 0x71, 0x7d, 0x4e, 0xe4, 0x4f, 0x80, 0x32, 0x2f, 0xf0, 0xa1, 0xbf, - 0x92, 0xf6, 0xfd, 0xfa, 0xb4, 0xc9, 0xca, 0xeb, 0x6b, 0xfb, 0x8f, 0x47, 0xd3, 0xc8, 0xe7, 0xde, - 0x78, 0x55, 0x53, 0xeb, 0xf1, 0xf0, 0x88, 0x7b, 0x23, 0xd3, 0xf3, 0x39, 0xe3, 0xf4, 0x61, 0x4a, - 0xe3, 0x55, 0x16, 0x95, 0x3a, 0xa3, 0x88, 0x71, 0xa0, 0xf8, 0x0f, 0xb4, 0x35, 0x06, 0x6e, 0xf7, - 
0x6d, 0x6e, 0x57, 0xb5, 0x5d, 0xad, 0xb9, 0xdd, 0x6e, 0x1a, 0xab, 0xe5, 0x0f, 0x87, 0xae, 0xa8, - 0xbb, 0x31, 0x69, 0x19, 0xa7, 0xbd, 0xbf, 0xc1, 0xe1, 0x27, 0xc0, 0x6d, 0x0b, 0xdf, 0x4c, 0xeb, - 0x99, 0xd9, 0xb4, 0x8e, 0x16, 0x3e, 0x92, 0xa0, 0x61, 0x07, 0xe5, 0x59, 0x08, 0x4e, 0x35, 0x2b, - 0x51, 0x0f, 0x8d, 0xa7, 0x34, 0xd5, 0x50, 0x32, 0xcf, 0x42, 0x70, 0xac, 0xcf, 0x14, 0x5d, 0x5e, - 0x58, 0x44, 0x82, 0xe3, 0x21, 0x2a, 0x32, 0x6e, 0xf3, 0x88, 0x55, 0x73, 0x92, 0xa6, 0xf3, 0x3c, - 0x1a, 0x09, 0x65, 0x7d, 0xae, 0x88, 0x8a, 0xb1, 0x4d, 0x14, 0x45, 0xe3, 0x6d, 0x0e, 0xed, 0xa8, - 0xc8, 0x4e, 0xe0, 0xf7, 0x3d, 0x01, 0x81, 0x0f, 0x50, 0x9e, 0x5f, 0x85, 0x20, 0x8b, 0x57, 0xb6, - 0xbe, 0x99, 0x6b, 0x3c, 0xbf, 0x0a, 0xe1, 0x7e, 0x5a, 0xff, 0xf2, 0x61, 0xbc, 0xf0, 0x13, 0x99, - 0x81, 0x7f, 0x4f, 0xb4, 0x67, 0x65, 0xee, 0x2f, 0x69, 0xda, 0xfb, 0x69, 0x7d, 0xe3, 0xe0, 0x18, - 0x09, 0x66, 0x5a, 0x26, 0xbe, 0x44, 0x95, 0x91, 0xcd, 0xf8, 0x6f, 0x34, 0xe8, 0xc1, 0xb9, 0x37, - 0x06, 0x55, 0x9a, 0xef, 0x36, 0xf4, 0x75, 0x69, 0x7a, 0x0d, 0x91, 0x62, 0x7d, 0xa5, 0xb4, 0x54, - 0xba, 0xcb, 0x48, 0x24, 0x0d, 0x8c, 0xff, 0x41, 0x58, 0x38, 0xce, 0xa9, 0xed, 0xb3, 0xf8, 0x75, - 0x82, 0x2e, 0xff, 0xe9, 0x74, 0x35, 0x45, 0x87, 0xbb, 0x2b, 0x70, 0xe4, 0x11, 0x0a, 0xfc, 0x2d, - 0x2a, 0x52, 0xb0, 0x59, 0xe0, 0x57, 0x0b, 0xb2, 0x74, 0x49, 0xc7, 0x88, 0xf4, 0x12, 0xf5, 0x17, - 0xef, 0xa1, 0xd2, 0x18, 0x18, 0xb3, 0x5d, 0xa8, 0x16, 0x65, 0xe0, 0x17, 0x2a, 0xb0, 0x74, 0x12, - 0xbb, 0xc9, 0xfc, 0x7f, 0xe3, 0x56, 0x43, 0xdb, 0xaa, 0x59, 0x5d, 0x8f, 0x71, 0xfc, 0xe7, 0xca, - 0x62, 0x98, 0x1f, 0xf9, 0x22, 0x91, 0x2e, 0xf7, 0x63, 0x47, 0x91, 0x6d, 0xcd, 0x3d, 0x4b, 0xdb, - 0xd1, 0x43, 0x05, 0x8f, 0xc3, 0x58, 0xf4, 0x3e, 0xd7, 0xdc, 0x6e, 0xff, 0xfc, 0xac, 0xb9, 0xb5, - 0x2a, 0x8a, 0xa9, 0x70, 0x2c, 0x30, 0x49, 0x0c, 0xdd, 0x78, 0x91, 0x4d, 0x9e, 0x24, 0x56, 0x06, - 0xbf, 0xd4, 0x50, 0x8d, 0x01, 0x9d, 0x00, 0x3d, 0xec, 0xf7, 0x29, 0x30, 0x66, 0x5d, 0x75, 0x46, - 0x1e, 0xf8, 0xbc, 0x73, 0x7c, 0x44, 0x58, 0x55, 0x93, 0x4a, 0x4e, 0x9f, 0xa6, 0xe4, 0x6c, 0x1d, - 0xae, 0xd5, 0x50, 0xda, 0x6a, 0x6b, 0x43, 0x18, 0xd9, 0x20, 0x0b, 0xff, 0x85, 0xca, 0x0c, 0x1c, - 0x0a, 0x9c, 0xc0, 0x40, 0x1d, 0x93, 0xf6, 0xe6, 0x13, 0xd5, 0x0d, 0x1c, 0x7b, 0x14, 0xdf, 0x24, - 0x02, 0x03, 0xa0, 0xe0, 0x3b, 0x60, 0x55, 0x66, 0xd3, 0x7a, 0xf9, 0x6c, 0x0e, 0x44, 0x16, 0x98, - 0x8d, 0xd7, 0x1a, 0xaa, 0xa4, 0x0e, 0x00, 0xbe, 0x46, 0xc8, 0x99, 0x2f, 0xd7, 0xbc, 0x2e, 0xbf, - 0x3e, 0xab, 0x43, 0xc9, 0xae, 0x2e, 0x8e, 0x66, 0xe2, 0x62, 0x64, 0x89, 0x0d, 0xd7, 0x51, 0xe1, - 0x3a, 0xf0, 0x81, 0x55, 0x0b, 0xbb, 0xb9, 0x66, 0xd9, 0x2a, 0x8b, 0xae, 0x5e, 0x08, 0x07, 0x89, - 0xfd, 0xf1, 0xec, 0xbb, 0x5e, 0xe0, 0xab, 0x91, 0x5e, 0x9a, 0x7d, 0xe1, 0x25, 0xea, 0x6f, 0xe3, - 0x3f, 0x0d, 0x7d, 0xbd, 0xb6, 0xe4, 0xb8, 0x8d, 0x90, 0x93, 0x58, 0xea, 0x78, 0x2d, 0xa4, 0x25, - 0x7f, 0xc8, 0x52, 0x14, 0xfe, 0x09, 0x55, 0x52, 0x7d, 0x52, 0x77, 0x2b, 0xb9, 0x15, 0x29, 0x36, - 0x92, 0x8e, 0xb5, 0xf6, 0x6e, 0xee, 0xf4, 0xcc, 0xed, 0x9d, 0x9e, 0x79, 0x73, 0xa7, 0x67, 0xfe, - 0x9d, 0xe9, 0xda, 0xcd, 0x4c, 0xd7, 0x6e, 0x67, 0xba, 0xf6, 0x6e, 0xa6, 0x6b, 0xff, 0xbf, 0xd7, - 0x33, 0x17, 0x25, 0x55, 0xb3, 0x0f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x48, 0x5d, 0x6b, 0x0c, 0x46, - 0x08, 0x00, 0x00, -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto deleted file mode 100644 index 9ad46d690f..0000000000 --- 
a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.federation.apis.federation.v1beta1; - -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation. -message Cluster { - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec defines the behavior of the Cluster. - // +optional - optional ClusterSpec spec = 2; - - // Status describes the current status of a Cluster - // +optional - optional ClusterStatus status = 3; -} - -// ClusterCondition describes current state of a cluster. -message ClusterCondition { - // Type of cluster condition, Complete or Failed. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time the condition was checked. - // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; - - // Last time the condition transit from one status to another. - // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; - - // (brief) reason for the condition's last transition. - // +optional - optional string reason = 5; - - // Human readable message indicating details about last transition. - // +optional - optional string message = 6; -} - -// A list of all the kubernetes clusters registered to the federation -message ClusterList { - // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of Cluster objects. - repeated Cluster items = 2; -} - -// ClusterSpec describes the attributes of a kubernetes cluster. -message ClusterSpec { - // A map of client CIDR to server address. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 1; - - // Name of the secret containing kubeconfig to access this cluster. 
- // The secret is read from the kubernetes cluster that is hosting federation control plane. - // Admin needs to ensure that the required secret exists. Secret should be in the same namespace where federation control plane is hosted and it should have kubeconfig in its data with key "kubeconfig". - // This will later be changed to a reference to secret in federation control plane when the federation control plane supports secrets. - // This can be left empty if the cluster allows insecure access. - // +optional - optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference secretRef = 2; -} - -// ClusterStatus is information about the current status of a cluster updated by cluster controller peridocally. -message ClusterStatus { - // Conditions is an array of current cluster conditions. - // +optional - repeated ClusterCondition conditions = 1; - - // Zones is the list of availability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. - // These will always be in the same region. - // +optional - repeated string zones = 5; - - // Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'. - // +optional - optional string region = 6; -} - -// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. -message ServerAddressByClientCIDR { - // The CIDR with which clients can match their IP to figure out the server address that they should use. - optional string clientCIDR = 1; - - // Address of this server, suitable for a client that matches the above CIDR. - // This can be a hostname, hostname:port, IP or IP:port. - optional string serverAddress = 2; -} - diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/register.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/register.go deleted file mode 100644 index 1e8405ae67..0000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/register.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" -) - -// GroupName is the group name use in this package -const GroupName = "federation" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1beta1"} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) - AddToScheme = SchemeBuilder.AddToScheme -) - -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Cluster{}, - &ClusterList{}, - &v1.ListOptions{}, - &v1.DeleteOptions{}, - &v1.ExportOptions{}, - ) - versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} - -func (obj *Cluster) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *ClusterList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types.generated.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types.generated.go deleted file mode 100644 index 488be61eec..0000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types.generated.go +++ /dev/null @@ -1,2334 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v1beta1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg1_v1 "k8s.io/kubernetes/pkg/api/v1" - pkg3_types "k8s.io/kubernetes/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg2_unversioned.Time - var v1 pkg1_v1.LocalObjectReference - var v2 pkg3_types.UID - var v3 time.Time - _, _, _, _ = v0, v1, v2, v3 - } -} - -func (x *ServerAddressByClientCIDR) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClientCIDR)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clientCIDR")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClientCIDR)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServerAddress)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serverAddress")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServerAddress)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServerAddressByClientCIDR) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct10 := r.ContainerType() - if yyct10 == codecSelferValueTypeMap1234 { - yyl10 := r.ReadMapStart() - if yyl10 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl10, d) - } - } else if yyct10 == codecSelferValueTypeArray1234 { - yyl10 := r.ReadArrayStart() - if yyl10 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl10, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServerAddressByClientCIDR) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys11Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys11Slc - var yyhl11 bool = l >= 0 - for yyj11 := 0; ; yyj11++ { - if yyhl11 { - if yyj11 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys11Slc = r.DecodeBytes(yys11Slc, true, true) - yys11 := string(yys11Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys11 { - case "clientCIDR": - if r.TryDecodeAsNil() { - x.ClientCIDR = "" - } else { - x.ClientCIDR = string(r.DecodeString()) - } - case "serverAddress": - if r.TryDecodeAsNil() { - x.ServerAddress = "" - } else { - x.ServerAddress = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys11) - } // end switch yys11 - } // end for yyj11 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServerAddressByClientCIDR) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClientCIDR = "" - } else { - x.ClientCIDR = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServerAddress = "" - } else { - x.ServerAddress = string(r.DecodeString()) - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep18 := !z.EncBinary() - yy2arr18 := z.EncBasicHandle().StructToArray - var yyq18 [2]bool - _, _, _ = yysep18, yyq18, yy2arr18 - const yyr18 bool = false - yyq18[1] = x.SecretRef != nil - var yynn18 int - if yyr18 || yy2arr18 { - r.EncodeArrayStart(2) - } else { - yynn18 = 1 - for _, b := range yyq18 { - if b { - yynn18++ - } - } - r.EncodeMapStart(yynn18) - yynn18 = 0 - } - if yyr18 || yy2arr18 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.ServerAddressByClientCIDRs == nil { - r.EncodeNil() - } else { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - h.encSliceServerAddressByClientCIDR(([]ServerAddressByClientCIDR)(x.ServerAddressByClientCIDRs), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serverAddressByClientCIDRs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ServerAddressByClientCIDRs == nil { - r.EncodeNil() - } else { - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - h.encSliceServerAddressByClientCIDR(([]ServerAddressByClientCIDR)(x.ServerAddressByClientCIDRs), e) - } - } - } - if yyr18 || yy2arr18 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq18[1] { - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq18[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretRef")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } - } - if yyr18 || yy2arr18 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym23 := z.DecBinary() - _ = yym23 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct24 := r.ContainerType() - if yyct24 == codecSelferValueTypeMap1234 { - yyl24 := r.ReadMapStart() - if yyl24 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl24, d) - } - } else if yyct24 == codecSelferValueTypeArray1234 { - yyl24 := r.ReadArrayStart() - if yyl24 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl24, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys25Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys25Slc - var yyhl25 bool = l >= 0 - for yyj25 := 0; ; yyj25++ { - if yyhl25 { - if yyj25 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys25Slc = r.DecodeBytes(yys25Slc, true, true) - yys25 := string(yys25Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys25 { - case "serverAddressByClientCIDRs": - if r.TryDecodeAsNil() { - x.ServerAddressByClientCIDRs = nil - } else { - yyv26 := &x.ServerAddressByClientCIDRs - yym27 := z.DecBinary() - _ = yym27 - if false { - } else { - h.decSliceServerAddressByClientCIDR((*[]ServerAddressByClientCIDR)(yyv26), d) - } - } - case "secretRef": - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(pkg1_v1.LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys25) - } // end switch yys25 - } // end for yyj25 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj29 int - var yyb29 bool - var yyhl29 bool = l >= 0 - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServerAddressByClientCIDRs = nil - } else { - yyv30 := &x.ServerAddressByClientCIDRs - yym31 := z.DecBinary() - _ = yym31 - if false { - } else { - h.decSliceServerAddressByClientCIDR((*[]ServerAddressByClientCIDR)(yyv30), d) - } - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(pkg1_v1.LocalObjectReference) - } - 
x.SecretRef.CodecDecodeSelf(d) - } - for { - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj29-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ClusterConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym33 := z.EncBinary() - _ = yym33 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ClusterConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym34 := z.DecBinary() - _ = yym34 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ClusterCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym35 := z.EncBinary() - _ = yym35 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep36 := !z.EncBinary() - yy2arr36 := z.EncBasicHandle().StructToArray - var yyq36 [6]bool - _, _, _ = yysep36, yyq36, yy2arr36 - const yyr36 bool = false - yyq36[2] = true - yyq36[3] = true - yyq36[4] = x.Reason != "" - yyq36[5] = x.Message != "" - var yynn36 int - if yyr36 || yy2arr36 { - r.EncodeArrayStart(6) - } else { - yynn36 = 2 - for _, b := range yyq36 { - if b { - yynn36++ - } - } - r.EncodeMapStart(yynn36) - yynn36 = 0 - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym39 := z.EncBinary() - _ = yym39 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym40 := z.EncBinary() - _ = yym40 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq36[2] { - yy42 := &x.LastProbeTime - yym43 := z.EncBinary() - _ = yym43 - if false { - } else if z.HasExtensions() && z.EncExt(yy42) { - } else if yym43 { - z.EncBinaryMarshal(yy42) - } else if !yym43 && z.IsJSONHandle() { - z.EncJSONMarshal(yy42) - } else { - z.EncFallback(yy42) - } - } else { - r.EncodeNil() - } - } else { - if yyq36[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy44 := &x.LastProbeTime - yym45 := z.EncBinary() - _ = yym45 - if false { - } else if z.HasExtensions() && z.EncExt(yy44) { - } else if yym45 { - z.EncBinaryMarshal(yy44) - } else if !yym45 && z.IsJSONHandle() { - z.EncJSONMarshal(yy44) - } else { - 
z.EncFallback(yy44) - } - } - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq36[3] { - yy47 := &x.LastTransitionTime - yym48 := z.EncBinary() - _ = yym48 - if false { - } else if z.HasExtensions() && z.EncExt(yy47) { - } else if yym48 { - z.EncBinaryMarshal(yy47) - } else if !yym48 && z.IsJSONHandle() { - z.EncJSONMarshal(yy47) - } else { - z.EncFallback(yy47) - } - } else { - r.EncodeNil() - } - } else { - if yyq36[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy49 := &x.LastTransitionTime - yym50 := z.EncBinary() - _ = yym50 - if false { - } else if z.HasExtensions() && z.EncExt(yy49) { - } else if yym50 { - z.EncBinaryMarshal(yy49) - } else if !yym50 && z.IsJSONHandle() { - z.EncJSONMarshal(yy49) - } else { - z.EncFallback(yy49) - } - } - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq36[4] { - yym52 := z.EncBinary() - _ = yym52 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq36[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym53 := z.EncBinary() - _ = yym53 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq36[5] { - yym55 := z.EncBinary() - _ = yym55 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq36[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym56 := z.EncBinary() - _ = yym56 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym57 := z.DecBinary() - _ = yym57 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct58 := r.ContainerType() - if yyct58 == codecSelferValueTypeMap1234 { - yyl58 := r.ReadMapStart() - if yyl58 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl58, d) - } - } else if yyct58 == codecSelferValueTypeArray1234 { - yyl58 := r.ReadArrayStart() - if yyl58 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl58, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys59Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys59Slc - var yyhl59 bool = l >= 0 - for yyj59 := 0; ; yyj59++ { - if yyhl59 { - if yyj59 >= l { - break - } - } else { - if r.CheckBreak() { - break 
- } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys59Slc = r.DecodeBytes(yys59Slc, true, true) - yys59 := string(yys59Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys59 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ClusterConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg1_v1.ConditionStatus(r.DecodeString()) - } - case "lastProbeTime": - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_unversioned.Time{} - } else { - yyv62 := &x.LastProbeTime - yym63 := z.DecBinary() - _ = yym63 - if false { - } else if z.HasExtensions() && z.DecExt(yyv62) { - } else if yym63 { - z.DecBinaryUnmarshal(yyv62) - } else if !yym63 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv62) - } else { - z.DecFallback(yyv62, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv64 := &x.LastTransitionTime - yym65 := z.DecBinary() - _ = yym65 - if false { - } else if z.HasExtensions() && z.DecExt(yyv64) { - } else if yym65 { - z.DecBinaryUnmarshal(yyv64) - } else if !yym65 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv64) - } else { - z.DecFallback(yyv64, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys59) - } // end switch yys59 - } // end for yyj59 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj68 int - var yyb68 bool - var yyhl68 bool = l >= 0 - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ClusterConditionType(r.DecodeString()) - } - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg1_v1.ConditionStatus(r.DecodeString()) - } - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_unversioned.Time{} - } else { - yyv71 := &x.LastProbeTime - yym72 := z.DecBinary() - _ = yym72 - if false { - } else if z.HasExtensions() && z.DecExt(yyv71) { - } else if yym72 { - z.DecBinaryUnmarshal(yyv71) - } else if !yym72 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv71) - } else { - z.DecFallback(yyv71, false) - } - } - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv73 := &x.LastTransitionTime - 
yym74 := z.DecBinary() - _ = yym74 - if false { - } else if z.HasExtensions() && z.DecExt(yyv73) { - } else if yym74 { - z.DecBinaryUnmarshal(yyv73) - } else if !yym74 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv73) - } else { - z.DecFallback(yyv73, false) - } - } - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj68-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym77 := z.EncBinary() - _ = yym77 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep78 := !z.EncBinary() - yy2arr78 := z.EncBasicHandle().StructToArray - var yyq78 [3]bool - _, _, _ = yysep78, yyq78, yy2arr78 - const yyr78 bool = false - yyq78[0] = len(x.Conditions) != 0 - yyq78[1] = len(x.Zones) != 0 - yyq78[2] = x.Region != "" - var yynn78 int - if yyr78 || yy2arr78 { - r.EncodeArrayStart(3) - } else { - yynn78 = 0 - for _, b := range yyq78 { - if b { - yynn78++ - } - } - r.EncodeMapStart(yynn78) - yynn78 = 0 - } - if yyr78 || yy2arr78 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq78[0] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym80 := z.EncBinary() - _ = yym80 - if false { - } else { - h.encSliceClusterCondition(([]ClusterCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq78[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym81 := z.EncBinary() - _ = yym81 - if false { - } else { - h.encSliceClusterCondition(([]ClusterCondition)(x.Conditions), e) - } - } - } - } - if yyr78 || yy2arr78 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq78[1] { - if x.Zones == nil { - r.EncodeNil() - } else { - yym83 := z.EncBinary() - _ = yym83 - if false { - } else { - z.F.EncSliceStringV(x.Zones, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq78[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("zones")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Zones == nil { - r.EncodeNil() - } else { - yym84 := z.EncBinary() - _ = yym84 - if false { - } else { - z.F.EncSliceStringV(x.Zones, false, e) - } - } - } - } - if yyr78 || yy2arr78 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq78[2] { - yym86 := z.EncBinary() - _ = yym86 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Region)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq78[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("region")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym87 := z.EncBinary() - _ = yym87 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Region)) - } - } - } - if yyr78 || yy2arr78 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym88 := z.DecBinary() - _ = yym88 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct89 := r.ContainerType() - if yyct89 == codecSelferValueTypeMap1234 { - yyl89 := r.ReadMapStart() - if yyl89 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl89, d) - } - } else if yyct89 == codecSelferValueTypeArray1234 { - yyl89 := r.ReadArrayStart() - if yyl89 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl89, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys90Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys90Slc - var yyhl90 bool = l >= 0 - for yyj90 := 0; ; yyj90++ { - if yyhl90 { - if yyj90 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys90Slc = r.DecodeBytes(yys90Slc, true, true) - yys90 := string(yys90Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys90 { - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv91 := &x.Conditions - yym92 := z.DecBinary() - _ = yym92 - if false { - } else { - h.decSliceClusterCondition((*[]ClusterCondition)(yyv91), d) - } - } - case "zones": - if r.TryDecodeAsNil() { - x.Zones = nil - } else { - yyv93 := &x.Zones - yym94 := z.DecBinary() - _ = yym94 - if false { - } else { - z.F.DecSliceStringX(yyv93, false, d) - } - } - case "region": - if r.TryDecodeAsNil() { - x.Region = "" - } else { - x.Region = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys90) - } // end switch yys90 - } // end for yyj90 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj96 int - var yyb96 bool - var yyhl96 bool = l >= 0 - yyj96++ - if yyhl96 { - yyb96 = yyj96 > l - } else { - yyb96 = r.CheckBreak() - } - if yyb96 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv97 := &x.Conditions - yym98 := z.DecBinary() - _ = yym98 - if false { - } else { - h.decSliceClusterCondition((*[]ClusterCondition)(yyv97), d) - } - } - yyj96++ - if yyhl96 { - yyb96 = yyj96 > l - } else { - yyb96 = r.CheckBreak() - } - if yyb96 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Zones = nil - } else { - yyv99 := &x.Zones - yym100 := z.DecBinary() - _ = yym100 - if false { - } else { - z.F.DecSliceStringX(yyv99, false, d) - } - } - yyj96++ - if yyhl96 { - yyb96 = yyj96 > l - } else { - yyb96 = r.CheckBreak() - } - if yyb96 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Region = "" - } else { - x.Region = string(r.DecodeString()) - } - for { - yyj96++ - if yyhl96 { - yyb96 = yyj96 > l - } else { - yyb96 = r.CheckBreak() - } - if yyb96 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj96-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Cluster) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym102 := z.EncBinary() - _ = yym102 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep103 := !z.EncBinary() - yy2arr103 := z.EncBasicHandle().StructToArray - var yyq103 [5]bool - _, _, _ = yysep103, yyq103, yy2arr103 - const yyr103 bool = false - yyq103[0] = x.Kind != "" - yyq103[1] = x.APIVersion != "" - yyq103[2] = true - yyq103[3] = true - yyq103[4] = true - var yynn103 int - if yyr103 || yy2arr103 { - r.EncodeArrayStart(5) - } else { - yynn103 = 0 - for _, b := range yyq103 { - if b { - yynn103++ - } - } - r.EncodeMapStart(yynn103) - yynn103 = 0 - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq103[0] { - yym105 := z.EncBinary() - _ = yym105 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq103[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym106 := z.EncBinary() - _ = yym106 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq103[1] { - yym108 := z.EncBinary() - _ = yym108 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq103[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym109 := z.EncBinary() - _ = yym109 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq103[2] { - yy111 := &x.ObjectMeta - yy111.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq103[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy112 := &x.ObjectMeta - yy112.CodecEncodeSelf(e) - } - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq103[3] { - yy114 := 
&x.Spec - yy114.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq103[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy115 := &x.Spec - yy115.CodecEncodeSelf(e) - } - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq103[4] { - yy117 := &x.Status - yy117.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq103[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy118 := &x.Status - yy118.CodecEncodeSelf(e) - } - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Cluster) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym119 := z.DecBinary() - _ = yym119 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct120 := r.ContainerType() - if yyct120 == codecSelferValueTypeMap1234 { - yyl120 := r.ReadMapStart() - if yyl120 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl120, d) - } - } else if yyct120 == codecSelferValueTypeArray1234 { - yyl120 := r.ReadArrayStart() - if yyl120 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl120, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Cluster) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys121Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys121Slc - var yyhl121 bool = l >= 0 - for yyj121 := 0; ; yyj121++ { - if yyhl121 { - if yyj121 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys121Slc = r.DecodeBytes(yys121Slc, true, true) - yys121 := string(yys121Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys121 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv124 := &x.ObjectMeta - yyv124.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ClusterSpec{} - } else { - yyv125 := &x.Spec - yyv125.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ClusterStatus{} - } else { - yyv126 := &x.Status - yyv126.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys121) - } // end switch yys121 - } // end for yyj121 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Cluster) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj127 int - var yyb127 bool - var yyhl127 bool = l >= 0 - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - 
return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv130 := &x.ObjectMeta - yyv130.CodecDecodeSelf(d) - } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ClusterSpec{} - } else { - yyv131 := &x.Spec - yyv131.CodecDecodeSelf(d) - } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ClusterStatus{} - } else { - yyv132 := &x.Status - yyv132.CodecDecodeSelf(d) - } - for { - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj127-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym133 := z.EncBinary() - _ = yym133 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep134 := !z.EncBinary() - yy2arr134 := z.EncBasicHandle().StructToArray - var yyq134 [4]bool - _, _, _ = yysep134, yyq134, yy2arr134 - const yyr134 bool = false - yyq134[0] = x.Kind != "" - yyq134[1] = x.APIVersion != "" - yyq134[2] = true - var yynn134 int - if yyr134 || yy2arr134 { - r.EncodeArrayStart(4) - } else { - yynn134 = 1 - for _, b := range yyq134 { - if b { - yynn134++ - } - } - r.EncodeMapStart(yynn134) - yynn134 = 0 - } - if yyr134 || yy2arr134 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq134[0] { - yym136 := z.EncBinary() - _ = yym136 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq134[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym137 := z.EncBinary() - _ = yym137 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr134 || yy2arr134 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq134[1] { - yym139 := z.EncBinary() - _ = yym139 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else 
{ - if yyq134[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym140 := z.EncBinary() - _ = yym140 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr134 || yy2arr134 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq134[2] { - yy142 := &x.ListMeta - yym143 := z.EncBinary() - _ = yym143 - if false { - } else if z.HasExtensions() && z.EncExt(yy142) { - } else { - z.EncFallback(yy142) - } - } else { - r.EncodeNil() - } - } else { - if yyq134[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy144 := &x.ListMeta - yym145 := z.EncBinary() - _ = yym145 - if false { - } else if z.HasExtensions() && z.EncExt(yy144) { - } else { - z.EncFallback(yy144) - } - } - } - if yyr134 || yy2arr134 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym147 := z.EncBinary() - _ = yym147 - if false { - } else { - h.encSliceCluster(([]Cluster)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym148 := z.EncBinary() - _ = yym148 - if false { - } else { - h.encSliceCluster(([]Cluster)(x.Items), e) - } - } - } - if yyr134 || yy2arr134 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym149 := z.DecBinary() - _ = yym149 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct150 := r.ContainerType() - if yyct150 == codecSelferValueTypeMap1234 { - yyl150 := r.ReadMapStart() - if yyl150 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl150, d) - } - } else if yyct150 == codecSelferValueTypeArray1234 { - yyl150 := r.ReadArrayStart() - if yyl150 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl150, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys151Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys151Slc - var yyhl151 bool = l >= 0 - for yyj151 := 0; ; yyj151++ { - if yyhl151 { - if yyj151 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys151Slc = r.DecodeBytes(yys151Slc, true, true) - yys151 := string(yys151Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys151 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - 
yyv154 := &x.ListMeta - yym155 := z.DecBinary() - _ = yym155 - if false { - } else if z.HasExtensions() && z.DecExt(yyv154) { - } else { - z.DecFallback(yyv154, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv156 := &x.Items - yym157 := z.DecBinary() - _ = yym157 - if false { - } else { - h.decSliceCluster((*[]Cluster)(yyv156), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys151) - } // end switch yys151 - } // end for yyj151 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj158 int - var yyb158 bool - var yyhl158 bool = l >= 0 - yyj158++ - if yyhl158 { - yyb158 = yyj158 > l - } else { - yyb158 = r.CheckBreak() - } - if yyb158 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj158++ - if yyhl158 { - yyb158 = yyj158 > l - } else { - yyb158 = r.CheckBreak() - } - if yyb158 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj158++ - if yyhl158 { - yyb158 = yyj158 > l - } else { - yyb158 = r.CheckBreak() - } - if yyb158 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv161 := &x.ListMeta - yym162 := z.DecBinary() - _ = yym162 - if false { - } else if z.HasExtensions() && z.DecExt(yyv161) { - } else { - z.DecFallback(yyv161, false) - } - } - yyj158++ - if yyhl158 { - yyb158 = yyj158 > l - } else { - yyb158 = r.CheckBreak() - } - if yyb158 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv163 := &x.Items - yym164 := z.DecBinary() - _ = yym164 - if false { - } else { - h.decSliceCluster((*[]Cluster)(yyv163), d) - } - } - for { - yyj158++ - if yyhl158 { - yyb158 = yyj158 > l - } else { - yyb158 = r.CheckBreak() - } - if yyb158 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj158-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceServerAddressByClientCIDR(v []ServerAddressByClientCIDR, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv165 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy166 := &yyv165 - yy166.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceServerAddressByClientCIDR(v *[]ServerAddressByClientCIDR, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv167 := *v - yyh167, yyl167 := z.DecSliceHelperStart() - var yyc167 bool - if yyl167 == 0 { - if yyv167 == nil { - yyv167 = []ServerAddressByClientCIDR{} - yyc167 = true - } else if len(yyv167) != 0 { - yyv167 = yyv167[:0] - yyc167 = true - } - } 
else if yyl167 > 0 { - var yyrr167, yyrl167 int - var yyrt167 bool - if yyl167 > cap(yyv167) { - - yyrg167 := len(yyv167) > 0 - yyv2167 := yyv167 - yyrl167, yyrt167 = z.DecInferLen(yyl167, z.DecBasicHandle().MaxInitLen, 32) - if yyrt167 { - if yyrl167 <= cap(yyv167) { - yyv167 = yyv167[:yyrl167] - } else { - yyv167 = make([]ServerAddressByClientCIDR, yyrl167) - } - } else { - yyv167 = make([]ServerAddressByClientCIDR, yyrl167) - } - yyc167 = true - yyrr167 = len(yyv167) - if yyrg167 { - copy(yyv167, yyv2167) - } - } else if yyl167 != len(yyv167) { - yyv167 = yyv167[:yyl167] - yyc167 = true - } - yyj167 := 0 - for ; yyj167 < yyrr167; yyj167++ { - yyh167.ElemContainerState(yyj167) - if r.TryDecodeAsNil() { - yyv167[yyj167] = ServerAddressByClientCIDR{} - } else { - yyv168 := &yyv167[yyj167] - yyv168.CodecDecodeSelf(d) - } - - } - if yyrt167 { - for ; yyj167 < yyl167; yyj167++ { - yyv167 = append(yyv167, ServerAddressByClientCIDR{}) - yyh167.ElemContainerState(yyj167) - if r.TryDecodeAsNil() { - yyv167[yyj167] = ServerAddressByClientCIDR{} - } else { - yyv169 := &yyv167[yyj167] - yyv169.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj167 := 0 - for ; !r.CheckBreak(); yyj167++ { - - if yyj167 >= len(yyv167) { - yyv167 = append(yyv167, ServerAddressByClientCIDR{}) // var yyz167 ServerAddressByClientCIDR - yyc167 = true - } - yyh167.ElemContainerState(yyj167) - if yyj167 < len(yyv167) { - if r.TryDecodeAsNil() { - yyv167[yyj167] = ServerAddressByClientCIDR{} - } else { - yyv170 := &yyv167[yyj167] - yyv170.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj167 < len(yyv167) { - yyv167 = yyv167[:yyj167] - yyc167 = true - } else if yyj167 == 0 && yyv167 == nil { - yyv167 = []ServerAddressByClientCIDR{} - yyc167 = true - } - } - yyh167.End() - if yyc167 { - *v = yyv167 - } -} - -func (x codecSelfer1234) encSliceClusterCondition(v []ClusterCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv171 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy172 := &yyv171 - yy172.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceClusterCondition(v *[]ClusterCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv173 := *v - yyh173, yyl173 := z.DecSliceHelperStart() - var yyc173 bool - if yyl173 == 0 { - if yyv173 == nil { - yyv173 = []ClusterCondition{} - yyc173 = true - } else if len(yyv173) != 0 { - yyv173 = yyv173[:0] - yyc173 = true - } - } else if yyl173 > 0 { - var yyrr173, yyrl173 int - var yyrt173 bool - if yyl173 > cap(yyv173) { - - yyrg173 := len(yyv173) > 0 - yyv2173 := yyv173 - yyrl173, yyrt173 = z.DecInferLen(yyl173, z.DecBasicHandle().MaxInitLen, 112) - if yyrt173 { - if yyrl173 <= cap(yyv173) { - yyv173 = yyv173[:yyrl173] - } else { - yyv173 = make([]ClusterCondition, yyrl173) - } - } else { - yyv173 = make([]ClusterCondition, yyrl173) - } - yyc173 = true - yyrr173 = len(yyv173) - if yyrg173 { - copy(yyv173, yyv2173) - } - } else if yyl173 != len(yyv173) { - yyv173 = yyv173[:yyl173] - yyc173 = true - } - yyj173 := 0 - for ; yyj173 < yyrr173; yyj173++ { - yyh173.ElemContainerState(yyj173) - if r.TryDecodeAsNil() { - yyv173[yyj173] = ClusterCondition{} - } else { - yyv174 := &yyv173[yyj173] - yyv174.CodecDecodeSelf(d) - } - - } - if yyrt173 { - for ; yyj173 < yyl173; yyj173++ { - yyv173 = append(yyv173, 
ClusterCondition{}) - yyh173.ElemContainerState(yyj173) - if r.TryDecodeAsNil() { - yyv173[yyj173] = ClusterCondition{} - } else { - yyv175 := &yyv173[yyj173] - yyv175.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj173 := 0 - for ; !r.CheckBreak(); yyj173++ { - - if yyj173 >= len(yyv173) { - yyv173 = append(yyv173, ClusterCondition{}) // var yyz173 ClusterCondition - yyc173 = true - } - yyh173.ElemContainerState(yyj173) - if yyj173 < len(yyv173) { - if r.TryDecodeAsNil() { - yyv173[yyj173] = ClusterCondition{} - } else { - yyv176 := &yyv173[yyj173] - yyv176.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj173 < len(yyv173) { - yyv173 = yyv173[:yyj173] - yyc173 = true - } else if yyj173 == 0 && yyv173 == nil { - yyv173 = []ClusterCondition{} - yyc173 = true - } - } - yyh173.End() - if yyc173 { - *v = yyv173 - } -} - -func (x codecSelfer1234) encSliceCluster(v []Cluster, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv177 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy178 := &yyv177 - yy178.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCluster(v *[]Cluster, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv179 := *v - yyh179, yyl179 := z.DecSliceHelperStart() - var yyc179 bool - if yyl179 == 0 { - if yyv179 == nil { - yyv179 = []Cluster{} - yyc179 = true - } else if len(yyv179) != 0 { - yyv179 = yyv179[:0] - yyc179 = true - } - } else if yyl179 > 0 { - var yyrr179, yyrl179 int - var yyrt179 bool - if yyl179 > cap(yyv179) { - - yyrg179 := len(yyv179) > 0 - yyv2179 := yyv179 - yyrl179, yyrt179 = z.DecInferLen(yyl179, z.DecBasicHandle().MaxInitLen, 352) - if yyrt179 { - if yyrl179 <= cap(yyv179) { - yyv179 = yyv179[:yyrl179] - } else { - yyv179 = make([]Cluster, yyrl179) - } - } else { - yyv179 = make([]Cluster, yyrl179) - } - yyc179 = true - yyrr179 = len(yyv179) - if yyrg179 { - copy(yyv179, yyv2179) - } - } else if yyl179 != len(yyv179) { - yyv179 = yyv179[:yyl179] - yyc179 = true - } - yyj179 := 0 - for ; yyj179 < yyrr179; yyj179++ { - yyh179.ElemContainerState(yyj179) - if r.TryDecodeAsNil() { - yyv179[yyj179] = Cluster{} - } else { - yyv180 := &yyv179[yyj179] - yyv180.CodecDecodeSelf(d) - } - - } - if yyrt179 { - for ; yyj179 < yyl179; yyj179++ { - yyv179 = append(yyv179, Cluster{}) - yyh179.ElemContainerState(yyj179) - if r.TryDecodeAsNil() { - yyv179[yyj179] = Cluster{} - } else { - yyv181 := &yyv179[yyj179] - yyv181.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj179 := 0 - for ; !r.CheckBreak(); yyj179++ { - - if yyj179 >= len(yyv179) { - yyv179 = append(yyv179, Cluster{}) // var yyz179 Cluster - yyc179 = true - } - yyh179.ElemContainerState(yyj179) - if yyj179 < len(yyv179) { - if r.TryDecodeAsNil() { - yyv179[yyj179] = Cluster{} - } else { - yyv182 := &yyv179[yyj179] - yyv182.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj179 < len(yyv179) { - yyv179 = yyv179[:yyj179] - yyc179 = true - } else if yyj179 == 0 && yyv179 == nil { - yyv179 = []Cluster{} - yyc179 = true - } - } - yyh179.End() - if yyc179 { - *v = yyv179 - } -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types.go deleted file mode 100644 index 8493172395..0000000000 --- 
a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types.go +++ /dev/null @@ -1,122 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" -) - -// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. -type ServerAddressByClientCIDR struct { - // The CIDR with which clients can match their IP to figure out the server address that they should use. - ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"` - // Address of this server, suitable for a client that matches the above CIDR. - // This can be a hostname, hostname:port, IP or IP:port. - ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"` -} - -// ClusterSpec describes the attributes of a kubernetes cluster. -type ClusterSpec struct { - // A map of client CIDR to server address. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" patchStrategy:"merge" patchMergeKey:"clientCIDR" protobuf:"bytes,1,rep,name=serverAddressByClientCIDRs"` - // Name of the secret containing kubeconfig to access this cluster. - // The secret is read from the kubernetes cluster that is hosting federation control plane. - // Admin needs to ensure that the required secret exists. Secret should be in the same namespace where federation control plane is hosted and it should have kubeconfig in its data with key "kubeconfig". - // This will later be changed to a reference to secret in federation control plane when the federation control plane supports secrets. - // This can be left empty if the cluster allows insecure access. - // +optional - SecretRef *v1.LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,2,opt,name=secretRef"` -} - -type ClusterConditionType string - -// These are valid conditions of a cluster. -const ( - // ClusterReady means the cluster is ready to accept workloads. - ClusterReady ClusterConditionType = "Ready" - // ClusterOffline means the cluster is temporarily down or not reachable - ClusterOffline ClusterConditionType = "Offline" -) - -// ClusterCondition describes current state of a cluster. -type ClusterCondition struct { - // Type of cluster condition, Complete or Failed. - Type ClusterConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ClusterConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` - // Last time the condition was checked. 
- // +optional - LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` - // Last time the condition transit from one status to another. - // +optional - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` - // (brief) reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - // Human readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` -} - -// ClusterStatus is information about the current status of a cluster updated by cluster controller peridocally. -type ClusterStatus struct { - // Conditions is an array of current cluster conditions. - // +optional - Conditions []ClusterCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` - // Zones is the list of availability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. - // These will always be in the same region. - // +optional - Zones []string `json:"zones,omitempty" protobuf:"bytes,5,rep,name=zones"` - // Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'. - // +optional - Region string `json:"region,omitempty" protobuf:"bytes,6,opt,name=region"` -} - -// +genclient=true -// +nonNamespaced=true - -// Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation. -type Cluster struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the behavior of the Cluster. - // +optional - Spec ClusterSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status describes the current status of a Cluster - // +optional - Status ClusterStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// A list of all the kubernetes clusters registered to the federation -type ClusterList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of Cluster objects. - Items []Cluster `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index 55985fcb8c..0000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_Cluster = map[string]string{ - "": "Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of the Cluster.", - "status": "Status describes the current status of a Cluster", -} - -func (Cluster) SwaggerDoc() map[string]string { - return map_Cluster -} - -var map_ClusterCondition = map[string]string{ - "": "ClusterCondition describes current state of a cluster.", - "type": "Type of cluster condition, Complete or Failed.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastProbeTime": "Last time the condition was checked.", - "lastTransitionTime": "Last time the condition transit from one status to another.", - "reason": "(brief) reason for the condition's last transition.", - "message": "Human readable message indicating details about last transition.", -} - -func (ClusterCondition) SwaggerDoc() map[string]string { - return map_ClusterCondition -} - -var map_ClusterList = map[string]string{ - "": "A list of all the kubernetes clusters registered to the federation", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "items": "List of Cluster objects.", -} - -func (ClusterList) SwaggerDoc() map[string]string { - return map_ClusterList -} - -var map_ClusterSpec = map[string]string{ - "": "ClusterSpec describes the attributes of a kubernetes cluster.", - "serverAddressByClientCIDRs": "A map of client CIDR to server address. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR.", - "secretRef": "Name of the secret containing kubeconfig to access this cluster. The secret is read from the kubernetes cluster that is hosting federation control plane. Admin needs to ensure that the required secret exists. Secret should be in the same namespace where federation control plane is hosted and it should have kubeconfig in its data with key \"kubeconfig\". This will later be changed to a reference to secret in federation control plane when the federation control plane supports secrets. 
This can be left empty if the cluster allows insecure access.", -} - -func (ClusterSpec) SwaggerDoc() map[string]string { - return map_ClusterSpec -} - -var map_ClusterStatus = map[string]string{ - "": "ClusterStatus is information about the current status of a cluster updated by cluster controller peridocally.", - "conditions": "Conditions is an array of current cluster conditions.", - "zones": "Zones is the list of availability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. These will always be in the same region.", - "region": "Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'.", -} - -func (ClusterStatus) SwaggerDoc() map[string]string { - return map_ClusterStatus -} - -var map_ServerAddressByClientCIDR = map[string]string{ - "": "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.", - "clientCIDR": "The CIDR with which clients can match their IP to figure out the server address that they should use.", - "serverAddress": "Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.", -} - -func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string { - return map_ServerAddressByClientCIDR -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/zz_generated.conversion.go deleted file mode 100644 index a20a758d51..0000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/zz_generated.conversion.go +++ /dev/null @@ -1,199 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1beta1 - -import ( - federation "k8s.io/kubernetes/federation/apis/federation" - api "k8s.io/kubernetes/pkg/api" - v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1beta1_Cluster_To_federation_Cluster, - Convert_federation_Cluster_To_v1beta1_Cluster, - Convert_v1beta1_ClusterCondition_To_federation_ClusterCondition, - Convert_federation_ClusterCondition_To_v1beta1_ClusterCondition, - Convert_v1beta1_ClusterList_To_federation_ClusterList, - Convert_federation_ClusterList_To_v1beta1_ClusterList, - Convert_v1beta1_ClusterSpec_To_federation_ClusterSpec, - Convert_federation_ClusterSpec_To_v1beta1_ClusterSpec, - Convert_v1beta1_ClusterStatus_To_federation_ClusterStatus, - Convert_federation_ClusterStatus_To_v1beta1_ClusterStatus, - Convert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR, - Convert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR, - ) -} - -func autoConvert_v1beta1_Cluster_To_federation_Cluster(in *Cluster, out *federation.Cluster, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v1beta1_ClusterSpec_To_federation_ClusterSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_ClusterStatus_To_federation_ClusterStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_Cluster_To_federation_Cluster(in *Cluster, out *federation.Cluster, s conversion.Scope) error { - return autoConvert_v1beta1_Cluster_To_federation_Cluster(in, out, s) -} - -func autoConvert_federation_Cluster_To_v1beta1_Cluster(in *federation.Cluster, out *Cluster, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_federation_ClusterSpec_To_v1beta1_ClusterSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_federation_ClusterStatus_To_v1beta1_ClusterStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_federation_Cluster_To_v1beta1_Cluster(in *federation.Cluster, out *Cluster, s conversion.Scope) error { - return autoConvert_federation_Cluster_To_v1beta1_Cluster(in, out, s) -} - -func autoConvert_v1beta1_ClusterCondition_To_federation_ClusterCondition(in *ClusterCondition, out *federation.ClusterCondition, s conversion.Scope) error { - out.Type = federation.ClusterConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_v1beta1_ClusterCondition_To_federation_ClusterCondition(in *ClusterCondition, out *federation.ClusterCondition, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterCondition_To_federation_ClusterCondition(in, out, s) -} - -func autoConvert_federation_ClusterCondition_To_v1beta1_ClusterCondition(in *federation.ClusterCondition, out *ClusterCondition, s conversion.Scope) error { - out.Type = ClusterConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_federation_ClusterCondition_To_v1beta1_ClusterCondition(in *federation.ClusterCondition, out *ClusterCondition, s conversion.Scope) error { - return 
autoConvert_federation_ClusterCondition_To_v1beta1_ClusterCondition(in, out, s) -} - -func autoConvert_v1beta1_ClusterList_To_federation_ClusterList(in *ClusterList, out *federation.ClusterList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]federation.Cluster)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1beta1_ClusterList_To_federation_ClusterList(in *ClusterList, out *federation.ClusterList, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterList_To_federation_ClusterList(in, out, s) -} - -func autoConvert_federation_ClusterList_To_v1beta1_ClusterList(in *federation.ClusterList, out *ClusterList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]Cluster)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_federation_ClusterList_To_v1beta1_ClusterList(in *federation.ClusterList, out *ClusterList, s conversion.Scope) error { - return autoConvert_federation_ClusterList_To_v1beta1_ClusterList(in, out, s) -} - -func autoConvert_v1beta1_ClusterSpec_To_federation_ClusterSpec(in *ClusterSpec, out *federation.ClusterSpec, s conversion.Scope) error { - out.ServerAddressByClientCIDRs = *(*[]federation.ServerAddressByClientCIDR)(unsafe.Pointer(&in.ServerAddressByClientCIDRs)) - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - return nil -} - -func Convert_v1beta1_ClusterSpec_To_federation_ClusterSpec(in *ClusterSpec, out *federation.ClusterSpec, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterSpec_To_federation_ClusterSpec(in, out, s) -} - -func autoConvert_federation_ClusterSpec_To_v1beta1_ClusterSpec(in *federation.ClusterSpec, out *ClusterSpec, s conversion.Scope) error { - out.ServerAddressByClientCIDRs = *(*[]ServerAddressByClientCIDR)(unsafe.Pointer(&in.ServerAddressByClientCIDRs)) - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - return nil -} - -func Convert_federation_ClusterSpec_To_v1beta1_ClusterSpec(in *federation.ClusterSpec, out *ClusterSpec, s conversion.Scope) error { - return autoConvert_federation_ClusterSpec_To_v1beta1_ClusterSpec(in, out, s) -} - -func autoConvert_v1beta1_ClusterStatus_To_federation_ClusterStatus(in *ClusterStatus, out *federation.ClusterStatus, s conversion.Scope) error { - out.Conditions = *(*[]federation.ClusterCondition)(unsafe.Pointer(&in.Conditions)) - out.Zones = *(*[]string)(unsafe.Pointer(&in.Zones)) - out.Region = in.Region - return nil -} - -func Convert_v1beta1_ClusterStatus_To_federation_ClusterStatus(in *ClusterStatus, out *federation.ClusterStatus, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterStatus_To_federation_ClusterStatus(in, out, s) -} - -func autoConvert_federation_ClusterStatus_To_v1beta1_ClusterStatus(in *federation.ClusterStatus, out *ClusterStatus, s conversion.Scope) error { - out.Conditions = *(*[]ClusterCondition)(unsafe.Pointer(&in.Conditions)) - out.Zones = *(*[]string)(unsafe.Pointer(&in.Zones)) - out.Region = in.Region - return nil -} - -func Convert_federation_ClusterStatus_To_v1beta1_ClusterStatus(in *federation.ClusterStatus, out *ClusterStatus, s conversion.Scope) error { - return autoConvert_federation_ClusterStatus_To_v1beta1_ClusterStatus(in, out, s) -} - -func autoConvert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in *ServerAddressByClientCIDR, out *federation.ServerAddressByClientCIDR, s conversion.Scope) error { - out.ClientCIDR = in.ClientCIDR - out.ServerAddress = in.ServerAddress - return nil -} - -func 
Convert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in *ServerAddressByClientCIDR, out *federation.ServerAddressByClientCIDR, s conversion.Scope) error { - return autoConvert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in, out, s) -} - -func autoConvert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(in *federation.ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, s conversion.Scope) error { - out.ClientCIDR = in.ClientCIDR - out.ServerAddress = in.ServerAddress - return nil -} - -func Convert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(in *federation.ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, s conversion.Scope) error { - return autoConvert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index fbe4374567..0000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,159 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1beta1 - -import ( - v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Cluster, InType: reflect.TypeOf(&Cluster{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterCondition, InType: reflect.TypeOf(&ClusterCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterList, InType: reflect.TypeOf(&ClusterList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterSpec, InType: reflect.TypeOf(&ClusterSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterStatus, InType: reflect.TypeOf(&ClusterStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ServerAddressByClientCIDR, InType: reflect.TypeOf(&ServerAddressByClientCIDR{})}, - ) -} - -func DeepCopy_v1beta1_Cluster(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Cluster) - out := out.(*Cluster) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_ClusterSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_ClusterStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_ClusterCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterCondition) - out := out.(*ClusterCondition) - out.Type = in.Type - out.Status = in.Status - out.LastProbeTime = in.LastProbeTime.DeepCopy() - out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - out.Reason = in.Reason - out.Message = in.Message - return nil - } -} - -func DeepCopy_v1beta1_ClusterList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterList) - out := out.(*ClusterList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Cluster, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_Cluster(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil - } -} - -func DeepCopy_v1beta1_ClusterSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterSpec) - out := out.(*ClusterSpec) - if in.ServerAddressByClientCIDRs != nil { - in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs - *out = make([]ServerAddressByClientCIDR, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.ServerAddressByClientCIDRs = nil - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(v1.LocalObjectReference) - **out = **in - } else { - out.SecretRef = nil - } - return nil - } -} - -func DeepCopy_v1beta1_ClusterStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterStatus) - out := out.(*ClusterStatus) - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ClusterCondition, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_ClusterCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - if in.Zones != nil { - in, out := &in.Zones, &out.Zones - *out = make([]string, len(*in)) - copy(*out, *in) - } else { - out.Zones = nil - } - out.Region = in.Region - return nil - } -} - -func DeepCopy_v1beta1_ServerAddressByClientCIDR(in interface{}, out interface{}, c *conversion.Cloner) error { - { 
- in := in.(*ServerAddressByClientCIDR) - out := out.(*ServerAddressByClientCIDR) - out.ClientCIDR = in.ClientCIDR - out.ServerAddress = in.ServerAddress - return nil - } -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/federation/apis/federation/zz_generated.deepcopy.go deleted file mode 100644 index 5126feb89c..0000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/zz_generated.deepcopy.go +++ /dev/null @@ -1,200 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package federation - -import ( - api "k8s.io/kubernetes/pkg/api" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_Cluster, InType: reflect.TypeOf(&Cluster{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_ClusterCondition, InType: reflect.TypeOf(&ClusterCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_ClusterList, InType: reflect.TypeOf(&ClusterList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_ClusterReplicaSetPreferences, InType: reflect.TypeOf(&ClusterReplicaSetPreferences{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_ClusterSpec, InType: reflect.TypeOf(&ClusterSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_ClusterStatus, InType: reflect.TypeOf(&ClusterStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_FederatedReplicaSetPreferences, InType: reflect.TypeOf(&FederatedReplicaSetPreferences{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_ServerAddressByClientCIDR, InType: reflect.TypeOf(&ServerAddressByClientCIDR{})}, - ) -} - -func DeepCopy_federation_Cluster(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Cluster) - out := out.(*Cluster) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_federation_ClusterSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_federation_ClusterStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_federation_ClusterCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterCondition) - out := out.(*ClusterCondition) - out.Type = in.Type - out.Status = in.Status - out.LastProbeTime = in.LastProbeTime.DeepCopy() - out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - out.Reason = in.Reason - 
out.Message = in.Message - return nil - } -} - -func DeepCopy_federation_ClusterList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterList) - out := out.(*ClusterList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Cluster, len(*in)) - for i := range *in { - if err := DeepCopy_federation_Cluster(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil - } -} - -func DeepCopy_federation_ClusterReplicaSetPreferences(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterReplicaSetPreferences) - out := out.(*ClusterReplicaSetPreferences) - out.MinReplicas = in.MinReplicas - if in.MaxReplicas != nil { - in, out := &in.MaxReplicas, &out.MaxReplicas - *out = new(int64) - **out = **in - } else { - out.MaxReplicas = nil - } - out.Weight = in.Weight - return nil - } -} - -func DeepCopy_federation_ClusterSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterSpec) - out := out.(*ClusterSpec) - if in.ServerAddressByClientCIDRs != nil { - in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs - *out = make([]ServerAddressByClientCIDR, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.ServerAddressByClientCIDRs = nil - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(api.LocalObjectReference) - **out = **in - } else { - out.SecretRef = nil - } - return nil - } -} - -func DeepCopy_federation_ClusterStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterStatus) - out := out.(*ClusterStatus) - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ClusterCondition, len(*in)) - for i := range *in { - if err := DeepCopy_federation_ClusterCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - if in.Zones != nil { - in, out := &in.Zones, &out.Zones - *out = make([]string, len(*in)) - copy(*out, *in) - } else { - out.Zones = nil - } - out.Region = in.Region - return nil - } -} - -func DeepCopy_federation_FederatedReplicaSetPreferences(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*FederatedReplicaSetPreferences) - out := out.(*FederatedReplicaSetPreferences) - out.Rebalance = in.Rebalance - if in.Clusters != nil { - in, out := &in.Clusters, &out.Clusters - *out = make(map[string]ClusterReplicaSetPreferences) - for key, val := range *in { - newVal := new(ClusterReplicaSetPreferences) - if err := DeepCopy_federation_ClusterReplicaSetPreferences(&val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Clusters = nil - } - return nil - } -} - -func DeepCopy_federation_ServerAddressByClientCIDR(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ServerAddressByClientCIDR) - out := out.(*ServerAddressByClientCIDR) - out.ClientCIDR = in.ClientCIDR - out.ServerAddress = in.ServerAddress - return nil - } -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/BUILD b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/BUILD deleted file mode 100644 index 8134f2827e..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/BUILD +++ 
/dev/null @@ -1,32 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "clientset.go", - "doc.go", - "import_known_versions.go", - ], - tags = ["automanaged"], - deps = [ - "//federation/apis/federation/install:go_default_library", - "//federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion:go_default_library", - "//federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion:go_default_library", - "//federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/typed/discovery:go_default_library", - "//pkg/util/flowcontrol:go_default_library", - "//plugin/pkg/client/auth:go_default_library", - "//vendor:github.com/golang/glog", - ], -) diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/clientset.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/clientset.go deleted file mode 100644 index 021501d7c4..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/clientset.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package federation_internalclientset - -import ( - "github.com/golang/glog" - internalversioncore "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion" - internalversionextensions "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion" - internalversionfederation "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion" - restclient "k8s.io/kubernetes/pkg/client/restclient" - discovery "k8s.io/kubernetes/pkg/client/typed/discovery" - "k8s.io/kubernetes/pkg/util/flowcontrol" - _ "k8s.io/kubernetes/plugin/pkg/client/auth" -) - -type Interface interface { - Discovery() discovery.DiscoveryInterface - Core() internalversioncore.CoreInterface - - Extensions() internalversionextensions.ExtensionsInterface - - Federation() internalversionfederation.FederationInterface -} - -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. 
-type Clientset struct { - *discovery.DiscoveryClient - *internalversioncore.CoreClient - *internalversionextensions.ExtensionsClient - *internalversionfederation.FederationClient -} - -// Core retrieves the CoreClient -func (c *Clientset) Core() internalversioncore.CoreInterface { - if c == nil { - return nil - } - return c.CoreClient -} - -// Extensions retrieves the ExtensionsClient -func (c *Clientset) Extensions() internalversionextensions.ExtensionsInterface { - if c == nil { - return nil - } - return c.ExtensionsClient -} - -// Federation retrieves the FederationClient -func (c *Clientset) Federation() internalversionfederation.FederationInterface { - if c == nil { - return nil - } - return c.FederationClient -} - -// Discovery retrieves the DiscoveryClient -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - return c.DiscoveryClient -} - -// NewForConfig creates a new Clientset for the given config. -func NewForConfig(c *restclient.Config) (*Clientset, error) { - configShallowCopy := *c - if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { - configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) - } - var clientset Clientset - var err error - clientset.CoreClient, err = internalversioncore.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - clientset.ExtensionsClient, err = internalversionextensions.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - clientset.FederationClient, err = internalversionfederation.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - - clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) - if err != nil { - glog.Errorf("failed to create the DiscoveryClient: %v", err) - return nil, err - } - return &clientset, nil -} - -// NewForConfigOrDie creates a new Clientset for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *Clientset { - var clientset Clientset - clientset.CoreClient = internalversioncore.NewForConfigOrDie(c) - clientset.ExtensionsClient = internalversionextensions.NewForConfigOrDie(c) - clientset.FederationClient = internalversionfederation.NewForConfigOrDie(c) - - clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &clientset -} - -// New creates a new Clientset for the given RESTClient. -func New(c restclient.Interface) *Clientset { - var clientset Clientset - clientset.CoreClient = internalversioncore.New(c) - clientset.ExtensionsClient = internalversionextensions.New(c) - clientset.FederationClient = internalversionfederation.New(c) - - clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) - return &clientset -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/doc.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/doc.go deleted file mode 100644 index 75da2a41f1..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with arguments: --clientset-name=federation_internalclientset --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/Service,api/Namespace,extensions/ReplicaSet,api/Secret,extensions/Ingress,extensions/Deployment,extensions/DaemonSet,api/ConfigMap,api/Event] --input=[../../federation/apis/federation/,api/,extensions/] - -// This package has the automatically generated clientset. -package federation_internalclientset diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/import_known_versions.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/import_known_versions.go deleted file mode 100644 index aa7329b391..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/import_known_versions.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package federation_internalclientset - -// These imports are the API groups the client will support. 
-import ( - _ "k8s.io/kubernetes/federation/apis/federation/install" -) - -func init() { -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/BUILD b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/BUILD deleted file mode 100644 index eb6000c90d..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "configmap.go", - "core_client.go", - "doc.go", - "event.go", - "generated_expansion.go", - "namespace.go", - "namespace_expansion.go", - "secret.go", - "service.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/configmap.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/configmap.go deleted file mode 100644 index 0956777bb7..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/configmap.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ConfigMapsGetter has a method to return a ConfigMapInterface. -// A group's client should implement this interface. -type ConfigMapsGetter interface { - ConfigMaps(namespace string) ConfigMapInterface -} - -// ConfigMapInterface has methods to work with ConfigMap resources. 
-type ConfigMapInterface interface { - Create(*api.ConfigMap) (*api.ConfigMap, error) - Update(*api.ConfigMap) (*api.ConfigMap, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.ConfigMap, error) - List(opts api.ListOptions) (*api.ConfigMapList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.ConfigMap, err error) - ConfigMapExpansion -} - -// configMaps implements ConfigMapInterface -type configMaps struct { - client restclient.Interface - ns string -} - -// newConfigMaps returns a ConfigMaps -func newConfigMaps(c *CoreClient, namespace string) *configMaps { - return &configMaps{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Create(configMap *api.ConfigMap) (result *api.ConfigMap, err error) { - result = &api.ConfigMap{} - err = c.client.Post(). - Namespace(c.ns). - Resource("configmaps"). - Body(configMap). - Do(). - Into(result) - return -} - -// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Update(configMap *api.ConfigMap) (result *api.ConfigMap, err error) { - result = &api.ConfigMap{} - err = c.client.Put(). - Namespace(c.ns). - Resource("configmaps"). - Name(configMap.Name). - Body(configMap). - Do(). - Into(result) - return -} - -// Delete takes name of the configMap and deletes it. Returns an error if one occurs. -func (c *configMaps) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *configMaps) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. -func (c *configMaps) Get(name string) (result *api.ConfigMap, err error) { - result = &api.ConfigMap{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. -func (c *configMaps) List(opts api.ListOptions) (result *api.ConfigMapList, err error) { - result = &api.ConfigMapList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested configMaps. -func (c *configMaps) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched configMap. 
-func (c *configMaps) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.ConfigMap, err error) { - result = &api.ConfigMap{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("configmaps"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/core_client.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/core_client.go deleted file mode 100644 index d5c79fe56a..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/core_client.go +++ /dev/null @@ -1,119 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - registered "k8s.io/kubernetes/pkg/apimachinery/registered" - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -type CoreInterface interface { - RESTClient() restclient.Interface - ConfigMapsGetter - EventsGetter - NamespacesGetter - SecretsGetter - ServicesGetter -} - -// CoreClient is used to interact with features provided by the k8s.io/kubernetes/pkg/apimachinery/registered.Group group. -type CoreClient struct { - restClient restclient.Interface -} - -func (c *CoreClient) ConfigMaps(namespace string) ConfigMapInterface { - return newConfigMaps(c, namespace) -} - -func (c *CoreClient) Events(namespace string) EventInterface { - return newEvents(c, namespace) -} - -func (c *CoreClient) Namespaces() NamespaceInterface { - return newNamespaces(c) -} - -func (c *CoreClient) Secrets(namespace string) SecretInterface { - return newSecrets(c, namespace) -} - -func (c *CoreClient) Services(namespace string) ServiceInterface { - return newServices(c, namespace) -} - -// NewForConfig creates a new CoreClient for the given config. -func NewForConfig(c *restclient.Config) (*CoreClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &CoreClient{client}, nil -} - -// NewForConfigOrDie creates a new CoreClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *CoreClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new CoreClient for the given RESTClient. 
-func New(c restclient.Interface) *CoreClient { - return &CoreClient{c} -} - -func setConfigDefaults(config *restclient.Config) error { - // if core group is not registered, return an error - g, err := registered.Group("") - if err != nil { - return err - } - config.APIPath = "/api" - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != g.GroupVersion.Group { - copyGroupVersion := g.GroupVersion - config.GroupVersion = &copyGroupVersion - } - config.NegotiatedSerializer = api.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *CoreClient) RESTClient() restclient.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/doc.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/doc.go deleted file mode 100644 index 26c978f7d5..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with arguments: --clientset-name=federation_internalclientset --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/Service,api/Namespace,extensions/ReplicaSet,api/Secret,extensions/Ingress,extensions/Deployment,extensions/DaemonSet,api/ConfigMap,api/Event] --input=[../../federation/apis/federation/,api/,extensions/] - -// This package has the automatically generated typed clients. -package internalversion diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/event.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/event.go deleted file mode 100644 index 8667ac4688..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/event.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// EventsGetter has a method to return a EventInterface. -// A group's client should implement this interface. -type EventsGetter interface { - Events(namespace string) EventInterface -} - -// EventInterface has methods to work with Event resources. -type EventInterface interface { - Create(*api.Event) (*api.Event, error) - Update(*api.Event) (*api.Event, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.Event, error) - List(opts api.ListOptions) (*api.EventList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Event, err error) - EventExpansion -} - -// events implements EventInterface -type events struct { - client restclient.Interface - ns string -} - -// newEvents returns a Events -func newEvents(c *CoreClient, namespace string) *events { - return &events{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(event *api.Event) (result *api.Event, err error) { - result = &api.Event{} - err = c.client.Post(). - Namespace(c.ns). - Resource("events"). - Body(event). - Do(). - Into(result) - return -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(event *api.Event) (result *api.Event, err error) { - result = &api.Event{} - err = c.client.Put(). - Namespace(c.ns). - Resource("events"). - Name(event.Name). - Body(event). - Do(). - Into(result) - return -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *events) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *events) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *events) Get(name string) (result *api.Event, err error) { - result = &api.Event{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(opts api.ListOptions) (result *api.EventList, err error) { - result = &api.EventList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *events) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). 
- Resource("events"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched event. -func (c *events) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Event, err error) { - result = &api.Event{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("events"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/generated_expansion.go deleted file mode 100644 index 335a9f8a4b..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/generated_expansion.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -type ConfigMapExpansion interface{} - -type EventExpansion interface{} - -type SecretExpansion interface{} - -type ServiceExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/namespace.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/namespace.go deleted file mode 100644 index b94a2aa27f..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/namespace.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// NamespacesGetter has a method to return a NamespaceInterface. -// A group's client should implement this interface. -type NamespacesGetter interface { - Namespaces() NamespaceInterface -} - -// NamespaceInterface has methods to work with Namespace resources. 
-type NamespaceInterface interface { - Create(*api.Namespace) (*api.Namespace, error) - Update(*api.Namespace) (*api.Namespace, error) - UpdateStatus(*api.Namespace) (*api.Namespace, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.Namespace, error) - List(opts api.ListOptions) (*api.NamespaceList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Namespace, err error) - NamespaceExpansion -} - -// namespaces implements NamespaceInterface -type namespaces struct { - client restclient.Interface -} - -// newNamespaces returns a Namespaces -func newNamespaces(c *CoreClient) *namespaces { - return &namespaces{ - client: c.RESTClient(), - } -} - -// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Create(namespace *api.Namespace) (result *api.Namespace, err error) { - result = &api.Namespace{} - err = c.client.Post(). - Resource("namespaces"). - Body(namespace). - Do(). - Into(result) - return -} - -// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Update(namespace *api.Namespace) (result *api.Namespace, err error) { - result = &api.Namespace{} - err = c.client.Put(). - Resource("namespaces"). - Name(namespace.Name). - Body(namespace). - Do(). - Into(result) - return -} - -func (c *namespaces) UpdateStatus(namespace *api.Namespace) (result *api.Namespace, err error) { - result = &api.Namespace{} - err = c.client.Put(). - Resource("namespaces"). - Name(namespace.Name). - SubResource("status"). - Body(namespace). - Do(). - Into(result) - return -} - -// Delete takes name of the namespace and deletes it. Returns an error if one occurs. -func (c *namespaces) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Resource("namespaces"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *namespaces) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Resource("namespaces"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. -func (c *namespaces) Get(name string) (result *api.Namespace, err error) { - result = &api.Namespace{} - err = c.client.Get(). - Resource("namespaces"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Namespaces that match those selectors. -func (c *namespaces) List(opts api.ListOptions) (result *api.NamespaceList, err error) { - result = &api.NamespaceList{} - err = c.client.Get(). - Resource("namespaces"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested namespaces. -func (c *namespaces) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("namespaces"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched namespace. 
-func (c *namespaces) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Namespace, err error) { - result = &api.Namespace{} - err = c.client.Patch(pt). - Resource("namespaces"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/namespace_expansion.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/namespace_expansion.go deleted file mode 100644 index 456de1cdfc..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/namespace_expansion.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import "k8s.io/kubernetes/pkg/api" - -// The NamespaceExpansion interface allows manually adding extra methods to the NamespaceInterface. -type NamespaceExpansion interface { - Finalize(item *api.Namespace) (*api.Namespace, error) -} - -// Finalize takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. -func (c *namespaces) Finalize(namespace *api.Namespace) (result *api.Namespace, err error) { - result = &api.Namespace{} - err = c.client.Put().Resource("namespaces").Name(namespace.Name).SubResource("finalize").Body(namespace).Do().Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/secret.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/secret.go deleted file mode 100644 index 6fb40f5a6e..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/secret.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// SecretsGetter has a method to return a SecretInterface. -// A group's client should implement this interface. 
-type SecretsGetter interface { - Secrets(namespace string) SecretInterface -} - -// SecretInterface has methods to work with Secret resources. -type SecretInterface interface { - Create(*api.Secret) (*api.Secret, error) - Update(*api.Secret) (*api.Secret, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.Secret, error) - List(opts api.ListOptions) (*api.SecretList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Secret, err error) - SecretExpansion -} - -// secrets implements SecretInterface -type secrets struct { - client restclient.Interface - ns string -} - -// newSecrets returns a Secrets -func newSecrets(c *CoreClient, namespace string) *secrets { - return &secrets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Create(secret *api.Secret) (result *api.Secret, err error) { - result = &api.Secret{} - err = c.client.Post(). - Namespace(c.ns). - Resource("secrets"). - Body(secret). - Do(). - Into(result) - return -} - -// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Update(secret *api.Secret) (result *api.Secret, err error) { - result = &api.Secret{} - err = c.client.Put(). - Namespace(c.ns). - Resource("secrets"). - Name(secret.Name). - Body(secret). - Do(). - Into(result) - return -} - -// Delete takes name of the secret and deletes it. Returns an error if one occurs. -func (c *secrets) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("secrets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *secrets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. -func (c *secrets) Get(name string) (result *api.Secret, err error) { - result = &api.Secret{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Secrets that match those selectors. -func (c *secrets) List(opts api.ListOptions) (result *api.SecretList, err error) { - result = &api.SecretList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested secrets. -func (c *secrets) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched secret. -func (c *secrets) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Secret, err error) { - result = &api.Secret{} - err = c.client.Patch(pt). 
- Namespace(c.ns). - Resource("secrets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/service.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/service.go deleted file mode 100644 index 2e912e4b1d..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/service.go +++ /dev/null @@ -1,165 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ServicesGetter has a method to return a ServiceInterface. -// A group's client should implement this interface. -type ServicesGetter interface { - Services(namespace string) ServiceInterface -} - -// ServiceInterface has methods to work with Service resources. -type ServiceInterface interface { - Create(*api.Service) (*api.Service, error) - Update(*api.Service) (*api.Service, error) - UpdateStatus(*api.Service) (*api.Service, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.Service, error) - List(opts api.ListOptions) (*api.ServiceList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Service, err error) - ServiceExpansion -} - -// services implements ServiceInterface -type services struct { - client restclient.Interface - ns string -} - -// newServices returns a Services -func newServices(c *CoreClient, namespace string) *services { - return &services{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Create(service *api.Service) (result *api.Service, err error) { - result = &api.Service{} - err = c.client.Post(). - Namespace(c.ns). - Resource("services"). - Body(service). - Do(). - Into(result) - return -} - -// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Update(service *api.Service) (result *api.Service, err error) { - result = &api.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - Body(service). - Do(). - Into(result) - return -} - -func (c *services) UpdateStatus(service *api.Service) (result *api.Service, err error) { - result = &api.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - SubResource("status"). 
- Body(service). - Do(). - Into(result) - return -} - -// Delete takes name of the service and deletes it. Returns an error if one occurs. -func (c *services) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("services"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *services) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the service, and returns the corresponding service object, and an error if there is any. -func (c *services) Get(name string) (result *api.Service, err error) { - result = &api.Service{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Services that match those selectors. -func (c *services) List(opts api.ListOptions) (result *api.ServiceList, err error) { - result = &api.ServiceList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested services. -func (c *services) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched service. -func (c *services) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Service, err error) { - result = &api.Service{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("services"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/BUILD b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/BUILD deleted file mode 100644 index 3544dbb7c5..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/BUILD +++ /dev/null @@ -1,32 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "daemonset.go", - "deployment.go", - "doc.go", - "extensions_client.go", - "generated_expansion.go", - "ingress.go", - "replicaset.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/daemonset.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/daemonset.go deleted file mode 100644 index c219deddb9..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/daemonset.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// DaemonSetsGetter has a method to return a DaemonSetInterface. -// A group's client should implement this interface. -type DaemonSetsGetter interface { - DaemonSets(namespace string) DaemonSetInterface -} - -// DaemonSetInterface has methods to work with DaemonSet resources. 
-type DaemonSetInterface interface { - Create(*extensions.DaemonSet) (*extensions.DaemonSet, error) - Update(*extensions.DaemonSet) (*extensions.DaemonSet, error) - UpdateStatus(*extensions.DaemonSet) (*extensions.DaemonSet, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*extensions.DaemonSet, error) - List(opts api.ListOptions) (*extensions.DaemonSetList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.DaemonSet, err error) - DaemonSetExpansion -} - -// daemonSets implements DaemonSetInterface -type daemonSets struct { - client restclient.Interface - ns string -} - -// newDaemonSets returns a DaemonSets -func newDaemonSets(c *ExtensionsClient, namespace string) *daemonSets { - return &daemonSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("daemonsets"). - Body(daemonSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - Body(daemonSet). - Do(). - Into(result) - return -} - -func (c *daemonSets) UpdateStatus(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - SubResource("status"). - Body(daemonSet). - Do(). - Into(result) - return -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(name string) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *daemonSets) List(opts api.ListOptions) (result *extensions.DaemonSetList, err error) { - result = &extensions.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, api.ParameterCodec). - Do(). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *daemonSets) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("daemonsets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/deployment.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/deployment.go deleted file mode 100644 index 7da091b6c2..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/deployment.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// DeploymentsGetter has a method to return a DeploymentInterface. -// A group's client should implement this interface. -type DeploymentsGetter interface { - Deployments(namespace string) DeploymentInterface -} - -// DeploymentInterface has methods to work with Deployment resources. -type DeploymentInterface interface { - Create(*extensions.Deployment) (*extensions.Deployment, error) - Update(*extensions.Deployment) (*extensions.Deployment, error) - UpdateStatus(*extensions.Deployment) (*extensions.Deployment, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*extensions.Deployment, error) - List(opts api.ListOptions) (*extensions.DeploymentList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.Deployment, err error) - DeploymentExpansion -} - -// deployments implements DeploymentInterface -type deployments struct { - client restclient.Interface - ns string -} - -// newDeployments returns a Deployments -func newDeployments(c *ExtensionsClient, namespace string) *deployments { - return &deployments{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. 
-func (c *deployments) Create(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - Body(deployment). - Do(). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - Body(deployment). - Do(). - Into(result) - return -} - -func (c *deployments) UpdateStatus(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - Body(deployment). - Do(). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(name string) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(opts api.ListOptions) (result *extensions.DeploymentList, err error) { - result = &extensions.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/doc.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/doc.go deleted file mode 100644 index 26c978f7d5..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with arguments: --clientset-name=federation_internalclientset --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/Service,api/Namespace,extensions/ReplicaSet,api/Secret,extensions/Ingress,extensions/Deployment,extensions/DaemonSet,api/ConfigMap,api/Event] --input=[../../federation/apis/federation/,api/,extensions/] - -// This package has the automatically generated typed clients. -package internalversion diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/extensions_client.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/extensions_client.go deleted file mode 100644 index ba916199ef..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/extensions_client.go +++ /dev/null @@ -1,114 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - registered "k8s.io/kubernetes/pkg/apimachinery/registered" - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -type ExtensionsInterface interface { - RESTClient() restclient.Interface - DaemonSetsGetter - DeploymentsGetter - IngressesGetter - ReplicaSetsGetter -} - -// ExtensionsClient is used to interact with features provided by the k8s.io/kubernetes/pkg/apimachinery/registered.Group group. 
-type ExtensionsClient struct { - restClient restclient.Interface -} - -func (c *ExtensionsClient) DaemonSets(namespace string) DaemonSetInterface { - return newDaemonSets(c, namespace) -} - -func (c *ExtensionsClient) Deployments(namespace string) DeploymentInterface { - return newDeployments(c, namespace) -} - -func (c *ExtensionsClient) Ingresses(namespace string) IngressInterface { - return newIngresses(c, namespace) -} - -func (c *ExtensionsClient) ReplicaSets(namespace string) ReplicaSetInterface { - return newReplicaSets(c, namespace) -} - -// NewForConfig creates a new ExtensionsClient for the given config. -func NewForConfig(c *restclient.Config) (*ExtensionsClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &ExtensionsClient{client}, nil -} - -// NewForConfigOrDie creates a new ExtensionsClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *ExtensionsClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new ExtensionsClient for the given RESTClient. -func New(c restclient.Interface) *ExtensionsClient { - return &ExtensionsClient{c} -} - -func setConfigDefaults(config *restclient.Config) error { - // if extensions group is not registered, return an error - g, err := registered.Group("extensions") - if err != nil { - return err - } - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != g.GroupVersion.Group { - copyGroupVersion := g.GroupVersion - config.GroupVersion = ©GroupVersion - } - config.NegotiatedSerializer = api.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *ExtensionsClient) RESTClient() restclient.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/generated_expansion.go deleted file mode 100644 index c68ea50b49..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/generated_expansion.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internalversion - -type DaemonSetExpansion interface{} - -type DeploymentExpansion interface{} - -type IngressExpansion interface{} - -type ReplicaSetExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/ingress.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/ingress.go deleted file mode 100644 index 2b031e9ae2..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/ingress.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// IngressesGetter has a method to return a IngressInterface. -// A group's client should implement this interface. -type IngressesGetter interface { - Ingresses(namespace string) IngressInterface -} - -// IngressInterface has methods to work with Ingress resources. -type IngressInterface interface { - Create(*extensions.Ingress) (*extensions.Ingress, error) - Update(*extensions.Ingress) (*extensions.Ingress, error) - UpdateStatus(*extensions.Ingress) (*extensions.Ingress, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*extensions.Ingress, error) - List(opts api.ListOptions) (*extensions.IngressList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.Ingress, err error) - IngressExpansion -} - -// ingresses implements IngressInterface -type ingresses struct { - client restclient.Interface - ns string -} - -// newIngresses returns a Ingresses -func newIngresses(c *ExtensionsClient, namespace string) *ingresses { - return &ingresses{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ingresses"). - Body(ingress). - Do(). - Into(result) - return -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - Body(ingress). - Do(). 
- Into(result) - return -} - -func (c *ingresses) UpdateStatus(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - SubResource("status"). - Body(ingress). - Do(). - Into(result) - return -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(name string) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *ingresses) List(opts api.ListOptions) (result *extensions.IngressList, err error) { - result = &extensions.IngressList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ingresses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/replicaset.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/replicaset.go deleted file mode 100644 index 02d386574b..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/replicaset.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ReplicaSetsGetter has a method to return a ReplicaSetInterface. -// A group's client should implement this interface. -type ReplicaSetsGetter interface { - ReplicaSets(namespace string) ReplicaSetInterface -} - -// ReplicaSetInterface has methods to work with ReplicaSet resources. -type ReplicaSetInterface interface { - Create(*extensions.ReplicaSet) (*extensions.ReplicaSet, error) - Update(*extensions.ReplicaSet) (*extensions.ReplicaSet, error) - UpdateStatus(*extensions.ReplicaSet) (*extensions.ReplicaSet, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*extensions.ReplicaSet, error) - List(opts api.ListOptions) (*extensions.ReplicaSetList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.ReplicaSet, err error) - ReplicaSetExpansion -} - -// replicaSets implements ReplicaSetInterface -type replicaSets struct { - client restclient.Interface - ns string -} - -// newReplicaSets returns a ReplicaSets -func newReplicaSets(c *ExtensionsClient, namespace string) *replicaSets { - return &replicaSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicasets"). - Body(replicaSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - Body(replicaSet). - Do(). - Into(result) - return -} - -func (c *replicaSets) UpdateStatus(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - SubResource("status"). - Body(replicaSet). - Do(). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicaSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. 
-func (c *replicaSets) Get(name string) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(opts api.ListOptions) (result *extensions.ReplicaSetList, err error) { - result = &extensions.ReplicaSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *replicaSets) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicasets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion/BUILD b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion/BUILD deleted file mode 100644 index d73158c8ff..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion/BUILD +++ /dev/null @@ -1,29 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "cluster.go", - "doc.go", - "federation_client.go", - "generated_expansion.go", - ], - tags = ["automanaged"], - deps = [ - "//federation/apis/federation:go_default_library", - "//pkg/api:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion/cluster.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion/cluster.go deleted file mode 100644 index 8777e3a0a1..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion/cluster.go +++ /dev/null @@ -1,155 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internalversion - -import ( - federation "k8s.io/kubernetes/federation/apis/federation" - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ClustersGetter has a method to return a ClusterInterface. -// A group's client should implement this interface. -type ClustersGetter interface { - Clusters() ClusterInterface -} - -// ClusterInterface has methods to work with Cluster resources. -type ClusterInterface interface { - Create(*federation.Cluster) (*federation.Cluster, error) - Update(*federation.Cluster) (*federation.Cluster, error) - UpdateStatus(*federation.Cluster) (*federation.Cluster, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*federation.Cluster, error) - List(opts api.ListOptions) (*federation.ClusterList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *federation.Cluster, err error) - ClusterExpansion -} - -// clusters implements ClusterInterface -type clusters struct { - client restclient.Interface -} - -// newClusters returns a Clusters -func newClusters(c *FederationClient) *clusters { - return &clusters{ - client: c.RESTClient(), - } -} - -// Create takes the representation of a cluster and creates it. Returns the server's representation of the cluster, and an error, if there is any. -func (c *clusters) Create(cluster *federation.Cluster) (result *federation.Cluster, err error) { - result = &federation.Cluster{} - err = c.client.Post(). - Resource("clusters"). - Body(cluster). - Do(). - Into(result) - return -} - -// Update takes the representation of a cluster and updates it. Returns the server's representation of the cluster, and an error, if there is any. -func (c *clusters) Update(cluster *federation.Cluster) (result *federation.Cluster, err error) { - result = &federation.Cluster{} - err = c.client.Put(). - Resource("clusters"). - Name(cluster.Name). - Body(cluster). - Do(). - Into(result) - return -} - -func (c *clusters) UpdateStatus(cluster *federation.Cluster) (result *federation.Cluster, err error) { - result = &federation.Cluster{} - err = c.client.Put(). - Resource("clusters"). - Name(cluster.Name). - SubResource("status"). - Body(cluster). - Do(). - Into(result) - return -} - -// Delete takes name of the cluster and deletes it. Returns an error if one occurs. -func (c *clusters) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Resource("clusters"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusters) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Resource("clusters"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the cluster, and returns the corresponding cluster object, and an error if there is any. -func (c *clusters) Get(name string) (result *federation.Cluster, err error) { - result = &federation.Cluster{} - err = c.client.Get(). - Resource("clusters"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Clusters that match those selectors. 
-func (c *clusters) List(opts api.ListOptions) (result *federation.ClusterList, err error) { - result = &federation.ClusterList{} - err = c.client.Get(). - Resource("clusters"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusters. -func (c *clusters) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("clusters"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched cluster. -func (c *clusters) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *federation.Cluster, err error) { - result = &federation.Cluster{} - err = c.client.Patch(pt). - Resource("clusters"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion/doc.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion/doc.go deleted file mode 100644 index 26c978f7d5..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with arguments: --clientset-name=federation_internalclientset --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/Service,api/Namespace,extensions/ReplicaSet,api/Secret,extensions/Ingress,extensions/Deployment,extensions/DaemonSet,api/ConfigMap,api/Event] --input=[../../federation/apis/federation/,api/,extensions/] - -// This package has the automatically generated typed clients. -package internalversion diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion/federation_client.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion/federation_client.go deleted file mode 100644 index 5e9816df2b..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion/federation_client.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - registered "k8s.io/kubernetes/pkg/apimachinery/registered" - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -type FederationInterface interface { - RESTClient() restclient.Interface - ClustersGetter -} - -// FederationClient is used to interact with features provided by the k8s.io/kubernetes/pkg/apimachinery/registered.Group group. -type FederationClient struct { - restClient restclient.Interface -} - -func (c *FederationClient) Clusters() ClusterInterface { - return newClusters(c) -} - -// NewForConfig creates a new FederationClient for the given config. -func NewForConfig(c *restclient.Config) (*FederationClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &FederationClient{client}, nil -} - -// NewForConfigOrDie creates a new FederationClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *FederationClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new FederationClient for the given RESTClient. -func New(c restclient.Interface) *FederationClient { - return &FederationClient{c} -} - -func setConfigDefaults(config *restclient.Config) error { - // if federation group is not registered, return an error - g, err := registered.Group("federation") - if err != nil { - return err - } - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != g.GroupVersion.Group { - copyGroupVersion := g.GroupVersion - config.GroupVersion = ©GroupVersion - } - config.NegotiatedSerializer = api.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FederationClient) RESTClient() restclient.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion/generated_expansion.go deleted file mode 100644 index d18a8dda92..0000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion/generated_expansion.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internalversion - -type ClusterExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/api/BUILD b/vendor/k8s.io/kubernetes/pkg/api/BUILD index 208845a981..5e6f7c804b 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/api/BUILD @@ -4,81 +4,66 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", "go_test", - "cgo_library", ) go_library( name = "go_default_library", srcs = [ - "context.go", - "conversion.go", "defaults.go", "doc.go", "field_constants.go", - "generate.go", "helpers.go", - "mapper.go", - "meta.go", + "json.go", "ref.go", "register.go", - "requestcontext.go", "resource_helpers.go", - "types.generated.go", "types.go", "zz_generated.deepcopy.go", ], tags = ["automanaged"], deps = [ - "//pkg/api/meta:go_default_library", - "//pkg/api/meta/metatypes:go_default_library", - "//pkg/api/resource:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/auth/user:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/fields:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/runtime/serializer:go_default_library", - "//pkg/selection:go_default_library", - "//pkg/types:go_default_library", - "//pkg/util/intstr:go_default_library", - "//pkg/util/labels:go_default_library", - "//pkg/util/rand:go_default_library", - "//pkg/util/sets:go_default_library", - "//pkg/util/uuid:go_default_library", - "//pkg/util/validation/field:go_default_library", "//vendor:github.com/davecgh/go-spew/spew", - "//vendor:github.com/golang/glog", - "//vendor:github.com/ugorji/go/codec", - "//vendor:golang.org/x/net/context", + "//vendor:k8s.io/apimachinery/pkg/api/meta", + "//vendor:k8s.io/apimachinery/pkg/api/resource", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/announced", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/registered", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/fields", + "//vendor:k8s.io/apimachinery/pkg/labels", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer", + "//vendor:k8s.io/apimachinery/pkg/selection", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/util/intstr", + "//vendor:k8s.io/apimachinery/pkg/util/sets", ], ) go_test( name = "go_default_test", srcs = [ - "generate_test.go", "helpers_test.go", "ref_test.go", "resource_helpers_test.go", ], - library = "go_default_library", + library = ":go_default_library", tags = ["automanaged"], deps = [ - "//pkg/api/resource:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/runtime:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/api/resource", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/labels", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", ], ) go_test( name = "go_default_xtest", srcs = [ - "context_test.go", "conversion_test.go", "copy_test.go", "deep_copy_test.go", @@ -86,35 +71,65 @@ go_test( "meta_test.go", "serialization_proto_test.go", "serialization_test.go", + "unstructured_test.go", ], tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/meta/metatypes:go_default_library", 
"//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", - "//pkg/api/unversioned:go_default_library", "//pkg/api/v1:go_default_library", - "//pkg/apimachinery/registered:go_default_library", "//pkg/apis/batch/v2alpha1:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/apis/extensions/v1beta1:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/runtime/serializer/protobuf:go_default_library", - "//pkg/runtime/serializer/streaming:go_default_library", - "//pkg/types:go_default_library", - "//pkg/util/diff:go_default_library", - "//pkg/util/sets:go_default_library", - "//pkg/util/uuid:go_default_library", - "//pkg/watch:go_default_library", - "//pkg/watch/versioned:go_default_library", - "//vendor:github.com/davecgh/go-spew/spew", "//vendor:github.com/gogo/protobuf/proto", "//vendor:github.com/golang/protobuf/proto", "//vendor:github.com/google/gofuzz", - "//vendor:github.com/spf13/pflag", "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/api/equality", + "//vendor:k8s.io/apimachinery/pkg/api/meta", + "//vendor:k8s.io/apimachinery/pkg/api/testing", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/conversion/unstructured", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer/protobuf", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer/streaming", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/util/diff", + "//vendor:k8s.io/apimachinery/pkg/util/json", + "//vendor:k8s.io/apimachinery/pkg/util/sets", + "//vendor:k8s.io/apimachinery/pkg/watch", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/api/annotations:all-srcs", + "//pkg/api/endpoints:all-srcs", + "//pkg/api/errors:all-srcs", + "//pkg/api/events:all-srcs", + "//pkg/api/install:all-srcs", + "//pkg/api/meta:all-srcs", + "//pkg/api/pod:all-srcs", + "//pkg/api/resource:all-srcs", + "//pkg/api/service:all-srcs", + "//pkg/api/testapi:all-srcs", + "//pkg/api/testing:all-srcs", + "//pkg/api/unversioned:all-srcs", + "//pkg/api/util:all-srcs", + "//pkg/api/v1:all-srcs", + "//pkg/api/validation:all-srcs", ], + tags = ["automanaged"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/api/OWNERS b/vendor/k8s.io/kubernetes/pkg/api/OWNERS index d28472e0fd..3a9b0c6d15 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/api/OWNERS @@ -1,6 +1,44 @@ -assignees: - - bgrant0607 - - erictune - - lavalamp - - smarterclayton - - thockin +approvers: +- erictune +- lavalamp +- smarterclayton +- thockin +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- yujuhong +- brendandburns +- derekwaynecarr +- caesarxuchao +- vishh +- mikedanese +- liggitt +- nikhiljindal +- bprashanth +- gmarek +- erictune +- davidopp +- pmorie +- sttts +- kargakis +- dchen1107 +- saad-ali +- zmerlynn +- luxas +- janetkuo +- justinsb +- pwittrock +- roberthbailey +- ncdc +- timstclair +- yifan-gu +- eparis +- mwielgus +- timothysc +- soltysh +- piosz +- jsafrane +- jbeda diff --git a/vendor/k8s.io/kubernetes/pkg/api/annotations/BUILD b/vendor/k8s.io/kubernetes/pkg/api/annotations/BUILD deleted file mode 100644 index 
a26fe73f86..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/annotations/BUILD +++ /dev/null @@ -1,20 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "annotations.go", - "doc.go", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/annotations/annotations.go b/vendor/k8s.io/kubernetes/pkg/api/annotations/annotations.go deleted file mode 100644 index 2f9a6c4422..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/annotations/annotations.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package annotations - -const kubectlPrefix = "kubectl.kubernetes.io/" - -// LastAppliedConfigAnnotation is the annotation used to store the previous -// configuration of a resource for use in a three way diff by UpdateApplyAnnotation. -const LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration" diff --git a/vendor/k8s.io/kubernetes/pkg/api/annotations/doc.go b/vendor/k8s.io/kubernetes/pkg/api/annotations/doc.go deleted file mode 100644 index 0c36c85cc1..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/annotations/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package annotations defines annotation keys that shared between server and client -package annotations diff --git a/vendor/k8s.io/kubernetes/pkg/api/context.go b/vendor/k8s.io/kubernetes/pkg/api/context.go deleted file mode 100644 index 0f735f4686..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/context.go +++ /dev/null @@ -1,152 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package api - -import ( - stderrs "errors" - "time" - - "golang.org/x/net/context" - "k8s.io/kubernetes/pkg/auth/user" - "k8s.io/kubernetes/pkg/types" -) - -// Context carries values across API boundaries. -// This context matches the context.Context interface -// (https://blog.golang.org/context), for the purposes -// of passing the api.Context through to the storage tier. -// TODO: Determine the extent that this abstraction+interface -// is used by the api, and whether we can remove. -type Context interface { - // Value returns the value associated with key or nil if none. - Value(key interface{}) interface{} - - // Deadline returns the time when this Context will be canceled, if any. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that is closed when this Context is canceled - // or times out. - Done() <-chan struct{} - - // Err indicates why this context was canceled, after the Done channel - // is closed. - Err() error -} - -// The key type is unexported to prevent collisions -type key int - -const ( - // namespaceKey is the context key for the request namespace. - namespaceKey key = iota - - // userKey is the context key for the request user. - userKey - - // uidKey is the context key for the uid to assign to an object on create. - uidKey - - // userAgentKey is the context key for the request user agent. - userAgentKey -) - -// NewContext instantiates a base context object for request flows. -func NewContext() Context { - return context.TODO() -} - -// NewDefaultContext instantiates a base context object for request flows in the default namespace -func NewDefaultContext() Context { - return WithNamespace(NewContext(), NamespaceDefault) -} - -// WithValue returns a copy of parent in which the value associated with key is val. -func WithValue(parent Context, key interface{}, val interface{}) Context { - internalCtx, ok := parent.(context.Context) - if !ok { - panic(stderrs.New("Invalid context type")) - } - return context.WithValue(internalCtx, key, val) -} - -// WithNamespace returns a copy of parent in which the namespace value is set -func WithNamespace(parent Context, namespace string) Context { - return WithValue(parent, namespaceKey, namespace) -} - -// NamespaceFrom returns the value of the namespace key on the ctx -func NamespaceFrom(ctx Context) (string, bool) { - namespace, ok := ctx.Value(namespaceKey).(string) - return namespace, ok -} - -// NamespaceValue returns the value of the namespace key on the ctx, or the empty string if none -func NamespaceValue(ctx Context) string { - namespace, _ := NamespaceFrom(ctx) - return namespace -} - -// ValidNamespace returns false if the namespace on the context differs from the resource. If the resource has no namespace, it is set to the value in the context. 
-func ValidNamespace(ctx Context, resource *ObjectMeta) bool { - ns, ok := NamespaceFrom(ctx) - if len(resource.Namespace) == 0 { - resource.Namespace = ns - } - return ns == resource.Namespace && ok -} - -// WithNamespaceDefaultIfNone returns a context whose namespace is the default if and only if the parent context has no namespace value -func WithNamespaceDefaultIfNone(parent Context) Context { - namespace, ok := NamespaceFrom(parent) - if !ok || len(namespace) == 0 { - return WithNamespace(parent, NamespaceDefault) - } - return parent -} - -// WithUser returns a copy of parent in which the user value is set -func WithUser(parent Context, user user.Info) Context { - return WithValue(parent, userKey, user) -} - -// UserFrom returns the value of the user key on the ctx -func UserFrom(ctx Context) (user.Info, bool) { - user, ok := ctx.Value(userKey).(user.Info) - return user, ok -} - -// WithUID returns a copy of parent in which the uid value is set -func WithUID(parent Context, uid types.UID) Context { - return WithValue(parent, uidKey, uid) -} - -// UIDFrom returns the value of the uid key on the ctx -func UIDFrom(ctx Context) (types.UID, bool) { - uid, ok := ctx.Value(uidKey).(types.UID) - return uid, ok -} - -// WithUserAgent returns a copy of parent in which the user value is set -func WithUserAgent(parent Context, userAgent string) Context { - return WithValue(parent, userAgentKey, userAgent) -} - -// UserAgentFrom returns the value of the userAgent key on the ctx -func UserAgentFrom(ctx Context) (string, bool) { - userAgent, ok := ctx.Value(userAgentKey).(string) - return userAgent, ok -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/conversion.go b/vendor/k8s.io/kubernetes/pkg/api/conversion.go deleted file mode 100644 index 34fd1a1e87..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/conversion.go +++ /dev/null @@ -1,250 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package api - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/intstr" - utillabels "k8s.io/kubernetes/pkg/util/labels" - "k8s.io/kubernetes/pkg/util/validation/field" -) - -func addConversionFuncs(scheme *runtime.Scheme) error { - return scheme.AddConversionFuncs( - Convert_unversioned_TypeMeta_To_unversioned_TypeMeta, - - Convert_unversioned_ListMeta_To_unversioned_ListMeta, - - Convert_intstr_IntOrString_To_intstr_IntOrString, - - Convert_unversioned_Time_To_unversioned_Time, - - Convert_Slice_string_To_unversioned_Time, - - Convert_resource_Quantity_To_resource_Quantity, - - Convert_string_To_labels_Selector, - Convert_labels_Selector_To_string, - - Convert_string_To_fields_Selector, - Convert_fields_Selector_To_string, - - Convert_Pointer_bool_To_bool, - Convert_bool_To_Pointer_bool, - - Convert_Pointer_string_To_string, - Convert_string_To_Pointer_string, - - Convert_Pointer_int64_To_int, - Convert_int_To_Pointer_int64, - - Convert_Pointer_int32_To_int32, - Convert_int32_To_Pointer_int32, - - Convert_Pointer_float64_To_float64, - Convert_float64_To_Pointer_float64, - - Convert_map_to_unversioned_LabelSelector, - Convert_unversioned_LabelSelector_to_map, - ) -} - -func Convert_Pointer_float64_To_float64(in **float64, out *float64, s conversion.Scope) error { - if *in == nil { - *out = 0 - return nil - } - *out = float64(**in) - return nil -} - -func Convert_float64_To_Pointer_float64(in *float64, out **float64, s conversion.Scope) error { - temp := float64(*in) - *out = &temp - return nil -} - -func Convert_Pointer_int32_To_int32(in **int32, out *int32, s conversion.Scope) error { - if *in == nil { - *out = 0 - return nil - } - *out = int32(**in) - return nil -} - -func Convert_int32_To_Pointer_int32(in *int32, out **int32, s conversion.Scope) error { - temp := int32(*in) - *out = &temp - return nil -} - -func Convert_Pointer_int64_To_int(in **int64, out *int, s conversion.Scope) error { - if *in == nil { - *out = 0 - return nil - } - *out = int(**in) - return nil -} - -func Convert_int_To_Pointer_int64(in *int, out **int64, s conversion.Scope) error { - temp := int64(*in) - *out = &temp - return nil -} - -func Convert_Pointer_string_To_string(in **string, out *string, s conversion.Scope) error { - if *in == nil { - *out = "" - return nil - } - *out = **in - return nil -} - -func Convert_string_To_Pointer_string(in *string, out **string, s conversion.Scope) error { - if in == nil { - stringVar := "" - *out = &stringVar - return nil - } - *out = in - return nil -} - -func Convert_Pointer_bool_To_bool(in **bool, out *bool, s conversion.Scope) error { - if *in == nil { - *out = false - return nil - } - *out = **in - return nil -} - -func Convert_bool_To_Pointer_bool(in *bool, out **bool, s conversion.Scope) error { - if in == nil { - boolVar := false - *out = &boolVar - return nil - } - *out = in - return nil -} - -// +k8s:conversion-fn=drop -func Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(in, out *unversioned.TypeMeta, s conversion.Scope) error { - // These values are explicitly not copied - //out.APIVersion = in.APIVersion - //out.Kind = in.Kind - return nil -} - -// +k8s:conversion-fn=copy-only -func Convert_unversioned_ListMeta_To_unversioned_ListMeta(in, out *unversioned.ListMeta, s conversion.Scope) error { - *out = *in - return nil -} - -// 
+k8s:conversion-fn=copy-only -func Convert_intstr_IntOrString_To_intstr_IntOrString(in, out *intstr.IntOrString, s conversion.Scope) error { - *out = *in - return nil -} - -// +k8s:conversion-fn=copy-only -func Convert_unversioned_Time_To_unversioned_Time(in *unversioned.Time, out *unversioned.Time, s conversion.Scope) error { - // Cannot deep copy these, because time.Time has unexported fields. - *out = *in - return nil -} - -// Convert_Slice_string_To_unversioned_Time allows converting a URL query parameter value -func Convert_Slice_string_To_unversioned_Time(input *[]string, out *unversioned.Time, s conversion.Scope) error { - str := "" - if len(*input) > 0 { - str = (*input)[0] - } - return out.UnmarshalQueryParameter(str) -} - -func Convert_string_To_labels_Selector(in *string, out *labels.Selector, s conversion.Scope) error { - selector, err := labels.Parse(*in) - if err != nil { - return err - } - *out = selector - return nil -} - -func Convert_string_To_fields_Selector(in *string, out *fields.Selector, s conversion.Scope) error { - selector, err := fields.ParseSelector(*in) - if err != nil { - return err - } - *out = selector - return nil -} - -func Convert_labels_Selector_To_string(in *labels.Selector, out *string, s conversion.Scope) error { - if *in == nil { - return nil - } - *out = (*in).String() - return nil -} - -func Convert_fields_Selector_To_string(in *fields.Selector, out *string, s conversion.Scope) error { - if *in == nil { - return nil - } - *out = (*in).String() - return nil -} - -// +k8s:conversion-fn=copy-only -func Convert_resource_Quantity_To_resource_Quantity(in *resource.Quantity, out *resource.Quantity, s conversion.Scope) error { - *out = *in - return nil -} - -func Convert_map_to_unversioned_LabelSelector(in *map[string]string, out *unversioned.LabelSelector, s conversion.Scope) error { - if in == nil { - return nil - } - out = new(unversioned.LabelSelector) - for labelKey, labelValue := range *in { - utillabels.AddLabelToSelector(out, labelKey, labelValue) - } - return nil -} - -func Convert_unversioned_LabelSelector_to_map(in *unversioned.LabelSelector, out *map[string]string, s conversion.Scope) error { - var err error - *out, err = unversioned.LabelSelectorAsMap(in) - if err != nil { - err = field.Invalid(field.NewPath("labelSelector"), *in, fmt.Sprintf("cannot convert to old selector: %v", err)) - } - return err -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/defaults.go b/vendor/k8s.io/kubernetes/pkg/api/defaults.go index a2b01b7fd6..baa49a8d7e 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/api/defaults.go @@ -17,9 +17,9 @@ limitations under the License. 
package api import ( - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" ) func addDefaultingFuncs(scheme *runtime.Scheme) error { diff --git a/vendor/k8s.io/kubernetes/pkg/api/endpoints/BUILD b/vendor/k8s.io/kubernetes/pkg/api/endpoints/BUILD deleted file mode 100644 index f0f5a5c6a4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/endpoints/BUILD +++ /dev/null @@ -1,34 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["util.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/types:go_default_library", - "//pkg/util/hash:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["util_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/types:go_default_library", - "//vendor:github.com/davecgh/go-spew/spew", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/endpoints/util.go b/vendor/k8s.io/kubernetes/pkg/api/endpoints/util.go deleted file mode 100644 index 792a2536fe..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/endpoints/util.go +++ /dev/null @@ -1,238 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package endpoints - -import ( - "bytes" - "crypto/md5" - "encoding/hex" - "hash" - "sort" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/types" - hashutil "k8s.io/kubernetes/pkg/util/hash" -) - -const ( - // TODO: to be deleted after v1.3 is released - // Its value is the json representation of map[string(IP)][HostRecord] - // example: '{"10.245.1.6":{"HostName":"my-webserver"}}' - PodHostnamesAnnotation = "endpoints.beta.kubernetes.io/hostnames-map" -) - -// TODO: to be deleted after v1.3 is released -type HostRecord struct { - HostName string -} - -// RepackSubsets takes a slice of EndpointSubset objects, expands it to the full -// representation, and then repacks that into the canonical layout. This -// ensures that code which operates on these objects can rely on the common -// form for things like comparison. The result is a newly allocated slice. -func RepackSubsets(subsets []api.EndpointSubset) []api.EndpointSubset { - // First map each unique port definition to the sets of hosts that - // offer it. 
- allAddrs := map[addressKey]*api.EndpointAddress{} - portToAddrReadyMap := map[api.EndpointPort]addressSet{} - for i := range subsets { - for _, port := range subsets[i].Ports { - for k := range subsets[i].Addresses { - mapAddressByPort(&subsets[i].Addresses[k], port, true, allAddrs, portToAddrReadyMap) - } - for k := range subsets[i].NotReadyAddresses { - mapAddressByPort(&subsets[i].NotReadyAddresses[k], port, false, allAddrs, portToAddrReadyMap) - } - } - } - - // Next, map the sets of hosts to the sets of ports they offer. - // Go does not allow maps or slices as keys to maps, so we have - // to synthesize an artificial key and do a sort of 2-part - // associative entity. - type keyString string - keyToAddrReadyMap := map[keyString]addressSet{} - addrReadyMapKeyToPorts := map[keyString][]api.EndpointPort{} - for port, addrs := range portToAddrReadyMap { - key := keyString(hashAddresses(addrs)) - keyToAddrReadyMap[key] = addrs - addrReadyMapKeyToPorts[key] = append(addrReadyMapKeyToPorts[key], port) - } - - // Next, build the N-to-M association the API wants. - final := []api.EndpointSubset{} - for key, ports := range addrReadyMapKeyToPorts { - var readyAddrs, notReadyAddrs []api.EndpointAddress - for addr, ready := range keyToAddrReadyMap[key] { - if ready { - readyAddrs = append(readyAddrs, *addr) - } else { - notReadyAddrs = append(notReadyAddrs, *addr) - } - } - final = append(final, api.EndpointSubset{Addresses: readyAddrs, NotReadyAddresses: notReadyAddrs, Ports: ports}) - } - - // Finally, sort it. - return SortSubsets(final) -} - -// The sets of hosts must be de-duped, using IP+UID as the key. -type addressKey struct { - ip string - uid types.UID -} - -// mapAddressByPort adds an address into a map by its ports, registering the address with a unique pointer, and preserving -// any existing ready state. -func mapAddressByPort(addr *api.EndpointAddress, port api.EndpointPort, ready bool, allAddrs map[addressKey]*api.EndpointAddress, portToAddrReadyMap map[api.EndpointPort]addressSet) *api.EndpointAddress { - // use addressKey to distinguish between two endpoints that are identical addresses - // but may have come from different hosts, for attribution. For instance, Mesos - // assigns pods the node IP, but the pods are distinct. - key := addressKey{ip: addr.IP} - if addr.TargetRef != nil { - key.uid = addr.TargetRef.UID - } - - // Accumulate the address. The full EndpointAddress structure is preserved for use when - // we rebuild the subsets so that the final TargetRef has all of the necessary data. - existingAddress := allAddrs[key] - if existingAddress == nil { - // Make a copy so we don't write to the - // input args of this function. - existingAddress = &api.EndpointAddress{} - *existingAddress = *addr - allAddrs[key] = existingAddress - } - - // Remember that this port maps to this address. - if _, found := portToAddrReadyMap[port]; !found { - portToAddrReadyMap[port] = addressSet{} - } - // if we have not yet recorded this port for this address, or if the previous - // state was ready, write the current ready state. not ready always trumps - // ready. - if wasReady, found := portToAddrReadyMap[port][existingAddress]; !found || wasReady { - portToAddrReadyMap[port][existingAddress] = ready - } - return existingAddress -} - -type addressSet map[*api.EndpointAddress]bool - -type addrReady struct { - addr *api.EndpointAddress - ready bool -} - -func hashAddresses(addrs addressSet) string { - // Flatten the list of addresses into a string so it can be used as a - // map key. 
Unfortunately, DeepHashObject is implemented in terms of - // spew, and spew does not handle non-primitive map keys well. So - // first we collapse it into a slice, sort the slice, then hash that. - slice := make([]addrReady, 0, len(addrs)) - for k, ready := range addrs { - slice = append(slice, addrReady{k, ready}) - } - sort.Sort(addrsReady(slice)) - hasher := md5.New() - hashutil.DeepHashObject(hasher, slice) - return hex.EncodeToString(hasher.Sum(nil)[0:]) -} - -func lessAddrReady(a, b addrReady) bool { - // ready is not significant to hashing since we can't have duplicate addresses - return LessEndpointAddress(a.addr, b.addr) -} - -type addrsReady []addrReady - -func (sl addrsReady) Len() int { return len(sl) } -func (sl addrsReady) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } -func (sl addrsReady) Less(i, j int) bool { - return lessAddrReady(sl[i], sl[j]) -} - -func LessEndpointAddress(a, b *api.EndpointAddress) bool { - ipComparison := bytes.Compare([]byte(a.IP), []byte(b.IP)) - if ipComparison != 0 { - return ipComparison < 0 - } - if b.TargetRef == nil { - return false - } - if a.TargetRef == nil { - return true - } - return a.TargetRef.UID < b.TargetRef.UID -} - -type addrPtrsByIpAndUID []*api.EndpointAddress - -func (sl addrPtrsByIpAndUID) Len() int { return len(sl) } -func (sl addrPtrsByIpAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } -func (sl addrPtrsByIpAndUID) Less(i, j int) bool { - return LessEndpointAddress(sl[i], sl[j]) -} - -// SortSubsets sorts an array of EndpointSubset objects in place. For ease of -// use it returns the input slice. -func SortSubsets(subsets []api.EndpointSubset) []api.EndpointSubset { - for i := range subsets { - ss := &subsets[i] - sort.Sort(addrsByIpAndUID(ss.Addresses)) - sort.Sort(addrsByIpAndUID(ss.NotReadyAddresses)) - sort.Sort(portsByHash(ss.Ports)) - } - sort.Sort(subsetsByHash(subsets)) - return subsets -} - -func hashObject(hasher hash.Hash, obj interface{}) []byte { - hashutil.DeepHashObject(hasher, obj) - return hasher.Sum(nil) -} - -type subsetsByHash []api.EndpointSubset - -func (sl subsetsByHash) Len() int { return len(sl) } -func (sl subsetsByHash) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } -func (sl subsetsByHash) Less(i, j int) bool { - hasher := md5.New() - h1 := hashObject(hasher, sl[i]) - h2 := hashObject(hasher, sl[j]) - return bytes.Compare(h1, h2) < 0 -} - -type addrsByIpAndUID []api.EndpointAddress - -func (sl addrsByIpAndUID) Len() int { return len(sl) } -func (sl addrsByIpAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } -func (sl addrsByIpAndUID) Less(i, j int) bool { - return LessEndpointAddress(&sl[i], &sl[j]) -} - -type portsByHash []api.EndpointPort - -func (sl portsByHash) Len() int { return len(sl) } -func (sl portsByHash) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } -func (sl portsByHash) Less(i, j int) bool { - hasher := md5.New() - h1 := hashObject(hasher, sl[i]) - h2 := hashObject(hasher, sl[j]) - return bytes.Compare(h1, h2) < 0 -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/errors/BUILD b/vendor/k8s.io/kubernetes/pkg/api/errors/BUILD deleted file mode 100644 index 60a7717b76..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/errors/BUILD +++ /dev/null @@ -1,38 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "errors.go", - ], - tags = ["automanaged"], - deps = [ - 
"//pkg/api/unversioned:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/validation/field:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["errors_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/validation/field:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/errors/errors.go b/vendor/k8s.io/kubernetes/pkg/api/errors/errors.go deleted file mode 100644 index f54823ffcc..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/errors/errors.go +++ /dev/null @@ -1,470 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package errors - -import ( - "encoding/json" - "fmt" - "net/http" - "strings" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/validation/field" -) - -// HTTP Status codes not in the golang http package. -const ( - StatusUnprocessableEntity = 422 - StatusTooManyRequests = 429 - // StatusServerTimeout is an indication that a transient server error has - // occurred and the client *should* retry, with an optional Retry-After - // header to specify the back off window. - StatusServerTimeout = 504 -) - -// StatusError is an error intended for consumption by a REST API server; it can also be -// reconstructed by clients from a REST response. Public to allow easy type switches. -type StatusError struct { - ErrStatus unversioned.Status -} - -// APIStatus is exposed by errors that can be converted to an api.Status object -// for finer grained details. -type APIStatus interface { - Status() unversioned.Status -} - -var _ error = &StatusError{} - -// Error implements the Error interface. -func (e *StatusError) Error() string { - return e.ErrStatus.Message -} - -// Status allows access to e's status without having to know the detailed workings -// of StatusError. Used by pkg/apiserver. -func (e *StatusError) Status() unversioned.Status { - return e.ErrStatus -} - -// DebugError reports extended info about the error to debug output. -func (e *StatusError) DebugError() (string, []interface{}) { - if out, err := json.MarshalIndent(e.ErrStatus, "", " "); err == nil { - return "server response object: %s", []interface{}{string(out)} - } - return "server response object: %#v", []interface{}{e.ErrStatus} -} - -// UnexpectedObjectError can be returned by FromObject if it's passed a non-status object. -type UnexpectedObjectError struct { - Object runtime.Object -} - -// Error returns an error message describing 'u'. -func (u *UnexpectedObjectError) Error() string { - return fmt.Sprintf("unexpected object: %v", u.Object) -} - -// FromObject generates an StatusError from an unversioned.Status, if that is the type of obj; otherwise, -// returns an UnexpecteObjectError. 
-func FromObject(obj runtime.Object) error { - switch t := obj.(type) { - case *unversioned.Status: - return &StatusError{*t} - } - return &UnexpectedObjectError{obj} -} - -// NewNotFound returns a new error which indicates that the resource of the kind and the name was not found. -func NewNotFound(qualifiedResource unversioned.GroupResource, name string) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusNotFound, - Reason: unversioned.StatusReasonNotFound, - Details: &unversioned.StatusDetails{ - Group: qualifiedResource.Group, - Kind: qualifiedResource.Resource, - Name: name, - }, - Message: fmt.Sprintf("%s %q not found", qualifiedResource.String(), name), - }} -} - -// NewAlreadyExists returns an error indicating the item requested exists by that identifier. -func NewAlreadyExists(qualifiedResource unversioned.GroupResource, name string) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusConflict, - Reason: unversioned.StatusReasonAlreadyExists, - Details: &unversioned.StatusDetails{ - Group: qualifiedResource.Group, - Kind: qualifiedResource.Resource, - Name: name, - }, - Message: fmt.Sprintf("%s %q already exists", qualifiedResource.String(), name), - }} -} - -// NewUnauthorized returns an error indicating the client is not authorized to perform the requested -// action. -func NewUnauthorized(reason string) *StatusError { - message := reason - if len(message) == 0 { - message = "not authorized" - } - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusUnauthorized, - Reason: unversioned.StatusReasonUnauthorized, - Message: message, - }} -} - -// NewForbidden returns an error indicating the requested action was forbidden -func NewForbidden(qualifiedResource unversioned.GroupResource, name string, err error) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusForbidden, - Reason: unversioned.StatusReasonForbidden, - Details: &unversioned.StatusDetails{ - Group: qualifiedResource.Group, - Kind: qualifiedResource.Resource, - Name: name, - }, - Message: fmt.Sprintf("%s %q is forbidden: %v", qualifiedResource.String(), name, err), - }} -} - -// NewConflict returns an error indicating the item can't be updated as provided. -func NewConflict(qualifiedResource unversioned.GroupResource, name string, err error) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusConflict, - Reason: unversioned.StatusReasonConflict, - Details: &unversioned.StatusDetails{ - Group: qualifiedResource.Group, - Kind: qualifiedResource.Resource, - Name: name, - }, - Message: fmt.Sprintf("Operation cannot be fulfilled on %s %q: %v", qualifiedResource.String(), name, err), - }} -} - -// NewGone returns an error indicating the item no longer available at the server and no forwarding address is known. -func NewGone(message string) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusGone, - Reason: unversioned.StatusReasonGone, - Message: message, - }} -} - -// NewInvalid returns an error indicating the item is invalid and cannot be processed. 
-func NewInvalid(qualifiedKind unversioned.GroupKind, name string, errs field.ErrorList) *StatusError { - causes := make([]unversioned.StatusCause, 0, len(errs)) - for i := range errs { - err := errs[i] - causes = append(causes, unversioned.StatusCause{ - Type: unversioned.CauseType(err.Type), - Message: err.ErrorBody(), - Field: err.Field, - }) - } - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: StatusUnprocessableEntity, // RFC 4918: StatusUnprocessableEntity - Reason: unversioned.StatusReasonInvalid, - Details: &unversioned.StatusDetails{ - Group: qualifiedKind.Group, - Kind: qualifiedKind.Kind, - Name: name, - Causes: causes, - }, - Message: fmt.Sprintf("%s %q is invalid: %v", qualifiedKind.String(), name, errs.ToAggregate()), - }} -} - -// NewBadRequest creates an error that indicates that the request is invalid and can not be processed. -func NewBadRequest(reason string) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusBadRequest, - Reason: unversioned.StatusReasonBadRequest, - Message: reason, - }} -} - -// NewServiceUnavailable creates an error that indicates that the requested service is unavailable. -func NewServiceUnavailable(reason string) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusServiceUnavailable, - Reason: unversioned.StatusReasonServiceUnavailable, - Message: reason, - }} -} - -// NewMethodNotSupported returns an error indicating the requested action is not supported on this kind. -func NewMethodNotSupported(qualifiedResource unversioned.GroupResource, action string) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusMethodNotAllowed, - Reason: unversioned.StatusReasonMethodNotAllowed, - Details: &unversioned.StatusDetails{ - Group: qualifiedResource.Group, - Kind: qualifiedResource.Resource, - }, - Message: fmt.Sprintf("%s is not supported on resources of kind %q", action, qualifiedResource.String()), - }} -} - -// NewServerTimeout returns an error indicating the requested action could not be completed due to a -// transient error, and the client should try again. -func NewServerTimeout(qualifiedResource unversioned.GroupResource, operation string, retryAfterSeconds int) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusInternalServerError, - Reason: unversioned.StatusReasonServerTimeout, - Details: &unversioned.StatusDetails{ - Group: qualifiedResource.Group, - Kind: qualifiedResource.Resource, - Name: operation, - RetryAfterSeconds: int32(retryAfterSeconds), - }, - Message: fmt.Sprintf("The %s operation against %s could not be completed at this time, please try again.", operation, qualifiedResource.String()), - }} -} - -// NewServerTimeoutForKind should not exist. Server timeouts happen when accessing resources, the Kind is just what we -// happened to be looking at when the request failed. This delegates to keep code sane, but we should work towards removing this. -func NewServerTimeoutForKind(qualifiedKind unversioned.GroupKind, operation string, retryAfterSeconds int) *StatusError { - return NewServerTimeout(unversioned.GroupResource{Group: qualifiedKind.Group, Resource: qualifiedKind.Kind}, operation, retryAfterSeconds) -} - -// NewInternalError returns an error indicating the item is invalid and cannot be processed. 
-func NewInternalError(err error) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusInternalServerError, - Reason: unversioned.StatusReasonInternalError, - Details: &unversioned.StatusDetails{ - Causes: []unversioned.StatusCause{{Message: err.Error()}}, - }, - Message: fmt.Sprintf("Internal error occurred: %v", err), - }} -} - -// NewTimeoutError returns an error indicating that a timeout occurred before the request -// could be completed. Clients may retry, but the operation may still complete. -func NewTimeoutError(message string, retryAfterSeconds int) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: StatusServerTimeout, - Reason: unversioned.StatusReasonTimeout, - Message: fmt.Sprintf("Timeout: %s", message), - Details: &unversioned.StatusDetails{ - RetryAfterSeconds: int32(retryAfterSeconds), - }, - }} -} - -// NewGenericServerResponse returns a new error for server responses that are not in a recognizable form. -func NewGenericServerResponse(code int, verb string, qualifiedResource unversioned.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError { - reason := unversioned.StatusReasonUnknown - message := fmt.Sprintf("the server responded with the status code %d but did not return more information", code) - switch code { - case http.StatusConflict: - if verb == "POST" { - reason = unversioned.StatusReasonAlreadyExists - } else { - reason = unversioned.StatusReasonConflict - } - message = "the server reported a conflict" - case http.StatusNotFound: - reason = unversioned.StatusReasonNotFound - message = "the server could not find the requested resource" - case http.StatusBadRequest: - reason = unversioned.StatusReasonBadRequest - message = "the server rejected our request for an unknown reason" - case http.StatusUnauthorized: - reason = unversioned.StatusReasonUnauthorized - message = "the server has asked for the client to provide credentials" - case http.StatusForbidden: - reason = unversioned.StatusReasonForbidden - message = "the server does not allow access to the requested resource" - case http.StatusMethodNotAllowed: - reason = unversioned.StatusReasonMethodNotAllowed - message = "the server does not allow this method on the requested resource" - case StatusUnprocessableEntity: - reason = unversioned.StatusReasonInvalid - message = "the server rejected our request due to an error in our request" - case StatusServerTimeout: - reason = unversioned.StatusReasonServerTimeout - message = "the server cannot complete the requested operation at this time, try again later" - case StatusTooManyRequests: - reason = unversioned.StatusReasonTimeout - message = "the server has received too many requests and has asked us to try again later" - default: - if code >= 500 { - reason = unversioned.StatusReasonInternalError - message = fmt.Sprintf("an error on the server (%q) has prevented the request from succeeding", serverMessage) - } - } - switch { - case !qualifiedResource.Empty() && len(name) > 0: - message = fmt.Sprintf("%s (%s %s %s)", message, strings.ToLower(verb), qualifiedResource.String(), name) - case !qualifiedResource.Empty(): - message = fmt.Sprintf("%s (%s %s)", message, strings.ToLower(verb), qualifiedResource.String()) - } - var causes []unversioned.StatusCause - if isUnexpectedResponse { - causes = []unversioned.StatusCause{ - { - Type: unversioned.CauseTypeUnexpectedServerResponse, - Message: serverMessage, - }, 
- } - } else { - causes = nil - } - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: int32(code), - Reason: reason, - Details: &unversioned.StatusDetails{ - Group: qualifiedResource.Group, - Kind: qualifiedResource.Resource, - Name: name, - - Causes: causes, - RetryAfterSeconds: int32(retryAfterSeconds), - }, - Message: message, - }} -} - -// IsNotFound returns true if the specified error was created by NewNotFound. -func IsNotFound(err error) bool { - return reasonForError(err) == unversioned.StatusReasonNotFound -} - -// IsAlreadyExists determines if the err is an error which indicates that a specified resource already exists. -func IsAlreadyExists(err error) bool { - return reasonForError(err) == unversioned.StatusReasonAlreadyExists -} - -// IsConflict determines if the err is an error which indicates the provided update conflicts. -func IsConflict(err error) bool { - return reasonForError(err) == unversioned.StatusReasonConflict -} - -// IsInvalid determines if the err is an error which indicates the provided resource is not valid. -func IsInvalid(err error) bool { - return reasonForError(err) == unversioned.StatusReasonInvalid -} - -// IsMethodNotSupported determines if the err is an error which indicates the provided action could not -// be performed because it is not supported by the server. -func IsMethodNotSupported(err error) bool { - return reasonForError(err) == unversioned.StatusReasonMethodNotAllowed -} - -// IsBadRequest determines if err is an error which indicates that the request is invalid. -func IsBadRequest(err error) bool { - return reasonForError(err) == unversioned.StatusReasonBadRequest -} - -// IsUnauthorized determines if err is an error which indicates that the request is unauthorized and -// requires authentication by the user. -func IsUnauthorized(err error) bool { - return reasonForError(err) == unversioned.StatusReasonUnauthorized -} - -// IsForbidden determines if err is an error which indicates that the request is forbidden and cannot -// be completed as requested. -func IsForbidden(err error) bool { - return reasonForError(err) == unversioned.StatusReasonForbidden -} - -// IsServerTimeout determines if err is an error which indicates that the request needs to be retried -// by the client. -func IsServerTimeout(err error) bool { - return reasonForError(err) == unversioned.StatusReasonServerTimeout -} - -// IsInternalError determines if err is an error which indicates an internal server error. -func IsInternalError(err error) bool { - return reasonForError(err) == unversioned.StatusReasonInternalError -} - -// IsTooManyRequests determines if err is an error which indicates that there are too many requests -// that the server cannot handle. -// TODO: update IsTooManyRequests() when the TooManyRequests(429) error returned from the API server has a non-empty Reason field -func IsTooManyRequests(err error) bool { - switch t := err.(type) { - case APIStatus: - return t.Status().Code == StatusTooManyRequests - } - return false -} - -// IsUnexpectedServerError returns true if the server response was not in the expected API format, -// and may be the result of another HTTP actor. 
-func IsUnexpectedServerError(err error) bool { - switch t := err.(type) { - case APIStatus: - if d := t.Status().Details; d != nil { - for _, cause := range d.Causes { - if cause.Type == unversioned.CauseTypeUnexpectedServerResponse { - return true - } - } - } - } - return false -} - -// IsUnexpectedObjectError determines if err is due to an unexpected object from the master. -func IsUnexpectedObjectError(err error) bool { - _, ok := err.(*UnexpectedObjectError) - return err != nil && ok -} - -// SuggestsClientDelay returns true if this error suggests a client delay as well as the -// suggested seconds to wait, or false if the error does not imply a wait. -func SuggestsClientDelay(err error) (int, bool) { - switch t := err.(type) { - case APIStatus: - if t.Status().Details != nil { - switch t.Status().Reason { - case unversioned.StatusReasonServerTimeout, unversioned.StatusReasonTimeout: - return int(t.Status().Details.RetryAfterSeconds), true - } - } - } - return 0, false -} - -func reasonForError(err error) unversioned.StatusReason { - switch t := err.(type) { - case APIStatus: - return t.Status().Reason - } - return unversioned.StatusReasonUnknown -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/events/BUILD b/vendor/k8s.io/kubernetes/pkg/api/events/BUILD deleted file mode 100644 index dfde78d383..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/events/BUILD +++ /dev/null @@ -1,29 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["sorted_event_list.go"], - tags = ["automanaged"], - deps = ["//pkg/api:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = ["sorted_event_list_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/events/sorted_event_list.go b/vendor/k8s.io/kubernetes/pkg/api/events/sorted_event_list.go deleted file mode 100644 index b96c615deb..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/events/sorted_event_list.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package events - -import ( - "k8s.io/kubernetes/pkg/api" -) - -// SortableEvents implements sort.Interface for []api.Event based on the Timestamp field -type SortableEvents []api.Event - -func (list SortableEvents) Len() int { - return len(list) -} - -func (list SortableEvents) Swap(i, j int) { - list[i], list[j] = list[j], list[i] -} - -func (list SortableEvents) Less(i, j int) bool { - return list[i].LastTimestamp.Time.Before(list[j].LastTimestamp.Time) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/generate.go b/vendor/k8s.io/kubernetes/pkg/api/generate.go deleted file mode 100644 index 19379d301e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/generate.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "fmt" - - utilrand "k8s.io/kubernetes/pkg/util/rand" -) - -// NameGenerator generates names for objects. Some backends may have more information -// available to guide selection of new names and this interface hides those details. -type NameGenerator interface { - // GenerateName generates a valid name from the base name, adding a random suffix to the - // the base. If base is valid, the returned name must also be valid. The generator is - // responsible for knowing the maximum valid name length. - GenerateName(base string) string -} - -// GenerateName will resolve the object name of the provided ObjectMeta to a generated version if -// necessary. It expects that validation for ObjectMeta has already completed (that Base is a -// valid name) and that the NameGenerator generates a name that is also valid. -func GenerateName(u NameGenerator, meta *ObjectMeta) { - if len(meta.GenerateName) == 0 || len(meta.Name) != 0 { - return - } - meta.Name = u.GenerateName(meta.GenerateName) -} - -// simpleNameGenerator generates random names. -type simpleNameGenerator struct{} - -// SimpleNameGenerator is a generator that returns the name plus a random suffix of five alphanumerics -// when a name is requested. The string is guaranteed to not exceed the length of a standard Kubernetes -// name (63 characters) -var SimpleNameGenerator NameGenerator = simpleNameGenerator{} - -const ( - // TODO: make this flexible for non-core resources with alternate naming rules. 
- maxNameLength = 63 - randomLength = 5 - maxGeneratedNameLength = maxNameLength - randomLength -) - -func (simpleNameGenerator) GenerateName(base string) string { - if len(base) > maxGeneratedNameLength { - base = base[:maxGeneratedNameLength] - } - return fmt.Sprintf("%s%s", base, utilrand.String(randomLength)) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/helpers.go b/vendor/k8s.io/kubernetes/pkg/api/helpers.go index 58d1a33b53..faa7df7c0f 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/api/helpers.go @@ -24,17 +24,16 @@ import ( "strings" "time" - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/selection" - "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util/sets" - "github.com/davecgh/go-spew/spew" + + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/sets" ) // Conversion error conveniently packages up errors in conversions. @@ -51,8 +50,25 @@ func (c *ConversionError) Error() string { ) } +const ( + // annotation key prefix used to identify non-convertible json paths. + NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" +) + +// NonConvertibleFields iterates over the provided map and filters out all but +// any keys with the "non-convertible.kubernetes.io" prefix. +func NonConvertibleFields(annotations map[string]string) map[string]string { + nonConvertibleKeys := map[string]string{} + for key, value := range annotations { + if strings.HasPrefix(key, NonConvertibleAnnotationPrefix) { + nonConvertibleKeys[key] = value + } + } + return nonConvertibleKeys +} + // Semantic can do semantic deep equality checks for api objects. -// Example: api.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true +// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true var Semantic = conversion.EqualitiesOrDie( func(a, b resource.Quantity) bool { // Ignore formatting, only care that numeric value stayed the same. @@ -61,7 +77,7 @@ var Semantic = conversion.EqualitiesOrDie( // Uninitialized quantities are equivalent to 0 quantities. return a.Cmp(b) == 0 }, - func(a, b unversioned.Time) bool { + func(a, b metav1.Time) bool { return a.UTC() == b.UTC() }, func(a, b labels.Selector) bool { @@ -212,27 +228,6 @@ func IsIntegerResourceName(str string) bool { return integerResources.Has(str) || IsOpaqueIntResourceName(ResourceName(str)) } -// NewDeleteOptions returns a DeleteOptions indicating the resource should -// be deleted within the specified grace period. Use zero to indicate -// immediate deletion. If you would prefer to use the default grace period, -// use &api.DeleteOptions{} directly. -func NewDeleteOptions(grace int64) *DeleteOptions { - return &DeleteOptions{GracePeriodSeconds: &grace} -} - -// NewPreconditionDeleteOptions returns a DeleteOptions with a UID precondition set. -func NewPreconditionDeleteOptions(uid string) *DeleteOptions { - u := types.UID(uid) - p := Preconditions{UID: &u} - return &DeleteOptions{Preconditions: &p} -} - -// NewUIDPreconditions returns a Preconditions with UID set. 
-func NewUIDPreconditions(uid string) *Preconditions { - u := types.UID(uid) - return &Preconditions{UID: &u} -} - // this function aims to check if the service's ClusterIP is set or not // the objective is not to perform validation here func IsServiceIPSet(service *Service) bool { @@ -250,7 +245,7 @@ func IsServiceIPRequested(service *Service) bool { var standardFinalizers = sets.NewString( string(FinalizerKubernetes), - FinalizerOrphan, + metav1.FinalizerOrphanDependents, ) // HasAnnotation returns a bool if passed in annotation exists @@ -271,14 +266,6 @@ func IsStandardFinalizerName(str string) bool { return standardFinalizers.Has(str) } -// SingleObject returns a ListOptions for watching a single object. -func SingleObject(meta ObjectMeta) ListOptions { - return ListOptions{ - FieldSelector: fields.OneTermEqualSelector("metadata.name", meta.Name), - ResourceVersion: meta.ResourceVersion, - } -} - // AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, // only if they do not already exist func AddToNodeAddresses(addresses *[]NodeAddress, addAddresses ...NodeAddress) { @@ -397,15 +384,15 @@ func containsAccessMode(modes []PersistentVolumeAccessMode, mode PersistentVolum } // ParseRFC3339 parses an RFC3339 date in either RFC3339Nano or RFC3339 format. -func ParseRFC3339(s string, nowFn func() unversioned.Time) (unversioned.Time, error) { +func ParseRFC3339(s string, nowFn func() metav1.Time) (metav1.Time, error) { if t, timeErr := time.Parse(time.RFC3339Nano, s); timeErr == nil { - return unversioned.Time{Time: t}, nil + return metav1.Time{Time: t}, nil } t, err := time.Parse(time.RFC3339, s) if err != nil { - return unversioned.Time{}, err + return metav1.Time{}, err } - return unversioned.Time{Time: t}, nil + return metav1.Time{Time: t}, nil } // NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements @@ -443,10 +430,6 @@ func NodeSelectorRequirementsAsSelector(nsm []NodeSelectorRequirement) (labels.S } const ( - // AffinityAnnotationKey represents the key of affinity data (json serialized) - // in the Annotations of a Pod. - AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity" - // TolerationsAnnotationKey represents the key of tolerations data (json serialized) // in the Annotations of a Pod. TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations" @@ -484,21 +467,17 @@ const ( // is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet // will fail to launch. UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls" -) -// GetAffinityFromPod gets the json serialized affinity data from Pod.Annotations -// and converts it to the Affinity type in api. -func GetAffinityFromPodAnnotations(annotations map[string]string) (*Affinity, error) { - if len(annotations) > 0 && annotations[AffinityAnnotationKey] != "" { - var affinity Affinity - err := json.Unmarshal([]byte(annotations[AffinityAnnotationKey]), &affinity) - if err != nil { - return nil, err - } - return &affinity, nil - } - return nil, nil -} + // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache + // an object (e.g. secret, config map) before fetching it again from apiserver. + // This annotation can be attached to node. + ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" + + // AffinityAnnotationKey represents the key of affinity data (json serialized) + // in the Annotations of a Pod. 
+ // TODO: remove when alpha support for affinity is removed + AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity" +) // GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations // and converts it to the []Toleration type in api. @@ -513,17 +492,42 @@ func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]Tolerati return tolerations, nil } -// GetTaintsFromNodeAnnotations gets the json serialized taints data from Pod.Annotations -// and converts it to the []Taint type in api. -func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]Taint, error) { - var taints []Taint - if len(annotations) > 0 && annotations[TaintsAnnotationKey] != "" { - err := json.Unmarshal([]byte(annotations[TaintsAnnotationKey]), &taints) - if err != nil { - return []Taint{}, err +// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list. +// Returns true if something was updated, false otherwise. +func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error) { + podTolerations := pod.Spec.Tolerations + + var newTolerations []Toleration + updated := false + for i := range podTolerations { + if toleration.MatchToleration(&podTolerations[i]) { + if Semantic.DeepEqual(toleration, podTolerations[i]) { + return false, nil + } + newTolerations = append(newTolerations, *toleration) + updated = true + continue } + + newTolerations = append(newTolerations, podTolerations[i]) } - return taints, nil + + if !updated { + newTolerations = append(newTolerations, *toleration) + } + + pod.Spec.Tolerations = newTolerations + return true, nil +} + +// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by , +// if the two tolerations have same combination, regard as they match. +// TODO: uniqueness check for tolerations in api validations. +func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { + return t.Key == tolerationToMatch.Key && + t.Effect == tolerationToMatch.Effect && + t.Operator == tolerationToMatch.Operator && + t.Value == tolerationToMatch.Value } // TolerationToleratesTaint checks if the toleration tolerates the taint. @@ -571,15 +575,17 @@ func (t *Taint) ToString() string { return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect) } -func GetAvoidPodsFromNodeAnnotations(annotations map[string]string) (AvoidPods, error) { - var avoidPods AvoidPods - if len(annotations) > 0 && annotations[PreferAvoidPodsAnnotationKey] != "" { - err := json.Unmarshal([]byte(annotations[PreferAvoidPodsAnnotationKey]), &avoidPods) +// GetTaintsFromNodeAnnotations gets the json serialized taints data from Pod.Annotations +// and converts it to the []Taint type in api. +func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]Taint, error) { + var taints []Taint + if len(annotations) > 0 && annotations[TaintsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[TaintsAnnotationKey]), &taints) if err != nil { - return avoidPods, err + return []Taint{}, err } } - return avoidPods, nil + return taints, nil } // SysctlsFromPodAnnotations parses the sysctl annotations into a slice of safe Sysctls @@ -629,3 +635,57 @@ func PodAnnotationsFromSysctls(sysctls []Sysctl) string { } return strings.Join(kvs, ",") } + +// GetAffinityFromPodAnnotations gets the json serialized affinity data from Pod.Annotations +// and converts it to the Affinity type in api. 
+// TODO: remove when alpha support for affinity is removed +func GetAffinityFromPodAnnotations(annotations map[string]string) (*Affinity, error) { + if len(annotations) > 0 && annotations[AffinityAnnotationKey] != "" { + var affinity Affinity + err := json.Unmarshal([]byte(annotations[AffinityAnnotationKey]), &affinity) + if err != nil { + return nil, err + } + return &affinity, nil + } + return nil, nil +} + +// GetPersistentVolumeClass returns StorageClassName. +func GetPersistentVolumeClass(volume *PersistentVolume) string { + // Use beta annotation first + if class, found := volume.Annotations[BetaStorageClassAnnotation]; found { + return class + } + + return volume.Spec.StorageClassName +} + +// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was +// requested, it returns "". +func GetPersistentVolumeClaimClass(claim *PersistentVolumeClaim) string { + // Use beta annotation first + if class, found := claim.Annotations[BetaStorageClassAnnotation]; found { + return class + } + + if claim.Spec.StorageClassName != nil { + return *claim.Spec.StorageClassName + } + + return "" +} + +// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field. +func PersistentVolumeClaimHasClass(claim *PersistentVolumeClaim) bool { + // Use beta annotation first + if _, found := claim.Annotations[BetaStorageClassAnnotation]; found { + return true + } + + if claim.Spec.StorageClassName != nil { + return true + } + + return false +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/install/BUILD b/vendor/k8s.io/kubernetes/pkg/api/install/BUILD index e832894608..398e2df27a 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/api/install/BUILD @@ -4,10 +4,8 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", "go_test", - "cgo_library", ) go_library( @@ -16,26 +14,37 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/unversioned:go_default_library", "//pkg/api/v1:go_default_library", - "//pkg/apimachinery:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/sets:go_default_library", - "//vendor:github.com/golang/glog", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/announced", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/registered", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/util/sets", ], ) go_test( name = "go_default_test", srcs = ["install_test.go"], - library = "go_default_library", + library = ":go_default_library", tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/runtime:go_default_library", + "//pkg/api/v1:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/api/install/OWNERS b/vendor/k8s.io/kubernetes/pkg/api/install/OWNERS new file mode 100755 index 0000000000..01d6b37021 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/install/OWNERS @@ -0,0 +1,11 @@ 
+reviewers: +- lavalamp +- smarterclayton +- deads2k +- caesarxuchao +- liggitt +- nikhiljindal +- dims +- krousey +- david-mcmahon +- feihujiang diff --git a/vendor/k8s.io/kubernetes/pkg/api/install/install.go b/vendor/k8s.io/kubernetes/pkg/api/install/install.go index 644daad2ba..53730940ae 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/api/install/install.go @@ -19,135 +19,52 @@ limitations under the License. package install import ( - "fmt" - - "github.com/golang/glog" - + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/apimachinery" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/sets" ) -const importPrefix = "k8s.io/kubernetes/pkg/api" - -var accessor = meta.NewAccessor() - -// availableVersions lists all known external versions for this group from most preferred to least preferred -var availableVersions = []unversioned.GroupVersion{v1.SchemeGroupVersion} - func init() { - registered.RegisterVersions(availableVersions) - externalVersions := []unversioned.GroupVersion{} - for _, v := range availableVersions { - if registered.IsAllowedVersion(v) { - externalVersions = append(externalVersions, v) - } - } - if len(externalVersions) == 0 { - glog.V(4).Infof("No version is registered for group %v", api.GroupName) - return - } - - if err := registered.EnableVersions(externalVersions...); err != nil { - glog.V(4).Infof("%v", err) - return - } - if err := enableVersions(externalVersions); err != nil { - glog.V(4).Infof("%v", err) - return - } + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) } -// TODO: enableVersions should be centralized rather than spread in each API -// group. -// We can combine registered.RegisterVersions, registered.EnableVersions and -// registered.RegisterGroup once we have moved enableVersions there. -func enableVersions(externalVersions []unversioned.GroupVersion) error { - addVersionsToScheme(externalVersions...) 
- preferredExternalVersion := externalVersions[0] - - groupMeta := apimachinery.GroupMeta{ - GroupVersion: preferredExternalVersion, - GroupVersions: externalVersions, - RESTMapper: newRESTMapper(externalVersions), - SelfLinker: runtime.SelfLinker(accessor), - InterfacesFor: interfacesFor, - } - - if err := registered.RegisterGroup(groupMeta); err != nil { - return err - } - return nil -} - -func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { - // the list of kinds that are scoped at the root of the api hierarchy - // if a kind is not enumerated here, it is assumed to have a namespace scope - rootScoped := sets.NewString( - "Node", - "Namespace", - "PersistentVolume", - "ComponentStatus", - ) - - // these kinds should be excluded from the list of resources - ignoredKinds := sets.NewString( - "ListOptions", - "DeleteOptions", - "Status", - "PodLogOptions", - "PodExecOptions", - "PodAttachOptions", - "PodProxyOptions", - "NodeProxyOptions", - "ServiceProxyOptions", - "ThirdPartyResource", - "ThirdPartyResourceData", - "ThirdPartyResourceList") - - mapper := api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) - - return mapper -} - -// InterfacesFor returns the default Codec and ResourceVersioner for a given version -// string, or an error if the version is not known. -func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { - switch version { - case v1.SchemeGroupVersion: - return &meta.VersionInterfaces{ - ObjectConvertor: api.Scheme, - MetadataAccessor: accessor, - }, nil - default: - g, _ := registered.Group(api.GroupName) - return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) - } -} - -func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { - // add the internal version to Scheme - if err := api.AddToScheme(api.Scheme); err != nil { - // Programmer error, detect immediately +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: api.GroupName, + VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/kubernetes/pkg/api", + AddInternalObjectsToScheme: api.AddToScheme, + RootScopedKinds: sets.NewString( + "Node", + "Namespace", + "PersistentVolume", + "ComponentStatus", + ), + IgnoredKinds: sets.NewString( + "ListOptions", + "DeleteOptions", + "Status", + "PodLogOptions", + "PodExecOptions", + "PodAttachOptions", + "PodPortForwardOptions", + "PodProxyOptions", + "NodeProxyOptions", + "ServiceProxyOptions", + "ThirdPartyResource", + "ThirdPartyResourceData", + "ThirdPartyResourceList", + ), + }, + announced.VersionToSchemeFunc{ + v1.SchemeGroupVersion.Version: v1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { panic(err) } - // add the enabled external versions to Scheme - for _, v := range externalVersions { - if !registered.IsEnabledVersion(v) { - glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) - continue - } - switch v { - case v1.SchemeGroupVersion: - if err := v1.AddToScheme(api.Scheme); err != nil { - // Programmer error, detect immediately - panic(err) - } - } - } } diff --git a/vendor/k8s.io/kubernetes/pkg/api/json.go b/vendor/k8s.io/kubernetes/pkg/api/json.go 
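A minimal consumer-side sketch of the registration flow set up by the rewritten install.go above, assuming the 1.6-era apimachinery accessors (Registry.IsEnabledVersion, Scheme.Recognizes) rather than anything this diff itself guarantees; the blank import runs the init() shown above, and the snippet is illustrative only, not part of the vendored code.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	_ "k8s.io/kubernetes/pkg/api/install" // side effect: runs the init() that calls Install
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	// After the blank import, the announced core group has been registered and
	// enabled against api.Registry, and the v1 external types added to api.Scheme.
	fmt.Println(api.Registry.IsEnabledVersion(v1.SchemeGroupVersion))         // expected: true
	fmt.Println(api.Scheme.Recognizes(v1.SchemeGroupVersion.WithKind("Pod"))) // expected: true
}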
new file mode 100644 index 0000000000..3a6e04c182 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/json.go @@ -0,0 +1,28 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import "encoding/json" + +// This file implements json marshaling/unmarshaling interfaces on objects that are currently marshaled into annotations +// to prevent anyone from marshaling these internal structs. + +var _ = json.Marshaler(&AvoidPods{}) +var _ = json.Unmarshaler(&AvoidPods{}) + +func (AvoidPods) MarshalJSON() ([]byte, error) { panic("do not marshal internal struct") } +func (*AvoidPods) UnmarshalJSON([]byte) error { panic("do not unmarshal to internal struct") } diff --git a/vendor/k8s.io/kubernetes/pkg/api/mapper.go b/vendor/k8s.io/kubernetes/pkg/api/mapper.go deleted file mode 100644 index c5743c4b58..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/mapper.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "strings" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/sets" -) - -// Instantiates a DefaultRESTMapper based on types registered in api.Scheme -func NewDefaultRESTMapper(defaultGroupVersions []unversioned.GroupVersion, interfacesFunc meta.VersionInterfacesFunc, - importPathPrefix string, ignoredKinds, rootScoped sets.String) *meta.DefaultRESTMapper { - return NewDefaultRESTMapperFromScheme(defaultGroupVersions, interfacesFunc, importPathPrefix, ignoredKinds, rootScoped, Scheme) -} - -// Instantiates a DefaultRESTMapper based on types registered in the given scheme. -func NewDefaultRESTMapperFromScheme(defaultGroupVersions []unversioned.GroupVersion, interfacesFunc meta.VersionInterfacesFunc, - importPathPrefix string, ignoredKinds, rootScoped sets.String, scheme *runtime.Scheme) *meta.DefaultRESTMapper { - - mapper := meta.NewDefaultRESTMapper(defaultGroupVersions, interfacesFunc) - // enumerate all supported versions, get the kinds, and register with the mapper how to address - // our resources. - for _, gv := range defaultGroupVersions { - for kind, oType := range scheme.KnownTypes(gv) { - gvk := gv.WithKind(kind) - // TODO: Remove import path check. - // We check the import path because we currently stuff both "api" and "extensions" objects - // into the same group within Scheme since Scheme has no notion of groups yet. 
- if !strings.Contains(oType.PkgPath(), importPathPrefix) || ignoredKinds.Has(kind) { - continue - } - scope := meta.RESTScopeNamespace - if rootScoped.Has(kind) { - scope = meta.RESTScopeRoot - } - mapper.Add(gvk, scope) - } - } - return mapper -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta.go b/vendor/k8s.io/kubernetes/pkg/api/meta.go deleted file mode 100644 index a05d694435..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/meta.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/meta/metatypes" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util/uuid" -) - -// FillObjectMetaSystemFields populates fields that are managed by the system on ObjectMeta. -func FillObjectMetaSystemFields(ctx Context, meta *ObjectMeta) { - meta.CreationTimestamp = unversioned.Now() - // allows admission controllers to assign a UID earlier in the request processing - // to support tracking resources pending creation. - uid, found := UIDFrom(ctx) - if !found { - uid = uuid.NewUUID() - } - meta.UID = uid - meta.SelfLink = "" -} - -// HasObjectMetaSystemFieldValues returns true if fields that are managed by the system on ObjectMeta have values. -func HasObjectMetaSystemFieldValues(meta *ObjectMeta) bool { - return !meta.CreationTimestamp.Time.IsZero() || - len(meta.UID) != 0 -} - -// ObjectMetaFor returns a pointer to a provided object's ObjectMeta. -// TODO: allow runtime.Unknown to extract this object -// TODO: Remove this function and use meta.Accessor() instead. -func ObjectMetaFor(obj runtime.Object) (*ObjectMeta, error) { - v, err := conversion.EnforcePtr(obj) - if err != nil { - return nil, err - } - var meta *ObjectMeta - err = runtime.FieldPtr(v, "ObjectMeta", &meta) - return meta, err -} - -// ListMetaFor returns a pointer to a provided object's ListMeta, -// or an error if the object does not have that pointer. -// TODO: allow runtime.Unknown to extract this object -func ListMetaFor(obj runtime.Object) (*unversioned.ListMeta, error) { - v, err := conversion.EnforcePtr(obj) - if err != nil { - return nil, err - } - var meta *unversioned.ListMeta - err = runtime.FieldPtr(v, "ListMeta", &meta) - return meta, err -} - -func (obj *ObjectMeta) GetObjectMeta() meta.Object { return obj } - -// Namespace implements meta.Object for any object with an ObjectMeta typed field. Allows -// fast, direct access to metadata fields for API objects. 
-func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace } -func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace } -func (meta *ObjectMeta) GetName() string { return meta.Name } -func (meta *ObjectMeta) SetName(name string) { meta.Name = name } -func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName } -func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName } -func (meta *ObjectMeta) GetUID() types.UID { return meta.UID } -func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid } -func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion } -func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } -func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink } -func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } -func (meta *ObjectMeta) GetCreationTimestamp() unversioned.Time { return meta.CreationTimestamp } -func (meta *ObjectMeta) SetCreationTimestamp(creationTimestamp unversioned.Time) { - meta.CreationTimestamp = creationTimestamp -} -func (meta *ObjectMeta) GetDeletionTimestamp() *unversioned.Time { return meta.DeletionTimestamp } -func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *unversioned.Time) { - meta.DeletionTimestamp = deletionTimestamp -} -func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels } -func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels } -func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations } -func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations } -func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } -func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } - -func (meta *ObjectMeta) GetOwnerReferences() []metatypes.OwnerReference { - ret := make([]metatypes.OwnerReference, len(meta.OwnerReferences)) - for i := 0; i < len(meta.OwnerReferences); i++ { - ret[i].Kind = meta.OwnerReferences[i].Kind - ret[i].Name = meta.OwnerReferences[i].Name - ret[i].UID = meta.OwnerReferences[i].UID - ret[i].APIVersion = meta.OwnerReferences[i].APIVersion - if meta.OwnerReferences[i].Controller != nil { - value := *meta.OwnerReferences[i].Controller - ret[i].Controller = &value - } - } - return ret -} - -func (meta *ObjectMeta) SetOwnerReferences(references []metatypes.OwnerReference) { - newReferences := make([]OwnerReference, len(references)) - for i := 0; i < len(references); i++ { - newReferences[i].Kind = references[i].Kind - newReferences[i].Name = references[i].Name - newReferences[i].UID = references[i].UID - newReferences[i].APIVersion = references[i].APIVersion - if references[i].Controller != nil { - value := *references[i].Controller - newReferences[i].Controller = &value - } - } - meta.OwnerReferences = newReferences -} - -func (meta *ObjectMeta) GetClusterName() string { - return meta.ClusterName -} -func (meta *ObjectMeta) SetClusterName(clusterName string) { - meta.ClusterName = clusterName -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/BUILD b/vendor/k8s.io/kubernetes/pkg/api/meta/BUILD deleted file mode 100644 index 0ce289bfc0..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/BUILD +++ /dev/null @@ -1,76 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - 
"go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "errors.go", - "firsthit_restmapper.go", - "help.go", - "interfaces.go", - "meta.go", - "multirestmapper.go", - "priority.go", - "restmapper.go", - "unstructured.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api/meta/metatypes:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//pkg/util/errors:go_default_library", - "//pkg/util/sets:go_default_library", - "//vendor:github.com/golang/glog", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "multirestmapper_test.go", - "priority_test.go", - "restmapper_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/runtime:go_default_library", - ], -) - -go_test( - name = "go_default_xtest", - srcs = [ - "help_test.go", - "meta_test.go", - "scheme_test.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/meta/metatypes:go_default_library", - "//pkg/api/testapi:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//pkg/util/diff:go_default_library", - "//pkg/util/sets:go_default_library", - "//vendor:github.com/google/gofuzz", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/errors.go b/vendor/k8s.io/kubernetes/pkg/api/meta/errors.go deleted file mode 100644 index 70452965e5..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/errors.go +++ /dev/null @@ -1,105 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package meta - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api/unversioned" -) - -// AmbiguousResourceError is returned if the RESTMapper finds multiple matches for a resource -type AmbiguousResourceError struct { - PartialResource unversioned.GroupVersionResource - - MatchingResources []unversioned.GroupVersionResource - MatchingKinds []unversioned.GroupVersionKind -} - -func (e *AmbiguousResourceError) Error() string { - switch { - case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0: - return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialResource, e.MatchingResources, e.MatchingKinds) - case len(e.MatchingKinds) > 0: - return fmt.Sprintf("%v matches multiple kinds %v", e.PartialResource, e.MatchingKinds) - case len(e.MatchingResources) > 0: - return fmt.Sprintf("%v matches multiple resources %v", e.PartialResource, e.MatchingResources) - } - return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialResource) -} - -// AmbiguousKindError is returned if the RESTMapper finds multiple matches for a kind -type AmbiguousKindError struct { - PartialKind unversioned.GroupVersionKind - - MatchingResources []unversioned.GroupVersionResource - MatchingKinds []unversioned.GroupVersionKind -} - -func (e *AmbiguousKindError) Error() string { - switch { - case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0: - return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialKind, e.MatchingResources, e.MatchingKinds) - case len(e.MatchingKinds) > 0: - return fmt.Sprintf("%v matches multiple kinds %v", e.PartialKind, e.MatchingKinds) - case len(e.MatchingResources) > 0: - return fmt.Sprintf("%v matches multiple resources %v", e.PartialKind, e.MatchingResources) - } - return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialKind) -} - -func IsAmbiguousError(err error) bool { - if err == nil { - return false - } - switch err.(type) { - case *AmbiguousResourceError, *AmbiguousKindError: - return true - default: - return false - } -} - -// NoResourceMatchError is returned if the RESTMapper can't find any match for a resource -type NoResourceMatchError struct { - PartialResource unversioned.GroupVersionResource -} - -func (e *NoResourceMatchError) Error() string { - return fmt.Sprintf("no matches for %v", e.PartialResource) -} - -// NoKindMatchError is returned if the RESTMapper can't find any match for a kind -type NoKindMatchError struct { - PartialKind unversioned.GroupVersionKind -} - -func (e *NoKindMatchError) Error() string { - return fmt.Sprintf("no matches for %v", e.PartialKind) -} - -func IsNoMatchError(err error) bool { - if err == nil { - return false - } - switch err.(type) { - case *NoResourceMatchError, *NoKindMatchError: - return true - default: - return false - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/interfaces.go b/vendor/k8s.io/kubernetes/pkg/api/meta/interfaces.go deleted file mode 100644 index 488f78d372..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/interfaces.go +++ /dev/null @@ -1,184 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "k8s.io/kubernetes/pkg/api/meta/metatypes" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/types" -) - -// VersionInterfaces contains the interfaces one should use for dealing with types of a particular version. -type VersionInterfaces struct { - runtime.ObjectConvertor - MetadataAccessor -} - -type ObjectMetaAccessor interface { - GetObjectMeta() Object -} - -// Object lets you work with object metadata from any of the versioned or -// internal API objects. Attempting to set or retrieve a field on an object that does -// not support that field (Name, UID, Namespace on lists) will be a no-op and return -// a default value. -type Object interface { - GetNamespace() string - SetNamespace(namespace string) - GetName() string - SetName(name string) - GetGenerateName() string - SetGenerateName(name string) - GetUID() types.UID - SetUID(uid types.UID) - GetResourceVersion() string - SetResourceVersion(version string) - GetSelfLink() string - SetSelfLink(selfLink string) - GetCreationTimestamp() unversioned.Time - SetCreationTimestamp(timestamp unversioned.Time) - GetDeletionTimestamp() *unversioned.Time - SetDeletionTimestamp(timestamp *unversioned.Time) - GetLabels() map[string]string - SetLabels(labels map[string]string) - GetAnnotations() map[string]string - SetAnnotations(annotations map[string]string) - GetFinalizers() []string - SetFinalizers(finalizers []string) - GetOwnerReferences() []metatypes.OwnerReference - SetOwnerReferences([]metatypes.OwnerReference) - GetClusterName() string - SetClusterName(clusterName string) -} - -var _ Object = &runtime.Unstructured{} - -type ListMetaAccessor interface { - GetListMeta() List -} - -// List lets you work with list metadata from any of the versioned or -// internal API objects. Attempting to set or retrieve a field on an object that does -// not support that field will be a no-op and return a default value. -type List unversioned.List - -// Type exposes the type and APIVersion of versioned or internal API objects. -type Type unversioned.Type - -// MetadataAccessor lets you work with object and list metadata from any of the versioned or -// internal API objects. Attempting to set or retrieve a field on an object that does -// not support that field (Name, UID, Namespace on lists) will be a no-op and return -// a default value. -// -// MetadataAccessor exposes Interface in a way that can be used with multiple objects. 
-type MetadataAccessor interface { - APIVersion(obj runtime.Object) (string, error) - SetAPIVersion(obj runtime.Object, version string) error - - Kind(obj runtime.Object) (string, error) - SetKind(obj runtime.Object, kind string) error - - Namespace(obj runtime.Object) (string, error) - SetNamespace(obj runtime.Object, namespace string) error - - Name(obj runtime.Object) (string, error) - SetName(obj runtime.Object, name string) error - - GenerateName(obj runtime.Object) (string, error) - SetGenerateName(obj runtime.Object, name string) error - - UID(obj runtime.Object) (types.UID, error) - SetUID(obj runtime.Object, uid types.UID) error - - SelfLink(obj runtime.Object) (string, error) - SetSelfLink(obj runtime.Object, selfLink string) error - - Labels(obj runtime.Object) (map[string]string, error) - SetLabels(obj runtime.Object, labels map[string]string) error - - Annotations(obj runtime.Object) (map[string]string, error) - SetAnnotations(obj runtime.Object, annotations map[string]string) error - - runtime.ResourceVersioner -} - -type RESTScopeName string - -const ( - RESTScopeNameNamespace RESTScopeName = "namespace" - RESTScopeNameRoot RESTScopeName = "root" -) - -// RESTScope contains the information needed to deal with REST resources that are in a resource hierarchy -type RESTScope interface { - // Name of the scope - Name() RESTScopeName - // ParamName is the optional name of the parameter that should be inserted in the resource url - // If empty, no param will be inserted - ParamName() string - // ArgumentName is the optional name that should be used for the variable holding the value. - ArgumentName() string - // ParamDescription is the optional description to use to document the parameter in api documentation - ParamDescription() string -} - -// RESTMapping contains the information needed to deal with objects of a specific -// resource and kind in a RESTful manner. -type RESTMapping struct { - // Resource is a string representing the name of this resource as a REST client would see it - Resource string - - GroupVersionKind unversioned.GroupVersionKind - - // Scope contains the information needed to deal with REST Resources that are in a resource hierarchy - Scope RESTScope - - runtime.ObjectConvertor - MetadataAccessor -} - -// RESTMapper allows clients to map resources to kind, and map kind and version -// to interfaces for manipulating those objects. It is primarily intended for -// consumers of Kubernetes compatible REST APIs as defined in docs/devel/api-conventions.md. -// -// The Kubernetes API provides versioned resources and object kinds which are scoped -// to API groups. In other words, kinds and resources should not be assumed to be -// unique across groups. -// -// TODO: split into sub-interfaces -type RESTMapper interface { - // KindFor takes a partial resource and returns the single match. Returns an error if there are multiple matches - KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) - - // KindsFor takes a partial resource and returns the list of potential kinds in priority order - KindsFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionKind, error) - - // ResourceFor takes a partial resource and returns the single match. 
Returns an error if there are multiple matches - ResourceFor(input unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error) - - // ResourcesFor takes a partial resource and returns the list of potential resource in priority order - ResourcesFor(input unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) - - // RESTMapping identifies a preferred resource mapping for the provided group kind. - RESTMapping(gk unversioned.GroupKind, versions ...string) (*RESTMapping, error) - // RESTMappings returns all resource mappings for the provided group kind. - RESTMappings(gk unversioned.GroupKind) ([]*RESTMapping, error) - - AliasesForResource(resource string) ([]string, bool) - ResourceSingularizer(resource string) (singular string, err error) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/meta.go b/vendor/k8s.io/kubernetes/pkg/api/meta/meta.go deleted file mode 100644 index 876aa4faaf..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/meta.go +++ /dev/null @@ -1,567 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "fmt" - "reflect" - - "k8s.io/kubernetes/pkg/api/meta/metatypes" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/types" - - "github.com/golang/glog" -) - -// errNotList is returned when an object implements the Object style interfaces but not the List style -// interfaces. -var errNotList = fmt.Errorf("object does not implement the List interfaces") - -// ListAccessor returns a List interface for the provided object or an error if the object does -// not provide List. -// IMPORTANT: Objects are a superset of lists, so all Objects return List metadata. Do not use this -// check to determine whether an object *is* a List. -// TODO: return bool instead of error -func ListAccessor(obj interface{}) (List, error) { - switch t := obj.(type) { - case List: - return t, nil - case unversioned.List: - return t, nil - case ListMetaAccessor: - if m := t.GetListMeta(); m != nil { - return m, nil - } - return nil, errNotList - case unversioned.ListMetaAccessor: - if m := t.GetListMeta(); m != nil { - return m, nil - } - return nil, errNotList - case Object: - return t, nil - case ObjectMetaAccessor: - if m := t.GetObjectMeta(); m != nil { - return m, nil - } - return nil, errNotList - default: - return nil, errNotList - } -} - -// errNotObject is returned when an object implements the List style interfaces but not the Object style -// interfaces. -var errNotObject = fmt.Errorf("object does not implement the Object interfaces") - -// Accessor takes an arbitrary object pointer and returns meta.Interface. -// obj must be a pointer to an API type. An error is returned if the minimum -// required fields are missing. Fields that are not required return the default -// value and are a no-op if set. 
-// TODO: return bool instead of error -func Accessor(obj interface{}) (Object, error) { - switch t := obj.(type) { - case Object: - return t, nil - case ObjectMetaAccessor: - if m := t.GetObjectMeta(); m != nil { - return m, nil - } - return nil, errNotObject - case List, unversioned.List, ListMetaAccessor, unversioned.ListMetaAccessor: - return nil, errNotObject - default: - return nil, errNotObject - } -} - -// TypeAccessor returns an interface that allows retrieving and modifying the APIVersion -// and Kind of an in-memory internal object. -// TODO: this interface is used to test code that does not have ObjectMeta or ListMeta -// in round tripping (objects which can use apiVersion/kind, but do not fit the Kube -// api conventions). -func TypeAccessor(obj interface{}) (Type, error) { - if typed, ok := obj.(runtime.Object); ok { - return objectAccessor{typed}, nil - } - v, err := conversion.EnforcePtr(obj) - if err != nil { - return nil, err - } - t := v.Type() - if v.Kind() != reflect.Struct { - return nil, fmt.Errorf("expected struct, but got %v: %v (%#v)", v.Kind(), t, v.Interface()) - } - - typeMeta := v.FieldByName("TypeMeta") - if !typeMeta.IsValid() { - return nil, fmt.Errorf("struct %v lacks embedded TypeMeta type", t) - } - a := &genericAccessor{} - if err := extractFromTypeMeta(typeMeta, a); err != nil { - return nil, fmt.Errorf("unable to find type fields on %#v: %v", typeMeta, err) - } - return a, nil -} - -type objectAccessor struct { - runtime.Object -} - -func (obj objectAccessor) GetKind() string { - return obj.GetObjectKind().GroupVersionKind().Kind -} - -func (obj objectAccessor) SetKind(kind string) { - gvk := obj.GetObjectKind().GroupVersionKind() - gvk.Kind = kind - obj.GetObjectKind().SetGroupVersionKind(gvk) -} - -func (obj objectAccessor) GetAPIVersion() string { - return obj.GetObjectKind().GroupVersionKind().GroupVersion().String() -} - -func (obj objectAccessor) SetAPIVersion(version string) { - gvk := obj.GetObjectKind().GroupVersionKind() - gv, err := unversioned.ParseGroupVersion(version) - if err != nil { - gv = unversioned.GroupVersion{Version: version} - } - gvk.Group, gvk.Version = gv.Group, gv.Version - obj.GetObjectKind().SetGroupVersionKind(gvk) -} - -// NewAccessor returns a MetadataAccessor that can retrieve -// or manipulate resource version on objects derived from core API -// metadata concepts. -func NewAccessor() MetadataAccessor { - return resourceAccessor{} -} - -// resourceAccessor implements ResourceVersioner and SelfLinker. 
-type resourceAccessor struct{} - -func (resourceAccessor) Kind(obj runtime.Object) (string, error) { - return objectAccessor{obj}.GetKind(), nil -} - -func (resourceAccessor) SetKind(obj runtime.Object, kind string) error { - objectAccessor{obj}.SetKind(kind) - return nil -} - -func (resourceAccessor) APIVersion(obj runtime.Object) (string, error) { - return objectAccessor{obj}.GetAPIVersion(), nil -} - -func (resourceAccessor) SetAPIVersion(obj runtime.Object, version string) error { - objectAccessor{obj}.SetAPIVersion(version) - return nil -} - -func (resourceAccessor) Namespace(obj runtime.Object) (string, error) { - accessor, err := Accessor(obj) - if err != nil { - return "", err - } - return accessor.GetNamespace(), nil -} - -func (resourceAccessor) SetNamespace(obj runtime.Object, namespace string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetNamespace(namespace) - return nil -} - -func (resourceAccessor) Name(obj runtime.Object) (string, error) { - accessor, err := Accessor(obj) - if err != nil { - return "", err - } - return accessor.GetName(), nil -} - -func (resourceAccessor) SetName(obj runtime.Object, name string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetName(name) - return nil -} - -func (resourceAccessor) GenerateName(obj runtime.Object) (string, error) { - accessor, err := Accessor(obj) - if err != nil { - return "", err - } - return accessor.GetGenerateName(), nil -} - -func (resourceAccessor) SetGenerateName(obj runtime.Object, name string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetGenerateName(name) - return nil -} - -func (resourceAccessor) UID(obj runtime.Object) (types.UID, error) { - accessor, err := Accessor(obj) - if err != nil { - return "", err - } - return accessor.GetUID(), nil -} - -func (resourceAccessor) SetUID(obj runtime.Object, uid types.UID) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetUID(uid) - return nil -} - -func (resourceAccessor) SelfLink(obj runtime.Object) (string, error) { - accessor, err := ListAccessor(obj) - if err != nil { - return "", err - } - return accessor.GetSelfLink(), nil -} - -func (resourceAccessor) SetSelfLink(obj runtime.Object, selfLink string) error { - accessor, err := ListAccessor(obj) - if err != nil { - return err - } - accessor.SetSelfLink(selfLink) - return nil -} - -func (resourceAccessor) Labels(obj runtime.Object) (map[string]string, error) { - accessor, err := Accessor(obj) - if err != nil { - return nil, err - } - return accessor.GetLabels(), nil -} - -func (resourceAccessor) SetLabels(obj runtime.Object, labels map[string]string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetLabels(labels) - return nil -} - -func (resourceAccessor) Annotations(obj runtime.Object) (map[string]string, error) { - accessor, err := Accessor(obj) - if err != nil { - return nil, err - } - return accessor.GetAnnotations(), nil -} - -func (resourceAccessor) SetAnnotations(obj runtime.Object, annotations map[string]string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetAnnotations(annotations) - return nil -} - -func (resourceAccessor) ResourceVersion(obj runtime.Object) (string, error) { - accessor, err := ListAccessor(obj) - if err != nil { - return "", err - } - return accessor.GetResourceVersion(), nil -} - -func (resourceAccessor) SetResourceVersion(obj 
runtime.Object, version string) error { - accessor, err := ListAccessor(obj) - if err != nil { - return err - } - accessor.SetResourceVersion(version) - return nil -} - -// extractFromOwnerReference extracts v to o. v is the OwnerReferences field of an object. -func extractFromOwnerReference(v reflect.Value, o *metatypes.OwnerReference) error { - if err := runtime.Field(v, "APIVersion", &o.APIVersion); err != nil { - return err - } - if err := runtime.Field(v, "Kind", &o.Kind); err != nil { - return err - } - if err := runtime.Field(v, "Name", &o.Name); err != nil { - return err - } - if err := runtime.Field(v, "UID", &o.UID); err != nil { - return err - } - var controllerPtr *bool - if err := runtime.Field(v, "Controller", &controllerPtr); err != nil { - return err - } - if controllerPtr != nil { - controller := *controllerPtr - o.Controller = &controller - } - return nil -} - -// setOwnerReference sets v to o. v is the OwnerReferences field of an object. -func setOwnerReference(v reflect.Value, o *metatypes.OwnerReference) error { - if err := runtime.SetField(o.APIVersion, v, "APIVersion"); err != nil { - return err - } - if err := runtime.SetField(o.Kind, v, "Kind"); err != nil { - return err - } - if err := runtime.SetField(o.Name, v, "Name"); err != nil { - return err - } - if err := runtime.SetField(o.UID, v, "UID"); err != nil { - return err - } - if o.Controller != nil { - controller := *(o.Controller) - if err := runtime.SetField(&controller, v, "Controller"); err != nil { - return err - } - } - return nil -} - -// genericAccessor contains pointers to strings that can modify an arbitrary -// struct and implements the Accessor interface. -type genericAccessor struct { - namespace *string - name *string - generateName *string - uid *types.UID - apiVersion *string - kind *string - resourceVersion *string - selfLink *string - creationTimestamp *unversioned.Time - deletionTimestamp **unversioned.Time - labels *map[string]string - annotations *map[string]string - ownerReferences reflect.Value - finalizers *[]string -} - -func (a genericAccessor) GetNamespace() string { - if a.namespace == nil { - return "" - } - return *a.namespace -} - -func (a genericAccessor) SetNamespace(namespace string) { - if a.namespace == nil { - return - } - *a.namespace = namespace -} - -func (a genericAccessor) GetName() string { - if a.name == nil { - return "" - } - return *a.name -} - -func (a genericAccessor) SetName(name string) { - if a.name == nil { - return - } - *a.name = name -} - -func (a genericAccessor) GetGenerateName() string { - if a.generateName == nil { - return "" - } - return *a.generateName -} - -func (a genericAccessor) SetGenerateName(generateName string) { - if a.generateName == nil { - return - } - *a.generateName = generateName -} - -func (a genericAccessor) GetUID() types.UID { - if a.uid == nil { - return "" - } - return *a.uid -} - -func (a genericAccessor) SetUID(uid types.UID) { - if a.uid == nil { - return - } - *a.uid = uid -} - -func (a genericAccessor) GetAPIVersion() string { - return *a.apiVersion -} - -func (a genericAccessor) SetAPIVersion(version string) { - *a.apiVersion = version -} - -func (a genericAccessor) GetKind() string { - return *a.kind -} - -func (a genericAccessor) SetKind(kind string) { - *a.kind = kind -} - -func (a genericAccessor) GetResourceVersion() string { - return *a.resourceVersion -} - -func (a genericAccessor) SetResourceVersion(version string) { - *a.resourceVersion = version -} - -func (a genericAccessor) GetSelfLink() string { - return 
*a.selfLink -} - -func (a genericAccessor) SetSelfLink(selfLink string) { - *a.selfLink = selfLink -} - -func (a genericAccessor) GetCreationTimestamp() unversioned.Time { - return *a.creationTimestamp -} - -func (a genericAccessor) SetCreationTimestamp(timestamp unversioned.Time) { - *a.creationTimestamp = timestamp -} - -func (a genericAccessor) GetDeletionTimestamp() *unversioned.Time { - return *a.deletionTimestamp -} - -func (a genericAccessor) SetDeletionTimestamp(timestamp *unversioned.Time) { - *a.deletionTimestamp = timestamp -} - -func (a genericAccessor) GetLabels() map[string]string { - if a.labels == nil { - return nil - } - return *a.labels -} - -func (a genericAccessor) SetLabels(labels map[string]string) { - *a.labels = labels -} - -func (a genericAccessor) GetAnnotations() map[string]string { - if a.annotations == nil { - return nil - } - return *a.annotations -} - -func (a genericAccessor) SetAnnotations(annotations map[string]string) { - if a.annotations == nil { - emptyAnnotations := make(map[string]string) - a.annotations = &emptyAnnotations - } - *a.annotations = annotations -} - -func (a genericAccessor) GetFinalizers() []string { - if a.finalizers == nil { - return nil - } - return *a.finalizers -} - -func (a genericAccessor) SetFinalizers(finalizers []string) { - *a.finalizers = finalizers -} - -func (a genericAccessor) GetOwnerReferences() []metatypes.OwnerReference { - var ret []metatypes.OwnerReference - s := a.ownerReferences - if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { - glog.Errorf("expect %v to be a pointer to slice", s) - return ret - } - s = s.Elem() - // Set the capacity to one element greater to avoid copy if the caller later append an element. - ret = make([]metatypes.OwnerReference, s.Len(), s.Len()+1) - for i := 0; i < s.Len(); i++ { - if err := extractFromOwnerReference(s.Index(i), &ret[i]); err != nil { - glog.Errorf("extractFromOwnerReference failed: %v", err) - return ret - } - } - return ret -} - -func (a genericAccessor) SetOwnerReferences(references []metatypes.OwnerReference) { - s := a.ownerReferences - if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { - glog.Errorf("expect %v to be a pointer to slice", s) - } - s = s.Elem() - newReferences := reflect.MakeSlice(s.Type(), len(references), len(references)) - for i := 0; i < len(references); i++ { - if err := setOwnerReference(newReferences.Index(i), &references[i]); err != nil { - glog.Errorf("setOwnerReference failed: %v", err) - return - } - } - s.Set(newReferences) -} - -// extractFromTypeMeta extracts pointers to version and kind fields from an object -func extractFromTypeMeta(v reflect.Value, a *genericAccessor) error { - if err := runtime.FieldPtr(v, "APIVersion", &a.apiVersion); err != nil { - return err - } - if err := runtime.FieldPtr(v, "Kind", &a.kind); err != nil { - return err - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/metatypes/BUILD b/vendor/k8s.io/kubernetes/pkg/api/meta/metatypes/BUILD deleted file mode 100644 index ded660072f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/metatypes/BUILD +++ /dev/null @@ -1,18 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["types.go"], - tags = ["automanaged"], - deps = ["//pkg/types:go_default_library"], -) diff --git 
a/vendor/k8s.io/kubernetes/pkg/api/meta/metatypes/types.go b/vendor/k8s.io/kubernetes/pkg/api/meta/metatypes/types.go deleted file mode 100644 index 41e6596d77..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/metatypes/types.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// The types defined in this package are used by the meta package to represent -// the in-memory version of objects. We cannot reuse the __internal version of -// API objects because it causes import cycle. -package metatypes - -import "k8s.io/kubernetes/pkg/types" - -type OwnerReference struct { - APIVersion string - Kind string - UID types.UID - Name string - Controller *bool -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/restmapper.go b/vendor/k8s.io/kubernetes/pkg/api/meta/restmapper.go deleted file mode 100644 index 19d0b25207..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/restmapper.go +++ /dev/null @@ -1,601 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// TODO: move everything in this file to pkg/api/rest -package meta - -import ( - "fmt" - "sort" - "strings" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// Implements RESTScope interface -type restScope struct { - name RESTScopeName - paramName string - argumentName string - paramDescription string -} - -func (r *restScope) Name() RESTScopeName { - return r.name -} -func (r *restScope) ParamName() string { - return r.paramName -} -func (r *restScope) ArgumentName() string { - return r.argumentName -} -func (r *restScope) ParamDescription() string { - return r.paramDescription -} - -var RESTScopeNamespace = &restScope{ - name: RESTScopeNameNamespace, - paramName: "namespaces", - argumentName: "namespace", - paramDescription: "object name and auth scope, such as for teams and projects", -} - -var RESTScopeRoot = &restScope{ - name: RESTScopeNameRoot, -} - -// DefaultRESTMapper exposes mappings between the types defined in a -// runtime.Scheme. It assumes that all types defined the provided scheme -// can be mapped with the provided MetadataAccessor and Codec interfaces. -// -// The resource name of a Kind is defined as the lowercase, -// English-plural version of the Kind string. -// When converting from resource to Kind, the singular version of the -// resource name is also accepted for convenience. -// -// TODO: Only accept plural for some operations for increased control? 
-// (`get pod bar` vs `get pods bar`) -type DefaultRESTMapper struct { - defaultGroupVersions []unversioned.GroupVersion - - resourceToKind map[unversioned.GroupVersionResource]unversioned.GroupVersionKind - kindToPluralResource map[unversioned.GroupVersionKind]unversioned.GroupVersionResource - kindToScope map[unversioned.GroupVersionKind]RESTScope - singularToPlural map[unversioned.GroupVersionResource]unversioned.GroupVersionResource - pluralToSingular map[unversioned.GroupVersionResource]unversioned.GroupVersionResource - - interfacesFunc VersionInterfacesFunc - - // aliasToResource is used for mapping aliases to resources - aliasToResource map[string][]string -} - -func (m *DefaultRESTMapper) String() string { - return fmt.Sprintf("DefaultRESTMapper{kindToPluralResource=%v}", m.kindToPluralResource) -} - -var _ RESTMapper = &DefaultRESTMapper{} - -// VersionInterfacesFunc returns the appropriate typer, and metadata accessor for a -// given api version, or an error if no such api version exists. -type VersionInterfacesFunc func(version unversioned.GroupVersion) (*VersionInterfaces, error) - -// NewDefaultRESTMapper initializes a mapping between Kind and APIVersion -// to a resource name and back based on the objects in a runtime.Scheme -// and the Kubernetes API conventions. Takes a group name, a priority list of the versions -// to search when an object has no default version (set empty to return an error), -// and a function that retrieves the correct metadata for a given version. -func NewDefaultRESTMapper(defaultGroupVersions []unversioned.GroupVersion, f VersionInterfacesFunc) *DefaultRESTMapper { - resourceToKind := make(map[unversioned.GroupVersionResource]unversioned.GroupVersionKind) - kindToPluralResource := make(map[unversioned.GroupVersionKind]unversioned.GroupVersionResource) - kindToScope := make(map[unversioned.GroupVersionKind]RESTScope) - singularToPlural := make(map[unversioned.GroupVersionResource]unversioned.GroupVersionResource) - pluralToSingular := make(map[unversioned.GroupVersionResource]unversioned.GroupVersionResource) - aliasToResource := make(map[string][]string) - // TODO: verify name mappings work correctly when versions differ - - return &DefaultRESTMapper{ - resourceToKind: resourceToKind, - kindToPluralResource: kindToPluralResource, - kindToScope: kindToScope, - defaultGroupVersions: defaultGroupVersions, - singularToPlural: singularToPlural, - pluralToSingular: pluralToSingular, - aliasToResource: aliasToResource, - interfacesFunc: f, - } -} - -func (m *DefaultRESTMapper) Add(kind unversioned.GroupVersionKind, scope RESTScope) { - plural, singular := KindToResource(kind) - - m.singularToPlural[singular] = plural - m.pluralToSingular[plural] = singular - - m.resourceToKind[singular] = kind - m.resourceToKind[plural] = kind - - m.kindToPluralResource[kind] = plural - m.kindToScope[kind] = scope -} - -// unpluralizedSuffixes is a list of resource suffixes that are the same plural and singular -// This is only is only necessary because some bits of code are lazy and don't actually use the RESTMapper like they should. -// TODO eliminate this so that different callers can correctly map to resources. This probably means updating all -// callers to use the RESTMapper they mean. -var unpluralizedSuffixes = []string{ - "endpoints", -} - -// KindToResource converts Kind to a resource name. -// Broken. This method only "sort of" works when used outside of this package. It assumes that Kinds and Resources match -// and they aren't guaranteed to do so. 
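The KindToResource helper defined just below (deleted here together with the rest of the legacy RESTMapper) pluralizes purely lexically: lower-case the Kind, leave anything ending in an unpluralizedSuffixes entry (currently only "endpoints") unchanged, turn a trailing "s" into "es", a trailing "y" into "ies", and otherwise append "s". A self-contained restatement of those rules for illustration, not an import of the vendored package:

package main

import (
	"fmt"
	"strings"
)

// pluralize mirrors the suffix rules of the deleted KindToResource helper.
func pluralize(kind string) string {
	s := strings.ToLower(kind)
	if strings.HasSuffix(s, "endpoints") { // unpluralizedSuffixes
		return s
	}
	switch {
	case strings.HasSuffix(s, "s"):
		return s + "es"
	case strings.HasSuffix(s, "y"):
		return strings.TrimSuffix(s, "y") + "ies"
	default:
		return s + "s"
	}
}

func main() {
	for _, kind := range []string{"Pod", "ComponentStatus", "NetworkPolicy", "Endpoints"} {
		fmt.Printf("%s -> %s\n", kind, pluralize(kind))
	}
	// Pod -> pods, ComponentStatus -> componentstatuses,
	// NetworkPolicy -> networkpolicies, Endpoints -> endpoints
}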
-func KindToResource(kind unversioned.GroupVersionKind) ( /*plural*/ unversioned.GroupVersionResource /*singular*/, unversioned.GroupVersionResource) { - kindName := kind.Kind - if len(kindName) == 0 { - return unversioned.GroupVersionResource{}, unversioned.GroupVersionResource{} - } - singularName := strings.ToLower(kindName) - singular := kind.GroupVersion().WithResource(singularName) - - for _, skip := range unpluralizedSuffixes { - if strings.HasSuffix(singularName, skip) { - return singular, singular - } - } - - switch string(singularName[len(singularName)-1]) { - case "s": - return kind.GroupVersion().WithResource(singularName + "es"), singular - case "y": - return kind.GroupVersion().WithResource(strings.TrimSuffix(singularName, "y") + "ies"), singular - } - - return kind.GroupVersion().WithResource(singularName + "s"), singular -} - -// ResourceSingularizer implements RESTMapper -// It converts a resource name from plural to singular (e.g., from pods to pod) -func (m *DefaultRESTMapper) ResourceSingularizer(resourceType string) (string, error) { - partialResource := unversioned.GroupVersionResource{Resource: resourceType} - resources, err := m.ResourcesFor(partialResource) - if err != nil { - return resourceType, err - } - - singular := unversioned.GroupVersionResource{} - for _, curr := range resources { - currSingular, ok := m.pluralToSingular[curr] - if !ok { - continue - } - if singular.Empty() { - singular = currSingular - continue - } - - if currSingular.Resource != singular.Resource { - return resourceType, fmt.Errorf("multiple possible singular resources (%v) found for %v", resources, resourceType) - } - } - - if singular.Empty() { - return resourceType, fmt.Errorf("no singular of resource %v has been defined", resourceType) - } - - return singular.Resource, nil -} - -// coerceResourceForMatching makes the resource lower case and converts internal versions to unspecified (legacy behavior) -func coerceResourceForMatching(resource unversioned.GroupVersionResource) unversioned.GroupVersionResource { - resource.Resource = strings.ToLower(resource.Resource) - if resource.Version == runtime.APIVersionInternal { - resource.Version = "" - } - - return resource -} - -func (m *DefaultRESTMapper) ResourcesFor(input unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) { - resource := coerceResourceForMatching(input) - - hasResource := len(resource.Resource) > 0 - hasGroup := len(resource.Group) > 0 - hasVersion := len(resource.Version) > 0 - - if !hasResource { - return nil, fmt.Errorf("a resource must be present, got: %v", resource) - } - - ret := []unversioned.GroupVersionResource{} - switch { - case hasGroup && hasVersion: - // fully qualified. Find the exact match - for plural, singular := range m.pluralToSingular { - if singular == resource { - ret = append(ret, plural) - break - } - if plural == resource { - ret = append(ret, plural) - break - } - } - - case hasGroup: - // given a group, prefer an exact match. If you don't find one, resort to a prefix match on group - foundExactMatch := false - requestedGroupResource := resource.GroupResource() - for plural, singular := range m.pluralToSingular { - if singular.GroupResource() == requestedGroupResource { - foundExactMatch = true - ret = append(ret, plural) - } - if plural.GroupResource() == requestedGroupResource { - foundExactMatch = true - ret = append(ret, plural) - } - } - - // if you didn't find an exact match, match on group prefixing. 
This allows storageclass.storage to match - // storageclass.storage.k8s.io - if !foundExactMatch { - for plural, singular := range m.pluralToSingular { - if !strings.HasPrefix(plural.Group, requestedGroupResource.Group) { - continue - } - if singular.Resource == requestedGroupResource.Resource { - ret = append(ret, plural) - } - if plural.Resource == requestedGroupResource.Resource { - ret = append(ret, plural) - } - } - - } - - case hasVersion: - for plural, singular := range m.pluralToSingular { - if singular.Version == resource.Version && singular.Resource == resource.Resource { - ret = append(ret, plural) - } - if plural.Version == resource.Version && plural.Resource == resource.Resource { - ret = append(ret, plural) - } - } - - default: - for plural, singular := range m.pluralToSingular { - if singular.Resource == resource.Resource { - ret = append(ret, plural) - } - if plural.Resource == resource.Resource { - ret = append(ret, plural) - } - } - } - - if len(ret) == 0 { - return nil, &NoResourceMatchError{PartialResource: resource} - } - - sort.Sort(resourceByPreferredGroupVersion{ret, m.defaultGroupVersions}) - return ret, nil -} - -func (m *DefaultRESTMapper) ResourceFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error) { - resources, err := m.ResourcesFor(resource) - if err != nil { - return unversioned.GroupVersionResource{}, err - } - if len(resources) == 1 { - return resources[0], nil - } - - return unversioned.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources} -} - -func (m *DefaultRESTMapper) KindsFor(input unversioned.GroupVersionResource) ([]unversioned.GroupVersionKind, error) { - resource := coerceResourceForMatching(input) - - hasResource := len(resource.Resource) > 0 - hasGroup := len(resource.Group) > 0 - hasVersion := len(resource.Version) > 0 - - if !hasResource { - return nil, fmt.Errorf("a resource must be present, got: %v", resource) - } - - ret := []unversioned.GroupVersionKind{} - switch { - // fully qualified. Find the exact match - case hasGroup && hasVersion: - kind, exists := m.resourceToKind[resource] - if exists { - ret = append(ret, kind) - } - - case hasGroup: - foundExactMatch := false - requestedGroupResource := resource.GroupResource() - for currResource, currKind := range m.resourceToKind { - if currResource.GroupResource() == requestedGroupResource { - foundExactMatch = true - ret = append(ret, currKind) - } - } - - // if you didn't find an exact match, match on group prefixing. 
This allows storageclass.storage to match - // storageclass.storage.k8s.io - if !foundExactMatch { - for currResource, currKind := range m.resourceToKind { - if !strings.HasPrefix(currResource.Group, requestedGroupResource.Group) { - continue - } - if currResource.Resource == requestedGroupResource.Resource { - ret = append(ret, currKind) - } - } - - } - - case hasVersion: - for currResource, currKind := range m.resourceToKind { - if currResource.Version == resource.Version && currResource.Resource == resource.Resource { - ret = append(ret, currKind) - } - } - - default: - for currResource, currKind := range m.resourceToKind { - if currResource.Resource == resource.Resource { - ret = append(ret, currKind) - } - } - } - - if len(ret) == 0 { - return nil, &NoResourceMatchError{PartialResource: input} - } - - sort.Sort(kindByPreferredGroupVersion{ret, m.defaultGroupVersions}) - return ret, nil -} - -func (m *DefaultRESTMapper) KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) { - kinds, err := m.KindsFor(resource) - if err != nil { - return unversioned.GroupVersionKind{}, err - } - if len(kinds) == 1 { - return kinds[0], nil - } - - return unversioned.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds} -} - -type kindByPreferredGroupVersion struct { - list []unversioned.GroupVersionKind - sortOrder []unversioned.GroupVersion -} - -func (o kindByPreferredGroupVersion) Len() int { return len(o.list) } -func (o kindByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] } -func (o kindByPreferredGroupVersion) Less(i, j int) bool { - lhs := o.list[i] - rhs := o.list[j] - if lhs == rhs { - return false - } - - if lhs.GroupVersion() == rhs.GroupVersion() { - return lhs.Kind < rhs.Kind - } - - // otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order - lhsIndex := -1 - rhsIndex := -1 - - for i := range o.sortOrder { - if o.sortOrder[i] == lhs.GroupVersion() { - lhsIndex = i - } - if o.sortOrder[i] == rhs.GroupVersion() { - rhsIndex = i - } - } - - if rhsIndex == -1 { - return true - } - - return lhsIndex < rhsIndex -} - -type resourceByPreferredGroupVersion struct { - list []unversioned.GroupVersionResource - sortOrder []unversioned.GroupVersion -} - -func (o resourceByPreferredGroupVersion) Len() int { return len(o.list) } -func (o resourceByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] } -func (o resourceByPreferredGroupVersion) Less(i, j int) bool { - lhs := o.list[i] - rhs := o.list[j] - if lhs == rhs { - return false - } - - if lhs.GroupVersion() == rhs.GroupVersion() { - return lhs.Resource < rhs.Resource - } - - // otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order - lhsIndex := -1 - rhsIndex := -1 - - for i := range o.sortOrder { - if o.sortOrder[i] == lhs.GroupVersion() { - lhsIndex = i - } - if o.sortOrder[i] == rhs.GroupVersion() { - rhsIndex = i - } - } - - if rhsIndex == -1 { - return true - } - - return lhsIndex < rhsIndex -} - -// RESTMapping returns a struct representing the resource path and conversion interfaces a -// RESTClient should use to operate on the provided group/kind in order of versions. If a version search -// order is not provided, the search order provided to DefaultRESTMapper will be used to resolve which -// version should be used to access the named group/kind. 
-// TODO: consider refactoring to use RESTMappings in a way that preserves version ordering and preference -func (m *DefaultRESTMapper) RESTMapping(gk unversioned.GroupKind, versions ...string) (*RESTMapping, error) { - // Pick an appropriate version - var gvk *unversioned.GroupVersionKind - hadVersion := false - for _, version := range versions { - if len(version) == 0 || version == runtime.APIVersionInternal { - continue - } - - currGVK := gk.WithVersion(version) - hadVersion = true - if _, ok := m.kindToPluralResource[currGVK]; ok { - gvk = &currGVK - break - } - } - // Use the default preferred versions - if !hadVersion && (gvk == nil) { - for _, gv := range m.defaultGroupVersions { - if gv.Group != gk.Group { - continue - } - - currGVK := gk.WithVersion(gv.Version) - if _, ok := m.kindToPluralResource[currGVK]; ok { - gvk = &currGVK - break - } - } - } - if gvk == nil { - return nil, &NoKindMatchError{PartialKind: gk.WithVersion("")} - } - - // Ensure we have a REST mapping - resource, ok := m.kindToPluralResource[*gvk] - if !ok { - found := []unversioned.GroupVersion{} - for _, gv := range m.defaultGroupVersions { - if _, ok := m.kindToPluralResource[*gvk]; ok { - found = append(found, gv) - } - } - if len(found) > 0 { - return nil, fmt.Errorf("object with kind %q exists in versions %v, not %v", gvk.Kind, found, gvk.GroupVersion().String()) - } - return nil, fmt.Errorf("the provided version %q and kind %q cannot be mapped to a supported object", gvk.GroupVersion().String(), gvk.Kind) - } - - // Ensure we have a REST scope - scope, ok := m.kindToScope[*gvk] - if !ok { - return nil, fmt.Errorf("the provided version %q and kind %q cannot be mapped to a supported scope", gvk.GroupVersion().String(), gvk.Kind) - } - - interfaces, err := m.interfacesFunc(gvk.GroupVersion()) - if err != nil { - return nil, fmt.Errorf("the provided version %q has no relevant versions: %v", gvk.GroupVersion().String(), err) - } - - retVal := &RESTMapping{ - Resource: resource.Resource, - GroupVersionKind: *gvk, - Scope: scope, - - ObjectConvertor: interfaces.ObjectConvertor, - MetadataAccessor: interfaces.MetadataAccessor, - } - - return retVal, nil -} - -// RESTMappings returns the RESTMappings for the provided group kind in a rough internal preferred order. If no -// kind is found it will return a NoResourceMatchError. 
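
RESTMapping above tries the explicitly requested versions first and only then falls back to the mapper's default group versions. A hedged usage sketch, assuming the pre-apimachinery import paths still used by this vendored tree; the helper name is mine, not part of the vendored code:

package restmapping

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/meta"
	"k8s.io/kubernetes/pkg/api/unversioned"
)

// printMapping resolves a GroupKind through any RESTMapper, such as the
// DefaultRESTMapper shown above, preferring v1beta1 when it is registered.
func printMapping(m meta.RESTMapper) {
	gk := unversioned.GroupKind{Group: "extensions", Kind: "Deployment"}
	mapping, err := m.RESTMapping(gk, "v1beta1")
	if err != nil {
		// A NoKindMatchError means the kind is not registered for any
		// requested or default version.
		fmt.Println("no mapping:", err)
		return
	}
	fmt.Println(mapping.Resource, mapping.GroupVersionKind)
}
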
-func (m *DefaultRESTMapper) RESTMappings(gk unversioned.GroupKind) ([]*RESTMapping, error) { - // Use the default preferred versions - var mappings []*RESTMapping - for _, gv := range m.defaultGroupVersions { - if gv.Group != gk.Group { - continue - } - - gvk := gk.WithVersion(gv.Version) - gvr, ok := m.kindToPluralResource[gvk] - if !ok { - continue - } - - // Ensure we have a REST scope - scope, ok := m.kindToScope[gvk] - if !ok { - return nil, fmt.Errorf("the provided version %q and kind %q cannot be mapped to a supported scope", gvk.GroupVersion(), gvk.Kind) - } - - interfaces, err := m.interfacesFunc(gvk.GroupVersion()) - if err != nil { - return nil, fmt.Errorf("the provided version %q has no relevant versions: %v", gvk.GroupVersion().String(), err) - } - - mappings = append(mappings, &RESTMapping{ - Resource: gvr.Resource, - GroupVersionKind: gvk, - Scope: scope, - - ObjectConvertor: interfaces.ObjectConvertor, - MetadataAccessor: interfaces.MetadataAccessor, - }) - } - - if len(mappings) == 0 { - return nil, &NoResourceMatchError{PartialResource: unversioned.GroupVersionResource{Group: gk.Group, Resource: gk.Kind}} - } - return mappings, nil -} - -// AddResourceAlias maps aliases to resources -func (m *DefaultRESTMapper) AddResourceAlias(alias string, resources ...string) { - if len(resources) == 0 { - return - } - m.aliasToResource[alias] = resources -} - -// AliasesForResource returns whether a resource has an alias or not -func (m *DefaultRESTMapper) AliasesForResource(alias string) ([]string, bool) { - if res, ok := m.aliasToResource[alias]; ok { - return res, true - } - return nil, false -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/unstructured.go b/vendor/k8s.io/kubernetes/pkg/api/meta/unstructured.go deleted file mode 100644 index 784cbf05c3..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/unstructured.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// InterfacesForUnstructured returns VersionInterfaces suitable for -// dealing with runtime.Unstructured objects. 
-func InterfacesForUnstructured(unversioned.GroupVersion) (*VersionInterfaces, error) { - return &VersionInterfaces{ - ObjectConvertor: &runtime.UnstructuredObjectConverter{}, - MetadataAccessor: NewAccessor(), - }, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/pod/BUILD b/vendor/k8s.io/kubernetes/pkg/api/pod/BUILD deleted file mode 100644 index fa65464060..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/pod/BUILD +++ /dev/null @@ -1,32 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["util.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/util/intstr:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["util_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/util/intstr:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/pod/util.go b/vendor/k8s.io/kubernetes/pkg/api/pod/util.go deleted file mode 100644 index dfc12db608..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/pod/util.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package pod - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/util/intstr" -) - -const ( - // TODO: to be de!eted after v1.3 is released. PodSpec has a dedicated Hostname field. - // The annotation value is a string specifying the hostname to be used for the pod e.g 'my-webserver-1' - PodHostnameAnnotation = "pod.beta.kubernetes.io/hostname" - - // TODO: to be de!eted after v1.3 is released. PodSpec has a dedicated Subdomain field. - // The annotation value is a string specifying the subdomain e.g. "my-web-service" - // If specified, on the pod itself, ".my-web-service..svc." would resolve to - // the pod's IP. - // If there is a headless service named "my-web-service" in the same namespace as the pod, then, - // .my-web-service..svc." would be resolved by the cluster DNS Server. - PodSubdomainAnnotation = "pod.beta.kubernetes.io/subdomain" -) - -// FindPort locates the container port for the given pod and portName. If the -// targetPort is a number, use that. If the targetPort is a string, look that -// string up in all named ports in all containers in the target pod. If no -// match is found, fail. 
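
FindPort, whose body opens the next chunk of this diff, resolves a Service targetPort that is either a number or the name of a container port. A standalone sketch of the same lookup with simplified stand-in types; it collapses the intstr handling into a plain string, so it is not a drop-in replacement for the vendored function:

package main

import "fmt"

// containerPort is a simplified stand-in for api.ContainerPort.
type containerPort struct {
	Name          string
	Protocol      string
	ContainerPort int
}

// findPort uses a numeric target as-is (the intstr.Int branch of FindPort) and
// otherwise looks the name up among the container ports (the intstr.String branch).
func findPort(ports []containerPort, target, protocol string) (int, error) {
	var n int
	if _, err := fmt.Sscanf(target, "%d", &n); err == nil {
		return n, nil
	}
	for _, p := range ports {
		if p.Name == target && p.Protocol == protocol {
			return p.ContainerPort, nil
		}
	}
	return 0, fmt.Errorf("no suitable port for target %q", target)
}

func main() {
	ports := []containerPort{{Name: "http", Protocol: "TCP", ContainerPort: 8080}}
	fmt.Println(findPort(ports, "http", "TCP")) // 8080 <nil>
}
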
-func FindPort(pod *api.Pod, svcPort *api.ServicePort) (int, error) { - portName := svcPort.TargetPort - switch portName.Type { - case intstr.String: - name := portName.StrVal - for _, container := range pod.Spec.Containers { - for _, port := range container.Ports { - if port.Name == name && port.Protocol == svcPort.Protocol { - return int(port.ContainerPort), nil - } - } - } - case intstr.Int: - return portName.IntValue(), nil - } - - return 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/ref.go b/vendor/k8s.io/kubernetes/pkg/api/ref.go index 443343a97c..370cf5513a 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/ref.go +++ b/vendor/k8s.io/kubernetes/pkg/api/ref.go @@ -22,9 +22,9 @@ import ( "net/url" "strings" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) var ( @@ -37,7 +37,7 @@ var ( // object, or an error if the object doesn't follow the conventions // that would allow this. // TODO: should take a meta.Interface see http://issue.k8s.io/7127 -func GetReference(obj runtime.Object) (*ObjectReference, error) { +func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference, error) { if obj == nil { return nil, ErrNilObject } @@ -53,7 +53,7 @@ func GetReference(obj runtime.Object) (*ObjectReference, error) { kind := gvk.Kind if len(kind) == 0 { // TODO: this is wrong - gvks, _, err := Scheme.ObjectKinds(obj) + gvks, _, err := scheme.ObjectKinds(obj) if err != nil { return nil, err } @@ -111,8 +111,8 @@ func GetReference(obj runtime.Object) (*ObjectReference, error) { } // GetPartialReference is exactly like GetReference, but allows you to set the FieldPath. -func GetPartialReference(obj runtime.Object, fieldPath string) (*ObjectReference, error) { - ref, err := GetReference(obj) +func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*ObjectReference, error) { + ref, err := GetReference(scheme, obj) if err != nil { return nil, err } @@ -122,11 +122,11 @@ func GetPartialReference(obj runtime.Object, fieldPath string) (*ObjectReference // IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that // intend only to get a reference to that object. This simplifies the event recording interface. -func (obj *ObjectReference) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { +func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() } -func (obj *ObjectReference) GroupVersionKind() unversioned.GroupVersionKind { - return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) } -func (obj *ObjectReference) GetObjectKind() unversioned.ObjectKind { return obj } +func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/vendor/k8s.io/kubernetes/pkg/api/register.go b/vendor/k8s.io/kubernetes/pkg/api/register.go index 26cb658ff4..bd842b182e 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/register.go +++ b/vendor/k8s.io/kubernetes/pkg/api/register.go @@ -17,11 +17,23 @@ limitations under the License. 
package api import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer" + "os" + + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" ) +// GroupFactoryRegistry is the APIGroupFactoryRegistry (overlaps a bit with Registry, see comments in package for details) +var GroupFactoryRegistry = make(announced.APIGroupFactoryRegistry) + +// Registry is an instance of an API registry. This is an interim step to start removing the idea of a global +// API registry. +var Registry = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS")) + // Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered. // NOTE: If you are copying this file to start a new api group, STOP! Copy the // extensions group instead. This Scheme is special and should appear ONLY in @@ -36,22 +48,22 @@ var Codecs = serializer.NewCodecFactory(Scheme) const GroupName = "" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} // Unversioned is group version for unversioned API objects // TODO: this should be v1 probably -var Unversioned = unversioned.GroupVersion{Group: "", Version: "v1"} +var Unversioned = schema.GroupVersion{Group: "", Version: "v1"} // ParameterCodec handles versioning of objects that are converted to query parameters. var ParameterCodec = runtime.NewParameterCodec(Scheme) // Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { +func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } // Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { +func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } @@ -60,22 +72,8 @@ var ( AddToScheme = SchemeBuilder.AddToScheme ) -func init() { - // TODO(lavalamp): move this call to scheme builder above. Can't - // remove it from here because lots of people inappropriately rely on it - // (specifically the unversioned time conversion). Can't have it in - // both places because then it gets double registered. Consequence of - // current state is that it only ever gets registered in the main - // api.Scheme, even though everyone that uses anything from unversioned - // needs these. - if err := addConversionFuncs(Scheme); err != nil { - // Programmer error. 
- panic(err) - } -} - func addKnownTypes(scheme *runtime.Scheme) error { - if err := scheme.AddIgnoredConversionType(&unversioned.TypeMeta{}, &unversioned.TypeMeta{}); err != nil { + if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { return err } scheme.AddKnownTypes(SchemeGroupVersion, @@ -112,11 +110,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &PersistentVolumeList{}, &PersistentVolumeClaim{}, &PersistentVolumeClaimList{}, - &DeleteOptions{}, - &ListOptions{}, &PodAttachOptions{}, &PodLogOptions{}, &PodExecOptions{}, + &PodPortForwardOptions{}, &PodProxyOptions{}, &ComponentStatus{}, &ComponentStatusList{}, @@ -128,12 +125,11 @@ func addKnownTypes(scheme *runtime.Scheme) error { // Register Unversioned types under their own special group scheme.AddUnversionedTypes(Unversioned, - &unversioned.ExportOptions{}, - &unversioned.Status{}, - &unversioned.APIVersions{}, - &unversioned.APIGroupList{}, - &unversioned.APIGroup{}, - &unversioned.APIResourceList{}, + &metav1.Status{}, + &metav1.APIVersions{}, + &metav1.APIGroupList{}, + &metav1.APIGroup{}, + &metav1.APIResourceList{}, ) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/api/requestcontext.go b/vendor/k8s.io/kubernetes/pkg/api/requestcontext.go deleted file mode 100644 index 724fea811d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/requestcontext.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "errors" - "net/http" - "sync" - - "github.com/golang/glog" -) - -// RequestContextMapper keeps track of the context associated with a particular request -type RequestContextMapper interface { - // Get returns the context associated with the given request (if any), and true if the request has an associated context, and false if it does not. - Get(req *http.Request) (Context, bool) - // Update maps the request to the given context. If no context was previously associated with the request, an error is returned. - // Update should only be called with a descendant context of the previously associated context. - // Updating to an unrelated context may return an error in the future. - // The context associated with a request should only be updated by a limited set of callers. - // Valid examples include the authentication layer, or an audit/tracing layer. - Update(req *http.Request, context Context) error -} - -type requestContextMap struct { - contexts map[*http.Request]Context - lock sync.Mutex -} - -// NewRequestContextMapper returns a new RequestContextMapper. -// The returned mapper must be added as a request filter using NewRequestContextFilter. -func NewRequestContextMapper() RequestContextMapper { - return &requestContextMap{ - contexts: make(map[*http.Request]Context), - } -} - -// Get returns the context associated with the given request (if any), and true if the request has an associated context, and false if it does not. 
-// Get will only return a valid context when called from inside the filter chain set up by NewRequestContextFilter() -func (c *requestContextMap) Get(req *http.Request) (Context, bool) { - c.lock.Lock() - defer c.lock.Unlock() - context, ok := c.contexts[req] - return context, ok -} - -// Update maps the request to the given context. -// If no context was previously associated with the request, an error is returned and the context is ignored. -func (c *requestContextMap) Update(req *http.Request, context Context) error { - c.lock.Lock() - defer c.lock.Unlock() - if _, ok := c.contexts[req]; !ok { - return errors.New("No context associated") - } - // TODO: ensure the new context is a descendant of the existing one - c.contexts[req] = context - return nil -} - -// init maps the request to the given context and returns true if there was no context associated with the request already. -// if a context was already associated with the request, it ignores the given context and returns false. -// init is intentionally unexported to ensure that all init calls are paired with a remove after a request is handled -func (c *requestContextMap) init(req *http.Request, context Context) bool { - c.lock.Lock() - defer c.lock.Unlock() - if _, exists := c.contexts[req]; exists { - return false - } - c.contexts[req] = context - return true -} - -// remove is intentionally unexported to ensure that the context is not removed until a request is handled -func (c *requestContextMap) remove(req *http.Request) { - c.lock.Lock() - defer c.lock.Unlock() - delete(c.contexts, req) -} - -// WithRequestContext ensures there is a Context object associated with the request before calling the passed handler. -// After the passed handler runs, the context is cleaned up. -func WithRequestContext(handler http.Handler, mapper RequestContextMapper) http.Handler { - rcMap, ok := mapper.(*requestContextMap) - if !ok { - glog.Fatal("Unknown RequestContextMapper implementation.") - } - - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - if rcMap.init(req, NewContext()) { - // If we were the ones to successfully initialize, pair with a remove - defer rcMap.remove(req) - } - handler.ServeHTTP(w, req) - }) -} - -// IsEmpty returns true if there are no contexts registered, or an error if it could not be determined. Intended for use by tests. 
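
WithRequestContext above is what makes the per-request map safe: it pairs an init before the wrapped handler with a remove afterwards, so entries cannot outlive their request. A condensed, hypothetical version of the same bookkeeping using only the standard library:

package main

import (
	"net/http"
	"sync"
)

// contextMap is an illustrative stand-in for the deleted requestContextMap; the
// value type is arbitrary for this sketch.
type contextMap struct {
	mu   sync.Mutex
	data map[*http.Request]string
}

// filter stores an entry before the wrapped handler runs and always deletes it
// afterwards, mirroring the init/remove pairing of WithRequestContext.
func (c *contextMap) filter(handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		c.mu.Lock()
		c.data[req] = "request-scoped value"
		c.mu.Unlock()

		defer func() {
			c.mu.Lock()
			delete(c.data, req)
			c.mu.Unlock()
		}()

		handler.ServeHTTP(w, req)
	})
}

func main() {
	m := &contextMap{data: map[*http.Request]string{}}
	http.ListenAndServe(":8080", m.filter(http.NotFoundHandler()))
}
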
-func IsEmpty(requestsToContexts RequestContextMapper) (bool, error) { - if requestsToContexts, ok := requestsToContexts.(*requestContextMap); ok { - return len(requestsToContexts.contexts) == 0, nil - } - return true, errors.New("Unknown RequestContextMapper implementation") -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/BUILD b/vendor/k8s.io/kubernetes/pkg/api/resource/BUILD deleted file mode 100644 index 77e315a536..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/resource/BUILD +++ /dev/null @@ -1,56 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "amount.go", - "generated.pb.go", - "math.go", - "quantity.go", - "quantity_proto.go", - "scale_int.go", - "suffix.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/genericapiserver/openapi/common:go_default_library", - "//vendor:github.com/go-openapi/spec", - "//vendor:github.com/gogo/protobuf/proto", - "//vendor:github.com/spf13/pflag", - "//vendor:gopkg.in/inf.v0", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "amount_test.go", - "math_test.go", - "quantity_test.go", - "scale_int_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//vendor:github.com/google/gofuzz", - "//vendor:github.com/spf13/pflag", - "//vendor:gopkg.in/inf.v0", - ], -) - -go_test( - name = "go_default_xtest", - srcs = ["quantity_example_test.go"], - tags = ["automanaged"], - deps = ["//pkg/api/resource:go_default_library"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/api/resource/generated.pb.go deleted file mode 100644 index 1c152c4e82..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/resource/generated.pb.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/api/resource/generated.proto -// DO NOT EDIT! - -/* - Package resource is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/api/resource/generated.proto - - It has these top-level messages: - Quantity -*/ -package resource - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-const _ = proto.GoGoProtoPackageIsVersion1 - -func (m *Quantity) Reset() { *m = Quantity{} } -func (*Quantity) ProtoMessage() {} -func (*Quantity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func init() { - proto.RegisterType((*Quantity)(nil), "k8s.io.kubernetes.pkg.api.resource.Quantity") -} - -var fileDescriptorGenerated = []byte{ - // 236 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x8f, 0xa1, 0x4e, 0x03, 0x41, - 0x10, 0x86, 0x67, 0x0d, 0x29, 0x27, 0x1b, 0x42, 0x48, 0xc5, 0x5e, 0x53, 0x45, 0x48, 0xd8, 0x09, - 0xa8, 0x06, 0xc9, 0x1b, 0x80, 0xc4, 0xdd, 0x95, 0x61, 0x99, 0x1c, 0xec, 0x6e, 0x76, 0x67, 0x05, - 0xae, 0x12, 0x59, 0x89, 0xec, 0xbd, 0x4d, 0x65, 0x25, 0x02, 0xc1, 0x1d, 0x2f, 0x42, 0x72, 0xa5, - 0x21, 0x21, 0xb8, 0xf9, 0xc4, 0x37, 0xf9, 0xfe, 0xe2, 0xb2, 0x99, 0x27, 0xc3, 0x1e, 0x9b, 0x5c, - 0x53, 0x74, 0x24, 0x94, 0x30, 0x34, 0x16, 0xab, 0xc0, 0x18, 0x29, 0xf9, 0x1c, 0x17, 0x84, 0x96, - 0x1c, 0xc5, 0x4a, 0xe8, 0xde, 0x84, 0xe8, 0xc5, 0x8f, 0x67, 0x3b, 0xc7, 0xfc, 0x3a, 0x26, 0x34, - 0xd6, 0x54, 0x81, 0xcd, 0xde, 0x99, 0x9c, 0x5b, 0x96, 0xc7, 0x5c, 0x9b, 0x85, 0x7f, 0x46, 0xeb, - 0xad, 0xc7, 0x41, 0xad, 0xf3, 0xc3, 0x40, 0x03, 0x0c, 0xd7, 0xee, 0xe5, 0xe4, 0xe2, 0xff, 0x8c, - 0x2c, 0xfc, 0x84, 0xec, 0x24, 0x49, 0xfc, 0x5b, 0x31, 0x9b, 0x17, 0xa3, 0x9b, 0x5c, 0x39, 0x61, - 0x79, 0x19, 0x1f, 0x17, 0x07, 0x49, 0x22, 0x3b, 0x7b, 0xa2, 0xa6, 0xea, 0xf4, 0xf0, 0xf6, 0x87, - 0xae, 0x8e, 0xde, 0xd6, 0x25, 0xbc, 0xb6, 0x25, 0xac, 0xda, 0x12, 0xd6, 0x6d, 0x09, 0xcb, 0x8f, - 0x29, 0x5c, 0x9f, 0x6d, 0x3a, 0x0d, 0xdb, 0x4e, 0xc3, 0x7b, 0xa7, 0x61, 0xd9, 0x6b, 0xb5, 0xe9, - 0xb5, 0xda, 0xf6, 0x5a, 0x7d, 0xf6, 0x5a, 0xad, 0xbe, 0x34, 0xdc, 0x8d, 0xf6, 0x3b, 0xbe, 0x03, - 0x00, 0x00, 0xff, 0xff, 0x90, 0x1c, 0x7f, 0xff, 0x20, 0x01, 0x00, 0x00, -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/generated.proto b/vendor/k8s.io/kubernetes/pkg/api/resource/generated.proto deleted file mode 100644 index c26921d57d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/resource/generated.proto +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.api.resource; - -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "resource"; - -// Quantity is a fixed-point representation of a number. -// It provides convenient marshaling/unmarshaling in JSON and YAML, -// in addition to String() and Int64() accessors. -// -// The serialization format is: -// -// <quantity> ::= <signedNumber><suffix> -// (Note that <suffix> may be empty, from the "" case in <decimalSI>.) -// <digit> ::= 0 | 1 | ... | 9 -// <digits> ::= <digit> | <digit><digits> -// <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits>
-// <sign> ::= "+" | "-" -// <signedNumber> ::= <number> | <sign><number> -// <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> -// <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei -// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) -// <decimalSI> ::= m | "" | k | M | G | T | P | E -// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) -// <decimalExponent> ::= "e" <signedNumber> | "E" <signedNumber> -// -// No matter which of the three exponent forms is used, no quantity may represent -// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal -// places. Numbers larger or more precise will be capped or rounded up. -// (E.g.: 0.1m will rounded up to 1m.) -// This may be extended in the future if we require larger or smaller quantities. -// -// When a Quantity is parsed from a string, it will remember the type of suffix -// it had, and will use the same type again when it is serialized. -// -// Before serializing, Quantity will be put in "canonical form". -// This means that Exponent/suffix will be adjusted up or down (with a -// corresponding increase or decrease in Mantissa) such that: -// a. No precision is lost -// b. No fractional digits will be emitted -// c. The exponent (or suffix) is as large as possible. -// The sign will be omitted unless the number is negative. -// -// Examples: -// 1.5 will be serialized as "1500m" -// 1.5Gi will be serialized as "1536Mi" -// -// NOTE: We reserve the right to amend this canonical format, perhaps to -// allow 1.5 to be canonical. -// TODO: Remove above disclaimer after all bikeshedding about format is over, -// or after March 2015. -// -// Note that the quantity will NEVER be internally represented by a -// floating point number. That is the whole point of this exercise. -// -// Non-canonical values will still parse as long as they are well formed, -// but will be re-emitted in their canonical form. (So always use canonical -// form, or don't diff.) -// -// This format is intended to make it difficult to use these numbers without -// writing some sort of special handling code in the hopes that that will -// cause implementors to also use a fixed point implementation. -// -// +protobuf=true -// +protobuf.embed=string -// +protobuf.options.marshal=false -// +protobuf.options.(gogoproto.goproto_stringer)=false -// +k8s:openapi-gen=true -message Quantity { - optional string string = 1; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource_helpers.go b/vendor/k8s.io/kubernetes/pkg/api/resource_helpers.go index e926094bb6..88d0f80d76 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/resource_helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/api/resource_helpers.go @@ -19,8 +19,8 @@ package api import ( "time" - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // Returns string version of ResourceName. @@ -81,7 +81,7 @@ func GetExistingContainerStatus(statuses []ContainerStatus, name string) Contain // of that, there are two cases when a pod can be considered available: // 1. minReadySeconds == 0, or // 2. LastTransitionTime (is set) + minReadySeconds < current time -func IsPodAvailable(pod *Pod, minReadySeconds int32, now unversioned.Time) bool { +func IsPodAvailable(pod *Pod, minReadySeconds int32, now metav1.Time) bool { if !IsPodReady(pod) { return false } @@ -144,7 +144,7 @@ func GetNodeCondition(status *NodeStatus, conditionType NodeConditionType) (int, // status has changed. // Returns true if pod condition has changed or has been added.
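
The canonical-form rules in the Quantity comment above are easy to check against the resource package this tree now vendors under k8s.io/apimachinery; a minimal sketch, with example values taken straight from that comment:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Binary-SI quantities stay in the binary-SI family when canonicalized.
	q := resource.MustParse("1.5Gi")
	fmt.Println(q.String()) // 1536Mi

	// Decimal fractions are re-emitted with the milli suffix so no precision is lost.
	d := resource.MustParse("1.5")
	fmt.Println(d.String()) // 1500m
}
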
func UpdatePodCondition(status *PodStatus, condition *PodCondition) bool { - condition.LastTransitionTime = unversioned.Now() + condition.LastTransitionTime = metav1.Now() // Try to find this pod condition. conditionIndex, oldCondition := GetPodCondition(status, condition.Type) diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/BUILD b/vendor/k8s.io/kubernetes/pkg/api/rest/BUILD deleted file mode 100644 index 7183fb7e98..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/rest/BUILD +++ /dev/null @@ -1,36 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "create.go", - "delete.go", - "doc.go", - "export.go", - "rest.go", - "types.go", - "update.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/validation:go_default_library", - "//pkg/api/validation/path:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/validation/field:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/create.go b/vendor/k8s.io/kubernetes/pkg/api/rest/create.go deleted file mode 100644 index 4519032d36..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/rest/create.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rest - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/validation" - path "k8s.io/kubernetes/pkg/api/validation/path" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/validation/field" -) - -// RESTCreateStrategy defines the minimum validation, accepted input, and -// name generation behavior to create an object that follows Kubernetes -// API conventions. -type RESTCreateStrategy interface { - runtime.ObjectTyper - // The name generate is used when the standard GenerateName field is set. - // The NameGenerator will be invoked prior to validation. - api.NameGenerator - - // NamespaceScoped returns true if the object must be within a namespace. - NamespaceScoped() bool - // PrepareForCreate is invoked on create before validation to normalize - // the object. For example: remove fields that are not to be persisted, - // sort order-insensitive list fields, etc. This should not remove fields - // whose presence would be considered a validation error. - PrepareForCreate(ctx api.Context, obj runtime.Object) - // Validate is invoked after default fields in the object have been filled in before - // the object is persisted. This method should not mutate the object. 
- Validate(ctx api.Context, obj runtime.Object) field.ErrorList - // Canonicalize is invoked after validation has succeeded but before the - // object has been persisted. This method may mutate the object. - Canonicalize(obj runtime.Object) -} - -// BeforeCreate ensures that common operations for all resources are performed on creation. It only returns -// errors that can be converted to api.Status. It invokes PrepareForCreate, then GenerateName, then Validate. -// It returns nil if the object should be created. -func BeforeCreate(strategy RESTCreateStrategy, ctx api.Context, obj runtime.Object) error { - objectMeta, kind, kerr := objectMetaAndKind(strategy, obj) - if kerr != nil { - return kerr - } - - if strategy.NamespaceScoped() { - if !api.ValidNamespace(ctx, objectMeta) { - return errors.NewBadRequest("the namespace of the provided object does not match the namespace sent on the request") - } - } else { - objectMeta.Namespace = api.NamespaceNone - } - objectMeta.DeletionTimestamp = nil - objectMeta.DeletionGracePeriodSeconds = nil - strategy.PrepareForCreate(ctx, obj) - api.FillObjectMetaSystemFields(ctx, objectMeta) - api.GenerateName(strategy, objectMeta) - - // ClusterName is ignored and should not be saved - objectMeta.ClusterName = "" - - if errs := strategy.Validate(ctx, obj); len(errs) > 0 { - return errors.NewInvalid(kind.GroupKind(), objectMeta.Name, errs) - } - - // Custom validation (including name validation) passed - // Now run common validation on object meta - // Do this *after* custom validation so that specific error messages are shown whenever possible - if errs := validation.ValidateObjectMeta(objectMeta, strategy.NamespaceScoped(), path.ValidatePathSegmentName, field.NewPath("metadata")); len(errs) > 0 { - return errors.NewInvalid(kind.GroupKind(), objectMeta.Name, errs) - } - - strategy.Canonicalize(obj) - - return nil -} - -// CheckGeneratedNameError checks whether an error that occurred creating a resource is due -// to generation being unable to pick a valid name. -func CheckGeneratedNameError(strategy RESTCreateStrategy, err error, obj runtime.Object) error { - if !errors.IsAlreadyExists(err) { - return err - } - - objectMeta, kind, kerr := objectMetaAndKind(strategy, obj) - if kerr != nil { - return kerr - } - - if len(objectMeta.GenerateName) == 0 { - return err - } - - return errors.NewServerTimeoutForKind(kind.GroupKind(), "POST", 0) -} - -// objectMetaAndKind retrieves kind and ObjectMeta from a runtime object, or returns an error. -func objectMetaAndKind(typer runtime.ObjectTyper, obj runtime.Object) (*api.ObjectMeta, unversioned.GroupVersionKind, error) { - objectMeta, err := api.ObjectMetaFor(obj) - if err != nil { - return nil, unversioned.GroupVersionKind{}, errors.NewInternalError(err) - } - kinds, _, err := typer.ObjectKinds(obj) - if err != nil { - return nil, unversioned.GroupVersionKind{}, errors.NewInternalError(err) - } - return objectMeta, kinds[0], nil -} - -// NamespaceScopedStrategy has a method to tell if the object must be in a namespace. -type NamespaceScopedStrategy interface { - // NamespaceScoped returns if the object must be in a namespace. - NamespaceScoped() bool -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/delete.go b/vendor/k8s.io/kubernetes/pkg/api/rest/delete.go deleted file mode 100644 index 3f853d8c54..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/rest/delete.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rest - -import ( - "fmt" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// RESTDeleteStrategy defines deletion behavior on an object that follows Kubernetes -// API conventions. -type RESTDeleteStrategy interface { - runtime.ObjectTyper -} - -type GarbageCollectionPolicy string - -const ( - DeleteDependents GarbageCollectionPolicy = "DeleteDependents" - OrphanDependents GarbageCollectionPolicy = "OrphanDependents" -) - -// GarbageCollectionDeleteStrategy must be implemented by the registry that wants to -// orphan dependents by default. -type GarbageCollectionDeleteStrategy interface { - // DefaultGarbageCollectionPolicy returns the default garbage collection behavior. - DefaultGarbageCollectionPolicy() GarbageCollectionPolicy -} - -// RESTGracefulDeleteStrategy must be implemented by the registry that supports -// graceful deletion. -type RESTGracefulDeleteStrategy interface { - // CheckGracefulDelete should return true if the object can be gracefully deleted and set - // any default values on the DeleteOptions. - CheckGracefulDelete(ctx api.Context, obj runtime.Object, options *api.DeleteOptions) bool -} - -// BeforeDelete tests whether the object can be gracefully deleted. If graceful is set the object -// should be gracefully deleted, if gracefulPending is set the object has already been gracefully deleted -// (and the provided grace period is longer than the time to deletion), and an error is returned if the -// condition cannot be checked or the gracePeriodSeconds is invalid. The options argument may be updated with -// default values if graceful is true. Second place where we set deletionTimestamp is pkg/registry/generic/registry/store.go -// this function is responsible for setting deletionTimestamp during gracefulDeletion, other one for cascading deletions. -func BeforeDelete(strategy RESTDeleteStrategy, ctx api.Context, obj runtime.Object, options *api.DeleteOptions) (graceful, gracefulPending bool, err error) { - objectMeta, gvk, kerr := objectMetaAndKind(strategy, obj) - if kerr != nil { - return false, false, kerr - } - // Checking the Preconditions here to fail early. They'll be enforced later on when we actually do the deletion, too. - if options.Preconditions != nil && options.Preconditions.UID != nil && *options.Preconditions.UID != objectMeta.UID { - return false, false, errors.NewConflict(unversioned.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, objectMeta.Name, fmt.Errorf("the UID in the precondition (%s) does not match the UID in record (%s). The object might have been deleted and then recreated", *options.Preconditions.UID, objectMeta.UID)) - } - gracefulStrategy, ok := strategy.(RESTGracefulDeleteStrategy) - if !ok { - // If we're not deleting gracefully there's no point in updating Generation, as we won't update - // the obcject before deleting it. 
- return false, false, nil - } - // if the object is already being deleted, no need to update generation. - if objectMeta.DeletionTimestamp != nil { - // if we are already being deleted, we may only shorten the deletion grace period - // this means the object was gracefully deleted previously but deletionGracePeriodSeconds was not set, - // so we force deletion immediately - // IMPORTANT: - // The deletion operation happens in two phases. - // 1. Update to set DeletionGracePeriodSeconds and DeletionTimestamp - // 2. Delete the object from storage. - // If the update succeeds, but the delete fails (network error, internal storage error, etc.), - // a resource was previously left in a state that was non-recoverable. We - // check if the existing stored resource has a grace period as 0 and if so - // attempt to delete immediately in order to recover from this scenario. - if objectMeta.DeletionGracePeriodSeconds == nil || *objectMeta.DeletionGracePeriodSeconds == 0 { - return false, false, nil - } - // only a shorter grace period may be provided by a user - if options.GracePeriodSeconds != nil { - period := int64(*options.GracePeriodSeconds) - if period >= *objectMeta.DeletionGracePeriodSeconds { - return false, true, nil - } - newDeletionTimestamp := unversioned.NewTime( - objectMeta.DeletionTimestamp.Add(-time.Second * time.Duration(*objectMeta.DeletionGracePeriodSeconds)). - Add(time.Second * time.Duration(*options.GracePeriodSeconds))) - objectMeta.DeletionTimestamp = &newDeletionTimestamp - objectMeta.DeletionGracePeriodSeconds = &period - return true, false, nil - } - // graceful deletion is pending, do nothing - options.GracePeriodSeconds = objectMeta.DeletionGracePeriodSeconds - return false, true, nil - } - - if !gracefulStrategy.CheckGracefulDelete(ctx, obj, options) { - return false, false, nil - } - now := unversioned.NewTime(unversioned.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds))) - objectMeta.DeletionTimestamp = &now - objectMeta.DeletionGracePeriodSeconds = options.GracePeriodSeconds - // If it's the first graceful deletion we are going to set the DeletionTimestamp to non-nil. - // Controllers of the object that's being deleted shouldn't take any nontrivial actions, hence its behavior changes. - // Thus we need to bump object's Generation (if set). This handles generation bump during graceful deletion. - // The bump for objects that don't support graceful deletion is handled in pkg/registry/generic/registry/store.go. - if objectMeta.Generation > 0 { - objectMeta.Generation++ - } - return true, false, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/doc.go b/vendor/k8s.io/kubernetes/pkg/api/rest/doc.go deleted file mode 100644 index ee7c4145f4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/rest/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package rest defines common logic around changes to Kubernetes resources. 
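
The grace-period-shortening branch of BeforeDelete above recomputes the deletion timestamp as the original request time (DeletionTimestamp minus the old grace period) plus the new, shorter period. The same arithmetic in isolation, with plain time values standing in for unversioned.Time:

package main

import (
	"fmt"
	"time"
)

// shortenGrace re-derives the deletion timestamp for a shorter grace period,
// matching the newDeletionTimestamp computation above.
func shortenGrace(deletionTimestamp time.Time, oldPeriod, newPeriod int64) time.Time {
	return deletionTimestamp.
		Add(-time.Second * time.Duration(oldPeriod)).
		Add(time.Second * time.Duration(newPeriod))
}

func main() {
	requestedAt := time.Date(2017, 3, 1, 12, 0, 0, 0, time.UTC)
	deletionTimestamp := requestedAt.Add(30 * time.Second) // original 30s grace period
	fmt.Println(shortenGrace(deletionTimestamp, 30, 5))    // now expires 5s after the request
}
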
-package rest diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/export.go b/vendor/k8s.io/kubernetes/pkg/api/rest/export.go deleted file mode 100644 index f072517583..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/rest/export.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rest - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/runtime" -) - -// RESTExportStrategy is the interface that defines how to export a Kubernetes object -type RESTExportStrategy interface { - // Export strips fields that can not be set by the user. If 'exact' is false - // fields specific to the cluster are also stripped - Export(ctx api.Context, obj runtime.Object, exact bool) error -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/rest.go b/vendor/k8s.io/kubernetes/pkg/api/rest/rest.go deleted file mode 100644 index 2098ccdbe7..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/rest/rest.go +++ /dev/null @@ -1,310 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rest - -import ( - "io" - "net/http" - "net/url" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/watch" -) - -//TODO: -// Storage interfaces need to be separated into two groups; those that operate -// on collections and those that operate on individually named items. -// Collection interfaces: -// (Method: Current -> Proposed) -// GET: Lister -> CollectionGetter -// WATCH: Watcher -> CollectionWatcher -// CREATE: Creater -> CollectionCreater -// DELETE: (n/a) -> CollectionDeleter -// UPDATE: (n/a) -> CollectionUpdater -// -// Single item interfaces: -// (Method: Current -> Proposed) -// GET: Getter -> NamedGetter -// WATCH: (n/a) -> NamedWatcher -// CREATE: (n/a) -> NamedCreater -// DELETE: Deleter -> NamedDeleter -// UPDATE: Update -> NamedUpdater - -// Storage is a generic interface for RESTful storage services. -// Resources which are exported to the RESTful API of apiserver need to implement this interface. It is expected -// that objects may implement any of the below interfaces. -type Storage interface { - // New returns an empty object that can be used with Create and Update after request data has been put into it. - // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) - New() runtime.Object -} - -// KindProvider specifies a different kind for its API than for its internal storage. 
This is necessary for external -// objects that are not compiled into the api server. For such objects, there is no in-memory representation for -// the object, so they must be represented as generic objects (e.g. runtime.Unknown), but when we present the object as part of -// API discovery we want to present the specific kind, not the generic internal representation. -type KindProvider interface { - Kind() string -} - -// Lister is an object that can retrieve resources that match the provided field and label criteria. -type Lister interface { - // NewList returns an empty object that can be used with the List call. - // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) - NewList() runtime.Object - // List selects resources in the storage which match to the selector. 'options' can be nil. - List(ctx api.Context, options *api.ListOptions) (runtime.Object, error) -} - -// Exporter is an object that knows how to strip a RESTful resource for export -type Exporter interface { - // Export an object. Fields that are not user specified (e.g. Status, ObjectMeta.ResourceVersion) are stripped out - // Returns the stripped object. If 'exact' is true, fields that are specific to the cluster (e.g. namespace) are - // retained, otherwise they are stripped also. - Export(ctx api.Context, name string, opts unversioned.ExportOptions) (runtime.Object, error) -} - -// Getter is an object that can retrieve a named RESTful resource. -type Getter interface { - // Get finds a resource in the storage by name and returns it. - // Although it can return an arbitrary error value, IsNotFound(err) is true for the - // returned error value err when the specified resource is not found. - Get(ctx api.Context, name string) (runtime.Object, error) -} - -// GetterWithOptions is an object that retrieve a named RESTful resource and takes -// additional options on the get request. It allows a caller to also receive the -// subpath of the GET request. -type GetterWithOptions interface { - // Get finds a resource in the storage by name and returns it. - // Although it can return an arbitrary error value, IsNotFound(err) is true for the - // returned error value err when the specified resource is not found. - // The options object passed to it is of the same type returned by the NewGetOptions - // method. - Get(ctx api.Context, name string, options runtime.Object) (runtime.Object, error) - - // NewGetOptions returns an empty options object that will be used to pass - // options to the Get method. It may return a bool and a string, if true, the - // value of the request path below the object will be included as the named - // string in the serialization of the runtime object. E.g., returning "path" - // will convert the trailing request scheme value to "path" in the map[string][]string - // passed to the converter. - NewGetOptions() (runtime.Object, bool, string) -} - -// Deleter is an object that can delete a named RESTful resource. -type Deleter interface { - // Delete finds a resource in the storage and deletes it. - // Although it can return an arbitrary error value, IsNotFound(err) is true for the - // returned error value err when the specified resource is not found. - // Delete *may* return the object that was deleted, or a status object indicating additional - // information about deletion. - Delete(ctx api.Context, name string) (runtime.Object, error) -} - -// GracefulDeleter knows how to pass deletion options to allow delayed deletion of a -// RESTful object. 
-type GracefulDeleter interface { - // Delete finds a resource in the storage and deletes it. - // If options are provided, the resource will attempt to honor them or return an invalid - // request error. - // Although it can return an arbitrary error value, IsNotFound(err) is true for the - // returned error value err when the specified resource is not found. - // Delete *may* return the object that was deleted, or a status object indicating additional - // information about deletion. - Delete(ctx api.Context, name string, options *api.DeleteOptions) (runtime.Object, error) -} - -// GracefulDeleteAdapter adapts the Deleter interface to GracefulDeleter -type GracefulDeleteAdapter struct { - Deleter -} - -// Delete implements RESTGracefulDeleter in terms of Deleter -func (w GracefulDeleteAdapter) Delete(ctx api.Context, name string, options *api.DeleteOptions) (runtime.Object, error) { - return w.Deleter.Delete(ctx, name) -} - -// CollectionDeleter is an object that can delete a collection -// of RESTful resources. -type CollectionDeleter interface { - // DeleteCollection selects all resources in the storage matching given 'listOptions' - // and deletes them. If 'options' are provided, the resource will attempt to honor - // them or return an invalid request error. - // DeleteCollection may not be atomic - i.e. it may delete some objects and still - // return an error after it. On success, returns a list of deleted objects. - DeleteCollection(ctx api.Context, options *api.DeleteOptions, listOptions *api.ListOptions) (runtime.Object, error) -} - -// Creater is an object that can create an instance of a RESTful object. -type Creater interface { - // New returns an empty object that can be used with Create after request data has been put into it. - // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) - New() runtime.Object - - // Create creates a new version of a resource. - Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) -} - -// NamedCreater is an object that can create an instance of a RESTful object using a name parameter. -type NamedCreater interface { - // New returns an empty object that can be used with Create after request data has been put into it. - // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) - New() runtime.Object - - // Create creates a new version of a resource. It expects a name parameter from the path. - // This is needed for create operations on subresources which include the name of the parent - // resource in the path. - Create(ctx api.Context, name string, obj runtime.Object) (runtime.Object, error) -} - -// UpdatedObjectInfo provides information about an updated object to an Updater. -// It requires access to the old object in order to return the newly updated object. -type UpdatedObjectInfo interface { - // Returns preconditions built from the updated object, if applicable. - // May return nil, or a preconditions object containing nil fields, - // if no preconditions can be determined from the updated object. - Preconditions() *api.Preconditions - - // UpdatedObject returns the updated object, given a context and old object. - // The only time an empty oldObj should be passed in is if a "create on update" is occurring (there is no oldObj). - UpdatedObject(ctx api.Context, oldObj runtime.Object) (newObj runtime.Object, err error) -} - -// Updater is an object that can update an instance of a RESTful object. 
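
GracefulDeleteAdapter above relies on a small but useful Go pattern: an explicitly declared Delete shadows the method promoted from the embedded Deleter, so a non-graceful store can be handed to callers that expect the graceful signature. The same pattern with simplified stand-in types (none of these names are the vendored ones):

package main

import "fmt"

type object interface{}

type deleteOptions struct{ GracePeriodSeconds *int64 }

type deleter interface {
	Delete(name string) (object, error)
}

type gracefulDeleter interface {
	Delete(name string, options *deleteOptions) (object, error)
}

// gracefulDeleteAdapter implements gracefulDeleter in terms of deleter by
// discarding the options, just like the vendored adapter.
type gracefulDeleteAdapter struct{ deleter }

func (w gracefulDeleteAdapter) Delete(name string, _ *deleteOptions) (object, error) {
	return w.deleter.Delete(name)
}

// memStore is a toy deleter.
type memStore struct{}

func (memStore) Delete(name string) (object, error) { return "deleted " + name, nil }

func main() {
	var gd gracefulDeleter = gracefulDeleteAdapter{memStore{}}
	obj, _ := gd.Delete("example", nil)
	fmt.Println(obj) // deleted example
}
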
-type Updater interface { - // New returns an empty object that can be used with Update after request data has been put into it. - // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) - New() runtime.Object - - // Update finds a resource in the storage and updates it. Some implementations - // may allow updates creates the object - they should set the created boolean - // to true. - Update(ctx api.Context, name string, objInfo UpdatedObjectInfo) (runtime.Object, bool, error) -} - -// CreaterUpdater is a storage object that must support both create and update. -// Go prevents embedded interfaces that implement the same method. -type CreaterUpdater interface { - Creater - Update(ctx api.Context, name string, objInfo UpdatedObjectInfo) (runtime.Object, bool, error) -} - -// CreaterUpdater must satisfy the Updater interface. -var _ Updater = CreaterUpdater(nil) - -// Patcher is a storage object that supports both get and update. -type Patcher interface { - Getter - Updater -} - -// Watcher should be implemented by all Storage objects that -// want to offer the ability to watch for changes through the watch api. -type Watcher interface { - // 'label' selects on labels; 'field' selects on the object's fields. Not all fields - // are supported; an error should be returned if 'field' tries to select on a field that - // isn't supported. 'resourceVersion' allows for continuing/starting a watch at a - // particular version. - Watch(ctx api.Context, options *api.ListOptions) (watch.Interface, error) -} - -// StandardStorage is an interface covering the common verbs. Provided for testing whether a -// resource satisfies the normal storage methods. Use Storage when passing opaque storage objects. -type StandardStorage interface { - Getter - Lister - CreaterUpdater - GracefulDeleter - CollectionDeleter - Watcher -} - -// Redirector know how to return a remote resource's location. -type Redirector interface { - // ResourceLocation should return the remote location of the given resource, and an optional transport to use to request it, or an error. - ResourceLocation(ctx api.Context, id string) (remoteLocation *url.URL, transport http.RoundTripper, err error) -} - -// Responder abstracts the normal response behavior for a REST method and is passed to callers that -// may wish to handle the response directly in some cases, but delegate to the normal error or object -// behavior in other cases. -type Responder interface { - // Object writes the provided object to the response. Invoking this method multiple times is undefined. - Object(statusCode int, obj runtime.Object) - // Error writes the provided error to the response. This method may only be invoked once. - Error(err error) -} - -// Connecter is a storage object that responds to a connection request. -type Connecter interface { - // Connect returns an http.Handler that will handle the request/response for a given API invocation. - // The provided responder may be used for common API responses. The responder will write both status - // code and body, so the ServeHTTP method should exit after invoking the responder. The Handler will - // be used for a single API request and then discarded. The Responder is guaranteed to write to the - // same http.ResponseWriter passed to ServeHTTP. - Connect(ctx api.Context, id string, options runtime.Object, r Responder) (http.Handler, error) - - // NewConnectOptions returns an empty options object that will be used to pass - // options to the Connect method. 
If nil, then a nil options object is passed to - // Connect. It may return a bool and a string. If true, the value of the request - // path below the object will be included as the named string in the serialization - // of the runtime object. - NewConnectOptions() (runtime.Object, bool, string) - - // ConnectMethods returns the list of HTTP methods handled by Connect - ConnectMethods() []string -} - -// ResourceStreamer is an interface implemented by objects that prefer to be streamed from the server -// instead of decoded directly. -type ResourceStreamer interface { - // InputStream should return an io.ReadCloser if the provided object supports streaming. The desired - // api version and an accept header (may be empty) are passed to the call. If no error occurs, - // the caller may return a flag indicating whether the result should be flushed as writes occur - // and a content type string that indicates the type of the stream. - // If a null stream is returned, a StatusNoContent response wil be generated. - InputStream(apiVersion, acceptHeader string) (stream io.ReadCloser, flush bool, mimeType string, err error) -} - -// StorageMetadata is an optional interface that callers can implement to provide additional -// information about their Storage objects. -type StorageMetadata interface { - // ProducesMIMETypes returns a list of the MIME types the specified HTTP verb (GET, POST, DELETE, - // PATCH) can respond with. - ProducesMIMETypes(verb string) []string - - // ProducesObject returns an object the specified HTTP verb respond with. It will overwrite storage object if - // it is not nil. Only the type of the return object matters, the value will be ignored. - ProducesObject(verb string) interface{} -} - -// ConnectRequest is an object passed to admission control for Connect operations -type ConnectRequest struct { - // Name is the name of the object on which the connect request was made - Name string - - // Options is the options object passed to the connect request. See the NewConnectOptions method on Connecter - Options runtime.Object - - // ResourcePath is the path for the resource in the REST server (ie. "pods/proxy") - ResourcePath string -} - -func (obj *ConnectRequest) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind } diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/types.go b/vendor/k8s.io/kubernetes/pkg/api/rest/types.go deleted file mode 100644 index 85e786465a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/rest/types.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rest - -import ( - "k8s.io/kubernetes/pkg/runtime" -) - -// ObjectFunc is a function to act on a given object. An error may be returned -// if the hook cannot be completed. An ObjectFunc may transform the provided -// object. -type ObjectFunc func(obj runtime.Object) error - -// AllFuncs returns an ObjectFunc that attempts to run all of the provided functions -// in order, returning early if there are any errors. 
-func AllFuncs(fns ...ObjectFunc) ObjectFunc { - return func(obj runtime.Object) error { - for _, fn := range fns { - if fn == nil { - continue - } - if err := fn(obj); err != nil { - return err - } - } - return nil - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/update.go b/vendor/k8s.io/kubernetes/pkg/api/rest/update.go deleted file mode 100644 index af65dbc463..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/rest/update.go +++ /dev/null @@ -1,225 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rest - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/validation" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/validation/field" -) - -// RESTUpdateStrategy defines the minimum validation, accepted input, and -// name generation behavior to update an object that follows Kubernetes -// API conventions. A resource may have many UpdateStrategies, depending on -// the call pattern in use. -type RESTUpdateStrategy interface { - runtime.ObjectTyper - // NamespaceScoped returns true if the object must be within a namespace. - NamespaceScoped() bool - // AllowCreateOnUpdate returns true if the object can be created by a PUT. - AllowCreateOnUpdate() bool - // PrepareForUpdate is invoked on update before validation to normalize - // the object. For example: remove fields that are not to be persisted, - // sort order-insensitive list fields, etc. This should not remove fields - // whose presence would be considered a validation error. - PrepareForUpdate(ctx api.Context, obj, old runtime.Object) - // ValidateUpdate is invoked after default fields in the object have been - // filled in before the object is persisted. This method should not mutate - // the object. - ValidateUpdate(ctx api.Context, obj, old runtime.Object) field.ErrorList - // Canonicalize is invoked after validation has succeeded but before the - // object has been persisted. This method may mutate the object. - Canonicalize(obj runtime.Object) - // AllowUnconditionalUpdate returns true if the object can be updated - // unconditionally (irrespective of the latest resource version), when - // there is no resource version specified in the object. - AllowUnconditionalUpdate() bool -} - -// TODO: add other common fields that require global validation. -func validateCommonFields(obj, old runtime.Object) (field.ErrorList, error) { - allErrs := field.ErrorList{} - objectMeta, err := api.ObjectMetaFor(obj) - if err != nil { - return nil, fmt.Errorf("failed to get new object metadata: %v", err) - } - oldObjectMeta, err := api.ObjectMetaFor(old) - if err != nil { - return nil, fmt.Errorf("failed to get old object metadata: %v", err) - } - allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(objectMeta, oldObjectMeta, field.NewPath("metadata"))...) - - return allErrs, nil -} - -// BeforeUpdate ensures that common operations for all resources are performed on update. 
It only returns -// errors that can be converted to api.Status. It will invoke update validation with the provided existing -// and updated objects. -func BeforeUpdate(strategy RESTUpdateStrategy, ctx api.Context, obj, old runtime.Object) error { - objectMeta, kind, kerr := objectMetaAndKind(strategy, obj) - if kerr != nil { - return kerr - } - if strategy.NamespaceScoped() { - if !api.ValidNamespace(ctx, objectMeta) { - return errors.NewBadRequest("the namespace of the provided object does not match the namespace sent on the request") - } - } else { - objectMeta.Namespace = api.NamespaceNone - } - // Ensure requests cannot update generation - oldMeta, err := api.ObjectMetaFor(old) - if err != nil { - return err - } - objectMeta.Generation = oldMeta.Generation - - strategy.PrepareForUpdate(ctx, obj, old) - - // ClusterName is ignored and should not be saved - objectMeta.ClusterName = "" - - // Ensure some common fields, like UID, are validated for all resources. - errs, err := validateCommonFields(obj, old) - if err != nil { - return errors.NewInternalError(err) - } - - errs = append(errs, strategy.ValidateUpdate(ctx, obj, old)...) - if len(errs) > 0 { - return errors.NewInvalid(kind.GroupKind(), objectMeta.Name, errs) - } - - strategy.Canonicalize(obj) - - return nil -} - -// TransformFunc is a function to transform and return newObj -type TransformFunc func(ctx api.Context, newObj runtime.Object, oldObj runtime.Object) (transformedNewObj runtime.Object, err error) - -// defaultUpdatedObjectInfo implements UpdatedObjectInfo -type defaultUpdatedObjectInfo struct { - // obj is the updated object - obj runtime.Object - - // copier makes a copy of the object before returning it. - // this allows repeated calls to UpdatedObject() to return - // pristine data, even if the returned value is mutated. - copier runtime.ObjectCopier - - // transformers is an optional list of transforming functions that modify or - // replace obj using information from the context, old object, or other sources. - transformers []TransformFunc -} - -// DefaultUpdatedObjectInfo returns an UpdatedObjectInfo impl based on the specified object. -func DefaultUpdatedObjectInfo(obj runtime.Object, copier runtime.ObjectCopier, transformers ...TransformFunc) UpdatedObjectInfo { - return &defaultUpdatedObjectInfo{obj, copier, transformers} -} - -// Preconditions satisfies the UpdatedObjectInfo interface. -func (i *defaultUpdatedObjectInfo) Preconditions() *api.Preconditions { - // Attempt to get the UID out of the object - accessor, err := meta.Accessor(i.obj) - if err != nil { - // If no UID can be read, no preconditions are possible - return nil - } - - // If empty, no preconditions needed - uid := accessor.GetUID() - if len(uid) == 0 { - return nil - } - - return &api.Preconditions{UID: &uid} -} - -// UpdatedObject satisfies the UpdatedObjectInfo interface. -// It returns a copy of the held obj, passed through any configured transformers. -func (i *defaultUpdatedObjectInfo) UpdatedObject(ctx api.Context, oldObj runtime.Object) (runtime.Object, error) { - var err error - // Start with the configured object - newObj := i.obj - - // If the original is non-nil (might be nil if the first transformer builds the object from the oldObj), make a copy, - // so we don't return the original. BeforeUpdate can mutate the returned object, doing things like clearing ResourceVersion. - // If we're re-called, we need to be able to return the pristine version. 
- if newObj != nil { - newObj, err = i.copier.Copy(newObj) - if err != nil { - return nil, err - } - } - - // Allow any configured transformers to update the new object - for _, transformer := range i.transformers { - newObj, err = transformer(ctx, newObj, oldObj) - if err != nil { - return nil, err - } - } - - return newObj, nil -} - -// wrappedUpdatedObjectInfo allows wrapping an existing objInfo and -// chaining additional transformations/checks on the result of UpdatedObject() -type wrappedUpdatedObjectInfo struct { - // obj is the updated object - objInfo UpdatedObjectInfo - - // transformers is an optional list of transforming functions that modify or - // replace obj using information from the context, old object, or other sources. - transformers []TransformFunc -} - -// WrapUpdatedObjectInfo returns an UpdatedObjectInfo impl that delegates to -// the specified objInfo, then calls the passed transformers -func WrapUpdatedObjectInfo(objInfo UpdatedObjectInfo, transformers ...TransformFunc) UpdatedObjectInfo { - return &wrappedUpdatedObjectInfo{objInfo, transformers} -} - -// Preconditions satisfies the UpdatedObjectInfo interface. -func (i *wrappedUpdatedObjectInfo) Preconditions() *api.Preconditions { - return i.objInfo.Preconditions() -} - -// UpdatedObject satisfies the UpdatedObjectInfo interface. -// It delegates to the wrapped objInfo and passes the result through any configured transformers. -func (i *wrappedUpdatedObjectInfo) UpdatedObject(ctx api.Context, oldObj runtime.Object) (runtime.Object, error) { - newObj, err := i.objInfo.UpdatedObject(ctx, oldObj) - if err != nil { - return newObj, err - } - - // Allow any configured transformers to update the new object or error - for _, transformer := range i.transformers { - newObj, err = transformer(ctx, newObj, oldObj) - if err != nil { - return nil, err - } - } - - return newObj, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/service/BUILD b/vendor/k8s.io/kubernetes/pkg/api/service/BUILD deleted file mode 100644 index e97b7e1aae..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/service/BUILD +++ /dev/null @@ -1,36 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "annotations.go", - "util.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/util/net/sets:go_default_library", - "//vendor:github.com/golang/glog", - ], -) - -go_test( - name = "go_default_test", - srcs = ["util_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/util/net/sets:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/service/annotations.go b/vendor/k8s.io/kubernetes/pkg/api/service/annotations.go deleted file mode 100644 index 4b45549983..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/service/annotations.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package service - -import ( - "strconv" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" -) - -const ( - // AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers - // - // It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to - // allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow - // access only from the CIDRs currently allocated to MIT & the USPS. - // - // Not all cloud providers support this annotation, though AWS & GCE do. - AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" - - // AnnotationValueExternalTrafficLocal Value of annotation to specify local endpoints behaviour - AnnotationValueExternalTrafficLocal = "OnlyLocal" - // AnnotationValueExternalTrafficGlobal Value of annotation to specify global (legacy) behaviour - AnnotationValueExternalTrafficGlobal = "Global" - - // TODO: The alpha annotations have been deprecated, remove them when we move this feature to GA. - - // AlphaAnnotationHealthCheckNodePort Annotation specifying the healthcheck nodePort for the service - // If not specified, annotation is created by the service api backend with the allocated nodePort - // Will use user-specified nodePort value if specified by the client - AlphaAnnotationHealthCheckNodePort = "service.alpha.kubernetes.io/healthcheck-nodeport" - - // AlphaAnnotationExternalTraffic An annotation that denotes if this Service desires to route external traffic to local - // endpoints only. This preserves Source IP and avoids a second hop. - AlphaAnnotationExternalTraffic = "service.alpha.kubernetes.io/external-traffic" - - // BetaAnnotationHealthCheckNodePort is the beta version of AlphaAnnotationHealthCheckNodePort. - BetaAnnotationHealthCheckNodePort = "service.beta.kubernetes.io/healthcheck-nodeport" - - // BetaAnnotationExternalTraffic is the beta version of AlphaAnnotationExternalTraffic. - BetaAnnotationExternalTraffic = "service.beta.kubernetes.io/external-traffic" -) - -// NeedsHealthCheck Check service for health check annotations -func NeedsHealthCheck(service *api.Service) bool { - // First check the alpha annotation and then the beta. This is so existing - // Services continue to work till the user decides to transition to beta. - // If they transition to beta, there's no way to go back to alpha without - // rolling back the cluster. - for _, annotation := range []string{AlphaAnnotationExternalTraffic, BetaAnnotationExternalTraffic} { - if l, ok := service.Annotations[annotation]; ok { - if l == AnnotationValueExternalTrafficLocal { - return true - } else if l == AnnotationValueExternalTrafficGlobal { - return false - } else { - glog.Errorf("Invalid value for annotation %v: %v", annotation, l) - } - } - } - return false -} - -// GetServiceHealthCheckNodePort Return health check node port annotation for service, if one exists -func GetServiceHealthCheckNodePort(service *api.Service) int32 { - if !NeedsHealthCheck(service) { - return 0 - } - // First check the alpha annotation and then the beta. This is so existing - // Services continue to work till the user decides to transition to beta. - // If they transition to beta, there's no way to go back to alpha without - // rolling back the cluster. 
- for _, annotation := range []string{AlphaAnnotationHealthCheckNodePort, BetaAnnotationHealthCheckNodePort} { - if l, ok := service.Annotations[annotation]; ok { - p, err := strconv.Atoi(l) - if err != nil { - glog.Errorf("Failed to parse annotation %v: %v", annotation, err) - continue - } - return int32(p) - } - } - return 0 -} - -// GetServiceHealthCheckPathPort Return the path and nodePort programmed into the Cloud LB Health Check -func GetServiceHealthCheckPathPort(service *api.Service) (string, int32) { - if !NeedsHealthCheck(service) { - return "", 0 - } - port := GetServiceHealthCheckNodePort(service) - if port == 0 { - return "", 0 - } - return "/healthz", port -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/service/util.go b/vendor/k8s.io/kubernetes/pkg/api/service/util.go deleted file mode 100644 index 6f0e14e2bd..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/service/util.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package service - -import ( - "fmt" - "strings" - - "k8s.io/kubernetes/pkg/api" - netsets "k8s.io/kubernetes/pkg/util/net/sets" -) - -const ( - defaultLoadBalancerSourceRanges = "0.0.0.0/0" -) - -// IsAllowAll checks whether the netsets.IPNet allows traffic from 0.0.0.0/0 -func IsAllowAll(ipnets netsets.IPNet) bool { - for _, s := range ipnets.StringSlice() { - if s == "0.0.0.0/0" { - return true - } - } - return false -} - -// GetLoadBalancerSourceRanges first try to parse and verify LoadBalancerSourceRanges field from a service. -// If the field is not specified, turn to parse and verify the AnnotationLoadBalancerSourceRangesKey annotation from a service, -// extracting the source ranges to allow, and if not present returns a default (allow-all) value. -func GetLoadBalancerSourceRanges(service *api.Service) (netsets.IPNet, error) { - var ipnets netsets.IPNet - var err error - // if SourceRange field is specified, ignore sourceRange annotation - if len(service.Spec.LoadBalancerSourceRanges) > 0 { - specs := service.Spec.LoadBalancerSourceRanges - ipnets, err = netsets.ParseIPNets(specs...) - - if err != nil { - return nil, fmt.Errorf("service.Spec.LoadBalancerSourceRanges: %v is not valid. Expecting a list of IP ranges. For example, 10.0.0.0/24. Error msg: %v", specs, err) - } - } else { - val := service.Annotations[AnnotationLoadBalancerSourceRangesKey] - val = strings.TrimSpace(val) - if val == "" { - val = defaultLoadBalancerSourceRanges - } - specs := strings.Split(val, ",") - ipnets, err = netsets.ParseIPNets(specs...) - if err != nil { - return nil, fmt.Errorf("%s: %s is not valid. Expecting a comma-separated list of source IP ranges. 
For example, 10.0.0.0/24,192.168.2.0/24", AnnotationLoadBalancerSourceRangesKey, val) - } - } - return ipnets, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/testapi/BUILD b/vendor/k8s.io/kubernetes/pkg/api/testapi/BUILD deleted file mode 100644 index 410d65a5f5..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/testapi/BUILD +++ /dev/null @@ -1,63 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["testapi.go"], - tags = ["automanaged"], - deps = [ - "//cmd/kubeadm/app/apis/kubeadm:go_default_library", - "//cmd/kubeadm/app/apis/kubeadm/install:go_default_library", - "//federation/apis/federation:go_default_library", - "//federation/apis/federation/install:go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/apps:go_default_library", - "//pkg/apis/apps/install:go_default_library", - "//pkg/apis/authentication/install:go_default_library", - "//pkg/apis/authorization:go_default_library", - "//pkg/apis/authorization/install:go_default_library", - "//pkg/apis/autoscaling:go_default_library", - "//pkg/apis/autoscaling/install:go_default_library", - "//pkg/apis/batch:go_default_library", - "//pkg/apis/batch/install:go_default_library", - "//pkg/apis/certificates:go_default_library", - "//pkg/apis/certificates/install:go_default_library", - "//pkg/apis/componentconfig/install:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/apis/extensions/install:go_default_library", - "//pkg/apis/imagepolicy:go_default_library", - "//pkg/apis/imagepolicy/install:go_default_library", - "//pkg/apis/policy:go_default_library", - "//pkg/apis/policy/install:go_default_library", - "//pkg/apis/rbac:go_default_library", - "//pkg/apis/rbac/install:go_default_library", - "//pkg/apis/storage:go_default_library", - "//pkg/apis/storage/install:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/runtime/serializer/recognizer:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["testapi_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/runtime:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/testapi/testapi.go b/vendor/k8s.io/kubernetes/pkg/api/testapi/testapi.go deleted file mode 100644 index 50a18e34ed..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/testapi/testapi.go +++ /dev/null @@ -1,478 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package testapi provides a helper for retrieving the KUBE_TEST_API environment variable. -// -// TODO(lavalamp): this package is a huge disaster at the moment. 
I intend to -// refactor. All code currently using this package should change: -// 1. Declare your own registered.APIGroupRegistrationManager in your own test code. -// 2. Import the relevant install packages. -// 3. Register the types you need, from the announced.APIGroupAnnouncementManager. -package testapi - -import ( - "fmt" - "mime" - "os" - "reflect" - "strings" - - "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" - "k8s.io/kubernetes/federation/apis/federation" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/authorization" - "k8s.io/kubernetes/pkg/apis/autoscaling" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/certificates" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/apis/imagepolicy" - "k8s.io/kubernetes/pkg/apis/policy" - "k8s.io/kubernetes/pkg/apis/rbac" - "k8s.io/kubernetes/pkg/apis/storage" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer/recognizer" - - _ "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/install" - _ "k8s.io/kubernetes/federation/apis/federation/install" - _ "k8s.io/kubernetes/pkg/api/install" - _ "k8s.io/kubernetes/pkg/apis/apps/install" - _ "k8s.io/kubernetes/pkg/apis/authentication/install" - _ "k8s.io/kubernetes/pkg/apis/authorization/install" - _ "k8s.io/kubernetes/pkg/apis/autoscaling/install" - _ "k8s.io/kubernetes/pkg/apis/batch/install" - _ "k8s.io/kubernetes/pkg/apis/certificates/install" - _ "k8s.io/kubernetes/pkg/apis/componentconfig/install" - _ "k8s.io/kubernetes/pkg/apis/extensions/install" - _ "k8s.io/kubernetes/pkg/apis/imagepolicy/install" - _ "k8s.io/kubernetes/pkg/apis/policy/install" - _ "k8s.io/kubernetes/pkg/apis/rbac/install" - _ "k8s.io/kubernetes/pkg/apis/storage/install" -) - -var ( - Groups = make(map[string]TestGroup) - Default TestGroup - Authorization TestGroup - Autoscaling TestGroup - Batch TestGroup - Extensions TestGroup - Apps TestGroup - Policy TestGroup - Federation TestGroup - Rbac TestGroup - Certificates TestGroup - Storage TestGroup - ImagePolicy TestGroup - - serializer runtime.SerializerInfo - storageSerializer runtime.SerializerInfo -) - -type TestGroup struct { - externalGroupVersion unversioned.GroupVersion - internalGroupVersion unversioned.GroupVersion - internalTypes map[string]reflect.Type - externalTypes map[string]reflect.Type -} - -func init() { - if apiMediaType := os.Getenv("KUBE_TEST_API_TYPE"); len(apiMediaType) > 0 { - var ok bool - mediaType, _, err := mime.ParseMediaType(apiMediaType) - if err != nil { - panic(err) - } - serializer, ok = runtime.SerializerInfoForMediaType(api.Codecs.SupportedMediaTypes(), mediaType) - if !ok { - panic(fmt.Sprintf("no serializer for %s", apiMediaType)) - } - } - - if storageMediaType := StorageMediaType(); len(storageMediaType) > 0 { - var ok bool - mediaType, _, err := mime.ParseMediaType(storageMediaType) - if err != nil { - panic(err) - } - storageSerializer, ok = runtime.SerializerInfoForMediaType(api.Codecs.SupportedMediaTypes(), mediaType) - if !ok { - panic(fmt.Sprintf("no serializer for %s", storageMediaType)) - } - } - - kubeTestAPI := os.Getenv("KUBE_TEST_API") - if len(kubeTestAPI) != 0 { - // priority is "first in list preferred", so this has to run in reverse order - testGroupVersions := strings.Split(kubeTestAPI, ",") - for i := len(testGroupVersions) - 1; i >= 0; i-- { - gvString := testGroupVersions[i] - 
groupVersion, err := unversioned.ParseGroupVersion(gvString) - if err != nil { - panic(fmt.Sprintf("Error parsing groupversion %v: %v", gvString, err)) - } - - internalGroupVersion := unversioned.GroupVersion{Group: groupVersion.Group, Version: runtime.APIVersionInternal} - Groups[groupVersion.Group] = TestGroup{ - externalGroupVersion: groupVersion, - internalGroupVersion: internalGroupVersion, - internalTypes: api.Scheme.KnownTypes(internalGroupVersion), - externalTypes: api.Scheme.KnownTypes(groupVersion), - } - } - } - - if _, ok := Groups[api.GroupName]; !ok { - externalGroupVersion := unversioned.GroupVersion{Group: api.GroupName, Version: registered.GroupOrDie(api.GroupName).GroupVersion.Version} - Groups[api.GroupName] = TestGroup{ - externalGroupVersion: externalGroupVersion, - internalGroupVersion: api.SchemeGroupVersion, - internalTypes: api.Scheme.KnownTypes(api.SchemeGroupVersion), - externalTypes: api.Scheme.KnownTypes(externalGroupVersion), - } - } - if _, ok := Groups[extensions.GroupName]; !ok { - externalGroupVersion := unversioned.GroupVersion{Group: extensions.GroupName, Version: registered.GroupOrDie(extensions.GroupName).GroupVersion.Version} - Groups[extensions.GroupName] = TestGroup{ - externalGroupVersion: externalGroupVersion, - internalGroupVersion: extensions.SchemeGroupVersion, - internalTypes: api.Scheme.KnownTypes(extensions.SchemeGroupVersion), - externalTypes: api.Scheme.KnownTypes(externalGroupVersion), - } - } - if _, ok := Groups[autoscaling.GroupName]; !ok { - internalTypes := make(map[string]reflect.Type) - for k, t := range api.Scheme.KnownTypes(extensions.SchemeGroupVersion) { - if k == "Scale" { - continue - } - internalTypes[k] = t - } - externalGroupVersion := unversioned.GroupVersion{Group: autoscaling.GroupName, Version: registered.GroupOrDie(autoscaling.GroupName).GroupVersion.Version} - Groups[autoscaling.GroupName] = TestGroup{ - externalGroupVersion: externalGroupVersion, - internalGroupVersion: extensions.SchemeGroupVersion, - internalTypes: internalTypes, - externalTypes: api.Scheme.KnownTypes(externalGroupVersion), - } - } - if _, ok := Groups[autoscaling.GroupName+"IntraGroup"]; !ok { - internalTypes := make(map[string]reflect.Type) - for k, t := range api.Scheme.KnownTypes(extensions.SchemeGroupVersion) { - if k == "Scale" { - internalTypes[k] = t - break - } - } - externalGroupVersion := unversioned.GroupVersion{Group: autoscaling.GroupName, Version: registered.GroupOrDie(autoscaling.GroupName).GroupVersion.Version} - Groups[autoscaling.GroupName] = TestGroup{ - externalGroupVersion: externalGroupVersion, - internalGroupVersion: autoscaling.SchemeGroupVersion, - internalTypes: internalTypes, - externalTypes: api.Scheme.KnownTypes(externalGroupVersion), - } - } - if _, ok := Groups[batch.GroupName]; !ok { - externalGroupVersion := unversioned.GroupVersion{Group: batch.GroupName, Version: registered.GroupOrDie(batch.GroupName).GroupVersion.Version} - Groups[batch.GroupName] = TestGroup{ - externalGroupVersion: externalGroupVersion, - internalGroupVersion: batch.SchemeGroupVersion, - internalTypes: api.Scheme.KnownTypes(batch.SchemeGroupVersion), - externalTypes: api.Scheme.KnownTypes(externalGroupVersion), - } - } - if _, ok := Groups[apps.GroupName]; !ok { - externalGroupVersion := unversioned.GroupVersion{Group: apps.GroupName, Version: registered.GroupOrDie(apps.GroupName).GroupVersion.Version} - Groups[apps.GroupName] = TestGroup{ - externalGroupVersion: externalGroupVersion, - internalGroupVersion: extensions.SchemeGroupVersion, - 
internalTypes: api.Scheme.KnownTypes(extensions.SchemeGroupVersion), - externalTypes: api.Scheme.KnownTypes(externalGroupVersion), - } - } - if _, ok := Groups[policy.GroupName]; !ok { - externalGroupVersion := unversioned.GroupVersion{Group: policy.GroupName, Version: registered.GroupOrDie(policy.GroupName).GroupVersion.Version} - Groups[policy.GroupName] = TestGroup{ - externalGroupVersion: externalGroupVersion, - internalGroupVersion: policy.SchemeGroupVersion, - internalTypes: api.Scheme.KnownTypes(policy.SchemeGroupVersion), - externalTypes: api.Scheme.KnownTypes(externalGroupVersion), - } - } - if _, ok := Groups[federation.GroupName]; !ok { - externalGroupVersion := unversioned.GroupVersion{Group: federation.GroupName, Version: registered.GroupOrDie(federation.GroupName).GroupVersion.Version} - Groups[federation.GroupName] = TestGroup{ - externalGroupVersion: externalGroupVersion, - internalGroupVersion: federation.SchemeGroupVersion, - internalTypes: api.Scheme.KnownTypes(federation.SchemeGroupVersion), - externalTypes: api.Scheme.KnownTypes(externalGroupVersion), - } - } - if _, ok := Groups[rbac.GroupName]; !ok { - externalGroupVersion := unversioned.GroupVersion{Group: rbac.GroupName, Version: registered.GroupOrDie(rbac.GroupName).GroupVersion.Version} - Groups[rbac.GroupName] = TestGroup{ - externalGroupVersion: externalGroupVersion, - internalGroupVersion: rbac.SchemeGroupVersion, - internalTypes: api.Scheme.KnownTypes(rbac.SchemeGroupVersion), - externalTypes: api.Scheme.KnownTypes(externalGroupVersion), - } - } - if _, ok := Groups[storage.GroupName]; !ok { - externalGroupVersion := unversioned.GroupVersion{Group: storage.GroupName, Version: registered.GroupOrDie(storage.GroupName).GroupVersion.Version} - Groups[storage.GroupName] = TestGroup{ - externalGroupVersion: externalGroupVersion, - internalGroupVersion: storage.SchemeGroupVersion, - internalTypes: api.Scheme.KnownTypes(storage.SchemeGroupVersion), - externalTypes: api.Scheme.KnownTypes(externalGroupVersion), - } - } - if _, ok := Groups[certificates.GroupName]; !ok { - externalGroupVersion := unversioned.GroupVersion{Group: certificates.GroupName, Version: registered.GroupOrDie(certificates.GroupName).GroupVersion.Version} - Groups[certificates.GroupName] = TestGroup{ - externalGroupVersion: externalGroupVersion, - internalGroupVersion: certificates.SchemeGroupVersion, - internalTypes: api.Scheme.KnownTypes(certificates.SchemeGroupVersion), - externalTypes: api.Scheme.KnownTypes(externalGroupVersion), - } - } - if _, ok := Groups[imagepolicy.GroupName]; !ok { - externalGroupVersion := unversioned.GroupVersion{Group: imagepolicy.GroupName, Version: registered.GroupOrDie(imagepolicy.GroupName).GroupVersion.Version} - Groups[imagepolicy.GroupName] = TestGroup{ - externalGroupVersion: externalGroupVersion, - internalGroupVersion: imagepolicy.SchemeGroupVersion, - internalTypes: api.Scheme.KnownTypes(imagepolicy.SchemeGroupVersion), - externalTypes: api.Scheme.KnownTypes(externalGroupVersion), - } - } - if _, ok := Groups[authorization.GroupName]; !ok { - externalGroupVersion := unversioned.GroupVersion{Group: authorization.GroupName, Version: registered.GroupOrDie(authorization.GroupName).GroupVersion.Version} - Groups[authorization.GroupName] = TestGroup{ - externalGroupVersion: externalGroupVersion, - internalGroupVersion: authorization.SchemeGroupVersion, - internalTypes: api.Scheme.KnownTypes(authorization.SchemeGroupVersion), - externalTypes: api.Scheme.KnownTypes(externalGroupVersion), - } - } - if _, ok := 
Groups[kubeadm.GroupName]; !ok { - externalGroupVersion := unversioned.GroupVersion{Group: kubeadm.GroupName, Version: registered.GroupOrDie(kubeadm.GroupName).GroupVersion.Version} - Groups[kubeadm.GroupName] = TestGroup{ - externalGroupVersion: externalGroupVersion, - internalGroupVersion: kubeadm.SchemeGroupVersion, - internalTypes: api.Scheme.KnownTypes(kubeadm.SchemeGroupVersion), - externalTypes: api.Scheme.KnownTypes(externalGroupVersion), - } - } - - Default = Groups[api.GroupName] - Autoscaling = Groups[autoscaling.GroupName] - Batch = Groups[batch.GroupName] - Apps = Groups[apps.GroupName] - Policy = Groups[policy.GroupName] - Certificates = Groups[certificates.GroupName] - Extensions = Groups[extensions.GroupName] - Federation = Groups[federation.GroupName] - Rbac = Groups[rbac.GroupName] - Storage = Groups[storage.GroupName] - ImagePolicy = Groups[imagepolicy.GroupName] - Authorization = Groups[authorization.GroupName] -} - -func (g TestGroup) ContentConfig() (string, *unversioned.GroupVersion, runtime.Codec) { - return "application/json", g.GroupVersion(), g.Codec() -} - -func (g TestGroup) GroupVersion() *unversioned.GroupVersion { - copyOfGroupVersion := g.externalGroupVersion - return ©OfGroupVersion -} - -// InternalGroupVersion returns the group,version used to identify the internal -// types for this API -func (g TestGroup) InternalGroupVersion() unversioned.GroupVersion { - return g.internalGroupVersion -} - -// InternalTypes returns a map of internal API types' kind names to their Go types. -func (g TestGroup) InternalTypes() map[string]reflect.Type { - return g.internalTypes -} - -// ExternalTypes returns a map of external API types' kind names to their Go types. -func (g TestGroup) ExternalTypes() map[string]reflect.Type { - return g.externalTypes -} - -// Codec returns the codec for the API version to test against, as set by the -// KUBE_TEST_API_TYPE env var. -func (g TestGroup) Codec() runtime.Codec { - if serializer.Serializer == nil { - return api.Codecs.LegacyCodec(g.externalGroupVersion) - } - return api.Codecs.CodecForVersions(serializer.Serializer, api.Codecs.UniversalDeserializer(), unversioned.GroupVersions{g.externalGroupVersion}, nil) -} - -// NegotiatedSerializer returns the negotiated serializer for the server. -func (g TestGroup) NegotiatedSerializer() runtime.NegotiatedSerializer { - return api.Codecs -} - -func StorageMediaType() string { - return os.Getenv("KUBE_TEST_API_STORAGE_TYPE") -} - -// StorageCodec returns the codec for the API version to store in etcd, as set by the -// KUBE_TEST_API_STORAGE_TYPE env var. -func (g TestGroup) StorageCodec() runtime.Codec { - s := storageSerializer.Serializer - - if s == nil { - return api.Codecs.LegacyCodec(g.externalGroupVersion) - } - - // etcd2 only supports string data - we must wrap any result before returning - // TODO: remove for etcd3 / make parameterizable - if !storageSerializer.EncodesAsText { - s = runtime.NewBase64Serializer(s) - } - ds := recognizer.NewDecoder(s, api.Codecs.UniversalDeserializer()) - - return api.Codecs.CodecForVersions(s, ds, unversioned.GroupVersions{g.externalGroupVersion}, nil) -} - -// Converter returns the api.Scheme for the API version to test against, as set by the -// KUBE_TEST_API env var. 
-func (g TestGroup) Converter() runtime.ObjectConvertor { - interfaces, err := registered.GroupOrDie(g.externalGroupVersion.Group).InterfacesFor(g.externalGroupVersion) - if err != nil { - panic(err) - } - return interfaces.ObjectConvertor -} - -// MetadataAccessor returns the MetadataAccessor for the API version to test against, -// as set by the KUBE_TEST_API env var. -func (g TestGroup) MetadataAccessor() meta.MetadataAccessor { - interfaces, err := registered.GroupOrDie(g.externalGroupVersion.Group).InterfacesFor(g.externalGroupVersion) - if err != nil { - panic(err) - } - return interfaces.MetadataAccessor -} - -// SelfLink returns a self link that will appear to be for the version Version(). -// 'resource' should be the resource path, e.g. "pods" for the Pod type. 'name' should be -// empty for lists. -func (g TestGroup) SelfLink(resource, name string) string { - if g.externalGroupVersion.Group == api.GroupName { - if name == "" { - return fmt.Sprintf("/api/%s/%s", g.externalGroupVersion.Version, resource) - } - return fmt.Sprintf("/api/%s/%s/%s", g.externalGroupVersion.Version, resource, name) - } else { - // TODO: will need a /apis prefix once we have proper multi-group - // support - if name == "" { - return fmt.Sprintf("/apis/%s/%s/%s", g.externalGroupVersion.Group, g.externalGroupVersion.Version, resource) - } - return fmt.Sprintf("/apis/%s/%s/%s/%s", g.externalGroupVersion.Group, g.externalGroupVersion.Version, resource, name) - } -} - -// Returns the appropriate path for the given prefix (watch, proxy, redirect, etc), resource, namespace and name. -// For ex, this is of the form: -// /api/v1/watch/namespaces/foo/pods/pod0 for v1. -func (g TestGroup) ResourcePathWithPrefix(prefix, resource, namespace, name string) string { - var path string - if g.externalGroupVersion.Group == api.GroupName { - path = "/api/" + g.externalGroupVersion.Version - } else { - // TODO: switch back once we have proper multiple group support - // path = "/apis/" + g.Group + "/" + Version(group...) - path = "/apis/" + g.externalGroupVersion.Group + "/" + g.externalGroupVersion.Version - } - - if prefix != "" { - path = path + "/" + prefix - } - if namespace != "" { - path = path + "/namespaces/" + namespace - } - // Resource names are lower case. - resource = strings.ToLower(resource) - if resource != "" { - path = path + "/" + resource - } - if name != "" { - path = path + "/" + name - } - return path -} - -// Returns the appropriate path for the given resource, namespace and name. -// For example, this is of the form: -// /api/v1/namespaces/foo/pods/pod0 for v1. -func (g TestGroup) ResourcePath(resource, namespace, name string) string { - return g.ResourcePathWithPrefix("", resource, namespace, name) -} - -func (g TestGroup) RESTMapper() meta.RESTMapper { - return registered.RESTMapper() -} - -// ExternalGroupVersions returns all external group versions allowed for the server. 
-func ExternalGroupVersions() unversioned.GroupVersions { - versions := []unversioned.GroupVersion{} - for _, g := range Groups { - gv := g.GroupVersion() - versions = append(versions, *gv) - } - return versions -} - -// Get codec based on runtime.Object -func GetCodecForObject(obj runtime.Object) (runtime.Codec, error) { - kinds, _, err := api.Scheme.ObjectKinds(obj) - if err != nil { - return nil, fmt.Errorf("unexpected encoding error: %v", err) - } - kind := kinds[0] - - for _, group := range Groups { - if group.GroupVersion().Group != kind.Group { - continue - } - - if api.Scheme.Recognizes(kind) { - return group.Codec(), nil - } - } - // Codec used for unversioned types - if api.Scheme.Recognizes(kind) { - serializer, ok := runtime.SerializerInfoForMediaType(api.Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON) - if !ok { - return nil, fmt.Errorf("no serializer registered for json") - } - return serializer.Serializer, nil - } - return nil, fmt.Errorf("unexpected kind: %v", kind) -} - -func NewTestGroup(external, internal unversioned.GroupVersion, internalTypes map[string]reflect.Type, externalTypes map[string]reflect.Type) TestGroup { - return TestGroup{external, internal, internalTypes, externalTypes} -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/types.generated.go b/vendor/k8s.io/kubernetes/pkg/api/types.generated.go deleted file mode 100644 index 82cbe1f269..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/types.generated.go +++ /dev/null @@ -1,63626 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. 
-// ************************************************************ - -package api - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg3_resource "k8s.io/kubernetes/pkg/api/resource" - pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg6_fields "k8s.io/kubernetes/pkg/fields" - pkg5_labels "k8s.io/kubernetes/pkg/labels" - pkg7_runtime "k8s.io/kubernetes/pkg/runtime" - pkg1_types "k8s.io/kubernetes/pkg/types" - pkg4_intstr "k8s.io/kubernetes/pkg/util/intstr" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg3_resource.Quantity - var v1 pkg2_unversioned.Time - var v2 pkg6_fields.Selector - var v3 pkg5_labels.Selector - var v4 pkg7_runtime.Object - var v5 pkg1_types.UID - var v6 pkg4_intstr.IntOrString - var v7 time.Time - _, _, _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5, v6, v7 - } -} - -func (x *ObjectMeta) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [15]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Name != "" - yyq2[1] = x.GenerateName != "" - yyq2[2] = x.Namespace != "" - yyq2[3] = x.SelfLink != "" - yyq2[4] = x.UID != "" - yyq2[5] = x.ResourceVersion != "" - yyq2[6] = x.Generation != 0 - yyq2[7] = true - yyq2[8] = x.DeletionTimestamp != nil - yyq2[9] = x.DeletionGracePeriodSeconds != nil - yyq2[10] = len(x.Labels) != 0 - yyq2[11] = len(x.Annotations) != 0 - yyq2[12] = len(x.OwnerReferences) != 0 - yyq2[13] = len(x.Finalizers) != 0 - yyq2[14] = x.ClusterName != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(15) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { 
- z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("generateName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selfLink")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeInt(int64(x.Generation)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("generation")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeInt(int64(x.Generation)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yy25 := &x.CreationTimestamp - yym26 := z.EncBinary() - _ = yym26 - if false { - } else if z.HasExtensions() && z.EncExt(yy25) { - } else if yym26 { - z.EncBinaryMarshal(yy25) - } else if !yym26 && z.IsJSONHandle() { - z.EncJSONMarshal(yy25) - } else { - z.EncFallback(yy25) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("creationTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy27 := &x.CreationTimestamp - yym28 := z.EncBinary() - _ = yym28 - if false { - } else if z.HasExtensions() && z.EncExt(yy27) { - } else if yym28 { - z.EncBinaryMarshal(yy27) - } else if !yym28 && z.IsJSONHandle() { - z.EncJSONMarshal(yy27) - } else { - z.EncFallback(yy27) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.DeletionTimestamp == nil { - r.EncodeNil() - } else { - yym30 := z.EncBinary() - _ = yym30 - if false { - } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { - } else if yym30 { - z.EncBinaryMarshal(x.DeletionTimestamp) - } else if !yym30 && z.IsJSONHandle() { - z.EncJSONMarshal(x.DeletionTimestamp) - } else { - z.EncFallback(x.DeletionTimestamp) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletionTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DeletionTimestamp == nil { - r.EncodeNil() - } else { - yym31 := z.EncBinary() - _ = yym31 - if false { - } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { - } else if yym31 { - z.EncBinaryMarshal(x.DeletionTimestamp) - } else if !yym31 && z.IsJSONHandle() { - z.EncJSONMarshal(x.DeletionTimestamp) - } else { - z.EncFallback(x.DeletionTimestamp) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.DeletionGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy33 := *x.DeletionGracePeriodSeconds - yym34 := z.EncBinary() - _ = yym34 - if false { - } else { - r.EncodeInt(int64(yy33)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletionGracePeriodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DeletionGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy35 := *x.DeletionGracePeriodSeconds - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - r.EncodeInt(int64(yy35)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.Labels == nil { - r.EncodeNil() - } else { - yym38 := z.EncBinary() - _ = yym38 - if false { - } else { - z.F.EncMapStringStringV(x.Labels, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("labels")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Labels == nil { - 
r.EncodeNil() - } else { - yym39 := z.EncBinary() - _ = yym39 - if false { - } else { - z.F.EncMapStringStringV(x.Labels, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.Annotations == nil { - r.EncodeNil() - } else { - yym41 := z.EncBinary() - _ = yym41 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("annotations")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Annotations == nil { - r.EncodeNil() - } else { - yym42 := z.EncBinary() - _ = yym42 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - if x.OwnerReferences == nil { - r.EncodeNil() - } else { - yym44 := z.EncBinary() - _ = yym44 - if false { - } else { - h.encSliceOwnerReference(([]OwnerReference)(x.OwnerReferences), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ownerReferences")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.OwnerReferences == nil { - r.EncodeNil() - } else { - yym45 := z.EncBinary() - _ = yym45 - if false { - } else { - h.encSliceOwnerReference(([]OwnerReference)(x.OwnerReferences), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym47 := z.EncBinary() - _ = yym47 - if false { - } else { - z.F.EncSliceStringV(x.Finalizers, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("finalizers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym48 := z.EncBinary() - _ = yym48 - if false { - } else { - z.F.EncSliceStringV(x.Finalizers, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[14] { - yym50 := z.EncBinary() - _ = yym50 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym51 := z.EncBinary() - _ = yym51 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ObjectMeta) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym52 := z.DecBinary() - _ = yym52 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct53 := r.ContainerType() - if yyct53 == codecSelferValueTypeMap1234 { - yyl53 := r.ReadMapStart() - if yyl53 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - 
x.codecDecodeSelfFromMap(yyl53, d) - } - } else if yyct53 == codecSelferValueTypeArray1234 { - yyl53 := r.ReadArrayStart() - if yyl53 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl53, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ObjectMeta) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys54Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys54Slc - var yyhl54 bool = l >= 0 - for yyj54 := 0; ; yyj54++ { - if yyhl54 { - if yyj54 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys54Slc = r.DecodeBytes(yys54Slc, true, true) - yys54 := string(yys54Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys54 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "generateName": - if r.TryDecodeAsNil() { - x.GenerateName = "" - } else { - x.GenerateName = string(r.DecodeString()) - } - case "namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - case "selfLink": - if r.TryDecodeAsNil() { - x.SelfLink = "" - } else { - x.SelfLink = string(r.DecodeString()) - } - case "uid": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg1_types.UID(r.DecodeString()) - } - case "resourceVersion": - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - case "generation": - if r.TryDecodeAsNil() { - x.Generation = 0 - } else { - x.Generation = int64(r.DecodeInt(64)) - } - case "creationTimestamp": - if r.TryDecodeAsNil() { - x.CreationTimestamp = pkg2_unversioned.Time{} - } else { - yyv62 := &x.CreationTimestamp - yym63 := z.DecBinary() - _ = yym63 - if false { - } else if z.HasExtensions() && z.DecExt(yyv62) { - } else if yym63 { - z.DecBinaryUnmarshal(yyv62) - } else if !yym63 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv62) - } else { - z.DecFallback(yyv62, false) - } - } - case "deletionTimestamp": - if r.TryDecodeAsNil() { - if x.DeletionTimestamp != nil { - x.DeletionTimestamp = nil - } - } else { - if x.DeletionTimestamp == nil { - x.DeletionTimestamp = new(pkg2_unversioned.Time) - } - yym65 := z.DecBinary() - _ = yym65 - if false { - } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym65 { - z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym65 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.DeletionTimestamp) - } else { - z.DecFallback(x.DeletionTimestamp, false) - } - } - case "deletionGracePeriodSeconds": - if r.TryDecodeAsNil() { - if x.DeletionGracePeriodSeconds != nil { - x.DeletionGracePeriodSeconds = nil - } - } else { - if x.DeletionGracePeriodSeconds == nil { - x.DeletionGracePeriodSeconds = new(int64) - } - yym67 := z.DecBinary() - _ = yym67 - if false { - } else { - *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - case "labels": - if r.TryDecodeAsNil() { - x.Labels = nil - } else { - yyv68 := &x.Labels - yym69 := z.DecBinary() - _ = yym69 - if false { - } else { - z.F.DecMapStringStringX(yyv68, false, d) - } - } - case "annotations": - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv70 := &x.Annotations - yym71 := z.DecBinary() - _ = yym71 - if false { - } else { - z.F.DecMapStringStringX(yyv70, false, d) - 
} - } - case "ownerReferences": - if r.TryDecodeAsNil() { - x.OwnerReferences = nil - } else { - yyv72 := &x.OwnerReferences - yym73 := z.DecBinary() - _ = yym73 - if false { - } else { - h.decSliceOwnerReference((*[]OwnerReference)(yyv72), d) - } - } - case "finalizers": - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv74 := &x.Finalizers - yym75 := z.DecBinary() - _ = yym75 - if false { - } else { - z.F.DecSliceStringX(yyv74, false, d) - } - } - case "clusterName": - if r.TryDecodeAsNil() { - x.ClusterName = "" - } else { - x.ClusterName = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys54) - } // end switch yys54 - } // end for yyj54 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj77 int - var yyb77 bool - var yyhl77 bool = l >= 0 - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.GenerateName = "" - } else { - x.GenerateName = string(r.DecodeString()) - } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SelfLink = "" - } else { - x.SelfLink = string(r.DecodeString()) - } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg1_types.UID(r.DecodeString()) - } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Generation = 0 - } else { - x.Generation = int64(r.DecodeInt(64)) - } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.CreationTimestamp = pkg2_unversioned.Time{} - } else { - yyv85 := &x.CreationTimestamp - yym86 := z.DecBinary() - _ = yym86 - if false { - } else if z.HasExtensions() && z.DecExt(yyv85) { - } else if yym86 { - z.DecBinaryUnmarshal(yyv85) - } else if !yym86 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv85) - } else { - z.DecFallback(yyv85, false) - } - } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DeletionTimestamp != nil { - x.DeletionTimestamp = nil - } - } else { - if x.DeletionTimestamp == nil { - x.DeletionTimestamp = new(pkg2_unversioned.Time) - } - yym88 := z.DecBinary() - _ = yym88 - if false { - } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym88 { - z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym88 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.DeletionTimestamp) - } else { - z.DecFallback(x.DeletionTimestamp, false) - } - } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DeletionGracePeriodSeconds != nil { - x.DeletionGracePeriodSeconds = nil - } - } else { - if x.DeletionGracePeriodSeconds == nil { - x.DeletionGracePeriodSeconds = new(int64) - } - yym90 := z.DecBinary() - _ = yym90 - if false { - } else { - *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Labels = nil - } else { - yyv91 := &x.Labels - yym92 := z.DecBinary() - _ = yym92 - if false { - } else { - z.F.DecMapStringStringX(yyv91, false, d) - } - } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv93 := &x.Annotations - yym94 := z.DecBinary() - _ = yym94 - if false { - } else { - z.F.DecMapStringStringX(yyv93, false, d) - } - } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.OwnerReferences = nil - } else { - yyv95 := &x.OwnerReferences - yym96 := z.DecBinary() - _ = yym96 - if false { - } else { - h.decSliceOwnerReference((*[]OwnerReference)(yyv95), d) - } - } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv97 := &x.Finalizers - yym98 := z.DecBinary() - _ = yym98 - if false { - } else { - z.F.DecSliceStringX(yyv97, false, d) - } - } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterName = "" - } else { - x.ClusterName = string(r.DecodeString()) - } - for { - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj77-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym100 := z.EncBinary() - _ = yym100 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep101 := !z.EncBinary() - yy2arr101 := z.EncBasicHandle().StructToArray - var yyq101 [24]bool - _, _, _ = yysep101, yyq101, yy2arr101 - const yyr101 bool = false - yyq101[1] = x.VolumeSource.HostPath != nil && x.HostPath != nil - yyq101[2] = x.VolumeSource.EmptyDir != nil && x.EmptyDir != nil - yyq101[3] = x.VolumeSource.GCEPersistentDisk != nil && x.GCEPersistentDisk != nil - yyq101[4] = x.VolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil - yyq101[5] = x.VolumeSource.GitRepo != nil && x.GitRepo != nil - yyq101[6] = x.VolumeSource.Secret != nil && x.Secret != nil - yyq101[7] = x.VolumeSource.NFS != nil && x.NFS != nil - yyq101[8] = x.VolumeSource.ISCSI != nil && x.ISCSI != nil - yyq101[9] = x.VolumeSource.Glusterfs != nil && x.Glusterfs != nil - yyq101[10] = x.VolumeSource.PersistentVolumeClaim != nil && x.PersistentVolumeClaim != nil - yyq101[11] = x.VolumeSource.RBD != nil && x.RBD != nil - yyq101[12] = x.VolumeSource.Quobyte != nil && x.Quobyte != nil - yyq101[13] = x.VolumeSource.FlexVolume != nil && x.FlexVolume != nil - yyq101[14] = x.VolumeSource.Cinder != nil && x.Cinder != nil - yyq101[15] = x.VolumeSource.CephFS != nil && x.CephFS != nil - yyq101[16] = x.VolumeSource.Flocker != nil && x.Flocker != nil - yyq101[17] = x.VolumeSource.DownwardAPI != nil && x.DownwardAPI != nil - yyq101[18] = x.VolumeSource.FC != nil && x.FC != nil - yyq101[19] = x.VolumeSource.AzureFile != nil && x.AzureFile != nil - yyq101[20] = x.VolumeSource.ConfigMap != nil && x.ConfigMap != nil - yyq101[21] = x.VolumeSource.VsphereVolume != nil && x.VsphereVolume != nil - yyq101[22] = x.VolumeSource.AzureDisk != nil && x.AzureDisk != nil - yyq101[23] = x.VolumeSource.PhotonPersistentDisk != nil && x.PhotonPersistentDisk != nil - var yynn101 int - if yyr101 || yy2arr101 { - r.EncodeArrayStart(24) - } else { - yynn101 = 1 - for _, b := range yyq101 { - if b { - yynn101++ - } - } - r.EncodeMapStart(yynn101) - yynn101 = 0 - } - if yyr101 || yy2arr101 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym103 := z.EncBinary() - _ = yym103 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym104 := z.EncBinary() - _ = yym104 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - var yyn105 bool - if x.VolumeSource.HostPath == nil { - yyn105 = true - goto LABEL105 - } - LABEL105: - if yyr101 || yy2arr101 { - if yyn105 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if 
yyq101[1] { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn105 { - r.EncodeNil() - } else { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } - } - } - var yyn106 bool - if x.VolumeSource.EmptyDir == nil { - yyn106 = true - goto LABEL106 - } - LABEL106: - if yyr101 || yy2arr101 { - if yyn106 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[2] { - if x.EmptyDir == nil { - r.EncodeNil() - } else { - x.EmptyDir.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("emptyDir")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn106 { - r.EncodeNil() - } else { - if x.EmptyDir == nil { - r.EncodeNil() - } else { - x.EmptyDir.CodecEncodeSelf(e) - } - } - } - } - var yyn107 bool - if x.VolumeSource.GCEPersistentDisk == nil { - yyn107 = true - goto LABEL107 - } - LABEL107: - if yyr101 || yy2arr101 { - if yyn107 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[3] { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn107 { - r.EncodeNil() - } else { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } - } - } - var yyn108 bool - if x.VolumeSource.AWSElasticBlockStore == nil { - yyn108 = true - goto LABEL108 - } - LABEL108: - if yyr101 || yy2arr101 { - if yyn108 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[4] { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn108 { - r.EncodeNil() - } else { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } - } - } - var yyn109 bool - if x.VolumeSource.GitRepo == nil { - yyn109 = true - goto LABEL109 - } - LABEL109: - if yyr101 || yy2arr101 { - if yyn109 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[5] { - if x.GitRepo == nil { - r.EncodeNil() - } else { - x.GitRepo.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gitRepo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn109 { - r.EncodeNil() - } else { - if x.GitRepo == nil { - r.EncodeNil() - } else { - x.GitRepo.CodecEncodeSelf(e) - } - } - } - } - var yyn110 bool - if x.VolumeSource.Secret == nil { - 
yyn110 = true - goto LABEL110 - } - LABEL110: - if yyr101 || yy2arr101 { - if yyn110 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[6] { - if x.Secret == nil { - r.EncodeNil() - } else { - x.Secret.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secret")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn110 { - r.EncodeNil() - } else { - if x.Secret == nil { - r.EncodeNil() - } else { - x.Secret.CodecEncodeSelf(e) - } - } - } - } - var yyn111 bool - if x.VolumeSource.NFS == nil { - yyn111 = true - goto LABEL111 - } - LABEL111: - if yyr101 || yy2arr101 { - if yyn111 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[7] { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn111 { - r.EncodeNil() - } else { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } - } - } - var yyn112 bool - if x.VolumeSource.ISCSI == nil { - yyn112 = true - goto LABEL112 - } - LABEL112: - if yyr101 || yy2arr101 { - if yyn112 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[8] { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iscsi")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn112 { - r.EncodeNil() - } else { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } - } - } - var yyn113 bool - if x.VolumeSource.Glusterfs == nil { - yyn113 = true - goto LABEL113 - } - LABEL113: - if yyr101 || yy2arr101 { - if yyn113 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[9] { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn113 { - r.EncodeNil() - } else { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } - } - } - var yyn114 bool - if x.VolumeSource.PersistentVolumeClaim == nil { - yyn114 = true - goto LABEL114 - } - LABEL114: - if yyr101 || yy2arr101 { - if yyn114 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[10] { - if x.PersistentVolumeClaim == nil { - r.EncodeNil() - } else { - x.PersistentVolumeClaim.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeClaim")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn114 { - r.EncodeNil() - } else { - if x.PersistentVolumeClaim == nil { - r.EncodeNil() - } else { - 
x.PersistentVolumeClaim.CodecEncodeSelf(e) - } - } - } - } - var yyn115 bool - if x.VolumeSource.RBD == nil { - yyn115 = true - goto LABEL115 - } - LABEL115: - if yyr101 || yy2arr101 { - if yyn115 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[11] { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rbd")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn115 { - r.EncodeNil() - } else { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } - } - } - var yyn116 bool - if x.VolumeSource.Quobyte == nil { - yyn116 = true - goto LABEL116 - } - LABEL116: - if yyr101 || yy2arr101 { - if yyn116 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[12] { - if x.Quobyte == nil { - r.EncodeNil() - } else { - x.Quobyte.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("quobyte")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn116 { - r.EncodeNil() - } else { - if x.Quobyte == nil { - r.EncodeNil() - } else { - x.Quobyte.CodecEncodeSelf(e) - } - } - } - } - var yyn117 bool - if x.VolumeSource.FlexVolume == nil { - yyn117 = true - goto LABEL117 - } - LABEL117: - if yyr101 || yy2arr101 { - if yyn117 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[13] { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn117 { - r.EncodeNil() - } else { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } - } - } - var yyn118 bool - if x.VolumeSource.Cinder == nil { - yyn118 = true - goto LABEL118 - } - LABEL118: - if yyr101 || yy2arr101 { - if yyn118 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[14] { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cinder")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn118 { - r.EncodeNil() - } else { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } - } - } - var yyn119 bool - if x.VolumeSource.CephFS == nil { - yyn119 = true - goto LABEL119 - } - LABEL119: - if yyr101 || yy2arr101 { - if yyn119 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[15] { - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cephfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn119 { - r.EncodeNil() - } else { - if x.CephFS 
== nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } - } - } - var yyn120 bool - if x.VolumeSource.Flocker == nil { - yyn120 = true - goto LABEL120 - } - LABEL120: - if yyr101 || yy2arr101 { - if yyn120 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[16] { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flocker")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn120 { - r.EncodeNil() - } else { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } - } - } - var yyn121 bool - if x.VolumeSource.DownwardAPI == nil { - yyn121 = true - goto LABEL121 - } - LABEL121: - if yyr101 || yy2arr101 { - if yyn121 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[17] { - if x.DownwardAPI == nil { - r.EncodeNil() - } else { - x.DownwardAPI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[17] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("downwardAPI")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn121 { - r.EncodeNil() - } else { - if x.DownwardAPI == nil { - r.EncodeNil() - } else { - x.DownwardAPI.CodecEncodeSelf(e) - } - } - } - } - var yyn122 bool - if x.VolumeSource.FC == nil { - yyn122 = true - goto LABEL122 - } - LABEL122: - if yyr101 || yy2arr101 { - if yyn122 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[18] { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[18] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fc")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn122 { - r.EncodeNil() - } else { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } - } - } - var yyn123 bool - if x.VolumeSource.AzureFile == nil { - yyn123 = true - goto LABEL123 - } - LABEL123: - if yyr101 || yy2arr101 { - if yyn123 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[19] { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[19] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn123 { - r.EncodeNil() - } else { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } - } - } - var yyn124 bool - if x.VolumeSource.ConfigMap == nil { - yyn124 = true - goto LABEL124 - } - LABEL124: - if yyr101 || yy2arr101 { - if yyn124 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[20] { - if x.ConfigMap == nil { - r.EncodeNil() - } else { - x.ConfigMap.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[20] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("configMap")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - 
if yyn124 { - r.EncodeNil() - } else { - if x.ConfigMap == nil { - r.EncodeNil() - } else { - x.ConfigMap.CodecEncodeSelf(e) - } - } - } - } - var yyn125 bool - if x.VolumeSource.VsphereVolume == nil { - yyn125 = true - goto LABEL125 - } - LABEL125: - if yyr101 || yy2arr101 { - if yyn125 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[21] { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[21] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn125 { - r.EncodeNil() - } else { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } - } - } - var yyn126 bool - if x.VolumeSource.AzureDisk == nil { - yyn126 = true - goto LABEL126 - } - LABEL126: - if yyr101 || yy2arr101 { - if yyn126 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[22] { - if x.AzureDisk == nil { - r.EncodeNil() - } else { - x.AzureDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[22] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn126 { - r.EncodeNil() - } else { - if x.AzureDisk == nil { - r.EncodeNil() - } else { - x.AzureDisk.CodecEncodeSelf(e) - } - } - } - } - var yyn127 bool - if x.VolumeSource.PhotonPersistentDisk == nil { - yyn127 = true - goto LABEL127 - } - LABEL127: - if yyr101 || yy2arr101 { - if yyn127 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[23] { - if x.PhotonPersistentDisk == nil { - r.EncodeNil() - } else { - x.PhotonPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq101[23] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn127 { - r.EncodeNil() - } else { - if x.PhotonPersistentDisk == nil { - r.EncodeNil() - } else { - x.PhotonPersistentDisk.CodecEncodeSelf(e) - } - } - } - } - if yyr101 || yy2arr101 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Volume) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym128 := z.DecBinary() - _ = yym128 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct129 := r.ContainerType() - if yyct129 == codecSelferValueTypeMap1234 { - yyl129 := r.ReadMapStart() - if yyl129 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl129, d) - } - } else if yyct129 == codecSelferValueTypeArray1234 { - yyl129 := r.ReadArrayStart() - if yyl129 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl129, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Volume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var 
yys130Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys130Slc - var yyhl130 bool = l >= 0 - for yyj130 := 0; ; yyj130++ { - if yyhl130 { - if yyj130 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys130Slc = r.DecodeBytes(yys130Slc, true, true) - yys130 := string(yys130Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys130 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "hostPath": - if x.VolumeSource.HostPath == nil { - x.VolumeSource.HostPath = new(HostPathVolumeSource) - } - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - case "emptyDir": - if x.VolumeSource.EmptyDir == nil { - x.VolumeSource.EmptyDir = new(EmptyDirVolumeSource) - } - if r.TryDecodeAsNil() { - if x.EmptyDir != nil { - x.EmptyDir = nil - } - } else { - if x.EmptyDir == nil { - x.EmptyDir = new(EmptyDirVolumeSource) - } - x.EmptyDir.CodecDecodeSelf(d) - } - case "gcePersistentDisk": - if x.VolumeSource.GCEPersistentDisk == nil { - x.VolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - case "awsElasticBlockStore": - if x.VolumeSource.AWSElasticBlockStore == nil { - x.VolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - case "gitRepo": - if x.VolumeSource.GitRepo == nil { - x.VolumeSource.GitRepo = new(GitRepoVolumeSource) - } - if r.TryDecodeAsNil() { - if x.GitRepo != nil { - x.GitRepo = nil - } - } else { - if x.GitRepo == nil { - x.GitRepo = new(GitRepoVolumeSource) - } - x.GitRepo.CodecDecodeSelf(d) - } - case "secret": - if x.VolumeSource.Secret == nil { - x.VolumeSource.Secret = new(SecretVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Secret != nil { - x.Secret = nil - } - } else { - if x.Secret == nil { - x.Secret = new(SecretVolumeSource) - } - x.Secret.CodecDecodeSelf(d) - } - case "nfs": - if x.VolumeSource.NFS == nil { - x.VolumeSource.NFS = new(NFSVolumeSource) - } - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - case "iscsi": - if x.VolumeSource.ISCSI == nil { - x.VolumeSource.ISCSI = new(ISCSIVolumeSource) - } - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - case "glusterfs": - if x.VolumeSource.Glusterfs == nil { - x.VolumeSource.Glusterfs = new(GlusterfsVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - case "persistentVolumeClaim": - if x.VolumeSource.PersistentVolumeClaim == nil { - x.VolumeSource.PersistentVolumeClaim = 
new(PersistentVolumeClaimVolumeSource) - } - if r.TryDecodeAsNil() { - if x.PersistentVolumeClaim != nil { - x.PersistentVolumeClaim = nil - } - } else { - if x.PersistentVolumeClaim == nil { - x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - x.PersistentVolumeClaim.CodecDecodeSelf(d) - } - case "rbd": - if x.VolumeSource.RBD == nil { - x.VolumeSource.RBD = new(RBDVolumeSource) - } - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - case "quobyte": - if x.VolumeSource.Quobyte == nil { - x.VolumeSource.Quobyte = new(QuobyteVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Quobyte != nil { - x.Quobyte = nil - } - } else { - if x.Quobyte == nil { - x.Quobyte = new(QuobyteVolumeSource) - } - x.Quobyte.CodecDecodeSelf(d) - } - case "flexVolume": - if x.VolumeSource.FlexVolume == nil { - x.VolumeSource.FlexVolume = new(FlexVolumeSource) - } - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - case "cinder": - if x.VolumeSource.Cinder == nil { - x.VolumeSource.Cinder = new(CinderVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - case "cephfs": - if x.VolumeSource.CephFS == nil { - x.VolumeSource.CephFS = new(CephFSVolumeSource) - } - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - case "flocker": - if x.VolumeSource.Flocker == nil { - x.VolumeSource.Flocker = new(FlockerVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - case "downwardAPI": - if x.VolumeSource.DownwardAPI == nil { - x.VolumeSource.DownwardAPI = new(DownwardAPIVolumeSource) - } - if r.TryDecodeAsNil() { - if x.DownwardAPI != nil { - x.DownwardAPI = nil - } - } else { - if x.DownwardAPI == nil { - x.DownwardAPI = new(DownwardAPIVolumeSource) - } - x.DownwardAPI.CodecDecodeSelf(d) - } - case "fc": - if x.VolumeSource.FC == nil { - x.VolumeSource.FC = new(FCVolumeSource) - } - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - case "azureFile": - if x.VolumeSource.AzureFile == nil { - x.VolumeSource.AzureFile = new(AzureFileVolumeSource) - } - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - case "configMap": - if x.VolumeSource.ConfigMap == nil { - x.VolumeSource.ConfigMap = new(ConfigMapVolumeSource) - } - if r.TryDecodeAsNil() { - if x.ConfigMap != nil { - x.ConfigMap = nil - } - } else { - if x.ConfigMap == nil { - x.ConfigMap = new(ConfigMapVolumeSource) - } - x.ConfigMap.CodecDecodeSelf(d) - } - case "vsphereVolume": - if x.VolumeSource.VsphereVolume == nil { - x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = 
new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - case "azureDisk": - if x.VolumeSource.AzureDisk == nil { - x.VolumeSource.AzureDisk = new(AzureDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.AzureDisk != nil { - x.AzureDisk = nil - } - } else { - if x.AzureDisk == nil { - x.AzureDisk = new(AzureDiskVolumeSource) - } - x.AzureDisk.CodecDecodeSelf(d) - } - case "photonPersistentDisk": - if x.VolumeSource.PhotonPersistentDisk == nil { - x.VolumeSource.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.PhotonPersistentDisk != nil { - x.PhotonPersistentDisk = nil - } - } else { - if x.PhotonPersistentDisk == nil { - x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - x.PhotonPersistentDisk.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys130) - } // end switch yys130 - } // end for yyj130 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj155 int - var yyb155 bool - var yyhl155 bool = l >= 0 - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - if x.VolumeSource.HostPath == nil { - x.VolumeSource.HostPath = new(HostPathVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - if x.VolumeSource.EmptyDir == nil { - x.VolumeSource.EmptyDir = new(EmptyDirVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.EmptyDir != nil { - x.EmptyDir = nil - } - } else { - if x.EmptyDir == nil { - x.EmptyDir = new(EmptyDirVolumeSource) - } - x.EmptyDir.CodecDecodeSelf(d) - } - if x.VolumeSource.GCEPersistentDisk == nil { - x.VolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - if x.VolumeSource.AWSElasticBlockStore == nil { - x.VolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - if x.VolumeSource.GitRepo == nil { - x.VolumeSource.GitRepo = new(GitRepoVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GitRepo != nil { - x.GitRepo = nil - } - } else { - if x.GitRepo == nil { - x.GitRepo = new(GitRepoVolumeSource) - } - x.GitRepo.CodecDecodeSelf(d) - } - if x.VolumeSource.Secret == nil { - x.VolumeSource.Secret = new(SecretVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Secret != nil { - x.Secret = nil - } - } else { - if x.Secret == nil { - x.Secret = new(SecretVolumeSource) - } - x.Secret.CodecDecodeSelf(d) - } - if x.VolumeSource.NFS == nil { - x.VolumeSource.NFS = new(NFSVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - if x.VolumeSource.ISCSI == nil { - x.VolumeSource.ISCSI = new(ISCSIVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - if x.VolumeSource.Glusterfs == nil { - x.VolumeSource.Glusterfs = new(GlusterfsVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - if x.VolumeSource.PersistentVolumeClaim == nil { - x.VolumeSource.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PersistentVolumeClaim != nil { - x.PersistentVolumeClaim = nil - } - } else { - if x.PersistentVolumeClaim == nil { - x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - x.PersistentVolumeClaim.CodecDecodeSelf(d) - } - if x.VolumeSource.RBD == nil { - 
x.VolumeSource.RBD = new(RBDVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - if x.VolumeSource.Quobyte == nil { - x.VolumeSource.Quobyte = new(QuobyteVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Quobyte != nil { - x.Quobyte = nil - } - } else { - if x.Quobyte == nil { - x.Quobyte = new(QuobyteVolumeSource) - } - x.Quobyte.CodecDecodeSelf(d) - } - if x.VolumeSource.FlexVolume == nil { - x.VolumeSource.FlexVolume = new(FlexVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - if x.VolumeSource.Cinder == nil { - x.VolumeSource.Cinder = new(CinderVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - if x.VolumeSource.CephFS == nil { - x.VolumeSource.CephFS = new(CephFSVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - if x.VolumeSource.Flocker == nil { - x.VolumeSource.Flocker = new(FlockerVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - if x.VolumeSource.DownwardAPI == nil { - x.VolumeSource.DownwardAPI = new(DownwardAPIVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DownwardAPI != nil { - x.DownwardAPI = nil - } - } else { - if x.DownwardAPI == nil { - x.DownwardAPI = new(DownwardAPIVolumeSource) - } - 
x.DownwardAPI.CodecDecodeSelf(d) - } - if x.VolumeSource.FC == nil { - x.VolumeSource.FC = new(FCVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - if x.VolumeSource.AzureFile == nil { - x.VolumeSource.AzureFile = new(AzureFileVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - if x.VolumeSource.ConfigMap == nil { - x.VolumeSource.ConfigMap = new(ConfigMapVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ConfigMap != nil { - x.ConfigMap = nil - } - } else { - if x.ConfigMap == nil { - x.ConfigMap = new(ConfigMapVolumeSource) - } - x.ConfigMap.CodecDecodeSelf(d) - } - if x.VolumeSource.VsphereVolume == nil { - x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - if x.VolumeSource.AzureDisk == nil { - x.VolumeSource.AzureDisk = new(AzureDiskVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureDisk != nil { - x.AzureDisk = nil - } - } else { - if x.AzureDisk == nil { - x.AzureDisk = new(AzureDiskVolumeSource) - } - x.AzureDisk.CodecDecodeSelf(d) - } - if x.VolumeSource.PhotonPersistentDisk == nil { - x.VolumeSource.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PhotonPersistentDisk != nil { - x.PhotonPersistentDisk = nil - } - } else { - if x.PhotonPersistentDisk == nil { - x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - x.PhotonPersistentDisk.CodecDecodeSelf(d) - } - for { - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l - } else { - yyb155 = r.CheckBreak() - } - if yyb155 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj155-1, "") 
- } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym180 := z.EncBinary() - _ = yym180 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep181 := !z.EncBinary() - yy2arr181 := z.EncBasicHandle().StructToArray - var yyq181 [23]bool - _, _, _ = yysep181, yyq181, yy2arr181 - const yyr181 bool = false - yyq181[0] = x.HostPath != nil - yyq181[1] = x.EmptyDir != nil - yyq181[2] = x.GCEPersistentDisk != nil - yyq181[3] = x.AWSElasticBlockStore != nil - yyq181[4] = x.GitRepo != nil - yyq181[5] = x.Secret != nil - yyq181[6] = x.NFS != nil - yyq181[7] = x.ISCSI != nil - yyq181[8] = x.Glusterfs != nil - yyq181[9] = x.PersistentVolumeClaim != nil - yyq181[10] = x.RBD != nil - yyq181[11] = x.Quobyte != nil - yyq181[12] = x.FlexVolume != nil - yyq181[13] = x.Cinder != nil - yyq181[14] = x.CephFS != nil - yyq181[15] = x.Flocker != nil - yyq181[16] = x.DownwardAPI != nil - yyq181[17] = x.FC != nil - yyq181[18] = x.AzureFile != nil - yyq181[19] = x.ConfigMap != nil - yyq181[20] = x.VsphereVolume != nil - yyq181[21] = x.AzureDisk != nil - yyq181[22] = x.PhotonPersistentDisk != nil - var yynn181 int - if yyr181 || yy2arr181 { - r.EncodeArrayStart(23) - } else { - yynn181 = 0 - for _, b := range yyq181 { - if b { - yynn181++ - } - } - r.EncodeMapStart(yynn181) - yynn181 = 0 - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[0] { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[1] { - if x.EmptyDir == nil { - r.EncodeNil() - } else { - x.EmptyDir.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("emptyDir")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.EmptyDir == nil { - r.EncodeNil() - } else { - x.EmptyDir.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[2] { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[3] { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("awsElasticBlockStore")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[4] { - if x.GitRepo == nil { - r.EncodeNil() - } else { - x.GitRepo.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gitRepo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.GitRepo == nil { - r.EncodeNil() - } else { - x.GitRepo.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[5] { - if x.Secret == nil { - r.EncodeNil() - } else { - x.Secret.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secret")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Secret == nil { - r.EncodeNil() - } else { - x.Secret.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[6] { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[7] { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iscsi")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[8] { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[9] { - if x.PersistentVolumeClaim == nil { - r.EncodeNil() - } else { - x.PersistentVolumeClaim.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeClaim")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PersistentVolumeClaim == nil { - r.EncodeNil() - } else { - x.PersistentVolumeClaim.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[10] { - if x.RBD == nil { - r.EncodeNil() - } else 
{ - x.RBD.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rbd")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[11] { - if x.Quobyte == nil { - r.EncodeNil() - } else { - x.Quobyte.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("quobyte")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Quobyte == nil { - r.EncodeNil() - } else { - x.Quobyte.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[12] { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[13] { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cinder")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[14] { - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cephfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[15] { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flocker")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[16] { - if x.DownwardAPI == nil { - r.EncodeNil() - } else { - x.DownwardAPI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("downwardAPI")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DownwardAPI == nil { - r.EncodeNil() - } else { - x.DownwardAPI.CodecEncodeSelf(e) - } - } - } - if yyr181 || 
yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[17] { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[17] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fc")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[18] { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[18] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[19] { - if x.ConfigMap == nil { - r.EncodeNil() - } else { - x.ConfigMap.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[19] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("configMap")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ConfigMap == nil { - r.EncodeNil() - } else { - x.ConfigMap.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[20] { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[20] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[21] { - if x.AzureDisk == nil { - r.EncodeNil() - } else { - x.AzureDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[21] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AzureDisk == nil { - r.EncodeNil() - } else { - x.AzureDisk.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[22] { - if x.PhotonPersistentDisk == nil { - r.EncodeNil() - } else { - x.PhotonPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq181[22] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PhotonPersistentDisk == nil { - r.EncodeNil() - } else { - x.PhotonPersistentDisk.CodecEncodeSelf(e) - } - } - } - if yyr181 || yy2arr181 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *VolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ 
= h, z, r - yym205 := z.DecBinary() - _ = yym205 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct206 := r.ContainerType() - if yyct206 == codecSelferValueTypeMap1234 { - yyl206 := r.ReadMapStart() - if yyl206 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl206, d) - } - } else if yyct206 == codecSelferValueTypeArray1234 { - yyl206 := r.ReadArrayStart() - if yyl206 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl206, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *VolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys207Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys207Slc - var yyhl207 bool = l >= 0 - for yyj207 := 0; ; yyj207++ { - if yyhl207 { - if yyj207 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys207Slc = r.DecodeBytes(yys207Slc, true, true) - yys207 := string(yys207Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys207 { - case "hostPath": - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - case "emptyDir": - if r.TryDecodeAsNil() { - if x.EmptyDir != nil { - x.EmptyDir = nil - } - } else { - if x.EmptyDir == nil { - x.EmptyDir = new(EmptyDirVolumeSource) - } - x.EmptyDir.CodecDecodeSelf(d) - } - case "gcePersistentDisk": - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - case "awsElasticBlockStore": - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - case "gitRepo": - if r.TryDecodeAsNil() { - if x.GitRepo != nil { - x.GitRepo = nil - } - } else { - if x.GitRepo == nil { - x.GitRepo = new(GitRepoVolumeSource) - } - x.GitRepo.CodecDecodeSelf(d) - } - case "secret": - if r.TryDecodeAsNil() { - if x.Secret != nil { - x.Secret = nil - } - } else { - if x.Secret == nil { - x.Secret = new(SecretVolumeSource) - } - x.Secret.CodecDecodeSelf(d) - } - case "nfs": - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - case "iscsi": - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - case "glusterfs": - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - case "persistentVolumeClaim": - if r.TryDecodeAsNil() { - if x.PersistentVolumeClaim != nil { - x.PersistentVolumeClaim = nil - } - } else { - if x.PersistentVolumeClaim == nil { - x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - x.PersistentVolumeClaim.CodecDecodeSelf(d) - } - 
case "rbd": - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - case "quobyte": - if r.TryDecodeAsNil() { - if x.Quobyte != nil { - x.Quobyte = nil - } - } else { - if x.Quobyte == nil { - x.Quobyte = new(QuobyteVolumeSource) - } - x.Quobyte.CodecDecodeSelf(d) - } - case "flexVolume": - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - case "cinder": - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - case "cephfs": - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - case "flocker": - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - case "downwardAPI": - if r.TryDecodeAsNil() { - if x.DownwardAPI != nil { - x.DownwardAPI = nil - } - } else { - if x.DownwardAPI == nil { - x.DownwardAPI = new(DownwardAPIVolumeSource) - } - x.DownwardAPI.CodecDecodeSelf(d) - } - case "fc": - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - case "azureFile": - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - case "configMap": - if r.TryDecodeAsNil() { - if x.ConfigMap != nil { - x.ConfigMap = nil - } - } else { - if x.ConfigMap == nil { - x.ConfigMap = new(ConfigMapVolumeSource) - } - x.ConfigMap.CodecDecodeSelf(d) - } - case "vsphereVolume": - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - case "azureDisk": - if r.TryDecodeAsNil() { - if x.AzureDisk != nil { - x.AzureDisk = nil - } - } else { - if x.AzureDisk == nil { - x.AzureDisk = new(AzureDiskVolumeSource) - } - x.AzureDisk.CodecDecodeSelf(d) - } - case "photonPersistentDisk": - if r.TryDecodeAsNil() { - if x.PhotonPersistentDisk != nil { - x.PhotonPersistentDisk = nil - } - } else { - if x.PhotonPersistentDisk == nil { - x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - x.PhotonPersistentDisk.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys207) - } // end switch yys207 - } // end for yyj207 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj231 int - var yyb231 bool - var yyhl231 bool = l >= 0 - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = 
new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.EmptyDir != nil { - x.EmptyDir = nil - } - } else { - if x.EmptyDir == nil { - x.EmptyDir = new(EmptyDirVolumeSource) - } - x.EmptyDir.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GitRepo != nil { - x.GitRepo = nil - } - } else { - if x.GitRepo == nil { - x.GitRepo = new(GitRepoVolumeSource) - } - x.GitRepo.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Secret != nil { - x.Secret = nil - } - } else { - if x.Secret == nil { - x.Secret = new(SecretVolumeSource) - } - x.Secret.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - 
x.Glusterfs.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PersistentVolumeClaim != nil { - x.PersistentVolumeClaim = nil - } - } else { - if x.PersistentVolumeClaim == nil { - x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - x.PersistentVolumeClaim.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Quobyte != nil { - x.Quobyte = nil - } - } else { - if x.Quobyte == nil { - x.Quobyte = new(QuobyteVolumeSource) - } - x.Quobyte.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DownwardAPI != nil { - x.DownwardAPI = nil - } - } else { - if x.DownwardAPI == nil { - x.DownwardAPI = new(DownwardAPIVolumeSource) - } - x.DownwardAPI.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 
> l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ConfigMap != nil { - x.ConfigMap = nil - } - } else { - if x.ConfigMap == nil { - x.ConfigMap = new(ConfigMapVolumeSource) - } - x.ConfigMap.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureDisk != nil { - x.AzureDisk = nil - } - } else { - if x.AzureDisk == nil { - x.AzureDisk = new(AzureDiskVolumeSource) - } - x.AzureDisk.CodecDecodeSelf(d) - } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PhotonPersistentDisk != nil { - x.PhotonPersistentDisk = nil - } - } else { - if x.PhotonPersistentDisk == nil { - x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - x.PhotonPersistentDisk.CodecDecodeSelf(d) - } - for { - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l - } else { - yyb231 = r.CheckBreak() - } - if yyb231 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj231-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym255 := z.EncBinary() - _ = yym255 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep256 := !z.EncBinary() - yy2arr256 := z.EncBasicHandle().StructToArray - var yyq256 [17]bool - _, _, _ = yysep256, yyq256, yy2arr256 - const yyr256 bool = false - yyq256[0] = x.GCEPersistentDisk != nil - yyq256[1] = x.AWSElasticBlockStore != nil - yyq256[2] = x.HostPath != nil - yyq256[3] = x.Glusterfs != nil - 
yyq256[4] = x.NFS != nil - yyq256[5] = x.RBD != nil - yyq256[6] = x.Quobyte != nil - yyq256[7] = x.ISCSI != nil - yyq256[8] = x.FlexVolume != nil - yyq256[9] = x.Cinder != nil - yyq256[10] = x.CephFS != nil - yyq256[11] = x.FC != nil - yyq256[12] = x.Flocker != nil - yyq256[13] = x.AzureFile != nil - yyq256[14] = x.VsphereVolume != nil - yyq256[15] = x.AzureDisk != nil - yyq256[16] = x.PhotonPersistentDisk != nil - var yynn256 int - if yyr256 || yy2arr256 { - r.EncodeArrayStart(17) - } else { - yynn256 = 0 - for _, b := range yyq256 { - if b { - yynn256++ - } - } - r.EncodeMapStart(yynn256) - yynn256 = 0 - } - if yyr256 || yy2arr256 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq256[0] { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq256[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } - } - if yyr256 || yy2arr256 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq256[1] { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq256[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } - } - if yyr256 || yy2arr256 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq256[2] { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq256[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } - } - if yyr256 || yy2arr256 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq256[3] { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq256[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } - } - if yyr256 || yy2arr256 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq256[4] { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq256[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } - } - if yyr256 || yy2arr256 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq256[5] { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } else { - 
r.EncodeNil() - } - } else { - if yyq256[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rbd")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } - } - if yyr256 || yy2arr256 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq256[6] { - if x.Quobyte == nil { - r.EncodeNil() - } else { - x.Quobyte.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq256[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("quobyte")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Quobyte == nil { - r.EncodeNil() - } else { - x.Quobyte.CodecEncodeSelf(e) - } - } - } - if yyr256 || yy2arr256 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq256[7] { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq256[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iscsi")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } - } - if yyr256 || yy2arr256 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq256[8] { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq256[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } - } - if yyr256 || yy2arr256 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq256[9] { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq256[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cinder")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } - } - if yyr256 || yy2arr256 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq256[10] { - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq256[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cephfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } - } - if yyr256 || yy2arr256 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq256[11] { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq256[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fc")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } - } - if yyr256 || yy2arr256 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq256[12] { - if x.Flocker == nil 
{ - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq256[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flocker")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } - } - if yyr256 || yy2arr256 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq256[13] { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq256[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } - } - if yyr256 || yy2arr256 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq256[14] { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq256[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } - } - if yyr256 || yy2arr256 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq256[15] { - if x.AzureDisk == nil { - r.EncodeNil() - } else { - x.AzureDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq256[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AzureDisk == nil { - r.EncodeNil() - } else { - x.AzureDisk.CodecEncodeSelf(e) - } - } - } - if yyr256 || yy2arr256 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq256[16] { - if x.PhotonPersistentDisk == nil { - r.EncodeNil() - } else { - x.PhotonPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq256[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PhotonPersistentDisk == nil { - r.EncodeNil() - } else { - x.PhotonPersistentDisk.CodecEncodeSelf(e) - } - } - } - if yyr256 || yy2arr256 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym274 := z.DecBinary() - _ = yym274 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct275 := r.ContainerType() - if yyct275 == codecSelferValueTypeMap1234 { - yyl275 := r.ReadMapStart() - if yyl275 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl275, d) - } - } else if yyct275 == codecSelferValueTypeArray1234 { - yyl275 := r.ReadArrayStart() - if yyl275 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl275, d) - } - } else { - 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys276Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys276Slc - var yyhl276 bool = l >= 0 - for yyj276 := 0; ; yyj276++ { - if yyhl276 { - if yyj276 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys276Slc = r.DecodeBytes(yys276Slc, true, true) - yys276 := string(yys276Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys276 { - case "gcePersistentDisk": - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - case "awsElasticBlockStore": - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - case "hostPath": - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - case "glusterfs": - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - case "nfs": - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - case "rbd": - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - case "quobyte": - if r.TryDecodeAsNil() { - if x.Quobyte != nil { - x.Quobyte = nil - } - } else { - if x.Quobyte == nil { - x.Quobyte = new(QuobyteVolumeSource) - } - x.Quobyte.CodecDecodeSelf(d) - } - case "iscsi": - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - case "flexVolume": - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - case "cinder": - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - case "cephfs": - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - case "fc": - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - case "flocker": - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - case "azureFile": - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - 
x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - case "vsphereVolume": - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - case "azureDisk": - if r.TryDecodeAsNil() { - if x.AzureDisk != nil { - x.AzureDisk = nil - } - } else { - if x.AzureDisk == nil { - x.AzureDisk = new(AzureDiskVolumeSource) - } - x.AzureDisk.CodecDecodeSelf(d) - } - case "photonPersistentDisk": - if r.TryDecodeAsNil() { - if x.PhotonPersistentDisk != nil { - x.PhotonPersistentDisk = nil - } - } else { - if x.PhotonPersistentDisk == nil { - x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - x.PhotonPersistentDisk.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys276) - } // end switch yys276 - } // end for yyj276 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj294 int - var yyb294 bool - var yyhl294 bool = l >= 0 - yyj294++ - if yyhl294 { - yyb294 = yyj294 > l - } else { - yyb294 = r.CheckBreak() - } - if yyb294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - yyj294++ - if yyhl294 { - yyb294 = yyj294 > l - } else { - yyb294 = r.CheckBreak() - } - if yyb294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - yyj294++ - if yyhl294 { - yyb294 = yyj294 > l - } else { - yyb294 = r.CheckBreak() - } - if yyb294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - yyj294++ - if yyhl294 { - yyb294 = yyj294 > l - } else { - yyb294 = r.CheckBreak() - } - if yyb294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - yyj294++ - if yyhl294 { - yyb294 = yyj294 > l - } else { - yyb294 = r.CheckBreak() - } - if yyb294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - yyj294++ - if yyhl294 { - yyb294 = yyj294 > l - } else { - 
yyb294 = r.CheckBreak() - } - if yyb294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - yyj294++ - if yyhl294 { - yyb294 = yyj294 > l - } else { - yyb294 = r.CheckBreak() - } - if yyb294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Quobyte != nil { - x.Quobyte = nil - } - } else { - if x.Quobyte == nil { - x.Quobyte = new(QuobyteVolumeSource) - } - x.Quobyte.CodecDecodeSelf(d) - } - yyj294++ - if yyhl294 { - yyb294 = yyj294 > l - } else { - yyb294 = r.CheckBreak() - } - if yyb294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - yyj294++ - if yyhl294 { - yyb294 = yyj294 > l - } else { - yyb294 = r.CheckBreak() - } - if yyb294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - yyj294++ - if yyhl294 { - yyb294 = yyj294 > l - } else { - yyb294 = r.CheckBreak() - } - if yyb294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - yyj294++ - if yyhl294 { - yyb294 = yyj294 > l - } else { - yyb294 = r.CheckBreak() - } - if yyb294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - yyj294++ - if yyhl294 { - yyb294 = yyj294 > l - } else { - yyb294 = r.CheckBreak() - } - if yyb294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - yyj294++ - if yyhl294 { - yyb294 = yyj294 > l - } else { - yyb294 = r.CheckBreak() - } - if yyb294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - yyj294++ - if yyhl294 { - yyb294 = yyj294 > l - } else { - yyb294 = r.CheckBreak() - } - if yyb294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureFile != nil 
{ - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - yyj294++ - if yyhl294 { - yyb294 = yyj294 > l - } else { - yyb294 = r.CheckBreak() - } - if yyb294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - yyj294++ - if yyhl294 { - yyb294 = yyj294 > l - } else { - yyb294 = r.CheckBreak() - } - if yyb294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureDisk != nil { - x.AzureDisk = nil - } - } else { - if x.AzureDisk == nil { - x.AzureDisk = new(AzureDiskVolumeSource) - } - x.AzureDisk.CodecDecodeSelf(d) - } - yyj294++ - if yyhl294 { - yyb294 = yyj294 > l - } else { - yyb294 = r.CheckBreak() - } - if yyb294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PhotonPersistentDisk != nil { - x.PhotonPersistentDisk = nil - } - } else { - if x.PhotonPersistentDisk == nil { - x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - x.PhotonPersistentDisk.CodecDecodeSelf(d) - } - for { - yyj294++ - if yyhl294 { - yyb294 = yyj294 > l - } else { - yyb294 = r.CheckBreak() - } - if yyb294 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj294-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeClaimVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym312 := z.EncBinary() - _ = yym312 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep313 := !z.EncBinary() - yy2arr313 := z.EncBasicHandle().StructToArray - var yyq313 [2]bool - _, _, _ = yysep313, yyq313, yy2arr313 - const yyr313 bool = false - yyq313[1] = x.ReadOnly != false - var yynn313 int - if yyr313 || yy2arr313 { - r.EncodeArrayStart(2) - } else { - yynn313 = 1 - for _, b := range yyq313 { - if b { - yynn313++ - } - } - r.EncodeMapStart(yynn313) - yynn313 = 0 - } - if yyr313 || yy2arr313 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym315 := z.EncBinary() - _ = yym315 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClaimName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("claimName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym316 := z.EncBinary() - _ = yym316 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClaimName)) - } - } - if yyr313 || yy2arr313 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq313[1] { - yym318 := z.EncBinary() - _ = yym318 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq313[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym319 := z.EncBinary() - _ = yym319 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr313 || yy2arr313 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeClaimVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym320 := z.DecBinary() - _ = yym320 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct321 := r.ContainerType() - if yyct321 == codecSelferValueTypeMap1234 { - yyl321 := r.ReadMapStart() - if yyl321 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl321, d) - } - } else if yyct321 == codecSelferValueTypeArray1234 { - yyl321 := r.ReadArrayStart() - if yyl321 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl321, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys322Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys322Slc - var yyhl322 bool = l >= 0 - for yyj322 := 0; ; yyj322++ { - if yyhl322 { - if yyj322 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys322Slc = r.DecodeBytes(yys322Slc, true, true) - yys322 := string(yys322Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys322 { - case "claimName": - if r.TryDecodeAsNil() { - x.ClaimName = "" - } else { - x.ClaimName = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys322) - } // end switch yys322 - } // end for yyj322 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj325 int - var yyb325 bool - var yyhl325 bool = l >= 0 - yyj325++ - if yyhl325 { - yyb325 = yyj325 > l - } else { - yyb325 = r.CheckBreak() - } - if yyb325 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClaimName = "" - } else { - x.ClaimName = string(r.DecodeString()) - } - yyj325++ - if yyhl325 { - yyb325 = yyj325 > l - } else { - yyb325 = r.CheckBreak() - } - if yyb325 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj325++ - if yyhl325 { - yyb325 = yyj325 > l - } else { - yyb325 = r.CheckBreak() - } - if yyb325 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj325-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolume) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) 
- _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym328 := z.EncBinary() - _ = yym328 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep329 := !z.EncBinary() - yy2arr329 := z.EncBasicHandle().StructToArray - var yyq329 [5]bool - _, _, _ = yysep329, yyq329, yy2arr329 - const yyr329 bool = false - yyq329[0] = x.Kind != "" - yyq329[1] = x.APIVersion != "" - yyq329[2] = true - yyq329[3] = true - yyq329[4] = true - var yynn329 int - if yyr329 || yy2arr329 { - r.EncodeArrayStart(5) - } else { - yynn329 = 0 - for _, b := range yyq329 { - if b { - yynn329++ - } - } - r.EncodeMapStart(yynn329) - yynn329 = 0 - } - if yyr329 || yy2arr329 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq329[0] { - yym331 := z.EncBinary() - _ = yym331 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq329[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym332 := z.EncBinary() - _ = yym332 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr329 || yy2arr329 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq329[1] { - yym334 := z.EncBinary() - _ = yym334 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq329[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym335 := z.EncBinary() - _ = yym335 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr329 || yy2arr329 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq329[2] { - yy337 := &x.ObjectMeta - yy337.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq329[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy338 := &x.ObjectMeta - yy338.CodecEncodeSelf(e) - } - } - if yyr329 || yy2arr329 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq329[3] { - yy340 := &x.Spec - yy340.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq329[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy341 := &x.Spec - yy341.CodecEncodeSelf(e) - } - } - if yyr329 || yy2arr329 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq329[4] { - yy343 := &x.Status - yy343.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq329[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy344 := &x.Status - yy344.CodecEncodeSelf(e) - } - } - if yyr329 || yy2arr329 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolume) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym345 := z.DecBinary() - _ = yym345 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct346 := r.ContainerType() - if yyct346 == codecSelferValueTypeMap1234 { - yyl346 := r.ReadMapStart() - if yyl346 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl346, d) - } - } else if yyct346 == codecSelferValueTypeArray1234 { - yyl346 := r.ReadArrayStart() - if yyl346 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl346, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys347Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys347Slc - var yyhl347 bool = l >= 0 - for yyj347 := 0; ; yyj347++ { - if yyhl347 { - if yyj347 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys347Slc = r.DecodeBytes(yys347Slc, true, true) - yys347 := string(yys347Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys347 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv350 := &x.ObjectMeta - yyv350.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PersistentVolumeSpec{} - } else { - yyv351 := &x.Spec - yyv351.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = PersistentVolumeStatus{} - } else { - yyv352 := &x.Status - yyv352.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys347) - } // end switch yys347 - } // end for yyj347 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj353 int - var yyb353 bool - var yyhl353 bool = l >= 0 - yyj353++ - if yyhl353 { - yyb353 = yyj353 > l - } else { - yyb353 = r.CheckBreak() - } - if yyb353 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj353++ - if yyhl353 { - yyb353 = yyj353 > l - } else { - yyb353 = r.CheckBreak() - } - if yyb353 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj353++ - if yyhl353 { - yyb353 = yyj353 > l - } else { - yyb353 = r.CheckBreak() - } - if yyb353 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv356 := &x.ObjectMeta - yyv356.CodecDecodeSelf(d) - } - yyj353++ - if yyhl353 { - yyb353 = yyj353 > l - } else { - yyb353 = r.CheckBreak() - } - if yyb353 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PersistentVolumeSpec{} - } else { - yyv357 := &x.Spec - yyv357.CodecDecodeSelf(d) - } - yyj353++ - if yyhl353 { - yyb353 = yyj353 > l - } else { - yyb353 = r.CheckBreak() - } - if yyb353 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = PersistentVolumeStatus{} - } else { - yyv358 := &x.Status - yyv358.CodecDecodeSelf(d) - } - for { - yyj353++ - if yyhl353 { - yyb353 = yyj353 > l - } else { - yyb353 = r.CheckBreak() - } - if yyb353 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj353-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym359 := z.EncBinary() - _ = yym359 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep360 := !z.EncBinary() - yy2arr360 := z.EncBasicHandle().StructToArray - var yyq360 [21]bool - _, _, _ = yysep360, yyq360, yy2arr360 - const yyr360 bool = false - yyq360[1] = x.PersistentVolumeSource.GCEPersistentDisk != nil && x.GCEPersistentDisk != nil - yyq360[2] = x.PersistentVolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil - yyq360[3] = x.PersistentVolumeSource.HostPath != nil && x.HostPath != nil - yyq360[4] = x.PersistentVolumeSource.Glusterfs != nil && x.Glusterfs != nil - yyq360[5] = x.PersistentVolumeSource.NFS != nil && x.NFS != nil - yyq360[6] = x.PersistentVolumeSource.RBD != nil && x.RBD != nil - yyq360[7] = x.PersistentVolumeSource.Quobyte != nil && x.Quobyte != nil - yyq360[8] = x.PersistentVolumeSource.ISCSI != nil && x.ISCSI != nil - yyq360[9] = x.PersistentVolumeSource.FlexVolume != nil && x.FlexVolume != nil - yyq360[10] = x.PersistentVolumeSource.Cinder != nil && x.Cinder != nil - yyq360[11] = x.PersistentVolumeSource.CephFS != nil && x.CephFS != nil - yyq360[12] = x.PersistentVolumeSource.FC != nil && x.FC != nil - yyq360[13] = x.PersistentVolumeSource.Flocker != nil && x.Flocker != nil - yyq360[14] = x.PersistentVolumeSource.AzureFile != nil && x.AzureFile != nil - yyq360[15] = x.PersistentVolumeSource.VsphereVolume != nil && x.VsphereVolume != nil - yyq360[16] = x.PersistentVolumeSource.AzureDisk != nil && x.AzureDisk != nil - yyq360[17] = x.PersistentVolumeSource.PhotonPersistentDisk != nil && x.PhotonPersistentDisk != nil - yyq360[18] = len(x.AccessModes) != 0 - yyq360[19] = x.ClaimRef != nil - yyq360[20] = x.PersistentVolumeReclaimPolicy != "" - var yynn360 int - if yyr360 || yy2arr360 { - r.EncodeArrayStart(21) - } else { - yynn360 = 1 - for _, b := range yyq360 { - if b { - yynn360++ - } - } - r.EncodeMapStart(yynn360) - yynn360 = 0 - } - if yyr360 || yy2arr360 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("capacity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } - var yyn362 bool - if 
x.PersistentVolumeSource.GCEPersistentDisk == nil { - yyn362 = true - goto LABEL362 - } - LABEL362: - if yyr360 || yy2arr360 { - if yyn362 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[1] { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq360[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn362 { - r.EncodeNil() - } else { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } - } - } - var yyn363 bool - if x.PersistentVolumeSource.AWSElasticBlockStore == nil { - yyn363 = true - goto LABEL363 - } - LABEL363: - if yyr360 || yy2arr360 { - if yyn363 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[2] { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq360[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn363 { - r.EncodeNil() - } else { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } - } - } - var yyn364 bool - if x.PersistentVolumeSource.HostPath == nil { - yyn364 = true - goto LABEL364 - } - LABEL364: - if yyr360 || yy2arr360 { - if yyn364 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[3] { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq360[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn364 { - r.EncodeNil() - } else { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } - } - } - var yyn365 bool - if x.PersistentVolumeSource.Glusterfs == nil { - yyn365 = true - goto LABEL365 - } - LABEL365: - if yyr360 || yy2arr360 { - if yyn365 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[4] { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq360[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn365 { - r.EncodeNil() - } else { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } - } - } - var yyn366 bool - if x.PersistentVolumeSource.NFS == nil { - yyn366 = true - goto LABEL366 - } - LABEL366: - if yyr360 || yy2arr360 { - if yyn366 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[5] { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq360[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nfs")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn366 { - r.EncodeNil() - } else { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } - } - } - var yyn367 bool - if x.PersistentVolumeSource.RBD == nil { - yyn367 = true - goto LABEL367 - } - LABEL367: - if yyr360 || yy2arr360 { - if yyn367 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[6] { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq360[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rbd")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn367 { - r.EncodeNil() - } else { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } - } - } - var yyn368 bool - if x.PersistentVolumeSource.Quobyte == nil { - yyn368 = true - goto LABEL368 - } - LABEL368: - if yyr360 || yy2arr360 { - if yyn368 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[7] { - if x.Quobyte == nil { - r.EncodeNil() - } else { - x.Quobyte.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq360[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("quobyte")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn368 { - r.EncodeNil() - } else { - if x.Quobyte == nil { - r.EncodeNil() - } else { - x.Quobyte.CodecEncodeSelf(e) - } - } - } - } - var yyn369 bool - if x.PersistentVolumeSource.ISCSI == nil { - yyn369 = true - goto LABEL369 - } - LABEL369: - if yyr360 || yy2arr360 { - if yyn369 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[8] { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq360[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iscsi")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn369 { - r.EncodeNil() - } else { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } - } - } - var yyn370 bool - if x.PersistentVolumeSource.FlexVolume == nil { - yyn370 = true - goto LABEL370 - } - LABEL370: - if yyr360 || yy2arr360 { - if yyn370 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[9] { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq360[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn370 { - r.EncodeNil() - } else { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } - } - } - var yyn371 bool - if x.PersistentVolumeSource.Cinder == nil { - yyn371 = true - goto LABEL371 - } - LABEL371: - if yyr360 || yy2arr360 { - if yyn371 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[10] { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq360[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("cinder")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn371 { - r.EncodeNil() - } else { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } - } - } - var yyn372 bool - if x.PersistentVolumeSource.CephFS == nil { - yyn372 = true - goto LABEL372 - } - LABEL372: - if yyr360 || yy2arr360 { - if yyn372 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[11] { - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq360[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cephfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn372 { - r.EncodeNil() - } else { - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } - } - } - var yyn373 bool - if x.PersistentVolumeSource.FC == nil { - yyn373 = true - goto LABEL373 - } - LABEL373: - if yyr360 || yy2arr360 { - if yyn373 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[12] { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq360[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fc")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn373 { - r.EncodeNil() - } else { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } - } - } - var yyn374 bool - if x.PersistentVolumeSource.Flocker == nil { - yyn374 = true - goto LABEL374 - } - LABEL374: - if yyr360 || yy2arr360 { - if yyn374 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[13] { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq360[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flocker")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn374 { - r.EncodeNil() - } else { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } - } - } - var yyn375 bool - if x.PersistentVolumeSource.AzureFile == nil { - yyn375 = true - goto LABEL375 - } - LABEL375: - if yyr360 || yy2arr360 { - if yyn375 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[14] { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq360[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn375 { - r.EncodeNil() - } else { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } - } - } - var yyn376 bool - if x.PersistentVolumeSource.VsphereVolume == nil { - yyn376 = true - goto LABEL376 - } - LABEL376: - if yyr360 || yy2arr360 { - if yyn376 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[15] { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else 
{ - if yyq360[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn376 { - r.EncodeNil() - } else { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } - } - } - var yyn377 bool - if x.PersistentVolumeSource.AzureDisk == nil { - yyn377 = true - goto LABEL377 - } - LABEL377: - if yyr360 || yy2arr360 { - if yyn377 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[16] { - if x.AzureDisk == nil { - r.EncodeNil() - } else { - x.AzureDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq360[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn377 { - r.EncodeNil() - } else { - if x.AzureDisk == nil { - r.EncodeNil() - } else { - x.AzureDisk.CodecEncodeSelf(e) - } - } - } - } - var yyn378 bool - if x.PersistentVolumeSource.PhotonPersistentDisk == nil { - yyn378 = true - goto LABEL378 - } - LABEL378: - if yyr360 || yy2arr360 { - if yyn378 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[17] { - if x.PhotonPersistentDisk == nil { - r.EncodeNil() - } else { - x.PhotonPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq360[17] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn378 { - r.EncodeNil() - } else { - if x.PhotonPersistentDisk == nil { - r.EncodeNil() - } else { - x.PhotonPersistentDisk.CodecEncodeSelf(e) - } - } - } - } - if yyr360 || yy2arr360 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[18] { - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym380 := z.EncBinary() - _ = yym380 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq360[18] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("accessModes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym381 := z.EncBinary() - _ = yym381 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } - } - if yyr360 || yy2arr360 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[19] { - if x.ClaimRef == nil { - r.EncodeNil() - } else { - x.ClaimRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq360[19] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("claimRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ClaimRef == nil { - r.EncodeNil() - } else { - x.ClaimRef.CodecEncodeSelf(e) - } - } - } - if yyr360 || yy2arr360 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[20] { - x.PersistentVolumeReclaimPolicy.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq360[20] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeReclaimPolicy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.PersistentVolumeReclaimPolicy.CodecEncodeSelf(e) - } - } - if yyr360 || yy2arr360 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym384 := z.DecBinary() - _ = yym384 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct385 := r.ContainerType() - if yyct385 == codecSelferValueTypeMap1234 { - yyl385 := r.ReadMapStart() - if yyl385 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl385, d) - } - } else if yyct385 == codecSelferValueTypeArray1234 { - yyl385 := r.ReadArrayStart() - if yyl385 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl385, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys386Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys386Slc - var yyhl386 bool = l >= 0 - for yyj386 := 0; ; yyj386++ { - if yyhl386 { - if yyj386 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys386Slc = r.DecodeBytes(yys386Slc, true, true) - yys386 := string(yys386Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys386 { - case "capacity": - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv387 := &x.Capacity - yyv387.CodecDecodeSelf(d) - } - case "gcePersistentDisk": - if x.PersistentVolumeSource.GCEPersistentDisk == nil { - x.PersistentVolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - case "awsElasticBlockStore": - if x.PersistentVolumeSource.AWSElasticBlockStore == nil { - x.PersistentVolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - case "hostPath": - if x.PersistentVolumeSource.HostPath == nil { - x.PersistentVolumeSource.HostPath = new(HostPathVolumeSource) - } - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - case "glusterfs": - if x.PersistentVolumeSource.Glusterfs == nil { - x.PersistentVolumeSource.Glusterfs = new(GlusterfsVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - case "nfs": - if x.PersistentVolumeSource.NFS == nil { - x.PersistentVolumeSource.NFS 
= new(NFSVolumeSource) - } - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - case "rbd": - if x.PersistentVolumeSource.RBD == nil { - x.PersistentVolumeSource.RBD = new(RBDVolumeSource) - } - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - case "quobyte": - if x.PersistentVolumeSource.Quobyte == nil { - x.PersistentVolumeSource.Quobyte = new(QuobyteVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Quobyte != nil { - x.Quobyte = nil - } - } else { - if x.Quobyte == nil { - x.Quobyte = new(QuobyteVolumeSource) - } - x.Quobyte.CodecDecodeSelf(d) - } - case "iscsi": - if x.PersistentVolumeSource.ISCSI == nil { - x.PersistentVolumeSource.ISCSI = new(ISCSIVolumeSource) - } - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - case "flexVolume": - if x.PersistentVolumeSource.FlexVolume == nil { - x.PersistentVolumeSource.FlexVolume = new(FlexVolumeSource) - } - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - case "cinder": - if x.PersistentVolumeSource.Cinder == nil { - x.PersistentVolumeSource.Cinder = new(CinderVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - case "cephfs": - if x.PersistentVolumeSource.CephFS == nil { - x.PersistentVolumeSource.CephFS = new(CephFSVolumeSource) - } - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - case "fc": - if x.PersistentVolumeSource.FC == nil { - x.PersistentVolumeSource.FC = new(FCVolumeSource) - } - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - case "flocker": - if x.PersistentVolumeSource.Flocker == nil { - x.PersistentVolumeSource.Flocker = new(FlockerVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - case "azureFile": - if x.PersistentVolumeSource.AzureFile == nil { - x.PersistentVolumeSource.AzureFile = new(AzureFileVolumeSource) - } - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - case "vsphereVolume": - if x.PersistentVolumeSource.VsphereVolume == nil { - x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - case "azureDisk": - if x.PersistentVolumeSource.AzureDisk == nil { - x.PersistentVolumeSource.AzureDisk = new(AzureDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.AzureDisk != nil { - x.AzureDisk = nil - } - } else { - 
if x.AzureDisk == nil { - x.AzureDisk = new(AzureDiskVolumeSource) - } - x.AzureDisk.CodecDecodeSelf(d) - } - case "photonPersistentDisk": - if x.PersistentVolumeSource.PhotonPersistentDisk == nil { - x.PersistentVolumeSource.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.PhotonPersistentDisk != nil { - x.PhotonPersistentDisk = nil - } - } else { - if x.PhotonPersistentDisk == nil { - x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - x.PhotonPersistentDisk.CodecDecodeSelf(d) - } - case "accessModes": - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv405 := &x.AccessModes - yym406 := z.DecBinary() - _ = yym406 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv405), d) - } - } - case "claimRef": - if r.TryDecodeAsNil() { - if x.ClaimRef != nil { - x.ClaimRef = nil - } - } else { - if x.ClaimRef == nil { - x.ClaimRef = new(ObjectReference) - } - x.ClaimRef.CodecDecodeSelf(d) - } - case "persistentVolumeReclaimPolicy": - if r.TryDecodeAsNil() { - x.PersistentVolumeReclaimPolicy = "" - } else { - x.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys386) - } // end switch yys386 - } // end for yyj386 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj409 int - var yyb409 bool - var yyhl409 bool = l >= 0 - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv410 := &x.Capacity - yyv410.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.GCEPersistentDisk == nil { - x.PersistentVolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.AWSElasticBlockStore == nil { - x.PersistentVolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.HostPath == nil { - x.PersistentVolumeSource.HostPath = new(HostPathVolumeSource) - } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.Glusterfs == nil { - x.PersistentVolumeSource.Glusterfs = new(GlusterfsVolumeSource) - } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.NFS == nil { - x.PersistentVolumeSource.NFS = new(NFSVolumeSource) - } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.RBD == nil { - x.PersistentVolumeSource.RBD = new(RBDVolumeSource) - } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.Quobyte == nil { - x.PersistentVolumeSource.Quobyte = new(QuobyteVolumeSource) - } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Quobyte != nil { - x.Quobyte = nil - } - } else { - if x.Quobyte == nil { - x.Quobyte = new(QuobyteVolumeSource) - } - x.Quobyte.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.ISCSI == nil { - x.PersistentVolumeSource.ISCSI = new(ISCSIVolumeSource) - } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.FlexVolume == nil { - x.PersistentVolumeSource.FlexVolume = new(FlexVolumeSource) - } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.Cinder == nil { - x.PersistentVolumeSource.Cinder = new(CinderVolumeSource) - } - yyj409++ - if yyhl409 { - 
yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.CephFS == nil { - x.PersistentVolumeSource.CephFS = new(CephFSVolumeSource) - } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.FC == nil { - x.PersistentVolumeSource.FC = new(FCVolumeSource) - } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.Flocker == nil { - x.PersistentVolumeSource.Flocker = new(FlockerVolumeSource) - } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.AzureFile == nil { - x.PersistentVolumeSource.AzureFile = new(AzureFileVolumeSource) - } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.VsphereVolume == nil { - x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.AzureDisk == nil { - x.PersistentVolumeSource.AzureDisk = new(AzureDiskVolumeSource) - } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureDisk != nil { - x.AzureDisk = nil - } - } else { - if 
x.AzureDisk == nil { - x.AzureDisk = new(AzureDiskVolumeSource) - } - x.AzureDisk.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.PhotonPersistentDisk == nil { - x.PersistentVolumeSource.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PhotonPersistentDisk != nil { - x.PhotonPersistentDisk = nil - } - } else { - if x.PhotonPersistentDisk == nil { - x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - x.PhotonPersistentDisk.CodecDecodeSelf(d) - } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv428 := &x.AccessModes - yym429 := z.DecBinary() - _ = yym429 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv428), d) - } - } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ClaimRef != nil { - x.ClaimRef = nil - } - } else { - if x.ClaimRef == nil { - x.ClaimRef = new(ObjectReference) - } - x.ClaimRef.CodecDecodeSelf(d) - } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PersistentVolumeReclaimPolicy = "" - } else { - x.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(r.DecodeString()) - } - for { - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l - } else { - yyb409 = r.CheckBreak() - } - if yyb409 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj409-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x PersistentVolumeReclaimPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym432 := z.EncBinary() - _ = yym432 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PersistentVolumeReclaimPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym433 := z.DecBinary() - _ = yym433 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *PersistentVolumeStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym434 := z.EncBinary() - _ = yym434 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep435 := !z.EncBinary() - yy2arr435 := z.EncBasicHandle().StructToArray - var yyq435 [3]bool - _, _, _ = yysep435, yyq435, yy2arr435 - const yyr435 bool = false - yyq435[0] = x.Phase != "" - yyq435[1] = x.Message != "" - 
yyq435[2] = x.Reason != "" - var yynn435 int - if yyr435 || yy2arr435 { - r.EncodeArrayStart(3) - } else { - yynn435 = 0 - for _, b := range yyq435 { - if b { - yynn435++ - } - } - r.EncodeMapStart(yynn435) - yynn435 = 0 - } - if yyr435 || yy2arr435 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq435[0] { - x.Phase.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq435[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("phase")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Phase.CodecEncodeSelf(e) - } - } - if yyr435 || yy2arr435 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq435[1] { - yym438 := z.EncBinary() - _ = yym438 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq435[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym439 := z.EncBinary() - _ = yym439 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr435 || yy2arr435 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq435[2] { - yym441 := z.EncBinary() - _ = yym441 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq435[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym442 := z.EncBinary() - _ = yym442 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr435 || yy2arr435 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym443 := z.DecBinary() - _ = yym443 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct444 := r.ContainerType() - if yyct444 == codecSelferValueTypeMap1234 { - yyl444 := r.ReadMapStart() - if yyl444 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl444, d) - } - } else if yyct444 == codecSelferValueTypeArray1234 { - yyl444 := r.ReadArrayStart() - if yyl444 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl444, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys445Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys445Slc - var yyhl445 bool = l >= 0 - for yyj445 := 0; ; yyj445++ { - if yyhl445 { - if yyj445 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys445Slc = r.DecodeBytes(yys445Slc, true, true) - yys445 := string(yys445Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys445 { - case "phase": - 
if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = PersistentVolumePhase(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys445) - } // end switch yys445 - } // end for yyj445 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj449 int - var yyb449 bool - var yyhl449 bool = l >= 0 - yyj449++ - if yyhl449 { - yyb449 = yyj449 > l - } else { - yyb449 = r.CheckBreak() - } - if yyb449 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = PersistentVolumePhase(r.DecodeString()) - } - yyj449++ - if yyhl449 { - yyb449 = yyj449 > l - } else { - yyb449 = r.CheckBreak() - } - if yyb449 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - yyj449++ - if yyhl449 { - yyb449 = yyj449 > l - } else { - yyb449 = r.CheckBreak() - } - if yyb449 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - for { - yyj449++ - if yyhl449 { - yyb449 = yyj449 > l - } else { - yyb449 = r.CheckBreak() - } - if yyb449 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj449-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym453 := z.EncBinary() - _ = yym453 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep454 := !z.EncBinary() - yy2arr454 := z.EncBasicHandle().StructToArray - var yyq454 [4]bool - _, _, _ = yysep454, yyq454, yy2arr454 - const yyr454 bool = false - yyq454[0] = x.Kind != "" - yyq454[1] = x.APIVersion != "" - yyq454[2] = true - var yynn454 int - if yyr454 || yy2arr454 { - r.EncodeArrayStart(4) - } else { - yynn454 = 1 - for _, b := range yyq454 { - if b { - yynn454++ - } - } - r.EncodeMapStart(yynn454) - yynn454 = 0 - } - if yyr454 || yy2arr454 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq454[0] { - yym456 := z.EncBinary() - _ = yym456 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq454[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym457 := z.EncBinary() - _ = yym457 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr454 || yy2arr454 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq454[1] { - yym459 := z.EncBinary() - _ 
= yym459 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq454[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym460 := z.EncBinary() - _ = yym460 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr454 || yy2arr454 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq454[2] { - yy462 := &x.ListMeta - yym463 := z.EncBinary() - _ = yym463 - if false { - } else if z.HasExtensions() && z.EncExt(yy462) { - } else { - z.EncFallback(yy462) - } - } else { - r.EncodeNil() - } - } else { - if yyq454[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy464 := &x.ListMeta - yym465 := z.EncBinary() - _ = yym465 - if false { - } else if z.HasExtensions() && z.EncExt(yy464) { - } else { - z.EncFallback(yy464) - } - } - } - if yyr454 || yy2arr454 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym467 := z.EncBinary() - _ = yym467 - if false { - } else { - h.encSlicePersistentVolume(([]PersistentVolume)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym468 := z.EncBinary() - _ = yym468 - if false { - } else { - h.encSlicePersistentVolume(([]PersistentVolume)(x.Items), e) - } - } - } - if yyr454 || yy2arr454 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym469 := z.DecBinary() - _ = yym469 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct470 := r.ContainerType() - if yyct470 == codecSelferValueTypeMap1234 { - yyl470 := r.ReadMapStart() - if yyl470 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl470, d) - } - } else if yyct470 == codecSelferValueTypeArray1234 { - yyl470 := r.ReadArrayStart() - if yyl470 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl470, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys471Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys471Slc - var yyhl471 bool = l >= 0 - for yyj471 := 0; ; yyj471++ { - if yyhl471 { - if yyj471 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys471Slc = r.DecodeBytes(yys471Slc, true, true) - yys471 := string(yys471Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys471 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - 
case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv474 := &x.ListMeta - yym475 := z.DecBinary() - _ = yym475 - if false { - } else if z.HasExtensions() && z.DecExt(yyv474) { - } else { - z.DecFallback(yyv474, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv476 := &x.Items - yym477 := z.DecBinary() - _ = yym477 - if false { - } else { - h.decSlicePersistentVolume((*[]PersistentVolume)(yyv476), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys471) - } // end switch yys471 - } // end for yyj471 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj478 int - var yyb478 bool - var yyhl478 bool = l >= 0 - yyj478++ - if yyhl478 { - yyb478 = yyj478 > l - } else { - yyb478 = r.CheckBreak() - } - if yyb478 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj478++ - if yyhl478 { - yyb478 = yyj478 > l - } else { - yyb478 = r.CheckBreak() - } - if yyb478 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj478++ - if yyhl478 { - yyb478 = yyj478 > l - } else { - yyb478 = r.CheckBreak() - } - if yyb478 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv481 := &x.ListMeta - yym482 := z.DecBinary() - _ = yym482 - if false { - } else if z.HasExtensions() && z.DecExt(yyv481) { - } else { - z.DecFallback(yyv481, false) - } - } - yyj478++ - if yyhl478 { - yyb478 = yyj478 > l - } else { - yyb478 = r.CheckBreak() - } - if yyb478 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv483 := &x.Items - yym484 := z.DecBinary() - _ = yym484 - if false { - } else { - h.decSlicePersistentVolume((*[]PersistentVolume)(yyv483), d) - } - } - for { - yyj478++ - if yyhl478 { - yyb478 = yyj478 > l - } else { - yyb478 = r.CheckBreak() - } - if yyb478 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj478-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeClaim) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym485 := z.EncBinary() - _ = yym485 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep486 := !z.EncBinary() - yy2arr486 := z.EncBasicHandle().StructToArray - var yyq486 [5]bool - _, _, _ = yysep486, yyq486, yy2arr486 - const yyr486 bool = false - yyq486[0] = x.Kind != "" - yyq486[1] = x.APIVersion != "" - yyq486[2] = true - yyq486[3] = true - yyq486[4] = true - var yynn486 int - if yyr486 
|| yy2arr486 { - r.EncodeArrayStart(5) - } else { - yynn486 = 0 - for _, b := range yyq486 { - if b { - yynn486++ - } - } - r.EncodeMapStart(yynn486) - yynn486 = 0 - } - if yyr486 || yy2arr486 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq486[0] { - yym488 := z.EncBinary() - _ = yym488 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq486[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym489 := z.EncBinary() - _ = yym489 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr486 || yy2arr486 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq486[1] { - yym491 := z.EncBinary() - _ = yym491 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq486[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym492 := z.EncBinary() - _ = yym492 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr486 || yy2arr486 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq486[2] { - yy494 := &x.ObjectMeta - yy494.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq486[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy495 := &x.ObjectMeta - yy495.CodecEncodeSelf(e) - } - } - if yyr486 || yy2arr486 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq486[3] { - yy497 := &x.Spec - yy497.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq486[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy498 := &x.Spec - yy498.CodecEncodeSelf(e) - } - } - if yyr486 || yy2arr486 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq486[4] { - yy500 := &x.Status - yy500.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq486[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy501 := &x.Status - yy501.CodecEncodeSelf(e) - } - } - if yyr486 || yy2arr486 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeClaim) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym502 := z.DecBinary() - _ = yym502 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct503 := r.ContainerType() - if yyct503 == codecSelferValueTypeMap1234 { - yyl503 := r.ReadMapStart() - if yyl503 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl503, d) - } - } else if yyct503 == codecSelferValueTypeArray1234 { - yyl503 := r.ReadArrayStart() - if yyl503 
== 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl503, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeClaim) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys504Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys504Slc - var yyhl504 bool = l >= 0 - for yyj504 := 0; ; yyj504++ { - if yyhl504 { - if yyj504 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys504Slc = r.DecodeBytes(yys504Slc, true, true) - yys504 := string(yys504Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys504 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv507 := &x.ObjectMeta - yyv507.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PersistentVolumeClaimSpec{} - } else { - yyv508 := &x.Spec - yyv508.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = PersistentVolumeClaimStatus{} - } else { - yyv509 := &x.Status - yyv509.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys504) - } // end switch yys504 - } // end for yyj504 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeClaim) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj510 int - var yyb510 bool - var yyhl510 bool = l >= 0 - yyj510++ - if yyhl510 { - yyb510 = yyj510 > l - } else { - yyb510 = r.CheckBreak() - } - if yyb510 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj510++ - if yyhl510 { - yyb510 = yyj510 > l - } else { - yyb510 = r.CheckBreak() - } - if yyb510 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj510++ - if yyhl510 { - yyb510 = yyj510 > l - } else { - yyb510 = r.CheckBreak() - } - if yyb510 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv513 := &x.ObjectMeta - yyv513.CodecDecodeSelf(d) - } - yyj510++ - if yyhl510 { - yyb510 = yyj510 > l - } else { - yyb510 = r.CheckBreak() - } - if yyb510 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PersistentVolumeClaimSpec{} - } else { - yyv514 := &x.Spec - yyv514.CodecDecodeSelf(d) - } - yyj510++ - if yyhl510 { - yyb510 = yyj510 > l - } else { - yyb510 = r.CheckBreak() - } - if yyb510 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.Status = PersistentVolumeClaimStatus{} - } else { - yyv515 := &x.Status - yyv515.CodecDecodeSelf(d) - } - for { - yyj510++ - if yyhl510 { - yyb510 = yyj510 > l - } else { - yyb510 = r.CheckBreak() - } - if yyb510 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj510-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeClaimList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym516 := z.EncBinary() - _ = yym516 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep517 := !z.EncBinary() - yy2arr517 := z.EncBasicHandle().StructToArray - var yyq517 [4]bool - _, _, _ = yysep517, yyq517, yy2arr517 - const yyr517 bool = false - yyq517[0] = x.Kind != "" - yyq517[1] = x.APIVersion != "" - yyq517[2] = true - var yynn517 int - if yyr517 || yy2arr517 { - r.EncodeArrayStart(4) - } else { - yynn517 = 1 - for _, b := range yyq517 { - if b { - yynn517++ - } - } - r.EncodeMapStart(yynn517) - yynn517 = 0 - } - if yyr517 || yy2arr517 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq517[0] { - yym519 := z.EncBinary() - _ = yym519 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq517[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym520 := z.EncBinary() - _ = yym520 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr517 || yy2arr517 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq517[1] { - yym522 := z.EncBinary() - _ = yym522 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq517[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym523 := z.EncBinary() - _ = yym523 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr517 || yy2arr517 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq517[2] { - yy525 := &x.ListMeta - yym526 := z.EncBinary() - _ = yym526 - if false { - } else if z.HasExtensions() && z.EncExt(yy525) { - } else { - z.EncFallback(yy525) - } - } else { - r.EncodeNil() - } - } else { - if yyq517[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy527 := &x.ListMeta - yym528 := z.EncBinary() - _ = yym528 - if false { - } else if z.HasExtensions() && z.EncExt(yy527) { - } else { - z.EncFallback(yy527) - } - } - } - if yyr517 || yy2arr517 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym530 := z.EncBinary() - _ = yym530 - if false { - } else { - h.encSlicePersistentVolumeClaim(([]PersistentVolumeClaim)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym531 := z.EncBinary() - _ = yym531 - if false { - } else { - h.encSlicePersistentVolumeClaim(([]PersistentVolumeClaim)(x.Items), e) - } - } - } - if yyr517 || yy2arr517 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeClaimList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym532 := z.DecBinary() - _ = yym532 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct533 := r.ContainerType() - if yyct533 == codecSelferValueTypeMap1234 { - yyl533 := r.ReadMapStart() - if yyl533 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl533, d) - } - } else if yyct533 == codecSelferValueTypeArray1234 { - yyl533 := r.ReadArrayStart() - if yyl533 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl533, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeClaimList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys534Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys534Slc - var yyhl534 bool = l >= 0 - for yyj534 := 0; ; yyj534++ { - if yyhl534 { - if yyj534 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys534Slc = r.DecodeBytes(yys534Slc, true, true) - yys534 := string(yys534Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys534 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv537 := &x.ListMeta - yym538 := z.DecBinary() - _ = yym538 - if false { - } else if z.HasExtensions() && z.DecExt(yyv537) { - } else { - z.DecFallback(yyv537, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv539 := &x.Items - yym540 := z.DecBinary() - _ = yym540 - if false { - } else { - h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv539), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys534) - } // end switch yys534 - } // end for yyj534 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeClaimList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj541 int - var yyb541 bool - var yyhl541 bool = l >= 0 - yyj541++ - if yyhl541 { - yyb541 = yyj541 > l - } else { - yyb541 = r.CheckBreak() - } - if yyb541 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj541++ - if yyhl541 { - yyb541 = yyj541 > l - } else { - yyb541 = r.CheckBreak() - } - if yyb541 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj541++ - if yyhl541 { - yyb541 = yyj541 > l - } else { - yyb541 = r.CheckBreak() - } - if yyb541 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv544 := &x.ListMeta - yym545 := z.DecBinary() - _ = yym545 - if false { - } else if z.HasExtensions() && z.DecExt(yyv544) { - } else { - z.DecFallback(yyv544, false) - } - } - yyj541++ - if yyhl541 { - yyb541 = yyj541 > l - } else { - yyb541 = r.CheckBreak() - } - if yyb541 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv546 := &x.Items - yym547 := z.DecBinary() - _ = yym547 - if false { - } else { - h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv546), d) - } - } - for { - yyj541++ - if yyhl541 { - yyb541 = yyj541 > l - } else { - yyb541 = r.CheckBreak() - } - if yyb541 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj541-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeClaimSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym548 := z.EncBinary() - _ = yym548 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep549 := !z.EncBinary() - yy2arr549 := z.EncBasicHandle().StructToArray - var yyq549 [4]bool - _, _, _ = yysep549, yyq549, yy2arr549 - const yyr549 bool = false - yyq549[0] = len(x.AccessModes) != 0 - yyq549[1] = x.Selector != nil - yyq549[2] = true - yyq549[3] = x.VolumeName != "" - var yynn549 int - if yyr549 || yy2arr549 { - r.EncodeArrayStart(4) - } else { - yynn549 = 0 - for _, b := range yyq549 { - if b { - yynn549++ - } - } - r.EncodeMapStart(yynn549) - yynn549 = 0 - } - if yyr549 || yy2arr549 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq549[0] { - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym551 := z.EncBinary() - _ = yym551 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq549[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("accessModes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym552 := z.EncBinary() - _ = yym552 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } - } - if yyr549 || yy2arr549 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq549[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym554 := z.EncBinary() - _ = yym554 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq549[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - 
if x.Selector == nil { - r.EncodeNil() - } else { - yym555 := z.EncBinary() - _ = yym555 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr549 || yy2arr549 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq549[2] { - yy557 := &x.Resources - yy557.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq549[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resources")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy558 := &x.Resources - yy558.CodecEncodeSelf(e) - } - } - if yyr549 || yy2arr549 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq549[3] { - yym560 := z.EncBinary() - _ = yym560 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq549[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym561 := z.EncBinary() - _ = yym561 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) - } - } - } - if yyr549 || yy2arr549 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeClaimSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym562 := z.DecBinary() - _ = yym562 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct563 := r.ContainerType() - if yyct563 == codecSelferValueTypeMap1234 { - yyl563 := r.ReadMapStart() - if yyl563 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl563, d) - } - } else if yyct563 == codecSelferValueTypeArray1234 { - yyl563 := r.ReadArrayStart() - if yyl563 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl563, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys564Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys564Slc - var yyhl564 bool = l >= 0 - for yyj564 := 0; ; yyj564++ { - if yyhl564 { - if yyj564 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys564Slc = r.DecodeBytes(yys564Slc, true, true) - yys564 := string(yys564Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys564 { - case "accessModes": - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv565 := &x.AccessModes - yym566 := z.DecBinary() - _ = yym566 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv565), d) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg2_unversioned.LabelSelector) - } - yym568 := z.DecBinary() - _ = yym568 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case 
"resources": - if r.TryDecodeAsNil() { - x.Resources = ResourceRequirements{} - } else { - yyv569 := &x.Resources - yyv569.CodecDecodeSelf(d) - } - case "volumeName": - if r.TryDecodeAsNil() { - x.VolumeName = "" - } else { - x.VolumeName = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys564) - } // end switch yys564 - } // end for yyj564 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj571 int - var yyb571 bool - var yyhl571 bool = l >= 0 - yyj571++ - if yyhl571 { - yyb571 = yyj571 > l - } else { - yyb571 = r.CheckBreak() - } - if yyb571 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv572 := &x.AccessModes - yym573 := z.DecBinary() - _ = yym573 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv572), d) - } - } - yyj571++ - if yyhl571 { - yyb571 = yyj571 > l - } else { - yyb571 = r.CheckBreak() - } - if yyb571 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg2_unversioned.LabelSelector) - } - yym575 := z.DecBinary() - _ = yym575 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj571++ - if yyhl571 { - yyb571 = yyj571 > l - } else { - yyb571 = r.CheckBreak() - } - if yyb571 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Resources = ResourceRequirements{} - } else { - yyv576 := &x.Resources - yyv576.CodecDecodeSelf(d) - } - yyj571++ - if yyhl571 { - yyb571 = yyj571 > l - } else { - yyb571 = r.CheckBreak() - } - if yyb571 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeName = "" - } else { - x.VolumeName = string(r.DecodeString()) - } - for { - yyj571++ - if yyhl571 { - yyb571 = yyj571 > l - } else { - yyb571 = r.CheckBreak() - } - if yyb571 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj571-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeClaimStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym578 := z.EncBinary() - _ = yym578 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep579 := !z.EncBinary() - yy2arr579 := z.EncBasicHandle().StructToArray - var yyq579 [3]bool - _, _, _ = yysep579, yyq579, yy2arr579 - const yyr579 bool = false - yyq579[0] = x.Phase != "" - yyq579[1] = len(x.AccessModes) != 0 - yyq579[2] = len(x.Capacity) != 0 - var yynn579 int - if yyr579 || yy2arr579 { - r.EncodeArrayStart(3) - } else { - yynn579 = 0 - for _, b := range yyq579 { - if b { - yynn579++ - } - } - r.EncodeMapStart(yynn579) - yynn579 = 0 - } - if yyr579 || yy2arr579 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq579[0] { - x.Phase.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq579[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("phase")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Phase.CodecEncodeSelf(e) - } - } - if yyr579 || yy2arr579 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq579[1] { - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym582 := z.EncBinary() - _ = yym582 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq579[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("accessModes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym583 := z.EncBinary() - _ = yym583 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } - } - if yyr579 || yy2arr579 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq579[2] { - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq579[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("capacity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } - } - if yyr579 || yy2arr579 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeClaimStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym585 := z.DecBinary() - _ = yym585 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct586 := r.ContainerType() - if yyct586 == codecSelferValueTypeMap1234 { - yyl586 := r.ReadMapStart() - if yyl586 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl586, d) - } - } else if yyct586 == codecSelferValueTypeArray1234 { - yyl586 := r.ReadArrayStart() - if yyl586 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl586, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys587Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys587Slc - var yyhl587 bool = l >= 0 - for yyj587 := 0; ; yyj587++ { - if yyhl587 { - if yyj587 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys587Slc = r.DecodeBytes(yys587Slc, true, true) - yys587 := string(yys587Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys587 { - case "phase": - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = PersistentVolumeClaimPhase(r.DecodeString()) - } - case "accessModes": - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else 
{ - yyv589 := &x.AccessModes - yym590 := z.DecBinary() - _ = yym590 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv589), d) - } - } - case "capacity": - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv591 := &x.Capacity - yyv591.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys587) - } // end switch yys587 - } // end for yyj587 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj592 int - var yyb592 bool - var yyhl592 bool = l >= 0 - yyj592++ - if yyhl592 { - yyb592 = yyj592 > l - } else { - yyb592 = r.CheckBreak() - } - if yyb592 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = PersistentVolumeClaimPhase(r.DecodeString()) - } - yyj592++ - if yyhl592 { - yyb592 = yyj592 > l - } else { - yyb592 = r.CheckBreak() - } - if yyb592 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv594 := &x.AccessModes - yym595 := z.DecBinary() - _ = yym595 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv594), d) - } - } - yyj592++ - if yyhl592 { - yyb592 = yyj592 > l - } else { - yyb592 = r.CheckBreak() - } - if yyb592 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv596 := &x.Capacity - yyv596.CodecDecodeSelf(d) - } - for { - yyj592++ - if yyhl592 { - yyb592 = yyj592 > l - } else { - yyb592 = r.CheckBreak() - } - if yyb592 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj592-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x PersistentVolumeAccessMode) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym597 := z.EncBinary() - _ = yym597 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PersistentVolumeAccessMode) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym598 := z.DecBinary() - _ = yym598 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x PersistentVolumePhase) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym599 := z.EncBinary() - _ = yym599 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PersistentVolumePhase) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym600 := z.DecBinary() - _ = yym600 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x PersistentVolumeClaimPhase) CodecEncodeSelf(e 
*codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym601 := z.EncBinary() - _ = yym601 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PersistentVolumeClaimPhase) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym602 := z.DecBinary() - _ = yym602 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *HostPathVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym603 := z.EncBinary() - _ = yym603 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep604 := !z.EncBinary() - yy2arr604 := z.EncBasicHandle().StructToArray - var yyq604 [1]bool - _, _, _ = yysep604, yyq604, yy2arr604 - const yyr604 bool = false - var yynn604 int - if yyr604 || yy2arr604 { - r.EncodeArrayStart(1) - } else { - yynn604 = 1 - for _, b := range yyq604 { - if b { - yynn604++ - } - } - r.EncodeMapStart(yynn604) - yynn604 = 0 - } - if yyr604 || yy2arr604 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym606 := z.EncBinary() - _ = yym606 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym607 := z.EncBinary() - _ = yym607 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr604 || yy2arr604 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HostPathVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym608 := z.DecBinary() - _ = yym608 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct609 := r.ContainerType() - if yyct609 == codecSelferValueTypeMap1234 { - yyl609 := r.ReadMapStart() - if yyl609 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl609, d) - } - } else if yyct609 == codecSelferValueTypeArray1234 { - yyl609 := r.ReadArrayStart() - if yyl609 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl609, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HostPathVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys610Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys610Slc - var yyhl610 bool = l >= 0 - for yyj610 := 0; ; yyj610++ { - if yyhl610 { - if yyj610 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys610Slc = r.DecodeBytes(yys610Slc, true, true) - yys610 := string(yys610Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys610 { - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, 
yys610) - } // end switch yys610 - } // end for yyj610 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HostPathVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj612 int - var yyb612 bool - var yyhl612 bool = l >= 0 - yyj612++ - if yyhl612 { - yyb612 = yyj612 > l - } else { - yyb612 = r.CheckBreak() - } - if yyb612 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - for { - yyj612++ - if yyhl612 { - yyb612 = yyj612 > l - } else { - yyb612 = r.CheckBreak() - } - if yyb612 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj612-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EmptyDirVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym614 := z.EncBinary() - _ = yym614 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep615 := !z.EncBinary() - yy2arr615 := z.EncBasicHandle().StructToArray - var yyq615 [1]bool - _, _, _ = yysep615, yyq615, yy2arr615 - const yyr615 bool = false - yyq615[0] = x.Medium != "" - var yynn615 int - if yyr615 || yy2arr615 { - r.EncodeArrayStart(1) - } else { - yynn615 = 0 - for _, b := range yyq615 { - if b { - yynn615++ - } - } - r.EncodeMapStart(yynn615) - yynn615 = 0 - } - if yyr615 || yy2arr615 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq615[0] { - x.Medium.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq615[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("medium")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Medium.CodecEncodeSelf(e) - } - } - if yyr615 || yy2arr615 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EmptyDirVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym617 := z.DecBinary() - _ = yym617 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct618 := r.ContainerType() - if yyct618 == codecSelferValueTypeMap1234 { - yyl618 := r.ReadMapStart() - if yyl618 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl618, d) - } - } else if yyct618 == codecSelferValueTypeArray1234 { - yyl618 := r.ReadArrayStart() - if yyl618 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl618, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EmptyDirVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys619Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys619Slc - var yyhl619 bool = l >= 0 - for yyj619 := 0; ; yyj619++ { - if yyhl619 { - if yyj619 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys619Slc = r.DecodeBytes(yys619Slc, true, true) - yys619 := string(yys619Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys619 { - case "medium": - if r.TryDecodeAsNil() { - x.Medium = "" - } else { - x.Medium = StorageMedium(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys619) - } // end switch yys619 - } // end for yyj619 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EmptyDirVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj621 int - var yyb621 bool - var yyhl621 bool = l >= 0 - yyj621++ - if yyhl621 { - yyb621 = yyj621 > l - } else { - yyb621 = r.CheckBreak() - } - if yyb621 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Medium = "" - } else { - x.Medium = StorageMedium(r.DecodeString()) - } - for { - yyj621++ - if yyhl621 { - yyb621 = yyj621 > l - } else { - yyb621 = r.CheckBreak() - } - if yyb621 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj621-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x StorageMedium) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym623 := z.EncBinary() - _ = yym623 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *StorageMedium) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym624 := z.DecBinary() - _ = yym624 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x Protocol) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym625 := z.EncBinary() - _ = yym625 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *Protocol) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym626 := z.DecBinary() - _ = yym626 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *GCEPersistentDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym627 := z.EncBinary() - _ = yym627 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep628 := !z.EncBinary() - yy2arr628 := z.EncBasicHandle().StructToArray - var yyq628 [4]bool - _, _, _ = yysep628, yyq628, yy2arr628 - const yyr628 bool = false - yyq628[1] = x.FSType != "" - yyq628[2] = x.Partition != 0 - yyq628[3] = x.ReadOnly != false - var yynn628 int - if yyr628 || yy2arr628 { - r.EncodeArrayStart(4) - } else { - yynn628 = 1 - for _, b := range yyq628 { - if b { - yynn628++ - } - } - r.EncodeMapStart(yynn628) - yynn628 = 0 - } - if yyr628 || yy2arr628 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym630 := z.EncBinary() - _ = yym630 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.PDName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("pdName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym631 := z.EncBinary() - _ = yym631 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PDName)) - } - } - if yyr628 || yy2arr628 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq628[1] { - yym633 := z.EncBinary() - _ = yym633 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq628[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym634 := z.EncBinary() - _ = yym634 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr628 || yy2arr628 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq628[2] { - yym636 := z.EncBinary() - _ = yym636 - if false { - } else { - r.EncodeInt(int64(x.Partition)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq628[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("partition")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym637 := z.EncBinary() - _ = yym637 - if false { - } else { - r.EncodeInt(int64(x.Partition)) - } - } - } - if yyr628 || yy2arr628 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq628[3] { - yym639 := z.EncBinary() - _ = yym639 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq628[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym640 := z.EncBinary() - _ = yym640 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr628 || yy2arr628 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *GCEPersistentDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym641 := z.DecBinary() - _ = yym641 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct642 := r.ContainerType() - if yyct642 == codecSelferValueTypeMap1234 { - yyl642 := r.ReadMapStart() - if yyl642 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl642, d) - } - } else if yyct642 == codecSelferValueTypeArray1234 { - yyl642 := r.ReadArrayStart() - if yyl642 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl642, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys643Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys643Slc - var yyhl643 bool = l >= 0 - for yyj643 := 0; ; yyj643++ { - if yyhl643 { - if yyj643 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys643Slc = r.DecodeBytes(yys643Slc, true, true) - yys643 := string(yys643Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys643 { - case "pdName": - if r.TryDecodeAsNil() { - x.PDName = "" - } else { - x.PDName = string(r.DecodeString()) - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - case "partition": - if r.TryDecodeAsNil() { - x.Partition = 0 - } else { - x.Partition = int32(r.DecodeInt(32)) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys643) - } // end switch yys643 - } // end for yyj643 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj648 int - var yyb648 bool - var yyhl648 bool = l >= 0 - yyj648++ - if yyhl648 { - yyb648 = yyj648 > l - } else { - yyb648 = r.CheckBreak() - } - if yyb648 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PDName = "" - } else { - x.PDName = string(r.DecodeString()) - } - yyj648++ - if yyhl648 { - yyb648 = yyj648 > l - } else { - yyb648 = r.CheckBreak() - } - if yyb648 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - yyj648++ - if yyhl648 { - yyb648 = yyj648 > l - } else { - yyb648 = r.CheckBreak() - } - if yyb648 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Partition = 0 - } else { - x.Partition = int32(r.DecodeInt(32)) - } - yyj648++ - if yyhl648 { - yyb648 = yyj648 > l - } else { - yyb648 = r.CheckBreak() - } - if yyb648 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj648++ - if yyhl648 { - yyb648 = yyj648 > l - } else { - yyb648 = r.CheckBreak() - } - if yyb648 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj648-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ISCSIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym653 := z.EncBinary() - _ = yym653 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep654 := !z.EncBinary() - yy2arr654 := z.EncBasicHandle().StructToArray - var yyq654 [6]bool - _, _, _ = yysep654, yyq654, yy2arr654 - const yyr654 bool = false - yyq654[0] = x.TargetPortal != "" - yyq654[1] = x.IQN != "" - yyq654[2] = x.Lun != 0 - yyq654[3] = x.ISCSIInterface != "" - yyq654[4] = x.FSType != "" - yyq654[5] = x.ReadOnly != false - var yynn654 int - if yyr654 || yy2arr654 { - r.EncodeArrayStart(6) - } else { - yynn654 = 0 - for _, b := range yyq654 { - if b { - yynn654++ - } - } - 
r.EncodeMapStart(yynn654) - yynn654 = 0 - } - if yyr654 || yy2arr654 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq654[0] { - yym656 := z.EncBinary() - _ = yym656 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TargetPortal)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq654[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetPortal")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym657 := z.EncBinary() - _ = yym657 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TargetPortal)) - } - } - } - if yyr654 || yy2arr654 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq654[1] { - yym659 := z.EncBinary() - _ = yym659 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IQN)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq654[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iqn")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym660 := z.EncBinary() - _ = yym660 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IQN)) - } - } - } - if yyr654 || yy2arr654 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq654[2] { - yym662 := z.EncBinary() - _ = yym662 - if false { - } else { - r.EncodeInt(int64(x.Lun)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq654[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lun")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym663 := z.EncBinary() - _ = yym663 - if false { - } else { - r.EncodeInt(int64(x.Lun)) - } - } - } - if yyr654 || yy2arr654 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq654[3] { - yym665 := z.EncBinary() - _ = yym665 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ISCSIInterface)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq654[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iscsiInterface")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym666 := z.EncBinary() - _ = yym666 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ISCSIInterface)) - } - } - } - if yyr654 || yy2arr654 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq654[4] { - yym668 := z.EncBinary() - _ = yym668 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq654[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym669 := z.EncBinary() - _ = yym669 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr654 || yy2arr654 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq654[5] { - yym671 := z.EncBinary() - _ = yym671 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq654[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym672 := z.EncBinary() - _ = yym672 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr654 || yy2arr654 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ISCSIVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym673 := z.DecBinary() - _ = yym673 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct674 := r.ContainerType() - if yyct674 == codecSelferValueTypeMap1234 { - yyl674 := r.ReadMapStart() - if yyl674 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl674, d) - } - } else if yyct674 == codecSelferValueTypeArray1234 { - yyl674 := r.ReadArrayStart() - if yyl674 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl674, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ISCSIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys675Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys675Slc - var yyhl675 bool = l >= 0 - for yyj675 := 0; ; yyj675++ { - if yyhl675 { - if yyj675 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys675Slc = r.DecodeBytes(yys675Slc, true, true) - yys675 := string(yys675Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys675 { - case "targetPortal": - if r.TryDecodeAsNil() { - x.TargetPortal = "" - } else { - x.TargetPortal = string(r.DecodeString()) - } - case "iqn": - if r.TryDecodeAsNil() { - x.IQN = "" - } else { - x.IQN = string(r.DecodeString()) - } - case "lun": - if r.TryDecodeAsNil() { - x.Lun = 0 - } else { - x.Lun = int32(r.DecodeInt(32)) - } - case "iscsiInterface": - if r.TryDecodeAsNil() { - x.ISCSIInterface = "" - } else { - x.ISCSIInterface = string(r.DecodeString()) - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys675) - } // end switch yys675 - } // end for yyj675 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj682 int - var yyb682 bool - var yyhl682 bool = l >= 0 - yyj682++ - if yyhl682 { - yyb682 = yyj682 > l - } else { - yyb682 = r.CheckBreak() - } - if yyb682 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetPortal = "" - } else { - x.TargetPortal = string(r.DecodeString()) - } - yyj682++ - if yyhl682 { - yyb682 = yyj682 > l - } else { - yyb682 = r.CheckBreak() - } - if yyb682 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.IQN = "" - } else { - x.IQN = string(r.DecodeString()) - } - yyj682++ - if 
yyhl682 { - yyb682 = yyj682 > l - } else { - yyb682 = r.CheckBreak() - } - if yyb682 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Lun = 0 - } else { - x.Lun = int32(r.DecodeInt(32)) - } - yyj682++ - if yyhl682 { - yyb682 = yyj682 > l - } else { - yyb682 = r.CheckBreak() - } - if yyb682 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ISCSIInterface = "" - } else { - x.ISCSIInterface = string(r.DecodeString()) - } - yyj682++ - if yyhl682 { - yyb682 = yyj682 > l - } else { - yyb682 = r.CheckBreak() - } - if yyb682 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - yyj682++ - if yyhl682 { - yyb682 = yyj682 > l - } else { - yyb682 = r.CheckBreak() - } - if yyb682 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj682++ - if yyhl682 { - yyb682 = yyj682 > l - } else { - yyb682 = r.CheckBreak() - } - if yyb682 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj682-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *FCVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym689 := z.EncBinary() - _ = yym689 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep690 := !z.EncBinary() - yy2arr690 := z.EncBasicHandle().StructToArray - var yyq690 [4]bool - _, _, _ = yysep690, yyq690, yy2arr690 - const yyr690 bool = false - yyq690[2] = x.FSType != "" - yyq690[3] = x.ReadOnly != false - var yynn690 int - if yyr690 || yy2arr690 { - r.EncodeArrayStart(4) - } else { - yynn690 = 2 - for _, b := range yyq690 { - if b { - yynn690++ - } - } - r.EncodeMapStart(yynn690) - yynn690 = 0 - } - if yyr690 || yy2arr690 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.TargetWWNs == nil { - r.EncodeNil() - } else { - yym692 := z.EncBinary() - _ = yym692 - if false { - } else { - z.F.EncSliceStringV(x.TargetWWNs, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetWWNs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TargetWWNs == nil { - r.EncodeNil() - } else { - yym693 := z.EncBinary() - _ = yym693 - if false { - } else { - z.F.EncSliceStringV(x.TargetWWNs, false, e) - } - } - } - if yyr690 || yy2arr690 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Lun == nil { - r.EncodeNil() - } else { - yy695 := *x.Lun - yym696 := z.EncBinary() - _ = yym696 - if false { - } else { - r.EncodeInt(int64(yy695)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lun")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Lun == nil { - r.EncodeNil() - } else { - yy697 := *x.Lun - yym698 := z.EncBinary() - _ = yym698 - if false { - } else { - 
r.EncodeInt(int64(yy697)) - } - } - } - if yyr690 || yy2arr690 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq690[2] { - yym700 := z.EncBinary() - _ = yym700 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq690[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym701 := z.EncBinary() - _ = yym701 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr690 || yy2arr690 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq690[3] { - yym703 := z.EncBinary() - _ = yym703 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq690[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym704 := z.EncBinary() - _ = yym704 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr690 || yy2arr690 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *FCVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym705 := z.DecBinary() - _ = yym705 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct706 := r.ContainerType() - if yyct706 == codecSelferValueTypeMap1234 { - yyl706 := r.ReadMapStart() - if yyl706 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl706, d) - } - } else if yyct706 == codecSelferValueTypeArray1234 { - yyl706 := r.ReadArrayStart() - if yyl706 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl706, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *FCVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys707Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys707Slc - var yyhl707 bool = l >= 0 - for yyj707 := 0; ; yyj707++ { - if yyhl707 { - if yyj707 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys707Slc = r.DecodeBytes(yys707Slc, true, true) - yys707 := string(yys707Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys707 { - case "targetWWNs": - if r.TryDecodeAsNil() { - x.TargetWWNs = nil - } else { - yyv708 := &x.TargetWWNs - yym709 := z.DecBinary() - _ = yym709 - if false { - } else { - z.F.DecSliceStringX(yyv708, false, d) - } - } - case "lun": - if r.TryDecodeAsNil() { - if x.Lun != nil { - x.Lun = nil - } - } else { - if x.Lun == nil { - x.Lun = new(int32) - } - yym711 := z.DecBinary() - _ = yym711 - if false { - } else { - *((*int32)(x.Lun)) = int32(r.DecodeInt(32)) - } - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - 
z.DecStructFieldNotFound(-1, yys707) - } // end switch yys707 - } // end for yyj707 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *FCVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj714 int - var yyb714 bool - var yyhl714 bool = l >= 0 - yyj714++ - if yyhl714 { - yyb714 = yyj714 > l - } else { - yyb714 = r.CheckBreak() - } - if yyb714 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetWWNs = nil - } else { - yyv715 := &x.TargetWWNs - yym716 := z.DecBinary() - _ = yym716 - if false { - } else { - z.F.DecSliceStringX(yyv715, false, d) - } - } - yyj714++ - if yyhl714 { - yyb714 = yyj714 > l - } else { - yyb714 = r.CheckBreak() - } - if yyb714 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Lun != nil { - x.Lun = nil - } - } else { - if x.Lun == nil { - x.Lun = new(int32) - } - yym718 := z.DecBinary() - _ = yym718 - if false { - } else { - *((*int32)(x.Lun)) = int32(r.DecodeInt(32)) - } - } - yyj714++ - if yyhl714 { - yyb714 = yyj714 > l - } else { - yyb714 = r.CheckBreak() - } - if yyb714 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - yyj714++ - if yyhl714 { - yyb714 = yyj714 > l - } else { - yyb714 = r.CheckBreak() - } - if yyb714 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj714++ - if yyhl714 { - yyb714 = yyj714 > l - } else { - yyb714 = r.CheckBreak() - } - if yyb714 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj714-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *FlexVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym721 := z.EncBinary() - _ = yym721 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep722 := !z.EncBinary() - yy2arr722 := z.EncBasicHandle().StructToArray - var yyq722 [5]bool - _, _, _ = yysep722, yyq722, yy2arr722 - const yyr722 bool = false - yyq722[1] = x.FSType != "" - yyq722[2] = x.SecretRef != nil - yyq722[3] = x.ReadOnly != false - yyq722[4] = len(x.Options) != 0 - var yynn722 int - if yyr722 || yy2arr722 { - r.EncodeArrayStart(5) - } else { - yynn722 = 1 - for _, b := range yyq722 { - if b { - yynn722++ - } - } - r.EncodeMapStart(yynn722) - yynn722 = 0 - } - if yyr722 || yy2arr722 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym724 := z.EncBinary() - _ = yym724 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Driver)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("driver")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym725 := z.EncBinary() - _ = yym725 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Driver)) - } - } - if yyr722 || yy2arr722 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq722[1] { - yym727 := z.EncBinary() - _ = yym727 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq722[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym728 := z.EncBinary() - _ = yym728 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr722 || yy2arr722 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq722[2] { - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq722[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } - } - if yyr722 || yy2arr722 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq722[3] { - yym731 := z.EncBinary() - _ = yym731 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq722[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym732 := z.EncBinary() - _ = yym732 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr722 || yy2arr722 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq722[4] { - if x.Options == nil { - r.EncodeNil() - } else { - yym734 := z.EncBinary() - _ = yym734 - if false { - } else { - z.F.EncMapStringStringV(x.Options, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq722[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("options")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Options == nil { - r.EncodeNil() - } else { - yym735 := z.EncBinary() - _ = yym735 - if false { - } else { - z.F.EncMapStringStringV(x.Options, false, e) - } - } - } - } - if yyr722 || yy2arr722 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *FlexVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym736 := z.DecBinary() - _ = yym736 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct737 := r.ContainerType() - if yyct737 == codecSelferValueTypeMap1234 { - yyl737 := r.ReadMapStart() - if yyl737 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl737, d) - } - } else if yyct737 == codecSelferValueTypeArray1234 { - yyl737 := r.ReadArrayStart() - if yyl737 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl737, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *FlexVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys738Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys738Slc - var yyhl738 bool = l >= 0 - for yyj738 := 0; ; yyj738++ { - if yyhl738 { - if yyj738 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys738Slc = r.DecodeBytes(yys738Slc, true, true) - yys738 := string(yys738Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys738 { - case "driver": - if r.TryDecodeAsNil() { - x.Driver = "" - } else { - x.Driver = string(r.DecodeString()) - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - case "secretRef": - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - case "options": - if r.TryDecodeAsNil() { - x.Options = nil - } else { - yyv743 := &x.Options - yym744 := z.DecBinary() - _ = yym744 - if false { - } else { - z.F.DecMapStringStringX(yyv743, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys738) - } // end switch yys738 - } // end for yyj738 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *FlexVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj745 int - var yyb745 bool - var yyhl745 bool = l >= 0 - yyj745++ - if yyhl745 { - yyb745 = yyj745 > l - } else { - yyb745 = r.CheckBreak() - } - if yyb745 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Driver = "" - } else { - x.Driver = string(r.DecodeString()) - } - yyj745++ - if yyhl745 { - yyb745 = yyj745 > l - } else { - yyb745 = r.CheckBreak() - } - if yyb745 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - yyj745++ - if yyhl745 { - yyb745 = yyj745 > l - } else { - yyb745 = r.CheckBreak() - } - if yyb745 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - yyj745++ - if yyhl745 { - yyb745 = yyj745 > l - } else { - yyb745 = r.CheckBreak() - } - if yyb745 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - yyj745++ - if yyhl745 { - yyb745 = yyj745 > l - } else { - yyb745 = r.CheckBreak() - } - if yyb745 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Options = nil - } else { - yyv750 := &x.Options - yym751 := z.DecBinary() - _ = yym751 - if false { - } else { - z.F.DecMapStringStringX(yyv750, 
false, d) - } - } - for { - yyj745++ - if yyhl745 { - yyb745 = yyj745 > l - } else { - yyb745 = r.CheckBreak() - } - if yyb745 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj745-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *AWSElasticBlockStoreVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym752 := z.EncBinary() - _ = yym752 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep753 := !z.EncBinary() - yy2arr753 := z.EncBasicHandle().StructToArray - var yyq753 [4]bool - _, _, _ = yysep753, yyq753, yy2arr753 - const yyr753 bool = false - yyq753[1] = x.FSType != "" - yyq753[2] = x.Partition != 0 - yyq753[3] = x.ReadOnly != false - var yynn753 int - if yyr753 || yy2arr753 { - r.EncodeArrayStart(4) - } else { - yynn753 = 1 - for _, b := range yyq753 { - if b { - yynn753++ - } - } - r.EncodeMapStart(yynn753) - yynn753 = 0 - } - if yyr753 || yy2arr753 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym755 := z.EncBinary() - _ = yym755 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym756 := z.EncBinary() - _ = yym756 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) - } - } - if yyr753 || yy2arr753 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq753[1] { - yym758 := z.EncBinary() - _ = yym758 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq753[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym759 := z.EncBinary() - _ = yym759 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr753 || yy2arr753 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq753[2] { - yym761 := z.EncBinary() - _ = yym761 - if false { - } else { - r.EncodeInt(int64(x.Partition)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq753[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("partition")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym762 := z.EncBinary() - _ = yym762 - if false { - } else { - r.EncodeInt(int64(x.Partition)) - } - } - } - if yyr753 || yy2arr753 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq753[3] { - yym764 := z.EncBinary() - _ = yym764 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq753[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym765 := z.EncBinary() - _ = yym765 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr753 || yy2arr753 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - 
-func (x *AWSElasticBlockStoreVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym766 := z.DecBinary() - _ = yym766 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct767 := r.ContainerType() - if yyct767 == codecSelferValueTypeMap1234 { - yyl767 := r.ReadMapStart() - if yyl767 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl767, d) - } - } else if yyct767 == codecSelferValueTypeArray1234 { - yyl767 := r.ReadArrayStart() - if yyl767 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl767, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys768Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys768Slc - var yyhl768 bool = l >= 0 - for yyj768 := 0; ; yyj768++ { - if yyhl768 { - if yyj768 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys768Slc = r.DecodeBytes(yys768Slc, true, true) - yys768 := string(yys768Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys768 { - case "volumeID": - if r.TryDecodeAsNil() { - x.VolumeID = "" - } else { - x.VolumeID = string(r.DecodeString()) - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - case "partition": - if r.TryDecodeAsNil() { - x.Partition = 0 - } else { - x.Partition = int32(r.DecodeInt(32)) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys768) - } // end switch yys768 - } // end for yyj768 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj773 int - var yyb773 bool - var yyhl773 bool = l >= 0 - yyj773++ - if yyhl773 { - yyb773 = yyj773 > l - } else { - yyb773 = r.CheckBreak() - } - if yyb773 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeID = "" - } else { - x.VolumeID = string(r.DecodeString()) - } - yyj773++ - if yyhl773 { - yyb773 = yyj773 > l - } else { - yyb773 = r.CheckBreak() - } - if yyb773 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - yyj773++ - if yyhl773 { - yyb773 = yyj773 > l - } else { - yyb773 = r.CheckBreak() - } - if yyb773 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Partition = 0 - } else { - x.Partition = int32(r.DecodeInt(32)) - } - yyj773++ - if yyhl773 { - yyb773 = yyj773 > l - } else { - yyb773 = r.CheckBreak() - } - if yyb773 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj773++ - if yyhl773 { - yyb773 = yyj773 > l - } else { - yyb773 = r.CheckBreak() - } - if yyb773 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj773-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *GitRepoVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym778 := z.EncBinary() - _ = yym778 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep779 := !z.EncBinary() - yy2arr779 := z.EncBasicHandle().StructToArray - var yyq779 [3]bool - _, _, _ = yysep779, yyq779, yy2arr779 - const yyr779 bool = false - yyq779[1] = x.Revision != "" - yyq779[2] = x.Directory != "" - var yynn779 int - if yyr779 || yy2arr779 { - r.EncodeArrayStart(3) - } else { - yynn779 = 1 - for _, b := range yyq779 { - if b { - yynn779++ - } - } - r.EncodeMapStart(yynn779) - yynn779 = 0 - } - if yyr779 || yy2arr779 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym781 := z.EncBinary() - _ = yym781 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Repository)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("repository")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym782 := z.EncBinary() - _ = yym782 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Repository)) - } - } - if yyr779 || yy2arr779 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq779[1] { - yym784 := z.EncBinary() - _ = yym784 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Revision)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq779[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("revision")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym785 := z.EncBinary() - _ = yym785 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Revision)) - } - } - } - if yyr779 || yy2arr779 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq779[2] { - yym787 := z.EncBinary() - _ = yym787 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Directory)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq779[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("directory")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym788 := z.EncBinary() - _ = yym788 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Directory)) - } - } - } - if yyr779 || yy2arr779 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *GitRepoVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym789 := z.DecBinary() - _ = yym789 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct790 := r.ContainerType() - if yyct790 == codecSelferValueTypeMap1234 { - yyl790 := r.ReadMapStart() - 
if yyl790 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl790, d) - } - } else if yyct790 == codecSelferValueTypeArray1234 { - yyl790 := r.ReadArrayStart() - if yyl790 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl790, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *GitRepoVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys791Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys791Slc - var yyhl791 bool = l >= 0 - for yyj791 := 0; ; yyj791++ { - if yyhl791 { - if yyj791 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys791Slc = r.DecodeBytes(yys791Slc, true, true) - yys791 := string(yys791Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys791 { - case "repository": - if r.TryDecodeAsNil() { - x.Repository = "" - } else { - x.Repository = string(r.DecodeString()) - } - case "revision": - if r.TryDecodeAsNil() { - x.Revision = "" - } else { - x.Revision = string(r.DecodeString()) - } - case "directory": - if r.TryDecodeAsNil() { - x.Directory = "" - } else { - x.Directory = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys791) - } // end switch yys791 - } // end for yyj791 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *GitRepoVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj795 int - var yyb795 bool - var yyhl795 bool = l >= 0 - yyj795++ - if yyhl795 { - yyb795 = yyj795 > l - } else { - yyb795 = r.CheckBreak() - } - if yyb795 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Repository = "" - } else { - x.Repository = string(r.DecodeString()) - } - yyj795++ - if yyhl795 { - yyb795 = yyj795 > l - } else { - yyb795 = r.CheckBreak() - } - if yyb795 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Revision = "" - } else { - x.Revision = string(r.DecodeString()) - } - yyj795++ - if yyhl795 { - yyb795 = yyj795 > l - } else { - yyb795 = r.CheckBreak() - } - if yyb795 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Directory = "" - } else { - x.Directory = string(r.DecodeString()) - } - for { - yyj795++ - if yyhl795 { - yyb795 = yyj795 > l - } else { - yyb795 = r.CheckBreak() - } - if yyb795 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj795-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SecretVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym799 := z.EncBinary() - _ = yym799 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep800 := !z.EncBinary() - yy2arr800 := z.EncBasicHandle().StructToArray - var yyq800 [3]bool - _, _, _ = 
yysep800, yyq800, yy2arr800 - const yyr800 bool = false - yyq800[0] = x.SecretName != "" - yyq800[1] = len(x.Items) != 0 - yyq800[2] = x.DefaultMode != nil - var yynn800 int - if yyr800 || yy2arr800 { - r.EncodeArrayStart(3) - } else { - yynn800 = 0 - for _, b := range yyq800 { - if b { - yynn800++ - } - } - r.EncodeMapStart(yynn800) - yynn800 = 0 - } - if yyr800 || yy2arr800 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq800[0] { - yym802 := z.EncBinary() - _ = yym802 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq800[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym803 := z.EncBinary() - _ = yym803 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) - } - } - } - if yyr800 || yy2arr800 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq800[1] { - if x.Items == nil { - r.EncodeNil() - } else { - yym805 := z.EncBinary() - _ = yym805 - if false { - } else { - h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq800[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym806 := z.EncBinary() - _ = yym806 - if false { - } else { - h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) - } - } - } - } - if yyr800 || yy2arr800 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq800[2] { - if x.DefaultMode == nil { - r.EncodeNil() - } else { - yy808 := *x.DefaultMode - yym809 := z.EncBinary() - _ = yym809 - if false { - } else { - r.EncodeInt(int64(yy808)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq800[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("defaultMode")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DefaultMode == nil { - r.EncodeNil() - } else { - yy810 := *x.DefaultMode - yym811 := z.EncBinary() - _ = yym811 - if false { - } else { - r.EncodeInt(int64(yy810)) - } - } - } - } - if yyr800 || yy2arr800 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SecretVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym812 := z.DecBinary() - _ = yym812 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct813 := r.ContainerType() - if yyct813 == codecSelferValueTypeMap1234 { - yyl813 := r.ReadMapStart() - if yyl813 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl813, d) - } - } else if yyct813 == codecSelferValueTypeArray1234 { - yyl813 := r.ReadArrayStart() - if yyl813 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl813, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SecretVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var 
yys814Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys814Slc - var yyhl814 bool = l >= 0 - for yyj814 := 0; ; yyj814++ { - if yyhl814 { - if yyj814 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys814Slc = r.DecodeBytes(yys814Slc, true, true) - yys814 := string(yys814Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys814 { - case "secretName": - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - x.SecretName = string(r.DecodeString()) - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv816 := &x.Items - yym817 := z.DecBinary() - _ = yym817 - if false { - } else { - h.decSliceKeyToPath((*[]KeyToPath)(yyv816), d) - } - } - case "defaultMode": - if r.TryDecodeAsNil() { - if x.DefaultMode != nil { - x.DefaultMode = nil - } - } else { - if x.DefaultMode == nil { - x.DefaultMode = new(int32) - } - yym819 := z.DecBinary() - _ = yym819 - if false { - } else { - *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys814) - } // end switch yys814 - } // end for yyj814 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SecretVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj820 int - var yyb820 bool - var yyhl820 bool = l >= 0 - yyj820++ - if yyhl820 { - yyb820 = yyj820 > l - } else { - yyb820 = r.CheckBreak() - } - if yyb820 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - x.SecretName = string(r.DecodeString()) - } - yyj820++ - if yyhl820 { - yyb820 = yyj820 > l - } else { - yyb820 = r.CheckBreak() - } - if yyb820 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv822 := &x.Items - yym823 := z.DecBinary() - _ = yym823 - if false { - } else { - h.decSliceKeyToPath((*[]KeyToPath)(yyv822), d) - } - } - yyj820++ - if yyhl820 { - yyb820 = yyj820 > l - } else { - yyb820 = r.CheckBreak() - } - if yyb820 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DefaultMode != nil { - x.DefaultMode = nil - } - } else { - if x.DefaultMode == nil { - x.DefaultMode = new(int32) - } - yym825 := z.DecBinary() - _ = yym825 - if false { - } else { - *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) - } - } - for { - yyj820++ - if yyhl820 { - yyb820 = yyj820 > l - } else { - yyb820 = r.CheckBreak() - } - if yyb820 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj820-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym826 := z.EncBinary() - _ = yym826 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep827 := !z.EncBinary() - yy2arr827 := z.EncBasicHandle().StructToArray - var yyq827 [3]bool - _, _, _ = yysep827, yyq827, yy2arr827 - const yyr827 bool = false - yyq827[2] = 
x.ReadOnly != false - var yynn827 int - if yyr827 || yy2arr827 { - r.EncodeArrayStart(3) - } else { - yynn827 = 2 - for _, b := range yyq827 { - if b { - yynn827++ - } - } - r.EncodeMapStart(yynn827) - yynn827 = 0 - } - if yyr827 || yy2arr827 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym829 := z.EncBinary() - _ = yym829 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Server)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("server")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym830 := z.EncBinary() - _ = yym830 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Server)) - } - } - if yyr827 || yy2arr827 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym832 := z.EncBinary() - _ = yym832 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym833 := z.EncBinary() - _ = yym833 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr827 || yy2arr827 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq827[2] { - yym835 := z.EncBinary() - _ = yym835 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq827[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym836 := z.EncBinary() - _ = yym836 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr827 || yy2arr827 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NFSVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym837 := z.DecBinary() - _ = yym837 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct838 := r.ContainerType() - if yyct838 == codecSelferValueTypeMap1234 { - yyl838 := r.ReadMapStart() - if yyl838 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl838, d) - } - } else if yyct838 == codecSelferValueTypeArray1234 { - yyl838 := r.ReadArrayStart() - if yyl838 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl838, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys839Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys839Slc - var yyhl839 bool = l >= 0 - for yyj839 := 0; ; yyj839++ { - if yyhl839 { - if yyj839 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys839Slc = r.DecodeBytes(yys839Slc, true, true) - yys839 := string(yys839Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys839 { - case "server": - if r.TryDecodeAsNil() { - x.Server = "" - } else { - x.Server = string(r.DecodeString()) - } - case 
"path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys839) - } // end switch yys839 - } // end for yyj839 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj843 int - var yyb843 bool - var yyhl843 bool = l >= 0 - yyj843++ - if yyhl843 { - yyb843 = yyj843 > l - } else { - yyb843 = r.CheckBreak() - } - if yyb843 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Server = "" - } else { - x.Server = string(r.DecodeString()) - } - yyj843++ - if yyhl843 { - yyb843 = yyj843 > l - } else { - yyb843 = r.CheckBreak() - } - if yyb843 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj843++ - if yyhl843 { - yyb843 = yyj843 > l - } else { - yyb843 = r.CheckBreak() - } - if yyb843 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj843++ - if yyhl843 { - yyb843 = yyj843 > l - } else { - yyb843 = r.CheckBreak() - } - if yyb843 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj843-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *QuobyteVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym847 := z.EncBinary() - _ = yym847 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep848 := !z.EncBinary() - yy2arr848 := z.EncBasicHandle().StructToArray - var yyq848 [5]bool - _, _, _ = yysep848, yyq848, yy2arr848 - const yyr848 bool = false - yyq848[2] = x.ReadOnly != false - yyq848[3] = x.User != "" - yyq848[4] = x.Group != "" - var yynn848 int - if yyr848 || yy2arr848 { - r.EncodeArrayStart(5) - } else { - yynn848 = 2 - for _, b := range yyq848 { - if b { - yynn848++ - } - } - r.EncodeMapStart(yynn848) - yynn848 = 0 - } - if yyr848 || yy2arr848 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym850 := z.EncBinary() - _ = yym850 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Registry)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("registry")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym851 := z.EncBinary() - _ = yym851 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Registry)) - } - } - if yyr848 || yy2arr848 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym853 := z.EncBinary() - _ = yym853 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Volume)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volume")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym854 := z.EncBinary() - _ = yym854 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Volume)) - } - } - if yyr848 || yy2arr848 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq848[2] { - yym856 := z.EncBinary() - _ = yym856 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq848[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym857 := z.EncBinary() - _ = yym857 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr848 || yy2arr848 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq848[3] { - yym859 := z.EncBinary() - _ = yym859 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq848[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("user")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym860 := z.EncBinary() - _ = yym860 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } - } - if yyr848 || yy2arr848 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq848[4] { - yym862 := z.EncBinary() - _ = yym862 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Group)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq848[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("group")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym863 := z.EncBinary() - _ = yym863 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Group)) - } - } - } - if yyr848 || yy2arr848 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *QuobyteVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym864 := z.DecBinary() - _ = yym864 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct865 := r.ContainerType() - if yyct865 == codecSelferValueTypeMap1234 { - yyl865 := r.ReadMapStart() - if yyl865 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl865, d) - } - } else if yyct865 == codecSelferValueTypeArray1234 { - yyl865 := r.ReadArrayStart() - if yyl865 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl865, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *QuobyteVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys866Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys866Slc - var yyhl866 bool = l >= 0 - for yyj866 := 0; ; yyj866++ { - if yyhl866 { - if yyj866 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys866Slc = r.DecodeBytes(yys866Slc, true, true) - yys866 := string(yys866Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys866 { - case "registry": - if r.TryDecodeAsNil() { - x.Registry = "" - } else { - x.Registry = string(r.DecodeString()) - } - case "volume": - if r.TryDecodeAsNil() { - x.Volume = "" - } else { - x.Volume = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - case "user": - if r.TryDecodeAsNil() { - x.User = "" - } else { - x.User = string(r.DecodeString()) - } - case "group": - if r.TryDecodeAsNil() { - x.Group = "" - } else { - x.Group = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys866) - } // end switch yys866 - } // end for yyj866 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *QuobyteVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj872 int - var yyb872 bool - var yyhl872 bool = l >= 0 - yyj872++ - if yyhl872 { - yyb872 = yyj872 > l - } else { - yyb872 = r.CheckBreak() - } - if yyb872 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Registry = "" - } else { - x.Registry = string(r.DecodeString()) - } - yyj872++ - if yyhl872 { - yyb872 = yyj872 > l - } else { - yyb872 = r.CheckBreak() - } - if yyb872 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Volume = "" - } else { - x.Volume = string(r.DecodeString()) - } - yyj872++ - if yyhl872 { - yyb872 = yyj872 > l - } else { - yyb872 = r.CheckBreak() - } - if yyb872 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - yyj872++ - if yyhl872 { - yyb872 = yyj872 > l - } else { - yyb872 = r.CheckBreak() - } - if yyb872 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.User = "" - } else { - x.User = string(r.DecodeString()) - } - yyj872++ - if yyhl872 { - yyb872 = yyj872 > l - } else { - yyb872 = r.CheckBreak() - } - if yyb872 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Group = "" - } else { - x.Group = string(r.DecodeString()) - } - for { - yyj872++ - if yyhl872 { - yyb872 = yyj872 > l - } else { - yyb872 = r.CheckBreak() - } - if yyb872 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj872-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *GlusterfsVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym878 := z.EncBinary() - _ = yym878 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep879 := !z.EncBinary() - yy2arr879 := z.EncBasicHandle().StructToArray - var yyq879 [3]bool - _, _, _ = yysep879, yyq879, yy2arr879 - const yyr879 bool = false - yyq879[2] = x.ReadOnly != false - var yynn879 int - if yyr879 || yy2arr879 
{ - r.EncodeArrayStart(3) - } else { - yynn879 = 2 - for _, b := range yyq879 { - if b { - yynn879++ - } - } - r.EncodeMapStart(yynn879) - yynn879 = 0 - } - if yyr879 || yy2arr879 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym881 := z.EncBinary() - _ = yym881 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EndpointsName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("endpoints")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym882 := z.EncBinary() - _ = yym882 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EndpointsName)) - } - } - if yyr879 || yy2arr879 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym884 := z.EncBinary() - _ = yym884 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym885 := z.EncBinary() - _ = yym885 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr879 || yy2arr879 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq879[2] { - yym887 := z.EncBinary() - _ = yym887 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq879[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym888 := z.EncBinary() - _ = yym888 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr879 || yy2arr879 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *GlusterfsVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym889 := z.DecBinary() - _ = yym889 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct890 := r.ContainerType() - if yyct890 == codecSelferValueTypeMap1234 { - yyl890 := r.ReadMapStart() - if yyl890 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl890, d) - } - } else if yyct890 == codecSelferValueTypeArray1234 { - yyl890 := r.ReadArrayStart() - if yyl890 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl890, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *GlusterfsVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys891Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys891Slc - var yyhl891 bool = l >= 0 - for yyj891 := 0; ; yyj891++ { - if yyhl891 { - if yyj891 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys891Slc = r.DecodeBytes(yys891Slc, true, true) - yys891 := string(yys891Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys891 { - case "endpoints": - if r.TryDecodeAsNil() { - x.EndpointsName = "" - } else { - x.EndpointsName = string(r.DecodeString()) - } - case "path": - if 
r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys891) - } // end switch yys891 - } // end for yyj891 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *GlusterfsVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj895 int - var yyb895 bool - var yyhl895 bool = l >= 0 - yyj895++ - if yyhl895 { - yyb895 = yyj895 > l - } else { - yyb895 = r.CheckBreak() - } - if yyb895 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EndpointsName = "" - } else { - x.EndpointsName = string(r.DecodeString()) - } - yyj895++ - if yyhl895 { - yyb895 = yyj895 > l - } else { - yyb895 = r.CheckBreak() - } - if yyb895 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj895++ - if yyhl895 { - yyb895 = yyj895 > l - } else { - yyb895 = r.CheckBreak() - } - if yyb895 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj895++ - if yyhl895 { - yyb895 = yyj895 > l - } else { - yyb895 = r.CheckBreak() - } - if yyb895 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj895-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RBDVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym899 := z.EncBinary() - _ = yym899 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep900 := !z.EncBinary() - yy2arr900 := z.EncBasicHandle().StructToArray - var yyq900 [8]bool - _, _, _ = yysep900, yyq900, yy2arr900 - const yyr900 bool = false - yyq900[2] = x.FSType != "" - yyq900[3] = x.RBDPool != "" - yyq900[4] = x.RadosUser != "" - yyq900[5] = x.Keyring != "" - yyq900[6] = x.SecretRef != nil - yyq900[7] = x.ReadOnly != false - var yynn900 int - if yyr900 || yy2arr900 { - r.EncodeArrayStart(8) - } else { - yynn900 = 2 - for _, b := range yyq900 { - if b { - yynn900++ - } - } - r.EncodeMapStart(yynn900) - yynn900 = 0 - } - if yyr900 || yy2arr900 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.CephMonitors == nil { - r.EncodeNil() - } else { - yym902 := z.EncBinary() - _ = yym902 - if false { - } else { - z.F.EncSliceStringV(x.CephMonitors, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("monitors")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CephMonitors == nil { - r.EncodeNil() - } else { - yym903 := z.EncBinary() - _ = yym903 - if false { - } else { - z.F.EncSliceStringV(x.CephMonitors, false, e) - } - } - } - if yyr900 || yy2arr900 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym905 := z.EncBinary() - _ = yym905 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.RBDImage)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("image")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym906 := z.EncBinary() - _ = yym906 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RBDImage)) - } - } - if yyr900 || yy2arr900 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq900[2] { - yym908 := z.EncBinary() - _ = yym908 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq900[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym909 := z.EncBinary() - _ = yym909 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr900 || yy2arr900 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq900[3] { - yym911 := z.EncBinary() - _ = yym911 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RBDPool)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq900[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("pool")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym912 := z.EncBinary() - _ = yym912 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RBDPool)) - } - } - } - if yyr900 || yy2arr900 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq900[4] { - yym914 := z.EncBinary() - _ = yym914 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RadosUser)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq900[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("user")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym915 := z.EncBinary() - _ = yym915 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RadosUser)) - } - } - } - if yyr900 || yy2arr900 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq900[5] { - yym917 := z.EncBinary() - _ = yym917 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Keyring)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq900[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("keyring")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym918 := z.EncBinary() - _ = yym918 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Keyring)) - } - } - } - if yyr900 || yy2arr900 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq900[6] { - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq900[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } - } - if yyr900 || yy2arr900 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq900[7] { - yym921 
[Generated-code hunk elided for readability: the deleted lines in this portion of the diff are machine-generated ugorji go-codec (codec1978) serialization methods — CodecEncodeSelf, CodecDecodeSelf, codecDecodeSelfFromMap, and codecDecodeSelfFromArray — for the Kubernetes API volume-source types RBDVolumeSource, CinderVolumeSource, CephFSVolumeSource, FlockerVolumeSource, DownwardAPIVolumeSource, DownwardAPIVolumeFile, AzureFileVolumeSource, VsphereVirtualDiskVolumeSource, PhotonPersistentDiskVolumeSource, AzureDataDiskCachingMode, AzureDiskVolumeSource, and ConfigMapVolumeSource. The excerpt begins and ends mid-method; the same generated-code hunk continues before and after this span.]
yyhl1181 { - yyb1181 = yyj1181 > l - } else { - yyb1181 = r.CheckBreak() - } - if yyb1181 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv1183 := &x.Items - yym1184 := z.DecBinary() - _ = yym1184 - if false { - } else { - h.decSliceKeyToPath((*[]KeyToPath)(yyv1183), d) - } - } - yyj1181++ - if yyhl1181 { - yyb1181 = yyj1181 > l - } else { - yyb1181 = r.CheckBreak() - } - if yyb1181 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DefaultMode != nil { - x.DefaultMode = nil - } - } else { - if x.DefaultMode == nil { - x.DefaultMode = new(int32) - } - yym1186 := z.DecBinary() - _ = yym1186 - if false { - } else { - *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) - } - } - for { - yyj1181++ - if yyhl1181 { - yyb1181 = yyj1181 > l - } else { - yyb1181 = r.CheckBreak() - } - if yyb1181 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1181-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *KeyToPath) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1187 := z.EncBinary() - _ = yym1187 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1188 := !z.EncBinary() - yy2arr1188 := z.EncBasicHandle().StructToArray - var yyq1188 [3]bool - _, _, _ = yysep1188, yyq1188, yy2arr1188 - const yyr1188 bool = false - yyq1188[2] = x.Mode != nil - var yynn1188 int - if yyr1188 || yy2arr1188 { - r.EncodeArrayStart(3) - } else { - yynn1188 = 2 - for _, b := range yyq1188 { - if b { - yynn1188++ - } - } - r.EncodeMapStart(yynn1188) - yynn1188 = 0 - } - if yyr1188 || yy2arr1188 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1190 := z.EncBinary() - _ = yym1190 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1191 := z.EncBinary() - _ = yym1191 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr1188 || yy2arr1188 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1193 := z.EncBinary() - _ = yym1193 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1194 := z.EncBinary() - _ = yym1194 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr1188 || yy2arr1188 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1188[2] { - if x.Mode == nil { - r.EncodeNil() - } else { - yy1196 := *x.Mode - yym1197 := z.EncBinary() - _ = yym1197 - if false { - } else { - r.EncodeInt(int64(yy1196)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1188[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("mode")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Mode == 
nil { - r.EncodeNil() - } else { - yy1198 := *x.Mode - yym1199 := z.EncBinary() - _ = yym1199 - if false { - } else { - r.EncodeInt(int64(yy1198)) - } - } - } - } - if yyr1188 || yy2arr1188 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *KeyToPath) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1200 := z.DecBinary() - _ = yym1200 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1201 := r.ContainerType() - if yyct1201 == codecSelferValueTypeMap1234 { - yyl1201 := r.ReadMapStart() - if yyl1201 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1201, d) - } - } else if yyct1201 == codecSelferValueTypeArray1234 { - yyl1201 := r.ReadArrayStart() - if yyl1201 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1201, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *KeyToPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1202Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1202Slc - var yyhl1202 bool = l >= 0 - for yyj1202 := 0; ; yyj1202++ { - if yyhl1202 { - if yyj1202 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1202Slc = r.DecodeBytes(yys1202Slc, true, true) - yys1202 := string(yys1202Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1202 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "mode": - if r.TryDecodeAsNil() { - if x.Mode != nil { - x.Mode = nil - } - } else { - if x.Mode == nil { - x.Mode = new(int32) - } - yym1206 := z.DecBinary() - _ = yym1206 - if false { - } else { - *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys1202) - } // end switch yys1202 - } // end for yyj1202 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *KeyToPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1207 int - var yyb1207 bool - var yyhl1207 bool = l >= 0 - yyj1207++ - if yyhl1207 { - yyb1207 = yyj1207 > l - } else { - yyb1207 = r.CheckBreak() - } - if yyb1207 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - yyj1207++ - if yyhl1207 { - yyb1207 = yyj1207 > l - } else { - yyb1207 = r.CheckBreak() - } - if yyb1207 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj1207++ - if yyhl1207 { - yyb1207 = yyj1207 > l - } else { - yyb1207 = r.CheckBreak() - } - if yyb1207 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - if x.Mode != nil { - x.Mode = nil - } - } else { - if x.Mode == nil { - x.Mode = new(int32) - } - yym1211 := z.DecBinary() - _ = yym1211 - if false { - } else { - *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) - } - } - for { - yyj1207++ - if yyhl1207 { - yyb1207 = yyj1207 > l - } else { - yyb1207 = r.CheckBreak() - } - if yyb1207 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1207-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerPort) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1212 := z.EncBinary() - _ = yym1212 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1213 := !z.EncBinary() - yy2arr1213 := z.EncBasicHandle().StructToArray - var yyq1213 [5]bool - _, _, _ = yysep1213, yyq1213, yy2arr1213 - const yyr1213 bool = false - yyq1213[0] = x.Name != "" - yyq1213[1] = x.HostPort != 0 - yyq1213[3] = x.Protocol != "" - yyq1213[4] = x.HostIP != "" - var yynn1213 int - if yyr1213 || yy2arr1213 { - r.EncodeArrayStart(5) - } else { - yynn1213 = 1 - for _, b := range yyq1213 { - if b { - yynn1213++ - } - } - r.EncodeMapStart(yynn1213) - yynn1213 = 0 - } - if yyr1213 || yy2arr1213 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1213[0] { - yym1215 := z.EncBinary() - _ = yym1215 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1213[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1216 := z.EncBinary() - _ = yym1216 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr1213 || yy2arr1213 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1213[1] { - yym1218 := z.EncBinary() - _ = yym1218 - if false { - } else { - r.EncodeInt(int64(x.HostPort)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq1213[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1219 := z.EncBinary() - _ = yym1219 - if false { - } else { - r.EncodeInt(int64(x.HostPort)) - } - } - } - if yyr1213 || yy2arr1213 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1221 := z.EncBinary() - _ = yym1221 - if false { - } else { - r.EncodeInt(int64(x.ContainerPort)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerPort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1222 := z.EncBinary() - _ = yym1222 - if false { - } else { - r.EncodeInt(int64(x.ContainerPort)) - } - } - if yyr1213 || yy2arr1213 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1213[3] { - x.Protocol.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1213[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("protocol")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Protocol.CodecEncodeSelf(e) - } - } - if yyr1213 || yy2arr1213 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1213[4] { - yym1225 := z.EncBinary() - _ = yym1225 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1213[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostIP")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1226 := z.EncBinary() - _ = yym1226 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) - } - } - } - if yyr1213 || yy2arr1213 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerPort) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1227 := z.DecBinary() - _ = yym1227 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1228 := r.ContainerType() - if yyct1228 == codecSelferValueTypeMap1234 { - yyl1228 := r.ReadMapStart() - if yyl1228 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1228, d) - } - } else if yyct1228 == codecSelferValueTypeArray1234 { - yyl1228 := r.ReadArrayStart() - if yyl1228 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1228, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1229Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1229Slc - var yyhl1229 bool = l >= 0 - for yyj1229 := 0; ; yyj1229++ { - if yyhl1229 { - if yyj1229 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1229Slc = r.DecodeBytes(yys1229Slc, true, true) - yys1229 := string(yys1229Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1229 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "hostPort": - if r.TryDecodeAsNil() { - x.HostPort = 0 - } else { - x.HostPort = int32(r.DecodeInt(32)) - } - case "containerPort": - if r.TryDecodeAsNil() { - x.ContainerPort = 0 - } else { - x.ContainerPort = int32(r.DecodeInt(32)) - } - case "protocol": - if r.TryDecodeAsNil() { - x.Protocol = "" - } else { - x.Protocol = Protocol(r.DecodeString()) - } - case "hostIP": - if r.TryDecodeAsNil() { - x.HostIP = "" - } else { - x.HostIP = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys1229) - } // end switch yys1229 - } // end for yyj1229 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1235 int - var yyb1235 bool - var yyhl1235 bool = l >= 0 - yyj1235++ - if yyhl1235 { - yyb1235 = yyj1235 > l - } else { - yyb1235 = r.CheckBreak() - } - if yyb1235 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj1235++ - 
if yyhl1235 { - yyb1235 = yyj1235 > l - } else { - yyb1235 = r.CheckBreak() - } - if yyb1235 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostPort = 0 - } else { - x.HostPort = int32(r.DecodeInt(32)) - } - yyj1235++ - if yyhl1235 { - yyb1235 = yyj1235 > l - } else { - yyb1235 = r.CheckBreak() - } - if yyb1235 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerPort = 0 - } else { - x.ContainerPort = int32(r.DecodeInt(32)) - } - yyj1235++ - if yyhl1235 { - yyb1235 = yyj1235 > l - } else { - yyb1235 = r.CheckBreak() - } - if yyb1235 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Protocol = "" - } else { - x.Protocol = Protocol(r.DecodeString()) - } - yyj1235++ - if yyhl1235 { - yyb1235 = yyj1235 > l - } else { - yyb1235 = r.CheckBreak() - } - if yyb1235 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostIP = "" - } else { - x.HostIP = string(r.DecodeString()) - } - for { - yyj1235++ - if yyhl1235 { - yyb1235 = yyj1235 > l - } else { - yyb1235 = r.CheckBreak() - } - if yyb1235 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1235-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *VolumeMount) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1241 := z.EncBinary() - _ = yym1241 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1242 := !z.EncBinary() - yy2arr1242 := z.EncBasicHandle().StructToArray - var yyq1242 [4]bool - _, _, _ = yysep1242, yyq1242, yy2arr1242 - const yyr1242 bool = false - yyq1242[1] = x.ReadOnly != false - yyq1242[3] = x.SubPath != "" - var yynn1242 int - if yyr1242 || yy2arr1242 { - r.EncodeArrayStart(4) - } else { - yynn1242 = 2 - for _, b := range yyq1242 { - if b { - yynn1242++ - } - } - r.EncodeMapStart(yynn1242) - yynn1242 = 0 - } - if yyr1242 || yy2arr1242 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1244 := z.EncBinary() - _ = yym1244 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1245 := z.EncBinary() - _ = yym1245 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr1242 || yy2arr1242 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1242[1] { - yym1247 := z.EncBinary() - _ = yym1247 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq1242[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1248 := z.EncBinary() - _ = yym1248 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr1242 || yy2arr1242 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1250 := z.EncBinary() - _ = yym1250 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MountPath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("mountPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1251 := z.EncBinary() - _ = yym1251 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MountPath)) - } - } - if yyr1242 || yy2arr1242 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1242[3] { - yym1253 := z.EncBinary() - _ = yym1253 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SubPath)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1242[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1254 := z.EncBinary() - _ = yym1254 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SubPath)) - } - } - } - if yyr1242 || yy2arr1242 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *VolumeMount) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1255 := z.DecBinary() - _ = yym1255 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1256 := r.ContainerType() - if yyct1256 == codecSelferValueTypeMap1234 { - yyl1256 := r.ReadMapStart() - if yyl1256 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1256, d) - } - } else if yyct1256 == codecSelferValueTypeArray1234 { - yyl1256 := r.ReadArrayStart() - if yyl1256 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1256, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *VolumeMount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1257Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1257Slc - var yyhl1257 bool = l >= 0 - for yyj1257 := 0; ; yyj1257++ { - if yyhl1257 { - if yyj1257 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1257Slc = r.DecodeBytes(yys1257Slc, true, true) - yys1257 := string(yys1257Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1257 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - case "mountPath": - if r.TryDecodeAsNil() { - x.MountPath = "" - } else { - x.MountPath = string(r.DecodeString()) - } - case "subPath": - if r.TryDecodeAsNil() { - x.SubPath = "" - } else { - x.SubPath = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys1257) - } // end switch yys1257 - } // end for yyj1257 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *VolumeMount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, 
z, r - var yyj1262 int - var yyb1262 bool - var yyhl1262 bool = l >= 0 - yyj1262++ - if yyhl1262 { - yyb1262 = yyj1262 > l - } else { - yyb1262 = r.CheckBreak() - } - if yyb1262 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj1262++ - if yyhl1262 { - yyb1262 = yyj1262 > l - } else { - yyb1262 = r.CheckBreak() - } - if yyb1262 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - yyj1262++ - if yyhl1262 { - yyb1262 = yyj1262 > l - } else { - yyb1262 = r.CheckBreak() - } - if yyb1262 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MountPath = "" - } else { - x.MountPath = string(r.DecodeString()) - } - yyj1262++ - if yyhl1262 { - yyb1262 = yyj1262 > l - } else { - yyb1262 = r.CheckBreak() - } - if yyb1262 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SubPath = "" - } else { - x.SubPath = string(r.DecodeString()) - } - for { - yyj1262++ - if yyhl1262 { - yyb1262 = yyj1262 > l - } else { - yyb1262 = r.CheckBreak() - } - if yyb1262 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1262-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EnvVar) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1267 := z.EncBinary() - _ = yym1267 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1268 := !z.EncBinary() - yy2arr1268 := z.EncBasicHandle().StructToArray - var yyq1268 [3]bool - _, _, _ = yysep1268, yyq1268, yy2arr1268 - const yyr1268 bool = false - yyq1268[1] = x.Value != "" - yyq1268[2] = x.ValueFrom != nil - var yynn1268 int - if yyr1268 || yy2arr1268 { - r.EncodeArrayStart(3) - } else { - yynn1268 = 1 - for _, b := range yyq1268 { - if b { - yynn1268++ - } - } - r.EncodeMapStart(yynn1268) - yynn1268 = 0 - } - if yyr1268 || yy2arr1268 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1270 := z.EncBinary() - _ = yym1270 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1271 := z.EncBinary() - _ = yym1271 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr1268 || yy2arr1268 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1268[1] { - yym1273 := z.EncBinary() - _ = yym1273 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1268[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1274 := z.EncBinary() - _ = yym1274 - 
if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } - } - if yyr1268 || yy2arr1268 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1268[2] { - if x.ValueFrom == nil { - r.EncodeNil() - } else { - x.ValueFrom.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1268[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("valueFrom")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ValueFrom == nil { - r.EncodeNil() - } else { - x.ValueFrom.CodecEncodeSelf(e) - } - } - } - if yyr1268 || yy2arr1268 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EnvVar) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1276 := z.DecBinary() - _ = yym1276 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1277 := r.ContainerType() - if yyct1277 == codecSelferValueTypeMap1234 { - yyl1277 := r.ReadMapStart() - if yyl1277 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1277, d) - } - } else if yyct1277 == codecSelferValueTypeArray1234 { - yyl1277 := r.ReadArrayStart() - if yyl1277 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1277, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EnvVar) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1278Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1278Slc - var yyhl1278 bool = l >= 0 - for yyj1278 := 0; ; yyj1278++ { - if yyhl1278 { - if yyj1278 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1278Slc = r.DecodeBytes(yys1278Slc, true, true) - yys1278 := string(yys1278Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1278 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - case "valueFrom": - if r.TryDecodeAsNil() { - if x.ValueFrom != nil { - x.ValueFrom = nil - } - } else { - if x.ValueFrom == nil { - x.ValueFrom = new(EnvVarSource) - } - x.ValueFrom.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys1278) - } // end switch yys1278 - } // end for yyj1278 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EnvVar) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1282 int - var yyb1282 bool - var yyhl1282 bool = l >= 0 - yyj1282++ - if yyhl1282 { - yyb1282 = yyj1282 > l - } else { - yyb1282 = r.CheckBreak() - } - if yyb1282 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj1282++ - if yyhl1282 { - yyb1282 = yyj1282 > l - } else { - yyb1282 = r.CheckBreak() - } - if yyb1282 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - 
return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - yyj1282++ - if yyhl1282 { - yyb1282 = yyj1282 > l - } else { - yyb1282 = r.CheckBreak() - } - if yyb1282 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ValueFrom != nil { - x.ValueFrom = nil - } - } else { - if x.ValueFrom == nil { - x.ValueFrom = new(EnvVarSource) - } - x.ValueFrom.CodecDecodeSelf(d) - } - for { - yyj1282++ - if yyhl1282 { - yyb1282 = yyj1282 > l - } else { - yyb1282 = r.CheckBreak() - } - if yyb1282 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1282-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EnvVarSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1286 := z.EncBinary() - _ = yym1286 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1287 := !z.EncBinary() - yy2arr1287 := z.EncBasicHandle().StructToArray - var yyq1287 [4]bool - _, _, _ = yysep1287, yyq1287, yy2arr1287 - const yyr1287 bool = false - yyq1287[0] = x.FieldRef != nil - yyq1287[1] = x.ResourceFieldRef != nil - yyq1287[2] = x.ConfigMapKeyRef != nil - yyq1287[3] = x.SecretKeyRef != nil - var yynn1287 int - if yyr1287 || yy2arr1287 { - r.EncodeArrayStart(4) - } else { - yynn1287 = 0 - for _, b := range yyq1287 { - if b { - yynn1287++ - } - } - r.EncodeMapStart(yynn1287) - yynn1287 = 0 - } - if yyr1287 || yy2arr1287 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1287[0] { - if x.FieldRef == nil { - r.EncodeNil() - } else { - x.FieldRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1287[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fieldRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FieldRef == nil { - r.EncodeNil() - } else { - x.FieldRef.CodecEncodeSelf(e) - } - } - } - if yyr1287 || yy2arr1287 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1287[1] { - if x.ResourceFieldRef == nil { - r.EncodeNil() - } else { - x.ResourceFieldRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1287[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ResourceFieldRef == nil { - r.EncodeNil() - } else { - x.ResourceFieldRef.CodecEncodeSelf(e) - } - } - } - if yyr1287 || yy2arr1287 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1287[2] { - if x.ConfigMapKeyRef == nil { - r.EncodeNil() - } else { - x.ConfigMapKeyRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1287[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("configMapKeyRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ConfigMapKeyRef == nil { - r.EncodeNil() - } else { - x.ConfigMapKeyRef.CodecEncodeSelf(e) - } - } - } - if yyr1287 || yy2arr1287 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1287[3] { - if x.SecretKeyRef == nil { - 
r.EncodeNil() - } else { - x.SecretKeyRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1287[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretKeyRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretKeyRef == nil { - r.EncodeNil() - } else { - x.SecretKeyRef.CodecEncodeSelf(e) - } - } - } - if yyr1287 || yy2arr1287 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EnvVarSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1292 := z.DecBinary() - _ = yym1292 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1293 := r.ContainerType() - if yyct1293 == codecSelferValueTypeMap1234 { - yyl1293 := r.ReadMapStart() - if yyl1293 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1293, d) - } - } else if yyct1293 == codecSelferValueTypeArray1234 { - yyl1293 := r.ReadArrayStart() - if yyl1293 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1293, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EnvVarSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1294Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1294Slc - var yyhl1294 bool = l >= 0 - for yyj1294 := 0; ; yyj1294++ { - if yyhl1294 { - if yyj1294 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1294Slc = r.DecodeBytes(yys1294Slc, true, true) - yys1294 := string(yys1294Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1294 { - case "fieldRef": - if r.TryDecodeAsNil() { - if x.FieldRef != nil { - x.FieldRef = nil - } - } else { - if x.FieldRef == nil { - x.FieldRef = new(ObjectFieldSelector) - } - x.FieldRef.CodecDecodeSelf(d) - } - case "resourceFieldRef": - if r.TryDecodeAsNil() { - if x.ResourceFieldRef != nil { - x.ResourceFieldRef = nil - } - } else { - if x.ResourceFieldRef == nil { - x.ResourceFieldRef = new(ResourceFieldSelector) - } - x.ResourceFieldRef.CodecDecodeSelf(d) - } - case "configMapKeyRef": - if r.TryDecodeAsNil() { - if x.ConfigMapKeyRef != nil { - x.ConfigMapKeyRef = nil - } - } else { - if x.ConfigMapKeyRef == nil { - x.ConfigMapKeyRef = new(ConfigMapKeySelector) - } - x.ConfigMapKeyRef.CodecDecodeSelf(d) - } - case "secretKeyRef": - if r.TryDecodeAsNil() { - if x.SecretKeyRef != nil { - x.SecretKeyRef = nil - } - } else { - if x.SecretKeyRef == nil { - x.SecretKeyRef = new(SecretKeySelector) - } - x.SecretKeyRef.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys1294) - } // end switch yys1294 - } // end for yyj1294 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EnvVarSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1299 int - var yyb1299 bool - var yyhl1299 bool = l >= 0 - yyj1299++ - if yyhl1299 { - yyb1299 = yyj1299 > l - } else { - yyb1299 = r.CheckBreak() - } - if yyb1299 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - 
return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FieldRef != nil { - x.FieldRef = nil - } - } else { - if x.FieldRef == nil { - x.FieldRef = new(ObjectFieldSelector) - } - x.FieldRef.CodecDecodeSelf(d) - } - yyj1299++ - if yyhl1299 { - yyb1299 = yyj1299 > l - } else { - yyb1299 = r.CheckBreak() - } - if yyb1299 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ResourceFieldRef != nil { - x.ResourceFieldRef = nil - } - } else { - if x.ResourceFieldRef == nil { - x.ResourceFieldRef = new(ResourceFieldSelector) - } - x.ResourceFieldRef.CodecDecodeSelf(d) - } - yyj1299++ - if yyhl1299 { - yyb1299 = yyj1299 > l - } else { - yyb1299 = r.CheckBreak() - } - if yyb1299 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ConfigMapKeyRef != nil { - x.ConfigMapKeyRef = nil - } - } else { - if x.ConfigMapKeyRef == nil { - x.ConfigMapKeyRef = new(ConfigMapKeySelector) - } - x.ConfigMapKeyRef.CodecDecodeSelf(d) - } - yyj1299++ - if yyhl1299 { - yyb1299 = yyj1299 > l - } else { - yyb1299 = r.CheckBreak() - } - if yyb1299 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretKeyRef != nil { - x.SecretKeyRef = nil - } - } else { - if x.SecretKeyRef == nil { - x.SecretKeyRef = new(SecretKeySelector) - } - x.SecretKeyRef.CodecDecodeSelf(d) - } - for { - yyj1299++ - if yyhl1299 { - yyb1299 = yyj1299 > l - } else { - yyb1299 = r.CheckBreak() - } - if yyb1299 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1299-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ObjectFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1304 := z.EncBinary() - _ = yym1304 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1305 := !z.EncBinary() - yy2arr1305 := z.EncBasicHandle().StructToArray - var yyq1305 [2]bool - _, _, _ = yysep1305, yyq1305, yy2arr1305 - const yyr1305 bool = false - var yynn1305 int - if yyr1305 || yy2arr1305 { - r.EncodeArrayStart(2) - } else { - yynn1305 = 2 - for _, b := range yyq1305 { - if b { - yynn1305++ - } - } - r.EncodeMapStart(yynn1305) - yynn1305 = 0 - } - if yyr1305 || yy2arr1305 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1307 := z.EncBinary() - _ = yym1307 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1308 := z.EncBinary() - _ = yym1308 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - if yyr1305 || yy2arr1305 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1310 := z.EncBinary() - _ = yym1310 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("fieldPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1311 := z.EncBinary() - _ = yym1311 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) - } - } - if yyr1305 || yy2arr1305 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ObjectFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1312 := z.DecBinary() - _ = yym1312 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1313 := r.ContainerType() - if yyct1313 == codecSelferValueTypeMap1234 { - yyl1313 := r.ReadMapStart() - if yyl1313 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1313, d) - } - } else if yyct1313 == codecSelferValueTypeArray1234 { - yyl1313 := r.ReadArrayStart() - if yyl1313 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1313, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ObjectFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1314Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1314Slc - var yyhl1314 bool = l >= 0 - for yyj1314 := 0; ; yyj1314++ { - if yyhl1314 { - if yyj1314 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1314Slc = r.DecodeBytes(yys1314Slc, true, true) - yys1314 := string(yys1314Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1314 { - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "fieldPath": - if r.TryDecodeAsNil() { - x.FieldPath = "" - } else { - x.FieldPath = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys1314) - } // end switch yys1314 - } // end for yyj1314 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ObjectFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1317 int - var yyb1317 bool - var yyhl1317 bool = l >= 0 - yyj1317++ - if yyhl1317 { - yyb1317 = yyj1317 > l - } else { - yyb1317 = r.CheckBreak() - } - if yyb1317 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj1317++ - if yyhl1317 { - yyb1317 = yyj1317 > l - } else { - yyb1317 = r.CheckBreak() - } - if yyb1317 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FieldPath = "" - } else { - x.FieldPath = string(r.DecodeString()) - } - for { - yyj1317++ - if yyhl1317 { - yyb1317 = yyj1317 > l - } else { - yyb1317 = r.CheckBreak() - } - if yyb1317 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1317-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceFieldSelector) CodecEncodeSelf(e 
*codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1320 := z.EncBinary() - _ = yym1320 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1321 := !z.EncBinary() - yy2arr1321 := z.EncBasicHandle().StructToArray - var yyq1321 [3]bool - _, _, _ = yysep1321, yyq1321, yy2arr1321 - const yyr1321 bool = false - yyq1321[0] = x.ContainerName != "" - yyq1321[2] = true - var yynn1321 int - if yyr1321 || yy2arr1321 { - r.EncodeArrayStart(3) - } else { - yynn1321 = 1 - for _, b := range yyq1321 { - if b { - yynn1321++ - } - } - r.EncodeMapStart(yynn1321) - yynn1321 = 0 - } - if yyr1321 || yy2arr1321 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1321[0] { - yym1323 := z.EncBinary() - _ = yym1323 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1321[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1324 := z.EncBinary() - _ = yym1324 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) - } - } - } - if yyr1321 || yy2arr1321 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1326 := z.EncBinary() - _ = yym1326 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resource")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1327 := z.EncBinary() - _ = yym1327 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) - } - } - if yyr1321 || yy2arr1321 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1321[2] { - yy1329 := &x.Divisor - yym1330 := z.EncBinary() - _ = yym1330 - if false { - } else if z.HasExtensions() && z.EncExt(yy1329) { - } else if !yym1330 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1329) - } else { - z.EncFallback(yy1329) - } - } else { - r.EncodeNil() - } - } else { - if yyq1321[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("divisor")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1331 := &x.Divisor - yym1332 := z.EncBinary() - _ = yym1332 - if false { - } else if z.HasExtensions() && z.EncExt(yy1331) { - } else if !yym1332 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1331) - } else { - z.EncFallback(yy1331) - } - } - } - if yyr1321 || yy2arr1321 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1333 := z.DecBinary() - _ = yym1333 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1334 := r.ContainerType() - if yyct1334 == codecSelferValueTypeMap1234 { - yyl1334 := r.ReadMapStart() - if yyl1334 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1334, d) - } - } else if yyct1334 == codecSelferValueTypeArray1234 { - yyl1334 := r.ReadArrayStart() - if yyl1334 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1334, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1335Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1335Slc - var yyhl1335 bool = l >= 0 - for yyj1335 := 0; ; yyj1335++ { - if yyhl1335 { - if yyj1335 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1335Slc = r.DecodeBytes(yys1335Slc, true, true) - yys1335 := string(yys1335Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1335 { - case "containerName": - if r.TryDecodeAsNil() { - x.ContainerName = "" - } else { - x.ContainerName = string(r.DecodeString()) - } - case "resource": - if r.TryDecodeAsNil() { - x.Resource = "" - } else { - x.Resource = string(r.DecodeString()) - } - case "divisor": - if r.TryDecodeAsNil() { - x.Divisor = pkg3_resource.Quantity{} - } else { - yyv1338 := &x.Divisor - yym1339 := z.DecBinary() - _ = yym1339 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1338) { - } else if !yym1339 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1338) - } else { - z.DecFallback(yyv1338, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys1335) - } // end switch yys1335 - } // end for yyj1335 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1340 int - var yyb1340 bool - var yyhl1340 bool = l >= 0 - yyj1340++ - if yyhl1340 { - yyb1340 = yyj1340 > l - } else { - yyb1340 = r.CheckBreak() - } - if yyb1340 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerName = "" - } else { - x.ContainerName = string(r.DecodeString()) - } - yyj1340++ - if yyhl1340 { - yyb1340 = yyj1340 > l - } else { - yyb1340 = r.CheckBreak() - } - if yyb1340 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Resource = "" - } else { - x.Resource = string(r.DecodeString()) - } - yyj1340++ - if yyhl1340 { - yyb1340 = yyj1340 > l - } else { - yyb1340 = r.CheckBreak() - } - if yyb1340 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Divisor = pkg3_resource.Quantity{} - } else { - yyv1343 := &x.Divisor - yym1344 := z.DecBinary() - _ = yym1344 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1343) { - } else if !yym1344 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1343) - } else { - z.DecFallback(yyv1343, false) - } - } - for { - yyj1340++ - if yyhl1340 { - yyb1340 = yyj1340 > l - } else { - yyb1340 = r.CheckBreak() - } - if yyb1340 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1340-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) 
- _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1345 := z.EncBinary() - _ = yym1345 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1346 := !z.EncBinary() - yy2arr1346 := z.EncBasicHandle().StructToArray - var yyq1346 [2]bool - _, _, _ = yysep1346, yyq1346, yy2arr1346 - const yyr1346 bool = false - var yynn1346 int - if yyr1346 || yy2arr1346 { - r.EncodeArrayStart(2) - } else { - yynn1346 = 2 - for _, b := range yyq1346 { - if b { - yynn1346++ - } - } - r.EncodeMapStart(yynn1346) - yynn1346 = 0 - } - if yyr1346 || yy2arr1346 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1348 := z.EncBinary() - _ = yym1348 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1349 := z.EncBinary() - _ = yym1349 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr1346 || yy2arr1346 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1351 := z.EncBinary() - _ = yym1351 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1352 := z.EncBinary() - _ = yym1352 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr1346 || yy2arr1346 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ConfigMapKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1353 := z.DecBinary() - _ = yym1353 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1354 := r.ContainerType() - if yyct1354 == codecSelferValueTypeMap1234 { - yyl1354 := r.ReadMapStart() - if yyl1354 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1354, d) - } - } else if yyct1354 == codecSelferValueTypeArray1234 { - yyl1354 := r.ReadArrayStart() - if yyl1354 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1354, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ConfigMapKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1355Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1355Slc - var yyhl1355 bool = l >= 0 - for yyj1355 := 0; ; yyj1355++ { - if yyhl1355 { - if yyj1355 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1355Slc = r.DecodeBytes(yys1355Slc, true, true) - yys1355 := string(yys1355Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1355 { - case "Name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys1355) - } // end switch yys1355 - } // end for 
yyj1355 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ConfigMapKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1358 int - var yyb1358 bool - var yyhl1358 bool = l >= 0 - yyj1358++ - if yyhl1358 { - yyb1358 = yyj1358 > l - } else { - yyb1358 = r.CheckBreak() - } - if yyb1358 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj1358++ - if yyhl1358 { - yyb1358 = yyj1358 > l - } else { - yyb1358 = r.CheckBreak() - } - if yyb1358 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - for { - yyj1358++ - if yyhl1358 { - yyb1358 = yyj1358 > l - } else { - yyb1358 = r.CheckBreak() - } - if yyb1358 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1358-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SecretKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1361 := z.EncBinary() - _ = yym1361 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1362 := !z.EncBinary() - yy2arr1362 := z.EncBasicHandle().StructToArray - var yyq1362 [2]bool - _, _, _ = yysep1362, yyq1362, yy2arr1362 - const yyr1362 bool = false - var yynn1362 int - if yyr1362 || yy2arr1362 { - r.EncodeArrayStart(2) - } else { - yynn1362 = 2 - for _, b := range yyq1362 { - if b { - yynn1362++ - } - } - r.EncodeMapStart(yynn1362) - yynn1362 = 0 - } - if yyr1362 || yy2arr1362 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1364 := z.EncBinary() - _ = yym1364 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1365 := z.EncBinary() - _ = yym1365 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr1362 || yy2arr1362 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1367 := z.EncBinary() - _ = yym1367 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1368 := z.EncBinary() - _ = yym1368 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr1362 || yy2arr1362 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SecretKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1369 := z.DecBinary() - _ = yym1369 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1370 := r.ContainerType() - if yyct1370 == codecSelferValueTypeMap1234 { - 
yyl1370 := r.ReadMapStart() - if yyl1370 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1370, d) - } - } else if yyct1370 == codecSelferValueTypeArray1234 { - yyl1370 := r.ReadArrayStart() - if yyl1370 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1370, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SecretKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1371Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1371Slc - var yyhl1371 bool = l >= 0 - for yyj1371 := 0; ; yyj1371++ { - if yyhl1371 { - if yyj1371 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1371Slc = r.DecodeBytes(yys1371Slc, true, true) - yys1371 := string(yys1371Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1371 { - case "Name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys1371) - } // end switch yys1371 - } // end for yyj1371 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SecretKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1374 int - var yyb1374 bool - var yyhl1374 bool = l >= 0 - yyj1374++ - if yyhl1374 { - yyb1374 = yyj1374 > l - } else { - yyb1374 = r.CheckBreak() - } - if yyb1374 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj1374++ - if yyhl1374 { - yyb1374 = yyj1374 > l - } else { - yyb1374 = r.CheckBreak() - } - if yyb1374 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - for { - yyj1374++ - if yyhl1374 { - yyb1374 = yyj1374 > l - } else { - yyb1374 = r.CheckBreak() - } - if yyb1374 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1374-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HTTPHeader) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1377 := z.EncBinary() - _ = yym1377 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1378 := !z.EncBinary() - yy2arr1378 := z.EncBasicHandle().StructToArray - var yyq1378 [2]bool - _, _, _ = yysep1378, yyq1378, yy2arr1378 - const yyr1378 bool = false - var yynn1378 int - if yyr1378 || yy2arr1378 { - r.EncodeArrayStart(2) - } else { - yynn1378 = 2 - for _, b := range yyq1378 { - if b { - yynn1378++ - } - } - r.EncodeMapStart(yynn1378) - yynn1378 = 0 - } - if yyr1378 || yy2arr1378 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1380 := z.EncBinary() - _ = yym1380 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1381 := z.EncBinary() - _ = yym1381 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr1378 || yy2arr1378 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1383 := z.EncBinary() - _ = yym1383 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1384 := z.EncBinary() - _ = yym1384 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } - if yyr1378 || yy2arr1378 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HTTPHeader) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1385 := z.DecBinary() - _ = yym1385 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1386 := r.ContainerType() - if yyct1386 == codecSelferValueTypeMap1234 { - yyl1386 := r.ReadMapStart() - if yyl1386 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1386, d) - } - } else if yyct1386 == codecSelferValueTypeArray1234 { - yyl1386 := r.ReadArrayStart() - if yyl1386 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1386, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HTTPHeader) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1387Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1387Slc - var yyhl1387 bool = l >= 0 - for yyj1387 := 0; ; yyj1387++ { - if yyhl1387 { - if yyj1387 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1387Slc = r.DecodeBytes(yys1387Slc, true, true) - yys1387 := string(yys1387Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1387 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys1387) - } // end switch yys1387 - } // end for yyj1387 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HTTPHeader) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1390 int - var yyb1390 bool - var yyhl1390 bool = l >= 0 - yyj1390++ - if yyhl1390 { - yyb1390 = yyj1390 > l - } else { - yyb1390 = r.CheckBreak() - } - if yyb1390 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj1390++ - if yyhl1390 { - yyb1390 = yyj1390 > l - } else { - yyb1390 = 
r.CheckBreak() - } - if yyb1390 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - for { - yyj1390++ - if yyhl1390 { - yyb1390 = yyj1390 > l - } else { - yyb1390 = r.CheckBreak() - } - if yyb1390 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1390-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HTTPGetAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1393 := z.EncBinary() - _ = yym1393 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1394 := !z.EncBinary() - yy2arr1394 := z.EncBasicHandle().StructToArray - var yyq1394 [5]bool - _, _, _ = yysep1394, yyq1394, yy2arr1394 - const yyr1394 bool = false - yyq1394[0] = x.Path != "" - yyq1394[1] = true - yyq1394[2] = x.Host != "" - yyq1394[3] = x.Scheme != "" - yyq1394[4] = len(x.HTTPHeaders) != 0 - var yynn1394 int - if yyr1394 || yy2arr1394 { - r.EncodeArrayStart(5) - } else { - yynn1394 = 0 - for _, b := range yyq1394 { - if b { - yynn1394++ - } - } - r.EncodeMapStart(yynn1394) - yynn1394 = 0 - } - if yyr1394 || yy2arr1394 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1394[0] { - yym1396 := z.EncBinary() - _ = yym1396 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1394[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1397 := z.EncBinary() - _ = yym1397 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - } - if yyr1394 || yy2arr1394 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1394[1] { - yy1399 := &x.Port - yym1400 := z.EncBinary() - _ = yym1400 - if false { - } else if z.HasExtensions() && z.EncExt(yy1399) { - } else if !yym1400 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1399) - } else { - z.EncFallback(yy1399) - } - } else { - r.EncodeNil() - } - } else { - if yyq1394[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1401 := &x.Port - yym1402 := z.EncBinary() - _ = yym1402 - if false { - } else if z.HasExtensions() && z.EncExt(yy1401) { - } else if !yym1402 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1401) - } else { - z.EncFallback(yy1401) - } - } - } - if yyr1394 || yy2arr1394 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1394[2] { - yym1404 := z.EncBinary() - _ = yym1404 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1394[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("host")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1405 := z.EncBinary() - _ = yym1405 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } - } - if yyr1394 || yy2arr1394 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1394[3] { - x.Scheme.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1394[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("scheme")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Scheme.CodecEncodeSelf(e) - } - } - if yyr1394 || yy2arr1394 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1394[4] { - if x.HTTPHeaders == nil { - r.EncodeNil() - } else { - yym1408 := z.EncBinary() - _ = yym1408 - if false { - } else { - h.encSliceHTTPHeader(([]HTTPHeader)(x.HTTPHeaders), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1394[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("httpHeaders")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HTTPHeaders == nil { - r.EncodeNil() - } else { - yym1409 := z.EncBinary() - _ = yym1409 - if false { - } else { - h.encSliceHTTPHeader(([]HTTPHeader)(x.HTTPHeaders), e) - } - } - } - } - if yyr1394 || yy2arr1394 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HTTPGetAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1410 := z.DecBinary() - _ = yym1410 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1411 := r.ContainerType() - if yyct1411 == codecSelferValueTypeMap1234 { - yyl1411 := r.ReadMapStart() - if yyl1411 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1411, d) - } - } else if yyct1411 == codecSelferValueTypeArray1234 { - yyl1411 := r.ReadArrayStart() - if yyl1411 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1411, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HTTPGetAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1412Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1412Slc - var yyhl1412 bool = l >= 0 - for yyj1412 := 0; ; yyj1412++ { - if yyhl1412 { - if yyj1412 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1412Slc = r.DecodeBytes(yys1412Slc, true, true) - yys1412 := string(yys1412Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1412 { - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "port": - if r.TryDecodeAsNil() { - x.Port = pkg4_intstr.IntOrString{} - } else { - yyv1414 := &x.Port - yym1415 := z.DecBinary() - _ = yym1415 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1414) { - } else if !yym1415 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1414) - } else { - z.DecFallback(yyv1414, false) - } - } - case "host": - if r.TryDecodeAsNil() { - x.Host = "" - } else { - x.Host = string(r.DecodeString()) - } - case "scheme": - if r.TryDecodeAsNil() { - x.Scheme = "" - } else { - x.Scheme = URIScheme(r.DecodeString()) - } - case "httpHeaders": - if r.TryDecodeAsNil() { - x.HTTPHeaders = nil - } else { - yyv1418 := &x.HTTPHeaders - yym1419 := 
z.DecBinary() - _ = yym1419 - if false { - } else { - h.decSliceHTTPHeader((*[]HTTPHeader)(yyv1418), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys1412) - } // end switch yys1412 - } // end for yyj1412 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HTTPGetAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1420 int - var yyb1420 bool - var yyhl1420 bool = l >= 0 - yyj1420++ - if yyhl1420 { - yyb1420 = yyj1420 > l - } else { - yyb1420 = r.CheckBreak() - } - if yyb1420 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj1420++ - if yyhl1420 { - yyb1420 = yyj1420 > l - } else { - yyb1420 = r.CheckBreak() - } - if yyb1420 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = pkg4_intstr.IntOrString{} - } else { - yyv1422 := &x.Port - yym1423 := z.DecBinary() - _ = yym1423 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1422) { - } else if !yym1423 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1422) - } else { - z.DecFallback(yyv1422, false) - } - } - yyj1420++ - if yyhl1420 { - yyb1420 = yyj1420 > l - } else { - yyb1420 = r.CheckBreak() - } - if yyb1420 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Host = "" - } else { - x.Host = string(r.DecodeString()) - } - yyj1420++ - if yyhl1420 { - yyb1420 = yyj1420 > l - } else { - yyb1420 = r.CheckBreak() - } - if yyb1420 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Scheme = "" - } else { - x.Scheme = URIScheme(r.DecodeString()) - } - yyj1420++ - if yyhl1420 { - yyb1420 = yyj1420 > l - } else { - yyb1420 = r.CheckBreak() - } - if yyb1420 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HTTPHeaders = nil - } else { - yyv1426 := &x.HTTPHeaders - yym1427 := z.DecBinary() - _ = yym1427 - if false { - } else { - h.decSliceHTTPHeader((*[]HTTPHeader)(yyv1426), d) - } - } - for { - yyj1420++ - if yyhl1420 { - yyb1420 = yyj1420 > l - } else { - yyb1420 = r.CheckBreak() - } - if yyb1420 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1420-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x URIScheme) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1428 := z.EncBinary() - _ = yym1428 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *URIScheme) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1429 := z.DecBinary() - _ = yym1429 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *TCPSocketAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1430 := z.EncBinary() - _ = yym1430 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1431 := !z.EncBinary() - yy2arr1431 := z.EncBasicHandle().StructToArray - var yyq1431 [1]bool - _, _, _ = yysep1431, yyq1431, yy2arr1431 - const yyr1431 bool = false - yyq1431[0] = true - var yynn1431 int - if yyr1431 || yy2arr1431 { - r.EncodeArrayStart(1) - } else { - yynn1431 = 0 - for _, b := range yyq1431 { - if b { - yynn1431++ - } - } - r.EncodeMapStart(yynn1431) - yynn1431 = 0 - } - if yyr1431 || yy2arr1431 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1431[0] { - yy1433 := &x.Port - yym1434 := z.EncBinary() - _ = yym1434 - if false { - } else if z.HasExtensions() && z.EncExt(yy1433) { - } else if !yym1434 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1433) - } else { - z.EncFallback(yy1433) - } - } else { - r.EncodeNil() - } - } else { - if yyq1431[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1435 := &x.Port - yym1436 := z.EncBinary() - _ = yym1436 - if false { - } else if z.HasExtensions() && z.EncExt(yy1435) { - } else if !yym1436 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1435) - } else { - z.EncFallback(yy1435) - } - } - } - if yyr1431 || yy2arr1431 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *TCPSocketAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1437 := z.DecBinary() - _ = yym1437 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1438 := r.ContainerType() - if yyct1438 == codecSelferValueTypeMap1234 { - yyl1438 := r.ReadMapStart() - if yyl1438 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1438, d) - } - } else if yyct1438 == codecSelferValueTypeArray1234 { - yyl1438 := r.ReadArrayStart() - if yyl1438 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1438, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *TCPSocketAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1439Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1439Slc - var yyhl1439 bool = l >= 0 - for yyj1439 := 0; ; yyj1439++ { - if yyhl1439 { - if yyj1439 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1439Slc = r.DecodeBytes(yys1439Slc, true, true) - yys1439 := string(yys1439Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1439 { - case "port": - if r.TryDecodeAsNil() { - x.Port = pkg4_intstr.IntOrString{} - } else { - yyv1440 := &x.Port - yym1441 := z.DecBinary() - _ = yym1441 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1440) { - } else if !yym1441 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1440) - } else { - z.DecFallback(yyv1440, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys1439) - } // end switch yys1439 - } // end for yyj1439 - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *TCPSocketAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1442 int - var yyb1442 bool - var yyhl1442 bool = l >= 0 - yyj1442++ - if yyhl1442 { - yyb1442 = yyj1442 > l - } else { - yyb1442 = r.CheckBreak() - } - if yyb1442 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = pkg4_intstr.IntOrString{} - } else { - yyv1443 := &x.Port - yym1444 := z.DecBinary() - _ = yym1444 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1443) { - } else if !yym1444 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1443) - } else { - z.DecFallback(yyv1443, false) - } - } - for { - yyj1442++ - if yyhl1442 { - yyb1442 = yyj1442 > l - } else { - yyb1442 = r.CheckBreak() - } - if yyb1442 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1442-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ExecAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1445 := z.EncBinary() - _ = yym1445 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1446 := !z.EncBinary() - yy2arr1446 := z.EncBasicHandle().StructToArray - var yyq1446 [1]bool - _, _, _ = yysep1446, yyq1446, yy2arr1446 - const yyr1446 bool = false - yyq1446[0] = len(x.Command) != 0 - var yynn1446 int - if yyr1446 || yy2arr1446 { - r.EncodeArrayStart(1) - } else { - yynn1446 = 0 - for _, b := range yyq1446 { - if b { - yynn1446++ - } - } - r.EncodeMapStart(yynn1446) - yynn1446 = 0 - } - if yyr1446 || yy2arr1446 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1446[0] { - if x.Command == nil { - r.EncodeNil() - } else { - yym1448 := z.EncBinary() - _ = yym1448 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1446[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("command")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Command == nil { - r.EncodeNil() - } else { - yym1449 := z.EncBinary() - _ = yym1449 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } - } - if yyr1446 || yy2arr1446 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ExecAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1450 := z.DecBinary() - _ = yym1450 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1451 := r.ContainerType() - if yyct1451 == codecSelferValueTypeMap1234 { - yyl1451 := r.ReadMapStart() - if yyl1451 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1451, d) - } - } else if yyct1451 == codecSelferValueTypeArray1234 { - yyl1451 := r.ReadArrayStart() - if yyl1451 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1451, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} 
- -func (x *ExecAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1452Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1452Slc - var yyhl1452 bool = l >= 0 - for yyj1452 := 0; ; yyj1452++ { - if yyhl1452 { - if yyj1452 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1452Slc = r.DecodeBytes(yys1452Slc, true, true) - yys1452 := string(yys1452Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1452 { - case "command": - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv1453 := &x.Command - yym1454 := z.DecBinary() - _ = yym1454 - if false { - } else { - z.F.DecSliceStringX(yyv1453, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys1452) - } // end switch yys1452 - } // end for yyj1452 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ExecAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1455 int - var yyb1455 bool - var yyhl1455 bool = l >= 0 - yyj1455++ - if yyhl1455 { - yyb1455 = yyj1455 > l - } else { - yyb1455 = r.CheckBreak() - } - if yyb1455 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv1456 := &x.Command - yym1457 := z.DecBinary() - _ = yym1457 - if false { - } else { - z.F.DecSliceStringX(yyv1456, false, d) - } - } - for { - yyj1455++ - if yyhl1455 { - yyb1455 = yyj1455 > l - } else { - yyb1455 = r.CheckBreak() - } - if yyb1455 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1455-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Probe) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1458 := z.EncBinary() - _ = yym1458 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1459 := !z.EncBinary() - yy2arr1459 := z.EncBasicHandle().StructToArray - var yyq1459 [8]bool - _, _, _ = yysep1459, yyq1459, yy2arr1459 - const yyr1459 bool = false - yyq1459[0] = x.Handler.Exec != nil && x.Exec != nil - yyq1459[1] = x.Handler.HTTPGet != nil && x.HTTPGet != nil - yyq1459[2] = x.Handler.TCPSocket != nil && x.TCPSocket != nil - yyq1459[3] = x.InitialDelaySeconds != 0 - yyq1459[4] = x.TimeoutSeconds != 0 - yyq1459[5] = x.PeriodSeconds != 0 - yyq1459[6] = x.SuccessThreshold != 0 - yyq1459[7] = x.FailureThreshold != 0 - var yynn1459 int - if yyr1459 || yy2arr1459 { - r.EncodeArrayStart(8) - } else { - yynn1459 = 0 - for _, b := range yyq1459 { - if b { - yynn1459++ - } - } - r.EncodeMapStart(yynn1459) - yynn1459 = 0 - } - var yyn1460 bool - if x.Handler.Exec == nil { - yyn1460 = true - goto LABEL1460 - } - LABEL1460: - if yyr1459 || yy2arr1459 { - if yyn1460 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1459[0] { - if x.Exec == nil { - r.EncodeNil() - } else { - x.Exec.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq1459[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("exec")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn1460 { - r.EncodeNil() - } else { - if x.Exec == nil { - r.EncodeNil() - } else { - x.Exec.CodecEncodeSelf(e) - } - } - } - } - var yyn1461 bool - if x.Handler.HTTPGet == nil { - yyn1461 = true - goto LABEL1461 - } - LABEL1461: - if yyr1459 || yy2arr1459 { - if yyn1461 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1459[1] { - if x.HTTPGet == nil { - r.EncodeNil() - } else { - x.HTTPGet.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq1459[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("httpGet")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn1461 { - r.EncodeNil() - } else { - if x.HTTPGet == nil { - r.EncodeNil() - } else { - x.HTTPGet.CodecEncodeSelf(e) - } - } - } - } - var yyn1462 bool - if x.Handler.TCPSocket == nil { - yyn1462 = true - goto LABEL1462 - } - LABEL1462: - if yyr1459 || yy2arr1459 { - if yyn1462 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1459[2] { - if x.TCPSocket == nil { - r.EncodeNil() - } else { - x.TCPSocket.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq1459[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tcpSocket")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn1462 { - r.EncodeNil() - } else { - if x.TCPSocket == nil { - r.EncodeNil() - } else { - x.TCPSocket.CodecEncodeSelf(e) - } - } - } - } - if yyr1459 || yy2arr1459 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1459[3] { - yym1464 := z.EncBinary() - _ = yym1464 - if false { - } else { - r.EncodeInt(int64(x.InitialDelaySeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq1459[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("initialDelaySeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1465 := z.EncBinary() - _ = yym1465 - if false { - } else { - r.EncodeInt(int64(x.InitialDelaySeconds)) - } - } - } - if yyr1459 || yy2arr1459 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1459[4] { - yym1467 := z.EncBinary() - _ = yym1467 - if false { - } else { - r.EncodeInt(int64(x.TimeoutSeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq1459[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("timeoutSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1468 := z.EncBinary() - _ = yym1468 - if false { - } else { - r.EncodeInt(int64(x.TimeoutSeconds)) - } - } - } - if yyr1459 || yy2arr1459 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1459[5] { - yym1470 := z.EncBinary() - _ = yym1470 - if false { - } else { - r.EncodeInt(int64(x.PeriodSeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq1459[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("periodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1471 := z.EncBinary() - _ = yym1471 - if false { - } else { - r.EncodeInt(int64(x.PeriodSeconds)) - } - } - } - if yyr1459 || yy2arr1459 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1459[6] { - yym1473 := z.EncBinary() - _ = yym1473 - 
if false { - } else { - r.EncodeInt(int64(x.SuccessThreshold)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq1459[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("successThreshold")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1474 := z.EncBinary() - _ = yym1474 - if false { - } else { - r.EncodeInt(int64(x.SuccessThreshold)) - } - } - } - if yyr1459 || yy2arr1459 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1459[7] { - yym1476 := z.EncBinary() - _ = yym1476 - if false { - } else { - r.EncodeInt(int64(x.FailureThreshold)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq1459[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("failureThreshold")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1477 := z.EncBinary() - _ = yym1477 - if false { - } else { - r.EncodeInt(int64(x.FailureThreshold)) - } - } - } - if yyr1459 || yy2arr1459 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Probe) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1478 := z.DecBinary() - _ = yym1478 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1479 := r.ContainerType() - if yyct1479 == codecSelferValueTypeMap1234 { - yyl1479 := r.ReadMapStart() - if yyl1479 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1479, d) - } - } else if yyct1479 == codecSelferValueTypeArray1234 { - yyl1479 := r.ReadArrayStart() - if yyl1479 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1479, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Probe) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1480Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1480Slc - var yyhl1480 bool = l >= 0 - for yyj1480 := 0; ; yyj1480++ { - if yyhl1480 { - if yyj1480 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1480Slc = r.DecodeBytes(yys1480Slc, true, true) - yys1480 := string(yys1480Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1480 { - case "exec": - if x.Handler.Exec == nil { - x.Handler.Exec = new(ExecAction) - } - if r.TryDecodeAsNil() { - if x.Exec != nil { - x.Exec = nil - } - } else { - if x.Exec == nil { - x.Exec = new(ExecAction) - } - x.Exec.CodecDecodeSelf(d) - } - case "httpGet": - if x.Handler.HTTPGet == nil { - x.Handler.HTTPGet = new(HTTPGetAction) - } - if r.TryDecodeAsNil() { - if x.HTTPGet != nil { - x.HTTPGet = nil - } - } else { - if x.HTTPGet == nil { - x.HTTPGet = new(HTTPGetAction) - } - x.HTTPGet.CodecDecodeSelf(d) - } - case "tcpSocket": - if x.Handler.TCPSocket == nil { - x.Handler.TCPSocket = new(TCPSocketAction) - } - if r.TryDecodeAsNil() { - if x.TCPSocket != nil { - x.TCPSocket = nil - } - } else { - if x.TCPSocket == nil { - x.TCPSocket = new(TCPSocketAction) - } - x.TCPSocket.CodecDecodeSelf(d) - } - case "initialDelaySeconds": - if r.TryDecodeAsNil() { - x.InitialDelaySeconds = 0 - } else { - 
x.InitialDelaySeconds = int32(r.DecodeInt(32)) - } - case "timeoutSeconds": - if r.TryDecodeAsNil() { - x.TimeoutSeconds = 0 - } else { - x.TimeoutSeconds = int32(r.DecodeInt(32)) - } - case "periodSeconds": - if r.TryDecodeAsNil() { - x.PeriodSeconds = 0 - } else { - x.PeriodSeconds = int32(r.DecodeInt(32)) - } - case "successThreshold": - if r.TryDecodeAsNil() { - x.SuccessThreshold = 0 - } else { - x.SuccessThreshold = int32(r.DecodeInt(32)) - } - case "failureThreshold": - if r.TryDecodeAsNil() { - x.FailureThreshold = 0 - } else { - x.FailureThreshold = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys1480) - } // end switch yys1480 - } // end for yyj1480 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1489 int - var yyb1489 bool - var yyhl1489 bool = l >= 0 - if x.Handler.Exec == nil { - x.Handler.Exec = new(ExecAction) - } - yyj1489++ - if yyhl1489 { - yyb1489 = yyj1489 > l - } else { - yyb1489 = r.CheckBreak() - } - if yyb1489 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Exec != nil { - x.Exec = nil - } - } else { - if x.Exec == nil { - x.Exec = new(ExecAction) - } - x.Exec.CodecDecodeSelf(d) - } - if x.Handler.HTTPGet == nil { - x.Handler.HTTPGet = new(HTTPGetAction) - } - yyj1489++ - if yyhl1489 { - yyb1489 = yyj1489 > l - } else { - yyb1489 = r.CheckBreak() - } - if yyb1489 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HTTPGet != nil { - x.HTTPGet = nil - } - } else { - if x.HTTPGet == nil { - x.HTTPGet = new(HTTPGetAction) - } - x.HTTPGet.CodecDecodeSelf(d) - } - if x.Handler.TCPSocket == nil { - x.Handler.TCPSocket = new(TCPSocketAction) - } - yyj1489++ - if yyhl1489 { - yyb1489 = yyj1489 > l - } else { - yyb1489 = r.CheckBreak() - } - if yyb1489 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TCPSocket != nil { - x.TCPSocket = nil - } - } else { - if x.TCPSocket == nil { - x.TCPSocket = new(TCPSocketAction) - } - x.TCPSocket.CodecDecodeSelf(d) - } - yyj1489++ - if yyhl1489 { - yyb1489 = yyj1489 > l - } else { - yyb1489 = r.CheckBreak() - } - if yyb1489 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.InitialDelaySeconds = 0 - } else { - x.InitialDelaySeconds = int32(r.DecodeInt(32)) - } - yyj1489++ - if yyhl1489 { - yyb1489 = yyj1489 > l - } else { - yyb1489 = r.CheckBreak() - } - if yyb1489 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TimeoutSeconds = 0 - } else { - x.TimeoutSeconds = int32(r.DecodeInt(32)) - } - yyj1489++ - if yyhl1489 { - yyb1489 = yyj1489 > l - } else { - yyb1489 = r.CheckBreak() - } - if yyb1489 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PeriodSeconds = 0 - } else { - x.PeriodSeconds = int32(r.DecodeInt(32)) - } - yyj1489++ - 
if yyhl1489 { - yyb1489 = yyj1489 > l - } else { - yyb1489 = r.CheckBreak() - } - if yyb1489 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SuccessThreshold = 0 - } else { - x.SuccessThreshold = int32(r.DecodeInt(32)) - } - yyj1489++ - if yyhl1489 { - yyb1489 = yyj1489 > l - } else { - yyb1489 = r.CheckBreak() - } - if yyb1489 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FailureThreshold = 0 - } else { - x.FailureThreshold = int32(r.DecodeInt(32)) - } - for { - yyj1489++ - if yyhl1489 { - yyb1489 = yyj1489 > l - } else { - yyb1489 = r.CheckBreak() - } - if yyb1489 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1489-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x PullPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1498 := z.EncBinary() - _ = yym1498 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PullPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1499 := z.DecBinary() - _ = yym1499 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x Capability) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1500 := z.EncBinary() - _ = yym1500 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *Capability) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1501 := z.DecBinary() - _ = yym1501 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *Capabilities) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1502 := z.EncBinary() - _ = yym1502 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1503 := !z.EncBinary() - yy2arr1503 := z.EncBasicHandle().StructToArray - var yyq1503 [2]bool - _, _, _ = yysep1503, yyq1503, yy2arr1503 - const yyr1503 bool = false - yyq1503[0] = len(x.Add) != 0 - yyq1503[1] = len(x.Drop) != 0 - var yynn1503 int - if yyr1503 || yy2arr1503 { - r.EncodeArrayStart(2) - } else { - yynn1503 = 0 - for _, b := range yyq1503 { - if b { - yynn1503++ - } - } - r.EncodeMapStart(yynn1503) - yynn1503 = 0 - } - if yyr1503 || yy2arr1503 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1503[0] { - if x.Add == nil { - r.EncodeNil() - } else { - yym1505 := z.EncBinary() - _ = yym1505 - if false { - } else { - h.encSliceCapability(([]Capability)(x.Add), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1503[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("add")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Add == nil { - r.EncodeNil() - } else { - 
yym1506 := z.EncBinary() - _ = yym1506 - if false { - } else { - h.encSliceCapability(([]Capability)(x.Add), e) - } - } - } - } - if yyr1503 || yy2arr1503 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1503[1] { - if x.Drop == nil { - r.EncodeNil() - } else { - yym1508 := z.EncBinary() - _ = yym1508 - if false { - } else { - h.encSliceCapability(([]Capability)(x.Drop), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1503[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("drop")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Drop == nil { - r.EncodeNil() - } else { - yym1509 := z.EncBinary() - _ = yym1509 - if false { - } else { - h.encSliceCapability(([]Capability)(x.Drop), e) - } - } - } - } - if yyr1503 || yy2arr1503 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Capabilities) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1510 := z.DecBinary() - _ = yym1510 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1511 := r.ContainerType() - if yyct1511 == codecSelferValueTypeMap1234 { - yyl1511 := r.ReadMapStart() - if yyl1511 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1511, d) - } - } else if yyct1511 == codecSelferValueTypeArray1234 { - yyl1511 := r.ReadArrayStart() - if yyl1511 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1511, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Capabilities) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1512Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1512Slc - var yyhl1512 bool = l >= 0 - for yyj1512 := 0; ; yyj1512++ { - if yyhl1512 { - if yyj1512 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1512Slc = r.DecodeBytes(yys1512Slc, true, true) - yys1512 := string(yys1512Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1512 { - case "add": - if r.TryDecodeAsNil() { - x.Add = nil - } else { - yyv1513 := &x.Add - yym1514 := z.DecBinary() - _ = yym1514 - if false { - } else { - h.decSliceCapability((*[]Capability)(yyv1513), d) - } - } - case "drop": - if r.TryDecodeAsNil() { - x.Drop = nil - } else { - yyv1515 := &x.Drop - yym1516 := z.DecBinary() - _ = yym1516 - if false { - } else { - h.decSliceCapability((*[]Capability)(yyv1515), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys1512) - } // end switch yys1512 - } // end for yyj1512 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Capabilities) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1517 int - var yyb1517 bool - var yyhl1517 bool = l >= 0 - yyj1517++ - if yyhl1517 { - yyb1517 = yyj1517 > l - } else { - yyb1517 = r.CheckBreak() - } - if yyb1517 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Add = nil - } else { 
- yyv1518 := &x.Add - yym1519 := z.DecBinary() - _ = yym1519 - if false { - } else { - h.decSliceCapability((*[]Capability)(yyv1518), d) - } - } - yyj1517++ - if yyhl1517 { - yyb1517 = yyj1517 > l - } else { - yyb1517 = r.CheckBreak() - } - if yyb1517 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Drop = nil - } else { - yyv1520 := &x.Drop - yym1521 := z.DecBinary() - _ = yym1521 - if false { - } else { - h.decSliceCapability((*[]Capability)(yyv1520), d) - } - } - for { - yyj1517++ - if yyhl1517 { - yyb1517 = yyj1517 > l - } else { - yyb1517 = r.CheckBreak() - } - if yyb1517 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1517-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceRequirements) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1522 := z.EncBinary() - _ = yym1522 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1523 := !z.EncBinary() - yy2arr1523 := z.EncBasicHandle().StructToArray - var yyq1523 [2]bool - _, _, _ = yysep1523, yyq1523, yy2arr1523 - const yyr1523 bool = false - yyq1523[0] = len(x.Limits) != 0 - yyq1523[1] = len(x.Requests) != 0 - var yynn1523 int - if yyr1523 || yy2arr1523 { - r.EncodeArrayStart(2) - } else { - yynn1523 = 0 - for _, b := range yyq1523 { - if b { - yynn1523++ - } - } - r.EncodeMapStart(yynn1523) - yynn1523 = 0 - } - if yyr1523 || yy2arr1523 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1523[0] { - if x.Limits == nil { - r.EncodeNil() - } else { - x.Limits.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1523[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("limits")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Limits == nil { - r.EncodeNil() - } else { - x.Limits.CodecEncodeSelf(e) - } - } - } - if yyr1523 || yy2arr1523 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1523[1] { - if x.Requests == nil { - r.EncodeNil() - } else { - x.Requests.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1523[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("requests")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Requests == nil { - r.EncodeNil() - } else { - x.Requests.CodecEncodeSelf(e) - } - } - } - if yyr1523 || yy2arr1523 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceRequirements) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1526 := z.DecBinary() - _ = yym1526 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1527 := r.ContainerType() - if yyct1527 == codecSelferValueTypeMap1234 { - yyl1527 := r.ReadMapStart() - if yyl1527 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1527, d) - } - } else if yyct1527 == codecSelferValueTypeArray1234 { - yyl1527 := r.ReadArrayStart() - if yyl1527 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1527, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceRequirements) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1528Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1528Slc - var yyhl1528 bool = l >= 0 - for yyj1528 := 0; ; yyj1528++ { - if yyhl1528 { - if yyj1528 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1528Slc = r.DecodeBytes(yys1528Slc, true, true) - yys1528 := string(yys1528Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1528 { - case "limits": - if r.TryDecodeAsNil() { - x.Limits = nil - } else { - yyv1529 := &x.Limits - yyv1529.CodecDecodeSelf(d) - } - case "requests": - if r.TryDecodeAsNil() { - x.Requests = nil - } else { - yyv1530 := &x.Requests - yyv1530.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys1528) - } // end switch yys1528 - } // end for yyj1528 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceRequirements) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1531 int - var yyb1531 bool - var yyhl1531 bool = l >= 0 - yyj1531++ - if yyhl1531 { - yyb1531 = yyj1531 > l - } else { - yyb1531 = r.CheckBreak() - } - if yyb1531 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Limits = nil - } else { - yyv1532 := &x.Limits - yyv1532.CodecDecodeSelf(d) - } - yyj1531++ - if yyhl1531 { - yyb1531 = yyj1531 > l - } else { - yyb1531 = r.CheckBreak() - } - if yyb1531 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Requests = nil - } else { - yyv1533 := &x.Requests - yyv1533.CodecDecodeSelf(d) - } - for { - yyj1531++ - if yyhl1531 { - yyb1531 = yyj1531 > l - } else { - yyb1531 = r.CheckBreak() - } - if yyb1531 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1531-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1534 := z.EncBinary() - _ = yym1534 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1535 := !z.EncBinary() - yy2arr1535 := z.EncBasicHandle().StructToArray - var yyq1535 [18]bool - _, _, _ = yysep1535, yyq1535, yy2arr1535 - const yyr1535 bool = false - yyq1535[2] = len(x.Command) != 0 - yyq1535[3] = len(x.Args) != 0 - yyq1535[4] = x.WorkingDir != "" - yyq1535[5] = len(x.Ports) != 0 - yyq1535[6] = len(x.Env) != 0 - yyq1535[7] = true - yyq1535[8] = len(x.VolumeMounts) != 0 - yyq1535[9] = x.LivenessProbe != nil - yyq1535[10] = x.ReadinessProbe != nil - yyq1535[11] = x.Lifecycle != nil - yyq1535[12] = x.TerminationMessagePath != "" - yyq1535[14] = x.SecurityContext != nil - yyq1535[15] = x.Stdin != false - yyq1535[16] = x.StdinOnce != false - yyq1535[17] = x.TTY != false - var yynn1535 int - if yyr1535 || 
yy2arr1535 { - r.EncodeArrayStart(18) - } else { - yynn1535 = 3 - for _, b := range yyq1535 { - if b { - yynn1535++ - } - } - r.EncodeMapStart(yynn1535) - yynn1535 = 0 - } - if yyr1535 || yy2arr1535 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1537 := z.EncBinary() - _ = yym1537 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1538 := z.EncBinary() - _ = yym1538 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr1535 || yy2arr1535 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1540 := z.EncBinary() - _ = yym1540 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Image)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("image")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1541 := z.EncBinary() - _ = yym1541 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Image)) - } - } - if yyr1535 || yy2arr1535 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1535[2] { - if x.Command == nil { - r.EncodeNil() - } else { - yym1543 := z.EncBinary() - _ = yym1543 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1535[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("command")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Command == nil { - r.EncodeNil() - } else { - yym1544 := z.EncBinary() - _ = yym1544 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } - } - if yyr1535 || yy2arr1535 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1535[3] { - if x.Args == nil { - r.EncodeNil() - } else { - yym1546 := z.EncBinary() - _ = yym1546 - if false { - } else { - z.F.EncSliceStringV(x.Args, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1535[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("args")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Args == nil { - r.EncodeNil() - } else { - yym1547 := z.EncBinary() - _ = yym1547 - if false { - } else { - z.F.EncSliceStringV(x.Args, false, e) - } - } - } - } - if yyr1535 || yy2arr1535 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1535[4] { - yym1549 := z.EncBinary() - _ = yym1549 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.WorkingDir)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1535[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("workingDir")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1550 := z.EncBinary() - _ = yym1550 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.WorkingDir)) - } - } - } - if yyr1535 || yy2arr1535 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1535[5] { - if x.Ports == nil { - r.EncodeNil() - } else { - yym1552 := z.EncBinary() - _ = yym1552 - if false { - } else { - h.encSliceContainerPort(([]ContainerPort)(x.Ports), e) - } - } - } else { - 
r.EncodeNil() - } - } else { - if yyq1535[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ports")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ports == nil { - r.EncodeNil() - } else { - yym1553 := z.EncBinary() - _ = yym1553 - if false { - } else { - h.encSliceContainerPort(([]ContainerPort)(x.Ports), e) - } - } - } - } - if yyr1535 || yy2arr1535 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1535[6] { - if x.Env == nil { - r.EncodeNil() - } else { - yym1555 := z.EncBinary() - _ = yym1555 - if false { - } else { - h.encSliceEnvVar(([]EnvVar)(x.Env), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1535[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("env")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Env == nil { - r.EncodeNil() - } else { - yym1556 := z.EncBinary() - _ = yym1556 - if false { - } else { - h.encSliceEnvVar(([]EnvVar)(x.Env), e) - } - } - } - } - if yyr1535 || yy2arr1535 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1535[7] { - yy1558 := &x.Resources - yy1558.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq1535[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resources")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1559 := &x.Resources - yy1559.CodecEncodeSelf(e) - } - } - if yyr1535 || yy2arr1535 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1535[8] { - if x.VolumeMounts == nil { - r.EncodeNil() - } else { - yym1561 := z.EncBinary() - _ = yym1561 - if false { - } else { - h.encSliceVolumeMount(([]VolumeMount)(x.VolumeMounts), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1535[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeMounts")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VolumeMounts == nil { - r.EncodeNil() - } else { - yym1562 := z.EncBinary() - _ = yym1562 - if false { - } else { - h.encSliceVolumeMount(([]VolumeMount)(x.VolumeMounts), e) - } - } - } - } - if yyr1535 || yy2arr1535 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1535[9] { - if x.LivenessProbe == nil { - r.EncodeNil() - } else { - x.LivenessProbe.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1535[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("livenessProbe")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LivenessProbe == nil { - r.EncodeNil() - } else { - x.LivenessProbe.CodecEncodeSelf(e) - } - } - } - if yyr1535 || yy2arr1535 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1535[10] { - if x.ReadinessProbe == nil { - r.EncodeNil() - } else { - x.ReadinessProbe.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1535[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readinessProbe")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ReadinessProbe == nil { - r.EncodeNil() - } else { - x.ReadinessProbe.CodecEncodeSelf(e) - } - } - } - if yyr1535 || yy2arr1535 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1535[11] { - if x.Lifecycle == nil { - 
r.EncodeNil() - } else { - x.Lifecycle.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1535[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lifecycle")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Lifecycle == nil { - r.EncodeNil() - } else { - x.Lifecycle.CodecEncodeSelf(e) - } - } - } - if yyr1535 || yy2arr1535 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1535[12] { - yym1567 := z.EncBinary() - _ = yym1567 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TerminationMessagePath)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1535[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("terminationMessagePath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1568 := z.EncBinary() - _ = yym1568 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TerminationMessagePath)) - } - } - } - if yyr1535 || yy2arr1535 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.ImagePullPolicy.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("imagePullPolicy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.ImagePullPolicy.CodecEncodeSelf(e) - } - if yyr1535 || yy2arr1535 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1535[14] { - if x.SecurityContext == nil { - r.EncodeNil() - } else { - x.SecurityContext.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1535[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("securityContext")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecurityContext == nil { - r.EncodeNil() - } else { - x.SecurityContext.CodecEncodeSelf(e) - } - } - } - if yyr1535 || yy2arr1535 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1535[15] { - yym1572 := z.EncBinary() - _ = yym1572 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq1535[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stdin")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1573 := z.EncBinary() - _ = yym1573 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } - } - if yyr1535 || yy2arr1535 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1535[16] { - yym1575 := z.EncBinary() - _ = yym1575 - if false { - } else { - r.EncodeBool(bool(x.StdinOnce)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq1535[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stdinOnce")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1576 := z.EncBinary() - _ = yym1576 - if false { - } else { - r.EncodeBool(bool(x.StdinOnce)) - } - } - } - if yyr1535 || yy2arr1535 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1535[17] { - yym1578 := z.EncBinary() - _ = yym1578 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq1535[17] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tty")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1579 := z.EncBinary() - _ = yym1579 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } - } - if yyr1535 || yy2arr1535 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Container) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1580 := z.DecBinary() - _ = yym1580 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1581 := r.ContainerType() - if yyct1581 == codecSelferValueTypeMap1234 { - yyl1581 := r.ReadMapStart() - if yyl1581 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1581, d) - } - } else if yyct1581 == codecSelferValueTypeArray1234 { - yyl1581 := r.ReadArrayStart() - if yyl1581 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1581, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Container) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1582Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1582Slc - var yyhl1582 bool = l >= 0 - for yyj1582 := 0; ; yyj1582++ { - if yyhl1582 { - if yyj1582 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1582Slc = r.DecodeBytes(yys1582Slc, true, true) - yys1582 := string(yys1582Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1582 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "image": - if r.TryDecodeAsNil() { - x.Image = "" - } else { - x.Image = string(r.DecodeString()) - } - case "command": - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv1585 := &x.Command - yym1586 := z.DecBinary() - _ = yym1586 - if false { - } else { - z.F.DecSliceStringX(yyv1585, false, d) - } - } - case "args": - if r.TryDecodeAsNil() { - x.Args = nil - } else { - yyv1587 := &x.Args - yym1588 := z.DecBinary() - _ = yym1588 - if false { - } else { - z.F.DecSliceStringX(yyv1587, false, d) - } - } - case "workingDir": - if r.TryDecodeAsNil() { - x.WorkingDir = "" - } else { - x.WorkingDir = string(r.DecodeString()) - } - case "ports": - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv1590 := &x.Ports - yym1591 := z.DecBinary() - _ = yym1591 - if false { - } else { - h.decSliceContainerPort((*[]ContainerPort)(yyv1590), d) - } - } - case "env": - if r.TryDecodeAsNil() { - x.Env = nil - } else { - yyv1592 := &x.Env - yym1593 := z.DecBinary() - _ = yym1593 - if false { - } else { - h.decSliceEnvVar((*[]EnvVar)(yyv1592), d) - } - } - case "resources": - if r.TryDecodeAsNil() { - x.Resources = ResourceRequirements{} - } else { - yyv1594 := &x.Resources - yyv1594.CodecDecodeSelf(d) - } - case "volumeMounts": - if r.TryDecodeAsNil() { - x.VolumeMounts = nil - } else { - yyv1595 := &x.VolumeMounts - yym1596 := z.DecBinary() - _ = yym1596 - if false { - } else { - h.decSliceVolumeMount((*[]VolumeMount)(yyv1595), d) - } - } - case "livenessProbe": - if r.TryDecodeAsNil() { - if x.LivenessProbe != nil { - x.LivenessProbe = nil - } - } else { - if x.LivenessProbe == nil { - x.LivenessProbe = new(Probe) - } - 
x.LivenessProbe.CodecDecodeSelf(d) - } - case "readinessProbe": - if r.TryDecodeAsNil() { - if x.ReadinessProbe != nil { - x.ReadinessProbe = nil - } - } else { - if x.ReadinessProbe == nil { - x.ReadinessProbe = new(Probe) - } - x.ReadinessProbe.CodecDecodeSelf(d) - } - case "lifecycle": - if r.TryDecodeAsNil() { - if x.Lifecycle != nil { - x.Lifecycle = nil - } - } else { - if x.Lifecycle == nil { - x.Lifecycle = new(Lifecycle) - } - x.Lifecycle.CodecDecodeSelf(d) - } - case "terminationMessagePath": - if r.TryDecodeAsNil() { - x.TerminationMessagePath = "" - } else { - x.TerminationMessagePath = string(r.DecodeString()) - } - case "imagePullPolicy": - if r.TryDecodeAsNil() { - x.ImagePullPolicy = "" - } else { - x.ImagePullPolicy = PullPolicy(r.DecodeString()) - } - case "securityContext": - if r.TryDecodeAsNil() { - if x.SecurityContext != nil { - x.SecurityContext = nil - } - } else { - if x.SecurityContext == nil { - x.SecurityContext = new(SecurityContext) - } - x.SecurityContext.CodecDecodeSelf(d) - } - case "stdin": - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - x.Stdin = bool(r.DecodeBool()) - } - case "stdinOnce": - if r.TryDecodeAsNil() { - x.StdinOnce = false - } else { - x.StdinOnce = bool(r.DecodeBool()) - } - case "tty": - if r.TryDecodeAsNil() { - x.TTY = false - } else { - x.TTY = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys1582) - } // end switch yys1582 - } // end for yyj1582 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1606 int - var yyb1606 bool - var yyhl1606 bool = l >= 0 - yyj1606++ - if yyhl1606 { - yyb1606 = yyj1606 > l - } else { - yyb1606 = r.CheckBreak() - } - if yyb1606 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj1606++ - if yyhl1606 { - yyb1606 = yyj1606 > l - } else { - yyb1606 = r.CheckBreak() - } - if yyb1606 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Image = "" - } else { - x.Image = string(r.DecodeString()) - } - yyj1606++ - if yyhl1606 { - yyb1606 = yyj1606 > l - } else { - yyb1606 = r.CheckBreak() - } - if yyb1606 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv1609 := &x.Command - yym1610 := z.DecBinary() - _ = yym1610 - if false { - } else { - z.F.DecSliceStringX(yyv1609, false, d) - } - } - yyj1606++ - if yyhl1606 { - yyb1606 = yyj1606 > l - } else { - yyb1606 = r.CheckBreak() - } - if yyb1606 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Args = nil - } else { - yyv1611 := &x.Args - yym1612 := z.DecBinary() - _ = yym1612 - if false { - } else { - z.F.DecSliceStringX(yyv1611, false, d) - } - } - yyj1606++ - if yyhl1606 { - yyb1606 = yyj1606 > l - } else { - yyb1606 = r.CheckBreak() - } - if yyb1606 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.WorkingDir = "" - } else { - x.WorkingDir = string(r.DecodeString()) - } - yyj1606++ - if yyhl1606 { - yyb1606 = yyj1606 > l - } else { - yyb1606 = r.CheckBreak() - } - if yyb1606 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv1614 := &x.Ports - yym1615 := z.DecBinary() - _ = yym1615 - if false { - } else { - h.decSliceContainerPort((*[]ContainerPort)(yyv1614), d) - } - } - yyj1606++ - if yyhl1606 { - yyb1606 = yyj1606 > l - } else { - yyb1606 = r.CheckBreak() - } - if yyb1606 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Env = nil - } else { - yyv1616 := &x.Env - yym1617 := z.DecBinary() - _ = yym1617 - if false { - } else { - h.decSliceEnvVar((*[]EnvVar)(yyv1616), d) - } - } - yyj1606++ - if yyhl1606 { - yyb1606 = yyj1606 > l - } else { - yyb1606 = r.CheckBreak() - } - if yyb1606 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Resources = ResourceRequirements{} - } else { - yyv1618 := &x.Resources - yyv1618.CodecDecodeSelf(d) - } - yyj1606++ - if yyhl1606 { - yyb1606 = yyj1606 > l - } else { - yyb1606 = r.CheckBreak() - } - if yyb1606 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeMounts = nil - } else { - yyv1619 := &x.VolumeMounts - yym1620 := z.DecBinary() - _ = yym1620 - if false { - } else { - h.decSliceVolumeMount((*[]VolumeMount)(yyv1619), d) - } - } - yyj1606++ - if yyhl1606 { - yyb1606 = yyj1606 > l - } else { - yyb1606 = r.CheckBreak() - } - if yyb1606 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LivenessProbe != nil { - x.LivenessProbe = nil - } - } else { - if x.LivenessProbe == nil { - x.LivenessProbe = new(Probe) - } - x.LivenessProbe.CodecDecodeSelf(d) - } - yyj1606++ - if yyhl1606 { - yyb1606 = yyj1606 > l - } else { - yyb1606 = r.CheckBreak() - } - if yyb1606 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ReadinessProbe != nil { - x.ReadinessProbe = nil - } - } else { - if x.ReadinessProbe == nil { - x.ReadinessProbe = new(Probe) - } - x.ReadinessProbe.CodecDecodeSelf(d) - } - yyj1606++ - if yyhl1606 { - yyb1606 = yyj1606 > l - } else { - yyb1606 = r.CheckBreak() - } - if yyb1606 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Lifecycle != nil { - x.Lifecycle = nil - } - } else { - if x.Lifecycle == nil { - x.Lifecycle = new(Lifecycle) - } - x.Lifecycle.CodecDecodeSelf(d) - } - yyj1606++ - if yyhl1606 { - yyb1606 = yyj1606 > l - } else { - yyb1606 = r.CheckBreak() - } - if yyb1606 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TerminationMessagePath = "" - } else { - x.TerminationMessagePath = string(r.DecodeString()) - } - yyj1606++ - if yyhl1606 { - 
yyb1606 = yyj1606 > l - } else { - yyb1606 = r.CheckBreak() - } - if yyb1606 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ImagePullPolicy = "" - } else { - x.ImagePullPolicy = PullPolicy(r.DecodeString()) - } - yyj1606++ - if yyhl1606 { - yyb1606 = yyj1606 > l - } else { - yyb1606 = r.CheckBreak() - } - if yyb1606 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecurityContext != nil { - x.SecurityContext = nil - } - } else { - if x.SecurityContext == nil { - x.SecurityContext = new(SecurityContext) - } - x.SecurityContext.CodecDecodeSelf(d) - } - yyj1606++ - if yyhl1606 { - yyb1606 = yyj1606 > l - } else { - yyb1606 = r.CheckBreak() - } - if yyb1606 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - x.Stdin = bool(r.DecodeBool()) - } - yyj1606++ - if yyhl1606 { - yyb1606 = yyj1606 > l - } else { - yyb1606 = r.CheckBreak() - } - if yyb1606 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.StdinOnce = false - } else { - x.StdinOnce = bool(r.DecodeBool()) - } - yyj1606++ - if yyhl1606 { - yyb1606 = yyj1606 > l - } else { - yyb1606 = r.CheckBreak() - } - if yyb1606 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TTY = false - } else { - x.TTY = bool(r.DecodeBool()) - } - for { - yyj1606++ - if yyhl1606 { - yyb1606 = yyj1606 > l - } else { - yyb1606 = r.CheckBreak() - } - if yyb1606 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1606-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Handler) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1630 := z.EncBinary() - _ = yym1630 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1631 := !z.EncBinary() - yy2arr1631 := z.EncBasicHandle().StructToArray - var yyq1631 [3]bool - _, _, _ = yysep1631, yyq1631, yy2arr1631 - const yyr1631 bool = false - yyq1631[0] = x.Exec != nil - yyq1631[1] = x.HTTPGet != nil - yyq1631[2] = x.TCPSocket != nil - var yynn1631 int - if yyr1631 || yy2arr1631 { - r.EncodeArrayStart(3) - } else { - yynn1631 = 0 - for _, b := range yyq1631 { - if b { - yynn1631++ - } - } - r.EncodeMapStart(yynn1631) - yynn1631 = 0 - } - if yyr1631 || yy2arr1631 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1631[0] { - if x.Exec == nil { - r.EncodeNil() - } else { - x.Exec.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1631[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("exec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Exec == nil { - r.EncodeNil() - } else { - x.Exec.CodecEncodeSelf(e) - } - } - } - if yyr1631 || yy2arr1631 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1631[1] { - if x.HTTPGet == nil { - r.EncodeNil() - } 
else { - x.HTTPGet.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1631[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("httpGet")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HTTPGet == nil { - r.EncodeNil() - } else { - x.HTTPGet.CodecEncodeSelf(e) - } - } - } - if yyr1631 || yy2arr1631 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1631[2] { - if x.TCPSocket == nil { - r.EncodeNil() - } else { - x.TCPSocket.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1631[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tcpSocket")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TCPSocket == nil { - r.EncodeNil() - } else { - x.TCPSocket.CodecEncodeSelf(e) - } - } - } - if yyr1631 || yy2arr1631 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Handler) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1635 := z.DecBinary() - _ = yym1635 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1636 := r.ContainerType() - if yyct1636 == codecSelferValueTypeMap1234 { - yyl1636 := r.ReadMapStart() - if yyl1636 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1636, d) - } - } else if yyct1636 == codecSelferValueTypeArray1234 { - yyl1636 := r.ReadArrayStart() - if yyl1636 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1636, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Handler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1637Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1637Slc - var yyhl1637 bool = l >= 0 - for yyj1637 := 0; ; yyj1637++ { - if yyhl1637 { - if yyj1637 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1637Slc = r.DecodeBytes(yys1637Slc, true, true) - yys1637 := string(yys1637Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1637 { - case "exec": - if r.TryDecodeAsNil() { - if x.Exec != nil { - x.Exec = nil - } - } else { - if x.Exec == nil { - x.Exec = new(ExecAction) - } - x.Exec.CodecDecodeSelf(d) - } - case "httpGet": - if r.TryDecodeAsNil() { - if x.HTTPGet != nil { - x.HTTPGet = nil - } - } else { - if x.HTTPGet == nil { - x.HTTPGet = new(HTTPGetAction) - } - x.HTTPGet.CodecDecodeSelf(d) - } - case "tcpSocket": - if r.TryDecodeAsNil() { - if x.TCPSocket != nil { - x.TCPSocket = nil - } - } else { - if x.TCPSocket == nil { - x.TCPSocket = new(TCPSocketAction) - } - x.TCPSocket.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys1637) - } // end switch yys1637 - } // end for yyj1637 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Handler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1641 int - var yyb1641 bool - var yyhl1641 bool = l >= 0 - yyj1641++ - if yyhl1641 { - yyb1641 = yyj1641 > l - } 
else { - yyb1641 = r.CheckBreak() - } - if yyb1641 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Exec != nil { - x.Exec = nil - } - } else { - if x.Exec == nil { - x.Exec = new(ExecAction) - } - x.Exec.CodecDecodeSelf(d) - } - yyj1641++ - if yyhl1641 { - yyb1641 = yyj1641 > l - } else { - yyb1641 = r.CheckBreak() - } - if yyb1641 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HTTPGet != nil { - x.HTTPGet = nil - } - } else { - if x.HTTPGet == nil { - x.HTTPGet = new(HTTPGetAction) - } - x.HTTPGet.CodecDecodeSelf(d) - } - yyj1641++ - if yyhl1641 { - yyb1641 = yyj1641 > l - } else { - yyb1641 = r.CheckBreak() - } - if yyb1641 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TCPSocket != nil { - x.TCPSocket = nil - } - } else { - if x.TCPSocket == nil { - x.TCPSocket = new(TCPSocketAction) - } - x.TCPSocket.CodecDecodeSelf(d) - } - for { - yyj1641++ - if yyhl1641 { - yyb1641 = yyj1641 > l - } else { - yyb1641 = r.CheckBreak() - } - if yyb1641 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1641-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Lifecycle) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1645 := z.EncBinary() - _ = yym1645 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1646 := !z.EncBinary() - yy2arr1646 := z.EncBasicHandle().StructToArray - var yyq1646 [2]bool - _, _, _ = yysep1646, yyq1646, yy2arr1646 - const yyr1646 bool = false - yyq1646[0] = x.PostStart != nil - yyq1646[1] = x.PreStop != nil - var yynn1646 int - if yyr1646 || yy2arr1646 { - r.EncodeArrayStart(2) - } else { - yynn1646 = 0 - for _, b := range yyq1646 { - if b { - yynn1646++ - } - } - r.EncodeMapStart(yynn1646) - yynn1646 = 0 - } - if yyr1646 || yy2arr1646 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1646[0] { - if x.PostStart == nil { - r.EncodeNil() - } else { - x.PostStart.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1646[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("postStart")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PostStart == nil { - r.EncodeNil() - } else { - x.PostStart.CodecEncodeSelf(e) - } - } - } - if yyr1646 || yy2arr1646 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1646[1] { - if x.PreStop == nil { - r.EncodeNil() - } else { - x.PreStop.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1646[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preStop")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PreStop == nil { - r.EncodeNil() - } else { - x.PreStop.CodecEncodeSelf(e) - } - } - } - if yyr1646 || yy2arr1646 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Lifecycle) CodecDecodeSelf(d 
*codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1649 := z.DecBinary() - _ = yym1649 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1650 := r.ContainerType() - if yyct1650 == codecSelferValueTypeMap1234 { - yyl1650 := r.ReadMapStart() - if yyl1650 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1650, d) - } - } else if yyct1650 == codecSelferValueTypeArray1234 { - yyl1650 := r.ReadArrayStart() - if yyl1650 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1650, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Lifecycle) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1651Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1651Slc - var yyhl1651 bool = l >= 0 - for yyj1651 := 0; ; yyj1651++ { - if yyhl1651 { - if yyj1651 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1651Slc = r.DecodeBytes(yys1651Slc, true, true) - yys1651 := string(yys1651Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1651 { - case "postStart": - if r.TryDecodeAsNil() { - if x.PostStart != nil { - x.PostStart = nil - } - } else { - if x.PostStart == nil { - x.PostStart = new(Handler) - } - x.PostStart.CodecDecodeSelf(d) - } - case "preStop": - if r.TryDecodeAsNil() { - if x.PreStop != nil { - x.PreStop = nil - } - } else { - if x.PreStop == nil { - x.PreStop = new(Handler) - } - x.PreStop.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys1651) - } // end switch yys1651 - } // end for yyj1651 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Lifecycle) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1654 int - var yyb1654 bool - var yyhl1654 bool = l >= 0 - yyj1654++ - if yyhl1654 { - yyb1654 = yyj1654 > l - } else { - yyb1654 = r.CheckBreak() - } - if yyb1654 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PostStart != nil { - x.PostStart = nil - } - } else { - if x.PostStart == nil { - x.PostStart = new(Handler) - } - x.PostStart.CodecDecodeSelf(d) - } - yyj1654++ - if yyhl1654 { - yyb1654 = yyj1654 > l - } else { - yyb1654 = r.CheckBreak() - } - if yyb1654 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PreStop != nil { - x.PreStop = nil - } - } else { - if x.PreStop == nil { - x.PreStop = new(Handler) - } - x.PreStop.CodecDecodeSelf(d) - } - for { - yyj1654++ - if yyhl1654 { - yyb1654 = yyj1654 > l - } else { - yyb1654 = r.CheckBreak() - } - if yyb1654 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1654-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ConditionStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1657 := z.EncBinary() - _ = yym1657 - if false { - } else if 
z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ConditionStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1658 := z.DecBinary() - _ = yym1658 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ContainerStateWaiting) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1659 := z.EncBinary() - _ = yym1659 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1660 := !z.EncBinary() - yy2arr1660 := z.EncBasicHandle().StructToArray - var yyq1660 [2]bool - _, _, _ = yysep1660, yyq1660, yy2arr1660 - const yyr1660 bool = false - yyq1660[0] = x.Reason != "" - yyq1660[1] = x.Message != "" - var yynn1660 int - if yyr1660 || yy2arr1660 { - r.EncodeArrayStart(2) - } else { - yynn1660 = 0 - for _, b := range yyq1660 { - if b { - yynn1660++ - } - } - r.EncodeMapStart(yynn1660) - yynn1660 = 0 - } - if yyr1660 || yy2arr1660 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1660[0] { - yym1662 := z.EncBinary() - _ = yym1662 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1660[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1663 := z.EncBinary() - _ = yym1663 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr1660 || yy2arr1660 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1660[1] { - yym1665 := z.EncBinary() - _ = yym1665 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1660[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1666 := z.EncBinary() - _ = yym1666 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr1660 || yy2arr1660 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerStateWaiting) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1667 := z.DecBinary() - _ = yym1667 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1668 := r.ContainerType() - if yyct1668 == codecSelferValueTypeMap1234 { - yyl1668 := r.ReadMapStart() - if yyl1668 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1668, d) - } - } else if yyct1668 == codecSelferValueTypeArray1234 { - yyl1668 := r.ReadArrayStart() - if yyl1668 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1668, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerStateWaiting) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r 
:= codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1669Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1669Slc - var yyhl1669 bool = l >= 0 - for yyj1669 := 0; ; yyj1669++ { - if yyhl1669 { - if yyj1669 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1669Slc = r.DecodeBytes(yys1669Slc, true, true) - yys1669 := string(yys1669Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1669 { - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys1669) - } // end switch yys1669 - } // end for yyj1669 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerStateWaiting) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1672 int - var yyb1672 bool - var yyhl1672 bool = l >= 0 - yyj1672++ - if yyhl1672 { - yyb1672 = yyj1672 > l - } else { - yyb1672 = r.CheckBreak() - } - if yyb1672 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj1672++ - if yyhl1672 { - yyb1672 = yyj1672 > l - } else { - yyb1672 = r.CheckBreak() - } - if yyb1672 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj1672++ - if yyhl1672 { - yyb1672 = yyj1672 > l - } else { - yyb1672 = r.CheckBreak() - } - if yyb1672 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1672-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerStateRunning) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1675 := z.EncBinary() - _ = yym1675 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1676 := !z.EncBinary() - yy2arr1676 := z.EncBasicHandle().StructToArray - var yyq1676 [1]bool - _, _, _ = yysep1676, yyq1676, yy2arr1676 - const yyr1676 bool = false - yyq1676[0] = true - var yynn1676 int - if yyr1676 || yy2arr1676 { - r.EncodeArrayStart(1) - } else { - yynn1676 = 0 - for _, b := range yyq1676 { - if b { - yynn1676++ - } - } - r.EncodeMapStart(yynn1676) - yynn1676 = 0 - } - if yyr1676 || yy2arr1676 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1676[0] { - yy1678 := &x.StartedAt - yym1679 := z.EncBinary() - _ = yym1679 - if false { - } else if z.HasExtensions() && z.EncExt(yy1678) { - } else if yym1679 { - z.EncBinaryMarshal(yy1678) - } else if !yym1679 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1678) - } else { - z.EncFallback(yy1678) - } - } else { - r.EncodeNil() - } - } else { - if yyq1676[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startedAt")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1680 := &x.StartedAt - yym1681 := z.EncBinary() - _ = 
yym1681 - if false { - } else if z.HasExtensions() && z.EncExt(yy1680) { - } else if yym1681 { - z.EncBinaryMarshal(yy1680) - } else if !yym1681 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1680) - } else { - z.EncFallback(yy1680) - } - } - } - if yyr1676 || yy2arr1676 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerStateRunning) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1682 := z.DecBinary() - _ = yym1682 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1683 := r.ContainerType() - if yyct1683 == codecSelferValueTypeMap1234 { - yyl1683 := r.ReadMapStart() - if yyl1683 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1683, d) - } - } else if yyct1683 == codecSelferValueTypeArray1234 { - yyl1683 := r.ReadArrayStart() - if yyl1683 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1683, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerStateRunning) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1684Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1684Slc - var yyhl1684 bool = l >= 0 - for yyj1684 := 0; ; yyj1684++ { - if yyhl1684 { - if yyj1684 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1684Slc = r.DecodeBytes(yys1684Slc, true, true) - yys1684 := string(yys1684Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1684 { - case "startedAt": - if r.TryDecodeAsNil() { - x.StartedAt = pkg2_unversioned.Time{} - } else { - yyv1685 := &x.StartedAt - yym1686 := z.DecBinary() - _ = yym1686 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1685) { - } else if yym1686 { - z.DecBinaryUnmarshal(yyv1685) - } else if !yym1686 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1685) - } else { - z.DecFallback(yyv1685, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys1684) - } // end switch yys1684 - } // end for yyj1684 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerStateRunning) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1687 int - var yyb1687 bool - var yyhl1687 bool = l >= 0 - yyj1687++ - if yyhl1687 { - yyb1687 = yyj1687 > l - } else { - yyb1687 = r.CheckBreak() - } - if yyb1687 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.StartedAt = pkg2_unversioned.Time{} - } else { - yyv1688 := &x.StartedAt - yym1689 := z.DecBinary() - _ = yym1689 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1688) { - } else if yym1689 { - z.DecBinaryUnmarshal(yyv1688) - } else if !yym1689 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1688) - } else { - z.DecFallback(yyv1688, false) - } - } - for { - yyj1687++ - if yyhl1687 { - yyb1687 = yyj1687 > l - } else { - yyb1687 = r.CheckBreak() - } - if yyb1687 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1687-1, "") 
- } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerStateTerminated) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1690 := z.EncBinary() - _ = yym1690 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1691 := !z.EncBinary() - yy2arr1691 := z.EncBasicHandle().StructToArray - var yyq1691 [7]bool - _, _, _ = yysep1691, yyq1691, yy2arr1691 - const yyr1691 bool = false - yyq1691[1] = x.Signal != 0 - yyq1691[2] = x.Reason != "" - yyq1691[3] = x.Message != "" - yyq1691[4] = true - yyq1691[5] = true - yyq1691[6] = x.ContainerID != "" - var yynn1691 int - if yyr1691 || yy2arr1691 { - r.EncodeArrayStart(7) - } else { - yynn1691 = 1 - for _, b := range yyq1691 { - if b { - yynn1691++ - } - } - r.EncodeMapStart(yynn1691) - yynn1691 = 0 - } - if yyr1691 || yy2arr1691 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1693 := z.EncBinary() - _ = yym1693 - if false { - } else { - r.EncodeInt(int64(x.ExitCode)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("exitCode")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1694 := z.EncBinary() - _ = yym1694 - if false { - } else { - r.EncodeInt(int64(x.ExitCode)) - } - } - if yyr1691 || yy2arr1691 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1691[1] { - yym1696 := z.EncBinary() - _ = yym1696 - if false { - } else { - r.EncodeInt(int64(x.Signal)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq1691[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("signal")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1697 := z.EncBinary() - _ = yym1697 - if false { - } else { - r.EncodeInt(int64(x.Signal)) - } - } - } - if yyr1691 || yy2arr1691 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1691[2] { - yym1699 := z.EncBinary() - _ = yym1699 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1691[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1700 := z.EncBinary() - _ = yym1700 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr1691 || yy2arr1691 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1691[3] { - yym1702 := z.EncBinary() - _ = yym1702 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1691[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1703 := z.EncBinary() - _ = yym1703 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr1691 || yy2arr1691 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1691[4] { - yy1705 := &x.StartedAt - yym1706 := z.EncBinary() - _ = yym1706 - if false { - } else if z.HasExtensions() && z.EncExt(yy1705) { - } else if yym1706 { - z.EncBinaryMarshal(yy1705) - } else if 
!yym1706 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1705) - } else { - z.EncFallback(yy1705) - } - } else { - r.EncodeNil() - } - } else { - if yyq1691[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startedAt")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1707 := &x.StartedAt - yym1708 := z.EncBinary() - _ = yym1708 - if false { - } else if z.HasExtensions() && z.EncExt(yy1707) { - } else if yym1708 { - z.EncBinaryMarshal(yy1707) - } else if !yym1708 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1707) - } else { - z.EncFallback(yy1707) - } - } - } - if yyr1691 || yy2arr1691 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1691[5] { - yy1710 := &x.FinishedAt - yym1711 := z.EncBinary() - _ = yym1711 - if false { - } else if z.HasExtensions() && z.EncExt(yy1710) { - } else if yym1711 { - z.EncBinaryMarshal(yy1710) - } else if !yym1711 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1710) - } else { - z.EncFallback(yy1710) - } - } else { - r.EncodeNil() - } - } else { - if yyq1691[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("finishedAt")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1712 := &x.FinishedAt - yym1713 := z.EncBinary() - _ = yym1713 - if false { - } else if z.HasExtensions() && z.EncExt(yy1712) { - } else if yym1713 { - z.EncBinaryMarshal(yy1712) - } else if !yym1713 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1712) - } else { - z.EncFallback(yy1712) - } - } - } - if yyr1691 || yy2arr1691 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1691[6] { - yym1715 := z.EncBinary() - _ = yym1715 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1691[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1716 := z.EncBinary() - _ = yym1716 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) - } - } - } - if yyr1691 || yy2arr1691 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerStateTerminated) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1717 := z.DecBinary() - _ = yym1717 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1718 := r.ContainerType() - if yyct1718 == codecSelferValueTypeMap1234 { - yyl1718 := r.ReadMapStart() - if yyl1718 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1718, d) - } - } else if yyct1718 == codecSelferValueTypeArray1234 { - yyl1718 := r.ReadArrayStart() - if yyl1718 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1718, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerStateTerminated) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1719Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1719Slc - var yyhl1719 bool = l >= 0 - for yyj1719 := 0; ; yyj1719++ { 
- if yyhl1719 { - if yyj1719 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1719Slc = r.DecodeBytes(yys1719Slc, true, true) - yys1719 := string(yys1719Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1719 { - case "exitCode": - if r.TryDecodeAsNil() { - x.ExitCode = 0 - } else { - x.ExitCode = int32(r.DecodeInt(32)) - } - case "signal": - if r.TryDecodeAsNil() { - x.Signal = 0 - } else { - x.Signal = int32(r.DecodeInt(32)) - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - case "startedAt": - if r.TryDecodeAsNil() { - x.StartedAt = pkg2_unversioned.Time{} - } else { - yyv1724 := &x.StartedAt - yym1725 := z.DecBinary() - _ = yym1725 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1724) { - } else if yym1725 { - z.DecBinaryUnmarshal(yyv1724) - } else if !yym1725 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1724) - } else { - z.DecFallback(yyv1724, false) - } - } - case "finishedAt": - if r.TryDecodeAsNil() { - x.FinishedAt = pkg2_unversioned.Time{} - } else { - yyv1726 := &x.FinishedAt - yym1727 := z.DecBinary() - _ = yym1727 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1726) { - } else if yym1727 { - z.DecBinaryUnmarshal(yyv1726) - } else if !yym1727 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1726) - } else { - z.DecFallback(yyv1726, false) - } - } - case "containerID": - if r.TryDecodeAsNil() { - x.ContainerID = "" - } else { - x.ContainerID = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys1719) - } // end switch yys1719 - } // end for yyj1719 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerStateTerminated) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1729 int - var yyb1729 bool - var yyhl1729 bool = l >= 0 - yyj1729++ - if yyhl1729 { - yyb1729 = yyj1729 > l - } else { - yyb1729 = r.CheckBreak() - } - if yyb1729 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExitCode = 0 - } else { - x.ExitCode = int32(r.DecodeInt(32)) - } - yyj1729++ - if yyhl1729 { - yyb1729 = yyj1729 > l - } else { - yyb1729 = r.CheckBreak() - } - if yyb1729 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Signal = 0 - } else { - x.Signal = int32(r.DecodeInt(32)) - } - yyj1729++ - if yyhl1729 { - yyb1729 = yyj1729 > l - } else { - yyb1729 = r.CheckBreak() - } - if yyb1729 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj1729++ - if yyhl1729 { - yyb1729 = yyj1729 > l - } else { - yyb1729 = r.CheckBreak() - } - if yyb1729 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - yyj1729++ - if yyhl1729 { - yyb1729 = yyj1729 > l - } else { - 
yyb1729 = r.CheckBreak() - } - if yyb1729 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.StartedAt = pkg2_unversioned.Time{} - } else { - yyv1734 := &x.StartedAt - yym1735 := z.DecBinary() - _ = yym1735 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1734) { - } else if yym1735 { - z.DecBinaryUnmarshal(yyv1734) - } else if !yym1735 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1734) - } else { - z.DecFallback(yyv1734, false) - } - } - yyj1729++ - if yyhl1729 { - yyb1729 = yyj1729 > l - } else { - yyb1729 = r.CheckBreak() - } - if yyb1729 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FinishedAt = pkg2_unversioned.Time{} - } else { - yyv1736 := &x.FinishedAt - yym1737 := z.DecBinary() - _ = yym1737 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1736) { - } else if yym1737 { - z.DecBinaryUnmarshal(yyv1736) - } else if !yym1737 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1736) - } else { - z.DecFallback(yyv1736, false) - } - } - yyj1729++ - if yyhl1729 { - yyb1729 = yyj1729 > l - } else { - yyb1729 = r.CheckBreak() - } - if yyb1729 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerID = "" - } else { - x.ContainerID = string(r.DecodeString()) - } - for { - yyj1729++ - if yyhl1729 { - yyb1729 = yyj1729 > l - } else { - yyb1729 = r.CheckBreak() - } - if yyb1729 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1729-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerState) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1739 := z.EncBinary() - _ = yym1739 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1740 := !z.EncBinary() - yy2arr1740 := z.EncBasicHandle().StructToArray - var yyq1740 [3]bool - _, _, _ = yysep1740, yyq1740, yy2arr1740 - const yyr1740 bool = false - yyq1740[0] = x.Waiting != nil - yyq1740[1] = x.Running != nil - yyq1740[2] = x.Terminated != nil - var yynn1740 int - if yyr1740 || yy2arr1740 { - r.EncodeArrayStart(3) - } else { - yynn1740 = 0 - for _, b := range yyq1740 { - if b { - yynn1740++ - } - } - r.EncodeMapStart(yynn1740) - yynn1740 = 0 - } - if yyr1740 || yy2arr1740 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1740[0] { - if x.Waiting == nil { - r.EncodeNil() - } else { - x.Waiting.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1740[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("waiting")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Waiting == nil { - r.EncodeNil() - } else { - x.Waiting.CodecEncodeSelf(e) - } - } - } - if yyr1740 || yy2arr1740 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1740[1] { - if x.Running == nil { - r.EncodeNil() - } else { - x.Running.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1740[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("running")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Running == nil { - r.EncodeNil() - } else { - x.Running.CodecEncodeSelf(e) - } - } - } - if yyr1740 || yy2arr1740 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1740[2] { - if x.Terminated == nil { - r.EncodeNil() - } else { - x.Terminated.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1740[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("terminated")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Terminated == nil { - r.EncodeNil() - } else { - x.Terminated.CodecEncodeSelf(e) - } - } - } - if yyr1740 || yy2arr1740 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerState) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1744 := z.DecBinary() - _ = yym1744 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1745 := r.ContainerType() - if yyct1745 == codecSelferValueTypeMap1234 { - yyl1745 := r.ReadMapStart() - if yyl1745 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1745, d) - } - } else if yyct1745 == codecSelferValueTypeArray1234 { - yyl1745 := r.ReadArrayStart() - if yyl1745 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1745, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerState) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1746Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1746Slc - var yyhl1746 bool = l >= 0 - for yyj1746 := 0; ; yyj1746++ { - if yyhl1746 { - if yyj1746 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1746Slc = r.DecodeBytes(yys1746Slc, true, true) - yys1746 := string(yys1746Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1746 { - case "waiting": - if r.TryDecodeAsNil() { - if x.Waiting != nil { - x.Waiting = nil - } - } else { - if x.Waiting == nil { - x.Waiting = new(ContainerStateWaiting) - } - x.Waiting.CodecDecodeSelf(d) - } - case "running": - if r.TryDecodeAsNil() { - if x.Running != nil { - x.Running = nil - } - } else { - if x.Running == nil { - x.Running = new(ContainerStateRunning) - } - x.Running.CodecDecodeSelf(d) - } - case "terminated": - if r.TryDecodeAsNil() { - if x.Terminated != nil { - x.Terminated = nil - } - } else { - if x.Terminated == nil { - x.Terminated = new(ContainerStateTerminated) - } - x.Terminated.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys1746) - } // end switch yys1746 - } // end for yyj1746 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerState) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1750 int - var yyb1750 bool - var yyhl1750 bool = l >= 0 - yyj1750++ - if yyhl1750 { - yyb1750 = yyj1750 > l - } else { - yyb1750 = r.CheckBreak() - } - if yyb1750 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Waiting != nil { - x.Waiting = nil - } - } else { - if x.Waiting == nil { - x.Waiting = new(ContainerStateWaiting) - } - x.Waiting.CodecDecodeSelf(d) - } - yyj1750++ - if yyhl1750 { - yyb1750 = yyj1750 > l - } else { - yyb1750 = r.CheckBreak() - } - if yyb1750 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Running != nil { - x.Running = nil - } - } else { - if x.Running == nil { - x.Running = new(ContainerStateRunning) - } - x.Running.CodecDecodeSelf(d) - } - yyj1750++ - if yyhl1750 { - yyb1750 = yyj1750 > l - } else { - yyb1750 = r.CheckBreak() - } - if yyb1750 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Terminated != nil { - x.Terminated = nil - } - } else { - if x.Terminated == nil { - x.Terminated = new(ContainerStateTerminated) - } - x.Terminated.CodecDecodeSelf(d) - } - for { - yyj1750++ - if yyhl1750 { - yyb1750 = yyj1750 > l - } else { - yyb1750 = r.CheckBreak() - } - if yyb1750 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1750-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1754 := z.EncBinary() - _ = yym1754 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1755 := !z.EncBinary() - yy2arr1755 := z.EncBasicHandle().StructToArray - var yyq1755 [8]bool - _, _, _ = yysep1755, yyq1755, yy2arr1755 - const yyr1755 bool = false - yyq1755[1] = true - yyq1755[2] = true - yyq1755[7] = x.ContainerID != "" - var yynn1755 int - if yyr1755 || yy2arr1755 { - r.EncodeArrayStart(8) - } else { - yynn1755 = 5 - for _, b := range yyq1755 { - if b { - yynn1755++ - } - } - r.EncodeMapStart(yynn1755) - yynn1755 = 0 - } - if yyr1755 || yy2arr1755 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1757 := z.EncBinary() - _ = yym1757 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1758 := z.EncBinary() - _ = yym1758 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr1755 || yy2arr1755 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1755[1] { - yy1760 := &x.State - yy1760.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq1755[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("state")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1761 := &x.State - yy1761.CodecEncodeSelf(e) - } - } - if yyr1755 || yy2arr1755 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1755[2] { - yy1763 := &x.LastTerminationState - yy1763.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq1755[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastState")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1764 := &x.LastTerminationState - yy1764.CodecEncodeSelf(e) - } - } - if yyr1755 || yy2arr1755 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1766 := z.EncBinary() - _ = yym1766 - if false { - } else { - r.EncodeBool(bool(x.Ready)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ready")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1767 := z.EncBinary() - _ = yym1767 - if false { - } else { - r.EncodeBool(bool(x.Ready)) - } - } - if yyr1755 || yy2arr1755 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1769 := z.EncBinary() - _ = yym1769 - if false { - } else { - r.EncodeInt(int64(x.RestartCount)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("restartCount")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1770 := z.EncBinary() - _ = yym1770 - if false { - } else { - r.EncodeInt(int64(x.RestartCount)) - } - } - if yyr1755 || yy2arr1755 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1772 := z.EncBinary() - _ = yym1772 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Image)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("image")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1773 := z.EncBinary() - _ = yym1773 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Image)) - } - } - if yyr1755 || yy2arr1755 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1775 := z.EncBinary() - _ = yym1775 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ImageID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("imageID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1776 := z.EncBinary() - _ = yym1776 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ImageID)) - } - } - if yyr1755 || yy2arr1755 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1755[7] { - yym1778 := z.EncBinary() - _ = yym1778 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1755[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1779 := z.EncBinary() - _ = yym1779 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) - } - } - } - if yyr1755 || yy2arr1755 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1780 := z.DecBinary() - _ = yym1780 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1781 := r.ContainerType() - if yyct1781 == codecSelferValueTypeMap1234 { - yyl1781 := r.ReadMapStart() - if yyl1781 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1781, d) - } - } else if yyct1781 == 
codecSelferValueTypeArray1234 { - yyl1781 := r.ReadArrayStart() - if yyl1781 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1781, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1782Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1782Slc - var yyhl1782 bool = l >= 0 - for yyj1782 := 0; ; yyj1782++ { - if yyhl1782 { - if yyj1782 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1782Slc = r.DecodeBytes(yys1782Slc, true, true) - yys1782 := string(yys1782Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1782 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "state": - if r.TryDecodeAsNil() { - x.State = ContainerState{} - } else { - yyv1784 := &x.State - yyv1784.CodecDecodeSelf(d) - } - case "lastState": - if r.TryDecodeAsNil() { - x.LastTerminationState = ContainerState{} - } else { - yyv1785 := &x.LastTerminationState - yyv1785.CodecDecodeSelf(d) - } - case "ready": - if r.TryDecodeAsNil() { - x.Ready = false - } else { - x.Ready = bool(r.DecodeBool()) - } - case "restartCount": - if r.TryDecodeAsNil() { - x.RestartCount = 0 - } else { - x.RestartCount = int32(r.DecodeInt(32)) - } - case "image": - if r.TryDecodeAsNil() { - x.Image = "" - } else { - x.Image = string(r.DecodeString()) - } - case "imageID": - if r.TryDecodeAsNil() { - x.ImageID = "" - } else { - x.ImageID = string(r.DecodeString()) - } - case "containerID": - if r.TryDecodeAsNil() { - x.ContainerID = "" - } else { - x.ContainerID = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys1782) - } // end switch yys1782 - } // end for yyj1782 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1791 int - var yyb1791 bool - var yyhl1791 bool = l >= 0 - yyj1791++ - if yyhl1791 { - yyb1791 = yyj1791 > l - } else { - yyb1791 = r.CheckBreak() - } - if yyb1791 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj1791++ - if yyhl1791 { - yyb1791 = yyj1791 > l - } else { - yyb1791 = r.CheckBreak() - } - if yyb1791 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.State = ContainerState{} - } else { - yyv1793 := &x.State - yyv1793.CodecDecodeSelf(d) - } - yyj1791++ - if yyhl1791 { - yyb1791 = yyj1791 > l - } else { - yyb1791 = r.CheckBreak() - } - if yyb1791 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTerminationState = ContainerState{} - } else { - yyv1794 := &x.LastTerminationState - yyv1794.CodecDecodeSelf(d) - } - yyj1791++ - if yyhl1791 { - yyb1791 = yyj1791 > l - } else { - yyb1791 = r.CheckBreak() - } - if yyb1791 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ready = false - } else { - x.Ready = bool(r.DecodeBool()) - } - yyj1791++ - if yyhl1791 { - yyb1791 = yyj1791 > l - } else { - yyb1791 = r.CheckBreak() - } - if yyb1791 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RestartCount = 0 - } else { - x.RestartCount = int32(r.DecodeInt(32)) - } - yyj1791++ - if yyhl1791 { - yyb1791 = yyj1791 > l - } else { - yyb1791 = r.CheckBreak() - } - if yyb1791 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Image = "" - } else { - x.Image = string(r.DecodeString()) - } - yyj1791++ - if yyhl1791 { - yyb1791 = yyj1791 > l - } else { - yyb1791 = r.CheckBreak() - } - if yyb1791 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ImageID = "" - } else { - x.ImageID = string(r.DecodeString()) - } - yyj1791++ - if yyhl1791 { - yyb1791 = yyj1791 > l - } else { - yyb1791 = r.CheckBreak() - } - if yyb1791 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerID = "" - } else { - x.ContainerID = string(r.DecodeString()) - } - for { - yyj1791++ - if yyhl1791 { - yyb1791 = yyj1791 > l - } else { - yyb1791 = r.CheckBreak() - } - if yyb1791 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1791-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x PodPhase) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1800 := z.EncBinary() - _ = yym1800 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PodPhase) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1801 := z.DecBinary() - _ = yym1801 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x PodConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1802 := z.EncBinary() - _ = yym1802 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PodConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1803 := z.DecBinary() - _ = yym1803 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *PodCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1804 := z.EncBinary() - _ = yym1804 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1805 := !z.EncBinary() - yy2arr1805 := z.EncBasicHandle().StructToArray - var yyq1805 
[6]bool - _, _, _ = yysep1805, yyq1805, yy2arr1805 - const yyr1805 bool = false - yyq1805[2] = true - yyq1805[3] = true - yyq1805[4] = x.Reason != "" - yyq1805[5] = x.Message != "" - var yynn1805 int - if yyr1805 || yy2arr1805 { - r.EncodeArrayStart(6) - } else { - yynn1805 = 2 - for _, b := range yyq1805 { - if b { - yynn1805++ - } - } - r.EncodeMapStart(yynn1805) - yynn1805 = 0 - } - if yyr1805 || yy2arr1805 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr1805 || yy2arr1805 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Status.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Status.CodecEncodeSelf(e) - } - if yyr1805 || yy2arr1805 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1805[2] { - yy1809 := &x.LastProbeTime - yym1810 := z.EncBinary() - _ = yym1810 - if false { - } else if z.HasExtensions() && z.EncExt(yy1809) { - } else if yym1810 { - z.EncBinaryMarshal(yy1809) - } else if !yym1810 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1809) - } else { - z.EncFallback(yy1809) - } - } else { - r.EncodeNil() - } - } else { - if yyq1805[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1811 := &x.LastProbeTime - yym1812 := z.EncBinary() - _ = yym1812 - if false { - } else if z.HasExtensions() && z.EncExt(yy1811) { - } else if yym1812 { - z.EncBinaryMarshal(yy1811) - } else if !yym1812 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1811) - } else { - z.EncFallback(yy1811) - } - } - } - if yyr1805 || yy2arr1805 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1805[3] { - yy1814 := &x.LastTransitionTime - yym1815 := z.EncBinary() - _ = yym1815 - if false { - } else if z.HasExtensions() && z.EncExt(yy1814) { - } else if yym1815 { - z.EncBinaryMarshal(yy1814) - } else if !yym1815 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1814) - } else { - z.EncFallback(yy1814) - } - } else { - r.EncodeNil() - } - } else { - if yyq1805[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1816 := &x.LastTransitionTime - yym1817 := z.EncBinary() - _ = yym1817 - if false { - } else if z.HasExtensions() && z.EncExt(yy1816) { - } else if yym1817 { - z.EncBinaryMarshal(yy1816) - } else if !yym1817 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1816) - } else { - z.EncFallback(yy1816) - } - } - } - if yyr1805 || yy2arr1805 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1805[4] { - yym1819 := z.EncBinary() - _ = yym1819 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1805[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1820 := z.EncBinary() - _ = yym1820 - if false { - } else { 
- r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr1805 || yy2arr1805 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1805[5] { - yym1822 := z.EncBinary() - _ = yym1822 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1805[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1823 := z.EncBinary() - _ = yym1823 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr1805 || yy2arr1805 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1824 := z.DecBinary() - _ = yym1824 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1825 := r.ContainerType() - if yyct1825 == codecSelferValueTypeMap1234 { - yyl1825 := r.ReadMapStart() - if yyl1825 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1825, d) - } - } else if yyct1825 == codecSelferValueTypeArray1234 { - yyl1825 := r.ReadArrayStart() - if yyl1825 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1825, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1826Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1826Slc - var yyhl1826 bool = l >= 0 - for yyj1826 := 0; ; yyj1826++ { - if yyhl1826 { - if yyj1826 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1826Slc = r.DecodeBytes(yys1826Slc, true, true) - yys1826 := string(yys1826Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1826 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = PodConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = ConditionStatus(r.DecodeString()) - } - case "lastProbeTime": - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_unversioned.Time{} - } else { - yyv1829 := &x.LastProbeTime - yym1830 := z.DecBinary() - _ = yym1830 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1829) { - } else if yym1830 { - z.DecBinaryUnmarshal(yyv1829) - } else if !yym1830 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1829) - } else { - z.DecFallback(yyv1829, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv1831 := &x.LastTransitionTime - yym1832 := z.DecBinary() - _ = yym1832 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1831) { - } else if yym1832 { - z.DecBinaryUnmarshal(yyv1831) - } else if !yym1832 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1831) - } else { - z.DecFallback(yyv1831, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - 
if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys1826) - } // end switch yys1826 - } // end for yyj1826 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1835 int - var yyb1835 bool - var yyhl1835 bool = l >= 0 - yyj1835++ - if yyhl1835 { - yyb1835 = yyj1835 > l - } else { - yyb1835 = r.CheckBreak() - } - if yyb1835 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = PodConditionType(r.DecodeString()) - } - yyj1835++ - if yyhl1835 { - yyb1835 = yyj1835 > l - } else { - yyb1835 = r.CheckBreak() - } - if yyb1835 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = ConditionStatus(r.DecodeString()) - } - yyj1835++ - if yyhl1835 { - yyb1835 = yyj1835 > l - } else { - yyb1835 = r.CheckBreak() - } - if yyb1835 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_unversioned.Time{} - } else { - yyv1838 := &x.LastProbeTime - yym1839 := z.DecBinary() - _ = yym1839 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1838) { - } else if yym1839 { - z.DecBinaryUnmarshal(yyv1838) - } else if !yym1839 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1838) - } else { - z.DecFallback(yyv1838, false) - } - } - yyj1835++ - if yyhl1835 { - yyb1835 = yyj1835 > l - } else { - yyb1835 = r.CheckBreak() - } - if yyb1835 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv1840 := &x.LastTransitionTime - yym1841 := z.DecBinary() - _ = yym1841 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1840) { - } else if yym1841 { - z.DecBinaryUnmarshal(yyv1840) - } else if !yym1841 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1840) - } else { - z.DecFallback(yyv1840, false) - } - } - yyj1835++ - if yyhl1835 { - yyb1835 = yyj1835 > l - } else { - yyb1835 = r.CheckBreak() - } - if yyb1835 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj1835++ - if yyhl1835 { - yyb1835 = yyj1835 > l - } else { - yyb1835 = r.CheckBreak() - } - if yyb1835 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj1835++ - if yyhl1835 { - yyb1835 = yyj1835 > l - } else { - yyb1835 = r.CheckBreak() - } - if yyb1835 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1835-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x RestartPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, 
r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1844 := z.EncBinary() - _ = yym1844 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *RestartPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1845 := z.DecBinary() - _ = yym1845 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *PodList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1846 := z.EncBinary() - _ = yym1846 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1847 := !z.EncBinary() - yy2arr1847 := z.EncBasicHandle().StructToArray - var yyq1847 [4]bool - _, _, _ = yysep1847, yyq1847, yy2arr1847 - const yyr1847 bool = false - yyq1847[0] = x.Kind != "" - yyq1847[1] = x.APIVersion != "" - yyq1847[2] = true - var yynn1847 int - if yyr1847 || yy2arr1847 { - r.EncodeArrayStart(4) - } else { - yynn1847 = 1 - for _, b := range yyq1847 { - if b { - yynn1847++ - } - } - r.EncodeMapStart(yynn1847) - yynn1847 = 0 - } - if yyr1847 || yy2arr1847 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1847[0] { - yym1849 := z.EncBinary() - _ = yym1849 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1847[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1850 := z.EncBinary() - _ = yym1850 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr1847 || yy2arr1847 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1847[1] { - yym1852 := z.EncBinary() - _ = yym1852 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1847[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1853 := z.EncBinary() - _ = yym1853 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr1847 || yy2arr1847 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1847[2] { - yy1855 := &x.ListMeta - yym1856 := z.EncBinary() - _ = yym1856 - if false { - } else if z.HasExtensions() && z.EncExt(yy1855) { - } else { - z.EncFallback(yy1855) - } - } else { - r.EncodeNil() - } - } else { - if yyq1847[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1857 := &x.ListMeta - yym1858 := z.EncBinary() - _ = yym1858 - if false { - } else if z.HasExtensions() && z.EncExt(yy1857) { - } else { - z.EncFallback(yy1857) - } - } - } - if yyr1847 || yy2arr1847 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym1860 := z.EncBinary() - _ = yym1860 - if false { - } else { - h.encSlicePod(([]Pod)(x.Items), e) - } - } - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym1861 := z.EncBinary() - _ = yym1861 - if false { - } else { - h.encSlicePod(([]Pod)(x.Items), e) - } - } - } - if yyr1847 || yy2arr1847 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1862 := z.DecBinary() - _ = yym1862 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1863 := r.ContainerType() - if yyct1863 == codecSelferValueTypeMap1234 { - yyl1863 := r.ReadMapStart() - if yyl1863 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1863, d) - } - } else if yyct1863 == codecSelferValueTypeArray1234 { - yyl1863 := r.ReadArrayStart() - if yyl1863 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1863, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1864Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1864Slc - var yyhl1864 bool = l >= 0 - for yyj1864 := 0; ; yyj1864++ { - if yyhl1864 { - if yyj1864 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1864Slc = r.DecodeBytes(yys1864Slc, true, true) - yys1864 := string(yys1864Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1864 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv1867 := &x.ListMeta - yym1868 := z.DecBinary() - _ = yym1868 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1867) { - } else { - z.DecFallback(yyv1867, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv1869 := &x.Items - yym1870 := z.DecBinary() - _ = yym1870 - if false { - } else { - h.decSlicePod((*[]Pod)(yyv1869), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys1864) - } // end switch yys1864 - } // end for yyj1864 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1871 int - var yyb1871 bool - var yyhl1871 bool = l >= 0 - yyj1871++ - if yyhl1871 { - yyb1871 = yyj1871 > l - } else { - yyb1871 = r.CheckBreak() - } - if yyb1871 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj1871++ - if yyhl1871 { - yyb1871 = yyj1871 > l - } else { - yyb1871 = r.CheckBreak() - } - if yyb1871 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj1871++ - if yyhl1871 { - yyb1871 = yyj1871 > l - } else { - yyb1871 = r.CheckBreak() - } - if yyb1871 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv1874 := &x.ListMeta - yym1875 := z.DecBinary() - _ = yym1875 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1874) { - } else { - z.DecFallback(yyv1874, false) - } - } - yyj1871++ - if yyhl1871 { - yyb1871 = yyj1871 > l - } else { - yyb1871 = r.CheckBreak() - } - if yyb1871 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv1876 := &x.Items - yym1877 := z.DecBinary() - _ = yym1877 - if false { - } else { - h.decSlicePod((*[]Pod)(yyv1876), d) - } - } - for { - yyj1871++ - if yyhl1871 { - yyb1871 = yyj1871 > l - } else { - yyb1871 = r.CheckBreak() - } - if yyb1871 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1871-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x DNSPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1878 := z.EncBinary() - _ = yym1878 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *DNSPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1879 := z.DecBinary() - _ = yym1879 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *NodeSelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1880 := z.EncBinary() - _ = yym1880 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1881 := !z.EncBinary() - yy2arr1881 := z.EncBasicHandle().StructToArray - var yyq1881 [1]bool - _, _, _ = yysep1881, yyq1881, yy2arr1881 - const yyr1881 bool = false - var yynn1881 int - if yyr1881 || yy2arr1881 { - r.EncodeArrayStart(1) - } else { - yynn1881 = 1 - for _, b := range yyq1881 { - if b { - yynn1881++ - } - } - r.EncodeMapStart(yynn1881) - yynn1881 = 0 - } - if yyr1881 || yy2arr1881 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.NodeSelectorTerms == nil { - r.EncodeNil() - } else { - yym1883 := z.EncBinary() - _ = yym1883 - if false { - } else { - h.encSliceNodeSelectorTerm(([]NodeSelectorTerm)(x.NodeSelectorTerms), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeSelectorTerms")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NodeSelectorTerms == nil { - r.EncodeNil() - } else { - yym1884 := z.EncBinary() - _ = yym1884 - if false { - } else { - h.encSliceNodeSelectorTerm(([]NodeSelectorTerm)(x.NodeSelectorTerms), e) - } - } - } - if yyr1881 || yy2arr1881 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeSelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1885 := z.DecBinary() - _ = yym1885 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1886 := r.ContainerType() - if yyct1886 == codecSelferValueTypeMap1234 { - yyl1886 := r.ReadMapStart() - if yyl1886 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1886, d) - } - } else if yyct1886 == codecSelferValueTypeArray1234 { - yyl1886 := r.ReadArrayStart() - if yyl1886 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1886, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1887Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1887Slc - var yyhl1887 bool = l >= 0 - for yyj1887 := 0; ; yyj1887++ { - if yyhl1887 { - if yyj1887 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1887Slc = r.DecodeBytes(yys1887Slc, true, true) - yys1887 := string(yys1887Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1887 { - case "nodeSelectorTerms": - if r.TryDecodeAsNil() { - x.NodeSelectorTerms = nil - } else { - yyv1888 := &x.NodeSelectorTerms - yym1889 := z.DecBinary() - _ = yym1889 - if false { - } else { - h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv1888), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys1887) - } // end switch yys1887 - } // end for yyj1887 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1890 int - var yyb1890 bool - var yyhl1890 bool = l >= 0 - yyj1890++ - if yyhl1890 { - yyb1890 = yyj1890 > l - } else { - yyb1890 = r.CheckBreak() - } - if yyb1890 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeSelectorTerms = nil - } else { - yyv1891 := &x.NodeSelectorTerms - yym1892 := z.DecBinary() - _ = yym1892 - if false { - } else { - h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv1891), d) - } - } - for { - yyj1890++ - if yyhl1890 { - yyb1890 = yyj1890 > l - } else { - yyb1890 = r.CheckBreak() - } - if yyb1890 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1890-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeSelectorTerm) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1893 := z.EncBinary() - _ = yym1893 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1894 := !z.EncBinary() - yy2arr1894 := z.EncBasicHandle().StructToArray - var yyq1894 [1]bool - _, _, _ = yysep1894, yyq1894, yy2arr1894 - const yyr1894 bool = false - var yynn1894 int - if yyr1894 || 
yy2arr1894 { - r.EncodeArrayStart(1) - } else { - yynn1894 = 1 - for _, b := range yyq1894 { - if b { - yynn1894++ - } - } - r.EncodeMapStart(yynn1894) - yynn1894 = 0 - } - if yyr1894 || yy2arr1894 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.MatchExpressions == nil { - r.EncodeNil() - } else { - yym1896 := z.EncBinary() - _ = yym1896 - if false { - } else { - h.encSliceNodeSelectorRequirement(([]NodeSelectorRequirement)(x.MatchExpressions), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("matchExpressions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MatchExpressions == nil { - r.EncodeNil() - } else { - yym1897 := z.EncBinary() - _ = yym1897 - if false { - } else { - h.encSliceNodeSelectorRequirement(([]NodeSelectorRequirement)(x.MatchExpressions), e) - } - } - } - if yyr1894 || yy2arr1894 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeSelectorTerm) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1898 := z.DecBinary() - _ = yym1898 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1899 := r.ContainerType() - if yyct1899 == codecSelferValueTypeMap1234 { - yyl1899 := r.ReadMapStart() - if yyl1899 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1899, d) - } - } else if yyct1899 == codecSelferValueTypeArray1234 { - yyl1899 := r.ReadArrayStart() - if yyl1899 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1899, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeSelectorTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1900Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1900Slc - var yyhl1900 bool = l >= 0 - for yyj1900 := 0; ; yyj1900++ { - if yyhl1900 { - if yyj1900 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1900Slc = r.DecodeBytes(yys1900Slc, true, true) - yys1900 := string(yys1900Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1900 { - case "matchExpressions": - if r.TryDecodeAsNil() { - x.MatchExpressions = nil - } else { - yyv1901 := &x.MatchExpressions - yym1902 := z.DecBinary() - _ = yym1902 - if false { - } else { - h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv1901), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys1900) - } // end switch yys1900 - } // end for yyj1900 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeSelectorTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1903 int - var yyb1903 bool - var yyhl1903 bool = l >= 0 - yyj1903++ - if yyhl1903 { - yyb1903 = yyj1903 > l - } else { - yyb1903 = r.CheckBreak() - } - if yyb1903 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MatchExpressions = nil - } else { - yyv1904 := &x.MatchExpressions - 
yym1905 := z.DecBinary() - _ = yym1905 - if false { - } else { - h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv1904), d) - } - } - for { - yyj1903++ - if yyhl1903 { - yyb1903 = yyj1903 > l - } else { - yyb1903 = r.CheckBreak() - } - if yyb1903 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1903-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeSelectorRequirement) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1906 := z.EncBinary() - _ = yym1906 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1907 := !z.EncBinary() - yy2arr1907 := z.EncBasicHandle().StructToArray - var yyq1907 [3]bool - _, _, _ = yysep1907, yyq1907, yy2arr1907 - const yyr1907 bool = false - yyq1907[2] = len(x.Values) != 0 - var yynn1907 int - if yyr1907 || yy2arr1907 { - r.EncodeArrayStart(3) - } else { - yynn1907 = 2 - for _, b := range yyq1907 { - if b { - yynn1907++ - } - } - r.EncodeMapStart(yynn1907) - yynn1907 = 0 - } - if yyr1907 || yy2arr1907 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1909 := z.EncBinary() - _ = yym1909 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1910 := z.EncBinary() - _ = yym1910 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr1907 || yy2arr1907 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Operator.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("operator")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Operator.CodecEncodeSelf(e) - } - if yyr1907 || yy2arr1907 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1907[2] { - if x.Values == nil { - r.EncodeNil() - } else { - yym1913 := z.EncBinary() - _ = yym1913 - if false { - } else { - z.F.EncSliceStringV(x.Values, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1907[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("values")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Values == nil { - r.EncodeNil() - } else { - yym1914 := z.EncBinary() - _ = yym1914 - if false { - } else { - z.F.EncSliceStringV(x.Values, false, e) - } - } - } - } - if yyr1907 || yy2arr1907 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeSelectorRequirement) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1915 := z.DecBinary() - _ = yym1915 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1916 := r.ContainerType() - if yyct1916 == codecSelferValueTypeMap1234 { - yyl1916 := r.ReadMapStart() - if yyl1916 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1916, d) - } - } else if yyct1916 == codecSelferValueTypeArray1234 { - yyl1916 := r.ReadArrayStart() - if yyl1916 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1916, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeSelectorRequirement) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1917Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1917Slc - var yyhl1917 bool = l >= 0 - for yyj1917 := 0; ; yyj1917++ { - if yyhl1917 { - if yyj1917 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1917Slc = r.DecodeBytes(yys1917Slc, true, true) - yys1917 := string(yys1917Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1917 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - case "operator": - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - x.Operator = NodeSelectorOperator(r.DecodeString()) - } - case "values": - if r.TryDecodeAsNil() { - x.Values = nil - } else { - yyv1920 := &x.Values - yym1921 := z.DecBinary() - _ = yym1921 - if false { - } else { - z.F.DecSliceStringX(yyv1920, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys1917) - } // end switch yys1917 - } // end for yyj1917 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeSelectorRequirement) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1922 int - var yyb1922 bool - var yyhl1922 bool = l >= 0 - yyj1922++ - if yyhl1922 { - yyb1922 = yyj1922 > l - } else { - yyb1922 = r.CheckBreak() - } - if yyb1922 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - yyj1922++ - if yyhl1922 { - yyb1922 = yyj1922 > l - } else { - yyb1922 = r.CheckBreak() - } - if yyb1922 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - x.Operator = NodeSelectorOperator(r.DecodeString()) - } - yyj1922++ - if yyhl1922 { - yyb1922 = yyj1922 > l - } else { - yyb1922 = r.CheckBreak() - } - if yyb1922 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Values = nil - } else { - yyv1925 := &x.Values - yym1926 := z.DecBinary() - _ = yym1926 - if false { - } else { - z.F.DecSliceStringX(yyv1925, false, d) - } - } - for { - yyj1922++ - if yyhl1922 { - yyb1922 = yyj1922 > l - } else { - yyb1922 = r.CheckBreak() - } - if yyb1922 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1922-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x NodeSelectorOperator) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1927 := z.EncBinary() - _ = yym1927 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *NodeSelectorOperator) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1928 := z.DecBinary() - _ = yym1928 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1929 := z.EncBinary() - _ = yym1929 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1930 := !z.EncBinary() - yy2arr1930 := z.EncBasicHandle().StructToArray - var yyq1930 [3]bool - _, _, _ = yysep1930, yyq1930, yy2arr1930 - const yyr1930 bool = false - yyq1930[0] = x.NodeAffinity != nil - yyq1930[1] = x.PodAffinity != nil - yyq1930[2] = x.PodAntiAffinity != nil - var yynn1930 int - if yyr1930 || yy2arr1930 { - r.EncodeArrayStart(3) - } else { - yynn1930 = 0 - for _, b := range yyq1930 { - if b { - yynn1930++ - } - } - r.EncodeMapStart(yynn1930) - yynn1930 = 0 - } - if yyr1930 || yy2arr1930 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1930[0] { - if x.NodeAffinity == nil { - r.EncodeNil() - } else { - x.NodeAffinity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1930[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeAffinity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NodeAffinity == nil { - r.EncodeNil() - } else { - x.NodeAffinity.CodecEncodeSelf(e) - } - } - } - if yyr1930 || yy2arr1930 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1930[1] { - if x.PodAffinity == nil { - r.EncodeNil() - } else { - x.PodAffinity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1930[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podAffinity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PodAffinity == nil { - r.EncodeNil() - } else { - x.PodAffinity.CodecEncodeSelf(e) - } - } - } - if yyr1930 || yy2arr1930 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1930[2] { - if x.PodAntiAffinity == nil { - r.EncodeNil() - } else { - x.PodAntiAffinity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1930[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podAntiAffinity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PodAntiAffinity == nil { - r.EncodeNil() - } else { - x.PodAntiAffinity.CodecEncodeSelf(e) - } - } - } - if yyr1930 || yy2arr1930 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Affinity) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1934 := z.DecBinary() - _ = yym1934 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1935 := r.ContainerType() - if yyct1935 == codecSelferValueTypeMap1234 { - yyl1935 := r.ReadMapStart() - if yyl1935 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1935, d) - } - } else if yyct1935 == codecSelferValueTypeArray1234 { - yyl1935 := r.ReadArrayStart() - if yyl1935 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
x.codecDecodeSelfFromArray(yyl1935, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Affinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1936Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1936Slc - var yyhl1936 bool = l >= 0 - for yyj1936 := 0; ; yyj1936++ { - if yyhl1936 { - if yyj1936 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1936Slc = r.DecodeBytes(yys1936Slc, true, true) - yys1936 := string(yys1936Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1936 { - case "nodeAffinity": - if r.TryDecodeAsNil() { - if x.NodeAffinity != nil { - x.NodeAffinity = nil - } - } else { - if x.NodeAffinity == nil { - x.NodeAffinity = new(NodeAffinity) - } - x.NodeAffinity.CodecDecodeSelf(d) - } - case "podAffinity": - if r.TryDecodeAsNil() { - if x.PodAffinity != nil { - x.PodAffinity = nil - } - } else { - if x.PodAffinity == nil { - x.PodAffinity = new(PodAffinity) - } - x.PodAffinity.CodecDecodeSelf(d) - } - case "podAntiAffinity": - if r.TryDecodeAsNil() { - if x.PodAntiAffinity != nil { - x.PodAntiAffinity = nil - } - } else { - if x.PodAntiAffinity == nil { - x.PodAntiAffinity = new(PodAntiAffinity) - } - x.PodAntiAffinity.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys1936) - } // end switch yys1936 - } // end for yyj1936 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Affinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1940 int - var yyb1940 bool - var yyhl1940 bool = l >= 0 - yyj1940++ - if yyhl1940 { - yyb1940 = yyj1940 > l - } else { - yyb1940 = r.CheckBreak() - } - if yyb1940 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NodeAffinity != nil { - x.NodeAffinity = nil - } - } else { - if x.NodeAffinity == nil { - x.NodeAffinity = new(NodeAffinity) - } - x.NodeAffinity.CodecDecodeSelf(d) - } - yyj1940++ - if yyhl1940 { - yyb1940 = yyj1940 > l - } else { - yyb1940 = r.CheckBreak() - } - if yyb1940 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PodAffinity != nil { - x.PodAffinity = nil - } - } else { - if x.PodAffinity == nil { - x.PodAffinity = new(PodAffinity) - } - x.PodAffinity.CodecDecodeSelf(d) - } - yyj1940++ - if yyhl1940 { - yyb1940 = yyj1940 > l - } else { - yyb1940 = r.CheckBreak() - } - if yyb1940 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PodAntiAffinity != nil { - x.PodAntiAffinity = nil - } - } else { - if x.PodAntiAffinity == nil { - x.PodAntiAffinity = new(PodAntiAffinity) - } - x.PodAntiAffinity.CodecDecodeSelf(d) - } - for { - yyj1940++ - if yyhl1940 { - yyb1940 = yyj1940 > l - } else { - yyb1940 = r.CheckBreak() - } - if yyb1940 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1940-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodAffinity) CodecEncodeSelf(e 
*codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1944 := z.EncBinary() - _ = yym1944 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1945 := !z.EncBinary() - yy2arr1945 := z.EncBasicHandle().StructToArray - var yyq1945 [2]bool - _, _, _ = yysep1945, yyq1945, yy2arr1945 - const yyr1945 bool = false - yyq1945[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 - yyq1945[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 - var yynn1945 int - if yyr1945 || yy2arr1945 { - r.EncodeArrayStart(2) - } else { - yynn1945 = 0 - for _, b := range yyq1945 { - if b { - yynn1945++ - } - } - r.EncodeMapStart(yynn1945) - yynn1945 = 0 - } - if yyr1945 || yy2arr1945 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1945[0] { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym1947 := z.EncBinary() - _ = yym1947 - if false { - } else { - h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1945[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym1948 := z.EncBinary() - _ = yym1948 - if false { - } else { - h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) - } - } - } - } - if yyr1945 || yy2arr1945 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1945[1] { - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym1950 := z.EncBinary() - _ = yym1950 - if false { - } else { - h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1945[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym1951 := z.EncBinary() - _ = yym1951 - if false { - } else { - h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } - } - if yyr1945 || yy2arr1945 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodAffinity) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1952 := z.DecBinary() - _ = yym1952 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1953 := r.ContainerType() - if yyct1953 == codecSelferValueTypeMap1234 { - yyl1953 := r.ReadMapStart() - if yyl1953 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1953, d) - } - } else if yyct1953 == codecSelferValueTypeArray1234 { - yyl1953 := r.ReadArrayStart() - if yyl1953 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
x.codecDecodeSelfFromArray(yyl1953, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1954Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1954Slc - var yyhl1954 bool = l >= 0 - for yyj1954 := 0; ; yyj1954++ { - if yyhl1954 { - if yyj1954 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1954Slc = r.DecodeBytes(yys1954Slc, true, true) - yys1954 := string(yys1954Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1954 { - case "requiredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv1955 := &x.RequiredDuringSchedulingIgnoredDuringExecution - yym1956 := z.DecBinary() - _ = yym1956 - if false { - } else { - h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv1955), d) - } - } - case "preferredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv1957 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym1958 := z.DecBinary() - _ = yym1958 - if false { - } else { - h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv1957), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys1954) - } // end switch yys1954 - } // end for yyj1954 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1959 int - var yyb1959 bool - var yyhl1959 bool = l >= 0 - yyj1959++ - if yyhl1959 { - yyb1959 = yyj1959 > l - } else { - yyb1959 = r.CheckBreak() - } - if yyb1959 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv1960 := &x.RequiredDuringSchedulingIgnoredDuringExecution - yym1961 := z.DecBinary() - _ = yym1961 - if false { - } else { - h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv1960), d) - } - } - yyj1959++ - if yyhl1959 { - yyb1959 = yyj1959 > l - } else { - yyb1959 = r.CheckBreak() - } - if yyb1959 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv1962 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym1963 := z.DecBinary() - _ = yym1963 - if false { - } else { - h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv1962), d) - } - } - for { - yyj1959++ - if yyhl1959 { - yyb1959 = yyj1959 > l - } else { - yyb1959 = r.CheckBreak() - } - if yyb1959 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1959-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodAntiAffinity) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1964 := z.EncBinary() - _ = yym1964 - if false { - } else if z.HasExtensions() && z.EncExt(x) 
{ - } else { - yysep1965 := !z.EncBinary() - yy2arr1965 := z.EncBasicHandle().StructToArray - var yyq1965 [2]bool - _, _, _ = yysep1965, yyq1965, yy2arr1965 - const yyr1965 bool = false - yyq1965[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 - yyq1965[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 - var yynn1965 int - if yyr1965 || yy2arr1965 { - r.EncodeArrayStart(2) - } else { - yynn1965 = 0 - for _, b := range yyq1965 { - if b { - yynn1965++ - } - } - r.EncodeMapStart(yynn1965) - yynn1965 = 0 - } - if yyr1965 || yy2arr1965 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1965[0] { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym1967 := z.EncBinary() - _ = yym1967 - if false { - } else { - h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1965[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym1968 := z.EncBinary() - _ = yym1968 - if false { - } else { - h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) - } - } - } - } - if yyr1965 || yy2arr1965 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1965[1] { - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym1970 := z.EncBinary() - _ = yym1970 - if false { - } else { - h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1965[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym1971 := z.EncBinary() - _ = yym1971 - if false { - } else { - h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } - } - if yyr1965 || yy2arr1965 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodAntiAffinity) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1972 := z.DecBinary() - _ = yym1972 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1973 := r.ContainerType() - if yyct1973 == codecSelferValueTypeMap1234 { - yyl1973 := r.ReadMapStart() - if yyl1973 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1973, d) - } - } else if yyct1973 == codecSelferValueTypeArray1234 { - yyl1973 := r.ReadArrayStart() - if yyl1973 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1973, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodAntiAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1974Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1974Slc - var yyhl1974 bool = l >= 0 - for yyj1974 := 0; ; yyj1974++ { - if yyhl1974 { - if yyj1974 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1974Slc = r.DecodeBytes(yys1974Slc, true, true) - yys1974 := string(yys1974Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1974 { - case "requiredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv1975 := &x.RequiredDuringSchedulingIgnoredDuringExecution - yym1976 := z.DecBinary() - _ = yym1976 - if false { - } else { - h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv1975), d) - } - } - case "preferredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv1977 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym1978 := z.DecBinary() - _ = yym1978 - if false { - } else { - h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv1977), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys1974) - } // end switch yys1974 - } // end for yyj1974 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodAntiAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1979 int - var yyb1979 bool - var yyhl1979 bool = l >= 0 - yyj1979++ - if yyhl1979 { - yyb1979 = yyj1979 > l - } else { - yyb1979 = r.CheckBreak() - } - if yyb1979 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv1980 := &x.RequiredDuringSchedulingIgnoredDuringExecution - yym1981 := z.DecBinary() - _ = yym1981 - if false { - } else { - h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv1980), d) - } - } - yyj1979++ - if yyhl1979 { - yyb1979 = yyj1979 > l - } else { - yyb1979 = r.CheckBreak() - } - if yyb1979 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv1982 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym1983 := z.DecBinary() - _ = yym1983 - if false { - } else { - h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv1982), d) - } - } - for { - yyj1979++ - if yyhl1979 { - yyb1979 = yyj1979 > l - } else { - yyb1979 = r.CheckBreak() - } - if yyb1979 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1979-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *WeightedPodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1984 := z.EncBinary() - _ = yym1984 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1985 := !z.EncBinary() - yy2arr1985 := z.EncBasicHandle().StructToArray - var yyq1985 [2]bool - _, _, _ = yysep1985, yyq1985, yy2arr1985 - const yyr1985 bool = false - var yynn1985 int - if yyr1985 || 
yy2arr1985 { - r.EncodeArrayStart(2) - } else { - yynn1985 = 2 - for _, b := range yyq1985 { - if b { - yynn1985++ - } - } - r.EncodeMapStart(yynn1985) - yynn1985 = 0 - } - if yyr1985 || yy2arr1985 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1987 := z.EncBinary() - _ = yym1987 - if false { - } else { - r.EncodeInt(int64(x.Weight)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("weight")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1988 := z.EncBinary() - _ = yym1988 - if false { - } else { - r.EncodeInt(int64(x.Weight)) - } - } - if yyr1985 || yy2arr1985 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1990 := &x.PodAffinityTerm - yy1990.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podAffinityTerm")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1991 := &x.PodAffinityTerm - yy1991.CodecEncodeSelf(e) - } - if yyr1985 || yy2arr1985 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *WeightedPodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1992 := z.DecBinary() - _ = yym1992 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1993 := r.ContainerType() - if yyct1993 == codecSelferValueTypeMap1234 { - yyl1993 := r.ReadMapStart() - if yyl1993 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1993, d) - } - } else if yyct1993 == codecSelferValueTypeArray1234 { - yyl1993 := r.ReadArrayStart() - if yyl1993 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1993, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *WeightedPodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1994Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1994Slc - var yyhl1994 bool = l >= 0 - for yyj1994 := 0; ; yyj1994++ { - if yyhl1994 { - if yyj1994 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1994Slc = r.DecodeBytes(yys1994Slc, true, true) - yys1994 := string(yys1994Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1994 { - case "weight": - if r.TryDecodeAsNil() { - x.Weight = 0 - } else { - x.Weight = int32(r.DecodeInt(32)) - } - case "podAffinityTerm": - if r.TryDecodeAsNil() { - x.PodAffinityTerm = PodAffinityTerm{} - } else { - yyv1996 := &x.PodAffinityTerm - yyv1996.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys1994) - } // end switch yys1994 - } // end for yyj1994 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *WeightedPodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1997 int - var yyb1997 bool - var yyhl1997 bool = l >= 0 - yyj1997++ - if yyhl1997 { - yyb1997 = yyj1997 > l - } else { - yyb1997 = r.CheckBreak() - } - if yyb1997 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Weight = 0 - } else { - x.Weight = int32(r.DecodeInt(32)) - } - yyj1997++ - if yyhl1997 { - yyb1997 = yyj1997 > l - } else { - yyb1997 = r.CheckBreak() - } - if yyb1997 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodAffinityTerm = PodAffinityTerm{} - } else { - yyv1999 := &x.PodAffinityTerm - yyv1999.CodecDecodeSelf(d) - } - for { - yyj1997++ - if yyhl1997 { - yyb1997 = yyj1997 > l - } else { - yyb1997 = r.CheckBreak() - } - if yyb1997 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1997-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2000 := z.EncBinary() - _ = yym2000 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2001 := !z.EncBinary() - yy2arr2001 := z.EncBasicHandle().StructToArray - var yyq2001 [3]bool - _, _, _ = yysep2001, yyq2001, yy2arr2001 - const yyr2001 bool = false - yyq2001[0] = x.LabelSelector != nil - yyq2001[2] = x.TopologyKey != "" - var yynn2001 int - if yyr2001 || yy2arr2001 { - r.EncodeArrayStart(3) - } else { - yynn2001 = 1 - for _, b := range yyq2001 { - if b { - yynn2001++ - } - } - r.EncodeMapStart(yynn2001) - yynn2001 = 0 - } - if yyr2001 || yy2arr2001 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2001[0] { - if x.LabelSelector == nil { - r.EncodeNil() - } else { - yym2003 := z.EncBinary() - _ = yym2003 - if false { - } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { - } else { - z.EncFallback(x.LabelSelector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2001[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("labelSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LabelSelector == nil { - r.EncodeNil() - } else { - yym2004 := z.EncBinary() - _ = yym2004 - if false { - } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { - } else { - z.EncFallback(x.LabelSelector) - } - } - } - } - if yyr2001 || yy2arr2001 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Namespaces == nil { - r.EncodeNil() - } else { - yym2006 := z.EncBinary() - _ = yym2006 - if false { - } else { - z.F.EncSliceStringV(x.Namespaces, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespaces")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Namespaces == nil { - r.EncodeNil() - } else { - yym2007 := z.EncBinary() - _ = yym2007 - if false { - } else { - z.F.EncSliceStringV(x.Namespaces, false, e) - } - } - } - if yyr2001 || yy2arr2001 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2001[2] { - yym2009 := z.EncBinary() - _ = yym2009 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2001[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("topologyKey")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2010 := z.EncBinary() - _ = yym2010 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) - } - } - } - if yyr2001 || yy2arr2001 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2011 := z.DecBinary() - _ = yym2011 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2012 := r.ContainerType() - if yyct2012 == codecSelferValueTypeMap1234 { - yyl2012 := r.ReadMapStart() - if yyl2012 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2012, d) - } - } else if yyct2012 == codecSelferValueTypeArray1234 { - yyl2012 := r.ReadArrayStart() - if yyl2012 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2012, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2013Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2013Slc - var yyhl2013 bool = l >= 0 - for yyj2013 := 0; ; yyj2013++ { - if yyhl2013 { - if yyj2013 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2013Slc = r.DecodeBytes(yys2013Slc, true, true) - yys2013 := string(yys2013Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2013 { - case "labelSelector": - if r.TryDecodeAsNil() { - if x.LabelSelector != nil { - x.LabelSelector = nil - } - } else { - if x.LabelSelector == nil { - x.LabelSelector = new(pkg2_unversioned.LabelSelector) - } - yym2015 := z.DecBinary() - _ = yym2015 - if false { - } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { - } else { - z.DecFallback(x.LabelSelector, false) - } - } - case "namespaces": - if r.TryDecodeAsNil() { - x.Namespaces = nil - } else { - yyv2016 := &x.Namespaces - yym2017 := z.DecBinary() - _ = yym2017 - if false { - } else { - z.F.DecSliceStringX(yyv2016, false, d) - } - } - case "topologyKey": - if r.TryDecodeAsNil() { - x.TopologyKey = "" - } else { - x.TopologyKey = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys2013) - } // end switch yys2013 - } // end for yyj2013 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2019 int - var yyb2019 bool - var yyhl2019 bool = l >= 0 - yyj2019++ - if yyhl2019 { - yyb2019 = yyj2019 > l - } else { - yyb2019 = r.CheckBreak() - } - if yyb2019 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LabelSelector != nil { - x.LabelSelector = nil - } - } else { - if x.LabelSelector == nil { - x.LabelSelector = new(pkg2_unversioned.LabelSelector) - } - yym2021 := z.DecBinary() - _ = yym2021 - if false { - } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { - } else { - 
z.DecFallback(x.LabelSelector, false) - } - } - yyj2019++ - if yyhl2019 { - yyb2019 = yyj2019 > l - } else { - yyb2019 = r.CheckBreak() - } - if yyb2019 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespaces = nil - } else { - yyv2022 := &x.Namespaces - yym2023 := z.DecBinary() - _ = yym2023 - if false { - } else { - z.F.DecSliceStringX(yyv2022, false, d) - } - } - yyj2019++ - if yyhl2019 { - yyb2019 = yyj2019 > l - } else { - yyb2019 = r.CheckBreak() - } - if yyb2019 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TopologyKey = "" - } else { - x.TopologyKey = string(r.DecodeString()) - } - for { - yyj2019++ - if yyhl2019 { - yyb2019 = yyj2019 > l - } else { - yyb2019 = r.CheckBreak() - } - if yyb2019 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2019-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2025 := z.EncBinary() - _ = yym2025 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2026 := !z.EncBinary() - yy2arr2026 := z.EncBasicHandle().StructToArray - var yyq2026 [2]bool - _, _, _ = yysep2026, yyq2026, yy2arr2026 - const yyr2026 bool = false - yyq2026[0] = x.RequiredDuringSchedulingIgnoredDuringExecution != nil - yyq2026[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 - var yynn2026 int - if yyr2026 || yy2arr2026 { - r.EncodeArrayStart(2) - } else { - yynn2026 = 0 - for _, b := range yyq2026 { - if b { - yynn2026++ - } - } - r.EncodeMapStart(yynn2026) - yynn2026 = 0 - } - if yyr2026 || yy2arr2026 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2026[0] { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2026[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) - } - } - } - if yyr2026 || yy2arr2026 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2026[1] { - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym2029 := z.EncBinary() - _ = yym2029 - if false { - } else { - h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2026[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym2030 := z.EncBinary() - _ = yym2030 - if false { - } else { - 
h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } - } - if yyr2026 || yy2arr2026 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeAffinity) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2031 := z.DecBinary() - _ = yym2031 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2032 := r.ContainerType() - if yyct2032 == codecSelferValueTypeMap1234 { - yyl2032 := r.ReadMapStart() - if yyl2032 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2032, d) - } - } else if yyct2032 == codecSelferValueTypeArray1234 { - yyl2032 := r.ReadArrayStart() - if yyl2032 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2032, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2033Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2033Slc - var yyhl2033 bool = l >= 0 - for yyj2033 := 0; ; yyj2033++ { - if yyhl2033 { - if yyj2033 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2033Slc = r.DecodeBytes(yys2033Slc, true, true) - yys2033 := string(yys2033Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2033 { - case "requiredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - if x.RequiredDuringSchedulingIgnoredDuringExecution != nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } - } else { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) - } - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) - } - case "preferredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv2035 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym2036 := z.DecBinary() - _ = yym2036 - if false { - } else { - h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv2035), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys2033) - } // end switch yys2033 - } // end for yyj2033 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2037 int - var yyb2037 bool - var yyhl2037 bool = l >= 0 - yyj2037++ - if yyhl2037 { - yyb2037 = yyj2037 > l - } else { - yyb2037 = r.CheckBreak() - } - if yyb2037 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RequiredDuringSchedulingIgnoredDuringExecution != nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } - } else { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) - } - 
x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) - } - yyj2037++ - if yyhl2037 { - yyb2037 = yyj2037 > l - } else { - yyb2037 = r.CheckBreak() - } - if yyb2037 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv2039 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym2040 := z.DecBinary() - _ = yym2040 - if false { - } else { - h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv2039), d) - } - } - for { - yyj2037++ - if yyhl2037 { - yyb2037 = yyj2037 > l - } else { - yyb2037 = r.CheckBreak() - } - if yyb2037 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2037-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PreferredSchedulingTerm) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2041 := z.EncBinary() - _ = yym2041 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2042 := !z.EncBinary() - yy2arr2042 := z.EncBasicHandle().StructToArray - var yyq2042 [2]bool - _, _, _ = yysep2042, yyq2042, yy2arr2042 - const yyr2042 bool = false - var yynn2042 int - if yyr2042 || yy2arr2042 { - r.EncodeArrayStart(2) - } else { - yynn2042 = 2 - for _, b := range yyq2042 { - if b { - yynn2042++ - } - } - r.EncodeMapStart(yynn2042) - yynn2042 = 0 - } - if yyr2042 || yy2arr2042 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2044 := z.EncBinary() - _ = yym2044 - if false { - } else { - r.EncodeInt(int64(x.Weight)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("weight")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2045 := z.EncBinary() - _ = yym2045 - if false { - } else { - r.EncodeInt(int64(x.Weight)) - } - } - if yyr2042 || yy2arr2042 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2047 := &x.Preference - yy2047.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preference")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2048 := &x.Preference - yy2048.CodecEncodeSelf(e) - } - if yyr2042 || yy2arr2042 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PreferredSchedulingTerm) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2049 := z.DecBinary() - _ = yym2049 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2050 := r.ContainerType() - if yyct2050 == codecSelferValueTypeMap1234 { - yyl2050 := r.ReadMapStart() - if yyl2050 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2050, d) - } - } else if yyct2050 == codecSelferValueTypeArray1234 { - yyl2050 := r.ReadArrayStart() - if yyl2050 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2050, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PreferredSchedulingTerm) 
codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2051Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2051Slc - var yyhl2051 bool = l >= 0 - for yyj2051 := 0; ; yyj2051++ { - if yyhl2051 { - if yyj2051 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2051Slc = r.DecodeBytes(yys2051Slc, true, true) - yys2051 := string(yys2051Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2051 { - case "weight": - if r.TryDecodeAsNil() { - x.Weight = 0 - } else { - x.Weight = int32(r.DecodeInt(32)) - } - case "preference": - if r.TryDecodeAsNil() { - x.Preference = NodeSelectorTerm{} - } else { - yyv2053 := &x.Preference - yyv2053.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys2051) - } // end switch yys2051 - } // end for yyj2051 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PreferredSchedulingTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2054 int - var yyb2054 bool - var yyhl2054 bool = l >= 0 - yyj2054++ - if yyhl2054 { - yyb2054 = yyj2054 > l - } else { - yyb2054 = r.CheckBreak() - } - if yyb2054 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Weight = 0 - } else { - x.Weight = int32(r.DecodeInt(32)) - } - yyj2054++ - if yyhl2054 { - yyb2054 = yyj2054 > l - } else { - yyb2054 = r.CheckBreak() - } - if yyb2054 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Preference = NodeSelectorTerm{} - } else { - yyv2056 := &x.Preference - yyv2056.CodecDecodeSelf(d) - } - for { - yyj2054++ - if yyhl2054 { - yyb2054 = yyj2054 > l - } else { - yyb2054 = r.CheckBreak() - } - if yyb2054 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2054-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Taint) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2057 := z.EncBinary() - _ = yym2057 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2058 := !z.EncBinary() - yy2arr2058 := z.EncBasicHandle().StructToArray - var yyq2058 [3]bool - _, _, _ = yysep2058, yyq2058, yy2arr2058 - const yyr2058 bool = false - yyq2058[1] = x.Value != "" - var yynn2058 int - if yyr2058 || yy2arr2058 { - r.EncodeArrayStart(3) - } else { - yynn2058 = 2 - for _, b := range yyq2058 { - if b { - yynn2058++ - } - } - r.EncodeMapStart(yynn2058) - yynn2058 = 0 - } - if yyr2058 || yy2arr2058 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2060 := z.EncBinary() - _ = yym2060 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2061 := z.EncBinary() - _ = yym2061 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr2058 || 
yy2arr2058 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2058[1] { - yym2063 := z.EncBinary() - _ = yym2063 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2058[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2064 := z.EncBinary() - _ = yym2064 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } - } - if yyr2058 || yy2arr2058 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Effect.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("effect")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Effect.CodecEncodeSelf(e) - } - if yyr2058 || yy2arr2058 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Taint) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2066 := z.DecBinary() - _ = yym2066 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2067 := r.ContainerType() - if yyct2067 == codecSelferValueTypeMap1234 { - yyl2067 := r.ReadMapStart() - if yyl2067 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2067, d) - } - } else if yyct2067 == codecSelferValueTypeArray1234 { - yyl2067 := r.ReadArrayStart() - if yyl2067 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2067, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Taint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2068Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2068Slc - var yyhl2068 bool = l >= 0 - for yyj2068 := 0; ; yyj2068++ { - if yyhl2068 { - if yyj2068 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2068Slc = r.DecodeBytes(yys2068Slc, true, true) - yys2068 := string(yys2068Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2068 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - case "effect": - if r.TryDecodeAsNil() { - x.Effect = "" - } else { - x.Effect = TaintEffect(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys2068) - } // end switch yys2068 - } // end for yyj2068 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Taint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2072 int - var yyb2072 bool - var yyhl2072 bool = l >= 0 - yyj2072++ - if yyhl2072 { - yyb2072 = yyj2072 > l - } else { - yyb2072 = r.CheckBreak() - } - if yyb2072 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { 
- x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - yyj2072++ - if yyhl2072 { - yyb2072 = yyj2072 > l - } else { - yyb2072 = r.CheckBreak() - } - if yyb2072 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - yyj2072++ - if yyhl2072 { - yyb2072 = yyj2072 > l - } else { - yyb2072 = r.CheckBreak() - } - if yyb2072 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Effect = "" - } else { - x.Effect = TaintEffect(r.DecodeString()) - } - for { - yyj2072++ - if yyhl2072 { - yyb2072 = yyj2072 > l - } else { - yyb2072 = r.CheckBreak() - } - if yyb2072 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2072-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x TaintEffect) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym2076 := z.EncBinary() - _ = yym2076 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *TaintEffect) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2077 := z.DecBinary() - _ = yym2077 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *Toleration) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2078 := z.EncBinary() - _ = yym2078 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2079 := !z.EncBinary() - yy2arr2079 := z.EncBasicHandle().StructToArray - var yyq2079 [4]bool - _, _, _ = yysep2079, yyq2079, yy2arr2079 - const yyr2079 bool = false - yyq2079[0] = x.Key != "" - yyq2079[1] = x.Operator != "" - yyq2079[2] = x.Value != "" - yyq2079[3] = x.Effect != "" - var yynn2079 int - if yyr2079 || yy2arr2079 { - r.EncodeArrayStart(4) - } else { - yynn2079 = 0 - for _, b := range yyq2079 { - if b { - yynn2079++ - } - } - r.EncodeMapStart(yynn2079) - yynn2079 = 0 - } - if yyr2079 || yy2arr2079 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2079[0] { - yym2081 := z.EncBinary() - _ = yym2081 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2079[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2082 := z.EncBinary() - _ = yym2082 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - } - if yyr2079 || yy2arr2079 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2079[1] { - x.Operator.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2079[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("operator")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Operator.CodecEncodeSelf(e) - } - } - if 
yyr2079 || yy2arr2079 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2079[2] { - yym2085 := z.EncBinary() - _ = yym2085 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2079[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2086 := z.EncBinary() - _ = yym2086 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } - } - if yyr2079 || yy2arr2079 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2079[3] { - x.Effect.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2079[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("effect")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Effect.CodecEncodeSelf(e) - } - } - if yyr2079 || yy2arr2079 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Toleration) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2088 := z.DecBinary() - _ = yym2088 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2089 := r.ContainerType() - if yyct2089 == codecSelferValueTypeMap1234 { - yyl2089 := r.ReadMapStart() - if yyl2089 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2089, d) - } - } else if yyct2089 == codecSelferValueTypeArray1234 { - yyl2089 := r.ReadArrayStart() - if yyl2089 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2089, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Toleration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2090Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2090Slc - var yyhl2090 bool = l >= 0 - for yyj2090 := 0; ; yyj2090++ { - if yyhl2090 { - if yyj2090 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2090Slc = r.DecodeBytes(yys2090Slc, true, true) - yys2090 := string(yys2090Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2090 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - case "operator": - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - x.Operator = TolerationOperator(r.DecodeString()) - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - case "effect": - if r.TryDecodeAsNil() { - x.Effect = "" - } else { - x.Effect = TaintEffect(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys2090) - } // end switch yys2090 - } // end for yyj2090 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Toleration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2095 int - var yyb2095 bool - var yyhl2095 bool = l >= 0 - yyj2095++ - if 
yyhl2095 { - yyb2095 = yyj2095 > l - } else { - yyb2095 = r.CheckBreak() - } - if yyb2095 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - yyj2095++ - if yyhl2095 { - yyb2095 = yyj2095 > l - } else { - yyb2095 = r.CheckBreak() - } - if yyb2095 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - x.Operator = TolerationOperator(r.DecodeString()) - } - yyj2095++ - if yyhl2095 { - yyb2095 = yyj2095 > l - } else { - yyb2095 = r.CheckBreak() - } - if yyb2095 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - yyj2095++ - if yyhl2095 { - yyb2095 = yyj2095 > l - } else { - yyb2095 = r.CheckBreak() - } - if yyb2095 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Effect = "" - } else { - x.Effect = TaintEffect(r.DecodeString()) - } - for { - yyj2095++ - if yyhl2095 { - yyb2095 = yyj2095 > l - } else { - yyb2095 = r.CheckBreak() - } - if yyb2095 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2095-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x TolerationOperator) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym2100 := z.EncBinary() - _ = yym2100 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *TolerationOperator) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2101 := z.DecBinary() - _ = yym2101 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2102 := z.EncBinary() - _ = yym2102 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2103 := !z.EncBinary() - yy2arr2103 := z.EncBasicHandle().StructToArray - var yyq2103 [13]bool - _, _, _ = yysep2103, yyq2103, yy2arr2103 - const yyr2103 bool = false - yyq2103[2] = x.RestartPolicy != "" - yyq2103[3] = x.TerminationGracePeriodSeconds != nil - yyq2103[4] = x.ActiveDeadlineSeconds != nil - yyq2103[5] = x.DNSPolicy != "" - yyq2103[6] = len(x.NodeSelector) != 0 - yyq2103[8] = x.NodeName != "" - yyq2103[9] = x.SecurityContext != nil - yyq2103[10] = len(x.ImagePullSecrets) != 0 - yyq2103[11] = x.Hostname != "" - yyq2103[12] = x.Subdomain != "" - var yynn2103 int - if yyr2103 || yy2arr2103 { - r.EncodeArrayStart(13) - } else { - yynn2103 = 3 - for _, b := range yyq2103 { - if b { - yynn2103++ - } - } - r.EncodeMapStart(yynn2103) - yynn2103 = 0 - } - if yyr2103 || yy2arr2103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Volumes == nil { - r.EncodeNil() - } else { - yym2105 := z.EncBinary() - _ = yym2105 - 
if false { - } else { - h.encSliceVolume(([]Volume)(x.Volumes), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Volumes == nil { - r.EncodeNil() - } else { - yym2106 := z.EncBinary() - _ = yym2106 - if false { - } else { - h.encSliceVolume(([]Volume)(x.Volumes), e) - } - } - } - if yyr2103 || yy2arr2103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Containers == nil { - r.EncodeNil() - } else { - yym2108 := z.EncBinary() - _ = yym2108 - if false { - } else { - h.encSliceContainer(([]Container)(x.Containers), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Containers == nil { - r.EncodeNil() - } else { - yym2109 := z.EncBinary() - _ = yym2109 - if false { - } else { - h.encSliceContainer(([]Container)(x.Containers), e) - } - } - } - if yyr2103 || yy2arr2103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2103[2] { - x.RestartPolicy.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2103[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("restartPolicy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.RestartPolicy.CodecEncodeSelf(e) - } - } - if yyr2103 || yy2arr2103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2103[3] { - if x.TerminationGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy2112 := *x.TerminationGracePeriodSeconds - yym2113 := z.EncBinary() - _ = yym2113 - if false { - } else { - r.EncodeInt(int64(yy2112)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2103[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("terminationGracePeriodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TerminationGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy2114 := *x.TerminationGracePeriodSeconds - yym2115 := z.EncBinary() - _ = yym2115 - if false { - } else { - r.EncodeInt(int64(yy2114)) - } - } - } - } - if yyr2103 || yy2arr2103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2103[4] { - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy2117 := *x.ActiveDeadlineSeconds - yym2118 := z.EncBinary() - _ = yym2118 - if false { - } else { - r.EncodeInt(int64(yy2117)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2103[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy2119 := *x.ActiveDeadlineSeconds - yym2120 := z.EncBinary() - _ = yym2120 - if false { - } else { - r.EncodeInt(int64(yy2119)) - } - } - } - } - if yyr2103 || yy2arr2103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2103[5] { - x.DNSPolicy.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2103[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("dnsPolicy")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.DNSPolicy.CodecEncodeSelf(e) - } - } - if yyr2103 || yy2arr2103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2103[6] { - if x.NodeSelector == nil { - r.EncodeNil() - } else { - yym2123 := z.EncBinary() - _ = yym2123 - if false { - } else { - z.F.EncMapStringStringV(x.NodeSelector, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2103[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NodeSelector == nil { - r.EncodeNil() - } else { - yym2124 := z.EncBinary() - _ = yym2124 - if false { - } else { - z.F.EncMapStringStringV(x.NodeSelector, false, e) - } - } - } - } - if yyr2103 || yy2arr2103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2126 := z.EncBinary() - _ = yym2126 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceAccountName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2127 := z.EncBinary() - _ = yym2127 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName)) - } - } - if yyr2103 || yy2arr2103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2103[8] { - yym2129 := z.EncBinary() - _ = yym2129 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NodeName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2103[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2130 := z.EncBinary() - _ = yym2130 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NodeName)) - } - } - } - if yyr2103 || yy2arr2103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2103[9] { - if x.SecurityContext == nil { - r.EncodeNil() - } else { - x.SecurityContext.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2103[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("securityContext")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecurityContext == nil { - r.EncodeNil() - } else { - x.SecurityContext.CodecEncodeSelf(e) - } - } - } - if yyr2103 || yy2arr2103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2103[10] { - if x.ImagePullSecrets == nil { - r.EncodeNil() - } else { - yym2133 := z.EncBinary() - _ = yym2133 - if false { - } else { - h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2103[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("imagePullSecrets")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ImagePullSecrets == nil { - r.EncodeNil() - } else { - yym2134 := z.EncBinary() - _ = yym2134 - if false { - } else { - h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) - } - } - } - } - if yyr2103 || yy2arr2103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2103[11] { - yym2136 := z.EncBinary() - _ = 
yym2136 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2103[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostname")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2137 := z.EncBinary() - _ = yym2137 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } - } - } - if yyr2103 || yy2arr2103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2103[12] { - yym2139 := z.EncBinary() - _ = yym2139 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2103[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subdomain")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2140 := z.EncBinary() - _ = yym2140 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) - } - } - } - if yyr2103 || yy2arr2103 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2141 := z.DecBinary() - _ = yym2141 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2142 := r.ContainerType() - if yyct2142 == codecSelferValueTypeMap1234 { - yyl2142 := r.ReadMapStart() - if yyl2142 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2142, d) - } - } else if yyct2142 == codecSelferValueTypeArray1234 { - yyl2142 := r.ReadArrayStart() - if yyl2142 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2142, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2143Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2143Slc - var yyhl2143 bool = l >= 0 - for yyj2143 := 0; ; yyj2143++ { - if yyhl2143 { - if yyj2143 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2143Slc = r.DecodeBytes(yys2143Slc, true, true) - yys2143 := string(yys2143Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2143 { - case "volumes": - if r.TryDecodeAsNil() { - x.Volumes = nil - } else { - yyv2144 := &x.Volumes - yym2145 := z.DecBinary() - _ = yym2145 - if false { - } else { - h.decSliceVolume((*[]Volume)(yyv2144), d) - } - } - case "containers": - if r.TryDecodeAsNil() { - x.Containers = nil - } else { - yyv2146 := &x.Containers - yym2147 := z.DecBinary() - _ = yym2147 - if false { - } else { - h.decSliceContainer((*[]Container)(yyv2146), d) - } - } - case "restartPolicy": - if r.TryDecodeAsNil() { - x.RestartPolicy = "" - } else { - x.RestartPolicy = RestartPolicy(r.DecodeString()) - } - case "terminationGracePeriodSeconds": - if r.TryDecodeAsNil() { - if x.TerminationGracePeriodSeconds != nil { - x.TerminationGracePeriodSeconds = nil - } - } else { - if x.TerminationGracePeriodSeconds == nil { 
- x.TerminationGracePeriodSeconds = new(int64) - } - yym2150 := z.DecBinary() - _ = yym2150 - if false { - } else { - *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - case "activeDeadlineSeconds": - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym2152 := z.DecBinary() - _ = yym2152 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - case "dnsPolicy": - if r.TryDecodeAsNil() { - x.DNSPolicy = "" - } else { - x.DNSPolicy = DNSPolicy(r.DecodeString()) - } - case "nodeSelector": - if r.TryDecodeAsNil() { - x.NodeSelector = nil - } else { - yyv2154 := &x.NodeSelector - yym2155 := z.DecBinary() - _ = yym2155 - if false { - } else { - z.F.DecMapStringStringX(yyv2154, false, d) - } - } - case "serviceAccountName": - if r.TryDecodeAsNil() { - x.ServiceAccountName = "" - } else { - x.ServiceAccountName = string(r.DecodeString()) - } - case "nodeName": - if r.TryDecodeAsNil() { - x.NodeName = "" - } else { - x.NodeName = string(r.DecodeString()) - } - case "securityContext": - if r.TryDecodeAsNil() { - if x.SecurityContext != nil { - x.SecurityContext = nil - } - } else { - if x.SecurityContext == nil { - x.SecurityContext = new(PodSecurityContext) - } - x.SecurityContext.CodecDecodeSelf(d) - } - case "imagePullSecrets": - if r.TryDecodeAsNil() { - x.ImagePullSecrets = nil - } else { - yyv2159 := &x.ImagePullSecrets - yym2160 := z.DecBinary() - _ = yym2160 - if false { - } else { - h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv2159), d) - } - } - case "hostname": - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - x.Hostname = string(r.DecodeString()) - } - case "subdomain": - if r.TryDecodeAsNil() { - x.Subdomain = "" - } else { - x.Subdomain = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys2143) - } // end switch yys2143 - } // end for yyj2143 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2163 int - var yyb2163 bool - var yyhl2163 bool = l >= 0 - yyj2163++ - if yyhl2163 { - yyb2163 = yyj2163 > l - } else { - yyb2163 = r.CheckBreak() - } - if yyb2163 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Volumes = nil - } else { - yyv2164 := &x.Volumes - yym2165 := z.DecBinary() - _ = yym2165 - if false { - } else { - h.decSliceVolume((*[]Volume)(yyv2164), d) - } - } - yyj2163++ - if yyhl2163 { - yyb2163 = yyj2163 > l - } else { - yyb2163 = r.CheckBreak() - } - if yyb2163 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Containers = nil - } else { - yyv2166 := &x.Containers - yym2167 := z.DecBinary() - _ = yym2167 - if false { - } else { - h.decSliceContainer((*[]Container)(yyv2166), d) - } - } - yyj2163++ - if yyhl2163 { - yyb2163 = yyj2163 > l - } else { - yyb2163 = r.CheckBreak() - } - if yyb2163 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RestartPolicy = "" - } else { - x.RestartPolicy = 
RestartPolicy(r.DecodeString()) - } - yyj2163++ - if yyhl2163 { - yyb2163 = yyj2163 > l - } else { - yyb2163 = r.CheckBreak() - } - if yyb2163 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TerminationGracePeriodSeconds != nil { - x.TerminationGracePeriodSeconds = nil - } - } else { - if x.TerminationGracePeriodSeconds == nil { - x.TerminationGracePeriodSeconds = new(int64) - } - yym2170 := z.DecBinary() - _ = yym2170 - if false { - } else { - *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj2163++ - if yyhl2163 { - yyb2163 = yyj2163 > l - } else { - yyb2163 = r.CheckBreak() - } - if yyb2163 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym2172 := z.DecBinary() - _ = yym2172 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj2163++ - if yyhl2163 { - yyb2163 = yyj2163 > l - } else { - yyb2163 = r.CheckBreak() - } - if yyb2163 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DNSPolicy = "" - } else { - x.DNSPolicy = DNSPolicy(r.DecodeString()) - } - yyj2163++ - if yyhl2163 { - yyb2163 = yyj2163 > l - } else { - yyb2163 = r.CheckBreak() - } - if yyb2163 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeSelector = nil - } else { - yyv2174 := &x.NodeSelector - yym2175 := z.DecBinary() - _ = yym2175 - if false { - } else { - z.F.DecMapStringStringX(yyv2174, false, d) - } - } - yyj2163++ - if yyhl2163 { - yyb2163 = yyj2163 > l - } else { - yyb2163 = r.CheckBreak() - } - if yyb2163 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServiceAccountName = "" - } else { - x.ServiceAccountName = string(r.DecodeString()) - } - yyj2163++ - if yyhl2163 { - yyb2163 = yyj2163 > l - } else { - yyb2163 = r.CheckBreak() - } - if yyb2163 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeName = "" - } else { - x.NodeName = string(r.DecodeString()) - } - yyj2163++ - if yyhl2163 { - yyb2163 = yyj2163 > l - } else { - yyb2163 = r.CheckBreak() - } - if yyb2163 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecurityContext != nil { - x.SecurityContext = nil - } - } else { - if x.SecurityContext == nil { - x.SecurityContext = new(PodSecurityContext) - } - x.SecurityContext.CodecDecodeSelf(d) - } - yyj2163++ - if yyhl2163 { - yyb2163 = yyj2163 > l - } else { - yyb2163 = r.CheckBreak() - } - if yyb2163 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ImagePullSecrets = nil - } else { - yyv2179 := &x.ImagePullSecrets - yym2180 
:= z.DecBinary() - _ = yym2180 - if false { - } else { - h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv2179), d) - } - } - yyj2163++ - if yyhl2163 { - yyb2163 = yyj2163 > l - } else { - yyb2163 = r.CheckBreak() - } - if yyb2163 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - x.Hostname = string(r.DecodeString()) - } - yyj2163++ - if yyhl2163 { - yyb2163 = yyj2163 > l - } else { - yyb2163 = r.CheckBreak() - } - if yyb2163 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Subdomain = "" - } else { - x.Subdomain = string(r.DecodeString()) - } - for { - yyj2163++ - if yyhl2163 { - yyb2163 = yyj2163 > l - } else { - yyb2163 = r.CheckBreak() - } - if yyb2163 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2163-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Sysctl) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2183 := z.EncBinary() - _ = yym2183 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2184 := !z.EncBinary() - yy2arr2184 := z.EncBasicHandle().StructToArray - var yyq2184 [2]bool - _, _, _ = yysep2184, yyq2184, yy2arr2184 - const yyr2184 bool = false - var yynn2184 int - if yyr2184 || yy2arr2184 { - r.EncodeArrayStart(2) - } else { - yynn2184 = 2 - for _, b := range yyq2184 { - if b { - yynn2184++ - } - } - r.EncodeMapStart(yynn2184) - yynn2184 = 0 - } - if yyr2184 || yy2arr2184 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2186 := z.EncBinary() - _ = yym2186 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2187 := z.EncBinary() - _ = yym2187 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2184 || yy2arr2184 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2189 := z.EncBinary() - _ = yym2189 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2190 := z.EncBinary() - _ = yym2190 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } - if yyr2184 || yy2arr2184 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Sysctl) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2191 := z.DecBinary() - _ = yym2191 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2192 := r.ContainerType() - if yyct2192 == codecSelferValueTypeMap1234 { - yyl2192 := r.ReadMapStart() - if yyl2192 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2192, d) - } - } else 
if yyct2192 == codecSelferValueTypeArray1234 { - yyl2192 := r.ReadArrayStart() - if yyl2192 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2192, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Sysctl) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2193Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2193Slc - var yyhl2193 bool = l >= 0 - for yyj2193 := 0; ; yyj2193++ { - if yyhl2193 { - if yyj2193 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2193Slc = r.DecodeBytes(yys2193Slc, true, true) - yys2193 := string(yys2193Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2193 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys2193) - } // end switch yys2193 - } // end for yyj2193 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Sysctl) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2196 int - var yyb2196 bool - var yyhl2196 bool = l >= 0 - yyj2196++ - if yyhl2196 { - yyb2196 = yyj2196 > l - } else { - yyb2196 = r.CheckBreak() - } - if yyb2196 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj2196++ - if yyhl2196 { - yyb2196 = yyj2196 > l - } else { - yyb2196 = r.CheckBreak() - } - if yyb2196 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - for { - yyj2196++ - if yyhl2196 { - yyb2196 = yyj2196 > l - } else { - yyb2196 = r.CheckBreak() - } - if yyb2196 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2196-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodSecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2199 := z.EncBinary() - _ = yym2199 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2200 := !z.EncBinary() - yy2arr2200 := z.EncBasicHandle().StructToArray - var yyq2200 [8]bool - _, _, _ = yysep2200, yyq2200, yy2arr2200 - const yyr2200 bool = false - yyq2200[0] = x.HostNetwork != false - yyq2200[1] = x.HostPID != false - yyq2200[2] = x.HostIPC != false - yyq2200[3] = x.SELinuxOptions != nil - yyq2200[4] = x.RunAsUser != nil - yyq2200[5] = x.RunAsNonRoot != nil - yyq2200[6] = len(x.SupplementalGroups) != 0 - yyq2200[7] = x.FSGroup != nil - var yynn2200 int - if yyr2200 || yy2arr2200 { - r.EncodeArrayStart(8) - } else { - yynn2200 = 0 - for _, b := range yyq2200 { - if b { - yynn2200++ - } - } - r.EncodeMapStart(yynn2200) - yynn2200 = 0 - } - if yyr2200 || yy2arr2200 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2200[0] { - yym2202 := z.EncBinary() - _ = yym2202 - if false { - } else { - r.EncodeBool(bool(x.HostNetwork)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2200[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostNetwork")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2203 := z.EncBinary() - _ = yym2203 - if false { - } else { - r.EncodeBool(bool(x.HostNetwork)) - } - } - } - if yyr2200 || yy2arr2200 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2200[1] { - yym2205 := z.EncBinary() - _ = yym2205 - if false { - } else { - r.EncodeBool(bool(x.HostPID)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2200[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2206 := z.EncBinary() - _ = yym2206 - if false { - } else { - r.EncodeBool(bool(x.HostPID)) - } - } - } - if yyr2200 || yy2arr2200 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2200[2] { - yym2208 := z.EncBinary() - _ = yym2208 - if false { - } else { - r.EncodeBool(bool(x.HostIPC)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2200[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostIPC")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2209 := z.EncBinary() - _ = yym2209 - if false { - } else { - r.EncodeBool(bool(x.HostIPC)) - } - } - } - if yyr2200 || yy2arr2200 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2200[3] { - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2200[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } - } - if yyr2200 || yy2arr2200 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2200[4] { - if x.RunAsUser == nil { - r.EncodeNil() - } else { - yy2212 := *x.RunAsUser - yym2213 := z.EncBinary() - _ = yym2213 - if false { - } else { - r.EncodeInt(int64(yy2212)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2200[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RunAsUser == nil { - r.EncodeNil() - } else { - yy2214 := *x.RunAsUser - yym2215 := z.EncBinary() - _ = yym2215 - if false { - } else { - r.EncodeInt(int64(yy2214)) - } - } - } - } - if yyr2200 || yy2arr2200 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2200[5] { - if x.RunAsNonRoot == nil { - r.EncodeNil() - } else { - yy2217 := *x.RunAsNonRoot - yym2218 := z.EncBinary() - _ = yym2218 - if false { - } else { - r.EncodeBool(bool(yy2217)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2200[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsNonRoot")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RunAsNonRoot == nil { - r.EncodeNil() - } else { - 
yy2219 := *x.RunAsNonRoot - yym2220 := z.EncBinary() - _ = yym2220 - if false { - } else { - r.EncodeBool(bool(yy2219)) - } - } - } - } - if yyr2200 || yy2arr2200 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2200[6] { - if x.SupplementalGroups == nil { - r.EncodeNil() - } else { - yym2222 := z.EncBinary() - _ = yym2222 - if false { - } else { - z.F.EncSliceInt64V(x.SupplementalGroups, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2200[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("supplementalGroups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SupplementalGroups == nil { - r.EncodeNil() - } else { - yym2223 := z.EncBinary() - _ = yym2223 - if false { - } else { - z.F.EncSliceInt64V(x.SupplementalGroups, false, e) - } - } - } - } - if yyr2200 || yy2arr2200 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2200[7] { - if x.FSGroup == nil { - r.EncodeNil() - } else { - yy2225 := *x.FSGroup - yym2226 := z.EncBinary() - _ = yym2226 - if false { - } else { - r.EncodeInt(int64(yy2225)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2200[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsGroup")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FSGroup == nil { - r.EncodeNil() - } else { - yy2227 := *x.FSGroup - yym2228 := z.EncBinary() - _ = yym2228 - if false { - } else { - r.EncodeInt(int64(yy2227)) - } - } - } - } - if yyr2200 || yy2arr2200 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSecurityContext) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2229 := z.DecBinary() - _ = yym2229 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2230 := r.ContainerType() - if yyct2230 == codecSelferValueTypeMap1234 { - yyl2230 := r.ReadMapStart() - if yyl2230 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2230, d) - } - } else if yyct2230 == codecSelferValueTypeArray1234 { - yyl2230 := r.ReadArrayStart() - if yyl2230 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2230, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2231Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2231Slc - var yyhl2231 bool = l >= 0 - for yyj2231 := 0; ; yyj2231++ { - if yyhl2231 { - if yyj2231 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2231Slc = r.DecodeBytes(yys2231Slc, true, true) - yys2231 := string(yys2231Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2231 { - case "hostNetwork": - if r.TryDecodeAsNil() { - x.HostNetwork = false - } else { - x.HostNetwork = bool(r.DecodeBool()) - } - case "hostPID": - if r.TryDecodeAsNil() { - x.HostPID = false - } else { - x.HostPID = bool(r.DecodeBool()) - } - case "hostIPC": - if r.TryDecodeAsNil() { - x.HostIPC = false - } else { - x.HostIPC = 
bool(r.DecodeBool()) - } - case "seLinuxOptions": - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - case "runAsUser": - if r.TryDecodeAsNil() { - if x.RunAsUser != nil { - x.RunAsUser = nil - } - } else { - if x.RunAsUser == nil { - x.RunAsUser = new(int64) - } - yym2237 := z.DecBinary() - _ = yym2237 - if false { - } else { - *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) - } - } - case "runAsNonRoot": - if r.TryDecodeAsNil() { - if x.RunAsNonRoot != nil { - x.RunAsNonRoot = nil - } - } else { - if x.RunAsNonRoot == nil { - x.RunAsNonRoot = new(bool) - } - yym2239 := z.DecBinary() - _ = yym2239 - if false { - } else { - *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() - } - } - case "supplementalGroups": - if r.TryDecodeAsNil() { - x.SupplementalGroups = nil - } else { - yyv2240 := &x.SupplementalGroups - yym2241 := z.DecBinary() - _ = yym2241 - if false { - } else { - z.F.DecSliceInt64X(yyv2240, false, d) - } - } - case "fsGroup": - if r.TryDecodeAsNil() { - if x.FSGroup != nil { - x.FSGroup = nil - } - } else { - if x.FSGroup == nil { - x.FSGroup = new(int64) - } - yym2243 := z.DecBinary() - _ = yym2243 - if false { - } else { - *((*int64)(x.FSGroup)) = int64(r.DecodeInt(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys2231) - } // end switch yys2231 - } // end for yyj2231 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2244 int - var yyb2244 bool - var yyhl2244 bool = l >= 0 - yyj2244++ - if yyhl2244 { - yyb2244 = yyj2244 > l - } else { - yyb2244 = r.CheckBreak() - } - if yyb2244 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostNetwork = false - } else { - x.HostNetwork = bool(r.DecodeBool()) - } - yyj2244++ - if yyhl2244 { - yyb2244 = yyj2244 > l - } else { - yyb2244 = r.CheckBreak() - } - if yyb2244 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostPID = false - } else { - x.HostPID = bool(r.DecodeBool()) - } - yyj2244++ - if yyhl2244 { - yyb2244 = yyj2244 > l - } else { - yyb2244 = r.CheckBreak() - } - if yyb2244 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostIPC = false - } else { - x.HostIPC = bool(r.DecodeBool()) - } - yyj2244++ - if yyhl2244 { - yyb2244 = yyj2244 > l - } else { - yyb2244 = r.CheckBreak() - } - if yyb2244 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - yyj2244++ - if yyhl2244 { - yyb2244 = yyj2244 > l - } else { - yyb2244 = r.CheckBreak() - } - if yyb2244 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RunAsUser != 
nil { - x.RunAsUser = nil - } - } else { - if x.RunAsUser == nil { - x.RunAsUser = new(int64) - } - yym2250 := z.DecBinary() - _ = yym2250 - if false { - } else { - *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) - } - } - yyj2244++ - if yyhl2244 { - yyb2244 = yyj2244 > l - } else { - yyb2244 = r.CheckBreak() - } - if yyb2244 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RunAsNonRoot != nil { - x.RunAsNonRoot = nil - } - } else { - if x.RunAsNonRoot == nil { - x.RunAsNonRoot = new(bool) - } - yym2252 := z.DecBinary() - _ = yym2252 - if false { - } else { - *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() - } - } - yyj2244++ - if yyhl2244 { - yyb2244 = yyj2244 > l - } else { - yyb2244 = r.CheckBreak() - } - if yyb2244 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SupplementalGroups = nil - } else { - yyv2253 := &x.SupplementalGroups - yym2254 := z.DecBinary() - _ = yym2254 - if false { - } else { - z.F.DecSliceInt64X(yyv2253, false, d) - } - } - yyj2244++ - if yyhl2244 { - yyb2244 = yyj2244 > l - } else { - yyb2244 = r.CheckBreak() - } - if yyb2244 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FSGroup != nil { - x.FSGroup = nil - } - } else { - if x.FSGroup == nil { - x.FSGroup = new(int64) - } - yym2256 := z.DecBinary() - _ = yym2256 - if false { - } else { - *((*int64)(x.FSGroup)) = int64(r.DecodeInt(64)) - } - } - for { - yyj2244++ - if yyhl2244 { - yyb2244 = yyj2244 > l - } else { - yyb2244 = r.CheckBreak() - } - if yyb2244 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2244-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2257 := z.EncBinary() - _ = yym2257 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2258 := !z.EncBinary() - yy2arr2258 := z.EncBasicHandle().StructToArray - var yyq2258 [8]bool - _, _, _ = yysep2258, yyq2258, yy2arr2258 - const yyr2258 bool = false - yyq2258[0] = x.Phase != "" - yyq2258[1] = len(x.Conditions) != 0 - yyq2258[2] = x.Message != "" - yyq2258[3] = x.Reason != "" - yyq2258[4] = x.HostIP != "" - yyq2258[5] = x.PodIP != "" - yyq2258[6] = x.StartTime != nil - yyq2258[7] = len(x.ContainerStatuses) != 0 - var yynn2258 int - if yyr2258 || yy2arr2258 { - r.EncodeArrayStart(8) - } else { - yynn2258 = 0 - for _, b := range yyq2258 { - if b { - yynn2258++ - } - } - r.EncodeMapStart(yynn2258) - yynn2258 = 0 - } - if yyr2258 || yy2arr2258 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2258[0] { - x.Phase.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2258[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("phase")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Phase.CodecEncodeSelf(e) - } - } - if yyr2258 || yy2arr2258 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2258[1] { - if x.Conditions == nil { - r.EncodeNil() - } else 
{ - yym2261 := z.EncBinary() - _ = yym2261 - if false { - } else { - h.encSlicePodCondition(([]PodCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2258[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym2262 := z.EncBinary() - _ = yym2262 - if false { - } else { - h.encSlicePodCondition(([]PodCondition)(x.Conditions), e) - } - } - } - } - if yyr2258 || yy2arr2258 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2258[2] { - yym2264 := z.EncBinary() - _ = yym2264 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2258[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2265 := z.EncBinary() - _ = yym2265 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2258 || yy2arr2258 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2258[3] { - yym2267 := z.EncBinary() - _ = yym2267 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2258[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2268 := z.EncBinary() - _ = yym2268 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2258 || yy2arr2258 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2258[4] { - yym2270 := z.EncBinary() - _ = yym2270 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2258[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostIP")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2271 := z.EncBinary() - _ = yym2271 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) - } - } - } - if yyr2258 || yy2arr2258 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2258[5] { - yym2273 := z.EncBinary() - _ = yym2273 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodIP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2258[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podIP")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2274 := z.EncBinary() - _ = yym2274 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodIP)) - } - } - } - if yyr2258 || yy2arr2258 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2258[6] { - if x.StartTime == nil { - r.EncodeNil() - } else { - yym2276 := z.EncBinary() - _ = yym2276 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym2276 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym2276 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - 
z.EncFallback(x.StartTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2258[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.StartTime == nil { - r.EncodeNil() - } else { - yym2277 := z.EncBinary() - _ = yym2277 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym2277 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym2277 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } - } - if yyr2258 || yy2arr2258 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2258[7] { - if x.ContainerStatuses == nil { - r.EncodeNil() - } else { - yym2279 := z.EncBinary() - _ = yym2279 - if false { - } else { - h.encSliceContainerStatus(([]ContainerStatus)(x.ContainerStatuses), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2258[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerStatuses")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ContainerStatuses == nil { - r.EncodeNil() - } else { - yym2280 := z.EncBinary() - _ = yym2280 - if false { - } else { - h.encSliceContainerStatus(([]ContainerStatus)(x.ContainerStatuses), e) - } - } - } - } - if yyr2258 || yy2arr2258 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2281 := z.DecBinary() - _ = yym2281 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2282 := r.ContainerType() - if yyct2282 == codecSelferValueTypeMap1234 { - yyl2282 := r.ReadMapStart() - if yyl2282 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2282, d) - } - } else if yyct2282 == codecSelferValueTypeArray1234 { - yyl2282 := r.ReadArrayStart() - if yyl2282 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2282, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2283Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2283Slc - var yyhl2283 bool = l >= 0 - for yyj2283 := 0; ; yyj2283++ { - if yyhl2283 { - if yyj2283 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2283Slc = r.DecodeBytes(yys2283Slc, true, true) - yys2283 := string(yys2283Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2283 { - case "phase": - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = PodPhase(r.DecodeString()) - } - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv2285 := &x.Conditions - yym2286 := z.DecBinary() - _ = yym2286 - if false { - } else { - h.decSlicePodCondition((*[]PodCondition)(yyv2285), d) - } - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - 
x.Reason = string(r.DecodeString()) - } - case "hostIP": - if r.TryDecodeAsNil() { - x.HostIP = "" - } else { - x.HostIP = string(r.DecodeString()) - } - case "podIP": - if r.TryDecodeAsNil() { - x.PodIP = "" - } else { - x.PodIP = string(r.DecodeString()) - } - case "startTime": - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg2_unversioned.Time) - } - yym2292 := z.DecBinary() - _ = yym2292 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym2292 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym2292 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - case "containerStatuses": - if r.TryDecodeAsNil() { - x.ContainerStatuses = nil - } else { - yyv2293 := &x.ContainerStatuses - yym2294 := z.DecBinary() - _ = yym2294 - if false { - } else { - h.decSliceContainerStatus((*[]ContainerStatus)(yyv2293), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys2283) - } // end switch yys2283 - } // end for yyj2283 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2295 int - var yyb2295 bool - var yyhl2295 bool = l >= 0 - yyj2295++ - if yyhl2295 { - yyb2295 = yyj2295 > l - } else { - yyb2295 = r.CheckBreak() - } - if yyb2295 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = PodPhase(r.DecodeString()) - } - yyj2295++ - if yyhl2295 { - yyb2295 = yyj2295 > l - } else { - yyb2295 = r.CheckBreak() - } - if yyb2295 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv2297 := &x.Conditions - yym2298 := z.DecBinary() - _ = yym2298 - if false { - } else { - h.decSlicePodCondition((*[]PodCondition)(yyv2297), d) - } - } - yyj2295++ - if yyhl2295 { - yyb2295 = yyj2295 > l - } else { - yyb2295 = r.CheckBreak() - } - if yyb2295 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - yyj2295++ - if yyhl2295 { - yyb2295 = yyj2295 > l - } else { - yyb2295 = r.CheckBreak() - } - if yyb2295 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj2295++ - if yyhl2295 { - yyb2295 = yyj2295 > l - } else { - yyb2295 = r.CheckBreak() - } - if yyb2295 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostIP = "" - } else { - x.HostIP = string(r.DecodeString()) - } - yyj2295++ - if yyhl2295 { - yyb2295 = yyj2295 > l - } else { - yyb2295 = r.CheckBreak() - } - if yyb2295 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodIP = "" - } else { - x.PodIP = string(r.DecodeString()) - } 
- yyj2295++ - if yyhl2295 { - yyb2295 = yyj2295 > l - } else { - yyb2295 = r.CheckBreak() - } - if yyb2295 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg2_unversioned.Time) - } - yym2304 := z.DecBinary() - _ = yym2304 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym2304 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym2304 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - yyj2295++ - if yyhl2295 { - yyb2295 = yyj2295 > l - } else { - yyb2295 = r.CheckBreak() - } - if yyb2295 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerStatuses = nil - } else { - yyv2305 := &x.ContainerStatuses - yym2306 := z.DecBinary() - _ = yym2306 - if false { - } else { - h.decSliceContainerStatus((*[]ContainerStatus)(yyv2305), d) - } - } - for { - yyj2295++ - if yyhl2295 { - yyb2295 = yyj2295 > l - } else { - yyb2295 = r.CheckBreak() - } - if yyb2295 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2295-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodStatusResult) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2307 := z.EncBinary() - _ = yym2307 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2308 := !z.EncBinary() - yy2arr2308 := z.EncBasicHandle().StructToArray - var yyq2308 [4]bool - _, _, _ = yysep2308, yyq2308, yy2arr2308 - const yyr2308 bool = false - yyq2308[0] = x.Kind != "" - yyq2308[1] = x.APIVersion != "" - yyq2308[2] = true - yyq2308[3] = true - var yynn2308 int - if yyr2308 || yy2arr2308 { - r.EncodeArrayStart(4) - } else { - yynn2308 = 0 - for _, b := range yyq2308 { - if b { - yynn2308++ - } - } - r.EncodeMapStart(yynn2308) - yynn2308 = 0 - } - if yyr2308 || yy2arr2308 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2308[0] { - yym2310 := z.EncBinary() - _ = yym2310 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2308[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2311 := z.EncBinary() - _ = yym2311 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2308 || yy2arr2308 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2308[1] { - yym2313 := z.EncBinary() - _ = yym2313 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2308[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2314 := z.EncBinary() - _ = yym2314 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } 
- if yyr2308 || yy2arr2308 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2308[2] { - yy2316 := &x.ObjectMeta - yy2316.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2308[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2317 := &x.ObjectMeta - yy2317.CodecEncodeSelf(e) - } - } - if yyr2308 || yy2arr2308 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2308[3] { - yy2319 := &x.Status - yy2319.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2308[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2320 := &x.Status - yy2320.CodecEncodeSelf(e) - } - } - if yyr2308 || yy2arr2308 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodStatusResult) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2321 := z.DecBinary() - _ = yym2321 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2322 := r.ContainerType() - if yyct2322 == codecSelferValueTypeMap1234 { - yyl2322 := r.ReadMapStart() - if yyl2322 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2322, d) - } - } else if yyct2322 == codecSelferValueTypeArray1234 { - yyl2322 := r.ReadArrayStart() - if yyl2322 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2322, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodStatusResult) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2323Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2323Slc - var yyhl2323 bool = l >= 0 - for yyj2323 := 0; ; yyj2323++ { - if yyhl2323 { - if yyj2323 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2323Slc = r.DecodeBytes(yys2323Slc, true, true) - yys2323 := string(yys2323Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2323 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv2326 := &x.ObjectMeta - yyv2326.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = PodStatus{} - } else { - yyv2327 := &x.Status - yyv2327.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys2323) - } // end switch yys2323 - } // end for yyj2323 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodStatusResult) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2328 int - var yyb2328 bool - var yyhl2328 bool = l >= 0 - yyj2328++ - if yyhl2328 { - yyb2328 = yyj2328 > l - } else { - yyb2328 = r.CheckBreak() - } - if 
yyb2328 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj2328++ - if yyhl2328 { - yyb2328 = yyj2328 > l - } else { - yyb2328 = r.CheckBreak() - } - if yyb2328 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj2328++ - if yyhl2328 { - yyb2328 = yyj2328 > l - } else { - yyb2328 = r.CheckBreak() - } - if yyb2328 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv2331 := &x.ObjectMeta - yyv2331.CodecDecodeSelf(d) - } - yyj2328++ - if yyhl2328 { - yyb2328 = yyj2328 > l - } else { - yyb2328 = r.CheckBreak() - } - if yyb2328 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = PodStatus{} - } else { - yyv2332 := &x.Status - yyv2332.CodecDecodeSelf(d) - } - for { - yyj2328++ - if yyhl2328 { - yyb2328 = yyj2328 > l - } else { - yyb2328 = r.CheckBreak() - } - if yyb2328 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2328-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Pod) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2333 := z.EncBinary() - _ = yym2333 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2334 := !z.EncBinary() - yy2arr2334 := z.EncBasicHandle().StructToArray - var yyq2334 [5]bool - _, _, _ = yysep2334, yyq2334, yy2arr2334 - const yyr2334 bool = false - yyq2334[0] = x.Kind != "" - yyq2334[1] = x.APIVersion != "" - yyq2334[2] = true - yyq2334[3] = true - yyq2334[4] = true - var yynn2334 int - if yyr2334 || yy2arr2334 { - r.EncodeArrayStart(5) - } else { - yynn2334 = 0 - for _, b := range yyq2334 { - if b { - yynn2334++ - } - } - r.EncodeMapStart(yynn2334) - yynn2334 = 0 - } - if yyr2334 || yy2arr2334 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2334[0] { - yym2336 := z.EncBinary() - _ = yym2336 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2334[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2337 := z.EncBinary() - _ = yym2337 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2334 || yy2arr2334 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2334[1] { - yym2339 := z.EncBinary() - _ = yym2339 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2334[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2340 := z.EncBinary() - _ = yym2340 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2334 || yy2arr2334 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2334[2] { - yy2342 := &x.ObjectMeta - yy2342.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2334[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2343 := &x.ObjectMeta - yy2343.CodecEncodeSelf(e) - } - } - if yyr2334 || yy2arr2334 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2334[3] { - yy2345 := &x.Spec - yy2345.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2334[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2346 := &x.Spec - yy2346.CodecEncodeSelf(e) - } - } - if yyr2334 || yy2arr2334 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2334[4] { - yy2348 := &x.Status - yy2348.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2334[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2349 := &x.Status - yy2349.CodecEncodeSelf(e) - } - } - if yyr2334 || yy2arr2334 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Pod) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2350 := z.DecBinary() - _ = yym2350 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2351 := r.ContainerType() - if yyct2351 == codecSelferValueTypeMap1234 { - yyl2351 := r.ReadMapStart() - if yyl2351 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2351, d) - } - } else if yyct2351 == codecSelferValueTypeArray1234 { - yyl2351 := r.ReadArrayStart() - if yyl2351 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2351, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Pod) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2352Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2352Slc - var yyhl2352 bool = l >= 0 - for yyj2352 := 0; ; yyj2352++ { - if yyhl2352 { - if yyj2352 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2352Slc = r.DecodeBytes(yys2352Slc, true, true) - yys2352 := string(yys2352Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2352 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv2355 := &x.ObjectMeta - yyv2355.CodecDecodeSelf(d) - } - case 
"spec": - if r.TryDecodeAsNil() { - x.Spec = PodSpec{} - } else { - yyv2356 := &x.Spec - yyv2356.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = PodStatus{} - } else { - yyv2357 := &x.Status - yyv2357.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys2352) - } // end switch yys2352 - } // end for yyj2352 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Pod) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2358 int - var yyb2358 bool - var yyhl2358 bool = l >= 0 - yyj2358++ - if yyhl2358 { - yyb2358 = yyj2358 > l - } else { - yyb2358 = r.CheckBreak() - } - if yyb2358 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj2358++ - if yyhl2358 { - yyb2358 = yyj2358 > l - } else { - yyb2358 = r.CheckBreak() - } - if yyb2358 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj2358++ - if yyhl2358 { - yyb2358 = yyj2358 > l - } else { - yyb2358 = r.CheckBreak() - } - if yyb2358 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv2361 := &x.ObjectMeta - yyv2361.CodecDecodeSelf(d) - } - yyj2358++ - if yyhl2358 { - yyb2358 = yyj2358 > l - } else { - yyb2358 = r.CheckBreak() - } - if yyb2358 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PodSpec{} - } else { - yyv2362 := &x.Spec - yyv2362.CodecDecodeSelf(d) - } - yyj2358++ - if yyhl2358 { - yyb2358 = yyj2358 > l - } else { - yyb2358 = r.CheckBreak() - } - if yyb2358 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = PodStatus{} - } else { - yyv2363 := &x.Status - yyv2363.CodecDecodeSelf(d) - } - for { - yyj2358++ - if yyhl2358 { - yyb2358 = yyj2358 > l - } else { - yyb2358 = r.CheckBreak() - } - if yyb2358 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2358-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2364 := z.EncBinary() - _ = yym2364 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2365 := !z.EncBinary() - yy2arr2365 := z.EncBasicHandle().StructToArray - var yyq2365 [2]bool - _, _, _ = yysep2365, yyq2365, yy2arr2365 - const yyr2365 bool = false - yyq2365[0] = true - yyq2365[1] = true - var yynn2365 int - if yyr2365 || yy2arr2365 { - r.EncodeArrayStart(2) - } else { - yynn2365 = 0 - for _, b := range yyq2365 { - if b { - yynn2365++ - } - } - r.EncodeMapStart(yynn2365) - yynn2365 = 0 - } - if yyr2365 || yy2arr2365 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2365[0] { - 
yy2367 := &x.ObjectMeta - yy2367.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2365[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2368 := &x.ObjectMeta - yy2368.CodecEncodeSelf(e) - } - } - if yyr2365 || yy2arr2365 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2365[1] { - yy2370 := &x.Spec - yy2370.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2365[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2371 := &x.Spec - yy2371.CodecEncodeSelf(e) - } - } - if yyr2365 || yy2arr2365 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2372 := z.DecBinary() - _ = yym2372 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2373 := r.ContainerType() - if yyct2373 == codecSelferValueTypeMap1234 { - yyl2373 := r.ReadMapStart() - if yyl2373 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2373, d) - } - } else if yyct2373 == codecSelferValueTypeArray1234 { - yyl2373 := r.ReadArrayStart() - if yyl2373 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2373, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2374Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2374Slc - var yyhl2374 bool = l >= 0 - for yyj2374 := 0; ; yyj2374++ { - if yyhl2374 { - if yyj2374 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2374Slc = r.DecodeBytes(yys2374Slc, true, true) - yys2374 := string(yys2374Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2374 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv2375 := &x.ObjectMeta - yyv2375.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PodSpec{} - } else { - yyv2376 := &x.Spec - yyv2376.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys2374) - } // end switch yys2374 - } // end for yyj2374 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2377 int - var yyb2377 bool - var yyhl2377 bool = l >= 0 - yyj2377++ - if yyhl2377 { - yyb2377 = yyj2377 > l - } else { - yyb2377 = r.CheckBreak() - } - if yyb2377 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv2378 := &x.ObjectMeta - yyv2378.CodecDecodeSelf(d) - } - yyj2377++ - if yyhl2377 { - yyb2377 = yyj2377 > l - } else { - yyb2377 = 
r.CheckBreak() - } - if yyb2377 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PodSpec{} - } else { - yyv2379 := &x.Spec - yyv2379.CodecDecodeSelf(d) - } - for { - yyj2377++ - if yyhl2377 { - yyb2377 = yyj2377 > l - } else { - yyb2377 = r.CheckBreak() - } - if yyb2377 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2377-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodTemplate) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2380 := z.EncBinary() - _ = yym2380 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2381 := !z.EncBinary() - yy2arr2381 := z.EncBasicHandle().StructToArray - var yyq2381 [4]bool - _, _, _ = yysep2381, yyq2381, yy2arr2381 - const yyr2381 bool = false - yyq2381[0] = x.Kind != "" - yyq2381[1] = x.APIVersion != "" - yyq2381[2] = true - yyq2381[3] = true - var yynn2381 int - if yyr2381 || yy2arr2381 { - r.EncodeArrayStart(4) - } else { - yynn2381 = 0 - for _, b := range yyq2381 { - if b { - yynn2381++ - } - } - r.EncodeMapStart(yynn2381) - yynn2381 = 0 - } - if yyr2381 || yy2arr2381 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2381[0] { - yym2383 := z.EncBinary() - _ = yym2383 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2381[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2384 := z.EncBinary() - _ = yym2384 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2381 || yy2arr2381 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2381[1] { - yym2386 := z.EncBinary() - _ = yym2386 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2381[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2387 := z.EncBinary() - _ = yym2387 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2381 || yy2arr2381 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2381[2] { - yy2389 := &x.ObjectMeta - yy2389.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2381[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2390 := &x.ObjectMeta - yy2390.CodecEncodeSelf(e) - } - } - if yyr2381 || yy2arr2381 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2381[3] { - yy2392 := &x.Template - yy2392.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2381[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2393 := &x.Template - 
yy2393.CodecEncodeSelf(e) - } - } - if yyr2381 || yy2arr2381 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodTemplate) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2394 := z.DecBinary() - _ = yym2394 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2395 := r.ContainerType() - if yyct2395 == codecSelferValueTypeMap1234 { - yyl2395 := r.ReadMapStart() - if yyl2395 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2395, d) - } - } else if yyct2395 == codecSelferValueTypeArray1234 { - yyl2395 := r.ReadArrayStart() - if yyl2395 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2395, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2396Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2396Slc - var yyhl2396 bool = l >= 0 - for yyj2396 := 0; ; yyj2396++ { - if yyhl2396 { - if yyj2396 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2396Slc = r.DecodeBytes(yys2396Slc, true, true) - yys2396 := string(yys2396Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2396 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv2399 := &x.ObjectMeta - yyv2399.CodecDecodeSelf(d) - } - case "template": - if r.TryDecodeAsNil() { - x.Template = PodTemplateSpec{} - } else { - yyv2400 := &x.Template - yyv2400.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys2396) - } // end switch yys2396 - } // end for yyj2396 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2401 int - var yyb2401 bool - var yyhl2401 bool = l >= 0 - yyj2401++ - if yyhl2401 { - yyb2401 = yyj2401 > l - } else { - yyb2401 = r.CheckBreak() - } - if yyb2401 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj2401++ - if yyhl2401 { - yyb2401 = yyj2401 > l - } else { - yyb2401 = r.CheckBreak() - } - if yyb2401 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj2401++ - if yyhl2401 { - yyb2401 = yyj2401 > l - } else { - yyb2401 = r.CheckBreak() - } - if yyb2401 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - 
} else { - yyv2404 := &x.ObjectMeta - yyv2404.CodecDecodeSelf(d) - } - yyj2401++ - if yyhl2401 { - yyb2401 = yyj2401 > l - } else { - yyb2401 = r.CheckBreak() - } - if yyb2401 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = PodTemplateSpec{} - } else { - yyv2405 := &x.Template - yyv2405.CodecDecodeSelf(d) - } - for { - yyj2401++ - if yyhl2401 { - yyb2401 = yyj2401 > l - } else { - yyb2401 = r.CheckBreak() - } - if yyb2401 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2401-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodTemplateList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2406 := z.EncBinary() - _ = yym2406 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2407 := !z.EncBinary() - yy2arr2407 := z.EncBasicHandle().StructToArray - var yyq2407 [4]bool - _, _, _ = yysep2407, yyq2407, yy2arr2407 - const yyr2407 bool = false - yyq2407[0] = x.Kind != "" - yyq2407[1] = x.APIVersion != "" - yyq2407[2] = true - var yynn2407 int - if yyr2407 || yy2arr2407 { - r.EncodeArrayStart(4) - } else { - yynn2407 = 1 - for _, b := range yyq2407 { - if b { - yynn2407++ - } - } - r.EncodeMapStart(yynn2407) - yynn2407 = 0 - } - if yyr2407 || yy2arr2407 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2407[0] { - yym2409 := z.EncBinary() - _ = yym2409 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2407[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2410 := z.EncBinary() - _ = yym2410 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2407 || yy2arr2407 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2407[1] { - yym2412 := z.EncBinary() - _ = yym2412 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2407[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2413 := z.EncBinary() - _ = yym2413 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2407 || yy2arr2407 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2407[2] { - yy2415 := &x.ListMeta - yym2416 := z.EncBinary() - _ = yym2416 - if false { - } else if z.HasExtensions() && z.EncExt(yy2415) { - } else { - z.EncFallback(yy2415) - } - } else { - r.EncodeNil() - } - } else { - if yyq2407[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2417 := &x.ListMeta - yym2418 := z.EncBinary() - _ = yym2418 - if false { - } else if z.HasExtensions() && z.EncExt(yy2417) { - } else { - z.EncFallback(yy2417) - } - } - } - if yyr2407 || yy2arr2407 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym2420 := z.EncBinary() - _ = yym2420 - if false { - } else { - h.encSlicePodTemplate(([]PodTemplate)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym2421 := z.EncBinary() - _ = yym2421 - if false { - } else { - h.encSlicePodTemplate(([]PodTemplate)(x.Items), e) - } - } - } - if yyr2407 || yy2arr2407 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodTemplateList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2422 := z.DecBinary() - _ = yym2422 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2423 := r.ContainerType() - if yyct2423 == codecSelferValueTypeMap1234 { - yyl2423 := r.ReadMapStart() - if yyl2423 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2423, d) - } - } else if yyct2423 == codecSelferValueTypeArray1234 { - yyl2423 := r.ReadArrayStart() - if yyl2423 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2423, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodTemplateList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2424Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2424Slc - var yyhl2424 bool = l >= 0 - for yyj2424 := 0; ; yyj2424++ { - if yyhl2424 { - if yyj2424 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2424Slc = r.DecodeBytes(yys2424Slc, true, true) - yys2424 := string(yys2424Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2424 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv2427 := &x.ListMeta - yym2428 := z.DecBinary() - _ = yym2428 - if false { - } else if z.HasExtensions() && z.DecExt(yyv2427) { - } else { - z.DecFallback(yyv2427, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv2429 := &x.Items - yym2430 := z.DecBinary() - _ = yym2430 - if false { - } else { - h.decSlicePodTemplate((*[]PodTemplate)(yyv2429), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys2424) - } // end switch yys2424 - } // end for yyj2424 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodTemplateList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2431 int - var yyb2431 bool - var yyhl2431 bool = l >= 0 - yyj2431++ - if yyhl2431 { - yyb2431 = yyj2431 > l - } else { - yyb2431 = r.CheckBreak() - } - if yyb2431 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj2431++ - if yyhl2431 { - yyb2431 = yyj2431 > l - } else { - yyb2431 = r.CheckBreak() - } - if yyb2431 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj2431++ - if yyhl2431 { - yyb2431 = yyj2431 > l - } else { - yyb2431 = r.CheckBreak() - } - if yyb2431 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv2434 := &x.ListMeta - yym2435 := z.DecBinary() - _ = yym2435 - if false { - } else if z.HasExtensions() && z.DecExt(yyv2434) { - } else { - z.DecFallback(yyv2434, false) - } - } - yyj2431++ - if yyhl2431 { - yyb2431 = yyj2431 > l - } else { - yyb2431 = r.CheckBreak() - } - if yyb2431 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv2436 := &x.Items - yym2437 := z.DecBinary() - _ = yym2437 - if false { - } else { - h.decSlicePodTemplate((*[]PodTemplate)(yyv2436), d) - } - } - for { - yyj2431++ - if yyhl2431 { - yyb2431 = yyj2431 > l - } else { - yyb2431 = r.CheckBreak() - } - if yyb2431 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2431-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicationControllerSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2438 := z.EncBinary() - _ = yym2438 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2439 := !z.EncBinary() - yy2arr2439 := z.EncBasicHandle().StructToArray - var yyq2439 [4]bool - _, _, _ = yysep2439, yyq2439, yy2arr2439 - const yyr2439 bool = false - yyq2439[1] = x.MinReadySeconds != 0 - yyq2439[3] = x.Template != nil - var yynn2439 int - if yyr2439 || yy2arr2439 { - r.EncodeArrayStart(4) - } else { - yynn2439 = 2 - for _, b := range yyq2439 { - if b { - yynn2439++ - } - } - r.EncodeMapStart(yynn2439) - yynn2439 = 0 - } - if yyr2439 || yy2arr2439 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2441 := z.EncBinary() - _ = yym2441 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2442 := z.EncBinary() - _ = yym2442 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr2439 || yy2arr2439 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2439[1] { - yym2444 := z.EncBinary() - _ = yym2444 - if false { - } else { - r.EncodeInt(int64(x.MinReadySeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2439[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2445 := z.EncBinary() - _ = yym2445 - if false { - } else { - 
r.EncodeInt(int64(x.MinReadySeconds)) - } - } - } - if yyr2439 || yy2arr2439 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym2447 := z.EncBinary() - _ = yym2447 - if false { - } else { - z.F.EncMapStringStringV(x.Selector, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym2448 := z.EncBinary() - _ = yym2448 - if false { - } else { - z.F.EncMapStringStringV(x.Selector, false, e) - } - } - } - if yyr2439 || yy2arr2439 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2439[3] { - if x.Template == nil { - r.EncodeNil() - } else { - x.Template.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2439[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Template == nil { - r.EncodeNil() - } else { - x.Template.CodecEncodeSelf(e) - } - } - } - if yyr2439 || yy2arr2439 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicationControllerSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2450 := z.DecBinary() - _ = yym2450 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2451 := r.ContainerType() - if yyct2451 == codecSelferValueTypeMap1234 { - yyl2451 := r.ReadMapStart() - if yyl2451 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2451, d) - } - } else if yyct2451 == codecSelferValueTypeArray1234 { - yyl2451 := r.ReadArrayStart() - if yyl2451 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2451, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicationControllerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2452Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2452Slc - var yyhl2452 bool = l >= 0 - for yyj2452 := 0; ; yyj2452++ { - if yyhl2452 { - if yyj2452 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2452Slc = r.DecodeBytes(yys2452Slc, true, true) - yys2452 := string(yys2452Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2452 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "minReadySeconds": - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - x.MinReadySeconds = int32(r.DecodeInt(32)) - } - case "selector": - if r.TryDecodeAsNil() { - x.Selector = nil - } else { - yyv2455 := &x.Selector - yym2456 := z.DecBinary() - _ = yym2456 - if false { - } else { - z.F.DecMapStringStringX(yyv2455, false, d) - } - } - case "template": - if r.TryDecodeAsNil() { - if x.Template != nil { - x.Template = nil - } - } else { - if x.Template == nil { - x.Template = new(PodTemplateSpec) - } - x.Template.CodecDecodeSelf(d) - } - default: - 
z.DecStructFieldNotFound(-1, yys2452) - } // end switch yys2452 - } // end for yyj2452 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicationControllerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2458 int - var yyb2458 bool - var yyhl2458 bool = l >= 0 - yyj2458++ - if yyhl2458 { - yyb2458 = yyj2458 > l - } else { - yyb2458 = r.CheckBreak() - } - if yyb2458 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - yyj2458++ - if yyhl2458 { - yyb2458 = yyj2458 > l - } else { - yyb2458 = r.CheckBreak() - } - if yyb2458 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - x.MinReadySeconds = int32(r.DecodeInt(32)) - } - yyj2458++ - if yyhl2458 { - yyb2458 = yyj2458 > l - } else { - yyb2458 = r.CheckBreak() - } - if yyb2458 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Selector = nil - } else { - yyv2461 := &x.Selector - yym2462 := z.DecBinary() - _ = yym2462 - if false { - } else { - z.F.DecMapStringStringX(yyv2461, false, d) - } - } - yyj2458++ - if yyhl2458 { - yyb2458 = yyj2458 > l - } else { - yyb2458 = r.CheckBreak() - } - if yyb2458 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Template != nil { - x.Template = nil - } - } else { - if x.Template == nil { - x.Template = new(PodTemplateSpec) - } - x.Template.CodecDecodeSelf(d) - } - for { - yyj2458++ - if yyhl2458 { - yyb2458 = yyj2458 > l - } else { - yyb2458 = r.CheckBreak() - } - if yyb2458 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2458-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2464 := z.EncBinary() - _ = yym2464 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2465 := !z.EncBinary() - yy2arr2465 := z.EncBasicHandle().StructToArray - var yyq2465 [6]bool - _, _, _ = yysep2465, yyq2465, yy2arr2465 - const yyr2465 bool = false - yyq2465[1] = x.FullyLabeledReplicas != 0 - yyq2465[2] = x.ReadyReplicas != 0 - yyq2465[3] = x.AvailableReplicas != 0 - yyq2465[4] = x.ObservedGeneration != 0 - yyq2465[5] = len(x.Conditions) != 0 - var yynn2465 int - if yyr2465 || yy2arr2465 { - r.EncodeArrayStart(6) - } else { - yynn2465 = 1 - for _, b := range yyq2465 { - if b { - yynn2465++ - } - } - r.EncodeMapStart(yynn2465) - yynn2465 = 0 - } - if yyr2465 || yy2arr2465 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2467 := z.EncBinary() - _ = yym2467 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - 
yym2468 := z.EncBinary() - _ = yym2468 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr2465 || yy2arr2465 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2465[1] { - yym2470 := z.EncBinary() - _ = yym2470 - if false { - } else { - r.EncodeInt(int64(x.FullyLabeledReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2465[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2471 := z.EncBinary() - _ = yym2471 - if false { - } else { - r.EncodeInt(int64(x.FullyLabeledReplicas)) - } - } - } - if yyr2465 || yy2arr2465 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2465[2] { - yym2473 := z.EncBinary() - _ = yym2473 - if false { - } else { - r.EncodeInt(int64(x.ReadyReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2465[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readyReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2474 := z.EncBinary() - _ = yym2474 - if false { - } else { - r.EncodeInt(int64(x.ReadyReplicas)) - } - } - } - if yyr2465 || yy2arr2465 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2465[3] { - yym2476 := z.EncBinary() - _ = yym2476 - if false { - } else { - r.EncodeInt(int64(x.AvailableReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2465[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2477 := z.EncBinary() - _ = yym2477 - if false { - } else { - r.EncodeInt(int64(x.AvailableReplicas)) - } - } - } - if yyr2465 || yy2arr2465 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2465[4] { - yym2479 := z.EncBinary() - _ = yym2479 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2465[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2480 := z.EncBinary() - _ = yym2480 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } - } - if yyr2465 || yy2arr2465 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2465[5] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym2482 := z.EncBinary() - _ = yym2482 - if false { - } else { - h.encSliceReplicationControllerCondition(([]ReplicationControllerCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2465[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym2483 := z.EncBinary() - _ = yym2483 - if false { - } else { - h.encSliceReplicationControllerCondition(([]ReplicationControllerCondition)(x.Conditions), e) - } - } - } - } - if yyr2465 || yy2arr2465 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicationControllerStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2484 := z.DecBinary() - _ = yym2484 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2485 := r.ContainerType() - if yyct2485 == codecSelferValueTypeMap1234 { - yyl2485 := r.ReadMapStart() - if yyl2485 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2485, d) - } - } else if yyct2485 == codecSelferValueTypeArray1234 { - yyl2485 := r.ReadArrayStart() - if yyl2485 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2485, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicationControllerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2486Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2486Slc - var yyhl2486 bool = l >= 0 - for yyj2486 := 0; ; yyj2486++ { - if yyhl2486 { - if yyj2486 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2486Slc = r.DecodeBytes(yys2486Slc, true, true) - yys2486 := string(yys2486Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2486 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "fullyLabeledReplicas": - if r.TryDecodeAsNil() { - x.FullyLabeledReplicas = 0 - } else { - x.FullyLabeledReplicas = int32(r.DecodeInt(32)) - } - case "readyReplicas": - if r.TryDecodeAsNil() { - x.ReadyReplicas = 0 - } else { - x.ReadyReplicas = int32(r.DecodeInt(32)) - } - case "availableReplicas": - if r.TryDecodeAsNil() { - x.AvailableReplicas = 0 - } else { - x.AvailableReplicas = int32(r.DecodeInt(32)) - } - case "observedGeneration": - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) - } - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv2492 := &x.Conditions - yym2493 := z.DecBinary() - _ = yym2493 - if false { - } else { - h.decSliceReplicationControllerCondition((*[]ReplicationControllerCondition)(yyv2492), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys2486) - } // end switch yys2486 - } // end for yyj2486 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2494 int - var yyb2494 bool - var yyhl2494 bool = l >= 0 - yyj2494++ - if yyhl2494 { - yyb2494 = yyj2494 > l - } else { - yyb2494 = r.CheckBreak() - } - if yyb2494 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - yyj2494++ - if yyhl2494 { - yyb2494 = yyj2494 > l - } else { - yyb2494 = r.CheckBreak() - } - if yyb2494 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FullyLabeledReplicas = 0 - } else { - x.FullyLabeledReplicas = int32(r.DecodeInt(32)) - } - yyj2494++ - if yyhl2494 { - yyb2494 = yyj2494 > l - } else { - yyb2494 = r.CheckBreak() - } - if yyb2494 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadyReplicas = 0 - } else { - x.ReadyReplicas = int32(r.DecodeInt(32)) - } - yyj2494++ - if yyhl2494 { - yyb2494 = yyj2494 > l - } else { - yyb2494 = r.CheckBreak() - } - if yyb2494 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AvailableReplicas = 0 - } else { - x.AvailableReplicas = int32(r.DecodeInt(32)) - } - yyj2494++ - if yyhl2494 { - yyb2494 = yyj2494 > l - } else { - yyb2494 = r.CheckBreak() - } - if yyb2494 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) - } - yyj2494++ - if yyhl2494 { - yyb2494 = yyj2494 > l - } else { - yyb2494 = r.CheckBreak() - } - if yyb2494 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv2500 := &x.Conditions - yym2501 := z.DecBinary() - _ = yym2501 - if false { - } else { - h.decSliceReplicationControllerCondition((*[]ReplicationControllerCondition)(yyv2500), d) - } - } - for { - yyj2494++ - if yyhl2494 { - yyb2494 = yyj2494 > l - } else { - yyb2494 = r.CheckBreak() - } - if yyb2494 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2494-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ReplicationControllerConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym2502 := z.EncBinary() - _ = yym2502 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ReplicationControllerConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2503 := z.DecBinary() - _ = yym2503 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ReplicationControllerCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2504 := z.EncBinary() - _ = yym2504 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2505 := !z.EncBinary() - yy2arr2505 := z.EncBasicHandle().StructToArray - var yyq2505 [5]bool - _, _, _ = yysep2505, yyq2505, yy2arr2505 - const yyr2505 bool = false - yyq2505[2] = true - yyq2505[3] = x.Reason != "" - yyq2505[4] = x.Message != "" - var yynn2505 int - if yyr2505 || yy2arr2505 { - r.EncodeArrayStart(5) - } else { - yynn2505 = 2 - for _, b := range yyq2505 { - if b { - yynn2505++ - } - } - r.EncodeMapStart(yynn2505) - yynn2505 = 0 - } - if yyr2505 || yy2arr2505 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if 
yyr2505 || yy2arr2505 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Status.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Status.CodecEncodeSelf(e) - } - if yyr2505 || yy2arr2505 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2505[2] { - yy2509 := &x.LastTransitionTime - yym2510 := z.EncBinary() - _ = yym2510 - if false { - } else if z.HasExtensions() && z.EncExt(yy2509) { - } else if yym2510 { - z.EncBinaryMarshal(yy2509) - } else if !yym2510 && z.IsJSONHandle() { - z.EncJSONMarshal(yy2509) - } else { - z.EncFallback(yy2509) - } - } else { - r.EncodeNil() - } - } else { - if yyq2505[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2511 := &x.LastTransitionTime - yym2512 := z.EncBinary() - _ = yym2512 - if false { - } else if z.HasExtensions() && z.EncExt(yy2511) { - } else if yym2512 { - z.EncBinaryMarshal(yy2511) - } else if !yym2512 && z.IsJSONHandle() { - z.EncJSONMarshal(yy2511) - } else { - z.EncFallback(yy2511) - } - } - } - if yyr2505 || yy2arr2505 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2505[3] { - yym2514 := z.EncBinary() - _ = yym2514 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2505[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2515 := z.EncBinary() - _ = yym2515 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2505 || yy2arr2505 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2505[4] { - yym2517 := z.EncBinary() - _ = yym2517 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2505[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2518 := z.EncBinary() - _ = yym2518 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2505 || yy2arr2505 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicationControllerCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2519 := z.DecBinary() - _ = yym2519 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2520 := r.ContainerType() - if yyct2520 == codecSelferValueTypeMap1234 { - yyl2520 := r.ReadMapStart() - if yyl2520 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2520, d) - } - } else if yyct2520 == codecSelferValueTypeArray1234 { - yyl2520 := r.ReadArrayStart() - if yyl2520 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2520, d) - } - } else { - 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicationControllerCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2521Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2521Slc - var yyhl2521 bool = l >= 0 - for yyj2521 := 0; ; yyj2521++ { - if yyhl2521 { - if yyj2521 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2521Slc = r.DecodeBytes(yys2521Slc, true, true) - yys2521 := string(yys2521Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2521 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ReplicationControllerConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = ConditionStatus(r.DecodeString()) - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv2524 := &x.LastTransitionTime - yym2525 := z.DecBinary() - _ = yym2525 - if false { - } else if z.HasExtensions() && z.DecExt(yyv2524) { - } else if yym2525 { - z.DecBinaryUnmarshal(yyv2524) - } else if !yym2525 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv2524) - } else { - z.DecFallback(yyv2524, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys2521) - } // end switch yys2521 - } // end for yyj2521 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicationControllerCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2528 int - var yyb2528 bool - var yyhl2528 bool = l >= 0 - yyj2528++ - if yyhl2528 { - yyb2528 = yyj2528 > l - } else { - yyb2528 = r.CheckBreak() - } - if yyb2528 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ReplicationControllerConditionType(r.DecodeString()) - } - yyj2528++ - if yyhl2528 { - yyb2528 = yyj2528 > l - } else { - yyb2528 = r.CheckBreak() - } - if yyb2528 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = ConditionStatus(r.DecodeString()) - } - yyj2528++ - if yyhl2528 { - yyb2528 = yyj2528 > l - } else { - yyb2528 = r.CheckBreak() - } - if yyb2528 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv2531 := &x.LastTransitionTime - yym2532 := z.DecBinary() - _ = yym2532 - if false { - } else if z.HasExtensions() && z.DecExt(yyv2531) { - } else if yym2532 { - z.DecBinaryUnmarshal(yyv2531) - } else if !yym2532 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv2531) - } else { - z.DecFallback(yyv2531, false) - } - } - yyj2528++ - if yyhl2528 { - yyb2528 = yyj2528 > l - } else { - yyb2528 = r.CheckBreak() - } - if yyb2528 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj2528++ - if yyhl2528 { - yyb2528 = yyj2528 > l - } else { - yyb2528 = r.CheckBreak() - } - if yyb2528 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj2528++ - if yyhl2528 { - yyb2528 = yyj2528 > l - } else { - yyb2528 = r.CheckBreak() - } - if yyb2528 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2528-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicationController) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2535 := z.EncBinary() - _ = yym2535 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2536 := !z.EncBinary() - yy2arr2536 := z.EncBasicHandle().StructToArray - var yyq2536 [5]bool - _, _, _ = yysep2536, yyq2536, yy2arr2536 - const yyr2536 bool = false - yyq2536[0] = x.Kind != "" - yyq2536[1] = x.APIVersion != "" - yyq2536[2] = true - yyq2536[3] = true - yyq2536[4] = true - var yynn2536 int - if yyr2536 || yy2arr2536 { - r.EncodeArrayStart(5) - } else { - yynn2536 = 0 - for _, b := range yyq2536 { - if b { - yynn2536++ - } - } - r.EncodeMapStart(yynn2536) - yynn2536 = 0 - } - if yyr2536 || yy2arr2536 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2536[0] { - yym2538 := z.EncBinary() - _ = yym2538 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2536[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2539 := z.EncBinary() - _ = yym2539 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2536 || yy2arr2536 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2536[1] { - yym2541 := z.EncBinary() - _ = yym2541 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2536[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2542 := z.EncBinary() - _ = yym2542 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2536 || yy2arr2536 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2536[2] { - yy2544 := &x.ObjectMeta - yy2544.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2536[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2545 := &x.ObjectMeta - yy2545.CodecEncodeSelf(e) - } - } - if yyr2536 || yy2arr2536 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2536[3] { - yy2547 := 
&x.Spec - yy2547.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2536[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2548 := &x.Spec - yy2548.CodecEncodeSelf(e) - } - } - if yyr2536 || yy2arr2536 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2536[4] { - yy2550 := &x.Status - yy2550.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2536[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2551 := &x.Status - yy2551.CodecEncodeSelf(e) - } - } - if yyr2536 || yy2arr2536 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicationController) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2552 := z.DecBinary() - _ = yym2552 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2553 := r.ContainerType() - if yyct2553 == codecSelferValueTypeMap1234 { - yyl2553 := r.ReadMapStart() - if yyl2553 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2553, d) - } - } else if yyct2553 == codecSelferValueTypeArray1234 { - yyl2553 := r.ReadArrayStart() - if yyl2553 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2553, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicationController) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2554Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2554Slc - var yyhl2554 bool = l >= 0 - for yyj2554 := 0; ; yyj2554++ { - if yyhl2554 { - if yyj2554 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2554Slc = r.DecodeBytes(yys2554Slc, true, true) - yys2554 := string(yys2554Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2554 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv2557 := &x.ObjectMeta - yyv2557.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ReplicationControllerSpec{} - } else { - yyv2558 := &x.Spec - yyv2558.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ReplicationControllerStatus{} - } else { - yyv2559 := &x.Status - yyv2559.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys2554) - } // end switch yys2554 - } // end for yyj2554 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicationController) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2560 int - var yyb2560 bool - var yyhl2560 bool = l >= 0 - yyj2560++ - if yyhl2560 { - yyb2560 = yyj2560 > l - } 
else { - yyb2560 = r.CheckBreak() - } - if yyb2560 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj2560++ - if yyhl2560 { - yyb2560 = yyj2560 > l - } else { - yyb2560 = r.CheckBreak() - } - if yyb2560 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj2560++ - if yyhl2560 { - yyb2560 = yyj2560 > l - } else { - yyb2560 = r.CheckBreak() - } - if yyb2560 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv2563 := &x.ObjectMeta - yyv2563.CodecDecodeSelf(d) - } - yyj2560++ - if yyhl2560 { - yyb2560 = yyj2560 > l - } else { - yyb2560 = r.CheckBreak() - } - if yyb2560 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ReplicationControllerSpec{} - } else { - yyv2564 := &x.Spec - yyv2564.CodecDecodeSelf(d) - } - yyj2560++ - if yyhl2560 { - yyb2560 = yyj2560 > l - } else { - yyb2560 = r.CheckBreak() - } - if yyb2560 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ReplicationControllerStatus{} - } else { - yyv2565 := &x.Status - yyv2565.CodecDecodeSelf(d) - } - for { - yyj2560++ - if yyhl2560 { - yyb2560 = yyj2560 > l - } else { - yyb2560 = r.CheckBreak() - } - if yyb2560 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2560-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicationControllerList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2566 := z.EncBinary() - _ = yym2566 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2567 := !z.EncBinary() - yy2arr2567 := z.EncBasicHandle().StructToArray - var yyq2567 [4]bool - _, _, _ = yysep2567, yyq2567, yy2arr2567 - const yyr2567 bool = false - yyq2567[0] = x.Kind != "" - yyq2567[1] = x.APIVersion != "" - yyq2567[2] = true - var yynn2567 int - if yyr2567 || yy2arr2567 { - r.EncodeArrayStart(4) - } else { - yynn2567 = 1 - for _, b := range yyq2567 { - if b { - yynn2567++ - } - } - r.EncodeMapStart(yynn2567) - yynn2567 = 0 - } - if yyr2567 || yy2arr2567 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2567[0] { - yym2569 := z.EncBinary() - _ = yym2569 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2567[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2570 := z.EncBinary() - _ = yym2570 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2567 || yy2arr2567 { - 
[... vendored, machine-generated codec hunk elided: this portion of the diff removes the generated CodecEncodeSelf / CodecDecodeSelf / codecDecodeSelfFromMap / codecDecodeSelfFromArray methods for ReplicationControllerList, ServiceList, ServiceAffinity, ServiceType, ServiceStatus, LoadBalancerStatus, LoadBalancerIngress, ServiceSpec, ServicePort, Service, ServiceAccount, and ServiceAccountList from the vendored Kubernetes API types package ...]
yyq2829[1] { - yym2834 := z.EncBinary() - _ = yym2834 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2829[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2835 := z.EncBinary() - _ = yym2835 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2829 || yy2arr2829 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2829[2] { - yy2837 := &x.ListMeta - yym2838 := z.EncBinary() - _ = yym2838 - if false { - } else if z.HasExtensions() && z.EncExt(yy2837) { - } else { - z.EncFallback(yy2837) - } - } else { - r.EncodeNil() - } - } else { - if yyq2829[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2839 := &x.ListMeta - yym2840 := z.EncBinary() - _ = yym2840 - if false { - } else if z.HasExtensions() && z.EncExt(yy2839) { - } else { - z.EncFallback(yy2839) - } - } - } - if yyr2829 || yy2arr2829 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym2842 := z.EncBinary() - _ = yym2842 - if false { - } else { - h.encSliceServiceAccount(([]ServiceAccount)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym2843 := z.EncBinary() - _ = yym2843 - if false { - } else { - h.encSliceServiceAccount(([]ServiceAccount)(x.Items), e) - } - } - } - if yyr2829 || yy2arr2829 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServiceAccountList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2844 := z.DecBinary() - _ = yym2844 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2845 := r.ContainerType() - if yyct2845 == codecSelferValueTypeMap1234 { - yyl2845 := r.ReadMapStart() - if yyl2845 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2845, d) - } - } else if yyct2845 == codecSelferValueTypeArray1234 { - yyl2845 := r.ReadArrayStart() - if yyl2845 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2845, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServiceAccountList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2846Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2846Slc - var yyhl2846 bool = l >= 0 - for yyj2846 := 0; ; yyj2846++ { - if yyhl2846 { - if yyj2846 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2846Slc = r.DecodeBytes(yys2846Slc, true, true) - yys2846 := string(yys2846Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2846 { - case "kind": - if 
r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv2849 := &x.ListMeta - yym2850 := z.DecBinary() - _ = yym2850 - if false { - } else if z.HasExtensions() && z.DecExt(yyv2849) { - } else { - z.DecFallback(yyv2849, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv2851 := &x.Items - yym2852 := z.DecBinary() - _ = yym2852 - if false { - } else { - h.decSliceServiceAccount((*[]ServiceAccount)(yyv2851), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys2846) - } // end switch yys2846 - } // end for yyj2846 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServiceAccountList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2853 int - var yyb2853 bool - var yyhl2853 bool = l >= 0 - yyj2853++ - if yyhl2853 { - yyb2853 = yyj2853 > l - } else { - yyb2853 = r.CheckBreak() - } - if yyb2853 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj2853++ - if yyhl2853 { - yyb2853 = yyj2853 > l - } else { - yyb2853 = r.CheckBreak() - } - if yyb2853 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj2853++ - if yyhl2853 { - yyb2853 = yyj2853 > l - } else { - yyb2853 = r.CheckBreak() - } - if yyb2853 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv2856 := &x.ListMeta - yym2857 := z.DecBinary() - _ = yym2857 - if false { - } else if z.HasExtensions() && z.DecExt(yyv2856) { - } else { - z.DecFallback(yyv2856, false) - } - } - yyj2853++ - if yyhl2853 { - yyb2853 = yyj2853 > l - } else { - yyb2853 = r.CheckBreak() - } - if yyb2853 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv2858 := &x.Items - yym2859 := z.DecBinary() - _ = yym2859 - if false { - } else { - h.decSliceServiceAccount((*[]ServiceAccount)(yyv2858), d) - } - } - for { - yyj2853++ - if yyhl2853 { - yyb2853 = yyj2853 > l - } else { - yyb2853 = r.CheckBreak() - } - if yyb2853 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2853-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Endpoints) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2860 := z.EncBinary() - _ = yym2860 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2861 := !z.EncBinary() - yy2arr2861 := z.EncBasicHandle().StructToArray - var yyq2861 [4]bool - _, _, _ = yysep2861, yyq2861, yy2arr2861 - const yyr2861 bool = false - yyq2861[0] = 
x.Kind != "" - yyq2861[1] = x.APIVersion != "" - yyq2861[2] = true - var yynn2861 int - if yyr2861 || yy2arr2861 { - r.EncodeArrayStart(4) - } else { - yynn2861 = 1 - for _, b := range yyq2861 { - if b { - yynn2861++ - } - } - r.EncodeMapStart(yynn2861) - yynn2861 = 0 - } - if yyr2861 || yy2arr2861 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2861[0] { - yym2863 := z.EncBinary() - _ = yym2863 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2861[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2864 := z.EncBinary() - _ = yym2864 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2861 || yy2arr2861 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2861[1] { - yym2866 := z.EncBinary() - _ = yym2866 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2861[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2867 := z.EncBinary() - _ = yym2867 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2861 || yy2arr2861 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2861[2] { - yy2869 := &x.ObjectMeta - yy2869.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2861[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2870 := &x.ObjectMeta - yy2870.CodecEncodeSelf(e) - } - } - if yyr2861 || yy2arr2861 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Subsets == nil { - r.EncodeNil() - } else { - yym2872 := z.EncBinary() - _ = yym2872 - if false { - } else { - h.encSliceEndpointSubset(([]EndpointSubset)(x.Subsets), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Subsets")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Subsets == nil { - r.EncodeNil() - } else { - yym2873 := z.EncBinary() - _ = yym2873 - if false { - } else { - h.encSliceEndpointSubset(([]EndpointSubset)(x.Subsets), e) - } - } - } - if yyr2861 || yy2arr2861 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Endpoints) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2874 := z.DecBinary() - _ = yym2874 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2875 := r.ContainerType() - if yyct2875 == codecSelferValueTypeMap1234 { - yyl2875 := r.ReadMapStart() - if yyl2875 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2875, d) - } - } else if yyct2875 == codecSelferValueTypeArray1234 { - yyl2875 := r.ReadArrayStart() - if yyl2875 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
x.codecDecodeSelfFromArray(yyl2875, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Endpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2876Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2876Slc - var yyhl2876 bool = l >= 0 - for yyj2876 := 0; ; yyj2876++ { - if yyhl2876 { - if yyj2876 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2876Slc = r.DecodeBytes(yys2876Slc, true, true) - yys2876 := string(yys2876Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2876 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv2879 := &x.ObjectMeta - yyv2879.CodecDecodeSelf(d) - } - case "Subsets": - if r.TryDecodeAsNil() { - x.Subsets = nil - } else { - yyv2880 := &x.Subsets - yym2881 := z.DecBinary() - _ = yym2881 - if false { - } else { - h.decSliceEndpointSubset((*[]EndpointSubset)(yyv2880), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys2876) - } // end switch yys2876 - } // end for yyj2876 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Endpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2882 int - var yyb2882 bool - var yyhl2882 bool = l >= 0 - yyj2882++ - if yyhl2882 { - yyb2882 = yyj2882 > l - } else { - yyb2882 = r.CheckBreak() - } - if yyb2882 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj2882++ - if yyhl2882 { - yyb2882 = yyj2882 > l - } else { - yyb2882 = r.CheckBreak() - } - if yyb2882 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj2882++ - if yyhl2882 { - yyb2882 = yyj2882 > l - } else { - yyb2882 = r.CheckBreak() - } - if yyb2882 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv2885 := &x.ObjectMeta - yyv2885.CodecDecodeSelf(d) - } - yyj2882++ - if yyhl2882 { - yyb2882 = yyj2882 > l - } else { - yyb2882 = r.CheckBreak() - } - if yyb2882 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Subsets = nil - } else { - yyv2886 := &x.Subsets - yym2887 := z.DecBinary() - _ = yym2887 - if false { - } else { - h.decSliceEndpointSubset((*[]EndpointSubset)(yyv2886), d) - } - } - for { - yyj2882++ - if yyhl2882 { - yyb2882 = yyj2882 > l - } else { - yyb2882 = r.CheckBreak() - } - if yyb2882 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2882-1, "") - } - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EndpointSubset) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2888 := z.EncBinary() - _ = yym2888 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2889 := !z.EncBinary() - yy2arr2889 := z.EncBasicHandle().StructToArray - var yyq2889 [3]bool - _, _, _ = yysep2889, yyq2889, yy2arr2889 - const yyr2889 bool = false - var yynn2889 int - if yyr2889 || yy2arr2889 { - r.EncodeArrayStart(3) - } else { - yynn2889 = 3 - for _, b := range yyq2889 { - if b { - yynn2889++ - } - } - r.EncodeMapStart(yynn2889) - yynn2889 = 0 - } - if yyr2889 || yy2arr2889 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Addresses == nil { - r.EncodeNil() - } else { - yym2891 := z.EncBinary() - _ = yym2891 - if false { - } else { - h.encSliceEndpointAddress(([]EndpointAddress)(x.Addresses), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Addresses")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Addresses == nil { - r.EncodeNil() - } else { - yym2892 := z.EncBinary() - _ = yym2892 - if false { - } else { - h.encSliceEndpointAddress(([]EndpointAddress)(x.Addresses), e) - } - } - } - if yyr2889 || yy2arr2889 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.NotReadyAddresses == nil { - r.EncodeNil() - } else { - yym2894 := z.EncBinary() - _ = yym2894 - if false { - } else { - h.encSliceEndpointAddress(([]EndpointAddress)(x.NotReadyAddresses), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("NotReadyAddresses")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NotReadyAddresses == nil { - r.EncodeNil() - } else { - yym2895 := z.EncBinary() - _ = yym2895 - if false { - } else { - h.encSliceEndpointAddress(([]EndpointAddress)(x.NotReadyAddresses), e) - } - } - } - if yyr2889 || yy2arr2889 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Ports == nil { - r.EncodeNil() - } else { - yym2897 := z.EncBinary() - _ = yym2897 - if false { - } else { - h.encSliceEndpointPort(([]EndpointPort)(x.Ports), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Ports")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ports == nil { - r.EncodeNil() - } else { - yym2898 := z.EncBinary() - _ = yym2898 - if false { - } else { - h.encSliceEndpointPort(([]EndpointPort)(x.Ports), e) - } - } - } - if yyr2889 || yy2arr2889 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EndpointSubset) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2899 := z.DecBinary() - _ = yym2899 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2900 := r.ContainerType() - if yyct2900 == codecSelferValueTypeMap1234 { - yyl2900 := r.ReadMapStart() - if yyl2900 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2900, d) - } - } else if yyct2900 == codecSelferValueTypeArray1234 { - yyl2900 := r.ReadArrayStart() - if yyl2900 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2900, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EndpointSubset) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2901Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2901Slc - var yyhl2901 bool = l >= 0 - for yyj2901 := 0; ; yyj2901++ { - if yyhl2901 { - if yyj2901 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2901Slc = r.DecodeBytes(yys2901Slc, true, true) - yys2901 := string(yys2901Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2901 { - case "Addresses": - if r.TryDecodeAsNil() { - x.Addresses = nil - } else { - yyv2902 := &x.Addresses - yym2903 := z.DecBinary() - _ = yym2903 - if false { - } else { - h.decSliceEndpointAddress((*[]EndpointAddress)(yyv2902), d) - } - } - case "NotReadyAddresses": - if r.TryDecodeAsNil() { - x.NotReadyAddresses = nil - } else { - yyv2904 := &x.NotReadyAddresses - yym2905 := z.DecBinary() - _ = yym2905 - if false { - } else { - h.decSliceEndpointAddress((*[]EndpointAddress)(yyv2904), d) - } - } - case "Ports": - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv2906 := &x.Ports - yym2907 := z.DecBinary() - _ = yym2907 - if false { - } else { - h.decSliceEndpointPort((*[]EndpointPort)(yyv2906), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys2901) - } // end switch yys2901 - } // end for yyj2901 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EndpointSubset) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2908 int - var yyb2908 bool - var yyhl2908 bool = l >= 0 - yyj2908++ - if yyhl2908 { - yyb2908 = yyj2908 > l - } else { - yyb2908 = r.CheckBreak() - } - if yyb2908 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Addresses = nil - } else { - yyv2909 := &x.Addresses - yym2910 := z.DecBinary() - _ = yym2910 - if false { - } else { - h.decSliceEndpointAddress((*[]EndpointAddress)(yyv2909), d) - } - } - yyj2908++ - if yyhl2908 { - yyb2908 = yyj2908 > l - } else { - yyb2908 = r.CheckBreak() - } - if yyb2908 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NotReadyAddresses = nil - } else { - yyv2911 := &x.NotReadyAddresses - yym2912 := z.DecBinary() - _ = yym2912 - if false { - } else { - h.decSliceEndpointAddress((*[]EndpointAddress)(yyv2911), d) - } - } - yyj2908++ - if yyhl2908 { - yyb2908 = yyj2908 > l - } else { - yyb2908 = r.CheckBreak() - } - if yyb2908 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv2913 := &x.Ports - yym2914 := z.DecBinary() - _ = yym2914 - if false { - } else { - h.decSliceEndpointPort((*[]EndpointPort)(yyv2913), d) - } - } - for { - yyj2908++ - if yyhl2908 { - yyb2908 = yyj2908 > l - } else { - yyb2908 = r.CheckBreak() - } - if yyb2908 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - 
z.DecStructFieldNotFound(yyj2908-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EndpointAddress) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2915 := z.EncBinary() - _ = yym2915 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2916 := !z.EncBinary() - yy2arr2916 := z.EncBasicHandle().StructToArray - var yyq2916 [4]bool - _, _, _ = yysep2916, yyq2916, yy2arr2916 - const yyr2916 bool = false - yyq2916[1] = x.Hostname != "" - yyq2916[2] = x.NodeName != nil - var yynn2916 int - if yyr2916 || yy2arr2916 { - r.EncodeArrayStart(4) - } else { - yynn2916 = 2 - for _, b := range yyq2916 { - if b { - yynn2916++ - } - } - r.EncodeMapStart(yynn2916) - yynn2916 = 0 - } - if yyr2916 || yy2arr2916 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2918 := z.EncBinary() - _ = yym2918 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IP)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("IP")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2919 := z.EncBinary() - _ = yym2919 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IP)) - } - } - if yyr2916 || yy2arr2916 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2916[1] { - yym2921 := z.EncBinary() - _ = yym2921 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2916[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostname")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2922 := z.EncBinary() - _ = yym2922 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } - } - } - if yyr2916 || yy2arr2916 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2916[2] { - if x.NodeName == nil { - r.EncodeNil() - } else { - yy2924 := *x.NodeName - yym2925 := z.EncBinary() - _ = yym2925 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yy2924)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2916[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NodeName == nil { - r.EncodeNil() - } else { - yy2926 := *x.NodeName - yym2927 := z.EncBinary() - _ = yym2927 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yy2926)) - } - } - } - } - if yyr2916 || yy2arr2916 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.TargetRef == nil { - r.EncodeNil() - } else { - x.TargetRef.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("TargetRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TargetRef == nil { - r.EncodeNil() - } else { - x.TargetRef.CodecEncodeSelf(e) - } - } - if yyr2916 || yy2arr2916 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EndpointAddress) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2929 := z.DecBinary() - _ = yym2929 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2930 := r.ContainerType() - if yyct2930 == codecSelferValueTypeMap1234 { - yyl2930 := r.ReadMapStart() - if yyl2930 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2930, d) - } - } else if yyct2930 == codecSelferValueTypeArray1234 { - yyl2930 := r.ReadArrayStart() - if yyl2930 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2930, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EndpointAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2931Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2931Slc - var yyhl2931 bool = l >= 0 - for yyj2931 := 0; ; yyj2931++ { - if yyhl2931 { - if yyj2931 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2931Slc = r.DecodeBytes(yys2931Slc, true, true) - yys2931 := string(yys2931Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2931 { - case "IP": - if r.TryDecodeAsNil() { - x.IP = "" - } else { - x.IP = string(r.DecodeString()) - } - case "hostname": - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - x.Hostname = string(r.DecodeString()) - } - case "nodeName": - if r.TryDecodeAsNil() { - if x.NodeName != nil { - x.NodeName = nil - } - } else { - if x.NodeName == nil { - x.NodeName = new(string) - } - yym2935 := z.DecBinary() - _ = yym2935 - if false { - } else { - *((*string)(x.NodeName)) = r.DecodeString() - } - } - case "TargetRef": - if r.TryDecodeAsNil() { - if x.TargetRef != nil { - x.TargetRef = nil - } - } else { - if x.TargetRef == nil { - x.TargetRef = new(ObjectReference) - } - x.TargetRef.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys2931) - } // end switch yys2931 - } // end for yyj2931 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EndpointAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2937 int - var yyb2937 bool - var yyhl2937 bool = l >= 0 - yyj2937++ - if yyhl2937 { - yyb2937 = yyj2937 > l - } else { - yyb2937 = r.CheckBreak() - } - if yyb2937 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.IP = "" - } else { - x.IP = string(r.DecodeString()) - } - yyj2937++ - if yyhl2937 { - yyb2937 = yyj2937 > l - } else { - yyb2937 = r.CheckBreak() - } - if yyb2937 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - x.Hostname = string(r.DecodeString()) - } - yyj2937++ - if yyhl2937 { - yyb2937 = yyj2937 > l - } else { - yyb2937 = r.CheckBreak() - } - if yyb2937 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NodeName != nil { - x.NodeName = nil - } - } else { - if x.NodeName == nil { - x.NodeName = new(string) - } - yym2941 := z.DecBinary() - _ = yym2941 
- if false { - } else { - *((*string)(x.NodeName)) = r.DecodeString() - } - } - yyj2937++ - if yyhl2937 { - yyb2937 = yyj2937 > l - } else { - yyb2937 = r.CheckBreak() - } - if yyb2937 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TargetRef != nil { - x.TargetRef = nil - } - } else { - if x.TargetRef == nil { - x.TargetRef = new(ObjectReference) - } - x.TargetRef.CodecDecodeSelf(d) - } - for { - yyj2937++ - if yyhl2937 { - yyb2937 = yyj2937 > l - } else { - yyb2937 = r.CheckBreak() - } - if yyb2937 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2937-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EndpointPort) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2943 := z.EncBinary() - _ = yym2943 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2944 := !z.EncBinary() - yy2arr2944 := z.EncBasicHandle().StructToArray - var yyq2944 [3]bool - _, _, _ = yysep2944, yyq2944, yy2arr2944 - const yyr2944 bool = false - var yynn2944 int - if yyr2944 || yy2arr2944 { - r.EncodeArrayStart(3) - } else { - yynn2944 = 3 - for _, b := range yyq2944 { - if b { - yynn2944++ - } - } - r.EncodeMapStart(yynn2944) - yynn2944 = 0 - } - if yyr2944 || yy2arr2944 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2946 := z.EncBinary() - _ = yym2946 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2947 := z.EncBinary() - _ = yym2947 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2944 || yy2arr2944 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2949 := z.EncBinary() - _ = yym2949 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2950 := z.EncBinary() - _ = yym2950 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } - if yyr2944 || yy2arr2944 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Protocol.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Protocol")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Protocol.CodecEncodeSelf(e) - } - if yyr2944 || yy2arr2944 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EndpointPort) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2952 := z.DecBinary() - _ = yym2952 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2953 := r.ContainerType() - if yyct2953 == codecSelferValueTypeMap1234 { - yyl2953 := r.ReadMapStart() - if yyl2953 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2953, d) - } - } else if 
yyct2953 == codecSelferValueTypeArray1234 { - yyl2953 := r.ReadArrayStart() - if yyl2953 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2953, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EndpointPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2954Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2954Slc - var yyhl2954 bool = l >= 0 - for yyj2954 := 0; ; yyj2954++ { - if yyhl2954 { - if yyj2954 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2954Slc = r.DecodeBytes(yys2954Slc, true, true) - yys2954 := string(yys2954Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2954 { - case "Name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "Port": - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - case "Protocol": - if r.TryDecodeAsNil() { - x.Protocol = "" - } else { - x.Protocol = Protocol(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys2954) - } // end switch yys2954 - } // end for yyj2954 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EndpointPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2958 int - var yyb2958 bool - var yyhl2958 bool = l >= 0 - yyj2958++ - if yyhl2958 { - yyb2958 = yyj2958 > l - } else { - yyb2958 = r.CheckBreak() - } - if yyb2958 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj2958++ - if yyhl2958 { - yyb2958 = yyj2958 > l - } else { - yyb2958 = r.CheckBreak() - } - if yyb2958 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - yyj2958++ - if yyhl2958 { - yyb2958 = yyj2958 > l - } else { - yyb2958 = r.CheckBreak() - } - if yyb2958 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Protocol = "" - } else { - x.Protocol = Protocol(r.DecodeString()) - } - for { - yyj2958++ - if yyhl2958 { - yyb2958 = yyj2958 > l - } else { - yyb2958 = r.CheckBreak() - } - if yyb2958 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2958-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EndpointsList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2962 := z.EncBinary() - _ = yym2962 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2963 := !z.EncBinary() - yy2arr2963 := z.EncBasicHandle().StructToArray - var yyq2963 [4]bool - _, _, _ = yysep2963, yyq2963, yy2arr2963 - const yyr2963 bool = false - yyq2963[0] = x.Kind != "" - yyq2963[1] = x.APIVersion != "" - yyq2963[2] = true - var yynn2963 int - if 
yyr2963 || yy2arr2963 { - r.EncodeArrayStart(4) - } else { - yynn2963 = 1 - for _, b := range yyq2963 { - if b { - yynn2963++ - } - } - r.EncodeMapStart(yynn2963) - yynn2963 = 0 - } - if yyr2963 || yy2arr2963 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2963[0] { - yym2965 := z.EncBinary() - _ = yym2965 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2963[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2966 := z.EncBinary() - _ = yym2966 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2963 || yy2arr2963 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2963[1] { - yym2968 := z.EncBinary() - _ = yym2968 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2963[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2969 := z.EncBinary() - _ = yym2969 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2963 || yy2arr2963 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2963[2] { - yy2971 := &x.ListMeta - yym2972 := z.EncBinary() - _ = yym2972 - if false { - } else if z.HasExtensions() && z.EncExt(yy2971) { - } else { - z.EncFallback(yy2971) - } - } else { - r.EncodeNil() - } - } else { - if yyq2963[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2973 := &x.ListMeta - yym2974 := z.EncBinary() - _ = yym2974 - if false { - } else if z.HasExtensions() && z.EncExt(yy2973) { - } else { - z.EncFallback(yy2973) - } - } - } - if yyr2963 || yy2arr2963 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym2976 := z.EncBinary() - _ = yym2976 - if false { - } else { - h.encSliceEndpoints(([]Endpoints)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym2977 := z.EncBinary() - _ = yym2977 - if false { - } else { - h.encSliceEndpoints(([]Endpoints)(x.Items), e) - } - } - } - if yyr2963 || yy2arr2963 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EndpointsList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym2978 := z.DecBinary() - _ = yym2978 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2979 := r.ContainerType() - if yyct2979 == codecSelferValueTypeMap1234 { - yyl2979 := r.ReadMapStart() - if yyl2979 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2979, d) - } - } else if yyct2979 == codecSelferValueTypeArray1234 { - yyl2979 := r.ReadArrayStart() - if 
yyl2979 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2979, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EndpointsList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys2980Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2980Slc - var yyhl2980 bool = l >= 0 - for yyj2980 := 0; ; yyj2980++ { - if yyhl2980 { - if yyj2980 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2980Slc = r.DecodeBytes(yys2980Slc, true, true) - yys2980 := string(yys2980Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2980 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv2983 := &x.ListMeta - yym2984 := z.DecBinary() - _ = yym2984 - if false { - } else if z.HasExtensions() && z.DecExt(yyv2983) { - } else { - z.DecFallback(yyv2983, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv2985 := &x.Items - yym2986 := z.DecBinary() - _ = yym2986 - if false { - } else { - h.decSliceEndpoints((*[]Endpoints)(yyv2985), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys2980) - } // end switch yys2980 - } // end for yyj2980 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EndpointsList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj2987 int - var yyb2987 bool - var yyhl2987 bool = l >= 0 - yyj2987++ - if yyhl2987 { - yyb2987 = yyj2987 > l - } else { - yyb2987 = r.CheckBreak() - } - if yyb2987 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj2987++ - if yyhl2987 { - yyb2987 = yyj2987 > l - } else { - yyb2987 = r.CheckBreak() - } - if yyb2987 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj2987++ - if yyhl2987 { - yyb2987 = yyj2987 > l - } else { - yyb2987 = r.CheckBreak() - } - if yyb2987 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv2990 := &x.ListMeta - yym2991 := z.DecBinary() - _ = yym2991 - if false { - } else if z.HasExtensions() && z.DecExt(yyv2990) { - } else { - z.DecFallback(yyv2990, false) - } - } - yyj2987++ - if yyhl2987 { - yyb2987 = yyj2987 > l - } else { - yyb2987 = r.CheckBreak() - } - if yyb2987 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv2992 := &x.Items - yym2993 := z.DecBinary() - _ = yym2993 - if false { - } else { - 
h.decSliceEndpoints((*[]Endpoints)(yyv2992), d) - } - } - for { - yyj2987++ - if yyhl2987 { - yyb2987 = yyj2987 > l - } else { - yyb2987 = r.CheckBreak() - } - if yyb2987 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2987-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym2994 := z.EncBinary() - _ = yym2994 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2995 := !z.EncBinary() - yy2arr2995 := z.EncBasicHandle().StructToArray - var yyq2995 [4]bool - _, _, _ = yysep2995, yyq2995, yy2arr2995 - const yyr2995 bool = false - yyq2995[0] = x.PodCIDR != "" - yyq2995[1] = x.ExternalID != "" - yyq2995[2] = x.ProviderID != "" - yyq2995[3] = x.Unschedulable != false - var yynn2995 int - if yyr2995 || yy2arr2995 { - r.EncodeArrayStart(4) - } else { - yynn2995 = 0 - for _, b := range yyq2995 { - if b { - yynn2995++ - } - } - r.EncodeMapStart(yynn2995) - yynn2995 = 0 - } - if yyr2995 || yy2arr2995 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2995[0] { - yym2997 := z.EncBinary() - _ = yym2997 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2995[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podCIDR")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2998 := z.EncBinary() - _ = yym2998 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) - } - } - } - if yyr2995 || yy2arr2995 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2995[1] { - yym3000 := z.EncBinary() - _ = yym3000 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ExternalID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2995[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("externalID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3001 := z.EncBinary() - _ = yym3001 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ExternalID)) - } - } - } - if yyr2995 || yy2arr2995 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2995[2] { - yym3003 := z.EncBinary() - _ = yym3003 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ProviderID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2995[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("providerID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3004 := z.EncBinary() - _ = yym3004 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ProviderID)) - } - } - } - if yyr2995 || yy2arr2995 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2995[3] { - yym3006 := z.EncBinary() - _ = yym3006 - if false { - } else { - r.EncodeBool(bool(x.Unschedulable)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2995[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("unschedulable")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3007 := z.EncBinary() - _ = yym3007 - if false { - } else { - r.EncodeBool(bool(x.Unschedulable)) - } - } - } - if yyr2995 || yy2arr2995 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3008 := z.DecBinary() - _ = yym3008 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3009 := r.ContainerType() - if yyct3009 == codecSelferValueTypeMap1234 { - yyl3009 := r.ReadMapStart() - if yyl3009 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3009, d) - } - } else if yyct3009 == codecSelferValueTypeArray1234 { - yyl3009 := r.ReadArrayStart() - if yyl3009 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3009, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3010Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3010Slc - var yyhl3010 bool = l >= 0 - for yyj3010 := 0; ; yyj3010++ { - if yyhl3010 { - if yyj3010 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3010Slc = r.DecodeBytes(yys3010Slc, true, true) - yys3010 := string(yys3010Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3010 { - case "podCIDR": - if r.TryDecodeAsNil() { - x.PodCIDR = "" - } else { - x.PodCIDR = string(r.DecodeString()) - } - case "externalID": - if r.TryDecodeAsNil() { - x.ExternalID = "" - } else { - x.ExternalID = string(r.DecodeString()) - } - case "providerID": - if r.TryDecodeAsNil() { - x.ProviderID = "" - } else { - x.ProviderID = string(r.DecodeString()) - } - case "unschedulable": - if r.TryDecodeAsNil() { - x.Unschedulable = false - } else { - x.Unschedulable = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3010) - } // end switch yys3010 - } // end for yyj3010 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3015 int - var yyb3015 bool - var yyhl3015 bool = l >= 0 - yyj3015++ - if yyhl3015 { - yyb3015 = yyj3015 > l - } else { - yyb3015 = r.CheckBreak() - } - if yyb3015 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodCIDR = "" - } else { - x.PodCIDR = string(r.DecodeString()) - } - yyj3015++ - if yyhl3015 { - yyb3015 = yyj3015 > l - } else { - yyb3015 = r.CheckBreak() - } - if yyb3015 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExternalID = "" - } else { - x.ExternalID = string(r.DecodeString()) - } - yyj3015++ - if yyhl3015 { - yyb3015 = yyj3015 > l - } else { - yyb3015 = r.CheckBreak() - } - if yyb3015 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ProviderID = "" - } else { - x.ProviderID = string(r.DecodeString()) - } - yyj3015++ - if yyhl3015 { - yyb3015 = yyj3015 > l - } else { - yyb3015 = r.CheckBreak() - } - if yyb3015 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Unschedulable = false - } else { - x.Unschedulable = bool(r.DecodeBool()) - } - for { - yyj3015++ - if yyhl3015 { - yyb3015 = yyj3015 > l - } else { - yyb3015 = r.CheckBreak() - } - if yyb3015 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3015-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonEndpoint) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3020 := z.EncBinary() - _ = yym3020 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3021 := !z.EncBinary() - yy2arr3021 := z.EncBasicHandle().StructToArray - var yyq3021 [1]bool - _, _, _ = yysep3021, yyq3021, yy2arr3021 - const yyr3021 bool = false - var yynn3021 int - if yyr3021 || yy2arr3021 { - r.EncodeArrayStart(1) - } else { - yynn3021 = 1 - for _, b := range yyq3021 { - if b { - yynn3021++ - } - } - r.EncodeMapStart(yynn3021) - yynn3021 = 0 - } - if yyr3021 || yy2arr3021 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3023 := z.EncBinary() - _ = yym3023 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3024 := z.EncBinary() - _ = yym3024 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } - if yyr3021 || yy2arr3021 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonEndpoint) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3025 := z.DecBinary() - _ = yym3025 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3026 := r.ContainerType() - if yyct3026 == codecSelferValueTypeMap1234 { - yyl3026 := r.ReadMapStart() - if yyl3026 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3026, d) - } - } else if yyct3026 == codecSelferValueTypeArray1234 { - yyl3026 := r.ReadArrayStart() - if yyl3026 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3026, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonEndpoint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3027Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3027Slc - var yyhl3027 bool = l >= 0 - for yyj3027 := 0; ; yyj3027++ { - if yyhl3027 { - if yyj3027 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3027Slc = r.DecodeBytes(yys3027Slc, true, true) - yys3027 := string(yys3027Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3027 { - case "Port": - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3027) - } // end switch yys3027 - } // end for yyj3027 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DaemonEndpoint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3029 int - var yyb3029 bool - var yyhl3029 bool = l >= 0 - yyj3029++ - if yyhl3029 { - yyb3029 = yyj3029 > l - } else { - yyb3029 = r.CheckBreak() - } - if yyb3029 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - for { - yyj3029++ - if yyhl3029 { - yyb3029 = yyj3029 > l - } else { - yyb3029 = r.CheckBreak() - } - if yyb3029 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3029-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeDaemonEndpoints) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3031 := z.EncBinary() - _ = yym3031 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3032 := !z.EncBinary() - yy2arr3032 := z.EncBasicHandle().StructToArray - var yyq3032 [1]bool - _, _, _ = yysep3032, yyq3032, yy2arr3032 - const yyr3032 bool = false - yyq3032[0] = true - var yynn3032 int - if yyr3032 || yy2arr3032 { - r.EncodeArrayStart(1) - } else { - yynn3032 = 0 - for _, b := range yyq3032 { - if b { - yynn3032++ - } - } - r.EncodeMapStart(yynn3032) - yynn3032 = 0 - } - if yyr3032 || yy2arr3032 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3032[0] { - yy3034 := &x.KubeletEndpoint - yy3034.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq3032[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeletEndpoint")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3035 := &x.KubeletEndpoint - yy3035.CodecEncodeSelf(e) - } - } - if yyr3032 || yy2arr3032 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeDaemonEndpoints) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3036 := z.DecBinary() - _ = yym3036 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3037 := r.ContainerType() - if yyct3037 == codecSelferValueTypeMap1234 { - yyl3037 := r.ReadMapStart() - if yyl3037 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3037, d) - } - } else if yyct3037 == codecSelferValueTypeArray1234 { - yyl3037 := r.ReadArrayStart() - if yyl3037 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3037, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeDaemonEndpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ 
= h, z, r - var yys3038Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3038Slc - var yyhl3038 bool = l >= 0 - for yyj3038 := 0; ; yyj3038++ { - if yyhl3038 { - if yyj3038 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3038Slc = r.DecodeBytes(yys3038Slc, true, true) - yys3038 := string(yys3038Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3038 { - case "kubeletEndpoint": - if r.TryDecodeAsNil() { - x.KubeletEndpoint = DaemonEndpoint{} - } else { - yyv3039 := &x.KubeletEndpoint - yyv3039.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3038) - } // end switch yys3038 - } // end for yyj3038 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeDaemonEndpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3040 int - var yyb3040 bool - var yyhl3040 bool = l >= 0 - yyj3040++ - if yyhl3040 { - yyb3040 = yyj3040 > l - } else { - yyb3040 = r.CheckBreak() - } - if yyb3040 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeletEndpoint = DaemonEndpoint{} - } else { - yyv3041 := &x.KubeletEndpoint - yyv3041.CodecDecodeSelf(d) - } - for { - yyj3040++ - if yyhl3040 { - yyb3040 = yyj3040 > l - } else { - yyb3040 = r.CheckBreak() - } - if yyb3040 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3040-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3042 := z.EncBinary() - _ = yym3042 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3043 := !z.EncBinary() - yy2arr3043 := z.EncBasicHandle().StructToArray - var yyq3043 [10]bool - _, _, _ = yysep3043, yyq3043, yy2arr3043 - const yyr3043 bool = false - var yynn3043 int - if yyr3043 || yy2arr3043 { - r.EncodeArrayStart(10) - } else { - yynn3043 = 10 - for _, b := range yyq3043 { - if b { - yynn3043++ - } - } - r.EncodeMapStart(yynn3043) - yynn3043 = 0 - } - if yyr3043 || yy2arr3043 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3045 := z.EncBinary() - _ = yym3045 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MachineID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("machineID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3046 := z.EncBinary() - _ = yym3046 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MachineID)) - } - } - if yyr3043 || yy2arr3043 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3048 := z.EncBinary() - _ = yym3048 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SystemUUID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("systemUUID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3049 := z.EncBinary() - _ = yym3049 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SystemUUID)) - } - } - if yyr3043 || yy2arr3043 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3051 := z.EncBinary() - _ = yym3051 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.BootID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("bootID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3052 := z.EncBinary() - _ = yym3052 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.BootID)) - } - } - if yyr3043 || yy2arr3043 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3054 := z.EncBinary() - _ = yym3054 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KernelVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kernelVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3055 := z.EncBinary() - _ = yym3055 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KernelVersion)) - } - } - if yyr3043 || yy2arr3043 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3057 := z.EncBinary() - _ = yym3057 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.OSImage)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("osImage")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3058 := z.EncBinary() - _ = yym3058 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.OSImage)) - } - } - if yyr3043 || yy2arr3043 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3060 := z.EncBinary() - _ = yym3060 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntimeVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerRuntimeVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3061 := z.EncBinary() - _ = yym3061 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntimeVersion)) - } - } - if yyr3043 || yy2arr3043 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3063 := z.EncBinary() - _ = yym3063 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeletVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeletVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3064 := z.EncBinary() - _ = yym3064 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeletVersion)) - } - } - if yyr3043 || yy2arr3043 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3066 := z.EncBinary() - _ = yym3066 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeProxyVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeProxyVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3067 := z.EncBinary() - _ = yym3067 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeProxyVersion)) - } - } - if yyr3043 || yy2arr3043 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3069 := z.EncBinary() - _ = yym3069 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem)) - } - 
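
A note on the generated NodeSystemInfo encoder being removed above: for every field it emits either a positional array element or a "jsonKey": value pair (machineID, systemUUID, bootID, and so on), depending on whether the handle's StructToArray option is set. The sketch below reproduces only the map-mode wire shape with plain encoding/json; the struct is an illustrative stand-in whose field set is copied from the keys visible in the diff, not the reflection-free generated path itself.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // nodeSystemInfoSketch mirrors the JSON keys written by the generated encoder above.
    type nodeSystemInfoSketch struct {
        MachineID               string `json:"machineID"`
        SystemUUID              string `json:"systemUUID"`
        BootID                  string `json:"bootID"`
        KernelVersion           string `json:"kernelVersion"`
        OSImage                 string `json:"osImage"`
        ContainerRuntimeVersion string `json:"containerRuntimeVersion"`
        KubeletVersion          string `json:"kubeletVersion"`
        KubeProxyVersion        string `json:"kubeProxyVersion"`
        OperatingSystem         string `json:"operatingSystem"`
        Architecture            string `json:"architecture"`
    }

    func main() {
        b, _ := json.Marshal(nodeSystemInfoSketch{OperatingSystem: "linux", Architecture: "amd64"})
        fmt.Println(string(b)) // all ten keys appear; the generated codec produced the same shape without reflection
    }
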
} else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("operatingSystem")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3070 := z.EncBinary() - _ = yym3070 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem)) - } - } - if yyr3043 || yy2arr3043 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3072 := z.EncBinary() - _ = yym3072 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Architecture)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("architecture")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3073 := z.EncBinary() - _ = yym3073 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Architecture)) - } - } - if yyr3043 || yy2arr3043 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeSystemInfo) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3074 := z.DecBinary() - _ = yym3074 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3075 := r.ContainerType() - if yyct3075 == codecSelferValueTypeMap1234 { - yyl3075 := r.ReadMapStart() - if yyl3075 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3075, d) - } - } else if yyct3075 == codecSelferValueTypeArray1234 { - yyl3075 := r.ReadArrayStart() - if yyl3075 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3075, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeSystemInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3076Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3076Slc - var yyhl3076 bool = l >= 0 - for yyj3076 := 0; ; yyj3076++ { - if yyhl3076 { - if yyj3076 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3076Slc = r.DecodeBytes(yys3076Slc, true, true) - yys3076 := string(yys3076Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3076 { - case "machineID": - if r.TryDecodeAsNil() { - x.MachineID = "" - } else { - x.MachineID = string(r.DecodeString()) - } - case "systemUUID": - if r.TryDecodeAsNil() { - x.SystemUUID = "" - } else { - x.SystemUUID = string(r.DecodeString()) - } - case "bootID": - if r.TryDecodeAsNil() { - x.BootID = "" - } else { - x.BootID = string(r.DecodeString()) - } - case "kernelVersion": - if r.TryDecodeAsNil() { - x.KernelVersion = "" - } else { - x.KernelVersion = string(r.DecodeString()) - } - case "osImage": - if r.TryDecodeAsNil() { - x.OSImage = "" - } else { - x.OSImage = string(r.DecodeString()) - } - case "containerRuntimeVersion": - if r.TryDecodeAsNil() { - x.ContainerRuntimeVersion = "" - } else { - x.ContainerRuntimeVersion = string(r.DecodeString()) - } - case "kubeletVersion": - if r.TryDecodeAsNil() { - x.KubeletVersion = "" - } else { - x.KubeletVersion = string(r.DecodeString()) - } - case "kubeProxyVersion": - if r.TryDecodeAsNil() { - x.KubeProxyVersion = "" - } else { - x.KubeProxyVersion = 
string(r.DecodeString()) - } - case "operatingSystem": - if r.TryDecodeAsNil() { - x.OperatingSystem = "" - } else { - x.OperatingSystem = string(r.DecodeString()) - } - case "architecture": - if r.TryDecodeAsNil() { - x.Architecture = "" - } else { - x.Architecture = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3076) - } // end switch yys3076 - } // end for yyj3076 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3087 int - var yyb3087 bool - var yyhl3087 bool = l >= 0 - yyj3087++ - if yyhl3087 { - yyb3087 = yyj3087 > l - } else { - yyb3087 = r.CheckBreak() - } - if yyb3087 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MachineID = "" - } else { - x.MachineID = string(r.DecodeString()) - } - yyj3087++ - if yyhl3087 { - yyb3087 = yyj3087 > l - } else { - yyb3087 = r.CheckBreak() - } - if yyb3087 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SystemUUID = "" - } else { - x.SystemUUID = string(r.DecodeString()) - } - yyj3087++ - if yyhl3087 { - yyb3087 = yyj3087 > l - } else { - yyb3087 = r.CheckBreak() - } - if yyb3087 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.BootID = "" - } else { - x.BootID = string(r.DecodeString()) - } - yyj3087++ - if yyhl3087 { - yyb3087 = yyj3087 > l - } else { - yyb3087 = r.CheckBreak() - } - if yyb3087 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KernelVersion = "" - } else { - x.KernelVersion = string(r.DecodeString()) - } - yyj3087++ - if yyhl3087 { - yyb3087 = yyj3087 > l - } else { - yyb3087 = r.CheckBreak() - } - if yyb3087 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.OSImage = "" - } else { - x.OSImage = string(r.DecodeString()) - } - yyj3087++ - if yyhl3087 { - yyb3087 = yyj3087 > l - } else { - yyb3087 = r.CheckBreak() - } - if yyb3087 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerRuntimeVersion = "" - } else { - x.ContainerRuntimeVersion = string(r.DecodeString()) - } - yyj3087++ - if yyhl3087 { - yyb3087 = yyj3087 > l - } else { - yyb3087 = r.CheckBreak() - } - if yyb3087 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeletVersion = "" - } else { - x.KubeletVersion = string(r.DecodeString()) - } - yyj3087++ - if yyhl3087 { - yyb3087 = yyj3087 > l - } else { - yyb3087 = r.CheckBreak() - } - if yyb3087 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeProxyVersion = "" - } else { - x.KubeProxyVersion = string(r.DecodeString()) - } - yyj3087++ - if yyhl3087 { - yyb3087 = 
yyj3087 > l - } else { - yyb3087 = r.CheckBreak() - } - if yyb3087 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.OperatingSystem = "" - } else { - x.OperatingSystem = string(r.DecodeString()) - } - yyj3087++ - if yyhl3087 { - yyb3087 = yyj3087 > l - } else { - yyb3087 = r.CheckBreak() - } - if yyb3087 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Architecture = "" - } else { - x.Architecture = string(r.DecodeString()) - } - for { - yyj3087++ - if yyhl3087 { - yyb3087 = yyj3087 > l - } else { - yyb3087 = r.CheckBreak() - } - if yyb3087 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3087-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3098 := z.EncBinary() - _ = yym3098 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3099 := !z.EncBinary() - yy2arr3099 := z.EncBasicHandle().StructToArray - var yyq3099 [10]bool - _, _, _ = yysep3099, yyq3099, yy2arr3099 - const yyr3099 bool = false - yyq3099[0] = len(x.Capacity) != 0 - yyq3099[1] = len(x.Allocatable) != 0 - yyq3099[2] = x.Phase != "" - yyq3099[3] = len(x.Conditions) != 0 - yyq3099[4] = len(x.Addresses) != 0 - yyq3099[5] = true - yyq3099[6] = true - yyq3099[7] = len(x.Images) != 0 - yyq3099[8] = len(x.VolumesInUse) != 0 - yyq3099[9] = len(x.VolumesAttached) != 0 - var yynn3099 int - if yyr3099 || yy2arr3099 { - r.EncodeArrayStart(10) - } else { - yynn3099 = 0 - for _, b := range yyq3099 { - if b { - yynn3099++ - } - } - r.EncodeMapStart(yynn3099) - yynn3099 = 0 - } - if yyr3099 || yy2arr3099 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3099[0] { - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq3099[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("capacity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } - } - if yyr3099 || yy2arr3099 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3099[1] { - if x.Allocatable == nil { - r.EncodeNil() - } else { - x.Allocatable.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq3099[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("allocatable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Allocatable == nil { - r.EncodeNil() - } else { - x.Allocatable.CodecEncodeSelf(e) - } - } - } - if yyr3099 || yy2arr3099 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3099[2] { - x.Phase.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3099[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("phase")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Phase.CodecEncodeSelf(e) - } - } - if yyr3099 || yy2arr3099 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3099[3] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym3104 := z.EncBinary() - _ = yym3104 - if false { - } else { - h.encSliceNodeCondition(([]NodeCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq3099[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym3105 := z.EncBinary() - _ = yym3105 - if false { - } else { - h.encSliceNodeCondition(([]NodeCondition)(x.Conditions), e) - } - } - } - } - if yyr3099 || yy2arr3099 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3099[4] { - if x.Addresses == nil { - r.EncodeNil() - } else { - yym3107 := z.EncBinary() - _ = yym3107 - if false { - } else { - h.encSliceNodeAddress(([]NodeAddress)(x.Addresses), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq3099[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("addresses")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Addresses == nil { - r.EncodeNil() - } else { - yym3108 := z.EncBinary() - _ = yym3108 - if false { - } else { - h.encSliceNodeAddress(([]NodeAddress)(x.Addresses), e) - } - } - } - } - if yyr3099 || yy2arr3099 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3099[5] { - yy3110 := &x.DaemonEndpoints - yy3110.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq3099[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("daemonEndpoints")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3111 := &x.DaemonEndpoints - yy3111.CodecEncodeSelf(e) - } - } - if yyr3099 || yy2arr3099 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3099[6] { - yy3113 := &x.NodeInfo - yy3113.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq3099[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeInfo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3114 := &x.NodeInfo - yy3114.CodecEncodeSelf(e) - } - } - if yyr3099 || yy2arr3099 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3099[7] { - if x.Images == nil { - r.EncodeNil() - } else { - yym3116 := z.EncBinary() - _ = yym3116 - if false { - } else { - h.encSliceContainerImage(([]ContainerImage)(x.Images), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq3099[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("images")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Images == nil { - r.EncodeNil() - } else { - yym3117 := z.EncBinary() - _ = yym3117 - if false { - } else { - h.encSliceContainerImage(([]ContainerImage)(x.Images), e) - } - } - } - } - if yyr3099 || yy2arr3099 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3099[8] { - if x.VolumesInUse == nil { - r.EncodeNil() - } else { - yym3119 := z.EncBinary() - _ = yym3119 - if false { - } else { - h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq3099[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
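
The yyq3099 flags set at the top of the NodeStatus encoder (len(x.Capacity) != 0, x.Phase != "", and so on) are what give the generated map form its omitempty behaviour: a field whose flag is false is never written at all. A rough equivalent with encoding/json tags is sketched below; the slice element types are simplified stand-ins used only for illustration.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // nodeStatusSketch keeps only a few of the fields handled above; string slices
    // stand in for the real NodeCondition/ContainerImage/UniqueVolumeName element types.
    type nodeStatusSketch struct {
        Phase        string   `json:"phase,omitempty"`
        Conditions   []string `json:"conditions,omitempty"`
        Images       []string `json:"images,omitempty"`
        VolumesInUse []string `json:"volumesInUse,omitempty"`
    }

    func main() {
        empty, _ := json.Marshal(nodeStatusSketch{})
        full, _ := json.Marshal(nodeStatusSketch{Phase: "Running", VolumesInUse: []string{"vol-1"}})
        fmt.Println(string(empty)) // {}            -- all flags false, nothing emitted
        fmt.Println(string(full))  // only the populated keys appear
    }
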
r.EncodeString(codecSelferC_UTF81234, string("volumesInUse")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VolumesInUse == nil { - r.EncodeNil() - } else { - yym3120 := z.EncBinary() - _ = yym3120 - if false { - } else { - h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e) - } - } - } - } - if yyr3099 || yy2arr3099 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3099[9] { - if x.VolumesAttached == nil { - r.EncodeNil() - } else { - yym3122 := z.EncBinary() - _ = yym3122 - if false { - } else { - h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq3099[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumesAttached")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VolumesAttached == nil { - r.EncodeNil() - } else { - yym3123 := z.EncBinary() - _ = yym3123 - if false { - } else { - h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e) - } - } - } - } - if yyr3099 || yy2arr3099 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3124 := z.DecBinary() - _ = yym3124 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3125 := r.ContainerType() - if yyct3125 == codecSelferValueTypeMap1234 { - yyl3125 := r.ReadMapStart() - if yyl3125 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3125, d) - } - } else if yyct3125 == codecSelferValueTypeArray1234 { - yyl3125 := r.ReadArrayStart() - if yyl3125 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3125, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3126Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3126Slc - var yyhl3126 bool = l >= 0 - for yyj3126 := 0; ; yyj3126++ { - if yyhl3126 { - if yyj3126 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3126Slc = r.DecodeBytes(yys3126Slc, true, true) - yys3126 := string(yys3126Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3126 { - case "capacity": - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv3127 := &x.Capacity - yyv3127.CodecDecodeSelf(d) - } - case "allocatable": - if r.TryDecodeAsNil() { - x.Allocatable = nil - } else { - yyv3128 := &x.Allocatable - yyv3128.CodecDecodeSelf(d) - } - case "phase": - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = NodePhase(r.DecodeString()) - } - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv3130 := &x.Conditions - yym3131 := z.DecBinary() - _ = yym3131 - if false { - } else { - h.decSliceNodeCondition((*[]NodeCondition)(yyv3130), d) - } - } - case "addresses": - if r.TryDecodeAsNil() { - x.Addresses = nil - } else { - yyv3132 := &x.Addresses - yym3133 := z.DecBinary() - _ = yym3133 - if false { - } else { - h.decSliceNodeAddress((*[]NodeAddress)(yyv3132), 
d) - } - } - case "daemonEndpoints": - if r.TryDecodeAsNil() { - x.DaemonEndpoints = NodeDaemonEndpoints{} - } else { - yyv3134 := &x.DaemonEndpoints - yyv3134.CodecDecodeSelf(d) - } - case "nodeInfo": - if r.TryDecodeAsNil() { - x.NodeInfo = NodeSystemInfo{} - } else { - yyv3135 := &x.NodeInfo - yyv3135.CodecDecodeSelf(d) - } - case "images": - if r.TryDecodeAsNil() { - x.Images = nil - } else { - yyv3136 := &x.Images - yym3137 := z.DecBinary() - _ = yym3137 - if false { - } else { - h.decSliceContainerImage((*[]ContainerImage)(yyv3136), d) - } - } - case "volumesInUse": - if r.TryDecodeAsNil() { - x.VolumesInUse = nil - } else { - yyv3138 := &x.VolumesInUse - yym3139 := z.DecBinary() - _ = yym3139 - if false { - } else { - h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv3138), d) - } - } - case "volumesAttached": - if r.TryDecodeAsNil() { - x.VolumesAttached = nil - } else { - yyv3140 := &x.VolumesAttached - yym3141 := z.DecBinary() - _ = yym3141 - if false { - } else { - h.decSliceAttachedVolume((*[]AttachedVolume)(yyv3140), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3126) - } // end switch yys3126 - } // end for yyj3126 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3142 int - var yyb3142 bool - var yyhl3142 bool = l >= 0 - yyj3142++ - if yyhl3142 { - yyb3142 = yyj3142 > l - } else { - yyb3142 = r.CheckBreak() - } - if yyb3142 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv3143 := &x.Capacity - yyv3143.CodecDecodeSelf(d) - } - yyj3142++ - if yyhl3142 { - yyb3142 = yyj3142 > l - } else { - yyb3142 = r.CheckBreak() - } - if yyb3142 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Allocatable = nil - } else { - yyv3144 := &x.Allocatable - yyv3144.CodecDecodeSelf(d) - } - yyj3142++ - if yyhl3142 { - yyb3142 = yyj3142 > l - } else { - yyb3142 = r.CheckBreak() - } - if yyb3142 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = NodePhase(r.DecodeString()) - } - yyj3142++ - if yyhl3142 { - yyb3142 = yyj3142 > l - } else { - yyb3142 = r.CheckBreak() - } - if yyb3142 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv3146 := &x.Conditions - yym3147 := z.DecBinary() - _ = yym3147 - if false { - } else { - h.decSliceNodeCondition((*[]NodeCondition)(yyv3146), d) - } - } - yyj3142++ - if yyhl3142 { - yyb3142 = yyj3142 > l - } else { - yyb3142 = r.CheckBreak() - } - if yyb3142 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Addresses = nil - } else { - yyv3148 := &x.Addresses - yym3149 := z.DecBinary() - _ = yym3149 - if false { - } else { - h.decSliceNodeAddress((*[]NodeAddress)(yyv3148), d) - } - } - yyj3142++ - if yyhl3142 { - yyb3142 = yyj3142 > l - } else { - yyb3142 = r.CheckBreak() - } - if yyb3142 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DaemonEndpoints = NodeDaemonEndpoints{} - } else { - yyv3150 := &x.DaemonEndpoints - yyv3150.CodecDecodeSelf(d) - } - yyj3142++ - if yyhl3142 { - yyb3142 = yyj3142 > l - } else { - yyb3142 = r.CheckBreak() - } - if yyb3142 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeInfo = NodeSystemInfo{} - } else { - yyv3151 := &x.NodeInfo - yyv3151.CodecDecodeSelf(d) - } - yyj3142++ - if yyhl3142 { - yyb3142 = yyj3142 > l - } else { - yyb3142 = r.CheckBreak() - } - if yyb3142 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Images = nil - } else { - yyv3152 := &x.Images - yym3153 := z.DecBinary() - _ = yym3153 - if false { - } else { - h.decSliceContainerImage((*[]ContainerImage)(yyv3152), d) - } - } - yyj3142++ - if yyhl3142 { - yyb3142 = yyj3142 > l - } else { - yyb3142 = r.CheckBreak() - } - if yyb3142 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumesInUse = nil - } else { - yyv3154 := &x.VolumesInUse - yym3155 := z.DecBinary() - _ = yym3155 - if false { - } else { - h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv3154), d) - } - } - yyj3142++ - if yyhl3142 { - yyb3142 = yyj3142 > l - } else { - yyb3142 = r.CheckBreak() - } - if yyb3142 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumesAttached = nil - } else { - yyv3156 := &x.VolumesAttached - yym3157 := z.DecBinary() - _ = yym3157 - if false { - } else { - h.decSliceAttachedVolume((*[]AttachedVolume)(yyv3156), d) - } - } - for { - yyj3142++ - if yyhl3142 { - yyb3142 = yyj3142 > l - } else { - yyb3142 = r.CheckBreak() - } - if yyb3142 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3142-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x UniqueVolumeName) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym3158 := z.EncBinary() - _ = yym3158 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *UniqueVolumeName) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3159 := z.DecBinary() - _ = yym3159 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *AttachedVolume) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3160 := z.EncBinary() - _ = yym3160 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3161 := !z.EncBinary() - yy2arr3161 := z.EncBasicHandle().StructToArray - var yyq3161 [2]bool - _, _, _ = yysep3161, yyq3161, yy2arr3161 - const yyr3161 bool = false - var yynn3161 int - if yyr3161 || yy2arr3161 { - r.EncodeArrayStart(2) - } else { - 
yynn3161 = 2 - for _, b := range yyq3161 { - if b { - yynn3161++ - } - } - r.EncodeMapStart(yynn3161) - yynn3161 = 0 - } - if yyr3161 || yy2arr3161 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Name.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Name.CodecEncodeSelf(e) - } - if yyr3161 || yy2arr3161 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3164 := z.EncBinary() - _ = yym3164 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("devicePath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3165 := z.EncBinary() - _ = yym3165 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath)) - } - } - if yyr3161 || yy2arr3161 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *AttachedVolume) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3166 := z.DecBinary() - _ = yym3166 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3167 := r.ContainerType() - if yyct3167 == codecSelferValueTypeMap1234 { - yyl3167 := r.ReadMapStart() - if yyl3167 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3167, d) - } - } else if yyct3167 == codecSelferValueTypeArray1234 { - yyl3167 := r.ReadArrayStart() - if yyl3167 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3167, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *AttachedVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3168Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3168Slc - var yyhl3168 bool = l >= 0 - for yyj3168 := 0; ; yyj3168++ { - if yyhl3168 { - if yyj3168 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3168Slc = r.DecodeBytes(yys3168Slc, true, true) - yys3168 := string(yys3168Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3168 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = UniqueVolumeName(r.DecodeString()) - } - case "devicePath": - if r.TryDecodeAsNil() { - x.DevicePath = "" - } else { - x.DevicePath = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3168) - } // end switch yys3168 - } // end for yyj3168 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *AttachedVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3171 int - var yyb3171 bool - var yyhl3171 bool = l >= 0 - yyj3171++ - if yyhl3171 { - yyb3171 = yyj3171 > l - } else { - yyb3171 = r.CheckBreak() - } - if yyb3171 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - 
} else { - x.Name = UniqueVolumeName(r.DecodeString()) - } - yyj3171++ - if yyhl3171 { - yyb3171 = yyj3171 > l - } else { - yyb3171 = r.CheckBreak() - } - if yyb3171 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DevicePath = "" - } else { - x.DevicePath = string(r.DecodeString()) - } - for { - yyj3171++ - if yyhl3171 { - yyb3171 = yyj3171 > l - } else { - yyb3171 = r.CheckBreak() - } - if yyb3171 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3171-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *AvoidPods) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3174 := z.EncBinary() - _ = yym3174 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3175 := !z.EncBinary() - yy2arr3175 := z.EncBasicHandle().StructToArray - var yyq3175 [1]bool - _, _, _ = yysep3175, yyq3175, yy2arr3175 - const yyr3175 bool = false - yyq3175[0] = len(x.PreferAvoidPods) != 0 - var yynn3175 int - if yyr3175 || yy2arr3175 { - r.EncodeArrayStart(1) - } else { - yynn3175 = 0 - for _, b := range yyq3175 { - if b { - yynn3175++ - } - } - r.EncodeMapStart(yynn3175) - yynn3175 = 0 - } - if yyr3175 || yy2arr3175 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3175[0] { - if x.PreferAvoidPods == nil { - r.EncodeNil() - } else { - yym3177 := z.EncBinary() - _ = yym3177 - if false { - } else { - h.encSlicePreferAvoidPodsEntry(([]PreferAvoidPodsEntry)(x.PreferAvoidPods), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq3175[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preferAvoidPods")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PreferAvoidPods == nil { - r.EncodeNil() - } else { - yym3178 := z.EncBinary() - _ = yym3178 - if false { - } else { - h.encSlicePreferAvoidPodsEntry(([]PreferAvoidPodsEntry)(x.PreferAvoidPods), e) - } - } - } - } - if yyr3175 || yy2arr3175 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *AvoidPods) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3179 := z.DecBinary() - _ = yym3179 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3180 := r.ContainerType() - if yyct3180 == codecSelferValueTypeMap1234 { - yyl3180 := r.ReadMapStart() - if yyl3180 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3180, d) - } - } else if yyct3180 == codecSelferValueTypeArray1234 { - yyl3180 := r.ReadArrayStart() - if yyl3180 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3180, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *AvoidPods) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3181Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3181Slc - var yyhl3181 bool = l >= 0 - for yyj3181 := 0; ; yyj3181++ { - if yyhl3181 { - if yyj3181 >= l { - 
break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3181Slc = r.DecodeBytes(yys3181Slc, true, true) - yys3181 := string(yys3181Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3181 { - case "preferAvoidPods": - if r.TryDecodeAsNil() { - x.PreferAvoidPods = nil - } else { - yyv3182 := &x.PreferAvoidPods - yym3183 := z.DecBinary() - _ = yym3183 - if false { - } else { - h.decSlicePreferAvoidPodsEntry((*[]PreferAvoidPodsEntry)(yyv3182), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3181) - } // end switch yys3181 - } // end for yyj3181 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *AvoidPods) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3184 int - var yyb3184 bool - var yyhl3184 bool = l >= 0 - yyj3184++ - if yyhl3184 { - yyb3184 = yyj3184 > l - } else { - yyb3184 = r.CheckBreak() - } - if yyb3184 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PreferAvoidPods = nil - } else { - yyv3185 := &x.PreferAvoidPods - yym3186 := z.DecBinary() - _ = yym3186 - if false { - } else { - h.decSlicePreferAvoidPodsEntry((*[]PreferAvoidPodsEntry)(yyv3185), d) - } - } - for { - yyj3184++ - if yyhl3184 { - yyb3184 = yyj3184 > l - } else { - yyb3184 = r.CheckBreak() - } - if yyb3184 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3184-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PreferAvoidPodsEntry) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3187 := z.EncBinary() - _ = yym3187 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3188 := !z.EncBinary() - yy2arr3188 := z.EncBasicHandle().StructToArray - var yyq3188 [4]bool - _, _, _ = yysep3188, yyq3188, yy2arr3188 - const yyr3188 bool = false - yyq3188[1] = true - yyq3188[2] = x.Reason != "" - yyq3188[3] = x.Message != "" - var yynn3188 int - if yyr3188 || yy2arr3188 { - r.EncodeArrayStart(4) - } else { - yynn3188 = 1 - for _, b := range yyq3188 { - if b { - yynn3188++ - } - } - r.EncodeMapStart(yynn3188) - yynn3188 = 0 - } - if yyr3188 || yy2arr3188 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy3190 := &x.PodSignature - yy3190.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podSignature")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3191 := &x.PodSignature - yy3191.CodecEncodeSelf(e) - } - if yyr3188 || yy2arr3188 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3188[1] { - yy3193 := &x.EvictionTime - yym3194 := z.EncBinary() - _ = yym3194 - if false { - } else if z.HasExtensions() && z.EncExt(yy3193) { - } else if yym3194 { - z.EncBinaryMarshal(yy3193) - } else if !yym3194 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3193) - } else { - z.EncFallback(yy3193) - } - } else { - r.EncodeNil() - } - } else { - if yyq3188[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("evictionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - 
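
Around this point the generated PreferAvoidPodsEntry encoder hands evictionTime to z.EncJSONMarshal whenever the handle is a JSON handle, i.e. the Time wrapper's own MarshalJSON decides the wire format rather than the generic struct fallback. The sketch below shows that pattern with a hypothetical wrapper type; it is not the unversioned.Time implementation itself.

    package main

    import (
        "encoding/json"
        "fmt"
        "time"
    )

    // timeWrapper is a hypothetical stand-in for the unversioned Time type referenced above.
    // Its MarshalJSON, not generic field-by-field encoding, determines the serialized form.
    type timeWrapper struct{ time.Time }

    func (t timeWrapper) MarshalJSON() ([]byte, error) {
        return json.Marshal(t.UTC().Format(time.RFC3339))
    }

    type preferAvoidPodsEntrySketch struct {
        EvictionTime timeWrapper `json:"evictionTime"`
        Reason       string      `json:"reason,omitempty"`
        Message      string      `json:"message,omitempty"`
    }

    func main() {
        e := preferAvoidPodsEntrySketch{EvictionTime: timeWrapper{time.Unix(0, 0)}, Reason: "test"}
        b, _ := json.Marshal(e)
        fmt.Println(string(b)) // {"evictionTime":"1970-01-01T00:00:00Z","reason":"test"}
    }
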
yy3195 := &x.EvictionTime - yym3196 := z.EncBinary() - _ = yym3196 - if false { - } else if z.HasExtensions() && z.EncExt(yy3195) { - } else if yym3196 { - z.EncBinaryMarshal(yy3195) - } else if !yym3196 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3195) - } else { - z.EncFallback(yy3195) - } - } - } - if yyr3188 || yy2arr3188 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3188[2] { - yym3198 := z.EncBinary() - _ = yym3198 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3188[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3199 := z.EncBinary() - _ = yym3199 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr3188 || yy2arr3188 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3188[3] { - yym3201 := z.EncBinary() - _ = yym3201 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3188[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3202 := z.EncBinary() - _ = yym3202 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr3188 || yy2arr3188 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PreferAvoidPodsEntry) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3203 := z.DecBinary() - _ = yym3203 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3204 := r.ContainerType() - if yyct3204 == codecSelferValueTypeMap1234 { - yyl3204 := r.ReadMapStart() - if yyl3204 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3204, d) - } - } else if yyct3204 == codecSelferValueTypeArray1234 { - yyl3204 := r.ReadArrayStart() - if yyl3204 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3204, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PreferAvoidPodsEntry) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3205Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3205Slc - var yyhl3205 bool = l >= 0 - for yyj3205 := 0; ; yyj3205++ { - if yyhl3205 { - if yyj3205 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3205Slc = r.DecodeBytes(yys3205Slc, true, true) - yys3205 := string(yys3205Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3205 { - case "podSignature": - if r.TryDecodeAsNil() { - x.PodSignature = PodSignature{} - } else { - yyv3206 := &x.PodSignature - yyv3206.CodecDecodeSelf(d) - } - case "evictionTime": - if r.TryDecodeAsNil() { - x.EvictionTime = pkg2_unversioned.Time{} - } else { - yyv3207 := &x.EvictionTime - yym3208 := z.DecBinary() - _ = yym3208 - if false { 
- } else if z.HasExtensions() && z.DecExt(yyv3207) { - } else if yym3208 { - z.DecBinaryUnmarshal(yyv3207) - } else if !yym3208 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv3207) - } else { - z.DecFallback(yyv3207, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3205) - } // end switch yys3205 - } // end for yyj3205 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PreferAvoidPodsEntry) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3211 int - var yyb3211 bool - var yyhl3211 bool = l >= 0 - yyj3211++ - if yyhl3211 { - yyb3211 = yyj3211 > l - } else { - yyb3211 = r.CheckBreak() - } - if yyb3211 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodSignature = PodSignature{} - } else { - yyv3212 := &x.PodSignature - yyv3212.CodecDecodeSelf(d) - } - yyj3211++ - if yyhl3211 { - yyb3211 = yyj3211 > l - } else { - yyb3211 = r.CheckBreak() - } - if yyb3211 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EvictionTime = pkg2_unversioned.Time{} - } else { - yyv3213 := &x.EvictionTime - yym3214 := z.DecBinary() - _ = yym3214 - if false { - } else if z.HasExtensions() && z.DecExt(yyv3213) { - } else if yym3214 { - z.DecBinaryUnmarshal(yyv3213) - } else if !yym3214 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv3213) - } else { - z.DecFallback(yyv3213, false) - } - } - yyj3211++ - if yyhl3211 { - yyb3211 = yyj3211 > l - } else { - yyb3211 = r.CheckBreak() - } - if yyb3211 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj3211++ - if yyhl3211 { - yyb3211 = yyj3211 > l - } else { - yyb3211 = r.CheckBreak() - } - if yyb3211 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj3211++ - if yyhl3211 { - yyb3211 = yyj3211 > l - } else { - yyb3211 = r.CheckBreak() - } - if yyb3211 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3211-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodSignature) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3217 := z.EncBinary() - _ = yym3217 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3218 := !z.EncBinary() - yy2arr3218 := z.EncBasicHandle().StructToArray - var yyq3218 [1]bool - _, _, _ = yysep3218, yyq3218, yy2arr3218 - const yyr3218 bool = false - yyq3218[0] = x.PodController != nil - var yynn3218 int - if yyr3218 || yy2arr3218 { - r.EncodeArrayStart(1) - } else { - yynn3218 = 0 - for _, b := range yyq3218 { - if b { - yynn3218++ - } - } - 
r.EncodeMapStart(yynn3218) - yynn3218 = 0 - } - if yyr3218 || yy2arr3218 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3218[0] { - if x.PodController == nil { - r.EncodeNil() - } else { - x.PodController.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq3218[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podController")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PodController == nil { - r.EncodeNil() - } else { - x.PodController.CodecEncodeSelf(e) - } - } - } - if yyr3218 || yy2arr3218 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSignature) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3220 := z.DecBinary() - _ = yym3220 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3221 := r.ContainerType() - if yyct3221 == codecSelferValueTypeMap1234 { - yyl3221 := r.ReadMapStart() - if yyl3221 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3221, d) - } - } else if yyct3221 == codecSelferValueTypeArray1234 { - yyl3221 := r.ReadArrayStart() - if yyl3221 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3221, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSignature) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3222Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3222Slc - var yyhl3222 bool = l >= 0 - for yyj3222 := 0; ; yyj3222++ { - if yyhl3222 { - if yyj3222 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3222Slc = r.DecodeBytes(yys3222Slc, true, true) - yys3222 := string(yys3222Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3222 { - case "podController": - if r.TryDecodeAsNil() { - if x.PodController != nil { - x.PodController = nil - } - } else { - if x.PodController == nil { - x.PodController = new(OwnerReference) - } - x.PodController.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3222) - } // end switch yys3222 - } // end for yyj3222 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSignature) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3224 int - var yyb3224 bool - var yyhl3224 bool = l >= 0 - yyj3224++ - if yyhl3224 { - yyb3224 = yyj3224 > l - } else { - yyb3224 = r.CheckBreak() - } - if yyb3224 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PodController != nil { - x.PodController = nil - } - } else { - if x.PodController == nil { - x.PodController = new(OwnerReference) - } - x.PodController.CodecDecodeSelf(d) - } - for { - yyj3224++ - if yyhl3224 { - yyb3224 = yyj3224 > l - } else { - yyb3224 = r.CheckBreak() - } - if yyb3224 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3224-1, "") - } - 
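
The PodSignature code above shows the generated handling of an optional pointer field: the encoder skips podController entirely when the pointer is nil (yyq[0] = x.PodController != nil), while the decoder allocates it with new(OwnerReference) only when a value is present and resets it to nil when the wire carries null. A plain encoding/json sketch of the same behaviour follows, with a hypothetical stand-in for OwnerReference.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // ownerRefSketch is a hypothetical stand-in for the OwnerReference type used above.
    type ownerRefSketch struct {
        Kind string `json:"kind"`
        Name string `json:"name"`
    }

    type podSignatureSketch struct {
        PodController *ownerRefSketch `json:"podController,omitempty"`
    }

    func main() {
        // nil pointer: the key is omitted, matching the non-nil check in the encoder above.
        b, _ := json.Marshal(podSignatureSketch{})
        fmt.Println(string(b)) // {}

        // a present value allocates the pointer on decode, like the new(OwnerReference) branch.
        var s podSignatureSketch
        _ = json.Unmarshal([]byte(`{"podController":{"kind":"ReplicationController","name":"rc-1"}}`), &s)
        fmt.Println(s.PodController != nil, s.PodController.Name) // true rc-1
    }
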
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerImage) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3226 := z.EncBinary() - _ = yym3226 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3227 := !z.EncBinary() - yy2arr3227 := z.EncBasicHandle().StructToArray - var yyq3227 [2]bool - _, _, _ = yysep3227, yyq3227, yy2arr3227 - const yyr3227 bool = false - yyq3227[1] = x.SizeBytes != 0 - var yynn3227 int - if yyr3227 || yy2arr3227 { - r.EncodeArrayStart(2) - } else { - yynn3227 = 1 - for _, b := range yyq3227 { - if b { - yynn3227++ - } - } - r.EncodeMapStart(yynn3227) - yynn3227 = 0 - } - if yyr3227 || yy2arr3227 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Names == nil { - r.EncodeNil() - } else { - yym3229 := z.EncBinary() - _ = yym3229 - if false { - } else { - z.F.EncSliceStringV(x.Names, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("names")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Names == nil { - r.EncodeNil() - } else { - yym3230 := z.EncBinary() - _ = yym3230 - if false { - } else { - z.F.EncSliceStringV(x.Names, false, e) - } - } - } - if yyr3227 || yy2arr3227 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3227[1] { - yym3232 := z.EncBinary() - _ = yym3232 - if false { - } else { - r.EncodeInt(int64(x.SizeBytes)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq3227[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("sizeBytes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3233 := z.EncBinary() - _ = yym3233 - if false { - } else { - r.EncodeInt(int64(x.SizeBytes)) - } - } - } - if yyr3227 || yy2arr3227 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerImage) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3234 := z.DecBinary() - _ = yym3234 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3235 := r.ContainerType() - if yyct3235 == codecSelferValueTypeMap1234 { - yyl3235 := r.ReadMapStart() - if yyl3235 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3235, d) - } - } else if yyct3235 == codecSelferValueTypeArray1234 { - yyl3235 := r.ReadArrayStart() - if yyl3235 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3235, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerImage) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3236Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3236Slc - var yyhl3236 bool = l >= 0 - for yyj3236 := 0; ; yyj3236++ { - if yyhl3236 { - if yyj3236 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3236Slc = r.DecodeBytes(yys3236Slc, true, true) - yys3236 := string(yys3236Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3236 { - case "names": - if r.TryDecodeAsNil() { - x.Names = nil - } else { - yyv3237 := &x.Names - yym3238 := z.DecBinary() - _ = yym3238 - if false { - } else { - z.F.DecSliceStringX(yyv3237, false, d) - } - } - case "sizeBytes": - if r.TryDecodeAsNil() { - x.SizeBytes = 0 - } else { - x.SizeBytes = int64(r.DecodeInt(64)) - } - default: - z.DecStructFieldNotFound(-1, yys3236) - } // end switch yys3236 - } // end for yyj3236 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerImage) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3240 int - var yyb3240 bool - var yyhl3240 bool = l >= 0 - yyj3240++ - if yyhl3240 { - yyb3240 = yyj3240 > l - } else { - yyb3240 = r.CheckBreak() - } - if yyb3240 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Names = nil - } else { - yyv3241 := &x.Names - yym3242 := z.DecBinary() - _ = yym3242 - if false { - } else { - z.F.DecSliceStringX(yyv3241, false, d) - } - } - yyj3240++ - if yyhl3240 { - yyb3240 = yyj3240 > l - } else { - yyb3240 = r.CheckBreak() - } - if yyb3240 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SizeBytes = 0 - } else { - x.SizeBytes = int64(r.DecodeInt(64)) - } - for { - yyj3240++ - if yyhl3240 { - yyb3240 = yyj3240 > l - } else { - yyb3240 = r.CheckBreak() - } - if yyb3240 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3240-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x NodePhase) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym3244 := z.EncBinary() - _ = yym3244 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *NodePhase) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3245 := z.DecBinary() - _ = yym3245 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x NodeConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym3246 := z.EncBinary() - _ = yym3246 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *NodeConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3247 := z.DecBinary() - _ = yym3247 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *NodeCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3248 := z.EncBinary() - _ = yym3248 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3249 := !z.EncBinary() - yy2arr3249 := z.EncBasicHandle().StructToArray - var yyq3249 [6]bool - _, _, _ = 
yysep3249, yyq3249, yy2arr3249 - const yyr3249 bool = false - yyq3249[2] = true - yyq3249[3] = true - yyq3249[4] = x.Reason != "" - yyq3249[5] = x.Message != "" - var yynn3249 int - if yyr3249 || yy2arr3249 { - r.EncodeArrayStart(6) - } else { - yynn3249 = 2 - for _, b := range yyq3249 { - if b { - yynn3249++ - } - } - r.EncodeMapStart(yynn3249) - yynn3249 = 0 - } - if yyr3249 || yy2arr3249 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr3249 || yy2arr3249 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Status.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Status.CodecEncodeSelf(e) - } - if yyr3249 || yy2arr3249 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3249[2] { - yy3253 := &x.LastHeartbeatTime - yym3254 := z.EncBinary() - _ = yym3254 - if false { - } else if z.HasExtensions() && z.EncExt(yy3253) { - } else if yym3254 { - z.EncBinaryMarshal(yy3253) - } else if !yym3254 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3253) - } else { - z.EncFallback(yy3253) - } - } else { - r.EncodeNil() - } - } else { - if yyq3249[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastHeartbeatTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3255 := &x.LastHeartbeatTime - yym3256 := z.EncBinary() - _ = yym3256 - if false { - } else if z.HasExtensions() && z.EncExt(yy3255) { - } else if yym3256 { - z.EncBinaryMarshal(yy3255) - } else if !yym3256 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3255) - } else { - z.EncFallback(yy3255) - } - } - } - if yyr3249 || yy2arr3249 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3249[3] { - yy3258 := &x.LastTransitionTime - yym3259 := z.EncBinary() - _ = yym3259 - if false { - } else if z.HasExtensions() && z.EncExt(yy3258) { - } else if yym3259 { - z.EncBinaryMarshal(yy3258) - } else if !yym3259 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3258) - } else { - z.EncFallback(yy3258) - } - } else { - r.EncodeNil() - } - } else { - if yyq3249[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3260 := &x.LastTransitionTime - yym3261 := z.EncBinary() - _ = yym3261 - if false { - } else if z.HasExtensions() && z.EncExt(yy3260) { - } else if yym3261 { - z.EncBinaryMarshal(yy3260) - } else if !yym3261 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3260) - } else { - z.EncFallback(yy3260) - } - } - } - if yyr3249 || yy2arr3249 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3249[4] { - yym3263 := z.EncBinary() - _ = yym3263 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3249[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3264 := z.EncBinary() - _ = yym3264 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr3249 || yy2arr3249 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3249[5] { - yym3266 := z.EncBinary() - _ = yym3266 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3249[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3267 := z.EncBinary() - _ = yym3267 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr3249 || yy2arr3249 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3268 := z.DecBinary() - _ = yym3268 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3269 := r.ContainerType() - if yyct3269 == codecSelferValueTypeMap1234 { - yyl3269 := r.ReadMapStart() - if yyl3269 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3269, d) - } - } else if yyct3269 == codecSelferValueTypeArray1234 { - yyl3269 := r.ReadArrayStart() - if yyl3269 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3269, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3270Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3270Slc - var yyhl3270 bool = l >= 0 - for yyj3270 := 0; ; yyj3270++ { - if yyhl3270 { - if yyj3270 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3270Slc = r.DecodeBytes(yys3270Slc, true, true) - yys3270 := string(yys3270Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3270 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = NodeConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = ConditionStatus(r.DecodeString()) - } - case "lastHeartbeatTime": - if r.TryDecodeAsNil() { - x.LastHeartbeatTime = pkg2_unversioned.Time{} - } else { - yyv3273 := &x.LastHeartbeatTime - yym3274 := z.DecBinary() - _ = yym3274 - if false { - } else if z.HasExtensions() && z.DecExt(yyv3273) { - } else if yym3274 { - z.DecBinaryUnmarshal(yyv3273) - } else if !yym3274 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv3273) - } else { - z.DecFallback(yyv3273, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv3275 := &x.LastTransitionTime - yym3276 := z.DecBinary() - _ = yym3276 - if false { - } else if z.HasExtensions() && z.DecExt(yyv3275) { - } else if yym3276 { - z.DecBinaryUnmarshal(yyv3275) - } else if !yym3276 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv3275) - } else { - z.DecFallback(yyv3275, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case 
"message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3270) - } // end switch yys3270 - } // end for yyj3270 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3279 int - var yyb3279 bool - var yyhl3279 bool = l >= 0 - yyj3279++ - if yyhl3279 { - yyb3279 = yyj3279 > l - } else { - yyb3279 = r.CheckBreak() - } - if yyb3279 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = NodeConditionType(r.DecodeString()) - } - yyj3279++ - if yyhl3279 { - yyb3279 = yyj3279 > l - } else { - yyb3279 = r.CheckBreak() - } - if yyb3279 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = ConditionStatus(r.DecodeString()) - } - yyj3279++ - if yyhl3279 { - yyb3279 = yyj3279 > l - } else { - yyb3279 = r.CheckBreak() - } - if yyb3279 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastHeartbeatTime = pkg2_unversioned.Time{} - } else { - yyv3282 := &x.LastHeartbeatTime - yym3283 := z.DecBinary() - _ = yym3283 - if false { - } else if z.HasExtensions() && z.DecExt(yyv3282) { - } else if yym3283 { - z.DecBinaryUnmarshal(yyv3282) - } else if !yym3283 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv3282) - } else { - z.DecFallback(yyv3282, false) - } - } - yyj3279++ - if yyhl3279 { - yyb3279 = yyj3279 > l - } else { - yyb3279 = r.CheckBreak() - } - if yyb3279 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv3284 := &x.LastTransitionTime - yym3285 := z.DecBinary() - _ = yym3285 - if false { - } else if z.HasExtensions() && z.DecExt(yyv3284) { - } else if yym3285 { - z.DecBinaryUnmarshal(yyv3284) - } else if !yym3285 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv3284) - } else { - z.DecFallback(yyv3284, false) - } - } - yyj3279++ - if yyhl3279 { - yyb3279 = yyj3279 > l - } else { - yyb3279 = r.CheckBreak() - } - if yyb3279 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj3279++ - if yyhl3279 { - yyb3279 = yyj3279 > l - } else { - yyb3279 = r.CheckBreak() - } - if yyb3279 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj3279++ - if yyhl3279 { - yyb3279 = yyj3279 > l - } else { - yyb3279 = r.CheckBreak() - } - if yyb3279 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3279-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x NodeAddressType) CodecEncodeSelf(e *codec1978.Encoder) { - 
var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym3288 := z.EncBinary() - _ = yym3288 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *NodeAddressType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3289 := z.DecBinary() - _ = yym3289 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *NodeAddress) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3290 := z.EncBinary() - _ = yym3290 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3291 := !z.EncBinary() - yy2arr3291 := z.EncBasicHandle().StructToArray - var yyq3291 [2]bool - _, _, _ = yysep3291, yyq3291, yy2arr3291 - const yyr3291 bool = false - var yynn3291 int - if yyr3291 || yy2arr3291 { - r.EncodeArrayStart(2) - } else { - yynn3291 = 2 - for _, b := range yyq3291 { - if b { - yynn3291++ - } - } - r.EncodeMapStart(yynn3291) - yynn3291 = 0 - } - if yyr3291 || yy2arr3291 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr3291 || yy2arr3291 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3294 := z.EncBinary() - _ = yym3294 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Address)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("address")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3295 := z.EncBinary() - _ = yym3295 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Address)) - } - } - if yyr3291 || yy2arr3291 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeAddress) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3296 := z.DecBinary() - _ = yym3296 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3297 := r.ContainerType() - if yyct3297 == codecSelferValueTypeMap1234 { - yyl3297 := r.ReadMapStart() - if yyl3297 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3297, d) - } - } else if yyct3297 == codecSelferValueTypeArray1234 { - yyl3297 := r.ReadArrayStart() - if yyl3297 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3297, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3298Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3298Slc - var yyhl3298 bool = l >= 0 - for yyj3298 := 0; ; yyj3298++ { - if yyhl3298 { - if yyj3298 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3298Slc = r.DecodeBytes(yys3298Slc, true, true) - yys3298 := string(yys3298Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3298 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = NodeAddressType(r.DecodeString()) - } - case "address": - if r.TryDecodeAsNil() { - x.Address = "" - } else { - x.Address = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3298) - } // end switch yys3298 - } // end for yyj3298 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3301 int - var yyb3301 bool - var yyhl3301 bool = l >= 0 - yyj3301++ - if yyhl3301 { - yyb3301 = yyj3301 > l - } else { - yyb3301 = r.CheckBreak() - } - if yyb3301 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = NodeAddressType(r.DecodeString()) - } - yyj3301++ - if yyhl3301 { - yyb3301 = yyj3301 > l - } else { - yyb3301 = r.CheckBreak() - } - if yyb3301 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Address = "" - } else { - x.Address = string(r.DecodeString()) - } - for { - yyj3301++ - if yyhl3301 { - yyb3301 = yyj3301 > l - } else { - yyb3301 = r.CheckBreak() - } - if yyb3301 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3301-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeResources) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3304 := z.EncBinary() - _ = yym3304 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3305 := !z.EncBinary() - yy2arr3305 := z.EncBasicHandle().StructToArray - var yyq3305 [1]bool - _, _, _ = yysep3305, yyq3305, yy2arr3305 - const yyr3305 bool = false - yyq3305[0] = len(x.Capacity) != 0 - var yynn3305 int - if yyr3305 || yy2arr3305 { - r.EncodeArrayStart(1) - } else { - yynn3305 = 0 - for _, b := range yyq3305 { - if b { - yynn3305++ - } - } - r.EncodeMapStart(yynn3305) - yynn3305 = 0 - } - if yyr3305 || yy2arr3305 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3305[0] { - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq3305[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("capacity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } - } - if yyr3305 || yy2arr3305 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeResources) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3307 := z.DecBinary() - _ = yym3307 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3308 := 
r.ContainerType() - if yyct3308 == codecSelferValueTypeMap1234 { - yyl3308 := r.ReadMapStart() - if yyl3308 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3308, d) - } - } else if yyct3308 == codecSelferValueTypeArray1234 { - yyl3308 := r.ReadArrayStart() - if yyl3308 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3308, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeResources) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3309Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3309Slc - var yyhl3309 bool = l >= 0 - for yyj3309 := 0; ; yyj3309++ { - if yyhl3309 { - if yyj3309 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3309Slc = r.DecodeBytes(yys3309Slc, true, true) - yys3309 := string(yys3309Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3309 { - case "capacity": - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv3310 := &x.Capacity - yyv3310.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3309) - } // end switch yys3309 - } // end for yyj3309 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeResources) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3311 int - var yyb3311 bool - var yyhl3311 bool = l >= 0 - yyj3311++ - if yyhl3311 { - yyb3311 = yyj3311 > l - } else { - yyb3311 = r.CheckBreak() - } - if yyb3311 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv3312 := &x.Capacity - yyv3312.CodecDecodeSelf(d) - } - for { - yyj3311++ - if yyhl3311 { - yyb3311 = yyj3311 > l - } else { - yyb3311 = r.CheckBreak() - } - if yyb3311 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3311-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ResourceName) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym3313 := z.EncBinary() - _ = yym3313 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ResourceName) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3314 := z.DecBinary() - _ = yym3314 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x ResourceList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3315 := z.EncBinary() - _ = yym3315 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - h.encResourceList((ResourceList)(x), e) - } - } -} - -func (x *ResourceList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3316 := z.DecBinary() - _ = yym3316 - if false { - } else if 
z.HasExtensions() && z.DecExt(x) { - } else { - h.decResourceList((*ResourceList)(x), d) - } -} - -func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3317 := z.EncBinary() - _ = yym3317 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3318 := !z.EncBinary() - yy2arr3318 := z.EncBasicHandle().StructToArray - var yyq3318 [5]bool - _, _, _ = yysep3318, yyq3318, yy2arr3318 - const yyr3318 bool = false - yyq3318[0] = x.Kind != "" - yyq3318[1] = x.APIVersion != "" - yyq3318[2] = true - yyq3318[3] = true - yyq3318[4] = true - var yynn3318 int - if yyr3318 || yy2arr3318 { - r.EncodeArrayStart(5) - } else { - yynn3318 = 0 - for _, b := range yyq3318 { - if b { - yynn3318++ - } - } - r.EncodeMapStart(yynn3318) - yynn3318 = 0 - } - if yyr3318 || yy2arr3318 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3318[0] { - yym3320 := z.EncBinary() - _ = yym3320 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3318[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3321 := z.EncBinary() - _ = yym3321 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr3318 || yy2arr3318 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3318[1] { - yym3323 := z.EncBinary() - _ = yym3323 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3318[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3324 := z.EncBinary() - _ = yym3324 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr3318 || yy2arr3318 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3318[2] { - yy3326 := &x.ObjectMeta - yy3326.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq3318[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3327 := &x.ObjectMeta - yy3327.CodecEncodeSelf(e) - } - } - if yyr3318 || yy2arr3318 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3318[3] { - yy3329 := &x.Spec - yy3329.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq3318[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3330 := &x.Spec - yy3330.CodecEncodeSelf(e) - } - } - if yyr3318 || yy2arr3318 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3318[4] { - yy3332 := &x.Status - yy3332.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq3318[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3333 := &x.Status - yy3333.CodecEncodeSelf(e) - } - } - if 
yyr3318 || yy2arr3318 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3334 := z.DecBinary() - _ = yym3334 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3335 := r.ContainerType() - if yyct3335 == codecSelferValueTypeMap1234 { - yyl3335 := r.ReadMapStart() - if yyl3335 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3335, d) - } - } else if yyct3335 == codecSelferValueTypeArray1234 { - yyl3335 := r.ReadArrayStart() - if yyl3335 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3335, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3336Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3336Slc - var yyhl3336 bool = l >= 0 - for yyj3336 := 0; ; yyj3336++ { - if yyhl3336 { - if yyj3336 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3336Slc = r.DecodeBytes(yys3336Slc, true, true) - yys3336 := string(yys3336Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3336 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv3339 := &x.ObjectMeta - yyv3339.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = NodeSpec{} - } else { - yyv3340 := &x.Spec - yyv3340.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = NodeStatus{} - } else { - yyv3341 := &x.Status - yyv3341.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3336) - } // end switch yys3336 - } // end for yyj3336 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3342 int - var yyb3342 bool - var yyhl3342 bool = l >= 0 - yyj3342++ - if yyhl3342 { - yyb3342 = yyj3342 > l - } else { - yyb3342 = r.CheckBreak() - } - if yyb3342 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj3342++ - if yyhl3342 { - yyb3342 = yyj3342 > l - } else { - yyb3342 = r.CheckBreak() - } - if yyb3342 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj3342++ - if yyhl3342 { - yyb3342 = yyj3342 > l - } else { - yyb3342 = r.CheckBreak() - } - if yyb3342 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - 
if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv3345 := &x.ObjectMeta - yyv3345.CodecDecodeSelf(d) - } - yyj3342++ - if yyhl3342 { - yyb3342 = yyj3342 > l - } else { - yyb3342 = r.CheckBreak() - } - if yyb3342 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = NodeSpec{} - } else { - yyv3346 := &x.Spec - yyv3346.CodecDecodeSelf(d) - } - yyj3342++ - if yyhl3342 { - yyb3342 = yyj3342 > l - } else { - yyb3342 = r.CheckBreak() - } - if yyb3342 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = NodeStatus{} - } else { - yyv3347 := &x.Status - yyv3347.CodecDecodeSelf(d) - } - for { - yyj3342++ - if yyhl3342 { - yyb3342 = yyj3342 > l - } else { - yyb3342 = r.CheckBreak() - } - if yyb3342 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3342-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3348 := z.EncBinary() - _ = yym3348 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3349 := !z.EncBinary() - yy2arr3349 := z.EncBasicHandle().StructToArray - var yyq3349 [4]bool - _, _, _ = yysep3349, yyq3349, yy2arr3349 - const yyr3349 bool = false - yyq3349[0] = x.Kind != "" - yyq3349[1] = x.APIVersion != "" - yyq3349[2] = true - var yynn3349 int - if yyr3349 || yy2arr3349 { - r.EncodeArrayStart(4) - } else { - yynn3349 = 1 - for _, b := range yyq3349 { - if b { - yynn3349++ - } - } - r.EncodeMapStart(yynn3349) - yynn3349 = 0 - } - if yyr3349 || yy2arr3349 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3349[0] { - yym3351 := z.EncBinary() - _ = yym3351 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3349[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3352 := z.EncBinary() - _ = yym3352 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr3349 || yy2arr3349 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3349[1] { - yym3354 := z.EncBinary() - _ = yym3354 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3349[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3355 := z.EncBinary() - _ = yym3355 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr3349 || yy2arr3349 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3349[2] { - yy3357 := &x.ListMeta - yym3358 := z.EncBinary() - _ = yym3358 - if false { - } else if z.HasExtensions() && z.EncExt(yy3357) { - } else { - z.EncFallback(yy3357) - } - } else { - r.EncodeNil() - } - } else { - if yyq3349[2] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3359 := &x.ListMeta - yym3360 := z.EncBinary() - _ = yym3360 - if false { - } else if z.HasExtensions() && z.EncExt(yy3359) { - } else { - z.EncFallback(yy3359) - } - } - } - if yyr3349 || yy2arr3349 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym3362 := z.EncBinary() - _ = yym3362 - if false { - } else { - h.encSliceNode(([]Node)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym3363 := z.EncBinary() - _ = yym3363 - if false { - } else { - h.encSliceNode(([]Node)(x.Items), e) - } - } - } - if yyr3349 || yy2arr3349 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3364 := z.DecBinary() - _ = yym3364 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3365 := r.ContainerType() - if yyct3365 == codecSelferValueTypeMap1234 { - yyl3365 := r.ReadMapStart() - if yyl3365 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3365, d) - } - } else if yyct3365 == codecSelferValueTypeArray1234 { - yyl3365 := r.ReadArrayStart() - if yyl3365 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3365, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3366Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3366Slc - var yyhl3366 bool = l >= 0 - for yyj3366 := 0; ; yyj3366++ { - if yyhl3366 { - if yyj3366 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3366Slc = r.DecodeBytes(yys3366Slc, true, true) - yys3366 := string(yys3366Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3366 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv3369 := &x.ListMeta - yym3370 := z.DecBinary() - _ = yym3370 - if false { - } else if z.HasExtensions() && z.DecExt(yyv3369) { - } else { - z.DecFallback(yyv3369, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv3371 := &x.Items - yym3372 := z.DecBinary() - _ = yym3372 - if false { - } else { - h.decSliceNode((*[]Node)(yyv3371), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3366) - } // end switch yys3366 - } // end for yyj3366 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - 
z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3373 int - var yyb3373 bool - var yyhl3373 bool = l >= 0 - yyj3373++ - if yyhl3373 { - yyb3373 = yyj3373 > l - } else { - yyb3373 = r.CheckBreak() - } - if yyb3373 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj3373++ - if yyhl3373 { - yyb3373 = yyj3373 > l - } else { - yyb3373 = r.CheckBreak() - } - if yyb3373 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj3373++ - if yyhl3373 { - yyb3373 = yyj3373 > l - } else { - yyb3373 = r.CheckBreak() - } - if yyb3373 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv3376 := &x.ListMeta - yym3377 := z.DecBinary() - _ = yym3377 - if false { - } else if z.HasExtensions() && z.DecExt(yyv3376) { - } else { - z.DecFallback(yyv3376, false) - } - } - yyj3373++ - if yyhl3373 { - yyb3373 = yyj3373 > l - } else { - yyb3373 = r.CheckBreak() - } - if yyb3373 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv3378 := &x.Items - yym3379 := z.DecBinary() - _ = yym3379 - if false { - } else { - h.decSliceNode((*[]Node)(yyv3378), d) - } - } - for { - yyj3373++ - if yyhl3373 { - yyb3373 = yyj3373 > l - } else { - yyb3373 = r.CheckBreak() - } - if yyb3373 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3373-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NamespaceSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3380 := z.EncBinary() - _ = yym3380 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3381 := !z.EncBinary() - yy2arr3381 := z.EncBasicHandle().StructToArray - var yyq3381 [1]bool - _, _, _ = yysep3381, yyq3381, yy2arr3381 - const yyr3381 bool = false - var yynn3381 int - if yyr3381 || yy2arr3381 { - r.EncodeArrayStart(1) - } else { - yynn3381 = 1 - for _, b := range yyq3381 { - if b { - yynn3381++ - } - } - r.EncodeMapStart(yynn3381) - yynn3381 = 0 - } - if yyr3381 || yy2arr3381 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym3383 := z.EncBinary() - _ = yym3383 - if false { - } else { - h.encSliceFinalizerName(([]FinalizerName)(x.Finalizers), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Finalizers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym3384 := z.EncBinary() - _ = yym3384 - if false { - } else { - h.encSliceFinalizerName(([]FinalizerName)(x.Finalizers), e) - } - } - } - if yyr3381 || yy2arr3381 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NamespaceSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3385 := z.DecBinary() - _ = yym3385 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3386 := r.ContainerType() - if yyct3386 == codecSelferValueTypeMap1234 { - yyl3386 := r.ReadMapStart() - if yyl3386 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3386, d) - } - } else if yyct3386 == codecSelferValueTypeArray1234 { - yyl3386 := r.ReadArrayStart() - if yyl3386 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3386, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NamespaceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3387Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3387Slc - var yyhl3387 bool = l >= 0 - for yyj3387 := 0; ; yyj3387++ { - if yyhl3387 { - if yyj3387 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3387Slc = r.DecodeBytes(yys3387Slc, true, true) - yys3387 := string(yys3387Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3387 { - case "Finalizers": - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv3388 := &x.Finalizers - yym3389 := z.DecBinary() - _ = yym3389 - if false { - } else { - h.decSliceFinalizerName((*[]FinalizerName)(yyv3388), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3387) - } // end switch yys3387 - } // end for yyj3387 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NamespaceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3390 int - var yyb3390 bool - var yyhl3390 bool = l >= 0 - yyj3390++ - if yyhl3390 { - yyb3390 = yyj3390 > l - } else { - yyb3390 = r.CheckBreak() - } - if yyb3390 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv3391 := &x.Finalizers - yym3392 := z.DecBinary() - _ = yym3392 - if false { - } else { - h.decSliceFinalizerName((*[]FinalizerName)(yyv3391), d) - } - } - for { - yyj3390++ - if yyhl3390 { - yyb3390 = yyj3390 > l - } else { - yyb3390 = r.CheckBreak() - } - if yyb3390 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3390-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x FinalizerName) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym3393 := z.EncBinary() - _ = yym3393 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *FinalizerName) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3394 := z.DecBinary() - _ = yym3394 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x 
*NamespaceStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3395 := z.EncBinary() - _ = yym3395 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3396 := !z.EncBinary() - yy2arr3396 := z.EncBasicHandle().StructToArray - var yyq3396 [1]bool - _, _, _ = yysep3396, yyq3396, yy2arr3396 - const yyr3396 bool = false - yyq3396[0] = x.Phase != "" - var yynn3396 int - if yyr3396 || yy2arr3396 { - r.EncodeArrayStart(1) - } else { - yynn3396 = 0 - for _, b := range yyq3396 { - if b { - yynn3396++ - } - } - r.EncodeMapStart(yynn3396) - yynn3396 = 0 - } - if yyr3396 || yy2arr3396 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3396[0] { - x.Phase.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3396[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("phase")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Phase.CodecEncodeSelf(e) - } - } - if yyr3396 || yy2arr3396 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NamespaceStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3398 := z.DecBinary() - _ = yym3398 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3399 := r.ContainerType() - if yyct3399 == codecSelferValueTypeMap1234 { - yyl3399 := r.ReadMapStart() - if yyl3399 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3399, d) - } - } else if yyct3399 == codecSelferValueTypeArray1234 { - yyl3399 := r.ReadArrayStart() - if yyl3399 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3399, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NamespaceStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3400Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3400Slc - var yyhl3400 bool = l >= 0 - for yyj3400 := 0; ; yyj3400++ { - if yyhl3400 { - if yyj3400 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3400Slc = r.DecodeBytes(yys3400Slc, true, true) - yys3400 := string(yys3400Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3400 { - case "phase": - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = NamespacePhase(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3400) - } // end switch yys3400 - } // end for yyj3400 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NamespaceStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3402 int - var yyb3402 bool - var yyhl3402 bool = l >= 0 - yyj3402++ - if yyhl3402 { - yyb3402 = yyj3402 > l - } else { - yyb3402 = r.CheckBreak() - } - if yyb3402 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { 
- x.Phase = "" - } else { - x.Phase = NamespacePhase(r.DecodeString()) - } - for { - yyj3402++ - if yyhl3402 { - yyb3402 = yyj3402 > l - } else { - yyb3402 = r.CheckBreak() - } - if yyb3402 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3402-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x NamespacePhase) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym3404 := z.EncBinary() - _ = yym3404 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *NamespacePhase) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3405 := z.DecBinary() - _ = yym3405 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *Namespace) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3406 := z.EncBinary() - _ = yym3406 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3407 := !z.EncBinary() - yy2arr3407 := z.EncBasicHandle().StructToArray - var yyq3407 [5]bool - _, _, _ = yysep3407, yyq3407, yy2arr3407 - const yyr3407 bool = false - yyq3407[0] = x.Kind != "" - yyq3407[1] = x.APIVersion != "" - yyq3407[2] = true - yyq3407[3] = true - yyq3407[4] = true - var yynn3407 int - if yyr3407 || yy2arr3407 { - r.EncodeArrayStart(5) - } else { - yynn3407 = 0 - for _, b := range yyq3407 { - if b { - yynn3407++ - } - } - r.EncodeMapStart(yynn3407) - yynn3407 = 0 - } - if yyr3407 || yy2arr3407 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3407[0] { - yym3409 := z.EncBinary() - _ = yym3409 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3407[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3410 := z.EncBinary() - _ = yym3410 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr3407 || yy2arr3407 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3407[1] { - yym3412 := z.EncBinary() - _ = yym3412 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3407[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3413 := z.EncBinary() - _ = yym3413 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr3407 || yy2arr3407 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3407[2] { - yy3415 := &x.ObjectMeta - yy3415.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq3407[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3416 := &x.ObjectMeta - yy3416.CodecEncodeSelf(e) - } - } - 
if yyr3407 || yy2arr3407 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3407[3] { - yy3418 := &x.Spec - yy3418.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq3407[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3419 := &x.Spec - yy3419.CodecEncodeSelf(e) - } - } - if yyr3407 || yy2arr3407 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3407[4] { - yy3421 := &x.Status - yy3421.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq3407[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3422 := &x.Status - yy3422.CodecEncodeSelf(e) - } - } - if yyr3407 || yy2arr3407 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Namespace) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3423 := z.DecBinary() - _ = yym3423 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3424 := r.ContainerType() - if yyct3424 == codecSelferValueTypeMap1234 { - yyl3424 := r.ReadMapStart() - if yyl3424 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3424, d) - } - } else if yyct3424 == codecSelferValueTypeArray1234 { - yyl3424 := r.ReadArrayStart() - if yyl3424 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3424, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Namespace) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3425Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3425Slc - var yyhl3425 bool = l >= 0 - for yyj3425 := 0; ; yyj3425++ { - if yyhl3425 { - if yyj3425 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3425Slc = r.DecodeBytes(yys3425Slc, true, true) - yys3425 := string(yys3425Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3425 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv3428 := &x.ObjectMeta - yyv3428.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = NamespaceSpec{} - } else { - yyv3429 := &x.Spec - yyv3429.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = NamespaceStatus{} - } else { - yyv3430 := &x.Status - yyv3430.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3425) - } // end switch yys3425 - } // end for yyj3425 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Namespace) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3431 int - var yyb3431 bool - var yyhl3431 bool = l >= 
0 - yyj3431++ - if yyhl3431 { - yyb3431 = yyj3431 > l - } else { - yyb3431 = r.CheckBreak() - } - if yyb3431 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj3431++ - if yyhl3431 { - yyb3431 = yyj3431 > l - } else { - yyb3431 = r.CheckBreak() - } - if yyb3431 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj3431++ - if yyhl3431 { - yyb3431 = yyj3431 > l - } else { - yyb3431 = r.CheckBreak() - } - if yyb3431 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv3434 := &x.ObjectMeta - yyv3434.CodecDecodeSelf(d) - } - yyj3431++ - if yyhl3431 { - yyb3431 = yyj3431 > l - } else { - yyb3431 = r.CheckBreak() - } - if yyb3431 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = NamespaceSpec{} - } else { - yyv3435 := &x.Spec - yyv3435.CodecDecodeSelf(d) - } - yyj3431++ - if yyhl3431 { - yyb3431 = yyj3431 > l - } else { - yyb3431 = r.CheckBreak() - } - if yyb3431 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = NamespaceStatus{} - } else { - yyv3436 := &x.Status - yyv3436.CodecDecodeSelf(d) - } - for { - yyj3431++ - if yyhl3431 { - yyb3431 = yyj3431 > l - } else { - yyb3431 = r.CheckBreak() - } - if yyb3431 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3431-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NamespaceList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3437 := z.EncBinary() - _ = yym3437 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3438 := !z.EncBinary() - yy2arr3438 := z.EncBasicHandle().StructToArray - var yyq3438 [4]bool - _, _, _ = yysep3438, yyq3438, yy2arr3438 - const yyr3438 bool = false - yyq3438[0] = x.Kind != "" - yyq3438[1] = x.APIVersion != "" - yyq3438[2] = true - var yynn3438 int - if yyr3438 || yy2arr3438 { - r.EncodeArrayStart(4) - } else { - yynn3438 = 1 - for _, b := range yyq3438 { - if b { - yynn3438++ - } - } - r.EncodeMapStart(yynn3438) - yynn3438 = 0 - } - if yyr3438 || yy2arr3438 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3438[0] { - yym3440 := z.EncBinary() - _ = yym3440 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3438[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3441 := z.EncBinary() - _ = yym3441 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr3438 || yy2arr3438 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3438[1] { - yym3443 := z.EncBinary() - _ = yym3443 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3438[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3444 := z.EncBinary() - _ = yym3444 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr3438 || yy2arr3438 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3438[2] { - yy3446 := &x.ListMeta - yym3447 := z.EncBinary() - _ = yym3447 - if false { - } else if z.HasExtensions() && z.EncExt(yy3446) { - } else { - z.EncFallback(yy3446) - } - } else { - r.EncodeNil() - } - } else { - if yyq3438[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3448 := &x.ListMeta - yym3449 := z.EncBinary() - _ = yym3449 - if false { - } else if z.HasExtensions() && z.EncExt(yy3448) { - } else { - z.EncFallback(yy3448) - } - } - } - if yyr3438 || yy2arr3438 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym3451 := z.EncBinary() - _ = yym3451 - if false { - } else { - h.encSliceNamespace(([]Namespace)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym3452 := z.EncBinary() - _ = yym3452 - if false { - } else { - h.encSliceNamespace(([]Namespace)(x.Items), e) - } - } - } - if yyr3438 || yy2arr3438 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NamespaceList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3453 := z.DecBinary() - _ = yym3453 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3454 := r.ContainerType() - if yyct3454 == codecSelferValueTypeMap1234 { - yyl3454 := r.ReadMapStart() - if yyl3454 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3454, d) - } - } else if yyct3454 == codecSelferValueTypeArray1234 { - yyl3454 := r.ReadArrayStart() - if yyl3454 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3454, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NamespaceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3455Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3455Slc - var yyhl3455 bool = l >= 0 - for yyj3455 := 0; ; yyj3455++ { - if yyhl3455 { - if yyj3455 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3455Slc = r.DecodeBytes(yys3455Slc, true, true) - yys3455 := string(yys3455Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch 
yys3455 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv3458 := &x.ListMeta - yym3459 := z.DecBinary() - _ = yym3459 - if false { - } else if z.HasExtensions() && z.DecExt(yyv3458) { - } else { - z.DecFallback(yyv3458, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv3460 := &x.Items - yym3461 := z.DecBinary() - _ = yym3461 - if false { - } else { - h.decSliceNamespace((*[]Namespace)(yyv3460), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3455) - } // end switch yys3455 - } // end for yyj3455 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NamespaceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3462 int - var yyb3462 bool - var yyhl3462 bool = l >= 0 - yyj3462++ - if yyhl3462 { - yyb3462 = yyj3462 > l - } else { - yyb3462 = r.CheckBreak() - } - if yyb3462 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj3462++ - if yyhl3462 { - yyb3462 = yyj3462 > l - } else { - yyb3462 = r.CheckBreak() - } - if yyb3462 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj3462++ - if yyhl3462 { - yyb3462 = yyj3462 > l - } else { - yyb3462 = r.CheckBreak() - } - if yyb3462 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv3465 := &x.ListMeta - yym3466 := z.DecBinary() - _ = yym3466 - if false { - } else if z.HasExtensions() && z.DecExt(yyv3465) { - } else { - z.DecFallback(yyv3465, false) - } - } - yyj3462++ - if yyhl3462 { - yyb3462 = yyj3462 > l - } else { - yyb3462 = r.CheckBreak() - } - if yyb3462 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv3467 := &x.Items - yym3468 := z.DecBinary() - _ = yym3468 - if false { - } else { - h.decSliceNamespace((*[]Namespace)(yyv3467), d) - } - } - for { - yyj3462++ - if yyhl3462 { - yyb3462 = yyj3462 > l - } else { - yyb3462 = r.CheckBreak() - } - if yyb3462 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3462-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Binding) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3469 := z.EncBinary() - _ = yym3469 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3470 := !z.EncBinary() - yy2arr3470 := z.EncBasicHandle().StructToArray - var yyq3470 [4]bool - _, _, _ = yysep3470, yyq3470, yy2arr3470 - const yyr3470 bool = false - yyq3470[0] = 
x.Kind != "" - yyq3470[1] = x.APIVersion != "" - yyq3470[2] = true - var yynn3470 int - if yyr3470 || yy2arr3470 { - r.EncodeArrayStart(4) - } else { - yynn3470 = 1 - for _, b := range yyq3470 { - if b { - yynn3470++ - } - } - r.EncodeMapStart(yynn3470) - yynn3470 = 0 - } - if yyr3470 || yy2arr3470 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3470[0] { - yym3472 := z.EncBinary() - _ = yym3472 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3470[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3473 := z.EncBinary() - _ = yym3473 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr3470 || yy2arr3470 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3470[1] { - yym3475 := z.EncBinary() - _ = yym3475 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3470[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3476 := z.EncBinary() - _ = yym3476 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr3470 || yy2arr3470 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3470[2] { - yy3478 := &x.ObjectMeta - yy3478.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq3470[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3479 := &x.ObjectMeta - yy3479.CodecEncodeSelf(e) - } - } - if yyr3470 || yy2arr3470 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy3481 := &x.Target - yy3481.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("target")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3482 := &x.Target - yy3482.CodecEncodeSelf(e) - } - if yyr3470 || yy2arr3470 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Binding) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3483 := z.DecBinary() - _ = yym3483 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3484 := r.ContainerType() - if yyct3484 == codecSelferValueTypeMap1234 { - yyl3484 := r.ReadMapStart() - if yyl3484 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3484, d) - } - } else if yyct3484 == codecSelferValueTypeArray1234 { - yyl3484 := r.ReadArrayStart() - if yyl3484 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3484, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Binding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3485Slc 
= z.DecScratchBuffer() // default slice to decode into - _ = yys3485Slc - var yyhl3485 bool = l >= 0 - for yyj3485 := 0; ; yyj3485++ { - if yyhl3485 { - if yyj3485 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3485Slc = r.DecodeBytes(yys3485Slc, true, true) - yys3485 := string(yys3485Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3485 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv3488 := &x.ObjectMeta - yyv3488.CodecDecodeSelf(d) - } - case "target": - if r.TryDecodeAsNil() { - x.Target = ObjectReference{} - } else { - yyv3489 := &x.Target - yyv3489.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3485) - } // end switch yys3485 - } // end for yyj3485 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Binding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3490 int - var yyb3490 bool - var yyhl3490 bool = l >= 0 - yyj3490++ - if yyhl3490 { - yyb3490 = yyj3490 > l - } else { - yyb3490 = r.CheckBreak() - } - if yyb3490 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj3490++ - if yyhl3490 { - yyb3490 = yyj3490 > l - } else { - yyb3490 = r.CheckBreak() - } - if yyb3490 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj3490++ - if yyhl3490 { - yyb3490 = yyj3490 > l - } else { - yyb3490 = r.CheckBreak() - } - if yyb3490 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv3493 := &x.ObjectMeta - yyv3493.CodecDecodeSelf(d) - } - yyj3490++ - if yyhl3490 { - yyb3490 = yyj3490 > l - } else { - yyb3490 = r.CheckBreak() - } - if yyb3490 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Target = ObjectReference{} - } else { - yyv3494 := &x.Target - yyv3494.CodecDecodeSelf(d) - } - for { - yyj3490++ - if yyhl3490 { - yyb3490 = yyj3490 > l - } else { - yyb3490 = r.CheckBreak() - } - if yyb3490 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3490-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Preconditions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3495 := z.EncBinary() - _ = yym3495 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3496 := !z.EncBinary() - yy2arr3496 := z.EncBasicHandle().StructToArray - var yyq3496 [1]bool - _, _, _ = yysep3496, yyq3496, yy2arr3496 - const yyr3496 
bool = false - yyq3496[0] = x.UID != nil - var yynn3496 int - if yyr3496 || yy2arr3496 { - r.EncodeArrayStart(1) - } else { - yynn3496 = 0 - for _, b := range yyq3496 { - if b { - yynn3496++ - } - } - r.EncodeMapStart(yynn3496) - yynn3496 = 0 - } - if yyr3496 || yy2arr3496 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3496[0] { - if x.UID == nil { - r.EncodeNil() - } else { - yy3498 := *x.UID - yym3499 := z.EncBinary() - _ = yym3499 - if false { - } else if z.HasExtensions() && z.EncExt(yy3498) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yy3498)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq3496[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.UID == nil { - r.EncodeNil() - } else { - yy3500 := *x.UID - yym3501 := z.EncBinary() - _ = yym3501 - if false { - } else if z.HasExtensions() && z.EncExt(yy3500) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yy3500)) - } - } - } - } - if yyr3496 || yy2arr3496 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Preconditions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3502 := z.DecBinary() - _ = yym3502 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3503 := r.ContainerType() - if yyct3503 == codecSelferValueTypeMap1234 { - yyl3503 := r.ReadMapStart() - if yyl3503 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3503, d) - } - } else if yyct3503 == codecSelferValueTypeArray1234 { - yyl3503 := r.ReadArrayStart() - if yyl3503 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3503, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Preconditions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3504Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3504Slc - var yyhl3504 bool = l >= 0 - for yyj3504 := 0; ; yyj3504++ { - if yyhl3504 { - if yyj3504 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3504Slc = r.DecodeBytes(yys3504Slc, true, true) - yys3504 := string(yys3504Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3504 { - case "uid": - if r.TryDecodeAsNil() { - if x.UID != nil { - x.UID = nil - } - } else { - if x.UID == nil { - x.UID = new(pkg1_types.UID) - } - yym3506 := z.DecBinary() - _ = yym3506 - if false { - } else if z.HasExtensions() && z.DecExt(x.UID) { - } else { - *((*string)(x.UID)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3504) - } // end switch yys3504 - } // end for yyj3504 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Preconditions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3507 int - var yyb3507 bool - var yyhl3507 bool = l >= 0 - yyj3507++ - if yyhl3507 { - yyb3507 = yyj3507 > l - } else { - yyb3507 = r.CheckBreak() - } - if yyb3507 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.UID != nil { - x.UID = nil - } - } else { - if x.UID == nil { - x.UID = new(pkg1_types.UID) - } - yym3509 := z.DecBinary() - _ = yym3509 - if false { - } else if z.HasExtensions() && z.DecExt(x.UID) { - } else { - *((*string)(x.UID)) = r.DecodeString() - } - } - for { - yyj3507++ - if yyhl3507 { - yyb3507 = yyj3507 > l - } else { - yyb3507 = r.CheckBreak() - } - if yyb3507 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3507-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3510 := z.EncBinary() - _ = yym3510 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3511 := !z.EncBinary() - yy2arr3511 := z.EncBasicHandle().StructToArray - var yyq3511 [5]bool - _, _, _ = yysep3511, yyq3511, yy2arr3511 - const yyr3511 bool = false - yyq3511[0] = x.Kind != "" - yyq3511[1] = x.APIVersion != "" - yyq3511[2] = x.GracePeriodSeconds != nil - yyq3511[3] = x.Preconditions != nil - yyq3511[4] = x.OrphanDependents != nil - var yynn3511 int - if yyr3511 || yy2arr3511 { - r.EncodeArrayStart(5) - } else { - yynn3511 = 0 - for _, b := range yyq3511 { - if b { - yynn3511++ - } - } - r.EncodeMapStart(yynn3511) - yynn3511 = 0 - } - if yyr3511 || yy2arr3511 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3511[0] { - yym3513 := z.EncBinary() - _ = yym3513 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3511[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3514 := z.EncBinary() - _ = yym3514 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr3511 || yy2arr3511 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3511[1] { - yym3516 := z.EncBinary() - _ = yym3516 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3511[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3517 := z.EncBinary() - _ = yym3517 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr3511 || yy2arr3511 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3511[2] { - if x.GracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy3519 := *x.GracePeriodSeconds - yym3520 := z.EncBinary() - _ = yym3520 - if false { - } else { - r.EncodeInt(int64(yy3519)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq3511[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gracePeriodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.GracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy3521 := *x.GracePeriodSeconds - yym3522 := 
z.EncBinary() - _ = yym3522 - if false { - } else { - r.EncodeInt(int64(yy3521)) - } - } - } - } - if yyr3511 || yy2arr3511 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3511[3] { - if x.Preconditions == nil { - r.EncodeNil() - } else { - x.Preconditions.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq3511[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preconditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Preconditions == nil { - r.EncodeNil() - } else { - x.Preconditions.CodecEncodeSelf(e) - } - } - } - if yyr3511 || yy2arr3511 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3511[4] { - if x.OrphanDependents == nil { - r.EncodeNil() - } else { - yy3525 := *x.OrphanDependents - yym3526 := z.EncBinary() - _ = yym3526 - if false { - } else { - r.EncodeBool(bool(yy3525)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq3511[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("orphanDependents")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.OrphanDependents == nil { - r.EncodeNil() - } else { - yy3527 := *x.OrphanDependents - yym3528 := z.EncBinary() - _ = yym3528 - if false { - } else { - r.EncodeBool(bool(yy3527)) - } - } - } - } - if yyr3511 || yy2arr3511 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeleteOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3529 := z.DecBinary() - _ = yym3529 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3530 := r.ContainerType() - if yyct3530 == codecSelferValueTypeMap1234 { - yyl3530 := r.ReadMapStart() - if yyl3530 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3530, d) - } - } else if yyct3530 == codecSelferValueTypeArray1234 { - yyl3530 := r.ReadArrayStart() - if yyl3530 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3530, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3531Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3531Slc - var yyhl3531 bool = l >= 0 - for yyj3531 := 0; ; yyj3531++ { - if yyhl3531 { - if yyj3531 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3531Slc = r.DecodeBytes(yys3531Slc, true, true) - yys3531 := string(yys3531Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3531 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "gracePeriodSeconds": - if r.TryDecodeAsNil() { - if x.GracePeriodSeconds != nil { - x.GracePeriodSeconds = nil - } - } else { - if x.GracePeriodSeconds == nil { - x.GracePeriodSeconds = new(int64) - } - yym3535 := z.DecBinary() - _ = yym3535 - if false { - } else { - 
*((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - case "preconditions": - if r.TryDecodeAsNil() { - if x.Preconditions != nil { - x.Preconditions = nil - } - } else { - if x.Preconditions == nil { - x.Preconditions = new(Preconditions) - } - x.Preconditions.CodecDecodeSelf(d) - } - case "orphanDependents": - if r.TryDecodeAsNil() { - if x.OrphanDependents != nil { - x.OrphanDependents = nil - } - } else { - if x.OrphanDependents == nil { - x.OrphanDependents = new(bool) - } - yym3538 := z.DecBinary() - _ = yym3538 - if false { - } else { - *((*bool)(x.OrphanDependents)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3531) - } // end switch yys3531 - } // end for yyj3531 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3539 int - var yyb3539 bool - var yyhl3539 bool = l >= 0 - yyj3539++ - if yyhl3539 { - yyb3539 = yyj3539 > l - } else { - yyb3539 = r.CheckBreak() - } - if yyb3539 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj3539++ - if yyhl3539 { - yyb3539 = yyj3539 > l - } else { - yyb3539 = r.CheckBreak() - } - if yyb3539 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj3539++ - if yyhl3539 { - yyb3539 = yyj3539 > l - } else { - yyb3539 = r.CheckBreak() - } - if yyb3539 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GracePeriodSeconds != nil { - x.GracePeriodSeconds = nil - } - } else { - if x.GracePeriodSeconds == nil { - x.GracePeriodSeconds = new(int64) - } - yym3543 := z.DecBinary() - _ = yym3543 - if false { - } else { - *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj3539++ - if yyhl3539 { - yyb3539 = yyj3539 > l - } else { - yyb3539 = r.CheckBreak() - } - if yyb3539 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Preconditions != nil { - x.Preconditions = nil - } - } else { - if x.Preconditions == nil { - x.Preconditions = new(Preconditions) - } - x.Preconditions.CodecDecodeSelf(d) - } - yyj3539++ - if yyhl3539 { - yyb3539 = yyj3539 > l - } else { - yyb3539 = r.CheckBreak() - } - if yyb3539 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.OrphanDependents != nil { - x.OrphanDependents = nil - } - } else { - if x.OrphanDependents == nil { - x.OrphanDependents = new(bool) - } - yym3546 := z.DecBinary() - _ = yym3546 - if false { - } else { - *((*bool)(x.OrphanDependents)) = r.DecodeBool() - } - } - for { - yyj3539++ - if yyhl3539 { - yyb3539 = yyj3539 > l - } else { - yyb3539 = r.CheckBreak() - } - if yyb3539 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3539-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - 
-func (x *ExportOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3547 := z.EncBinary() - _ = yym3547 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3548 := !z.EncBinary() - yy2arr3548 := z.EncBasicHandle().StructToArray - var yyq3548 [4]bool - _, _, _ = yysep3548, yyq3548, yy2arr3548 - const yyr3548 bool = false - yyq3548[0] = x.Kind != "" - yyq3548[1] = x.APIVersion != "" - var yynn3548 int - if yyr3548 || yy2arr3548 { - r.EncodeArrayStart(4) - } else { - yynn3548 = 2 - for _, b := range yyq3548 { - if b { - yynn3548++ - } - } - r.EncodeMapStart(yynn3548) - yynn3548 = 0 - } - if yyr3548 || yy2arr3548 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3548[0] { - yym3550 := z.EncBinary() - _ = yym3550 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3548[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3551 := z.EncBinary() - _ = yym3551 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr3548 || yy2arr3548 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3548[1] { - yym3553 := z.EncBinary() - _ = yym3553 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3548[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3554 := z.EncBinary() - _ = yym3554 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr3548 || yy2arr3548 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3556 := z.EncBinary() - _ = yym3556 - if false { - } else { - r.EncodeBool(bool(x.Export)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("export")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3557 := z.EncBinary() - _ = yym3557 - if false { - } else { - r.EncodeBool(bool(x.Export)) - } - } - if yyr3548 || yy2arr3548 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3559 := z.EncBinary() - _ = yym3559 - if false { - } else { - r.EncodeBool(bool(x.Exact)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("exact")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3560 := z.EncBinary() - _ = yym3560 - if false { - } else { - r.EncodeBool(bool(x.Exact)) - } - } - if yyr3548 || yy2arr3548 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ExportOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3561 := z.DecBinary() - _ = yym3561 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3562 := r.ContainerType() - if yyct3562 == codecSelferValueTypeMap1234 { - yyl3562 := r.ReadMapStart() - if yyl3562 == 0 
{ - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3562, d) - } - } else if yyct3562 == codecSelferValueTypeArray1234 { - yyl3562 := r.ReadArrayStart() - if yyl3562 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3562, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ExportOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3563Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3563Slc - var yyhl3563 bool = l >= 0 - for yyj3563 := 0; ; yyj3563++ { - if yyhl3563 { - if yyj3563 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3563Slc = r.DecodeBytes(yys3563Slc, true, true) - yys3563 := string(yys3563Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3563 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "export": - if r.TryDecodeAsNil() { - x.Export = false - } else { - x.Export = bool(r.DecodeBool()) - } - case "exact": - if r.TryDecodeAsNil() { - x.Exact = false - } else { - x.Exact = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3563) - } // end switch yys3563 - } // end for yyj3563 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ExportOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3568 int - var yyb3568 bool - var yyhl3568 bool = l >= 0 - yyj3568++ - if yyhl3568 { - yyb3568 = yyj3568 > l - } else { - yyb3568 = r.CheckBreak() - } - if yyb3568 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj3568++ - if yyhl3568 { - yyb3568 = yyj3568 > l - } else { - yyb3568 = r.CheckBreak() - } - if yyb3568 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj3568++ - if yyhl3568 { - yyb3568 = yyj3568 > l - } else { - yyb3568 = r.CheckBreak() - } - if yyb3568 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Export = false - } else { - x.Export = bool(r.DecodeBool()) - } - yyj3568++ - if yyhl3568 { - yyb3568 = yyj3568 > l - } else { - yyb3568 = r.CheckBreak() - } - if yyb3568 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Exact = false - } else { - x.Exact = bool(r.DecodeBool()) - } - for { - yyj3568++ - if yyhl3568 { - yyb3568 = yyj3568 > l - } else { - yyb3568 = r.CheckBreak() - } - if yyb3568 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3568-1, "") - } - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ListOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3573 := z.EncBinary() - _ = yym3573 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3574 := !z.EncBinary() - yy2arr3574 := z.EncBasicHandle().StructToArray - var yyq3574 [7]bool - _, _, _ = yysep3574, yyq3574, yy2arr3574 - const yyr3574 bool = false - yyq3574[0] = x.Kind != "" - yyq3574[1] = x.APIVersion != "" - var yynn3574 int - if yyr3574 || yy2arr3574 { - r.EncodeArrayStart(7) - } else { - yynn3574 = 5 - for _, b := range yyq3574 { - if b { - yynn3574++ - } - } - r.EncodeMapStart(yynn3574) - yynn3574 = 0 - } - if yyr3574 || yy2arr3574 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3574[0] { - yym3576 := z.EncBinary() - _ = yym3576 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3574[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3577 := z.EncBinary() - _ = yym3577 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr3574 || yy2arr3574 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3574[1] { - yym3579 := z.EncBinary() - _ = yym3579 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3574[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3580 := z.EncBinary() - _ = yym3580 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr3574 || yy2arr3574 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.LabelSelector == nil { - r.EncodeNil() - } else { - yym3582 := z.EncBinary() - _ = yym3582 - if false { - } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { - } else { - z.EncFallback(x.LabelSelector) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("LabelSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LabelSelector == nil { - r.EncodeNil() - } else { - yym3583 := z.EncBinary() - _ = yym3583 - if false { - } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { - } else { - z.EncFallback(x.LabelSelector) - } - } - } - if yyr3574 || yy2arr3574 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.FieldSelector == nil { - r.EncodeNil() - } else { - yym3585 := z.EncBinary() - _ = yym3585 - if false { - } else if z.HasExtensions() && z.EncExt(x.FieldSelector) { - } else { - z.EncFallback(x.FieldSelector) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("FieldSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FieldSelector == nil { - r.EncodeNil() - } else { - yym3586 := z.EncBinary() - _ = yym3586 - if false { - } else if z.HasExtensions() && z.EncExt(x.FieldSelector) { - } else { - z.EncFallback(x.FieldSelector) - 
} - } - } - if yyr3574 || yy2arr3574 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3588 := z.EncBinary() - _ = yym3588 - if false { - } else { - r.EncodeBool(bool(x.Watch)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Watch")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3589 := z.EncBinary() - _ = yym3589 - if false { - } else { - r.EncodeBool(bool(x.Watch)) - } - } - if yyr3574 || yy2arr3574 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3591 := z.EncBinary() - _ = yym3591 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ResourceVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3592 := z.EncBinary() - _ = yym3592 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } - if yyr3574 || yy2arr3574 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.TimeoutSeconds == nil { - r.EncodeNil() - } else { - yy3594 := *x.TimeoutSeconds - yym3595 := z.EncBinary() - _ = yym3595 - if false { - } else { - r.EncodeInt(int64(yy3594)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("TimeoutSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TimeoutSeconds == nil { - r.EncodeNil() - } else { - yy3596 := *x.TimeoutSeconds - yym3597 := z.EncBinary() - _ = yym3597 - if false { - } else { - r.EncodeInt(int64(yy3596)) - } - } - } - if yyr3574 || yy2arr3574 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ListOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3598 := z.DecBinary() - _ = yym3598 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3599 := r.ContainerType() - if yyct3599 == codecSelferValueTypeMap1234 { - yyl3599 := r.ReadMapStart() - if yyl3599 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3599, d) - } - } else if yyct3599 == codecSelferValueTypeArray1234 { - yyl3599 := r.ReadArrayStart() - if yyl3599 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3599, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ListOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3600Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3600Slc - var yyhl3600 bool = l >= 0 - for yyj3600 := 0; ; yyj3600++ { - if yyhl3600 { - if yyj3600 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3600Slc = r.DecodeBytes(yys3600Slc, true, true) - yys3600 := string(yys3600Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3600 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = 
string(r.DecodeString()) - } - case "LabelSelector": - if r.TryDecodeAsNil() { - x.LabelSelector = nil - } else { - yyv3603 := &x.LabelSelector - yym3604 := z.DecBinary() - _ = yym3604 - if false { - } else if z.HasExtensions() && z.DecExt(yyv3603) { - } else { - z.DecFallback(yyv3603, true) - } - } - case "FieldSelector": - if r.TryDecodeAsNil() { - x.FieldSelector = nil - } else { - yyv3605 := &x.FieldSelector - yym3606 := z.DecBinary() - _ = yym3606 - if false { - } else if z.HasExtensions() && z.DecExt(yyv3605) { - } else { - z.DecFallback(yyv3605, true) - } - } - case "Watch": - if r.TryDecodeAsNil() { - x.Watch = false - } else { - x.Watch = bool(r.DecodeBool()) - } - case "ResourceVersion": - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - case "TimeoutSeconds": - if r.TryDecodeAsNil() { - if x.TimeoutSeconds != nil { - x.TimeoutSeconds = nil - } - } else { - if x.TimeoutSeconds == nil { - x.TimeoutSeconds = new(int64) - } - yym3610 := z.DecBinary() - _ = yym3610 - if false { - } else { - *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3600) - } // end switch yys3600 - } // end for yyj3600 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ListOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3611 int - var yyb3611 bool - var yyhl3611 bool = l >= 0 - yyj3611++ - if yyhl3611 { - yyb3611 = yyj3611 > l - } else { - yyb3611 = r.CheckBreak() - } - if yyb3611 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj3611++ - if yyhl3611 { - yyb3611 = yyj3611 > l - } else { - yyb3611 = r.CheckBreak() - } - if yyb3611 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj3611++ - if yyhl3611 { - yyb3611 = yyj3611 > l - } else { - yyb3611 = r.CheckBreak() - } - if yyb3611 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LabelSelector = nil - } else { - yyv3614 := &x.LabelSelector - yym3615 := z.DecBinary() - _ = yym3615 - if false { - } else if z.HasExtensions() && z.DecExt(yyv3614) { - } else { - z.DecFallback(yyv3614, true) - } - } - yyj3611++ - if yyhl3611 { - yyb3611 = yyj3611 > l - } else { - yyb3611 = r.CheckBreak() - } - if yyb3611 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FieldSelector = nil - } else { - yyv3616 := &x.FieldSelector - yym3617 := z.DecBinary() - _ = yym3617 - if false { - } else if z.HasExtensions() && z.DecExt(yyv3616) { - } else { - z.DecFallback(yyv3616, true) - } - } - yyj3611++ - if yyhl3611 { - yyb3611 = yyj3611 > l - } else { - yyb3611 = r.CheckBreak() - } - if yyb3611 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Watch = false - } else { - x.Watch = bool(r.DecodeBool()) - } - yyj3611++ - 
if yyhl3611 { - yyb3611 = yyj3611 > l - } else { - yyb3611 = r.CheckBreak() - } - if yyb3611 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - yyj3611++ - if yyhl3611 { - yyb3611 = yyj3611 > l - } else { - yyb3611 = r.CheckBreak() - } - if yyb3611 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TimeoutSeconds != nil { - x.TimeoutSeconds = nil - } - } else { - if x.TimeoutSeconds == nil { - x.TimeoutSeconds = new(int64) - } - yym3621 := z.DecBinary() - _ = yym3621 - if false { - } else { - *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64)) - } - } - for { - yyj3611++ - if yyhl3611 { - yyb3611 = yyj3611 > l - } else { - yyb3611 = r.CheckBreak() - } - if yyb3611 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3611-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodLogOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3622 := z.EncBinary() - _ = yym3622 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3623 := !z.EncBinary() - yy2arr3623 := z.EncBasicHandle().StructToArray - var yyq3623 [10]bool - _, _, _ = yysep3623, yyq3623, yy2arr3623 - const yyr3623 bool = false - yyq3623[0] = x.Kind != "" - yyq3623[1] = x.APIVersion != "" - var yynn3623 int - if yyr3623 || yy2arr3623 { - r.EncodeArrayStart(10) - } else { - yynn3623 = 8 - for _, b := range yyq3623 { - if b { - yynn3623++ - } - } - r.EncodeMapStart(yynn3623) - yynn3623 = 0 - } - if yyr3623 || yy2arr3623 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3623[0] { - yym3625 := z.EncBinary() - _ = yym3625 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3623[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3626 := z.EncBinary() - _ = yym3626 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr3623 || yy2arr3623 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3623[1] { - yym3628 := z.EncBinary() - _ = yym3628 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3623[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3629 := z.EncBinary() - _ = yym3629 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr3623 || yy2arr3623 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3631 := z.EncBinary() - _ = yym3631 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("Container")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3632 := z.EncBinary() - _ = yym3632 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } - if yyr3623 || yy2arr3623 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3634 := z.EncBinary() - _ = yym3634 - if false { - } else { - r.EncodeBool(bool(x.Follow)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Follow")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3635 := z.EncBinary() - _ = yym3635 - if false { - } else { - r.EncodeBool(bool(x.Follow)) - } - } - if yyr3623 || yy2arr3623 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3637 := z.EncBinary() - _ = yym3637 - if false { - } else { - r.EncodeBool(bool(x.Previous)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Previous")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3638 := z.EncBinary() - _ = yym3638 - if false { - } else { - r.EncodeBool(bool(x.Previous)) - } - } - if yyr3623 || yy2arr3623 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.SinceSeconds == nil { - r.EncodeNil() - } else { - yy3640 := *x.SinceSeconds - yym3641 := z.EncBinary() - _ = yym3641 - if false { - } else { - r.EncodeInt(int64(yy3640)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("SinceSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SinceSeconds == nil { - r.EncodeNil() - } else { - yy3642 := *x.SinceSeconds - yym3643 := z.EncBinary() - _ = yym3643 - if false { - } else { - r.EncodeInt(int64(yy3642)) - } - } - } - if yyr3623 || yy2arr3623 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.SinceTime == nil { - r.EncodeNil() - } else { - yym3645 := z.EncBinary() - _ = yym3645 - if false { - } else if z.HasExtensions() && z.EncExt(x.SinceTime) { - } else if yym3645 { - z.EncBinaryMarshal(x.SinceTime) - } else if !yym3645 && z.IsJSONHandle() { - z.EncJSONMarshal(x.SinceTime) - } else { - z.EncFallback(x.SinceTime) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("SinceTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SinceTime == nil { - r.EncodeNil() - } else { - yym3646 := z.EncBinary() - _ = yym3646 - if false { - } else if z.HasExtensions() && z.EncExt(x.SinceTime) { - } else if yym3646 { - z.EncBinaryMarshal(x.SinceTime) - } else if !yym3646 && z.IsJSONHandle() { - z.EncJSONMarshal(x.SinceTime) - } else { - z.EncFallback(x.SinceTime) - } - } - } - if yyr3623 || yy2arr3623 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3648 := z.EncBinary() - _ = yym3648 - if false { - } else { - r.EncodeBool(bool(x.Timestamps)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Timestamps")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3649 := z.EncBinary() - _ = yym3649 - if false { - } else { - r.EncodeBool(bool(x.Timestamps)) - } - } - if yyr3623 || yy2arr3623 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.TailLines == nil { - r.EncodeNil() - } else { - yy3651 := *x.TailLines - yym3652 := z.EncBinary() - _ = yym3652 - if false { - } else { 
- r.EncodeInt(int64(yy3651)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("TailLines")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TailLines == nil { - r.EncodeNil() - } else { - yy3653 := *x.TailLines - yym3654 := z.EncBinary() - _ = yym3654 - if false { - } else { - r.EncodeInt(int64(yy3653)) - } - } - } - if yyr3623 || yy2arr3623 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.LimitBytes == nil { - r.EncodeNil() - } else { - yy3656 := *x.LimitBytes - yym3657 := z.EncBinary() - _ = yym3657 - if false { - } else { - r.EncodeInt(int64(yy3656)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("LimitBytes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LimitBytes == nil { - r.EncodeNil() - } else { - yy3658 := *x.LimitBytes - yym3659 := z.EncBinary() - _ = yym3659 - if false { - } else { - r.EncodeInt(int64(yy3658)) - } - } - } - if yyr3623 || yy2arr3623 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodLogOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3660 := z.DecBinary() - _ = yym3660 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3661 := r.ContainerType() - if yyct3661 == codecSelferValueTypeMap1234 { - yyl3661 := r.ReadMapStart() - if yyl3661 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3661, d) - } - } else if yyct3661 == codecSelferValueTypeArray1234 { - yyl3661 := r.ReadArrayStart() - if yyl3661 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3661, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodLogOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3662Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3662Slc - var yyhl3662 bool = l >= 0 - for yyj3662 := 0; ; yyj3662++ { - if yyhl3662 { - if yyj3662 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3662Slc = r.DecodeBytes(yys3662Slc, true, true) - yys3662 := string(yys3662Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3662 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "Container": - if r.TryDecodeAsNil() { - x.Container = "" - } else { - x.Container = string(r.DecodeString()) - } - case "Follow": - if r.TryDecodeAsNil() { - x.Follow = false - } else { - x.Follow = bool(r.DecodeBool()) - } - case "Previous": - if r.TryDecodeAsNil() { - x.Previous = false - } else { - x.Previous = bool(r.DecodeBool()) - } - case "SinceSeconds": - if r.TryDecodeAsNil() { - if x.SinceSeconds != nil { - x.SinceSeconds = nil - } - } else { - if x.SinceSeconds == nil { - x.SinceSeconds = new(int64) - } - yym3669 := z.DecBinary() - _ = yym3669 - if false { - } else { - *((*int64)(x.SinceSeconds)) = 
int64(r.DecodeInt(64)) - } - } - case "SinceTime": - if r.TryDecodeAsNil() { - if x.SinceTime != nil { - x.SinceTime = nil - } - } else { - if x.SinceTime == nil { - x.SinceTime = new(pkg2_unversioned.Time) - } - yym3671 := z.DecBinary() - _ = yym3671 - if false { - } else if z.HasExtensions() && z.DecExt(x.SinceTime) { - } else if yym3671 { - z.DecBinaryUnmarshal(x.SinceTime) - } else if !yym3671 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.SinceTime) - } else { - z.DecFallback(x.SinceTime, false) - } - } - case "Timestamps": - if r.TryDecodeAsNil() { - x.Timestamps = false - } else { - x.Timestamps = bool(r.DecodeBool()) - } - case "TailLines": - if r.TryDecodeAsNil() { - if x.TailLines != nil { - x.TailLines = nil - } - } else { - if x.TailLines == nil { - x.TailLines = new(int64) - } - yym3674 := z.DecBinary() - _ = yym3674 - if false { - } else { - *((*int64)(x.TailLines)) = int64(r.DecodeInt(64)) - } - } - case "LimitBytes": - if r.TryDecodeAsNil() { - if x.LimitBytes != nil { - x.LimitBytes = nil - } - } else { - if x.LimitBytes == nil { - x.LimitBytes = new(int64) - } - yym3676 := z.DecBinary() - _ = yym3676 - if false { - } else { - *((*int64)(x.LimitBytes)) = int64(r.DecodeInt(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3662) - } // end switch yys3662 - } // end for yyj3662 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodLogOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3677 int - var yyb3677 bool - var yyhl3677 bool = l >= 0 - yyj3677++ - if yyhl3677 { - yyb3677 = yyj3677 > l - } else { - yyb3677 = r.CheckBreak() - } - if yyb3677 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj3677++ - if yyhl3677 { - yyb3677 = yyj3677 > l - } else { - yyb3677 = r.CheckBreak() - } - if yyb3677 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj3677++ - if yyhl3677 { - yyb3677 = yyj3677 > l - } else { - yyb3677 = r.CheckBreak() - } - if yyb3677 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Container = "" - } else { - x.Container = string(r.DecodeString()) - } - yyj3677++ - if yyhl3677 { - yyb3677 = yyj3677 > l - } else { - yyb3677 = r.CheckBreak() - } - if yyb3677 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Follow = false - } else { - x.Follow = bool(r.DecodeBool()) - } - yyj3677++ - if yyhl3677 { - yyb3677 = yyj3677 > l - } else { - yyb3677 = r.CheckBreak() - } - if yyb3677 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Previous = false - } else { - x.Previous = bool(r.DecodeBool()) - } - yyj3677++ - if yyhl3677 { - yyb3677 = yyj3677 > l - } else { - yyb3677 = r.CheckBreak() - } - if yyb3677 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SinceSeconds != nil { - x.SinceSeconds = nil - } - } else { - if x.SinceSeconds == nil { - x.SinceSeconds = new(int64) - } - yym3684 := z.DecBinary() - _ = yym3684 - if false { - } else { - *((*int64)(x.SinceSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj3677++ - if yyhl3677 { - yyb3677 = yyj3677 > l - } else { - yyb3677 = r.CheckBreak() - } - if yyb3677 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SinceTime != nil { - x.SinceTime = nil - } - } else { - if x.SinceTime == nil { - x.SinceTime = new(pkg2_unversioned.Time) - } - yym3686 := z.DecBinary() - _ = yym3686 - if false { - } else if z.HasExtensions() && z.DecExt(x.SinceTime) { - } else if yym3686 { - z.DecBinaryUnmarshal(x.SinceTime) - } else if !yym3686 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.SinceTime) - } else { - z.DecFallback(x.SinceTime, false) - } - } - yyj3677++ - if yyhl3677 { - yyb3677 = yyj3677 > l - } else { - yyb3677 = r.CheckBreak() - } - if yyb3677 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Timestamps = false - } else { - x.Timestamps = bool(r.DecodeBool()) - } - yyj3677++ - if yyhl3677 { - yyb3677 = yyj3677 > l - } else { - yyb3677 = r.CheckBreak() - } - if yyb3677 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TailLines != nil { - x.TailLines = nil - } - } else { - if x.TailLines == nil { - x.TailLines = new(int64) - } - yym3689 := z.DecBinary() - _ = yym3689 - if false { - } else { - *((*int64)(x.TailLines)) = int64(r.DecodeInt(64)) - } - } - yyj3677++ - if yyhl3677 { - yyb3677 = yyj3677 > l - } else { - yyb3677 = r.CheckBreak() - } - if yyb3677 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LimitBytes != nil { - x.LimitBytes = nil - } - } else { - if x.LimitBytes == nil { - x.LimitBytes = new(int64) - } - yym3691 := z.DecBinary() - _ = yym3691 - if false { - } else { - *((*int64)(x.LimitBytes)) = int64(r.DecodeInt(64)) - } - } - for { - yyj3677++ - if yyhl3677 { - yyb3677 = yyj3677 > l - } else { - yyb3677 = r.CheckBreak() - } - if yyb3677 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3677-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodAttachOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3692 := z.EncBinary() - _ = yym3692 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3693 := !z.EncBinary() - yy2arr3693 := z.EncBasicHandle().StructToArray - var yyq3693 [7]bool - _, _, _ = yysep3693, yyq3693, yy2arr3693 - const yyr3693 bool = false - yyq3693[0] = x.Kind != "" - yyq3693[1] = x.APIVersion != "" - yyq3693[2] = x.Stdin != false - yyq3693[3] = x.Stdout != false - yyq3693[4] = x.Stderr != false - yyq3693[5] = x.TTY != false - yyq3693[6] = x.Container != "" - var yynn3693 int - if yyr3693 || yy2arr3693 { - r.EncodeArrayStart(7) - } else { - yynn3693 = 0 - for _, b := range 
yyq3693 { - if b { - yynn3693++ - } - } - r.EncodeMapStart(yynn3693) - yynn3693 = 0 - } - if yyr3693 || yy2arr3693 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3693[0] { - yym3695 := z.EncBinary() - _ = yym3695 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3693[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3696 := z.EncBinary() - _ = yym3696 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr3693 || yy2arr3693 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3693[1] { - yym3698 := z.EncBinary() - _ = yym3698 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3693[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3699 := z.EncBinary() - _ = yym3699 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr3693 || yy2arr3693 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3693[2] { - yym3701 := z.EncBinary() - _ = yym3701 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq3693[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stdin")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3702 := z.EncBinary() - _ = yym3702 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } - } - if yyr3693 || yy2arr3693 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3693[3] { - yym3704 := z.EncBinary() - _ = yym3704 - if false { - } else { - r.EncodeBool(bool(x.Stdout)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq3693[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stdout")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3705 := z.EncBinary() - _ = yym3705 - if false { - } else { - r.EncodeBool(bool(x.Stdout)) - } - } - } - if yyr3693 || yy2arr3693 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3693[4] { - yym3707 := z.EncBinary() - _ = yym3707 - if false { - } else { - r.EncodeBool(bool(x.Stderr)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq3693[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stderr")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3708 := z.EncBinary() - _ = yym3708 - if false { - } else { - r.EncodeBool(bool(x.Stderr)) - } - } - } - if yyr3693 || yy2arr3693 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3693[5] { - yym3710 := z.EncBinary() - _ = yym3710 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq3693[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tty")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3711 := z.EncBinary() - _ = yym3711 - if false { - } 
else { - r.EncodeBool(bool(x.TTY)) - } - } - } - if yyr3693 || yy2arr3693 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3693[6] { - yym3713 := z.EncBinary() - _ = yym3713 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3693[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("container")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3714 := z.EncBinary() - _ = yym3714 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } - } - if yyr3693 || yy2arr3693 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodAttachOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3715 := z.DecBinary() - _ = yym3715 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3716 := r.ContainerType() - if yyct3716 == codecSelferValueTypeMap1234 { - yyl3716 := r.ReadMapStart() - if yyl3716 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3716, d) - } - } else if yyct3716 == codecSelferValueTypeArray1234 { - yyl3716 := r.ReadArrayStart() - if yyl3716 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3716, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodAttachOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3717Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3717Slc - var yyhl3717 bool = l >= 0 - for yyj3717 := 0; ; yyj3717++ { - if yyhl3717 { - if yyj3717 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3717Slc = r.DecodeBytes(yys3717Slc, true, true) - yys3717 := string(yys3717Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3717 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "stdin": - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - x.Stdin = bool(r.DecodeBool()) - } - case "stdout": - if r.TryDecodeAsNil() { - x.Stdout = false - } else { - x.Stdout = bool(r.DecodeBool()) - } - case "stderr": - if r.TryDecodeAsNil() { - x.Stderr = false - } else { - x.Stderr = bool(r.DecodeBool()) - } - case "tty": - if r.TryDecodeAsNil() { - x.TTY = false - } else { - x.TTY = bool(r.DecodeBool()) - } - case "container": - if r.TryDecodeAsNil() { - x.Container = "" - } else { - x.Container = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3717) - } // end switch yys3717 - } // end for yyj3717 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodAttachOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3725 int - var yyb3725 bool - var yyhl3725 bool = l >= 0 - yyj3725++ - if yyhl3725 { - yyb3725 = yyj3725 > l - 
} else { - yyb3725 = r.CheckBreak() - } - if yyb3725 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj3725++ - if yyhl3725 { - yyb3725 = yyj3725 > l - } else { - yyb3725 = r.CheckBreak() - } - if yyb3725 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj3725++ - if yyhl3725 { - yyb3725 = yyj3725 > l - } else { - yyb3725 = r.CheckBreak() - } - if yyb3725 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - x.Stdin = bool(r.DecodeBool()) - } - yyj3725++ - if yyhl3725 { - yyb3725 = yyj3725 > l - } else { - yyb3725 = r.CheckBreak() - } - if yyb3725 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stdout = false - } else { - x.Stdout = bool(r.DecodeBool()) - } - yyj3725++ - if yyhl3725 { - yyb3725 = yyj3725 > l - } else { - yyb3725 = r.CheckBreak() - } - if yyb3725 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stderr = false - } else { - x.Stderr = bool(r.DecodeBool()) - } - yyj3725++ - if yyhl3725 { - yyb3725 = yyj3725 > l - } else { - yyb3725 = r.CheckBreak() - } - if yyb3725 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TTY = false - } else { - x.TTY = bool(r.DecodeBool()) - } - yyj3725++ - if yyhl3725 { - yyb3725 = yyj3725 > l - } else { - yyb3725 = r.CheckBreak() - } - if yyb3725 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Container = "" - } else { - x.Container = string(r.DecodeString()) - } - for { - yyj3725++ - if yyhl3725 { - yyb3725 = yyj3725 > l - } else { - yyb3725 = r.CheckBreak() - } - if yyb3725 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3725-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodExecOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3733 := z.EncBinary() - _ = yym3733 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3734 := !z.EncBinary() - yy2arr3734 := z.EncBasicHandle().StructToArray - var yyq3734 [8]bool - _, _, _ = yysep3734, yyq3734, yy2arr3734 - const yyr3734 bool = false - yyq3734[0] = x.Kind != "" - yyq3734[1] = x.APIVersion != "" - var yynn3734 int - if yyr3734 || yy2arr3734 { - r.EncodeArrayStart(8) - } else { - yynn3734 = 6 - for _, b := range yyq3734 { - if b { - yynn3734++ - } - } - r.EncodeMapStart(yynn3734) - yynn3734 = 0 - } - if yyr3734 || yy2arr3734 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3734[0] { - yym3736 := z.EncBinary() - _ = yym3736 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3734[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3737 := z.EncBinary() - _ = yym3737 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr3734 || yy2arr3734 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3734[1] { - yym3739 := z.EncBinary() - _ = yym3739 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3734[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3740 := z.EncBinary() - _ = yym3740 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr3734 || yy2arr3734 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3742 := z.EncBinary() - _ = yym3742 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Stdin")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3743 := z.EncBinary() - _ = yym3743 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } - if yyr3734 || yy2arr3734 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3745 := z.EncBinary() - _ = yym3745 - if false { - } else { - r.EncodeBool(bool(x.Stdout)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Stdout")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3746 := z.EncBinary() - _ = yym3746 - if false { - } else { - r.EncodeBool(bool(x.Stdout)) - } - } - if yyr3734 || yy2arr3734 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3748 := z.EncBinary() - _ = yym3748 - if false { - } else { - r.EncodeBool(bool(x.Stderr)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Stderr")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3749 := z.EncBinary() - _ = yym3749 - if false { - } else { - r.EncodeBool(bool(x.Stderr)) - } - } - if yyr3734 || yy2arr3734 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3751 := z.EncBinary() - _ = yym3751 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("TTY")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3752 := z.EncBinary() - _ = yym3752 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } - if yyr3734 || yy2arr3734 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3754 := z.EncBinary() - _ = yym3754 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Container")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3755 := z.EncBinary() - _ = yym3755 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, 
string(x.Container)) - } - } - if yyr3734 || yy2arr3734 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Command == nil { - r.EncodeNil() - } else { - yym3757 := z.EncBinary() - _ = yym3757 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Command")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Command == nil { - r.EncodeNil() - } else { - yym3758 := z.EncBinary() - _ = yym3758 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } - if yyr3734 || yy2arr3734 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodExecOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3759 := z.DecBinary() - _ = yym3759 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3760 := r.ContainerType() - if yyct3760 == codecSelferValueTypeMap1234 { - yyl3760 := r.ReadMapStart() - if yyl3760 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3760, d) - } - } else if yyct3760 == codecSelferValueTypeArray1234 { - yyl3760 := r.ReadArrayStart() - if yyl3760 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3760, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodExecOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3761Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3761Slc - var yyhl3761 bool = l >= 0 - for yyj3761 := 0; ; yyj3761++ { - if yyhl3761 { - if yyj3761 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3761Slc = r.DecodeBytes(yys3761Slc, true, true) - yys3761 := string(yys3761Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3761 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "Stdin": - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - x.Stdin = bool(r.DecodeBool()) - } - case "Stdout": - if r.TryDecodeAsNil() { - x.Stdout = false - } else { - x.Stdout = bool(r.DecodeBool()) - } - case "Stderr": - if r.TryDecodeAsNil() { - x.Stderr = false - } else { - x.Stderr = bool(r.DecodeBool()) - } - case "TTY": - if r.TryDecodeAsNil() { - x.TTY = false - } else { - x.TTY = bool(r.DecodeBool()) - } - case "Container": - if r.TryDecodeAsNil() { - x.Container = "" - } else { - x.Container = string(r.DecodeString()) - } - case "Command": - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv3769 := &x.Command - yym3770 := z.DecBinary() - _ = yym3770 - if false { - } else { - z.F.DecSliceStringX(yyv3769, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3761) - } // end switch yys3761 - } // end for yyj3761 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodExecOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3771 int - var yyb3771 bool - var yyhl3771 bool = l >= 0 - yyj3771++ - if yyhl3771 { - yyb3771 = yyj3771 > l - } else { - yyb3771 = r.CheckBreak() - } - if yyb3771 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj3771++ - if yyhl3771 { - yyb3771 = yyj3771 > l - } else { - yyb3771 = r.CheckBreak() - } - if yyb3771 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj3771++ - if yyhl3771 { - yyb3771 = yyj3771 > l - } else { - yyb3771 = r.CheckBreak() - } - if yyb3771 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - x.Stdin = bool(r.DecodeBool()) - } - yyj3771++ - if yyhl3771 { - yyb3771 = yyj3771 > l - } else { - yyb3771 = r.CheckBreak() - } - if yyb3771 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stdout = false - } else { - x.Stdout = bool(r.DecodeBool()) - } - yyj3771++ - if yyhl3771 { - yyb3771 = yyj3771 > l - } else { - yyb3771 = r.CheckBreak() - } - if yyb3771 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stderr = false - } else { - x.Stderr = bool(r.DecodeBool()) - } - yyj3771++ - if yyhl3771 { - yyb3771 = yyj3771 > l - } else { - yyb3771 = r.CheckBreak() - } - if yyb3771 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TTY = false - } else { - x.TTY = bool(r.DecodeBool()) - } - yyj3771++ - if yyhl3771 { - yyb3771 = yyj3771 > l - } else { - yyb3771 = r.CheckBreak() - } - if yyb3771 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Container = "" - } else { - x.Container = string(r.DecodeString()) - } - yyj3771++ - if yyhl3771 { - yyb3771 = yyj3771 > l - } else { - yyb3771 = r.CheckBreak() - } - if yyb3771 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv3779 := &x.Command - yym3780 := z.DecBinary() - _ = yym3780 - if false { - } else { - z.F.DecSliceStringX(yyv3779, false, d) - } - } - for { - yyj3771++ - if yyhl3771 { - yyb3771 = yyj3771 > l - } else { - yyb3771 = r.CheckBreak() - } - if yyb3771 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3771-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3781 := z.EncBinary() - _ = yym3781 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3782 
:= !z.EncBinary() - yy2arr3782 := z.EncBasicHandle().StructToArray - var yyq3782 [3]bool - _, _, _ = yysep3782, yyq3782, yy2arr3782 - const yyr3782 bool = false - yyq3782[0] = x.Kind != "" - yyq3782[1] = x.APIVersion != "" - var yynn3782 int - if yyr3782 || yy2arr3782 { - r.EncodeArrayStart(3) - } else { - yynn3782 = 1 - for _, b := range yyq3782 { - if b { - yynn3782++ - } - } - r.EncodeMapStart(yynn3782) - yynn3782 = 0 - } - if yyr3782 || yy2arr3782 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3782[0] { - yym3784 := z.EncBinary() - _ = yym3784 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3782[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3785 := z.EncBinary() - _ = yym3785 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr3782 || yy2arr3782 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3782[1] { - yym3787 := z.EncBinary() - _ = yym3787 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3782[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3788 := z.EncBinary() - _ = yym3788 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr3782 || yy2arr3782 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3790 := z.EncBinary() - _ = yym3790 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3791 := z.EncBinary() - _ = yym3791 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr3782 || yy2arr3782 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3792 := z.DecBinary() - _ = yym3792 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3793 := r.ContainerType() - if yyct3793 == codecSelferValueTypeMap1234 { - yyl3793 := r.ReadMapStart() - if yyl3793 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3793, d) - } - } else if yyct3793 == codecSelferValueTypeArray1234 { - yyl3793 := r.ReadArrayStart() - if yyl3793 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3793, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3794Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3794Slc - var yyhl3794 bool = l >= 0 - for yyj3794 := 0; ; yyj3794++ { - if 
yyhl3794 { - if yyj3794 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3794Slc = r.DecodeBytes(yys3794Slc, true, true) - yys3794 := string(yys3794Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3794 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "Path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3794) - } // end switch yys3794 - } // end for yyj3794 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3798 int - var yyb3798 bool - var yyhl3798 bool = l >= 0 - yyj3798++ - if yyhl3798 { - yyb3798 = yyj3798 > l - } else { - yyb3798 = r.CheckBreak() - } - if yyb3798 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj3798++ - if yyhl3798 { - yyb3798 = yyj3798 > l - } else { - yyb3798 = r.CheckBreak() - } - if yyb3798 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj3798++ - if yyhl3798 { - yyb3798 = yyj3798 > l - } else { - yyb3798 = r.CheckBreak() - } - if yyb3798 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - for { - yyj3798++ - if yyhl3798 { - yyb3798 = yyj3798 > l - } else { - yyb3798 = r.CheckBreak() - } - if yyb3798 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3798-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3802 := z.EncBinary() - _ = yym3802 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3803 := !z.EncBinary() - yy2arr3803 := z.EncBasicHandle().StructToArray - var yyq3803 [3]bool - _, _, _ = yysep3803, yyq3803, yy2arr3803 - const yyr3803 bool = false - yyq3803[0] = x.Kind != "" - yyq3803[1] = x.APIVersion != "" - var yynn3803 int - if yyr3803 || yy2arr3803 { - r.EncodeArrayStart(3) - } else { - yynn3803 = 1 - for _, b := range yyq3803 { - if b { - yynn3803++ - } - } - r.EncodeMapStart(yynn3803) - yynn3803 = 0 - } - if yyr3803 || yy2arr3803 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3803[0] { - yym3805 := z.EncBinary() - _ = yym3805 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3803[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3806 := z.EncBinary() - _ = yym3806 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr3803 || yy2arr3803 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3803[1] { - yym3808 := z.EncBinary() - _ = yym3808 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3803[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3809 := z.EncBinary() - _ = yym3809 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr3803 || yy2arr3803 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3811 := z.EncBinary() - _ = yym3811 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3812 := z.EncBinary() - _ = yym3812 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr3803 || yy2arr3803 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3813 := z.DecBinary() - _ = yym3813 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3814 := r.ContainerType() - if yyct3814 == codecSelferValueTypeMap1234 { - yyl3814 := r.ReadMapStart() - if yyl3814 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3814, d) - } - } else if yyct3814 == codecSelferValueTypeArray1234 { - yyl3814 := r.ReadArrayStart() - if yyl3814 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3814, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3815Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3815Slc - var yyhl3815 bool = l >= 0 - for yyj3815 := 0; ; yyj3815++ { - if yyhl3815 { - if yyj3815 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3815Slc = r.DecodeBytes(yys3815Slc, true, true) - yys3815 := string(yys3815Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3815 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "Path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3815) - } // end switch yys3815 - } // end for yyj3815 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeProxyOptions) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3819 int - var yyb3819 bool - var yyhl3819 bool = l >= 0 - yyj3819++ - if yyhl3819 { - yyb3819 = yyj3819 > l - } else { - yyb3819 = r.CheckBreak() - } - if yyb3819 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj3819++ - if yyhl3819 { - yyb3819 = yyj3819 > l - } else { - yyb3819 = r.CheckBreak() - } - if yyb3819 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj3819++ - if yyhl3819 { - yyb3819 = yyj3819 > l - } else { - yyb3819 = r.CheckBreak() - } - if yyb3819 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - for { - yyj3819++ - if yyhl3819 { - yyb3819 = yyj3819 > l - } else { - yyb3819 = r.CheckBreak() - } - if yyb3819 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3819-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3823 := z.EncBinary() - _ = yym3823 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3824 := !z.EncBinary() - yy2arr3824 := z.EncBasicHandle().StructToArray - var yyq3824 [3]bool - _, _, _ = yysep3824, yyq3824, yy2arr3824 - const yyr3824 bool = false - yyq3824[0] = x.Kind != "" - yyq3824[1] = x.APIVersion != "" - var yynn3824 int - if yyr3824 || yy2arr3824 { - r.EncodeArrayStart(3) - } else { - yynn3824 = 1 - for _, b := range yyq3824 { - if b { - yynn3824++ - } - } - r.EncodeMapStart(yynn3824) - yynn3824 = 0 - } - if yyr3824 || yy2arr3824 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3824[0] { - yym3826 := z.EncBinary() - _ = yym3826 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3824[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3827 := z.EncBinary() - _ = yym3827 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr3824 || yy2arr3824 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3824[1] { - yym3829 := z.EncBinary() - _ = yym3829 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3824[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3830 := z.EncBinary() - _ = yym3830 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - 
} - } - if yyr3824 || yy2arr3824 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3832 := z.EncBinary() - _ = yym3832 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3833 := z.EncBinary() - _ = yym3833 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr3824 || yy2arr3824 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServiceProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3834 := z.DecBinary() - _ = yym3834 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3835 := r.ContainerType() - if yyct3835 == codecSelferValueTypeMap1234 { - yyl3835 := r.ReadMapStart() - if yyl3835 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3835, d) - } - } else if yyct3835 == codecSelferValueTypeArray1234 { - yyl3835 := r.ReadArrayStart() - if yyl3835 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3835, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3836Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3836Slc - var yyhl3836 bool = l >= 0 - for yyj3836 := 0; ; yyj3836++ { - if yyhl3836 { - if yyj3836 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3836Slc = r.DecodeBytes(yys3836Slc, true, true) - yys3836 := string(yys3836Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3836 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "Path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3836) - } // end switch yys3836 - } // end for yyj3836 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServiceProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3840 int - var yyb3840 bool - var yyhl3840 bool = l >= 0 - yyj3840++ - if yyhl3840 { - yyb3840 = yyj3840 > l - } else { - yyb3840 = r.CheckBreak() - } - if yyb3840 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj3840++ - if yyhl3840 { - yyb3840 = yyj3840 > l - } else { - yyb3840 = r.CheckBreak() - } - if yyb3840 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - 
x.APIVersion = string(r.DecodeString()) - } - yyj3840++ - if yyhl3840 { - yyb3840 = yyj3840 > l - } else { - yyb3840 = r.CheckBreak() - } - if yyb3840 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - for { - yyj3840++ - if yyhl3840 { - yyb3840 = yyj3840 > l - } else { - yyb3840 = r.CheckBreak() - } - if yyb3840 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3840-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *OwnerReference) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3844 := z.EncBinary() - _ = yym3844 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3845 := !z.EncBinary() - yy2arr3845 := z.EncBasicHandle().StructToArray - var yyq3845 [5]bool - _, _, _ = yysep3845, yyq3845, yy2arr3845 - const yyr3845 bool = false - yyq3845[4] = x.Controller != nil - var yynn3845 int - if yyr3845 || yy2arr3845 { - r.EncodeArrayStart(5) - } else { - yynn3845 = 4 - for _, b := range yyq3845 { - if b { - yynn3845++ - } - } - r.EncodeMapStart(yynn3845) - yynn3845 = 0 - } - if yyr3845 || yy2arr3845 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3847 := z.EncBinary() - _ = yym3847 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3848 := z.EncBinary() - _ = yym3848 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - if yyr3845 || yy2arr3845 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3850 := z.EncBinary() - _ = yym3850 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3851 := z.EncBinary() - _ = yym3851 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - if yyr3845 || yy2arr3845 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3853 := z.EncBinary() - _ = yym3853 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3854 := z.EncBinary() - _ = yym3854 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr3845 || yy2arr3845 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3856 := z.EncBinary() - _ = yym3856 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3857 := z.EncBinary() - _ = yym3857 - if false { - } else if z.HasExtensions() && 
z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - if yyr3845 || yy2arr3845 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3845[4] { - if x.Controller == nil { - r.EncodeNil() - } else { - yy3859 := *x.Controller - yym3860 := z.EncBinary() - _ = yym3860 - if false { - } else { - r.EncodeBool(bool(yy3859)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq3845[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("controller")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Controller == nil { - r.EncodeNil() - } else { - yy3861 := *x.Controller - yym3862 := z.EncBinary() - _ = yym3862 - if false { - } else { - r.EncodeBool(bool(yy3861)) - } - } - } - } - if yyr3845 || yy2arr3845 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *OwnerReference) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3863 := z.DecBinary() - _ = yym3863 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3864 := r.ContainerType() - if yyct3864 == codecSelferValueTypeMap1234 { - yyl3864 := r.ReadMapStart() - if yyl3864 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3864, d) - } - } else if yyct3864 == codecSelferValueTypeArray1234 { - yyl3864 := r.ReadArrayStart() - if yyl3864 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3864, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *OwnerReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3865Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3865Slc - var yyhl3865 bool = l >= 0 - for yyj3865 := 0; ; yyj3865++ { - if yyhl3865 { - if yyj3865 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3865Slc = r.DecodeBytes(yys3865Slc, true, true) - yys3865 := string(yys3865Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3865 { - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "uid": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg1_types.UID(r.DecodeString()) - } - case "controller": - if r.TryDecodeAsNil() { - if x.Controller != nil { - x.Controller = nil - } - } else { - if x.Controller == nil { - x.Controller = new(bool) - } - yym3871 := z.DecBinary() - _ = yym3871 - if false { - } else { - *((*bool)(x.Controller)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3865) - } // end switch yys3865 - } // end for yyj3865 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *OwnerReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3872 int - var yyb3872 bool - var yyhl3872 
bool = l >= 0 - yyj3872++ - if yyhl3872 { - yyb3872 = yyj3872 > l - } else { - yyb3872 = r.CheckBreak() - } - if yyb3872 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj3872++ - if yyhl3872 { - yyb3872 = yyj3872 > l - } else { - yyb3872 = r.CheckBreak() - } - if yyb3872 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj3872++ - if yyhl3872 { - yyb3872 = yyj3872 > l - } else { - yyb3872 = r.CheckBreak() - } - if yyb3872 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj3872++ - if yyhl3872 { - yyb3872 = yyj3872 > l - } else { - yyb3872 = r.CheckBreak() - } - if yyb3872 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg1_types.UID(r.DecodeString()) - } - yyj3872++ - if yyhl3872 { - yyb3872 = yyj3872 > l - } else { - yyb3872 = r.CheckBreak() - } - if yyb3872 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Controller != nil { - x.Controller = nil - } - } else { - if x.Controller == nil { - x.Controller = new(bool) - } - yym3878 := z.DecBinary() - _ = yym3878 - if false { - } else { - *((*bool)(x.Controller)) = r.DecodeBool() - } - } - for { - yyj3872++ - if yyhl3872 { - yyb3872 = yyj3872 > l - } else { - yyb3872 = r.CheckBreak() - } - if yyb3872 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3872-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3879 := z.EncBinary() - _ = yym3879 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3880 := !z.EncBinary() - yy2arr3880 := z.EncBasicHandle().StructToArray - var yyq3880 [7]bool - _, _, _ = yysep3880, yyq3880, yy2arr3880 - const yyr3880 bool = false - yyq3880[0] = x.Kind != "" - yyq3880[1] = x.Namespace != "" - yyq3880[2] = x.Name != "" - yyq3880[3] = x.UID != "" - yyq3880[4] = x.APIVersion != "" - yyq3880[5] = x.ResourceVersion != "" - yyq3880[6] = x.FieldPath != "" - var yynn3880 int - if yyr3880 || yy2arr3880 { - r.EncodeArrayStart(7) - } else { - yynn3880 = 0 - for _, b := range yyq3880 { - if b { - yynn3880++ - } - } - r.EncodeMapStart(yynn3880) - yynn3880 = 0 - } - if yyr3880 || yy2arr3880 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3880[0] { - yym3882 := z.EncBinary() - _ = yym3882 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3880[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3883 := z.EncBinary() - _ = yym3883 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr3880 || yy2arr3880 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3880[1] { - yym3885 := z.EncBinary() - _ = yym3885 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3880[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3886 := z.EncBinary() - _ = yym3886 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - } - if yyr3880 || yy2arr3880 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3880[2] { - yym3888 := z.EncBinary() - _ = yym3888 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3880[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3889 := z.EncBinary() - _ = yym3889 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr3880 || yy2arr3880 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3880[3] { - yym3891 := z.EncBinary() - _ = yym3891 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3880[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3892 := z.EncBinary() - _ = yym3892 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - } - if yyr3880 || yy2arr3880 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3880[4] { - yym3894 := z.EncBinary() - _ = yym3894 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3880[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3895 := z.EncBinary() - _ = yym3895 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr3880 || yy2arr3880 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3880[5] { - yym3897 := z.EncBinary() - _ = yym3897 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3880[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3898 := z.EncBinary() - _ = yym3898 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } - } - if yyr3880 || yy2arr3880 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3880[6] { - yym3900 := z.EncBinary() - _ = yym3900 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3880[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fieldPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3901 := z.EncBinary() - _ = yym3901 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) - } - } - } - if yyr3880 || yy2arr3880 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3902 := z.DecBinary() - _ = yym3902 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3903 := r.ContainerType() - if yyct3903 == codecSelferValueTypeMap1234 { - yyl3903 := r.ReadMapStart() - if yyl3903 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3903, d) - } - } else if yyct3903 == codecSelferValueTypeArray1234 { - yyl3903 := r.ReadArrayStart() - if yyl3903 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3903, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3904Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3904Slc - var yyhl3904 bool = l >= 0 - for yyj3904 := 0; ; yyj3904++ { - if yyhl3904 { - if yyj3904 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3904Slc = r.DecodeBytes(yys3904Slc, true, true) - yys3904 := string(yys3904Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3904 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "uid": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg1_types.UID(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "resourceVersion": - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - case "fieldPath": - if r.TryDecodeAsNil() { - x.FieldPath = "" - } else { - x.FieldPath = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3904) - } // end switch yys3904 - } // end for yyj3904 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3912 int - var yyb3912 bool - var yyhl3912 bool = l >= 0 - yyj3912++ - if yyhl3912 { - yyb3912 = yyj3912 > l - } else { - yyb3912 = r.CheckBreak() 
- } - if yyb3912 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj3912++ - if yyhl3912 { - yyb3912 = yyj3912 > l - } else { - yyb3912 = r.CheckBreak() - } - if yyb3912 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - yyj3912++ - if yyhl3912 { - yyb3912 = yyj3912 > l - } else { - yyb3912 = r.CheckBreak() - } - if yyb3912 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj3912++ - if yyhl3912 { - yyb3912 = yyj3912 > l - } else { - yyb3912 = r.CheckBreak() - } - if yyb3912 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg1_types.UID(r.DecodeString()) - } - yyj3912++ - if yyhl3912 { - yyb3912 = yyj3912 > l - } else { - yyb3912 = r.CheckBreak() - } - if yyb3912 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj3912++ - if yyhl3912 { - yyb3912 = yyj3912 > l - } else { - yyb3912 = r.CheckBreak() - } - if yyb3912 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - yyj3912++ - if yyhl3912 { - yyb3912 = yyj3912 > l - } else { - yyb3912 = r.CheckBreak() - } - if yyb3912 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FieldPath = "" - } else { - x.FieldPath = string(r.DecodeString()) - } - for { - yyj3912++ - if yyhl3912 { - yyb3912 = yyj3912 > l - } else { - yyb3912 = r.CheckBreak() - } - if yyb3912 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3912-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LocalObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3920 := z.EncBinary() - _ = yym3920 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3921 := !z.EncBinary() - yy2arr3921 := z.EncBasicHandle().StructToArray - var yyq3921 [1]bool - _, _, _ = yysep3921, yyq3921, yy2arr3921 - const yyr3921 bool = false - var yynn3921 int - if yyr3921 || yy2arr3921 { - r.EncodeArrayStart(1) - } else { - yynn3921 = 1 - for _, b := range yyq3921 { - if b { - yynn3921++ - } - } - r.EncodeMapStart(yynn3921) - yynn3921 = 0 - } - if yyr3921 || yy2arr3921 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3923 := z.EncBinary() - _ = yym3923 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3924 := z.EncBinary() - _ = yym3924 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr3921 || yy2arr3921 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LocalObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3925 := z.DecBinary() - _ = yym3925 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3926 := r.ContainerType() - if yyct3926 == codecSelferValueTypeMap1234 { - yyl3926 := r.ReadMapStart() - if yyl3926 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3926, d) - } - } else if yyct3926 == codecSelferValueTypeArray1234 { - yyl3926 := r.ReadArrayStart() - if yyl3926 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3926, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LocalObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3927Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3927Slc - var yyhl3927 bool = l >= 0 - for yyj3927 := 0; ; yyj3927++ { - if yyhl3927 { - if yyj3927 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3927Slc = r.DecodeBytes(yys3927Slc, true, true) - yys3927 := string(yys3927Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3927 { - case "Name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3927) - } // end switch yys3927 - } // end for yyj3927 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LocalObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3929 int - var yyb3929 bool - var yyhl3929 bool = l >= 0 - yyj3929++ - if yyhl3929 { - yyb3929 = yyj3929 > l - } else { - yyb3929 = r.CheckBreak() - } - if yyb3929 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - for { - yyj3929++ - if yyhl3929 { - yyb3929 = yyj3929 > l - } else { - yyb3929 = r.CheckBreak() - } - if yyb3929 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3929-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SerializedReference) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3931 := z.EncBinary() - _ = yym3931 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3932 := !z.EncBinary() - yy2arr3932 := z.EncBasicHandle().StructToArray - var yyq3932 [3]bool - _, _, _ = yysep3932, yyq3932, yy2arr3932 - const yyr3932 
bool = false - yyq3932[0] = x.Kind != "" - yyq3932[1] = x.APIVersion != "" - yyq3932[2] = true - var yynn3932 int - if yyr3932 || yy2arr3932 { - r.EncodeArrayStart(3) - } else { - yynn3932 = 0 - for _, b := range yyq3932 { - if b { - yynn3932++ - } - } - r.EncodeMapStart(yynn3932) - yynn3932 = 0 - } - if yyr3932 || yy2arr3932 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3932[0] { - yym3934 := z.EncBinary() - _ = yym3934 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3932[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3935 := z.EncBinary() - _ = yym3935 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr3932 || yy2arr3932 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3932[1] { - yym3937 := z.EncBinary() - _ = yym3937 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3932[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3938 := z.EncBinary() - _ = yym3938 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr3932 || yy2arr3932 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3932[2] { - yy3940 := &x.Reference - yy3940.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq3932[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reference")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3941 := &x.Reference - yy3941.CodecEncodeSelf(e) - } - } - if yyr3932 || yy2arr3932 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SerializedReference) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3942 := z.DecBinary() - _ = yym3942 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3943 := r.ContainerType() - if yyct3943 == codecSelferValueTypeMap1234 { - yyl3943 := r.ReadMapStart() - if yyl3943 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3943, d) - } - } else if yyct3943 == codecSelferValueTypeArray1234 { - yyl3943 := r.ReadArrayStart() - if yyl3943 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3943, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SerializedReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3944Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3944Slc - var yyhl3944 bool = l >= 0 - for yyj3944 := 0; ; yyj3944++ { - if yyhl3944 { - if yyj3944 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3944Slc = r.DecodeBytes(yys3944Slc, 
true, true) - yys3944 := string(yys3944Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3944 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "reference": - if r.TryDecodeAsNil() { - x.Reference = ObjectReference{} - } else { - yyv3947 := &x.Reference - yyv3947.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3944) - } // end switch yys3944 - } // end for yyj3944 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SerializedReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3948 int - var yyb3948 bool - var yyhl3948 bool = l >= 0 - yyj3948++ - if yyhl3948 { - yyb3948 = yyj3948 > l - } else { - yyb3948 = r.CheckBreak() - } - if yyb3948 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj3948++ - if yyhl3948 { - yyb3948 = yyj3948 > l - } else { - yyb3948 = r.CheckBreak() - } - if yyb3948 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj3948++ - if yyhl3948 { - yyb3948 = yyj3948 > l - } else { - yyb3948 = r.CheckBreak() - } - if yyb3948 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reference = ObjectReference{} - } else { - yyv3951 := &x.Reference - yyv3951.CodecDecodeSelf(d) - } - for { - yyj3948++ - if yyhl3948 { - yyb3948 = yyj3948 > l - } else { - yyb3948 = r.CheckBreak() - } - if yyb3948 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3948-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EventSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3952 := z.EncBinary() - _ = yym3952 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3953 := !z.EncBinary() - yy2arr3953 := z.EncBasicHandle().StructToArray - var yyq3953 [2]bool - _, _, _ = yysep3953, yyq3953, yy2arr3953 - const yyr3953 bool = false - yyq3953[0] = x.Component != "" - yyq3953[1] = x.Host != "" - var yynn3953 int - if yyr3953 || yy2arr3953 { - r.EncodeArrayStart(2) - } else { - yynn3953 = 0 - for _, b := range yyq3953 { - if b { - yynn3953++ - } - } - r.EncodeMapStart(yynn3953) - yynn3953 = 0 - } - if yyr3953 || yy2arr3953 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3953[0] { - yym3955 := z.EncBinary() - _ = yym3955 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Component)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3953[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("component")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3956 := z.EncBinary() - _ = 
yym3956 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Component)) - } - } - } - if yyr3953 || yy2arr3953 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3953[1] { - yym3958 := z.EncBinary() - _ = yym3958 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3953[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("host")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3959 := z.EncBinary() - _ = yym3959 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } - } - if yyr3953 || yy2arr3953 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EventSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3960 := z.DecBinary() - _ = yym3960 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3961 := r.ContainerType() - if yyct3961 == codecSelferValueTypeMap1234 { - yyl3961 := r.ReadMapStart() - if yyl3961 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3961, d) - } - } else if yyct3961 == codecSelferValueTypeArray1234 { - yyl3961 := r.ReadArrayStart() - if yyl3961 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3961, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EventSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3962Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3962Slc - var yyhl3962 bool = l >= 0 - for yyj3962 := 0; ; yyj3962++ { - if yyhl3962 { - if yyj3962 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3962Slc = r.DecodeBytes(yys3962Slc, true, true) - yys3962 := string(yys3962Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3962 { - case "component": - if r.TryDecodeAsNil() { - x.Component = "" - } else { - x.Component = string(r.DecodeString()) - } - case "host": - if r.TryDecodeAsNil() { - x.Host = "" - } else { - x.Host = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3962) - } // end switch yys3962 - } // end for yyj3962 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EventSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3965 int - var yyb3965 bool - var yyhl3965 bool = l >= 0 - yyj3965++ - if yyhl3965 { - yyb3965 = yyj3965 > l - } else { - yyb3965 = r.CheckBreak() - } - if yyb3965 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Component = "" - } else { - x.Component = string(r.DecodeString()) - } - yyj3965++ - if yyhl3965 { - yyb3965 = yyj3965 > l - } else { - yyb3965 = r.CheckBreak() - } - if yyb3965 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Host = "" - } else { - x.Host = string(r.DecodeString()) - } - for { - yyj3965++ - if yyhl3965 { - yyb3965 = yyj3965 > l - } else { - yyb3965 = r.CheckBreak() - } - if yyb3965 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3965-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Event) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3968 := z.EncBinary() - _ = yym3968 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3969 := !z.EncBinary() - yy2arr3969 := z.EncBasicHandle().StructToArray - var yyq3969 [11]bool - _, _, _ = yysep3969, yyq3969, yy2arr3969 - const yyr3969 bool = false - yyq3969[0] = x.Kind != "" - yyq3969[1] = x.APIVersion != "" - yyq3969[2] = true - yyq3969[3] = true - yyq3969[4] = x.Reason != "" - yyq3969[5] = x.Message != "" - yyq3969[6] = true - yyq3969[7] = true - yyq3969[8] = true - yyq3969[9] = x.Count != 0 - yyq3969[10] = x.Type != "" - var yynn3969 int - if yyr3969 || yy2arr3969 { - r.EncodeArrayStart(11) - } else { - yynn3969 = 0 - for _, b := range yyq3969 { - if b { - yynn3969++ - } - } - r.EncodeMapStart(yynn3969) - yynn3969 = 0 - } - if yyr3969 || yy2arr3969 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3969[0] { - yym3971 := z.EncBinary() - _ = yym3971 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3969[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3972 := z.EncBinary() - _ = yym3972 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr3969 || yy2arr3969 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3969[1] { - yym3974 := z.EncBinary() - _ = yym3974 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3969[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3975 := z.EncBinary() - _ = yym3975 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr3969 || yy2arr3969 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3969[2] { - yy3977 := &x.ObjectMeta - yy3977.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq3969[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3978 := &x.ObjectMeta - yy3978.CodecEncodeSelf(e) - } - } - if yyr3969 || yy2arr3969 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3969[3] { - yy3980 := &x.InvolvedObject - yy3980.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq3969[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("involvedObject")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3981 := &x.InvolvedObject - yy3981.CodecEncodeSelf(e) - } - } - if yyr3969 || yy2arr3969 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3969[4] { - yym3983 := z.EncBinary() - _ = yym3983 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3969[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3984 := z.EncBinary() - _ = yym3984 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr3969 || yy2arr3969 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3969[5] { - yym3986 := z.EncBinary() - _ = yym3986 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3969[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3987 := z.EncBinary() - _ = yym3987 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr3969 || yy2arr3969 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3969[6] { - yy3989 := &x.Source - yy3989.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq3969[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("source")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3990 := &x.Source - yy3990.CodecEncodeSelf(e) - } - } - if yyr3969 || yy2arr3969 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3969[7] { - yy3992 := &x.FirstTimestamp - yym3993 := z.EncBinary() - _ = yym3993 - if false { - } else if z.HasExtensions() && z.EncExt(yy3992) { - } else if yym3993 { - z.EncBinaryMarshal(yy3992) - } else if !yym3993 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3992) - } else { - z.EncFallback(yy3992) - } - } else { - r.EncodeNil() - } - } else { - if yyq3969[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("firstTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3994 := &x.FirstTimestamp - yym3995 := z.EncBinary() - _ = yym3995 - if false { - } else if z.HasExtensions() && z.EncExt(yy3994) { - } else if yym3995 { - z.EncBinaryMarshal(yy3994) - } else if !yym3995 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3994) - } else { - z.EncFallback(yy3994) - } - } - } - if yyr3969 || yy2arr3969 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3969[8] { - yy3997 := &x.LastTimestamp - yym3998 := z.EncBinary() - _ = yym3998 - if false { - } else if z.HasExtensions() && z.EncExt(yy3997) { - } else if yym3998 { - z.EncBinaryMarshal(yy3997) - } else if !yym3998 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3997) - } else { - z.EncFallback(yy3997) - } - } else { - r.EncodeNil() - } - } else { - if yyq3969[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3999 := &x.LastTimestamp - yym4000 := z.EncBinary() - _ = yym4000 - if false { - } else 
if z.HasExtensions() && z.EncExt(yy3999) { - } else if yym4000 { - z.EncBinaryMarshal(yy3999) - } else if !yym4000 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3999) - } else { - z.EncFallback(yy3999) - } - } - } - if yyr3969 || yy2arr3969 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3969[9] { - yym4002 := z.EncBinary() - _ = yym4002 - if false { - } else { - r.EncodeInt(int64(x.Count)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq3969[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("count")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4003 := z.EncBinary() - _ = yym4003 - if false { - } else { - r.EncodeInt(int64(x.Count)) - } - } - } - if yyr3969 || yy2arr3969 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3969[10] { - yym4005 := z.EncBinary() - _ = yym4005 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Type)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3969[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4006 := z.EncBinary() - _ = yym4006 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Type)) - } - } - } - if yyr3969 || yy2arr3969 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Event) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4007 := z.DecBinary() - _ = yym4007 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct4008 := r.ContainerType() - if yyct4008 == codecSelferValueTypeMap1234 { - yyl4008 := r.ReadMapStart() - if yyl4008 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl4008, d) - } - } else if yyct4008 == codecSelferValueTypeArray1234 { - yyl4008 := r.ReadArrayStart() - if yyl4008 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl4008, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Event) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys4009Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4009Slc - var yyhl4009 bool = l >= 0 - for yyj4009 := 0; ; yyj4009++ { - if yyhl4009 { - if yyj4009 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4009Slc = r.DecodeBytes(yys4009Slc, true, true) - yys4009 := string(yys4009Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4009 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4012 := &x.ObjectMeta - yyv4012.CodecDecodeSelf(d) - } - case "involvedObject": - if r.TryDecodeAsNil() { - x.InvolvedObject = ObjectReference{} - } else { - yyv4013 := &x.InvolvedObject - yyv4013.CodecDecodeSelf(d) - } - case 
"reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - case "source": - if r.TryDecodeAsNil() { - x.Source = EventSource{} - } else { - yyv4016 := &x.Source - yyv4016.CodecDecodeSelf(d) - } - case "firstTimestamp": - if r.TryDecodeAsNil() { - x.FirstTimestamp = pkg2_unversioned.Time{} - } else { - yyv4017 := &x.FirstTimestamp - yym4018 := z.DecBinary() - _ = yym4018 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4017) { - } else if yym4018 { - z.DecBinaryUnmarshal(yyv4017) - } else if !yym4018 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4017) - } else { - z.DecFallback(yyv4017, false) - } - } - case "lastTimestamp": - if r.TryDecodeAsNil() { - x.LastTimestamp = pkg2_unversioned.Time{} - } else { - yyv4019 := &x.LastTimestamp - yym4020 := z.DecBinary() - _ = yym4020 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4019) { - } else if yym4020 { - z.DecBinaryUnmarshal(yyv4019) - } else if !yym4020 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4019) - } else { - z.DecFallback(yyv4019, false) - } - } - case "count": - if r.TryDecodeAsNil() { - x.Count = 0 - } else { - x.Count = int32(r.DecodeInt(32)) - } - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys4009) - } // end switch yys4009 - } // end for yyj4009 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Event) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4023 int - var yyb4023 bool - var yyhl4023 bool = l >= 0 - yyj4023++ - if yyhl4023 { - yyb4023 = yyj4023 > l - } else { - yyb4023 = r.CheckBreak() - } - if yyb4023 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj4023++ - if yyhl4023 { - yyb4023 = yyj4023 > l - } else { - yyb4023 = r.CheckBreak() - } - if yyb4023 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj4023++ - if yyhl4023 { - yyb4023 = yyj4023 > l - } else { - yyb4023 = r.CheckBreak() - } - if yyb4023 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4026 := &x.ObjectMeta - yyv4026.CodecDecodeSelf(d) - } - yyj4023++ - if yyhl4023 { - yyb4023 = yyj4023 > l - } else { - yyb4023 = r.CheckBreak() - } - if yyb4023 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.InvolvedObject = ObjectReference{} - } else { - yyv4027 := &x.InvolvedObject - yyv4027.CodecDecodeSelf(d) - } - yyj4023++ - if yyhl4023 { - yyb4023 = yyj4023 > l - } else { - yyb4023 = r.CheckBreak() - } - if yyb4023 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = 
string(r.DecodeString()) - } - yyj4023++ - if yyhl4023 { - yyb4023 = yyj4023 > l - } else { - yyb4023 = r.CheckBreak() - } - if yyb4023 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - yyj4023++ - if yyhl4023 { - yyb4023 = yyj4023 > l - } else { - yyb4023 = r.CheckBreak() - } - if yyb4023 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Source = EventSource{} - } else { - yyv4030 := &x.Source - yyv4030.CodecDecodeSelf(d) - } - yyj4023++ - if yyhl4023 { - yyb4023 = yyj4023 > l - } else { - yyb4023 = r.CheckBreak() - } - if yyb4023 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FirstTimestamp = pkg2_unversioned.Time{} - } else { - yyv4031 := &x.FirstTimestamp - yym4032 := z.DecBinary() - _ = yym4032 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4031) { - } else if yym4032 { - z.DecBinaryUnmarshal(yyv4031) - } else if !yym4032 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4031) - } else { - z.DecFallback(yyv4031, false) - } - } - yyj4023++ - if yyhl4023 { - yyb4023 = yyj4023 > l - } else { - yyb4023 = r.CheckBreak() - } - if yyb4023 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTimestamp = pkg2_unversioned.Time{} - } else { - yyv4033 := &x.LastTimestamp - yym4034 := z.DecBinary() - _ = yym4034 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4033) { - } else if yym4034 { - z.DecBinaryUnmarshal(yyv4033) - } else if !yym4034 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4033) - } else { - z.DecFallback(yyv4033, false) - } - } - yyj4023++ - if yyhl4023 { - yyb4023 = yyj4023 > l - } else { - yyb4023 = r.CheckBreak() - } - if yyb4023 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Count = 0 - } else { - x.Count = int32(r.DecodeInt(32)) - } - yyj4023++ - if yyhl4023 { - yyb4023 = yyj4023 > l - } else { - yyb4023 = r.CheckBreak() - } - if yyb4023 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = string(r.DecodeString()) - } - for { - yyj4023++ - if yyhl4023 { - yyb4023 = yyj4023 > l - } else { - yyb4023 = r.CheckBreak() - } - if yyb4023 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4023-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EventList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym4037 := z.EncBinary() - _ = yym4037 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep4038 := !z.EncBinary() - yy2arr4038 := z.EncBasicHandle().StructToArray - var yyq4038 [4]bool - _, _, _ = yysep4038, yyq4038, yy2arr4038 - const yyr4038 bool = false - yyq4038[0] = x.Kind != "" - yyq4038[1] = x.APIVersion != "" - yyq4038[2] = true - var 
yynn4038 int - if yyr4038 || yy2arr4038 { - r.EncodeArrayStart(4) - } else { - yynn4038 = 1 - for _, b := range yyq4038 { - if b { - yynn4038++ - } - } - r.EncodeMapStart(yynn4038) - yynn4038 = 0 - } - if yyr4038 || yy2arr4038 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4038[0] { - yym4040 := z.EncBinary() - _ = yym4040 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4038[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4041 := z.EncBinary() - _ = yym4041 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr4038 || yy2arr4038 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4038[1] { - yym4043 := z.EncBinary() - _ = yym4043 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4038[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4044 := z.EncBinary() - _ = yym4044 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr4038 || yy2arr4038 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4038[2] { - yy4046 := &x.ListMeta - yym4047 := z.EncBinary() - _ = yym4047 - if false { - } else if z.HasExtensions() && z.EncExt(yy4046) { - } else { - z.EncFallback(yy4046) - } - } else { - r.EncodeNil() - } - } else { - if yyq4038[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4048 := &x.ListMeta - yym4049 := z.EncBinary() - _ = yym4049 - if false { - } else if z.HasExtensions() && z.EncExt(yy4048) { - } else { - z.EncFallback(yy4048) - } - } - } - if yyr4038 || yy2arr4038 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym4051 := z.EncBinary() - _ = yym4051 - if false { - } else { - h.encSliceEvent(([]Event)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym4052 := z.EncBinary() - _ = yym4052 - if false { - } else { - h.encSliceEvent(([]Event)(x.Items), e) - } - } - } - if yyr4038 || yy2arr4038 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EventList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4053 := z.DecBinary() - _ = yym4053 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct4054 := r.ContainerType() - if yyct4054 == codecSelferValueTypeMap1234 { - yyl4054 := r.ReadMapStart() - if yyl4054 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl4054, d) - } - } else if yyct4054 == codecSelferValueTypeArray1234 { - yyl4054 := r.ReadArrayStart() - if 
yyl4054 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl4054, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EventList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys4055Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4055Slc - var yyhl4055 bool = l >= 0 - for yyj4055 := 0; ; yyj4055++ { - if yyhl4055 { - if yyj4055 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4055Slc = r.DecodeBytes(yys4055Slc, true, true) - yys4055 := string(yys4055Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4055 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4058 := &x.ListMeta - yym4059 := z.DecBinary() - _ = yym4059 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4058) { - } else { - z.DecFallback(yyv4058, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4060 := &x.Items - yym4061 := z.DecBinary() - _ = yym4061 - if false { - } else { - h.decSliceEvent((*[]Event)(yyv4060), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys4055) - } // end switch yys4055 - } // end for yyj4055 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EventList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4062 int - var yyb4062 bool - var yyhl4062 bool = l >= 0 - yyj4062++ - if yyhl4062 { - yyb4062 = yyj4062 > l - } else { - yyb4062 = r.CheckBreak() - } - if yyb4062 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj4062++ - if yyhl4062 { - yyb4062 = yyj4062 > l - } else { - yyb4062 = r.CheckBreak() - } - if yyb4062 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj4062++ - if yyhl4062 { - yyb4062 = yyj4062 > l - } else { - yyb4062 = r.CheckBreak() - } - if yyb4062 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4065 := &x.ListMeta - yym4066 := z.DecBinary() - _ = yym4066 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4065) { - } else { - z.DecFallback(yyv4065, false) - } - } - yyj4062++ - if yyhl4062 { - yyb4062 = yyj4062 > l - } else { - yyb4062 = r.CheckBreak() - } - if yyb4062 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4067 := &x.Items - yym4068 := z.DecBinary() - _ = yym4068 - if false { - } else { - 
h.decSliceEvent((*[]Event)(yyv4067), d) - } - } - for { - yyj4062++ - if yyhl4062 { - yyb4062 = yyj4062 > l - } else { - yyb4062 = r.CheckBreak() - } - if yyb4062 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4062-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *List) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym4069 := z.EncBinary() - _ = yym4069 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep4070 := !z.EncBinary() - yy2arr4070 := z.EncBasicHandle().StructToArray - var yyq4070 [4]bool - _, _, _ = yysep4070, yyq4070, yy2arr4070 - const yyr4070 bool = false - yyq4070[0] = x.Kind != "" - yyq4070[1] = x.APIVersion != "" - yyq4070[2] = true - var yynn4070 int - if yyr4070 || yy2arr4070 { - r.EncodeArrayStart(4) - } else { - yynn4070 = 1 - for _, b := range yyq4070 { - if b { - yynn4070++ - } - } - r.EncodeMapStart(yynn4070) - yynn4070 = 0 - } - if yyr4070 || yy2arr4070 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4070[0] { - yym4072 := z.EncBinary() - _ = yym4072 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4070[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4073 := z.EncBinary() - _ = yym4073 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr4070 || yy2arr4070 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4070[1] { - yym4075 := z.EncBinary() - _ = yym4075 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4070[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4076 := z.EncBinary() - _ = yym4076 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr4070 || yy2arr4070 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4070[2] { - yy4078 := &x.ListMeta - yym4079 := z.EncBinary() - _ = yym4079 - if false { - } else if z.HasExtensions() && z.EncExt(yy4078) { - } else { - z.EncFallback(yy4078) - } - } else { - r.EncodeNil() - } - } else { - if yyq4070[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4080 := &x.ListMeta - yym4081 := z.EncBinary() - _ = yym4081 - if false { - } else if z.HasExtensions() && z.EncExt(yy4080) { - } else { - z.EncFallback(yy4080) - } - } - } - if yyr4070 || yy2arr4070 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym4083 := z.EncBinary() - _ = yym4083 - if false { - } else { - h.encSliceruntime_Object(([]pkg7_runtime.Object)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if 
x.Items == nil { - r.EncodeNil() - } else { - yym4084 := z.EncBinary() - _ = yym4084 - if false { - } else { - h.encSliceruntime_Object(([]pkg7_runtime.Object)(x.Items), e) - } - } - } - if yyr4070 || yy2arr4070 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *List) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4085 := z.DecBinary() - _ = yym4085 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct4086 := r.ContainerType() - if yyct4086 == codecSelferValueTypeMap1234 { - yyl4086 := r.ReadMapStart() - if yyl4086 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl4086, d) - } - } else if yyct4086 == codecSelferValueTypeArray1234 { - yyl4086 := r.ReadArrayStart() - if yyl4086 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl4086, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *List) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys4087Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4087Slc - var yyhl4087 bool = l >= 0 - for yyj4087 := 0; ; yyj4087++ { - if yyhl4087 { - if yyj4087 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4087Slc = r.DecodeBytes(yys4087Slc, true, true) - yys4087 := string(yys4087Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4087 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4090 := &x.ListMeta - yym4091 := z.DecBinary() - _ = yym4091 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4090) { - } else { - z.DecFallback(yyv4090, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4092 := &x.Items - yym4093 := z.DecBinary() - _ = yym4093 - if false { - } else { - h.decSliceruntime_Object((*[]pkg7_runtime.Object)(yyv4092), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys4087) - } // end switch yys4087 - } // end for yyj4087 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *List) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4094 int - var yyb4094 bool - var yyhl4094 bool = l >= 0 - yyj4094++ - if yyhl4094 { - yyb4094 = yyj4094 > l - } else { - yyb4094 = r.CheckBreak() - } - if yyb4094 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj4094++ - if yyhl4094 { - yyb4094 = yyj4094 > l - } else { - yyb4094 = r.CheckBreak() - } - if yyb4094 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - 
x.APIVersion = string(r.DecodeString()) - } - yyj4094++ - if yyhl4094 { - yyb4094 = yyj4094 > l - } else { - yyb4094 = r.CheckBreak() - } - if yyb4094 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4097 := &x.ListMeta - yym4098 := z.DecBinary() - _ = yym4098 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4097) { - } else { - z.DecFallback(yyv4097, false) - } - } - yyj4094++ - if yyhl4094 { - yyb4094 = yyj4094 > l - } else { - yyb4094 = r.CheckBreak() - } - if yyb4094 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4099 := &x.Items - yym4100 := z.DecBinary() - _ = yym4100 - if false { - } else { - h.decSliceruntime_Object((*[]pkg7_runtime.Object)(yyv4099), d) - } - } - for { - yyj4094++ - if yyhl4094 { - yyb4094 = yyj4094 > l - } else { - yyb4094 = r.CheckBreak() - } - if yyb4094 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4094-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x LimitType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym4101 := z.EncBinary() - _ = yym4101 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *LimitType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4102 := z.DecBinary() - _ = yym4102 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *LimitRangeItem) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym4103 := z.EncBinary() - _ = yym4103 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep4104 := !z.EncBinary() - yy2arr4104 := z.EncBasicHandle().StructToArray - var yyq4104 [6]bool - _, _, _ = yysep4104, yyq4104, yy2arr4104 - const yyr4104 bool = false - yyq4104[0] = x.Type != "" - yyq4104[1] = len(x.Max) != 0 - yyq4104[2] = len(x.Min) != 0 - yyq4104[3] = len(x.Default) != 0 - yyq4104[4] = len(x.DefaultRequest) != 0 - yyq4104[5] = len(x.MaxLimitRequestRatio) != 0 - var yynn4104 int - if yyr4104 || yy2arr4104 { - r.EncodeArrayStart(6) - } else { - yynn4104 = 0 - for _, b := range yyq4104 { - if b { - yynn4104++ - } - } - r.EncodeMapStart(yynn4104) - yynn4104 = 0 - } - if yyr4104 || yy2arr4104 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4104[0] { - x.Type.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4104[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - } - if yyr4104 || yy2arr4104 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4104[1] { - if x.Max == nil { - r.EncodeNil() - } else { - x.Max.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq4104[1] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("max")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Max == nil { - r.EncodeNil() - } else { - x.Max.CodecEncodeSelf(e) - } - } - } - if yyr4104 || yy2arr4104 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4104[2] { - if x.Min == nil { - r.EncodeNil() - } else { - x.Min.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq4104[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("min")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Min == nil { - r.EncodeNil() - } else { - x.Min.CodecEncodeSelf(e) - } - } - } - if yyr4104 || yy2arr4104 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4104[3] { - if x.Default == nil { - r.EncodeNil() - } else { - x.Default.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq4104[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("default")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Default == nil { - r.EncodeNil() - } else { - x.Default.CodecEncodeSelf(e) - } - } - } - if yyr4104 || yy2arr4104 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4104[4] { - if x.DefaultRequest == nil { - r.EncodeNil() - } else { - x.DefaultRequest.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq4104[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("defaultRequest")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DefaultRequest == nil { - r.EncodeNil() - } else { - x.DefaultRequest.CodecEncodeSelf(e) - } - } - } - if yyr4104 || yy2arr4104 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4104[5] { - if x.MaxLimitRequestRatio == nil { - r.EncodeNil() - } else { - x.MaxLimitRequestRatio.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq4104[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxLimitRequestRatio")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MaxLimitRequestRatio == nil { - r.EncodeNil() - } else { - x.MaxLimitRequestRatio.CodecEncodeSelf(e) - } - } - } - if yyr4104 || yy2arr4104 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LimitRangeItem) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4111 := z.DecBinary() - _ = yym4111 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct4112 := r.ContainerType() - if yyct4112 == codecSelferValueTypeMap1234 { - yyl4112 := r.ReadMapStart() - if yyl4112 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl4112, d) - } - } else if yyct4112 == codecSelferValueTypeArray1234 { - yyl4112 := r.ReadArrayStart() - if yyl4112 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl4112, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LimitRangeItem) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys4113Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4113Slc - var yyhl4113 bool = l >= 0 - for yyj4113 := 0; ; yyj4113++ { - if yyhl4113 { - if yyj4113 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4113Slc = r.DecodeBytes(yys4113Slc, true, true) - yys4113 := string(yys4113Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4113 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = LimitType(r.DecodeString()) - } - case "max": - if r.TryDecodeAsNil() { - x.Max = nil - } else { - yyv4115 := &x.Max - yyv4115.CodecDecodeSelf(d) - } - case "min": - if r.TryDecodeAsNil() { - x.Min = nil - } else { - yyv4116 := &x.Min - yyv4116.CodecDecodeSelf(d) - } - case "default": - if r.TryDecodeAsNil() { - x.Default = nil - } else { - yyv4117 := &x.Default - yyv4117.CodecDecodeSelf(d) - } - case "defaultRequest": - if r.TryDecodeAsNil() { - x.DefaultRequest = nil - } else { - yyv4118 := &x.DefaultRequest - yyv4118.CodecDecodeSelf(d) - } - case "maxLimitRequestRatio": - if r.TryDecodeAsNil() { - x.MaxLimitRequestRatio = nil - } else { - yyv4119 := &x.MaxLimitRequestRatio - yyv4119.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys4113) - } // end switch yys4113 - } // end for yyj4113 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LimitRangeItem) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4120 int - var yyb4120 bool - var yyhl4120 bool = l >= 0 - yyj4120++ - if yyhl4120 { - yyb4120 = yyj4120 > l - } else { - yyb4120 = r.CheckBreak() - } - if yyb4120 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = LimitType(r.DecodeString()) - } - yyj4120++ - if yyhl4120 { - yyb4120 = yyj4120 > l - } else { - yyb4120 = r.CheckBreak() - } - if yyb4120 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Max = nil - } else { - yyv4122 := &x.Max - yyv4122.CodecDecodeSelf(d) - } - yyj4120++ - if yyhl4120 { - yyb4120 = yyj4120 > l - } else { - yyb4120 = r.CheckBreak() - } - if yyb4120 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Min = nil - } else { - yyv4123 := &x.Min - yyv4123.CodecDecodeSelf(d) - } - yyj4120++ - if yyhl4120 { - yyb4120 = yyj4120 > l - } else { - yyb4120 = r.CheckBreak() - } - if yyb4120 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Default = nil - } else { - yyv4124 := &x.Default - yyv4124.CodecDecodeSelf(d) - } - yyj4120++ - if yyhl4120 { - yyb4120 = yyj4120 > l - } else { - yyb4120 = r.CheckBreak() - } - if yyb4120 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DefaultRequest = nil - } else { - yyv4125 := &x.DefaultRequest - yyv4125.CodecDecodeSelf(d) - } - yyj4120++ - if yyhl4120 { - yyb4120 = yyj4120 > l - } else { - yyb4120 = 
r.CheckBreak() - } - if yyb4120 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxLimitRequestRatio = nil - } else { - yyv4126 := &x.MaxLimitRequestRatio - yyv4126.CodecDecodeSelf(d) - } - for { - yyj4120++ - if yyhl4120 { - yyb4120 = yyj4120 > l - } else { - yyb4120 = r.CheckBreak() - } - if yyb4120 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4120-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LimitRangeSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym4127 := z.EncBinary() - _ = yym4127 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep4128 := !z.EncBinary() - yy2arr4128 := z.EncBasicHandle().StructToArray - var yyq4128 [1]bool - _, _, _ = yysep4128, yyq4128, yy2arr4128 - const yyr4128 bool = false - var yynn4128 int - if yyr4128 || yy2arr4128 { - r.EncodeArrayStart(1) - } else { - yynn4128 = 1 - for _, b := range yyq4128 { - if b { - yynn4128++ - } - } - r.EncodeMapStart(yynn4128) - yynn4128 = 0 - } - if yyr4128 || yy2arr4128 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Limits == nil { - r.EncodeNil() - } else { - yym4130 := z.EncBinary() - _ = yym4130 - if false { - } else { - h.encSliceLimitRangeItem(([]LimitRangeItem)(x.Limits), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("limits")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Limits == nil { - r.EncodeNil() - } else { - yym4131 := z.EncBinary() - _ = yym4131 - if false { - } else { - h.encSliceLimitRangeItem(([]LimitRangeItem)(x.Limits), e) - } - } - } - if yyr4128 || yy2arr4128 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LimitRangeSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4132 := z.DecBinary() - _ = yym4132 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct4133 := r.ContainerType() - if yyct4133 == codecSelferValueTypeMap1234 { - yyl4133 := r.ReadMapStart() - if yyl4133 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl4133, d) - } - } else if yyct4133 == codecSelferValueTypeArray1234 { - yyl4133 := r.ReadArrayStart() - if yyl4133 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl4133, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LimitRangeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys4134Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4134Slc - var yyhl4134 bool = l >= 0 - for yyj4134 := 0; ; yyj4134++ { - if yyhl4134 { - if yyj4134 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4134Slc = r.DecodeBytes(yys4134Slc, true, true) - yys4134 := string(yys4134Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - 
switch yys4134 { - case "limits": - if r.TryDecodeAsNil() { - x.Limits = nil - } else { - yyv4135 := &x.Limits - yym4136 := z.DecBinary() - _ = yym4136 - if false { - } else { - h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv4135), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys4134) - } // end switch yys4134 - } // end for yyj4134 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LimitRangeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4137 int - var yyb4137 bool - var yyhl4137 bool = l >= 0 - yyj4137++ - if yyhl4137 { - yyb4137 = yyj4137 > l - } else { - yyb4137 = r.CheckBreak() - } - if yyb4137 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Limits = nil - } else { - yyv4138 := &x.Limits - yym4139 := z.DecBinary() - _ = yym4139 - if false { - } else { - h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv4138), d) - } - } - for { - yyj4137++ - if yyhl4137 { - yyb4137 = yyj4137 > l - } else { - yyb4137 = r.CheckBreak() - } - if yyb4137 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4137-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LimitRange) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym4140 := z.EncBinary() - _ = yym4140 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep4141 := !z.EncBinary() - yy2arr4141 := z.EncBasicHandle().StructToArray - var yyq4141 [4]bool - _, _, _ = yysep4141, yyq4141, yy2arr4141 - const yyr4141 bool = false - yyq4141[0] = x.Kind != "" - yyq4141[1] = x.APIVersion != "" - yyq4141[2] = true - yyq4141[3] = true - var yynn4141 int - if yyr4141 || yy2arr4141 { - r.EncodeArrayStart(4) - } else { - yynn4141 = 0 - for _, b := range yyq4141 { - if b { - yynn4141++ - } - } - r.EncodeMapStart(yynn4141) - yynn4141 = 0 - } - if yyr4141 || yy2arr4141 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4141[0] { - yym4143 := z.EncBinary() - _ = yym4143 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4141[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4144 := z.EncBinary() - _ = yym4144 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr4141 || yy2arr4141 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4141[1] { - yym4146 := z.EncBinary() - _ = yym4146 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4141[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4147 := z.EncBinary() - _ = yym4147 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr4141 || yy2arr4141 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - 
if yyq4141[2] { - yy4149 := &x.ObjectMeta - yy4149.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq4141[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4150 := &x.ObjectMeta - yy4150.CodecEncodeSelf(e) - } - } - if yyr4141 || yy2arr4141 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4141[3] { - yy4152 := &x.Spec - yy4152.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq4141[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4153 := &x.Spec - yy4153.CodecEncodeSelf(e) - } - } - if yyr4141 || yy2arr4141 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LimitRange) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4154 := z.DecBinary() - _ = yym4154 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct4155 := r.ContainerType() - if yyct4155 == codecSelferValueTypeMap1234 { - yyl4155 := r.ReadMapStart() - if yyl4155 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl4155, d) - } - } else if yyct4155 == codecSelferValueTypeArray1234 { - yyl4155 := r.ReadArrayStart() - if yyl4155 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl4155, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LimitRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys4156Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4156Slc - var yyhl4156 bool = l >= 0 - for yyj4156 := 0; ; yyj4156++ { - if yyhl4156 { - if yyj4156 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4156Slc = r.DecodeBytes(yys4156Slc, true, true) - yys4156 := string(yys4156Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4156 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4159 := &x.ObjectMeta - yyv4159.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = LimitRangeSpec{} - } else { - yyv4160 := &x.Spec - yyv4160.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys4156) - } // end switch yys4156 - } // end for yyj4156 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LimitRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4161 int - var yyb4161 bool - var yyhl4161 bool = l >= 0 - yyj4161++ - if yyhl4161 { - yyb4161 = yyj4161 > l - } else { - yyb4161 = r.CheckBreak() - } - if yyb4161 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj4161++ - if yyhl4161 { - yyb4161 = yyj4161 > l - } else { - yyb4161 = r.CheckBreak() - } - if yyb4161 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj4161++ - if yyhl4161 { - yyb4161 = yyj4161 > l - } else { - yyb4161 = r.CheckBreak() - } - if yyb4161 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4164 := &x.ObjectMeta - yyv4164.CodecDecodeSelf(d) - } - yyj4161++ - if yyhl4161 { - yyb4161 = yyj4161 > l - } else { - yyb4161 = r.CheckBreak() - } - if yyb4161 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = LimitRangeSpec{} - } else { - yyv4165 := &x.Spec - yyv4165.CodecDecodeSelf(d) - } - for { - yyj4161++ - if yyhl4161 { - yyb4161 = yyj4161 > l - } else { - yyb4161 = r.CheckBreak() - } - if yyb4161 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4161-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LimitRangeList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym4166 := z.EncBinary() - _ = yym4166 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep4167 := !z.EncBinary() - yy2arr4167 := z.EncBasicHandle().StructToArray - var yyq4167 [4]bool - _, _, _ = yysep4167, yyq4167, yy2arr4167 - const yyr4167 bool = false - yyq4167[0] = x.Kind != "" - yyq4167[1] = x.APIVersion != "" - yyq4167[2] = true - var yynn4167 int - if yyr4167 || yy2arr4167 { - r.EncodeArrayStart(4) - } else { - yynn4167 = 1 - for _, b := range yyq4167 { - if b { - yynn4167++ - } - } - r.EncodeMapStart(yynn4167) - yynn4167 = 0 - } - if yyr4167 || yy2arr4167 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4167[0] { - yym4169 := z.EncBinary() - _ = yym4169 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4167[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4170 := z.EncBinary() - _ = yym4170 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr4167 || yy2arr4167 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4167[1] { - yym4172 := z.EncBinary() - _ = yym4172 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4167[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4173 := z.EncBinary() - _ = yym4173 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, 
string(x.APIVersion)) - } - } - } - if yyr4167 || yy2arr4167 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4167[2] { - yy4175 := &x.ListMeta - yym4176 := z.EncBinary() - _ = yym4176 - if false { - } else if z.HasExtensions() && z.EncExt(yy4175) { - } else { - z.EncFallback(yy4175) - } - } else { - r.EncodeNil() - } - } else { - if yyq4167[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4177 := &x.ListMeta - yym4178 := z.EncBinary() - _ = yym4178 - if false { - } else if z.HasExtensions() && z.EncExt(yy4177) { - } else { - z.EncFallback(yy4177) - } - } - } - if yyr4167 || yy2arr4167 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym4180 := z.EncBinary() - _ = yym4180 - if false { - } else { - h.encSliceLimitRange(([]LimitRange)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym4181 := z.EncBinary() - _ = yym4181 - if false { - } else { - h.encSliceLimitRange(([]LimitRange)(x.Items), e) - } - } - } - if yyr4167 || yy2arr4167 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LimitRangeList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4182 := z.DecBinary() - _ = yym4182 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct4183 := r.ContainerType() - if yyct4183 == codecSelferValueTypeMap1234 { - yyl4183 := r.ReadMapStart() - if yyl4183 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl4183, d) - } - } else if yyct4183 == codecSelferValueTypeArray1234 { - yyl4183 := r.ReadArrayStart() - if yyl4183 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl4183, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LimitRangeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys4184Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4184Slc - var yyhl4184 bool = l >= 0 - for yyj4184 := 0; ; yyj4184++ { - if yyhl4184 { - if yyj4184 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4184Slc = r.DecodeBytes(yys4184Slc, true, true) - yys4184 := string(yys4184Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4184 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4187 := &x.ListMeta - yym4188 := z.DecBinary() - _ = yym4188 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4187) { - } else { - z.DecFallback(yyv4187, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } 
else { - yyv4189 := &x.Items - yym4190 := z.DecBinary() - _ = yym4190 - if false { - } else { - h.decSliceLimitRange((*[]LimitRange)(yyv4189), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys4184) - } // end switch yys4184 - } // end for yyj4184 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LimitRangeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4191 int - var yyb4191 bool - var yyhl4191 bool = l >= 0 - yyj4191++ - if yyhl4191 { - yyb4191 = yyj4191 > l - } else { - yyb4191 = r.CheckBreak() - } - if yyb4191 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj4191++ - if yyhl4191 { - yyb4191 = yyj4191 > l - } else { - yyb4191 = r.CheckBreak() - } - if yyb4191 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj4191++ - if yyhl4191 { - yyb4191 = yyj4191 > l - } else { - yyb4191 = r.CheckBreak() - } - if yyb4191 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4194 := &x.ListMeta - yym4195 := z.DecBinary() - _ = yym4195 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4194) { - } else { - z.DecFallback(yyv4194, false) - } - } - yyj4191++ - if yyhl4191 { - yyb4191 = yyj4191 > l - } else { - yyb4191 = r.CheckBreak() - } - if yyb4191 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4196 := &x.Items - yym4197 := z.DecBinary() - _ = yym4197 - if false { - } else { - h.decSliceLimitRange((*[]LimitRange)(yyv4196), d) - } - } - for { - yyj4191++ - if yyhl4191 { - yyb4191 = yyj4191 > l - } else { - yyb4191 = r.CheckBreak() - } - if yyb4191 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4191-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ResourceQuotaScope) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym4198 := z.EncBinary() - _ = yym4198 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ResourceQuotaScope) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4199 := z.DecBinary() - _ = yym4199 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ResourceQuotaSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym4200 := z.EncBinary() - _ = yym4200 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep4201 := !z.EncBinary() - yy2arr4201 := z.EncBasicHandle().StructToArray - var yyq4201 [2]bool - _, _, _ = 
yysep4201, yyq4201, yy2arr4201 - const yyr4201 bool = false - yyq4201[0] = len(x.Hard) != 0 - yyq4201[1] = len(x.Scopes) != 0 - var yynn4201 int - if yyr4201 || yy2arr4201 { - r.EncodeArrayStart(2) - } else { - yynn4201 = 0 - for _, b := range yyq4201 { - if b { - yynn4201++ - } - } - r.EncodeMapStart(yynn4201) - yynn4201 = 0 - } - if yyr4201 || yy2arr4201 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4201[0] { - if x.Hard == nil { - r.EncodeNil() - } else { - x.Hard.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq4201[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hard")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Hard == nil { - r.EncodeNil() - } else { - x.Hard.CodecEncodeSelf(e) - } - } - } - if yyr4201 || yy2arr4201 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4201[1] { - if x.Scopes == nil { - r.EncodeNil() - } else { - yym4204 := z.EncBinary() - _ = yym4204 - if false { - } else { - h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq4201[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("scopes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Scopes == nil { - r.EncodeNil() - } else { - yym4205 := z.EncBinary() - _ = yym4205 - if false { - } else { - h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e) - } - } - } - } - if yyr4201 || yy2arr4201 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceQuotaSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4206 := z.DecBinary() - _ = yym4206 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct4207 := r.ContainerType() - if yyct4207 == codecSelferValueTypeMap1234 { - yyl4207 := r.ReadMapStart() - if yyl4207 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl4207, d) - } - } else if yyct4207 == codecSelferValueTypeArray1234 { - yyl4207 := r.ReadArrayStart() - if yyl4207 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl4207, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceQuotaSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys4208Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4208Slc - var yyhl4208 bool = l >= 0 - for yyj4208 := 0; ; yyj4208++ { - if yyhl4208 { - if yyj4208 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4208Slc = r.DecodeBytes(yys4208Slc, true, true) - yys4208 := string(yys4208Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4208 { - case "hard": - if r.TryDecodeAsNil() { - x.Hard = nil - } else { - yyv4209 := &x.Hard - yyv4209.CodecDecodeSelf(d) - } - case "scopes": - if r.TryDecodeAsNil() { - x.Scopes = nil - } else { - yyv4210 := &x.Scopes - yym4211 := z.DecBinary() - _ = yym4211 - if false { - } else { - 
h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv4210), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys4208) - } // end switch yys4208 - } // end for yyj4208 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceQuotaSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4212 int - var yyb4212 bool - var yyhl4212 bool = l >= 0 - yyj4212++ - if yyhl4212 { - yyb4212 = yyj4212 > l - } else { - yyb4212 = r.CheckBreak() - } - if yyb4212 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hard = nil - } else { - yyv4213 := &x.Hard - yyv4213.CodecDecodeSelf(d) - } - yyj4212++ - if yyhl4212 { - yyb4212 = yyj4212 > l - } else { - yyb4212 = r.CheckBreak() - } - if yyb4212 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Scopes = nil - } else { - yyv4214 := &x.Scopes - yym4215 := z.DecBinary() - _ = yym4215 - if false { - } else { - h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv4214), d) - } - } - for { - yyj4212++ - if yyhl4212 { - yyb4212 = yyj4212 > l - } else { - yyb4212 = r.CheckBreak() - } - if yyb4212 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4212-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceQuotaStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym4216 := z.EncBinary() - _ = yym4216 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep4217 := !z.EncBinary() - yy2arr4217 := z.EncBasicHandle().StructToArray - var yyq4217 [2]bool - _, _, _ = yysep4217, yyq4217, yy2arr4217 - const yyr4217 bool = false - yyq4217[0] = len(x.Hard) != 0 - yyq4217[1] = len(x.Used) != 0 - var yynn4217 int - if yyr4217 || yy2arr4217 { - r.EncodeArrayStart(2) - } else { - yynn4217 = 0 - for _, b := range yyq4217 { - if b { - yynn4217++ - } - } - r.EncodeMapStart(yynn4217) - yynn4217 = 0 - } - if yyr4217 || yy2arr4217 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4217[0] { - if x.Hard == nil { - r.EncodeNil() - } else { - x.Hard.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq4217[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hard")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Hard == nil { - r.EncodeNil() - } else { - x.Hard.CodecEncodeSelf(e) - } - } - } - if yyr4217 || yy2arr4217 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4217[1] { - if x.Used == nil { - r.EncodeNil() - } else { - x.Used.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq4217[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("used")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Used == nil { - r.EncodeNil() - } else { - x.Used.CodecEncodeSelf(e) - } - } - } - if yyr4217 || yy2arr4217 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x 
*ResourceQuotaStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4220 := z.DecBinary() - _ = yym4220 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct4221 := r.ContainerType() - if yyct4221 == codecSelferValueTypeMap1234 { - yyl4221 := r.ReadMapStart() - if yyl4221 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl4221, d) - } - } else if yyct4221 == codecSelferValueTypeArray1234 { - yyl4221 := r.ReadArrayStart() - if yyl4221 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl4221, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceQuotaStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys4222Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4222Slc - var yyhl4222 bool = l >= 0 - for yyj4222 := 0; ; yyj4222++ { - if yyhl4222 { - if yyj4222 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4222Slc = r.DecodeBytes(yys4222Slc, true, true) - yys4222 := string(yys4222Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4222 { - case "hard": - if r.TryDecodeAsNil() { - x.Hard = nil - } else { - yyv4223 := &x.Hard - yyv4223.CodecDecodeSelf(d) - } - case "used": - if r.TryDecodeAsNil() { - x.Used = nil - } else { - yyv4224 := &x.Used - yyv4224.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys4222) - } // end switch yys4222 - } // end for yyj4222 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceQuotaStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4225 int - var yyb4225 bool - var yyhl4225 bool = l >= 0 - yyj4225++ - if yyhl4225 { - yyb4225 = yyj4225 > l - } else { - yyb4225 = r.CheckBreak() - } - if yyb4225 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hard = nil - } else { - yyv4226 := &x.Hard - yyv4226.CodecDecodeSelf(d) - } - yyj4225++ - if yyhl4225 { - yyb4225 = yyj4225 > l - } else { - yyb4225 = r.CheckBreak() - } - if yyb4225 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Used = nil - } else { - yyv4227 := &x.Used - yyv4227.CodecDecodeSelf(d) - } - for { - yyj4225++ - if yyhl4225 { - yyb4225 = yyj4225 > l - } else { - yyb4225 = r.CheckBreak() - } - if yyb4225 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4225-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceQuota) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym4228 := z.EncBinary() - _ = yym4228 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep4229 := !z.EncBinary() - yy2arr4229 := z.EncBasicHandle().StructToArray - var yyq4229 [5]bool - _, _, _ = yysep4229, yyq4229, yy2arr4229 - const 
yyr4229 bool = false - yyq4229[0] = x.Kind != "" - yyq4229[1] = x.APIVersion != "" - yyq4229[2] = true - yyq4229[3] = true - yyq4229[4] = true - var yynn4229 int - if yyr4229 || yy2arr4229 { - r.EncodeArrayStart(5) - } else { - yynn4229 = 0 - for _, b := range yyq4229 { - if b { - yynn4229++ - } - } - r.EncodeMapStart(yynn4229) - yynn4229 = 0 - } - if yyr4229 || yy2arr4229 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4229[0] { - yym4231 := z.EncBinary() - _ = yym4231 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4229[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4232 := z.EncBinary() - _ = yym4232 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr4229 || yy2arr4229 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4229[1] { - yym4234 := z.EncBinary() - _ = yym4234 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4229[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4235 := z.EncBinary() - _ = yym4235 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr4229 || yy2arr4229 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4229[2] { - yy4237 := &x.ObjectMeta - yy4237.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq4229[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4238 := &x.ObjectMeta - yy4238.CodecEncodeSelf(e) - } - } - if yyr4229 || yy2arr4229 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4229[3] { - yy4240 := &x.Spec - yy4240.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq4229[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4241 := &x.Spec - yy4241.CodecEncodeSelf(e) - } - } - if yyr4229 || yy2arr4229 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4229[4] { - yy4243 := &x.Status - yy4243.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq4229[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4244 := &x.Status - yy4244.CodecEncodeSelf(e) - } - } - if yyr4229 || yy2arr4229 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceQuota) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4245 := z.DecBinary() - _ = yym4245 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct4246 := r.ContainerType() - if yyct4246 == codecSelferValueTypeMap1234 { - yyl4246 := r.ReadMapStart() - if 
yyl4246 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl4246, d) - } - } else if yyct4246 == codecSelferValueTypeArray1234 { - yyl4246 := r.ReadArrayStart() - if yyl4246 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl4246, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceQuota) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys4247Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4247Slc - var yyhl4247 bool = l >= 0 - for yyj4247 := 0; ; yyj4247++ { - if yyhl4247 { - if yyj4247 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4247Slc = r.DecodeBytes(yys4247Slc, true, true) - yys4247 := string(yys4247Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4247 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4250 := &x.ObjectMeta - yyv4250.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ResourceQuotaSpec{} - } else { - yyv4251 := &x.Spec - yyv4251.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ResourceQuotaStatus{} - } else { - yyv4252 := &x.Status - yyv4252.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys4247) - } // end switch yys4247 - } // end for yyj4247 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceQuota) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4253 int - var yyb4253 bool - var yyhl4253 bool = l >= 0 - yyj4253++ - if yyhl4253 { - yyb4253 = yyj4253 > l - } else { - yyb4253 = r.CheckBreak() - } - if yyb4253 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj4253++ - if yyhl4253 { - yyb4253 = yyj4253 > l - } else { - yyb4253 = r.CheckBreak() - } - if yyb4253 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj4253++ - if yyhl4253 { - yyb4253 = yyj4253 > l - } else { - yyb4253 = r.CheckBreak() - } - if yyb4253 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4256 := &x.ObjectMeta - yyv4256.CodecDecodeSelf(d) - } - yyj4253++ - if yyhl4253 { - yyb4253 = yyj4253 > l - } else { - yyb4253 = r.CheckBreak() - } - if yyb4253 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ResourceQuotaSpec{} - } else { - yyv4257 := &x.Spec - yyv4257.CodecDecodeSelf(d) - } - 
yyj4253++ - if yyhl4253 { - yyb4253 = yyj4253 > l - } else { - yyb4253 = r.CheckBreak() - } - if yyb4253 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ResourceQuotaStatus{} - } else { - yyv4258 := &x.Status - yyv4258.CodecDecodeSelf(d) - } - for { - yyj4253++ - if yyhl4253 { - yyb4253 = yyj4253 > l - } else { - yyb4253 = r.CheckBreak() - } - if yyb4253 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4253-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceQuotaList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym4259 := z.EncBinary() - _ = yym4259 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep4260 := !z.EncBinary() - yy2arr4260 := z.EncBasicHandle().StructToArray - var yyq4260 [4]bool - _, _, _ = yysep4260, yyq4260, yy2arr4260 - const yyr4260 bool = false - yyq4260[0] = x.Kind != "" - yyq4260[1] = x.APIVersion != "" - yyq4260[2] = true - var yynn4260 int - if yyr4260 || yy2arr4260 { - r.EncodeArrayStart(4) - } else { - yynn4260 = 1 - for _, b := range yyq4260 { - if b { - yynn4260++ - } - } - r.EncodeMapStart(yynn4260) - yynn4260 = 0 - } - if yyr4260 || yy2arr4260 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4260[0] { - yym4262 := z.EncBinary() - _ = yym4262 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4260[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4263 := z.EncBinary() - _ = yym4263 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr4260 || yy2arr4260 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4260[1] { - yym4265 := z.EncBinary() - _ = yym4265 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4260[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4266 := z.EncBinary() - _ = yym4266 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr4260 || yy2arr4260 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4260[2] { - yy4268 := &x.ListMeta - yym4269 := z.EncBinary() - _ = yym4269 - if false { - } else if z.HasExtensions() && z.EncExt(yy4268) { - } else { - z.EncFallback(yy4268) - } - } else { - r.EncodeNil() - } - } else { - if yyq4260[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4270 := &x.ListMeta - yym4271 := z.EncBinary() - _ = yym4271 - if false { - } else if z.HasExtensions() && z.EncExt(yy4270) { - } else { - z.EncFallback(yy4270) - } - } - } - if yyr4260 || yy2arr4260 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } 
else { - yym4273 := z.EncBinary() - _ = yym4273 - if false { - } else { - h.encSliceResourceQuota(([]ResourceQuota)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym4274 := z.EncBinary() - _ = yym4274 - if false { - } else { - h.encSliceResourceQuota(([]ResourceQuota)(x.Items), e) - } - } - } - if yyr4260 || yy2arr4260 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceQuotaList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4275 := z.DecBinary() - _ = yym4275 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct4276 := r.ContainerType() - if yyct4276 == codecSelferValueTypeMap1234 { - yyl4276 := r.ReadMapStart() - if yyl4276 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl4276, d) - } - } else if yyct4276 == codecSelferValueTypeArray1234 { - yyl4276 := r.ReadArrayStart() - if yyl4276 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl4276, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceQuotaList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys4277Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4277Slc - var yyhl4277 bool = l >= 0 - for yyj4277 := 0; ; yyj4277++ { - if yyhl4277 { - if yyj4277 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4277Slc = r.DecodeBytes(yys4277Slc, true, true) - yys4277 := string(yys4277Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4277 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4280 := &x.ListMeta - yym4281 := z.DecBinary() - _ = yym4281 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4280) { - } else { - z.DecFallback(yyv4280, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4282 := &x.Items - yym4283 := z.DecBinary() - _ = yym4283 - if false { - } else { - h.decSliceResourceQuota((*[]ResourceQuota)(yyv4282), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys4277) - } // end switch yys4277 - } // end for yyj4277 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceQuotaList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4284 int - var yyb4284 bool - var yyhl4284 bool = l >= 0 - yyj4284++ - if yyhl4284 { - yyb4284 = yyj4284 > l - } else { - yyb4284 = r.CheckBreak() - } - if yyb4284 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" 
- } else { - x.Kind = string(r.DecodeString()) - } - yyj4284++ - if yyhl4284 { - yyb4284 = yyj4284 > l - } else { - yyb4284 = r.CheckBreak() - } - if yyb4284 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj4284++ - if yyhl4284 { - yyb4284 = yyj4284 > l - } else { - yyb4284 = r.CheckBreak() - } - if yyb4284 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4287 := &x.ListMeta - yym4288 := z.DecBinary() - _ = yym4288 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4287) { - } else { - z.DecFallback(yyv4287, false) - } - } - yyj4284++ - if yyhl4284 { - yyb4284 = yyj4284 > l - } else { - yyb4284 = r.CheckBreak() - } - if yyb4284 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4289 := &x.Items - yym4290 := z.DecBinary() - _ = yym4290 - if false { - } else { - h.decSliceResourceQuota((*[]ResourceQuota)(yyv4289), d) - } - } - for { - yyj4284++ - if yyhl4284 { - yyb4284 = yyj4284 > l - } else { - yyb4284 = r.CheckBreak() - } - if yyb4284 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4284-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Secret) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym4291 := z.EncBinary() - _ = yym4291 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep4292 := !z.EncBinary() - yy2arr4292 := z.EncBasicHandle().StructToArray - var yyq4292 [5]bool - _, _, _ = yysep4292, yyq4292, yy2arr4292 - const yyr4292 bool = false - yyq4292[0] = x.Kind != "" - yyq4292[1] = x.APIVersion != "" - yyq4292[2] = true - yyq4292[3] = len(x.Data) != 0 - yyq4292[4] = x.Type != "" - var yynn4292 int - if yyr4292 || yy2arr4292 { - r.EncodeArrayStart(5) - } else { - yynn4292 = 0 - for _, b := range yyq4292 { - if b { - yynn4292++ - } - } - r.EncodeMapStart(yynn4292) - yynn4292 = 0 - } - if yyr4292 || yy2arr4292 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4292[0] { - yym4294 := z.EncBinary() - _ = yym4294 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4292[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4295 := z.EncBinary() - _ = yym4295 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr4292 || yy2arr4292 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4292[1] { - yym4297 := z.EncBinary() - _ = yym4297 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4292[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4298 := z.EncBinary() - _ = yym4298 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr4292 || yy2arr4292 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4292[2] { - yy4300 := &x.ObjectMeta - yy4300.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq4292[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4301 := &x.ObjectMeta - yy4301.CodecEncodeSelf(e) - } - } - if yyr4292 || yy2arr4292 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4292[3] { - if x.Data == nil { - r.EncodeNil() - } else { - yym4303 := z.EncBinary() - _ = yym4303 - if false { - } else { - h.encMapstringSliceuint8((map[string][]uint8)(x.Data), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq4292[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("data")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Data == nil { - r.EncodeNil() - } else { - yym4304 := z.EncBinary() - _ = yym4304 - if false { - } else { - h.encMapstringSliceuint8((map[string][]uint8)(x.Data), e) - } - } - } - } - if yyr4292 || yy2arr4292 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4292[4] { - x.Type.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4292[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - } - if yyr4292 || yy2arr4292 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Secret) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4306 := z.DecBinary() - _ = yym4306 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct4307 := r.ContainerType() - if yyct4307 == codecSelferValueTypeMap1234 { - yyl4307 := r.ReadMapStart() - if yyl4307 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl4307, d) - } - } else if yyct4307 == codecSelferValueTypeArray1234 { - yyl4307 := r.ReadArrayStart() - if yyl4307 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl4307, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Secret) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys4308Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4308Slc - var yyhl4308 bool = l >= 0 - for yyj4308 := 0; ; yyj4308++ { - if yyhl4308 { - if yyj4308 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4308Slc = r.DecodeBytes(yys4308Slc, true, true) - yys4308 := string(yys4308Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4308 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - 
case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4311 := &x.ObjectMeta - yyv4311.CodecDecodeSelf(d) - } - case "data": - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv4312 := &x.Data - yym4313 := z.DecBinary() - _ = yym4313 - if false { - } else { - h.decMapstringSliceuint8((*map[string][]uint8)(yyv4312), d) - } - } - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = SecretType(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys4308) - } // end switch yys4308 - } // end for yyj4308 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Secret) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4315 int - var yyb4315 bool - var yyhl4315 bool = l >= 0 - yyj4315++ - if yyhl4315 { - yyb4315 = yyj4315 > l - } else { - yyb4315 = r.CheckBreak() - } - if yyb4315 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj4315++ - if yyhl4315 { - yyb4315 = yyj4315 > l - } else { - yyb4315 = r.CheckBreak() - } - if yyb4315 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj4315++ - if yyhl4315 { - yyb4315 = yyj4315 > l - } else { - yyb4315 = r.CheckBreak() - } - if yyb4315 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4318 := &x.ObjectMeta - yyv4318.CodecDecodeSelf(d) - } - yyj4315++ - if yyhl4315 { - yyb4315 = yyj4315 > l - } else { - yyb4315 = r.CheckBreak() - } - if yyb4315 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv4319 := &x.Data - yym4320 := z.DecBinary() - _ = yym4320 - if false { - } else { - h.decMapstringSliceuint8((*map[string][]uint8)(yyv4319), d) - } - } - yyj4315++ - if yyhl4315 { - yyb4315 = yyj4315 > l - } else { - yyb4315 = r.CheckBreak() - } - if yyb4315 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = SecretType(r.DecodeString()) - } - for { - yyj4315++ - if yyhl4315 { - yyb4315 = yyj4315 > l - } else { - yyb4315 = r.CheckBreak() - } - if yyb4315 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4315-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x SecretType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym4322 := z.EncBinary() - _ = yym4322 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *SecretType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4323 := z.DecBinary() - _ = yym4323 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *SecretList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym4324 := z.EncBinary() - _ = yym4324 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep4325 := !z.EncBinary() - yy2arr4325 := z.EncBasicHandle().StructToArray - var yyq4325 [4]bool - _, _, _ = yysep4325, yyq4325, yy2arr4325 - const yyr4325 bool = false - yyq4325[0] = x.Kind != "" - yyq4325[1] = x.APIVersion != "" - yyq4325[2] = true - var yynn4325 int - if yyr4325 || yy2arr4325 { - r.EncodeArrayStart(4) - } else { - yynn4325 = 1 - for _, b := range yyq4325 { - if b { - yynn4325++ - } - } - r.EncodeMapStart(yynn4325) - yynn4325 = 0 - } - if yyr4325 || yy2arr4325 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4325[0] { - yym4327 := z.EncBinary() - _ = yym4327 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4325[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4328 := z.EncBinary() - _ = yym4328 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr4325 || yy2arr4325 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4325[1] { - yym4330 := z.EncBinary() - _ = yym4330 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4325[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4331 := z.EncBinary() - _ = yym4331 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr4325 || yy2arr4325 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4325[2] { - yy4333 := &x.ListMeta - yym4334 := z.EncBinary() - _ = yym4334 - if false { - } else if z.HasExtensions() && z.EncExt(yy4333) { - } else { - z.EncFallback(yy4333) - } - } else { - r.EncodeNil() - } - } else { - if yyq4325[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4335 := &x.ListMeta - yym4336 := z.EncBinary() - _ = yym4336 - if false { - } else if z.HasExtensions() && z.EncExt(yy4335) { - } else { - z.EncFallback(yy4335) - } - } - } - if yyr4325 || yy2arr4325 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym4338 := z.EncBinary() - _ = yym4338 - if false { - } else { - h.encSliceSecret(([]Secret)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym4339 := z.EncBinary() - _ = yym4339 - if false { - } else { - h.encSliceSecret(([]Secret)(x.Items), e) - } - } 
- } - if yyr4325 || yy2arr4325 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SecretList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4340 := z.DecBinary() - _ = yym4340 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct4341 := r.ContainerType() - if yyct4341 == codecSelferValueTypeMap1234 { - yyl4341 := r.ReadMapStart() - if yyl4341 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl4341, d) - } - } else if yyct4341 == codecSelferValueTypeArray1234 { - yyl4341 := r.ReadArrayStart() - if yyl4341 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl4341, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SecretList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys4342Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4342Slc - var yyhl4342 bool = l >= 0 - for yyj4342 := 0; ; yyj4342++ { - if yyhl4342 { - if yyj4342 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4342Slc = r.DecodeBytes(yys4342Slc, true, true) - yys4342 := string(yys4342Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4342 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4345 := &x.ListMeta - yym4346 := z.DecBinary() - _ = yym4346 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4345) { - } else { - z.DecFallback(yyv4345, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4347 := &x.Items - yym4348 := z.DecBinary() - _ = yym4348 - if false { - } else { - h.decSliceSecret((*[]Secret)(yyv4347), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys4342) - } // end switch yys4342 - } // end for yyj4342 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SecretList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4349 int - var yyb4349 bool - var yyhl4349 bool = l >= 0 - yyj4349++ - if yyhl4349 { - yyb4349 = yyj4349 > l - } else { - yyb4349 = r.CheckBreak() - } - if yyb4349 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj4349++ - if yyhl4349 { - yyb4349 = yyj4349 > l - } else { - yyb4349 = r.CheckBreak() - } - if yyb4349 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj4349++ - if yyhl4349 { - yyb4349 = yyj4349 > l - } else { - yyb4349 = r.CheckBreak() - } - if yyb4349 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4352 := &x.ListMeta - yym4353 := z.DecBinary() - _ = yym4353 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4352) { - } else { - z.DecFallback(yyv4352, false) - } - } - yyj4349++ - if yyhl4349 { - yyb4349 = yyj4349 > l - } else { - yyb4349 = r.CheckBreak() - } - if yyb4349 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4354 := &x.Items - yym4355 := z.DecBinary() - _ = yym4355 - if false { - } else { - h.decSliceSecret((*[]Secret)(yyv4354), d) - } - } - for { - yyj4349++ - if yyhl4349 { - yyb4349 = yyj4349 > l - } else { - yyb4349 = r.CheckBreak() - } - if yyb4349 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4349-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ConfigMap) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym4356 := z.EncBinary() - _ = yym4356 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep4357 := !z.EncBinary() - yy2arr4357 := z.EncBasicHandle().StructToArray - var yyq4357 [4]bool - _, _, _ = yysep4357, yyq4357, yy2arr4357 - const yyr4357 bool = false - yyq4357[0] = x.Kind != "" - yyq4357[1] = x.APIVersion != "" - yyq4357[2] = true - yyq4357[3] = len(x.Data) != 0 - var yynn4357 int - if yyr4357 || yy2arr4357 { - r.EncodeArrayStart(4) - } else { - yynn4357 = 0 - for _, b := range yyq4357 { - if b { - yynn4357++ - } - } - r.EncodeMapStart(yynn4357) - yynn4357 = 0 - } - if yyr4357 || yy2arr4357 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4357[0] { - yym4359 := z.EncBinary() - _ = yym4359 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4357[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4360 := z.EncBinary() - _ = yym4360 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr4357 || yy2arr4357 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4357[1] { - yym4362 := z.EncBinary() - _ = yym4362 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4357[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4363 := z.EncBinary() - _ = yym4363 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr4357 || yy2arr4357 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4357[2] { - yy4365 := &x.ObjectMeta - yy4365.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq4357[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4366 := &x.ObjectMeta - yy4366.CodecEncodeSelf(e) - } - } - if yyr4357 || yy2arr4357 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4357[3] { - if x.Data == nil { - r.EncodeNil() - } else { - yym4368 := z.EncBinary() - _ = yym4368 - if false { - } else { - z.F.EncMapStringStringV(x.Data, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq4357[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("data")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Data == nil { - r.EncodeNil() - } else { - yym4369 := z.EncBinary() - _ = yym4369 - if false { - } else { - z.F.EncMapStringStringV(x.Data, false, e) - } - } - } - } - if yyr4357 || yy2arr4357 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ConfigMap) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4370 := z.DecBinary() - _ = yym4370 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct4371 := r.ContainerType() - if yyct4371 == codecSelferValueTypeMap1234 { - yyl4371 := r.ReadMapStart() - if yyl4371 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl4371, d) - } - } else if yyct4371 == codecSelferValueTypeArray1234 { - yyl4371 := r.ReadArrayStart() - if yyl4371 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl4371, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ConfigMap) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys4372Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4372Slc - var yyhl4372 bool = l >= 0 - for yyj4372 := 0; ; yyj4372++ { - if yyhl4372 { - if yyj4372 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4372Slc = r.DecodeBytes(yys4372Slc, true, true) - yys4372 := string(yys4372Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4372 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4375 := &x.ObjectMeta - yyv4375.CodecDecodeSelf(d) - } - case "data": - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv4376 := &x.Data - yym4377 := z.DecBinary() - _ = yym4377 - if false { - } else { - z.F.DecMapStringStringX(yyv4376, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys4372) - } // end switch yys4372 - } // end for yyj4372 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ConfigMap) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4378 int - var yyb4378 bool - var yyhl4378 bool = l >= 0 - yyj4378++ - if yyhl4378 { - yyb4378 = yyj4378 > l - } else { - yyb4378 = r.CheckBreak() - } - if yyb4378 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj4378++ - if yyhl4378 { - yyb4378 = yyj4378 > l - } else { - yyb4378 = r.CheckBreak() - } - if yyb4378 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj4378++ - if yyhl4378 { - yyb4378 = yyj4378 > l - } else { - yyb4378 = r.CheckBreak() - } - if yyb4378 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4381 := &x.ObjectMeta - yyv4381.CodecDecodeSelf(d) - } - yyj4378++ - if yyhl4378 { - yyb4378 = yyj4378 > l - } else { - yyb4378 = r.CheckBreak() - } - if yyb4378 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv4382 := &x.Data - yym4383 := z.DecBinary() - _ = yym4383 - if false { - } else { - z.F.DecMapStringStringX(yyv4382, false, d) - } - } - for { - yyj4378++ - if yyhl4378 { - yyb4378 = yyj4378 > l - } else { - yyb4378 = r.CheckBreak() - } - if yyb4378 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4378-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ConfigMapList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym4384 := z.EncBinary() - _ = yym4384 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep4385 := !z.EncBinary() - yy2arr4385 := z.EncBasicHandle().StructToArray - var yyq4385 [4]bool - _, _, _ = yysep4385, yyq4385, yy2arr4385 - const yyr4385 bool = false - yyq4385[0] = x.Kind != "" - yyq4385[1] = x.APIVersion != "" - yyq4385[2] = true - var yynn4385 int - if yyr4385 || yy2arr4385 { - r.EncodeArrayStart(4) - } else { - yynn4385 = 1 - for _, b := range yyq4385 { - if b { - yynn4385++ - } - } - r.EncodeMapStart(yynn4385) - yynn4385 = 0 - } - if yyr4385 || yy2arr4385 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4385[0] { - yym4387 := z.EncBinary() - _ = yym4387 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4385[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4388 := z.EncBinary() - _ = yym4388 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr4385 || yy2arr4385 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4385[1] { - yym4390 := z.EncBinary() - _ = yym4390 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4385[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4391 := z.EncBinary() - _ = yym4391 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr4385 || yy2arr4385 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4385[2] { - yy4393 := &x.ListMeta - yym4394 := z.EncBinary() - _ = yym4394 - if false { - } else if z.HasExtensions() && z.EncExt(yy4393) { - } else { - z.EncFallback(yy4393) - } - } else { - r.EncodeNil() - } - } else { - if yyq4385[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4395 := &x.ListMeta - yym4396 := z.EncBinary() - _ = yym4396 - if false { - } else if z.HasExtensions() && z.EncExt(yy4395) { - } else { - z.EncFallback(yy4395) - } - } - } - if yyr4385 || yy2arr4385 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym4398 := z.EncBinary() - _ = yym4398 - if false { - } else { - h.encSliceConfigMap(([]ConfigMap)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym4399 := z.EncBinary() - _ = yym4399 - if false { - } else { - h.encSliceConfigMap(([]ConfigMap)(x.Items), e) - } - } - } - if yyr4385 || yy2arr4385 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ConfigMapList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4400 := z.DecBinary() - _ = yym4400 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct4401 := r.ContainerType() - if yyct4401 == codecSelferValueTypeMap1234 { - yyl4401 := r.ReadMapStart() - if yyl4401 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl4401, d) - } - } else if yyct4401 == codecSelferValueTypeArray1234 { - yyl4401 := r.ReadArrayStart() - if yyl4401 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl4401, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ConfigMapList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys4402Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4402Slc - var yyhl4402 bool = l >= 0 - for yyj4402 := 0; ; yyj4402++ { - if yyhl4402 { - if yyj4402 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4402Slc = r.DecodeBytes(yys4402Slc, true, true) - yys4402 := string(yys4402Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4402 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4405 := &x.ListMeta - yym4406 := z.DecBinary() - _ = yym4406 - if false { - } 
else if z.HasExtensions() && z.DecExt(yyv4405) { - } else { - z.DecFallback(yyv4405, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4407 := &x.Items - yym4408 := z.DecBinary() - _ = yym4408 - if false { - } else { - h.decSliceConfigMap((*[]ConfigMap)(yyv4407), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys4402) - } // end switch yys4402 - } // end for yyj4402 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ConfigMapList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4409 int - var yyb4409 bool - var yyhl4409 bool = l >= 0 - yyj4409++ - if yyhl4409 { - yyb4409 = yyj4409 > l - } else { - yyb4409 = r.CheckBreak() - } - if yyb4409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj4409++ - if yyhl4409 { - yyb4409 = yyj4409 > l - } else { - yyb4409 = r.CheckBreak() - } - if yyb4409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj4409++ - if yyhl4409 { - yyb4409 = yyj4409 > l - } else { - yyb4409 = r.CheckBreak() - } - if yyb4409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4412 := &x.ListMeta - yym4413 := z.DecBinary() - _ = yym4413 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4412) { - } else { - z.DecFallback(yyv4412, false) - } - } - yyj4409++ - if yyhl4409 { - yyb4409 = yyj4409 > l - } else { - yyb4409 = r.CheckBreak() - } - if yyb4409 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4414 := &x.Items - yym4415 := z.DecBinary() - _ = yym4415 - if false { - } else { - h.decSliceConfigMap((*[]ConfigMap)(yyv4414), d) - } - } - for { - yyj4409++ - if yyhl4409 { - yyb4409 = yyj4409 > l - } else { - yyb4409 = r.CheckBreak() - } - if yyb4409 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4409-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x PatchType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym4416 := z.EncBinary() - _ = yym4416 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PatchType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4417 := z.DecBinary() - _ = yym4417 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x ComponentConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym4418 := z.EncBinary() - _ = yym4418 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ComponentConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4419 := z.DecBinary() - _ = yym4419 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ComponentCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym4420 := z.EncBinary() - _ = yym4420 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep4421 := !z.EncBinary() - yy2arr4421 := z.EncBasicHandle().StructToArray - var yyq4421 [4]bool - _, _, _ = yysep4421, yyq4421, yy2arr4421 - const yyr4421 bool = false - yyq4421[2] = x.Message != "" - yyq4421[3] = x.Error != "" - var yynn4421 int - if yyr4421 || yy2arr4421 { - r.EncodeArrayStart(4) - } else { - yynn4421 = 2 - for _, b := range yyq4421 { - if b { - yynn4421++ - } - } - r.EncodeMapStart(yynn4421) - yynn4421 = 0 - } - if yyr4421 || yy2arr4421 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr4421 || yy2arr4421 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Status.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Status.CodecEncodeSelf(e) - } - if yyr4421 || yy2arr4421 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4421[2] { - yym4425 := z.EncBinary() - _ = yym4425 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4421[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4426 := z.EncBinary() - _ = yym4426 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr4421 || yy2arr4421 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4421[3] { - yym4428 := z.EncBinary() - _ = yym4428 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Error)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4421[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("error")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4429 := z.EncBinary() - _ = yym4429 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Error)) - } - } - } - if yyr4421 || yy2arr4421 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ComponentCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4430 := z.DecBinary() - _ = yym4430 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct4431 := r.ContainerType() - 
if yyct4431 == codecSelferValueTypeMap1234 { - yyl4431 := r.ReadMapStart() - if yyl4431 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl4431, d) - } - } else if yyct4431 == codecSelferValueTypeArray1234 { - yyl4431 := r.ReadArrayStart() - if yyl4431 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl4431, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ComponentCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys4432Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4432Slc - var yyhl4432 bool = l >= 0 - for yyj4432 := 0; ; yyj4432++ { - if yyhl4432 { - if yyj4432 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4432Slc = r.DecodeBytes(yys4432Slc, true, true) - yys4432 := string(yys4432Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4432 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ComponentConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = ConditionStatus(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - case "error": - if r.TryDecodeAsNil() { - x.Error = "" - } else { - x.Error = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys4432) - } // end switch yys4432 - } // end for yyj4432 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ComponentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4437 int - var yyb4437 bool - var yyhl4437 bool = l >= 0 - yyj4437++ - if yyhl4437 { - yyb4437 = yyj4437 > l - } else { - yyb4437 = r.CheckBreak() - } - if yyb4437 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ComponentConditionType(r.DecodeString()) - } - yyj4437++ - if yyhl4437 { - yyb4437 = yyj4437 > l - } else { - yyb4437 = r.CheckBreak() - } - if yyb4437 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = ConditionStatus(r.DecodeString()) - } - yyj4437++ - if yyhl4437 { - yyb4437 = yyj4437 > l - } else { - yyb4437 = r.CheckBreak() - } - if yyb4437 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - yyj4437++ - if yyhl4437 { - yyb4437 = yyj4437 > l - } else { - yyb4437 = r.CheckBreak() - } - if yyb4437 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Error = "" - } else { - x.Error = string(r.DecodeString()) - } - for { - yyj4437++ - if yyhl4437 { - yyb4437 = yyj4437 > l - } else { - yyb4437 = r.CheckBreak() - } - if yyb4437 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4437-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ComponentStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym4442 := z.EncBinary() - _ = yym4442 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep4443 := !z.EncBinary() - yy2arr4443 := z.EncBasicHandle().StructToArray - var yyq4443 [4]bool - _, _, _ = yysep4443, yyq4443, yy2arr4443 - const yyr4443 bool = false - yyq4443[0] = x.Kind != "" - yyq4443[1] = x.APIVersion != "" - yyq4443[2] = true - yyq4443[3] = len(x.Conditions) != 0 - var yynn4443 int - if yyr4443 || yy2arr4443 { - r.EncodeArrayStart(4) - } else { - yynn4443 = 0 - for _, b := range yyq4443 { - if b { - yynn4443++ - } - } - r.EncodeMapStart(yynn4443) - yynn4443 = 0 - } - if yyr4443 || yy2arr4443 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4443[0] { - yym4445 := z.EncBinary() - _ = yym4445 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4443[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4446 := z.EncBinary() - _ = yym4446 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr4443 || yy2arr4443 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4443[1] { - yym4448 := z.EncBinary() - _ = yym4448 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4443[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4449 := z.EncBinary() - _ = yym4449 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr4443 || yy2arr4443 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4443[2] { - yy4451 := &x.ObjectMeta - yy4451.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq4443[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4452 := &x.ObjectMeta - yy4452.CodecEncodeSelf(e) - } - } - if yyr4443 || yy2arr4443 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4443[3] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym4454 := z.EncBinary() - _ = yym4454 - if false { - } else { - h.encSliceComponentCondition(([]ComponentCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq4443[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym4455 := z.EncBinary() - _ = yym4455 - if false { - } else { - h.encSliceComponentCondition(([]ComponentCondition)(x.Conditions), e) - } - } - } - } - if yyr4443 || yy2arr4443 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ComponentStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4456 := z.DecBinary() - _ = yym4456 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct4457 := r.ContainerType() - if yyct4457 == codecSelferValueTypeMap1234 { - yyl4457 := r.ReadMapStart() - if yyl4457 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl4457, d) - } - } else if yyct4457 == codecSelferValueTypeArray1234 { - yyl4457 := r.ReadArrayStart() - if yyl4457 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl4457, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ComponentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys4458Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4458Slc - var yyhl4458 bool = l >= 0 - for yyj4458 := 0; ; yyj4458++ { - if yyhl4458 { - if yyj4458 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4458Slc = r.DecodeBytes(yys4458Slc, true, true) - yys4458 := string(yys4458Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4458 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4461 := &x.ObjectMeta - yyv4461.CodecDecodeSelf(d) - } - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv4462 := &x.Conditions - yym4463 := z.DecBinary() - _ = yym4463 - if false { - } else { - h.decSliceComponentCondition((*[]ComponentCondition)(yyv4462), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys4458) - } // end switch yys4458 - } // end for yyj4458 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ComponentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4464 int - var yyb4464 bool - var yyhl4464 bool = l >= 0 - yyj4464++ - if yyhl4464 { - yyb4464 = yyj4464 > l - } else { - yyb4464 = r.CheckBreak() - } - if yyb4464 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj4464++ - if yyhl4464 { - yyb4464 = yyj4464 > l - } else { - yyb4464 = r.CheckBreak() - } - if yyb4464 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj4464++ - if yyhl4464 { - yyb4464 = yyj4464 > l - } else { - yyb4464 = r.CheckBreak() - } - if yyb4464 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4467 := &x.ObjectMeta - yyv4467.CodecDecodeSelf(d) - } - yyj4464++ - if yyhl4464 { - yyb4464 = yyj4464 > l - } else { - yyb4464 = r.CheckBreak() - } - if yyb4464 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv4468 := &x.Conditions - yym4469 := z.DecBinary() - _ = yym4469 - if false { - } else { - h.decSliceComponentCondition((*[]ComponentCondition)(yyv4468), d) - } - } - for { - yyj4464++ - if yyhl4464 { - yyb4464 = yyj4464 > l - } else { - yyb4464 = r.CheckBreak() - } - if yyb4464 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4464-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ComponentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym4470 := z.EncBinary() - _ = yym4470 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep4471 := !z.EncBinary() - yy2arr4471 := z.EncBasicHandle().StructToArray - var yyq4471 [4]bool - _, _, _ = yysep4471, yyq4471, yy2arr4471 - const yyr4471 bool = false - yyq4471[0] = x.Kind != "" - yyq4471[1] = x.APIVersion != "" - yyq4471[2] = true - var yynn4471 int - if yyr4471 || yy2arr4471 { - r.EncodeArrayStart(4) - } else { - yynn4471 = 1 - for _, b := range yyq4471 { - if b { - yynn4471++ - } - } - r.EncodeMapStart(yynn4471) - yynn4471 = 0 - } - if yyr4471 || yy2arr4471 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4471[0] { - yym4473 := z.EncBinary() - _ = yym4473 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4471[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4474 := z.EncBinary() - _ = yym4474 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr4471 || yy2arr4471 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4471[1] { - yym4476 := z.EncBinary() - _ = yym4476 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4471[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4477 := z.EncBinary() - _ = yym4477 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr4471 || yy2arr4471 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4471[2] { - yy4479 := &x.ListMeta - yym4480 := z.EncBinary() - _ = yym4480 - if false { - } else if z.HasExtensions() && z.EncExt(yy4479) { - } else { - z.EncFallback(yy4479) - } - } else { - r.EncodeNil() - } - } else { - if yyq4471[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4481 := &x.ListMeta - yym4482 := z.EncBinary() - _ = yym4482 - if false { - } else if 
z.HasExtensions() && z.EncExt(yy4481) { - } else { - z.EncFallback(yy4481) - } - } - } - if yyr4471 || yy2arr4471 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym4484 := z.EncBinary() - _ = yym4484 - if false { - } else { - h.encSliceComponentStatus(([]ComponentStatus)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym4485 := z.EncBinary() - _ = yym4485 - if false { - } else { - h.encSliceComponentStatus(([]ComponentStatus)(x.Items), e) - } - } - } - if yyr4471 || yy2arr4471 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ComponentStatusList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4486 := z.DecBinary() - _ = yym4486 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct4487 := r.ContainerType() - if yyct4487 == codecSelferValueTypeMap1234 { - yyl4487 := r.ReadMapStart() - if yyl4487 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl4487, d) - } - } else if yyct4487 == codecSelferValueTypeArray1234 { - yyl4487 := r.ReadArrayStart() - if yyl4487 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl4487, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ComponentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys4488Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4488Slc - var yyhl4488 bool = l >= 0 - for yyj4488 := 0; ; yyj4488++ { - if yyhl4488 { - if yyj4488 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4488Slc = r.DecodeBytes(yys4488Slc, true, true) - yys4488 := string(yys4488Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4488 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4491 := &x.ListMeta - yym4492 := z.DecBinary() - _ = yym4492 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4491) { - } else { - z.DecFallback(yyv4491, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4493 := &x.Items - yym4494 := z.DecBinary() - _ = yym4494 - if false { - } else { - h.decSliceComponentStatus((*[]ComponentStatus)(yyv4493), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys4488) - } // end switch yys4488 - } // end for yyj4488 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ComponentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4495 int - var yyb4495 bool - var yyhl4495 bool = l >= 0 - yyj4495++ - if yyhl4495 { - yyb4495 = 
yyj4495 > l - } else { - yyb4495 = r.CheckBreak() - } - if yyb4495 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj4495++ - if yyhl4495 { - yyb4495 = yyj4495 > l - } else { - yyb4495 = r.CheckBreak() - } - if yyb4495 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj4495++ - if yyhl4495 { - yyb4495 = yyj4495 > l - } else { - yyb4495 = r.CheckBreak() - } - if yyb4495 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4498 := &x.ListMeta - yym4499 := z.DecBinary() - _ = yym4499 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4498) { - } else { - z.DecFallback(yyv4498, false) - } - } - yyj4495++ - if yyhl4495 { - yyb4495 = yyj4495 > l - } else { - yyb4495 = r.CheckBreak() - } - if yyb4495 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4500 := &x.Items - yym4501 := z.DecBinary() - _ = yym4501 - if false { - } else { - h.decSliceComponentStatus((*[]ComponentStatus)(yyv4500), d) - } - } - for { - yyj4495++ - if yyhl4495 { - yyb4495 = yyj4495 > l - } else { - yyb4495 = r.CheckBreak() - } - if yyb4495 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4495-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym4502 := z.EncBinary() - _ = yym4502 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep4503 := !z.EncBinary() - yy2arr4503 := z.EncBasicHandle().StructToArray - var yyq4503 [6]bool - _, _, _ = yysep4503, yyq4503, yy2arr4503 - const yyr4503 bool = false - yyq4503[0] = x.Capabilities != nil - yyq4503[1] = x.Privileged != nil - yyq4503[2] = x.SELinuxOptions != nil - yyq4503[3] = x.RunAsUser != nil - yyq4503[4] = x.RunAsNonRoot != nil - yyq4503[5] = x.ReadOnlyRootFilesystem != nil - var yynn4503 int - if yyr4503 || yy2arr4503 { - r.EncodeArrayStart(6) - } else { - yynn4503 = 0 - for _, b := range yyq4503 { - if b { - yynn4503++ - } - } - r.EncodeMapStart(yynn4503) - yynn4503 = 0 - } - if yyr4503 || yy2arr4503 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4503[0] { - if x.Capabilities == nil { - r.EncodeNil() - } else { - x.Capabilities.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq4503[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("capabilities")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Capabilities == nil { - r.EncodeNil() - } else { - x.Capabilities.CodecEncodeSelf(e) - } - } - } - if yyr4503 || yy2arr4503 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4503[1] { - if x.Privileged == nil { - r.EncodeNil() - } else { - yy4506 := 
*x.Privileged - yym4507 := z.EncBinary() - _ = yym4507 - if false { - } else { - r.EncodeBool(bool(yy4506)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq4503[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("privileged")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Privileged == nil { - r.EncodeNil() - } else { - yy4508 := *x.Privileged - yym4509 := z.EncBinary() - _ = yym4509 - if false { - } else { - r.EncodeBool(bool(yy4508)) - } - } - } - } - if yyr4503 || yy2arr4503 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4503[2] { - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq4503[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } - } - if yyr4503 || yy2arr4503 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4503[3] { - if x.RunAsUser == nil { - r.EncodeNil() - } else { - yy4512 := *x.RunAsUser - yym4513 := z.EncBinary() - _ = yym4513 - if false { - } else { - r.EncodeInt(int64(yy4512)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq4503[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RunAsUser == nil { - r.EncodeNil() - } else { - yy4514 := *x.RunAsUser - yym4515 := z.EncBinary() - _ = yym4515 - if false { - } else { - r.EncodeInt(int64(yy4514)) - } - } - } - } - if yyr4503 || yy2arr4503 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4503[4] { - if x.RunAsNonRoot == nil { - r.EncodeNil() - } else { - yy4517 := *x.RunAsNonRoot - yym4518 := z.EncBinary() - _ = yym4518 - if false { - } else { - r.EncodeBool(bool(yy4517)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq4503[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsNonRoot")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RunAsNonRoot == nil { - r.EncodeNil() - } else { - yy4519 := *x.RunAsNonRoot - yym4520 := z.EncBinary() - _ = yym4520 - if false { - } else { - r.EncodeBool(bool(yy4519)) - } - } - } - } - if yyr4503 || yy2arr4503 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4503[5] { - if x.ReadOnlyRootFilesystem == nil { - r.EncodeNil() - } else { - yy4522 := *x.ReadOnlyRootFilesystem - yym4523 := z.EncBinary() - _ = yym4523 - if false { - } else { - r.EncodeBool(bool(yy4522)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq4503[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnlyRootFilesystem")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ReadOnlyRootFilesystem == nil { - r.EncodeNil() - } else { - yy4524 := *x.ReadOnlyRootFilesystem - yym4525 := z.EncBinary() - _ = yym4525 - if false { - } else { - r.EncodeBool(bool(yy4524)) - } - } - } - } - if yyr4503 || yy2arr4503 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SecurityContext) 
CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4526 := z.DecBinary() - _ = yym4526 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct4527 := r.ContainerType() - if yyct4527 == codecSelferValueTypeMap1234 { - yyl4527 := r.ReadMapStart() - if yyl4527 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl4527, d) - } - } else if yyct4527 == codecSelferValueTypeArray1234 { - yyl4527 := r.ReadArrayStart() - if yyl4527 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl4527, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys4528Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4528Slc - var yyhl4528 bool = l >= 0 - for yyj4528 := 0; ; yyj4528++ { - if yyhl4528 { - if yyj4528 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4528Slc = r.DecodeBytes(yys4528Slc, true, true) - yys4528 := string(yys4528Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4528 { - case "capabilities": - if r.TryDecodeAsNil() { - if x.Capabilities != nil { - x.Capabilities = nil - } - } else { - if x.Capabilities == nil { - x.Capabilities = new(Capabilities) - } - x.Capabilities.CodecDecodeSelf(d) - } - case "privileged": - if r.TryDecodeAsNil() { - if x.Privileged != nil { - x.Privileged = nil - } - } else { - if x.Privileged == nil { - x.Privileged = new(bool) - } - yym4531 := z.DecBinary() - _ = yym4531 - if false { - } else { - *((*bool)(x.Privileged)) = r.DecodeBool() - } - } - case "seLinuxOptions": - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - case "runAsUser": - if r.TryDecodeAsNil() { - if x.RunAsUser != nil { - x.RunAsUser = nil - } - } else { - if x.RunAsUser == nil { - x.RunAsUser = new(int64) - } - yym4534 := z.DecBinary() - _ = yym4534 - if false { - } else { - *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) - } - } - case "runAsNonRoot": - if r.TryDecodeAsNil() { - if x.RunAsNonRoot != nil { - x.RunAsNonRoot = nil - } - } else { - if x.RunAsNonRoot == nil { - x.RunAsNonRoot = new(bool) - } - yym4536 := z.DecBinary() - _ = yym4536 - if false { - } else { - *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() - } - } - case "readOnlyRootFilesystem": - if r.TryDecodeAsNil() { - if x.ReadOnlyRootFilesystem != nil { - x.ReadOnlyRootFilesystem = nil - } - } else { - if x.ReadOnlyRootFilesystem == nil { - x.ReadOnlyRootFilesystem = new(bool) - } - yym4538 := z.DecBinary() - _ = yym4538 - if false { - } else { - *((*bool)(x.ReadOnlyRootFilesystem)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys4528) - } // end switch yys4528 - } // end for yyj4528 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4539 int - var yyb4539 bool - var yyhl4539 bool = l >= 0 - yyj4539++ - if yyhl4539 { - yyb4539 = 
yyj4539 > l - } else { - yyb4539 = r.CheckBreak() - } - if yyb4539 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Capabilities != nil { - x.Capabilities = nil - } - } else { - if x.Capabilities == nil { - x.Capabilities = new(Capabilities) - } - x.Capabilities.CodecDecodeSelf(d) - } - yyj4539++ - if yyhl4539 { - yyb4539 = yyj4539 > l - } else { - yyb4539 = r.CheckBreak() - } - if yyb4539 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Privileged != nil { - x.Privileged = nil - } - } else { - if x.Privileged == nil { - x.Privileged = new(bool) - } - yym4542 := z.DecBinary() - _ = yym4542 - if false { - } else { - *((*bool)(x.Privileged)) = r.DecodeBool() - } - } - yyj4539++ - if yyhl4539 { - yyb4539 = yyj4539 > l - } else { - yyb4539 = r.CheckBreak() - } - if yyb4539 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - yyj4539++ - if yyhl4539 { - yyb4539 = yyj4539 > l - } else { - yyb4539 = r.CheckBreak() - } - if yyb4539 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RunAsUser != nil { - x.RunAsUser = nil - } - } else { - if x.RunAsUser == nil { - x.RunAsUser = new(int64) - } - yym4545 := z.DecBinary() - _ = yym4545 - if false { - } else { - *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) - } - } - yyj4539++ - if yyhl4539 { - yyb4539 = yyj4539 > l - } else { - yyb4539 = r.CheckBreak() - } - if yyb4539 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RunAsNonRoot != nil { - x.RunAsNonRoot = nil - } - } else { - if x.RunAsNonRoot == nil { - x.RunAsNonRoot = new(bool) - } - yym4547 := z.DecBinary() - _ = yym4547 - if false { - } else { - *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() - } - } - yyj4539++ - if yyhl4539 { - yyb4539 = yyj4539 > l - } else { - yyb4539 = r.CheckBreak() - } - if yyb4539 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ReadOnlyRootFilesystem != nil { - x.ReadOnlyRootFilesystem = nil - } - } else { - if x.ReadOnlyRootFilesystem == nil { - x.ReadOnlyRootFilesystem = new(bool) - } - yym4549 := z.DecBinary() - _ = yym4549 - if false { - } else { - *((*bool)(x.ReadOnlyRootFilesystem)) = r.DecodeBool() - } - } - for { - yyj4539++ - if yyhl4539 { - yyb4539 = yyj4539 > l - } else { - yyb4539 = r.CheckBreak() - } - if yyb4539 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4539-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SELinuxOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym4550 := z.EncBinary() - _ = yym4550 - if false { - } else if 
z.HasExtensions() && z.EncExt(x) { - } else { - yysep4551 := !z.EncBinary() - yy2arr4551 := z.EncBasicHandle().StructToArray - var yyq4551 [4]bool - _, _, _ = yysep4551, yyq4551, yy2arr4551 - const yyr4551 bool = false - yyq4551[0] = x.User != "" - yyq4551[1] = x.Role != "" - yyq4551[2] = x.Type != "" - yyq4551[3] = x.Level != "" - var yynn4551 int - if yyr4551 || yy2arr4551 { - r.EncodeArrayStart(4) - } else { - yynn4551 = 0 - for _, b := range yyq4551 { - if b { - yynn4551++ - } - } - r.EncodeMapStart(yynn4551) - yynn4551 = 0 - } - if yyr4551 || yy2arr4551 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4551[0] { - yym4553 := z.EncBinary() - _ = yym4553 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4551[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("user")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4554 := z.EncBinary() - _ = yym4554 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } - } - if yyr4551 || yy2arr4551 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4551[1] { - yym4556 := z.EncBinary() - _ = yym4556 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Role)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4551[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("role")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4557 := z.EncBinary() - _ = yym4557 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Role)) - } - } - } - if yyr4551 || yy2arr4551 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4551[2] { - yym4559 := z.EncBinary() - _ = yym4559 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Type)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4551[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4560 := z.EncBinary() - _ = yym4560 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Type)) - } - } - } - if yyr4551 || yy2arr4551 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4551[3] { - yym4562 := z.EncBinary() - _ = yym4562 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Level)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4551[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("level")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4563 := z.EncBinary() - _ = yym4563 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Level)) - } - } - } - if yyr4551 || yy2arr4551 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SELinuxOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4564 := z.DecBinary() - _ = yym4564 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct4565 := r.ContainerType() - if yyct4565 == 
codecSelferValueTypeMap1234 { - yyl4565 := r.ReadMapStart() - if yyl4565 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl4565, d) - } - } else if yyct4565 == codecSelferValueTypeArray1234 { - yyl4565 := r.ReadArrayStart() - if yyl4565 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl4565, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SELinuxOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys4566Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4566Slc - var yyhl4566 bool = l >= 0 - for yyj4566 := 0; ; yyj4566++ { - if yyhl4566 { - if yyj4566 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4566Slc = r.DecodeBytes(yys4566Slc, true, true) - yys4566 := string(yys4566Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4566 { - case "user": - if r.TryDecodeAsNil() { - x.User = "" - } else { - x.User = string(r.DecodeString()) - } - case "role": - if r.TryDecodeAsNil() { - x.Role = "" - } else { - x.Role = string(r.DecodeString()) - } - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = string(r.DecodeString()) - } - case "level": - if r.TryDecodeAsNil() { - x.Level = "" - } else { - x.Level = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys4566) - } // end switch yys4566 - } // end for yyj4566 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SELinuxOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4571 int - var yyb4571 bool - var yyhl4571 bool = l >= 0 - yyj4571++ - if yyhl4571 { - yyb4571 = yyj4571 > l - } else { - yyb4571 = r.CheckBreak() - } - if yyb4571 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.User = "" - } else { - x.User = string(r.DecodeString()) - } - yyj4571++ - if yyhl4571 { - yyb4571 = yyj4571 > l - } else { - yyb4571 = r.CheckBreak() - } - if yyb4571 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Role = "" - } else { - x.Role = string(r.DecodeString()) - } - yyj4571++ - if yyhl4571 { - yyb4571 = yyj4571 > l - } else { - yyb4571 = r.CheckBreak() - } - if yyb4571 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = string(r.DecodeString()) - } - yyj4571++ - if yyhl4571 { - yyb4571 = yyj4571 > l - } else { - yyb4571 = r.CheckBreak() - } - if yyb4571 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Level = "" - } else { - x.Level = string(r.DecodeString()) - } - for { - yyj4571++ - if yyhl4571 { - yyb4571 = yyj4571 > l - } else { - yyb4571 = r.CheckBreak() - } - if yyb4571 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4571-1, "") - } - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RangeAllocation) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym4576 := z.EncBinary() - _ = yym4576 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep4577 := !z.EncBinary() - yy2arr4577 := z.EncBasicHandle().StructToArray - var yyq4577 [5]bool - _, _, _ = yysep4577, yyq4577, yy2arr4577 - const yyr4577 bool = false - yyq4577[0] = x.Kind != "" - yyq4577[1] = x.APIVersion != "" - yyq4577[2] = true - var yynn4577 int - if yyr4577 || yy2arr4577 { - r.EncodeArrayStart(5) - } else { - yynn4577 = 2 - for _, b := range yyq4577 { - if b { - yynn4577++ - } - } - r.EncodeMapStart(yynn4577) - yynn4577 = 0 - } - if yyr4577 || yy2arr4577 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4577[0] { - yym4579 := z.EncBinary() - _ = yym4579 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4577[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4580 := z.EncBinary() - _ = yym4580 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr4577 || yy2arr4577 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4577[1] { - yym4582 := z.EncBinary() - _ = yym4582 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq4577[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4583 := z.EncBinary() - _ = yym4583 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr4577 || yy2arr4577 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4577[2] { - yy4585 := &x.ObjectMeta - yy4585.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq4577[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4586 := &x.ObjectMeta - yy4586.CodecEncodeSelf(e) - } - } - if yyr4577 || yy2arr4577 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4588 := z.EncBinary() - _ = yym4588 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Range)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("range")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4589 := z.EncBinary() - _ = yym4589 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Range)) - } - } - if yyr4577 || yy2arr4577 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Data == nil { - r.EncodeNil() - } else { - yym4591 := z.EncBinary() - _ = yym4591 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("data")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Data == nil { - r.EncodeNil() - } else { - yym4592 := z.EncBinary() - _ = yym4592 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) - } - } - } - if yyr4577 || yy2arr4577 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RangeAllocation) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym4593 := z.DecBinary() - _ = yym4593 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct4594 := r.ContainerType() - if yyct4594 == codecSelferValueTypeMap1234 { - yyl4594 := r.ReadMapStart() - if yyl4594 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl4594, d) - } - } else if yyct4594 == codecSelferValueTypeArray1234 { - yyl4594 := r.ReadArrayStart() - if yyl4594 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl4594, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RangeAllocation) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys4595Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4595Slc - var yyhl4595 bool = l >= 0 - for yyj4595 := 0; ; yyj4595++ { - if yyhl4595 { - if yyj4595 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4595Slc = r.DecodeBytes(yys4595Slc, true, true) - yys4595 := string(yys4595Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4595 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4598 := &x.ObjectMeta - yyv4598.CodecDecodeSelf(d) - } - case "range": - if r.TryDecodeAsNil() { - x.Range = "" - } else { - x.Range = string(r.DecodeString()) - } - case "data": - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv4600 := &x.Data - yym4601 := z.DecBinary() - _ = yym4601 - if false { - } else { - *yyv4600 = r.DecodeBytes(*(*[]byte)(yyv4600), false, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys4595) - } // end switch yys4595 - } // end for yyj4595 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RangeAllocation) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4602 int - var yyb4602 bool - var yyhl4602 bool = l >= 0 - yyj4602++ - if yyhl4602 { - yyb4602 = yyj4602 > l - } else { - yyb4602 = r.CheckBreak() - } - if yyb4602 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj4602++ - if yyhl4602 { - yyb4602 = yyj4602 > l - } else { - yyb4602 = r.CheckBreak() - } - if yyb4602 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) 
- if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj4602++ - if yyhl4602 { - yyb4602 = yyj4602 > l - } else { - yyb4602 = r.CheckBreak() - } - if yyb4602 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4605 := &x.ObjectMeta - yyv4605.CodecDecodeSelf(d) - } - yyj4602++ - if yyhl4602 { - yyb4602 = yyj4602 > l - } else { - yyb4602 = r.CheckBreak() - } - if yyb4602 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Range = "" - } else { - x.Range = string(r.DecodeString()) - } - yyj4602++ - if yyhl4602 { - yyb4602 = yyj4602 > l - } else { - yyb4602 = r.CheckBreak() - } - if yyb4602 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv4607 := &x.Data - yym4608 := z.DecBinary() - _ = yym4608 - if false { - } else { - *yyv4607 = r.DecodeBytes(*(*[]byte)(yyv4607), false, false) - } - } - for { - yyj4602++ - if yyhl4602 { - yyb4602 = yyj4602 > l - } else { - yyb4602 = r.CheckBreak() - } - if yyb4602 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4602-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceOwnerReference(v []OwnerReference, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4609 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4610 := &yyv4609 - yy4610.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceOwnerReference(v *[]OwnerReference, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4611 := *v - yyh4611, yyl4611 := z.DecSliceHelperStart() - var yyc4611 bool - if yyl4611 == 0 { - if yyv4611 == nil { - yyv4611 = []OwnerReference{} - yyc4611 = true - } else if len(yyv4611) != 0 { - yyv4611 = yyv4611[:0] - yyc4611 = true - } - } else if yyl4611 > 0 { - var yyrr4611, yyrl4611 int - var yyrt4611 bool - if yyl4611 > cap(yyv4611) { - - yyrg4611 := len(yyv4611) > 0 - yyv24611 := yyv4611 - yyrl4611, yyrt4611 = z.DecInferLen(yyl4611, z.DecBasicHandle().MaxInitLen, 72) - if yyrt4611 { - if yyrl4611 <= cap(yyv4611) { - yyv4611 = yyv4611[:yyrl4611] - } else { - yyv4611 = make([]OwnerReference, yyrl4611) - } - } else { - yyv4611 = make([]OwnerReference, yyrl4611) - } - yyc4611 = true - yyrr4611 = len(yyv4611) - if yyrg4611 { - copy(yyv4611, yyv24611) - } - } else if yyl4611 != len(yyv4611) { - yyv4611 = yyv4611[:yyl4611] - yyc4611 = true - } - yyj4611 := 0 - for ; yyj4611 < yyrr4611; yyj4611++ { - yyh4611.ElemContainerState(yyj4611) - if r.TryDecodeAsNil() { - yyv4611[yyj4611] = OwnerReference{} - } else { - yyv4612 := &yyv4611[yyj4611] - yyv4612.CodecDecodeSelf(d) - } - - } - if yyrt4611 { - for ; yyj4611 < yyl4611; yyj4611++ { - yyv4611 = append(yyv4611, OwnerReference{}) - yyh4611.ElemContainerState(yyj4611) - if r.TryDecodeAsNil() { - yyv4611[yyj4611] = OwnerReference{} - } else { - yyv4613 := &yyv4611[yyj4611] - yyv4613.CodecDecodeSelf(d) - } - - } 
- } - - } else { - yyj4611 := 0 - for ; !r.CheckBreak(); yyj4611++ { - - if yyj4611 >= len(yyv4611) { - yyv4611 = append(yyv4611, OwnerReference{}) // var yyz4611 OwnerReference - yyc4611 = true - } - yyh4611.ElemContainerState(yyj4611) - if yyj4611 < len(yyv4611) { - if r.TryDecodeAsNil() { - yyv4611[yyj4611] = OwnerReference{} - } else { - yyv4614 := &yyv4611[yyj4611] - yyv4614.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4611 < len(yyv4611) { - yyv4611 = yyv4611[:yyj4611] - yyc4611 = true - } else if yyj4611 == 0 && yyv4611 == nil { - yyv4611 = []OwnerReference{} - yyc4611 = true - } - } - yyh4611.End() - if yyc4611 { - *v = yyv4611 - } -} - -func (x codecSelfer1234) encSlicePersistentVolumeAccessMode(v []PersistentVolumeAccessMode, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4615 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv4615.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePersistentVolumeAccessMode(v *[]PersistentVolumeAccessMode, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4616 := *v - yyh4616, yyl4616 := z.DecSliceHelperStart() - var yyc4616 bool - if yyl4616 == 0 { - if yyv4616 == nil { - yyv4616 = []PersistentVolumeAccessMode{} - yyc4616 = true - } else if len(yyv4616) != 0 { - yyv4616 = yyv4616[:0] - yyc4616 = true - } - } else if yyl4616 > 0 { - var yyrr4616, yyrl4616 int - var yyrt4616 bool - if yyl4616 > cap(yyv4616) { - - yyrl4616, yyrt4616 = z.DecInferLen(yyl4616, z.DecBasicHandle().MaxInitLen, 16) - if yyrt4616 { - if yyrl4616 <= cap(yyv4616) { - yyv4616 = yyv4616[:yyrl4616] - } else { - yyv4616 = make([]PersistentVolumeAccessMode, yyrl4616) - } - } else { - yyv4616 = make([]PersistentVolumeAccessMode, yyrl4616) - } - yyc4616 = true - yyrr4616 = len(yyv4616) - } else if yyl4616 != len(yyv4616) { - yyv4616 = yyv4616[:yyl4616] - yyc4616 = true - } - yyj4616 := 0 - for ; yyj4616 < yyrr4616; yyj4616++ { - yyh4616.ElemContainerState(yyj4616) - if r.TryDecodeAsNil() { - yyv4616[yyj4616] = "" - } else { - yyv4616[yyj4616] = PersistentVolumeAccessMode(r.DecodeString()) - } - - } - if yyrt4616 { - for ; yyj4616 < yyl4616; yyj4616++ { - yyv4616 = append(yyv4616, "") - yyh4616.ElemContainerState(yyj4616) - if r.TryDecodeAsNil() { - yyv4616[yyj4616] = "" - } else { - yyv4616[yyj4616] = PersistentVolumeAccessMode(r.DecodeString()) - } - - } - } - - } else { - yyj4616 := 0 - for ; !r.CheckBreak(); yyj4616++ { - - if yyj4616 >= len(yyv4616) { - yyv4616 = append(yyv4616, "") // var yyz4616 PersistentVolumeAccessMode - yyc4616 = true - } - yyh4616.ElemContainerState(yyj4616) - if yyj4616 < len(yyv4616) { - if r.TryDecodeAsNil() { - yyv4616[yyj4616] = "" - } else { - yyv4616[yyj4616] = PersistentVolumeAccessMode(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj4616 < len(yyv4616) { - yyv4616 = yyv4616[:yyj4616] - yyc4616 = true - } else if yyj4616 == 0 && yyv4616 == nil { - yyv4616 = []PersistentVolumeAccessMode{} - yyc4616 = true - } - } - yyh4616.End() - if yyc4616 { - *v = yyv4616 - } -} - -func (x codecSelfer1234) encSlicePersistentVolume(v []PersistentVolume, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4620 := range v { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4621 := &yyv4620 - yy4621.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePersistentVolume(v *[]PersistentVolume, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4622 := *v - yyh4622, yyl4622 := z.DecSliceHelperStart() - var yyc4622 bool - if yyl4622 == 0 { - if yyv4622 == nil { - yyv4622 = []PersistentVolume{} - yyc4622 = true - } else if len(yyv4622) != 0 { - yyv4622 = yyv4622[:0] - yyc4622 = true - } - } else if yyl4622 > 0 { - var yyrr4622, yyrl4622 int - var yyrt4622 bool - if yyl4622 > cap(yyv4622) { - - yyrg4622 := len(yyv4622) > 0 - yyv24622 := yyv4622 - yyrl4622, yyrt4622 = z.DecInferLen(yyl4622, z.DecBasicHandle().MaxInitLen, 496) - if yyrt4622 { - if yyrl4622 <= cap(yyv4622) { - yyv4622 = yyv4622[:yyrl4622] - } else { - yyv4622 = make([]PersistentVolume, yyrl4622) - } - } else { - yyv4622 = make([]PersistentVolume, yyrl4622) - } - yyc4622 = true - yyrr4622 = len(yyv4622) - if yyrg4622 { - copy(yyv4622, yyv24622) - } - } else if yyl4622 != len(yyv4622) { - yyv4622 = yyv4622[:yyl4622] - yyc4622 = true - } - yyj4622 := 0 - for ; yyj4622 < yyrr4622; yyj4622++ { - yyh4622.ElemContainerState(yyj4622) - if r.TryDecodeAsNil() { - yyv4622[yyj4622] = PersistentVolume{} - } else { - yyv4623 := &yyv4622[yyj4622] - yyv4623.CodecDecodeSelf(d) - } - - } - if yyrt4622 { - for ; yyj4622 < yyl4622; yyj4622++ { - yyv4622 = append(yyv4622, PersistentVolume{}) - yyh4622.ElemContainerState(yyj4622) - if r.TryDecodeAsNil() { - yyv4622[yyj4622] = PersistentVolume{} - } else { - yyv4624 := &yyv4622[yyj4622] - yyv4624.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4622 := 0 - for ; !r.CheckBreak(); yyj4622++ { - - if yyj4622 >= len(yyv4622) { - yyv4622 = append(yyv4622, PersistentVolume{}) // var yyz4622 PersistentVolume - yyc4622 = true - } - yyh4622.ElemContainerState(yyj4622) - if yyj4622 < len(yyv4622) { - if r.TryDecodeAsNil() { - yyv4622[yyj4622] = PersistentVolume{} - } else { - yyv4625 := &yyv4622[yyj4622] - yyv4625.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4622 < len(yyv4622) { - yyv4622 = yyv4622[:yyj4622] - yyc4622 = true - } else if yyj4622 == 0 && yyv4622 == nil { - yyv4622 = []PersistentVolume{} - yyc4622 = true - } - } - yyh4622.End() - if yyc4622 { - *v = yyv4622 - } -} - -func (x codecSelfer1234) encSlicePersistentVolumeClaim(v []PersistentVolumeClaim, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4626 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4627 := &yyv4626 - yy4627.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePersistentVolumeClaim(v *[]PersistentVolumeClaim, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4628 := *v - yyh4628, yyl4628 := z.DecSliceHelperStart() - var yyc4628 bool - if yyl4628 == 0 { - if yyv4628 == nil { - yyv4628 = []PersistentVolumeClaim{} - yyc4628 = true - } else if len(yyv4628) != 0 { - yyv4628 = yyv4628[:0] - yyc4628 = true - } - } else if yyl4628 > 0 { - var yyrr4628, yyrl4628 int - var yyrt4628 bool - if yyl4628 > cap(yyv4628) { - - yyrg4628 := len(yyv4628) > 0 - yyv24628 := yyv4628 - yyrl4628, yyrt4628 = z.DecInferLen(yyl4628, 
z.DecBasicHandle().MaxInitLen, 368) - if yyrt4628 { - if yyrl4628 <= cap(yyv4628) { - yyv4628 = yyv4628[:yyrl4628] - } else { - yyv4628 = make([]PersistentVolumeClaim, yyrl4628) - } - } else { - yyv4628 = make([]PersistentVolumeClaim, yyrl4628) - } - yyc4628 = true - yyrr4628 = len(yyv4628) - if yyrg4628 { - copy(yyv4628, yyv24628) - } - } else if yyl4628 != len(yyv4628) { - yyv4628 = yyv4628[:yyl4628] - yyc4628 = true - } - yyj4628 := 0 - for ; yyj4628 < yyrr4628; yyj4628++ { - yyh4628.ElemContainerState(yyj4628) - if r.TryDecodeAsNil() { - yyv4628[yyj4628] = PersistentVolumeClaim{} - } else { - yyv4629 := &yyv4628[yyj4628] - yyv4629.CodecDecodeSelf(d) - } - - } - if yyrt4628 { - for ; yyj4628 < yyl4628; yyj4628++ { - yyv4628 = append(yyv4628, PersistentVolumeClaim{}) - yyh4628.ElemContainerState(yyj4628) - if r.TryDecodeAsNil() { - yyv4628[yyj4628] = PersistentVolumeClaim{} - } else { - yyv4630 := &yyv4628[yyj4628] - yyv4630.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4628 := 0 - for ; !r.CheckBreak(); yyj4628++ { - - if yyj4628 >= len(yyv4628) { - yyv4628 = append(yyv4628, PersistentVolumeClaim{}) // var yyz4628 PersistentVolumeClaim - yyc4628 = true - } - yyh4628.ElemContainerState(yyj4628) - if yyj4628 < len(yyv4628) { - if r.TryDecodeAsNil() { - yyv4628[yyj4628] = PersistentVolumeClaim{} - } else { - yyv4631 := &yyv4628[yyj4628] - yyv4631.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4628 < len(yyv4628) { - yyv4628 = yyv4628[:yyj4628] - yyc4628 = true - } else if yyj4628 == 0 && yyv4628 == nil { - yyv4628 = []PersistentVolumeClaim{} - yyc4628 = true - } - } - yyh4628.End() - if yyc4628 { - *v = yyv4628 - } -} - -func (x codecSelfer1234) encSliceKeyToPath(v []KeyToPath, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4632 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4633 := &yyv4632 - yy4633.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceKeyToPath(v *[]KeyToPath, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4634 := *v - yyh4634, yyl4634 := z.DecSliceHelperStart() - var yyc4634 bool - if yyl4634 == 0 { - if yyv4634 == nil { - yyv4634 = []KeyToPath{} - yyc4634 = true - } else if len(yyv4634) != 0 { - yyv4634 = yyv4634[:0] - yyc4634 = true - } - } else if yyl4634 > 0 { - var yyrr4634, yyrl4634 int - var yyrt4634 bool - if yyl4634 > cap(yyv4634) { - - yyrg4634 := len(yyv4634) > 0 - yyv24634 := yyv4634 - yyrl4634, yyrt4634 = z.DecInferLen(yyl4634, z.DecBasicHandle().MaxInitLen, 40) - if yyrt4634 { - if yyrl4634 <= cap(yyv4634) { - yyv4634 = yyv4634[:yyrl4634] - } else { - yyv4634 = make([]KeyToPath, yyrl4634) - } - } else { - yyv4634 = make([]KeyToPath, yyrl4634) - } - yyc4634 = true - yyrr4634 = len(yyv4634) - if yyrg4634 { - copy(yyv4634, yyv24634) - } - } else if yyl4634 != len(yyv4634) { - yyv4634 = yyv4634[:yyl4634] - yyc4634 = true - } - yyj4634 := 0 - for ; yyj4634 < yyrr4634; yyj4634++ { - yyh4634.ElemContainerState(yyj4634) - if r.TryDecodeAsNil() { - yyv4634[yyj4634] = KeyToPath{} - } else { - yyv4635 := &yyv4634[yyj4634] - yyv4635.CodecDecodeSelf(d) - } - - } - if yyrt4634 { - for ; yyj4634 < yyl4634; yyj4634++ { - yyv4634 = append(yyv4634, KeyToPath{}) - yyh4634.ElemContainerState(yyj4634) - if r.TryDecodeAsNil() { - yyv4634[yyj4634] = KeyToPath{} - } else { - yyv4636 := 
&yyv4634[yyj4634] - yyv4636.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4634 := 0 - for ; !r.CheckBreak(); yyj4634++ { - - if yyj4634 >= len(yyv4634) { - yyv4634 = append(yyv4634, KeyToPath{}) // var yyz4634 KeyToPath - yyc4634 = true - } - yyh4634.ElemContainerState(yyj4634) - if yyj4634 < len(yyv4634) { - if r.TryDecodeAsNil() { - yyv4634[yyj4634] = KeyToPath{} - } else { - yyv4637 := &yyv4634[yyj4634] - yyv4637.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4634 < len(yyv4634) { - yyv4634 = yyv4634[:yyj4634] - yyc4634 = true - } else if yyj4634 == 0 && yyv4634 == nil { - yyv4634 = []KeyToPath{} - yyc4634 = true - } - } - yyh4634.End() - if yyc4634 { - *v = yyv4634 - } -} - -func (x codecSelfer1234) encSliceDownwardAPIVolumeFile(v []DownwardAPIVolumeFile, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4638 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4639 := &yyv4638 - yy4639.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceDownwardAPIVolumeFile(v *[]DownwardAPIVolumeFile, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4640 := *v - yyh4640, yyl4640 := z.DecSliceHelperStart() - var yyc4640 bool - if yyl4640 == 0 { - if yyv4640 == nil { - yyv4640 = []DownwardAPIVolumeFile{} - yyc4640 = true - } else if len(yyv4640) != 0 { - yyv4640 = yyv4640[:0] - yyc4640 = true - } - } else if yyl4640 > 0 { - var yyrr4640, yyrl4640 int - var yyrt4640 bool - if yyl4640 > cap(yyv4640) { - - yyrg4640 := len(yyv4640) > 0 - yyv24640 := yyv4640 - yyrl4640, yyrt4640 = z.DecInferLen(yyl4640, z.DecBasicHandle().MaxInitLen, 40) - if yyrt4640 { - if yyrl4640 <= cap(yyv4640) { - yyv4640 = yyv4640[:yyrl4640] - } else { - yyv4640 = make([]DownwardAPIVolumeFile, yyrl4640) - } - } else { - yyv4640 = make([]DownwardAPIVolumeFile, yyrl4640) - } - yyc4640 = true - yyrr4640 = len(yyv4640) - if yyrg4640 { - copy(yyv4640, yyv24640) - } - } else if yyl4640 != len(yyv4640) { - yyv4640 = yyv4640[:yyl4640] - yyc4640 = true - } - yyj4640 := 0 - for ; yyj4640 < yyrr4640; yyj4640++ { - yyh4640.ElemContainerState(yyj4640) - if r.TryDecodeAsNil() { - yyv4640[yyj4640] = DownwardAPIVolumeFile{} - } else { - yyv4641 := &yyv4640[yyj4640] - yyv4641.CodecDecodeSelf(d) - } - - } - if yyrt4640 { - for ; yyj4640 < yyl4640; yyj4640++ { - yyv4640 = append(yyv4640, DownwardAPIVolumeFile{}) - yyh4640.ElemContainerState(yyj4640) - if r.TryDecodeAsNil() { - yyv4640[yyj4640] = DownwardAPIVolumeFile{} - } else { - yyv4642 := &yyv4640[yyj4640] - yyv4642.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4640 := 0 - for ; !r.CheckBreak(); yyj4640++ { - - if yyj4640 >= len(yyv4640) { - yyv4640 = append(yyv4640, DownwardAPIVolumeFile{}) // var yyz4640 DownwardAPIVolumeFile - yyc4640 = true - } - yyh4640.ElemContainerState(yyj4640) - if yyj4640 < len(yyv4640) { - if r.TryDecodeAsNil() { - yyv4640[yyj4640] = DownwardAPIVolumeFile{} - } else { - yyv4643 := &yyv4640[yyj4640] - yyv4643.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4640 < len(yyv4640) { - yyv4640 = yyv4640[:yyj4640] - yyc4640 = true - } else if yyj4640 == 0 && yyv4640 == nil { - yyv4640 = []DownwardAPIVolumeFile{} - yyc4640 = true - } - } - yyh4640.End() - if yyc4640 { - *v = yyv4640 - } -} - -func (x codecSelfer1234) encSliceHTTPHeader(v []HTTPHeader, e 
*codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4644 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4645 := &yyv4644 - yy4645.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceHTTPHeader(v *[]HTTPHeader, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4646 := *v - yyh4646, yyl4646 := z.DecSliceHelperStart() - var yyc4646 bool - if yyl4646 == 0 { - if yyv4646 == nil { - yyv4646 = []HTTPHeader{} - yyc4646 = true - } else if len(yyv4646) != 0 { - yyv4646 = yyv4646[:0] - yyc4646 = true - } - } else if yyl4646 > 0 { - var yyrr4646, yyrl4646 int - var yyrt4646 bool - if yyl4646 > cap(yyv4646) { - - yyrg4646 := len(yyv4646) > 0 - yyv24646 := yyv4646 - yyrl4646, yyrt4646 = z.DecInferLen(yyl4646, z.DecBasicHandle().MaxInitLen, 32) - if yyrt4646 { - if yyrl4646 <= cap(yyv4646) { - yyv4646 = yyv4646[:yyrl4646] - } else { - yyv4646 = make([]HTTPHeader, yyrl4646) - } - } else { - yyv4646 = make([]HTTPHeader, yyrl4646) - } - yyc4646 = true - yyrr4646 = len(yyv4646) - if yyrg4646 { - copy(yyv4646, yyv24646) - } - } else if yyl4646 != len(yyv4646) { - yyv4646 = yyv4646[:yyl4646] - yyc4646 = true - } - yyj4646 := 0 - for ; yyj4646 < yyrr4646; yyj4646++ { - yyh4646.ElemContainerState(yyj4646) - if r.TryDecodeAsNil() { - yyv4646[yyj4646] = HTTPHeader{} - } else { - yyv4647 := &yyv4646[yyj4646] - yyv4647.CodecDecodeSelf(d) - } - - } - if yyrt4646 { - for ; yyj4646 < yyl4646; yyj4646++ { - yyv4646 = append(yyv4646, HTTPHeader{}) - yyh4646.ElemContainerState(yyj4646) - if r.TryDecodeAsNil() { - yyv4646[yyj4646] = HTTPHeader{} - } else { - yyv4648 := &yyv4646[yyj4646] - yyv4648.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4646 := 0 - for ; !r.CheckBreak(); yyj4646++ { - - if yyj4646 >= len(yyv4646) { - yyv4646 = append(yyv4646, HTTPHeader{}) // var yyz4646 HTTPHeader - yyc4646 = true - } - yyh4646.ElemContainerState(yyj4646) - if yyj4646 < len(yyv4646) { - if r.TryDecodeAsNil() { - yyv4646[yyj4646] = HTTPHeader{} - } else { - yyv4649 := &yyv4646[yyj4646] - yyv4649.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4646 < len(yyv4646) { - yyv4646 = yyv4646[:yyj4646] - yyc4646 = true - } else if yyj4646 == 0 && yyv4646 == nil { - yyv4646 = []HTTPHeader{} - yyc4646 = true - } - } - yyh4646.End() - if yyc4646 { - *v = yyv4646 - } -} - -func (x codecSelfer1234) encSliceCapability(v []Capability, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4650 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv4650.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCapability(v *[]Capability, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4651 := *v - yyh4651, yyl4651 := z.DecSliceHelperStart() - var yyc4651 bool - if yyl4651 == 0 { - if yyv4651 == nil { - yyv4651 = []Capability{} - yyc4651 = true - } else if len(yyv4651) != 0 { - yyv4651 = yyv4651[:0] - yyc4651 = true - } - } else if yyl4651 > 0 { - var yyrr4651, yyrl4651 int - var yyrt4651 bool - if yyl4651 > cap(yyv4651) { - - yyrl4651, yyrt4651 = z.DecInferLen(yyl4651, z.DecBasicHandle().MaxInitLen, 16) - if yyrt4651 { 
- if yyrl4651 <= cap(yyv4651) { - yyv4651 = yyv4651[:yyrl4651] - } else { - yyv4651 = make([]Capability, yyrl4651) - } - } else { - yyv4651 = make([]Capability, yyrl4651) - } - yyc4651 = true - yyrr4651 = len(yyv4651) - } else if yyl4651 != len(yyv4651) { - yyv4651 = yyv4651[:yyl4651] - yyc4651 = true - } - yyj4651 := 0 - for ; yyj4651 < yyrr4651; yyj4651++ { - yyh4651.ElemContainerState(yyj4651) - if r.TryDecodeAsNil() { - yyv4651[yyj4651] = "" - } else { - yyv4651[yyj4651] = Capability(r.DecodeString()) - } - - } - if yyrt4651 { - for ; yyj4651 < yyl4651; yyj4651++ { - yyv4651 = append(yyv4651, "") - yyh4651.ElemContainerState(yyj4651) - if r.TryDecodeAsNil() { - yyv4651[yyj4651] = "" - } else { - yyv4651[yyj4651] = Capability(r.DecodeString()) - } - - } - } - - } else { - yyj4651 := 0 - for ; !r.CheckBreak(); yyj4651++ { - - if yyj4651 >= len(yyv4651) { - yyv4651 = append(yyv4651, "") // var yyz4651 Capability - yyc4651 = true - } - yyh4651.ElemContainerState(yyj4651) - if yyj4651 < len(yyv4651) { - if r.TryDecodeAsNil() { - yyv4651[yyj4651] = "" - } else { - yyv4651[yyj4651] = Capability(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj4651 < len(yyv4651) { - yyv4651 = yyv4651[:yyj4651] - yyc4651 = true - } else if yyj4651 == 0 && yyv4651 == nil { - yyv4651 = []Capability{} - yyc4651 = true - } - } - yyh4651.End() - if yyc4651 { - *v = yyv4651 - } -} - -func (x codecSelfer1234) encSliceContainerPort(v []ContainerPort, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4655 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4656 := &yyv4655 - yy4656.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceContainerPort(v *[]ContainerPort, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4657 := *v - yyh4657, yyl4657 := z.DecSliceHelperStart() - var yyc4657 bool - if yyl4657 == 0 { - if yyv4657 == nil { - yyv4657 = []ContainerPort{} - yyc4657 = true - } else if len(yyv4657) != 0 { - yyv4657 = yyv4657[:0] - yyc4657 = true - } - } else if yyl4657 > 0 { - var yyrr4657, yyrl4657 int - var yyrt4657 bool - if yyl4657 > cap(yyv4657) { - - yyrg4657 := len(yyv4657) > 0 - yyv24657 := yyv4657 - yyrl4657, yyrt4657 = z.DecInferLen(yyl4657, z.DecBasicHandle().MaxInitLen, 56) - if yyrt4657 { - if yyrl4657 <= cap(yyv4657) { - yyv4657 = yyv4657[:yyrl4657] - } else { - yyv4657 = make([]ContainerPort, yyrl4657) - } - } else { - yyv4657 = make([]ContainerPort, yyrl4657) - } - yyc4657 = true - yyrr4657 = len(yyv4657) - if yyrg4657 { - copy(yyv4657, yyv24657) - } - } else if yyl4657 != len(yyv4657) { - yyv4657 = yyv4657[:yyl4657] - yyc4657 = true - } - yyj4657 := 0 - for ; yyj4657 < yyrr4657; yyj4657++ { - yyh4657.ElemContainerState(yyj4657) - if r.TryDecodeAsNil() { - yyv4657[yyj4657] = ContainerPort{} - } else { - yyv4658 := &yyv4657[yyj4657] - yyv4658.CodecDecodeSelf(d) - } - - } - if yyrt4657 { - for ; yyj4657 < yyl4657; yyj4657++ { - yyv4657 = append(yyv4657, ContainerPort{}) - yyh4657.ElemContainerState(yyj4657) - if r.TryDecodeAsNil() { - yyv4657[yyj4657] = ContainerPort{} - } else { - yyv4659 := &yyv4657[yyj4657] - yyv4659.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4657 := 0 - for ; !r.CheckBreak(); yyj4657++ { - - if yyj4657 >= len(yyv4657) { - yyv4657 = append(yyv4657, ContainerPort{}) // var yyz4657 ContainerPort - 
yyc4657 = true - } - yyh4657.ElemContainerState(yyj4657) - if yyj4657 < len(yyv4657) { - if r.TryDecodeAsNil() { - yyv4657[yyj4657] = ContainerPort{} - } else { - yyv4660 := &yyv4657[yyj4657] - yyv4660.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4657 < len(yyv4657) { - yyv4657 = yyv4657[:yyj4657] - yyc4657 = true - } else if yyj4657 == 0 && yyv4657 == nil { - yyv4657 = []ContainerPort{} - yyc4657 = true - } - } - yyh4657.End() - if yyc4657 { - *v = yyv4657 - } -} - -func (x codecSelfer1234) encSliceEnvVar(v []EnvVar, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4661 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4662 := &yyv4661 - yy4662.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEnvVar(v *[]EnvVar, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4663 := *v - yyh4663, yyl4663 := z.DecSliceHelperStart() - var yyc4663 bool - if yyl4663 == 0 { - if yyv4663 == nil { - yyv4663 = []EnvVar{} - yyc4663 = true - } else if len(yyv4663) != 0 { - yyv4663 = yyv4663[:0] - yyc4663 = true - } - } else if yyl4663 > 0 { - var yyrr4663, yyrl4663 int - var yyrt4663 bool - if yyl4663 > cap(yyv4663) { - - yyrg4663 := len(yyv4663) > 0 - yyv24663 := yyv4663 - yyrl4663, yyrt4663 = z.DecInferLen(yyl4663, z.DecBasicHandle().MaxInitLen, 40) - if yyrt4663 { - if yyrl4663 <= cap(yyv4663) { - yyv4663 = yyv4663[:yyrl4663] - } else { - yyv4663 = make([]EnvVar, yyrl4663) - } - } else { - yyv4663 = make([]EnvVar, yyrl4663) - } - yyc4663 = true - yyrr4663 = len(yyv4663) - if yyrg4663 { - copy(yyv4663, yyv24663) - } - } else if yyl4663 != len(yyv4663) { - yyv4663 = yyv4663[:yyl4663] - yyc4663 = true - } - yyj4663 := 0 - for ; yyj4663 < yyrr4663; yyj4663++ { - yyh4663.ElemContainerState(yyj4663) - if r.TryDecodeAsNil() { - yyv4663[yyj4663] = EnvVar{} - } else { - yyv4664 := &yyv4663[yyj4663] - yyv4664.CodecDecodeSelf(d) - } - - } - if yyrt4663 { - for ; yyj4663 < yyl4663; yyj4663++ { - yyv4663 = append(yyv4663, EnvVar{}) - yyh4663.ElemContainerState(yyj4663) - if r.TryDecodeAsNil() { - yyv4663[yyj4663] = EnvVar{} - } else { - yyv4665 := &yyv4663[yyj4663] - yyv4665.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4663 := 0 - for ; !r.CheckBreak(); yyj4663++ { - - if yyj4663 >= len(yyv4663) { - yyv4663 = append(yyv4663, EnvVar{}) // var yyz4663 EnvVar - yyc4663 = true - } - yyh4663.ElemContainerState(yyj4663) - if yyj4663 < len(yyv4663) { - if r.TryDecodeAsNil() { - yyv4663[yyj4663] = EnvVar{} - } else { - yyv4666 := &yyv4663[yyj4663] - yyv4666.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4663 < len(yyv4663) { - yyv4663 = yyv4663[:yyj4663] - yyc4663 = true - } else if yyj4663 == 0 && yyv4663 == nil { - yyv4663 = []EnvVar{} - yyc4663 = true - } - } - yyh4663.End() - if yyc4663 { - *v = yyv4663 - } -} - -func (x codecSelfer1234) encSliceVolumeMount(v []VolumeMount, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4667 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4668 := &yyv4667 - yy4668.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceVolumeMount(v *[]VolumeMount, d *codec1978.Decoder) { - var 
h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4669 := *v - yyh4669, yyl4669 := z.DecSliceHelperStart() - var yyc4669 bool - if yyl4669 == 0 { - if yyv4669 == nil { - yyv4669 = []VolumeMount{} - yyc4669 = true - } else if len(yyv4669) != 0 { - yyv4669 = yyv4669[:0] - yyc4669 = true - } - } else if yyl4669 > 0 { - var yyrr4669, yyrl4669 int - var yyrt4669 bool - if yyl4669 > cap(yyv4669) { - - yyrg4669 := len(yyv4669) > 0 - yyv24669 := yyv4669 - yyrl4669, yyrt4669 = z.DecInferLen(yyl4669, z.DecBasicHandle().MaxInitLen, 56) - if yyrt4669 { - if yyrl4669 <= cap(yyv4669) { - yyv4669 = yyv4669[:yyrl4669] - } else { - yyv4669 = make([]VolumeMount, yyrl4669) - } - } else { - yyv4669 = make([]VolumeMount, yyrl4669) - } - yyc4669 = true - yyrr4669 = len(yyv4669) - if yyrg4669 { - copy(yyv4669, yyv24669) - } - } else if yyl4669 != len(yyv4669) { - yyv4669 = yyv4669[:yyl4669] - yyc4669 = true - } - yyj4669 := 0 - for ; yyj4669 < yyrr4669; yyj4669++ { - yyh4669.ElemContainerState(yyj4669) - if r.TryDecodeAsNil() { - yyv4669[yyj4669] = VolumeMount{} - } else { - yyv4670 := &yyv4669[yyj4669] - yyv4670.CodecDecodeSelf(d) - } - - } - if yyrt4669 { - for ; yyj4669 < yyl4669; yyj4669++ { - yyv4669 = append(yyv4669, VolumeMount{}) - yyh4669.ElemContainerState(yyj4669) - if r.TryDecodeAsNil() { - yyv4669[yyj4669] = VolumeMount{} - } else { - yyv4671 := &yyv4669[yyj4669] - yyv4671.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4669 := 0 - for ; !r.CheckBreak(); yyj4669++ { - - if yyj4669 >= len(yyv4669) { - yyv4669 = append(yyv4669, VolumeMount{}) // var yyz4669 VolumeMount - yyc4669 = true - } - yyh4669.ElemContainerState(yyj4669) - if yyj4669 < len(yyv4669) { - if r.TryDecodeAsNil() { - yyv4669[yyj4669] = VolumeMount{} - } else { - yyv4672 := &yyv4669[yyj4669] - yyv4672.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4669 < len(yyv4669) { - yyv4669 = yyv4669[:yyj4669] - yyc4669 = true - } else if yyj4669 == 0 && yyv4669 == nil { - yyv4669 = []VolumeMount{} - yyc4669 = true - } - } - yyh4669.End() - if yyc4669 { - *v = yyv4669 - } -} - -func (x codecSelfer1234) encSlicePod(v []Pod, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4673 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4674 := &yyv4673 - yy4674.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePod(v *[]Pod, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4675 := *v - yyh4675, yyl4675 := z.DecSliceHelperStart() - var yyc4675 bool - if yyl4675 == 0 { - if yyv4675 == nil { - yyv4675 = []Pod{} - yyc4675 = true - } else if len(yyv4675) != 0 { - yyv4675 = yyv4675[:0] - yyc4675 = true - } - } else if yyl4675 > 0 { - var yyrr4675, yyrl4675 int - var yyrt4675 bool - if yyl4675 > cap(yyv4675) { - - yyrg4675 := len(yyv4675) > 0 - yyv24675 := yyv4675 - yyrl4675, yyrt4675 = z.DecInferLen(yyl4675, z.DecBasicHandle().MaxInitLen, 640) - if yyrt4675 { - if yyrl4675 <= cap(yyv4675) { - yyv4675 = yyv4675[:yyrl4675] - } else { - yyv4675 = make([]Pod, yyrl4675) - } - } else { - yyv4675 = make([]Pod, yyrl4675) - } - yyc4675 = true - yyrr4675 = len(yyv4675) - if yyrg4675 { - copy(yyv4675, yyv24675) - } - } else if yyl4675 != len(yyv4675) { - yyv4675 = yyv4675[:yyl4675] - yyc4675 = true - } - yyj4675 := 0 - for ; yyj4675 < yyrr4675; 
yyj4675++ { - yyh4675.ElemContainerState(yyj4675) - if r.TryDecodeAsNil() { - yyv4675[yyj4675] = Pod{} - } else { - yyv4676 := &yyv4675[yyj4675] - yyv4676.CodecDecodeSelf(d) - } - - } - if yyrt4675 { - for ; yyj4675 < yyl4675; yyj4675++ { - yyv4675 = append(yyv4675, Pod{}) - yyh4675.ElemContainerState(yyj4675) - if r.TryDecodeAsNil() { - yyv4675[yyj4675] = Pod{} - } else { - yyv4677 := &yyv4675[yyj4675] - yyv4677.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4675 := 0 - for ; !r.CheckBreak(); yyj4675++ { - - if yyj4675 >= len(yyv4675) { - yyv4675 = append(yyv4675, Pod{}) // var yyz4675 Pod - yyc4675 = true - } - yyh4675.ElemContainerState(yyj4675) - if yyj4675 < len(yyv4675) { - if r.TryDecodeAsNil() { - yyv4675[yyj4675] = Pod{} - } else { - yyv4678 := &yyv4675[yyj4675] - yyv4678.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4675 < len(yyv4675) { - yyv4675 = yyv4675[:yyj4675] - yyc4675 = true - } else if yyj4675 == 0 && yyv4675 == nil { - yyv4675 = []Pod{} - yyc4675 = true - } - } - yyh4675.End() - if yyc4675 { - *v = yyv4675 - } -} - -func (x codecSelfer1234) encSliceNodeSelectorTerm(v []NodeSelectorTerm, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4679 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4680 := &yyv4679 - yy4680.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNodeSelectorTerm(v *[]NodeSelectorTerm, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4681 := *v - yyh4681, yyl4681 := z.DecSliceHelperStart() - var yyc4681 bool - if yyl4681 == 0 { - if yyv4681 == nil { - yyv4681 = []NodeSelectorTerm{} - yyc4681 = true - } else if len(yyv4681) != 0 { - yyv4681 = yyv4681[:0] - yyc4681 = true - } - } else if yyl4681 > 0 { - var yyrr4681, yyrl4681 int - var yyrt4681 bool - if yyl4681 > cap(yyv4681) { - - yyrg4681 := len(yyv4681) > 0 - yyv24681 := yyv4681 - yyrl4681, yyrt4681 = z.DecInferLen(yyl4681, z.DecBasicHandle().MaxInitLen, 24) - if yyrt4681 { - if yyrl4681 <= cap(yyv4681) { - yyv4681 = yyv4681[:yyrl4681] - } else { - yyv4681 = make([]NodeSelectorTerm, yyrl4681) - } - } else { - yyv4681 = make([]NodeSelectorTerm, yyrl4681) - } - yyc4681 = true - yyrr4681 = len(yyv4681) - if yyrg4681 { - copy(yyv4681, yyv24681) - } - } else if yyl4681 != len(yyv4681) { - yyv4681 = yyv4681[:yyl4681] - yyc4681 = true - } - yyj4681 := 0 - for ; yyj4681 < yyrr4681; yyj4681++ { - yyh4681.ElemContainerState(yyj4681) - if r.TryDecodeAsNil() { - yyv4681[yyj4681] = NodeSelectorTerm{} - } else { - yyv4682 := &yyv4681[yyj4681] - yyv4682.CodecDecodeSelf(d) - } - - } - if yyrt4681 { - for ; yyj4681 < yyl4681; yyj4681++ { - yyv4681 = append(yyv4681, NodeSelectorTerm{}) - yyh4681.ElemContainerState(yyj4681) - if r.TryDecodeAsNil() { - yyv4681[yyj4681] = NodeSelectorTerm{} - } else { - yyv4683 := &yyv4681[yyj4681] - yyv4683.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4681 := 0 - for ; !r.CheckBreak(); yyj4681++ { - - if yyj4681 >= len(yyv4681) { - yyv4681 = append(yyv4681, NodeSelectorTerm{}) // var yyz4681 NodeSelectorTerm - yyc4681 = true - } - yyh4681.ElemContainerState(yyj4681) - if yyj4681 < len(yyv4681) { - if r.TryDecodeAsNil() { - yyv4681[yyj4681] = NodeSelectorTerm{} - } else { - yyv4684 := &yyv4681[yyj4681] - yyv4684.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4681 < 
len(yyv4681) { - yyv4681 = yyv4681[:yyj4681] - yyc4681 = true - } else if yyj4681 == 0 && yyv4681 == nil { - yyv4681 = []NodeSelectorTerm{} - yyc4681 = true - } - } - yyh4681.End() - if yyc4681 { - *v = yyv4681 - } -} - -func (x codecSelfer1234) encSliceNodeSelectorRequirement(v []NodeSelectorRequirement, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4685 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4686 := &yyv4685 - yy4686.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNodeSelectorRequirement(v *[]NodeSelectorRequirement, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4687 := *v - yyh4687, yyl4687 := z.DecSliceHelperStart() - var yyc4687 bool - if yyl4687 == 0 { - if yyv4687 == nil { - yyv4687 = []NodeSelectorRequirement{} - yyc4687 = true - } else if len(yyv4687) != 0 { - yyv4687 = yyv4687[:0] - yyc4687 = true - } - } else if yyl4687 > 0 { - var yyrr4687, yyrl4687 int - var yyrt4687 bool - if yyl4687 > cap(yyv4687) { - - yyrg4687 := len(yyv4687) > 0 - yyv24687 := yyv4687 - yyrl4687, yyrt4687 = z.DecInferLen(yyl4687, z.DecBasicHandle().MaxInitLen, 56) - if yyrt4687 { - if yyrl4687 <= cap(yyv4687) { - yyv4687 = yyv4687[:yyrl4687] - } else { - yyv4687 = make([]NodeSelectorRequirement, yyrl4687) - } - } else { - yyv4687 = make([]NodeSelectorRequirement, yyrl4687) - } - yyc4687 = true - yyrr4687 = len(yyv4687) - if yyrg4687 { - copy(yyv4687, yyv24687) - } - } else if yyl4687 != len(yyv4687) { - yyv4687 = yyv4687[:yyl4687] - yyc4687 = true - } - yyj4687 := 0 - for ; yyj4687 < yyrr4687; yyj4687++ { - yyh4687.ElemContainerState(yyj4687) - if r.TryDecodeAsNil() { - yyv4687[yyj4687] = NodeSelectorRequirement{} - } else { - yyv4688 := &yyv4687[yyj4687] - yyv4688.CodecDecodeSelf(d) - } - - } - if yyrt4687 { - for ; yyj4687 < yyl4687; yyj4687++ { - yyv4687 = append(yyv4687, NodeSelectorRequirement{}) - yyh4687.ElemContainerState(yyj4687) - if r.TryDecodeAsNil() { - yyv4687[yyj4687] = NodeSelectorRequirement{} - } else { - yyv4689 := &yyv4687[yyj4687] - yyv4689.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4687 := 0 - for ; !r.CheckBreak(); yyj4687++ { - - if yyj4687 >= len(yyv4687) { - yyv4687 = append(yyv4687, NodeSelectorRequirement{}) // var yyz4687 NodeSelectorRequirement - yyc4687 = true - } - yyh4687.ElemContainerState(yyj4687) - if yyj4687 < len(yyv4687) { - if r.TryDecodeAsNil() { - yyv4687[yyj4687] = NodeSelectorRequirement{} - } else { - yyv4690 := &yyv4687[yyj4687] - yyv4690.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4687 < len(yyv4687) { - yyv4687 = yyv4687[:yyj4687] - yyc4687 = true - } else if yyj4687 == 0 && yyv4687 == nil { - yyv4687 = []NodeSelectorRequirement{} - yyc4687 = true - } - } - yyh4687.End() - if yyc4687 { - *v = yyv4687 - } -} - -func (x codecSelfer1234) encSlicePodAffinityTerm(v []PodAffinityTerm, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4691 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4692 := &yyv4691 - yy4692.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePodAffinityTerm(v *[]PodAffinityTerm, d *codec1978.Decoder) { - var h codecSelfer1234 - z, 
r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4693 := *v - yyh4693, yyl4693 := z.DecSliceHelperStart() - var yyc4693 bool - if yyl4693 == 0 { - if yyv4693 == nil { - yyv4693 = []PodAffinityTerm{} - yyc4693 = true - } else if len(yyv4693) != 0 { - yyv4693 = yyv4693[:0] - yyc4693 = true - } - } else if yyl4693 > 0 { - var yyrr4693, yyrl4693 int - var yyrt4693 bool - if yyl4693 > cap(yyv4693) { - - yyrg4693 := len(yyv4693) > 0 - yyv24693 := yyv4693 - yyrl4693, yyrt4693 = z.DecInferLen(yyl4693, z.DecBasicHandle().MaxInitLen, 48) - if yyrt4693 { - if yyrl4693 <= cap(yyv4693) { - yyv4693 = yyv4693[:yyrl4693] - } else { - yyv4693 = make([]PodAffinityTerm, yyrl4693) - } - } else { - yyv4693 = make([]PodAffinityTerm, yyrl4693) - } - yyc4693 = true - yyrr4693 = len(yyv4693) - if yyrg4693 { - copy(yyv4693, yyv24693) - } - } else if yyl4693 != len(yyv4693) { - yyv4693 = yyv4693[:yyl4693] - yyc4693 = true - } - yyj4693 := 0 - for ; yyj4693 < yyrr4693; yyj4693++ { - yyh4693.ElemContainerState(yyj4693) - if r.TryDecodeAsNil() { - yyv4693[yyj4693] = PodAffinityTerm{} - } else { - yyv4694 := &yyv4693[yyj4693] - yyv4694.CodecDecodeSelf(d) - } - - } - if yyrt4693 { - for ; yyj4693 < yyl4693; yyj4693++ { - yyv4693 = append(yyv4693, PodAffinityTerm{}) - yyh4693.ElemContainerState(yyj4693) - if r.TryDecodeAsNil() { - yyv4693[yyj4693] = PodAffinityTerm{} - } else { - yyv4695 := &yyv4693[yyj4693] - yyv4695.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4693 := 0 - for ; !r.CheckBreak(); yyj4693++ { - - if yyj4693 >= len(yyv4693) { - yyv4693 = append(yyv4693, PodAffinityTerm{}) // var yyz4693 PodAffinityTerm - yyc4693 = true - } - yyh4693.ElemContainerState(yyj4693) - if yyj4693 < len(yyv4693) { - if r.TryDecodeAsNil() { - yyv4693[yyj4693] = PodAffinityTerm{} - } else { - yyv4696 := &yyv4693[yyj4693] - yyv4696.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4693 < len(yyv4693) { - yyv4693 = yyv4693[:yyj4693] - yyc4693 = true - } else if yyj4693 == 0 && yyv4693 == nil { - yyv4693 = []PodAffinityTerm{} - yyc4693 = true - } - } - yyh4693.End() - if yyc4693 { - *v = yyv4693 - } -} - -func (x codecSelfer1234) encSliceWeightedPodAffinityTerm(v []WeightedPodAffinityTerm, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4697 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4698 := &yyv4697 - yy4698.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceWeightedPodAffinityTerm(v *[]WeightedPodAffinityTerm, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4699 := *v - yyh4699, yyl4699 := z.DecSliceHelperStart() - var yyc4699 bool - if yyl4699 == 0 { - if yyv4699 == nil { - yyv4699 = []WeightedPodAffinityTerm{} - yyc4699 = true - } else if len(yyv4699) != 0 { - yyv4699 = yyv4699[:0] - yyc4699 = true - } - } else if yyl4699 > 0 { - var yyrr4699, yyrl4699 int - var yyrt4699 bool - if yyl4699 > cap(yyv4699) { - - yyrg4699 := len(yyv4699) > 0 - yyv24699 := yyv4699 - yyrl4699, yyrt4699 = z.DecInferLen(yyl4699, z.DecBasicHandle().MaxInitLen, 56) - if yyrt4699 { - if yyrl4699 <= cap(yyv4699) { - yyv4699 = yyv4699[:yyrl4699] - } else { - yyv4699 = make([]WeightedPodAffinityTerm, yyrl4699) - } - } else { - yyv4699 = make([]WeightedPodAffinityTerm, yyrl4699) - } - yyc4699 = true - yyrr4699 = len(yyv4699) - if yyrg4699 { - 
copy(yyv4699, yyv24699) - } - } else if yyl4699 != len(yyv4699) { - yyv4699 = yyv4699[:yyl4699] - yyc4699 = true - } - yyj4699 := 0 - for ; yyj4699 < yyrr4699; yyj4699++ { - yyh4699.ElemContainerState(yyj4699) - if r.TryDecodeAsNil() { - yyv4699[yyj4699] = WeightedPodAffinityTerm{} - } else { - yyv4700 := &yyv4699[yyj4699] - yyv4700.CodecDecodeSelf(d) - } - - } - if yyrt4699 { - for ; yyj4699 < yyl4699; yyj4699++ { - yyv4699 = append(yyv4699, WeightedPodAffinityTerm{}) - yyh4699.ElemContainerState(yyj4699) - if r.TryDecodeAsNil() { - yyv4699[yyj4699] = WeightedPodAffinityTerm{} - } else { - yyv4701 := &yyv4699[yyj4699] - yyv4701.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4699 := 0 - for ; !r.CheckBreak(); yyj4699++ { - - if yyj4699 >= len(yyv4699) { - yyv4699 = append(yyv4699, WeightedPodAffinityTerm{}) // var yyz4699 WeightedPodAffinityTerm - yyc4699 = true - } - yyh4699.ElemContainerState(yyj4699) - if yyj4699 < len(yyv4699) { - if r.TryDecodeAsNil() { - yyv4699[yyj4699] = WeightedPodAffinityTerm{} - } else { - yyv4702 := &yyv4699[yyj4699] - yyv4702.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4699 < len(yyv4699) { - yyv4699 = yyv4699[:yyj4699] - yyc4699 = true - } else if yyj4699 == 0 && yyv4699 == nil { - yyv4699 = []WeightedPodAffinityTerm{} - yyc4699 = true - } - } - yyh4699.End() - if yyc4699 { - *v = yyv4699 - } -} - -func (x codecSelfer1234) encSlicePreferredSchedulingTerm(v []PreferredSchedulingTerm, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4703 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4704 := &yyv4703 - yy4704.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePreferredSchedulingTerm(v *[]PreferredSchedulingTerm, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4705 := *v - yyh4705, yyl4705 := z.DecSliceHelperStart() - var yyc4705 bool - if yyl4705 == 0 { - if yyv4705 == nil { - yyv4705 = []PreferredSchedulingTerm{} - yyc4705 = true - } else if len(yyv4705) != 0 { - yyv4705 = yyv4705[:0] - yyc4705 = true - } - } else if yyl4705 > 0 { - var yyrr4705, yyrl4705 int - var yyrt4705 bool - if yyl4705 > cap(yyv4705) { - - yyrg4705 := len(yyv4705) > 0 - yyv24705 := yyv4705 - yyrl4705, yyrt4705 = z.DecInferLen(yyl4705, z.DecBasicHandle().MaxInitLen, 32) - if yyrt4705 { - if yyrl4705 <= cap(yyv4705) { - yyv4705 = yyv4705[:yyrl4705] - } else { - yyv4705 = make([]PreferredSchedulingTerm, yyrl4705) - } - } else { - yyv4705 = make([]PreferredSchedulingTerm, yyrl4705) - } - yyc4705 = true - yyrr4705 = len(yyv4705) - if yyrg4705 { - copy(yyv4705, yyv24705) - } - } else if yyl4705 != len(yyv4705) { - yyv4705 = yyv4705[:yyl4705] - yyc4705 = true - } - yyj4705 := 0 - for ; yyj4705 < yyrr4705; yyj4705++ { - yyh4705.ElemContainerState(yyj4705) - if r.TryDecodeAsNil() { - yyv4705[yyj4705] = PreferredSchedulingTerm{} - } else { - yyv4706 := &yyv4705[yyj4705] - yyv4706.CodecDecodeSelf(d) - } - - } - if yyrt4705 { - for ; yyj4705 < yyl4705; yyj4705++ { - yyv4705 = append(yyv4705, PreferredSchedulingTerm{}) - yyh4705.ElemContainerState(yyj4705) - if r.TryDecodeAsNil() { - yyv4705[yyj4705] = PreferredSchedulingTerm{} - } else { - yyv4707 := &yyv4705[yyj4705] - yyv4707.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4705 := 0 - for ; !r.CheckBreak(); yyj4705++ { - - if yyj4705 >= 
len(yyv4705) { - yyv4705 = append(yyv4705, PreferredSchedulingTerm{}) // var yyz4705 PreferredSchedulingTerm - yyc4705 = true - } - yyh4705.ElemContainerState(yyj4705) - if yyj4705 < len(yyv4705) { - if r.TryDecodeAsNil() { - yyv4705[yyj4705] = PreferredSchedulingTerm{} - } else { - yyv4708 := &yyv4705[yyj4705] - yyv4708.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4705 < len(yyv4705) { - yyv4705 = yyv4705[:yyj4705] - yyc4705 = true - } else if yyj4705 == 0 && yyv4705 == nil { - yyv4705 = []PreferredSchedulingTerm{} - yyc4705 = true - } - } - yyh4705.End() - if yyc4705 { - *v = yyv4705 - } -} - -func (x codecSelfer1234) encSliceVolume(v []Volume, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4709 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4710 := &yyv4709 - yy4710.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceVolume(v *[]Volume, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4711 := *v - yyh4711, yyl4711 := z.DecSliceHelperStart() - var yyc4711 bool - if yyl4711 == 0 { - if yyv4711 == nil { - yyv4711 = []Volume{} - yyc4711 = true - } else if len(yyv4711) != 0 { - yyv4711 = yyv4711[:0] - yyc4711 = true - } - } else if yyl4711 > 0 { - var yyrr4711, yyrl4711 int - var yyrt4711 bool - if yyl4711 > cap(yyv4711) { - - yyrg4711 := len(yyv4711) > 0 - yyv24711 := yyv4711 - yyrl4711, yyrt4711 = z.DecInferLen(yyl4711, z.DecBasicHandle().MaxInitLen, 200) - if yyrt4711 { - if yyrl4711 <= cap(yyv4711) { - yyv4711 = yyv4711[:yyrl4711] - } else { - yyv4711 = make([]Volume, yyrl4711) - } - } else { - yyv4711 = make([]Volume, yyrl4711) - } - yyc4711 = true - yyrr4711 = len(yyv4711) - if yyrg4711 { - copy(yyv4711, yyv24711) - } - } else if yyl4711 != len(yyv4711) { - yyv4711 = yyv4711[:yyl4711] - yyc4711 = true - } - yyj4711 := 0 - for ; yyj4711 < yyrr4711; yyj4711++ { - yyh4711.ElemContainerState(yyj4711) - if r.TryDecodeAsNil() { - yyv4711[yyj4711] = Volume{} - } else { - yyv4712 := &yyv4711[yyj4711] - yyv4712.CodecDecodeSelf(d) - } - - } - if yyrt4711 { - for ; yyj4711 < yyl4711; yyj4711++ { - yyv4711 = append(yyv4711, Volume{}) - yyh4711.ElemContainerState(yyj4711) - if r.TryDecodeAsNil() { - yyv4711[yyj4711] = Volume{} - } else { - yyv4713 := &yyv4711[yyj4711] - yyv4713.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4711 := 0 - for ; !r.CheckBreak(); yyj4711++ { - - if yyj4711 >= len(yyv4711) { - yyv4711 = append(yyv4711, Volume{}) // var yyz4711 Volume - yyc4711 = true - } - yyh4711.ElemContainerState(yyj4711) - if yyj4711 < len(yyv4711) { - if r.TryDecodeAsNil() { - yyv4711[yyj4711] = Volume{} - } else { - yyv4714 := &yyv4711[yyj4711] - yyv4714.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4711 < len(yyv4711) { - yyv4711 = yyv4711[:yyj4711] - yyc4711 = true - } else if yyj4711 == 0 && yyv4711 == nil { - yyv4711 = []Volume{} - yyc4711 = true - } - } - yyh4711.End() - if yyc4711 { - *v = yyv4711 - } -} - -func (x codecSelfer1234) encSliceContainer(v []Container, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4715 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4716 := &yyv4715 - yy4716.CodecEncodeSelf(e) - } - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceContainer(v *[]Container, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4717 := *v - yyh4717, yyl4717 := z.DecSliceHelperStart() - var yyc4717 bool - if yyl4717 == 0 { - if yyv4717 == nil { - yyv4717 = []Container{} - yyc4717 = true - } else if len(yyv4717) != 0 { - yyv4717 = yyv4717[:0] - yyc4717 = true - } - } else if yyl4717 > 0 { - var yyrr4717, yyrl4717 int - var yyrt4717 bool - if yyl4717 > cap(yyv4717) { - - yyrg4717 := len(yyv4717) > 0 - yyv24717 := yyv4717 - yyrl4717, yyrt4717 = z.DecInferLen(yyl4717, z.DecBasicHandle().MaxInitLen, 256) - if yyrt4717 { - if yyrl4717 <= cap(yyv4717) { - yyv4717 = yyv4717[:yyrl4717] - } else { - yyv4717 = make([]Container, yyrl4717) - } - } else { - yyv4717 = make([]Container, yyrl4717) - } - yyc4717 = true - yyrr4717 = len(yyv4717) - if yyrg4717 { - copy(yyv4717, yyv24717) - } - } else if yyl4717 != len(yyv4717) { - yyv4717 = yyv4717[:yyl4717] - yyc4717 = true - } - yyj4717 := 0 - for ; yyj4717 < yyrr4717; yyj4717++ { - yyh4717.ElemContainerState(yyj4717) - if r.TryDecodeAsNil() { - yyv4717[yyj4717] = Container{} - } else { - yyv4718 := &yyv4717[yyj4717] - yyv4718.CodecDecodeSelf(d) - } - - } - if yyrt4717 { - for ; yyj4717 < yyl4717; yyj4717++ { - yyv4717 = append(yyv4717, Container{}) - yyh4717.ElemContainerState(yyj4717) - if r.TryDecodeAsNil() { - yyv4717[yyj4717] = Container{} - } else { - yyv4719 := &yyv4717[yyj4717] - yyv4719.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4717 := 0 - for ; !r.CheckBreak(); yyj4717++ { - - if yyj4717 >= len(yyv4717) { - yyv4717 = append(yyv4717, Container{}) // var yyz4717 Container - yyc4717 = true - } - yyh4717.ElemContainerState(yyj4717) - if yyj4717 < len(yyv4717) { - if r.TryDecodeAsNil() { - yyv4717[yyj4717] = Container{} - } else { - yyv4720 := &yyv4717[yyj4717] - yyv4720.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4717 < len(yyv4717) { - yyv4717 = yyv4717[:yyj4717] - yyc4717 = true - } else if yyj4717 == 0 && yyv4717 == nil { - yyv4717 = []Container{} - yyc4717 = true - } - } - yyh4717.End() - if yyc4717 { - *v = yyv4717 - } -} - -func (x codecSelfer1234) encSliceLocalObjectReference(v []LocalObjectReference, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4721 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4722 := &yyv4721 - yy4722.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceLocalObjectReference(v *[]LocalObjectReference, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4723 := *v - yyh4723, yyl4723 := z.DecSliceHelperStart() - var yyc4723 bool - if yyl4723 == 0 { - if yyv4723 == nil { - yyv4723 = []LocalObjectReference{} - yyc4723 = true - } else if len(yyv4723) != 0 { - yyv4723 = yyv4723[:0] - yyc4723 = true - } - } else if yyl4723 > 0 { - var yyrr4723, yyrl4723 int - var yyrt4723 bool - if yyl4723 > cap(yyv4723) { - - yyrg4723 := len(yyv4723) > 0 - yyv24723 := yyv4723 - yyrl4723, yyrt4723 = z.DecInferLen(yyl4723, z.DecBasicHandle().MaxInitLen, 16) - if yyrt4723 { - if yyrl4723 <= cap(yyv4723) { - yyv4723 = yyv4723[:yyrl4723] - } else { - yyv4723 = make([]LocalObjectReference, yyrl4723) - } - } else { - yyv4723 = 
make([]LocalObjectReference, yyrl4723) - } - yyc4723 = true - yyrr4723 = len(yyv4723) - if yyrg4723 { - copy(yyv4723, yyv24723) - } - } else if yyl4723 != len(yyv4723) { - yyv4723 = yyv4723[:yyl4723] - yyc4723 = true - } - yyj4723 := 0 - for ; yyj4723 < yyrr4723; yyj4723++ { - yyh4723.ElemContainerState(yyj4723) - if r.TryDecodeAsNil() { - yyv4723[yyj4723] = LocalObjectReference{} - } else { - yyv4724 := &yyv4723[yyj4723] - yyv4724.CodecDecodeSelf(d) - } - - } - if yyrt4723 { - for ; yyj4723 < yyl4723; yyj4723++ { - yyv4723 = append(yyv4723, LocalObjectReference{}) - yyh4723.ElemContainerState(yyj4723) - if r.TryDecodeAsNil() { - yyv4723[yyj4723] = LocalObjectReference{} - } else { - yyv4725 := &yyv4723[yyj4723] - yyv4725.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4723 := 0 - for ; !r.CheckBreak(); yyj4723++ { - - if yyj4723 >= len(yyv4723) { - yyv4723 = append(yyv4723, LocalObjectReference{}) // var yyz4723 LocalObjectReference - yyc4723 = true - } - yyh4723.ElemContainerState(yyj4723) - if yyj4723 < len(yyv4723) { - if r.TryDecodeAsNil() { - yyv4723[yyj4723] = LocalObjectReference{} - } else { - yyv4726 := &yyv4723[yyj4723] - yyv4726.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4723 < len(yyv4723) { - yyv4723 = yyv4723[:yyj4723] - yyc4723 = true - } else if yyj4723 == 0 && yyv4723 == nil { - yyv4723 = []LocalObjectReference{} - yyc4723 = true - } - } - yyh4723.End() - if yyc4723 { - *v = yyv4723 - } -} - -func (x codecSelfer1234) encSlicePodCondition(v []PodCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4727 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4728 := &yyv4727 - yy4728.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePodCondition(v *[]PodCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4729 := *v - yyh4729, yyl4729 := z.DecSliceHelperStart() - var yyc4729 bool - if yyl4729 == 0 { - if yyv4729 == nil { - yyv4729 = []PodCondition{} - yyc4729 = true - } else if len(yyv4729) != 0 { - yyv4729 = yyv4729[:0] - yyc4729 = true - } - } else if yyl4729 > 0 { - var yyrr4729, yyrl4729 int - var yyrt4729 bool - if yyl4729 > cap(yyv4729) { - - yyrg4729 := len(yyv4729) > 0 - yyv24729 := yyv4729 - yyrl4729, yyrt4729 = z.DecInferLen(yyl4729, z.DecBasicHandle().MaxInitLen, 112) - if yyrt4729 { - if yyrl4729 <= cap(yyv4729) { - yyv4729 = yyv4729[:yyrl4729] - } else { - yyv4729 = make([]PodCondition, yyrl4729) - } - } else { - yyv4729 = make([]PodCondition, yyrl4729) - } - yyc4729 = true - yyrr4729 = len(yyv4729) - if yyrg4729 { - copy(yyv4729, yyv24729) - } - } else if yyl4729 != len(yyv4729) { - yyv4729 = yyv4729[:yyl4729] - yyc4729 = true - } - yyj4729 := 0 - for ; yyj4729 < yyrr4729; yyj4729++ { - yyh4729.ElemContainerState(yyj4729) - if r.TryDecodeAsNil() { - yyv4729[yyj4729] = PodCondition{} - } else { - yyv4730 := &yyv4729[yyj4729] - yyv4730.CodecDecodeSelf(d) - } - - } - if yyrt4729 { - for ; yyj4729 < yyl4729; yyj4729++ { - yyv4729 = append(yyv4729, PodCondition{}) - yyh4729.ElemContainerState(yyj4729) - if r.TryDecodeAsNil() { - yyv4729[yyj4729] = PodCondition{} - } else { - yyv4731 := &yyv4729[yyj4729] - yyv4731.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4729 := 0 - for ; !r.CheckBreak(); yyj4729++ { - - if yyj4729 >= len(yyv4729) { - yyv4729 = 
append(yyv4729, PodCondition{}) // var yyz4729 PodCondition - yyc4729 = true - } - yyh4729.ElemContainerState(yyj4729) - if yyj4729 < len(yyv4729) { - if r.TryDecodeAsNil() { - yyv4729[yyj4729] = PodCondition{} - } else { - yyv4732 := &yyv4729[yyj4729] - yyv4732.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4729 < len(yyv4729) { - yyv4729 = yyv4729[:yyj4729] - yyc4729 = true - } else if yyj4729 == 0 && yyv4729 == nil { - yyv4729 = []PodCondition{} - yyc4729 = true - } - } - yyh4729.End() - if yyc4729 { - *v = yyv4729 - } -} - -func (x codecSelfer1234) encSliceContainerStatus(v []ContainerStatus, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4733 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4734 := &yyv4733 - yy4734.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceContainerStatus(v *[]ContainerStatus, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4735 := *v - yyh4735, yyl4735 := z.DecSliceHelperStart() - var yyc4735 bool - if yyl4735 == 0 { - if yyv4735 == nil { - yyv4735 = []ContainerStatus{} - yyc4735 = true - } else if len(yyv4735) != 0 { - yyv4735 = yyv4735[:0] - yyc4735 = true - } - } else if yyl4735 > 0 { - var yyrr4735, yyrl4735 int - var yyrt4735 bool - if yyl4735 > cap(yyv4735) { - - yyrg4735 := len(yyv4735) > 0 - yyv24735 := yyv4735 - yyrl4735, yyrt4735 = z.DecInferLen(yyl4735, z.DecBasicHandle().MaxInitLen, 120) - if yyrt4735 { - if yyrl4735 <= cap(yyv4735) { - yyv4735 = yyv4735[:yyrl4735] - } else { - yyv4735 = make([]ContainerStatus, yyrl4735) - } - } else { - yyv4735 = make([]ContainerStatus, yyrl4735) - } - yyc4735 = true - yyrr4735 = len(yyv4735) - if yyrg4735 { - copy(yyv4735, yyv24735) - } - } else if yyl4735 != len(yyv4735) { - yyv4735 = yyv4735[:yyl4735] - yyc4735 = true - } - yyj4735 := 0 - for ; yyj4735 < yyrr4735; yyj4735++ { - yyh4735.ElemContainerState(yyj4735) - if r.TryDecodeAsNil() { - yyv4735[yyj4735] = ContainerStatus{} - } else { - yyv4736 := &yyv4735[yyj4735] - yyv4736.CodecDecodeSelf(d) - } - - } - if yyrt4735 { - for ; yyj4735 < yyl4735; yyj4735++ { - yyv4735 = append(yyv4735, ContainerStatus{}) - yyh4735.ElemContainerState(yyj4735) - if r.TryDecodeAsNil() { - yyv4735[yyj4735] = ContainerStatus{} - } else { - yyv4737 := &yyv4735[yyj4735] - yyv4737.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4735 := 0 - for ; !r.CheckBreak(); yyj4735++ { - - if yyj4735 >= len(yyv4735) { - yyv4735 = append(yyv4735, ContainerStatus{}) // var yyz4735 ContainerStatus - yyc4735 = true - } - yyh4735.ElemContainerState(yyj4735) - if yyj4735 < len(yyv4735) { - if r.TryDecodeAsNil() { - yyv4735[yyj4735] = ContainerStatus{} - } else { - yyv4738 := &yyv4735[yyj4735] - yyv4738.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4735 < len(yyv4735) { - yyv4735 = yyv4735[:yyj4735] - yyc4735 = true - } else if yyj4735 == 0 && yyv4735 == nil { - yyv4735 = []ContainerStatus{} - yyc4735 = true - } - } - yyh4735.End() - if yyc4735 { - *v = yyv4735 - } -} - -func (x codecSelfer1234) encSlicePodTemplate(v []PodTemplate, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4739 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4740 := &yyv4739 - 
yy4740.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePodTemplate(v *[]PodTemplate, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4741 := *v - yyh4741, yyl4741 := z.DecSliceHelperStart() - var yyc4741 bool - if yyl4741 == 0 { - if yyv4741 == nil { - yyv4741 = []PodTemplate{} - yyc4741 = true - } else if len(yyv4741) != 0 { - yyv4741 = yyv4741[:0] - yyc4741 = true - } - } else if yyl4741 > 0 { - var yyrr4741, yyrl4741 int - var yyrt4741 bool - if yyl4741 > cap(yyv4741) { - - yyrg4741 := len(yyv4741) > 0 - yyv24741 := yyv4741 - yyrl4741, yyrt4741 = z.DecInferLen(yyl4741, z.DecBasicHandle().MaxInitLen, 704) - if yyrt4741 { - if yyrl4741 <= cap(yyv4741) { - yyv4741 = yyv4741[:yyrl4741] - } else { - yyv4741 = make([]PodTemplate, yyrl4741) - } - } else { - yyv4741 = make([]PodTemplate, yyrl4741) - } - yyc4741 = true - yyrr4741 = len(yyv4741) - if yyrg4741 { - copy(yyv4741, yyv24741) - } - } else if yyl4741 != len(yyv4741) { - yyv4741 = yyv4741[:yyl4741] - yyc4741 = true - } - yyj4741 := 0 - for ; yyj4741 < yyrr4741; yyj4741++ { - yyh4741.ElemContainerState(yyj4741) - if r.TryDecodeAsNil() { - yyv4741[yyj4741] = PodTemplate{} - } else { - yyv4742 := &yyv4741[yyj4741] - yyv4742.CodecDecodeSelf(d) - } - - } - if yyrt4741 { - for ; yyj4741 < yyl4741; yyj4741++ { - yyv4741 = append(yyv4741, PodTemplate{}) - yyh4741.ElemContainerState(yyj4741) - if r.TryDecodeAsNil() { - yyv4741[yyj4741] = PodTemplate{} - } else { - yyv4743 := &yyv4741[yyj4741] - yyv4743.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4741 := 0 - for ; !r.CheckBreak(); yyj4741++ { - - if yyj4741 >= len(yyv4741) { - yyv4741 = append(yyv4741, PodTemplate{}) // var yyz4741 PodTemplate - yyc4741 = true - } - yyh4741.ElemContainerState(yyj4741) - if yyj4741 < len(yyv4741) { - if r.TryDecodeAsNil() { - yyv4741[yyj4741] = PodTemplate{} - } else { - yyv4744 := &yyv4741[yyj4741] - yyv4744.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4741 < len(yyv4741) { - yyv4741 = yyv4741[:yyj4741] - yyc4741 = true - } else if yyj4741 == 0 && yyv4741 == nil { - yyv4741 = []PodTemplate{} - yyc4741 = true - } - } - yyh4741.End() - if yyc4741 { - *v = yyv4741 - } -} - -func (x codecSelfer1234) encSliceReplicationControllerCondition(v []ReplicationControllerCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4745 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4746 := &yyv4745 - yy4746.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceReplicationControllerCondition(v *[]ReplicationControllerCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4747 := *v - yyh4747, yyl4747 := z.DecSliceHelperStart() - var yyc4747 bool - if yyl4747 == 0 { - if yyv4747 == nil { - yyv4747 = []ReplicationControllerCondition{} - yyc4747 = true - } else if len(yyv4747) != 0 { - yyv4747 = yyv4747[:0] - yyc4747 = true - } - } else if yyl4747 > 0 { - var yyrr4747, yyrl4747 int - var yyrt4747 bool - if yyl4747 > cap(yyv4747) { - - yyrg4747 := len(yyv4747) > 0 - yyv24747 := yyv4747 - yyrl4747, yyrt4747 = z.DecInferLen(yyl4747, z.DecBasicHandle().MaxInitLen, 88) - if yyrt4747 { - if yyrl4747 <= cap(yyv4747) { - yyv4747 = yyv4747[:yyrl4747] - 
} else { - yyv4747 = make([]ReplicationControllerCondition, yyrl4747) - } - } else { - yyv4747 = make([]ReplicationControllerCondition, yyrl4747) - } - yyc4747 = true - yyrr4747 = len(yyv4747) - if yyrg4747 { - copy(yyv4747, yyv24747) - } - } else if yyl4747 != len(yyv4747) { - yyv4747 = yyv4747[:yyl4747] - yyc4747 = true - } - yyj4747 := 0 - for ; yyj4747 < yyrr4747; yyj4747++ { - yyh4747.ElemContainerState(yyj4747) - if r.TryDecodeAsNil() { - yyv4747[yyj4747] = ReplicationControllerCondition{} - } else { - yyv4748 := &yyv4747[yyj4747] - yyv4748.CodecDecodeSelf(d) - } - - } - if yyrt4747 { - for ; yyj4747 < yyl4747; yyj4747++ { - yyv4747 = append(yyv4747, ReplicationControllerCondition{}) - yyh4747.ElemContainerState(yyj4747) - if r.TryDecodeAsNil() { - yyv4747[yyj4747] = ReplicationControllerCondition{} - } else { - yyv4749 := &yyv4747[yyj4747] - yyv4749.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4747 := 0 - for ; !r.CheckBreak(); yyj4747++ { - - if yyj4747 >= len(yyv4747) { - yyv4747 = append(yyv4747, ReplicationControllerCondition{}) // var yyz4747 ReplicationControllerCondition - yyc4747 = true - } - yyh4747.ElemContainerState(yyj4747) - if yyj4747 < len(yyv4747) { - if r.TryDecodeAsNil() { - yyv4747[yyj4747] = ReplicationControllerCondition{} - } else { - yyv4750 := &yyv4747[yyj4747] - yyv4750.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4747 < len(yyv4747) { - yyv4747 = yyv4747[:yyj4747] - yyc4747 = true - } else if yyj4747 == 0 && yyv4747 == nil { - yyv4747 = []ReplicationControllerCondition{} - yyc4747 = true - } - } - yyh4747.End() - if yyc4747 { - *v = yyv4747 - } -} - -func (x codecSelfer1234) encSliceReplicationController(v []ReplicationController, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4751 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4752 := &yyv4751 - yy4752.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceReplicationController(v *[]ReplicationController, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4753 := *v - yyh4753, yyl4753 := z.DecSliceHelperStart() - var yyc4753 bool - if yyl4753 == 0 { - if yyv4753 == nil { - yyv4753 = []ReplicationController{} - yyc4753 = true - } else if len(yyv4753) != 0 { - yyv4753 = yyv4753[:0] - yyc4753 = true - } - } else if yyl4753 > 0 { - var yyrr4753, yyrl4753 int - var yyrt4753 bool - if yyl4753 > cap(yyv4753) { - - yyrg4753 := len(yyv4753) > 0 - yyv24753 := yyv4753 - yyrl4753, yyrt4753 = z.DecInferLen(yyl4753, z.DecBasicHandle().MaxInitLen, 328) - if yyrt4753 { - if yyrl4753 <= cap(yyv4753) { - yyv4753 = yyv4753[:yyrl4753] - } else { - yyv4753 = make([]ReplicationController, yyrl4753) - } - } else { - yyv4753 = make([]ReplicationController, yyrl4753) - } - yyc4753 = true - yyrr4753 = len(yyv4753) - if yyrg4753 { - copy(yyv4753, yyv24753) - } - } else if yyl4753 != len(yyv4753) { - yyv4753 = yyv4753[:yyl4753] - yyc4753 = true - } - yyj4753 := 0 - for ; yyj4753 < yyrr4753; yyj4753++ { - yyh4753.ElemContainerState(yyj4753) - if r.TryDecodeAsNil() { - yyv4753[yyj4753] = ReplicationController{} - } else { - yyv4754 := &yyv4753[yyj4753] - yyv4754.CodecDecodeSelf(d) - } - - } - if yyrt4753 { - for ; yyj4753 < yyl4753; yyj4753++ { - yyv4753 = append(yyv4753, ReplicationController{}) - yyh4753.ElemContainerState(yyj4753) - if 
r.TryDecodeAsNil() { - yyv4753[yyj4753] = ReplicationController{} - } else { - yyv4755 := &yyv4753[yyj4753] - yyv4755.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4753 := 0 - for ; !r.CheckBreak(); yyj4753++ { - - if yyj4753 >= len(yyv4753) { - yyv4753 = append(yyv4753, ReplicationController{}) // var yyz4753 ReplicationController - yyc4753 = true - } - yyh4753.ElemContainerState(yyj4753) - if yyj4753 < len(yyv4753) { - if r.TryDecodeAsNil() { - yyv4753[yyj4753] = ReplicationController{} - } else { - yyv4756 := &yyv4753[yyj4753] - yyv4756.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4753 < len(yyv4753) { - yyv4753 = yyv4753[:yyj4753] - yyc4753 = true - } else if yyj4753 == 0 && yyv4753 == nil { - yyv4753 = []ReplicationController{} - yyc4753 = true - } - } - yyh4753.End() - if yyc4753 { - *v = yyv4753 - } -} - -func (x codecSelfer1234) encSliceService(v []Service, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4757 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4758 := &yyv4757 - yy4758.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceService(v *[]Service, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4759 := *v - yyh4759, yyl4759 := z.DecSliceHelperStart() - var yyc4759 bool - if yyl4759 == 0 { - if yyv4759 == nil { - yyv4759 = []Service{} - yyc4759 = true - } else if len(yyv4759) != 0 { - yyv4759 = yyv4759[:0] - yyc4759 = true - } - } else if yyl4759 > 0 { - var yyrr4759, yyrl4759 int - var yyrt4759 bool - if yyl4759 > cap(yyv4759) { - - yyrg4759 := len(yyv4759) > 0 - yyv24759 := yyv4759 - yyrl4759, yyrt4759 = z.DecInferLen(yyl4759, z.DecBasicHandle().MaxInitLen, 440) - if yyrt4759 { - if yyrl4759 <= cap(yyv4759) { - yyv4759 = yyv4759[:yyrl4759] - } else { - yyv4759 = make([]Service, yyrl4759) - } - } else { - yyv4759 = make([]Service, yyrl4759) - } - yyc4759 = true - yyrr4759 = len(yyv4759) - if yyrg4759 { - copy(yyv4759, yyv24759) - } - } else if yyl4759 != len(yyv4759) { - yyv4759 = yyv4759[:yyl4759] - yyc4759 = true - } - yyj4759 := 0 - for ; yyj4759 < yyrr4759; yyj4759++ { - yyh4759.ElemContainerState(yyj4759) - if r.TryDecodeAsNil() { - yyv4759[yyj4759] = Service{} - } else { - yyv4760 := &yyv4759[yyj4759] - yyv4760.CodecDecodeSelf(d) - } - - } - if yyrt4759 { - for ; yyj4759 < yyl4759; yyj4759++ { - yyv4759 = append(yyv4759, Service{}) - yyh4759.ElemContainerState(yyj4759) - if r.TryDecodeAsNil() { - yyv4759[yyj4759] = Service{} - } else { - yyv4761 := &yyv4759[yyj4759] - yyv4761.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4759 := 0 - for ; !r.CheckBreak(); yyj4759++ { - - if yyj4759 >= len(yyv4759) { - yyv4759 = append(yyv4759, Service{}) // var yyz4759 Service - yyc4759 = true - } - yyh4759.ElemContainerState(yyj4759) - if yyj4759 < len(yyv4759) { - if r.TryDecodeAsNil() { - yyv4759[yyj4759] = Service{} - } else { - yyv4762 := &yyv4759[yyj4759] - yyv4762.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4759 < len(yyv4759) { - yyv4759 = yyv4759[:yyj4759] - yyc4759 = true - } else if yyj4759 == 0 && yyv4759 == nil { - yyv4759 = []Service{} - yyc4759 = true - } - } - yyh4759.End() - if yyc4759 { - *v = yyv4759 - } -} - -func (x codecSelfer1234) encSliceLoadBalancerIngress(v []LoadBalancerIngress, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r 
:= codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4763 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4764 := &yyv4763 - yy4764.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceLoadBalancerIngress(v *[]LoadBalancerIngress, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4765 := *v - yyh4765, yyl4765 := z.DecSliceHelperStart() - var yyc4765 bool - if yyl4765 == 0 { - if yyv4765 == nil { - yyv4765 = []LoadBalancerIngress{} - yyc4765 = true - } else if len(yyv4765) != 0 { - yyv4765 = yyv4765[:0] - yyc4765 = true - } - } else if yyl4765 > 0 { - var yyrr4765, yyrl4765 int - var yyrt4765 bool - if yyl4765 > cap(yyv4765) { - - yyrg4765 := len(yyv4765) > 0 - yyv24765 := yyv4765 - yyrl4765, yyrt4765 = z.DecInferLen(yyl4765, z.DecBasicHandle().MaxInitLen, 32) - if yyrt4765 { - if yyrl4765 <= cap(yyv4765) { - yyv4765 = yyv4765[:yyrl4765] - } else { - yyv4765 = make([]LoadBalancerIngress, yyrl4765) - } - } else { - yyv4765 = make([]LoadBalancerIngress, yyrl4765) - } - yyc4765 = true - yyrr4765 = len(yyv4765) - if yyrg4765 { - copy(yyv4765, yyv24765) - } - } else if yyl4765 != len(yyv4765) { - yyv4765 = yyv4765[:yyl4765] - yyc4765 = true - } - yyj4765 := 0 - for ; yyj4765 < yyrr4765; yyj4765++ { - yyh4765.ElemContainerState(yyj4765) - if r.TryDecodeAsNil() { - yyv4765[yyj4765] = LoadBalancerIngress{} - } else { - yyv4766 := &yyv4765[yyj4765] - yyv4766.CodecDecodeSelf(d) - } - - } - if yyrt4765 { - for ; yyj4765 < yyl4765; yyj4765++ { - yyv4765 = append(yyv4765, LoadBalancerIngress{}) - yyh4765.ElemContainerState(yyj4765) - if r.TryDecodeAsNil() { - yyv4765[yyj4765] = LoadBalancerIngress{} - } else { - yyv4767 := &yyv4765[yyj4765] - yyv4767.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4765 := 0 - for ; !r.CheckBreak(); yyj4765++ { - - if yyj4765 >= len(yyv4765) { - yyv4765 = append(yyv4765, LoadBalancerIngress{}) // var yyz4765 LoadBalancerIngress - yyc4765 = true - } - yyh4765.ElemContainerState(yyj4765) - if yyj4765 < len(yyv4765) { - if r.TryDecodeAsNil() { - yyv4765[yyj4765] = LoadBalancerIngress{} - } else { - yyv4768 := &yyv4765[yyj4765] - yyv4768.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4765 < len(yyv4765) { - yyv4765 = yyv4765[:yyj4765] - yyc4765 = true - } else if yyj4765 == 0 && yyv4765 == nil { - yyv4765 = []LoadBalancerIngress{} - yyc4765 = true - } - } - yyh4765.End() - if yyc4765 { - *v = yyv4765 - } -} - -func (x codecSelfer1234) encSliceServicePort(v []ServicePort, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4769 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4770 := &yyv4769 - yy4770.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceServicePort(v *[]ServicePort, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4771 := *v - yyh4771, yyl4771 := z.DecSliceHelperStart() - var yyc4771 bool - if yyl4771 == 0 { - if yyv4771 == nil { - yyv4771 = []ServicePort{} - yyc4771 = true - } else if len(yyv4771) != 0 { - yyv4771 = yyv4771[:0] - yyc4771 = true - } - } else if yyl4771 > 0 { - var yyrr4771, yyrl4771 int - var yyrt4771 bool - if yyl4771 > cap(yyv4771) { - - yyrg4771 := 
len(yyv4771) > 0 - yyv24771 := yyv4771 - yyrl4771, yyrt4771 = z.DecInferLen(yyl4771, z.DecBasicHandle().MaxInitLen, 80) - if yyrt4771 { - if yyrl4771 <= cap(yyv4771) { - yyv4771 = yyv4771[:yyrl4771] - } else { - yyv4771 = make([]ServicePort, yyrl4771) - } - } else { - yyv4771 = make([]ServicePort, yyrl4771) - } - yyc4771 = true - yyrr4771 = len(yyv4771) - if yyrg4771 { - copy(yyv4771, yyv24771) - } - } else if yyl4771 != len(yyv4771) { - yyv4771 = yyv4771[:yyl4771] - yyc4771 = true - } - yyj4771 := 0 - for ; yyj4771 < yyrr4771; yyj4771++ { - yyh4771.ElemContainerState(yyj4771) - if r.TryDecodeAsNil() { - yyv4771[yyj4771] = ServicePort{} - } else { - yyv4772 := &yyv4771[yyj4771] - yyv4772.CodecDecodeSelf(d) - } - - } - if yyrt4771 { - for ; yyj4771 < yyl4771; yyj4771++ { - yyv4771 = append(yyv4771, ServicePort{}) - yyh4771.ElemContainerState(yyj4771) - if r.TryDecodeAsNil() { - yyv4771[yyj4771] = ServicePort{} - } else { - yyv4773 := &yyv4771[yyj4771] - yyv4773.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4771 := 0 - for ; !r.CheckBreak(); yyj4771++ { - - if yyj4771 >= len(yyv4771) { - yyv4771 = append(yyv4771, ServicePort{}) // var yyz4771 ServicePort - yyc4771 = true - } - yyh4771.ElemContainerState(yyj4771) - if yyj4771 < len(yyv4771) { - if r.TryDecodeAsNil() { - yyv4771[yyj4771] = ServicePort{} - } else { - yyv4774 := &yyv4771[yyj4771] - yyv4774.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4771 < len(yyv4771) { - yyv4771 = yyv4771[:yyj4771] - yyc4771 = true - } else if yyj4771 == 0 && yyv4771 == nil { - yyv4771 = []ServicePort{} - yyc4771 = true - } - } - yyh4771.End() - if yyc4771 { - *v = yyv4771 - } -} - -func (x codecSelfer1234) encSliceObjectReference(v []ObjectReference, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4775 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4776 := &yyv4775 - yy4776.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceObjectReference(v *[]ObjectReference, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4777 := *v - yyh4777, yyl4777 := z.DecSliceHelperStart() - var yyc4777 bool - if yyl4777 == 0 { - if yyv4777 == nil { - yyv4777 = []ObjectReference{} - yyc4777 = true - } else if len(yyv4777) != 0 { - yyv4777 = yyv4777[:0] - yyc4777 = true - } - } else if yyl4777 > 0 { - var yyrr4777, yyrl4777 int - var yyrt4777 bool - if yyl4777 > cap(yyv4777) { - - yyrg4777 := len(yyv4777) > 0 - yyv24777 := yyv4777 - yyrl4777, yyrt4777 = z.DecInferLen(yyl4777, z.DecBasicHandle().MaxInitLen, 112) - if yyrt4777 { - if yyrl4777 <= cap(yyv4777) { - yyv4777 = yyv4777[:yyrl4777] - } else { - yyv4777 = make([]ObjectReference, yyrl4777) - } - } else { - yyv4777 = make([]ObjectReference, yyrl4777) - } - yyc4777 = true - yyrr4777 = len(yyv4777) - if yyrg4777 { - copy(yyv4777, yyv24777) - } - } else if yyl4777 != len(yyv4777) { - yyv4777 = yyv4777[:yyl4777] - yyc4777 = true - } - yyj4777 := 0 - for ; yyj4777 < yyrr4777; yyj4777++ { - yyh4777.ElemContainerState(yyj4777) - if r.TryDecodeAsNil() { - yyv4777[yyj4777] = ObjectReference{} - } else { - yyv4778 := &yyv4777[yyj4777] - yyv4778.CodecDecodeSelf(d) - } - - } - if yyrt4777 { - for ; yyj4777 < yyl4777; yyj4777++ { - yyv4777 = append(yyv4777, ObjectReference{}) - yyh4777.ElemContainerState(yyj4777) - if r.TryDecodeAsNil() { - 
yyv4777[yyj4777] = ObjectReference{} - } else { - yyv4779 := &yyv4777[yyj4777] - yyv4779.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4777 := 0 - for ; !r.CheckBreak(); yyj4777++ { - - if yyj4777 >= len(yyv4777) { - yyv4777 = append(yyv4777, ObjectReference{}) // var yyz4777 ObjectReference - yyc4777 = true - } - yyh4777.ElemContainerState(yyj4777) - if yyj4777 < len(yyv4777) { - if r.TryDecodeAsNil() { - yyv4777[yyj4777] = ObjectReference{} - } else { - yyv4780 := &yyv4777[yyj4777] - yyv4780.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4777 < len(yyv4777) { - yyv4777 = yyv4777[:yyj4777] - yyc4777 = true - } else if yyj4777 == 0 && yyv4777 == nil { - yyv4777 = []ObjectReference{} - yyc4777 = true - } - } - yyh4777.End() - if yyc4777 { - *v = yyv4777 - } -} - -func (x codecSelfer1234) encSliceServiceAccount(v []ServiceAccount, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4781 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4782 := &yyv4781 - yy4782.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceServiceAccount(v *[]ServiceAccount, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4783 := *v - yyh4783, yyl4783 := z.DecSliceHelperStart() - var yyc4783 bool - if yyl4783 == 0 { - if yyv4783 == nil { - yyv4783 = []ServiceAccount{} - yyc4783 = true - } else if len(yyv4783) != 0 { - yyv4783 = yyv4783[:0] - yyc4783 = true - } - } else if yyl4783 > 0 { - var yyrr4783, yyrl4783 int - var yyrt4783 bool - if yyl4783 > cap(yyv4783) { - - yyrg4783 := len(yyv4783) > 0 - yyv24783 := yyv4783 - yyrl4783, yyrt4783 = z.DecInferLen(yyl4783, z.DecBasicHandle().MaxInitLen, 304) - if yyrt4783 { - if yyrl4783 <= cap(yyv4783) { - yyv4783 = yyv4783[:yyrl4783] - } else { - yyv4783 = make([]ServiceAccount, yyrl4783) - } - } else { - yyv4783 = make([]ServiceAccount, yyrl4783) - } - yyc4783 = true - yyrr4783 = len(yyv4783) - if yyrg4783 { - copy(yyv4783, yyv24783) - } - } else if yyl4783 != len(yyv4783) { - yyv4783 = yyv4783[:yyl4783] - yyc4783 = true - } - yyj4783 := 0 - for ; yyj4783 < yyrr4783; yyj4783++ { - yyh4783.ElemContainerState(yyj4783) - if r.TryDecodeAsNil() { - yyv4783[yyj4783] = ServiceAccount{} - } else { - yyv4784 := &yyv4783[yyj4783] - yyv4784.CodecDecodeSelf(d) - } - - } - if yyrt4783 { - for ; yyj4783 < yyl4783; yyj4783++ { - yyv4783 = append(yyv4783, ServiceAccount{}) - yyh4783.ElemContainerState(yyj4783) - if r.TryDecodeAsNil() { - yyv4783[yyj4783] = ServiceAccount{} - } else { - yyv4785 := &yyv4783[yyj4783] - yyv4785.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4783 := 0 - for ; !r.CheckBreak(); yyj4783++ { - - if yyj4783 >= len(yyv4783) { - yyv4783 = append(yyv4783, ServiceAccount{}) // var yyz4783 ServiceAccount - yyc4783 = true - } - yyh4783.ElemContainerState(yyj4783) - if yyj4783 < len(yyv4783) { - if r.TryDecodeAsNil() { - yyv4783[yyj4783] = ServiceAccount{} - } else { - yyv4786 := &yyv4783[yyj4783] - yyv4786.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4783 < len(yyv4783) { - yyv4783 = yyv4783[:yyj4783] - yyc4783 = true - } else if yyj4783 == 0 && yyv4783 == nil { - yyv4783 = []ServiceAccount{} - yyc4783 = true - } - } - yyh4783.End() - if yyc4783 { - *v = yyv4783 - } -} - -func (x codecSelfer1234) encSliceEndpointSubset(v []EndpointSubset, e 
*codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4787 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4788 := &yyv4787 - yy4788.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEndpointSubset(v *[]EndpointSubset, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4789 := *v - yyh4789, yyl4789 := z.DecSliceHelperStart() - var yyc4789 bool - if yyl4789 == 0 { - if yyv4789 == nil { - yyv4789 = []EndpointSubset{} - yyc4789 = true - } else if len(yyv4789) != 0 { - yyv4789 = yyv4789[:0] - yyc4789 = true - } - } else if yyl4789 > 0 { - var yyrr4789, yyrl4789 int - var yyrt4789 bool - if yyl4789 > cap(yyv4789) { - - yyrg4789 := len(yyv4789) > 0 - yyv24789 := yyv4789 - yyrl4789, yyrt4789 = z.DecInferLen(yyl4789, z.DecBasicHandle().MaxInitLen, 72) - if yyrt4789 { - if yyrl4789 <= cap(yyv4789) { - yyv4789 = yyv4789[:yyrl4789] - } else { - yyv4789 = make([]EndpointSubset, yyrl4789) - } - } else { - yyv4789 = make([]EndpointSubset, yyrl4789) - } - yyc4789 = true - yyrr4789 = len(yyv4789) - if yyrg4789 { - copy(yyv4789, yyv24789) - } - } else if yyl4789 != len(yyv4789) { - yyv4789 = yyv4789[:yyl4789] - yyc4789 = true - } - yyj4789 := 0 - for ; yyj4789 < yyrr4789; yyj4789++ { - yyh4789.ElemContainerState(yyj4789) - if r.TryDecodeAsNil() { - yyv4789[yyj4789] = EndpointSubset{} - } else { - yyv4790 := &yyv4789[yyj4789] - yyv4790.CodecDecodeSelf(d) - } - - } - if yyrt4789 { - for ; yyj4789 < yyl4789; yyj4789++ { - yyv4789 = append(yyv4789, EndpointSubset{}) - yyh4789.ElemContainerState(yyj4789) - if r.TryDecodeAsNil() { - yyv4789[yyj4789] = EndpointSubset{} - } else { - yyv4791 := &yyv4789[yyj4789] - yyv4791.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4789 := 0 - for ; !r.CheckBreak(); yyj4789++ { - - if yyj4789 >= len(yyv4789) { - yyv4789 = append(yyv4789, EndpointSubset{}) // var yyz4789 EndpointSubset - yyc4789 = true - } - yyh4789.ElemContainerState(yyj4789) - if yyj4789 < len(yyv4789) { - if r.TryDecodeAsNil() { - yyv4789[yyj4789] = EndpointSubset{} - } else { - yyv4792 := &yyv4789[yyj4789] - yyv4792.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4789 < len(yyv4789) { - yyv4789 = yyv4789[:yyj4789] - yyc4789 = true - } else if yyj4789 == 0 && yyv4789 == nil { - yyv4789 = []EndpointSubset{} - yyc4789 = true - } - } - yyh4789.End() - if yyc4789 { - *v = yyv4789 - } -} - -func (x codecSelfer1234) encSliceEndpointAddress(v []EndpointAddress, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4793 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4794 := &yyv4793 - yy4794.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEndpointAddress(v *[]EndpointAddress, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4795 := *v - yyh4795, yyl4795 := z.DecSliceHelperStart() - var yyc4795 bool - if yyl4795 == 0 { - if yyv4795 == nil { - yyv4795 = []EndpointAddress{} - yyc4795 = true - } else if len(yyv4795) != 0 { - yyv4795 = yyv4795[:0] - yyc4795 = true - } - } else if yyl4795 > 0 { - var yyrr4795, yyrl4795 int - var yyrt4795 bool - if yyl4795 > cap(yyv4795) { - - 
yyrg4795 := len(yyv4795) > 0 - yyv24795 := yyv4795 - yyrl4795, yyrt4795 = z.DecInferLen(yyl4795, z.DecBasicHandle().MaxInitLen, 48) - if yyrt4795 { - if yyrl4795 <= cap(yyv4795) { - yyv4795 = yyv4795[:yyrl4795] - } else { - yyv4795 = make([]EndpointAddress, yyrl4795) - } - } else { - yyv4795 = make([]EndpointAddress, yyrl4795) - } - yyc4795 = true - yyrr4795 = len(yyv4795) - if yyrg4795 { - copy(yyv4795, yyv24795) - } - } else if yyl4795 != len(yyv4795) { - yyv4795 = yyv4795[:yyl4795] - yyc4795 = true - } - yyj4795 := 0 - for ; yyj4795 < yyrr4795; yyj4795++ { - yyh4795.ElemContainerState(yyj4795) - if r.TryDecodeAsNil() { - yyv4795[yyj4795] = EndpointAddress{} - } else { - yyv4796 := &yyv4795[yyj4795] - yyv4796.CodecDecodeSelf(d) - } - - } - if yyrt4795 { - for ; yyj4795 < yyl4795; yyj4795++ { - yyv4795 = append(yyv4795, EndpointAddress{}) - yyh4795.ElemContainerState(yyj4795) - if r.TryDecodeAsNil() { - yyv4795[yyj4795] = EndpointAddress{} - } else { - yyv4797 := &yyv4795[yyj4795] - yyv4797.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4795 := 0 - for ; !r.CheckBreak(); yyj4795++ { - - if yyj4795 >= len(yyv4795) { - yyv4795 = append(yyv4795, EndpointAddress{}) // var yyz4795 EndpointAddress - yyc4795 = true - } - yyh4795.ElemContainerState(yyj4795) - if yyj4795 < len(yyv4795) { - if r.TryDecodeAsNil() { - yyv4795[yyj4795] = EndpointAddress{} - } else { - yyv4798 := &yyv4795[yyj4795] - yyv4798.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4795 < len(yyv4795) { - yyv4795 = yyv4795[:yyj4795] - yyc4795 = true - } else if yyj4795 == 0 && yyv4795 == nil { - yyv4795 = []EndpointAddress{} - yyc4795 = true - } - } - yyh4795.End() - if yyc4795 { - *v = yyv4795 - } -} - -func (x codecSelfer1234) encSliceEndpointPort(v []EndpointPort, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4799 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4800 := &yyv4799 - yy4800.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEndpointPort(v *[]EndpointPort, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4801 := *v - yyh4801, yyl4801 := z.DecSliceHelperStart() - var yyc4801 bool - if yyl4801 == 0 { - if yyv4801 == nil { - yyv4801 = []EndpointPort{} - yyc4801 = true - } else if len(yyv4801) != 0 { - yyv4801 = yyv4801[:0] - yyc4801 = true - } - } else if yyl4801 > 0 { - var yyrr4801, yyrl4801 int - var yyrt4801 bool - if yyl4801 > cap(yyv4801) { - - yyrg4801 := len(yyv4801) > 0 - yyv24801 := yyv4801 - yyrl4801, yyrt4801 = z.DecInferLen(yyl4801, z.DecBasicHandle().MaxInitLen, 40) - if yyrt4801 { - if yyrl4801 <= cap(yyv4801) { - yyv4801 = yyv4801[:yyrl4801] - } else { - yyv4801 = make([]EndpointPort, yyrl4801) - } - } else { - yyv4801 = make([]EndpointPort, yyrl4801) - } - yyc4801 = true - yyrr4801 = len(yyv4801) - if yyrg4801 { - copy(yyv4801, yyv24801) - } - } else if yyl4801 != len(yyv4801) { - yyv4801 = yyv4801[:yyl4801] - yyc4801 = true - } - yyj4801 := 0 - for ; yyj4801 < yyrr4801; yyj4801++ { - yyh4801.ElemContainerState(yyj4801) - if r.TryDecodeAsNil() { - yyv4801[yyj4801] = EndpointPort{} - } else { - yyv4802 := &yyv4801[yyj4801] - yyv4802.CodecDecodeSelf(d) - } - - } - if yyrt4801 { - for ; yyj4801 < yyl4801; yyj4801++ { - yyv4801 = append(yyv4801, EndpointPort{}) - yyh4801.ElemContainerState(yyj4801) - if 
r.TryDecodeAsNil() { - yyv4801[yyj4801] = EndpointPort{} - } else { - yyv4803 := &yyv4801[yyj4801] - yyv4803.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4801 := 0 - for ; !r.CheckBreak(); yyj4801++ { - - if yyj4801 >= len(yyv4801) { - yyv4801 = append(yyv4801, EndpointPort{}) // var yyz4801 EndpointPort - yyc4801 = true - } - yyh4801.ElemContainerState(yyj4801) - if yyj4801 < len(yyv4801) { - if r.TryDecodeAsNil() { - yyv4801[yyj4801] = EndpointPort{} - } else { - yyv4804 := &yyv4801[yyj4801] - yyv4804.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4801 < len(yyv4801) { - yyv4801 = yyv4801[:yyj4801] - yyc4801 = true - } else if yyj4801 == 0 && yyv4801 == nil { - yyv4801 = []EndpointPort{} - yyc4801 = true - } - } - yyh4801.End() - if yyc4801 { - *v = yyv4801 - } -} - -func (x codecSelfer1234) encSliceEndpoints(v []Endpoints, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4805 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4806 := &yyv4805 - yy4806.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEndpoints(v *[]Endpoints, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4807 := *v - yyh4807, yyl4807 := z.DecSliceHelperStart() - var yyc4807 bool - if yyl4807 == 0 { - if yyv4807 == nil { - yyv4807 = []Endpoints{} - yyc4807 = true - } else if len(yyv4807) != 0 { - yyv4807 = yyv4807[:0] - yyc4807 = true - } - } else if yyl4807 > 0 { - var yyrr4807, yyrl4807 int - var yyrt4807 bool - if yyl4807 > cap(yyv4807) { - - yyrg4807 := len(yyv4807) > 0 - yyv24807 := yyv4807 - yyrl4807, yyrt4807 = z.DecInferLen(yyl4807, z.DecBasicHandle().MaxInitLen, 280) - if yyrt4807 { - if yyrl4807 <= cap(yyv4807) { - yyv4807 = yyv4807[:yyrl4807] - } else { - yyv4807 = make([]Endpoints, yyrl4807) - } - } else { - yyv4807 = make([]Endpoints, yyrl4807) - } - yyc4807 = true - yyrr4807 = len(yyv4807) - if yyrg4807 { - copy(yyv4807, yyv24807) - } - } else if yyl4807 != len(yyv4807) { - yyv4807 = yyv4807[:yyl4807] - yyc4807 = true - } - yyj4807 := 0 - for ; yyj4807 < yyrr4807; yyj4807++ { - yyh4807.ElemContainerState(yyj4807) - if r.TryDecodeAsNil() { - yyv4807[yyj4807] = Endpoints{} - } else { - yyv4808 := &yyv4807[yyj4807] - yyv4808.CodecDecodeSelf(d) - } - - } - if yyrt4807 { - for ; yyj4807 < yyl4807; yyj4807++ { - yyv4807 = append(yyv4807, Endpoints{}) - yyh4807.ElemContainerState(yyj4807) - if r.TryDecodeAsNil() { - yyv4807[yyj4807] = Endpoints{} - } else { - yyv4809 := &yyv4807[yyj4807] - yyv4809.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4807 := 0 - for ; !r.CheckBreak(); yyj4807++ { - - if yyj4807 >= len(yyv4807) { - yyv4807 = append(yyv4807, Endpoints{}) // var yyz4807 Endpoints - yyc4807 = true - } - yyh4807.ElemContainerState(yyj4807) - if yyj4807 < len(yyv4807) { - if r.TryDecodeAsNil() { - yyv4807[yyj4807] = Endpoints{} - } else { - yyv4810 := &yyv4807[yyj4807] - yyv4810.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4807 < len(yyv4807) { - yyv4807 = yyv4807[:yyj4807] - yyc4807 = true - } else if yyj4807 == 0 && yyv4807 == nil { - yyv4807 = []Endpoints{} - yyc4807 = true - } - } - yyh4807.End() - if yyc4807 { - *v = yyv4807 - } -} - -func (x codecSelfer1234) encSliceNodeCondition(v []NodeCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4811 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4812 := &yyv4811 - yy4812.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNodeCondition(v *[]NodeCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4813 := *v - yyh4813, yyl4813 := z.DecSliceHelperStart() - var yyc4813 bool - if yyl4813 == 0 { - if yyv4813 == nil { - yyv4813 = []NodeCondition{} - yyc4813 = true - } else if len(yyv4813) != 0 { - yyv4813 = yyv4813[:0] - yyc4813 = true - } - } else if yyl4813 > 0 { - var yyrr4813, yyrl4813 int - var yyrt4813 bool - if yyl4813 > cap(yyv4813) { - - yyrg4813 := len(yyv4813) > 0 - yyv24813 := yyv4813 - yyrl4813, yyrt4813 = z.DecInferLen(yyl4813, z.DecBasicHandle().MaxInitLen, 112) - if yyrt4813 { - if yyrl4813 <= cap(yyv4813) { - yyv4813 = yyv4813[:yyrl4813] - } else { - yyv4813 = make([]NodeCondition, yyrl4813) - } - } else { - yyv4813 = make([]NodeCondition, yyrl4813) - } - yyc4813 = true - yyrr4813 = len(yyv4813) - if yyrg4813 { - copy(yyv4813, yyv24813) - } - } else if yyl4813 != len(yyv4813) { - yyv4813 = yyv4813[:yyl4813] - yyc4813 = true - } - yyj4813 := 0 - for ; yyj4813 < yyrr4813; yyj4813++ { - yyh4813.ElemContainerState(yyj4813) - if r.TryDecodeAsNil() { - yyv4813[yyj4813] = NodeCondition{} - } else { - yyv4814 := &yyv4813[yyj4813] - yyv4814.CodecDecodeSelf(d) - } - - } - if yyrt4813 { - for ; yyj4813 < yyl4813; yyj4813++ { - yyv4813 = append(yyv4813, NodeCondition{}) - yyh4813.ElemContainerState(yyj4813) - if r.TryDecodeAsNil() { - yyv4813[yyj4813] = NodeCondition{} - } else { - yyv4815 := &yyv4813[yyj4813] - yyv4815.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4813 := 0 - for ; !r.CheckBreak(); yyj4813++ { - - if yyj4813 >= len(yyv4813) { - yyv4813 = append(yyv4813, NodeCondition{}) // var yyz4813 NodeCondition - yyc4813 = true - } - yyh4813.ElemContainerState(yyj4813) - if yyj4813 < len(yyv4813) { - if r.TryDecodeAsNil() { - yyv4813[yyj4813] = NodeCondition{} - } else { - yyv4816 := &yyv4813[yyj4813] - yyv4816.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4813 < len(yyv4813) { - yyv4813 = yyv4813[:yyj4813] - yyc4813 = true - } else if yyj4813 == 0 && yyv4813 == nil { - yyv4813 = []NodeCondition{} - yyc4813 = true - } - } - yyh4813.End() - if yyc4813 { - *v = yyv4813 - } -} - -func (x codecSelfer1234) encSliceNodeAddress(v []NodeAddress, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4817 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4818 := &yyv4817 - yy4818.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNodeAddress(v *[]NodeAddress, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4819 := *v - yyh4819, yyl4819 := z.DecSliceHelperStart() - var yyc4819 bool - if yyl4819 == 0 { - if yyv4819 == nil { - yyv4819 = []NodeAddress{} - yyc4819 = true - } else if len(yyv4819) != 0 { - yyv4819 = yyv4819[:0] - yyc4819 = true - } - } else if yyl4819 > 0 { - var yyrr4819, yyrl4819 int - var yyrt4819 bool - if yyl4819 > cap(yyv4819) { - - yyrg4819 := len(yyv4819) > 0 - yyv24819 := yyv4819 - yyrl4819, yyrt4819 = 
z.DecInferLen(yyl4819, z.DecBasicHandle().MaxInitLen, 32) - if yyrt4819 { - if yyrl4819 <= cap(yyv4819) { - yyv4819 = yyv4819[:yyrl4819] - } else { - yyv4819 = make([]NodeAddress, yyrl4819) - } - } else { - yyv4819 = make([]NodeAddress, yyrl4819) - } - yyc4819 = true - yyrr4819 = len(yyv4819) - if yyrg4819 { - copy(yyv4819, yyv24819) - } - } else if yyl4819 != len(yyv4819) { - yyv4819 = yyv4819[:yyl4819] - yyc4819 = true - } - yyj4819 := 0 - for ; yyj4819 < yyrr4819; yyj4819++ { - yyh4819.ElemContainerState(yyj4819) - if r.TryDecodeAsNil() { - yyv4819[yyj4819] = NodeAddress{} - } else { - yyv4820 := &yyv4819[yyj4819] - yyv4820.CodecDecodeSelf(d) - } - - } - if yyrt4819 { - for ; yyj4819 < yyl4819; yyj4819++ { - yyv4819 = append(yyv4819, NodeAddress{}) - yyh4819.ElemContainerState(yyj4819) - if r.TryDecodeAsNil() { - yyv4819[yyj4819] = NodeAddress{} - } else { - yyv4821 := &yyv4819[yyj4819] - yyv4821.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4819 := 0 - for ; !r.CheckBreak(); yyj4819++ { - - if yyj4819 >= len(yyv4819) { - yyv4819 = append(yyv4819, NodeAddress{}) // var yyz4819 NodeAddress - yyc4819 = true - } - yyh4819.ElemContainerState(yyj4819) - if yyj4819 < len(yyv4819) { - if r.TryDecodeAsNil() { - yyv4819[yyj4819] = NodeAddress{} - } else { - yyv4822 := &yyv4819[yyj4819] - yyv4822.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4819 < len(yyv4819) { - yyv4819 = yyv4819[:yyj4819] - yyc4819 = true - } else if yyj4819 == 0 && yyv4819 == nil { - yyv4819 = []NodeAddress{} - yyc4819 = true - } - } - yyh4819.End() - if yyc4819 { - *v = yyv4819 - } -} - -func (x codecSelfer1234) encSliceContainerImage(v []ContainerImage, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4823 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4824 := &yyv4823 - yy4824.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceContainerImage(v *[]ContainerImage, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4825 := *v - yyh4825, yyl4825 := z.DecSliceHelperStart() - var yyc4825 bool - if yyl4825 == 0 { - if yyv4825 == nil { - yyv4825 = []ContainerImage{} - yyc4825 = true - } else if len(yyv4825) != 0 { - yyv4825 = yyv4825[:0] - yyc4825 = true - } - } else if yyl4825 > 0 { - var yyrr4825, yyrl4825 int - var yyrt4825 bool - if yyl4825 > cap(yyv4825) { - - yyrg4825 := len(yyv4825) > 0 - yyv24825 := yyv4825 - yyrl4825, yyrt4825 = z.DecInferLen(yyl4825, z.DecBasicHandle().MaxInitLen, 32) - if yyrt4825 { - if yyrl4825 <= cap(yyv4825) { - yyv4825 = yyv4825[:yyrl4825] - } else { - yyv4825 = make([]ContainerImage, yyrl4825) - } - } else { - yyv4825 = make([]ContainerImage, yyrl4825) - } - yyc4825 = true - yyrr4825 = len(yyv4825) - if yyrg4825 { - copy(yyv4825, yyv24825) - } - } else if yyl4825 != len(yyv4825) { - yyv4825 = yyv4825[:yyl4825] - yyc4825 = true - } - yyj4825 := 0 - for ; yyj4825 < yyrr4825; yyj4825++ { - yyh4825.ElemContainerState(yyj4825) - if r.TryDecodeAsNil() { - yyv4825[yyj4825] = ContainerImage{} - } else { - yyv4826 := &yyv4825[yyj4825] - yyv4826.CodecDecodeSelf(d) - } - - } - if yyrt4825 { - for ; yyj4825 < yyl4825; yyj4825++ { - yyv4825 = append(yyv4825, ContainerImage{}) - yyh4825.ElemContainerState(yyj4825) - if r.TryDecodeAsNil() { - yyv4825[yyj4825] = ContainerImage{} - } else { - yyv4827 := &yyv4825[yyj4825] 
- yyv4827.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4825 := 0 - for ; !r.CheckBreak(); yyj4825++ { - - if yyj4825 >= len(yyv4825) { - yyv4825 = append(yyv4825, ContainerImage{}) // var yyz4825 ContainerImage - yyc4825 = true - } - yyh4825.ElemContainerState(yyj4825) - if yyj4825 < len(yyv4825) { - if r.TryDecodeAsNil() { - yyv4825[yyj4825] = ContainerImage{} - } else { - yyv4828 := &yyv4825[yyj4825] - yyv4828.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4825 < len(yyv4825) { - yyv4825 = yyv4825[:yyj4825] - yyc4825 = true - } else if yyj4825 == 0 && yyv4825 == nil { - yyv4825 = []ContainerImage{} - yyc4825 = true - } - } - yyh4825.End() - if yyc4825 { - *v = yyv4825 - } -} - -func (x codecSelfer1234) encSliceUniqueVolumeName(v []UniqueVolumeName, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4829 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv4829.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceUniqueVolumeName(v *[]UniqueVolumeName, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4830 := *v - yyh4830, yyl4830 := z.DecSliceHelperStart() - var yyc4830 bool - if yyl4830 == 0 { - if yyv4830 == nil { - yyv4830 = []UniqueVolumeName{} - yyc4830 = true - } else if len(yyv4830) != 0 { - yyv4830 = yyv4830[:0] - yyc4830 = true - } - } else if yyl4830 > 0 { - var yyrr4830, yyrl4830 int - var yyrt4830 bool - if yyl4830 > cap(yyv4830) { - - yyrl4830, yyrt4830 = z.DecInferLen(yyl4830, z.DecBasicHandle().MaxInitLen, 16) - if yyrt4830 { - if yyrl4830 <= cap(yyv4830) { - yyv4830 = yyv4830[:yyrl4830] - } else { - yyv4830 = make([]UniqueVolumeName, yyrl4830) - } - } else { - yyv4830 = make([]UniqueVolumeName, yyrl4830) - } - yyc4830 = true - yyrr4830 = len(yyv4830) - } else if yyl4830 != len(yyv4830) { - yyv4830 = yyv4830[:yyl4830] - yyc4830 = true - } - yyj4830 := 0 - for ; yyj4830 < yyrr4830; yyj4830++ { - yyh4830.ElemContainerState(yyj4830) - if r.TryDecodeAsNil() { - yyv4830[yyj4830] = "" - } else { - yyv4830[yyj4830] = UniqueVolumeName(r.DecodeString()) - } - - } - if yyrt4830 { - for ; yyj4830 < yyl4830; yyj4830++ { - yyv4830 = append(yyv4830, "") - yyh4830.ElemContainerState(yyj4830) - if r.TryDecodeAsNil() { - yyv4830[yyj4830] = "" - } else { - yyv4830[yyj4830] = UniqueVolumeName(r.DecodeString()) - } - - } - } - - } else { - yyj4830 := 0 - for ; !r.CheckBreak(); yyj4830++ { - - if yyj4830 >= len(yyv4830) { - yyv4830 = append(yyv4830, "") // var yyz4830 UniqueVolumeName - yyc4830 = true - } - yyh4830.ElemContainerState(yyj4830) - if yyj4830 < len(yyv4830) { - if r.TryDecodeAsNil() { - yyv4830[yyj4830] = "" - } else { - yyv4830[yyj4830] = UniqueVolumeName(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj4830 < len(yyv4830) { - yyv4830 = yyv4830[:yyj4830] - yyc4830 = true - } else if yyj4830 == 0 && yyv4830 == nil { - yyv4830 = []UniqueVolumeName{} - yyc4830 = true - } - } - yyh4830.End() - if yyc4830 { - *v = yyv4830 - } -} - -func (x codecSelfer1234) encSliceAttachedVolume(v []AttachedVolume, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4834 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4835 := &yyv4834 - yy4835.CodecEncodeSelf(e) - } - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceAttachedVolume(v *[]AttachedVolume, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4836 := *v - yyh4836, yyl4836 := z.DecSliceHelperStart() - var yyc4836 bool - if yyl4836 == 0 { - if yyv4836 == nil { - yyv4836 = []AttachedVolume{} - yyc4836 = true - } else if len(yyv4836) != 0 { - yyv4836 = yyv4836[:0] - yyc4836 = true - } - } else if yyl4836 > 0 { - var yyrr4836, yyrl4836 int - var yyrt4836 bool - if yyl4836 > cap(yyv4836) { - - yyrg4836 := len(yyv4836) > 0 - yyv24836 := yyv4836 - yyrl4836, yyrt4836 = z.DecInferLen(yyl4836, z.DecBasicHandle().MaxInitLen, 32) - if yyrt4836 { - if yyrl4836 <= cap(yyv4836) { - yyv4836 = yyv4836[:yyrl4836] - } else { - yyv4836 = make([]AttachedVolume, yyrl4836) - } - } else { - yyv4836 = make([]AttachedVolume, yyrl4836) - } - yyc4836 = true - yyrr4836 = len(yyv4836) - if yyrg4836 { - copy(yyv4836, yyv24836) - } - } else if yyl4836 != len(yyv4836) { - yyv4836 = yyv4836[:yyl4836] - yyc4836 = true - } - yyj4836 := 0 - for ; yyj4836 < yyrr4836; yyj4836++ { - yyh4836.ElemContainerState(yyj4836) - if r.TryDecodeAsNil() { - yyv4836[yyj4836] = AttachedVolume{} - } else { - yyv4837 := &yyv4836[yyj4836] - yyv4837.CodecDecodeSelf(d) - } - - } - if yyrt4836 { - for ; yyj4836 < yyl4836; yyj4836++ { - yyv4836 = append(yyv4836, AttachedVolume{}) - yyh4836.ElemContainerState(yyj4836) - if r.TryDecodeAsNil() { - yyv4836[yyj4836] = AttachedVolume{} - } else { - yyv4838 := &yyv4836[yyj4836] - yyv4838.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4836 := 0 - for ; !r.CheckBreak(); yyj4836++ { - - if yyj4836 >= len(yyv4836) { - yyv4836 = append(yyv4836, AttachedVolume{}) // var yyz4836 AttachedVolume - yyc4836 = true - } - yyh4836.ElemContainerState(yyj4836) - if yyj4836 < len(yyv4836) { - if r.TryDecodeAsNil() { - yyv4836[yyj4836] = AttachedVolume{} - } else { - yyv4839 := &yyv4836[yyj4836] - yyv4839.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4836 < len(yyv4836) { - yyv4836 = yyv4836[:yyj4836] - yyc4836 = true - } else if yyj4836 == 0 && yyv4836 == nil { - yyv4836 = []AttachedVolume{} - yyc4836 = true - } - } - yyh4836.End() - if yyc4836 { - *v = yyv4836 - } -} - -func (x codecSelfer1234) encSlicePreferAvoidPodsEntry(v []PreferAvoidPodsEntry, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4840 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4841 := &yyv4840 - yy4841.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePreferAvoidPodsEntry(v *[]PreferAvoidPodsEntry, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4842 := *v - yyh4842, yyl4842 := z.DecSliceHelperStart() - var yyc4842 bool - if yyl4842 == 0 { - if yyv4842 == nil { - yyv4842 = []PreferAvoidPodsEntry{} - yyc4842 = true - } else if len(yyv4842) != 0 { - yyv4842 = yyv4842[:0] - yyc4842 = true - } - } else if yyl4842 > 0 { - var yyrr4842, yyrl4842 int - var yyrt4842 bool - if yyl4842 > cap(yyv4842) { - - yyrg4842 := len(yyv4842) > 0 - yyv24842 := yyv4842 - yyrl4842, yyrt4842 = z.DecInferLen(yyl4842, z.DecBasicHandle().MaxInitLen, 64) - if yyrt4842 { - if yyrl4842 <= cap(yyv4842) { - yyv4842 = yyv4842[:yyrl4842] - } else { - yyv4842 = 
make([]PreferAvoidPodsEntry, yyrl4842) - } - } else { - yyv4842 = make([]PreferAvoidPodsEntry, yyrl4842) - } - yyc4842 = true - yyrr4842 = len(yyv4842) - if yyrg4842 { - copy(yyv4842, yyv24842) - } - } else if yyl4842 != len(yyv4842) { - yyv4842 = yyv4842[:yyl4842] - yyc4842 = true - } - yyj4842 := 0 - for ; yyj4842 < yyrr4842; yyj4842++ { - yyh4842.ElemContainerState(yyj4842) - if r.TryDecodeAsNil() { - yyv4842[yyj4842] = PreferAvoidPodsEntry{} - } else { - yyv4843 := &yyv4842[yyj4842] - yyv4843.CodecDecodeSelf(d) - } - - } - if yyrt4842 { - for ; yyj4842 < yyl4842; yyj4842++ { - yyv4842 = append(yyv4842, PreferAvoidPodsEntry{}) - yyh4842.ElemContainerState(yyj4842) - if r.TryDecodeAsNil() { - yyv4842[yyj4842] = PreferAvoidPodsEntry{} - } else { - yyv4844 := &yyv4842[yyj4842] - yyv4844.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4842 := 0 - for ; !r.CheckBreak(); yyj4842++ { - - if yyj4842 >= len(yyv4842) { - yyv4842 = append(yyv4842, PreferAvoidPodsEntry{}) // var yyz4842 PreferAvoidPodsEntry - yyc4842 = true - } - yyh4842.ElemContainerState(yyj4842) - if yyj4842 < len(yyv4842) { - if r.TryDecodeAsNil() { - yyv4842[yyj4842] = PreferAvoidPodsEntry{} - } else { - yyv4845 := &yyv4842[yyj4842] - yyv4845.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4842 < len(yyv4842) { - yyv4842 = yyv4842[:yyj4842] - yyc4842 = true - } else if yyj4842 == 0 && yyv4842 == nil { - yyv4842 = []PreferAvoidPodsEntry{} - yyc4842 = true - } - } - yyh4842.End() - if yyc4842 { - *v = yyv4842 - } -} - -func (x codecSelfer1234) encResourceList(v ResourceList, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk4846, yyv4846 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yyk4846.CodecEncodeSelf(e) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4847 := &yyv4846 - yym4848 := z.EncBinary() - _ = yym4848 - if false { - } else if z.HasExtensions() && z.EncExt(yy4847) { - } else if !yym4848 && z.IsJSONHandle() { - z.EncJSONMarshal(yy4847) - } else { - z.EncFallback(yy4847) - } - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decResourceList(v *ResourceList, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4849 := *v - yyl4849 := r.ReadMapStart() - yybh4849 := z.DecBasicHandle() - if yyv4849 == nil { - yyrl4849, _ := z.DecInferLen(yyl4849, yybh4849.MaxInitLen, 72) - yyv4849 = make(map[ResourceName]pkg3_resource.Quantity, yyrl4849) - *v = yyv4849 - } - var yymk4849 ResourceName - var yymv4849 pkg3_resource.Quantity - var yymg4849 bool - if yybh4849.MapValueReset { - yymg4849 = true - } - if yyl4849 > 0 { - for yyj4849 := 0; yyj4849 < yyl4849; yyj4849++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk4849 = "" - } else { - yymk4849 = ResourceName(r.DecodeString()) - } - - if yymg4849 { - yymv4849 = yyv4849[yymk4849] - } else { - yymv4849 = pkg3_resource.Quantity{} - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv4849 = pkg3_resource.Quantity{} - } else { - yyv4851 := &yymv4849 - yym4852 := z.DecBinary() - _ = yym4852 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4851) { - } else if !yym4852 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4851) - } else { - z.DecFallback(yyv4851, false) - } - } - - if yyv4849 != nil { - yyv4849[yymk4849] = yymv4849 - } - 
} - } else if yyl4849 < 0 { - for yyj4849 := 0; !r.CheckBreak(); yyj4849++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk4849 = "" - } else { - yymk4849 = ResourceName(r.DecodeString()) - } - - if yymg4849 { - yymv4849 = yyv4849[yymk4849] - } else { - yymv4849 = pkg3_resource.Quantity{} - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv4849 = pkg3_resource.Quantity{} - } else { - yyv4854 := &yymv4849 - yym4855 := z.DecBinary() - _ = yym4855 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4854) { - } else if !yym4855 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4854) - } else { - z.DecFallback(yyv4854, false) - } - } - - if yyv4849 != nil { - yyv4849[yymk4849] = yymv4849 - } - } - } // else len==0: TODO: Should we clear map entries? - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) encSliceNode(v []Node, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4856 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4857 := &yyv4856 - yy4857.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNode(v *[]Node, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4858 := *v - yyh4858, yyl4858 := z.DecSliceHelperStart() - var yyc4858 bool - if yyl4858 == 0 { - if yyv4858 == nil { - yyv4858 = []Node{} - yyc4858 = true - } else if len(yyv4858) != 0 { - yyv4858 = yyv4858[:0] - yyc4858 = true - } - } else if yyl4858 > 0 { - var yyrr4858, yyrl4858 int - var yyrt4858 bool - if yyl4858 > cap(yyv4858) { - - yyrg4858 := len(yyv4858) > 0 - yyv24858 := yyv4858 - yyrl4858, yyrt4858 = z.DecInferLen(yyl4858, z.DecBasicHandle().MaxInitLen, 632) - if yyrt4858 { - if yyrl4858 <= cap(yyv4858) { - yyv4858 = yyv4858[:yyrl4858] - } else { - yyv4858 = make([]Node, yyrl4858) - } - } else { - yyv4858 = make([]Node, yyrl4858) - } - yyc4858 = true - yyrr4858 = len(yyv4858) - if yyrg4858 { - copy(yyv4858, yyv24858) - } - } else if yyl4858 != len(yyv4858) { - yyv4858 = yyv4858[:yyl4858] - yyc4858 = true - } - yyj4858 := 0 - for ; yyj4858 < yyrr4858; yyj4858++ { - yyh4858.ElemContainerState(yyj4858) - if r.TryDecodeAsNil() { - yyv4858[yyj4858] = Node{} - } else { - yyv4859 := &yyv4858[yyj4858] - yyv4859.CodecDecodeSelf(d) - } - - } - if yyrt4858 { - for ; yyj4858 < yyl4858; yyj4858++ { - yyv4858 = append(yyv4858, Node{}) - yyh4858.ElemContainerState(yyj4858) - if r.TryDecodeAsNil() { - yyv4858[yyj4858] = Node{} - } else { - yyv4860 := &yyv4858[yyj4858] - yyv4860.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4858 := 0 - for ; !r.CheckBreak(); yyj4858++ { - - if yyj4858 >= len(yyv4858) { - yyv4858 = append(yyv4858, Node{}) // var yyz4858 Node - yyc4858 = true - } - yyh4858.ElemContainerState(yyj4858) - if yyj4858 < len(yyv4858) { - if r.TryDecodeAsNil() { - yyv4858[yyj4858] = Node{} - } else { - yyv4861 := &yyv4858[yyj4858] - yyv4861.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4858 < len(yyv4858) { - yyv4858 = yyv4858[:yyj4858] - yyc4858 = true - } else if yyj4858 == 0 && yyv4858 == nil { - yyv4858 = []Node{} - yyc4858 = true - } - } - yyh4858.End() - if yyc4858 { - *v = yyv4858 - } -} - -func (x codecSelfer1234) encSliceFinalizerName(v []FinalizerName, e *codec1978.Encoder) { - var h codecSelfer1234 - 
z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4862 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv4862.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceFinalizerName(v *[]FinalizerName, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4863 := *v - yyh4863, yyl4863 := z.DecSliceHelperStart() - var yyc4863 bool - if yyl4863 == 0 { - if yyv4863 == nil { - yyv4863 = []FinalizerName{} - yyc4863 = true - } else if len(yyv4863) != 0 { - yyv4863 = yyv4863[:0] - yyc4863 = true - } - } else if yyl4863 > 0 { - var yyrr4863, yyrl4863 int - var yyrt4863 bool - if yyl4863 > cap(yyv4863) { - - yyrl4863, yyrt4863 = z.DecInferLen(yyl4863, z.DecBasicHandle().MaxInitLen, 16) - if yyrt4863 { - if yyrl4863 <= cap(yyv4863) { - yyv4863 = yyv4863[:yyrl4863] - } else { - yyv4863 = make([]FinalizerName, yyrl4863) - } - } else { - yyv4863 = make([]FinalizerName, yyrl4863) - } - yyc4863 = true - yyrr4863 = len(yyv4863) - } else if yyl4863 != len(yyv4863) { - yyv4863 = yyv4863[:yyl4863] - yyc4863 = true - } - yyj4863 := 0 - for ; yyj4863 < yyrr4863; yyj4863++ { - yyh4863.ElemContainerState(yyj4863) - if r.TryDecodeAsNil() { - yyv4863[yyj4863] = "" - } else { - yyv4863[yyj4863] = FinalizerName(r.DecodeString()) - } - - } - if yyrt4863 { - for ; yyj4863 < yyl4863; yyj4863++ { - yyv4863 = append(yyv4863, "") - yyh4863.ElemContainerState(yyj4863) - if r.TryDecodeAsNil() { - yyv4863[yyj4863] = "" - } else { - yyv4863[yyj4863] = FinalizerName(r.DecodeString()) - } - - } - } - - } else { - yyj4863 := 0 - for ; !r.CheckBreak(); yyj4863++ { - - if yyj4863 >= len(yyv4863) { - yyv4863 = append(yyv4863, "") // var yyz4863 FinalizerName - yyc4863 = true - } - yyh4863.ElemContainerState(yyj4863) - if yyj4863 < len(yyv4863) { - if r.TryDecodeAsNil() { - yyv4863[yyj4863] = "" - } else { - yyv4863[yyj4863] = FinalizerName(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj4863 < len(yyv4863) { - yyv4863 = yyv4863[:yyj4863] - yyc4863 = true - } else if yyj4863 == 0 && yyv4863 == nil { - yyv4863 = []FinalizerName{} - yyc4863 = true - } - } - yyh4863.End() - if yyc4863 { - *v = yyv4863 - } -} - -func (x codecSelfer1234) encSliceNamespace(v []Namespace, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4867 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4868 := &yyv4867 - yy4868.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNamespace(v *[]Namespace, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4869 := *v - yyh4869, yyl4869 := z.DecSliceHelperStart() - var yyc4869 bool - if yyl4869 == 0 { - if yyv4869 == nil { - yyv4869 = []Namespace{} - yyc4869 = true - } else if len(yyv4869) != 0 { - yyv4869 = yyv4869[:0] - yyc4869 = true - } - } else if yyl4869 > 0 { - var yyrr4869, yyrl4869 int - var yyrt4869 bool - if yyl4869 > cap(yyv4869) { - - yyrg4869 := len(yyv4869) > 0 - yyv24869 := yyv4869 - yyrl4869, yyrt4869 = z.DecInferLen(yyl4869, z.DecBasicHandle().MaxInitLen, 296) - if yyrt4869 { - if yyrl4869 <= cap(yyv4869) { - yyv4869 = yyv4869[:yyrl4869] - } else { - yyv4869 = make([]Namespace, yyrl4869) - } - } else { - yyv4869 = 
make([]Namespace, yyrl4869) - } - yyc4869 = true - yyrr4869 = len(yyv4869) - if yyrg4869 { - copy(yyv4869, yyv24869) - } - } else if yyl4869 != len(yyv4869) { - yyv4869 = yyv4869[:yyl4869] - yyc4869 = true - } - yyj4869 := 0 - for ; yyj4869 < yyrr4869; yyj4869++ { - yyh4869.ElemContainerState(yyj4869) - if r.TryDecodeAsNil() { - yyv4869[yyj4869] = Namespace{} - } else { - yyv4870 := &yyv4869[yyj4869] - yyv4870.CodecDecodeSelf(d) - } - - } - if yyrt4869 { - for ; yyj4869 < yyl4869; yyj4869++ { - yyv4869 = append(yyv4869, Namespace{}) - yyh4869.ElemContainerState(yyj4869) - if r.TryDecodeAsNil() { - yyv4869[yyj4869] = Namespace{} - } else { - yyv4871 := &yyv4869[yyj4869] - yyv4871.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4869 := 0 - for ; !r.CheckBreak(); yyj4869++ { - - if yyj4869 >= len(yyv4869) { - yyv4869 = append(yyv4869, Namespace{}) // var yyz4869 Namespace - yyc4869 = true - } - yyh4869.ElemContainerState(yyj4869) - if yyj4869 < len(yyv4869) { - if r.TryDecodeAsNil() { - yyv4869[yyj4869] = Namespace{} - } else { - yyv4872 := &yyv4869[yyj4869] - yyv4872.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4869 < len(yyv4869) { - yyv4869 = yyv4869[:yyj4869] - yyc4869 = true - } else if yyj4869 == 0 && yyv4869 == nil { - yyv4869 = []Namespace{} - yyc4869 = true - } - } - yyh4869.End() - if yyc4869 { - *v = yyv4869 - } -} - -func (x codecSelfer1234) encSliceEvent(v []Event, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4873 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4874 := &yyv4873 - yy4874.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEvent(v *[]Event, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4875 := *v - yyh4875, yyl4875 := z.DecSliceHelperStart() - var yyc4875 bool - if yyl4875 == 0 { - if yyv4875 == nil { - yyv4875 = []Event{} - yyc4875 = true - } else if len(yyv4875) != 0 { - yyv4875 = yyv4875[:0] - yyc4875 = true - } - } else if yyl4875 > 0 { - var yyrr4875, yyrl4875 int - var yyrt4875 bool - if yyl4875 > cap(yyv4875) { - - yyrg4875 := len(yyv4875) > 0 - yyv24875 := yyv4875 - yyrl4875, yyrt4875 = z.DecInferLen(yyl4875, z.DecBasicHandle().MaxInitLen, 504) - if yyrt4875 { - if yyrl4875 <= cap(yyv4875) { - yyv4875 = yyv4875[:yyrl4875] - } else { - yyv4875 = make([]Event, yyrl4875) - } - } else { - yyv4875 = make([]Event, yyrl4875) - } - yyc4875 = true - yyrr4875 = len(yyv4875) - if yyrg4875 { - copy(yyv4875, yyv24875) - } - } else if yyl4875 != len(yyv4875) { - yyv4875 = yyv4875[:yyl4875] - yyc4875 = true - } - yyj4875 := 0 - for ; yyj4875 < yyrr4875; yyj4875++ { - yyh4875.ElemContainerState(yyj4875) - if r.TryDecodeAsNil() { - yyv4875[yyj4875] = Event{} - } else { - yyv4876 := &yyv4875[yyj4875] - yyv4876.CodecDecodeSelf(d) - } - - } - if yyrt4875 { - for ; yyj4875 < yyl4875; yyj4875++ { - yyv4875 = append(yyv4875, Event{}) - yyh4875.ElemContainerState(yyj4875) - if r.TryDecodeAsNil() { - yyv4875[yyj4875] = Event{} - } else { - yyv4877 := &yyv4875[yyj4875] - yyv4877.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4875 := 0 - for ; !r.CheckBreak(); yyj4875++ { - - if yyj4875 >= len(yyv4875) { - yyv4875 = append(yyv4875, Event{}) // var yyz4875 Event - yyc4875 = true - } - yyh4875.ElemContainerState(yyj4875) - if yyj4875 < len(yyv4875) { - if r.TryDecodeAsNil() { - 
yyv4875[yyj4875] = Event{} - } else { - yyv4878 := &yyv4875[yyj4875] - yyv4878.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4875 < len(yyv4875) { - yyv4875 = yyv4875[:yyj4875] - yyc4875 = true - } else if yyj4875 == 0 && yyv4875 == nil { - yyv4875 = []Event{} - yyc4875 = true - } - } - yyh4875.End() - if yyc4875 { - *v = yyv4875 - } -} - -func (x codecSelfer1234) encSliceruntime_Object(v []pkg7_runtime.Object, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4879 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyv4879 == nil { - r.EncodeNil() - } else { - yym4880 := z.EncBinary() - _ = yym4880 - if false { - } else if z.HasExtensions() && z.EncExt(yyv4879) { - } else { - z.EncFallback(yyv4879) - } - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceruntime_Object(v *[]pkg7_runtime.Object, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4881 := *v - yyh4881, yyl4881 := z.DecSliceHelperStart() - var yyc4881 bool - if yyl4881 == 0 { - if yyv4881 == nil { - yyv4881 = []pkg7_runtime.Object{} - yyc4881 = true - } else if len(yyv4881) != 0 { - yyv4881 = yyv4881[:0] - yyc4881 = true - } - } else if yyl4881 > 0 { - var yyrr4881, yyrl4881 int - var yyrt4881 bool - if yyl4881 > cap(yyv4881) { - - yyrg4881 := len(yyv4881) > 0 - yyv24881 := yyv4881 - yyrl4881, yyrt4881 = z.DecInferLen(yyl4881, z.DecBasicHandle().MaxInitLen, 16) - if yyrt4881 { - if yyrl4881 <= cap(yyv4881) { - yyv4881 = yyv4881[:yyrl4881] - } else { - yyv4881 = make([]pkg7_runtime.Object, yyrl4881) - } - } else { - yyv4881 = make([]pkg7_runtime.Object, yyrl4881) - } - yyc4881 = true - yyrr4881 = len(yyv4881) - if yyrg4881 { - copy(yyv4881, yyv24881) - } - } else if yyl4881 != len(yyv4881) { - yyv4881 = yyv4881[:yyl4881] - yyc4881 = true - } - yyj4881 := 0 - for ; yyj4881 < yyrr4881; yyj4881++ { - yyh4881.ElemContainerState(yyj4881) - if r.TryDecodeAsNil() { - yyv4881[yyj4881] = nil - } else { - yyv4882 := &yyv4881[yyj4881] - yym4883 := z.DecBinary() - _ = yym4883 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4882) { - } else { - z.DecFallback(yyv4882, true) - } - } - - } - if yyrt4881 { - for ; yyj4881 < yyl4881; yyj4881++ { - yyv4881 = append(yyv4881, nil) - yyh4881.ElemContainerState(yyj4881) - if r.TryDecodeAsNil() { - yyv4881[yyj4881] = nil - } else { - yyv4884 := &yyv4881[yyj4881] - yym4885 := z.DecBinary() - _ = yym4885 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4884) { - } else { - z.DecFallback(yyv4884, true) - } - } - - } - } - - } else { - yyj4881 := 0 - for ; !r.CheckBreak(); yyj4881++ { - - if yyj4881 >= len(yyv4881) { - yyv4881 = append(yyv4881, nil) // var yyz4881 pkg7_runtime.Object - yyc4881 = true - } - yyh4881.ElemContainerState(yyj4881) - if yyj4881 < len(yyv4881) { - if r.TryDecodeAsNil() { - yyv4881[yyj4881] = nil - } else { - yyv4886 := &yyv4881[yyj4881] - yym4887 := z.DecBinary() - _ = yym4887 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4886) { - } else { - z.DecFallback(yyv4886, true) - } - } - - } else { - z.DecSwallow() - } - - } - if yyj4881 < len(yyv4881) { - yyv4881 = yyv4881[:yyj4881] - yyc4881 = true - } else if yyj4881 == 0 && yyv4881 == nil { - yyv4881 = []pkg7_runtime.Object{} - yyc4881 = true - } - } - yyh4881.End() - if yyc4881 { - *v = yyv4881 - } -} - -func (x codecSelfer1234) 
encSliceLimitRangeItem(v []LimitRangeItem, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4888 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4889 := &yyv4888 - yy4889.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceLimitRangeItem(v *[]LimitRangeItem, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4890 := *v - yyh4890, yyl4890 := z.DecSliceHelperStart() - var yyc4890 bool - if yyl4890 == 0 { - if yyv4890 == nil { - yyv4890 = []LimitRangeItem{} - yyc4890 = true - } else if len(yyv4890) != 0 { - yyv4890 = yyv4890[:0] - yyc4890 = true - } - } else if yyl4890 > 0 { - var yyrr4890, yyrl4890 int - var yyrt4890 bool - if yyl4890 > cap(yyv4890) { - - yyrg4890 := len(yyv4890) > 0 - yyv24890 := yyv4890 - yyrl4890, yyrt4890 = z.DecInferLen(yyl4890, z.DecBasicHandle().MaxInitLen, 56) - if yyrt4890 { - if yyrl4890 <= cap(yyv4890) { - yyv4890 = yyv4890[:yyrl4890] - } else { - yyv4890 = make([]LimitRangeItem, yyrl4890) - } - } else { - yyv4890 = make([]LimitRangeItem, yyrl4890) - } - yyc4890 = true - yyrr4890 = len(yyv4890) - if yyrg4890 { - copy(yyv4890, yyv24890) - } - } else if yyl4890 != len(yyv4890) { - yyv4890 = yyv4890[:yyl4890] - yyc4890 = true - } - yyj4890 := 0 - for ; yyj4890 < yyrr4890; yyj4890++ { - yyh4890.ElemContainerState(yyj4890) - if r.TryDecodeAsNil() { - yyv4890[yyj4890] = LimitRangeItem{} - } else { - yyv4891 := &yyv4890[yyj4890] - yyv4891.CodecDecodeSelf(d) - } - - } - if yyrt4890 { - for ; yyj4890 < yyl4890; yyj4890++ { - yyv4890 = append(yyv4890, LimitRangeItem{}) - yyh4890.ElemContainerState(yyj4890) - if r.TryDecodeAsNil() { - yyv4890[yyj4890] = LimitRangeItem{} - } else { - yyv4892 := &yyv4890[yyj4890] - yyv4892.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4890 := 0 - for ; !r.CheckBreak(); yyj4890++ { - - if yyj4890 >= len(yyv4890) { - yyv4890 = append(yyv4890, LimitRangeItem{}) // var yyz4890 LimitRangeItem - yyc4890 = true - } - yyh4890.ElemContainerState(yyj4890) - if yyj4890 < len(yyv4890) { - if r.TryDecodeAsNil() { - yyv4890[yyj4890] = LimitRangeItem{} - } else { - yyv4893 := &yyv4890[yyj4890] - yyv4893.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4890 < len(yyv4890) { - yyv4890 = yyv4890[:yyj4890] - yyc4890 = true - } else if yyj4890 == 0 && yyv4890 == nil { - yyv4890 = []LimitRangeItem{} - yyc4890 = true - } - } - yyh4890.End() - if yyc4890 { - *v = yyv4890 - } -} - -func (x codecSelfer1234) encSliceLimitRange(v []LimitRange, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4894 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4895 := &yyv4894 - yy4895.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceLimitRange(v *[]LimitRange, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4896 := *v - yyh4896, yyl4896 := z.DecSliceHelperStart() - var yyc4896 bool - if yyl4896 == 0 { - if yyv4896 == nil { - yyv4896 = []LimitRange{} - yyc4896 = true - } else if len(yyv4896) != 0 { - yyv4896 = yyv4896[:0] - yyc4896 = true - } - } else if yyl4896 > 0 { - var yyrr4896, yyrl4896 int - var yyrt4896 bool - if yyl4896 > 
cap(yyv4896) { - - yyrg4896 := len(yyv4896) > 0 - yyv24896 := yyv4896 - yyrl4896, yyrt4896 = z.DecInferLen(yyl4896, z.DecBasicHandle().MaxInitLen, 280) - if yyrt4896 { - if yyrl4896 <= cap(yyv4896) { - yyv4896 = yyv4896[:yyrl4896] - } else { - yyv4896 = make([]LimitRange, yyrl4896) - } - } else { - yyv4896 = make([]LimitRange, yyrl4896) - } - yyc4896 = true - yyrr4896 = len(yyv4896) - if yyrg4896 { - copy(yyv4896, yyv24896) - } - } else if yyl4896 != len(yyv4896) { - yyv4896 = yyv4896[:yyl4896] - yyc4896 = true - } - yyj4896 := 0 - for ; yyj4896 < yyrr4896; yyj4896++ { - yyh4896.ElemContainerState(yyj4896) - if r.TryDecodeAsNil() { - yyv4896[yyj4896] = LimitRange{} - } else { - yyv4897 := &yyv4896[yyj4896] - yyv4897.CodecDecodeSelf(d) - } - - } - if yyrt4896 { - for ; yyj4896 < yyl4896; yyj4896++ { - yyv4896 = append(yyv4896, LimitRange{}) - yyh4896.ElemContainerState(yyj4896) - if r.TryDecodeAsNil() { - yyv4896[yyj4896] = LimitRange{} - } else { - yyv4898 := &yyv4896[yyj4896] - yyv4898.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4896 := 0 - for ; !r.CheckBreak(); yyj4896++ { - - if yyj4896 >= len(yyv4896) { - yyv4896 = append(yyv4896, LimitRange{}) // var yyz4896 LimitRange - yyc4896 = true - } - yyh4896.ElemContainerState(yyj4896) - if yyj4896 < len(yyv4896) { - if r.TryDecodeAsNil() { - yyv4896[yyj4896] = LimitRange{} - } else { - yyv4899 := &yyv4896[yyj4896] - yyv4899.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4896 < len(yyv4896) { - yyv4896 = yyv4896[:yyj4896] - yyc4896 = true - } else if yyj4896 == 0 && yyv4896 == nil { - yyv4896 = []LimitRange{} - yyc4896 = true - } - } - yyh4896.End() - if yyc4896 { - *v = yyv4896 - } -} - -func (x codecSelfer1234) encSliceResourceQuotaScope(v []ResourceQuotaScope, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4900 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv4900.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceResourceQuotaScope(v *[]ResourceQuotaScope, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4901 := *v - yyh4901, yyl4901 := z.DecSliceHelperStart() - var yyc4901 bool - if yyl4901 == 0 { - if yyv4901 == nil { - yyv4901 = []ResourceQuotaScope{} - yyc4901 = true - } else if len(yyv4901) != 0 { - yyv4901 = yyv4901[:0] - yyc4901 = true - } - } else if yyl4901 > 0 { - var yyrr4901, yyrl4901 int - var yyrt4901 bool - if yyl4901 > cap(yyv4901) { - - yyrl4901, yyrt4901 = z.DecInferLen(yyl4901, z.DecBasicHandle().MaxInitLen, 16) - if yyrt4901 { - if yyrl4901 <= cap(yyv4901) { - yyv4901 = yyv4901[:yyrl4901] - } else { - yyv4901 = make([]ResourceQuotaScope, yyrl4901) - } - } else { - yyv4901 = make([]ResourceQuotaScope, yyrl4901) - } - yyc4901 = true - yyrr4901 = len(yyv4901) - } else if yyl4901 != len(yyv4901) { - yyv4901 = yyv4901[:yyl4901] - yyc4901 = true - } - yyj4901 := 0 - for ; yyj4901 < yyrr4901; yyj4901++ { - yyh4901.ElemContainerState(yyj4901) - if r.TryDecodeAsNil() { - yyv4901[yyj4901] = "" - } else { - yyv4901[yyj4901] = ResourceQuotaScope(r.DecodeString()) - } - - } - if yyrt4901 { - for ; yyj4901 < yyl4901; yyj4901++ { - yyv4901 = append(yyv4901, "") - yyh4901.ElemContainerState(yyj4901) - if r.TryDecodeAsNil() { - yyv4901[yyj4901] = "" - } else { - yyv4901[yyj4901] = ResourceQuotaScope(r.DecodeString()) - } - - } - } - - } 
else { - yyj4901 := 0 - for ; !r.CheckBreak(); yyj4901++ { - - if yyj4901 >= len(yyv4901) { - yyv4901 = append(yyv4901, "") // var yyz4901 ResourceQuotaScope - yyc4901 = true - } - yyh4901.ElemContainerState(yyj4901) - if yyj4901 < len(yyv4901) { - if r.TryDecodeAsNil() { - yyv4901[yyj4901] = "" - } else { - yyv4901[yyj4901] = ResourceQuotaScope(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj4901 < len(yyv4901) { - yyv4901 = yyv4901[:yyj4901] - yyc4901 = true - } else if yyj4901 == 0 && yyv4901 == nil { - yyv4901 = []ResourceQuotaScope{} - yyc4901 = true - } - } - yyh4901.End() - if yyc4901 { - *v = yyv4901 - } -} - -func (x codecSelfer1234) encSliceResourceQuota(v []ResourceQuota, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4905 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4906 := &yyv4905 - yy4906.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceResourceQuota(v *[]ResourceQuota, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4907 := *v - yyh4907, yyl4907 := z.DecSliceHelperStart() - var yyc4907 bool - if yyl4907 == 0 { - if yyv4907 == nil { - yyv4907 = []ResourceQuota{} - yyc4907 = true - } else if len(yyv4907) != 0 { - yyv4907 = yyv4907[:0] - yyc4907 = true - } - } else if yyl4907 > 0 { - var yyrr4907, yyrl4907 int - var yyrt4907 bool - if yyl4907 > cap(yyv4907) { - - yyrg4907 := len(yyv4907) > 0 - yyv24907 := yyv4907 - yyrl4907, yyrt4907 = z.DecInferLen(yyl4907, z.DecBasicHandle().MaxInitLen, 304) - if yyrt4907 { - if yyrl4907 <= cap(yyv4907) { - yyv4907 = yyv4907[:yyrl4907] - } else { - yyv4907 = make([]ResourceQuota, yyrl4907) - } - } else { - yyv4907 = make([]ResourceQuota, yyrl4907) - } - yyc4907 = true - yyrr4907 = len(yyv4907) - if yyrg4907 { - copy(yyv4907, yyv24907) - } - } else if yyl4907 != len(yyv4907) { - yyv4907 = yyv4907[:yyl4907] - yyc4907 = true - } - yyj4907 := 0 - for ; yyj4907 < yyrr4907; yyj4907++ { - yyh4907.ElemContainerState(yyj4907) - if r.TryDecodeAsNil() { - yyv4907[yyj4907] = ResourceQuota{} - } else { - yyv4908 := &yyv4907[yyj4907] - yyv4908.CodecDecodeSelf(d) - } - - } - if yyrt4907 { - for ; yyj4907 < yyl4907; yyj4907++ { - yyv4907 = append(yyv4907, ResourceQuota{}) - yyh4907.ElemContainerState(yyj4907) - if r.TryDecodeAsNil() { - yyv4907[yyj4907] = ResourceQuota{} - } else { - yyv4909 := &yyv4907[yyj4907] - yyv4909.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4907 := 0 - for ; !r.CheckBreak(); yyj4907++ { - - if yyj4907 >= len(yyv4907) { - yyv4907 = append(yyv4907, ResourceQuota{}) // var yyz4907 ResourceQuota - yyc4907 = true - } - yyh4907.ElemContainerState(yyj4907) - if yyj4907 < len(yyv4907) { - if r.TryDecodeAsNil() { - yyv4907[yyj4907] = ResourceQuota{} - } else { - yyv4910 := &yyv4907[yyj4907] - yyv4910.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4907 < len(yyv4907) { - yyv4907 = yyv4907[:yyj4907] - yyc4907 = true - } else if yyj4907 == 0 && yyv4907 == nil { - yyv4907 = []ResourceQuota{} - yyc4907 = true - } - } - yyh4907.End() - if yyc4907 { - *v = yyv4907 - } -} - -func (x codecSelfer1234) encMapstringSliceuint8(v map[string][]uint8, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk4911, yyv4911 := range v { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym4912 := z.EncBinary() - _ = yym4912 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyk4911)) - } - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyv4911 == nil { - r.EncodeNil() - } else { - yym4913 := z.EncBinary() - _ = yym4913 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(yyv4911)) - } - } - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decMapstringSliceuint8(v *map[string][]uint8, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4914 := *v - yyl4914 := r.ReadMapStart() - yybh4914 := z.DecBasicHandle() - if yyv4914 == nil { - yyrl4914, _ := z.DecInferLen(yyl4914, yybh4914.MaxInitLen, 40) - yyv4914 = make(map[string][]uint8, yyrl4914) - *v = yyv4914 - } - var yymk4914 string - var yymv4914 []uint8 - var yymg4914 bool - if yybh4914.MapValueReset { - yymg4914 = true - } - if yyl4914 > 0 { - for yyj4914 := 0; yyj4914 < yyl4914; yyj4914++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk4914 = "" - } else { - yymk4914 = string(r.DecodeString()) - } - - if yymg4914 { - yymv4914 = yyv4914[yymk4914] - } else { - yymv4914 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv4914 = nil - } else { - yyv4916 := &yymv4914 - yym4917 := z.DecBinary() - _ = yym4917 - if false { - } else { - *yyv4916 = r.DecodeBytes(*(*[]byte)(yyv4916), false, false) - } - } - - if yyv4914 != nil { - yyv4914[yymk4914] = yymv4914 - } - } - } else if yyl4914 < 0 { - for yyj4914 := 0; !r.CheckBreak(); yyj4914++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk4914 = "" - } else { - yymk4914 = string(r.DecodeString()) - } - - if yymg4914 { - yymv4914 = yyv4914[yymk4914] - } else { - yymv4914 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv4914 = nil - } else { - yyv4919 := &yymv4914 - yym4920 := z.DecBinary() - _ = yym4920 - if false { - } else { - *yyv4919 = r.DecodeBytes(*(*[]byte)(yyv4919), false, false) - } - } - - if yyv4914 != nil { - yyv4914[yymk4914] = yymv4914 - } - } - } // else len==0: TODO: Should we clear map entries? 
- z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) encSliceSecret(v []Secret, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4921 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4922 := &yyv4921 - yy4922.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceSecret(v *[]Secret, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4923 := *v - yyh4923, yyl4923 := z.DecSliceHelperStart() - var yyc4923 bool - if yyl4923 == 0 { - if yyv4923 == nil { - yyv4923 = []Secret{} - yyc4923 = true - } else if len(yyv4923) != 0 { - yyv4923 = yyv4923[:0] - yyc4923 = true - } - } else if yyl4923 > 0 { - var yyrr4923, yyrl4923 int - var yyrt4923 bool - if yyl4923 > cap(yyv4923) { - - yyrg4923 := len(yyv4923) > 0 - yyv24923 := yyv4923 - yyrl4923, yyrt4923 = z.DecInferLen(yyl4923, z.DecBasicHandle().MaxInitLen, 280) - if yyrt4923 { - if yyrl4923 <= cap(yyv4923) { - yyv4923 = yyv4923[:yyrl4923] - } else { - yyv4923 = make([]Secret, yyrl4923) - } - } else { - yyv4923 = make([]Secret, yyrl4923) - } - yyc4923 = true - yyrr4923 = len(yyv4923) - if yyrg4923 { - copy(yyv4923, yyv24923) - } - } else if yyl4923 != len(yyv4923) { - yyv4923 = yyv4923[:yyl4923] - yyc4923 = true - } - yyj4923 := 0 - for ; yyj4923 < yyrr4923; yyj4923++ { - yyh4923.ElemContainerState(yyj4923) - if r.TryDecodeAsNil() { - yyv4923[yyj4923] = Secret{} - } else { - yyv4924 := &yyv4923[yyj4923] - yyv4924.CodecDecodeSelf(d) - } - - } - if yyrt4923 { - for ; yyj4923 < yyl4923; yyj4923++ { - yyv4923 = append(yyv4923, Secret{}) - yyh4923.ElemContainerState(yyj4923) - if r.TryDecodeAsNil() { - yyv4923[yyj4923] = Secret{} - } else { - yyv4925 := &yyv4923[yyj4923] - yyv4925.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4923 := 0 - for ; !r.CheckBreak(); yyj4923++ { - - if yyj4923 >= len(yyv4923) { - yyv4923 = append(yyv4923, Secret{}) // var yyz4923 Secret - yyc4923 = true - } - yyh4923.ElemContainerState(yyj4923) - if yyj4923 < len(yyv4923) { - if r.TryDecodeAsNil() { - yyv4923[yyj4923] = Secret{} - } else { - yyv4926 := &yyv4923[yyj4923] - yyv4926.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4923 < len(yyv4923) { - yyv4923 = yyv4923[:yyj4923] - yyc4923 = true - } else if yyj4923 == 0 && yyv4923 == nil { - yyv4923 = []Secret{} - yyc4923 = true - } - } - yyh4923.End() - if yyc4923 { - *v = yyv4923 - } -} - -func (x codecSelfer1234) encSliceConfigMap(v []ConfigMap, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4927 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4928 := &yyv4927 - yy4928.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceConfigMap(v *[]ConfigMap, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4929 := *v - yyh4929, yyl4929 := z.DecSliceHelperStart() - var yyc4929 bool - if yyl4929 == 0 { - if yyv4929 == nil { - yyv4929 = []ConfigMap{} - yyc4929 = true - } else if len(yyv4929) != 0 { - yyv4929 = yyv4929[:0] - yyc4929 = true - } - } else if yyl4929 > 0 { - var yyrr4929, yyrl4929 int - var yyrt4929 bool - if yyl4929 > cap(yyv4929) { - - yyrg4929 
:= len(yyv4929) > 0 - yyv24929 := yyv4929 - yyrl4929, yyrt4929 = z.DecInferLen(yyl4929, z.DecBasicHandle().MaxInitLen, 264) - if yyrt4929 { - if yyrl4929 <= cap(yyv4929) { - yyv4929 = yyv4929[:yyrl4929] - } else { - yyv4929 = make([]ConfigMap, yyrl4929) - } - } else { - yyv4929 = make([]ConfigMap, yyrl4929) - } - yyc4929 = true - yyrr4929 = len(yyv4929) - if yyrg4929 { - copy(yyv4929, yyv24929) - } - } else if yyl4929 != len(yyv4929) { - yyv4929 = yyv4929[:yyl4929] - yyc4929 = true - } - yyj4929 := 0 - for ; yyj4929 < yyrr4929; yyj4929++ { - yyh4929.ElemContainerState(yyj4929) - if r.TryDecodeAsNil() { - yyv4929[yyj4929] = ConfigMap{} - } else { - yyv4930 := &yyv4929[yyj4929] - yyv4930.CodecDecodeSelf(d) - } - - } - if yyrt4929 { - for ; yyj4929 < yyl4929; yyj4929++ { - yyv4929 = append(yyv4929, ConfigMap{}) - yyh4929.ElemContainerState(yyj4929) - if r.TryDecodeAsNil() { - yyv4929[yyj4929] = ConfigMap{} - } else { - yyv4931 := &yyv4929[yyj4929] - yyv4931.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4929 := 0 - for ; !r.CheckBreak(); yyj4929++ { - - if yyj4929 >= len(yyv4929) { - yyv4929 = append(yyv4929, ConfigMap{}) // var yyz4929 ConfigMap - yyc4929 = true - } - yyh4929.ElemContainerState(yyj4929) - if yyj4929 < len(yyv4929) { - if r.TryDecodeAsNil() { - yyv4929[yyj4929] = ConfigMap{} - } else { - yyv4932 := &yyv4929[yyj4929] - yyv4932.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4929 < len(yyv4929) { - yyv4929 = yyv4929[:yyj4929] - yyc4929 = true - } else if yyj4929 == 0 && yyv4929 == nil { - yyv4929 = []ConfigMap{} - yyc4929 = true - } - } - yyh4929.End() - if yyc4929 { - *v = yyv4929 - } -} - -func (x codecSelfer1234) encSliceComponentCondition(v []ComponentCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4933 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4934 := &yyv4933 - yy4934.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceComponentCondition(v *[]ComponentCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4935 := *v - yyh4935, yyl4935 := z.DecSliceHelperStart() - var yyc4935 bool - if yyl4935 == 0 { - if yyv4935 == nil { - yyv4935 = []ComponentCondition{} - yyc4935 = true - } else if len(yyv4935) != 0 { - yyv4935 = yyv4935[:0] - yyc4935 = true - } - } else if yyl4935 > 0 { - var yyrr4935, yyrl4935 int - var yyrt4935 bool - if yyl4935 > cap(yyv4935) { - - yyrg4935 := len(yyv4935) > 0 - yyv24935 := yyv4935 - yyrl4935, yyrt4935 = z.DecInferLen(yyl4935, z.DecBasicHandle().MaxInitLen, 64) - if yyrt4935 { - if yyrl4935 <= cap(yyv4935) { - yyv4935 = yyv4935[:yyrl4935] - } else { - yyv4935 = make([]ComponentCondition, yyrl4935) - } - } else { - yyv4935 = make([]ComponentCondition, yyrl4935) - } - yyc4935 = true - yyrr4935 = len(yyv4935) - if yyrg4935 { - copy(yyv4935, yyv24935) - } - } else if yyl4935 != len(yyv4935) { - yyv4935 = yyv4935[:yyl4935] - yyc4935 = true - } - yyj4935 := 0 - for ; yyj4935 < yyrr4935; yyj4935++ { - yyh4935.ElemContainerState(yyj4935) - if r.TryDecodeAsNil() { - yyv4935[yyj4935] = ComponentCondition{} - } else { - yyv4936 := &yyv4935[yyj4935] - yyv4936.CodecDecodeSelf(d) - } - - } - if yyrt4935 { - for ; yyj4935 < yyl4935; yyj4935++ { - yyv4935 = append(yyv4935, ComponentCondition{}) - yyh4935.ElemContainerState(yyj4935) - if 
r.TryDecodeAsNil() { - yyv4935[yyj4935] = ComponentCondition{} - } else { - yyv4937 := &yyv4935[yyj4935] - yyv4937.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4935 := 0 - for ; !r.CheckBreak(); yyj4935++ { - - if yyj4935 >= len(yyv4935) { - yyv4935 = append(yyv4935, ComponentCondition{}) // var yyz4935 ComponentCondition - yyc4935 = true - } - yyh4935.ElemContainerState(yyj4935) - if yyj4935 < len(yyv4935) { - if r.TryDecodeAsNil() { - yyv4935[yyj4935] = ComponentCondition{} - } else { - yyv4938 := &yyv4935[yyj4935] - yyv4938.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4935 < len(yyv4935) { - yyv4935 = yyv4935[:yyj4935] - yyc4935 = true - } else if yyj4935 == 0 && yyv4935 == nil { - yyv4935 = []ComponentCondition{} - yyc4935 = true - } - } - yyh4935.End() - if yyc4935 { - *v = yyv4935 - } -} - -func (x codecSelfer1234) encSliceComponentStatus(v []ComponentStatus, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv4939 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4940 := &yyv4939 - yy4940.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceComponentStatus(v *[]ComponentStatus, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv4941 := *v - yyh4941, yyl4941 := z.DecSliceHelperStart() - var yyc4941 bool - if yyl4941 == 0 { - if yyv4941 == nil { - yyv4941 = []ComponentStatus{} - yyc4941 = true - } else if len(yyv4941) != 0 { - yyv4941 = yyv4941[:0] - yyc4941 = true - } - } else if yyl4941 > 0 { - var yyrr4941, yyrl4941 int - var yyrt4941 bool - if yyl4941 > cap(yyv4941) { - - yyrg4941 := len(yyv4941) > 0 - yyv24941 := yyv4941 - yyrl4941, yyrt4941 = z.DecInferLen(yyl4941, z.DecBasicHandle().MaxInitLen, 280) - if yyrt4941 { - if yyrl4941 <= cap(yyv4941) { - yyv4941 = yyv4941[:yyrl4941] - } else { - yyv4941 = make([]ComponentStatus, yyrl4941) - } - } else { - yyv4941 = make([]ComponentStatus, yyrl4941) - } - yyc4941 = true - yyrr4941 = len(yyv4941) - if yyrg4941 { - copy(yyv4941, yyv24941) - } - } else if yyl4941 != len(yyv4941) { - yyv4941 = yyv4941[:yyl4941] - yyc4941 = true - } - yyj4941 := 0 - for ; yyj4941 < yyrr4941; yyj4941++ { - yyh4941.ElemContainerState(yyj4941) - if r.TryDecodeAsNil() { - yyv4941[yyj4941] = ComponentStatus{} - } else { - yyv4942 := &yyv4941[yyj4941] - yyv4942.CodecDecodeSelf(d) - } - - } - if yyrt4941 { - for ; yyj4941 < yyl4941; yyj4941++ { - yyv4941 = append(yyv4941, ComponentStatus{}) - yyh4941.ElemContainerState(yyj4941) - if r.TryDecodeAsNil() { - yyv4941[yyj4941] = ComponentStatus{} - } else { - yyv4943 := &yyv4941[yyj4941] - yyv4943.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj4941 := 0 - for ; !r.CheckBreak(); yyj4941++ { - - if yyj4941 >= len(yyv4941) { - yyv4941 = append(yyv4941, ComponentStatus{}) // var yyz4941 ComponentStatus - yyc4941 = true - } - yyh4941.ElemContainerState(yyj4941) - if yyj4941 < len(yyv4941) { - if r.TryDecodeAsNil() { - yyv4941[yyj4941] = ComponentStatus{} - } else { - yyv4944 := &yyv4941[yyj4941] - yyv4944.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj4941 < len(yyv4941) { - yyv4941 = yyv4941[:yyj4941] - yyc4941 = true - } else if yyj4941 == 0 && yyv4941 == nil { - yyv4941 = []ComponentStatus{} - yyc4941 = true - } - } - yyh4941.End() - if yyc4941 { - *v = yyv4941 - } -} diff --git 
a/vendor/k8s.io/kubernetes/pkg/api/types.go b/vendor/k8s.io/kubernetes/pkg/api/types.go index 6599fd6b3d..4f42369508 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/types.go +++ b/vendor/k8s.io/kubernetes/pkg/api/types.go @@ -17,13 +17,13 @@ limitations under the License. package api import ( - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util/intstr" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" ) // Common string formats @@ -57,13 +57,14 @@ import ( // ObjectMeta is metadata that all persisted resources must have, which includes all objects // users must create. +// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. type ObjectMeta struct { // Name is unique within a namespace. Name is required when creating resources, although // some resources may allow a client to request the generation of an appropriate name // automatically. Name is primarily intended for creation idempotence and configuration // definition. // +optional - Name string `json:"name,omitempty"` + Name string // GenerateName indicates that the name should be made unique by the server prior to persisting // it. A non-empty value for the field indicates the name will be made unique (and the name @@ -77,42 +78,42 @@ type ObjectMeta struct { // ServerTimeout indicating a unique name could not be found in the time allotted, and the client // should retry (optionally after the time indicated in the Retry-After header). // +optional - GenerateName string `json:"generateName,omitempty"` + GenerateName string // Namespace defines the space within which name must be unique. An empty namespace is // equivalent to the "default" namespace, but "default" is the canonical representation. // Not all objects are required to be scoped to a namespace - the value of this field for // those objects will be empty. // +optional - Namespace string `json:"namespace,omitempty"` + Namespace string // SelfLink is a URL representing this object. // +optional - SelfLink string `json:"selfLink,omitempty"` + SelfLink string // UID is the unique in time and space value for this object. It is typically generated by // the server on successful creation of a resource and is not allowed to change on PUT // operations. // +optional - UID types.UID `json:"uid,omitempty"` + UID types.UID // An opaque value that represents the version of this resource. May be used for optimistic // concurrency, change detection, and the watch operation on a resource or set of resources. // Clients must treat these values as opaque and values may only be valid for a particular // resource or set of resources. Only servers will generate resource versions. // +optional - ResourceVersion string `json:"resourceVersion,omitempty"` + ResourceVersion string // A sequence number representing a specific generation of the desired state. // Populated by the system. Read-only. // +optional - Generation int64 `json:"generation,omitempty"` + Generation int64 // CreationTimestamp is a timestamp representing the server time when this object was // created. 
It is not guaranteed to be set in happens-before order across separate operations. // Clients may not set this value. It is represented in RFC3339 form and is in UTC. // +optional - CreationTimestamp unversioned.Time `json:"creationTimestamp,omitempty"` + CreationTimestamp metav1.Time // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This // field is set by the server when a graceful deletion is requested by the user, and is not @@ -132,12 +133,12 @@ type ObjectMeta struct { // Read-only. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - DeletionTimestamp *unversioned.Time `json:"deletionTimestamp,omitempty"` + DeletionTimestamp *metav1.Time // DeletionGracePeriodSeconds records the graceful deletion value set when graceful deletion // was requested. Represents the most recent grace period, and may only be shortened once set. // +optional - DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"` + DeletionGracePeriodSeconds *int64 // Labels are key value pairs that may be used to scope and select individual resources. // Label keys are of the form: @@ -149,34 +150,34 @@ type ObjectMeta struct { // to the user. Other system components that wish to use labels must specify a prefix. The // "kubernetes.io/" prefix is reserved for use by kubernetes components. // +optional - Labels map[string]string `json:"labels,omitempty"` + Labels map[string]string // Annotations are unstructured key value data stored with a resource that may be set by // external tooling. They are not queryable and should be preserved when modifying // objects. Annotation keys have the same formatting restrictions as Label keys. See the // comments on Labels for details. // +optional - Annotations map[string]string `json:"annotations,omitempty"` + Annotations map[string]string // List of objects depended by this object. If ALL objects in the list have // been deleted, this object will be garbage collected. If this object is managed by a controller, // then an entry in this list will point to this controller, with the controller field set to true. // There cannot be more than one managing controller. // +optional - OwnerReferences []OwnerReference `json:"ownerReferences,omitempty"` + OwnerReferences []metav1.OwnerReference // Must be empty before the object is deleted from the registry. Each entry // is an identifier for the responsible component that will remove the entry // from the list. If the deletionTimestamp of the object is non-nil, entries // in this list can only be removed. // +optional - Finalizers []string `json:"finalizers,omitempty"` + Finalizers []string // The name of the cluster which the object belongs to. // This is used to distinguish resources with same name and namespace in different clusters. // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. // +optional - ClusterName string `json:"clusterName,omitempty"` + ClusterName string } const ( @@ -188,6 +189,8 @@ const ( NamespaceNone string = "" // NamespaceSystem is the system namespace where we place system components. 
NamespaceSystem string = "kube-system" + // NamespacePublic is the namespace where we place public info (ConfigMaps) + NamespacePublic string = "kube-public" // TerminationMessagePathDefault means the default path to capture the application termination message running in a container TerminationMessagePathDefault string = "/dev/termination-log" ) @@ -196,12 +199,12 @@ const ( type Volume struct { // Required: This must be a DNS_LABEL. Each volume in a pod must have // a unique name. - Name string `json:"name"` + Name string // The VolumeSource represents the location and type of a volume to mount. // This is optional for now. If not specified, the Volume is implied to be an EmptyDir. // This implied behavior is deprecated and will be removed in a future version. // +optional - VolumeSource `json:",inline,omitempty"` + VolumeSource } // VolumeSource represents the source location of a volume to mount. @@ -215,82 +218,90 @@ type VolumeSource struct { // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not // mount host directories as read/write. // +optional - HostPath *HostPathVolumeSource `json:"hostPath,omitempty"` + HostPath *HostPathVolumeSource // EmptyDir represents a temporary directory that shares a pod's lifetime. // +optional - EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty"` + EmptyDir *EmptyDirVolumeSource // GCEPersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. // +optional - GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty"` + GCEPersistentDisk *GCEPersistentDiskVolumeSource // AWSElasticBlockStore represents an AWS EBS disk that is attached to a // kubelet's host machine and then exposed to the pod. // +optional - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty"` + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource // GitRepo represents a git repository at a particular revision. // +optional - GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty"` + GitRepo *GitRepoVolumeSource // Secret represents a secret that should populate this volume. // +optional - Secret *SecretVolumeSource `json:"secret,omitempty"` + Secret *SecretVolumeSource // NFS represents an NFS mount on the host that shares a pod's lifetime // +optional - NFS *NFSVolumeSource `json:"nfs,omitempty"` + NFS *NFSVolumeSource // ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. 
// +optional - ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty"` + ISCSI *ISCSIVolumeSource // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime // +optional - Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty"` + Glusterfs *GlusterfsVolumeSource // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace // +optional - PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty"` + PersistentVolumeClaim *PersistentVolumeClaimVolumeSource // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime // +optional - RBD *RBDVolumeSource `json:"rbd,omitempty"` + RBD *RBDVolumeSource // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime // +optional - Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty"` + Quobyte *QuobyteVolumeSource // FlexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. // +optional - FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty"` + FlexVolume *FlexVolumeSource // Cinder represents a cinder volume attached and mounted on kubelets host machine // +optional - Cinder *CinderVolumeSource `json:"cinder,omitempty"` + Cinder *CinderVolumeSource // CephFS represents a Cephfs mount on the host that shares a pod's lifetime // +optional - CephFS *CephFSVolumeSource `json:"cephfs,omitempty"` + CephFS *CephFSVolumeSource // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running // +optional - Flocker *FlockerVolumeSource `json:"flocker,omitempty"` + Flocker *FlockerVolumeSource // DownwardAPI represents metadata about the pod that should populate this volume // +optional - DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty"` + DownwardAPI *DownwardAPIVolumeSource // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. // +optional - FC *FCVolumeSource `json:"fc,omitempty"` + FC *FCVolumeSource // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. // +optional - AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty"` + AzureFile *AzureFileVolumeSource // ConfigMap represents a configMap that should populate this volume // +optional - ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty"` + ConfigMap *ConfigMapVolumeSource // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine // +optional - VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty"` + VsphereVolume *VsphereVirtualDiskVolumeSource // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. 
// +optional - AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty"` + AzureDisk *AzureDiskVolumeSource // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine - PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty"` + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource + // Items for all in one resources secrets, configmaps, and downward API + Projected *ProjectedVolumeSource + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource } // Similar to VolumeSource but meant for the administrator who creates PVs. @@ -299,106 +310,122 @@ type PersistentVolumeSource struct { // GCEPersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. // +optional - GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty"` + GCEPersistentDisk *GCEPersistentDiskVolumeSource // AWSElasticBlockStore represents an AWS EBS disk that is attached to a // kubelet's host machine and then exposed to the pod. // +optional - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty"` + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource // HostPath represents a directory on the host. // Provisioned by a developer or tester. // This is useful for single-node development and testing only! // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. // +optional - HostPath *HostPathVolumeSource `json:"hostPath,omitempty"` + HostPath *HostPathVolumeSource // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod // +optional - Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty"` + Glusterfs *GlusterfsVolumeSource // NFS represents an NFS mount on the host that shares a pod's lifetime // +optional - NFS *NFSVolumeSource `json:"nfs,omitempty"` + NFS *NFSVolumeSource // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime // +optional - RBD *RBDVolumeSource `json:"rbd,omitempty"` + RBD *RBDVolumeSource // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime // +optional - Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty"` + Quobyte *QuobyteVolumeSource // ISCSIVolumeSource represents an ISCSI resource that is attached to a // kubelet's host machine and then exposed to the pod. // +optional - ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty"` + ISCSI *ISCSIVolumeSource // FlexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. // +optional - FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty"` + FlexVolume *FlexVolumeSource // Cinder represents a cinder volume attached and mounted on kubelets host machine // +optional - Cinder *CinderVolumeSource `json:"cinder,omitempty"` + Cinder *CinderVolumeSource // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime // +optional - CephFS *CephFSVolumeSource `json:"cephfs,omitempty"` + CephFS *CephFSVolumeSource // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. 
// +optional - FC *FCVolumeSource `json:"fc,omitempty"` + FC *FCVolumeSource // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running // +optional - Flocker *FlockerVolumeSource `json:"flocker,omitempty"` + Flocker *FlockerVolumeSource // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. // +optional - AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty"` + AzureFile *AzureFileVolumeSource // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine // +optional - VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty"` + VsphereVolume *VsphereVirtualDiskVolumeSource // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. // +optional - AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty"` + AzureDisk *AzureDiskVolumeSource // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine - PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty"` + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource } type PersistentVolumeClaimVolumeSource struct { // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume - ClaimName string `json:"claimName"` + ClaimName string // Optional: Defaults to false (read/write). ReadOnly here // will force the ReadOnly setting in VolumeMounts // +optional - ReadOnly bool `json:"readOnly,omitempty"` + ReadOnly bool } +const ( + // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. + // It's currently still used and will be held for backwards compatibility + BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" +) + // +genclient=true // +nonNamespaced=true type PersistentVolume struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta //Spec defines a persistent volume owned by the cluster // +optional - Spec PersistentVolumeSpec `json:"spec,omitempty"` + Spec PersistentVolumeSpec // Status represents the current information about persistent volume. // +optional - Status PersistentVolumeStatus `json:"status,omitempty"` + Status PersistentVolumeStatus } type PersistentVolumeSpec struct { // Resources represents the actual resources of the volume - Capacity ResourceList `json:"capacity"` + Capacity ResourceList // Source represents the location and type of a volume to mount. - PersistentVolumeSource `json:",inline"` + PersistentVolumeSource // AccessModes contains all ways the volume can be mounted // +optional - AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"` + AccessModes []PersistentVolumeAccessMode // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. // ClaimRef is expected to be non-nil when bound. // claim.VolumeName is the authoritative bind between PV and PVC. // When set to non-nil value, PVC.Spec.Selector of the referenced PVC is // ignored, i.e. labels of this PV do not need to match PVC selector. 
// +optional - ClaimRef *ObjectReference `json:"claimRef,omitempty"` + ClaimRef *ObjectReference // Optional: what happens to a persistent volume when released from its claim. // +optional - PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty"` + PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy + // Name of StorageClass to which this persistent volume belongs. Empty value + // means that this volume does not belong to any StorageClass. + // +optional + StorageClassName string } // PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes @@ -419,44 +446,44 @@ const ( type PersistentVolumeStatus struct { // Phase indicates if a volume is available, bound to a claim, or released by a claim // +optional - Phase PersistentVolumePhase `json:"phase,omitempty"` + Phase PersistentVolumePhase // A human-readable message indicating details about why the volume is in this state. // +optional - Message string `json:"message,omitempty"` + Message string // Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI // +optional - Reason string `json:"reason,omitempty"` + Reason string } type PersistentVolumeList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` - Items []PersistentVolume `json:"items"` + metav1.ListMeta + Items []PersistentVolume } // +genclient=true // PersistentVolumeClaim is a user's request for and claim to a persistent volume type PersistentVolumeClaim struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Spec defines the volume requested by a pod author // +optional - Spec PersistentVolumeClaimSpec `json:"spec,omitempty"` + Spec PersistentVolumeClaimSpec // Status represents the current information about a claim // +optional - Status PersistentVolumeClaimStatus `json:"status,omitempty"` + Status PersistentVolumeClaimStatus } type PersistentVolumeClaimList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` - Items []PersistentVolumeClaim `json:"items"` + metav1.ListMeta + Items []PersistentVolumeClaim } // PersistentVolumeClaimSpec describes the common attributes of storage devices @@ -464,30 +491,34 @@ type PersistentVolumeClaimList struct { type PersistentVolumeClaimSpec struct { // Contains the types of access modes required // +optional - AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"` + AccessModes []PersistentVolumeAccessMode // A label query over volumes to consider for binding. This selector is // ignored when VolumeName is set // +optional - Selector *unversioned.LabelSelector `json:"selector,omitempty"` + Selector *metav1.LabelSelector // Resources represents the minimum resources required // +optional - Resources ResourceRequirements `json:"resources,omitempty"` + Resources ResourceRequirements // VolumeName is the binding reference to the PersistentVolume backing this // claim. When set to non-empty value Selector is not evaluated // +optional - VolumeName string `json:"volumeName,omitempty"` + VolumeName string + // Name of the StorageClass required by the claim. 
+ // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1 + // +optional + StorageClassName *string } type PersistentVolumeClaimStatus struct { // Phase represents the current phase of PersistentVolumeClaim // +optional - Phase PersistentVolumeClaimPhase `json:"phase,omitempty"` + Phase PersistentVolumeClaimPhase // AccessModes contains all ways the volume backing the PVC can be mounted // +optional - AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"` + AccessModes []PersistentVolumeAccessMode // Represents the actual resources of the underlying volume // +optional - Capacity ResourceList `json:"capacity,omitempty"` + Capacity ResourceList } type PersistentVolumeAccessMode string @@ -535,7 +566,7 @@ const ( // Represents a host path mapped into a pod. // Host path volumes do not support ownership management or SELinux relabeling. type HostPathVolumeSource struct { - Path string `json:"path"` + Path string } // Represents an empty directory for a pod. @@ -548,7 +579,7 @@ type EmptyDirVolumeSource struct { // Optional: what type of storage medium should back this directory. // The default is "" which means to use the node's default medium. // +optional - Medium StorageMedium `json:"medium,omitempty"` + Medium StorageMedium } // StorageMedium defines ways that storage can be allocated to a volume. @@ -577,22 +608,22 @@ const ( // PDs support ownership management and SELinux relabeling. type GCEPersistentDiskVolumeSource struct { // Unique name of the PD resource. Used to identify the disk in GCE - PDName string `json:"pdName"` + PDName string // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional - FSType string `json:"fsType,omitempty"` + FSType string // Optional: Partition on the disk to mount. // If omitted, kubelet will attempt to mount the device name. // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. // +optional - Partition int32 `json:"partition,omitempty"` + Partition int32 // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional - ReadOnly bool `json:"readOnly,omitempty"` + ReadOnly bool } // Represents an ISCSI disk. @@ -602,26 +633,30 @@ type ISCSIVolumeSource struct { // Required: iSCSI target portal // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) // +optional - TargetPortal string `json:"targetPortal,omitempty"` + TargetPortal string // Required: target iSCSI Qualified Name // +optional - IQN string `json:"iqn,omitempty"` + IQN string // Required: iSCSI target lun number // +optional - Lun int32 `json:"lun,omitempty"` + Lun int32 // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. // +optional - ISCSIInterface string `json:"iscsiInterface,omitempty"` + ISCSIInterface string // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional - FSType string `json:"fsType,omitempty"` + FSType string // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. 
// +optional - ReadOnly bool `json:"readOnly,omitempty"` + ReadOnly bool + // Required: list of iSCSI target portal ips for high availability. + // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) + // +optional + Portals []string } // Represents a Fibre Channel volume. @@ -629,45 +664,45 @@ type ISCSIVolumeSource struct { // Fibre Channel volumes support ownership management and SELinux relabeling. type FCVolumeSource struct { // Required: FC target worldwide names (WWNs) - TargetWWNs []string `json:"targetWWNs"` + TargetWWNs []string // Required: FC target lun number - Lun *int32 `json:"lun"` + Lun *int32 // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional - FSType string `json:"fsType,omitempty"` + FSType string // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional - ReadOnly bool `json:"readOnly,omitempty"` + ReadOnly bool } // FlexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. type FlexVolumeSource struct { // Driver is the name of the driver to use for this volume. - Driver string `json:"driver"` + Driver string // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. // +optional - FSType string `json:"fsType,omitempty"` + FSType string // Optional: SecretRef is reference to the secret object containing // sensitive information to pass to the plugin scripts. This may be // empty if no secret object is specified. If the secret object // contains more than one secret, all secrets are passed to the plugin // scripts. // +optional - SecretRef *LocalObjectReference `json:"secretRef,omitempty"` + SecretRef *LocalObjectReference // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional - ReadOnly bool `json:"readOnly,omitempty"` + ReadOnly bool // Optional: Extra driver options if any. // +optional - Options map[string]string `json:"options,omitempty"` + Options map[string]string } // Represents a Persistent Disk resource in AWS. @@ -678,22 +713,22 @@ type FlexVolumeSource struct { // ownership management and SELinux relabeling. type AWSElasticBlockStoreVolumeSource struct { // Unique id of the persistent disk resource. Used to identify the disk in AWS - VolumeID string `json:"volumeID"` + VolumeID string // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional - FSType string `json:"fsType,omitempty"` + FSType string // Optional: Partition on the disk to mount. // If omitted, kubelet will attempt to mount the device name. // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. // +optional - Partition int32 `json:"partition,omitempty"` + Partition int32 // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. 
// +optional - ReadOnly bool `json:"readOnly,omitempty"` + ReadOnly bool } // Represents a volume that is populated with the contents of a git repository. @@ -701,16 +736,16 @@ type AWSElasticBlockStoreVolumeSource struct { // Git repo volumes support SELinux relabeling. type GitRepoVolumeSource struct { // Repository URL - Repository string `json:"repository"` + Repository string // Commit hash, this is optional // +optional - Revision string `json:"revision,omitempty"` + Revision string // Clone target, this is optional // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the // git repository. Otherwise, if specified, the volume will contain the git repository in // the subdirectory with the given name. // +optional - Directory string `json:"directory,omitempty"` + Directory string // TODO: Consider credentials here. } @@ -722,38 +757,63 @@ type GitRepoVolumeSource struct { type SecretVolumeSource struct { // Name of the secret in the pod's namespace to use. // +optional - SecretName string `json:"secretName,omitempty"` + SecretName string // If unspecified, each key-value pair in the Data field of the referenced // Secret will be projected into the volume as a file whose name is the // key and content is the value. If specified, the listed keys will be // projected into the specified paths, and unlisted keys will not be // present. If a key is specified which is not present in the Secret, - // the volume setup will error. Paths must be relative and may not contain - // the '..' path or start with '..'. + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. // +optional - Items []KeyToPath `json:"items,omitempty"` + Items []KeyToPath // Mode bits to use on created files by default. Must be a value between // 0 and 0777. // Directories within the path are not affected by this setting. // This might be in conflict with other options that affect the file // mode, like fsGroup, and the result can be other mode bits set. // +optional - DefaultMode *int32 `json:"defaultMode,omitempty"` + DefaultMode *int32 + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool +} + +// Adapts a secret into a projected volume. +// +// The contents of the target Secret's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names. +// Note that this is identical to a secret volume source without the default +// mode. +type SecretProjection struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool } // Represents an NFS mount that lasts the lifetime of a pod. // NFS volumes do not support ownership management or SELinux relabeling. 
type NFSVolumeSource struct { // Server is the hostname or IP address of the NFS server - Server string `json:"server"` + Server string // Path is the exported NFS share - Path string `json:"path"` + Path string // Optional: Defaults to false (read/write). ReadOnly here will force // the NFS export to be mounted with read-only permissions // +optional - ReadOnly bool `json:"readOnly,omitempty"` + ReadOnly bool } // Represents a Quobyte mount that lasts the lifetime of a pod. @@ -762,71 +822,71 @@ type QuobyteVolumeSource struct { // Registry represents a single or multiple Quobyte Registry services // specified as a string as host:port pair (multiple entries are separated with commas) // which acts as the central registry for volumes - Registry string `json:"registry"` + Registry string // Volume is a string that references an already created Quobyte volume by name. - Volume string `json:"volume"` + Volume string // Defaults to false (read/write). ReadOnly here will force // the Quobyte to be mounted with read-only permissions // +optional - ReadOnly bool `json:"readOnly,omitempty"` + ReadOnly bool // User to map volume access to // Defaults to the root user // +optional - User string `json:"user,omitempty"` + User string // Group to map volume access to // Default is no group // +optional - Group string `json:"group,omitempty"` + Group string } // Represents a Glusterfs mount that lasts the lifetime of a pod. // Glusterfs volumes do not support ownership management or SELinux relabeling. type GlusterfsVolumeSource struct { // Required: EndpointsName is the endpoint name that details Glusterfs topology - EndpointsName string `json:"endpoints"` + EndpointsName string // Required: Path is the Glusterfs volume path - Path string `json:"path"` + Path string // Optional: Defaults to false (read/write). ReadOnly here will force // the Glusterfs to be mounted with read-only permissions // +optional - ReadOnly bool `json:"readOnly,omitempty"` + ReadOnly bool } // Represents a Rados Block Device mount that lasts the lifetime of a pod. // RBD volumes support ownership management and SELinux relabeling. type RBDVolumeSource struct { // Required: CephMonitors is a collection of Ceph monitors - CephMonitors []string `json:"monitors"` + CephMonitors []string // Required: RBDImage is the rados image name - RBDImage string `json:"image"` + RBDImage string // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional - FSType string `json:"fsType,omitempty"` + FSType string // Optional: RadosPool is the rados pool name,default is rbd // +optional - RBDPool string `json:"pool,omitempty"` + RBDPool string // Optional: RBDUser is the rados user name, default is admin // +optional - RadosUser string `json:"user,omitempty"` + RadosUser string // Optional: Keyring is the path to key ring for RBDUser, default is /etc/ceph/keyring // +optional - Keyring string `json:"keyring,omitempty"` + Keyring string // Optional: SecretRef is name of the authentication secret for RBDUser, default is nil. // +optional - SecretRef *LocalObjectReference `json:"secretRef,omitempty"` + SecretRef *LocalObjectReference // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. 
// +optional - ReadOnly bool `json:"readOnly,omitempty"` + ReadOnly bool } // Represents a cinder volume resource in Openstack. A Cinder volume @@ -835,39 +895,39 @@ type RBDVolumeSource struct { // management and SELinux relabeling. type CinderVolumeSource struct { // Unique id of the volume used to identify the cinder volume - VolumeID string `json:"volumeID"` + VolumeID string // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // +optional - FSType string `json:"fsType,omitempty"` + FSType string // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional - ReadOnly bool `json:"readOnly,omitempty"` + ReadOnly bool } // Represents a Ceph Filesystem mount that lasts the lifetime of a pod // Cephfs volumes do not support ownership management or SELinux relabeling. type CephFSVolumeSource struct { // Required: Monitors is a collection of Ceph monitors - Monitors []string `json:"monitors"` + Monitors []string // Optional: Used as the mounted root, rather than the full Ceph tree, default is / // +optional - Path string `json:"path,omitempty"` + Path string // Optional: User is the rados user name, default is admin // +optional - User string `json:"user,omitempty"` + User string // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret // +optional - SecretFile string `json:"secretFile,omitempty"` + SecretFile string // Optional: SecretRef is reference to the authentication secret for User, default is empty. // +optional - SecretRef *LocalObjectReference `json:"secretRef,omitempty"` + SecretRef *LocalObjectReference // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional - ReadOnly bool `json:"readOnly,omitempty"` + ReadOnly bool } // Represents a Flocker volume mounted by the Flocker agent. @@ -877,10 +937,10 @@ type FlockerVolumeSource struct { // Name of the dataset stored as metadata -> name on the dataset for Flocker // should be considered as deprecated // +optional - DatasetName string `json:"datasetName,omitempty"` + DatasetName string // UUID of the dataset. This is unique identifier of a Flocker dataset // +optional - DatasetUUID string `json:"datasetUUID,omitempty"` + DatasetUUID string } // Represents a volume containing downward API info. @@ -888,66 +948,90 @@ type FlockerVolumeSource struct { type DownwardAPIVolumeSource struct { // Items is a list of DownwardAPIVolume file // +optional - Items []DownwardAPIVolumeFile `json:"items,omitempty"` + Items []DownwardAPIVolumeFile // Mode bits to use on created files by default. Must be a value between // 0 and 0777. // Directories within the path are not affected by this setting. // This might be in conflict with other options that affect the file // mode, like fsGroup, and the result can be other mode bits set. // +optional - DefaultMode *int32 `json:"defaultMode,omitempty"` + DefaultMode *int32 } // Represents a single file containing information from the downward API type DownwardAPIVolumeFile struct { // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' - Path string `json:"path"` + Path string // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. 
// +optional - FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty"` + FieldRef *ObjectFieldSelector // Selects a resource of the container: only resources limits and requests // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. // +optional - ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty"` + ResourceFieldRef *ResourceFieldSelector // Optional: mode bits to use on this file, must be a value between 0 // and 0777. If not specified, the volume defaultMode will be used. // This might be in conflict with other options that affect the file // mode, like fsGroup, and the result can be other mode bits set. // +optional - Mode *int32 `json:"mode,omitempty"` + Mode *int32 +} + +// Represents downward API info for projecting into a projected volume. +// Note that this is identical to a downwardAPI volume source without the default +// mode. +type DownwardAPIProjection struct { + // Items is a list of DownwardAPIVolume file + // +optional + Items []DownwardAPIVolumeFile } // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. type AzureFileVolumeSource struct { // the name of secret that contains Azure Storage Account Name and Key - SecretName string `json:"secretName"` + SecretName string // Share Name - ShareName string `json:"shareName"` + ShareName string // Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional - ReadOnly bool `json:"readOnly,omitempty"` + ReadOnly bool } // Represents a vSphere volume resource. type VsphereVirtualDiskVolumeSource struct { // Path that identifies vSphere volume vmdk - VolumePath string `json:"volumePath"` + VolumePath string // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // +optional - FSType string `json:"fsType,omitempty"` + FSType string } // Represents a Photon Controller persistent disk resource. type PhotonPersistentDiskVolumeSource struct { // ID that identifies Photon Controller persistent disk - PdID string `json:"pdID"` + PdID string // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - FSType string `json:"fsType,omitempty"` + FSType string +} + +// PortworxVolumeSource represents a Portworx volume resource. +type PortworxVolumeSource struct { + // VolumeID uniquely identifies a Portworx volume + VolumeID string + // FSType represents the filesystem type to mount + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool } type AzureDataDiskCachingMode string @@ -961,21 +1045,56 @@ const ( // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. type AzureDiskVolumeSource struct { // The Name of the data disk in the blob storage - DiskName string `json:"diskName"` + DiskName string // The URI the the data disk in the blob storage - DataDiskURI string `json:"diskURI"` + DataDiskURI string // Host Caching mode: None, Read Only, Read Write. 
// +optional - CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty"` + CachingMode *AzureDataDiskCachingMode // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // +optional - FSType *string `json:"fsType,omitempty"` + FSType *string // Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional - ReadOnly *bool `json:"readOnly,omitempty"` + ReadOnly *bool +} + +// ScaleIOVolumeSource represents a persistent ScaleIO volume +type ScaleIOVolumeSource struct { + // The host address of the ScaleIO API Gateway. + Gateway string + // The name of the storage system as configured in ScaleIO. + System string + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + SecretRef *LocalObjectReference + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + SSLEnabled bool + // The name of the Protection Domain for the configured storage (defaults to "default"). + // +optional + ProtectionDomain string + // The Storage Pool associated with the protection domain (defaults to "default"). + // +optional + StoragePool string + // Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). + // +optional + StorageMode string + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. + VolumeName string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool } // Adapts a ConfigMap into a volume. @@ -985,41 +1104,92 @@ type AzureDiskVolumeSource struct { // the items element is populated with specific mappings of keys to paths. // ConfigMap volumes support ownership management and SELinux relabeling. type ConfigMapVolumeSource struct { - LocalObjectReference `json:",inline"` + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 + // Specify whether the ConfigMap or its keys must be defined + // +optional + Optional *bool +} + +// Adapts a ConfigMap into a projected volume.
+// +// The contents of the target ConfigMap's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names, +// unless the items element is populated with specific mappings of keys to paths. +// Note that this is identical to a configmap volume source without the default +// mode. +type ConfigMapProjection struct { + LocalObjectReference // If unspecified, each key-value pair in the Data field of the referenced // ConfigMap will be projected into the volume as a file whose name is the // key and content is the value. If specified, the listed keys will be // projected into the specified paths, and unlisted keys will not be // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error. Paths must be relative and may not contain - // the '..' path or start with '..'. + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. // +optional - Items []KeyToPath `json:"items,omitempty"` + Items []KeyToPath + // Specify whether the ConfigMap or its keys must be defined + // +optional + Optional *bool +} + +// Represents a projected volume source +type ProjectedVolumeSource struct { + // list of volume projections + Sources []VolumeProjection // Mode bits to use on created files by default. Must be a value between // 0 and 0777. // Directories within the path are not affected by this setting. // This might be in conflict with other options that affect the file // mode, like fsGroup, and the result can be other mode bits set. // +optional - DefaultMode *int32 `json:"defaultMode,omitempty"` + DefaultMode *int32 +} + +// Projection that may be projected along with other supported volume types +type VolumeProjection struct { + // all types below are the supported types for projection into the same volume + + // information about the secret data to project + Secret *SecretProjection + // information about the downwardAPI data to project + DownwardAPI *DownwardAPIProjection + // information about the configMap data to project + ConfigMap *ConfigMapProjection } // Maps a string key to a path within a volume. type KeyToPath struct { // The key to project. - Key string `json:"key"` + Key string // The relative path of the file to map the key to. // May not be an absolute path. // May not contain the path element '..'. // May not start with the string '..'. - Path string `json:"path"` + Path string // Optional: mode bits to use on this file, should be a value between 0 // and 0777. If not specified, the volume defaultMode will be used. // This might be in conflict with other options that affect the file // mode, like fsGroup, and the result can be other mode bits set. // +optional - Mode *int32 `json:"mode,omitempty"` + Mode *int32 } // ContainerPort represents a network port in a single container @@ -1027,40 +1197,40 @@ type ContainerPort struct { // Optional: If specified, this must be an IANA_SVC_NAME Each named port // in a pod must have a unique name. // +optional - Name string `json:"name,omitempty"` + Name string // Optional: If specified, this must be a valid port number, 0 < x < 65536. // If HostNetwork is specified, this must match ContainerPort. // +optional - HostPort int32 `json:"hostPort,omitempty"` + HostPort int32 // Required: This must be a valid port number, 0 < x < 65536. - ContainerPort int32 `json:"containerPort"` + ContainerPort int32 // Required: Supports "TCP" and "UDP".
// +optional - Protocol Protocol `json:"protocol,omitempty"` + Protocol Protocol // Optional: What host IP to bind the external port to. // +optional - HostIP string `json:"hostIP,omitempty"` + HostIP string } // VolumeMount describes a mounting of a Volume within a container. type VolumeMount struct { // Required: This must match the Name of a Volume [above]. - Name string `json:"name"` + Name string // Optional: Defaults to false (read-write). // +optional - ReadOnly bool `json:"readOnly,omitempty"` + ReadOnly bool // Required. Must not contain ':'. - MountPath string `json:"mountPath"` + MountPath string // Path within the volume from which the container's volume should be mounted. // Defaults to "" (volume's root). // +optional - SubPath string `json:"subPath,omitempty"` + SubPath string } // EnvVar represents an environment variable present in a Container. type EnvVar struct { // Required: This must be a C_IDENTIFIER. - Name string `json:"name"` + Name string // Optional: no more than one of the following may be specified. // Optional: Defaults to ""; variable references $(VAR_NAME) are expanded // using the previous defined environment variables in the container and @@ -1070,10 +1240,10 @@ type EnvVar struct { // references will never be expanded, regardless of whether the variable // exists or not. // +optional - Value string `json:"value,omitempty"` + Value string // Optional: Specifies a source the value of this var should come from. // +optional - ValueFrom *EnvVarSource `json:"valueFrom,omitempty"` + ValueFrom *EnvVarSource } // EnvVarSource represents a source for the value of an EnvVar. @@ -1082,17 +1252,17 @@ type EnvVarSource struct { // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, // spec.nodeName, spec.serviceAccountName, status.podIP. // +optional - FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty"` + FieldRef *ObjectFieldSelector // Selects a resource of the container: only resources limits and requests // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. // +optional - ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty"` + ResourceFieldRef *ResourceFieldSelector // Selects a key of a ConfigMap. // +optional - ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty"` + ConfigMapKeyRef *ConfigMapKeySelector // Selects a key of a secret in the pod's namespace. // +optional - SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty"` + SecretKeyRef *SecretKeySelector } // ObjectFieldSelector selects an APIVersioned field of an object. @@ -1100,65 +1270,110 @@ type ObjectFieldSelector struct { // Required: Version of the schema the FieldPath is written in terms of. // If no value is specified, it will be defaulted to the APIVersion of the // enclosing object. 
- APIVersion string `json:"apiVersion"` + APIVersion string // Required: Path of the field to select in the specified API version - FieldPath string `json:"fieldPath"` + FieldPath string } // ResourceFieldSelector represents container resources (cpu, memory) and their output format type ResourceFieldSelector struct { // Container name: required for volumes, optional for env vars // +optional - ContainerName string `json:"containerName,omitempty"` + ContainerName string // Required: resource to select - Resource string `json:"resource"` + Resource string // Specifies the output format of the exposed resources, defaults to "1" // +optional - Divisor resource.Quantity `json:"divisor,omitempty"` + Divisor resource.Quantity } // Selects a key from a ConfigMap. type ConfigMapKeySelector struct { // The ConfigMap to select from. - LocalObjectReference `json:",inline"` + LocalObjectReference // The key to select. - Key string `json:"key"` + Key string + // Specify whether the ConfigMap or its key must be defined + // +optional + Optional *bool } // SecretKeySelector selects a key of a Secret. type SecretKeySelector struct { // The name of the secret in the pod's namespace to select from. - LocalObjectReference `json:",inline"` + LocalObjectReference // The key of the secret to select from. Must be a valid secret key. - Key string `json:"key"` + Key string + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool +} + +// EnvFromSource represents the source of a set of ConfigMaps +type EnvFromSource struct { + // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + // +optional + Prefix string + // The ConfigMap to select from. + //+optional + ConfigMapRef *ConfigMapEnvSource + // The Secret to select from. + //+optional + SecretRef *SecretEnvSource +} + +// ConfigMapEnvSource selects a ConfigMap to populate the environment +// variables with. +// +// The contents of the target ConfigMap's Data field will represent the +// key-value pairs as environment variables. +type ConfigMapEnvSource struct { + // The ConfigMap to select from. + LocalObjectReference + // Specify whether the ConfigMap must be defined + // +optional + Optional *bool +} + +// SecretEnvSource selects a Secret to populate the environment +// variables with. +// +// The contents of the target Secret's Data field will represent the +// key-value pairs as environment variables. +type SecretEnvSource struct { + // The Secret to select from. + LocalObjectReference + // Specify whether the Secret must be defined + // +optional + Optional *bool } // HTTPHeader describes a custom header to be used in HTTP probes type HTTPHeader struct { // The header field name - Name string `json:"name"` + Name string // The header field value - Value string `json:"value"` + Value string } // HTTPGetAction describes an action based on HTTP Get requests. type HTTPGetAction struct { // Optional: Path to access on the HTTP server. // +optional - Path string `json:"path,omitempty"` + Path string // Required: Name or number of the port to access on the container. // +optional - Port intstr.IntOrString `json:"port,omitempty"` + Port intstr.IntOrString // Optional: Host name to connect to, defaults to the pod IP. You // probably want to set "Host" in httpHeaders instead. // +optional - Host string `json:"host,omitempty"` + Host string // Optional: Scheme to use for connecting to the host, defaults to HTTP.
// +optional - Scheme URIScheme `json:"scheme,omitempty"` + Scheme URIScheme // Optional: Custom headers to set in the request. HTTP allows repeated headers. // +optional - HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty"` + HTTPHeaders []HTTPHeader } // URIScheme identifies the scheme used for connection to a host for Get actions @@ -1175,7 +1390,7 @@ const ( type TCPSocketAction struct { // Required: Port to connect to. // +optional - Port intstr.IntOrString `json:"port,omitempty"` + Port intstr.IntOrString } // ExecAction describes a "run in container" action. @@ -1185,30 +1400,30 @@ type ExecAction struct { // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use // a shell, you need to explicitly call out to that shell. // +optional - Command []string `json:"command,omitempty"` + Command []string } // Probe describes a health check to be performed against a container to determine whether it is // alive or ready to receive traffic. type Probe struct { // The action taken to determine the health of a container - Handler `json:",inline"` + Handler // Length of time before health checking is activated. In seconds. // +optional - InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty"` + InitialDelaySeconds int32 // Length of time before health checking times out. In seconds. // +optional - TimeoutSeconds int32 `json:"timeoutSeconds,omitempty"` + TimeoutSeconds int32 // How often (in seconds) to perform the probe. // +optional - PeriodSeconds int32 `json:"periodSeconds,omitempty"` + PeriodSeconds int32 // Minimum consecutive successes for the probe to be considered successful after having failed. // Must be 1 for liveness. // +optional - SuccessThreshold int32 `json:"successThreshold,omitempty"` + SuccessThreshold int32 // Minimum consecutive failures for the probe to be considered failed after having succeeded. // +optional - FailureThreshold int32 `json:"failureThreshold,omitempty"` + FailureThreshold int32 } // PullPolicy describes a policy for if/when to pull a container image @@ -1223,6 +1438,19 @@ const ( PullIfNotPresent PullPolicy = "IfNotPresent" ) +// TerminationMessagePolicy describes how termination messages are retrieved from a container. +type TerminationMessagePolicy string + +const ( + // TerminationMessageReadFile is the default behavior and will set the container status message to + // the contents of the container's terminationMessagePath when the container exits. + TerminationMessageReadFile TerminationMessagePolicy = "File" + // TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs + // for the container status message when the container exits with an error and the + // terminationMessagePath has no contents. + TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError" +) + // Capability represent POSIX capabilities type type Capability string @@ -1230,81 +1458,91 @@ type Capability string type Capabilities struct { // Added capabilities // +optional - Add []Capability `json:"add,omitempty"` + Add []Capability // Removed capabilities // +optional - Drop []Capability `json:"drop,omitempty"` + Drop []Capability } // ResourceRequirements describes the compute resource requirements. type ResourceRequirements struct { // Limits describes the maximum amount of compute resources allowed. // +optional - Limits ResourceList `json:"limits,omitempty"` + Limits ResourceList // Requests describes the minimum amount of compute resources required. 
// If Request is omitted for a container, it defaults to Limits if that is explicitly specified, // otherwise to an implementation-defined value // +optional - Requests ResourceList `json:"requests,omitempty"` + Requests ResourceList } // Container represents a single container that is expected to be run on the host. type Container struct { // Required: This must be a DNS_LABEL. Each container in a pod must // have a unique name. - Name string `json:"name"` + Name string // Required. - Image string `json:"image"` + Image string // Optional: The docker image's entrypoint is used if this is not provided; cannot be updated. // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, // regardless of whether the variable exists or not. // +optional - Command []string `json:"command,omitempty"` + Command []string // Optional: The docker image's cmd is used if this is not provided; cannot be updated. // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, // regardless of whether the variable exists or not. // +optional - Args []string `json:"args,omitempty"` + Args []string // Optional: Defaults to Docker's default. // +optional - WorkingDir string `json:"workingDir,omitempty"` + WorkingDir string // +optional - Ports []ContainerPort `json:"ports,omitempty"` + Ports []ContainerPort + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. // +optional - Env []EnvVar `json:"env,omitempty"` + EnvFrom []EnvFromSource + // +optional + Env []EnvVar // Compute resource requirements. // +optional - Resources ResourceRequirements `json:"resources,omitempty"` + Resources ResourceRequirements // +optional - VolumeMounts []VolumeMount `json:"volumeMounts,omitempty"` + VolumeMounts []VolumeMount // +optional - LivenessProbe *Probe `json:"livenessProbe,omitempty"` + LivenessProbe *Probe // +optional - ReadinessProbe *Probe `json:"readinessProbe,omitempty"` + ReadinessProbe *Probe // +optional - Lifecycle *Lifecycle `json:"lifecycle,omitempty"` + Lifecycle *Lifecycle // Required. // +optional - TerminationMessagePath string `json:"terminationMessagePath,omitempty"` + TerminationMessagePath string + // +optional + TerminationMessagePolicy TerminationMessagePolicy // Required: Policy for pulling images for this container - ImagePullPolicy PullPolicy `json:"imagePullPolicy"` + ImagePullPolicy PullPolicy // Optional: SecurityContext defines the security options the container should be run with. // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. // +optional - SecurityContext *SecurityContext `json:"securityContext,omitempty"` + SecurityContext *SecurityContext // Variables for interactive containers, these have very specialized use-cases (e.g. 
debugging) // and shouldn't be used for general purpose containers. // +optional - Stdin bool `json:"stdin,omitempty"` + Stdin bool // +optional - StdinOnce bool `json:"stdinOnce,omitempty"` + StdinOnce bool // +optional - TTY bool `json:"tty,omitempty"` + TTY bool } // Handler defines a specific action that should be taken @@ -1313,14 +1551,14 @@ type Handler struct { // One and only one of the following should be specified. // Exec specifies the action to take. // +optional - Exec *ExecAction `json:"exec,omitempty"` + Exec *ExecAction // HTTPGet specifies the http request to perform. // +optional - HTTPGet *HTTPGetAction `json:"httpGet,omitempty"` + HTTPGet *HTTPGetAction // TCPSocket specifies an action involving a TCP port. // TODO: implement a realistic TCP lifecycle hook // +optional - TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty"` + TCPSocket *TCPSocketAction } // Lifecycle describes actions that the management system should take in response to container lifecycle @@ -1330,11 +1568,11 @@ type Lifecycle struct { // PostStart is called immediately after a container is created. If the handler fails, the container // is terminated and restarted. // +optional - PostStart *Handler `json:"postStart,omitempty"` + PostStart *Handler // PreStop is called immediately before a container is terminated. The reason for termination is // passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. // +optional - PreStop *Handler `json:"preStop,omitempty"` + PreStop *Handler } // The below types are used by kube_client and api_server. @@ -1354,31 +1592,31 @@ const ( type ContainerStateWaiting struct { // A brief CamelCase string indicating details about why the container is in waiting state. // +optional - Reason string `json:"reason,omitempty"` + Reason string // A human-readable message indicating details about why the container is in waiting state. // +optional - Message string `json:"message,omitempty"` + Message string } type ContainerStateRunning struct { // +optional - StartedAt unversioned.Time `json:"startedAt,omitempty"` + StartedAt metav1.Time } type ContainerStateTerminated struct { - ExitCode int32 `json:"exitCode"` + ExitCode int32 // +optional - Signal int32 `json:"signal,omitempty"` + Signal int32 // +optional - Reason string `json:"reason,omitempty"` + Reason string // +optional - Message string `json:"message,omitempty"` + Message string // +optional - StartedAt unversioned.Time `json:"startedAt,omitempty"` + StartedAt metav1.Time // +optional - FinishedAt unversioned.Time `json:"finishedAt,omitempty"` + FinishedAt metav1.Time // +optional - ContainerID string `json:"containerID,omitempty"` + ContainerID string } // ContainerState holds a possible state of container. @@ -1386,29 +1624,29 @@ type ContainerStateTerminated struct { // If none of them is specified, the default one is ContainerStateWaiting. type ContainerState struct { // +optional - Waiting *ContainerStateWaiting `json:"waiting,omitempty"` + Waiting *ContainerStateWaiting // +optional - Running *ContainerStateRunning `json:"running,omitempty"` + Running *ContainerStateRunning // +optional - Terminated *ContainerStateTerminated `json:"terminated,omitempty"` + Terminated *ContainerStateTerminated } type ContainerStatus struct { // Each container in a pod must have a unique name. 
- Name string `json:"name"` + Name string // +optional - State ContainerState `json:"state,omitempty"` + State ContainerState // +optional - LastTerminationState ContainerState `json:"lastState,omitempty"` + LastTerminationState ContainerState // Ready specifies whether the container has passed its readiness check. - Ready bool `json:"ready"` + Ready bool // Note that this is calculated from dead containers. But those containers are subject to // garbage collection. This value will get capped at 5 by GC. - RestartCount int32 `json:"restartCount"` - Image string `json:"image"` - ImageID string `json:"imageID"` + RestartCount int32 + Image string + ImageID string // +optional - ContainerID string `json:"containerID,omitempty"` + ContainerID string } // PodPhase is a label for the condition of a pod at the current time. @@ -1451,16 +1689,16 @@ const ( ) type PodCondition struct { - Type PodConditionType `json:"type"` - Status ConditionStatus `json:"status"` + Type PodConditionType + Status ConditionStatus // +optional - LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty"` + LastProbeTime metav1.Time // +optional - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"` + LastTransitionTime metav1.Time // +optional - Reason string `json:"reason,omitempty"` + Reason string // +optional - Message string `json:"message,omitempty"` + Message string } // RestartPolicy describes how the container should be restarted. @@ -1477,20 +1715,25 @@ const ( // PodList is a list of Pods. type PodList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta - Items []Pod `json:"items"` + Items []Pod } // DNSPolicy defines how a pod's DNS will be configured. type DNSPolicy string const ( + // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS + // first, if it is available, then fall back on the default + // (as determined by kubelet) DNS settings. + DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet" + // DNSClusterFirst indicates that the pod should use cluster DNS - // first, if it is available, then fall back on the default (as - // determined by kubelet) DNS settings. + // first unless hostNetwork is true, if it is available, then + // fall back on the default (as determined by kubelet) DNS settings. DNSClusterFirst DNSPolicy = "ClusterFirst" // DNSDefault indicates that the pod should use the default (as @@ -1503,30 +1746,30 @@ const ( // by the node selector terms. type NodeSelector struct { //Required. A list of node selector terms. The terms are ORed. - NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms"` + NodeSelectorTerms []NodeSelectorTerm } // A null or empty node selector term matches no objects. type NodeSelectorTerm struct { //Required. A list of node selector requirements. The requirements are ANDed. - MatchExpressions []NodeSelectorRequirement `json:"matchExpressions"` + MatchExpressions []NodeSelectorRequirement } // A node selector requirement is a selector that contains values, a key, and an operator // that relates the key and values. type NodeSelectorRequirement struct { // The label key that the selector applies to. - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key"` + Key string // Represents a key's relationship to a set of values. // Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
- Operator NodeSelectorOperator `json:"operator"` + Operator NodeSelectorOperator // An array of string values. If the operator is In or NotIn, // the values array must be non-empty. If the operator is Exists or DoesNotExist, // the values array must be empty. If the operator is Gt or Lt, the values // array must have a single element, which will be interpreted as an integer. // This array is replaced during a strategic merge patch. // +optional - Values []string `json:"values,omitempty"` + Values []string } // A node selector operator is the set of operators that can be used in @@ -1546,13 +1789,13 @@ const ( type Affinity struct { // Describes node affinity scheduling rules for the pod. // +optional - NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty"` + NodeAffinity *NodeAffinity // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). // +optional - PodAffinity *PodAffinity `json:"podAffinity,omitempty"` + PodAffinity *PodAffinity // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). // +optional - PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty"` + PodAntiAffinity *PodAntiAffinity } // Pod affinity is a group of inter pod affinity scheduling rules. @@ -1566,7 +1809,7 @@ type PodAffinity struct { // When there are multiple elements, the lists of nodes corresponding to each // podAffinityTerm are intersected, i.e. all terms must be satisfied. // +optional - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm // If the affinity requirements specified by this field are not met at // scheduling time, the pod will not be scheduled onto the node. // If the affinity requirements specified by this field cease to be met @@ -1575,7 +1818,7 @@ type PodAffinity struct { // When there are multiple elements, the lists of nodes corresponding to each // podAffinityTerm are intersected, i.e. all terms must be satisfied. // +optional - RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm // The scheduler will prefer to schedule pods to nodes that satisfy // the affinity expressions specified by this field, but it may choose // a node that violates one or more of the expressions. The node that is @@ -1586,7 +1829,7 @@ type PodAffinity struct { // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the // node(s) with the highest sum are the most preferred. // +optional - PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm } // Pod anti affinity is a group of inter pod anti affinity scheduling rules. @@ -1600,7 +1843,7 @@ type PodAntiAffinity struct { // When there are multiple elements, the lists of nodes corresponding to each // podAffinityTerm are intersected, i.e. all terms must be satisfied. 
// +optional - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm // If the anti-affinity requirements specified by this field are not met at // scheduling time, the pod will not be scheduled onto the node. // If the anti-affinity requirements specified by this field cease to be met @@ -1609,7 +1852,7 @@ type PodAntiAffinity struct { // When there are multiple elements, the lists of nodes corresponding to each // podAffinityTerm are intersected, i.e. all terms must be satisfied. // +optional - RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm // The scheduler will prefer to schedule pods to nodes that satisfy // the anti-affinity expressions specified by this field, but it may choose // a node that violates one or more of the expressions. The node that is @@ -1620,16 +1863,16 @@ type PodAntiAffinity struct { // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the // node(s) with the highest sum are the most preferred. // +optional - PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm } // The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) type WeightedPodAffinityTerm struct { // weight associated with matching the corresponding podAffinityTerm, // in the range 1-100. - Weight int32 `json:"weight"` + Weight int32 // Required. A pod affinity term, associated with the corresponding weight. - PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm"` + PodAffinityTerm PodAffinityTerm } // Defines a set of pods (namely those matching the labelSelector @@ -1641,12 +1884,10 @@ type WeightedPodAffinityTerm struct { type PodAffinityTerm struct { // A label query over a set of resources, in this case pods. // +optional - LabelSelector *unversioned.LabelSelector `json:"labelSelector,omitempty"` + LabelSelector *metav1.LabelSelector // namespaces specifies which namespaces the labelSelector applies to (matches against); - // nil list means "this pod's namespace," empty list means "all namespaces" - // The json tag here is not "omitempty" since we need to distinguish nil and empty. - // See https://golang.org/pkg/encoding/json/#Marshal for more details. - Namespaces []string `json:"namespaces"` + // null or empty list means "this pod's namespace" + Namespaces []string // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching // the labelSelector in the specified namespaces, where co-located is defined as running on a node // whose value of the label with key topologyKey matches that of any node on which any of the @@ -1655,7 +1896,7 @@ type PodAffinityTerm struct { // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. // +optional - TopologyKey string `json:"topologyKey,omitempty"` + TopologyKey string } // Node affinity is a group of node affinity scheduling rules. 
@@ -1667,7 +1908,7 @@ type NodeAffinity struct { // at some point during pod execution (e.g. due to an update), the system // will try to eventually evict the pod from its node. // +optional - // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector // If the affinity requirements specified by this field are not met at // scheduling time, the pod will not be scheduled onto the node. @@ -1675,7 +1916,7 @@ type NodeAffinity struct { // at some point during pod execution (e.g. due to an update), the system // may or may not try to eventually evict the pod from its node. // +optional - RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` + RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector // The scheduler will prefer to schedule pods to nodes that satisfy // the affinity expressions specified by this field, but it may choose // a node that violates one or more of the expressions. The node that is @@ -1686,30 +1927,34 @@ type NodeAffinity struct { // "weight" to the sum if the node matches the corresponding matchExpressions; the // node(s) with the highest sum are the most preferred. // +optional - PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` + PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm } // An empty preferred scheduling term matches all objects with implicit weight 0 // (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). type PreferredSchedulingTerm struct { // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - Weight int32 `json:"weight"` + Weight int32 // A node selector term, associated with the corresponding weight. - Preference NodeSelectorTerm `json:"preference"` + Preference NodeSelectorTerm } // The node this Taint is attached to has the effect "effect" on // any pod that that does not tolerate the Taint. type Taint struct { // Required. The taint key to be applied to a node. - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key"` + Key string // Required. The taint value corresponding to the taint key. // +optional - Value string `json:"value,omitempty"` + Value string // Required. The effect of the taint on pods // that do not tolerate the taint. - // Valid effects are NoSchedule and PreferNoSchedule. - Effect TaintEffect `json:"effect"` + // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + Effect TaintEffect + // TimeAdded represents the time at which the taint was added. + // It is only written for NoExecute taints. + // +optional + TimeAdded metav1.Time } type TaintEffect string @@ -1725,41 +1970,42 @@ const ( // onto the node entirely. Enforced by the scheduler. TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule" // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // Do not allow new pods to schedule onto the node unless they tolerate the taint, - // do not allow pods to start on Kubelet unless they tolerate the taint, - // but allow all already-running pods to continue running. - // Enforced by the scheduler and Kubelet. + // Like TaintEffectNoSchedule, but additionally do not allow pods submitted to + // Kubelet without going through the scheduler to start. 
+ // Enforced by Kubelet and the scheduler. // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit" - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // Do not allow new pods to schedule onto the node unless they tolerate the taint, - // do not allow pods to start on Kubelet unless they tolerate the taint, - // and evict any already-running pods that do not tolerate the taint. - // Enforced by the scheduler and Kubelet. - // TaintEffectNoScheduleNoAdmitNoExecute = "NoScheduleNoAdmitNoExecute" + // Evict any already-running pods that do not tolerate the taint. + // Currently enforced by NodeController. + TaintEffectNoExecute TaintEffect = "NoExecute" ) // The pod this Toleration is attached to tolerates any taint that matches // the triple <key,value,effect> using the matching operator <operator>. type Toleration struct { - // Required. Key is the taint key that the toleration applies to. + // Key is the taint key that the toleration applies to. Empty means match all taint keys. + // If the key is empty, operator must be Exists; this combination means to match all values and all keys. // +optional - Key string `json:"key,omitempty" patchStrategy:"merge" patchMergeKey:"key"` - // operator represents a key's relationship to the value. + Key string + // Operator represents a key's relationship to the value. // Valid operators are Exists and Equal. Defaults to Equal. // Exists is equivalent to wildcard for value, so that a pod can // tolerate all taints of a particular category. // +optional - Operator TolerationOperator `json:"operator,omitempty"` + Operator TolerationOperator // Value is the taint value the toleration matches to. // If the operator is Exists, the value should be empty, otherwise just a regular string. // +optional - Value string `json:"value,omitempty"` + Value string // Effect indicates the taint effect to match. Empty means match all taint effects. - // When specified, allowed values are NoSchedule and PreferNoSchedule. + // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + // +optional + Effect TaintEffect + // TolerationSeconds represents the period of time the toleration (which must be + // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + // it is not set, which means tolerate the taint forever (do not evict). Zero and + // negative values will be treated as 0 (evict immediately) by the system. // +optional - Effect TaintEffect `json:"effect,omitempty"` - // TODO: For forgiveness (#1574), we'd eventually add at least a grace period - // here, and possibly an occurrence threshold and period. + TolerationSeconds *int64 } // A toleration operator is the set of operators that can be used in a toleration. @@ -1772,13 +2018,13 @@ const ( // PodSpec is a description of a pod type PodSpec struct { - Volumes []Volume `json:"volumes"` + Volumes []Volume // List of initialization containers belonging to the pod. - InitContainers []Container `json:"-"` + InitContainers []Container // List of containers belonging to the pod. - Containers []Container `json:"containers"` + Containers []Container // +optional - RestartPolicy RestartPolicy `json:"restartPolicy,omitempty"` + RestartPolicy RestartPolicy // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. // Value must be non-negative integer. The value zero indicates delete immediately. // If this value is nil, the default grace period will be used instead.
@@ -1786,52 +2032,65 @@ type PodSpec struct { // a termination signal and the time when the processes are forcibly halted with a kill signal. // Set this value longer than the expected cleanup time for your process. // +optional - TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` + TerminationGracePeriodSeconds *int64 // Optional duration in seconds relative to the StartTime that the pod may be active on a node // before the system actively tries to terminate the pod; value must be positive integer // +optional - ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"` + ActiveDeadlineSeconds *int64 // Required: Set DNS policy. // +optional - DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty"` + DNSPolicy DNSPolicy // NodeSelector is a selector which must be true for the pod to fit on a node // +optional - NodeSelector map[string]string `json:"nodeSelector,omitempty"` + NodeSelector map[string]string // ServiceAccountName is the name of the ServiceAccount to use to run this pod // The pod will be allowed to use secrets referenced by the ServiceAccount - ServiceAccountName string `json:"serviceAccountName"` + ServiceAccountName string + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + // +optional + AutomountServiceAccountToken *bool // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, // the scheduler simply schedules this pod onto that node, assuming that it fits resource // requirements. // +optional - NodeName string `json:"nodeName,omitempty"` + NodeName string // SecurityContext holds pod-level security attributes and common container settings. // Optional: Defaults to empty. See type description for default values of each field. // +optional - SecurityContext *PodSecurityContext `json:"securityContext,omitempty"` + SecurityContext *PodSecurityContext // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. // If specified, these secrets will be passed to individual puller implementations for them to use. For example, // in the case of docker, only DockerConfig type secrets are honored. // +optional - ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty"` + ImagePullSecrets []LocalObjectReference // Specifies the hostname of the Pod. // If not specified, the pod's hostname will be set to a system-defined value. // +optional - Hostname string `json:"hostname,omitempty"` + Hostname string // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". // If not specified, the pod will not have a domainname at all. // +optional - Subdomain string `json:"subdomain,omitempty"` + Subdomain string + // If specified, the pod's scheduling constraints + // +optional + Affinity *Affinity + // If specified, the pod will be dispatched by specified scheduler. + // If not specified, the pod will be dispatched by default scheduler. + // +optional + SchedulerName string + // If specified, the pod's tolerations. + // +optional + Tolerations []Toleration } // Sysctl defines a kernel parameter to be set type Sysctl struct { // Name of a property to set - Name string `json:"name"` + Name string // Value of a property to set - Value string `json:"value"` + Value string } // PodSecurityContext holds pod-level security attributes and common container settings.
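Note on the PodSpec hunk above: the vendored internal type now carries AutomountServiceAccountToken, Affinity, SchedulerName and Tolerations directly, matching the Taint/Toleration changes earlier in this file. Below is a minimal, illustrative Go sketch of how a consumer of these internal types might populate the new fields; the import path/alias, the taint key, and the use of TolerationOpExists (defined in the TolerationOperator const block elided from this hunk) are assumptions for illustration, not part of the vendored change.

    package main

    import (
        "fmt"

        // Assumed import path for the vendored internal API shown in this diff;
        // real consumers normally use the versioned client types instead.
        api "k8s.io/kubernetes/pkg/api"
    )

    func main() {
        // Hypothetical toleration: stay bound to a NoExecute-tainted node for
        // five minutes before eviction (TolerationSeconds is new in this hunk).
        seconds := int64(300)
        spec := api.PodSpec{
            SchedulerName: "default-scheduler", // new field in this hunk
            Tolerations: []api.Toleration{{
                Key:               "example.com/maintenance", // hypothetical taint key
                Operator:          api.TolerationOpExists,
                Effect:            api.TaintEffectNoExecute,
                TolerationSeconds: &seconds,
            }},
        }
        fmt.Printf("tolerations: %d\n", len(spec.Tolerations))
    }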
@@ -1843,31 +2102,31 @@ type PodSecurityContext struct { // Optional: Default to false // +k8s:conversion-gen=false // +optional - HostNetwork bool `json:"hostNetwork,omitempty"` + HostNetwork bool // Use the host's pid namespace. // Optional: Default to false. // +k8s:conversion-gen=false // +optional - HostPID bool `json:"hostPID,omitempty"` + HostPID bool // Use the host's ipc namespace. // Optional: Default to false. // +k8s:conversion-gen=false // +optional - HostIPC bool `json:"hostIPC,omitempty"` + HostIPC bool // The SELinux context to be applied to all containers. // If unspecified, the container runtime will allocate a random SELinux context for each // container. May also be set in SecurityContext. If set in // both SecurityContext and PodSecurityContext, the value specified in SecurityContext // takes precedence for that container. // +optional - SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty"` + SELinuxOptions *SELinuxOptions // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in SecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence // for that container. // +optional - RunAsUser *int64 `json:"runAsUser,omitempty"` + RunAsUser *int64 // Indicates that the container must run as a non-root user. // If true, the Kubelet will validate the image at runtime to ensure that it // does not run as UID 0 (root) and fail to start the container if it does. @@ -1875,12 +2134,12 @@ type PodSecurityContext struct { // May also be set in SecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. // +optional - RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"` + RunAsNonRoot *bool // A list of groups applied to the first process run in each container, in addition // to the container's primary GID. If unspecified, no groups will be added to // any container. // +optional - SupplementalGroups []int64 `json:"supplementalGroups,omitempty"` + SupplementalGroups []int64 // A special supplemental group that applies to all containers in a pod. // Some volume types allow the Kubelet to change the ownership of that volume // to be owned by the pod: @@ -1891,107 +2150,121 @@ type PodSecurityContext struct { // // If unset, the Kubelet will not modify the ownership and permissions of any volume. // +optional - FSGroup *int64 `json:"fsGroup,omitempty"` + FSGroup *int64 } +// PodQOSClass defines the supported qos classes of Pods. +type PodQOSClass string + +const ( + // PodQOSGuaranteed is the Guaranteed qos class. + PodQOSGuaranteed PodQOSClass = "Guaranteed" + // PodQOSBurstable is the Burstable qos class. + PodQOSBurstable PodQOSClass = "Burstable" + // PodQOSBestEffort is the BestEffort qos class. + PodQOSBestEffort PodQOSClass = "BestEffort" +) + // PodStatus represents information about the status of a pod. Status may trail the actual // state of a system. type PodStatus struct { // +optional - Phase PodPhase `json:"phase,omitempty"` + Phase PodPhase // +optional - Conditions []PodCondition `json:"conditions,omitempty"` + Conditions []PodCondition // A human readable message indicating details about why the pod is in this state. // +optional - Message string `json:"message,omitempty"` + Message string // A brief CamelCase message indicating details about why the pod is in this state. e.g. 
'OutOfDisk' // +optional - Reason string `json:"reason,omitempty"` + Reason string // +optional - HostIP string `json:"hostIP,omitempty"` + HostIP string // +optional - PodIP string `json:"podIP,omitempty"` + PodIP string // Date and time at which the object was acknowledged by the Kubelet. // This is before the Kubelet pulled the container image(s) for the pod. // +optional - StartTime *unversioned.Time `json:"startTime,omitempty"` + StartTime *metav1.Time + // +optional + QOSClass PodQOSClass // The list has one entry per init container in the manifest. The most recent successful // init container will have ready = true, the most recently started container will have // startTime set. // More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses - InitContainerStatuses []ContainerStatus `json:"-"` + InitContainerStatuses []ContainerStatus // The list has one entry per container in the manifest. Each entry is // currently the output of `docker inspect`. This output format is *not* // final and should not be relied upon. // TODO: Make real decisions about what our info should look like. Re-enable fuzz test // when we have done this. // +optional - ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty"` + ContainerStatuses []ContainerStatus } // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded type PodStatusResult struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Status represents the current information about a pod. This data may not be up // to date. // +optional - Status PodStatus `json:"status,omitempty"` + Status PodStatus } // +genclient=true // Pod is a collection of containers, used as either input (create, update) or as output (list, get). type Pod struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Spec defines the behavior of a pod. // +optional - Spec PodSpec `json:"spec,omitempty"` + Spec PodSpec // Status represents the current information about a pod. This data may not be up // to date. // +optional - Status PodStatus `json:"status,omitempty"` + Status PodStatus } // PodTemplateSpec describes the data a pod should have when created from a template type PodTemplateSpec struct { // Metadata of the pods created from this template. // +optional - ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Spec defines the behavior of a pod. // +optional - Spec PodSpec `json:"spec,omitempty"` + Spec PodSpec } // +genclient=true // PodTemplate describes a template for creating copies of a predefined pod. type PodTemplate struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Template defines the pods that will be created from this pod template // +optional - Template PodTemplateSpec `json:"template,omitempty"` + Template PodTemplateSpec } // PodTemplateList is a list of PodTemplates. type PodTemplateList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta - Items []PodTemplate `json:"items"` + Items []PodTemplate } // ReplicationControllerSpec is the specification of a replication controller. @@ -1999,55 +2272,55 @@ type PodTemplateList struct { // a TemplateRef or a Template set. 
type ReplicationControllerSpec struct { // Replicas is the number of desired replicas. - Replicas int32 `json:"replicas"` + Replicas int32 // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) // +optional - MinReadySeconds int32 `json:"minReadySeconds,omitempty"` + MinReadySeconds int32 // Selector is a label query over pods that should match the Replicas count. - Selector map[string]string `json:"selector"` + Selector map[string]string // TemplateRef is a reference to an object that describes the pod that will be created if // insufficient replicas are detected. This reference is ignored if a Template is set. // Must be set before converting to a versioned API object // +optional - //TemplateRef *ObjectReference `json:"templateRef,omitempty"` + //TemplateRef *ObjectReference // Template is the object that describes the pod that will be created if // insufficient replicas are detected. Internally, this takes precedence over a // TemplateRef. // +optional - Template *PodTemplateSpec `json:"template,omitempty"` + Template *PodTemplateSpec } // ReplicationControllerStatus represents the current status of a replication // controller. type ReplicationControllerStatus struct { // Replicas is the number of actual replicas. - Replicas int32 `json:"replicas"` + Replicas int32 // The number of pods that have labels matching the labels of the pod template of the replication controller. // +optional - FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty"` + FullyLabeledReplicas int32 // The number of ready replicas for this replication controller. // +optional - ReadyReplicas int32 `json:"readyReplicas,omitempty"` + ReadyReplicas int32 // The number of available replicas (ready for at least minReadySeconds) for this replication controller. // +optional - AvailableReplicas int32 `json:"availableReplicas,omitempty"` + AvailableReplicas int32 // ObservedGeneration is the most recent generation observed by the controller. // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty"` + ObservedGeneration int64 // Represents the latest available observations of a replication controller's current state. // +optional - Conditions []ReplicationControllerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + Conditions []ReplicationControllerCondition } type ReplicationControllerConditionType string @@ -2063,45 +2336,45 @@ const ( // ReplicationControllerCondition describes the state of a replication controller at a certain point. type ReplicationControllerCondition struct { // Type of replication controller condition. - Type ReplicationControllerConditionType `json:"type"` + Type ReplicationControllerConditionType // Status of the condition, one of True, False, Unknown. - Status ConditionStatus `json:"status"` + Status ConditionStatus // The last time the condition transitioned from one status to another. // +optional - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"` + LastTransitionTime metav1.Time // The reason for the condition's last transition. // +optional - Reason string `json:"reason,omitempty"` + Reason string // A human readable message indicating details about the transition. 
// +optional - Message string `json:"message,omitempty"` + Message string } // +genclient=true // ReplicationController represents the configuration of a replication controller. type ReplicationController struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Spec defines the desired behavior of this replication controller. // +optional - Spec ReplicationControllerSpec `json:"spec,omitempty"` + Spec ReplicationControllerSpec // Status is the current status of this replication controller. This data may be // out of date by some window of time. // +optional - Status ReplicationControllerStatus `json:"status,omitempty"` + Status ReplicationControllerStatus } // ReplicationControllerList is a collection of replication controllers. type ReplicationControllerList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta - Items []ReplicationController `json:"items"` + Items []ReplicationController } const ( @@ -2112,11 +2385,11 @@ const ( // ServiceList holds a list of services. type ServiceList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta - Items []Service `json:"items"` + Items []Service } // Session Affinity Type string @@ -2158,7 +2431,7 @@ type ServiceStatus struct { // LoadBalancer contains the current status of the load-balancer, // if one is present. // +optional - LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty"` + LoadBalancer LoadBalancerStatus } // LoadBalancerStatus represents the status of a load-balancer @@ -2166,7 +2439,7 @@ type LoadBalancerStatus struct { // Ingress is a list containing ingress points for the load-balancer; // traffic intended for the service should be sent to these ingress points. // +optional - Ingress []LoadBalancerIngress `json:"ingress,omitempty"` + Ingress []LoadBalancerIngress } // LoadBalancerIngress represents the status of a load-balancer ingress point: @@ -2175,12 +2448,12 @@ type LoadBalancerIngress struct { // IP is set for load-balancer ingress points that are IP based // (typically GCE or OpenStack load-balancers) // +optional - IP string `json:"ip,omitempty"` + IP string // Hostname is set for load-balancer ingress points that are DNS based // (typically AWS load-balancers) // +optional - Hostname string `json:"hostname,omitempty"` + Hostname string } // ServiceSpec describes the attributes that a user creates on a service @@ -2200,10 +2473,10 @@ type ServiceSpec struct { // to the clusterIP. // More info: http://kubernetes.io/docs/user-guide/services#overview // +optional - Type ServiceType `json:"type,omitempty"` + Type ServiceType // Required: The list of ports that are exposed by this service. - Ports []ServicePort `json:"ports"` + Ports []ServicePort // Route service traffic to pods with label keys and values matching this // selector. If empty or not present, the service is assumed to have an @@ -2211,7 +2484,7 @@ type ServiceSpec struct { // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. // Ignored if type is ExternalName. // More info: http://kubernetes.io/docs/user-guide/services#overview - Selector map[string]string `json:"selector"` + Selector map[string]string // ClusterIP is the IP address of the service and is usually assigned // randomly by the master. 
If an address is specified manually and is not in @@ -2223,7 +2496,7 @@ type ServiceSpec struct { // type is ExternalName. // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies // +optional - ClusterIP string `json:"clusterIP,omitempty"` + ClusterIP string // ExternalName is the external reference that kubedns or equivalent will // return as a CNAME record for this service. No proxying will be involved. @@ -2233,7 +2506,7 @@ type ServiceSpec struct { // ExternalIPs are used by external load balancers, or can be set by // users to handle external traffic that arrives at a node. // +optional - ExternalIPs []string `json:"externalIPs,omitempty"` + ExternalIPs []string // Only applies to Service Type: LoadBalancer // LoadBalancer will get created with the IP specified in this field. @@ -2241,17 +2514,17 @@ type ServiceSpec struct { // the loadBalancerIP when a load balancer is created. // This field will be ignored if the cloud-provider does not support the feature. // +optional - LoadBalancerIP string `json:"loadBalancerIP,omitempty"` + LoadBalancerIP string // Optional: Supports "ClientIP" and "None". Used to maintain session affinity. // +optional - SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty"` + SessionAffinity ServiceAffinity // Optional: If specified and supported by the platform, this will restrict traffic through the cloud-provider // load-balancer will be restricted to the specified client IPs. This field will be ignored if the // cloud-provider does not support the feature." // +optional - LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty"` + LoadBalancerSourceRanges []string } type ServicePort struct { @@ -2259,13 +2532,13 @@ type ServicePort struct { // name of this port within the service. This must be a DNS_LABEL. // All ports within a ServiceSpec must have unique names. This maps to // the 'Name' field in EndpointPort objects. - Name string `json:"name"` + Name string // The IP protocol for this port. Supports "TCP" and "UDP". - Protocol Protocol `json:"protocol"` + Protocol Protocol // The port that will be exposed on the service. - Port int32 `json:"port"` + Port int32 // Optional: The target port on pods selected by this service. If this // is a string, it will be looked up as a named port in the target @@ -2273,11 +2546,11 @@ type ServicePort struct { // of the 'port' field is used (an identity map). // This field is ignored for services with clusterIP=None, and should be // omitted or set equal to the 'port' field. - TargetPort intstr.IntOrString `json:"targetPort"` + TargetPort intstr.IntOrString // The port on each node on which this service is exposed. // Default is to auto-allocate a port if the ServiceType of this Service requires one. - NodePort int32 `json:"nodePort"` + NodePort int32 } // +genclient=true @@ -2286,17 +2559,17 @@ type ServicePort struct { // (for example 3306) that the proxy listens on, and the selector that determines which pods // will answer requests sent through the proxy. type Service struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Spec defines the behavior of a service. // +optional - Spec ServiceSpec `json:"spec,omitempty"` + Spec ServiceSpec // Status represents the current status of a service. 
// +optional - Status ServiceStatus `json:"status,omitempty"` + Status ServiceStatus } // +genclient=true @@ -2306,27 +2579,32 @@ type Service struct { // * a principal that can be authenticated and authorized // * a set of secrets type ServiceAccount struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount - Secrets []ObjectReference `json:"secrets"` + Secrets []ObjectReference // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. // +optional - ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty"` + ImagePullSecrets []LocalObjectReference + + // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. + // Can be overridden at the pod level. + // +optional + AutomountServiceAccountToken *bool } // ServiceAccountList is a list of ServiceAccount objects type ServiceAccountList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta - Items []ServiceAccount `json:"items"` + Items []ServiceAccount } // +genclient=true @@ -2344,9 +2622,9 @@ type ServiceAccountList struct { // }, // ] type Endpoints struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // The set of all endpoints is the union of all subsets. Subsets []EndpointSubset @@ -2378,10 +2656,10 @@ type EndpointAddress struct { // Optional: Hostname of this endpoint // Meant to be used by DNS servers etc. // +optional - Hostname string `json:"hostname,omitempty"` + Hostname string // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. // +optional - NodeName *string `json:"nodeName,omitempty"` + NodeName *string // Optional: The kubernetes object related to the entry point. TargetRef *ObjectReference } @@ -2401,11 +2679,11 @@ type EndpointPort struct { // EndpointsList is a list of endpoints. type EndpointsList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta - Items []Endpoints `json:"items"` + Items []Endpoints } // NodeSpec describes the attributes that a node is created with. @@ -2413,20 +2691,24 @@ type NodeSpec struct { // PodCIDR represents the pod IP range assigned to the node // Note: assigning IP ranges to nodes might need to be revisited when we support migratable IPs. // +optional - PodCIDR string `json:"podCIDR,omitempty"` + PodCIDR string // External ID of the node assigned by some machine database (e.g. a cloud provider) // +optional - ExternalID string `json:"externalID,omitempty"` + ExternalID string // ID of the node assigned by the cloud provider // Note: format is "<ProviderName>://<ProviderSpecificNodeID>" // +optional - ProviderID string `json:"providerID,omitempty"` + ProviderID string // Unschedulable controls node schedulability of new pods. By default node is schedulable. // +optional - Unschedulable bool `json:"unschedulable,omitempty"` + Unschedulable bool + + // If specified, the node's taints.
+ // +optional + Taints []Taint } // DaemonEndpoint contains information about a single Daemon endpoint. @@ -2438,14 +2720,14 @@ type DaemonEndpoint struct { */ // Port number of the given endpoint. - Port int32 `json:"Port"` + Port int32 } // NodeDaemonEndpoints lists ports opened by daemons running on the Node. type NodeDaemonEndpoints struct { // Endpoint on which Kubelet is listening. // +optional - KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty"` + KubeletEndpoint DaemonEndpoint } // NodeSystemInfo is a set of ids/uuids to uniquely identify the node. @@ -2453,61 +2735,61 @@ type NodeSystemInfo struct { // MachineID reported by the node. For unique machine identification // in the cluster this field is prefered. Learn more from man(5) // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html - MachineID string `json:"machineID"` + MachineID string // SystemUUID reported by the node. For unique machine identification // MachineID is prefered. This field is specific to Red Hat hosts // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html - SystemUUID string `json:"systemUUID"` + SystemUUID string // Boot ID reported by the node. - BootID string `json:"bootID"` + BootID string // Kernel Version reported by the node. - KernelVersion string `json:"kernelVersion"` + KernelVersion string // OS Image reported by the node. - OSImage string `json:"osImage"` + OSImage string // ContainerRuntime Version reported by the node. - ContainerRuntimeVersion string `json:"containerRuntimeVersion"` + ContainerRuntimeVersion string // Kubelet Version reported by the node. - KubeletVersion string `json:"kubeletVersion"` + KubeletVersion string // KubeProxy Version reported by the node. - KubeProxyVersion string `json:"kubeProxyVersion"` + KubeProxyVersion string // The Operating System reported by the node - OperatingSystem string `json:"operatingSystem"` + OperatingSystem string // The Architecture reported by the node - Architecture string `json:"architecture"` + Architecture string } // NodeStatus is information about the current status of a node. type NodeStatus struct { // Capacity represents the total resources of a node. // +optional - Capacity ResourceList `json:"capacity,omitempty"` + Capacity ResourceList // Allocatable represents the resources of a node that are available for scheduling. // +optional - Allocatable ResourceList `json:"allocatable,omitempty"` + Allocatable ResourceList // NodePhase is the current lifecycle phase of the node. // +optional - Phase NodePhase `json:"phase,omitempty"` + Phase NodePhase // Conditions is an array of current node conditions. // +optional - Conditions []NodeCondition `json:"conditions,omitempty"` + Conditions []NodeCondition // Queried from cloud provider, if available. // +optional - Addresses []NodeAddress `json:"addresses,omitempty"` + Addresses []NodeAddress // Endpoints of daemons running on the Node. // +optional - DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty"` + DaemonEndpoints NodeDaemonEndpoints // Set of ids/uuids to uniquely identify the node. // +optional - NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty"` + NodeInfo NodeSystemInfo // List of container images on this node // +optional - Images []ContainerImage `json:"images,omitempty"` + Images []ContainerImage // List of attachable volumes in use (mounted) by the node. 
// +optional - VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty"` + VolumesInUse []UniqueVolumeName // List of volumes that are attached to the node. // +optional - VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty"` + VolumesAttached []AttachedVolume } type UniqueVolumeName string @@ -2515,10 +2797,10 @@ type UniqueVolumeName string // AttachedVolume describes a volume attached to a node type AttachedVolume struct { // Name of the attached volume - Name UniqueVolumeName `json:"name"` + Name UniqueVolumeName // DevicePath represents the device path where the volume should be available - DevicePath string `json:"devicePath"` + DevicePath string } // AvoidPods describes pods that should avoid this node. This is the value for a @@ -2528,22 +2810,22 @@ type AvoidPods struct { // Bounded-sized list of signatures of pods that should avoid this node, sorted // in timestamp order from oldest to newest. Size of the slice is unspecified. // +optional - PreferAvoidPods []PreferAvoidPodsEntry `json:"preferAvoidPods,omitempty"` + PreferAvoidPods []PreferAvoidPodsEntry } // Describes a class of pods that should avoid this node. type PreferAvoidPodsEntry struct { // The class of pods. - PodSignature PodSignature `json:"podSignature"` + PodSignature PodSignature // Time at which this entry was added to the list. // +optional - EvictionTime unversioned.Time `json:"evictionTime,omitempty"` + EvictionTime metav1.Time // (brief) reason why this entry was added to the list. // +optional - Reason string `json:"reason,omitempty"` + Reason string // Human readable message indicating why this entry was added to the list. // +optional - Message string `json:"message,omitempty"` + Message string } // Describes the class of pods that should avoid this node. @@ -2551,16 +2833,16 @@ type PreferAvoidPodsEntry struct { type PodSignature struct { // Reference to controller whose pods should avoid this node. // +optional - PodController *OwnerReference `json:"podController,omitempty"` + PodController *metav1.OwnerReference } // Describe a container image type ContainerImage struct { // Names by which this image is known. - Names []string `json:"names"` + Names []string // The size of the image in bytes. // +optional - SizeBytes int64 `json:"sizeBytes,omitempty"` + SizeBytes int64 } type NodePhase string @@ -2595,16 +2877,16 @@ const ( ) type NodeCondition struct { - Type NodeConditionType `json:"type"` - Status ConditionStatus `json:"status"` + Type NodeConditionType + Status ConditionStatus // +optional - LastHeartbeatTime unversioned.Time `json:"lastHeartbeatTime,omitempty"` + LastHeartbeatTime metav1.Time // +optional - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"` + LastTransitionTime metav1.Time // +optional - Reason string `json:"reason,omitempty"` + Reason string // +optional - Message string `json:"message,omitempty"` + Message string } type NodeAddressType string @@ -2617,11 +2899,13 @@ const ( NodeHostName NodeAddressType = "Hostname" NodeExternalIP NodeAddressType = "ExternalIP" NodeInternalIP NodeAddressType = "InternalIP" + NodeExternalDNS NodeAddressType = "ExternalDNS" + NodeInternalDNS NodeAddressType = "InternalDNS" ) type NodeAddress struct { - Type NodeAddressType `json:"type"` - Address string `json:"address"` + Type NodeAddressType + Address string } // NodeResources is an object for conveying resource information about a node. 
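The node hunks above add a Taints field to NodeSpec and two new address types, NodeExternalDNS and NodeInternalDNS, alongside the existing Hostname/IP types. A small illustrative helper follows, showing how code built against these internal types might consume the new address kinds; the function name and fallback order are assumptions (not part of the vendored change), and it assumes the same vendored `api` package import as the sketch after the PodSpec hunk.

    // firstAddress returns the first status address of the given type, if any.
    func firstAddress(node *api.Node, t api.NodeAddressType) (string, bool) {
        for _, a := range node.Status.Addresses {
            if a.Type == t {
                return a.Address, true
            }
        }
        return "", false
    }

    // Usage sketch: prefer the new internal DNS name, fall back to the internal IP.
    //   if name, ok := firstAddress(n, api.NodeInternalDNS); ok { use(name) } else
    //   if ip, ok := firstAddress(n, api.NodeInternalIP); ok { use(ip) }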
@@ -2629,7 +2913,7 @@ type NodeAddress struct { type NodeResources struct { // Capacity represents the available resources of a node // +optional - Capacity ResourceList `json:"capacity,omitempty"` + Capacity ResourceList } // ResourceName is the name identifying various resources in a ResourceList. @@ -2666,26 +2950,26 @@ type ResourceList map[ResourceName]resource.Quantity // Node is a worker node in Kubernetes // The name of the node according to etcd is in ObjectMeta.Name. type Node struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Spec defines the behavior of a node. // +optional - Spec NodeSpec `json:"spec,omitempty"` + Spec NodeSpec // Status describes the current status of a Node // +optional - Status NodeStatus `json:"status,omitempty"` + Status NodeStatus } // NodeList is a list of nodes. type NodeList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta - Items []Node `json:"items"` + Items []Node } // NamespaceSpec describes the attributes on a Namespace @@ -2694,19 +2978,20 @@ type NamespaceSpec struct { Finalizers []FinalizerName } +// FinalizerName is the name identifying a finalizer during namespace lifecycle. type FinalizerName string -// These are internal finalizer values to Kubernetes, must be qualified name unless defined here +// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or +// in metav1. const ( FinalizerKubernetes FinalizerName = "kubernetes" - FinalizerOrphan string = "orphan" ) // NamespaceStatus is information about the current status of a Namespace. type NamespaceStatus struct { // Phase is the current lifecycle phase of the namespace. // +optional - Phase NamespacePhase `json:"phase,omitempty"` + Phase NamespacePhase } type NamespacePhase string @@ -2725,80 +3010,96 @@ const ( // A namespace provides a scope for Names. // Use of multiple namespaces is optional type Namespace struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Spec defines the behavior of the Namespace. // +optional - Spec NamespaceSpec `json:"spec,omitempty"` + Spec NamespaceSpec // Status describes the current status of a Namespace // +optional - Status NamespaceStatus `json:"status,omitempty"` + Status NamespaceStatus } // NamespaceList is a list of Namespaces. type NamespaceList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta - Items []Namespace `json:"items"` + Items []Namespace } // Binding ties one object to another - for example, a pod is bound to a node by a scheduler. type Binding struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // ObjectMeta describes the object that is being bound. // +optional - ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Target is the object to bind to. - Target ObjectReference `json:"target"` + Target ObjectReference } // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. type Preconditions struct { // Specifies the target UID. // +optional - UID *types.UID `json:"uid,omitempty"` + UID *types.UID } +// DeletionPropagation decides whether and how garbage collection will be performed. +type DeletionPropagation string + +const ( + // Orphans the dependents. 
+ DeletePropagationOrphan DeletionPropagation = "Orphan" + // Deletes the object from the key-value store, the garbage collector will delete the dependents in the background. + DeletePropagationBackground DeletionPropagation = "Background" + // The object exists in the key-value store until the garbage collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the key-value store. + // API sever will put the "DeletingDependents" finalizer on the object, and sets its deletionTimestamp. + // This policy is cascading, i.e., the dependents will be deleted with Foreground. + DeletePropagationForeground DeletionPropagation = "Foreground" +) + // DeleteOptions may be provided when deleting an API object +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. type DeleteOptions struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // Optional duration in seconds before the object should be deleted. Value must be non-negative integer. // The value zero indicates delete immediately. If this value is nil, the default grace period for the // specified type will be used. // +optional - GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty"` + GracePeriodSeconds *int64 // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be // returned. // +optional - Preconditions *Preconditions `json:"preconditions,omitempty"` + Preconditions *Preconditions + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. // Should the dependent objects be orphaned. If true/false, the "orphan" // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. // +optional - OrphanDependents *bool `json:"orphanDependents,omitempty"` -} + OrphanDependents *bool -// ExportOptions is the query options to the standard REST get call. -type ExportOptions struct { - unversioned.TypeMeta `json:",inline"` - // Should this value be exported. Export strips fields that a user can not specify. - Export bool `json:"export"` - // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace' - Exact bool `json:"exact"` + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // +optional + PropagationPolicy *DeletionPropagation } // ListOptions is the query options to a standard REST list call, and has future support for // watch calls. +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. type ListOptions struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // A selector based on labels LabelSelector labels.Selector @@ -2806,11 +3107,12 @@ type ListOptions struct { FieldSelector fields.Selector // If true, watch for changes to this list Watch bool - // For watch, it's the resource version to watch. - // For list, + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: // - if unset, then the result is returned from remote storage based on quorum-read flag; // - if it's 0, then we simply return what we currently have in cache, no guarantee; - // - if set to non zero, then the result is as fresh as given rv. 
+ // - if set to non zero, then the result is at least as fresh as given rv. ResourceVersion string // Timeout for the list/watch call. TimeoutSeconds *int64 @@ -2818,7 +3120,7 @@ type ListOptions struct { // PodLogOptions is the query options for a Pod's logs REST call type PodLogOptions struct { - unversioned.TypeMeta + metav1.TypeMeta // Container for which to return logs Container string @@ -2835,7 +3137,7 @@ type PodLogOptions struct { // precedes the time a pod was started, only logs since the pod start will be returned. // If this value is in the future, no logs will be returned. // Only one of sinceSeconds or sinceTime may be specified. - SinceTime *unversioned.Time + SinceTime *metav1.Time // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line // of log output. Timestamps bool @@ -2851,32 +3153,32 @@ type PodLogOptions struct { // PodAttachOptions is the query options to a Pod's remote attach call // TODO: merge w/ PodExecOptions below for stdin, stdout, etc type PodAttachOptions struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // Stdin if true indicates that stdin is to be redirected for the attach call // +optional - Stdin bool `json:"stdin,omitempty"` + Stdin bool // Stdout if true indicates that stdout is to be redirected for the attach call // +optional - Stdout bool `json:"stdout,omitempty"` + Stdout bool // Stderr if true indicates that stderr is to be redirected for the attach call // +optional - Stderr bool `json:"stderr,omitempty"` + Stderr bool // TTY if true indicates that a tty will be allocated for the attach call // +optional - TTY bool `json:"tty,omitempty"` + TTY bool // Container to attach to. // +optional - Container string `json:"container,omitempty"` + Container string } // PodExecOptions is the query options to a Pod's remote exec call type PodExecOptions struct { - unversioned.TypeMeta + metav1.TypeMeta // Stdin if true indicates that stdin is to be redirected for the exec call Stdin bool @@ -2897,9 +3199,18 @@ type PodExecOptions struct { Command []string } +// PodPortForwardOptions is the query options to a Pod's port forward call +type PodPortForwardOptions struct { + metav1.TypeMeta + + // The list of ports to forward + // +optional + Ports []int32 +} + // PodProxyOptions is the query options to a Pod's proxy call type PodProxyOptions struct { - unversioned.TypeMeta + metav1.TypeMeta // Path is the URL path to use for the current proxy request Path string @@ -2907,7 +3218,7 @@ type PodProxyOptions struct { // NodeProxyOptions is the query options to a Node's proxy call type NodeProxyOptions struct { - unversioned.TypeMeta + metav1.TypeMeta // Path is the URL path to use for the current proxy request Path string @@ -2915,7 +3226,7 @@ type NodeProxyOptions struct { // ServiceProxyOptions is the query options to a Service's proxy call. type ServiceProxyOptions struct { - unversioned.TypeMeta + metav1.TypeMeta // Path is the part of URLs that include service endpoints, suffixes, // and parameters to use for the current proxy request to service. @@ -2925,40 +3236,20 @@ type ServiceProxyOptions struct { Path string } -// OwnerReference contains enough information to let you identify an owning -// object. Currently, an owning object must be in the same namespace, so there -// is no namespace field. -type OwnerReference struct { - // API version of the referent. - APIVersion string `json:"apiVersion"` - // Kind of the referent. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - Kind string `json:"kind"` - // Name of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names - Name string `json:"name"` - // UID of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids - UID types.UID `json:"uid"` - // If true, this reference points to the managing controller. - // +optional - Controller *bool `json:"controller,omitempty"` -} - // ObjectReference contains enough information to let you inspect or modify the referred object. type ObjectReference struct { // +optional - Kind string `json:"kind,omitempty"` + Kind string // +optional - Namespace string `json:"namespace,omitempty"` + Namespace string // +optional - Name string `json:"name,omitempty"` + Name string // +optional - UID types.UID `json:"uid,omitempty"` + UID types.UID // +optional - APIVersion string `json:"apiVersion,omitempty"` + APIVersion string // +optional - ResourceVersion string `json:"resourceVersion,omitempty"` + ResourceVersion string // Optional. If referring to a piece of an object instead of an entire object, this string // should contain information to identify the sub-object. For example, if the object @@ -2969,7 +3260,7 @@ type ObjectReference struct { // referencing a part of an object. // TODO: this design is not final and this field is subject to change in the future. // +optional - FieldPath string `json:"fieldPath,omitempty"` + FieldPath string } // LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. @@ -2979,18 +3270,18 @@ type LocalObjectReference struct { } type SerializedReference struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - Reference ObjectReference `json:"reference,omitempty"` + Reference ObjectReference } type EventSource struct { // Component from which the event is generated. // +optional - Component string `json:"component,omitempty"` + Component string // Node name on which the event is generated. // +optional - Host string `json:"host,omitempty"` + Host string } // Valid values for event types (new types could be added in future) @@ -3006,63 +3297,63 @@ const ( // Event is a report of an event somewhere in the cluster. // TODO: Decide whether to store these separately or with the object they apply to. type Event struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Required. The object that this event is about. // +optional - InvolvedObject ObjectReference `json:"involvedObject,omitempty"` + InvolvedObject ObjectReference // Optional; this should be a short, machine understandable string that gives the reason // for this event being generated. For example, if the event is reporting that a container // can't start, the Reason might be "ImageNotFound". // TODO: provide exact specification for format. // +optional - Reason string `json:"reason,omitempty"` + Reason string // Optional. A human-readable description of the status of this operation. // TODO: decide on maximum length. // +optional - Message string `json:"message,omitempty"` + Message string // Optional. The component reporting this event. Should be a short machine understandable string. // +optional - Source EventSource `json:"source,omitempty"` + Source EventSource // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) 
// +optional - FirstTimestamp unversioned.Time `json:"firstTimestamp,omitempty"` + FirstTimestamp metav1.Time // The time at which the most recent occurrence of this event was recorded. // +optional - LastTimestamp unversioned.Time `json:"lastTimestamp,omitempty"` + LastTimestamp metav1.Time // The number of times this event has occurred. // +optional - Count int32 `json:"count,omitempty"` + Count int32 // Type of this event (Normal, Warning), new types could be added in the future. // +optional - Type string `json:"type,omitempty"` + Type string } // EventList is a list of events. type EventList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta - Items []Event `json:"items"` + Items []Event } // List holds a list of objects, which may not be known by the server. type List struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta - Items []runtime.Object `json:"items"` + Items []runtime.Object } // A type of object that is limited @@ -3081,51 +3372,51 @@ const ( type LimitRangeItem struct { // Type of resource that this limit applies to // +optional - Type LimitType `json:"type,omitempty"` + Type LimitType // Max usage constraints on this kind by resource name // +optional - Max ResourceList `json:"max,omitempty"` + Max ResourceList // Min usage constraints on this kind by resource name // +optional - Min ResourceList `json:"min,omitempty"` + Min ResourceList // Default resource requirement limit value by resource name. // +optional - Default ResourceList `json:"default,omitempty"` + Default ResourceList // DefaultRequest resource requirement request value by resource name. // +optional - DefaultRequest ResourceList `json:"defaultRequest,omitempty"` + DefaultRequest ResourceList // MaxLimitRequestRatio represents the max burst value for the named resource // +optional - MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty"` + MaxLimitRequestRatio ResourceList } // LimitRangeSpec defines a min/max usage limit for resources that match on kind type LimitRangeSpec struct { // Limits is the list of LimitRangeItem objects that are enforced - Limits []LimitRangeItem `json:"limits"` + Limits []LimitRangeItem } // +genclient=true // LimitRange sets resource usage limits for each kind of resource in a Namespace type LimitRange struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Spec defines the limits enforced // +optional - Spec LimitRangeSpec `json:"spec,omitempty"` + Spec LimitRangeSpec } // LimitRangeList is a list of LimitRange items. type LimitRangeList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta // Items is a list of LimitRange objects - Items []LimitRange `json:"items"` + Items []LimitRange } // The following identify resource constants for Kubernetes object types @@ -3178,48 +3469,48 @@ const ( type ResourceQuotaSpec struct { // Hard is the set of desired hard limits for each named resource // +optional - Hard ResourceList `json:"hard,omitempty"` + Hard ResourceList // A collection of filters that must match each object tracked by a quota. // If not specified, the quota matches all objects. 
// +optional - Scopes []ResourceQuotaScope `json:"scopes,omitempty"` + Scopes []ResourceQuotaScope } // ResourceQuotaStatus defines the enforced hard limits and observed use type ResourceQuotaStatus struct { // Hard is the set of enforced hard limits for each named resource // +optional - Hard ResourceList `json:"hard,omitempty"` + Hard ResourceList // Used is the current observed total usage of the resource in the namespace // +optional - Used ResourceList `json:"used,omitempty"` + Used ResourceList } // +genclient=true // ResourceQuota sets aggregate quota restrictions enforced per namespace type ResourceQuota struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Spec defines the desired quota // +optional - Spec ResourceQuotaSpec `json:"spec,omitempty"` + Spec ResourceQuotaSpec // Status defines the actual enforced quota and its current usage // +optional - Status ResourceQuotaStatus `json:"status,omitempty"` + Status ResourceQuotaStatus } // ResourceQuotaList is a list of ResourceQuota items type ResourceQuotaList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta // Items is a list of ResourceQuota objects - Items []ResourceQuota `json:"items"` + Items []ResourceQuota } // +genclient=true @@ -3227,20 +3518,20 @@ type ResourceQuotaList struct { // Secret holds secret data of a certain type. The total bytes of the values in // the Data field must be less than MaxSecretSize bytes. type Secret struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN // or leading dot followed by valid DNS_SUBDOMAIN. // The serialized form of the secret data is a base64 encoded string, // representing the arbitrary (possibly non-string) data value here. // +optional - Data map[string][]byte `json:"data,omitempty"` + Data map[string][]byte // Used to facilitate programmatic handling of secret data. // +optional - Type SecretType `json:"type,omitempty"` + Type SecretType } const MaxSecretSize = 1 * 1024 * 1024 @@ -3328,35 +3619,35 @@ const ( ) type SecretList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta - Items []Secret `json:"items"` + Items []Secret } // +genclient=true // ConfigMap holds configuration data for components or applications to consume. type ConfigMap struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Data contains the configuration data. // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot. // +optional - Data map[string]string `json:"data,omitempty"` + Data map[string]string } // ConfigMapList is a resource containing a list of ConfigMap objects. type ConfigMapList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta // Items is the list of ConfigMaps. 
- Items []ConfigMap `json:"items"` + Items []ConfigMap } // These constants are for remote command execution and port forwarding and are @@ -3400,17 +3691,6 @@ const ( PortForwardRequestIDHeader = "requestID" ) -// Similarly to above, these are constants to support HTTP PATCH utilized by -// both the client and server that didn't make sense for a whole package to be -// dedicated to. -type PatchType string - -const ( - JSONPatchType PatchType = "application/json-patch+json" - MergePatchType PatchType = "application/merge-patch+json" - StrategicMergePatchType PatchType = "application/strategic-merge-patch+json" -) - // Type and constants for component health validation. type ComponentConditionType string @@ -3420,12 +3700,12 @@ const ( ) type ComponentCondition struct { - Type ComponentConditionType `json:"type"` - Status ConditionStatus `json:"status"` + Type ComponentConditionType + Status ConditionStatus // +optional - Message string `json:"message,omitempty"` + Message string // +optional - Error string `json:"error,omitempty"` + Error string } // +genclient=true @@ -3433,20 +3713,20 @@ type ComponentCondition struct { // ComponentStatus (and ComponentStatusList) holds the cluster validation info. type ComponentStatus struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // +optional - Conditions []ComponentCondition `json:"conditions,omitempty"` + Conditions []ComponentCondition } type ComponentStatusList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta - Items []ComponentStatus `json:"items"` + Items []ComponentStatus } // SecurityContext holds security configuration that will be applied to a container. @@ -3456,24 +3736,24 @@ type SecurityContext struct { // The capabilities to add/drop when running containers. // Defaults to the default set of capabilities granted by the container runtime. // +optional - Capabilities *Capabilities `json:"capabilities,omitempty"` + Capabilities *Capabilities // Run container in privileged mode. // Processes in privileged containers are essentially equivalent to root on the host. // Defaults to false. // +optional - Privileged *bool `json:"privileged,omitempty"` + Privileged *bool // The SELinux context to be applied to the container. // If unspecified, the container runtime will allocate a random SELinux context for each // container. May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. // +optional - SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty"` + SELinuxOptions *SELinuxOptions // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. // +optional - RunAsUser *int64 `json:"runAsUser,omitempty"` + RunAsUser *int64 // Indicates that the container must run as a non-root user. // If true, the Kubelet will validate the image at runtime to ensure that it // does not run as UID 0 (root) and fail to start the container if it does. @@ -3481,27 +3761,27 @@ type SecurityContext struct { // May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. 
// +optional - RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"` + RunAsNonRoot *bool // The read-only root filesystem allows you to restrict the locations that an application can write // files to, ensuring the persistent data can only be written to mounts. // +optional - ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty"` + ReadOnlyRootFilesystem *bool } // SELinuxOptions are the labels to be applied to the container. type SELinuxOptions struct { // SELinux user label // +optional - User string `json:"user,omitempty"` + User string // SELinux role label // +optional - Role string `json:"role,omitempty"` + Role string // SELinux type label // +optional - Type string `json:"type,omitempty"` + Type string // SELinux level label. // +optional - Level string `json:"level,omitempty"` + Level string } // RangeAllocation is an opaque API object (not exposed to end users) that can be persisted to record @@ -3512,18 +3792,18 @@ type SELinuxOptions struct { // data encoding hints). A range allocation should *ALWAYS* be recreatable at any time by observation // of the cluster, thus the object is less strongly typed than most. type RangeAllocation struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // A string representing a unique label for a range of resources, such as a CIDR "10.0.0.0/8" or // port range "10000-30000". Range is not strongly schema'd here. The Range is expected to define // a start and end unless there is an implicit end. - Range string `json:"range"` + Range string // A byte array representing the serialized state of a range allocation. Additional clarifiers on // the type or format of data should be represented with annotations. For IP allocations, this is // represented as a bit array starting at the base IP of the CIDR in Range, with each bit representing // a single allocated address (the fifth bit on CIDR 10.0.0.0/8 is 10.0.0.4). - Data []byte `json:"data"` + Data []byte } const ( @@ -3538,5 +3818,5 @@ const ( // When the --failure-domains scheduler flag is not specified, // DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity. 
- DefaultFailureDomains string = unversioned.LabelHostname + "," + unversioned.LabelZoneFailureDomain + "," + unversioned.LabelZoneRegion + DefaultFailureDomains string = metav1.LabelHostname + "," + metav1.LabelZoneFailureDomain + "," + metav1.LabelZoneRegion ) diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/BUILD b/vendor/k8s.io/kubernetes/pkg/api/unversioned/BUILD deleted file mode 100644 index a41681a864..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/BUILD +++ /dev/null @@ -1,58 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "duration.go", - "generated.pb.go", - "group_version.go", - "helpers.go", - "meta.go", - "register.go", - "time.go", - "time_proto.go", - "types.go", - "types_swagger_doc_generated.go", - "well_known_labels.go", - "zz_generated.deepcopy.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/conversion:go_default_library", - "//pkg/genericapiserver/openapi/common:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/selection:go_default_library", - "//vendor:github.com/go-openapi/spec", - "//vendor:github.com/gogo/protobuf/proto", - "//vendor:github.com/gogo/protobuf/sortkeys", - "//vendor:github.com/google/gofuzz", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "duration_test.go", - "group_version_test.go", - "helpers_test.go", - "time_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/labels:go_default_library", - "//vendor:github.com/ghodss/yaml", - "//vendor:github.com/ugorji/go/codec", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/doc.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/doc.go deleted file mode 100644 index 720c686a90..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true -// +k8s:defaulter-gen=TypeMeta - -package unversioned diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/duration.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/duration.go deleted file mode 100644 index ed54e515da..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/duration.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "encoding/json" - "time" -) - -// Duration is a wrapper around time.Duration which supports correct -// marshaling to YAML and JSON. In particular, it marshals into strings, which -// can be used as map keys in json. -type Duration struct { - time.Duration `protobuf:"varint,1,opt,name=duration,casttype=time.Duration"` -} - -// UnmarshalJSON implements the json.Unmarshaller interface. -func (d *Duration) UnmarshalJSON(b []byte) error { - var str string - json.Unmarshal(b, &str) - - pd, err := time.ParseDuration(str) - if err != nil { - return err - } - d.Duration = pd - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (d Duration) MarshalJSON() ([]byte, error) { - return json.Marshal(d.Duration.String()) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.pb.go deleted file mode 100644 index a93366bbdf..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.pb.go +++ /dev/null @@ -1,4537 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/api/unversioned/generated.proto -// DO NOT EDIT! - -/* - Package unversioned is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/api/unversioned/generated.proto - - It has these top-level messages: - APIGroup - APIGroupList - APIResource - APIResourceList - APIVersions - Duration - ExportOptions - GroupKind - GroupResource - GroupVersion - GroupVersionForDiscovery - GroupVersionKind - GroupVersionResource - LabelSelector - LabelSelectorRequirement - ListMeta - RootPaths - ServerAddressByClientCIDR - Status - StatusCause - StatusDetails - Time - Timestamp - TypeMeta -*/ -package unversioned - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import time "time" - -import strings "strings" -import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-const _ = proto.GoGoProtoPackageIsVersion1 - -func (m *APIGroup) Reset() { *m = APIGroup{} } -func (*APIGroup) ProtoMessage() {} -func (*APIGroup) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *APIGroupList) Reset() { *m = APIGroupList{} } -func (*APIGroupList) ProtoMessage() {} -func (*APIGroupList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *APIResource) Reset() { *m = APIResource{} } -func (*APIResource) ProtoMessage() {} -func (*APIResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *APIResourceList) Reset() { *m = APIResourceList{} } -func (*APIResourceList) ProtoMessage() {} -func (*APIResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *APIVersions) Reset() { *m = APIVersions{} } -func (*APIVersions) ProtoMessage() {} -func (*APIVersions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *Duration) Reset() { *m = Duration{} } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *ExportOptions) Reset() { *m = ExportOptions{} } -func (*ExportOptions) ProtoMessage() {} -func (*ExportOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *GroupKind) Reset() { *m = GroupKind{} } -func (*GroupKind) ProtoMessage() {} -func (*GroupKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *GroupResource) Reset() { *m = GroupResource{} } -func (*GroupResource) ProtoMessage() {} -func (*GroupResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *GroupVersion) Reset() { *m = GroupVersion{} } -func (*GroupVersion) ProtoMessage() {} -func (*GroupVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } -func (*GroupVersionForDiscovery) ProtoMessage() {} -func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{10} -} - -func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } -func (*GroupVersionKind) ProtoMessage() {} -func (*GroupVersionKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } - -func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } -func (*GroupVersionResource) ProtoMessage() {} -func (*GroupVersionResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } - -func (m *LabelSelector) Reset() { *m = LabelSelector{} } -func (*LabelSelector) ProtoMessage() {} -func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } - -func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } -func (*LabelSelectorRequirement) ProtoMessage() {} -func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{14} -} - -func (m *ListMeta) Reset() { *m = ListMeta{} } -func (*ListMeta) ProtoMessage() {} -func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } - -func (m *RootPaths) Reset() { *m = RootPaths{} } -func (*RootPaths) ProtoMessage() {} -func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } - -func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } -func 
(*ServerAddressByClientCIDR) ProtoMessage() {} -func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{17} -} - -func (m *Status) Reset() { *m = Status{} } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } - -func (m *StatusCause) Reset() { *m = StatusCause{} } -func (*StatusCause) ProtoMessage() {} -func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } - -func (m *StatusDetails) Reset() { *m = StatusDetails{} } -func (*StatusDetails) ProtoMessage() {} -func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } - -func (m *Time) Reset() { *m = Time{} } -func (*Time) ProtoMessage() {} -func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } - -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } - -func (m *TypeMeta) Reset() { *m = TypeMeta{} } -func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } - -func init() { - proto.RegisterType((*APIGroup)(nil), "k8s.io.kubernetes.pkg.api.unversioned.APIGroup") - proto.RegisterType((*APIGroupList)(nil), "k8s.io.kubernetes.pkg.api.unversioned.APIGroupList") - proto.RegisterType((*APIResource)(nil), "k8s.io.kubernetes.pkg.api.unversioned.APIResource") - proto.RegisterType((*APIResourceList)(nil), "k8s.io.kubernetes.pkg.api.unversioned.APIResourceList") - proto.RegisterType((*APIVersions)(nil), "k8s.io.kubernetes.pkg.api.unversioned.APIVersions") - proto.RegisterType((*Duration)(nil), "k8s.io.kubernetes.pkg.api.unversioned.Duration") - proto.RegisterType((*ExportOptions)(nil), "k8s.io.kubernetes.pkg.api.unversioned.ExportOptions") - proto.RegisterType((*GroupKind)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupKind") - proto.RegisterType((*GroupResource)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupResource") - proto.RegisterType((*GroupVersion)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupVersion") - proto.RegisterType((*GroupVersionForDiscovery)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupVersionForDiscovery") - proto.RegisterType((*GroupVersionKind)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupVersionKind") - proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupVersionResource") - proto.RegisterType((*LabelSelector)(nil), "k8s.io.kubernetes.pkg.api.unversioned.LabelSelector") - proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.kubernetes.pkg.api.unversioned.LabelSelectorRequirement") - proto.RegisterType((*ListMeta)(nil), "k8s.io.kubernetes.pkg.api.unversioned.ListMeta") - proto.RegisterType((*RootPaths)(nil), "k8s.io.kubernetes.pkg.api.unversioned.RootPaths") - proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.kubernetes.pkg.api.unversioned.ServerAddressByClientCIDR") - proto.RegisterType((*Status)(nil), "k8s.io.kubernetes.pkg.api.unversioned.Status") - proto.RegisterType((*StatusCause)(nil), "k8s.io.kubernetes.pkg.api.unversioned.StatusCause") - proto.RegisterType((*StatusDetails)(nil), "k8s.io.kubernetes.pkg.api.unversioned.StatusDetails") - proto.RegisterType((*Time)(nil), "k8s.io.kubernetes.pkg.api.unversioned.Time") - proto.RegisterType((*Timestamp)(nil), "k8s.io.kubernetes.pkg.api.unversioned.Timestamp") - 
proto.RegisterType((*TypeMeta)(nil), "k8s.io.kubernetes.pkg.api.unversioned.TypeMeta") -} -func (m *APIGroup) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *APIGroup) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - if len(m.Versions) > 0 { - for _, msg := range m.Versions { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.PreferredVersion.Size())) - n1, err := m.PreferredVersion.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, msg := range m.ServerAddressByClientCIDRs { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *APIGroupList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *APIGroupList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Groups) > 0 { - for _, msg := range m.Groups { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *APIResource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *APIResource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x10 - i++ - if m.Namespaced { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - return i, nil -} - -func (m *APIResourceList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *APIResourceList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.GroupVersion))) - i += copy(data[i:], m.GroupVersion) - if len(m.APIResources) > 0 { - for _, msg := range m.APIResources { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *APIVersions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *APIVersions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Versions) > 0 { - for _, s := range m.Versions { - data[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - data[i] = 
uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, msg := range m.ServerAddressByClientCIDRs { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *Duration) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Duration) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Duration)) - return i, nil -} - -func (m *ExportOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ExportOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - if m.Export { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x10 - i++ - if m.Exact { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *GroupKind) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *GroupKind) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Group))) - i += copy(data[i:], m.Group) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - return i, nil -} - -func (m *GroupResource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *GroupResource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Group))) - i += copy(data[i:], m.Group) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Resource))) - i += copy(data[i:], m.Resource) - return i, nil -} - -func (m *GroupVersion) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *GroupVersion) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Group))) - i += copy(data[i:], m.Group) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Version))) - i += copy(data[i:], m.Version) - return i, nil -} - -func (m *GroupVersionForDiscovery) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *GroupVersionForDiscovery) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.GroupVersion))) - i += copy(data[i:], m.GroupVersion) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Version))) - i += copy(data[i:], m.Version) - return 
i, nil -} - -func (m *GroupVersionKind) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *GroupVersionKind) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Group))) - i += copy(data[i:], m.Group) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Version))) - i += copy(data[i:], m.Version) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - return i, nil -} - -func (m *GroupVersionResource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *GroupVersionResource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Group))) - i += copy(data[i:], m.Group) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Version))) - i += copy(data[i:], m.Version) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Resource))) - i += copy(data[i:], m.Resource) - return i, nil -} - -func (m *LabelSelector) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LabelSelector) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.MatchLabels) > 0 { - for k := range m.MatchLabels { - data[i] = 0xa - i++ - v := m.MatchLabels[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - if len(m.MatchExpressions) > 0 { - for _, msg := range m.MatchExpressions { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *LabelSelectorRequirement) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LabelSelectorRequirement) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Key))) - i += copy(data[i:], m.Key) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) - i += copy(data[i:], m.Operator) - if len(m.Values) > 0 { - for _, s := range m.Values { - data[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - return i, nil -} - -func (m *ListMeta) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ListMeta) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] 
= 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.SelfLink))) - i += copy(data[i:], m.SelfLink) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) - i += copy(data[i:], m.ResourceVersion) - return i, nil -} - -func (m *RootPaths) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *RootPaths) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Paths) > 0 { - for _, s := range m.Paths { - data[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - return i, nil -} - -func (m *ServerAddressByClientCIDR) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ServerAddressByClientCIDR) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ClientCIDR))) - i += copy(data[i:], m.ClientCIDR) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ServerAddress))) - i += copy(data[i:], m.ServerAddress) - return i, nil -} - -func (m *Status) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Status) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i += copy(data[i:], m.Status) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - if m.Details != nil { - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Details.Size())) - n3, err := m.Details.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - } - data[i] = 0x30 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Code)) - return i, nil -} - -func (m *StatusCause) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *StatusCause) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Field))) - i += copy(data[i:], m.Field) - return i, nil -} - -func (m *StatusDetails) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *StatusDetails) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = 
encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Group))) - i += copy(data[i:], m.Group) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - if len(m.Causes) > 0 { - for _, msg := range m.Causes { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(m.RetryAfterSeconds)) - return i, nil -} - -func (m *Timestamp) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Timestamp) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Seconds)) - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Nanos)) - return i, nil -} - -func (m *TypeMeta) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *TypeMeta) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) - i += copy(data[i:], m.APIVersion) - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *APIGroup) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Versions) > 0 { - for _, e := range m.Versions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.PreferredVersion.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, e := range m.ServerAddressByClientCIDRs { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *APIGroupList) Size() (n int) { - var l int - _ = l - if len(m.Groups) > 0 { - for _, e := range m.Groups { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *APIResource) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *APIResourceList) Size() (n int) { - var l int - _ = l - l = len(m.GroupVersion) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.APIResources) > 0 { - for _, e := range m.APIResources { - l = e.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - } - } - return n -} - -func (m *APIVersions) Size() (n int) { - var l int - _ = l - if len(m.Versions) > 0 { - for _, s := range m.Versions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, e := range m.ServerAddressByClientCIDRs { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Duration) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Duration)) - return n -} - -func (m *ExportOptions) Size() (n int) { - var l int - _ = l - n += 2 - n += 2 - return n -} - -func (m *GroupKind) Size() (n int) { - var l int - _ = l - l = len(m.Group) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *GroupResource) Size() (n int) { - var l int - _ = l - l = len(m.Group) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Resource) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *GroupVersion) Size() (n int) { - var l int - _ = l - l = len(m.Group) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Version) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *GroupVersionForDiscovery) Size() (n int) { - var l int - _ = l - l = len(m.GroupVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Version) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *GroupVersionKind) Size() (n int) { - var l int - _ = l - l = len(m.Group) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Version) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *GroupVersionResource) Size() (n int) { - var l int - _ = l - l = len(m.Group) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Version) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Resource) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *LabelSelector) Size() (n int) { - var l int - _ = l - if len(m.MatchLabels) > 0 { - for k, v := range m.MatchLabels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.MatchExpressions) > 0 { - for _, e := range m.MatchExpressions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *LabelSelectorRequirement) Size() (n int) { - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operator) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ListMeta) Size() (n int) { - var l int - _ = l - l = len(m.SelfLink) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ResourceVersion) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *RootPaths) Size() (n int) { - var l int - _ = l - if len(m.Paths) > 0 { - for _, s := range m.Paths { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ServerAddressByClientCIDR) Size() (n int) { - var l int - _ = l - l = len(m.ClientCIDR) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ServerAddress) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Status) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + 
sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - if m.Details != nil { - l = m.Details.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 1 + sovGenerated(uint64(m.Code)) - return n -} - -func (m *StatusCause) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Field) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *StatusDetails) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Group) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Causes) > 0 { - for _, e := range m.Causes { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 1 + sovGenerated(uint64(m.RetryAfterSeconds)) - return n -} - -func (m *Timestamp) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Seconds)) - n += 1 + sovGenerated(uint64(m.Nanos)) - return n -} - -func (m *TypeMeta) Size() (n int) { - var l int - _ = l - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *APIGroup) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&APIGroup{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, - `PreferredVersion:` + strings.Replace(strings.Replace(this.PreferredVersion.String(), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, - `ServerAddressByClientCIDRs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServerAddressByClientCIDRs), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *APIGroupList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&APIGroupList{`, - `Groups:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Groups), "APIGroup", "APIGroup", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *APIResource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&APIResource{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespaced:` + fmt.Sprintf("%v", this.Namespaced) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `}`, - }, "") - return s -} -func (this *APIResourceList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&APIResourceList{`, - `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, - `APIResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.APIResources), "APIResource", "APIResource", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Duration) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Duration{`, - `Duration:` + fmt.Sprintf("%v", this.Duration) + `,`, - `}`, - }, "") - return s -} -func (this *ExportOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExportOptions{`, - `Export:` + fmt.Sprintf("%v", this.Export) + 
`,`, - `Exact:` + fmt.Sprintf("%v", this.Exact) + `,`, - `}`, - }, "") - return s -} -func (this *GroupVersionForDiscovery) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GroupVersionForDiscovery{`, - `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, - `Version:` + fmt.Sprintf("%v", this.Version) + `,`, - `}`, - }, "") - return s -} -func (this *LabelSelector) String() string { - if this == nil { - return "nil" - } - keysForMatchLabels := make([]string, 0, len(this.MatchLabels)) - for k := range this.MatchLabels { - keysForMatchLabels = append(keysForMatchLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels) - mapStringForMatchLabels := "map[string]string{" - for _, k := range keysForMatchLabels { - mapStringForMatchLabels += fmt.Sprintf("%v: %v,", k, this.MatchLabels[k]) - } - mapStringForMatchLabels += "}" - s := strings.Join([]string{`&LabelSelector{`, - `MatchLabels:` + mapStringForMatchLabels + `,`, - `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "LabelSelectorRequirement", "LabelSelectorRequirement", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *LabelSelectorRequirement) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelSelectorRequirement{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s -} -func (this *ListMeta) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListMeta{`, - `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, - `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, - `}`, - }, "") - return s -} -func (this *RootPaths) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RootPaths{`, - `Paths:` + fmt.Sprintf("%v", this.Paths) + `,`, - `}`, - }, "") - return s -} -func (this *ServerAddressByClientCIDR) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServerAddressByClientCIDR{`, - `ClientCIDR:` + fmt.Sprintf("%v", this.ClientCIDR) + `,`, - `ServerAddress:` + fmt.Sprintf("%v", this.ServerAddress) + `,`, - `}`, - }, "") - return s -} -func (this *Status) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Status{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Details:` + strings.Replace(fmt.Sprintf("%v", this.Details), "StatusDetails", "StatusDetails", 1) + `,`, - `Code:` + fmt.Sprintf("%v", this.Code) + `,`, - `}`, - }, "") - return s -} -func (this *StatusCause) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatusCause{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Field:` + fmt.Sprintf("%v", this.Field) + `,`, - `}`, - }, "") - return s -} -func (this *StatusDetails) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatusDetails{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Group:` + fmt.Sprintf("%v", this.Group) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Causes:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Causes), "StatusCause", "StatusCause", 1), `&`, ``, 1) + `,`, - `RetryAfterSeconds:` + fmt.Sprintf("%v", this.RetryAfterSeconds) + `,`, - `}`, - }, "") - return s -} -func (this *Timestamp) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Timestamp{`, - `Seconds:` + fmt.Sprintf("%v", this.Seconds) + `,`, - `Nanos:` + fmt.Sprintf("%v", this.Nanos) + `,`, - `}`, - }, "") - return s -} -func (this *TypeMeta) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TypeMeta{`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *APIGroup) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: APIGroup: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: APIGroup: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Versions = append(m.Versions, GroupVersionForDiscovery{}) - if err := m.Versions[len(m.Versions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreferredVersion", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.PreferredVersion.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - 
} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) - if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *APIGroupList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: APIGroupList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: APIGroupList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, APIGroup{}) - if err := m.Groups[len(m.Groups)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *APIResource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: APIResource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: APIResource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespaced", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Namespaced = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *APIResourceList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: APIResourceList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: APIResourceList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GroupVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIResources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF 
- } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIResources = append(m.APIResources, APIResource{}) - if err := m.APIResources[len(m.APIResources)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *APIVersions) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: APIVersions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: APIVersions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Versions = append(m.Versions, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) - if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Duration) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) 
<< shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Duration: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) - } - m.Duration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Duration |= (time.Duration(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExportOptions) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Export", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Export = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Exact = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GroupKind) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupKind: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupKind: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Group = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GroupResource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupResource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupResource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Group = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resource = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GroupVersion) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupVersion: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersion: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Group = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GroupVersionForDiscovery) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupVersionForDiscovery: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersionForDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - 
} - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GroupVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GroupVersionKind) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupVersionKind: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersionKind: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Group = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GroupVersionResource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Group = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resource = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += 
skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelSelector) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - if m.MatchLabels == nil { - m.MatchLabels = make(map[string]string) - } - m.MatchLabels[mapkey] = mapvalue - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { 
- break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{}) - if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelSelectorRequirement) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Operator = LabelSelectorOperator(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Values = append(m.Values, string(data[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - 
return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListMeta) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SelfLink = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RootPaths) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RootPaths: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RootPaths: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Paths = append(m.Paths, string(data[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServerAddressByClientCIDR) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServerAddressByClientCIDR: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServerAddressByClientCIDR: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientCIDR", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientCIDR = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerAddress", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServerAddress = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Status) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Status: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = StatusReason(data[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Details == nil { - m.Details = &StatusDetails{} - } - if err := m.Details.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) - } - m.Code = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Code |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - 
default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusCause) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusCause: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusCause: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = CauseType(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Field", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Field = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusDetails) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= 
(uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusDetails: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusDetails: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Group = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Causes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Causes = append(m.Causes, StatusCause{}) - if err := m.Causes[len(m.Causes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RetryAfterSeconds", wireType) - } - m.RetryAfterSeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.RetryAfterSeconds |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Timestamp) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) - } - m.Seconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Seconds |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) - } - m.Nanos = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Nanos |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TypeMeta) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - 
} - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -var fileDescriptorGenerated = []byte{ - // 1390 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0xda, 0xb1, 0xbb, 0x7e, 0x8e, 0x49, 0xba, 0xa4, 0x62, 0x1b, 0x09, 0xdb, 0x6c, 0x05, - 0x4a, 0xa5, 0xd6, 0x56, 0x23, 0x40, 0x55, 0x11, 0x7f, 0xe2, 0x24, 0xad, 0xa2, 0x36, 0x6d, 0x34, - 0xa9, 0x8a, 0xd4, 0x16, 0x89, 0x8d, 0x77, 0xe2, 0xac, 0x6c, 0xef, 0x2e, 0x33, 0xb3, 0x51, 0x2d, - 0x90, 0xe8, 0xa5, 0x12, 0x07, 0x84, 0x7a, 0xe4, 0x02, 0x6a, 0xa5, 0x7e, 0x03, 0xbe, 0x44, 0xc5, - 0xa9, 0x17, 0x24, 0x0e, 0xc8, 0xa2, 0xe1, 0xc2, 0x95, 0x6b, 0x4e, 0x68, 0x66, 0x67, 0xd6, 0xbb, - 0x6e, 0x4d, 0x36, 0xd0, 0x03, 0x27, 0xef, 0xfb, 0xff, 0xe6, 0xbd, 0xdf, 0xbc, 0x37, 0x86, 0xf7, - 0x7a, 0x17, 0x69, 0xd3, 0xf5, 0x5b, 0xbd, 0x70, 0x07, 0x13, 0x0f, 0x33, 0x4c, 0x5b, 0x41, 0xaf, - 0xdb, 0xb2, 0x03, 0xb7, 0x15, 0x7a, 0xfb, 0x98, 0x50, 0xd7, 0xf7, 0xb0, 0xd3, 0xea, 0x62, 0x0f, - 0x13, 0x9b, 0x61, 0xa7, 0x19, 0x10, 0x9f, 0xf9, 
0xc6, 0xdb, 0x91, 0x59, 0x73, 0x6c, 0xd6, 0x0c, - 0x7a, 0xdd, 0xa6, 0x1d, 0xb8, 0xcd, 0x84, 0xd9, 0xe2, 0xf9, 0xae, 0xcb, 0xf6, 0xc2, 0x9d, 0x66, - 0xc7, 0x1f, 0xb4, 0xba, 0x7e, 0xd7, 0x6f, 0x09, 0xeb, 0x9d, 0x70, 0x57, 0x50, 0x82, 0x10, 0x5f, - 0x91, 0xd7, 0xc5, 0xf3, 0x2f, 0x4f, 0x86, 0x84, 0x1e, 0x73, 0x07, 0x78, 0x32, 0x89, 0xc5, 0x0b, - 0x2f, 0x57, 0x0f, 0x99, 0xdb, 0x6f, 0xb9, 0x1e, 0xa3, 0x8c, 0x4c, 0x9a, 0x58, 0x3f, 0x17, 0x40, - 0x5f, 0xd9, 0xda, 0xb8, 0x42, 0xfc, 0x30, 0x30, 0x1a, 0x30, 0xe3, 0xd9, 0x03, 0x6c, 0x6a, 0x0d, - 0x6d, 0xa9, 0xdc, 0x9e, 0x7d, 0x3a, 0xaa, 0xe7, 0x0e, 0x46, 0xf5, 0x99, 0xeb, 0xf6, 0x00, 0x23, - 0x21, 0x31, 0x06, 0xa0, 0xcb, 0xc3, 0x50, 0x33, 0xdf, 0x28, 0x2c, 0x55, 0x96, 0x3f, 0x6e, 0x66, - 0x3a, 0x79, 0x53, 0x44, 0xb8, 0x15, 0x91, 0x97, 0x7d, 0xb2, 0xe6, 0xd2, 0x8e, 0xbf, 0x8f, 0xc9, - 0xb0, 0x3d, 0x2f, 0xc3, 0xe8, 0x52, 0x48, 0x51, 0x1c, 0xc2, 0x78, 0xa0, 0xc1, 0x7c, 0x40, 0xf0, - 0x2e, 0x26, 0x04, 0x3b, 0x52, 0x6e, 0x16, 0x1a, 0xda, 0xab, 0x88, 0x6b, 0xca, 0xb8, 0xf3, 0x5b, - 0x13, 0x01, 0xd0, 0x0b, 0x21, 0x8d, 0x27, 0x1a, 0x2c, 0x52, 0x4c, 0xf6, 0x31, 0x59, 0x71, 0x1c, - 0x82, 0x29, 0x6d, 0x0f, 0x57, 0xfb, 0x2e, 0xf6, 0xd8, 0xea, 0xc6, 0x1a, 0xa2, 0xe6, 0x8c, 0xa8, - 0xc4, 0x27, 0x19, 0x33, 0xda, 0x9e, 0xe6, 0xa8, 0x6d, 0xc9, 0x94, 0x16, 0xa7, 0xaa, 0x50, 0xf4, - 0x0f, 0x79, 0x58, 0x5d, 0x98, 0x55, 0xbd, 0xbc, 0xe6, 0x52, 0x66, 0x7c, 0x0a, 0xa5, 0x2e, 0x27, - 0xa8, 0xa9, 0x89, 0x0c, 0x5b, 0x19, 0x33, 0x54, 0x4e, 0xda, 0xaf, 0xc9, 0x84, 0x4a, 0x82, 0xa4, - 0x48, 0xba, 0xb3, 0x1e, 0x68, 0x50, 0x59, 0xd9, 0xda, 0x40, 0x98, 0xfa, 0x21, 0xe9, 0xe0, 0x0c, - 0xc0, 0x59, 0x06, 0xe0, 0xbf, 0x34, 0xb0, 0x3b, 0xd8, 0x31, 0xf3, 0x0d, 0x6d, 0x49, 0x6f, 0x1b, - 0x52, 0x0f, 0xae, 0xc7, 0x12, 0x94, 0xd0, 0xe2, 0x5e, 0x7b, 0xae, 0xe7, 0x88, 0x86, 0x27, 0xbc, - 0x5e, 0x75, 0x3d, 0x07, 0x09, 0x89, 0xf5, 0x93, 0x06, 0x73, 0x89, 0x3c, 0xc4, 0xa1, 0x2f, 0xc2, - 0x6c, 0x37, 0xd1, 0x73, 0x99, 0xd3, 0x82, 0xb4, 0x9e, 0x4d, 0xe2, 0x01, 0xa5, 0x34, 0x8d, 0x5d, - 0x28, 0x13, 0xe9, 0x49, 0xa1, 0x7b, 0x39, 0x7b, 0xc5, 0x54, 0x12, 0xe3, 0x50, 0x09, 0x26, 0x45, - 0x63, 0xd7, 0xd6, 0x9f, 0x51, 0xf5, 0x14, 0xde, 0x8d, 0xa5, 0xc4, 0xa5, 0xe2, 0x8d, 0x2a, 0xb7, - 0x67, 0xa7, 0xdc, 0x87, 0x23, 0x70, 0x98, 0xff, 0x7f, 0xe0, 0xf0, 0x92, 0xfe, 0xfd, 0xa3, 0x7a, - 0xee, 0xfe, 0x6f, 0x8d, 0x9c, 0xb5, 0x01, 0xfa, 0x5a, 0x48, 0x6c, 0xc6, 0xcb, 0xfb, 0x21, 0xe8, - 0x8e, 0xfc, 0x16, 0x4d, 0x29, 0xb4, 0xdf, 0x52, 0x57, 0x5f, 0xe9, 0x1c, 0x8e, 0xea, 0x55, 0x3e, - 0xd9, 0x9a, 0x8a, 0x81, 0x62, 0x13, 0xeb, 0x2e, 0x54, 0xd7, 0xef, 0x05, 0x3e, 0x61, 0x37, 0x02, - 0x26, 0x8a, 0xf1, 0x0e, 0x94, 0xb0, 0x60, 0x08, 0x6f, 0xfa, 0x18, 0xac, 0x91, 0x1a, 0x92, 0x52, - 0xe3, 0x0c, 0x14, 0xf1, 0x3d, 0xbb, 0xc3, 0x24, 0xea, 0xaa, 0x52, 0xad, 0xb8, 0xce, 0x99, 0x28, - 0x92, 0x59, 0x77, 0xa1, 0x2c, 0x90, 0xc1, 0xc1, 0xc5, 0x2d, 0x04, 0x30, 0x24, 0x76, 0x62, 0x0b, - 0xa1, 0x81, 0x22, 0x59, 0x8c, 0xce, 0xfc, 0x34, 0x74, 0x26, 0xca, 0xd0, 0x87, 0x6a, 0x64, 0xab, - 0x2e, 0x4c, 0xa6, 0x08, 0xe7, 0x40, 0x57, 0xa0, 0x91, 0x51, 0xe2, 0x59, 0xa9, 0x1c, 0xa1, 0x58, - 0x23, 0x11, 0x6d, 0x0f, 0x52, 0x28, 0xcf, 0x16, 0xec, 0x2c, 0x9c, 0x90, 0xd0, 0x90, 0xb1, 0xe6, - 0xa4, 0xda, 0x09, 0x75, 0x59, 0x94, 0x3c, 0x11, 0xe9, 0x6b, 0x30, 0xa7, 0xcd, 0xd7, 0xff, 0x70, - 0x0f, 0xb3, 0xa7, 0x62, 0x7d, 0xa7, 0xc1, 0x7c, 0xd2, 0x53, 0xf6, 0xf6, 0x65, 0x0f, 0x72, 0xf4, - 0x1c, 0x4a, 0x54, 0xe4, 0x47, 0x0d, 0x16, 0x52, 0x47, 0x3b, 0x56, 0xc7, 0x8f, 0x91, 0x54, 0x12, - 0x1c, 0x85, 0x63, 0x80, 0xe3, 0x97, 0x3c, 0x54, 0xaf, 0xd9, 0x3b, 0xb8, 
0xbf, 0x8d, 0xfb, 0xb8, - 0xc3, 0x7c, 0x62, 0x7c, 0x05, 0x95, 0x81, 0xcd, 0x3a, 0x7b, 0x82, 0xab, 0x56, 0xc5, 0x7a, 0xc6, - 0x21, 0x92, 0x72, 0xd5, 0xdc, 0x1c, 0xfb, 0x59, 0xf7, 0x18, 0x19, 0xb6, 0x5f, 0x97, 0x39, 0x55, - 0x12, 0x12, 0x94, 0x0c, 0x27, 0x56, 0xbc, 0xa0, 0xd7, 0xef, 0x05, 0x7c, 0x92, 0xfc, 0x8b, 0xa7, - 0x45, 0x2a, 0x07, 0x84, 0xbf, 0x08, 0x5d, 0x82, 0x07, 0xd8, 0x63, 0xe3, 0x15, 0xbf, 0x39, 0x11, - 0x00, 0xbd, 0x10, 0x72, 0xf1, 0x23, 0x98, 0x9f, 0xcc, 0xde, 0x98, 0x87, 0x42, 0x0f, 0x0f, 0xa3, - 0x8e, 0x21, 0xfe, 0x69, 0x2c, 0x40, 0x71, 0xdf, 0xee, 0x87, 0xf2, 0x3e, 0xa2, 0x88, 0xb8, 0x94, - 0xbf, 0xa8, 0x59, 0x4f, 0x34, 0x30, 0xa7, 0x25, 0x62, 0xbc, 0x99, 0x70, 0xd4, 0xae, 0xc8, 0xac, - 0x0a, 0x57, 0xf1, 0x30, 0xf2, 0xba, 0x0e, 0xba, 0x1f, 0xf0, 0x67, 0x99, 0x4f, 0x64, 0xdf, 0xcf, - 0xaa, 0x5e, 0xde, 0x90, 0xfc, 0xc3, 0x51, 0xfd, 0x54, 0xca, 0xbd, 0x12, 0xa0, 0xd8, 0xd4, 0xb0, - 0xa0, 0x24, 0xf2, 0xa1, 0x66, 0x41, 0x6c, 0x11, 0xe0, 0xc3, 0xf0, 0x96, 0xe0, 0x20, 0x29, 0xb1, - 0xbe, 0x04, 0x9d, 0x6f, 0xc9, 0x4d, 0xcc, 0x6c, 0x0e, 0x21, 0x8a, 0xfb, 0xbb, 0xd7, 0x5c, 0xaf, - 0x27, 0x53, 0x8b, 0x21, 0xb4, 0x2d, 0xf9, 0x28, 0xd6, 0x30, 0x56, 0x60, 0x4e, 0xc1, 0xe9, 0x56, - 0x0a, 0xa3, 0x6f, 0x48, 0xa3, 0x39, 0x94, 0x16, 0xa3, 0x49, 0x7d, 0xeb, 0x1c, 0x94, 0x91, 0xef, - 0xb3, 0x2d, 0x9b, 0xed, 0x51, 0xa3, 0x0e, 0xc5, 0x80, 0x7f, 0xc8, 0x95, 0x57, 0xe6, 0x97, 0x41, - 0x48, 0x50, 0xc4, 0xb7, 0xbe, 0xd5, 0xe0, 0xf4, 0xd4, 0x05, 0xc4, 0x1f, 0x14, 0x9d, 0x98, 0x92, - 0xe9, 0xc7, 0x0f, 0x8a, 0xb1, 0x1e, 0x4a, 0x68, 0x19, 0x1f, 0x40, 0x35, 0xb5, 0xb5, 0xe4, 0x01, - 0x4e, 0x49, 0xb3, 0x6a, 0x2a, 0x1a, 0x4a, 0xeb, 0x5a, 0x7f, 0xe5, 0xa1, 0xb4, 0xcd, 0x6c, 0x16, - 0x52, 0xe3, 0x33, 0xd0, 0x07, 0x98, 0xd9, 0x8e, 0xcd, 0x6c, 0x11, 0x39, 0xfb, 0xcb, 0x4a, 0xd5, - 0x7e, 0x5c, 0x69, 0xc5, 0x41, 0xb1, 0x4b, 0xbe, 0xd8, 0xa8, 0x08, 0x24, 0xf3, 0x8b, 0x17, 0x5b, - 0x14, 0x1e, 0x49, 0x29, 0x9f, 0x16, 0x03, 0x4c, 0xa9, 0xdd, 0x55, 0x13, 0x20, 0x9e, 0x16, 0x9b, - 0x11, 0x1b, 0x29, 0xb9, 0xf1, 0x3e, 0x94, 0x08, 0xb6, 0xa9, 0xef, 0x99, 0x33, 0x42, 0xb3, 0xa6, - 0x5c, 0x22, 0xc1, 0x3d, 0x1c, 0xd5, 0x67, 0xa5, 0x73, 0x41, 0x23, 0xa9, 0x6d, 0xdc, 0x81, 0x13, - 0x0e, 0x66, 0xb6, 0xdb, 0xa7, 0x66, 0x51, 0x1c, 0xf4, 0xdd, 0xac, 0x8f, 0x0b, 0xe1, 0x6d, 0x2d, - 0xb2, 0x6d, 0x57, 0x78, 0x52, 0x92, 0x40, 0xca, 0x23, 0x9f, 0xab, 0x1d, 0xdf, 0xc1, 0x66, 0xa9, - 0xa1, 0x2d, 0x15, 0xc7, 0x73, 0x75, 0xd5, 0x77, 0x30, 0x12, 0x12, 0xeb, 0xa1, 0x06, 0x95, 0xc8, - 0xd3, 0xaa, 0x1d, 0x52, 0x6c, 0x5c, 0x88, 0x8f, 0x11, 0x35, 0xfc, 0xb4, 0xb2, 0xb9, 0x39, 0x0c, - 0xf0, 0xe1, 0xa8, 0x5e, 0x16, 0x6a, 0x9c, 0x88, 0x4f, 0x90, 0x28, 0x52, 0xfe, 0x88, 0x22, 0x9d, - 0x81, 0xe2, 0xae, 0x8b, 0xfb, 0x6a, 0xd0, 0xc7, 0x23, 0xfa, 0x32, 0x67, 0xa2, 0x48, 0x66, 0xfd, - 0x90, 0x87, 0x6a, 0xea, 0x70, 0x19, 0x1e, 0xbf, 0xf1, 0xec, 0xcf, 0x67, 0x78, 0x4f, 0x4c, 0xdd, - 0x32, 0xc6, 0x6d, 0x28, 0x75, 0xf8, 0xf9, 0xd4, 0x1f, 0x8e, 0xe5, 0x63, 0xf5, 0x42, 0x94, 0x66, - 0x8c, 0x25, 0x41, 0x52, 0x24, 0x3d, 0x1a, 0x57, 0xe0, 0x24, 0xc1, 0x8c, 0x0c, 0x57, 0x76, 0x19, - 0x26, 0xdb, 0xb8, 0xe3, 0x7b, 0x4e, 0xd4, 0xf2, 0x62, 0x5c, 0xe4, 0x93, 0x68, 0x52, 0x01, 0xbd, - 0x68, 0x63, 0xf5, 0x61, 0xe6, 0xa6, 0x3b, 0xc0, 0xbc, 0xee, 0x54, 0xba, 0x89, 0x1e, 0x7b, 0x71, - 0xdd, 0x95, 0xb1, 0x92, 0xf3, 0xf2, 0x78, 0xb6, 0xe7, 0x47, 0x70, 0x2f, 0x8e, 0xcb, 0x73, 0x9d, - 0x33, 0x51, 0x24, 0xbb, 0xb4, 0xc0, 0x37, 0xd8, 0x37, 0x8f, 0xeb, 0xb9, 0x87, 0x8f, 0xeb, 0xb9, - 0x47, 0x8f, 0xe5, 0x36, 0xbb, 0x03, 0x65, 0x1e, 0x8d, 0x32, 0x7b, 0x10, 0xbc, 0xea, 0x90, 0xd6, - 
0xe7, 0xa0, 0x73, 0x28, 0x89, 0x59, 0xa9, 0xba, 0xa3, 0x4d, 0xed, 0xce, 0x32, 0x80, 0x1d, 0xb8,
-	0xe9, 0xd1, 0x18, 0x0f, 0xa4, 0xf1, 0x73, 0x1f, 0x25, 0xb4, 0xda, 0xe7, 0x9f, 0x3e, 0xaf, 0xe5,
-	0x9e, 0x3d, 0xaf, 0xe5, 0x7e, 0x7d, 0x5e, 0xcb, 0xdd, 0x3f, 0xa8, 0x69, 0x4f, 0x0f, 0x6a, 0xda,
-	0xb3, 0x83, 0x9a, 0xf6, 0xfb, 0x41, 0x4d, 0x7b, 0xf8, 0x47, 0x2d, 0x77, 0xbb, 0x92, 0x68, 0xe4,
-	0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0a, 0x22, 0x00, 0xde, 0x9c, 0x10, 0x00, 0x00,
-}
diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.proto b/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.proto
deleted file mode 100644
index 91d3fbf651..0000000000
--- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.proto
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-
-// This file was autogenerated by go-to-protobuf. Do not edit it manually!
-
-syntax = 'proto2';
-
-package k8s.io.kubernetes.pkg.api.unversioned;
-
-import "k8s.io/kubernetes/pkg/runtime/generated.proto";
-import "k8s.io/kubernetes/pkg/util/intstr/generated.proto";
-
-// Package-wide variables from generator "generated".
-option go_package = "unversioned";
-
-// APIGroup contains the name, the supported versions, and the preferred version
-// of a group.
-message APIGroup {
-  // name is the name of the group.
-  optional string name = 1;
-
-  // versions are the versions supported in this group.
-  repeated GroupVersionForDiscovery versions = 2;
-
-  // preferredVersion is the version preferred by the API server, which
-  // probably is the storage version.
-  // +optional
-  optional GroupVersionForDiscovery preferredVersion = 3;
-
-  // a map of client CIDR to server address that is serving this group.
-  // This is to help clients reach servers in the most network-efficient way possible.
-  // Clients can use the appropriate server address as per the CIDR that they match.
-  // In case of multiple matches, clients should use the longest matching CIDR.
-  // The server returns only those CIDRs that it thinks that the client can match.
-  // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP.
-  // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
-  repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4;
-}
-
-// APIGroupList is a list of APIGroup, to allow clients to discover the API at
-// /apis.
-message APIGroupList {
-  // groups is a list of APIGroup.
-  repeated APIGroup groups = 1;
-}
-
-// APIResource specifies the name of a resource and whether it is namespaced.
-message APIResource {
-  // name is the name of the resource.
-  optional string name = 1;
-
-  // namespaced indicates if a resource is namespaced or not.
-  optional bool namespaced = 2;
-
-  // kind is the kind for the resource (e.g.
'Foo' is the kind for a resource 'foo') - optional string kind = 3; -} - -// APIResourceList is a list of APIResource, it is used to expose the name of the -// resources supported in a specific group and version, and if the resource -// is namespaced. -message APIResourceList { - // groupVersion is the group and version this APIResourceList is for. - optional string groupVersion = 1; - - // resources contains the name of the resources and if they are namespaced. - repeated APIResource resources = 2; -} - -// APIVersions lists the versions that are available, to allow clients to -// discover the API at /api, which is the root path of the legacy v1 API. -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message APIVersions { - // versions are the api versions that are available. - repeated string versions = 1; - - // a map of client CIDR to server address that is serving this group. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - // The server returns only those CIDRs that it thinks that the client can match. - // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. - // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. - repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 2; -} - -// Duration is a wrapper around time.Duration which supports correct -// marshaling to YAML and JSON. In particular, it marshals into strings, which -// can be used as map keys in json. -message Duration { - optional int64 duration = 1; -} - -// ExportOptions is the query options to the standard REST get call. -message ExportOptions { - // Should this value be exported. Export strips fields that a user can not specify.` - optional bool export = 1; - - // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace' - optional bool exact = 2; -} - -// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying -// concepts during lookup stages without having partially valid types -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message GroupKind { - optional string group = 1; - - optional string kind = 2; -} - -// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying -// concepts during lookup stages without having partially valid types -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message GroupResource { - optional string group = 1; - - optional string resource = 2; -} - -// GroupVersion contains the "group" and the "version", which uniquely identifies the API. -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message GroupVersion { - optional string group = 1; - - optional string version = 2; -} - -// GroupVersion contains the "group/version" and "version" string of a version. -// It is made a struct to keep extensibility. -message GroupVersionForDiscovery { - // groupVersion specifies the API group and version in the form "group/version" - optional string groupVersion = 1; - - // version specifies the version in the form of "version". This is to save - // the clients the trouble of splitting the GroupVersion. 
- optional string version = 2; -} - -// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message GroupVersionKind { - optional string group = 1; - - optional string version = 2; - - optional string kind = 3; -} - -// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message GroupVersionResource { - optional string group = 1; - - optional string version = 2; - - optional string resource = 3; -} - -// A label selector is a label query over a set of resources. The result of matchLabels and -// matchExpressions are ANDed. An empty label selector matches all objects. A null -// label selector matches no objects. -message LabelSelector { - // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - // map is equivalent to an element of matchExpressions, whose key field is "key", the - // operator is "In", and the values array contains only "value". The requirements are ANDed. - // +optional - map matchLabels = 1; - - // matchExpressions is a list of label selector requirements. The requirements are ANDed. - // +optional - repeated LabelSelectorRequirement matchExpressions = 2; -} - -// A label selector requirement is a selector that contains values, a key, and an operator that -// relates the key and values. -message LabelSelectorRequirement { - // key is the label key that the selector applies to. - optional string key = 1; - - // operator represents a key's relationship to a set of values. - // Valid operators ard In, NotIn, Exists and DoesNotExist. - optional string operator = 2; - - // values is an array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. This array is replaced during a strategic - // merge patch. - // +optional - repeated string values = 3; -} - -// ListMeta describes metadata that synthetic resources must have, including lists and -// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. -message ListMeta { - // SelfLink is a URL representing this object. - // Populated by the system. - // Read-only. - // +optional - optional string selfLink = 1; - - // String that identifies the server's internal version of this object that - // can be used by clients to determine when objects have changed. - // Value must be treated as opaque by clients and passed unmodified back to the server. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency - // +optional - optional string resourceVersion = 2; -} - -// RootPaths lists the paths available at root. -// For example: "/healthz", "/apis". -message RootPaths { - // paths are the paths available at root. - repeated string paths = 1; -} - -// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. -message ServerAddressByClientCIDR { - // The CIDR with which clients can match their IP to figure out the server address that they should use. 
- optional string clientCIDR = 1; - - // Address of this server, suitable for a client that matches the above CIDR. - // This can be a hostname, hostname:port, IP or IP:port. - optional string serverAddress = 2; -} - -// Status is a return value for calls that don't return other objects. -message Status { - // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - // +optional - optional ListMeta metadata = 1; - - // Status of the operation. - // One of: "Success" or "Failure". - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - optional string status = 2; - - // A human-readable description of the status of this operation. - // +optional - optional string message = 3; - - // A machine-readable description of why this operation is in the - // "Failure" status. If this value is empty there - // is no information available. A Reason clarifies an HTTP status - // code but does not override it. - // +optional - optional string reason = 4; - - // Extended data associated with the reason. Each reason may define its - // own extended details. This field is optional and the data returned - // is not guaranteed to conform to any schema except that defined by - // the reason type. - // +optional - optional StatusDetails details = 5; - - // Suggested HTTP return code for this status, 0 if not set. - // +optional - optional int32 code = 6; -} - -// StatusCause provides more information about an api.Status failure, including -// cases when multiple errors are encountered. -message StatusCause { - // A machine-readable description of the cause of the error. If this value is - // empty there is no information available. - // +optional - optional string reason = 1; - - // A human-readable description of the cause of the error. This field may be - // presented as-is to a reader. - // +optional - optional string message = 2; - - // The field of the resource that has caused this error, as named by its JSON - // serialization. May include dot and postfix notation for nested attributes. - // Arrays are zero-indexed. Fields may appear more than once in an array of - // causes due to fields having multiple errors. - // Optional. - // - // Examples: - // "name" - the field "name" on the current resource - // "items[0].name" - the field "name" on the first array entry in "items" - // +optional - optional string field = 3; -} - -// StatusDetails is a set of additional properties that MAY be set by the -// server to provide additional information about a response. The Reason -// field of a Status object defines what attributes will be set. Clients -// must ignore fields that do not match the defined type of each attribute, -// and should assume that any attribute may be empty, invalid, or under -// defined. -message StatusDetails { - // The name attribute of the resource associated with the status StatusReason - // (when there is a single name which can be described). - // +optional - optional string name = 1; - - // The group attribute of the resource associated with the status StatusReason. - // +optional - optional string group = 2; - - // The kind attribute of the resource associated with the status StatusReason. - // On some operations may differ from the requested resource Kind. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - // +optional - optional string kind = 3; - - // The Causes array includes more details associated with the StatusReason - // failure. 
Not all StatusReasons may provide detailed causes. - // +optional - repeated StatusCause causes = 4; - - // If specified, the time in seconds before the operation should be retried. - // +optional - optional int32 retryAfterSeconds = 5; -} - -// Time is a wrapper around time.Time which supports correct -// marshaling to YAML and JSON. Wrappers are provided for many -// of the factory methods that the time package offers. -// -// +protobuf.options.marshal=false -// +protobuf.as=Timestamp -// +protobuf.options.(gogoproto.goproto_stringer)=false -message Time { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - optional int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. This field may be limited in precision depending on context. - optional int32 nanos = 2; -} - -// Timestamp is a struct that is equivalent to Time, but intended for -// protobuf marshalling/unmarshalling. It is generated into a serialization -// that matches Time. Do not use in Go structs. -message Timestamp { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - optional int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. This field may be limited in precision depending on context. - optional int32 nanos = 2; -} - -// TypeMeta describes an individual object in an API response or request -// with strings representing the type of the object and its API schema version. -// Structures that are versioned or persisted should inline TypeMeta. -message TypeMeta { - // Kind is a string value representing the REST resource this object represents. - // Servers may infer this from the endpoint the client submits requests to. - // Cannot be updated. - // In CamelCase. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - // +optional - optional string kind = 1; - - // APIVersion defines the versioned schema of this representation of an object. - // Servers should convert recognized schemas to the latest internal value, and - // may reject unrecognized values. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources - // +optional - optional string apiVersion = 2; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/group_version.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/group_version.go deleted file mode 100644 index db842affe6..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/group_version.go +++ /dev/null @@ -1,345 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "encoding/json" - "fmt" - "strings" -) - -// ParseResourceArg takes the common style of string which may be either `resource.group.com` or `resource.version.group.com` -// and parses it out into both possibilities. This code takes no responsibility for knowing which representation was intended -// but with a knowledge of all GroupVersions, calling code can take a very good guess. If there are only two segments, then -// `*GroupVersionResource` is nil. -// `resource.group.com` -> `group=com, version=group, resource=resource` and `group=group.com, resource=resource` -func ParseResourceArg(arg string) (*GroupVersionResource, GroupResource) { - var gvr *GroupVersionResource - if strings.Count(arg, ".") >= 2 { - s := strings.SplitN(arg, ".", 3) - gvr = &GroupVersionResource{Group: s[2], Version: s[1], Resource: s[0]} - } - - return gvr, ParseGroupResource(arg) -} - -// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying -// concepts during lookup stages without having partially valid types -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type GroupResource struct { - Group string `protobuf:"bytes,1,opt,name=group"` - Resource string `protobuf:"bytes,2,opt,name=resource"` -} - -func (gr GroupResource) WithVersion(version string) GroupVersionResource { - return GroupVersionResource{Group: gr.Group, Version: version, Resource: gr.Resource} -} - -func (gr GroupResource) Empty() bool { - return len(gr.Group) == 0 && len(gr.Resource) == 0 -} - -func (gr *GroupResource) String() string { - if len(gr.Group) == 0 { - return gr.Resource - } - return gr.Resource + "." + gr.Group -} - -// ParseGroupResource turns "resource.group" string into a GroupResource struct. Empty strings are allowed -// for each field. -func ParseGroupResource(gr string) GroupResource { - if i := strings.Index(gr, "."); i == -1 { - return GroupResource{Resource: gr} - } else { - return GroupResource{Group: gr[i+1:], Resource: gr[:i]} - } -} - -// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type GroupVersionResource struct { - Group string `protobuf:"bytes,1,opt,name=group"` - Version string `protobuf:"bytes,2,opt,name=version"` - Resource string `protobuf:"bytes,3,opt,name=resource"` -} - -func (gvr GroupVersionResource) Empty() bool { - return len(gvr.Group) == 0 && len(gvr.Version) == 0 && len(gvr.Resource) == 0 -} - -func (gvr GroupVersionResource) GroupResource() GroupResource { - return GroupResource{Group: gvr.Group, Resource: gvr.Resource} -} - -func (gvr GroupVersionResource) GroupVersion() GroupVersion { - return GroupVersion{Group: gvr.Group, Version: gvr.Version} -} - -func (gvr *GroupVersionResource) String() string { - return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") -} - -// GroupKind specifies a Group and a Kind, but does not force a version. 
This is useful for identifying -// concepts during lookup stages without having partially valid types -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type GroupKind struct { - Group string `protobuf:"bytes,1,opt,name=group"` - Kind string `protobuf:"bytes,2,opt,name=kind"` -} - -func (gk GroupKind) Empty() bool { - return len(gk.Group) == 0 && len(gk.Kind) == 0 -} - -func (gk GroupKind) WithVersion(version string) GroupVersionKind { - return GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind} -} - -func (gk *GroupKind) String() string { - if len(gk.Group) == 0 { - return gk.Kind - } - return gk.Kind + "." + gk.Group -} - -// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type GroupVersionKind struct { - Group string `protobuf:"bytes,1,opt,name=group"` - Version string `protobuf:"bytes,2,opt,name=version"` - Kind string `protobuf:"bytes,3,opt,name=kind"` -} - -// Empty returns true if group, version, and kind are empty -func (gvk GroupVersionKind) Empty() bool { - return len(gvk.Group) == 0 && len(gvk.Version) == 0 && len(gvk.Kind) == 0 -} - -func (gvk GroupVersionKind) GroupKind() GroupKind { - return GroupKind{Group: gvk.Group, Kind: gvk.Kind} -} - -func (gvk GroupVersionKind) GroupVersion() GroupVersion { - return GroupVersion{Group: gvk.Group, Version: gvk.Version} -} - -func (gvk GroupVersionKind) String() string { - return gvk.Group + "/" + gvk.Version + ", Kind=" + gvk.Kind -} - -// GroupVersion contains the "group" and the "version", which uniquely identifies the API. -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type GroupVersion struct { - Group string `protobuf:"bytes,1,opt,name=group"` - Version string `protobuf:"bytes,2,opt,name=version"` -} - -// Empty returns true if group and version are empty -func (gv GroupVersion) Empty() bool { - return len(gv.Group) == 0 && len(gv.Version) == 0 -} - -// String puts "group" and "version" into a single "group/version" string. For the legacy v1 -// it returns "v1". -func (gv GroupVersion) String() string { - // special case the internal apiVersion for the legacy kube types - if gv.Empty() { - return "" - } - - // special case of "v1" for backward compatibility - if len(gv.Group) == 0 && gv.Version == "v1" { - return gv.Version - } - if len(gv.Group) > 0 { - return gv.Group + "/" + gv.Version - } - return gv.Version -} - -// KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false -// if none of the options match the group. It prefers a match to group and version over just group. -// TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme. -// TODO: Introduce an adapter type between GroupVersion and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) -// in fewer places. -func (gv GroupVersion) KindForGroupVersionKinds(kinds []GroupVersionKind) (target GroupVersionKind, ok bool) { - for _, gvk := range kinds { - if gvk.Group == gv.Group && gvk.Version == gv.Version { - return gvk, true - } - } - for _, gvk := range kinds { - if gvk.Group == gv.Group { - return gv.WithKind(gvk.Kind), true - } - } - return GroupVersionKind{}, false -} - -// ParseGroupVersion turns "group/version" string into a GroupVersion struct. It reports error -// if it cannot parse the string. 
-func ParseGroupVersion(gv string) (GroupVersion, error) { - // this can be the internal version for the legacy kube types - // TODO once we've cleared the last uses as strings, this special case should be removed. - if (len(gv) == 0) || (gv == "/") { - return GroupVersion{}, nil - } - - switch strings.Count(gv, "/") { - case 0: - return GroupVersion{"", gv}, nil - case 1: - i := strings.Index(gv, "/") - return GroupVersion{gv[:i], gv[i+1:]}, nil - default: - return GroupVersion{}, fmt.Errorf("unexpected GroupVersion string: %v", gv) - } -} - -// WithKind creates a GroupVersionKind based on the method receiver's GroupVersion and the passed Kind. -func (gv GroupVersion) WithKind(kind string) GroupVersionKind { - return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind} -} - -// WithResource creates a GroupVersionResource based on the method receiver's GroupVersion and the passed Resource. -func (gv GroupVersion) WithResource(resource string) GroupVersionResource { - return GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: resource} -} - -// MarshalJSON implements the json.Marshaller interface. -func (gv GroupVersion) MarshalJSON() ([]byte, error) { - s := gv.String() - if strings.Count(s, "/") > 1 { - return []byte{}, fmt.Errorf("illegal GroupVersion %v: contains more than one /", s) - } - return json.Marshal(s) -} - -func (gv *GroupVersion) unmarshal(value []byte) error { - var s string - if err := json.Unmarshal(value, &s); err != nil { - return err - } - parsed, err := ParseGroupVersion(s) - if err != nil { - return err - } - *gv = parsed - return nil -} - -// UnmarshalJSON implements the json.Unmarshaller interface. -func (gv *GroupVersion) UnmarshalJSON(value []byte) error { - return gv.unmarshal(value) -} - -// UnmarshalTEXT implements the Ugorji's encoding.TextUnmarshaler interface. -func (gv *GroupVersion) UnmarshalText(value []byte) error { - return gv.unmarshal(value) -} - -// GroupVersions can be used to represent a set of desired group versions. -// TODO: Move GroupVersions to a package under pkg/runtime, since it's used by scheme. -// TODO: Introduce an adapter type between GroupVersions and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) -// in fewer places. -type GroupVersions []GroupVersion - -// KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false -// if none of the options match the group. -func (gvs GroupVersions) KindForGroupVersionKinds(kinds []GroupVersionKind) (GroupVersionKind, bool) { - var targets []GroupVersionKind - for _, gv := range gvs { - target, ok := gv.KindForGroupVersionKinds(kinds) - if !ok { - continue - } - targets = append(targets, target) - } - if len(targets) == 1 { - return targets[0], true - } - if len(targets) > 1 { - return bestMatch(kinds, targets), true - } - return GroupVersionKind{}, false -} - -// bestMatch tries to pick best matching GroupVersionKind and falls back to the first -// found if no exact match exists. -func bestMatch(kinds []GroupVersionKind, targets []GroupVersionKind) GroupVersionKind { - for _, gvk := range targets { - for _, k := range kinds { - if k == gvk { - return k - } - } - } - return targets[0] -} - -// ToAPIVersionAndKind is a convenience method for satisfying runtime.Object on types that -// do not use TypeMeta. 
-func (gvk *GroupVersionKind) ToAPIVersionAndKind() (string, string) { - if gvk == nil { - return "", "" - } - return gvk.GroupVersion().String(), gvk.Kind -} - -// FromAPIVersionAndKind returns a GVK representing the provided fields for types that -// do not use TypeMeta. This method exists to support test types and legacy serializations -// that have a distinct group and kind. -// TODO: further reduce usage of this method. -func FromAPIVersionAndKind(apiVersion, kind string) GroupVersionKind { - if gv, err := ParseGroupVersion(apiVersion); err == nil { - return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind} - } - return GroupVersionKind{Kind: kind} -} - -// All objects that are serialized from a Scheme encode their type information. This interface is used -// by serialization to set type information from the Scheme onto the serialized version of an object. -// For objects that cannot be serialized or have unique requirements, this interface may be a no-op. -// TODO: this belongs in pkg/runtime, move unversioned.GVK into runtime. -type ObjectKind interface { - // SetGroupVersionKind sets or clears the intended serialized kind of an object. Passing kind nil - // should clear the current setting. - SetGroupVersionKind(kind GroupVersionKind) - // GroupVersionKind returns the stored group, version, and kind of an object, or nil if the object does - // not expose or provide these fields. - GroupVersionKind() GroupVersionKind -} - -// EmptyObjectKind implements the ObjectKind interface as a noop -// TODO: this belongs in pkg/runtime, move unversioned.GVK into runtime. -var EmptyObjectKind = emptyObjectKind{} - -type emptyObjectKind struct{} - -// SetGroupVersionKind implements the ObjectKind interface -func (emptyObjectKind) SetGroupVersionKind(gvk GroupVersionKind) {} - -// GroupVersionKind implements the ObjectKind interface -func (emptyObjectKind) GroupVersionKind() GroupVersionKind { return GroupVersionKind{} } diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/helpers.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/helpers.go deleted file mode 100644 index aed3f0edac..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/helpers.go +++ /dev/null @@ -1,183 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package unversioned - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/selection" -) - -// LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements -// labels.Selector -// Note: This function should be kept in sync with the selector methods in pkg/labels/selector.go -func LabelSelectorAsSelector(ps *LabelSelector) (labels.Selector, error) { - if ps == nil { - return labels.Nothing(), nil - } - if len(ps.MatchLabels)+len(ps.MatchExpressions) == 0 { - return labels.Everything(), nil - } - selector := labels.NewSelector() - for k, v := range ps.MatchLabels { - r, err := labels.NewRequirement(k, selection.Equals, []string{v}) - if err != nil { - return nil, err - } - selector = selector.Add(*r) - } - for _, expr := range ps.MatchExpressions { - var op selection.Operator - switch expr.Operator { - case LabelSelectorOpIn: - op = selection.In - case LabelSelectorOpNotIn: - op = selection.NotIn - case LabelSelectorOpExists: - op = selection.Exists - case LabelSelectorOpDoesNotExist: - op = selection.DoesNotExist - default: - return nil, fmt.Errorf("%q is not a valid pod selector operator", expr.Operator) - } - r, err := labels.NewRequirement(expr.Key, op, append([]string(nil), expr.Values...)) - if err != nil { - return nil, err - } - selector = selector.Add(*r) - } - return selector, nil -} - -// LabelSelectorAsMap converts the LabelSelector api type into a map of strings, ie. the -// original structure of a label selector. Operators that cannot be converted into plain -// labels (Exists, DoesNotExist, NotIn, and In with more than one value) will result in -// an error. -func LabelSelectorAsMap(ps *LabelSelector) (map[string]string, error) { - if ps == nil { - return nil, nil - } - selector := map[string]string{} - for k, v := range ps.MatchLabels { - selector[k] = v - } - for _, expr := range ps.MatchExpressions { - switch expr.Operator { - case LabelSelectorOpIn: - if len(expr.Values) != 1 { - return selector, fmt.Errorf("operator %q without a single value cannot be converted into the old label selector format", expr.Operator) - } - // Should we do anything in case this will override a previous key-value pair? - selector[expr.Key] = expr.Values[0] - case LabelSelectorOpNotIn, LabelSelectorOpExists, LabelSelectorOpDoesNotExist: - return selector, fmt.Errorf("operator %q cannot be converted into the old label selector format", expr.Operator) - default: - return selector, fmt.Errorf("%q is not a valid selector operator", expr.Operator) - } - } - return selector, nil -} - -// ParseToLabelSelector parses a string representing a selector into a LabelSelector object. 
-// Note: This function should be kept in sync with the parser in pkg/labels/selector.go -func ParseToLabelSelector(selector string) (*LabelSelector, error) { - reqs, err := labels.ParseToRequirements(selector) - if err != nil { - return nil, fmt.Errorf("couldn't parse the selector string \"%s\": %v", selector, err) - } - - labelSelector := &LabelSelector{ - MatchLabels: map[string]string{}, - MatchExpressions: []LabelSelectorRequirement{}, - } - for _, req := range reqs { - var op LabelSelectorOperator - switch req.Operator() { - case selection.Equals, selection.DoubleEquals: - vals := req.Values() - if vals.Len() != 1 { - return nil, fmt.Errorf("equals operator must have exactly one value") - } - val, ok := vals.PopAny() - if !ok { - return nil, fmt.Errorf("equals operator has exactly one value but it cannot be retrieved") - } - labelSelector.MatchLabels[req.Key()] = val - continue - case selection.In: - op = LabelSelectorOpIn - case selection.NotIn: - op = LabelSelectorOpNotIn - case selection.Exists: - op = LabelSelectorOpExists - case selection.DoesNotExist: - op = LabelSelectorOpDoesNotExist - case selection.GreaterThan, selection.LessThan: - // Adding a separate case for these operators to indicate that this is deliberate - return nil, fmt.Errorf("%q isn't supported in label selectors", req.Operator()) - default: - return nil, fmt.Errorf("%q is not a valid label selector operator", req.Operator()) - } - labelSelector.MatchExpressions = append(labelSelector.MatchExpressions, LabelSelectorRequirement{ - Key: req.Key(), - Operator: op, - Values: req.Values().List(), - }) - } - return labelSelector, nil -} - -// SetAsLabelSelector converts the labels.Set object into a LabelSelector api object. -func SetAsLabelSelector(ls labels.Set) *LabelSelector { - if ls == nil { - return nil - } - - selector := &LabelSelector{ - MatchLabels: make(map[string]string), - } - for label, value := range ls { - selector.MatchLabels[label] = value - } - - return selector -} - -// FormatLabelSelector convert labelSelector into plain string -func FormatLabelSelector(labelSelector *LabelSelector) string { - selector, err := LabelSelectorAsSelector(labelSelector) - if err != nil { - return "" - } - - l := selector.String() - if len(l) == 0 { - l = "" - } - return l -} - -func ExtractGroupVersions(l *APIGroupList) []string { - var groupVersions []string - for _, g := range l.Groups { - for _, gv := range g.Versions { - groupVersions = append(groupVersions, gv.GroupVersion) - } - } - return groupVersions -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/meta.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/meta.go deleted file mode 100644 index 48009da162..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/meta.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package unversioned - -// ListMetaAccessor retrieves the list interface from an object -// TODO: move this, and TypeMeta and ListMeta, to a different package -type ListMetaAccessor interface { - GetListMeta() List -} - -// List lets you work with list metadata from any of the versioned or -// internal API objects. Attempting to set or retrieve a field on an object that does -// not support that field will be a no-op and return a default value. -// TODO: move this, and TypeMeta and ListMeta, to a different package -type List interface { - GetResourceVersion() string - SetResourceVersion(version string) - GetSelfLink() string - SetSelfLink(selfLink string) -} - -// Type exposes the type and APIVersion of versioned or internal API objects. -// TODO: move this, and TypeMeta and ListMeta, to a different package -type Type interface { - GetAPIVersion() string - SetAPIVersion(version string) - GetKind() string - SetKind(kind string) -} - -func (meta *ListMeta) GetResourceVersion() string { return meta.ResourceVersion } -func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } -func (meta *ListMeta) GetSelfLink() string { return meta.SelfLink } -func (meta *ListMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } - -func (obj *TypeMeta) GetObjectKind() ObjectKind { return obj } - -// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta -func (obj *TypeMeta) SetGroupVersionKind(gvk GroupVersionKind) { - obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() -} - -// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta -func (obj *TypeMeta) GroupVersionKind() GroupVersionKind { - return FromAPIVersionAndKind(obj.APIVersion, obj.Kind) -} - -func (obj *ListMeta) GetListMeta() List { return obj } diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/register.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/register.go deleted file mode 100644 index 907188bc74..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/register.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = GroupVersion{Group: "", Version: ""} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/types.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/types.go deleted file mode 100644 index 6178f199b8..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/types.go +++ /dev/null @@ -1,482 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package unversioned contains API types that are common to all versions. -// -// The package contains two categories of types: -// - external (serialized) types that lack their own version (e.g TypeMeta) -// - internal (never-serialized) types that are needed by several different -// api groups, and so live here, to avoid duplication and/or import loops -// (e.g. LabelSelector). -// In the future, we will probably move these categories of objects into -// separate packages. -package unversioned - -import "strings" - -// TypeMeta describes an individual object in an API response or request -// with strings representing the type of the object and its API schema version. -// Structures that are versioned or persisted should inline TypeMeta. -type TypeMeta struct { - // Kind is a string value representing the REST resource this object represents. - // Servers may infer this from the endpoint the client submits requests to. - // Cannot be updated. - // In CamelCase. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - // +optional - Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` - - // APIVersion defines the versioned schema of this representation of an object. - // Servers should convert recognized schemas to the latest internal value, and - // may reject unrecognized values. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources - // +optional - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` -} - -// ListMeta describes metadata that synthetic resources must have, including lists and -// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. -type ListMeta struct { - // SelfLink is a URL representing this object. - // Populated by the system. - // Read-only. - // +optional - SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,1,opt,name=selfLink"` - - // String that identifies the server's internal version of this object that - // can be used by clients to determine when objects have changed. - // Value must be treated as opaque by clients and passed unmodified back to the server. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency - // +optional - ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` -} - -// ExportOptions is the query options to the standard REST get call. -type ExportOptions struct { - TypeMeta `json:",inline"` - // Should this value be exported. Export strips fields that a user can not specify.` - Export bool `json:"export" protobuf:"varint,1,opt,name=export"` - // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace' - Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"` -} - -// Status is a return value for calls that don't return other objects. -type Status struct { - TypeMeta `json:",inline"` - // Standard list metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - // +optional - ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Status of the operation. - // One of: "Success" or "Failure". - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - Status string `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` - // A human-readable description of the status of this operation. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` - // A machine-readable description of why this operation is in the - // "Failure" status. If this value is empty there - // is no information available. A Reason clarifies an HTTP status - // code but does not override it. - // +optional - Reason StatusReason `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason,casttype=StatusReason"` - // Extended data associated with the reason. Each reason may define its - // own extended details. This field is optional and the data returned - // is not guaranteed to conform to any schema except that defined by - // the reason type. - // +optional - Details *StatusDetails `json:"details,omitempty" protobuf:"bytes,5,opt,name=details"` - // Suggested HTTP return code for this status, 0 if not set. - // +optional - Code int32 `json:"code,omitempty" protobuf:"varint,6,opt,name=code"` -} - -// StatusDetails is a set of additional properties that MAY be set by the -// server to provide additional information about a response. The Reason -// field of a Status object defines what attributes will be set. Clients -// must ignore fields that do not match the defined type of each attribute, -// and should assume that any attribute may be empty, invalid, or under -// defined. -type StatusDetails struct { - // The name attribute of the resource associated with the status StatusReason - // (when there is a single name which can be described). - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - // The group attribute of the resource associated with the status StatusReason. - // +optional - Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"` - // The kind attribute of the resource associated with the status StatusReason. - // On some operations may differ from the requested resource Kind. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - // +optional - Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` - // The Causes array includes more details associated with the StatusReason - // failure. Not all StatusReasons may provide detailed causes. - // +optional - Causes []StatusCause `json:"causes,omitempty" protobuf:"bytes,4,rep,name=causes"` - // If specified, the time in seconds before the operation should be retried. - // +optional - RetryAfterSeconds int32 `json:"retryAfterSeconds,omitempty" protobuf:"varint,5,opt,name=retryAfterSeconds"` -} - -// Values of Status.Status -const ( - StatusSuccess = "Success" - StatusFailure = "Failure" -) - -// StatusReason is an enumeration of possible failure causes. Each StatusReason -// must map to a single HTTP status code, but multiple reasons may map -// to the same HTTP status code. -// TODO: move to apiserver -type StatusReason string - -const ( - // StatusReasonUnknown means the server has declined to indicate a specific reason. - // The details field may contain other information about this error. - // Status code 500. 
- StatusReasonUnknown StatusReason = "" - - // StatusReasonUnauthorized means the server can be reached and understood the request, but requires - // the user to present appropriate authorization credentials (identified by the WWW-Authenticate header) - // in order for the action to be completed. If the user has specified credentials on the request, the - // server considers them insufficient. - // Status code 401 - StatusReasonUnauthorized StatusReason = "Unauthorized" - - // StatusReasonForbidden means the server can be reached and understood the request, but refuses - // to take any further action. It is the result of the server being configured to deny access for some reason - // to the requested resource by the client. - // Details (optional): - // "kind" string - the kind attribute of the forbidden resource - // on some operations may differ from the requested - // resource. - // "id" string - the identifier of the forbidden resource - // Status code 403 - StatusReasonForbidden StatusReason = "Forbidden" - - // StatusReasonNotFound means one or more resources required for this operation - // could not be found. - // Details (optional): - // "kind" string - the kind attribute of the missing resource - // on some operations may differ from the requested - // resource. - // "id" string - the identifier of the missing resource - // Status code 404 - StatusReasonNotFound StatusReason = "NotFound" - - // StatusReasonAlreadyExists means the resource you are creating already exists. - // Details (optional): - // "kind" string - the kind attribute of the conflicting resource - // "id" string - the identifier of the conflicting resource - // Status code 409 - StatusReasonAlreadyExists StatusReason = "AlreadyExists" - - // StatusReasonConflict means the requested operation cannot be completed - // due to a conflict in the operation. The client may need to alter the - // request. Each resource may define custom details that indicate the - // nature of the conflict. - // Status code 409 - StatusReasonConflict StatusReason = "Conflict" - - // StatusReasonGone means the item is no longer available at the server and no - // forwarding address is known. - // Status code 410 - StatusReasonGone StatusReason = "Gone" - - // StatusReasonInvalid means the requested create or update operation cannot be - // completed due to invalid data provided as part of the request. The client may - // need to alter the request. When set, the client may use the StatusDetails - // message field as a summary of the issues encountered. - // Details (optional): - // "kind" string - the kind attribute of the invalid resource - // "id" string - the identifier of the invalid resource - // "causes" - one or more StatusCause entries indicating the data in the - // provided resource that was invalid. The code, message, and - // field attributes will be set. - // Status code 422 - StatusReasonInvalid StatusReason = "Invalid" - - // StatusReasonServerTimeout means the server can be reached and understood the request, - // but cannot complete the action in a reasonable time. The client should retry the request. - // This is may be due to temporary server load or a transient communication issue with - // another server. Status code 500 is used because the HTTP spec provides no suitable - // server-requested client retry and the 5xx class represents actionable errors. - // Details (optional): - // "kind" string - the kind attribute of the resource being acted on. - // "id" string - the operation that is being attempted. 
- // "retryAfterSeconds" int32 - the number of seconds before the operation should be retried - // Status code 500 - StatusReasonServerTimeout StatusReason = "ServerTimeout" - - // StatusReasonTimeout means that the request could not be completed within the given time. - // Clients can get this response only when they specified a timeout param in the request, - // or if the server cannot complete the operation within a reasonable amount of time. - // The request might succeed with an increased value of timeout param. The client *should* - // wait at least the number of seconds specified by the retryAfterSeconds field. - // Details (optional): - // "retryAfterSeconds" int32 - the number of seconds before the operation should be retried - // Status code 504 - StatusReasonTimeout StatusReason = "Timeout" - - // StatusReasonBadRequest means that the request itself was invalid, because the request - // doesn't make any sense, for example deleting a read-only object. This is different than - // StatusReasonInvalid above which indicates that the API call could possibly succeed, but the - // data was invalid. API calls that return BadRequest can never succeed. - StatusReasonBadRequest StatusReason = "BadRequest" - - // StatusReasonMethodNotAllowed means that the action the client attempted to perform on the - // resource was not supported by the code - for instance, attempting to delete a resource that - // can only be created. API calls that return MethodNotAllowed can never succeed. - StatusReasonMethodNotAllowed StatusReason = "MethodNotAllowed" - - // StatusReasonInternalError indicates that an internal error occurred, it is unexpected - // and the outcome of the call is unknown. - // Details (optional): - // "causes" - The original error - // Status code 500 - StatusReasonInternalError StatusReason = "InternalError" - - // StatusReasonExpired indicates that the request is invalid because the content you are requesting - // has expired and is no longer available. It is typically associated with watches that can't be - // serviced. - // Status code 410 (gone) - StatusReasonExpired StatusReason = "Expired" - - // StatusReasonServiceUnavailable means that the request itself was valid, - // but the requested service is unavailable at this time. - // Retrying the request after some time might succeed. - // Status code 503 - StatusReasonServiceUnavailable StatusReason = "ServiceUnavailable" -) - -// StatusCause provides more information about an api.Status failure, including -// cases when multiple errors are encountered. -type StatusCause struct { - // A machine-readable description of the cause of the error. If this value is - // empty there is no information available. - // +optional - Type CauseType `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason,casttype=CauseType"` - // A human-readable description of the cause of the error. This field may be - // presented as-is to a reader. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` - // The field of the resource that has caused this error, as named by its JSON - // serialization. May include dot and postfix notation for nested attributes. - // Arrays are zero-indexed. Fields may appear more than once in an array of - // causes due to fields having multiple errors. - // Optional. 
- // - // Examples: - // "name" - the field "name" on the current resource - // "items[0].name" - the field "name" on the first array entry in "items" - // +optional - Field string `json:"field,omitempty" protobuf:"bytes,3,opt,name=field"` -} - -// CauseType is a machine readable value providing more detail about what -// occurred in a status response. An operation may have multiple causes for a -// status (whether Failure or Success). -type CauseType string - -const ( - // CauseTypeFieldValueNotFound is used to report failure to find a requested value - // (e.g. looking up an ID). - CauseTypeFieldValueNotFound CauseType = "FieldValueNotFound" - // CauseTypeFieldValueRequired is used to report required values that are not - // provided (e.g. empty strings, null values, or empty arrays). - CauseTypeFieldValueRequired CauseType = "FieldValueRequired" - // CauseTypeFieldValueDuplicate is used to report collisions of values that must be - // unique (e.g. unique IDs). - CauseTypeFieldValueDuplicate CauseType = "FieldValueDuplicate" - // CauseTypeFieldValueInvalid is used to report malformed values (e.g. failed regex - // match). - CauseTypeFieldValueInvalid CauseType = "FieldValueInvalid" - // CauseTypeFieldValueNotSupported is used to report valid (as per formatting rules) - // values that can not be handled (e.g. an enumerated string). - CauseTypeFieldValueNotSupported CauseType = "FieldValueNotSupported" - // CauseTypeUnexpectedServerResponse is used to report when the server responded to the client - // without the expected return type. The presence of this cause indicates the error may be - // due to an intervening proxy or the server software malfunctioning. - CauseTypeUnexpectedServerResponse CauseType = "UnexpectedServerResponse" -) - -// APIVersions lists the versions that are available, to allow clients to -// discover the API at /api, which is the root path of the legacy v1 API. -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type APIVersions struct { - TypeMeta `json:",inline"` - // versions are the api versions that are available. - Versions []string `json:"versions" protobuf:"bytes,1,rep,name=versions"` - // a map of client CIDR to server address that is serving this group. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - // The server returns only those CIDRs that it thinks that the client can match. - // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. - // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. - ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,2,rep,name=serverAddressByClientCIDRs"` -} - -// APIGroupList is a list of APIGroup, to allow clients to discover the API at -// /apis. -type APIGroupList struct { - TypeMeta `json:",inline"` - // groups is a list of APIGroup. - Groups []APIGroup `json:"groups" protobuf:"bytes,1,rep,name=groups"` -} - -// APIGroup contains the name, the supported versions, and the preferred version -// of a group. -type APIGroup struct { - TypeMeta `json:",inline"` - // name is the name of the group. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // versions are the versions supported in this group. 
- Versions []GroupVersionForDiscovery `json:"versions" protobuf:"bytes,2,rep,name=versions"` - // preferredVersion is the version preferred by the API server, which - // probably is the storage version. - // +optional - PreferredVersion GroupVersionForDiscovery `json:"preferredVersion,omitempty" protobuf:"bytes,3,opt,name=preferredVersion"` - // a map of client CIDR to server address that is serving this group. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - // The server returns only those CIDRs that it thinks that the client can match. - // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. - // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. - ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"` -} - -// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. -type ServerAddressByClientCIDR struct { - // The CIDR with which clients can match their IP to figure out the server address that they should use. - ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"` - // Address of this server, suitable for a client that matches the above CIDR. - // This can be a hostname, hostname:port, IP or IP:port. - ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"` -} - -// GroupVersion contains the "group/version" and "version" string of a version. -// It is made a struct to keep extensibility. -type GroupVersionForDiscovery struct { - // groupVersion specifies the API group and version in the form "group/version" - GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"` - // version specifies the version in the form of "version". This is to save - // the clients the trouble of splitting the GroupVersion. - Version string `json:"version" protobuf:"bytes,2,opt,name=version"` -} - -// APIResource specifies the name of a resource and whether it is namespaced. -type APIResource struct { - // name is the name of the resource. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // namespaced indicates if a resource is namespaced or not. - Namespaced bool `json:"namespaced" protobuf:"varint,2,opt,name=namespaced"` - // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') - Kind string `json:"kind" protobuf:"bytes,3,opt,name=kind"` -} - -// APIResourceList is a list of APIResource, it is used to expose the name of the -// resources supported in a specific group and version, and if the resource -// is namespaced. -type APIResourceList struct { - TypeMeta `json:",inline"` - // groupVersion is the group and version this APIResourceList is for. - GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"` - // resources contains the name of the resources and if they are namespaced. - APIResources []APIResource `json:"resources" protobuf:"bytes,2,rep,name=resources"` -} - -// RootPaths lists the paths available at root. -// For example: "/healthz", "/apis". -type RootPaths struct { - // paths are the paths available at root. 
- Paths []string `json:"paths" protobuf:"bytes,1,rep,name=paths"` -} - -// TODO: remove me when watch is refactored -func LabelSelectorQueryParam(version string) string { - return "labelSelector" -} - -// TODO: remove me when watch is refactored -func FieldSelectorQueryParam(version string) string { - return "fieldSelector" -} - -// String returns available api versions as a human-friendly version string. -func (apiVersions APIVersions) String() string { - return strings.Join(apiVersions.Versions, ",") -} - -func (apiVersions APIVersions) GoString() string { - return apiVersions.String() -} - -// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body. -type Patch struct{} - -// Note: -// There are two different styles of label selectors used in versioned types: -// an older style which is represented as just a string in versioned types, and a -// newer style that is structured. LabelSelector is an internal representation for the -// latter style. - -// A label selector is a label query over a set of resources. The result of matchLabels and -// matchExpressions are ANDed. An empty label selector matches all objects. A null -// label selector matches no objects. -type LabelSelector struct { - // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - // map is equivalent to an element of matchExpressions, whose key field is "key", the - // operator is "In", and the values array contains only "value". The requirements are ANDed. - // +optional - MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"` - // matchExpressions is a list of label selector requirements. The requirements are ANDed. - // +optional - MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"` -} - -// A label selector requirement is a selector that contains values, a key, and an operator that -// relates the key and values. -type LabelSelectorRequirement struct { - // key is the label key that the selector applies to. - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` - // operator represents a key's relationship to a set of values. - // Valid operators ard In, NotIn, Exists and DoesNotExist. - Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"` - // values is an array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. This array is replaced during a strategic - // merge patch. - // +optional - Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` -} - -// A label selector operator is the set of operators that can be used in a selector requirement. -type LabelSelectorOperator string - -const ( - LabelSelectorOpIn LabelSelectorOperator = "In" - LabelSelectorOpNotIn LabelSelectorOperator = "NotIn" - LabelSelectorOpExists LabelSelectorOperator = "Exists" - LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/types_swagger_doc_generated.go deleted file mode 100644 index 3f08115a6c..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/types_swagger_doc_generated.go +++ /dev/null @@ -1,208 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_APIGroup = map[string]string{ - "": "APIGroup contains the name, the supported versions, and the preferred version of a group.", - "name": "name is the name of the group.", - "versions": "versions are the versions supported in this group.", - "preferredVersion": "preferredVersion is the version preferred by the API server, which probably is the storage version.", - "serverAddressByClientCIDRs": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", -} - -func (APIGroup) SwaggerDoc() map[string]string { - return map_APIGroup -} - -var map_APIGroupList = map[string]string{ - "": "APIGroupList is a list of APIGroup, to allow clients to discover the API at /apis.", - "groups": "groups is a list of APIGroup.", -} - -func (APIGroupList) SwaggerDoc() map[string]string { - return map_APIGroupList -} - -var map_APIResource = map[string]string{ - "": "APIResource specifies the name of a resource and whether it is namespaced.", - "name": "name is the name of the resource.", - "namespaced": "namespaced indicates if a resource is namespaced or not.", - "kind": "kind is the kind for the resource (e.g. 
'Foo' is the kind for a resource 'foo')", -} - -func (APIResource) SwaggerDoc() map[string]string { - return map_APIResource -} - -var map_APIResourceList = map[string]string{ - "": "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.", - "groupVersion": "groupVersion is the group and version this APIResourceList is for.", - "resources": "resources contains the name of the resources and if they are namespaced.", -} - -func (APIResourceList) SwaggerDoc() map[string]string { - return map_APIResourceList -} - -var map_APIVersions = map[string]string{ - "": "APIVersions lists the versions that are available, to allow clients to discover the API at /api, which is the root path of the legacy v1 API.", - "versions": "versions are the api versions that are available.", - "serverAddressByClientCIDRs": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", -} - -func (APIVersions) SwaggerDoc() map[string]string { - return map_APIVersions -} - -var map_ExportOptions = map[string]string{ - "": "ExportOptions is the query options to the standard REST get call.", - "export": "Should this value be exported. Export strips fields that a user can not specify.`", - "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'", -} - -func (ExportOptions) SwaggerDoc() map[string]string { - return map_ExportOptions -} - -var map_GroupVersionForDiscovery = map[string]string{ - "": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.", - "groupVersion": "groupVersion specifies the API group and version in the form \"group/version\"", - "version": "version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion.", -} - -func (GroupVersionForDiscovery) SwaggerDoc() map[string]string { - return map_GroupVersionForDiscovery -} - -var map_LabelSelector = map[string]string{ - "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", - "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", - "matchExpressions": "matchExpressions is a list of label selector requirements. 
The requirements are ANDed.", -} - -func (LabelSelector) SwaggerDoc() map[string]string { - return map_LabelSelector -} - -var map_LabelSelectorRequirement = map[string]string{ - "": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", - "key": "key is the label key that the selector applies to.", - "operator": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", - "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", -} - -func (LabelSelectorRequirement) SwaggerDoc() map[string]string { - return map_LabelSelectorRequirement -} - -var map_ListMeta = map[string]string{ - "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", - "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", - "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency", -} - -func (ListMeta) SwaggerDoc() map[string]string { - return map_ListMeta -} - -var map_Patch = map[string]string{ - "": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.", -} - -func (Patch) SwaggerDoc() map[string]string { - return map_Patch -} - -var map_RootPaths = map[string]string{ - "": "RootPaths lists the paths available at root. For example: \"/healthz\", \"/apis\".", - "paths": "paths are the paths available at root.", -} - -func (RootPaths) SwaggerDoc() map[string]string { - return map_RootPaths -} - -var map_ServerAddressByClientCIDR = map[string]string{ - "": "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.", - "clientCIDR": "The CIDR with which clients can match their IP to figure out the server address that they should use.", - "serverAddress": "Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.", -} - -func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string { - return map_ServerAddressByClientCIDR -} - -var map_Status = map[string]string{ - "": "Status is a return value for calls that don't return other objects.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "message": "A human-readable description of the status of this operation.", - "reason": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.", - "details": "Extended data associated with the reason. Each reason may define its own extended details. 
This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.", - "code": "Suggested HTTP return code for this status, 0 if not set.", -} - -func (Status) SwaggerDoc() map[string]string { - return map_Status -} - -var map_StatusCause = map[string]string{ - "": "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.", - "reason": "A machine-readable description of the cause of the error. If this value is empty there is no information available.", - "message": "A human-readable description of the cause of the error. This field may be presented as-is to a reader.", - "field": "The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"", -} - -func (StatusCause) SwaggerDoc() map[string]string { - return map_StatusCause -} - -var map_StatusDetails = map[string]string{ - "": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", - "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", - "group": "The group attribute of the resource associated with the status StatusReason.", - "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "causes": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", - "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried.", -} - -func (StatusDetails) SwaggerDoc() map[string]string { - return map_StatusDetails -} - -var map_TypeMeta = map[string]string{ - "": "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.", - "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources", -} - -func (TypeMeta) SwaggerDoc() map[string]string { - return map_TypeMeta -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/api/unversioned/validation/BUILD deleted file mode 100644 index d0d0cbd14f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/validation/BUILD +++ /dev/null @@ -1,30 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["validation.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/util/validation:go_default_library", - "//pkg/util/validation/field:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["validation_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = ["//pkg/util/validation/field:go_default_library"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/validation/validation.go deleted file mode 100644 index ecb968bdcb..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/validation/validation.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/util/validation" - "k8s.io/kubernetes/pkg/util/validation/field" -) - -func ValidateLabelSelector(ps *unversioned.LabelSelector, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if ps == nil { - return allErrs - } - allErrs = append(allErrs, ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...) - for i, expr := range ps.MatchExpressions { - allErrs = append(allErrs, ValidateLabelSelectorRequirement(expr, fldPath.Child("matchExpressions").Index(i))...) - } - return allErrs -} - -func ValidateLabelSelectorRequirement(sr unversioned.LabelSelectorRequirement, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - switch sr.Operator { - case unversioned.LabelSelectorOpIn, unversioned.LabelSelectorOpNotIn: - if len(sr.Values) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'")) - } - case unversioned.LabelSelectorOpExists, unversioned.LabelSelectorOpDoesNotExist: - if len(sr.Values) > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) - } - default: - allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) - } - allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...) 
- return allErrs -} - -// ValidateLabelName validates that the label name is correctly defined. -func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for _, msg := range validation.IsQualifiedName(labelName) { - allErrs = append(allErrs, field.Invalid(fldPath, labelName, msg)) - } - return allErrs -} - -// ValidateLabels validates that a set of labels are correctly defined. -func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for k, v := range labels { - allErrs = append(allErrs, ValidateLabelName(k, fldPath)...) - for _, msg := range validation.IsValidLabelValue(v) { - allErrs = append(allErrs, field.Invalid(fldPath, v, msg)) - } - } - return allErrs -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/zz_generated.deepcopy.go deleted file mode 100644 index 627bb817ec..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/zz_generated.deepcopy.go +++ /dev/null @@ -1,390 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
- -package unversioned - -import ( - conversion "k8s.io/kubernetes/pkg/conversion" - time "time" -) - -func DeepCopy_unversioned_APIGroup(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*APIGroup) - out := out.(*APIGroup) - out.TypeMeta = in.TypeMeta - out.Name = in.Name - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]GroupVersionForDiscovery, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Versions = nil - } - out.PreferredVersion = in.PreferredVersion - if in.ServerAddressByClientCIDRs != nil { - in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs - *out = make([]ServerAddressByClientCIDR, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.ServerAddressByClientCIDRs = nil - } - return nil - } -} - -func DeepCopy_unversioned_APIGroupList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*APIGroupList) - out := out.(*APIGroupList) - out.TypeMeta = in.TypeMeta - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]APIGroup, len(*in)) - for i := range *in { - if err := DeepCopy_unversioned_APIGroup(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Groups = nil - } - return nil - } -} - -func DeepCopy_unversioned_APIResource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*APIResource) - out := out.(*APIResource) - out.Name = in.Name - out.Namespaced = in.Namespaced - out.Kind = in.Kind - return nil - } -} - -func DeepCopy_unversioned_APIResourceList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*APIResourceList) - out := out.(*APIResourceList) - out.TypeMeta = in.TypeMeta - out.GroupVersion = in.GroupVersion - if in.APIResources != nil { - in, out := &in.APIResources, &out.APIResources - *out = make([]APIResource, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.APIResources = nil - } - return nil - } -} - -func DeepCopy_unversioned_APIVersions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*APIVersions) - out := out.(*APIVersions) - out.TypeMeta = in.TypeMeta - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]string, len(*in)) - copy(*out, *in) - } else { - out.Versions = nil - } - if in.ServerAddressByClientCIDRs != nil { - in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs - *out = make([]ServerAddressByClientCIDR, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.ServerAddressByClientCIDRs = nil - } - return nil - } -} - -func DeepCopy_unversioned_Duration(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Duration) - out := out.(*Duration) - out.Duration = in.Duration - return nil - } -} - -func DeepCopy_unversioned_ExportOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ExportOptions) - out := out.(*ExportOptions) - out.TypeMeta = in.TypeMeta - out.Export = in.Export - out.Exact = in.Exact - return nil - } -} - -func DeepCopy_unversioned_GroupKind(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GroupKind) - out := out.(*GroupKind) - out.Group = in.Group - out.Kind = in.Kind - return nil - } -} - -func DeepCopy_unversioned_GroupResource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GroupResource) - out := out.(*GroupResource) - 
out.Group = in.Group - out.Resource = in.Resource - return nil - } -} - -func DeepCopy_unversioned_GroupVersion(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GroupVersion) - out := out.(*GroupVersion) - out.Group = in.Group - out.Version = in.Version - return nil - } -} - -func DeepCopy_unversioned_GroupVersionForDiscovery(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GroupVersionForDiscovery) - out := out.(*GroupVersionForDiscovery) - out.GroupVersion = in.GroupVersion - out.Version = in.Version - return nil - } -} - -func DeepCopy_unversioned_GroupVersionKind(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GroupVersionKind) - out := out.(*GroupVersionKind) - out.Group = in.Group - out.Version = in.Version - out.Kind = in.Kind - return nil - } -} - -func DeepCopy_unversioned_GroupVersionResource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GroupVersionResource) - out := out.(*GroupVersionResource) - out.Group = in.Group - out.Version = in.Version - out.Resource = in.Resource - return nil - } -} - -func DeepCopy_unversioned_LabelSelector(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LabelSelector) - out := out.(*LabelSelector) - if in.MatchLabels != nil { - in, out := &in.MatchLabels, &out.MatchLabels - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } else { - out.MatchLabels = nil - } - if in.MatchExpressions != nil { - in, out := &in.MatchExpressions, &out.MatchExpressions - *out = make([]LabelSelectorRequirement, len(*in)) - for i := range *in { - if err := DeepCopy_unversioned_LabelSelectorRequirement(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.MatchExpressions = nil - } - return nil - } -} - -func DeepCopy_unversioned_LabelSelectorRequirement(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LabelSelectorRequirement) - out := out.(*LabelSelectorRequirement) - out.Key = in.Key - out.Operator = in.Operator - if in.Values != nil { - in, out := &in.Values, &out.Values - *out = make([]string, len(*in)) - copy(*out, *in) - } else { - out.Values = nil - } - return nil - } -} - -func DeepCopy_unversioned_ListMeta(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ListMeta) - out := out.(*ListMeta) - out.SelfLink = in.SelfLink - out.ResourceVersion = in.ResourceVersion - return nil - } -} - -func DeepCopy_unversioned_Patch(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Patch) - out := out.(*Patch) - _ = in - _ = out - return nil - } -} - -func DeepCopy_unversioned_RootPaths(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RootPaths) - out := out.(*RootPaths) - if in.Paths != nil { - in, out := &in.Paths, &out.Paths - *out = make([]string, len(*in)) - copy(*out, *in) - } else { - out.Paths = nil - } - return nil - } -} - -func DeepCopy_unversioned_ServerAddressByClientCIDR(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ServerAddressByClientCIDR) - out := out.(*ServerAddressByClientCIDR) - out.ClientCIDR = in.ClientCIDR - out.ServerAddress = in.ServerAddress - return nil - } -} - -func DeepCopy_unversioned_Status(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Status) - out := out.(*Status) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - out.Status = in.Status 
- out.Message = in.Message - out.Reason = in.Reason - if in.Details != nil { - in, out := &in.Details, &out.Details - *out = new(StatusDetails) - if err := DeepCopy_unversioned_StatusDetails(*in, *out, c); err != nil { - return err - } - } else { - out.Details = nil - } - out.Code = in.Code - return nil - } -} - -func DeepCopy_unversioned_StatusCause(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StatusCause) - out := out.(*StatusCause) - out.Type = in.Type - out.Message = in.Message - out.Field = in.Field - return nil - } -} - -func DeepCopy_unversioned_StatusDetails(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StatusDetails) - out := out.(*StatusDetails) - out.Name = in.Name - out.Group = in.Group - out.Kind = in.Kind - if in.Causes != nil { - in, out := &in.Causes, &out.Causes - *out = make([]StatusCause, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Causes = nil - } - out.RetryAfterSeconds = in.RetryAfterSeconds - return nil - } -} - -func DeepCopy_unversioned_Time(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Time) - out := out.(*Time) - if newVal, err := c.DeepCopy(&in.Time); err != nil { - return err - } else { - out.Time = *newVal.(*time.Time) - } - return nil - } -} - -func DeepCopy_unversioned_Timestamp(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Timestamp) - out := out.(*Timestamp) - out.Seconds = in.Seconds - out.Nanos = in.Nanos - return nil - } -} - -func DeepCopy_unversioned_TypeMeta(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TypeMeta) - out := out.(*TypeMeta) - out.Kind = in.Kind - out.APIVersion = in.APIVersion - return nil - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/util/BUILD b/vendor/k8s.io/kubernetes/pkg/api/util/BUILD deleted file mode 100644 index 4637acb3d6..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/util/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["group_version.go"], - tags = ["automanaged"], -) - -go_test( - name = "go_default_test", - srcs = ["group_version_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/util/group_version.go b/vendor/k8s.io/kubernetes/pkg/api/util/group_version.go deleted file mode 100644 index fea2f17f81..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/util/group_version.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// TODO: This GetVersion/GetGroup arrangement is temporary and will be replaced -// with a GroupAndVersion type. 
-package util - -import "strings" - -func GetVersion(groupVersion string) string { - s := strings.Split(groupVersion, "/") - if len(s) != 2 { - // e.g. return "v1" for groupVersion="v1" - return s[len(s)-1] - } - return s[1] -} - -func GetGroup(groupVersion string) string { - s := strings.Split(groupVersion, "/") - if len(s) == 1 { - // e.g. return "" for groupVersion="v1" - return "" - } - return s[0] -} - -// GetGroupVersion returns the "group/version". It returns "version" if group -// is empty. It returns "group/" if version is empty. -func GetGroupVersion(group, version string) string { - if len(group) == 0 { - return version - } - return group + "/" + version -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/api/v1/BUILD index 812ae84bdc..69f5da9f4f 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/BUILD @@ -4,10 +4,8 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", "go_test", - "cgo_library", ) go_library( @@ -16,10 +14,13 @@ go_library( "conversion.go", "defaults.go", "doc.go", + "generate.go", "generated.pb.go", "helpers.go", "meta.go", + "ref.go", "register.go", + "resource_helpers.go", "types.generated.go", "types.go", "types_swagger_doc_generated.go", @@ -30,22 +31,25 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/meta/metatypes:go_default_library", - "//pkg/api/resource:go_default_library", - "//pkg/api/unversioned:go_default_library", "//pkg/apis/extensions:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", "//pkg/util:go_default_library", - "//pkg/util/intstr:go_default_library", "//pkg/util/parsers:go_default_library", - "//pkg/util/validation/field:go_default_library", - "//pkg/watch/versioned:go_default_library", "//vendor:github.com/gogo/protobuf/proto", "//vendor:github.com/gogo/protobuf/sortkeys", "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/api/meta", + "//vendor:k8s.io/apimachinery/pkg/api/resource", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/labels", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/selection", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/util/intstr", + "//vendor:k8s.io/apimachinery/pkg/util/rand", + "//vendor:k8s.io/apimachinery/pkg/util/sets", + "//vendor:k8s.io/apimachinery/pkg/util/validation/field", ], ) @@ -59,14 +63,51 @@ go_test( tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/resource:go_default_library", + "//pkg/api/install:go_default_library", "//pkg/api/testing/compat:go_default_library", - "//pkg/api/unversioned:go_default_library", "//pkg/api/v1:go_default_library", "//pkg/api/validation:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/diff:go_default_library", - "//pkg/util/intstr:go_default_library", - "//pkg/util/validation/field:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/api/equality", + "//vendor:k8s.io/apimachinery/pkg/api/resource", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/util/diff", + "//vendor:k8s.io/apimachinery/pkg/util/intstr", 
+ "//vendor:k8s.io/apimachinery/pkg/util/validation/field", ], ) + +go_test( + name = "go_default_test", + srcs = [ + "helpers_test.go", + "resource_helpers_test.go", + ], + library = ":go_default_library", + tags = ["automanaged"], + deps = [ + "//vendor:k8s.io/apimachinery/pkg/api/equality", + "//vendor:k8s.io/apimachinery/pkg/api/resource", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/labels", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/api/v1/endpoints:all-srcs", + "//pkg/api/v1/pod:all-srcs", + "//pkg/api/v1/service:all-srcs", + "//pkg/api/v1/validation:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/OWNERS b/vendor/k8s.io/kubernetes/pkg/api/v1/OWNERS new file mode 100755 index 0000000000..fdb84b24a9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/OWNERS @@ -0,0 +1,41 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- yujuhong +- brendandburns +- derekwaynecarr +- caesarxuchao +- vishh +- mikedanese +- liggitt +- nikhiljindal +- bprashanth +- gmarek +- erictune +- davidopp +- pmorie +- sttts +- kargakis +- dchen1107 +- saad-ali +- zmerlynn +- luxas +- janetkuo +- justinsb +- roberthbailey +- ncdc +- timstclair +- eparis +- timothysc +- piosz +- jsafrane +- dims +- errordeveloper +- madhusudancs +- krousey +- jayunit100 +- rootfs +- markturansky diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go index 160b063b01..6784efcae4 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go @@ -21,23 +21,12 @@ import ( "fmt" "reflect" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/watch/versioned" -) - -const ( - // Annotation key used to identify mirror pods. - mirrorAnnotationKey = "kubernetes.io/config.mirror" - - // Value used to identify mirror pods from pre-v1.1 kubelet. - mirrorAnnotationValue_1_0 = "mirror" - - // annotation key prefix used to identify non-convertible json paths. - NonConvertibleAnnotationPrefix = "kubernetes.io/non-convertible" ) // This is a "fast-path" that avoids reflection for common types. 
It focuses on the objects that are @@ -123,15 +112,15 @@ func addFastPathConversionFuncs(scheme *runtime.Scheme) error { return true, Convert_api_Endpoints_To_v1_Endpoints(a, b, s) } - case *versioned.Event: + case *metav1.WatchEvent: switch b := objB.(type) { - case *versioned.InternalEvent: - return true, versioned.Convert_versioned_Event_to_versioned_InternalEvent(a, b, s) + case *metav1.InternalEvent: + return true, metav1.Convert_versioned_Event_to_versioned_InternalEvent(a, b, s) } - case *versioned.InternalEvent: + case *metav1.InternalEvent: switch b := objB.(type) { - case *versioned.Event: - return true, versioned.Convert_versioned_InternalEvent_to_versioned_Event(a, b, s) + case *metav1.WatchEvent: + return true, metav1.Convert_versioned_InternalEvent_to_versioned_Event(a, b, s) } } return false, nil @@ -269,9 +258,7 @@ func addConversionFuncs(scheme *runtime.Scheme) error { } func Convert_v1_ReplicationController_to_extensions_ReplicaSet(in *ReplicationController, out *extensions.ReplicaSet, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -284,7 +271,7 @@ func Convert_v1_ReplicationController_to_extensions_ReplicaSet(in *ReplicationCo func Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec(in *ReplicationControllerSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { out.Replicas = *in.Replicas if in.Selector != nil { - api.Convert_map_to_unversioned_LabelSelector(&in.Selector, out.Selector, s) + metav1.Convert_map_to_unversioned_LabelSelector(&in.Selector, out.Selector, s) } if in.Template != nil { if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, &out.Template, s); err != nil { @@ -304,9 +291,7 @@ func Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus(in *R } func Convert_extensions_ReplicaSet_to_v1_ReplicationController(in *extensions.ReplicaSet, out *ReplicationController, s conversion.Scope) error { - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { fieldErr, ok := err.(*field.Error) if !ok { @@ -315,7 +300,7 @@ func Convert_extensions_ReplicaSet_to_v1_ReplicationController(in *extensions.Re if out.Annotations == nil { out.Annotations = make(map[string]string) } - out.Annotations[NonConvertibleAnnotationPrefix+"/"+fieldErr.Field] = reflect.ValueOf(fieldErr.BadValue).String() + out.Annotations[api.NonConvertibleAnnotationPrefix+"/"+fieldErr.Field] = reflect.ValueOf(fieldErr.BadValue).String() } if err := Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { return err @@ -329,7 +314,7 @@ func Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec(in *exten out.MinReadySeconds = in.MinReadySeconds var invalidErr error if in.Selector != nil { - invalidErr = api.Convert_unversioned_LabelSelector_to_map(in.Selector, &out.Selector, s) + invalidErr = metav1.Convert_unversioned_LabelSelector_to_map(in.Selector, &out.Selector, s) } out.Template = new(PodTemplateSpec) if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, out.Template, s); err != nil { @@ 
-493,6 +478,12 @@ func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out // taking responsibility to ensure mutation of in is not exposed // back to the caller. in.Spec.InitContainers = values + + // Call defaulters explicitly until annotations are removed + for i := range in.Spec.InitContainers { + c := &in.Spec.InitContainers[i] + SetDefaults_Container(c) + } } if err := autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in, out, s); err != nil { @@ -588,17 +579,6 @@ func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error out.Annotations[PodInitContainerStatusesBetaAnnotationKey] = string(value) } - // We need to reset certain fields for mirror pods from pre-v1.1 kubelet - // (#15960). - // TODO: Remove this code after we drop support for v1.0 kubelets. - if value, ok := in.Annotations[mirrorAnnotationKey]; ok && value == mirrorAnnotationValue_1_0 { - // Reset the TerminationGracePeriodSeconds. - out.Spec.TerminationGracePeriodSeconds = nil - // Reset the resource requests. - for i := range out.Spec.Containers { - out.Spec.Containers[i].Resources.Requests = nil - } - } return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/api/v1/defaults.go index c1ea75c445..9146ce51d4 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/defaults.go @@ -17,9 +17,9 @@ limitations under the License. package v1 import ( - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/kubernetes/pkg/util" - "k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/parsers" ) @@ -39,6 +39,7 @@ func addDefaultingFuncs(scheme *runtime.Scheme) error { SetDefaults_SecretVolumeSource, SetDefaults_ConfigMapVolumeSource, SetDefaults_DownwardAPIVolumeSource, + SetDefaults_ProjectedVolumeSource, SetDefaults_Secret, SetDefaults_PersistentVolume, SetDefaults_PersistentVolumeClaim, @@ -112,7 +113,6 @@ func SetDefaults_Container(obj *Container) { _, tag, _, _ := parsers.ParseImageName(obj.Image) // Check image tag - if tag == "latest" { obj.ImagePullPolicy = PullAlways } else { @@ -122,6 +122,9 @@ func SetDefaults_Container(obj *Container) { if obj.TerminationMessagePath == "" { obj.TerminationMessagePath = TerminationMessagePathDefault } + if obj.TerminationMessagePolicy == "" { + obj.TerminationMessagePolicy = TerminationMessageReadFile + } } func SetDefaults_ServiceSpec(obj *ServiceSpec) { if obj.SessionAffinity == "" { @@ -157,6 +160,18 @@ func SetDefaults_Pod(obj *Pod) { } } } + for i := range obj.Spec.InitContainers { + if obj.Spec.InitContainers[i].Resources.Limits != nil { + if obj.Spec.InitContainers[i].Resources.Requests == nil { + obj.Spec.InitContainers[i].Resources.Requests = make(ResourceList) + } + for key, value := range obj.Spec.InitContainers[i].Resources.Limits { + if _, exists := obj.Spec.InitContainers[i].Resources.Requests[key]; !exists { + obj.Spec.InitContainers[i].Resources.Requests[key] = *(value.Copy()) + } + } + } + } } func SetDefaults_PodSpec(obj *PodSpec) { if obj.DNSPolicy == "" { @@ -167,6 +182,7 @@ func SetDefaults_PodSpec(obj *PodSpec) { } if obj.HostNetwork { defaultHostNetworkPorts(&obj.Containers) + defaultHostNetworkPorts(&obj.InitContainers) } if obj.SecurityContext == nil { obj.SecurityContext = &PodSecurityContext{} @@ -175,6 +191,9 @@ func SetDefaults_PodSpec(obj *PodSpec) { period := int64(DefaultTerminationGracePeriodSeconds) obj.TerminationGracePeriodSeconds = &period 
} + if obj.SchedulerName == "" { + obj.SchedulerName = DefaultSchedulerName + } } func SetDefaults_Probe(obj *Probe) { if obj.TimeoutSeconds == 0 { @@ -213,6 +232,12 @@ func SetDefaults_Secret(obj *Secret) { obj.Type = SecretTypeOpaque } } +func SetDefaults_ProjectedVolumeSource(obj *ProjectedVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(ProjectedVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} func SetDefaults_PersistentVolume(obj *PersistentVolume) { if obj.Status.Phase == "" { obj.Status.Phase = VolumePending @@ -347,3 +372,18 @@ func SetDefaults_RBDVolumeSource(obj *RBDVolumeSource) { obj.Keyring = "/etc/ceph/keyring" } } + +func SetDefaults_ScaleIOVolumeSource(obj *ScaleIOVolumeSource) { + if obj.ProtectionDomain == "" { + obj.ProtectionDomain = "default" + } + if obj.StoragePool == "" { + obj.StoragePool = "default" + } + if obj.StorageMode == "" { + obj.StorageMode = "ThinProvisioned" + } + if obj.FSType == "" { + obj.FSType = "xfs" + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/generate.go b/vendor/k8s.io/kubernetes/pkg/api/v1/generate.go new file mode 100644 index 0000000000..b8c44e4c7f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/generate.go @@ -0,0 +1,64 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + utilrand "k8s.io/apimachinery/pkg/util/rand" +) + +// NameGenerator generates names for objects. Some backends may have more information +// available to guide selection of new names and this interface hides those details. +type NameGenerator interface { + // GenerateName generates a valid name from the base name, adding a random suffix to + // the base. If base is valid, the returned name must also be valid. The generator is + // responsible for knowing the maximum valid name length. + GenerateName(base string) string +} + +// GenerateName will resolve the object name of the provided ObjectMeta to a generated version if +// necessary. It expects that validation for ObjectMeta has already completed (that Base is a +// valid name) and that the NameGenerator generates a name that is also valid. +func GenerateName(u NameGenerator, meta *ObjectMeta) { + if len(meta.GenerateName) == 0 || len(meta.Name) != 0 { + return + } + meta.Name = u.GenerateName(meta.GenerateName) +} + +// simpleNameGenerator generates random names. +type simpleNameGenerator struct{} + +// SimpleNameGenerator is a generator that returns the name plus a random suffix of five alphanumerics +// when a name is requested. The string is guaranteed to not exceed the length of a standard Kubernetes +// name (63 characters). +var SimpleNameGenerator NameGenerator = simpleNameGenerator{} + +const ( + // TODO: make this flexible for non-core resources with alternate naming rules. 
+ maxNameLength = 63 + randomLength = 5 + maxGeneratedNameLength = maxNameLength - randomLength +) + +func (simpleNameGenerator) GenerateName(base string) string { + if len(base) > maxGeneratedNameLength { + base = base[:maxGeneratedNameLength] + } + return fmt.Sprintf("%s%s", base, utilrand.String(randomLength)) +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/api/v1/generated.pb.go index 109c0c1bd8..d9638bd1b7 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/generated.pb.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -39,8 +39,10 @@ limitations under the License. ComponentStatus ComponentStatusList ConfigMap + ConfigMapEnvSource ConfigMapKeySelector ConfigMapList + ConfigMapProjection ConfigMapVolumeSource Container ContainerImage @@ -52,6 +54,7 @@ limitations under the License. ContainerStatus DaemonEndpoint DeleteOptions + DownwardAPIProjection DownwardAPIVolumeFile DownwardAPIVolumeSource EmptyDirVolumeSource @@ -60,13 +63,13 @@ limitations under the License. EndpointSubset Endpoints EndpointsList + EnvFromSource EnvVar EnvVarSource Event EventList EventSource ExecAction - ExportOptions FCVolumeSource FlexVolumeSource FlockerVolumeSource @@ -101,6 +104,7 @@ limitations under the License. NodeDaemonEndpoints NodeList NodeProxyOptions + NodeResources NodeSelector NodeSelectorRequirement NodeSelectorTerm @@ -110,7 +114,6 @@ limitations under the License. ObjectFieldSelector ObjectMeta ObjectReference - OwnerReference PersistentVolume PersistentVolumeClaim PersistentVolumeClaimList @@ -131,6 +134,7 @@ limitations under the License. PodExecOptions PodList PodLogOptions + PodPortForwardOptions PodProxyOptions PodSecurityContext PodSignature @@ -140,10 +144,12 @@ limitations under the License. PodTemplate PodTemplateList PodTemplateSpec + PortworxVolumeSource Preconditions PreferAvoidPodsEntry PreferredSchedulingTerm Probe + ProjectedVolumeSource QuobyteVolumeSource RBDVolumeSource RangeAllocation @@ -159,9 +165,12 @@ limitations under the License. ResourceQuotaStatus ResourceRequirements SELinuxOptions + ScaleIOVolumeSource Secret + SecretEnvSource SecretKeySelector SecretList + SecretProjection SecretVolumeSource SecurityContext SerializedReference @@ -173,11 +182,13 @@ limitations under the License. 
ServiceProxyOptions ServiceSpec ServiceStatus + Sysctl TCPSocketAction Taint Toleration Volume VolumeMount + VolumeProjection VolumeSource VsphereVirtualDiskVolumeSource WeightedPodAffinityTerm @@ -188,11 +199,11 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_api_resource "k8s.io/kubernetes/pkg/api/resource" -import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" -import k8s_io_kubernetes_pkg_runtime "k8s.io/kubernetes/pkg/runtime" +import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" -import k8s_io_kubernetes_pkg_types "k8s.io/kubernetes/pkg/types" +import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" import strings "strings" import reflect "reflect" @@ -267,604 +278,648 @@ func (m *ConfigMap) Reset() { *m = ConfigMap{} } func (*ConfigMap) ProtoMessage() {} func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } +func (*ConfigMapEnvSource) ProtoMessage() {} +func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } func (*ConfigMapKeySelector) ProtoMessage() {} -func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } func (*ConfigMapList) ProtoMessage() {} -func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } + +func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } +func (*ConfigMapProjection) ProtoMessage() {} +func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } func (*ConfigMapVolumeSource) ProtoMessage() {} -func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *Container) Reset() { *m = Container{} } func (*Container) ProtoMessage() {} -func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *ContainerImage) Reset() { *m = ContainerImage{} } func (*ContainerImage) ProtoMessage() {} -func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } func (m *ContainerPort) Reset() { *m = ContainerPort{} } func (*ContainerPort) ProtoMessage() {} -func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } func (m *ContainerState) Reset() { *m = ContainerState{} } func (*ContainerState) ProtoMessage() {} -func (*ContainerState) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } func (*ContainerStateRunning) ProtoMessage() {} -func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } func (*ContainerStateTerminated) ProtoMessage() {} func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{22} + return fileDescriptorGenerated, []int{24} } func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } func (*ContainerStateWaiting) ProtoMessage() {} -func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (*ContainerStatus) ProtoMessage() {} -func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } func (*DaemonEndpoint) ProtoMessage() {} -func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } func (*DeleteOptions) ProtoMessage() {} -func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } + +func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } +func (*DownwardAPIProjection) ProtoMessage() {} +func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } func (*DownwardAPIVolumeFile) ProtoMessage() {} -func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } func (*DownwardAPIVolumeSource) ProtoMessage() {} func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{28} + return fileDescriptorGenerated, []int{31} } func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } func (*EmptyDirVolumeSource) ProtoMessage() {} -func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } func (*EndpointAddress) ProtoMessage() {} -func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *EndpointPort) Reset() { *m = EndpointPort{} } func 
(*EndpointPort) ProtoMessage() {} -func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } func (*EndpointSubset) ProtoMessage() {} -func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } func (m *Endpoints) Reset() { *m = Endpoints{} } func (*Endpoints) ProtoMessage() {} -func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func (m *EndpointsList) Reset() { *m = EndpointsList{} } func (*EndpointsList) ProtoMessage() {} -func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } + +func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } +func (*EnvFromSource) ProtoMessage() {} +func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } func (m *EnvVar) Reset() { *m = EnvVar{} } func (*EnvVar) ProtoMessage() {} -func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } func (*EnvVarSource) ProtoMessage() {} -func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} -func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } func (m *EventSource) Reset() { *m = EventSource{} } func (*EventSource) ProtoMessage() {} -func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } func (m *ExecAction) Reset() { *m = ExecAction{} } func (*ExecAction) ProtoMessage() {} -func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } - -func (m *ExportOptions) Reset() { *m = ExportOptions{} } -func (*ExportOptions) ProtoMessage() {} -func (*ExportOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } func (*FCVolumeSource) ProtoMessage() {} -func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } func (*FlexVolumeSource) ProtoMessage() {} -func (*FlexVolumeSource) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } func (*FlockerVolumeSource) ProtoMessage() {} -func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{45} + return fileDescriptorGenerated, []int{48} } func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } func (*GitRepoVolumeSource) ProtoMessage() {} -func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } func (*GlusterfsVolumeSource) ProtoMessage() {} -func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } +func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (*HTTPGetAction) ProtoMessage() {} -func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } +func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) ProtoMessage() {} -func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } +func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } func (m *Handler) Reset() { *m = Handler{} } func (*Handler) ProtoMessage() {} -func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } +func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (*HostPathVolumeSource) ProtoMessage() {} -func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } +func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (*ISCSIVolumeSource) ProtoMessage() {} -func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} -func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } +func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} -func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } +func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) 
ProtoMessage() {} -func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } +func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} -func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } +func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} -func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } +func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} -func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } +func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } func (m *ListOptions) Reset() { *m = ListOptions{} } func (*ListOptions) ProtoMessage() {} -func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } +func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} -func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } +func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} -func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } +func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} -func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } +func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (*NFSVolumeSource) ProtoMessage() {} -func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } +func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} -func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } +func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (*NamespaceList) ProtoMessage() {} -func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } +func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) 
ProtoMessage() {} -func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } +func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} -func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } +func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} -func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } +func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} -func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } +func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} -func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } +func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} -func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } +func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} -func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } +func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} -func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } +func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } + +func (m *NodeResources) Reset() { *m = NodeResources{} } +func (*NodeResources) ProtoMessage() {} +func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} -func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } +func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{77} + return fileDescriptorGenerated, []int{81} } func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} -func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } +func (*NodeSelectorTerm) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{82} } func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} -func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } +func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} -func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } +func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} -func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } +func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} -func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } +func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } func (*ObjectMeta) ProtoMessage() {} -func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } +func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} -func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } - -func (m *OwnerReference) Reset() { *m = OwnerReference{} } -func (*OwnerReference) ProtoMessage() {} -func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } +func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} -func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } +func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} -func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } +func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{88} + return fileDescriptorGenerated, []int{91} } func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{89} + return fileDescriptorGenerated, []int{92} } func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{90} + return fileDescriptorGenerated, []int{93} } func (m 
*PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{91} + return fileDescriptorGenerated, []int{94} } func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func (*PersistentVolumeList) ProtoMessage() {} -func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{92} } +func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} -func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} } +func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} -func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{94} } +func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} -func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } +func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{96} + return fileDescriptorGenerated, []int{99} } func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} -func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } +func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} } func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} -func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } +func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{101} } func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} -func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } +func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{102} } func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} -func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} } +func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{103} } func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} -func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{101} } +func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{104} } func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} -func (*PodCondition) Descriptor() ([]byte, []int) { 
return fileDescriptorGenerated, []int{102} } +func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{105} } func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} -func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{103} } +func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{106} } func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} -func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{104} } +func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{107} } func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} -func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{105} } +func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} } + +func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } +func (*PodPortForwardOptions) ProtoMessage() {} +func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{109} } func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} -func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{106} } +func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{110} } func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} -func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{107} } +func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} } func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} -func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} } +func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} -func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{109} } +func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} -func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{110} } +func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} -func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} } +func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} -func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } +func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} -func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } +func (*PodTemplateList) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{117} } func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} -func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } +func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } + +func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } +func (*PortworxVolumeSource) ProtoMessage() {} +func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} -func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } +func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{117} + return fileDescriptorGenerated, []int{122} } func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} -func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } +func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } + +func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } +func (*ProjectedVolumeSource) ProtoMessage() {} +func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} -func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } +func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} -func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } +func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} -func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } +func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} -func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} } +func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{123} + return fileDescriptorGenerated, []int{129} } func (m 
*ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{124} + return fileDescriptorGenerated, []int{130} } func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{125} + return fileDescriptorGenerated, []int{131} } func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{126} + return fileDescriptorGenerated, []int{132} } func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} -func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } +func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} -func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } +func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} -func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} } +func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} -func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } +func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} } func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} -func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } +func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} -func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} } +func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{138} } func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} -func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } +func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } + +func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } +func (*ScaleIOVolumeSource) ProtoMessage() {} +func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} } func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} -func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } +func (*Secret) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{141} } + +func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } +func (*SecretEnvSource) ProtoMessage() {} +func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{142} } func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} -func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } +func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} -func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} } +func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{144} } + +func (m *SecretProjection) Reset() { *m = SecretProjection{} } +func (*SecretProjection) ProtoMessage() {} +func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{145} } func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} -func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } +func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{146} } func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} -func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{138} } +func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{147} } func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} -func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } +func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{148} } func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} -func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} } +func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} -func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } +func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} -func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{142} } +func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} -func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } +func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} -func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{144} } +func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) 
ProtoMessage() {} -func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{145} } +func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} -func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{146} } +func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} } func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} -func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{147} } +func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } + +func (m *Sysctl) Reset() { *m = Sysctl{} } +func (*Sysctl) ProtoMessage() {} +func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} -func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{148} } +func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} -func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } +func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} -func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } +func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{160} } func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} -func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } +func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} -func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } +func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } + +func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } +func (*VolumeProjection) ProtoMessage() {} +func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} -func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } +func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{154} + return fileDescriptorGenerated, []int{165} } func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{155} + return fileDescriptorGenerated, []int{166} } func init() { @@ -882,8 +937,10 @@ func init() { proto.RegisterType((*ComponentStatus)(nil), 
"k8s.io.kubernetes.pkg.api.v1.ComponentStatus") proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.kubernetes.pkg.api.v1.ComponentStatusList") proto.RegisterType((*ConfigMap)(nil), "k8s.io.kubernetes.pkg.api.v1.ConfigMap") + proto.RegisterType((*ConfigMapEnvSource)(nil), "k8s.io.kubernetes.pkg.api.v1.ConfigMapEnvSource") proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.kubernetes.pkg.api.v1.ConfigMapKeySelector") proto.RegisterType((*ConfigMapList)(nil), "k8s.io.kubernetes.pkg.api.v1.ConfigMapList") + proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.kubernetes.pkg.api.v1.ConfigMapProjection") proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.ConfigMapVolumeSource") proto.RegisterType((*Container)(nil), "k8s.io.kubernetes.pkg.api.v1.Container") proto.RegisterType((*ContainerImage)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerImage") @@ -895,6 +952,7 @@ func init() { proto.RegisterType((*ContainerStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerStatus") proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.kubernetes.pkg.api.v1.DaemonEndpoint") proto.RegisterType((*DeleteOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.DeleteOptions") + proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.kubernetes.pkg.api.v1.DownwardAPIProjection") proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.kubernetes.pkg.api.v1.DownwardAPIVolumeFile") proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.DownwardAPIVolumeSource") proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.EmptyDirVolumeSource") @@ -903,13 +961,13 @@ func init() { proto.RegisterType((*EndpointSubset)(nil), "k8s.io.kubernetes.pkg.api.v1.EndpointSubset") proto.RegisterType((*Endpoints)(nil), "k8s.io.kubernetes.pkg.api.v1.Endpoints") proto.RegisterType((*EndpointsList)(nil), "k8s.io.kubernetes.pkg.api.v1.EndpointsList") + proto.RegisterType((*EnvFromSource)(nil), "k8s.io.kubernetes.pkg.api.v1.EnvFromSource") proto.RegisterType((*EnvVar)(nil), "k8s.io.kubernetes.pkg.api.v1.EnvVar") proto.RegisterType((*EnvVarSource)(nil), "k8s.io.kubernetes.pkg.api.v1.EnvVarSource") proto.RegisterType((*Event)(nil), "k8s.io.kubernetes.pkg.api.v1.Event") proto.RegisterType((*EventList)(nil), "k8s.io.kubernetes.pkg.api.v1.EventList") proto.RegisterType((*EventSource)(nil), "k8s.io.kubernetes.pkg.api.v1.EventSource") proto.RegisterType((*ExecAction)(nil), "k8s.io.kubernetes.pkg.api.v1.ExecAction") - proto.RegisterType((*ExportOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.ExportOptions") proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.FCVolumeSource") proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.FlexVolumeSource") proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.FlockerVolumeSource") @@ -944,6 +1002,7 @@ func init() { proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeDaemonEndpoints") proto.RegisterType((*NodeList)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeList") proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeProxyOptions") + proto.RegisterType((*NodeResources)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeResources") proto.RegisterType((*NodeSelector)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeSelector") proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeSelectorRequirement") proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeSelectorTerm") @@ -953,7 
+1012,6 @@ func init() { proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.kubernetes.pkg.api.v1.ObjectFieldSelector") proto.RegisterType((*ObjectMeta)(nil), "k8s.io.kubernetes.pkg.api.v1.ObjectMeta") proto.RegisterType((*ObjectReference)(nil), "k8s.io.kubernetes.pkg.api.v1.ObjectReference") - proto.RegisterType((*OwnerReference)(nil), "k8s.io.kubernetes.pkg.api.v1.OwnerReference") proto.RegisterType((*PersistentVolume)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolume") proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaim") proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaimList") @@ -974,6 +1032,7 @@ func init() { proto.RegisterType((*PodExecOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.PodExecOptions") proto.RegisterType((*PodList)(nil), "k8s.io.kubernetes.pkg.api.v1.PodList") proto.RegisterType((*PodLogOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.PodLogOptions") + proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.PodPortForwardOptions") proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.PodProxyOptions") proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.kubernetes.pkg.api.v1.PodSecurityContext") proto.RegisterType((*PodSignature)(nil), "k8s.io.kubernetes.pkg.api.v1.PodSignature") @@ -983,10 +1042,12 @@ func init() { proto.RegisterType((*PodTemplate)(nil), "k8s.io.kubernetes.pkg.api.v1.PodTemplate") proto.RegisterType((*PodTemplateList)(nil), "k8s.io.kubernetes.pkg.api.v1.PodTemplateList") proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec") + proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.PortworxVolumeSource") proto.RegisterType((*Preconditions)(nil), "k8s.io.kubernetes.pkg.api.v1.Preconditions") proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.kubernetes.pkg.api.v1.PreferAvoidPodsEntry") proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.kubernetes.pkg.api.v1.PreferredSchedulingTerm") proto.RegisterType((*Probe)(nil), "k8s.io.kubernetes.pkg.api.v1.Probe") + proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.ProjectedVolumeSource") proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.QuobyteVolumeSource") proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.RBDVolumeSource") proto.RegisterType((*RangeAllocation)(nil), "k8s.io.kubernetes.pkg.api.v1.RangeAllocation") @@ -1002,9 +1063,12 @@ func init() { proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.ResourceQuotaStatus") proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.kubernetes.pkg.api.v1.ResourceRequirements") proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.SELinuxOptions") + proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.ScaleIOVolumeSource") proto.RegisterType((*Secret)(nil), "k8s.io.kubernetes.pkg.api.v1.Secret") + proto.RegisterType((*SecretEnvSource)(nil), "k8s.io.kubernetes.pkg.api.v1.SecretEnvSource") proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.kubernetes.pkg.api.v1.SecretKeySelector") proto.RegisterType((*SecretList)(nil), "k8s.io.kubernetes.pkg.api.v1.SecretList") + proto.RegisterType((*SecretProjection)(nil), "k8s.io.kubernetes.pkg.api.v1.SecretProjection") proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.SecretVolumeSource") 
proto.RegisterType((*SecurityContext)(nil), "k8s.io.kubernetes.pkg.api.v1.SecurityContext") proto.RegisterType((*SerializedReference)(nil), "k8s.io.kubernetes.pkg.api.v1.SerializedReference") @@ -1016,11 +1080,13 @@ func init() { proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.ServiceProxyOptions") proto.RegisterType((*ServiceSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.ServiceSpec") proto.RegisterType((*ServiceStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.ServiceStatus") + proto.RegisterType((*Sysctl)(nil), "k8s.io.kubernetes.pkg.api.v1.Sysctl") proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.kubernetes.pkg.api.v1.TCPSocketAction") proto.RegisterType((*Taint)(nil), "k8s.io.kubernetes.pkg.api.v1.Taint") proto.RegisterType((*Toleration)(nil), "k8s.io.kubernetes.pkg.api.v1.Toleration") proto.RegisterType((*Volume)(nil), "k8s.io.kubernetes.pkg.api.v1.Volume") proto.RegisterType((*VolumeMount)(nil), "k8s.io.kubernetes.pkg.api.v1.VolumeMount") + proto.RegisterType((*VolumeProjection)(nil), "k8s.io.kubernetes.pkg.api.v1.VolumeProjection") proto.RegisterType((*VolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.VolumeSource") proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.VsphereVirtualDiskVolumeSource") proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.kubernetes.pkg.api.v1.WeightedPodAffinityTerm") @@ -1580,7 +1646,7 @@ func (m *ConfigMap) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *ConfigMapKeySelector) Marshal() (data []byte, err error) { +func (m *ConfigMapEnvSource) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -1590,7 +1656,7 @@ func (m *ConfigMapKeySelector) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *ConfigMapKeySelector) MarshalTo(data []byte) (int, error) { +func (m *ConfigMapEnvSource) MarshalTo(data []byte) (int, error) { var i int _ = i var l int @@ -1603,10 +1669,56 @@ func (m *ConfigMapKeySelector) MarshalTo(data []byte) (int, error) { return 0, err } i += n10 + if m.Optional != nil { + data[i] = 0x10 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ConfigMapKeySelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ConfigMapKeySelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) + n11, err := m.LocalObjectReference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(len(m.Key))) i += copy(data[i:], m.Key) + if m.Optional != nil { + data[i] = 0x18 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } return i, nil } @@ -1628,11 +1740,11 @@ func (m *ConfigMapList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(data[i:]) + n12, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n11 + i += n12 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -1648,6 +1760,54 @@ func (m *ConfigMapList) MarshalTo(data []byte) (int, error) { return i, nil } +func (m *ConfigMapProjection) Marshal() (data []byte, err error) { + size := 
m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ConfigMapProjection) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) + n13, err := m.LocalObjectReference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Optional != nil { + data[i] = 0x20 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + func (m *ConfigMapVolumeSource) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -1666,11 +1826,11 @@ func (m *ConfigMapVolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) - n12, err := m.LocalObjectReference.MarshalTo(data[i:]) + n14, err := m.LocalObjectReference.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n12 + i += n14 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -1688,6 +1848,16 @@ func (m *ConfigMapVolumeSource) MarshalTo(data []byte) (int, error) { i++ i = encodeVarintGenerated(data, i, uint64(*m.DefaultMode)) } + if m.Optional != nil { + data[i] = 0x20 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } return i, nil } @@ -1775,11 +1945,11 @@ func (m *Container) MarshalTo(data []byte) (int, error) { data[i] = 0x42 i++ i = encodeVarintGenerated(data, i, uint64(m.Resources.Size())) - n13, err := m.Resources.MarshalTo(data[i:]) + n15, err := m.Resources.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n13 + i += n15 if len(m.VolumeMounts) > 0 { for _, msg := range m.VolumeMounts { data[i] = 0x4a @@ -1796,31 +1966,31 @@ func (m *Container) MarshalTo(data []byte) (int, error) { data[i] = 0x52 i++ i = encodeVarintGenerated(data, i, uint64(m.LivenessProbe.Size())) - n14, err := m.LivenessProbe.MarshalTo(data[i:]) + n16, err := m.LivenessProbe.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n14 + i += n16 } if m.ReadinessProbe != nil { data[i] = 0x5a i++ i = encodeVarintGenerated(data, i, uint64(m.ReadinessProbe.Size())) - n15, err := m.ReadinessProbe.MarshalTo(data[i:]) + n17, err := m.ReadinessProbe.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n15 + i += n17 } if m.Lifecycle != nil { data[i] = 0x62 i++ i = encodeVarintGenerated(data, i, uint64(m.Lifecycle.Size())) - n16, err := m.Lifecycle.MarshalTo(data[i:]) + n18, err := m.Lifecycle.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n16 + i += n18 } data[i] = 0x6a i++ @@ -1834,11 +2004,11 @@ func (m *Container) MarshalTo(data []byte) (int, error) { data[i] = 0x7a i++ i = encodeVarintGenerated(data, i, uint64(m.SecurityContext.Size())) - n17, err := m.SecurityContext.MarshalTo(data[i:]) + n19, err := m.SecurityContext.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n17 + i += n19 } data[i] = 0x80 i++ @@ -1870,6 +2040,26 @@ func (m *Container) MarshalTo(data []byte) (int, error) { data[i] = 0 } i++ + if len(m.EnvFrom) > 0 { + for _, msg := range m.EnvFrom { + data[i] = 0x9a + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + 
if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0xa2 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.TerminationMessagePolicy))) + i += copy(data[i:], m.TerminationMessagePolicy) return i, nil } @@ -1964,31 +2154,31 @@ func (m *ContainerState) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.Waiting.Size())) - n18, err := m.Waiting.MarshalTo(data[i:]) + n20, err := m.Waiting.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n18 + i += n20 } if m.Running != nil { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Running.Size())) - n19, err := m.Running.MarshalTo(data[i:]) + n21, err := m.Running.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n19 + i += n21 } if m.Terminated != nil { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Terminated.Size())) - n20, err := m.Terminated.MarshalTo(data[i:]) + n22, err := m.Terminated.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n20 + i += n22 } return i, nil } @@ -2011,11 +2201,11 @@ func (m *ContainerStateRunning) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.StartedAt.Size())) - n21, err := m.StartedAt.MarshalTo(data[i:]) + n23, err := m.StartedAt.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n21 + i += n23 return i, nil } @@ -2051,19 +2241,19 @@ func (m *ContainerStateTerminated) MarshalTo(data []byte) (int, error) { data[i] = 0x2a i++ i = encodeVarintGenerated(data, i, uint64(m.StartedAt.Size())) - n22, err := m.StartedAt.MarshalTo(data[i:]) + n24, err := m.StartedAt.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n22 + i += n24 data[i] = 0x32 i++ i = encodeVarintGenerated(data, i, uint64(m.FinishedAt.Size())) - n23, err := m.FinishedAt.MarshalTo(data[i:]) + n25, err := m.FinishedAt.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n23 + i += n25 data[i] = 0x3a i++ i = encodeVarintGenerated(data, i, uint64(len(m.ContainerID))) @@ -2119,19 +2309,19 @@ func (m *ContainerStatus) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.State.Size())) - n24, err := m.State.MarshalTo(data[i:]) + n26, err := m.State.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n24 + i += n26 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.LastTerminationState.Size())) - n25, err := m.LastTerminationState.MarshalTo(data[i:]) + n27, err := m.LastTerminationState.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n25 + i += n27 data[i] = 0x20 i++ if m.Ready { @@ -2203,11 +2393,11 @@ func (m *DeleteOptions) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Preconditions.Size())) - n26, err := m.Preconditions.MarshalTo(data[i:]) + n28, err := m.Preconditions.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n26 + i += n28 } if m.OrphanDependents != nil { data[i] = 0x18 @@ -2219,6 +2409,42 @@ func (m *DeleteOptions) MarshalTo(data []byte) (int, error) { } i++ } + if m.PropagationPolicy != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.PropagationPolicy))) + i += copy(data[i:], *m.PropagationPolicy) + } + return i, nil +} + +func (m *DownwardAPIProjection) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m 
*DownwardAPIProjection) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -2245,21 +2471,21 @@ func (m *DownwardAPIVolumeFile) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.FieldRef.Size())) - n27, err := m.FieldRef.MarshalTo(data[i:]) + n29, err := m.FieldRef.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n27 + i += n29 } if m.ResourceFieldRef != nil { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.ResourceFieldRef.Size())) - n28, err := m.ResourceFieldRef.MarshalTo(data[i:]) + n30, err := m.ResourceFieldRef.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n28 + i += n30 } if m.Mode != nil { data[i] = 0x20 @@ -2349,11 +2575,11 @@ func (m *EndpointAddress) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.TargetRef.Size())) - n29, err := m.TargetRef.MarshalTo(data[i:]) + n31, err := m.TargetRef.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n29 + i += n31 } data[i] = 0x1a i++ @@ -2469,11 +2695,11 @@ func (m *Endpoints) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n30, err := m.ObjectMeta.MarshalTo(data[i:]) + n32, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n30 + i += n32 if len(m.Subsets) > 0 { for _, msg := range m.Subsets { data[i] = 0x12 @@ -2507,11 +2733,11 @@ func (m *EndpointsList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n31, err := m.ListMeta.MarshalTo(data[i:]) + n33, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n31 + i += n33 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -2527,6 +2753,48 @@ func (m *EndpointsList) MarshalTo(data []byte) (int, error) { return i, nil } +func (m *EnvFromSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EnvFromSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Prefix))) + i += copy(data[i:], m.Prefix) + if m.ConfigMapRef != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ConfigMapRef.Size())) + n34, err := m.ConfigMapRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n34 + } + if m.SecretRef != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) + n35, err := m.SecretRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n35 + } + return i, nil +} + func (m *EnvVar) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -2554,11 +2822,11 @@ func (m *EnvVar) MarshalTo(data []byte) (int, error) { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.ValueFrom.Size())) - n32, err := m.ValueFrom.MarshalTo(data[i:]) + n36, err := m.ValueFrom.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n32 + i += n36 } return i, nil } @@ -2582,41 +2850,41 @@ func (m *EnvVarSource) MarshalTo(data []byte) 
(int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.FieldRef.Size())) - n33, err := m.FieldRef.MarshalTo(data[i:]) + n37, err := m.FieldRef.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n33 + i += n37 } if m.ResourceFieldRef != nil { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.ResourceFieldRef.Size())) - n34, err := m.ResourceFieldRef.MarshalTo(data[i:]) + n38, err := m.ResourceFieldRef.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n34 + i += n38 } if m.ConfigMapKeyRef != nil { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.ConfigMapKeyRef.Size())) - n35, err := m.ConfigMapKeyRef.MarshalTo(data[i:]) + n39, err := m.ConfigMapKeyRef.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n35 + i += n39 } if m.SecretKeyRef != nil { data[i] = 0x22 i++ i = encodeVarintGenerated(data, i, uint64(m.SecretKeyRef.Size())) - n36, err := m.SecretKeyRef.MarshalTo(data[i:]) + n40, err := m.SecretKeyRef.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n36 + i += n40 } return i, nil } @@ -2639,19 +2907,19 @@ func (m *Event) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n37, err := m.ObjectMeta.MarshalTo(data[i:]) + n41, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n37 + i += n41 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.InvolvedObject.Size())) - n38, err := m.InvolvedObject.MarshalTo(data[i:]) + n42, err := m.InvolvedObject.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n38 + i += n42 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) @@ -2663,27 +2931,27 @@ func (m *Event) MarshalTo(data []byte) (int, error) { data[i] = 0x2a i++ i = encodeVarintGenerated(data, i, uint64(m.Source.Size())) - n39, err := m.Source.MarshalTo(data[i:]) + n43, err := m.Source.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n39 + i += n43 data[i] = 0x32 i++ i = encodeVarintGenerated(data, i, uint64(m.FirstTimestamp.Size())) - n40, err := m.FirstTimestamp.MarshalTo(data[i:]) + n44, err := m.FirstTimestamp.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n40 + i += n44 data[i] = 0x3a i++ i = encodeVarintGenerated(data, i, uint64(m.LastTimestamp.Size())) - n41, err := m.LastTimestamp.MarshalTo(data[i:]) + n45, err := m.LastTimestamp.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n41 + i += n45 data[i] = 0x40 i++ i = encodeVarintGenerated(data, i, uint64(m.Count)) @@ -2712,11 +2980,11 @@ func (m *EventList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n42, err := m.ListMeta.MarshalTo(data[i:]) + n46, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n42 + i += n46 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -2791,40 +3059,6 @@ func (m *ExecAction) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *ExportOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ExportOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - if m.Export { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x10 - i++ - if m.Exact { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - func (m 
*FCVolumeSource) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -2902,11 +3136,11 @@ func (m *FlexVolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) - n43, err := m.SecretRef.MarshalTo(data[i:]) + n47, err := m.SecretRef.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n43 + i += n47 } data[i] = 0x20 i++ @@ -3085,11 +3319,11 @@ func (m *HTTPGetAction) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Port.Size())) - n44, err := m.Port.MarshalTo(data[i:]) + n48, err := m.Port.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n44 + i += n48 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(len(m.Host))) @@ -3158,31 +3392,31 @@ func (m *Handler) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.Exec.Size())) - n45, err := m.Exec.MarshalTo(data[i:]) + n49, err := m.Exec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n45 + i += n49 } if m.HTTPGet != nil { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.HTTPGet.Size())) - n46, err := m.HTTPGet.MarshalTo(data[i:]) + n50, err := m.HTTPGet.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n46 + i += n50 } if m.TCPSocket != nil { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.TCPSocket.Size())) - n47, err := m.TCPSocket.MarshalTo(data[i:]) + n51, err := m.TCPSocket.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n47 + i += n51 } return i, nil } @@ -3251,6 +3485,21 @@ func (m *ISCSIVolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0 } i++ + if len(m.Portals) > 0 { + for _, s := range m.Portals { + data[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } return i, nil } @@ -3304,21 +3553,21 @@ func (m *Lifecycle) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.PostStart.Size())) - n48, err := m.PostStart.MarshalTo(data[i:]) + n52, err := m.PostStart.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n48 + i += n52 } if m.PreStop != nil { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.PreStop.Size())) - n49, err := m.PreStop.MarshalTo(data[i:]) + n53, err := m.PreStop.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n49 + i += n53 } return i, nil } @@ -3341,19 +3590,19 @@ func (m *LimitRange) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n50, err := m.ObjectMeta.MarshalTo(data[i:]) + n54, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n50 + i += n54 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n51, err := m.Spec.MarshalTo(data[i:]) + n55, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n51 + i += n55 return i, nil } @@ -3391,11 +3640,11 @@ func (m *LimitRangeItem) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n52, err := (&v).MarshalTo(data[i:]) + n56, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n52 + i += n56 } } if len(m.Min) > 0 { @@ -3413,11 +3662,11 @@ func (m *LimitRangeItem) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, 
uint64((&v).Size())) - n53, err := (&v).MarshalTo(data[i:]) + n57, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n53 + i += n57 } } if len(m.Default) > 0 { @@ -3435,11 +3684,11 @@ func (m *LimitRangeItem) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n54, err := (&v).MarshalTo(data[i:]) + n58, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n54 + i += n58 } } if len(m.DefaultRequest) > 0 { @@ -3457,11 +3706,11 @@ func (m *LimitRangeItem) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n55, err := (&v).MarshalTo(data[i:]) + n59, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n55 + i += n59 } } if len(m.MaxLimitRequestRatio) > 0 { @@ -3479,11 +3728,11 @@ func (m *LimitRangeItem) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n56, err := (&v).MarshalTo(data[i:]) + n60, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n56 + i += n60 } } return i, nil @@ -3507,11 +3756,11 @@ func (m *LimitRangeList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n57, err := m.ListMeta.MarshalTo(data[i:]) + n61, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n57 + i += n61 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -3575,11 +3824,11 @@ func (m *List) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n58, err := m.ListMeta.MarshalTo(data[i:]) + n62, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n58 + i += n62 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -3768,27 +4017,27 @@ func (m *Namespace) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n59, err := m.ObjectMeta.MarshalTo(data[i:]) + n63, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n59 + i += n63 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n60, err := m.Spec.MarshalTo(data[i:]) + n64, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n60 + i += n64 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n61, err := m.Status.MarshalTo(data[i:]) + n65, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n61 + i += n65 return i, nil } @@ -3810,11 +4059,11 @@ func (m *NamespaceList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n62, err := m.ListMeta.MarshalTo(data[i:]) + n66, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n62 + i += n66 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -3903,27 +4152,27 @@ func (m *Node) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n63, err := m.ObjectMeta.MarshalTo(data[i:]) + n67, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n63 + i += n67 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n64, err := m.Spec.MarshalTo(data[i:]) + n68, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n64 + i 
+= n68 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n65, err := m.Status.MarshalTo(data[i:]) + n69, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n65 + i += n69 return i, nil } @@ -3972,11 +4221,11 @@ func (m *NodeAffinity) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) - n66, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(data[i:]) + n70, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n66 + i += n70 } if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { @@ -4019,19 +4268,19 @@ func (m *NodeCondition) MarshalTo(data []byte) (int, error) { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.LastHeartbeatTime.Size())) - n67, err := m.LastHeartbeatTime.MarshalTo(data[i:]) + n71, err := m.LastHeartbeatTime.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n67 + i += n71 data[i] = 0x22 i++ i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n68, err := m.LastTransitionTime.MarshalTo(data[i:]) + n72, err := m.LastTransitionTime.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n68 + i += n72 data[i] = 0x2a i++ i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) @@ -4061,11 +4310,11 @@ func (m *NodeDaemonEndpoints) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.KubeletEndpoint.Size())) - n69, err := m.KubeletEndpoint.MarshalTo(data[i:]) + n73, err := m.KubeletEndpoint.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n69 + i += n73 return i, nil } @@ -4087,11 +4336,11 @@ func (m *NodeList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n70, err := m.ListMeta.MarshalTo(data[i:]) + n74, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n70 + i += n74 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -4129,6 +4378,46 @@ func (m *NodeProxyOptions) MarshalTo(data []byte) (int, error) { return i, nil } +func (m *NodeResources) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeResources) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Capacity) > 0 { + for k := range m.Capacity { + data[i] = 0xa + i++ + v := m.Capacity[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n75, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n75 + } + } + return i, nil +} + func (m *NodeSelector) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -4265,6 +4554,18 @@ func (m *NodeSpec) MarshalTo(data []byte) (int, error) { data[i] = 0 } i++ + if len(m.Taints) > 0 { + for _, msg := range m.Taints { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := 
msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -4298,11 +4599,11 @@ func (m *NodeStatus) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n71, err := (&v).MarshalTo(data[i:]) + n76, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n71 + i += n76 } } if len(m.Allocatable) > 0 { @@ -4320,11 +4621,11 @@ func (m *NodeStatus) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n72, err := (&v).MarshalTo(data[i:]) + n77, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n72 + i += n77 } } data[i] = 0x1a @@ -4358,19 +4659,19 @@ func (m *NodeStatus) MarshalTo(data []byte) (int, error) { data[i] = 0x32 i++ i = encodeVarintGenerated(data, i, uint64(m.DaemonEndpoints.Size())) - n73, err := m.DaemonEndpoints.MarshalTo(data[i:]) + n78, err := m.DaemonEndpoints.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n73 + i += n78 data[i] = 0x3a i++ i = encodeVarintGenerated(data, i, uint64(m.NodeInfo.Size())) - n74, err := m.NodeInfo.MarshalTo(data[i:]) + n79, err := m.NodeInfo.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n74 + i += n79 if len(m.Images) > 0 { for _, msg := range m.Images { data[i] = 0x42 @@ -4542,20 +4843,20 @@ func (m *ObjectMeta) MarshalTo(data []byte) (int, error) { data[i] = 0x42 i++ i = encodeVarintGenerated(data, i, uint64(m.CreationTimestamp.Size())) - n75, err := m.CreationTimestamp.MarshalTo(data[i:]) + n80, err := m.CreationTimestamp.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n75 + i += n80 if m.DeletionTimestamp != nil { data[i] = 0x4a i++ i = encodeVarintGenerated(data, i, uint64(m.DeletionTimestamp.Size())) - n76, err := m.DeletionTimestamp.MarshalTo(data[i:]) + n81, err := m.DeletionTimestamp.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n76 + i += n81 } if m.DeletionGracePeriodSeconds != nil { data[i] = 0x50 @@ -4676,50 +4977,6 @@ func (m *ObjectReference) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *OwnerReference) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *OwnerReference) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.UID))) - i += copy(data[i:], m.UID) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) - i += copy(data[i:], m.APIVersion) - if m.Controller != nil { - data[i] = 0x30 - i++ - if *m.Controller { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - } - return i, nil -} - func (m *PersistentVolume) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -4738,27 +4995,27 @@ func (m *PersistentVolume) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n77, err := m.ObjectMeta.MarshalTo(data[i:]) + n82, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n77 + i += n82 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n78, err 
:= m.Spec.MarshalTo(data[i:]) + n83, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n78 + i += n83 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n79, err := m.Status.MarshalTo(data[i:]) + n84, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n79 + i += n84 return i, nil } @@ -4780,27 +5037,27 @@ func (m *PersistentVolumeClaim) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n80, err := m.ObjectMeta.MarshalTo(data[i:]) + n85, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n80 + i += n85 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n81, err := m.Spec.MarshalTo(data[i:]) + n86, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n81 + i += n86 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n82, err := m.Status.MarshalTo(data[i:]) + n87, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n82 + i += n87 return i, nil } @@ -4822,11 +5079,11 @@ func (m *PersistentVolumeClaimList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n83, err := m.ListMeta.MarshalTo(data[i:]) + n88, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n83 + i += n88 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -4875,11 +5132,11 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Resources.Size())) - n84, err := m.Resources.MarshalTo(data[i:]) + n89, err := m.Resources.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n84 + i += n89 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(len(m.VolumeName))) @@ -4888,11 +5145,17 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(data []byte) (int, error) { data[i] = 0x22 i++ i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n85, err := m.Selector.MarshalTo(data[i:]) + n90, err := m.Selector.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n85 + i += n90 + } + if m.StorageClassName != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.StorageClassName))) + i += copy(data[i:], *m.StorageClassName) } return i, nil } @@ -4946,11 +5209,11 @@ func (m *PersistentVolumeClaimStatus) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n86, err := (&v).MarshalTo(data[i:]) + n91, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n86 + i += n91 } } return i, nil @@ -5004,11 +5267,11 @@ func (m *PersistentVolumeList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n87, err := m.ListMeta.MarshalTo(data[i:]) + n92, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n87 + i += n92 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -5043,151 +5306,151 @@ func (m *PersistentVolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.GCEPersistentDisk.Size())) - n88, err := m.GCEPersistentDisk.MarshalTo(data[i:]) + n93, err := m.GCEPersistentDisk.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n88 + i += n93 } if m.AWSElasticBlockStore != nil { data[i] = 0x12 
i++ i = encodeVarintGenerated(data, i, uint64(m.AWSElasticBlockStore.Size())) - n89, err := m.AWSElasticBlockStore.MarshalTo(data[i:]) + n94, err := m.AWSElasticBlockStore.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n89 + i += n94 } if m.HostPath != nil { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.HostPath.Size())) - n90, err := m.HostPath.MarshalTo(data[i:]) + n95, err := m.HostPath.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n90 + i += n95 } if m.Glusterfs != nil { data[i] = 0x22 i++ i = encodeVarintGenerated(data, i, uint64(m.Glusterfs.Size())) - n91, err := m.Glusterfs.MarshalTo(data[i:]) + n96, err := m.Glusterfs.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n91 + i += n96 } if m.NFS != nil { data[i] = 0x2a i++ i = encodeVarintGenerated(data, i, uint64(m.NFS.Size())) - n92, err := m.NFS.MarshalTo(data[i:]) + n97, err := m.NFS.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n92 + i += n97 } if m.RBD != nil { data[i] = 0x32 i++ i = encodeVarintGenerated(data, i, uint64(m.RBD.Size())) - n93, err := m.RBD.MarshalTo(data[i:]) + n98, err := m.RBD.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n93 + i += n98 } if m.ISCSI != nil { data[i] = 0x3a i++ i = encodeVarintGenerated(data, i, uint64(m.ISCSI.Size())) - n94, err := m.ISCSI.MarshalTo(data[i:]) + n99, err := m.ISCSI.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n94 + i += n99 } if m.Cinder != nil { data[i] = 0x42 i++ i = encodeVarintGenerated(data, i, uint64(m.Cinder.Size())) - n95, err := m.Cinder.MarshalTo(data[i:]) + n100, err := m.Cinder.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n95 + i += n100 } if m.CephFS != nil { data[i] = 0x4a i++ i = encodeVarintGenerated(data, i, uint64(m.CephFS.Size())) - n96, err := m.CephFS.MarshalTo(data[i:]) + n101, err := m.CephFS.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n96 + i += n101 } if m.FC != nil { data[i] = 0x52 i++ i = encodeVarintGenerated(data, i, uint64(m.FC.Size())) - n97, err := m.FC.MarshalTo(data[i:]) + n102, err := m.FC.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n97 + i += n102 } if m.Flocker != nil { data[i] = 0x5a i++ i = encodeVarintGenerated(data, i, uint64(m.Flocker.Size())) - n98, err := m.Flocker.MarshalTo(data[i:]) + n103, err := m.Flocker.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n98 + i += n103 } if m.FlexVolume != nil { data[i] = 0x62 i++ i = encodeVarintGenerated(data, i, uint64(m.FlexVolume.Size())) - n99, err := m.FlexVolume.MarshalTo(data[i:]) + n104, err := m.FlexVolume.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n99 + i += n104 } if m.AzureFile != nil { data[i] = 0x6a i++ i = encodeVarintGenerated(data, i, uint64(m.AzureFile.Size())) - n100, err := m.AzureFile.MarshalTo(data[i:]) + n105, err := m.AzureFile.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n100 + i += n105 } if m.VsphereVolume != nil { data[i] = 0x72 i++ i = encodeVarintGenerated(data, i, uint64(m.VsphereVolume.Size())) - n101, err := m.VsphereVolume.MarshalTo(data[i:]) + n106, err := m.VsphereVolume.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n101 + i += n106 } if m.Quobyte != nil { data[i] = 0x7a i++ i = encodeVarintGenerated(data, i, uint64(m.Quobyte.Size())) - n102, err := m.Quobyte.MarshalTo(data[i:]) + n107, err := m.Quobyte.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n102 + i += n107 } if m.AzureDisk != nil { data[i] = 0x82 @@ -5195,11 +5458,11 @@ func (m *PersistentVolumeSource) 
MarshalTo(data []byte) (int, error) { data[i] = 0x1 i++ i = encodeVarintGenerated(data, i, uint64(m.AzureDisk.Size())) - n103, err := m.AzureDisk.MarshalTo(data[i:]) + n108, err := m.AzureDisk.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n103 + i += n108 } if m.PhotonPersistentDisk != nil { data[i] = 0x8a @@ -5207,11 +5470,35 @@ func (m *PersistentVolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0x1 i++ i = encodeVarintGenerated(data, i, uint64(m.PhotonPersistentDisk.Size())) - n104, err := m.PhotonPersistentDisk.MarshalTo(data[i:]) + n109, err := m.PhotonPersistentDisk.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n104 + i += n109 + } + if m.PortworxVolume != nil { + data[i] = 0x92 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PortworxVolume.Size())) + n110, err := m.PortworxVolume.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n110 + } + if m.ScaleIO != nil { + data[i] = 0x9a + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ScaleIO.Size())) + n111, err := m.ScaleIO.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n111 } return i, nil } @@ -5246,21 +5533,21 @@ func (m *PersistentVolumeSpec) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n105, err := (&v).MarshalTo(data[i:]) + n112, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n105 + i += n112 } } data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.PersistentVolumeSource.Size())) - n106, err := m.PersistentVolumeSource.MarshalTo(data[i:]) + n113, err := m.PersistentVolumeSource.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n106 + i += n113 if len(m.AccessModes) > 0 { for _, s := range m.AccessModes { data[i] = 0x1a @@ -5280,16 +5567,20 @@ func (m *PersistentVolumeSpec) MarshalTo(data []byte) (int, error) { data[i] = 0x22 i++ i = encodeVarintGenerated(data, i, uint64(m.ClaimRef.Size())) - n107, err := m.ClaimRef.MarshalTo(data[i:]) + n114, err := m.ClaimRef.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n107 + i += n114 } data[i] = 0x2a i++ i = encodeVarintGenerated(data, i, uint64(len(m.PersistentVolumeReclaimPolicy))) i += copy(data[i:], m.PersistentVolumeReclaimPolicy) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.StorageClassName))) + i += copy(data[i:], m.StorageClassName) return i, nil } @@ -5367,27 +5658,27 @@ func (m *Pod) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n108, err := m.ObjectMeta.MarshalTo(data[i:]) + n115, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n108 + i += n115 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n109, err := m.Spec.MarshalTo(data[i:]) + n116, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n109 + i += n116 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n110, err := m.Status.MarshalTo(data[i:]) + n117, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n110 + i += n117 return i, nil } @@ -5452,11 +5743,11 @@ func (m *PodAffinityTerm) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.LabelSelector.Size())) - n111, err := m.LabelSelector.MarshalTo(data[i:]) + n118, err := m.LabelSelector.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n111 + i 
+= n118 } if len(m.Namespaces) > 0 { for _, s := range m.Namespaces { @@ -5602,19 +5893,19 @@ func (m *PodCondition) MarshalTo(data []byte) (int, error) { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) - n112, err := m.LastProbeTime.MarshalTo(data[i:]) + n119, err := m.LastProbeTime.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n112 + i += n119 data[i] = 0x22 i++ i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n113, err := m.LastTransitionTime.MarshalTo(data[i:]) + n120, err := m.LastTransitionTime.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n113 + i += n120 data[i] = 0x2a i++ i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) @@ -5713,11 +6004,11 @@ func (m *PodList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n114, err := m.ListMeta.MarshalTo(data[i:]) + n121, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n114 + i += n121 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -5777,11 +6068,11 @@ func (m *PodLogOptions) MarshalTo(data []byte) (int, error) { data[i] = 0x2a i++ i = encodeVarintGenerated(data, i, uint64(m.SinceTime.Size())) - n115, err := m.SinceTime.MarshalTo(data[i:]) + n122, err := m.SinceTime.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n115 + i += n122 } data[i] = 0x30 i++ @@ -5804,6 +6095,31 @@ func (m *PodLogOptions) MarshalTo(data []byte) (int, error) { return i, nil } +func (m *PodPortForwardOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodPortForwardOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for _, num := range m.Ports { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(num)) + } + } + return i, nil +} + func (m *PodProxyOptions) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -5845,11 +6161,11 @@ func (m *PodSecurityContext) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size())) - n116, err := m.SELinuxOptions.MarshalTo(data[i:]) + n123, err := m.SELinuxOptions.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n116 + i += n123 } if m.RunAsUser != nil { data[i] = 0x10 @@ -5900,11 +6216,11 @@ func (m *PodSignature) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.PodController.Size())) - n117, err := m.PodController.MarshalTo(data[i:]) + n124, err := m.PodController.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n117 + i += n124 } return i, nil } @@ -6023,11 +6339,11 @@ func (m *PodSpec) MarshalTo(data []byte) (int, error) { data[i] = 0x72 i++ i = encodeVarintGenerated(data, i, uint64(m.SecurityContext.Size())) - n118, err := m.SecurityContext.MarshalTo(data[i:]) + n125, err := m.SecurityContext.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n118 + i += n125 } if len(m.ImagePullSecrets) > 0 { for _, msg := range m.ImagePullSecrets { @@ -6053,6 +6369,64 @@ func (m *PodSpec) MarshalTo(data []byte) (int, error) { i++ i = encodeVarintGenerated(data, i, uint64(len(m.Subdomain))) i += copy(data[i:], m.Subdomain) + if m.Affinity != nil { + data[i] = 0x92 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, 
uint64(m.Affinity.Size())) + n126, err := m.Affinity.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n126 + } + data[i] = 0x9a + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SchedulerName))) + i += copy(data[i:], m.SchedulerName) + if len(m.InitContainers) > 0 { + for _, msg := range m.InitContainers { + data[i] = 0xa2 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.AutomountServiceAccountToken != nil { + data[i] = 0xa8 + i++ + data[i] = 0x1 + i++ + if *m.AutomountServiceAccountToken { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + if len(m.Tolerations) > 0 { + for _, msg := range m.Tolerations { + data[i] = 0xb2 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -6107,11 +6481,11 @@ func (m *PodStatus) MarshalTo(data []byte) (int, error) { data[i] = 0x3a i++ i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size())) - n119, err := m.StartTime.MarshalTo(data[i:]) + n127, err := m.StartTime.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n119 + i += n127 } if len(m.ContainerStatuses) > 0 { for _, msg := range m.ContainerStatuses { @@ -6125,6 +6499,22 @@ func (m *PodStatus) MarshalTo(data []byte) (int, error) { i += n } } + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.QOSClass))) + i += copy(data[i:], m.QOSClass) + if len(m.InitContainerStatuses) > 0 { + for _, msg := range m.InitContainerStatuses { + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -6146,19 +6536,19 @@ func (m *PodStatusResult) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n120, err := m.ObjectMeta.MarshalTo(data[i:]) + n128, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n120 + i += n128 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n121, err := m.Status.MarshalTo(data[i:]) + n129, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n121 + i += n129 return i, nil } @@ -6180,19 +6570,19 @@ func (m *PodTemplate) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n122, err := m.ObjectMeta.MarshalTo(data[i:]) + n130, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n122 + i += n130 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n123, err := m.Template.MarshalTo(data[i:]) + n131, err := m.Template.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n123 + i += n131 return i, nil } @@ -6214,11 +6604,11 @@ func (m *PodTemplateList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n124, err := m.ListMeta.MarshalTo(data[i:]) + n132, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n124 + i += n132 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -6252,19 +6642,53 @@ func (m *PodTemplateSpec) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, 
uint64(m.ObjectMeta.Size())) - n125, err := m.ObjectMeta.MarshalTo(data[i:]) + n133, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n125 + i += n133 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n126, err := m.Spec.MarshalTo(data[i:]) + n134, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n126 + i += n134 + return i, nil +} + +func (m *PortworxVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PortworxVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.VolumeID))) + i += copy(data[i:], m.VolumeID) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x18 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ return i, nil } @@ -6310,19 +6734,19 @@ func (m *PreferAvoidPodsEntry) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.PodSignature.Size())) - n127, err := m.PodSignature.MarshalTo(data[i:]) + n135, err := m.PodSignature.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n127 + i += n135 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.EvictionTime.Size())) - n128, err := m.EvictionTime.MarshalTo(data[i:]) + n136, err := m.EvictionTime.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n128 + i += n136 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) @@ -6355,11 +6779,11 @@ func (m *PreferredSchedulingTerm) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Preference.Size())) - n129, err := m.Preference.MarshalTo(data[i:]) + n137, err := m.Preference.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n129 + i += n137 return i, nil } @@ -6381,11 +6805,11 @@ func (m *Probe) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.Handler.Size())) - n130, err := m.Handler.MarshalTo(data[i:]) + n138, err := m.Handler.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n130 + i += n138 data[i] = 0x10 i++ i = encodeVarintGenerated(data, i, uint64(m.InitialDelaySeconds)) @@ -6404,6 +6828,41 @@ func (m *Probe) MarshalTo(data []byte) (int, error) { return i, nil } +func (m *ProjectedVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ProjectedVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Sources) > 0 { + for _, msg := range m.Sources { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.DefaultMode != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.DefaultMode)) + } + return i, nil +} + func (m *QuobyteVolumeSource) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -6500,11 +6959,11 @@ func (m *RBDVolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0x3a i++ i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) - n131, err := 
m.SecretRef.MarshalTo(data[i:]) + n139, err := m.SecretRef.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n131 + i += n139 } data[i] = 0x40 i++ @@ -6535,11 +6994,11 @@ func (m *RangeAllocation) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n132, err := m.ObjectMeta.MarshalTo(data[i:]) + n140, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n132 + i += n140 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(len(m.Range))) @@ -6571,27 +7030,27 @@ func (m *ReplicationController) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n133, err := m.ObjectMeta.MarshalTo(data[i:]) + n141, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n133 + i += n141 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n134, err := m.Spec.MarshalTo(data[i:]) + n142, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n134 + i += n142 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n135, err := m.Status.MarshalTo(data[i:]) + n143, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n135 + i += n143 return i, nil } @@ -6621,11 +7080,11 @@ func (m *ReplicationControllerCondition) MarshalTo(data []byte) (int, error) { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n136, err := m.LastTransitionTime.MarshalTo(data[i:]) + n144, err := m.LastTransitionTime.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n136 + i += n144 data[i] = 0x22 i++ i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) @@ -6655,11 +7114,11 @@ func (m *ReplicationControllerList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n137, err := m.ListMeta.MarshalTo(data[i:]) + n145, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n137 + i += n145 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -6716,11 +7175,11 @@ func (m *ReplicationControllerSpec) MarshalTo(data []byte) (int, error) { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n138, err := m.Template.MarshalTo(data[i:]) + n146, err := m.Template.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n138 + i += n146 } data[i] = 0x20 i++ @@ -6799,11 +7258,11 @@ func (m *ResourceFieldSelector) MarshalTo(data []byte) (int, error) { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Divisor.Size())) - n139, err := m.Divisor.MarshalTo(data[i:]) + n147, err := m.Divisor.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n139 + i += n147 return i, nil } @@ -6825,27 +7284,27 @@ func (m *ResourceQuota) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n140, err := m.ObjectMeta.MarshalTo(data[i:]) + n148, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n140 + i += n148 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n141, err := m.Spec.MarshalTo(data[i:]) + n149, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n141 + i += n149 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n142, err := m.Status.MarshalTo(data[i:]) + n150, err := 
m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n142 + i += n150 return i, nil } @@ -6867,11 +7326,11 @@ func (m *ResourceQuotaList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n143, err := m.ListMeta.MarshalTo(data[i:]) + n151, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n143 + i += n151 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -6917,11 +7376,11 @@ func (m *ResourceQuotaSpec) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n144, err := (&v).MarshalTo(data[i:]) + n152, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n144 + i += n152 } } if len(m.Scopes) > 0 { @@ -6972,11 +7431,11 @@ func (m *ResourceQuotaStatus) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n145, err := (&v).MarshalTo(data[i:]) + n153, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n145 + i += n153 } } if len(m.Used) > 0 { @@ -6994,11 +7453,11 @@ func (m *ResourceQuotaStatus) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n146, err := (&v).MarshalTo(data[i:]) + n154, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n146 + i += n154 } } return i, nil @@ -7034,11 +7493,11 @@ func (m *ResourceRequirements) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n147, err := (&v).MarshalTo(data[i:]) + n155, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n147 + i += n155 } } if len(m.Requests) > 0 { @@ -7056,11 +7515,11 @@ func (m *ResourceRequirements) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n148, err := (&v).MarshalTo(data[i:]) + n156, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n148 + i += n156 } } return i, nil @@ -7100,6 +7559,78 @@ func (m *SELinuxOptions) MarshalTo(data []byte) (int, error) { return i, nil } +func (m *ScaleIOVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScaleIOVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Gateway))) + i += copy(data[i:], m.Gateway) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.System))) + i += copy(data[i:], m.System) + if m.SecretRef != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) + n157, err := m.SecretRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n157 + } + data[i] = 0x20 + i++ + if m.SSLEnabled { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ProtectionDomain))) + i += copy(data[i:], m.ProtectionDomain) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.StoragePool))) + i += copy(data[i:], m.StoragePool) + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.StorageMode))) + i += copy(data[i:], m.StorageMode) + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.VolumeName))) + 
i += copy(data[i:], m.VolumeName) + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x50 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + func (m *Secret) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -7118,11 +7649,11 @@ func (m *Secret) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n149, err := m.ObjectMeta.MarshalTo(data[i:]) + n158, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n149 + i += n158 if len(m.Data) > 0 { for k := range m.Data { data[i] = 0x12 @@ -7164,6 +7695,42 @@ func (m *Secret) MarshalTo(data []byte) (int, error) { return i, nil } +func (m *SecretEnvSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SecretEnvSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) + n159, err := m.LocalObjectReference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n159 + if m.Optional != nil { + data[i] = 0x10 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + func (m *SecretKeySelector) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -7182,15 +7749,25 @@ func (m *SecretKeySelector) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) - n150, err := m.LocalObjectReference.MarshalTo(data[i:]) + n160, err := m.LocalObjectReference.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n150 + i += n160 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(len(m.Key))) i += copy(data[i:], m.Key) + if m.Optional != nil { + data[i] = 0x18 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } return i, nil } @@ -7212,11 +7789,49 @@ func (m *SecretList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n151, err := m.ListMeta.MarshalTo(data[i:]) + n161, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n151 + i += n161 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *SecretProjection) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SecretProjection) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) + n162, err := m.LocalObjectReference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n162 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -7229,6 +7844,16 @@ func (m *SecretList) MarshalTo(data []byte) (int, error) { i += n } } + if m.Optional != nil { + data[i] = 0x20 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } return i, 
nil } @@ -7268,6 +7893,16 @@ func (m *SecretVolumeSource) MarshalTo(data []byte) (int, error) { i++ i = encodeVarintGenerated(data, i, uint64(*m.DefaultMode)) } + if m.Optional != nil { + data[i] = 0x20 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } return i, nil } @@ -7290,11 +7925,11 @@ func (m *SecurityContext) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.Capabilities.Size())) - n152, err := m.Capabilities.MarshalTo(data[i:]) + n163, err := m.Capabilities.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n152 + i += n163 } if m.Privileged != nil { data[i] = 0x10 @@ -7310,11 +7945,11 @@ func (m *SecurityContext) MarshalTo(data []byte) (int, error) { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size())) - n153, err := m.SELinuxOptions.MarshalTo(data[i:]) + n164, err := m.SELinuxOptions.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n153 + i += n164 } if m.RunAsUser != nil { data[i] = 0x20 @@ -7362,11 +7997,11 @@ func (m *SerializedReference) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.Reference.Size())) - n154, err := m.Reference.MarshalTo(data[i:]) + n165, err := m.Reference.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n154 + i += n165 return i, nil } @@ -7388,27 +8023,27 @@ func (m *Service) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n155, err := m.ObjectMeta.MarshalTo(data[i:]) + n166, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n155 + i += n166 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n156, err := m.Spec.MarshalTo(data[i:]) + n167, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n156 + i += n167 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n157, err := m.Status.MarshalTo(data[i:]) + n168, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n157 + i += n168 return i, nil } @@ -7430,11 +8065,11 @@ func (m *ServiceAccount) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n158, err := m.ObjectMeta.MarshalTo(data[i:]) + n169, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n158 + i += n169 if len(m.Secrets) > 0 { for _, msg := range m.Secrets { data[i] = 0x12 @@ -7459,6 +8094,16 @@ func (m *ServiceAccount) MarshalTo(data []byte) (int, error) { i += n } } + if m.AutomountServiceAccountToken != nil { + data[i] = 0x20 + i++ + if *m.AutomountServiceAccountToken { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } return i, nil } @@ -7480,11 +8125,11 @@ func (m *ServiceAccountList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n159, err := m.ListMeta.MarshalTo(data[i:]) + n170, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n159 + i += n170 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -7518,11 +8163,11 @@ func (m *ServiceList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n160, err := m.ListMeta.MarshalTo(data[i:]) + n171, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n160 + i += n171 if len(m.Items) > 0 { for _, msg := 
range m.Items { data[i] = 0x12 @@ -7567,11 +8212,11 @@ func (m *ServicePort) MarshalTo(data []byte) (int, error) { data[i] = 0x22 i++ i = encodeVarintGenerated(data, i, uint64(m.TargetPort.Size())) - n161, err := m.TargetPort.MarshalTo(data[i:]) + n172, err := m.TargetPort.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n161 + i += n172 data[i] = 0x28 i++ i = encodeVarintGenerated(data, i, uint64(m.NodePort)) @@ -7730,11 +8375,37 @@ func (m *ServiceStatus) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.LoadBalancer.Size())) - n162, err := m.LoadBalancer.MarshalTo(data[i:]) + n173, err := m.LoadBalancer.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n162 + i += n173 + return i, nil +} + +func (m *Sysctl) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Sysctl) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Value))) + i += copy(data[i:], m.Value) return i, nil } @@ -7756,11 +8427,11 @@ func (m *TCPSocketAction) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.Port.Size())) - n163, err := m.Port.MarshalTo(data[i:]) + n174, err := m.Port.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n163 + i += n174 return i, nil } @@ -7791,6 +8462,14 @@ func (m *Taint) MarshalTo(data []byte) (int, error) { i++ i = encodeVarintGenerated(data, i, uint64(len(m.Effect))) i += copy(data[i:], m.Effect) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TimeAdded.Size())) + n175, err := m.TimeAdded.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n175 return i, nil } @@ -7825,6 +8504,11 @@ func (m *Toleration) MarshalTo(data []byte) (int, error) { i++ i = encodeVarintGenerated(data, i, uint64(len(m.Effect))) i += copy(data[i:], m.Effect) + if m.TolerationSeconds != nil { + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TolerationSeconds)) + } return i, nil } @@ -7850,11 +8534,11 @@ func (m *Volume) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.VolumeSource.Size())) - n164, err := m.VolumeSource.MarshalTo(data[i:]) + n176, err := m.VolumeSource.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n164 + i += n176 return i, nil } @@ -7896,6 +8580,54 @@ func (m *VolumeMount) MarshalTo(data []byte) (int, error) { return i, nil } +func (m *VolumeProjection) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *VolumeProjection) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Secret != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Secret.Size())) + n177, err := m.Secret.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n177 + } + if m.DownwardAPI != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DownwardAPI.Size())) + n178, err := m.DownwardAPI.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n178 + } + if m.ConfigMap != nil { + data[i] = 0x1a + i++ + i = 
encodeVarintGenerated(data, i, uint64(m.ConfigMap.Size())) + n179, err := m.ConfigMap.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n179 + } + return i, nil +} + func (m *VolumeSource) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -7915,151 +8647,151 @@ func (m *VolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.HostPath.Size())) - n165, err := m.HostPath.MarshalTo(data[i:]) + n180, err := m.HostPath.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n165 + i += n180 } if m.EmptyDir != nil { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.EmptyDir.Size())) - n166, err := m.EmptyDir.MarshalTo(data[i:]) + n181, err := m.EmptyDir.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n166 + i += n181 } if m.GCEPersistentDisk != nil { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.GCEPersistentDisk.Size())) - n167, err := m.GCEPersistentDisk.MarshalTo(data[i:]) + n182, err := m.GCEPersistentDisk.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n167 + i += n182 } if m.AWSElasticBlockStore != nil { data[i] = 0x22 i++ i = encodeVarintGenerated(data, i, uint64(m.AWSElasticBlockStore.Size())) - n168, err := m.AWSElasticBlockStore.MarshalTo(data[i:]) + n183, err := m.AWSElasticBlockStore.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n168 + i += n183 } if m.GitRepo != nil { data[i] = 0x2a i++ i = encodeVarintGenerated(data, i, uint64(m.GitRepo.Size())) - n169, err := m.GitRepo.MarshalTo(data[i:]) + n184, err := m.GitRepo.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n169 + i += n184 } if m.Secret != nil { data[i] = 0x32 i++ i = encodeVarintGenerated(data, i, uint64(m.Secret.Size())) - n170, err := m.Secret.MarshalTo(data[i:]) + n185, err := m.Secret.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n170 + i += n185 } if m.NFS != nil { data[i] = 0x3a i++ i = encodeVarintGenerated(data, i, uint64(m.NFS.Size())) - n171, err := m.NFS.MarshalTo(data[i:]) + n186, err := m.NFS.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n171 + i += n186 } if m.ISCSI != nil { data[i] = 0x42 i++ i = encodeVarintGenerated(data, i, uint64(m.ISCSI.Size())) - n172, err := m.ISCSI.MarshalTo(data[i:]) + n187, err := m.ISCSI.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n172 + i += n187 } if m.Glusterfs != nil { data[i] = 0x4a i++ i = encodeVarintGenerated(data, i, uint64(m.Glusterfs.Size())) - n173, err := m.Glusterfs.MarshalTo(data[i:]) + n188, err := m.Glusterfs.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n173 + i += n188 } if m.PersistentVolumeClaim != nil { data[i] = 0x52 i++ i = encodeVarintGenerated(data, i, uint64(m.PersistentVolumeClaim.Size())) - n174, err := m.PersistentVolumeClaim.MarshalTo(data[i:]) + n189, err := m.PersistentVolumeClaim.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n174 + i += n189 } if m.RBD != nil { data[i] = 0x5a i++ i = encodeVarintGenerated(data, i, uint64(m.RBD.Size())) - n175, err := m.RBD.MarshalTo(data[i:]) + n190, err := m.RBD.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n175 + i += n190 } if m.FlexVolume != nil { data[i] = 0x62 i++ i = encodeVarintGenerated(data, i, uint64(m.FlexVolume.Size())) - n176, err := m.FlexVolume.MarshalTo(data[i:]) + n191, err := m.FlexVolume.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n176 + i += n191 } if m.Cinder != nil { data[i] = 0x6a i++ i = encodeVarintGenerated(data, i, 
uint64(m.Cinder.Size())) - n177, err := m.Cinder.MarshalTo(data[i:]) + n192, err := m.Cinder.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n177 + i += n192 } if m.CephFS != nil { data[i] = 0x72 i++ i = encodeVarintGenerated(data, i, uint64(m.CephFS.Size())) - n178, err := m.CephFS.MarshalTo(data[i:]) + n193, err := m.CephFS.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n178 + i += n193 } if m.Flocker != nil { data[i] = 0x7a i++ i = encodeVarintGenerated(data, i, uint64(m.Flocker.Size())) - n179, err := m.Flocker.MarshalTo(data[i:]) + n194, err := m.Flocker.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n179 + i += n194 } if m.DownwardAPI != nil { data[i] = 0x82 @@ -8067,11 +8799,11 @@ func (m *VolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0x1 i++ i = encodeVarintGenerated(data, i, uint64(m.DownwardAPI.Size())) - n180, err := m.DownwardAPI.MarshalTo(data[i:]) + n195, err := m.DownwardAPI.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n180 + i += n195 } if m.FC != nil { data[i] = 0x8a @@ -8079,11 +8811,11 @@ func (m *VolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0x1 i++ i = encodeVarintGenerated(data, i, uint64(m.FC.Size())) - n181, err := m.FC.MarshalTo(data[i:]) + n196, err := m.FC.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n181 + i += n196 } if m.AzureFile != nil { data[i] = 0x92 @@ -8091,11 +8823,11 @@ func (m *VolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0x1 i++ i = encodeVarintGenerated(data, i, uint64(m.AzureFile.Size())) - n182, err := m.AzureFile.MarshalTo(data[i:]) + n197, err := m.AzureFile.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n182 + i += n197 } if m.ConfigMap != nil { data[i] = 0x9a @@ -8103,11 +8835,11 @@ func (m *VolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0x1 i++ i = encodeVarintGenerated(data, i, uint64(m.ConfigMap.Size())) - n183, err := m.ConfigMap.MarshalTo(data[i:]) + n198, err := m.ConfigMap.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n183 + i += n198 } if m.VsphereVolume != nil { data[i] = 0xa2 @@ -8115,11 +8847,11 @@ func (m *VolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0x1 i++ i = encodeVarintGenerated(data, i, uint64(m.VsphereVolume.Size())) - n184, err := m.VsphereVolume.MarshalTo(data[i:]) + n199, err := m.VsphereVolume.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n184 + i += n199 } if m.Quobyte != nil { data[i] = 0xaa @@ -8127,11 +8859,11 @@ func (m *VolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0x1 i++ i = encodeVarintGenerated(data, i, uint64(m.Quobyte.Size())) - n185, err := m.Quobyte.MarshalTo(data[i:]) + n200, err := m.Quobyte.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n185 + i += n200 } if m.AzureDisk != nil { data[i] = 0xb2 @@ -8139,11 +8871,11 @@ func (m *VolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0x1 i++ i = encodeVarintGenerated(data, i, uint64(m.AzureDisk.Size())) - n186, err := m.AzureDisk.MarshalTo(data[i:]) + n201, err := m.AzureDisk.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n186 + i += n201 } if m.PhotonPersistentDisk != nil { data[i] = 0xba @@ -8151,11 +8883,47 @@ func (m *VolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0x1 i++ i = encodeVarintGenerated(data, i, uint64(m.PhotonPersistentDisk.Size())) - n187, err := m.PhotonPersistentDisk.MarshalTo(data[i:]) + n202, err := m.PhotonPersistentDisk.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n187 + i += n202 + 
} + if m.PortworxVolume != nil { + data[i] = 0xc2 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PortworxVolume.Size())) + n203, err := m.PortworxVolume.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n203 + } + if m.ScaleIO != nil { + data[i] = 0xca + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ScaleIO.Size())) + n204, err := m.ScaleIO.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n204 + } + if m.Projected != nil { + data[i] = 0xd2 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Projected.Size())) + n205, err := m.Projected.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n205 } return i, nil } @@ -8207,11 +8975,11 @@ func (m *WeightedPodAffinityTerm) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.PodAffinityTerm.Size())) - n188, err := m.PodAffinityTerm.MarshalTo(data[i:]) + n206, err := m.PodAffinityTerm.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n188 + i += n206 return i, nil } @@ -8446,6 +9214,17 @@ func (m *ConfigMap) Size() (n int) { return n } +func (m *ConfigMapEnvSource) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + func (m *ConfigMapKeySelector) Size() (n int) { var l int _ = l @@ -8453,6 +9232,9 @@ func (m *ConfigMapKeySelector) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Key) n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } return n } @@ -8470,6 +9252,23 @@ func (m *ConfigMapList) Size() (n int) { return n } +func (m *ConfigMapProjection) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Optional != nil { + n += 2 + } + return n +} + func (m *ConfigMapVolumeSource) Size() (n int) { var l int _ = l @@ -8484,6 +9283,9 @@ func (m *ConfigMapVolumeSource) Size() (n int) { if m.DefaultMode != nil { n += 1 + sovGenerated(uint64(*m.DefaultMode)) } + if m.Optional != nil { + n += 2 + } return n } @@ -8551,6 +9353,14 @@ func (m *Container) Size() (n int) { n += 3 n += 3 n += 3 + if len(m.EnvFrom) > 0 { + for _, e := range m.EnvFrom { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + l = len(m.TerminationMessagePolicy) + n += 2 + l + sovGenerated(uint64(l)) return n } @@ -8675,6 +9485,22 @@ func (m *DeleteOptions) Size() (n int) { if m.OrphanDependents != nil { n += 2 } + if m.PropagationPolicy != nil { + l = len(*m.PropagationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DownwardAPIProjection) Size() (n int) { + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -8801,6 +9627,22 @@ func (m *EndpointsList) Size() (n int) { return n } +func (m *EnvFromSource) Size() (n int) { + var l int + _ = l + l = len(m.Prefix) + n += 1 + l + sovGenerated(uint64(l)) + if m.ConfigMapRef != nil { + l = m.ConfigMapRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *EnvVar) Size() (n int) { var l int _ = l @@ -8896,14 +9738,6 @@ func (m *ExecAction) Size() (n int) { return n } -func (m *ExportOptions) 
Size() (n int) { - var l int - _ = l - n += 2 - n += 2 - return n -} - func (m *FCVolumeSource) Size() (n int) { var l int _ = l @@ -9059,6 +9893,12 @@ func (m *ISCSIVolumeSource) Size() (n int) { l = len(m.FSType) n += 1 + l + sovGenerated(uint64(l)) n += 2 + if len(m.Portals) > 0 { + for _, s := range m.Portals { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -9381,6 +10221,21 @@ func (m *NodeProxyOptions) Size() (n int) { return n } +func (m *NodeResources) Size() (n int) { + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + func (m *NodeSelector) Size() (n int) { var l int _ = l @@ -9431,6 +10286,12 @@ func (m *NodeSpec) Size() (n int) { l = len(m.ProviderID) n += 1 + l + sovGenerated(uint64(l)) n += 2 + if len(m.Taints) > 0 { + for _, e := range m.Taints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -9608,23 +10469,6 @@ func (m *ObjectReference) Size() (n int) { return n } -func (m *OwnerReference) Size() (n int) { - var l int - _ = l - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - if m.Controller != nil { - n += 2 - } - return n -} - func (m *PersistentVolume) Size() (n int) { var l int _ = l @@ -9680,6 +10524,10 @@ func (m *PersistentVolumeClaimSpec) Size() (n int) { l = m.Selector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.StorageClassName != nil { + l = len(*m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -9800,6 +10648,14 @@ func (m *PersistentVolumeSource) Size() (n int) { l = m.PhotonPersistentDisk.Size() n += 2 + l + sovGenerated(uint64(l)) } + if m.PortworxVolume != nil { + l = m.PortworxVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ScaleIO != nil { + l = m.ScaleIO.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -9829,6 +10685,8 @@ func (m *PersistentVolumeSpec) Size() (n int) { } l = len(m.PersistentVolumeReclaimPolicy) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -10006,6 +10864,17 @@ func (m *PodLogOptions) Size() (n int) { return n } +func (m *PodPortForwardOptions) Size() (n int) { + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + n += 1 + sovGenerated(uint64(e)) + } + } + return n +} + func (m *PodProxyOptions) Size() (n int) { var l int _ = l @@ -10104,6 +10973,27 @@ func (m *PodSpec) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) l = len(m.Subdomain) n += 2 + l + sovGenerated(uint64(l)) + if m.Affinity != nil { + l = m.Affinity.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + l = len(m.SchedulerName) + n += 2 + l + sovGenerated(uint64(l)) + if len(m.InitContainers) > 0 { + for _, e := range m.InitContainers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.AutomountServiceAccountToken != nil { + n += 3 + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -10136,6 +11026,14 @@ func (m *PodStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + l = len(m.QOSClass) + n += 1 + l + 
sovGenerated(uint64(l)) + if len(m.InitContainerStatuses) > 0 { + for _, e := range m.InitContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -10183,6 +11081,17 @@ func (m *PodTemplateSpec) Size() (n int) { return n } +func (m *PortworxVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + func (m *Preconditions) Size() (n int) { var l int _ = l @@ -10229,6 +11138,21 @@ func (m *Probe) Size() (n int) { return n } +func (m *ProjectedVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.Sources) > 0 { + for _, e := range m.Sources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + return n +} + func (m *QuobyteVolumeSource) Size() (n int) { var l int _ = l @@ -10487,6 +11411,32 @@ func (m *SELinuxOptions) Size() (n int) { return n } +func (m *ScaleIOVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Gateway) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.System) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + l = len(m.ProtectionDomain) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageMode) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + func (m *Secret) Size() (n int) { var l int _ = l @@ -10513,6 +11463,17 @@ func (m *Secret) Size() (n int) { return n } +func (m *SecretEnvSource) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + func (m *SecretKeySelector) Size() (n int) { var l int _ = l @@ -10520,6 +11481,9 @@ func (m *SecretKeySelector) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Key) n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } return n } @@ -10537,6 +11501,23 @@ func (m *SecretList) Size() (n int) { return n } +func (m *SecretProjection) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Optional != nil { + n += 2 + } + return n +} + func (m *SecretVolumeSource) Size() (n int) { var l int _ = l @@ -10551,6 +11532,9 @@ func (m *SecretVolumeSource) Size() (n int) { if m.DefaultMode != nil { n += 1 + sovGenerated(uint64(*m.DefaultMode)) } + if m.Optional != nil { + n += 2 + } return n } @@ -10617,6 +11601,9 @@ func (m *ServiceAccount) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.AutomountServiceAccountToken != nil { + n += 2 + } return n } @@ -10726,6 +11713,16 @@ func (m *ServiceStatus) Size() (n int) { return n } +func (m *Sysctl) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *TCPSocketAction) Size() (n int) { var l int _ = l @@ -10743,6 +11740,8 @@ func (m *Taint) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Effect) n += 1 + l + sovGenerated(uint64(l)) + l = m.TimeAdded.Size() 
+ n += 1 + l + sovGenerated(uint64(l)) return n } @@ -10757,6 +11756,9 @@ func (m *Toleration) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Effect) n += 1 + l + sovGenerated(uint64(l)) + if m.TolerationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TolerationSeconds)) + } return n } @@ -10783,6 +11785,24 @@ func (m *VolumeMount) Size() (n int) { return n } +func (m *VolumeProjection) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DownwardAPI != nil { + l = m.DownwardAPI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *VolumeSource) Size() (n int) { var l int _ = l @@ -10878,6 +11898,18 @@ func (m *VolumeSource) Size() (n int) { l = m.PhotonPersistentDisk.Size() n += 2 + l + sovGenerated(uint64(l)) } + if m.PortworxVolume != nil { + l = m.PortworxVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ScaleIO != nil { + l = m.ScaleIO.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Projected != nil { + l = m.Projected.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -10990,7 +12022,7 @@ func (this *Binding) String() string { return "nil" } s := strings.Join([]string{`&Binding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -11052,7 +12084,7 @@ func (this *ComponentStatus) String() string { return "nil" } s := strings.Join([]string{`&ComponentStatus{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ComponentCondition", "ComponentCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -11063,7 +12095,7 @@ func (this *ComponentStatusList) String() string { return "nil" } s := strings.Join([]string{`&ComponentStatusList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ComponentStatus", "ComponentStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -11084,12 +12116,23 @@ func (this *ConfigMap) String() string { } mapStringForData += "}" s := strings.Join([]string{`&ConfigMap{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Data:` + mapStringForData + `,`, `}`, }, "") return s } +func (this *ConfigMapEnvSource) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&ConfigMapEnvSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} func (this *ConfigMapKeySelector) String() string { if this == nil { return "nil" @@ -11097,6 +12140,7 @@ func (this *ConfigMapKeySelector) String() string { s := strings.Join([]string{`&ConfigMapKeySelector{`, `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, `}`, }, "") return s @@ -11106,12 +12150,24 @@ func (this *ConfigMapList) String() string { return "nil" } s := strings.Join([]string{`&ConfigMapList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ConfigMap", "ConfigMap", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } +func (this *ConfigMapProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapProjection{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} func (this *ConfigMapVolumeSource) String() string { if this == nil { return "nil" @@ -11120,6 +12176,7 @@ func (this *ConfigMapVolumeSource) String() string { `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, `}`, }, "") return s @@ -11147,6 +12204,8 @@ func (this *Container) String() string { `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, `StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`, `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + `,`, + `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, `}`, }, "") return s @@ -11193,7 +12252,7 @@ func (this *ContainerStateRunning) String() string { return "nil" } s := strings.Join([]string{`&ContainerStateRunning{`, - `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, + `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -11207,8 +12266,8 @@ func (this *ContainerStateTerminated) String() string { `Signal:` + 
fmt.Sprintf("%v", this.Signal) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, - `FinishedAt:` + strings.Replace(strings.Replace(this.FinishedAt.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, + `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `FinishedAt:` + strings.Replace(strings.Replace(this.FinishedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, `}`, }, "") @@ -11260,6 +12319,17 @@ func (this *DeleteOptions) String() string { `GracePeriodSeconds:` + valueToStringGenerated(this.GracePeriodSeconds) + `,`, `Preconditions:` + strings.Replace(fmt.Sprintf("%v", this.Preconditions), "Preconditions", "Preconditions", 1) + `,`, `OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`, + `PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DownwardAPIProjection{`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -11340,7 +12410,7 @@ func (this *Endpoints) String() string { return "nil" } s := strings.Join([]string{`&Endpoints{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Subsets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subsets), "EndpointSubset", "EndpointSubset", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -11351,12 +12421,24 @@ func (this *EndpointsList) String() string { return "nil" } s := strings.Join([]string{`&EndpointsList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Endpoints", "Endpoints", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } +func (this *EnvFromSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvFromSource{`, + `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, + `ConfigMapRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapRef), "ConfigMapEnvSource", "ConfigMapEnvSource", 1) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretEnvSource", "SecretEnvSource", 1) + `,`, + `}`, + }, "") + return s +} func (this *EnvVar) String() string { if this == nil { return "nil" @@ -11387,13 +12469,13 @@ func (this *Event) String() string { return "nil" } s := strings.Join([]string{`&Event{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + 
strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `InvolvedObject:` + strings.Replace(strings.Replace(this.InvolvedObject.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `Source:` + strings.Replace(strings.Replace(this.Source.String(), "EventSource", "EventSource", 1), `&`, ``, 1) + `,`, - `FirstTimestamp:` + strings.Replace(strings.Replace(this.FirstTimestamp.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, - `LastTimestamp:` + strings.Replace(strings.Replace(this.LastTimestamp.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, + `FirstTimestamp:` + strings.Replace(strings.Replace(this.FirstTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTimestamp:` + strings.Replace(strings.Replace(this.LastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Count:` + fmt.Sprintf("%v", this.Count) + `,`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `}`, @@ -11405,7 +12487,7 @@ func (this *EventList) String() string { return "nil" } s := strings.Join([]string{`&EventList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Event", "Event", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -11432,17 +12514,6 @@ func (this *ExecAction) String() string { }, "") return s } -func (this *ExportOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExportOptions{`, - `Export:` + fmt.Sprintf("%v", this.Export) + `,`, - `Exact:` + fmt.Sprintf("%v", this.Exact) + `,`, - `}`, - }, "") - return s -} func (this *FCVolumeSource) String() string { if this == nil { return "nil" @@ -11534,7 +12605,7 @@ func (this *HTTPGetAction) String() string { } s := strings.Join([]string{`&HTTPGetAction{`, `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Port:` + strings.Replace(strings.Replace(this.Port.String(), "IntOrString", "k8s_io_kubernetes_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Port:` + strings.Replace(strings.Replace(this.Port.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, `Host:` + fmt.Sprintf("%v", this.Host) + `,`, `Scheme:` + fmt.Sprintf("%v", this.Scheme) + `,`, `HTTPHeaders:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HTTPHeaders), "HTTPHeader", "HTTPHeader", 1), `&`, ``, 1) + `,`, @@ -11586,6 +12657,7 @@ func (this *ISCSIVolumeSource) String() string { `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, `}`, }, "") return s @@ -11618,7 +12690,7 @@ func (this *LimitRange) String() string { return "nil" } s := strings.Join([]string{`&LimitRange{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + 
strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LimitRangeSpec", "LimitRangeSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -11694,7 +12766,7 @@ func (this *LimitRangeList) String() string { return "nil" } s := strings.Join([]string{`&LimitRangeList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "LimitRange", "LimitRange", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -11715,8 +12787,8 @@ func (this *List) String() string { return "nil" } s := strings.Join([]string{`&List{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RawExtension", "k8s_io_kubernetes_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -11783,7 +12855,7 @@ func (this *Namespace) String() string { return "nil" } s := strings.Join([]string{`&Namespace{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NamespaceSpec", "NamespaceSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NamespaceStatus", "NamespaceStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -11795,7 +12867,7 @@ func (this *NamespaceList) String() string { return "nil" } s := strings.Join([]string{`&NamespaceList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -11826,7 +12898,7 @@ func (this *Node) String() string { return "nil" } s := strings.Join([]string{`&Node{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NodeSpec", "NodeSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NodeStatus", "NodeStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -11862,8 +12934,8 @@ func (this 
*NodeCondition) String() string { s := strings.Join([]string{`&NodeCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastHeartbeatTime:` + strings.Replace(strings.Replace(this.LastHeartbeatTime.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, + `LastHeartbeatTime:` + strings.Replace(strings.Replace(this.LastHeartbeatTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -11885,7 +12957,7 @@ func (this *NodeList) String() string { return "nil" } s := strings.Join([]string{`&NodeList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Node", "Node", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -11901,6 +12973,26 @@ func (this *NodeProxyOptions) String() string { }, "") return s } +func (this *NodeResources) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&NodeResources{`, + `Capacity:` + mapStringForCapacity + `,`, + `}`, + }, "") + return s +} func (this *NodeSelector) String() string { if this == nil { return "nil" @@ -11942,6 +13034,7 @@ func (this *NodeSpec) String() string { `ExternalID:` + fmt.Sprintf("%v", this.ExternalID) + `,`, `ProviderID:` + fmt.Sprintf("%v", this.ProviderID) + `,`, `Unschedulable:` + fmt.Sprintf("%v", this.Unschedulable) + `,`, + `Taints:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Taints), "Taint", "Taint", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -12047,12 +13140,12 @@ func (this *ObjectMeta) String() string { `UID:` + fmt.Sprintf("%v", this.UID) + `,`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, - `CreationTimestamp:` + strings.Replace(strings.Replace(this.CreationTimestamp.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, - `DeletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.DeletionTimestamp), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1) + `,`, + `CreationTimestamp:` + strings.Replace(strings.Replace(this.CreationTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `DeletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.DeletionTimestamp), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, 
`DeletionGracePeriodSeconds:` + valueToStringGenerated(this.DeletionGracePeriodSeconds) + `,`, `Labels:` + mapStringForLabels + `,`, `Annotations:` + mapStringForAnnotations + `,`, - `OwnerReferences:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OwnerReferences), "OwnerReference", "OwnerReference", 1), `&`, ``, 1) + `,`, + `OwnerReferences:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OwnerReferences), "OwnerReference", "k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference", 1), `&`, ``, 1) + `,`, `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, `}`, @@ -12075,26 +13168,12 @@ func (this *ObjectReference) String() string { }, "") return s } -func (this *OwnerReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&OwnerReference{`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `Controller:` + valueToStringGenerated(this.Controller) + `,`, - `}`, - }, "") - return s -} func (this *PersistentVolume) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&PersistentVolume{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeSpec", "PersistentVolumeSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeStatus", "PersistentVolumeStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -12106,7 +13185,7 @@ func (this *PersistentVolumeClaim) String() string { return "nil" } s := strings.Join([]string{`&PersistentVolumeClaim{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeClaimSpec", "PersistentVolumeClaimSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeClaimStatus", "PersistentVolumeClaimStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -12118,7 +13197,7 @@ func (this *PersistentVolumeClaimList) String() string { return "nil" } s := strings.Join([]string{`&PersistentVolumeClaimList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PersistentVolumeClaim", "PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -12132,7 +13211,8 @@ func (this *PersistentVolumeClaimSpec) String() string { `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, `VolumeName:` + fmt.Sprintf("%v", 
this.VolumeName) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_kubernetes_pkg_api_unversioned.LabelSelector", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, `}`, }, "") return s @@ -12175,7 +13255,7 @@ func (this *PersistentVolumeList) String() string { return "nil" } s := strings.Join([]string{`&PersistentVolumeList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PersistentVolume", "PersistentVolume", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -12203,6 +13283,8 @@ func (this *PersistentVolumeSource) String() string { `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, + `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, + `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, `}`, }, "") return s @@ -12227,6 +13309,7 @@ func (this *PersistentVolumeSpec) String() string { `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, `ClaimRef:` + strings.Replace(fmt.Sprintf("%v", this.ClaimRef), "ObjectReference", "ObjectReference", 1) + `,`, `PersistentVolumeReclaimPolicy:` + fmt.Sprintf("%v", this.PersistentVolumeReclaimPolicy) + `,`, + `StorageClassName:` + fmt.Sprintf("%v", this.StorageClassName) + `,`, `}`, }, "") return s @@ -12259,7 +13342,7 @@ func (this *Pod) String() string { return "nil" } s := strings.Join([]string{`&Pod{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -12282,7 +13365,7 @@ func (this *PodAffinityTerm) String() string { return "nil" } s := strings.Join([]string{`&PodAffinityTerm{`, - `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "k8s_io_kubernetes_pkg_api_unversioned.LabelSelector", 1) + `,`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, `}`, @@ -12321,8 +13404,8 @@ func (this *PodCondition) String() string { s := strings.Join([]string{`&PodCondition{`, `Type:` + 
fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -12349,7 +13432,7 @@ func (this *PodList) String() string { return "nil" } s := strings.Join([]string{`&PodList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Pod", "Pod", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -12364,7 +13447,7 @@ func (this *PodLogOptions) String() string { `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, - `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1) + `,`, + `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, @@ -12372,6 +13455,16 @@ func (this *PodLogOptions) String() string { }, "") return s } +func (this *PodPortForwardOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodPortForwardOptions{`, + `Ports:` + fmt.Sprintf("%v", this.Ports) + `,`, + `}`, + }, "") + return s +} func (this *PodProxyOptions) String() string { if this == nil { return "nil" @@ -12401,7 +13494,7 @@ func (this *PodSignature) String() string { return "nil" } s := strings.Join([]string{`&PodSignature{`, - `PodController:` + strings.Replace(fmt.Sprintf("%v", this.PodController), "OwnerReference", "OwnerReference", 1) + `,`, + `PodController:` + strings.Replace(fmt.Sprintf("%v", this.PodController), "OwnerReference", "k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference", 1) + `,`, `}`, }, "") return s @@ -12438,6 +13531,11 @@ func (this *PodSpec) String() string { `ImagePullSecrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ImagePullSecrets), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, + `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "Affinity", 1) + `,`, + `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, + `InitContainers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.InitContainers), "Container", "Container", 1), `&`, ``, 1) + 
`,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `Tolerations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Tolerations), "Toleration", "Toleration", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -12453,8 +13551,10 @@ func (this *PodStatus) String() string { `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, `PodIP:` + fmt.Sprintf("%v", this.PodIP) + `,`, - `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1) + `,`, + `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, `ContainerStatuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ContainerStatuses), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + `,`, + `QOSClass:` + fmt.Sprintf("%v", this.QOSClass) + `,`, + `InitContainerStatuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.InitContainerStatuses), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -12464,7 +13564,7 @@ func (this *PodStatusResult) String() string { return "nil" } s := strings.Join([]string{`&PodStatusResult{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -12475,7 +13575,7 @@ func (this *PodTemplate) String() string { return "nil" } s := strings.Join([]string{`&PodTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -12486,7 +13586,7 @@ func (this *PodTemplateList) String() string { return "nil" } s := strings.Join([]string{`&PodTemplateList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodTemplate", "PodTemplate", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -12497,12 +13597,24 @@ func (this *PodTemplateSpec) String() string { return "nil" } s := strings.Join([]string{`&PodTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } +func (this *PortworxVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortworxVolumeSource{`, + 
`VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} func (this *Preconditions) String() string { if this == nil { return "nil" @@ -12519,7 +13631,7 @@ func (this *PreferAvoidPodsEntry) String() string { } s := strings.Join([]string{`&PreferAvoidPodsEntry{`, `PodSignature:` + strings.Replace(strings.Replace(this.PodSignature.String(), "PodSignature", "PodSignature", 1), `&`, ``, 1) + `,`, - `EvictionTime:` + strings.Replace(strings.Replace(this.EvictionTime.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, + `EvictionTime:` + strings.Replace(strings.Replace(this.EvictionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -12552,6 +13664,17 @@ func (this *Probe) String() string { }, "") return s } +func (this *ProjectedVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProjectedVolumeSource{`, + `Sources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Sources), "VolumeProjection", "VolumeProjection", 1), `&`, ``, 1) + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `}`, + }, "") + return s +} func (this *QuobyteVolumeSource) String() string { if this == nil { return "nil" @@ -12588,7 +13711,7 @@ func (this *RangeAllocation) String() string { return "nil" } s := strings.Join([]string{`&RangeAllocation{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Range:` + fmt.Sprintf("%v", this.Range) + `,`, `Data:` + valueToStringGenerated(this.Data) + `,`, `}`, @@ -12600,7 +13723,7 @@ func (this *ReplicationController) String() string { return "nil" } s := strings.Join([]string{`&ReplicationController{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicationControllerSpec", "ReplicationControllerSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicationControllerStatus", "ReplicationControllerStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -12614,7 +13737,7 @@ func (this *ReplicationControllerCondition) String() string { s := strings.Join([]string{`&ReplicationControllerCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -12626,7 +13749,7 @@ func (this *ReplicationControllerList) String() string { return "nil" } s := 
strings.Join([]string{`&ReplicationControllerList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicationController", "ReplicationController", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -12677,7 +13800,7 @@ func (this *ResourceFieldSelector) String() string { s := strings.Join([]string{`&ResourceFieldSelector{`, `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, - `Divisor:` + strings.Replace(strings.Replace(this.Divisor.String(), "Quantity", "k8s_io_kubernetes_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `Divisor:` + strings.Replace(strings.Replace(this.Divisor.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -12687,7 +13810,7 @@ func (this *ResourceQuota) String() string { return "nil" } s := strings.Join([]string{`&ResourceQuota{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceQuotaSpec", "ResourceQuotaSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceQuotaStatus", "ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -12699,7 +13822,7 @@ func (this *ResourceQuotaList) String() string { return "nil" } s := strings.Join([]string{`&ResourceQuotaList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ResourceQuota", "ResourceQuota", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -12801,6 +13924,25 @@ func (this *SELinuxOptions) String() string { }, "") return s } +func (this *ScaleIOVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleIOVolumeSource{`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `System:` + fmt.Sprintf("%v", this.System) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, + `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, + `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, + `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} func (this *Secret) String() string { if this == nil { return "nil" @@ -12826,7 +13968,7 @@ func (this *Secret) String() string { } mapStringForStringData += "}" s := strings.Join([]string{`&Secret{`, - `ObjectMeta:` + 
strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Data:` + mapStringForData + `,`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `StringData:` + mapStringForStringData + `,`, @@ -12834,6 +13976,17 @@ func (this *Secret) String() string { }, "") return s } +func (this *SecretEnvSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretEnvSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} func (this *SecretKeySelector) String() string { if this == nil { return "nil" @@ -12841,6 +13994,7 @@ func (this *SecretKeySelector) String() string { s := strings.Join([]string{`&SecretKeySelector{`, `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, `}`, }, "") return s @@ -12850,12 +14004,24 @@ func (this *SecretList) String() string { return "nil" } s := strings.Join([]string{`&SecretList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Secret", "Secret", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } +func (this *SecretProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretProjection{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} func (this *SecretVolumeSource) String() string { if this == nil { return "nil" @@ -12864,6 +14030,7 @@ func (this *SecretVolumeSource) String() string { `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, `}`, }, "") return s @@ -12898,7 +14065,7 @@ func (this *Service) String() string { return "nil" } s := strings.Join([]string{`&Service{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), 
"ServiceStatus", "ServiceStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -12910,9 +14077,10 @@ func (this *ServiceAccount) String() string { return "nil" } s := strings.Join([]string{`&ServiceAccount{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Secrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Secrets), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, `ImagePullSecrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ImagePullSecrets), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, `}`, }, "") return s @@ -12922,7 +14090,7 @@ func (this *ServiceAccountList) String() string { return "nil" } s := strings.Join([]string{`&ServiceAccountList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ServiceAccount", "ServiceAccount", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -12933,7 +14101,7 @@ func (this *ServiceList) String() string { return "nil" } s := strings.Join([]string{`&ServiceList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Service", "Service", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -12947,7 +14115,7 @@ func (this *ServicePort) String() string { `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `TargetPort:` + strings.Replace(strings.Replace(this.TargetPort.String(), "IntOrString", "k8s_io_kubernetes_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `TargetPort:` + strings.Replace(strings.Replace(this.TargetPort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, `NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`, `}`, }, "") @@ -13002,12 +14170,23 @@ func (this *ServiceStatus) String() string { }, "") return s } +func (this *Sysctl) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Sysctl{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} func (this *TCPSocketAction) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&TCPSocketAction{`, - `Port:` + strings.Replace(strings.Replace(this.Port.String(), "IntOrString", "k8s_io_kubernetes_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Port:` + strings.Replace(strings.Replace(this.Port.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -13020,6 +14199,7 @@ func (this *Taint) String() string { `Key:` + 
fmt.Sprintf("%v", this.Key) + `,`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TimeAdded:` + strings.Replace(strings.Replace(this.TimeAdded.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -13033,6 +14213,7 @@ func (this *Toleration) String() string { `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`, `}`, }, "") return s @@ -13061,6 +14242,18 @@ func (this *VolumeMount) String() string { }, "") return s } +func (this *VolumeProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeProjection{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretProjection", "SecretProjection", 1) + `,`, + `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, + `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, + `}`, + }, "") + return s +} func (this *VolumeSource) String() string { if this == nil { return "nil" @@ -13089,6 +14282,9 @@ func (this *VolumeSource) String() string { `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, + `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, + `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, + `Projected:` + strings.Replace(fmt.Sprintf("%v", this.Projected), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`, `}`, }, "") return s @@ -15069,7 +16265,7 @@ func (m *ConfigMap) Unmarshal(data []byte) error { } return nil } -func (m *ConfigMapKeySelector) Unmarshal(data []byte) error { +func (m *ConfigMapEnvSource) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -15092,10 +16288,10 @@ func (m *ConfigMapKeySelector) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapKeySelector: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapEnvSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -15129,10 +16325,10 @@ func (m *ConfigMapKeySelector) Unmarshal(data []byte) error { } iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15142,21 +16338,143 @@ func (m *ConfigMapKeySelector) Unmarshal(data []byte) 
error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(data[iNdEx:postIndex]) - iNdEx = postIndex + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapKeySelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapKeySelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -15289,6 +16607,138 @@ func (m *ConfigMapList) Unmarshal(data []byte) error { } return nil } +func (m *ConfigMapProjection) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ConfigMapVolumeSource) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 @@ -15399,6 +16849,27 @@ func (m *ConfigMapVolumeSource) Unmarshal(data []byte) error { } } m.DefaultMode = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -15967,6 +17438,66 @@ func (m *Container) Unmarshal(data []byte) error { } } m.TTY = bool(v != 0) + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TerminationMessagePolicy = TerminationMessagePolicy(data[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -17270,6 +18801,36 @@ func (m *DeleteOptions) Unmarshal(data []byte) error { } b := bool(v != 0) m.OrphanDependents = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PropagationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := DeletionPropagation(data[iNdEx:postIndex]) + m.PropagationPolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -17291,7 +18852,7 @@ func (m *DeleteOptions) Unmarshal(data []byte) error { } return nil } -func (m *DownwardAPIVolumeFile) Unmarshal(data []byte) error { +func (m *DownwardAPIProjection) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -17314,77 +18875,15 @@ func (m *DownwardAPIVolumeFile) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DownwardAPIVolumeFile: wiretype end group for non-group") + return fmt.Errorf("proto: DownwardAPIProjection: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIVolumeFile: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DownwardAPIProjection: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FieldRef == nil { - m.FieldRef = &ObjectFieldSelector{} - } - if err := m.FieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17408,33 +18907,11 @@ func (m *DownwardAPIVolumeFile) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResourceFieldRef == nil { - m.ResourceFieldRef = &ResourceFieldSelector{} - } - if err := m.ResourceFieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Mode = &v default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -17456,7 +18933,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(data []byte) error { } return nil } -func (m *DownwardAPIVolumeSource) Unmarshal(data []byte) error { +func (m *DownwardAPIVolumeFile) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -17479,15 +18956,44 @@ func (m *DownwardAPIVolumeSource) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: DownwardAPIVolumeFile: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DownwardAPIVolumeFile: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17511,14 
+19017,49 @@ func (m *DownwardAPIVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, DownwardAPIVolumeFile{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} + } + if err := m.FieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} + } + if err := m.ResourceFieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) } var v int32 for shift := uint(0); ; shift += 7 { @@ -17535,7 +19076,108 @@ func (m *DownwardAPIVolumeSource) Unmarshal(data []byte) error { break } } - m.DefaultMode = &v + m.Mode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DownwardAPIVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -18299,7 +19941,7 @@ func (m *EndpointsList) Unmarshal(data []byte) error { } return nil } -func (m *EnvVar) Unmarshal(data []byte) error { +func (m *EnvFromSource) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -18322,15 +19964,15 @@ func (m *EnvVar) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") + return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18355,156 +19997,11 @@ func (m *EnvVar) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Prefix = string(data[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ValueFrom == nil { - m.ValueFrom = &EnvVarSource{} - } - if err := m.ValueFrom.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EnvVarSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EnvVarSource: wiretype 
end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FieldRef == nil { - m.FieldRef = &ObjectFieldSelector{} - } - if err := m.FieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18528,16 +20025,306 @@ func (m *EnvVarSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResourceFieldRef == nil { - m.ResourceFieldRef = &ResourceFieldSelector{} + if m.ConfigMapRef == nil { + m.ConfigMapRef = &ConfigMapEnvSource{} } - if err := m.ResourceFieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ConfigMapRef.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretEnvSource{} + } + if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvVar) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValueFrom == nil { + m.ValueFrom = &EnvVarSource{} + } + if err := m.ValueFrom.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvVarSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} + } + if err := m.FieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} + } + if err := m.ResourceFieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19226,96 +21013,6 @@ func (m *ExecAction) Unmarshal(data []byte) error { } return nil } -func (m *ExportOptions) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Export", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Export = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Exact = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *FCVolumeSource) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 @@ -20974,6 +22671,35 @@ func (m *ISCSIVolumeSource) Unmarshal(data []byte) error { } } m.ReadOnly = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Portals = append(m.Portals, string(data[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err 
:= skipGenerated(data[iNdEx:]) @@ -21513,7 +23239,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { return err } @@ -21629,7 +23355,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { return err } @@ -21745,7 +23471,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { return err } @@ -21861,7 +23587,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { return err } @@ -21977,7 +23703,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { return err } @@ -22285,7 +24011,7 @@ func (m *List) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, k8s_io_kubernetes_pkg_runtime.RawExtension{}) + m.Items = append(m.Items, k8s_io_apimachinery_pkg_runtime.RawExtension{}) if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } @@ -24151,7 +25877,7 @@ func (m *NodeProxyOptions) Unmarshal(data []byte) error { } return nil } -func (m *NodeSelector) Unmarshal(data []byte) error { +func (m *NodeResources) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -24174,15 +25900,15 @@ func (m *NodeSelector) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NodeSelector: wiretype end group for non-group") + return fmt.Errorf("proto: NodeResources: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NodeSelector: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NodeResources: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeSelectorTerms", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24206,66 +25932,7 @@ func (m *NodeSelector) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.NodeSelectorTerms = append(m.NodeSelectorTerms, NodeSelectorTerm{}) - if err := m.NodeSelectorTerms[len(m.NodeSelectorTerms)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return 
err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeSelectorRequirement) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeSelectorRequirement: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 + var keykey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24275,26 +25942,12 @@ func (m *NodeSelectorRequirement) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) - } - var stringLen uint64 + var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24304,26 +25957,37 @@ func (m *NodeSelectorRequirement) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - m.Operator = NodeSelectorOperator(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - var stringLen uint64 + var mapmsglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24333,20 +25997,30 @@ func (m *NodeSelectorRequirement) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + mapmsglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if mapmsglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + 
postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { return io.ErrUnexpectedEOF } - m.Values = append(m.Values, string(data[iNdEx:postIndex])) + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Capacity == nil { + m.Capacity = make(ResourceList) + } + m.Capacity[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -24369,7 +26043,7 @@ func (m *NodeSelectorRequirement) Unmarshal(data []byte) error { } return nil } -func (m *NodeSelectorTerm) Unmarshal(data []byte) error { +func (m *NodeSelector) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -24392,15 +26066,15 @@ func (m *NodeSelectorTerm) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NodeSelectorTerm: wiretype end group for non-group") + return fmt.Errorf("proto: NodeSelector: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NodeSelectorTerm: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NodeSelector: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelectorTerms", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24424,8 +26098,226 @@ func (m *NodeSelectorTerm) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.MatchExpressions = append(m.MatchExpressions, NodeSelectorRequirement{}) - if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + m.NodeSelectorTerms = append(m.NodeSelectorTerms, NodeSelectorTerm{}) + if err := m.NodeSelectorTerms[len(m.NodeSelectorTerms)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSelectorRequirement) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = NodeSelectorOperator(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSelectorTerm) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSelectorTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSelectorTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, NodeSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -24586,6 +26478,37 @@ func (m *NodeSpec) Unmarshal(data []byte) error { } } m.Unschedulable = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType) + } + var msglen int + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Taints = append(m.Taints, Taint{}) + if err := m.Taints[len(m.Taints)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -24742,7 +26665,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { return err } @@ -24858,7 +26781,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { return err } @@ -25751,7 +27674,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.UID = k8s_io_kubernetes_pkg_types.UID(data[iNdEx:postIndex]) + m.UID = k8s_io_apimachinery_pkg_types.UID(data[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { @@ -25858,7 +27781,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.DeletionTimestamp == nil { - m.DeletionTimestamp = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + m.DeletionTimestamp = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.DeletionTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { return err @@ -26132,7 +28055,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.OwnerReferences = append(m.OwnerReferences, OwnerReference{}) + m.OwnerReferences = append(m.OwnerReferences, k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference{}) if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } @@ -26359,7 +28282,7 @@ func (m *ObjectReference) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.UID = k8s_io_kubernetes_pkg_types.UID(data[iNdEx:postIndex]) + m.UID = k8s_io_apimachinery_pkg_types.UID(data[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { @@ -26469,7 +28392,7 @@ func (m *ObjectReference) Unmarshal(data []byte) error { } return nil } -func (m *OwnerReference) Unmarshal(data []byte) error { +func (m *PersistentVolume) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -26492,17 +28415,17 @@ func (m *OwnerReference) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: OwnerReference: wiretype end group for non-group") + return fmt.Errorf("proto: PersistentVolume: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: OwnerReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PersistentVolume: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + 
return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -26512,55 +28435,27 @@ func (m *OwnerReference) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err } - m.Name = string(data[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -26570,26 +28465,27 @@ func (m *OwnerReference) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.UID = k8s_io_kubernetes_pkg_types.UID(data[iNdEx:postIndex]) + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -26599,42 +28495,22 @@ func (m *OwnerReference) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + if err := 
m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err } - b := bool(v != 0) - m.Controller = &b + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -26656,7 +28532,7 @@ func (m *OwnerReference) Unmarshal(data []byte) error { } return nil } -func (m *PersistentVolume) Unmarshal(data []byte) error { +func (m *PersistentVolumeClaim) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -26679,10 +28555,10 @@ func (m *PersistentVolume) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PersistentVolume: wiretype end group for non-group") + return fmt.Errorf("proto: PersistentVolumeClaim: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolume: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PersistentVolumeClaim: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -26796,7 +28672,7 @@ func (m *PersistentVolume) Unmarshal(data []byte) error { } return nil } -func (m *PersistentVolumeClaim) Unmarshal(data []byte) error { +func (m *PersistentVolumeClaimList) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -26819,15 +28695,15 @@ func (m *PersistentVolumeClaim) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeClaim: wiretype end group for non-group") + return fmt.Errorf("proto: PersistentVolumeClaimList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeClaim: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PersistentVolumeClaimList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26851,43 +28727,13 @@ func (m *PersistentVolumeClaim) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26911,7 +28757,8 @@ func (m *PersistentVolumeClaim) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, PersistentVolumeClaim{}) + if err := 
m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -26936,7 +28783,7 @@ func (m *PersistentVolumeClaim) Unmarshal(data []byte) error { } return nil } -func (m *PersistentVolumeClaimList) Unmarshal(data []byte) error { +func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -26959,15 +28806,44 @@ func (m *PersistentVolumeClaimList) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeClaimList: wiretype end group for non-group") + return fmt.Errorf("proto: PersistentVolumeClaimSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeClaimList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PersistentVolumeClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26991,13 +28867,42 @@ func (m *PersistentVolumeClaimList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Resources.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -27021,11 +28926,43 @@ func (m *PersistentVolumeClaimList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, PersistentVolumeClaim{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { 
return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.StorageClassName = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -27047,7 +28984,7 @@ func (m *PersistentVolumeClaimList) Unmarshal(data []byte) error { } return nil } -func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { +func (m *PersistentVolumeClaimStatus) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -27070,15 +29007,15 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeClaimSpec: wiretype end group for non-group") + return fmt.Errorf("proto: PersistentVolumeClaimStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PersistentVolumeClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -27103,13 +29040,13 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex])) + m.Phase = PersistentVolumeClaimPhase(data[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -27119,27 +29056,26 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Resources.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex])) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -27149,26 +29085,34 @@ func (m *PersistentVolumeClaimSpec) 
Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - var msglen int + var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -27178,248 +29122,70 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { return err } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PersistentVolumeClaimStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeClaimStatus: wiretype end group for non-group") 
- } - if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Phase = PersistentVolumeClaimPhase(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return 
ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - if m.Capacity == nil { - m.Capacity = make(ResourceList) - } - m.Capacity[ResourceName(mapkey)] = *mapvalue + iNdEx = postmsgIndex + if m.Capacity == nil { + m.Capacity = make(ResourceList) + } + m.Capacity[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -28242,59 +30008,125 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 18: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PortworxVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PortworxVolume == nil { + m.PortworxVolume = &PortworxVolumeSource{} + } + if err := m.PortworxVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleIO", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScaleIO == nil { + m.ScaleIO = &ScaleIOVolumeSource{} + } + if err := m.ScaleIO.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeSpec) 
Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -28398,7 +30230,7 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { return err } @@ -28529,6 +30361,35 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { } m.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(data[iNdEx:postIndex]) iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageClassName = string(data[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -29103,7 +30964,7 @@ func (m *PodAffinityTerm) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.LabelSelector == nil { - m.LabelSelector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} + m.LabelSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.LabelSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { return err @@ -30129,7 +31990,7 @@ func (m *PodLogOptions) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.SinceTime == nil { - m.SinceTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + m.SinceTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.SinceTime.Unmarshal(data[iNdEx:postIndex]); err != nil { return err @@ -30216,7 +32077,7 @@ func (m *PodLogOptions) Unmarshal(data []byte) error { } return nil } -func (m *PodProxyOptions) Unmarshal(data []byte) error { +func (m *PodPortForwardOptions) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -30239,17 +32100,17 @@ func (m *PodProxyOptions) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodProxyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: PodPortForwardOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: 
PodPortForwardOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30259,21 +32120,12 @@ func (m *PodProxyOptions) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(data[iNdEx:postIndex]) - iNdEx = postIndex + m.Ports = append(m.Ports, v) default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -30295,7 +32147,7 @@ func (m *PodProxyOptions) Unmarshal(data []byte) error { } return nil } -func (m *PodSecurityContext) Unmarshal(data []byte) error { +func (m *PodProxyOptions) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -30318,17 +32170,17 @@ func (m *PodSecurityContext) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSecurityContext: wiretype end group for non-group") + return fmt.Errorf("proto: PodProxyOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30338,106 +32190,185 @@ func (m *PodSecurityContext) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.SELinuxOptions == nil { - m.SELinuxOptions = &SELinuxOptions{} - } - if err := m.SELinuxOptions.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } + m.Path = string(data[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.RunAsUser = &v - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - 
m.RunAsNonRoot = &b - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.SupplementalGroups = append(m.SupplementalGroups, v) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.FSGroup = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityContext) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxOptions == nil { + m.SELinuxOptions = &SELinuxOptions{} + } + if err := m.SELinuxOptions.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RunAsUser = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.RunAsNonRoot = &b + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SupplementalGroups = append(m.SupplementalGroups, v) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.FSGroup = &v default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -30515,7 +32446,7 @@ func (m *PodSignature) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.PodController == nil { - m.PodController = &OwnerReference{} + m.PodController = &k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference{} } if err := m.PodController.Unmarshal(data[iNdEx:postIndex]); err != nil { return err @@ -31111,6 +33042,151 @@ func (m *PodSpec) Unmarshal(data []byte) error { } m.Subdomain = string(data[iNdEx:postIndex]) iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Affinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Affinity == nil { + m.Affinity = &Affinity{} + } + if err := m.Affinity.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchedulerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchedulerName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitContainers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InitContainers = append(m.InitContainers, Container{}) + if err := m.InitContainers[len(m.InitContainers)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ 
+ v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AutomountServiceAccountToken = &b + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -31364,7 +33440,7 @@ func (m *PodStatus) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.StartTime == nil { - m.StartTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil { return err @@ -31401,61 +33477,11 @@ func (m *PodStatus) Unmarshal(data []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodStatusResult) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodStatusResult: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodStatusResult: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field QOSClass", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31465,25 +33491,24 @@ func (m *PodStatusResult) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } + m.QOSClass = PodQOSClass(data[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InitContainerStatuses", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -31507,7 
+33532,8 @@ func (m *PodStatusResult) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + m.InitContainerStatuses = append(m.InitContainerStatuses, ContainerStatus{}) + if err := m.InitContainerStatuses[len(m.InitContainerStatuses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -31532,7 +33558,7 @@ func (m *PodStatusResult) Unmarshal(data []byte) error { } return nil } -func (m *PodTemplate) Unmarshal(data []byte) error { +func (m *PodStatusResult) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -31555,10 +33581,10 @@ func (m *PodTemplate) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodTemplate: wiretype end group for non-group") + return fmt.Errorf("proto: PodStatusResult: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodStatusResult: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -31593,7 +33619,7 @@ func (m *PodTemplate) Unmarshal(data []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -31617,7 +33643,7 @@ func (m *PodTemplate) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -31642,7 +33668,7 @@ func (m *PodTemplate) Unmarshal(data []byte) error { } return nil } -func (m *PodTemplateList) Unmarshal(data []byte) error { +func (m *PodTemplate) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -31665,15 +33691,15 @@ func (m *PodTemplateList) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodTemplateList: wiretype end group for non-group") + return fmt.Errorf("proto: PodTemplate: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodTemplate: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -31697,13 +33723,13 @@ func (m *PodTemplateList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -31727,8 +33753,7 @@ func (m *PodTemplateList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, 
PodTemplate{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -31753,7 +33778,7 @@ func (m *PodTemplateList) Unmarshal(data []byte) error { } return nil } -func (m *PodTemplateSpec) Unmarshal(data []byte) error { +func (m *PodTemplateList) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -31776,15 +33801,15 @@ func (m *PodTemplateSpec) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodTemplateSpec: wiretype end group for non-group") + return fmt.Errorf("proto: PodTemplateList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -31808,13 +33833,13 @@ func (m *PodTemplateSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -31838,7 +33863,8 @@ func (m *PodTemplateSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, PodTemplate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -31863,87 +33889,7 @@ func (m *PodTemplateSpec) Unmarshal(data []byte) error { } return nil } -func (m *Preconditions) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Preconditions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Preconditions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := k8s_io_kubernetes_pkg_types.UID(data[iNdEx:postIndex]) - m.UID = &s 
- iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PreferAvoidPodsEntry) Unmarshal(data []byte) error { +func (m *PodTemplateSpec) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -31966,15 +33912,15 @@ func (m *PreferAvoidPodsEntry) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PreferAvoidPodsEntry: wiretype end group for non-group") + return fmt.Errorf("proto: PodTemplateSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PreferAvoidPodsEntry: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSignature", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -31998,13 +33944,13 @@ func (m *PreferAvoidPodsEntry) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.PodSignature.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EvictionTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -32028,68 +33974,10 @@ func (m *PreferAvoidPodsEntry) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.EvictionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(data[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -32111,7 +33999,383 @@ func (m 
*PreferAvoidPodsEntry) Unmarshal(data []byte) error { } return nil } -func (m *PreferredSchedulingTerm) Unmarshal(data []byte) error { +func (m *PortworxVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortworxVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortworxVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Preconditions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Preconditions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Preconditions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_apimachinery_pkg_types.UID(data[iNdEx:postIndex]) + m.UID = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PreferAvoidPodsEntry) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PreferAvoidPodsEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PreferAvoidPodsEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSignature", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PodSignature.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvictionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EvictionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = 
postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PreferredSchedulingTerm) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -32385,6 +34649,107 @@ func (m *Probe) Unmarshal(data []byte) error { } return nil } +func (m *ProjectedVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectedVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectedVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sources = append(m.Sources, VolumeProjection{}) + if err := m.Sources[len(m.Sources)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *QuobyteVolumeSource) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 @@ -34368,7 +36733,7 @@ func (m *ResourceQuotaSpec) Unmarshal(data []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue := 
&k8s_io_kubernetes_pkg_api_resource.Quantity{} + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { return err } @@ -34563,7 +36928,7 @@ func (m *ResourceQuotaStatus) Unmarshal(data []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { return err } @@ -34679,7 +37044,7 @@ func (m *ResourceQuotaStatus) Unmarshal(data []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { return err } @@ -34845,7 +37210,7 @@ func (m *ResourceRequirements) Unmarshal(data []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { return err } @@ -34961,7 +37326,7 @@ func (m *ResourceRequirements) Unmarshal(data []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { return err } @@ -35158,6 +37523,332 @@ func (m *SELinuxOptions) Unmarshal(data []byte) error { } return nil } +func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleIOVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleIOVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gateway = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field System", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.System = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SSLEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SSLEnabled = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProtectionDomain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProtectionDomain = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoragePool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoragePool = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageMode = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Secret) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 @@ -35490,6 +38181,107 @@ func (m *Secret) Unmarshal(data []byte) error { } return nil } +func (m *SecretEnvSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretEnvSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err 
!= nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *SecretKeySelector) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 @@ -35551,86 +38343,248 @@ func (m *SecretKeySelector) Unmarshal(data []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SecretList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SecretList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SecretList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretList) Unmarshal(data []byte) error { + l := len(data) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Secret{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretProjection) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -35654,15 +38608,16 @@ func (m *SecretList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35672,23 +38627,13 @@ func (m *SecretList) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Secret{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + b := bool(v != 0) + m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -35819,6 +38764,27 @@ func (m *SecretVolumeSource) Unmarshal(data []byte) error { } } m.DefaultMode = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -36204,123 +39170,154 @@ func (m *Service) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccount) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = append(m.Secrets, ObjectReference{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceAccount) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceAccount: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceAccount: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { 
- return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -36344,16 +39341,16 @@ func (m *ServiceAccount) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Secrets = append(m.Secrets, ObjectReference{}) - if err := m.Secrets[len(m.Secrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{}) + if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -36363,23 +39360,13 @@ func (m *ServiceAccount) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{}) - if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + b := bool(v != 0) + m.AutomountServiceAccountToken = &b default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -37076,11 +40063,127 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterIP = string(data[iNdEx:postIndex]) + m.ClusterIP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ServiceType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalIPs = append(m.ExternalIPs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedPublicIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeprecatedPublicIPs = append(m.DeprecatedPublicIPs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinity", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionAffinity = ServiceAffinity(data[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerIP", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -37105,11 +40208,11 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ServiceType(data[iNdEx:postIndex]) + m.LoadBalancerIP = string(data[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalIPs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -37134,11 +40237,11 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalIPs = append(m.ExternalIPs, string(data[iNdEx:postIndex])) + m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(data[iNdEx:postIndex])) iNdEx = postIndex - case 6: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedPublicIPs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExternalName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -37163,100 +40266,63 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DeprecatedPublicIPs = append(m.DeprecatedPublicIPs, string(data[iNdEx:postIndex])) + m.ExternalName = string(data[iNdEx:postIndex]) iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinity", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 
0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.SessionAffinity = ServiceAffinity(data[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerIP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.LoadBalancerIP = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 10: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -37266,20 +40332,21 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalName = string(data[iNdEx:postIndex]) + if err := m.LoadBalancer.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -37302,7 +40369,7 @@ func (m *ServiceSpec) 
Unmarshal(data []byte) error { } return nil } -func (m *ServiceStatus) Unmarshal(data []byte) error { +func (m *Sysctl) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -37325,17 +40392,17 @@ func (m *ServiceStatus) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ServiceStatus: wiretype end group for non-group") + return fmt.Errorf("proto: Sysctl: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Sysctl: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -37345,21 +40412,49 @@ func (m *ServiceStatus) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LoadBalancer.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(data[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -37578,6 +40673,36 @@ func (m *Taint) Unmarshal(data []byte) error { } m.Effect = TaintEffect(data[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeAdded", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TimeAdded.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -37655,11 +40780,40 @@ func (m *Toleration) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(data[iNdEx:postIndex]) + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = TolerationOperator(data[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -37684,11 +40838,11 @@ func (m *Toleration) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Operator = TolerationOperator(data[iNdEx:postIndex]) + m.Value = string(data[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -37713,13 +40867,13 @@ func (m *Toleration) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = string(data[iNdEx:postIndex]) + m.Effect = TaintEffect(data[iNdEx:postIndex]) iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TolerationSeconds", wireType) } - var stringLen uint64 + var v int64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -37729,21 +40883,12 @@ func (m *Toleration) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Effect = TaintEffect(data[iNdEx:postIndex]) - iNdEx = postIndex + m.TolerationSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -38031,6 +41176,155 @@ func (m *VolumeMount) Unmarshal(data []byte) error { } return nil } +func (m *VolumeProjection) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &SecretProjection{} + } + if err := m.Secret.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DownwardAPI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DownwardAPI == nil { + m.DownwardAPI = &DownwardAPIProjection{} + } + if err := m.DownwardAPI.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMap == nil { + m.ConfigMap = &ConfigMapProjection{} + } + if err := m.ConfigMap.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *VolumeSource) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 @@ -38819,6 +42113,105 @@ func (m *VolumeSource) Unmarshal(data []byte) error { return err } iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortworxVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PortworxVolume == nil { + m.PortworxVolume = &PortworxVolumeSource{} + } + if err := m.PortworxVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleIO", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScaleIO == nil { + m.ScaleIO = &ScaleIOVolumeSource{} + } + if err := m.ScaleIO.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 26: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Projected", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Projected == nil { + m.Projected = &ProjectedVolumeSource{} + } + if err := m.Projected.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -39153,631 +42546,693 @@ var ( ) var fileDescriptorGenerated = []byte{ - // 10015 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x7d, 0x6d, 0x6c, 0x25, 0xd7, - 0x75, 0x98, 0xe7, 0x3d, 0x7e, 0xbd, 0xc3, 0xcf, 0xbd, 0xfb, 0x21, 0x8a, 0x91, 0x96, 0xeb, 0x91, - 0xb5, 0x5e, 0x49, 0x2b, 0xae, 0x77, 0x25, 0x45, 0xb2, 0xa5, 0xca, 0x26, 0xf9, 0xc8, 0x5d, 0x7a, - 0x97, 0xbb, 0x4f, 0xf7, 0x71, 0xb5, 0xb2, 0xad, 0x4a, 0x1e, 0xbe, 0xb9, 0x24, 0xc7, 0x3b, 0x9c, - 0x79, 0x9a, 0x99, 0xc7, 0x5d, 0xda, 0x0d, 0x90, 0x3a, 0x6a, 0x8a, 0x22, 0x46, 0xea, 0xa2, 0x35, - 0x5a, 0xa0, 0x2d, 0xea, 0x16, 0x68, 0x91, 0x36, 0x68, 0x1a, 0xa7, 0x6e, 0x62, 0xb7, 0x86, 0x51, - 0xa0, 0xa9, 0xe1, 0x7e, 0xa4, 0x70, 0x80, 0xa0, 0x09, 0x12, 0x80, 0x8d, 0x18, 0x14, 0xfd, 0xd1, - 0x1f, 0x2d, 0xd0, 0x5f, 0x25, 0x82, 0xb6, 0xb8, 0x9f, 0x73, 0xef, 0xbc, 0x79, 0x9c, 0x79, 0xd4, - 0x92, 0x51, 0x82, 0xfe, 0x7b, 0xef, 0x9c, 0x73, 0xcf, 0xfd, 0x98, 0x73, 0xcf, 0x3d, 0xf7, 0xdc, - 0x73, 0xcf, 0x85, 0xcb, 0xf7, 0x5f, 0x89, 0xe7, 0xbc, 0xf0, 0xca, 0xfd, 0xce, 0x3a, 0x89, 0x02, - 0x92, 0x90, 0xf8, 0x4a, 0xfb, 0xfe, 0xe6, 0x15, 0xa7, 0xed, 0x5d, 0xd9, 0xb9, 0x7a, 0x65, 0x93, - 0x04, 0x24, 0x72, 0x12, 0xe2, 0xce, 0xb5, 0xa3, 0x30, 0x09, 0xd1, 0x13, 0x9c, 0x7a, 0x2e, 0xa5, - 0x9e, 0x6b, 0xdf, 0xdf, 0x9c, 0x73, 0xda, 0xde, 0xdc, 0xce, 0xd5, 0x99, 0xe7, 0x37, 0xbd, 0x64, - 0xab, 0xb3, 0x3e, 0xd7, 0x0a, 0xb7, 0xaf, 0x6c, 0x86, 0x9b, 0xe1, 0x15, 0x56, 0x68, 0xbd, 0xb3, - 0xc1, 0xfe, 0xb1, 0x3f, 0xec, 0x17, 0x67, 0x36, 0x73, 0xad, 0x77, 0xd5, 0x11, 0x89, 0xc3, 0x4e, - 0xd4, 0x22, 0xd9, 0x06, 0xcc, 0xbc, 0xd4, 0xbb, 0x4c, 0x27, 0xd8, 0x21, 0x51, 0xec, 0x85, 0x01, - 0x71, 0xbb, 0x8a, 0x3d, 0x9f, 0x5f, 0x2c, 0xea, 0x04, 0x89, 0xb7, 0xdd, 0x5d, 0xcb, 0xd5, 0x7c, - 0xf2, 0x4e, 0xe2, 0xf9, 0x57, 0xbc, 0x20, 0x89, 0x93, 0x28, 0x5b, 0xc4, 0xfe, 0x5d, 0x0b, 0x2e, - 0xcc, 0xdf, 0x6b, 0x2e, 0xf9, 0x4e, 0x9c, 0x78, 0xad, 0x05, 0x3f, 0x6c, 0xdd, 0x6f, 0x26, 0x61, - 0x44, 0xde, 0x0c, 0xfd, 0xce, 0x36, 0x69, 0xb2, 0xde, 0xa0, 0xcb, 0x30, 0xb2, 0xc3, 0xfe, 0xaf, - 0xd4, 0xa7, 0xad, 0x0b, 0xd6, 0xa5, 0xda, 0xc2, 0xd4, 0x8f, 0xf7, 0x66, 0x3f, 0xb6, 0xbf, 0x37, - 0x3b, 0xf2, 0xa6, 0x80, 0x63, 0x45, 0x81, 0x2e, 0xc2, 0xd0, 0x46, 0xbc, 0xb6, 0xdb, 0x26, 0xd3, - 0x15, 0x46, 0x3b, 0x21, 0x68, 0x87, 0x96, 0x9b, 0x14, 0x8a, 0x05, 0x16, 0x5d, 0x81, 0x5a, 0xdb, - 0x89, 0x12, 0x2f, 0xf1, 0xc2, 0x60, 0xba, 0x7a, 0xc1, 0xba, 0x34, 0xb8, 0x70, 0x4a, 0x90, 0xd6, - 0x1a, 0x12, 0x81, 0x53, 0x1a, 0xda, 0x8c, 0x88, 0x38, 0xee, 0x9d, 0xc0, 0xdf, 0x9d, 0x1e, 0xb8, - 0x60, 0x5d, 0x1a, 0x49, 0x9b, 0x81, 0x05, 0x1c, 0x2b, 0x0a, 0xfb, 0x7b, 0x15, 0x18, 0x99, 0xdf, - 0xd8, 0xf0, 0x02, 0x2f, 0xd9, 0x45, 0x5f, 0x86, 0xb1, 0x20, 0x74, 0x89, 0xfc, 0xcf, 0x7a, 0x31, - 0x7a, 0xed, 0xd9, 0xb9, 0xc3, 
0xe4, 0x62, 0xee, 0xb6, 0x56, 0x62, 0x61, 0x6a, 0x7f, 0x6f, 0x76, - 0x4c, 0x87, 0x60, 0x83, 0x23, 0x7a, 0x1b, 0x46, 0xdb, 0xa1, 0xab, 0x2a, 0xa8, 0xb0, 0x0a, 0x9e, - 0x39, 0xbc, 0x82, 0x46, 0x5a, 0x60, 0x61, 0x72, 0x7f, 0x6f, 0x76, 0x54, 0x03, 0x60, 0x9d, 0x1d, - 0xf2, 0x61, 0x92, 0xfe, 0x0d, 0x12, 0x4f, 0xd5, 0x50, 0x65, 0x35, 0x3c, 0x5f, 0x5c, 0x83, 0x56, - 0x68, 0xe1, 0xf4, 0xfe, 0xde, 0xec, 0x64, 0x06, 0x88, 0xb3, 0xac, 0xed, 0xaf, 0xc2, 0xc4, 0x7c, - 0x92, 0x38, 0xad, 0x2d, 0xe2, 0xf2, 0xef, 0x8b, 0x5e, 0x84, 0x81, 0xc0, 0xd9, 0x26, 0xe2, 0xeb, - 0x5f, 0x10, 0xc3, 0x3e, 0x70, 0xdb, 0xd9, 0x26, 0x07, 0x7b, 0xb3, 0x53, 0x77, 0x03, 0xef, 0xbd, - 0x8e, 0x90, 0x19, 0x0a, 0xc3, 0x8c, 0x1a, 0x5d, 0x03, 0x70, 0xc9, 0x8e, 0xd7, 0x22, 0x0d, 0x27, - 0xd9, 0x12, 0xd2, 0x80, 0x44, 0x59, 0xa8, 0x2b, 0x0c, 0xd6, 0xa8, 0xec, 0xaf, 0x5b, 0x50, 0x9b, - 0xdf, 0x09, 0x3d, 0xb7, 0x11, 0xba, 0x31, 0xea, 0xc0, 0x64, 0x3b, 0x22, 0x1b, 0x24, 0x52, 0xa0, - 0x69, 0xeb, 0x42, 0xf5, 0xd2, 0xe8, 0xb5, 0x6b, 0x05, 0xfd, 0x36, 0x0b, 0x2d, 0x05, 0x49, 0xb4, - 0xbb, 0xf0, 0x98, 0xa8, 0x7a, 0x32, 0x83, 0xc5, 0xd9, 0x3a, 0xec, 0xbf, 0x56, 0x81, 0xb3, 0xf3, - 0x5f, 0xed, 0x44, 0xa4, 0xee, 0xc5, 0xf7, 0xb3, 0x53, 0xc1, 0xf5, 0xe2, 0xfb, 0xb7, 0xd3, 0xc1, - 0x50, 0x32, 0x58, 0x17, 0x70, 0xac, 0x28, 0xd0, 0xf3, 0x30, 0x4c, 0x7f, 0xdf, 0xc5, 0x2b, 0xa2, - 0xf7, 0xa7, 0x05, 0xf1, 0x68, 0xdd, 0x49, 0x9c, 0x3a, 0x47, 0x61, 0x49, 0x83, 0x56, 0x61, 0xb4, - 0xe5, 0xb4, 0xb6, 0xbc, 0x60, 0x73, 0x35, 0x74, 0x09, 0xfb, 0xc2, 0xb5, 0x85, 0xe7, 0x28, 0xf9, - 0x62, 0x0a, 0x3e, 0xd8, 0x9b, 0x9d, 0xe6, 0x6d, 0x13, 0x2c, 0x34, 0x1c, 0xd6, 0xcb, 0x23, 0x5b, - 0x4d, 0xc4, 0x01, 0xc6, 0x09, 0x72, 0x26, 0xe1, 0x25, 0x6d, 0x4e, 0x0d, 0xb2, 0x39, 0x35, 0xd6, - 0x63, 0x3e, 0xfd, 0x13, 0x4b, 0x8c, 0xc9, 0xb2, 0xe7, 0x9b, 0xea, 0xe1, 0x1a, 0x40, 0x4c, 0x5a, - 0x11, 0x49, 0xb4, 0x51, 0x51, 0x9f, 0xb9, 0xa9, 0x30, 0x58, 0xa3, 0xa2, 0x93, 0x3f, 0xde, 0x72, - 0x22, 0x26, 0x2d, 0x62, 0x6c, 0xd4, 0xe4, 0x6f, 0x4a, 0x04, 0x4e, 0x69, 0x8c, 0xc9, 0x5f, 0x2d, - 0x9c, 0xfc, 0xff, 0xd2, 0x82, 0xe1, 0x05, 0x2f, 0x70, 0xbd, 0x60, 0x13, 0xbd, 0x05, 0x23, 0xdb, - 0x24, 0x71, 0x5c, 0x27, 0x71, 0xc4, 0xbc, 0xbf, 0x74, 0xb8, 0xf0, 0xdc, 0x59, 0xff, 0x0a, 0x69, - 0x25, 0xab, 0x24, 0x71, 0xd2, 0x6e, 0xa4, 0x30, 0xac, 0xb8, 0xa1, 0xbb, 0x30, 0x94, 0x38, 0xd1, - 0x26, 0x49, 0xc4, 0x74, 0x7f, 0xbe, 0x0c, 0x5f, 0x4c, 0x45, 0x8d, 0x04, 0x2d, 0x92, 0x2a, 0xc6, - 0x35, 0xc6, 0x04, 0x0b, 0x66, 0x76, 0x0b, 0xc6, 0x16, 0x9d, 0xb6, 0xb3, 0xee, 0xf9, 0x5e, 0xe2, - 0x91, 0x18, 0x7d, 0x12, 0xaa, 0x8e, 0xeb, 0x32, 0xc1, 0xaf, 0x2d, 0x9c, 0xdd, 0xdf, 0x9b, 0xad, - 0xce, 0xbb, 0xee, 0xc1, 0xde, 0x2c, 0x28, 0xaa, 0x5d, 0x4c, 0x29, 0xd0, 0xb3, 0x30, 0xe0, 0x46, - 0x61, 0x7b, 0xba, 0xc2, 0x28, 0xcf, 0xd1, 0x19, 0x5a, 0x8f, 0xc2, 0x76, 0x86, 0x94, 0xd1, 0xd8, - 0xff, 0xb6, 0x02, 0x68, 0x91, 0xb4, 0xb7, 0x96, 0x9b, 0xc6, 0xb7, 0xbc, 0x04, 0x23, 0xdb, 0x61, - 0xe0, 0x25, 0x61, 0x14, 0x8b, 0x0a, 0x99, 0x3c, 0xac, 0x0a, 0x18, 0x56, 0x58, 0x74, 0x01, 0x06, - 0xda, 0xe9, 0xb4, 0x1e, 0x93, 0x2a, 0x81, 0x4d, 0x68, 0x86, 0xa1, 0x14, 0x9d, 0x98, 0x44, 0x42, - 0x8e, 0x15, 0xc5, 0xdd, 0x98, 0x44, 0x98, 0x61, 0x52, 0xc9, 0xa1, 0x32, 0x25, 0xa4, 0x34, 0x23, - 0x39, 0x14, 0x83, 0x35, 0x2a, 0xf4, 0x2e, 0xd4, 0xf8, 0x3f, 0x4c, 0x36, 0x98, 0xc8, 0x16, 0x2a, - 0x83, 0x5b, 0x61, 0xcb, 0xf1, 0xb3, 0x83, 0x3f, 0xce, 0x24, 0x4d, 0x32, 0xc2, 0x29, 0x4f, 0x43, - 0xd2, 0x86, 0x0a, 0x25, 0xed, 0x6f, 0x59, 0x80, 0x16, 0xbd, 0xc0, 0x25, 0xd1, 0x09, 0x2c, 0x99, - 0xfd, 0x4d, 0x82, 0x3f, 0xa0, 0x4d, 0x0b, 0xb7, 0xdb, 
0x61, 0x40, 0x82, 0x64, 0x31, 0x0c, 0x5c, - 0xbe, 0x8c, 0x7e, 0x06, 0x06, 0x12, 0x5a, 0x15, 0x6f, 0xd6, 0x45, 0xf9, 0x59, 0x68, 0x05, 0x07, - 0x7b, 0xb3, 0xe7, 0xba, 0x4b, 0xb0, 0x26, 0xb0, 0x32, 0xe8, 0xd3, 0x30, 0x14, 0x27, 0x4e, 0xd2, - 0x89, 0x45, 0x43, 0x3f, 0x2e, 0x1b, 0xda, 0x64, 0xd0, 0x83, 0xbd, 0xd9, 0x49, 0x55, 0x8c, 0x83, - 0xb0, 0x28, 0x80, 0x9e, 0x81, 0xe1, 0x6d, 0x12, 0xc7, 0xce, 0xa6, 0x54, 0x6c, 0x93, 0xa2, 0xec, - 0xf0, 0x2a, 0x07, 0x63, 0x89, 0x47, 0x4f, 0xc1, 0x20, 0x89, 0xa2, 0x30, 0x12, 0x12, 0x31, 0x2e, - 0x08, 0x07, 0x97, 0x28, 0x10, 0x73, 0x9c, 0xfd, 0xdb, 0x16, 0x4c, 0xaa, 0xb6, 0xf2, 0xba, 0x8e, - 0x71, 0xaa, 0xbb, 0x00, 0x2d, 0xd9, 0xb1, 0x98, 0x4d, 0xb0, 0xd1, 0x6b, 0x9f, 0x3a, 0x9c, 0x77, - 0xf7, 0x40, 0xa6, 0x75, 0x28, 0x50, 0x8c, 0x35, 0xbe, 0xf6, 0x8f, 0x2d, 0x38, 0x9d, 0xe9, 0xd3, - 0x2d, 0x2f, 0x4e, 0xd0, 0x9f, 0xef, 0xea, 0xd7, 0x95, 0x43, 0xea, 0xd6, 0x2c, 0xca, 0x39, 0x5a, - 0x9c, 0x75, 0x4f, 0x09, 0x8a, 0x84, 0x68, 0x9d, 0xc3, 0x30, 0xe8, 0x25, 0x64, 0x5b, 0xf6, 0xeb, - 0xf9, 0x92, 0xfd, 0xe2, 0x0d, 0x4c, 0x3f, 0xcf, 0x0a, 0xe5, 0x81, 0x39, 0x2b, 0xfb, 0x7f, 0x59, - 0x50, 0x5b, 0x0c, 0x83, 0x0d, 0x6f, 0x73, 0xd5, 0x69, 0x1f, 0xe3, 0x87, 0x69, 0xc2, 0x00, 0xe3, - 0xca, 0x9b, 0x7e, 0xb5, 0xa8, 0xe9, 0xa2, 0x41, 0x73, 0x74, 0xf1, 0xe4, 0x56, 0x81, 0xd2, 0x4b, - 0x14, 0x84, 0x19, 0xb3, 0x99, 0x97, 0xa1, 0xa6, 0x08, 0xd0, 0x14, 0x54, 0xef, 0x13, 0x6e, 0x32, - 0xd6, 0x30, 0xfd, 0x89, 0xce, 0xc0, 0xe0, 0x8e, 0xe3, 0x77, 0xc4, 0x6c, 0xc5, 0xfc, 0xcf, 0x67, - 0x2a, 0xaf, 0x58, 0xf6, 0x0f, 0x2c, 0x38, 0xa3, 0x2a, 0xb9, 0x49, 0x76, 0x9b, 0xc4, 0x27, 0xad, - 0x24, 0x8c, 0xd0, 0xfb, 0x16, 0x9c, 0xf1, 0x73, 0xf4, 0x90, 0x18, 0x8d, 0xa3, 0x68, 0xb0, 0x27, - 0x44, 0xc3, 0xcf, 0xe4, 0x61, 0x71, 0x6e, 0x6d, 0xe8, 0x49, 0xde, 0x17, 0x3e, 0x79, 0x47, 0x05, - 0x83, 0xea, 0x4d, 0xb2, 0xcb, 0x3a, 0x46, 0x9b, 0x3f, 0xae, 0x9a, 0x7f, 0x12, 0x92, 0x77, 0xcb, - 0x94, 0xbc, 0x4f, 0x96, 0xfc, 0x7c, 0x3d, 0x64, 0xee, 0xef, 0x55, 0xe0, 0xac, 0xa2, 0x31, 0xd4, - 0xf1, 0x47, 0x64, 0xf8, 0xfb, 0xeb, 0xee, 0x4d, 0xb2, 0xbb, 0x16, 0xd2, 0xf5, 0x34, 0xbf, 0xbb, - 0xe8, 0x2a, 0x8c, 0xba, 0x64, 0xc3, 0xe9, 0xf8, 0x89, 0x32, 0x17, 0x07, 0xf9, 0x3e, 0xa2, 0x9e, - 0x82, 0xb1, 0x4e, 0x63, 0xff, 0x56, 0x8d, 0xcd, 0xca, 0xc4, 0xf1, 0x02, 0x12, 0xd1, 0x05, 0x5a, - 0xb3, 0xea, 0xc7, 0x74, 0xab, 0x5e, 0x58, 0xf0, 0x4f, 0xc1, 0xa0, 0xb7, 0x4d, 0x55, 0x76, 0xc5, - 0xd4, 0xc4, 0x2b, 0x14, 0x88, 0x39, 0x0e, 0x3d, 0x0d, 0xc3, 0xad, 0x70, 0x7b, 0xdb, 0x09, 0xdc, - 0xe9, 0x2a, 0x33, 0x19, 0x46, 0xa9, 0x56, 0x5f, 0xe4, 0x20, 0x2c, 0x71, 0xe8, 0x09, 0x18, 0x70, - 0xa2, 0xcd, 0x78, 0x7a, 0x80, 0xd1, 0x8c, 0xd0, 0x9a, 0xe6, 0xa3, 0xcd, 0x18, 0x33, 0x28, 0x35, - 0x05, 0x1e, 0x84, 0xd1, 0x7d, 0x2f, 0xd8, 0xac, 0x7b, 0x11, 0x5b, 0xd7, 0x35, 0x53, 0xe0, 0x9e, - 0xc2, 0x60, 0x8d, 0x0a, 0x35, 0x60, 0xb0, 0x1d, 0x46, 0x49, 0x3c, 0x3d, 0xc4, 0x86, 0xf3, 0xb9, - 0x42, 0xe9, 0xe1, 0xfd, 0x6e, 0x84, 0x51, 0x92, 0x76, 0x85, 0xfe, 0x8b, 0x31, 0x67, 0x84, 0x16, - 0xa1, 0x4a, 0x82, 0x9d, 0xe9, 0x61, 0xc6, 0xef, 0x13, 0x87, 0xf3, 0x5b, 0x0a, 0x76, 0xde, 0x74, - 0xa2, 0x74, 0x16, 0x2d, 0x05, 0x3b, 0x98, 0x96, 0x46, 0x2d, 0xa8, 0x49, 0x47, 0x40, 0x3c, 0x3d, - 0x52, 0x46, 0xc0, 0xb0, 0x20, 0xc7, 0xe4, 0xbd, 0x8e, 0x17, 0x91, 0x6d, 0x12, 0x24, 0x71, 0x6a, - 0x0f, 0x4b, 0x6c, 0x8c, 0x53, 0xbe, 0xa8, 0x05, 0x63, 0xdc, 0x7c, 0x58, 0x0d, 0x3b, 0x41, 0x12, - 0x4f, 0xd7, 0x58, 0x93, 0x0b, 0x36, 0x9c, 0x6f, 0xa6, 0x25, 0x16, 0xce, 0x08, 0xf6, 0x63, 0x1a, - 0x30, 0xc6, 0x06, 0x53, 0xf4, 0x36, 0x8c, 0xfb, 0xde, 0x0e, 0x09, 0x48, 0x1c, 
0x37, 0xa2, 0x70, - 0x9d, 0x4c, 0x03, 0xeb, 0xcd, 0x53, 0x45, 0x9b, 0xaf, 0x70, 0x9d, 0x2c, 0x9c, 0xda, 0xdf, 0x9b, - 0x1d, 0xbf, 0xa5, 0x97, 0xc6, 0x26, 0x33, 0xf4, 0x2e, 0x4c, 0x50, 0x5b, 0xc5, 0x4b, 0xd9, 0x8f, - 0x96, 0x67, 0x8f, 0xf6, 0xf7, 0x66, 0x27, 0xb0, 0x51, 0x1c, 0x67, 0xd8, 0xa1, 0x35, 0xa8, 0xf9, - 0xde, 0x06, 0x69, 0xed, 0xb6, 0x7c, 0x32, 0x3d, 0xc6, 0x78, 0x17, 0x4c, 0xb9, 0x5b, 0x92, 0x9c, - 0xdb, 0x87, 0xea, 0x2f, 0x4e, 0x19, 0xa1, 0x37, 0xe1, 0x5c, 0x42, 0xa2, 0x6d, 0x2f, 0x70, 0xe8, - 0xa2, 0x2d, 0x8c, 0x17, 0xb6, 0xc3, 0x1d, 0x67, 0x52, 0x7b, 0x5e, 0x0c, 0xec, 0xb9, 0xb5, 0x5c, - 0x2a, 0xdc, 0xa3, 0x34, 0xba, 0x03, 0x93, 0x6c, 0x3e, 0x35, 0x3a, 0xbe, 0xdf, 0x08, 0x7d, 0xaf, - 0xb5, 0x3b, 0x3d, 0xc1, 0x18, 0x3e, 0x2d, 0xf7, 0xad, 0x2b, 0x26, 0x9a, 0xda, 0xf5, 0xe9, 0x3f, - 0x9c, 0x2d, 0x8d, 0x7c, 0x98, 0x8c, 0x49, 0xab, 0x13, 0x79, 0xc9, 0x2e, 0x95, 0x7d, 0xf2, 0x30, - 0x99, 0x9e, 0x2c, 0xb3, 0x4f, 0x69, 0x9a, 0x85, 0xb8, 0xd3, 0x20, 0x03, 0xc4, 0x59, 0xd6, 0x54, - 0x55, 0xc4, 0x89, 0xeb, 0x05, 0xd3, 0x53, 0xcc, 0x30, 0x55, 0xf3, 0xab, 0x49, 0x81, 0x98, 0xe3, - 0xd8, 0xb6, 0x8f, 0xfe, 0xb8, 0x43, 0x75, 0xef, 0x29, 0x46, 0x98, 0x6e, 0xfb, 0x24, 0x02, 0xa7, - 0x34, 0x74, 0xc1, 0x4a, 0x92, 0xdd, 0x69, 0xc4, 0x48, 0xd5, 0x54, 0x5b, 0x5b, 0xfb, 0x02, 0xa6, - 0x70, 0x7b, 0x1d, 0x26, 0xd4, 0xb4, 0x66, 0xa3, 0x83, 0x66, 0x61, 0x90, 0x6a, 0x2e, 0xb9, 0x7b, - 0xa9, 0xd1, 0x26, 0x50, 0x85, 0x16, 0x63, 0x0e, 0x67, 0x4d, 0xf0, 0xbe, 0x4a, 0x16, 0x76, 0x13, - 0xc2, 0xad, 0xd8, 0xaa, 0xd6, 0x04, 0x89, 0xc0, 0x29, 0x8d, 0xfd, 0x7f, 0xf8, 0xa2, 0x98, 0xea, - 0x8e, 0x12, 0x7a, 0xf3, 0x32, 0x8c, 0x6c, 0x85, 0x71, 0x42, 0xa9, 0x59, 0x1d, 0x83, 0xe9, 0x2a, - 0x78, 0x43, 0xc0, 0xb1, 0xa2, 0x40, 0xaf, 0xc2, 0x78, 0x4b, 0xaf, 0x40, 0xa8, 0xf2, 0xb3, 0xa2, - 0x88, 0x59, 0x3b, 0x36, 0x69, 0xd1, 0x2b, 0x30, 0xc2, 0x5c, 0x79, 0xad, 0xd0, 0x17, 0xf6, 0xb2, - 0x5c, 0x99, 0x46, 0x1a, 0x02, 0x7e, 0xa0, 0xfd, 0xc6, 0x8a, 0x9a, 0xee, 0x3a, 0x68, 0x13, 0x56, - 0x1a, 0x42, 0xdd, 0xaa, 0x5d, 0xc7, 0x0d, 0x06, 0xc5, 0x02, 0x6b, 0xff, 0x6a, 0x45, 0x1b, 0x65, - 0x6a, 0xf4, 0x11, 0xf4, 0x45, 0x18, 0x7e, 0xe0, 0x78, 0x89, 0x17, 0x6c, 0x8a, 0x15, 0xf4, 0x85, - 0x92, 0xba, 0x97, 0x15, 0xbf, 0xc7, 0x8b, 0xf2, 0x75, 0x42, 0xfc, 0xc1, 0x92, 0x21, 0xe5, 0x1d, - 0x75, 0x82, 0x80, 0xf2, 0xae, 0xf4, 0xcf, 0x1b, 0xf3, 0xa2, 0x9c, 0xb7, 0xf8, 0x83, 0x25, 0x43, - 0xb4, 0x01, 0x20, 0x67, 0x1f, 0x71, 0x85, 0x0b, 0xed, 0xa7, 0xfb, 0x61, 0xbf, 0xa6, 0x4a, 0x2f, - 0x4c, 0xd0, 0x95, 0x29, 0xfd, 0x8f, 0x35, 0xce, 0x76, 0x87, 0x19, 0x22, 0xdd, 0xcd, 0x42, 0x6f, - 0xd3, 0x09, 0xe0, 0x44, 0x09, 0x71, 0xe7, 0x13, 0x31, 0x74, 0xcf, 0x95, 0x34, 0xa8, 0xd6, 0xbc, - 0x6d, 0xa2, 0xcf, 0x16, 0xc1, 0x05, 0xa7, 0x0c, 0xed, 0xef, 0x57, 0x61, 0xba, 0x57, 0x7b, 0xa9, - 0x4c, 0x92, 0x87, 0x5e, 0xb2, 0x48, 0x6d, 0x05, 0xcb, 0x94, 0xc9, 0x25, 0x01, 0xc7, 0x8a, 0x82, - 0x0a, 0x47, 0xec, 0x6d, 0x06, 0x8e, 0x2f, 0xe4, 0x57, 0x09, 0x47, 0x93, 0x41, 0xb1, 0xc0, 0x52, - 0xba, 0x88, 0x38, 0xb1, 0x70, 0xe1, 0x6a, 0x42, 0x84, 0x19, 0x14, 0x0b, 0xac, 0xbe, 0xfd, 0x1b, - 0x28, 0xd8, 0xfe, 0x19, 0x63, 0x34, 0xf8, 0x88, 0xc7, 0x08, 0xbd, 0x0b, 0xb0, 0xe1, 0x05, 0x5e, - 0xbc, 0xc5, 0xd8, 0x0f, 0xf5, 0xcf, 0x5e, 0x59, 0x25, 0xcb, 0x8a, 0x0d, 0xd6, 0x58, 0xa2, 0x97, - 0x60, 0x54, 0xcd, 0xd0, 0x95, 0xfa, 0xf4, 0xb0, 0xe9, 0xf8, 0x4b, 0xd5, 0x55, 0x1d, 0xeb, 0x74, - 0xf6, 0x57, 0xb2, 0x22, 0x23, 0x26, 0x86, 0x36, 0xc2, 0x56, 0xd9, 0x11, 0xae, 0x1c, 0x3e, 0xc2, - 0xf6, 0x7f, 0xae, 0xd2, 0xbd, 0xb3, 0x56, 0x59, 0x27, 0x2e, 0xa1, 0xd4, 0xde, 0xa0, 0x1a, 0xde, - 0x49, 
0x88, 0x98, 0x96, 0x97, 0xfb, 0x99, 0x37, 0xfa, 0x7a, 0x40, 0xa7, 0x03, 0xe7, 0x84, 0xb6, - 0xa0, 0xe6, 0x3b, 0x31, 0xdb, 0x49, 0x12, 0x31, 0x1d, 0xfb, 0x63, 0x9b, 0x5a, 0xe1, 0x4e, 0x9c, - 0x68, 0x0b, 0x2e, 0xaf, 0x25, 0x65, 0x4e, 0x97, 0x27, 0x6a, 0x1d, 0xc8, 0x93, 0x03, 0xd5, 0x1c, - 0x6a, 0x42, 0xec, 0x62, 0x8e, 0x43, 0xaf, 0xc0, 0x58, 0x44, 0x98, 0xa8, 0x2c, 0x52, 0x03, 0x88, - 0x09, 0xdf, 0x60, 0x6a, 0x29, 0x61, 0x0d, 0x87, 0x0d, 0xca, 0xd4, 0x50, 0x1e, 0x3a, 0xc4, 0x50, - 0x7e, 0x06, 0x86, 0xd9, 0x0f, 0x25, 0x15, 0xea, 0x0b, 0xad, 0x70, 0x30, 0x96, 0xf8, 0xac, 0x10, - 0x8d, 0x94, 0x14, 0xa2, 0x67, 0x61, 0xa2, 0xee, 0x90, 0xed, 0x30, 0x58, 0x0a, 0xdc, 0x76, 0xe8, - 0x05, 0x09, 0x9a, 0x86, 0x01, 0xb6, 0xa4, 0xf0, 0x19, 0x3f, 0x40, 0x39, 0xe0, 0x01, 0x6a, 0xec, - 0xda, 0xff, 0xd7, 0x82, 0xf1, 0x3a, 0xf1, 0x49, 0x42, 0xee, 0xb4, 0x99, 0xfb, 0x01, 0x2d, 0x03, - 0xda, 0x8c, 0x9c, 0x16, 0x69, 0x90, 0xc8, 0x0b, 0xdd, 0x26, 0x69, 0x85, 0x01, 0x73, 0xb8, 0xd3, - 0x35, 0xf2, 0xdc, 0xfe, 0xde, 0x2c, 0xba, 0xde, 0x85, 0xc5, 0x39, 0x25, 0x90, 0x0b, 0xe3, 0xed, - 0x88, 0x18, 0xfe, 0x12, 0xab, 0xd8, 0x3e, 0x6f, 0xe8, 0x45, 0xb8, 0xf9, 0x68, 0x80, 0xb0, 0xc9, - 0x14, 0x7d, 0x0e, 0xa6, 0xc2, 0xa8, 0xbd, 0xe5, 0x04, 0x75, 0xd2, 0x26, 0x81, 0x4b, 0x6d, 0x66, - 0xe1, 0x14, 0x3b, 0xb3, 0xbf, 0x37, 0x3b, 0x75, 0x27, 0x83, 0xc3, 0x5d, 0xd4, 0xf6, 0x2f, 0x57, - 0xe0, 0x6c, 0x3d, 0x7c, 0x10, 0x3c, 0x70, 0x22, 0x77, 0xbe, 0xb1, 0xc2, 0x0d, 0x61, 0xe6, 0x64, - 0x94, 0xce, 0x4d, 0xab, 0xa7, 0x73, 0xf3, 0x4b, 0x30, 0xb2, 0xe1, 0x11, 0xdf, 0xc5, 0x64, 0x43, - 0x74, 0xef, 0x6a, 0x19, 0x8f, 0xc6, 0x32, 0x2d, 0x23, 0xbd, 0x02, 0xdc, 0xb7, 0xba, 0x2c, 0xd8, - 0x60, 0xc5, 0x10, 0x75, 0x60, 0x4a, 0x5a, 0xfa, 0x12, 0x2b, 0x66, 0xc7, 0x0b, 0xe5, 0x36, 0x12, - 0x66, 0x35, 0x6c, 0x3c, 0x70, 0x86, 0x21, 0xee, 0xaa, 0x82, 0xee, 0xd0, 0xb6, 0xe9, 0xea, 0x30, - 0xc0, 0x64, 0x85, 0xed, 0xd0, 0xd8, 0x16, 0x92, 0x41, 0xed, 0x7f, 0x64, 0xc1, 0x63, 0x5d, 0xa3, - 0x25, 0xf6, 0xd7, 0x6f, 0xc9, 0x8d, 0x2d, 0x3f, 0x9d, 0x29, 0x68, 0x65, 0xee, 0x98, 0x97, 0xdb, - 0xe4, 0x56, 0x4a, 0x6c, 0x72, 0xef, 0xc0, 0x99, 0xa5, 0xed, 0x76, 0xb2, 0x5b, 0xf7, 0x4c, 0x9f, - 0xec, 0xcb, 0x30, 0xb4, 0x4d, 0x5c, 0xaf, 0xb3, 0x2d, 0x3e, 0xeb, 0xac, 0x54, 0xa4, 0xab, 0x0c, - 0x7a, 0xb0, 0x37, 0x3b, 0xde, 0x4c, 0xc2, 0xc8, 0xd9, 0x24, 0x1c, 0x80, 0x05, 0xb9, 0xfd, 0x81, - 0x05, 0x93, 0x72, 0x42, 0xcd, 0xbb, 0x6e, 0x44, 0xe2, 0x18, 0xcd, 0x40, 0xc5, 0x6b, 0x0b, 0x46, - 0x20, 0x18, 0x55, 0x56, 0x1a, 0xb8, 0xe2, 0xb5, 0xd1, 0x17, 0xa1, 0xc6, 0x5d, 0xf9, 0xa9, 0x70, - 0xf4, 0x79, 0x34, 0xc0, 0x76, 0x1f, 0x6b, 0x92, 0x07, 0x4e, 0xd9, 0x49, 0xcb, 0x92, 0xa9, 0xea, - 0xaa, 0xe9, 0x58, 0xbe, 0x21, 0xe0, 0x58, 0x51, 0xa0, 0x4b, 0x30, 0x12, 0x84, 0x2e, 0x3f, 0x65, - 0xe1, 0xcb, 0x2e, 0x13, 0xb9, 0xdb, 0x02, 0x86, 0x15, 0xd6, 0xfe, 0x86, 0x05, 0x63, 0xb2, 0x8f, - 0x25, 0x8d, 0x5c, 0x3a, 0x49, 0x52, 0x03, 0x37, 0x9d, 0x24, 0xd4, 0x48, 0x65, 0x18, 0xc3, 0x36, - 0xad, 0xf6, 0x63, 0x9b, 0xda, 0xdf, 0xaf, 0xc0, 0x84, 0x6c, 0x4e, 0xb3, 0xb3, 0x1e, 0x93, 0x04, - 0xbd, 0x03, 0x35, 0x87, 0x0f, 0x3e, 0x91, 0x72, 0xf6, 0x7c, 0xd1, 0x0e, 0xdd, 0xf8, 0x66, 0xa9, - 0x61, 0x30, 0x2f, 0xf9, 0xe0, 0x94, 0x25, 0xda, 0x81, 0x53, 0x41, 0x98, 0xb0, 0xf5, 0x40, 0xe1, - 0xcb, 0x79, 0x44, 0xb3, 0xf5, 0x3c, 0x2e, 0xea, 0x39, 0x75, 0x3b, 0xcb, 0x0f, 0x77, 0x57, 0x81, - 0xee, 0x48, 0x2f, 0x46, 0x95, 0xd5, 0xf5, 0x6c, 0xb9, 0xba, 0x7a, 0x3b, 0x31, 0xec, 0x1f, 0x5a, - 0x50, 0x93, 0x64, 0xc7, 0xe9, 0x13, 0xbf, 0x07, 0xc3, 0x31, 0xfb, 0x34, 0x72, 0x98, 0x2e, 0x97, - 0x6b, 0x3a, 0xff, 0x9e, 0xe9, 
0xe2, 0xc7, 0xff, 0xc7, 0x58, 0x72, 0x63, 0x6e, 0x48, 0xd5, 0x81, - 0x8f, 0x9e, 0x1b, 0x52, 0x35, 0xad, 0x87, 0x1b, 0xf2, 0x97, 0x2c, 0x18, 0xe2, 0xce, 0xa1, 0x72, - 0x1e, 0x36, 0xcd, 0x97, 0x9c, 0x72, 0x7c, 0x93, 0x02, 0x85, 0x6b, 0x19, 0xdd, 0x83, 0x1a, 0xfb, - 0xb1, 0x1c, 0x85, 0xdb, 0x62, 0x21, 0x78, 0xb6, 0x8c, 0x73, 0x8a, 0x2b, 0x3e, 0xae, 0x4d, 0xde, - 0x94, 0x0c, 0x70, 0xca, 0xcb, 0xfe, 0x41, 0x95, 0xce, 0xfa, 0x94, 0xd4, 0x58, 0xd6, 0xac, 0x93, - 0x58, 0xd6, 0x2a, 0xc7, 0xbf, 0xac, 0xbd, 0x07, 0x93, 0x2d, 0xcd, 0x27, 0x9f, 0x2e, 0xa6, 0xd7, - 0x4a, 0xba, 0x9b, 0x35, 0x47, 0x3e, 0x77, 0x86, 0x2c, 0x9a, 0xec, 0x70, 0x96, 0x3f, 0x22, 0x30, - 0xc6, 0x0f, 0x14, 0x45, 0x7d, 0x03, 0x85, 0x32, 0xcb, 0xfd, 0x2e, 0xbc, 0x84, 0xaa, 0x8c, 0x05, - 0x9d, 0x34, 0x35, 0x46, 0xd8, 0x60, 0x6b, 0xff, 0x8d, 0x41, 0x18, 0x5c, 0xda, 0x21, 0x41, 0x72, - 0x8c, 0xb3, 0x7c, 0x1b, 0x26, 0xbc, 0x60, 0x27, 0xf4, 0x77, 0x88, 0xcb, 0xf1, 0x47, 0x5b, 0xd1, - 0xce, 0x89, 0x4a, 0x26, 0x56, 0x0c, 0x66, 0x38, 0xc3, 0xfc, 0x38, 0xf6, 0x93, 0x6f, 0xc0, 0x10, - 0x97, 0x08, 0xb1, 0x99, 0x2c, 0x70, 0x92, 0xb2, 0x01, 0x15, 0x33, 0x27, 0xdd, 0xf5, 0x72, 0xff, - 0xac, 0x60, 0x84, 0xee, 0xc3, 0xc4, 0x86, 0x17, 0xc5, 0x09, 0xdd, 0x10, 0xc6, 0x89, 0xb3, 0xdd, - 0x3e, 0xca, 0x46, 0x52, 0x0d, 0xc9, 0xb2, 0xc1, 0x0a, 0x67, 0x58, 0xa3, 0x2d, 0x18, 0xa7, 0xfb, - 0x98, 0xb4, 0xae, 0xe1, 0xfe, 0xeb, 0x52, 0xbe, 0xa4, 0x5b, 0x3a, 0x27, 0x6c, 0x32, 0xa6, 0xca, - 0xa8, 0xc5, 0x36, 0x3e, 0x23, 0x6c, 0x49, 0x57, 0xca, 0x88, 0xef, 0x78, 0x38, 0x8e, 0xea, 0x34, - 0x76, 0x7e, 0x5c, 0x33, 0x75, 0x5a, 0x7a, 0x4a, 0x6c, 0x7f, 0x97, 0x2e, 0x40, 0x74, 0x14, 0x4f, - 0x42, 0x77, 0xdf, 0x30, 0x75, 0xf7, 0x53, 0x25, 0x3e, 0x6e, 0x0f, 0xbd, 0xfd, 0x65, 0x18, 0xd5, - 0xbe, 0x3d, 0xba, 0x02, 0xb5, 0x96, 0x3c, 0xea, 0x14, 0x0a, 0x5c, 0x19, 0x10, 0xea, 0x0c, 0x14, - 0xa7, 0x34, 0x74, 0x60, 0xa8, 0xe1, 0x95, 0x8d, 0x88, 0xa0, 0x66, 0x19, 0x66, 0x18, 0xfb, 0x05, - 0x80, 0xa5, 0x87, 0xa4, 0x35, 0xdf, 0x62, 0x07, 0xf1, 0xda, 0xb9, 0x89, 0xd5, 0xfb, 0xdc, 0xc4, - 0x7e, 0x1b, 0xc6, 0x97, 0x1e, 0xd2, 0x95, 0x5d, 0x6e, 0xd3, 0x2e, 0xc2, 0x10, 0x61, 0x00, 0xd6, - 0xaa, 0x91, 0x54, 0x48, 0x39, 0x19, 0x16, 0x58, 0x76, 0x8c, 0xfe, 0xd0, 0x11, 0x13, 0x56, 0xdb, - 0xf2, 0x2e, 0x51, 0x20, 0xe6, 0x38, 0xfb, 0x3b, 0x16, 0x4c, 0x2c, 0x2f, 0x1a, 0x76, 0xf2, 0x1c, - 0x00, 0xb7, 0x37, 0xef, 0xdd, 0xbb, 0x2d, 0xfd, 0xa8, 0xdc, 0xd9, 0xa5, 0xa0, 0x58, 0xa3, 0x40, - 0x8f, 0x43, 0xd5, 0xef, 0x04, 0xc2, 0x0c, 0x1c, 0xde, 0xdf, 0x9b, 0xad, 0xde, 0xea, 0x04, 0x98, - 0xc2, 0xb4, 0xc0, 0x86, 0x6a, 0xe9, 0xc0, 0x86, 0xe2, 0xd0, 0xbe, 0x6f, 0x55, 0x61, 0x6a, 0xd9, - 0x27, 0x0f, 0x8d, 0x56, 0x5f, 0x84, 0x21, 0x37, 0xf2, 0x76, 0x48, 0x94, 0x75, 0x93, 0xd4, 0x19, - 0x14, 0x0b, 0x6c, 0xe9, 0x58, 0x0b, 0x23, 0xce, 0xa4, 0x7a, 0xcc, 0x71, 0x26, 0x85, 0x7d, 0x46, - 0x1b, 0x30, 0x1c, 0xf2, 0xef, 0x3f, 0x3d, 0xc8, 0x04, 0xfd, 0xd5, 0xc3, 0x1b, 0x93, 0x1d, 0x9f, - 0x39, 0x21, 0x3d, 0xfc, 0xd0, 0x5b, 0x29, 0x4b, 0x01, 0xc5, 0x92, 0xf9, 0xcc, 0x67, 0x60, 0x4c, - 0xa7, 0xec, 0xeb, 0xf4, 0xfb, 0xe7, 0x2c, 0x38, 0xbd, 0xec, 0x87, 0xad, 0xfb, 0x99, 0x60, 0x98, - 0x97, 0x60, 0x94, 0x4e, 0xd5, 0xd8, 0x88, 0x10, 0x33, 0x42, 0xe1, 0x04, 0x0a, 0xeb, 0x74, 0x5a, - 0xb1, 0xbb, 0x77, 0x57, 0xea, 0x79, 0x11, 0x74, 0x02, 0x85, 0x75, 0x3a, 0xfb, 0x3f, 0x59, 0xf0, - 0xe4, 0xf5, 0xc5, 0xa5, 0x06, 0x55, 0x23, 0x71, 0x42, 0x82, 0xa4, 0x2b, 0x88, 0xef, 0x22, 0x0c, - 0xb5, 0x5d, 0xad, 0x29, 0x4a, 0x04, 0x1a, 0x75, 0xd6, 0x0a, 0x81, 0xfd, 0xa8, 0x44, 0xb2, 0xfe, - 0x92, 0x05, 0xa7, 0xaf, 0x7b, 0x09, 0x26, 0xed, 0x30, 
0x1b, 0x77, 0x17, 0x91, 0x76, 0x18, 0x7b, - 0x49, 0x18, 0xed, 0x66, 0xe3, 0xee, 0xb0, 0xc2, 0x60, 0x8d, 0x8a, 0xd7, 0xbc, 0xe3, 0x51, 0x05, - 0x2b, 0x3a, 0xa5, 0xd5, 0xcc, 0xe1, 0x58, 0x51, 0xd0, 0x8e, 0xb9, 0x5e, 0xc4, 0x6c, 0x91, 0x5d, - 0x31, 0x83, 0x55, 0xc7, 0xea, 0x12, 0x81, 0x53, 0x1a, 0xfb, 0xef, 0x58, 0x70, 0xf6, 0xba, 0xdf, - 0x89, 0x13, 0x12, 0x6d, 0xc4, 0x46, 0x63, 0x5f, 0x80, 0x1a, 0x91, 0x76, 0xb3, 0x68, 0xab, 0x5a, - 0x93, 0x94, 0x41, 0xcd, 0x83, 0xfe, 0x14, 0x5d, 0x89, 0x18, 0xb3, 0xfe, 0x22, 0xa2, 0xfe, 0x55, - 0x05, 0xc6, 0x6f, 0xac, 0xad, 0x35, 0xae, 0x93, 0x44, 0xe8, 0xe0, 0x62, 0x47, 0x4f, 0x43, 0xdb, - 0xe5, 0x8e, 0x5e, 0x9b, 0xeb, 0x31, 0xeb, 0x3a, 0x89, 0xe7, 0xcf, 0xf1, 0x18, 0xeb, 0xb9, 0x95, - 0x20, 0xb9, 0x13, 0x35, 0x93, 0xc8, 0x0b, 0x36, 0x73, 0x77, 0xc5, 0x72, 0x9d, 0xa8, 0xf6, 0x5a, - 0x27, 0xd0, 0x0b, 0x30, 0x14, 0xb7, 0xb6, 0x88, 0xda, 0xb4, 0xff, 0x94, 0x32, 0x43, 0x18, 0xf4, - 0x60, 0x6f, 0xb6, 0x76, 0x17, 0xaf, 0xf0, 0x3f, 0x58, 0x90, 0xa2, 0x77, 0x61, 0x74, 0x2b, 0x49, - 0xda, 0x37, 0x88, 0xe3, 0x92, 0x48, 0x6a, 0x89, 0x02, 0x2b, 0x90, 0x0e, 0x06, 0x2f, 0x90, 0x4e, - 0xac, 0x14, 0x16, 0x63, 0x9d, 0xa3, 0xdd, 0x04, 0x48, 0x71, 0x8f, 0x68, 0x6b, 0x63, 0xff, 0xc5, - 0x0a, 0x0c, 0xdf, 0x70, 0x02, 0xd7, 0x27, 0x11, 0x5a, 0x86, 0x01, 0xf2, 0x90, 0xb4, 0xca, 0x19, - 0xb0, 0xe9, 0x42, 0xca, 0x3d, 0x55, 0xf4, 0x3f, 0x66, 0xe5, 0x11, 0x86, 0x61, 0xda, 0xee, 0xeb, - 0x2a, 0x30, 0xf3, 0xb9, 0xe2, 0x51, 0x50, 0x22, 0xc1, 0x57, 0x61, 0x01, 0xc2, 0x92, 0x11, 0xf3, - 0xe9, 0xb4, 0xda, 0x4d, 0xaa, 0xdc, 0x92, 0x72, 0xb1, 0xd7, 0x6b, 0x8b, 0x0d, 0x4e, 0x2e, 0xf8, - 0x72, 0x9f, 0x8e, 0x04, 0xe2, 0x94, 0x9d, 0xfd, 0x0a, 0x9c, 0x61, 0xa7, 0x82, 0x4e, 0xb2, 0x65, - 0xcc, 0x99, 0x42, 0xe1, 0xb4, 0xff, 0x7e, 0x05, 0x4e, 0xad, 0x34, 0x17, 0x9b, 0xa6, 0x37, 0xee, - 0x15, 0x18, 0xe3, 0xcb, 0x33, 0x15, 0x3a, 0xc7, 0x17, 0xe5, 0x95, 0x1b, 0x7b, 0x4d, 0xc3, 0x61, - 0x83, 0x12, 0x3d, 0x09, 0x55, 0xef, 0xbd, 0x20, 0x1b, 0x1f, 0xb4, 0xf2, 0xc6, 0x6d, 0x4c, 0xe1, - 0x14, 0x4d, 0x57, 0x7a, 0xae, 0xe2, 0x14, 0x5a, 0xad, 0xf6, 0xaf, 0xc3, 0x84, 0x17, 0xb7, 0x62, - 0x6f, 0x25, 0xa0, 0xf3, 0xdf, 0x69, 0x49, 0xf1, 0x4d, 0x6d, 0x7f, 0xda, 0x54, 0x85, 0xc5, 0x19, - 0x6a, 0x4d, 0xdf, 0x0e, 0x96, 0xb6, 0x16, 0x8a, 0x23, 0x34, 0xbf, 0x02, 0x35, 0x15, 0x49, 0x23, - 0x03, 0xa0, 0xac, 0xfc, 0x00, 0xa8, 0x12, 0x0a, 0x47, 0xfa, 0x48, 0xab, 0xb9, 0x3e, 0xd2, 0x7f, - 0x6a, 0x41, 0x1a, 0x34, 0x80, 0x30, 0xd4, 0xda, 0x21, 0x3b, 0x80, 0x88, 0xe4, 0x61, 0xdf, 0xd3, - 0x05, 0x92, 0xc8, 0x67, 0x02, 0x97, 0x95, 0x86, 0x2c, 0x8b, 0x53, 0x36, 0xe8, 0x16, 0x0c, 0xb7, - 0x23, 0xd2, 0x4c, 0x58, 0x98, 0x6f, 0x1f, 0x1c, 0x99, 0x54, 0x37, 0x78, 0x49, 0x2c, 0x59, 0xd8, - 0xbf, 0x6e, 0x01, 0xdc, 0xf2, 0xb6, 0xbd, 0x04, 0x3b, 0xc1, 0x26, 0x39, 0xc6, 0x5d, 0xe4, 0x6d, - 0x18, 0x88, 0xdb, 0xa4, 0x55, 0xee, 0xe8, 0x28, 0x6d, 0x51, 0xb3, 0x4d, 0x5a, 0xe9, 0x67, 0xa0, - 0xff, 0x30, 0xe3, 0x63, 0xff, 0x0a, 0xc0, 0x44, 0x4a, 0x46, 0xcd, 0x78, 0xf4, 0xbc, 0x11, 0xd7, - 0xfa, 0x78, 0x26, 0xae, 0xb5, 0xc6, 0xa8, 0xb5, 0x50, 0xd6, 0x04, 0xaa, 0xdb, 0xce, 0x43, 0xb1, - 0x6b, 0x78, 0xa9, 0x6c, 0x83, 0x68, 0x4d, 0x73, 0xab, 0xce, 0x43, 0x6e, 0x46, 0x3d, 0x27, 0x05, - 0x68, 0xd5, 0x79, 0x78, 0xc0, 0x0f, 0x88, 0xd8, 0x0c, 0xa4, 0xdb, 0x94, 0xaf, 0xff, 0x97, 0xf4, - 0x3f, 0x53, 0x8a, 0xb4, 0x3a, 0x56, 0xab, 0x17, 0x08, 0x57, 0x5f, 0x9f, 0xb5, 0x7a, 0x41, 0xb6, - 0x56, 0x2f, 0x28, 0x51, 0xab, 0x17, 0xa0, 0xf7, 0x2d, 0x18, 0x16, 0x1e, 0x72, 0x16, 0x7e, 0x35, - 0x7a, 0xed, 0xd3, 0x7d, 0x55, 0x2d, 0x5c, 0xed, 0xbc, 0xfa, 0x2b, 0xd2, 0x76, 
0x14, 0xd0, 0xc2, - 0x26, 0xc8, 0xaa, 0xd1, 0xb7, 0x2d, 0x98, 0x10, 0xbf, 0x31, 0x79, 0xaf, 0x43, 0xe2, 0x44, 0xac, - 0x52, 0x9f, 0x3b, 0x4a, 0x6b, 0x04, 0x0b, 0xde, 0xa8, 0x9f, 0x96, 0x2a, 0xc6, 0x44, 0x16, 0xb6, - 0x2d, 0xd3, 0x1e, 0xf4, 0x3d, 0x0b, 0xce, 0x6c, 0x3b, 0x0f, 0x79, 0x8d, 0x1c, 0x86, 0x9d, 0xc4, - 0x0b, 0x45, 0x88, 0xd9, 0x72, 0xbf, 0x72, 0xd2, 0xc5, 0x88, 0x37, 0xf7, 0x35, 0x79, 0x6c, 0x99, - 0x47, 0x52, 0xd8, 0xe8, 0xdc, 0x16, 0xce, 0xb8, 0x30, 0x22, 0x05, 0x33, 0xc7, 0x6a, 0x5f, 0xd0, - 0x17, 0xe3, 0xc3, 0x67, 0xa0, 0x74, 0xa0, 0xcd, 0xbd, 0xd1, 0x71, 0x82, 0xc4, 0x4b, 0x76, 0x35, - 0x1b, 0x9f, 0xd5, 0x22, 0x04, 0xf1, 0x18, 0x6b, 0xd9, 0x82, 0x31, 0x5d, 0xe6, 0x8e, 0xb1, 0xa6, - 0x10, 0x4e, 0xe7, 0xc8, 0xd3, 0x31, 0x56, 0xd8, 0x81, 0xc7, 0x7b, 0xca, 0xc5, 0xf1, 0x55, 0x6b, - 0xff, 0xd0, 0xd2, 0x15, 0xe6, 0x49, 0x38, 0x66, 0x56, 0x4d, 0xc7, 0xcc, 0xa5, 0xb2, 0x53, 0xa7, - 0x87, 0x77, 0x66, 0x43, 0x6f, 0x3f, 0x5d, 0x09, 0xd0, 0x1a, 0x0c, 0xf9, 0x14, 0x22, 0x4f, 0x83, - 0x2e, 0xf7, 0x33, 0x39, 0x53, 0xe3, 0x82, 0xc1, 0x63, 0x2c, 0x78, 0xd9, 0xbf, 0x61, 0xc1, 0xc0, - 0x49, 0x0c, 0x4f, 0xc3, 0x1c, 0x9e, 0x5e, 0x26, 0xaa, 0xb8, 0xeb, 0x39, 0x87, 0x9d, 0x07, 0x4b, - 0x0f, 0x13, 0x12, 0xc4, 0xcc, 0x94, 0xcc, 0x1d, 0xa1, 0x5f, 0xae, 0xc0, 0x28, 0xad, 0x48, 0xfa, - 0x89, 0x5e, 0x85, 0x71, 0xdf, 0x59, 0x27, 0xbe, 0x74, 0x27, 0x67, 0xb7, 0x5d, 0xb7, 0x74, 0x24, - 0x36, 0x69, 0x69, 0xe1, 0x0d, 0xdd, 0xdb, 0x2e, 0x4c, 0x22, 0x55, 0xd8, 0x70, 0xc5, 0x63, 0x93, - 0x96, 0x5a, 0xfe, 0x0f, 0x9c, 0xa4, 0xb5, 0x25, 0xb6, 0x64, 0xaa, 0xb9, 0xf7, 0x28, 0x10, 0x73, - 0x1c, 0x9a, 0x87, 0x49, 0x29, 0xb1, 0x6f, 0xf2, 0xa1, 0x13, 0xe6, 0xa2, 0xba, 0xa7, 0x87, 0x4d, - 0x34, 0xce, 0xd2, 0xa3, 0xcf, 0xc0, 0x04, 0x1d, 0x9c, 0xb0, 0x93, 0xc8, 0x60, 0x85, 0x41, 0x16, - 0xac, 0xc0, 0x82, 0x43, 0xd7, 0x0c, 0x0c, 0xce, 0x50, 0xda, 0xef, 0xc2, 0xe9, 0x5b, 0xa1, 0xe3, - 0x2e, 0x38, 0xbe, 0x13, 0xb4, 0x48, 0xb4, 0x12, 0x6c, 0x16, 0x9e, 0xeb, 0xea, 0x67, 0xaf, 0x95, - 0xa2, 0xb3, 0x57, 0x3b, 0x02, 0xa4, 0x57, 0x20, 0xc2, 0x6c, 0xde, 0x86, 0x61, 0x8f, 0x57, 0x25, - 0xa4, 0xf6, 0x6a, 0x91, 0x53, 0xa9, 0xab, 0x8d, 0x5a, 0xd8, 0x08, 0x07, 0x60, 0xc9, 0x92, 0xee, - 0x24, 0xf2, 0xbc, 0x50, 0xc5, 0x9b, 0x35, 0xfb, 0xaf, 0x58, 0x30, 0x79, 0x3b, 0x73, 0x19, 0xec, - 0x22, 0x0c, 0xc5, 0x24, 0xca, 0x71, 0xa9, 0x35, 0x19, 0x14, 0x0b, 0xec, 0x23, 0xdf, 0xa6, 0xff, - 0x42, 0x05, 0x6a, 0x2c, 0x66, 0xb3, 0xed, 0xb4, 0x8e, 0xd3, 0x28, 0x5d, 0x35, 0x8c, 0xd2, 0x82, - 0x4d, 0xa2, 0x6a, 0x50, 0x2f, 0x9b, 0x14, 0xdd, 0x55, 0x97, 0xa3, 0x4a, 0xed, 0x0f, 0x53, 0x86, - 0xfc, 0x1e, 0xcd, 0x84, 0x79, 0x97, 0x4a, 0x5e, 0x9c, 0x62, 0xa7, 0xa1, 0x8a, 0xf6, 0xa3, 0x77, - 0x1a, 0xaa, 0x9a, 0xd6, 0x43, 0x2b, 0x35, 0xb4, 0xd6, 0x33, 0xb5, 0xfd, 0x59, 0x16, 0x80, 0xe7, - 0xf8, 0xde, 0x57, 0x89, 0xba, 0x64, 0x38, 0x2b, 0xe2, 0xe9, 0x04, 0xf4, 0x80, 0x29, 0x18, 0xf1, - 0x8f, 0xdf, 0x1d, 0x4d, 0x8b, 0xd8, 0x37, 0x60, 0x32, 0x33, 0x76, 0xe8, 0x25, 0x18, 0x6c, 0x6f, - 0x39, 0x31, 0xc9, 0x44, 0x76, 0x0c, 0x36, 0x28, 0xf0, 0x60, 0x6f, 0x76, 0x42, 0x15, 0x60, 0x10, - 0xcc, 0xa9, 0xed, 0x3f, 0xb6, 0x60, 0xe0, 0x76, 0xe8, 0x1e, 0xa7, 0x8c, 0xdd, 0x30, 0x64, 0xec, - 0x62, 0xf1, 0x8d, 0xf3, 0x9e, 0xe2, 0xd5, 0xc8, 0x88, 0xd7, 0xa5, 0x12, 0xbc, 0x0e, 0x97, 0xac, - 0x6d, 0x18, 0x65, 0x37, 0xda, 0x45, 0x48, 0xcb, 0x0b, 0xc6, 0x06, 0x6a, 0x36, 0xb3, 0x81, 0x9a, - 0xd4, 0x48, 0xb5, 0x6d, 0xd4, 0x33, 0x30, 0x2c, 0x42, 0x28, 0xb2, 0x51, 0x87, 0x82, 0x16, 0x4b, - 0xbc, 0xfd, 0x6b, 0x55, 0x30, 0x6e, 0xd0, 0xa3, 0x1f, 0x59, 0x30, 0x17, 0xf1, 0x2b, 0x0f, 0x6e, - 0xbd, 
0x13, 0x79, 0xc1, 0x66, 0xb3, 0xb5, 0x45, 0xdc, 0x8e, 0xef, 0x05, 0x9b, 0x2b, 0x9b, 0x41, - 0xa8, 0xc0, 0x4b, 0x0f, 0x49, 0xab, 0xc3, 0xbc, 0xab, 0xa5, 0x2f, 0xee, 0xab, 0x33, 0xd4, 0x6b, - 0xfb, 0x7b, 0xb3, 0x73, 0xb8, 0xaf, 0x5a, 0x70, 0x9f, 0xad, 0x42, 0xbf, 0x67, 0xc1, 0x15, 0x7e, - 0x87, 0xbc, 0x7c, 0x4f, 0x4a, 0x6d, 0x3c, 0x1b, 0x92, 0x69, 0xca, 0x6e, 0x8d, 0x44, 0xdb, 0x0b, - 0x2f, 0x8b, 0x41, 0xbe, 0xd2, 0xe8, 0xaf, 0x56, 0xdc, 0x6f, 0x33, 0xed, 0x7f, 0x53, 0x85, 0x71, - 0x3a, 0x9e, 0xe9, 0xfd, 0xd1, 0x97, 0x0c, 0x31, 0xf9, 0x78, 0x46, 0x4c, 0x4e, 0x19, 0xc4, 0x8f, - 0xe6, 0xea, 0x68, 0x02, 0xa7, 0x7c, 0x27, 0x4e, 0x6e, 0x10, 0x27, 0x4a, 0xd6, 0x89, 0xc3, 0x0e, - 0x2c, 0xc5, 0x24, 0xe8, 0xeb, 0x10, 0x54, 0xc5, 0xe5, 0xdc, 0xca, 0x72, 0xc3, 0xdd, 0x15, 0xa0, - 0x07, 0x80, 0xd8, 0xe9, 0x68, 0xe4, 0x04, 0x31, 0xef, 0x8c, 0x27, 0x1c, 0xb2, 0x7d, 0x56, 0x3b, - 0x23, 0xaa, 0x45, 0xb7, 0xba, 0xd8, 0xe1, 0x9c, 0x2a, 0xb4, 0x23, 0xf0, 0xc1, 0xb2, 0x47, 0xe0, - 0x43, 0x05, 0x01, 0xbf, 0x3f, 0x6f, 0xc1, 0x69, 0xfa, 0x61, 0xcc, 0xe0, 0xd0, 0x18, 0x85, 0x30, - 0x49, 0x7b, 0xe0, 0x93, 0x44, 0xc2, 0xc4, 0x0c, 0x2b, 0xb0, 0xa5, 0x4d, 0x3e, 0xa9, 0xc5, 0x76, - 0xd3, 0x64, 0x86, 0xb3, 0xdc, 0xed, 0x5f, 0xb3, 0x80, 0x45, 0x9f, 0x9d, 0xc4, 0x3a, 0x76, 0xdd, - 0x5c, 0xc7, 0xec, 0x62, 0xa5, 0xd1, 0x63, 0x09, 0x7b, 0x11, 0xa6, 0x28, 0xb6, 0x11, 0x85, 0x0f, - 0x77, 0xa5, 0x71, 0x5d, 0xec, 0x9b, 0xfd, 0xcb, 0x16, 0x57, 0x77, 0xca, 0x2a, 0x7e, 0x00, 0xa7, - 0x02, 0xed, 0x3f, 0x9d, 0xc8, 0xd2, 0x08, 0x9c, 0x2b, 0xaf, 0xd0, 0xd8, 0xfc, 0xd7, 0x22, 0xcc, - 0x32, 0x0c, 0x71, 0x77, 0x1d, 0xf6, 0x3f, 0xb0, 0xe0, 0x31, 0x9d, 0x50, 0xbb, 0x66, 0x56, 0xe4, - 0x10, 0xad, 0xc3, 0x48, 0xd8, 0x26, 0x91, 0x93, 0xee, 0x00, 0x2e, 0xc9, 0x11, 0xbf, 0x23, 0xe0, - 0x07, 0x7b, 0xb3, 0x67, 0x74, 0xee, 0x12, 0x8e, 0x55, 0x49, 0x64, 0xc3, 0x10, 0xdb, 0x89, 0xc6, - 0xe2, 0x82, 0x20, 0xcb, 0x44, 0xc1, 0x8e, 0x01, 0x62, 0x2c, 0x30, 0xf6, 0x5f, 0xb5, 0xf8, 0x28, - 0xeb, 0x4d, 0x47, 0x5f, 0x83, 0xa9, 0x6d, 0xba, 0x59, 0x58, 0x7a, 0xd8, 0xa6, 0x4b, 0x08, 0x3b, - 0xfe, 0xb4, 0xca, 0x28, 0xce, 0x1e, 0xdd, 0x5d, 0x98, 0x16, 0xad, 0x9f, 0x5a, 0xcd, 0xb0, 0xc5, - 0x5d, 0x15, 0xd9, 0xbf, 0x2f, 0x64, 0x95, 0x59, 0x2d, 0xcf, 0xc0, 0x70, 0x3b, 0x74, 0x17, 0x57, - 0xea, 0x58, 0x8c, 0x95, 0x9a, 0x6c, 0x0d, 0x0e, 0xc6, 0x12, 0x8f, 0xae, 0x01, 0x90, 0x87, 0x09, - 0x89, 0x02, 0xc7, 0x57, 0xc7, 0x96, 0xca, 0x48, 0x58, 0x52, 0x18, 0xac, 0x51, 0xd1, 0x32, 0xed, - 0x28, 0xdc, 0xf1, 0x5c, 0x16, 0xee, 0x5d, 0x35, 0xcb, 0x34, 0x14, 0x06, 0x6b, 0x54, 0x74, 0x8b, - 0xd6, 0x09, 0x62, 0xae, 0xc0, 0x9d, 0x75, 0x91, 0x40, 0x61, 0x24, 0xdd, 0xa2, 0xdd, 0xd5, 0x91, - 0xd8, 0xa4, 0xb5, 0x7f, 0xbb, 0x06, 0x90, 0x9a, 0x08, 0xe8, 0x7d, 0x0b, 0x46, 0x5a, 0x4e, 0xdb, - 0x69, 0xf1, 0xec, 0x38, 0xd5, 0xe2, 0x7b, 0x31, 0x69, 0xe1, 0xb9, 0x45, 0x51, 0x90, 0xfb, 0xb6, - 0x3e, 0x25, 0x05, 0x44, 0x82, 0x0b, 0xfd, 0x59, 0xaa, 0x66, 0xf4, 0x4d, 0x0b, 0x46, 0x1d, 0xdf, - 0x0f, 0x5b, 0x4e, 0xc2, 0x7a, 0x54, 0x29, 0xe3, 0xac, 0xd4, 0x5a, 0x32, 0x9f, 0x96, 0xe5, 0x8d, - 0x79, 0x41, 0x9e, 0x6a, 0x69, 0x98, 0xc2, 0xf6, 0xe8, 0x4d, 0x40, 0x9f, 0x92, 0xa6, 0x25, 0xff, - 0x28, 0x33, 0x59, 0xd3, 0xb2, 0xc6, 0x54, 0x83, 0x66, 0x55, 0xa2, 0x77, 0x8d, 0x5c, 0x01, 0x03, - 0x65, 0xee, 0xa6, 0x1a, 0x8b, 0x66, 0x51, 0x9a, 0x00, 0xf4, 0x45, 0x3d, 0x12, 0x76, 0xb0, 0xcc, - 0xc5, 0x4f, 0xcd, 0x76, 0x2b, 0x88, 0x82, 0x4d, 0x60, 0xd2, 0x35, 0x17, 0x09, 0x11, 0xda, 0x74, - 0xb5, 0xb8, 0x86, 0xcc, 0xea, 0x92, 0x2e, 0x0b, 0x19, 0x04, 0xce, 0x56, 0x81, 0xbe, 0xc8, 0xe3, - 0x94, 0x57, 0x82, 0x8d, 0x50, 
0x44, 0x37, 0x5d, 0x2e, 0xf1, 0xcd, 0x77, 0xe3, 0x84, 0x6c, 0xd3, - 0x32, 0xe9, 0x32, 0x70, 0x5b, 0x70, 0xc1, 0x8a, 0x1f, 0x5a, 0x83, 0x21, 0x76, 0xab, 0x22, 0x9e, - 0x1e, 0x29, 0xe3, 0x26, 0x32, 0xef, 0x13, 0xa6, 0x8b, 0x2f, 0xfb, 0x1b, 0x63, 0xc1, 0x0b, 0xdd, - 0x90, 0xf7, 0x6f, 0xe3, 0x95, 0xe0, 0x6e, 0x4c, 0xd8, 0xfd, 0xdb, 0xda, 0xc2, 0x27, 0xd2, 0x0b, - 0xb5, 0x1c, 0x9e, 0x9b, 0x1d, 0xc9, 0x28, 0x49, 0xd7, 0x60, 0xf1, 0x5f, 0x26, 0x5d, 0x9a, 0x86, - 0x32, 0x0d, 0x35, 0x53, 0x34, 0xa5, 0x83, 0xfd, 0xa6, 0xc9, 0x0c, 0x67, 0xb9, 0xcf, 0x78, 0x30, - 0x6e, 0xcc, 0xd8, 0x63, 0x74, 0x76, 0xfa, 0x30, 0x95, 0x9d, 0x92, 0xc7, 0xe8, 0xe3, 0xfc, 0xa3, - 0x01, 0x98, 0x30, 0x05, 0x03, 0x5d, 0x81, 0xda, 0x36, 0x4b, 0x89, 0x94, 0x26, 0x62, 0x51, 0xf2, - 0xbf, 0x2a, 0x11, 0x38, 0xa5, 0x61, 0x29, 0x69, 0x58, 0x71, 0x2d, 0xe6, 0x24, 0x4d, 0x49, 0xa3, - 0x30, 0x58, 0xa3, 0xa2, 0x06, 0xdb, 0x7a, 0x18, 0x26, 0x4a, 0x71, 0x2b, 0x99, 0x59, 0x60, 0x50, - 0x2c, 0xb0, 0x54, 0x61, 0xdf, 0xa7, 0x1d, 0xf2, 0x4d, 0x7f, 0x97, 0x52, 0xd8, 0x37, 0x75, 0x24, - 0x36, 0x69, 0xe9, 0x02, 0x14, 0xc6, 0x4c, 0x08, 0x85, 0x59, 0x98, 0xc6, 0xf0, 0x34, 0xf9, 0x2d, - 0x23, 0x89, 0x47, 0x5f, 0x80, 0xc7, 0xd4, 0xa5, 0x20, 0xcc, 0xfd, 0x87, 0xb2, 0xc6, 0x21, 0x63, - 0x6f, 0xf7, 0xd8, 0x62, 0x3e, 0x19, 0xee, 0x55, 0x1e, 0xbd, 0x0e, 0x13, 0xc2, 0xa4, 0x93, 0x1c, - 0x87, 0xcd, 0x23, 0xde, 0x9b, 0x06, 0x16, 0x67, 0xa8, 0x51, 0x1d, 0xa6, 0x28, 0x84, 0x99, 0x52, - 0x92, 0x03, 0xbf, 0xdc, 0xa4, 0x56, 0xe6, 0x9b, 0x19, 0x3c, 0xee, 0x2a, 0x81, 0xe6, 0x61, 0x92, - 0xdb, 0x16, 0x74, 0x07, 0xc3, 0xbe, 0x83, 0x88, 0x46, 0x54, 0x93, 0xe0, 0x8e, 0x89, 0xc6, 0x59, - 0x7a, 0xf4, 0x0a, 0x8c, 0x39, 0x51, 0x6b, 0xcb, 0x4b, 0x48, 0x2b, 0xe9, 0x44, 0xfc, 0x66, 0xbb, - 0x76, 0x46, 0x3e, 0xaf, 0xe1, 0xb0, 0x41, 0x69, 0x7f, 0x15, 0x4e, 0xe7, 0x04, 0x3d, 0x53, 0xc1, - 0x71, 0xda, 0x9e, 0xec, 0x53, 0x26, 0x1a, 0x67, 0xbe, 0xb1, 0x22, 0x7b, 0xa3, 0x51, 0x51, 0xe9, - 0x64, 0x8e, 0x53, 0x2d, 0x3f, 0x9a, 0x92, 0xce, 0x65, 0x89, 0xc0, 0x29, 0x8d, 0xfd, 0xdf, 0x6b, - 0xa0, 0xb9, 0x19, 0x4a, 0xc4, 0x60, 0xbc, 0x02, 0x63, 0x32, 0xe5, 0x9f, 0x96, 0x6a, 0x4b, 0x75, - 0xf3, 0xba, 0x86, 0xc3, 0x06, 0x25, 0x6d, 0x5b, 0x20, 0x9d, 0x26, 0xd9, 0xd8, 0x1f, 0xe5, 0x4d, - 0xc1, 0x29, 0x0d, 0xba, 0x0c, 0x23, 0x31, 0xf1, 0x37, 0x6e, 0x79, 0xc1, 0x7d, 0x21, 0xd8, 0x4a, - 0x2b, 0x37, 0x05, 0x1c, 0x2b, 0x0a, 0xf4, 0x39, 0xa8, 0x76, 0x3c, 0x57, 0x88, 0xf2, 0x9c, 0xb4, - 0x3b, 0xef, 0xae, 0xd4, 0x0f, 0xf6, 0x66, 0x67, 0xf3, 0xf3, 0x18, 0xd2, 0x6d, 0x64, 0x3c, 0x47, - 0x27, 0x1f, 0x2d, 0x9a, 0xe7, 0x3f, 0x1e, 0xea, 0xd3, 0x7f, 0x7c, 0x0d, 0x40, 0xf4, 0x59, 0x4a, - 0x72, 0x35, 0xfd, 0x66, 0xd7, 0x15, 0x06, 0x6b, 0x54, 0x74, 0x33, 0xda, 0x8a, 0x88, 0x23, 0x77, - 0x6b, 0x3c, 0x22, 0x77, 0xe4, 0x43, 0x6c, 0x46, 0x17, 0xb3, 0xdc, 0x70, 0x77, 0x05, 0xa8, 0x0d, - 0xa7, 0x5c, 0x3a, 0x8f, 0x8c, 0x5a, 0x6b, 0x47, 0x88, 0x03, 0xa6, 0x35, 0xd6, 0xb3, 0x9c, 0x70, - 0x37, 0x73, 0xf4, 0x0e, 0xcc, 0x48, 0x60, 0xf7, 0xb5, 0x3f, 0x36, 0x5d, 0xaa, 0x0b, 0xe7, 0xf7, - 0xf7, 0x66, 0x67, 0xea, 0x3d, 0xa9, 0xf0, 0x21, 0x1c, 0xd0, 0xdb, 0x30, 0xc4, 0x4e, 0x1c, 0xe2, - 0xe9, 0x51, 0xb6, 0xda, 0xbd, 0x58, 0xd6, 0xe1, 0x36, 0xc7, 0xce, 0x2d, 0x44, 0x20, 0x63, 0x7a, - 0x8a, 0xc3, 0x80, 0x58, 0xf0, 0x44, 0x6d, 0x18, 0x75, 0x82, 0x20, 0x4c, 0x1c, 0x6e, 0x84, 0x8d, - 0x95, 0xb1, 0x23, 0xb5, 0x2a, 0xe6, 0xd3, 0xb2, 0xbc, 0x1e, 0x15, 0x1d, 0xa5, 0x61, 0xb0, 0x5e, - 0x05, 0x5d, 0xc6, 0xc3, 0x07, 0x54, 0x61, 0x4a, 0xa7, 0x7b, 0x3c, 0x3d, 0x5e, 0x66, 0x19, 0xbf, - 0x63, 0x14, 0xd2, 0x34, 0x98, 0xc9, 0x0c, 0x67, 0xb9, 
0xa3, 0x39, 0xc3, 0x8f, 0x3a, 0x91, 0x86, - 0xe9, 0xa6, 0x7e, 0x54, 0xdd, 0x6d, 0xca, 0xae, 0x94, 0xf2, 0xd0, 0x3c, 0xa6, 0x09, 0x26, 0x33, - 0x57, 0x4a, 0x53, 0x14, 0xd6, 0xe9, 0x66, 0x3e, 0x0d, 0xa3, 0xda, 0x80, 0xf7, 0x13, 0x0f, 0x3a, - 0xf3, 0x3a, 0x4c, 0x65, 0x07, 0xb2, 0xaf, 0x78, 0xd2, 0xff, 0x59, 0x81, 0xc9, 0x9c, 0x93, 0x8c, - 0xfb, 0x1e, 0x8b, 0x98, 0x36, 0x54, 0xde, 0x4d, 0x2f, 0x70, 0x31, 0xc3, 0x98, 0x8a, 0xab, 0x52, - 0x42, 0x71, 0x49, 0x2d, 0x5a, 0xed, 0xa9, 0x45, 0x85, 0xb2, 0x1a, 0x38, 0xba, 0xb2, 0x32, 0x57, - 0x87, 0xc1, 0x52, 0xab, 0xc3, 0x23, 0x50, 0x70, 0xc6, 0x02, 0x33, 0x5c, 0x62, 0x81, 0x39, 0xb0, - 0x60, 0xc2, 0x94, 0xbc, 0x12, 0x23, 0xfe, 0x51, 0x1d, 0xc0, 0x39, 0xb6, 0x11, 0x4b, 0xa2, 0xd0, - 0xf7, 0x49, 0x24, 0x22, 0xc5, 0x26, 0xc4, 0xbe, 0x4a, 0x40, 0xb1, 0x46, 0x61, 0x7f, 0xbb, 0x02, - 0x53, 0x69, 0xd8, 0xb0, 0x48, 0x7d, 0x7a, 0x7c, 0x47, 0x03, 0x6b, 0xc6, 0xd1, 0x40, 0x51, 0x46, - 0xd3, 0x4c, 0xbb, 0x7a, 0x1e, 0x13, 0xbc, 0x9d, 0x39, 0x26, 0x78, 0xb1, 0x4f, 0xbe, 0x87, 0x1f, - 0x19, 0xfc, 0xb3, 0x0a, 0x9c, 0xcd, 0x16, 0x59, 0xf4, 0x1d, 0x6f, 0xfb, 0x18, 0xc7, 0xe9, 0x0b, - 0xc6, 0x38, 0xbd, 0xdc, 0x5f, 0x7f, 0x58, 0xe3, 0x7a, 0x0e, 0x96, 0x93, 0x19, 0xac, 0x4f, 0x1f, - 0x85, 0xf9, 0xe1, 0x23, 0xf6, 0x3b, 0x16, 0x3c, 0x9e, 0x5b, 0xee, 0x24, 0x5c, 0xa0, 0x6f, 0x99, - 0x2e, 0xd0, 0x17, 0x8e, 0xd0, 0xbd, 0x1e, 0x3e, 0xd1, 0xff, 0x5a, 0xe9, 0xd1, 0x2d, 0xe6, 0x2d, - 0xbb, 0x03, 0xa3, 0x4e, 0xab, 0x45, 0xe2, 0x78, 0x35, 0x74, 0x55, 0x2e, 0x9e, 0xe7, 0xd9, 0xfa, - 0x99, 0x82, 0x0f, 0xf6, 0x66, 0x67, 0xb2, 0x2c, 0x52, 0x34, 0xd6, 0x39, 0x98, 0x39, 0xb5, 0x2a, - 0xc7, 0x94, 0x53, 0xeb, 0x1a, 0xc0, 0x8e, 0xda, 0xa5, 0x67, 0x9d, 0x70, 0xda, 0xfe, 0x5d, 0xa3, - 0x42, 0xef, 0x30, 0xab, 0x97, 0x87, 0x48, 0x0c, 0x14, 0x4e, 0x38, 0xe3, 0x03, 0xea, 0xf1, 0x16, - 0xfc, 0xce, 0xa4, 0xf2, 0x58, 0x2a, 0x9e, 0xf6, 0x77, 0xab, 0xf0, 0x53, 0x87, 0x88, 0x1d, 0x9a, - 0x37, 0x4f, 0x3e, 0x9f, 0xcb, 0xba, 0xa7, 0x66, 0x72, 0x0b, 0x1b, 0xfe, 0xaa, 0xcc, 0xc7, 0xaa, - 0x7c, 0xe8, 0x8f, 0xf5, 0x2d, 0xdd, 0x99, 0xc8, 0x43, 0x1d, 0xaf, 0x1f, 0x79, 0x62, 0x3d, 0x3a, - 0xef, 0xe2, 0x09, 0x3a, 0x3e, 0xec, 0xaf, 0x5b, 0xf0, 0xf1, 0xdc, 0x4e, 0x19, 0x01, 0x16, 0x57, - 0xa0, 0xd6, 0xa2, 0x40, 0xed, 0x2e, 0x4a, 0x7a, 0xc5, 0x4c, 0x22, 0x70, 0x4a, 0x63, 0xc4, 0x51, - 0x54, 0x0a, 0xe3, 0x28, 0xfe, 0xbd, 0x05, 0x67, 0xb2, 0x8d, 0x38, 0x09, 0xad, 0xd3, 0x34, 0xb5, - 0xce, 0x5c, 0x7f, 0xdf, 0xbe, 0x87, 0xc2, 0xf9, 0xf6, 0x38, 0x9c, 0xeb, 0x5a, 0xac, 0xf8, 0x30, - 0xfe, 0xac, 0x05, 0xa7, 0x36, 0xd9, 0xfe, 0x42, 0xbb, 0xf1, 0x23, 0x3a, 0x56, 0x70, 0x4d, 0xea, - 0xd0, 0x8b, 0x42, 0x7c, 0xb7, 0xd4, 0x45, 0x82, 0xbb, 0x2b, 0x43, 0xdf, 0xb0, 0xe0, 0x8c, 0xf3, - 0x20, 0xee, 0xca, 0xa3, 0x2f, 0xe4, 0xe8, 0xf5, 0x02, 0x57, 0x5e, 0x41, 0x06, 0xfe, 0x85, 0xe9, - 0xfd, 0xbd, 0xd9, 0x33, 0x79, 0x54, 0x38, 0xb7, 0x56, 0xf4, 0xb6, 0xc8, 0x3f, 0x46, 0xcd, 0xbe, - 0x52, 0x77, 0xd7, 0xf2, 0xee, 0x1f, 0x70, 0x9d, 0x24, 0x31, 0x58, 0x71, 0x44, 0x5f, 0x86, 0xda, - 0xa6, 0xbc, 0xe4, 0x23, 0x94, 0x5e, 0xc1, 0xca, 0x92, 0x7b, 0x27, 0x88, 0x47, 0xb9, 0x2b, 0x14, - 0x4e, 0x99, 0xa2, 0x1b, 0x50, 0x0d, 0x36, 0x62, 0x71, 0x5f, 0xb7, 0x28, 0x8e, 0xc6, 0x8c, 0x5a, - 0xe2, 0x37, 0x10, 0x6f, 0x2f, 0x37, 0x31, 0x65, 0x41, 0x39, 0x45, 0xeb, 0xae, 0xf0, 0x61, 0x17, - 0x70, 0xc2, 0x0b, 0xf5, 0x6e, 0x4e, 0x78, 0xa1, 0x8e, 0x29, 0x0b, 0x16, 0xb0, 0x17, 0xb7, 0x62, - 0x4f, 0x38, 0xa8, 0x0b, 0x2e, 0x73, 0x77, 0xdd, 0xca, 0xe0, 0xa9, 0xe8, 0x18, 0x18, 0x73, 0x46, - 0x68, 0x0d, 0x86, 0x5a, 0x2c, 0x75, 0xb4, 0xf0, 0x1f, 0x14, 0x25, 0x14, 0xee, 
0x4a, 0x33, 0xcd, - 0x0f, 0xd2, 0x38, 0x1c, 0x0b, 0x5e, 0x8c, 0x2b, 0x69, 0x6f, 0x6d, 0xc4, 0xc2, 0x3f, 0x50, 0xc4, - 0xb5, 0x2b, 0x09, 0xb8, 0xe0, 0xca, 0xe0, 0x58, 0xf0, 0x42, 0x75, 0xa8, 0x6c, 0xb4, 0x44, 0xfe, - 0xc7, 0x82, 0x1d, 0xad, 0x79, 0x9d, 0x74, 0x61, 0x68, 0x7f, 0x6f, 0xb6, 0xb2, 0xbc, 0x88, 0x2b, - 0x1b, 0x2d, 0xf4, 0x16, 0x0c, 0x6f, 0xf0, 0x0b, 0x82, 0x22, 0xd7, 0xe3, 0xd5, 0xa2, 0x5b, 0x8c, - 0x5d, 0xb7, 0x09, 0xf9, 0x4d, 0x06, 0x81, 0xc0, 0x92, 0x1d, 0x7a, 0x07, 0x60, 0x43, 0x5d, 0x79, - 0x14, 0xc9, 0x1e, 0xe7, 0xfa, 0xbb, 0x22, 0x29, 0x76, 0xcf, 0x0a, 0x8a, 0x35, 0x8e, 0x54, 0xe6, - 0x1d, 0x99, 0xfd, 0x9e, 0x25, 0x7a, 0x2c, 0x94, 0xf9, 0xdc, 0x64, 0xf9, 0x5c, 0xe6, 0x15, 0x0a, - 0xa7, 0x4c, 0x51, 0x07, 0xc6, 0x77, 0xe2, 0xf6, 0x16, 0x91, 0x53, 0x9f, 0x65, 0x7f, 0x1c, 0xbd, - 0xf6, 0x5a, 0x41, 0x4a, 0x4f, 0x51, 0xc4, 0x8b, 0x92, 0x8e, 0xe3, 0x77, 0x69, 0x30, 0x96, 0x46, - 0xe9, 0x4d, 0x9d, 0x2d, 0x36, 0x6b, 0xa1, 0x9f, 0xe4, 0xbd, 0x4e, 0xb8, 0xbe, 0x9b, 0x10, 0x91, - 0x1d, 0xb2, 0xe0, 0x93, 0xbc, 0xc1, 0x89, 0xbb, 0x3f, 0x89, 0x40, 0x60, 0xc9, 0x4e, 0x0d, 0x19, - 0xd3, 0xc6, 0x53, 0xa5, 0x87, 0xac, 0xab, 0x0f, 0xe9, 0x90, 0x31, 0xed, 0x9b, 0x32, 0x65, 0x5a, - 0xb7, 0xbd, 0x15, 0x26, 0x61, 0x90, 0xd1, 0xfd, 0xa7, 0xca, 0x68, 0xdd, 0x46, 0x4e, 0xc9, 0x6e, - 0xad, 0x9b, 0x47, 0x85, 0x73, 0x6b, 0xb5, 0x7f, 0x7f, 0xb0, 0x7b, 0xbd, 0x65, 0xe6, 0xf0, 0x2f, - 0x76, 0x9f, 0xae, 0x7e, 0xae, 0xff, 0xed, 0xde, 0x23, 0x3c, 0x67, 0xfd, 0x86, 0x05, 0xe7, 0xda, - 0xb9, 0x8b, 0xa9, 0x58, 0xb0, 0xfa, 0xdd, 0x35, 0xf2, 0x01, 0x53, 0xa9, 0x4f, 0xf3, 0xf1, 0xb8, - 0x47, 0x9d, 0x59, 0x0b, 0xb4, 0xfa, 0xa1, 0x2d, 0xd0, 0x7b, 0x30, 0xc2, 0x8c, 0xa6, 0x34, 0xf7, - 0x46, 0x9f, 0xe9, 0x2a, 0xd8, 0xd2, 0xb7, 0x28, 0x58, 0x60, 0xc5, 0x8c, 0x0e, 0xdc, 0x93, 0xd9, - 0x4e, 0x60, 0xc2, 0xd0, 0x22, 0x67, 0x2b, 0x77, 0x4d, 0x2c, 0x8b, 0x91, 0x78, 0xb2, 0x71, 0x18, - 0xf1, 0x41, 0x11, 0x01, 0x3e, 0xbc, 0xb2, 0x93, 0xb4, 0x68, 0xff, 0xb1, 0x95, 0x63, 0x7f, 0xf1, - 0x3d, 0xc8, 0x6b, 0xe6, 0x1e, 0xe4, 0x62, 0x76, 0x0f, 0xd2, 0xe5, 0x31, 0x30, 0xb6, 0x1f, 0xe5, - 0xf3, 0x16, 0x96, 0x4d, 0x0e, 0x62, 0xfb, 0x70, 0xa1, 0x68, 0x72, 0xb3, 0x00, 0x1e, 0x57, 0x1d, - 0x0a, 0xa6, 0x01, 0x3c, 0xee, 0x4a, 0x1d, 0x33, 0x4c, 0xd9, 0xeb, 0xdf, 0xf6, 0xff, 0xb6, 0xa0, - 0xda, 0x08, 0xdd, 0x63, 0xf4, 0x80, 0x5c, 0x37, 0x3c, 0x20, 0x4f, 0x17, 0xbe, 0xf9, 0xd3, 0xd3, - 0xdf, 0x71, 0x27, 0xe3, 0xef, 0xf8, 0x64, 0x31, 0xab, 0xc3, 0xbd, 0x1b, 0xdf, 0xab, 0x82, 0xfe, - 0x6a, 0x11, 0xfa, 0xad, 0xa3, 0x84, 0x74, 0x56, 0xcb, 0x3d, 0x64, 0x24, 0xea, 0x60, 0x01, 0x50, - 0xf2, 0xbe, 0xd7, 0x9f, 0xda, 0xc8, 0xce, 0x7b, 0xc4, 0xdb, 0xdc, 0x4a, 0x88, 0x9b, 0xed, 0xd8, - 0xc9, 0x45, 0x76, 0xfe, 0x37, 0x0b, 0x26, 0x33, 0xb5, 0xa3, 0xed, 0xbc, 0x2b, 0x23, 0x47, 0x75, - 0x69, 0x9c, 0x2a, 0xbc, 0x64, 0x32, 0x07, 0xa0, 0xdc, 0xf0, 0xd2, 0xf1, 0xc0, 0x8c, 0x30, 0xe5, - 0xa7, 0x8f, 0xb1, 0x46, 0x81, 0x5e, 0x82, 0xd1, 0x24, 0x6c, 0x87, 0x7e, 0xb8, 0xb9, 0x7b, 0x93, - 0xc8, 0x8c, 0x04, 0xea, 0x08, 0x63, 0x2d, 0x45, 0x61, 0x9d, 0xce, 0xfe, 0x41, 0x15, 0xb2, 0x8f, - 0x5e, 0xfd, 0x7f, 0x41, 0xfd, 0xd3, 0x23, 0xa8, 0xbf, 0x6b, 0xc1, 0x14, 0xad, 0x9d, 0xc5, 0xaf, - 0xc8, 0xf8, 0x4b, 0x95, 0x6e, 0xdc, 0x3a, 0x24, 0xdd, 0xf8, 0x45, 0xaa, 0xee, 0xdc, 0xb0, 0x23, - 0x53, 0xe0, 0x68, 0x5a, 0x8c, 0x42, 0xb1, 0xc0, 0x0a, 0x3a, 0x12, 0x45, 0xe2, 0x72, 0x8a, 0x4e, - 0x47, 0xa2, 0x08, 0x0b, 0xac, 0xcc, 0x46, 0x3e, 0x90, 0x9f, 0x8d, 0x9c, 0x67, 0x0c, 0x12, 0x71, - 0x13, 0xc2, 0x0e, 0xd0, 0x32, 0x06, 0xc9, 0x80, 0x8a, 0x94, 0xc6, 0xfe, 0x17, 0x55, 0x18, 0x6b, - 0x84, 
0x6e, 0x1a, 0x5b, 0xfd, 0xa2, 0x11, 0x5b, 0x7d, 0x21, 0x13, 0x5b, 0x3d, 0xa5, 0xd3, 0x3e, - 0x9a, 0xd0, 0x6a, 0x91, 0x5b, 0x8a, 0xe5, 0xcb, 0x3f, 0x6a, 0x58, 0xb5, 0x91, 0x5b, 0x4a, 0x71, - 0xc2, 0x26, 0xe3, 0x3f, 0x53, 0xe1, 0xd4, 0x7f, 0x6c, 0xc1, 0x44, 0x23, 0x74, 0xa9, 0x88, 0xfe, - 0x59, 0x92, 0x47, 0x3d, 0x23, 0xd5, 0xd0, 0x21, 0x19, 0xa9, 0x7e, 0xd5, 0x82, 0xe1, 0x46, 0xe8, - 0x9e, 0x84, 0x2b, 0x71, 0xd9, 0x74, 0x25, 0x7e, 0xbc, 0x50, 0xf9, 0xf6, 0xf0, 0x1e, 0xfe, 0x46, - 0x15, 0xc6, 0x69, 0x93, 0xc3, 0x4d, 0xf9, 0xc1, 0x8c, 0xc1, 0xb1, 0x4a, 0x0c, 0x0e, 0x35, 0x07, - 0x43, 0xdf, 0x0f, 0x1f, 0x64, 0x3f, 0xde, 0x32, 0x83, 0x62, 0x81, 0x45, 0x97, 0x61, 0xa4, 0x1d, - 0x91, 0x1d, 0x2f, 0xec, 0xc4, 0xd9, 0xbb, 0x6e, 0x0d, 0x01, 0xc7, 0x8a, 0x02, 0xbd, 0x08, 0x63, - 0xb1, 0x17, 0xb4, 0x88, 0x0c, 0xac, 0x18, 0x60, 0x81, 0x15, 0x3c, 0xf1, 0x9f, 0x06, 0xc7, 0x06, - 0x15, 0x7a, 0x0b, 0x6a, 0xec, 0x3f, 0x9b, 0x43, 0x47, 0x48, 0x91, 0xce, 0xb3, 0x52, 0x49, 0x0e, - 0x38, 0x65, 0x86, 0xae, 0x01, 0x24, 0x32, 0x06, 0x24, 0x16, 0x67, 0xa6, 0xca, 0x38, 0x55, 0xd1, - 0x21, 0x31, 0xd6, 0xa8, 0xd0, 0x73, 0x50, 0x4b, 0x1c, 0xcf, 0xbf, 0xe5, 0x05, 0x24, 0x16, 0x51, - 0x34, 0x22, 0x81, 0xad, 0x00, 0xe2, 0x14, 0x4f, 0xd7, 0x7c, 0x76, 0xd3, 0x96, 0x3f, 0xc0, 0x30, - 0xc2, 0xa8, 0xd9, 0x9a, 0x7f, 0x4b, 0x41, 0xb1, 0x46, 0x61, 0xbf, 0xc0, 0xd6, 0xee, 0x3e, 0x63, - 0xef, 0x7f, 0x52, 0x01, 0xd4, 0x60, 0xb1, 0x26, 0xc6, 0x1b, 0x15, 0x5b, 0x30, 0x11, 0x93, 0x5b, - 0x5e, 0xd0, 0x79, 0x28, 0x58, 0x95, 0xbb, 0xed, 0xd0, 0x5c, 0xd2, 0xcb, 0xf0, 0xdb, 0xa5, 0x26, - 0x0c, 0x67, 0xf8, 0xd2, 0x21, 0x89, 0x3a, 0xc1, 0x7c, 0x7c, 0x37, 0x26, 0x91, 0x78, 0x65, 0x82, - 0x0d, 0x09, 0x96, 0x40, 0x9c, 0xe2, 0xa9, 0x0c, 0xb0, 0x3f, 0xb7, 0xc3, 0x00, 0x87, 0x61, 0x22, - 0xa5, 0x86, 0xa5, 0x1c, 0xd7, 0xe0, 0xd8, 0xa0, 0x42, 0xcb, 0x80, 0xe2, 0x4e, 0xbb, 0xed, 0xb3, - 0xa3, 0x2d, 0xc7, 0xbf, 0x1e, 0x85, 0x9d, 0x36, 0x0f, 0x37, 0x16, 0xd9, 0xba, 0x9b, 0x5d, 0x58, - 0x9c, 0x53, 0x82, 0x4e, 0xfa, 0x8d, 0x98, 0xfd, 0x16, 0xb7, 0x67, 0xb9, 0x83, 0xad, 0xc9, 0x40, - 0x58, 0xe2, 0xec, 0x0e, 0x5b, 0xaa, 0x58, 0xf6, 0xff, 0xa4, 0x13, 0x11, 0x44, 0x60, 0xbc, 0xcd, - 0x96, 0x23, 0x79, 0xbe, 0x5e, 0x6a, 0x28, 0x33, 0xd1, 0x2e, 0x3c, 0xcb, 0xb7, 0xce, 0x06, 0x9b, - 0x5c, 0xed, 0xff, 0x08, 0x4c, 0xd7, 0x88, 0x53, 0xc5, 0x61, 0x11, 0xcb, 0x2a, 0x6c, 0xb1, 0x4f, - 0x94, 0x79, 0xee, 0x26, 0xd5, 0xe3, 0x22, 0x32, 0x16, 0x4b, 0x2e, 0xe8, 0x4b, 0x3c, 0x40, 0x80, - 0xcd, 0xef, 0xf2, 0x6f, 0x50, 0x71, 0x7a, 0x23, 0x4a, 0x5b, 0xb0, 0xc0, 0x1a, 0x3b, 0x74, 0x0b, - 0xc6, 0x45, 0x8a, 0x78, 0xe1, 0x19, 0xa8, 0x1a, 0xbb, 0xe3, 0x71, 0xac, 0x23, 0x0f, 0xb2, 0x00, - 0x6c, 0x16, 0x46, 0x9b, 0xf0, 0xa4, 0xf6, 0x6e, 0x4c, 0x4e, 0x44, 0x16, 0x57, 0x1c, 0x1f, 0xdf, - 0xdf, 0x9b, 0x7d, 0x72, 0xed, 0x30, 0x42, 0x7c, 0x38, 0x1f, 0x74, 0x07, 0xce, 0x3a, 0xad, 0xc4, - 0xdb, 0x21, 0x75, 0xe2, 0xb8, 0xbe, 0x17, 0x10, 0xf3, 0x6a, 0xf5, 0xe3, 0xfb, 0x7b, 0xb3, 0x67, - 0xe7, 0xf3, 0x08, 0x70, 0x7e, 0x39, 0xf4, 0x1a, 0xd4, 0xdc, 0x20, 0x16, 0x63, 0x30, 0x64, 0x3c, - 0x91, 0x53, 0xab, 0xdf, 0x6e, 0xaa, 0xfe, 0xa7, 0x7f, 0x70, 0x5a, 0x00, 0xbd, 0xc7, 0x5f, 0xee, - 0x55, 0x1b, 0x12, 0xfe, 0x34, 0xd3, 0xcb, 0xa5, 0xb6, 0xc0, 0xc6, 0x2d, 0x10, 0xee, 0x34, 0x53, - 0x91, 0x8f, 0xc6, 0x05, 0x11, 0xa3, 0x0a, 0xf4, 0x79, 0x40, 0x31, 0x89, 0x76, 0xbc, 0x16, 0x99, - 0x6f, 0xb1, 0x94, 0x97, 0xec, 0x78, 0x6e, 0xc4, 0x08, 0xff, 0x47, 0xcd, 0x2e, 0x0a, 0x9c, 0x53, - 0x0a, 0xdd, 0xa0, 0x1a, 0x47, 0x87, 0x8a, 0x40, 0x55, 0x69, 0xda, 0x4d, 0xd7, 0x49, 0x3b, 0x22, - 0x2d, 0x27, 0x21, 0xae, 0xc9, 
0x11, 0x67, 0xca, 0xd1, 0x65, 0x45, 0xa5, 0xf2, 0x06, 0x33, 0xbc, - 0xb2, 0x3b, 0x9d, 0x37, 0xdd, 0x29, 0x6d, 0x85, 0x71, 0x72, 0x9b, 0x24, 0x0f, 0xc2, 0xe8, 0x3e, - 0x73, 0xb6, 0x8f, 0x68, 0x29, 0xbe, 0x52, 0x14, 0xd6, 0xe9, 0xa8, 0x0d, 0xc4, 0x4e, 0x79, 0x56, - 0xea, 0xcc, 0x85, 0x3e, 0x92, 0xce, 0x9d, 0x1b, 0x1c, 0x8c, 0x25, 0x5e, 0x92, 0xae, 0x34, 0x16, - 0x99, 0x3b, 0x3c, 0x43, 0xba, 0xd2, 0x58, 0xc4, 0x12, 0x8f, 0xc2, 0xee, 0x87, 0x88, 0x26, 0xca, - 0x1c, 0x4d, 0x74, 0x6b, 0xf0, 0x92, 0x6f, 0x11, 0x3d, 0x84, 0x29, 0xf5, 0x18, 0x12, 0xcf, 0xbd, - 0x18, 0x4f, 0x4f, 0x96, 0x79, 0x37, 0x38, 0x37, 0x85, 0xa3, 0x8a, 0x4c, 0x5e, 0xc9, 0xf0, 0xc4, - 0x5d, 0xb5, 0x18, 0x29, 0x02, 0xa6, 0x0a, 0xd3, 0xb3, 0x5f, 0x81, 0x5a, 0xdc, 0x59, 0x77, 0xc3, - 0x6d, 0xc7, 0x0b, 0x98, 0xcf, 0x5a, 0x7f, 0x05, 0x57, 0x22, 0x70, 0x4a, 0x33, 0xf3, 0x59, 0x38, - 0xd5, 0x25, 0xd3, 0x7d, 0x85, 0xd4, 0xfd, 0xe2, 0x00, 0xd4, 0x94, 0x57, 0x07, 0x5d, 0x31, 0x1d, - 0x77, 0x8f, 0x67, 0x1d, 0x77, 0x23, 0x74, 0xe5, 0xd5, 0x7d, 0x75, 0xef, 0xe4, 0x3c, 0x83, 0xf9, - 0x6c, 0xe1, 0x47, 0x2c, 0x7f, 0xb3, 0xa5, 0x8f, 0x47, 0x42, 0x53, 0xb3, 0x7e, 0xe0, 0x50, 0xb3, - 0xbe, 0xe4, 0x2b, 0x47, 0xd4, 0x80, 0x6f, 0x87, 0xee, 0x4a, 0x23, 0xfb, 0x82, 0x47, 0x83, 0x02, - 0x31, 0xc7, 0x31, 0xbb, 0x8b, 0x2a, 0x65, 0x66, 0x77, 0x0d, 0x1f, 0xd5, 0xee, 0x92, 0x1c, 0x70, - 0xca, 0x0c, 0xed, 0xc0, 0xa9, 0x96, 0xf9, 0x22, 0x8b, 0xba, 0xb0, 0xf2, 0x7c, 0x1f, 0x2f, 0xa2, - 0x74, 0xb4, 0xec, 0xf3, 0x8b, 0x59, 0x7e, 0xb8, 0xbb, 0x0a, 0xfb, 0x07, 0xdc, 0x0b, 0x24, 0xb6, - 0x85, 0x24, 0xee, 0xf8, 0xc7, 0x99, 0x4c, 0xfa, 0x8e, 0xb1, 0x53, 0x7d, 0x04, 0xfe, 0xc7, 0xdf, - 0xb4, 0x98, 0xff, 0x71, 0x8d, 0x6c, 0xb7, 0x7d, 0x27, 0x39, 0xce, 0x68, 0xbd, 0x2f, 0xc1, 0x48, - 0x22, 0x6a, 0x29, 0x97, 0x01, 0x5b, 0x6b, 0x16, 0xf3, 0xc7, 0x2a, 0x45, 0x20, 0xa1, 0x58, 0x31, - 0xb4, 0xff, 0x35, 0xff, 0x0a, 0x12, 0x73, 0x12, 0x3b, 0xab, 0xdb, 0xe6, 0xce, 0xea, 0x99, 0xd2, - 0x9d, 0xe9, 0xb1, 0xc3, 0xfa, 0xae, 0xd9, 0x05, 0x66, 0xb0, 0x7d, 0xf4, 0x3d, 0xe2, 0xf6, 0x2a, - 0x98, 0xaf, 0xcc, 0xa0, 0xd7, 0x78, 0xa8, 0x2a, 0xd7, 0x88, 0xcf, 0xf6, 0x19, 0xa6, 0x6a, 0xff, - 0x7a, 0x05, 0xce, 0xe4, 0x3d, 0x3e, 0x8f, 0x5c, 0x18, 0x6b, 0x6b, 0xe6, 0x73, 0xb9, 0x44, 0x06, - 0xba, 0xc1, 0x9d, 0x9a, 0x2e, 0x3a, 0x14, 0x1b, 0x5c, 0x11, 0x81, 0x31, 0xb2, 0xe3, 0xb5, 0x94, - 0x7b, 0xa5, 0xd2, 0xbf, 0x8a, 0x52, 0xd5, 0x2c, 0x69, 0x8c, 0xb0, 0xc1, 0xf6, 0x18, 0x92, 0xb4, - 0xdb, 0xff, 0xd0, 0x82, 0xc7, 0x7a, 0x64, 0x3b, 0xa0, 0xd5, 0x3d, 0x60, 0x5e, 0x48, 0xf1, 0x8a, - 0x91, 0xaa, 0x8e, 0xfb, 0x26, 0xb1, 0xc0, 0xa2, 0x75, 0x00, 0xee, 0x5b, 0x64, 0x4f, 0xbb, 0x56, - 0xca, 0xc4, 0x00, 0x74, 0xdd, 0xac, 0xd6, 0x2e, 0xdd, 0xaa, 0xc7, 0x5c, 0x35, 0xae, 0xf6, 0x77, - 0xaa, 0x30, 0xc8, 0x5f, 0x97, 0x6c, 0xc0, 0xf0, 0x16, 0x4f, 0xae, 0xd8, 0x5f, 0x6e, 0xc7, 0xd4, - 0x4e, 0xe2, 0x00, 0x2c, 0xd9, 0xa0, 0x55, 0x38, 0xed, 0x05, 0x5e, 0xe2, 0x39, 0x7e, 0x9d, 0xf8, - 0xce, 0xae, 0x34, 0xbc, 0x79, 0x62, 0x6d, 0x99, 0x03, 0xf6, 0xf4, 0x4a, 0x37, 0x09, 0xce, 0x2b, - 0x87, 0x5e, 0xef, 0xca, 0x8e, 0xc4, 0x93, 0x56, 0xaa, 0xbb, 0x5a, 0x87, 0x67, 0x48, 0x42, 0xaf, - 0xc2, 0x78, 0xbb, 0x6b, 0x8b, 0xa1, 0x3d, 0x4b, 0x68, 0x6e, 0x2b, 0x4c, 0x5a, 0x54, 0x87, 0xa9, - 0xb8, 0xc3, 0x4e, 0x64, 0xd7, 0xb6, 0x22, 0x12, 0x6f, 0x85, 0xbe, 0x2b, 0x9e, 0xd3, 0x52, 0xe6, - 0x54, 0x33, 0x83, 0xc7, 0x5d, 0x25, 0x28, 0x97, 0x0d, 0xc7, 0xf3, 0x3b, 0x11, 0x49, 0xb9, 0x0c, - 0x99, 0x5c, 0x96, 0x33, 0x78, 0xdc, 0x55, 0xc2, 0xfe, 0x43, 0x0b, 0x4e, 0xe7, 0x84, 0x2d, 0xf0, - 0x68, 0xba, 0x4d, 0x2f, 0x4e, 0x54, 0xfa, 0x64, 0x2d, 
0x9a, 0x8e, 0xc3, 0xb1, 0xa2, 0xa0, 0x52, - 0xc8, 0xf7, 0x8d, 0xd9, 0xe3, 0x40, 0x71, 0x30, 0x2b, 0xb0, 0xfd, 0xe5, 0x3a, 0x52, 0x8f, 0xe4, - 0x0f, 0xf4, 0x7c, 0x24, 0xff, 0x29, 0x18, 0xdc, 0x54, 0xbb, 0x73, 0xcd, 0x30, 0xe1, 0xfb, 0x73, - 0x8e, 0xb3, 0xbf, 0x55, 0x85, 0xc9, 0x4c, 0xf8, 0x12, 0x6d, 0x48, 0xe6, 0x2d, 0x7f, 0xe6, 0x52, - 0x58, 0x24, 0xed, 0xad, 0x9c, 0xf7, 0xfc, 0x2f, 0x9a, 0x4f, 0xfd, 0xa6, 0x6d, 0x5e, 0xa8, 0x1b, - 0x8f, 0x98, 0x95, 0x4d, 0xe9, 0xfe, 0x14, 0x0c, 0xb4, 0x43, 0xf5, 0x26, 0xa5, 0x12, 0x7a, 0xbc, - 0x50, 0x6f, 0x84, 0xa1, 0x8f, 0x19, 0x12, 0x3d, 0x2d, 0x7a, 0x9f, 0x71, 0x4e, 0x62, 0xc7, 0x0d, - 0x63, 0x6d, 0x08, 0x9e, 0x81, 0xe1, 0xfb, 0x64, 0x37, 0xf2, 0x82, 0xcd, 0xac, 0x6b, 0xf6, 0x26, - 0x07, 0x63, 0x89, 0x37, 0xd3, 0xb6, 0x0f, 0x1f, 0x73, 0xda, 0xf6, 0x91, 0xc2, 0x10, 0xcc, 0x5f, - 0xb1, 0x60, 0x92, 0xe5, 0x9c, 0x13, 0xd7, 0x60, 0xbd, 0x30, 0x38, 0xc6, 0x55, 0xf1, 0x29, 0x18, - 0x8c, 0x68, 0x65, 0xd9, 0x8c, 0xcb, 0xac, 0x05, 0x98, 0xe3, 0xd0, 0x13, 0xe2, 0xc5, 0x74, 0xfa, - 0xf9, 0xc6, 0x78, 0x06, 0xdb, 0xf4, 0xe9, 0x73, 0x16, 0xe0, 0x8f, 0x49, 0xdb, 0xf7, 0x78, 0x63, - 0x53, 0x4f, 0xcc, 0x47, 0x25, 0xc0, 0x3f, 0xb7, 0x71, 0x8f, 0x2a, 0xc0, 0x3f, 0x9f, 0xf9, 0xe1, - 0x26, 0xe8, 0xff, 0xa8, 0xc0, 0xf9, 0xdc, 0x72, 0xe9, 0xb1, 0xce, 0xb2, 0x71, 0xac, 0x73, 0x2d, - 0x73, 0xac, 0x63, 0x1f, 0x5e, 0xfa, 0xd1, 0x1c, 0xf4, 0xe4, 0x1f, 0xbf, 0x54, 0x4f, 0xf2, 0xf8, - 0x65, 0xa0, 0xac, 0xad, 0x30, 0x58, 0x60, 0x2b, 0xfc, 0x8e, 0x05, 0x8f, 0xe7, 0x8e, 0xd9, 0x47, - 0xef, 0x4a, 0x45, 0x6e, 0x33, 0x7b, 0x58, 0xd0, 0x7f, 0xbd, 0xda, 0xa3, 0x5b, 0xcc, 0x96, 0xbe, - 0x44, 0xf5, 0x0e, 0x43, 0xc6, 0xc2, 0x0c, 0x1a, 0xe3, 0x3a, 0x87, 0xc3, 0xb0, 0xc2, 0xa2, 0x58, - 0xbb, 0x92, 0xc0, 0x1b, 0xb9, 0x74, 0xc4, 0x29, 0x35, 0x67, 0x3a, 0xcf, 0xf4, 0xfb, 0xbc, 0x99, - 0x7b, 0x0a, 0xe8, 0x9e, 0xb6, 0x3d, 0xaa, 0x1e, 0x65, 0x7b, 0x34, 0x96, 0xbf, 0x35, 0x42, 0xf3, - 0x30, 0xb9, 0xed, 0x05, 0xec, 0xcd, 0x34, 0xd3, 0x0e, 0x51, 0xb7, 0xe0, 0x56, 0x4d, 0x34, 0xce, - 0xd2, 0xcf, 0xbc, 0x0a, 0xe3, 0x47, 0xf7, 0x98, 0x7c, 0x50, 0x85, 0x9f, 0x3a, 0x44, 0x2d, 0xf0, - 0xf5, 0xc0, 0xf8, 0x2e, 0xda, 0x7a, 0xd0, 0xf5, 0x6d, 0x1a, 0x70, 0x66, 0xa3, 0xe3, 0xfb, 0xbb, - 0x2c, 0x2c, 0x82, 0xb8, 0x92, 0x42, 0xd8, 0x78, 0xea, 0x35, 0xd3, 0xe5, 0x1c, 0x1a, 0x9c, 0x5b, - 0x12, 0x7d, 0x1e, 0x50, 0xb8, 0xce, 0x12, 0x31, 0xba, 0xe9, 0x8d, 0x65, 0xf6, 0x09, 0xaa, 0xe9, - 0x5c, 0xbd, 0xd3, 0x45, 0x81, 0x73, 0x4a, 0x51, 0x8b, 0x8f, 0x3d, 0x84, 0xaa, 0x9a, 0x95, 0xb1, - 0xf8, 0xb0, 0x8e, 0xc4, 0x26, 0x2d, 0xba, 0x0e, 0xa7, 0x9c, 0x1d, 0xc7, 0xe3, 0xd9, 0x66, 0x24, - 0x03, 0x6e, 0xf2, 0x29, 0x97, 0xc4, 0x7c, 0x96, 0x00, 0x77, 0x97, 0x41, 0x6d, 0xc3, 0xc9, 0xc4, - 0x13, 0x2f, 0xbf, 0x76, 0x04, 0x09, 0x2e, 0xed, 0x76, 0xb2, 0xff, 0xc0, 0xa2, 0x8b, 0x5e, 0xce, - 0x1b, 0x63, 0xc6, 0xd3, 0xdc, 0xda, 0x2d, 0x8d, 0xee, 0xa7, 0xb9, 0x99, 0xff, 0xd5, 0xa4, 0xe5, - 0xa2, 0x11, 0xa7, 0x61, 0x95, 0x86, 0x7d, 0x29, 0x6e, 0x27, 0x29, 0x0a, 0x74, 0x0f, 0x86, 0x5d, - 0x6f, 0xc7, 0x8b, 0xc3, 0xa8, 0xc4, 0x4b, 0xb8, 0x5d, 0xa1, 0x7a, 0xa9, 0xba, 0xac, 0x73, 0x26, - 0x58, 0x72, 0xb3, 0xff, 0x66, 0x05, 0xc6, 0x65, 0x7d, 0x6f, 0x74, 0x42, 0xa6, 0xc3, 0x8e, 0x6b, - 0x29, 0x7f, 0xc3, 0x58, 0xca, 0xaf, 0x94, 0xbb, 0xa2, 0xc5, 0x1a, 0xd5, 0x73, 0x09, 0xff, 0x42, - 0x66, 0x09, 0xbf, 0xda, 0x0f, 0xd3, 0x42, 0xef, 0xd1, 0x29, 0x83, 0xfe, 0x23, 0x94, 0xf8, 0x37, - 0xaf, 0x3b, 0x3d, 0x16, 0x8e, 0xef, 0x54, 0x32, 0xdd, 0x60, 0x0b, 0xc6, 0xd7, 0x60, 0x60, 0xcb, - 0x89, 0x5c, 0x71, 0x54, 0xf6, 0xe9, 0x3e, 0x3f, 0xc5, 0xdc, 0x0d, 0x27, 0x72, 
0xb9, 0xda, 0xbf, - 0xac, 0x5e, 0x28, 0x71, 0x22, 0xb7, 0x30, 0xc8, 0x98, 0x55, 0x8a, 0x5e, 0x81, 0xa1, 0xb8, 0x15, - 0xb6, 0x55, 0x54, 0xd7, 0x05, 0xfe, 0x7a, 0x09, 0x85, 0x1c, 0xec, 0xcd, 0x22, 0xb3, 0x3a, 0x0a, - 0xc6, 0x82, 0x7e, 0x86, 0x40, 0x4d, 0x55, 0x7d, 0x8c, 0xe1, 0xac, 0x1f, 0x54, 0xe1, 0x74, 0x8e, - 0xa8, 0xa0, 0x9f, 0x31, 0x46, 0xed, 0xd5, 0xbe, 0x65, 0xed, 0x43, 0x8e, 0xdb, 0xcf, 0xb0, 0x0d, - 0x91, 0x2b, 0x64, 0xe3, 0x08, 0xd5, 0xdf, 0x8d, 0x49, 0xb6, 0x7a, 0x0a, 0x2a, 0xae, 0x9e, 0x56, - 0x7b, 0x42, 0x83, 0x4f, 0xab, 0x51, 0xed, 0x3c, 0xc6, 0x6f, 0xfc, 0xfe, 0x00, 0x9c, 0xc9, 0xbb, - 0x06, 0x8a, 0x7e, 0xde, 0xca, 0xa4, 0x0e, 0x7f, 0xbd, 0xff, 0xbb, 0xa4, 0x3c, 0x9f, 0xb8, 0x48, - 0x0f, 0x31, 0x67, 0x26, 0x13, 0x2f, 0x1c, 0x6d, 0x51, 0x3b, 0xbb, 0x18, 0x10, 0xf1, 0x2c, 0xf0, - 0x52, 0x1f, 0x7c, 0xee, 0x08, 0x4d, 0x11, 0x89, 0xe4, 0xe3, 0xcc, 0xc5, 0x00, 0x09, 0x2e, 0xbe, - 0x18, 0x20, 0xdb, 0x30, 0xb3, 0x09, 0xa3, 0x5a, 0xbf, 0x8e, 0x51, 0x04, 0x3c, 0xba, 0x26, 0x69, - 0xad, 0x3e, 0x46, 0x31, 0xf8, 0xdb, 0x16, 0x64, 0xc2, 0x35, 0x94, 0xd7, 0xc5, 0xea, 0xe9, 0x75, - 0xb9, 0x00, 0x03, 0x51, 0xe8, 0x93, 0x6c, 0x4e, 0x6b, 0x1c, 0xfa, 0x04, 0x33, 0x8c, 0x7a, 0x07, - 0xb1, 0xda, 0xeb, 0x1d, 0x44, 0xba, 0x1d, 0xf7, 0xc9, 0x0e, 0x91, 0x3e, 0x10, 0xa5, 0xbc, 0x6f, - 0x51, 0x20, 0xe6, 0x38, 0xfb, 0x47, 0x55, 0x18, 0xe2, 0x8e, 0x86, 0x63, 0x5c, 0x96, 0x1b, 0x62, - 0xcf, 0x5f, 0xea, 0x42, 0x26, 0x6f, 0xcd, 0x5c, 0xdd, 0x49, 0x1c, 0x2e, 0x50, 0xaa, 0x6f, 0xa9, - 0x9f, 0x00, 0xcd, 0x19, 0xbd, 0x9f, 0xc9, 0x6c, 0x69, 0x81, 0xf3, 0xd0, 0xc6, 0x62, 0x0b, 0x20, - 0x66, 0x4f, 0x62, 0x51, 0x1e, 0x22, 0x29, 0xde, 0x8b, 0xa5, 0xda, 0xd1, 0x54, 0xc5, 0x78, 0x6b, - 0xd2, 0x6c, 0x5c, 0x0a, 0x81, 0x35, 0xde, 0x33, 0x2f, 0x43, 0x4d, 0x11, 0x17, 0x59, 0xfa, 0x63, - 0xba, 0x48, 0xfe, 0x39, 0x98, 0xcc, 0xd4, 0xd5, 0xd7, 0x46, 0xe1, 0xfb, 0x16, 0x9c, 0xea, 0x7a, - 0xc2, 0x15, 0xbd, 0x6f, 0xc1, 0x19, 0x3f, 0xc7, 0xc3, 0x24, 0x3e, 0xf0, 0x51, 0x7c, 0x53, 0x6a, - 0x97, 0x90, 0x87, 0xc5, 0xb9, 0xb5, 0xc9, 0x34, 0x9f, 0x95, 0xfc, 0x34, 0x9f, 0xec, 0x1d, 0x20, - 0xde, 0xf6, 0x93, 0xb0, 0x80, 0x56, 0x4c, 0x0b, 0xe8, 0x13, 0x65, 0xc4, 0xa0, 0x87, 0xe9, 0xf3, - 0xef, 0x2c, 0x40, 0x9c, 0x20, 0xfb, 0x34, 0x1e, 0xf7, 0xd8, 0x69, 0x36, 0x7b, 0x2a, 0x37, 0x0a, - 0x83, 0x35, 0xaa, 0x3e, 0xd3, 0x9e, 0xab, 0x27, 0xa5, 0xca, 0xbd, 0x5b, 0x5f, 0x2d, 0xf1, 0x6e, - 0xfd, 0x6f, 0x56, 0x21, 0x1b, 0xda, 0x80, 0xbe, 0x0c, 0x63, 0x2d, 0xa7, 0xed, 0xac, 0x7b, 0xbe, - 0x97, 0x78, 0x24, 0x2e, 0x77, 0x6c, 0xb4, 0xa8, 0x95, 0x10, 0x3e, 0x5f, 0x0d, 0x82, 0x0d, 0x8e, - 0x68, 0x0e, 0xa0, 0x1d, 0x79, 0x3b, 0x9e, 0x4f, 0x36, 0x99, 0xdd, 0xa1, 0x92, 0xa4, 0x34, 0x14, - 0x14, 0x6b, 0x14, 0x39, 0x31, 0x74, 0xd5, 0x93, 0x88, 0xa1, 0x1b, 0xe8, 0x33, 0x86, 0x6e, 0xb0, - 0x54, 0x0c, 0x1d, 0x86, 0x73, 0xd2, 0x55, 0x4b, 0xff, 0x2f, 0x7b, 0x3e, 0xe1, 0x79, 0xfd, 0x44, - 0xe4, 0xe3, 0xcc, 0xfe, 0xde, 0xec, 0x39, 0x9c, 0x4b, 0x81, 0x7b, 0x94, 0xb4, 0x3b, 0x70, 0xba, - 0x49, 0x22, 0x8f, 0xa5, 0x5d, 0x72, 0xd3, 0x19, 0xf8, 0x0e, 0xd4, 0xa2, 0xcc, 0xe4, 0xef, 0xf3, - 0x4e, 0x9a, 0x96, 0xbc, 0x42, 0x4e, 0xf6, 0x94, 0xa5, 0xfd, 0x97, 0x2a, 0x30, 0x2c, 0x42, 0x88, - 0x8e, 0x71, 0x21, 0xb9, 0x69, 0xec, 0xef, 0x9e, 0x29, 0x9a, 0xb9, 0xac, 0x39, 0x3d, 0x77, 0x76, - 0xcd, 0xcc, 0xce, 0xee, 0xb9, 0x72, 0xec, 0x0e, 0xdf, 0xd3, 0xfd, 0xb0, 0x02, 0x13, 0x66, 0x28, - 0xd5, 0x31, 0x0e, 0xc7, 0x5b, 0x30, 0x1c, 0x8b, 0xf8, 0xa2, 0x4a, 0x99, 0x58, 0x8d, 0xec, 0x27, - 0x4d, 0xdf, 0xc0, 0x17, 0x11, 0x45, 0x92, 0x5d, 0x6e, 0x08, 0x53, 0xf5, 0x24, 0x42, 0x98, 0xec, - 0x1f, 
0x31, 0x95, 0xaa, 0x0f, 0xe0, 0x49, 0xac, 0x09, 0x6f, 0x98, 0xda, 0xf7, 0x72, 0x29, 0x51, - 0x10, 0xed, 0xeb, 0xb1, 0x36, 0x7c, 0xcf, 0x82, 0x51, 0x41, 0x78, 0x12, 0x3d, 0xf8, 0xbc, 0xd9, - 0x83, 0xa7, 0x4b, 0xf5, 0xa0, 0x47, 0xd3, 0xff, 0x6e, 0x45, 0x35, 0xbd, 0x21, 0x9e, 0x0c, 0x2d, - 0x4c, 0xf4, 0x38, 0xd2, 0x8e, 0xc2, 0x24, 0x6c, 0x85, 0xbe, 0x58, 0xe5, 0x9f, 0x48, 0xa3, 0xce, - 0x39, 0xfc, 0x40, 0xfb, 0x8d, 0x15, 0x35, 0x8b, 0xa6, 0x0e, 0xa3, 0x44, 0x2c, 0x51, 0x79, 0x0f, - 0x96, 0xae, 0xcb, 0x07, 0xa1, 0x29, 0x4c, 0x5c, 0xd9, 0xe8, 0xf7, 0x21, 0xd4, 0x34, 0x86, 0x5c, - 0x71, 0xc2, 0x1a, 0x57, 0x19, 0xde, 0xc8, 0x6a, 0x18, 0x34, 0xdd, 0xa8, 0xb7, 0x05, 0x1c, 0x2b, - 0x0a, 0xfb, 0x65, 0xa6, 0x63, 0xd9, 0xf0, 0xf4, 0x17, 0x18, 0xfe, 0x0b, 0x43, 0x6a, 0x60, 0x99, - 0x93, 0xe4, 0x36, 0x0c, 0xd2, 0x2e, 0xca, 0x7d, 0x60, 0x39, 0x85, 0x46, 0x9b, 0xa0, 0x07, 0x88, - 0x45, 0x49, 0x8c, 0x39, 0x1b, 0x44, 0xba, 0x7c, 0xef, 0x2f, 0x97, 0xd6, 0x91, 0x7d, 0x78, 0xdb, - 0x59, 0xe2, 0x18, 0x96, 0x2c, 0x63, 0xa5, 0x91, 0x4d, 0xce, 0xb9, 0x28, 0x11, 0x38, 0xa5, 0x41, - 0x57, 0x84, 0xb9, 0x6e, 0xbe, 0x27, 0x2b, 0xcd, 0x75, 0x39, 0x24, 0x9a, 0xbd, 0x7e, 0x15, 0x46, - 0x55, 0x7a, 0xf2, 0x06, 0xcf, 0x32, 0x5d, 0xe3, 0xf6, 0xcb, 0x52, 0x0a, 0xc6, 0x3a, 0x0d, 0x5a, - 0x81, 0xd3, 0xae, 0x8a, 0x66, 0x6d, 0x74, 0xd6, 0x7d, 0xaf, 0x45, 0x8b, 0xf2, 0x9b, 0x24, 0x8f, - 0xed, 0xef, 0xcd, 0x9e, 0xae, 0x77, 0xa3, 0x71, 0x5e, 0x19, 0xb4, 0x06, 0x93, 0x31, 0x4f, 0xc3, - 0x2e, 0xef, 0x9c, 0x89, 0xec, 0x75, 0xcf, 0x4a, 0xa7, 0x7f, 0xd3, 0x44, 0x1f, 0x30, 0x10, 0x57, - 0x0a, 0x02, 0x84, 0xb3, 0x2c, 0xd0, 0xeb, 0x30, 0xe1, 0xeb, 0xcf, 0x29, 0x35, 0x44, 0x50, 0xaf, - 0x0a, 0x88, 0x30, 0x1e, 0x5b, 0x6a, 0xe0, 0x0c, 0x35, 0x7a, 0x0b, 0xa6, 0x75, 0x88, 0xb8, 0xd4, - 0xee, 0x04, 0x9b, 0x24, 0x16, 0xf9, 0x9f, 0x9f, 0xd8, 0xdf, 0x9b, 0x9d, 0xbe, 0xd5, 0x83, 0x06, - 0xf7, 0x2c, 0x8d, 0x5e, 0x81, 0x31, 0x39, 0x92, 0x5a, 0x80, 0x6f, 0x1a, 0x8a, 0xa3, 0xe1, 0xb0, - 0x41, 0xf9, 0xe1, 0xce, 0x36, 0xbe, 0x46, 0x0b, 0x6b, 0x8b, 0x2a, 0xfa, 0x0a, 0x8c, 0xe9, 0x6d, - 0x14, 0x6a, 0xf2, 0x53, 0xe5, 0x9f, 0xa8, 0x12, 0x8b, 0xb3, 0x6a, 0xb9, 0x8e, 0xc3, 0x06, 0x6f, - 0xbb, 0x05, 0x93, 0x99, 0x27, 0x72, 0xd5, 0x5b, 0xcb, 0xd6, 0xa3, 0x7a, 0x6b, 0xd9, 0xfe, 0xba, - 0x05, 0x83, 0x6b, 0x8e, 0x57, 0xfc, 0xd0, 0x41, 0x99, 0xc7, 0x8a, 0xd1, 0x4b, 0x30, 0x44, 0x36, - 0x36, 0x48, 0x4b, 0xbe, 0xdd, 0xfc, 0xa4, 0x7a, 0x79, 0x9f, 0x41, 0xe9, 0x4c, 0x62, 0x95, 0xf1, - 0xbf, 0x58, 0x10, 0xdb, 0xff, 0xc1, 0x02, 0x58, 0x0b, 0x7d, 0x79, 0xca, 0x52, 0xd0, 0x92, 0x85, - 0xae, 0x27, 0x17, 0x2e, 0xe6, 0x3c, 0xb9, 0x80, 0x52, 0x86, 0x39, 0x0f, 0x2e, 0xa8, 0xde, 0x54, - 0x4b, 0xf5, 0x66, 0xa0, 0x9f, 0xde, 0x7c, 0xd3, 0x02, 0x11, 0xf2, 0x52, 0x62, 0x59, 0x72, 0x65, - 0x9a, 0x74, 0x23, 0xbb, 0xc4, 0xb3, 0x65, 0xee, 0x6d, 0x88, 0x9c, 0x12, 0x4a, 0x94, 0x8c, 0x4c, - 0x12, 0x06, 0x57, 0xba, 0xf5, 0x1e, 0xe5, 0xe8, 0x55, 0x66, 0xf0, 0x15, 0xb7, 0xab, 0xaf, 0x44, - 0x5a, 0x2c, 0x8b, 0x38, 0x65, 0xac, 0xf2, 0x29, 0xe9, 0x59, 0xc4, 0x25, 0x02, 0xa7, 0x34, 0xe8, - 0x19, 0x18, 0x8e, 0x3b, 0xeb, 0x8c, 0x3c, 0x13, 0xff, 0xd2, 0xe4, 0x60, 0x2c, 0xf1, 0xf6, 0xcf, - 0x21, 0x30, 0xba, 0x66, 0xe4, 0x6e, 0xb2, 0x1e, 0x79, 0xee, 0xa6, 0xb7, 0x61, 0x84, 0x6c, 0xb7, - 0x93, 0xdd, 0xba, 0x17, 0x95, 0xcb, 0xa3, 0xb7, 0x24, 0xa8, 0xbb, 0xb9, 0x4b, 0x0c, 0x56, 0x1c, - 0x7b, 0x64, 0xe2, 0xaa, 0x7e, 0x24, 0x32, 0x71, 0x0d, 0xfc, 0x89, 0x64, 0xe2, 0x7a, 0x0b, 0x86, - 0x37, 0xf9, 0xdb, 0xfd, 0xe2, 0x9e, 0x5e, 0xc1, 0xf1, 0x55, 0xce, 0x43, 0xff, 0xfc, 0x42, 0x96, - 0x40, 0x60, 0xc9, 0x0e, 0xad, 
0xc1, 0x10, 0xdf, 0x2c, 0x88, 0xe4, 0x56, 0x9f, 0x2a, 0xe3, 0x46, - 0xe9, 0xce, 0xf3, 0x24, 0x82, 0x9c, 0x04, 0x2f, 0x99, 0x79, 0x6b, 0xf8, 0xc3, 0x67, 0xde, 0x52, - 0xf9, 0xb2, 0x46, 0x1e, 0x55, 0xbe, 0x2c, 0x23, 0xef, 0x58, 0xed, 0x38, 0xf2, 0x8e, 0x7d, 0xd3, - 0x82, 0xb3, 0xed, 0xbc, 0xb4, 0x7d, 0x22, 0xf3, 0xd5, 0x67, 0x8f, 0x90, 0xc6, 0xd0, 0xa8, 0x9a, - 0x5d, 0x9f, 0xca, 0x25, 0xc3, 0xf9, 0x15, 0xcb, 0x04, 0x66, 0xa3, 0x1f, 0x3e, 0x81, 0xd9, 0x71, - 0xa7, 0xc8, 0x4a, 0xd3, 0x99, 0x8d, 0x1f, 0x4b, 0x3a, 0xb3, 0x89, 0x47, 0x98, 0xce, 0x4c, 0x4b, - 0x44, 0x36, 0xf9, 0x68, 0x13, 0x91, 0x6d, 0xc1, 0xa8, 0x1b, 0x3e, 0x08, 0x1e, 0x38, 0x91, 0x3b, - 0xdf, 0x58, 0x11, 0x79, 0xaf, 0x0a, 0x72, 0x2c, 0xd4, 0xd3, 0x02, 0x46, 0x0d, 0xdc, 0x5f, 0x98, - 0x22, 0xb1, 0xce, 0x5a, 0xa4, 0x64, 0x3b, 0xf5, 0x21, 0x53, 0xb2, 0x19, 0x89, 0xcd, 0xd0, 0x71, - 0x24, 0x36, 0xfb, 0x32, 0xbb, 0x69, 0xbd, 0xe1, 0x6d, 0xae, 0x3a, 0xed, 0xe9, 0xd3, 0x65, 0x6a, - 0x58, 0x94, 0xe4, 0xdd, 0x35, 0x28, 0x14, 0x4e, 0x99, 0x76, 0xa7, 0x4e, 0x3b, 0x73, 0xd2, 0xa9, - 0xd3, 0xce, 0x1e, 0x63, 0xea, 0xb4, 0x73, 0x27, 0x9a, 0x3a, 0xed, 0xb1, 0x3f, 0x91, 0xd4, 0x69, - 0x7f, 0x01, 0xce, 0x1f, 0xfe, 0x39, 0xd2, 0xe4, 0xbc, 0x8d, 0x74, 0x87, 0x9f, 0x49, 0xce, 0xcb, - 0x4c, 0x1d, 0x8d, 0xaa, 0x74, 0x06, 0xa7, 0x7f, 0x6e, 0xc1, 0x63, 0x3d, 0xf2, 0x9c, 0x94, 0xbe, - 0x7c, 0xd0, 0x86, 0xc9, 0xb6, 0x59, 0xb4, 0xf4, 0x35, 0x21, 0x23, 0xaf, 0x8a, 0x0a, 0x6b, 0xcb, - 0x20, 0x70, 0x96, 0xfd, 0xc2, 0x27, 0x7e, 0xfc, 0xc1, 0xf9, 0x8f, 0xfd, 0xe4, 0x83, 0xf3, 0x1f, - 0xfb, 0xbd, 0x0f, 0xce, 0x7f, 0xec, 0x67, 0xf7, 0xcf, 0x5b, 0x3f, 0xde, 0x3f, 0x6f, 0xfd, 0x64, - 0xff, 0xbc, 0xf5, 0x87, 0xfb, 0xe7, 0xad, 0x6f, 0xfe, 0xd1, 0xf9, 0x8f, 0x7d, 0xb1, 0xb2, 0x73, - 0xf5, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x70, 0xfd, 0x33, 0x79, 0x2a, 0xb5, 0x00, 0x00, + // 11000 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0xbd, 0x7d, 0x70, 0x24, 0xc7, + 0x75, 0x18, 0xae, 0xd9, 0xc5, 0xd7, 0x3e, 0x7c, 0x37, 0x70, 0x47, 0x10, 0x22, 0x0f, 0xc7, 0xa1, + 0x48, 0x1d, 0xc9, 0x23, 0x20, 0x1e, 0x49, 0xf1, 0x24, 0xea, 0x47, 0x09, 0xc0, 0x02, 0x77, 0xd0, + 0x7d, 0x2d, 0x7b, 0x71, 0x77, 0x14, 0xc5, 0x9f, 0xc8, 0xb9, 0x9d, 0x06, 0x30, 0xbc, 0xc1, 0xcc, + 0x72, 0x66, 0x16, 0x77, 0x90, 0xa2, 0x2a, 0x5b, 0x51, 0xc9, 0x49, 0x59, 0x49, 0xe4, 0x72, 0x54, + 0x95, 0x72, 0x52, 0xa5, 0x94, 0xab, 0xe2, 0x28, 0x9f, 0x8e, 0xa2, 0xb2, 0x24, 0x97, 0xe5, 0xa4, + 0xe2, 0x58, 0x8e, 0x5c, 0x95, 0x38, 0xaa, 0x72, 0x25, 0x76, 0xca, 0x15, 0xd8, 0x82, 0x2a, 0xfe, + 0x23, 0x7f, 0xe4, 0x8f, 0xf8, 0x3f, 0x24, 0x95, 0x4a, 0xf5, 0xe7, 0x74, 0xcf, 0xee, 0x62, 0x66, + 0xc1, 0x03, 0x7c, 0x52, 0xe5, 0xbf, 0xdd, 0x7e, 0xaf, 0x5f, 0x7f, 0x4c, 0xf7, 0xeb, 0xf7, 0x5e, + 0xbf, 0xf7, 0x1a, 0xce, 0xdf, 0xbd, 0x18, 0xcf, 0x7b, 0xe1, 0xc2, 0xdd, 0xd6, 0x1d, 0x12, 0x05, + 0x24, 0x21, 0xf1, 0x42, 0xf3, 0xee, 0xe6, 0x82, 0xd3, 0xf4, 0x16, 0x76, 0x5e, 0x58, 0xd8, 0x24, + 0x01, 0x89, 0x9c, 0x84, 0xb8, 0xf3, 0xcd, 0x28, 0x4c, 0x42, 0xf4, 0x18, 0xc7, 0x9e, 0x4f, 0xb1, + 0xe7, 0x9b, 0x77, 0x37, 0xe7, 0x9d, 0xa6, 0x37, 0xbf, 0xf3, 0xc2, 0xec, 0xf3, 0x9b, 0x5e, 0xb2, + 0xd5, 0xba, 0x33, 0xdf, 0x08, 0xb7, 0x17, 0x36, 0xc3, 0xcd, 0x70, 0x81, 0x55, 0xba, 0xd3, 0xda, + 0x60, 0xff, 0xd8, 0x1f, 0xf6, 0x8b, 0x13, 0x9b, 0x7d, 0x49, 0x34, 0xed, 0x34, 0xbd, 0x6d, 0xa7, + 0xb1, 0xe5, 0x05, 0x24, 0xda, 0x55, 0x8d, 0x47, 0x24, 0x0e, 0x5b, 0x51, 0x83, 0x64, 0xbb, 0x70, + 0x68, 0xad, 0x78, 0x61, 0x9b, 0x24, 0x4e, 0x87, 0x8e, 0xcf, 0x2e, 0x74, 0xab, 0x15, 0xb5, 0x82, + 0xc4, 0xdb, 
0x6e, 0x6f, 0xe6, 0xa3, 0x79, 0x15, 0xe2, 0xc6, 0x16, 0xd9, 0x76, 0xda, 0xea, 0xbd, + 0xd8, 0xad, 0x5e, 0x2b, 0xf1, 0xfc, 0x05, 0x2f, 0x48, 0xe2, 0x24, 0x3a, 0x6c, 0x4c, 0x31, 0x89, + 0x76, 0x48, 0x94, 0x0e, 0x88, 0xdc, 0x77, 0xb6, 0x9b, 0x3e, 0xe9, 0x30, 0x26, 0xfb, 0x8f, 0x2c, + 0x38, 0xbb, 0x78, 0xbb, 0xbe, 0xe2, 0x3b, 0x71, 0xe2, 0x35, 0x96, 0xfc, 0xb0, 0x71, 0xb7, 0x9e, + 0x84, 0x11, 0xb9, 0x15, 0xfa, 0xad, 0x6d, 0x52, 0x67, 0xd3, 0x87, 0xce, 0xc3, 0xd0, 0x0e, 0xfb, + 0xbf, 0x56, 0x9d, 0xb1, 0xce, 0x5a, 0xe7, 0x2a, 0x4b, 0x13, 0x3f, 0xdc, 0x9b, 0xfb, 0xc0, 0xfe, + 0xde, 0xdc, 0xd0, 0x2d, 0x51, 0x8e, 0x15, 0x06, 0x7a, 0x1a, 0x06, 0x36, 0xe2, 0xf5, 0xdd, 0x26, + 0x99, 0x29, 0x31, 0xdc, 0x31, 0x81, 0x3b, 0xb0, 0x5a, 0xa7, 0xa5, 0x58, 0x40, 0xd1, 0x02, 0x54, + 0x9a, 0x4e, 0x94, 0x78, 0x89, 0x17, 0x06, 0x33, 0xe5, 0xb3, 0xd6, 0xb9, 0xfe, 0xa5, 0x49, 0x81, + 0x5a, 0xa9, 0x49, 0x00, 0x4e, 0x71, 0x68, 0x37, 0x22, 0xe2, 0xb8, 0x37, 0x02, 0x7f, 0x77, 0xa6, + 0xef, 0xac, 0x75, 0x6e, 0x28, 0xed, 0x06, 0x16, 0xe5, 0x58, 0x61, 0xd8, 0xdf, 0x2b, 0xc1, 0xd0, + 0xe2, 0xc6, 0x86, 0x17, 0x78, 0xc9, 0x2e, 0x7a, 0x07, 0x46, 0x82, 0xd0, 0x25, 0xf2, 0x3f, 0x1b, + 0xc5, 0xf0, 0x85, 0x67, 0xe7, 0x0f, 0x5b, 0x8a, 0xf3, 0xd7, 0xb5, 0x1a, 0x4b, 0x13, 0xfb, 0x7b, + 0x73, 0x23, 0x7a, 0x09, 0x36, 0x28, 0xa2, 0xb7, 0x60, 0xb8, 0x19, 0xba, 0xaa, 0x81, 0x12, 0x6b, + 0xe0, 0x99, 0xc3, 0x1b, 0xa8, 0xa5, 0x15, 0x96, 0xc6, 0xf7, 0xf7, 0xe6, 0x86, 0xb5, 0x02, 0xac, + 0x93, 0x43, 0x3e, 0x8c, 0xd3, 0xbf, 0x41, 0xe2, 0xa9, 0x16, 0xca, 0xac, 0x85, 0xe7, 0xf3, 0x5b, + 0xd0, 0x2a, 0x2d, 0x4d, 0xed, 0xef, 0xcd, 0x8d, 0x67, 0x0a, 0x71, 0x96, 0xb4, 0xfd, 0x79, 0x18, + 0x5b, 0x4c, 0x12, 0xa7, 0xb1, 0x45, 0x5c, 0xfe, 0x7d, 0xd1, 0x4b, 0xd0, 0x17, 0x38, 0xdb, 0x44, + 0x7c, 0xfd, 0xb3, 0x62, 0xda, 0xfb, 0xae, 0x3b, 0xdb, 0xe4, 0x60, 0x6f, 0x6e, 0xe2, 0x66, 0xe0, + 0xbd, 0xd7, 0x12, 0x6b, 0x86, 0x96, 0x61, 0x86, 0x8d, 0x2e, 0x00, 0xb8, 0x64, 0xc7, 0x6b, 0x90, + 0x9a, 0x93, 0x6c, 0x89, 0xd5, 0x80, 0x44, 0x5d, 0xa8, 0x2a, 0x08, 0xd6, 0xb0, 0xec, 0x2f, 0x59, + 0x50, 0x59, 0xdc, 0x09, 0x3d, 0xb7, 0x16, 0xba, 0x31, 0x6a, 0xc1, 0x78, 0x33, 0x22, 0x1b, 0x24, + 0x52, 0x45, 0x33, 0xd6, 0xd9, 0xf2, 0xb9, 0xe1, 0x0b, 0x17, 0x72, 0xc6, 0x6d, 0x56, 0x5a, 0x09, + 0x92, 0x68, 0x77, 0xe9, 0x11, 0xd1, 0xf4, 0x78, 0x06, 0x8a, 0xb3, 0x6d, 0xd8, 0xbf, 0x54, 0x82, + 0x53, 0x8b, 0x9f, 0x6f, 0x45, 0xa4, 0xea, 0xc5, 0x77, 0xb3, 0x5b, 0xc1, 0xf5, 0xe2, 0xbb, 0xd7, + 0xd3, 0xc9, 0x50, 0x6b, 0xb0, 0x2a, 0xca, 0xb1, 0xc2, 0x40, 0xcf, 0xc3, 0x20, 0xfd, 0x7d, 0x13, + 0xaf, 0x89, 0xd1, 0x4f, 0x09, 0xe4, 0xe1, 0xaa, 0x93, 0x38, 0x55, 0x0e, 0xc2, 0x12, 0x07, 0x5d, + 0x83, 0xe1, 0x06, 0xdb, 0xef, 0x9b, 0xd7, 0x42, 0x97, 0xb0, 0x2f, 0x5c, 0x59, 0x7a, 0x8e, 0xa2, + 0x2f, 0xa7, 0xc5, 0x07, 0x7b, 0x73, 0x33, 0xbc, 0x6f, 0x82, 0x84, 0x06, 0xc3, 0x7a, 0x7d, 0x64, + 0xab, 0x8d, 0xd8, 0xc7, 0x28, 0x41, 0x87, 0x4d, 0x78, 0x4e, 0xdb, 0x53, 0xfd, 0x6c, 0x4f, 0x8d, + 0x74, 0xd9, 0x4f, 0xff, 0xd8, 0x12, 0x73, 0xb2, 0xea, 0xf9, 0x26, 0x7b, 0xb8, 0x00, 0x10, 0x93, + 0x46, 0x44, 0x12, 0x6d, 0x56, 0xd4, 0x67, 0xae, 0x2b, 0x08, 0xd6, 0xb0, 0xe8, 0xe6, 0x8f, 0xb7, + 0x9c, 0x88, 0xad, 0x16, 0x31, 0x37, 0x6a, 0xf3, 0xd7, 0x25, 0x00, 0xa7, 0x38, 0xc6, 0xe6, 0x2f, + 0xe7, 0x6e, 0xfe, 0xdf, 0xb1, 0x60, 0x70, 0xc9, 0x0b, 0x5c, 0x2f, 0xd8, 0x44, 0xef, 0xc0, 0x10, + 0xe5, 0xe8, 0xae, 0x93, 0x38, 0x62, 0xdf, 0x7f, 0x44, 0x2e, 0x1e, 0x9d, 0xc1, 0xca, 0xe5, 0x13, + 0xcf, 0x53, 0x6c, 0xba, 0x88, 0x6e, 0xdc, 0x79, 0x97, 0x34, 0x92, 0x6b, 0x24, 0x71, 0xd2, 0xe1, + 0xa4, 0x65, 0x58, 0x51, 0x45, 0x37, 
0x61, 0x20, 0x71, 0xa2, 0x4d, 0x92, 0x88, 0x6d, 0x9f, 0xb3, + 0x29, 0x39, 0x0d, 0x4c, 0x97, 0x1c, 0x09, 0x1a, 0x24, 0x65, 0x90, 0xeb, 0x8c, 0x08, 0x16, 0xc4, + 0xec, 0x06, 0x8c, 0x2c, 0x3b, 0x4d, 0xe7, 0x8e, 0xe7, 0x7b, 0x89, 0x47, 0x62, 0xf4, 0x61, 0x28, + 0x3b, 0xae, 0xcb, 0x36, 0x40, 0x65, 0xe9, 0xd4, 0xfe, 0xde, 0x5c, 0x79, 0xd1, 0x75, 0x0f, 0xf6, + 0xe6, 0x40, 0x61, 0xed, 0x62, 0x8a, 0x81, 0x9e, 0x85, 0x3e, 0x37, 0x0a, 0x9b, 0x33, 0x25, 0x86, + 0x79, 0x9a, 0xee, 0xd4, 0x6a, 0x14, 0x36, 0x33, 0xa8, 0x0c, 0xc7, 0xfe, 0x41, 0x09, 0xd0, 0x32, + 0x69, 0x6e, 0xad, 0xd6, 0x8d, 0x6f, 0x7a, 0x0e, 0x86, 0xb6, 0xc3, 0xc0, 0x4b, 0xc2, 0x28, 0x16, + 0x0d, 0xb2, 0x75, 0x71, 0x4d, 0x94, 0x61, 0x05, 0x45, 0x67, 0xa1, 0xaf, 0x99, 0x6e, 0xef, 0x11, + 0xc9, 0x1a, 0xd8, 0xc6, 0x66, 0x10, 0x8a, 0xd1, 0x8a, 0x49, 0x24, 0xd6, 0xb3, 0xc2, 0xb8, 0x19, + 0x93, 0x08, 0x33, 0x48, 0xba, 0x82, 0xe8, 0xda, 0x12, 0xab, 0x35, 0xb3, 0x82, 0x28, 0x04, 0x6b, + 0x58, 0xe8, 0x6d, 0xa8, 0xf0, 0x7f, 0x98, 0x6c, 0xb0, 0xa5, 0x9b, 0xcb, 0x14, 0xae, 0x86, 0x0d, + 0xc7, 0xcf, 0x4e, 0xfe, 0x28, 0x5b, 0x71, 0x92, 0x10, 0x4e, 0x69, 0x1a, 0x2b, 0x6e, 0x20, 0x77, + 0xc5, 0xfd, 0x1d, 0x0b, 0xd0, 0xb2, 0x17, 0xb8, 0x24, 0x3a, 0x81, 0xa3, 0xb3, 0xb7, 0xcd, 0xf0, + 0x27, 0xb4, 0x6b, 0xe1, 0x76, 0x33, 0x0c, 0x48, 0x90, 0x2c, 0x87, 0x81, 0xcb, 0x8f, 0xd3, 0x8f, + 0x43, 0x5f, 0x42, 0x9b, 0xe2, 0xdd, 0x7a, 0x5a, 0x7e, 0x16, 0xda, 0xc0, 0xc1, 0xde, 0xdc, 0xe9, + 0xf6, 0x1a, 0xac, 0x0b, 0xac, 0x0e, 0xfa, 0x18, 0x0c, 0xc4, 0x89, 0x93, 0xb4, 0x62, 0xd1, 0xd1, + 0x27, 0x64, 0x47, 0xeb, 0xac, 0xf4, 0x60, 0x6f, 0x6e, 0x5c, 0x55, 0xe3, 0x45, 0x58, 0x54, 0x40, + 0xcf, 0xc0, 0xe0, 0x36, 0x89, 0x63, 0x67, 0x53, 0x32, 0xb8, 0x71, 0x51, 0x77, 0xf0, 0x1a, 0x2f, + 0xc6, 0x12, 0x8e, 0x9e, 0x84, 0x7e, 0x12, 0x45, 0x61, 0x24, 0x56, 0xc4, 0xa8, 0x40, 0xec, 0x5f, + 0xa1, 0x85, 0x98, 0xc3, 0xec, 0xff, 0x62, 0xc1, 0xb8, 0xea, 0x2b, 0x6f, 0xeb, 0x04, 0xb6, 0xbc, + 0x0b, 0xd0, 0x90, 0x03, 0x8c, 0xd9, 0x46, 0xd3, 0xda, 0xe8, 0xbc, 0xfc, 0xda, 0x27, 0x34, 0x6d, + 0x43, 0x15, 0xc5, 0x58, 0xa3, 0x6b, 0xff, 0x3b, 0x0b, 0xa6, 0x32, 0x63, 0xbb, 0xea, 0xc5, 0x09, + 0x7a, 0xab, 0x6d, 0x7c, 0xf3, 0xc5, 0xc6, 0x47, 0x6b, 0xb3, 0xd1, 0xa9, 0xf5, 0x22, 0x4b, 0xb4, + 0xb1, 0x61, 0xe8, 0xf7, 0x12, 0xb2, 0x2d, 0x87, 0xf5, 0x7c, 0xc1, 0x61, 0xf1, 0xfe, 0xa5, 0x5f, + 0x69, 0x8d, 0xd2, 0xc0, 0x9c, 0x94, 0xfd, 0xbf, 0x2c, 0xa8, 0x2c, 0x87, 0xc1, 0x86, 0xb7, 0x79, + 0xcd, 0x69, 0x9e, 0xc0, 0xf7, 0xa9, 0x43, 0x1f, 0xa3, 0xce, 0x87, 0xf0, 0x42, 0xde, 0x10, 0x44, + 0xc7, 0xe6, 0xe9, 0x99, 0xca, 0x85, 0x05, 0xc5, 0xa6, 0x68, 0x11, 0x66, 0xc4, 0x66, 0x5f, 0x81, + 0x8a, 0x42, 0x40, 0x13, 0x50, 0xbe, 0x4b, 0xb8, 0x24, 0x59, 0xc1, 0xf4, 0x27, 0x9a, 0x86, 0xfe, + 0x1d, 0xc7, 0x6f, 0x89, 0xcd, 0x8b, 0xf9, 0x9f, 0x8f, 0x97, 0x2e, 0x5a, 0xf6, 0x0f, 0xd8, 0x0e, + 0x14, 0x8d, 0xac, 0x04, 0x3b, 0x82, 0x39, 0x7c, 0xd9, 0x82, 0x69, 0xbf, 0x03, 0x53, 0x12, 0x73, + 0x72, 0x14, 0x76, 0xf6, 0x98, 0xe8, 0xf6, 0x74, 0x27, 0x28, 0xee, 0xd8, 0x1a, 0xe5, 0xf5, 0x61, + 0x93, 0x2e, 0x38, 0xc7, 0x67, 0x5d, 0x17, 0x32, 0xc0, 0x0d, 0x51, 0x86, 0x15, 0xd4, 0xfe, 0x73, + 0x0b, 0xa6, 0xd5, 0x38, 0xae, 0x90, 0xdd, 0x3a, 0xf1, 0x49, 0x23, 0x09, 0xa3, 0x87, 0x65, 0x24, + 0x8f, 0xf3, 0x6f, 0xc2, 0x79, 0xd2, 0xb0, 0x20, 0x50, 0xbe, 0x42, 0x76, 0xf9, 0x07, 0xd2, 0x07, + 0x5a, 0x3e, 0x74, 0xa0, 0xbf, 0x65, 0xc1, 0xa8, 0x1a, 0xe8, 0x09, 0x6c, 0xb9, 0xab, 0xe6, 0x96, + 0xfb, 0x70, 0xc1, 0xf5, 0xda, 0x65, 0xb3, 0xfd, 0xed, 0x12, 0x65, 0x1b, 0x02, 0xa7, 0x16, 0x85, + 0x74, 0x92, 0x28, 0xc7, 0x7f, 0x48, 0xbe, 0x52, 0x6f, 0x83, 
0xbd, 0x42, 0x76, 0xd7, 0x43, 0x2a, + 0x4d, 0x74, 0x1e, 0xac, 0xf1, 0x51, 0xfb, 0x0e, 0xfd, 0xa8, 0xbf, 0x5f, 0x82, 0x53, 0x6a, 0x5a, + 0x8c, 0x53, 0xfa, 0x67, 0x72, 0x62, 0x5e, 0x80, 0x61, 0x97, 0x6c, 0x38, 0x2d, 0x3f, 0x51, 0xda, + 0x44, 0x3f, 0x57, 0x33, 0xab, 0x69, 0x31, 0xd6, 0x71, 0x7a, 0x98, 0xcb, 0x6f, 0x0c, 0x33, 0x7e, + 0x9e, 0x38, 0x74, 0xd5, 0x53, 0x09, 0x4f, 0x53, 0x0f, 0x47, 0x74, 0xf5, 0x50, 0xa8, 0x82, 0x4f, + 0x42, 0xbf, 0xb7, 0x4d, 0xcf, 0xfc, 0x92, 0x79, 0x94, 0xaf, 0xd1, 0x42, 0xcc, 0x61, 0xe8, 0x29, + 0x18, 0x6c, 0x84, 0xdb, 0xdb, 0x4e, 0xe0, 0xce, 0x94, 0x99, 0xcc, 0x39, 0x4c, 0xc5, 0x82, 0x65, + 0x5e, 0x84, 0x25, 0x0c, 0x3d, 0x06, 0x7d, 0x4e, 0xb4, 0x19, 0xcf, 0xf4, 0x31, 0x9c, 0x21, 0xda, + 0xd2, 0x62, 0xb4, 0x19, 0x63, 0x56, 0x4a, 0x65, 0xc9, 0x7b, 0x61, 0x74, 0xd7, 0x0b, 0x36, 0xab, + 0x5e, 0xc4, 0x04, 0x43, 0x4d, 0x96, 0xbc, 0xad, 0x20, 0x58, 0xc3, 0x42, 0x35, 0xe8, 0x6f, 0x86, + 0x51, 0x12, 0xcf, 0x0c, 0xb0, 0x89, 0x7f, 0x2e, 0x77, 0xfb, 0xf1, 0x71, 0xd7, 0xc2, 0x28, 0x49, + 0x87, 0x42, 0xff, 0xc5, 0x98, 0x13, 0x42, 0xcb, 0x50, 0x26, 0xc1, 0xce, 0xcc, 0x20, 0xa3, 0xf7, + 0xa1, 0xc3, 0xe9, 0xad, 0x04, 0x3b, 0xb7, 0x9c, 0x28, 0xe5, 0x57, 0x2b, 0xc1, 0x0e, 0xa6, 0xb5, + 0x51, 0x03, 0x2a, 0xd2, 0x84, 0x15, 0xcf, 0x0c, 0x15, 0x59, 0x8a, 0x58, 0xa0, 0x63, 0xf2, 0x5e, + 0xcb, 0x8b, 0xc8, 0x36, 0x09, 0x92, 0x38, 0x55, 0xac, 0x24, 0x34, 0xc6, 0x29, 0x5d, 0xd4, 0x80, + 0x11, 0x2e, 0x7f, 0x5e, 0x0b, 0x5b, 0x41, 0x12, 0xcf, 0x54, 0x58, 0x97, 0x73, 0x2c, 0x17, 0xb7, + 0xd2, 0x1a, 0x4b, 0xd3, 0x82, 0xfc, 0x88, 0x56, 0x18, 0x63, 0x83, 0x28, 0x7a, 0x0b, 0x46, 0x7d, + 0x6f, 0x87, 0x04, 0x24, 0x8e, 0x6b, 0x51, 0x78, 0x87, 0xcc, 0x00, 0x1b, 0xcd, 0x93, 0x79, 0x5a, + 0x7c, 0x78, 0x87, 0x2c, 0x4d, 0xee, 0xef, 0xcd, 0x8d, 0x5e, 0xd5, 0x6b, 0x63, 0x93, 0x18, 0x7a, + 0x1b, 0xc6, 0xa8, 0xb0, 0xeb, 0xa5, 0xe4, 0x87, 0x8b, 0x93, 0x47, 0xfb, 0x7b, 0x73, 0x63, 0xd8, + 0xa8, 0x8e, 0x33, 0xe4, 0xd0, 0x3a, 0x54, 0x7c, 0x6f, 0x83, 0x34, 0x76, 0x1b, 0x3e, 0x99, 0x19, + 0x61, 0xb4, 0x73, 0x36, 0xe7, 0x55, 0x89, 0xce, 0x15, 0x0c, 0xf5, 0x17, 0xa7, 0x84, 0xd0, 0x2d, + 0x38, 0x9d, 0x90, 0x68, 0xdb, 0x0b, 0x1c, 0xba, 0xa9, 0x84, 0xf4, 0xcb, 0x4c, 0x25, 0xa3, 0x6c, + 0xd5, 0x9e, 0x11, 0x13, 0x7b, 0x7a, 0xbd, 0x23, 0x16, 0xee, 0x52, 0x1b, 0xdd, 0x80, 0x71, 0xb6, + 0x9f, 0x6a, 0x2d, 0xdf, 0xaf, 0x85, 0xbe, 0xd7, 0xd8, 0x9d, 0x19, 0x63, 0x04, 0x9f, 0x92, 0x06, + 0x90, 0x35, 0x13, 0x4c, 0x15, 0xc3, 0xf4, 0x1f, 0xce, 0xd6, 0x46, 0x3e, 0x8c, 0xc7, 0xa4, 0xd1, + 0x8a, 0xbc, 0x64, 0x97, 0xae, 0x7d, 0x72, 0x3f, 0x99, 0x19, 0x2f, 0xa2, 0xe8, 0xd6, 0xcd, 0x4a, + 0xdc, 0xfa, 0x94, 0x29, 0xc4, 0x59, 0xd2, 0x94, 0x55, 0xc4, 0x89, 0xeb, 0x05, 0x33, 0x13, 0x8c, + 0x03, 0xa9, 0xfd, 0x55, 0xa7, 0x85, 0x98, 0xc3, 0x98, 0xfd, 0x80, 0xfe, 0xb8, 0x41, 0xb9, 0xf4, + 0x24, 0x43, 0x4c, 0xed, 0x07, 0x12, 0x80, 0x53, 0x1c, 0x2a, 0x1a, 0x24, 0xc9, 0xee, 0x0c, 0x62, + 0xa8, 0x6a, 0xab, 0xad, 0xaf, 0x7f, 0x06, 0xd3, 0x72, 0x74, 0x0b, 0x06, 0x49, 0xb0, 0xb3, 0x1a, + 0x85, 0xdb, 0x33, 0x53, 0x45, 0x78, 0xc0, 0x0a, 0x47, 0xe6, 0xe7, 0x47, 0xaa, 0xc2, 0x88, 0x62, + 0x2c, 0x89, 0xa1, 0xfb, 0x30, 0xd3, 0xe1, 0x2b, 0xf1, 0x8f, 0x32, 0xcd, 0x3e, 0xca, 0x27, 0x44, + 0xdd, 0x99, 0xf5, 0x2e, 0x78, 0x07, 0x87, 0xc0, 0x70, 0x57, 0xea, 0xf6, 0x1d, 0x18, 0x53, 0x8c, + 0x8a, 0x7d, 0x6f, 0x34, 0x07, 0xfd, 0x94, 0x17, 0x4b, 0x85, 0xbe, 0x42, 0x27, 0x95, 0xb2, 0xe8, + 0x18, 0xf3, 0x72, 0x36, 0xa9, 0xde, 0xe7, 0xc9, 0xd2, 0x6e, 0x42, 0xb8, 0x62, 0x57, 0xd6, 0x26, + 0x55, 0x02, 0x70, 0x8a, 0x63, 0xff, 0x1f, 0x2e, 0x26, 0xa5, 0xdc, 0xb0, 0xc0, 0x49, 
0x70, 0x1e, + 0x86, 0xb6, 0xc2, 0x38, 0xa1, 0xd8, 0xac, 0x8d, 0xfe, 0x54, 0x30, 0xba, 0x2c, 0xca, 0xb1, 0xc2, + 0x40, 0xaf, 0xc2, 0x68, 0x43, 0x6f, 0x40, 0x1c, 0x63, 0xa7, 0x44, 0x15, 0xb3, 0x75, 0x6c, 0xe2, + 0xa2, 0x8b, 0x30, 0xc4, 0xac, 0xdc, 0x8d, 0xd0, 0x17, 0x2a, 0xa4, 0x3c, 0x95, 0x87, 0x6a, 0xa2, + 0xfc, 0x40, 0xfb, 0x8d, 0x15, 0x36, 0x55, 0xc4, 0x69, 0x17, 0xd6, 0x6a, 0xe2, 0x00, 0x51, 0x8a, + 0xf8, 0x65, 0x56, 0x8a, 0x05, 0xd4, 0xfe, 0x17, 0x25, 0x6d, 0x96, 0xa9, 0x02, 0x44, 0xd0, 0x9b, + 0x30, 0x78, 0xcf, 0xf1, 0x12, 0x2f, 0xd8, 0x14, 0xd2, 0xc3, 0x8b, 0x05, 0x4f, 0x13, 0x56, 0xfd, + 0x36, 0xaf, 0xca, 0x4f, 0x3e, 0xf1, 0x07, 0x4b, 0x82, 0x94, 0x76, 0xd4, 0x0a, 0x02, 0x4a, 0xbb, + 0xd4, 0x3b, 0x6d, 0xcc, 0xab, 0x72, 0xda, 0xe2, 0x0f, 0x96, 0x04, 0xd1, 0x06, 0x80, 0x5c, 0x4b, + 0xc4, 0x15, 0xd6, 0xe5, 0x8f, 0xf6, 0x42, 0x7e, 0x5d, 0xd5, 0x5e, 0x1a, 0xa3, 0x67, 0x6d, 0xfa, + 0x1f, 0x6b, 0x94, 0xed, 0x84, 0x09, 0x61, 0xed, 0xdd, 0x42, 0x9f, 0xa5, 0x5b, 0xda, 0x89, 0x12, + 0xe2, 0x2e, 0x26, 0x59, 0x03, 0xfd, 0xe1, 0x22, 0xf6, 0xba, 0xb7, 0x4d, 0xf4, 0xed, 0x2f, 0x88, + 0xe0, 0x94, 0x9e, 0xfd, 0xdd, 0x32, 0xcc, 0x74, 0xeb, 0x2e, 0x5d, 0x92, 0xe4, 0xbe, 0x97, 0x2c, + 0x53, 0x31, 0xc9, 0x32, 0x97, 0xe4, 0x8a, 0x28, 0xc7, 0x0a, 0x83, 0xae, 0x8d, 0xd8, 0xdb, 0x94, + 0xca, 0x52, 0x7f, 0xba, 0x36, 0xea, 0xac, 0x14, 0x0b, 0x28, 0xc5, 0x8b, 0x88, 0x13, 0x8b, 0xcb, + 0x0d, 0x6d, 0x0d, 0x61, 0x56, 0x8a, 0x05, 0x54, 0x37, 0x88, 0xf4, 0xe5, 0x18, 0x44, 0x8c, 0x29, + 0xea, 0x7f, 0xb0, 0x53, 0x84, 0x3e, 0x07, 0xb0, 0xe1, 0x05, 0x5e, 0xbc, 0xc5, 0xa8, 0x0f, 0xf4, + 0x4c, 0x5d, 0x09, 0x59, 0xab, 0x8a, 0x0a, 0xd6, 0x28, 0xa2, 0x97, 0x61, 0x58, 0x6d, 0xcf, 0xb5, + 0xea, 0xcc, 0xa0, 0x69, 0x10, 0x4f, 0x79, 0x55, 0x15, 0xeb, 0x78, 0xf6, 0xbb, 0xd9, 0xf5, 0x22, + 0x76, 0x85, 0x36, 0xbf, 0x56, 0xd1, 0xf9, 0x2d, 0x1d, 0x3e, 0xbf, 0xf6, 0x7f, 0x2e, 0xc3, 0xb8, + 0xd1, 0x58, 0x2b, 0x2e, 0xc0, 0xd1, 0x5e, 0xa7, 0x07, 0x96, 0x93, 0x10, 0xb1, 0x27, 0xcf, 0xf7, + 0xb2, 0x69, 0xf4, 0xe3, 0x8d, 0xee, 0x05, 0x4e, 0x09, 0x6d, 0x41, 0xc5, 0x77, 0x62, 0x66, 0x52, + 0x21, 0x62, 0x2f, 0xf6, 0x46, 0x36, 0x55, 0x3f, 0x9c, 0x38, 0xd1, 0x4e, 0x0f, 0xde, 0x4a, 0x4a, + 0x9c, 0x9e, 0xb6, 0x54, 0xd8, 0x91, 0x37, 0x6a, 0xaa, 0x3b, 0x54, 0x22, 0xda, 0xc5, 0x1c, 0x86, + 0x2e, 0xc2, 0x48, 0x44, 0xd8, 0x4a, 0x59, 0xa6, 0xf2, 0x1c, 0x5b, 0x7a, 0xfd, 0xa9, 0xe0, 0x87, + 0x35, 0x18, 0x36, 0x30, 0x53, 0xb9, 0x7f, 0xe0, 0x10, 0xb9, 0xff, 0x19, 0x18, 0x64, 0x3f, 0xd4, + 0xaa, 0x50, 0x5f, 0x68, 0x8d, 0x17, 0x63, 0x09, 0xcf, 0x2e, 0xa2, 0xa1, 0x82, 0x8b, 0xe8, 0x59, + 0x18, 0xab, 0x3a, 0x64, 0x3b, 0x0c, 0x56, 0x02, 0xb7, 0x19, 0x7a, 0x41, 0x82, 0x66, 0xa0, 0x8f, + 0x9d, 0x27, 0x7c, 0xbf, 0xf7, 0x51, 0x0a, 0xb8, 0x8f, 0xca, 0xee, 0xf6, 0x9f, 0x94, 0x60, 0xb4, + 0x4a, 0x7c, 0x92, 0x10, 0xae, 0xf7, 0xc4, 0x68, 0x15, 0xd0, 0x66, 0xe4, 0x34, 0x48, 0x8d, 0x44, + 0x5e, 0xe8, 0xd6, 0x49, 0x23, 0x0c, 0xd8, 0x45, 0x14, 0x3d, 0x20, 0x4f, 0xef, 0xef, 0xcd, 0xa1, + 0x4b, 0x6d, 0x50, 0xdc, 0xa1, 0x06, 0x72, 0x61, 0xb4, 0x19, 0x11, 0xc3, 0x6e, 0x68, 0xe5, 0x8b, + 0x1a, 0x35, 0xbd, 0x0a, 0x97, 0x86, 0x8d, 0x22, 0x6c, 0x12, 0x45, 0x9f, 0x82, 0x89, 0x30, 0x6a, + 0x6e, 0x39, 0x41, 0x95, 0x34, 0x49, 0xe0, 0x52, 0x15, 0x40, 0x58, 0x3b, 0xa6, 0xf7, 0xf7, 0xe6, + 0x26, 0x6e, 0x64, 0x60, 0xb8, 0x0d, 0x1b, 0xbd, 0x09, 0x93, 0xcd, 0x28, 0x6c, 0x3a, 0x9b, 0x6c, + 0xc9, 0x08, 0x69, 0x85, 0xf3, 0xa6, 0xf3, 0xfb, 0x7b, 0x73, 0x93, 0xb5, 0x2c, 0xf0, 0x60, 0x6f, + 0x6e, 0x8a, 0x4d, 0x19, 0x2d, 0x49, 0x81, 0xb8, 0x9d, 0x8c, 0xfd, 0x1e, 0x9c, 0xaa, 0x86, 0xf7, + 0x82, 0x7b, 
0x4e, 0xe4, 0x2e, 0xd6, 0xd6, 0x34, 0xe3, 0xc4, 0x1b, 0x52, 0xf9, 0xe5, 0x17, 0x7c, + 0x39, 0x27, 0x9b, 0x46, 0x83, 0xab, 0x1d, 0xab, 0x9e, 0x4f, 0xba, 0x98, 0x43, 0xfe, 0x49, 0xc9, + 0x68, 0x33, 0xc5, 0x57, 0x77, 0x17, 0x56, 0xd7, 0xbb, 0x8b, 0xcf, 0xc2, 0xd0, 0x86, 0x47, 0x7c, + 0x17, 0x93, 0x0d, 0xf1, 0xb5, 0x5e, 0x28, 0x72, 0xb9, 0xb3, 0x4a, 0xeb, 0x48, 0xeb, 0x18, 0x57, + 0xa2, 0x57, 0x05, 0x19, 0xac, 0x08, 0xa2, 0x16, 0x4c, 0x48, 0x3d, 0x4c, 0x42, 0xc5, 0x66, 0x7f, + 0xb1, 0x98, 0x9a, 0x67, 0x36, 0xc3, 0x3e, 0x2f, 0xce, 0x10, 0xc4, 0x6d, 0x4d, 0x50, 0xfd, 0x79, + 0x9b, 0x1e, 0x75, 0x7d, 0x6c, 0xe9, 0x33, 0xfd, 0x99, 0x99, 0x02, 0x58, 0xa9, 0xfd, 0x6b, 0x16, + 0x3c, 0xd2, 0x36, 0x5b, 0xc2, 0x4e, 0x72, 0x6c, 0xdf, 0x28, 0x6b, 0xac, 0x28, 0xe5, 0x1b, 0x2b, + 0xec, 0x1b, 0x30, 0xbd, 0xb2, 0xdd, 0x4c, 0x76, 0xab, 0x9e, 0x79, 0xe5, 0xf2, 0x0a, 0x0c, 0x6c, + 0x13, 0xd7, 0x6b, 0x6d, 0x8b, 0xcf, 0x3a, 0x27, 0xcf, 0x85, 0x6b, 0xac, 0xf4, 0x60, 0x6f, 0x6e, + 0xb4, 0x9e, 0x84, 0x91, 0xb3, 0x49, 0x78, 0x01, 0x16, 0xe8, 0xf6, 0x8f, 0x2d, 0x18, 0x97, 0xfc, + 0x61, 0xd1, 0x75, 0x23, 0x12, 0xc7, 0x68, 0x16, 0x4a, 0x5e, 0x53, 0x10, 0x02, 0x41, 0xa8, 0xb4, + 0x56, 0xc3, 0x25, 0xaf, 0x89, 0xde, 0x84, 0x0a, 0xbf, 0xa9, 0x4b, 0x17, 0x47, 0x8f, 0x37, 0x7f, + 0x4c, 0x37, 0x5c, 0x97, 0x34, 0x70, 0x4a, 0x4e, 0x4a, 0xc9, 0xec, 0xe4, 0x29, 0x9b, 0xf7, 0x46, + 0x97, 0x45, 0x39, 0x56, 0x18, 0xe8, 0x1c, 0x0c, 0x05, 0xa1, 0xcb, 0x2f, 0x53, 0xf9, 0x3e, 0x65, + 0x4b, 0xee, 0xba, 0x28, 0xc3, 0x0a, 0x6a, 0x7f, 0xd5, 0x82, 0x11, 0x39, 0xc6, 0x82, 0x02, 0x3b, + 0xdd, 0x24, 0xa9, 0xb0, 0x9e, 0x6e, 0x12, 0x2a, 0x70, 0x33, 0x88, 0x21, 0x67, 0x97, 0x7b, 0x91, + 0xb3, 0xed, 0xdf, 0x2c, 0xc1, 0x98, 0xec, 0x4e, 0xbd, 0x75, 0x27, 0x26, 0x54, 0x0c, 0xa9, 0x38, + 0x7c, 0xf2, 0x89, 0x5c, 0x67, 0xcf, 0xe7, 0xe9, 0x62, 0xc6, 0x37, 0x4b, 0xc5, 0x9c, 0x45, 0x49, + 0x07, 0xa7, 0x24, 0xd1, 0x0e, 0x4c, 0x06, 0x61, 0xc2, 0x8e, 0x37, 0x05, 0x2f, 0x76, 0xd3, 0x91, + 0x6d, 0xe7, 0x51, 0xd1, 0xce, 0xe4, 0xf5, 0x2c, 0x3d, 0xdc, 0xde, 0x04, 0xba, 0x21, 0x6d, 0x4c, + 0x65, 0xd6, 0xd6, 0xb3, 0xc5, 0xda, 0xea, 0x6e, 0x62, 0xb2, 0x7f, 0xcf, 0x82, 0x8a, 0x44, 0x3b, + 0x89, 0x2b, 0xaf, 0xdb, 0x30, 0x18, 0xb3, 0x4f, 0x24, 0xa7, 0xeb, 0x7c, 0xb1, 0x21, 0xf0, 0xef, + 0x9a, 0x9e, 0xe9, 0xfc, 0x7f, 0x8c, 0x25, 0x35, 0x66, 0x6c, 0x57, 0x03, 0x79, 0xe8, 0x8c, 0xed, + 0xaa, 0x67, 0xdd, 0x6f, 0xb6, 0x46, 0x0d, 0x6b, 0x00, 0x15, 0x4c, 0x9b, 0x11, 0xd9, 0xf0, 0xee, + 0x67, 0x05, 0xd3, 0x1a, 0x2b, 0xc5, 0x02, 0x8a, 0x36, 0x60, 0xa4, 0x21, 0xcd, 0xd1, 0x29, 0x0b, + 0xf9, 0x48, 0x41, 0xdb, 0xbf, 0xba, 0x46, 0xe2, 0xae, 0x49, 0xcb, 0x1a, 0x25, 0x6c, 0xd0, 0xa5, + 0x7c, 0x2a, 0xbd, 0x29, 0x2f, 0x17, 0x34, 0xdc, 0x44, 0x24, 0x49, 0x5b, 0xe8, 0x7a, 0x49, 0x6e, + 0x7f, 0xd3, 0x82, 0x01, 0x6e, 0xbf, 0x2c, 0x66, 0x04, 0xd6, 0x2e, 0xc8, 0xd2, 0xf9, 0xbc, 0x45, + 0x0b, 0xc5, 0x7d, 0x19, 0xba, 0x0d, 0x15, 0xf6, 0x83, 0xd9, 0x62, 0xca, 0x45, 0xfc, 0xb4, 0x78, + 0xfb, 0x7a, 0x57, 0x6f, 0x49, 0x02, 0x38, 0xa5, 0x65, 0x7f, 0xbf, 0x4c, 0x59, 0x5f, 0x8a, 0x6a, + 0x9c, 0xed, 0xd6, 0x49, 0x9c, 0xed, 0xa5, 0xe3, 0x3f, 0xdb, 0xdf, 0x83, 0xf1, 0x86, 0x76, 0x41, + 0x97, 0x7e, 0xf1, 0x0b, 0x05, 0x97, 0x95, 0x76, 0xab, 0xc7, 0xed, 0x75, 0xcb, 0x26, 0x39, 0x9c, + 0xa5, 0x8f, 0x08, 0x8c, 0xf0, 0xf5, 0x20, 0xda, 0xeb, 0x63, 0xed, 0x2d, 0x14, 0x59, 0x61, 0x7a, + 0x63, 0x6c, 0x15, 0xd7, 0x35, 0x42, 0xd8, 0x20, 0x6b, 0xff, 0x4a, 0x3f, 0xf4, 0xaf, 0xec, 0x90, + 0x20, 0x39, 0x01, 0x56, 0xb7, 0x0d, 0x63, 0x5e, 0xb0, 0x13, 0xfa, 0x3b, 0xc4, 0xe5, 0xf0, 0xa3, + 0x1d, 0xef, 0xa7, 0x45, 0x23, 0x63, 
0x6b, 0x06, 0x31, 0x9c, 0x21, 0x7e, 0x1c, 0x96, 0x82, 0xd7, + 0x61, 0x80, 0xaf, 0x0c, 0x61, 0x26, 0xc8, 0xb1, 0xe7, 0xb3, 0x89, 0x15, 0x3b, 0x28, 0xb5, 0x67, + 0xf0, 0xab, 0x04, 0x41, 0x08, 0xbd, 0x0b, 0x63, 0x1b, 0x5e, 0x14, 0x27, 0x54, 0xd9, 0x8f, 0x13, + 0x67, 0xbb, 0x79, 0x04, 0x1b, 0x81, 0x9a, 0x91, 0x55, 0x83, 0x12, 0xce, 0x50, 0x46, 0x9b, 0x30, + 0x4a, 0x55, 0xd4, 0xb4, 0xa9, 0xc1, 0x9e, 0x9b, 0x52, 0x26, 0xc2, 0xab, 0x3a, 0x21, 0x6c, 0xd2, + 0xa5, 0x2c, 0xa9, 0xc1, 0x54, 0xda, 0x21, 0x26, 0xdd, 0x28, 0x96, 0xc4, 0x75, 0x59, 0x0e, 0xa3, + 0x9c, 0x8d, 0x79, 0xca, 0x54, 0x4c, 0xce, 0x96, 0xfa, 0xc3, 0xd8, 0xdf, 0xa6, 0x67, 0x31, 0x9d, + 0xc3, 0x13, 0x38, 0xbe, 0x2e, 0x9b, 0xc7, 0xd7, 0x93, 0x05, 0xbe, 0x6c, 0x97, 0xa3, 0xeb, 0x1d, + 0x18, 0xd6, 0x3e, 0x3c, 0x5a, 0x80, 0x4a, 0x43, 0x3a, 0x73, 0x08, 0x2e, 0xae, 0x44, 0x29, 0xe5, + 0xe5, 0x81, 0x53, 0x1c, 0x3a, 0x2f, 0x54, 0x04, 0xcd, 0xba, 0x7e, 0x51, 0x01, 0x15, 0x33, 0x88, + 0xfd, 0x22, 0xc0, 0xca, 0x7d, 0xd2, 0x58, 0xe4, 0x2a, 0x9e, 0x76, 0xbf, 0x67, 0x75, 0xbf, 0xdf, + 0xb3, 0xbf, 0x65, 0xc1, 0xd8, 0xea, 0xb2, 0x21, 0xd3, 0xcf, 0x03, 0x70, 0xd9, 0xf8, 0xf6, 0xed, + 0xeb, 0xd2, 0x7e, 0xcd, 0x8d, 0x8c, 0xaa, 0x14, 0x6b, 0x18, 0xe8, 0x51, 0x28, 0xfb, 0xad, 0x40, + 0x88, 0xac, 0x83, 0xfb, 0x7b, 0x73, 0xe5, 0xab, 0xad, 0x00, 0xd3, 0x32, 0xcd, 0xc7, 0xaa, 0x5c, + 0xd8, 0xc7, 0x2a, 0xdf, 0xdb, 0xf8, 0xeb, 0x65, 0x98, 0x58, 0xf5, 0xc9, 0x7d, 0xa3, 0xd7, 0x4f, + 0xc3, 0x80, 0x1b, 0x79, 0x3b, 0x24, 0xca, 0x0a, 0x02, 0x55, 0x56, 0x8a, 0x05, 0xb4, 0xb0, 0xdb, + 0xd7, 0xdb, 0xed, 0x07, 0xf9, 0xf1, 0xb9, 0xbc, 0xe5, 0x8e, 0x19, 0x6d, 0xc0, 0x20, 0xbf, 0x0f, + 0x8e, 0x67, 0xfa, 0xd9, 0x52, 0x7c, 0xf5, 0xf0, 0xce, 0x64, 0xe7, 0x67, 0x5e, 0xd8, 0x57, 0xb8, + 0xc3, 0x8d, 0xe2, 0x65, 0xa2, 0x14, 0x4b, 0xe2, 0xb3, 0x1f, 0x87, 0x11, 0x1d, 0xb3, 0x27, 0xcf, + 0x9b, 0xbf, 0x6a, 0xc1, 0xd4, 0xaa, 0x1f, 0x36, 0xee, 0x66, 0xfc, 0xf2, 0x5e, 0x86, 0x61, 0xba, + 0x99, 0x62, 0xc3, 0x69, 0xd5, 0xf0, 0xce, 0x15, 0x20, 0xac, 0xe3, 0x69, 0xd5, 0x6e, 0xde, 0x5c, + 0xab, 0x76, 0x72, 0xea, 0x15, 0x20, 0xac, 0xe3, 0xd9, 0x7f, 0x60, 0xc1, 0xe3, 0x97, 0x96, 0x57, + 0x6a, 0x24, 0x8a, 0xbd, 0x38, 0x21, 0x41, 0xd2, 0xe6, 0x57, 0x4c, 0x65, 0x46, 0x57, 0xeb, 0x4a, + 0x2a, 0x33, 0x56, 0x59, 0x2f, 0x04, 0xf4, 0x61, 0x71, 0xae, 0xff, 0xa6, 0x05, 0x53, 0x97, 0xbc, + 0x04, 0x93, 0x66, 0x98, 0x75, 0x05, 0x8e, 0x48, 0x33, 0x8c, 0xbd, 0x24, 0x8c, 0x76, 0xb3, 0xae, + 0xc0, 0x58, 0x41, 0xb0, 0x86, 0xc5, 0x5b, 0xde, 0xf1, 0x62, 0xda, 0xd3, 0x92, 0xa9, 0xea, 0x62, + 0x51, 0x8e, 0x15, 0x06, 0x1d, 0x98, 0xeb, 0x45, 0x4c, 0x64, 0xd8, 0x15, 0x3b, 0x58, 0x0d, 0xac, + 0x2a, 0x01, 0x38, 0xc5, 0xb1, 0xff, 0x9e, 0x05, 0xa7, 0x2e, 0xf9, 0xad, 0x38, 0x21, 0xd1, 0x46, + 0x6c, 0x74, 0xf6, 0x45, 0xa8, 0x10, 0x29, 0xdc, 0x8b, 0xbe, 0xaa, 0x43, 0x43, 0x49, 0xfd, 0xdc, + 0x0f, 0x59, 0xe1, 0x15, 0x70, 0x77, 0xed, 0xcd, 0x39, 0xf3, 0xb7, 0x4b, 0x30, 0x7a, 0x79, 0x7d, + 0xbd, 0x76, 0x89, 0x24, 0x82, 0x4b, 0xe6, 0x1b, 0xa5, 0xb0, 0xa6, 0x91, 0x1f, 0x26, 0xfc, 0xb4, + 0x12, 0xcf, 0x9f, 0xe7, 0xe1, 0x22, 0xf3, 0x6b, 0x41, 0x72, 0x23, 0xaa, 0x27, 0x91, 0x17, 0x6c, + 0x76, 0xd4, 0xe1, 0x25, 0x2f, 0x2f, 0x77, 0xe3, 0xe5, 0xe8, 0x45, 0x18, 0x60, 0xf1, 0x2a, 0x52, + 0xf8, 0xf8, 0xa0, 0x92, 0x13, 0x58, 0xe9, 0xc1, 0xde, 0x5c, 0xe5, 0x26, 0x5e, 0xe3, 0x7f, 0xb0, + 0x40, 0x45, 0x6f, 0xc3, 0xf0, 0x56, 0x92, 0x34, 0x2f, 0x13, 0xc7, 0x25, 0x91, 0xe4, 0x13, 0xe7, + 0x0e, 0xe7, 0x13, 0x74, 0x3a, 0x78, 0x85, 0x74, 0x6b, 0xa5, 0x65, 0x31, 0xd6, 0x29, 0xda, 0x75, + 0x80, 0x14, 0xf6, 0x80, 0x74, 0x10, 0xfb, 0xe7, 0x4b, 0x30, 
0x78, 0xd9, 0x09, 0x5c, 0x9f, 0x44, + 0x68, 0x15, 0xfa, 0xc8, 0x7d, 0xd2, 0x10, 0x07, 0x79, 0x4e, 0xd7, 0xd3, 0xc3, 0x8e, 0xdb, 0xd5, + 0xe8, 0x7f, 0xcc, 0xea, 0x23, 0x0c, 0x83, 0xb4, 0xdf, 0x97, 0x94, 0x97, 0xf8, 0x73, 0xf9, 0xb3, + 0xa0, 0x16, 0x05, 0x3f, 0x29, 0x45, 0x11, 0x96, 0x84, 0x98, 0x05, 0xaa, 0xd1, 0xac, 0x53, 0xf6, + 0x96, 0x14, 0xd3, 0xec, 0xd6, 0x97, 0x6b, 0x1c, 0x5d, 0xd0, 0xe5, 0x16, 0x28, 0x59, 0x88, 0x53, + 0x72, 0xf6, 0x45, 0x98, 0x66, 0xf7, 0xb1, 0x4e, 0xb2, 0x65, 0xec, 0x9a, 0xdc, 0xe5, 0x69, 0xff, + 0xb0, 0x04, 0x93, 0x6b, 0xf5, 0xe5, 0xba, 0x69, 0x3b, 0xbc, 0x08, 0x23, 0xfc, 0x80, 0xa6, 0x8b, + 0xce, 0xf1, 0x45, 0x7d, 0x75, 0x87, 0xb0, 0xae, 0xc1, 0xb0, 0x81, 0x89, 0x1e, 0x87, 0xb2, 0xf7, + 0x5e, 0x90, 0xf5, 0xea, 0x5b, 0x7b, 0xfd, 0x3a, 0xa6, 0xe5, 0x14, 0x4c, 0xcf, 0x7a, 0xce, 0xe4, + 0x14, 0x58, 0x9d, 0xf7, 0xaf, 0xc1, 0x98, 0x17, 0x37, 0x62, 0x6f, 0x2d, 0xa0, 0x1c, 0xc0, 0x69, + 0xc8, 0xe5, 0x9b, 0x0a, 0xe7, 0xb4, 0xab, 0x0a, 0x8a, 0x33, 0xd8, 0x1a, 0xc7, 0xed, 0x2f, 0x2c, + 0x2f, 0xe4, 0xba, 0x8b, 0x53, 0x51, 0xa8, 0xc9, 0x46, 0x17, 0x33, 0x1f, 0x21, 0x21, 0x0a, 0xf1, + 0x01, 0xc7, 0x58, 0xc2, 0xec, 0x77, 0xa1, 0xa2, 0xdc, 0xbc, 0xa4, 0x77, 0xa3, 0xd5, 0xc5, 0xbb, + 0x31, 0x9f, 0x33, 0x49, 0xc3, 0x6f, 0xb9, 0xa3, 0xe1, 0xf7, 0x9f, 0x59, 0x90, 0xfa, 0xa9, 0x20, + 0x0c, 0x95, 0x66, 0xc8, 0x2e, 0x89, 0x22, 0x79, 0x1b, 0xfb, 0x54, 0xce, 0x82, 0xe5, 0x1b, 0x86, + 0x2f, 0xa9, 0x9a, 0xac, 0x8b, 0x53, 0x32, 0xe8, 0x2a, 0x0c, 0x36, 0x23, 0x52, 0x4f, 0x58, 0x68, + 0x42, 0x0f, 0x14, 0xf9, 0xdc, 0xf0, 0x9a, 0x58, 0x92, 0xb0, 0xff, 0x95, 0x05, 0x70, 0xd5, 0xdb, + 0xf6, 0x12, 0xec, 0x04, 0x9b, 0xe4, 0x04, 0xb4, 0xc2, 0xeb, 0xd0, 0x17, 0x37, 0x49, 0xa3, 0xd8, + 0x35, 0x5f, 0xda, 0xb3, 0x7a, 0x93, 0x34, 0xd2, 0xcf, 0x41, 0xff, 0x61, 0x46, 0xc7, 0xfe, 0x1e, + 0xc0, 0x58, 0x8a, 0x46, 0x25, 0x73, 0xf4, 0xbc, 0xe1, 0x93, 0xff, 0x68, 0xc6, 0x27, 0xbf, 0xc2, + 0xb0, 0x35, 0x37, 0xfc, 0x04, 0xca, 0xdb, 0xce, 0x7d, 0xa1, 0x08, 0xbc, 0x5c, 0xb4, 0x43, 0xb4, + 0xa5, 0xf9, 0x6b, 0xce, 0x7d, 0x2e, 0x77, 0x3d, 0x27, 0x17, 0xd2, 0x35, 0xe7, 0xfe, 0x01, 0xbf, + 0xcc, 0x63, 0x1b, 0x96, 0x6a, 0x1e, 0x5f, 0xfa, 0xd3, 0xf4, 0x3f, 0xe3, 0xa1, 0xb4, 0x39, 0xd6, + 0xaa, 0x17, 0x08, 0x3b, 0x66, 0x8f, 0xad, 0x7a, 0x41, 0xb6, 0x55, 0x2f, 0x28, 0xd0, 0xaa, 0xc7, + 0x9c, 0x57, 0x07, 0x85, 0xf9, 0x9f, 0x79, 0xfe, 0x0d, 0x5f, 0xf8, 0x58, 0x4f, 0x4d, 0x8b, 0x7b, + 0x04, 0xde, 0xfc, 0x82, 0x14, 0x36, 0x45, 0x69, 0x6e, 0x17, 0x64, 0xd3, 0xe8, 0xef, 0x5b, 0x30, + 0x26, 0x7e, 0x63, 0xf2, 0x5e, 0x8b, 0xc4, 0x89, 0x38, 0xd4, 0x3e, 0x75, 0x94, 0xde, 0x08, 0x12, + 0xbc, 0x53, 0x1f, 0x95, 0x1c, 0xc9, 0x04, 0xe6, 0xf6, 0x2d, 0xd3, 0x1f, 0xf4, 0x3d, 0x0b, 0xa6, + 0xb7, 0x9d, 0xfb, 0xbc, 0x45, 0x5e, 0x86, 0x9d, 0xc4, 0x0b, 0x85, 0x77, 0xe3, 0x6a, 0xaf, 0xeb, + 0xa4, 0x8d, 0x10, 0xef, 0xae, 0x74, 0x5c, 0x9a, 0xee, 0x84, 0x92, 0xdb, 0xe9, 0x8e, 0x3d, 0x9c, + 0xdd, 0x80, 0x21, 0xb9, 0x30, 0x3b, 0x88, 0xf9, 0x55, 0xfd, 0xec, 0xce, 0x51, 0xaa, 0xe7, 0xa5, + 0x69, 0x6c, 0xfe, 0xf5, 0x96, 0x13, 0x24, 0x5e, 0xb2, 0xab, 0xa9, 0x05, 0xac, 0x1d, 0xb1, 0x14, + 0x8f, 0xb5, 0x9d, 0x77, 0x61, 0x44, 0x5f, 0x77, 0xc7, 0xda, 0xd6, 0x7b, 0x30, 0xd5, 0x61, 0x55, + 0x1d, 0x6b, 0x93, 0xf7, 0xe0, 0xd1, 0xae, 0xeb, 0xe3, 0x38, 0x1b, 0xb6, 0x7f, 0xdb, 0xd2, 0x59, + 0xe7, 0x09, 0x18, 0x5d, 0xae, 0x99, 0x46, 0x97, 0x73, 0x45, 0xf7, 0x50, 0x17, 0xcb, 0xcb, 0x86, + 0xde, 0x7d, 0x7a, 0x24, 0xa0, 0x75, 0x18, 0xf0, 0x69, 0x89, 0xbc, 0xf3, 0x3a, 0xdf, 0xcb, 0x2e, + 0x4d, 0x85, 0x12, 0x56, 0x1e, 0x63, 0x41, 0xcb, 0xfe, 0x9e, 0x05, 0x7d, 0x7f, 0x89, 
0x11, 0x43, + 0x6d, 0xa4, 0x45, 0xe0, 0xfb, 0x3c, 0x76, 0xee, 0xad, 0xdc, 0x4f, 0x48, 0x10, 0x33, 0x19, 0xb4, + 0xdb, 0xad, 0xfd, 0x30, 0x6d, 0x4a, 0x3a, 0x61, 0xbc, 0x0a, 0xa3, 0xbe, 0x73, 0x87, 0xf8, 0xd2, + 0x60, 0x9c, 0xd5, 0xd8, 0xae, 0xea, 0x40, 0x6c, 0xe2, 0xd2, 0xca, 0x1b, 0xba, 0x3d, 0x5d, 0x08, + 0x49, 0xaa, 0xb2, 0x61, 0x6c, 0xc7, 0x26, 0x2e, 0x55, 0x19, 0xee, 0x39, 0x49, 0x63, 0x4b, 0x68, + 0x73, 0xaa, 0xbb, 0xb7, 0x69, 0x21, 0xe6, 0x30, 0xb4, 0x08, 0xe3, 0x72, 0xc5, 0xde, 0xa2, 0x6a, + 0x7e, 0x18, 0x08, 0x39, 0x53, 0x45, 0x1d, 0x63, 0x13, 0x8c, 0xb3, 0xf8, 0xe8, 0xe3, 0x30, 0x46, + 0x27, 0x27, 0x6c, 0x25, 0xd2, 0xc5, 0xa4, 0x9f, 0xb9, 0x98, 0x30, 0x0f, 0xe5, 0x75, 0x03, 0x82, + 0x33, 0x98, 0xf6, 0xdb, 0x30, 0x75, 0x35, 0x74, 0xdc, 0x25, 0xc7, 0x77, 0x82, 0x06, 0x89, 0xd6, + 0x82, 0xcd, 0xdc, 0xeb, 0x6b, 0xfd, 0x8a, 0xb9, 0x94, 0x77, 0xc5, 0x6c, 0x47, 0x80, 0xf4, 0x06, + 0x84, 0x73, 0xd4, 0x5b, 0x30, 0xe8, 0xf1, 0xa6, 0xc4, 0xb2, 0x7d, 0x21, 0xcf, 0x1e, 0xd5, 0xd6, + 0x47, 0xcd, 0xd9, 0x87, 0x17, 0x60, 0x49, 0x92, 0xaa, 0x20, 0x9d, 0x0c, 0x58, 0xf9, 0x5a, 0x9e, + 0xfd, 0xd7, 0x2d, 0x18, 0xbf, 0x9e, 0x09, 0x69, 0x7d, 0x1a, 0x06, 0x78, 0x62, 0x84, 0xac, 0x89, + 0xa5, 0xce, 0x4a, 0xb1, 0x80, 0x3e, 0x70, 0x0d, 0xff, 0x97, 0x4b, 0x50, 0x61, 0x6e, 0xb6, 0x4d, + 0xaa, 0x4e, 0x1c, 0xbf, 0x98, 0x7a, 0xcd, 0x10, 0x53, 0x73, 0xb4, 0x4c, 0xd5, 0xb1, 0x6e, 0x52, + 0x2a, 0xba, 0xa9, 0x42, 0x3d, 0x0b, 0x29, 0x98, 0x29, 0x41, 0x1e, 0x0e, 0x38, 0x66, 0x46, 0x86, + 0xca, 0x30, 0x50, 0x76, 0xe9, 0xab, 0x70, 0x1f, 0xba, 0x4b, 0x5f, 0xd5, 0xb3, 0x2e, 0xcc, 0xa9, + 0xa6, 0x75, 0x9e, 0xb1, 0xef, 0x4f, 0x32, 0xe7, 0x49, 0xc7, 0xf7, 0x3e, 0x4f, 0x54, 0xc4, 0xf4, + 0x9c, 0x70, 0x86, 0x14, 0xa5, 0x07, 0x8c, 0xcf, 0x88, 0x7f, 0x3c, 0x20, 0x3e, 0xad, 0x62, 0x5f, + 0x86, 0xf1, 0xcc, 0xd4, 0xa1, 0x97, 0xa1, 0xbf, 0xb9, 0xe5, 0xc4, 0x24, 0xe3, 0xc7, 0xd2, 0x5f, + 0xa3, 0x85, 0x07, 0x7b, 0x73, 0x63, 0xaa, 0x02, 0x2b, 0xc1, 0x1c, 0xdb, 0xfe, 0x72, 0x09, 0xfa, + 0xae, 0x87, 0xee, 0x49, 0x2c, 0xb5, 0xcb, 0xc6, 0x52, 0x7b, 0x3a, 0x3f, 0x9d, 0x46, 0xd7, 0x55, + 0x56, 0xcb, 0xac, 0xb2, 0x73, 0x05, 0x68, 0x1d, 0xbe, 0xc0, 0xb6, 0x61, 0x98, 0xa5, 0xeb, 0x10, + 0x8e, 0x3c, 0x2f, 0x1a, 0x9a, 0xd5, 0x5c, 0x46, 0xb3, 0x1a, 0xd7, 0x50, 0x35, 0xfd, 0xea, 0x19, + 0x18, 0x14, 0x8e, 0x23, 0x59, 0xd7, 0x51, 0x81, 0x8b, 0x25, 0xdc, 0xfe, 0x97, 0x65, 0x30, 0xd2, + 0x83, 0xa0, 0xdf, 0xb3, 0x60, 0x3e, 0xe2, 0x61, 0x38, 0x6e, 0xb5, 0x15, 0x79, 0xc1, 0x66, 0xbd, + 0xb1, 0x45, 0xdc, 0x96, 0xef, 0x05, 0x9b, 0x6b, 0x9b, 0x41, 0xa8, 0x8a, 0x57, 0xee, 0x93, 0x46, + 0x8b, 0xd9, 0x69, 0x0b, 0x67, 0x25, 0x51, 0x97, 0xa6, 0x17, 0xf6, 0xf7, 0xe6, 0xe6, 0x71, 0x4f, + 0xad, 0xe0, 0x1e, 0x7b, 0x85, 0xfe, 0xd8, 0x82, 0x05, 0x9e, 0x20, 0xa3, 0xf8, 0x48, 0x0a, 0x69, + 0xa4, 0x35, 0x49, 0x34, 0x25, 0xb7, 0x4e, 0xa2, 0xed, 0xa5, 0x57, 0xc4, 0x24, 0x2f, 0xd4, 0x7a, + 0x6b, 0x15, 0xf7, 0xda, 0x4d, 0xfb, 0xdf, 0x94, 0x61, 0x94, 0xce, 0x67, 0x1a, 0x14, 0xff, 0xb2, + 0xb1, 0x4c, 0x9e, 0xc8, 0x2c, 0x93, 0x49, 0x03, 0xf9, 0xc1, 0xc4, 0xc3, 0xc7, 0x30, 0xe9, 0x3b, + 0x71, 0x72, 0x99, 0x38, 0x51, 0x72, 0x87, 0x38, 0xec, 0x6e, 0x32, 0xeb, 0xf7, 0x50, 0xe0, 0xba, + 0x53, 0x39, 0x23, 0x5d, 0xcd, 0x12, 0xc3, 0xed, 0xf4, 0xd1, 0x0e, 0x20, 0x76, 0x0f, 0x1a, 0x39, + 0x41, 0xcc, 0xc7, 0xe2, 0x09, 0xbb, 0x6e, 0x6f, 0xad, 0xce, 0x8a, 0x56, 0xd1, 0xd5, 0x36, 0x6a, + 0xb8, 0x43, 0x0b, 0xda, 0x4d, 0x77, 0x7f, 0xd1, 0x9b, 0xee, 0x81, 0x1c, 0x9f, 0xed, 0xaf, 0x58, + 0x30, 0x45, 0x3f, 0x8b, 0xe9, 0xdf, 0x1b, 0xa3, 0x10, 0xc6, 0xe9, 0xb2, 0xf3, 0x49, 0x22, 0xcb, + 0xc4, 0xfe, 
0xca, 0x91, 0xac, 0x4d, 0x3a, 0xa9, 0xf8, 0x76, 0xc5, 0x24, 0x86, 0xb3, 0xd4, 0xed, + 0x6f, 0x59, 0xc0, 0x3c, 0xee, 0x4e, 0xe0, 0x30, 0xbb, 0x64, 0x1e, 0x66, 0x76, 0x3e, 0xc7, 0xe8, + 0x72, 0x8e, 0xbd, 0x04, 0x13, 0x14, 0x5a, 0x8b, 0xc2, 0xfb, 0xbb, 0x52, 0xd0, 0xce, 0x37, 0xf0, + 0x7e, 0xa5, 0xc4, 0xb7, 0x8d, 0x8a, 0x27, 0x44, 0xbf, 0x60, 0xc1, 0x50, 0xc3, 0x69, 0x3a, 0x0d, + 0x9e, 0x5c, 0xa9, 0x80, 0x75, 0xc6, 0xa8, 0x3f, 0xbf, 0x2c, 0xea, 0x72, 0xcb, 0xc2, 0x47, 0xe4, + 0xd0, 0x65, 0x71, 0xae, 0x35, 0x41, 0x35, 0x3e, 0x7b, 0x17, 0x46, 0x0d, 0x62, 0xc7, 0xaa, 0x86, + 0xfe, 0x82, 0xc5, 0x99, 0xbe, 0x52, 0x15, 0xee, 0xc1, 0x64, 0xa0, 0xfd, 0xa7, 0xec, 0x4c, 0x4a, + 0xc6, 0xf3, 0xc5, 0xd9, 0x3a, 0xe3, 0x82, 0x9a, 0x77, 0x61, 0x86, 0x20, 0x6e, 0x6f, 0xc3, 0xfe, + 0x55, 0x0b, 0x1e, 0xd1, 0x11, 0xb5, 0x00, 0xd0, 0x3c, 0xbb, 0x71, 0x15, 0x86, 0xc2, 0x26, 0x89, + 0x9c, 0x54, 0x2d, 0x3a, 0x27, 0xe7, 0xff, 0x86, 0x28, 0x3f, 0xd8, 0x9b, 0x9b, 0xd6, 0xa9, 0xcb, + 0x72, 0xac, 0x6a, 0x22, 0x1b, 0x06, 0xd8, 0xbc, 0xc4, 0x22, 0x74, 0x97, 0x25, 0x1b, 0x62, 0x97, + 0x2a, 0x31, 0x16, 0x10, 0xfb, 0x6f, 0x59, 0x7c, 0xb9, 0xe9, 0x5d, 0x47, 0x5f, 0x80, 0x89, 0x6d, + 0xaa, 0x41, 0xad, 0xdc, 0x6f, 0xd2, 0x83, 0x94, 0x5d, 0x27, 0x5b, 0x45, 0x8e, 0x8f, 0x2e, 0xc3, + 0x5d, 0x9a, 0x11, 0xbd, 0x9f, 0xb8, 0x96, 0x21, 0x8b, 0xdb, 0x1a, 0xb2, 0xff, 0x61, 0x89, 0xef, + 0x59, 0x26, 0xc3, 0x3d, 0x03, 0x83, 0xcd, 0xd0, 0x5d, 0x5e, 0xab, 0x62, 0x31, 0x57, 0x8a, 0xe9, + 0xd4, 0x78, 0x31, 0x96, 0x70, 0x74, 0x01, 0x80, 0xdc, 0x4f, 0x48, 0x14, 0x38, 0xbe, 0xba, 0x06, + 0x56, 0xa2, 0xd2, 0x8a, 0x82, 0x60, 0x0d, 0x8b, 0xd6, 0x69, 0x46, 0xe1, 0x8e, 0xe7, 0xb2, 0xc8, + 0x85, 0xb2, 0x59, 0xa7, 0xa6, 0x20, 0x58, 0xc3, 0xa2, 0x7a, 0x6b, 0x2b, 0x88, 0xf9, 0x31, 0xe6, + 0xdc, 0x11, 0xb9, 0x71, 0x86, 0x52, 0xbd, 0xf5, 0xa6, 0x0e, 0xc4, 0x26, 0x2e, 0xba, 0x02, 0x03, + 0x89, 0xc3, 0x2e, 0x37, 0xfb, 0x8b, 0x78, 0x8a, 0xac, 0x53, 0x5c, 0x3d, 0x19, 0x11, 0xad, 0x8a, + 0x05, 0x09, 0xfb, 0x3f, 0x55, 0x00, 0x52, 0xa9, 0x0b, 0x7d, 0xb9, 0x7d, 0xc3, 0x7f, 0xb4, 0xa8, + 0xc8, 0xf6, 0xe0, 0x76, 0x3b, 0xfa, 0x9a, 0x05, 0xc3, 0x8e, 0xef, 0x87, 0x0d, 0x27, 0x61, 0xd3, + 0x53, 0x2a, 0xca, 0x7a, 0x44, 0x4f, 0x16, 0xd3, 0xba, 0xbc, 0x33, 0x2f, 0xca, 0x0b, 0x47, 0x0d, + 0x92, 0xdb, 0x1f, 0xbd, 0x0b, 0xe8, 0x23, 0x52, 0x6a, 0xe7, 0x5f, 0x78, 0x36, 0x2b, 0xb5, 0x57, + 0x18, 0xc3, 0xd5, 0x04, 0x76, 0xf4, 0xb6, 0x91, 0x4b, 0xa6, 0xaf, 0x48, 0xf8, 0xa9, 0x21, 0x87, + 0xe4, 0xa5, 0x91, 0x41, 0x6f, 0xea, 0x2e, 0xd5, 0xfd, 0x45, 0xe2, 0xbb, 0x35, 0x71, 0x38, 0xc7, + 0x9d, 0x3a, 0x81, 0x71, 0xd7, 0x3c, 0x79, 0x85, 0x5b, 0xd8, 0x0b, 0xf9, 0x2d, 0x64, 0x8e, 0xec, + 0xf4, 0xac, 0xcd, 0x00, 0x70, 0xb6, 0x09, 0xf4, 0x26, 0x77, 0x78, 0x5f, 0x0b, 0x36, 0x42, 0xe1, + 0x1a, 0x76, 0xbe, 0xc0, 0x37, 0xdf, 0x8d, 0x13, 0xb2, 0x4d, 0xeb, 0xa4, 0x87, 0xeb, 0x75, 0x41, + 0x05, 0x2b, 0x7a, 0x68, 0x1d, 0x06, 0x58, 0xb4, 0x51, 0x3c, 0x33, 0x54, 0xc4, 0x12, 0x67, 0x06, + 0xd9, 0xa6, 0xfb, 0x87, 0xfd, 0x8d, 0xb1, 0xa0, 0x85, 0x2e, 0xcb, 0x30, 0xfb, 0x78, 0x2d, 0xb8, + 0x19, 0x13, 0x16, 0x66, 0x5f, 0x59, 0xfa, 0x50, 0x1a, 0x37, 0xcf, 0xcb, 0x3b, 0x66, 0xd3, 0x33, + 0x6a, 0x52, 0xc1, 0x46, 0xfc, 0x97, 0x49, 0xfa, 0x66, 0xa0, 0x48, 0x47, 0xcd, 0x94, 0x7e, 0xe9, + 0x64, 0xdf, 0x32, 0x89, 0xe1, 0x2c, 0xf5, 0x13, 0x3d, 0x52, 0x67, 0x03, 0x98, 0xc8, 0x6e, 0xca, + 0x63, 0x3d, 0xc2, 0x7f, 0xd2, 0x07, 0x63, 0xe6, 0xe2, 0x40, 0x0b, 0x50, 0x11, 0x44, 0x54, 0xd2, + 0x2e, 0xb5, 0x07, 0xae, 0x49, 0x00, 0x4e, 0x71, 0x58, 0xfa, 0x32, 0x56, 0x5d, 0x73, 0x0a, 0x4a, + 0xd3, 0x97, 0x29, 0x08, 0xd6, 0xb0, 
0xa8, 0x24, 0x7c, 0x27, 0x0c, 0x13, 0x75, 0x12, 0xa8, 0x75, + 0xb3, 0xc4, 0x4a, 0xb1, 0x80, 0xd2, 0x13, 0xe0, 0x2e, 0xfd, 0x98, 0xbe, 0x69, 0x55, 0x54, 0x27, + 0xc0, 0x15, 0x1d, 0x88, 0x4d, 0x5c, 0x7a, 0xa2, 0x85, 0x31, 0x5b, 0x88, 0x42, 0xde, 0x4e, 0x9d, + 0xac, 0xea, 0x3c, 0x02, 0x4f, 0xc2, 0xd1, 0x67, 0xe0, 0x11, 0x15, 0x30, 0x87, 0xb9, 0x95, 0x56, + 0xb6, 0x38, 0x60, 0xa8, 0xcc, 0x8f, 0x2c, 0x77, 0x46, 0xc3, 0xdd, 0xea, 0xa3, 0xd7, 0x60, 0x4c, + 0xc8, 0xca, 0x92, 0xe2, 0xa0, 0x79, 0x03, 0x7f, 0xc5, 0x80, 0xe2, 0x0c, 0x36, 0xaa, 0xc2, 0x04, + 0x2d, 0x61, 0x42, 0xaa, 0xa4, 0xc0, 0x03, 0xff, 0xd4, 0x51, 0x7f, 0x25, 0x03, 0xc7, 0x6d, 0x35, + 0xd0, 0x22, 0x8c, 0x73, 0x61, 0x85, 0x2a, 0x86, 0xec, 0x3b, 0x08, 0x7f, 0x4e, 0xb5, 0x11, 0x6e, + 0x98, 0x60, 0x9c, 0xc5, 0x47, 0x17, 0x61, 0xc4, 0x89, 0x1a, 0x5b, 0x5e, 0x42, 0x1a, 0x49, 0x2b, + 0xe2, 0x49, 0x2c, 0x34, 0x17, 0x86, 0x45, 0x0d, 0x86, 0x0d, 0x4c, 0xfb, 0xf3, 0x30, 0xd5, 0xc1, + 0x79, 0x9c, 0x2e, 0x1c, 0xa7, 0xe9, 0xc9, 0x31, 0x65, 0xdc, 0xa5, 0x16, 0x6b, 0x6b, 0x72, 0x34, + 0x1a, 0x16, 0x5d, 0x9d, 0xcc, 0x3c, 0xad, 0xe5, 0xd4, 0x54, 0xab, 0x73, 0x55, 0x02, 0x70, 0x8a, + 0x63, 0xff, 0x45, 0x05, 0x34, 0xeb, 0x4d, 0x01, 0x17, 0x99, 0x8b, 0x30, 0x22, 0xd3, 0xc4, 0x6a, + 0xe9, 0x19, 0xd5, 0x30, 0x2f, 0x69, 0x30, 0x6c, 0x60, 0xd2, 0xbe, 0x05, 0xd2, 0x26, 0x95, 0x75, + 0xce, 0x52, 0xc6, 0x2a, 0x9c, 0xe2, 0xa0, 0xf3, 0x30, 0x14, 0x13, 0x7f, 0xe3, 0xaa, 0x17, 0xdc, + 0x15, 0x0b, 0x5b, 0x71, 0xe6, 0xba, 0x28, 0xc7, 0x0a, 0x03, 0x2d, 0x41, 0xb9, 0xe5, 0xb9, 0x62, + 0x29, 0x4b, 0xb1, 0xa1, 0x7c, 0x73, 0xad, 0x7a, 0xb0, 0x37, 0xf7, 0x44, 0xb7, 0x9c, 0xb9, 0x54, + 0x3f, 0x8f, 0xe7, 0xe9, 0xf6, 0xa3, 0x95, 0x3b, 0xd9, 0xe9, 0x07, 0x7a, 0xb4, 0xd3, 0x5f, 0x00, + 0x10, 0xa3, 0x96, 0x6b, 0xb9, 0x9c, 0x7e, 0xb5, 0x4b, 0x0a, 0x82, 0x35, 0x2c, 0xaa, 0xe5, 0x37, + 0x22, 0xe2, 0x48, 0x45, 0x98, 0x3b, 0x35, 0x0f, 0x1d, 0x5d, 0xcb, 0x5f, 0xce, 0x12, 0xc3, 0xed, + 0xf4, 0x51, 0x08, 0x93, 0xae, 0x88, 0xca, 0x4c, 0x1b, 0xad, 0xf4, 0xee, 0x49, 0x4d, 0x1b, 0xac, + 0x66, 0x09, 0xe1, 0x76, 0xda, 0xe8, 0x73, 0x30, 0x2b, 0x0b, 0xdb, 0x43, 0x62, 0xd9, 0x76, 0x29, + 0x2f, 0x9d, 0xd9, 0xdf, 0x9b, 0x9b, 0xad, 0x76, 0xc5, 0xc2, 0x87, 0x50, 0x40, 0x6f, 0xc1, 0x00, + 0xbb, 0xd7, 0x89, 0x67, 0x86, 0xd9, 0x89, 0xf7, 0x52, 0x11, 0x7f, 0x7c, 0xba, 0xea, 0xe7, 0xd9, + 0xed, 0x90, 0xf0, 0x34, 0x4d, 0x2f, 0xcb, 0x58, 0x21, 0x16, 0x34, 0x51, 0x13, 0x86, 0x9d, 0x20, + 0x08, 0x13, 0x87, 0x0b, 0x62, 0x23, 0x45, 0x64, 0x49, 0xad, 0x89, 0xc5, 0xb4, 0x2e, 0x6f, 0x47, + 0x39, 0xaf, 0x69, 0x10, 0xac, 0x37, 0x81, 0xee, 0xc1, 0x78, 0x78, 0x8f, 0x32, 0x4c, 0x79, 0xb5, + 0x11, 0xcf, 0x8c, 0x9a, 0x03, 0xcb, 0x31, 0xd4, 0x1a, 0x95, 0x35, 0x4e, 0x66, 0x12, 0xc5, 0xd9, + 0x56, 0xd0, 0xbc, 0x61, 0xae, 0x1e, 0x4b, 0xfd, 0xa9, 0x53, 0x73, 0xb5, 0x6e, 0x9d, 0x66, 0x61, + 0xd7, 0xdc, 0x87, 0x92, 0x71, 0x84, 0xf1, 0x4c, 0xd8, 0x75, 0x0a, 0xc2, 0x3a, 0xde, 0xec, 0xc7, + 0x60, 0x58, 0x9b, 0xf8, 0x5e, 0x1c, 0x77, 0x67, 0x5f, 0x83, 0x89, 0xec, 0x84, 0xf6, 0xe4, 0xf8, + 0xfb, 0x3f, 0x4b, 0x30, 0xde, 0xe1, 0xde, 0xe8, 0xae, 0xc7, 0x9c, 0xcf, 0x0d, 0xd6, 0x77, 0xc5, + 0x0b, 0x5c, 0xcc, 0x20, 0x26, 0x03, 0x2b, 0x15, 0x60, 0x60, 0x92, 0x9b, 0x96, 0xbb, 0x72, 0x53, + 0xc1, 0xb4, 0xfa, 0xde, 0x0f, 0xd3, 0x32, 0xcf, 0x89, 0xfe, 0x42, 0xe7, 0xc4, 0x03, 0x60, 0x74, + 0xc6, 0x51, 0x33, 0x58, 0xe0, 0xa8, 0xf9, 0x66, 0x09, 0x26, 0x52, 0x27, 0x67, 0x91, 0x3b, 0xfa, + 0xf8, 0xaf, 0x21, 0xd6, 0x8d, 0x6b, 0x88, 0xbc, 0xd4, 0xd0, 0x99, 0xfe, 0x75, 0xbd, 0x92, 0x78, + 0x2b, 0x73, 0x25, 0xf1, 0x52, 0x8f, 0x74, 0x0f, 0xbf, 0x9e, 
0xf8, 0x6e, 0x09, 0x4e, 0x65, 0xab, + 0x2c, 0xfb, 0x8e, 0xb7, 0x7d, 0x02, 0xf3, 0xf5, 0x19, 0x63, 0xbe, 0x5e, 0xe9, 0x6d, 0x5c, 0xac, + 0x93, 0x5d, 0x27, 0xcd, 0xc9, 0x4c, 0xda, 0xc7, 0x8e, 0x42, 0xfc, 0xf0, 0x99, 0xfb, 0x43, 0x0b, + 0x1e, 0xed, 0x58, 0xef, 0x04, 0x0c, 0xaf, 0x6f, 0x98, 0x86, 0xd7, 0x17, 0x8f, 0x30, 0xba, 0x2e, + 0x96, 0xd8, 0x5f, 0x2b, 0x77, 0x19, 0x15, 0x33, 0x4d, 0xdd, 0x80, 0x61, 0xa7, 0xd1, 0x20, 0x71, + 0x7c, 0x2d, 0x74, 0x55, 0x02, 0xa7, 0xe7, 0xd9, 0xd9, 0x92, 0x16, 0x1f, 0xec, 0xcd, 0xcd, 0x66, + 0x49, 0xa4, 0x60, 0xac, 0x53, 0x30, 0x53, 0xcb, 0x95, 0x8e, 0x29, 0xb5, 0xdc, 0x05, 0x80, 0x1d, + 0xa5, 0xc5, 0x66, 0x2d, 0x5e, 0x9a, 0x7e, 0xab, 0x61, 0xa1, 0xff, 0x9f, 0x49, 0x84, 0xdc, 0x49, + 0xa3, 0xcf, 0x8c, 0x97, 0xcc, 0xf9, 0x7e, 0xba, 0xc3, 0x07, 0x0f, 0xcb, 0x54, 0xd6, 0x41, 0x45, + 0x12, 0x7d, 0x0a, 0x26, 0x62, 0x1e, 0xfc, 0xbf, 0xec, 0x3b, 0x31, 0xf3, 0xee, 0x17, 0xfc, 0x94, + 0x45, 0x58, 0xd6, 0x33, 0x30, 0xdc, 0x86, 0x6d, 0x7f, 0xa7, 0x0c, 0x1f, 0x3c, 0x64, 0xd9, 0xa2, + 0x45, 0xf3, 0xd6, 0xf6, 0xb9, 0xac, 0xfd, 0x67, 0xb6, 0x63, 0x65, 0xc3, 0x20, 0x94, 0xf9, 0xda, + 0xa5, 0xf7, 0xfd, 0xb5, 0xbf, 0xae, 0x5b, 0xeb, 0xb8, 0xdf, 0xe6, 0xa5, 0x23, 0x6f, 0xcc, 0x9f, + 0x56, 0x63, 0xfd, 0x97, 0x2c, 0x78, 0xa2, 0xe3, 0xb0, 0x0c, 0x2f, 0x91, 0x05, 0xa8, 0x34, 0x68, + 0xa1, 0x16, 0x8b, 0x93, 0x06, 0xc1, 0x49, 0x00, 0x4e, 0x71, 0x0c, 0x67, 0x90, 0x52, 0xae, 0x33, + 0xc8, 0xef, 0x5b, 0x30, 0x9d, 0xed, 0xc4, 0x09, 0xf0, 0xad, 0xba, 0xc9, 0xb7, 0xe6, 0x7b, 0xfb, + 0xf8, 0x5d, 0x58, 0xd6, 0x7f, 0x1f, 0x83, 0xd3, 0x6d, 0xa7, 0x1e, 0x9f, 0xc5, 0x9f, 0xb3, 0x60, + 0x72, 0x93, 0x49, 0xef, 0x5a, 0xc0, 0x93, 0x18, 0x57, 0x4e, 0x94, 0xd8, 0xa1, 0x71, 0x52, 0x5c, + 0x17, 0x69, 0x43, 0xc1, 0xed, 0x8d, 0xa1, 0xaf, 0x5a, 0x30, 0xed, 0xdc, 0x8b, 0xdb, 0x5e, 0x36, + 0x11, 0x0b, 0xe9, 0xb5, 0x1c, 0x63, 0x59, 0xce, 0x9b, 0x28, 0x4b, 0x33, 0xfb, 0x7b, 0x73, 0xd3, + 0x9d, 0xb0, 0x70, 0xc7, 0x56, 0xe9, 0xf7, 0xdd, 0x12, 0xe1, 0x14, 0xc5, 0x42, 0xf7, 0x3a, 0x05, + 0x5f, 0x70, 0xb6, 0x26, 0x21, 0x58, 0x51, 0x44, 0xef, 0x40, 0x65, 0x53, 0xc6, 0x38, 0x65, 0xd9, + 0x66, 0x97, 0x69, 0xee, 0x14, 0x12, 0xc5, 0x7d, 0xf7, 0x15, 0x08, 0xa7, 0x44, 0xd1, 0x65, 0x28, + 0x07, 0x1b, 0xb1, 0x88, 0x26, 0xce, 0xf3, 0x01, 0x32, 0x3d, 0xaf, 0x78, 0x00, 0xe6, 0xf5, 0xd5, + 0x3a, 0xa6, 0x24, 0x28, 0xa5, 0xe8, 0x8e, 0x2b, 0xac, 0xc4, 0x39, 0x94, 0xf0, 0x52, 0xb5, 0x9d, + 0x12, 0x5e, 0xaa, 0x62, 0x4a, 0x02, 0xd5, 0xa0, 0x9f, 0x05, 0x6b, 0x08, 0x13, 0x70, 0x4e, 0xc8, + 0x79, 0x5b, 0x48, 0x0a, 0xcf, 0x80, 0xc8, 0x8a, 0x31, 0x27, 0x84, 0xd6, 0x61, 0xa0, 0xc1, 0x92, + 0xf8, 0x0b, 0xdd, 0x3c, 0x2f, 0x19, 0x43, 0x5b, 0xc2, 0x7f, 0x7e, 0xef, 0xc5, 0xcb, 0xb1, 0xa0, + 0xc5, 0xa8, 0x92, 0xe6, 0xd6, 0x46, 0x2c, 0x94, 0xef, 0x3c, 0xaa, 0x6d, 0xcf, 0x31, 0x08, 0xaa, + 0xac, 0x1c, 0x0b, 0x5a, 0xa8, 0x0a, 0xa5, 0x8d, 0x86, 0x48, 0xa4, 0x9a, 0x63, 0xfa, 0x35, 0xa3, + 0x69, 0x97, 0x06, 0xf6, 0xf7, 0xe6, 0x4a, 0xab, 0xcb, 0xb8, 0xb4, 0xd1, 0x40, 0x6f, 0xc0, 0xe0, + 0x06, 0x8f, 0x8f, 0x14, 0x49, 0x53, 0x5f, 0xc8, 0x0b, 0xe2, 0x6c, 0x0b, 0xa6, 0xe4, 0xf1, 0x19, + 0x02, 0x80, 0x25, 0x39, 0x96, 0x4f, 0x4e, 0x45, 0x7c, 0x8a, 0xac, 0xa9, 0xf3, 0xbd, 0x45, 0x88, + 0x0a, 0x9d, 0x54, 0x95, 0x62, 0x8d, 0x22, 0x5d, 0xf3, 0x8e, 0x7c, 0x8f, 0x84, 0x65, 0x4c, 0xcd, + 0x5d, 0xf3, 0x1d, 0x9f, 0x2f, 0xe1, 0x6b, 0x5e, 0x81, 0x70, 0x4a, 0x14, 0xb5, 0x60, 0x74, 0x27, + 0x6e, 0x6e, 0x11, 0xb9, 0xf5, 0x59, 0x1a, 0xd5, 0xe1, 0x0b, 0x9f, 0xc8, 0xc9, 0x8d, 0x2b, 0xaa, + 0x78, 0x51, 0xd2, 0x72, 0xfc, 0x36, 0x0e, 0xc6, 0x12, 0x78, 0xdd, 0xd2, 0xc9, 0x62, 
0xb3, 0x15, + 0xfa, 0x49, 0xde, 0x6b, 0x85, 0x77, 0x76, 0x13, 0x22, 0xd2, 0xac, 0xe6, 0x7c, 0x92, 0xd7, 0x39, + 0x72, 0xfb, 0x27, 0x11, 0x00, 0x2c, 0xc9, 0xa9, 0x29, 0x63, 0xdc, 0x78, 0xa2, 0xf0, 0x94, 0xb5, + 0x8d, 0x21, 0x9d, 0x32, 0xc6, 0x7d, 0x53, 0xa2, 0x8c, 0xeb, 0x36, 0xb7, 0xc2, 0x24, 0x0c, 0x32, + 0xbc, 0x7f, 0xb2, 0x08, 0xd7, 0xad, 0x75, 0xa8, 0xd9, 0xce, 0x75, 0x3b, 0x61, 0xe1, 0x8e, 0xad, + 0xa2, 0x00, 0xc6, 0x9a, 0x61, 0x94, 0xdc, 0x0b, 0x23, 0xb9, 0x0e, 0x51, 0x21, 0x1d, 0xd1, 0xa8, + 0x23, 0xda, 0x66, 0x6e, 0xb8, 0x26, 0x04, 0x67, 0xa8, 0xd3, 0x4f, 0x17, 0x37, 0x1c, 0x9f, 0xac, + 0xdd, 0x98, 0x99, 0x2a, 0xf2, 0xe9, 0xea, 0x1c, 0xb9, 0xfd, 0xd3, 0x09, 0x00, 0x96, 0xe4, 0xec, + 0x5f, 0x1d, 0x68, 0x17, 0x1c, 0x98, 0x6a, 0xf0, 0x37, 0xdb, 0x6f, 0x62, 0x3f, 0xd5, 0xbb, 0x06, + 0xfc, 0x00, 0xef, 0x64, 0xbf, 0x6a, 0xc1, 0xe9, 0x66, 0x47, 0xb1, 0x40, 0x1c, 0xbd, 0xbd, 0x2a, + 0xd2, 0x7c, 0x5a, 0x54, 0x36, 0xe4, 0xce, 0x70, 0xdc, 0xa5, 0xcd, 0xac, 0x30, 0x5d, 0x7e, 0xdf, + 0xc2, 0xf4, 0x6d, 0x18, 0x62, 0xd2, 0x5f, 0x9a, 0xeb, 0xa4, 0xc7, 0xb4, 0x20, 0xec, 0x10, 0x5f, + 0x16, 0x24, 0xb0, 0x22, 0x46, 0x27, 0xee, 0xf1, 0xec, 0x20, 0x30, 0x61, 0x60, 0x91, 0x83, 0x8f, + 0x6b, 0x2a, 0xab, 0x62, 0x26, 0x1e, 0xaf, 0x1d, 0x86, 0x7c, 0x90, 0x87, 0x80, 0x0f, 0x6f, 0x0c, + 0x55, 0x3b, 0xa8, 0x4a, 0x03, 0xe6, 0xb5, 0x4b, 0xbe, 0xba, 0x74, 0xb2, 0x22, 0xfe, 0x3f, 0xb2, + 0x3a, 0x48, 0xa4, 0x5c, 0x2d, 0xfb, 0x84, 0xa9, 0x96, 0x3d, 0x9d, 0x55, 0xcb, 0xda, 0x8c, 0x31, + 0x86, 0x46, 0x56, 0x3c, 0x87, 0x68, 0xd1, 0x64, 0x2e, 0xb6, 0x0f, 0x67, 0xf3, 0xd8, 0x1d, 0x73, + 0xc5, 0x72, 0xd5, 0x25, 0x64, 0xea, 0x8a, 0xe5, 0xae, 0x55, 0x31, 0x83, 0x14, 0xcd, 0x07, 0x60, + 0xff, 0x7c, 0x09, 0xca, 0xb5, 0xd0, 0x3d, 0x01, 0xe3, 0xd2, 0x25, 0xc3, 0xb8, 0xf4, 0x54, 0xee, + 0xfb, 0x74, 0x5d, 0x4d, 0x49, 0x37, 0x32, 0xa6, 0xa4, 0x0f, 0xe7, 0x93, 0x3a, 0xdc, 0x70, 0xf4, + 0xbd, 0x32, 0xe8, 0x2f, 0xec, 0xa1, 0xff, 0x70, 0x14, 0x0f, 0xdd, 0x72, 0xb1, 0x47, 0xf7, 0x44, + 0x1b, 0xcc, 0x93, 0x4b, 0xc6, 0xf5, 0xfd, 0xd4, 0x3a, 0xea, 0xde, 0x26, 0xde, 0xe6, 0x56, 0x42, + 0xdc, 0xec, 0xc0, 0x4e, 0xce, 0x51, 0xf7, 0xcf, 0x2d, 0x18, 0xcf, 0xb4, 0x8e, 0xfc, 0x4e, 0x01, + 0x41, 0x47, 0x34, 0x17, 0x4d, 0xe6, 0x46, 0x10, 0xcd, 0x03, 0x28, 0xab, 0xbf, 0x34, 0xc9, 0x30, + 0xe9, 0x54, 0x5d, 0x0b, 0xc4, 0x58, 0xc3, 0x40, 0x2f, 0xc3, 0x70, 0x12, 0x36, 0x43, 0x3f, 0xdc, + 0xdc, 0xbd, 0x42, 0x64, 0xa6, 0x0a, 0x75, 0x63, 0xb2, 0x9e, 0x82, 0xb0, 0x8e, 0x67, 0x7f, 0xbf, + 0x0c, 0xd9, 0xf7, 0x19, 0xff, 0xdf, 0x3a, 0xfd, 0xe9, 0x59, 0xa7, 0x7f, 0x64, 0xc1, 0x04, 0x6d, + 0x9d, 0xb9, 0xce, 0x48, 0x87, 0x5a, 0xf5, 0xa0, 0x81, 0x75, 0xc8, 0x83, 0x06, 0x4f, 0x53, 0x6e, + 0xe7, 0x86, 0xad, 0x44, 0x18, 0x91, 0x34, 0x26, 0x46, 0x4b, 0xb1, 0x80, 0x0a, 0x3c, 0x12, 0x45, + 0x22, 0xf2, 0x48, 0xc7, 0x23, 0x51, 0x84, 0x05, 0x54, 0xbe, 0x77, 0xd0, 0xd7, 0xe5, 0xbd, 0x03, + 0x96, 0xeb, 0x49, 0xb8, 0x6b, 0x08, 0xb1, 0x42, 0xcb, 0xf5, 0x24, 0xfd, 0x38, 0x52, 0x1c, 0xfb, + 0xdb, 0x65, 0x18, 0xa9, 0x85, 0x6e, 0xea, 0x29, 0xff, 0x92, 0xe1, 0x29, 0x7f, 0x36, 0xe3, 0x29, + 0x3f, 0xa1, 0xe3, 0x3e, 0x18, 0x47, 0x79, 0x91, 0x13, 0x8c, 0xbd, 0xc8, 0x71, 0x44, 0x27, 0x79, + 0x23, 0x27, 0x98, 0x22, 0x84, 0x4d, 0xba, 0x3f, 0x4b, 0xce, 0xf1, 0xff, 0xdb, 0x82, 0xb1, 0x5a, + 0xe8, 0xd2, 0x05, 0xfa, 0xb3, 0xb4, 0x1a, 0xf5, 0x4c, 0x62, 0x03, 0x87, 0x64, 0x12, 0xfb, 0x75, + 0x0b, 0x06, 0x6b, 0xa1, 0x7b, 0x02, 0x06, 0xd6, 0x55, 0xd3, 0xc0, 0xfa, 0x44, 0x2e, 0xe7, 0xed, + 0x62, 0x53, 0xfd, 0x4e, 0x19, 0x46, 0x69, 0x8f, 0xc3, 0x4d, 0xf9, 0xbd, 0x8c, 0xb9, 0xb1, 0x0a, + 0xcc, 0x0d, 
0x15, 0x09, 0x43, 0xdf, 0x0f, 0xef, 0x65, 0xbf, 0xdd, 0x2a, 0x2b, 0xc5, 0x02, 0x8a, + 0xce, 0xc3, 0x50, 0x33, 0x22, 0x3b, 0x5e, 0xd8, 0x8a, 0xb3, 0x51, 0x8c, 0x35, 0x51, 0x8e, 0x15, + 0x06, 0x7a, 0x09, 0x46, 0x62, 0x2f, 0x68, 0x10, 0xe9, 0xcc, 0xd1, 0xc7, 0x9c, 0x39, 0x78, 0xd2, + 0x46, 0xad, 0x1c, 0x1b, 0x58, 0xe8, 0x36, 0x54, 0xd8, 0x7f, 0xb6, 0x83, 0x7a, 0x7f, 0xb0, 0x80, + 0x67, 0x2a, 0x93, 0x04, 0x70, 0x4a, 0x0b, 0x5d, 0x00, 0x48, 0xa4, 0xdb, 0x49, 0x2c, 0xf2, 0xad, + 0x28, 0xb9, 0x54, 0x39, 0xa4, 0xc4, 0x58, 0xc3, 0x42, 0xcf, 0x41, 0x25, 0x71, 0x3c, 0xff, 0xaa, + 0x17, 0x90, 0x58, 0xb8, 0xed, 0x88, 0x04, 0xcc, 0xa2, 0x10, 0xa7, 0x70, 0x7a, 0xde, 0xb3, 0x18, + 0x6a, 0xfe, 0x18, 0xca, 0x10, 0xc3, 0x66, 0xe7, 0xfd, 0x55, 0x55, 0x8a, 0x35, 0x0c, 0xfb, 0x22, + 0x9c, 0xaa, 0x85, 0x6e, 0x2d, 0x8c, 0x92, 0xd5, 0x30, 0xba, 0xe7, 0x44, 0xae, 0xfc, 0x7e, 0x73, + 0x32, 0xef, 0x2f, 0x3d, 0x93, 0xfb, 0xb9, 0xcd, 0xd1, 0xc8, 0xe3, 0xfb, 0x22, 0x3b, 0xf1, 0x7b, + 0x0c, 0xc1, 0xf8, 0x51, 0x09, 0x50, 0x8d, 0x39, 0xc6, 0x18, 0x6f, 0xe7, 0x6c, 0xc1, 0x58, 0x4c, + 0xae, 0x7a, 0x41, 0xeb, 0xbe, 0x20, 0x55, 0x2c, 0xe6, 0xa5, 0xbe, 0xa2, 0xd7, 0xe1, 0x96, 0x0e, + 0xb3, 0x0c, 0x67, 0xe8, 0xd2, 0xc9, 0x8c, 0x5a, 0xc1, 0x62, 0x7c, 0x33, 0x26, 0x91, 0x78, 0x2b, + 0x86, 0x4d, 0x26, 0x96, 0x85, 0x38, 0x85, 0xd3, 0xc5, 0xc3, 0xfe, 0x5c, 0x0f, 0x03, 0x1c, 0x86, + 0x89, 0x5c, 0x6e, 0xec, 0xed, 0x00, 0xad, 0x1c, 0x1b, 0x58, 0x68, 0x15, 0x50, 0xdc, 0x6a, 0x36, + 0x7d, 0x76, 0xd7, 0xe8, 0xf8, 0x97, 0xa2, 0xb0, 0xd5, 0xe4, 0xfe, 0xd1, 0x22, 0xed, 0x7e, 0xbd, + 0x0d, 0x8a, 0x3b, 0xd4, 0xa0, 0xcc, 0x62, 0x23, 0x66, 0xbf, 0x45, 0x40, 0x35, 0xb7, 0x57, 0xd6, + 0x59, 0x11, 0x96, 0x30, 0xfb, 0x8b, 0xec, 0x80, 0x63, 0x8f, 0x78, 0x24, 0xad, 0x88, 0xa0, 0x6d, + 0x18, 0x6d, 0xb2, 0x43, 0x2c, 0x89, 0x42, 0xdf, 0x27, 0x52, 0xbe, 0x3c, 0x9a, 0x6b, 0x0e, 0x4f, + 0xdb, 0xaf, 0x93, 0xc3, 0x26, 0x75, 0xfb, 0x17, 0xc7, 0x18, 0xaf, 0x12, 0xd7, 0xbd, 0x83, 0xc2, + 0x09, 0x57, 0x48, 0x72, 0x1f, 0x2a, 0xf2, 0x1c, 0x57, 0x7a, 0x0e, 0x08, 0x97, 0x5e, 0x2c, 0xa9, + 0xa0, 0xcf, 0x32, 0x17, 0x73, 0xce, 0x20, 0x8a, 0x3f, 0x32, 0xc8, 0xf1, 0x0d, 0xf7, 0x72, 0x41, + 0x02, 0x6b, 0xe4, 0xd0, 0x55, 0x18, 0x15, 0x6f, 0x3e, 0x08, 0x33, 0x45, 0xd9, 0x50, 0xb1, 0x47, + 0xb1, 0x0e, 0x3c, 0xc8, 0x16, 0x60, 0xb3, 0x32, 0xda, 0x84, 0xc7, 0xb5, 0x37, 0x8d, 0x3a, 0xb8, + 0x91, 0x71, 0xce, 0xf3, 0xc4, 0xfe, 0xde, 0xdc, 0xe3, 0xeb, 0x87, 0x21, 0xe2, 0xc3, 0xe9, 0xa0, + 0x1b, 0x70, 0xca, 0x69, 0x24, 0xde, 0x0e, 0xa9, 0x12, 0xc7, 0xf5, 0xbd, 0x80, 0x98, 0x51, 0xf7, + 0x8f, 0xee, 0xef, 0xcd, 0x9d, 0x5a, 0xec, 0x84, 0x80, 0x3b, 0xd7, 0x43, 0x9f, 0x80, 0x8a, 0x1b, + 0xc4, 0x62, 0x0e, 0x06, 0x8c, 0x27, 0xbc, 0x2a, 0xd5, 0xeb, 0x75, 0x35, 0xfe, 0xf4, 0x0f, 0x4e, + 0x2b, 0xa0, 0xf7, 0xf8, 0x13, 0xf5, 0x4a, 0x9b, 0xe1, 0x4f, 0xc7, 0xbd, 0x52, 0x48, 0x7f, 0x36, + 0x62, 0x61, 0xb8, 0x05, 0x4f, 0xb9, 0x6b, 0x1a, 0x61, 0x32, 0x46, 0x13, 0xe8, 0xd3, 0x80, 0x62, + 0x12, 0xed, 0x78, 0x0d, 0xb2, 0xd8, 0x60, 0x99, 0x4e, 0x99, 0x8d, 0x67, 0xc8, 0x88, 0x5b, 0x40, + 0xf5, 0x36, 0x0c, 0xdc, 0xa1, 0x16, 0xba, 0x4c, 0x39, 0x8f, 0x5e, 0x2a, 0xbc, 0x6b, 0xa5, 0x60, + 0x38, 0x53, 0x25, 0xcd, 0x88, 0x34, 0x9c, 0x84, 0xb8, 0x26, 0x45, 0x9c, 0xa9, 0x47, 0xcf, 0x25, + 0x95, 0xcc, 0x1e, 0x4c, 0x9f, 0xd0, 0xf6, 0x84, 0xf6, 0x54, 0xcf, 0xda, 0x0a, 0xe3, 0xe4, 0x3a, + 0x49, 0xee, 0x85, 0xd1, 0x5d, 0x76, 0x87, 0x31, 0xa4, 0xa5, 0x8d, 0x4b, 0x41, 0x58, 0xc7, 0xa3, + 0x32, 0x14, 0xbb, 0x3c, 0x5b, 0xab, 0xb2, 0x9b, 0x89, 0xa1, 0x74, 0xef, 0x5c, 0xe6, 0xc5, 0x58, + 0xc2, 0x25, 0xea, 0x5a, 0x6d, 0x99, 
0xdd, 0x32, 0x64, 0x50, 0xd7, 0x6a, 0xcb, 0x58, 0xc2, 0x51, + 0xd8, 0xfe, 0x50, 0xda, 0x58, 0x91, 0x1b, 0x9f, 0x76, 0x4e, 0x5e, 0xf0, 0xad, 0xb4, 0xfb, 0x30, + 0xa1, 0x1e, 0x6b, 0xe3, 0x19, 0x3d, 0xe3, 0x99, 0xf1, 0x22, 0x0f, 0xe4, 0x77, 0x4c, 0x0c, 0xaa, + 0xec, 0x7a, 0x6b, 0x19, 0x9a, 0xb8, 0xad, 0x15, 0x23, 0x7b, 0xc4, 0x44, 0xee, 0x03, 0x05, 0x0b, + 0x50, 0x89, 0x5b, 0x77, 0xdc, 0x70, 0xdb, 0xf1, 0x02, 0x76, 0x15, 0xa0, 0x3f, 0xf7, 0x2e, 0x01, + 0x38, 0xc5, 0x41, 0x35, 0x18, 0x72, 0x84, 0x0a, 0x27, 0x4c, 0xf6, 0x39, 0xd1, 0xe5, 0x52, 0xe1, + 0xe3, 0xd6, 0x55, 0xf9, 0x0f, 0x2b, 0x2a, 0xe8, 0x55, 0x18, 0x15, 0xc1, 0x51, 0xc2, 0x89, 0x71, + 0xca, 0x74, 0xa4, 0xaf, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0x4d, 0x18, 0xa3, 0x54, 0x52, 0x06, 0x38, + 0x33, 0xdd, 0x1b, 0x0f, 0xd5, 0x52, 0x41, 0xeb, 0x64, 0x70, 0x86, 0x2c, 0x72, 0xe1, 0x31, 0xa7, + 0x95, 0x84, 0xdb, 0x74, 0x27, 0x98, 0xfb, 0x64, 0x3d, 0xbc, 0x4b, 0x82, 0x99, 0x53, 0x6c, 0x05, + 0x9e, 0xdd, 0xdf, 0x9b, 0x7b, 0x6c, 0xf1, 0x10, 0x3c, 0x7c, 0x28, 0x15, 0xf4, 0x36, 0x0c, 0x27, + 0xa1, 0x2f, 0x7c, 0x93, 0xe3, 0x99, 0xd3, 0x45, 0x72, 0xda, 0xac, 0xab, 0x0a, 0xba, 0x19, 0x43, + 0x11, 0xc1, 0x3a, 0xc5, 0xd9, 0x4f, 0xc2, 0x64, 0x1b, 0x4b, 0xea, 0xc9, 0x7d, 0xf3, 0x3f, 0xf6, + 0x43, 0x45, 0x59, 0xf4, 0xd0, 0x82, 0x69, 0xbc, 0x7d, 0x34, 0x6b, 0xbc, 0x1d, 0xa2, 0x02, 0x94, + 0x6e, 0xaf, 0xfd, 0x5c, 0x87, 0xe7, 0xb9, 0x9f, 0xcd, 0xdd, 0x83, 0xc5, 0x23, 0xaa, 0x7a, 0x78, + 0xc4, 0x3c, 0xd5, 0xea, 0xfa, 0x0e, 0xd5, 0xea, 0x0a, 0x3e, 0x39, 0x47, 0xf5, 0xb7, 0x66, 0xe8, + 0xae, 0xd5, 0xb2, 0x2f, 0x2a, 0xd5, 0x68, 0x21, 0xe6, 0x30, 0x26, 0x77, 0xd3, 0x33, 0x95, 0xc9, + 0xdd, 0x83, 0x47, 0x94, 0xbb, 0x25, 0x01, 0x9c, 0xd2, 0x42, 0x3b, 0x30, 0xd9, 0x30, 0x1f, 0xc8, + 0x52, 0x71, 0x52, 0xcf, 0xf7, 0xf0, 0x40, 0x55, 0x4b, 0x7b, 0x3d, 0x63, 0x39, 0x4b, 0x0f, 0xb7, + 0x37, 0x81, 0x5e, 0x85, 0xa1, 0xf7, 0xc2, 0x98, 0x5d, 0x2b, 0x88, 0x83, 0x45, 0xc6, 0xa3, 0x0c, + 0xbd, 0x7e, 0xa3, 0xce, 0xca, 0x0f, 0xf6, 0xe6, 0x86, 0x6b, 0xa1, 0x2b, 0xff, 0x62, 0x55, 0x01, + 0x7d, 0xc9, 0x82, 0x53, 0xc6, 0x3e, 0x53, 0x3d, 0x87, 0xa3, 0xf4, 0xfc, 0x71, 0xd1, 0xf2, 0xa9, + 0xb5, 0x4e, 0x34, 0x71, 0xe7, 0xa6, 0xec, 0xdf, 0xe5, 0x26, 0x4c, 0x61, 0xd4, 0x20, 0x71, 0xcb, + 0x3f, 0x89, 0x4c, 0xf6, 0x37, 0x0c, 0x7b, 0xcb, 0x03, 0x30, 0xa2, 0xff, 0x7b, 0x8b, 0x19, 0xd1, + 0xd7, 0xc9, 0x76, 0xd3, 0x77, 0x92, 0x93, 0xf0, 0xee, 0xfd, 0x2c, 0x0c, 0x25, 0xa2, 0xb5, 0x62, + 0x69, 0xf8, 0xb5, 0xee, 0xb1, 0xcb, 0x05, 0x75, 0x30, 0xc9, 0x52, 0xac, 0x08, 0xda, 0xff, 0x9a, + 0x7f, 0x15, 0x09, 0x39, 0x01, 0x4b, 0xc1, 0x75, 0xd3, 0x52, 0xf0, 0x4c, 0xe1, 0xb1, 0x74, 0xb1, + 0x18, 0x7c, 0xdf, 0x1c, 0x01, 0xd3, 0x1f, 0x7e, 0x7a, 0x6e, 0x79, 0xec, 0x5f, 0xb1, 0x60, 0xba, + 0xd3, 0x75, 0x3b, 0x15, 0x30, 0xb8, 0xf6, 0xa2, 0xee, 0xbf, 0xd4, 0xac, 0xde, 0x12, 0xe5, 0x58, + 0x61, 0x14, 0xce, 0x8b, 0xdd, 0x5b, 0xea, 0xa6, 0x1b, 0x60, 0x3e, 0xb5, 0x86, 0x5e, 0xe3, 0xce, + 0xfc, 0x96, 0x7a, 0x0b, 0xad, 0x37, 0x47, 0x7e, 0xfb, 0x37, 0x4a, 0x30, 0xcd, 0x8d, 0xd0, 0x8b, + 0x3b, 0xa1, 0xe7, 0xd6, 0x42, 0x57, 0x84, 0x36, 0xb8, 0x30, 0xd2, 0xd4, 0x94, 0xcf, 0x62, 0xa9, + 0x60, 0x74, 0x75, 0x35, 0x15, 0xf8, 0xf5, 0x52, 0x6c, 0x50, 0xa5, 0xad, 0x90, 0x1d, 0xaf, 0xa1, + 0x6c, 0x9a, 0xa5, 0x9e, 0x4f, 0x06, 0xd5, 0xca, 0x8a, 0x46, 0x07, 0x1b, 0x54, 0x8f, 0xe1, 0x39, + 0x0b, 0xfb, 0x1f, 0x58, 0xf0, 0x48, 0x97, 0x74, 0x31, 0xb4, 0xb9, 0x7b, 0xcc, 0xf0, 0x2f, 0xde, + 0xf2, 0x53, 0xcd, 0xf1, 0xeb, 0x00, 0x2c, 0xa0, 0xe8, 0x0e, 0x00, 0x37, 0xe7, 0xb3, 0x97, 0xdd, + 0x4b, 0x45, 0xfc, 0x91, 0xda, 0x92, 0x32, 0x68, 0xf1, 0xfa, 
0xea, 0x2d, 0x77, 0x8d, 0xaa, 0xfd, + 0xad, 0x32, 0xf4, 0xf3, 0x27, 0xa3, 0x6b, 0x30, 0xb8, 0xc5, 0xd3, 0xd7, 0xf6, 0x96, 0x3d, 0x37, + 0x55, 0x2e, 0x78, 0x01, 0x96, 0x64, 0xd0, 0x35, 0x98, 0xa2, 0x27, 0x8b, 0xe7, 0xf8, 0x55, 0xe2, + 0x3b, 0xbb, 0x52, 0x5b, 0xe5, 0x6f, 0x1c, 0xc8, 0x64, 0xdc, 0x53, 0x6b, 0xed, 0x28, 0xb8, 0x53, + 0x3d, 0xf4, 0x5a, 0x5b, 0xb6, 0x39, 0x9e, 0x16, 0x58, 0x49, 0xaa, 0x87, 0x67, 0x9c, 0xa3, 0xf2, + 0x74, 0xb3, 0x4d, 0x2f, 0xd7, 0x5e, 0xe6, 0x35, 0x75, 0x71, 0x13, 0x97, 0xf9, 0x16, 0xb4, 0x98, + 0x4f, 0xc5, 0xfa, 0x56, 0x44, 0xe2, 0xad, 0xd0, 0x77, 0xc5, 0xa3, 0x92, 0xa9, 0x6f, 0x41, 0x06, + 0x8e, 0xdb, 0x6a, 0x50, 0x2a, 0x1b, 0x8e, 0xe7, 0xb7, 0x22, 0x92, 0x52, 0x19, 0x30, 0xa9, 0xac, + 0x66, 0xe0, 0xb8, 0xad, 0x06, 0x5d, 0x5b, 0xa7, 0xc4, 0x3b, 0x84, 0x32, 0x38, 0x5a, 0xb0, 0xa0, + 0xcf, 0xc0, 0xa0, 0x74, 0x91, 0x2f, 0x94, 0xc3, 0x43, 0x38, 0x0e, 0xa8, 0x37, 0x0d, 0xb5, 0x37, + 0xaf, 0x84, 0x73, 0xbc, 0xa4, 0x77, 0x94, 0xf7, 0xee, 0xfe, 0xcc, 0x82, 0xa9, 0x0e, 0xae, 0x5e, + 0x9c, 0xa5, 0x6d, 0x7a, 0x71, 0xa2, 0x32, 0xee, 0x6b, 0x2c, 0x8d, 0x97, 0x63, 0x85, 0x41, 0x77, + 0x0b, 0x67, 0x9a, 0x59, 0x46, 0x29, 0x5c, 0x40, 0x04, 0xb4, 0x37, 0x46, 0x89, 0xce, 0x42, 0x5f, + 0x2b, 0x26, 0x91, 0x7c, 0x7c, 0x4e, 0xf2, 0x79, 0x66, 0x07, 0x64, 0x10, 0x2a, 0xb6, 0x6e, 0x2a, + 0x13, 0x9c, 0x26, 0xb6, 0x72, 0x23, 0x1c, 0x87, 0xd9, 0x5f, 0x2f, 0xc3, 0x78, 0xc6, 0xe5, 0x93, + 0x76, 0x64, 0x3b, 0x0c, 0xbc, 0x24, 0x54, 0x79, 0xd5, 0xf8, 0x7b, 0x57, 0xa4, 0xb9, 0x75, 0x4d, + 0x94, 0x63, 0x85, 0x81, 0x9e, 0x96, 0xef, 0x8d, 0x66, 0x5f, 0x12, 0x58, 0xaa, 0x1a, 0x4f, 0x8e, + 0x16, 0x7d, 0x05, 0xe4, 0x49, 0xe8, 0x6b, 0x86, 0xea, 0xf9, 0x68, 0xf5, 0x3d, 0xf1, 0x52, 0xb5, + 0x16, 0x86, 0x3e, 0x66, 0x40, 0xf4, 0x94, 0x18, 0x7d, 0xe6, 0xe6, 0x02, 0x3b, 0x6e, 0x18, 0x6b, + 0x53, 0xf0, 0x0c, 0x0c, 0xde, 0x25, 0xbb, 0x91, 0x17, 0x6c, 0x66, 0xef, 0x6d, 0xae, 0xf0, 0x62, + 0x2c, 0xe1, 0xe6, 0x4b, 0x1f, 0x83, 0xc7, 0xfc, 0xd2, 0xc7, 0x50, 0xee, 0x39, 0xf8, 0x1d, 0x0b, + 0xc6, 0x59, 0xb2, 0x51, 0x11, 0x9a, 0xef, 0x85, 0xc1, 0x09, 0xc8, 0x18, 0x4f, 0x42, 0x7f, 0x44, + 0x1b, 0xcd, 0xa6, 0xea, 0x67, 0x3d, 0xc1, 0x1c, 0x86, 0x1e, 0x83, 0x3e, 0xd6, 0x05, 0xfa, 0x19, + 0x47, 0x78, 0x4e, 0xf3, 0xaa, 0x93, 0x38, 0x98, 0x95, 0xb2, 0x28, 0x2b, 0x4c, 0x9a, 0xbe, 0xc7, + 0x3b, 0x9d, 0x9a, 0x5b, 0x1f, 0xb6, 0x28, 0xab, 0x8e, 0x9d, 0x7c, 0x50, 0x51, 0x56, 0x9d, 0x89, + 0x1f, 0x2e, 0xe7, 0xff, 0x8f, 0x12, 0x9c, 0xe9, 0x58, 0x2f, 0xbd, 0x01, 0x5e, 0x35, 0x6e, 0x80, + 0x2f, 0x64, 0x6e, 0x80, 0xed, 0xc3, 0x6b, 0x3f, 0x98, 0x3b, 0xe1, 0xce, 0x57, 0xb5, 0xe5, 0x13, + 0xbc, 0xaa, 0xed, 0x2b, 0x2a, 0xe2, 0xf4, 0xe7, 0x88, 0x38, 0x7f, 0x68, 0xc1, 0xa3, 0x1d, 0xa7, + 0xec, 0xa1, 0x0b, 0x6b, 0xeb, 0xd8, 0xcb, 0x2e, 0xda, 0xc9, 0x2f, 0x97, 0xbb, 0x8c, 0x8a, 0xe9, + 0x29, 0xe7, 0x28, 0x17, 0x62, 0xc0, 0x58, 0x08, 0x6f, 0x23, 0x9c, 0x03, 0xf1, 0x32, 0xac, 0xa0, + 0x28, 0xd6, 0xc2, 0xc2, 0x78, 0x27, 0x57, 0x8e, 0xb8, 0xa1, 0xe6, 0x4d, 0x3b, 0xb9, 0x9e, 0x6f, + 0x20, 0x1b, 0x2c, 0x76, 0x5b, 0xd3, 0x3c, 0xcb, 0x47, 0xd1, 0x3c, 0x47, 0x3a, 0x6b, 0x9d, 0x68, + 0x11, 0xc6, 0xb7, 0xbd, 0x80, 0x3d, 0x10, 0x6a, 0x4a, 0x4f, 0x2a, 0x36, 0xf7, 0x9a, 0x09, 0xc6, + 0x59, 0xfc, 0xd9, 0x57, 0x61, 0xf4, 0xe8, 0xd6, 0xb5, 0x1f, 0x97, 0xe1, 0x83, 0x87, 0x30, 0x05, + 0x7e, 0x3a, 0x18, 0xdf, 0x45, 0x3b, 0x1d, 0xda, 0xbe, 0x4d, 0x0d, 0xa6, 0x37, 0x5a, 0xbe, 0xbf, + 0xcb, 0xfc, 0xa7, 0x88, 0x2b, 0x31, 0x84, 0x50, 0xa3, 0x5e, 0x22, 0x5f, 0xed, 0x80, 0x83, 0x3b, + 0xd6, 0x44, 0x9f, 0x06, 0x14, 0xde, 0x61, 0xe9, 0x78, 0xdd, 0x34, 0x9f, 0x02, 0xfb, 
0x04, 0xe5, + 0x74, 0xab, 0xde, 0x68, 0xc3, 0xc0, 0x1d, 0x6a, 0x51, 0x39, 0x95, 0x3d, 0x62, 0xae, 0xba, 0x95, + 0x91, 0x53, 0xb1, 0x0e, 0xc4, 0x26, 0x2e, 0xba, 0x04, 0x93, 0xce, 0x8e, 0xe3, 0xf1, 0xf4, 0x5a, + 0x92, 0x00, 0x17, 0x54, 0x95, 0xfd, 0x6a, 0x31, 0x8b, 0x80, 0xdb, 0xeb, 0xa0, 0xa6, 0x61, 0x90, + 0xe4, 0x89, 0xf8, 0x3f, 0x71, 0x84, 0x15, 0x5c, 0xd8, 0x44, 0x69, 0xff, 0x57, 0x8b, 0x1e, 0x7d, + 0x1d, 0xde, 0x92, 0xa4, 0x33, 0xa2, 0x0c, 0x6c, 0x5a, 0x98, 0x9b, 0x9a, 0x91, 0x65, 0x1d, 0x88, + 0x4d, 0x5c, 0xbe, 0x34, 0xe2, 0xd4, 0x9d, 0xdb, 0x90, 0x36, 0x45, 0x84, 0xa8, 0xc2, 0xa0, 0x12, + 0xb4, 0xeb, 0xed, 0x78, 0x71, 0x18, 0x89, 0x0d, 0xd4, 0xa3, 0x73, 0x6f, 0xca, 0x2f, 0xab, 0x9c, + 0x0c, 0x96, 0xf4, 0xec, 0x6f, 0x94, 0x60, 0x54, 0xb6, 0xf8, 0x7a, 0x2b, 0x4c, 0x9c, 0x13, 0x38, + 0xd2, 0x5f, 0x37, 0x8e, 0xf4, 0x85, 0x62, 0x01, 0xb3, 0xac, 0x73, 0x5d, 0x8f, 0xf2, 0xcf, 0x64, + 0x8e, 0xf2, 0x17, 0x7a, 0x21, 0x7a, 0xf8, 0x11, 0xfe, 0x6f, 0x2d, 0x98, 0x34, 0xf0, 0x4f, 0xe0, + 0x24, 0xa9, 0x99, 0x27, 0xc9, 0x73, 0x3d, 0x8c, 0xa6, 0xcb, 0x09, 0xf2, 0xed, 0x52, 0x66, 0x14, + 0xec, 0xe4, 0xf8, 0x02, 0xf4, 0x6d, 0x39, 0x91, 0x5b, 0x2c, 0xd7, 0x64, 0x5b, 0xf5, 0xf9, 0xcb, + 0x4e, 0xe4, 0x72, 0xfe, 0x7f, 0x5e, 0xbd, 0x74, 0xe5, 0x44, 0x6e, 0x6e, 0x94, 0x03, 0x6b, 0x14, + 0x5d, 0x84, 0x81, 0xb8, 0x11, 0x36, 0x95, 0x1f, 0xe8, 0x59, 0xfe, 0x0a, 0x16, 0x2d, 0x39, 0xd8, + 0x9b, 0x43, 0x66, 0x73, 0xb4, 0x18, 0x0b, 0xfc, 0xd9, 0x4d, 0xa8, 0xa8, 0xa6, 0x8f, 0xd5, 0x13, + 0xfe, 0xbf, 0x95, 0x61, 0xaa, 0xc3, 0x5a, 0x41, 0x5f, 0x34, 0xe6, 0xed, 0xd5, 0x9e, 0x17, 0xdb, + 0xfb, 0x9c, 0xb9, 0x2f, 0x32, 0x4d, 0xc9, 0x15, 0xab, 0xe3, 0x08, 0xcd, 0xdf, 0x8c, 0x49, 0xb6, + 0x79, 0x5a, 0x94, 0xdf, 0x3c, 0x6d, 0xf6, 0xc4, 0xa6, 0x9f, 0x36, 0xa4, 0x7a, 0x7a, 0xac, 0xdf, + 0xf9, 0xaf, 0xf5, 0xc1, 0x74, 0xa7, 0xc8, 0x7c, 0xf4, 0x15, 0x2b, 0xf3, 0xa0, 0xc4, 0x6b, 0xbd, + 0x87, 0xf7, 0xf3, 0x57, 0x26, 0x44, 0x36, 0x9b, 0x79, 0xf3, 0x89, 0x89, 0xdc, 0x19, 0x17, 0xad, + 0xb3, 0xf8, 0xa4, 0x88, 0x3f, 0x0e, 0x22, 0xb9, 0xc2, 0xa7, 0x8e, 0xd0, 0x15, 0xf1, 0xbe, 0x48, + 0x9c, 0x89, 0x4f, 0x92, 0xc5, 0xf9, 0xf1, 0x49, 0xb2, 0x0f, 0xb3, 0x1e, 0x0c, 0x6b, 0xe3, 0x3a, + 0xd6, 0x65, 0x70, 0x97, 0x1e, 0x51, 0x5a, 0xbf, 0x8f, 0x75, 0x29, 0xfc, 0x5d, 0x0b, 0x32, 0x4e, + 0x5b, 0xca, 0x2c, 0x63, 0x75, 0x35, 0xcb, 0x9c, 0x85, 0xbe, 0x28, 0xf4, 0x49, 0xf6, 0xb1, 0x03, + 0x1c, 0xfa, 0x04, 0x33, 0x88, 0x7a, 0xfc, 0xb6, 0xdc, 0xed, 0xf1, 0x5b, 0xaa, 0xa7, 0xfb, 0x64, + 0x87, 0x48, 0x23, 0x89, 0x62, 0xe3, 0x57, 0x69, 0x21, 0xe6, 0x30, 0xfb, 0xb7, 0xfa, 0x60, 0xaa, + 0x43, 0xb4, 0x1b, 0xd5, 0x90, 0x36, 0x9d, 0x84, 0xdc, 0x73, 0x76, 0xb3, 0x49, 0x57, 0x2f, 0xf1, + 0x62, 0x2c, 0xe1, 0xcc, 0xd9, 0x94, 0x27, 0x6e, 0xcb, 0x98, 0xae, 0x44, 0xbe, 0x36, 0x01, 0x3d, + 0xfe, 0x67, 0x52, 0x2f, 0x00, 0xc4, 0xb1, 0xbf, 0x12, 0x50, 0x09, 0xcf, 0x15, 0x4e, 0xad, 0x69, + 0xbe, 0xbf, 0xfa, 0x55, 0x01, 0xc1, 0x1a, 0x16, 0xaa, 0xc2, 0x44, 0x33, 0x0a, 0x13, 0x6e, 0x18, + 0xac, 0x72, 0x47, 0x88, 0x7e, 0x33, 0x9a, 0xaa, 0x96, 0x81, 0xe3, 0xb6, 0x1a, 0xe8, 0x65, 0x18, + 0x16, 0x11, 0x56, 0xb5, 0x30, 0xf4, 0x85, 0x19, 0x49, 0x5d, 0xc7, 0xd7, 0x53, 0x10, 0xd6, 0xf1, + 0xb4, 0x6a, 0xcc, 0xda, 0x38, 0xd8, 0xb1, 0x1a, 0xb7, 0x38, 0x6a, 0x78, 0x99, 0xfc, 0x1d, 0x43, + 0x85, 0xf2, 0x77, 0xa4, 0x86, 0xb5, 0x4a, 0xe1, 0x8b, 0x18, 0xc8, 0x35, 0x40, 0xfd, 0x41, 0x19, + 0x06, 0xf8, 0xa7, 0x38, 0x01, 0x29, 0xaf, 0x26, 0x4c, 0x4a, 0x85, 0x72, 0x25, 0xf0, 0x5e, 0xcd, + 0x57, 0x9d, 0xc4, 0xe1, 0xac, 0x49, 0xed, 0x90, 0xd4, 0x0c, 0x85, 0xe6, 0x8d, 0x3d, 0x34, 0x9b, + 0xb1, 0x94, 
0x00, 0xa7, 0xa1, 0xed, 0xa8, 0x2d, 0x80, 0x98, 0x3d, 0xd5, 0x49, 0x69, 0x88, 0x8c, + 0xb0, 0x2f, 0x15, 0xea, 0x47, 0x5d, 0x55, 0xe3, 0xbd, 0x49, 0x97, 0xa5, 0x02, 0x60, 0x8d, 0xf6, + 0xec, 0x2b, 0x50, 0x51, 0xc8, 0x79, 0x2a, 0xe4, 0x88, 0xce, 0xda, 0xfe, 0x3f, 0x18, 0xcf, 0xb4, + 0xd5, 0x93, 0x06, 0xfa, 0x3b, 0x16, 0x8c, 0xf3, 0x2e, 0xaf, 0x04, 0x3b, 0x82, 0x15, 0x7c, 0xd9, + 0x82, 0x69, 0xbf, 0xc3, 0x4e, 0x14, 0x9f, 0xf9, 0x28, 0x7b, 0x58, 0x29, 0x9f, 0x9d, 0xa0, 0xb8, + 0x63, 0x6b, 0xe8, 0x1c, 0x0c, 0xf1, 0x97, 0x87, 0x1d, 0x5f, 0x78, 0x50, 0x8f, 0xf0, 0x5c, 0xd8, + 0xbc, 0x0c, 0x2b, 0xa8, 0xfd, 0x13, 0x0b, 0x26, 0xdb, 0x1e, 0xb2, 0x7f, 0x58, 0x86, 0x21, 0xb2, + 0x7e, 0x97, 0xba, 0x64, 0xfd, 0xd6, 0x47, 0x59, 0x3e, 0x74, 0x94, 0xbf, 0x61, 0x81, 0x58, 0xa1, + 0x27, 0xa0, 0x3f, 0xac, 0x99, 0xfa, 0xc3, 0x87, 0x8a, 0x2c, 0xfa, 0x2e, 0x8a, 0xc3, 0x2f, 0x95, + 0x60, 0x82, 0x23, 0xa4, 0x37, 0x32, 0x0f, 0xcb, 0xc7, 0xe9, 0xed, 0x35, 0x1a, 0xf5, 0x04, 0x68, + 0xe7, 0x91, 0x1a, 0xdf, 0xb2, 0xef, 0xd0, 0x6f, 0xf9, 0x17, 0x16, 0x20, 0x3e, 0x27, 0xd9, 0x67, + 0x9b, 0xf9, 0xe9, 0xa6, 0x99, 0x03, 0x52, 0xce, 0xa1, 0x20, 0x58, 0xc3, 0x7a, 0xc0, 0x43, 0xc8, + 0xdc, 0x87, 0x95, 0xf3, 0xef, 0xc3, 0x7a, 0x18, 0xf5, 0xef, 0x96, 0x21, 0xeb, 0x4a, 0x89, 0xde, + 0x81, 0x91, 0x86, 0xd3, 0x74, 0xee, 0x78, 0xbe, 0x97, 0x78, 0x24, 0x2e, 0x76, 0xe1, 0xbe, 0xac, + 0xd5, 0x10, 0xd7, 0x50, 0x5a, 0x09, 0x36, 0x28, 0xa2, 0x79, 0x80, 0x66, 0xe4, 0xed, 0x78, 0x3e, + 0xd9, 0x64, 0x1a, 0x0f, 0x8b, 0xc5, 0xe0, 0x77, 0xc7, 0xb2, 0x14, 0x6b, 0x18, 0x1d, 0x7c, 0xf7, + 0xcb, 0x27, 0xe1, 0xbb, 0xdf, 0xd7, 0xa3, 0xef, 0x7e, 0x7f, 0x21, 0xdf, 0x7d, 0x0c, 0xa7, 0xe5, + 0xe1, 0x4d, 0xff, 0xaf, 0x7a, 0x3e, 0x11, 0xb2, 0x1b, 0x8f, 0xd5, 0x98, 0xdd, 0xdf, 0x9b, 0x3b, + 0x8d, 0x3b, 0x62, 0xe0, 0x2e, 0x35, 0xed, 0x16, 0x4c, 0xd5, 0x49, 0xe4, 0xb1, 0x9c, 0x94, 0x6e, + 0xba, 0x97, 0x3e, 0x07, 0x95, 0x28, 0xb3, 0x8d, 0x7b, 0x0c, 0xc8, 0xd7, 0xb2, 0x98, 0xc9, 0x6d, + 0x9b, 0x92, 0xb4, 0xff, 0x46, 0x09, 0x06, 0x85, 0x13, 0xe5, 0x09, 0x08, 0x1f, 0x57, 0x0c, 0x13, + 0xd3, 0x33, 0x79, 0xfc, 0x8f, 0x75, 0xab, 0xab, 0x71, 0xa9, 0x9e, 0x31, 0x2e, 0x3d, 0x57, 0x8c, + 0xdc, 0xe1, 0x66, 0xa5, 0x7f, 0x5a, 0x86, 0x31, 0xd3, 0xa9, 0xf4, 0x04, 0xa6, 0xe5, 0x0d, 0x18, + 0x8c, 0x85, 0x7f, 0x73, 0xa9, 0x88, 0xcf, 0x5e, 0xf6, 0x13, 0xa7, 0x37, 0xf1, 0xc2, 0xa3, 0x59, + 0x92, 0xeb, 0xe8, 0x42, 0x5d, 0x3e, 0x11, 0x17, 0xea, 0x3c, 0x5f, 0xdf, 0xbe, 0x07, 0xe1, 0xeb, + 0x6b, 0xff, 0x80, 0xb1, 0x7c, 0xbd, 0xfc, 0x04, 0x8e, 0xf1, 0xd7, 0xcd, 0xc3, 0xe1, 0x7c, 0xa1, + 0x75, 0x27, 0xba, 0xd7, 0xe5, 0x38, 0xff, 0xae, 0x05, 0xc3, 0x02, 0xf1, 0x04, 0x06, 0xf0, 0x69, + 0x73, 0x00, 0x4f, 0x15, 0x1a, 0x40, 0x97, 0x9e, 0x7f, 0xa3, 0xa4, 0x7a, 0x5e, 0x13, 0x4f, 0xed, + 0xe7, 0x66, 0xe0, 0x1e, 0xa2, 0xaa, 0x5f, 0xd8, 0x08, 0x7d, 0x21, 0xc0, 0x3d, 0x96, 0x86, 0xe6, + 0xf1, 0xf2, 0x03, 0xed, 0x37, 0x56, 0xd8, 0x2c, 0x72, 0x2c, 0x8c, 0x12, 0x71, 0x80, 0x76, 0x7a, + 0xe8, 0xdf, 0x05, 0x48, 0x5f, 0x57, 0x17, 0x51, 0xad, 0xdd, 0x77, 0x6b, 0x2b, 0xf1, 0xfc, 0x79, + 0x2f, 0x48, 0xe2, 0x24, 0x9a, 0x5f, 0x0b, 0x92, 0x1b, 0x11, 0x17, 0xfa, 0xb5, 0x58, 0x3b, 0x45, + 0x0b, 0x6b, 0x74, 0x65, 0x10, 0x07, 0x6b, 0xa3, 0xdf, 0xbc, 0x41, 0xba, 0x2e, 0xca, 0xb1, 0xc2, + 0xb0, 0x5f, 0x61, 0x9c, 0x9d, 0x4d, 0x50, 0x6f, 0x61, 0x70, 0xbf, 0x38, 0xa0, 0xa6, 0x96, 0x99, + 0x85, 0xaf, 0xeb, 0xc1, 0x76, 0x45, 0xd9, 0x27, 0xed, 0x82, 0xee, 0x47, 0x9d, 0xc6, 0xe6, 0x21, + 0xd2, 0x76, 0xed, 0xf8, 0x4a, 0x61, 0x8e, 0xdc, 0xc3, 0x45, 0x23, 0x4b, 0x3a, 0xc8, 0x32, 0xad, + 0xad, 0xd5, 0xb2, 0x79, 0xd3, 0x97, 
0x25, 0x00, 0xa7, 0x38, 0x68, 0x41, 0x28, 0x94, 0xdc, 0xe2, + 0xf2, 0xc1, 0x8c, 0x42, 0x29, 0xa7, 0x44, 0xd3, 0x28, 0x5f, 0x80, 0x61, 0xf5, 0x14, 0x4d, 0x8d, + 0x3f, 0x02, 0x52, 0xe1, 0xf2, 0xd5, 0x4a, 0x5a, 0x8c, 0x75, 0x1c, 0xb4, 0x06, 0x53, 0xae, 0x8a, + 0xd9, 0xa9, 0xb5, 0xee, 0xf8, 0x5e, 0x83, 0x56, 0xe5, 0xf1, 0xb6, 0x8f, 0xec, 0xef, 0xcd, 0x4d, + 0x55, 0xdb, 0xc1, 0xb8, 0x53, 0x1d, 0xb4, 0x0e, 0xe3, 0x31, 0x7f, 0x72, 0x47, 0x06, 0x66, 0x08, + 0x1b, 0xc4, 0xb3, 0xf2, 0xbe, 0xb3, 0x6e, 0x82, 0x0f, 0x58, 0x11, 0xe7, 0x0a, 0x32, 0x94, 0x23, + 0x4b, 0x02, 0xbd, 0x06, 0x63, 0xbe, 0xfe, 0x9e, 0x68, 0x4d, 0x98, 0x28, 0x94, 0x07, 0x9b, 0xf1, + 0xda, 0x68, 0x0d, 0x67, 0xb0, 0xd1, 0x1b, 0x30, 0xa3, 0x97, 0x88, 0x3c, 0x42, 0x4e, 0xb0, 0x49, + 0x62, 0xf1, 0x3c, 0xc7, 0x63, 0xfb, 0x7b, 0x73, 0x33, 0x57, 0xbb, 0xe0, 0xe0, 0xae, 0xb5, 0xd1, + 0x45, 0x18, 0x91, 0x33, 0xa9, 0x85, 0x31, 0xa5, 0xbe, 0x93, 0x1a, 0x0c, 0x1b, 0x98, 0xef, 0xef, + 0x5a, 0xf7, 0x0b, 0xb4, 0xb2, 0x76, 0x84, 0xa3, 0x77, 0x61, 0x44, 0xef, 0x63, 0xf6, 0x6c, 0xce, + 0x7f, 0xa3, 0x55, 0x88, 0x02, 0xaa, 0xe7, 0x3a, 0x0c, 0x1b, 0xb4, 0xed, 0x1b, 0x30, 0x50, 0xdf, + 0x8d, 0x1b, 0x89, 0x5f, 0x80, 0xbf, 0x3d, 0x69, 0x0c, 0x21, 0xdd, 0x7b, 0xec, 0xbd, 0x28, 0x31, + 0x22, 0x9b, 0xc0, 0xf8, 0xfa, 0x72, 0xad, 0x1e, 0x36, 0xee, 0x92, 0x64, 0x91, 0x6b, 0x6f, 0x58, + 0x70, 0x37, 0xeb, 0x88, 0x5c, 0xab, 0x03, 0x3f, 0xb4, 0xff, 0xd4, 0x82, 0x7e, 0xf6, 0xd6, 0x51, + 0xde, 0x3b, 0x59, 0x45, 0x3a, 0x8d, 0x5e, 0x86, 0x01, 0xb2, 0xb1, 0x41, 0x1a, 0x89, 0xd8, 0xc6, + 0x32, 0x56, 0x60, 0x60, 0x85, 0x95, 0xd2, 0xcd, 0xc9, 0x1a, 0xe3, 0x7f, 0xb1, 0x40, 0x46, 0x9f, + 0x85, 0x4a, 0xe2, 0x6d, 0x93, 0x45, 0xd7, 0x15, 0x06, 0xc5, 0xde, 0xdc, 0x57, 0x14, 0xb3, 0x58, + 0x97, 0x44, 0x70, 0x4a, 0xcf, 0xfe, 0x5a, 0x09, 0x20, 0x8d, 0xd4, 0xc9, 0x1b, 0xe6, 0x52, 0xdb, + 0x73, 0x60, 0x4f, 0x77, 0x78, 0x0e, 0x0c, 0xa5, 0x04, 0x3b, 0x3c, 0x06, 0xa6, 0xa6, 0xaa, 0x5c, + 0x68, 0xaa, 0xfa, 0x7a, 0x99, 0xaa, 0x65, 0x98, 0x4c, 0x23, 0x8d, 0xcc, 0x90, 0x4d, 0x96, 0x1c, + 0x74, 0x3d, 0x0b, 0xc4, 0xed, 0xf8, 0xf6, 0xd7, 0x2c, 0x10, 0x0e, 0x8f, 0x05, 0x56, 0xab, 0x2b, + 0x9f, 0xee, 0x31, 0xb2, 0x98, 0x3d, 0x5b, 0xc4, 0x17, 0x54, 0xe4, 0x2e, 0x53, 0xfb, 0xc7, 0xc8, + 0x58, 0x66, 0x50, 0xb5, 0x7f, 0xd3, 0x82, 0x61, 0x0e, 0xbe, 0xc6, 0x64, 0xea, 0xfc, 0x7e, 0xf5, + 0x94, 0x79, 0x96, 0xbd, 0x6a, 0x43, 0x09, 0xab, 0x0c, 0xa4, 0xfa, 0xab, 0x36, 0x12, 0x80, 0x53, + 0x1c, 0xf4, 0x0c, 0x0c, 0xc6, 0xad, 0x3b, 0x0c, 0x3d, 0xe3, 0xfd, 0x58, 0xe7, 0xc5, 0x58, 0xc2, + 0xed, 0x7f, 0x5e, 0x82, 0x89, 0xac, 0xf3, 0x2b, 0xc2, 0x30, 0xc0, 0x65, 0xec, 0xac, 0x78, 0x76, + 0x98, 0x2d, 0x47, 0x73, 0x9e, 0x05, 0xfe, 0x36, 0x33, 0x33, 0xba, 0x0b, 0x4a, 0x68, 0x03, 0x86, + 0xdd, 0xf0, 0x5e, 0x70, 0xcf, 0x89, 0xdc, 0xc5, 0xda, 0x9a, 0xf8, 0x12, 0x39, 0xee, 0x4a, 0xd5, + 0xb4, 0x82, 0xee, 0x9a, 0xcb, 0x6c, 0x0b, 0x29, 0x08, 0xeb, 0x84, 0xa9, 0x4e, 0xd9, 0x08, 0x83, + 0x0d, 0x6f, 0xf3, 0x9a, 0xd3, 0x2c, 0x76, 0x31, 0xbf, 0x2c, 0xd1, 0xb5, 0x36, 0x46, 0x45, 0x8e, + 0x06, 0x0e, 0xc0, 0x29, 0x49, 0xfb, 0xd7, 0xa7, 0xc1, 0x58, 0x0b, 0x46, 0x7a, 0x58, 0xeb, 0x81, + 0xa7, 0x87, 0x7d, 0x0b, 0x86, 0xc8, 0x76, 0x33, 0xd9, 0xad, 0x7a, 0x51, 0xb1, 0x64, 0xdf, 0x2b, + 0x02, 0xbb, 0x9d, 0xba, 0x84, 0x60, 0x45, 0xb1, 0x4b, 0xb2, 0xdf, 0xf2, 0x43, 0x91, 0xec, 0xb7, + 0xef, 0x2f, 0x25, 0xd9, 0xef, 0x1b, 0x30, 0xb8, 0xe9, 0x25, 0x98, 0x34, 0x43, 0x91, 0xf4, 0x22, + 0x67, 0xf1, 0x5c, 0xe2, 0xc8, 0xed, 0x69, 0x20, 0x05, 0x00, 0x4b, 0x72, 0x68, 0x5d, 0x6d, 0xaa, + 0x81, 0x22, 0x67, 0x79, 0xbb, 0xad, 0xaf, 0xe3, 0xb6, 0x12, 
0xc9, 0x7d, 0x07, 0xdf, 0x7f, 0x72, + 0x5f, 0x95, 0x92, 0x77, 0xe8, 0x41, 0xa5, 0xe4, 0x35, 0x52, 0x1b, 0x57, 0x8e, 0x23, 0xb5, 0xf1, + 0xd7, 0x2c, 0x38, 0xd5, 0xec, 0x94, 0x18, 0x5c, 0x24, 0xd7, 0xfd, 0xe4, 0x11, 0x52, 0xa5, 0x1b, + 0x4d, 0xb3, 0x54, 0x02, 0x1d, 0xd1, 0x70, 0xe7, 0x86, 0x65, 0x8e, 0xe4, 0xe1, 0xf7, 0x9f, 0x23, + 0xf9, 0xb8, 0xb3, 0xf0, 0xa6, 0x19, 0x93, 0x47, 0x8f, 0x25, 0x63, 0xf2, 0xd8, 0x03, 0xcc, 0x98, + 0xac, 0xe5, 0x3a, 0x1e, 0x7f, 0xb0, 0xb9, 0x8e, 0xb7, 0xcc, 0x73, 0x89, 0xa7, 0xd6, 0x7d, 0xb9, + 0xf0, 0xb9, 0x64, 0xb4, 0x70, 0xf8, 0xc9, 0xc4, 0xb3, 0x3e, 0x4f, 0xbe, 0xcf, 0xac, 0xcf, 0x46, + 0xee, 0x64, 0x74, 0x1c, 0xb9, 0x93, 0xdf, 0xd1, 0x4f, 0xd0, 0xa9, 0x22, 0x2d, 0xa8, 0x83, 0xb2, + 0xbd, 0x85, 0x4e, 0x67, 0x68, 0x7b, 0x76, 0xe6, 0xe9, 0x93, 0xce, 0xce, 0x7c, 0xea, 0x18, 0xb3, + 0x33, 0x9f, 0x3e, 0xd1, 0xec, 0xcc, 0x8f, 0x3c, 0x24, 0xd9, 0x99, 0x67, 0x4e, 0x2a, 0x3b, 0xf3, + 0xa3, 0x0f, 0x34, 0x3b, 0x33, 0xfd, 0x74, 0x4d, 0x19, 0x42, 0x36, 0x33, 0x5b, 0xe4, 0xd3, 0x75, + 0x8c, 0x38, 0xe3, 0x9f, 0x4e, 0x81, 0x70, 0x4a, 0xd4, 0xfe, 0x2b, 0x70, 0xe6, 0xf0, 0xa5, 0x9b, + 0x7a, 0x6b, 0xd4, 0x52, 0x9b, 0x59, 0xc6, 0x5b, 0x83, 0x89, 0x85, 0x1a, 0x56, 0xe1, 0xf4, 0xb1, + 0xdf, 0xb6, 0xe0, 0x91, 0x2e, 0xd9, 0x15, 0x0b, 0xc7, 0x5f, 0x36, 0x61, 0xbc, 0x69, 0x56, 0x2d, + 0x1c, 0xce, 0x6d, 0x64, 0x73, 0x54, 0x3e, 0xf2, 0x19, 0x00, 0xce, 0x92, 0x5f, 0xfa, 0xd0, 0x0f, + 0x7f, 0x7c, 0xe6, 0x03, 0x3f, 0xfa, 0xf1, 0x99, 0x0f, 0xfc, 0xf1, 0x8f, 0xcf, 0x7c, 0xe0, 0xe7, + 0xf6, 0xcf, 0x58, 0x3f, 0xdc, 0x3f, 0x63, 0xfd, 0x68, 0xff, 0x8c, 0xf5, 0x67, 0xfb, 0x67, 0xac, + 0xaf, 0xfd, 0xe4, 0xcc, 0x07, 0xde, 0x2c, 0xed, 0xbc, 0xf0, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, + 0x5f, 0x32, 0x87, 0xa4, 0xbe, 0xc8, 0x00, 0x00, } diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/generated.proto b/vendor/k8s.io/kubernetes/pkg/api/v1/generated.proto index f6fa8a4a12..9c48148aaf 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/generated.proto +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,10 +21,12 @@ syntax = 'proto2'; package k8s.io.kubernetes.pkg.api.v1; -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; @@ -140,7 +142,7 @@ message Binding { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The target object that you want to bind to the standard object. optional ObjectReference target = 2; @@ -239,7 +241,7 @@ message ComponentStatus { // Standard object's metadata. 
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // List of component conditions observed // +optional @@ -251,7 +253,7 @@ message ComponentStatusList { // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ComponentStatus objects. repeated ComponentStatus items = 2; @@ -262,7 +264,7 @@ message ConfigMap { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Data contains the configuration data. // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot. @@ -270,6 +272,20 @@ message ConfigMap { map data = 2; } +// ConfigMapEnvSource selects a ConfigMap to populate the environment +// variables with. +// +// The contents of the target ConfigMap's Data field will represent the +// key-value pairs as environment variables. +message ConfigMapEnvSource { + // The ConfigMap to select from. + optional LocalObjectReference localObjectReference = 1; + + // Specify whether the ConfigMap must be defined + // +optional + optional bool optional = 2; +} + // Selects a key from a ConfigMap. message ConfigMapKeySelector { // The ConfigMap to select from. @@ -277,18 +293,47 @@ message ConfigMapKeySelector { // The key to select. optional string key = 2; + + // Specify whether the ConfigMap or it's key must be defined + // +optional + optional bool optional = 3; } // ConfigMapList is a resource containing a list of ConfigMap objects. message ConfigMapList { // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of ConfigMaps. repeated ConfigMap items = 2; } +// Adapts a ConfigMap into a projected volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names, +// unless the items element is populated with specific mappings of keys to paths. +// Note that this is identical to a configmap volume source without the default +// mode. +message ConfigMapProjection { + optional LocalObjectReference localObjectReference = 1; + + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + repeated KeyToPath items = 2; + + // Specify whether the ConfigMap or it's keys must be defined + // +optional + optional bool optional = 4; +} + // Adapts a ConfigMap into a volume. 
// // The contents of the target ConfigMap's Data field will be presented in a @@ -303,8 +348,8 @@ message ConfigMapVolumeSource { // key and content is the value. If specified, the listed keys will be // projected into the specified paths, and unlisted keys will not be // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error. Paths must be relative and may not contain - // the '..' path or start with '..'. + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. // +optional repeated KeyToPath items = 2; @@ -315,6 +360,10 @@ message ConfigMapVolumeSource { // mode, like fsGroup, and the result can be other mode bits set. // +optional optional int32 defaultMode = 3; + + // Specify whether the ConfigMap or it's keys must be defined + // +optional + optional bool optional = 4; } // A single application container that you want to run within a pod. @@ -368,6 +417,15 @@ message Container { // +optional repeated ContainerPort ports = 6; + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + repeated EnvFromSource envFrom = 19; + // List of environment variables to set in the container. // Cannot be updated. // +optional @@ -406,11 +464,23 @@ message Container { // Optional: Path at which the file to which the container's termination message // will be written is mounted into the container's filesystem. // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. // Defaults to /dev/termination-log. // Cannot be updated. // +optional optional string terminationMessagePath = 13; + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + optional string terminationMessagePolicy = 20; + // Image pull policy. // One of Always, Never, IfNotPresent. // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. @@ -507,7 +577,7 @@ message ContainerState { message ContainerStateRunning { // Time at which the container was last (re-)started // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time startedAt = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 1; } // ContainerStateTerminated is a terminated state of a container. 
@@ -529,11 +599,11 @@ message ContainerStateTerminated { // Time at which previous execution of the container started // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time startedAt = 5; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 5; // Time at which the container last terminated // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time finishedAt = 6; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 6; // Container's ID in the format 'docker://' // +optional @@ -595,6 +665,8 @@ message DaemonEndpoint { } // DeleteOptions may be provided when deleting an API object +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +// +k8s:openapi-gen=false message DeleteOptions { // The duration in seconds before the object should be deleted. Value must be non-negative integer. // The value zero indicates delete immediately. If this value is nil, the default grace period for the @@ -608,10 +680,28 @@ message DeleteOptions { // +optional optional Preconditions preconditions = 2; + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. // Should the dependent objects be orphaned. If true/false, the "orphan" // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. // +optional optional bool orphanDependents = 3; + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // +optional + optional string propagationPolicy = 4; +} + +// Represents downward API info for projecting into a projected volume. +// Note that this is identical to a downwardAPI volume source without the default +// mode. +message DownwardAPIProjection { + // Items is a list of DownwardAPIVolume file + // +optional + repeated DownwardAPIVolumeFile items = 1; } // DownwardAPIVolumeFile represents information to create the file containing the pod field @@ -747,7 +837,7 @@ message Endpoints { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The set of all endpoints is the union of all subsets. Addresses are placed into // subsets according to the IPs they share. A single address with multiple ports, @@ -764,12 +854,27 @@ message EndpointsList { // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of endpoints. repeated Endpoints items = 2; } +// EnvFromSource represents the source of a set of ConfigMaps +message EnvFromSource { + // An optional identifer to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + // +optional + optional string prefix = 1; + + // The ConfigMap to select from + // +optional + optional ConfigMapEnvSource configMapRef = 2; + + // The Secret to select from + // +optional + optional SecretEnvSource secretRef = 3; +} + // EnvVar represents an environment variable present in a Container. message EnvVar { // Name of the environment variable. Must be a C_IDENTIFIER. 
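A minimal sketch of the new envFrom/EnvFromSource fields introduced above, assuming the generated Go structs (Container.EnvFrom, ConfigMapEnvSource, SecretEnvSource) mirror the proto messages; the object names "app-settings" and "app-secrets" are placeholders:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	optional := true

	container := v1.Container{
		Name:  "app",
		Image: "example/app:1.0",
		EnvFrom: []v1.EnvFromSource{
			{
				// Every key in ConfigMap "app-settings" becomes an env var named CONF_<key>.
				Prefix: "CONF_",
				ConfigMapRef: &v1.ConfigMapEnvSource{
					LocalObjectReference: v1.LocalObjectReference{Name: "app-settings"},
					Optional:             &optional, // a missing ConfigMap does not block the container
				},
			},
			{
				// On duplicate keys the later source wins; explicit Env entries win over EnvFrom.
				SecretRef: &v1.SecretEnvSource{
					LocalObjectReference: v1.LocalObjectReference{Name: "app-secrets"},
				},
			},
		},
	}
	fmt.Printf("%+v\n", container.EnvFrom)
}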
@@ -817,7 +922,7 @@ message EnvVarSource { message Event { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The object that this event is about. optional ObjectReference involvedObject = 2; @@ -839,11 +944,11 @@ message Event { // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time firstTimestamp = 6; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time firstTimestamp = 6; // The time at which the most recent occurrence of this event was recorded. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTimestamp = 7; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTimestamp = 7; // The number of times this event has occurred. // +optional @@ -859,7 +964,7 @@ message EventList { // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of events repeated Event items = 2; @@ -887,15 +992,6 @@ message ExecAction { repeated string command = 1; } -// ExportOptions is the query options to the standard REST get call. -message ExportOptions { - // Should this value be exported. Export strips fields that a user can not specify. - optional bool export = 1; - - // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace' - optional bool exact = 2; -} - // Represents a Fibre Channel volume. // Fibre Channel volumes can only be mounted as read/write once. // Fibre Channel volumes support ownership management and SELinux relabeling. @@ -1043,7 +1139,7 @@ message HTTPGetAction { // Name or number of the port to access on the container. // Number must be in the range 1 to 65535. // Name must be an IANA_SVC_NAME. - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString port = 2; + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; // Host name to connect to, defaults to the pod IP. You probably want to set // "Host" in httpHeaders instead. @@ -1126,6 +1222,11 @@ message ISCSIVolumeSource { // Defaults to false. // +optional optional bool readOnly = 6; + + // iSCSI target portal List. The portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + // +optional + repeated string portals = 7; } // Maps a string key to a path within a volume. @@ -1173,7 +1274,7 @@ message LimitRange { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the limits enforced. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -1189,23 +1290,23 @@ message LimitRangeItem { // Max usage constraints on this kind by resource name. // +optional - map max = 2; + map max = 2; // Min usage constraints on this kind by resource name. // +optional - map min = 3; + map min = 3; // Default resource requirement limit value by resource name if resource limit is omitted. 
// +optional - map default = 4; + map default = 4; // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. // +optional - map defaultRequest = 5; + map defaultRequest = 5; // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource. // +optional - map maxLimitRequestRatio = 6; + map maxLimitRequestRatio = 6; } // LimitRangeList is a list of LimitRange items. @@ -1213,7 +1314,7 @@ message LimitRangeList { // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of LimitRange objects. // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_limit_range.md @@ -1231,13 +1332,15 @@ message List { // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of objects - repeated k8s.io.kubernetes.pkg.runtime.RawExtension items = 2; + repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; } // ListOptions is the query options to a standard REST list call. +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +// +k8s:openapi-gen=false message ListOptions { // A selector to restrict the list of returned objects by their labels. // Defaults to everything. @@ -1256,6 +1359,10 @@ message ListOptions { // When specified with a watch call, shows changes that occur after that particular version of a resource. // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. // +optional optional string resourceVersion = 4; @@ -1321,7 +1428,7 @@ message Namespace { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of the Namespace. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -1339,7 +1446,7 @@ message NamespaceList { // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Namespace objects in the list. // More info: http://kubernetes.io/docs/user-guide/namespaces @@ -1368,7 +1475,7 @@ message Node { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a node. 
// http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -1425,11 +1532,11 @@ message NodeCondition { // Last time we got an update on a given condition. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastHeartbeatTime = 3; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastHeartbeatTime = 3; // Last time the condition transit from one status to another. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // (brief) reason for the condition's last transition. // +optional @@ -1452,7 +1559,7 @@ message NodeList { // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of nodes repeated Node items = 2; @@ -1465,6 +1572,13 @@ message NodeProxyOptions { optional string path = 1; } +// NodeResources is an object for conveying resource information about a node. +// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. +message NodeResources { + // Capacity represents the available resources of a node + map capacity = 1; +} + // A node selector represents the union of the results of one or more label queries // over a set of nodes; that is, it represents the OR of the selectors represented // by the node selector terms. @@ -1514,9 +1628,13 @@ message NodeSpec { optional string providerID = 3; // Unschedulable controls node schedulability of new pods. By default, node is schedulable. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration" + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration // +optional optional bool unschedulable = 4; + + // If specified, the node's taints. + // +optional + repeated Taint taints = 5; } // NodeStatus is information about the current status of a node. @@ -1524,12 +1642,12 @@ message NodeStatus { // Capacity represents the total resources of a node. // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity for more details. // +optional - map capacity = 1; + map capacity = 1; // Allocatable represents the resources of a node that are available for scheduling. // Defaults to Capacity. // +optional - map allocatable = 2; + map allocatable = 2; // NodePhase is the recently observed lifecycle phase of the node. // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-phase @@ -1619,6 +1737,8 @@ message ObjectFieldSelector { // ObjectMeta is metadata that all persisted resources must have, which includes all objects // users must create. +// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. +// +k8s:openapi-gen=false message ObjectMeta { // Name must be unique within a namespace. Is required when creating resources, although // some resources may allow a client to request the generation of an appropriate name @@ -1701,7 +1821,7 @@ message ObjectMeta { // Null for lists. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time creationTimestamp = 8; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time creationTimestamp = 8; // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. 
This // field is set by the server when a graceful deletion is requested by the user, and is not @@ -1721,7 +1841,7 @@ message ObjectMeta { // Read-only. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time deletionTimestamp = 9; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deletionTimestamp = 9; // Number of seconds allowed for this object to gracefully terminate before // it will be removed from the system. Only set when deletionTimestamp is also set. @@ -1749,7 +1869,7 @@ message ObjectMeta { // then an entry in this list will point to this controller, with the controller field set to true. // There cannot be more than one managing controller. // +optional - repeated OwnerReference ownerReferences = 13; + repeated k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference ownerReferences = 13; // Must be empty before the object is deleted from the registry. Each entry // is an identifier for the responsible component that will remove the entry @@ -1808,30 +1928,6 @@ message ObjectReference { optional string fieldPath = 7; } -// OwnerReference contains enough information to let you identify an owning -// object. Currently, an owning object must be in the same namespace, so there -// is no namespace field. -message OwnerReference { - // API version of the referent. - optional string apiVersion = 5; - - // Kind of the referent. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - optional string kind = 1; - - // Name of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names - optional string name = 3; - - // UID of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids - optional string uid = 4; - - // If true, this reference points to the managing controller. - // +optional - optional bool controller = 6; -} - // PersistentVolume (PV) is a storage resource provisioned by an administrator. // It is analogous to a node. // More info: http://kubernetes.io/docs/user-guide/persistent-volumes @@ -1839,7 +1935,7 @@ message PersistentVolume { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines a specification of a persistent volume owned by the cluster. // Provisioned by an administrator. @@ -1860,7 +1956,7 @@ message PersistentVolumeClaim { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired characteristics of a volume requested by a pod author. // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims @@ -1879,7 +1975,7 @@ message PersistentVolumeClaimList { // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // A list of persistent volume claims. // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims @@ -1896,7 +1992,7 @@ message PersistentVolumeClaimSpec { // A label query over volumes to consider for binding. 
// +optional - optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 4; + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // Resources represents the minimum resources the volume should have. // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources @@ -1906,6 +2002,11 @@ message PersistentVolumeClaimSpec { // VolumeName is the binding reference to the PersistentVolume backing this claim. // +optional optional string volumeName = 3; + + // Name of the StorageClass required by the claim. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1 + // +optional + optional string storageClassName = 5; } // PersistentVolumeClaimStatus is the current status of a persistent volume claim. @@ -1921,7 +2022,7 @@ message PersistentVolumeClaimStatus { // Represents the actual resources of the underlying volume. // +optional - map capacity = 3; + map capacity = 3; } // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. @@ -1944,7 +2045,7 @@ message PersistentVolumeList { // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of persistent volumes. // More info: http://kubernetes.io/docs/user-guide/persistent-volumes @@ -2036,6 +2137,14 @@ message PersistentVolumeSource { // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 17; + + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + optional PortworxVolumeSource portworxVolume = 18; + + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + optional ScaleIOVolumeSource scaleIO = 19; } // PersistentVolumeSpec is the specification of a persistent volume. @@ -2043,7 +2152,7 @@ message PersistentVolumeSpec { // A description of the persistent volume's resources and capacity. // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity // +optional - map capacity = 1; + map capacity = 1; // The actual volume backing the persistent volume. optional PersistentVolumeSource persistentVolumeSource = 2; @@ -2066,6 +2175,11 @@ message PersistentVolumeSpec { // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy // +optional optional string persistentVolumeReclaimPolicy = 5; + + // Name of StorageClass to which this persistent volume belongs. Empty value + // means that this volume does not belong to any StorageClass. + // +optional + optional string storageClassName = 6; } // PersistentVolumeStatus is the current status of a persistent volume. @@ -2102,7 +2216,7 @@ message Pod { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -2162,12 +2276,10 @@ message PodAffinity { message PodAffinityTerm { // A label query over a set of resources, in this case pods. 
// +optional - optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector labelSelector = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1; // namespaces specifies which namespaces the labelSelector applies to (matches against); - // nil list means "this pod's namespace," empty list means "all namespaces" - // The json tag here is not "omitempty" since we need to distinguish nil and empty. - // See https://golang.org/pkg/encoding/json/#Marshal for more details. + // null or empty list means "this pod's namespace" repeated string namespaces = 2; // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -2263,11 +2375,11 @@ message PodCondition { // Last time we probed the condition. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // Unique, one-word, CamelCase reason for the condition's last transition. // +optional @@ -2317,7 +2429,7 @@ message PodList { // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of pods. // More info: http://kubernetes.io/docs/user-guide/pods @@ -2350,7 +2462,7 @@ message PodLogOptions { // If this value is in the future, no logs will be returned. // Only one of sinceSeconds or sinceTime may be specified. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time sinceTime = 5; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5; // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line // of log output. Defaults to false. @@ -2369,6 +2481,19 @@ message PodLogOptions { optional int64 limitBytes = 8; } +// PodPortForwardOptions is the query options to a Pod's port forward call +// when using WebSockets. +// The `port` query parameter must specify the port or +// ports (comma separated) to forward over. +// Port forwarding over SPDY does not use these options. It requires the port +// to be passed in the `port` header as part of request. +message PodPortForwardOptions { + // List of ports to forward + // Required when using WebSockets + // +optional + repeated int32 ports = 1; +} + // PodProxyOptions is the query options to a Pod's proxy call. message PodProxyOptions { // Path is the URL path to use for the current proxy request to pod. @@ -2429,7 +2554,7 @@ message PodSecurityContext { message PodSignature { // Reference to controller whose pods should avoid this node. // +optional - optional OwnerReference podController = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference podController = 1; } // PodSpec is a description of a pod. @@ -2439,6 +2564,21 @@ message PodSpec { // +optional repeated Volume volumes = 1; + // List of initialization containers belonging to the pod. + // Init containers are executed in order prior to containers being started. If any + // init container fails, the pod is considered to have failed and is handled according + // to its restartPolicy. The name for an init container or normal container must be + // unique among all containers. 
+ // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. + // The resourceRequirements of an init container are taken into account during scheduling + // by finding the highest request/limit for each resource type, and then using the max of + // of that value or the sum of the normal containers. Limits are applied to init containers + // in a similar fashion. + // Init containers cannot currently be added or removed. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/containers + repeated Container initContainers = 20; + // List of containers belonging to the pod. // Containers cannot currently be added or removed. // There must be at least one container in a Pod. @@ -2470,8 +2610,9 @@ message PodSpec { optional int64 activeDeadlineSeconds = 5; // Set DNS policy for containers within the pod. - // One of 'ClusterFirst' or 'Default'. + // One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. // Defaults to "ClusterFirst". + // To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. // +optional optional string dnsPolicy = 6; @@ -2492,6 +2633,10 @@ message PodSpec { // +optional optional string serviceAccount = 9; + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + // +optional + optional bool automountServiceAccountToken = 21; + // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, // the scheduler simply schedules this pod onto that node, assuming that it fits resource // requirements. @@ -2538,6 +2683,19 @@ message PodSpec { // If not specified, the pod will not have a domainname at all. // +optional optional string subdomain = 17; + + // If specified, the pod's scheduling constraints + // +optional + optional Affinity affinity = 18; + + // If specified, the pod will be dispatched by specified scheduler. + // If not specified, the pod will be dispatched by default scheduler. + // +optional + optional string schedulerName = 19; + + // If specified, the pod's tolerations. + // +optional + repeated Toleration tolerations = 22; } // PodStatus represents information about the status of a pod. Status may trail the actual @@ -2574,13 +2732,25 @@ message PodStatus { // RFC 3339 date and time at which the object was acknowledged by the Kubelet. // This is before the Kubelet pulled the container image(s) for the pod. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 7; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7; + + // The list has one entry per init container in the manifest. The most recent successful + // init container will have ready = true, the most recently started container will have + // startTime set. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses + repeated ContainerStatus initContainerStatuses = 10; // The list has one entry per container in the manifest. Each entry is currently the output // of `docker inspect`. 
// More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses // +optional repeated ContainerStatus containerStatuses = 8; + + // The Quality of Service (QOS) classification assigned to the pod based on resource requirements + // See PodQOSClass type for available QOS classes + // More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md + // +optional + optional string qosClass = 9; } // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded @@ -2588,7 +2758,7 @@ message PodStatusResult { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Most recently observed status of the pod. // This data may not be up to date. @@ -2604,7 +2774,7 @@ message PodTemplate { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Template defines the pods that will be created from this pod template. // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -2617,7 +2787,7 @@ message PodTemplateList { // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of pod templates repeated PodTemplate items = 2; @@ -2628,7 +2798,7 @@ message PodTemplateSpec { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -2636,7 +2806,24 @@ message PodTemplateSpec { optional PodSpec spec = 2; } +// PortworxVolumeSource represents a Portworx volume resource. +message PortworxVolumeSource { + // VolumeID uniquely identifies a Portworx volume + optional string volumeID = 1; + + // FSType represents the filesystem type to mount + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + optional string fsType = 2; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 3; +} + // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +// +k8s:openapi-gen=false message Preconditions { // Specifies the target UID. // +optional @@ -2650,7 +2837,7 @@ message PreferAvoidPodsEntry { // Time at which this entry was added to the list. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time evictionTime = 2; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time evictionTime = 2; // (brief) reason why this entry was added to the list. 
// +optional @@ -2704,6 +2891,20 @@ message Probe { optional int32 failureThreshold = 6; } +// Represents a projected volume source +message ProjectedVolumeSource { + // list of volume projections + repeated VolumeProjection sources = 1; + + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + optional int32 defaultMode = 2; +} + // Represents a Quobyte mount that lasts the lifetime of a pod. // Quobyte volumes do not support ownership management or SELinux relabeling. message QuobyteVolumeSource { @@ -2787,7 +2988,7 @@ message RangeAllocation { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Range is string that identifies the range represented by 'data'. optional string range = 2; @@ -2802,7 +3003,7 @@ message ReplicationController { // be the same as the Pod(s) that the replication controller manages. // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the replication controller. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -2828,7 +3029,7 @@ message ReplicationControllerCondition { // The last time the condition transitioned from one status to another. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 3; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -2844,7 +3045,7 @@ message ReplicationControllerList { // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of replication controllers. // More info: http://kubernetes.io/docs/user-guide/replication-controller @@ -2920,7 +3121,7 @@ message ResourceFieldSelector { // Specifies the output format of the exposed resources, defaults to "1" // +optional - optional k8s.io.kubernetes.pkg.api.resource.Quantity divisor = 3; + optional k8s.io.apimachinery.pkg.api.resource.Quantity divisor = 3; } // ResourceQuota sets aggregate quota restrictions enforced per namespace @@ -2928,7 +3129,7 @@ message ResourceQuota { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired quota. // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -2946,7 +3147,7 @@ message ResourceQuotaList { // Standard list metadata. 
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ResourceQuota objects. // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota @@ -2958,7 +3159,7 @@ message ResourceQuotaSpec { // Hard is the set of desired hard limits for each named resource. // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota // +optional - map hard = 1; + map hard = 1; // A collection of filters that must match each object tracked by a quota. // If not specified, the quota matches all objects. @@ -2971,11 +3172,11 @@ message ResourceQuotaStatus { // Hard is the set of enforced hard limits for each named resource. // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota // +optional - map hard = 1; + map hard = 1; // Used is the current observed total usage of the resource in the namespace. // +optional - map used = 2; + map used = 2; } // ResourceRequirements describes the compute resource requirements. @@ -2983,14 +3184,14 @@ message ResourceRequirements { // Limits describes the maximum amount of compute resources allowed. // More info: http://kubernetes.io/docs/user-guide/compute-resources/ // +optional - map limits = 1; + map limits = 1; // Requests describes the minimum amount of compute resources required. // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, // otherwise to an implementation-defined value. // More info: http://kubernetes.io/docs/user-guide/compute-resources/ // +optional - map requests = 2; + map requests = 2; } // SELinuxOptions are the labels to be applied to the container @@ -3012,13 +3213,57 @@ message SELinuxOptions { optional string level = 4; } +// ScaleIOVolumeSource represents a persistent ScaleIO volume +message ScaleIOVolumeSource { + // The host address of the ScaleIO API Gateway. + optional string gateway = 1; + + // The name of the storage system as configured in ScaleIO. + optional string system = 2; + + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + optional LocalObjectReference secretRef = 3; + + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + optional bool sslEnabled = 4; + + // The name of the Protection Domain for the configured storage (defaults to "default"). + // +optional + optional string protectionDomain = 5; + + // The Storage Pool associated with the protection domain (defaults to "default"). + // +optional + optional string storagePool = 6; + + // Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). + // +optional + optional string storageMode = 7; + + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. + optional string volumeName = 8; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + optional string fsType = 9; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. 
+ // +optional + optional bool readOnly = 10; +} + // Secret holds secret data of a certain type. The total bytes of the values in // the Data field must be less than MaxSecretSize bytes. message Secret { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN // or leading dot followed by valid DNS_SUBDOMAIN. @@ -3041,6 +3286,20 @@ message Secret { optional string type = 3; } +// SecretEnvSource selects a Secret to populate the environment +// variables with. +// +// The contents of the target Secret's Data field will represent the +// key-value pairs as environment variables. +message SecretEnvSource { + // The Secret to select from. + optional LocalObjectReference localObjectReference = 1; + + // Specify whether the Secret must be defined + // +optional + optional bool optional = 2; +} + // SecretKeySelector selects a key of a Secret. message SecretKeySelector { // The name of the secret in the pod's namespace to select from. @@ -3048,6 +3307,10 @@ message SecretKeySelector { // The key of the secret to select from. Must be a valid secret key. optional string key = 2; + + // Specify whether the Secret or it's key must be defined + // +optional + optional bool optional = 3; } // SecretList is a list of Secret. @@ -3055,13 +3318,37 @@ message SecretList { // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of secret objects. // More info: http://kubernetes.io/docs/user-guide/secrets repeated Secret items = 2; } +// Adapts a secret into a projected volume. +// +// The contents of the target Secret's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names. +// Note that this is identical to a secret volume source without the default +// mode. +message SecretProjection { + optional LocalObjectReference localObjectReference = 1; + + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + repeated KeyToPath items = 2; + + // Specify whether the Secret or its key must be defined + // +optional + optional bool optional = 4; +} + // Adapts a Secret into a volume. // // The contents of the target Secret's Data field will be presented in a volume @@ -3078,8 +3365,8 @@ message SecretVolumeSource { // key and content is the value. If specified, the listed keys will be // projected into the specified paths, and unlisted keys will not be // present. If a key is specified which is not present in the Secret, - // the volume setup will error. Paths must be relative and may not contain - // the '..' path or start with '..'. + // the volume setup will error unless it is marked optional. 
Paths must be + // relative and may not contain the '..' path or start with '..'. // +optional repeated KeyToPath items = 2; @@ -3090,6 +3377,10 @@ message SecretVolumeSource { // mode, like fsGroup, and the result can be other mode bits set. // +optional optional int32 defaultMode = 3; + + // Specify whether the Secret or it's keys must be defined + // +optional + optional bool optional = 4; } // SecurityContext holds security configuration that will be applied to a container. @@ -3150,7 +3441,7 @@ message Service { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a service. // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -3173,7 +3464,7 @@ message ServiceAccount { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. // More info: http://kubernetes.io/docs/user-guide/secrets @@ -3186,6 +3477,11 @@ message ServiceAccount { // More info: http://kubernetes.io/docs/user-guide/secrets#manually-specifying-an-imagepullsecret // +optional repeated LocalObjectReference imagePullSecrets = 3; + + // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. + // Can be overridden at the pod level. + // +optional + optional bool automountServiceAccountToken = 4; } // ServiceAccountList is a list of ServiceAccount objects @@ -3193,7 +3489,7 @@ message ServiceAccountList { // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ServiceAccounts. // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts @@ -3205,7 +3501,7 @@ message ServiceList { // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of services repeated Service items = 2; @@ -3237,7 +3533,7 @@ message ServicePort { // omitted or set equal to the 'port' field. // More info: http://kubernetes.io/docs/user-guide/services#defining-a-service // +optional - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString targetPort = 4; + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4; // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. // Usually assigned by the system. If specified, it will be allocated to the service @@ -3360,12 +3656,18 @@ message ServiceStatus { optional LoadBalancerStatus loadBalancer = 1; } +message Sysctl { + optional string name = 1; + + optional string value = 2; +} + // TCPSocketAction describes an action based on opening a socket message TCPSocketAction { // Number or name of the port to access on the container. // Number must be in the range 1 to 65535. // Name must be an IANA_SVC_NAME. 
- optional k8s.io.kubernetes.pkg.util.intstr.IntOrString port = 1; + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 1; } // The node this Taint is attached to has the effect "effect" on @@ -3380,18 +3682,24 @@ message Taint { // Required. The effect of the taint on pods // that do not tolerate the taint. - // Valid effects are NoSchedule and PreferNoSchedule. + // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. optional string effect = 3; + + // TimeAdded represents the time at which the taint was added. + // It is only written for NoExecute taints. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4; } // The pod this Toleration is attached to tolerates any taint that matches // the triple using the matching operator . message Toleration { - // Required. Key is the taint key that the toleration applies to. + // Key is the taint key that the toleration applies to. Empty means match all taint keys. + // If the key is empty, operator must be Exists; this combination means to match all values and all keys. // +optional optional string key = 1; - // operator represents a key's relationship to the value. + // Operator represents a key's relationship to the value. // Valid operators are Exists and Equal. Defaults to Equal. // Exists is equivalent to wildcard for value, so that a pod can // tolerate all taints of a particular category. @@ -3404,9 +3712,16 @@ message Toleration { optional string value = 3; // Effect indicates the taint effect to match. Empty means match all taint effects. - // When specified, allowed values are NoSchedule and PreferNoSchedule. + // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. // +optional optional string effect = 4; + + // TolerationSeconds represents the period of time the toleration (which must be + // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + // it is not set, which means tolerate the taint forever (do not evict). Zero and + // negative values will be treated as 0 (evict immediately) by the system. + // +optional + optional int64 tolerationSeconds = 5; } // Volume represents a named volume in a pod that may be accessed by any container in the pod. @@ -3442,6 +3757,18 @@ message VolumeMount { optional string subPath = 4; } +// Projection that may be projected along with other supported volume types +message VolumeProjection { + // information about the secret data to project + optional SecretProjection secret = 1; + + // information about the downwardAPI data to project + optional DownwardAPIProjection downwardAPI = 2; + + // information about the configMap data to project + optional ConfigMapProjection configMap = 3; +} + // Represents the source of a volume to mount. // Only one of its members may be specified. message VolumeSource { @@ -3558,6 +3885,17 @@ message VolumeSource { // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 23; + + // Items for all in one resources secrets, configmaps, and downward API + optional ProjectedVolumeSource projected = 26; + + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + optional PortworxVolumeSource portworxVolume = 24; + + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. 
+ // +optional + optional ScaleIOVolumeSource scaleIO = 25; } // Represents a vSphere volume resource. diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/helpers.go b/vendor/k8s.io/kubernetes/pkg/api/v1/helpers.go index 5ea0d329ff..83495de0b4 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/helpers.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,18 +16,617 @@ limitations under the License. package v1 -import "k8s.io/kubernetes/pkg/types" +import ( + "encoding/json" + "fmt" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + + "k8s.io/kubernetes/pkg/api" +) + +// IsOpaqueIntResourceName returns true if the resource name has the opaque +// integer resource prefix. +func IsOpaqueIntResourceName(name ResourceName) bool { + return strings.HasPrefix(string(name), ResourceOpaqueIntPrefix) +} + +// OpaqueIntResourceName returns a ResourceName with the canonical opaque +// integer prefix prepended. If the argument already has the prefix, it is +// returned unmodified. +func OpaqueIntResourceName(name string) ResourceName { + if IsOpaqueIntResourceName(ResourceName(name)) { + return ResourceName(name) + } + return ResourceName(fmt.Sprintf("%s%s", api.ResourceOpaqueIntPrefix, name)) +} // NewDeleteOptions returns a DeleteOptions indicating the resource should // be deleted within the specified grace period. Use zero to indicate // immediate deletion. If you would prefer to use the default grace period, -// use &v1.DeleteOptions{} directly. +// use &metav1.DeleteOptions{} directly. func NewDeleteOptions(grace int64) *DeleteOptions { return &DeleteOptions{GracePeriodSeconds: &grace} } +// NewPreconditionDeleteOptions returns a DeleteOptions with a UID precondition set. +func NewPreconditionDeleteOptions(uid string) *DeleteOptions { + u := types.UID(uid) + p := Preconditions{UID: &u} + return &DeleteOptions{Preconditions: &p} +} + // NewUIDPreconditions returns a Preconditions with UID set. func NewUIDPreconditions(uid string) *Preconditions { u := types.UID(uid) return &Preconditions{UID: &u} } + +// this function aims to check if the service's ClusterIP is set or not +// the objective is not to perform validation here +func IsServiceIPSet(service *Service) bool { + return service.Spec.ClusterIP != ClusterIPNone && service.Spec.ClusterIP != "" +} + +// this function aims to check if the service's cluster IP is requested or not +func IsServiceIPRequested(service *Service) bool { + // ExternalName services are CNAME aliases to external ones. Ignore the IP. 
+ if service.Spec.Type == ServiceTypeExternalName { + return false + } + return service.Spec.ClusterIP == "" +} + +var standardFinalizers = sets.NewString( + string(FinalizerKubernetes), + metav1.FinalizerOrphanDependents, +) + +func IsStandardFinalizerName(str string) bool { + return standardFinalizers.Has(str) +} + +// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, +// only if they do not already exist +func AddToNodeAddresses(addresses *[]NodeAddress, addAddresses ...NodeAddress) { + for _, add := range addAddresses { + exists := false + for _, existing := range *addresses { + if existing.Address == add.Address && existing.Type == add.Type { + exists = true + break + } + } + if !exists { + *addresses = append(*addresses, add) + } + } +} + +// TODO: make method on LoadBalancerStatus? +func LoadBalancerStatusEqual(l, r *LoadBalancerStatus) bool { + return ingressSliceEqual(l.Ingress, r.Ingress) +} + +func ingressSliceEqual(lhs, rhs []LoadBalancerIngress) bool { + if len(lhs) != len(rhs) { + return false + } + for i := range lhs { + if !ingressEqual(&lhs[i], &rhs[i]) { + return false + } + } + return true +} + +func ingressEqual(lhs, rhs *LoadBalancerIngress) bool { + if lhs.IP != rhs.IP { + return false + } + if lhs.Hostname != rhs.Hostname { + return false + } + return true +} + +// TODO: make method on LoadBalancerStatus? +func LoadBalancerStatusDeepCopy(lb *LoadBalancerStatus) *LoadBalancerStatus { + c := &LoadBalancerStatus{} + c.Ingress = make([]LoadBalancerIngress, len(lb.Ingress)) + for i := range lb.Ingress { + c.Ingress[i] = lb.Ingress[i] + } + return c +} + +// GetAccessModesAsString returns a string representation of an array of access modes. +// modes, when present, are always in the same order: RWO,ROX,RWX. +func GetAccessModesAsString(modes []PersistentVolumeAccessMode) string { + modes = removeDuplicateAccessModes(modes) + modesStr := []string{} + if containsAccessMode(modes, ReadWriteOnce) { + modesStr = append(modesStr, "RWO") + } + if containsAccessMode(modes, ReadOnlyMany) { + modesStr = append(modesStr, "ROX") + } + if containsAccessMode(modes, ReadWriteMany) { + modesStr = append(modesStr, "RWX") + } + return strings.Join(modesStr, ",") +} + +// GetAccessModesAsString returns an array of AccessModes from a string created by GetAccessModesAsString +func GetAccessModesFromString(modes string) []PersistentVolumeAccessMode { + strmodes := strings.Split(modes, ",") + accessModes := []PersistentVolumeAccessMode{} + for _, s := range strmodes { + s = strings.Trim(s, " ") + switch { + case s == "RWO": + accessModes = append(accessModes, ReadWriteOnce) + case s == "ROX": + accessModes = append(accessModes, ReadOnlyMany) + case s == "RWX": + accessModes = append(accessModes, ReadWriteMany) + } + } + return accessModes +} + +// removeDuplicateAccessModes returns an array of access modes without any duplicates +func removeDuplicateAccessModes(modes []PersistentVolumeAccessMode) []PersistentVolumeAccessMode { + accessModes := []PersistentVolumeAccessMode{} + for _, m := range modes { + if !containsAccessMode(accessModes, m) { + accessModes = append(accessModes, m) + } + } + return accessModes +} + +func containsAccessMode(modes []PersistentVolumeAccessMode, mode PersistentVolumeAccessMode) bool { + for _, m := range modes { + if m == mode { + return true + } + } + return false +} + +// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements +// labels.Selector. 
+func NodeSelectorRequirementsAsSelector(nsm []NodeSelectorRequirement) (labels.Selector, error) { + if len(nsm) == 0 { + return labels.Nothing(), nil + } + selector := labels.NewSelector() + for _, expr := range nsm { + var op selection.Operator + switch expr.Operator { + case NodeSelectorOpIn: + op = selection.In + case NodeSelectorOpNotIn: + op = selection.NotIn + case NodeSelectorOpExists: + op = selection.Exists + case NodeSelectorOpDoesNotExist: + op = selection.DoesNotExist + case NodeSelectorOpGt: + op = selection.GreaterThan + case NodeSelectorOpLt: + op = selection.LessThan + default: + return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) + } + r, err := labels.NewRequirement(expr.Key, op, expr.Values) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + return selector, nil +} + +const ( + // SeccompPodAnnotationKey represents the key of a seccomp profile applied + // to all containers of a pod. + SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" + + // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied + // to one container of a pod. + SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" + + // CreatedByAnnotation represents the key used to store the spec(json) + // used to create the resource. + CreatedByAnnotation = "kubernetes.io/created-by" + + // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) + // in the Annotations of a Node. + PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" + + // SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure + // container of a pod. The annotation value is a comma separated list of sysctl_name=value + // key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by + // the kubelet. Pods with other sysctls will fail to launch. + SysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/sysctls" + + // UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure + // container of a pod. The annotation value is a comma separated list of sysctl_name=value + // key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly + // namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use + // is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet + // will fail to launch. + UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls" + + // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache + // an object (e.g. secret, config map) before fetching it again from apiserver. + // This annotation can be attached to node. + ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" + + // AffinityAnnotationKey represents the key of affinity data (json serialized) + // in the Annotations of a Pod. + // TODO: remove when alpha support for affinity is removed + AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity" +) + +// Tries to add a toleration to annotations list. Returns true if something was updated +// false otherwise. 
+func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error) { + podTolerations := pod.Spec.Tolerations + + var newTolerations []Toleration + updated := false + for i := range podTolerations { + if toleration.MatchToleration(&podTolerations[i]) { + if api.Semantic.DeepEqual(toleration, podTolerations[i]) { + return false, nil + } + newTolerations = append(newTolerations, *toleration) + updated = true + continue + } + + newTolerations = append(newTolerations, podTolerations[i]) + } + + if !updated { + newTolerations = append(newTolerations, *toleration) + } + + pod.Spec.Tolerations = newTolerations + return true, nil +} + +// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by , +// if the two tolerations have same combination, regard as they match. +// TODO: uniqueness check for tolerations in api validations. +func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { + return t.Key == tolerationToMatch.Key && + t.Effect == tolerationToMatch.Effect && + t.Operator == tolerationToMatch.Operator && + t.Value == tolerationToMatch.Value +} + +// ToleratesTaint checks if the toleration tolerates the taint. +// The matching follows the rules below: +// (1) Empty toleration.effect means to match all taint effects, +// otherwise taint effect must equal to toleration.effect. +// (2) If toleration.operator is 'Exists', it means to match all taint values. +// (3) Empty toleration.key means to match all taint keys. +// If toleration.key is empty, toleration.operator must be 'Exists'; +// this combination means to match all taint values and all taint keys. +func (t *Toleration) ToleratesTaint(taint *Taint) bool { + if len(t.Effect) > 0 && t.Effect != taint.Effect { + return false + } + + if len(t.Key) > 0 && t.Key != taint.Key { + return false + } + + // TODO: Use proper defaulting when Toleration becomes a field of PodSpec + switch t.Operator { + // empty operator means Equal + case "", TolerationOpEqual: + return t.Value == taint.Value + case TolerationOpExists: + return true + default: + return false + } +} + +// TolerationsTolerateTaint checks if taint is tolerated by any of the tolerations. +func TolerationsTolerateTaint(tolerations []Toleration, taint *Taint) bool { + for i := range tolerations { + if tolerations[i].ToleratesTaint(taint) { + return true + } + } + return false +} + +type taintsFilterFunc func(*Taint) bool + +// TolerationsTolerateTaintsWithFilter checks if given tolerations tolerates +// all the taints that apply to the filter in given taint list. +func TolerationsTolerateTaintsWithFilter(tolerations []Toleration, taints []Taint, applyFilter taintsFilterFunc) bool { + if len(taints) == 0 { + return true + } + + for i := range taints { + if applyFilter != nil && !applyFilter(&taints[i]) { + continue + } + + if !TolerationsTolerateTaint(tolerations, &taints[i]) { + return false + } + } + + return true +} + +// DeleteTaintsByKey removes all the taints that have the same key to given taintKey +func DeleteTaintsByKey(taints []Taint, taintKey string) ([]Taint, bool) { + newTaints := []Taint{} + deleted := false + for i := range taints { + if taintKey == taints[i].Key { + deleted = true + continue + } + newTaints = append(newTaints, taints[i]) + } + return newTaints, deleted +} + +// DeleteTaint removes all the the taints that have the same key and effect to given taintToDelete. 
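To illustrate the taint/toleration matching rules spelled out in the comments above, a small sketch (vendored v1 package path assumed): the empty-key Exists toleration tolerates every taint, while the Equal form must match key, value and effect.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	taint := v1.Taint{Key: "dedicated", Value: "ingress", Effect: v1.TaintEffectNoSchedule}

	// Equal operator: key, value and effect all have to line up.
	equal := v1.Toleration{
		Key:      "dedicated",
		Operator: v1.TolerationOpEqual,
		Value:    "ingress",
		Effect:   v1.TaintEffectNoSchedule,
	}
	// Empty key plus Exists tolerates all taints.
	wildcard := v1.Toleration{Operator: v1.TolerationOpExists}

	fmt.Println(equal.ToleratesTaint(&taint))                                // true
	fmt.Println(wildcard.ToleratesTaint(&taint))                             // true
	fmt.Println(v1.TolerationsTolerateTaint([]v1.Toleration{equal}, &taint)) // true
}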
+func DeleteTaint(taints []Taint, taintToDelete *Taint) ([]Taint, bool) { + newTaints := []Taint{} + deleted := false + for i := range taints { + if taintToDelete.MatchTaint(&taints[i]) { + deleted = true + continue + } + newTaints = append(newTaints, taints[i]) + } + return newTaints, deleted +} + +// Returns true and list of Tolerations matching all Taints if all are tolerated, or false otherwise. +func GetMatchingTolerations(taints []Taint, tolerations []Toleration) (bool, []Toleration) { + if len(taints) == 0 { + return true, []Toleration{} + } + if len(tolerations) == 0 && len(taints) > 0 { + return false, []Toleration{} + } + result := []Toleration{} + for i := range taints { + tolerated := false + for j := range tolerations { + if tolerations[j].ToleratesTaint(&taints[i]) { + result = append(result, tolerations[j]) + tolerated = true + break + } + } + if !tolerated { + return false, []Toleration{} + } + } + return true, result +} + +// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect, +// if the two taints have same key:effect, regard as they match. +func (t *Taint) MatchTaint(taintToMatch *Taint) bool { + return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect +} + +// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. +func (t *Taint) ToString() string { + if len(t.Value) == 0 { + return fmt.Sprintf("%v:%v", t.Key, t.Effect) + } + return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect) +} + +func GetAvoidPodsFromNodeAnnotations(annotations map[string]string) (AvoidPods, error) { + var avoidPods AvoidPods + if len(annotations) > 0 && annotations[PreferAvoidPodsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[PreferAvoidPodsAnnotationKey]), &avoidPods) + if err != nil { + return avoidPods, err + } + } + return avoidPods, nil +} + +// SysctlsFromPodAnnotations parses the sysctl annotations into a slice of safe Sysctls +// and a slice of unsafe Sysctls. This is only a convenience wrapper around +// SysctlsFromPodAnnotation. +func SysctlsFromPodAnnotations(a map[string]string) ([]Sysctl, []Sysctl, error) { + safe, err := SysctlsFromPodAnnotation(a[SysctlsPodAnnotationKey]) + if err != nil { + return nil, nil, err + } + unsafe, err := SysctlsFromPodAnnotation(a[UnsafeSysctlsPodAnnotationKey]) + if err != nil { + return nil, nil, err + } + + return safe, unsafe, nil +} + +// SysctlsFromPodAnnotation parses an annotation value into a slice of Sysctls. +func SysctlsFromPodAnnotation(annotation string) ([]Sysctl, error) { + if len(annotation) == 0 { + return nil, nil + } + + kvs := strings.Split(annotation, ",") + sysctls := make([]Sysctl, len(kvs)) + for i, kv := range kvs { + cs := strings.Split(kv, "=") + if len(cs) != 2 || len(cs[0]) == 0 { + return nil, fmt.Errorf("sysctl %q not of the format sysctl_name=value", kv) + } + sysctls[i].Name = cs[0] + sysctls[i].Value = cs[1] + } + return sysctls, nil +} + +// PodAnnotationsFromSysctls creates an annotation value for a slice of Sysctls. +func PodAnnotationsFromSysctls(sysctls []Sysctl) string { + if len(sysctls) == 0 { + return "" + } + + kvs := make([]string, len(sysctls)) + for i := range sysctls { + kvs[i] = fmt.Sprintf("%s=%s", sysctls[i].Name, sysctls[i].Value) + } + return strings.Join(kvs, ",") +} + +type Sysctl struct { + Name string `protobuf:"bytes,1,opt,name=name"` + Value string `protobuf:"bytes,2,opt,name=value"` +} + +// NodeResources is an object for conveying resource information about a node. 
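The sysctl annotation handled above is a plain comma-separated name=value list; a sketch of the round trip through SysctlsFromPodAnnotation and PodAnnotationsFromSysctls (vendored package path assumed):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	// Parse the comma-separated name=value list used by the pod annotations.
	sysctls, err := v1.SysctlsFromPodAnnotation("net.core.somaxconn=1024,kernel.shm_rmid_forced=1")
	if err != nil {
		panic(err)
	}
	for _, s := range sysctls {
		fmt.Printf("%s = %s\n", s.Name, s.Value)
	}

	// And serialize the slice back into the annotation value.
	fmt.Println(v1.PodAnnotationsFromSysctls(sysctls)) // net.core.somaxconn=1024,kernel.shm_rmid_forced=1
}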
+// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. +type NodeResources struct { + // Capacity represents the available resources of a node + Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` +} + +// Tries to add a taint to annotations list. Returns a new copy of updated Node and true if something was updated +// false otherwise. +func AddOrUpdateTaint(node *Node, taint *Taint) (*Node, bool, error) { + objCopy, err := api.Scheme.DeepCopy(node) + if err != nil { + return nil, false, err + } + newNode := objCopy.(*Node) + nodeTaints := newNode.Spec.Taints + + var newTaints []Taint + updated := false + for i := range nodeTaints { + if taint.MatchTaint(&nodeTaints[i]) { + if api.Semantic.DeepEqual(taint, nodeTaints[i]) { + return newNode, false, nil + } + newTaints = append(newTaints, *taint) + updated = true + continue + } + + newTaints = append(newTaints, nodeTaints[i]) + } + + if !updated { + newTaints = append(newTaints, *taint) + } + + newNode.Spec.Taints = newTaints + return newNode, true, nil +} + +func TaintExists(taints []Taint, taintToFind *Taint) bool { + for _, taint := range taints { + if taint.MatchTaint(taintToFind) { + return true + } + } + return false +} + +// Tries to remove a taint from annotations list. Returns a new copy of updated Node and true if something was updated +// false otherwise. +func RemoveTaint(node *Node, taint *Taint) (*Node, bool, error) { + objCopy, err := api.Scheme.DeepCopy(node) + if err != nil { + return nil, false, err + } + newNode := objCopy.(*Node) + nodeTaints := newNode.Spec.Taints + if len(nodeTaints) == 0 { + return newNode, false, nil + } + + if !TaintExists(nodeTaints, taint) { + return newNode, false, nil + } + + newTaints, _ := DeleteTaint(nodeTaints, taint) + newNode.Spec.Taints = newTaints + return newNode, true, nil +} + +// GetAffinityFromPodAnnotations gets the json serialized affinity data from Pod.Annotations +// and converts it to the Affinity type in api. +// TODO: remove when alpha support for affinity is removed +func GetAffinityFromPodAnnotations(annotations map[string]string) (*Affinity, error) { + if len(annotations) > 0 && annotations[AffinityAnnotationKey] != "" { + var affinity Affinity + err := json.Unmarshal([]byte(annotations[AffinityAnnotationKey]), &affinity) + if err != nil { + return nil, err + } + return &affinity, nil + } + return nil, nil +} + +// GetPersistentVolumeClass returns StorageClassName. +func GetPersistentVolumeClass(volume *PersistentVolume) string { + // Use beta annotation first + if class, found := volume.Annotations[BetaStorageClassAnnotation]; found { + return class + } + + return volume.Spec.StorageClassName +} + +// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was +// requested, it returns "". +func GetPersistentVolumeClaimClass(claim *PersistentVolumeClaim) string { + // Use beta annotation first + if class, found := claim.Annotations[BetaStorageClassAnnotation]; found { + return class + } + + if claim.Spec.StorageClassName != nil { + return *claim.Spec.StorageClassName + } + + return "" +} + +// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field. 
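A sketch of the storage-class resolution order implemented above: the beta annotation wins over Spec.StorageClassName when both are present. It assumes the BetaStorageClassAnnotation constant is exported elsewhere in this vendored v1 package, as the helpers above reference it unqualified.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	className := "fast"
	claim := &v1.PersistentVolumeClaim{}
	claim.Spec.StorageClassName = &className

	// With only the spec field set, that value is returned.
	fmt.Println(v1.GetPersistentVolumeClaimClass(claim)) // fast

	// The beta annotation takes precedence once it is present.
	claim.ObjectMeta.Annotations = map[string]string{v1.BetaStorageClassAnnotation: "standard"}
	fmt.Println(v1.GetPersistentVolumeClaimClass(claim)) // standard
	fmt.Println(v1.PersistentVolumeClaimHasClass(claim)) // true
}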
+func PersistentVolumeClaimHasClass(claim *PersistentVolumeClaim) bool { + // Use beta annotation first + if _, found := claim.Annotations[BetaStorageClassAnnotation]; found { + return true + } + + if claim.Spec.StorageClassName != nil { + return true + } + + return false +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/meta.go b/vendor/k8s.io/kubernetes/pkg/api/v1/meta.go index 6f95cf7bc7..bb1ae2ff79 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/meta.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/meta.go @@ -17,34 +17,32 @@ limitations under the License. package v1 import ( - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/meta/metatypes" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" ) -func (obj *ObjectMeta) GetObjectMeta() meta.Object { return obj } +func (obj *ObjectMeta) GetObjectMeta() metav1.Object { return obj } -// Namespace implements meta.Object for any object with an ObjectMeta typed field. Allows +// Namespace implements metav1.Object for any object with an ObjectMeta typed field. Allows // fast, direct access to metadata fields for API objects. -func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace } -func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace } -func (meta *ObjectMeta) GetName() string { return meta.Name } -func (meta *ObjectMeta) SetName(name string) { meta.Name = name } -func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName } -func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName } -func (meta *ObjectMeta) GetUID() types.UID { return meta.UID } -func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid } -func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion } -func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } -func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink } -func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } -func (meta *ObjectMeta) GetCreationTimestamp() unversioned.Time { return meta.CreationTimestamp } -func (meta *ObjectMeta) SetCreationTimestamp(creationTimestamp unversioned.Time) { +func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace } +func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace } +func (meta *ObjectMeta) GetName() string { return meta.Name } +func (meta *ObjectMeta) SetName(name string) { meta.Name = name } +func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName } +func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName } +func (meta *ObjectMeta) GetUID() types.UID { return meta.UID } +func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid } +func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion } +func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } +func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink } +func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } +func (meta *ObjectMeta) GetCreationTimestamp() metav1.Time { return meta.CreationTimestamp } +func (meta *ObjectMeta) SetCreationTimestamp(creationTimestamp metav1.Time) { meta.CreationTimestamp = creationTimestamp } -func (meta *ObjectMeta) GetDeletionTimestamp() *unversioned.Time { return 
meta.DeletionTimestamp } -func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *unversioned.Time) { +func (meta *ObjectMeta) GetDeletionTimestamp() *metav1.Time { return meta.DeletionTimestamp } +func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *metav1.Time) { meta.DeletionTimestamp = deletionTimestamp } func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels } @@ -54,8 +52,8 @@ func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Ann func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } -func (meta *ObjectMeta) GetOwnerReferences() []metatypes.OwnerReference { - ret := make([]metatypes.OwnerReference, len(meta.OwnerReferences)) +func (meta *ObjectMeta) GetOwnerReferences() []metav1.OwnerReference { + ret := make([]metav1.OwnerReference, len(meta.OwnerReferences)) for i := 0; i < len(meta.OwnerReferences); i++ { ret[i].Kind = meta.OwnerReferences[i].Kind ret[i].Name = meta.OwnerReferences[i].Name @@ -65,12 +63,16 @@ func (meta *ObjectMeta) GetOwnerReferences() []metatypes.OwnerReference { value := *meta.OwnerReferences[i].Controller ret[i].Controller = &value } + if meta.OwnerReferences[i].BlockOwnerDeletion != nil { + value := *meta.OwnerReferences[i].BlockOwnerDeletion + ret[i].BlockOwnerDeletion = &value + } } return ret } -func (meta *ObjectMeta) SetOwnerReferences(references []metatypes.OwnerReference) { - newReferences := make([]OwnerReference, len(references)) +func (meta *ObjectMeta) SetOwnerReferences(references []metav1.OwnerReference) { + newReferences := make([]metav1.OwnerReference, len(references)) for i := 0; i < len(references); i++ { newReferences[i].Kind = references[i].Kind newReferences[i].Name = references[i].Name @@ -80,6 +82,10 @@ func (meta *ObjectMeta) SetOwnerReferences(references []metatypes.OwnerReference value := *references[i].Controller newReferences[i].Controller = &value } + if references[i].BlockOwnerDeletion != nil { + value := *references[i].BlockOwnerDeletion + newReferences[i].BlockOwnerDeletion = &value + } } meta.OwnerReferences = newReferences } diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/ref.go b/vendor/k8s.io/kubernetes/pkg/api/v1/ref.go new file mode 100644 index 0000000000..5d33719fef --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/ref.go @@ -0,0 +1,133 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "errors" + "fmt" + "net/url" + "strings" + + "k8s.io/apimachinery/pkg/runtime/schema" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" +) + +var ( + // Errors that could be returned by GetReference. 
+ ErrNilObject = errors.New("can't reference a nil object") + ErrNoSelfLink = errors.New("selfLink was empty, can't make reference") +) + +// GetReference returns an ObjectReference which refers to the given +// object, or an error if the object doesn't follow the conventions +// that would allow this. +// TODO: should take a meta.Interface see http://issue.k8s.io/7127 +func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference, error) { + if obj == nil { + return nil, ErrNilObject + } + if ref, ok := obj.(*ObjectReference); ok { + // Don't make a reference to a reference. + return ref, nil + } + + gvk := obj.GetObjectKind().GroupVersionKind() + + // if the object referenced is actually persisted, we can just get kind from meta + // if we are building an object reference to something not yet persisted, we should fallback to scheme + kind := gvk.Kind + if len(kind) == 0 { + // TODO: this is wrong + gvks, _, err := scheme.ObjectKinds(obj) + if err != nil { + return nil, err + } + kind = gvks[0].Kind + } + + // An object that implements only List has enough metadata to build a reference + var listMeta meta.List + objectMeta, err := meta.Accessor(obj) + if err != nil { + listMeta, err = meta.ListAccessor(obj) + if err != nil { + return nil, err + } + } else { + listMeta = objectMeta + } + + // if the object referenced is actually persisted, we can also get version from meta + version := gvk.GroupVersion().String() + if len(version) == 0 { + selfLink := listMeta.GetSelfLink() + if len(selfLink) == 0 { + return nil, ErrNoSelfLink + } + selfLinkUrl, err := url.Parse(selfLink) + if err != nil { + return nil, err + } + // example paths: ///* + parts := strings.Split(selfLinkUrl.Path, "/") + if len(parts) < 3 { + return nil, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version) + } + version = parts[2] + } + + // only has list metadata + if objectMeta == nil { + return &ObjectReference{ + Kind: kind, + APIVersion: version, + ResourceVersion: listMeta.GetResourceVersion(), + }, nil + } + + return &ObjectReference{ + Kind: kind, + APIVersion: version, + Name: objectMeta.GetName(), + Namespace: objectMeta.GetNamespace(), + UID: objectMeta.GetUID(), + ResourceVersion: objectMeta.GetResourceVersion(), + }, nil +} + +// GetPartialReference is exactly like GetReference, but allows you to set the FieldPath. +func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*ObjectReference, error) { + ref, err := GetReference(scheme, obj) + if err != nil { + return nil, err + } + ref.FieldPath = fieldPath + return ref, nil +} + +// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that +// intend only to get a reference to that object. This simplifies the event recording interface. +func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} +func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/register.go b/vendor/k8s.io/kubernetes/pkg/api/v1/register.go index f02ea8297f..5c2dfddd11 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/register.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/register.go @@ -17,16 +17,21 @@ limitations under the License. 
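The ObjectReference accessors added in ref.go are a straightforward APIVersion/Kind round trip; a minimal sketch, assuming the schema package import path used by the file itself:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	ref := &v1.ObjectReference{}

	// SetGroupVersionKind splits the GVK into the APIVersion and Kind fields.
	ref.SetGroupVersionKind(schema.GroupVersionKind{Version: "v1", Kind: "Pod"})
	fmt.Println(ref.APIVersion, ref.Kind) // v1 Pod

	// GroupVersionKind reassembles the same GVK from those fields.
	fmt.Println(ref.GroupVersionKind()) // /v1, Kind=Pod
}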
package v1 import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName is the group name use in this package const GroupName = "" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} var ( SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs, addFastPathConversionFuncs) @@ -69,12 +74,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &PersistentVolumeList{}, &PersistentVolumeClaim{}, &PersistentVolumeClaimList{}, - &DeleteOptions{}, - &ExportOptions{}, - &ListOptions{}, &PodAttachOptions{}, &PodLogOptions{}, &PodExecOptions{}, + &PodPortForwardOptions{}, &PodProxyOptions{}, &ComponentStatus{}, &ComponentStatusList{}, @@ -85,9 +88,9 @@ func addKnownTypes(scheme *runtime.Scheme) error { ) // Add common types - scheme.AddKnownTypes(SchemeGroupVersion, &unversioned.Status{}) + scheme.AddKnownTypes(SchemeGroupVersion, &metav1.Status{}) // Add the watch version that applies - versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/resource_helpers.go b/vendor/k8s.io/kubernetes/pkg/api/v1/resource_helpers.go new file mode 100644 index 0000000000..ec84232762 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/resource_helpers.go @@ -0,0 +1,257 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Returns string version of ResourceName. +func (self ResourceName) String() string { + return string(self) +} + +// Returns the CPU limit if specified. +func (self *ResourceList) Cpu() *resource.Quantity { + if val, ok := (*self)[ResourceCPU]; ok { + return &val + } + return &resource.Quantity{Format: resource.DecimalSI} +} + +// Returns the Memory limit if specified. 
+func (self *ResourceList) Memory() *resource.Quantity { + if val, ok := (*self)[ResourceMemory]; ok { + return &val + } + return &resource.Quantity{Format: resource.BinarySI} +} + +func (self *ResourceList) Pods() *resource.Quantity { + if val, ok := (*self)[ResourcePods]; ok { + return &val + } + return &resource.Quantity{} +} + +func (self *ResourceList) NvidiaGPU() *resource.Quantity { + if val, ok := (*self)[ResourceNvidiaGPU]; ok { + return &val + } + return &resource.Quantity{} +} + +func GetContainerStatus(statuses []ContainerStatus, name string) (ContainerStatus, bool) { + for i := range statuses { + if statuses[i].Name == name { + return statuses[i], true + } + } + return ContainerStatus{}, false +} + +func GetExistingContainerStatus(statuses []ContainerStatus, name string) ContainerStatus { + for i := range statuses { + if statuses[i].Name == name { + return statuses[i] + } + } + return ContainerStatus{} +} + +// IsPodAvailable returns true if a pod is available; false otherwise. +// Precondition for an available pod is that it must be ready. On top +// of that, there are two cases when a pod can be considered available: +// 1. minReadySeconds == 0, or +// 2. LastTransitionTime (is set) + minReadySeconds < current time +func IsPodAvailable(pod *Pod, minReadySeconds int32, now metav1.Time) bool { + if !IsPodReady(pod) { + return false + } + + c := GetPodReadyCondition(pod.Status) + minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second + if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) { + return true + } + return false +} + +// IsPodReady returns true if a pod is ready; false otherwise. +func IsPodReady(pod *Pod) bool { + return IsPodReadyConditionTrue(pod.Status) +} + +// IsPodReady retruns true if a pod is ready; false otherwise. +func IsPodReadyConditionTrue(status PodStatus) bool { + condition := GetPodReadyCondition(status) + return condition != nil && condition.Status == ConditionTrue +} + +// Extracts the pod ready condition from the given status and returns that. +// Returns nil if the condition is not present. +func GetPodReadyCondition(status PodStatus) *PodCondition { + _, condition := GetPodCondition(&status, PodReady) + return condition +} + +// GetPodCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func GetPodCondition(status *PodStatus, conditionType PodConditionType) (int, *PodCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} + +// GetNodeCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func GetNodeCondition(status *NodeStatus, conditionType NodeConditionType) (int, *NodeCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} + +// Updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the +// status has changed. +// Returns true if pod condition has changed or has been added. 
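A short sketch of the pod-readiness helpers above (vendored package paths assumed); a pod is available immediately when it is ready and minReadySeconds is zero:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	pod := &v1.Pod{
		Status: v1.PodStatus{
			Conditions: []v1.PodCondition{{
				Type:               v1.PodReady,
				Status:             v1.ConditionTrue,
				LastTransitionTime: metav1.Now(),
			}},
		},
	}

	fmt.Println(v1.IsPodReady(pod)) // true

	// GetPodCondition returns the index as well as the condition itself.
	i, cond := v1.GetPodCondition(&pod.Status, v1.PodReady)
	fmt.Println(i, cond.Status) // 0 True

	// minReadySeconds == 0 makes a ready pod available right away.
	fmt.Println(v1.IsPodAvailable(pod, 0, metav1.Now())) // true
}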
+func UpdatePodCondition(status *PodStatus, condition *PodCondition) bool { + condition.LastTransitionTime = metav1.Now() + // Try to find this pod condition. + conditionIndex, oldCondition := GetPodCondition(status, condition.Type) + + if oldCondition == nil { + // We are adding new pod condition. + status.Conditions = append(status.Conditions, *condition) + return true + } else { + // We are updating an existing condition, so we need to check if it has changed. + if condition.Status == oldCondition.Status { + condition.LastTransitionTime = oldCondition.LastTransitionTime + } + + isEqual := condition.Status == oldCondition.Status && + condition.Reason == oldCondition.Reason && + condition.Message == oldCondition.Message && + condition.LastProbeTime.Equal(oldCondition.LastProbeTime) && + condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime) + + status.Conditions[conditionIndex] = *condition + // Return true if one of the fields have changed. + return !isEqual + } +} + +// IsNodeReady returns true if a node is ready; false otherwise. +func IsNodeReady(node *Node) bool { + for _, c := range node.Status.Conditions { + if c.Type == NodeReady { + return c.Status == ConditionTrue + } + } + return false +} + +// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all +// containers of the pod. +func PodRequestsAndLimits(pod *Pod) (reqs map[ResourceName]resource.Quantity, limits map[ResourceName]resource.Quantity, err error) { + reqs, limits = map[ResourceName]resource.Quantity{}, map[ResourceName]resource.Quantity{} + for _, container := range pod.Spec.Containers { + for name, quantity := range container.Resources.Requests { + if value, ok := reqs[name]; !ok { + reqs[name] = *quantity.Copy() + } else { + value.Add(quantity) + reqs[name] = value + } + } + for name, quantity := range container.Resources.Limits { + if value, ok := limits[name]; !ok { + limits[name] = *quantity.Copy() + } else { + value.Add(quantity) + limits[name] = value + } + } + } + // init containers define the minimum of any resource + for _, container := range pod.Spec.InitContainers { + for name, quantity := range container.Resources.Requests { + value, ok := reqs[name] + if !ok { + reqs[name] = *quantity.Copy() + continue + } + if quantity.Cmp(value) > 0 { + reqs[name] = *quantity.Copy() + } + } + for name, quantity := range container.Resources.Limits { + value, ok := limits[name] + if !ok { + limits[name] = *quantity.Copy() + continue + } + if quantity.Cmp(value) > 0 { + limits[name] = *quantity.Copy() + } + } + } + return +} + +// finds and returns the request for a specific resource. 
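PodRequestsAndLimits sums the regular containers but treats init containers as a floor (the maximum of the container sum and any single init container); a sketch assuming the apimachinery resource package path:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{Name: "a", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}}},
				{Name: "b", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m")}}},
			},
			// An init container larger than the container sum raises the effective request.
			InitContainers: []v1.Container{
				{Name: "init", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m")}}},
			},
		},
	}

	reqs, _, err := v1.PodRequestsAndLimits(pod)
	if err != nil {
		panic(err)
	}
	cpu := reqs[v1.ResourceCPU]
	fmt.Println(cpu.String()) // 500m = max(100m+200m, 500m)
}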
+func GetResourceRequest(pod *Pod, resource ResourceName) int64 { + if resource == ResourcePods { + return 1 + } + totalResources := int64(0) + for _, container := range pod.Spec.Containers { + if rQuantity, ok := container.Resources.Requests[resource]; ok { + if resource == ResourceCPU { + totalResources += rQuantity.MilliValue() + } else { + totalResources += rQuantity.Value() + } + } + } + // take max_resource(sum_pod, any_init_container) + for _, container := range pod.Spec.InitContainers { + if rQuantity, ok := container.Resources.Requests[resource]; ok { + if resource == ResourceCPU && rQuantity.MilliValue() > totalResources { + totalResources = rQuantity.MilliValue() + } else if rQuantity.Value() > totalResources { + totalResources = rQuantity.Value() + } + } + } + return totalResources +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/service/BUILD b/vendor/k8s.io/kubernetes/pkg/api/v1/service/BUILD new file mode 100644 index 0000000000..81101976d5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/service/BUILD @@ -0,0 +1,47 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "annotations.go", + "util.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/api/v1:go_default_library", + "//pkg/util/net/sets:go_default_library", + "//vendor:github.com/golang/glog", + ], +) + +go_test( + name = "go_default_test", + srcs = ["util_test.go"], + library = ":go_default_library", + tags = ["automanaged"], + deps = [ + "//pkg/api/v1:go_default_library", + "//pkg/util/net/sets:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/service/annotations.go b/vendor/k8s.io/kubernetes/pkg/api/v1/service/annotations.go new file mode 100644 index 0000000000..141e572124 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/service/annotations.go @@ -0,0 +1,111 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + "strconv" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api/v1" +) + +const ( + // AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers + // + // It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to + // allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow + // access only from the CIDRs currently allocated to MIT & the USPS. + // + // Not all cloud providers support this annotation, though AWS & GCE do. 
+ AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" + + // AnnotationValueExternalTrafficLocal Value of annotation to specify local endpoints behaviour + AnnotationValueExternalTrafficLocal = "OnlyLocal" + // AnnotationValueExternalTrafficGlobal Value of annotation to specify global (legacy) behaviour + AnnotationValueExternalTrafficGlobal = "Global" + + // TODO: The alpha annotations have been deprecated, remove them when we move this feature to GA. + + // AlphaAnnotationHealthCheckNodePort Annotation specifying the healthcheck nodePort for the service + // If not specified, annotation is created by the service api backend with the allocated nodePort + // Will use user-specified nodePort value if specified by the client + AlphaAnnotationHealthCheckNodePort = "service.alpha.kubernetes.io/healthcheck-nodeport" + + // AlphaAnnotationExternalTraffic An annotation that denotes if this Service desires to route external traffic to local + // endpoints only. This preserves Source IP and avoids a second hop. + AlphaAnnotationExternalTraffic = "service.alpha.kubernetes.io/external-traffic" + + // BetaAnnotationHealthCheckNodePort is the beta version of AlphaAnnotationHealthCheckNodePort. + BetaAnnotationHealthCheckNodePort = "service.beta.kubernetes.io/healthcheck-nodeport" + + // BetaAnnotationExternalTraffic is the beta version of AlphaAnnotationExternalTraffic. + BetaAnnotationExternalTraffic = "service.beta.kubernetes.io/external-traffic" +) + +// NeedsHealthCheck Check service for health check annotations +func NeedsHealthCheck(service *v1.Service) bool { + // First check the alpha annotation and then the beta. This is so existing + // Services continue to work till the user decides to transition to beta. + // If they transition to beta, there's no way to go back to alpha without + // rolling back the cluster. + for _, annotation := range []string{AlphaAnnotationExternalTraffic, BetaAnnotationExternalTraffic} { + if l, ok := service.Annotations[annotation]; ok { + if l == AnnotationValueExternalTrafficLocal { + return true + } else if l == AnnotationValueExternalTrafficGlobal { + return false + } else { + glog.Errorf("Invalid value for annotation %v: %v", annotation, l) + } + } + } + return false +} + +// GetServiceHealthCheckNodePort Return health check node port annotation for service, if one exists +func GetServiceHealthCheckNodePort(service *v1.Service) int32 { + if !NeedsHealthCheck(service) { + return 0 + } + // First check the alpha annotation and then the beta. This is so existing + // Services continue to work till the user decides to transition to beta. + // If they transition to beta, there's no way to go back to alpha without + // rolling back the cluster. 
+ for _, annotation := range []string{AlphaAnnotationHealthCheckNodePort, BetaAnnotationHealthCheckNodePort} { + if l, ok := service.Annotations[annotation]; ok { + p, err := strconv.Atoi(l) + if err != nil { + glog.Errorf("Failed to parse annotation %v: %v", annotation, err) + continue + } + return int32(p) + } + } + return 0 +} + +// GetServiceHealthCheckPathPort Return the path and nodePort programmed into the Cloud LB Health Check +func GetServiceHealthCheckPathPort(service *v1.Service) (string, int32) { + if !NeedsHealthCheck(service) { + return "", 0 + } + port := GetServiceHealthCheckNodePort(service) + if port == 0 { + return "", 0 + } + return "/healthz", port +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/service/util.go b/vendor/k8s.io/kubernetes/pkg/api/v1/service/util.go new file mode 100644 index 0000000000..3c00a49524 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/service/util.go @@ -0,0 +1,68 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + "fmt" + "strings" + + "k8s.io/kubernetes/pkg/api/v1" + netsets "k8s.io/kubernetes/pkg/util/net/sets" +) + +const ( + defaultLoadBalancerSourceRanges = "0.0.0.0/0" +) + +// IsAllowAll checks whether the netsets.IPNet allows traffic from 0.0.0.0/0 +func IsAllowAll(ipnets netsets.IPNet) bool { + for _, s := range ipnets.StringSlice() { + if s == "0.0.0.0/0" { + return true + } + } + return false +} + +// GetLoadBalancerSourceRanges first try to parse and verify LoadBalancerSourceRanges field from a service. +// If the field is not specified, turn to parse and verify the AnnotationLoadBalancerSourceRangesKey annotation from a service, +// extracting the source ranges to allow, and if not present returns a default (allow-all) value. +func GetLoadBalancerSourceRanges(service *v1.Service) (netsets.IPNet, error) { + var ipnets netsets.IPNet + var err error + // if SourceRange field is specified, ignore sourceRange annotation + if len(service.Spec.LoadBalancerSourceRanges) > 0 { + specs := service.Spec.LoadBalancerSourceRanges + ipnets, err = netsets.ParseIPNets(specs...) + + if err != nil { + return nil, fmt.Errorf("service.Spec.LoadBalancerSourceRanges: %v is not valid. Expecting a list of IP ranges. For example, 10.0.0.0/24. Error msg: %v", specs, err) + } + } else { + val := service.Annotations[AnnotationLoadBalancerSourceRangesKey] + val = strings.TrimSpace(val) + if val == "" { + val = defaultLoadBalancerSourceRanges + } + specs := strings.Split(val, ",") + ipnets, err = netsets.ParseIPNets(specs...) + if err != nil { + return nil, fmt.Errorf("%s: %s is not valid. Expecting a comma-separated list of source IP ranges. 
For example, 10.0.0.0/24,192.168.2.0/24", AnnotationLoadBalancerSourceRangesKey, val) + } + } + return ipnets, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/api/v1/types.generated.go index 50aac6d384..c6fd805aa5 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/types.generated.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/types.generated.go @@ -25,11 +25,11 @@ import ( "errors" "fmt" codec1978 "github.com/ugorji/go/codec" - pkg3_resource "k8s.io/kubernetes/pkg/api/resource" - pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg5_runtime "k8s.io/kubernetes/pkg/runtime" - pkg1_types "k8s.io/kubernetes/pkg/types" - pkg4_intstr "k8s.io/kubernetes/pkg/util/intstr" + pkg3_resource "k8s.io/apimachinery/pkg/api/resource" + pkg2_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg5_runtime "k8s.io/apimachinery/pkg/runtime" + pkg1_types "k8s.io/apimachinery/pkg/types" + pkg4_intstr "k8s.io/apimachinery/pkg/util/intstr" "reflect" "runtime" time "time" @@ -66,7 +66,7 @@ func init() { } if false { // reference the types, but skip this branch at build/run time var v0 pkg3_resource.Quantity - var v1 pkg2_unversioned.Time + var v1 pkg2_v1.Time var v2 pkg5_runtime.RawExtension var v3 pkg1_types.UID var v4 pkg4_intstr.IntOrString @@ -488,7 +488,7 @@ func (x *ObjectMeta) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym44 if false { } else { - h.encSliceOwnerReference(([]OwnerReference)(x.OwnerReferences), e) + h.encSlicev1_OwnerReference(([]pkg2_v1.OwnerReference)(x.OwnerReferences), e) } } } else { @@ -506,7 +506,7 @@ func (x *ObjectMeta) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym45 if false { } else { - h.encSliceOwnerReference(([]OwnerReference)(x.OwnerReferences), e) + h.encSlicev1_OwnerReference(([]pkg2_v1.OwnerReference)(x.OwnerReferences), e) } } } @@ -582,25 +582,25 @@ func (x *ObjectMeta) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym52 := z.DecBinary() - _ = yym52 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct53 := r.ContainerType() - if yyct53 == codecSelferValueTypeMap1234 { - yyl53 := r.ReadMapStart() - if yyl53 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl53, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct53 == codecSelferValueTypeArray1234 { - yyl53 := r.ReadArrayStart() - if yyl53 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl53, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -612,12 +612,12 @@ func (x *ObjectMeta) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys54Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys54Slc - var yyhl54 bool = l >= 0 - for yyj54 := 0; ; yyj54++ { - if yyhl54 { - if yyj54 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -626,67 +626,110 @@ func (x *ObjectMeta) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } 
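For the pkg/api/v1/service helpers vendored just above the generated codec file, a caller-side sketch (package path k8s.io/kubernetes/pkg/api/v1/service assumed importable from this vendor tree): the beta external-traffic annotation set to OnlyLocal drives NeedsHealthCheck, and the source-range annotation narrows the load balancer from the allow-all default.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/api/v1/service"
)

func main() {
	svc := &v1.Service{}
	svc.ObjectMeta.Annotations = map[string]string{
		service.BetaAnnotationExternalTraffic:         service.AnnotationValueExternalTrafficLocal,
		service.AnnotationLoadBalancerSourceRangesKey: "10.0.0.0/24,192.168.2.0/24",
	}

	// OnlyLocal endpoints require a cloud LB health check.
	fmt.Println(service.NeedsHealthCheck(svc)) // true

	// The annotation restricts the allowed ingress CIDRs below 0.0.0.0/0.
	ranges, err := service.GetLoadBalancerSourceRanges(svc)
	if err != nil {
		panic(err)
	}
	fmt.Println(service.IsAllowAll(ranges)) // false
}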
} z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys54Slc = r.DecodeBytes(yys54Slc, true, true) - yys54 := string(yys54Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys54 { + switch yys3 { case "name": if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "generateName": if r.TryDecodeAsNil() { x.GenerateName = "" } else { - x.GenerateName = string(r.DecodeString()) + yyv6 := &x.GenerateName + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "namespace": if r.TryDecodeAsNil() { x.Namespace = "" } else { - x.Namespace = string(r.DecodeString()) + yyv8 := &x.Namespace + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } case "selfLink": if r.TryDecodeAsNil() { x.SelfLink = "" } else { - x.SelfLink = string(r.DecodeString()) + yyv10 := &x.SelfLink + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } case "uid": if r.TryDecodeAsNil() { x.UID = "" } else { - x.UID = pkg1_types.UID(r.DecodeString()) + yyv12 := &x.UID + yym13 := z.DecBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.DecExt(yyv12) { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } case "resourceVersion": if r.TryDecodeAsNil() { x.ResourceVersion = "" } else { - x.ResourceVersion = string(r.DecodeString()) + yyv14 := &x.ResourceVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } case "generation": if r.TryDecodeAsNil() { x.Generation = 0 } else { - x.Generation = int64(r.DecodeInt(64)) + yyv16 := &x.Generation + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*int64)(yyv16)) = int64(r.DecodeInt(64)) + } } case "creationTimestamp": if r.TryDecodeAsNil() { - x.CreationTimestamp = pkg2_unversioned.Time{} + x.CreationTimestamp = pkg2_v1.Time{} } else { - yyv62 := &x.CreationTimestamp - yym63 := z.DecBinary() - _ = yym63 + yyv18 := &x.CreationTimestamp + yym19 := z.DecBinary() + _ = yym19 if false { - } else if z.HasExtensions() && z.DecExt(yyv62) { - } else if yym63 { - z.DecBinaryUnmarshal(yyv62) - } else if !yym63 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv62) + } else if z.HasExtensions() && z.DecExt(yyv18) { + } else if yym19 { + z.DecBinaryUnmarshal(yyv18) + } else if !yym19 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv18) } else { - z.DecFallback(yyv62, false) + z.DecFallback(yyv18, false) } } case "deletionTimestamp": @@ -696,15 +739,15 @@ func (x *ObjectMeta) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } else { if x.DeletionTimestamp == nil { - x.DeletionTimestamp = new(pkg2_unversioned.Time) + x.DeletionTimestamp = new(pkg2_v1.Time) } - yym65 := z.DecBinary() - _ = yym65 + yym21 := z.DecBinary() + _ = yym21 if false { } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym65 { + } else if yym21 { z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym65 && z.IsJSONHandle() { + } else if !yym21 && z.IsJSONHandle() { z.DecJSONUnmarshal(x.DeletionTimestamp) } else { z.DecFallback(x.DeletionTimestamp, false) @@ -719,8 +762,8 @@ func (x *ObjectMeta) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.DeletionGracePeriodSeconds == nil { 
x.DeletionGracePeriodSeconds = new(int64) } - yym67 := z.DecBinary() - _ = yym67 + yym23 := z.DecBinary() + _ = yym23 if false { } else { *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) @@ -730,60 +773,66 @@ func (x *ObjectMeta) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Labels = nil } else { - yyv68 := &x.Labels - yym69 := z.DecBinary() - _ = yym69 + yyv24 := &x.Labels + yym25 := z.DecBinary() + _ = yym25 if false { } else { - z.F.DecMapStringStringX(yyv68, false, d) + z.F.DecMapStringStringX(yyv24, false, d) } } case "annotations": if r.TryDecodeAsNil() { x.Annotations = nil } else { - yyv70 := &x.Annotations - yym71 := z.DecBinary() - _ = yym71 + yyv26 := &x.Annotations + yym27 := z.DecBinary() + _ = yym27 if false { } else { - z.F.DecMapStringStringX(yyv70, false, d) + z.F.DecMapStringStringX(yyv26, false, d) } } case "ownerReferences": if r.TryDecodeAsNil() { x.OwnerReferences = nil } else { - yyv72 := &x.OwnerReferences - yym73 := z.DecBinary() - _ = yym73 + yyv28 := &x.OwnerReferences + yym29 := z.DecBinary() + _ = yym29 if false { } else { - h.decSliceOwnerReference((*[]OwnerReference)(yyv72), d) + h.decSlicev1_OwnerReference((*[]pkg2_v1.OwnerReference)(yyv28), d) } } case "finalizers": if r.TryDecodeAsNil() { x.Finalizers = nil } else { - yyv74 := &x.Finalizers - yym75 := z.DecBinary() - _ = yym75 + yyv30 := &x.Finalizers + yym31 := z.DecBinary() + _ = yym31 if false { } else { - z.F.DecSliceStringX(yyv74, false, d) + z.F.DecSliceStringX(yyv30, false, d) } } case "clusterName": if r.TryDecodeAsNil() { x.ClusterName = "" } else { - x.ClusterName = string(r.DecodeString()) + yyv32 := &x.ClusterName + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + *((*string)(yyv32)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys54) - } // end switch yys54 - } // end for yyj54 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -791,16 +840,16 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj77 int - var yyb77 bool - var yyhl77 bool = l >= 0 - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l + var yyj34 int + var yyb34 bool + var yyhl34 bool = l >= 0 + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l } else { - yyb77 = r.CheckBreak() + yyb34 = r.CheckBreak() } - if yyb77 { + if yyb34 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -808,15 +857,21 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv35 := &x.Name + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*string)(yyv35)) = r.DecodeString() + } } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l } else { - yyb77 = r.CheckBreak() + yyb34 = r.CheckBreak() } - if yyb77 { + if yyb34 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -824,15 +879,21 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.GenerateName = "" } else { - x.GenerateName = string(r.DecodeString()) + yyv37 := &x.GenerateName + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*string)(yyv37)) = r.DecodeString() + } } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l } else { - yyb77 
= r.CheckBreak() + yyb34 = r.CheckBreak() } - if yyb77 { + if yyb34 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -840,15 +901,21 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Namespace = "" } else { - x.Namespace = string(r.DecodeString()) + yyv39 := &x.Namespace + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*string)(yyv39)) = r.DecodeString() + } } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l } else { - yyb77 = r.CheckBreak() + yyb34 = r.CheckBreak() } - if yyb77 { + if yyb34 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -856,15 +923,21 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.SelfLink = "" } else { - x.SelfLink = string(r.DecodeString()) + yyv41 := &x.SelfLink + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*string)(yyv41)) = r.DecodeString() + } } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l } else { - yyb77 = r.CheckBreak() + yyb34 = r.CheckBreak() } - if yyb77 { + if yyb34 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -872,15 +945,22 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.UID = "" } else { - x.UID = pkg1_types.UID(r.DecodeString()) + yyv43 := &x.UID + yym44 := z.DecBinary() + _ = yym44 + if false { + } else if z.HasExtensions() && z.DecExt(yyv43) { + } else { + *((*string)(yyv43)) = r.DecodeString() + } } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l } else { - yyb77 = r.CheckBreak() + yyb34 = r.CheckBreak() } - if yyb77 { + if yyb34 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -888,15 +968,21 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ResourceVersion = "" } else { - x.ResourceVersion = string(r.DecodeString()) + yyv45 := &x.ResourceVersion + yym46 := z.DecBinary() + _ = yym46 + if false { + } else { + *((*string)(yyv45)) = r.DecodeString() + } } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l } else { - yyb77 = r.CheckBreak() + yyb34 = r.CheckBreak() } - if yyb77 { + if yyb34 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -904,42 +990,48 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Generation = 0 } else { - x.Generation = int64(r.DecodeInt(64)) + yyv47 := &x.Generation + yym48 := z.DecBinary() + _ = yym48 + if false { + } else { + *((*int64)(yyv47)) = int64(r.DecodeInt(64)) + } } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l } else { - yyb77 = r.CheckBreak() + yyb34 = r.CheckBreak() } - if yyb77 { + if yyb34 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.CreationTimestamp = pkg2_unversioned.Time{} + x.CreationTimestamp = pkg2_v1.Time{} } else { - yyv85 := &x.CreationTimestamp - yym86 := z.DecBinary() - _ = yym86 + yyv49 := &x.CreationTimestamp + yym50 := z.DecBinary() + _ = yym50 if false { - } else if z.HasExtensions() && z.DecExt(yyv85) { - } else if yym86 { - z.DecBinaryUnmarshal(yyv85) - } else if !yym86 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv85) + } else if z.HasExtensions() 
&& z.DecExt(yyv49) { + } else if yym50 { + z.DecBinaryUnmarshal(yyv49) + } else if !yym50 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv49) } else { - z.DecFallback(yyv85, false) + z.DecFallback(yyv49, false) } } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l } else { - yyb77 = r.CheckBreak() + yyb34 = r.CheckBreak() } - if yyb77 { + if yyb34 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -950,27 +1042,27 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } } else { if x.DeletionTimestamp == nil { - x.DeletionTimestamp = new(pkg2_unversioned.Time) + x.DeletionTimestamp = new(pkg2_v1.Time) } - yym88 := z.DecBinary() - _ = yym88 + yym52 := z.DecBinary() + _ = yym52 if false { } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym88 { + } else if yym52 { z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym88 && z.IsJSONHandle() { + } else if !yym52 && z.IsJSONHandle() { z.DecJSONUnmarshal(x.DeletionTimestamp) } else { z.DecFallback(x.DeletionTimestamp, false) } } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l } else { - yyb77 = r.CheckBreak() + yyb34 = r.CheckBreak() } - if yyb77 { + if yyb34 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -983,20 +1075,20 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.DeletionGracePeriodSeconds == nil { x.DeletionGracePeriodSeconds = new(int64) } - yym90 := z.DecBinary() - _ = yym90 + yym54 := z.DecBinary() + _ = yym54 if false { } else { *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) } } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l } else { - yyb77 = r.CheckBreak() + yyb34 = r.CheckBreak() } - if yyb77 { + if yyb34 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1004,21 +1096,21 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Labels = nil } else { - yyv91 := &x.Labels - yym92 := z.DecBinary() - _ = yym92 + yyv55 := &x.Labels + yym56 := z.DecBinary() + _ = yym56 if false { } else { - z.F.DecMapStringStringX(yyv91, false, d) + z.F.DecMapStringStringX(yyv55, false, d) } } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l } else { - yyb77 = r.CheckBreak() + yyb34 = r.CheckBreak() } - if yyb77 { + if yyb34 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1026,21 +1118,21 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Annotations = nil } else { - yyv93 := &x.Annotations - yym94 := z.DecBinary() - _ = yym94 + yyv57 := &x.Annotations + yym58 := z.DecBinary() + _ = yym58 if false { } else { - z.F.DecMapStringStringX(yyv93, false, d) + z.F.DecMapStringStringX(yyv57, false, d) } } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l } else { - yyb77 = r.CheckBreak() + yyb34 = r.CheckBreak() } - if yyb77 { + if yyb34 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1048,21 +1140,21 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.OwnerReferences = nil } else { - yyv95 := &x.OwnerReferences - yym96 := z.DecBinary() - _ = yym96 + yyv59 := &x.OwnerReferences + yym60 := z.DecBinary() + _ = yym60 if false { } else { - h.decSliceOwnerReference((*[]OwnerReference)(yyv95), 
d) + h.decSlicev1_OwnerReference((*[]pkg2_v1.OwnerReference)(yyv59), d) } } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l } else { - yyb77 = r.CheckBreak() + yyb34 = r.CheckBreak() } - if yyb77 { + if yyb34 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1070,21 +1162,21 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Finalizers = nil } else { - yyv97 := &x.Finalizers - yym98 := z.DecBinary() - _ = yym98 + yyv61 := &x.Finalizers + yym62 := z.DecBinary() + _ = yym62 if false { } else { - z.F.DecSliceStringX(yyv97, false, d) + z.F.DecSliceStringX(yyv61, false, d) } } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l } else { - yyb77 = r.CheckBreak() + yyb34 = r.CheckBreak() } - if yyb77 { + if yyb34 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1092,20 +1184,26 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ClusterName = "" } else { - x.ClusterName = string(r.DecodeString()) + yyv63 := &x.ClusterName + yym64 := z.DecBinary() + _ = yym64 + if false { + } else { + *((*string)(yyv63)) = r.DecodeString() + } } for { - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l } else { - yyb77 = r.CheckBreak() + yyb34 = r.CheckBreak() } - if yyb77 { + if yyb34 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj77-1, "") + z.DecStructFieldNotFound(yyj34-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -1117,56 +1215,59 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym100 := z.EncBinary() - _ = yym100 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep101 := !z.EncBinary() - yy2arr101 := z.EncBasicHandle().StructToArray - var yyq101 [24]bool - _, _, _ = yysep101, yyq101, yy2arr101 - const yyr101 bool = false - yyq101[1] = x.VolumeSource.HostPath != nil && x.HostPath != nil - yyq101[2] = x.VolumeSource.EmptyDir != nil && x.EmptyDir != nil - yyq101[3] = x.VolumeSource.GCEPersistentDisk != nil && x.GCEPersistentDisk != nil - yyq101[4] = x.VolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil - yyq101[5] = x.VolumeSource.GitRepo != nil && x.GitRepo != nil - yyq101[6] = x.VolumeSource.Secret != nil && x.Secret != nil - yyq101[7] = x.VolumeSource.NFS != nil && x.NFS != nil - yyq101[8] = x.VolumeSource.ISCSI != nil && x.ISCSI != nil - yyq101[9] = x.VolumeSource.Glusterfs != nil && x.Glusterfs != nil - yyq101[10] = x.VolumeSource.PersistentVolumeClaim != nil && x.PersistentVolumeClaim != nil - yyq101[11] = x.VolumeSource.RBD != nil && x.RBD != nil - yyq101[12] = x.VolumeSource.FlexVolume != nil && x.FlexVolume != nil - yyq101[13] = x.VolumeSource.Cinder != nil && x.Cinder != nil - yyq101[14] = x.VolumeSource.CephFS != nil && x.CephFS != nil - yyq101[15] = x.VolumeSource.Flocker != nil && x.Flocker != nil - yyq101[16] = x.VolumeSource.DownwardAPI != nil && x.DownwardAPI != nil - yyq101[17] = x.VolumeSource.FC != nil && x.FC != nil - yyq101[18] = x.VolumeSource.AzureFile != nil && x.AzureFile != nil - yyq101[19] = x.VolumeSource.ConfigMap != nil && x.ConfigMap != nil - yyq101[20] = x.VolumeSource.VsphereVolume != nil && x.VsphereVolume != nil - yyq101[21] = x.VolumeSource.Quobyte != nil && x.Quobyte != nil - yyq101[22] = 
x.VolumeSource.AzureDisk != nil && x.AzureDisk != nil - yyq101[23] = x.VolumeSource.PhotonPersistentDisk != nil && x.PhotonPersistentDisk != nil - var yynn101 int - if yyr101 || yy2arr101 { - r.EncodeArrayStart(24) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [27]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.VolumeSource.HostPath != nil && x.HostPath != nil + yyq2[2] = x.VolumeSource.EmptyDir != nil && x.EmptyDir != nil + yyq2[3] = x.VolumeSource.GCEPersistentDisk != nil && x.GCEPersistentDisk != nil + yyq2[4] = x.VolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil + yyq2[5] = x.VolumeSource.GitRepo != nil && x.GitRepo != nil + yyq2[6] = x.VolumeSource.Secret != nil && x.Secret != nil + yyq2[7] = x.VolumeSource.NFS != nil && x.NFS != nil + yyq2[8] = x.VolumeSource.ISCSI != nil && x.ISCSI != nil + yyq2[9] = x.VolumeSource.Glusterfs != nil && x.Glusterfs != nil + yyq2[10] = x.VolumeSource.PersistentVolumeClaim != nil && x.PersistentVolumeClaim != nil + yyq2[11] = x.VolumeSource.RBD != nil && x.RBD != nil + yyq2[12] = x.VolumeSource.FlexVolume != nil && x.FlexVolume != nil + yyq2[13] = x.VolumeSource.Cinder != nil && x.Cinder != nil + yyq2[14] = x.VolumeSource.CephFS != nil && x.CephFS != nil + yyq2[15] = x.VolumeSource.Flocker != nil && x.Flocker != nil + yyq2[16] = x.VolumeSource.DownwardAPI != nil && x.DownwardAPI != nil + yyq2[17] = x.VolumeSource.FC != nil && x.FC != nil + yyq2[18] = x.VolumeSource.AzureFile != nil && x.AzureFile != nil + yyq2[19] = x.VolumeSource.ConfigMap != nil && x.ConfigMap != nil + yyq2[20] = x.VolumeSource.VsphereVolume != nil && x.VsphereVolume != nil + yyq2[21] = x.VolumeSource.Quobyte != nil && x.Quobyte != nil + yyq2[22] = x.VolumeSource.AzureDisk != nil && x.AzureDisk != nil + yyq2[23] = x.VolumeSource.PhotonPersistentDisk != nil && x.PhotonPersistentDisk != nil + yyq2[24] = x.VolumeSource.Projected != nil && x.Projected != nil + yyq2[25] = x.VolumeSource.PortworxVolume != nil && x.PortworxVolume != nil + yyq2[26] = x.VolumeSource.ScaleIO != nil && x.ScaleIO != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(27) } else { - yynn101 = 1 - for _, b := range yyq101 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn101++ + yynn2++ } } - r.EncodeMapStart(yynn101) - yynn101 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr101 || yy2arr101 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym103 := z.EncBinary() - _ = yym103 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) @@ -1175,25 +1276,25 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("name")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym104 := z.EncBinary() - _ = yym104 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } - var yyn105 bool + var yyn6 bool if x.VolumeSource.HostPath == nil { - yyn105 = true - goto LABEL105 + yyn6 = true + goto LABEL6 } - LABEL105: - if yyr101 || yy2arr101 { - if yyn105 { + LABEL6: + if yyr2 || yy2arr2 { + if yyn6 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[1] { + if yyq2[1] { if x.HostPath == nil { r.EncodeNil() } else { @@ -1204,11 +1305,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if 
yyq101[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("hostPath")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn105 { + if yyn6 { r.EncodeNil() } else { if x.HostPath == nil { @@ -1219,18 +1320,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn106 bool + var yyn9 bool if x.VolumeSource.EmptyDir == nil { - yyn106 = true - goto LABEL106 + yyn9 = true + goto LABEL9 } - LABEL106: - if yyr101 || yy2arr101 { - if yyn106 { + LABEL9: + if yyr2 || yy2arr2 { + if yyn9 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[2] { + if yyq2[2] { if x.EmptyDir == nil { r.EncodeNil() } else { @@ -1241,11 +1342,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("emptyDir")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn106 { + if yyn9 { r.EncodeNil() } else { if x.EmptyDir == nil { @@ -1256,18 +1357,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn107 bool + var yyn12 bool if x.VolumeSource.GCEPersistentDisk == nil { - yyn107 = true - goto LABEL107 + yyn12 = true + goto LABEL12 } - LABEL107: - if yyr101 || yy2arr101 { - if yyn107 { + LABEL12: + if yyr2 || yy2arr2 { + if yyn12 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[3] { + if yyq2[3] { if x.GCEPersistentDisk == nil { r.EncodeNil() } else { @@ -1278,11 +1379,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn107 { + if yyn12 { r.EncodeNil() } else { if x.GCEPersistentDisk == nil { @@ -1293,18 +1394,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn108 bool + var yyn15 bool if x.VolumeSource.AWSElasticBlockStore == nil { - yyn108 = true - goto LABEL108 + yyn15 = true + goto LABEL15 } - LABEL108: - if yyr101 || yy2arr101 { - if yyn108 { + LABEL15: + if yyr2 || yy2arr2 { + if yyn15 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[4] { + if yyq2[4] { if x.AWSElasticBlockStore == nil { r.EncodeNil() } else { @@ -1315,11 +1416,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn108 { + if yyn15 { r.EncodeNil() } else { if x.AWSElasticBlockStore == nil { @@ -1330,18 +1431,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn109 bool + var yyn18 bool if x.VolumeSource.GitRepo == nil { - yyn109 = true - goto LABEL109 + yyn18 = true + goto LABEL18 } - LABEL109: - if yyr101 || yy2arr101 { - if yyn109 { + LABEL18: + if yyr2 || yy2arr2 { + if yyn18 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[5] { + if yyq2[5] { if x.GitRepo == nil { r.EncodeNil() } else { @@ -1352,11 +1453,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[5] { + if yyq2[5] { 
z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("gitRepo")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn109 { + if yyn18 { r.EncodeNil() } else { if x.GitRepo == nil { @@ -1367,18 +1468,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn110 bool + var yyn21 bool if x.VolumeSource.Secret == nil { - yyn110 = true - goto LABEL110 + yyn21 = true + goto LABEL21 } - LABEL110: - if yyr101 || yy2arr101 { - if yyn110 { + LABEL21: + if yyr2 || yy2arr2 { + if yyn21 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[6] { + if yyq2[6] { if x.Secret == nil { r.EncodeNil() } else { @@ -1389,11 +1490,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[6] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("secret")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn110 { + if yyn21 { r.EncodeNil() } else { if x.Secret == nil { @@ -1404,18 +1505,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn111 bool + var yyn24 bool if x.VolumeSource.NFS == nil { - yyn111 = true - goto LABEL111 + yyn24 = true + goto LABEL24 } - LABEL111: - if yyr101 || yy2arr101 { - if yyn111 { + LABEL24: + if yyr2 || yy2arr2 { + if yyn24 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[7] { + if yyq2[7] { if x.NFS == nil { r.EncodeNil() } else { @@ -1426,11 +1527,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[7] { + if yyq2[7] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nfs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn111 { + if yyn24 { r.EncodeNil() } else { if x.NFS == nil { @@ -1441,18 +1542,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn112 bool + var yyn27 bool if x.VolumeSource.ISCSI == nil { - yyn112 = true - goto LABEL112 + yyn27 = true + goto LABEL27 } - LABEL112: - if yyr101 || yy2arr101 { - if yyn112 { + LABEL27: + if yyr2 || yy2arr2 { + if yyn27 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[8] { + if yyq2[8] { if x.ISCSI == nil { r.EncodeNil() } else { @@ -1463,11 +1564,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[8] { + if yyq2[8] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("iscsi")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn112 { + if yyn27 { r.EncodeNil() } else { if x.ISCSI == nil { @@ -1478,18 +1579,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn113 bool + var yyn30 bool if x.VolumeSource.Glusterfs == nil { - yyn113 = true - goto LABEL113 + yyn30 = true + goto LABEL30 } - LABEL113: - if yyr101 || yy2arr101 { - if yyn113 { + LABEL30: + if yyr2 || yy2arr2 { + if yyn30 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[9] { + if yyq2[9] { if x.Glusterfs == nil { r.EncodeNil() } else { @@ -1500,11 +1601,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[9] { + if yyq2[9] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn113 { + if yyn30 { r.EncodeNil() } else { if x.Glusterfs == nil { @@ -1515,18 +1616,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn114 bool + var yyn33 bool if x.VolumeSource.PersistentVolumeClaim == nil { - yyn114 = true - goto LABEL114 + yyn33 = true + goto LABEL33 } - LABEL114: - if yyr101 || yy2arr101 { - if yyn114 { + LABEL33: + if yyr2 || yy2arr2 { + if yyn33 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[10] { + if yyq2[10] { if x.PersistentVolumeClaim == nil { r.EncodeNil() } else { @@ -1537,11 +1638,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[10] { + if yyq2[10] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeClaim")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn114 { + if yyn33 { r.EncodeNil() } else { if x.PersistentVolumeClaim == nil { @@ -1552,18 +1653,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn115 bool + var yyn36 bool if x.VolumeSource.RBD == nil { - yyn115 = true - goto LABEL115 + yyn36 = true + goto LABEL36 } - LABEL115: - if yyr101 || yy2arr101 { - if yyn115 { + LABEL36: + if yyr2 || yy2arr2 { + if yyn36 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[11] { + if yyq2[11] { if x.RBD == nil { r.EncodeNil() } else { @@ -1574,11 +1675,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[11] { + if yyq2[11] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("rbd")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn115 { + if yyn36 { r.EncodeNil() } else { if x.RBD == nil { @@ -1589,18 +1690,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn116 bool + var yyn39 bool if x.VolumeSource.FlexVolume == nil { - yyn116 = true - goto LABEL116 + yyn39 = true + goto LABEL39 } - LABEL116: - if yyr101 || yy2arr101 { - if yyn116 { + LABEL39: + if yyr2 || yy2arr2 { + if yyn39 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[12] { + if yyq2[12] { if x.FlexVolume == nil { r.EncodeNil() } else { @@ -1611,11 +1712,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[12] { + if yyq2[12] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn116 { + if yyn39 { r.EncodeNil() } else { if x.FlexVolume == nil { @@ -1626,18 +1727,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn117 bool + var yyn42 bool if x.VolumeSource.Cinder == nil { - yyn117 = true - goto LABEL117 + yyn42 = true + goto LABEL42 } - LABEL117: - if yyr101 || yy2arr101 { - if yyn117 { + LABEL42: + if yyr2 || yy2arr2 { + if yyn42 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[13] { + if yyq2[13] { if x.Cinder == nil { r.EncodeNil() } else { @@ -1648,11 +1749,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[13] { + if yyq2[13] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("cinder")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn117 { + if 
yyn42 { r.EncodeNil() } else { if x.Cinder == nil { @@ -1663,18 +1764,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn118 bool + var yyn45 bool if x.VolumeSource.CephFS == nil { - yyn118 = true - goto LABEL118 + yyn45 = true + goto LABEL45 } - LABEL118: - if yyr101 || yy2arr101 { - if yyn118 { + LABEL45: + if yyr2 || yy2arr2 { + if yyn45 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[14] { + if yyq2[14] { if x.CephFS == nil { r.EncodeNil() } else { @@ -1685,11 +1786,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[14] { + if yyq2[14] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("cephfs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn118 { + if yyn45 { r.EncodeNil() } else { if x.CephFS == nil { @@ -1700,18 +1801,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn119 bool + var yyn48 bool if x.VolumeSource.Flocker == nil { - yyn119 = true - goto LABEL119 + yyn48 = true + goto LABEL48 } - LABEL119: - if yyr101 || yy2arr101 { - if yyn119 { + LABEL48: + if yyr2 || yy2arr2 { + if yyn48 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[15] { + if yyq2[15] { if x.Flocker == nil { r.EncodeNil() } else { @@ -1722,11 +1823,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[15] { + if yyq2[15] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("flocker")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn119 { + if yyn48 { r.EncodeNil() } else { if x.Flocker == nil { @@ -1737,18 +1838,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn120 bool + var yyn51 bool if x.VolumeSource.DownwardAPI == nil { - yyn120 = true - goto LABEL120 + yyn51 = true + goto LABEL51 } - LABEL120: - if yyr101 || yy2arr101 { - if yyn120 { + LABEL51: + if yyr2 || yy2arr2 { + if yyn51 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[16] { + if yyq2[16] { if x.DownwardAPI == nil { r.EncodeNil() } else { @@ -1759,11 +1860,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[16] { + if yyq2[16] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("downwardAPI")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn120 { + if yyn51 { r.EncodeNil() } else { if x.DownwardAPI == nil { @@ -1774,18 +1875,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn121 bool + var yyn54 bool if x.VolumeSource.FC == nil { - yyn121 = true - goto LABEL121 + yyn54 = true + goto LABEL54 } - LABEL121: - if yyr101 || yy2arr101 { - if yyn121 { + LABEL54: + if yyr2 || yy2arr2 { + if yyn54 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[17] { + if yyq2[17] { if x.FC == nil { r.EncodeNil() } else { @@ -1796,11 +1897,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[17] { + if yyq2[17] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("fc")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn121 { + if yyn54 { r.EncodeNil() } else { if x.FC == nil { @@ -1811,18 +1912,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } 
- var yyn122 bool + var yyn57 bool if x.VolumeSource.AzureFile == nil { - yyn122 = true - goto LABEL122 + yyn57 = true + goto LABEL57 } - LABEL122: - if yyr101 || yy2arr101 { - if yyn122 { + LABEL57: + if yyr2 || yy2arr2 { + if yyn57 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[18] { + if yyq2[18] { if x.AzureFile == nil { r.EncodeNil() } else { @@ -1833,11 +1934,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[18] { + if yyq2[18] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("azureFile")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn122 { + if yyn57 { r.EncodeNil() } else { if x.AzureFile == nil { @@ -1848,18 +1949,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn123 bool + var yyn60 bool if x.VolumeSource.ConfigMap == nil { - yyn123 = true - goto LABEL123 + yyn60 = true + goto LABEL60 } - LABEL123: - if yyr101 || yy2arr101 { - if yyn123 { + LABEL60: + if yyr2 || yy2arr2 { + if yyn60 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[19] { + if yyq2[19] { if x.ConfigMap == nil { r.EncodeNil() } else { @@ -1870,11 +1971,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[19] { + if yyq2[19] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("configMap")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn123 { + if yyn60 { r.EncodeNil() } else { if x.ConfigMap == nil { @@ -1885,18 +1986,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn124 bool + var yyn63 bool if x.VolumeSource.VsphereVolume == nil { - yyn124 = true - goto LABEL124 + yyn63 = true + goto LABEL63 } - LABEL124: - if yyr101 || yy2arr101 { - if yyn124 { + LABEL63: + if yyr2 || yy2arr2 { + if yyn63 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[20] { + if yyq2[20] { if x.VsphereVolume == nil { r.EncodeNil() } else { @@ -1907,11 +2008,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[20] { + if yyq2[20] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn124 { + if yyn63 { r.EncodeNil() } else { if x.VsphereVolume == nil { @@ -1922,18 +2023,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn125 bool + var yyn66 bool if x.VolumeSource.Quobyte == nil { - yyn125 = true - goto LABEL125 + yyn66 = true + goto LABEL66 } - LABEL125: - if yyr101 || yy2arr101 { - if yyn125 { + LABEL66: + if yyr2 || yy2arr2 { + if yyn66 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[21] { + if yyq2[21] { if x.Quobyte == nil { r.EncodeNil() } else { @@ -1944,11 +2045,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[21] { + if yyq2[21] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("quobyte")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn125 { + if yyn66 { r.EncodeNil() } else { if x.Quobyte == nil { @@ -1959,18 +2060,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn126 bool + var yyn69 bool if x.VolumeSource.AzureDisk == nil { - yyn126 = true - 
goto LABEL126 + yyn69 = true + goto LABEL69 } - LABEL126: - if yyr101 || yy2arr101 { - if yyn126 { + LABEL69: + if yyr2 || yy2arr2 { + if yyn69 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[22] { + if yyq2[22] { if x.AzureDisk == nil { r.EncodeNil() } else { @@ -1981,11 +2082,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[22] { + if yyq2[22] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn126 { + if yyn69 { r.EncodeNil() } else { if x.AzureDisk == nil { @@ -1996,18 +2097,18 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn127 bool + var yyn72 bool if x.VolumeSource.PhotonPersistentDisk == nil { - yyn127 = true - goto LABEL127 + yyn72 = true + goto LABEL72 } - LABEL127: - if yyr101 || yy2arr101 { - if yyn127 { + LABEL72: + if yyr2 || yy2arr2 { + if yyn72 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq101[23] { + if yyq2[23] { if x.PhotonPersistentDisk == nil { r.EncodeNil() } else { @@ -2018,11 +2119,11 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq101[23] { + if yyq2[23] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn127 { + if yyn72 { r.EncodeNil() } else { if x.PhotonPersistentDisk == nil { @@ -2033,7 +2134,118 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr101 || yy2arr101 { + var yyn75 bool + if x.VolumeSource.Projected == nil { + yyn75 = true + goto LABEL75 + } + LABEL75: + if yyr2 || yy2arr2 { + if yyn75 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[24] { + if x.Projected == nil { + r.EncodeNil() + } else { + x.Projected.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[24] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("projected")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn75 { + r.EncodeNil() + } else { + if x.Projected == nil { + r.EncodeNil() + } else { + x.Projected.CodecEncodeSelf(e) + } + } + } + } + var yyn78 bool + if x.VolumeSource.PortworxVolume == nil { + yyn78 = true + goto LABEL78 + } + LABEL78: + if yyr2 || yy2arr2 { + if yyn78 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[25] { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[25] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("portworxVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn78 { + r.EncodeNil() + } else { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } + } + } + var yyn81 bool + if x.VolumeSource.ScaleIO == nil { + yyn81 = true + goto LABEL81 + } + LABEL81: + if yyr2 || yy2arr2 { + if yyn81 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[26] { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if 
yyq2[26] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleIO")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn81 { + r.EncodeNil() + } else { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } + } + } + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -2046,25 +2258,25 @@ func (x *Volume) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym128 := z.DecBinary() - _ = yym128 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct129 := r.ContainerType() - if yyct129 == codecSelferValueTypeMap1234 { - yyl129 := r.ReadMapStart() - if yyl129 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl129, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct129 == codecSelferValueTypeArray1234 { - yyl129 := r.ReadArrayStart() - if yyl129 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl129, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -2076,12 +2288,12 @@ func (x *Volume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys130Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys130Slc - var yyhl130 bool = l >= 0 - for yyj130 := 0; ; yyj130++ { - if yyhl130 { - if yyj130 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -2090,15 +2302,21 @@ func (x *Volume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys130Slc = r.DecodeBytes(yys130Slc, true, true) - yys130 := string(yys130Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys130 { + switch yys3 { case "name": if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "hostPath": if x.VolumeSource.HostPath == nil { @@ -2422,10 +2640,52 @@ func (x *Volume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } x.PhotonPersistentDisk.CodecDecodeSelf(d) } + case "projected": + if x.VolumeSource.Projected == nil { + x.VolumeSource.Projected = new(ProjectedVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Projected != nil { + x.Projected = nil + } + } else { + if x.Projected == nil { + x.Projected = new(ProjectedVolumeSource) + } + x.Projected.CodecDecodeSelf(d) + } + case "portworxVolume": + if x.VolumeSource.PortworxVolume == nil { + x.VolumeSource.PortworxVolume = new(PortworxVolumeSource) + } + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + 
x.PortworxVolume.CodecDecodeSelf(d) + } + case "scaleIO": + if x.VolumeSource.ScaleIO == nil { + x.VolumeSource.ScaleIO = new(ScaleIOVolumeSource) + } + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } default: - z.DecStructFieldNotFound(-1, yys130) - } // end switch yys130 - } // end for yyj130 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -2433,16 +2693,16 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj155 int - var yyb155 bool - var yyhl155 bool = l >= 0 - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + var yyj32 int + var yyb32 bool + var yyhl32 bool = l >= 0 + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2450,18 +2710,24 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv33 := &x.Name + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*string)(yyv33)) = r.DecodeString() + } } if x.VolumeSource.HostPath == nil { x.VolumeSource.HostPath = new(HostPathVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2479,13 +2745,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.EmptyDir == nil { x.VolumeSource.EmptyDir = new(EmptyDirVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2503,13 +2769,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.GCEPersistentDisk == nil { x.VolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2527,13 +2793,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.AWSElasticBlockStore == nil { x.VolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2551,13 +2817,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.GitRepo == nil { x.VolumeSource.GitRepo = new(GitRepoVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2575,13 +2841,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.Secret == nil { x.VolumeSource.Secret = new(SecretVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2599,13 +2865,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.NFS == nil { x.VolumeSource.NFS = new(NFSVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2623,13 +2889,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.ISCSI == nil { x.VolumeSource.ISCSI = new(ISCSIVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2647,13 +2913,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.Glusterfs == nil { x.VolumeSource.Glusterfs = new(GlusterfsVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2671,13 +2937,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.PersistentVolumeClaim == nil { x.VolumeSource.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2695,13 +2961,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.RBD == nil { x.VolumeSource.RBD = new(RBDVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2719,13 +2985,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.FlexVolume == nil { x.VolumeSource.FlexVolume = new(FlexVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2743,13 +3009,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.Cinder == nil { x.VolumeSource.Cinder = new(CinderVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ 
-2767,13 +3033,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.CephFS == nil { x.VolumeSource.CephFS = new(CephFSVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2791,13 +3057,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.Flocker == nil { x.VolumeSource.Flocker = new(FlockerVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2815,13 +3081,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.DownwardAPI == nil { x.VolumeSource.DownwardAPI = new(DownwardAPIVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2839,13 +3105,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.FC == nil { x.VolumeSource.FC = new(FCVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2863,13 +3129,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.AzureFile == nil { x.VolumeSource.AzureFile = new(AzureFileVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2887,13 +3153,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.ConfigMap == nil { x.VolumeSource.ConfigMap = new(ConfigMapVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2911,13 +3177,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.VsphereVolume == nil { x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2935,13 +3201,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.Quobyte == nil { x.VolumeSource.Quobyte = new(QuobyteVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2959,13 +3225,13 @@ func (x *Volume) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.AzureDisk == nil { x.VolumeSource.AzureDisk = new(AzureDiskVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2983,13 +3249,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.PhotonPersistentDisk == nil { x.VolumeSource.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) } - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3004,18 +3270,90 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.PhotonPersistentDisk.CodecDecodeSelf(d) } + if x.VolumeSource.Projected == nil { + x.VolumeSource.Projected = new(ProjectedVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Projected != nil { + x.Projected = nil + } + } else { + if x.Projected == nil { + x.Projected = new(ProjectedVolumeSource) + } + x.Projected.CodecDecodeSelf(d) + } + if x.VolumeSource.PortworxVolume == nil { + x.VolumeSource.PortworxVolume = new(PortworxVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + if x.VolumeSource.ScaleIO == nil { + x.VolumeSource.ScaleIO = new(ScaleIOVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } for { - yyj155++ - if yyhl155 { - yyb155 = yyj155 > l + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l } else { - yyb155 = r.CheckBreak() + yyb32 = r.CheckBreak() } - if yyb155 { + if yyb32 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj155-1, "") + z.DecStructFieldNotFound(yyj32-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -3027,55 +3365,58 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym180 := z.EncBinary() - _ = yym180 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep181 := !z.EncBinary() - yy2arr181 := z.EncBasicHandle().StructToArray - var yyq181 [23]bool - _, _, _ = yysep181, yyq181, yy2arr181 - const yyr181 bool = false - yyq181[0] = x.HostPath != nil - yyq181[1] = x.EmptyDir != nil - yyq181[2] = x.GCEPersistentDisk != nil - 
yyq181[3] = x.AWSElasticBlockStore != nil - yyq181[4] = x.GitRepo != nil - yyq181[5] = x.Secret != nil - yyq181[6] = x.NFS != nil - yyq181[7] = x.ISCSI != nil - yyq181[8] = x.Glusterfs != nil - yyq181[9] = x.PersistentVolumeClaim != nil - yyq181[10] = x.RBD != nil - yyq181[11] = x.FlexVolume != nil - yyq181[12] = x.Cinder != nil - yyq181[13] = x.CephFS != nil - yyq181[14] = x.Flocker != nil - yyq181[15] = x.DownwardAPI != nil - yyq181[16] = x.FC != nil - yyq181[17] = x.AzureFile != nil - yyq181[18] = x.ConfigMap != nil - yyq181[19] = x.VsphereVolume != nil - yyq181[20] = x.Quobyte != nil - yyq181[21] = x.AzureDisk != nil - yyq181[22] = x.PhotonPersistentDisk != nil - var yynn181 int - if yyr181 || yy2arr181 { - r.EncodeArrayStart(23) - } else { - yynn181 = 0 - for _, b := range yyq181 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [26]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.HostPath != nil + yyq2[1] = x.EmptyDir != nil + yyq2[2] = x.GCEPersistentDisk != nil + yyq2[3] = x.AWSElasticBlockStore != nil + yyq2[4] = x.GitRepo != nil + yyq2[5] = x.Secret != nil + yyq2[6] = x.NFS != nil + yyq2[7] = x.ISCSI != nil + yyq2[8] = x.Glusterfs != nil + yyq2[9] = x.PersistentVolumeClaim != nil + yyq2[10] = x.RBD != nil + yyq2[11] = x.FlexVolume != nil + yyq2[12] = x.Cinder != nil + yyq2[13] = x.CephFS != nil + yyq2[14] = x.Flocker != nil + yyq2[15] = x.DownwardAPI != nil + yyq2[16] = x.FC != nil + yyq2[17] = x.AzureFile != nil + yyq2[18] = x.ConfigMap != nil + yyq2[19] = x.VsphereVolume != nil + yyq2[20] = x.Quobyte != nil + yyq2[21] = x.AzureDisk != nil + yyq2[22] = x.PhotonPersistentDisk != nil + yyq2[23] = x.Projected != nil + yyq2[24] = x.PortworxVolume != nil + yyq2[25] = x.ScaleIO != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(26) + } else { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn181++ + yynn2++ } } - r.EncodeMapStart(yynn181) - yynn181 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[0] { + if yyq2[0] { if x.HostPath == nil { r.EncodeNil() } else { @@ -3085,7 +3426,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("hostPath")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3096,9 +3437,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[1] { + if yyq2[1] { if x.EmptyDir == nil { r.EncodeNil() } else { @@ -3108,7 +3449,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("emptyDir")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3119,9 +3460,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[2] { + if yyq2[2] { if x.GCEPersistentDisk == nil { r.EncodeNil() } else { @@ -3131,7 +3472,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[2] { + if yyq2[2] { 
z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3142,9 +3483,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[3] { + if yyq2[3] { if x.AWSElasticBlockStore == nil { r.EncodeNil() } else { @@ -3154,7 +3495,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3165,9 +3506,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[4] { + if yyq2[4] { if x.GitRepo == nil { r.EncodeNil() } else { @@ -3177,7 +3518,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("gitRepo")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3188,9 +3529,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[5] { + if yyq2[5] { if x.Secret == nil { r.EncodeNil() } else { @@ -3200,7 +3541,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("secret")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3211,9 +3552,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[6] { + if yyq2[6] { if x.NFS == nil { r.EncodeNil() } else { @@ -3223,7 +3564,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[6] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nfs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3234,9 +3575,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[7] { + if yyq2[7] { if x.ISCSI == nil { r.EncodeNil() } else { @@ -3246,7 +3587,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[7] { + if yyq2[7] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("iscsi")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3257,9 +3598,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[8] { + if yyq2[8] { if x.Glusterfs == nil { r.EncodeNil() } else { @@ -3269,7 +3610,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if 
yyq181[8] { + if yyq2[8] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3280,9 +3621,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[9] { + if yyq2[9] { if x.PersistentVolumeClaim == nil { r.EncodeNil() } else { @@ -3292,7 +3633,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[9] { + if yyq2[9] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeClaim")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3303,9 +3644,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[10] { + if yyq2[10] { if x.RBD == nil { r.EncodeNil() } else { @@ -3315,7 +3656,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[10] { + if yyq2[10] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("rbd")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3326,9 +3667,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[11] { + if yyq2[11] { if x.FlexVolume == nil { r.EncodeNil() } else { @@ -3338,7 +3679,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[11] { + if yyq2[11] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3349,9 +3690,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[12] { + if yyq2[12] { if x.Cinder == nil { r.EncodeNil() } else { @@ -3361,7 +3702,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[12] { + if yyq2[12] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("cinder")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3372,9 +3713,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[13] { + if yyq2[13] { if x.CephFS == nil { r.EncodeNil() } else { @@ -3384,7 +3725,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[13] { + if yyq2[13] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("cephfs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3395,9 +3736,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[14] { + if yyq2[14] { if x.Flocker == nil { r.EncodeNil() } else { @@ -3407,7 +3748,7 @@ func (x *VolumeSource) CodecEncodeSelf(e 
*codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[14] { + if yyq2[14] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("flocker")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3418,9 +3759,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[15] { + if yyq2[15] { if x.DownwardAPI == nil { r.EncodeNil() } else { @@ -3430,7 +3771,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[15] { + if yyq2[15] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("downwardAPI")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3441,9 +3782,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[16] { + if yyq2[16] { if x.FC == nil { r.EncodeNil() } else { @@ -3453,7 +3794,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[16] { + if yyq2[16] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("fc")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3464,9 +3805,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[17] { + if yyq2[17] { if x.AzureFile == nil { r.EncodeNil() } else { @@ -3476,7 +3817,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[17] { + if yyq2[17] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("azureFile")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3487,9 +3828,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[18] { + if yyq2[18] { if x.ConfigMap == nil { r.EncodeNil() } else { @@ -3499,7 +3840,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[18] { + if yyq2[18] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("configMap")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3510,9 +3851,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[19] { + if yyq2[19] { if x.VsphereVolume == nil { r.EncodeNil() } else { @@ -3522,7 +3863,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[19] { + if yyq2[19] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3533,9 +3874,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[20] { + if yyq2[20] { if x.Quobyte == nil { r.EncodeNil() } else { @@ -3545,7 
+3886,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[20] { + if yyq2[20] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("quobyte")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3556,9 +3897,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[21] { + if yyq2[21] { if x.AzureDisk == nil { r.EncodeNil() } else { @@ -3568,7 +3909,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[21] { + if yyq2[21] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3579,9 +3920,9 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq181[22] { + if yyq2[22] { if x.PhotonPersistentDisk == nil { r.EncodeNil() } else { @@ -3591,7 +3932,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq181[22] { + if yyq2[22] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -3602,7 +3943,76 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr181 || yy2arr181 { + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[23] { + if x.Projected == nil { + r.EncodeNil() + } else { + x.Projected.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[23] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("projected")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Projected == nil { + r.EncodeNil() + } else { + x.Projected.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[24] { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[24] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("portworxVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[25] { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[25] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleIO")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -3615,25 +4025,25 @@ func (x *VolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = 
h, z, r - yym205 := z.DecBinary() - _ = yym205 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct206 := r.ContainerType() - if yyct206 == codecSelferValueTypeMap1234 { - yyl206 := r.ReadMapStart() - if yyl206 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl206, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct206 == codecSelferValueTypeArray1234 { - yyl206 := r.ReadArrayStart() - if yyl206 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl206, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -3645,12 +4055,12 @@ func (x *VolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys207Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys207Slc - var yyhl207 bool = l >= 0 - for yyj207 := 0; ; yyj207++ { - if yyhl207 { - if yyj207 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -3659,10 +4069,10 @@ func (x *VolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys207Slc = r.DecodeBytes(yys207Slc, true, true) - yys207 := string(yys207Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys207 { + switch yys3 { case "hostPath": if r.TryDecodeAsNil() { if x.HostPath != nil { @@ -3916,10 +4326,43 @@ func (x *VolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } x.PhotonPersistentDisk.CodecDecodeSelf(d) } + case "projected": + if r.TryDecodeAsNil() { + if x.Projected != nil { + x.Projected = nil + } + } else { + if x.Projected == nil { + x.Projected = new(ProjectedVolumeSource) + } + x.Projected.CodecDecodeSelf(d) + } + case "portworxVolume": + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + case "scaleIO": + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } default: - z.DecStructFieldNotFound(-1, yys207) - } // end switch yys207 - } // end for yyj207 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -3927,16 +4370,16 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj231 int - var yyb231 bool - var yyhl231 bool = l >= 0 - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + var yyj30 int + var yyb30 bool + var yyhl30 bool = l >= 0 + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) 
return } @@ -3951,13 +4394,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.HostPath.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3972,13 +4415,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.EmptyDir.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3993,13 +4436,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.GCEPersistentDisk.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4014,13 +4457,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.AWSElasticBlockStore.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4035,13 +4478,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.GitRepo.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4056,13 +4499,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.Secret.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4077,13 +4520,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.NFS.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4098,13 +4541,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.ISCSI.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4119,13 +4562,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.Glusterfs.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4140,13 +4583,13 @@ func (x *VolumeSource) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.PersistentVolumeClaim.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4161,13 +4604,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.RBD.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4182,13 +4625,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.FlexVolume.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4203,13 +4646,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.Cinder.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4224,13 +4667,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.CephFS.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4245,13 +4688,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.Flocker.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4266,13 +4709,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.DownwardAPI.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4287,13 +4730,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.FC.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4308,13 +4751,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.AzureFile.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4329,13 +4772,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } 
x.ConfigMap.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4350,13 +4793,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.VsphereVolume.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4371,13 +4814,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.Quobyte.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4392,13 +4835,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.AzureDisk.CodecDecodeSelf(d) } - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4413,18 +4856,81 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.PhotonPersistentDisk.CodecDecodeSelf(d) } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Projected != nil { + x.Projected = nil + } + } else { + if x.Projected == nil { + x.Projected = new(ProjectedVolumeSource) + } + x.Projected.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } for { - yyj231++ - if yyhl231 { - yyb231 = yyj231 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb231 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb231 { + if yyb30 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj231-1, "") + z.DecStructFieldNotFound(yyj30-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -4436,34 +4942,34 @@ func (x *PersistentVolumeClaimVolumeSource) CodecEncodeSelf(e *codec1978.Encoder if x == nil { r.EncodeNil() } else { - yym255 := z.EncBinary() - _ = yym255 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && 
z.EncExt(x) { } else { - yysep256 := !z.EncBinary() - yy2arr256 := z.EncBasicHandle().StructToArray - var yyq256 [2]bool - _, _, _ = yysep256, yyq256, yy2arr256 - const yyr256 bool = false - yyq256[1] = x.ReadOnly != false - var yynn256 int - if yyr256 || yy2arr256 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn256 = 1 - for _, b := range yyq256 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn256++ + yynn2++ } } - r.EncodeMapStart(yynn256) - yynn256 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr256 || yy2arr256 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym258 := z.EncBinary() - _ = yym258 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ClaimName)) @@ -4472,18 +4978,18 @@ func (x *PersistentVolumeClaimVolumeSource) CodecEncodeSelf(e *codec1978.Encoder z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("claimName")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym259 := z.EncBinary() - _ = yym259 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ClaimName)) } } - if yyr256 || yy2arr256 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq256[1] { - yym261 := z.EncBinary() - _ = yym261 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeBool(bool(x.ReadOnly)) @@ -4492,19 +4998,19 @@ func (x *PersistentVolumeClaimVolumeSource) CodecEncodeSelf(e *codec1978.Encoder r.EncodeBool(false) } } else { - if yyq256[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("readOnly")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym262 := z.EncBinary() - _ = yym262 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeBool(bool(x.ReadOnly)) } } } - if yyr256 || yy2arr256 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -4517,25 +5023,25 @@ func (x *PersistentVolumeClaimVolumeSource) CodecDecodeSelf(d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym263 := z.DecBinary() - _ = yym263 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct264 := r.ContainerType() - if yyct264 == codecSelferValueTypeMap1234 { - yyl264 := r.ReadMapStart() - if yyl264 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl264, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct264 == codecSelferValueTypeArray1234 { - yyl264 := r.ReadArrayStart() - if yyl264 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl264, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -4547,12 +5053,12 @@ func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromMap(l int, d 
*cod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys265Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys265Slc - var yyhl265 bool = l >= 0 - for yyj265 := 0; ; yyj265++ { - if yyhl265 { - if yyj265 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -4561,26 +5067,38 @@ func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromMap(l int, d *cod } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys265Slc = r.DecodeBytes(yys265Slc, true, true) - yys265 := string(yys265Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys265 { + switch yys3 { case "claimName": if r.TryDecodeAsNil() { x.ClaimName = "" } else { - x.ClaimName = string(r.DecodeString()) + yyv4 := &x.ClaimName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "readOnly": if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv6 := &x.ReadOnly + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } } default: - z.DecStructFieldNotFound(-1, yys265) - } // end switch yys265 - } // end for yyj265 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -4588,16 +5106,16 @@ func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromArray(l int, d *c var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj268 int - var yyb268 bool - var yyhl268 bool = l >= 0 - yyj268++ - if yyhl268 { - yyb268 = yyj268 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb268 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb268 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4605,15 +5123,21 @@ func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromArray(l int, d *c if r.TryDecodeAsNil() { x.ClaimName = "" } else { - x.ClaimName = string(r.DecodeString()) + yyv9 := &x.ClaimName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } } - yyj268++ - if yyhl268 { - yyb268 = yyj268 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb268 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb268 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4621,20 +5145,26 @@ func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromArray(l int, d *c if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv11 := &x.ReadOnly + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } } for { - yyj268++ - if yyhl268 { - yyb268 = yyj268 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb268 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb268 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj268-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -4646,49 +5176,51 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym271 := z.EncBinary() - _ = 
yym271 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep272 := !z.EncBinary() - yy2arr272 := z.EncBasicHandle().StructToArray - var yyq272 [17]bool - _, _, _ = yysep272, yyq272, yy2arr272 - const yyr272 bool = false - yyq272[0] = x.GCEPersistentDisk != nil - yyq272[1] = x.AWSElasticBlockStore != nil - yyq272[2] = x.HostPath != nil - yyq272[3] = x.Glusterfs != nil - yyq272[4] = x.NFS != nil - yyq272[5] = x.RBD != nil - yyq272[6] = x.ISCSI != nil - yyq272[7] = x.Cinder != nil - yyq272[8] = x.CephFS != nil - yyq272[9] = x.FC != nil - yyq272[10] = x.Flocker != nil - yyq272[11] = x.FlexVolume != nil - yyq272[12] = x.AzureFile != nil - yyq272[13] = x.VsphereVolume != nil - yyq272[14] = x.Quobyte != nil - yyq272[15] = x.AzureDisk != nil - yyq272[16] = x.PhotonPersistentDisk != nil - var yynn272 int - if yyr272 || yy2arr272 { - r.EncodeArrayStart(17) - } else { - yynn272 = 0 - for _, b := range yyq272 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [19]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.GCEPersistentDisk != nil + yyq2[1] = x.AWSElasticBlockStore != nil + yyq2[2] = x.HostPath != nil + yyq2[3] = x.Glusterfs != nil + yyq2[4] = x.NFS != nil + yyq2[5] = x.RBD != nil + yyq2[6] = x.ISCSI != nil + yyq2[7] = x.Cinder != nil + yyq2[8] = x.CephFS != nil + yyq2[9] = x.FC != nil + yyq2[10] = x.Flocker != nil + yyq2[11] = x.FlexVolume != nil + yyq2[12] = x.AzureFile != nil + yyq2[13] = x.VsphereVolume != nil + yyq2[14] = x.Quobyte != nil + yyq2[15] = x.AzureDisk != nil + yyq2[16] = x.PhotonPersistentDisk != nil + yyq2[17] = x.PortworxVolume != nil + yyq2[18] = x.ScaleIO != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(19) + } else { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn272++ + yynn2++ } } - r.EncodeMapStart(yynn272) - yynn272 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr272 || yy2arr272 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq272[0] { + if yyq2[0] { if x.GCEPersistentDisk == nil { r.EncodeNil() } else { @@ -4698,7 +5230,7 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq272[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -4709,9 +5241,9 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr272 || yy2arr272 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq272[1] { + if yyq2[1] { if x.AWSElasticBlockStore == nil { r.EncodeNil() } else { @@ -4721,7 +5253,7 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq272[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -4732,9 +5264,9 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr272 || yy2arr272 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq272[2] { + if yyq2[2] { if x.HostPath == nil { r.EncodeNil() } else { @@ -4744,7 +5276,7 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq272[2] { + if 
yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("hostPath")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -4755,9 +5287,9 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr272 || yy2arr272 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq272[3] { + if yyq2[3] { if x.Glusterfs == nil { r.EncodeNil() } else { @@ -4767,7 +5299,7 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq272[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -4778,9 +5310,9 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr272 || yy2arr272 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq272[4] { + if yyq2[4] { if x.NFS == nil { r.EncodeNil() } else { @@ -4790,7 +5322,7 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq272[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nfs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -4801,9 +5333,9 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr272 || yy2arr272 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq272[5] { + if yyq2[5] { if x.RBD == nil { r.EncodeNil() } else { @@ -4813,7 +5345,7 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq272[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("rbd")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -4824,9 +5356,9 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr272 || yy2arr272 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq272[6] { + if yyq2[6] { if x.ISCSI == nil { r.EncodeNil() } else { @@ -4836,7 +5368,7 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq272[6] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("iscsi")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -4847,9 +5379,9 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr272 || yy2arr272 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq272[7] { + if yyq2[7] { if x.Cinder == nil { r.EncodeNil() } else { @@ -4859,7 +5391,7 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq272[7] { + if yyq2[7] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("cinder")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -4870,9 +5402,9 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr272 || yy2arr272 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq272[8] { + if yyq2[8] { if x.CephFS == nil { r.EncodeNil() } else { @@ -4882,7 +5414,7 @@ func (x 
*PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq272[8] { + if yyq2[8] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("cephfs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -4893,9 +5425,9 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr272 || yy2arr272 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq272[9] { + if yyq2[9] { if x.FC == nil { r.EncodeNil() } else { @@ -4905,7 +5437,7 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq272[9] { + if yyq2[9] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("fc")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -4916,9 +5448,9 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr272 || yy2arr272 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq272[10] { + if yyq2[10] { if x.Flocker == nil { r.EncodeNil() } else { @@ -4928,7 +5460,7 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq272[10] { + if yyq2[10] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("flocker")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -4939,9 +5471,9 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr272 || yy2arr272 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq272[11] { + if yyq2[11] { if x.FlexVolume == nil { r.EncodeNil() } else { @@ -4951,7 +5483,7 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq272[11] { + if yyq2[11] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -4962,9 +5494,9 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr272 || yy2arr272 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq272[12] { + if yyq2[12] { if x.AzureFile == nil { r.EncodeNil() } else { @@ -4974,7 +5506,7 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq272[12] { + if yyq2[12] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("azureFile")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -4985,9 +5517,9 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr272 || yy2arr272 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq272[13] { + if yyq2[13] { if x.VsphereVolume == nil { r.EncodeNil() } else { @@ -4997,7 +5529,7 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq272[13] { + if yyq2[13] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -5008,9 +5540,9 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr272 || yy2arr272 { + if yyr2 || yy2arr2 { 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq272[14] { + if yyq2[14] { if x.Quobyte == nil { r.EncodeNil() } else { @@ -5020,7 +5552,7 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq272[14] { + if yyq2[14] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("quobyte")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -5031,9 +5563,9 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr272 || yy2arr272 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq272[15] { + if yyq2[15] { if x.AzureDisk == nil { r.EncodeNil() } else { @@ -5043,7 +5575,7 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq272[15] { + if yyq2[15] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -5054,9 +5586,9 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr272 || yy2arr272 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq272[16] { + if yyq2[16] { if x.PhotonPersistentDisk == nil { r.EncodeNil() } else { @@ -5066,7 +5598,7 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq272[16] { + if yyq2[16] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -5077,7 +5609,53 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr272 || yy2arr272 { + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[17] { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[17] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("portworxVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[18] { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[18] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleIO")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -5090,25 +5668,25 @@ func (x *PersistentVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym290 := z.DecBinary() - _ = yym290 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct291 := r.ContainerType() - if yyct291 == codecSelferValueTypeMap1234 { - yyl291 := r.ReadMapStart() - if yyl291 == 0 { + yyct2 := r.ContainerType() + if 
yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl291, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct291 == codecSelferValueTypeArray1234 { - yyl291 := r.ReadArrayStart() - if yyl291 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl291, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -5120,12 +5698,12 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Deco var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys292Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys292Slc - var yyhl292 bool = l >= 0 - for yyj292 := 0; ; yyj292++ { - if yyhl292 { - if yyj292 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -5134,10 +5712,10 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Deco } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys292Slc = r.DecodeBytes(yys292Slc, true, true) - yys292 := string(yys292Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys292 { + switch yys3 { case "gcePersistentDisk": if r.TryDecodeAsNil() { if x.GCEPersistentDisk != nil { @@ -5325,10 +5903,32 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Deco } x.PhotonPersistentDisk.CodecDecodeSelf(d) } + case "portworxVolume": + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + case "scaleIO": + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } default: - z.DecStructFieldNotFound(-1, yys292) - } // end switch yys292 - } // end for yyj292 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -5336,16 +5936,16 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj310 int - var yyb310 bool - var yyhl310 bool = l >= 0 - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + var yyj23 int + var yyb23 bool + var yyhl23 bool = l >= 0 + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb310 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb310 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5360,13 +5960,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.GCEPersistentDisk.CodecDecodeSelf(d) } - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb310 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb310 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5381,13 +5981,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l 
int, d *codec1978.De } x.AWSElasticBlockStore.CodecDecodeSelf(d) } - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb310 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb310 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5402,13 +6002,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.HostPath.CodecDecodeSelf(d) } - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb310 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb310 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5423,13 +6023,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.Glusterfs.CodecDecodeSelf(d) } - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb310 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb310 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5444,13 +6044,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.NFS.CodecDecodeSelf(d) } - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb310 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb310 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5465,13 +6065,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.RBD.CodecDecodeSelf(d) } - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb310 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb310 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5486,13 +6086,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.ISCSI.CodecDecodeSelf(d) } - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb310 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb310 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5507,13 +6107,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.Cinder.CodecDecodeSelf(d) } - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb310 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb310 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5528,13 +6128,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.CephFS.CodecDecodeSelf(d) } - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb310 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb310 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5549,13 +6149,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.FC.CodecDecodeSelf(d) } - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb310 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb310 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5570,13 +6170,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.Flocker.CodecDecodeSelf(d) } - yyj310++ - if 
yyhl310 { - yyb310 = yyj310 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb310 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb310 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5591,13 +6191,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.FlexVolume.CodecDecodeSelf(d) } - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb310 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb310 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5612,13 +6212,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.AzureFile.CodecDecodeSelf(d) } - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb310 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb310 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5633,13 +6233,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.VsphereVolume.CodecDecodeSelf(d) } - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb310 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb310 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5654,13 +6254,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.Quobyte.CodecDecodeSelf(d) } - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb310 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb310 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5675,13 +6275,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.AzureDisk.CodecDecodeSelf(d) } - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb310 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb310 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5696,18 +6296,60 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.PhotonPersistentDisk.CodecDecodeSelf(d) } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } for { - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb310 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb310 { + if yyb23 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj310-1, "") + z.DecStructFieldNotFound(yyj23-1, "") } 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -5719,39 +6361,39 @@ func (x *PersistentVolume) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym328 := z.EncBinary() - _ = yym328 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep329 := !z.EncBinary() - yy2arr329 := z.EncBasicHandle().StructToArray - var yyq329 [5]bool - _, _, _ = yysep329, yyq329, yy2arr329 - const yyr329 bool = false - yyq329[0] = x.Kind != "" - yyq329[1] = x.APIVersion != "" - yyq329[2] = true - yyq329[3] = true - yyq329[4] = true - var yynn329 int - if yyr329 || yy2arr329 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn329 = 0 - for _, b := range yyq329 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn329++ + yynn2++ } } - r.EncodeMapStart(yynn329) - yynn329 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr329 || yy2arr329 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq329[0] { - yym331 := z.EncBinary() - _ = yym331 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -5760,23 +6402,23 @@ func (x *PersistentVolume) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq329[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym332 := z.EncBinary() - _ = yym332 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr329 || yy2arr329 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq329[1] { - yym334 := z.EncBinary() - _ = yym334 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -5785,70 +6427,82 @@ func (x *PersistentVolume) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq329[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym335 := z.EncBinary() - _ = yym335 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr329 || yy2arr329 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq329[2] { - yy337 := &x.ObjectMeta - yy337.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq329[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy338 := &x.ObjectMeta - yy338.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } 
else { + z.EncFallback(yy12) + } } } - if yyr329 || yy2arr329 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq329[3] { - yy340 := &x.Spec - yy340.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq329[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy341 := &x.Spec - yy341.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } } - if yyr329 || yy2arr329 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq329[4] { - yy343 := &x.Status - yy343.CodecEncodeSelf(e) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq329[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy344 := &x.Status - yy344.CodecEncodeSelf(e) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } - if yyr329 || yy2arr329 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -5861,25 +6515,25 @@ func (x *PersistentVolume) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym345 := z.DecBinary() - _ = yym345 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct346 := r.ContainerType() - if yyct346 == codecSelferValueTypeMap1234 { - yyl346 := r.ReadMapStart() - if yyl346 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl346, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct346 == codecSelferValueTypeArray1234 { - yyl346 := r.ReadArrayStart() - if yyl346 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl346, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -5891,12 +6545,12 @@ func (x *PersistentVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys347Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys347Slc - var yyhl347 bool = l >= 0 - for yyj347 := 0; ; yyj347++ { - if yyhl347 { - if yyj347 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -5905,47 +6559,65 @@ func (x *PersistentVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys347Slc = r.DecodeBytes(yys347Slc, true, true) - yys347 := string(yys347Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys347 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ 
= yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv350 := &x.ObjectMeta - yyv350.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = PersistentVolumeSpec{} } else { - yyv351 := &x.Spec - yyv351.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = PersistentVolumeStatus{} } else { - yyv352 := &x.Status - yyv352.CodecDecodeSelf(d) + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys347) - } // end switch yys347 - } // end for yyj347 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -5953,16 +6625,16 @@ func (x *PersistentVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj353 int - var yyb353 bool - var yyhl353 bool = l >= 0 - yyj353++ - if yyhl353 { - yyb353 = yyj353 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb353 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb353 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5970,15 +6642,21 @@ func (x *PersistentVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj353++ - if yyhl353 { - yyb353 = yyj353 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb353 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb353 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5986,32 +6664,44 @@ func (x *PersistentVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj353++ - if yyhl353 { - yyb353 = yyj353 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb353 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb353 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv356 := &x.ObjectMeta - yyv356.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj353++ - if yyhl353 { - yyb353 = yyj353 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb353 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb353 { + if yyb12 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6019,16 +6709,16 @@ func (x *PersistentVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Spec = PersistentVolumeSpec{} } else { - yyv357 := &x.Spec - yyv357.CodecDecodeSelf(d) + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) } - yyj353++ - if yyhl353 { - yyb353 = yyj353 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb353 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb353 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6036,21 +6726,21 @@ func (x *PersistentVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Status = PersistentVolumeStatus{} } else { - yyv358 := &x.Status - yyv358.CodecDecodeSelf(d) + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) } for { - yyj353++ - if yyhl353 { - yyb353 = yyj353 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb353 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb353 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj353-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -6062,53 +6752,56 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym359 := z.EncBinary() - _ = yym359 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep360 := !z.EncBinary() - yy2arr360 := z.EncBasicHandle().StructToArray - var yyq360 [21]bool - _, _, _ = yysep360, yyq360, yy2arr360 - const yyr360 bool = false - yyq360[0] = len(x.Capacity) != 0 - yyq360[1] = x.PersistentVolumeSource.GCEPersistentDisk != nil && x.GCEPersistentDisk != nil - yyq360[2] = x.PersistentVolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil - yyq360[3] = x.PersistentVolumeSource.HostPath != nil && x.HostPath != nil - yyq360[4] = x.PersistentVolumeSource.Glusterfs != nil && x.Glusterfs != nil - yyq360[5] = x.PersistentVolumeSource.NFS != nil && x.NFS != nil - yyq360[6] = x.PersistentVolumeSource.RBD != nil && x.RBD != nil - yyq360[7] = x.PersistentVolumeSource.ISCSI != nil && x.ISCSI != nil - yyq360[8] = x.PersistentVolumeSource.Cinder != nil && x.Cinder != nil - yyq360[9] = x.PersistentVolumeSource.CephFS != nil && x.CephFS != nil - yyq360[10] = x.PersistentVolumeSource.FC != nil && x.FC != nil - yyq360[11] = x.PersistentVolumeSource.Flocker != nil && x.Flocker != nil - yyq360[12] = x.PersistentVolumeSource.FlexVolume != nil && x.FlexVolume != nil - yyq360[13] = x.PersistentVolumeSource.AzureFile != nil && x.AzureFile != nil - yyq360[14] = x.PersistentVolumeSource.VsphereVolume != nil && x.VsphereVolume != nil - yyq360[15] = x.PersistentVolumeSource.Quobyte != nil && x.Quobyte != nil - yyq360[16] = x.PersistentVolumeSource.AzureDisk != nil && x.AzureDisk != nil - yyq360[17] = x.PersistentVolumeSource.PhotonPersistentDisk != nil && x.PhotonPersistentDisk != nil - yyq360[18] = len(x.AccessModes) != 0 - yyq360[19] = x.ClaimRef != nil - yyq360[20] = x.PersistentVolumeReclaimPolicy != "" - var yynn360 int - if yyr360 || yy2arr360 { - r.EncodeArrayStart(21) - } else { - yynn360 = 0 - for _, b := range yyq360 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [24]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Capacity) != 0 + yyq2[1] = x.PersistentVolumeSource.GCEPersistentDisk 
!= nil && x.GCEPersistentDisk != nil + yyq2[2] = x.PersistentVolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil + yyq2[3] = x.PersistentVolumeSource.HostPath != nil && x.HostPath != nil + yyq2[4] = x.PersistentVolumeSource.Glusterfs != nil && x.Glusterfs != nil + yyq2[5] = x.PersistentVolumeSource.NFS != nil && x.NFS != nil + yyq2[6] = x.PersistentVolumeSource.RBD != nil && x.RBD != nil + yyq2[7] = x.PersistentVolumeSource.ISCSI != nil && x.ISCSI != nil + yyq2[8] = x.PersistentVolumeSource.Cinder != nil && x.Cinder != nil + yyq2[9] = x.PersistentVolumeSource.CephFS != nil && x.CephFS != nil + yyq2[10] = x.PersistentVolumeSource.FC != nil && x.FC != nil + yyq2[11] = x.PersistentVolumeSource.Flocker != nil && x.Flocker != nil + yyq2[12] = x.PersistentVolumeSource.FlexVolume != nil && x.FlexVolume != nil + yyq2[13] = x.PersistentVolumeSource.AzureFile != nil && x.AzureFile != nil + yyq2[14] = x.PersistentVolumeSource.VsphereVolume != nil && x.VsphereVolume != nil + yyq2[15] = x.PersistentVolumeSource.Quobyte != nil && x.Quobyte != nil + yyq2[16] = x.PersistentVolumeSource.AzureDisk != nil && x.AzureDisk != nil + yyq2[17] = x.PersistentVolumeSource.PhotonPersistentDisk != nil && x.PhotonPersistentDisk != nil + yyq2[18] = x.PersistentVolumeSource.PortworxVolume != nil && x.PortworxVolume != nil + yyq2[19] = x.PersistentVolumeSource.ScaleIO != nil && x.ScaleIO != nil + yyq2[20] = len(x.AccessModes) != 0 + yyq2[21] = x.ClaimRef != nil + yyq2[22] = x.PersistentVolumeReclaimPolicy != "" + yyq2[23] = x.StorageClassName != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(24) + } else { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn360++ + yynn2++ } } - r.EncodeMapStart(yynn360) - yynn360 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr360 || yy2arr360 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[0] { + if yyq2[0] { if x.Capacity == nil { r.EncodeNil() } else { @@ -6118,7 +6811,7 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq360[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("capacity")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -6129,18 +6822,18 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn362 bool + var yyn6 bool if x.PersistentVolumeSource.GCEPersistentDisk == nil { - yyn362 = true - goto LABEL362 + yyn6 = true + goto LABEL6 } - LABEL362: - if yyr360 || yy2arr360 { - if yyn362 { + LABEL6: + if yyr2 || yy2arr2 { + if yyn6 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[1] { + if yyq2[1] { if x.GCEPersistentDisk == nil { r.EncodeNil() } else { @@ -6151,11 +6844,11 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq360[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn362 { + if yyn6 { r.EncodeNil() } else { if x.GCEPersistentDisk == nil { @@ -6166,18 +6859,18 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn363 bool + var yyn9 bool if x.PersistentVolumeSource.AWSElasticBlockStore == nil { - yyn363 = true - goto LABEL363 + yyn9 = true + goto LABEL9 } - LABEL363: - if yyr360 || yy2arr360 { - if yyn363 { + LABEL9: + if 
yyr2 || yy2arr2 { + if yyn9 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[2] { + if yyq2[2] { if x.AWSElasticBlockStore == nil { r.EncodeNil() } else { @@ -6188,11 +6881,11 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq360[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn363 { + if yyn9 { r.EncodeNil() } else { if x.AWSElasticBlockStore == nil { @@ -6203,18 +6896,18 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn364 bool + var yyn12 bool if x.PersistentVolumeSource.HostPath == nil { - yyn364 = true - goto LABEL364 + yyn12 = true + goto LABEL12 } - LABEL364: - if yyr360 || yy2arr360 { - if yyn364 { + LABEL12: + if yyr2 || yy2arr2 { + if yyn12 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[3] { + if yyq2[3] { if x.HostPath == nil { r.EncodeNil() } else { @@ -6225,11 +6918,11 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq360[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("hostPath")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn364 { + if yyn12 { r.EncodeNil() } else { if x.HostPath == nil { @@ -6240,18 +6933,18 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn365 bool + var yyn15 bool if x.PersistentVolumeSource.Glusterfs == nil { - yyn365 = true - goto LABEL365 + yyn15 = true + goto LABEL15 } - LABEL365: - if yyr360 || yy2arr360 { - if yyn365 { + LABEL15: + if yyr2 || yy2arr2 { + if yyn15 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[4] { + if yyq2[4] { if x.Glusterfs == nil { r.EncodeNil() } else { @@ -6262,11 +6955,11 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq360[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn365 { + if yyn15 { r.EncodeNil() } else { if x.Glusterfs == nil { @@ -6277,18 +6970,18 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn366 bool + var yyn18 bool if x.PersistentVolumeSource.NFS == nil { - yyn366 = true - goto LABEL366 + yyn18 = true + goto LABEL18 } - LABEL366: - if yyr360 || yy2arr360 { - if yyn366 { + LABEL18: + if yyr2 || yy2arr2 { + if yyn18 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[5] { + if yyq2[5] { if x.NFS == nil { r.EncodeNil() } else { @@ -6299,11 +6992,11 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq360[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nfs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn366 { + if yyn18 { r.EncodeNil() } else { if x.NFS == nil { @@ -6314,18 +7007,18 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn367 bool + var yyn21 bool if x.PersistentVolumeSource.RBD == nil { - yyn367 = true - goto LABEL367 + yyn21 = true + goto LABEL21 } - LABEL367: - if yyr360 || 
yy2arr360 { - if yyn367 { + LABEL21: + if yyr2 || yy2arr2 { + if yyn21 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[6] { + if yyq2[6] { if x.RBD == nil { r.EncodeNil() } else { @@ -6336,11 +7029,11 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq360[6] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("rbd")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn367 { + if yyn21 { r.EncodeNil() } else { if x.RBD == nil { @@ -6351,18 +7044,18 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn368 bool + var yyn24 bool if x.PersistentVolumeSource.ISCSI == nil { - yyn368 = true - goto LABEL368 + yyn24 = true + goto LABEL24 } - LABEL368: - if yyr360 || yy2arr360 { - if yyn368 { + LABEL24: + if yyr2 || yy2arr2 { + if yyn24 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[7] { + if yyq2[7] { if x.ISCSI == nil { r.EncodeNil() } else { @@ -6373,11 +7066,11 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq360[7] { + if yyq2[7] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("iscsi")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn368 { + if yyn24 { r.EncodeNil() } else { if x.ISCSI == nil { @@ -6388,18 +7081,18 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn369 bool + var yyn27 bool if x.PersistentVolumeSource.Cinder == nil { - yyn369 = true - goto LABEL369 + yyn27 = true + goto LABEL27 } - LABEL369: - if yyr360 || yy2arr360 { - if yyn369 { + LABEL27: + if yyr2 || yy2arr2 { + if yyn27 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[8] { + if yyq2[8] { if x.Cinder == nil { r.EncodeNil() } else { @@ -6410,11 +7103,11 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq360[8] { + if yyq2[8] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("cinder")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn369 { + if yyn27 { r.EncodeNil() } else { if x.Cinder == nil { @@ -6425,18 +7118,18 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn370 bool + var yyn30 bool if x.PersistentVolumeSource.CephFS == nil { - yyn370 = true - goto LABEL370 + yyn30 = true + goto LABEL30 } - LABEL370: - if yyr360 || yy2arr360 { - if yyn370 { + LABEL30: + if yyr2 || yy2arr2 { + if yyn30 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[9] { + if yyq2[9] { if x.CephFS == nil { r.EncodeNil() } else { @@ -6447,11 +7140,11 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq360[9] { + if yyq2[9] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("cephfs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn370 { + if yyn30 { r.EncodeNil() } else { if x.CephFS == nil { @@ -6462,18 +7155,18 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn371 bool + var yyn33 bool if x.PersistentVolumeSource.FC == nil { - yyn371 = true - goto LABEL371 + yyn33 = true + goto LABEL33 } - LABEL371: - if yyr360 || yy2arr360 { - if yyn371 { + 
LABEL33: + if yyr2 || yy2arr2 { + if yyn33 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[10] { + if yyq2[10] { if x.FC == nil { r.EncodeNil() } else { @@ -6484,11 +7177,11 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq360[10] { + if yyq2[10] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("fc")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn371 { + if yyn33 { r.EncodeNil() } else { if x.FC == nil { @@ -6499,18 +7192,18 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn372 bool + var yyn36 bool if x.PersistentVolumeSource.Flocker == nil { - yyn372 = true - goto LABEL372 + yyn36 = true + goto LABEL36 } - LABEL372: - if yyr360 || yy2arr360 { - if yyn372 { + LABEL36: + if yyr2 || yy2arr2 { + if yyn36 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[11] { + if yyq2[11] { if x.Flocker == nil { r.EncodeNil() } else { @@ -6521,11 +7214,11 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq360[11] { + if yyq2[11] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("flocker")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn372 { + if yyn36 { r.EncodeNil() } else { if x.Flocker == nil { @@ -6536,18 +7229,18 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn373 bool + var yyn39 bool if x.PersistentVolumeSource.FlexVolume == nil { - yyn373 = true - goto LABEL373 + yyn39 = true + goto LABEL39 } - LABEL373: - if yyr360 || yy2arr360 { - if yyn373 { + LABEL39: + if yyr2 || yy2arr2 { + if yyn39 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[12] { + if yyq2[12] { if x.FlexVolume == nil { r.EncodeNil() } else { @@ -6558,11 +7251,11 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq360[12] { + if yyq2[12] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn373 { + if yyn39 { r.EncodeNil() } else { if x.FlexVolume == nil { @@ -6573,18 +7266,18 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn374 bool + var yyn42 bool if x.PersistentVolumeSource.AzureFile == nil { - yyn374 = true - goto LABEL374 + yyn42 = true + goto LABEL42 } - LABEL374: - if yyr360 || yy2arr360 { - if yyn374 { + LABEL42: + if yyr2 || yy2arr2 { + if yyn42 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[13] { + if yyq2[13] { if x.AzureFile == nil { r.EncodeNil() } else { @@ -6595,11 +7288,11 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq360[13] { + if yyq2[13] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("azureFile")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn374 { + if yyn42 { r.EncodeNil() } else { if x.AzureFile == nil { @@ -6610,18 +7303,18 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn375 bool + var yyn45 bool if x.PersistentVolumeSource.VsphereVolume == nil { - yyn375 = true - goto LABEL375 + yyn45 = true + goto LABEL45 } - LABEL375: - if 
yyr360 || yy2arr360 { - if yyn375 { + LABEL45: + if yyr2 || yy2arr2 { + if yyn45 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[14] { + if yyq2[14] { if x.VsphereVolume == nil { r.EncodeNil() } else { @@ -6632,11 +7325,11 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq360[14] { + if yyq2[14] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn375 { + if yyn45 { r.EncodeNil() } else { if x.VsphereVolume == nil { @@ -6647,18 +7340,18 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn376 bool + var yyn48 bool if x.PersistentVolumeSource.Quobyte == nil { - yyn376 = true - goto LABEL376 + yyn48 = true + goto LABEL48 } - LABEL376: - if yyr360 || yy2arr360 { - if yyn376 { + LABEL48: + if yyr2 || yy2arr2 { + if yyn48 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[15] { + if yyq2[15] { if x.Quobyte == nil { r.EncodeNil() } else { @@ -6669,11 +7362,11 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq360[15] { + if yyq2[15] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("quobyte")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn376 { + if yyn48 { r.EncodeNil() } else { if x.Quobyte == nil { @@ -6684,18 +7377,18 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn377 bool + var yyn51 bool if x.PersistentVolumeSource.AzureDisk == nil { - yyn377 = true - goto LABEL377 + yyn51 = true + goto LABEL51 } - LABEL377: - if yyr360 || yy2arr360 { - if yyn377 { + LABEL51: + if yyr2 || yy2arr2 { + if yyn51 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[16] { + if yyq2[16] { if x.AzureDisk == nil { r.EncodeNil() } else { @@ -6706,11 +7399,11 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq360[16] { + if yyq2[16] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn377 { + if yyn51 { r.EncodeNil() } else { if x.AzureDisk == nil { @@ -6721,18 +7414,18 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn378 bool + var yyn54 bool if x.PersistentVolumeSource.PhotonPersistentDisk == nil { - yyn378 = true - goto LABEL378 + yyn54 = true + goto LABEL54 } - LABEL378: - if yyr360 || yy2arr360 { - if yyn378 { + LABEL54: + if yyr2 || yy2arr2 { + if yyn54 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[17] { + if yyq2[17] { if x.PhotonPersistentDisk == nil { r.EncodeNil() } else { @@ -6743,11 +7436,11 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq360[17] { + if yyq2[17] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn378 { + if yyn54 { r.EncodeNil() } else { if x.PhotonPersistentDisk == nil { @@ -6758,14 +7451,88 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr360 || yy2arr360 { + var yyn57 bool + if 
x.PersistentVolumeSource.PortworxVolume == nil { + yyn57 = true + goto LABEL57 + } + LABEL57: + if yyr2 || yy2arr2 { + if yyn57 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[18] { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[18] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("portworxVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn57 { + r.EncodeNil() + } else { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } + } + } + var yyn60 bool + if x.PersistentVolumeSource.ScaleIO == nil { + yyn60 = true + goto LABEL60 + } + LABEL60: + if yyr2 || yy2arr2 { + if yyn60 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[19] { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[19] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleIO")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn60 { + r.EncodeNil() + } else { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } + } + } + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[18] { + if yyq2[20] { if x.AccessModes == nil { r.EncodeNil() } else { - yym380 := z.EncBinary() - _ = yym380 + yym64 := z.EncBinary() + _ = yym64 if false { } else { h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) @@ -6775,15 +7542,15 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq360[18] { + if yyq2[20] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("accessModes")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.AccessModes == nil { r.EncodeNil() } else { - yym381 := z.EncBinary() - _ = yym381 + yym65 := z.EncBinary() + _ = yym65 if false { } else { h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) @@ -6791,9 +7558,9 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr360 || yy2arr360 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[19] { + if yyq2[21] { if x.ClaimRef == nil { r.EncodeNil() } else { @@ -6803,7 +7570,7 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq360[19] { + if yyq2[21] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("claimRef")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -6814,22 +7581,47 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr360 || yy2arr360 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq360[20] { + if yyq2[22] { x.PersistentVolumeReclaimPolicy.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq360[20] { + if yyq2[22] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeReclaimPolicy")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) x.PersistentVolumeReclaimPolicy.CodecEncodeSelf(e) } } - if yyr360 || yy2arr360 { + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[23] { + yym73 := z.EncBinary() + _ = yym73 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StorageClassName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[23] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("storageClassName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym74 := z.EncBinary() + _ = yym74 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StorageClassName)) + } + } + } + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -6842,25 +7634,25 @@ func (x *PersistentVolumeSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym384 := z.DecBinary() - _ = yym384 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct385 := r.ContainerType() - if yyct385 == codecSelferValueTypeMap1234 { - yyl385 := r.ReadMapStart() - if yyl385 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl385, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct385 == codecSelferValueTypeArray1234 { - yyl385 := r.ReadArrayStart() - if yyl385 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl385, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -6872,12 +7664,12 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys386Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys386Slc - var yyhl386 bool = l >= 0 - for yyj386 := 0; ; yyj386++ { - if yyhl386 { - if yyj386 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -6886,16 +7678,16 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decode } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys386Slc = r.DecodeBytes(yys386Slc, true, true) - yys386 := string(yys386Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys386 { + switch yys3 { case "capacity": if r.TryDecodeAsNil() { x.Capacity = nil } else { - yyv387 := &x.Capacity - yyv387.CodecDecodeSelf(d) + yyv4 := &x.Capacity + yyv4.CodecDecodeSelf(d) } case "gcePersistentDisk": if x.PersistentVolumeSource.GCEPersistentDisk == nil { @@ -7135,16 +7927,44 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decode } x.PhotonPersistentDisk.CodecDecodeSelf(d) } + case "portworxVolume": + if x.PersistentVolumeSource.PortworxVolume == nil { + x.PersistentVolumeSource.PortworxVolume = 
new(PortworxVolumeSource) + } + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + case "scaleIO": + if x.PersistentVolumeSource.ScaleIO == nil { + x.PersistentVolumeSource.ScaleIO = new(ScaleIOVolumeSource) + } + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } case "accessModes": if r.TryDecodeAsNil() { x.AccessModes = nil } else { - yyv405 := &x.AccessModes - yym406 := z.DecBinary() - _ = yym406 + yyv24 := &x.AccessModes + yym25 := z.DecBinary() + _ = yym25 if false { } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv405), d) + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv24), d) } } case "claimRef": @@ -7162,12 +7982,25 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.PersistentVolumeReclaimPolicy = "" } else { - x.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(r.DecodeString()) + yyv27 := &x.PersistentVolumeReclaimPolicy + yyv27.CodecDecodeSelf(d) + } + case "storageClassName": + if r.TryDecodeAsNil() { + x.StorageClassName = "" + } else { + yyv28 := &x.StorageClassName + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*string)(yyv28)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys386) - } // end switch yys386 - } // end for yyj386 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -7175,16 +8008,16 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj409 int - var yyb409 bool - var yyhl409 bool = l >= 0 - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + var yyj30 int + var yyb30 bool + var yyhl30 bool = l >= 0 + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7192,19 +8025,19 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.Capacity = nil } else { - yyv410 := &x.Capacity - yyv410.CodecDecodeSelf(d) + yyv31 := &x.Capacity + yyv31.CodecDecodeSelf(d) } if x.PersistentVolumeSource.GCEPersistentDisk == nil { x.PersistentVolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7222,13 +8055,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.AWSElasticBlockStore == nil { x.PersistentVolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7246,13 +8079,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l 
int, d *codec1978.Deco if x.PersistentVolumeSource.HostPath == nil { x.PersistentVolumeSource.HostPath = new(HostPathVolumeSource) } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7270,13 +8103,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.Glusterfs == nil { x.PersistentVolumeSource.Glusterfs = new(GlusterfsVolumeSource) } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7294,13 +8127,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.NFS == nil { x.PersistentVolumeSource.NFS = new(NFSVolumeSource) } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7318,13 +8151,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.RBD == nil { x.PersistentVolumeSource.RBD = new(RBDVolumeSource) } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7342,13 +8175,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.ISCSI == nil { x.PersistentVolumeSource.ISCSI = new(ISCSIVolumeSource) } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7366,13 +8199,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.Cinder == nil { x.PersistentVolumeSource.Cinder = new(CinderVolumeSource) } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7390,13 +8223,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.CephFS == nil { x.PersistentVolumeSource.CephFS = new(CephFSVolumeSource) } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7414,13 +8247,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.FC == nil { x.PersistentVolumeSource.FC = new(FCVolumeSource) } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) 
return } @@ -7438,13 +8271,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.Flocker == nil { x.PersistentVolumeSource.Flocker = new(FlockerVolumeSource) } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7462,13 +8295,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.FlexVolume == nil { x.PersistentVolumeSource.FlexVolume = new(FlexVolumeSource) } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7486,13 +8319,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.AzureFile == nil { x.PersistentVolumeSource.AzureFile = new(AzureFileVolumeSource) } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7510,13 +8343,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.VsphereVolume == nil { x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7534,13 +8367,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.Quobyte == nil { x.PersistentVolumeSource.Quobyte = new(QuobyteVolumeSource) } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7558,13 +8391,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.AzureDisk == nil { x.PersistentVolumeSource.AzureDisk = new(AzureDiskVolumeSource) } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7582,13 +8415,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.PhotonPersistentDisk == nil { x.PersistentVolumeSource.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7603,13 +8436,61 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco } x.PhotonPersistentDisk.CodecDecodeSelf(d) } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + if 
x.PersistentVolumeSource.PortworxVolume == nil { + x.PersistentVolumeSource.PortworxVolume = new(PortworxVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.ScaleIO == nil { + x.PersistentVolumeSource.ScaleIO = new(ScaleIOVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7617,21 +8498,21 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.AccessModes = nil } else { - yyv428 := &x.AccessModes - yym429 := z.DecBinary() - _ = yym429 + yyv51 := &x.AccessModes + yym52 := z.DecBinary() + _ = yym52 if false { } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv428), d) + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv51), d) } } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7646,13 +8527,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco } x.ClaimRef.CodecDecodeSelf(d) } - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7660,20 +8541,43 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.PersistentVolumeReclaimPolicy = "" } else { - x.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(r.DecodeString()) + yyv54 := &x.PersistentVolumeReclaimPolicy + yyv54.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StorageClassName = "" + } else { + yyv55 := &x.StorageClassName + yym56 := z.DecBinary() + _ = yym56 + if false { + } else { + *((*string)(yyv55)) = r.DecodeString() + } } for { - yyj409++ - if yyhl409 { - yyb409 = yyj409 > l + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l } else { - yyb409 = r.CheckBreak() + yyb30 = r.CheckBreak() } - if yyb409 { + if yyb30 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj409-1, "") + z.DecStructFieldNotFound(yyj30-1, "") } 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -7682,8 +8586,8 @@ func (x PersistentVolumeReclaimPolicy) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym432 := z.EncBinary() - _ = yym432 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -7695,8 +8599,8 @@ func (x *PersistentVolumeReclaimPolicy) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym433 := z.DecBinary() - _ = yym433 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -7711,52 +8615,52 @@ func (x *PersistentVolumeStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym434 := z.EncBinary() - _ = yym434 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep435 := !z.EncBinary() - yy2arr435 := z.EncBasicHandle().StructToArray - var yyq435 [3]bool - _, _, _ = yysep435, yyq435, yy2arr435 - const yyr435 bool = false - yyq435[0] = x.Phase != "" - yyq435[1] = x.Message != "" - yyq435[2] = x.Reason != "" - var yynn435 int - if yyr435 || yy2arr435 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Phase != "" + yyq2[1] = x.Message != "" + yyq2[2] = x.Reason != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn435 = 0 - for _, b := range yyq435 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn435++ + yynn2++ } } - r.EncodeMapStart(yynn435) - yynn435 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr435 || yy2arr435 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq435[0] { + if yyq2[0] { x.Phase.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq435[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("phase")) z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Phase.CodecEncodeSelf(e) } } - if yyr435 || yy2arr435 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq435[1] { - yym438 := z.EncBinary() - _ = yym438 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) @@ -7765,23 +8669,23 @@ func (x *PersistentVolumeStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq435[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("message")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym439 := z.EncBinary() - _ = yym439 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) } } } - if yyr435 || yy2arr435 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq435[2] { - yym441 := z.EncBinary() - _ = yym441 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) @@ -7790,19 +8694,19 @@ func (x *PersistentVolumeStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq435[2] { + if yyq2[2] { 
z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("reason")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym442 := z.EncBinary() - _ = yym442 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) } } } - if yyr435 || yy2arr435 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -7815,25 +8719,25 @@ func (x *PersistentVolumeStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym443 := z.DecBinary() - _ = yym443 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct444 := r.ContainerType() - if yyct444 == codecSelferValueTypeMap1234 { - yyl444 := r.ReadMapStart() - if yyl444 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl444, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct444 == codecSelferValueTypeArray1234 { - yyl444 := r.ReadArrayStart() - if yyl444 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl444, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -7845,12 +8749,12 @@ func (x *PersistentVolumeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Deco var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys445Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys445Slc - var yyhl445 bool = l >= 0 - for yyj445 := 0; ; yyj445++ { - if yyhl445 { - if yyj445 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -7859,32 +8763,45 @@ func (x *PersistentVolumeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Deco } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys445Slc = r.DecodeBytes(yys445Slc, true, true) - yys445 := string(yys445Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys445 { + switch yys3 { case "phase": if r.TryDecodeAsNil() { x.Phase = "" } else { - x.Phase = PersistentVolumePhase(r.DecodeString()) + yyv4 := &x.Phase + yyv4.CodecDecodeSelf(d) } case "message": if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv5 := &x.Message + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } } case "reason": if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv7 := &x.Reason + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys445) - } // end switch yys445 - } // end for yyj445 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -7892,16 +8809,16 @@ func (x *PersistentVolumeStatus) codecDecodeSelfFromArray(l int, d *codec1978.De var h codecSelfer1234 
z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj449 int - var yyb449 bool - var yyhl449 bool = l >= 0 - yyj449++ - if yyhl449 { - yyb449 = yyj449 > l + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb449 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb449 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7909,15 +8826,16 @@ func (x *PersistentVolumeStatus) codecDecodeSelfFromArray(l int, d *codec1978.De if r.TryDecodeAsNil() { x.Phase = "" } else { - x.Phase = PersistentVolumePhase(r.DecodeString()) + yyv10 := &x.Phase + yyv10.CodecDecodeSelf(d) } - yyj449++ - if yyhl449 { - yyb449 = yyj449 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb449 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb449 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7925,15 +8843,21 @@ func (x *PersistentVolumeStatus) codecDecodeSelfFromArray(l int, d *codec1978.De if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv11 := &x.Message + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } - yyj449++ - if yyhl449 { - yyb449 = yyj449 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb449 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb449 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7941,20 +8865,26 @@ func (x *PersistentVolumeStatus) codecDecodeSelfFromArray(l int, d *codec1978.De if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv13 := &x.Reason + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } for { - yyj449++ - if yyhl449 { - yyb449 = yyj449 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb449 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb449 { + if yyb9 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj449-1, "") + z.DecStructFieldNotFound(yyj9-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -7966,37 +8896,37 @@ func (x *PersistentVolumeList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym453 := z.EncBinary() - _ = yym453 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep454 := !z.EncBinary() - yy2arr454 := z.EncBasicHandle().StructToArray - var yyq454 [4]bool - _, _, _ = yysep454, yyq454, yy2arr454 - const yyr454 bool = false - yyq454[0] = x.Kind != "" - yyq454[1] = x.APIVersion != "" - yyq454[2] = true - var yynn454 int - if yyr454 || yy2arr454 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn454 = 1 - for _, b := range yyq454 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn454++ + yynn2++ } } - r.EncodeMapStart(yynn454) - yynn454 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr454 || yy2arr454 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq454[0] { - yym456 := z.EncBinary() - _ = yym456 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -8005,23 +8935,23 
@@ func (x *PersistentVolumeList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq454[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym457 := z.EncBinary() - _ = yym457 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr454 || yy2arr454 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq454[1] { - yym459 := z.EncBinary() - _ = yym459 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -8030,54 +8960,54 @@ func (x *PersistentVolumeList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq454[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym460 := z.EncBinary() - _ = yym460 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr454 || yy2arr454 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq454[2] { - yy462 := &x.ListMeta - yym463 := z.EncBinary() - _ = yym463 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy462) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy462) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq454[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy464 := &x.ListMeta - yym465 := z.EncBinary() - _ = yym465 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy464) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy464) + z.EncFallback(yy12) } } } - if yyr454 || yy2arr454 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym467 := z.EncBinary() - _ = yym467 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSlicePersistentVolume(([]PersistentVolume)(x.Items), e) @@ -8090,15 +9020,15 @@ func (x *PersistentVolumeList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym468 := z.EncBinary() - _ = yym468 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSlicePersistentVolume(([]PersistentVolume)(x.Items), e) } } } - if yyr454 || yy2arr454 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -8111,25 +9041,25 @@ func (x *PersistentVolumeList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym469 := z.DecBinary() - _ = yym469 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct470 := r.ContainerType() - if yyct470 == codecSelferValueTypeMap1234 { - yyl470 := r.ReadMapStart() - if yyl470 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { 
+ yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl470, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct470 == codecSelferValueTypeArray1234 { - yyl470 := r.ReadArrayStart() - if yyl470 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl470, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -8141,12 +9071,12 @@ func (x *PersistentVolumeList) codecDecodeSelfFromMap(l int, d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys471Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys471Slc - var yyhl471 bool = l >= 0 - for yyj471 := 0; ; yyj471++ { - if yyhl471 { - if yyj471 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -8155,51 +9085,63 @@ func (x *PersistentVolumeList) codecDecodeSelfFromMap(l int, d *codec1978.Decode } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys471Slc = r.DecodeBytes(yys471Slc, true, true) - yys471 := string(yys471Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys471 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv474 := &x.ListMeta - yym475 := z.DecBinary() - _ = yym475 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv474) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv474, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv476 := &x.Items - yym477 := z.DecBinary() - _ = yym477 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSlicePersistentVolume((*[]PersistentVolume)(yyv476), d) + h.decSlicePersistentVolume((*[]PersistentVolume)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys471) - } // end switch yys471 - } // end for yyj471 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -8207,16 +9149,16 @@ func (x *PersistentVolumeList) codecDecodeSelfFromArray(l int, d *codec1978.Deco var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj478 int - var yyb478 bool - var yyhl478 bool = l >= 0 - yyj478++ - if yyhl478 { - yyb478 = yyj478 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb478 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb478 { + if yyb12 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -8224,15 +9166,21 @@ func (x *PersistentVolumeList) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj478++ - if yyhl478 { - yyb478 = yyj478 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb478 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb478 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -8240,38 +9188,44 @@ func (x *PersistentVolumeList) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj478++ - if yyhl478 { - yyb478 = yyj478 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb478 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb478 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv481 := &x.ListMeta - yym482 := z.DecBinary() - _ = yym482 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv481) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv481, false) + z.DecFallback(yyv17, false) } } - yyj478++ - if yyhl478 { - yyb478 = yyj478 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb478 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb478 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -8279,26 +9233,26 @@ func (x *PersistentVolumeList) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.Items = nil } else { - yyv483 := &x.Items - yym484 := z.DecBinary() - _ = yym484 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSlicePersistentVolume((*[]PersistentVolume)(yyv483), d) + h.decSlicePersistentVolume((*[]PersistentVolume)(yyv19), d) } } for { - yyj478++ - if yyhl478 { - yyb478 = yyj478 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb478 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb478 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj478-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -8310,39 +9264,39 @@ func (x *PersistentVolumeClaim) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym485 := z.EncBinary() - _ = yym485 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep486 := !z.EncBinary() - yy2arr486 := z.EncBasicHandle().StructToArray - var yyq486 [5]bool - _, _, _ = yysep486, yyq486, yy2arr486 - const yyr486 bool = false - yyq486[0] = x.Kind != "" - yyq486[1] = x.APIVersion != "" - yyq486[2] = true - yyq486[3] = true - yyq486[4] = true - var yynn486 int - if yyr486 || yy2arr486 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = 
x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn486 = 0 - for _, b := range yyq486 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn486++ + yynn2++ } } - r.EncodeMapStart(yynn486) - yynn486 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr486 || yy2arr486 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq486[0] { - yym488 := z.EncBinary() - _ = yym488 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -8351,23 +9305,23 @@ func (x *PersistentVolumeClaim) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq486[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym489 := z.EncBinary() - _ = yym489 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr486 || yy2arr486 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq486[1] { - yym491 := z.EncBinary() - _ = yym491 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -8376,70 +9330,82 @@ func (x *PersistentVolumeClaim) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq486[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym492 := z.EncBinary() - _ = yym492 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr486 || yy2arr486 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq486[2] { - yy494 := &x.ObjectMeta - yy494.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq486[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy495 := &x.ObjectMeta - yy495.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr486 || yy2arr486 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq486[3] { - yy497 := &x.Spec - yy497.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq486[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy498 := &x.Spec - yy498.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } } - if yyr486 || yy2arr486 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq486[4] { - yy500 := &x.Status - yy500.CodecEncodeSelf(e) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) 
} else { r.EncodeNil() } } else { - if yyq486[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy501 := &x.Status - yy501.CodecEncodeSelf(e) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } - if yyr486 || yy2arr486 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -8452,25 +9418,25 @@ func (x *PersistentVolumeClaim) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym502 := z.DecBinary() - _ = yym502 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct503 := r.ContainerType() - if yyct503 == codecSelferValueTypeMap1234 { - yyl503 := r.ReadMapStart() - if yyl503 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl503, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct503 == codecSelferValueTypeArray1234 { - yyl503 := r.ReadArrayStart() - if yyl503 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl503, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -8482,12 +9448,12 @@ func (x *PersistentVolumeClaim) codecDecodeSelfFromMap(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys504Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys504Slc - var yyhl504 bool = l >= 0 - for yyj504 := 0; ; yyj504++ { - if yyhl504 { - if yyj504 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -8496,47 +9462,65 @@ func (x *PersistentVolumeClaim) codecDecodeSelfFromMap(l int, d *codec1978.Decod } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys504Slc = r.DecodeBytes(yys504Slc, true, true) - yys504 := string(yys504Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys504 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv507 := &x.ObjectMeta - yyv507.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = PersistentVolumeClaimSpec{} } else { - yyv508 := &x.Spec - yyv508.CodecDecodeSelf(d) + yyv10 := &x.Spec + 
yyv10.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = PersistentVolumeClaimStatus{} } else { - yyv509 := &x.Status - yyv509.CodecDecodeSelf(d) + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys504) - } // end switch yys504 - } // end for yyj504 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -8544,16 +9528,16 @@ func (x *PersistentVolumeClaim) codecDecodeSelfFromArray(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj510 int - var yyb510 bool - var yyhl510 bool = l >= 0 - yyj510++ - if yyhl510 { - yyb510 = yyj510 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb510 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb510 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -8561,15 +9545,21 @@ func (x *PersistentVolumeClaim) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj510++ - if yyhl510 { - yyb510 = yyj510 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb510 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb510 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -8577,32 +9567,44 @@ func (x *PersistentVolumeClaim) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj510++ - if yyhl510 { - yyb510 = yyj510 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb510 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb510 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv513 := &x.ObjectMeta - yyv513.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj510++ - if yyhl510 { - yyb510 = yyj510 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb510 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb510 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -8610,16 +9612,16 @@ func (x *PersistentVolumeClaim) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.Spec = PersistentVolumeClaimSpec{} } else { - yyv514 := &x.Spec - yyv514.CodecDecodeSelf(d) + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) } - yyj510++ - if yyhl510 { - yyb510 = yyj510 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb510 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb510 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -8627,21 +9629,21 @@ func (x *PersistentVolumeClaim) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.Status = PersistentVolumeClaimStatus{} } else { - yyv515 := &x.Status - yyv515.CodecDecodeSelf(d) + yyv20 := &x.Status 
+ yyv20.CodecDecodeSelf(d) } for { - yyj510++ - if yyhl510 { - yyb510 = yyj510 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb510 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb510 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj510-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -8653,37 +9655,37 @@ func (x *PersistentVolumeClaimList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym516 := z.EncBinary() - _ = yym516 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep517 := !z.EncBinary() - yy2arr517 := z.EncBasicHandle().StructToArray - var yyq517 [4]bool - _, _, _ = yysep517, yyq517, yy2arr517 - const yyr517 bool = false - yyq517[0] = x.Kind != "" - yyq517[1] = x.APIVersion != "" - yyq517[2] = true - var yynn517 int - if yyr517 || yy2arr517 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn517 = 1 - for _, b := range yyq517 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn517++ + yynn2++ } } - r.EncodeMapStart(yynn517) - yynn517 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr517 || yy2arr517 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq517[0] { - yym519 := z.EncBinary() - _ = yym519 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -8692,23 +9694,23 @@ func (x *PersistentVolumeClaimList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq517[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym520 := z.EncBinary() - _ = yym520 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr517 || yy2arr517 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq517[1] { - yym522 := z.EncBinary() - _ = yym522 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -8717,54 +9719,54 @@ func (x *PersistentVolumeClaimList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq517[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym523 := z.EncBinary() - _ = yym523 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr517 || yy2arr517 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq517[2] { - yy525 := &x.ListMeta - yym526 := z.EncBinary() - _ = yym526 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy525) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy525) + z.EncFallback(yy10) } } else { 
r.EncodeNil() } } else { - if yyq517[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy527 := &x.ListMeta - yym528 := z.EncBinary() - _ = yym528 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy527) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy527) + z.EncFallback(yy12) } } } - if yyr517 || yy2arr517 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym530 := z.EncBinary() - _ = yym530 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSlicePersistentVolumeClaim(([]PersistentVolumeClaim)(x.Items), e) @@ -8777,15 +9779,15 @@ func (x *PersistentVolumeClaimList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym531 := z.EncBinary() - _ = yym531 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSlicePersistentVolumeClaim(([]PersistentVolumeClaim)(x.Items), e) } } } - if yyr517 || yy2arr517 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -8798,25 +9800,25 @@ func (x *PersistentVolumeClaimList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym532 := z.DecBinary() - _ = yym532 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct533 := r.ContainerType() - if yyct533 == codecSelferValueTypeMap1234 { - yyl533 := r.ReadMapStart() - if yyl533 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl533, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct533 == codecSelferValueTypeArray1234 { - yyl533 := r.ReadArrayStart() - if yyl533 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl533, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -8828,12 +9830,12 @@ func (x *PersistentVolumeClaimList) codecDecodeSelfFromMap(l int, d *codec1978.D var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys534Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys534Slc - var yyhl534 bool = l >= 0 - for yyj534 := 0; ; yyj534++ { - if yyhl534 { - if yyj534 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -8842,51 +9844,63 @@ func (x *PersistentVolumeClaimList) codecDecodeSelfFromMap(l int, d *codec1978.D } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys534Slc = r.DecodeBytes(yys534Slc, true, true) - yys534 := string(yys534Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys534 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = 
yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv537 := &x.ListMeta - yym538 := z.DecBinary() - _ = yym538 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv537) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv537, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv539 := &x.Items - yym540 := z.DecBinary() - _ = yym540 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv539), d) + h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys534) - } // end switch yys534 - } // end for yyj534 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -8894,16 +9908,16 @@ func (x *PersistentVolumeClaimList) codecDecodeSelfFromArray(l int, d *codec1978 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj541 int - var yyb541 bool - var yyhl541 bool = l >= 0 - yyj541++ - if yyhl541 { - yyb541 = yyj541 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb541 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb541 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -8911,15 +9925,21 @@ func (x *PersistentVolumeClaimList) codecDecodeSelfFromArray(l int, d *codec1978 if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj541++ - if yyhl541 { - yyb541 = yyj541 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb541 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb541 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -8927,38 +9947,44 @@ func (x *PersistentVolumeClaimList) codecDecodeSelfFromArray(l int, d *codec1978 if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj541++ - if yyhl541 { - yyb541 = yyj541 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb541 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb541 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv544 := &x.ListMeta - yym545 := z.DecBinary() - _ = yym545 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv544) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv544, false) + z.DecFallback(yyv17, false) } } - yyj541++ - if yyhl541 { - yyb541 = yyj541 > 
l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb541 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb541 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -8966,26 +9992,26 @@ func (x *PersistentVolumeClaimList) codecDecodeSelfFromArray(l int, d *codec1978 if r.TryDecodeAsNil() { x.Items = nil } else { - yyv546 := &x.Items - yym547 := z.DecBinary() - _ = yym547 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv546), d) + h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv19), d) } } for { - yyj541++ - if yyhl541 { - yyb541 = yyj541 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb541 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb541 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj541-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -8997,41 +10023,42 @@ func (x *PersistentVolumeClaimSpec) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym548 := z.EncBinary() - _ = yym548 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep549 := !z.EncBinary() - yy2arr549 := z.EncBasicHandle().StructToArray - var yyq549 [4]bool - _, _, _ = yysep549, yyq549, yy2arr549 - const yyr549 bool = false - yyq549[0] = len(x.AccessModes) != 0 - yyq549[1] = x.Selector != nil - yyq549[2] = true - yyq549[3] = x.VolumeName != "" - var yynn549 int - if yyr549 || yy2arr549 { - r.EncodeArrayStart(4) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.AccessModes) != 0 + yyq2[1] = x.Selector != nil + yyq2[2] = true + yyq2[3] = x.VolumeName != "" + yyq2[4] = x.StorageClassName != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) } else { - yynn549 = 0 - for _, b := range yyq549 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn549++ + yynn2++ } } - r.EncodeMapStart(yynn549) - yynn549 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr549 || yy2arr549 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq549[0] { + if yyq2[0] { if x.AccessModes == nil { r.EncodeNil() } else { - yym551 := z.EncBinary() - _ = yym551 + yym4 := z.EncBinary() + _ = yym4 if false { } else { h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) @@ -9041,15 +10068,15 @@ func (x *PersistentVolumeClaimSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq549[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("accessModes")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.AccessModes == nil { r.EncodeNil() } else { - yym552 := z.EncBinary() - _ = yym552 + yym5 := z.EncBinary() + _ = yym5 if false { } else { h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) @@ -9057,14 +10084,14 @@ func (x *PersistentVolumeClaimSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr549 || yy2arr549 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq549[1] { + if yyq2[1] { if x.Selector == nil { r.EncodeNil() } else { - yym554 := z.EncBinary() - _ = yym554 + yym7 := z.EncBinary() + _ = yym7 
if false { } else if z.HasExtensions() && z.EncExt(x.Selector) { } else { @@ -9075,15 +10102,15 @@ func (x *PersistentVolumeClaimSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq549[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("selector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Selector == nil { r.EncodeNil() } else { - yym555 := z.EncBinary() - _ = yym555 + yym8 := z.EncBinary() + _ = yym8 if false { } else if z.HasExtensions() && z.EncExt(x.Selector) { } else { @@ -9092,28 +10119,28 @@ func (x *PersistentVolumeClaimSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr549 || yy2arr549 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq549[2] { - yy557 := &x.Resources - yy557.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.Resources + yy10.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq549[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("resources")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy558 := &x.Resources - yy558.CodecEncodeSelf(e) + yy12 := &x.Resources + yy12.CodecEncodeSelf(e) } } - if yyr549 || yy2arr549 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq549[3] { - yym560 := z.EncBinary() - _ = yym560 + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) @@ -9122,19 +10149,54 @@ func (x *PersistentVolumeClaimSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq549[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("volumeName")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym561 := z.EncBinary() - _ = yym561 + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) } } } - if yyr549 || yy2arr549 { + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.StorageClassName == nil { + r.EncodeNil() + } else { + yy18 := *x.StorageClassName + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy18)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("storageClassName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.StorageClassName == nil { + r.EncodeNil() + } else { + yy20 := *x.StorageClassName + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy20)) + } + } + } + } + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -9147,25 +10209,25 @@ func (x *PersistentVolumeClaimSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym562 := z.DecBinary() - _ = yym562 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct563 := r.ContainerType() - if yyct563 == codecSelferValueTypeMap1234 { - yyl563 := r.ReadMapStart() - if yyl563 == 0 { + yyct2 := r.ContainerType() + if yyct2 
== codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl563, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct563 == codecSelferValueTypeArray1234 { - yyl563 := r.ReadArrayStart() - if yyl563 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl563, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -9177,12 +10239,12 @@ func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromMap(l int, d *codec1978.D var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys564Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys564Slc - var yyhl564 bool = l >= 0 - for yyj564 := 0; ; yyj564++ { - if yyhl564 { - if yyj564 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -9191,20 +10253,20 @@ func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromMap(l int, d *codec1978.D } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys564Slc = r.DecodeBytes(yys564Slc, true, true) - yys564 := string(yys564Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys564 { + switch yys3 { case "accessModes": if r.TryDecodeAsNil() { x.AccessModes = nil } else { - yyv565 := &x.AccessModes - yym566 := z.DecBinary() - _ = yym566 + yyv4 := &x.AccessModes + yym5 := z.DecBinary() + _ = yym5 if false { } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv565), d) + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv4), d) } } case "selector": @@ -9214,10 +10276,10 @@ func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromMap(l int, d *codec1978.D } } else { if x.Selector == nil { - x.Selector = new(pkg2_unversioned.LabelSelector) + x.Selector = new(pkg2_v1.LabelSelector) } - yym568 := z.DecBinary() - _ = yym568 + yym7 := z.DecBinary() + _ = yym7 if false { } else if z.HasExtensions() && z.DecExt(x.Selector) { } else { @@ -9228,19 +10290,41 @@ func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromMap(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Resources = ResourceRequirements{} } else { - yyv569 := &x.Resources - yyv569.CodecDecodeSelf(d) + yyv8 := &x.Resources + yyv8.CodecDecodeSelf(d) } case "volumeName": if r.TryDecodeAsNil() { x.VolumeName = "" } else { - x.VolumeName = string(r.DecodeString()) + yyv9 := &x.VolumeName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + case "storageClassName": + if r.TryDecodeAsNil() { + if x.StorageClassName != nil { + x.StorageClassName = nil + } + } else { + if x.StorageClassName == nil { + x.StorageClassName = new(string) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(x.StorageClassName)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys564) - } // end switch yys564 - } // end for yyj564 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -9248,16 +10332,16 @@ func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromArray(l 
int, d *codec1978 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj571 int - var yyb571 bool - var yyhl571 bool = l >= 0 - yyj571++ - if yyhl571 { - yyb571 = yyj571 > l + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb571 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb571 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9265,21 +10349,21 @@ func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromArray(l int, d *codec1978 if r.TryDecodeAsNil() { x.AccessModes = nil } else { - yyv572 := &x.AccessModes - yym573 := z.DecBinary() - _ = yym573 + yyv14 := &x.AccessModes + yym15 := z.DecBinary() + _ = yym15 if false { } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv572), d) + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv14), d) } } - yyj571++ - if yyhl571 { - yyb571 = yyj571 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb571 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb571 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9290,23 +10374,23 @@ func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromArray(l int, d *codec1978 } } else { if x.Selector == nil { - x.Selector = new(pkg2_unversioned.LabelSelector) + x.Selector = new(pkg2_v1.LabelSelector) } - yym575 := z.DecBinary() - _ = yym575 + yym17 := z.DecBinary() + _ = yym17 if false { } else if z.HasExtensions() && z.DecExt(x.Selector) { } else { z.DecFallback(x.Selector, false) } } - yyj571++ - if yyhl571 { - yyb571 = yyj571 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb571 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb571 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9314,16 +10398,16 @@ func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromArray(l int, d *codec1978 if r.TryDecodeAsNil() { x.Resources = ResourceRequirements{} } else { - yyv576 := &x.Resources - yyv576.CodecDecodeSelf(d) + yyv18 := &x.Resources + yyv18.CodecDecodeSelf(d) } - yyj571++ - if yyhl571 { - yyb571 = yyj571 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb571 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb571 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9331,20 +10415,52 @@ func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromArray(l int, d *codec1978 if r.TryDecodeAsNil() { x.VolumeName = "" } else { - x.VolumeName = string(r.DecodeString()) + yyv19 := &x.VolumeName + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.StorageClassName != nil { + x.StorageClassName = nil + } + } else { + if x.StorageClassName == nil { + x.StorageClassName = new(string) + } + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(x.StorageClassName)) = r.DecodeString() + } } for { - yyj571++ - if yyhl571 { - yyb571 = yyj571 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb571 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb571 { + if yyb13 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj571-1, "") + 
z.DecStructFieldNotFound(yyj13-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -9356,55 +10472,55 @@ func (x *PersistentVolumeClaimStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym578 := z.EncBinary() - _ = yym578 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep579 := !z.EncBinary() - yy2arr579 := z.EncBasicHandle().StructToArray - var yyq579 [3]bool - _, _, _ = yysep579, yyq579, yy2arr579 - const yyr579 bool = false - yyq579[0] = x.Phase != "" - yyq579[1] = len(x.AccessModes) != 0 - yyq579[2] = len(x.Capacity) != 0 - var yynn579 int - if yyr579 || yy2arr579 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Phase != "" + yyq2[1] = len(x.AccessModes) != 0 + yyq2[2] = len(x.Capacity) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn579 = 0 - for _, b := range yyq579 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn579++ + yynn2++ } } - r.EncodeMapStart(yynn579) - yynn579 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr579 || yy2arr579 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq579[0] { + if yyq2[0] { x.Phase.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq579[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("phase")) z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Phase.CodecEncodeSelf(e) } } - if yyr579 || yy2arr579 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq579[1] { + if yyq2[1] { if x.AccessModes == nil { r.EncodeNil() } else { - yym582 := z.EncBinary() - _ = yym582 + yym7 := z.EncBinary() + _ = yym7 if false { } else { h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) @@ -9414,15 +10530,15 @@ func (x *PersistentVolumeClaimStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq579[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("accessModes")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.AccessModes == nil { r.EncodeNil() } else { - yym583 := z.EncBinary() - _ = yym583 + yym8 := z.EncBinary() + _ = yym8 if false { } else { h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) @@ -9430,9 +10546,9 @@ func (x *PersistentVolumeClaimStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr579 || yy2arr579 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq579[2] { + if yyq2[2] { if x.Capacity == nil { r.EncodeNil() } else { @@ -9442,7 +10558,7 @@ func (x *PersistentVolumeClaimStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq579[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("capacity")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -9453,7 +10569,7 @@ func (x *PersistentVolumeClaimStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr579 || yy2arr579 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -9466,25 +10582,25 @@ func (x 
*PersistentVolumeClaimStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym585 := z.DecBinary() - _ = yym585 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct586 := r.ContainerType() - if yyct586 == codecSelferValueTypeMap1234 { - yyl586 := r.ReadMapStart() - if yyl586 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl586, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct586 == codecSelferValueTypeArray1234 { - yyl586 := r.ReadArrayStart() - if yyl586 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl586, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -9496,12 +10612,12 @@ func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromMap(l int, d *codec1978 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys587Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys587Slc - var yyhl587 bool = l >= 0 - for yyj587 := 0; ; yyj587++ { - if yyhl587 { - if yyj587 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -9510,39 +10626,40 @@ func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromMap(l int, d *codec1978 } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys587Slc = r.DecodeBytes(yys587Slc, true, true) - yys587 := string(yys587Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys587 { + switch yys3 { case "phase": if r.TryDecodeAsNil() { x.Phase = "" } else { - x.Phase = PersistentVolumeClaimPhase(r.DecodeString()) + yyv4 := &x.Phase + yyv4.CodecDecodeSelf(d) } case "accessModes": if r.TryDecodeAsNil() { x.AccessModes = nil } else { - yyv589 := &x.AccessModes - yym590 := z.DecBinary() - _ = yym590 + yyv5 := &x.AccessModes + yym6 := z.DecBinary() + _ = yym6 if false { } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv589), d) + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv5), d) } } case "capacity": if r.TryDecodeAsNil() { x.Capacity = nil } else { - yyv591 := &x.Capacity - yyv591.CodecDecodeSelf(d) + yyv7 := &x.Capacity + yyv7.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys587) - } // end switch yys587 - } // end for yyj587 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -9550,16 +10667,16 @@ func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromArray(l int, d *codec19 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj592 int - var yyb592 bool - var yyhl592 bool = l >= 0 - yyj592++ - if yyhl592 { - yyb592 = yyj592 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb592 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb592 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ 
-9567,15 +10684,16 @@ func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { x.Phase = "" } else { - x.Phase = PersistentVolumeClaimPhase(r.DecodeString()) + yyv9 := &x.Phase + yyv9.CodecDecodeSelf(d) } - yyj592++ - if yyhl592 { - yyb592 = yyj592 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb592 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb592 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9583,21 +10701,21 @@ func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { x.AccessModes = nil } else { - yyv594 := &x.AccessModes - yym595 := z.DecBinary() - _ = yym595 + yyv10 := &x.AccessModes + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv594), d) + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv10), d) } } - yyj592++ - if yyhl592 { - yyb592 = yyj592 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb592 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb592 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9605,21 +10723,21 @@ func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { x.Capacity = nil } else { - yyv596 := &x.Capacity - yyv596.CodecDecodeSelf(d) + yyv12 := &x.Capacity + yyv12.CodecDecodeSelf(d) } for { - yyj592++ - if yyhl592 { - yyb592 = yyj592 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb592 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb592 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj592-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -9628,8 +10746,8 @@ func (x PersistentVolumeAccessMode) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym597 := z.EncBinary() - _ = yym597 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -9641,8 +10759,8 @@ func (x *PersistentVolumeAccessMode) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym598 := z.DecBinary() - _ = yym598 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -9654,8 +10772,8 @@ func (x PersistentVolumePhase) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym599 := z.EncBinary() - _ = yym599 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -9667,8 +10785,8 @@ func (x *PersistentVolumePhase) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym600 := z.DecBinary() - _ = yym600 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -9680,8 +10798,8 @@ func (x PersistentVolumeClaimPhase) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym601 := z.EncBinary() - _ = yym601 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -9693,8 +10811,8 @@ func (x *PersistentVolumeClaimPhase) CodecDecodeSelf(d *codec1978.Decoder) { var h 
codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym602 := z.DecBinary() - _ = yym602 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -9709,33 +10827,33 @@ func (x *HostPathVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym603 := z.EncBinary() - _ = yym603 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep604 := !z.EncBinary() - yy2arr604 := z.EncBasicHandle().StructToArray - var yyq604 [1]bool - _, _, _ = yysep604, yyq604, yy2arr604 - const yyr604 bool = false - var yynn604 int - if yyr604 || yy2arr604 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn604 = 1 - for _, b := range yyq604 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn604++ + yynn2++ } } - r.EncodeMapStart(yynn604) - yynn604 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr604 || yy2arr604 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym606 := z.EncBinary() - _ = yym606 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Path)) @@ -9744,14 +10862,14 @@ func (x *HostPathVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("path")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym607 := z.EncBinary() - _ = yym607 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Path)) } } - if yyr604 || yy2arr604 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -9764,25 +10882,25 @@ func (x *HostPathVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym608 := z.DecBinary() - _ = yym608 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct609 := r.ContainerType() - if yyct609 == codecSelferValueTypeMap1234 { - yyl609 := r.ReadMapStart() - if yyl609 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl609, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct609 == codecSelferValueTypeArray1234 { - yyl609 := r.ReadArrayStart() - if yyl609 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl609, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -9794,12 +10912,12 @@ func (x *HostPathVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys610Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys610Slc - var yyhl610 bool = l >= 0 - for yyj610 := 0; ; yyj610++ { - if yyhl610 { - if yyj610 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + 
for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -9808,20 +10926,26 @@ func (x *HostPathVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decode } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys610Slc = r.DecodeBytes(yys610Slc, true, true) - yys610 := string(yys610Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys610 { + switch yys3 { case "path": if r.TryDecodeAsNil() { x.Path = "" } else { - x.Path = string(r.DecodeString()) + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys610) - } // end switch yys610 - } // end for yyj610 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -9829,16 +10953,16 @@ func (x *HostPathVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Deco var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj612 int - var yyb612 bool - var yyhl612 bool = l >= 0 - yyj612++ - if yyhl612 { - yyb612 = yyj612 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb612 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb612 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9846,20 +10970,26 @@ func (x *HostPathVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.Path = "" } else { - x.Path = string(r.DecodeString()) + yyv7 := &x.Path + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } } for { - yyj612++ - if yyhl612 { - yyb612 = yyj612 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb612 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb612 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj612-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -9871,46 +11001,46 @@ func (x *EmptyDirVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym614 := z.EncBinary() - _ = yym614 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep615 := !z.EncBinary() - yy2arr615 := z.EncBasicHandle().StructToArray - var yyq615 [1]bool - _, _, _ = yysep615, yyq615, yy2arr615 - const yyr615 bool = false - yyq615[0] = x.Medium != "" - var yynn615 int - if yyr615 || yy2arr615 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Medium != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn615 = 0 - for _, b := range yyq615 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn615++ + yynn2++ } } - r.EncodeMapStart(yynn615) - yynn615 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr615 || yy2arr615 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq615[0] { + if yyq2[0] { x.Medium.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq615[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("medium")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Medium.CodecEncodeSelf(e) } } - if yyr615 || yy2arr615 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -9923,25 +11053,25 @@ func (x *EmptyDirVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym617 := z.DecBinary() - _ = yym617 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct618 := r.ContainerType() - if yyct618 == codecSelferValueTypeMap1234 { - yyl618 := r.ReadMapStart() - if yyl618 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl618, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct618 == codecSelferValueTypeArray1234 { - yyl618 := r.ReadArrayStart() - if yyl618 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl618, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -9953,12 +11083,12 @@ func (x *EmptyDirVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys619Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys619Slc - var yyhl619 bool = l >= 0 - for yyj619 := 0; ; yyj619++ { - if yyhl619 { - if yyj619 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -9967,20 +11097,21 @@ func (x *EmptyDirVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decode } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys619Slc = r.DecodeBytes(yys619Slc, true, true) - yys619 := string(yys619Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys619 { + switch yys3 { case "medium": if r.TryDecodeAsNil() { x.Medium = "" } else { - x.Medium = StorageMedium(r.DecodeString()) + yyv4 := &x.Medium + yyv4.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys619) - } // end switch yys619 - } // end for yyj619 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -9988,16 +11119,16 @@ func (x *EmptyDirVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Deco var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj621 int - var yyb621 bool - var yyhl621 bool = l >= 0 - yyj621++ - if yyhl621 { - yyb621 = yyj621 > l + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb621 = r.CheckBreak() + yyb5 = r.CheckBreak() } - if yyb621 { + if yyb5 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10005,20 +11136,21 @@ func (x *EmptyDirVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.Medium = "" } else { - x.Medium = StorageMedium(r.DecodeString()) + yyv6 := &x.Medium + yyv6.CodecDecodeSelf(d) } for { - yyj621++ - if yyhl621 
{ - yyb621 = yyj621 > l + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb621 = r.CheckBreak() + yyb5 = r.CheckBreak() } - if yyb621 { + if yyb5 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj621-1, "") + z.DecStructFieldNotFound(yyj5-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -10030,34 +11162,34 @@ func (x *GlusterfsVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym623 := z.EncBinary() - _ = yym623 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep624 := !z.EncBinary() - yy2arr624 := z.EncBasicHandle().StructToArray - var yyq624 [3]bool - _, _, _ = yysep624, yyq624, yy2arr624 - const yyr624 bool = false - yyq624[2] = x.ReadOnly != false - var yynn624 int - if yyr624 || yy2arr624 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn624 = 2 - for _, b := range yyq624 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn624++ + yynn2++ } } - r.EncodeMapStart(yynn624) - yynn624 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr624 || yy2arr624 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym626 := z.EncBinary() - _ = yym626 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.EndpointsName)) @@ -10066,17 +11198,17 @@ func (x *GlusterfsVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("endpoints")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym627 := z.EncBinary() - _ = yym627 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.EndpointsName)) } } - if yyr624 || yy2arr624 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym629 := z.EncBinary() - _ = yym629 + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Path)) @@ -10085,18 +11217,18 @@ func (x *GlusterfsVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("path")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym630 := z.EncBinary() - _ = yym630 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Path)) } } - if yyr624 || yy2arr624 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq624[2] { - yym632 := z.EncBinary() - _ = yym632 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeBool(bool(x.ReadOnly)) @@ -10105,19 +11237,19 @@ func (x *GlusterfsVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq624[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("readOnly")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym633 := z.EncBinary() - _ = yym633 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeBool(bool(x.ReadOnly)) } } } - if yyr624 || yy2arr624 { + if yyr2 || yy2arr2 { 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -10130,25 +11262,25 @@ func (x *GlusterfsVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym634 := z.DecBinary() - _ = yym634 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct635 := r.ContainerType() - if yyct635 == codecSelferValueTypeMap1234 { - yyl635 := r.ReadMapStart() - if yyl635 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl635, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct635 == codecSelferValueTypeArray1234 { - yyl635 := r.ReadArrayStart() - if yyl635 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl635, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -10160,12 +11292,12 @@ func (x *GlusterfsVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys636Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys636Slc - var yyhl636 bool = l >= 0 - for yyj636 := 0; ; yyj636++ { - if yyhl636 { - if yyj636 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -10174,32 +11306,50 @@ func (x *GlusterfsVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decod } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys636Slc = r.DecodeBytes(yys636Slc, true, true) - yys636 := string(yys636Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys636 { + switch yys3 { case "endpoints": if r.TryDecodeAsNil() { x.EndpointsName = "" } else { - x.EndpointsName = string(r.DecodeString()) + yyv4 := &x.EndpointsName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "path": if r.TryDecodeAsNil() { x.Path = "" } else { - x.Path = string(r.DecodeString()) + yyv6 := &x.Path + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "readOnly": if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } } default: - z.DecStructFieldNotFound(-1, yys636) - } // end switch yys636 - } // end for yyj636 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -10207,16 +11357,16 @@ func (x *GlusterfsVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj640 int - var yyb640 bool - var yyhl640 bool = l >= 0 - yyj640++ - if yyhl640 { - yyb640 = yyj640 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb640 = 
r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb640 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10224,15 +11374,21 @@ func (x *GlusterfsVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.EndpointsName = "" } else { - x.EndpointsName = string(r.DecodeString()) + yyv11 := &x.EndpointsName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } - yyj640++ - if yyhl640 { - yyb640 = yyj640 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb640 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb640 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10240,15 +11396,21 @@ func (x *GlusterfsVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.Path = "" } else { - x.Path = string(r.DecodeString()) + yyv13 := &x.Path + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj640++ - if yyhl640 { - yyb640 = yyj640 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb640 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb640 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10256,20 +11418,26 @@ func (x *GlusterfsVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } } for { - yyj640++ - if yyhl640 { - yyb640 = yyj640 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb640 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb640 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj640-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -10281,42 +11449,42 @@ func (x *RBDVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym644 := z.EncBinary() - _ = yym644 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep645 := !z.EncBinary() - yy2arr645 := z.EncBasicHandle().StructToArray - var yyq645 [8]bool - _, _, _ = yysep645, yyq645, yy2arr645 - const yyr645 bool = false - yyq645[2] = x.FSType != "" - yyq645[3] = x.RBDPool != "" - yyq645[4] = x.RadosUser != "" - yyq645[5] = x.Keyring != "" - yyq645[6] = x.SecretRef != nil - yyq645[7] = x.ReadOnly != false - var yynn645 int - if yyr645 || yy2arr645 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.FSType != "" + yyq2[3] = x.RBDPool != "" + yyq2[4] = x.RadosUser != "" + yyq2[5] = x.Keyring != "" + yyq2[6] = x.SecretRef != nil + yyq2[7] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(8) } else { - yynn645 = 2 - for _, b := range yyq645 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn645++ + yynn2++ } } - r.EncodeMapStart(yynn645) - yynn645 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr645 || yy2arr645 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.CephMonitors == nil { r.EncodeNil() } else { - yym647 := z.EncBinary() - _ = yym647 + yym4 := z.EncBinary() + _ = yym4 if false { } else { 
z.F.EncSliceStringV(x.CephMonitors, false, e) @@ -10329,18 +11497,18 @@ func (x *RBDVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x.CephMonitors == nil { r.EncodeNil() } else { - yym648 := z.EncBinary() - _ = yym648 + yym5 := z.EncBinary() + _ = yym5 if false { } else { z.F.EncSliceStringV(x.CephMonitors, false, e) } } } - if yyr645 || yy2arr645 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym650 := z.EncBinary() - _ = yym650 + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RBDImage)) @@ -10349,18 +11517,18 @@ func (x *RBDVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("image")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym651 := z.EncBinary() - _ = yym651 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RBDImage)) } } - if yyr645 || yy2arr645 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq645[2] { - yym653 := z.EncBinary() - _ = yym653 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) @@ -10369,23 +11537,23 @@ func (x *RBDVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq645[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("fsType")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym654 := z.EncBinary() - _ = yym654 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) } } } - if yyr645 || yy2arr645 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq645[3] { - yym656 := z.EncBinary() - _ = yym656 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RBDPool)) @@ -10394,23 +11562,23 @@ func (x *RBDVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq645[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("pool")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym657 := z.EncBinary() - _ = yym657 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RBDPool)) } } } - if yyr645 || yy2arr645 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq645[4] { - yym659 := z.EncBinary() - _ = yym659 + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RadosUser)) @@ -10419,23 +11587,23 @@ func (x *RBDVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq645[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("user")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym660 := z.EncBinary() - _ = yym660 + yym17 := z.EncBinary() + _ = yym17 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RadosUser)) } } } - if yyr645 || yy2arr645 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq645[5] { - 
yym662 := z.EncBinary() - _ = yym662 + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Keyring)) @@ -10444,21 +11612,21 @@ func (x *RBDVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq645[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("keyring")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym663 := z.EncBinary() - _ = yym663 + yym20 := z.EncBinary() + _ = yym20 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Keyring)) } } } - if yyr645 || yy2arr645 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq645[6] { + if yyq2[6] { if x.SecretRef == nil { r.EncodeNil() } else { @@ -10468,7 +11636,7 @@ func (x *RBDVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq645[6] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("secretRef")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -10479,11 +11647,11 @@ func (x *RBDVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr645 || yy2arr645 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq645[7] { - yym666 := z.EncBinary() - _ = yym666 + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 if false { } else { r.EncodeBool(bool(x.ReadOnly)) @@ -10492,19 +11660,19 @@ func (x *RBDVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq645[7] { + if yyq2[7] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("readOnly")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym667 := z.EncBinary() - _ = yym667 + yym26 := z.EncBinary() + _ = yym26 if false { } else { r.EncodeBool(bool(x.ReadOnly)) } } } - if yyr645 || yy2arr645 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -10517,25 +11685,25 @@ func (x *RBDVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym668 := z.DecBinary() - _ = yym668 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct669 := r.ContainerType() - if yyct669 == codecSelferValueTypeMap1234 { - yyl669 := r.ReadMapStart() - if yyl669 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl669, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct669 == codecSelferValueTypeArray1234 { - yyl669 := r.ReadArrayStart() - if yyl669 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl669, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -10547,12 +11715,12 @@ func (x *RBDVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys670Slc = z.DecScratchBuffer() // default slice to decode into - 
_ = yys670Slc - var yyhl670 bool = l >= 0 - for yyj670 := 0; ; yyj670++ { - if yyhl670 { - if yyj670 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -10561,51 +11729,81 @@ func (x *RBDVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys670Slc = r.DecodeBytes(yys670Slc, true, true) - yys670 := string(yys670Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys670 { + switch yys3 { case "monitors": if r.TryDecodeAsNil() { x.CephMonitors = nil } else { - yyv671 := &x.CephMonitors - yym672 := z.DecBinary() - _ = yym672 + yyv4 := &x.CephMonitors + yym5 := z.DecBinary() + _ = yym5 if false { } else { - z.F.DecSliceStringX(yyv671, false, d) + z.F.DecSliceStringX(yyv4, false, d) } } case "image": if r.TryDecodeAsNil() { x.RBDImage = "" } else { - x.RBDImage = string(r.DecodeString()) + yyv6 := &x.RBDImage + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "fsType": if r.TryDecodeAsNil() { x.FSType = "" } else { - x.FSType = string(r.DecodeString()) + yyv8 := &x.FSType + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } case "pool": if r.TryDecodeAsNil() { x.RBDPool = "" } else { - x.RBDPool = string(r.DecodeString()) + yyv10 := &x.RBDPool + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } case "user": if r.TryDecodeAsNil() { x.RadosUser = "" } else { - x.RadosUser = string(r.DecodeString()) + yyv12 := &x.RadosUser + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } case "keyring": if r.TryDecodeAsNil() { x.Keyring = "" } else { - x.Keyring = string(r.DecodeString()) + yyv14 := &x.Keyring + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } case "secretRef": if r.TryDecodeAsNil() { @@ -10622,12 +11820,18 @@ func (x *RBDVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv17 := &x.ReadOnly + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*bool)(yyv17)) = r.DecodeBool() + } } default: - z.DecStructFieldNotFound(-1, yys670) - } // end switch yys670 - } // end for yyj670 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -10635,16 +11839,16 @@ func (x *RBDVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj680 int - var yyb680 bool - var yyhl680 bool = l >= 0 - yyj680++ - if yyhl680 { - yyb680 = yyj680 > l + var yyj19 int + var yyb19 bool + var yyhl19 bool = l >= 0 + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb680 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb680 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10652,21 +11856,21 @@ func (x *RBDVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.CephMonitors = nil } else { - yyv681 := &x.CephMonitors - yym682 := z.DecBinary() - _ = yym682 + yyv20 := 
&x.CephMonitors + yym21 := z.DecBinary() + _ = yym21 if false { } else { - z.F.DecSliceStringX(yyv681, false, d) + z.F.DecSliceStringX(yyv20, false, d) } } - yyj680++ - if yyhl680 { - yyb680 = yyj680 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb680 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb680 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10674,15 +11878,21 @@ func (x *RBDVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.RBDImage = "" } else { - x.RBDImage = string(r.DecodeString()) + yyv22 := &x.RBDImage + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*string)(yyv22)) = r.DecodeString() + } } - yyj680++ - if yyhl680 { - yyb680 = yyj680 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb680 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb680 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10690,15 +11900,21 @@ func (x *RBDVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.FSType = "" } else { - x.FSType = string(r.DecodeString()) + yyv24 := &x.FSType + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + *((*string)(yyv24)) = r.DecodeString() + } } - yyj680++ - if yyhl680 { - yyb680 = yyj680 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb680 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb680 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10706,15 +11922,21 @@ func (x *RBDVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.RBDPool = "" } else { - x.RBDPool = string(r.DecodeString()) + yyv26 := &x.RBDPool + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*string)(yyv26)) = r.DecodeString() + } } - yyj680++ - if yyhl680 { - yyb680 = yyj680 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb680 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb680 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10722,15 +11944,21 @@ func (x *RBDVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.RadosUser = "" } else { - x.RadosUser = string(r.DecodeString()) + yyv28 := &x.RadosUser + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*string)(yyv28)) = r.DecodeString() + } } - yyj680++ - if yyhl680 { - yyb680 = yyj680 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb680 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb680 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10738,15 +11966,21 @@ func (x *RBDVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Keyring = "" } else { - x.Keyring = string(r.DecodeString()) + yyv30 := &x.Keyring + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*string)(yyv30)) = r.DecodeString() + } } - yyj680++ - if yyhl680 { - yyb680 = yyj680 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb680 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb680 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10761,13 +11995,13 @@ func (x *RBDVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) } x.SecretRef.CodecDecodeSelf(d) } - yyj680++ - if yyhl680 { - yyb680 = yyj680 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb680 = r.CheckBreak() + yyb19 = 
r.CheckBreak() } - if yyb680 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10775,20 +12009,26 @@ func (x *RBDVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv33 := &x.ReadOnly + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*bool)(yyv33)) = r.DecodeBool() + } } for { - yyj680++ - if yyhl680 { - yyb680 = yyj680 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb680 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb680 { + if yyb19 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj680-1, "") + z.DecStructFieldNotFound(yyj19-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -10800,35 +12040,35 @@ func (x *CinderVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym690 := z.EncBinary() - _ = yym690 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep691 := !z.EncBinary() - yy2arr691 := z.EncBasicHandle().StructToArray - var yyq691 [3]bool - _, _, _ = yysep691, yyq691, yy2arr691 - const yyr691 bool = false - yyq691[1] = x.FSType != "" - yyq691[2] = x.ReadOnly != false - var yynn691 int - if yyr691 || yy2arr691 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + yyq2[2] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn691 = 1 - for _, b := range yyq691 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn691++ + yynn2++ } } - r.EncodeMapStart(yynn691) - yynn691 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr691 || yy2arr691 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym693 := z.EncBinary() - _ = yym693 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) @@ -10837,18 +12077,18 @@ func (x *CinderVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("volumeID")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym694 := z.EncBinary() - _ = yym694 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) } } - if yyr691 || yy2arr691 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq691[1] { - yym696 := z.EncBinary() - _ = yym696 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) @@ -10857,23 +12097,23 @@ func (x *CinderVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq691[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("fsType")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym697 := z.EncBinary() - _ = yym697 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) } } } - if yyr691 || yy2arr691 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq691[2] { - yym699 := z.EncBinary() - _ = yym699 + if yyq2[2] { + yym10 := z.EncBinary() + _ = 
yym10 if false { } else { r.EncodeBool(bool(x.ReadOnly)) @@ -10882,19 +12122,19 @@ func (x *CinderVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq691[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("readOnly")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym700 := z.EncBinary() - _ = yym700 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeBool(bool(x.ReadOnly)) } } } - if yyr691 || yy2arr691 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -10907,25 +12147,25 @@ func (x *CinderVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym701 := z.DecBinary() - _ = yym701 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct702 := r.ContainerType() - if yyct702 == codecSelferValueTypeMap1234 { - yyl702 := r.ReadMapStart() - if yyl702 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl702, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct702 == codecSelferValueTypeArray1234 { - yyl702 := r.ReadArrayStart() - if yyl702 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl702, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -10937,12 +12177,12 @@ func (x *CinderVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys703Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys703Slc - var yyhl703 bool = l >= 0 - for yyj703 := 0; ; yyj703++ { - if yyhl703 { - if yyj703 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -10951,32 +12191,50 @@ func (x *CinderVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys703Slc = r.DecodeBytes(yys703Slc, true, true) - yys703 := string(yys703Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys703 { + switch yys3 { case "volumeID": if r.TryDecodeAsNil() { x.VolumeID = "" } else { - x.VolumeID = string(r.DecodeString()) + yyv4 := &x.VolumeID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "fsType": if r.TryDecodeAsNil() { x.FSType = "" } else { - x.FSType = string(r.DecodeString()) + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "readOnly": if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } } default: - z.DecStructFieldNotFound(-1, yys703) - } // end switch yys703 - } // end for yyj703 
+ z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -10984,16 +12242,16 @@ func (x *CinderVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj707 int - var yyb707 bool - var yyhl707 bool = l >= 0 - yyj707++ - if yyhl707 { - yyb707 = yyj707 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb707 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb707 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -11001,15 +12259,21 @@ func (x *CinderVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.VolumeID = "" } else { - x.VolumeID = string(r.DecodeString()) + yyv11 := &x.VolumeID + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } - yyj707++ - if yyhl707 { - yyb707 = yyj707 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb707 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb707 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -11017,15 +12281,21 @@ func (x *CinderVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.FSType = "" } else { - x.FSType = string(r.DecodeString()) + yyv13 := &x.FSType + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj707++ - if yyhl707 { - yyb707 = yyj707 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb707 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb707 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -11033,20 +12303,26 @@ func (x *CinderVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } } for { - yyj707++ - if yyhl707 { - yyb707 = yyj707 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb707 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb707 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj707-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -11058,41 +12334,41 @@ func (x *CephFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym711 := z.EncBinary() - _ = yym711 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep712 := !z.EncBinary() - yy2arr712 := z.EncBasicHandle().StructToArray - var yyq712 [6]bool - _, _, _ = yysep712, yyq712, yy2arr712 - const yyr712 bool = false - yyq712[1] = x.Path != "" - yyq712[2] = x.User != "" - yyq712[3] = x.SecretFile != "" - yyq712[4] = x.SecretRef != nil - yyq712[5] = x.ReadOnly != false - var yynn712 int - if yyr712 || yy2arr712 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Path != "" + yyq2[2] = x.User != "" + yyq2[3] = x.SecretFile != "" + yyq2[4] = x.SecretRef != nil + yyq2[5] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { 
r.EncodeArrayStart(6) } else { - yynn712 = 1 - for _, b := range yyq712 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn712++ + yynn2++ } } - r.EncodeMapStart(yynn712) - yynn712 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr712 || yy2arr712 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Monitors == nil { r.EncodeNil() } else { - yym714 := z.EncBinary() - _ = yym714 + yym4 := z.EncBinary() + _ = yym4 if false { } else { z.F.EncSliceStringV(x.Monitors, false, e) @@ -11105,19 +12381,19 @@ func (x *CephFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x.Monitors == nil { r.EncodeNil() } else { - yym715 := z.EncBinary() - _ = yym715 + yym5 := z.EncBinary() + _ = yym5 if false { } else { z.F.EncSliceStringV(x.Monitors, false, e) } } } - if yyr712 || yy2arr712 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq712[1] { - yym717 := z.EncBinary() - _ = yym717 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Path)) @@ -11126,23 +12402,23 @@ func (x *CephFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq712[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("path")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym718 := z.EncBinary() - _ = yym718 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Path)) } } } - if yyr712 || yy2arr712 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq712[2] { - yym720 := z.EncBinary() - _ = yym720 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.User)) @@ -11151,23 +12427,23 @@ func (x *CephFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq712[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("user")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym721 := z.EncBinary() - _ = yym721 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.User)) } } } - if yyr712 || yy2arr712 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq712[3] { - yym723 := z.EncBinary() - _ = yym723 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.SecretFile)) @@ -11176,21 +12452,21 @@ func (x *CephFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq712[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("secretFile")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym724 := z.EncBinary() - _ = yym724 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.SecretFile)) } } } - if yyr712 || yy2arr712 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq712[4] { + if yyq2[4] { if x.SecretRef == nil { r.EncodeNil() } else { @@ -11200,7 +12476,7 @@ func (x *CephFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq712[4] { + if yyq2[4] { 
z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("secretRef")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -11211,11 +12487,11 @@ func (x *CephFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr712 || yy2arr712 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq712[5] { - yym727 := z.EncBinary() - _ = yym727 + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 if false { } else { r.EncodeBool(bool(x.ReadOnly)) @@ -11224,19 +12500,19 @@ func (x *CephFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq712[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("readOnly")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym728 := z.EncBinary() - _ = yym728 + yym20 := z.EncBinary() + _ = yym20 if false { } else { r.EncodeBool(bool(x.ReadOnly)) } } } - if yyr712 || yy2arr712 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -11249,25 +12525,25 @@ func (x *CephFSVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym729 := z.DecBinary() - _ = yym729 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct730 := r.ContainerType() - if yyct730 == codecSelferValueTypeMap1234 { - yyl730 := r.ReadMapStart() - if yyl730 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl730, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct730 == codecSelferValueTypeArray1234 { - yyl730 := r.ReadArrayStart() - if yyl730 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl730, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -11279,12 +12555,12 @@ func (x *CephFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys731Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys731Slc - var yyhl731 bool = l >= 0 - for yyj731 := 0; ; yyj731++ { - if yyhl731 { - if yyj731 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -11293,39 +12569,57 @@ func (x *CephFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys731Slc = r.DecodeBytes(yys731Slc, true, true) - yys731 := string(yys731Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys731 { + switch yys3 { case "monitors": if r.TryDecodeAsNil() { x.Monitors = nil } else { - yyv732 := &x.Monitors - yym733 := z.DecBinary() - _ = yym733 + yyv4 := &x.Monitors + yym5 := z.DecBinary() + _ = yym5 if false { } else { - z.F.DecSliceStringX(yyv732, false, d) + z.F.DecSliceStringX(yyv4, false, d) } } case 
"path": if r.TryDecodeAsNil() { x.Path = "" } else { - x.Path = string(r.DecodeString()) + yyv6 := &x.Path + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "user": if r.TryDecodeAsNil() { x.User = "" } else { - x.User = string(r.DecodeString()) + yyv8 := &x.User + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } case "secretFile": if r.TryDecodeAsNil() { x.SecretFile = "" } else { - x.SecretFile = string(r.DecodeString()) + yyv10 := &x.SecretFile + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } case "secretRef": if r.TryDecodeAsNil() { @@ -11342,12 +12636,18 @@ func (x *CephFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv13 := &x.ReadOnly + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*bool)(yyv13)) = r.DecodeBool() + } } default: - z.DecStructFieldNotFound(-1, yys731) - } // end switch yys731 - } // end for yyj731 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -11355,16 +12655,16 @@ func (x *CephFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj739 int - var yyb739 bool - var yyhl739 bool = l >= 0 - yyj739++ - if yyhl739 { - yyb739 = yyj739 > l + var yyj15 int + var yyb15 bool + var yyhl15 bool = l >= 0 + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb739 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb739 { + if yyb15 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -11372,21 +12672,21 @@ func (x *CephFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.Monitors = nil } else { - yyv740 := &x.Monitors - yym741 := z.DecBinary() - _ = yym741 + yyv16 := &x.Monitors + yym17 := z.DecBinary() + _ = yym17 if false { } else { - z.F.DecSliceStringX(yyv740, false, d) + z.F.DecSliceStringX(yyv16, false, d) } } - yyj739++ - if yyhl739 { - yyb739 = yyj739 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb739 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb739 { + if yyb15 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -11394,15 +12694,21 @@ func (x *CephFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.Path = "" } else { - x.Path = string(r.DecodeString()) + yyv18 := &x.Path + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } } - yyj739++ - if yyhl739 { - yyb739 = yyj739 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb739 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb739 { + if yyb15 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -11410,15 +12716,21 @@ func (x *CephFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.User = "" } else { - x.User = string(r.DecodeString()) + yyv20 := &x.User + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(yyv20)) = r.DecodeString() + } } - yyj739++ - if yyhl739 { - yyb739 = yyj739 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb739 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb739 { + if yyb15 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -11426,15 +12738,21 @@ func (x *CephFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.SecretFile = "" } else { - x.SecretFile = string(r.DecodeString()) + yyv22 := &x.SecretFile + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*string)(yyv22)) = r.DecodeString() + } } - yyj739++ - if yyhl739 { - yyb739 = yyj739 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb739 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb739 { + if yyb15 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -11449,13 +12767,13 @@ func (x *CephFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decode } x.SecretRef.CodecDecodeSelf(d) } - yyj739++ - if yyhl739 { - yyb739 = yyj739 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb739 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb739 { + if yyb15 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -11463,20 +12781,26 @@ func (x *CephFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv25 := &x.ReadOnly + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*bool)(yyv25)) = r.DecodeBool() + } } for { - yyj739++ - if yyhl739 { - yyb739 = yyj739 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb739 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb739 { + if yyb15 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj739-1, "") + z.DecStructFieldNotFound(yyj15-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -11488,36 +12812,36 @@ func (x *FlockerVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym747 := z.EncBinary() - _ = yym747 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep748 := !z.EncBinary() - yy2arr748 := z.EncBasicHandle().StructToArray - var yyq748 [2]bool - _, _, _ = yysep748, yyq748, yy2arr748 - const yyr748 bool = false - yyq748[0] = x.DatasetName != "" - yyq748[1] = x.DatasetUUID != "" - var yynn748 int - if yyr748 || yy2arr748 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.DatasetName != "" + yyq2[1] = x.DatasetUUID != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn748 = 0 - for _, b := range yyq748 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn748++ + yynn2++ } } - r.EncodeMapStart(yynn748) - yynn748 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr748 || yy2arr748 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq748[0] { - yym750 := z.EncBinary() - _ = yym750 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.DatasetName)) @@ -11526,23 +12850,23 @@ func (x *FlockerVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq748[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("datasetName")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym751 := z.EncBinary() - _ = yym751 + yym5 := z.EncBinary() + _ = yym5 if false { } else { 
r.EncodeString(codecSelferC_UTF81234, string(x.DatasetName)) } } } - if yyr748 || yy2arr748 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq748[1] { - yym753 := z.EncBinary() - _ = yym753 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.DatasetUUID)) @@ -11551,19 +12875,19 @@ func (x *FlockerVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq748[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("datasetUUID")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym754 := z.EncBinary() - _ = yym754 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.DatasetUUID)) } } } - if yyr748 || yy2arr748 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -11576,25 +12900,25 @@ func (x *FlockerVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym755 := z.DecBinary() - _ = yym755 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct756 := r.ContainerType() - if yyct756 == codecSelferValueTypeMap1234 { - yyl756 := r.ReadMapStart() - if yyl756 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl756, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct756 == codecSelferValueTypeArray1234 { - yyl756 := r.ReadArrayStart() - if yyl756 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl756, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -11606,12 +12930,12 @@ func (x *FlockerVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys757Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys757Slc - var yyhl757 bool = l >= 0 - for yyj757 := 0; ; yyj757++ { - if yyhl757 { - if yyj757 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -11620,26 +12944,38 @@ func (x *FlockerVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys757Slc = r.DecodeBytes(yys757Slc, true, true) - yys757 := string(yys757Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys757 { + switch yys3 { case "datasetName": if r.TryDecodeAsNil() { x.DatasetName = "" } else { - x.DatasetName = string(r.DecodeString()) + yyv4 := &x.DatasetName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "datasetUUID": if r.TryDecodeAsNil() { x.DatasetUUID = "" } else { - x.DatasetUUID = string(r.DecodeString()) + yyv6 := &x.DatasetUUID + yym7 := z.DecBinary() + _ = yym7 
+ if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys757) - } // end switch yys757 - } // end for yyj757 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -11647,16 +12983,16 @@ func (x *FlockerVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj760 int - var yyb760 bool - var yyhl760 bool = l >= 0 - yyj760++ - if yyhl760 { - yyb760 = yyj760 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb760 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb760 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -11664,15 +13000,21 @@ func (x *FlockerVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.DatasetName = "" } else { - x.DatasetName = string(r.DecodeString()) + yyv9 := &x.DatasetName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } } - yyj760++ - if yyhl760 { - yyb760 = yyj760 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb760 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb760 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -11680,20 +13022,26 @@ func (x *FlockerVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.DatasetUUID = "" } else { - x.DatasetUUID = string(r.DecodeString()) + yyv11 := &x.DatasetUUID + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } for { - yyj760++ - if yyhl760 { - yyb760 = yyj760 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb760 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb760 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj760-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -11702,8 +13050,8 @@ func (x StorageMedium) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym763 := z.EncBinary() - _ = yym763 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -11715,8 +13063,8 @@ func (x *StorageMedium) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym764 := z.DecBinary() - _ = yym764 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -11728,8 +13076,8 @@ func (x Protocol) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym765 := z.EncBinary() - _ = yym765 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -11741,8 +13089,8 @@ func (x *Protocol) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym766 := z.DecBinary() - _ = yym766 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -11757,36 +13105,36 @@ func (x *GCEPersistentDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym767 := z.EncBinary() - _ = yym767 + yym1 
:= z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep768 := !z.EncBinary() - yy2arr768 := z.EncBasicHandle().StructToArray - var yyq768 [4]bool - _, _, _ = yysep768, yyq768, yy2arr768 - const yyr768 bool = false - yyq768[1] = x.FSType != "" - yyq768[2] = x.Partition != 0 - yyq768[3] = x.ReadOnly != false - var yynn768 int - if yyr768 || yy2arr768 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + yyq2[2] = x.Partition != 0 + yyq2[3] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn768 = 1 - for _, b := range yyq768 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn768++ + yynn2++ } } - r.EncodeMapStart(yynn768) - yynn768 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr768 || yy2arr768 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym770 := z.EncBinary() - _ = yym770 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.PDName)) @@ -11795,18 +13143,18 @@ func (x *GCEPersistentDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("pdName")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym771 := z.EncBinary() - _ = yym771 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.PDName)) } } - if yyr768 || yy2arr768 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq768[1] { - yym773 := z.EncBinary() - _ = yym773 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) @@ -11815,23 +13163,23 @@ func (x *GCEPersistentDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq768[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("fsType")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym774 := z.EncBinary() - _ = yym774 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) } } } - if yyr768 || yy2arr768 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq768[2] { - yym776 := z.EncBinary() - _ = yym776 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeInt(int64(x.Partition)) @@ -11840,23 +13188,23 @@ func (x *GCEPersistentDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq768[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("partition")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym777 := z.EncBinary() - _ = yym777 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeInt(int64(x.Partition)) } } } - if yyr768 || yy2arr768 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq768[3] { - yym779 := z.EncBinary() - _ = yym779 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeBool(bool(x.ReadOnly)) @@ -11865,19 +13213,19 @@ func (x *GCEPersistentDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else 
{ - if yyq768[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("readOnly")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym780 := z.EncBinary() - _ = yym780 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeBool(bool(x.ReadOnly)) } } } - if yyr768 || yy2arr768 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -11890,25 +13238,25 @@ func (x *GCEPersistentDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym781 := z.DecBinary() - _ = yym781 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct782 := r.ContainerType() - if yyct782 == codecSelferValueTypeMap1234 { - yyl782 := r.ReadMapStart() - if yyl782 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl782, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct782 == codecSelferValueTypeArray1234 { - yyl782 := r.ReadArrayStart() - if yyl782 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl782, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -11920,12 +13268,12 @@ func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec19 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys783Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys783Slc - var yyhl783 bool = l >= 0 - for yyj783 := 0; ; yyj783++ { - if yyhl783 { - if yyj783 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -11934,38 +13282,62 @@ func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec19 } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys783Slc = r.DecodeBytes(yys783Slc, true, true) - yys783 := string(yys783Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys783 { + switch yys3 { case "pdName": if r.TryDecodeAsNil() { x.PDName = "" } else { - x.PDName = string(r.DecodeString()) + yyv4 := &x.PDName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "fsType": if r.TryDecodeAsNil() { x.FSType = "" } else { - x.FSType = string(r.DecodeString()) + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "partition": if r.TryDecodeAsNil() { x.Partition = 0 } else { - x.Partition = int32(r.DecodeInt(32)) + yyv8 := &x.Partition + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } } case "readOnly": if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv10 := &x.ReadOnly + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } } default: - 
z.DecStructFieldNotFound(-1, yys783) - } // end switch yys783 - } // end for yyj783 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -11973,16 +13345,16 @@ func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj788 int - var yyb788 bool - var yyhl788 bool = l >= 0 - yyj788++ - if yyhl788 { - yyb788 = yyj788 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb788 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb788 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -11990,15 +13362,21 @@ func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec if r.TryDecodeAsNil() { x.PDName = "" } else { - x.PDName = string(r.DecodeString()) + yyv13 := &x.PDName + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj788++ - if yyhl788 { - yyb788 = yyj788 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb788 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb788 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -12006,15 +13384,21 @@ func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec if r.TryDecodeAsNil() { x.FSType = "" } else { - x.FSType = string(r.DecodeString()) + yyv15 := &x.FSType + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj788++ - if yyhl788 { - yyb788 = yyj788 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb788 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb788 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -12022,15 +13406,21 @@ func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec if r.TryDecodeAsNil() { x.Partition = 0 } else { - x.Partition = int32(r.DecodeInt(32)) + yyv17 := &x.Partition + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + } } - yyj788++ - if yyhl788 { - yyb788 = yyj788 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb788 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb788 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -12038,20 +13428,26 @@ func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv19 := &x.ReadOnly + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } } for { - yyj788++ - if yyhl788 { - yyb788 = yyj788 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb788 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb788 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj788-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -12063,36 +13459,36 @@ func (x *QuobyteVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym793 := z.EncBinary() - _ = yym793 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep794 := !z.EncBinary() - yy2arr794 := 
z.EncBasicHandle().StructToArray - var yyq794 [5]bool - _, _, _ = yysep794, yyq794, yy2arr794 - const yyr794 bool = false - yyq794[2] = x.ReadOnly != false - yyq794[3] = x.User != "" - yyq794[4] = x.Group != "" - var yynn794 int - if yyr794 || yy2arr794 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.ReadOnly != false + yyq2[3] = x.User != "" + yyq2[4] = x.Group != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn794 = 2 - for _, b := range yyq794 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn794++ + yynn2++ } } - r.EncodeMapStart(yynn794) - yynn794 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr794 || yy2arr794 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym796 := z.EncBinary() - _ = yym796 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Registry)) @@ -12101,17 +13497,17 @@ func (x *QuobyteVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("registry")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym797 := z.EncBinary() - _ = yym797 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Registry)) } } - if yyr794 || yy2arr794 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym799 := z.EncBinary() - _ = yym799 + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Volume)) @@ -12120,18 +13516,18 @@ func (x *QuobyteVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("volume")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym800 := z.EncBinary() - _ = yym800 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Volume)) } } - if yyr794 || yy2arr794 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq794[2] { - yym802 := z.EncBinary() - _ = yym802 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeBool(bool(x.ReadOnly)) @@ -12140,23 +13536,23 @@ func (x *QuobyteVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq794[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("readOnly")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym803 := z.EncBinary() - _ = yym803 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeBool(bool(x.ReadOnly)) } } } - if yyr794 || yy2arr794 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq794[3] { - yym805 := z.EncBinary() - _ = yym805 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.User)) @@ -12165,23 +13561,23 @@ func (x *QuobyteVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq794[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("user")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym806 := z.EncBinary() - _ = yym806 + yym14 
:= z.EncBinary() + _ = yym14 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.User)) } } } - if yyr794 || yy2arr794 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq794[4] { - yym808 := z.EncBinary() - _ = yym808 + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Group)) @@ -12190,19 +13586,19 @@ func (x *QuobyteVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq794[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("group")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym809 := z.EncBinary() - _ = yym809 + yym17 := z.EncBinary() + _ = yym17 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Group)) } } } - if yyr794 || yy2arr794 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -12215,25 +13611,25 @@ func (x *QuobyteVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym810 := z.DecBinary() - _ = yym810 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct811 := r.ContainerType() - if yyct811 == codecSelferValueTypeMap1234 { - yyl811 := r.ReadMapStart() - if yyl811 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl811, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct811 == codecSelferValueTypeArray1234 { - yyl811 := r.ReadArrayStart() - if yyl811 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl811, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -12245,12 +13641,12 @@ func (x *QuobyteVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys812Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys812Slc - var yyhl812 bool = l >= 0 - for yyj812 := 0; ; yyj812++ { - if yyhl812 { - if yyj812 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -12259,44 +13655,74 @@ func (x *QuobyteVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys812Slc = r.DecodeBytes(yys812Slc, true, true) - yys812 := string(yys812Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys812 { + switch yys3 { case "registry": if r.TryDecodeAsNil() { x.Registry = "" } else { - x.Registry = string(r.DecodeString()) + yyv4 := &x.Registry + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "volume": if r.TryDecodeAsNil() { x.Volume = "" } else { - x.Volume = string(r.DecodeString()) + yyv6 := &x.Volume + yym7 := z.DecBinary() + _ = yym7 + 
if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "readOnly": if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } } case "user": if r.TryDecodeAsNil() { x.User = "" } else { - x.User = string(r.DecodeString()) + yyv10 := &x.User + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } case "group": if r.TryDecodeAsNil() { x.Group = "" } else { - x.Group = string(r.DecodeString()) + yyv12 := &x.Group + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys812) - } // end switch yys812 - } // end for yyj812 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -12304,16 +13730,16 @@ func (x *QuobyteVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj818 int - var yyb818 bool - var yyhl818 bool = l >= 0 - yyj818++ - if yyhl818 { - yyb818 = yyj818 > l + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb818 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb818 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -12321,15 +13747,21 @@ func (x *QuobyteVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Registry = "" } else { - x.Registry = string(r.DecodeString()) + yyv15 := &x.Registry + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj818++ - if yyhl818 { - yyb818 = yyj818 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb818 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb818 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -12337,15 +13769,21 @@ func (x *QuobyteVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Volume = "" } else { - x.Volume = string(r.DecodeString()) + yyv17 := &x.Volume + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } } - yyj818++ - if yyhl818 { - yyb818 = yyj818 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb818 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb818 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -12353,15 +13791,21 @@ func (x *QuobyteVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv19 := &x.ReadOnly + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } } - yyj818++ - if yyhl818 { - yyb818 = yyj818 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb818 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb818 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -12369,15 +13813,21 @@ func (x *QuobyteVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.User = "" } else { - x.User = string(r.DecodeString()) + yyv21 := &x.User + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = 
r.DecodeString() + } } - yyj818++ - if yyhl818 { - yyb818 = yyj818 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb818 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb818 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -12385,20 +13835,26 @@ func (x *QuobyteVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Group = "" } else { - x.Group = string(r.DecodeString()) + yyv23 := &x.Group + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } } for { - yyj818++ - if yyhl818 { - yyb818 = yyj818 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb818 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb818 { + if yyb14 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj818-1, "") + z.DecStructFieldNotFound(yyj14-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -12410,37 +13866,37 @@ func (x *FlexVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym824 := z.EncBinary() - _ = yym824 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep825 := !z.EncBinary() - yy2arr825 := z.EncBasicHandle().StructToArray - var yyq825 [5]bool - _, _, _ = yysep825, yyq825, yy2arr825 - const yyr825 bool = false - yyq825[1] = x.FSType != "" - yyq825[2] = x.SecretRef != nil - yyq825[3] = x.ReadOnly != false - yyq825[4] = len(x.Options) != 0 - var yynn825 int - if yyr825 || yy2arr825 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + yyq2[2] = x.SecretRef != nil + yyq2[3] = x.ReadOnly != false + yyq2[4] = len(x.Options) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn825 = 1 - for _, b := range yyq825 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn825++ + yynn2++ } } - r.EncodeMapStart(yynn825) - yynn825 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr825 || yy2arr825 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym827 := z.EncBinary() - _ = yym827 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Driver)) @@ -12449,18 +13905,18 @@ func (x *FlexVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("driver")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym828 := z.EncBinary() - _ = yym828 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Driver)) } } - if yyr825 || yy2arr825 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq825[1] { - yym830 := z.EncBinary() - _ = yym830 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) @@ -12469,21 +13925,21 @@ func (x *FlexVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq825[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("fsType")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym831 := z.EncBinary() - _ = yym831 + yym8 := z.EncBinary() + _ = yym8 if false { } else { 
r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) } } } - if yyr825 || yy2arr825 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq825[2] { + if yyq2[2] { if x.SecretRef == nil { r.EncodeNil() } else { @@ -12493,7 +13949,7 @@ func (x *FlexVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq825[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("secretRef")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -12504,11 +13960,11 @@ func (x *FlexVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr825 || yy2arr825 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq825[3] { - yym834 := z.EncBinary() - _ = yym834 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeBool(bool(x.ReadOnly)) @@ -12517,26 +13973,26 @@ func (x *FlexVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq825[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("readOnly")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym835 := z.EncBinary() - _ = yym835 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeBool(bool(x.ReadOnly)) } } } - if yyr825 || yy2arr825 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq825[4] { + if yyq2[4] { if x.Options == nil { r.EncodeNil() } else { - yym837 := z.EncBinary() - _ = yym837 + yym16 := z.EncBinary() + _ = yym16 if false { } else { z.F.EncMapStringStringV(x.Options, false, e) @@ -12546,15 +14002,15 @@ func (x *FlexVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq825[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("options")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Options == nil { r.EncodeNil() } else { - yym838 := z.EncBinary() - _ = yym838 + yym17 := z.EncBinary() + _ = yym17 if false { } else { z.F.EncMapStringStringV(x.Options, false, e) @@ -12562,7 +14018,7 @@ func (x *FlexVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr825 || yy2arr825 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -12575,25 +14031,25 @@ func (x *FlexVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym839 := z.DecBinary() - _ = yym839 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct840 := r.ContainerType() - if yyct840 == codecSelferValueTypeMap1234 { - yyl840 := r.ReadMapStart() - if yyl840 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl840, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct840 == codecSelferValueTypeArray1234 { - yyl840 := r.ReadArrayStart() - if yyl840 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl840, d) + x.codecDecodeSelfFromArray(yyl2, 
d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -12605,12 +14061,12 @@ func (x *FlexVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys841Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys841Slc - var yyhl841 bool = l >= 0 - for yyj841 := 0; ; yyj841++ { - if yyhl841 { - if yyj841 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -12619,21 +14075,33 @@ func (x *FlexVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys841Slc = r.DecodeBytes(yys841Slc, true, true) - yys841 := string(yys841Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys841 { + switch yys3 { case "driver": if r.TryDecodeAsNil() { x.Driver = "" } else { - x.Driver = string(r.DecodeString()) + yyv4 := &x.Driver + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "fsType": if r.TryDecodeAsNil() { x.FSType = "" } else { - x.FSType = string(r.DecodeString()) + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "secretRef": if r.TryDecodeAsNil() { @@ -12650,24 +14118,30 @@ func (x *FlexVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv9 := &x.ReadOnly + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*bool)(yyv9)) = r.DecodeBool() + } } case "options": if r.TryDecodeAsNil() { x.Options = nil } else { - yyv846 := &x.Options - yym847 := z.DecBinary() - _ = yym847 + yyv11 := &x.Options + yym12 := z.DecBinary() + _ = yym12 if false { } else { - z.F.DecMapStringStringX(yyv846, false, d) + z.F.DecMapStringStringX(yyv11, false, d) } } default: - z.DecStructFieldNotFound(-1, yys841) - } // end switch yys841 - } // end for yyj841 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -12675,16 +14149,16 @@ func (x *FlexVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj848 int - var yyb848 bool - var yyhl848 bool = l >= 0 - yyj848++ - if yyhl848 { - yyb848 = yyj848 > l + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb848 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb848 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -12692,15 +14166,21 @@ func (x *FlexVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Driver = "" } else { - x.Driver = string(r.DecodeString()) + yyv14 := &x.Driver + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } - yyj848++ - if yyhl848 { - yyb848 = yyj848 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb848 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb848 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -12708,15 +14188,21 @@ func (x 
*FlexVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.FSType = "" } else { - x.FSType = string(r.DecodeString()) + yyv16 := &x.FSType + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } } - yyj848++ - if yyhl848 { - yyb848 = yyj848 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb848 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb848 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -12731,13 +14217,13 @@ func (x *FlexVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) } x.SecretRef.CodecDecodeSelf(d) } - yyj848++ - if yyhl848 { - yyb848 = yyj848 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb848 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb848 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -12745,15 +14231,21 @@ func (x *FlexVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv19 := &x.ReadOnly + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } } - yyj848++ - if yyhl848 { - yyb848 = yyj848 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb848 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb848 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -12761,26 +14253,26 @@ func (x *FlexVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Options = nil } else { - yyv853 := &x.Options - yym854 := z.DecBinary() - _ = yym854 + yyv21 := &x.Options + yym22 := z.DecBinary() + _ = yym22 if false { } else { - z.F.DecMapStringStringX(yyv853, false, d) + z.F.DecMapStringStringX(yyv21, false, d) } } for { - yyj848++ - if yyhl848 { - yyb848 = yyj848 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb848 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb848 { + if yyb13 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj848-1, "") + z.DecStructFieldNotFound(yyj13-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -12792,36 +14284,36 @@ func (x *AWSElasticBlockStoreVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) if x == nil { r.EncodeNil() } else { - yym855 := z.EncBinary() - _ = yym855 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep856 := !z.EncBinary() - yy2arr856 := z.EncBasicHandle().StructToArray - var yyq856 [4]bool - _, _, _ = yysep856, yyq856, yy2arr856 - const yyr856 bool = false - yyq856[1] = x.FSType != "" - yyq856[2] = x.Partition != 0 - yyq856[3] = x.ReadOnly != false - var yynn856 int - if yyr856 || yy2arr856 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + yyq2[2] = x.Partition != 0 + yyq2[3] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn856 = 1 - for _, b := range yyq856 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn856++ + yynn2++ } } - r.EncodeMapStart(yynn856) - yynn856 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr856 || yy2arr856 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym858 := z.EncBinary() - _ = yym858 + yym4 := z.EncBinary() + 
_ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) @@ -12830,18 +14322,18 @@ func (x *AWSElasticBlockStoreVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("volumeID")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym859 := z.EncBinary() - _ = yym859 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) } } - if yyr856 || yy2arr856 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq856[1] { - yym861 := z.EncBinary() - _ = yym861 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) @@ -12850,23 +14342,23 @@ func (x *AWSElasticBlockStoreVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq856[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("fsType")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym862 := z.EncBinary() - _ = yym862 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) } } } - if yyr856 || yy2arr856 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq856[2] { - yym864 := z.EncBinary() - _ = yym864 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeInt(int64(x.Partition)) @@ -12875,23 +14367,23 @@ func (x *AWSElasticBlockStoreVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) r.EncodeInt(0) } } else { - if yyq856[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("partition")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym865 := z.EncBinary() - _ = yym865 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeInt(int64(x.Partition)) } } } - if yyr856 || yy2arr856 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq856[3] { - yym867 := z.EncBinary() - _ = yym867 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeBool(bool(x.ReadOnly)) @@ -12900,19 +14392,19 @@ func (x *AWSElasticBlockStoreVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) r.EncodeBool(false) } } else { - if yyq856[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("readOnly")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym868 := z.EncBinary() - _ = yym868 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeBool(bool(x.ReadOnly)) } } } - if yyr856 || yy2arr856 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -12925,25 +14417,25 @@ func (x *AWSElasticBlockStoreVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym869 := z.DecBinary() - _ = yym869 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct870 := r.ContainerType() - if yyct870 == codecSelferValueTypeMap1234 { - yyl870 := r.ReadMapStart() - if yyl870 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if 
yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl870, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct870 == codecSelferValueTypeArray1234 { - yyl870 := r.ReadArrayStart() - if yyl870 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl870, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -12955,12 +14447,12 @@ func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromMap(l int, d *code var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys871Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys871Slc - var yyhl871 bool = l >= 0 - for yyj871 := 0; ; yyj871++ { - if yyhl871 { - if yyj871 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -12969,38 +14461,62 @@ func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromMap(l int, d *code } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys871Slc = r.DecodeBytes(yys871Slc, true, true) - yys871 := string(yys871Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys871 { + switch yys3 { case "volumeID": if r.TryDecodeAsNil() { x.VolumeID = "" } else { - x.VolumeID = string(r.DecodeString()) + yyv4 := &x.VolumeID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "fsType": if r.TryDecodeAsNil() { x.FSType = "" } else { - x.FSType = string(r.DecodeString()) + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "partition": if r.TryDecodeAsNil() { x.Partition = 0 } else { - x.Partition = int32(r.DecodeInt(32)) + yyv8 := &x.Partition + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } } case "readOnly": if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv10 := &x.ReadOnly + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } } default: - z.DecStructFieldNotFound(-1, yys871) - } // end switch yys871 - } // end for yyj871 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -13008,16 +14524,16 @@ func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromArray(l int, d *co var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj876 int - var yyb876 bool - var yyhl876 bool = l >= 0 - yyj876++ - if yyhl876 { - yyb876 = yyj876 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb876 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb876 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13025,15 +14541,21 @@ func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromArray(l int, d *co if r.TryDecodeAsNil() { x.VolumeID = "" } else { - x.VolumeID = string(r.DecodeString()) + yyv13 := &x.VolumeID + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + 
*((*string)(yyv13)) = r.DecodeString() + } } - yyj876++ - if yyhl876 { - yyb876 = yyj876 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb876 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb876 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13041,15 +14563,21 @@ func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromArray(l int, d *co if r.TryDecodeAsNil() { x.FSType = "" } else { - x.FSType = string(r.DecodeString()) + yyv15 := &x.FSType + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj876++ - if yyhl876 { - yyb876 = yyj876 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb876 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb876 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13057,15 +14585,21 @@ func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromArray(l int, d *co if r.TryDecodeAsNil() { x.Partition = 0 } else { - x.Partition = int32(r.DecodeInt(32)) + yyv17 := &x.Partition + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + } } - yyj876++ - if yyhl876 { - yyb876 = yyj876 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb876 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb876 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13073,20 +14607,26 @@ func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromArray(l int, d *co if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv19 := &x.ReadOnly + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } } for { - yyj876++ - if yyhl876 { - yyb876 = yyj876 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb876 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb876 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj876-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -13098,35 +14638,35 @@ func (x *GitRepoVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym881 := z.EncBinary() - _ = yym881 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep882 := !z.EncBinary() - yy2arr882 := z.EncBasicHandle().StructToArray - var yyq882 [3]bool - _, _, _ = yysep882, yyq882, yy2arr882 - const yyr882 bool = false - yyq882[1] = x.Revision != "" - yyq882[2] = x.Directory != "" - var yynn882 int - if yyr882 || yy2arr882 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Revision != "" + yyq2[2] = x.Directory != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn882 = 1 - for _, b := range yyq882 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn882++ + yynn2++ } } - r.EncodeMapStart(yynn882) - yynn882 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr882 || yy2arr882 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym884 := z.EncBinary() - _ = yym884 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Repository)) @@ -13135,18 +14675,18 @@ func (x *GitRepoVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { 
z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("repository")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym885 := z.EncBinary() - _ = yym885 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Repository)) } } - if yyr882 || yy2arr882 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq882[1] { - yym887 := z.EncBinary() - _ = yym887 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Revision)) @@ -13155,23 +14695,23 @@ func (x *GitRepoVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq882[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("revision")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym888 := z.EncBinary() - _ = yym888 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Revision)) } } } - if yyr882 || yy2arr882 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq882[2] { - yym890 := z.EncBinary() - _ = yym890 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Directory)) @@ -13180,19 +14720,19 @@ func (x *GitRepoVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq882[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("directory")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym891 := z.EncBinary() - _ = yym891 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Directory)) } } } - if yyr882 || yy2arr882 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -13205,25 +14745,25 @@ func (x *GitRepoVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym892 := z.DecBinary() - _ = yym892 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct893 := r.ContainerType() - if yyct893 == codecSelferValueTypeMap1234 { - yyl893 := r.ReadMapStart() - if yyl893 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl893, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct893 == codecSelferValueTypeArray1234 { - yyl893 := r.ReadArrayStart() - if yyl893 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl893, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -13235,12 +14775,12 @@ func (x *GitRepoVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys894Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys894Slc - var yyhl894 bool = l >= 0 - for yyj894 
:= 0; ; yyj894++ { - if yyhl894 { - if yyj894 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -13249,32 +14789,50 @@ func (x *GitRepoVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys894Slc = r.DecodeBytes(yys894Slc, true, true) - yys894 := string(yys894Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys894 { + switch yys3 { case "repository": if r.TryDecodeAsNil() { x.Repository = "" } else { - x.Repository = string(r.DecodeString()) + yyv4 := &x.Repository + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "revision": if r.TryDecodeAsNil() { x.Revision = "" } else { - x.Revision = string(r.DecodeString()) + yyv6 := &x.Revision + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "directory": if r.TryDecodeAsNil() { x.Directory = "" } else { - x.Directory = string(r.DecodeString()) + yyv8 := &x.Directory + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys894) - } // end switch yys894 - } // end for yyj894 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -13282,16 +14840,16 @@ func (x *GitRepoVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj898 int - var yyb898 bool - var yyhl898 bool = l >= 0 - yyj898++ - if yyhl898 { - yyb898 = yyj898 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb898 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb898 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13299,15 +14857,21 @@ func (x *GitRepoVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Repository = "" } else { - x.Repository = string(r.DecodeString()) + yyv11 := &x.Repository + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } - yyj898++ - if yyhl898 { - yyb898 = yyj898 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb898 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb898 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13315,15 +14879,21 @@ func (x *GitRepoVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Revision = "" } else { - x.Revision = string(r.DecodeString()) + yyv13 := &x.Revision + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj898++ - if yyhl898 { - yyb898 = yyj898 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb898 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb898 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13331,20 +14901,26 @@ func (x *GitRepoVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Directory = "" } else { - x.Directory = string(r.DecodeString()) + yyv15 := &x.Directory + yym16 := 
z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } for { - yyj898++ - if yyhl898 { - yyb898 = yyj898 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb898 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb898 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj898-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -13356,37 +14932,38 @@ func (x *SecretVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym902 := z.EncBinary() - _ = yym902 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep903 := !z.EncBinary() - yy2arr903 := z.EncBasicHandle().StructToArray - var yyq903 [3]bool - _, _, _ = yysep903, yyq903, yy2arr903 - const yyr903 bool = false - yyq903[0] = x.SecretName != "" - yyq903[1] = len(x.Items) != 0 - yyq903[2] = x.DefaultMode != nil - var yynn903 int - if yyr903 || yy2arr903 { - r.EncodeArrayStart(3) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.SecretName != "" + yyq2[1] = len(x.Items) != 0 + yyq2[2] = x.DefaultMode != nil + yyq2[3] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) } else { - yynn903 = 0 - for _, b := range yyq903 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn903++ + yynn2++ } } - r.EncodeMapStart(yynn903) - yynn903 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr903 || yy2arr903 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq903[0] { - yym905 := z.EncBinary() - _ = yym905 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) @@ -13395,26 +14972,26 @@ func (x *SecretVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq903[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("secretName")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym906 := z.EncBinary() - _ = yym906 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) } } } - if yyr903 || yy2arr903 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq903[1] { + if yyq2[1] { if x.Items == nil { r.EncodeNil() } else { - yym908 := z.EncBinary() - _ = yym908 + yym7 := z.EncBinary() + _ = yym7 if false { } else { h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) @@ -13424,15 +15001,15 @@ func (x *SecretVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq903[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("items")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Items == nil { r.EncodeNil() } else { - yym909 := z.EncBinary() - _ = yym909 + yym8 := z.EncBinary() + _ = yym8 if false { } else { h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) @@ -13440,42 +15017,77 @@ func (x *SecretVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr903 || yy2arr903 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq903[2] { + if yyq2[2] { if x.DefaultMode == 
nil { r.EncodeNil() } else { - yy911 := *x.DefaultMode - yym912 := z.EncBinary() - _ = yym912 + yy10 := *x.DefaultMode + yym11 := z.EncBinary() + _ = yym11 if false { } else { - r.EncodeInt(int64(yy911)) + r.EncodeInt(int64(yy10)) } } } else { r.EncodeNil() } } else { - if yyq903[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("defaultMode")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.DefaultMode == nil { r.EncodeNil() } else { - yy913 := *x.DefaultMode - yym914 := z.EncBinary() - _ = yym914 + yy12 := *x.DefaultMode + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy15 := *x.Optional + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(yy15)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy17 := *x.Optional + yym18 := z.EncBinary() + _ = yym18 if false { } else { - r.EncodeInt(int64(yy913)) + r.EncodeBool(bool(yy17)) } } } } - if yyr903 || yy2arr903 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -13488,25 +15100,25 @@ func (x *SecretVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym915 := z.DecBinary() - _ = yym915 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct916 := r.ContainerType() - if yyct916 == codecSelferValueTypeMap1234 { - yyl916 := r.ReadMapStart() - if yyl916 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl916, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct916 == codecSelferValueTypeArray1234 { - yyl916 := r.ReadArrayStart() - if yyl916 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl916, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -13518,12 +15130,12 @@ func (x *SecretVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys917Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys917Slc - var yyhl917 bool = l >= 0 - for yyj917 := 0; ; yyj917++ { - if yyhl917 { - if yyj917 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -13532,26 +15144,32 @@ func (x *SecretVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys917Slc = r.DecodeBytes(yys917Slc, true, true) - yys917 := string(yys917Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) 
+ yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys917 { + switch yys3 { case "secretName": if r.TryDecodeAsNil() { x.SecretName = "" } else { - x.SecretName = string(r.DecodeString()) + yyv4 := &x.SecretName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv919 := &x.Items - yym920 := z.DecBinary() - _ = yym920 + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 if false { } else { - h.decSliceKeyToPath((*[]KeyToPath)(yyv919), d) + h.decSliceKeyToPath((*[]KeyToPath)(yyv6), d) } } case "defaultMode": @@ -13563,17 +15181,33 @@ func (x *SecretVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) if x.DefaultMode == nil { x.DefaultMode = new(int32) } - yym922 := z.DecBinary() - _ = yym922 + yym9 := z.DecBinary() + _ = yym9 if false { } else { *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) } } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } default: - z.DecStructFieldNotFound(-1, yys917) - } // end switch yys917 - } // end for yyj917 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -13581,16 +15215,16 @@ func (x *SecretVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj923 int - var yyb923 bool - var yyhl923 bool = l >= 0 - yyj923++ - if yyhl923 { - yyb923 = yyj923 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb923 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb923 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13598,15 +15232,21 @@ func (x *SecretVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.SecretName = "" } else { - x.SecretName = string(r.DecodeString()) + yyv13 := &x.SecretName + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj923++ - if yyhl923 { - yyb923 = yyj923 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb923 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb923 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13614,21 +15254,21 @@ func (x *SecretVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.Items = nil } else { - yyv925 := &x.Items - yym926 := z.DecBinary() - _ = yym926 + yyv15 := &x.Items + yym16 := z.DecBinary() + _ = yym16 if false { } else { - h.decSliceKeyToPath((*[]KeyToPath)(yyv925), d) + h.decSliceKeyToPath((*[]KeyToPath)(yyv15), d) } } - yyj923++ - if yyhl923 { - yyb923 = yyj923 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb923 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb923 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13641,25 +15281,378 @@ func (x *SecretVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decode if x.DefaultMode == nil { x.DefaultMode = new(int32) } - yym928 := z.DecBinary() - _ = yym928 + yym18 := z.DecBinary() + _ = yym18 if false { } else { 
*((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) } } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SecretProjection) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = len(x.Items) != 0 + yyq2[2] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Items == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy10 := *x.Optional + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy12 := *x.Optional + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SecretProjection) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SecretProjection) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv6), d) + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SecretProjection) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv11 := &x.Name + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } for { - yyj923++ - if yyhl923 { - yyb923 = yyj923 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb923 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb923 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj923-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -13671,34 +15664,34 @@ func (x *NFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym929 := z.EncBinary() - _ = yym929 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep930 := !z.EncBinary() - yy2arr930 := z.EncBasicHandle().StructToArray - var yyq930 [3]bool - _, _, _ = yysep930, yyq930, yy2arr930 - const yyr930 bool = false - yyq930[2] = x.ReadOnly != false - var yynn930 int - if yyr930 || yy2arr930 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn930 = 2 - for _, b := range yyq930 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn930++ + yynn2++ } } - r.EncodeMapStart(yynn930) - yynn930 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr930 || yy2arr930 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym932 := z.EncBinary() - _ = yym932 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Server)) @@ -13707,17 +15700,17 @@ func (x *NFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("server")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym933 := z.EncBinary() - _ = yym933 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Server)) } } - if yyr930 || yy2arr930 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym935 := z.EncBinary() - _ = yym935 + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Path)) @@ -13726,18 +15719,18 @@ func (x *NFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("path")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym936 := z.EncBinary() - _ = yym936 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Path)) } } - if yyr930 || yy2arr930 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq930[2] { - yym938 := z.EncBinary() - _ = yym938 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeBool(bool(x.ReadOnly)) @@ -13746,19 +15739,19 @@ func (x *NFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq930[2] { + if yyq2[2] { 
z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("readOnly")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym939 := z.EncBinary() - _ = yym939 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeBool(bool(x.ReadOnly)) } } } - if yyr930 || yy2arr930 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -13771,25 +15764,25 @@ func (x *NFSVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym940 := z.DecBinary() - _ = yym940 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct941 := r.ContainerType() - if yyct941 == codecSelferValueTypeMap1234 { - yyl941 := r.ReadMapStart() - if yyl941 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl941, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct941 == codecSelferValueTypeArray1234 { - yyl941 := r.ReadArrayStart() - if yyl941 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl941, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -13801,12 +15794,12 @@ func (x *NFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys942Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys942Slc - var yyhl942 bool = l >= 0 - for yyj942 := 0; ; yyj942++ { - if yyhl942 { - if yyj942 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -13815,32 +15808,50 @@ func (x *NFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys942Slc = r.DecodeBytes(yys942Slc, true, true) - yys942 := string(yys942Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys942 { + switch yys3 { case "server": if r.TryDecodeAsNil() { x.Server = "" } else { - x.Server = string(r.DecodeString()) + yyv4 := &x.Server + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "path": if r.TryDecodeAsNil() { x.Path = "" } else { - x.Path = string(r.DecodeString()) + yyv6 := &x.Path + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "readOnly": if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } } default: - z.DecStructFieldNotFound(-1, yys942) - } // end switch yys942 - } // end for yyj942 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -13848,16 +15859,16 @@ func (x *NFSVolumeSource) codecDecodeSelfFromArray(l int, d 
*codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj946 int - var yyb946 bool - var yyhl946 bool = l >= 0 - yyj946++ - if yyhl946 { - yyb946 = yyj946 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb946 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb946 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13865,15 +15876,21 @@ func (x *NFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Server = "" } else { - x.Server = string(r.DecodeString()) + yyv11 := &x.Server + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } - yyj946++ - if yyhl946 { - yyb946 = yyj946 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb946 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb946 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13881,15 +15898,21 @@ func (x *NFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Path = "" } else { - x.Path = string(r.DecodeString()) + yyv13 := &x.Path + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj946++ - if yyhl946 { - yyb946 = yyj946 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb946 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb946 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13897,20 +15920,26 @@ func (x *NFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } } for { - yyj946++ - if yyhl946 { - yyb946 = yyj946 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb946 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb946 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj946-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -13922,36 +15951,37 @@ func (x *ISCSIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym950 := z.EncBinary() - _ = yym950 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep951 := !z.EncBinary() - yy2arr951 := z.EncBasicHandle().StructToArray - var yyq951 [6]bool - _, _, _ = yysep951, yyq951, yy2arr951 - const yyr951 bool = false - yyq951[3] = x.ISCSIInterface != "" - yyq951[4] = x.FSType != "" - yyq951[5] = x.ReadOnly != false - var yynn951 int - if yyr951 || yy2arr951 { - r.EncodeArrayStart(6) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[3] = x.ISCSIInterface != "" + yyq2[4] = x.FSType != "" + yyq2[5] = x.ReadOnly != false + yyq2[6] = len(x.Portals) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) } else { - yynn951 = 3 - for _, b := range yyq951 { + yynn2 = 3 + for _, b := range yyq2 { if b { - yynn951++ + yynn2++ } } - r.EncodeMapStart(yynn951) - yynn951 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr951 || yy2arr951 { + if yyr2 || yy2arr2 { 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym953 := z.EncBinary() - _ = yym953 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.TargetPortal)) @@ -13960,17 +15990,17 @@ func (x *ISCSIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("targetPortal")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym954 := z.EncBinary() - _ = yym954 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.TargetPortal)) } } - if yyr951 || yy2arr951 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym956 := z.EncBinary() - _ = yym956 + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.IQN)) @@ -13979,17 +16009,17 @@ func (x *ISCSIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("iqn")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym957 := z.EncBinary() - _ = yym957 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.IQN)) } } - if yyr951 || yy2arr951 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym959 := z.EncBinary() - _ = yym959 + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeInt(int64(x.Lun)) @@ -13998,18 +16028,18 @@ func (x *ISCSIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("lun")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym960 := z.EncBinary() - _ = yym960 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeInt(int64(x.Lun)) } } - if yyr951 || yy2arr951 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq951[3] { - yym962 := z.EncBinary() - _ = yym962 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ISCSIInterface)) @@ -14018,23 +16048,23 @@ func (x *ISCSIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq951[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("iscsiInterface")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym963 := z.EncBinary() - _ = yym963 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ISCSIInterface)) } } } - if yyr951 || yy2arr951 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq951[4] { - yym965 := z.EncBinary() - _ = yym965 + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) @@ -14043,23 +16073,23 @@ func (x *ISCSIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq951[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("fsType")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym966 := z.EncBinary() - _ = yym966 + yym17 := z.EncBinary() + _ = yym17 if false { } else { r.EncodeString(codecSelferC_UTF81234, 
string(x.FSType)) } } } - if yyr951 || yy2arr951 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq951[5] { - yym968 := z.EncBinary() - _ = yym968 + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 if false { } else { r.EncodeBool(bool(x.ReadOnly)) @@ -14068,19 +16098,52 @@ func (x *ISCSIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq951[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("readOnly")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym969 := z.EncBinary() - _ = yym969 + yym20 := z.EncBinary() + _ = yym20 if false { } else { r.EncodeBool(bool(x.ReadOnly)) } } } - if yyr951 || yy2arr951 { + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.Portals == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + z.F.EncSliceStringV(x.Portals, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("portals")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Portals == nil { + r.EncodeNil() + } else { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + z.F.EncSliceStringV(x.Portals, false, e) + } + } + } + } + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -14093,25 +16156,25 @@ func (x *ISCSIVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym970 := z.DecBinary() - _ = yym970 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct971 := r.ContainerType() - if yyct971 == codecSelferValueTypeMap1234 { - yyl971 := r.ReadMapStart() - if yyl971 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl971, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct971 == codecSelferValueTypeArray1234 { - yyl971 := r.ReadArrayStart() - if yyl971 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl971, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -14123,12 +16186,12 @@ func (x *ISCSIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys972Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys972Slc - var yyhl972 bool = l >= 0 - for yyj972 := 0; ; yyj972++ { - if yyhl972 { - if yyj972 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -14137,50 +16200,98 @@ func (x *ISCSIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys972Slc = r.DecodeBytes(yys972Slc, true, true) - yys972 := string(yys972Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, 
true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys972 { + switch yys3 { case "targetPortal": if r.TryDecodeAsNil() { x.TargetPortal = "" } else { - x.TargetPortal = string(r.DecodeString()) + yyv4 := &x.TargetPortal + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "iqn": if r.TryDecodeAsNil() { x.IQN = "" } else { - x.IQN = string(r.DecodeString()) + yyv6 := &x.IQN + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "lun": if r.TryDecodeAsNil() { x.Lun = 0 } else { - x.Lun = int32(r.DecodeInt(32)) + yyv8 := &x.Lun + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } } case "iscsiInterface": if r.TryDecodeAsNil() { x.ISCSIInterface = "" } else { - x.ISCSIInterface = string(r.DecodeString()) + yyv10 := &x.ISCSIInterface + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } case "fsType": if r.TryDecodeAsNil() { x.FSType = "" } else { - x.FSType = string(r.DecodeString()) + yyv12 := &x.FSType + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } case "readOnly": if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv14 := &x.ReadOnly + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + case "portals": + if r.TryDecodeAsNil() { + x.Portals = nil + } else { + yyv16 := &x.Portals + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + z.F.DecSliceStringX(yyv16, false, d) + } } default: - z.DecStructFieldNotFound(-1, yys972) - } // end switch yys972 - } // end for yyj972 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -14188,16 +16299,16 @@ func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj979 int - var yyb979 bool - var yyhl979 bool = l >= 0 - yyj979++ - if yyhl979 { - yyb979 = yyj979 > l + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb979 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb979 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14205,15 +16316,21 @@ func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.TargetPortal = "" } else { - x.TargetPortal = string(r.DecodeString()) + yyv19 := &x.TargetPortal + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } } - yyj979++ - if yyhl979 { - yyb979 = yyj979 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb979 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb979 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14221,15 +16338,21 @@ func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.IQN = "" } else { - x.IQN = string(r.DecodeString()) + yyv21 := &x.IQN + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } } - yyj979++ - if yyhl979 { - yyb979 = yyj979 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb979 = r.CheckBreak() + yyb18 = 
r.CheckBreak() } - if yyb979 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14237,15 +16360,21 @@ func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.Lun = 0 } else { - x.Lun = int32(r.DecodeInt(32)) + yyv23 := &x.Lun + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } } - yyj979++ - if yyhl979 { - yyb979 = yyj979 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb979 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb979 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14253,15 +16382,21 @@ func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.ISCSIInterface = "" } else { - x.ISCSIInterface = string(r.DecodeString()) + yyv25 := &x.ISCSIInterface + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } } - yyj979++ - if yyhl979 { - yyb979 = yyj979 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb979 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb979 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14269,15 +16404,21 @@ func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.FSType = "" } else { - x.FSType = string(r.DecodeString()) + yyv27 := &x.FSType + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } } - yyj979++ - if yyhl979 { - yyb979 = yyj979 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb979 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb979 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14285,20 +16426,48 @@ func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv29 := &x.ReadOnly + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Portals = nil + } else { + yyv31 := &x.Portals + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + z.F.DecSliceStringX(yyv31, false, d) + } } for { - yyj979++ - if yyhl979 { - yyb979 = yyj979 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb979 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb979 { + if yyb18 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj979-1, "") + z.DecStructFieldNotFound(yyj18-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -14310,38 +16479,38 @@ func (x *FCVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym986 := z.EncBinary() - _ = yym986 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep987 := !z.EncBinary() - yy2arr987 := z.EncBasicHandle().StructToArray - var yyq987 [4]bool - _, _, _ = yysep987, yyq987, yy2arr987 - const yyr987 bool = false - yyq987[2] = x.FSType != "" - yyq987[3] = x.ReadOnly != false - var yynn987 int - if yyr987 || yy2arr987 { + yysep2 := !z.EncBinary() + 
yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.FSType != "" + yyq2[3] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn987 = 2 - for _, b := range yyq987 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn987++ + yynn2++ } } - r.EncodeMapStart(yynn987) - yynn987 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr987 || yy2arr987 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.TargetWWNs == nil { r.EncodeNil() } else { - yym989 := z.EncBinary() - _ = yym989 + yym4 := z.EncBinary() + _ = yym4 if false { } else { z.F.EncSliceStringV(x.TargetWWNs, false, e) @@ -14354,25 +16523,25 @@ func (x *FCVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x.TargetWWNs == nil { r.EncodeNil() } else { - yym990 := z.EncBinary() - _ = yym990 + yym5 := z.EncBinary() + _ = yym5 if false { } else { z.F.EncSliceStringV(x.TargetWWNs, false, e) } } } - if yyr987 || yy2arr987 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Lun == nil { r.EncodeNil() } else { - yy992 := *x.Lun - yym993 := z.EncBinary() - _ = yym993 + yy7 := *x.Lun + yym8 := z.EncBinary() + _ = yym8 if false { } else { - r.EncodeInt(int64(yy992)) + r.EncodeInt(int64(yy7)) } } } else { @@ -14382,20 +16551,20 @@ func (x *FCVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x.Lun == nil { r.EncodeNil() } else { - yy994 := *x.Lun - yym995 := z.EncBinary() - _ = yym995 + yy9 := *x.Lun + yym10 := z.EncBinary() + _ = yym10 if false { } else { - r.EncodeInt(int64(yy994)) + r.EncodeInt(int64(yy9)) } } } - if yyr987 || yy2arr987 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq987[2] { - yym997 := z.EncBinary() - _ = yym997 + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) @@ -14404,23 +16573,23 @@ func (x *FCVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq987[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("fsType")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym998 := z.EncBinary() - _ = yym998 + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) } } } - if yyr987 || yy2arr987 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq987[3] { - yym1000 := z.EncBinary() - _ = yym1000 + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 if false { } else { r.EncodeBool(bool(x.ReadOnly)) @@ -14429,19 +16598,19 @@ func (x *FCVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq987[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("readOnly")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1001 := z.EncBinary() - _ = yym1001 + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeBool(bool(x.ReadOnly)) } } } - if yyr987 || yy2arr987 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -14454,25 +16623,25 @@ func (x *FCVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := 
codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1002 := z.DecBinary() - _ = yym1002 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1003 := r.ContainerType() - if yyct1003 == codecSelferValueTypeMap1234 { - yyl1003 := r.ReadMapStart() - if yyl1003 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1003, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1003 == codecSelferValueTypeArray1234 { - yyl1003 := r.ReadArrayStart() - if yyl1003 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1003, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -14484,12 +16653,12 @@ func (x *FCVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1004Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1004Slc - var yyhl1004 bool = l >= 0 - for yyj1004 := 0; ; yyj1004++ { - if yyhl1004 { - if yyj1004 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -14498,20 +16667,20 @@ func (x *FCVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1004Slc = r.DecodeBytes(yys1004Slc, true, true) - yys1004 := string(yys1004Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1004 { + switch yys3 { case "targetWWNs": if r.TryDecodeAsNil() { x.TargetWWNs = nil } else { - yyv1005 := &x.TargetWWNs - yym1006 := z.DecBinary() - _ = yym1006 + yyv4 := &x.TargetWWNs + yym5 := z.DecBinary() + _ = yym5 if false { } else { - z.F.DecSliceStringX(yyv1005, false, d) + z.F.DecSliceStringX(yyv4, false, d) } } case "lun": @@ -14523,8 +16692,8 @@ func (x *FCVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.Lun == nil { x.Lun = new(int32) } - yym1008 := z.DecBinary() - _ = yym1008 + yym7 := z.DecBinary() + _ = yym7 if false { } else { *((*int32)(x.Lun)) = int32(r.DecodeInt(32)) @@ -14534,18 +16703,30 @@ func (x *FCVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.FSType = "" } else { - x.FSType = string(r.DecodeString()) + yyv8 := &x.FSType + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } case "readOnly": if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv10 := &x.ReadOnly + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } } default: - z.DecStructFieldNotFound(-1, yys1004) - } // end switch yys1004 - } // end for yyj1004 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -14553,16 +16734,16 @@ func (x *FCVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1011 int - 
var yyb1011 bool - var yyhl1011 bool = l >= 0 - yyj1011++ - if yyhl1011 { - yyb1011 = yyj1011 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1011 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1011 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14570,21 +16751,21 @@ func (x *FCVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.TargetWWNs = nil } else { - yyv1012 := &x.TargetWWNs - yym1013 := z.DecBinary() - _ = yym1013 + yyv13 := &x.TargetWWNs + yym14 := z.DecBinary() + _ = yym14 if false { } else { - z.F.DecSliceStringX(yyv1012, false, d) + z.F.DecSliceStringX(yyv13, false, d) } } - yyj1011++ - if yyhl1011 { - yyb1011 = yyj1011 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1011 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1011 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14597,20 +16778,20 @@ func (x *FCVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.Lun == nil { x.Lun = new(int32) } - yym1015 := z.DecBinary() - _ = yym1015 + yym16 := z.DecBinary() + _ = yym16 if false { } else { *((*int32)(x.Lun)) = int32(r.DecodeInt(32)) } } - yyj1011++ - if yyhl1011 { - yyb1011 = yyj1011 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1011 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1011 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14618,15 +16799,21 @@ func (x *FCVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.FSType = "" } else { - x.FSType = string(r.DecodeString()) + yyv17 := &x.FSType + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } } - yyj1011++ - if yyhl1011 { - yyb1011 = yyj1011 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1011 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1011 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14634,20 +16821,26 @@ func (x *FCVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv19 := &x.ReadOnly + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } } for { - yyj1011++ - if yyhl1011 { - yyb1011 = yyj1011 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1011 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1011 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1011-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -14659,34 +16852,34 @@ func (x *AzureFileVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1018 := z.EncBinary() - _ = yym1018 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1019 := !z.EncBinary() - yy2arr1019 := z.EncBasicHandle().StructToArray - var yyq1019 [3]bool - _, _, _ = yysep1019, yyq1019, yy2arr1019 - const yyr1019 bool = false - yyq1019[2] = x.ReadOnly != false - var yynn1019 int - if yyr1019 || yy2arr1019 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] 
= x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn1019 = 2 - for _, b := range yyq1019 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn1019++ + yynn2++ } } - r.EncodeMapStart(yynn1019) - yynn1019 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1019 || yy2arr1019 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1021 := z.EncBinary() - _ = yym1021 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) @@ -14695,17 +16888,17 @@ func (x *AzureFileVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("secretName")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1022 := z.EncBinary() - _ = yym1022 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) } } - if yyr1019 || yy2arr1019 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1024 := z.EncBinary() - _ = yym1024 + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ShareName)) @@ -14714,18 +16907,18 @@ func (x *AzureFileVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("shareName")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1025 := z.EncBinary() - _ = yym1025 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ShareName)) } } - if yyr1019 || yy2arr1019 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1019[2] { - yym1027 := z.EncBinary() - _ = yym1027 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeBool(bool(x.ReadOnly)) @@ -14734,19 +16927,19 @@ func (x *AzureFileVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq1019[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("readOnly")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1028 := z.EncBinary() - _ = yym1028 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeBool(bool(x.ReadOnly)) } } } - if yyr1019 || yy2arr1019 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -14759,25 +16952,25 @@ func (x *AzureFileVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1029 := z.DecBinary() - _ = yym1029 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1030 := r.ContainerType() - if yyct1030 == codecSelferValueTypeMap1234 { - yyl1030 := r.ReadMapStart() - if yyl1030 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1030, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1030 == codecSelferValueTypeArray1234 { - yyl1030 := r.ReadArrayStart() - if yyl1030 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1030, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -14789,12 +16982,12 @@ func (x *AzureFileVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1031Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1031Slc - var yyhl1031 bool = l >= 0 - for yyj1031 := 0; ; yyj1031++ { - if yyhl1031 { - if yyj1031 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -14803,32 +16996,50 @@ func (x *AzureFileVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decod } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1031Slc = r.DecodeBytes(yys1031Slc, true, true) - yys1031 := string(yys1031Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1031 { + switch yys3 { case "secretName": if r.TryDecodeAsNil() { x.SecretName = "" } else { - x.SecretName = string(r.DecodeString()) + yyv4 := &x.SecretName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "shareName": if r.TryDecodeAsNil() { x.ShareName = "" } else { - x.ShareName = string(r.DecodeString()) + yyv6 := &x.ShareName + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "readOnly": if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } } default: - z.DecStructFieldNotFound(-1, yys1031) - } // end switch yys1031 - } // end for yyj1031 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -14836,16 +17047,16 @@ func (x *AzureFileVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1035 int - var yyb1035 bool - var yyhl1035 bool = l >= 0 - yyj1035++ - if yyhl1035 { - yyb1035 = yyj1035 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1035 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1035 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14853,15 +17064,21 @@ func (x *AzureFileVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.SecretName = "" } else { - x.SecretName = string(r.DecodeString()) + yyv11 := &x.SecretName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } - yyj1035++ - if yyhl1035 { - yyb1035 = yyj1035 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1035 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1035 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14869,15 +17086,21 @@ func (x *AzureFileVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.ShareName = "" } else { - x.ShareName = string(r.DecodeString()) + yyv13 := &x.ShareName + yym14 := z.DecBinary() + _ = yym14 + if 
false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj1035++ - if yyhl1035 { - yyb1035 = yyj1035 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1035 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1035 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14885,20 +17108,26 @@ func (x *AzureFileVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.ReadOnly = false } else { - x.ReadOnly = bool(r.DecodeBool()) + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } } for { - yyj1035++ - if yyhl1035 { - yyb1035 = yyj1035 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1035 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1035 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1035-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -14910,34 +17139,34 @@ func (x *VsphereVirtualDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1039 := z.EncBinary() - _ = yym1039 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1040 := !z.EncBinary() - yy2arr1040 := z.EncBasicHandle().StructToArray - var yyq1040 [2]bool - _, _, _ = yysep1040, yyq1040, yy2arr1040 - const yyr1040 bool = false - yyq1040[1] = x.FSType != "" - var yynn1040 int - if yyr1040 || yy2arr1040 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1040 = 1 - for _, b := range yyq1040 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1040++ + yynn2++ } } - r.EncodeMapStart(yynn1040) - yynn1040 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1040 || yy2arr1040 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1042 := z.EncBinary() - _ = yym1042 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.VolumePath)) @@ -14946,18 +17175,18 @@ func (x *VsphereVirtualDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("volumePath")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1043 := z.EncBinary() - _ = yym1043 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.VolumePath)) } } - if yyr1040 || yy2arr1040 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1040[1] { - yym1045 := z.EncBinary() - _ = yym1045 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) @@ -14966,19 +17195,19 @@ func (x *VsphereVirtualDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1040[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("fsType")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1046 := z.EncBinary() - _ = yym1046 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, 
string(x.FSType)) } } } - if yyr1040 || yy2arr1040 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -14991,25 +17220,25 @@ func (x *VsphereVirtualDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1047 := z.DecBinary() - _ = yym1047 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1048 := r.ContainerType() - if yyct1048 == codecSelferValueTypeMap1234 { - yyl1048 := r.ReadMapStart() - if yyl1048 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1048, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1048 == codecSelferValueTypeArray1234 { - yyl1048 := r.ReadArrayStart() - if yyl1048 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1048, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -15021,12 +17250,12 @@ func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1049Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1049Slc - var yyhl1049 bool = l >= 0 - for yyj1049 := 0; ; yyj1049++ { - if yyhl1049 { - if yyj1049 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -15035,26 +17264,38 @@ func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1 } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1049Slc = r.DecodeBytes(yys1049Slc, true, true) - yys1049 := string(yys1049Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1049 { + switch yys3 { case "volumePath": if r.TryDecodeAsNil() { x.VolumePath = "" } else { - x.VolumePath = string(r.DecodeString()) + yyv4 := &x.VolumePath + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "fsType": if r.TryDecodeAsNil() { x.FSType = "" } else { - x.FSType = string(r.DecodeString()) + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys1049) - } // end switch yys1049 - } // end for yyj1049 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -15062,16 +17303,16 @@ func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromArray(l int, d *code var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1052 int - var yyb1052 bool - var yyhl1052 bool = l >= 0 - yyj1052++ - if yyhl1052 { - yyb1052 = yyj1052 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1052 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1052 { + if yyb8 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -15079,15 +17320,21 @@ func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromArray(l int, d *code if r.TryDecodeAsNil() { x.VolumePath = "" } else { - x.VolumePath = string(r.DecodeString()) + yyv9 := &x.VolumePath + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } } - yyj1052++ - if yyhl1052 { - yyb1052 = yyj1052 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1052 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1052 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -15095,20 +17342,26 @@ func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromArray(l int, d *code if r.TryDecodeAsNil() { x.FSType = "" } else { - x.FSType = string(r.DecodeString()) + yyv11 := &x.FSType + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } for { - yyj1052++ - if yyhl1052 { - yyb1052 = yyj1052 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1052 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1052 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1052-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -15120,34 +17373,34 @@ func (x *PhotonPersistentDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) if x == nil { r.EncodeNil() } else { - yym1055 := z.EncBinary() - _ = yym1055 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1056 := !z.EncBinary() - yy2arr1056 := z.EncBasicHandle().StructToArray - var yyq1056 [2]bool - _, _, _ = yysep1056, yyq1056, yy2arr1056 - const yyr1056 bool = false - yyq1056[1] = x.FSType != "" - var yynn1056 int - if yyr1056 || yy2arr1056 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1056 = 1 - for _, b := range yyq1056 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1056++ + yynn2++ } } - r.EncodeMapStart(yynn1056) - yynn1056 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1056 || yy2arr1056 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1058 := z.EncBinary() - _ = yym1058 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.PdID)) @@ -15156,18 +17409,18 @@ func (x *PhotonPersistentDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("pdID")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1059 := z.EncBinary() - _ = yym1059 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.PdID)) } } - if yyr1056 || yy2arr1056 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1056[1] { - yym1061 := z.EncBinary() - _ = yym1061 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) @@ -15176,19 +17429,19 @@ func (x *PhotonPersistentDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1056[1] { + if yyq2[1] { 
z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("fsType")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1062 := z.EncBinary() - _ = yym1062 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) } } } - if yyr1056 || yy2arr1056 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -15201,25 +17454,25 @@ func (x *PhotonPersistentDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1063 := z.DecBinary() - _ = yym1063 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1064 := r.ContainerType() - if yyct1064 == codecSelferValueTypeMap1234 { - yyl1064 := r.ReadMapStart() - if yyl1064 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1064, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1064 == codecSelferValueTypeArray1234 { - yyl1064 := r.ReadArrayStart() - if yyl1064 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1064, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -15231,12 +17484,12 @@ func (x *PhotonPersistentDiskVolumeSource) codecDecodeSelfFromMap(l int, d *code var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1065Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1065Slc - var yyhl1065 bool = l >= 0 - for yyj1065 := 0; ; yyj1065++ { - if yyhl1065 { - if yyj1065 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -15245,26 +17498,38 @@ func (x *PhotonPersistentDiskVolumeSource) codecDecodeSelfFromMap(l int, d *code } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1065Slc = r.DecodeBytes(yys1065Slc, true, true) - yys1065 := string(yys1065Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1065 { + switch yys3 { case "pdID": if r.TryDecodeAsNil() { x.PdID = "" } else { - x.PdID = string(r.DecodeString()) + yyv4 := &x.PdID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "fsType": if r.TryDecodeAsNil() { x.FSType = "" } else { - x.FSType = string(r.DecodeString()) + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys1065) - } // end switch yys1065 - } // end for yyj1065 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -15272,16 +17537,16 @@ func (x *PhotonPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *co var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1068 int - var yyb1068 bool - var yyhl1068 bool = l >= 0 - 
yyj1068++ - if yyhl1068 { - yyb1068 = yyj1068 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1068 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1068 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -15289,15 +17554,21 @@ func (x *PhotonPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *co if r.TryDecodeAsNil() { x.PdID = "" } else { - x.PdID = string(r.DecodeString()) + yyv9 := &x.PdID + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } } - yyj1068++ - if yyhl1068 { - yyb1068 = yyj1068 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1068 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1068 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -15305,20 +17576,26 @@ func (x *PhotonPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *co if r.TryDecodeAsNil() { x.FSType = "" } else { - x.FSType = string(r.DecodeString()) + yyv11 := &x.FSType + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } for { - yyj1068++ - if yyhl1068 { - yyb1068 = yyj1068 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1068 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1068 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1068-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -15327,8 +17604,8 @@ func (x AzureDataDiskCachingMode) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym1071 := z.EncBinary() - _ = yym1071 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -15340,8 +17617,8 @@ func (x *AzureDataDiskCachingMode) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1072 := z.DecBinary() - _ = yym1072 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -15356,36 +17633,36 @@ func (x *AzureDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1073 := z.EncBinary() - _ = yym1073 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1074 := !z.EncBinary() - yy2arr1074 := z.EncBasicHandle().StructToArray - var yyq1074 [5]bool - _, _, _ = yysep1074, yyq1074, yy2arr1074 - const yyr1074 bool = false - yyq1074[2] = x.CachingMode != nil - yyq1074[3] = x.FSType != nil - yyq1074[4] = x.ReadOnly != nil - var yynn1074 int - if yyr1074 || yy2arr1074 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.CachingMode != nil + yyq2[3] = x.FSType != nil + yyq2[4] = x.ReadOnly != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn1074 = 2 - for _, b := range yyq1074 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn1074++ + yynn2++ } } - r.EncodeMapStart(yynn1074) - yynn1074 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1074 || yy2arr1074 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1076 := z.EncBinary() - _ = yym1076 + yym4 := z.EncBinary() + _ = yym4 if false { } else { 
r.EncodeString(codecSelferC_UTF81234, string(x.DiskName)) @@ -15394,17 +17671,17 @@ func (x *AzureDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("diskName")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1077 := z.EncBinary() - _ = yym1077 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.DiskName)) } } - if yyr1074 || yy2arr1074 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1079 := z.EncBinary() - _ = yym1079 + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.DataDiskURI)) @@ -15413,109 +17690,109 @@ func (x *AzureDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("diskURI")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1080 := z.EncBinary() - _ = yym1080 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.DataDiskURI)) } } - if yyr1074 || yy2arr1074 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1074[2] { + if yyq2[2] { if x.CachingMode == nil { r.EncodeNil() } else { - yy1082 := *x.CachingMode - yy1082.CodecEncodeSelf(e) + yy10 := *x.CachingMode + yy10.CodecEncodeSelf(e) } } else { r.EncodeNil() } } else { - if yyq1074[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("cachingMode")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.CachingMode == nil { r.EncodeNil() } else { - yy1083 := *x.CachingMode - yy1083.CodecEncodeSelf(e) + yy12 := *x.CachingMode + yy12.CodecEncodeSelf(e) } } } - if yyr1074 || yy2arr1074 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1074[3] { + if yyq2[3] { if x.FSType == nil { r.EncodeNil() } else { - yy1085 := *x.FSType - yym1086 := z.EncBinary() - _ = yym1086 + yy15 := *x.FSType + yym16 := z.EncBinary() + _ = yym16 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(yy1085)) + r.EncodeString(codecSelferC_UTF81234, string(yy15)) } } } else { r.EncodeNil() } } else { - if yyq1074[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("fsType")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.FSType == nil { r.EncodeNil() } else { - yy1087 := *x.FSType - yym1088 := z.EncBinary() - _ = yym1088 + yy17 := *x.FSType + yym18 := z.EncBinary() + _ = yym18 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(yy1087)) + r.EncodeString(codecSelferC_UTF81234, string(yy17)) } } } } - if yyr1074 || yy2arr1074 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1074[4] { + if yyq2[4] { if x.ReadOnly == nil { r.EncodeNil() } else { - yy1090 := *x.ReadOnly - yym1091 := z.EncBinary() - _ = yym1091 + yy20 := *x.ReadOnly + yym21 := z.EncBinary() + _ = yym21 if false { } else { - r.EncodeBool(bool(yy1090)) + r.EncodeBool(bool(yy20)) } } } else { r.EncodeNil() } } else { - if yyq1074[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("readOnly")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.ReadOnly == nil { r.EncodeNil() 
} else { - yy1092 := *x.ReadOnly - yym1093 := z.EncBinary() - _ = yym1093 + yy22 := *x.ReadOnly + yym23 := z.EncBinary() + _ = yym23 if false { } else { - r.EncodeBool(bool(yy1092)) + r.EncodeBool(bool(yy22)) } } } } - if yyr1074 || yy2arr1074 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -15528,25 +17805,25 @@ func (x *AzureDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1094 := z.DecBinary() - _ = yym1094 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1095 := r.ContainerType() - if yyct1095 == codecSelferValueTypeMap1234 { - yyl1095 := r.ReadMapStart() - if yyl1095 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1095, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1095 == codecSelferValueTypeArray1234 { - yyl1095 := r.ReadArrayStart() - if yyl1095 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1095, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -15558,12 +17835,12 @@ func (x *AzureDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1096Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1096Slc - var yyhl1096 bool = l >= 0 - for yyj1096 := 0; ; yyj1096++ { - if yyhl1096 { - if yyj1096 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -15572,21 +17849,33 @@ func (x *AzureDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decod } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1096Slc = r.DecodeBytes(yys1096Slc, true, true) - yys1096 := string(yys1096Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1096 { + switch yys3 { case "diskName": if r.TryDecodeAsNil() { x.DiskName = "" } else { - x.DiskName = string(r.DecodeString()) + yyv4 := &x.DiskName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "diskURI": if r.TryDecodeAsNil() { x.DataDiskURI = "" } else { - x.DataDiskURI = string(r.DecodeString()) + yyv6 := &x.DataDiskURI + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "cachingMode": if r.TryDecodeAsNil() { @@ -15608,8 +17897,8 @@ func (x *AzureDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decod if x.FSType == nil { x.FSType = new(string) } - yym1101 := z.DecBinary() - _ = yym1101 + yym10 := z.DecBinary() + _ = yym10 if false { } else { *((*string)(x.FSType)) = r.DecodeString() @@ -15624,17 +17913,17 @@ func (x *AzureDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decod if x.ReadOnly == nil { x.ReadOnly = new(bool) } - yym1103 := z.DecBinary() - _ = yym1103 + yym12 := z.DecBinary() + _ = yym12 if false { } 
else { *((*bool)(x.ReadOnly)) = r.DecodeBool() } } default: - z.DecStructFieldNotFound(-1, yys1096) - } // end switch yys1096 - } // end for yyj1096 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -15642,16 +17931,16 @@ func (x *AzureDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1104 int - var yyb1104 bool - var yyhl1104 bool = l >= 0 - yyj1104++ - if yyhl1104 { - yyb1104 = yyj1104 > l + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb1104 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb1104 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -15659,15 +17948,21 @@ func (x *AzureDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.DiskName = "" } else { - x.DiskName = string(r.DecodeString()) + yyv14 := &x.DiskName + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } - yyj1104++ - if yyhl1104 { - yyb1104 = yyj1104 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb1104 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb1104 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -15675,15 +17970,21 @@ func (x *AzureDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.DataDiskURI = "" } else { - x.DataDiskURI = string(r.DecodeString()) + yyv16 := &x.DataDiskURI + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } } - yyj1104++ - if yyhl1104 { - yyb1104 = yyj1104 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb1104 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb1104 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -15698,13 +17999,13 @@ func (x *AzureDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec } x.CachingMode.CodecDecodeSelf(d) } - yyj1104++ - if yyhl1104 { - yyb1104 = yyj1104 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb1104 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb1104 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -15717,20 +18018,20 @@ func (x *AzureDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec if x.FSType == nil { x.FSType = new(string) } - yym1109 := z.DecBinary() - _ = yym1109 + yym20 := z.DecBinary() + _ = yym20 if false { } else { *((*string)(x.FSType)) = r.DecodeString() } } - yyj1104++ - if yyhl1104 { - yyb1104 = yyj1104 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb1104 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb1104 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -15743,156 +18044,131 @@ func (x *AzureDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec if x.ReadOnly == nil { x.ReadOnly = new(bool) } - yym1111 := z.DecBinary() - _ = yym1111 + yym22 := z.DecBinary() + _ = yym22 if false { } else { *((*bool)(x.ReadOnly)) = r.DecodeBool() } } for { - yyj1104++ - if yyhl1104 { - yyb1104 = yyj1104 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb1104 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb1104 { + if yyb13 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - 
z.DecStructFieldNotFound(yyj1104-1, "") + z.DecStructFieldNotFound(yyj13-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *PortworxVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1112 := z.EncBinary() - _ = yym1112 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1113 := !z.EncBinary() - yy2arr1113 := z.EncBasicHandle().StructToArray - var yyq1113 [3]bool - _, _, _ = yysep1113, yyq1113, yy2arr1113 - const yyr1113 bool = false - yyq1113[0] = x.Name != "" - yyq1113[1] = len(x.Items) != 0 - yyq1113[2] = x.DefaultMode != nil - var yynn1113 int - if yyr1113 || yy2arr1113 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + yyq2[2] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn1113 = 0 - for _, b := range yyq1113 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1113++ + yynn2++ } } - r.EncodeMapStart(yynn1113) - yynn1113 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1113 || yy2arr1113 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1113[0] { - yym1115 := z.EncBinary() - _ = yym1115 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } + yym4 := z.EncBinary() + _ = yym4 + if false { } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) } } else { - if yyq1113[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1116 := z.EncBinary() - _ = yym1116 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) } } - if yyr1113 || yy2arr1113 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1113[1] { - if x.Items == nil { - r.EncodeNil() + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { } else { - yym1118 := z.EncBinary() - _ = yym1118 - if false { - } else { - h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) - } + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) } } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1113[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() + yym8 := z.EncBinary() + _ = yym8 + if false { } else { - yym1119 := z.EncBinary() - _ = yym1119 - if false { - } else { - h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) - } + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) } } } - if yyr1113 || yy2arr1113 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if 
yyq1113[2] { - if x.DefaultMode == nil { - r.EncodeNil() + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { } else { - yy1121 := *x.DefaultMode - yym1122 := z.EncBinary() - _ = yym1122 - if false { - } else { - r.EncodeInt(int64(yy1121)) - } + r.EncodeBool(bool(x.ReadOnly)) } } else { - r.EncodeNil() + r.EncodeBool(false) } } else { - if yyq1113[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("defaultMode")) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DefaultMode == nil { - r.EncodeNil() + yym11 := z.EncBinary() + _ = yym11 + if false { } else { - yy1123 := *x.DefaultMode - yym1124 := z.EncBinary() - _ = yym1124 - if false { - } else { - r.EncodeInt(int64(yy1123)) - } + r.EncodeBool(bool(x.ReadOnly)) } } } - if yyr1113 || yy2arr1113 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -15901,29 +18177,29 @@ func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ConfigMapVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *PortworxVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1125 := z.DecBinary() - _ = yym1125 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1126 := r.ContainerType() - if yyct1126 == codecSelferValueTypeMap1234 { - yyl1126 := r.ReadMapStart() - if yyl1126 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1126, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1126 == codecSelferValueTypeArray1234 { - yyl1126 := r.ReadArrayStart() - if yyl1126 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1126, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -15931,16 +18207,16 @@ func (x *ConfigMapVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ConfigMapVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *PortworxVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1127Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1127Slc - var yyhl1127 bool = l >= 0 - for yyj1127 := 0; ; yyj1127++ { - if yyhl1127 { - if yyj1127 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -15949,243 +18225,410 @@ func (x *ConfigMapVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decod } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1127Slc = r.DecodeBytes(yys1127Slc, true, true) - yys1127 := string(yys1127Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1127 { - case "name": + switch yys3 { + case "volumeID": if r.TryDecodeAsNil() { - x.Name = 
"" + x.VolumeID = "" } else { - x.Name = string(r.DecodeString()) + yyv4 := &x.VolumeID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } - case "items": + case "fsType": if r.TryDecodeAsNil() { - x.Items = nil + x.FSType = "" } else { - yyv1129 := &x.Items - yym1130 := z.DecBinary() - _ = yym1130 + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 if false { } else { - h.decSliceKeyToPath((*[]KeyToPath)(yyv1129), d) + *((*string)(yyv6)) = r.DecodeString() } } - case "defaultMode": + case "readOnly": if r.TryDecodeAsNil() { - if x.DefaultMode != nil { - x.DefaultMode = nil - } + x.ReadOnly = false } else { - if x.DefaultMode == nil { - x.DefaultMode = new(int32) - } - yym1132 := z.DecBinary() - _ = yym1132 + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 if false { } else { - *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + *((*bool)(yyv8)) = r.DecodeBool() } } default: - z.DecStructFieldNotFound(-1, yys1127) - } // end switch yys1127 - } // end for yyj1127 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ConfigMapVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *PortworxVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1133 int - var yyb1133 bool - var yyhl1133 bool = l >= 0 - yyj1133++ - if yyhl1133 { - yyb1133 = yyj1133 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1133 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1133 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Name = "" + x.VolumeID = "" } else { - x.Name = string(r.DecodeString()) + yyv11 := &x.VolumeID + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } - yyj1133++ - if yyhl1133 { - yyb1133 = yyj1133 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1133 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1133 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Items = nil + x.FSType = "" } else { - yyv1135 := &x.Items - yym1136 := z.DecBinary() - _ = yym1136 + yyv13 := &x.FSType + yym14 := z.DecBinary() + _ = yym14 if false { } else { - h.decSliceKeyToPath((*[]KeyToPath)(yyv1135), d) + *((*string)(yyv13)) = r.DecodeString() } } - yyj1133++ - if yyhl1133 { - yyb1133 = yyj1133 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1133 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1133 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.DefaultMode != nil { - x.DefaultMode = nil - } + x.ReadOnly = false } else { - if x.DefaultMode == nil { - x.DefaultMode = new(int32) - } - yym1138 := z.DecBinary() - _ = yym1138 + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 if false { } else { - *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + *((*bool)(yyv15)) = r.DecodeBool() } } for { - yyj1133++ - if yyhl1133 { - yyb1133 = yyj1133 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1133 = 
r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1133 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1133-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *KeyToPath) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ScaleIOVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1139 := z.EncBinary() - _ = yym1139 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1140 := !z.EncBinary() - yy2arr1140 := z.EncBasicHandle().StructToArray - var yyq1140 [3]bool - _, _, _ = yysep1140, yyq1140, yy2arr1140 - const yyr1140 bool = false - yyq1140[2] = x.Mode != nil - var yynn1140 int - if yyr1140 || yy2arr1140 { - r.EncodeArrayStart(3) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[3] = x.SSLEnabled != false + yyq2[4] = x.ProtectionDomain != "" + yyq2[5] = x.StoragePool != "" + yyq2[6] = x.StorageMode != "" + yyq2[7] = x.VolumeName != "" + yyq2[8] = x.FSType != "" + yyq2[9] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) } else { - yynn1140 = 2 - for _, b := range yyq1140 { + yynn2 = 3 + for _, b := range yyq2 { if b { - yynn1140++ + yynn2++ } } - r.EncodeMapStart(yynn1140) - yynn1140 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1140 || yy2arr1140 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1142 := z.EncBinary() - _ = yym1142 + yym4 := z.EncBinary() + _ = yym4 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + r.EncodeString(codecSelferC_UTF81234, string(x.Gateway)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) + r.EncodeString(codecSelferC_UTF81234, string("gateway")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1143 := z.EncBinary() - _ = yym1143 + yym5 := z.EncBinary() + _ = yym5 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + r.EncodeString(codecSelferC_UTF81234, string(x.Gateway)) } } - if yyr1140 || yy2arr1140 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1145 := z.EncBinary() - _ = yym1145 + yym7 := z.EncBinary() + _ = yym7 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + r.EncodeString(codecSelferC_UTF81234, string(x.System)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) + r.EncodeString(codecSelferC_UTF81234, string("system")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1146 := z.EncBinary() - _ = yym1146 + yym8 := z.EncBinary() + _ = yym8 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + r.EncodeString(codecSelferC_UTF81234, string(x.System)) } } - if yyr1140 || yy2arr1140 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1140[2] { - if x.Mode == nil { - r.EncodeNil() + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { } else { - yy1148 := *x.Mode - yym1149 := z.EncBinary() - _ = yym1149 - if false { - } else { - r.EncodeInt(int64(yy1148)) - } + r.EncodeBool(bool(x.SSLEnabled)) } } else { - r.EncodeNil() + r.EncodeBool(false) } } else { - if yyq1140[2] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("mode")) + r.EncodeString(codecSelferC_UTF81234, string("sslEnabled")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Mode == nil { - r.EncodeNil() + yym14 := z.EncBinary() + _ = yym14 + if false { } else { - yy1150 := *x.Mode - yym1151 := z.EncBinary() - _ = yym1151 - if false { - } else { - r.EncodeInt(int64(yy1150)) - } + r.EncodeBool(bool(x.SSLEnabled)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ProtectionDomain)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("protectionDomain")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ProtectionDomain)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StoragePool)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("storagePool")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StoragePool)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StorageMode)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("storageMode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StorageMode)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) } } } - if yyr1140 || yy2arr1140 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -16194,29 +18637,29 @@ func (x *KeyToPath) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *KeyToPath) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ScaleIOVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1152 := z.DecBinary() - _ = yym1152 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1153 := r.ContainerType() - if yyct1153 == codecSelferValueTypeMap1234 { - yyl1153 := r.ReadMapStart() - if yyl1153 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1153, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1153 == codecSelferValueTypeArray1234 { - yyl1153 := r.ReadArrayStart() - if yyl1153 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1153, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -16224,16 +18667,16 @@ func (x *KeyToPath) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *KeyToPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ScaleIOVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1154Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1154Slc - var yyhl1154 bool = l >= 0 - for yyj1154 := 0; ; yyj1154++ { - if yyhl1154 { - if yyj1154 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -16242,270 +18685,3122 @@ func (x *KeyToPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1154Slc = 
r.DecodeBytes(yys1154Slc, true, true) - yys1154 := string(yys1154Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1154 { - case "key": + switch yys3 { + case "gateway": if r.TryDecodeAsNil() { - x.Key = "" + x.Gateway = "" } else { - x.Key = string(r.DecodeString()) + yyv4 := &x.Gateway + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } - case "path": + case "system": if r.TryDecodeAsNil() { - x.Path = "" + x.System = "" } else { - x.Path = string(r.DecodeString()) + yyv6 := &x.System + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } - case "mode": + case "secretRef": if r.TryDecodeAsNil() { - if x.Mode != nil { - x.Mode = nil + if x.SecretRef != nil { + x.SecretRef = nil } } else { - if x.Mode == nil { - x.Mode = new(int32) + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) } - yym1158 := z.DecBinary() - _ = yym1158 + x.SecretRef.CodecDecodeSelf(d) + } + case "sslEnabled": + if r.TryDecodeAsNil() { + x.SSLEnabled = false + } else { + yyv9 := &x.SSLEnabled + yym10 := z.DecBinary() + _ = yym10 if false { } else { - *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) + *((*bool)(yyv9)) = r.DecodeBool() } } - default: - z.DecStructFieldNotFound(-1, yys1154) - } // end switch yys1154 - } // end for yyj1154 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *KeyToPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1159 int - var yyb1159 bool - var yyhl1159 bool = l >= 0 - yyj1159++ - if yyhl1159 { - yyb1159 = yyj1159 > l + case "protectionDomain": + if r.TryDecodeAsNil() { + x.ProtectionDomain = "" + } else { + yyv11 := &x.ProtectionDomain + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + case "storagePool": + if r.TryDecodeAsNil() { + x.StoragePool = "" + } else { + yyv13 := &x.StoragePool + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + case "storageMode": + if r.TryDecodeAsNil() { + x.StorageMode = "" + } else { + yyv15 := &x.StorageMode + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + case "volumeName": + if r.TryDecodeAsNil() { + x.VolumeName = "" + } else { + yyv17 := &x.VolumeName + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv19 := &x.FSType + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv21 := &x.ReadOnly + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(yyv21)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScaleIOVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj23 int + var yyb23 bool + var yyhl23 bool = l >= 0 + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Gateway = "" + } else { + yyv24 := &x.Gateway + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + *((*string)(yyv24)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.System = "" + } else { + yyv26 := &x.System + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*string)(yyv26)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SSLEnabled = false + } else { + yyv29 := &x.SSLEnabled + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ProtectionDomain = "" + } else { + yyv31 := &x.ProtectionDomain + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StoragePool = "" + } else { + yyv33 := &x.StoragePool + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*string)(yyv33)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StorageMode = "" + } else { + yyv35 := &x.StorageMode + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*string)(yyv35)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeName = "" + } else { + yyv37 := &x.VolumeName + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*string)(yyv37)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv39 := &x.FSType + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*string)(yyv39)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv41 := &x.ReadOnly + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*bool)(yyv41)) = r.DecodeBool() + } + } + for { + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj23-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = len(x.Items) != 0 + yyq2[2] = x.DefaultMode != nil + yyq2[3] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Items == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy10 := *x.DefaultMode + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("defaultMode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy12 := *x.DefaultMode + yym13 := z.EncBinary() + _ = 
yym13 + if false { + } else { + r.EncodeInt(int64(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy15 := *x.Optional + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(yy15)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy17 := *x.Optional + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeBool(bool(yy17)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ConfigMapVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ConfigMapVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv6), d) + } + } + case "defaultMode": + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ConfigMapVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv13 := &x.Name + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv15 := &x.Items + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv15), d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ConfigMapProjection) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = len(x.Items) != 0 + yyq2[2] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + 
} + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Items == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy10 := *x.Optional + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy12 := *x.Optional + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ConfigMapProjection) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ConfigMapProjection) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv6), d) + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(x.Optional)) = 
r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ConfigMapProjection) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv11 := &x.Name + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ProjectedVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.DefaultMode != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Sources == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceVolumeProjection(([]VolumeProjection)(x.Sources), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("sources")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Sources == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceVolumeProjection(([]VolumeProjection)(x.Sources), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy7 := *x.DefaultMode + yym8 := z.EncBinary() + _ = yym8 + if 
false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("defaultMode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy9 := *x.DefaultMode + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ProjectedVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ProjectedVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "sources": + if r.TryDecodeAsNil() { + x.Sources = nil + } else { + yyv4 := &x.Sources + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceVolumeProjection((*[]VolumeProjection)(yyv4), d) + } + } + case "defaultMode": + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ProjectedVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Sources = nil + } else { + yyv9 := &x.Sources + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceVolumeProjection((*[]VolumeProjection)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *VolumeProjection) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Secret != nil + yyq2[1] = x.DownwardAPI != nil + yyq2[2] = x.ConfigMap != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Secret == nil { + r.EncodeNil() + } else { + x.Secret.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secret")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Secret == nil { + r.EncodeNil() + } else { + x.Secret.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.DownwardAPI == nil { + r.EncodeNil() + } else { + x.DownwardAPI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("downwardAPI")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DownwardAPI == nil { + r.EncodeNil() + } else { + x.DownwardAPI.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ConfigMap == nil { + r.EncodeNil() + } else { + x.ConfigMap.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("configMap")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ConfigMap == nil { + r.EncodeNil() + } else { + x.ConfigMap.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *VolumeProjection) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + 
x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *VolumeProjection) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "secret": + if r.TryDecodeAsNil() { + if x.Secret != nil { + x.Secret = nil + } + } else { + if x.Secret == nil { + x.Secret = new(SecretProjection) + } + x.Secret.CodecDecodeSelf(d) + } + case "downwardAPI": + if r.TryDecodeAsNil() { + if x.DownwardAPI != nil { + x.DownwardAPI = nil + } + } else { + if x.DownwardAPI == nil { + x.DownwardAPI = new(DownwardAPIProjection) + } + x.DownwardAPI.CodecDecodeSelf(d) + } + case "configMap": + if r.TryDecodeAsNil() { + if x.ConfigMap != nil { + x.ConfigMap = nil + } + } else { + if x.ConfigMap == nil { + x.ConfigMap = new(ConfigMapProjection) + } + x.ConfigMap.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *VolumeProjection) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Secret != nil { + x.Secret = nil + } + } else { + if x.Secret == nil { + x.Secret = new(SecretProjection) + } + x.Secret.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DownwardAPI != nil { + x.DownwardAPI = nil + } + } else { + if x.DownwardAPI == nil { + x.DownwardAPI = new(DownwardAPIProjection) + } + x.DownwardAPI.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ConfigMap != nil { + x.ConfigMap = nil + } + } else { + if x.ConfigMap == nil { + x.ConfigMap = new(ConfigMapProjection) + } + x.ConfigMap.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *KeyToPath) CodecEncodeSelf(e *codec1978.Encoder) { 
+ var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.Mode != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Mode == nil { + r.EncodeNil() + } else { + yy10 := *x.Mode + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("mode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Mode == nil { + r.EncodeNil() + } else { + yy12 := *x.Mode + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *KeyToPath) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *KeyToPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + 
break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv4 := &x.Key + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv6 := &x.Path + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "mode": + if r.TryDecodeAsNil() { + if x.Mode != nil { + x.Mode = nil + } + } else { + if x.Mode == nil { + x.Mode = new(int32) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *KeyToPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv11 := &x.Key + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv13 := &x.Path + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Mode != nil { + x.Mode = nil + } + } else { + if x.Mode == nil { + x.Mode = new(int32) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ContainerPort) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = x.HostPort != 0 + yyq2[3] = x.Protocol != "" + yyq2[4] = x.HostIP != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + 
} + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.HostPort)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPort")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.HostPort)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.ContainerPort)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containerPort")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.ContainerPort)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + x.Protocol.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("protocol")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Protocol.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostIP")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerPort) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } 
else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "hostPort": + if r.TryDecodeAsNil() { + x.HostPort = 0 + } else { + yyv6 := &x.HostPort + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "containerPort": + if r.TryDecodeAsNil() { + x.ContainerPort = 0 + } else { + yyv8 := &x.ContainerPort + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "protocol": + if r.TryDecodeAsNil() { + x.Protocol = "" + } else { + yyv10 := &x.Protocol + yyv10.CodecDecodeSelf(d) + } + case "hostIP": + if r.TryDecodeAsNil() { + x.HostIP = "" + } else { + yyv11 := &x.HostIP + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ContainerPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv14 := &x.Name + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostPort = 0 + } else { + yyv16 := &x.HostPort + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*int32)(yyv16)) = int32(r.DecodeInt(32)) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContainerPort = 0 + } else { + yyv18 := &x.ContainerPort + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*int32)(yyv18)) = int32(r.DecodeInt(32)) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Protocol = "" + } else { + yyv20 := &x.Protocol + yyv20.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostIP = "" + } else { + yyv21 := &x.HostIP + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *VolumeMount) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.ReadOnly != false + yyq2[3] = x.SubPath != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MountPath)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("mountPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MountPath)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SubPath)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("subPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SubPath)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *VolumeMount) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *VolumeMount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv6 := &x.ReadOnly + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } + } + case "mountPath": + if r.TryDecodeAsNil() { + x.MountPath = "" + } else { + yyv8 := &x.MountPath + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "subPath": + if r.TryDecodeAsNil() { + x.SubPath = "" + } else { + yyv10 := &x.SubPath + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *VolumeMount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1159 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1159 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Key = "" + x.Name = "" } else { - x.Key = string(r.DecodeString()) + yyv13 := &x.Name + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj1159++ - if yyhl1159 { - yyb1159 = yyj1159 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l 
} else { - yyb1159 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1159 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Path = "" + x.ReadOnly = false } else { - x.Path = string(r.DecodeString()) + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } } - yyj1159++ - if yyhl1159 { - yyb1159 = yyj1159 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1159 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1159 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.Mode != nil { - x.Mode = nil - } + x.MountPath = "" } else { - if x.Mode == nil { - x.Mode = new(int32) + yyv17 := &x.MountPath + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() } - yym1163 := z.DecBinary() - _ = yym1163 + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SubPath = "" + } else { + yyv19 := &x.SubPath + yym20 := z.DecBinary() + _ = yym20 if false { } else { - *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) + *((*string)(yyv19)) = r.DecodeString() } } for { - yyj1159++ - if yyhl1159 { - yyb1159 = yyj1159 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1159 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1159 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1159-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ContainerPort) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *EnvVar) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1164 := z.EncBinary() - _ = yym1164 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1165 := !z.EncBinary() - yy2arr1165 := z.EncBasicHandle().StructToArray - var yyq1165 [5]bool - _, _, _ = yysep1165, yyq1165, yy2arr1165 - const yyr1165 bool = false - yyq1165[0] = x.Name != "" - yyq1165[1] = x.HostPort != 0 - yyq1165[3] = x.Protocol != "" - yyq1165[4] = x.HostIP != "" - var yynn1165 int - if yyr1165 || yy2arr1165 { - r.EncodeArrayStart(5) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Value != "" + yyq2[2] = x.ValueFrom != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) } else { - yynn1165 = 1 - for _, b := range yyq1165 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1165++ + yynn2++ } } - r.EncodeMapStart(yynn1165) - yynn1165 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } } - if yyr1165 || yy2arr1165 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1165[0] { - yym1167 := z.EncBinary() - _ = yym1167 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) } } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1165[0] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) + r.EncodeString(codecSelferC_UTF81234, string("value")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1168 := z.EncBinary() - _ = yym1168 + yym8 := z.EncBinary() + _ = yym8 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) } } } - if yyr1165 || yy2arr1165 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1165[1] { - yym1170 := z.EncBinary() - _ = yym1170 - if false { + if yyq2[2] { + if x.ValueFrom == nil { + r.EncodeNil() } else { - r.EncodeInt(int64(x.HostPort)) + x.ValueFrom.CodecEncodeSelf(e) } } else { - r.EncodeInt(0) + r.EncodeNil() } } else { - if yyq1165[1] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPort")) + r.EncodeString(codecSelferC_UTF81234, string("valueFrom")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1171 := z.EncBinary() - _ = yym1171 - if false { + if x.ValueFrom == nil { + r.EncodeNil() } else { - r.EncodeInt(int64(x.HostPort)) + x.ValueFrom.CodecEncodeSelf(e) } } } - if yyr1165 || yy2arr1165 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1173 := z.EncBinary() - _ = yym1173 + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EnvVar) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EnvVar) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 
{ + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 if false { } else { - r.EncodeInt(int64(x.ContainerPort)) + *((*string)(yyv4)) = r.DecodeString() } + } + case "value": + if r.TryDecodeAsNil() { + x.Value = "" } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerPort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1174 := z.EncBinary() - _ = yym1174 + yyv6 := &x.Value + yym7 := z.DecBinary() + _ = yym7 if false { } else { - r.EncodeInt(int64(x.ContainerPort)) + *((*string)(yyv6)) = r.DecodeString() + } + } + case "valueFrom": + if r.TryDecodeAsNil() { + if x.ValueFrom != nil { + x.ValueFrom = nil + } + } else { + if x.ValueFrom == nil { + x.ValueFrom = new(EnvVarSource) + } + x.ValueFrom.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EnvVar) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv10 := &x.Name + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv12 := &x.Value + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ValueFrom != nil { + x.ValueFrom = nil + } + } else { + if x.ValueFrom == nil { + x.ValueFrom = new(EnvVarSource) + } + x.ValueFrom.CodecDecodeSelf(d) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EnvVarSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.FieldRef != nil + yyq2[1] = x.ResourceFieldRef != nil + yyq2[2] = x.ConfigMapKeyRef != nil + yyq2[3] = x.SecretKeyRef != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } } + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1165 || 
yy2arr1165 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1165[3] { - x.Protocol.CodecEncodeSelf(e) + if yyq2[0] { + if x.FieldRef == nil { + r.EncodeNil() + } else { + x.FieldRef.CodecEncodeSelf(e) + } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeNil() } } else { - if yyq1165[3] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("protocol")) + r.EncodeString(codecSelferC_UTF81234, string("fieldRef")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Protocol.CodecEncodeSelf(e) + if x.FieldRef == nil { + r.EncodeNil() + } else { + x.FieldRef.CodecEncodeSelf(e) + } } } - if yyr1165 || yy2arr1165 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1165[4] { - yym1177 := z.EncBinary() - _ = yym1177 - if false { + if yyq2[1] { + if x.ResourceFieldRef == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) + x.ResourceFieldRef.CodecEncodeSelf(e) } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeNil() } } else { - if yyq1165[4] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostIP")) + r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1178 := z.EncBinary() - _ = yym1178 - if false { + if x.ResourceFieldRef == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) + x.ResourceFieldRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ConfigMapKeyRef == nil { + r.EncodeNil() + } else { + x.ConfigMapKeyRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("configMapKeyRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ConfigMapKeyRef == nil { + r.EncodeNil() + } else { + x.ConfigMapKeyRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.SecretKeyRef == nil { + r.EncodeNil() + } else { + x.SecretKeyRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretKeyRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretKeyRef == nil { + r.EncodeNil() + } else { + x.SecretKeyRef.CodecEncodeSelf(e) } } } - if yyr1165 || yy2arr1165 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -16514,29 +21809,29 @@ func (x *ContainerPort) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ContainerPort) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *EnvVarSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1179 := z.DecBinary() - _ = yym1179 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1180 := r.ContainerType() - if yyct1180 == codecSelferValueTypeMap1234 { - yyl1180 := r.ReadMapStart() - if yyl1180 == 0 { + yyct2 := r.ContainerType() + if yyct2 == 
codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1180, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1180 == codecSelferValueTypeArray1234 { - yyl1180 := r.ReadArrayStart() - if yyl1180 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1180, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -16544,16 +21839,16 @@ func (x *ContainerPort) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ContainerPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *EnvVarSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1181Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1181Slc - var yyhl1181 bool = l >= 0 - for yyj1181 := 0; ; yyj1181++ { - if yyhl1181 { - if yyj1181 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -16562,271 +21857,244 @@ func (x *ContainerPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1181Slc = r.DecodeBytes(yys1181Slc, true, true) - yys1181 := string(yys1181Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1181 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "hostPort": + switch yys3 { + case "fieldRef": if r.TryDecodeAsNil() { - x.HostPort = 0 + if x.FieldRef != nil { + x.FieldRef = nil + } } else { - x.HostPort = int32(r.DecodeInt(32)) + if x.FieldRef == nil { + x.FieldRef = new(ObjectFieldSelector) + } + x.FieldRef.CodecDecodeSelf(d) } - case "containerPort": + case "resourceFieldRef": if r.TryDecodeAsNil() { - x.ContainerPort = 0 + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } } else { - x.ContainerPort = int32(r.DecodeInt(32)) + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) } - case "protocol": + case "configMapKeyRef": if r.TryDecodeAsNil() { - x.Protocol = "" + if x.ConfigMapKeyRef != nil { + x.ConfigMapKeyRef = nil + } } else { - x.Protocol = Protocol(r.DecodeString()) + if x.ConfigMapKeyRef == nil { + x.ConfigMapKeyRef = new(ConfigMapKeySelector) + } + x.ConfigMapKeyRef.CodecDecodeSelf(d) } - case "hostIP": + case "secretKeyRef": if r.TryDecodeAsNil() { - x.HostIP = "" + if x.SecretKeyRef != nil { + x.SecretKeyRef = nil + } } else { - x.HostIP = string(r.DecodeString()) + if x.SecretKeyRef == nil { + x.SecretKeyRef = new(SecretKeySelector) + } + x.SecretKeyRef.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys1181) - } // end switch yys1181 - } // end for yyj1181 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ContainerPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *EnvVarSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := 
codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1187 int - var yyb1187 bool - var yyhl1187 bool = l >= 0 - yyj1187++ - if yyhl1187 { - yyb1187 = yyj1187 > l - } else { - yyb1187 = r.CheckBreak() - } - if yyb1187 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj1187++ - if yyhl1187 { - yyb1187 = yyj1187 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1187 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1187 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.HostPort = 0 + if x.FieldRef != nil { + x.FieldRef = nil + } } else { - x.HostPort = int32(r.DecodeInt(32)) + if x.FieldRef == nil { + x.FieldRef = new(ObjectFieldSelector) + } + x.FieldRef.CodecDecodeSelf(d) } - yyj1187++ - if yyhl1187 { - yyb1187 = yyj1187 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1187 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1187 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ContainerPort = 0 + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } } else { - x.ContainerPort = int32(r.DecodeInt(32)) + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) } - yyj1187++ - if yyhl1187 { - yyb1187 = yyj1187 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1187 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1187 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Protocol = "" + if x.ConfigMapKeyRef != nil { + x.ConfigMapKeyRef = nil + } } else { - x.Protocol = Protocol(r.DecodeString()) + if x.ConfigMapKeyRef == nil { + x.ConfigMapKeyRef = new(ConfigMapKeySelector) + } + x.ConfigMapKeyRef.CodecDecodeSelf(d) } - yyj1187++ - if yyhl1187 { - yyb1187 = yyj1187 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1187 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1187 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.HostIP = "" + if x.SecretKeyRef != nil { + x.SecretKeyRef = nil + } } else { - x.HostIP = string(r.DecodeString()) + if x.SecretKeyRef == nil { + x.SecretKeyRef = new(SecretKeySelector) + } + x.SecretKeyRef.CodecDecodeSelf(d) } for { - yyj1187++ - if yyhl1187 { - yyb1187 = yyj1187 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1187 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1187 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1187-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *VolumeMount) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ObjectFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1193 := z.EncBinary() - _ = yym1193 + yym1 := z.EncBinary() + _ = yym1 if false { } else if 
z.HasExtensions() && z.EncExt(x) { } else { - yysep1194 := !z.EncBinary() - yy2arr1194 := z.EncBasicHandle().StructToArray - var yyq1194 [4]bool - _, _, _ = yysep1194, yyq1194, yy2arr1194 - const yyr1194 bool = false - yyq1194[1] = x.ReadOnly != false - yyq1194[3] = x.SubPath != "" - var yynn1194 int - if yyr1194 || yy2arr1194 { - r.EncodeArrayStart(4) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) } else { - yynn1194 = 2 - for _, b := range yyq1194 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1194++ + yynn2++ } } - r.EncodeMapStart(yynn1194) - yynn1194 = 0 - } - if yyr1194 || yy2arr1194 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1196 := z.EncBinary() - _ = yym1196 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1197 := z.EncBinary() - _ = yym1197 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1194 || yy2arr1194 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1194[1] { - yym1199 := z.EncBinary() - _ = yym1199 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { - r.EncodeBool(bool(x.ReadOnly)) + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } else { - r.EncodeBool(false) + r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1194[1] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1200 := z.EncBinary() - _ = yym1200 + yym5 := z.EncBinary() + _ = yym5 if false { } else { - r.EncodeBool(bool(x.ReadOnly)) + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr1194 || yy2arr1194 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1202 := z.EncBinary() - _ = yym1202 + yym7 := z.EncBinary() + _ = yym7 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MountPath)) + r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("mountPath")) + r.EncodeString(codecSelferC_UTF81234, string("fieldPath")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1203 := z.EncBinary() - _ = yym1203 + yym8 := z.EncBinary() + _ = yym8 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MountPath)) - } - } - if yyr1194 || yy2arr1194 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1194[3] { - yym1205 := z.EncBinary() - _ = yym1205 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SubPath)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1194[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1206 := z.EncBinary() - _ = yym1206 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.SubPath)) - } + r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) } } - if yyr1194 || yy2arr1194 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -16835,29 +22103,29 @@ func (x *VolumeMount) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *VolumeMount) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ObjectFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1207 := z.DecBinary() - _ = yym1207 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1208 := r.ContainerType() - if yyct1208 == codecSelferValueTypeMap1234 { - yyl1208 := r.ReadMapStart() - if yyl1208 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1208, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1208 == codecSelferValueTypeArray1234 { - yyl1208 := r.ReadArrayStart() - if yyl1208 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1208, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -16865,16 +22133,16 @@ func (x *VolumeMount) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *VolumeMount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ObjectFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1209Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1209Slc - var yyhl1209 bool = l >= 0 - for yyj1209 := 0; ; yyj1209++ { - if yyhl1209 { - if yyj1209 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -16883,228 +22151,218 @@ func (x *VolumeMount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1209Slc = r.DecodeBytes(yys1209Slc, true, true) - yys1209 := string(yys1209Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1209 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - case "mountPath": + switch yys3 { + case "apiVersion": if r.TryDecodeAsNil() { - x.MountPath = "" + x.APIVersion = "" } else { - x.MountPath = string(r.DecodeString()) + yyv4 := &x.APIVersion + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } - case "subPath": + case "fieldPath": if r.TryDecodeAsNil() { - x.SubPath = "" + x.FieldPath = "" } else { - x.SubPath = string(r.DecodeString()) + yyv6 := &x.FieldPath + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys1209) - } // end switch yys1209 - } // end for yyj1209 
+ z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *VolumeMount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *ObjectFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1214 int - var yyb1214 bool - var yyhl1214 bool = l >= 0 - yyj1214++ - if yyhl1214 { - yyb1214 = yyj1214 > l - } else { - yyb1214 = r.CheckBreak() - } - if yyb1214 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj1214++ - if yyhl1214 { - yyb1214 = yyj1214 > l - } else { - yyb1214 = r.CheckBreak() - } - if yyb1214 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - x.ReadOnly = bool(r.DecodeBool()) + yyb8 = r.CheckBreak() } - yyj1214++ - if yyhl1214 { - yyb1214 = yyj1214 > l - } else { - yyb1214 = r.CheckBreak() - } - if yyb1214 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.MountPath = "" + x.APIVersion = "" } else { - x.MountPath = string(r.DecodeString()) + yyv9 := &x.APIVersion + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } } - yyj1214++ - if yyhl1214 { - yyb1214 = yyj1214 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1214 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1214 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.SubPath = "" + x.FieldPath = "" } else { - x.SubPath = string(r.DecodeString()) + yyv11 := &x.FieldPath + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } for { - yyj1214++ - if yyhl1214 { - yyb1214 = yyj1214 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1214 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1214 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1214-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *EnvVar) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ResourceFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1219 := z.EncBinary() - _ = yym1219 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1220 := !z.EncBinary() - yy2arr1220 := z.EncBasicHandle().StructToArray - var yyq1220 [3]bool - _, _, _ = yysep1220, yyq1220, yy2arr1220 - const yyr1220 bool = false - yyq1220[1] = x.Value != "" - yyq1220[2] = x.ValueFrom != nil - var yynn1220 int - if yyr1220 || yy2arr1220 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ContainerName != "" + yyq2[2] = 
true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn1220 = 1 - for _, b := range yyq1220 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1220++ + yynn2++ } } - r.EncodeMapStart(yynn1220) - yynn1220 = 0 - } - if yyr1220 || yy2arr1220 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1222 := z.EncBinary() - _ = yym1222 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1223 := z.EncBinary() - _ = yym1223 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1220 || yy2arr1220 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1220[1] { - yym1225 := z.EncBinary() - _ = yym1225 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) } } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1220[1] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) + r.EncodeString(codecSelferC_UTF81234, string("containerName")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1226 := z.EncBinary() - _ = yym1226 + yym5 := z.EncBinary() + _ = yym5 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) } } } - if yyr1220 || yy2arr1220 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1220[2] { - if x.ValueFrom == nil { - r.EncodeNil() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.Divisor + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) } else { - x.ValueFrom.CodecEncodeSelf(e) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq1220[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("valueFrom")) + r.EncodeString(codecSelferC_UTF81234, string("divisor")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ValueFrom == nil { - r.EncodeNil() + yy12 := &x.Divisor + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) } else { - x.ValueFrom.CodecEncodeSelf(e) + z.EncFallback(yy12) } } } - if yyr1220 || yy2arr1220 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -17113,29 +22371,29 @@ func (x *EnvVar) CodecEncodeSelf(e 
*codec1978.Encoder) { } } -func (x *EnvVar) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ResourceFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1228 := z.DecBinary() - _ = yym1228 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1229 := r.ContainerType() - if yyct1229 == codecSelferValueTypeMap1234 { - yyl1229 := r.ReadMapStart() - if yyl1229 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1229, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1229 == codecSelferValueTypeArray1234 { - yyl1229 := r.ReadArrayStart() - if yyl1229 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1229, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -17143,16 +22401,16 @@ func (x *EnvVar) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *EnvVar) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ResourceFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1230Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1230Slc - var yyhl1230 bool = l >= 0 - for yyj1230 := 0; ; yyj1230++ { - if yyhl1230 { - if yyj1230 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -17161,243 +22419,260 @@ func (x *EnvVar) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1230Slc = r.DecodeBytes(yys1230Slc, true, true) - yys1230 := string(yys1230Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1230 { - case "name": + switch yys3 { + case "containerName": if r.TryDecodeAsNil() { - x.Name = "" + x.ContainerName = "" } else { - x.Name = string(r.DecodeString()) + yyv4 := &x.ContainerName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } - case "value": + case "resource": if r.TryDecodeAsNil() { - x.Value = "" + x.Resource = "" } else { - x.Value = string(r.DecodeString()) + yyv6 := &x.Resource + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } - case "valueFrom": + case "divisor": if r.TryDecodeAsNil() { - if x.ValueFrom != nil { - x.ValueFrom = nil - } + x.Divisor = pkg3_resource.Quantity{} } else { - if x.ValueFrom == nil { - x.ValueFrom = new(EnvVarSource) + yyv8 := &x.Divisor + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) } - x.ValueFrom.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys1230) - } // end switch yys1230 - } // end for yyj1230 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } 
-func (x *EnvVar) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *ResourceFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1234 int - var yyb1234 bool - var yyhl1234 bool = l >= 0 - yyj1234++ - if yyhl1234 { - yyb1234 = yyj1234 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1234 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1234 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Name = "" + x.ContainerName = "" } else { - x.Name = string(r.DecodeString()) + yyv11 := &x.ContainerName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } - yyj1234++ - if yyhl1234 { - yyb1234 = yyj1234 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1234 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1234 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Value = "" + x.Resource = "" } else { - x.Value = string(r.DecodeString()) + yyv13 := &x.Resource + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj1234++ - if yyhl1234 { - yyb1234 = yyj1234 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1234 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1234 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.ValueFrom != nil { - x.ValueFrom = nil - } + x.Divisor = pkg3_resource.Quantity{} } else { - if x.ValueFrom == nil { - x.ValueFrom = new(EnvVarSource) + yyv15 := &x.Divisor + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv15) + } else { + z.DecFallback(yyv15, false) } - x.ValueFrom.CodecDecodeSelf(d) } for { - yyj1234++ - if yyhl1234 { - yyb1234 = yyj1234 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1234 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1234 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1234-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *EnvVarSource) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1238 := z.EncBinary() - _ = yym1238 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1239 := !z.EncBinary() - yy2arr1239 := z.EncBasicHandle().StructToArray - var yyq1239 [4]bool - _, _, _ = yysep1239, yyq1239, yy2arr1239 - const yyr1239 bool = false - yyq1239[0] = x.FieldRef != nil - yyq1239[1] = x.ResourceFieldRef != nil - yyq1239[2] = x.ConfigMapKeyRef != nil - yyq1239[3] = x.SecretKeyRef != nil - var yynn1239 int - if yyr1239 || yy2arr1239 { - r.EncodeArrayStart(4) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, 
yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[2] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) } else { - yynn1239 = 0 - for _, b := range yyq1239 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1239++ + yynn2++ } } - r.EncodeMapStart(yynn1239) - yynn1239 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1239 || yy2arr1239 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1239[0] { - if x.FieldRef == nil { - r.EncodeNil() + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { } else { - x.FieldRef.CodecEncodeSelf(e) + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1239[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fieldRef")) + r.EncodeString(codecSelferC_UTF81234, string("name")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FieldRef == nil { - r.EncodeNil() + yym5 := z.EncBinary() + _ = yym5 + if false { } else { - x.FieldRef.CodecEncodeSelf(e) + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } } - if yyr1239 || yy2arr1239 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1239[1] { - if x.ResourceFieldRef == nil { - r.EncodeNil() - } else { - x.ResourceFieldRef.CodecEncodeSelf(e) - } + yym7 := z.EncBinary() + _ = yym7 + if false { } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) } } else { - if yyq1239[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ResourceFieldRef == nil { - r.EncodeNil() - } else { - x.ResourceFieldRef.CodecEncodeSelf(e) - } - } - } - if yyr1239 || yy2arr1239 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1239[2] { - if x.ConfigMapKeyRef == nil { - r.EncodeNil() - } else { - x.ConfigMapKeyRef.CodecEncodeSelf(e) - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { } else { - r.EncodeNil() - } - } else { - if yyq1239[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("configMapKeyRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ConfigMapKeyRef == nil { - r.EncodeNil() - } else { - x.ConfigMapKeyRef.CodecEncodeSelf(e) - } + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) } } - if yyr1239 || yy2arr1239 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1239[3] { - if x.SecretKeyRef == nil { + if yyq2[2] { + if x.Optional == nil { r.EncodeNil() } else { - x.SecretKeyRef.CodecEncodeSelf(e) + yy10 := *x.Optional + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(yy10)) + } } } else { r.EncodeNil() } } else { - if yyq1239[3] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretKeyRef")) + r.EncodeString(codecSelferC_UTF81234, string("optional")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretKeyRef == nil { + if x.Optional == nil { r.EncodeNil() } else { - 
x.SecretKeyRef.CodecEncodeSelf(e) + yy12 := *x.Optional + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(yy12)) + } } } } - if yyr1239 || yy2arr1239 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -17406,29 +22681,29 @@ func (x *EnvVarSource) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *EnvVarSource) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ConfigMapKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1244 := z.DecBinary() - _ = yym1244 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1245 := r.ContainerType() - if yyct1245 == codecSelferValueTypeMap1234 { - yyl1245 := r.ReadMapStart() - if yyl1245 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1245, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1245 == codecSelferValueTypeArray1234 { - yyl1245 := r.ReadArrayStart() - if yyl1245 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1245, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -17436,16 +22711,16 @@ func (x *EnvVarSource) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *EnvVarSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ConfigMapKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1246Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1246Slc - var yyhl1246 bool = l >= 0 - for yyj1246 := 0; ; yyj1246++ { - if yyhl1246 { - if yyj1246 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -17454,244 +22729,262 @@ func (x *EnvVarSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1246Slc = r.DecodeBytes(yys1246Slc, true, true) - yys1246 := string(yys1246Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1246 { - case "fieldRef": + switch yys3 { + case "name": if r.TryDecodeAsNil() { - if x.FieldRef != nil { - x.FieldRef = nil - } + x.Name = "" } else { - if x.FieldRef == nil { - x.FieldRef = new(ObjectFieldSelector) + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() } - x.FieldRef.CodecDecodeSelf(d) } - case "resourceFieldRef": + case "key": if r.TryDecodeAsNil() { - if x.ResourceFieldRef != nil { - x.ResourceFieldRef = nil - } + x.Key = "" } else { - if x.ResourceFieldRef == nil { - x.ResourceFieldRef = new(ResourceFieldSelector) + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() } - x.ResourceFieldRef.CodecDecodeSelf(d) } - case "configMapKeyRef": + case "optional": if r.TryDecodeAsNil() { - if x.ConfigMapKeyRef != nil { 
- x.ConfigMapKeyRef = nil + if x.Optional != nil { + x.Optional = nil } } else { - if x.ConfigMapKeyRef == nil { - x.ConfigMapKeyRef = new(ConfigMapKeySelector) - } - x.ConfigMapKeyRef.CodecDecodeSelf(d) - } - case "secretKeyRef": - if r.TryDecodeAsNil() { - if x.SecretKeyRef != nil { - x.SecretKeyRef = nil + if x.Optional == nil { + x.Optional = new(bool) } - } else { - if x.SecretKeyRef == nil { - x.SecretKeyRef = new(SecretKeySelector) + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() } - x.SecretKeyRef.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys1246) - } // end switch yys1246 - } // end for yyj1246 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *EnvVarSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *ConfigMapKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1251 int - var yyb1251 bool - var yyhl1251 bool = l >= 0 - yyj1251++ - if yyhl1251 { - yyb1251 = yyj1251 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1251 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1251 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.FieldRef != nil { - x.FieldRef = nil - } + x.Name = "" } else { - if x.FieldRef == nil { - x.FieldRef = new(ObjectFieldSelector) + yyv11 := &x.Name + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() } - x.FieldRef.CodecDecodeSelf(d) } - yyj1251++ - if yyhl1251 { - yyb1251 = yyj1251 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1251 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1251 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.ResourceFieldRef != nil { - x.ResourceFieldRef = nil - } + x.Key = "" } else { - if x.ResourceFieldRef == nil { - x.ResourceFieldRef = new(ResourceFieldSelector) + yyv13 := &x.Key + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() } - x.ResourceFieldRef.CodecDecodeSelf(d) } - yyj1251++ - if yyhl1251 { - yyb1251 = yyj1251 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1251 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1251 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.ConfigMapKeyRef != nil { - x.ConfigMapKeyRef = nil + if x.Optional != nil { + x.Optional = nil } } else { - if x.ConfigMapKeyRef == nil { - x.ConfigMapKeyRef = new(ConfigMapKeySelector) - } - x.ConfigMapKeyRef.CodecDecodeSelf(d) - } - yyj1251++ - if yyhl1251 { - yyb1251 = yyj1251 > l - } else { - yyb1251 = r.CheckBreak() - } - if yyb1251 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretKeyRef != nil { - x.SecretKeyRef = nil + if x.Optional == nil { + x.Optional = new(bool) } - } else { - if x.SecretKeyRef == nil { - x.SecretKeyRef = new(SecretKeySelector) + yym16 := 
z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() } - x.SecretKeyRef.CodecDecodeSelf(d) } for { - yyj1251++ - if yyhl1251 { - yyb1251 = yyj1251 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1251 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1251 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1251-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ObjectFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *SecretKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1256 := z.EncBinary() - _ = yym1256 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1257 := !z.EncBinary() - yy2arr1257 := z.EncBasicHandle().StructToArray - var yyq1257 [2]bool - _, _, _ = yysep1257, yyq1257, yy2arr1257 - const yyr1257 bool = false - yyq1257[0] = x.APIVersion != "" - var yynn1257 int - if yyr1257 || yy2arr1257 { - r.EncodeArrayStart(2) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[2] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) } else { - yynn1257 = 1 - for _, b := range yyq1257 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1257++ + yynn2++ } } - r.EncodeMapStart(yynn1257) - yynn1257 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1257 || yy2arr1257 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1257[0] { - yym1259 := z.EncBinary() - _ = yym1259 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1257[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + r.EncodeString(codecSelferC_UTF81234, string("name")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1260 := z.EncBinary() - _ = yym1260 + yym5 := z.EncBinary() + _ = yym5 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } } - if yyr1257 || yy2arr1257 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1262 := z.EncBinary() - _ = yym1262 + yym7 := z.EncBinary() + _ = yym7 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fieldPath")) + r.EncodeString(codecSelferC_UTF81234, string("key")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1263 := z.EncBinary() - _ = yym1263 + yym8 := z.EncBinary() + _ = yym8 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy10 := 
*x.Optional + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy12 := *x.Optional + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(yy12)) + } + } } } - if yyr1257 || yy2arr1257 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -17700,29 +22993,29 @@ func (x *ObjectFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ObjectFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *SecretKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1264 := z.DecBinary() - _ = yym1264 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1265 := r.ContainerType() - if yyct1265 == codecSelferValueTypeMap1234 { - yyl1265 := r.ReadMapStart() - if yyl1265 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1265, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1265 == codecSelferValueTypeArray1234 { - yyl1265 := r.ReadArrayStart() - if yyl1265 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1265, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -17730,16 +23023,16 @@ func (x *ObjectFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ObjectFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *SecretKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1266Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1266Slc - var yyhl1266 bool = l >= 0 - for yyj1266 := 0; ; yyj1266++ { - if yyhl1266 { - if yyj1266 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -17748,194 +23041,255 @@ func (x *ObjectFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1266Slc = r.DecodeBytes(yys1266Slc, true, true) - yys1266 := string(yys1266Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1266 { - case "apiVersion": + switch yys3 { + case "name": if r.TryDecodeAsNil() { - x.APIVersion = "" + x.Name = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } - case "fieldPath": + case "key": if r.TryDecodeAsNil() { - x.FieldPath = "" + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + 
*((*string)(yyv6)) = r.DecodeString() + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } } else { - x.FieldPath = string(r.DecodeString()) + if x.Optional == nil { + x.Optional = new(bool) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } } default: - z.DecStructFieldNotFound(-1, yys1266) - } // end switch yys1266 - } // end for yyj1266 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ObjectFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *SecretKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1269 int - var yyb1269 bool - var yyhl1269 bool = l >= 0 - yyj1269++ - if yyhl1269 { - yyb1269 = yyj1269 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1269 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1269 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.APIVersion = "" + x.Name = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv11 := &x.Name + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } - yyj1269++ - if yyhl1269 { - yyb1269 = yyj1269 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1269 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1269 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.FieldPath = "" + x.Key = "" + } else { + yyv13 := &x.Key + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } } else { - x.FieldPath = string(r.DecodeString()) + if x.Optional == nil { + x.Optional = new(bool) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } } for { - yyj1269++ - if yyhl1269 { - yyb1269 = yyj1269 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1269 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1269 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1269-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ResourceFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *EnvFromSource) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1272 := z.EncBinary() - _ = yym1272 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1273 := !z.EncBinary() - yy2arr1273 := z.EncBasicHandle().StructToArray - var yyq1273 [3]bool - _, _, _ = yysep1273, yyq1273, yy2arr1273 - const yyr1273 bool = false - yyq1273[0] = x.ContainerName != "" 
- yyq1273[2] = true - var yynn1273 int - if yyr1273 || yy2arr1273 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Prefix != "" + yyq2[1] = x.ConfigMapRef != nil + yyq2[2] = x.SecretRef != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn1273 = 1 - for _, b := range yyq1273 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1273++ + yynn2++ } } - r.EncodeMapStart(yynn1273) - yynn1273 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1273 || yy2arr1273 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1273[0] { - yym1275 := z.EncBinary() - _ = yym1275 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) + r.EncodeString(codecSelferC_UTF81234, string(x.Prefix)) } } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1273[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerName")) + r.EncodeString(codecSelferC_UTF81234, string("prefix")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1276 := z.EncBinary() - _ = yym1276 + yym5 := z.EncBinary() + _ = yym5 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) + r.EncodeString(codecSelferC_UTF81234, string(x.Prefix)) } } } - if yyr1273 || yy2arr1273 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1278 := z.EncBinary() - _ = yym1278 - if false { + if yyq2[1] { + if x.ConfigMapRef == nil { + r.EncodeNil() + } else { + x.ConfigMapRef.CodecEncodeSelf(e) + } } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) + r.EncodeNil() } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resource")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1279 := z.EncBinary() - _ = yym1279 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("configMapRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ConfigMapRef == nil { + r.EncodeNil() + } else { + x.ConfigMapRef.CodecEncodeSelf(e) + } } } - if yyr1273 || yy2arr1273 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1273[2] { - yy1281 := &x.Divisor - yym1282 := z.EncBinary() - _ = yym1282 - if false { - } else if z.HasExtensions() && z.EncExt(yy1281) { - } else if !yym1282 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1281) + if yyq2[2] { + if x.SecretRef == nil { + r.EncodeNil() } else { - z.EncFallback(yy1281) + x.SecretRef.CodecEncodeSelf(e) } } else { r.EncodeNil() } } else { - if yyq1273[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("divisor")) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1283 := &x.Divisor - yym1284 := z.EncBinary() - _ = yym1284 - if false { - } else if z.HasExtensions() && z.EncExt(yy1283) { - } else if !yym1284 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1283) + if x.SecretRef == nil { + r.EncodeNil() } else { - z.EncFallback(yy1283) + x.SecretRef.CodecEncodeSelf(e) } } 
} - if yyr1273 || yy2arr1273 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -17944,29 +23298,29 @@ func (x *ResourceFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ResourceFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *EnvFromSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1285 := z.DecBinary() - _ = yym1285 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1286 := r.ContainerType() - if yyct1286 == codecSelferValueTypeMap1234 { - yyl1286 := r.ReadMapStart() - if yyl1286 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1286, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1286 == codecSelferValueTypeArray1234 { - yyl1286 := r.ReadArrayStart() - if yyl1286 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1286, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -17974,16 +23328,16 @@ func (x *ResourceFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ResourceFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *EnvFromSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1287Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1287Slc - var yyhl1287 bool = l >= 0 - for yyj1287 := 0; ; yyj1287++ { - if yyhl1287 { - if yyj1287 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -17992,160 +23346,175 @@ func (x *ResourceFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decod } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1287Slc = r.DecodeBytes(yys1287Slc, true, true) - yys1287 := string(yys1287Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1287 { - case "containerName": + switch yys3 { + case "prefix": if r.TryDecodeAsNil() { - x.ContainerName = "" + x.Prefix = "" } else { - x.ContainerName = string(r.DecodeString()) + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } - case "resource": + case "configMapRef": if r.TryDecodeAsNil() { - x.Resource = "" + if x.ConfigMapRef != nil { + x.ConfigMapRef = nil + } } else { - x.Resource = string(r.DecodeString()) + if x.ConfigMapRef == nil { + x.ConfigMapRef = new(ConfigMapEnvSource) + } + x.ConfigMapRef.CodecDecodeSelf(d) } - case "divisor": + case "secretRef": if r.TryDecodeAsNil() { - x.Divisor = pkg3_resource.Quantity{} + if x.SecretRef != nil { + x.SecretRef = nil + } } else { - yyv1290 := &x.Divisor - yym1291 := z.DecBinary() - _ = yym1291 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1290) { - } else if !yym1291 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1290) - } else { - 
z.DecFallback(yyv1290, false) + if x.SecretRef == nil { + x.SecretRef = new(SecretEnvSource) } + x.SecretRef.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys1287) - } // end switch yys1287 - } // end for yyj1287 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ResourceFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *EnvFromSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1292 int - var yyb1292 bool - var yyhl1292 bool = l >= 0 - yyj1292++ - if yyhl1292 { - yyb1292 = yyj1292 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1292 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1292 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ContainerName = "" + x.Prefix = "" } else { - x.ContainerName = string(r.DecodeString()) + yyv9 := &x.Prefix + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } } - yyj1292++ - if yyhl1292 { - yyb1292 = yyj1292 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1292 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1292 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Resource = "" + if x.ConfigMapRef != nil { + x.ConfigMapRef = nil + } } else { - x.Resource = string(r.DecodeString()) + if x.ConfigMapRef == nil { + x.ConfigMapRef = new(ConfigMapEnvSource) + } + x.ConfigMapRef.CodecDecodeSelf(d) } - yyj1292++ - if yyhl1292 { - yyb1292 = yyj1292 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1292 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1292 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Divisor = pkg3_resource.Quantity{} + if x.SecretRef != nil { + x.SecretRef = nil + } } else { - yyv1295 := &x.Divisor - yym1296 := z.DecBinary() - _ = yym1296 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1295) { - } else if !yym1296 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1295) - } else { - z.DecFallback(yyv1295, false) + if x.SecretRef == nil { + x.SecretRef = new(SecretEnvSource) } + x.SecretRef.CodecDecodeSelf(d) } for { - yyj1292++ - if yyhl1292 { - yyb1292 = yyj1292 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1292 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1292 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1292-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ConfigMapEnvSource) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1297 := z.EncBinary() - _ = yym1297 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1298 := !z.EncBinary() - yy2arr1298 := z.EncBasicHandle().StructToArray - var yyq1298 [2]bool - _, _, _ = 
yysep1298, yyq1298, yy2arr1298 - const yyr1298 bool = false - yyq1298[0] = x.Name != "" - var yynn1298 int - if yyr1298 || yy2arr1298 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1298 = 1 - for _, b := range yyq1298 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1298++ + yynn2++ } } - r.EncodeMapStart(yynn1298) - yynn1298 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1298 || yy2arr1298 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1298[0] { - yym1300 := z.EncBinary() - _ = yym1300 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) @@ -18154,38 +23523,54 @@ func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1298[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("name")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1301 := z.EncBinary() - _ = yym1301 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } } - if yyr1298 || yy2arr1298 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1303 := z.EncBinary() - _ = yym1303 - if false { + if yyq2[1] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy7 := *x.Optional + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(yy7)) + } + } } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + r.EncodeNil() } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1304 := z.EncBinary() - _ = yym1304 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy9 := *x.Optional + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(yy9)) + } + } } } - if yyr1298 || yy2arr1298 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -18194,29 +23579,29 @@ func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ConfigMapKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ConfigMapEnvSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1305 := z.DecBinary() - _ = yym1305 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1306 := r.ContainerType() - if yyct1306 == codecSelferValueTypeMap1234 { - yyl1306 := r.ReadMapStart() - if yyl1306 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1306, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } 
else if yyct1306 == codecSelferValueTypeArray1234 { - yyl1306 := r.ReadArrayStart() - if yyl1306 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1306, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -18224,16 +23609,16 @@ func (x *ConfigMapKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ConfigMapKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ConfigMapEnvSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1307Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1307Slc - var yyhl1307 bool = l >= 0 - for yyj1307 := 0; ; yyj1307++ { - if yyhl1307 { - if yyj1307 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -18242,43 +23627,59 @@ func (x *ConfigMapKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decode } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1307Slc = r.DecodeBytes(yys1307Slc, true, true) - yys1307 := string(yys1307Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1307 { + switch yys3 { case "name": if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } - case "key": + case "optional": if r.TryDecodeAsNil() { - x.Key = "" + if x.Optional != nil { + x.Optional = nil + } } else { - x.Key = string(r.DecodeString()) + if x.Optional == nil { + x.Optional = new(bool) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } } default: - z.DecStructFieldNotFound(-1, yys1307) - } // end switch yys1307 - } // end for yyj1307 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ConfigMapKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *ConfigMapEnvSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1310 int - var yyb1310 bool - var yyhl1310 bool = l >= 0 - yyj1310++ - if yyhl1310 { - yyb1310 = yyj1310 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1310 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1310 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -18286,76 +23687,93 @@ func (x *ConfigMapKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv9 := &x.Name + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } } - yyj1310++ - if yyhl1310 { - yyb1310 = yyj1310 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1310 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1310 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Key = "" + if x.Optional != nil { + x.Optional = nil + } } else { - x.Key = string(r.DecodeString()) + if x.Optional == nil { + x.Optional = new(bool) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } } for { - yyj1310++ - if yyhl1310 { - yyb1310 = yyj1310 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1310 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1310 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1310-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *SecretKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *SecretEnvSource) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1313 := z.EncBinary() - _ = yym1313 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1314 := !z.EncBinary() - yy2arr1314 := z.EncBasicHandle().StructToArray - var yyq1314 [2]bool - _, _, _ = yysep1314, yyq1314, yy2arr1314 - const yyr1314 bool = false - yyq1314[0] = x.Name != "" - var yynn1314 int - if yyr1314 || yy2arr1314 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1314 = 1 - for _, b := range yyq1314 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1314++ + yynn2++ } } - r.EncodeMapStart(yynn1314) - yynn1314 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1314 || yy2arr1314 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1314[0] { - yym1316 := z.EncBinary() - _ = yym1316 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) @@ -18364,38 +23782,54 @@ func (x *SecretKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1314[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("name")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1317 := z.EncBinary() - _ = yym1317 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } } - if yyr1314 || yy2arr1314 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1319 := z.EncBinary() - _ = yym1319 - if false { + if yyq2[1] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy7 := *x.Optional + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(yy7)) + } + } } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + r.EncodeNil() } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1320 := z.EncBinary() - _ = yym1320 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy9 := *x.Optional + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(yy9)) + } + } } } - if yyr1314 || yy2arr1314 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -18404,29 +23838,29 @@ func (x *SecretKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *SecretKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *SecretEnvSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1321 := z.DecBinary() - _ = yym1321 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1322 := r.ContainerType() - if yyct1322 == codecSelferValueTypeMap1234 { - yyl1322 := r.ReadMapStart() - if yyl1322 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1322, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1322 == codecSelferValueTypeArray1234 { - yyl1322 := r.ReadArrayStart() - if yyl1322 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1322, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -18434,16 +23868,16 @@ func (x *SecretKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *SecretKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *SecretEnvSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1323Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1323Slc - var yyhl1323 bool = l >= 0 - for yyj1323 := 0; ; yyj1323++ { - if yyhl1323 { - if yyj1323 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -18452,43 +23886,59 @@ func (x *SecretKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1323Slc = r.DecodeBytes(yys1323Slc, true, true) - yys1323 := string(yys1323Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1323 { + switch yys3 { case "name": if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } - case "key": + case "optional": if r.TryDecodeAsNil() { - x.Key = "" + if x.Optional != nil { + x.Optional = nil + } } else { - x.Key = string(r.DecodeString()) + if x.Optional == nil { + x.Optional = new(bool) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } } default: - z.DecStructFieldNotFound(-1, yys1323) - } // end switch yys1323 - } // end for yyj1323 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *SecretKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *SecretEnvSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1326 int - var yyb1326 bool - var yyhl1326 bool = l >= 0 - yyj1326++ - if yyhl1326 { - yyb1326 = yyj1326 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1326 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1326 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -18496,36 +23946,52 @@ func (x *SecretKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv9 := &x.Name + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } } - yyj1326++ - if yyhl1326 { - yyb1326 = yyj1326 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1326 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1326 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Key = "" + if x.Optional != nil { + x.Optional = nil + } } else { - x.Key = string(r.DecodeString()) + if x.Optional == nil { + x.Optional = new(bool) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } } for { - yyj1326++ - if yyhl1326 { - yyb1326 = yyj1326 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1326 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1326 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1326-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -18537,33 +24003,33 @@ func (x *HTTPHeader) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1329 := z.EncBinary() - _ = yym1329 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1330 := !z.EncBinary() - yy2arr1330 := z.EncBasicHandle().StructToArray - var yyq1330 [2]bool - _, _, _ = yysep1330, yyq1330, yy2arr1330 - const yyr1330 bool = false - var yynn1330 int - if yyr1330 || yy2arr1330 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1330 = 2 - for _, b := range yyq1330 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn1330++ + yynn2++ } } - r.EncodeMapStart(yynn1330) - yynn1330 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1330 || yy2arr1330 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1332 := z.EncBinary() - _ = yym1332 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) @@ -18572,17 +24038,17 @@ func (x *HTTPHeader) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("name")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1333 := z.EncBinary() - _ = yym1333 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, 
string(x.Name)) } } - if yyr1330 || yy2arr1330 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1335 := z.EncBinary() - _ = yym1335 + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Value)) @@ -18591,14 +24057,14 @@ func (x *HTTPHeader) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("value")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1336 := z.EncBinary() - _ = yym1336 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Value)) } } - if yyr1330 || yy2arr1330 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -18611,25 +24077,25 @@ func (x *HTTPHeader) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1337 := z.DecBinary() - _ = yym1337 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1338 := r.ContainerType() - if yyct1338 == codecSelferValueTypeMap1234 { - yyl1338 := r.ReadMapStart() - if yyl1338 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1338, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1338 == codecSelferValueTypeArray1234 { - yyl1338 := r.ReadArrayStart() - if yyl1338 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1338, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -18641,12 +24107,12 @@ func (x *HTTPHeader) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1339Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1339Slc - var yyhl1339 bool = l >= 0 - for yyj1339 := 0; ; yyj1339++ { - if yyhl1339 { - if yyj1339 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -18655,26 +24121,38 @@ func (x *HTTPHeader) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1339Slc = r.DecodeBytes(yys1339Slc, true, true) - yys1339 := string(yys1339Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1339 { + switch yys3 { case "name": if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "value": if r.TryDecodeAsNil() { x.Value = "" } else { - x.Value = string(r.DecodeString()) + yyv6 := &x.Value + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys1339) - } // end switch yys1339 - } // end for yyj1339 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // 
end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -18682,16 +24160,16 @@ func (x *HTTPHeader) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1342 int - var yyb1342 bool - var yyhl1342 bool = l >= 0 - yyj1342++ - if yyhl1342 { - yyb1342 = yyj1342 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1342 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1342 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -18699,15 +24177,21 @@ func (x *HTTPHeader) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv9 := &x.Name + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } } - yyj1342++ - if yyhl1342 { - yyb1342 = yyj1342 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1342 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1342 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -18715,20 +24199,26 @@ func (x *HTTPHeader) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Value = "" } else { - x.Value = string(r.DecodeString()) + yyv11 := &x.Value + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } for { - yyj1342++ - if yyhl1342 { - yyb1342 = yyj1342 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1342 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1342 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1342-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -18740,38 +24230,38 @@ func (x *HTTPGetAction) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1345 := z.EncBinary() - _ = yym1345 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1346 := !z.EncBinary() - yy2arr1346 := z.EncBasicHandle().StructToArray - var yyq1346 [5]bool - _, _, _ = yysep1346, yyq1346, yy2arr1346 - const yyr1346 bool = false - yyq1346[0] = x.Path != "" - yyq1346[2] = x.Host != "" - yyq1346[3] = x.Scheme != "" - yyq1346[4] = len(x.HTTPHeaders) != 0 - var yynn1346 int - if yyr1346 || yy2arr1346 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Path != "" + yyq2[2] = x.Host != "" + yyq2[3] = x.Scheme != "" + yyq2[4] = len(x.HTTPHeaders) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn1346 = 1 - for _, b := range yyq1346 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1346++ + yynn2++ } } - r.EncodeMapStart(yynn1346) - yynn1346 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1346 || yy2arr1346 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1346[0] { - yym1348 := z.EncBinary() - _ = yym1348 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Path)) @@ -18780,50 +24270,50 @@ func (x *HTTPGetAction) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1346[0] { + if yyq2[0] { 
z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("path")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1349 := z.EncBinary() - _ = yym1349 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Path)) } } } - if yyr1346 || yy2arr1346 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1351 := &x.Port - yym1352 := z.EncBinary() - _ = yym1352 + yy7 := &x.Port + yym8 := z.EncBinary() + _ = yym8 if false { - } else if z.HasExtensions() && z.EncExt(yy1351) { - } else if !yym1352 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1351) + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) } else { - z.EncFallback(yy1351) + z.EncFallback(yy7) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("port")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1353 := &x.Port - yym1354 := z.EncBinary() - _ = yym1354 + yy9 := &x.Port + yym10 := z.EncBinary() + _ = yym10 if false { - } else if z.HasExtensions() && z.EncExt(yy1353) { - } else if !yym1354 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1353) + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) } else { - z.EncFallback(yy1353) + z.EncFallback(yy9) } } - if yyr1346 || yy2arr1346 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1346[2] { - yym1356 := z.EncBinary() - _ = yym1356 + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Host)) @@ -18832,41 +24322,41 @@ func (x *HTTPGetAction) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1346[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("host")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1357 := z.EncBinary() - _ = yym1357 + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Host)) } } } - if yyr1346 || yy2arr1346 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1346[3] { + if yyq2[3] { x.Scheme.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1346[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("scheme")) z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Scheme.CodecEncodeSelf(e) } } - if yyr1346 || yy2arr1346 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1346[4] { + if yyq2[4] { if x.HTTPHeaders == nil { r.EncodeNil() } else { - yym1360 := z.EncBinary() - _ = yym1360 + yym18 := z.EncBinary() + _ = yym18 if false { } else { h.encSliceHTTPHeader(([]HTTPHeader)(x.HTTPHeaders), e) @@ -18876,15 +24366,15 @@ func (x *HTTPGetAction) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1346[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("httpHeaders")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.HTTPHeaders == nil { r.EncodeNil() } else { - yym1361 := z.EncBinary() - _ = yym1361 + yym19 := z.EncBinary() + _ = yym19 if false { } else { 
h.encSliceHTTPHeader(([]HTTPHeader)(x.HTTPHeaders), e) @@ -18892,7 +24382,7 @@ func (x *HTTPGetAction) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1346 || yy2arr1346 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -18905,25 +24395,25 @@ func (x *HTTPGetAction) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1362 := z.DecBinary() - _ = yym1362 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1363 := r.ContainerType() - if yyct1363 == codecSelferValueTypeMap1234 { - yyl1363 := r.ReadMapStart() - if yyl1363 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1363, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1363 == codecSelferValueTypeArray1234 { - yyl1363 := r.ReadArrayStart() - if yyl1363 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1363, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -18935,12 +24425,12 @@ func (x *HTTPGetAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1364Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1364Slc - var yyhl1364 bool = l >= 0 - for yyj1364 := 0; ; yyj1364++ { - if yyhl1364 { - if yyj1364 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -18949,59 +24439,72 @@ func (x *HTTPGetAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1364Slc = r.DecodeBytes(yys1364Slc, true, true) - yys1364 := string(yys1364Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1364 { + switch yys3 { case "path": if r.TryDecodeAsNil() { x.Path = "" } else { - x.Path = string(r.DecodeString()) + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "port": if r.TryDecodeAsNil() { x.Port = pkg4_intstr.IntOrString{} } else { - yyv1366 := &x.Port - yym1367 := z.DecBinary() - _ = yym1367 + yyv6 := &x.Port + yym7 := z.DecBinary() + _ = yym7 if false { - } else if z.HasExtensions() && z.DecExt(yyv1366) { - } else if !yym1367 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1366) + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) } else { - z.DecFallback(yyv1366, false) + z.DecFallback(yyv6, false) } } case "host": if r.TryDecodeAsNil() { x.Host = "" } else { - x.Host = string(r.DecodeString()) + yyv8 := &x.Host + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } case "scheme": if r.TryDecodeAsNil() { x.Scheme = "" } else { - x.Scheme = URIScheme(r.DecodeString()) + yyv10 := &x.Scheme + yyv10.CodecDecodeSelf(d) } case "httpHeaders": if 
r.TryDecodeAsNil() { x.HTTPHeaders = nil } else { - yyv1370 := &x.HTTPHeaders - yym1371 := z.DecBinary() - _ = yym1371 + yyv11 := &x.HTTPHeaders + yym12 := z.DecBinary() + _ = yym12 if false { } else { - h.decSliceHTTPHeader((*[]HTTPHeader)(yyv1370), d) + h.decSliceHTTPHeader((*[]HTTPHeader)(yyv11), d) } } default: - z.DecStructFieldNotFound(-1, yys1364) - } // end switch yys1364 - } // end for yyj1364 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -19009,16 +24512,16 @@ func (x *HTTPGetAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1372 int - var yyb1372 bool - var yyhl1372 bool = l >= 0 - yyj1372++ - if yyhl1372 { - yyb1372 = yyj1372 > l + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb1372 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb1372 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -19026,15 +24529,21 @@ func (x *HTTPGetAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Path = "" } else { - x.Path = string(r.DecodeString()) + yyv14 := &x.Path + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } - yyj1372++ - if yyhl1372 { - yyb1372 = yyj1372 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb1372 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb1372 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -19042,24 +24551,24 @@ func (x *HTTPGetAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Port = pkg4_intstr.IntOrString{} } else { - yyv1374 := &x.Port - yym1375 := z.DecBinary() - _ = yym1375 + yyv16 := &x.Port + yym17 := z.DecBinary() + _ = yym17 if false { - } else if z.HasExtensions() && z.DecExt(yyv1374) { - } else if !yym1375 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1374) + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else if !yym17 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv16) } else { - z.DecFallback(yyv1374, false) + z.DecFallback(yyv16, false) } } - yyj1372++ - if yyhl1372 { - yyb1372 = yyj1372 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb1372 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb1372 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -19067,15 +24576,21 @@ func (x *HTTPGetAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Host = "" } else { - x.Host = string(r.DecodeString()) + yyv18 := &x.Host + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } } - yyj1372++ - if yyhl1372 { - yyb1372 = yyj1372 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb1372 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb1372 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -19083,15 +24598,16 @@ func (x *HTTPGetAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Scheme = "" } else { - x.Scheme = URIScheme(r.DecodeString()) + yyv20 := &x.Scheme + yyv20.CodecDecodeSelf(d) } - yyj1372++ - if yyhl1372 { - yyb1372 = yyj1372 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb1372 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb1372 { + if 
yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -19099,26 +24615,26 @@ func (x *HTTPGetAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.HTTPHeaders = nil } else { - yyv1378 := &x.HTTPHeaders - yym1379 := z.DecBinary() - _ = yym1379 + yyv21 := &x.HTTPHeaders + yym22 := z.DecBinary() + _ = yym22 if false { } else { - h.decSliceHTTPHeader((*[]HTTPHeader)(yyv1378), d) + h.decSliceHTTPHeader((*[]HTTPHeader)(yyv21), d) } } for { - yyj1372++ - if yyhl1372 { - yyb1372 = yyj1372 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb1372 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb1372 { + if yyb13 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1372-1, "") + z.DecStructFieldNotFound(yyj13-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -19127,8 +24643,8 @@ func (x URIScheme) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym1380 := z.EncBinary() - _ = yym1380 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -19140,8 +24656,8 @@ func (x *URIScheme) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1381 := z.DecBinary() - _ = yym1381 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -19156,57 +24672,57 @@ func (x *TCPSocketAction) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1382 := z.EncBinary() - _ = yym1382 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1383 := !z.EncBinary() - yy2arr1383 := z.EncBasicHandle().StructToArray - var yyq1383 [1]bool - _, _, _ = yysep1383, yyq1383, yy2arr1383 - const yyr1383 bool = false - var yynn1383 int - if yyr1383 || yy2arr1383 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn1383 = 1 - for _, b := range yyq1383 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1383++ + yynn2++ } } - r.EncodeMapStart(yynn1383) - yynn1383 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1383 || yy2arr1383 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1385 := &x.Port - yym1386 := z.EncBinary() - _ = yym1386 + yy4 := &x.Port + yym5 := z.EncBinary() + _ = yym5 if false { - } else if z.HasExtensions() && z.EncExt(yy1385) { - } else if !yym1386 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1385) + } else if z.HasExtensions() && z.EncExt(yy4) { + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(yy4) } else { - z.EncFallback(yy1385) + z.EncFallback(yy4) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("port")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1387 := &x.Port - yym1388 := z.EncBinary() - _ = yym1388 + yy6 := &x.Port + yym7 := z.EncBinary() + _ = yym7 if false { - } else if z.HasExtensions() && z.EncExt(yy1387) { - } else if !yym1388 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1387) + } else if z.HasExtensions() && z.EncExt(yy6) { + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(yy6) } else { - z.EncFallback(yy1387) + z.EncFallback(yy6) } } - if yyr1383 
|| yy2arr1383 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -19219,25 +24735,25 @@ func (x *TCPSocketAction) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1389 := z.DecBinary() - _ = yym1389 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1390 := r.ContainerType() - if yyct1390 == codecSelferValueTypeMap1234 { - yyl1390 := r.ReadMapStart() - if yyl1390 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1390, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1390 == codecSelferValueTypeArray1234 { - yyl1390 := r.ReadArrayStart() - if yyl1390 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1390, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -19249,12 +24765,12 @@ func (x *TCPSocketAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1391Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1391Slc - var yyhl1391 bool = l >= 0 - for yyj1391 := 0; ; yyj1391++ { - if yyhl1391 { - if yyj1391 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -19263,29 +24779,29 @@ func (x *TCPSocketAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1391Slc = r.DecodeBytes(yys1391Slc, true, true) - yys1391 := string(yys1391Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1391 { + switch yys3 { case "port": if r.TryDecodeAsNil() { x.Port = pkg4_intstr.IntOrString{} } else { - yyv1392 := &x.Port - yym1393 := z.DecBinary() - _ = yym1393 + yyv4 := &x.Port + yym5 := z.DecBinary() + _ = yym5 if false { - } else if z.HasExtensions() && z.DecExt(yyv1392) { - } else if !yym1393 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1392) + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv4) } else { - z.DecFallback(yyv1392, false) + z.DecFallback(yyv4, false) } } default: - z.DecStructFieldNotFound(-1, yys1391) - } // end switch yys1391 - } // end for yyj1391 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -19293,16 +24809,16 @@ func (x *TCPSocketAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1394 int - var yyb1394 bool - var yyhl1394 bool = l >= 0 - yyj1394++ - if yyhl1394 { - yyb1394 = yyj1394 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb1394 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb1394 { + if yyb6 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -19310,29 +24826,29 @@ func (x *TCPSocketAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Port = pkg4_intstr.IntOrString{} } else { - yyv1395 := &x.Port - yym1396 := z.DecBinary() - _ = yym1396 + yyv7 := &x.Port + yym8 := z.DecBinary() + _ = yym8 if false { - } else if z.HasExtensions() && z.DecExt(yyv1395) { - } else if !yym1396 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1395) + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) } else { - z.DecFallback(yyv1395, false) + z.DecFallback(yyv7, false) } } for { - yyj1394++ - if yyhl1394 { - yyb1394 = yyj1394 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb1394 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb1394 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1394-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -19344,38 +24860,38 @@ func (x *ExecAction) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1397 := z.EncBinary() - _ = yym1397 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1398 := !z.EncBinary() - yy2arr1398 := z.EncBasicHandle().StructToArray - var yyq1398 [1]bool - _, _, _ = yysep1398, yyq1398, yy2arr1398 - const yyr1398 bool = false - yyq1398[0] = len(x.Command) != 0 - var yynn1398 int - if yyr1398 || yy2arr1398 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Command) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn1398 = 0 - for _, b := range yyq1398 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1398++ + yynn2++ } } - r.EncodeMapStart(yynn1398) - yynn1398 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1398 || yy2arr1398 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1398[0] { + if yyq2[0] { if x.Command == nil { r.EncodeNil() } else { - yym1400 := z.EncBinary() - _ = yym1400 + yym4 := z.EncBinary() + _ = yym4 if false { } else { z.F.EncSliceStringV(x.Command, false, e) @@ -19385,15 +24901,15 @@ func (x *ExecAction) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1398[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("command")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Command == nil { r.EncodeNil() } else { - yym1401 := z.EncBinary() - _ = yym1401 + yym5 := z.EncBinary() + _ = yym5 if false { } else { z.F.EncSliceStringV(x.Command, false, e) @@ -19401,7 +24917,7 @@ func (x *ExecAction) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1398 || yy2arr1398 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -19414,25 +24930,25 @@ func (x *ExecAction) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1402 := z.DecBinary() - _ = yym1402 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1403 := r.ContainerType() - if yyct1403 == codecSelferValueTypeMap1234 { - yyl1403 := 
r.ReadMapStart() - if yyl1403 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1403, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1403 == codecSelferValueTypeArray1234 { - yyl1403 := r.ReadArrayStart() - if yyl1403 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1403, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -19444,12 +24960,12 @@ func (x *ExecAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1404Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1404Slc - var yyhl1404 bool = l >= 0 - for yyj1404 := 0; ; yyj1404++ { - if yyhl1404 { - if yyj1404 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -19458,26 +24974,26 @@ func (x *ExecAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1404Slc = r.DecodeBytes(yys1404Slc, true, true) - yys1404 := string(yys1404Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1404 { + switch yys3 { case "command": if r.TryDecodeAsNil() { x.Command = nil } else { - yyv1405 := &x.Command - yym1406 := z.DecBinary() - _ = yym1406 + yyv4 := &x.Command + yym5 := z.DecBinary() + _ = yym5 if false { } else { - z.F.DecSliceStringX(yyv1405, false, d) + z.F.DecSliceStringX(yyv4, false, d) } } default: - z.DecStructFieldNotFound(-1, yys1404) - } // end switch yys1404 - } // end for yyj1404 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -19485,16 +25001,16 @@ func (x *ExecAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1407 int - var yyb1407 bool - var yyhl1407 bool = l >= 0 - yyj1407++ - if yyhl1407 { - yyb1407 = yyj1407 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb1407 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb1407 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -19502,26 +25018,26 @@ func (x *ExecAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Command = nil } else { - yyv1408 := &x.Command - yym1409 := z.DecBinary() - _ = yym1409 + yyv7 := &x.Command + yym8 := z.DecBinary() + _ = yym8 if false { } else { - z.F.DecSliceStringX(yyv1408, false, d) + z.F.DecSliceStringX(yyv7, false, d) } } for { - yyj1407++ - if yyhl1407 { - yyb1407 = yyj1407 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb1407 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb1407 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1407-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -19533,49 
+25049,49 @@ func (x *Probe) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1410 := z.EncBinary() - _ = yym1410 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1411 := !z.EncBinary() - yy2arr1411 := z.EncBasicHandle().StructToArray - var yyq1411 [8]bool - _, _, _ = yysep1411, yyq1411, yy2arr1411 - const yyr1411 bool = false - yyq1411[0] = x.Handler.Exec != nil && x.Exec != nil - yyq1411[1] = x.Handler.HTTPGet != nil && x.HTTPGet != nil - yyq1411[2] = x.Handler.TCPSocket != nil && x.TCPSocket != nil - yyq1411[3] = x.InitialDelaySeconds != 0 - yyq1411[4] = x.TimeoutSeconds != 0 - yyq1411[5] = x.PeriodSeconds != 0 - yyq1411[6] = x.SuccessThreshold != 0 - yyq1411[7] = x.FailureThreshold != 0 - var yynn1411 int - if yyr1411 || yy2arr1411 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Handler.Exec != nil && x.Exec != nil + yyq2[1] = x.Handler.HTTPGet != nil && x.HTTPGet != nil + yyq2[2] = x.Handler.TCPSocket != nil && x.TCPSocket != nil + yyq2[3] = x.InitialDelaySeconds != 0 + yyq2[4] = x.TimeoutSeconds != 0 + yyq2[5] = x.PeriodSeconds != 0 + yyq2[6] = x.SuccessThreshold != 0 + yyq2[7] = x.FailureThreshold != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(8) } else { - yynn1411 = 0 - for _, b := range yyq1411 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1411++ + yynn2++ } } - r.EncodeMapStart(yynn1411) - yynn1411 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - var yyn1412 bool + var yyn3 bool if x.Handler.Exec == nil { - yyn1412 = true - goto LABEL1412 + yyn3 = true + goto LABEL3 } - LABEL1412: - if yyr1411 || yy2arr1411 { - if yyn1412 { + LABEL3: + if yyr2 || yy2arr2 { + if yyn3 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1411[0] { + if yyq2[0] { if x.Exec == nil { r.EncodeNil() } else { @@ -19586,11 +25102,11 @@ func (x *Probe) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq1411[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("exec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn1412 { + if yyn3 { r.EncodeNil() } else { if x.Exec == nil { @@ -19601,18 +25117,18 @@ func (x *Probe) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn1413 bool + var yyn6 bool if x.Handler.HTTPGet == nil { - yyn1413 = true - goto LABEL1413 + yyn6 = true + goto LABEL6 } - LABEL1413: - if yyr1411 || yy2arr1411 { - if yyn1413 { + LABEL6: + if yyr2 || yy2arr2 { + if yyn6 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1411[1] { + if yyq2[1] { if x.HTTPGet == nil { r.EncodeNil() } else { @@ -19623,11 +25139,11 @@ func (x *Probe) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq1411[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("httpGet")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn1413 { + if yyn6 { r.EncodeNil() } else { if x.HTTPGet == nil { @@ -19638,18 +25154,18 @@ func (x *Probe) CodecEncodeSelf(e *codec1978.Encoder) { } } } - var yyn1414 bool + var yyn9 bool if x.Handler.TCPSocket == nil { - yyn1414 = true - goto LABEL1414 + yyn9 = true + goto LABEL9 } - LABEL1414: - if yyr1411 || yy2arr1411 { - if yyn1414 { + LABEL9: + if yyr2 || yy2arr2 { + if yyn9 { r.EncodeNil() } 
else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1411[2] { + if yyq2[2] { if x.TCPSocket == nil { r.EncodeNil() } else { @@ -19660,11 +25176,11 @@ func (x *Probe) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq1411[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("tcpSocket")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn1414 { + if yyn9 { r.EncodeNil() } else { if x.TCPSocket == nil { @@ -19675,11 +25191,11 @@ func (x *Probe) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1411 || yy2arr1411 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1411[3] { - yym1416 := z.EncBinary() - _ = yym1416 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeInt(int64(x.InitialDelaySeconds)) @@ -19688,23 +25204,23 @@ func (x *Probe) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq1411[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("initialDelaySeconds")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1417 := z.EncBinary() - _ = yym1417 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeInt(int64(x.InitialDelaySeconds)) } } } - if yyr1411 || yy2arr1411 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1411[4] { - yym1419 := z.EncBinary() - _ = yym1419 + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeInt(int64(x.TimeoutSeconds)) @@ -19713,23 +25229,23 @@ func (x *Probe) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq1411[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("timeoutSeconds")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1420 := z.EncBinary() - _ = yym1420 + yym17 := z.EncBinary() + _ = yym17 if false { } else { r.EncodeInt(int64(x.TimeoutSeconds)) } } } - if yyr1411 || yy2arr1411 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1411[5] { - yym1422 := z.EncBinary() - _ = yym1422 + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 if false { } else { r.EncodeInt(int64(x.PeriodSeconds)) @@ -19738,23 +25254,23 @@ func (x *Probe) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq1411[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("periodSeconds")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1423 := z.EncBinary() - _ = yym1423 + yym20 := z.EncBinary() + _ = yym20 if false { } else { r.EncodeInt(int64(x.PeriodSeconds)) } } } - if yyr1411 || yy2arr1411 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1411[6] { - yym1425 := z.EncBinary() - _ = yym1425 + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 if false { } else { r.EncodeInt(int64(x.SuccessThreshold)) @@ -19763,23 +25279,23 @@ func (x *Probe) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq1411[6] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("successThreshold")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1426 := z.EncBinary() - _ = yym1426 + yym23 := z.EncBinary() + _ = yym23 if false { } else { 
r.EncodeInt(int64(x.SuccessThreshold)) } } } - if yyr1411 || yy2arr1411 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1411[7] { - yym1428 := z.EncBinary() - _ = yym1428 + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 if false { } else { r.EncodeInt(int64(x.FailureThreshold)) @@ -19788,19 +25304,19 @@ func (x *Probe) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq1411[7] { + if yyq2[7] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("failureThreshold")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1429 := z.EncBinary() - _ = yym1429 + yym26 := z.EncBinary() + _ = yym26 if false { } else { r.EncodeInt(int64(x.FailureThreshold)) } } } - if yyr1411 || yy2arr1411 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -19813,25 +25329,25 @@ func (x *Probe) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1430 := z.DecBinary() - _ = yym1430 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1431 := r.ContainerType() - if yyct1431 == codecSelferValueTypeMap1234 { - yyl1431 := r.ReadMapStart() - if yyl1431 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1431, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1431 == codecSelferValueTypeArray1234 { - yyl1431 := r.ReadArrayStart() - if yyl1431 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1431, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -19843,12 +25359,12 @@ func (x *Probe) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1432Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1432Slc - var yyhl1432 bool = l >= 0 - for yyj1432 := 0; ; yyj1432++ { - if yyhl1432 { - if yyj1432 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -19857,10 +25373,10 @@ func (x *Probe) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1432Slc = r.DecodeBytes(yys1432Slc, true, true) - yys1432 := string(yys1432Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1432 { + switch yys3 { case "exec": if x.Handler.Exec == nil { x.Handler.Exec = new(ExecAction) @@ -19907,36 +25423,66 @@ func (x *Probe) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.InitialDelaySeconds = 0 } else { - x.InitialDelaySeconds = int32(r.DecodeInt(32)) + yyv7 := &x.InitialDelaySeconds + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } } case "timeoutSeconds": if r.TryDecodeAsNil() { x.TimeoutSeconds = 0 } else { - x.TimeoutSeconds = 
int32(r.DecodeInt(32)) + yyv9 := &x.TimeoutSeconds + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*int32)(yyv9)) = int32(r.DecodeInt(32)) + } } case "periodSeconds": if r.TryDecodeAsNil() { x.PeriodSeconds = 0 } else { - x.PeriodSeconds = int32(r.DecodeInt(32)) + yyv11 := &x.PeriodSeconds + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(yyv11)) = int32(r.DecodeInt(32)) + } } case "successThreshold": if r.TryDecodeAsNil() { x.SuccessThreshold = 0 } else { - x.SuccessThreshold = int32(r.DecodeInt(32)) + yyv13 := &x.SuccessThreshold + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*int32)(yyv13)) = int32(r.DecodeInt(32)) + } } case "failureThreshold": if r.TryDecodeAsNil() { x.FailureThreshold = 0 } else { - x.FailureThreshold = int32(r.DecodeInt(32)) + yyv15 := &x.FailureThreshold + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(yyv15)) = int32(r.DecodeInt(32)) + } } default: - z.DecStructFieldNotFound(-1, yys1432) - } // end switch yys1432 - } // end for yyj1432 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -19944,19 +25490,19 @@ func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1441 int - var yyb1441 bool - var yyhl1441 bool = l >= 0 + var yyj17 int + var yyb17 bool + var yyhl17 bool = l >= 0 if x.Handler.Exec == nil { x.Handler.Exec = new(ExecAction) } - yyj1441++ - if yyhl1441 { - yyb1441 = yyj1441 > l + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb1441 = r.CheckBreak() + yyb17 = r.CheckBreak() } - if yyb1441 { + if yyb17 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -19974,13 +25520,13 @@ func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.Handler.HTTPGet == nil { x.Handler.HTTPGet = new(HTTPGetAction) } - yyj1441++ - if yyhl1441 { - yyb1441 = yyj1441 > l + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb1441 = r.CheckBreak() + yyb17 = r.CheckBreak() } - if yyb1441 { + if yyb17 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -19998,13 +25544,13 @@ func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.Handler.TCPSocket == nil { x.Handler.TCPSocket = new(TCPSocketAction) } - yyj1441++ - if yyhl1441 { - yyb1441 = yyj1441 > l + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb1441 = r.CheckBreak() + yyb17 = r.CheckBreak() } - if yyb1441 { + if yyb17 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -20019,13 +25565,13 @@ func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.TCPSocket.CodecDecodeSelf(d) } - yyj1441++ - if yyhl1441 { - yyb1441 = yyj1441 > l + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb1441 = r.CheckBreak() + yyb17 = r.CheckBreak() } - if yyb1441 { + if yyb17 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -20033,15 +25579,21 @@ func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.InitialDelaySeconds = 0 } else { - x.InitialDelaySeconds = int32(r.DecodeInt(32)) + yyv21 := &x.InitialDelaySeconds + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } } - yyj1441++ - if yyhl1441 { - yyb1441 = yyj1441 > l + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb1441 = r.CheckBreak() + yyb17 
= r.CheckBreak() } - if yyb1441 { + if yyb17 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -20049,15 +25601,21 @@ func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.TimeoutSeconds = 0 } else { - x.TimeoutSeconds = int32(r.DecodeInt(32)) + yyv23 := &x.TimeoutSeconds + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } } - yyj1441++ - if yyhl1441 { - yyb1441 = yyj1441 > l + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb1441 = r.CheckBreak() + yyb17 = r.CheckBreak() } - if yyb1441 { + if yyb17 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -20065,15 +25623,21 @@ func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.PeriodSeconds = 0 } else { - x.PeriodSeconds = int32(r.DecodeInt(32)) + yyv25 := &x.PeriodSeconds + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } } - yyj1441++ - if yyhl1441 { - yyb1441 = yyj1441 > l + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb1441 = r.CheckBreak() + yyb17 = r.CheckBreak() } - if yyb1441 { + if yyb17 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -20081,15 +25645,21 @@ func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.SuccessThreshold = 0 } else { - x.SuccessThreshold = int32(r.DecodeInt(32)) + yyv27 := &x.SuccessThreshold + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(yyv27)) = int32(r.DecodeInt(32)) + } } - yyj1441++ - if yyhl1441 { - yyb1441 = yyj1441 > l + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb1441 = r.CheckBreak() + yyb17 = r.CheckBreak() } - if yyb1441 { + if yyb17 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -20097,20 +25667,26 @@ func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.FailureThreshold = 0 } else { - x.FailureThreshold = int32(r.DecodeInt(32)) + yyv29 := &x.FailureThreshold + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*int32)(yyv29)) = int32(r.DecodeInt(32)) + } } for { - yyj1441++ - if yyhl1441 { - yyb1441 = yyj1441 > l + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb1441 = r.CheckBreak() + yyb17 = r.CheckBreak() } - if yyb1441 { + if yyb17 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1441-1, "") + z.DecStructFieldNotFound(yyj17-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -20119,8 +25695,8 @@ func (x PullPolicy) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym1450 := z.EncBinary() - _ = yym1450 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -20132,8 +25708,34 @@ func (x *PullPolicy) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1451 := z.DecBinary() - _ = yym1451 + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x TerminationMessagePolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && 
z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *TerminationMessagePolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -20145,8 +25747,8 @@ func (x Capability) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym1452 := z.EncBinary() - _ = yym1452 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -20158,8 +25760,8 @@ func (x *Capability) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1453 := z.DecBinary() - _ = yym1453 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -20174,39 +25776,39 @@ func (x *Capabilities) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1454 := z.EncBinary() - _ = yym1454 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1455 := !z.EncBinary() - yy2arr1455 := z.EncBasicHandle().StructToArray - var yyq1455 [2]bool - _, _, _ = yysep1455, yyq1455, yy2arr1455 - const yyr1455 bool = false - yyq1455[0] = len(x.Add) != 0 - yyq1455[1] = len(x.Drop) != 0 - var yynn1455 int - if yyr1455 || yy2arr1455 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Add) != 0 + yyq2[1] = len(x.Drop) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1455 = 0 - for _, b := range yyq1455 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1455++ + yynn2++ } } - r.EncodeMapStart(yynn1455) - yynn1455 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1455 || yy2arr1455 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1455[0] { + if yyq2[0] { if x.Add == nil { r.EncodeNil() } else { - yym1457 := z.EncBinary() - _ = yym1457 + yym4 := z.EncBinary() + _ = yym4 if false { } else { h.encSliceCapability(([]Capability)(x.Add), e) @@ -20216,15 +25818,15 @@ func (x *Capabilities) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1455[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("add")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Add == nil { r.EncodeNil() } else { - yym1458 := z.EncBinary() - _ = yym1458 + yym5 := z.EncBinary() + _ = yym5 if false { } else { h.encSliceCapability(([]Capability)(x.Add), e) @@ -20232,14 +25834,14 @@ func (x *Capabilities) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1455 || yy2arr1455 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1455[1] { + if yyq2[1] { if x.Drop == nil { r.EncodeNil() } else { - yym1460 := z.EncBinary() - _ = yym1460 + yym7 := z.EncBinary() + _ = yym7 if false { } else { h.encSliceCapability(([]Capability)(x.Drop), e) @@ -20249,15 +25851,15 @@ func (x *Capabilities) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1455[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("drop")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Drop == nil { r.EncodeNil() } else { - yym1461 := z.EncBinary() - _ = yym1461 + yym8 := z.EncBinary() + _ = yym8 if false { } else { h.encSliceCapability(([]Capability)(x.Drop), e) @@ -20265,7 +25867,7 @@ func (x *Capabilities) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1455 || yy2arr1455 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -20278,25 +25880,25 @@ func (x *Capabilities) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1462 := z.DecBinary() - _ = yym1462 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1463 := r.ContainerType() - if yyct1463 == codecSelferValueTypeMap1234 { - yyl1463 := r.ReadMapStart() - if yyl1463 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1463, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1463 == codecSelferValueTypeArray1234 { - yyl1463 := r.ReadArrayStart() - if yyl1463 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1463, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -20308,12 +25910,12 @@ func (x *Capabilities) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1464Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1464Slc - var yyhl1464 bool = l >= 0 - for yyj1464 := 0; ; yyj1464++ { - if yyhl1464 { - if yyj1464 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -20322,38 +25924,38 @@ func (x *Capabilities) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1464Slc = r.DecodeBytes(yys1464Slc, true, true) - yys1464 := string(yys1464Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1464 { + switch yys3 { case "add": if r.TryDecodeAsNil() { x.Add = nil } else { - yyv1465 := &x.Add - yym1466 := z.DecBinary() - _ = yym1466 + yyv4 := &x.Add + yym5 := z.DecBinary() + _ = yym5 if false { } else { - h.decSliceCapability((*[]Capability)(yyv1465), d) + h.decSliceCapability((*[]Capability)(yyv4), d) } } case "drop": if r.TryDecodeAsNil() { x.Drop = nil } else { - yyv1467 := &x.Drop - yym1468 := z.DecBinary() - _ = yym1468 + yyv6 := &x.Drop + yym7 := z.DecBinary() + _ = yym7 if false { } else { - h.decSliceCapability((*[]Capability)(yyv1467), d) + h.decSliceCapability((*[]Capability)(yyv6), d) } } default: - z.DecStructFieldNotFound(-1, yys1464) - } // end switch yys1464 - } // end for yyj1464 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -20361,16 +25963,16 @@ func (x *Capabilities) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h 
codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1469 int - var yyb1469 bool - var yyhl1469 bool = l >= 0 - yyj1469++ - if yyhl1469 { - yyb1469 = yyj1469 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1469 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1469 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -20378,21 +25980,21 @@ func (x *Capabilities) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Add = nil } else { - yyv1470 := &x.Add - yym1471 := z.DecBinary() - _ = yym1471 + yyv9 := &x.Add + yym10 := z.DecBinary() + _ = yym10 if false { } else { - h.decSliceCapability((*[]Capability)(yyv1470), d) + h.decSliceCapability((*[]Capability)(yyv9), d) } } - yyj1469++ - if yyhl1469 { - yyb1469 = yyj1469 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1469 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1469 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -20400,26 +26002,26 @@ func (x *Capabilities) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Drop = nil } else { - yyv1472 := &x.Drop - yym1473 := z.DecBinary() - _ = yym1473 + yyv11 := &x.Drop + yym12 := z.DecBinary() + _ = yym12 if false { } else { - h.decSliceCapability((*[]Capability)(yyv1472), d) + h.decSliceCapability((*[]Capability)(yyv11), d) } } for { - yyj1469++ - if yyhl1469 { - yyb1469 = yyj1469 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1469 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1469 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1469-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -20431,34 +26033,34 @@ func (x *ResourceRequirements) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1474 := z.EncBinary() - _ = yym1474 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1475 := !z.EncBinary() - yy2arr1475 := z.EncBasicHandle().StructToArray - var yyq1475 [2]bool - _, _, _ = yysep1475, yyq1475, yy2arr1475 - const yyr1475 bool = false - yyq1475[0] = len(x.Limits) != 0 - yyq1475[1] = len(x.Requests) != 0 - var yynn1475 int - if yyr1475 || yy2arr1475 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Limits) != 0 + yyq2[1] = len(x.Requests) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1475 = 0 - for _, b := range yyq1475 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1475++ + yynn2++ } } - r.EncodeMapStart(yynn1475) - yynn1475 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1475 || yy2arr1475 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1475[0] { + if yyq2[0] { if x.Limits == nil { r.EncodeNil() } else { @@ -20468,7 +26070,7 @@ func (x *ResourceRequirements) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1475[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("limits")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -20479,9 +26081,9 @@ func (x *ResourceRequirements) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if 
yyr1475 || yy2arr1475 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1475[1] { + if yyq2[1] { if x.Requests == nil { r.EncodeNil() } else { @@ -20491,7 +26093,7 @@ func (x *ResourceRequirements) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1475[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("requests")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -20502,7 +26104,7 @@ func (x *ResourceRequirements) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1475 || yy2arr1475 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -20515,25 +26117,25 @@ func (x *ResourceRequirements) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1478 := z.DecBinary() - _ = yym1478 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1479 := r.ContainerType() - if yyct1479 == codecSelferValueTypeMap1234 { - yyl1479 := r.ReadMapStart() - if yyl1479 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1479, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1479 == codecSelferValueTypeArray1234 { - yyl1479 := r.ReadArrayStart() - if yyl1479 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1479, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -20545,12 +26147,12 @@ func (x *ResourceRequirements) codecDecodeSelfFromMap(l int, d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1480Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1480Slc - var yyhl1480 bool = l >= 0 - for yyj1480 := 0; ; yyj1480++ { - if yyhl1480 { - if yyj1480 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -20559,28 +26161,28 @@ func (x *ResourceRequirements) codecDecodeSelfFromMap(l int, d *codec1978.Decode } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1480Slc = r.DecodeBytes(yys1480Slc, true, true) - yys1480 := string(yys1480Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1480 { + switch yys3 { case "limits": if r.TryDecodeAsNil() { x.Limits = nil } else { - yyv1481 := &x.Limits - yyv1481.CodecDecodeSelf(d) + yyv4 := &x.Limits + yyv4.CodecDecodeSelf(d) } case "requests": if r.TryDecodeAsNil() { x.Requests = nil } else { - yyv1482 := &x.Requests - yyv1482.CodecDecodeSelf(d) + yyv5 := &x.Requests + yyv5.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys1480) - } // end switch yys1480 - } // end for yyj1480 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -20588,16 +26190,16 @@ func (x *ResourceRequirements) codecDecodeSelfFromArray(l int, d 
*codec1978.Deco var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1483 int - var yyb1483 bool - var yyhl1483 bool = l >= 0 - yyj1483++ - if yyhl1483 { - yyb1483 = yyj1483 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb1483 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb1483 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -20605,16 +26207,16 @@ func (x *ResourceRequirements) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.Limits = nil } else { - yyv1484 := &x.Limits - yyv1484.CodecDecodeSelf(d) + yyv7 := &x.Limits + yyv7.CodecDecodeSelf(d) } - yyj1483++ - if yyhl1483 { - yyb1483 = yyj1483 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb1483 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb1483 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -20622,21 +26224,21 @@ func (x *ResourceRequirements) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.Requests = nil } else { - yyv1485 := &x.Requests - yyv1485.CodecDecodeSelf(d) + yyv8 := &x.Requests + yyv8.CodecDecodeSelf(d) } for { - yyj1483++ - if yyhl1483 { - yyb1483 = yyj1483 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb1483 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb1483 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1483-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -20648,50 +26250,52 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1486 := z.EncBinary() - _ = yym1486 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1487 := !z.EncBinary() - yy2arr1487 := z.EncBasicHandle().StructToArray - var yyq1487 [18]bool - _, _, _ = yysep1487, yyq1487, yy2arr1487 - const yyr1487 bool = false - yyq1487[1] = x.Image != "" - yyq1487[2] = len(x.Command) != 0 - yyq1487[3] = len(x.Args) != 0 - yyq1487[4] = x.WorkingDir != "" - yyq1487[5] = len(x.Ports) != 0 - yyq1487[6] = len(x.Env) != 0 - yyq1487[7] = true - yyq1487[8] = len(x.VolumeMounts) != 0 - yyq1487[9] = x.LivenessProbe != nil - yyq1487[10] = x.ReadinessProbe != nil - yyq1487[11] = x.Lifecycle != nil - yyq1487[12] = x.TerminationMessagePath != "" - yyq1487[13] = x.ImagePullPolicy != "" - yyq1487[14] = x.SecurityContext != nil - yyq1487[15] = x.Stdin != false - yyq1487[16] = x.StdinOnce != false - yyq1487[17] = x.TTY != false - var yynn1487 int - if yyr1487 || yy2arr1487 { - r.EncodeArrayStart(18) - } else { - yynn1487 = 1 - for _, b := range yyq1487 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [20]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Image != "" + yyq2[2] = len(x.Command) != 0 + yyq2[3] = len(x.Args) != 0 + yyq2[4] = x.WorkingDir != "" + yyq2[5] = len(x.Ports) != 0 + yyq2[6] = len(x.EnvFrom) != 0 + yyq2[7] = len(x.Env) != 0 + yyq2[8] = true + yyq2[9] = len(x.VolumeMounts) != 0 + yyq2[10] = x.LivenessProbe != nil + yyq2[11] = x.ReadinessProbe != nil + yyq2[12] = x.Lifecycle != nil + yyq2[13] = x.TerminationMessagePath != "" + yyq2[14] = x.TerminationMessagePolicy != "" + yyq2[15] = x.ImagePullPolicy != "" + yyq2[16] = x.SecurityContext != nil + yyq2[17] = x.Stdin != false + yyq2[18] = x.StdinOnce != false + yyq2[19] = 
x.TTY != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(20) + } else { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1487++ + yynn2++ } } - r.EncodeMapStart(yynn1487) - yynn1487 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1487 || yy2arr1487 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1489 := z.EncBinary() - _ = yym1489 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) @@ -20700,18 +26304,18 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("name")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1490 := z.EncBinary() - _ = yym1490 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } - if yyr1487 || yy2arr1487 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1487[1] { - yym1492 := z.EncBinary() - _ = yym1492 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Image)) @@ -20720,26 +26324,26 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1487[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("image")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1493 := z.EncBinary() - _ = yym1493 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Image)) } } } - if yyr1487 || yy2arr1487 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1487[2] { + if yyq2[2] { if x.Command == nil { r.EncodeNil() } else { - yym1495 := z.EncBinary() - _ = yym1495 + yym10 := z.EncBinary() + _ = yym10 if false { } else { z.F.EncSliceStringV(x.Command, false, e) @@ -20749,15 +26353,15 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1487[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("command")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Command == nil { r.EncodeNil() } else { - yym1496 := z.EncBinary() - _ = yym1496 + yym11 := z.EncBinary() + _ = yym11 if false { } else { z.F.EncSliceStringV(x.Command, false, e) @@ -20765,14 +26369,14 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1487 || yy2arr1487 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1487[3] { + if yyq2[3] { if x.Args == nil { r.EncodeNil() } else { - yym1498 := z.EncBinary() - _ = yym1498 + yym13 := z.EncBinary() + _ = yym13 if false { } else { z.F.EncSliceStringV(x.Args, false, e) @@ -20782,15 +26386,15 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1487[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("args")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Args == nil { r.EncodeNil() } else { - yym1499 := z.EncBinary() - _ = yym1499 + yym14 := z.EncBinary() + _ = yym14 if false { } else { z.F.EncSliceStringV(x.Args, false, e) @@ -20798,11 +26402,11 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if 
yyr1487 || yy2arr1487 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1487[4] { - yym1501 := z.EncBinary() - _ = yym1501 + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.WorkingDir)) @@ -20811,26 +26415,26 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1487[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("workingDir")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1502 := z.EncBinary() - _ = yym1502 + yym17 := z.EncBinary() + _ = yym17 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.WorkingDir)) } } } - if yyr1487 || yy2arr1487 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1487[5] { + if yyq2[5] { if x.Ports == nil { r.EncodeNil() } else { - yym1504 := z.EncBinary() - _ = yym1504 + yym19 := z.EncBinary() + _ = yym19 if false { } else { h.encSliceContainerPort(([]ContainerPort)(x.Ports), e) @@ -20840,15 +26444,15 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1487[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("ports")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Ports == nil { r.EncodeNil() } else { - yym1505 := z.EncBinary() - _ = yym1505 + yym20 := z.EncBinary() + _ = yym20 if false { } else { h.encSliceContainerPort(([]ContainerPort)(x.Ports), e) @@ -20856,14 +26460,47 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1487 || yy2arr1487 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1487[6] { + if yyq2[6] { + if x.EnvFrom == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + h.encSliceEnvFromSource(([]EnvFromSource)(x.EnvFrom), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("envFrom")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.EnvFrom == nil { + r.EncodeNil() + } else { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + h.encSliceEnvFromSource(([]EnvFromSource)(x.EnvFrom), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { if x.Env == nil { r.EncodeNil() } else { - yym1507 := z.EncBinary() - _ = yym1507 + yym25 := z.EncBinary() + _ = yym25 if false { } else { h.encSliceEnvVar(([]EnvVar)(x.Env), e) @@ -20873,15 +26510,15 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1487[6] { + if yyq2[7] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("env")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Env == nil { r.EncodeNil() } else { - yym1508 := z.EncBinary() - _ = yym1508 + yym26 := z.EncBinary() + _ = yym26 if false { } else { h.encSliceEnvVar(([]EnvVar)(x.Env), e) @@ -20889,31 +26526,31 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1487 || yy2arr1487 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1487[7] { - yy1510 := &x.Resources - yy1510.CodecEncodeSelf(e) + if 
yyq2[8] { + yy28 := &x.Resources + yy28.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq1487[7] { + if yyq2[8] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("resources")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1511 := &x.Resources - yy1511.CodecEncodeSelf(e) + yy30 := &x.Resources + yy30.CodecEncodeSelf(e) } } - if yyr1487 || yy2arr1487 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1487[8] { + if yyq2[9] { if x.VolumeMounts == nil { r.EncodeNil() } else { - yym1513 := z.EncBinary() - _ = yym1513 + yym33 := z.EncBinary() + _ = yym33 if false { } else { h.encSliceVolumeMount(([]VolumeMount)(x.VolumeMounts), e) @@ -20923,15 +26560,15 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1487[8] { + if yyq2[9] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("volumeMounts")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.VolumeMounts == nil { r.EncodeNil() } else { - yym1514 := z.EncBinary() - _ = yym1514 + yym34 := z.EncBinary() + _ = yym34 if false { } else { h.encSliceVolumeMount(([]VolumeMount)(x.VolumeMounts), e) @@ -20939,9 +26576,9 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1487 || yy2arr1487 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1487[9] { + if yyq2[10] { if x.LivenessProbe == nil { r.EncodeNil() } else { @@ -20951,7 +26588,7 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1487[9] { + if yyq2[10] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("livenessProbe")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -20962,9 +26599,9 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1487 || yy2arr1487 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1487[10] { + if yyq2[11] { if x.ReadinessProbe == nil { r.EncodeNil() } else { @@ -20974,7 +26611,7 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1487[10] { + if yyq2[11] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("readinessProbe")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -20985,9 +26622,9 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1487 || yy2arr1487 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1487[11] { + if yyq2[12] { if x.Lifecycle == nil { r.EncodeNil() } else { @@ -20997,7 +26634,7 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1487[11] { + if yyq2[12] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("lifecycle")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -21008,11 +26645,11 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1487 || yy2arr1487 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1487[12] { - yym1519 := z.EncBinary() - _ = yym1519 + if yyq2[13] { + yym45 := z.EncBinary() + _ = yym45 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.TerminationMessagePath)) @@ -21021,36 +26658,51 @@ func (x 
*Container) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1487[12] { + if yyq2[13] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("terminationMessagePath")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1520 := z.EncBinary() - _ = yym1520 + yym46 := z.EncBinary() + _ = yym46 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.TerminationMessagePath)) } } } - if yyr1487 || yy2arr1487 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1487[13] { + if yyq2[14] { + x.TerminationMessagePolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[14] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("terminationMessagePolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.TerminationMessagePolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[15] { x.ImagePullPolicy.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1487[13] { + if yyq2[15] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("imagePullPolicy")) z.EncSendContainerState(codecSelfer_containerMapValue1234) x.ImagePullPolicy.CodecEncodeSelf(e) } } - if yyr1487 || yy2arr1487 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1487[14] { + if yyq2[16] { if x.SecurityContext == nil { r.EncodeNil() } else { @@ -21060,7 +26712,7 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1487[14] { + if yyq2[16] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("securityContext")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -21071,11 +26723,11 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1487 || yy2arr1487 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1487[15] { - yym1524 := z.EncBinary() - _ = yym1524 + if yyq2[17] { + yym57 := z.EncBinary() + _ = yym57 if false { } else { r.EncodeBool(bool(x.Stdin)) @@ -21084,23 +26736,23 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq1487[15] { + if yyq2[17] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("stdin")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1525 := z.EncBinary() - _ = yym1525 + yym58 := z.EncBinary() + _ = yym58 if false { } else { r.EncodeBool(bool(x.Stdin)) } } } - if yyr1487 || yy2arr1487 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1487[16] { - yym1527 := z.EncBinary() - _ = yym1527 + if yyq2[18] { + yym60 := z.EncBinary() + _ = yym60 if false { } else { r.EncodeBool(bool(x.StdinOnce)) @@ -21109,23 +26761,23 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq1487[16] { + if yyq2[18] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("stdinOnce")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1528 := z.EncBinary() - _ = yym1528 + yym61 := z.EncBinary() + _ = yym61 if false { } else { 
r.EncodeBool(bool(x.StdinOnce)) } } } - if yyr1487 || yy2arr1487 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1487[17] { - yym1530 := z.EncBinary() - _ = yym1530 + if yyq2[19] { + yym63 := z.EncBinary() + _ = yym63 if false { } else { r.EncodeBool(bool(x.TTY)) @@ -21134,19 +26786,19 @@ func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq1487[17] { + if yyq2[19] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("tty")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1531 := z.EncBinary() - _ = yym1531 + yym64 := z.EncBinary() + _ = yym64 if false { } else { r.EncodeBool(bool(x.TTY)) } } } - if yyr1487 || yy2arr1487 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -21159,25 +26811,25 @@ func (x *Container) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1532 := z.DecBinary() - _ = yym1532 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1533 := r.ContainerType() - if yyct1533 == codecSelferValueTypeMap1234 { - yyl1533 := r.ReadMapStart() - if yyl1533 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1533, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1533 == codecSelferValueTypeArray1234 { - yyl1533 := r.ReadArrayStart() - if yyl1533 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1533, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -21189,12 +26841,12 @@ func (x *Container) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1534Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1534Slc - var yyhl1534 bool = l >= 0 - for yyj1534 := 0; ; yyj1534++ { - if yyhl1534 { - if yyj1534 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -21203,93 +26855,123 @@ func (x *Container) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1534Slc = r.DecodeBytes(yys1534Slc, true, true) - yys1534 := string(yys1534Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1534 { + switch yys3 { case "name": if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "image": if r.TryDecodeAsNil() { x.Image = "" } else { - x.Image = string(r.DecodeString()) + yyv6 := &x.Image + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "command": if r.TryDecodeAsNil() { x.Command = nil } else { - yyv1537 := &x.Command - yym1538 := 
z.DecBinary() - _ = yym1538 + yyv8 := &x.Command + yym9 := z.DecBinary() + _ = yym9 if false { } else { - z.F.DecSliceStringX(yyv1537, false, d) + z.F.DecSliceStringX(yyv8, false, d) } } case "args": if r.TryDecodeAsNil() { x.Args = nil } else { - yyv1539 := &x.Args - yym1540 := z.DecBinary() - _ = yym1540 + yyv10 := &x.Args + yym11 := z.DecBinary() + _ = yym11 if false { } else { - z.F.DecSliceStringX(yyv1539, false, d) + z.F.DecSliceStringX(yyv10, false, d) } } case "workingDir": if r.TryDecodeAsNil() { x.WorkingDir = "" } else { - x.WorkingDir = string(r.DecodeString()) + yyv12 := &x.WorkingDir + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } case "ports": if r.TryDecodeAsNil() { x.Ports = nil } else { - yyv1542 := &x.Ports - yym1543 := z.DecBinary() - _ = yym1543 + yyv14 := &x.Ports + yym15 := z.DecBinary() + _ = yym15 if false { } else { - h.decSliceContainerPort((*[]ContainerPort)(yyv1542), d) + h.decSliceContainerPort((*[]ContainerPort)(yyv14), d) + } + } + case "envFrom": + if r.TryDecodeAsNil() { + x.EnvFrom = nil + } else { + yyv16 := &x.EnvFrom + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + h.decSliceEnvFromSource((*[]EnvFromSource)(yyv16), d) } } case "env": if r.TryDecodeAsNil() { x.Env = nil } else { - yyv1544 := &x.Env - yym1545 := z.DecBinary() - _ = yym1545 + yyv18 := &x.Env + yym19 := z.DecBinary() + _ = yym19 if false { } else { - h.decSliceEnvVar((*[]EnvVar)(yyv1544), d) + h.decSliceEnvVar((*[]EnvVar)(yyv18), d) } } case "resources": if r.TryDecodeAsNil() { x.Resources = ResourceRequirements{} } else { - yyv1546 := &x.Resources - yyv1546.CodecDecodeSelf(d) + yyv20 := &x.Resources + yyv20.CodecDecodeSelf(d) } case "volumeMounts": if r.TryDecodeAsNil() { x.VolumeMounts = nil } else { - yyv1547 := &x.VolumeMounts - yym1548 := z.DecBinary() - _ = yym1548 + yyv21 := &x.VolumeMounts + yym22 := z.DecBinary() + _ = yym22 if false { } else { - h.decSliceVolumeMount((*[]VolumeMount)(yyv1547), d) + h.decSliceVolumeMount((*[]VolumeMount)(yyv21), d) } } case "livenessProbe": @@ -21329,13 +27011,27 @@ func (x *Container) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.TerminationMessagePath = "" } else { - x.TerminationMessagePath = string(r.DecodeString()) + yyv26 := &x.TerminationMessagePath + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*string)(yyv26)) = r.DecodeString() + } + } + case "terminationMessagePolicy": + if r.TryDecodeAsNil() { + x.TerminationMessagePolicy = "" + } else { + yyv28 := &x.TerminationMessagePolicy + yyv28.CodecDecodeSelf(d) } case "imagePullPolicy": if r.TryDecodeAsNil() { x.ImagePullPolicy = "" } else { - x.ImagePullPolicy = PullPolicy(r.DecodeString()) + yyv29 := &x.ImagePullPolicy + yyv29.CodecDecodeSelf(d) } case "securityContext": if r.TryDecodeAsNil() { @@ -21352,24 +27048,42 @@ func (x *Container) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Stdin = false } else { - x.Stdin = bool(r.DecodeBool()) + yyv31 := &x.Stdin + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*bool)(yyv31)) = r.DecodeBool() + } } case "stdinOnce": if r.TryDecodeAsNil() { x.StdinOnce = false } else { - x.StdinOnce = bool(r.DecodeBool()) + yyv33 := &x.StdinOnce + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*bool)(yyv33)) = r.DecodeBool() + } } case "tty": if r.TryDecodeAsNil() { x.TTY = false } else { - x.TTY = bool(r.DecodeBool()) + yyv35 := &x.TTY + yym36 := z.DecBinary() + _ = yym36 
+ if false { + } else { + *((*bool)(yyv35)) = r.DecodeBool() + } } default: - z.DecStructFieldNotFound(-1, yys1534) - } // end switch yys1534 - } // end for yyj1534 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -21377,16 +27091,16 @@ func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1558 int - var yyb1558 bool - var yyhl1558 bool = l >= 0 - yyj1558++ - if yyhl1558 { - yyb1558 = yyj1558 > l + var yyj37 int + var yyb37 bool + var yyhl37 bool = l >= 0 + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l } else { - yyb1558 = r.CheckBreak() + yyb37 = r.CheckBreak() } - if yyb1558 { + if yyb37 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21394,15 +27108,21 @@ func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv38 := &x.Name + yym39 := z.DecBinary() + _ = yym39 + if false { + } else { + *((*string)(yyv38)) = r.DecodeString() + } } - yyj1558++ - if yyhl1558 { - yyb1558 = yyj1558 > l + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l } else { - yyb1558 = r.CheckBreak() + yyb37 = r.CheckBreak() } - if yyb1558 { + if yyb37 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21410,15 +27130,21 @@ func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Image = "" } else { - x.Image = string(r.DecodeString()) + yyv40 := &x.Image + yym41 := z.DecBinary() + _ = yym41 + if false { + } else { + *((*string)(yyv40)) = r.DecodeString() + } } - yyj1558++ - if yyhl1558 { - yyb1558 = yyj1558 > l + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l } else { - yyb1558 = r.CheckBreak() + yyb37 = r.CheckBreak() } - if yyb1558 { + if yyb37 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21426,21 +27152,21 @@ func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Command = nil } else { - yyv1561 := &x.Command - yym1562 := z.DecBinary() - _ = yym1562 + yyv42 := &x.Command + yym43 := z.DecBinary() + _ = yym43 if false { } else { - z.F.DecSliceStringX(yyv1561, false, d) + z.F.DecSliceStringX(yyv42, false, d) } } - yyj1558++ - if yyhl1558 { - yyb1558 = yyj1558 > l + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l } else { - yyb1558 = r.CheckBreak() + yyb37 = r.CheckBreak() } - if yyb1558 { + if yyb37 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21448,21 +27174,21 @@ func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Args = nil } else { - yyv1563 := &x.Args - yym1564 := z.DecBinary() - _ = yym1564 + yyv44 := &x.Args + yym45 := z.DecBinary() + _ = yym45 if false { } else { - z.F.DecSliceStringX(yyv1563, false, d) + z.F.DecSliceStringX(yyv44, false, d) } } - yyj1558++ - if yyhl1558 { - yyb1558 = yyj1558 > l + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l } else { - yyb1558 = r.CheckBreak() + yyb37 = r.CheckBreak() } - if yyb1558 { + if yyb37 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21470,15 +27196,21 @@ func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.WorkingDir = "" } else { - x.WorkingDir = string(r.DecodeString()) + yyv46 := &x.WorkingDir + yym47 := z.DecBinary() + _ = yym47 + if false { + } else { + 
*((*string)(yyv46)) = r.DecodeString() + } } - yyj1558++ - if yyhl1558 { - yyb1558 = yyj1558 > l + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l } else { - yyb1558 = r.CheckBreak() + yyb37 = r.CheckBreak() } - if yyb1558 { + if yyb37 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21486,21 +27218,43 @@ func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Ports = nil } else { - yyv1566 := &x.Ports - yym1567 := z.DecBinary() - _ = yym1567 + yyv48 := &x.Ports + yym49 := z.DecBinary() + _ = yym49 + if false { + } else { + h.decSliceContainerPort((*[]ContainerPort)(yyv48), d) + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.EnvFrom = nil + } else { + yyv50 := &x.EnvFrom + yym51 := z.DecBinary() + _ = yym51 if false { } else { - h.decSliceContainerPort((*[]ContainerPort)(yyv1566), d) + h.decSliceEnvFromSource((*[]EnvFromSource)(yyv50), d) } } - yyj1558++ - if yyhl1558 { - yyb1558 = yyj1558 > l + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l } else { - yyb1558 = r.CheckBreak() + yyb37 = r.CheckBreak() } - if yyb1558 { + if yyb37 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21508,21 +27262,21 @@ func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Env = nil } else { - yyv1568 := &x.Env - yym1569 := z.DecBinary() - _ = yym1569 + yyv52 := &x.Env + yym53 := z.DecBinary() + _ = yym53 if false { } else { - h.decSliceEnvVar((*[]EnvVar)(yyv1568), d) + h.decSliceEnvVar((*[]EnvVar)(yyv52), d) } } - yyj1558++ - if yyhl1558 { - yyb1558 = yyj1558 > l + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l } else { - yyb1558 = r.CheckBreak() + yyb37 = r.CheckBreak() } - if yyb1558 { + if yyb37 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21530,16 +27284,16 @@ func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Resources = ResourceRequirements{} } else { - yyv1570 := &x.Resources - yyv1570.CodecDecodeSelf(d) + yyv54 := &x.Resources + yyv54.CodecDecodeSelf(d) } - yyj1558++ - if yyhl1558 { - yyb1558 = yyj1558 > l + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l } else { - yyb1558 = r.CheckBreak() + yyb37 = r.CheckBreak() } - if yyb1558 { + if yyb37 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21547,21 +27301,21 @@ func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.VolumeMounts = nil } else { - yyv1571 := &x.VolumeMounts - yym1572 := z.DecBinary() - _ = yym1572 + yyv55 := &x.VolumeMounts + yym56 := z.DecBinary() + _ = yym56 if false { } else { - h.decSliceVolumeMount((*[]VolumeMount)(yyv1571), d) + h.decSliceVolumeMount((*[]VolumeMount)(yyv55), d) } } - yyj1558++ - if yyhl1558 { - yyb1558 = yyj1558 > l + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l } else { - yyb1558 = r.CheckBreak() + yyb37 = r.CheckBreak() } - if yyb1558 { + if yyb37 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21576,13 +27330,13 @@ func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.LivenessProbe.CodecDecodeSelf(d) } - yyj1558++ - if yyhl1558 { - yyb1558 = yyj1558 > l + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l } else { - yyb1558 = r.CheckBreak() + yyb37 = r.CheckBreak() } - if yyb1558 { + if 
yyb37 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21597,13 +27351,13 @@ func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.ReadinessProbe.CodecDecodeSelf(d) } - yyj1558++ - if yyhl1558 { - yyb1558 = yyj1558 > l + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l } else { - yyb1558 = r.CheckBreak() + yyb37 = r.CheckBreak() } - if yyb1558 { + if yyb37 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21618,13 +27372,13 @@ func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.Lifecycle.CodecDecodeSelf(d) } - yyj1558++ - if yyhl1558 { - yyb1558 = yyj1558 > l + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l } else { - yyb1558 = r.CheckBreak() + yyb37 = r.CheckBreak() } - if yyb1558 { + if yyb37 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21632,15 +27386,38 @@ func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.TerminationMessagePath = "" } else { - x.TerminationMessagePath = string(r.DecodeString()) + yyv60 := &x.TerminationMessagePath + yym61 := z.DecBinary() + _ = yym61 + if false { + } else { + *((*string)(yyv60)) = r.DecodeString() + } } - yyj1558++ - if yyhl1558 { - yyb1558 = yyj1558 > l + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l } else { - yyb1558 = r.CheckBreak() + yyb37 = r.CheckBreak() } - if yyb1558 { + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TerminationMessagePolicy = "" + } else { + yyv62 := &x.TerminationMessagePolicy + yyv62.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21648,15 +27425,16 @@ func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ImagePullPolicy = "" } else { - x.ImagePullPolicy = PullPolicy(r.DecodeString()) + yyv63 := &x.ImagePullPolicy + yyv63.CodecDecodeSelf(d) } - yyj1558++ - if yyhl1558 { - yyb1558 = yyj1558 > l + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l } else { - yyb1558 = r.CheckBreak() + yyb37 = r.CheckBreak() } - if yyb1558 { + if yyb37 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21671,13 +27449,13 @@ func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.SecurityContext.CodecDecodeSelf(d) } - yyj1558++ - if yyhl1558 { - yyb1558 = yyj1558 > l + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l } else { - yyb1558 = r.CheckBreak() + yyb37 = r.CheckBreak() } - if yyb1558 { + if yyb37 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21685,15 +27463,21 @@ func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Stdin = false } else { - x.Stdin = bool(r.DecodeBool()) + yyv65 := &x.Stdin + yym66 := z.DecBinary() + _ = yym66 + if false { + } else { + *((*bool)(yyv65)) = r.DecodeBool() + } } - yyj1558++ - if yyhl1558 { - yyb1558 = yyj1558 > l + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l } else { - yyb1558 = r.CheckBreak() + yyb37 = r.CheckBreak() } - if yyb1558 { + if yyb37 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21701,15 +27485,21 @@ func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.StdinOnce = false } else { - x.StdinOnce = bool(r.DecodeBool()) + yyv67 := 
&x.StdinOnce + yym68 := z.DecBinary() + _ = yym68 + if false { + } else { + *((*bool)(yyv67)) = r.DecodeBool() + } } - yyj1558++ - if yyhl1558 { - yyb1558 = yyj1558 > l + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l } else { - yyb1558 = r.CheckBreak() + yyb37 = r.CheckBreak() } - if yyb1558 { + if yyb37 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21717,20 +27507,26 @@ func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.TTY = false } else { - x.TTY = bool(r.DecodeBool()) + yyv69 := &x.TTY + yym70 := z.DecBinary() + _ = yym70 + if false { + } else { + *((*bool)(yyv69)) = r.DecodeBool() + } } for { - yyj1558++ - if yyhl1558 { - yyb1558 = yyj1558 > l + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l } else { - yyb1558 = r.CheckBreak() + yyb37 = r.CheckBreak() } - if yyb1558 { + if yyb37 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1558-1, "") + z.DecStructFieldNotFound(yyj37-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -21742,35 +27538,35 @@ func (x *Handler) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1582 := z.EncBinary() - _ = yym1582 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1583 := !z.EncBinary() - yy2arr1583 := z.EncBasicHandle().StructToArray - var yyq1583 [3]bool - _, _, _ = yysep1583, yyq1583, yy2arr1583 - const yyr1583 bool = false - yyq1583[0] = x.Exec != nil - yyq1583[1] = x.HTTPGet != nil - yyq1583[2] = x.TCPSocket != nil - var yynn1583 int - if yyr1583 || yy2arr1583 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Exec != nil + yyq2[1] = x.HTTPGet != nil + yyq2[2] = x.TCPSocket != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn1583 = 0 - for _, b := range yyq1583 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1583++ + yynn2++ } } - r.EncodeMapStart(yynn1583) - yynn1583 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1583 || yy2arr1583 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1583[0] { + if yyq2[0] { if x.Exec == nil { r.EncodeNil() } else { @@ -21780,7 +27576,7 @@ func (x *Handler) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1583[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("exec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -21791,9 +27587,9 @@ func (x *Handler) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1583 || yy2arr1583 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1583[1] { + if yyq2[1] { if x.HTTPGet == nil { r.EncodeNil() } else { @@ -21803,7 +27599,7 @@ func (x *Handler) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1583[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("httpGet")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -21814,9 +27610,9 @@ func (x *Handler) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1583 || yy2arr1583 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1583[2] { + if yyq2[2] { if x.TCPSocket == nil { r.EncodeNil() } else { @@ -21826,7 
+27622,7 @@ func (x *Handler) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1583[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("tcpSocket")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -21837,7 +27633,7 @@ func (x *Handler) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1583 || yy2arr1583 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -21850,25 +27646,25 @@ func (x *Handler) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1587 := z.DecBinary() - _ = yym1587 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1588 := r.ContainerType() - if yyct1588 == codecSelferValueTypeMap1234 { - yyl1588 := r.ReadMapStart() - if yyl1588 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1588, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1588 == codecSelferValueTypeArray1234 { - yyl1588 := r.ReadArrayStart() - if yyl1588 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1588, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -21880,12 +27676,12 @@ func (x *Handler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1589Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1589Slc - var yyhl1589 bool = l >= 0 - for yyj1589 := 0; ; yyj1589++ { - if yyhl1589 { - if yyj1589 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -21894,10 +27690,10 @@ func (x *Handler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1589Slc = r.DecodeBytes(yys1589Slc, true, true) - yys1589 := string(yys1589Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1589 { + switch yys3 { case "exec": if r.TryDecodeAsNil() { if x.Exec != nil { @@ -21932,9 +27728,9 @@ func (x *Handler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { x.TCPSocket.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys1589) - } // end switch yys1589 - } // end for yyj1589 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -21942,16 +27738,16 @@ func (x *Handler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1593 int - var yyb1593 bool - var yyhl1593 bool = l >= 0 - yyj1593++ - if yyhl1593 { - yyb1593 = yyj1593 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1593 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1593 { + if yyb7 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21966,13 +27762,13 @@ func (x *Handler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.Exec.CodecDecodeSelf(d) } - yyj1593++ - if yyhl1593 { - yyb1593 = yyj1593 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1593 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1593 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21987,13 +27783,13 @@ func (x *Handler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.HTTPGet.CodecDecodeSelf(d) } - yyj1593++ - if yyhl1593 { - yyb1593 = yyj1593 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1593 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1593 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -22009,17 +27805,17 @@ func (x *Handler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { x.TCPSocket.CodecDecodeSelf(d) } for { - yyj1593++ - if yyhl1593 { - yyb1593 = yyj1593 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1593 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1593 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1593-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -22031,34 +27827,34 @@ func (x *Lifecycle) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1597 := z.EncBinary() - _ = yym1597 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1598 := !z.EncBinary() - yy2arr1598 := z.EncBasicHandle().StructToArray - var yyq1598 [2]bool - _, _, _ = yysep1598, yyq1598, yy2arr1598 - const yyr1598 bool = false - yyq1598[0] = x.PostStart != nil - yyq1598[1] = x.PreStop != nil - var yynn1598 int - if yyr1598 || yy2arr1598 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.PostStart != nil + yyq2[1] = x.PreStop != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1598 = 0 - for _, b := range yyq1598 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1598++ + yynn2++ } } - r.EncodeMapStart(yynn1598) - yynn1598 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1598 || yy2arr1598 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1598[0] { + if yyq2[0] { if x.PostStart == nil { r.EncodeNil() } else { @@ -22068,7 +27864,7 @@ func (x *Lifecycle) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1598[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("postStart")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -22079,9 +27875,9 @@ func (x *Lifecycle) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1598 || yy2arr1598 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1598[1] { + if yyq2[1] { if x.PreStop == nil { r.EncodeNil() } else { @@ -22091,7 +27887,7 @@ func (x *Lifecycle) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1598[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("preStop")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -22102,7 +27898,7 @@ func (x *Lifecycle) 
CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1598 || yy2arr1598 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -22115,25 +27911,25 @@ func (x *Lifecycle) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1601 := z.DecBinary() - _ = yym1601 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1602 := r.ContainerType() - if yyct1602 == codecSelferValueTypeMap1234 { - yyl1602 := r.ReadMapStart() - if yyl1602 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1602, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1602 == codecSelferValueTypeArray1234 { - yyl1602 := r.ReadArrayStart() - if yyl1602 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1602, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -22145,12 +27941,12 @@ func (x *Lifecycle) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1603Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1603Slc - var yyhl1603 bool = l >= 0 - for yyj1603 := 0; ; yyj1603++ { - if yyhl1603 { - if yyj1603 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -22159,10 +27955,10 @@ func (x *Lifecycle) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1603Slc = r.DecodeBytes(yys1603Slc, true, true) - yys1603 := string(yys1603Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1603 { + switch yys3 { case "postStart": if r.TryDecodeAsNil() { if x.PostStart != nil { @@ -22186,9 +27982,9 @@ func (x *Lifecycle) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { x.PreStop.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys1603) - } // end switch yys1603 - } // end for yyj1603 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -22196,16 +27992,16 @@ func (x *Lifecycle) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1606 int - var yyb1606 bool - var yyhl1606 bool = l >= 0 - yyj1606++ - if yyhl1606 { - yyb1606 = yyj1606 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb1606 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb1606 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -22220,13 +28016,13 @@ func (x *Lifecycle) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.PostStart.CodecDecodeSelf(d) } - yyj1606++ - if yyhl1606 { - yyb1606 = yyj1606 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb1606 = r.CheckBreak() + yyb6 
= r.CheckBreak() } - if yyb1606 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -22242,17 +28038,17 @@ func (x *Lifecycle) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { x.PreStop.CodecDecodeSelf(d) } for { - yyj1606++ - if yyhl1606 { - yyb1606 = yyj1606 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb1606 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb1606 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1606-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -22261,8 +28057,8 @@ func (x ConditionStatus) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym1609 := z.EncBinary() - _ = yym1609 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -22274,8 +28070,8 @@ func (x *ConditionStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1610 := z.DecBinary() - _ = yym1610 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -22290,36 +28086,36 @@ func (x *ContainerStateWaiting) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1611 := z.EncBinary() - _ = yym1611 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1612 := !z.EncBinary() - yy2arr1612 := z.EncBasicHandle().StructToArray - var yyq1612 [2]bool - _, _, _ = yysep1612, yyq1612, yy2arr1612 - const yyr1612 bool = false - yyq1612[0] = x.Reason != "" - yyq1612[1] = x.Message != "" - var yynn1612 int - if yyr1612 || yy2arr1612 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Reason != "" + yyq2[1] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1612 = 0 - for _, b := range yyq1612 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1612++ + yynn2++ } } - r.EncodeMapStart(yynn1612) - yynn1612 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1612 || yy2arr1612 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1612[0] { - yym1614 := z.EncBinary() - _ = yym1614 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) @@ -22328,23 +28124,23 @@ func (x *ContainerStateWaiting) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1612[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("reason")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1615 := z.EncBinary() - _ = yym1615 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) } } } - if yyr1612 || yy2arr1612 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1612[1] { - yym1617 := z.EncBinary() - _ = yym1617 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) @@ -22353,19 +28149,19 @@ func (x *ContainerStateWaiting) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } 
else { - if yyq1612[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("message")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1618 := z.EncBinary() - _ = yym1618 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) } } } - if yyr1612 || yy2arr1612 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -22378,25 +28174,25 @@ func (x *ContainerStateWaiting) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1619 := z.DecBinary() - _ = yym1619 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1620 := r.ContainerType() - if yyct1620 == codecSelferValueTypeMap1234 { - yyl1620 := r.ReadMapStart() - if yyl1620 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1620, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1620 == codecSelferValueTypeArray1234 { - yyl1620 := r.ReadArrayStart() - if yyl1620 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1620, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -22408,12 +28204,12 @@ func (x *ContainerStateWaiting) codecDecodeSelfFromMap(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1621Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1621Slc - var yyhl1621 bool = l >= 0 - for yyj1621 := 0; ; yyj1621++ { - if yyhl1621 { - if yyj1621 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -22422,26 +28218,38 @@ func (x *ContainerStateWaiting) codecDecodeSelfFromMap(l int, d *codec1978.Decod } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1621Slc = r.DecodeBytes(yys1621Slc, true, true) - yys1621 := string(yys1621Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1621 { + switch yys3 { case "reason": if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv4 := &x.Reason + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "message": if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv6 := &x.Message + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys1621) - } // end switch yys1621 - } // end for yyj1621 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -22449,16 +28257,16 @@ func (x *ContainerStateWaiting) codecDecodeSelfFromArray(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1624 int - var 
yyb1624 bool - var yyhl1624 bool = l >= 0 - yyj1624++ - if yyhl1624 { - yyb1624 = yyj1624 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1624 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1624 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -22466,15 +28274,21 @@ func (x *ContainerStateWaiting) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv9 := &x.Reason + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } } - yyj1624++ - if yyhl1624 { - yyb1624 = yyj1624 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1624 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1624 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -22482,20 +28296,26 @@ func (x *ContainerStateWaiting) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv11 := &x.Message + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } for { - yyj1624++ - if yyhl1624 { - yyb1624 = yyj1624 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1624 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1624 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1624-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -22507,68 +28327,68 @@ func (x *ContainerStateRunning) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1627 := z.EncBinary() - _ = yym1627 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1628 := !z.EncBinary() - yy2arr1628 := z.EncBasicHandle().StructToArray - var yyq1628 [1]bool - _, _, _ = yysep1628, yyq1628, yy2arr1628 - const yyr1628 bool = false - yyq1628[0] = true - var yynn1628 int - if yyr1628 || yy2arr1628 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn1628 = 0 - for _, b := range yyq1628 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1628++ + yynn2++ } } - r.EncodeMapStart(yynn1628) - yynn1628 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1628 || yy2arr1628 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1628[0] { - yy1630 := &x.StartedAt - yym1631 := z.EncBinary() - _ = yym1631 + if yyq2[0] { + yy4 := &x.StartedAt + yym5 := z.EncBinary() + _ = yym5 if false { - } else if z.HasExtensions() && z.EncExt(yy1630) { - } else if yym1631 { - z.EncBinaryMarshal(yy1630) - } else if !yym1631 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1630) + } else if z.HasExtensions() && z.EncExt(yy4) { + } else if yym5 { + z.EncBinaryMarshal(yy4) + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(yy4) } else { - z.EncFallback(yy1630) + z.EncFallback(yy4) } } else { r.EncodeNil() } } else { - if yyq1628[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("startedAt")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1632 := &x.StartedAt - yym1633 := 
z.EncBinary() - _ = yym1633 + yy6 := &x.StartedAt + yym7 := z.EncBinary() + _ = yym7 if false { - } else if z.HasExtensions() && z.EncExt(yy1632) { - } else if yym1633 { - z.EncBinaryMarshal(yy1632) - } else if !yym1633 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1632) + } else if z.HasExtensions() && z.EncExt(yy6) { + } else if yym7 { + z.EncBinaryMarshal(yy6) + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(yy6) } else { - z.EncFallback(yy1632) + z.EncFallback(yy6) } } } - if yyr1628 || yy2arr1628 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -22581,25 +28401,25 @@ func (x *ContainerStateRunning) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1634 := z.DecBinary() - _ = yym1634 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1635 := r.ContainerType() - if yyct1635 == codecSelferValueTypeMap1234 { - yyl1635 := r.ReadMapStart() - if yyl1635 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1635, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1635 == codecSelferValueTypeArray1234 { - yyl1635 := r.ReadArrayStart() - if yyl1635 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1635, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -22611,12 +28431,12 @@ func (x *ContainerStateRunning) codecDecodeSelfFromMap(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1636Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1636Slc - var yyhl1636 bool = l >= 0 - for yyj1636 := 0; ; yyj1636++ { - if yyhl1636 { - if yyj1636 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -22625,31 +28445,31 @@ func (x *ContainerStateRunning) codecDecodeSelfFromMap(l int, d *codec1978.Decod } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1636Slc = r.DecodeBytes(yys1636Slc, true, true) - yys1636 := string(yys1636Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1636 { + switch yys3 { case "startedAt": if r.TryDecodeAsNil() { - x.StartedAt = pkg2_unversioned.Time{} + x.StartedAt = pkg2_v1.Time{} } else { - yyv1637 := &x.StartedAt - yym1638 := z.DecBinary() - _ = yym1638 + yyv4 := &x.StartedAt + yym5 := z.DecBinary() + _ = yym5 if false { - } else if z.HasExtensions() && z.DecExt(yyv1637) { - } else if yym1638 { - z.DecBinaryUnmarshal(yyv1637) - } else if !yym1638 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1637) + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if yym5 { + z.DecBinaryUnmarshal(yyv4) + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv4) } else { - z.DecFallback(yyv1637, false) + z.DecFallback(yyv4, false) } } default: - z.DecStructFieldNotFound(-1, yys1636) - } // end switch yys1636 - } // end for yyj1636 + 
z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -22657,48 +28477,48 @@ func (x *ContainerStateRunning) codecDecodeSelfFromArray(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1639 int - var yyb1639 bool - var yyhl1639 bool = l >= 0 - yyj1639++ - if yyhl1639 { - yyb1639 = yyj1639 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb1639 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb1639 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.StartedAt = pkg2_unversioned.Time{} + x.StartedAt = pkg2_v1.Time{} } else { - yyv1640 := &x.StartedAt - yym1641 := z.DecBinary() - _ = yym1641 + yyv7 := &x.StartedAt + yym8 := z.DecBinary() + _ = yym8 if false { - } else if z.HasExtensions() && z.DecExt(yyv1640) { - } else if yym1641 { - z.DecBinaryUnmarshal(yyv1640) - } else if !yym1641 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1640) + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if yym8 { + z.DecBinaryUnmarshal(yyv7) + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) } else { - z.DecFallback(yyv1640, false) + z.DecFallback(yyv7, false) } } for { - yyj1639++ - if yyhl1639 { - yyb1639 = yyj1639 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb1639 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb1639 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1639-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -22710,39 +28530,39 @@ func (x *ContainerStateTerminated) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1642 := z.EncBinary() - _ = yym1642 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1643 := !z.EncBinary() - yy2arr1643 := z.EncBasicHandle().StructToArray - var yyq1643 [7]bool - _, _, _ = yysep1643, yyq1643, yy2arr1643 - const yyr1643 bool = false - yyq1643[1] = x.Signal != 0 - yyq1643[2] = x.Reason != "" - yyq1643[3] = x.Message != "" - yyq1643[4] = true - yyq1643[5] = true - yyq1643[6] = x.ContainerID != "" - var yynn1643 int - if yyr1643 || yy2arr1643 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Signal != 0 + yyq2[2] = x.Reason != "" + yyq2[3] = x.Message != "" + yyq2[4] = true + yyq2[5] = true + yyq2[6] = x.ContainerID != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(7) } else { - yynn1643 = 1 - for _, b := range yyq1643 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1643++ + yynn2++ } } - r.EncodeMapStart(yynn1643) - yynn1643 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1643 || yy2arr1643 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1645 := z.EncBinary() - _ = yym1645 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeInt(int64(x.ExitCode)) @@ -22751,18 +28571,18 @@ func (x *ContainerStateTerminated) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("exitCode")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1646 := z.EncBinary() - _ = yym1646 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeInt(int64(x.ExitCode)) } } - if yyr1643 || yy2arr1643 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1643[1] { - yym1648 := z.EncBinary() - _ = yym1648 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeInt(int64(x.Signal)) @@ -22771,23 +28591,23 @@ func (x *ContainerStateTerminated) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq1643[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("signal")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1649 := z.EncBinary() - _ = yym1649 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeInt(int64(x.Signal)) } } } - if yyr1643 || yy2arr1643 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1643[2] { - yym1651 := z.EncBinary() - _ = yym1651 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) @@ -22796,23 +28616,23 @@ func (x *ContainerStateTerminated) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1643[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("reason")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1652 := z.EncBinary() - _ = yym1652 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) } } } - if yyr1643 || yy2arr1643 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1643[3] { - yym1654 := z.EncBinary() - _ = yym1654 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) @@ -22821,97 +28641,97 @@ func (x *ContainerStateTerminated) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1643[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("message")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1655 := z.EncBinary() - _ = yym1655 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) } } } - if yyr1643 || yy2arr1643 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1643[4] { - yy1657 := &x.StartedAt - yym1658 := z.EncBinary() - _ = yym1658 + if yyq2[4] { + yy16 := &x.StartedAt + yym17 := z.EncBinary() + _ = yym17 if false { - } else if z.HasExtensions() && z.EncExt(yy1657) { - } else if yym1658 { - z.EncBinaryMarshal(yy1657) - } else if !yym1658 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1657) + } else if z.HasExtensions() && z.EncExt(yy16) { + } else if yym17 { + z.EncBinaryMarshal(yy16) + } else if !yym17 && z.IsJSONHandle() { + z.EncJSONMarshal(yy16) } else { - z.EncFallback(yy1657) + z.EncFallback(yy16) } } else { r.EncodeNil() } } else { - if yyq1643[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("startedAt")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1659 := &x.StartedAt - yym1660 := z.EncBinary() - _ = yym1660 + yy18 := 
&x.StartedAt + yym19 := z.EncBinary() + _ = yym19 if false { - } else if z.HasExtensions() && z.EncExt(yy1659) { - } else if yym1660 { - z.EncBinaryMarshal(yy1659) - } else if !yym1660 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1659) + } else if z.HasExtensions() && z.EncExt(yy18) { + } else if yym19 { + z.EncBinaryMarshal(yy18) + } else if !yym19 && z.IsJSONHandle() { + z.EncJSONMarshal(yy18) } else { - z.EncFallback(yy1659) + z.EncFallback(yy18) } } } - if yyr1643 || yy2arr1643 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1643[5] { - yy1662 := &x.FinishedAt - yym1663 := z.EncBinary() - _ = yym1663 + if yyq2[5] { + yy21 := &x.FinishedAt + yym22 := z.EncBinary() + _ = yym22 if false { - } else if z.HasExtensions() && z.EncExt(yy1662) { - } else if yym1663 { - z.EncBinaryMarshal(yy1662) - } else if !yym1663 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1662) + } else if z.HasExtensions() && z.EncExt(yy21) { + } else if yym22 { + z.EncBinaryMarshal(yy21) + } else if !yym22 && z.IsJSONHandle() { + z.EncJSONMarshal(yy21) } else { - z.EncFallback(yy1662) + z.EncFallback(yy21) } } else { r.EncodeNil() } } else { - if yyq1643[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("finishedAt")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1664 := &x.FinishedAt - yym1665 := z.EncBinary() - _ = yym1665 + yy23 := &x.FinishedAt + yym24 := z.EncBinary() + _ = yym24 if false { - } else if z.HasExtensions() && z.EncExt(yy1664) { - } else if yym1665 { - z.EncBinaryMarshal(yy1664) - } else if !yym1665 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1664) + } else if z.HasExtensions() && z.EncExt(yy23) { + } else if yym24 { + z.EncBinaryMarshal(yy23) + } else if !yym24 && z.IsJSONHandle() { + z.EncJSONMarshal(yy23) } else { - z.EncFallback(yy1664) + z.EncFallback(yy23) } } } - if yyr1643 || yy2arr1643 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1643[6] { - yym1667 := z.EncBinary() - _ = yym1667 + if yyq2[6] { + yym26 := z.EncBinary() + _ = yym26 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) @@ -22920,19 +28740,19 @@ func (x *ContainerStateTerminated) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1643[6] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("containerID")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1668 := z.EncBinary() - _ = yym1668 + yym27 := z.EncBinary() + _ = yym27 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) } } } - if yyr1643 || yy2arr1643 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -22945,25 +28765,25 @@ func (x *ContainerStateTerminated) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1669 := z.DecBinary() - _ = yym1669 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1670 := r.ContainerType() - if yyct1670 == codecSelferValueTypeMap1234 { - yyl1670 := r.ReadMapStart() - if yyl1670 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else 
{ - x.codecDecodeSelfFromMap(yyl1670, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1670 == codecSelferValueTypeArray1234 { - yyl1670 := r.ReadArrayStart() - if yyl1670 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1670, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -22975,12 +28795,12 @@ func (x *ContainerStateTerminated) codecDecodeSelfFromMap(l int, d *codec1978.De var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1671Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1671Slc - var yyhl1671 bool = l >= 0 - for yyj1671 := 0; ; yyj1671++ { - if yyhl1671 { - if yyj1671 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -22989,78 +28809,108 @@ func (x *ContainerStateTerminated) codecDecodeSelfFromMap(l int, d *codec1978.De } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1671Slc = r.DecodeBytes(yys1671Slc, true, true) - yys1671 := string(yys1671Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1671 { + switch yys3 { case "exitCode": if r.TryDecodeAsNil() { x.ExitCode = 0 } else { - x.ExitCode = int32(r.DecodeInt(32)) + yyv4 := &x.ExitCode + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } } case "signal": if r.TryDecodeAsNil() { x.Signal = 0 } else { - x.Signal = int32(r.DecodeInt(32)) + yyv6 := &x.Signal + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } } case "reason": if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv8 := &x.Reason + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } case "message": if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv10 := &x.Message + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } case "startedAt": if r.TryDecodeAsNil() { - x.StartedAt = pkg2_unversioned.Time{} + x.StartedAt = pkg2_v1.Time{} } else { - yyv1676 := &x.StartedAt - yym1677 := z.DecBinary() - _ = yym1677 + yyv12 := &x.StartedAt + yym13 := z.DecBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.DecExt(yyv1676) { - } else if yym1677 { - z.DecBinaryUnmarshal(yyv1676) - } else if !yym1677 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1676) + } else if z.HasExtensions() && z.DecExt(yyv12) { + } else if yym13 { + z.DecBinaryUnmarshal(yyv12) + } else if !yym13 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv12) } else { - z.DecFallback(yyv1676, false) + z.DecFallback(yyv12, false) } } case "finishedAt": if r.TryDecodeAsNil() { - x.FinishedAt = pkg2_unversioned.Time{} + x.FinishedAt = pkg2_v1.Time{} } else { - yyv1678 := &x.FinishedAt - yym1679 := z.DecBinary() - _ = yym1679 + yyv14 := &x.FinishedAt + yym15 := z.DecBinary() + _ = yym15 if false { - } else if z.HasExtensions() && z.DecExt(yyv1678) { - } else if yym1679 { - z.DecBinaryUnmarshal(yyv1678) - } else if !yym1679 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1678) + } else if 
z.HasExtensions() && z.DecExt(yyv14) { + } else if yym15 { + z.DecBinaryUnmarshal(yyv14) + } else if !yym15 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv14) } else { - z.DecFallback(yyv1678, false) + z.DecFallback(yyv14, false) } } case "containerID": if r.TryDecodeAsNil() { x.ContainerID = "" } else { - x.ContainerID = string(r.DecodeString()) + yyv16 := &x.ContainerID + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys1671) - } // end switch yys1671 - } // end for yyj1671 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -23068,16 +28918,16 @@ func (x *ContainerStateTerminated) codecDecodeSelfFromArray(l int, d *codec1978. var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1681 int - var yyb1681 bool - var yyhl1681 bool = l >= 0 - yyj1681++ - if yyhl1681 { - yyb1681 = yyj1681 > l + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb1681 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb1681 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23085,15 +28935,21 @@ func (x *ContainerStateTerminated) codecDecodeSelfFromArray(l int, d *codec1978. if r.TryDecodeAsNil() { x.ExitCode = 0 } else { - x.ExitCode = int32(r.DecodeInt(32)) + yyv19 := &x.ExitCode + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int32)(yyv19)) = int32(r.DecodeInt(32)) + } } - yyj1681++ - if yyhl1681 { - yyb1681 = yyj1681 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb1681 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb1681 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23101,15 +28957,21 @@ func (x *ContainerStateTerminated) codecDecodeSelfFromArray(l int, d *codec1978. if r.TryDecodeAsNil() { x.Signal = 0 } else { - x.Signal = int32(r.DecodeInt(32)) + yyv21 := &x.Signal + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } } - yyj1681++ - if yyhl1681 { - yyb1681 = yyj1681 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb1681 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb1681 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23117,15 +28979,21 @@ func (x *ContainerStateTerminated) codecDecodeSelfFromArray(l int, d *codec1978. if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv23 := &x.Reason + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } } - yyj1681++ - if yyhl1681 { - yyb1681 = yyj1681 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb1681 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb1681 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23133,69 +29001,75 @@ func (x *ContainerStateTerminated) codecDecodeSelfFromArray(l int, d *codec1978. 
if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv25 := &x.Message + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } } - yyj1681++ - if yyhl1681 { - yyb1681 = yyj1681 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb1681 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb1681 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.StartedAt = pkg2_unversioned.Time{} + x.StartedAt = pkg2_v1.Time{} } else { - yyv1686 := &x.StartedAt - yym1687 := z.DecBinary() - _ = yym1687 + yyv27 := &x.StartedAt + yym28 := z.DecBinary() + _ = yym28 if false { - } else if z.HasExtensions() && z.DecExt(yyv1686) { - } else if yym1687 { - z.DecBinaryUnmarshal(yyv1686) - } else if !yym1687 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1686) + } else if z.HasExtensions() && z.DecExt(yyv27) { + } else if yym28 { + z.DecBinaryUnmarshal(yyv27) + } else if !yym28 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv27) } else { - z.DecFallback(yyv1686, false) + z.DecFallback(yyv27, false) } } - yyj1681++ - if yyhl1681 { - yyb1681 = yyj1681 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb1681 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb1681 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.FinishedAt = pkg2_unversioned.Time{} + x.FinishedAt = pkg2_v1.Time{} } else { - yyv1688 := &x.FinishedAt - yym1689 := z.DecBinary() - _ = yym1689 + yyv29 := &x.FinishedAt + yym30 := z.DecBinary() + _ = yym30 if false { - } else if z.HasExtensions() && z.DecExt(yyv1688) { - } else if yym1689 { - z.DecBinaryUnmarshal(yyv1688) - } else if !yym1689 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1688) + } else if z.HasExtensions() && z.DecExt(yyv29) { + } else if yym30 { + z.DecBinaryUnmarshal(yyv29) + } else if !yym30 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv29) } else { - z.DecFallback(yyv1688, false) + z.DecFallback(yyv29, false) } } - yyj1681++ - if yyhl1681 { - yyb1681 = yyj1681 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb1681 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb1681 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23203,20 +29077,26 @@ func (x *ContainerStateTerminated) codecDecodeSelfFromArray(l int, d *codec1978. 
if r.TryDecodeAsNil() { x.ContainerID = "" } else { - x.ContainerID = string(r.DecodeString()) + yyv31 := &x.ContainerID + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } } for { - yyj1681++ - if yyhl1681 { - yyb1681 = yyj1681 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb1681 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb1681 { + if yyb18 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1681-1, "") + z.DecStructFieldNotFound(yyj18-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -23228,35 +29108,35 @@ func (x *ContainerState) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1691 := z.EncBinary() - _ = yym1691 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1692 := !z.EncBinary() - yy2arr1692 := z.EncBasicHandle().StructToArray - var yyq1692 [3]bool - _, _, _ = yysep1692, yyq1692, yy2arr1692 - const yyr1692 bool = false - yyq1692[0] = x.Waiting != nil - yyq1692[1] = x.Running != nil - yyq1692[2] = x.Terminated != nil - var yynn1692 int - if yyr1692 || yy2arr1692 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Waiting != nil + yyq2[1] = x.Running != nil + yyq2[2] = x.Terminated != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn1692 = 0 - for _, b := range yyq1692 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1692++ + yynn2++ } } - r.EncodeMapStart(yynn1692) - yynn1692 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1692 || yy2arr1692 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1692[0] { + if yyq2[0] { if x.Waiting == nil { r.EncodeNil() } else { @@ -23266,7 +29146,7 @@ func (x *ContainerState) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1692[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("waiting")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -23277,9 +29157,9 @@ func (x *ContainerState) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1692 || yy2arr1692 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1692[1] { + if yyq2[1] { if x.Running == nil { r.EncodeNil() } else { @@ -23289,7 +29169,7 @@ func (x *ContainerState) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1692[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("running")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -23300,9 +29180,9 @@ func (x *ContainerState) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1692 || yy2arr1692 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1692[2] { + if yyq2[2] { if x.Terminated == nil { r.EncodeNil() } else { @@ -23312,7 +29192,7 @@ func (x *ContainerState) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1692[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("terminated")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -23323,7 +29203,7 @@ func (x *ContainerState) CodecEncodeSelf(e *codec1978.Encoder) { } 
} } - if yyr1692 || yy2arr1692 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -23336,25 +29216,25 @@ func (x *ContainerState) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1696 := z.DecBinary() - _ = yym1696 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1697 := r.ContainerType() - if yyct1697 == codecSelferValueTypeMap1234 { - yyl1697 := r.ReadMapStart() - if yyl1697 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1697, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1697 == codecSelferValueTypeArray1234 { - yyl1697 := r.ReadArrayStart() - if yyl1697 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1697, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -23366,12 +29246,12 @@ func (x *ContainerState) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1698Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1698Slc - var yyhl1698 bool = l >= 0 - for yyj1698 := 0; ; yyj1698++ { - if yyhl1698 { - if yyj1698 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -23380,10 +29260,10 @@ func (x *ContainerState) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1698Slc = r.DecodeBytes(yys1698Slc, true, true) - yys1698 := string(yys1698Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1698 { + switch yys3 { case "waiting": if r.TryDecodeAsNil() { if x.Waiting != nil { @@ -23418,9 +29298,9 @@ func (x *ContainerState) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { x.Terminated.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys1698) - } // end switch yys1698 - } // end for yyj1698 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -23428,16 +29308,16 @@ func (x *ContainerState) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1702 int - var yyb1702 bool - var yyhl1702 bool = l >= 0 - yyj1702++ - if yyhl1702 { - yyb1702 = yyj1702 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1702 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1702 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23452,13 +29332,13 @@ func (x *ContainerState) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.Waiting.CodecDecodeSelf(d) } - yyj1702++ - if yyhl1702 { - yyb1702 = yyj1702 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1702 = r.CheckBreak() + yyb7 = 
r.CheckBreak() } - if yyb1702 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23473,13 +29353,13 @@ func (x *ContainerState) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.Running.CodecDecodeSelf(d) } - yyj1702++ - if yyhl1702 { - yyb1702 = yyj1702 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1702 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1702 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23495,17 +29375,17 @@ func (x *ContainerState) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { x.Terminated.CodecDecodeSelf(d) } for { - yyj1702++ - if yyhl1702 { - yyb1702 = yyj1702 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1702 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1702 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1702-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -23517,36 +29397,36 @@ func (x *ContainerStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1706 := z.EncBinary() - _ = yym1706 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1707 := !z.EncBinary() - yy2arr1707 := z.EncBasicHandle().StructToArray - var yyq1707 [8]bool - _, _, _ = yysep1707, yyq1707, yy2arr1707 - const yyr1707 bool = false - yyq1707[1] = true - yyq1707[2] = true - yyq1707[7] = x.ContainerID != "" - var yynn1707 int - if yyr1707 || yy2arr1707 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = true + yyq2[2] = true + yyq2[7] = x.ContainerID != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(8) } else { - yynn1707 = 5 - for _, b := range yyq1707 { + yynn2 = 5 + for _, b := range yyq2 { if b { - yynn1707++ + yynn2++ } } - r.EncodeMapStart(yynn1707) - yynn1707 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1707 || yy2arr1707 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1709 := z.EncBinary() - _ = yym1709 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) @@ -23555,51 +29435,51 @@ func (x *ContainerStatus) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("name")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1710 := z.EncBinary() - _ = yym1710 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } - if yyr1707 || yy2arr1707 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1707[1] { - yy1712 := &x.State - yy1712.CodecEncodeSelf(e) + if yyq2[1] { + yy7 := &x.State + yy7.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq1707[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("state")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1713 := &x.State - yy1713.CodecEncodeSelf(e) + yy9 := &x.State + yy9.CodecEncodeSelf(e) } } - if yyr1707 || yy2arr1707 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1707[2] { - yy1715 := &x.LastTerminationState - yy1715.CodecEncodeSelf(e) + if yyq2[2] { 
+ yy12 := &x.LastTerminationState + yy12.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq1707[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("lastState")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1716 := &x.LastTerminationState - yy1716.CodecEncodeSelf(e) + yy14 := &x.LastTerminationState + yy14.CodecEncodeSelf(e) } } - if yyr1707 || yy2arr1707 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1718 := z.EncBinary() - _ = yym1718 + yym17 := z.EncBinary() + _ = yym17 if false { } else { r.EncodeBool(bool(x.Ready)) @@ -23608,17 +29488,17 @@ func (x *ContainerStatus) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("ready")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1719 := z.EncBinary() - _ = yym1719 + yym18 := z.EncBinary() + _ = yym18 if false { } else { r.EncodeBool(bool(x.Ready)) } } - if yyr1707 || yy2arr1707 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1721 := z.EncBinary() - _ = yym1721 + yym20 := z.EncBinary() + _ = yym20 if false { } else { r.EncodeInt(int64(x.RestartCount)) @@ -23627,17 +29507,17 @@ func (x *ContainerStatus) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("restartCount")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1722 := z.EncBinary() - _ = yym1722 + yym21 := z.EncBinary() + _ = yym21 if false { } else { r.EncodeInt(int64(x.RestartCount)) } } - if yyr1707 || yy2arr1707 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1724 := z.EncBinary() - _ = yym1724 + yym23 := z.EncBinary() + _ = yym23 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Image)) @@ -23646,17 +29526,17 @@ func (x *ContainerStatus) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("image")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1725 := z.EncBinary() - _ = yym1725 + yym24 := z.EncBinary() + _ = yym24 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Image)) } } - if yyr1707 || yy2arr1707 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1727 := z.EncBinary() - _ = yym1727 + yym26 := z.EncBinary() + _ = yym26 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ImageID)) @@ -23665,18 +29545,18 @@ func (x *ContainerStatus) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("imageID")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1728 := z.EncBinary() - _ = yym1728 + yym27 := z.EncBinary() + _ = yym27 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ImageID)) } } - if yyr1707 || yy2arr1707 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1707[7] { - yym1730 := z.EncBinary() - _ = yym1730 + if yyq2[7] { + yym29 := z.EncBinary() + _ = yym29 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) @@ -23685,19 +29565,19 @@ func (x *ContainerStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1707[7] 
{ + if yyq2[7] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("containerID")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1731 := z.EncBinary() - _ = yym1731 + yym30 := z.EncBinary() + _ = yym30 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) } } } - if yyr1707 || yy2arr1707 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -23710,25 +29590,25 @@ func (x *ContainerStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1732 := z.DecBinary() - _ = yym1732 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1733 := r.ContainerType() - if yyct1733 == codecSelferValueTypeMap1234 { - yyl1733 := r.ReadMapStart() - if yyl1733 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1733, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1733 == codecSelferValueTypeArray1234 { - yyl1733 := r.ReadArrayStart() - if yyl1733 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1733, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -23740,12 +29620,12 @@ func (x *ContainerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1734Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1734Slc - var yyhl1734 bool = l >= 0 - for yyj1734 := 0; ; yyj1734++ { - if yyhl1734 { - if yyj1734 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -23754,64 +29634,100 @@ func (x *ContainerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1734Slc = r.DecodeBytes(yys1734Slc, true, true) - yys1734 := string(yys1734Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1734 { + switch yys3 { case "name": if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "state": if r.TryDecodeAsNil() { x.State = ContainerState{} } else { - yyv1736 := &x.State - yyv1736.CodecDecodeSelf(d) + yyv6 := &x.State + yyv6.CodecDecodeSelf(d) } case "lastState": if r.TryDecodeAsNil() { x.LastTerminationState = ContainerState{} } else { - yyv1737 := &x.LastTerminationState - yyv1737.CodecDecodeSelf(d) + yyv7 := &x.LastTerminationState + yyv7.CodecDecodeSelf(d) } case "ready": if r.TryDecodeAsNil() { x.Ready = false } else { - x.Ready = bool(r.DecodeBool()) + yyv8 := &x.Ready + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } } case "restartCount": if r.TryDecodeAsNil() { x.RestartCount = 0 } else { - x.RestartCount = 
int32(r.DecodeInt(32)) + yyv10 := &x.RestartCount + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } } case "image": if r.TryDecodeAsNil() { x.Image = "" } else { - x.Image = string(r.DecodeString()) + yyv12 := &x.Image + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } case "imageID": if r.TryDecodeAsNil() { x.ImageID = "" } else { - x.ImageID = string(r.DecodeString()) + yyv14 := &x.ImageID + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } case "containerID": if r.TryDecodeAsNil() { x.ContainerID = "" } else { - x.ContainerID = string(r.DecodeString()) + yyv16 := &x.ContainerID + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys1734) - } // end switch yys1734 - } // end for yyj1734 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -23819,16 +29735,16 @@ func (x *ContainerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1743 int - var yyb1743 bool - var yyhl1743 bool = l >= 0 - yyj1743++ - if yyhl1743 { - yyb1743 = yyj1743 > l + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb1743 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb1743 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23836,15 +29752,21 @@ func (x *ContainerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv19 := &x.Name + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } } - yyj1743++ - if yyhl1743 { - yyb1743 = yyj1743 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb1743 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb1743 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23852,16 +29774,16 @@ func (x *ContainerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.State = ContainerState{} } else { - yyv1745 := &x.State - yyv1745.CodecDecodeSelf(d) + yyv21 := &x.State + yyv21.CodecDecodeSelf(d) } - yyj1743++ - if yyhl1743 { - yyb1743 = yyj1743 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb1743 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb1743 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23869,16 +29791,16 @@ func (x *ContainerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.LastTerminationState = ContainerState{} } else { - yyv1746 := &x.LastTerminationState - yyv1746.CodecDecodeSelf(d) + yyv22 := &x.LastTerminationState + yyv22.CodecDecodeSelf(d) } - yyj1743++ - if yyhl1743 { - yyb1743 = yyj1743 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb1743 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb1743 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23886,15 +29808,21 @@ func (x *ContainerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Ready = false } else { - x.Ready = bool(r.DecodeBool()) + yyv23 := &x.Ready + yym24 := 
z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(yyv23)) = r.DecodeBool() + } } - yyj1743++ - if yyhl1743 { - yyb1743 = yyj1743 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb1743 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb1743 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23902,15 +29830,21 @@ func (x *ContainerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.RestartCount = 0 } else { - x.RestartCount = int32(r.DecodeInt(32)) + yyv25 := &x.RestartCount + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } } - yyj1743++ - if yyhl1743 { - yyb1743 = yyj1743 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb1743 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb1743 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23918,15 +29852,21 @@ func (x *ContainerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Image = "" } else { - x.Image = string(r.DecodeString()) + yyv27 := &x.Image + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } } - yyj1743++ - if yyhl1743 { - yyb1743 = yyj1743 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb1743 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb1743 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23934,15 +29874,21 @@ func (x *ContainerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.ImageID = "" } else { - x.ImageID = string(r.DecodeString()) + yyv29 := &x.ImageID + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } } - yyj1743++ - if yyhl1743 { - yyb1743 = yyj1743 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb1743 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb1743 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23950,20 +29896,26 @@ func (x *ContainerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.ContainerID = "" } else { - x.ContainerID = string(r.DecodeString()) + yyv31 := &x.ContainerID + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } } for { - yyj1743++ - if yyhl1743 { - yyb1743 = yyj1743 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb1743 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb1743 { + if yyb18 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1743-1, "") + z.DecStructFieldNotFound(yyj18-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -23972,8 +29924,8 @@ func (x PodPhase) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym1752 := z.EncBinary() - _ = yym1752 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -23985,8 +29937,8 @@ func (x *PodPhase) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1753 := z.DecBinary() - _ = yym1753 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -23998,8 +29950,8 @@ func (x PodConditionType) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r 
:= codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym1754 := z.EncBinary() - _ = yym1754 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -24011,8 +29963,8 @@ func (x *PodConditionType) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1755 := z.DecBinary() - _ = yym1755 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -24027,34 +29979,34 @@ func (x *PodCondition) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1756 := z.EncBinary() - _ = yym1756 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1757 := !z.EncBinary() - yy2arr1757 := z.EncBasicHandle().StructToArray - var yyq1757 [6]bool - _, _, _ = yysep1757, yyq1757, yy2arr1757 - const yyr1757 bool = false - yyq1757[2] = true - yyq1757[3] = true - yyq1757[4] = x.Reason != "" - yyq1757[5] = x.Message != "" - var yynn1757 int - if yyr1757 || yy2arr1757 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(6) } else { - yynn1757 = 2 - for _, b := range yyq1757 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn1757++ + yynn2++ } } - r.EncodeMapStart(yynn1757) - yynn1757 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1757 || yy2arr1757 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) x.Type.CodecEncodeSelf(e) } else { @@ -24063,7 +30015,7 @@ func (x *PodCondition) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Type.CodecEncodeSelf(e) } - if yyr1757 || yy2arr1757 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) x.Status.CodecEncodeSelf(e) } else { @@ -24072,85 +30024,85 @@ func (x *PodCondition) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Status.CodecEncodeSelf(e) } - if yyr1757 || yy2arr1757 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1757[2] { - yy1761 := &x.LastProbeTime - yym1762 := z.EncBinary() - _ = yym1762 + if yyq2[2] { + yy10 := &x.LastProbeTime + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy1761) { - } else if yym1762 { - z.EncBinaryMarshal(yy1761) - } else if !yym1762 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1761) + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) } else { - z.EncFallback(yy1761) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq1757[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1763 := &x.LastProbeTime - yym1764 := z.EncBinary() - _ = yym1764 + yy12 := &x.LastProbeTime + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy1763) { - } else if yym1764 { - z.EncBinaryMarshal(yy1763) - } else if !yym1764 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1763) + } else if z.HasExtensions() && z.EncExt(yy12) { + } 
else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) } else { - z.EncFallback(yy1763) + z.EncFallback(yy12) } } } - if yyr1757 || yy2arr1757 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1757[3] { - yy1766 := &x.LastTransitionTime - yym1767 := z.EncBinary() - _ = yym1767 + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 if false { - } else if z.HasExtensions() && z.EncExt(yy1766) { - } else if yym1767 { - z.EncBinaryMarshal(yy1766) - } else if !yym1767 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1766) + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) } else { - z.EncFallback(yy1766) + z.EncFallback(yy15) } } else { r.EncodeNil() } } else { - if yyq1757[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1768 := &x.LastTransitionTime - yym1769 := z.EncBinary() - _ = yym1769 + yy17 := &x.LastTransitionTime + yym18 := z.EncBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.EncExt(yy1768) { - } else if yym1769 { - z.EncBinaryMarshal(yy1768) - } else if !yym1769 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1768) + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } else if !yym18 && z.IsJSONHandle() { + z.EncJSONMarshal(yy17) } else { - z.EncFallback(yy1768) + z.EncFallback(yy17) } } } - if yyr1757 || yy2arr1757 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1757[4] { - yym1771 := z.EncBinary() - _ = yym1771 + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) @@ -24159,23 +30111,23 @@ func (x *PodCondition) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1757[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("reason")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1772 := z.EncBinary() - _ = yym1772 + yym21 := z.EncBinary() + _ = yym21 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) } } } - if yyr1757 || yy2arr1757 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1757[5] { - yym1774 := z.EncBinary() - _ = yym1774 + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) @@ -24184,19 +30136,19 @@ func (x *PodCondition) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1757[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("message")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1775 := z.EncBinary() - _ = yym1775 + yym24 := z.EncBinary() + _ = yym24 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) } } } - if yyr1757 || yy2arr1757 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -24209,25 +30161,25 @@ func (x *PodCondition) CodecDecodeSelf(d *codec1978.Decoder) { var h 
codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1776 := z.DecBinary() - _ = yym1776 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1777 := r.ContainerType() - if yyct1777 == codecSelferValueTypeMap1234 { - yyl1777 := r.ReadMapStart() - if yyl1777 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1777, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1777 == codecSelferValueTypeArray1234 { - yyl1777 := r.ReadArrayStart() - if yyl1777 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1777, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -24239,12 +30191,12 @@ func (x *PodCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1778Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1778Slc - var yyhl1778 bool = l >= 0 - for yyj1778 := 0; ; yyj1778++ { - if yyhl1778 { - if yyj1778 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -24253,72 +30205,86 @@ func (x *PodCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1778Slc = r.DecodeBytes(yys1778Slc, true, true) - yys1778 := string(yys1778Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1778 { + switch yys3 { case "type": if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = PodConditionType(r.DecodeString()) + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = "" } else { - x.Status = ConditionStatus(r.DecodeString()) + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) } case "lastProbeTime": if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_unversioned.Time{} + x.LastProbeTime = pkg2_v1.Time{} } else { - yyv1781 := &x.LastProbeTime - yym1782 := z.DecBinary() - _ = yym1782 + yyv6 := &x.LastProbeTime + yym7 := z.DecBinary() + _ = yym7 if false { - } else if z.HasExtensions() && z.DecExt(yyv1781) { - } else if yym1782 { - z.DecBinaryUnmarshal(yyv1781) - } else if !yym1782 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1781) + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) } else { - z.DecFallback(yyv1781, false) + z.DecFallback(yyv6, false) } } case "lastTransitionTime": if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} + x.LastTransitionTime = pkg2_v1.Time{} } else { - yyv1783 := &x.LastTransitionTime - yym1784 := z.DecBinary() - _ = yym1784 + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv1783) { - } else if yym1784 { - z.DecBinaryUnmarshal(yyv1783) - } else if !yym1784 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1783) + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + 
z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) } else { - z.DecFallback(yyv1783, false) + z.DecFallback(yyv8, false) } } case "reason": if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv10 := &x.Reason + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } case "message": if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv12 := &x.Message + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys1778) - } // end switch yys1778 - } // end for yyj1778 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -24326,16 +30292,16 @@ func (x *PodCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1787 int - var yyb1787 bool - var yyhl1787 bool = l >= 0 - yyj1787++ - if yyhl1787 { - yyb1787 = yyj1787 > l + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb1787 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb1787 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -24343,15 +30309,16 @@ func (x *PodCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = PodConditionType(r.DecodeString()) + yyv15 := &x.Type + yyv15.CodecDecodeSelf(d) } - yyj1787++ - if yyhl1787 { - yyb1787 = yyj1787 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb1787 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb1787 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -24359,69 +30326,70 @@ func (x *PodCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Status = "" } else { - x.Status = ConditionStatus(r.DecodeString()) + yyv16 := &x.Status + yyv16.CodecDecodeSelf(d) } - yyj1787++ - if yyhl1787 { - yyb1787 = yyj1787 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb1787 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb1787 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_unversioned.Time{} + x.LastProbeTime = pkg2_v1.Time{} } else { - yyv1790 := &x.LastProbeTime - yym1791 := z.DecBinary() - _ = yym1791 + yyv17 := &x.LastProbeTime + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv1790) { - } else if yym1791 { - z.DecBinaryUnmarshal(yyv1790) - } else if !yym1791 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1790) + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) } else { - z.DecFallback(yyv1790, false) + z.DecFallback(yyv17, false) } } - yyj1787++ - if yyhl1787 { - yyb1787 = yyj1787 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb1787 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb1787 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.LastTransitionTime = 
pkg2_unversioned.Time{} + x.LastTransitionTime = pkg2_v1.Time{} } else { - yyv1792 := &x.LastTransitionTime - yym1793 := z.DecBinary() - _ = yym1793 + yyv19 := &x.LastTransitionTime + yym20 := z.DecBinary() + _ = yym20 if false { - } else if z.HasExtensions() && z.DecExt(yyv1792) { - } else if yym1793 { - z.DecBinaryUnmarshal(yyv1792) - } else if !yym1793 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1792) + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if yym20 { + z.DecBinaryUnmarshal(yyv19) + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) } else { - z.DecFallback(yyv1792, false) + z.DecFallback(yyv19, false) } } - yyj1787++ - if yyhl1787 { - yyb1787 = yyj1787 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb1787 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb1787 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -24429,15 +30397,21 @@ func (x *PodCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv21 := &x.Reason + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } } - yyj1787++ - if yyhl1787 { - yyb1787 = yyj1787 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb1787 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb1787 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -24445,20 +30419,26 @@ func (x *PodCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv23 := &x.Message + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } } for { - yyj1787++ - if yyhl1787 { - yyb1787 = yyj1787 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb1787 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb1787 { + if yyb14 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1787-1, "") + z.DecStructFieldNotFound(yyj14-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -24467,8 +30447,8 @@ func (x RestartPolicy) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym1796 := z.EncBinary() - _ = yym1796 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -24480,8 +30460,8 @@ func (x *RestartPolicy) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1797 := z.DecBinary() - _ = yym1797 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -24493,8 +30473,8 @@ func (x DNSPolicy) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym1798 := z.EncBinary() - _ = yym1798 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -24506,8 +30486,8 @@ func (x *DNSPolicy) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1799 := z.DecBinary() - _ = yym1799 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -24522,36 +30502,36 @@ func (x *NodeSelector) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else 
{ - yym1800 := z.EncBinary() - _ = yym1800 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1801 := !z.EncBinary() - yy2arr1801 := z.EncBasicHandle().StructToArray - var yyq1801 [1]bool - _, _, _ = yysep1801, yyq1801, yy2arr1801 - const yyr1801 bool = false - var yynn1801 int - if yyr1801 || yy2arr1801 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn1801 = 1 - for _, b := range yyq1801 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1801++ + yynn2++ } } - r.EncodeMapStart(yynn1801) - yynn1801 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1801 || yy2arr1801 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.NodeSelectorTerms == nil { r.EncodeNil() } else { - yym1803 := z.EncBinary() - _ = yym1803 + yym4 := z.EncBinary() + _ = yym4 if false { } else { h.encSliceNodeSelectorTerm(([]NodeSelectorTerm)(x.NodeSelectorTerms), e) @@ -24564,15 +30544,15 @@ func (x *NodeSelector) CodecEncodeSelf(e *codec1978.Encoder) { if x.NodeSelectorTerms == nil { r.EncodeNil() } else { - yym1804 := z.EncBinary() - _ = yym1804 + yym5 := z.EncBinary() + _ = yym5 if false { } else { h.encSliceNodeSelectorTerm(([]NodeSelectorTerm)(x.NodeSelectorTerms), e) } } } - if yyr1801 || yy2arr1801 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -24585,25 +30565,25 @@ func (x *NodeSelector) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1805 := z.DecBinary() - _ = yym1805 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1806 := r.ContainerType() - if yyct1806 == codecSelferValueTypeMap1234 { - yyl1806 := r.ReadMapStart() - if yyl1806 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1806, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1806 == codecSelferValueTypeArray1234 { - yyl1806 := r.ReadArrayStart() - if yyl1806 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1806, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -24615,12 +30595,12 @@ func (x *NodeSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1807Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1807Slc - var yyhl1807 bool = l >= 0 - for yyj1807 := 0; ; yyj1807++ { - if yyhl1807 { - if yyj1807 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -24629,26 +30609,26 @@ func (x *NodeSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1807Slc = r.DecodeBytes(yys1807Slc, true, true) - yys1807 := string(yys1807Slc) + yys3Slc = 
r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1807 { + switch yys3 { case "nodeSelectorTerms": if r.TryDecodeAsNil() { x.NodeSelectorTerms = nil } else { - yyv1808 := &x.NodeSelectorTerms - yym1809 := z.DecBinary() - _ = yym1809 + yyv4 := &x.NodeSelectorTerms + yym5 := z.DecBinary() + _ = yym5 if false { } else { - h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv1808), d) + h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv4), d) } } default: - z.DecStructFieldNotFound(-1, yys1807) - } // end switch yys1807 - } // end for yyj1807 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -24656,16 +30636,16 @@ func (x *NodeSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1810 int - var yyb1810 bool - var yyhl1810 bool = l >= 0 - yyj1810++ - if yyhl1810 { - yyb1810 = yyj1810 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb1810 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb1810 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -24673,26 +30653,26 @@ func (x *NodeSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.NodeSelectorTerms = nil } else { - yyv1811 := &x.NodeSelectorTerms - yym1812 := z.DecBinary() - _ = yym1812 + yyv7 := &x.NodeSelectorTerms + yym8 := z.DecBinary() + _ = yym8 if false { } else { - h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv1811), d) + h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv7), d) } } for { - yyj1810++ - if yyhl1810 { - yyb1810 = yyj1810 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb1810 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb1810 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1810-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -24704,36 +30684,36 @@ func (x *NodeSelectorTerm) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1813 := z.EncBinary() - _ = yym1813 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1814 := !z.EncBinary() - yy2arr1814 := z.EncBasicHandle().StructToArray - var yyq1814 [1]bool - _, _, _ = yysep1814, yyq1814, yy2arr1814 - const yyr1814 bool = false - var yynn1814 int - if yyr1814 || yy2arr1814 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn1814 = 1 - for _, b := range yyq1814 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1814++ + yynn2++ } } - r.EncodeMapStart(yynn1814) - yynn1814 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1814 || yy2arr1814 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.MatchExpressions == nil { r.EncodeNil() } else { - yym1816 := z.EncBinary() - _ = yym1816 + yym4 := z.EncBinary() + _ = yym4 if false { } else { h.encSliceNodeSelectorRequirement(([]NodeSelectorRequirement)(x.MatchExpressions), e) @@ -24746,15 +30726,15 @@ func (x *NodeSelectorTerm) CodecEncodeSelf(e *codec1978.Encoder) { if x.MatchExpressions 
== nil { r.EncodeNil() } else { - yym1817 := z.EncBinary() - _ = yym1817 + yym5 := z.EncBinary() + _ = yym5 if false { } else { h.encSliceNodeSelectorRequirement(([]NodeSelectorRequirement)(x.MatchExpressions), e) } } } - if yyr1814 || yy2arr1814 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -24767,25 +30747,25 @@ func (x *NodeSelectorTerm) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1818 := z.DecBinary() - _ = yym1818 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1819 := r.ContainerType() - if yyct1819 == codecSelferValueTypeMap1234 { - yyl1819 := r.ReadMapStart() - if yyl1819 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1819, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1819 == codecSelferValueTypeArray1234 { - yyl1819 := r.ReadArrayStart() - if yyl1819 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1819, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -24797,12 +30777,12 @@ func (x *NodeSelectorTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1820Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1820Slc - var yyhl1820 bool = l >= 0 - for yyj1820 := 0; ; yyj1820++ { - if yyhl1820 { - if yyj1820 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -24811,26 +30791,26 @@ func (x *NodeSelectorTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1820Slc = r.DecodeBytes(yys1820Slc, true, true) - yys1820 := string(yys1820Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1820 { + switch yys3 { case "matchExpressions": if r.TryDecodeAsNil() { x.MatchExpressions = nil } else { - yyv1821 := &x.MatchExpressions - yym1822 := z.DecBinary() - _ = yym1822 + yyv4 := &x.MatchExpressions + yym5 := z.DecBinary() + _ = yym5 if false { } else { - h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv1821), d) + h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv4), d) } } default: - z.DecStructFieldNotFound(-1, yys1820) - } // end switch yys1820 - } // end for yyj1820 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -24838,16 +30818,16 @@ func (x *NodeSelectorTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1823 int - var yyb1823 bool - var yyhl1823 bool = l >= 0 - yyj1823++ - if yyhl1823 { - yyb1823 = yyj1823 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb1823 = 
r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb1823 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -24855,26 +30835,26 @@ func (x *NodeSelectorTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.MatchExpressions = nil } else { - yyv1824 := &x.MatchExpressions - yym1825 := z.DecBinary() - _ = yym1825 + yyv7 := &x.MatchExpressions + yym8 := z.DecBinary() + _ = yym8 if false { } else { - h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv1824), d) + h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv7), d) } } for { - yyj1823++ - if yyhl1823 { - yyb1823 = yyj1823 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb1823 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb1823 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1823-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -24886,34 +30866,34 @@ func (x *NodeSelectorRequirement) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1826 := z.EncBinary() - _ = yym1826 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1827 := !z.EncBinary() - yy2arr1827 := z.EncBasicHandle().StructToArray - var yyq1827 [3]bool - _, _, _ = yysep1827, yyq1827, yy2arr1827 - const yyr1827 bool = false - yyq1827[2] = len(x.Values) != 0 - var yynn1827 int - if yyr1827 || yy2arr1827 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = len(x.Values) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn1827 = 2 - for _, b := range yyq1827 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn1827++ + yynn2++ } } - r.EncodeMapStart(yynn1827) - yynn1827 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1827 || yy2arr1827 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1829 := z.EncBinary() - _ = yym1829 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Key)) @@ -24922,14 +30902,14 @@ func (x *NodeSelectorRequirement) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("key")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1830 := z.EncBinary() - _ = yym1830 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Key)) } } - if yyr1827 || yy2arr1827 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) x.Operator.CodecEncodeSelf(e) } else { @@ -24938,14 +30918,14 @@ func (x *NodeSelectorRequirement) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Operator.CodecEncodeSelf(e) } - if yyr1827 || yy2arr1827 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1827[2] { + if yyq2[2] { if x.Values == nil { r.EncodeNil() } else { - yym1833 := z.EncBinary() - _ = yym1833 + yym10 := z.EncBinary() + _ = yym10 if false { } else { z.F.EncSliceStringV(x.Values, false, e) @@ -24955,15 +30935,15 @@ func (x *NodeSelectorRequirement) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1827[2] { + if yyq2[2] { 
z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("values")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Values == nil { r.EncodeNil() } else { - yym1834 := z.EncBinary() - _ = yym1834 + yym11 := z.EncBinary() + _ = yym11 if false { } else { z.F.EncSliceStringV(x.Values, false, e) @@ -24971,7 +30951,7 @@ func (x *NodeSelectorRequirement) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1827 || yy2arr1827 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -24984,25 +30964,25 @@ func (x *NodeSelectorRequirement) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1835 := z.DecBinary() - _ = yym1835 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1836 := r.ContainerType() - if yyct1836 == codecSelferValueTypeMap1234 { - yyl1836 := r.ReadMapStart() - if yyl1836 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1836, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1836 == codecSelferValueTypeArray1234 { - yyl1836 := r.ReadArrayStart() - if yyl1836 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1836, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -25014,12 +30994,12 @@ func (x *NodeSelectorRequirement) codecDecodeSelfFromMap(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1837Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1837Slc - var yyhl1837 bool = l >= 0 - for yyj1837 := 0; ; yyj1837++ { - if yyhl1837 { - if yyj1837 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -25028,38 +31008,45 @@ func (x *NodeSelectorRequirement) codecDecodeSelfFromMap(l int, d *codec1978.Dec } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1837Slc = r.DecodeBytes(yys1837Slc, true, true) - yys1837 := string(yys1837Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1837 { + switch yys3 { case "key": if r.TryDecodeAsNil() { x.Key = "" } else { - x.Key = string(r.DecodeString()) + yyv4 := &x.Key + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "operator": if r.TryDecodeAsNil() { x.Operator = "" } else { - x.Operator = NodeSelectorOperator(r.DecodeString()) + yyv6 := &x.Operator + yyv6.CodecDecodeSelf(d) } case "values": if r.TryDecodeAsNil() { x.Values = nil } else { - yyv1840 := &x.Values - yym1841 := z.DecBinary() - _ = yym1841 + yyv7 := &x.Values + yym8 := z.DecBinary() + _ = yym8 if false { } else { - z.F.DecSliceStringX(yyv1840, false, d) + z.F.DecSliceStringX(yyv7, false, d) } } default: - z.DecStructFieldNotFound(-1, yys1837) - } // end switch yys1837 - } // end for yyj1837 + z.DecStructFieldNotFound(-1, yys3) + } // end 
switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -25067,16 +31054,16 @@ func (x *NodeSelectorRequirement) codecDecodeSelfFromArray(l int, d *codec1978.D var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1842 int - var yyb1842 bool - var yyhl1842 bool = l >= 0 - yyj1842++ - if yyhl1842 { - yyb1842 = yyj1842 > l + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb1842 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb1842 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -25084,15 +31071,21 @@ func (x *NodeSelectorRequirement) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Key = "" } else { - x.Key = string(r.DecodeString()) + yyv10 := &x.Key + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } - yyj1842++ - if yyhl1842 { - yyb1842 = yyj1842 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb1842 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb1842 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -25100,15 +31093,16 @@ func (x *NodeSelectorRequirement) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Operator = "" } else { - x.Operator = NodeSelectorOperator(r.DecodeString()) + yyv12 := &x.Operator + yyv12.CodecDecodeSelf(d) } - yyj1842++ - if yyhl1842 { - yyb1842 = yyj1842 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb1842 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb1842 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -25116,26 +31110,26 @@ func (x *NodeSelectorRequirement) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Values = nil } else { - yyv1845 := &x.Values - yym1846 := z.DecBinary() - _ = yym1846 + yyv13 := &x.Values + yym14 := z.DecBinary() + _ = yym14 if false { } else { - z.F.DecSliceStringX(yyv1845, false, d) + z.F.DecSliceStringX(yyv13, false, d) } } for { - yyj1842++ - if yyhl1842 { - yyb1842 = yyj1842 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb1842 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb1842 { + if yyb9 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1842-1, "") + z.DecStructFieldNotFound(yyj9-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -25144,8 +31138,8 @@ func (x NodeSelectorOperator) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym1847 := z.EncBinary() - _ = yym1847 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -25157,8 +31151,8 @@ func (x *NodeSelectorOperator) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1848 := z.DecBinary() - _ = yym1848 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -25173,35 +31167,35 @@ func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1849 := z.EncBinary() - _ = yym1849 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1850 := !z.EncBinary() - yy2arr1850 := z.EncBasicHandle().StructToArray - var yyq1850 [3]bool - _, _, _ = yysep1850, yyq1850, yy2arr1850 - const yyr1850 bool 
= false - yyq1850[0] = x.NodeAffinity != nil - yyq1850[1] = x.PodAffinity != nil - yyq1850[2] = x.PodAntiAffinity != nil - var yynn1850 int - if yyr1850 || yy2arr1850 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.NodeAffinity != nil + yyq2[1] = x.PodAffinity != nil + yyq2[2] = x.PodAntiAffinity != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn1850 = 0 - for _, b := range yyq1850 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1850++ + yynn2++ } } - r.EncodeMapStart(yynn1850) - yynn1850 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1850 || yy2arr1850 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1850[0] { + if yyq2[0] { if x.NodeAffinity == nil { r.EncodeNil() } else { @@ -25211,7 +31205,7 @@ func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1850[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nodeAffinity")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -25222,9 +31216,9 @@ func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1850 || yy2arr1850 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1850[1] { + if yyq2[1] { if x.PodAffinity == nil { r.EncodeNil() } else { @@ -25234,7 +31228,7 @@ func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1850[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("podAffinity")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -25245,9 +31239,9 @@ func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1850 || yy2arr1850 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1850[2] { + if yyq2[2] { if x.PodAntiAffinity == nil { r.EncodeNil() } else { @@ -25257,7 +31251,7 @@ func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1850[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("podAntiAffinity")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -25268,7 +31262,7 @@ func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1850 || yy2arr1850 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -25281,25 +31275,25 @@ func (x *Affinity) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1854 := z.DecBinary() - _ = yym1854 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1855 := r.ContainerType() - if yyct1855 == codecSelferValueTypeMap1234 { - yyl1855 := r.ReadMapStart() - if yyl1855 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1855, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1855 == codecSelferValueTypeArray1234 { - yyl1855 := r.ReadArrayStart() - if yyl1855 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := 
r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1855, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -25311,12 +31305,12 @@ func (x *Affinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1856Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1856Slc - var yyhl1856 bool = l >= 0 - for yyj1856 := 0; ; yyj1856++ { - if yyhl1856 { - if yyj1856 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -25325,10 +31319,10 @@ func (x *Affinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1856Slc = r.DecodeBytes(yys1856Slc, true, true) - yys1856 := string(yys1856Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1856 { + switch yys3 { case "nodeAffinity": if r.TryDecodeAsNil() { if x.NodeAffinity != nil { @@ -25363,9 +31357,9 @@ func (x *Affinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { x.PodAntiAffinity.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys1856) - } // end switch yys1856 - } // end for yyj1856 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -25373,16 +31367,16 @@ func (x *Affinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1860 int - var yyb1860 bool - var yyhl1860 bool = l >= 0 - yyj1860++ - if yyhl1860 { - yyb1860 = yyj1860 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1860 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1860 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -25397,13 +31391,13 @@ func (x *Affinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.NodeAffinity.CodecDecodeSelf(d) } - yyj1860++ - if yyhl1860 { - yyb1860 = yyj1860 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1860 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1860 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -25418,13 +31412,13 @@ func (x *Affinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.PodAffinity.CodecDecodeSelf(d) } - yyj1860++ - if yyhl1860 { - yyb1860 = yyj1860 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1860 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1860 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -25440,17 +31434,17 @@ func (x *Affinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { x.PodAntiAffinity.CodecDecodeSelf(d) } for { - yyj1860++ - if yyhl1860 { - yyb1860 = yyj1860 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1860 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1860 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1860-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -25462,39 +31456,39 @@ func (x 
*PodAffinity) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1864 := z.EncBinary() - _ = yym1864 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1865 := !z.EncBinary() - yy2arr1865 := z.EncBasicHandle().StructToArray - var yyq1865 [2]bool - _, _, _ = yysep1865, yyq1865, yy2arr1865 - const yyr1865 bool = false - yyq1865[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 - yyq1865[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 - var yynn1865 int - if yyr1865 || yy2arr1865 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 + yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1865 = 0 - for _, b := range yyq1865 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1865++ + yynn2++ } } - r.EncodeMapStart(yynn1865) - yynn1865 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1865 || yy2arr1865 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1865[0] { + if yyq2[0] { if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { r.EncodeNil() } else { - yym1867 := z.EncBinary() - _ = yym1867 + yym4 := z.EncBinary() + _ = yym4 if false { } else { h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) @@ -25504,15 +31498,15 @@ func (x *PodAffinity) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1865[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { r.EncodeNil() } else { - yym1868 := z.EncBinary() - _ = yym1868 + yym5 := z.EncBinary() + _ = yym5 if false { } else { h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) @@ -25520,14 +31514,14 @@ func (x *PodAffinity) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1865 || yy2arr1865 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1865[1] { + if yyq2[1] { if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { r.EncodeNil() } else { - yym1870 := z.EncBinary() - _ = yym1870 + yym7 := z.EncBinary() + _ = yym7 if false { } else { h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) @@ -25537,15 +31531,15 @@ func (x *PodAffinity) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1865[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { r.EncodeNil() } else { - yym1871 := z.EncBinary() - _ = yym1871 + yym8 := z.EncBinary() + _ = yym8 if false { } else { h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) @@ -25553,7 +31547,7 @@ func (x *PodAffinity) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1865 || yy2arr1865 { + if 
yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -25566,25 +31560,25 @@ func (x *PodAffinity) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1872 := z.DecBinary() - _ = yym1872 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1873 := r.ContainerType() - if yyct1873 == codecSelferValueTypeMap1234 { - yyl1873 := r.ReadMapStart() - if yyl1873 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1873, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1873 == codecSelferValueTypeArray1234 { - yyl1873 := r.ReadArrayStart() - if yyl1873 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1873, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -25596,12 +31590,12 @@ func (x *PodAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1874Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1874Slc - var yyhl1874 bool = l >= 0 - for yyj1874 := 0; ; yyj1874++ { - if yyhl1874 { - if yyj1874 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -25610,38 +31604,38 @@ func (x *PodAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1874Slc = r.DecodeBytes(yys1874Slc, true, true) - yys1874 := string(yys1874Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1874 { + switch yys3 { case "requiredDuringSchedulingIgnoredDuringExecution": if r.TryDecodeAsNil() { x.RequiredDuringSchedulingIgnoredDuringExecution = nil } else { - yyv1875 := &x.RequiredDuringSchedulingIgnoredDuringExecution - yym1876 := z.DecBinary() - _ = yym1876 + yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym5 := z.DecBinary() + _ = yym5 if false { } else { - h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv1875), d) + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) } } case "preferredDuringSchedulingIgnoredDuringExecution": if r.TryDecodeAsNil() { x.PreferredDuringSchedulingIgnoredDuringExecution = nil } else { - yyv1877 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym1878 := z.DecBinary() - _ = yym1878 + yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym7 := z.DecBinary() + _ = yym7 if false { } else { - h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv1877), d) + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) } } default: - z.DecStructFieldNotFound(-1, yys1874) - } // end switch yys1874 - } // end for yyj1874 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -25649,16 +31643,16 @@ func (x *PodAffinity) codecDecodeSelfFromArray(l int, d 
*codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1879 int - var yyb1879 bool - var yyhl1879 bool = l >= 0 - yyj1879++ - if yyhl1879 { - yyb1879 = yyj1879 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1879 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1879 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -25666,21 +31660,21 @@ func (x *PodAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.RequiredDuringSchedulingIgnoredDuringExecution = nil } else { - yyv1880 := &x.RequiredDuringSchedulingIgnoredDuringExecution - yym1881 := z.DecBinary() - _ = yym1881 + yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 if false { } else { - h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv1880), d) + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) } } - yyj1879++ - if yyhl1879 { - yyb1879 = yyj1879 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1879 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1879 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -25688,26 +31682,26 @@ func (x *PodAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.PreferredDuringSchedulingIgnoredDuringExecution = nil } else { - yyv1882 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym1883 := z.DecBinary() - _ = yym1883 + yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym12 := z.DecBinary() + _ = yym12 if false { } else { - h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv1882), d) + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) } } for { - yyj1879++ - if yyhl1879 { - yyb1879 = yyj1879 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1879 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1879 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1879-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -25719,39 +31713,39 @@ func (x *PodAntiAffinity) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1884 := z.EncBinary() - _ = yym1884 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1885 := !z.EncBinary() - yy2arr1885 := z.EncBasicHandle().StructToArray - var yyq1885 [2]bool - _, _, _ = yysep1885, yyq1885, yy2arr1885 - const yyr1885 bool = false - yyq1885[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 - yyq1885[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 - var yynn1885 int - if yyr1885 || yy2arr1885 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 + yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1885 = 0 - for _, b := range yyq1885 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1885++ + yynn2++ } } - r.EncodeMapStart(yynn1885) - yynn1885 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1885 || yy2arr1885 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - 
if yyq1885[0] { + if yyq2[0] { if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { r.EncodeNil() } else { - yym1887 := z.EncBinary() - _ = yym1887 + yym4 := z.EncBinary() + _ = yym4 if false { } else { h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) @@ -25761,15 +31755,15 @@ func (x *PodAntiAffinity) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1885[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { r.EncodeNil() } else { - yym1888 := z.EncBinary() - _ = yym1888 + yym5 := z.EncBinary() + _ = yym5 if false { } else { h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) @@ -25777,14 +31771,14 @@ func (x *PodAntiAffinity) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1885 || yy2arr1885 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1885[1] { + if yyq2[1] { if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { r.EncodeNil() } else { - yym1890 := z.EncBinary() - _ = yym1890 + yym7 := z.EncBinary() + _ = yym7 if false { } else { h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) @@ -25794,15 +31788,15 @@ func (x *PodAntiAffinity) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1885[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { r.EncodeNil() } else { - yym1891 := z.EncBinary() - _ = yym1891 + yym8 := z.EncBinary() + _ = yym8 if false { } else { h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) @@ -25810,7 +31804,7 @@ func (x *PodAntiAffinity) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1885 || yy2arr1885 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -25823,25 +31817,25 @@ func (x *PodAntiAffinity) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1892 := z.DecBinary() - _ = yym1892 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1893 := r.ContainerType() - if yyct1893 == codecSelferValueTypeMap1234 { - yyl1893 := r.ReadMapStart() - if yyl1893 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1893, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1893 == codecSelferValueTypeArray1234 { - yyl1893 := r.ReadArrayStart() - if yyl1893 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1893, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ 
-25853,12 +31847,12 @@ func (x *PodAntiAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1894Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1894Slc - var yyhl1894 bool = l >= 0 - for yyj1894 := 0; ; yyj1894++ { - if yyhl1894 { - if yyj1894 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -25867,38 +31861,38 @@ func (x *PodAntiAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1894Slc = r.DecodeBytes(yys1894Slc, true, true) - yys1894 := string(yys1894Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1894 { + switch yys3 { case "requiredDuringSchedulingIgnoredDuringExecution": if r.TryDecodeAsNil() { x.RequiredDuringSchedulingIgnoredDuringExecution = nil } else { - yyv1895 := &x.RequiredDuringSchedulingIgnoredDuringExecution - yym1896 := z.DecBinary() - _ = yym1896 + yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym5 := z.DecBinary() + _ = yym5 if false { } else { - h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv1895), d) + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) } } case "preferredDuringSchedulingIgnoredDuringExecution": if r.TryDecodeAsNil() { x.PreferredDuringSchedulingIgnoredDuringExecution = nil } else { - yyv1897 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym1898 := z.DecBinary() - _ = yym1898 + yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym7 := z.DecBinary() + _ = yym7 if false { } else { - h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv1897), d) + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) } } default: - z.DecStructFieldNotFound(-1, yys1894) - } // end switch yys1894 - } // end for yyj1894 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -25906,16 +31900,16 @@ func (x *PodAntiAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1899 int - var yyb1899 bool - var yyhl1899 bool = l >= 0 - yyj1899++ - if yyhl1899 { - yyb1899 = yyj1899 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1899 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1899 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -25923,21 +31917,21 @@ func (x *PodAntiAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.RequiredDuringSchedulingIgnoredDuringExecution = nil } else { - yyv1900 := &x.RequiredDuringSchedulingIgnoredDuringExecution - yym1901 := z.DecBinary() - _ = yym1901 + yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 if false { } else { - h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv1900), d) + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) } } - yyj1899++ - if yyhl1899 { - yyb1899 = yyj1899 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1899 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1899 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) 
return } @@ -25945,26 +31939,26 @@ func (x *PodAntiAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.PreferredDuringSchedulingIgnoredDuringExecution = nil } else { - yyv1902 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym1903 := z.DecBinary() - _ = yym1903 + yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym12 := z.DecBinary() + _ = yym12 if false { } else { - h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv1902), d) + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) } } for { - yyj1899++ - if yyhl1899 { - yyb1899 = yyj1899 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1899 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1899 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1899-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -25976,33 +31970,33 @@ func (x *WeightedPodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1904 := z.EncBinary() - _ = yym1904 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1905 := !z.EncBinary() - yy2arr1905 := z.EncBasicHandle().StructToArray - var yyq1905 [2]bool - _, _, _ = yysep1905, yyq1905, yy2arr1905 - const yyr1905 bool = false - var yynn1905 int - if yyr1905 || yy2arr1905 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1905 = 2 - for _, b := range yyq1905 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn1905++ + yynn2++ } } - r.EncodeMapStart(yynn1905) - yynn1905 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1905 || yy2arr1905 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1907 := z.EncBinary() - _ = yym1907 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeInt(int64(x.Weight)) @@ -26011,25 +32005,25 @@ func (x *WeightedPodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("weight")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1908 := z.EncBinary() - _ = yym1908 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeInt(int64(x.Weight)) } } - if yyr1905 || yy2arr1905 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1910 := &x.PodAffinityTerm - yy1910.CodecEncodeSelf(e) + yy7 := &x.PodAffinityTerm + yy7.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("podAffinityTerm")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1911 := &x.PodAffinityTerm - yy1911.CodecEncodeSelf(e) + yy9 := &x.PodAffinityTerm + yy9.CodecEncodeSelf(e) } - if yyr1905 || yy2arr1905 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -26042,25 +32036,25 @@ func (x *WeightedPodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1912 := z.DecBinary() - _ = yym1912 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && 
z.DecExt(x) { } else { - yyct1913 := r.ContainerType() - if yyct1913 == codecSelferValueTypeMap1234 { - yyl1913 := r.ReadMapStart() - if yyl1913 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1913, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1913 == codecSelferValueTypeArray1234 { - yyl1913 := r.ReadArrayStart() - if yyl1913 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1913, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -26072,12 +32066,12 @@ func (x *WeightedPodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1914Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1914Slc - var yyhl1914 bool = l >= 0 - for yyj1914 := 0; ; yyj1914++ { - if yyhl1914 { - if yyj1914 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -26086,27 +32080,33 @@ func (x *WeightedPodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Dec } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1914Slc = r.DecodeBytes(yys1914Slc, true, true) - yys1914 := string(yys1914Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1914 { + switch yys3 { case "weight": if r.TryDecodeAsNil() { x.Weight = 0 } else { - x.Weight = int32(r.DecodeInt(32)) + yyv4 := &x.Weight + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } } case "podAffinityTerm": if r.TryDecodeAsNil() { x.PodAffinityTerm = PodAffinityTerm{} } else { - yyv1916 := &x.PodAffinityTerm - yyv1916.CodecDecodeSelf(d) + yyv6 := &x.PodAffinityTerm + yyv6.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys1914) - } // end switch yys1914 - } // end for yyj1914 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -26114,16 +32114,16 @@ func (x *WeightedPodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.D var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1917 int - var yyb1917 bool - var yyhl1917 bool = l >= 0 - yyj1917++ - if yyhl1917 { - yyb1917 = yyj1917 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1917 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1917 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -26131,15 +32131,21 @@ func (x *WeightedPodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Weight = 0 } else { - x.Weight = int32(r.DecodeInt(32)) + yyv8 := &x.Weight + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } } - yyj1917++ - if yyhl1917 { - yyb1917 = yyj1917 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1917 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1917 { + if yyb7 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -26147,21 +32153,21 @@ func (x *WeightedPodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.PodAffinityTerm = PodAffinityTerm{} } else { - yyv1919 := &x.PodAffinityTerm - yyv1919.CodecDecodeSelf(d) + yyv10 := &x.PodAffinityTerm + yyv10.CodecDecodeSelf(d) } for { - yyj1917++ - if yyhl1917 { - yyb1917 = yyj1917 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1917 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1917 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1917-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -26173,39 +32179,40 @@ func (x *PodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1920 := z.EncBinary() - _ = yym1920 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1921 := !z.EncBinary() - yy2arr1921 := z.EncBasicHandle().StructToArray - var yyq1921 [3]bool - _, _, _ = yysep1921, yyq1921, yy2arr1921 - const yyr1921 bool = false - yyq1921[0] = x.LabelSelector != nil - yyq1921[2] = x.TopologyKey != "" - var yynn1921 int - if yyr1921 || yy2arr1921 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.LabelSelector != nil + yyq2[1] = len(x.Namespaces) != 0 + yyq2[2] = x.TopologyKey != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn1921 = 1 - for _, b := range yyq1921 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1921++ + yynn2++ } } - r.EncodeMapStart(yynn1921) - yynn1921 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1921 || yy2arr1921 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1921[0] { + if yyq2[0] { if x.LabelSelector == nil { r.EncodeNil() } else { - yym1923 := z.EncBinary() - _ = yym1923 + yym4 := z.EncBinary() + _ = yym4 if false { } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { } else { @@ -26216,15 +32223,15 @@ func (x *PodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1921[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("labelSelector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.LabelSelector == nil { r.EncodeNil() } else { - yym1924 := z.EncBinary() - _ = yym1924 + yym5 := z.EncBinary() + _ = yym5 if false { } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { } else { @@ -26233,38 +32240,44 @@ func (x *PodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1921 || yy2arr1921 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Namespaces == nil { - r.EncodeNil() - } else { - yym1926 := z.EncBinary() - _ = yym1926 - if false { + if yyq2[1] { + if x.Namespaces == nil { + r.EncodeNil() } else { - z.F.EncSliceStringV(x.Namespaces, false, e) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncSliceStringV(x.Namespaces, false, e) + } } + } else { + r.EncodeNil() } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespaces")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Namespaces == nil { - r.EncodeNil() - } else 
{ - yym1927 := z.EncBinary() - _ = yym1927 - if false { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespaces")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Namespaces == nil { + r.EncodeNil() } else { - z.F.EncSliceStringV(x.Namespaces, false, e) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncSliceStringV(x.Namespaces, false, e) + } } } } - if yyr1921 || yy2arr1921 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1921[2] { - yym1929 := z.EncBinary() - _ = yym1929 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) @@ -26273,19 +32286,19 @@ func (x *PodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1921[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("topologyKey")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1930 := z.EncBinary() - _ = yym1930 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) } } } - if yyr1921 || yy2arr1921 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -26298,25 +32311,25 @@ func (x *PodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1931 := z.DecBinary() - _ = yym1931 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1932 := r.ContainerType() - if yyct1932 == codecSelferValueTypeMap1234 { - yyl1932 := r.ReadMapStart() - if yyl1932 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1932, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1932 == codecSelferValueTypeArray1234 { - yyl1932 := r.ReadArrayStart() - if yyl1932 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1932, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -26328,12 +32341,12 @@ func (x *PodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1933Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1933Slc - var yyhl1933 bool = l >= 0 - for yyj1933 := 0; ; yyj1933++ { - if yyhl1933 { - if yyj1933 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -26342,10 +32355,10 @@ func (x *PodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1933Slc = r.DecodeBytes(yys1933Slc, true, true) - yys1933 := string(yys1933Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1933 { + switch 
yys3 { case "labelSelector": if r.TryDecodeAsNil() { if x.LabelSelector != nil { @@ -26353,10 +32366,10 @@ func (x *PodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } else { if x.LabelSelector == nil { - x.LabelSelector = new(pkg2_unversioned.LabelSelector) + x.LabelSelector = new(pkg2_v1.LabelSelector) } - yym1935 := z.DecBinary() - _ = yym1935 + yym5 := z.DecBinary() + _ = yym5 if false { } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { } else { @@ -26367,24 +32380,30 @@ func (x *PodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Namespaces = nil } else { - yyv1936 := &x.Namespaces - yym1937 := z.DecBinary() - _ = yym1937 + yyv6 := &x.Namespaces + yym7 := z.DecBinary() + _ = yym7 if false { } else { - z.F.DecSliceStringX(yyv1936, false, d) + z.F.DecSliceStringX(yyv6, false, d) } } case "topologyKey": if r.TryDecodeAsNil() { x.TopologyKey = "" } else { - x.TopologyKey = string(r.DecodeString()) + yyv8 := &x.TopologyKey + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys1933) - } // end switch yys1933 - } // end for yyj1933 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -26392,16 +32411,16 @@ func (x *PodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1939 int - var yyb1939 bool - var yyhl1939 bool = l >= 0 - yyj1939++ - if yyhl1939 { - yyb1939 = yyj1939 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1939 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1939 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -26412,23 +32431,23 @@ func (x *PodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) } } else { if x.LabelSelector == nil { - x.LabelSelector = new(pkg2_unversioned.LabelSelector) + x.LabelSelector = new(pkg2_v1.LabelSelector) } - yym1941 := z.DecBinary() - _ = yym1941 + yym12 := z.DecBinary() + _ = yym12 if false { } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { } else { z.DecFallback(x.LabelSelector, false) } } - yyj1939++ - if yyhl1939 { - yyb1939 = yyj1939 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1939 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1939 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -26436,21 +32455,21 @@ func (x *PodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Namespaces = nil } else { - yyv1942 := &x.Namespaces - yym1943 := z.DecBinary() - _ = yym1943 + yyv13 := &x.Namespaces + yym14 := z.DecBinary() + _ = yym14 if false { } else { - z.F.DecSliceStringX(yyv1942, false, d) + z.F.DecSliceStringX(yyv13, false, d) } } - yyj1939++ - if yyhl1939 { - yyb1939 = yyj1939 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1939 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1939 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -26458,20 +32477,26 @@ func (x *PodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.TopologyKey = "" } else { - x.TopologyKey = string(r.DecodeString()) + yyv15 := &x.TopologyKey + yym16 := z.DecBinary() + _ = yym16 + if false { + } 
else { + *((*string)(yyv15)) = r.DecodeString() + } } for { - yyj1939++ - if yyhl1939 { - yyb1939 = yyj1939 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb1939 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb1939 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1939-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -26483,34 +32508,34 @@ func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1945 := z.EncBinary() - _ = yym1945 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1946 := !z.EncBinary() - yy2arr1946 := z.EncBasicHandle().StructToArray - var yyq1946 [2]bool - _, _, _ = yysep1946, yyq1946, yy2arr1946 - const yyr1946 bool = false - yyq1946[0] = x.RequiredDuringSchedulingIgnoredDuringExecution != nil - yyq1946[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 - var yynn1946 int - if yyr1946 || yy2arr1946 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.RequiredDuringSchedulingIgnoredDuringExecution != nil + yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1946 = 0 - for _, b := range yyq1946 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1946++ + yynn2++ } } - r.EncodeMapStart(yynn1946) - yynn1946 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1946 || yy2arr1946 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1946[0] { + if yyq2[0] { if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { r.EncodeNil() } else { @@ -26520,7 +32545,7 @@ func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1946[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -26531,14 +32556,14 @@ func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1946 || yy2arr1946 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1946[1] { + if yyq2[1] { if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { r.EncodeNil() } else { - yym1949 := z.EncBinary() - _ = yym1949 + yym7 := z.EncBinary() + _ = yym7 if false { } else { h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) @@ -26548,15 +32573,15 @@ func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1946[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { r.EncodeNil() } else { - yym1950 := z.EncBinary() - _ = yym1950 + yym8 := z.EncBinary() + _ = yym8 if false { } else { h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) @@ -26564,7 +32589,7 @@ func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1946 || 
yy2arr1946 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -26577,25 +32602,25 @@ func (x *NodeAffinity) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1951 := z.DecBinary() - _ = yym1951 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1952 := r.ContainerType() - if yyct1952 == codecSelferValueTypeMap1234 { - yyl1952 := r.ReadMapStart() - if yyl1952 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1952, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1952 == codecSelferValueTypeArray1234 { - yyl1952 := r.ReadArrayStart() - if yyl1952 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1952, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -26607,12 +32632,12 @@ func (x *NodeAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1953Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1953Slc - var yyhl1953 bool = l >= 0 - for yyj1953 := 0; ; yyj1953++ { - if yyhl1953 { - if yyj1953 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -26621,10 +32646,10 @@ func (x *NodeAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1953Slc = r.DecodeBytes(yys1953Slc, true, true) - yys1953 := string(yys1953Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1953 { + switch yys3 { case "requiredDuringSchedulingIgnoredDuringExecution": if r.TryDecodeAsNil() { if x.RequiredDuringSchedulingIgnoredDuringExecution != nil { @@ -26640,18 +32665,18 @@ func (x *NodeAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.PreferredDuringSchedulingIgnoredDuringExecution = nil } else { - yyv1955 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym1956 := z.DecBinary() - _ = yym1956 + yyv5 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym6 := z.DecBinary() + _ = yym6 if false { } else { - h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv1955), d) + h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv5), d) } } default: - z.DecStructFieldNotFound(-1, yys1953) - } // end switch yys1953 - } // end for yyj1953 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -26659,16 +32684,16 @@ func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1957 int - var yyb1957 bool - var yyhl1957 bool = l >= 0 - yyj1957++ - if yyhl1957 { - yyb1957 = yyj1957 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + 
yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1957 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1957 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -26683,13 +32708,13 @@ func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) } - yyj1957++ - if yyhl1957 { - yyb1957 = yyj1957 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1957 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1957 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -26697,26 +32722,26 @@ func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.PreferredDuringSchedulingIgnoredDuringExecution = nil } else { - yyv1959 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym1960 := z.DecBinary() - _ = yym1960 + yyv9 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 if false { } else { - h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv1959), d) + h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv9), d) } } for { - yyj1957++ - if yyhl1957 { - yyb1957 = yyj1957 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1957 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1957 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1957-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -26728,33 +32753,33 @@ func (x *PreferredSchedulingTerm) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1961 := z.EncBinary() - _ = yym1961 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1962 := !z.EncBinary() - yy2arr1962 := z.EncBasicHandle().StructToArray - var yyq1962 [2]bool - _, _, _ = yysep1962, yyq1962, yy2arr1962 - const yyr1962 bool = false - var yynn1962 int - if yyr1962 || yy2arr1962 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1962 = 2 - for _, b := range yyq1962 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn1962++ + yynn2++ } } - r.EncodeMapStart(yynn1962) - yynn1962 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1962 || yy2arr1962 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1964 := z.EncBinary() - _ = yym1964 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeInt(int64(x.Weight)) @@ -26763,25 +32788,25 @@ func (x *PreferredSchedulingTerm) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("weight")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1965 := z.EncBinary() - _ = yym1965 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeInt(int64(x.Weight)) } } - if yyr1962 || yy2arr1962 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1967 := &x.Preference - yy1967.CodecEncodeSelf(e) + yy7 := &x.Preference + yy7.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("preference")) z.EncSendContainerState(codecSelfer_containerMapValue1234) 
- yy1968 := &x.Preference - yy1968.CodecEncodeSelf(e) + yy9 := &x.Preference + yy9.CodecEncodeSelf(e) } - if yyr1962 || yy2arr1962 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -26794,25 +32819,25 @@ func (x *PreferredSchedulingTerm) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1969 := z.DecBinary() - _ = yym1969 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1970 := r.ContainerType() - if yyct1970 == codecSelferValueTypeMap1234 { - yyl1970 := r.ReadMapStart() - if yyl1970 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1970, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1970 == codecSelferValueTypeArray1234 { - yyl1970 := r.ReadArrayStart() - if yyl1970 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1970, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -26824,12 +32849,12 @@ func (x *PreferredSchedulingTerm) codecDecodeSelfFromMap(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1971Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1971Slc - var yyhl1971 bool = l >= 0 - for yyj1971 := 0; ; yyj1971++ { - if yyhl1971 { - if yyj1971 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -26838,27 +32863,33 @@ func (x *PreferredSchedulingTerm) codecDecodeSelfFromMap(l int, d *codec1978.Dec } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1971Slc = r.DecodeBytes(yys1971Slc, true, true) - yys1971 := string(yys1971Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1971 { + switch yys3 { case "weight": if r.TryDecodeAsNil() { x.Weight = 0 } else { - x.Weight = int32(r.DecodeInt(32)) + yyv4 := &x.Weight + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } } case "preference": if r.TryDecodeAsNil() { x.Preference = NodeSelectorTerm{} } else { - yyv1973 := &x.Preference - yyv1973.CodecDecodeSelf(d) + yyv6 := &x.Preference + yyv6.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys1971) - } // end switch yys1971 - } // end for yyj1971 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -26866,16 +32897,16 @@ func (x *PreferredSchedulingTerm) codecDecodeSelfFromArray(l int, d *codec1978.D var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1974 int - var yyb1974 bool - var yyhl1974 bool = l >= 0 - yyj1974++ - if yyhl1974 { - yyb1974 = yyj1974 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1974 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1974 { + if yyb7 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -26883,15 +32914,21 @@ func (x *PreferredSchedulingTerm) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Weight = 0 } else { - x.Weight = int32(r.DecodeInt(32)) + yyv8 := &x.Weight + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } } - yyj1974++ - if yyhl1974 { - yyb1974 = yyj1974 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1974 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1974 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -26899,21 +32936,21 @@ func (x *PreferredSchedulingTerm) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Preference = NodeSelectorTerm{} } else { - yyv1976 := &x.Preference - yyv1976.CodecDecodeSelf(d) + yyv10 := &x.Preference + yyv10.CodecDecodeSelf(d) } for { - yyj1974++ - if yyhl1974 { - yyb1974 = yyj1974 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1974 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1974 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1974-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -26925,34 +32962,35 @@ func (x *Taint) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1977 := z.EncBinary() - _ = yym1977 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1978 := !z.EncBinary() - yy2arr1978 := z.EncBasicHandle().StructToArray - var yyq1978 [3]bool - _, _, _ = yysep1978, yyq1978, yy2arr1978 - const yyr1978 bool = false - yyq1978[1] = x.Value != "" - var yynn1978 int - if yyr1978 || yy2arr1978 { - r.EncodeArrayStart(3) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Value != "" + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) } else { - yynn1978 = 2 - for _, b := range yyq1978 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn1978++ + yynn2++ } } - r.EncodeMapStart(yynn1978) - yynn1978 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1978 || yy2arr1978 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1980 := z.EncBinary() - _ = yym1980 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Key)) @@ -26961,18 +32999,18 @@ func (x *Taint) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("key")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1981 := z.EncBinary() - _ = yym1981 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Key)) } } - if yyr1978 || yy2arr1978 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1978[1] { - yym1983 := z.EncBinary() - _ = yym1983 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Value)) @@ -26981,19 +33019,19 @@ func (x *Taint) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1978[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, 
string("value")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1984 := z.EncBinary() - _ = yym1984 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Value)) } } } - if yyr1978 || yy2arr1978 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) x.Effect.CodecEncodeSelf(e) } else { @@ -27002,7 +33040,44 @@ func (x *Taint) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Effect.CodecEncodeSelf(e) } - if yyr1978 || yy2arr1978 { + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy13 := &x.TimeAdded + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(yy13) { + } else if yym14 { + z.EncBinaryMarshal(yy13) + } else if !yym14 && z.IsJSONHandle() { + z.EncJSONMarshal(yy13) + } else { + z.EncFallback(yy13) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("timeAdded")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy15 := &x.TimeAdded + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } + } + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -27015,25 +33090,25 @@ func (x *Taint) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1986 := z.DecBinary() - _ = yym1986 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1987 := r.ContainerType() - if yyct1987 == codecSelferValueTypeMap1234 { - yyl1987 := r.ReadMapStart() - if yyl1987 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1987, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1987 == codecSelferValueTypeArray1234 { - yyl1987 := r.ReadArrayStart() - if yyl1987 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1987, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -27045,12 +33120,12 @@ func (x *Taint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1988Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1988Slc - var yyhl1988 bool = l >= 0 - for yyj1988 := 0; ; yyj1988++ { - if yyhl1988 { - if yyj1988 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -27059,32 +33134,62 @@ func (x *Taint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1988Slc = r.DecodeBytes(yys1988Slc, true, true) - yys1988 := string(yys1988Slc) + yys3Slc = r.DecodeBytes(yys3Slc, 
true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1988 { + switch yys3 { case "key": if r.TryDecodeAsNil() { x.Key = "" } else { - x.Key = string(r.DecodeString()) + yyv4 := &x.Key + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "value": if r.TryDecodeAsNil() { x.Value = "" } else { - x.Value = string(r.DecodeString()) + yyv6 := &x.Value + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "effect": if r.TryDecodeAsNil() { x.Effect = "" } else { - x.Effect = TaintEffect(r.DecodeString()) + yyv8 := &x.Effect + yyv8.CodecDecodeSelf(d) + } + case "timeAdded": + if r.TryDecodeAsNil() { + x.TimeAdded = pkg2_v1.Time{} + } else { + yyv9 := &x.TimeAdded + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if yym10 { + z.DecBinaryUnmarshal(yyv9) + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv9) + } else { + z.DecFallback(yyv9, false) + } } default: - z.DecStructFieldNotFound(-1, yys1988) - } // end switch yys1988 - } // end for yyj1988 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -27092,16 +33197,16 @@ func (x *Taint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1992 int - var yyb1992 bool - var yyhl1992 bool = l >= 0 - yyj1992++ - if yyhl1992 { - yyb1992 = yyj1992 > l + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb1992 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb1992 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -27109,15 +33214,21 @@ func (x *Taint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Key = "" } else { - x.Key = string(r.DecodeString()) + yyv12 := &x.Key + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } - yyj1992++ - if yyhl1992 { - yyb1992 = yyj1992 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb1992 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb1992 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -27125,15 +33236,21 @@ func (x *Taint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Value = "" } else { - x.Value = string(r.DecodeString()) + yyv14 := &x.Value + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } - yyj1992++ - if yyhl1992 { - yyb1992 = yyj1992 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb1992 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb1992 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -27141,20 +33258,48 @@ func (x *Taint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Effect = "" } else { - x.Effect = TaintEffect(r.DecodeString()) + yyv16 := &x.Effect + yyv16.CodecDecodeSelf(d) + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TimeAdded = pkg2_v1.Time{} + } else { + 
yyv17 := &x.TimeAdded + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } } for { - yyj1992++ - if yyhl1992 { - yyb1992 = yyj1992 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb1992 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb1992 { + if yyb11 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1992-1, "") + z.DecStructFieldNotFound(yyj11-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -27163,8 +33308,8 @@ func (x TaintEffect) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym1996 := z.EncBinary() - _ = yym1996 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -27176,8 +33321,8 @@ func (x *TaintEffect) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1997 := z.DecBinary() - _ = yym1997 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -27192,38 +33337,39 @@ func (x *Toleration) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1998 := z.EncBinary() - _ = yym1998 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1999 := !z.EncBinary() - yy2arr1999 := z.EncBasicHandle().StructToArray - var yyq1999 [4]bool - _, _, _ = yysep1999, yyq1999, yy2arr1999 - const yyr1999 bool = false - yyq1999[0] = x.Key != "" - yyq1999[1] = x.Operator != "" - yyq1999[2] = x.Value != "" - yyq1999[3] = x.Effect != "" - var yynn1999 int - if yyr1999 || yy2arr1999 { - r.EncodeArrayStart(4) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Key != "" + yyq2[1] = x.Operator != "" + yyq2[2] = x.Value != "" + yyq2[3] = x.Effect != "" + yyq2[4] = x.TolerationSeconds != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) } else { - yynn1999 = 0 - for _, b := range yyq1999 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1999++ + yynn2++ } } - r.EncodeMapStart(yynn1999) - yynn1999 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1999 || yy2arr1999 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1999[0] { - yym2001 := z.EncBinary() - _ = yym2001 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Key)) @@ -27232,38 +33378,38 @@ func (x *Toleration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1999[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("key")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2002 := z.EncBinary() - _ = yym2002 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Key)) } } } - if yyr1999 || yy2arr1999 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1999[1] { + if yyq2[1] { x.Operator.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1999[1] { + if 
yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("operator")) z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Operator.CodecEncodeSelf(e) } } - if yyr1999 || yy2arr1999 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1999[2] { - yym2005 := z.EncBinary() - _ = yym2005 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Value)) @@ -27272,34 +33418,69 @@ func (x *Toleration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1999[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("value")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2006 := z.EncBinary() - _ = yym2006 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Value)) } } } - if yyr1999 || yy2arr1999 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1999[3] { + if yyq2[3] { x.Effect.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1999[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("effect")) z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Effect.CodecEncodeSelf(e) } } - if yyr1999 || yy2arr1999 { + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.TolerationSeconds == nil { + r.EncodeNil() + } else { + yy16 := *x.TolerationSeconds + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(yy16)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tolerationSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TolerationSeconds == nil { + r.EncodeNil() + } else { + yy18 := *x.TolerationSeconds + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(yy18)) + } + } + } + } + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -27312,25 +33493,25 @@ func (x *Toleration) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2008 := z.DecBinary() - _ = yym2008 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2009 := r.ContainerType() - if yyct2009 == codecSelferValueTypeMap1234 { - yyl2009 := r.ReadMapStart() - if yyl2009 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2009, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2009 == codecSelferValueTypeArray1234 { - yyl2009 := r.ReadArrayStart() - if yyl2009 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2009, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -27342,12 +33523,12 @@ func (x 
*Toleration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2010Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2010Slc - var yyhl2010 bool = l >= 0 - for yyj2010 := 0; ; yyj2010++ { - if yyhl2010 { - if yyj2010 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -27356,38 +33537,68 @@ func (x *Toleration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2010Slc = r.DecodeBytes(yys2010Slc, true, true) - yys2010 := string(yys2010Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2010 { + switch yys3 { case "key": if r.TryDecodeAsNil() { x.Key = "" } else { - x.Key = string(r.DecodeString()) + yyv4 := &x.Key + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "operator": if r.TryDecodeAsNil() { x.Operator = "" } else { - x.Operator = TolerationOperator(r.DecodeString()) + yyv6 := &x.Operator + yyv6.CodecDecodeSelf(d) } case "value": if r.TryDecodeAsNil() { x.Value = "" } else { - x.Value = string(r.DecodeString()) + yyv7 := &x.Value + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } } case "effect": if r.TryDecodeAsNil() { x.Effect = "" } else { - x.Effect = TaintEffect(r.DecodeString()) + yyv9 := &x.Effect + yyv9.CodecDecodeSelf(d) + } + case "tolerationSeconds": + if r.TryDecodeAsNil() { + if x.TolerationSeconds != nil { + x.TolerationSeconds = nil + } + } else { + if x.TolerationSeconds == nil { + x.TolerationSeconds = new(int64) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int64)(x.TolerationSeconds)) = int64(r.DecodeInt(64)) + } } default: - z.DecStructFieldNotFound(-1, yys2010) - } // end switch yys2010 - } // end for yyj2010 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -27395,16 +33606,16 @@ func (x *Toleration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2015 int - var yyb2015 bool - var yyhl2015 bool = l >= 0 - yyj2015++ - if yyhl2015 { - yyb2015 = yyj2015 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2015 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2015 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -27412,15 +33623,21 @@ func (x *Toleration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Key = "" } else { - x.Key = string(r.DecodeString()) + yyv13 := &x.Key + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj2015++ - if yyhl2015 { - yyb2015 = yyj2015 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2015 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2015 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -27428,15 +33645,16 @@ func (x *Toleration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Operator = "" } else { - x.Operator = 
TolerationOperator(r.DecodeString()) + yyv15 := &x.Operator + yyv15.CodecDecodeSelf(d) } - yyj2015++ - if yyhl2015 { - yyb2015 = yyj2015 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2015 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2015 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -27444,15 +33662,21 @@ func (x *Toleration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Value = "" } else { - x.Value = string(r.DecodeString()) + yyv16 := &x.Value + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } } - yyj2015++ - if yyhl2015 { - yyb2015 = yyj2015 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2015 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2015 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -27460,20 +33684,47 @@ func (x *Toleration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Effect = "" } else { - x.Effect = TaintEffect(r.DecodeString()) + yyv18 := &x.Effect + yyv18.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TolerationSeconds != nil { + x.TolerationSeconds = nil + } + } else { + if x.TolerationSeconds == nil { + x.TolerationSeconds = new(int64) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int64)(x.TolerationSeconds)) = int64(r.DecodeInt(64)) + } } for { - yyj2015++ - if yyhl2015 { - yyb2015 = yyj2015 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2015 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2015 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2015-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -27482,8 +33733,8 @@ func (x TolerationOperator) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym2020 := z.EncBinary() - _ = yym2020 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -27495,8 +33746,8 @@ func (x *TolerationOperator) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2021 := z.DecBinary() - _ = yym2021 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -27511,53 +33762,58 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2022 := z.EncBinary() - _ = yym2022 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2023 := !z.EncBinary() - yy2arr2023 := z.EncBasicHandle().StructToArray - var yyq2023 [17]bool - _, _, _ = yysep2023, yyq2023, yy2arr2023 - const yyr2023 bool = false - yyq2023[0] = len(x.Volumes) != 0 - yyq2023[2] = x.RestartPolicy != "" - yyq2023[3] = x.TerminationGracePeriodSeconds != nil - yyq2023[4] = x.ActiveDeadlineSeconds != nil - yyq2023[5] = x.DNSPolicy != "" - yyq2023[6] = len(x.NodeSelector) != 0 - yyq2023[7] = x.ServiceAccountName != "" - yyq2023[8] = x.DeprecatedServiceAccount != "" - yyq2023[9] = x.NodeName != "" - yyq2023[10] = 
x.HostNetwork != false - yyq2023[11] = x.HostPID != false - yyq2023[12] = x.HostIPC != false - yyq2023[13] = x.SecurityContext != nil - yyq2023[14] = len(x.ImagePullSecrets) != 0 - yyq2023[15] = x.Hostname != "" - yyq2023[16] = x.Subdomain != "" - var yynn2023 int - if yyr2023 || yy2arr2023 { - r.EncodeArrayStart(17) - } else { - yynn2023 = 1 - for _, b := range yyq2023 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [22]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Volumes) != 0 + yyq2[1] = len(x.InitContainers) != 0 + yyq2[3] = x.RestartPolicy != "" + yyq2[4] = x.TerminationGracePeriodSeconds != nil + yyq2[5] = x.ActiveDeadlineSeconds != nil + yyq2[6] = x.DNSPolicy != "" + yyq2[7] = len(x.NodeSelector) != 0 + yyq2[8] = x.ServiceAccountName != "" + yyq2[9] = x.DeprecatedServiceAccount != "" + yyq2[10] = x.AutomountServiceAccountToken != nil + yyq2[11] = x.NodeName != "" + yyq2[12] = x.HostNetwork != false + yyq2[13] = x.HostPID != false + yyq2[14] = x.HostIPC != false + yyq2[15] = x.SecurityContext != nil + yyq2[16] = len(x.ImagePullSecrets) != 0 + yyq2[17] = x.Hostname != "" + yyq2[18] = x.Subdomain != "" + yyq2[19] = x.Affinity != nil + yyq2[20] = x.SchedulerName != "" + yyq2[21] = len(x.Tolerations) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(22) + } else { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn2023++ + yynn2++ } } - r.EncodeMapStart(yynn2023) - yynn2023 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2023 || yy2arr2023 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2023[0] { + if yyq2[0] { if x.Volumes == nil { r.EncodeNil() } else { - yym2025 := z.EncBinary() - _ = yym2025 + yym4 := z.EncBinary() + _ = yym4 if false { } else { h.encSliceVolume(([]Volume)(x.Volumes), e) @@ -27567,15 +33823,15 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2023[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("volumes")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Volumes == nil { r.EncodeNil() } else { - yym2026 := z.EncBinary() - _ = yym2026 + yym5 := z.EncBinary() + _ = yym5 if false { } else { h.encSliceVolume(([]Volume)(x.Volumes), e) @@ -27583,13 +33839,46 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2023 || yy2arr2023 { + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.InitContainers == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceContainer(([]Container)(x.InitContainers), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("initContainers")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.InitContainers == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceContainer(([]Container)(x.InitContainers), e) + } + } + } + } + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Containers == nil { r.EncodeNil() } else { - yym2028 := z.EncBinary() - _ = yym2028 + yym10 := z.EncBinary() + _ = yym10 if false { } else { h.encSliceContainer(([]Container)(x.Containers), e) @@ -27602,122 +33891,122 @@ func (x *PodSpec) CodecEncodeSelf(e 
*codec1978.Encoder) { if x.Containers == nil { r.EncodeNil() } else { - yym2029 := z.EncBinary() - _ = yym2029 + yym11 := z.EncBinary() + _ = yym11 if false { } else { h.encSliceContainer(([]Container)(x.Containers), e) } } } - if yyr2023 || yy2arr2023 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2023[2] { + if yyq2[3] { x.RestartPolicy.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2023[2] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("restartPolicy")) z.EncSendContainerState(codecSelfer_containerMapValue1234) x.RestartPolicy.CodecEncodeSelf(e) } } - if yyr2023 || yy2arr2023 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2023[3] { + if yyq2[4] { if x.TerminationGracePeriodSeconds == nil { r.EncodeNil() } else { - yy2032 := *x.TerminationGracePeriodSeconds - yym2033 := z.EncBinary() - _ = yym2033 + yy16 := *x.TerminationGracePeriodSeconds + yym17 := z.EncBinary() + _ = yym17 if false { } else { - r.EncodeInt(int64(yy2032)) + r.EncodeInt(int64(yy16)) } } } else { r.EncodeNil() } } else { - if yyq2023[3] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("terminationGracePeriodSeconds")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.TerminationGracePeriodSeconds == nil { r.EncodeNil() } else { - yy2034 := *x.TerminationGracePeriodSeconds - yym2035 := z.EncBinary() - _ = yym2035 + yy18 := *x.TerminationGracePeriodSeconds + yym19 := z.EncBinary() + _ = yym19 if false { } else { - r.EncodeInt(int64(yy2034)) + r.EncodeInt(int64(yy18)) } } } } - if yyr2023 || yy2arr2023 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2023[4] { + if yyq2[5] { if x.ActiveDeadlineSeconds == nil { r.EncodeNil() } else { - yy2037 := *x.ActiveDeadlineSeconds - yym2038 := z.EncBinary() - _ = yym2038 + yy21 := *x.ActiveDeadlineSeconds + yym22 := z.EncBinary() + _ = yym22 if false { } else { - r.EncodeInt(int64(yy2037)) + r.EncodeInt(int64(yy21)) } } } else { r.EncodeNil() } } else { - if yyq2023[4] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.ActiveDeadlineSeconds == nil { r.EncodeNil() } else { - yy2039 := *x.ActiveDeadlineSeconds - yym2040 := z.EncBinary() - _ = yym2040 + yy23 := *x.ActiveDeadlineSeconds + yym24 := z.EncBinary() + _ = yym24 if false { } else { - r.EncodeInt(int64(yy2039)) + r.EncodeInt(int64(yy23)) } } } } - if yyr2023 || yy2arr2023 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2023[5] { + if yyq2[6] { x.DNSPolicy.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2023[5] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("dnsPolicy")) z.EncSendContainerState(codecSelfer_containerMapValue1234) x.DNSPolicy.CodecEncodeSelf(e) } } - if yyr2023 || yy2arr2023 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2023[6] { + if yyq2[7] { if x.NodeSelector == nil { r.EncodeNil() } else { - yym2043 := z.EncBinary() - _ = yym2043 + yym29 := z.EncBinary() + _ = yym29 if false { } else { z.F.EncMapStringStringV(x.NodeSelector, 
false, e) @@ -27727,15 +34016,15 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2023[6] { + if yyq2[7] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nodeSelector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.NodeSelector == nil { r.EncodeNil() } else { - yym2044 := z.EncBinary() - _ = yym2044 + yym30 := z.EncBinary() + _ = yym30 if false { } else { z.F.EncMapStringStringV(x.NodeSelector, false, e) @@ -27743,11 +34032,11 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2023 || yy2arr2023 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2023[7] { - yym2046 := z.EncBinary() - _ = yym2046 + if yyq2[8] { + yym32 := z.EncBinary() + _ = yym32 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName)) @@ -27756,23 +34045,23 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2023[7] { + if yyq2[8] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("serviceAccountName")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2047 := z.EncBinary() - _ = yym2047 + yym33 := z.EncBinary() + _ = yym33 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName)) } } } - if yyr2023 || yy2arr2023 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2023[8] { - yym2049 := z.EncBinary() - _ = yym2049 + if yyq2[9] { + yym35 := z.EncBinary() + _ = yym35 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.DeprecatedServiceAccount)) @@ -27781,23 +34070,58 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2023[8] { + if yyq2[9] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("serviceAccount")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2050 := z.EncBinary() - _ = yym2050 + yym36 := z.EncBinary() + _ = yym36 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.DeprecatedServiceAccount)) } } } - if yyr2023 || yy2arr2023 { + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.AutomountServiceAccountToken == nil { + r.EncodeNil() + } else { + yy38 := *x.AutomountServiceAccountToken + yym39 := z.EncBinary() + _ = yym39 + if false { + } else { + r.EncodeBool(bool(yy38)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("automountServiceAccountToken")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AutomountServiceAccountToken == nil { + r.EncodeNil() + } else { + yy40 := *x.AutomountServiceAccountToken + yym41 := z.EncBinary() + _ = yym41 + if false { + } else { + r.EncodeBool(bool(yy40)) + } + } + } + } + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2023[9] { - yym2052 := z.EncBinary() - _ = yym2052 + if yyq2[11] { + yym43 := z.EncBinary() + _ = yym43 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.NodeName)) @@ -27806,23 +34130,23 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2023[9] { + if 
yyq2[11] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nodeName")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2053 := z.EncBinary() - _ = yym2053 + yym44 := z.EncBinary() + _ = yym44 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.NodeName)) } } } - if yyr2023 || yy2arr2023 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2023[10] { - yym2055 := z.EncBinary() - _ = yym2055 + if yyq2[12] { + yym46 := z.EncBinary() + _ = yym46 if false { } else { r.EncodeBool(bool(x.HostNetwork)) @@ -27831,23 +34155,23 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq2023[10] { + if yyq2[12] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("hostNetwork")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2056 := z.EncBinary() - _ = yym2056 + yym47 := z.EncBinary() + _ = yym47 if false { } else { r.EncodeBool(bool(x.HostNetwork)) } } } - if yyr2023 || yy2arr2023 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2023[11] { - yym2058 := z.EncBinary() - _ = yym2058 + if yyq2[13] { + yym49 := z.EncBinary() + _ = yym49 if false { } else { r.EncodeBool(bool(x.HostPID)) @@ -27856,23 +34180,23 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq2023[11] { + if yyq2[13] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("hostPID")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2059 := z.EncBinary() - _ = yym2059 + yym50 := z.EncBinary() + _ = yym50 if false { } else { r.EncodeBool(bool(x.HostPID)) } } } - if yyr2023 || yy2arr2023 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2023[12] { - yym2061 := z.EncBinary() - _ = yym2061 + if yyq2[14] { + yym52 := z.EncBinary() + _ = yym52 if false { } else { r.EncodeBool(bool(x.HostIPC)) @@ -27881,21 +34205,21 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq2023[12] { + if yyq2[14] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("hostIPC")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2062 := z.EncBinary() - _ = yym2062 + yym53 := z.EncBinary() + _ = yym53 if false { } else { r.EncodeBool(bool(x.HostIPC)) } } } - if yyr2023 || yy2arr2023 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2023[13] { + if yyq2[15] { if x.SecurityContext == nil { r.EncodeNil() } else { @@ -27905,7 +34229,7 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2023[13] { + if yyq2[15] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("securityContext")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -27916,14 +34240,14 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2023 || yy2arr2023 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2023[14] { + if yyq2[16] { if x.ImagePullSecrets == nil { r.EncodeNil() } else { - yym2065 := z.EncBinary() - _ = yym2065 + yym58 := z.EncBinary() + _ = yym58 if false { } else { h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) @@ 
-27933,15 +34257,15 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2023[14] { + if yyq2[16] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("imagePullSecrets")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.ImagePullSecrets == nil { r.EncodeNil() } else { - yym2066 := z.EncBinary() - _ = yym2066 + yym59 := z.EncBinary() + _ = yym59 if false { } else { h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) @@ -27949,11 +34273,11 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2023 || yy2arr2023 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2023[15] { - yym2068 := z.EncBinary() - _ = yym2068 + if yyq2[17] { + yym61 := z.EncBinary() + _ = yym61 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) @@ -27962,23 +34286,23 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2023[15] { + if yyq2[17] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("hostname")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2069 := z.EncBinary() - _ = yym2069 + yym62 := z.EncBinary() + _ = yym62 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) } } } - if yyr2023 || yy2arr2023 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2023[16] { - yym2071 := z.EncBinary() - _ = yym2071 + if yyq2[18] { + yym64 := z.EncBinary() + _ = yym64 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) @@ -27987,19 +34311,100 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2023[16] { + if yyq2[18] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("subdomain")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2072 := z.EncBinary() - _ = yym2072 + yym65 := z.EncBinary() + _ = yym65 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) } } } - if yyr2023 || yy2arr2023 { + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[19] { + if x.Affinity == nil { + r.EncodeNil() + } else { + x.Affinity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[19] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("affinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Affinity == nil { + r.EncodeNil() + } else { + x.Affinity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[20] { + yym70 := z.EncBinary() + _ = yym70 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[20] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("schedulerName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym71 := z.EncBinary() + _ = yym71 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if 
yyq2[21] { + if x.Tolerations == nil { + r.EncodeNil() + } else { + yym73 := z.EncBinary() + _ = yym73 + if false { + } else { + h.encSliceToleration(([]Toleration)(x.Tolerations), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[21] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tolerations")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Tolerations == nil { + r.EncodeNil() + } else { + yym74 := z.EncBinary() + _ = yym74 + if false { + } else { + h.encSliceToleration(([]Toleration)(x.Tolerations), e) + } + } + } + } + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -28012,25 +34417,25 @@ func (x *PodSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2073 := z.DecBinary() - _ = yym2073 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2074 := r.ContainerType() - if yyct2074 == codecSelferValueTypeMap1234 { - yyl2074 := r.ReadMapStart() - if yyl2074 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2074, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2074 == codecSelferValueTypeArray1234 { - yyl2074 := r.ReadArrayStart() - if yyl2074 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2074, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -28042,12 +34447,12 @@ func (x *PodSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2075Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2075Slc - var yyhl2075 bool = l >= 0 - for yyj2075 := 0; ; yyj2075++ { - if yyhl2075 { - if yyj2075 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -28056,39 +34461,52 @@ func (x *PodSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2075Slc = r.DecodeBytes(yys2075Slc, true, true) - yys2075 := string(yys2075Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2075 { + switch yys3 { case "volumes": if r.TryDecodeAsNil() { x.Volumes = nil } else { - yyv2076 := &x.Volumes - yym2077 := z.DecBinary() - _ = yym2077 + yyv4 := &x.Volumes + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceVolume((*[]Volume)(yyv4), d) + } + } + case "initContainers": + if r.TryDecodeAsNil() { + x.InitContainers = nil + } else { + yyv6 := &x.InitContainers + yym7 := z.DecBinary() + _ = yym7 if false { } else { - h.decSliceVolume((*[]Volume)(yyv2076), d) + h.decSliceContainer((*[]Container)(yyv6), d) } } case "containers": if r.TryDecodeAsNil() { x.Containers = nil } else { - yyv2078 := &x.Containers - yym2079 := z.DecBinary() - _ = yym2079 + yyv8 := &x.Containers + yym9 := z.DecBinary() + _ 
= yym9 if false { } else { - h.decSliceContainer((*[]Container)(yyv2078), d) + h.decSliceContainer((*[]Container)(yyv8), d) } } case "restartPolicy": if r.TryDecodeAsNil() { x.RestartPolicy = "" } else { - x.RestartPolicy = RestartPolicy(r.DecodeString()) + yyv10 := &x.RestartPolicy + yyv10.CodecDecodeSelf(d) } case "terminationGracePeriodSeconds": if r.TryDecodeAsNil() { @@ -28099,8 +34517,8 @@ func (x *PodSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.TerminationGracePeriodSeconds == nil { x.TerminationGracePeriodSeconds = new(int64) } - yym2082 := z.DecBinary() - _ = yym2082 + yym12 := z.DecBinary() + _ = yym12 if false { } else { *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64)) @@ -28115,8 +34533,8 @@ func (x *PodSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.ActiveDeadlineSeconds == nil { x.ActiveDeadlineSeconds = new(int64) } - yym2084 := z.DecBinary() - _ = yym2084 + yym14 := z.DecBinary() + _ = yym14 if false { } else { *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) @@ -28126,55 +34544,108 @@ func (x *PodSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.DNSPolicy = "" } else { - x.DNSPolicy = DNSPolicy(r.DecodeString()) + yyv15 := &x.DNSPolicy + yyv15.CodecDecodeSelf(d) } case "nodeSelector": if r.TryDecodeAsNil() { x.NodeSelector = nil } else { - yyv2086 := &x.NodeSelector - yym2087 := z.DecBinary() - _ = yym2087 + yyv16 := &x.NodeSelector + yym17 := z.DecBinary() + _ = yym17 if false { } else { - z.F.DecMapStringStringX(yyv2086, false, d) + z.F.DecMapStringStringX(yyv16, false, d) } } case "serviceAccountName": if r.TryDecodeAsNil() { x.ServiceAccountName = "" } else { - x.ServiceAccountName = string(r.DecodeString()) + yyv18 := &x.ServiceAccountName + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } } case "serviceAccount": if r.TryDecodeAsNil() { x.DeprecatedServiceAccount = "" } else { - x.DeprecatedServiceAccount = string(r.DecodeString()) + yyv20 := &x.DeprecatedServiceAccount + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(yyv20)) = r.DecodeString() + } + } + case "automountServiceAccountToken": + if r.TryDecodeAsNil() { + if x.AutomountServiceAccountToken != nil { + x.AutomountServiceAccountToken = nil + } + } else { + if x.AutomountServiceAccountToken == nil { + x.AutomountServiceAccountToken = new(bool) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*bool)(x.AutomountServiceAccountToken)) = r.DecodeBool() + } } case "nodeName": if r.TryDecodeAsNil() { x.NodeName = "" } else { - x.NodeName = string(r.DecodeString()) + yyv24 := &x.NodeName + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + *((*string)(yyv24)) = r.DecodeString() + } } case "hostNetwork": if r.TryDecodeAsNil() { x.HostNetwork = false } else { - x.HostNetwork = bool(r.DecodeBool()) + yyv26 := &x.HostNetwork + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*bool)(yyv26)) = r.DecodeBool() + } } case "hostPID": if r.TryDecodeAsNil() { x.HostPID = false } else { - x.HostPID = bool(r.DecodeBool()) + yyv28 := &x.HostPID + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*bool)(yyv28)) = r.DecodeBool() + } } case "hostIPC": if r.TryDecodeAsNil() { x.HostIPC = false } else { - x.HostIPC = bool(r.DecodeBool()) + yyv30 := &x.HostIPC + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*bool)(yyv30)) = r.DecodeBool() + } } case "securityContext": if 
r.TryDecodeAsNil() { @@ -28191,30 +34662,77 @@ func (x *PodSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ImagePullSecrets = nil } else { - yyv2095 := &x.ImagePullSecrets - yym2096 := z.DecBinary() - _ = yym2096 + yyv33 := &x.ImagePullSecrets + yym34 := z.DecBinary() + _ = yym34 if false { } else { - h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv2095), d) + h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv33), d) } } case "hostname": if r.TryDecodeAsNil() { x.Hostname = "" } else { - x.Hostname = string(r.DecodeString()) + yyv35 := &x.Hostname + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*string)(yyv35)) = r.DecodeString() + } } case "subdomain": if r.TryDecodeAsNil() { x.Subdomain = "" } else { - x.Subdomain = string(r.DecodeString()) + yyv37 := &x.Subdomain + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*string)(yyv37)) = r.DecodeString() + } + } + case "affinity": + if r.TryDecodeAsNil() { + if x.Affinity != nil { + x.Affinity = nil + } + } else { + if x.Affinity == nil { + x.Affinity = new(Affinity) + } + x.Affinity.CodecDecodeSelf(d) + } + case "schedulerName": + if r.TryDecodeAsNil() { + x.SchedulerName = "" + } else { + yyv40 := &x.SchedulerName + yym41 := z.DecBinary() + _ = yym41 + if false { + } else { + *((*string)(yyv40)) = r.DecodeString() + } + } + case "tolerations": + if r.TryDecodeAsNil() { + x.Tolerations = nil + } else { + yyv42 := &x.Tolerations + yym43 := z.DecBinary() + _ = yym43 + if false { + } else { + h.decSliceToleration((*[]Toleration)(yyv42), d) + } } default: - z.DecStructFieldNotFound(-1, yys2075) - } // end switch yys2075 - } // end for yyj2075 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -28222,16 +34740,16 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2099 int - var yyb2099 bool - var yyhl2099 bool = l >= 0 - yyj2099++ - if yyhl2099 { - yyb2099 = yyj2099 > l + var yyj44 int + var yyb44 bool + var yyhl44 bool = l >= 0 + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l } else { - yyb2099 = r.CheckBreak() + yyb44 = r.CheckBreak() } - if yyb2099 { + if yyb44 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28239,21 +34757,43 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Volumes = nil } else { - yyv2100 := &x.Volumes - yym2101 := z.DecBinary() - _ = yym2101 + yyv45 := &x.Volumes + yym46 := z.DecBinary() + _ = yym46 + if false { + } else { + h.decSliceVolume((*[]Volume)(yyv45), d) + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.InitContainers = nil + } else { + yyv47 := &x.InitContainers + yym48 := z.DecBinary() + _ = yym48 if false { } else { - h.decSliceVolume((*[]Volume)(yyv2100), d) + h.decSliceContainer((*[]Container)(yyv47), d) } } - yyj2099++ - if yyhl2099 { - yyb2099 = yyj2099 > l + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l } else { - yyb2099 = r.CheckBreak() + yyb44 = r.CheckBreak() } - if yyb2099 { + if yyb44 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28261,21 +34801,21 @@ func (x *PodSpec) codecDecodeSelfFromArray(l 
int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Containers = nil } else { - yyv2102 := &x.Containers - yym2103 := z.DecBinary() - _ = yym2103 + yyv49 := &x.Containers + yym50 := z.DecBinary() + _ = yym50 if false { } else { - h.decSliceContainer((*[]Container)(yyv2102), d) + h.decSliceContainer((*[]Container)(yyv49), d) } } - yyj2099++ - if yyhl2099 { - yyb2099 = yyj2099 > l + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l } else { - yyb2099 = r.CheckBreak() + yyb44 = r.CheckBreak() } - if yyb2099 { + if yyb44 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28283,15 +34823,16 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.RestartPolicy = "" } else { - x.RestartPolicy = RestartPolicy(r.DecodeString()) + yyv51 := &x.RestartPolicy + yyv51.CodecDecodeSelf(d) } - yyj2099++ - if yyhl2099 { - yyb2099 = yyj2099 > l + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l } else { - yyb2099 = r.CheckBreak() + yyb44 = r.CheckBreak() } - if yyb2099 { + if yyb44 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28304,20 +34845,20 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.TerminationGracePeriodSeconds == nil { x.TerminationGracePeriodSeconds = new(int64) } - yym2106 := z.DecBinary() - _ = yym2106 + yym53 := z.DecBinary() + _ = yym53 if false { } else { *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64)) } } - yyj2099++ - if yyhl2099 { - yyb2099 = yyj2099 > l + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l } else { - yyb2099 = r.CheckBreak() + yyb44 = r.CheckBreak() } - if yyb2099 { + if yyb44 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28330,20 +34871,20 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.ActiveDeadlineSeconds == nil { x.ActiveDeadlineSeconds = new(int64) } - yym2108 := z.DecBinary() - _ = yym2108 + yym55 := z.DecBinary() + _ = yym55 if false { } else { *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) } } - yyj2099++ - if yyhl2099 { - yyb2099 = yyj2099 > l + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l } else { - yyb2099 = r.CheckBreak() + yyb44 = r.CheckBreak() } - if yyb2099 { + if yyb44 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28351,15 +34892,16 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.DNSPolicy = "" } else { - x.DNSPolicy = DNSPolicy(r.DecodeString()) + yyv56 := &x.DNSPolicy + yyv56.CodecDecodeSelf(d) } - yyj2099++ - if yyhl2099 { - yyb2099 = yyj2099 > l + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l } else { - yyb2099 = r.CheckBreak() + yyb44 = r.CheckBreak() } - if yyb2099 { + if yyb44 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28367,21 +34909,21 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.NodeSelector = nil } else { - yyv2110 := &x.NodeSelector - yym2111 := z.DecBinary() - _ = yym2111 + yyv57 := &x.NodeSelector + yym58 := z.DecBinary() + _ = yym58 if false { } else { - z.F.DecMapStringStringX(yyv2110, false, d) + z.F.DecMapStringStringX(yyv57, false, d) } } - yyj2099++ - if yyhl2099 { - yyb2099 = yyj2099 > l + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l } else { - yyb2099 = r.CheckBreak() + yyb44 = r.CheckBreak() } - if yyb2099 { + if yyb44 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28389,15 +34931,21 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d 
*codec1978.Decoder) { if r.TryDecodeAsNil() { x.ServiceAccountName = "" } else { - x.ServiceAccountName = string(r.DecodeString()) + yyv59 := &x.ServiceAccountName + yym60 := z.DecBinary() + _ = yym60 + if false { + } else { + *((*string)(yyv59)) = r.DecodeString() + } } - yyj2099++ - if yyhl2099 { - yyb2099 = yyj2099 > l + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l } else { - yyb2099 = r.CheckBreak() + yyb44 = r.CheckBreak() } - if yyb2099 { + if yyb44 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28405,15 +34953,47 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.DeprecatedServiceAccount = "" } else { - x.DeprecatedServiceAccount = string(r.DecodeString()) + yyv61 := &x.DeprecatedServiceAccount + yym62 := z.DecBinary() + _ = yym62 + if false { + } else { + *((*string)(yyv61)) = r.DecodeString() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AutomountServiceAccountToken != nil { + x.AutomountServiceAccountToken = nil + } + } else { + if x.AutomountServiceAccountToken == nil { + x.AutomountServiceAccountToken = new(bool) + } + yym64 := z.DecBinary() + _ = yym64 + if false { + } else { + *((*bool)(x.AutomountServiceAccountToken)) = r.DecodeBool() + } } - yyj2099++ - if yyhl2099 { - yyb2099 = yyj2099 > l + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l } else { - yyb2099 = r.CheckBreak() + yyb44 = r.CheckBreak() } - if yyb2099 { + if yyb44 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28421,15 +35001,21 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.NodeName = "" } else { - x.NodeName = string(r.DecodeString()) + yyv65 := &x.NodeName + yym66 := z.DecBinary() + _ = yym66 + if false { + } else { + *((*string)(yyv65)) = r.DecodeString() + } } - yyj2099++ - if yyhl2099 { - yyb2099 = yyj2099 > l + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l } else { - yyb2099 = r.CheckBreak() + yyb44 = r.CheckBreak() } - if yyb2099 { + if yyb44 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28437,15 +35023,21 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.HostNetwork = false } else { - x.HostNetwork = bool(r.DecodeBool()) + yyv67 := &x.HostNetwork + yym68 := z.DecBinary() + _ = yym68 + if false { + } else { + *((*bool)(yyv67)) = r.DecodeBool() + } } - yyj2099++ - if yyhl2099 { - yyb2099 = yyj2099 > l + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l } else { - yyb2099 = r.CheckBreak() + yyb44 = r.CheckBreak() } - if yyb2099 { + if yyb44 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28453,15 +35045,21 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.HostPID = false } else { - x.HostPID = bool(r.DecodeBool()) + yyv69 := &x.HostPID + yym70 := z.DecBinary() + _ = yym70 + if false { + } else { + *((*bool)(yyv69)) = r.DecodeBool() + } } - yyj2099++ - if yyhl2099 { - yyb2099 = yyj2099 > l + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l } else { - yyb2099 = r.CheckBreak() + yyb44 = r.CheckBreak() } - if yyb2099 { + if yyb44 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28469,15 +35067,21 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { 
if r.TryDecodeAsNil() { x.HostIPC = false } else { - x.HostIPC = bool(r.DecodeBool()) + yyv71 := &x.HostIPC + yym72 := z.DecBinary() + _ = yym72 + if false { + } else { + *((*bool)(yyv71)) = r.DecodeBool() + } } - yyj2099++ - if yyhl2099 { - yyb2099 = yyj2099 > l + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l } else { - yyb2099 = r.CheckBreak() + yyb44 = r.CheckBreak() } - if yyb2099 { + if yyb44 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28492,13 +35096,13 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.SecurityContext.CodecDecodeSelf(d) } - yyj2099++ - if yyhl2099 { - yyb2099 = yyj2099 > l + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l } else { - yyb2099 = r.CheckBreak() + yyb44 = r.CheckBreak() } - if yyb2099 { + if yyb44 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28506,21 +35110,21 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ImagePullSecrets = nil } else { - yyv2119 := &x.ImagePullSecrets - yym2120 := z.DecBinary() - _ = yym2120 + yyv74 := &x.ImagePullSecrets + yym75 := z.DecBinary() + _ = yym75 if false { } else { - h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv2119), d) + h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv74), d) } } - yyj2099++ - if yyhl2099 { - yyb2099 = yyj2099 > l + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l } else { - yyb2099 = r.CheckBreak() + yyb44 = r.CheckBreak() } - if yyb2099 { + if yyb44 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28528,15 +35132,21 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Hostname = "" } else { - x.Hostname = string(r.DecodeString()) + yyv76 := &x.Hostname + yym77 := z.DecBinary() + _ = yym77 + if false { + } else { + *((*string)(yyv76)) = r.DecodeString() + } } - yyj2099++ - if yyhl2099 { - yyb2099 = yyj2099 > l + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l } else { - yyb2099 = r.CheckBreak() + yyb44 = r.CheckBreak() } - if yyb2099 { + if yyb44 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28544,20 +35154,91 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Subdomain = "" } else { - x.Subdomain = string(r.DecodeString()) + yyv78 := &x.Subdomain + yym79 := z.DecBinary() + _ = yym79 + if false { + } else { + *((*string)(yyv78)) = r.DecodeString() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Affinity != nil { + x.Affinity = nil + } + } else { + if x.Affinity == nil { + x.Affinity = new(Affinity) + } + x.Affinity.CodecDecodeSelf(d) + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SchedulerName = "" + } else { + yyv81 := &x.SchedulerName + yym82 := z.DecBinary() + _ = yym82 + if false { + } else { + *((*string)(yyv81)) = r.DecodeString() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Tolerations = nil + } else { + yyv83 := &x.Tolerations + yym84 := z.DecBinary() + _ = yym84 + if false { + } else { + h.decSliceToleration((*[]Toleration)(yyv83), d) + } } for { - yyj2099++ - if yyhl2099 { - yyb2099 = yyj2099 > l + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l } else { - yyb2099 = r.CheckBreak() + yyb44 = r.CheckBreak() } - if yyb2099 { + if yyb44 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2099-1, "") + z.DecStructFieldNotFound(yyj44-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -28569,37 +35250,37 @@ func (x *PodSecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2123 := z.EncBinary() - _ = yym2123 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2124 := !z.EncBinary() - yy2arr2124 := z.EncBasicHandle().StructToArray - var yyq2124 [5]bool - _, _, _ = yysep2124, yyq2124, yy2arr2124 - const yyr2124 bool = false - yyq2124[0] = x.SELinuxOptions != nil - yyq2124[1] = x.RunAsUser != nil - yyq2124[2] = x.RunAsNonRoot != nil - yyq2124[3] = len(x.SupplementalGroups) != 0 - yyq2124[4] = x.FSGroup != nil - var yynn2124 int - if yyr2124 || yy2arr2124 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.SELinuxOptions != nil + yyq2[1] = x.RunAsUser != nil + yyq2[2] = x.RunAsNonRoot != nil + yyq2[3] = len(x.SupplementalGroups) != 0 + yyq2[4] = x.FSGroup != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn2124 = 0 - for _, b := range yyq2124 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn2124++ + yynn2++ } } - r.EncodeMapStart(yynn2124) - yynn2124 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2124 || yy2arr2124 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2124[0] { + if yyq2[0] { if x.SELinuxOptions == nil { r.EncodeNil() } else { @@ -28609,7 +35290,7 @@ func (x *PodSecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2124[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -28620,84 +35301,84 @@ func (x *PodSecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2124 || yy2arr2124 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2124[1] { + if yyq2[1] { if x.RunAsUser == nil { r.EncodeNil() } else { - yy2127 := *x.RunAsUser - yym2128 := z.EncBinary() - _ = yym2128 + yy7 := *x.RunAsUser + yym8 := z.EncBinary() + _ = yym8 if false { } else { - r.EncodeInt(int64(yy2127)) + r.EncodeInt(int64(yy7)) } } } else { r.EncodeNil() } } else { - if yyq2124[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.RunAsUser == nil { r.EncodeNil() } else { - yy2129 := *x.RunAsUser - yym2130 := z.EncBinary() - _ = yym2130 + yy9 := *x.RunAsUser + yym10 := z.EncBinary() + _ = yym10 if false { } else { - r.EncodeInt(int64(yy2129)) + r.EncodeInt(int64(yy9)) } } } } - if yyr2124 || yy2arr2124 { + if yyr2 || yy2arr2 { 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2124[2] { + if yyq2[2] { if x.RunAsNonRoot == nil { r.EncodeNil() } else { - yy2132 := *x.RunAsNonRoot - yym2133 := z.EncBinary() - _ = yym2133 + yy12 := *x.RunAsNonRoot + yym13 := z.EncBinary() + _ = yym13 if false { } else { - r.EncodeBool(bool(yy2132)) + r.EncodeBool(bool(yy12)) } } } else { r.EncodeNil() } } else { - if yyq2124[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("runAsNonRoot")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.RunAsNonRoot == nil { r.EncodeNil() } else { - yy2134 := *x.RunAsNonRoot - yym2135 := z.EncBinary() - _ = yym2135 + yy14 := *x.RunAsNonRoot + yym15 := z.EncBinary() + _ = yym15 if false { } else { - r.EncodeBool(bool(yy2134)) + r.EncodeBool(bool(yy14)) } } } } - if yyr2124 || yy2arr2124 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2124[3] { + if yyq2[3] { if x.SupplementalGroups == nil { r.EncodeNil() } else { - yym2137 := z.EncBinary() - _ = yym2137 + yym17 := z.EncBinary() + _ = yym17 if false { } else { z.F.EncSliceInt64V(x.SupplementalGroups, false, e) @@ -28707,15 +35388,15 @@ func (x *PodSecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2124[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("supplementalGroups")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.SupplementalGroups == nil { r.EncodeNil() } else { - yym2138 := z.EncBinary() - _ = yym2138 + yym18 := z.EncBinary() + _ = yym18 if false { } else { z.F.EncSliceInt64V(x.SupplementalGroups, false, e) @@ -28723,42 +35404,42 @@ func (x *PodSecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2124 || yy2arr2124 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2124[4] { + if yyq2[4] { if x.FSGroup == nil { r.EncodeNil() } else { - yy2140 := *x.FSGroup - yym2141 := z.EncBinary() - _ = yym2141 + yy20 := *x.FSGroup + yym21 := z.EncBinary() + _ = yym21 if false { } else { - r.EncodeInt(int64(yy2140)) + r.EncodeInt(int64(yy20)) } } } else { r.EncodeNil() } } else { - if yyq2124[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("fsGroup")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.FSGroup == nil { r.EncodeNil() } else { - yy2142 := *x.FSGroup - yym2143 := z.EncBinary() - _ = yym2143 + yy22 := *x.FSGroup + yym23 := z.EncBinary() + _ = yym23 if false { } else { - r.EncodeInt(int64(yy2142)) + r.EncodeInt(int64(yy22)) } } } } - if yyr2124 || yy2arr2124 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -28771,25 +35452,25 @@ func (x *PodSecurityContext) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2144 := z.DecBinary() - _ = yym2144 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2145 := r.ContainerType() - if yyct2145 == codecSelferValueTypeMap1234 { - yyl2145 := r.ReadMapStart() - if yyl2145 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - 
x.codecDecodeSelfFromMap(yyl2145, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2145 == codecSelferValueTypeArray1234 { - yyl2145 := r.ReadArrayStart() - if yyl2145 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2145, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -28801,12 +35482,12 @@ func (x *PodSecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2146Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2146Slc - var yyhl2146 bool = l >= 0 - for yyj2146 := 0; ; yyj2146++ { - if yyhl2146 { - if yyj2146 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -28815,10 +35496,10 @@ func (x *PodSecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2146Slc = r.DecodeBytes(yys2146Slc, true, true) - yys2146 := string(yys2146Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2146 { + switch yys3 { case "seLinuxOptions": if r.TryDecodeAsNil() { if x.SELinuxOptions != nil { @@ -28839,8 +35520,8 @@ func (x *PodSecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) if x.RunAsUser == nil { x.RunAsUser = new(int64) } - yym2149 := z.DecBinary() - _ = yym2149 + yym6 := z.DecBinary() + _ = yym6 if false { } else { *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) @@ -28855,8 +35536,8 @@ func (x *PodSecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) if x.RunAsNonRoot == nil { x.RunAsNonRoot = new(bool) } - yym2151 := z.DecBinary() - _ = yym2151 + yym8 := z.DecBinary() + _ = yym8 if false { } else { *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() @@ -28866,12 +35547,12 @@ func (x *PodSecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.SupplementalGroups = nil } else { - yyv2152 := &x.SupplementalGroups - yym2153 := z.DecBinary() - _ = yym2153 + yyv9 := &x.SupplementalGroups + yym10 := z.DecBinary() + _ = yym10 if false { } else { - z.F.DecSliceInt64X(yyv2152, false, d) + z.F.DecSliceInt64X(yyv9, false, d) } } case "fsGroup": @@ -28883,17 +35564,17 @@ func (x *PodSecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) if x.FSGroup == nil { x.FSGroup = new(int64) } - yym2155 := z.DecBinary() - _ = yym2155 + yym12 := z.DecBinary() + _ = yym12 if false { } else { *((*int64)(x.FSGroup)) = int64(r.DecodeInt(64)) } } default: - z.DecStructFieldNotFound(-1, yys2146) - } // end switch yys2146 - } // end for yyj2146 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -28901,16 +35582,16 @@ func (x *PodSecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2156 int - var yyb2156 bool - var yyhl2156 bool = l >= 0 - yyj2156++ - if yyhl2156 { - yyb2156 = yyj2156 > l + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb2156 = 
r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb2156 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28925,13 +35606,13 @@ func (x *PodSecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decode } x.SELinuxOptions.CodecDecodeSelf(d) } - yyj2156++ - if yyhl2156 { - yyb2156 = yyj2156 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb2156 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb2156 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28944,20 +35625,20 @@ func (x *PodSecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decode if x.RunAsUser == nil { x.RunAsUser = new(int64) } - yym2159 := z.DecBinary() - _ = yym2159 + yym16 := z.DecBinary() + _ = yym16 if false { } else { *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) } } - yyj2156++ - if yyhl2156 { - yyb2156 = yyj2156 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb2156 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb2156 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28970,20 +35651,20 @@ func (x *PodSecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decode if x.RunAsNonRoot == nil { x.RunAsNonRoot = new(bool) } - yym2161 := z.DecBinary() - _ = yym2161 + yym18 := z.DecBinary() + _ = yym18 if false { } else { *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() } } - yyj2156++ - if yyhl2156 { - yyb2156 = yyj2156 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb2156 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb2156 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28991,21 +35672,21 @@ func (x *PodSecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.SupplementalGroups = nil } else { - yyv2162 := &x.SupplementalGroups - yym2163 := z.DecBinary() - _ = yym2163 + yyv19 := &x.SupplementalGroups + yym20 := z.DecBinary() + _ = yym20 if false { } else { - z.F.DecSliceInt64X(yyv2162, false, d) + z.F.DecSliceInt64X(yyv19, false, d) } } - yyj2156++ - if yyhl2156 { - yyb2156 = yyj2156 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb2156 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb2156 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -29018,29 +35699,55 @@ func (x *PodSecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decode if x.FSGroup == nil { x.FSGroup = new(int64) } - yym2165 := z.DecBinary() - _ = yym2165 + yym22 := z.DecBinary() + _ = yym22 if false { } else { *((*int64)(x.FSGroup)) = int64(r.DecodeInt(64)) } } for { - yyj2156++ - if yyhl2156 { - yyb2156 = yyj2156 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb2156 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb2156 { + if yyb13 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2156-1, "") + z.DecStructFieldNotFound(yyj13-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } +func (x PodQOSClass) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PodQOSClass) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false 
{ + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + func (x *PodStatus) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) @@ -29048,60 +35755,62 @@ func (x *PodStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2166 := z.EncBinary() - _ = yym2166 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2167 := !z.EncBinary() - yy2arr2167 := z.EncBasicHandle().StructToArray - var yyq2167 [8]bool - _, _, _ = yysep2167, yyq2167, yy2arr2167 - const yyr2167 bool = false - yyq2167[0] = x.Phase != "" - yyq2167[1] = len(x.Conditions) != 0 - yyq2167[2] = x.Message != "" - yyq2167[3] = x.Reason != "" - yyq2167[4] = x.HostIP != "" - yyq2167[5] = x.PodIP != "" - yyq2167[6] = x.StartTime != nil - yyq2167[7] = len(x.ContainerStatuses) != 0 - var yynn2167 int - if yyr2167 || yy2arr2167 { - r.EncodeArrayStart(8) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Phase != "" + yyq2[1] = len(x.Conditions) != 0 + yyq2[2] = x.Message != "" + yyq2[3] = x.Reason != "" + yyq2[4] = x.HostIP != "" + yyq2[5] = x.PodIP != "" + yyq2[6] = x.StartTime != nil + yyq2[7] = len(x.InitContainerStatuses) != 0 + yyq2[8] = len(x.ContainerStatuses) != 0 + yyq2[9] = x.QOSClass != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) } else { - yynn2167 = 0 - for _, b := range yyq2167 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn2167++ + yynn2++ } } - r.EncodeMapStart(yynn2167) - yynn2167 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2167 || yy2arr2167 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2167[0] { + if yyq2[0] { x.Phase.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2167[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("phase")) z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Phase.CodecEncodeSelf(e) } } - if yyr2167 || yy2arr2167 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2167[1] { + if yyq2[1] { if x.Conditions == nil { r.EncodeNil() } else { - yym2170 := z.EncBinary() - _ = yym2170 + yym7 := z.EncBinary() + _ = yym7 if false { } else { h.encSlicePodCondition(([]PodCondition)(x.Conditions), e) @@ -29111,15 +35820,15 @@ func (x *PodStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2167[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("conditions")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Conditions == nil { r.EncodeNil() } else { - yym2171 := z.EncBinary() - _ = yym2171 + yym8 := z.EncBinary() + _ = yym8 if false { } else { h.encSlicePodCondition(([]PodCondition)(x.Conditions), e) @@ -29127,11 +35836,11 @@ func (x *PodStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2167 || yy2arr2167 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2167[2] { - yym2173 := z.EncBinary() - _ = yym2173 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) @@ -29140,23 +35849,23 @@ func (x *PodStatus) CodecEncodeSelf(e 
*codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2167[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("message")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2174 := z.EncBinary() - _ = yym2174 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) } } } - if yyr2167 || yy2arr2167 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2167[3] { - yym2176 := z.EncBinary() - _ = yym2176 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) @@ -29165,23 +35874,23 @@ func (x *PodStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2167[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("reason")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2177 := z.EncBinary() - _ = yym2177 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) } } } - if yyr2167 || yy2arr2167 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2167[4] { - yym2179 := z.EncBinary() - _ = yym2179 + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) @@ -29190,23 +35899,23 @@ func (x *PodStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2167[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("hostIP")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2180 := z.EncBinary() - _ = yym2180 + yym17 := z.EncBinary() + _ = yym17 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) } } } - if yyr2167 || yy2arr2167 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2167[5] { - yym2182 := z.EncBinary() - _ = yym2182 + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.PodIP)) @@ -29215,31 +35924,31 @@ func (x *PodStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2167[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("podIP")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2183 := z.EncBinary() - _ = yym2183 + yym20 := z.EncBinary() + _ = yym20 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.PodIP)) } } } - if yyr2167 || yy2arr2167 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2167[6] { + if yyq2[6] { if x.StartTime == nil { r.EncodeNil() } else { - yym2185 := z.EncBinary() - _ = yym2185 + yym22 := z.EncBinary() + _ = yym22 if false { } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym2185 { + } else if yym22 { z.EncBinaryMarshal(x.StartTime) - } else if !yym2185 && z.IsJSONHandle() { + } else if !yym22 && z.IsJSONHandle() { z.EncJSONMarshal(x.StartTime) } else { z.EncFallback(x.StartTime) @@ -29249,20 +35958,20 @@ func (x *PodStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2167[6] { + if yyq2[6] { 
z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("startTime")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.StartTime == nil { r.EncodeNil() } else { - yym2186 := z.EncBinary() - _ = yym2186 + yym23 := z.EncBinary() + _ = yym23 if false { } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym2186 { + } else if yym23 { z.EncBinaryMarshal(x.StartTime) - } else if !yym2186 && z.IsJSONHandle() { + } else if !yym23 && z.IsJSONHandle() { z.EncJSONMarshal(x.StartTime) } else { z.EncFallback(x.StartTime) @@ -29270,14 +35979,47 @@ func (x *PodStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2167 || yy2arr2167 { + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.InitContainerStatuses == nil { + r.EncodeNil() + } else { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + h.encSliceContainerStatus(([]ContainerStatus)(x.InitContainerStatuses), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("initContainerStatuses")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.InitContainerStatuses == nil { + r.EncodeNil() + } else { + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + h.encSliceContainerStatus(([]ContainerStatus)(x.InitContainerStatuses), e) + } + } + } + } + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2167[7] { + if yyq2[8] { if x.ContainerStatuses == nil { r.EncodeNil() } else { - yym2188 := z.EncBinary() - _ = yym2188 + yym28 := z.EncBinary() + _ = yym28 if false { } else { h.encSliceContainerStatus(([]ContainerStatus)(x.ContainerStatuses), e) @@ -29287,15 +36029,15 @@ func (x *PodStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2167[7] { + if yyq2[8] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("containerStatuses")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.ContainerStatuses == nil { r.EncodeNil() } else { - yym2189 := z.EncBinary() - _ = yym2189 + yym29 := z.EncBinary() + _ = yym29 if false { } else { h.encSliceContainerStatus(([]ContainerStatus)(x.ContainerStatuses), e) @@ -29303,7 +36045,22 @@ func (x *PodStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2167 || yy2arr2167 { + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + x.QOSClass.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("qosClass")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.QOSClass.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -29316,25 +36073,25 @@ func (x *PodStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2190 := z.DecBinary() - _ = yym2190 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2191 := r.ContainerType() - if yyct2191 == codecSelferValueTypeMap1234 { - yyl2191 := r.ReadMapStart() - if yyl2191 == 0 { + yyct2 := r.ContainerType() + if 
yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2191, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2191 == codecSelferValueTypeArray1234 { - yyl2191 := r.ReadArrayStart() - if yyl2191 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2191, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -29346,12 +36103,12 @@ func (x *PodStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2192Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2192Slc - var yyhl2192 bool = l >= 0 - for yyj2192 := 0; ; yyj2192++ { - if yyhl2192 { - if yyj2192 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -29360,51 +36117,76 @@ func (x *PodStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2192Slc = r.DecodeBytes(yys2192Slc, true, true) - yys2192 := string(yys2192Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2192 { + switch yys3 { case "phase": if r.TryDecodeAsNil() { x.Phase = "" } else { - x.Phase = PodPhase(r.DecodeString()) + yyv4 := &x.Phase + yyv4.CodecDecodeSelf(d) } case "conditions": if r.TryDecodeAsNil() { x.Conditions = nil } else { - yyv2194 := &x.Conditions - yym2195 := z.DecBinary() - _ = yym2195 + yyv5 := &x.Conditions + yym6 := z.DecBinary() + _ = yym6 if false { } else { - h.decSlicePodCondition((*[]PodCondition)(yyv2194), d) + h.decSlicePodCondition((*[]PodCondition)(yyv5), d) } } case "message": if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv7 := &x.Message + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } } case "reason": if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv9 := &x.Reason + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } } case "hostIP": if r.TryDecodeAsNil() { x.HostIP = "" } else { - x.HostIP = string(r.DecodeString()) + yyv11 := &x.HostIP + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } case "podIP": if r.TryDecodeAsNil() { x.PodIP = "" } else { - x.PodIP = string(r.DecodeString()) + yyv13 := &x.PodIP + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } case "startTime": if r.TryDecodeAsNil() { @@ -29413,36 +36195,55 @@ func (x *PodStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } else { if x.StartTime == nil { - x.StartTime = new(pkg2_unversioned.Time) + x.StartTime = new(pkg2_v1.Time) } - yym2201 := z.DecBinary() - _ = yym2201 + yym16 := z.DecBinary() + _ = yym16 if false { } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym2201 { + } else if yym16 { z.DecBinaryUnmarshal(x.StartTime) - } else if !yym2201 && z.IsJSONHandle() { + } else if !yym16 && z.IsJSONHandle() { 
z.DecJSONUnmarshal(x.StartTime) } else { z.DecFallback(x.StartTime, false) } } + case "initContainerStatuses": + if r.TryDecodeAsNil() { + x.InitContainerStatuses = nil + } else { + yyv17 := &x.InitContainerStatuses + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + h.decSliceContainerStatus((*[]ContainerStatus)(yyv17), d) + } + } case "containerStatuses": if r.TryDecodeAsNil() { x.ContainerStatuses = nil } else { - yyv2202 := &x.ContainerStatuses - yym2203 := z.DecBinary() - _ = yym2203 + yyv19 := &x.ContainerStatuses + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceContainerStatus((*[]ContainerStatus)(yyv2202), d) + h.decSliceContainerStatus((*[]ContainerStatus)(yyv19), d) } } + case "qosClass": + if r.TryDecodeAsNil() { + x.QOSClass = "" + } else { + yyv21 := &x.QOSClass + yyv21.CodecDecodeSelf(d) + } default: - z.DecStructFieldNotFound(-1, yys2192) - } // end switch yys2192 - } // end for yyj2192 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -29450,16 +36251,16 @@ func (x *PodStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2204 int - var yyb2204 bool - var yyhl2204 bool = l >= 0 - yyj2204++ - if yyhl2204 { - yyb2204 = yyj2204 > l + var yyj22 int + var yyb22 bool + var yyhl22 bool = l >= 0 + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l } else { - yyb2204 = r.CheckBreak() + yyb22 = r.CheckBreak() } - if yyb2204 { + if yyb22 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -29467,15 +36268,16 @@ func (x *PodStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Phase = "" } else { - x.Phase = PodPhase(r.DecodeString()) + yyv23 := &x.Phase + yyv23.CodecDecodeSelf(d) } - yyj2204++ - if yyhl2204 { - yyb2204 = yyj2204 > l + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l } else { - yyb2204 = r.CheckBreak() + yyb22 = r.CheckBreak() } - if yyb2204 { + if yyb22 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -29483,21 +36285,21 @@ func (x *PodStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Conditions = nil } else { - yyv2206 := &x.Conditions - yym2207 := z.DecBinary() - _ = yym2207 + yyv24 := &x.Conditions + yym25 := z.DecBinary() + _ = yym25 if false { } else { - h.decSlicePodCondition((*[]PodCondition)(yyv2206), d) + h.decSlicePodCondition((*[]PodCondition)(yyv24), d) } } - yyj2204++ - if yyhl2204 { - yyb2204 = yyj2204 > l + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l } else { - yyb2204 = r.CheckBreak() + yyb22 = r.CheckBreak() } - if yyb2204 { + if yyb22 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -29505,15 +36307,21 @@ func (x *PodStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv26 := &x.Message + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*string)(yyv26)) = r.DecodeString() + } } - yyj2204++ - if yyhl2204 { - yyb2204 = yyj2204 > l + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l } else { - yyb2204 = r.CheckBreak() + yyb22 = r.CheckBreak() } - if yyb2204 { + if yyb22 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -29521,15 +36329,21 @@ func (x *PodStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = 
string(r.DecodeString()) + yyv28 := &x.Reason + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*string)(yyv28)) = r.DecodeString() + } } - yyj2204++ - if yyhl2204 { - yyb2204 = yyj2204 > l + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l } else { - yyb2204 = r.CheckBreak() + yyb22 = r.CheckBreak() } - if yyb2204 { + if yyb22 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -29537,15 +36351,21 @@ func (x *PodStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.HostIP = "" } else { - x.HostIP = string(r.DecodeString()) + yyv30 := &x.HostIP + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*string)(yyv30)) = r.DecodeString() + } } - yyj2204++ - if yyhl2204 { - yyb2204 = yyj2204 > l + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l } else { - yyb2204 = r.CheckBreak() + yyb22 = r.CheckBreak() } - if yyb2204 { + if yyb22 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -29553,15 +36373,21 @@ func (x *PodStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.PodIP = "" } else { - x.PodIP = string(r.DecodeString()) + yyv32 := &x.PodIP + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + *((*string)(yyv32)) = r.DecodeString() + } } - yyj2204++ - if yyhl2204 { - yyb2204 = yyj2204 > l + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l } else { - yyb2204 = r.CheckBreak() + yyb22 = r.CheckBreak() } - if yyb2204 { + if yyb22 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -29572,27 +36398,49 @@ func (x *PodStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } } else { if x.StartTime == nil { - x.StartTime = new(pkg2_unversioned.Time) + x.StartTime = new(pkg2_v1.Time) } - yym2213 := z.DecBinary() - _ = yym2213 + yym35 := z.DecBinary() + _ = yym35 if false { } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym2213 { + } else if yym35 { z.DecBinaryUnmarshal(x.StartTime) - } else if !yym2213 && z.IsJSONHandle() { + } else if !yym35 && z.IsJSONHandle() { z.DecJSONUnmarshal(x.StartTime) } else { z.DecFallback(x.StartTime, false) } } - yyj2204++ - if yyhl2204 { - yyb2204 = yyj2204 > l + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l } else { - yyb2204 = r.CheckBreak() + yyb22 = r.CheckBreak() } - if yyb2204 { + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.InitContainerStatuses = nil + } else { + yyv36 := &x.InitContainerStatuses + yym37 := z.DecBinary() + _ = yym37 + if false { + } else { + h.decSliceContainerStatus((*[]ContainerStatus)(yyv36), d) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -29600,26 +36448,43 @@ func (x *PodStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ContainerStatuses = nil } else { - yyv2214 := &x.ContainerStatuses - yym2215 := z.DecBinary() - _ = yym2215 + yyv38 := &x.ContainerStatuses + yym39 := z.DecBinary() + _ = yym39 if false { } else { - h.decSliceContainerStatus((*[]ContainerStatus)(yyv2214), d) + h.decSliceContainerStatus((*[]ContainerStatus)(yyv38), d) } } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + 
if r.TryDecodeAsNil() { + x.QOSClass = "" + } else { + yyv40 := &x.QOSClass + yyv40.CodecDecodeSelf(d) + } for { - yyj2204++ - if yyhl2204 { - yyb2204 = yyj2204 > l + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l } else { - yyb2204 = r.CheckBreak() + yyb22 = r.CheckBreak() } - if yyb2204 { + if yyb22 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2204-1, "") + z.DecStructFieldNotFound(yyj22-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -29631,38 +36496,38 @@ func (x *PodStatusResult) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2216 := z.EncBinary() - _ = yym2216 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2217 := !z.EncBinary() - yy2arr2217 := z.EncBasicHandle().StructToArray - var yyq2217 [4]bool - _, _, _ = yysep2217, yyq2217, yy2arr2217 - const yyr2217 bool = false - yyq2217[0] = x.Kind != "" - yyq2217[1] = x.APIVersion != "" - yyq2217[2] = true - yyq2217[3] = true - var yynn2217 int - if yyr2217 || yy2arr2217 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn2217 = 0 - for _, b := range yyq2217 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn2217++ + yynn2++ } } - r.EncodeMapStart(yynn2217) - yynn2217 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2217 || yy2arr2217 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2217[0] { - yym2219 := z.EncBinary() - _ = yym2219 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -29671,23 +36536,23 @@ func (x *PodStatusResult) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2217[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2220 := z.EncBinary() - _ = yym2220 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr2217 || yy2arr2217 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2217[1] { - yym2222 := z.EncBinary() - _ = yym2222 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -29696,53 +36561,65 @@ func (x *PodStatusResult) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2217[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2223 := z.EncBinary() - _ = yym2223 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr2217 || yy2arr2217 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2217[2] { - yy2225 := &x.ObjectMeta - yy2225.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && 
z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq2217[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2226 := &x.ObjectMeta - yy2226.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr2217 || yy2arr2217 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2217[3] { - yy2228 := &x.Status - yy2228.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := &x.Status + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq2217[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2229 := &x.Status - yy2229.CodecEncodeSelf(e) + yy17 := &x.Status + yy17.CodecEncodeSelf(e) } } - if yyr2217 || yy2arr2217 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -29755,25 +36632,25 @@ func (x *PodStatusResult) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2230 := z.DecBinary() - _ = yym2230 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2231 := r.ContainerType() - if yyct2231 == codecSelferValueTypeMap1234 { - yyl2231 := r.ReadMapStart() - if yyl2231 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2231, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2231 == codecSelferValueTypeArray1234 { - yyl2231 := r.ReadArrayStart() - if yyl2231 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2231, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -29785,12 +36662,12 @@ func (x *PodStatusResult) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2232Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2232Slc - var yyhl2232 bool = l >= 0 - for yyj2232 := 0; ; yyj2232++ { - if yyhl2232 { - if yyj2232 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -29799,40 +36676,58 @@ func (x *PodStatusResult) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2232Slc = r.DecodeBytes(yys2232Slc, true, true) - yys2232 := string(yys2232Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2232 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + 
*((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv2235 := &x.ObjectMeta - yyv2235.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "status": if r.TryDecodeAsNil() { x.Status = PodStatus{} } else { - yyv2236 := &x.Status - yyv2236.CodecDecodeSelf(d) + yyv10 := &x.Status + yyv10.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys2232) - } // end switch yys2232 - } // end for yyj2232 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -29840,16 +36735,16 @@ func (x *PodStatusResult) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2237 int - var yyb2237 bool - var yyhl2237 bool = l >= 0 - yyj2237++ - if yyhl2237 { - yyb2237 = yyj2237 > l + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb2237 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb2237 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -29857,15 +36752,21 @@ func (x *PodStatusResult) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } - yyj2237++ - if yyhl2237 { - yyb2237 = yyj2237 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb2237 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb2237 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -29873,32 +36774,44 @@ func (x *PodStatusResult) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } - yyj2237++ - if yyhl2237 { - yyb2237 = yyj2237 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb2237 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb2237 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv2240 := &x.ObjectMeta - yyv2240.CodecDecodeSelf(d) + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } } - yyj2237++ - if yyhl2237 { - yyb2237 = yyj2237 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb2237 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb2237 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -29906,21 +36819,21 @@ func (x *PodStatusResult) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Status = 
PodStatus{} } else { - yyv2241 := &x.Status - yyv2241.CodecDecodeSelf(d) + yyv18 := &x.Status + yyv18.CodecDecodeSelf(d) } for { - yyj2237++ - if yyhl2237 { - yyb2237 = yyj2237 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb2237 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb2237 { + if yyb11 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2237-1, "") + z.DecStructFieldNotFound(yyj11-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -29932,39 +36845,39 @@ func (x *Pod) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2242 := z.EncBinary() - _ = yym2242 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2243 := !z.EncBinary() - yy2arr2243 := z.EncBasicHandle().StructToArray - var yyq2243 [5]bool - _, _, _ = yysep2243, yyq2243, yy2arr2243 - const yyr2243 bool = false - yyq2243[0] = x.Kind != "" - yyq2243[1] = x.APIVersion != "" - yyq2243[2] = true - yyq2243[3] = true - yyq2243[4] = true - var yynn2243 int - if yyr2243 || yy2arr2243 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn2243 = 0 - for _, b := range yyq2243 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn2243++ + yynn2++ } } - r.EncodeMapStart(yynn2243) - yynn2243 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2243 || yy2arr2243 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2243[0] { - yym2245 := z.EncBinary() - _ = yym2245 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -29973,23 +36886,23 @@ func (x *Pod) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2243[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2246 := z.EncBinary() - _ = yym2246 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr2243 || yy2arr2243 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2243[1] { - yym2248 := z.EncBinary() - _ = yym2248 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -29998,70 +36911,82 @@ func (x *Pod) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2243[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2249 := z.EncBinary() - _ = yym2249 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr2243 || yy2arr2243 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2243[2] { - yy2251 := &x.ObjectMeta - yy2251.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if 
z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq2243[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2252 := &x.ObjectMeta - yy2252.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr2243 || yy2arr2243 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2243[3] { - yy2254 := &x.Spec - yy2254.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq2243[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2255 := &x.Spec - yy2255.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } } - if yyr2243 || yy2arr2243 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2243[4] { - yy2257 := &x.Status - yy2257.CodecEncodeSelf(e) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq2243[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2258 := &x.Status - yy2258.CodecEncodeSelf(e) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } - if yyr2243 || yy2arr2243 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -30074,25 +36999,25 @@ func (x *Pod) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2259 := z.DecBinary() - _ = yym2259 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2260 := r.ContainerType() - if yyct2260 == codecSelferValueTypeMap1234 { - yyl2260 := r.ReadMapStart() - if yyl2260 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2260, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2260 == codecSelferValueTypeArray1234 { - yyl2260 := r.ReadArrayStart() - if yyl2260 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2260, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -30104,12 +37029,12 @@ func (x *Pod) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2261Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2261Slc - var yyhl2261 bool = l >= 0 - for yyj2261 := 0; ; yyj2261++ { - if yyhl2261 { - if yyj2261 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -30118,47 +37043,65 @@ func (x *Pod) 
codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2261Slc = r.DecodeBytes(yys2261Slc, true, true) - yys2261 := string(yys2261Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2261 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv2264 := &x.ObjectMeta - yyv2264.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = PodSpec{} } else { - yyv2265 := &x.Spec - yyv2265.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = PodStatus{} } else { - yyv2266 := &x.Status - yyv2266.CodecDecodeSelf(d) + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys2261) - } // end switch yys2261 - } // end for yyj2261 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -30166,16 +37109,16 @@ func (x *Pod) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2267 int - var yyb2267 bool - var yyhl2267 bool = l >= 0 - yyj2267++ - if yyhl2267 { - yyb2267 = yyj2267 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2267 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2267 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -30183,15 +37126,21 @@ func (x *Pod) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj2267++ - if yyhl2267 { - yyb2267 = yyj2267 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2267 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2267 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -30199,32 +37148,44 @@ func (x *Pod) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj2267++ - if yyhl2267 { - yyb2267 = yyj2267 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2267 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2267 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + 
x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv2270 := &x.ObjectMeta - yyv2270.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj2267++ - if yyhl2267 { - yyb2267 = yyj2267 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2267 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2267 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -30232,16 +37193,16 @@ func (x *Pod) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Spec = PodSpec{} } else { - yyv2271 := &x.Spec - yyv2271.CodecDecodeSelf(d) + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) } - yyj2267++ - if yyhl2267 { - yyb2267 = yyj2267 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2267 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2267 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -30249,21 +37210,21 @@ func (x *Pod) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Status = PodStatus{} } else { - yyv2272 := &x.Status - yyv2272.CodecDecodeSelf(d) + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) } for { - yyj2267++ - if yyhl2267 { - yyb2267 = yyj2267 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2267 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2267 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2267-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -30275,37 +37236,37 @@ func (x *PodList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2273 := z.EncBinary() - _ = yym2273 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2274 := !z.EncBinary() - yy2arr2274 := z.EncBasicHandle().StructToArray - var yyq2274 [4]bool - _, _, _ = yysep2274, yyq2274, yy2arr2274 - const yyr2274 bool = false - yyq2274[0] = x.Kind != "" - yyq2274[1] = x.APIVersion != "" - yyq2274[2] = true - var yynn2274 int - if yyr2274 || yy2arr2274 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn2274 = 1 - for _, b := range yyq2274 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn2274++ + yynn2++ } } - r.EncodeMapStart(yynn2274) - yynn2274 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2274 || yy2arr2274 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2274[0] { - yym2276 := z.EncBinary() - _ = yym2276 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -30314,23 +37275,23 @@ func (x *PodList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2274[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2277 := z.EncBinary() - _ = yym2277 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if 
yyr2274 || yy2arr2274 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2274[1] { - yym2279 := z.EncBinary() - _ = yym2279 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -30339,54 +37300,54 @@ func (x *PodList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2274[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2280 := z.EncBinary() - _ = yym2280 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr2274 || yy2arr2274 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2274[2] { - yy2282 := &x.ListMeta - yym2283 := z.EncBinary() - _ = yym2283 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy2282) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy2282) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq2274[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2284 := &x.ListMeta - yym2285 := z.EncBinary() - _ = yym2285 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy2284) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy2284) + z.EncFallback(yy12) } } } - if yyr2274 || yy2arr2274 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym2287 := z.EncBinary() - _ = yym2287 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSlicePod(([]Pod)(x.Items), e) @@ -30399,15 +37360,15 @@ func (x *PodList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym2288 := z.EncBinary() - _ = yym2288 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSlicePod(([]Pod)(x.Items), e) } } } - if yyr2274 || yy2arr2274 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -30420,25 +37381,25 @@ func (x *PodList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2289 := z.DecBinary() - _ = yym2289 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2290 := r.ContainerType() - if yyct2290 == codecSelferValueTypeMap1234 { - yyl2290 := r.ReadMapStart() - if yyl2290 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2290, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2290 == codecSelferValueTypeArray1234 { - yyl2290 := r.ReadArrayStart() - if yyl2290 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2290, d) + 
x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -30450,12 +37411,12 @@ func (x *PodList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2291Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2291Slc - var yyhl2291 bool = l >= 0 - for yyj2291 := 0; ; yyj2291++ { - if yyhl2291 { - if yyj2291 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -30464,51 +37425,63 @@ func (x *PodList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2291Slc = r.DecodeBytes(yys2291Slc, true, true) - yys2291 := string(yys2291Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2291 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv2294 := &x.ListMeta - yym2295 := z.DecBinary() - _ = yym2295 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv2294) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv2294, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv2296 := &x.Items - yym2297 := z.DecBinary() - _ = yym2297 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSlicePod((*[]Pod)(yyv2296), d) + h.decSlicePod((*[]Pod)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys2291) - } // end switch yys2291 - } // end for yyj2291 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -30516,16 +37489,16 @@ func (x *PodList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2298 int - var yyb2298 bool - var yyhl2298 bool = l >= 0 - yyj2298++ - if yyhl2298 { - yyb2298 = yyj2298 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2298 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2298 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -30533,15 +37506,21 @@ func (x *PodList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj2298++ - if yyhl2298 { - yyb2298 = yyj2298 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2298 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2298 { + if yyb12 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -30549,38 +37528,44 @@ func (x *PodList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj2298++ - if yyhl2298 { - yyb2298 = yyj2298 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2298 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2298 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv2301 := &x.ListMeta - yym2302 := z.DecBinary() - _ = yym2302 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv2301) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv2301, false) + z.DecFallback(yyv17, false) } } - yyj2298++ - if yyhl2298 { - yyb2298 = yyj2298 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2298 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2298 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -30588,26 +37573,26 @@ func (x *PodList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Items = nil } else { - yyv2303 := &x.Items - yym2304 := z.DecBinary() - _ = yym2304 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSlicePod((*[]Pod)(yyv2303), d) + h.decSlicePod((*[]Pod)(yyv19), d) } } for { - yyj2298++ - if yyhl2298 { - yyb2298 = yyj2298 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2298 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2298 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2298-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -30619,66 +37604,78 @@ func (x *PodTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2305 := z.EncBinary() - _ = yym2305 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2306 := !z.EncBinary() - yy2arr2306 := z.EncBasicHandle().StructToArray - var yyq2306 [2]bool - _, _, _ = yysep2306, yyq2306, yy2arr2306 - const yyr2306 bool = false - yyq2306[0] = true - yyq2306[1] = true - var yynn2306 int - if yyr2306 || yy2arr2306 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn2306 = 0 - for _, b := range yyq2306 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn2306++ + yynn2++ } } - r.EncodeMapStart(yynn2306) - yynn2306 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2306 || yy2arr2306 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2306[0] { - yy2308 := &x.ObjectMeta - yy2308.CodecEncodeSelf(e) + if yyq2[0] { + yy4 := &x.ObjectMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } } else { r.EncodeNil() } } else { - if yyq2306[0] 
{ + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2309 := &x.ObjectMeta - yy2309.CodecEncodeSelf(e) + yy6 := &x.ObjectMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } } } - if yyr2306 || yy2arr2306 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2306[1] { - yy2311 := &x.Spec - yy2311.CodecEncodeSelf(e) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq2306[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2312 := &x.Spec - yy2312.CodecEncodeSelf(e) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) } } - if yyr2306 || yy2arr2306 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -30691,25 +37688,25 @@ func (x *PodTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2313 := z.DecBinary() - _ = yym2313 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2314 := r.ContainerType() - if yyct2314 == codecSelferValueTypeMap1234 { - yyl2314 := r.ReadMapStart() - if yyl2314 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2314, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2314 == codecSelferValueTypeArray1234 { - yyl2314 := r.ReadArrayStart() - if yyl2314 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2314, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -30721,12 +37718,12 @@ func (x *PodTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2315Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2315Slc - var yyhl2315 bool = l >= 0 - for yyj2315 := 0; ; yyj2315++ { - if yyhl2315 { - if yyj2315 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -30735,28 +37732,34 @@ func (x *PodTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2315Slc = r.DecodeBytes(yys2315Slc, true, true) - yys2315 := string(yys2315Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2315 { + switch yys3 { case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv2316 := &x.ObjectMeta - yyv2316.CodecDecodeSelf(d) + yyv4 := &x.ObjectMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + 
z.DecFallback(yyv4, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = PodSpec{} } else { - yyv2317 := &x.Spec - yyv2317.CodecDecodeSelf(d) + yyv6 := &x.Spec + yyv6.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys2315) - } // end switch yys2315 - } // end for yyj2315 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -30764,33 +37767,39 @@ func (x *PodTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2318 int - var yyb2318 bool - var yyhl2318 bool = l >= 0 - yyj2318++ - if yyhl2318 { - yyb2318 = yyj2318 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb2318 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb2318 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv2319 := &x.ObjectMeta - yyv2319.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } - yyj2318++ - if yyhl2318 { - yyb2318 = yyj2318 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb2318 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb2318 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -30798,21 +37807,21 @@ func (x *PodTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Spec = PodSpec{} } else { - yyv2320 := &x.Spec - yyv2320.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } for { - yyj2318++ - if yyhl2318 { - yyb2318 = yyj2318 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb2318 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb2318 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2318-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -30824,38 +37833,38 @@ func (x *PodTemplate) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2321 := z.EncBinary() - _ = yym2321 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2322 := !z.EncBinary() - yy2arr2322 := z.EncBasicHandle().StructToArray - var yyq2322 [4]bool - _, _, _ = yysep2322, yyq2322, yy2arr2322 - const yyr2322 bool = false - yyq2322[0] = x.Kind != "" - yyq2322[1] = x.APIVersion != "" - yyq2322[2] = true - yyq2322[3] = true - var yynn2322 int - if yyr2322 || yy2arr2322 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn2322 = 0 - for _, b := range yyq2322 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn2322++ + yynn2++ } } - r.EncodeMapStart(yynn2322) - yynn2322 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2322 || yy2arr2322 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2322[0] { - yym2324 := z.EncBinary() - _ = yym2324 + if 
yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -30864,23 +37873,23 @@ func (x *PodTemplate) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2322[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2325 := z.EncBinary() - _ = yym2325 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr2322 || yy2arr2322 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2322[1] { - yym2327 := z.EncBinary() - _ = yym2327 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -30889,53 +37898,65 @@ func (x *PodTemplate) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2322[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2328 := z.EncBinary() - _ = yym2328 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr2322 || yy2arr2322 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2322[2] { - yy2330 := &x.ObjectMeta - yy2330.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq2322[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2331 := &x.ObjectMeta - yy2331.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr2322 || yy2arr2322 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2322[3] { - yy2333 := &x.Template - yy2333.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := &x.Template + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq2322[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("template")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2334 := &x.Template - yy2334.CodecEncodeSelf(e) + yy17 := &x.Template + yy17.CodecEncodeSelf(e) } } - if yyr2322 || yy2arr2322 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -30948,25 +37969,25 @@ func (x *PodTemplate) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2335 := z.DecBinary() - _ = yym2335 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2336 := r.ContainerType() - if yyct2336 == codecSelferValueTypeMap1234 { - yyl2336 := r.ReadMapStart() - if yyl2336 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() 
+ if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2336, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2336 == codecSelferValueTypeArray1234 { - yyl2336 := r.ReadArrayStart() - if yyl2336 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2336, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -30978,12 +37999,12 @@ func (x *PodTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2337Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2337Slc - var yyhl2337 bool = l >= 0 - for yyj2337 := 0; ; yyj2337++ { - if yyhl2337 { - if yyj2337 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -30992,40 +38013,58 @@ func (x *PodTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2337Slc = r.DecodeBytes(yys2337Slc, true, true) - yys2337 := string(yys2337Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2337 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv2340 := &x.ObjectMeta - yyv2340.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "template": if r.TryDecodeAsNil() { x.Template = PodTemplateSpec{} } else { - yyv2341 := &x.Template - yyv2341.CodecDecodeSelf(d) + yyv10 := &x.Template + yyv10.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys2337) - } // end switch yys2337 - } // end for yyj2337 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -31033,16 +38072,16 @@ func (x *PodTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2342 int - var yyb2342 bool - var yyhl2342 bool = l >= 0 - yyj2342++ - if yyhl2342 { - yyb2342 = yyj2342 > l + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb2342 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb2342 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -31050,15 +38089,21 @@ func (x *PodTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv12 := &x.Kind + yym13 := 
z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } - yyj2342++ - if yyhl2342 { - yyb2342 = yyj2342 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb2342 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb2342 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -31066,32 +38111,44 @@ func (x *PodTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } - yyj2342++ - if yyhl2342 { - yyb2342 = yyj2342 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb2342 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb2342 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv2345 := &x.ObjectMeta - yyv2345.CodecDecodeSelf(d) + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } } - yyj2342++ - if yyhl2342 { - yyb2342 = yyj2342 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb2342 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb2342 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -31099,21 +38156,21 @@ func (x *PodTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Template = PodTemplateSpec{} } else { - yyv2346 := &x.Template - yyv2346.CodecDecodeSelf(d) + yyv18 := &x.Template + yyv18.CodecDecodeSelf(d) } for { - yyj2342++ - if yyhl2342 { - yyb2342 = yyj2342 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb2342 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb2342 { + if yyb11 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2342-1, "") + z.DecStructFieldNotFound(yyj11-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -31125,37 +38182,37 @@ func (x *PodTemplateList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2347 := z.EncBinary() - _ = yym2347 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2348 := !z.EncBinary() - yy2arr2348 := z.EncBasicHandle().StructToArray - var yyq2348 [4]bool - _, _, _ = yysep2348, yyq2348, yy2arr2348 - const yyr2348 bool = false - yyq2348[0] = x.Kind != "" - yyq2348[1] = x.APIVersion != "" - yyq2348[2] = true - var yynn2348 int - if yyr2348 || yy2arr2348 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn2348 = 1 - for _, b := range yyq2348 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn2348++ + yynn2++ } } - r.EncodeMapStart(yynn2348) - yynn2348 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2348 || yy2arr2348 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2348[0] { - yym2350 := z.EncBinary() - _ = yym2350 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 
if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -31164,23 +38221,23 @@ func (x *PodTemplateList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2348[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2351 := z.EncBinary() - _ = yym2351 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr2348 || yy2arr2348 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2348[1] { - yym2353 := z.EncBinary() - _ = yym2353 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -31189,54 +38246,54 @@ func (x *PodTemplateList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2348[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2354 := z.EncBinary() - _ = yym2354 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr2348 || yy2arr2348 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2348[2] { - yy2356 := &x.ListMeta - yym2357 := z.EncBinary() - _ = yym2357 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy2356) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy2356) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq2348[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2358 := &x.ListMeta - yym2359 := z.EncBinary() - _ = yym2359 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy2358) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy2358) + z.EncFallback(yy12) } } } - if yyr2348 || yy2arr2348 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym2361 := z.EncBinary() - _ = yym2361 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSlicePodTemplate(([]PodTemplate)(x.Items), e) @@ -31249,15 +38306,15 @@ func (x *PodTemplateList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym2362 := z.EncBinary() - _ = yym2362 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSlicePodTemplate(([]PodTemplate)(x.Items), e) } } } - if yyr2348 || yy2arr2348 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -31270,25 +38327,25 @@ func (x *PodTemplateList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2363 := z.DecBinary() - _ = yym2363 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2364 := r.ContainerType() - if yyct2364 == codecSelferValueTypeMap1234 { - yyl2364 := 
r.ReadMapStart() - if yyl2364 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2364, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2364 == codecSelferValueTypeArray1234 { - yyl2364 := r.ReadArrayStart() - if yyl2364 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2364, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -31300,12 +38357,12 @@ func (x *PodTemplateList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2365Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2365Slc - var yyhl2365 bool = l >= 0 - for yyj2365 := 0; ; yyj2365++ { - if yyhl2365 { - if yyj2365 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -31314,51 +38371,63 @@ func (x *PodTemplateList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2365Slc = r.DecodeBytes(yys2365Slc, true, true) - yys2365 := string(yys2365Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2365 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv2368 := &x.ListMeta - yym2369 := z.DecBinary() - _ = yym2369 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv2368) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv2368, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv2370 := &x.Items - yym2371 := z.DecBinary() - _ = yym2371 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSlicePodTemplate((*[]PodTemplate)(yyv2370), d) + h.decSlicePodTemplate((*[]PodTemplate)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys2365) - } // end switch yys2365 - } // end for yyj2365 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -31366,16 +38435,16 @@ func (x *PodTemplateList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2372 int - var yyb2372 bool - var yyhl2372 bool = l >= 0 - yyj2372++ - if yyhl2372 { - yyb2372 = yyj2372 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - 
yyb2372 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2372 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -31383,15 +38452,21 @@ func (x *PodTemplateList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj2372++ - if yyhl2372 { - yyb2372 = yyj2372 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2372 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2372 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -31399,38 +38474,44 @@ func (x *PodTemplateList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj2372++ - if yyhl2372 { - yyb2372 = yyj2372 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2372 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2372 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv2375 := &x.ListMeta - yym2376 := z.DecBinary() - _ = yym2376 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv2375) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv2375, false) + z.DecFallback(yyv17, false) } } - yyj2372++ - if yyhl2372 { - yyb2372 = yyj2372 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2372 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2372 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -31438,26 +38519,26 @@ func (x *PodTemplateList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Items = nil } else { - yyv2377 := &x.Items - yym2378 := z.DecBinary() - _ = yym2378 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSlicePodTemplate((*[]PodTemplate)(yyv2377), d) + h.decSlicePodTemplate((*[]PodTemplate)(yyv19), d) } } for { - yyj2372++ - if yyhl2372 { - yyb2372 = yyj2372 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2372 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2372 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2372-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -31469,73 +38550,73 @@ func (x *ReplicationControllerSpec) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2379 := z.EncBinary() - _ = yym2379 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2380 := !z.EncBinary() - yy2arr2380 := z.EncBasicHandle().StructToArray - var yyq2380 [4]bool - _, _, _ = yysep2380, yyq2380, yy2arr2380 - const yyr2380 bool = false - yyq2380[0] = x.Replicas != nil - yyq2380[1] = x.MinReadySeconds != 0 - yyq2380[2] = len(x.Selector) != 0 - yyq2380[3] = x.Template != nil - var yynn2380 int - if yyr2380 || yy2arr2380 { + yysep2 := !z.EncBinary() + yy2arr2 := 
z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != nil + yyq2[1] = x.MinReadySeconds != 0 + yyq2[2] = len(x.Selector) != 0 + yyq2[3] = x.Template != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn2380 = 0 - for _, b := range yyq2380 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn2380++ + yynn2++ } } - r.EncodeMapStart(yynn2380) - yynn2380 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2380 || yy2arr2380 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2380[0] { + if yyq2[0] { if x.Replicas == nil { r.EncodeNil() } else { - yy2382 := *x.Replicas - yym2383 := z.EncBinary() - _ = yym2383 + yy4 := *x.Replicas + yym5 := z.EncBinary() + _ = yym5 if false { } else { - r.EncodeInt(int64(yy2382)) + r.EncodeInt(int64(yy4)) } } } else { r.EncodeNil() } } else { - if yyq2380[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("replicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Replicas == nil { r.EncodeNil() } else { - yy2384 := *x.Replicas - yym2385 := z.EncBinary() - _ = yym2385 + yy6 := *x.Replicas + yym7 := z.EncBinary() + _ = yym7 if false { } else { - r.EncodeInt(int64(yy2384)) + r.EncodeInt(int64(yy6)) } } } } - if yyr2380 || yy2arr2380 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2380[1] { - yym2387 := z.EncBinary() - _ = yym2387 + if yyq2[1] { + yym9 := z.EncBinary() + _ = yym9 if false { } else { r.EncodeInt(int64(x.MinReadySeconds)) @@ -31544,26 +38625,26 @@ func (x *ReplicationControllerSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq2380[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2388 := z.EncBinary() - _ = yym2388 + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeInt(int64(x.MinReadySeconds)) } } } - if yyr2380 || yy2arr2380 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2380[2] { + if yyq2[2] { if x.Selector == nil { r.EncodeNil() } else { - yym2390 := z.EncBinary() - _ = yym2390 + yym12 := z.EncBinary() + _ = yym12 if false { } else { z.F.EncMapStringStringV(x.Selector, false, e) @@ -31573,15 +38654,15 @@ func (x *ReplicationControllerSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2380[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("selector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Selector == nil { r.EncodeNil() } else { - yym2391 := z.EncBinary() - _ = yym2391 + yym13 := z.EncBinary() + _ = yym13 if false { } else { z.F.EncMapStringStringV(x.Selector, false, e) @@ -31589,9 +38670,9 @@ func (x *ReplicationControllerSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2380 || yy2arr2380 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2380[3] { + if yyq2[3] { if x.Template == nil { r.EncodeNil() } else { @@ -31601,7 +38682,7 @@ func (x *ReplicationControllerSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2380[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) 
r.EncodeString(codecSelferC_UTF81234, string("template")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -31612,7 +38693,7 @@ func (x *ReplicationControllerSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2380 || yy2arr2380 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -31625,25 +38706,25 @@ func (x *ReplicationControllerSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2393 := z.DecBinary() - _ = yym2393 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2394 := r.ContainerType() - if yyct2394 == codecSelferValueTypeMap1234 { - yyl2394 := r.ReadMapStart() - if yyl2394 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2394, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2394 == codecSelferValueTypeArray1234 { - yyl2394 := r.ReadArrayStart() - if yyl2394 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2394, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -31655,12 +38736,12 @@ func (x *ReplicationControllerSpec) codecDecodeSelfFromMap(l int, d *codec1978.D var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2395Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2395Slc - var yyhl2395 bool = l >= 0 - for yyj2395 := 0; ; yyj2395++ { - if yyhl2395 { - if yyj2395 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -31669,10 +38750,10 @@ func (x *ReplicationControllerSpec) codecDecodeSelfFromMap(l int, d *codec1978.D } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2395Slc = r.DecodeBytes(yys2395Slc, true, true) - yys2395 := string(yys2395Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2395 { + switch yys3 { case "replicas": if r.TryDecodeAsNil() { if x.Replicas != nil { @@ -31682,8 +38763,8 @@ func (x *ReplicationControllerSpec) codecDecodeSelfFromMap(l int, d *codec1978.D if x.Replicas == nil { x.Replicas = new(int32) } - yym2397 := z.DecBinary() - _ = yym2397 + yym5 := z.DecBinary() + _ = yym5 if false { } else { *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) @@ -31693,18 +38774,24 @@ func (x *ReplicationControllerSpec) codecDecodeSelfFromMap(l int, d *codec1978.D if r.TryDecodeAsNil() { x.MinReadySeconds = 0 } else { - x.MinReadySeconds = int32(r.DecodeInt(32)) + yyv6 := &x.MinReadySeconds + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } } case "selector": if r.TryDecodeAsNil() { x.Selector = nil } else { - yyv2399 := &x.Selector - yym2400 := z.DecBinary() - _ = yym2400 + yyv8 := &x.Selector + yym9 := z.DecBinary() + _ = yym9 if false { } else { - z.F.DecMapStringStringX(yyv2399, false, d) + z.F.DecMapStringStringX(yyv8, false, d) } } case "template": @@ -31719,9 +38806,9 
@@ func (x *ReplicationControllerSpec) codecDecodeSelfFromMap(l int, d *codec1978.D x.Template.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys2395) - } // end switch yys2395 - } // end for yyj2395 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -31729,16 +38816,16 @@ func (x *ReplicationControllerSpec) codecDecodeSelfFromArray(l int, d *codec1978 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2402 int - var yyb2402 bool - var yyhl2402 bool = l >= 0 - yyj2402++ - if yyhl2402 { - yyb2402 = yyj2402 > l + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb2402 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb2402 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -31751,20 +38838,20 @@ func (x *ReplicationControllerSpec) codecDecodeSelfFromArray(l int, d *codec1978 if x.Replicas == nil { x.Replicas = new(int32) } - yym2404 := z.DecBinary() - _ = yym2404 + yym13 := z.DecBinary() + _ = yym13 if false { } else { *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) } } - yyj2402++ - if yyhl2402 { - yyb2402 = yyj2402 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb2402 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb2402 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -31772,15 +38859,21 @@ func (x *ReplicationControllerSpec) codecDecodeSelfFromArray(l int, d *codec1978 if r.TryDecodeAsNil() { x.MinReadySeconds = 0 } else { - x.MinReadySeconds = int32(r.DecodeInt(32)) + yyv14 := &x.MinReadySeconds + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } } - yyj2402++ - if yyhl2402 { - yyb2402 = yyj2402 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb2402 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb2402 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -31788,21 +38881,21 @@ func (x *ReplicationControllerSpec) codecDecodeSelfFromArray(l int, d *codec1978 if r.TryDecodeAsNil() { x.Selector = nil } else { - yyv2406 := &x.Selector - yym2407 := z.DecBinary() - _ = yym2407 + yyv16 := &x.Selector + yym17 := z.DecBinary() + _ = yym17 if false { } else { - z.F.DecMapStringStringX(yyv2406, false, d) + z.F.DecMapStringStringX(yyv16, false, d) } } - yyj2402++ - if yyhl2402 { - yyb2402 = yyj2402 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb2402 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb2402 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -31818,17 +38911,17 @@ func (x *ReplicationControllerSpec) codecDecodeSelfFromArray(l int, d *codec1978 x.Template.CodecDecodeSelf(d) } for { - yyj2402++ - if yyhl2402 { - yyb2402 = yyj2402 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb2402 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb2402 { + if yyb11 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2402-1, "") + z.DecStructFieldNotFound(yyj11-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -31840,38 +38933,38 @@ func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2409 := z.EncBinary() - _ = yym2409 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { 
} else { - yysep2410 := !z.EncBinary() - yy2arr2410 := z.EncBasicHandle().StructToArray - var yyq2410 [6]bool - _, _, _ = yysep2410, yyq2410, yy2arr2410 - const yyr2410 bool = false - yyq2410[1] = x.FullyLabeledReplicas != 0 - yyq2410[2] = x.ReadyReplicas != 0 - yyq2410[3] = x.AvailableReplicas != 0 - yyq2410[4] = x.ObservedGeneration != 0 - yyq2410[5] = len(x.Conditions) != 0 - var yynn2410 int - if yyr2410 || yy2arr2410 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FullyLabeledReplicas != 0 + yyq2[2] = x.ReadyReplicas != 0 + yyq2[3] = x.AvailableReplicas != 0 + yyq2[4] = x.ObservedGeneration != 0 + yyq2[5] = len(x.Conditions) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(6) } else { - yynn2410 = 1 - for _, b := range yyq2410 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn2410++ + yynn2++ } } - r.EncodeMapStart(yynn2410) - yynn2410 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2410 || yy2arr2410 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2412 := z.EncBinary() - _ = yym2412 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeInt(int64(x.Replicas)) @@ -31880,18 +38973,18 @@ func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("replicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2413 := z.EncBinary() - _ = yym2413 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeInt(int64(x.Replicas)) } } - if yyr2410 || yy2arr2410 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2410[1] { - yym2415 := z.EncBinary() - _ = yym2415 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeInt(int64(x.FullyLabeledReplicas)) @@ -31900,23 +38993,23 @@ func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq2410[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2416 := z.EncBinary() - _ = yym2416 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeInt(int64(x.FullyLabeledReplicas)) } } } - if yyr2410 || yy2arr2410 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2410[2] { - yym2418 := z.EncBinary() - _ = yym2418 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeInt(int64(x.ReadyReplicas)) @@ -31925,23 +39018,23 @@ func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq2410[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("readyReplicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2419 := z.EncBinary() - _ = yym2419 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeInt(int64(x.ReadyReplicas)) } } } - if yyr2410 || yy2arr2410 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2410[3] { - yym2421 := z.EncBinary() - _ = yym2421 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeInt(int64(x.AvailableReplicas)) @@ -31950,23 +39043,23 @@ func (x 
*ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq2410[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2422 := z.EncBinary() - _ = yym2422 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeInt(int64(x.AvailableReplicas)) } } } - if yyr2410 || yy2arr2410 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2410[4] { - yym2424 := z.EncBinary() - _ = yym2424 + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeInt(int64(x.ObservedGeneration)) @@ -31975,26 +39068,26 @@ func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq2410[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2425 := z.EncBinary() - _ = yym2425 + yym17 := z.EncBinary() + _ = yym17 if false { } else { r.EncodeInt(int64(x.ObservedGeneration)) } } } - if yyr2410 || yy2arr2410 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2410[5] { + if yyq2[5] { if x.Conditions == nil { r.EncodeNil() } else { - yym2427 := z.EncBinary() - _ = yym2427 + yym19 := z.EncBinary() + _ = yym19 if false { } else { h.encSliceReplicationControllerCondition(([]ReplicationControllerCondition)(x.Conditions), e) @@ -32004,15 +39097,15 @@ func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2410[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("conditions")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Conditions == nil { r.EncodeNil() } else { - yym2428 := z.EncBinary() - _ = yym2428 + yym20 := z.EncBinary() + _ = yym20 if false { } else { h.encSliceReplicationControllerCondition(([]ReplicationControllerCondition)(x.Conditions), e) @@ -32020,7 +39113,7 @@ func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2410 || yy2arr2410 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -32033,25 +39126,25 @@ func (x *ReplicationControllerStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2429 := z.DecBinary() - _ = yym2429 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2430 := r.ContainerType() - if yyct2430 == codecSelferValueTypeMap1234 { - yyl2430 := r.ReadMapStart() - if yyl2430 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2430, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2430 == codecSelferValueTypeArray1234 { - yyl2430 := r.ReadArrayStart() - if yyl2430 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2430, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else 
{ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -32063,12 +39156,12 @@ func (x *ReplicationControllerStatus) codecDecodeSelfFromMap(l int, d *codec1978 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2431Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2431Slc - var yyhl2431 bool = l >= 0 - for yyj2431 := 0; ; yyj2431++ { - if yyhl2431 { - if yyj2431 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -32077,56 +39170,86 @@ func (x *ReplicationControllerStatus) codecDecodeSelfFromMap(l int, d *codec1978 } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2431Slc = r.DecodeBytes(yys2431Slc, true, true) - yys2431 := string(yys2431Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2431 { + switch yys3 { case "replicas": if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int32(r.DecodeInt(32)) + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } } case "fullyLabeledReplicas": if r.TryDecodeAsNil() { x.FullyLabeledReplicas = 0 } else { - x.FullyLabeledReplicas = int32(r.DecodeInt(32)) + yyv6 := &x.FullyLabeledReplicas + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } } case "readyReplicas": if r.TryDecodeAsNil() { x.ReadyReplicas = 0 } else { - x.ReadyReplicas = int32(r.DecodeInt(32)) + yyv8 := &x.ReadyReplicas + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } } case "availableReplicas": if r.TryDecodeAsNil() { x.AvailableReplicas = 0 } else { - x.AvailableReplicas = int32(r.DecodeInt(32)) + yyv10 := &x.AvailableReplicas + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } } case "observedGeneration": if r.TryDecodeAsNil() { x.ObservedGeneration = 0 } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) + yyv12 := &x.ObservedGeneration + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int64)(yyv12)) = int64(r.DecodeInt(64)) + } } case "conditions": if r.TryDecodeAsNil() { x.Conditions = nil } else { - yyv2437 := &x.Conditions - yym2438 := z.DecBinary() - _ = yym2438 + yyv14 := &x.Conditions + yym15 := z.DecBinary() + _ = yym15 if false { } else { - h.decSliceReplicationControllerCondition((*[]ReplicationControllerCondition)(yyv2437), d) + h.decSliceReplicationControllerCondition((*[]ReplicationControllerCondition)(yyv14), d) } } default: - z.DecStructFieldNotFound(-1, yys2431) - } // end switch yys2431 - } // end for yyj2431 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -32134,16 +39257,16 @@ func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec19 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2439 int - var yyb2439 bool - var yyhl2439 bool = l >= 0 - yyj2439++ - if yyhl2439 { - yyb2439 = yyj2439 > l + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb2439 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb2439 { + if yyb16 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -32151,15 +39274,21 @@ func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int32(r.DecodeInt(32)) + yyv17 := &x.Replicas + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + } } - yyj2439++ - if yyhl2439 { - yyb2439 = yyj2439 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb2439 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb2439 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -32167,15 +39296,21 @@ func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { x.FullyLabeledReplicas = 0 } else { - x.FullyLabeledReplicas = int32(r.DecodeInt(32)) + yyv19 := &x.FullyLabeledReplicas + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int32)(yyv19)) = int32(r.DecodeInt(32)) + } } - yyj2439++ - if yyhl2439 { - yyb2439 = yyj2439 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb2439 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb2439 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -32183,15 +39318,21 @@ func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { x.ReadyReplicas = 0 } else { - x.ReadyReplicas = int32(r.DecodeInt(32)) + yyv21 := &x.ReadyReplicas + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } } - yyj2439++ - if yyhl2439 { - yyb2439 = yyj2439 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb2439 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb2439 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -32199,15 +39340,21 @@ func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { x.AvailableReplicas = 0 } else { - x.AvailableReplicas = int32(r.DecodeInt(32)) + yyv23 := &x.AvailableReplicas + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } } - yyj2439++ - if yyhl2439 { - yyb2439 = yyj2439 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb2439 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb2439 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -32215,15 +39362,21 @@ func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { x.ObservedGeneration = 0 } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) + yyv25 := &x.ObservedGeneration + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int64)(yyv25)) = int64(r.DecodeInt(64)) + } } - yyj2439++ - if yyhl2439 { - yyb2439 = yyj2439 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb2439 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb2439 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -32231,26 +39384,26 @@ func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { x.Conditions = nil } else { - yyv2445 := &x.Conditions - yym2446 := z.DecBinary() - _ = yym2446 + yyv27 := &x.Conditions + yym28 := z.DecBinary() + _ = yym28 if false { } else { - h.decSliceReplicationControllerCondition((*[]ReplicationControllerCondition)(yyv2445), d) + 
h.decSliceReplicationControllerCondition((*[]ReplicationControllerCondition)(yyv27), d) } } for { - yyj2439++ - if yyhl2439 { - yyb2439 = yyj2439 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb2439 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb2439 { + if yyb16 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2439-1, "") + z.DecStructFieldNotFound(yyj16-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -32259,8 +39412,8 @@ func (x ReplicationControllerConditionType) CodecEncodeSelf(e *codec1978.Encoder var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym2447 := z.EncBinary() - _ = yym2447 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -32272,8 +39425,8 @@ func (x *ReplicationControllerConditionType) CodecDecodeSelf(d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2448 := z.DecBinary() - _ = yym2448 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -32288,33 +39441,33 @@ func (x *ReplicationControllerCondition) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2449 := z.EncBinary() - _ = yym2449 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2450 := !z.EncBinary() - yy2arr2450 := z.EncBasicHandle().StructToArray - var yyq2450 [5]bool - _, _, _ = yysep2450, yyq2450, yy2arr2450 - const yyr2450 bool = false - yyq2450[2] = true - yyq2450[3] = x.Reason != "" - yyq2450[4] = x.Message != "" - var yynn2450 int - if yyr2450 || yy2arr2450 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = x.Reason != "" + yyq2[4] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn2450 = 2 - for _, b := range yyq2450 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn2450++ + yynn2++ } } - r.EncodeMapStart(yynn2450) - yynn2450 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2450 || yy2arr2450 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) x.Type.CodecEncodeSelf(e) } else { @@ -32323,7 +39476,7 @@ func (x *ReplicationControllerCondition) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Type.CodecEncodeSelf(e) } - if yyr2450 || yy2arr2450 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) x.Status.CodecEncodeSelf(e) } else { @@ -32332,48 +39485,48 @@ func (x *ReplicationControllerCondition) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Status.CodecEncodeSelf(e) } - if yyr2450 || yy2arr2450 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2450[2] { - yy2454 := &x.LastTransitionTime - yym2455 := z.EncBinary() - _ = yym2455 + if yyq2[2] { + yy10 := &x.LastTransitionTime + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy2454) { - } else if yym2455 { - z.EncBinaryMarshal(yy2454) - } else if !yym2455 && z.IsJSONHandle() { - z.EncJSONMarshal(yy2454) + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) } 
else { - z.EncFallback(yy2454) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq2450[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2456 := &x.LastTransitionTime - yym2457 := z.EncBinary() - _ = yym2457 + yy12 := &x.LastTransitionTime + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy2456) { - } else if yym2457 { - z.EncBinaryMarshal(yy2456) - } else if !yym2457 && z.IsJSONHandle() { - z.EncJSONMarshal(yy2456) + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) } else { - z.EncFallback(yy2456) + z.EncFallback(yy12) } } } - if yyr2450 || yy2arr2450 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2450[3] { - yym2459 := z.EncBinary() - _ = yym2459 + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) @@ -32382,23 +39535,23 @@ func (x *ReplicationControllerCondition) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2450[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("reason")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2460 := z.EncBinary() - _ = yym2460 + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) } } } - if yyr2450 || yy2arr2450 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2450[4] { - yym2462 := z.EncBinary() - _ = yym2462 + if yyq2[4] { + yym18 := z.EncBinary() + _ = yym18 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) @@ -32407,19 +39560,19 @@ func (x *ReplicationControllerCondition) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2450[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("message")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2463 := z.EncBinary() - _ = yym2463 + yym19 := z.EncBinary() + _ = yym19 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) } } } - if yyr2450 || yy2arr2450 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -32432,25 +39585,25 @@ func (x *ReplicationControllerCondition) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2464 := z.DecBinary() - _ = yym2464 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2465 := r.ContainerType() - if yyct2465 == codecSelferValueTypeMap1234 { - yyl2465 := r.ReadMapStart() - if yyl2465 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2465, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2465 == codecSelferValueTypeArray1234 { - yyl2465 := r.ReadArrayStart() - if yyl2465 == 0 { + } else if yyct2 == 
codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2465, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -32462,12 +39615,12 @@ func (x *ReplicationControllerCondition) codecDecodeSelfFromMap(l int, d *codec1 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2466Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2466Slc - var yyhl2466 bool = l >= 0 - for yyj2466 := 0; ; yyj2466++ { - if yyhl2466 { - if yyj2466 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -32476,55 +39629,69 @@ func (x *ReplicationControllerCondition) codecDecodeSelfFromMap(l int, d *codec1 } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2466Slc = r.DecodeBytes(yys2466Slc, true, true) - yys2466 := string(yys2466Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2466 { + switch yys3 { case "type": if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = ReplicationControllerConditionType(r.DecodeString()) + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = "" } else { - x.Status = ConditionStatus(r.DecodeString()) + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) } case "lastTransitionTime": if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} + x.LastTransitionTime = pkg2_v1.Time{} } else { - yyv2469 := &x.LastTransitionTime - yym2470 := z.DecBinary() - _ = yym2470 + yyv6 := &x.LastTransitionTime + yym7 := z.DecBinary() + _ = yym7 if false { - } else if z.HasExtensions() && z.DecExt(yyv2469) { - } else if yym2470 { - z.DecBinaryUnmarshal(yyv2469) - } else if !yym2470 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv2469) + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) } else { - z.DecFallback(yyv2469, false) + z.DecFallback(yyv6, false) } } case "reason": if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv8 := &x.Reason + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } case "message": if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv10 := &x.Message + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys2466) - } // end switch yys2466 - } // end for yyj2466 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -32532,16 +39699,16 @@ func (x *ReplicationControllerCondition) codecDecodeSelfFromArray(l int, d *code var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2473 int - var yyb2473 bool - var yyhl2473 bool = l >= 0 - yyj2473++ - if yyhl2473 { - yyb2473 = yyj2473 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2473 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2473 { + if yyb12 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -32549,15 +39716,16 @@ func (x *ReplicationControllerCondition) codecDecodeSelfFromArray(l int, d *code if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = ReplicationControllerConditionType(r.DecodeString()) + yyv13 := &x.Type + yyv13.CodecDecodeSelf(d) } - yyj2473++ - if yyhl2473 { - yyb2473 = yyj2473 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2473 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2473 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -32565,42 +39733,43 @@ func (x *ReplicationControllerCondition) codecDecodeSelfFromArray(l int, d *code if r.TryDecodeAsNil() { x.Status = "" } else { - x.Status = ConditionStatus(r.DecodeString()) + yyv14 := &x.Status + yyv14.CodecDecodeSelf(d) } - yyj2473++ - if yyhl2473 { - yyb2473 = yyj2473 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2473 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2473 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} + x.LastTransitionTime = pkg2_v1.Time{} } else { - yyv2476 := &x.LastTransitionTime - yym2477 := z.DecBinary() - _ = yym2477 + yyv15 := &x.LastTransitionTime + yym16 := z.DecBinary() + _ = yym16 if false { - } else if z.HasExtensions() && z.DecExt(yyv2476) { - } else if yym2477 { - z.DecBinaryUnmarshal(yyv2476) - } else if !yym2477 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv2476) + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else if yym16 { + z.DecBinaryUnmarshal(yyv15) + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv15) } else { - z.DecFallback(yyv2476, false) + z.DecFallback(yyv15, false) } } - yyj2473++ - if yyhl2473 { - yyb2473 = yyj2473 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2473 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2473 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -32608,15 +39777,21 @@ func (x *ReplicationControllerCondition) codecDecodeSelfFromArray(l int, d *code if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv17 := &x.Reason + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } } - yyj2473++ - if yyhl2473 { - yyb2473 = yyj2473 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2473 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2473 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -32624,20 +39799,26 @@ func (x *ReplicationControllerCondition) codecDecodeSelfFromArray(l int, d *code if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv19 := &x.Message + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } } for { - yyj2473++ - if yyhl2473 { - yyb2473 = yyj2473 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2473 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2473 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2473-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -32649,39 +39830,39 @@ func (x *ReplicationController) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - 
yym2480 := z.EncBinary() - _ = yym2480 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2481 := !z.EncBinary() - yy2arr2481 := z.EncBasicHandle().StructToArray - var yyq2481 [5]bool - _, _, _ = yysep2481, yyq2481, yy2arr2481 - const yyr2481 bool = false - yyq2481[0] = x.Kind != "" - yyq2481[1] = x.APIVersion != "" - yyq2481[2] = true - yyq2481[3] = true - yyq2481[4] = true - var yynn2481 int - if yyr2481 || yy2arr2481 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn2481 = 0 - for _, b := range yyq2481 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn2481++ + yynn2++ } } - r.EncodeMapStart(yynn2481) - yynn2481 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2481 || yy2arr2481 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2481[0] { - yym2483 := z.EncBinary() - _ = yym2483 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -32690,23 +39871,23 @@ func (x *ReplicationController) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2481[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2484 := z.EncBinary() - _ = yym2484 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr2481 || yy2arr2481 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2481[1] { - yym2486 := z.EncBinary() - _ = yym2486 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -32715,70 +39896,82 @@ func (x *ReplicationController) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2481[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2487 := z.EncBinary() - _ = yym2487 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr2481 || yy2arr2481 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2481[2] { - yy2489 := &x.ObjectMeta - yy2489.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq2481[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2490 := &x.ObjectMeta - yy2490.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr2481 || yy2arr2481 { + if yyr2 || yy2arr2 { 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2481[3] { - yy2492 := &x.Spec - yy2492.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq2481[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2493 := &x.Spec - yy2493.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } } - if yyr2481 || yy2arr2481 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2481[4] { - yy2495 := &x.Status - yy2495.CodecEncodeSelf(e) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq2481[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2496 := &x.Status - yy2496.CodecEncodeSelf(e) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } - if yyr2481 || yy2arr2481 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -32791,25 +39984,25 @@ func (x *ReplicationController) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2497 := z.DecBinary() - _ = yym2497 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2498 := r.ContainerType() - if yyct2498 == codecSelferValueTypeMap1234 { - yyl2498 := r.ReadMapStart() - if yyl2498 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2498, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2498 == codecSelferValueTypeArray1234 { - yyl2498 := r.ReadArrayStart() - if yyl2498 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2498, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -32821,12 +40014,12 @@ func (x *ReplicationController) codecDecodeSelfFromMap(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2499Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2499Slc - var yyhl2499 bool = l >= 0 - for yyj2499 := 0; ; yyj2499++ { - if yyhl2499 { - if yyj2499 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -32835,47 +40028,65 @@ func (x *ReplicationController) codecDecodeSelfFromMap(l int, d *codec1978.Decod } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2499Slc = r.DecodeBytes(yys2499Slc, true, true) - yys2499 := string(yys2499Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2499 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + 
*((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv2502 := &x.ObjectMeta - yyv2502.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = ReplicationControllerSpec{} } else { - yyv2503 := &x.Spec - yyv2503.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = ReplicationControllerStatus{} } else { - yyv2504 := &x.Status - yyv2504.CodecDecodeSelf(d) + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys2499) - } // end switch yys2499 - } // end for yyj2499 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -32883,16 +40094,16 @@ func (x *ReplicationController) codecDecodeSelfFromArray(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2505 int - var yyb2505 bool - var yyhl2505 bool = l >= 0 - yyj2505++ - if yyhl2505 { - yyb2505 = yyj2505 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2505 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2505 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -32900,15 +40111,21 @@ func (x *ReplicationController) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj2505++ - if yyhl2505 { - yyb2505 = yyj2505 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2505 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2505 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -32916,32 +40133,44 @@ func (x *ReplicationController) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj2505++ - if yyhl2505 { - yyb2505 = yyj2505 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2505 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2505 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv2508 := &x.ObjectMeta - yyv2508.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj2505++ - if yyhl2505 { - yyb2505 = yyj2505 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2505 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2505 { + if yyb12 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -32949,16 +40178,16 @@ func (x *ReplicationController) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.Spec = ReplicationControllerSpec{} } else { - yyv2509 := &x.Spec - yyv2509.CodecDecodeSelf(d) + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) } - yyj2505++ - if yyhl2505 { - yyb2505 = yyj2505 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2505 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2505 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -32966,21 +40195,21 @@ func (x *ReplicationController) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.Status = ReplicationControllerStatus{} } else { - yyv2510 := &x.Status - yyv2510.CodecDecodeSelf(d) + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) } for { - yyj2505++ - if yyhl2505 { - yyb2505 = yyj2505 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2505 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2505 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2505-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -32992,37 +40221,37 @@ func (x *ReplicationControllerList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2511 := z.EncBinary() - _ = yym2511 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2512 := !z.EncBinary() - yy2arr2512 := z.EncBasicHandle().StructToArray - var yyq2512 [4]bool - _, _, _ = yysep2512, yyq2512, yy2arr2512 - const yyr2512 bool = false - yyq2512[0] = x.Kind != "" - yyq2512[1] = x.APIVersion != "" - yyq2512[2] = true - var yynn2512 int - if yyr2512 || yy2arr2512 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn2512 = 1 - for _, b := range yyq2512 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn2512++ + yynn2++ } } - r.EncodeMapStart(yynn2512) - yynn2512 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2512 || yy2arr2512 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2512[0] { - yym2514 := z.EncBinary() - _ = yym2514 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -33031,23 +40260,23 @@ func (x *ReplicationControllerList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2512[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2515 := z.EncBinary() - _ = yym2515 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr2512 || yy2arr2512 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2512[1] { - yym2517 := z.EncBinary() - _ = yym2517 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -33056,54 +40285,54 @@ func (x *ReplicationControllerList) 
CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2512[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2518 := z.EncBinary() - _ = yym2518 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr2512 || yy2arr2512 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2512[2] { - yy2520 := &x.ListMeta - yym2521 := z.EncBinary() - _ = yym2521 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy2520) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy2520) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq2512[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2522 := &x.ListMeta - yym2523 := z.EncBinary() - _ = yym2523 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy2522) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy2522) + z.EncFallback(yy12) } } } - if yyr2512 || yy2arr2512 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym2525 := z.EncBinary() - _ = yym2525 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceReplicationController(([]ReplicationController)(x.Items), e) @@ -33116,15 +40345,15 @@ func (x *ReplicationControllerList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym2526 := z.EncBinary() - _ = yym2526 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceReplicationController(([]ReplicationController)(x.Items), e) } } } - if yyr2512 || yy2arr2512 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -33137,25 +40366,25 @@ func (x *ReplicationControllerList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2527 := z.DecBinary() - _ = yym2527 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2528 := r.ContainerType() - if yyct2528 == codecSelferValueTypeMap1234 { - yyl2528 := r.ReadMapStart() - if yyl2528 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2528, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2528 == codecSelferValueTypeArray1234 { - yyl2528 := r.ReadArrayStart() - if yyl2528 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2528, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -33167,12 +40396,12 @@ func (x *ReplicationControllerList) codecDecodeSelfFromMap(l int, d *codec1978.D var h codecSelfer1234 z, r := 
codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2529Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2529Slc - var yyhl2529 bool = l >= 0 - for yyj2529 := 0; ; yyj2529++ { - if yyhl2529 { - if yyj2529 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -33181,51 +40410,63 @@ func (x *ReplicationControllerList) codecDecodeSelfFromMap(l int, d *codec1978.D } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2529Slc = r.DecodeBytes(yys2529Slc, true, true) - yys2529 := string(yys2529Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2529 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv2532 := &x.ListMeta - yym2533 := z.DecBinary() - _ = yym2533 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv2532) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv2532, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv2534 := &x.Items - yym2535 := z.DecBinary() - _ = yym2535 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceReplicationController((*[]ReplicationController)(yyv2534), d) + h.decSliceReplicationController((*[]ReplicationController)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys2529) - } // end switch yys2529 - } // end for yyj2529 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -33233,16 +40474,16 @@ func (x *ReplicationControllerList) codecDecodeSelfFromArray(l int, d *codec1978 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2536 int - var yyb2536 bool - var yyhl2536 bool = l >= 0 - yyj2536++ - if yyhl2536 { - yyb2536 = yyj2536 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2536 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2536 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33250,15 +40491,21 @@ func (x *ReplicationControllerList) codecDecodeSelfFromArray(l int, d *codec1978 if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj2536++ - if yyhl2536 { - yyb2536 = yyj2536 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2536 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2536 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33266,38 +40513,44 @@ func (x *ReplicationControllerList) codecDecodeSelfFromArray(l int, 
d *codec1978 if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj2536++ - if yyhl2536 { - yyb2536 = yyj2536 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2536 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2536 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv2539 := &x.ListMeta - yym2540 := z.DecBinary() - _ = yym2540 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv2539) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv2539, false) + z.DecFallback(yyv17, false) } } - yyj2536++ - if yyhl2536 { - yyb2536 = yyj2536 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2536 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2536 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33305,26 +40558,26 @@ func (x *ReplicationControllerList) codecDecodeSelfFromArray(l int, d *codec1978 if r.TryDecodeAsNil() { x.Items = nil } else { - yyv2541 := &x.Items - yym2542 := z.DecBinary() - _ = yym2542 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceReplicationController((*[]ReplicationController)(yyv2541), d) + h.decSliceReplicationController((*[]ReplicationController)(yyv19), d) } } for { - yyj2536++ - if yyhl2536 { - yyb2536 = yyj2536 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2536 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2536 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2536-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -33333,8 +40586,8 @@ func (x ServiceAffinity) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym2543 := z.EncBinary() - _ = yym2543 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -33346,8 +40599,8 @@ func (x *ServiceAffinity) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2544 := z.DecBinary() - _ = yym2544 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -33359,8 +40612,8 @@ func (x ServiceType) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym2545 := z.EncBinary() - _ = yym2545 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -33372,8 +40625,8 @@ func (x *ServiceType) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2546 := z.DecBinary() - _ = yym2546 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -33388,48 +40641,48 @@ func (x *ServiceStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2547 := z.EncBinary() - _ = yym2547 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() 
&& z.EncExt(x) { } else { - yysep2548 := !z.EncBinary() - yy2arr2548 := z.EncBasicHandle().StructToArray - var yyq2548 [1]bool - _, _, _ = yysep2548, yyq2548, yy2arr2548 - const yyr2548 bool = false - yyq2548[0] = true - var yynn2548 int - if yyr2548 || yy2arr2548 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn2548 = 0 - for _, b := range yyq2548 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn2548++ + yynn2++ } } - r.EncodeMapStart(yynn2548) - yynn2548 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2548 || yy2arr2548 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2548[0] { - yy2550 := &x.LoadBalancer - yy2550.CodecEncodeSelf(e) + if yyq2[0] { + yy4 := &x.LoadBalancer + yy4.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq2548[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("loadBalancer")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2551 := &x.LoadBalancer - yy2551.CodecEncodeSelf(e) + yy6 := &x.LoadBalancer + yy6.CodecEncodeSelf(e) } } - if yyr2548 || yy2arr2548 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -33442,25 +40695,25 @@ func (x *ServiceStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2552 := z.DecBinary() - _ = yym2552 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2553 := r.ContainerType() - if yyct2553 == codecSelferValueTypeMap1234 { - yyl2553 := r.ReadMapStart() - if yyl2553 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2553, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2553 == codecSelferValueTypeArray1234 { - yyl2553 := r.ReadArrayStart() - if yyl2553 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2553, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -33472,12 +40725,12 @@ func (x *ServiceStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2554Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2554Slc - var yyhl2554 bool = l >= 0 - for yyj2554 := 0; ; yyj2554++ { - if yyhl2554 { - if yyj2554 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -33486,21 +40739,21 @@ func (x *ServiceStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2554Slc = r.DecodeBytes(yys2554Slc, true, true) - yys2554 := string(yys2554Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch 
yys2554 { + switch yys3 { case "loadBalancer": if r.TryDecodeAsNil() { x.LoadBalancer = LoadBalancerStatus{} } else { - yyv2555 := &x.LoadBalancer - yyv2555.CodecDecodeSelf(d) + yyv4 := &x.LoadBalancer + yyv4.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys2554) - } // end switch yys2554 - } // end for yyj2554 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -33508,16 +40761,16 @@ func (x *ServiceStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2556 int - var yyb2556 bool - var yyhl2556 bool = l >= 0 - yyj2556++ - if yyhl2556 { - yyb2556 = yyj2556 > l + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb2556 = r.CheckBreak() + yyb5 = r.CheckBreak() } - if yyb2556 { + if yyb5 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33525,21 +40778,21 @@ func (x *ServiceStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.LoadBalancer = LoadBalancerStatus{} } else { - yyv2557 := &x.LoadBalancer - yyv2557.CodecDecodeSelf(d) + yyv6 := &x.LoadBalancer + yyv6.CodecDecodeSelf(d) } for { - yyj2556++ - if yyhl2556 { - yyb2556 = yyj2556 > l + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb2556 = r.CheckBreak() + yyb5 = r.CheckBreak() } - if yyb2556 { + if yyb5 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2556-1, "") + z.DecStructFieldNotFound(yyj5-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -33551,38 +40804,38 @@ func (x *LoadBalancerStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2558 := z.EncBinary() - _ = yym2558 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2559 := !z.EncBinary() - yy2arr2559 := z.EncBasicHandle().StructToArray - var yyq2559 [1]bool - _, _, _ = yysep2559, yyq2559, yy2arr2559 - const yyr2559 bool = false - yyq2559[0] = len(x.Ingress) != 0 - var yynn2559 int - if yyr2559 || yy2arr2559 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Ingress) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn2559 = 0 - for _, b := range yyq2559 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn2559++ + yynn2++ } } - r.EncodeMapStart(yynn2559) - yynn2559 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2559 || yy2arr2559 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2559[0] { + if yyq2[0] { if x.Ingress == nil { r.EncodeNil() } else { - yym2561 := z.EncBinary() - _ = yym2561 + yym4 := z.EncBinary() + _ = yym4 if false { } else { h.encSliceLoadBalancerIngress(([]LoadBalancerIngress)(x.Ingress), e) @@ -33592,15 +40845,15 @@ func (x *LoadBalancerStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2559[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("ingress")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Ingress == nil { r.EncodeNil() } else { - yym2562 := z.EncBinary() - _ = yym2562 + yym5 := z.EncBinary() + _ = yym5 if false { } else { 
h.encSliceLoadBalancerIngress(([]LoadBalancerIngress)(x.Ingress), e) @@ -33608,7 +40861,7 @@ func (x *LoadBalancerStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2559 || yy2arr2559 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -33621,25 +40874,25 @@ func (x *LoadBalancerStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2563 := z.DecBinary() - _ = yym2563 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2564 := r.ContainerType() - if yyct2564 == codecSelferValueTypeMap1234 { - yyl2564 := r.ReadMapStart() - if yyl2564 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2564, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2564 == codecSelferValueTypeArray1234 { - yyl2564 := r.ReadArrayStart() - if yyl2564 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2564, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -33651,12 +40904,12 @@ func (x *LoadBalancerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2565Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2565Slc - var yyhl2565 bool = l >= 0 - for yyj2565 := 0; ; yyj2565++ { - if yyhl2565 { - if yyj2565 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -33665,26 +40918,26 @@ func (x *LoadBalancerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2565Slc = r.DecodeBytes(yys2565Slc, true, true) - yys2565 := string(yys2565Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2565 { + switch yys3 { case "ingress": if r.TryDecodeAsNil() { x.Ingress = nil } else { - yyv2566 := &x.Ingress - yym2567 := z.DecBinary() - _ = yym2567 + yyv4 := &x.Ingress + yym5 := z.DecBinary() + _ = yym5 if false { } else { - h.decSliceLoadBalancerIngress((*[]LoadBalancerIngress)(yyv2566), d) + h.decSliceLoadBalancerIngress((*[]LoadBalancerIngress)(yyv4), d) } } default: - z.DecStructFieldNotFound(-1, yys2565) - } // end switch yys2565 - } // end for yyj2565 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -33692,16 +40945,16 @@ func (x *LoadBalancerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2568 int - var yyb2568 bool - var yyhl2568 bool = l >= 0 - yyj2568++ - if yyhl2568 { - yyb2568 = yyj2568 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb2568 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb2568 { + if yyb6 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33709,26 +40962,26 @@ func (x *LoadBalancerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.Ingress = nil } else { - yyv2569 := &x.Ingress - yym2570 := z.DecBinary() - _ = yym2570 + yyv7 := &x.Ingress + yym8 := z.DecBinary() + _ = yym8 if false { } else { - h.decSliceLoadBalancerIngress((*[]LoadBalancerIngress)(yyv2569), d) + h.decSliceLoadBalancerIngress((*[]LoadBalancerIngress)(yyv7), d) } } for { - yyj2568++ - if yyhl2568 { - yyb2568 = yyj2568 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb2568 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb2568 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2568-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -33740,36 +40993,36 @@ func (x *LoadBalancerIngress) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2571 := z.EncBinary() - _ = yym2571 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2572 := !z.EncBinary() - yy2arr2572 := z.EncBasicHandle().StructToArray - var yyq2572 [2]bool - _, _, _ = yysep2572, yyq2572, yy2arr2572 - const yyr2572 bool = false - yyq2572[0] = x.IP != "" - yyq2572[1] = x.Hostname != "" - var yynn2572 int - if yyr2572 || yy2arr2572 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.IP != "" + yyq2[1] = x.Hostname != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn2572 = 0 - for _, b := range yyq2572 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn2572++ + yynn2++ } } - r.EncodeMapStart(yynn2572) - yynn2572 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2572 || yy2arr2572 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2572[0] { - yym2574 := z.EncBinary() - _ = yym2574 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.IP)) @@ -33778,23 +41031,23 @@ func (x *LoadBalancerIngress) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2572[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("ip")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2575 := z.EncBinary() - _ = yym2575 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.IP)) } } } - if yyr2572 || yy2arr2572 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2572[1] { - yym2577 := z.EncBinary() - _ = yym2577 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) @@ -33803,19 +41056,19 @@ func (x *LoadBalancerIngress) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2572[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("hostname")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2578 := z.EncBinary() - _ = yym2578 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) } } } - if 
yyr2572 || yy2arr2572 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -33828,25 +41081,25 @@ func (x *LoadBalancerIngress) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2579 := z.DecBinary() - _ = yym2579 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2580 := r.ContainerType() - if yyct2580 == codecSelferValueTypeMap1234 { - yyl2580 := r.ReadMapStart() - if yyl2580 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2580, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2580 == codecSelferValueTypeArray1234 { - yyl2580 := r.ReadArrayStart() - if yyl2580 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2580, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -33858,12 +41111,12 @@ func (x *LoadBalancerIngress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2581Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2581Slc - var yyhl2581 bool = l >= 0 - for yyj2581 := 0; ; yyj2581++ { - if yyhl2581 { - if yyj2581 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -33872,26 +41125,38 @@ func (x *LoadBalancerIngress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2581Slc = r.DecodeBytes(yys2581Slc, true, true) - yys2581 := string(yys2581Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2581 { + switch yys3 { case "ip": if r.TryDecodeAsNil() { x.IP = "" } else { - x.IP = string(r.DecodeString()) + yyv4 := &x.IP + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "hostname": if r.TryDecodeAsNil() { x.Hostname = "" } else { - x.Hostname = string(r.DecodeString()) + yyv6 := &x.Hostname + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys2581) - } // end switch yys2581 - } // end for yyj2581 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -33899,16 +41164,16 @@ func (x *LoadBalancerIngress) codecDecodeSelfFromArray(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2584 int - var yyb2584 bool - var yyhl2584 bool = l >= 0 - yyj2584++ - if yyhl2584 { - yyb2584 = yyj2584 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb2584 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb2584 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33916,15 +41181,21 @@ func (x 
*LoadBalancerIngress) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.IP = "" } else { - x.IP = string(r.DecodeString()) + yyv9 := &x.IP + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } } - yyj2584++ - if yyhl2584 { - yyb2584 = yyj2584 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb2584 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb2584 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33932,20 +41203,26 @@ func (x *LoadBalancerIngress) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Hostname = "" } else { - x.Hostname = string(r.DecodeString()) + yyv11 := &x.Hostname + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } for { - yyj2584++ - if yyhl2584 { - yyb2584 = yyj2584 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb2584 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb2584 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2584-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -33957,73 +41234,80 @@ func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2587 := z.EncBinary() - _ = yym2587 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2588 := !z.EncBinary() - yy2arr2588 := z.EncBasicHandle().StructToArray - var yyq2588 [10]bool - _, _, _ = yysep2588, yyq2588, yy2arr2588 - const yyr2588 bool = false - yyq2588[1] = len(x.Selector) != 0 - yyq2588[2] = x.ClusterIP != "" - yyq2588[3] = x.Type != "" - yyq2588[4] = len(x.ExternalIPs) != 0 - yyq2588[5] = len(x.DeprecatedPublicIPs) != 0 - yyq2588[6] = x.SessionAffinity != "" - yyq2588[7] = x.LoadBalancerIP != "" - yyq2588[8] = len(x.LoadBalancerSourceRanges) != 0 - yyq2588[9] = x.ExternalName != "" - var yynn2588 int - if yyr2588 || yy2arr2588 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Ports) != 0 + yyq2[1] = len(x.Selector) != 0 + yyq2[2] = x.ClusterIP != "" + yyq2[3] = x.Type != "" + yyq2[4] = len(x.ExternalIPs) != 0 + yyq2[5] = len(x.DeprecatedPublicIPs) != 0 + yyq2[6] = x.SessionAffinity != "" + yyq2[7] = x.LoadBalancerIP != "" + yyq2[8] = len(x.LoadBalancerSourceRanges) != 0 + yyq2[9] = x.ExternalName != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(10) } else { - yynn2588 = 1 - for _, b := range yyq2588 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn2588++ + yynn2++ } } - r.EncodeMapStart(yynn2588) - yynn2588 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2588 || yy2arr2588 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Ports == nil { - r.EncodeNil() - } else { - yym2590 := z.EncBinary() - _ = yym2590 - if false { + if yyq2[0] { + if x.Ports == nil { + r.EncodeNil() } else { - h.encSliceServicePort(([]ServicePort)(x.Ports), e) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceServicePort(([]ServicePort)(x.Ports), e) + } } + } else { + r.EncodeNil() } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ports")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ports == nil { - 
r.EncodeNil() - } else { - yym2591 := z.EncBinary() - _ = yym2591 - if false { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ports")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ports == nil { + r.EncodeNil() } else { - h.encSliceServicePort(([]ServicePort)(x.Ports), e) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceServicePort(([]ServicePort)(x.Ports), e) + } } } } - if yyr2588 || yy2arr2588 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2588[1] { + if yyq2[1] { if x.Selector == nil { r.EncodeNil() } else { - yym2593 := z.EncBinary() - _ = yym2593 + yym7 := z.EncBinary() + _ = yym7 if false { } else { z.F.EncMapStringStringV(x.Selector, false, e) @@ -34033,15 +41317,15 @@ func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2588[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("selector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Selector == nil { r.EncodeNil() } else { - yym2594 := z.EncBinary() - _ = yym2594 + yym8 := z.EncBinary() + _ = yym8 if false { } else { z.F.EncMapStringStringV(x.Selector, false, e) @@ -34049,11 +41333,11 @@ func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2588 || yy2arr2588 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2588[2] { - yym2596 := z.EncBinary() - _ = yym2596 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ClusterIP)) @@ -34062,41 +41346,41 @@ func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2588[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("clusterIP")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2597 := z.EncBinary() - _ = yym2597 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ClusterIP)) } } } - if yyr2588 || yy2arr2588 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2588[3] { + if yyq2[3] { x.Type.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2588[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("type")) z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Type.CodecEncodeSelf(e) } } - if yyr2588 || yy2arr2588 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2588[4] { + if yyq2[4] { if x.ExternalIPs == nil { r.EncodeNil() } else { - yym2600 := z.EncBinary() - _ = yym2600 + yym16 := z.EncBinary() + _ = yym16 if false { } else { z.F.EncSliceStringV(x.ExternalIPs, false, e) @@ -34106,15 +41390,15 @@ func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2588[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("externalIPs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.ExternalIPs == nil { r.EncodeNil() } else { - yym2601 := z.EncBinary() - _ = yym2601 + yym17 := z.EncBinary() + _ = yym17 if false { } else { z.F.EncSliceStringV(x.ExternalIPs, 
false, e) @@ -34122,14 +41406,14 @@ func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2588 || yy2arr2588 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2588[5] { + if yyq2[5] { if x.DeprecatedPublicIPs == nil { r.EncodeNil() } else { - yym2603 := z.EncBinary() - _ = yym2603 + yym19 := z.EncBinary() + _ = yym19 if false { } else { z.F.EncSliceStringV(x.DeprecatedPublicIPs, false, e) @@ -34139,15 +41423,15 @@ func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2588[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("deprecatedPublicIPs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.DeprecatedPublicIPs == nil { r.EncodeNil() } else { - yym2604 := z.EncBinary() - _ = yym2604 + yym20 := z.EncBinary() + _ = yym20 if false { } else { z.F.EncSliceStringV(x.DeprecatedPublicIPs, false, e) @@ -34155,26 +41439,26 @@ func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2588 || yy2arr2588 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2588[6] { + if yyq2[6] { x.SessionAffinity.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2588[6] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("sessionAffinity")) z.EncSendContainerState(codecSelfer_containerMapValue1234) x.SessionAffinity.CodecEncodeSelf(e) } } - if yyr2588 || yy2arr2588 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2588[7] { - yym2607 := z.EncBinary() - _ = yym2607 + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.LoadBalancerIP)) @@ -34183,26 +41467,26 @@ func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2588[7] { + if yyq2[7] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("loadBalancerIP")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2608 := z.EncBinary() - _ = yym2608 + yym26 := z.EncBinary() + _ = yym26 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.LoadBalancerIP)) } } } - if yyr2588 || yy2arr2588 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2588[8] { + if yyq2[8] { if x.LoadBalancerSourceRanges == nil { r.EncodeNil() } else { - yym2610 := z.EncBinary() - _ = yym2610 + yym28 := z.EncBinary() + _ = yym28 if false { } else { z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e) @@ -34212,15 +41496,15 @@ func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2588[8] { + if yyq2[8] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("loadBalancerSourceRanges")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.LoadBalancerSourceRanges == nil { r.EncodeNil() } else { - yym2611 := z.EncBinary() - _ = yym2611 + yym29 := z.EncBinary() + _ = yym29 if false { } else { z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e) @@ -34228,11 +41512,11 @@ func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2588 || yy2arr2588 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if 
yyq2588[9] { - yym2613 := z.EncBinary() - _ = yym2613 + if yyq2[9] { + yym31 := z.EncBinary() + _ = yym31 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ExternalName)) @@ -34241,19 +41525,19 @@ func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2588[9] { + if yyq2[9] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("externalName")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2614 := z.EncBinary() - _ = yym2614 + yym32 := z.EncBinary() + _ = yym32 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ExternalName)) } } } - if yyr2588 || yy2arr2588 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -34266,25 +41550,25 @@ func (x *ServiceSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2615 := z.DecBinary() - _ = yym2615 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2616 := r.ContainerType() - if yyct2616 == codecSelferValueTypeMap1234 { - yyl2616 := r.ReadMapStart() - if yyl2616 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2616, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2616 == codecSelferValueTypeArray1234 { - yyl2616 := r.ReadArrayStart() - if yyl2616 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2616, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -34296,12 +41580,12 @@ func (x *ServiceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2617Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2617Slc - var yyhl2617 bool = l >= 0 - for yyj2617 := 0; ; yyj2617++ { - if yyhl2617 { - if yyj2617 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -34310,104 +41594,124 @@ func (x *ServiceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2617Slc = r.DecodeBytes(yys2617Slc, true, true) - yys2617 := string(yys2617Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2617 { + switch yys3 { case "ports": if r.TryDecodeAsNil() { x.Ports = nil } else { - yyv2618 := &x.Ports - yym2619 := z.DecBinary() - _ = yym2619 + yyv4 := &x.Ports + yym5 := z.DecBinary() + _ = yym5 if false { } else { - h.decSliceServicePort((*[]ServicePort)(yyv2618), d) + h.decSliceServicePort((*[]ServicePort)(yyv4), d) } } case "selector": if r.TryDecodeAsNil() { x.Selector = nil } else { - yyv2620 := &x.Selector - yym2621 := z.DecBinary() - _ = yym2621 + yyv6 := &x.Selector + yym7 := z.DecBinary() + _ = yym7 if false { } else { - z.F.DecMapStringStringX(yyv2620, false, d) + 
z.F.DecMapStringStringX(yyv6, false, d) } } case "clusterIP": if r.TryDecodeAsNil() { x.ClusterIP = "" } else { - x.ClusterIP = string(r.DecodeString()) + yyv8 := &x.ClusterIP + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } case "type": if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = ServiceType(r.DecodeString()) + yyv10 := &x.Type + yyv10.CodecDecodeSelf(d) } case "externalIPs": if r.TryDecodeAsNil() { x.ExternalIPs = nil } else { - yyv2624 := &x.ExternalIPs - yym2625 := z.DecBinary() - _ = yym2625 + yyv11 := &x.ExternalIPs + yym12 := z.DecBinary() + _ = yym12 if false { } else { - z.F.DecSliceStringX(yyv2624, false, d) + z.F.DecSliceStringX(yyv11, false, d) } } case "deprecatedPublicIPs": if r.TryDecodeAsNil() { x.DeprecatedPublicIPs = nil } else { - yyv2626 := &x.DeprecatedPublicIPs - yym2627 := z.DecBinary() - _ = yym2627 + yyv13 := &x.DeprecatedPublicIPs + yym14 := z.DecBinary() + _ = yym14 if false { } else { - z.F.DecSliceStringX(yyv2626, false, d) + z.F.DecSliceStringX(yyv13, false, d) } } case "sessionAffinity": if r.TryDecodeAsNil() { x.SessionAffinity = "" } else { - x.SessionAffinity = ServiceAffinity(r.DecodeString()) + yyv15 := &x.SessionAffinity + yyv15.CodecDecodeSelf(d) } case "loadBalancerIP": if r.TryDecodeAsNil() { x.LoadBalancerIP = "" } else { - x.LoadBalancerIP = string(r.DecodeString()) + yyv16 := &x.LoadBalancerIP + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } } case "loadBalancerSourceRanges": if r.TryDecodeAsNil() { x.LoadBalancerSourceRanges = nil } else { - yyv2630 := &x.LoadBalancerSourceRanges - yym2631 := z.DecBinary() - _ = yym2631 + yyv18 := &x.LoadBalancerSourceRanges + yym19 := z.DecBinary() + _ = yym19 if false { } else { - z.F.DecSliceStringX(yyv2630, false, d) + z.F.DecSliceStringX(yyv18, false, d) } } case "externalName": if r.TryDecodeAsNil() { x.ExternalName = "" } else { - x.ExternalName = string(r.DecodeString()) + yyv20 := &x.ExternalName + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(yyv20)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys2617) - } // end switch yys2617 - } // end for yyj2617 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -34415,16 +41719,16 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2633 int - var yyb2633 bool - var yyhl2633 bool = l >= 0 - yyj2633++ - if yyhl2633 { - yyb2633 = yyj2633 > l + var yyj22 int + var yyb22 bool + var yyhl22 bool = l >= 0 + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l } else { - yyb2633 = r.CheckBreak() + yyb22 = r.CheckBreak() } - if yyb2633 { + if yyb22 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -34432,21 +41736,21 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Ports = nil } else { - yyv2634 := &x.Ports - yym2635 := z.DecBinary() - _ = yym2635 + yyv23 := &x.Ports + yym24 := z.DecBinary() + _ = yym24 if false { } else { - h.decSliceServicePort((*[]ServicePort)(yyv2634), d) + h.decSliceServicePort((*[]ServicePort)(yyv23), d) } } - yyj2633++ - if yyhl2633 { - yyb2633 = yyj2633 > l + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l } else { - yyb2633 = r.CheckBreak() + yyb22 = r.CheckBreak() } - if yyb2633 { + if yyb22 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -34454,21 +41758,21 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Selector = nil } else { - yyv2636 := &x.Selector - yym2637 := z.DecBinary() - _ = yym2637 + yyv25 := &x.Selector + yym26 := z.DecBinary() + _ = yym26 if false { } else { - z.F.DecMapStringStringX(yyv2636, false, d) + z.F.DecMapStringStringX(yyv25, false, d) } } - yyj2633++ - if yyhl2633 { - yyb2633 = yyj2633 > l + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l } else { - yyb2633 = r.CheckBreak() + yyb22 = r.CheckBreak() } - if yyb2633 { + if yyb22 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -34476,15 +41780,21 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ClusterIP = "" } else { - x.ClusterIP = string(r.DecodeString()) + yyv27 := &x.ClusterIP + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } } - yyj2633++ - if yyhl2633 { - yyb2633 = yyj2633 > l + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l } else { - yyb2633 = r.CheckBreak() + yyb22 = r.CheckBreak() } - if yyb2633 { + if yyb22 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -34492,15 +41802,16 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = ServiceType(r.DecodeString()) + yyv29 := &x.Type + yyv29.CodecDecodeSelf(d) } - yyj2633++ - if yyhl2633 { - yyb2633 = yyj2633 > l + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l } else { - yyb2633 = r.CheckBreak() + yyb22 = r.CheckBreak() } - if yyb2633 { + if yyb22 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -34508,21 +41819,21 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ExternalIPs = nil } else { - yyv2640 := &x.ExternalIPs - yym2641 := z.DecBinary() - _ = yym2641 + yyv30 := &x.ExternalIPs + yym31 := z.DecBinary() + _ = yym31 if false { } else { - z.F.DecSliceStringX(yyv2640, false, d) + z.F.DecSliceStringX(yyv30, false, d) } } - yyj2633++ - if yyhl2633 { - yyb2633 = yyj2633 > l + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l } else { - yyb2633 = r.CheckBreak() + yyb22 = r.CheckBreak() } - if yyb2633 { + if yyb22 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -34530,21 +41841,21 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.DeprecatedPublicIPs = nil } else { - yyv2642 := &x.DeprecatedPublicIPs - yym2643 := z.DecBinary() - _ = yym2643 + yyv32 := &x.DeprecatedPublicIPs + yym33 := z.DecBinary() + _ = yym33 if false { } else { - z.F.DecSliceStringX(yyv2642, false, d) + z.F.DecSliceStringX(yyv32, false, d) } } - yyj2633++ - if yyhl2633 { - yyb2633 = yyj2633 > l + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l } else { - yyb2633 = r.CheckBreak() + yyb22 = r.CheckBreak() } - if yyb2633 { + if yyb22 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -34552,15 +41863,16 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.SessionAffinity = "" } else { - x.SessionAffinity = ServiceAffinity(r.DecodeString()) + yyv34 := &x.SessionAffinity + yyv34.CodecDecodeSelf(d) } - yyj2633++ - if yyhl2633 { - yyb2633 = yyj2633 > l + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l } else { - yyb2633 = r.CheckBreak() + yyb22 = r.CheckBreak() } - if 
yyb2633 { + if yyb22 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -34568,15 +41880,21 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.LoadBalancerIP = "" } else { - x.LoadBalancerIP = string(r.DecodeString()) + yyv35 := &x.LoadBalancerIP + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*string)(yyv35)) = r.DecodeString() + } } - yyj2633++ - if yyhl2633 { - yyb2633 = yyj2633 > l + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l } else { - yyb2633 = r.CheckBreak() + yyb22 = r.CheckBreak() } - if yyb2633 { + if yyb22 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -34584,21 +41902,21 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.LoadBalancerSourceRanges = nil } else { - yyv2646 := &x.LoadBalancerSourceRanges - yym2647 := z.DecBinary() - _ = yym2647 + yyv37 := &x.LoadBalancerSourceRanges + yym38 := z.DecBinary() + _ = yym38 if false { } else { - z.F.DecSliceStringX(yyv2646, false, d) + z.F.DecSliceStringX(yyv37, false, d) } } - yyj2633++ - if yyhl2633 { - yyb2633 = yyj2633 > l + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l } else { - yyb2633 = r.CheckBreak() + yyb22 = r.CheckBreak() } - if yyb2633 { + if yyb22 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -34606,20 +41924,26 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ExternalName = "" } else { - x.ExternalName = string(r.DecodeString()) + yyv39 := &x.ExternalName + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*string)(yyv39)) = r.DecodeString() + } } for { - yyj2633++ - if yyhl2633 { - yyb2633 = yyj2633 > l + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l } else { - yyb2633 = r.CheckBreak() + yyb22 = r.CheckBreak() } - if yyb2633 { + if yyb22 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2633-1, "") + z.DecStructFieldNotFound(yyj22-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -34631,38 +41955,38 @@ func (x *ServicePort) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2649 := z.EncBinary() - _ = yym2649 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2650 := !z.EncBinary() - yy2arr2650 := z.EncBasicHandle().StructToArray - var yyq2650 [5]bool - _, _, _ = yysep2650, yyq2650, yy2arr2650 - const yyr2650 bool = false - yyq2650[0] = x.Name != "" - yyq2650[1] = x.Protocol != "" - yyq2650[3] = true - yyq2650[4] = x.NodePort != 0 - var yynn2650 int - if yyr2650 || yy2arr2650 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = x.Protocol != "" + yyq2[3] = true + yyq2[4] = x.NodePort != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn2650 = 1 - for _, b := range yyq2650 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn2650++ + yynn2++ } } - r.EncodeMapStart(yynn2650) - yynn2650 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2650 || yy2arr2650 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2650[0] { - yym2652 := z.EncBinary() - _ = yym2652 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) @@ -34671,37 
+41995,37 @@ func (x *ServicePort) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2650[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("name")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2653 := z.EncBinary() - _ = yym2653 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } } - if yyr2650 || yy2arr2650 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2650[1] { + if yyq2[1] { x.Protocol.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2650[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("protocol")) z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Protocol.CodecEncodeSelf(e) } } - if yyr2650 || yy2arr2650 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2656 := z.EncBinary() - _ = yym2656 + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeInt(int64(x.Port)) @@ -34710,51 +42034,51 @@ func (x *ServicePort) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("port")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2657 := z.EncBinary() - _ = yym2657 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeInt(int64(x.Port)) } } - if yyr2650 || yy2arr2650 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2650[3] { - yy2659 := &x.TargetPort - yym2660 := z.EncBinary() - _ = yym2660 + if yyq2[3] { + yy13 := &x.TargetPort + yym14 := z.EncBinary() + _ = yym14 if false { - } else if z.HasExtensions() && z.EncExt(yy2659) { - } else if !yym2660 && z.IsJSONHandle() { - z.EncJSONMarshal(yy2659) + } else if z.HasExtensions() && z.EncExt(yy13) { + } else if !yym14 && z.IsJSONHandle() { + z.EncJSONMarshal(yy13) } else { - z.EncFallback(yy2659) + z.EncFallback(yy13) } } else { r.EncodeNil() } } else { - if yyq2650[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("targetPort")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2661 := &x.TargetPort - yym2662 := z.EncBinary() - _ = yym2662 + yy15 := &x.TargetPort + yym16 := z.EncBinary() + _ = yym16 if false { - } else if z.HasExtensions() && z.EncExt(yy2661) { - } else if !yym2662 && z.IsJSONHandle() { - z.EncJSONMarshal(yy2661) + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) } else { - z.EncFallback(yy2661) + z.EncFallback(yy15) } } } - if yyr2650 || yy2arr2650 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2650[4] { - yym2664 := z.EncBinary() - _ = yym2664 + if yyq2[4] { + yym18 := z.EncBinary() + _ = yym18 if false { } else { r.EncodeInt(int64(x.NodePort)) @@ -34763,19 +42087,19 @@ func (x *ServicePort) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq2650[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nodePort")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2665 := z.EncBinary() - _ = yym2665 + yym19 := z.EncBinary() + _ = yym19 if false { } else { 
r.EncodeInt(int64(x.NodePort)) } } } - if yyr2650 || yy2arr2650 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -34788,25 +42112,25 @@ func (x *ServicePort) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2666 := z.DecBinary() - _ = yym2666 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2667 := r.ContainerType() - if yyct2667 == codecSelferValueTypeMap1234 { - yyl2667 := r.ReadMapStart() - if yyl2667 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2667, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2667 == codecSelferValueTypeArray1234 { - yyl2667 := r.ReadArrayStart() - if yyl2667 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2667, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -34818,12 +42142,12 @@ func (x *ServicePort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2668Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2668Slc - var yyhl2668 bool = l >= 0 - for yyj2668 := 0; ; yyj2668++ { - if yyhl2668 { - if yyj2668 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -34832,53 +42156,72 @@ func (x *ServicePort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2668Slc = r.DecodeBytes(yys2668Slc, true, true) - yys2668 := string(yys2668Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2668 { + switch yys3 { case "name": if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "protocol": if r.TryDecodeAsNil() { x.Protocol = "" } else { - x.Protocol = Protocol(r.DecodeString()) + yyv6 := &x.Protocol + yyv6.CodecDecodeSelf(d) } case "port": if r.TryDecodeAsNil() { x.Port = 0 } else { - x.Port = int32(r.DecodeInt(32)) + yyv7 := &x.Port + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } } case "targetPort": if r.TryDecodeAsNil() { x.TargetPort = pkg4_intstr.IntOrString{} } else { - yyv2672 := &x.TargetPort - yym2673 := z.DecBinary() - _ = yym2673 + yyv9 := &x.TargetPort + yym10 := z.DecBinary() + _ = yym10 if false { - } else if z.HasExtensions() && z.DecExt(yyv2672) { - } else if !yym2673 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv2672) + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv9) } else { - z.DecFallback(yyv2672, false) + z.DecFallback(yyv9, false) } } case "nodePort": if r.TryDecodeAsNil() { x.NodePort = 0 } else { - x.NodePort = int32(r.DecodeInt(32)) + yyv11 := 
&x.NodePort + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(yyv11)) = int32(r.DecodeInt(32)) + } } default: - z.DecStructFieldNotFound(-1, yys2668) - } // end switch yys2668 - } // end for yyj2668 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -34886,16 +42229,16 @@ func (x *ServicePort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2675 int - var yyb2675 bool - var yyhl2675 bool = l >= 0 - yyj2675++ - if yyhl2675 { - yyb2675 = yyj2675 > l + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb2675 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb2675 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -34903,15 +42246,21 @@ func (x *ServicePort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv14 := &x.Name + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } - yyj2675++ - if yyhl2675 { - yyb2675 = yyj2675 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb2675 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb2675 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -34919,15 +42268,16 @@ func (x *ServicePort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Protocol = "" } else { - x.Protocol = Protocol(r.DecodeString()) + yyv16 := &x.Protocol + yyv16.CodecDecodeSelf(d) } - yyj2675++ - if yyhl2675 { - yyb2675 = yyj2675 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb2675 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb2675 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -34935,15 +42285,21 @@ func (x *ServicePort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Port = 0 } else { - x.Port = int32(r.DecodeInt(32)) + yyv17 := &x.Port + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + } } - yyj2675++ - if yyhl2675 { - yyb2675 = yyj2675 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb2675 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb2675 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -34951,24 +42307,24 @@ func (x *ServicePort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.TargetPort = pkg4_intstr.IntOrString{} } else { - yyv2679 := &x.TargetPort - yym2680 := z.DecBinary() - _ = yym2680 + yyv19 := &x.TargetPort + yym20 := z.DecBinary() + _ = yym20 if false { - } else if z.HasExtensions() && z.DecExt(yyv2679) { - } else if !yym2680 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv2679) + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) } else { - z.DecFallback(yyv2679, false) + z.DecFallback(yyv19, false) } } - yyj2675++ - if yyhl2675 { - yyb2675 = yyj2675 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb2675 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb2675 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -34976,20 +42332,26 @@ func (x *ServicePort) codecDecodeSelfFromArray(l int, d 
*codec1978.Decoder) { if r.TryDecodeAsNil() { x.NodePort = 0 } else { - x.NodePort = int32(r.DecodeInt(32)) + yyv21 := &x.NodePort + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } } for { - yyj2675++ - if yyhl2675 { - yyb2675 = yyj2675 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb2675 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb2675 { + if yyb13 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2675-1, "") + z.DecStructFieldNotFound(yyj13-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -35001,39 +42363,39 @@ func (x *Service) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2682 := z.EncBinary() - _ = yym2682 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2683 := !z.EncBinary() - yy2arr2683 := z.EncBasicHandle().StructToArray - var yyq2683 [5]bool - _, _, _ = yysep2683, yyq2683, yy2arr2683 - const yyr2683 bool = false - yyq2683[0] = x.Kind != "" - yyq2683[1] = x.APIVersion != "" - yyq2683[2] = true - yyq2683[3] = true - yyq2683[4] = true - var yynn2683 int - if yyr2683 || yy2arr2683 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn2683 = 0 - for _, b := range yyq2683 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn2683++ + yynn2++ } } - r.EncodeMapStart(yynn2683) - yynn2683 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2683 || yy2arr2683 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2683[0] { - yym2685 := z.EncBinary() - _ = yym2685 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -35042,23 +42404,23 @@ func (x *Service) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2683[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2686 := z.EncBinary() - _ = yym2686 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr2683 || yy2arr2683 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2683[1] { - yym2688 := z.EncBinary() - _ = yym2688 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -35067,70 +42429,82 @@ func (x *Service) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2683[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2689 := z.EncBinary() - _ = yym2689 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr2683 || yy2arr2683 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2683[2] { - yy2691 := &x.ObjectMeta - 
yy2691.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq2683[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2692 := &x.ObjectMeta - yy2692.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr2683 || yy2arr2683 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2683[3] { - yy2694 := &x.Spec - yy2694.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq2683[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2695 := &x.Spec - yy2695.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } } - if yyr2683 || yy2arr2683 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2683[4] { - yy2697 := &x.Status - yy2697.CodecEncodeSelf(e) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq2683[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2698 := &x.Status - yy2698.CodecEncodeSelf(e) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } - if yyr2683 || yy2arr2683 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -35143,25 +42517,25 @@ func (x *Service) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2699 := z.DecBinary() - _ = yym2699 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2700 := r.ContainerType() - if yyct2700 == codecSelferValueTypeMap1234 { - yyl2700 := r.ReadMapStart() - if yyl2700 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2700, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2700 == codecSelferValueTypeArray1234 { - yyl2700 := r.ReadArrayStart() - if yyl2700 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2700, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -35173,12 +42547,12 @@ func (x *Service) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2701Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2701Slc - var yyhl2701 bool = l >= 0 - for yyj2701 := 0; ; yyj2701++ { - if yyhl2701 { - if yyj2701 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var 
yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -35187,47 +42561,65 @@ func (x *Service) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2701Slc = r.DecodeBytes(yys2701Slc, true, true) - yys2701 := string(yys2701Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2701 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv2704 := &x.ObjectMeta - yyv2704.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = ServiceSpec{} } else { - yyv2705 := &x.Spec - yyv2705.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = ServiceStatus{} } else { - yyv2706 := &x.Status - yyv2706.CodecDecodeSelf(d) + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys2701) - } // end switch yys2701 - } // end for yyj2701 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -35235,16 +42627,16 @@ func (x *Service) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2707 int - var yyb2707 bool - var yyhl2707 bool = l >= 0 - yyj2707++ - if yyhl2707 { - yyb2707 = yyj2707 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2707 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2707 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -35252,15 +42644,21 @@ func (x *Service) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj2707++ - if yyhl2707 { - yyb2707 = yyj2707 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2707 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2707 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -35268,32 +42666,44 @@ func (x *Service) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj2707++ - if yyhl2707 { - yyb2707 = yyj2707 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2707 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2707 { + if yyb12 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv2710 := &x.ObjectMeta - yyv2710.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj2707++ - if yyhl2707 { - yyb2707 = yyj2707 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2707 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2707 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -35301,16 +42711,16 @@ func (x *Service) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Spec = ServiceSpec{} } else { - yyv2711 := &x.Spec - yyv2711.CodecDecodeSelf(d) + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) } - yyj2707++ - if yyhl2707 { - yyb2707 = yyj2707 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2707 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2707 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -35318,21 +42728,21 @@ func (x *Service) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Status = ServiceStatus{} } else { - yyv2712 := &x.Status - yyv2712.CodecDecodeSelf(d) + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) } for { - yyj2707++ - if yyhl2707 { - yyb2707 = yyj2707 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2707 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2707 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2707-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -35344,37 +42754,37 @@ func (x *ServiceList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2713 := z.EncBinary() - _ = yym2713 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2714 := !z.EncBinary() - yy2arr2714 := z.EncBasicHandle().StructToArray - var yyq2714 [4]bool - _, _, _ = yysep2714, yyq2714, yy2arr2714 - const yyr2714 bool = false - yyq2714[0] = x.Kind != "" - yyq2714[1] = x.APIVersion != "" - yyq2714[2] = true - var yynn2714 int - if yyr2714 || yy2arr2714 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn2714 = 1 - for _, b := range yyq2714 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn2714++ + yynn2++ } } - r.EncodeMapStart(yynn2714) - yynn2714 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2714 || yy2arr2714 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2714[0] { - yym2716 := z.EncBinary() - _ = yym2716 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -35383,23 +42793,23 @@ func (x *ServiceList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2714[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2717 := z.EncBinary() - _ = yym2717 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr2714 || yy2arr2714 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2714[1] { - yym2719 := z.EncBinary() - _ = yym2719 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -35408,54 +42818,54 @@ func (x *ServiceList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2714[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2720 := z.EncBinary() - _ = yym2720 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr2714 || yy2arr2714 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2714[2] { - yy2722 := &x.ListMeta - yym2723 := z.EncBinary() - _ = yym2723 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy2722) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy2722) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq2714[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2724 := &x.ListMeta - yym2725 := z.EncBinary() - _ = yym2725 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy2724) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy2724) + z.EncFallback(yy12) } } } - if yyr2714 || yy2arr2714 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym2727 := z.EncBinary() - _ = yym2727 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceService(([]Service)(x.Items), e) @@ -35468,15 +42878,15 @@ func (x *ServiceList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym2728 := z.EncBinary() - _ = yym2728 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceService(([]Service)(x.Items), e) } } } - if yyr2714 || yy2arr2714 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -35489,25 +42899,25 @@ func (x *ServiceList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2729 := z.DecBinary() - _ = yym2729 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2730 := r.ContainerType() - if yyct2730 == codecSelferValueTypeMap1234 { - yyl2730 := r.ReadMapStart() - if yyl2730 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2730, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2730 == codecSelferValueTypeArray1234 { - yyl2730 := r.ReadArrayStart() - if yyl2730 
== 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2730, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -35519,12 +42929,12 @@ func (x *ServiceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2731Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2731Slc - var yyhl2731 bool = l >= 0 - for yyj2731 := 0; ; yyj2731++ { - if yyhl2731 { - if yyj2731 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -35533,51 +42943,63 @@ func (x *ServiceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2731Slc = r.DecodeBytes(yys2731Slc, true, true) - yys2731 := string(yys2731Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2731 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv2734 := &x.ListMeta - yym2735 := z.DecBinary() - _ = yym2735 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv2734) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv2734, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv2736 := &x.Items - yym2737 := z.DecBinary() - _ = yym2737 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceService((*[]Service)(yyv2736), d) + h.decSliceService((*[]Service)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys2731) - } // end switch yys2731 - } // end for yyj2731 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -35585,16 +43007,16 @@ func (x *ServiceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2738 int - var yyb2738 bool - var yyhl2738 bool = l >= 0 - yyj2738++ - if yyhl2738 { - yyb2738 = yyj2738 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2738 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2738 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -35602,15 +43024,21 @@ func (x *ServiceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + 
*((*string)(yyv13)) = r.DecodeString() + } } - yyj2738++ - if yyhl2738 { - yyb2738 = yyj2738 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2738 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2738 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -35618,38 +43046,44 @@ func (x *ServiceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj2738++ - if yyhl2738 { - yyb2738 = yyj2738 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2738 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2738 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv2741 := &x.ListMeta - yym2742 := z.DecBinary() - _ = yym2742 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv2741) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv2741, false) + z.DecFallback(yyv17, false) } } - yyj2738++ - if yyhl2738 { - yyb2738 = yyj2738 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2738 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2738 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -35657,26 +43091,26 @@ func (x *ServiceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Items = nil } else { - yyv2743 := &x.Items - yym2744 := z.DecBinary() - _ = yym2744 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceService((*[]Service)(yyv2743), d) + h.decSliceService((*[]Service)(yyv19), d) } } for { - yyj2738++ - if yyhl2738 { - yyb2738 = yyj2738 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2738 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2738 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2738-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -35688,39 +43122,40 @@ func (x *ServiceAccount) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2745 := z.EncBinary() - _ = yym2745 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2746 := !z.EncBinary() - yy2arr2746 := z.EncBasicHandle().StructToArray - var yyq2746 [5]bool - _, _, _ = yysep2746, yyq2746, yy2arr2746 - const yyr2746 bool = false - yyq2746[0] = x.Kind != "" - yyq2746[1] = x.APIVersion != "" - yyq2746[2] = true - yyq2746[3] = len(x.Secrets) != 0 - yyq2746[4] = len(x.ImagePullSecrets) != 0 - var yynn2746 int - if yyr2746 || yy2arr2746 { - r.EncodeArrayStart(5) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = len(x.Secrets) != 0 + yyq2[4] = len(x.ImagePullSecrets) != 0 + yyq2[5] = x.AutomountServiceAccountToken != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) } else { - yynn2746 = 0 - for _, b := range yyq2746 
{ + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn2746++ + yynn2++ } } - r.EncodeMapStart(yynn2746) - yynn2746 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2746 || yy2arr2746 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2746[0] { - yym2748 := z.EncBinary() - _ = yym2748 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -35729,23 +43164,23 @@ func (x *ServiceAccount) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2746[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2749 := z.EncBinary() - _ = yym2749 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr2746 || yy2arr2746 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2746[1] { - yym2751 := z.EncBinary() - _ = yym2751 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -35754,43 +43189,55 @@ func (x *ServiceAccount) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2746[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2752 := z.EncBinary() - _ = yym2752 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr2746 || yy2arr2746 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2746[2] { - yy2754 := &x.ObjectMeta - yy2754.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq2746[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2755 := &x.ObjectMeta - yy2755.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr2746 || yy2arr2746 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2746[3] { + if yyq2[3] { if x.Secrets == nil { r.EncodeNil() } else { - yym2757 := z.EncBinary() - _ = yym2757 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceObjectReference(([]ObjectReference)(x.Secrets), e) @@ -35800,15 +43247,15 @@ func (x *ServiceAccount) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2746[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("secrets")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Secrets == nil { r.EncodeNil() } else { - yym2758 := z.EncBinary() - _ = yym2758 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceObjectReference(([]ObjectReference)(x.Secrets), e) @@ -35816,14 +43263,14 @@ func (x *ServiceAccount) CodecEncodeSelf(e 
*codec1978.Encoder) { } } } - if yyr2746 || yy2arr2746 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2746[4] { + if yyq2[4] { if x.ImagePullSecrets == nil { r.EncodeNil() } else { - yym2760 := z.EncBinary() - _ = yym2760 + yym18 := z.EncBinary() + _ = yym18 if false { } else { h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) @@ -35833,15 +43280,15 @@ func (x *ServiceAccount) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2746[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("imagePullSecrets")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.ImagePullSecrets == nil { r.EncodeNil() } else { - yym2761 := z.EncBinary() - _ = yym2761 + yym19 := z.EncBinary() + _ = yym19 if false { } else { h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) @@ -35849,7 +43296,42 @@ func (x *ServiceAccount) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2746 || yy2arr2746 { + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.AutomountServiceAccountToken == nil { + r.EncodeNil() + } else { + yy21 := *x.AutomountServiceAccountToken + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeBool(bool(yy21)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("automountServiceAccountToken")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AutomountServiceAccountToken == nil { + r.EncodeNil() + } else { + yy23 := *x.AutomountServiceAccountToken + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeBool(bool(yy23)) + } + } + } + } + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -35862,25 +43344,25 @@ func (x *ServiceAccount) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2762 := z.DecBinary() - _ = yym2762 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2763 := r.ContainerType() - if yyct2763 == codecSelferValueTypeMap1234 { - yyl2763 := r.ReadMapStart() - if yyl2763 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2763, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2763 == codecSelferValueTypeArray1234 { - yyl2763 := r.ReadArrayStart() - if yyl2763 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2763, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -35892,12 +43374,12 @@ func (x *ServiceAccount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2764Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2764Slc - var yyhl2764 bool = l >= 0 - for yyj2764 := 0; ; yyj2764++ { - if yyhl2764 { - if yyj2764 >= l { + var yys3Slc = z.DecScratchBuffer() // 
default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -35906,57 +43388,91 @@ func (x *ServiceAccount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2764Slc = r.DecodeBytes(yys2764Slc, true, true) - yys2764 := string(yys2764Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2764 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv2767 := &x.ObjectMeta - yyv2767.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "secrets": if r.TryDecodeAsNil() { x.Secrets = nil } else { - yyv2768 := &x.Secrets - yym2769 := z.DecBinary() - _ = yym2769 + yyv10 := &x.Secrets + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceObjectReference((*[]ObjectReference)(yyv2768), d) + h.decSliceObjectReference((*[]ObjectReference)(yyv10), d) } } case "imagePullSecrets": if r.TryDecodeAsNil() { x.ImagePullSecrets = nil } else { - yyv2770 := &x.ImagePullSecrets - yym2771 := z.DecBinary() - _ = yym2771 + yyv12 := &x.ImagePullSecrets + yym13 := z.DecBinary() + _ = yym13 if false { } else { - h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv2770), d) + h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv12), d) + } + } + case "automountServiceAccountToken": + if r.TryDecodeAsNil() { + if x.AutomountServiceAccountToken != nil { + x.AutomountServiceAccountToken = nil + } + } else { + if x.AutomountServiceAccountToken == nil { + x.AutomountServiceAccountToken = new(bool) + } + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(x.AutomountServiceAccountToken)) = r.DecodeBool() } } default: - z.DecStructFieldNotFound(-1, yys2764) - } // end switch yys2764 - } // end for yyj2764 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -35964,16 +43480,16 @@ func (x *ServiceAccount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2772 int - var yyb2772 bool - var yyhl2772 bool = l >= 0 - yyj2772++ - if yyhl2772 { - yyb2772 = yyj2772 > l + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb2772 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb2772 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -35981,15 +43497,21 @@ func (x *ServiceAccount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv17 := &x.Kind + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + 
*((*string)(yyv17)) = r.DecodeString() + } } - yyj2772++ - if yyhl2772 { - yyb2772 = yyj2772 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb2772 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb2772 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -35997,32 +43519,44 @@ func (x *ServiceAccount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv19 := &x.APIVersion + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } } - yyj2772++ - if yyhl2772 { - yyb2772 = yyj2772 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb2772 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb2772 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv2775 := &x.ObjectMeta - yyv2775.CodecDecodeSelf(d) + yyv21 := &x.ObjectMeta + yym22 := z.DecBinary() + _ = yym22 + if false { + } else if z.HasExtensions() && z.DecExt(yyv21) { + } else { + z.DecFallback(yyv21, false) + } } - yyj2772++ - if yyhl2772 { - yyb2772 = yyj2772 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb2772 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb2772 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -36030,21 +43564,21 @@ func (x *ServiceAccount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Secrets = nil } else { - yyv2776 := &x.Secrets - yym2777 := z.DecBinary() - _ = yym2777 + yyv23 := &x.Secrets + yym24 := z.DecBinary() + _ = yym24 if false { } else { - h.decSliceObjectReference((*[]ObjectReference)(yyv2776), d) + h.decSliceObjectReference((*[]ObjectReference)(yyv23), d) } } - yyj2772++ - if yyhl2772 { - yyb2772 = yyj2772 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb2772 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb2772 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -36052,26 +43586,52 @@ func (x *ServiceAccount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ImagePullSecrets = nil } else { - yyv2778 := &x.ImagePullSecrets - yym2779 := z.DecBinary() - _ = yym2779 + yyv25 := &x.ImagePullSecrets + yym26 := z.DecBinary() + _ = yym26 if false { } else { - h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv2778), d) + h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv25), d) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AutomountServiceAccountToken != nil { + x.AutomountServiceAccountToken = nil + } + } else { + if x.AutomountServiceAccountToken == nil { + x.AutomountServiceAccountToken = new(bool) + } + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(x.AutomountServiceAccountToken)) = r.DecodeBool() } } for { - yyj2772++ - if yyhl2772 { - yyb2772 = yyj2772 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb2772 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb2772 { + if yyb16 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - 
z.DecStructFieldNotFound(yyj2772-1, "") + z.DecStructFieldNotFound(yyj16-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -36083,37 +43643,37 @@ func (x *ServiceAccountList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2780 := z.EncBinary() - _ = yym2780 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2781 := !z.EncBinary() - yy2arr2781 := z.EncBasicHandle().StructToArray - var yyq2781 [4]bool - _, _, _ = yysep2781, yyq2781, yy2arr2781 - const yyr2781 bool = false - yyq2781[0] = x.Kind != "" - yyq2781[1] = x.APIVersion != "" - yyq2781[2] = true - var yynn2781 int - if yyr2781 || yy2arr2781 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn2781 = 1 - for _, b := range yyq2781 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn2781++ + yynn2++ } } - r.EncodeMapStart(yynn2781) - yynn2781 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2781 || yy2arr2781 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2781[0] { - yym2783 := z.EncBinary() - _ = yym2783 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -36122,23 +43682,23 @@ func (x *ServiceAccountList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2781[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2784 := z.EncBinary() - _ = yym2784 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr2781 || yy2arr2781 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2781[1] { - yym2786 := z.EncBinary() - _ = yym2786 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -36147,54 +43707,54 @@ func (x *ServiceAccountList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2781[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2787 := z.EncBinary() - _ = yym2787 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr2781 || yy2arr2781 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2781[2] { - yy2789 := &x.ListMeta - yym2790 := z.EncBinary() - _ = yym2790 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy2789) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy2789) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq2781[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2791 := &x.ListMeta - yym2792 := 
z.EncBinary() - _ = yym2792 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy2791) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy2791) + z.EncFallback(yy12) } } } - if yyr2781 || yy2arr2781 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym2794 := z.EncBinary() - _ = yym2794 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceServiceAccount(([]ServiceAccount)(x.Items), e) @@ -36207,15 +43767,15 @@ func (x *ServiceAccountList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym2795 := z.EncBinary() - _ = yym2795 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceServiceAccount(([]ServiceAccount)(x.Items), e) } } } - if yyr2781 || yy2arr2781 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -36228,25 +43788,25 @@ func (x *ServiceAccountList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2796 := z.DecBinary() - _ = yym2796 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2797 := r.ContainerType() - if yyct2797 == codecSelferValueTypeMap1234 { - yyl2797 := r.ReadMapStart() - if yyl2797 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2797, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2797 == codecSelferValueTypeArray1234 { - yyl2797 := r.ReadArrayStart() - if yyl2797 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2797, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -36258,12 +43818,12 @@ func (x *ServiceAccountList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2798Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2798Slc - var yyhl2798 bool = l >= 0 - for yyj2798 := 0; ; yyj2798++ { - if yyhl2798 { - if yyj2798 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -36272,51 +43832,63 @@ func (x *ServiceAccountList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2798Slc = r.DecodeBytes(yys2798Slc, true, true) - yys2798 := string(yys2798Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2798 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } 
else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv2801 := &x.ListMeta - yym2802 := z.DecBinary() - _ = yym2802 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv2801) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv2801, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv2803 := &x.Items - yym2804 := z.DecBinary() - _ = yym2804 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceServiceAccount((*[]ServiceAccount)(yyv2803), d) + h.decSliceServiceAccount((*[]ServiceAccount)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys2798) - } // end switch yys2798 - } // end for yyj2798 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -36324,16 +43896,16 @@ func (x *ServiceAccountList) codecDecodeSelfFromArray(l int, d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2805 int - var yyb2805 bool - var yyhl2805 bool = l >= 0 - yyj2805++ - if yyhl2805 { - yyb2805 = yyj2805 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2805 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2805 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -36341,15 +43913,21 @@ func (x *ServiceAccountList) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj2805++ - if yyhl2805 { - yyb2805 = yyj2805 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2805 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2805 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -36357,38 +43935,44 @@ func (x *ServiceAccountList) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj2805++ - if yyhl2805 { - yyb2805 = yyj2805 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2805 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2805 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv2808 := &x.ListMeta - yym2809 := z.DecBinary() - _ = yym2809 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv2808) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv2808, false) + z.DecFallback(yyv17, false) } } - yyj2805++ - if yyhl2805 { - yyb2805 = yyj2805 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2805 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2805 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -36396,26 +43980,26 @@ func (x 
*ServiceAccountList) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.Items = nil } else { - yyv2810 := &x.Items - yym2811 := z.DecBinary() - _ = yym2811 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceServiceAccount((*[]ServiceAccount)(yyv2810), d) + h.decSliceServiceAccount((*[]ServiceAccount)(yyv19), d) } } for { - yyj2805++ - if yyhl2805 { - yyb2805 = yyj2805 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2805 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2805 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2805-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -36427,37 +44011,37 @@ func (x *Endpoints) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2812 := z.EncBinary() - _ = yym2812 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2813 := !z.EncBinary() - yy2arr2813 := z.EncBasicHandle().StructToArray - var yyq2813 [4]bool - _, _, _ = yysep2813, yyq2813, yy2arr2813 - const yyr2813 bool = false - yyq2813[0] = x.Kind != "" - yyq2813[1] = x.APIVersion != "" - yyq2813[2] = true - var yynn2813 int - if yyr2813 || yy2arr2813 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn2813 = 1 - for _, b := range yyq2813 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn2813++ + yynn2++ } } - r.EncodeMapStart(yynn2813) - yynn2813 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2813 || yy2arr2813 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2813[0] { - yym2815 := z.EncBinary() - _ = yym2815 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -36466,23 +44050,23 @@ func (x *Endpoints) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2813[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2816 := z.EncBinary() - _ = yym2816 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr2813 || yy2arr2813 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2813[1] { - yym2818 := z.EncBinary() - _ = yym2818 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -36491,42 +44075,54 @@ func (x *Endpoints) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2813[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2819 := z.EncBinary() - _ = yym2819 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr2813 || yy2arr2813 { + if yyr2 || yy2arr2 { 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2813[2] { - yy2821 := &x.ObjectMeta - yy2821.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq2813[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2822 := &x.ObjectMeta - yy2822.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr2813 || yy2arr2813 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Subsets == nil { r.EncodeNil() } else { - yym2824 := z.EncBinary() - _ = yym2824 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceEndpointSubset(([]EndpointSubset)(x.Subsets), e) @@ -36539,15 +44135,15 @@ func (x *Endpoints) CodecEncodeSelf(e *codec1978.Encoder) { if x.Subsets == nil { r.EncodeNil() } else { - yym2825 := z.EncBinary() - _ = yym2825 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceEndpointSubset(([]EndpointSubset)(x.Subsets), e) } } } - if yyr2813 || yy2arr2813 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -36560,25 +44156,25 @@ func (x *Endpoints) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2826 := z.DecBinary() - _ = yym2826 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2827 := r.ContainerType() - if yyct2827 == codecSelferValueTypeMap1234 { - yyl2827 := r.ReadMapStart() - if yyl2827 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2827, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2827 == codecSelferValueTypeArray1234 { - yyl2827 := r.ReadArrayStart() - if yyl2827 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2827, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -36590,12 +44186,12 @@ func (x *Endpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2828Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2828Slc - var yyhl2828 bool = l >= 0 - for yyj2828 := 0; ; yyj2828++ { - if yyhl2828 { - if yyj2828 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -36604,45 +44200,63 @@ func (x *Endpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2828Slc = r.DecodeBytes(yys2828Slc, true, true) - yys2828 := string(yys2828Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2828 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv2831 := &x.ObjectMeta - yyv2831.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "subsets": if r.TryDecodeAsNil() { x.Subsets = nil } else { - yyv2832 := &x.Subsets - yym2833 := z.DecBinary() - _ = yym2833 + yyv10 := &x.Subsets + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceEndpointSubset((*[]EndpointSubset)(yyv2832), d) + h.decSliceEndpointSubset((*[]EndpointSubset)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys2828) - } // end switch yys2828 - } // end for yyj2828 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -36650,16 +44264,16 @@ func (x *Endpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2834 int - var yyb2834 bool - var yyhl2834 bool = l >= 0 - yyj2834++ - if yyhl2834 { - yyb2834 = yyj2834 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2834 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2834 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -36667,15 +44281,21 @@ func (x *Endpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj2834++ - if yyhl2834 { - yyb2834 = yyj2834 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2834 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2834 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -36683,32 +44303,44 @@ func (x *Endpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj2834++ - if yyhl2834 { - yyb2834 = yyj2834 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2834 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2834 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv2837 := &x.ObjectMeta - yyv2837.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) 
+ } } - yyj2834++ - if yyhl2834 { - yyb2834 = yyj2834 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2834 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2834 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -36716,26 +44348,26 @@ func (x *Endpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Subsets = nil } else { - yyv2838 := &x.Subsets - yym2839 := z.DecBinary() - _ = yym2839 + yyv19 := &x.Subsets + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceEndpointSubset((*[]EndpointSubset)(yyv2838), d) + h.decSliceEndpointSubset((*[]EndpointSubset)(yyv19), d) } } for { - yyj2834++ - if yyhl2834 { - yyb2834 = yyj2834 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2834 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2834 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2834-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -36747,40 +44379,40 @@ func (x *EndpointSubset) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2840 := z.EncBinary() - _ = yym2840 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2841 := !z.EncBinary() - yy2arr2841 := z.EncBasicHandle().StructToArray - var yyq2841 [3]bool - _, _, _ = yysep2841, yyq2841, yy2arr2841 - const yyr2841 bool = false - yyq2841[0] = len(x.Addresses) != 0 - yyq2841[1] = len(x.NotReadyAddresses) != 0 - yyq2841[2] = len(x.Ports) != 0 - var yynn2841 int - if yyr2841 || yy2arr2841 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Addresses) != 0 + yyq2[1] = len(x.NotReadyAddresses) != 0 + yyq2[2] = len(x.Ports) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn2841 = 0 - for _, b := range yyq2841 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn2841++ + yynn2++ } } - r.EncodeMapStart(yynn2841) - yynn2841 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2841 || yy2arr2841 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2841[0] { + if yyq2[0] { if x.Addresses == nil { r.EncodeNil() } else { - yym2843 := z.EncBinary() - _ = yym2843 + yym4 := z.EncBinary() + _ = yym4 if false { } else { h.encSliceEndpointAddress(([]EndpointAddress)(x.Addresses), e) @@ -36790,15 +44422,15 @@ func (x *EndpointSubset) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2841[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("addresses")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Addresses == nil { r.EncodeNil() } else { - yym2844 := z.EncBinary() - _ = yym2844 + yym5 := z.EncBinary() + _ = yym5 if false { } else { h.encSliceEndpointAddress(([]EndpointAddress)(x.Addresses), e) @@ -36806,14 +44438,14 @@ func (x *EndpointSubset) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2841 || yy2arr2841 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2841[1] { + if yyq2[1] { if x.NotReadyAddresses == nil { r.EncodeNil() } else { - yym2846 := z.EncBinary() - _ = yym2846 + yym7 := z.EncBinary() + _ = yym7 if false { } else { 
h.encSliceEndpointAddress(([]EndpointAddress)(x.NotReadyAddresses), e) @@ -36823,15 +44455,15 @@ func (x *EndpointSubset) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2841[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("notReadyAddresses")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.NotReadyAddresses == nil { r.EncodeNil() } else { - yym2847 := z.EncBinary() - _ = yym2847 + yym8 := z.EncBinary() + _ = yym8 if false { } else { h.encSliceEndpointAddress(([]EndpointAddress)(x.NotReadyAddresses), e) @@ -36839,14 +44471,14 @@ func (x *EndpointSubset) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2841 || yy2arr2841 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2841[2] { + if yyq2[2] { if x.Ports == nil { r.EncodeNil() } else { - yym2849 := z.EncBinary() - _ = yym2849 + yym10 := z.EncBinary() + _ = yym10 if false { } else { h.encSliceEndpointPort(([]EndpointPort)(x.Ports), e) @@ -36856,15 +44488,15 @@ func (x *EndpointSubset) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2841[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("ports")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Ports == nil { r.EncodeNil() } else { - yym2850 := z.EncBinary() - _ = yym2850 + yym11 := z.EncBinary() + _ = yym11 if false { } else { h.encSliceEndpointPort(([]EndpointPort)(x.Ports), e) @@ -36872,7 +44504,7 @@ func (x *EndpointSubset) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2841 || yy2arr2841 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -36885,25 +44517,25 @@ func (x *EndpointSubset) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2851 := z.DecBinary() - _ = yym2851 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2852 := r.ContainerType() - if yyct2852 == codecSelferValueTypeMap1234 { - yyl2852 := r.ReadMapStart() - if yyl2852 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2852, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2852 == codecSelferValueTypeArray1234 { - yyl2852 := r.ReadArrayStart() - if yyl2852 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2852, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -36915,12 +44547,12 @@ func (x *EndpointSubset) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2853Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2853Slc - var yyhl2853 bool = l >= 0 - for yyj2853 := 0; ; yyj2853++ { - if yyhl2853 { - if yyj2853 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -36929,50 +44561,50 @@ func (x 
*EndpointSubset) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2853Slc = r.DecodeBytes(yys2853Slc, true, true) - yys2853 := string(yys2853Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2853 { + switch yys3 { case "addresses": if r.TryDecodeAsNil() { x.Addresses = nil } else { - yyv2854 := &x.Addresses - yym2855 := z.DecBinary() - _ = yym2855 + yyv4 := &x.Addresses + yym5 := z.DecBinary() + _ = yym5 if false { } else { - h.decSliceEndpointAddress((*[]EndpointAddress)(yyv2854), d) + h.decSliceEndpointAddress((*[]EndpointAddress)(yyv4), d) } } case "notReadyAddresses": if r.TryDecodeAsNil() { x.NotReadyAddresses = nil } else { - yyv2856 := &x.NotReadyAddresses - yym2857 := z.DecBinary() - _ = yym2857 + yyv6 := &x.NotReadyAddresses + yym7 := z.DecBinary() + _ = yym7 if false { } else { - h.decSliceEndpointAddress((*[]EndpointAddress)(yyv2856), d) + h.decSliceEndpointAddress((*[]EndpointAddress)(yyv6), d) } } case "ports": if r.TryDecodeAsNil() { x.Ports = nil } else { - yyv2858 := &x.Ports - yym2859 := z.DecBinary() - _ = yym2859 + yyv8 := &x.Ports + yym9 := z.DecBinary() + _ = yym9 if false { } else { - h.decSliceEndpointPort((*[]EndpointPort)(yyv2858), d) + h.decSliceEndpointPort((*[]EndpointPort)(yyv8), d) } } default: - z.DecStructFieldNotFound(-1, yys2853) - } // end switch yys2853 - } // end for yyj2853 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -36980,16 +44612,16 @@ func (x *EndpointSubset) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2860 int - var yyb2860 bool - var yyhl2860 bool = l >= 0 - yyj2860++ - if yyhl2860 { - yyb2860 = yyj2860 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb2860 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb2860 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -36997,21 +44629,21 @@ func (x *EndpointSubset) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Addresses = nil } else { - yyv2861 := &x.Addresses - yym2862 := z.DecBinary() - _ = yym2862 + yyv11 := &x.Addresses + yym12 := z.DecBinary() + _ = yym12 if false { } else { - h.decSliceEndpointAddress((*[]EndpointAddress)(yyv2861), d) + h.decSliceEndpointAddress((*[]EndpointAddress)(yyv11), d) } } - yyj2860++ - if yyhl2860 { - yyb2860 = yyj2860 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb2860 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb2860 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -37019,21 +44651,21 @@ func (x *EndpointSubset) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.NotReadyAddresses = nil } else { - yyv2863 := &x.NotReadyAddresses - yym2864 := z.DecBinary() - _ = yym2864 + yyv13 := &x.NotReadyAddresses + yym14 := z.DecBinary() + _ = yym14 if false { } else { - h.decSliceEndpointAddress((*[]EndpointAddress)(yyv2863), d) + h.decSliceEndpointAddress((*[]EndpointAddress)(yyv13), d) } } - yyj2860++ - if yyhl2860 { - yyb2860 = yyj2860 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb2860 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb2860 { + if yyb10 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -37041,26 +44673,26 @@ func (x *EndpointSubset) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Ports = nil } else { - yyv2865 := &x.Ports - yym2866 := z.DecBinary() - _ = yym2866 + yyv15 := &x.Ports + yym16 := z.DecBinary() + _ = yym16 if false { } else { - h.decSliceEndpointPort((*[]EndpointPort)(yyv2865), d) + h.decSliceEndpointPort((*[]EndpointPort)(yyv15), d) } } for { - yyj2860++ - if yyhl2860 { - yyb2860 = yyj2860 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb2860 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb2860 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2860-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -37072,36 +44704,36 @@ func (x *EndpointAddress) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2867 := z.EncBinary() - _ = yym2867 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2868 := !z.EncBinary() - yy2arr2868 := z.EncBasicHandle().StructToArray - var yyq2868 [4]bool - _, _, _ = yysep2868, yyq2868, yy2arr2868 - const yyr2868 bool = false - yyq2868[1] = x.Hostname != "" - yyq2868[2] = x.NodeName != nil - yyq2868[3] = x.TargetRef != nil - var yynn2868 int - if yyr2868 || yy2arr2868 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Hostname != "" + yyq2[2] = x.NodeName != nil + yyq2[3] = x.TargetRef != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn2868 = 1 - for _, b := range yyq2868 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn2868++ + yynn2++ } } - r.EncodeMapStart(yynn2868) - yynn2868 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2868 || yy2arr2868 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2870 := z.EncBinary() - _ = yym2870 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.IP)) @@ -37110,18 +44742,18 @@ func (x *EndpointAddress) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("ip")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2871 := z.EncBinary() - _ = yym2871 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.IP)) } } - if yyr2868 || yy2arr2868 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2868[1] { - yym2873 := z.EncBinary() - _ = yym2873 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) @@ -37130,56 +44762,56 @@ func (x *EndpointAddress) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2868[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("hostname")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2874 := z.EncBinary() - _ = yym2874 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) } } } - if yyr2868 || yy2arr2868 { + if yyr2 || yy2arr2 { 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2868[2] { + if yyq2[2] { if x.NodeName == nil { r.EncodeNil() } else { - yy2876 := *x.NodeName - yym2877 := z.EncBinary() - _ = yym2877 + yy10 := *x.NodeName + yym11 := z.EncBinary() + _ = yym11 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(yy2876)) + r.EncodeString(codecSelferC_UTF81234, string(yy10)) } } } else { r.EncodeNil() } } else { - if yyq2868[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nodeName")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.NodeName == nil { r.EncodeNil() } else { - yy2878 := *x.NodeName - yym2879 := z.EncBinary() - _ = yym2879 + yy12 := *x.NodeName + yym13 := z.EncBinary() + _ = yym13 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(yy2878)) + r.EncodeString(codecSelferC_UTF81234, string(yy12)) } } } } - if yyr2868 || yy2arr2868 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2868[3] { + if yyq2[3] { if x.TargetRef == nil { r.EncodeNil() } else { @@ -37189,7 +44821,7 @@ func (x *EndpointAddress) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2868[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("targetRef")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -37200,7 +44832,7 @@ func (x *EndpointAddress) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2868 || yy2arr2868 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -37213,25 +44845,25 @@ func (x *EndpointAddress) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2881 := z.DecBinary() - _ = yym2881 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2882 := r.ContainerType() - if yyct2882 == codecSelferValueTypeMap1234 { - yyl2882 := r.ReadMapStart() - if yyl2882 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2882, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2882 == codecSelferValueTypeArray1234 { - yyl2882 := r.ReadArrayStart() - if yyl2882 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2882, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -37243,12 +44875,12 @@ func (x *EndpointAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2883Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2883Slc - var yyhl2883 bool = l >= 0 - for yyj2883 := 0; ; yyj2883++ { - if yyhl2883 { - if yyj2883 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -37257,21 +44889,33 @@ func (x *EndpointAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2883Slc = r.DecodeBytes(yys2883Slc, true, true) - yys2883 := string(yys2883Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2883 { + switch yys3 { case "ip": if r.TryDecodeAsNil() { x.IP = "" } else { - x.IP = string(r.DecodeString()) + yyv4 := &x.IP + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "hostname": if r.TryDecodeAsNil() { x.Hostname = "" } else { - x.Hostname = string(r.DecodeString()) + yyv6 := &x.Hostname + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "nodeName": if r.TryDecodeAsNil() { @@ -37282,8 +44926,8 @@ func (x *EndpointAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.NodeName == nil { x.NodeName = new(string) } - yym2887 := z.DecBinary() - _ = yym2887 + yym9 := z.DecBinary() + _ = yym9 if false { } else { *((*string)(x.NodeName)) = r.DecodeString() @@ -37301,9 +44945,9 @@ func (x *EndpointAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { x.TargetRef.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys2883) - } // end switch yys2883 - } // end for yyj2883 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -37311,16 +44955,16 @@ func (x *EndpointAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2889 int - var yyb2889 bool - var yyhl2889 bool = l >= 0 - yyj2889++ - if yyhl2889 { - yyb2889 = yyj2889 > l + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb2889 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb2889 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -37328,15 +44972,21 @@ func (x *EndpointAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.IP = "" } else { - x.IP = string(r.DecodeString()) + yyv12 := &x.IP + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } - yyj2889++ - if yyhl2889 { - yyb2889 = yyj2889 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb2889 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb2889 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -37344,15 +44994,21 @@ func (x *EndpointAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Hostname = "" } else { - x.Hostname = string(r.DecodeString()) + yyv14 := &x.Hostname + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } - yyj2889++ - if yyhl2889 { - yyb2889 = yyj2889 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb2889 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb2889 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -37365,20 +45021,20 @@ func (x *EndpointAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if x.NodeName == nil { x.NodeName = new(string) } - yym2893 := z.DecBinary() - _ = yym2893 + yym17 := z.DecBinary() + _ = yym17 if false { } else { *((*string)(x.NodeName)) = r.DecodeString() } } - yyj2889++ - if yyhl2889 { - yyb2889 = yyj2889 > l + yyj11++ + if yyhl11 { + yyb11 
= yyj11 > l } else { - yyb2889 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb2889 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -37394,17 +45050,17 @@ func (x *EndpointAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) x.TargetRef.CodecDecodeSelf(d) } for { - yyj2889++ - if yyhl2889 { - yyb2889 = yyj2889 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb2889 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb2889 { + if yyb11 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2889-1, "") + z.DecStructFieldNotFound(yyj11-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -37416,36 +45072,36 @@ func (x *EndpointPort) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2895 := z.EncBinary() - _ = yym2895 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2896 := !z.EncBinary() - yy2arr2896 := z.EncBasicHandle().StructToArray - var yyq2896 [3]bool - _, _, _ = yysep2896, yyq2896, yy2arr2896 - const yyr2896 bool = false - yyq2896[0] = x.Name != "" - yyq2896[2] = x.Protocol != "" - var yynn2896 int - if yyr2896 || yy2arr2896 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[2] = x.Protocol != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn2896 = 1 - for _, b := range yyq2896 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn2896++ + yynn2++ } } - r.EncodeMapStart(yynn2896) - yynn2896 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2896 || yy2arr2896 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2896[0] { - yym2898 := z.EncBinary() - _ = yym2898 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) @@ -37454,22 +45110,22 @@ func (x *EndpointPort) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2896[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("name")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2899 := z.EncBinary() - _ = yym2899 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } } - if yyr2896 || yy2arr2896 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2901 := z.EncBinary() - _ = yym2901 + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeInt(int64(x.Port)) @@ -37478,29 +45134,29 @@ func (x *EndpointPort) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("port")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2902 := z.EncBinary() - _ = yym2902 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeInt(int64(x.Port)) } } - if yyr2896 || yy2arr2896 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2896[2] { + if yyq2[2] { x.Protocol.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2896[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("protocol")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Protocol.CodecEncodeSelf(e) } } - if yyr2896 || yy2arr2896 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -37513,25 +45169,25 @@ func (x *EndpointPort) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2904 := z.DecBinary() - _ = yym2904 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2905 := r.ContainerType() - if yyct2905 == codecSelferValueTypeMap1234 { - yyl2905 := r.ReadMapStart() - if yyl2905 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2905, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2905 == codecSelferValueTypeArray1234 { - yyl2905 := r.ReadArrayStart() - if yyl2905 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2905, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -37543,12 +45199,12 @@ func (x *EndpointPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2906Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2906Slc - var yyhl2906 bool = l >= 0 - for yyj2906 := 0; ; yyj2906++ { - if yyhl2906 { - if yyj2906 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -37557,32 +45213,45 @@ func (x *EndpointPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2906Slc = r.DecodeBytes(yys2906Slc, true, true) - yys2906 := string(yys2906Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2906 { + switch yys3 { case "name": if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "port": if r.TryDecodeAsNil() { x.Port = 0 } else { - x.Port = int32(r.DecodeInt(32)) + yyv6 := &x.Port + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } } case "protocol": if r.TryDecodeAsNil() { x.Protocol = "" } else { - x.Protocol = Protocol(r.DecodeString()) + yyv8 := &x.Protocol + yyv8.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys2906) - } // end switch yys2906 - } // end for yyj2906 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -37590,16 +45259,16 @@ func (x *EndpointPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2910 int - var yyb2910 bool - var yyhl2910 bool = l >= 0 - yyj2910++ - if yyhl2910 { - yyb2910 = yyj2910 > l + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + 
if yyhl9 { + yyb9 = yyj9 > l } else { - yyb2910 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb2910 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -37607,15 +45276,21 @@ func (x *EndpointPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv10 := &x.Name + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } - yyj2910++ - if yyhl2910 { - yyb2910 = yyj2910 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb2910 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb2910 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -37623,15 +45298,21 @@ func (x *EndpointPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Port = 0 } else { - x.Port = int32(r.DecodeInt(32)) + yyv12 := &x.Port + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(yyv12)) = int32(r.DecodeInt(32)) + } } - yyj2910++ - if yyhl2910 { - yyb2910 = yyj2910 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb2910 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb2910 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -37639,20 +45320,21 @@ func (x *EndpointPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Protocol = "" } else { - x.Protocol = Protocol(r.DecodeString()) + yyv14 := &x.Protocol + yyv14.CodecDecodeSelf(d) } for { - yyj2910++ - if yyhl2910 { - yyb2910 = yyj2910 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb2910 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb2910 { + if yyb9 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2910-1, "") + z.DecStructFieldNotFound(yyj9-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -37664,37 +45346,37 @@ func (x *EndpointsList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2914 := z.EncBinary() - _ = yym2914 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2915 := !z.EncBinary() - yy2arr2915 := z.EncBasicHandle().StructToArray - var yyq2915 [4]bool - _, _, _ = yysep2915, yyq2915, yy2arr2915 - const yyr2915 bool = false - yyq2915[0] = x.Kind != "" - yyq2915[1] = x.APIVersion != "" - yyq2915[2] = true - var yynn2915 int - if yyr2915 || yy2arr2915 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn2915 = 1 - for _, b := range yyq2915 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn2915++ + yynn2++ } } - r.EncodeMapStart(yynn2915) - yynn2915 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2915 || yy2arr2915 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2915[0] { - yym2917 := z.EncBinary() - _ = yym2917 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -37703,23 +45385,23 @@ func (x *EndpointsList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2915[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) 
r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2918 := z.EncBinary() - _ = yym2918 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr2915 || yy2arr2915 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2915[1] { - yym2920 := z.EncBinary() - _ = yym2920 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -37728,54 +45410,54 @@ func (x *EndpointsList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2915[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2921 := z.EncBinary() - _ = yym2921 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr2915 || yy2arr2915 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2915[2] { - yy2923 := &x.ListMeta - yym2924 := z.EncBinary() - _ = yym2924 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy2923) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy2923) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq2915[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2925 := &x.ListMeta - yym2926 := z.EncBinary() - _ = yym2926 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy2925) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy2925) + z.EncFallback(yy12) } } } - if yyr2915 || yy2arr2915 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym2928 := z.EncBinary() - _ = yym2928 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceEndpoints(([]Endpoints)(x.Items), e) @@ -37788,15 +45470,15 @@ func (x *EndpointsList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym2929 := z.EncBinary() - _ = yym2929 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceEndpoints(([]Endpoints)(x.Items), e) } } } - if yyr2915 || yy2arr2915 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -37809,25 +45491,25 @@ func (x *EndpointsList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2930 := z.DecBinary() - _ = yym2930 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2931 := r.ContainerType() - if yyct2931 == codecSelferValueTypeMap1234 { - yyl2931 := r.ReadMapStart() - if yyl2931 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2931, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2931 == 
codecSelferValueTypeArray1234 { - yyl2931 := r.ReadArrayStart() - if yyl2931 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2931, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -37839,12 +45521,12 @@ func (x *EndpointsList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2932Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2932Slc - var yyhl2932 bool = l >= 0 - for yyj2932 := 0; ; yyj2932++ { - if yyhl2932 { - if yyj2932 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -37853,51 +45535,63 @@ func (x *EndpointsList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2932Slc = r.DecodeBytes(yys2932Slc, true, true) - yys2932 := string(yys2932Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2932 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv2935 := &x.ListMeta - yym2936 := z.DecBinary() - _ = yym2936 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv2935) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv2935, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv2937 := &x.Items - yym2938 := z.DecBinary() - _ = yym2938 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceEndpoints((*[]Endpoints)(yyv2937), d) + h.decSliceEndpoints((*[]Endpoints)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys2932) - } // end switch yys2932 - } // end for yyj2932 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -37905,16 +45599,16 @@ func (x *EndpointsList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2939 int - var yyb2939 bool - var yyhl2939 bool = l >= 0 - yyj2939++ - if yyhl2939 { - yyb2939 = yyj2939 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2939 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2939 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -37922,15 +45616,21 @@ func (x *EndpointsList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) 
+ yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj2939++ - if yyhl2939 { - yyb2939 = yyj2939 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2939 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2939 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -37938,38 +45638,44 @@ func (x *EndpointsList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj2939++ - if yyhl2939 { - yyb2939 = yyj2939 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2939 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2939 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv2942 := &x.ListMeta - yym2943 := z.DecBinary() - _ = yym2943 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv2942) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv2942, false) + z.DecFallback(yyv17, false) } } - yyj2939++ - if yyhl2939 { - yyb2939 = yyj2939 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2939 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2939 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -37977,26 +45683,26 @@ func (x *EndpointsList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Items = nil } else { - yyv2944 := &x.Items - yym2945 := z.DecBinary() - _ = yym2945 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceEndpoints((*[]Endpoints)(yyv2944), d) + h.decSliceEndpoints((*[]Endpoints)(yyv19), d) } } for { - yyj2939++ - if yyhl2939 { - yyb2939 = yyj2939 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb2939 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb2939 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2939-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -38008,38 +45714,39 @@ func (x *NodeSpec) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2946 := z.EncBinary() - _ = yym2946 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2947 := !z.EncBinary() - yy2arr2947 := z.EncBasicHandle().StructToArray - var yyq2947 [4]bool - _, _, _ = yysep2947, yyq2947, yy2arr2947 - const yyr2947 bool = false - yyq2947[0] = x.PodCIDR != "" - yyq2947[1] = x.ExternalID != "" - yyq2947[2] = x.ProviderID != "" - yyq2947[3] = x.Unschedulable != false - var yynn2947 int - if yyr2947 || yy2arr2947 { - r.EncodeArrayStart(4) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.PodCIDR != "" + yyq2[1] = x.ExternalID != "" + yyq2[2] = x.ProviderID != "" + yyq2[3] = x.Unschedulable != false + yyq2[4] = len(x.Taints) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) } else { - yynn2947 = 0 
- for _, b := range yyq2947 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn2947++ + yynn2++ } } - r.EncodeMapStart(yynn2947) - yynn2947 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2947 || yy2arr2947 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2947[0] { - yym2949 := z.EncBinary() - _ = yym2949 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) @@ -38048,23 +45755,23 @@ func (x *NodeSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2947[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("podCIDR")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2950 := z.EncBinary() - _ = yym2950 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) } } } - if yyr2947 || yy2arr2947 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2947[1] { - yym2952 := z.EncBinary() - _ = yym2952 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ExternalID)) @@ -38073,23 +45780,23 @@ func (x *NodeSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2947[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("externalID")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2953 := z.EncBinary() - _ = yym2953 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ExternalID)) } } } - if yyr2947 || yy2arr2947 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2947[2] { - yym2955 := z.EncBinary() - _ = yym2955 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ProviderID)) @@ -38098,23 +45805,23 @@ func (x *NodeSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2947[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("providerID")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2956 := z.EncBinary() - _ = yym2956 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ProviderID)) } } } - if yyr2947 || yy2arr2947 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2947[3] { - yym2958 := z.EncBinary() - _ = yym2958 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeBool(bool(x.Unschedulable)) @@ -38123,19 +45830,52 @@ func (x *NodeSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq2947[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("unschedulable")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2959 := z.EncBinary() - _ = yym2959 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeBool(bool(x.Unschedulable)) } } } - if yyr2947 || yy2arr2947 { + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Taints == nil { + r.EncodeNil() + } else { + yym16 := 
z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceTaint(([]Taint)(x.Taints), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("taints")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Taints == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encSliceTaint(([]Taint)(x.Taints), e) + } + } + } + } + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -38148,25 +45888,25 @@ func (x *NodeSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2960 := z.DecBinary() - _ = yym2960 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2961 := r.ContainerType() - if yyct2961 == codecSelferValueTypeMap1234 { - yyl2961 := r.ReadMapStart() - if yyl2961 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2961, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2961 == codecSelferValueTypeArray1234 { - yyl2961 := r.ReadArrayStart() - if yyl2961 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2961, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -38178,12 +45918,12 @@ func (x *NodeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2962Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2962Slc - var yyhl2962 bool = l >= 0 - for yyj2962 := 0; ; yyj2962++ { - if yyhl2962 { - if yyj2962 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -38192,38 +45932,74 @@ func (x *NodeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2962Slc = r.DecodeBytes(yys2962Slc, true, true) - yys2962 := string(yys2962Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2962 { + switch yys3 { case "podCIDR": if r.TryDecodeAsNil() { x.PodCIDR = "" } else { - x.PodCIDR = string(r.DecodeString()) + yyv4 := &x.PodCIDR + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "externalID": if r.TryDecodeAsNil() { x.ExternalID = "" } else { - x.ExternalID = string(r.DecodeString()) + yyv6 := &x.ExternalID + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "providerID": if r.TryDecodeAsNil() { x.ProviderID = "" } else { - x.ProviderID = string(r.DecodeString()) + yyv8 := &x.ProviderID + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } case "unschedulable": if r.TryDecodeAsNil() { x.Unschedulable = false } else { - x.Unschedulable = 
bool(r.DecodeBool()) + yyv10 := &x.Unschedulable + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + case "taints": + if r.TryDecodeAsNil() { + x.Taints = nil + } else { + yyv12 := &x.Taints + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + h.decSliceTaint((*[]Taint)(yyv12), d) + } } default: - z.DecStructFieldNotFound(-1, yys2962) - } // end switch yys2962 - } // end for yyj2962 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -38231,16 +46007,16 @@ func (x *NodeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2967 int - var yyb2967 bool - var yyhl2967 bool = l >= 0 - yyj2967++ - if yyhl2967 { - yyb2967 = yyj2967 > l + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb2967 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb2967 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -38248,15 +46024,21 @@ func (x *NodeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.PodCIDR = "" } else { - x.PodCIDR = string(r.DecodeString()) + yyv15 := &x.PodCIDR + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj2967++ - if yyhl2967 { - yyb2967 = yyj2967 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb2967 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb2967 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -38264,15 +46046,21 @@ func (x *NodeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ExternalID = "" } else { - x.ExternalID = string(r.DecodeString()) + yyv17 := &x.ExternalID + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } } - yyj2967++ - if yyhl2967 { - yyb2967 = yyj2967 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb2967 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb2967 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -38280,15 +46068,21 @@ func (x *NodeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ProviderID = "" } else { - x.ProviderID = string(r.DecodeString()) + yyv19 := &x.ProviderID + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } } - yyj2967++ - if yyhl2967 { - yyb2967 = yyj2967 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb2967 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb2967 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -38296,20 +46090,48 @@ func (x *NodeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Unschedulable = false } else { - x.Unschedulable = bool(r.DecodeBool()) + yyv21 := &x.Unschedulable + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(yyv21)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Taints = nil + } else { + yyv23 := &x.Taints + yym24 := z.DecBinary() 
+ _ = yym24 + if false { + } else { + h.decSliceTaint((*[]Taint)(yyv23), d) + } } for { - yyj2967++ - if yyhl2967 { - yyb2967 = yyj2967 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb2967 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb2967 { + if yyb14 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2967-1, "") + z.DecStructFieldNotFound(yyj14-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -38321,33 +46143,33 @@ func (x *DaemonEndpoint) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2972 := z.EncBinary() - _ = yym2972 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2973 := !z.EncBinary() - yy2arr2973 := z.EncBasicHandle().StructToArray - var yyq2973 [1]bool - _, _, _ = yysep2973, yyq2973, yy2arr2973 - const yyr2973 bool = false - var yynn2973 int - if yyr2973 || yy2arr2973 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn2973 = 1 - for _, b := range yyq2973 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn2973++ + yynn2++ } } - r.EncodeMapStart(yynn2973) - yynn2973 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2973 || yy2arr2973 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2975 := z.EncBinary() - _ = yym2975 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeInt(int64(x.Port)) @@ -38356,14 +46178,14 @@ func (x *DaemonEndpoint) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("Port")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2976 := z.EncBinary() - _ = yym2976 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeInt(int64(x.Port)) } } - if yyr2973 || yy2arr2973 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -38376,25 +46198,25 @@ func (x *DaemonEndpoint) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2977 := z.DecBinary() - _ = yym2977 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2978 := r.ContainerType() - if yyct2978 == codecSelferValueTypeMap1234 { - yyl2978 := r.ReadMapStart() - if yyl2978 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2978, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2978 == codecSelferValueTypeArray1234 { - yyl2978 := r.ReadArrayStart() - if yyl2978 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2978, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -38406,12 +46228,12 @@ func (x *DaemonEndpoint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2979Slc = z.DecScratchBuffer() // default 
slice to decode into - _ = yys2979Slc - var yyhl2979 bool = l >= 0 - for yyj2979 := 0; ; yyj2979++ { - if yyhl2979 { - if yyj2979 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -38420,20 +46242,26 @@ func (x *DaemonEndpoint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2979Slc = r.DecodeBytes(yys2979Slc, true, true) - yys2979 := string(yys2979Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2979 { + switch yys3 { case "Port": if r.TryDecodeAsNil() { x.Port = 0 } else { - x.Port = int32(r.DecodeInt(32)) + yyv4 := &x.Port + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } } default: - z.DecStructFieldNotFound(-1, yys2979) - } // end switch yys2979 - } // end for yyj2979 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -38441,16 +46269,16 @@ func (x *DaemonEndpoint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2981 int - var yyb2981 bool - var yyhl2981 bool = l >= 0 - yyj2981++ - if yyhl2981 { - yyb2981 = yyj2981 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb2981 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb2981 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -38458,20 +46286,26 @@ func (x *DaemonEndpoint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Port = 0 } else { - x.Port = int32(r.DecodeInt(32)) + yyv7 := &x.Port + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } } for { - yyj2981++ - if yyhl2981 { - yyb2981 = yyj2981 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb2981 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb2981 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2981-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -38483,48 +46317,48 @@ func (x *NodeDaemonEndpoints) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2983 := z.EncBinary() - _ = yym2983 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2984 := !z.EncBinary() - yy2arr2984 := z.EncBasicHandle().StructToArray - var yyq2984 [1]bool - _, _, _ = yysep2984, yyq2984, yy2arr2984 - const yyr2984 bool = false - yyq2984[0] = true - var yynn2984 int - if yyr2984 || yy2arr2984 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn2984 = 0 - for _, b := range yyq2984 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn2984++ + yynn2++ } } - r.EncodeMapStart(yynn2984) - yynn2984 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2984 || yy2arr2984 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2984[0] { - 
yy2986 := &x.KubeletEndpoint - yy2986.CodecEncodeSelf(e) + if yyq2[0] { + yy4 := &x.KubeletEndpoint + yy4.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq2984[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kubeletEndpoint")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy2987 := &x.KubeletEndpoint - yy2987.CodecEncodeSelf(e) + yy6 := &x.KubeletEndpoint + yy6.CodecEncodeSelf(e) } } - if yyr2984 || yy2arr2984 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -38537,25 +46371,25 @@ func (x *NodeDaemonEndpoints) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym2988 := z.DecBinary() - _ = yym2988 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct2989 := r.ContainerType() - if yyct2989 == codecSelferValueTypeMap1234 { - yyl2989 := r.ReadMapStart() - if yyl2989 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl2989, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2989 == codecSelferValueTypeArray1234 { - yyl2989 := r.ReadArrayStart() - if yyl2989 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl2989, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -38567,12 +46401,12 @@ func (x *NodeDaemonEndpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys2990Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys2990Slc - var yyhl2990 bool = l >= 0 - for yyj2990 := 0; ; yyj2990++ { - if yyhl2990 { - if yyj2990 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -38581,21 +46415,21 @@ func (x *NodeDaemonEndpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys2990Slc = r.DecodeBytes(yys2990Slc, true, true) - yys2990 := string(yys2990Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys2990 { + switch yys3 { case "kubeletEndpoint": if r.TryDecodeAsNil() { x.KubeletEndpoint = DaemonEndpoint{} } else { - yyv2991 := &x.KubeletEndpoint - yyv2991.CodecDecodeSelf(d) + yyv4 := &x.KubeletEndpoint + yyv4.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys2990) - } // end switch yys2990 - } // end for yyj2990 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -38603,16 +46437,16 @@ func (x *NodeDaemonEndpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj2992 int - var yyb2992 bool - var yyhl2992 bool = l >= 0 - yyj2992++ - if yyhl2992 { - yyb2992 = yyj2992 > l + var yyj5 int + var yyb5 bool + var 
yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb2992 = r.CheckBreak() + yyb5 = r.CheckBreak() } - if yyb2992 { + if yyb5 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -38620,21 +46454,21 @@ func (x *NodeDaemonEndpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.KubeletEndpoint = DaemonEndpoint{} } else { - yyv2993 := &x.KubeletEndpoint - yyv2993.CodecDecodeSelf(d) + yyv6 := &x.KubeletEndpoint + yyv6.CodecDecodeSelf(d) } for { - yyj2992++ - if yyhl2992 { - yyb2992 = yyj2992 > l + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb2992 = r.CheckBreak() + yyb5 = r.CheckBreak() } - if yyb2992 { + if yyb5 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj2992-1, "") + z.DecStructFieldNotFound(yyj5-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -38646,33 +46480,33 @@ func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym2994 := z.EncBinary() - _ = yym2994 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2995 := !z.EncBinary() - yy2arr2995 := z.EncBasicHandle().StructToArray - var yyq2995 [10]bool - _, _, _ = yysep2995, yyq2995, yy2arr2995 - const yyr2995 bool = false - var yynn2995 int - if yyr2995 || yy2arr2995 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(10) } else { - yynn2995 = 10 - for _, b := range yyq2995 { + yynn2 = 10 + for _, b := range yyq2 { if b { - yynn2995++ + yynn2++ } } - r.EncodeMapStart(yynn2995) - yynn2995 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr2995 || yy2arr2995 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2997 := z.EncBinary() - _ = yym2997 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.MachineID)) @@ -38681,17 +46515,17 @@ func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("machineID")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym2998 := z.EncBinary() - _ = yym2998 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.MachineID)) } } - if yyr2995 || yy2arr2995 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3000 := z.EncBinary() - _ = yym3000 + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.SystemUUID)) @@ -38700,17 +46534,17 @@ func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("systemUUID")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3001 := z.EncBinary() - _ = yym3001 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.SystemUUID)) } } - if yyr2995 || yy2arr2995 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3003 := z.EncBinary() - _ = yym3003 + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.BootID)) @@ -38719,17 +46553,17 @@ func (x *NodeSystemInfo) CodecEncodeSelf(e 
*codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("bootID")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3004 := z.EncBinary() - _ = yym3004 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.BootID)) } } - if yyr2995 || yy2arr2995 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3006 := z.EncBinary() - _ = yym3006 + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.KernelVersion)) @@ -38738,17 +46572,17 @@ func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kernelVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3007 := z.EncBinary() - _ = yym3007 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.KernelVersion)) } } - if yyr2995 || yy2arr2995 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3009 := z.EncBinary() - _ = yym3009 + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.OSImage)) @@ -38757,17 +46591,17 @@ func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("osImage")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3010 := z.EncBinary() - _ = yym3010 + yym17 := z.EncBinary() + _ = yym17 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.OSImage)) } } - if yyr2995 || yy2arr2995 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3012 := z.EncBinary() - _ = yym3012 + yym19 := z.EncBinary() + _ = yym19 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntimeVersion)) @@ -38776,17 +46610,17 @@ func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("containerRuntimeVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3013 := z.EncBinary() - _ = yym3013 + yym20 := z.EncBinary() + _ = yym20 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntimeVersion)) } } - if yyr2995 || yy2arr2995 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3015 := z.EncBinary() - _ = yym3015 + yym22 := z.EncBinary() + _ = yym22 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.KubeletVersion)) @@ -38795,17 +46629,17 @@ func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kubeletVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3016 := z.EncBinary() - _ = yym3016 + yym23 := z.EncBinary() + _ = yym23 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.KubeletVersion)) } } - if yyr2995 || yy2arr2995 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3018 := z.EncBinary() - _ = yym3018 + yym25 := z.EncBinary() + _ = yym25 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.KubeProxyVersion)) @@ -38814,17 +46648,17 @@ func (x *NodeSystemInfo) CodecEncodeSelf(e 
*codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kubeProxyVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3019 := z.EncBinary() - _ = yym3019 + yym26 := z.EncBinary() + _ = yym26 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.KubeProxyVersion)) } } - if yyr2995 || yy2arr2995 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3021 := z.EncBinary() - _ = yym3021 + yym28 := z.EncBinary() + _ = yym28 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem)) @@ -38833,17 +46667,17 @@ func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("operatingSystem")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3022 := z.EncBinary() - _ = yym3022 + yym29 := z.EncBinary() + _ = yym29 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem)) } } - if yyr2995 || yy2arr2995 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3024 := z.EncBinary() - _ = yym3024 + yym31 := z.EncBinary() + _ = yym31 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Architecture)) @@ -38852,14 +46686,14 @@ func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("architecture")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3025 := z.EncBinary() - _ = yym3025 + yym32 := z.EncBinary() + _ = yym32 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Architecture)) } } - if yyr2995 || yy2arr2995 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -38872,25 +46706,25 @@ func (x *NodeSystemInfo) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3026 := z.DecBinary() - _ = yym3026 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3027 := r.ContainerType() - if yyct3027 == codecSelferValueTypeMap1234 { - yyl3027 := r.ReadMapStart() - if yyl3027 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3027, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3027 == codecSelferValueTypeArray1234 { - yyl3027 := r.ReadArrayStart() - if yyl3027 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3027, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -38902,12 +46736,12 @@ func (x *NodeSystemInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3028Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3028Slc - var yyhl3028 bool = l >= 0 - for yyj3028 := 0; ; yyj3028++ { - if yyhl3028 { - if yyj3028 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 
bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -38916,74 +46750,134 @@ func (x *NodeSystemInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3028Slc = r.DecodeBytes(yys3028Slc, true, true) - yys3028 := string(yys3028Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3028 { + switch yys3 { case "machineID": if r.TryDecodeAsNil() { x.MachineID = "" } else { - x.MachineID = string(r.DecodeString()) + yyv4 := &x.MachineID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "systemUUID": if r.TryDecodeAsNil() { x.SystemUUID = "" } else { - x.SystemUUID = string(r.DecodeString()) + yyv6 := &x.SystemUUID + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "bootID": if r.TryDecodeAsNil() { x.BootID = "" } else { - x.BootID = string(r.DecodeString()) + yyv8 := &x.BootID + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } case "kernelVersion": if r.TryDecodeAsNil() { x.KernelVersion = "" } else { - x.KernelVersion = string(r.DecodeString()) + yyv10 := &x.KernelVersion + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } case "osImage": if r.TryDecodeAsNil() { x.OSImage = "" } else { - x.OSImage = string(r.DecodeString()) + yyv12 := &x.OSImage + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } case "containerRuntimeVersion": if r.TryDecodeAsNil() { x.ContainerRuntimeVersion = "" } else { - x.ContainerRuntimeVersion = string(r.DecodeString()) + yyv14 := &x.ContainerRuntimeVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } case "kubeletVersion": if r.TryDecodeAsNil() { x.KubeletVersion = "" } else { - x.KubeletVersion = string(r.DecodeString()) + yyv16 := &x.KubeletVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } } case "kubeProxyVersion": if r.TryDecodeAsNil() { x.KubeProxyVersion = "" } else { - x.KubeProxyVersion = string(r.DecodeString()) + yyv18 := &x.KubeProxyVersion + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } } case "operatingSystem": if r.TryDecodeAsNil() { x.OperatingSystem = "" } else { - x.OperatingSystem = string(r.DecodeString()) + yyv20 := &x.OperatingSystem + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(yyv20)) = r.DecodeString() + } } case "architecture": if r.TryDecodeAsNil() { x.Architecture = "" } else { - x.Architecture = string(r.DecodeString()) + yyv22 := &x.Architecture + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*string)(yyv22)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys3028) - } // end switch yys3028 - } // end for yyj3028 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -38991,16 +46885,16 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3039 int - var yyb3039 bool - var yyhl3039 bool = l >= 0 - yyj3039++ - if 
yyhl3039 { - yyb3039 = yyj3039 > l + var yyj24 int + var yyb24 bool + var yyhl24 bool = l >= 0 + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3039 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3039 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -39008,15 +46902,21 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.MachineID = "" } else { - x.MachineID = string(r.DecodeString()) + yyv25 := &x.MachineID + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } } - yyj3039++ - if yyhl3039 { - yyb3039 = yyj3039 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3039 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3039 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -39024,15 +46924,21 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.SystemUUID = "" } else { - x.SystemUUID = string(r.DecodeString()) + yyv27 := &x.SystemUUID + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } } - yyj3039++ - if yyhl3039 { - yyb3039 = yyj3039 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3039 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3039 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -39040,15 +46946,21 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.BootID = "" } else { - x.BootID = string(r.DecodeString()) + yyv29 := &x.BootID + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } } - yyj3039++ - if yyhl3039 { - yyb3039 = yyj3039 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3039 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3039 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -39056,15 +46968,21 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.KernelVersion = "" } else { - x.KernelVersion = string(r.DecodeString()) + yyv31 := &x.KernelVersion + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } } - yyj3039++ - if yyhl3039 { - yyb3039 = yyj3039 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3039 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3039 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -39072,15 +46990,21 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.OSImage = "" } else { - x.OSImage = string(r.DecodeString()) + yyv33 := &x.OSImage + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*string)(yyv33)) = r.DecodeString() + } } - yyj3039++ - if yyhl3039 { - yyb3039 = yyj3039 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3039 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3039 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -39088,15 +47012,21 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ContainerRuntimeVersion = "" } else { - x.ContainerRuntimeVersion = string(r.DecodeString()) + yyv35 := &x.ContainerRuntimeVersion + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + 
*((*string)(yyv35)) = r.DecodeString() + } } - yyj3039++ - if yyhl3039 { - yyb3039 = yyj3039 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3039 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3039 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -39104,15 +47034,21 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.KubeletVersion = "" } else { - x.KubeletVersion = string(r.DecodeString()) + yyv37 := &x.KubeletVersion + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*string)(yyv37)) = r.DecodeString() + } } - yyj3039++ - if yyhl3039 { - yyb3039 = yyj3039 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3039 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3039 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -39120,15 +47056,21 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.KubeProxyVersion = "" } else { - x.KubeProxyVersion = string(r.DecodeString()) + yyv39 := &x.KubeProxyVersion + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*string)(yyv39)) = r.DecodeString() + } } - yyj3039++ - if yyhl3039 { - yyb3039 = yyj3039 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3039 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3039 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -39136,15 +47078,21 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.OperatingSystem = "" } else { - x.OperatingSystem = string(r.DecodeString()) + yyv41 := &x.OperatingSystem + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*string)(yyv41)) = r.DecodeString() + } } - yyj3039++ - if yyhl3039 { - yyb3039 = yyj3039 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3039 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3039 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -39152,20 +47100,26 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Architecture = "" } else { - x.Architecture = string(r.DecodeString()) + yyv43 := &x.Architecture + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*string)(yyv43)) = r.DecodeString() + } } for { - yyj3039++ - if yyhl3039 { - yyb3039 = yyj3039 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3039 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3039 { + if yyb24 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3039-1, "") + z.DecStructFieldNotFound(yyj24-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -39177,42 +47131,42 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3050 := z.EncBinary() - _ = yym3050 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3051 := !z.EncBinary() - yy2arr3051 := z.EncBasicHandle().StructToArray - var yyq3051 [10]bool - _, _, _ = yysep3051, yyq3051, yy2arr3051 - const yyr3051 bool = false - yyq3051[0] = len(x.Capacity) != 0 - yyq3051[1] = len(x.Allocatable) != 0 - yyq3051[2] = x.Phase != "" - yyq3051[3] = len(x.Conditions) != 0 - yyq3051[4] = len(x.Addresses) != 0 - yyq3051[5] = true - yyq3051[6] = true - yyq3051[7] = len(x.Images) != 0 - 
yyq3051[8] = len(x.VolumesInUse) != 0 - yyq3051[9] = len(x.VolumesAttached) != 0 - var yynn3051 int - if yyr3051 || yy2arr3051 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Capacity) != 0 + yyq2[1] = len(x.Allocatable) != 0 + yyq2[2] = x.Phase != "" + yyq2[3] = len(x.Conditions) != 0 + yyq2[4] = len(x.Addresses) != 0 + yyq2[5] = true + yyq2[6] = true + yyq2[7] = len(x.Images) != 0 + yyq2[8] = len(x.VolumesInUse) != 0 + yyq2[9] = len(x.VolumesAttached) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(10) } else { - yynn3051 = 0 - for _, b := range yyq3051 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn3051++ + yynn2++ } } - r.EncodeMapStart(yynn3051) - yynn3051 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3051 || yy2arr3051 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3051[0] { + if yyq2[0] { if x.Capacity == nil { r.EncodeNil() } else { @@ -39222,7 +47176,7 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq3051[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("capacity")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -39233,9 +47187,9 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr3051 || yy2arr3051 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3051[1] { + if yyq2[1] { if x.Allocatable == nil { r.EncodeNil() } else { @@ -39245,7 +47199,7 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq3051[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("allocatable")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -39256,29 +47210,29 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr3051 || yy2arr3051 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3051[2] { + if yyq2[2] { x.Phase.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3051[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("phase")) z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Phase.CodecEncodeSelf(e) } } - if yyr3051 || yy2arr3051 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3051[3] { + if yyq2[3] { if x.Conditions == nil { r.EncodeNil() } else { - yym3056 := z.EncBinary() - _ = yym3056 + yym13 := z.EncBinary() + _ = yym13 if false { } else { h.encSliceNodeCondition(([]NodeCondition)(x.Conditions), e) @@ -39288,15 +47242,15 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq3051[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("conditions")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Conditions == nil { r.EncodeNil() } else { - yym3057 := z.EncBinary() - _ = yym3057 + yym14 := z.EncBinary() + _ = yym14 if false { } else { h.encSliceNodeCondition(([]NodeCondition)(x.Conditions), e) @@ -39304,14 +47258,14 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr3051 || yy2arr3051 { + if yyr2 || 
yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3051[4] { + if yyq2[4] { if x.Addresses == nil { r.EncodeNil() } else { - yym3059 := z.EncBinary() - _ = yym3059 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceNodeAddress(([]NodeAddress)(x.Addresses), e) @@ -39321,15 +47275,15 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq3051[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("addresses")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Addresses == nil { r.EncodeNil() } else { - yym3060 := z.EncBinary() - _ = yym3060 + yym17 := z.EncBinary() + _ = yym17 if false { } else { h.encSliceNodeAddress(([]NodeAddress)(x.Addresses), e) @@ -39337,48 +47291,48 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr3051 || yy2arr3051 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3051[5] { - yy3062 := &x.DaemonEndpoints - yy3062.CodecEncodeSelf(e) + if yyq2[5] { + yy19 := &x.DaemonEndpoints + yy19.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq3051[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("daemonEndpoints")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3063 := &x.DaemonEndpoints - yy3063.CodecEncodeSelf(e) + yy21 := &x.DaemonEndpoints + yy21.CodecEncodeSelf(e) } } - if yyr3051 || yy2arr3051 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3051[6] { - yy3065 := &x.NodeInfo - yy3065.CodecEncodeSelf(e) + if yyq2[6] { + yy24 := &x.NodeInfo + yy24.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq3051[6] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nodeInfo")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3066 := &x.NodeInfo - yy3066.CodecEncodeSelf(e) + yy26 := &x.NodeInfo + yy26.CodecEncodeSelf(e) } } - if yyr3051 || yy2arr3051 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3051[7] { + if yyq2[7] { if x.Images == nil { r.EncodeNil() } else { - yym3068 := z.EncBinary() - _ = yym3068 + yym29 := z.EncBinary() + _ = yym29 if false { } else { h.encSliceContainerImage(([]ContainerImage)(x.Images), e) @@ -39388,15 +47342,15 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq3051[7] { + if yyq2[7] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("images")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Images == nil { r.EncodeNil() } else { - yym3069 := z.EncBinary() - _ = yym3069 + yym30 := z.EncBinary() + _ = yym30 if false { } else { h.encSliceContainerImage(([]ContainerImage)(x.Images), e) @@ -39404,14 +47358,14 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr3051 || yy2arr3051 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3051[8] { + if yyq2[8] { if x.VolumesInUse == nil { r.EncodeNil() } else { - yym3071 := z.EncBinary() - _ = yym3071 + yym32 := z.EncBinary() + _ = yym32 if false { } else { h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e) @@ -39421,15 +47375,15 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } 
else { - if yyq3051[8] { + if yyq2[8] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("volumesInUse")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.VolumesInUse == nil { r.EncodeNil() } else { - yym3072 := z.EncBinary() - _ = yym3072 + yym33 := z.EncBinary() + _ = yym33 if false { } else { h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e) @@ -39437,14 +47391,14 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr3051 || yy2arr3051 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3051[9] { + if yyq2[9] { if x.VolumesAttached == nil { r.EncodeNil() } else { - yym3074 := z.EncBinary() - _ = yym3074 + yym35 := z.EncBinary() + _ = yym35 if false { } else { h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e) @@ -39454,15 +47408,15 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq3051[9] { + if yyq2[9] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("volumesAttached")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.VolumesAttached == nil { r.EncodeNil() } else { - yym3075 := z.EncBinary() - _ = yym3075 + yym36 := z.EncBinary() + _ = yym36 if false { } else { h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e) @@ -39470,7 +47424,7 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr3051 || yy2arr3051 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -39483,25 +47437,25 @@ func (x *NodeStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3076 := z.DecBinary() - _ = yym3076 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3077 := r.ContainerType() - if yyct3077 == codecSelferValueTypeMap1234 { - yyl3077 := r.ReadMapStart() - if yyl3077 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3077, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3077 == codecSelferValueTypeArray1234 { - yyl3077 := r.ReadArrayStart() - if yyl3077 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3077, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -39513,12 +47467,12 @@ func (x *NodeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3078Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3078Slc - var yyhl3078 bool = l >= 0 - for yyj3078 := 0; ; yyj3078++ { - if yyhl3078 { - if yyj3078 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -39527,108 +47481,109 @@ func (x *NodeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3078Slc = 
r.DecodeBytes(yys3078Slc, true, true) - yys3078 := string(yys3078Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3078 { + switch yys3 { case "capacity": if r.TryDecodeAsNil() { x.Capacity = nil } else { - yyv3079 := &x.Capacity - yyv3079.CodecDecodeSelf(d) + yyv4 := &x.Capacity + yyv4.CodecDecodeSelf(d) } case "allocatable": if r.TryDecodeAsNil() { x.Allocatable = nil } else { - yyv3080 := &x.Allocatable - yyv3080.CodecDecodeSelf(d) + yyv5 := &x.Allocatable + yyv5.CodecDecodeSelf(d) } case "phase": if r.TryDecodeAsNil() { x.Phase = "" } else { - x.Phase = NodePhase(r.DecodeString()) + yyv6 := &x.Phase + yyv6.CodecDecodeSelf(d) } case "conditions": if r.TryDecodeAsNil() { x.Conditions = nil } else { - yyv3082 := &x.Conditions - yym3083 := z.DecBinary() - _ = yym3083 + yyv7 := &x.Conditions + yym8 := z.DecBinary() + _ = yym8 if false { } else { - h.decSliceNodeCondition((*[]NodeCondition)(yyv3082), d) + h.decSliceNodeCondition((*[]NodeCondition)(yyv7), d) } } case "addresses": if r.TryDecodeAsNil() { x.Addresses = nil } else { - yyv3084 := &x.Addresses - yym3085 := z.DecBinary() - _ = yym3085 + yyv9 := &x.Addresses + yym10 := z.DecBinary() + _ = yym10 if false { } else { - h.decSliceNodeAddress((*[]NodeAddress)(yyv3084), d) + h.decSliceNodeAddress((*[]NodeAddress)(yyv9), d) } } case "daemonEndpoints": if r.TryDecodeAsNil() { x.DaemonEndpoints = NodeDaemonEndpoints{} } else { - yyv3086 := &x.DaemonEndpoints - yyv3086.CodecDecodeSelf(d) + yyv11 := &x.DaemonEndpoints + yyv11.CodecDecodeSelf(d) } case "nodeInfo": if r.TryDecodeAsNil() { x.NodeInfo = NodeSystemInfo{} } else { - yyv3087 := &x.NodeInfo - yyv3087.CodecDecodeSelf(d) + yyv12 := &x.NodeInfo + yyv12.CodecDecodeSelf(d) } case "images": if r.TryDecodeAsNil() { x.Images = nil } else { - yyv3088 := &x.Images - yym3089 := z.DecBinary() - _ = yym3089 + yyv13 := &x.Images + yym14 := z.DecBinary() + _ = yym14 if false { } else { - h.decSliceContainerImage((*[]ContainerImage)(yyv3088), d) + h.decSliceContainerImage((*[]ContainerImage)(yyv13), d) } } case "volumesInUse": if r.TryDecodeAsNil() { x.VolumesInUse = nil } else { - yyv3090 := &x.VolumesInUse - yym3091 := z.DecBinary() - _ = yym3091 + yyv15 := &x.VolumesInUse + yym16 := z.DecBinary() + _ = yym16 if false { } else { - h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv3090), d) + h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv15), d) } } case "volumesAttached": if r.TryDecodeAsNil() { x.VolumesAttached = nil } else { - yyv3092 := &x.VolumesAttached - yym3093 := z.DecBinary() - _ = yym3093 + yyv17 := &x.VolumesAttached + yym18 := z.DecBinary() + _ = yym18 if false { } else { - h.decSliceAttachedVolume((*[]AttachedVolume)(yyv3092), d) + h.decSliceAttachedVolume((*[]AttachedVolume)(yyv17), d) } } default: - z.DecStructFieldNotFound(-1, yys3078) - } // end switch yys3078 - } // end for yyj3078 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -39636,16 +47591,16 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3094 int - var yyb3094 bool - var yyhl3094 bool = l >= 0 - yyj3094++ - if yyhl3094 { - yyb3094 = yyj3094 > l + var yyj19 int + var yyb19 bool + var yyhl19 bool = l >= 0 + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb3094 = r.CheckBreak() + yyb19 = r.CheckBreak() } 
- if yyb3094 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -39653,16 +47608,16 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Capacity = nil } else { - yyv3095 := &x.Capacity - yyv3095.CodecDecodeSelf(d) + yyv20 := &x.Capacity + yyv20.CodecDecodeSelf(d) } - yyj3094++ - if yyhl3094 { - yyb3094 = yyj3094 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb3094 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb3094 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -39670,16 +47625,16 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Allocatable = nil } else { - yyv3096 := &x.Allocatable - yyv3096.CodecDecodeSelf(d) + yyv21 := &x.Allocatable + yyv21.CodecDecodeSelf(d) } - yyj3094++ - if yyhl3094 { - yyb3094 = yyj3094 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb3094 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb3094 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -39687,15 +47642,16 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Phase = "" } else { - x.Phase = NodePhase(r.DecodeString()) + yyv22 := &x.Phase + yyv22.CodecDecodeSelf(d) } - yyj3094++ - if yyhl3094 { - yyb3094 = yyj3094 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb3094 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb3094 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -39703,21 +47659,21 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Conditions = nil } else { - yyv3098 := &x.Conditions - yym3099 := z.DecBinary() - _ = yym3099 + yyv23 := &x.Conditions + yym24 := z.DecBinary() + _ = yym24 if false { } else { - h.decSliceNodeCondition((*[]NodeCondition)(yyv3098), d) + h.decSliceNodeCondition((*[]NodeCondition)(yyv23), d) } } - yyj3094++ - if yyhl3094 { - yyb3094 = yyj3094 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb3094 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb3094 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -39725,21 +47681,21 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Addresses = nil } else { - yyv3100 := &x.Addresses - yym3101 := z.DecBinary() - _ = yym3101 + yyv25 := &x.Addresses + yym26 := z.DecBinary() + _ = yym26 if false { } else { - h.decSliceNodeAddress((*[]NodeAddress)(yyv3100), d) + h.decSliceNodeAddress((*[]NodeAddress)(yyv25), d) } } - yyj3094++ - if yyhl3094 { - yyb3094 = yyj3094 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb3094 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb3094 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -39747,16 +47703,16 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.DaemonEndpoints = NodeDaemonEndpoints{} } else { - yyv3102 := &x.DaemonEndpoints - yyv3102.CodecDecodeSelf(d) + yyv27 := &x.DaemonEndpoints + yyv27.CodecDecodeSelf(d) } - yyj3094++ - if yyhl3094 { - yyb3094 = yyj3094 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb3094 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb3094 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -39764,16 +47720,16 @@ func 
(x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.NodeInfo = NodeSystemInfo{} } else { - yyv3103 := &x.NodeInfo - yyv3103.CodecDecodeSelf(d) + yyv28 := &x.NodeInfo + yyv28.CodecDecodeSelf(d) } - yyj3094++ - if yyhl3094 { - yyb3094 = yyj3094 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb3094 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb3094 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -39781,21 +47737,21 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Images = nil } else { - yyv3104 := &x.Images - yym3105 := z.DecBinary() - _ = yym3105 + yyv29 := &x.Images + yym30 := z.DecBinary() + _ = yym30 if false { } else { - h.decSliceContainerImage((*[]ContainerImage)(yyv3104), d) + h.decSliceContainerImage((*[]ContainerImage)(yyv29), d) } } - yyj3094++ - if yyhl3094 { - yyb3094 = yyj3094 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb3094 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb3094 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -39803,21 +47759,21 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.VolumesInUse = nil } else { - yyv3106 := &x.VolumesInUse - yym3107 := z.DecBinary() - _ = yym3107 + yyv31 := &x.VolumesInUse + yym32 := z.DecBinary() + _ = yym32 if false { } else { - h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv3106), d) + h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv31), d) } } - yyj3094++ - if yyhl3094 { - yyb3094 = yyj3094 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb3094 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb3094 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -39825,26 +47781,26 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.VolumesAttached = nil } else { - yyv3108 := &x.VolumesAttached - yym3109 := z.DecBinary() - _ = yym3109 + yyv33 := &x.VolumesAttached + yym34 := z.DecBinary() + _ = yym34 if false { } else { - h.decSliceAttachedVolume((*[]AttachedVolume)(yyv3108), d) + h.decSliceAttachedVolume((*[]AttachedVolume)(yyv33), d) } } for { - yyj3094++ - if yyhl3094 { - yyb3094 = yyj3094 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb3094 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb3094 { + if yyb19 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3094-1, "") + z.DecStructFieldNotFound(yyj19-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -39853,8 +47809,8 @@ func (x UniqueVolumeName) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym3110 := z.EncBinary() - _ = yym3110 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -39866,8 +47822,8 @@ func (x *UniqueVolumeName) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3111 := z.DecBinary() - _ = yym3111 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -39882,30 +47838,30 @@ func (x *AttachedVolume) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3112 := z.EncBinary() - _ = yym3112 + yym1 := z.EncBinary() + _ = yym1 if false { } else if 
z.HasExtensions() && z.EncExt(x) { } else { - yysep3113 := !z.EncBinary() - yy2arr3113 := z.EncBasicHandle().StructToArray - var yyq3113 [2]bool - _, _, _ = yysep3113, yyq3113, yy2arr3113 - const yyr3113 bool = false - var yynn3113 int - if yyr3113 || yy2arr3113 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn3113 = 2 - for _, b := range yyq3113 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn3113++ + yynn2++ } } - r.EncodeMapStart(yynn3113) - yynn3113 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3113 || yy2arr3113 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) x.Name.CodecEncodeSelf(e) } else { @@ -39914,10 +47870,10 @@ func (x *AttachedVolume) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Name.CodecEncodeSelf(e) } - if yyr3113 || yy2arr3113 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3116 := z.EncBinary() - _ = yym3116 + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath)) @@ -39926,14 +47882,14 @@ func (x *AttachedVolume) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("devicePath")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3117 := z.EncBinary() - _ = yym3117 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath)) } } - if yyr3113 || yy2arr3113 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -39946,25 +47902,25 @@ func (x *AttachedVolume) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3118 := z.DecBinary() - _ = yym3118 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3119 := r.ContainerType() - if yyct3119 == codecSelferValueTypeMap1234 { - yyl3119 := r.ReadMapStart() - if yyl3119 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3119, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3119 == codecSelferValueTypeArray1234 { - yyl3119 := r.ReadArrayStart() - if yyl3119 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3119, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -39976,12 +47932,12 @@ func (x *AttachedVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3120Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3120Slc - var yyhl3120 bool = l >= 0 - for yyj3120 := 0; ; yyj3120++ { - if yyhl3120 { - if yyj3120 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } 
else { @@ -39990,26 +47946,33 @@ func (x *AttachedVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3120Slc = r.DecodeBytes(yys3120Slc, true, true) - yys3120 := string(yys3120Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3120 { + switch yys3 { case "name": if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = UniqueVolumeName(r.DecodeString()) + yyv4 := &x.Name + yyv4.CodecDecodeSelf(d) } case "devicePath": if r.TryDecodeAsNil() { x.DevicePath = "" } else { - x.DevicePath = string(r.DecodeString()) + yyv5 := &x.DevicePath + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys3120) - } // end switch yys3120 - } // end for yyj3120 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -40017,16 +47980,16 @@ func (x *AttachedVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3123 int - var yyb3123 bool - var yyhl3123 bool = l >= 0 - yyj3123++ - if yyhl3123 { - yyb3123 = yyj3123 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb3123 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb3123 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -40034,15 +47997,16 @@ func (x *AttachedVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = UniqueVolumeName(r.DecodeString()) + yyv8 := &x.Name + yyv8.CodecDecodeSelf(d) } - yyj3123++ - if yyhl3123 { - yyb3123 = yyj3123 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb3123 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb3123 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -40050,20 +48014,26 @@ func (x *AttachedVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.DevicePath = "" } else { - x.DevicePath = string(r.DecodeString()) + yyv9 := &x.DevicePath + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } } for { - yyj3123++ - if yyhl3123 { - yyb3123 = yyj3123 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb3123 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb3123 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3123-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -40075,38 +48045,38 @@ func (x *AvoidPods) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3126 := z.EncBinary() - _ = yym3126 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3127 := !z.EncBinary() - yy2arr3127 := z.EncBasicHandle().StructToArray - var yyq3127 [1]bool - _, _, _ = yysep3127, yyq3127, yy2arr3127 - const yyr3127 bool = false - yyq3127[0] = len(x.PreferAvoidPods) != 0 - var yynn3127 int - if yyr3127 || yy2arr3127 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.PreferAvoidPods) != 0 + 
var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn3127 = 0 - for _, b := range yyq3127 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn3127++ + yynn2++ } } - r.EncodeMapStart(yynn3127) - yynn3127 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3127 || yy2arr3127 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3127[0] { + if yyq2[0] { if x.PreferAvoidPods == nil { r.EncodeNil() } else { - yym3129 := z.EncBinary() - _ = yym3129 + yym4 := z.EncBinary() + _ = yym4 if false { } else { h.encSlicePreferAvoidPodsEntry(([]PreferAvoidPodsEntry)(x.PreferAvoidPods), e) @@ -40116,15 +48086,15 @@ func (x *AvoidPods) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq3127[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("preferAvoidPods")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.PreferAvoidPods == nil { r.EncodeNil() } else { - yym3130 := z.EncBinary() - _ = yym3130 + yym5 := z.EncBinary() + _ = yym5 if false { } else { h.encSlicePreferAvoidPodsEntry(([]PreferAvoidPodsEntry)(x.PreferAvoidPods), e) @@ -40132,7 +48102,7 @@ func (x *AvoidPods) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr3127 || yy2arr3127 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -40145,25 +48115,25 @@ func (x *AvoidPods) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3131 := z.DecBinary() - _ = yym3131 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3132 := r.ContainerType() - if yyct3132 == codecSelferValueTypeMap1234 { - yyl3132 := r.ReadMapStart() - if yyl3132 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3132, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3132 == codecSelferValueTypeArray1234 { - yyl3132 := r.ReadArrayStart() - if yyl3132 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3132, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -40175,12 +48145,12 @@ func (x *AvoidPods) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3133Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3133Slc - var yyhl3133 bool = l >= 0 - for yyj3133 := 0; ; yyj3133++ { - if yyhl3133 { - if yyj3133 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -40189,26 +48159,26 @@ func (x *AvoidPods) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3133Slc = r.DecodeBytes(yys3133Slc, true, true) - yys3133 := string(yys3133Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3133 { + switch yys3 { case 
"preferAvoidPods": if r.TryDecodeAsNil() { x.PreferAvoidPods = nil } else { - yyv3134 := &x.PreferAvoidPods - yym3135 := z.DecBinary() - _ = yym3135 + yyv4 := &x.PreferAvoidPods + yym5 := z.DecBinary() + _ = yym5 if false { } else { - h.decSlicePreferAvoidPodsEntry((*[]PreferAvoidPodsEntry)(yyv3134), d) + h.decSlicePreferAvoidPodsEntry((*[]PreferAvoidPodsEntry)(yyv4), d) } } default: - z.DecStructFieldNotFound(-1, yys3133) - } // end switch yys3133 - } // end for yyj3133 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -40216,16 +48186,16 @@ func (x *AvoidPods) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3136 int - var yyb3136 bool - var yyhl3136 bool = l >= 0 - yyj3136++ - if yyhl3136 { - yyb3136 = yyj3136 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb3136 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb3136 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -40233,26 +48203,26 @@ func (x *AvoidPods) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.PreferAvoidPods = nil } else { - yyv3137 := &x.PreferAvoidPods - yym3138 := z.DecBinary() - _ = yym3138 + yyv7 := &x.PreferAvoidPods + yym8 := z.DecBinary() + _ = yym8 if false { } else { - h.decSlicePreferAvoidPodsEntry((*[]PreferAvoidPodsEntry)(yyv3137), d) + h.decSlicePreferAvoidPodsEntry((*[]PreferAvoidPodsEntry)(yyv7), d) } } for { - yyj3136++ - if yyhl3136 { - yyb3136 = yyj3136 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb3136 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb3136 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3136-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -40264,85 +48234,85 @@ func (x *PreferAvoidPodsEntry) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3139 := z.EncBinary() - _ = yym3139 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3140 := !z.EncBinary() - yy2arr3140 := z.EncBasicHandle().StructToArray - var yyq3140 [4]bool - _, _, _ = yysep3140, yyq3140, yy2arr3140 - const yyr3140 bool = false - yyq3140[1] = true - yyq3140[2] = x.Reason != "" - yyq3140[3] = x.Message != "" - var yynn3140 int - if yyr3140 || yy2arr3140 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = true + yyq2[2] = x.Reason != "" + yyq2[3] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn3140 = 1 - for _, b := range yyq3140 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn3140++ + yynn2++ } } - r.EncodeMapStart(yynn3140) - yynn3140 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3140 || yy2arr3140 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy3142 := &x.PodSignature - yy3142.CodecEncodeSelf(e) + yy4 := &x.PodSignature + yy4.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("podSignature")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3143 := &x.PodSignature - 
yy3143.CodecEncodeSelf(e) + yy6 := &x.PodSignature + yy6.CodecEncodeSelf(e) } - if yyr3140 || yy2arr3140 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3140[1] { - yy3145 := &x.EvictionTime - yym3146 := z.EncBinary() - _ = yym3146 + if yyq2[1] { + yy9 := &x.EvictionTime + yym10 := z.EncBinary() + _ = yym10 if false { - } else if z.HasExtensions() && z.EncExt(yy3145) { - } else if yym3146 { - z.EncBinaryMarshal(yy3145) - } else if !yym3146 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3145) + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if yym10 { + z.EncBinaryMarshal(yy9) + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) } else { - z.EncFallback(yy3145) + z.EncFallback(yy9) } } else { r.EncodeNil() } } else { - if yyq3140[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("evictionTime")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3147 := &x.EvictionTime - yym3148 := z.EncBinary() - _ = yym3148 + yy11 := &x.EvictionTime + yym12 := z.EncBinary() + _ = yym12 if false { - } else if z.HasExtensions() && z.EncExt(yy3147) { - } else if yym3148 { - z.EncBinaryMarshal(yy3147) - } else if !yym3148 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3147) + } else if z.HasExtensions() && z.EncExt(yy11) { + } else if yym12 { + z.EncBinaryMarshal(yy11) + } else if !yym12 && z.IsJSONHandle() { + z.EncJSONMarshal(yy11) } else { - z.EncFallback(yy3147) + z.EncFallback(yy11) } } } - if yyr3140 || yy2arr3140 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3140[2] { - yym3150 := z.EncBinary() - _ = yym3150 + if yyq2[2] { + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) @@ -40351,23 +48321,23 @@ func (x *PreferAvoidPodsEntry) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3140[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("reason")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3151 := z.EncBinary() - _ = yym3151 + yym15 := z.EncBinary() + _ = yym15 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) } } } - if yyr3140 || yy2arr3140 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3140[3] { - yym3153 := z.EncBinary() - _ = yym3153 + if yyq2[3] { + yym17 := z.EncBinary() + _ = yym17 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) @@ -40376,19 +48346,19 @@ func (x *PreferAvoidPodsEntry) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3140[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("message")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3154 := z.EncBinary() - _ = yym3154 + yym18 := z.EncBinary() + _ = yym18 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) } } } - if yyr3140 || yy2arr3140 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -40401,25 +48371,25 @@ func (x *PreferAvoidPodsEntry) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3155 := z.DecBinary() - _ = 
yym3155 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3156 := r.ContainerType() - if yyct3156 == codecSelferValueTypeMap1234 { - yyl3156 := r.ReadMapStart() - if yyl3156 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3156, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3156 == codecSelferValueTypeArray1234 { - yyl3156 := r.ReadArrayStart() - if yyl3156 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3156, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -40431,12 +48401,12 @@ func (x *PreferAvoidPodsEntry) codecDecodeSelfFromMap(l int, d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3157Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3157Slc - var yyhl3157 bool = l >= 0 - for yyj3157 := 0; ; yyj3157++ { - if yyhl3157 { - if yyj3157 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -40445,50 +48415,62 @@ func (x *PreferAvoidPodsEntry) codecDecodeSelfFromMap(l int, d *codec1978.Decode } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3157Slc = r.DecodeBytes(yys3157Slc, true, true) - yys3157 := string(yys3157Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3157 { + switch yys3 { case "podSignature": if r.TryDecodeAsNil() { x.PodSignature = PodSignature{} } else { - yyv3158 := &x.PodSignature - yyv3158.CodecDecodeSelf(d) + yyv4 := &x.PodSignature + yyv4.CodecDecodeSelf(d) } case "evictionTime": if r.TryDecodeAsNil() { - x.EvictionTime = pkg2_unversioned.Time{} + x.EvictionTime = pkg2_v1.Time{} } else { - yyv3159 := &x.EvictionTime - yym3160 := z.DecBinary() - _ = yym3160 + yyv5 := &x.EvictionTime + yym6 := z.DecBinary() + _ = yym6 if false { - } else if z.HasExtensions() && z.DecExt(yyv3159) { - } else if yym3160 { - z.DecBinaryUnmarshal(yyv3159) - } else if !yym3160 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv3159) + } else if z.HasExtensions() && z.DecExt(yyv5) { + } else if yym6 { + z.DecBinaryUnmarshal(yyv5) + } else if !yym6 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv5) } else { - z.DecFallback(yyv3159, false) + z.DecFallback(yyv5, false) } } case "reason": if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv7 := &x.Reason + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } } case "message": if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv9 := &x.Message + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys3157) - } // end switch yys3157 - } // end for yyj3157 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -40496,16 +48478,16 @@ func (x *PreferAvoidPodsEntry) 
codecDecodeSelfFromArray(l int, d *codec1978.Deco var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3163 int - var yyb3163 bool - var yyhl3163 bool = l >= 0 - yyj3163++ - if yyhl3163 { - yyb3163 = yyj3163 > l + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb3163 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb3163 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -40513,43 +48495,43 @@ func (x *PreferAvoidPodsEntry) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.PodSignature = PodSignature{} } else { - yyv3164 := &x.PodSignature - yyv3164.CodecDecodeSelf(d) + yyv12 := &x.PodSignature + yyv12.CodecDecodeSelf(d) } - yyj3163++ - if yyhl3163 { - yyb3163 = yyj3163 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb3163 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb3163 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.EvictionTime = pkg2_unversioned.Time{} + x.EvictionTime = pkg2_v1.Time{} } else { - yyv3165 := &x.EvictionTime - yym3166 := z.DecBinary() - _ = yym3166 + yyv13 := &x.EvictionTime + yym14 := z.DecBinary() + _ = yym14 if false { - } else if z.HasExtensions() && z.DecExt(yyv3165) { - } else if yym3166 { - z.DecBinaryUnmarshal(yyv3165) - } else if !yym3166 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv3165) + } else if z.HasExtensions() && z.DecExt(yyv13) { + } else if yym14 { + z.DecBinaryUnmarshal(yyv13) + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) } else { - z.DecFallback(yyv3165, false) + z.DecFallback(yyv13, false) } } - yyj3163++ - if yyhl3163 { - yyb3163 = yyj3163 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb3163 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb3163 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -40557,15 +48539,21 @@ func (x *PreferAvoidPodsEntry) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv15 := &x.Reason + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj3163++ - if yyhl3163 { - yyb3163 = yyj3163 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb3163 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb3163 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -40573,20 +48561,26 @@ func (x *PreferAvoidPodsEntry) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv17 := &x.Message + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } } for { - yyj3163++ - if yyhl3163 { - yyb3163 = yyj3163 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb3163 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb3163 { + if yyb11 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3163-1, "") + z.DecStructFieldNotFound(yyj11-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -40598,54 +48592,66 @@ func (x *PodSignature) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3169 := z.EncBinary() - _ = yym3169 + yym1 := z.EncBinary() + _ = yym1 
if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3170 := !z.EncBinary() - yy2arr3170 := z.EncBasicHandle().StructToArray - var yyq3170 [1]bool - _, _, _ = yysep3170, yyq3170, yy2arr3170 - const yyr3170 bool = false - yyq3170[0] = x.PodController != nil - var yynn3170 int - if yyr3170 || yy2arr3170 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.PodController != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn3170 = 0 - for _, b := range yyq3170 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn3170++ + yynn2++ } } - r.EncodeMapStart(yynn3170) - yynn3170 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3170 || yy2arr3170 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3170[0] { + if yyq2[0] { if x.PodController == nil { r.EncodeNil() } else { - x.PodController.CodecEncodeSelf(e) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.PodController) { + } else { + z.EncFallback(x.PodController) + } } } else { r.EncodeNil() } } else { - if yyq3170[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("podController")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.PodController == nil { r.EncodeNil() } else { - x.PodController.CodecEncodeSelf(e) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.PodController) { + } else { + z.EncFallback(x.PodController) + } } } } - if yyr3170 || yy2arr3170 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -40658,25 +48664,25 @@ func (x *PodSignature) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3172 := z.DecBinary() - _ = yym3172 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3173 := r.ContainerType() - if yyct3173 == codecSelferValueTypeMap1234 { - yyl3173 := r.ReadMapStart() - if yyl3173 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3173, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3173 == codecSelferValueTypeArray1234 { - yyl3173 := r.ReadArrayStart() - if yyl3173 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3173, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -40688,12 +48694,12 @@ func (x *PodSignature) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3174Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3174Slc - var yyhl3174 bool = l >= 0 - for yyj3174 := 0; ; yyj3174++ { - if yyhl3174 { - if yyj3174 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -40702,10 +48708,10 @@ func (x 
*PodSignature) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3174Slc = r.DecodeBytes(yys3174Slc, true, true) - yys3174 := string(yys3174Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3174 { + switch yys3 { case "podController": if r.TryDecodeAsNil() { if x.PodController != nil { @@ -40713,14 +48719,20 @@ func (x *PodSignature) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } else { if x.PodController == nil { - x.PodController = new(OwnerReference) + x.PodController = new(pkg2_v1.OwnerReference) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.PodController) { + } else { + z.DecFallback(x.PodController, false) } - x.PodController.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys3174) - } // end switch yys3174 - } // end for yyj3174 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -40728,16 +48740,16 @@ func (x *PodSignature) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3176 int - var yyb3176 bool - var yyhl3176 bool = l >= 0 - yyj3176++ - if yyhl3176 { - yyb3176 = yyj3176 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb3176 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb3176 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -40748,22 +48760,28 @@ func (x *PodSignature) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } } else { if x.PodController == nil { - x.PodController = new(OwnerReference) + x.PodController = new(pkg2_v1.OwnerReference) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(x.PodController) { + } else { + z.DecFallback(x.PodController, false) } - x.PodController.CodecDecodeSelf(d) } for { - yyj3176++ - if yyhl3176 { - yyb3176 = yyj3176 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb3176 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb3176 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3176-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -40775,37 +48793,37 @@ func (x *ContainerImage) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3178 := z.EncBinary() - _ = yym3178 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3179 := !z.EncBinary() - yy2arr3179 := z.EncBasicHandle().StructToArray - var yyq3179 [2]bool - _, _, _ = yysep3179, yyq3179, yy2arr3179 - const yyr3179 bool = false - yyq3179[1] = x.SizeBytes != 0 - var yynn3179 int - if yyr3179 || yy2arr3179 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.SizeBytes != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn3179 = 1 - for _, b := range yyq3179 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn3179++ + yynn2++ } } - r.EncodeMapStart(yynn3179) - yynn3179 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3179 || yy2arr3179 { + if yyr2 || yy2arr2 { 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Names == nil { r.EncodeNil() } else { - yym3181 := z.EncBinary() - _ = yym3181 + yym4 := z.EncBinary() + _ = yym4 if false { } else { z.F.EncSliceStringV(x.Names, false, e) @@ -40818,19 +48836,19 @@ func (x *ContainerImage) CodecEncodeSelf(e *codec1978.Encoder) { if x.Names == nil { r.EncodeNil() } else { - yym3182 := z.EncBinary() - _ = yym3182 + yym5 := z.EncBinary() + _ = yym5 if false { } else { z.F.EncSliceStringV(x.Names, false, e) } } } - if yyr3179 || yy2arr3179 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3179[1] { - yym3184 := z.EncBinary() - _ = yym3184 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeInt(int64(x.SizeBytes)) @@ -40839,19 +48857,19 @@ func (x *ContainerImage) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq3179[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("sizeBytes")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3185 := z.EncBinary() - _ = yym3185 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeInt(int64(x.SizeBytes)) } } } - if yyr3179 || yy2arr3179 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -40864,25 +48882,25 @@ func (x *ContainerImage) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3186 := z.DecBinary() - _ = yym3186 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3187 := r.ContainerType() - if yyct3187 == codecSelferValueTypeMap1234 { - yyl3187 := r.ReadMapStart() - if yyl3187 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3187, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3187 == codecSelferValueTypeArray1234 { - yyl3187 := r.ReadArrayStart() - if yyl3187 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3187, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -40894,12 +48912,12 @@ func (x *ContainerImage) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3188Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3188Slc - var yyhl3188 bool = l >= 0 - for yyj3188 := 0; ; yyj3188++ { - if yyhl3188 { - if yyj3188 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -40908,32 +48926,38 @@ func (x *ContainerImage) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3188Slc = r.DecodeBytes(yys3188Slc, true, true) - yys3188 := string(yys3188Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3188 { + switch yys3 { case "names": if r.TryDecodeAsNil() { 
x.Names = nil } else { - yyv3189 := &x.Names - yym3190 := z.DecBinary() - _ = yym3190 + yyv4 := &x.Names + yym5 := z.DecBinary() + _ = yym5 if false { } else { - z.F.DecSliceStringX(yyv3189, false, d) + z.F.DecSliceStringX(yyv4, false, d) } } case "sizeBytes": if r.TryDecodeAsNil() { x.SizeBytes = 0 } else { - x.SizeBytes = int64(r.DecodeInt(64)) + yyv6 := &x.SizeBytes + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int64)(yyv6)) = int64(r.DecodeInt(64)) + } } default: - z.DecStructFieldNotFound(-1, yys3188) - } // end switch yys3188 - } // end for yyj3188 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -40941,16 +48965,16 @@ func (x *ContainerImage) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3192 int - var yyb3192 bool - var yyhl3192 bool = l >= 0 - yyj3192++ - if yyhl3192 { - yyb3192 = yyj3192 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb3192 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb3192 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -40958,21 +48982,21 @@ func (x *ContainerImage) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Names = nil } else { - yyv3193 := &x.Names - yym3194 := z.DecBinary() - _ = yym3194 + yyv9 := &x.Names + yym10 := z.DecBinary() + _ = yym10 if false { } else { - z.F.DecSliceStringX(yyv3193, false, d) + z.F.DecSliceStringX(yyv9, false, d) } } - yyj3192++ - if yyhl3192 { - yyb3192 = yyj3192 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb3192 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb3192 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -40980,20 +49004,26 @@ func (x *ContainerImage) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.SizeBytes = 0 } else { - x.SizeBytes = int64(r.DecodeInt(64)) + yyv11 := &x.SizeBytes + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int64)(yyv11)) = int64(r.DecodeInt(64)) + } } for { - yyj3192++ - if yyhl3192 { - yyb3192 = yyj3192 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb3192 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb3192 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3192-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -41002,8 +49032,8 @@ func (x NodePhase) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym3196 := z.EncBinary() - _ = yym3196 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -41015,8 +49045,8 @@ func (x *NodePhase) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3197 := z.DecBinary() - _ = yym3197 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -41028,8 +49058,8 @@ func (x NodeConditionType) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym3198 := z.EncBinary() - _ = yym3198 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ 
-41041,8 +49071,8 @@ func (x *NodeConditionType) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3199 := z.DecBinary() - _ = yym3199 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -41057,34 +49087,34 @@ func (x *NodeCondition) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3200 := z.EncBinary() - _ = yym3200 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3201 := !z.EncBinary() - yy2arr3201 := z.EncBasicHandle().StructToArray - var yyq3201 [6]bool - _, _, _ = yysep3201, yyq3201, yy2arr3201 - const yyr3201 bool = false - yyq3201[2] = true - yyq3201[3] = true - yyq3201[4] = x.Reason != "" - yyq3201[5] = x.Message != "" - var yynn3201 int - if yyr3201 || yy2arr3201 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(6) } else { - yynn3201 = 2 - for _, b := range yyq3201 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn3201++ + yynn2++ } } - r.EncodeMapStart(yynn3201) - yynn3201 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3201 || yy2arr3201 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) x.Type.CodecEncodeSelf(e) } else { @@ -41093,7 +49123,7 @@ func (x *NodeCondition) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Type.CodecEncodeSelf(e) } - if yyr3201 || yy2arr3201 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) x.Status.CodecEncodeSelf(e) } else { @@ -41102,85 +49132,85 @@ func (x *NodeCondition) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Status.CodecEncodeSelf(e) } - if yyr3201 || yy2arr3201 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3201[2] { - yy3205 := &x.LastHeartbeatTime - yym3206 := z.EncBinary() - _ = yym3206 + if yyq2[2] { + yy10 := &x.LastHeartbeatTime + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy3205) { - } else if yym3206 { - z.EncBinaryMarshal(yy3205) - } else if !yym3206 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3205) + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) } else { - z.EncFallback(yy3205) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq3201[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("lastHeartbeatTime")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3207 := &x.LastHeartbeatTime - yym3208 := z.EncBinary() - _ = yym3208 + yy12 := &x.LastHeartbeatTime + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy3207) { - } else if yym3208 { - z.EncBinaryMarshal(yy3207) - } else if !yym3208 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3207) + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) } else { - z.EncFallback(yy3207) + z.EncFallback(yy12) } } } - 
if yyr3201 || yy2arr3201 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3201[3] { - yy3210 := &x.LastTransitionTime - yym3211 := z.EncBinary() - _ = yym3211 + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 if false { - } else if z.HasExtensions() && z.EncExt(yy3210) { - } else if yym3211 { - z.EncBinaryMarshal(yy3210) - } else if !yym3211 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3210) + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) } else { - z.EncFallback(yy3210) + z.EncFallback(yy15) } } else { r.EncodeNil() } } else { - if yyq3201[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3212 := &x.LastTransitionTime - yym3213 := z.EncBinary() - _ = yym3213 + yy17 := &x.LastTransitionTime + yym18 := z.EncBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.EncExt(yy3212) { - } else if yym3213 { - z.EncBinaryMarshal(yy3212) - } else if !yym3213 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3212) + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } else if !yym18 && z.IsJSONHandle() { + z.EncJSONMarshal(yy17) } else { - z.EncFallback(yy3212) + z.EncFallback(yy17) } } } - if yyr3201 || yy2arr3201 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3201[4] { - yym3215 := z.EncBinary() - _ = yym3215 + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) @@ -41189,23 +49219,23 @@ func (x *NodeCondition) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3201[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("reason")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3216 := z.EncBinary() - _ = yym3216 + yym21 := z.EncBinary() + _ = yym21 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) } } } - if yyr3201 || yy2arr3201 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3201[5] { - yym3218 := z.EncBinary() - _ = yym3218 + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) @@ -41214,19 +49244,19 @@ func (x *NodeCondition) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3201[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("message")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3219 := z.EncBinary() - _ = yym3219 + yym24 := z.EncBinary() + _ = yym24 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) } } } - if yyr3201 || yy2arr3201 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -41239,25 +49269,25 @@ func (x *NodeCondition) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3220 := z.DecBinary() - _ = yym3220 + yym1 := z.DecBinary() + _ = yym1 if false { } else if 
z.HasExtensions() && z.DecExt(x) { } else { - yyct3221 := r.ContainerType() - if yyct3221 == codecSelferValueTypeMap1234 { - yyl3221 := r.ReadMapStart() - if yyl3221 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3221, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3221 == codecSelferValueTypeArray1234 { - yyl3221 := r.ReadArrayStart() - if yyl3221 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3221, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -41269,12 +49299,12 @@ func (x *NodeCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3222Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3222Slc - var yyhl3222 bool = l >= 0 - for yyj3222 := 0; ; yyj3222++ { - if yyhl3222 { - if yyj3222 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -41283,72 +49313,86 @@ func (x *NodeCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3222Slc = r.DecodeBytes(yys3222Slc, true, true) - yys3222 := string(yys3222Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3222 { + switch yys3 { case "type": if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = NodeConditionType(r.DecodeString()) + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = "" } else { - x.Status = ConditionStatus(r.DecodeString()) + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) } case "lastHeartbeatTime": if r.TryDecodeAsNil() { - x.LastHeartbeatTime = pkg2_unversioned.Time{} + x.LastHeartbeatTime = pkg2_v1.Time{} } else { - yyv3225 := &x.LastHeartbeatTime - yym3226 := z.DecBinary() - _ = yym3226 + yyv6 := &x.LastHeartbeatTime + yym7 := z.DecBinary() + _ = yym7 if false { - } else if z.HasExtensions() && z.DecExt(yyv3225) { - } else if yym3226 { - z.DecBinaryUnmarshal(yyv3225) - } else if !yym3226 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv3225) + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) } else { - z.DecFallback(yyv3225, false) + z.DecFallback(yyv6, false) } } case "lastTransitionTime": if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} + x.LastTransitionTime = pkg2_v1.Time{} } else { - yyv3227 := &x.LastTransitionTime - yym3228 := z.DecBinary() - _ = yym3228 + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv3227) { - } else if yym3228 { - z.DecBinaryUnmarshal(yyv3227) - } else if !yym3228 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv3227) + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) } else { - z.DecFallback(yyv3227, false) + 
z.DecFallback(yyv8, false) } } case "reason": if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv10 := &x.Reason + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } case "message": if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv12 := &x.Message + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys3222) - } // end switch yys3222 - } // end for yyj3222 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -41356,16 +49400,16 @@ func (x *NodeCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3231 int - var yyb3231 bool - var yyhl3231 bool = l >= 0 - yyj3231++ - if yyhl3231 { - yyb3231 = yyj3231 > l + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb3231 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb3231 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -41373,15 +49417,16 @@ func (x *NodeCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = NodeConditionType(r.DecodeString()) + yyv15 := &x.Type + yyv15.CodecDecodeSelf(d) } - yyj3231++ - if yyhl3231 { - yyb3231 = yyj3231 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb3231 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb3231 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -41389,69 +49434,70 @@ func (x *NodeCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Status = "" } else { - x.Status = ConditionStatus(r.DecodeString()) + yyv16 := &x.Status + yyv16.CodecDecodeSelf(d) } - yyj3231++ - if yyhl3231 { - yyb3231 = yyj3231 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb3231 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb3231 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.LastHeartbeatTime = pkg2_unversioned.Time{} + x.LastHeartbeatTime = pkg2_v1.Time{} } else { - yyv3234 := &x.LastHeartbeatTime - yym3235 := z.DecBinary() - _ = yym3235 + yyv17 := &x.LastHeartbeatTime + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv3234) { - } else if yym3235 { - z.DecBinaryUnmarshal(yyv3234) - } else if !yym3235 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv3234) + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) } else { - z.DecFallback(yyv3234, false) + z.DecFallback(yyv17, false) } } - yyj3231++ - if yyhl3231 { - yyb3231 = yyj3231 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb3231 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb3231 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} + x.LastTransitionTime = pkg2_v1.Time{} } else { - yyv3236 := &x.LastTransitionTime - yym3237 := 
z.DecBinary() - _ = yym3237 + yyv19 := &x.LastTransitionTime + yym20 := z.DecBinary() + _ = yym20 if false { - } else if z.HasExtensions() && z.DecExt(yyv3236) { - } else if yym3237 { - z.DecBinaryUnmarshal(yyv3236) - } else if !yym3237 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv3236) + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if yym20 { + z.DecBinaryUnmarshal(yyv19) + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) } else { - z.DecFallback(yyv3236, false) + z.DecFallback(yyv19, false) } } - yyj3231++ - if yyhl3231 { - yyb3231 = yyj3231 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb3231 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb3231 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -41459,15 +49505,21 @@ func (x *NodeCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv21 := &x.Reason + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } } - yyj3231++ - if yyhl3231 { - yyb3231 = yyj3231 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb3231 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb3231 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -41475,20 +49527,26 @@ func (x *NodeCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv23 := &x.Message + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } } for { - yyj3231++ - if yyhl3231 { - yyb3231 = yyj3231 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb3231 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb3231 { + if yyb14 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3231-1, "") + z.DecStructFieldNotFound(yyj14-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -41497,8 +49555,8 @@ func (x NodeAddressType) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym3240 := z.EncBinary() - _ = yym3240 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -41510,8 +49568,8 @@ func (x *NodeAddressType) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3241 := z.DecBinary() - _ = yym3241 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -41526,30 +49584,30 @@ func (x *NodeAddress) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3242 := z.EncBinary() - _ = yym3242 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3243 := !z.EncBinary() - yy2arr3243 := z.EncBasicHandle().StructToArray - var yyq3243 [2]bool - _, _, _ = yysep3243, yyq3243, yy2arr3243 - const yyr3243 bool = false - var yynn3243 int - if yyr3243 || yy2arr3243 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn3243 = 2 - for _, b := range yyq3243 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn3243++ + yynn2++ } } - 
r.EncodeMapStart(yynn3243) - yynn3243 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3243 || yy2arr3243 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) x.Type.CodecEncodeSelf(e) } else { @@ -41558,10 +49616,10 @@ func (x *NodeAddress) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Type.CodecEncodeSelf(e) } - if yyr3243 || yy2arr3243 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3246 := z.EncBinary() - _ = yym3246 + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Address)) @@ -41570,14 +49628,14 @@ func (x *NodeAddress) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("address")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3247 := z.EncBinary() - _ = yym3247 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Address)) } } - if yyr3243 || yy2arr3243 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -41590,25 +49648,25 @@ func (x *NodeAddress) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3248 := z.DecBinary() - _ = yym3248 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3249 := r.ContainerType() - if yyct3249 == codecSelferValueTypeMap1234 { - yyl3249 := r.ReadMapStart() - if yyl3249 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3249, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3249 == codecSelferValueTypeArray1234 { - yyl3249 := r.ReadArrayStart() - if yyl3249 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3249, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -41620,12 +49678,12 @@ func (x *NodeAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3250Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3250Slc - var yyhl3250 bool = l >= 0 - for yyj3250 := 0; ; yyj3250++ { - if yyhl3250 { - if yyj3250 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -41634,26 +49692,33 @@ func (x *NodeAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3250Slc = r.DecodeBytes(yys3250Slc, true, true) - yys3250 := string(yys3250Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3250 { + switch yys3 { case "type": if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = NodeAddressType(r.DecodeString()) + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) } case "address": if r.TryDecodeAsNil() { x.Address = "" } 
else { - x.Address = string(r.DecodeString()) + yyv5 := &x.Address + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys3250) - } // end switch yys3250 - } // end for yyj3250 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -41661,16 +49726,16 @@ func (x *NodeAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3253 int - var yyb3253 bool - var yyhl3253 bool = l >= 0 - yyj3253++ - if yyhl3253 { - yyb3253 = yyj3253 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb3253 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb3253 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -41678,15 +49743,16 @@ func (x *NodeAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = NodeAddressType(r.DecodeString()) + yyv8 := &x.Type + yyv8.CodecDecodeSelf(d) } - yyj3253++ - if yyhl3253 { - yyb3253 = yyj3253 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb3253 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb3253 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -41694,20 +49760,26 @@ func (x *NodeAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Address = "" } else { - x.Address = string(r.DecodeString()) + yyv9 := &x.Address + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } } for { - yyj3253++ - if yyhl3253 { - yyb3253 = yyj3253 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb3253 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb3253 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3253-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -41716,8 +49788,8 @@ func (x ResourceName) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym3256 := z.EncBinary() - _ = yym3256 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -41729,8 +49801,8 @@ func (x *ResourceName) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3257 := z.DecBinary() - _ = yym3257 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -41745,8 +49817,8 @@ func (x ResourceList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3258 := z.EncBinary() - _ = yym3258 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -41759,8 +49831,8 @@ func (x *ResourceList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3259 := z.DecBinary() - _ = yym3259 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -41775,39 +49847,39 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3260 := z.EncBinary() - _ = yym3260 + yym1 := z.EncBinary() + _ = yym1 if false 
{ } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3261 := !z.EncBinary() - yy2arr3261 := z.EncBasicHandle().StructToArray - var yyq3261 [5]bool - _, _, _ = yysep3261, yyq3261, yy2arr3261 - const yyr3261 bool = false - yyq3261[0] = x.Kind != "" - yyq3261[1] = x.APIVersion != "" - yyq3261[2] = true - yyq3261[3] = true - yyq3261[4] = true - var yynn3261 int - if yyr3261 || yy2arr3261 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn3261 = 0 - for _, b := range yyq3261 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn3261++ + yynn2++ } } - r.EncodeMapStart(yynn3261) - yynn3261 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3261 || yy2arr3261 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3261[0] { - yym3263 := z.EncBinary() - _ = yym3263 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -41816,23 +49888,23 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3261[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3264 := z.EncBinary() - _ = yym3264 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr3261 || yy2arr3261 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3261[1] { - yym3266 := z.EncBinary() - _ = yym3266 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -41841,70 +49913,82 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3261[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3267 := z.EncBinary() - _ = yym3267 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr3261 || yy2arr3261 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3261[2] { - yy3269 := &x.ObjectMeta - yy3269.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq3261[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3270 := &x.ObjectMeta - yy3270.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr3261 || yy2arr3261 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3261[3] { - yy3272 := &x.Spec - yy3272.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := &x.Spec 
+ yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq3261[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3273 := &x.Spec - yy3273.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } } - if yyr3261 || yy2arr3261 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3261[4] { - yy3275 := &x.Status - yy3275.CodecEncodeSelf(e) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq3261[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3276 := &x.Status - yy3276.CodecEncodeSelf(e) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } - if yyr3261 || yy2arr3261 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -41917,25 +50001,25 @@ func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3277 := z.DecBinary() - _ = yym3277 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3278 := r.ContainerType() - if yyct3278 == codecSelferValueTypeMap1234 { - yyl3278 := r.ReadMapStart() - if yyl3278 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3278, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3278 == codecSelferValueTypeArray1234 { - yyl3278 := r.ReadArrayStart() - if yyl3278 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3278, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -41947,12 +50031,12 @@ func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3279Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3279Slc - var yyhl3279 bool = l >= 0 - for yyj3279 := 0; ; yyj3279++ { - if yyhl3279 { - if yyj3279 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -41961,47 +50045,65 @@ func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3279Slc = r.DecodeBytes(yys3279Slc, true, true) - yys3279 := string(yys3279Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3279 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() 
+ _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv3282 := &x.ObjectMeta - yyv3282.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = NodeSpec{} } else { - yyv3283 := &x.Spec - yyv3283.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = NodeStatus{} } else { - yyv3284 := &x.Status - yyv3284.CodecDecodeSelf(d) + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys3279) - } // end switch yys3279 - } // end for yyj3279 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -42009,16 +50111,16 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3285 int - var yyb3285 bool - var yyhl3285 bool = l >= 0 - yyj3285++ - if yyhl3285 { - yyb3285 = yyj3285 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3285 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3285 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -42026,15 +50128,21 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj3285++ - if yyhl3285 { - yyb3285 = yyj3285 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3285 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3285 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -42042,32 +50150,44 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj3285++ - if yyhl3285 { - yyb3285 = yyj3285 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3285 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3285 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv3288 := &x.ObjectMeta - yyv3288.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj3285++ - if yyhl3285 { - yyb3285 = yyj3285 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3285 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3285 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -42075,16 +50195,16 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Spec = NodeSpec{} } else { - yyv3289 := &x.Spec - yyv3289.CodecDecodeSelf(d) + yyv19 := 
&x.Spec + yyv19.CodecDecodeSelf(d) } - yyj3285++ - if yyhl3285 { - yyb3285 = yyj3285 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3285 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3285 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -42092,21 +50212,21 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Status = NodeStatus{} } else { - yyv3290 := &x.Status - yyv3290.CodecDecodeSelf(d) + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) } for { - yyj3285++ - if yyhl3285 { - yyb3285 = yyj3285 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3285 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3285 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3285-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -42118,37 +50238,37 @@ func (x *NodeList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3291 := z.EncBinary() - _ = yym3291 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3292 := !z.EncBinary() - yy2arr3292 := z.EncBasicHandle().StructToArray - var yyq3292 [4]bool - _, _, _ = yysep3292, yyq3292, yy2arr3292 - const yyr3292 bool = false - yyq3292[0] = x.Kind != "" - yyq3292[1] = x.APIVersion != "" - yyq3292[2] = true - var yynn3292 int - if yyr3292 || yy2arr3292 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn3292 = 1 - for _, b := range yyq3292 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn3292++ + yynn2++ } } - r.EncodeMapStart(yynn3292) - yynn3292 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3292 || yy2arr3292 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3292[0] { - yym3294 := z.EncBinary() - _ = yym3294 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -42157,23 +50277,23 @@ func (x *NodeList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3292[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3295 := z.EncBinary() - _ = yym3295 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr3292 || yy2arr3292 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3292[1] { - yym3297 := z.EncBinary() - _ = yym3297 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -42182,54 +50302,54 @@ func (x *NodeList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3292[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3298 := z.EncBinary() - _ = yym3298 + yym8 := z.EncBinary() + _ = yym8 if false { } else { 
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr3292 || yy2arr3292 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3292[2] { - yy3300 := &x.ListMeta - yym3301 := z.EncBinary() - _ = yym3301 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy3300) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy3300) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq3292[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3302 := &x.ListMeta - yym3303 := z.EncBinary() - _ = yym3303 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy3302) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy3302) + z.EncFallback(yy12) } } } - if yyr3292 || yy2arr3292 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym3305 := z.EncBinary() - _ = yym3305 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceNode(([]Node)(x.Items), e) @@ -42242,15 +50362,15 @@ func (x *NodeList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym3306 := z.EncBinary() - _ = yym3306 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceNode(([]Node)(x.Items), e) } } } - if yyr3292 || yy2arr3292 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -42263,25 +50383,25 @@ func (x *NodeList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3307 := z.DecBinary() - _ = yym3307 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3308 := r.ContainerType() - if yyct3308 == codecSelferValueTypeMap1234 { - yyl3308 := r.ReadMapStart() - if yyl3308 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3308, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3308 == codecSelferValueTypeArray1234 { - yyl3308 := r.ReadArrayStart() - if yyl3308 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3308, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -42293,12 +50413,12 @@ func (x *NodeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3309Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3309Slc - var yyhl3309 bool = l >= 0 - for yyj3309 := 0; ; yyj3309++ { - if yyhl3309 { - if yyj3309 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -42307,51 +50427,63 @@ func (x *NodeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3309Slc = r.DecodeBytes(yys3309Slc, true, true) - yys3309 := string(yys3309Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3309 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv3312 := &x.ListMeta - yym3313 := z.DecBinary() - _ = yym3313 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv3312) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv3312, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv3314 := &x.Items - yym3315 := z.DecBinary() - _ = yym3315 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceNode((*[]Node)(yyv3314), d) + h.decSliceNode((*[]Node)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys3309) - } // end switch yys3309 - } // end for yyj3309 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -42359,16 +50491,16 @@ func (x *NodeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3316 int - var yyb3316 bool - var yyhl3316 bool = l >= 0 - yyj3316++ - if yyhl3316 { - yyb3316 = yyj3316 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3316 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3316 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -42376,15 +50508,21 @@ func (x *NodeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj3316++ - if yyhl3316 { - yyb3316 = yyj3316 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3316 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3316 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -42392,38 +50530,44 @@ func (x *NodeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj3316++ - if yyhl3316 { - yyb3316 = yyj3316 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3316 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3316 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = 
pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv3319 := &x.ListMeta - yym3320 := z.DecBinary() - _ = yym3320 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv3319) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv3319, false) + z.DecFallback(yyv17, false) } } - yyj3316++ - if yyhl3316 { - yyb3316 = yyj3316 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3316 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3316 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -42431,26 +50575,26 @@ func (x *NodeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Items = nil } else { - yyv3321 := &x.Items - yym3322 := z.DecBinary() - _ = yym3322 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceNode((*[]Node)(yyv3321), d) + h.decSliceNode((*[]Node)(yyv19), d) } } for { - yyj3316++ - if yyhl3316 { - yyb3316 = yyj3316 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3316 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3316 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3316-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -42459,8 +50603,8 @@ func (x FinalizerName) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym3323 := z.EncBinary() - _ = yym3323 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -42472,8 +50616,8 @@ func (x *FinalizerName) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3324 := z.DecBinary() - _ = yym3324 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -42488,38 +50632,38 @@ func (x *NamespaceSpec) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3325 := z.EncBinary() - _ = yym3325 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3326 := !z.EncBinary() - yy2arr3326 := z.EncBasicHandle().StructToArray - var yyq3326 [1]bool - _, _, _ = yysep3326, yyq3326, yy2arr3326 - const yyr3326 bool = false - yyq3326[0] = len(x.Finalizers) != 0 - var yynn3326 int - if yyr3326 || yy2arr3326 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Finalizers) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn3326 = 0 - for _, b := range yyq3326 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn3326++ + yynn2++ } } - r.EncodeMapStart(yynn3326) - yynn3326 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3326 || yy2arr3326 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3326[0] { + if yyq2[0] { if x.Finalizers == nil { r.EncodeNil() } else { - yym3328 := z.EncBinary() - _ = yym3328 + yym4 := z.EncBinary() + _ = yym4 if false { } else { h.encSliceFinalizerName(([]FinalizerName)(x.Finalizers), e) @@ -42529,15 +50673,15 @@ func (x *NamespaceSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq3326[0] { + if yyq2[0] { 
z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("finalizers")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Finalizers == nil { r.EncodeNil() } else { - yym3329 := z.EncBinary() - _ = yym3329 + yym5 := z.EncBinary() + _ = yym5 if false { } else { h.encSliceFinalizerName(([]FinalizerName)(x.Finalizers), e) @@ -42545,7 +50689,7 @@ func (x *NamespaceSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr3326 || yy2arr3326 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -42558,25 +50702,25 @@ func (x *NamespaceSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3330 := z.DecBinary() - _ = yym3330 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3331 := r.ContainerType() - if yyct3331 == codecSelferValueTypeMap1234 { - yyl3331 := r.ReadMapStart() - if yyl3331 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3331, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3331 == codecSelferValueTypeArray1234 { - yyl3331 := r.ReadArrayStart() - if yyl3331 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3331, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -42588,12 +50732,12 @@ func (x *NamespaceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3332Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3332Slc - var yyhl3332 bool = l >= 0 - for yyj3332 := 0; ; yyj3332++ { - if yyhl3332 { - if yyj3332 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -42602,26 +50746,26 @@ func (x *NamespaceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3332Slc = r.DecodeBytes(yys3332Slc, true, true) - yys3332 := string(yys3332Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3332 { + switch yys3 { case "finalizers": if r.TryDecodeAsNil() { x.Finalizers = nil } else { - yyv3333 := &x.Finalizers - yym3334 := z.DecBinary() - _ = yym3334 + yyv4 := &x.Finalizers + yym5 := z.DecBinary() + _ = yym5 if false { } else { - h.decSliceFinalizerName((*[]FinalizerName)(yyv3333), d) + h.decSliceFinalizerName((*[]FinalizerName)(yyv4), d) } } default: - z.DecStructFieldNotFound(-1, yys3332) - } // end switch yys3332 - } // end for yyj3332 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -42629,16 +50773,16 @@ func (x *NamespaceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3335 int - var yyb3335 bool - var yyhl3335 bool = 
l >= 0 - yyj3335++ - if yyhl3335 { - yyb3335 = yyj3335 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb3335 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb3335 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -42646,26 +50790,26 @@ func (x *NamespaceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Finalizers = nil } else { - yyv3336 := &x.Finalizers - yym3337 := z.DecBinary() - _ = yym3337 + yyv7 := &x.Finalizers + yym8 := z.DecBinary() + _ = yym8 if false { } else { - h.decSliceFinalizerName((*[]FinalizerName)(yyv3336), d) + h.decSliceFinalizerName((*[]FinalizerName)(yyv7), d) } } for { - yyj3335++ - if yyhl3335 { - yyb3335 = yyj3335 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb3335 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb3335 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3335-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -42677,46 +50821,46 @@ func (x *NamespaceStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3338 := z.EncBinary() - _ = yym3338 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3339 := !z.EncBinary() - yy2arr3339 := z.EncBasicHandle().StructToArray - var yyq3339 [1]bool - _, _, _ = yysep3339, yyq3339, yy2arr3339 - const yyr3339 bool = false - yyq3339[0] = x.Phase != "" - var yynn3339 int - if yyr3339 || yy2arr3339 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Phase != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn3339 = 0 - for _, b := range yyq3339 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn3339++ + yynn2++ } } - r.EncodeMapStart(yynn3339) - yynn3339 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3339 || yy2arr3339 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3339[0] { + if yyq2[0] { x.Phase.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3339[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("phase")) z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Phase.CodecEncodeSelf(e) } } - if yyr3339 || yy2arr3339 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -42729,25 +50873,25 @@ func (x *NamespaceStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3341 := z.DecBinary() - _ = yym3341 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3342 := r.ContainerType() - if yyct3342 == codecSelferValueTypeMap1234 { - yyl3342 := r.ReadMapStart() - if yyl3342 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3342, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3342 == codecSelferValueTypeArray1234 { - yyl3342 := r.ReadArrayStart() - if yyl3342 == 
0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3342, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -42759,12 +50903,12 @@ func (x *NamespaceStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3343Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3343Slc - var yyhl3343 bool = l >= 0 - for yyj3343 := 0; ; yyj3343++ { - if yyhl3343 { - if yyj3343 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -42773,20 +50917,21 @@ func (x *NamespaceStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3343Slc = r.DecodeBytes(yys3343Slc, true, true) - yys3343 := string(yys3343Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3343 { + switch yys3 { case "phase": if r.TryDecodeAsNil() { x.Phase = "" } else { - x.Phase = NamespacePhase(r.DecodeString()) + yyv4 := &x.Phase + yyv4.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys3343) - } // end switch yys3343 - } // end for yyj3343 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -42794,16 +50939,16 @@ func (x *NamespaceStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3345 int - var yyb3345 bool - var yyhl3345 bool = l >= 0 - yyj3345++ - if yyhl3345 { - yyb3345 = yyj3345 > l + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb3345 = r.CheckBreak() + yyb5 = r.CheckBreak() } - if yyb3345 { + if yyb5 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -42811,20 +50956,21 @@ func (x *NamespaceStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Phase = "" } else { - x.Phase = NamespacePhase(r.DecodeString()) + yyv6 := &x.Phase + yyv6.CodecDecodeSelf(d) } for { - yyj3345++ - if yyhl3345 { - yyb3345 = yyj3345 > l + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb3345 = r.CheckBreak() + yyb5 = r.CheckBreak() } - if yyb3345 { + if yyb5 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3345-1, "") + z.DecStructFieldNotFound(yyj5-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -42833,8 +50979,8 @@ func (x NamespacePhase) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym3347 := z.EncBinary() - _ = yym3347 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -42846,8 +50992,8 @@ func (x *NamespacePhase) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3348 := z.DecBinary() - _ = yym3348 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -42862,39 +51008,39 @@ func (x *Namespace) 
CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3349 := z.EncBinary() - _ = yym3349 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3350 := !z.EncBinary() - yy2arr3350 := z.EncBasicHandle().StructToArray - var yyq3350 [5]bool - _, _, _ = yysep3350, yyq3350, yy2arr3350 - const yyr3350 bool = false - yyq3350[0] = x.Kind != "" - yyq3350[1] = x.APIVersion != "" - yyq3350[2] = true - yyq3350[3] = true - yyq3350[4] = true - var yynn3350 int - if yyr3350 || yy2arr3350 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn3350 = 0 - for _, b := range yyq3350 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn3350++ + yynn2++ } } - r.EncodeMapStart(yynn3350) - yynn3350 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3350 || yy2arr3350 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3350[0] { - yym3352 := z.EncBinary() - _ = yym3352 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -42903,23 +51049,23 @@ func (x *Namespace) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3350[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3353 := z.EncBinary() - _ = yym3353 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr3350 || yy2arr3350 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3350[1] { - yym3355 := z.EncBinary() - _ = yym3355 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -42928,70 +51074,82 @@ func (x *Namespace) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3350[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3356 := z.EncBinary() - _ = yym3356 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr3350 || yy2arr3350 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3350[2] { - yy3358 := &x.ObjectMeta - yy3358.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq3350[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3359 := &x.ObjectMeta - yy3359.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr3350 || yy2arr3350 { + if yyr2 
|| yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3350[3] { - yy3361 := &x.Spec - yy3361.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq3350[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3362 := &x.Spec - yy3362.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } } - if yyr3350 || yy2arr3350 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3350[4] { - yy3364 := &x.Status - yy3364.CodecEncodeSelf(e) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq3350[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3365 := &x.Status - yy3365.CodecEncodeSelf(e) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } - if yyr3350 || yy2arr3350 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -43004,25 +51162,25 @@ func (x *Namespace) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3366 := z.DecBinary() - _ = yym3366 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3367 := r.ContainerType() - if yyct3367 == codecSelferValueTypeMap1234 { - yyl3367 := r.ReadMapStart() - if yyl3367 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3367, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3367 == codecSelferValueTypeArray1234 { - yyl3367 := r.ReadArrayStart() - if yyl3367 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3367, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -43034,12 +51192,12 @@ func (x *Namespace) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3368Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3368Slc - var yyhl3368 bool = l >= 0 - for yyj3368 := 0; ; yyj3368++ { - if yyhl3368 { - if yyj3368 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -43048,47 +51206,65 @@ func (x *Namespace) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3368Slc = r.DecodeBytes(yys3368Slc, true, true) - yys3368 := string(yys3368Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3368 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + 
*((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv3371 := &x.ObjectMeta - yyv3371.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = NamespaceSpec{} } else { - yyv3372 := &x.Spec - yyv3372.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = NamespaceStatus{} } else { - yyv3373 := &x.Status - yyv3373.CodecDecodeSelf(d) + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys3368) - } // end switch yys3368 - } // end for yyj3368 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -43096,16 +51272,16 @@ func (x *Namespace) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3374 int - var yyb3374 bool - var yyhl3374 bool = l >= 0 - yyj3374++ - if yyhl3374 { - yyb3374 = yyj3374 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3374 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3374 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -43113,15 +51289,21 @@ func (x *Namespace) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj3374++ - if yyhl3374 { - yyb3374 = yyj3374 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3374 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3374 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -43129,32 +51311,44 @@ func (x *Namespace) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj3374++ - if yyhl3374 { - yyb3374 = yyj3374 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3374 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3374 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv3377 := &x.ObjectMeta - yyv3377.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj3374++ - if yyhl3374 { - yyb3374 = yyj3374 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3374 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3374 { + if yyb12 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -43162,16 +51356,16 @@ func (x *Namespace) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Spec = NamespaceSpec{} } else { - yyv3378 := &x.Spec - yyv3378.CodecDecodeSelf(d) + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) } - yyj3374++ - if yyhl3374 { - yyb3374 = yyj3374 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3374 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3374 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -43179,21 +51373,21 @@ func (x *Namespace) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Status = NamespaceStatus{} } else { - yyv3379 := &x.Status - yyv3379.CodecDecodeSelf(d) + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) } for { - yyj3374++ - if yyhl3374 { - yyb3374 = yyj3374 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3374 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3374 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3374-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -43205,37 +51399,37 @@ func (x *NamespaceList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3380 := z.EncBinary() - _ = yym3380 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3381 := !z.EncBinary() - yy2arr3381 := z.EncBasicHandle().StructToArray - var yyq3381 [4]bool - _, _, _ = yysep3381, yyq3381, yy2arr3381 - const yyr3381 bool = false - yyq3381[0] = x.Kind != "" - yyq3381[1] = x.APIVersion != "" - yyq3381[2] = true - var yynn3381 int - if yyr3381 || yy2arr3381 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn3381 = 1 - for _, b := range yyq3381 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn3381++ + yynn2++ } } - r.EncodeMapStart(yynn3381) - yynn3381 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3381 || yy2arr3381 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3381[0] { - yym3383 := z.EncBinary() - _ = yym3383 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -43244,23 +51438,23 @@ func (x *NamespaceList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3381[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3384 := z.EncBinary() - _ = yym3384 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr3381 || yy2arr3381 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3381[1] { - yym3386 := z.EncBinary() - _ = yym3386 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -43269,54 +51463,54 @@ func (x *NamespaceList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } 
else { - if yyq3381[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3387 := z.EncBinary() - _ = yym3387 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr3381 || yy2arr3381 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3381[2] { - yy3389 := &x.ListMeta - yym3390 := z.EncBinary() - _ = yym3390 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy3389) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy3389) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq3381[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3391 := &x.ListMeta - yym3392 := z.EncBinary() - _ = yym3392 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy3391) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy3391) + z.EncFallback(yy12) } } } - if yyr3381 || yy2arr3381 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym3394 := z.EncBinary() - _ = yym3394 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceNamespace(([]Namespace)(x.Items), e) @@ -43329,15 +51523,15 @@ func (x *NamespaceList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym3395 := z.EncBinary() - _ = yym3395 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceNamespace(([]Namespace)(x.Items), e) } } } - if yyr3381 || yy2arr3381 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -43350,25 +51544,25 @@ func (x *NamespaceList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3396 := z.DecBinary() - _ = yym3396 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3397 := r.ContainerType() - if yyct3397 == codecSelferValueTypeMap1234 { - yyl3397 := r.ReadMapStart() - if yyl3397 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3397, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3397 == codecSelferValueTypeArray1234 { - yyl3397 := r.ReadArrayStart() - if yyl3397 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3397, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -43380,12 +51574,12 @@ func (x *NamespaceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3398Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3398Slc - var yyhl3398 bool = l >= 0 - for yyj3398 := 0; 
; yyj3398++ { - if yyhl3398 { - if yyj3398 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -43394,51 +51588,63 @@ func (x *NamespaceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3398Slc = r.DecodeBytes(yys3398Slc, true, true) - yys3398 := string(yys3398Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3398 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv3401 := &x.ListMeta - yym3402 := z.DecBinary() - _ = yym3402 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv3401) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv3401, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv3403 := &x.Items - yym3404 := z.DecBinary() - _ = yym3404 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceNamespace((*[]Namespace)(yyv3403), d) + h.decSliceNamespace((*[]Namespace)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys3398) - } // end switch yys3398 - } // end for yyj3398 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -43446,16 +51652,16 @@ func (x *NamespaceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3405 int - var yyb3405 bool - var yyhl3405 bool = l >= 0 - yyj3405++ - if yyhl3405 { - yyb3405 = yyj3405 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3405 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3405 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -43463,15 +51669,21 @@ func (x *NamespaceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj3405++ - if yyhl3405 { - yyb3405 = yyj3405 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3405 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3405 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -43479,38 +51691,44 @@ func (x *NamespaceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - 
yyj3405++ - if yyhl3405 { - yyb3405 = yyj3405 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3405 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3405 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv3408 := &x.ListMeta - yym3409 := z.DecBinary() - _ = yym3409 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv3408) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv3408, false) + z.DecFallback(yyv17, false) } } - yyj3405++ - if yyhl3405 { - yyb3405 = yyj3405 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3405 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3405 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -43518,26 +51736,26 @@ func (x *NamespaceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Items = nil } else { - yyv3410 := &x.Items - yym3411 := z.DecBinary() - _ = yym3411 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceNamespace((*[]Namespace)(yyv3410), d) + h.decSliceNamespace((*[]Namespace)(yyv19), d) } } for { - yyj3405++ - if yyhl3405 { - yyb3405 = yyj3405 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb3405 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb3405 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3405-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -43549,37 +51767,37 @@ func (x *Binding) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3412 := z.EncBinary() - _ = yym3412 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3413 := !z.EncBinary() - yy2arr3413 := z.EncBasicHandle().StructToArray - var yyq3413 [4]bool - _, _, _ = yysep3413, yyq3413, yy2arr3413 - const yyr3413 bool = false - yyq3413[0] = x.Kind != "" - yyq3413[1] = x.APIVersion != "" - yyq3413[2] = true - var yynn3413 int - if yyr3413 || yy2arr3413 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn3413 = 1 - for _, b := range yyq3413 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn3413++ + yynn2++ } } - r.EncodeMapStart(yynn3413) - yynn3413 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3413 || yy2arr3413 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3413[0] { - yym3415 := z.EncBinary() - _ = yym3415 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -43588,23 +51806,23 @@ func (x *Binding) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3413[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3416 := z.EncBinary() - _ = yym3416 + yym5 := 
z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr3413 || yy2arr3413 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3413[1] { - yym3418 := z.EncBinary() - _ = yym3418 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -43613,47 +51831,59 @@ func (x *Binding) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3413[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3419 := z.EncBinary() - _ = yym3419 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr3413 || yy2arr3413 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3413[2] { - yy3421 := &x.ObjectMeta - yy3421.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq3413[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3422 := &x.ObjectMeta - yy3422.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr3413 || yy2arr3413 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy3424 := &x.Target - yy3424.CodecEncodeSelf(e) + yy15 := &x.Target + yy15.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("target")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3425 := &x.Target - yy3425.CodecEncodeSelf(e) + yy17 := &x.Target + yy17.CodecEncodeSelf(e) } - if yyr3413 || yy2arr3413 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -43666,25 +51896,25 @@ func (x *Binding) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3426 := z.DecBinary() - _ = yym3426 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3427 := r.ContainerType() - if yyct3427 == codecSelferValueTypeMap1234 { - yyl3427 := r.ReadMapStart() - if yyl3427 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3427, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3427 == codecSelferValueTypeArray1234 { - yyl3427 := r.ReadArrayStart() - if yyl3427 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3427, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -43696,12 +51926,12 @@ func (x *Binding) 
codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3428Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3428Slc - var yyhl3428 bool = l >= 0 - for yyj3428 := 0; ; yyj3428++ { - if yyhl3428 { - if yyj3428 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -43710,40 +51940,58 @@ func (x *Binding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3428Slc = r.DecodeBytes(yys3428Slc, true, true) - yys3428 := string(yys3428Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3428 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv3431 := &x.ObjectMeta - yyv3431.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "target": if r.TryDecodeAsNil() { x.Target = ObjectReference{} } else { - yyv3432 := &x.Target - yyv3432.CodecDecodeSelf(d) + yyv10 := &x.Target + yyv10.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys3428) - } // end switch yys3428 - } // end for yyj3428 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -43751,16 +51999,16 @@ func (x *Binding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3433 int - var yyb3433 bool - var yyhl3433 bool = l >= 0 - yyj3433++ - if yyhl3433 { - yyb3433 = yyj3433 > l + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb3433 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb3433 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -43768,15 +52016,21 @@ func (x *Binding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } - yyj3433++ - if yyhl3433 { - yyb3433 = yyj3433 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb3433 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb3433 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -43784,32 +52038,44 @@ func (x *Binding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) 
= r.DecodeString() + } } - yyj3433++ - if yyhl3433 { - yyb3433 = yyj3433 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb3433 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb3433 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv3436 := &x.ObjectMeta - yyv3436.CodecDecodeSelf(d) + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } } - yyj3433++ - if yyhl3433 { - yyb3433 = yyj3433 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb3433 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb3433 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -43817,21 +52083,21 @@ func (x *Binding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Target = ObjectReference{} } else { - yyv3437 := &x.Target - yyv3437.CodecDecodeSelf(d) + yyv18 := &x.Target + yyv18.CodecDecodeSelf(d) } for { - yyj3433++ - if yyhl3433 { - yyb3433 = yyj3433 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb3433 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb3433 { + if yyb11 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3433-1, "") + z.DecStructFieldNotFound(yyj11-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -43843,68 +52109,68 @@ func (x *Preconditions) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3438 := z.EncBinary() - _ = yym3438 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3439 := !z.EncBinary() - yy2arr3439 := z.EncBasicHandle().StructToArray - var yyq3439 [1]bool - _, _, _ = yysep3439, yyq3439, yy2arr3439 - const yyr3439 bool = false - yyq3439[0] = x.UID != nil - var yynn3439 int - if yyr3439 || yy2arr3439 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.UID != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn3439 = 0 - for _, b := range yyq3439 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn3439++ + yynn2++ } } - r.EncodeMapStart(yynn3439) - yynn3439 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3439 || yy2arr3439 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3439[0] { + if yyq2[0] { if x.UID == nil { r.EncodeNil() } else { - yy3441 := *x.UID - yym3442 := z.EncBinary() - _ = yym3442 + yy4 := *x.UID + yym5 := z.EncBinary() + _ = yym5 if false { - } else if z.HasExtensions() && z.EncExt(yy3441) { + } else if z.HasExtensions() && z.EncExt(yy4) { } else { - r.EncodeString(codecSelferC_UTF81234, string(yy3441)) + r.EncodeString(codecSelferC_UTF81234, string(yy4)) } } } else { r.EncodeNil() } } else { - if yyq3439[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("uid")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.UID == nil { r.EncodeNil() } else { - yy3443 := *x.UID - yym3444 := z.EncBinary() - _ = yym3444 + yy6 := *x.UID + yym7 := z.EncBinary() + _ = yym7 if false { - } else if z.HasExtensions() && z.EncExt(yy3443) { + } else if 
z.HasExtensions() && z.EncExt(yy6) { } else { - r.EncodeString(codecSelferC_UTF81234, string(yy3443)) + r.EncodeString(codecSelferC_UTF81234, string(yy6)) } } } } - if yyr3439 || yy2arr3439 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -43917,25 +52183,25 @@ func (x *Preconditions) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3445 := z.DecBinary() - _ = yym3445 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3446 := r.ContainerType() - if yyct3446 == codecSelferValueTypeMap1234 { - yyl3446 := r.ReadMapStart() - if yyl3446 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3446, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3446 == codecSelferValueTypeArray1234 { - yyl3446 := r.ReadArrayStart() - if yyl3446 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3446, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -43947,12 +52213,12 @@ func (x *Preconditions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3447Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3447Slc - var yyhl3447 bool = l >= 0 - for yyj3447 := 0; ; yyj3447++ { - if yyhl3447 { - if yyj3447 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -43961,10 +52227,10 @@ func (x *Preconditions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3447Slc = r.DecodeBytes(yys3447Slc, true, true) - yys3447 := string(yys3447Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3447 { + switch yys3 { case "uid": if r.TryDecodeAsNil() { if x.UID != nil { @@ -43974,8 +52240,8 @@ func (x *Preconditions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.UID == nil { x.UID = new(pkg1_types.UID) } - yym3449 := z.DecBinary() - _ = yym3449 + yym5 := z.DecBinary() + _ = yym5 if false { } else if z.HasExtensions() && z.DecExt(x.UID) { } else { @@ -43983,9 +52249,9 @@ func (x *Preconditions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } default: - z.DecStructFieldNotFound(-1, yys3447) - } // end switch yys3447 - } // end for yyj3447 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -43993,16 +52259,16 @@ func (x *Preconditions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3450 int - var yyb3450 bool - var yyhl3450 bool = l >= 0 - yyj3450++ - if yyhl3450 { - yyb3450 = yyj3450 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb3450 = r.CheckBreak() 
+ yyb6 = r.CheckBreak() } - if yyb3450 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -44015,8 +52281,8 @@ func (x *Preconditions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.UID == nil { x.UID = new(pkg1_types.UID) } - yym3452 := z.DecBinary() - _ = yym3452 + yym8 := z.DecBinary() + _ = yym8 if false { } else if z.HasExtensions() && z.DecExt(x.UID) { } else { @@ -44024,21 +52290,47 @@ func (x *Preconditions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } } for { - yyj3450++ - if yyhl3450 { - yyb3450 = yyj3450 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb3450 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb3450 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3450-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } +func (x DeletionPropagation) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *DeletionPropagation) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) @@ -44046,39 +52338,39 @@ func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3453 := z.EncBinary() - _ = yym3453 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3454 := !z.EncBinary() - yy2arr3454 := z.EncBasicHandle().StructToArray - var yyq3454 [5]bool - _, _, _ = yysep3454, yyq3454, yy2arr3454 - const yyr3454 bool = false - yyq3454[0] = x.Kind != "" - yyq3454[1] = x.APIVersion != "" - yyq3454[2] = x.GracePeriodSeconds != nil - yyq3454[3] = x.Preconditions != nil - yyq3454[4] = x.OrphanDependents != nil - var yynn3454 int - if yyr3454 || yy2arr3454 { - r.EncodeArrayStart(5) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.GracePeriodSeconds != nil + yyq2[3] = x.Preconditions != nil + yyq2[4] = x.OrphanDependents != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) } else { - yynn3454 = 0 - for _, b := range yyq3454 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn3454++ + yynn2++ } } - r.EncodeMapStart(yynn3454) - yynn3454 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3454 || yy2arr3454 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3454[0] { - yym3456 := z.EncBinary() - _ = yym3456 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -44087,23 +52379,23 @@ func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3454[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3457 := z.EncBinary() - _ = yym3457 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr3454 || yy2arr3454 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3454[1] { - yym3459 := z.EncBinary() - _ = yym3459 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -44112,56 +52404,56 @@ func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3454[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3460 := z.EncBinary() - _ = yym3460 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr3454 || yy2arr3454 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3454[2] { + if yyq2[2] { if x.GracePeriodSeconds == nil { r.EncodeNil() } else { - yy3462 := *x.GracePeriodSeconds - yym3463 := z.EncBinary() - _ = yym3463 + yy10 := *x.GracePeriodSeconds + yym11 := z.EncBinary() + _ = yym11 if false { } else { - r.EncodeInt(int64(yy3462)) + r.EncodeInt(int64(yy10)) } } } else { r.EncodeNil() } } else { - if yyq3454[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("gracePeriodSeconds")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.GracePeriodSeconds == nil { r.EncodeNil() } else { - yy3464 := *x.GracePeriodSeconds - yym3465 := z.EncBinary() - _ = yym3465 + yy12 := *x.GracePeriodSeconds + yym13 := z.EncBinary() + _ = yym13 if false { } else { - r.EncodeInt(int64(yy3464)) + r.EncodeInt(int64(yy12)) } } } } - if yyr3454 || yy2arr3454 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3454[3] { + if yyq2[3] { if x.Preconditions == nil { r.EncodeNil() } else { @@ -44171,7 +52463,7 @@ func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq3454[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("preconditions")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -44182,42 +52474,61 @@ func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr3454 || yy2arr3454 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3454[4] { + if yyq2[4] { if x.OrphanDependents == nil { r.EncodeNil() } else { - yy3468 := *x.OrphanDependents - yym3469 := z.EncBinary() - _ = yym3469 + yy18 := *x.OrphanDependents + yym19 := z.EncBinary() + _ = yym19 if false { } else { - r.EncodeBool(bool(yy3468)) + r.EncodeBool(bool(yy18)) } } } else { r.EncodeNil() } } else { - if yyq3454[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("orphanDependents")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.OrphanDependents == nil { r.EncodeNil() } else { - yy3470 := *x.OrphanDependents - yym3471 := z.EncBinary() - _ = yym3471 + yy20 := *x.OrphanDependents + yym21 := z.EncBinary() + _ = yym21 if false { } else { - r.EncodeBool(bool(yy3470)) + r.EncodeBool(bool(yy20)) } } } } - 
if yyr3454 || yy2arr3454 { + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.PropagationPolicy == nil { + r.EncodeNil() + } else { + yy23 := *x.PropagationPolicy + yy23.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("PropagationPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PropagationPolicy == nil { + r.EncodeNil() + } else { + yy25 := *x.PropagationPolicy + yy25.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -44230,25 +52541,25 @@ func (x *DeleteOptions) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3472 := z.DecBinary() - _ = yym3472 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3473 := r.ContainerType() - if yyct3473 == codecSelferValueTypeMap1234 { - yyl3473 := r.ReadMapStart() - if yyl3473 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3473, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3473 == codecSelferValueTypeArray1234 { - yyl3473 := r.ReadArrayStart() - if yyl3473 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3473, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -44260,12 +52571,12 @@ func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3474Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3474Slc - var yyhl3474 bool = l >= 0 - for yyj3474 := 0; ; yyj3474++ { - if yyhl3474 { - if yyj3474 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -44274,21 +52585,33 @@ func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3474Slc = r.DecodeBytes(yys3474Slc, true, true) - yys3474 := string(yys3474Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3474 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "gracePeriodSeconds": if r.TryDecodeAsNil() { @@ -44299,8 +52622,8 @@ func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.GracePeriodSeconds == nil { x.GracePeriodSeconds = new(int64) } - yym3478 := z.DecBinary() - _ = yym3478 + yym9 := z.DecBinary() + _ = yym9 if 
false { } else { *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64)) @@ -44326,17 +52649,28 @@ func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.OrphanDependents == nil { x.OrphanDependents = new(bool) } - yym3481 := z.DecBinary() - _ = yym3481 + yym12 := z.DecBinary() + _ = yym12 if false { } else { *((*bool)(x.OrphanDependents)) = r.DecodeBool() } } + case "PropagationPolicy": + if r.TryDecodeAsNil() { + if x.PropagationPolicy != nil { + x.PropagationPolicy = nil + } + } else { + if x.PropagationPolicy == nil { + x.PropagationPolicy = new(DeletionPropagation) + } + x.PropagationPolicy.CodecDecodeSelf(d) + } default: - z.DecStructFieldNotFound(-1, yys3474) - } // end switch yys3474 - } // end for yyj3474 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -44344,16 +52678,16 @@ func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3482 int - var yyb3482 bool - var yyhl3482 bool = l >= 0 - yyj3482++ - if yyhl3482 { - yyb3482 = yyj3482 > l + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb3482 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb3482 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -44361,15 +52695,21 @@ func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv15 := &x.Kind + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj3482++ - if yyhl3482 { - yyb3482 = yyj3482 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb3482 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb3482 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -44377,15 +52717,21 @@ func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv17 := &x.APIVersion + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } } - yyj3482++ - if yyhl3482 { - yyb3482 = yyj3482 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb3482 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb3482 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -44398,20 +52744,20 @@ func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.GracePeriodSeconds == nil { x.GracePeriodSeconds = new(int64) } - yym3486 := z.DecBinary() - _ = yym3486 + yym20 := z.DecBinary() + _ = yym20 if false { } else { *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64)) } } - yyj3482++ - if yyhl3482 { - yyb3482 = yyj3482 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb3482 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb3482 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -44426,13 +52772,13 @@ func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.Preconditions.CodecDecodeSelf(d) } - yyj3482++ - if yyhl3482 { - yyb3482 = yyj3482 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb3482 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb3482 { + if yyb14 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -44445,324 +52791,46 @@ func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.OrphanDependents == nil { x.OrphanDependents = new(bool) } - yym3489 := z.DecBinary() - _ = yym3489 + yym23 := z.DecBinary() + _ = yym23 if false { } else { *((*bool)(x.OrphanDependents)) = r.DecodeBool() } } - for { - yyj3482++ - if yyhl3482 { - yyb3482 = yyj3482 > l - } else { - yyb3482 = r.CheckBreak() - } - if yyb3482 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3482-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ExportOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym3490 := z.EncBinary() - _ = yym3490 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep3491 := !z.EncBinary() - yy2arr3491 := z.EncBasicHandle().StructToArray - var yyq3491 [4]bool - _, _, _ = yysep3491, yyq3491, yy2arr3491 - const yyr3491 bool = false - yyq3491[0] = x.Kind != "" - yyq3491[1] = x.APIVersion != "" - var yynn3491 int - if yyr3491 || yy2arr3491 { - r.EncodeArrayStart(4) - } else { - yynn3491 = 2 - for _, b := range yyq3491 { - if b { - yynn3491++ - } - } - r.EncodeMapStart(yynn3491) - yynn3491 = 0 - } - if yyr3491 || yy2arr3491 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3491[0] { - yym3493 := z.EncBinary() - _ = yym3493 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3491[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3494 := z.EncBinary() - _ = yym3494 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr3491 || yy2arr3491 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3491[1] { - yym3496 := z.EncBinary() - _ = yym3496 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq3491[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3497 := z.EncBinary() - _ = yym3497 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr3491 || yy2arr3491 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3499 := z.EncBinary() - _ = yym3499 - if false { - } else { - r.EncodeBool(bool(x.Export)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("export")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3500 := z.EncBinary() - _ = yym3500 - if false { - } else { - r.EncodeBool(bool(x.Export)) - } - } - if yyr3491 || yy2arr3491 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3502 := z.EncBinary() - _ = yym3502 - if false { - } else { - r.EncodeBool(bool(x.Exact)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("exact")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3503 := z.EncBinary() - _ = yym3503 - if false { - } else { - r.EncodeBool(bool(x.Exact)) - } - } - if yyr3491 || yy2arr3491 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ExportOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym3504 := z.DecBinary() - _ = yym3504 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct3505 := r.ContainerType() - if yyct3505 == codecSelferValueTypeMap1234 { - yyl3505 := r.ReadMapStart() - if yyl3505 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl3505, d) - } - } else if yyct3505 == codecSelferValueTypeArray1234 { - yyl3505 := r.ReadArrayStart() - if yyl3505 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl3505, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ExportOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3506Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3506Slc - var yyhl3506 bool = l >= 0 - for yyj3506 := 0; ; yyj3506++ { - if yyhl3506 { - if yyj3506 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3506Slc = r.DecodeBytes(yys3506Slc, true, true) - yys3506 := string(yys3506Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3506 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "export": - if r.TryDecodeAsNil() { - x.Export = false - } else { - x.Export = bool(r.DecodeBool()) - } - case "exact": - if r.TryDecodeAsNil() { - x.Exact = false - } else { - x.Exact = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3506) - } // end switch yys3506 - } // end for yyj3506 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ExportOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj3511 int - var yyb3511 bool - var yyhl3511 bool = l >= 0 - yyj3511++ - if yyhl3511 { - yyb3511 = yyj3511 > l - } else { - yyb3511 = r.CheckBreak() - } - if yyb3511 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj3511++ - if yyhl3511 { - yyb3511 = yyj3511 > l - } else { - yyb3511 = r.CheckBreak() - } - if yyb3511 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj3511++ - if yyhl3511 { - yyb3511 = yyj3511 > l - } else { - yyb3511 = r.CheckBreak() - } - if yyb3511 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Export = false - } else { - x.Export = bool(r.DecodeBool()) - } - yyj3511++ - if yyhl3511 { - yyb3511 = yyj3511 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb3511 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb3511 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Exact = false + if x.PropagationPolicy != nil { + x.PropagationPolicy = nil + } } else { - x.Exact = bool(r.DecodeBool()) + if x.PropagationPolicy == nil { + x.PropagationPolicy = new(DeletionPropagation) + } + x.PropagationPolicy.CodecDecodeSelf(d) } for { - yyj3511++ - if yyhl3511 { - yyb3511 = yyj3511 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb3511 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb3511 { + if yyb14 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3511-1, "") + z.DecStructFieldNotFound(yyj14-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -44774,41 +52842,41 @@ func (x *ListOptions) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3516 := z.EncBinary() - _ = yym3516 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3517 := !z.EncBinary() - yy2arr3517 := z.EncBasicHandle().StructToArray - var yyq3517 [7]bool - _, _, _ = yysep3517, yyq3517, yy2arr3517 - const yyr3517 bool = false - yyq3517[0] = x.Kind != "" - yyq3517[1] = x.APIVersion != "" - yyq3517[2] = x.LabelSelector != "" - yyq3517[3] = x.FieldSelector != "" - yyq3517[4] = x.Watch != false - yyq3517[5] = x.ResourceVersion != "" - yyq3517[6] = x.TimeoutSeconds != nil - var yynn3517 int - if yyr3517 || yy2arr3517 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.LabelSelector != "" + yyq2[3] = x.FieldSelector != "" + yyq2[4] = x.Watch != false + yyq2[5] = x.ResourceVersion != "" + yyq2[6] = x.TimeoutSeconds != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(7) } else { - yynn3517 = 0 - for _, b := range yyq3517 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn3517++ + yynn2++ } } - r.EncodeMapStart(yynn3517) - yynn3517 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3517 || yy2arr3517 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3517[0] { - yym3519 := z.EncBinary() - _ = yym3519 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -44817,23 +52885,23 @@ func (x *ListOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3517[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3520 := z.EncBinary() - _ = yym3520 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr3517 || yy2arr3517 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3517[1] { - yym3522 := z.EncBinary() - _ = yym3522 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 
if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -44842,23 +52910,23 @@ func (x *ListOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3517[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3523 := z.EncBinary() - _ = yym3523 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr3517 || yy2arr3517 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3517[2] { - yym3525 := z.EncBinary() - _ = yym3525 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.LabelSelector)) @@ -44867,23 +52935,23 @@ func (x *ListOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3517[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("labelSelector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3526 := z.EncBinary() - _ = yym3526 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.LabelSelector)) } } } - if yyr3517 || yy2arr3517 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3517[3] { - yym3528 := z.EncBinary() - _ = yym3528 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FieldSelector)) @@ -44892,23 +52960,23 @@ func (x *ListOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3517[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("fieldSelector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3529 := z.EncBinary() - _ = yym3529 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FieldSelector)) } } } - if yyr3517 || yy2arr3517 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3517[4] { - yym3531 := z.EncBinary() - _ = yym3531 + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeBool(bool(x.Watch)) @@ -44917,23 +52985,23 @@ func (x *ListOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq3517[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("watch")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3532 := z.EncBinary() - _ = yym3532 + yym17 := z.EncBinary() + _ = yym17 if false { } else { r.EncodeBool(bool(x.Watch)) } } } - if yyr3517 || yy2arr3517 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3517[5] { - yym3534 := z.EncBinary() - _ = yym3534 + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) @@ -44942,54 +53010,54 @@ func (x *ListOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3517[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, 
string("resourceVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3535 := z.EncBinary() - _ = yym3535 + yym20 := z.EncBinary() + _ = yym20 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) } } } - if yyr3517 || yy2arr3517 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3517[6] { + if yyq2[6] { if x.TimeoutSeconds == nil { r.EncodeNil() } else { - yy3537 := *x.TimeoutSeconds - yym3538 := z.EncBinary() - _ = yym3538 + yy22 := *x.TimeoutSeconds + yym23 := z.EncBinary() + _ = yym23 if false { } else { - r.EncodeInt(int64(yy3537)) + r.EncodeInt(int64(yy22)) } } } else { r.EncodeNil() } } else { - if yyq3517[6] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("timeoutSeconds")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.TimeoutSeconds == nil { r.EncodeNil() } else { - yy3539 := *x.TimeoutSeconds - yym3540 := z.EncBinary() - _ = yym3540 + yy24 := *x.TimeoutSeconds + yym25 := z.EncBinary() + _ = yym25 if false { } else { - r.EncodeInt(int64(yy3539)) + r.EncodeInt(int64(yy24)) } } } } - if yyr3517 || yy2arr3517 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -45002,25 +53070,25 @@ func (x *ListOptions) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3541 := z.DecBinary() - _ = yym3541 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3542 := r.ContainerType() - if yyct3542 == codecSelferValueTypeMap1234 { - yyl3542 := r.ReadMapStart() - if yyl3542 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3542, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3542 == codecSelferValueTypeArray1234 { - yyl3542 := r.ReadArrayStart() - if yyl3542 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3542, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -45032,12 +53100,12 @@ func (x *ListOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3543Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3543Slc - var yyhl3543 bool = l >= 0 - for yyj3543 := 0; ; yyj3543++ { - if yyhl3543 { - if yyj3543 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -45046,45 +53114,81 @@ func (x *ListOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3543Slc = r.DecodeBytes(yys3543Slc, true, true) - yys3543 := string(yys3543Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3543 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := 
&x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "labelSelector": if r.TryDecodeAsNil() { x.LabelSelector = "" } else { - x.LabelSelector = string(r.DecodeString()) + yyv8 := &x.LabelSelector + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } case "fieldSelector": if r.TryDecodeAsNil() { x.FieldSelector = "" } else { - x.FieldSelector = string(r.DecodeString()) + yyv10 := &x.FieldSelector + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } case "watch": if r.TryDecodeAsNil() { x.Watch = false } else { - x.Watch = bool(r.DecodeBool()) + yyv12 := &x.Watch + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } } case "resourceVersion": if r.TryDecodeAsNil() { x.ResourceVersion = "" } else { - x.ResourceVersion = string(r.DecodeString()) + yyv14 := &x.ResourceVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } case "timeoutSeconds": if r.TryDecodeAsNil() { @@ -45095,17 +53199,17 @@ func (x *ListOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.TimeoutSeconds == nil { x.TimeoutSeconds = new(int64) } - yym3551 := z.DecBinary() - _ = yym3551 + yym17 := z.DecBinary() + _ = yym17 if false { } else { *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64)) } } default: - z.DecStructFieldNotFound(-1, yys3543) - } // end switch yys3543 - } // end for yyj3543 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -45113,16 +53217,16 @@ func (x *ListOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3552 int - var yyb3552 bool - var yyhl3552 bool = l >= 0 - yyj3552++ - if yyhl3552 { - yyb3552 = yyj3552 > l + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3552 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3552 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -45130,15 +53234,21 @@ func (x *ListOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv19 := &x.Kind + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } } - yyj3552++ - if yyhl3552 { - yyb3552 = yyj3552 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3552 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3552 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -45146,15 +53256,21 @@ func (x *ListOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv21 := &x.APIVersion + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } } - yyj3552++ - if yyhl3552 { - yyb3552 = yyj3552 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3552 = r.CheckBreak() + yyb18 = 
r.CheckBreak() } - if yyb3552 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -45162,15 +53278,21 @@ func (x *ListOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.LabelSelector = "" } else { - x.LabelSelector = string(r.DecodeString()) + yyv23 := &x.LabelSelector + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } } - yyj3552++ - if yyhl3552 { - yyb3552 = yyj3552 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3552 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3552 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -45178,15 +53300,21 @@ func (x *ListOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.FieldSelector = "" } else { - x.FieldSelector = string(r.DecodeString()) + yyv25 := &x.FieldSelector + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } } - yyj3552++ - if yyhl3552 { - yyb3552 = yyj3552 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3552 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3552 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -45194,15 +53322,21 @@ func (x *ListOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Watch = false } else { - x.Watch = bool(r.DecodeBool()) + yyv27 := &x.Watch + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(yyv27)) = r.DecodeBool() + } } - yyj3552++ - if yyhl3552 { - yyb3552 = yyj3552 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3552 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3552 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -45210,15 +53344,21 @@ func (x *ListOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ResourceVersion = "" } else { - x.ResourceVersion = string(r.DecodeString()) + yyv29 := &x.ResourceVersion + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } } - yyj3552++ - if yyhl3552 { - yyb3552 = yyj3552 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3552 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3552 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -45231,25 +53371,25 @@ func (x *ListOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.TimeoutSeconds == nil { x.TimeoutSeconds = new(int64) } - yym3560 := z.DecBinary() - _ = yym3560 + yym32 := z.DecBinary() + _ = yym32 if false { } else { *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64)) } } for { - yyj3552++ - if yyhl3552 { - yyb3552 = yyj3552 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3552 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3552 { + if yyb18 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3552-1, "") + z.DecStructFieldNotFound(yyj18-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -45261,44 +53401,44 @@ func (x *PodLogOptions) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3561 := z.EncBinary() - _ = yym3561 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3562 := !z.EncBinary() - yy2arr3562 := z.EncBasicHandle().StructToArray - var yyq3562 
[10]bool - _, _, _ = yysep3562, yyq3562, yy2arr3562 - const yyr3562 bool = false - yyq3562[0] = x.Kind != "" - yyq3562[1] = x.APIVersion != "" - yyq3562[2] = x.Container != "" - yyq3562[3] = x.Follow != false - yyq3562[4] = x.Previous != false - yyq3562[5] = x.SinceSeconds != nil - yyq3562[6] = x.SinceTime != nil - yyq3562[7] = x.Timestamps != false - yyq3562[8] = x.TailLines != nil - yyq3562[9] = x.LimitBytes != nil - var yynn3562 int - if yyr3562 || yy2arr3562 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Container != "" + yyq2[3] = x.Follow != false + yyq2[4] = x.Previous != false + yyq2[5] = x.SinceSeconds != nil + yyq2[6] = x.SinceTime != nil + yyq2[7] = x.Timestamps != false + yyq2[8] = x.TailLines != nil + yyq2[9] = x.LimitBytes != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(10) } else { - yynn3562 = 0 - for _, b := range yyq3562 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn3562++ + yynn2++ } } - r.EncodeMapStart(yynn3562) - yynn3562 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3562 || yy2arr3562 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3562[0] { - yym3564 := z.EncBinary() - _ = yym3564 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -45307,23 +53447,23 @@ func (x *PodLogOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3562[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3565 := z.EncBinary() - _ = yym3565 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr3562 || yy2arr3562 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3562[1] { - yym3567 := z.EncBinary() - _ = yym3567 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -45332,23 +53472,23 @@ func (x *PodLogOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3562[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3568 := z.EncBinary() - _ = yym3568 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr3562 || yy2arr3562 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3562[2] { - yym3570 := z.EncBinary() - _ = yym3570 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Container)) @@ -45357,23 +53497,23 @@ func (x *PodLogOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3562[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("container")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3571 := z.EncBinary() - _ = yym3571 + yym11 := z.EncBinary() + _ = yym11 if 
false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Container)) } } } - if yyr3562 || yy2arr3562 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3562[3] { - yym3573 := z.EncBinary() - _ = yym3573 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeBool(bool(x.Follow)) @@ -45382,23 +53522,23 @@ func (x *PodLogOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq3562[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("follow")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3574 := z.EncBinary() - _ = yym3574 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeBool(bool(x.Follow)) } } } - if yyr3562 || yy2arr3562 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3562[4] { - yym3576 := z.EncBinary() - _ = yym3576 + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeBool(bool(x.Previous)) @@ -45407,66 +53547,66 @@ func (x *PodLogOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq3562[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("previous")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3577 := z.EncBinary() - _ = yym3577 + yym17 := z.EncBinary() + _ = yym17 if false { } else { r.EncodeBool(bool(x.Previous)) } } } - if yyr3562 || yy2arr3562 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3562[5] { + if yyq2[5] { if x.SinceSeconds == nil { r.EncodeNil() } else { - yy3579 := *x.SinceSeconds - yym3580 := z.EncBinary() - _ = yym3580 + yy19 := *x.SinceSeconds + yym20 := z.EncBinary() + _ = yym20 if false { } else { - r.EncodeInt(int64(yy3579)) + r.EncodeInt(int64(yy19)) } } } else { r.EncodeNil() } } else { - if yyq3562[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("sinceSeconds")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.SinceSeconds == nil { r.EncodeNil() } else { - yy3581 := *x.SinceSeconds - yym3582 := z.EncBinary() - _ = yym3582 + yy21 := *x.SinceSeconds + yym22 := z.EncBinary() + _ = yym22 if false { } else { - r.EncodeInt(int64(yy3581)) + r.EncodeInt(int64(yy21)) } } } } - if yyr3562 || yy2arr3562 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3562[6] { + if yyq2[6] { if x.SinceTime == nil { r.EncodeNil() } else { - yym3584 := z.EncBinary() - _ = yym3584 + yym24 := z.EncBinary() + _ = yym24 if false { } else if z.HasExtensions() && z.EncExt(x.SinceTime) { - } else if yym3584 { + } else if yym24 { z.EncBinaryMarshal(x.SinceTime) - } else if !yym3584 && z.IsJSONHandle() { + } else if !yym24 && z.IsJSONHandle() { z.EncJSONMarshal(x.SinceTime) } else { z.EncFallback(x.SinceTime) @@ -45476,20 +53616,20 @@ func (x *PodLogOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq3562[6] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("sinceTime")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.SinceTime == nil { r.EncodeNil() } else { - yym3585 := z.EncBinary() - _ = yym3585 + yym25 := z.EncBinary() + _ = yym25 if false { } else if z.HasExtensions() && z.EncExt(x.SinceTime) { - } else if 
yym3585 { + } else if yym25 { z.EncBinaryMarshal(x.SinceTime) - } else if !yym3585 && z.IsJSONHandle() { + } else if !yym25 && z.IsJSONHandle() { z.EncJSONMarshal(x.SinceTime) } else { z.EncFallback(x.SinceTime) @@ -45497,11 +53637,11 @@ func (x *PodLogOptions) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr3562 || yy2arr3562 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3562[7] { - yym3587 := z.EncBinary() - _ = yym3587 + if yyq2[7] { + yym27 := z.EncBinary() + _ = yym27 if false { } else { r.EncodeBool(bool(x.Timestamps)) @@ -45510,89 +53650,89 @@ func (x *PodLogOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq3562[7] { + if yyq2[7] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("timestamps")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3588 := z.EncBinary() - _ = yym3588 + yym28 := z.EncBinary() + _ = yym28 if false { } else { r.EncodeBool(bool(x.Timestamps)) } } } - if yyr3562 || yy2arr3562 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3562[8] { + if yyq2[8] { if x.TailLines == nil { r.EncodeNil() } else { - yy3590 := *x.TailLines - yym3591 := z.EncBinary() - _ = yym3591 + yy30 := *x.TailLines + yym31 := z.EncBinary() + _ = yym31 if false { } else { - r.EncodeInt(int64(yy3590)) + r.EncodeInt(int64(yy30)) } } } else { r.EncodeNil() } } else { - if yyq3562[8] { + if yyq2[8] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("tailLines")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.TailLines == nil { r.EncodeNil() } else { - yy3592 := *x.TailLines - yym3593 := z.EncBinary() - _ = yym3593 + yy32 := *x.TailLines + yym33 := z.EncBinary() + _ = yym33 if false { } else { - r.EncodeInt(int64(yy3592)) + r.EncodeInt(int64(yy32)) } } } } - if yyr3562 || yy2arr3562 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3562[9] { + if yyq2[9] { if x.LimitBytes == nil { r.EncodeNil() } else { - yy3595 := *x.LimitBytes - yym3596 := z.EncBinary() - _ = yym3596 + yy35 := *x.LimitBytes + yym36 := z.EncBinary() + _ = yym36 if false { } else { - r.EncodeInt(int64(yy3595)) + r.EncodeInt(int64(yy35)) } } } else { r.EncodeNil() } } else { - if yyq3562[9] { + if yyq2[9] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("limitBytes")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.LimitBytes == nil { r.EncodeNil() } else { - yy3597 := *x.LimitBytes - yym3598 := z.EncBinary() - _ = yym3598 + yy37 := *x.LimitBytes + yym38 := z.EncBinary() + _ = yym38 if false { } else { - r.EncodeInt(int64(yy3597)) + r.EncodeInt(int64(yy37)) } } } } - if yyr3562 || yy2arr3562 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -45605,25 +53745,25 @@ func (x *PodLogOptions) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3599 := z.DecBinary() - _ = yym3599 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3600 := r.ContainerType() - if yyct3600 == codecSelferValueTypeMap1234 { - yyl3600 := r.ReadMapStart() - if yyl3600 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := 
r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3600, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3600 == codecSelferValueTypeArray1234 { - yyl3600 := r.ReadArrayStart() - if yyl3600 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3600, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -45635,12 +53775,12 @@ func (x *PodLogOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3601Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3601Slc - var yyhl3601 bool = l >= 0 - for yyj3601 := 0; ; yyj3601++ { - if yyhl3601 { - if yyj3601 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -45649,39 +53789,69 @@ func (x *PodLogOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3601Slc = r.DecodeBytes(yys3601Slc, true, true) - yys3601 := string(yys3601Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3601 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "container": if r.TryDecodeAsNil() { x.Container = "" } else { - x.Container = string(r.DecodeString()) + yyv8 := &x.Container + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } case "follow": if r.TryDecodeAsNil() { x.Follow = false } else { - x.Follow = bool(r.DecodeBool()) + yyv10 := &x.Follow + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } } case "previous": if r.TryDecodeAsNil() { x.Previous = false } else { - x.Previous = bool(r.DecodeBool()) + yyv12 := &x.Previous + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } } case "sinceSeconds": if r.TryDecodeAsNil() { @@ -45692,8 +53862,8 @@ func (x *PodLogOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.SinceSeconds == nil { x.SinceSeconds = new(int64) } - yym3608 := z.DecBinary() - _ = yym3608 + yym15 := z.DecBinary() + _ = yym15 if false { } else { *((*int64)(x.SinceSeconds)) = int64(r.DecodeInt(64)) @@ -45706,15 +53876,15 @@ func (x *PodLogOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } else { if x.SinceTime == nil { - x.SinceTime = new(pkg2_unversioned.Time) + x.SinceTime = new(pkg2_v1.Time) } - yym3610 := z.DecBinary() - _ = yym3610 + yym17 := z.DecBinary() + _ = yym17 if false { } else if z.HasExtensions() && z.DecExt(x.SinceTime) { - } else if yym3610 { + } else if yym17 { z.DecBinaryUnmarshal(x.SinceTime) - } else if !yym3610 && z.IsJSONHandle() { 
+ } else if !yym17 && z.IsJSONHandle() { z.DecJSONUnmarshal(x.SinceTime) } else { z.DecFallback(x.SinceTime, false) @@ -45724,7 +53894,13 @@ func (x *PodLogOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Timestamps = false } else { - x.Timestamps = bool(r.DecodeBool()) + yyv18 := &x.Timestamps + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*bool)(yyv18)) = r.DecodeBool() + } } case "tailLines": if r.TryDecodeAsNil() { @@ -45735,8 +53911,8 @@ func (x *PodLogOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.TailLines == nil { x.TailLines = new(int64) } - yym3613 := z.DecBinary() - _ = yym3613 + yym21 := z.DecBinary() + _ = yym21 if false { } else { *((*int64)(x.TailLines)) = int64(r.DecodeInt(64)) @@ -45751,17 +53927,17 @@ func (x *PodLogOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.LimitBytes == nil { x.LimitBytes = new(int64) } - yym3615 := z.DecBinary() - _ = yym3615 + yym23 := z.DecBinary() + _ = yym23 if false { } else { *((*int64)(x.LimitBytes)) = int64(r.DecodeInt(64)) } } default: - z.DecStructFieldNotFound(-1, yys3601) - } // end switch yys3601 - } // end for yyj3601 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -45769,16 +53945,16 @@ func (x *PodLogOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3616 int - var yyb3616 bool - var yyhl3616 bool = l >= 0 - yyj3616++ - if yyhl3616 { - yyb3616 = yyj3616 > l + var yyj24 int + var yyb24 bool + var yyhl24 bool = l >= 0 + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3616 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3616 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -45786,15 +53962,21 @@ func (x *PodLogOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv25 := &x.Kind + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } } - yyj3616++ - if yyhl3616 { - yyb3616 = yyj3616 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3616 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3616 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -45802,15 +53984,21 @@ func (x *PodLogOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv27 := &x.APIVersion + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } } - yyj3616++ - if yyhl3616 { - yyb3616 = yyj3616 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3616 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3616 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -45818,15 +54006,21 @@ func (x *PodLogOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Container = "" } else { - x.Container = string(r.DecodeString()) + yyv29 := &x.Container + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } } - yyj3616++ - if yyhl3616 { - yyb3616 = yyj3616 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3616 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3616 { 
+ if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -45834,15 +54028,21 @@ func (x *PodLogOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Follow = false } else { - x.Follow = bool(r.DecodeBool()) + yyv31 := &x.Follow + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*bool)(yyv31)) = r.DecodeBool() + } } - yyj3616++ - if yyhl3616 { - yyb3616 = yyj3616 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3616 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3616 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -45850,15 +54050,21 @@ func (x *PodLogOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Previous = false } else { - x.Previous = bool(r.DecodeBool()) + yyv33 := &x.Previous + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*bool)(yyv33)) = r.DecodeBool() + } } - yyj3616++ - if yyhl3616 { - yyb3616 = yyj3616 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3616 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3616 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -45871,20 +54077,20 @@ func (x *PodLogOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.SinceSeconds == nil { x.SinceSeconds = new(int64) } - yym3623 := z.DecBinary() - _ = yym3623 + yym36 := z.DecBinary() + _ = yym36 if false { } else { *((*int64)(x.SinceSeconds)) = int64(r.DecodeInt(64)) } } - yyj3616++ - if yyhl3616 { - yyb3616 = yyj3616 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3616 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3616 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -45895,27 +54101,27 @@ func (x *PodLogOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } } else { if x.SinceTime == nil { - x.SinceTime = new(pkg2_unversioned.Time) + x.SinceTime = new(pkg2_v1.Time) } - yym3625 := z.DecBinary() - _ = yym3625 + yym38 := z.DecBinary() + _ = yym38 if false { } else if z.HasExtensions() && z.DecExt(x.SinceTime) { - } else if yym3625 { + } else if yym38 { z.DecBinaryUnmarshal(x.SinceTime) - } else if !yym3625 && z.IsJSONHandle() { + } else if !yym38 && z.IsJSONHandle() { z.DecJSONUnmarshal(x.SinceTime) } else { z.DecFallback(x.SinceTime, false) } } - yyj3616++ - if yyhl3616 { - yyb3616 = yyj3616 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3616 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3616 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -45923,15 +54129,21 @@ func (x *PodLogOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Timestamps = false } else { - x.Timestamps = bool(r.DecodeBool()) + yyv39 := &x.Timestamps + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*bool)(yyv39)) = r.DecodeBool() + } } - yyj3616++ - if yyhl3616 { - yyb3616 = yyj3616 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3616 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3616 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -45944,20 +54156,20 @@ func (x *PodLogOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.TailLines == nil { x.TailLines = new(int64) } - yym3628 := z.DecBinary() - _ = yym3628 + yym42 := z.DecBinary() + _ = yym42 if false { } else { *((*int64)(x.TailLines)) = int64(r.DecodeInt(64)) } } - 
yyj3616++ - if yyhl3616 { - yyb3616 = yyj3616 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3616 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3616 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -45970,25 +54182,25 @@ func (x *PodLogOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.LimitBytes == nil { x.LimitBytes = new(int64) } - yym3630 := z.DecBinary() - _ = yym3630 + yym44 := z.DecBinary() + _ = yym44 if false { } else { *((*int64)(x.LimitBytes)) = int64(r.DecodeInt(64)) } } for { - yyj3616++ - if yyhl3616 { - yyb3616 = yyj3616 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3616 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3616 { + if yyb24 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3616-1, "") + z.DecStructFieldNotFound(yyj24-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -46000,41 +54212,41 @@ func (x *PodAttachOptions) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3631 := z.EncBinary() - _ = yym3631 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3632 := !z.EncBinary() - yy2arr3632 := z.EncBasicHandle().StructToArray - var yyq3632 [7]bool - _, _, _ = yysep3632, yyq3632, yy2arr3632 - const yyr3632 bool = false - yyq3632[0] = x.Kind != "" - yyq3632[1] = x.APIVersion != "" - yyq3632[2] = x.Stdin != false - yyq3632[3] = x.Stdout != false - yyq3632[4] = x.Stderr != false - yyq3632[5] = x.TTY != false - yyq3632[6] = x.Container != "" - var yynn3632 int - if yyr3632 || yy2arr3632 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Stdin != false + yyq2[3] = x.Stdout != false + yyq2[4] = x.Stderr != false + yyq2[5] = x.TTY != false + yyq2[6] = x.Container != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(7) } else { - yynn3632 = 0 - for _, b := range yyq3632 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn3632++ + yynn2++ } } - r.EncodeMapStart(yynn3632) - yynn3632 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3632 || yy2arr3632 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3632[0] { - yym3634 := z.EncBinary() - _ = yym3634 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -46043,23 +54255,23 @@ func (x *PodAttachOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3632[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3635 := z.EncBinary() - _ = yym3635 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr3632 || yy2arr3632 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3632[1] { - yym3637 := z.EncBinary() - _ = yym3637 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -46068,23 +54280,23 @@ func (x *PodAttachOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } 
else { - if yyq3632[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3638 := z.EncBinary() - _ = yym3638 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr3632 || yy2arr3632 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3632[2] { - yym3640 := z.EncBinary() - _ = yym3640 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeBool(bool(x.Stdin)) @@ -46093,23 +54305,23 @@ func (x *PodAttachOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq3632[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("stdin")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3641 := z.EncBinary() - _ = yym3641 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeBool(bool(x.Stdin)) } } } - if yyr3632 || yy2arr3632 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3632[3] { - yym3643 := z.EncBinary() - _ = yym3643 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeBool(bool(x.Stdout)) @@ -46118,23 +54330,23 @@ func (x *PodAttachOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq3632[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("stdout")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3644 := z.EncBinary() - _ = yym3644 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeBool(bool(x.Stdout)) } } } - if yyr3632 || yy2arr3632 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3632[4] { - yym3646 := z.EncBinary() - _ = yym3646 + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeBool(bool(x.Stderr)) @@ -46143,23 +54355,23 @@ func (x *PodAttachOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq3632[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("stderr")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3647 := z.EncBinary() - _ = yym3647 + yym17 := z.EncBinary() + _ = yym17 if false { } else { r.EncodeBool(bool(x.Stderr)) } } } - if yyr3632 || yy2arr3632 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3632[5] { - yym3649 := z.EncBinary() - _ = yym3649 + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 if false { } else { r.EncodeBool(bool(x.TTY)) @@ -46168,23 +54380,23 @@ func (x *PodAttachOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq3632[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("tty")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3650 := z.EncBinary() - _ = yym3650 + yym20 := z.EncBinary() + _ = yym20 if false { } else { r.EncodeBool(bool(x.TTY)) } } } - if yyr3632 || yy2arr3632 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3632[6] { - yym3652 := z.EncBinary() - _ = yym3652 + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 if false { } else { 
r.EncodeString(codecSelferC_UTF81234, string(x.Container)) @@ -46193,19 +54405,19 @@ func (x *PodAttachOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3632[6] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("container")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3653 := z.EncBinary() - _ = yym3653 + yym23 := z.EncBinary() + _ = yym23 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Container)) } } } - if yyr3632 || yy2arr3632 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -46218,25 +54430,25 @@ func (x *PodAttachOptions) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3654 := z.DecBinary() - _ = yym3654 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3655 := r.ContainerType() - if yyct3655 == codecSelferValueTypeMap1234 { - yyl3655 := r.ReadMapStart() - if yyl3655 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3655, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3655 == codecSelferValueTypeArray1234 { - yyl3655 := r.ReadArrayStart() - if yyl3655 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3655, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -46248,12 +54460,12 @@ func (x *PodAttachOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3656Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3656Slc - var yyhl3656 bool = l >= 0 - for yyj3656 := 0; ; yyj3656++ { - if yyhl3656 { - if yyj3656 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -46262,56 +54474,98 @@ func (x *PodAttachOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3656Slc = r.DecodeBytes(yys3656Slc, true, true) - yys3656 := string(yys3656Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3656 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "stdin": if r.TryDecodeAsNil() { x.Stdin = false } else { - x.Stdin = bool(r.DecodeBool()) + yyv8 := &x.Stdin + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } } case "stdout": if 
r.TryDecodeAsNil() { x.Stdout = false } else { - x.Stdout = bool(r.DecodeBool()) + yyv10 := &x.Stdout + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } } case "stderr": if r.TryDecodeAsNil() { x.Stderr = false } else { - x.Stderr = bool(r.DecodeBool()) + yyv12 := &x.Stderr + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } } case "tty": if r.TryDecodeAsNil() { x.TTY = false } else { - x.TTY = bool(r.DecodeBool()) + yyv14 := &x.TTY + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } } case "container": if r.TryDecodeAsNil() { x.Container = "" } else { - x.Container = string(r.DecodeString()) + yyv16 := &x.Container + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys3656) - } // end switch yys3656 - } // end for yyj3656 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -46319,16 +54573,16 @@ func (x *PodAttachOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3664 int - var yyb3664 bool - var yyhl3664 bool = l >= 0 - yyj3664++ - if yyhl3664 { - yyb3664 = yyj3664 > l + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3664 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3664 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -46336,15 +54590,21 @@ func (x *PodAttachOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv19 := &x.Kind + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } } - yyj3664++ - if yyhl3664 { - yyb3664 = yyj3664 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3664 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3664 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -46352,15 +54612,21 @@ func (x *PodAttachOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv21 := &x.APIVersion + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } } - yyj3664++ - if yyhl3664 { - yyb3664 = yyj3664 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3664 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3664 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -46368,15 +54634,21 @@ func (x *PodAttachOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Stdin = false } else { - x.Stdin = bool(r.DecodeBool()) + yyv23 := &x.Stdin + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(yyv23)) = r.DecodeBool() + } } - yyj3664++ - if yyhl3664 { - yyb3664 = yyj3664 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3664 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3664 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -46384,15 +54656,21 @@ func (x *PodAttachOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Stdout = 
false } else { - x.Stdout = bool(r.DecodeBool()) + yyv25 := &x.Stdout + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*bool)(yyv25)) = r.DecodeBool() + } } - yyj3664++ - if yyhl3664 { - yyb3664 = yyj3664 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3664 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3664 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -46400,15 +54678,21 @@ func (x *PodAttachOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Stderr = false } else { - x.Stderr = bool(r.DecodeBool()) + yyv27 := &x.Stderr + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(yyv27)) = r.DecodeBool() + } } - yyj3664++ - if yyhl3664 { - yyb3664 = yyj3664 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3664 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3664 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -46416,15 +54700,21 @@ func (x *PodAttachOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.TTY = false } else { - x.TTY = bool(r.DecodeBool()) + yyv29 := &x.TTY + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } } - yyj3664++ - if yyhl3664 { - yyb3664 = yyj3664 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3664 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3664 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -46432,20 +54722,26 @@ func (x *PodAttachOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Container = "" } else { - x.Container = string(r.DecodeString()) + yyv31 := &x.Container + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } } for { - yyj3664++ - if yyhl3664 { - yyb3664 = yyj3664 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3664 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3664 { + if yyb18 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3664-1, "") + z.DecStructFieldNotFound(yyj18-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -46457,41 +54753,41 @@ func (x *PodExecOptions) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3672 := z.EncBinary() - _ = yym3672 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3673 := !z.EncBinary() - yy2arr3673 := z.EncBasicHandle().StructToArray - var yyq3673 [8]bool - _, _, _ = yysep3673, yyq3673, yy2arr3673 - const yyr3673 bool = false - yyq3673[0] = x.Kind != "" - yyq3673[1] = x.APIVersion != "" - yyq3673[2] = x.Stdin != false - yyq3673[3] = x.Stdout != false - yyq3673[4] = x.Stderr != false - yyq3673[5] = x.TTY != false - yyq3673[6] = x.Container != "" - var yynn3673 int - if yyr3673 || yy2arr3673 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Stdin != false + yyq2[3] = x.Stdout != false + yyq2[4] = x.Stderr != false + yyq2[5] = x.TTY != false + yyq2[6] = x.Container != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(8) } else { - yynn3673 = 1 - for _, b := range yyq3673 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn3673++ + yynn2++ } } - 
r.EncodeMapStart(yynn3673) - yynn3673 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3673 || yy2arr3673 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3673[0] { - yym3675 := z.EncBinary() - _ = yym3675 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -46500,23 +54796,23 @@ func (x *PodExecOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3673[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3676 := z.EncBinary() - _ = yym3676 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr3673 || yy2arr3673 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3673[1] { - yym3678 := z.EncBinary() - _ = yym3678 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -46525,23 +54821,23 @@ func (x *PodExecOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3673[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3679 := z.EncBinary() - _ = yym3679 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr3673 || yy2arr3673 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3673[2] { - yym3681 := z.EncBinary() - _ = yym3681 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeBool(bool(x.Stdin)) @@ -46550,23 +54846,23 @@ func (x *PodExecOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq3673[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("stdin")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3682 := z.EncBinary() - _ = yym3682 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeBool(bool(x.Stdin)) } } } - if yyr3673 || yy2arr3673 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3673[3] { - yym3684 := z.EncBinary() - _ = yym3684 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeBool(bool(x.Stdout)) @@ -46575,23 +54871,23 @@ func (x *PodExecOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq3673[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("stdout")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3685 := z.EncBinary() - _ = yym3685 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeBool(bool(x.Stdout)) } } } - if yyr3673 || yy2arr3673 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3673[4] { - yym3687 := z.EncBinary() - _ = yym3687 + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeBool(bool(x.Stderr)) @@ -46600,23 +54896,23 @@ func (x *PodExecOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - 
if yyq3673[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("stderr")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3688 := z.EncBinary() - _ = yym3688 + yym17 := z.EncBinary() + _ = yym17 if false { } else { r.EncodeBool(bool(x.Stderr)) } } } - if yyr3673 || yy2arr3673 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3673[5] { - yym3690 := z.EncBinary() - _ = yym3690 + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 if false { } else { r.EncodeBool(bool(x.TTY)) @@ -46625,23 +54921,23 @@ func (x *PodExecOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq3673[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("tty")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3691 := z.EncBinary() - _ = yym3691 + yym20 := z.EncBinary() + _ = yym20 if false { } else { r.EncodeBool(bool(x.TTY)) } } } - if yyr3673 || yy2arr3673 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3673[6] { - yym3693 := z.EncBinary() - _ = yym3693 + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Container)) @@ -46650,25 +54946,25 @@ func (x *PodExecOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3673[6] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("container")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3694 := z.EncBinary() - _ = yym3694 + yym23 := z.EncBinary() + _ = yym23 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Container)) } } } - if yyr3673 || yy2arr3673 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Command == nil { r.EncodeNil() } else { - yym3696 := z.EncBinary() - _ = yym3696 + yym25 := z.EncBinary() + _ = yym25 if false { } else { z.F.EncSliceStringV(x.Command, false, e) @@ -46681,15 +54977,15 @@ func (x *PodExecOptions) CodecEncodeSelf(e *codec1978.Encoder) { if x.Command == nil { r.EncodeNil() } else { - yym3697 := z.EncBinary() - _ = yym3697 + yym26 := z.EncBinary() + _ = yym26 if false { } else { z.F.EncSliceStringV(x.Command, false, e) } } } - if yyr3673 || yy2arr3673 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -46702,25 +54998,25 @@ func (x *PodExecOptions) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3698 := z.DecBinary() - _ = yym3698 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3699 := r.ContainerType() - if yyct3699 == codecSelferValueTypeMap1234 { - yyl3699 := r.ReadMapStart() - if yyl3699 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3699, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3699 == codecSelferValueTypeArray1234 { - yyl3699 := r.ReadArrayStart() - if yyl3699 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3699, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -46732,12 +55028,12 @@ func (x *PodExecOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3700Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3700Slc - var yyhl3700 bool = l >= 0 - for yyj3700 := 0; ; yyj3700++ { - if yyhl3700 { - if yyj3700 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -46746,68 +55042,110 @@ func (x *PodExecOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3700Slc = r.DecodeBytes(yys3700Slc, true, true) - yys3700 := string(yys3700Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3700 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "stdin": if r.TryDecodeAsNil() { x.Stdin = false } else { - x.Stdin = bool(r.DecodeBool()) + yyv8 := &x.Stdin + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } } case "stdout": if r.TryDecodeAsNil() { x.Stdout = false } else { - x.Stdout = bool(r.DecodeBool()) + yyv10 := &x.Stdout + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } } case "stderr": if r.TryDecodeAsNil() { x.Stderr = false } else { - x.Stderr = bool(r.DecodeBool()) + yyv12 := &x.Stderr + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } } case "tty": if r.TryDecodeAsNil() { x.TTY = false } else { - x.TTY = bool(r.DecodeBool()) + yyv14 := &x.TTY + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } } case "container": if r.TryDecodeAsNil() { x.Container = "" } else { - x.Container = string(r.DecodeString()) + yyv16 := &x.Container + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } } case "command": if r.TryDecodeAsNil() { x.Command = nil } else { - yyv3708 := &x.Command - yym3709 := z.DecBinary() - _ = yym3709 + yyv18 := &x.Command + yym19 := z.DecBinary() + _ = yym19 if false { } else { - z.F.DecSliceStringX(yyv3708, false, d) + z.F.DecSliceStringX(yyv18, false, d) } } default: - z.DecStructFieldNotFound(-1, yys3700) - } // end switch yys3700 - } // end for yyj3700 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -46815,16 +55153,16 @@ func (x *PodExecOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3710 int - var yyb3710 bool - var yyhl3710 bool = l >= 0 - yyj3710++ 
- if yyhl3710 { - yyb3710 = yyj3710 > l + var yyj20 int + var yyb20 bool + var yyhl20 bool = l >= 0 + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l } else { - yyb3710 = r.CheckBreak() + yyb20 = r.CheckBreak() } - if yyb3710 { + if yyb20 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -46832,15 +55170,21 @@ func (x *PodExecOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv21 := &x.Kind + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } } - yyj3710++ - if yyhl3710 { - yyb3710 = yyj3710 > l + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l } else { - yyb3710 = r.CheckBreak() + yyb20 = r.CheckBreak() } - if yyb3710 { + if yyb20 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -46848,15 +55192,21 @@ func (x *PodExecOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv23 := &x.APIVersion + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } } - yyj3710++ - if yyhl3710 { - yyb3710 = yyj3710 > l + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l } else { - yyb3710 = r.CheckBreak() + yyb20 = r.CheckBreak() } - if yyb3710 { + if yyb20 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -46864,15 +55214,21 @@ func (x *PodExecOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Stdin = false } else { - x.Stdin = bool(r.DecodeBool()) + yyv25 := &x.Stdin + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*bool)(yyv25)) = r.DecodeBool() + } } - yyj3710++ - if yyhl3710 { - yyb3710 = yyj3710 > l + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l } else { - yyb3710 = r.CheckBreak() + yyb20 = r.CheckBreak() } - if yyb3710 { + if yyb20 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -46880,15 +55236,21 @@ func (x *PodExecOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Stdout = false } else { - x.Stdout = bool(r.DecodeBool()) + yyv27 := &x.Stdout + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(yyv27)) = r.DecodeBool() + } } - yyj3710++ - if yyhl3710 { - yyb3710 = yyj3710 > l + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l } else { - yyb3710 = r.CheckBreak() + yyb20 = r.CheckBreak() } - if yyb3710 { + if yyb20 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -46896,15 +55258,21 @@ func (x *PodExecOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Stderr = false } else { - x.Stderr = bool(r.DecodeBool()) + yyv29 := &x.Stderr + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } } - yyj3710++ - if yyhl3710 { - yyb3710 = yyj3710 > l + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l } else { - yyb3710 = r.CheckBreak() + yyb20 = r.CheckBreak() } - if yyb3710 { + if yyb20 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -46912,15 +55280,21 @@ func (x *PodExecOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.TTY = false } else { - x.TTY = bool(r.DecodeBool()) + yyv31 := &x.TTY + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*bool)(yyv31)) = r.DecodeBool() + } } - yyj3710++ - if yyhl3710 { - yyb3710 = yyj3710 > l + yyj20++ + if yyhl20 { 
+ yyb20 = yyj20 > l } else { - yyb3710 = r.CheckBreak() + yyb20 = r.CheckBreak() } - if yyb3710 { + if yyb20 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -46928,15 +55302,21 @@ func (x *PodExecOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Container = "" } else { - x.Container = string(r.DecodeString()) + yyv33 := &x.Container + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*string)(yyv33)) = r.DecodeString() + } } - yyj3710++ - if yyhl3710 { - yyb3710 = yyj3710 > l + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l } else { - yyb3710 = r.CheckBreak() + yyb20 = r.CheckBreak() } - if yyb3710 { + if yyb20 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -46944,68 +55324,68 @@ func (x *PodExecOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Command = nil } else { - yyv3718 := &x.Command - yym3719 := z.DecBinary() - _ = yym3719 + yyv35 := &x.Command + yym36 := z.DecBinary() + _ = yym36 if false { } else { - z.F.DecSliceStringX(yyv3718, false, d) + z.F.DecSliceStringX(yyv35, false, d) } } for { - yyj3710++ - if yyhl3710 { - yyb3710 = yyj3710 > l + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l } else { - yyb3710 = r.CheckBreak() + yyb20 = r.CheckBreak() } - if yyb3710 { + if yyb20 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3710-1, "") + z.DecStructFieldNotFound(yyj20-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *PodProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *PodPortForwardOptions) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym3720 := z.EncBinary() - _ = yym3720 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3721 := !z.EncBinary() - yy2arr3721 := z.EncBasicHandle().StructToArray - var yyq3721 [3]bool - _, _, _ = yysep3721, yyq3721, yy2arr3721 - const yyr3721 bool = false - yyq3721[0] = x.Kind != "" - yyq3721[1] = x.APIVersion != "" - yyq3721[2] = x.Path != "" - var yynn3721 int - if yyr3721 || yy2arr3721 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = len(x.Ports) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn3721 = 0 - for _, b := range yyq3721 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn3721++ + yynn2++ } } - r.EncodeMapStart(yynn3721) - yynn3721 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3721 || yy2arr3721 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3721[0] { - yym3723 := z.EncBinary() - _ = yym3723 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -47014,23 +55394,23 @@ func (x *PodProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3721[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3724 := z.EncBinary() - _ = yym3724 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, 
string(x.Kind)) } } } - if yyr3721 || yy2arr3721 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3721[1] { - yym3726 := z.EncBinary() - _ = yym3726 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -47039,44 +55419,52 @@ func (x *PodProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3721[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3727 := z.EncBinary() - _ = yym3727 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr3721 || yy2arr3721 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3721[2] { - yym3729 := z.EncBinary() - _ = yym3729 - if false { + if yyq2[2] { + if x.Ports == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + z.F.EncSliceInt32V(x.Ports, false, e) + } } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeNil() } } else { - if yyq3721[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) + r.EncodeString(codecSelferC_UTF81234, string("ports")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3730 := z.EncBinary() - _ = yym3730 - if false { + if x.Ports == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + z.F.EncSliceInt32V(x.Ports, false, e) + } } } } - if yyr3721 || yy2arr3721 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -47085,29 +55473,29 @@ func (x *PodProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *PodProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *PodPortForwardOptions) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3731 := z.DecBinary() - _ = yym3731 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3732 := r.ContainerType() - if yyct3732 == codecSelferValueTypeMap1234 { - yyl3732 := r.ReadMapStart() - if yyl3732 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3732, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3732 == codecSelferValueTypeArray1234 { - yyl3732 := r.ReadArrayStart() - if yyl3732 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3732, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -47115,16 +55503,16 @@ func (x *PodProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *PodProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *PodPortForwardOptions) codecDecodeSelfFromMap(l int, d 
*codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3733Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3733Slc - var yyhl3733 bool = l >= 0 - for yyj3733 := 0; ; yyj3733++ { - if yyhl3733 { - if yyj3733 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -47133,49 +55521,67 @@ func (x *PodProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3733Slc = r.DecodeBytes(yys3733Slc, true, true) - yys3733 := string(yys3733Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3733 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } - case "path": + case "ports": if r.TryDecodeAsNil() { - x.Path = "" + x.Ports = nil } else { - x.Path = string(r.DecodeString()) + yyv8 := &x.Ports + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceInt32X(yyv8, false, d) + } } default: - z.DecStructFieldNotFound(-1, yys3733) - } // end switch yys3733 - } // end for yyj3733 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *PodProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *PodPortForwardOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3737 int - var yyb3737 bool - var yyhl3737 bool = l >= 0 - yyj3737++ - if yyhl3737 { - yyb3737 = yyj3737 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb3737 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb3737 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -47183,15 +55589,21 @@ func (x *PodProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } - yyj3737++ - if yyhl3737 { - yyb3737 = yyj3737 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb3737 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb3737 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -47199,78 +55611,90 @@ func (x *PodProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv13 := &x.APIVersion + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj3737++ - if yyhl3737 { - yyb3737 = yyj3737 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb3737 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb3737 { + if yyb10 
{ z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Path = "" + x.Ports = nil } else { - x.Path = string(r.DecodeString()) + yyv15 := &x.Ports + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + z.F.DecSliceInt32X(yyv15, false, d) + } } for { - yyj3737++ - if yyhl3737 { - yyb3737 = yyj3737 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb3737 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb3737 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3737-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *NodeProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *PodProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym3741 := z.EncBinary() - _ = yym3741 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3742 := !z.EncBinary() - yy2arr3742 := z.EncBasicHandle().StructToArray - var yyq3742 [3]bool - _, _, _ = yysep3742, yyq3742, yy2arr3742 - const yyr3742 bool = false - yyq3742[0] = x.Kind != "" - yyq3742[1] = x.APIVersion != "" - yyq3742[2] = x.Path != "" - var yynn3742 int - if yyr3742 || yy2arr3742 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Path != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn3742 = 0 - for _, b := range yyq3742 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn3742++ + yynn2++ } } - r.EncodeMapStart(yynn3742) - yynn3742 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3742 || yy2arr3742 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3742[0] { - yym3744 := z.EncBinary() - _ = yym3744 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -47279,23 +55703,23 @@ func (x *NodeProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3742[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3745 := z.EncBinary() - _ = yym3745 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr3742 || yy2arr3742 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3742[1] { - yym3747 := z.EncBinary() - _ = yym3747 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -47304,23 +55728,23 @@ func (x *NodeProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3742[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3748 := z.EncBinary() - _ = yym3748 + yym8 := z.EncBinary() + _ = yym8 if false { } else { 
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr3742 || yy2arr3742 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3742[2] { - yym3750 := z.EncBinary() - _ = yym3750 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Path)) @@ -47329,19 +55753,19 @@ func (x *NodeProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3742[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("path")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3751 := z.EncBinary() - _ = yym3751 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Path)) } } } - if yyr3742 || yy2arr3742 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -47350,29 +55774,29 @@ func (x *NodeProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *NodeProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *PodProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3752 := z.DecBinary() - _ = yym3752 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3753 := r.ContainerType() - if yyct3753 == codecSelferValueTypeMap1234 { - yyl3753 := r.ReadMapStart() - if yyl3753 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3753, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3753 == codecSelferValueTypeArray1234 { - yyl3753 := r.ReadArrayStart() - if yyl3753 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3753, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -47380,16 +55804,16 @@ func (x *NodeProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *NodeProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *PodProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3754Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3754Slc - var yyhl3754 bool = l >= 0 - for yyj3754 := 0; ; yyj3754++ { - if yyhl3754 { - if yyj3754 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -47398,49 +55822,67 @@ func (x *NodeProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3754Slc = r.DecodeBytes(yys3754Slc, true, true) - yys3754 := string(yys3754Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3754 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 
:= z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "path": if r.TryDecodeAsNil() { x.Path = "" } else { - x.Path = string(r.DecodeString()) + yyv8 := &x.Path + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys3754) - } // end switch yys3754 - } // end for yyj3754 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *NodeProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *PodProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3758 int - var yyb3758 bool - var yyhl3758 bool = l >= 0 - yyj3758++ - if yyhl3758 { - yyb3758 = yyj3758 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb3758 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb3758 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -47448,15 +55890,21 @@ func (x *NodeProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } - yyj3758++ - if yyhl3758 { - yyb3758 = yyj3758 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb3758 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb3758 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -47464,15 +55912,21 @@ func (x *NodeProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv13 := &x.APIVersion + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj3758++ - if yyhl3758 { - yyb3758 = yyj3758 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb3758 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb3758 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -47480,62 +55934,68 @@ func (x *NodeProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Path = "" } else { - x.Path = string(r.DecodeString()) + yyv15 := &x.Path + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } for { - yyj3758++ - if yyhl3758 { - yyb3758 = yyj3758 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb3758 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb3758 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3758-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *NodeProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - 
yym3762 := z.EncBinary() - _ = yym3762 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3763 := !z.EncBinary() - yy2arr3763 := z.EncBasicHandle().StructToArray - var yyq3763 [3]bool - _, _, _ = yysep3763, yyq3763, yy2arr3763 - const yyr3763 bool = false - yyq3763[0] = x.Kind != "" - yyq3763[1] = x.APIVersion != "" - yyq3763[2] = x.Path != "" - var yynn3763 int - if yyr3763 || yy2arr3763 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Path != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn3763 = 0 - for _, b := range yyq3763 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn3763++ + yynn2++ } } - r.EncodeMapStart(yynn3763) - yynn3763 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3763 || yy2arr3763 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3763[0] { - yym3765 := z.EncBinary() - _ = yym3765 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -47544,23 +56004,23 @@ func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3763[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3766 := z.EncBinary() - _ = yym3766 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr3763 || yy2arr3763 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3763[1] { - yym3768 := z.EncBinary() - _ = yym3768 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -47569,23 +56029,23 @@ func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3763[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3769 := z.EncBinary() - _ = yym3769 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr3763 || yy2arr3763 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3763[2] { - yym3771 := z.EncBinary() - _ = yym3771 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Path)) @@ -47594,19 +56054,19 @@ func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3763[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("path")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3772 := z.EncBinary() - _ = yym3772 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Path)) } } } - if yyr3763 || yy2arr3763 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -47615,29 +56075,29 @@ func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ServiceProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *NodeProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3773 := z.DecBinary() - _ = yym3773 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3774 := r.ContainerType() - if yyct3774 == codecSelferValueTypeMap1234 { - yyl3774 := r.ReadMapStart() - if yyl3774 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3774, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3774 == codecSelferValueTypeArray1234 { - yyl3774 := r.ReadArrayStart() - if yyl3774 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3774, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -47645,16 +56105,16 @@ func (x *ServiceProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *NodeProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3775Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3775Slc - var yyhl3775 bool = l >= 0 - for yyj3775 := 0; ; yyj3775++ { - if yyhl3775 { - if yyj3775 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -47663,49 +56123,67 @@ func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3775Slc = r.DecodeBytes(yys3775Slc, true, true) - yys3775 := string(yys3775Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3775 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "path": if r.TryDecodeAsNil() { x.Path = "" } else { - x.Path = string(r.DecodeString()) + yyv8 := &x.Path + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys3775) - } // end switch yys3775 - } // end for yyj3775 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ServiceProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *NodeProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h 
codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3779 int - var yyb3779 bool - var yyhl3779 bool = l >= 0 - yyj3779++ - if yyhl3779 { - yyb3779 = yyj3779 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb3779 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb3779 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -47713,15 +56191,21 @@ func (x *ServiceProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } - yyj3779++ - if yyhl3779 { - yyb3779 = yyj3779 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb3779 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb3779 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -47729,15 +56213,21 @@ func (x *ServiceProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv13 := &x.APIVersion + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj3779++ - if yyhl3779 { - yyb3779 = yyj3779 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb3779 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb3779 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -47745,169 +56235,139 @@ func (x *ServiceProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Path = "" } else { - x.Path = string(r.DecodeString()) + yyv15 := &x.Path + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } for { - yyj3779++ - if yyhl3779 { - yyb3779 = yyj3779 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb3779 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb3779 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3779-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *OwnerReference) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym3783 := z.EncBinary() - _ = yym3783 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3784 := !z.EncBinary() - yy2arr3784 := z.EncBasicHandle().StructToArray - var yyq3784 [5]bool - _, _, _ = yysep3784, yyq3784, yy2arr3784 - const yyr3784 bool = false - yyq3784[4] = x.Controller != nil - var yynn3784 int - if yyr3784 || yy2arr3784 { - r.EncodeArrayStart(5) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Path != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) } else { - yynn3784 = 4 - for _, b := range yyq3784 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn3784++ + yynn2++ } } - r.EncodeMapStart(yynn3784) - yynn3784 = 0 - } - if yyr3784 || yy2arr3784 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3786 := z.EncBinary() - _ = yym3786 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3787 := z.EncBinary() - _ = yym3787 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - if yyr3784 || yy2arr3784 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3789 := z.EncBinary() - _ = yym3789 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3790 := z.EncBinary() - _ = yym3790 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3784 || yy2arr3784 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3792 := z.EncBinary() - _ = yym3792 - if false { + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + r.EncodeString(codecSelferC_UTF81234, "") } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3793 := z.EncBinary() - _ = yym3793 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } } } - if yyr3784 || yy2arr3784 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym3795 := z.EncBinary() - _ = yym3795 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + r.EncodeString(codecSelferC_UTF81234, "") } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3796 := z.EncBinary() - _ = yym3796 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } } } - if yyr3784 || yy2arr3784 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3784[4] { - if x.Controller == nil { - r.EncodeNil() + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { } else { - yy3798 := *x.Controller - yym3799 := 
z.EncBinary() - _ = yym3799 - if false { - } else { - r.EncodeBool(bool(yy3798)) - } + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) } } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3784[4] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("controller")) + r.EncodeString(codecSelferC_UTF81234, string("path")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Controller == nil { - r.EncodeNil() + yym11 := z.EncBinary() + _ = yym11 + if false { } else { - yy3800 := *x.Controller - yym3801 := z.EncBinary() - _ = yym3801 - if false { - } else { - r.EncodeBool(bool(yy3800)) - } + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) } } } - if yyr3784 || yy2arr3784 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -47916,29 +56376,29 @@ func (x *OwnerReference) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *OwnerReference) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ServiceProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3802 := z.DecBinary() - _ = yym3802 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3803 := r.ContainerType() - if yyct3803 == codecSelferValueTypeMap1234 { - yyl3803 := r.ReadMapStart() - if yyl3803 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3803, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3803 == codecSelferValueTypeArray1234 { - yyl3803 := r.ReadArrayStart() - if yyl3803 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3803, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -47946,16 +56406,16 @@ func (x *OwnerReference) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *OwnerReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3804Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3804Slc - var yyhl3804 bool = l >= 0 - for yyj3804 := 0; ; yyj3804++ { - if yyhl3804 { - if yyj3804 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -47964,87 +56424,67 @@ func (x *OwnerReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3804Slc = r.DecodeBytes(yys3804Slc, true, true) - yys3804 := string(yys3804Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3804 { - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = 
string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } - case "uid": + case "apiVersion": if r.TryDecodeAsNil() { - x.UID = "" + x.APIVersion = "" } else { - x.UID = pkg1_types.UID(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } - case "controller": + case "path": if r.TryDecodeAsNil() { - if x.Controller != nil { - x.Controller = nil - } + x.Path = "" } else { - if x.Controller == nil { - x.Controller = new(bool) - } - yym3810 := z.DecBinary() - _ = yym3810 + yyv8 := &x.Path + yym9 := z.DecBinary() + _ = yym9 if false { } else { - *((*bool)(x.Controller)) = r.DecodeBool() + *((*string)(yyv8)) = r.DecodeString() } } default: - z.DecStructFieldNotFound(-1, yys3804) - } // end switch yys3804 - } // end for yyj3804 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *OwnerReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *ServiceProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3811 int - var yyb3811 bool - var yyhl3811 bool = l >= 0 - yyj3811++ - if yyhl3811 { - yyb3811 = yyj3811 > l - } else { - yyb3811 = r.CheckBreak() - } - if yyb3811 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - x.APIVersion = string(r.DecodeString()) + yyb10 = r.CheckBreak() } - yyj3811++ - if yyhl3811 { - yyb3811 = yyj3811 > l - } else { - yyb3811 = r.CheckBreak() - } - if yyb3811 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -48052,78 +56492,70 @@ func (x *OwnerReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) - } - yyj3811++ - if yyhl3811 { - yyb3811 = yyj3811 > l - } else { - yyb3811 = r.CheckBreak() - } - if yyb3811 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } - yyj3811++ - if yyhl3811 { - yyb3811 = yyj3811 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb3811 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb3811 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.UID = "" + x.APIVersion = "" } else { - x.UID = pkg1_types.UID(r.DecodeString()) + yyv13 := &x.APIVersion + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj3811++ - if yyhl3811 { - yyb3811 = yyj3811 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb3811 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb3811 { + if yyb10 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.Controller != nil { - x.Controller = nil - } + x.Path = "" } else { - if x.Controller == nil { - x.Controller = new(bool) - } - yym3817 := z.DecBinary() - _ = yym3817 + yyv15 := &x.Path + yym16 := z.DecBinary() + _ = yym16 if false { } else { - *((*bool)(x.Controller)) = r.DecodeBool() + *((*string)(yyv15)) = r.DecodeString() } } for { - yyj3811++ - if yyhl3811 { - yyb3811 = yyj3811 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb3811 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb3811 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3811-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -48135,41 +56567,41 @@ func (x *ObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3818 := z.EncBinary() - _ = yym3818 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3819 := !z.EncBinary() - yy2arr3819 := z.EncBasicHandle().StructToArray - var yyq3819 [7]bool - _, _, _ = yysep3819, yyq3819, yy2arr3819 - const yyr3819 bool = false - yyq3819[0] = x.Kind != "" - yyq3819[1] = x.Namespace != "" - yyq3819[2] = x.Name != "" - yyq3819[3] = x.UID != "" - yyq3819[4] = x.APIVersion != "" - yyq3819[5] = x.ResourceVersion != "" - yyq3819[6] = x.FieldPath != "" - var yynn3819 int - if yyr3819 || yy2arr3819 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.Namespace != "" + yyq2[2] = x.Name != "" + yyq2[3] = x.UID != "" + yyq2[4] = x.APIVersion != "" + yyq2[5] = x.ResourceVersion != "" + yyq2[6] = x.FieldPath != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(7) } else { - yynn3819 = 0 - for _, b := range yyq3819 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn3819++ + yynn2++ } } - r.EncodeMapStart(yynn3819) - yynn3819 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3819 || yy2arr3819 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3819[0] { - yym3821 := z.EncBinary() - _ = yym3821 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -48178,23 +56610,23 @@ func (x *ObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3819[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3822 := z.EncBinary() - _ = yym3822 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr3819 || yy2arr3819 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3819[1] { - yym3824 := z.EncBinary() - _ = yym3824 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) @@ -48203,23 +56635,23 @@ func (x *ObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3819[1] { + if yyq2[1] { 
z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("namespace")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3825 := z.EncBinary() - _ = yym3825 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) } } } - if yyr3819 || yy2arr3819 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3819[2] { - yym3827 := z.EncBinary() - _ = yym3827 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) @@ -48228,23 +56660,23 @@ func (x *ObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3819[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("name")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3828 := z.EncBinary() - _ = yym3828 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } } - if yyr3819 || yy2arr3819 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3819[3] { - yym3830 := z.EncBinary() - _ = yym3830 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else if z.HasExtensions() && z.EncExt(x.UID) { } else { @@ -48254,12 +56686,12 @@ func (x *ObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3819[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("uid")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3831 := z.EncBinary() - _ = yym3831 + yym14 := z.EncBinary() + _ = yym14 if false { } else if z.HasExtensions() && z.EncExt(x.UID) { } else { @@ -48267,11 +56699,11 @@ func (x *ObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr3819 || yy2arr3819 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3819[4] { - yym3833 := z.EncBinary() - _ = yym3833 + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -48280,23 +56712,23 @@ func (x *ObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3819[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3834 := z.EncBinary() - _ = yym3834 + yym17 := z.EncBinary() + _ = yym17 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr3819 || yy2arr3819 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3819[5] { - yym3836 := z.EncBinary() - _ = yym3836 + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) @@ -48305,23 +56737,23 @@ func (x *ObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3819[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3837 := z.EncBinary() - _ = yym3837 + 
yym20 := z.EncBinary() + _ = yym20 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) } } } - if yyr3819 || yy2arr3819 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3819[6] { - yym3839 := z.EncBinary() - _ = yym3839 + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) @@ -48330,19 +56762,19 @@ func (x *ObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3819[6] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("fieldPath")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3840 := z.EncBinary() - _ = yym3840 + yym23 := z.EncBinary() + _ = yym23 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) } } } - if yyr3819 || yy2arr3819 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -48355,25 +56787,25 @@ func (x *ObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3841 := z.DecBinary() - _ = yym3841 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3842 := r.ContainerType() - if yyct3842 == codecSelferValueTypeMap1234 { - yyl3842 := r.ReadMapStart() - if yyl3842 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3842, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3842 == codecSelferValueTypeArray1234 { - yyl3842 := r.ReadArrayStart() - if yyl3842 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3842, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -48385,12 +56817,12 @@ func (x *ObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3843Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3843Slc - var yyhl3843 bool = l >= 0 - for yyj3843 := 0; ; yyj3843++ { - if yyhl3843 { - if yyj3843 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -48399,56 +56831,99 @@ func (x *ObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3843Slc = r.DecodeBytes(yys3843Slc, true, true) - yys3843 := string(yys3843Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3843 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "namespace": if r.TryDecodeAsNil() { x.Namespace = "" } else { - x.Namespace = string(r.DecodeString()) + yyv6 := 
&x.Namespace + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "name": if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } case "uid": if r.TryDecodeAsNil() { x.UID = "" } else { - x.UID = pkg1_types.UID(r.DecodeString()) + yyv10 := &x.UID + yym11 := z.DecBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.DecExt(yyv10) { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv12 := &x.APIVersion + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } case "resourceVersion": if r.TryDecodeAsNil() { x.ResourceVersion = "" } else { - x.ResourceVersion = string(r.DecodeString()) + yyv14 := &x.ResourceVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } case "fieldPath": if r.TryDecodeAsNil() { x.FieldPath = "" } else { - x.FieldPath = string(r.DecodeString()) + yyv16 := &x.FieldPath + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys3843) - } // end switch yys3843 - } // end for yyj3843 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -48456,16 +56931,16 @@ func (x *ObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3851 int - var yyb3851 bool - var yyhl3851 bool = l >= 0 - yyj3851++ - if yyhl3851 { - yyb3851 = yyj3851 > l + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3851 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3851 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -48473,15 +56948,21 @@ func (x *ObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv19 := &x.Kind + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } } - yyj3851++ - if yyhl3851 { - yyb3851 = yyj3851 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3851 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3851 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -48489,15 +56970,21 @@ func (x *ObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Namespace = "" } else { - x.Namespace = string(r.DecodeString()) + yyv21 := &x.Namespace + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } } - yyj3851++ - if yyhl3851 { - yyb3851 = yyj3851 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3851 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3851 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -48505,15 +56992,21 @@ func (x *ObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv23 := &x.Name + yym24 := z.DecBinary() + _ = 
yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } } - yyj3851++ - if yyhl3851 { - yyb3851 = yyj3851 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3851 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3851 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -48521,15 +57014,22 @@ func (x *ObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.UID = "" } else { - x.UID = pkg1_types.UID(r.DecodeString()) + yyv25 := &x.UID + yym26 := z.DecBinary() + _ = yym26 + if false { + } else if z.HasExtensions() && z.DecExt(yyv25) { + } else { + *((*string)(yyv25)) = r.DecodeString() + } } - yyj3851++ - if yyhl3851 { - yyb3851 = yyj3851 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3851 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3851 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -48537,15 +57037,21 @@ func (x *ObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv27 := &x.APIVersion + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } } - yyj3851++ - if yyhl3851 { - yyb3851 = yyj3851 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3851 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3851 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -48553,15 +57059,21 @@ func (x *ObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.ResourceVersion = "" } else { - x.ResourceVersion = string(r.DecodeString()) + yyv29 := &x.ResourceVersion + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } } - yyj3851++ - if yyhl3851 { - yyb3851 = yyj3851 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3851 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3851 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -48569,20 +57081,26 @@ func (x *ObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.FieldPath = "" } else { - x.FieldPath = string(r.DecodeString()) + yyv31 := &x.FieldPath + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } } for { - yyj3851++ - if yyhl3851 { - yyb3851 = yyj3851 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb3851 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb3851 { + if yyb18 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3851-1, "") + z.DecStructFieldNotFound(yyj18-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -48594,35 +57112,35 @@ func (x *LocalObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3859 := z.EncBinary() - _ = yym3859 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3860 := !z.EncBinary() - yy2arr3860 := z.EncBasicHandle().StructToArray - var yyq3860 [1]bool - _, _, _ = yysep3860, yyq3860, yy2arr3860 - const yyr3860 bool = false - yyq3860[0] = x.Name != "" - var yynn3860 int - if yyr3860 || yy2arr3860 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = 
false + yyq2[0] = x.Name != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn3860 = 0 - for _, b := range yyq3860 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn3860++ + yynn2++ } } - r.EncodeMapStart(yynn3860) - yynn3860 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3860 || yy2arr3860 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3860[0] { - yym3862 := z.EncBinary() - _ = yym3862 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) @@ -48631,19 +57149,19 @@ func (x *LocalObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3860[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("name")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3863 := z.EncBinary() - _ = yym3863 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } } - if yyr3860 || yy2arr3860 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -48656,25 +57174,25 @@ func (x *LocalObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3864 := z.DecBinary() - _ = yym3864 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3865 := r.ContainerType() - if yyct3865 == codecSelferValueTypeMap1234 { - yyl3865 := r.ReadMapStart() - if yyl3865 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3865, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3865 == codecSelferValueTypeArray1234 { - yyl3865 := r.ReadArrayStart() - if yyl3865 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3865, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -48686,12 +57204,12 @@ func (x *LocalObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3866Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3866Slc - var yyhl3866 bool = l >= 0 - for yyj3866 := 0; ; yyj3866++ { - if yyhl3866 { - if yyj3866 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -48700,20 +57218,26 @@ func (x *LocalObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decode } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3866Slc = r.DecodeBytes(yys3866Slc, true, true) - yys3866 := string(yys3866Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3866 { + switch yys3 { case "name": if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else 
{ + *((*string)(yyv4)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys3866) - } // end switch yys3866 - } // end for yyj3866 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -48721,16 +57245,16 @@ func (x *LocalObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Deco var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3868 int - var yyb3868 bool - var yyhl3868 bool = l >= 0 - yyj3868++ - if yyhl3868 { - yyb3868 = yyj3868 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb3868 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb3868 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -48738,20 +57262,26 @@ func (x *LocalObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv7 := &x.Name + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } } for { - yyj3868++ - if yyhl3868 { - yyb3868 = yyj3868 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb3868 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb3868 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3868-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -48763,37 +57293,37 @@ func (x *SerializedReference) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3870 := z.EncBinary() - _ = yym3870 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3871 := !z.EncBinary() - yy2arr3871 := z.EncBasicHandle().StructToArray - var yyq3871 [3]bool - _, _, _ = yysep3871, yyq3871, yy2arr3871 - const yyr3871 bool = false - yyq3871[0] = x.Kind != "" - yyq3871[1] = x.APIVersion != "" - yyq3871[2] = true - var yynn3871 int - if yyr3871 || yy2arr3871 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn3871 = 0 - for _, b := range yyq3871 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn3871++ + yynn2++ } } - r.EncodeMapStart(yynn3871) - yynn3871 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3871 || yy2arr3871 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3871[0] { - yym3873 := z.EncBinary() - _ = yym3873 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -48802,23 +57332,23 @@ func (x *SerializedReference) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3871[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3874 := z.EncBinary() - _ = yym3874 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr3871 || yy2arr3871 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if 
yyq3871[1] { - yym3876 := z.EncBinary() - _ = yym3876 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -48827,36 +57357,36 @@ func (x *SerializedReference) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3871[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3877 := z.EncBinary() - _ = yym3877 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr3871 || yy2arr3871 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3871[2] { - yy3879 := &x.Reference - yy3879.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.Reference + yy10.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq3871[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("reference")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3880 := &x.Reference - yy3880.CodecEncodeSelf(e) + yy12 := &x.Reference + yy12.CodecEncodeSelf(e) } } - if yyr3871 || yy2arr3871 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -48869,25 +57399,25 @@ func (x *SerializedReference) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3881 := z.DecBinary() - _ = yym3881 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3882 := r.ContainerType() - if yyct3882 == codecSelferValueTypeMap1234 { - yyl3882 := r.ReadMapStart() - if yyl3882 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3882, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3882 == codecSelferValueTypeArray1234 { - yyl3882 := r.ReadArrayStart() - if yyl3882 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3882, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -48899,12 +57429,12 @@ func (x *SerializedReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3883Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3883Slc - var yyhl3883 bool = l >= 0 - for yyj3883 := 0; ; yyj3883++ { - if yyhl3883 { - if yyj3883 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -48913,33 +57443,45 @@ func (x *SerializedReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3883Slc = r.DecodeBytes(yys3883Slc, true, true) - yys3883 := string(yys3883Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch 
yys3883 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "reference": if r.TryDecodeAsNil() { x.Reference = ObjectReference{} } else { - yyv3886 := &x.Reference - yyv3886.CodecDecodeSelf(d) + yyv8 := &x.Reference + yyv8.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys3883) - } // end switch yys3883 - } // end for yyj3883 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -48947,16 +57489,16 @@ func (x *SerializedReference) codecDecodeSelfFromArray(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3887 int - var yyb3887 bool - var yyhl3887 bool = l >= 0 - yyj3887++ - if yyhl3887 { - yyb3887 = yyj3887 > l + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb3887 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb3887 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -48964,15 +57506,21 @@ func (x *SerializedReference) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv10 := &x.Kind + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } - yyj3887++ - if yyhl3887 { - yyb3887 = yyj3887 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb3887 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb3887 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -48980,15 +57528,21 @@ func (x *SerializedReference) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv12 := &x.APIVersion + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } - yyj3887++ - if yyhl3887 { - yyb3887 = yyj3887 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb3887 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb3887 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -48996,21 +57550,21 @@ func (x *SerializedReference) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Reference = ObjectReference{} } else { - yyv3890 := &x.Reference - yyv3890.CodecDecodeSelf(d) + yyv14 := &x.Reference + yyv14.CodecDecodeSelf(d) } for { - yyj3887++ - if yyhl3887 { - yyb3887 = yyj3887 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb3887 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb3887 { + if yyb9 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3887-1, "") + z.DecStructFieldNotFound(yyj9-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -49022,36 +57576,36 @@ func (x *EventSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3891 := z.EncBinary() - _ = yym3891 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } 
else { - yysep3892 := !z.EncBinary() - yy2arr3892 := z.EncBasicHandle().StructToArray - var yyq3892 [2]bool - _, _, _ = yysep3892, yyq3892, yy2arr3892 - const yyr3892 bool = false - yyq3892[0] = x.Component != "" - yyq3892[1] = x.Host != "" - var yynn3892 int - if yyr3892 || yy2arr3892 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Component != "" + yyq2[1] = x.Host != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn3892 = 0 - for _, b := range yyq3892 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn3892++ + yynn2++ } } - r.EncodeMapStart(yynn3892) - yynn3892 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3892 || yy2arr3892 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3892[0] { - yym3894 := z.EncBinary() - _ = yym3894 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Component)) @@ -49060,23 +57614,23 @@ func (x *EventSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3892[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("component")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3895 := z.EncBinary() - _ = yym3895 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Component)) } } } - if yyr3892 || yy2arr3892 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3892[1] { - yym3897 := z.EncBinary() - _ = yym3897 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Host)) @@ -49085,19 +57639,19 @@ func (x *EventSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3892[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("host")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3898 := z.EncBinary() - _ = yym3898 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Host)) } } } - if yyr3892 || yy2arr3892 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -49110,25 +57664,25 @@ func (x *EventSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3899 := z.DecBinary() - _ = yym3899 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3900 := r.ContainerType() - if yyct3900 == codecSelferValueTypeMap1234 { - yyl3900 := r.ReadMapStart() - if yyl3900 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3900, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3900 == codecSelferValueTypeArray1234 { - yyl3900 := r.ReadArrayStart() - if yyl3900 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - 
x.codecDecodeSelfFromArray(yyl3900, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -49140,12 +57694,12 @@ func (x *EventSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3901Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3901Slc - var yyhl3901 bool = l >= 0 - for yyj3901 := 0; ; yyj3901++ { - if yyhl3901 { - if yyj3901 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -49154,26 +57708,38 @@ func (x *EventSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3901Slc = r.DecodeBytes(yys3901Slc, true, true) - yys3901 := string(yys3901Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3901 { + switch yys3 { case "component": if r.TryDecodeAsNil() { x.Component = "" } else { - x.Component = string(r.DecodeString()) + yyv4 := &x.Component + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "host": if r.TryDecodeAsNil() { x.Host = "" } else { - x.Host = string(r.DecodeString()) + yyv6 := &x.Host + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys3901) - } // end switch yys3901 - } // end for yyj3901 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -49181,16 +57747,16 @@ func (x *EventSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3904 int - var yyb3904 bool - var yyhl3904 bool = l >= 0 - yyj3904++ - if yyhl3904 { - yyb3904 = yyj3904 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb3904 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb3904 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -49198,15 +57764,21 @@ func (x *EventSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Component = "" } else { - x.Component = string(r.DecodeString()) + yyv9 := &x.Component + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } } - yyj3904++ - if yyhl3904 { - yyb3904 = yyj3904 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb3904 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb3904 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -49214,20 +57786,26 @@ func (x *EventSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Host = "" } else { - x.Host = string(r.DecodeString()) + yyv11 := &x.Host + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } for { - yyj3904++ - if yyhl3904 { - yyb3904 = yyj3904 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb3904 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb3904 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3904-1, "") + 
z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -49239,43 +57817,43 @@ func (x *Event) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3907 := z.EncBinary() - _ = yym3907 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3908 := !z.EncBinary() - yy2arr3908 := z.EncBasicHandle().StructToArray - var yyq3908 [11]bool - _, _, _ = yysep3908, yyq3908, yy2arr3908 - const yyr3908 bool = false - yyq3908[0] = x.Kind != "" - yyq3908[1] = x.APIVersion != "" - yyq3908[4] = x.Reason != "" - yyq3908[5] = x.Message != "" - yyq3908[6] = true - yyq3908[7] = true - yyq3908[8] = true - yyq3908[9] = x.Count != 0 - yyq3908[10] = x.Type != "" - var yynn3908 int - if yyr3908 || yy2arr3908 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [11]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + yyq2[6] = true + yyq2[7] = true + yyq2[8] = true + yyq2[9] = x.Count != 0 + yyq2[10] = x.Type != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(11) } else { - yynn3908 = 2 - for _, b := range yyq3908 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn3908++ + yynn2++ } } - r.EncodeMapStart(yynn3908) - yynn3908 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3908 || yy2arr3908 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3908[0] { - yym3910 := z.EncBinary() - _ = yym3910 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -49284,23 +57862,23 @@ func (x *Event) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3908[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3911 := z.EncBinary() - _ = yym3911 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr3908 || yy2arr3908 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3908[1] { - yym3913 := z.EncBinary() - _ = yym3913 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -49309,45 +57887,57 @@ func (x *Event) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3908[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3914 := z.EncBinary() - _ = yym3914 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr3908 || yy2arr3908 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy3916 := &x.ObjectMeta - yy3916.CodecEncodeSelf(e) + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3917 := &x.ObjectMeta - yy3917.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } - if yyr3908 || yy2arr3908 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy3919 := &x.InvolvedObject - yy3919.CodecEncodeSelf(e) + yy15 := &x.InvolvedObject + yy15.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("involvedObject")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3920 := &x.InvolvedObject - yy3920.CodecEncodeSelf(e) + yy17 := &x.InvolvedObject + yy17.CodecEncodeSelf(e) } - if yyr3908 || yy2arr3908 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3908[4] { - yym3922 := z.EncBinary() - _ = yym3922 + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) @@ -49356,23 +57946,23 @@ func (x *Event) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3908[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("reason")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3923 := z.EncBinary() - _ = yym3923 + yym21 := z.EncBinary() + _ = yym21 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) } } } - if yyr3908 || yy2arr3908 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3908[5] { - yym3925 := z.EncBinary() - _ = yym3925 + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) @@ -49381,114 +57971,114 @@ func (x *Event) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3908[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("message")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3926 := z.EncBinary() - _ = yym3926 + yym24 := z.EncBinary() + _ = yym24 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) } } } - if yyr3908 || yy2arr3908 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3908[6] { - yy3928 := &x.Source - yy3928.CodecEncodeSelf(e) + if yyq2[6] { + yy26 := &x.Source + yy26.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq3908[6] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("source")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3929 := &x.Source - yy3929.CodecEncodeSelf(e) + yy28 := &x.Source + yy28.CodecEncodeSelf(e) } } - if yyr3908 || yy2arr3908 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3908[7] { - yy3931 := &x.FirstTimestamp - yym3932 := z.EncBinary() - _ = yym3932 + if yyq2[7] { + yy31 := &x.FirstTimestamp + yym32 := z.EncBinary() + _ = yym32 if false { - } else if z.HasExtensions() && z.EncExt(yy3931) { - } else if yym3932 { - z.EncBinaryMarshal(yy3931) - } else if !yym3932 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3931) + } else if z.HasExtensions() && z.EncExt(yy31) { + } else if yym32 { + z.EncBinaryMarshal(yy31) + } 
else if !yym32 && z.IsJSONHandle() { + z.EncJSONMarshal(yy31) } else { - z.EncFallback(yy3931) + z.EncFallback(yy31) } } else { r.EncodeNil() } } else { - if yyq3908[7] { + if yyq2[7] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("firstTimestamp")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3933 := &x.FirstTimestamp - yym3934 := z.EncBinary() - _ = yym3934 + yy33 := &x.FirstTimestamp + yym34 := z.EncBinary() + _ = yym34 if false { - } else if z.HasExtensions() && z.EncExt(yy3933) { - } else if yym3934 { - z.EncBinaryMarshal(yy3933) - } else if !yym3934 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3933) + } else if z.HasExtensions() && z.EncExt(yy33) { + } else if yym34 { + z.EncBinaryMarshal(yy33) + } else if !yym34 && z.IsJSONHandle() { + z.EncJSONMarshal(yy33) } else { - z.EncFallback(yy3933) + z.EncFallback(yy33) } } } - if yyr3908 || yy2arr3908 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3908[8] { - yy3936 := &x.LastTimestamp - yym3937 := z.EncBinary() - _ = yym3937 + if yyq2[8] { + yy36 := &x.LastTimestamp + yym37 := z.EncBinary() + _ = yym37 if false { - } else if z.HasExtensions() && z.EncExt(yy3936) { - } else if yym3937 { - z.EncBinaryMarshal(yy3936) - } else if !yym3937 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3936) + } else if z.HasExtensions() && z.EncExt(yy36) { + } else if yym37 { + z.EncBinaryMarshal(yy36) + } else if !yym37 && z.IsJSONHandle() { + z.EncJSONMarshal(yy36) } else { - z.EncFallback(yy3936) + z.EncFallback(yy36) } } else { r.EncodeNil() } } else { - if yyq3908[8] { + if yyq2[8] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("lastTimestamp")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3938 := &x.LastTimestamp - yym3939 := z.EncBinary() - _ = yym3939 + yy38 := &x.LastTimestamp + yym39 := z.EncBinary() + _ = yym39 if false { - } else if z.HasExtensions() && z.EncExt(yy3938) { - } else if yym3939 { - z.EncBinaryMarshal(yy3938) - } else if !yym3939 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3938) + } else if z.HasExtensions() && z.EncExt(yy38) { + } else if yym39 { + z.EncBinaryMarshal(yy38) + } else if !yym39 && z.IsJSONHandle() { + z.EncJSONMarshal(yy38) } else { - z.EncFallback(yy3938) + z.EncFallback(yy38) } } } - if yyr3908 || yy2arr3908 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3908[9] { - yym3941 := z.EncBinary() - _ = yym3941 + if yyq2[9] { + yym41 := z.EncBinary() + _ = yym41 if false { } else { r.EncodeInt(int64(x.Count)) @@ -49497,23 +58087,23 @@ func (x *Event) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq3908[9] { + if yyq2[9] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("count")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3942 := z.EncBinary() - _ = yym3942 + yym42 := z.EncBinary() + _ = yym42 if false { } else { r.EncodeInt(int64(x.Count)) } } } - if yyr3908 || yy2arr3908 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3908[10] { - yym3944 := z.EncBinary() - _ = yym3944 + if yyq2[10] { + yym44 := z.EncBinary() + _ = yym44 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Type)) @@ -49522,19 +58112,19 @@ func (x *Event) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3908[10] { + if 
yyq2[10] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("type")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3945 := z.EncBinary() - _ = yym3945 + yym45 := z.EncBinary() + _ = yym45 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Type)) } } } - if yyr3908 || yy2arr3908 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -49547,25 +58137,25 @@ func (x *Event) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3946 := z.DecBinary() - _ = yym3946 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3947 := r.ContainerType() - if yyct3947 == codecSelferValueTypeMap1234 { - yyl3947 := r.ReadMapStart() - if yyl3947 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3947, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3947 == codecSelferValueTypeArray1234 { - yyl3947 := r.ReadArrayStart() - if yyl3947 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3947, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -49577,12 +58167,12 @@ func (x *Event) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3948Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3948Slc - var yyhl3948 bool = l >= 0 - for yyj3948 := 0; ; yyj3948++ { - if yyhl3948 { - if yyj3948 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -49591,105 +58181,147 @@ func (x *Event) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3948Slc = r.DecodeBytes(yys3948Slc, true, true) - yys3948 := string(yys3948Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3948 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv3951 := &x.ObjectMeta - yyv3951.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "involvedObject": if r.TryDecodeAsNil() { x.InvolvedObject = ObjectReference{} } else { - yyv3952 := &x.InvolvedObject - yyv3952.CodecDecodeSelf(d) + yyv10 := 
&x.InvolvedObject + yyv10.CodecDecodeSelf(d) } case "reason": if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv11 := &x.Reason + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } case "message": if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv13 := &x.Message + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } case "source": if r.TryDecodeAsNil() { x.Source = EventSource{} } else { - yyv3955 := &x.Source - yyv3955.CodecDecodeSelf(d) + yyv15 := &x.Source + yyv15.CodecDecodeSelf(d) } case "firstTimestamp": if r.TryDecodeAsNil() { - x.FirstTimestamp = pkg2_unversioned.Time{} + x.FirstTimestamp = pkg2_v1.Time{} } else { - yyv3956 := &x.FirstTimestamp - yym3957 := z.DecBinary() - _ = yym3957 + yyv16 := &x.FirstTimestamp + yym17 := z.DecBinary() + _ = yym17 if false { - } else if z.HasExtensions() && z.DecExt(yyv3956) { - } else if yym3957 { - z.DecBinaryUnmarshal(yyv3956) - } else if !yym3957 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv3956) + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else if yym17 { + z.DecBinaryUnmarshal(yyv16) + } else if !yym17 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv16) } else { - z.DecFallback(yyv3956, false) + z.DecFallback(yyv16, false) } } case "lastTimestamp": if r.TryDecodeAsNil() { - x.LastTimestamp = pkg2_unversioned.Time{} + x.LastTimestamp = pkg2_v1.Time{} } else { - yyv3958 := &x.LastTimestamp - yym3959 := z.DecBinary() - _ = yym3959 + yyv18 := &x.LastTimestamp + yym19 := z.DecBinary() + _ = yym19 if false { - } else if z.HasExtensions() && z.DecExt(yyv3958) { - } else if yym3959 { - z.DecBinaryUnmarshal(yyv3958) - } else if !yym3959 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv3958) + } else if z.HasExtensions() && z.DecExt(yyv18) { + } else if yym19 { + z.DecBinaryUnmarshal(yyv18) + } else if !yym19 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv18) } else { - z.DecFallback(yyv3958, false) + z.DecFallback(yyv18, false) } } case "count": if r.TryDecodeAsNil() { x.Count = 0 } else { - x.Count = int32(r.DecodeInt(32)) + yyv20 := &x.Count + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*int32)(yyv20)) = int32(r.DecodeInt(32)) + } } case "type": if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = string(r.DecodeString()) + yyv22 := &x.Type + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*string)(yyv22)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys3948) - } // end switch yys3948 - } // end for yyj3948 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -49697,16 +58329,16 @@ func (x *Event) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj3962 int - var yyb3962 bool - var yyhl3962 bool = l >= 0 - yyj3962++ - if yyhl3962 { - yyb3962 = yyj3962 > l + var yyj24 int + var yyb24 bool + var yyhl24 bool = l >= 0 + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3962 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3962 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -49714,15 +58346,21 @@ func (x *Event) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv25 := &x.Kind + yym26 := 
z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } } - yyj3962++ - if yyhl3962 { - yyb3962 = yyj3962 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3962 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3962 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -49730,32 +58368,44 @@ func (x *Event) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv27 := &x.APIVersion + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } } - yyj3962++ - if yyhl3962 { - yyb3962 = yyj3962 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3962 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3962 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv3965 := &x.ObjectMeta - yyv3965.CodecDecodeSelf(d) + yyv29 := &x.ObjectMeta + yym30 := z.DecBinary() + _ = yym30 + if false { + } else if z.HasExtensions() && z.DecExt(yyv29) { + } else { + z.DecFallback(yyv29, false) + } } - yyj3962++ - if yyhl3962 { - yyb3962 = yyj3962 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3962 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3962 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -49763,16 +58413,16 @@ func (x *Event) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.InvolvedObject = ObjectReference{} } else { - yyv3966 := &x.InvolvedObject - yyv3966.CodecDecodeSelf(d) + yyv31 := &x.InvolvedObject + yyv31.CodecDecodeSelf(d) } - yyj3962++ - if yyhl3962 { - yyb3962 = yyj3962 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3962 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3962 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -49780,15 +58430,21 @@ func (x *Event) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv32 := &x.Reason + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + *((*string)(yyv32)) = r.DecodeString() + } } - yyj3962++ - if yyhl3962 { - yyb3962 = yyj3962 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3962 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3962 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -49796,15 +58452,21 @@ func (x *Event) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv34 := &x.Message + yym35 := z.DecBinary() + _ = yym35 + if false { + } else { + *((*string)(yyv34)) = r.DecodeString() + } } - yyj3962++ - if yyhl3962 { - yyb3962 = yyj3962 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3962 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3962 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -49812,70 +58474,70 @@ func (x *Event) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Source = EventSource{} } else { - yyv3969 := &x.Source - yyv3969.CodecDecodeSelf(d) + yyv36 := &x.Source + yyv36.CodecDecodeSelf(d) } - yyj3962++ - if yyhl3962 { - yyb3962 = 
yyj3962 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3962 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3962 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.FirstTimestamp = pkg2_unversioned.Time{} + x.FirstTimestamp = pkg2_v1.Time{} } else { - yyv3970 := &x.FirstTimestamp - yym3971 := z.DecBinary() - _ = yym3971 + yyv37 := &x.FirstTimestamp + yym38 := z.DecBinary() + _ = yym38 if false { - } else if z.HasExtensions() && z.DecExt(yyv3970) { - } else if yym3971 { - z.DecBinaryUnmarshal(yyv3970) - } else if !yym3971 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv3970) + } else if z.HasExtensions() && z.DecExt(yyv37) { + } else if yym38 { + z.DecBinaryUnmarshal(yyv37) + } else if !yym38 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv37) } else { - z.DecFallback(yyv3970, false) + z.DecFallback(yyv37, false) } } - yyj3962++ - if yyhl3962 { - yyb3962 = yyj3962 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3962 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3962 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.LastTimestamp = pkg2_unversioned.Time{} + x.LastTimestamp = pkg2_v1.Time{} } else { - yyv3972 := &x.LastTimestamp - yym3973 := z.DecBinary() - _ = yym3973 + yyv39 := &x.LastTimestamp + yym40 := z.DecBinary() + _ = yym40 if false { - } else if z.HasExtensions() && z.DecExt(yyv3972) { - } else if yym3973 { - z.DecBinaryUnmarshal(yyv3972) - } else if !yym3973 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv3972) + } else if z.HasExtensions() && z.DecExt(yyv39) { + } else if yym40 { + z.DecBinaryUnmarshal(yyv39) + } else if !yym40 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv39) } else { - z.DecFallback(yyv3972, false) + z.DecFallback(yyv39, false) } } - yyj3962++ - if yyhl3962 { - yyb3962 = yyj3962 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3962 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3962 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -49883,15 +58545,21 @@ func (x *Event) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Count = 0 } else { - x.Count = int32(r.DecodeInt(32)) + yyv41 := &x.Count + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*int32)(yyv41)) = int32(r.DecodeInt(32)) + } } - yyj3962++ - if yyhl3962 { - yyb3962 = yyj3962 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3962 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3962 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -49899,20 +58567,26 @@ func (x *Event) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = string(r.DecodeString()) + yyv43 := &x.Type + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*string)(yyv43)) = r.DecodeString() + } } for { - yyj3962++ - if yyhl3962 { - yyb3962 = yyj3962 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb3962 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb3962 { + if yyb24 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj3962-1, "") + z.DecStructFieldNotFound(yyj24-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -49924,37 +58598,37 @@ func (x *EventList) 
CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym3976 := z.EncBinary() - _ = yym3976 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep3977 := !z.EncBinary() - yy2arr3977 := z.EncBasicHandle().StructToArray - var yyq3977 [4]bool - _, _, _ = yysep3977, yyq3977, yy2arr3977 - const yyr3977 bool = false - yyq3977[0] = x.Kind != "" - yyq3977[1] = x.APIVersion != "" - yyq3977[2] = true - var yynn3977 int - if yyr3977 || yy2arr3977 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn3977 = 1 - for _, b := range yyq3977 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn3977++ + yynn2++ } } - r.EncodeMapStart(yynn3977) - yynn3977 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr3977 || yy2arr3977 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3977[0] { - yym3979 := z.EncBinary() - _ = yym3979 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -49963,23 +58637,23 @@ func (x *EventList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3977[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3980 := z.EncBinary() - _ = yym3980 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr3977 || yy2arr3977 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3977[1] { - yym3982 := z.EncBinary() - _ = yym3982 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -49988,54 +58662,54 @@ func (x *EventList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq3977[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym3983 := z.EncBinary() - _ = yym3983 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr3977 || yy2arr3977 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq3977[2] { - yy3985 := &x.ListMeta - yym3986 := z.EncBinary() - _ = yym3986 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy3985) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy3985) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq3977[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3987 := &x.ListMeta - yym3988 := z.EncBinary() - _ = yym3988 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy3987) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - 
z.EncFallback(yy3987) + z.EncFallback(yy12) } } } - if yyr3977 || yy2arr3977 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym3990 := z.EncBinary() - _ = yym3990 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceEvent(([]Event)(x.Items), e) @@ -50048,15 +58722,15 @@ func (x *EventList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym3991 := z.EncBinary() - _ = yym3991 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceEvent(([]Event)(x.Items), e) } } } - if yyr3977 || yy2arr3977 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -50069,25 +58743,25 @@ func (x *EventList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym3992 := z.DecBinary() - _ = yym3992 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct3993 := r.ContainerType() - if yyct3993 == codecSelferValueTypeMap1234 { - yyl3993 := r.ReadMapStart() - if yyl3993 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl3993, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct3993 == codecSelferValueTypeArray1234 { - yyl3993 := r.ReadArrayStart() - if yyl3993 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl3993, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -50099,12 +58773,12 @@ func (x *EventList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3994Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3994Slc - var yyhl3994 bool = l >= 0 - for yyj3994 := 0; ; yyj3994++ { - if yyhl3994 { - if yyj3994 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -50113,51 +58787,63 @@ func (x *EventList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3994Slc = r.DecodeBytes(yys3994Slc, true, true) - yys3994 := string(yys3994Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3994 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv3997 := &x.ListMeta - yym3998 := z.DecBinary() - _ = yym3998 + yyv8 := &x.ListMeta + yym9 := 
z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv3997) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv3997, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv3999 := &x.Items - yym4000 := z.DecBinary() - _ = yym4000 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceEvent((*[]Event)(yyv3999), d) + h.decSliceEvent((*[]Event)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys3994) - } // end switch yys3994 - } // end for yyj3994 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -50165,16 +58851,16 @@ func (x *EventList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4001 int - var yyb4001 bool - var yyhl4001 bool = l >= 0 - yyj4001++ - if yyhl4001 { - yyb4001 = yyj4001 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4001 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4001 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -50182,15 +58868,21 @@ func (x *EventList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj4001++ - if yyhl4001 { - yyb4001 = yyj4001 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4001 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4001 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -50198,38 +58890,44 @@ func (x *EventList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj4001++ - if yyhl4001 { - yyb4001 = yyj4001 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4001 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4001 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv4004 := &x.ListMeta - yym4005 := z.DecBinary() - _ = yym4005 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv4004) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv4004, false) + z.DecFallback(yyv17, false) } } - yyj4001++ - if yyhl4001 { - yyb4001 = yyj4001 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4001 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4001 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -50237,26 +58935,26 @@ func (x *EventList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Items = nil } else { - yyv4006 := &x.Items - yym4007 := z.DecBinary() - _ = yym4007 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceEvent((*[]Event)(yyv4006), d) + h.decSliceEvent((*[]Event)(yyv19), 
d) } } for { - yyj4001++ - if yyhl4001 { - yyb4001 = yyj4001 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4001 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4001 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4001-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -50268,37 +58966,37 @@ func (x *List) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym4008 := z.EncBinary() - _ = yym4008 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep4009 := !z.EncBinary() - yy2arr4009 := z.EncBasicHandle().StructToArray - var yyq4009 [4]bool - _, _, _ = yysep4009, yyq4009, yy2arr4009 - const yyr4009 bool = false - yyq4009[0] = x.Kind != "" - yyq4009[1] = x.APIVersion != "" - yyq4009[2] = true - var yynn4009 int - if yyr4009 || yy2arr4009 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn4009 = 1 - for _, b := range yyq4009 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn4009++ + yynn2++ } } - r.EncodeMapStart(yynn4009) - yynn4009 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr4009 || yy2arr4009 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4009[0] { - yym4011 := z.EncBinary() - _ = yym4011 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -50307,23 +59005,23 @@ func (x *List) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4009[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4012 := z.EncBinary() - _ = yym4012 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr4009 || yy2arr4009 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4009[1] { - yym4014 := z.EncBinary() - _ = yym4014 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -50332,54 +59030,54 @@ func (x *List) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4009[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4015 := z.EncBinary() - _ = yym4015 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr4009 || yy2arr4009 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4009[2] { - yy4017 := &x.ListMeta - yym4018 := z.EncBinary() - _ = yym4018 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy4017) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy4017) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq4009[2] 
{ + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4019 := &x.ListMeta - yym4020 := z.EncBinary() - _ = yym4020 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy4019) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy4019) + z.EncFallback(yy12) } } } - if yyr4009 || yy2arr4009 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym4022 := z.EncBinary() - _ = yym4022 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceruntime_RawExtension(([]pkg5_runtime.RawExtension)(x.Items), e) @@ -50392,15 +59090,15 @@ func (x *List) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym4023 := z.EncBinary() - _ = yym4023 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceruntime_RawExtension(([]pkg5_runtime.RawExtension)(x.Items), e) } } } - if yyr4009 || yy2arr4009 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -50413,25 +59111,25 @@ func (x *List) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4024 := z.DecBinary() - _ = yym4024 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct4025 := r.ContainerType() - if yyct4025 == codecSelferValueTypeMap1234 { - yyl4025 := r.ReadMapStart() - if yyl4025 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl4025, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct4025 == codecSelferValueTypeArray1234 { - yyl4025 := r.ReadArrayStart() - if yyl4025 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl4025, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -50443,12 +59141,12 @@ func (x *List) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys4026Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4026Slc - var yyhl4026 bool = l >= 0 - for yyj4026 := 0; ; yyj4026++ { - if yyhl4026 { - if yyj4026 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -50457,51 +59155,63 @@ func (x *List) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4026Slc = r.DecodeBytes(yys4026Slc, true, true) - yys4026 := string(yys4026Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4026 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = 
r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv4029 := &x.ListMeta - yym4030 := z.DecBinary() - _ = yym4030 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv4029) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv4029, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv4031 := &x.Items - yym4032 := z.DecBinary() - _ = yym4032 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceruntime_RawExtension((*[]pkg5_runtime.RawExtension)(yyv4031), d) + h.decSliceruntime_RawExtension((*[]pkg5_runtime.RawExtension)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys4026) - } // end switch yys4026 - } // end for yyj4026 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -50509,16 +59219,16 @@ func (x *List) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4033 int - var yyb4033 bool - var yyhl4033 bool = l >= 0 - yyj4033++ - if yyhl4033 { - yyb4033 = yyj4033 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4033 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4033 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -50526,15 +59236,21 @@ func (x *List) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj4033++ - if yyhl4033 { - yyb4033 = yyj4033 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4033 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4033 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -50542,38 +59258,44 @@ func (x *List) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj4033++ - if yyhl4033 { - yyb4033 = yyj4033 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4033 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4033 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv4036 := &x.ListMeta - yym4037 := z.DecBinary() - _ = yym4037 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv4036) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv4036, false) + z.DecFallback(yyv17, false) } } - yyj4033++ - if yyhl4033 { - yyb4033 = yyj4033 > l + yyj12++ + if yyhl12 { + 
yyb12 = yyj12 > l } else { - yyb4033 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4033 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -50581,26 +59303,26 @@ func (x *List) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Items = nil } else { - yyv4038 := &x.Items - yym4039 := z.DecBinary() - _ = yym4039 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceruntime_RawExtension((*[]pkg5_runtime.RawExtension)(yyv4038), d) + h.decSliceruntime_RawExtension((*[]pkg5_runtime.RawExtension)(yyv19), d) } } for { - yyj4033++ - if yyhl4033 { - yyb4033 = yyj4033 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4033 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4033 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4033-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -50609,8 +59331,8 @@ func (x LimitType) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym4040 := z.EncBinary() - _ = yym4040 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -50622,8 +59344,8 @@ func (x *LimitType) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4041 := z.DecBinary() - _ = yym4041 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -50638,53 +59360,53 @@ func (x *LimitRangeItem) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym4042 := z.EncBinary() - _ = yym4042 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep4043 := !z.EncBinary() - yy2arr4043 := z.EncBasicHandle().StructToArray - var yyq4043 [6]bool - _, _, _ = yysep4043, yyq4043, yy2arr4043 - const yyr4043 bool = false - yyq4043[0] = x.Type != "" - yyq4043[1] = len(x.Max) != 0 - yyq4043[2] = len(x.Min) != 0 - yyq4043[3] = len(x.Default) != 0 - yyq4043[4] = len(x.DefaultRequest) != 0 - yyq4043[5] = len(x.MaxLimitRequestRatio) != 0 - var yynn4043 int - if yyr4043 || yy2arr4043 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Type != "" + yyq2[1] = len(x.Max) != 0 + yyq2[2] = len(x.Min) != 0 + yyq2[3] = len(x.Default) != 0 + yyq2[4] = len(x.DefaultRequest) != 0 + yyq2[5] = len(x.MaxLimitRequestRatio) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(6) } else { - yynn4043 = 0 - for _, b := range yyq4043 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn4043++ + yynn2++ } } - r.EncodeMapStart(yynn4043) - yynn4043 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr4043 || yy2arr4043 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4043[0] { + if yyq2[0] { x.Type.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4043[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("type")) z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Type.CodecEncodeSelf(e) } } - if yyr4043 || yy2arr4043 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4043[1] { + if 
yyq2[1] { if x.Max == nil { r.EncodeNil() } else { @@ -50694,7 +59416,7 @@ func (x *LimitRangeItem) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq4043[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("max")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -50705,9 +59427,9 @@ func (x *LimitRangeItem) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr4043 || yy2arr4043 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4043[2] { + if yyq2[2] { if x.Min == nil { r.EncodeNil() } else { @@ -50717,7 +59439,7 @@ func (x *LimitRangeItem) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq4043[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("min")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -50728,9 +59450,9 @@ func (x *LimitRangeItem) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr4043 || yy2arr4043 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4043[3] { + if yyq2[3] { if x.Default == nil { r.EncodeNil() } else { @@ -50740,7 +59462,7 @@ func (x *LimitRangeItem) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq4043[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("default")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -50751,9 +59473,9 @@ func (x *LimitRangeItem) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr4043 || yy2arr4043 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4043[4] { + if yyq2[4] { if x.DefaultRequest == nil { r.EncodeNil() } else { @@ -50763,7 +59485,7 @@ func (x *LimitRangeItem) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq4043[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("defaultRequest")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -50774,9 +59496,9 @@ func (x *LimitRangeItem) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr4043 || yy2arr4043 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4043[5] { + if yyq2[5] { if x.MaxLimitRequestRatio == nil { r.EncodeNil() } else { @@ -50786,7 +59508,7 @@ func (x *LimitRangeItem) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq4043[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("maxLimitRequestRatio")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -50797,7 +59519,7 @@ func (x *LimitRangeItem) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr4043 || yy2arr4043 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -50810,25 +59532,25 @@ func (x *LimitRangeItem) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4050 := z.DecBinary() - _ = yym4050 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct4051 := r.ContainerType() - if yyct4051 == codecSelferValueTypeMap1234 { - yyl4051 := r.ReadMapStart() - if yyl4051 == 0 { + yyct2 := 
r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl4051, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct4051 == codecSelferValueTypeArray1234 { - yyl4051 := r.ReadArrayStart() - if yyl4051 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl4051, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -50840,12 +59562,12 @@ func (x *LimitRangeItem) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys4052Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4052Slc - var yyhl4052 bool = l >= 0 - for yyj4052 := 0; ; yyj4052++ { - if yyhl4052 { - if yyj4052 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -50854,55 +59576,56 @@ func (x *LimitRangeItem) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4052Slc = r.DecodeBytes(yys4052Slc, true, true) - yys4052 := string(yys4052Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4052 { + switch yys3 { case "type": if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = LimitType(r.DecodeString()) + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) } case "max": if r.TryDecodeAsNil() { x.Max = nil } else { - yyv4054 := &x.Max - yyv4054.CodecDecodeSelf(d) + yyv5 := &x.Max + yyv5.CodecDecodeSelf(d) } case "min": if r.TryDecodeAsNil() { x.Min = nil } else { - yyv4055 := &x.Min - yyv4055.CodecDecodeSelf(d) + yyv6 := &x.Min + yyv6.CodecDecodeSelf(d) } case "default": if r.TryDecodeAsNil() { x.Default = nil } else { - yyv4056 := &x.Default - yyv4056.CodecDecodeSelf(d) + yyv7 := &x.Default + yyv7.CodecDecodeSelf(d) } case "defaultRequest": if r.TryDecodeAsNil() { x.DefaultRequest = nil } else { - yyv4057 := &x.DefaultRequest - yyv4057.CodecDecodeSelf(d) + yyv8 := &x.DefaultRequest + yyv8.CodecDecodeSelf(d) } case "maxLimitRequestRatio": if r.TryDecodeAsNil() { x.MaxLimitRequestRatio = nil } else { - yyv4058 := &x.MaxLimitRequestRatio - yyv4058.CodecDecodeSelf(d) + yyv9 := &x.MaxLimitRequestRatio + yyv9.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys4052) - } // end switch yys4052 - } // end for yyj4052 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -50910,16 +59633,16 @@ func (x *LimitRangeItem) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4059 int - var yyb4059 bool - var yyhl4059 bool = l >= 0 - yyj4059++ - if yyhl4059 { - yyb4059 = yyj4059 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb4059 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb4059 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -50927,15 +59650,16 @@ func (x *LimitRangeItem) codecDecodeSelfFromArray(l int, d 
*codec1978.Decoder) { if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = LimitType(r.DecodeString()) + yyv11 := &x.Type + yyv11.CodecDecodeSelf(d) } - yyj4059++ - if yyhl4059 { - yyb4059 = yyj4059 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb4059 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb4059 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -50943,16 +59667,16 @@ func (x *LimitRangeItem) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Max = nil } else { - yyv4061 := &x.Max - yyv4061.CodecDecodeSelf(d) + yyv12 := &x.Max + yyv12.CodecDecodeSelf(d) } - yyj4059++ - if yyhl4059 { - yyb4059 = yyj4059 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb4059 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb4059 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -50960,16 +59684,16 @@ func (x *LimitRangeItem) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Min = nil } else { - yyv4062 := &x.Min - yyv4062.CodecDecodeSelf(d) + yyv13 := &x.Min + yyv13.CodecDecodeSelf(d) } - yyj4059++ - if yyhl4059 { - yyb4059 = yyj4059 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb4059 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb4059 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -50977,16 +59701,16 @@ func (x *LimitRangeItem) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Default = nil } else { - yyv4063 := &x.Default - yyv4063.CodecDecodeSelf(d) + yyv14 := &x.Default + yyv14.CodecDecodeSelf(d) } - yyj4059++ - if yyhl4059 { - yyb4059 = yyj4059 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb4059 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb4059 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -50994,16 +59718,16 @@ func (x *LimitRangeItem) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.DefaultRequest = nil } else { - yyv4064 := &x.DefaultRequest - yyv4064.CodecDecodeSelf(d) + yyv15 := &x.DefaultRequest + yyv15.CodecDecodeSelf(d) } - yyj4059++ - if yyhl4059 { - yyb4059 = yyj4059 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb4059 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb4059 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -51011,21 +59735,21 @@ func (x *LimitRangeItem) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.MaxLimitRequestRatio = nil } else { - yyv4065 := &x.MaxLimitRequestRatio - yyv4065.CodecDecodeSelf(d) + yyv16 := &x.MaxLimitRequestRatio + yyv16.CodecDecodeSelf(d) } for { - yyj4059++ - if yyhl4059 { - yyb4059 = yyj4059 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb4059 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb4059 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4059-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -51037,36 +59761,36 @@ func (x *LimitRangeSpec) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym4066 := z.EncBinary() - _ = yym4066 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep4067 := !z.EncBinary() - yy2arr4067 := z.EncBasicHandle().StructToArray - var yyq4067 [1]bool - _, _, _ = 
yysep4067, yyq4067, yy2arr4067 - const yyr4067 bool = false - var yynn4067 int - if yyr4067 || yy2arr4067 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn4067 = 1 - for _, b := range yyq4067 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn4067++ + yynn2++ } } - r.EncodeMapStart(yynn4067) - yynn4067 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr4067 || yy2arr4067 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Limits == nil { r.EncodeNil() } else { - yym4069 := z.EncBinary() - _ = yym4069 + yym4 := z.EncBinary() + _ = yym4 if false { } else { h.encSliceLimitRangeItem(([]LimitRangeItem)(x.Limits), e) @@ -51079,15 +59803,15 @@ func (x *LimitRangeSpec) CodecEncodeSelf(e *codec1978.Encoder) { if x.Limits == nil { r.EncodeNil() } else { - yym4070 := z.EncBinary() - _ = yym4070 + yym5 := z.EncBinary() + _ = yym5 if false { } else { h.encSliceLimitRangeItem(([]LimitRangeItem)(x.Limits), e) } } } - if yyr4067 || yy2arr4067 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -51100,25 +59824,25 @@ func (x *LimitRangeSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4071 := z.DecBinary() - _ = yym4071 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct4072 := r.ContainerType() - if yyct4072 == codecSelferValueTypeMap1234 { - yyl4072 := r.ReadMapStart() - if yyl4072 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl4072, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct4072 == codecSelferValueTypeArray1234 { - yyl4072 := r.ReadArrayStart() - if yyl4072 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl4072, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -51130,12 +59854,12 @@ func (x *LimitRangeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys4073Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4073Slc - var yyhl4073 bool = l >= 0 - for yyj4073 := 0; ; yyj4073++ { - if yyhl4073 { - if yyj4073 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -51144,26 +59868,26 @@ func (x *LimitRangeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4073Slc = r.DecodeBytes(yys4073Slc, true, true) - yys4073 := string(yys4073Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4073 { + switch yys3 { case "limits": if r.TryDecodeAsNil() { x.Limits = nil } else { - yyv4074 := &x.Limits - yym4075 := z.DecBinary() - _ = yym4075 + yyv4 := &x.Limits + 
yym5 := z.DecBinary() + _ = yym5 if false { } else { - h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv4074), d) + h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv4), d) } } default: - z.DecStructFieldNotFound(-1, yys4073) - } // end switch yys4073 - } // end for yyj4073 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -51171,16 +59895,16 @@ func (x *LimitRangeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4076 int - var yyb4076 bool - var yyhl4076 bool = l >= 0 - yyj4076++ - if yyhl4076 { - yyb4076 = yyj4076 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb4076 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb4076 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -51188,26 +59912,26 @@ func (x *LimitRangeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Limits = nil } else { - yyv4077 := &x.Limits - yym4078 := z.DecBinary() - _ = yym4078 + yyv7 := &x.Limits + yym8 := z.DecBinary() + _ = yym8 if false { } else { - h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv4077), d) + h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv7), d) } } for { - yyj4076++ - if yyhl4076 { - yyb4076 = yyj4076 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb4076 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb4076 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4076-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -51219,38 +59943,38 @@ func (x *LimitRange) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym4079 := z.EncBinary() - _ = yym4079 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep4080 := !z.EncBinary() - yy2arr4080 := z.EncBasicHandle().StructToArray - var yyq4080 [4]bool - _, _, _ = yysep4080, yyq4080, yy2arr4080 - const yyr4080 bool = false - yyq4080[0] = x.Kind != "" - yyq4080[1] = x.APIVersion != "" - yyq4080[2] = true - yyq4080[3] = true - var yynn4080 int - if yyr4080 || yy2arr4080 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn4080 = 0 - for _, b := range yyq4080 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn4080++ + yynn2++ } } - r.EncodeMapStart(yynn4080) - yynn4080 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr4080 || yy2arr4080 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4080[0] { - yym4082 := z.EncBinary() - _ = yym4082 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -51259,23 +59983,23 @@ func (x *LimitRange) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4080[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4083 := z.EncBinary() - _ = 
yym4083 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr4080 || yy2arr4080 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4080[1] { - yym4085 := z.EncBinary() - _ = yym4085 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -51284,53 +60008,65 @@ func (x *LimitRange) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4080[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4086 := z.EncBinary() - _ = yym4086 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr4080 || yy2arr4080 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4080[2] { - yy4088 := &x.ObjectMeta - yy4088.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq4080[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4089 := &x.ObjectMeta - yy4089.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr4080 || yy2arr4080 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4080[3] { - yy4091 := &x.Spec - yy4091.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq4080[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4092 := &x.Spec - yy4092.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } } - if yyr4080 || yy2arr4080 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -51343,25 +60079,25 @@ func (x *LimitRange) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4093 := z.DecBinary() - _ = yym4093 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct4094 := r.ContainerType() - if yyct4094 == codecSelferValueTypeMap1234 { - yyl4094 := r.ReadMapStart() - if yyl4094 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl4094, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct4094 == codecSelferValueTypeArray1234 { - yyl4094 := r.ReadArrayStart() - if yyl4094 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl4094, d) + x.codecDecodeSelfFromArray(yyl2, d) } } 
else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -51373,12 +60109,12 @@ func (x *LimitRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys4095Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4095Slc - var yyhl4095 bool = l >= 0 - for yyj4095 := 0; ; yyj4095++ { - if yyhl4095 { - if yyj4095 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -51387,40 +60123,58 @@ func (x *LimitRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4095Slc = r.DecodeBytes(yys4095Slc, true, true) - yys4095 := string(yys4095Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4095 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv4098 := &x.ObjectMeta - yyv4098.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = LimitRangeSpec{} } else { - yyv4099 := &x.Spec - yyv4099.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys4095) - } // end switch yys4095 - } // end for yyj4095 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -51428,16 +60182,16 @@ func (x *LimitRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4100 int - var yyb4100 bool - var yyhl4100 bool = l >= 0 - yyj4100++ - if yyhl4100 { - yyb4100 = yyj4100 > l + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb4100 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb4100 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -51445,15 +60199,21 @@ func (x *LimitRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } - yyj4100++ - if yyhl4100 { - yyb4100 = yyj4100 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb4100 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb4100 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -51461,32 +60221,44 @@ func (x *LimitRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = 
string(r.DecodeString()) + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } - yyj4100++ - if yyhl4100 { - yyb4100 = yyj4100 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb4100 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb4100 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv4103 := &x.ObjectMeta - yyv4103.CodecDecodeSelf(d) + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } } - yyj4100++ - if yyhl4100 { - yyb4100 = yyj4100 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb4100 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb4100 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -51494,21 +60266,21 @@ func (x *LimitRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Spec = LimitRangeSpec{} } else { - yyv4104 := &x.Spec - yyv4104.CodecDecodeSelf(d) + yyv18 := &x.Spec + yyv18.CodecDecodeSelf(d) } for { - yyj4100++ - if yyhl4100 { - yyb4100 = yyj4100 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb4100 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb4100 { + if yyb11 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4100-1, "") + z.DecStructFieldNotFound(yyj11-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -51520,37 +60292,37 @@ func (x *LimitRangeList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym4105 := z.EncBinary() - _ = yym4105 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep4106 := !z.EncBinary() - yy2arr4106 := z.EncBasicHandle().StructToArray - var yyq4106 [4]bool - _, _, _ = yysep4106, yyq4106, yy2arr4106 - const yyr4106 bool = false - yyq4106[0] = x.Kind != "" - yyq4106[1] = x.APIVersion != "" - yyq4106[2] = true - var yynn4106 int - if yyr4106 || yy2arr4106 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn4106 = 1 - for _, b := range yyq4106 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn4106++ + yynn2++ } } - r.EncodeMapStart(yynn4106) - yynn4106 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr4106 || yy2arr4106 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4106[0] { - yym4108 := z.EncBinary() - _ = yym4108 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -51559,23 +60331,23 @@ func (x *LimitRangeList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4106[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4109 := z.EncBinary() - _ = yym4109 + yym5 := z.EncBinary() + _ = yym5 if false { } else { 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr4106 || yy2arr4106 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4106[1] { - yym4111 := z.EncBinary() - _ = yym4111 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -51584,54 +60356,54 @@ func (x *LimitRangeList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4106[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4112 := z.EncBinary() - _ = yym4112 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr4106 || yy2arr4106 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4106[2] { - yy4114 := &x.ListMeta - yym4115 := z.EncBinary() - _ = yym4115 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy4114) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy4114) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq4106[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4116 := &x.ListMeta - yym4117 := z.EncBinary() - _ = yym4117 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy4116) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy4116) + z.EncFallback(yy12) } } } - if yyr4106 || yy2arr4106 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym4119 := z.EncBinary() - _ = yym4119 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceLimitRange(([]LimitRange)(x.Items), e) @@ -51644,15 +60416,15 @@ func (x *LimitRangeList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym4120 := z.EncBinary() - _ = yym4120 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceLimitRange(([]LimitRange)(x.Items), e) } } } - if yyr4106 || yy2arr4106 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -51665,25 +60437,25 @@ func (x *LimitRangeList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4121 := z.DecBinary() - _ = yym4121 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct4122 := r.ContainerType() - if yyct4122 == codecSelferValueTypeMap1234 { - yyl4122 := r.ReadMapStart() - if yyl4122 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl4122, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct4122 == codecSelferValueTypeArray1234 { - yyl4122 := r.ReadArrayStart() - if yyl4122 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl4122, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -51695,12 +60467,12 @@ func (x *LimitRangeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys4123Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4123Slc - var yyhl4123 bool = l >= 0 - for yyj4123 := 0; ; yyj4123++ { - if yyhl4123 { - if yyj4123 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -51709,51 +60481,63 @@ func (x *LimitRangeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4123Slc = r.DecodeBytes(yys4123Slc, true, true) - yys4123 := string(yys4123Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4123 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv4126 := &x.ListMeta - yym4127 := z.DecBinary() - _ = yym4127 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv4126) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv4126, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv4128 := &x.Items - yym4129 := z.DecBinary() - _ = yym4129 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceLimitRange((*[]LimitRange)(yyv4128), d) + h.decSliceLimitRange((*[]LimitRange)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys4123) - } // end switch yys4123 - } // end for yyj4123 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -51761,16 +60545,16 @@ func (x *LimitRangeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4130 int - var yyb4130 bool - var yyhl4130 bool = l >= 0 - yyj4130++ - if yyhl4130 { - yyb4130 = yyj4130 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4130 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4130 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -51778,15 +60562,21 @@ func (x *LimitRangeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj4130++ - if yyhl4130 { - yyb4130 = yyj4130 
> l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4130 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4130 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -51794,38 +60584,44 @@ func (x *LimitRangeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj4130++ - if yyhl4130 { - yyb4130 = yyj4130 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4130 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4130 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv4133 := &x.ListMeta - yym4134 := z.DecBinary() - _ = yym4134 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv4133) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv4133, false) + z.DecFallback(yyv17, false) } } - yyj4130++ - if yyhl4130 { - yyb4130 = yyj4130 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4130 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4130 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -51833,26 +60629,26 @@ func (x *LimitRangeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Items = nil } else { - yyv4135 := &x.Items - yym4136 := z.DecBinary() - _ = yym4136 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceLimitRange((*[]LimitRange)(yyv4135), d) + h.decSliceLimitRange((*[]LimitRange)(yyv19), d) } } for { - yyj4130++ - if yyhl4130 { - yyb4130 = yyj4130 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4130 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4130 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4130-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -51861,8 +60657,8 @@ func (x ResourceQuotaScope) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym4137 := z.EncBinary() - _ = yym4137 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -51874,8 +60670,8 @@ func (x *ResourceQuotaScope) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4138 := z.DecBinary() - _ = yym4138 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -51890,34 +60686,34 @@ func (x *ResourceQuotaSpec) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym4139 := z.EncBinary() - _ = yym4139 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep4140 := !z.EncBinary() - yy2arr4140 := z.EncBasicHandle().StructToArray - var yyq4140 [2]bool - _, _, _ = yysep4140, yyq4140, yy2arr4140 - const yyr4140 bool = false - yyq4140[0] = len(x.Hard) != 0 - yyq4140[1] = len(x.Scopes) != 0 - var yynn4140 int - if yyr4140 || yy2arr4140 { + yysep2 := !z.EncBinary() + 
yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Hard) != 0 + yyq2[1] = len(x.Scopes) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn4140 = 0 - for _, b := range yyq4140 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn4140++ + yynn2++ } } - r.EncodeMapStart(yynn4140) - yynn4140 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr4140 || yy2arr4140 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4140[0] { + if yyq2[0] { if x.Hard == nil { r.EncodeNil() } else { @@ -51927,7 +60723,7 @@ func (x *ResourceQuotaSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq4140[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("hard")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -51938,14 +60734,14 @@ func (x *ResourceQuotaSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr4140 || yy2arr4140 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4140[1] { + if yyq2[1] { if x.Scopes == nil { r.EncodeNil() } else { - yym4143 := z.EncBinary() - _ = yym4143 + yym7 := z.EncBinary() + _ = yym7 if false { } else { h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e) @@ -51955,15 +60751,15 @@ func (x *ResourceQuotaSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq4140[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("scopes")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Scopes == nil { r.EncodeNil() } else { - yym4144 := z.EncBinary() - _ = yym4144 + yym8 := z.EncBinary() + _ = yym8 if false { } else { h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e) @@ -51971,7 +60767,7 @@ func (x *ResourceQuotaSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr4140 || yy2arr4140 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -51984,25 +60780,25 @@ func (x *ResourceQuotaSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4145 := z.DecBinary() - _ = yym4145 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct4146 := r.ContainerType() - if yyct4146 == codecSelferValueTypeMap1234 { - yyl4146 := r.ReadMapStart() - if yyl4146 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl4146, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct4146 == codecSelferValueTypeArray1234 { - yyl4146 := r.ReadArrayStart() - if yyl4146 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl4146, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -52014,12 +60810,12 @@ func (x *ResourceQuotaSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys4147Slc = 
z.DecScratchBuffer() // default slice to decode into - _ = yys4147Slc - var yyhl4147 bool = l >= 0 - for yyj4147 := 0; ; yyj4147++ { - if yyhl4147 { - if yyj4147 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -52028,33 +60824,33 @@ func (x *ResourceQuotaSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4147Slc = r.DecodeBytes(yys4147Slc, true, true) - yys4147 := string(yys4147Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4147 { + switch yys3 { case "hard": if r.TryDecodeAsNil() { x.Hard = nil } else { - yyv4148 := &x.Hard - yyv4148.CodecDecodeSelf(d) + yyv4 := &x.Hard + yyv4.CodecDecodeSelf(d) } case "scopes": if r.TryDecodeAsNil() { x.Scopes = nil } else { - yyv4149 := &x.Scopes - yym4150 := z.DecBinary() - _ = yym4150 + yyv5 := &x.Scopes + yym6 := z.DecBinary() + _ = yym6 if false { } else { - h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv4149), d) + h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv5), d) } } default: - z.DecStructFieldNotFound(-1, yys4147) - } // end switch yys4147 - } // end for yyj4147 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -52062,16 +60858,16 @@ func (x *ResourceQuotaSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4151 int - var yyb4151 bool - var yyhl4151 bool = l >= 0 - yyj4151++ - if yyhl4151 { - yyb4151 = yyj4151 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb4151 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb4151 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -52079,16 +60875,16 @@ func (x *ResourceQuotaSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.Hard = nil } else { - yyv4152 := &x.Hard - yyv4152.CodecDecodeSelf(d) + yyv8 := &x.Hard + yyv8.CodecDecodeSelf(d) } - yyj4151++ - if yyhl4151 { - yyb4151 = yyj4151 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb4151 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb4151 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -52096,26 +60892,26 @@ func (x *ResourceQuotaSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.Scopes = nil } else { - yyv4153 := &x.Scopes - yym4154 := z.DecBinary() - _ = yym4154 + yyv9 := &x.Scopes + yym10 := z.DecBinary() + _ = yym10 if false { } else { - h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv4153), d) + h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv9), d) } } for { - yyj4151++ - if yyhl4151 { - yyb4151 = yyj4151 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb4151 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb4151 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4151-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -52127,34 +60923,34 @@ func (x *ResourceQuotaStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym4155 := z.EncBinary() - _ 
= yym4155 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep4156 := !z.EncBinary() - yy2arr4156 := z.EncBasicHandle().StructToArray - var yyq4156 [2]bool - _, _, _ = yysep4156, yyq4156, yy2arr4156 - const yyr4156 bool = false - yyq4156[0] = len(x.Hard) != 0 - yyq4156[1] = len(x.Used) != 0 - var yynn4156 int - if yyr4156 || yy2arr4156 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Hard) != 0 + yyq2[1] = len(x.Used) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn4156 = 0 - for _, b := range yyq4156 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn4156++ + yynn2++ } } - r.EncodeMapStart(yynn4156) - yynn4156 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr4156 || yy2arr4156 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4156[0] { + if yyq2[0] { if x.Hard == nil { r.EncodeNil() } else { @@ -52164,7 +60960,7 @@ func (x *ResourceQuotaStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq4156[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("hard")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -52175,9 +60971,9 @@ func (x *ResourceQuotaStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr4156 || yy2arr4156 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4156[1] { + if yyq2[1] { if x.Used == nil { r.EncodeNil() } else { @@ -52187,7 +60983,7 @@ func (x *ResourceQuotaStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq4156[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("used")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -52198,7 +60994,7 @@ func (x *ResourceQuotaStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr4156 || yy2arr4156 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -52211,25 +61007,25 @@ func (x *ResourceQuotaStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4159 := z.DecBinary() - _ = yym4159 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct4160 := r.ContainerType() - if yyct4160 == codecSelferValueTypeMap1234 { - yyl4160 := r.ReadMapStart() - if yyl4160 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl4160, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct4160 == codecSelferValueTypeArray1234 { - yyl4160 := r.ReadArrayStart() - if yyl4160 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl4160, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -52241,12 +61037,12 @@ func (x *ResourceQuotaStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder var h codecSelfer1234 z, r := 
codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys4161Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4161Slc - var yyhl4161 bool = l >= 0 - for yyj4161 := 0; ; yyj4161++ { - if yyhl4161 { - if yyj4161 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -52255,28 +61051,28 @@ func (x *ResourceQuotaStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4161Slc = r.DecodeBytes(yys4161Slc, true, true) - yys4161 := string(yys4161Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4161 { + switch yys3 { case "hard": if r.TryDecodeAsNil() { x.Hard = nil } else { - yyv4162 := &x.Hard - yyv4162.CodecDecodeSelf(d) + yyv4 := &x.Hard + yyv4.CodecDecodeSelf(d) } case "used": if r.TryDecodeAsNil() { x.Used = nil } else { - yyv4163 := &x.Used - yyv4163.CodecDecodeSelf(d) + yyv5 := &x.Used + yyv5.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys4161) - } // end switch yys4161 - } // end for yyj4161 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -52284,16 +61080,16 @@ func (x *ResourceQuotaStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4164 int - var yyb4164 bool - var yyhl4164 bool = l >= 0 - yyj4164++ - if yyhl4164 { - yyb4164 = yyj4164 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb4164 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb4164 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -52301,16 +61097,16 @@ func (x *ResourceQuotaStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Hard = nil } else { - yyv4165 := &x.Hard - yyv4165.CodecDecodeSelf(d) + yyv7 := &x.Hard + yyv7.CodecDecodeSelf(d) } - yyj4164++ - if yyhl4164 { - yyb4164 = yyj4164 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb4164 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb4164 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -52318,21 +61114,21 @@ func (x *ResourceQuotaStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Used = nil } else { - yyv4166 := &x.Used - yyv4166.CodecDecodeSelf(d) + yyv8 := &x.Used + yyv8.CodecDecodeSelf(d) } for { - yyj4164++ - if yyhl4164 { - yyb4164 = yyj4164 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb4164 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb4164 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4164-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -52344,39 +61140,39 @@ func (x *ResourceQuota) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym4167 := z.EncBinary() - _ = yym4167 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep4168 := !z.EncBinary() - yy2arr4168 := z.EncBasicHandle().StructToArray - var yyq4168 [5]bool - _, _, _ = yysep4168, yyq4168, yy2arr4168 - const yyr4168 bool = false - yyq4168[0] = x.Kind 
!= "" - yyq4168[1] = x.APIVersion != "" - yyq4168[2] = true - yyq4168[3] = true - yyq4168[4] = true - var yynn4168 int - if yyr4168 || yy2arr4168 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn4168 = 0 - for _, b := range yyq4168 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn4168++ + yynn2++ } } - r.EncodeMapStart(yynn4168) - yynn4168 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr4168 || yy2arr4168 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4168[0] { - yym4170 := z.EncBinary() - _ = yym4170 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -52385,23 +61181,23 @@ func (x *ResourceQuota) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4168[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4171 := z.EncBinary() - _ = yym4171 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr4168 || yy2arr4168 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4168[1] { - yym4173 := z.EncBinary() - _ = yym4173 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -52410,70 +61206,82 @@ func (x *ResourceQuota) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4168[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4174 := z.EncBinary() - _ = yym4174 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr4168 || yy2arr4168 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4168[2] { - yy4176 := &x.ObjectMeta - yy4176.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq4168[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4177 := &x.ObjectMeta - yy4177.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr4168 || yy2arr4168 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4168[3] { - yy4179 := &x.Spec - yy4179.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq4168[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4180 := &x.Spec - yy4180.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } } - if yyr4168 || yy2arr4168 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4168[4] { - yy4182 := &x.Status - yy4182.CodecEncodeSelf(e) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq4168[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4183 := &x.Status - yy4183.CodecEncodeSelf(e) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } - if yyr4168 || yy2arr4168 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -52486,25 +61294,25 @@ func (x *ResourceQuota) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4184 := z.DecBinary() - _ = yym4184 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct4185 := r.ContainerType() - if yyct4185 == codecSelferValueTypeMap1234 { - yyl4185 := r.ReadMapStart() - if yyl4185 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl4185, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct4185 == codecSelferValueTypeArray1234 { - yyl4185 := r.ReadArrayStart() - if yyl4185 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl4185, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -52516,12 +61324,12 @@ func (x *ResourceQuota) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys4186Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4186Slc - var yyhl4186 bool = l >= 0 - for yyj4186 := 0; ; yyj4186++ { - if yyhl4186 { - if yyj4186 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -52530,47 +61338,65 @@ func (x *ResourceQuota) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4186Slc = r.DecodeBytes(yys4186Slc, true, true) - yys4186 := string(yys4186Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4186 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = 
pkg2_v1.ObjectMeta{} } else { - yyv4189 := &x.ObjectMeta - yyv4189.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = ResourceQuotaSpec{} } else { - yyv4190 := &x.Spec - yyv4190.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = ResourceQuotaStatus{} } else { - yyv4191 := &x.Status - yyv4191.CodecDecodeSelf(d) + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys4186) - } // end switch yys4186 - } // end for yyj4186 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -52578,16 +61404,16 @@ func (x *ResourceQuota) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4192 int - var yyb4192 bool - var yyhl4192 bool = l >= 0 - yyj4192++ - if yyhl4192 { - yyb4192 = yyj4192 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4192 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4192 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -52595,15 +61421,21 @@ func (x *ResourceQuota) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj4192++ - if yyhl4192 { - yyb4192 = yyj4192 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4192 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4192 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -52611,32 +61443,44 @@ func (x *ResourceQuota) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj4192++ - if yyhl4192 { - yyb4192 = yyj4192 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4192 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4192 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv4195 := &x.ObjectMeta - yyv4195.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj4192++ - if yyhl4192 { - yyb4192 = yyj4192 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4192 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4192 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -52644,16 +61488,16 @@ func (x *ResourceQuota) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Spec = ResourceQuotaSpec{} } else { - yyv4196 := &x.Spec - yyv4196.CodecDecodeSelf(d) + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) } - yyj4192++ - if yyhl4192 { - yyb4192 = yyj4192 > l + yyj12++ + if yyhl12 { 
+ yyb12 = yyj12 > l } else { - yyb4192 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4192 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -52661,21 +61505,21 @@ func (x *ResourceQuota) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Status = ResourceQuotaStatus{} } else { - yyv4197 := &x.Status - yyv4197.CodecDecodeSelf(d) + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) } for { - yyj4192++ - if yyhl4192 { - yyb4192 = yyj4192 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4192 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4192 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4192-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -52687,37 +61531,37 @@ func (x *ResourceQuotaList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym4198 := z.EncBinary() - _ = yym4198 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep4199 := !z.EncBinary() - yy2arr4199 := z.EncBasicHandle().StructToArray - var yyq4199 [4]bool - _, _, _ = yysep4199, yyq4199, yy2arr4199 - const yyr4199 bool = false - yyq4199[0] = x.Kind != "" - yyq4199[1] = x.APIVersion != "" - yyq4199[2] = true - var yynn4199 int - if yyr4199 || yy2arr4199 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn4199 = 1 - for _, b := range yyq4199 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn4199++ + yynn2++ } } - r.EncodeMapStart(yynn4199) - yynn4199 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr4199 || yy2arr4199 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4199[0] { - yym4201 := z.EncBinary() - _ = yym4201 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -52726,23 +61570,23 @@ func (x *ResourceQuotaList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4199[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4202 := z.EncBinary() - _ = yym4202 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr4199 || yy2arr4199 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4199[1] { - yym4204 := z.EncBinary() - _ = yym4204 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -52751,54 +61595,54 @@ func (x *ResourceQuotaList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4199[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4205 := z.EncBinary() - _ = yym4205 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - 
if yyr4199 || yy2arr4199 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4199[2] { - yy4207 := &x.ListMeta - yym4208 := z.EncBinary() - _ = yym4208 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy4207) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy4207) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq4199[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4209 := &x.ListMeta - yym4210 := z.EncBinary() - _ = yym4210 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy4209) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy4209) + z.EncFallback(yy12) } } } - if yyr4199 || yy2arr4199 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym4212 := z.EncBinary() - _ = yym4212 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceResourceQuota(([]ResourceQuota)(x.Items), e) @@ -52811,15 +61655,15 @@ func (x *ResourceQuotaList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym4213 := z.EncBinary() - _ = yym4213 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceResourceQuota(([]ResourceQuota)(x.Items), e) } } } - if yyr4199 || yy2arr4199 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -52832,25 +61676,25 @@ func (x *ResourceQuotaList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4214 := z.DecBinary() - _ = yym4214 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct4215 := r.ContainerType() - if yyct4215 == codecSelferValueTypeMap1234 { - yyl4215 := r.ReadMapStart() - if yyl4215 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl4215, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct4215 == codecSelferValueTypeArray1234 { - yyl4215 := r.ReadArrayStart() - if yyl4215 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl4215, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -52862,12 +61706,12 @@ func (x *ResourceQuotaList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys4216Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4216Slc - var yyhl4216 bool = l >= 0 - for yyj4216 := 0; ; yyj4216++ { - if yyhl4216 { - if yyj4216 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -52876,51 +61720,63 @@ func (x *ResourceQuotaList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4216Slc = r.DecodeBytes(yys4216Slc, true, true) - yys4216 := string(yys4216Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4216 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv4219 := &x.ListMeta - yym4220 := z.DecBinary() - _ = yym4220 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv4219) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv4219, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv4221 := &x.Items - yym4222 := z.DecBinary() - _ = yym4222 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceResourceQuota((*[]ResourceQuota)(yyv4221), d) + h.decSliceResourceQuota((*[]ResourceQuota)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys4216) - } // end switch yys4216 - } // end for yyj4216 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -52928,16 +61784,16 @@ func (x *ResourceQuotaList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4223 int - var yyb4223 bool - var yyhl4223 bool = l >= 0 - yyj4223++ - if yyhl4223 { - yyb4223 = yyj4223 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4223 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4223 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -52945,15 +61801,21 @@ func (x *ResourceQuotaList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj4223++ - if yyhl4223 { - yyb4223 = yyj4223 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4223 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4223 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -52961,38 +61823,44 @@ func (x *ResourceQuotaList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj4223++ - if yyhl4223 { - yyb4223 = yyj4223 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4223 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4223 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if 
r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv4226 := &x.ListMeta - yym4227 := z.DecBinary() - _ = yym4227 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv4226) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv4226, false) + z.DecFallback(yyv17, false) } } - yyj4223++ - if yyhl4223 { - yyb4223 = yyj4223 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4223 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4223 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -53000,26 +61868,26 @@ func (x *ResourceQuotaList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.Items = nil } else { - yyv4228 := &x.Items - yym4229 := z.DecBinary() - _ = yym4229 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceResourceQuota((*[]ResourceQuota)(yyv4228), d) + h.decSliceResourceQuota((*[]ResourceQuota)(yyv19), d) } } for { - yyj4223++ - if yyhl4223 { - yyb4223 = yyj4223 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4223 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4223 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4223-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -53031,40 +61899,40 @@ func (x *Secret) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym4230 := z.EncBinary() - _ = yym4230 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep4231 := !z.EncBinary() - yy2arr4231 := z.EncBasicHandle().StructToArray - var yyq4231 [6]bool - _, _, _ = yysep4231, yyq4231, yy2arr4231 - const yyr4231 bool = false - yyq4231[0] = x.Kind != "" - yyq4231[1] = x.APIVersion != "" - yyq4231[2] = true - yyq4231[3] = len(x.Data) != 0 - yyq4231[4] = len(x.StringData) != 0 - yyq4231[5] = x.Type != "" - var yynn4231 int - if yyr4231 || yy2arr4231 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = len(x.Data) != 0 + yyq2[4] = len(x.StringData) != 0 + yyq2[5] = x.Type != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(6) } else { - yynn4231 = 0 - for _, b := range yyq4231 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn4231++ + yynn2++ } } - r.EncodeMapStart(yynn4231) - yynn4231 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr4231 || yy2arr4231 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4231[0] { - yym4233 := z.EncBinary() - _ = yym4233 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -53073,23 +61941,23 @@ func (x *Secret) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4231[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4234 := z.EncBinary() - _ = yym4234 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr4231 || 
yy2arr4231 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4231[1] { - yym4236 := z.EncBinary() - _ = yym4236 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -53098,43 +61966,55 @@ func (x *Secret) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4231[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4237 := z.EncBinary() - _ = yym4237 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr4231 || yy2arr4231 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4231[2] { - yy4239 := &x.ObjectMeta - yy4239.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq4231[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4240 := &x.ObjectMeta - yy4240.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr4231 || yy2arr4231 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4231[3] { + if yyq2[3] { if x.Data == nil { r.EncodeNil() } else { - yym4242 := z.EncBinary() - _ = yym4242 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encMapstringSliceuint8((map[string][]uint8)(x.Data), e) @@ -53144,15 +62024,15 @@ func (x *Secret) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq4231[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("data")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Data == nil { r.EncodeNil() } else { - yym4243 := z.EncBinary() - _ = yym4243 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encMapstringSliceuint8((map[string][]uint8)(x.Data), e) @@ -53160,14 +62040,14 @@ func (x *Secret) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr4231 || yy2arr4231 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4231[4] { + if yyq2[4] { if x.StringData == nil { r.EncodeNil() } else { - yym4245 := z.EncBinary() - _ = yym4245 + yym18 := z.EncBinary() + _ = yym18 if false { } else { z.F.EncMapStringStringV(x.StringData, false, e) @@ -53177,15 +62057,15 @@ func (x *Secret) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq4231[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("stringData")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.StringData == nil { r.EncodeNil() } else { - yym4246 := z.EncBinary() - _ = yym4246 + yym19 := z.EncBinary() + _ = yym19 if false { } else { z.F.EncMapStringStringV(x.StringData, false, e) @@ -53193,22 +62073,22 @@ func (x *Secret) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr4231 || yy2arr4231 { + if yyr2 || yy2arr2 { 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4231[5] { + if yyq2[5] { x.Type.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4231[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("type")) z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Type.CodecEncodeSelf(e) } } - if yyr4231 || yy2arr4231 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -53221,25 +62101,25 @@ func (x *Secret) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4248 := z.DecBinary() - _ = yym4248 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct4249 := r.ContainerType() - if yyct4249 == codecSelferValueTypeMap1234 { - yyl4249 := r.ReadMapStart() - if yyl4249 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl4249, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct4249 == codecSelferValueTypeArray1234 { - yyl4249 := r.ReadArrayStart() - if yyl4249 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl4249, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -53251,12 +62131,12 @@ func (x *Secret) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys4250Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4250Slc - var yyhl4250 bool = l >= 0 - for yyj4250 := 0; ; yyj4250++ { - if yyhl4250 { - if yyj4250 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -53265,63 +62145,82 @@ func (x *Secret) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4250Slc = r.DecodeBytes(yys4250Slc, true, true) - yys4250 := string(yys4250Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4250 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv4253 := &x.ObjectMeta - yyv4253.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "data": if r.TryDecodeAsNil() { x.Data = nil } else { - yyv4254 := &x.Data - yym4255 := 
z.DecBinary() - _ = yym4255 + yyv10 := &x.Data + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decMapstringSliceuint8((*map[string][]uint8)(yyv4254), d) + h.decMapstringSliceuint8((*map[string][]uint8)(yyv10), d) } } case "stringData": if r.TryDecodeAsNil() { x.StringData = nil } else { - yyv4256 := &x.StringData - yym4257 := z.DecBinary() - _ = yym4257 + yyv12 := &x.StringData + yym13 := z.DecBinary() + _ = yym13 if false { } else { - z.F.DecMapStringStringX(yyv4256, false, d) + z.F.DecMapStringStringX(yyv12, false, d) } } case "type": if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = SecretType(r.DecodeString()) + yyv14 := &x.Type + yyv14.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys4250) - } // end switch yys4250 - } // end for yyj4250 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -53329,16 +62228,16 @@ func (x *Secret) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4259 int - var yyb4259 bool - var yyhl4259 bool = l >= 0 - yyj4259++ - if yyhl4259 { - yyb4259 = yyj4259 > l + var yyj15 int + var yyb15 bool + var yyhl15 bool = l >= 0 + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb4259 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb4259 { + if yyb15 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -53346,15 +62245,21 @@ func (x *Secret) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv16 := &x.Kind + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } } - yyj4259++ - if yyhl4259 { - yyb4259 = yyj4259 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb4259 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb4259 { + if yyb15 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -53362,32 +62267,44 @@ func (x *Secret) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv18 := &x.APIVersion + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } } - yyj4259++ - if yyhl4259 { - yyb4259 = yyj4259 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb4259 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb4259 { + if yyb15 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv4262 := &x.ObjectMeta - yyv4262.CodecDecodeSelf(d) + yyv20 := &x.ObjectMeta + yym21 := z.DecBinary() + _ = yym21 + if false { + } else if z.HasExtensions() && z.DecExt(yyv20) { + } else { + z.DecFallback(yyv20, false) + } } - yyj4259++ - if yyhl4259 { - yyb4259 = yyj4259 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb4259 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb4259 { + if yyb15 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -53395,21 +62312,21 @@ func (x *Secret) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Data = nil } else { - yyv4263 := &x.Data - yym4264 := z.DecBinary() - _ = yym4264 + yyv22 := &x.Data + yym23 := z.DecBinary() + _ = yym23 if false { } else { 
- h.decMapstringSliceuint8((*map[string][]uint8)(yyv4263), d) + h.decMapstringSliceuint8((*map[string][]uint8)(yyv22), d) } } - yyj4259++ - if yyhl4259 { - yyb4259 = yyj4259 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb4259 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb4259 { + if yyb15 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -53417,21 +62334,21 @@ func (x *Secret) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.StringData = nil } else { - yyv4265 := &x.StringData - yym4266 := z.DecBinary() - _ = yym4266 + yyv24 := &x.StringData + yym25 := z.DecBinary() + _ = yym25 if false { } else { - z.F.DecMapStringStringX(yyv4265, false, d) + z.F.DecMapStringStringX(yyv24, false, d) } } - yyj4259++ - if yyhl4259 { - yyb4259 = yyj4259 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb4259 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb4259 { + if yyb15 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -53439,20 +62356,21 @@ func (x *Secret) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = SecretType(r.DecodeString()) + yyv26 := &x.Type + yyv26.CodecDecodeSelf(d) } for { - yyj4259++ - if yyhl4259 { - yyb4259 = yyj4259 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb4259 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb4259 { + if yyb15 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4259-1, "") + z.DecStructFieldNotFound(yyj15-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -53461,8 +62379,8 @@ func (x SecretType) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym4268 := z.EncBinary() - _ = yym4268 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -53474,8 +62392,8 @@ func (x *SecretType) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4269 := z.DecBinary() - _ = yym4269 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -53490,37 +62408,37 @@ func (x *SecretList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym4270 := z.EncBinary() - _ = yym4270 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep4271 := !z.EncBinary() - yy2arr4271 := z.EncBasicHandle().StructToArray - var yyq4271 [4]bool - _, _, _ = yysep4271, yyq4271, yy2arr4271 - const yyr4271 bool = false - yyq4271[0] = x.Kind != "" - yyq4271[1] = x.APIVersion != "" - yyq4271[2] = true - var yynn4271 int - if yyr4271 || yy2arr4271 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn4271 = 1 - for _, b := range yyq4271 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn4271++ + yynn2++ } } - r.EncodeMapStart(yynn4271) - yynn4271 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr4271 || yy2arr4271 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4271[0] { - yym4273 := z.EncBinary() - _ = yym4273 + if yyq2[0] { + yym4 := z.EncBinary() + _ = 
yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -53529,23 +62447,23 @@ func (x *SecretList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4271[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4274 := z.EncBinary() - _ = yym4274 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr4271 || yy2arr4271 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4271[1] { - yym4276 := z.EncBinary() - _ = yym4276 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -53554,54 +62472,54 @@ func (x *SecretList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4271[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4277 := z.EncBinary() - _ = yym4277 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr4271 || yy2arr4271 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4271[2] { - yy4279 := &x.ListMeta - yym4280 := z.EncBinary() - _ = yym4280 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy4279) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy4279) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq4271[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4281 := &x.ListMeta - yym4282 := z.EncBinary() - _ = yym4282 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy4281) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy4281) + z.EncFallback(yy12) } } } - if yyr4271 || yy2arr4271 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym4284 := z.EncBinary() - _ = yym4284 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceSecret(([]Secret)(x.Items), e) @@ -53614,15 +62532,15 @@ func (x *SecretList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym4285 := z.EncBinary() - _ = yym4285 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceSecret(([]Secret)(x.Items), e) } } } - if yyr4271 || yy2arr4271 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -53635,25 +62553,25 @@ func (x *SecretList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4286 := z.DecBinary() - _ = yym4286 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct4287 := r.ContainerType() - if yyct4287 == codecSelferValueTypeMap1234 { - yyl4287 := r.ReadMapStart() - if yyl4287 == 0 { + 
yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl4287, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct4287 == codecSelferValueTypeArray1234 { - yyl4287 := r.ReadArrayStart() - if yyl4287 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl4287, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -53665,12 +62583,12 @@ func (x *SecretList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys4288Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4288Slc - var yyhl4288 bool = l >= 0 - for yyj4288 := 0; ; yyj4288++ { - if yyhl4288 { - if yyj4288 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -53679,51 +62597,63 @@ func (x *SecretList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4288Slc = r.DecodeBytes(yys4288Slc, true, true) - yys4288 := string(yys4288Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4288 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv4291 := &x.ListMeta - yym4292 := z.DecBinary() - _ = yym4292 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv4291) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv4291, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv4293 := &x.Items - yym4294 := z.DecBinary() - _ = yym4294 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceSecret((*[]Secret)(yyv4293), d) + h.decSliceSecret((*[]Secret)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys4288) - } // end switch yys4288 - } // end for yyj4288 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -53731,16 +62661,16 @@ func (x *SecretList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4295 int - var yyb4295 bool - var yyhl4295 bool = l >= 0 - yyj4295++ - if yyhl4295 { - yyb4295 = yyj4295 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4295 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4295 { + if 
yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -53748,15 +62678,21 @@ func (x *SecretList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj4295++ - if yyhl4295 { - yyb4295 = yyj4295 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4295 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4295 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -53764,38 +62700,44 @@ func (x *SecretList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj4295++ - if yyhl4295 { - yyb4295 = yyj4295 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4295 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4295 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv4298 := &x.ListMeta - yym4299 := z.DecBinary() - _ = yym4299 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv4298) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv4298, false) + z.DecFallback(yyv17, false) } } - yyj4295++ - if yyhl4295 { - yyb4295 = yyj4295 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4295 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4295 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -53803,26 +62745,26 @@ func (x *SecretList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Items = nil } else { - yyv4300 := &x.Items - yym4301 := z.DecBinary() - _ = yym4301 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceSecret((*[]Secret)(yyv4300), d) + h.decSliceSecret((*[]Secret)(yyv19), d) } } for { - yyj4295++ - if yyhl4295 { - yyb4295 = yyj4295 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4295 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4295 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4295-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -53834,38 +62776,38 @@ func (x *ConfigMap) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym4302 := z.EncBinary() - _ = yym4302 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep4303 := !z.EncBinary() - yy2arr4303 := z.EncBasicHandle().StructToArray - var yyq4303 [4]bool - _, _, _ = yysep4303, yyq4303, yy2arr4303 - const yyr4303 bool = false - yyq4303[0] = x.Kind != "" - yyq4303[1] = x.APIVersion != "" - yyq4303[2] = true - yyq4303[3] = len(x.Data) != 0 - var yynn4303 int - if yyr4303 || yy2arr4303 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion 
!= "" + yyq2[2] = true + yyq2[3] = len(x.Data) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn4303 = 0 - for _, b := range yyq4303 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn4303++ + yynn2++ } } - r.EncodeMapStart(yynn4303) - yynn4303 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr4303 || yy2arr4303 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4303[0] { - yym4305 := z.EncBinary() - _ = yym4305 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -53874,23 +62816,23 @@ func (x *ConfigMap) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4303[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4306 := z.EncBinary() - _ = yym4306 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr4303 || yy2arr4303 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4303[1] { - yym4308 := z.EncBinary() - _ = yym4308 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -53899,43 +62841,55 @@ func (x *ConfigMap) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4303[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4309 := z.EncBinary() - _ = yym4309 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr4303 || yy2arr4303 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4303[2] { - yy4311 := &x.ObjectMeta - yy4311.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq4303[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4312 := &x.ObjectMeta - yy4312.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr4303 || yy2arr4303 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4303[3] { + if yyq2[3] { if x.Data == nil { r.EncodeNil() } else { - yym4314 := z.EncBinary() - _ = yym4314 + yym15 := z.EncBinary() + _ = yym15 if false { } else { z.F.EncMapStringStringV(x.Data, false, e) @@ -53945,15 +62899,15 @@ func (x *ConfigMap) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq4303[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("data")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Data == nil { r.EncodeNil() } else { - yym4315 := z.EncBinary() - _ = yym4315 + yym16 := z.EncBinary() + _ = yym16 if false { } else { 
z.F.EncMapStringStringV(x.Data, false, e) @@ -53961,7 +62915,7 @@ func (x *ConfigMap) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr4303 || yy2arr4303 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -53974,25 +62928,25 @@ func (x *ConfigMap) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4316 := z.DecBinary() - _ = yym4316 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct4317 := r.ContainerType() - if yyct4317 == codecSelferValueTypeMap1234 { - yyl4317 := r.ReadMapStart() - if yyl4317 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl4317, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct4317 == codecSelferValueTypeArray1234 { - yyl4317 := r.ReadArrayStart() - if yyl4317 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl4317, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -54004,12 +62958,12 @@ func (x *ConfigMap) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys4318Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4318Slc - var yyhl4318 bool = l >= 0 - for yyj4318 := 0; ; yyj4318++ { - if yyhl4318 { - if yyj4318 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -54018,45 +62972,63 @@ func (x *ConfigMap) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4318Slc = r.DecodeBytes(yys4318Slc, true, true) - yys4318 := string(yys4318Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4318 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv4321 := &x.ObjectMeta - yyv4321.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "data": if r.TryDecodeAsNil() { x.Data = nil } else { - yyv4322 := &x.Data - yym4323 := z.DecBinary() - _ = yym4323 + yyv10 := &x.Data + yym11 := z.DecBinary() + _ = yym11 if false { } else { - z.F.DecMapStringStringX(yyv4322, false, d) + z.F.DecMapStringStringX(yyv10, false, d) } } default: - z.DecStructFieldNotFound(-1, yys4318) - } // end switch yys4318 - } 
// end for yyj4318 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -54064,16 +63036,16 @@ func (x *ConfigMap) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4324 int - var yyb4324 bool - var yyhl4324 bool = l >= 0 - yyj4324++ - if yyhl4324 { - yyb4324 = yyj4324 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4324 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4324 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -54081,15 +63053,21 @@ func (x *ConfigMap) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj4324++ - if yyhl4324 { - yyb4324 = yyj4324 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4324 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4324 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -54097,32 +63075,44 @@ func (x *ConfigMap) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj4324++ - if yyhl4324 { - yyb4324 = yyj4324 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4324 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4324 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv4327 := &x.ObjectMeta - yyv4327.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj4324++ - if yyhl4324 { - yyb4324 = yyj4324 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4324 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4324 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -54130,26 +63120,26 @@ func (x *ConfigMap) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Data = nil } else { - yyv4328 := &x.Data - yym4329 := z.DecBinary() - _ = yym4329 + yyv19 := &x.Data + yym20 := z.DecBinary() + _ = yym20 if false { } else { - z.F.DecMapStringStringX(yyv4328, false, d) + z.F.DecMapStringStringX(yyv19, false, d) } } for { - yyj4324++ - if yyhl4324 { - yyb4324 = yyj4324 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4324 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4324 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4324-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -54161,37 +63151,37 @@ func (x *ConfigMapList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym4330 := z.EncBinary() - _ = yym4330 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { 
} else { - yysep4331 := !z.EncBinary() - yy2arr4331 := z.EncBasicHandle().StructToArray - var yyq4331 [4]bool - _, _, _ = yysep4331, yyq4331, yy2arr4331 - const yyr4331 bool = false - yyq4331[0] = x.Kind != "" - yyq4331[1] = x.APIVersion != "" - yyq4331[2] = true - var yynn4331 int - if yyr4331 || yy2arr4331 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn4331 = 1 - for _, b := range yyq4331 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn4331++ + yynn2++ } } - r.EncodeMapStart(yynn4331) - yynn4331 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr4331 || yy2arr4331 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4331[0] { - yym4333 := z.EncBinary() - _ = yym4333 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -54200,23 +63190,23 @@ func (x *ConfigMapList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4331[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4334 := z.EncBinary() - _ = yym4334 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr4331 || yy2arr4331 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4331[1] { - yym4336 := z.EncBinary() - _ = yym4336 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -54225,54 +63215,54 @@ func (x *ConfigMapList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4331[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4337 := z.EncBinary() - _ = yym4337 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr4331 || yy2arr4331 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4331[2] { - yy4339 := &x.ListMeta - yym4340 := z.EncBinary() - _ = yym4340 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy4339) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy4339) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq4331[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4341 := &x.ListMeta - yym4342 := z.EncBinary() - _ = yym4342 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy4341) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy4341) + z.EncFallback(yy12) } } } - if yyr4331 || yy2arr4331 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { 
- yym4344 := z.EncBinary() - _ = yym4344 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceConfigMap(([]ConfigMap)(x.Items), e) @@ -54285,15 +63275,15 @@ func (x *ConfigMapList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym4345 := z.EncBinary() - _ = yym4345 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceConfigMap(([]ConfigMap)(x.Items), e) } } } - if yyr4331 || yy2arr4331 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -54306,25 +63296,25 @@ func (x *ConfigMapList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4346 := z.DecBinary() - _ = yym4346 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct4347 := r.ContainerType() - if yyct4347 == codecSelferValueTypeMap1234 { - yyl4347 := r.ReadMapStart() - if yyl4347 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl4347, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct4347 == codecSelferValueTypeArray1234 { - yyl4347 := r.ReadArrayStart() - if yyl4347 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl4347, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -54336,12 +63326,12 @@ func (x *ConfigMapList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys4348Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4348Slc - var yyhl4348 bool = l >= 0 - for yyj4348 := 0; ; yyj4348++ { - if yyhl4348 { - if yyj4348 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -54350,51 +63340,63 @@ func (x *ConfigMapList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4348Slc = r.DecodeBytes(yys4348Slc, true, true) - yys4348 := string(yys4348Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4348 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv4351 := &x.ListMeta - yym4352 := z.DecBinary() - _ = yym4352 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv4351) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv4351, 
false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv4353 := &x.Items - yym4354 := z.DecBinary() - _ = yym4354 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceConfigMap((*[]ConfigMap)(yyv4353), d) + h.decSliceConfigMap((*[]ConfigMap)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys4348) - } // end switch yys4348 - } // end for yyj4348 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -54402,16 +63404,16 @@ func (x *ConfigMapList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4355 int - var yyb4355 bool - var yyhl4355 bool = l >= 0 - yyj4355++ - if yyhl4355 { - yyb4355 = yyj4355 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4355 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4355 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -54419,15 +63421,21 @@ func (x *ConfigMapList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj4355++ - if yyhl4355 { - yyb4355 = yyj4355 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4355 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4355 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -54435,38 +63443,44 @@ func (x *ConfigMapList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj4355++ - if yyhl4355 { - yyb4355 = yyj4355 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4355 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4355 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv4358 := &x.ListMeta - yym4359 := z.DecBinary() - _ = yym4359 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv4358) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv4358, false) + z.DecFallback(yyv17, false) } } - yyj4355++ - if yyhl4355 { - yyb4355 = yyj4355 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4355 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4355 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -54474,26 +63488,26 @@ func (x *ConfigMapList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Items = nil } else { - yyv4360 := &x.Items - yym4361 := z.DecBinary() - _ = yym4361 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceConfigMap((*[]ConfigMap)(yyv4360), d) + h.decSliceConfigMap((*[]ConfigMap)(yyv19), d) } } for { - yyj4355++ - if yyhl4355 { - yyb4355 = yyj4355 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - 
yyb4355 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4355 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4355-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -54502,8 +63516,8 @@ func (x ComponentConditionType) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym4362 := z.EncBinary() - _ = yym4362 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -54515,8 +63529,8 @@ func (x *ComponentConditionType) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4363 := z.DecBinary() - _ = yym4363 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -54531,32 +63545,32 @@ func (x *ComponentCondition) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym4364 := z.EncBinary() - _ = yym4364 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep4365 := !z.EncBinary() - yy2arr4365 := z.EncBasicHandle().StructToArray - var yyq4365 [4]bool - _, _, _ = yysep4365, yyq4365, yy2arr4365 - const yyr4365 bool = false - yyq4365[2] = x.Message != "" - yyq4365[3] = x.Error != "" - var yynn4365 int - if yyr4365 || yy2arr4365 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.Message != "" + yyq2[3] = x.Error != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn4365 = 2 - for _, b := range yyq4365 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn4365++ + yynn2++ } } - r.EncodeMapStart(yynn4365) - yynn4365 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr4365 || yy2arr4365 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) x.Type.CodecEncodeSelf(e) } else { @@ -54565,7 +63579,7 @@ func (x *ComponentCondition) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Type.CodecEncodeSelf(e) } - if yyr4365 || yy2arr4365 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) x.Status.CodecEncodeSelf(e) } else { @@ -54574,11 +63588,11 @@ func (x *ComponentCondition) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Status.CodecEncodeSelf(e) } - if yyr4365 || yy2arr4365 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4365[2] { - yym4369 := z.EncBinary() - _ = yym4369 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) @@ -54587,23 +63601,23 @@ func (x *ComponentCondition) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4365[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("message")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4370 := z.EncBinary() - _ = yym4370 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) } } } - if yyr4365 || yy2arr4365 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if 
yyq4365[3] { - yym4372 := z.EncBinary() - _ = yym4372 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Error)) @@ -54612,19 +63626,19 @@ func (x *ComponentCondition) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4365[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("error")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4373 := z.EncBinary() - _ = yym4373 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Error)) } } } - if yyr4365 || yy2arr4365 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -54637,25 +63651,25 @@ func (x *ComponentCondition) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4374 := z.DecBinary() - _ = yym4374 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct4375 := r.ContainerType() - if yyct4375 == codecSelferValueTypeMap1234 { - yyl4375 := r.ReadMapStart() - if yyl4375 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl4375, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct4375 == codecSelferValueTypeArray1234 { - yyl4375 := r.ReadArrayStart() - if yyl4375 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl4375, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -54667,12 +63681,12 @@ func (x *ComponentCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys4376Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4376Slc - var yyhl4376 bool = l >= 0 - for yyj4376 := 0; ; yyj4376++ { - if yyhl4376 { - if yyj4376 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -54681,38 +63695,52 @@ func (x *ComponentCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4376Slc = r.DecodeBytes(yys4376Slc, true, true) - yys4376 := string(yys4376Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4376 { + switch yys3 { case "type": if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = ComponentConditionType(r.DecodeString()) + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = "" } else { - x.Status = ConditionStatus(r.DecodeString()) + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) } case "message": if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv6 := &x.Message + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "error": if r.TryDecodeAsNil() { 
x.Error = "" } else { - x.Error = string(r.DecodeString()) + yyv8 := &x.Error + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys4376) - } // end switch yys4376 - } // end for yyj4376 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -54720,16 +63748,16 @@ func (x *ComponentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4381 int - var yyb4381 bool - var yyhl4381 bool = l >= 0 - yyj4381++ - if yyhl4381 { - yyb4381 = yyj4381 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb4381 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb4381 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -54737,15 +63765,16 @@ func (x *ComponentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = ComponentConditionType(r.DecodeString()) + yyv11 := &x.Type + yyv11.CodecDecodeSelf(d) } - yyj4381++ - if yyhl4381 { - yyb4381 = yyj4381 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb4381 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb4381 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -54753,15 +63782,16 @@ func (x *ComponentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.Status = "" } else { - x.Status = ConditionStatus(r.DecodeString()) + yyv12 := &x.Status + yyv12.CodecDecodeSelf(d) } - yyj4381++ - if yyhl4381 { - yyb4381 = yyj4381 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb4381 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb4381 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -54769,15 +63799,21 @@ func (x *ComponentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv13 := &x.Message + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj4381++ - if yyhl4381 { - yyb4381 = yyj4381 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb4381 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb4381 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -54785,20 +63821,26 @@ func (x *ComponentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.Error = "" } else { - x.Error = string(r.DecodeString()) + yyv15 := &x.Error + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } for { - yyj4381++ - if yyhl4381 { - yyb4381 = yyj4381 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb4381 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb4381 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4381-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -54810,38 +63852,38 @@ func (x *ComponentStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym4386 := z.EncBinary() - _ = yym4386 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && 
z.EncExt(x) { } else { - yysep4387 := !z.EncBinary() - yy2arr4387 := z.EncBasicHandle().StructToArray - var yyq4387 [4]bool - _, _, _ = yysep4387, yyq4387, yy2arr4387 - const yyr4387 bool = false - yyq4387[0] = x.Kind != "" - yyq4387[1] = x.APIVersion != "" - yyq4387[2] = true - yyq4387[3] = len(x.Conditions) != 0 - var yynn4387 int - if yyr4387 || yy2arr4387 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = len(x.Conditions) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn4387 = 0 - for _, b := range yyq4387 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn4387++ + yynn2++ } } - r.EncodeMapStart(yynn4387) - yynn4387 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr4387 || yy2arr4387 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4387[0] { - yym4389 := z.EncBinary() - _ = yym4389 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -54850,23 +63892,23 @@ func (x *ComponentStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4387[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4390 := z.EncBinary() - _ = yym4390 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr4387 || yy2arr4387 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4387[1] { - yym4392 := z.EncBinary() - _ = yym4392 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -54875,43 +63917,55 @@ func (x *ComponentStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4387[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4393 := z.EncBinary() - _ = yym4393 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr4387 || yy2arr4387 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4387[2] { - yy4395 := &x.ObjectMeta - yy4395.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq4387[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4396 := &x.ObjectMeta - yy4396.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr4387 || yy2arr4387 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4387[3] { + if yyq2[3] { if x.Conditions == nil { r.EncodeNil() } else { - yym4398 := z.EncBinary() - 
_ = yym4398 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceComponentCondition(([]ComponentCondition)(x.Conditions), e) @@ -54921,15 +63975,15 @@ func (x *ComponentStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq4387[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("conditions")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Conditions == nil { r.EncodeNil() } else { - yym4399 := z.EncBinary() - _ = yym4399 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceComponentCondition(([]ComponentCondition)(x.Conditions), e) @@ -54937,7 +63991,7 @@ func (x *ComponentStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr4387 || yy2arr4387 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -54950,25 +64004,25 @@ func (x *ComponentStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4400 := z.DecBinary() - _ = yym4400 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct4401 := r.ContainerType() - if yyct4401 == codecSelferValueTypeMap1234 { - yyl4401 := r.ReadMapStart() - if yyl4401 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl4401, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct4401 == codecSelferValueTypeArray1234 { - yyl4401 := r.ReadArrayStart() - if yyl4401 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl4401, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -54980,12 +64034,12 @@ func (x *ComponentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys4402Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4402Slc - var yyhl4402 bool = l >= 0 - for yyj4402 := 0; ; yyj4402++ { - if yyhl4402 { - if yyj4402 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -54994,45 +64048,63 @@ func (x *ComponentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4402Slc = r.DecodeBytes(yys4402Slc, true, true) - yys4402 := string(yys4402Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4402 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - 
x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv4405 := &x.ObjectMeta - yyv4405.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "conditions": if r.TryDecodeAsNil() { x.Conditions = nil } else { - yyv4406 := &x.Conditions - yym4407 := z.DecBinary() - _ = yym4407 + yyv10 := &x.Conditions + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceComponentCondition((*[]ComponentCondition)(yyv4406), d) + h.decSliceComponentCondition((*[]ComponentCondition)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys4402) - } // end switch yys4402 - } // end for yyj4402 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -55040,16 +64112,16 @@ func (x *ComponentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4408 int - var yyb4408 bool - var yyhl4408 bool = l >= 0 - yyj4408++ - if yyhl4408 { - yyb4408 = yyj4408 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4408 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4408 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -55057,15 +64129,21 @@ func (x *ComponentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj4408++ - if yyhl4408 { - yyb4408 = yyj4408 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4408 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4408 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -55073,32 +64151,44 @@ func (x *ComponentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj4408++ - if yyhl4408 { - yyb4408 = yyj4408 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4408 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4408 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv4411 := &x.ObjectMeta - yyv4411.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj4408++ - if yyhl4408 { - yyb4408 = yyj4408 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4408 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4408 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -55106,26 +64196,26 @@ func (x *ComponentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Conditions = nil } else { - yyv4412 := &x.Conditions - yym4413 := z.DecBinary() - _ = yym4413 + yyv19 := &x.Conditions + yym20 := z.DecBinary() + _ = yym20 if 
false { } else { - h.decSliceComponentCondition((*[]ComponentCondition)(yyv4412), d) + h.decSliceComponentCondition((*[]ComponentCondition)(yyv19), d) } } for { - yyj4408++ - if yyhl4408 { - yyb4408 = yyj4408 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4408 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4408 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4408-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -55137,37 +64227,37 @@ func (x *ComponentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym4414 := z.EncBinary() - _ = yym4414 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep4415 := !z.EncBinary() - yy2arr4415 := z.EncBasicHandle().StructToArray - var yyq4415 [4]bool - _, _, _ = yysep4415, yyq4415, yy2arr4415 - const yyr4415 bool = false - yyq4415[0] = x.Kind != "" - yyq4415[1] = x.APIVersion != "" - yyq4415[2] = true - var yynn4415 int - if yyr4415 || yy2arr4415 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn4415 = 1 - for _, b := range yyq4415 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn4415++ + yynn2++ } } - r.EncodeMapStart(yynn4415) - yynn4415 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr4415 || yy2arr4415 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4415[0] { - yym4417 := z.EncBinary() - _ = yym4417 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -55176,23 +64266,23 @@ func (x *ComponentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4415[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4418 := z.EncBinary() - _ = yym4418 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr4415 || yy2arr4415 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4415[1] { - yym4420 := z.EncBinary() - _ = yym4420 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -55201,54 +64291,54 @@ func (x *ComponentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4415[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4421 := z.EncBinary() - _ = yym4421 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr4415 || yy2arr4415 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4415[2] { - yy4423 := &x.ListMeta - yym4424 := z.EncBinary() - _ = yym4424 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if 
z.HasExtensions() && z.EncExt(yy4423) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy4423) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq4415[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4425 := &x.ListMeta - yym4426 := z.EncBinary() - _ = yym4426 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy4425) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy4425) + z.EncFallback(yy12) } } } - if yyr4415 || yy2arr4415 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym4428 := z.EncBinary() - _ = yym4428 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceComponentStatus(([]ComponentStatus)(x.Items), e) @@ -55261,15 +64351,15 @@ func (x *ComponentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym4429 := z.EncBinary() - _ = yym4429 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceComponentStatus(([]ComponentStatus)(x.Items), e) } } } - if yyr4415 || yy2arr4415 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -55282,25 +64372,25 @@ func (x *ComponentStatusList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4430 := z.DecBinary() - _ = yym4430 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct4431 := r.ContainerType() - if yyct4431 == codecSelferValueTypeMap1234 { - yyl4431 := r.ReadMapStart() - if yyl4431 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl4431, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct4431 == codecSelferValueTypeArray1234 { - yyl4431 := r.ReadArrayStart() - if yyl4431 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl4431, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -55312,12 +64402,12 @@ func (x *ComponentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys4432Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4432Slc - var yyhl4432 bool = l >= 0 - for yyj4432 := 0; ; yyj4432++ { - if yyhl4432 { - if yyj4432 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -55326,51 +64416,63 @@ func (x *ComponentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4432Slc = r.DecodeBytes(yys4432Slc, true, true) - yys4432 := string(yys4432Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch 
yys4432 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv4435 := &x.ListMeta - yym4436 := z.DecBinary() - _ = yym4436 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv4435) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv4435, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv4437 := &x.Items - yym4438 := z.DecBinary() - _ = yym4438 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceComponentStatus((*[]ComponentStatus)(yyv4437), d) + h.decSliceComponentStatus((*[]ComponentStatus)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys4432) - } // end switch yys4432 - } // end for yyj4432 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -55378,16 +64480,16 @@ func (x *ComponentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4439 int - var yyb4439 bool - var yyhl4439 bool = l >= 0 - yyj4439++ - if yyhl4439 { - yyb4439 = yyj4439 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4439 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4439 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -55395,15 +64497,21 @@ func (x *ComponentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj4439++ - if yyhl4439 { - yyb4439 = yyj4439 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4439 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4439 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -55411,38 +64519,44 @@ func (x *ComponentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj4439++ - if yyhl4439 { - yyb4439 = yyj4439 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4439 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4439 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv4442 := &x.ListMeta - yym4443 := z.DecBinary() - _ = yym4443 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && 
z.DecExt(yyv4442) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv4442, false) + z.DecFallback(yyv17, false) } } - yyj4439++ - if yyhl4439 { - yyb4439 = yyj4439 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4439 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4439 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -55450,26 +64564,26 @@ func (x *ComponentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Items = nil } else { - yyv4444 := &x.Items - yym4445 := z.DecBinary() - _ = yym4445 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceComponentStatus((*[]ComponentStatus)(yyv4444), d) + h.decSliceComponentStatus((*[]ComponentStatus)(yyv19), d) } } for { - yyj4439++ - if yyhl4439 { - yyb4439 = yyj4439 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4439 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4439 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4439-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -55481,39 +64595,39 @@ func (x *DownwardAPIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym4446 := z.EncBinary() - _ = yym4446 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep4447 := !z.EncBinary() - yy2arr4447 := z.EncBasicHandle().StructToArray - var yyq4447 [2]bool - _, _, _ = yysep4447, yyq4447, yy2arr4447 - const yyr4447 bool = false - yyq4447[0] = len(x.Items) != 0 - yyq4447[1] = x.DefaultMode != nil - var yynn4447 int - if yyr4447 || yy2arr4447 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Items) != 0 + yyq2[1] = x.DefaultMode != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn4447 = 0 - for _, b := range yyq4447 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn4447++ + yynn2++ } } - r.EncodeMapStart(yynn4447) - yynn4447 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr4447 || yy2arr4447 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4447[0] { + if yyq2[0] { if x.Items == nil { r.EncodeNil() } else { - yym4449 := z.EncBinary() - _ = yym4449 + yym4 := z.EncBinary() + _ = yym4 if false { } else { h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) @@ -55523,15 +64637,15 @@ func (x *DownwardAPIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq4447[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("items")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Items == nil { r.EncodeNil() } else { - yym4450 := z.EncBinary() - _ = yym4450 + yym5 := z.EncBinary() + _ = yym5 if false { } else { h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) @@ -55539,42 +64653,42 @@ func (x *DownwardAPIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr4447 || yy2arr4447 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4447[1] { + if yyq2[1] { if x.DefaultMode == nil { r.EncodeNil() } else { - yy4452 := *x.DefaultMode - yym4453 := z.EncBinary() - _ = yym4453 + yy7 := 
*x.DefaultMode + yym8 := z.EncBinary() + _ = yym8 if false { } else { - r.EncodeInt(int64(yy4452)) + r.EncodeInt(int64(yy7)) } } } else { r.EncodeNil() } } else { - if yyq4447[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("defaultMode")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.DefaultMode == nil { r.EncodeNil() } else { - yy4454 := *x.DefaultMode - yym4455 := z.EncBinary() - _ = yym4455 + yy9 := *x.DefaultMode + yym10 := z.EncBinary() + _ = yym10 if false { } else { - r.EncodeInt(int64(yy4454)) + r.EncodeInt(int64(yy9)) } } } } - if yyr4447 || yy2arr4447 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -55587,25 +64701,25 @@ func (x *DownwardAPIVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4456 := z.DecBinary() - _ = yym4456 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct4457 := r.ContainerType() - if yyct4457 == codecSelferValueTypeMap1234 { - yyl4457 := r.ReadMapStart() - if yyl4457 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl4457, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct4457 == codecSelferValueTypeArray1234 { - yyl4457 := r.ReadArrayStart() - if yyl4457 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl4457, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -55617,12 +64731,12 @@ func (x *DownwardAPIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys4458Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4458Slc - var yyhl4458 bool = l >= 0 - for yyj4458 := 0; ; yyj4458++ { - if yyhl4458 { - if yyj4458 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -55631,20 +64745,20 @@ func (x *DownwardAPIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Dec } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4458Slc = r.DecodeBytes(yys4458Slc, true, true) - yys4458 := string(yys4458Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4458 { + switch yys3 { case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv4459 := &x.Items - yym4460 := z.DecBinary() - _ = yym4460 + yyv4 := &x.Items + yym5 := z.DecBinary() + _ = yym5 if false { } else { - h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv4459), d) + h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv4), d) } } case "defaultMode": @@ -55656,17 +64770,17 @@ func (x *DownwardAPIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Dec if x.DefaultMode == nil { x.DefaultMode = new(int32) } - yym4462 := z.DecBinary() - _ = yym4462 + yym7 := z.DecBinary() + _ = yym7 if false { } else { 
*((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) } } default: - z.DecStructFieldNotFound(-1, yys4458) - } // end switch yys4458 - } // end for yyj4458 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -55674,16 +64788,16 @@ func (x *DownwardAPIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.D var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4463 int - var yyb4463 bool - var yyhl4463 bool = l >= 0 - yyj4463++ - if yyhl4463 { - yyb4463 = yyj4463 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb4463 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb4463 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -55691,21 +64805,21 @@ func (x *DownwardAPIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Items = nil } else { - yyv4464 := &x.Items - yym4465 := z.DecBinary() - _ = yym4465 + yyv9 := &x.Items + yym10 := z.DecBinary() + _ = yym10 if false { } else { - h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv4464), d) + h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv9), d) } } - yyj4463++ - if yyhl4463 { - yyb4463 = yyj4463 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb4463 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb4463 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -55718,25 +64832,25 @@ func (x *DownwardAPIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.D if x.DefaultMode == nil { x.DefaultMode = new(int32) } - yym4467 := z.DecBinary() - _ = yym4467 + yym12 := z.DecBinary() + _ = yym12 if false { } else { *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) } } for { - yyj4463++ - if yyhl4463 { - yyb4463 = yyj4463 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb4463 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb4463 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4463-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -55748,36 +64862,36 @@ func (x *DownwardAPIVolumeFile) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym4468 := z.EncBinary() - _ = yym4468 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep4469 := !z.EncBinary() - yy2arr4469 := z.EncBasicHandle().StructToArray - var yyq4469 [4]bool - _, _, _ = yysep4469, yyq4469, yy2arr4469 - const yyr4469 bool = false - yyq4469[1] = x.FieldRef != nil - yyq4469[2] = x.ResourceFieldRef != nil - yyq4469[3] = x.Mode != nil - var yynn4469 int - if yyr4469 || yy2arr4469 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FieldRef != nil + yyq2[2] = x.ResourceFieldRef != nil + yyq2[3] = x.Mode != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn4469 = 1 - for _, b := range yyq4469 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn4469++ + yynn2++ } } - r.EncodeMapStart(yynn4469) - yynn4469 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr4469 || yy2arr4469 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4471 := z.EncBinary() - _ = yym4471 + yym4 := z.EncBinary() + _ 
= yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Path)) @@ -55786,16 +64900,16 @@ func (x *DownwardAPIVolumeFile) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("path")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4472 := z.EncBinary() - _ = yym4472 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Path)) } } - if yyr4469 || yy2arr4469 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4469[1] { + if yyq2[1] { if x.FieldRef == nil { r.EncodeNil() } else { @@ -55805,7 +64919,7 @@ func (x *DownwardAPIVolumeFile) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq4469[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("fieldRef")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -55816,9 +64930,9 @@ func (x *DownwardAPIVolumeFile) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr4469 || yy2arr4469 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4469[2] { + if yyq2[2] { if x.ResourceFieldRef == nil { r.EncodeNil() } else { @@ -55828,7 +64942,7 @@ func (x *DownwardAPIVolumeFile) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq4469[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -55839,42 +64953,42 @@ func (x *DownwardAPIVolumeFile) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr4469 || yy2arr4469 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4469[3] { + if yyq2[3] { if x.Mode == nil { r.EncodeNil() } else { - yy4476 := *x.Mode - yym4477 := z.EncBinary() - _ = yym4477 + yy13 := *x.Mode + yym14 := z.EncBinary() + _ = yym14 if false { } else { - r.EncodeInt(int64(yy4476)) + r.EncodeInt(int64(yy13)) } } } else { r.EncodeNil() } } else { - if yyq4469[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("mode")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Mode == nil { r.EncodeNil() } else { - yy4478 := *x.Mode - yym4479 := z.EncBinary() - _ = yym4479 + yy15 := *x.Mode + yym16 := z.EncBinary() + _ = yym16 if false { } else { - r.EncodeInt(int64(yy4478)) + r.EncodeInt(int64(yy15)) } } } } - if yyr4469 || yy2arr4469 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -55887,25 +65001,25 @@ func (x *DownwardAPIVolumeFile) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4480 := z.DecBinary() - _ = yym4480 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct4481 := r.ContainerType() - if yyct4481 == codecSelferValueTypeMap1234 { - yyl4481 := r.ReadMapStart() - if yyl4481 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl4481, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct4481 == 
codecSelferValueTypeArray1234 { - yyl4481 := r.ReadArrayStart() - if yyl4481 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl4481, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -55917,12 +65031,12 @@ func (x *DownwardAPIVolumeFile) codecDecodeSelfFromMap(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys4482Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4482Slc - var yyhl4482 bool = l >= 0 - for yyj4482 := 0; ; yyj4482++ { - if yyhl4482 { - if yyj4482 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -55931,15 +65045,21 @@ func (x *DownwardAPIVolumeFile) codecDecodeSelfFromMap(l int, d *codec1978.Decod } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4482Slc = r.DecodeBytes(yys4482Slc, true, true) - yys4482 := string(yys4482Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4482 { + switch yys3 { case "path": if r.TryDecodeAsNil() { x.Path = "" } else { - x.Path = string(r.DecodeString()) + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "fieldRef": if r.TryDecodeAsNil() { @@ -55972,17 +65092,17 @@ func (x *DownwardAPIVolumeFile) codecDecodeSelfFromMap(l int, d *codec1978.Decod if x.Mode == nil { x.Mode = new(int32) } - yym4487 := z.DecBinary() - _ = yym4487 + yym9 := z.DecBinary() + _ = yym9 if false { } else { *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) } } default: - z.DecStructFieldNotFound(-1, yys4482) - } // end switch yys4482 - } // end for yyj4482 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -55990,16 +65110,16 @@ func (x *DownwardAPIVolumeFile) codecDecodeSelfFromArray(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4488 int - var yyb4488 bool - var yyhl4488 bool = l >= 0 - yyj4488++ - if yyhl4488 { - yyb4488 = yyj4488 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb4488 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb4488 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -56007,15 +65127,21 @@ func (x *DownwardAPIVolumeFile) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.Path = "" } else { - x.Path = string(r.DecodeString()) + yyv11 := &x.Path + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } - yyj4488++ - if yyhl4488 { - yyb4488 = yyj4488 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb4488 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb4488 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -56030,13 +65156,13 @@ func (x *DownwardAPIVolumeFile) codecDecodeSelfFromArray(l int, d *codec1978.Dec } x.FieldRef.CodecDecodeSelf(d) } - yyj4488++ - if yyhl4488 { - yyb4488 = yyj4488 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb4488 = 
r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb4488 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -56051,13 +65177,13 @@ func (x *DownwardAPIVolumeFile) codecDecodeSelfFromArray(l int, d *codec1978.Dec } x.ResourceFieldRef.CodecDecodeSelf(d) } - yyj4488++ - if yyhl4488 { - yyb4488 = yyj4488 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb4488 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb4488 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -56070,25 +65196,214 @@ func (x *DownwardAPIVolumeFile) codecDecodeSelfFromArray(l int, d *codec1978.Dec if x.Mode == nil { x.Mode = new(int32) } - yym4493 := z.DecBinary() - _ = yym4493 + yym16 := z.DecBinary() + _ = yym16 if false { } else { *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) } } for { - yyj4488++ - if yyhl4488 { - yyb4488 = yyj4488 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb4488 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb4488 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4488-1, "") + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DownwardAPIProjection) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Items) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Items == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DownwardAPIProjection) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DownwardAPIProjection) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv4 := &x.Items + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DownwardAPIProjection) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv7 := &x.Items + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -56100,38 +65415,38 @@ func (x *SecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym4494 := z.EncBinary() - _ = yym4494 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep4495 := !z.EncBinary() - yy2arr4495 := z.EncBasicHandle().StructToArray - var yyq4495 [6]bool - _, _, _ = yysep4495, yyq4495, yy2arr4495 - const yyr4495 bool = false - yyq4495[0] = x.Capabilities != nil - yyq4495[1] = x.Privileged != nil - yyq4495[2] = x.SELinuxOptions != nil - yyq4495[3] = x.RunAsUser != nil - yyq4495[4] = x.RunAsNonRoot != nil - yyq4495[5] = x.ReadOnlyRootFilesystem != nil - var yynn4495 int - if yyr4495 || yy2arr4495 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Capabilities != nil + yyq2[1] = x.Privileged != nil + yyq2[2] = x.SELinuxOptions != nil + yyq2[3] = x.RunAsUser != nil + yyq2[4] = x.RunAsNonRoot != nil + yyq2[5] = x.ReadOnlyRootFilesystem != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(6) } else { - yynn4495 = 0 - for _, b := range yyq4495 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn4495++ + yynn2++ } } - r.EncodeMapStart(yynn4495) - yynn4495 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr4495 || yy2arr4495 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4495[0] { + if yyq2[0] { if x.Capabilities == nil { r.EncodeNil() } else { @@ 
-56141,7 +65456,7 @@ func (x *SecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq4495[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("capabilities")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -56152,44 +65467,44 @@ func (x *SecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr4495 || yy2arr4495 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4495[1] { + if yyq2[1] { if x.Privileged == nil { r.EncodeNil() } else { - yy4498 := *x.Privileged - yym4499 := z.EncBinary() - _ = yym4499 + yy7 := *x.Privileged + yym8 := z.EncBinary() + _ = yym8 if false { } else { - r.EncodeBool(bool(yy4498)) + r.EncodeBool(bool(yy7)) } } } else { r.EncodeNil() } } else { - if yyq4495[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("privileged")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Privileged == nil { r.EncodeNil() } else { - yy4500 := *x.Privileged - yym4501 := z.EncBinary() - _ = yym4501 + yy9 := *x.Privileged + yym10 := z.EncBinary() + _ = yym10 if false { } else { - r.EncodeBool(bool(yy4500)) + r.EncodeBool(bool(yy9)) } } } } - if yyr4495 || yy2arr4495 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4495[2] { + if yyq2[2] { if x.SELinuxOptions == nil { r.EncodeNil() } else { @@ -56199,7 +65514,7 @@ func (x *SecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq4495[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -56210,112 +65525,112 @@ func (x *SecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr4495 || yy2arr4495 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4495[3] { + if yyq2[3] { if x.RunAsUser == nil { r.EncodeNil() } else { - yy4504 := *x.RunAsUser - yym4505 := z.EncBinary() - _ = yym4505 + yy15 := *x.RunAsUser + yym16 := z.EncBinary() + _ = yym16 if false { } else { - r.EncodeInt(int64(yy4504)) + r.EncodeInt(int64(yy15)) } } } else { r.EncodeNil() } } else { - if yyq4495[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.RunAsUser == nil { r.EncodeNil() } else { - yy4506 := *x.RunAsUser - yym4507 := z.EncBinary() - _ = yym4507 + yy17 := *x.RunAsUser + yym18 := z.EncBinary() + _ = yym18 if false { } else { - r.EncodeInt(int64(yy4506)) + r.EncodeInt(int64(yy17)) } } } } - if yyr4495 || yy2arr4495 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4495[4] { + if yyq2[4] { if x.RunAsNonRoot == nil { r.EncodeNil() } else { - yy4509 := *x.RunAsNonRoot - yym4510 := z.EncBinary() - _ = yym4510 + yy20 := *x.RunAsNonRoot + yym21 := z.EncBinary() + _ = yym21 if false { } else { - r.EncodeBool(bool(yy4509)) + r.EncodeBool(bool(yy20)) } } } else { r.EncodeNil() } } else { - if yyq4495[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("runAsNonRoot")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.RunAsNonRoot == nil { r.EncodeNil() } else { - yy4511 := 
*x.RunAsNonRoot - yym4512 := z.EncBinary() - _ = yym4512 + yy22 := *x.RunAsNonRoot + yym23 := z.EncBinary() + _ = yym23 if false { } else { - r.EncodeBool(bool(yy4511)) + r.EncodeBool(bool(yy22)) } } } } - if yyr4495 || yy2arr4495 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4495[5] { + if yyq2[5] { if x.ReadOnlyRootFilesystem == nil { r.EncodeNil() } else { - yy4514 := *x.ReadOnlyRootFilesystem - yym4515 := z.EncBinary() - _ = yym4515 + yy25 := *x.ReadOnlyRootFilesystem + yym26 := z.EncBinary() + _ = yym26 if false { } else { - r.EncodeBool(bool(yy4514)) + r.EncodeBool(bool(yy25)) } } } else { r.EncodeNil() } } else { - if yyq4495[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("readOnlyRootFilesystem")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.ReadOnlyRootFilesystem == nil { r.EncodeNil() } else { - yy4516 := *x.ReadOnlyRootFilesystem - yym4517 := z.EncBinary() - _ = yym4517 + yy27 := *x.ReadOnlyRootFilesystem + yym28 := z.EncBinary() + _ = yym28 if false { } else { - r.EncodeBool(bool(yy4516)) + r.EncodeBool(bool(yy27)) } } } } - if yyr4495 || yy2arr4495 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -56328,25 +65643,25 @@ func (x *SecurityContext) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4518 := z.DecBinary() - _ = yym4518 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct4519 := r.ContainerType() - if yyct4519 == codecSelferValueTypeMap1234 { - yyl4519 := r.ReadMapStart() - if yyl4519 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl4519, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct4519 == codecSelferValueTypeArray1234 { - yyl4519 := r.ReadArrayStart() - if yyl4519 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl4519, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -56358,12 +65673,12 @@ func (x *SecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys4520Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4520Slc - var yyhl4520 bool = l >= 0 - for yyj4520 := 0; ; yyj4520++ { - if yyhl4520 { - if yyj4520 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -56372,10 +65687,10 @@ func (x *SecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4520Slc = r.DecodeBytes(yys4520Slc, true, true) - yys4520 := string(yys4520Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4520 { + switch yys3 { case "capabilities": if r.TryDecodeAsNil() { if x.Capabilities != nil { @@ -56396,8 
+65711,8 @@ func (x *SecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.Privileged == nil { x.Privileged = new(bool) } - yym4523 := z.DecBinary() - _ = yym4523 + yym6 := z.DecBinary() + _ = yym6 if false { } else { *((*bool)(x.Privileged)) = r.DecodeBool() @@ -56423,8 +65738,8 @@ func (x *SecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.RunAsUser == nil { x.RunAsUser = new(int64) } - yym4526 := z.DecBinary() - _ = yym4526 + yym9 := z.DecBinary() + _ = yym9 if false { } else { *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) @@ -56439,8 +65754,8 @@ func (x *SecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.RunAsNonRoot == nil { x.RunAsNonRoot = new(bool) } - yym4528 := z.DecBinary() - _ = yym4528 + yym11 := z.DecBinary() + _ = yym11 if false { } else { *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() @@ -56455,17 +65770,17 @@ func (x *SecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.ReadOnlyRootFilesystem == nil { x.ReadOnlyRootFilesystem = new(bool) } - yym4530 := z.DecBinary() - _ = yym4530 + yym13 := z.DecBinary() + _ = yym13 if false { } else { *((*bool)(x.ReadOnlyRootFilesystem)) = r.DecodeBool() } } default: - z.DecStructFieldNotFound(-1, yys4520) - } // end switch yys4520 - } // end for yyj4520 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -56473,16 +65788,16 @@ func (x *SecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4531 int - var yyb4531 bool - var yyhl4531 bool = l >= 0 - yyj4531++ - if yyhl4531 { - yyb4531 = yyj4531 > l + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb4531 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb4531 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -56497,13 +65812,13 @@ func (x *SecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) } x.Capabilities.CodecDecodeSelf(d) } - yyj4531++ - if yyhl4531 { - yyb4531 = yyj4531 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb4531 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb4531 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -56516,20 +65831,20 @@ func (x *SecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if x.Privileged == nil { x.Privileged = new(bool) } - yym4534 := z.DecBinary() - _ = yym4534 + yym17 := z.DecBinary() + _ = yym17 if false { } else { *((*bool)(x.Privileged)) = r.DecodeBool() } } - yyj4531++ - if yyhl4531 { - yyb4531 = yyj4531 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb4531 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb4531 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -56544,13 +65859,13 @@ func (x *SecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) } x.SELinuxOptions.CodecDecodeSelf(d) } - yyj4531++ - if yyhl4531 { - yyb4531 = yyj4531 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb4531 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb4531 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -56563,20 +65878,20 @@ func (x *SecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if x.RunAsUser == nil { x.RunAsUser = new(int64) } - yym4537 := 
z.DecBinary() - _ = yym4537 + yym20 := z.DecBinary() + _ = yym20 if false { } else { *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) } } - yyj4531++ - if yyhl4531 { - yyb4531 = yyj4531 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb4531 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb4531 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -56589,20 +65904,20 @@ func (x *SecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if x.RunAsNonRoot == nil { x.RunAsNonRoot = new(bool) } - yym4539 := z.DecBinary() - _ = yym4539 + yym22 := z.DecBinary() + _ = yym22 if false { } else { *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() } } - yyj4531++ - if yyhl4531 { - yyb4531 = yyj4531 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb4531 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb4531 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -56615,25 +65930,25 @@ func (x *SecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if x.ReadOnlyRootFilesystem == nil { x.ReadOnlyRootFilesystem = new(bool) } - yym4541 := z.DecBinary() - _ = yym4541 + yym24 := z.DecBinary() + _ = yym24 if false { } else { *((*bool)(x.ReadOnlyRootFilesystem)) = r.DecodeBool() } } for { - yyj4531++ - if yyhl4531 { - yyb4531 = yyj4531 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb4531 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb4531 { + if yyb14 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4531-1, "") + z.DecStructFieldNotFound(yyj14-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -56645,38 +65960,38 @@ func (x *SELinuxOptions) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym4542 := z.EncBinary() - _ = yym4542 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep4543 := !z.EncBinary() - yy2arr4543 := z.EncBasicHandle().StructToArray - var yyq4543 [4]bool - _, _, _ = yysep4543, yyq4543, yy2arr4543 - const yyr4543 bool = false - yyq4543[0] = x.User != "" - yyq4543[1] = x.Role != "" - yyq4543[2] = x.Type != "" - yyq4543[3] = x.Level != "" - var yynn4543 int - if yyr4543 || yy2arr4543 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.User != "" + yyq2[1] = x.Role != "" + yyq2[2] = x.Type != "" + yyq2[3] = x.Level != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn4543 = 0 - for _, b := range yyq4543 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn4543++ + yynn2++ } } - r.EncodeMapStart(yynn4543) - yynn4543 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr4543 || yy2arr4543 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4543[0] { - yym4545 := z.EncBinary() - _ = yym4545 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.User)) @@ -56685,23 +66000,23 @@ func (x *SELinuxOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4543[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("user")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4546 := z.EncBinary() - _ = yym4546 + yym5 := z.EncBinary() + _ = yym5 if 
false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.User)) } } } - if yyr4543 || yy2arr4543 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4543[1] { - yym4548 := z.EncBinary() - _ = yym4548 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Role)) @@ -56710,23 +66025,23 @@ func (x *SELinuxOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4543[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("role")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4549 := z.EncBinary() - _ = yym4549 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Role)) } } } - if yyr4543 || yy2arr4543 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4543[2] { - yym4551 := z.EncBinary() - _ = yym4551 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Type)) @@ -56735,23 +66050,23 @@ func (x *SELinuxOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4543[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("type")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4552 := z.EncBinary() - _ = yym4552 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Type)) } } } - if yyr4543 || yy2arr4543 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4543[3] { - yym4554 := z.EncBinary() - _ = yym4554 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Level)) @@ -56760,19 +66075,19 @@ func (x *SELinuxOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4543[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("level")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4555 := z.EncBinary() - _ = yym4555 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Level)) } } } - if yyr4543 || yy2arr4543 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -56785,25 +66100,25 @@ func (x *SELinuxOptions) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4556 := z.DecBinary() - _ = yym4556 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct4557 := r.ContainerType() - if yyct4557 == codecSelferValueTypeMap1234 { - yyl4557 := r.ReadMapStart() - if yyl4557 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl4557, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct4557 == codecSelferValueTypeArray1234 { - yyl4557 := r.ReadArrayStart() - if yyl4557 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if 
yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl4557, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -56815,12 +66130,12 @@ func (x *SELinuxOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys4558Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4558Slc - var yyhl4558 bool = l >= 0 - for yyj4558 := 0; ; yyj4558++ { - if yyhl4558 { - if yyj4558 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -56829,38 +66144,62 @@ func (x *SELinuxOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4558Slc = r.DecodeBytes(yys4558Slc, true, true) - yys4558 := string(yys4558Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4558 { + switch yys3 { case "user": if r.TryDecodeAsNil() { x.User = "" } else { - x.User = string(r.DecodeString()) + yyv4 := &x.User + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "role": if r.TryDecodeAsNil() { x.Role = "" } else { - x.Role = string(r.DecodeString()) + yyv6 := &x.Role + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "type": if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = string(r.DecodeString()) + yyv8 := &x.Type + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } case "level": if r.TryDecodeAsNil() { x.Level = "" } else { - x.Level = string(r.DecodeString()) + yyv10 := &x.Level + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys4558) - } // end switch yys4558 - } // end for yyj4558 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -56868,16 +66207,16 @@ func (x *SELinuxOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4563 int - var yyb4563 bool - var yyhl4563 bool = l >= 0 - yyj4563++ - if yyhl4563 { - yyb4563 = yyj4563 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4563 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4563 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -56885,15 +66224,21 @@ func (x *SELinuxOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.User = "" } else { - x.User = string(r.DecodeString()) + yyv13 := &x.User + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj4563++ - if yyhl4563 { - yyb4563 = yyj4563 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4563 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4563 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -56901,15 +66246,21 @@ func (x *SELinuxOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { 
if r.TryDecodeAsNil() { x.Role = "" } else { - x.Role = string(r.DecodeString()) + yyv15 := &x.Role + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj4563++ - if yyhl4563 { - yyb4563 = yyj4563 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4563 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4563 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -56917,15 +66268,21 @@ func (x *SELinuxOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = string(r.DecodeString()) + yyv17 := &x.Type + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } } - yyj4563++ - if yyhl4563 { - yyb4563 = yyj4563 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4563 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4563 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -56933,20 +66290,26 @@ func (x *SELinuxOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Level = "" } else { - x.Level = string(r.DecodeString()) + yyv19 := &x.Level + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } } for { - yyj4563++ - if yyhl4563 { - yyb4563 = yyj4563 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb4563 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb4563 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4563-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -56958,37 +66321,37 @@ func (x *RangeAllocation) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym4568 := z.EncBinary() - _ = yym4568 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep4569 := !z.EncBinary() - yy2arr4569 := z.EncBasicHandle().StructToArray - var yyq4569 [5]bool - _, _, _ = yysep4569, yyq4569, yy2arr4569 - const yyr4569 bool = false - yyq4569[0] = x.Kind != "" - yyq4569[1] = x.APIVersion != "" - yyq4569[2] = true - var yynn4569 int - if yyr4569 || yy2arr4569 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn4569 = 2 - for _, b := range yyq4569 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn4569++ + yynn2++ } } - r.EncodeMapStart(yynn4569) - yynn4569 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr4569 || yy2arr4569 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4569[0] { - yym4571 := z.EncBinary() - _ = yym4571 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -56997,23 +66360,23 @@ func (x *RangeAllocation) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4569[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4572 := z.EncBinary() - _ = yym4572 + yym5 := z.EncBinary() + _ = yym5 if false 
{ } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr4569 || yy2arr4569 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4569[1] { - yym4574 := z.EncBinary() - _ = yym4574 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -57022,39 +66385,51 @@ func (x *RangeAllocation) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq4569[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4575 := z.EncBinary() - _ = yym4575 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr4569 || yy2arr4569 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq4569[2] { - yy4577 := &x.ObjectMeta - yy4577.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq4569[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4578 := &x.ObjectMeta - yy4578.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr4569 || yy2arr4569 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4580 := z.EncBinary() - _ = yym4580 + yym15 := z.EncBinary() + _ = yym15 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Range)) @@ -57063,20 +66438,20 @@ func (x *RangeAllocation) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("range")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym4581 := z.EncBinary() - _ = yym4581 + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Range)) } } - if yyr4569 || yy2arr4569 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Data == nil { r.EncodeNil() } else { - yym4583 := z.EncBinary() - _ = yym4583 + yym18 := z.EncBinary() + _ = yym18 if false { } else { r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) @@ -57089,15 +66464,15 @@ func (x *RangeAllocation) CodecEncodeSelf(e *codec1978.Encoder) { if x.Data == nil { r.EncodeNil() } else { - yym4584 := z.EncBinary() - _ = yym4584 + yym19 := z.EncBinary() + _ = yym19 if false { } else { r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) } } } - if yyr4569 || yy2arr4569 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -57110,25 +66485,25 @@ func (x *RangeAllocation) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym4585 := z.DecBinary() - _ = yym4585 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct4586 := r.ContainerType() - if yyct4586 == 
codecSelferValueTypeMap1234 { - yyl4586 := r.ReadMapStart() - if yyl4586 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl4586, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct4586 == codecSelferValueTypeArray1234 { - yyl4586 := r.ReadArrayStart() - if yyl4586 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl4586, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -57140,12 +66515,12 @@ func (x *RangeAllocation) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys4587Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys4587Slc - var yyhl4587 bool = l >= 0 - for yyj4587 := 0; ; yyj4587++ { - if yyhl4587 { - if yyj4587 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -57154,51 +66529,75 @@ func (x *RangeAllocation) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys4587Slc = r.DecodeBytes(yys4587Slc, true, true) - yys4587 := string(yys4587Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys4587 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv4590 := &x.ObjectMeta - yyv4590.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "range": if r.TryDecodeAsNil() { x.Range = "" } else { - x.Range = string(r.DecodeString()) + yyv10 := &x.Range + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } case "data": if r.TryDecodeAsNil() { x.Data = nil } else { - yyv4592 := &x.Data - yym4593 := z.DecBinary() - _ = yym4593 + yyv12 := &x.Data + yym13 := z.DecBinary() + _ = yym13 if false { } else { - *yyv4592 = r.DecodeBytes(*(*[]byte)(yyv4592), false, false) + *yyv12 = r.DecodeBytes(*(*[]byte)(yyv12), false, false) } } default: - z.DecStructFieldNotFound(-1, yys4587) - } // end switch yys4587 - } // end for yyj4587 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -57206,16 +66605,16 @@ func (x *RangeAllocation) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj4594 int - var yyb4594 bool - var yyhl4594 
bool = l >= 0 - yyj4594++ - if yyhl4594 { - yyb4594 = yyj4594 > l + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb4594 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb4594 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -57223,15 +66622,21 @@ func (x *RangeAllocation) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv15 := &x.Kind + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj4594++ - if yyhl4594 { - yyb4594 = yyj4594 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb4594 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb4594 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -57239,32 +66644,44 @@ func (x *RangeAllocation) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv17 := &x.APIVersion + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } } - yyj4594++ - if yyhl4594 { - yyb4594 = yyj4594 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb4594 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb4594 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv4597 := &x.ObjectMeta - yyv4597.CodecDecodeSelf(d) + yyv19 := &x.ObjectMeta + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else { + z.DecFallback(yyv19, false) + } } - yyj4594++ - if yyhl4594 { - yyb4594 = yyj4594 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb4594 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb4594 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -57272,15 +66689,21 @@ func (x *RangeAllocation) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Range = "" } else { - x.Range = string(r.DecodeString()) + yyv21 := &x.Range + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } } - yyj4594++ - if yyhl4594 { - yyb4594 = yyj4594 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb4594 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb4594 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -57288,125 +66711,152 @@ func (x *RangeAllocation) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Data = nil } else { - yyv4599 := &x.Data - yym4600 := z.DecBinary() - _ = yym4600 + yyv23 := &x.Data + yym24 := z.DecBinary() + _ = yym24 if false { } else { - *yyv4599 = r.DecodeBytes(*(*[]byte)(yyv4599), false, false) + *yyv23 = r.DecodeBytes(*(*[]byte)(yyv23), false, false) } } for { - yyj4594++ - if yyhl4594 { - yyb4594 = yyj4594 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb4594 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb4594 { + if yyb14 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj4594-1, "") + z.DecStructFieldNotFound(yyj14-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) 
encSliceOwnerReference(v []OwnerReference, e *codec1978.Encoder) { +func (x codecSelfer1234) encSlicev1_OwnerReference(v []pkg2_v1.OwnerReference, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4601 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4602 := &yyv4601 - yy4602.CodecEncodeSelf(e) + yy2 := &yyv1 + yym3 := z.EncBinary() + _ = yym3 + if false { + } else if z.HasExtensions() && z.EncExt(yy2) { + } else { + z.EncFallback(yy2) + } } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceOwnerReference(v *[]OwnerReference, d *codec1978.Decoder) { +func (x codecSelfer1234) decSlicev1_OwnerReference(v *[]pkg2_v1.OwnerReference, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4603 := *v - yyh4603, yyl4603 := z.DecSliceHelperStart() - var yyc4603 bool - if yyl4603 == 0 { - if yyv4603 == nil { - yyv4603 = []OwnerReference{} - yyc4603 = true - } else if len(yyv4603) != 0 { - yyv4603 = yyv4603[:0] - yyc4603 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg2_v1.OwnerReference{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4603 > 0 { - var yyrr4603, yyrl4603 int - var yyrt4603 bool - if yyl4603 > cap(yyv4603) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4603 := len(yyv4603) > 0 - yyv24603 := yyv4603 - yyrl4603, yyrt4603 = z.DecInferLen(yyl4603, z.DecBasicHandle().MaxInitLen, 72) - if yyrt4603 { - if yyrl4603 <= cap(yyv4603) { - yyv4603 = yyv4603[:yyrl4603] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 80) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4603 = make([]OwnerReference, yyrl4603) + yyv1 = make([]pkg2_v1.OwnerReference, yyrl1) } } else { - yyv4603 = make([]OwnerReference, yyrl4603) + yyv1 = make([]pkg2_v1.OwnerReference, yyrl1) } - yyc4603 = true - yyrr4603 = len(yyv4603) - if yyrg4603 { - copy(yyv4603, yyv24603) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4603 != len(yyv4603) { - yyv4603 = yyv4603[:yyl4603] - yyc4603 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4603 := 0 - for ; yyj4603 < yyrr4603; yyj4603++ { - yyh4603.ElemContainerState(yyj4603) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4603[yyj4603] = OwnerReference{} + yyv1[yyj1] = pkg2_v1.OwnerReference{} } else { - yyv4604 := &yyv4603[yyj4603] - yyv4604.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else if z.HasExtensions() && z.DecExt(yyv2) { + } else { + z.DecFallback(yyv2, false) + } } } - if yyrt4603 { - for ; yyj4603 < yyl4603; yyj4603++ { - yyv4603 = append(yyv4603, OwnerReference{}) - yyh4603.ElemContainerState(yyj4603) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, pkg2_v1.OwnerReference{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4603[yyj4603] = OwnerReference{} + yyv1[yyj1] = pkg2_v1.OwnerReference{} } else { - yyv4605 := &yyv4603[yyj4603] - yyv4605.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false 
{ + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } } } } } else { - yyj4603 := 0 - for ; !r.CheckBreak(); yyj4603++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4603 >= len(yyv4603) { - yyv4603 = append(yyv4603, OwnerReference{}) // var yyz4603 OwnerReference - yyc4603 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, pkg2_v1.OwnerReference{}) // var yyz1 pkg2_v1.OwnerReference + yyc1 = true } - yyh4603.ElemContainerState(yyj4603) - if yyj4603 < len(yyv4603) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4603[yyj4603] = OwnerReference{} + yyv1[yyj1] = pkg2_v1.OwnerReference{} } else { - yyv4606 := &yyv4603[yyj4603] - yyv4606.CodecDecodeSelf(d) + yyv6 := &yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else { + z.DecFallback(yyv6, false) + } } } else { @@ -57414,17 +66864,17 @@ func (x codecSelfer1234) decSliceOwnerReference(v *[]OwnerReference, d *codec197 } } - if yyj4603 < len(yyv4603) { - yyv4603 = yyv4603[:yyj4603] - yyc4603 = true - } else if yyj4603 == 0 && yyv4603 == nil { - yyv4603 = []OwnerReference{} - yyc4603 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg2_v1.OwnerReference{} + yyc1 = true } } - yyh4603.End() - if yyc4603 { - *v = yyv4603 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -57433,9 +66883,9 @@ func (x codecSelfer1234) encSlicePersistentVolumeAccessMode(v []PersistentVolume z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4607 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv4607.CodecEncodeSelf(e) + yyv1.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -57445,75 +66895,81 @@ func (x codecSelfer1234) decSlicePersistentVolumeAccessMode(v *[]PersistentVolum z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4608 := *v - yyh4608, yyl4608 := z.DecSliceHelperStart() - var yyc4608 bool - if yyl4608 == 0 { - if yyv4608 == nil { - yyv4608 = []PersistentVolumeAccessMode{} - yyc4608 = true - } else if len(yyv4608) != 0 { - yyv4608 = yyv4608[:0] - yyc4608 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PersistentVolumeAccessMode{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4608 > 0 { - var yyrr4608, yyrl4608 int - var yyrt4608 bool - if yyl4608 > cap(yyv4608) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrl4608, yyrt4608 = z.DecInferLen(yyl4608, z.DecBasicHandle().MaxInitLen, 16) - if yyrt4608 { - if yyrl4608 <= cap(yyv4608) { - yyv4608 = yyv4608[:yyrl4608] + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4608 = make([]PersistentVolumeAccessMode, yyrl4608) + yyv1 = make([]PersistentVolumeAccessMode, yyrl1) } } else { - yyv4608 = make([]PersistentVolumeAccessMode, yyrl4608) + yyv1 = make([]PersistentVolumeAccessMode, yyrl1) } - yyc4608 = true - yyrr4608 = len(yyv4608) - } else if yyl4608 != len(yyv4608) { - yyv4608 = yyv4608[:yyl4608] - yyc4608 = true + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4608 := 0 - for ; 
yyj4608 < yyrr4608; yyj4608++ { - yyh4608.ElemContainerState(yyj4608) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4608[yyj4608] = "" + yyv1[yyj1] = "" } else { - yyv4608[yyj4608] = PersistentVolumeAccessMode(r.DecodeString()) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4608 { - for ; yyj4608 < yyl4608; yyj4608++ { - yyv4608 = append(yyv4608, "") - yyh4608.ElemContainerState(yyj4608) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4608[yyj4608] = "" + yyv1[yyj1] = "" } else { - yyv4608[yyj4608] = PersistentVolumeAccessMode(r.DecodeString()) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4608 := 0 - for ; !r.CheckBreak(); yyj4608++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4608 >= len(yyv4608) { - yyv4608 = append(yyv4608, "") // var yyz4608 PersistentVolumeAccessMode - yyc4608 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 PersistentVolumeAccessMode + yyc1 = true } - yyh4608.ElemContainerState(yyj4608) - if yyj4608 < len(yyv4608) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4608[yyj4608] = "" + yyv1[yyj1] = "" } else { - yyv4608[yyj4608] = PersistentVolumeAccessMode(r.DecodeString()) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -57521,17 +66977,17 @@ func (x codecSelfer1234) decSlicePersistentVolumeAccessMode(v *[]PersistentVolum } } - if yyj4608 < len(yyv4608) { - yyv4608 = yyv4608[:yyj4608] - yyc4608 = true - } else if yyj4608 == 0 && yyv4608 == nil { - yyv4608 = []PersistentVolumeAccessMode{} - yyc4608 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PersistentVolumeAccessMode{} + yyc1 = true } } - yyh4608.End() - if yyc4608 { - *v = yyv4608 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -57540,10 +66996,10 @@ func (x codecSelfer1234) encSlicePersistentVolume(v []PersistentVolume, e *codec z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4612 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4613 := &yyv4612 - yy4613.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -57553,83 +67009,86 @@ func (x codecSelfer1234) decSlicePersistentVolume(v *[]PersistentVolume, d *code z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4614 := *v - yyh4614, yyl4614 := z.DecSliceHelperStart() - var yyc4614 bool - if yyl4614 == 0 { - if yyv4614 == nil { - yyv4614 = []PersistentVolume{} - yyc4614 = true - } else if len(yyv4614) != 0 { - yyv4614 = yyv4614[:0] - yyc4614 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PersistentVolume{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4614 > 0 { - var yyrr4614, yyrl4614 int - var yyrt4614 bool - if yyl4614 > cap(yyv4614) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4614 := len(yyv4614) > 0 - yyv24614 := yyv4614 - yyrl4614, yyrt4614 = z.DecInferLen(yyl4614, z.DecBasicHandle().MaxInitLen, 496) - if yyrt4614 { - if yyrl4614 <= cap(yyv4614) { - yyv4614 = yyv4614[:yyrl4614] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, 
yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 528) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4614 = make([]PersistentVolume, yyrl4614) + yyv1 = make([]PersistentVolume, yyrl1) } } else { - yyv4614 = make([]PersistentVolume, yyrl4614) + yyv1 = make([]PersistentVolume, yyrl1) } - yyc4614 = true - yyrr4614 = len(yyv4614) - if yyrg4614 { - copy(yyv4614, yyv24614) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4614 != len(yyv4614) { - yyv4614 = yyv4614[:yyl4614] - yyc4614 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4614 := 0 - for ; yyj4614 < yyrr4614; yyj4614++ { - yyh4614.ElemContainerState(yyj4614) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4614[yyj4614] = PersistentVolume{} + yyv1[yyj1] = PersistentVolume{} } else { - yyv4615 := &yyv4614[yyj4614] - yyv4615.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4614 { - for ; yyj4614 < yyl4614; yyj4614++ { - yyv4614 = append(yyv4614, PersistentVolume{}) - yyh4614.ElemContainerState(yyj4614) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PersistentVolume{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4614[yyj4614] = PersistentVolume{} + yyv1[yyj1] = PersistentVolume{} } else { - yyv4616 := &yyv4614[yyj4614] - yyv4616.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4614 := 0 - for ; !r.CheckBreak(); yyj4614++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4614 >= len(yyv4614) { - yyv4614 = append(yyv4614, PersistentVolume{}) // var yyz4614 PersistentVolume - yyc4614 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PersistentVolume{}) // var yyz1 PersistentVolume + yyc1 = true } - yyh4614.ElemContainerState(yyj4614) - if yyj4614 < len(yyv4614) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4614[yyj4614] = PersistentVolume{} + yyv1[yyj1] = PersistentVolume{} } else { - yyv4617 := &yyv4614[yyj4614] - yyv4617.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -57637,17 +67096,17 @@ func (x codecSelfer1234) decSlicePersistentVolume(v *[]PersistentVolume, d *code } } - if yyj4614 < len(yyv4614) { - yyv4614 = yyv4614[:yyj4614] - yyc4614 = true - } else if yyj4614 == 0 && yyv4614 == nil { - yyv4614 = []PersistentVolume{} - yyc4614 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PersistentVolume{} + yyc1 = true } } - yyh4614.End() - if yyc4614 { - *v = yyv4614 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -57656,10 +67115,10 @@ func (x codecSelfer1234) encSlicePersistentVolumeClaim(v []PersistentVolumeClaim z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4618 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4619 := &yyv4618 - yy4619.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -57669,83 +67128,86 @@ func (x codecSelfer1234) decSlicePersistentVolumeClaim(v *[]PersistentVolumeClai z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4620 := *v - yyh4620, yyl4620 := z.DecSliceHelperStart() - var yyc4620 bool - if yyl4620 == 0 { - if yyv4620 == nil { - yyv4620 = []PersistentVolumeClaim{} - yyc4620 = true - } else if len(yyv4620) != 0 { - yyv4620 = 
yyv4620[:0] - yyc4620 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PersistentVolumeClaim{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4620 > 0 { - var yyrr4620, yyrl4620 int - var yyrt4620 bool - if yyl4620 > cap(yyv4620) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4620 := len(yyv4620) > 0 - yyv24620 := yyv4620 - yyrl4620, yyrt4620 = z.DecInferLen(yyl4620, z.DecBasicHandle().MaxInitLen, 368) - if yyrt4620 { - if yyrl4620 <= cap(yyv4620) { - yyv4620 = yyv4620[:yyrl4620] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 376) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4620 = make([]PersistentVolumeClaim, yyrl4620) + yyv1 = make([]PersistentVolumeClaim, yyrl1) } } else { - yyv4620 = make([]PersistentVolumeClaim, yyrl4620) + yyv1 = make([]PersistentVolumeClaim, yyrl1) } - yyc4620 = true - yyrr4620 = len(yyv4620) - if yyrg4620 { - copy(yyv4620, yyv24620) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4620 != len(yyv4620) { - yyv4620 = yyv4620[:yyl4620] - yyc4620 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4620 := 0 - for ; yyj4620 < yyrr4620; yyj4620++ { - yyh4620.ElemContainerState(yyj4620) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4620[yyj4620] = PersistentVolumeClaim{} + yyv1[yyj1] = PersistentVolumeClaim{} } else { - yyv4621 := &yyv4620[yyj4620] - yyv4621.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4620 { - for ; yyj4620 < yyl4620; yyj4620++ { - yyv4620 = append(yyv4620, PersistentVolumeClaim{}) - yyh4620.ElemContainerState(yyj4620) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PersistentVolumeClaim{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4620[yyj4620] = PersistentVolumeClaim{} + yyv1[yyj1] = PersistentVolumeClaim{} } else { - yyv4622 := &yyv4620[yyj4620] - yyv4622.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4620 := 0 - for ; !r.CheckBreak(); yyj4620++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4620 >= len(yyv4620) { - yyv4620 = append(yyv4620, PersistentVolumeClaim{}) // var yyz4620 PersistentVolumeClaim - yyc4620 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PersistentVolumeClaim{}) // var yyz1 PersistentVolumeClaim + yyc1 = true } - yyh4620.ElemContainerState(yyj4620) - if yyj4620 < len(yyv4620) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4620[yyj4620] = PersistentVolumeClaim{} + yyv1[yyj1] = PersistentVolumeClaim{} } else { - yyv4623 := &yyv4620[yyj4620] - yyv4623.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -57753,17 +67215,17 @@ func (x codecSelfer1234) decSlicePersistentVolumeClaim(v *[]PersistentVolumeClai } } - if yyj4620 < len(yyv4620) { - yyv4620 = yyv4620[:yyj4620] - yyc4620 = true - } else if yyj4620 == 0 && yyv4620 == nil { - yyv4620 = []PersistentVolumeClaim{} - yyc4620 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PersistentVolumeClaim{} + yyc1 = true } } - yyh4620.End() - if yyc4620 { - *v = yyv4620 + yyh1.End() + if yyc1 { + *v = 
yyv1 } } @@ -57772,10 +67234,10 @@ func (x codecSelfer1234) encSliceKeyToPath(v []KeyToPath, e *codec1978.Encoder) z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4624 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4625 := &yyv4624 - yy4625.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -57785,83 +67247,86 @@ func (x codecSelfer1234) decSliceKeyToPath(v *[]KeyToPath, d *codec1978.Decoder) z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4626 := *v - yyh4626, yyl4626 := z.DecSliceHelperStart() - var yyc4626 bool - if yyl4626 == 0 { - if yyv4626 == nil { - yyv4626 = []KeyToPath{} - yyc4626 = true - } else if len(yyv4626) != 0 { - yyv4626 = yyv4626[:0] - yyc4626 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []KeyToPath{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4626 > 0 { - var yyrr4626, yyrl4626 int - var yyrt4626 bool - if yyl4626 > cap(yyv4626) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4626 := len(yyv4626) > 0 - yyv24626 := yyv4626 - yyrl4626, yyrt4626 = z.DecInferLen(yyl4626, z.DecBasicHandle().MaxInitLen, 40) - if yyrt4626 { - if yyrl4626 <= cap(yyv4626) { - yyv4626 = yyv4626[:yyrl4626] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4626 = make([]KeyToPath, yyrl4626) + yyv1 = make([]KeyToPath, yyrl1) } } else { - yyv4626 = make([]KeyToPath, yyrl4626) + yyv1 = make([]KeyToPath, yyrl1) } - yyc4626 = true - yyrr4626 = len(yyv4626) - if yyrg4626 { - copy(yyv4626, yyv24626) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4626 != len(yyv4626) { - yyv4626 = yyv4626[:yyl4626] - yyc4626 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4626 := 0 - for ; yyj4626 < yyrr4626; yyj4626++ { - yyh4626.ElemContainerState(yyj4626) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4626[yyj4626] = KeyToPath{} + yyv1[yyj1] = KeyToPath{} } else { - yyv4627 := &yyv4626[yyj4626] - yyv4627.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4626 { - for ; yyj4626 < yyl4626; yyj4626++ { - yyv4626 = append(yyv4626, KeyToPath{}) - yyh4626.ElemContainerState(yyj4626) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, KeyToPath{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4626[yyj4626] = KeyToPath{} + yyv1[yyj1] = KeyToPath{} } else { - yyv4628 := &yyv4626[yyj4626] - yyv4628.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4626 := 0 - for ; !r.CheckBreak(); yyj4626++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4626 >= len(yyv4626) { - yyv4626 = append(yyv4626, KeyToPath{}) // var yyz4626 KeyToPath - yyc4626 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, KeyToPath{}) // var yyz1 KeyToPath + yyc1 = true } - yyh4626.ElemContainerState(yyj4626) - if yyj4626 < len(yyv4626) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4626[yyj4626] = KeyToPath{} + yyv1[yyj1] = KeyToPath{} } else { - 
yyv4629 := &yyv4626[yyj4626] - yyv4629.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -57869,17 +67334,136 @@ func (x codecSelfer1234) decSliceKeyToPath(v *[]KeyToPath, d *codec1978.Decoder) } } - if yyj4626 < len(yyv4626) { - yyv4626 = yyv4626[:yyj4626] - yyc4626 = true - } else if yyj4626 == 0 && yyv4626 == nil { - yyv4626 = []KeyToPath{} - yyc4626 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []KeyToPath{} + yyc1 = true } } - yyh4626.End() - if yyc4626 { - *v = yyv4626 + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceVolumeProjection(v []VolumeProjection, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceVolumeProjection(v *[]VolumeProjection, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []VolumeProjection{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]VolumeProjection, yyrl1) + } + } else { + yyv1 = make([]VolumeProjection, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = VolumeProjection{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, VolumeProjection{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = VolumeProjection{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, VolumeProjection{}) // var yyz1 VolumeProjection + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = VolumeProjection{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []VolumeProjection{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -57888,10 +67472,10 @@ func (x codecSelfer1234) encSliceHTTPHeader(v []HTTPHeader, e *codec1978.Encoder z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4630 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4631 := &yyv4630 - yy4631.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -57901,83 +67485,86 @@ func (x 
codecSelfer1234) decSliceHTTPHeader(v *[]HTTPHeader, d *codec1978.Decode z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4632 := *v - yyh4632, yyl4632 := z.DecSliceHelperStart() - var yyc4632 bool - if yyl4632 == 0 { - if yyv4632 == nil { - yyv4632 = []HTTPHeader{} - yyc4632 = true - } else if len(yyv4632) != 0 { - yyv4632 = yyv4632[:0] - yyc4632 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []HTTPHeader{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4632 > 0 { - var yyrr4632, yyrl4632 int - var yyrt4632 bool - if yyl4632 > cap(yyv4632) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4632 := len(yyv4632) > 0 - yyv24632 := yyv4632 - yyrl4632, yyrt4632 = z.DecInferLen(yyl4632, z.DecBasicHandle().MaxInitLen, 32) - if yyrt4632 { - if yyrl4632 <= cap(yyv4632) { - yyv4632 = yyv4632[:yyrl4632] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4632 = make([]HTTPHeader, yyrl4632) + yyv1 = make([]HTTPHeader, yyrl1) } } else { - yyv4632 = make([]HTTPHeader, yyrl4632) + yyv1 = make([]HTTPHeader, yyrl1) } - yyc4632 = true - yyrr4632 = len(yyv4632) - if yyrg4632 { - copy(yyv4632, yyv24632) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4632 != len(yyv4632) { - yyv4632 = yyv4632[:yyl4632] - yyc4632 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4632 := 0 - for ; yyj4632 < yyrr4632; yyj4632++ { - yyh4632.ElemContainerState(yyj4632) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4632[yyj4632] = HTTPHeader{} + yyv1[yyj1] = HTTPHeader{} } else { - yyv4633 := &yyv4632[yyj4632] - yyv4633.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4632 { - for ; yyj4632 < yyl4632; yyj4632++ { - yyv4632 = append(yyv4632, HTTPHeader{}) - yyh4632.ElemContainerState(yyj4632) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, HTTPHeader{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4632[yyj4632] = HTTPHeader{} + yyv1[yyj1] = HTTPHeader{} } else { - yyv4634 := &yyv4632[yyj4632] - yyv4634.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4632 := 0 - for ; !r.CheckBreak(); yyj4632++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4632 >= len(yyv4632) { - yyv4632 = append(yyv4632, HTTPHeader{}) // var yyz4632 HTTPHeader - yyc4632 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, HTTPHeader{}) // var yyz1 HTTPHeader + yyc1 = true } - yyh4632.ElemContainerState(yyj4632) - if yyj4632 < len(yyv4632) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4632[yyj4632] = HTTPHeader{} + yyv1[yyj1] = HTTPHeader{} } else { - yyv4635 := &yyv4632[yyj4632] - yyv4635.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -57985,17 +67572,17 @@ func (x codecSelfer1234) decSliceHTTPHeader(v *[]HTTPHeader, d *codec1978.Decode } } - if yyj4632 < len(yyv4632) { - yyv4632 = yyv4632[:yyj4632] - yyc4632 = true - } else if yyj4632 == 0 && yyv4632 == nil { - yyv4632 = []HTTPHeader{} - yyc4632 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 
== nil { + yyv1 = []HTTPHeader{} + yyc1 = true } } - yyh4632.End() - if yyc4632 { - *v = yyv4632 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -58004,9 +67591,9 @@ func (x codecSelfer1234) encSliceCapability(v []Capability, e *codec1978.Encoder z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4636 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv4636.CodecEncodeSelf(e) + yyv1.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -58016,75 +67603,81 @@ func (x codecSelfer1234) decSliceCapability(v *[]Capability, d *codec1978.Decode z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4637 := *v - yyh4637, yyl4637 := z.DecSliceHelperStart() - var yyc4637 bool - if yyl4637 == 0 { - if yyv4637 == nil { - yyv4637 = []Capability{} - yyc4637 = true - } else if len(yyv4637) != 0 { - yyv4637 = yyv4637[:0] - yyc4637 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Capability{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4637 > 0 { - var yyrr4637, yyrl4637 int - var yyrt4637 bool - if yyl4637 > cap(yyv4637) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrl4637, yyrt4637 = z.DecInferLen(yyl4637, z.DecBasicHandle().MaxInitLen, 16) - if yyrt4637 { - if yyrl4637 <= cap(yyv4637) { - yyv4637 = yyv4637[:yyrl4637] + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4637 = make([]Capability, yyrl4637) + yyv1 = make([]Capability, yyrl1) } } else { - yyv4637 = make([]Capability, yyrl4637) + yyv1 = make([]Capability, yyrl1) } - yyc4637 = true - yyrr4637 = len(yyv4637) - } else if yyl4637 != len(yyv4637) { - yyv4637 = yyv4637[:yyl4637] - yyc4637 = true + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4637 := 0 - for ; yyj4637 < yyrr4637; yyj4637++ { - yyh4637.ElemContainerState(yyj4637) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4637[yyj4637] = "" + yyv1[yyj1] = "" } else { - yyv4637[yyj4637] = Capability(r.DecodeString()) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4637 { - for ; yyj4637 < yyl4637; yyj4637++ { - yyv4637 = append(yyv4637, "") - yyh4637.ElemContainerState(yyj4637) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4637[yyj4637] = "" + yyv1[yyj1] = "" } else { - yyv4637[yyj4637] = Capability(r.DecodeString()) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4637 := 0 - for ; !r.CheckBreak(); yyj4637++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4637 >= len(yyv4637) { - yyv4637 = append(yyv4637, "") // var yyz4637 Capability - yyc4637 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 Capability + yyc1 = true } - yyh4637.ElemContainerState(yyj4637) - if yyj4637 < len(yyv4637) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4637[yyj4637] = "" + yyv1[yyj1] = "" } else { - yyv4637[yyj4637] = Capability(r.DecodeString()) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -58092,17 +67685,17 @@ func (x codecSelfer1234) decSliceCapability(v 
*[]Capability, d *codec1978.Decode } } - if yyj4637 < len(yyv4637) { - yyv4637 = yyv4637[:yyj4637] - yyc4637 = true - } else if yyj4637 == 0 && yyv4637 == nil { - yyv4637 = []Capability{} - yyc4637 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Capability{} + yyc1 = true } } - yyh4637.End() - if yyc4637 { - *v = yyv4637 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -58111,10 +67704,10 @@ func (x codecSelfer1234) encSliceContainerPort(v []ContainerPort, e *codec1978.E z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4641 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4642 := &yyv4641 - yy4642.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -58124,83 +67717,205 @@ func (x codecSelfer1234) decSliceContainerPort(v *[]ContainerPort, d *codec1978. z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4643 := *v - yyh4643, yyl4643 := z.DecSliceHelperStart() - var yyc4643 bool - if yyl4643 == 0 { - if yyv4643 == nil { - yyv4643 = []ContainerPort{} - yyc4643 = true - } else if len(yyv4643) != 0 { - yyv4643 = yyv4643[:0] - yyc4643 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ContainerPort{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ContainerPort, yyrl1) + } + } else { + yyv1 = make([]ContainerPort, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerPort{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ContainerPort{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerPort{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ContainerPort{}) // var yyz1 ContainerPort + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerPort{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ContainerPort{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEnvFromSource(v []EnvFromSource, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) 
decSliceEnvFromSource(v *[]EnvFromSource, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []EnvFromSource{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4643 > 0 { - var yyrr4643, yyrl4643 int - var yyrt4643 bool - if yyl4643 > cap(yyv4643) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4643 := len(yyv4643) > 0 - yyv24643 := yyv4643 - yyrl4643, yyrt4643 = z.DecInferLen(yyl4643, z.DecBasicHandle().MaxInitLen, 56) - if yyrt4643 { - if yyrl4643 <= cap(yyv4643) { - yyv4643 = yyv4643[:yyrl4643] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4643 = make([]ContainerPort, yyrl4643) + yyv1 = make([]EnvFromSource, yyrl1) } } else { - yyv4643 = make([]ContainerPort, yyrl4643) + yyv1 = make([]EnvFromSource, yyrl1) } - yyc4643 = true - yyrr4643 = len(yyv4643) - if yyrg4643 { - copy(yyv4643, yyv24643) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4643 != len(yyv4643) { - yyv4643 = yyv4643[:yyl4643] - yyc4643 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4643 := 0 - for ; yyj4643 < yyrr4643; yyj4643++ { - yyh4643.ElemContainerState(yyj4643) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4643[yyj4643] = ContainerPort{} + yyv1[yyj1] = EnvFromSource{} } else { - yyv4644 := &yyv4643[yyj4643] - yyv4644.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4643 { - for ; yyj4643 < yyl4643; yyj4643++ { - yyv4643 = append(yyv4643, ContainerPort{}) - yyh4643.ElemContainerState(yyj4643) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, EnvFromSource{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4643[yyj4643] = ContainerPort{} + yyv1[yyj1] = EnvFromSource{} } else { - yyv4645 := &yyv4643[yyj4643] - yyv4645.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4643 := 0 - for ; !r.CheckBreak(); yyj4643++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4643 >= len(yyv4643) { - yyv4643 = append(yyv4643, ContainerPort{}) // var yyz4643 ContainerPort - yyc4643 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, EnvFromSource{}) // var yyz1 EnvFromSource + yyc1 = true } - yyh4643.ElemContainerState(yyj4643) - if yyj4643 < len(yyv4643) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4643[yyj4643] = ContainerPort{} + yyv1[yyj1] = EnvFromSource{} } else { - yyv4646 := &yyv4643[yyj4643] - yyv4646.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -58208,17 +67923,17 @@ func (x codecSelfer1234) decSliceContainerPort(v *[]ContainerPort, d *codec1978. 
} } - if yyj4643 < len(yyv4643) { - yyv4643 = yyv4643[:yyj4643] - yyc4643 = true - } else if yyj4643 == 0 && yyv4643 == nil { - yyv4643 = []ContainerPort{} - yyc4643 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []EnvFromSource{} + yyc1 = true } } - yyh4643.End() - if yyc4643 { - *v = yyv4643 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -58227,10 +67942,10 @@ func (x codecSelfer1234) encSliceEnvVar(v []EnvVar, e *codec1978.Encoder) { z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4647 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4648 := &yyv4647 - yy4648.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -58240,83 +67955,86 @@ func (x codecSelfer1234) decSliceEnvVar(v *[]EnvVar, d *codec1978.Decoder) { z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4649 := *v - yyh4649, yyl4649 := z.DecSliceHelperStart() - var yyc4649 bool - if yyl4649 == 0 { - if yyv4649 == nil { - yyv4649 = []EnvVar{} - yyc4649 = true - } else if len(yyv4649) != 0 { - yyv4649 = yyv4649[:0] - yyc4649 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []EnvVar{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4649 > 0 { - var yyrr4649, yyrl4649 int - var yyrt4649 bool - if yyl4649 > cap(yyv4649) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4649 := len(yyv4649) > 0 - yyv24649 := yyv4649 - yyrl4649, yyrt4649 = z.DecInferLen(yyl4649, z.DecBasicHandle().MaxInitLen, 40) - if yyrt4649 { - if yyrl4649 <= cap(yyv4649) { - yyv4649 = yyv4649[:yyrl4649] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4649 = make([]EnvVar, yyrl4649) + yyv1 = make([]EnvVar, yyrl1) } } else { - yyv4649 = make([]EnvVar, yyrl4649) + yyv1 = make([]EnvVar, yyrl1) } - yyc4649 = true - yyrr4649 = len(yyv4649) - if yyrg4649 { - copy(yyv4649, yyv24649) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4649 != len(yyv4649) { - yyv4649 = yyv4649[:yyl4649] - yyc4649 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4649 := 0 - for ; yyj4649 < yyrr4649; yyj4649++ { - yyh4649.ElemContainerState(yyj4649) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4649[yyj4649] = EnvVar{} + yyv1[yyj1] = EnvVar{} } else { - yyv4650 := &yyv4649[yyj4649] - yyv4650.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4649 { - for ; yyj4649 < yyl4649; yyj4649++ { - yyv4649 = append(yyv4649, EnvVar{}) - yyh4649.ElemContainerState(yyj4649) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, EnvVar{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4649[yyj4649] = EnvVar{} + yyv1[yyj1] = EnvVar{} } else { - yyv4651 := &yyv4649[yyj4649] - yyv4651.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4649 := 0 - for ; !r.CheckBreak(); yyj4649++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4649 >= len(yyv4649) { - yyv4649 = append(yyv4649, EnvVar{}) // var 
yyz4649 EnvVar - yyc4649 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, EnvVar{}) // var yyz1 EnvVar + yyc1 = true } - yyh4649.ElemContainerState(yyj4649) - if yyj4649 < len(yyv4649) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4649[yyj4649] = EnvVar{} + yyv1[yyj1] = EnvVar{} } else { - yyv4652 := &yyv4649[yyj4649] - yyv4652.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -58324,17 +68042,17 @@ func (x codecSelfer1234) decSliceEnvVar(v *[]EnvVar, d *codec1978.Decoder) { } } - if yyj4649 < len(yyv4649) { - yyv4649 = yyv4649[:yyj4649] - yyc4649 = true - } else if yyj4649 == 0 && yyv4649 == nil { - yyv4649 = []EnvVar{} - yyc4649 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []EnvVar{} + yyc1 = true } } - yyh4649.End() - if yyc4649 { - *v = yyv4649 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -58343,10 +68061,10 @@ func (x codecSelfer1234) encSliceVolumeMount(v []VolumeMount, e *codec1978.Encod z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4653 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4654 := &yyv4653 - yy4654.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -58356,83 +68074,86 @@ func (x codecSelfer1234) decSliceVolumeMount(v *[]VolumeMount, d *codec1978.Deco z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4655 := *v - yyh4655, yyl4655 := z.DecSliceHelperStart() - var yyc4655 bool - if yyl4655 == 0 { - if yyv4655 == nil { - yyv4655 = []VolumeMount{} - yyc4655 = true - } else if len(yyv4655) != 0 { - yyv4655 = yyv4655[:0] - yyc4655 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []VolumeMount{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4655 > 0 { - var yyrr4655, yyrl4655 int - var yyrt4655 bool - if yyl4655 > cap(yyv4655) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4655 := len(yyv4655) > 0 - yyv24655 := yyv4655 - yyrl4655, yyrt4655 = z.DecInferLen(yyl4655, z.DecBasicHandle().MaxInitLen, 56) - if yyrt4655 { - if yyrl4655 <= cap(yyv4655) { - yyv4655 = yyv4655[:yyrl4655] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4655 = make([]VolumeMount, yyrl4655) + yyv1 = make([]VolumeMount, yyrl1) } } else { - yyv4655 = make([]VolumeMount, yyrl4655) + yyv1 = make([]VolumeMount, yyrl1) } - yyc4655 = true - yyrr4655 = len(yyv4655) - if yyrg4655 { - copy(yyv4655, yyv24655) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4655 != len(yyv4655) { - yyv4655 = yyv4655[:yyl4655] - yyc4655 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4655 := 0 - for ; yyj4655 < yyrr4655; yyj4655++ { - yyh4655.ElemContainerState(yyj4655) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4655[yyj4655] = VolumeMount{} + yyv1[yyj1] = VolumeMount{} } else { - yyv4656 := &yyv4655[yyj4655] - yyv4656.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4655 { - for ; yyj4655 < 
yyl4655; yyj4655++ { - yyv4655 = append(yyv4655, VolumeMount{}) - yyh4655.ElemContainerState(yyj4655) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, VolumeMount{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4655[yyj4655] = VolumeMount{} + yyv1[yyj1] = VolumeMount{} } else { - yyv4657 := &yyv4655[yyj4655] - yyv4657.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4655 := 0 - for ; !r.CheckBreak(); yyj4655++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4655 >= len(yyv4655) { - yyv4655 = append(yyv4655, VolumeMount{}) // var yyz4655 VolumeMount - yyc4655 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, VolumeMount{}) // var yyz1 VolumeMount + yyc1 = true } - yyh4655.ElemContainerState(yyj4655) - if yyj4655 < len(yyv4655) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4655[yyj4655] = VolumeMount{} + yyv1[yyj1] = VolumeMount{} } else { - yyv4658 := &yyv4655[yyj4655] - yyv4658.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -58440,17 +68161,17 @@ func (x codecSelfer1234) decSliceVolumeMount(v *[]VolumeMount, d *codec1978.Deco } } - if yyj4655 < len(yyv4655) { - yyv4655 = yyv4655[:yyj4655] - yyc4655 = true - } else if yyj4655 == 0 && yyv4655 == nil { - yyv4655 = []VolumeMount{} - yyc4655 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []VolumeMount{} + yyc1 = true } } - yyh4655.End() - if yyc4655 { - *v = yyv4655 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -58459,10 +68180,10 @@ func (x codecSelfer1234) encSliceNodeSelectorTerm(v []NodeSelectorTerm, e *codec z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4659 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4660 := &yyv4659 - yy4660.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -58472,83 +68193,86 @@ func (x codecSelfer1234) decSliceNodeSelectorTerm(v *[]NodeSelectorTerm, d *code z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4661 := *v - yyh4661, yyl4661 := z.DecSliceHelperStart() - var yyc4661 bool - if yyl4661 == 0 { - if yyv4661 == nil { - yyv4661 = []NodeSelectorTerm{} - yyc4661 = true - } else if len(yyv4661) != 0 { - yyv4661 = yyv4661[:0] - yyc4661 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NodeSelectorTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4661 > 0 { - var yyrr4661, yyrl4661 int - var yyrt4661 bool - if yyl4661 > cap(yyv4661) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4661 := len(yyv4661) > 0 - yyv24661 := yyv4661 - yyrl4661, yyrt4661 = z.DecInferLen(yyl4661, z.DecBasicHandle().MaxInitLen, 24) - if yyrt4661 { - if yyrl4661 <= cap(yyv4661) { - yyv4661 = yyv4661[:yyrl4661] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4661 = make([]NodeSelectorTerm, yyrl4661) + yyv1 = make([]NodeSelectorTerm, yyrl1) } } else { - yyv4661 = make([]NodeSelectorTerm, yyrl4661) + yyv1 = make([]NodeSelectorTerm, yyrl1) } - yyc4661 = true - yyrr4661 = 
len(yyv4661) - if yyrg4661 { - copy(yyv4661, yyv24661) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4661 != len(yyv4661) { - yyv4661 = yyv4661[:yyl4661] - yyc4661 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4661 := 0 - for ; yyj4661 < yyrr4661; yyj4661++ { - yyh4661.ElemContainerState(yyj4661) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4661[yyj4661] = NodeSelectorTerm{} + yyv1[yyj1] = NodeSelectorTerm{} } else { - yyv4662 := &yyv4661[yyj4661] - yyv4662.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4661 { - for ; yyj4661 < yyl4661; yyj4661++ { - yyv4661 = append(yyv4661, NodeSelectorTerm{}) - yyh4661.ElemContainerState(yyj4661) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NodeSelectorTerm{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4661[yyj4661] = NodeSelectorTerm{} + yyv1[yyj1] = NodeSelectorTerm{} } else { - yyv4663 := &yyv4661[yyj4661] - yyv4663.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4661 := 0 - for ; !r.CheckBreak(); yyj4661++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4661 >= len(yyv4661) { - yyv4661 = append(yyv4661, NodeSelectorTerm{}) // var yyz4661 NodeSelectorTerm - yyc4661 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NodeSelectorTerm{}) // var yyz1 NodeSelectorTerm + yyc1 = true } - yyh4661.ElemContainerState(yyj4661) - if yyj4661 < len(yyv4661) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4661[yyj4661] = NodeSelectorTerm{} + yyv1[yyj1] = NodeSelectorTerm{} } else { - yyv4664 := &yyv4661[yyj4661] - yyv4664.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -58556,17 +68280,17 @@ func (x codecSelfer1234) decSliceNodeSelectorTerm(v *[]NodeSelectorTerm, d *code } } - if yyj4661 < len(yyv4661) { - yyv4661 = yyv4661[:yyj4661] - yyc4661 = true - } else if yyj4661 == 0 && yyv4661 == nil { - yyv4661 = []NodeSelectorTerm{} - yyc4661 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NodeSelectorTerm{} + yyc1 = true } } - yyh4661.End() - if yyc4661 { - *v = yyv4661 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -58575,10 +68299,10 @@ func (x codecSelfer1234) encSliceNodeSelectorRequirement(v []NodeSelectorRequire z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4665 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4666 := &yyv4665 - yy4666.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -58588,83 +68312,86 @@ func (x codecSelfer1234) decSliceNodeSelectorRequirement(v *[]NodeSelectorRequir z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4667 := *v - yyh4667, yyl4667 := z.DecSliceHelperStart() - var yyc4667 bool - if yyl4667 == 0 { - if yyv4667 == nil { - yyv4667 = []NodeSelectorRequirement{} - yyc4667 = true - } else if len(yyv4667) != 0 { - yyv4667 = yyv4667[:0] - yyc4667 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NodeSelectorRequirement{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4667 > 0 { - var yyrr4667, yyrl4667 int - var yyrt4667 bool - if yyl4667 > 
cap(yyv4667) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4667 := len(yyv4667) > 0 - yyv24667 := yyv4667 - yyrl4667, yyrt4667 = z.DecInferLen(yyl4667, z.DecBasicHandle().MaxInitLen, 56) - if yyrt4667 { - if yyrl4667 <= cap(yyv4667) { - yyv4667 = yyv4667[:yyrl4667] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4667 = make([]NodeSelectorRequirement, yyrl4667) + yyv1 = make([]NodeSelectorRequirement, yyrl1) } } else { - yyv4667 = make([]NodeSelectorRequirement, yyrl4667) + yyv1 = make([]NodeSelectorRequirement, yyrl1) } - yyc4667 = true - yyrr4667 = len(yyv4667) - if yyrg4667 { - copy(yyv4667, yyv24667) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4667 != len(yyv4667) { - yyv4667 = yyv4667[:yyl4667] - yyc4667 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4667 := 0 - for ; yyj4667 < yyrr4667; yyj4667++ { - yyh4667.ElemContainerState(yyj4667) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4667[yyj4667] = NodeSelectorRequirement{} + yyv1[yyj1] = NodeSelectorRequirement{} } else { - yyv4668 := &yyv4667[yyj4667] - yyv4668.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4667 { - for ; yyj4667 < yyl4667; yyj4667++ { - yyv4667 = append(yyv4667, NodeSelectorRequirement{}) - yyh4667.ElemContainerState(yyj4667) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NodeSelectorRequirement{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4667[yyj4667] = NodeSelectorRequirement{} + yyv1[yyj1] = NodeSelectorRequirement{} } else { - yyv4669 := &yyv4667[yyj4667] - yyv4669.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4667 := 0 - for ; !r.CheckBreak(); yyj4667++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4667 >= len(yyv4667) { - yyv4667 = append(yyv4667, NodeSelectorRequirement{}) // var yyz4667 NodeSelectorRequirement - yyc4667 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NodeSelectorRequirement{}) // var yyz1 NodeSelectorRequirement + yyc1 = true } - yyh4667.ElemContainerState(yyj4667) - if yyj4667 < len(yyv4667) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4667[yyj4667] = NodeSelectorRequirement{} + yyv1[yyj1] = NodeSelectorRequirement{} } else { - yyv4670 := &yyv4667[yyj4667] - yyv4670.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -58672,17 +68399,17 @@ func (x codecSelfer1234) decSliceNodeSelectorRequirement(v *[]NodeSelectorRequir } } - if yyj4667 < len(yyv4667) { - yyv4667 = yyv4667[:yyj4667] - yyc4667 = true - } else if yyj4667 == 0 && yyv4667 == nil { - yyv4667 = []NodeSelectorRequirement{} - yyc4667 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NodeSelectorRequirement{} + yyc1 = true } } - yyh4667.End() - if yyc4667 { - *v = yyv4667 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -58691,10 +68418,10 @@ func (x codecSelfer1234) encSlicePodAffinityTerm(v []PodAffinityTerm, e *codec19 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4671 := range v { + for _, yyv1 := range v { 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4672 := &yyv4671 - yy4672.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -58704,83 +68431,86 @@ func (x codecSelfer1234) decSlicePodAffinityTerm(v *[]PodAffinityTerm, d *codec1 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4673 := *v - yyh4673, yyl4673 := z.DecSliceHelperStart() - var yyc4673 bool - if yyl4673 == 0 { - if yyv4673 == nil { - yyv4673 = []PodAffinityTerm{} - yyc4673 = true - } else if len(yyv4673) != 0 { - yyv4673 = yyv4673[:0] - yyc4673 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodAffinityTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4673 > 0 { - var yyrr4673, yyrl4673 int - var yyrt4673 bool - if yyl4673 > cap(yyv4673) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4673 := len(yyv4673) > 0 - yyv24673 := yyv4673 - yyrl4673, yyrt4673 = z.DecInferLen(yyl4673, z.DecBasicHandle().MaxInitLen, 48) - if yyrt4673 { - if yyrl4673 <= cap(yyv4673) { - yyv4673 = yyv4673[:yyrl4673] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4673 = make([]PodAffinityTerm, yyrl4673) + yyv1 = make([]PodAffinityTerm, yyrl1) } } else { - yyv4673 = make([]PodAffinityTerm, yyrl4673) + yyv1 = make([]PodAffinityTerm, yyrl1) } - yyc4673 = true - yyrr4673 = len(yyv4673) - if yyrg4673 { - copy(yyv4673, yyv24673) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4673 != len(yyv4673) { - yyv4673 = yyv4673[:yyl4673] - yyc4673 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4673 := 0 - for ; yyj4673 < yyrr4673; yyj4673++ { - yyh4673.ElemContainerState(yyj4673) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4673[yyj4673] = PodAffinityTerm{} + yyv1[yyj1] = PodAffinityTerm{} } else { - yyv4674 := &yyv4673[yyj4673] - yyv4674.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4673 { - for ; yyj4673 < yyl4673; yyj4673++ { - yyv4673 = append(yyv4673, PodAffinityTerm{}) - yyh4673.ElemContainerState(yyj4673) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodAffinityTerm{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4673[yyj4673] = PodAffinityTerm{} + yyv1[yyj1] = PodAffinityTerm{} } else { - yyv4675 := &yyv4673[yyj4673] - yyv4675.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4673 := 0 - for ; !r.CheckBreak(); yyj4673++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4673 >= len(yyv4673) { - yyv4673 = append(yyv4673, PodAffinityTerm{}) // var yyz4673 PodAffinityTerm - yyc4673 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodAffinityTerm{}) // var yyz1 PodAffinityTerm + yyc1 = true } - yyh4673.ElemContainerState(yyj4673) - if yyj4673 < len(yyv4673) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4673[yyj4673] = PodAffinityTerm{} + yyv1[yyj1] = PodAffinityTerm{} } else { - yyv4676 := &yyv4673[yyj4673] - yyv4676.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -58788,17 +68518,17 @@ func 
(x codecSelfer1234) decSlicePodAffinityTerm(v *[]PodAffinityTerm, d *codec1 } } - if yyj4673 < len(yyv4673) { - yyv4673 = yyv4673[:yyj4673] - yyc4673 = true - } else if yyj4673 == 0 && yyv4673 == nil { - yyv4673 = []PodAffinityTerm{} - yyc4673 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodAffinityTerm{} + yyc1 = true } } - yyh4673.End() - if yyc4673 { - *v = yyv4673 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -58807,10 +68537,10 @@ func (x codecSelfer1234) encSliceWeightedPodAffinityTerm(v []WeightedPodAffinity z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4677 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4678 := &yyv4677 - yy4678.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -58820,83 +68550,86 @@ func (x codecSelfer1234) decSliceWeightedPodAffinityTerm(v *[]WeightedPodAffinit z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4679 := *v - yyh4679, yyl4679 := z.DecSliceHelperStart() - var yyc4679 bool - if yyl4679 == 0 { - if yyv4679 == nil { - yyv4679 = []WeightedPodAffinityTerm{} - yyc4679 = true - } else if len(yyv4679) != 0 { - yyv4679 = yyv4679[:0] - yyc4679 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []WeightedPodAffinityTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4679 > 0 { - var yyrr4679, yyrl4679 int - var yyrt4679 bool - if yyl4679 > cap(yyv4679) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4679 := len(yyv4679) > 0 - yyv24679 := yyv4679 - yyrl4679, yyrt4679 = z.DecInferLen(yyl4679, z.DecBasicHandle().MaxInitLen, 56) - if yyrt4679 { - if yyrl4679 <= cap(yyv4679) { - yyv4679 = yyv4679[:yyrl4679] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4679 = make([]WeightedPodAffinityTerm, yyrl4679) + yyv1 = make([]WeightedPodAffinityTerm, yyrl1) } } else { - yyv4679 = make([]WeightedPodAffinityTerm, yyrl4679) + yyv1 = make([]WeightedPodAffinityTerm, yyrl1) } - yyc4679 = true - yyrr4679 = len(yyv4679) - if yyrg4679 { - copy(yyv4679, yyv24679) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4679 != len(yyv4679) { - yyv4679 = yyv4679[:yyl4679] - yyc4679 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4679 := 0 - for ; yyj4679 < yyrr4679; yyj4679++ { - yyh4679.ElemContainerState(yyj4679) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4679[yyj4679] = WeightedPodAffinityTerm{} + yyv1[yyj1] = WeightedPodAffinityTerm{} } else { - yyv4680 := &yyv4679[yyj4679] - yyv4680.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4679 { - for ; yyj4679 < yyl4679; yyj4679++ { - yyv4679 = append(yyv4679, WeightedPodAffinityTerm{}) - yyh4679.ElemContainerState(yyj4679) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, WeightedPodAffinityTerm{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4679[yyj4679] = WeightedPodAffinityTerm{} + yyv1[yyj1] = WeightedPodAffinityTerm{} } else { - yyv4681 
:= &yyv4679[yyj4679] - yyv4681.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4679 := 0 - for ; !r.CheckBreak(); yyj4679++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4679 >= len(yyv4679) { - yyv4679 = append(yyv4679, WeightedPodAffinityTerm{}) // var yyz4679 WeightedPodAffinityTerm - yyc4679 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, WeightedPodAffinityTerm{}) // var yyz1 WeightedPodAffinityTerm + yyc1 = true } - yyh4679.ElemContainerState(yyj4679) - if yyj4679 < len(yyv4679) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4679[yyj4679] = WeightedPodAffinityTerm{} + yyv1[yyj1] = WeightedPodAffinityTerm{} } else { - yyv4682 := &yyv4679[yyj4679] - yyv4682.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -58904,17 +68637,17 @@ func (x codecSelfer1234) decSliceWeightedPodAffinityTerm(v *[]WeightedPodAffinit } } - if yyj4679 < len(yyv4679) { - yyv4679 = yyv4679[:yyj4679] - yyc4679 = true - } else if yyj4679 == 0 && yyv4679 == nil { - yyv4679 = []WeightedPodAffinityTerm{} - yyc4679 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []WeightedPodAffinityTerm{} + yyc1 = true } } - yyh4679.End() - if yyc4679 { - *v = yyv4679 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -58923,10 +68656,10 @@ func (x codecSelfer1234) encSlicePreferredSchedulingTerm(v []PreferredScheduling z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4683 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4684 := &yyv4683 - yy4684.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -58936,83 +68669,86 @@ func (x codecSelfer1234) decSlicePreferredSchedulingTerm(v *[]PreferredSchedulin z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4685 := *v - yyh4685, yyl4685 := z.DecSliceHelperStart() - var yyc4685 bool - if yyl4685 == 0 { - if yyv4685 == nil { - yyv4685 = []PreferredSchedulingTerm{} - yyc4685 = true - } else if len(yyv4685) != 0 { - yyv4685 = yyv4685[:0] - yyc4685 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PreferredSchedulingTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4685 > 0 { - var yyrr4685, yyrl4685 int - var yyrt4685 bool - if yyl4685 > cap(yyv4685) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4685 := len(yyv4685) > 0 - yyv24685 := yyv4685 - yyrl4685, yyrt4685 = z.DecInferLen(yyl4685, z.DecBasicHandle().MaxInitLen, 32) - if yyrt4685 { - if yyrl4685 <= cap(yyv4685) { - yyv4685 = yyv4685[:yyrl4685] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4685 = make([]PreferredSchedulingTerm, yyrl4685) + yyv1 = make([]PreferredSchedulingTerm, yyrl1) } } else { - yyv4685 = make([]PreferredSchedulingTerm, yyrl4685) + yyv1 = make([]PreferredSchedulingTerm, yyrl1) } - yyc4685 = true - yyrr4685 = len(yyv4685) - if yyrg4685 { - copy(yyv4685, yyv24685) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4685 != len(yyv4685) { - yyv4685 = yyv4685[:yyl4685] 
- yyc4685 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4685 := 0 - for ; yyj4685 < yyrr4685; yyj4685++ { - yyh4685.ElemContainerState(yyj4685) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4685[yyj4685] = PreferredSchedulingTerm{} + yyv1[yyj1] = PreferredSchedulingTerm{} } else { - yyv4686 := &yyv4685[yyj4685] - yyv4686.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4685 { - for ; yyj4685 < yyl4685; yyj4685++ { - yyv4685 = append(yyv4685, PreferredSchedulingTerm{}) - yyh4685.ElemContainerState(yyj4685) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PreferredSchedulingTerm{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4685[yyj4685] = PreferredSchedulingTerm{} + yyv1[yyj1] = PreferredSchedulingTerm{} } else { - yyv4687 := &yyv4685[yyj4685] - yyv4687.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4685 := 0 - for ; !r.CheckBreak(); yyj4685++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4685 >= len(yyv4685) { - yyv4685 = append(yyv4685, PreferredSchedulingTerm{}) // var yyz4685 PreferredSchedulingTerm - yyc4685 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PreferredSchedulingTerm{}) // var yyz1 PreferredSchedulingTerm + yyc1 = true } - yyh4685.ElemContainerState(yyj4685) - if yyj4685 < len(yyv4685) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4685[yyj4685] = PreferredSchedulingTerm{} + yyv1[yyj1] = PreferredSchedulingTerm{} } else { - yyv4688 := &yyv4685[yyj4685] - yyv4688.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -59020,17 +68756,17 @@ func (x codecSelfer1234) decSlicePreferredSchedulingTerm(v *[]PreferredSchedulin } } - if yyj4685 < len(yyv4685) { - yyv4685 = yyv4685[:yyj4685] - yyc4685 = true - } else if yyj4685 == 0 && yyv4685 == nil { - yyv4685 = []PreferredSchedulingTerm{} - yyc4685 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PreferredSchedulingTerm{} + yyc1 = true } } - yyh4685.End() - if yyc4685 { - *v = yyv4685 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -59039,10 +68775,10 @@ func (x codecSelfer1234) encSliceVolume(v []Volume, e *codec1978.Encoder) { z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4689 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4690 := &yyv4689 - yy4690.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -59052,83 +68788,86 @@ func (x codecSelfer1234) decSliceVolume(v *[]Volume, d *codec1978.Decoder) { z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4691 := *v - yyh4691, yyl4691 := z.DecSliceHelperStart() - var yyc4691 bool - if yyl4691 == 0 { - if yyv4691 == nil { - yyv4691 = []Volume{} - yyc4691 = true - } else if len(yyv4691) != 0 { - yyv4691 = yyv4691[:0] - yyc4691 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Volume{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4691 > 0 { - var yyrr4691, yyrl4691 int - var yyrt4691 bool - if yyl4691 > cap(yyv4691) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > 
cap(yyv1) { - yyrg4691 := len(yyv4691) > 0 - yyv24691 := yyv4691 - yyrl4691, yyrt4691 = z.DecInferLen(yyl4691, z.DecBasicHandle().MaxInitLen, 200) - if yyrt4691 { - if yyrl4691 <= cap(yyv4691) { - yyv4691 = yyv4691[:yyrl4691] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 224) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4691 = make([]Volume, yyrl4691) + yyv1 = make([]Volume, yyrl1) } } else { - yyv4691 = make([]Volume, yyrl4691) + yyv1 = make([]Volume, yyrl1) } - yyc4691 = true - yyrr4691 = len(yyv4691) - if yyrg4691 { - copy(yyv4691, yyv24691) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4691 != len(yyv4691) { - yyv4691 = yyv4691[:yyl4691] - yyc4691 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4691 := 0 - for ; yyj4691 < yyrr4691; yyj4691++ { - yyh4691.ElemContainerState(yyj4691) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4691[yyj4691] = Volume{} + yyv1[yyj1] = Volume{} } else { - yyv4692 := &yyv4691[yyj4691] - yyv4692.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4691 { - for ; yyj4691 < yyl4691; yyj4691++ { - yyv4691 = append(yyv4691, Volume{}) - yyh4691.ElemContainerState(yyj4691) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Volume{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4691[yyj4691] = Volume{} + yyv1[yyj1] = Volume{} } else { - yyv4693 := &yyv4691[yyj4691] - yyv4693.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4691 := 0 - for ; !r.CheckBreak(); yyj4691++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4691 >= len(yyv4691) { - yyv4691 = append(yyv4691, Volume{}) // var yyz4691 Volume - yyc4691 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Volume{}) // var yyz1 Volume + yyc1 = true } - yyh4691.ElemContainerState(yyj4691) - if yyj4691 < len(yyv4691) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4691[yyj4691] = Volume{} + yyv1[yyj1] = Volume{} } else { - yyv4694 := &yyv4691[yyj4691] - yyv4694.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -59136,17 +68875,17 @@ func (x codecSelfer1234) decSliceVolume(v *[]Volume, d *codec1978.Decoder) { } } - if yyj4691 < len(yyv4691) { - yyv4691 = yyv4691[:yyj4691] - yyc4691 = true - } else if yyj4691 == 0 && yyv4691 == nil { - yyv4691 = []Volume{} - yyc4691 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Volume{} + yyc1 = true } } - yyh4691.End() - if yyc4691 { - *v = yyv4691 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -59155,10 +68894,10 @@ func (x codecSelfer1234) encSliceContainer(v []Container, e *codec1978.Encoder) z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4695 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4696 := &yyv4695 - yy4696.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -59168,83 +68907,86 @@ func (x codecSelfer1234) decSliceContainer(v *[]Container, d *codec1978.Decoder) z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4697 := *v - yyh4697, yyl4697 := z.DecSliceHelperStart() - var yyc4697 bool - if yyl4697 == 0 { - if yyv4697 == nil { - 
yyv4697 = []Container{} - yyc4697 = true - } else if len(yyv4697) != 0 { - yyv4697 = yyv4697[:0] - yyc4697 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Container{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4697 > 0 { - var yyrr4697, yyrl4697 int - var yyrt4697 bool - if yyl4697 > cap(yyv4697) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4697 := len(yyv4697) > 0 - yyv24697 := yyv4697 - yyrl4697, yyrt4697 = z.DecInferLen(yyl4697, z.DecBasicHandle().MaxInitLen, 256) - if yyrt4697 { - if yyrl4697 <= cap(yyv4697) { - yyv4697 = yyv4697[:yyrl4697] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4697 = make([]Container, yyrl4697) + yyv1 = make([]Container, yyrl1) } } else { - yyv4697 = make([]Container, yyrl4697) + yyv1 = make([]Container, yyrl1) } - yyc4697 = true - yyrr4697 = len(yyv4697) - if yyrg4697 { - copy(yyv4697, yyv24697) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4697 != len(yyv4697) { - yyv4697 = yyv4697[:yyl4697] - yyc4697 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4697 := 0 - for ; yyj4697 < yyrr4697; yyj4697++ { - yyh4697.ElemContainerState(yyj4697) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4697[yyj4697] = Container{} + yyv1[yyj1] = Container{} } else { - yyv4698 := &yyv4697[yyj4697] - yyv4698.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4697 { - for ; yyj4697 < yyl4697; yyj4697++ { - yyv4697 = append(yyv4697, Container{}) - yyh4697.ElemContainerState(yyj4697) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Container{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4697[yyj4697] = Container{} + yyv1[yyj1] = Container{} } else { - yyv4699 := &yyv4697[yyj4697] - yyv4699.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4697 := 0 - for ; !r.CheckBreak(); yyj4697++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4697 >= len(yyv4697) { - yyv4697 = append(yyv4697, Container{}) // var yyz4697 Container - yyc4697 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Container{}) // var yyz1 Container + yyc1 = true } - yyh4697.ElemContainerState(yyj4697) - if yyj4697 < len(yyv4697) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4697[yyj4697] = Container{} + yyv1[yyj1] = Container{} } else { - yyv4700 := &yyv4697[yyj4697] - yyv4700.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -59252,17 +68994,17 @@ func (x codecSelfer1234) decSliceContainer(v *[]Container, d *codec1978.Decoder) } } - if yyj4697 < len(yyv4697) { - yyv4697 = yyv4697[:yyj4697] - yyc4697 = true - } else if yyj4697 == 0 && yyv4697 == nil { - yyv4697 = []Container{} - yyc4697 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Container{} + yyc1 = true } } - yyh4697.End() - if yyc4697 { - *v = yyv4697 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -59271,10 +69013,10 @@ func (x codecSelfer1234) encSliceLocalObjectReference(v []LocalObjectReference, z, r := 
codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4701 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4702 := &yyv4701 - yy4702.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -59284,83 +69026,205 @@ func (x codecSelfer1234) decSliceLocalObjectReference(v *[]LocalObjectReference, z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4703 := *v - yyh4703, yyl4703 := z.DecSliceHelperStart() - var yyc4703 bool - if yyl4703 == 0 { - if yyv4703 == nil { - yyv4703 = []LocalObjectReference{} - yyc4703 = true - } else if len(yyv4703) != 0 { - yyv4703 = yyv4703[:0] - yyc4703 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []LocalObjectReference{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]LocalObjectReference, yyrl1) + } + } else { + yyv1 = make([]LocalObjectReference, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LocalObjectReference{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, LocalObjectReference{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LocalObjectReference{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, LocalObjectReference{}) // var yyz1 LocalObjectReference + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = LocalObjectReference{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []LocalObjectReference{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceToleration(v []Toleration, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceToleration(v *[]Toleration, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Toleration{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4703 > 0 { - var yyrr4703, yyrl4703 int - var yyrt4703 bool - if yyl4703 > cap(yyv4703) { + } else if yyl1 > 0 { + var 
yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4703 := len(yyv4703) > 0 - yyv24703 := yyv4703 - yyrl4703, yyrt4703 = z.DecInferLen(yyl4703, z.DecBasicHandle().MaxInitLen, 16) - if yyrt4703 { - if yyrl4703 <= cap(yyv4703) { - yyv4703 = yyv4703[:yyrl4703] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4703 = make([]LocalObjectReference, yyrl4703) + yyv1 = make([]Toleration, yyrl1) } } else { - yyv4703 = make([]LocalObjectReference, yyrl4703) + yyv1 = make([]Toleration, yyrl1) } - yyc4703 = true - yyrr4703 = len(yyv4703) - if yyrg4703 { - copy(yyv4703, yyv24703) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4703 != len(yyv4703) { - yyv4703 = yyv4703[:yyl4703] - yyc4703 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4703 := 0 - for ; yyj4703 < yyrr4703; yyj4703++ { - yyh4703.ElemContainerState(yyj4703) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4703[yyj4703] = LocalObjectReference{} + yyv1[yyj1] = Toleration{} } else { - yyv4704 := &yyv4703[yyj4703] - yyv4704.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4703 { - for ; yyj4703 < yyl4703; yyj4703++ { - yyv4703 = append(yyv4703, LocalObjectReference{}) - yyh4703.ElemContainerState(yyj4703) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Toleration{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4703[yyj4703] = LocalObjectReference{} + yyv1[yyj1] = Toleration{} } else { - yyv4705 := &yyv4703[yyj4703] - yyv4705.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4703 := 0 - for ; !r.CheckBreak(); yyj4703++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4703 >= len(yyv4703) { - yyv4703 = append(yyv4703, LocalObjectReference{}) // var yyz4703 LocalObjectReference - yyc4703 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Toleration{}) // var yyz1 Toleration + yyc1 = true } - yyh4703.ElemContainerState(yyj4703) - if yyj4703 < len(yyv4703) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4703[yyj4703] = LocalObjectReference{} + yyv1[yyj1] = Toleration{} } else { - yyv4706 := &yyv4703[yyj4703] - yyv4706.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -59368,17 +69232,17 @@ func (x codecSelfer1234) decSliceLocalObjectReference(v *[]LocalObjectReference, } } - if yyj4703 < len(yyv4703) { - yyv4703 = yyv4703[:yyj4703] - yyc4703 = true - } else if yyj4703 == 0 && yyv4703 == nil { - yyv4703 = []LocalObjectReference{} - yyc4703 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Toleration{} + yyc1 = true } } - yyh4703.End() - if yyc4703 { - *v = yyv4703 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -59387,10 +69251,10 @@ func (x codecSelfer1234) encSlicePodCondition(v []PodCondition, e *codec1978.Enc z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4707 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4708 := &yyv4707 - yy4708.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -59400,83 +69264,86 @@ func (x 
codecSelfer1234) decSlicePodCondition(v *[]PodCondition, d *codec1978.De z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4709 := *v - yyh4709, yyl4709 := z.DecSliceHelperStart() - var yyc4709 bool - if yyl4709 == 0 { - if yyv4709 == nil { - yyv4709 = []PodCondition{} - yyc4709 = true - } else if len(yyv4709) != 0 { - yyv4709 = yyv4709[:0] - yyc4709 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4709 > 0 { - var yyrr4709, yyrl4709 int - var yyrt4709 bool - if yyl4709 > cap(yyv4709) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4709 := len(yyv4709) > 0 - yyv24709 := yyv4709 - yyrl4709, yyrt4709 = z.DecInferLen(yyl4709, z.DecBasicHandle().MaxInitLen, 112) - if yyrt4709 { - if yyrl4709 <= cap(yyv4709) { - yyv4709 = yyv4709[:yyrl4709] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4709 = make([]PodCondition, yyrl4709) + yyv1 = make([]PodCondition, yyrl1) } } else { - yyv4709 = make([]PodCondition, yyrl4709) + yyv1 = make([]PodCondition, yyrl1) } - yyc4709 = true - yyrr4709 = len(yyv4709) - if yyrg4709 { - copy(yyv4709, yyv24709) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4709 != len(yyv4709) { - yyv4709 = yyv4709[:yyl4709] - yyc4709 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4709 := 0 - for ; yyj4709 < yyrr4709; yyj4709++ { - yyh4709.ElemContainerState(yyj4709) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4709[yyj4709] = PodCondition{} + yyv1[yyj1] = PodCondition{} } else { - yyv4710 := &yyv4709[yyj4709] - yyv4710.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4709 { - for ; yyj4709 < yyl4709; yyj4709++ { - yyv4709 = append(yyv4709, PodCondition{}) - yyh4709.ElemContainerState(yyj4709) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodCondition{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4709[yyj4709] = PodCondition{} + yyv1[yyj1] = PodCondition{} } else { - yyv4711 := &yyv4709[yyj4709] - yyv4711.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4709 := 0 - for ; !r.CheckBreak(); yyj4709++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4709 >= len(yyv4709) { - yyv4709 = append(yyv4709, PodCondition{}) // var yyz4709 PodCondition - yyc4709 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodCondition{}) // var yyz1 PodCondition + yyc1 = true } - yyh4709.ElemContainerState(yyj4709) - if yyj4709 < len(yyv4709) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4709[yyj4709] = PodCondition{} + yyv1[yyj1] = PodCondition{} } else { - yyv4712 := &yyv4709[yyj4709] - yyv4712.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -59484,17 +69351,17 @@ func (x codecSelfer1234) decSlicePodCondition(v *[]PodCondition, d *codec1978.De } } - if yyj4709 < len(yyv4709) { - yyv4709 = yyv4709[:yyj4709] - yyc4709 = true - } else if yyj4709 == 0 && yyv4709 == nil { - yyv4709 = []PodCondition{} - yyc4709 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + 
yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodCondition{} + yyc1 = true } } - yyh4709.End() - if yyc4709 { - *v = yyv4709 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -59503,10 +69370,10 @@ func (x codecSelfer1234) encSliceContainerStatus(v []ContainerStatus, e *codec19 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4713 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4714 := &yyv4713 - yy4714.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -59516,83 +69383,86 @@ func (x codecSelfer1234) decSliceContainerStatus(v *[]ContainerStatus, d *codec1 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4715 := *v - yyh4715, yyl4715 := z.DecSliceHelperStart() - var yyc4715 bool - if yyl4715 == 0 { - if yyv4715 == nil { - yyv4715 = []ContainerStatus{} - yyc4715 = true - } else if len(yyv4715) != 0 { - yyv4715 = yyv4715[:0] - yyc4715 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ContainerStatus{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4715 > 0 { - var yyrr4715, yyrl4715 int - var yyrt4715 bool - if yyl4715 > cap(yyv4715) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4715 := len(yyv4715) > 0 - yyv24715 := yyv4715 - yyrl4715, yyrt4715 = z.DecInferLen(yyl4715, z.DecBasicHandle().MaxInitLen, 120) - if yyrt4715 { - if yyrl4715 <= cap(yyv4715) { - yyv4715 = yyv4715[:yyrl4715] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 120) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4715 = make([]ContainerStatus, yyrl4715) + yyv1 = make([]ContainerStatus, yyrl1) } } else { - yyv4715 = make([]ContainerStatus, yyrl4715) + yyv1 = make([]ContainerStatus, yyrl1) } - yyc4715 = true - yyrr4715 = len(yyv4715) - if yyrg4715 { - copy(yyv4715, yyv24715) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4715 != len(yyv4715) { - yyv4715 = yyv4715[:yyl4715] - yyc4715 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4715 := 0 - for ; yyj4715 < yyrr4715; yyj4715++ { - yyh4715.ElemContainerState(yyj4715) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4715[yyj4715] = ContainerStatus{} + yyv1[yyj1] = ContainerStatus{} } else { - yyv4716 := &yyv4715[yyj4715] - yyv4716.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4715 { - for ; yyj4715 < yyl4715; yyj4715++ { - yyv4715 = append(yyv4715, ContainerStatus{}) - yyh4715.ElemContainerState(yyj4715) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ContainerStatus{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4715[yyj4715] = ContainerStatus{} + yyv1[yyj1] = ContainerStatus{} } else { - yyv4717 := &yyv4715[yyj4715] - yyv4717.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4715 := 0 - for ; !r.CheckBreak(); yyj4715++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4715 >= len(yyv4715) { - yyv4715 = append(yyv4715, ContainerStatus{}) // var yyz4715 ContainerStatus - yyc4715 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, 
ContainerStatus{}) // var yyz1 ContainerStatus + yyc1 = true } - yyh4715.ElemContainerState(yyj4715) - if yyj4715 < len(yyv4715) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4715[yyj4715] = ContainerStatus{} + yyv1[yyj1] = ContainerStatus{} } else { - yyv4718 := &yyv4715[yyj4715] - yyv4718.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -59600,17 +69470,17 @@ func (x codecSelfer1234) decSliceContainerStatus(v *[]ContainerStatus, d *codec1 } } - if yyj4715 < len(yyv4715) { - yyv4715 = yyv4715[:yyj4715] - yyc4715 = true - } else if yyj4715 == 0 && yyv4715 == nil { - yyv4715 = []ContainerStatus{} - yyc4715 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ContainerStatus{} + yyc1 = true } } - yyh4715.End() - if yyc4715 { - *v = yyv4715 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -59619,10 +69489,10 @@ func (x codecSelfer1234) encSlicePod(v []Pod, e *codec1978.Encoder) { z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4719 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4720 := &yyv4719 - yy4720.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -59632,83 +69502,86 @@ func (x codecSelfer1234) decSlicePod(v *[]Pod, d *codec1978.Decoder) { z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4721 := *v - yyh4721, yyl4721 := z.DecSliceHelperStart() - var yyc4721 bool - if yyl4721 == 0 { - if yyv4721 == nil { - yyv4721 = []Pod{} - yyc4721 = true - } else if len(yyv4721) != 0 { - yyv4721 = yyv4721[:0] - yyc4721 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Pod{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4721 > 0 { - var yyrr4721, yyrl4721 int - var yyrt4721 bool - if yyl4721 > cap(yyv4721) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4721 := len(yyv4721) > 0 - yyv24721 := yyv4721 - yyrl4721, yyrt4721 = z.DecInferLen(yyl4721, z.DecBasicHandle().MaxInitLen, 664) - if yyrt4721 { - if yyrl4721 <= cap(yyv4721) { - yyv4721 = yyv4721[:yyrl4721] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 736) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4721 = make([]Pod, yyrl4721) + yyv1 = make([]Pod, yyrl1) } } else { - yyv4721 = make([]Pod, yyrl4721) + yyv1 = make([]Pod, yyrl1) } - yyc4721 = true - yyrr4721 = len(yyv4721) - if yyrg4721 { - copy(yyv4721, yyv24721) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4721 != len(yyv4721) { - yyv4721 = yyv4721[:yyl4721] - yyc4721 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4721 := 0 - for ; yyj4721 < yyrr4721; yyj4721++ { - yyh4721.ElemContainerState(yyj4721) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4721[yyj4721] = Pod{} + yyv1[yyj1] = Pod{} } else { - yyv4722 := &yyv4721[yyj4721] - yyv4722.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4721 { - for ; yyj4721 < yyl4721; yyj4721++ { - yyv4721 = append(yyv4721, Pod{}) - yyh4721.ElemContainerState(yyj4721) + if yyrt1 
{ + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Pod{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4721[yyj4721] = Pod{} + yyv1[yyj1] = Pod{} } else { - yyv4723 := &yyv4721[yyj4721] - yyv4723.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4721 := 0 - for ; !r.CheckBreak(); yyj4721++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4721 >= len(yyv4721) { - yyv4721 = append(yyv4721, Pod{}) // var yyz4721 Pod - yyc4721 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Pod{}) // var yyz1 Pod + yyc1 = true } - yyh4721.ElemContainerState(yyj4721) - if yyj4721 < len(yyv4721) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4721[yyj4721] = Pod{} + yyv1[yyj1] = Pod{} } else { - yyv4724 := &yyv4721[yyj4721] - yyv4724.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -59716,17 +69589,17 @@ func (x codecSelfer1234) decSlicePod(v *[]Pod, d *codec1978.Decoder) { } } - if yyj4721 < len(yyv4721) { - yyv4721 = yyv4721[:yyj4721] - yyc4721 = true - } else if yyj4721 == 0 && yyv4721 == nil { - yyv4721 = []Pod{} - yyc4721 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Pod{} + yyc1 = true } } - yyh4721.End() - if yyc4721 { - *v = yyv4721 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -59735,10 +69608,10 @@ func (x codecSelfer1234) encSlicePodTemplate(v []PodTemplate, e *codec1978.Encod z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4725 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4726 := &yyv4725 - yy4726.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -59748,83 +69621,86 @@ func (x codecSelfer1234) decSlicePodTemplate(v *[]PodTemplate, d *codec1978.Deco z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4727 := *v - yyh4727, yyl4727 := z.DecSliceHelperStart() - var yyc4727 bool - if yyl4727 == 0 { - if yyv4727 == nil { - yyv4727 = []PodTemplate{} - yyc4727 = true - } else if len(yyv4727) != 0 { - yyv4727 = yyv4727[:0] - yyc4727 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodTemplate{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4727 > 0 { - var yyrr4727, yyrl4727 int - var yyrt4727 bool - if yyl4727 > cap(yyv4727) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4727 := len(yyv4727) > 0 - yyv24727 := yyv4727 - yyrl4727, yyrt4727 = z.DecInferLen(yyl4727, z.DecBasicHandle().MaxInitLen, 728) - if yyrt4727 { - if yyrl4727 <= cap(yyv4727) { - yyv4727 = yyv4727[:yyrl4727] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 784) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4727 = make([]PodTemplate, yyrl4727) + yyv1 = make([]PodTemplate, yyrl1) } } else { - yyv4727 = make([]PodTemplate, yyrl4727) + yyv1 = make([]PodTemplate, yyrl1) } - yyc4727 = true - yyrr4727 = len(yyv4727) - if yyrg4727 { - copy(yyv4727, yyv24727) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4727 != len(yyv4727) { - yyv4727 = yyv4727[:yyl4727] - yyc4727 = true + } else if yyl1 != len(yyv1) { + yyv1 
= yyv1[:yyl1] + yyc1 = true } - yyj4727 := 0 - for ; yyj4727 < yyrr4727; yyj4727++ { - yyh4727.ElemContainerState(yyj4727) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4727[yyj4727] = PodTemplate{} + yyv1[yyj1] = PodTemplate{} } else { - yyv4728 := &yyv4727[yyj4727] - yyv4728.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4727 { - for ; yyj4727 < yyl4727; yyj4727++ { - yyv4727 = append(yyv4727, PodTemplate{}) - yyh4727.ElemContainerState(yyj4727) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodTemplate{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4727[yyj4727] = PodTemplate{} + yyv1[yyj1] = PodTemplate{} } else { - yyv4729 := &yyv4727[yyj4727] - yyv4729.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4727 := 0 - for ; !r.CheckBreak(); yyj4727++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4727 >= len(yyv4727) { - yyv4727 = append(yyv4727, PodTemplate{}) // var yyz4727 PodTemplate - yyc4727 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodTemplate{}) // var yyz1 PodTemplate + yyc1 = true } - yyh4727.ElemContainerState(yyj4727) - if yyj4727 < len(yyv4727) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4727[yyj4727] = PodTemplate{} + yyv1[yyj1] = PodTemplate{} } else { - yyv4730 := &yyv4727[yyj4727] - yyv4730.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -59832,17 +69708,17 @@ func (x codecSelfer1234) decSlicePodTemplate(v *[]PodTemplate, d *codec1978.Deco } } - if yyj4727 < len(yyv4727) { - yyv4727 = yyv4727[:yyj4727] - yyc4727 = true - } else if yyj4727 == 0 && yyv4727 == nil { - yyv4727 = []PodTemplate{} - yyc4727 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodTemplate{} + yyc1 = true } } - yyh4727.End() - if yyc4727 { - *v = yyv4727 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -59851,10 +69727,10 @@ func (x codecSelfer1234) encSliceReplicationControllerCondition(v []ReplicationC z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4731 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4732 := &yyv4731 - yy4732.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -59864,83 +69740,86 @@ func (x codecSelfer1234) decSliceReplicationControllerCondition(v *[]Replication z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4733 := *v - yyh4733, yyl4733 := z.DecSliceHelperStart() - var yyc4733 bool - if yyl4733 == 0 { - if yyv4733 == nil { - yyv4733 = []ReplicationControllerCondition{} - yyc4733 = true - } else if len(yyv4733) != 0 { - yyv4733 = yyv4733[:0] - yyc4733 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ReplicationControllerCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4733 > 0 { - var yyrr4733, yyrl4733 int - var yyrt4733 bool - if yyl4733 > cap(yyv4733) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4733 := len(yyv4733) > 0 - yyv24733 := yyv4733 - yyrl4733, yyrt4733 = z.DecInferLen(yyl4733, z.DecBasicHandle().MaxInitLen, 88) - if yyrt4733 { - if 
yyrl4733 <= cap(yyv4733) { - yyv4733 = yyv4733[:yyrl4733] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 88) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4733 = make([]ReplicationControllerCondition, yyrl4733) + yyv1 = make([]ReplicationControllerCondition, yyrl1) } } else { - yyv4733 = make([]ReplicationControllerCondition, yyrl4733) + yyv1 = make([]ReplicationControllerCondition, yyrl1) } - yyc4733 = true - yyrr4733 = len(yyv4733) - if yyrg4733 { - copy(yyv4733, yyv24733) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4733 != len(yyv4733) { - yyv4733 = yyv4733[:yyl4733] - yyc4733 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4733 := 0 - for ; yyj4733 < yyrr4733; yyj4733++ { - yyh4733.ElemContainerState(yyj4733) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4733[yyj4733] = ReplicationControllerCondition{} + yyv1[yyj1] = ReplicationControllerCondition{} } else { - yyv4734 := &yyv4733[yyj4733] - yyv4734.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4733 { - for ; yyj4733 < yyl4733; yyj4733++ { - yyv4733 = append(yyv4733, ReplicationControllerCondition{}) - yyh4733.ElemContainerState(yyj4733) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ReplicationControllerCondition{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4733[yyj4733] = ReplicationControllerCondition{} + yyv1[yyj1] = ReplicationControllerCondition{} } else { - yyv4735 := &yyv4733[yyj4733] - yyv4735.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4733 := 0 - for ; !r.CheckBreak(); yyj4733++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4733 >= len(yyv4733) { - yyv4733 = append(yyv4733, ReplicationControllerCondition{}) // var yyz4733 ReplicationControllerCondition - yyc4733 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ReplicationControllerCondition{}) // var yyz1 ReplicationControllerCondition + yyc1 = true } - yyh4733.ElemContainerState(yyj4733) - if yyj4733 < len(yyv4733) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4733[yyj4733] = ReplicationControllerCondition{} + yyv1[yyj1] = ReplicationControllerCondition{} } else { - yyv4736 := &yyv4733[yyj4733] - yyv4736.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -59948,17 +69827,17 @@ func (x codecSelfer1234) decSliceReplicationControllerCondition(v *[]Replication } } - if yyj4733 < len(yyv4733) { - yyv4733 = yyv4733[:yyj4733] - yyc4733 = true - } else if yyj4733 == 0 && yyv4733 == nil { - yyv4733 = []ReplicationControllerCondition{} - yyc4733 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ReplicationControllerCondition{} + yyc1 = true } } - yyh4733.End() - if yyc4733 { - *v = yyv4733 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -59967,10 +69846,10 @@ func (x codecSelfer1234) encSliceReplicationController(v []ReplicationController z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4737 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4738 := &yyv4737 - yy4738.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -59980,83 +69859,86 @@ 
func (x codecSelfer1234) decSliceReplicationController(v *[]ReplicationControlle z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4739 := *v - yyh4739, yyl4739 := z.DecSliceHelperStart() - var yyc4739 bool - if yyl4739 == 0 { - if yyv4739 == nil { - yyv4739 = []ReplicationController{} - yyc4739 = true - } else if len(yyv4739) != 0 { - yyv4739 = yyv4739[:0] - yyc4739 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ReplicationController{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4739 > 0 { - var yyrr4739, yyrl4739 int - var yyrt4739 bool - if yyl4739 > cap(yyv4739) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4739 := len(yyv4739) > 0 - yyv24739 := yyv4739 - yyrl4739, yyrt4739 = z.DecInferLen(yyl4739, z.DecBasicHandle().MaxInitLen, 336) - if yyrt4739 { - if yyrl4739 <= cap(yyv4739) { - yyv4739 = yyv4739[:yyrl4739] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 336) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4739 = make([]ReplicationController, yyrl4739) + yyv1 = make([]ReplicationController, yyrl1) } } else { - yyv4739 = make([]ReplicationController, yyrl4739) + yyv1 = make([]ReplicationController, yyrl1) } - yyc4739 = true - yyrr4739 = len(yyv4739) - if yyrg4739 { - copy(yyv4739, yyv24739) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4739 != len(yyv4739) { - yyv4739 = yyv4739[:yyl4739] - yyc4739 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4739 := 0 - for ; yyj4739 < yyrr4739; yyj4739++ { - yyh4739.ElemContainerState(yyj4739) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4739[yyj4739] = ReplicationController{} + yyv1[yyj1] = ReplicationController{} } else { - yyv4740 := &yyv4739[yyj4739] - yyv4740.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4739 { - for ; yyj4739 < yyl4739; yyj4739++ { - yyv4739 = append(yyv4739, ReplicationController{}) - yyh4739.ElemContainerState(yyj4739) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ReplicationController{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4739[yyj4739] = ReplicationController{} + yyv1[yyj1] = ReplicationController{} } else { - yyv4741 := &yyv4739[yyj4739] - yyv4741.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4739 := 0 - for ; !r.CheckBreak(); yyj4739++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4739 >= len(yyv4739) { - yyv4739 = append(yyv4739, ReplicationController{}) // var yyz4739 ReplicationController - yyc4739 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ReplicationController{}) // var yyz1 ReplicationController + yyc1 = true } - yyh4739.ElemContainerState(yyj4739) - if yyj4739 < len(yyv4739) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4739[yyj4739] = ReplicationController{} + yyv1[yyj1] = ReplicationController{} } else { - yyv4742 := &yyv4739[yyj4739] - yyv4742.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -60064,17 +69946,17 @@ func (x codecSelfer1234) decSliceReplicationController(v *[]ReplicationControlle } } - if yyj4739 < len(yyv4739) { - yyv4739 = 
yyv4739[:yyj4739] - yyc4739 = true - } else if yyj4739 == 0 && yyv4739 == nil { - yyv4739 = []ReplicationController{} - yyc4739 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ReplicationController{} + yyc1 = true } } - yyh4739.End() - if yyc4739 { - *v = yyv4739 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -60083,10 +69965,10 @@ func (x codecSelfer1234) encSliceLoadBalancerIngress(v []LoadBalancerIngress, e z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4743 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4744 := &yyv4743 - yy4744.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -60096,83 +69978,86 @@ func (x codecSelfer1234) decSliceLoadBalancerIngress(v *[]LoadBalancerIngress, d z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4745 := *v - yyh4745, yyl4745 := z.DecSliceHelperStart() - var yyc4745 bool - if yyl4745 == 0 { - if yyv4745 == nil { - yyv4745 = []LoadBalancerIngress{} - yyc4745 = true - } else if len(yyv4745) != 0 { - yyv4745 = yyv4745[:0] - yyc4745 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []LoadBalancerIngress{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4745 > 0 { - var yyrr4745, yyrl4745 int - var yyrt4745 bool - if yyl4745 > cap(yyv4745) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4745 := len(yyv4745) > 0 - yyv24745 := yyv4745 - yyrl4745, yyrt4745 = z.DecInferLen(yyl4745, z.DecBasicHandle().MaxInitLen, 32) - if yyrt4745 { - if yyrl4745 <= cap(yyv4745) { - yyv4745 = yyv4745[:yyrl4745] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4745 = make([]LoadBalancerIngress, yyrl4745) + yyv1 = make([]LoadBalancerIngress, yyrl1) } } else { - yyv4745 = make([]LoadBalancerIngress, yyrl4745) + yyv1 = make([]LoadBalancerIngress, yyrl1) } - yyc4745 = true - yyrr4745 = len(yyv4745) - if yyrg4745 { - copy(yyv4745, yyv24745) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4745 != len(yyv4745) { - yyv4745 = yyv4745[:yyl4745] - yyc4745 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4745 := 0 - for ; yyj4745 < yyrr4745; yyj4745++ { - yyh4745.ElemContainerState(yyj4745) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4745[yyj4745] = LoadBalancerIngress{} + yyv1[yyj1] = LoadBalancerIngress{} } else { - yyv4746 := &yyv4745[yyj4745] - yyv4746.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4745 { - for ; yyj4745 < yyl4745; yyj4745++ { - yyv4745 = append(yyv4745, LoadBalancerIngress{}) - yyh4745.ElemContainerState(yyj4745) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, LoadBalancerIngress{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4745[yyj4745] = LoadBalancerIngress{} + yyv1[yyj1] = LoadBalancerIngress{} } else { - yyv4747 := &yyv4745[yyj4745] - yyv4747.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4745 := 0 - for ; !r.CheckBreak(); 
yyj4745++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4745 >= len(yyv4745) { - yyv4745 = append(yyv4745, LoadBalancerIngress{}) // var yyz4745 LoadBalancerIngress - yyc4745 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, LoadBalancerIngress{}) // var yyz1 LoadBalancerIngress + yyc1 = true } - yyh4745.ElemContainerState(yyj4745) - if yyj4745 < len(yyv4745) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4745[yyj4745] = LoadBalancerIngress{} + yyv1[yyj1] = LoadBalancerIngress{} } else { - yyv4748 := &yyv4745[yyj4745] - yyv4748.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -60180,17 +70065,17 @@ func (x codecSelfer1234) decSliceLoadBalancerIngress(v *[]LoadBalancerIngress, d } } - if yyj4745 < len(yyv4745) { - yyv4745 = yyv4745[:yyj4745] - yyc4745 = true - } else if yyj4745 == 0 && yyv4745 == nil { - yyv4745 = []LoadBalancerIngress{} - yyc4745 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []LoadBalancerIngress{} + yyc1 = true } } - yyh4745.End() - if yyc4745 { - *v = yyv4745 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -60199,10 +70084,10 @@ func (x codecSelfer1234) encSliceServicePort(v []ServicePort, e *codec1978.Encod z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4749 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4750 := &yyv4749 - yy4750.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -60212,83 +70097,86 @@ func (x codecSelfer1234) decSliceServicePort(v *[]ServicePort, d *codec1978.Deco z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4751 := *v - yyh4751, yyl4751 := z.DecSliceHelperStart() - var yyc4751 bool - if yyl4751 == 0 { - if yyv4751 == nil { - yyv4751 = []ServicePort{} - yyc4751 = true - } else if len(yyv4751) != 0 { - yyv4751 = yyv4751[:0] - yyc4751 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ServicePort{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4751 > 0 { - var yyrr4751, yyrl4751 int - var yyrt4751 bool - if yyl4751 > cap(yyv4751) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4751 := len(yyv4751) > 0 - yyv24751 := yyv4751 - yyrl4751, yyrt4751 = z.DecInferLen(yyl4751, z.DecBasicHandle().MaxInitLen, 80) - if yyrt4751 { - if yyrl4751 <= cap(yyv4751) { - yyv4751 = yyv4751[:yyrl4751] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 80) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4751 = make([]ServicePort, yyrl4751) + yyv1 = make([]ServicePort, yyrl1) } } else { - yyv4751 = make([]ServicePort, yyrl4751) + yyv1 = make([]ServicePort, yyrl1) } - yyc4751 = true - yyrr4751 = len(yyv4751) - if yyrg4751 { - copy(yyv4751, yyv24751) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4751 != len(yyv4751) { - yyv4751 = yyv4751[:yyl4751] - yyc4751 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4751 := 0 - for ; yyj4751 < yyrr4751; yyj4751++ { - yyh4751.ElemContainerState(yyj4751) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if 
r.TryDecodeAsNil() { - yyv4751[yyj4751] = ServicePort{} + yyv1[yyj1] = ServicePort{} } else { - yyv4752 := &yyv4751[yyj4751] - yyv4752.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4751 { - for ; yyj4751 < yyl4751; yyj4751++ { - yyv4751 = append(yyv4751, ServicePort{}) - yyh4751.ElemContainerState(yyj4751) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ServicePort{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4751[yyj4751] = ServicePort{} + yyv1[yyj1] = ServicePort{} } else { - yyv4753 := &yyv4751[yyj4751] - yyv4753.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4751 := 0 - for ; !r.CheckBreak(); yyj4751++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4751 >= len(yyv4751) { - yyv4751 = append(yyv4751, ServicePort{}) // var yyz4751 ServicePort - yyc4751 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ServicePort{}) // var yyz1 ServicePort + yyc1 = true } - yyh4751.ElemContainerState(yyj4751) - if yyj4751 < len(yyv4751) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4751[yyj4751] = ServicePort{} + yyv1[yyj1] = ServicePort{} } else { - yyv4754 := &yyv4751[yyj4751] - yyv4754.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -60296,17 +70184,17 @@ func (x codecSelfer1234) decSliceServicePort(v *[]ServicePort, d *codec1978.Deco } } - if yyj4751 < len(yyv4751) { - yyv4751 = yyv4751[:yyj4751] - yyc4751 = true - } else if yyj4751 == 0 && yyv4751 == nil { - yyv4751 = []ServicePort{} - yyc4751 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ServicePort{} + yyc1 = true } } - yyh4751.End() - if yyc4751 { - *v = yyv4751 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -60315,10 +70203,10 @@ func (x codecSelfer1234) encSliceService(v []Service, e *codec1978.Encoder) { z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4755 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4756 := &yyv4755 - yy4756.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -60328,83 +70216,86 @@ func (x codecSelfer1234) decSliceService(v *[]Service, d *codec1978.Decoder) { z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4757 := *v - yyh4757, yyl4757 := z.DecSliceHelperStart() - var yyc4757 bool - if yyl4757 == 0 { - if yyv4757 == nil { - yyv4757 = []Service{} - yyc4757 = true - } else if len(yyv4757) != 0 { - yyv4757 = yyv4757[:0] - yyc4757 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Service{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4757 > 0 { - var yyrr4757, yyrl4757 int - var yyrt4757 bool - if yyl4757 > cap(yyv4757) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4757 := len(yyv4757) > 0 - yyv24757 := yyv4757 - yyrl4757, yyrt4757 = z.DecInferLen(yyl4757, z.DecBasicHandle().MaxInitLen, 464) - if yyrt4757 { - if yyrl4757 <= cap(yyv4757) { - yyv4757 = yyv4757[:yyrl4757] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 464) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - 
yyv4757 = make([]Service, yyrl4757) + yyv1 = make([]Service, yyrl1) } } else { - yyv4757 = make([]Service, yyrl4757) + yyv1 = make([]Service, yyrl1) } - yyc4757 = true - yyrr4757 = len(yyv4757) - if yyrg4757 { - copy(yyv4757, yyv24757) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4757 != len(yyv4757) { - yyv4757 = yyv4757[:yyl4757] - yyc4757 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4757 := 0 - for ; yyj4757 < yyrr4757; yyj4757++ { - yyh4757.ElemContainerState(yyj4757) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4757[yyj4757] = Service{} + yyv1[yyj1] = Service{} } else { - yyv4758 := &yyv4757[yyj4757] - yyv4758.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4757 { - for ; yyj4757 < yyl4757; yyj4757++ { - yyv4757 = append(yyv4757, Service{}) - yyh4757.ElemContainerState(yyj4757) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Service{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4757[yyj4757] = Service{} + yyv1[yyj1] = Service{} } else { - yyv4759 := &yyv4757[yyj4757] - yyv4759.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4757 := 0 - for ; !r.CheckBreak(); yyj4757++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4757 >= len(yyv4757) { - yyv4757 = append(yyv4757, Service{}) // var yyz4757 Service - yyc4757 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Service{}) // var yyz1 Service + yyc1 = true } - yyh4757.ElemContainerState(yyj4757) - if yyj4757 < len(yyv4757) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4757[yyj4757] = Service{} + yyv1[yyj1] = Service{} } else { - yyv4760 := &yyv4757[yyj4757] - yyv4760.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -60412,17 +70303,17 @@ func (x codecSelfer1234) decSliceService(v *[]Service, d *codec1978.Decoder) { } } - if yyj4757 < len(yyv4757) { - yyv4757 = yyv4757[:yyj4757] - yyc4757 = true - } else if yyj4757 == 0 && yyv4757 == nil { - yyv4757 = []Service{} - yyc4757 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Service{} + yyc1 = true } } - yyh4757.End() - if yyc4757 { - *v = yyv4757 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -60431,10 +70322,10 @@ func (x codecSelfer1234) encSliceObjectReference(v []ObjectReference, e *codec19 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4761 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4762 := &yyv4761 - yy4762.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -60444,83 +70335,86 @@ func (x codecSelfer1234) decSliceObjectReference(v *[]ObjectReference, d *codec1 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4763 := *v - yyh4763, yyl4763 := z.DecSliceHelperStart() - var yyc4763 bool - if yyl4763 == 0 { - if yyv4763 == nil { - yyv4763 = []ObjectReference{} - yyc4763 = true - } else if len(yyv4763) != 0 { - yyv4763 = yyv4763[:0] - yyc4763 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ObjectReference{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4763 > 0 { - var yyrr4763, yyrl4763 int 
- var yyrt4763 bool - if yyl4763 > cap(yyv4763) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4763 := len(yyv4763) > 0 - yyv24763 := yyv4763 - yyrl4763, yyrt4763 = z.DecInferLen(yyl4763, z.DecBasicHandle().MaxInitLen, 112) - if yyrt4763 { - if yyrl4763 <= cap(yyv4763) { - yyv4763 = yyv4763[:yyrl4763] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4763 = make([]ObjectReference, yyrl4763) + yyv1 = make([]ObjectReference, yyrl1) } } else { - yyv4763 = make([]ObjectReference, yyrl4763) + yyv1 = make([]ObjectReference, yyrl1) } - yyc4763 = true - yyrr4763 = len(yyv4763) - if yyrg4763 { - copy(yyv4763, yyv24763) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4763 != len(yyv4763) { - yyv4763 = yyv4763[:yyl4763] - yyc4763 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4763 := 0 - for ; yyj4763 < yyrr4763; yyj4763++ { - yyh4763.ElemContainerState(yyj4763) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4763[yyj4763] = ObjectReference{} + yyv1[yyj1] = ObjectReference{} } else { - yyv4764 := &yyv4763[yyj4763] - yyv4764.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4763 { - for ; yyj4763 < yyl4763; yyj4763++ { - yyv4763 = append(yyv4763, ObjectReference{}) - yyh4763.ElemContainerState(yyj4763) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ObjectReference{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4763[yyj4763] = ObjectReference{} + yyv1[yyj1] = ObjectReference{} } else { - yyv4765 := &yyv4763[yyj4763] - yyv4765.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4763 := 0 - for ; !r.CheckBreak(); yyj4763++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4763 >= len(yyv4763) { - yyv4763 = append(yyv4763, ObjectReference{}) // var yyz4763 ObjectReference - yyc4763 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ObjectReference{}) // var yyz1 ObjectReference + yyc1 = true } - yyh4763.ElemContainerState(yyj4763) - if yyj4763 < len(yyv4763) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4763[yyj4763] = ObjectReference{} + yyv1[yyj1] = ObjectReference{} } else { - yyv4766 := &yyv4763[yyj4763] - yyv4766.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -60528,17 +70422,17 @@ func (x codecSelfer1234) decSliceObjectReference(v *[]ObjectReference, d *codec1 } } - if yyj4763 < len(yyv4763) { - yyv4763 = yyv4763[:yyj4763] - yyc4763 = true - } else if yyj4763 == 0 && yyv4763 == nil { - yyv4763 = []ObjectReference{} - yyc4763 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ObjectReference{} + yyc1 = true } } - yyh4763.End() - if yyc4763 { - *v = yyv4763 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -60547,10 +70441,10 @@ func (x codecSelfer1234) encSliceServiceAccount(v []ServiceAccount, e *codec1978 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4767 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4768 := &yyv4767 - yy4768.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -60560,83 +70454,86 @@ func (x codecSelfer1234) decSliceServiceAccount(v *[]ServiceAccount, d *codec197 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4769 := *v - yyh4769, yyl4769 := z.DecSliceHelperStart() - var yyc4769 bool - if yyl4769 == 0 { - if yyv4769 == nil { - yyv4769 = []ServiceAccount{} - yyc4769 = true - } else if len(yyv4769) != 0 { - yyv4769 = yyv4769[:0] - yyc4769 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ServiceAccount{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4769 > 0 { - var yyrr4769, yyrl4769 int - var yyrt4769 bool - if yyl4769 > cap(yyv4769) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4769 := len(yyv4769) > 0 - yyv24769 := yyv4769 - yyrl4769, yyrt4769 = z.DecInferLen(yyl4769, z.DecBasicHandle().MaxInitLen, 304) - if yyrt4769 { - if yyrl4769 <= cap(yyv4769) { - yyv4769 = yyv4769[:yyrl4769] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 312) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4769 = make([]ServiceAccount, yyrl4769) + yyv1 = make([]ServiceAccount, yyrl1) } } else { - yyv4769 = make([]ServiceAccount, yyrl4769) + yyv1 = make([]ServiceAccount, yyrl1) } - yyc4769 = true - yyrr4769 = len(yyv4769) - if yyrg4769 { - copy(yyv4769, yyv24769) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4769 != len(yyv4769) { - yyv4769 = yyv4769[:yyl4769] - yyc4769 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4769 := 0 - for ; yyj4769 < yyrr4769; yyj4769++ { - yyh4769.ElemContainerState(yyj4769) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4769[yyj4769] = ServiceAccount{} + yyv1[yyj1] = ServiceAccount{} } else { - yyv4770 := &yyv4769[yyj4769] - yyv4770.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4769 { - for ; yyj4769 < yyl4769; yyj4769++ { - yyv4769 = append(yyv4769, ServiceAccount{}) - yyh4769.ElemContainerState(yyj4769) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ServiceAccount{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4769[yyj4769] = ServiceAccount{} + yyv1[yyj1] = ServiceAccount{} } else { - yyv4771 := &yyv4769[yyj4769] - yyv4771.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4769 := 0 - for ; !r.CheckBreak(); yyj4769++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4769 >= len(yyv4769) { - yyv4769 = append(yyv4769, ServiceAccount{}) // var yyz4769 ServiceAccount - yyc4769 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ServiceAccount{}) // var yyz1 ServiceAccount + yyc1 = true } - yyh4769.ElemContainerState(yyj4769) - if yyj4769 < len(yyv4769) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4769[yyj4769] = ServiceAccount{} + yyv1[yyj1] = ServiceAccount{} } else { - yyv4772 := &yyv4769[yyj4769] - yyv4772.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -60644,17 +70541,17 @@ func (x codecSelfer1234) decSliceServiceAccount(v *[]ServiceAccount, d *codec197 } } - if yyj4769 < len(yyv4769) { - yyv4769 = yyv4769[:yyj4769] - yyc4769 = true - } else 
if yyj4769 == 0 && yyv4769 == nil { - yyv4769 = []ServiceAccount{} - yyc4769 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ServiceAccount{} + yyc1 = true } } - yyh4769.End() - if yyc4769 { - *v = yyv4769 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -60663,10 +70560,10 @@ func (x codecSelfer1234) encSliceEndpointSubset(v []EndpointSubset, e *codec1978 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4773 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4774 := &yyv4773 - yy4774.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -60676,83 +70573,86 @@ func (x codecSelfer1234) decSliceEndpointSubset(v *[]EndpointSubset, d *codec197 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4775 := *v - yyh4775, yyl4775 := z.DecSliceHelperStart() - var yyc4775 bool - if yyl4775 == 0 { - if yyv4775 == nil { - yyv4775 = []EndpointSubset{} - yyc4775 = true - } else if len(yyv4775) != 0 { - yyv4775 = yyv4775[:0] - yyc4775 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []EndpointSubset{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4775 > 0 { - var yyrr4775, yyrl4775 int - var yyrt4775 bool - if yyl4775 > cap(yyv4775) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4775 := len(yyv4775) > 0 - yyv24775 := yyv4775 - yyrl4775, yyrt4775 = z.DecInferLen(yyl4775, z.DecBasicHandle().MaxInitLen, 72) - if yyrt4775 { - if yyrl4775 <= cap(yyv4775) { - yyv4775 = yyv4775[:yyrl4775] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4775 = make([]EndpointSubset, yyrl4775) + yyv1 = make([]EndpointSubset, yyrl1) } } else { - yyv4775 = make([]EndpointSubset, yyrl4775) + yyv1 = make([]EndpointSubset, yyrl1) } - yyc4775 = true - yyrr4775 = len(yyv4775) - if yyrg4775 { - copy(yyv4775, yyv24775) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4775 != len(yyv4775) { - yyv4775 = yyv4775[:yyl4775] - yyc4775 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4775 := 0 - for ; yyj4775 < yyrr4775; yyj4775++ { - yyh4775.ElemContainerState(yyj4775) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4775[yyj4775] = EndpointSubset{} + yyv1[yyj1] = EndpointSubset{} } else { - yyv4776 := &yyv4775[yyj4775] - yyv4776.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4775 { - for ; yyj4775 < yyl4775; yyj4775++ { - yyv4775 = append(yyv4775, EndpointSubset{}) - yyh4775.ElemContainerState(yyj4775) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, EndpointSubset{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4775[yyj4775] = EndpointSubset{} + yyv1[yyj1] = EndpointSubset{} } else { - yyv4777 := &yyv4775[yyj4775] - yyv4777.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4775 := 0 - for ; !r.CheckBreak(); yyj4775++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4775 >= len(yyv4775) { - yyv4775 = append(yyv4775, 
EndpointSubset{}) // var yyz4775 EndpointSubset - yyc4775 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, EndpointSubset{}) // var yyz1 EndpointSubset + yyc1 = true } - yyh4775.ElemContainerState(yyj4775) - if yyj4775 < len(yyv4775) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4775[yyj4775] = EndpointSubset{} + yyv1[yyj1] = EndpointSubset{} } else { - yyv4778 := &yyv4775[yyj4775] - yyv4778.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -60760,17 +70660,17 @@ func (x codecSelfer1234) decSliceEndpointSubset(v *[]EndpointSubset, d *codec197 } } - if yyj4775 < len(yyv4775) { - yyv4775 = yyv4775[:yyj4775] - yyc4775 = true - } else if yyj4775 == 0 && yyv4775 == nil { - yyv4775 = []EndpointSubset{} - yyc4775 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []EndpointSubset{} + yyc1 = true } } - yyh4775.End() - if yyc4775 { - *v = yyv4775 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -60779,10 +70679,10 @@ func (x codecSelfer1234) encSliceEndpointAddress(v []EndpointAddress, e *codec19 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4779 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4780 := &yyv4779 - yy4780.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -60792,83 +70692,86 @@ func (x codecSelfer1234) decSliceEndpointAddress(v *[]EndpointAddress, d *codec1 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4781 := *v - yyh4781, yyl4781 := z.DecSliceHelperStart() - var yyc4781 bool - if yyl4781 == 0 { - if yyv4781 == nil { - yyv4781 = []EndpointAddress{} - yyc4781 = true - } else if len(yyv4781) != 0 { - yyv4781 = yyv4781[:0] - yyc4781 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []EndpointAddress{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4781 > 0 { - var yyrr4781, yyrl4781 int - var yyrt4781 bool - if yyl4781 > cap(yyv4781) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4781 := len(yyv4781) > 0 - yyv24781 := yyv4781 - yyrl4781, yyrt4781 = z.DecInferLen(yyl4781, z.DecBasicHandle().MaxInitLen, 48) - if yyrt4781 { - if yyrl4781 <= cap(yyv4781) { - yyv4781 = yyv4781[:yyrl4781] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4781 = make([]EndpointAddress, yyrl4781) + yyv1 = make([]EndpointAddress, yyrl1) } } else { - yyv4781 = make([]EndpointAddress, yyrl4781) + yyv1 = make([]EndpointAddress, yyrl1) } - yyc4781 = true - yyrr4781 = len(yyv4781) - if yyrg4781 { - copy(yyv4781, yyv24781) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4781 != len(yyv4781) { - yyv4781 = yyv4781[:yyl4781] - yyc4781 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4781 := 0 - for ; yyj4781 < yyrr4781; yyj4781++ { - yyh4781.ElemContainerState(yyj4781) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4781[yyj4781] = EndpointAddress{} + yyv1[yyj1] = EndpointAddress{} } else { - yyv4782 := &yyv4781[yyj4781] 
- yyv4782.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4781 { - for ; yyj4781 < yyl4781; yyj4781++ { - yyv4781 = append(yyv4781, EndpointAddress{}) - yyh4781.ElemContainerState(yyj4781) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, EndpointAddress{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4781[yyj4781] = EndpointAddress{} + yyv1[yyj1] = EndpointAddress{} } else { - yyv4783 := &yyv4781[yyj4781] - yyv4783.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4781 := 0 - for ; !r.CheckBreak(); yyj4781++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4781 >= len(yyv4781) { - yyv4781 = append(yyv4781, EndpointAddress{}) // var yyz4781 EndpointAddress - yyc4781 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, EndpointAddress{}) // var yyz1 EndpointAddress + yyc1 = true } - yyh4781.ElemContainerState(yyj4781) - if yyj4781 < len(yyv4781) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4781[yyj4781] = EndpointAddress{} + yyv1[yyj1] = EndpointAddress{} } else { - yyv4784 := &yyv4781[yyj4781] - yyv4784.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -60876,17 +70779,17 @@ func (x codecSelfer1234) decSliceEndpointAddress(v *[]EndpointAddress, d *codec1 } } - if yyj4781 < len(yyv4781) { - yyv4781 = yyv4781[:yyj4781] - yyc4781 = true - } else if yyj4781 == 0 && yyv4781 == nil { - yyv4781 = []EndpointAddress{} - yyc4781 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []EndpointAddress{} + yyc1 = true } } - yyh4781.End() - if yyc4781 { - *v = yyv4781 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -60895,10 +70798,10 @@ func (x codecSelfer1234) encSliceEndpointPort(v []EndpointPort, e *codec1978.Enc z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4785 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4786 := &yyv4785 - yy4786.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -60908,83 +70811,86 @@ func (x codecSelfer1234) decSliceEndpointPort(v *[]EndpointPort, d *codec1978.De z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4787 := *v - yyh4787, yyl4787 := z.DecSliceHelperStart() - var yyc4787 bool - if yyl4787 == 0 { - if yyv4787 == nil { - yyv4787 = []EndpointPort{} - yyc4787 = true - } else if len(yyv4787) != 0 { - yyv4787 = yyv4787[:0] - yyc4787 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []EndpointPort{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4787 > 0 { - var yyrr4787, yyrl4787 int - var yyrt4787 bool - if yyl4787 > cap(yyv4787) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4787 := len(yyv4787) > 0 - yyv24787 := yyv4787 - yyrl4787, yyrt4787 = z.DecInferLen(yyl4787, z.DecBasicHandle().MaxInitLen, 40) - if yyrt4787 { - if yyrl4787 <= cap(yyv4787) { - yyv4787 = yyv4787[:yyrl4787] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4787 = make([]EndpointPort, yyrl4787) + yyv1 = make([]EndpointPort, 
yyrl1) } } else { - yyv4787 = make([]EndpointPort, yyrl4787) + yyv1 = make([]EndpointPort, yyrl1) } - yyc4787 = true - yyrr4787 = len(yyv4787) - if yyrg4787 { - copy(yyv4787, yyv24787) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4787 != len(yyv4787) { - yyv4787 = yyv4787[:yyl4787] - yyc4787 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4787 := 0 - for ; yyj4787 < yyrr4787; yyj4787++ { - yyh4787.ElemContainerState(yyj4787) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4787[yyj4787] = EndpointPort{} + yyv1[yyj1] = EndpointPort{} } else { - yyv4788 := &yyv4787[yyj4787] - yyv4788.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4787 { - for ; yyj4787 < yyl4787; yyj4787++ { - yyv4787 = append(yyv4787, EndpointPort{}) - yyh4787.ElemContainerState(yyj4787) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, EndpointPort{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4787[yyj4787] = EndpointPort{} + yyv1[yyj1] = EndpointPort{} } else { - yyv4789 := &yyv4787[yyj4787] - yyv4789.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4787 := 0 - for ; !r.CheckBreak(); yyj4787++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4787 >= len(yyv4787) { - yyv4787 = append(yyv4787, EndpointPort{}) // var yyz4787 EndpointPort - yyc4787 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, EndpointPort{}) // var yyz1 EndpointPort + yyc1 = true } - yyh4787.ElemContainerState(yyj4787) - if yyj4787 < len(yyv4787) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4787[yyj4787] = EndpointPort{} + yyv1[yyj1] = EndpointPort{} } else { - yyv4790 := &yyv4787[yyj4787] - yyv4790.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -60992,17 +70898,17 @@ func (x codecSelfer1234) decSliceEndpointPort(v *[]EndpointPort, d *codec1978.De } } - if yyj4787 < len(yyv4787) { - yyv4787 = yyv4787[:yyj4787] - yyc4787 = true - } else if yyj4787 == 0 && yyv4787 == nil { - yyv4787 = []EndpointPort{} - yyc4787 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []EndpointPort{} + yyc1 = true } } - yyh4787.End() - if yyc4787 { - *v = yyv4787 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -61011,10 +70917,10 @@ func (x codecSelfer1234) encSliceEndpoints(v []Endpoints, e *codec1978.Encoder) z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4791 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4792 := &yyv4791 - yy4792.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -61024,83 +70930,86 @@ func (x codecSelfer1234) decSliceEndpoints(v *[]Endpoints, d *codec1978.Decoder) z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4793 := *v - yyh4793, yyl4793 := z.DecSliceHelperStart() - var yyc4793 bool - if yyl4793 == 0 { - if yyv4793 == nil { - yyv4793 = []Endpoints{} - yyc4793 = true - } else if len(yyv4793) != 0 { - yyv4793 = yyv4793[:0] - yyc4793 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Endpoints{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4793 > 0 { - var yyrr4793, 
yyrl4793 int - var yyrt4793 bool - if yyl4793 > cap(yyv4793) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4793 := len(yyv4793) > 0 - yyv24793 := yyv4793 - yyrl4793, yyrt4793 = z.DecInferLen(yyl4793, z.DecBasicHandle().MaxInitLen, 280) - if yyrt4793 { - if yyrl4793 <= cap(yyv4793) { - yyv4793 = yyv4793[:yyrl4793] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4793 = make([]Endpoints, yyrl4793) + yyv1 = make([]Endpoints, yyrl1) } } else { - yyv4793 = make([]Endpoints, yyrl4793) + yyv1 = make([]Endpoints, yyrl1) } - yyc4793 = true - yyrr4793 = len(yyv4793) - if yyrg4793 { - copy(yyv4793, yyv24793) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4793 != len(yyv4793) { - yyv4793 = yyv4793[:yyl4793] - yyc4793 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4793 := 0 - for ; yyj4793 < yyrr4793; yyj4793++ { - yyh4793.ElemContainerState(yyj4793) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4793[yyj4793] = Endpoints{} + yyv1[yyj1] = Endpoints{} } else { - yyv4794 := &yyv4793[yyj4793] - yyv4794.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4793 { - for ; yyj4793 < yyl4793; yyj4793++ { - yyv4793 = append(yyv4793, Endpoints{}) - yyh4793.ElemContainerState(yyj4793) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Endpoints{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4793[yyj4793] = Endpoints{} + yyv1[yyj1] = Endpoints{} } else { - yyv4795 := &yyv4793[yyj4793] - yyv4795.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4793 := 0 - for ; !r.CheckBreak(); yyj4793++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4793 >= len(yyv4793) { - yyv4793 = append(yyv4793, Endpoints{}) // var yyz4793 Endpoints - yyc4793 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Endpoints{}) // var yyz1 Endpoints + yyc1 = true } - yyh4793.ElemContainerState(yyj4793) - if yyj4793 < len(yyv4793) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4793[yyj4793] = Endpoints{} + yyv1[yyj1] = Endpoints{} } else { - yyv4796 := &yyv4793[yyj4793] - yyv4796.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -61108,17 +71017,136 @@ func (x codecSelfer1234) decSliceEndpoints(v *[]Endpoints, d *codec1978.Decoder) } } - if yyj4793 < len(yyv4793) { - yyv4793 = yyv4793[:yyj4793] - yyc4793 = true - } else if yyj4793 == 0 && yyv4793 == nil { - yyv4793 = []Endpoints{} - yyc4793 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Endpoints{} + yyc1 = true } } - yyh4793.End() - if yyc4793 { - *v = yyv4793 + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceTaint(v []Taint, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceTaint(v *[]Taint, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Taint{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Taint, yyrl1) + } + } else { + yyv1 = make([]Taint, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Taint{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Taint{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Taint{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Taint{}) // var yyz1 Taint + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Taint{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Taint{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -61127,10 +71155,10 @@ func (x codecSelfer1234) encSliceNodeCondition(v []NodeCondition, e *codec1978.E z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4797 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4798 := &yyv4797 - yy4798.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -61140,83 +71168,86 @@ func (x codecSelfer1234) decSliceNodeCondition(v *[]NodeCondition, d *codec1978. 
z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4799 := *v - yyh4799, yyl4799 := z.DecSliceHelperStart() - var yyc4799 bool - if yyl4799 == 0 { - if yyv4799 == nil { - yyv4799 = []NodeCondition{} - yyc4799 = true - } else if len(yyv4799) != 0 { - yyv4799 = yyv4799[:0] - yyc4799 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NodeCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4799 > 0 { - var yyrr4799, yyrl4799 int - var yyrt4799 bool - if yyl4799 > cap(yyv4799) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4799 := len(yyv4799) > 0 - yyv24799 := yyv4799 - yyrl4799, yyrt4799 = z.DecInferLen(yyl4799, z.DecBasicHandle().MaxInitLen, 112) - if yyrt4799 { - if yyrl4799 <= cap(yyv4799) { - yyv4799 = yyv4799[:yyrl4799] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4799 = make([]NodeCondition, yyrl4799) + yyv1 = make([]NodeCondition, yyrl1) } } else { - yyv4799 = make([]NodeCondition, yyrl4799) + yyv1 = make([]NodeCondition, yyrl1) } - yyc4799 = true - yyrr4799 = len(yyv4799) - if yyrg4799 { - copy(yyv4799, yyv24799) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4799 != len(yyv4799) { - yyv4799 = yyv4799[:yyl4799] - yyc4799 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4799 := 0 - for ; yyj4799 < yyrr4799; yyj4799++ { - yyh4799.ElemContainerState(yyj4799) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4799[yyj4799] = NodeCondition{} + yyv1[yyj1] = NodeCondition{} } else { - yyv4800 := &yyv4799[yyj4799] - yyv4800.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4799 { - for ; yyj4799 < yyl4799; yyj4799++ { - yyv4799 = append(yyv4799, NodeCondition{}) - yyh4799.ElemContainerState(yyj4799) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NodeCondition{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4799[yyj4799] = NodeCondition{} + yyv1[yyj1] = NodeCondition{} } else { - yyv4801 := &yyv4799[yyj4799] - yyv4801.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4799 := 0 - for ; !r.CheckBreak(); yyj4799++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4799 >= len(yyv4799) { - yyv4799 = append(yyv4799, NodeCondition{}) // var yyz4799 NodeCondition - yyc4799 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NodeCondition{}) // var yyz1 NodeCondition + yyc1 = true } - yyh4799.ElemContainerState(yyj4799) - if yyj4799 < len(yyv4799) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4799[yyj4799] = NodeCondition{} + yyv1[yyj1] = NodeCondition{} } else { - yyv4802 := &yyv4799[yyj4799] - yyv4802.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -61224,17 +71255,17 @@ func (x codecSelfer1234) decSliceNodeCondition(v *[]NodeCondition, d *codec1978. 
} } - if yyj4799 < len(yyv4799) { - yyv4799 = yyv4799[:yyj4799] - yyc4799 = true - } else if yyj4799 == 0 && yyv4799 == nil { - yyv4799 = []NodeCondition{} - yyc4799 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NodeCondition{} + yyc1 = true } } - yyh4799.End() - if yyc4799 { - *v = yyv4799 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -61243,10 +71274,10 @@ func (x codecSelfer1234) encSliceNodeAddress(v []NodeAddress, e *codec1978.Encod z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4803 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4804 := &yyv4803 - yy4804.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -61256,83 +71287,86 @@ func (x codecSelfer1234) decSliceNodeAddress(v *[]NodeAddress, d *codec1978.Deco z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4805 := *v - yyh4805, yyl4805 := z.DecSliceHelperStart() - var yyc4805 bool - if yyl4805 == 0 { - if yyv4805 == nil { - yyv4805 = []NodeAddress{} - yyc4805 = true - } else if len(yyv4805) != 0 { - yyv4805 = yyv4805[:0] - yyc4805 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NodeAddress{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4805 > 0 { - var yyrr4805, yyrl4805 int - var yyrt4805 bool - if yyl4805 > cap(yyv4805) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4805 := len(yyv4805) > 0 - yyv24805 := yyv4805 - yyrl4805, yyrt4805 = z.DecInferLen(yyl4805, z.DecBasicHandle().MaxInitLen, 32) - if yyrt4805 { - if yyrl4805 <= cap(yyv4805) { - yyv4805 = yyv4805[:yyrl4805] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4805 = make([]NodeAddress, yyrl4805) + yyv1 = make([]NodeAddress, yyrl1) } } else { - yyv4805 = make([]NodeAddress, yyrl4805) + yyv1 = make([]NodeAddress, yyrl1) } - yyc4805 = true - yyrr4805 = len(yyv4805) - if yyrg4805 { - copy(yyv4805, yyv24805) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4805 != len(yyv4805) { - yyv4805 = yyv4805[:yyl4805] - yyc4805 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4805 := 0 - for ; yyj4805 < yyrr4805; yyj4805++ { - yyh4805.ElemContainerState(yyj4805) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4805[yyj4805] = NodeAddress{} + yyv1[yyj1] = NodeAddress{} } else { - yyv4806 := &yyv4805[yyj4805] - yyv4806.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4805 { - for ; yyj4805 < yyl4805; yyj4805++ { - yyv4805 = append(yyv4805, NodeAddress{}) - yyh4805.ElemContainerState(yyj4805) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NodeAddress{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4805[yyj4805] = NodeAddress{} + yyv1[yyj1] = NodeAddress{} } else { - yyv4807 := &yyv4805[yyj4805] - yyv4807.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4805 := 0 - for ; !r.CheckBreak(); yyj4805++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4805 
>= len(yyv4805) { - yyv4805 = append(yyv4805, NodeAddress{}) // var yyz4805 NodeAddress - yyc4805 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NodeAddress{}) // var yyz1 NodeAddress + yyc1 = true } - yyh4805.ElemContainerState(yyj4805) - if yyj4805 < len(yyv4805) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4805[yyj4805] = NodeAddress{} + yyv1[yyj1] = NodeAddress{} } else { - yyv4808 := &yyv4805[yyj4805] - yyv4808.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -61340,17 +71374,17 @@ func (x codecSelfer1234) decSliceNodeAddress(v *[]NodeAddress, d *codec1978.Deco } } - if yyj4805 < len(yyv4805) { - yyv4805 = yyv4805[:yyj4805] - yyc4805 = true - } else if yyj4805 == 0 && yyv4805 == nil { - yyv4805 = []NodeAddress{} - yyc4805 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NodeAddress{} + yyc1 = true } } - yyh4805.End() - if yyc4805 { - *v = yyv4805 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -61359,10 +71393,10 @@ func (x codecSelfer1234) encSliceContainerImage(v []ContainerImage, e *codec1978 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4809 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4810 := &yyv4809 - yy4810.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -61372,83 +71406,86 @@ func (x codecSelfer1234) decSliceContainerImage(v *[]ContainerImage, d *codec197 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4811 := *v - yyh4811, yyl4811 := z.DecSliceHelperStart() - var yyc4811 bool - if yyl4811 == 0 { - if yyv4811 == nil { - yyv4811 = []ContainerImage{} - yyc4811 = true - } else if len(yyv4811) != 0 { - yyv4811 = yyv4811[:0] - yyc4811 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ContainerImage{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4811 > 0 { - var yyrr4811, yyrl4811 int - var yyrt4811 bool - if yyl4811 > cap(yyv4811) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4811 := len(yyv4811) > 0 - yyv24811 := yyv4811 - yyrl4811, yyrt4811 = z.DecInferLen(yyl4811, z.DecBasicHandle().MaxInitLen, 32) - if yyrt4811 { - if yyrl4811 <= cap(yyv4811) { - yyv4811 = yyv4811[:yyrl4811] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4811 = make([]ContainerImage, yyrl4811) + yyv1 = make([]ContainerImage, yyrl1) } } else { - yyv4811 = make([]ContainerImage, yyrl4811) + yyv1 = make([]ContainerImage, yyrl1) } - yyc4811 = true - yyrr4811 = len(yyv4811) - if yyrg4811 { - copy(yyv4811, yyv24811) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4811 != len(yyv4811) { - yyv4811 = yyv4811[:yyl4811] - yyc4811 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4811 := 0 - for ; yyj4811 < yyrr4811; yyj4811++ { - yyh4811.ElemContainerState(yyj4811) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4811[yyj4811] = ContainerImage{} + yyv1[yyj1] = ContainerImage{} } else { - yyv4812 := 
&yyv4811[yyj4811] - yyv4812.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4811 { - for ; yyj4811 < yyl4811; yyj4811++ { - yyv4811 = append(yyv4811, ContainerImage{}) - yyh4811.ElemContainerState(yyj4811) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ContainerImage{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4811[yyj4811] = ContainerImage{} + yyv1[yyj1] = ContainerImage{} } else { - yyv4813 := &yyv4811[yyj4811] - yyv4813.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4811 := 0 - for ; !r.CheckBreak(); yyj4811++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4811 >= len(yyv4811) { - yyv4811 = append(yyv4811, ContainerImage{}) // var yyz4811 ContainerImage - yyc4811 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ContainerImage{}) // var yyz1 ContainerImage + yyc1 = true } - yyh4811.ElemContainerState(yyj4811) - if yyj4811 < len(yyv4811) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4811[yyj4811] = ContainerImage{} + yyv1[yyj1] = ContainerImage{} } else { - yyv4814 := &yyv4811[yyj4811] - yyv4814.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -61456,17 +71493,17 @@ func (x codecSelfer1234) decSliceContainerImage(v *[]ContainerImage, d *codec197 } } - if yyj4811 < len(yyv4811) { - yyv4811 = yyv4811[:yyj4811] - yyc4811 = true - } else if yyj4811 == 0 && yyv4811 == nil { - yyv4811 = []ContainerImage{} - yyc4811 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ContainerImage{} + yyc1 = true } } - yyh4811.End() - if yyc4811 { - *v = yyv4811 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -61475,9 +71512,9 @@ func (x codecSelfer1234) encSliceUniqueVolumeName(v []UniqueVolumeName, e *codec z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4815 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv4815.CodecEncodeSelf(e) + yyv1.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -61487,75 +71524,81 @@ func (x codecSelfer1234) decSliceUniqueVolumeName(v *[]UniqueVolumeName, d *code z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4816 := *v - yyh4816, yyl4816 := z.DecSliceHelperStart() - var yyc4816 bool - if yyl4816 == 0 { - if yyv4816 == nil { - yyv4816 = []UniqueVolumeName{} - yyc4816 = true - } else if len(yyv4816) != 0 { - yyv4816 = yyv4816[:0] - yyc4816 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []UniqueVolumeName{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4816 > 0 { - var yyrr4816, yyrl4816 int - var yyrt4816 bool - if yyl4816 > cap(yyv4816) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrl4816, yyrt4816 = z.DecInferLen(yyl4816, z.DecBasicHandle().MaxInitLen, 16) - if yyrt4816 { - if yyrl4816 <= cap(yyv4816) { - yyv4816 = yyv4816[:yyrl4816] + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4816 = make([]UniqueVolumeName, yyrl4816) + yyv1 = make([]UniqueVolumeName, yyrl1) } } else { - yyv4816 = make([]UniqueVolumeName, yyrl4816) + yyv1 = make([]UniqueVolumeName, yyrl1) } 
- yyc4816 = true - yyrr4816 = len(yyv4816) - } else if yyl4816 != len(yyv4816) { - yyv4816 = yyv4816[:yyl4816] - yyc4816 = true + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4816 := 0 - for ; yyj4816 < yyrr4816; yyj4816++ { - yyh4816.ElemContainerState(yyj4816) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4816[yyj4816] = "" + yyv1[yyj1] = "" } else { - yyv4816[yyj4816] = UniqueVolumeName(r.DecodeString()) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4816 { - for ; yyj4816 < yyl4816; yyj4816++ { - yyv4816 = append(yyv4816, "") - yyh4816.ElemContainerState(yyj4816) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4816[yyj4816] = "" + yyv1[yyj1] = "" } else { - yyv4816[yyj4816] = UniqueVolumeName(r.DecodeString()) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4816 := 0 - for ; !r.CheckBreak(); yyj4816++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4816 >= len(yyv4816) { - yyv4816 = append(yyv4816, "") // var yyz4816 UniqueVolumeName - yyc4816 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 UniqueVolumeName + yyc1 = true } - yyh4816.ElemContainerState(yyj4816) - if yyj4816 < len(yyv4816) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4816[yyj4816] = "" + yyv1[yyj1] = "" } else { - yyv4816[yyj4816] = UniqueVolumeName(r.DecodeString()) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -61563,17 +71606,17 @@ func (x codecSelfer1234) decSliceUniqueVolumeName(v *[]UniqueVolumeName, d *code } } - if yyj4816 < len(yyv4816) { - yyv4816 = yyv4816[:yyj4816] - yyc4816 = true - } else if yyj4816 == 0 && yyv4816 == nil { - yyv4816 = []UniqueVolumeName{} - yyc4816 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []UniqueVolumeName{} + yyc1 = true } } - yyh4816.End() - if yyc4816 { - *v = yyv4816 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -61582,10 +71625,10 @@ func (x codecSelfer1234) encSliceAttachedVolume(v []AttachedVolume, e *codec1978 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4820 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4821 := &yyv4820 - yy4821.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -61595,83 +71638,86 @@ func (x codecSelfer1234) decSliceAttachedVolume(v *[]AttachedVolume, d *codec197 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4822 := *v - yyh4822, yyl4822 := z.DecSliceHelperStart() - var yyc4822 bool - if yyl4822 == 0 { - if yyv4822 == nil { - yyv4822 = []AttachedVolume{} - yyc4822 = true - } else if len(yyv4822) != 0 { - yyv4822 = yyv4822[:0] - yyc4822 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []AttachedVolume{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4822 > 0 { - var yyrr4822, yyrl4822 int - var yyrt4822 bool - if yyl4822 > cap(yyv4822) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4822 := len(yyv4822) > 0 - yyv24822 := yyv4822 - yyrl4822, yyrt4822 = 
z.DecInferLen(yyl4822, z.DecBasicHandle().MaxInitLen, 32) - if yyrt4822 { - if yyrl4822 <= cap(yyv4822) { - yyv4822 = yyv4822[:yyrl4822] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4822 = make([]AttachedVolume, yyrl4822) + yyv1 = make([]AttachedVolume, yyrl1) } } else { - yyv4822 = make([]AttachedVolume, yyrl4822) + yyv1 = make([]AttachedVolume, yyrl1) } - yyc4822 = true - yyrr4822 = len(yyv4822) - if yyrg4822 { - copy(yyv4822, yyv24822) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4822 != len(yyv4822) { - yyv4822 = yyv4822[:yyl4822] - yyc4822 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4822 := 0 - for ; yyj4822 < yyrr4822; yyj4822++ { - yyh4822.ElemContainerState(yyj4822) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4822[yyj4822] = AttachedVolume{} + yyv1[yyj1] = AttachedVolume{} } else { - yyv4823 := &yyv4822[yyj4822] - yyv4823.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4822 { - for ; yyj4822 < yyl4822; yyj4822++ { - yyv4822 = append(yyv4822, AttachedVolume{}) - yyh4822.ElemContainerState(yyj4822) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, AttachedVolume{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4822[yyj4822] = AttachedVolume{} + yyv1[yyj1] = AttachedVolume{} } else { - yyv4824 := &yyv4822[yyj4822] - yyv4824.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4822 := 0 - for ; !r.CheckBreak(); yyj4822++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4822 >= len(yyv4822) { - yyv4822 = append(yyv4822, AttachedVolume{}) // var yyz4822 AttachedVolume - yyc4822 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, AttachedVolume{}) // var yyz1 AttachedVolume + yyc1 = true } - yyh4822.ElemContainerState(yyj4822) - if yyj4822 < len(yyv4822) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4822[yyj4822] = AttachedVolume{} + yyv1[yyj1] = AttachedVolume{} } else { - yyv4825 := &yyv4822[yyj4822] - yyv4825.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -61679,17 +71725,17 @@ func (x codecSelfer1234) decSliceAttachedVolume(v *[]AttachedVolume, d *codec197 } } - if yyj4822 < len(yyv4822) { - yyv4822 = yyv4822[:yyj4822] - yyc4822 = true - } else if yyj4822 == 0 && yyv4822 == nil { - yyv4822 = []AttachedVolume{} - yyc4822 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []AttachedVolume{} + yyc1 = true } } - yyh4822.End() - if yyc4822 { - *v = yyv4822 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -61698,10 +71744,10 @@ func (x codecSelfer1234) encSlicePreferAvoidPodsEntry(v []PreferAvoidPodsEntry, z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4826 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4827 := &yyv4826 - yy4827.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -61711,83 +71757,86 @@ func (x codecSelfer1234) decSlicePreferAvoidPodsEntry(v *[]PreferAvoidPodsEntry, z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4828 := *v - yyh4828, yyl4828 := z.DecSliceHelperStart() - var 
yyc4828 bool - if yyl4828 == 0 { - if yyv4828 == nil { - yyv4828 = []PreferAvoidPodsEntry{} - yyc4828 = true - } else if len(yyv4828) != 0 { - yyv4828 = yyv4828[:0] - yyc4828 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PreferAvoidPodsEntry{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4828 > 0 { - var yyrr4828, yyrl4828 int - var yyrt4828 bool - if yyl4828 > cap(yyv4828) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4828 := len(yyv4828) > 0 - yyv24828 := yyv4828 - yyrl4828, yyrt4828 = z.DecInferLen(yyl4828, z.DecBasicHandle().MaxInitLen, 64) - if yyrt4828 { - if yyrl4828 <= cap(yyv4828) { - yyv4828 = yyv4828[:yyrl4828] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4828 = make([]PreferAvoidPodsEntry, yyrl4828) + yyv1 = make([]PreferAvoidPodsEntry, yyrl1) } } else { - yyv4828 = make([]PreferAvoidPodsEntry, yyrl4828) + yyv1 = make([]PreferAvoidPodsEntry, yyrl1) } - yyc4828 = true - yyrr4828 = len(yyv4828) - if yyrg4828 { - copy(yyv4828, yyv24828) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4828 != len(yyv4828) { - yyv4828 = yyv4828[:yyl4828] - yyc4828 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4828 := 0 - for ; yyj4828 < yyrr4828; yyj4828++ { - yyh4828.ElemContainerState(yyj4828) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4828[yyj4828] = PreferAvoidPodsEntry{} + yyv1[yyj1] = PreferAvoidPodsEntry{} } else { - yyv4829 := &yyv4828[yyj4828] - yyv4829.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4828 { - for ; yyj4828 < yyl4828; yyj4828++ { - yyv4828 = append(yyv4828, PreferAvoidPodsEntry{}) - yyh4828.ElemContainerState(yyj4828) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PreferAvoidPodsEntry{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4828[yyj4828] = PreferAvoidPodsEntry{} + yyv1[yyj1] = PreferAvoidPodsEntry{} } else { - yyv4830 := &yyv4828[yyj4828] - yyv4830.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4828 := 0 - for ; !r.CheckBreak(); yyj4828++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4828 >= len(yyv4828) { - yyv4828 = append(yyv4828, PreferAvoidPodsEntry{}) // var yyz4828 PreferAvoidPodsEntry - yyc4828 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PreferAvoidPodsEntry{}) // var yyz1 PreferAvoidPodsEntry + yyc1 = true } - yyh4828.ElemContainerState(yyj4828) - if yyj4828 < len(yyv4828) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4828[yyj4828] = PreferAvoidPodsEntry{} + yyv1[yyj1] = PreferAvoidPodsEntry{} } else { - yyv4831 := &yyv4828[yyj4828] - yyv4831.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -61795,17 +71844,17 @@ func (x codecSelfer1234) decSlicePreferAvoidPodsEntry(v *[]PreferAvoidPodsEntry, } } - if yyj4828 < len(yyv4828) { - yyv4828 = yyv4828[:yyj4828] - yyc4828 = true - } else if yyj4828 == 0 && yyv4828 == nil { - yyv4828 = []PreferAvoidPodsEntry{} - yyc4828 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil 
{ + yyv1 = []PreferAvoidPodsEntry{} + yyc1 = true } } - yyh4828.End() - if yyc4828 { - *v = yyv4828 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -61814,19 +71863,19 @@ func (x codecSelfer1234) encResourceList(v ResourceList, e *codec1978.Encoder) { z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeMapStart(len(v)) - for yyk4832, yyv4832 := range v { + for yyk1, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerMapKey1234) - yyk4832.CodecEncodeSelf(e) + yyk1.CodecEncodeSelf(e) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy4833 := &yyv4832 - yym4834 := z.EncBinary() - _ = yym4834 + yy3 := &yyv1 + yym4 := z.EncBinary() + _ = yym4 if false { - } else if z.HasExtensions() && z.EncExt(yy4833) { - } else if !yym4834 && z.IsJSONHandle() { - z.EncJSONMarshal(yy4833) + } else if z.HasExtensions() && z.EncExt(yy3) { + } else if !yym4 && z.IsJSONHandle() { + z.EncJSONMarshal(yy3) } else { - z.EncFallback(yy4833) + z.EncFallback(yy3) } } z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -61837,86 +71886,88 @@ func (x codecSelfer1234) decResourceList(v *ResourceList, d *codec1978.Decoder) z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4835 := *v - yyl4835 := r.ReadMapStart() - yybh4835 := z.DecBasicHandle() - if yyv4835 == nil { - yyrl4835, _ := z.DecInferLen(yyl4835, yybh4835.MaxInitLen, 72) - yyv4835 = make(map[ResourceName]pkg3_resource.Quantity, yyrl4835) - *v = yyv4835 + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 72) + yyv1 = make(map[ResourceName]pkg3_resource.Quantity, yyrl1) + *v = yyv1 } - var yymk4835 ResourceName - var yymv4835 pkg3_resource.Quantity - var yymg4835 bool - if yybh4835.MapValueReset { - yymg4835 = true + var yymk1 ResourceName + var yymv1 pkg3_resource.Quantity + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true } - if yyl4835 > 0 { - for yyj4835 := 0; yyj4835 < yyl4835; yyj4835++ { + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { z.DecSendContainerState(codecSelfer_containerMapKey1234) if r.TryDecodeAsNil() { - yymk4835 = "" + yymk1 = "" } else { - yymk4835 = ResourceName(r.DecodeString()) + yyv2 := &yymk1 + yyv2.CodecDecodeSelf(d) } - if yymg4835 { - yymv4835 = yyv4835[yymk4835] + if yymg1 { + yymv1 = yyv1[yymk1] } else { - yymv4835 = pkg3_resource.Quantity{} + yymv1 = pkg3_resource.Quantity{} } z.DecSendContainerState(codecSelfer_containerMapValue1234) if r.TryDecodeAsNil() { - yymv4835 = pkg3_resource.Quantity{} + yymv1 = pkg3_resource.Quantity{} } else { - yyv4837 := &yymv4835 - yym4838 := z.DecBinary() - _ = yym4838 + yyv3 := &yymv1 + yym4 := z.DecBinary() + _ = yym4 if false { - } else if z.HasExtensions() && z.DecExt(yyv4837) { - } else if !yym4838 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4837) + } else if z.HasExtensions() && z.DecExt(yyv3) { + } else if !yym4 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv3) } else { - z.DecFallback(yyv4837, false) + z.DecFallback(yyv3, false) } } - if yyv4835 != nil { - yyv4835[yymk4835] = yymv4835 + if yyv1 != nil { + yyv1[yymk1] = yymv1 } } - } else if yyl4835 < 0 { - for yyj4835 := 0; !r.CheckBreak(); yyj4835++ { + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { z.DecSendContainerState(codecSelfer_containerMapKey1234) if r.TryDecodeAsNil() { - yymk4835 = "" + yymk1 = "" } else { - yymk4835 = ResourceName(r.DecodeString()) + yyv5 := &yymk1 + yyv5.CodecDecodeSelf(d) } - if yymg4835 { - yymv4835 = yyv4835[yymk4835] + if yymg1 { + yymv1 = 
yyv1[yymk1] } else { - yymv4835 = pkg3_resource.Quantity{} + yymv1 = pkg3_resource.Quantity{} } z.DecSendContainerState(codecSelfer_containerMapValue1234) if r.TryDecodeAsNil() { - yymv4835 = pkg3_resource.Quantity{} + yymv1 = pkg3_resource.Quantity{} } else { - yyv4840 := &yymv4835 - yym4841 := z.DecBinary() - _ = yym4841 + yyv6 := &yymv1 + yym7 := z.DecBinary() + _ = yym7 if false { - } else if z.HasExtensions() && z.DecExt(yyv4840) { - } else if !yym4841 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4840) + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) } else { - z.DecFallback(yyv4840, false) + z.DecFallback(yyv6, false) } } - if yyv4835 != nil { - yyv4835[yymk4835] = yymv4835 + if yyv1 != nil { + yyv1[yymk1] = yymv1 } } } // else len==0: TODO: Should we clear map entries? @@ -61928,10 +71979,10 @@ func (x codecSelfer1234) encSliceNode(v []Node, e *codec1978.Encoder) { z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4842 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4843 := &yyv4842 - yy4843.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -61941,83 +71992,86 @@ func (x codecSelfer1234) decSliceNode(v *[]Node, d *codec1978.Decoder) { z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4844 := *v - yyh4844, yyl4844 := z.DecSliceHelperStart() - var yyc4844 bool - if yyl4844 == 0 { - if yyv4844 == nil { - yyv4844 = []Node{} - yyc4844 = true - } else if len(yyv4844) != 0 { - yyv4844 = yyv4844[:0] - yyc4844 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Node{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4844 > 0 { - var yyrr4844, yyrl4844 int - var yyrt4844 bool - if yyl4844 > cap(yyv4844) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4844 := len(yyv4844) > 0 - yyv24844 := yyv4844 - yyrl4844, yyrt4844 = z.DecInferLen(yyl4844, z.DecBasicHandle().MaxInitLen, 632) - if yyrt4844 { - if yyrl4844 <= cap(yyv4844) { - yyv4844 = yyv4844[:yyrl4844] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 656) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4844 = make([]Node, yyrl4844) + yyv1 = make([]Node, yyrl1) } } else { - yyv4844 = make([]Node, yyrl4844) + yyv1 = make([]Node, yyrl1) } - yyc4844 = true - yyrr4844 = len(yyv4844) - if yyrg4844 { - copy(yyv4844, yyv24844) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4844 != len(yyv4844) { - yyv4844 = yyv4844[:yyl4844] - yyc4844 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4844 := 0 - for ; yyj4844 < yyrr4844; yyj4844++ { - yyh4844.ElemContainerState(yyj4844) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4844[yyj4844] = Node{} + yyv1[yyj1] = Node{} } else { - yyv4845 := &yyv4844[yyj4844] - yyv4845.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4844 { - for ; yyj4844 < yyl4844; yyj4844++ { - yyv4844 = append(yyv4844, Node{}) - yyh4844.ElemContainerState(yyj4844) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, 
Node{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4844[yyj4844] = Node{} + yyv1[yyj1] = Node{} } else { - yyv4846 := &yyv4844[yyj4844] - yyv4846.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4844 := 0 - for ; !r.CheckBreak(); yyj4844++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4844 >= len(yyv4844) { - yyv4844 = append(yyv4844, Node{}) // var yyz4844 Node - yyc4844 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Node{}) // var yyz1 Node + yyc1 = true } - yyh4844.ElemContainerState(yyj4844) - if yyj4844 < len(yyv4844) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4844[yyj4844] = Node{} + yyv1[yyj1] = Node{} } else { - yyv4847 := &yyv4844[yyj4844] - yyv4847.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -62025,17 +72079,17 @@ func (x codecSelfer1234) decSliceNode(v *[]Node, d *codec1978.Decoder) { } } - if yyj4844 < len(yyv4844) { - yyv4844 = yyv4844[:yyj4844] - yyc4844 = true - } else if yyj4844 == 0 && yyv4844 == nil { - yyv4844 = []Node{} - yyc4844 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Node{} + yyc1 = true } } - yyh4844.End() - if yyc4844 { - *v = yyv4844 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -62044,9 +72098,9 @@ func (x codecSelfer1234) encSliceFinalizerName(v []FinalizerName, e *codec1978.E z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4848 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv4848.CodecEncodeSelf(e) + yyv1.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -62056,75 +72110,81 @@ func (x codecSelfer1234) decSliceFinalizerName(v *[]FinalizerName, d *codec1978. 
z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4849 := *v - yyh4849, yyl4849 := z.DecSliceHelperStart() - var yyc4849 bool - if yyl4849 == 0 { - if yyv4849 == nil { - yyv4849 = []FinalizerName{} - yyc4849 = true - } else if len(yyv4849) != 0 { - yyv4849 = yyv4849[:0] - yyc4849 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []FinalizerName{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4849 > 0 { - var yyrr4849, yyrl4849 int - var yyrt4849 bool - if yyl4849 > cap(yyv4849) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrl4849, yyrt4849 = z.DecInferLen(yyl4849, z.DecBasicHandle().MaxInitLen, 16) - if yyrt4849 { - if yyrl4849 <= cap(yyv4849) { - yyv4849 = yyv4849[:yyrl4849] + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4849 = make([]FinalizerName, yyrl4849) + yyv1 = make([]FinalizerName, yyrl1) } } else { - yyv4849 = make([]FinalizerName, yyrl4849) + yyv1 = make([]FinalizerName, yyrl1) } - yyc4849 = true - yyrr4849 = len(yyv4849) - } else if yyl4849 != len(yyv4849) { - yyv4849 = yyv4849[:yyl4849] - yyc4849 = true + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4849 := 0 - for ; yyj4849 < yyrr4849; yyj4849++ { - yyh4849.ElemContainerState(yyj4849) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4849[yyj4849] = "" + yyv1[yyj1] = "" } else { - yyv4849[yyj4849] = FinalizerName(r.DecodeString()) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4849 { - for ; yyj4849 < yyl4849; yyj4849++ { - yyv4849 = append(yyv4849, "") - yyh4849.ElemContainerState(yyj4849) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4849[yyj4849] = "" + yyv1[yyj1] = "" } else { - yyv4849[yyj4849] = FinalizerName(r.DecodeString()) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4849 := 0 - for ; !r.CheckBreak(); yyj4849++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4849 >= len(yyv4849) { - yyv4849 = append(yyv4849, "") // var yyz4849 FinalizerName - yyc4849 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 FinalizerName + yyc1 = true } - yyh4849.ElemContainerState(yyj4849) - if yyj4849 < len(yyv4849) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4849[yyj4849] = "" + yyv1[yyj1] = "" } else { - yyv4849[yyj4849] = FinalizerName(r.DecodeString()) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -62132,17 +72192,17 @@ func (x codecSelfer1234) decSliceFinalizerName(v *[]FinalizerName, d *codec1978. 
} } - if yyj4849 < len(yyv4849) { - yyv4849 = yyv4849[:yyj4849] - yyc4849 = true - } else if yyj4849 == 0 && yyv4849 == nil { - yyv4849 = []FinalizerName{} - yyc4849 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []FinalizerName{} + yyc1 = true } } - yyh4849.End() - if yyc4849 { - *v = yyv4849 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -62151,10 +72211,10 @@ func (x codecSelfer1234) encSliceNamespace(v []Namespace, e *codec1978.Encoder) z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4853 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4854 := &yyv4853 - yy4854.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -62164,83 +72224,86 @@ func (x codecSelfer1234) decSliceNamespace(v *[]Namespace, d *codec1978.Decoder) z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4855 := *v - yyh4855, yyl4855 := z.DecSliceHelperStart() - var yyc4855 bool - if yyl4855 == 0 { - if yyv4855 == nil { - yyv4855 = []Namespace{} - yyc4855 = true - } else if len(yyv4855) != 0 { - yyv4855 = yyv4855[:0] - yyc4855 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Namespace{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4855 > 0 { - var yyrr4855, yyrl4855 int - var yyrt4855 bool - if yyl4855 > cap(yyv4855) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4855 := len(yyv4855) > 0 - yyv24855 := yyv4855 - yyrl4855, yyrt4855 = z.DecInferLen(yyl4855, z.DecBasicHandle().MaxInitLen, 296) - if yyrt4855 { - if yyrl4855 <= cap(yyv4855) { - yyv4855 = yyv4855[:yyrl4855] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4855 = make([]Namespace, yyrl4855) + yyv1 = make([]Namespace, yyrl1) } } else { - yyv4855 = make([]Namespace, yyrl4855) + yyv1 = make([]Namespace, yyrl1) } - yyc4855 = true - yyrr4855 = len(yyv4855) - if yyrg4855 { - copy(yyv4855, yyv24855) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4855 != len(yyv4855) { - yyv4855 = yyv4855[:yyl4855] - yyc4855 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4855 := 0 - for ; yyj4855 < yyrr4855; yyj4855++ { - yyh4855.ElemContainerState(yyj4855) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4855[yyj4855] = Namespace{} + yyv1[yyj1] = Namespace{} } else { - yyv4856 := &yyv4855[yyj4855] - yyv4856.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4855 { - for ; yyj4855 < yyl4855; yyj4855++ { - yyv4855 = append(yyv4855, Namespace{}) - yyh4855.ElemContainerState(yyj4855) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Namespace{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4855[yyj4855] = Namespace{} + yyv1[yyj1] = Namespace{} } else { - yyv4857 := &yyv4855[yyj4855] - yyv4857.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4855 := 0 - for ; !r.CheckBreak(); yyj4855++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4855 >= len(yyv4855) { - 
yyv4855 = append(yyv4855, Namespace{}) // var yyz4855 Namespace - yyc4855 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Namespace{}) // var yyz1 Namespace + yyc1 = true } - yyh4855.ElemContainerState(yyj4855) - if yyj4855 < len(yyv4855) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4855[yyj4855] = Namespace{} + yyv1[yyj1] = Namespace{} } else { - yyv4858 := &yyv4855[yyj4855] - yyv4858.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -62248,17 +72311,17 @@ func (x codecSelfer1234) decSliceNamespace(v *[]Namespace, d *codec1978.Decoder) } } - if yyj4855 < len(yyv4855) { - yyv4855 = yyv4855[:yyj4855] - yyc4855 = true - } else if yyj4855 == 0 && yyv4855 == nil { - yyv4855 = []Namespace{} - yyc4855 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Namespace{} + yyc1 = true } } - yyh4855.End() - if yyc4855 { - *v = yyv4855 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -62267,10 +72330,10 @@ func (x codecSelfer1234) encSliceEvent(v []Event, e *codec1978.Encoder) { z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4859 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4860 := &yyv4859 - yy4860.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -62280,83 +72343,86 @@ func (x codecSelfer1234) decSliceEvent(v *[]Event, d *codec1978.Decoder) { z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4861 := *v - yyh4861, yyl4861 := z.DecSliceHelperStart() - var yyc4861 bool - if yyl4861 == 0 { - if yyv4861 == nil { - yyv4861 = []Event{} - yyc4861 = true - } else if len(yyv4861) != 0 { - yyv4861 = yyv4861[:0] - yyc4861 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Event{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4861 > 0 { - var yyrr4861, yyrl4861 int - var yyrt4861 bool - if yyl4861 > cap(yyv4861) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4861 := len(yyv4861) > 0 - yyv24861 := yyv4861 - yyrl4861, yyrt4861 = z.DecInferLen(yyl4861, z.DecBasicHandle().MaxInitLen, 504) - if yyrt4861 { - if yyrl4861 <= cap(yyv4861) { - yyv4861 = yyv4861[:yyrl4861] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 504) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4861 = make([]Event, yyrl4861) + yyv1 = make([]Event, yyrl1) } } else { - yyv4861 = make([]Event, yyrl4861) + yyv1 = make([]Event, yyrl1) } - yyc4861 = true - yyrr4861 = len(yyv4861) - if yyrg4861 { - copy(yyv4861, yyv24861) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4861 != len(yyv4861) { - yyv4861 = yyv4861[:yyl4861] - yyc4861 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4861 := 0 - for ; yyj4861 < yyrr4861; yyj4861++ { - yyh4861.ElemContainerState(yyj4861) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4861[yyj4861] = Event{} + yyv1[yyj1] = Event{} } else { - yyv4862 := &yyv4861[yyj4861] - yyv4862.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4861 { - for ; 
yyj4861 < yyl4861; yyj4861++ { - yyv4861 = append(yyv4861, Event{}) - yyh4861.ElemContainerState(yyj4861) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Event{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4861[yyj4861] = Event{} + yyv1[yyj1] = Event{} } else { - yyv4863 := &yyv4861[yyj4861] - yyv4863.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4861 := 0 - for ; !r.CheckBreak(); yyj4861++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4861 >= len(yyv4861) { - yyv4861 = append(yyv4861, Event{}) // var yyz4861 Event - yyc4861 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Event{}) // var yyz1 Event + yyc1 = true } - yyh4861.ElemContainerState(yyj4861) - if yyj4861 < len(yyv4861) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4861[yyj4861] = Event{} + yyv1[yyj1] = Event{} } else { - yyv4864 := &yyv4861[yyj4861] - yyv4864.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -62364,17 +72430,17 @@ func (x codecSelfer1234) decSliceEvent(v *[]Event, d *codec1978.Decoder) { } } - if yyj4861 < len(yyv4861) { - yyv4861 = yyv4861[:yyj4861] - yyc4861 = true - } else if yyj4861 == 0 && yyv4861 == nil { - yyv4861 = []Event{} - yyc4861 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Event{} + yyc1 = true } } - yyh4861.End() - if yyc4861 { - *v = yyv4861 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -62383,17 +72449,17 @@ func (x codecSelfer1234) encSliceruntime_RawExtension(v []pkg5_runtime.RawExtens z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4865 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4866 := &yyv4865 - yym4867 := z.EncBinary() - _ = yym4867 + yy2 := &yyv1 + yym3 := z.EncBinary() + _ = yym3 if false { - } else if z.HasExtensions() && z.EncExt(yy4866) { - } else if !yym4867 && z.IsJSONHandle() { - z.EncJSONMarshal(yy4866) + } else if z.HasExtensions() && z.EncExt(yy2) { + } else if !yym3 && z.IsJSONHandle() { + z.EncJSONMarshal(yy2) } else { - z.EncFallback(yy4866) + z.EncFallback(yy2) } } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) @@ -62404,78 +72470,81 @@ func (x codecSelfer1234) decSliceruntime_RawExtension(v *[]pkg5_runtime.RawExten z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4868 := *v - yyh4868, yyl4868 := z.DecSliceHelperStart() - var yyc4868 bool - if yyl4868 == 0 { - if yyv4868 == nil { - yyv4868 = []pkg5_runtime.RawExtension{} - yyc4868 = true - } else if len(yyv4868) != 0 { - yyv4868 = yyv4868[:0] - yyc4868 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg5_runtime.RawExtension{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4868 > 0 { - var yyrr4868, yyrl4868 int - var yyrt4868 bool - if yyl4868 > cap(yyv4868) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4868 := len(yyv4868) > 0 - yyv24868 := yyv4868 - yyrl4868, yyrt4868 = z.DecInferLen(yyl4868, z.DecBasicHandle().MaxInitLen, 40) - if yyrt4868 { - if yyrl4868 <= cap(yyv4868) { - yyv4868 = yyv4868[:yyrl4868] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 
<= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4868 = make([]pkg5_runtime.RawExtension, yyrl4868) + yyv1 = make([]pkg5_runtime.RawExtension, yyrl1) } } else { - yyv4868 = make([]pkg5_runtime.RawExtension, yyrl4868) + yyv1 = make([]pkg5_runtime.RawExtension, yyrl1) } - yyc4868 = true - yyrr4868 = len(yyv4868) - if yyrg4868 { - copy(yyv4868, yyv24868) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4868 != len(yyv4868) { - yyv4868 = yyv4868[:yyl4868] - yyc4868 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4868 := 0 - for ; yyj4868 < yyrr4868; yyj4868++ { - yyh4868.ElemContainerState(yyj4868) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4868[yyj4868] = pkg5_runtime.RawExtension{} + yyv1[yyj1] = pkg5_runtime.RawExtension{} } else { - yyv4869 := &yyv4868[yyj4868] - yym4870 := z.DecBinary() - _ = yym4870 + yyv2 := &yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 if false { - } else if z.HasExtensions() && z.DecExt(yyv4869) { - } else if !yym4870 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4869) + } else if z.HasExtensions() && z.DecExt(yyv2) { + } else if !yym3 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv2) } else { - z.DecFallback(yyv4869, false) + z.DecFallback(yyv2, false) } } } - if yyrt4868 { - for ; yyj4868 < yyl4868; yyj4868++ { - yyv4868 = append(yyv4868, pkg5_runtime.RawExtension{}) - yyh4868.ElemContainerState(yyj4868) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, pkg5_runtime.RawExtension{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4868[yyj4868] = pkg5_runtime.RawExtension{} + yyv1[yyj1] = pkg5_runtime.RawExtension{} } else { - yyv4871 := &yyv4868[yyj4868] - yym4872 := z.DecBinary() - _ = yym4872 + yyv4 := &yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 if false { - } else if z.HasExtensions() && z.DecExt(yyv4871) { - } else if !yym4872 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4871) + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv4) } else { - z.DecFallback(yyv4871, false) + z.DecFallback(yyv4, false) } } @@ -62483,27 +72552,27 @@ func (x codecSelfer1234) decSliceruntime_RawExtension(v *[]pkg5_runtime.RawExten } } else { - yyj4868 := 0 - for ; !r.CheckBreak(); yyj4868++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4868 >= len(yyv4868) { - yyv4868 = append(yyv4868, pkg5_runtime.RawExtension{}) // var yyz4868 pkg5_runtime.RawExtension - yyc4868 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, pkg5_runtime.RawExtension{}) // var yyz1 pkg5_runtime.RawExtension + yyc1 = true } - yyh4868.ElemContainerState(yyj4868) - if yyj4868 < len(yyv4868) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4868[yyj4868] = pkg5_runtime.RawExtension{} + yyv1[yyj1] = pkg5_runtime.RawExtension{} } else { - yyv4873 := &yyv4868[yyj4868] - yym4874 := z.DecBinary() - _ = yym4874 + yyv6 := &yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 if false { - } else if z.HasExtensions() && z.DecExt(yyv4873) { - } else if !yym4874 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4873) + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) } else { - z.DecFallback(yyv4873, false) + z.DecFallback(yyv6, false) } } @@ -62512,17 +72581,17 @@ func (x codecSelfer1234) decSliceruntime_RawExtension(v *[]pkg5_runtime.RawExten } } - if yyj4868 < len(yyv4868) { - yyv4868 = 
yyv4868[:yyj4868] - yyc4868 = true - } else if yyj4868 == 0 && yyv4868 == nil { - yyv4868 = []pkg5_runtime.RawExtension{} - yyc4868 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg5_runtime.RawExtension{} + yyc1 = true } } - yyh4868.End() - if yyc4868 { - *v = yyv4868 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -62531,10 +72600,10 @@ func (x codecSelfer1234) encSliceLimitRangeItem(v []LimitRangeItem, e *codec1978 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4875 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4876 := &yyv4875 - yy4876.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -62544,83 +72613,86 @@ func (x codecSelfer1234) decSliceLimitRangeItem(v *[]LimitRangeItem, d *codec197 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4877 := *v - yyh4877, yyl4877 := z.DecSliceHelperStart() - var yyc4877 bool - if yyl4877 == 0 { - if yyv4877 == nil { - yyv4877 = []LimitRangeItem{} - yyc4877 = true - } else if len(yyv4877) != 0 { - yyv4877 = yyv4877[:0] - yyc4877 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []LimitRangeItem{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4877 > 0 { - var yyrr4877, yyrl4877 int - var yyrt4877 bool - if yyl4877 > cap(yyv4877) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4877 := len(yyv4877) > 0 - yyv24877 := yyv4877 - yyrl4877, yyrt4877 = z.DecInferLen(yyl4877, z.DecBasicHandle().MaxInitLen, 56) - if yyrt4877 { - if yyrl4877 <= cap(yyv4877) { - yyv4877 = yyv4877[:yyrl4877] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4877 = make([]LimitRangeItem, yyrl4877) + yyv1 = make([]LimitRangeItem, yyrl1) } } else { - yyv4877 = make([]LimitRangeItem, yyrl4877) + yyv1 = make([]LimitRangeItem, yyrl1) } - yyc4877 = true - yyrr4877 = len(yyv4877) - if yyrg4877 { - copy(yyv4877, yyv24877) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4877 != len(yyv4877) { - yyv4877 = yyv4877[:yyl4877] - yyc4877 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4877 := 0 - for ; yyj4877 < yyrr4877; yyj4877++ { - yyh4877.ElemContainerState(yyj4877) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4877[yyj4877] = LimitRangeItem{} + yyv1[yyj1] = LimitRangeItem{} } else { - yyv4878 := &yyv4877[yyj4877] - yyv4878.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4877 { - for ; yyj4877 < yyl4877; yyj4877++ { - yyv4877 = append(yyv4877, LimitRangeItem{}) - yyh4877.ElemContainerState(yyj4877) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, LimitRangeItem{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4877[yyj4877] = LimitRangeItem{} + yyv1[yyj1] = LimitRangeItem{} } else { - yyv4879 := &yyv4877[yyj4877] - yyv4879.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4877 := 0 - for ; !r.CheckBreak(); yyj4877++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ 
{ - if yyj4877 >= len(yyv4877) { - yyv4877 = append(yyv4877, LimitRangeItem{}) // var yyz4877 LimitRangeItem - yyc4877 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, LimitRangeItem{}) // var yyz1 LimitRangeItem + yyc1 = true } - yyh4877.ElemContainerState(yyj4877) - if yyj4877 < len(yyv4877) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4877[yyj4877] = LimitRangeItem{} + yyv1[yyj1] = LimitRangeItem{} } else { - yyv4880 := &yyv4877[yyj4877] - yyv4880.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -62628,17 +72700,17 @@ func (x codecSelfer1234) decSliceLimitRangeItem(v *[]LimitRangeItem, d *codec197 } } - if yyj4877 < len(yyv4877) { - yyv4877 = yyv4877[:yyj4877] - yyc4877 = true - } else if yyj4877 == 0 && yyv4877 == nil { - yyv4877 = []LimitRangeItem{} - yyc4877 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []LimitRangeItem{} + yyc1 = true } } - yyh4877.End() - if yyc4877 { - *v = yyv4877 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -62647,10 +72719,10 @@ func (x codecSelfer1234) encSliceLimitRange(v []LimitRange, e *codec1978.Encoder z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4881 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4882 := &yyv4881 - yy4882.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -62660,83 +72732,86 @@ func (x codecSelfer1234) decSliceLimitRange(v *[]LimitRange, d *codec1978.Decode z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4883 := *v - yyh4883, yyl4883 := z.DecSliceHelperStart() - var yyc4883 bool - if yyl4883 == 0 { - if yyv4883 == nil { - yyv4883 = []LimitRange{} - yyc4883 = true - } else if len(yyv4883) != 0 { - yyv4883 = yyv4883[:0] - yyc4883 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []LimitRange{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4883 > 0 { - var yyrr4883, yyrl4883 int - var yyrt4883 bool - if yyl4883 > cap(yyv4883) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4883 := len(yyv4883) > 0 - yyv24883 := yyv4883 - yyrl4883, yyrt4883 = z.DecInferLen(yyl4883, z.DecBasicHandle().MaxInitLen, 280) - if yyrt4883 { - if yyrl4883 <= cap(yyv4883) { - yyv4883 = yyv4883[:yyrl4883] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4883 = make([]LimitRange, yyrl4883) + yyv1 = make([]LimitRange, yyrl1) } } else { - yyv4883 = make([]LimitRange, yyrl4883) + yyv1 = make([]LimitRange, yyrl1) } - yyc4883 = true - yyrr4883 = len(yyv4883) - if yyrg4883 { - copy(yyv4883, yyv24883) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4883 != len(yyv4883) { - yyv4883 = yyv4883[:yyl4883] - yyc4883 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4883 := 0 - for ; yyj4883 < yyrr4883; yyj4883++ { - yyh4883.ElemContainerState(yyj4883) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4883[yyj4883] = LimitRange{} + yyv1[yyj1] = LimitRange{} } else { - 
yyv4884 := &yyv4883[yyj4883] - yyv4884.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4883 { - for ; yyj4883 < yyl4883; yyj4883++ { - yyv4883 = append(yyv4883, LimitRange{}) - yyh4883.ElemContainerState(yyj4883) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, LimitRange{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4883[yyj4883] = LimitRange{} + yyv1[yyj1] = LimitRange{} } else { - yyv4885 := &yyv4883[yyj4883] - yyv4885.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4883 := 0 - for ; !r.CheckBreak(); yyj4883++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4883 >= len(yyv4883) { - yyv4883 = append(yyv4883, LimitRange{}) // var yyz4883 LimitRange - yyc4883 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, LimitRange{}) // var yyz1 LimitRange + yyc1 = true } - yyh4883.ElemContainerState(yyj4883) - if yyj4883 < len(yyv4883) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4883[yyj4883] = LimitRange{} + yyv1[yyj1] = LimitRange{} } else { - yyv4886 := &yyv4883[yyj4883] - yyv4886.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -62744,17 +72819,17 @@ func (x codecSelfer1234) decSliceLimitRange(v *[]LimitRange, d *codec1978.Decode } } - if yyj4883 < len(yyv4883) { - yyv4883 = yyv4883[:yyj4883] - yyc4883 = true - } else if yyj4883 == 0 && yyv4883 == nil { - yyv4883 = []LimitRange{} - yyc4883 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []LimitRange{} + yyc1 = true } } - yyh4883.End() - if yyc4883 { - *v = yyv4883 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -62763,9 +72838,9 @@ func (x codecSelfer1234) encSliceResourceQuotaScope(v []ResourceQuotaScope, e *c z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4887 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv4887.CodecEncodeSelf(e) + yyv1.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -62775,75 +72850,81 @@ func (x codecSelfer1234) decSliceResourceQuotaScope(v *[]ResourceQuotaScope, d * z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4888 := *v - yyh4888, yyl4888 := z.DecSliceHelperStart() - var yyc4888 bool - if yyl4888 == 0 { - if yyv4888 == nil { - yyv4888 = []ResourceQuotaScope{} - yyc4888 = true - } else if len(yyv4888) != 0 { - yyv4888 = yyv4888[:0] - yyc4888 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ResourceQuotaScope{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4888 > 0 { - var yyrr4888, yyrl4888 int - var yyrt4888 bool - if yyl4888 > cap(yyv4888) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrl4888, yyrt4888 = z.DecInferLen(yyl4888, z.DecBasicHandle().MaxInitLen, 16) - if yyrt4888 { - if yyrl4888 <= cap(yyv4888) { - yyv4888 = yyv4888[:yyrl4888] + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4888 = make([]ResourceQuotaScope, yyrl4888) + yyv1 = make([]ResourceQuotaScope, yyrl1) } } else { - yyv4888 = make([]ResourceQuotaScope, yyrl4888) + yyv1 = make([]ResourceQuotaScope, yyrl1) } - yyc4888 = true - 
yyrr4888 = len(yyv4888) - } else if yyl4888 != len(yyv4888) { - yyv4888 = yyv4888[:yyl4888] - yyc4888 = true + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4888 := 0 - for ; yyj4888 < yyrr4888; yyj4888++ { - yyh4888.ElemContainerState(yyj4888) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4888[yyj4888] = "" + yyv1[yyj1] = "" } else { - yyv4888[yyj4888] = ResourceQuotaScope(r.DecodeString()) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4888 { - for ; yyj4888 < yyl4888; yyj4888++ { - yyv4888 = append(yyv4888, "") - yyh4888.ElemContainerState(yyj4888) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4888[yyj4888] = "" + yyv1[yyj1] = "" } else { - yyv4888[yyj4888] = ResourceQuotaScope(r.DecodeString()) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4888 := 0 - for ; !r.CheckBreak(); yyj4888++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4888 >= len(yyv4888) { - yyv4888 = append(yyv4888, "") // var yyz4888 ResourceQuotaScope - yyc4888 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 ResourceQuotaScope + yyc1 = true } - yyh4888.ElemContainerState(yyj4888) - if yyj4888 < len(yyv4888) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4888[yyj4888] = "" + yyv1[yyj1] = "" } else { - yyv4888[yyj4888] = ResourceQuotaScope(r.DecodeString()) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -62851,17 +72932,17 @@ func (x codecSelfer1234) decSliceResourceQuotaScope(v *[]ResourceQuotaScope, d * } } - if yyj4888 < len(yyv4888) { - yyv4888 = yyv4888[:yyj4888] - yyc4888 = true - } else if yyj4888 == 0 && yyv4888 == nil { - yyv4888 = []ResourceQuotaScope{} - yyc4888 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ResourceQuotaScope{} + yyc1 = true } } - yyh4888.End() - if yyc4888 { - *v = yyv4888 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -62870,10 +72951,10 @@ func (x codecSelfer1234) encSliceResourceQuota(v []ResourceQuota, e *codec1978.E z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4892 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4893 := &yyv4892 - yy4893.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -62883,83 +72964,86 @@ func (x codecSelfer1234) decSliceResourceQuota(v *[]ResourceQuota, d *codec1978. 
z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4894 := *v - yyh4894, yyl4894 := z.DecSliceHelperStart() - var yyc4894 bool - if yyl4894 == 0 { - if yyv4894 == nil { - yyv4894 = []ResourceQuota{} - yyc4894 = true - } else if len(yyv4894) != 0 { - yyv4894 = yyv4894[:0] - yyc4894 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ResourceQuota{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4894 > 0 { - var yyrr4894, yyrl4894 int - var yyrt4894 bool - if yyl4894 > cap(yyv4894) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4894 := len(yyv4894) > 0 - yyv24894 := yyv4894 - yyrl4894, yyrt4894 = z.DecInferLen(yyl4894, z.DecBasicHandle().MaxInitLen, 304) - if yyrt4894 { - if yyrl4894 <= cap(yyv4894) { - yyv4894 = yyv4894[:yyrl4894] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 304) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4894 = make([]ResourceQuota, yyrl4894) + yyv1 = make([]ResourceQuota, yyrl1) } } else { - yyv4894 = make([]ResourceQuota, yyrl4894) + yyv1 = make([]ResourceQuota, yyrl1) } - yyc4894 = true - yyrr4894 = len(yyv4894) - if yyrg4894 { - copy(yyv4894, yyv24894) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4894 != len(yyv4894) { - yyv4894 = yyv4894[:yyl4894] - yyc4894 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4894 := 0 - for ; yyj4894 < yyrr4894; yyj4894++ { - yyh4894.ElemContainerState(yyj4894) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4894[yyj4894] = ResourceQuota{} + yyv1[yyj1] = ResourceQuota{} } else { - yyv4895 := &yyv4894[yyj4894] - yyv4895.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4894 { - for ; yyj4894 < yyl4894; yyj4894++ { - yyv4894 = append(yyv4894, ResourceQuota{}) - yyh4894.ElemContainerState(yyj4894) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ResourceQuota{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4894[yyj4894] = ResourceQuota{} + yyv1[yyj1] = ResourceQuota{} } else { - yyv4896 := &yyv4894[yyj4894] - yyv4896.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4894 := 0 - for ; !r.CheckBreak(); yyj4894++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4894 >= len(yyv4894) { - yyv4894 = append(yyv4894, ResourceQuota{}) // var yyz4894 ResourceQuota - yyc4894 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ResourceQuota{}) // var yyz1 ResourceQuota + yyc1 = true } - yyh4894.ElemContainerState(yyj4894) - if yyj4894 < len(yyv4894) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4894[yyj4894] = ResourceQuota{} + yyv1[yyj1] = ResourceQuota{} } else { - yyv4897 := &yyv4894[yyj4894] - yyv4897.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -62967,17 +73051,17 @@ func (x codecSelfer1234) decSliceResourceQuota(v *[]ResourceQuota, d *codec1978. 
} } - if yyj4894 < len(yyv4894) { - yyv4894 = yyv4894[:yyj4894] - yyc4894 = true - } else if yyj4894 == 0 && yyv4894 == nil { - yyv4894 = []ResourceQuota{} - yyc4894 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ResourceQuota{} + yyc1 = true } } - yyh4894.End() - if yyc4894 { - *v = yyv4894 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -62986,23 +73070,23 @@ func (x codecSelfer1234) encMapstringSliceuint8(v map[string][]uint8, e *codec19 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeMapStart(len(v)) - for yyk4898, yyv4898 := range v { + for yyk1, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym4899 := z.EncBinary() - _ = yym4899 + yym2 := z.EncBinary() + _ = yym2 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(yyk4898)) + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) } z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyv4898 == nil { + if yyv1 == nil { r.EncodeNil() } else { - yym4900 := z.EncBinary() - _ = yym4900 + yym3 := z.EncBinary() + _ = yym3 if false { } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(yyv4898)) + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(yyv1)) } } } @@ -63014,95 +73098,121 @@ func (x codecSelfer1234) decMapstringSliceuint8(v *map[string][]uint8, d *codec1 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4901 := *v - yyl4901 := r.ReadMapStart() - yybh4901 := z.DecBasicHandle() - if yyv4901 == nil { - yyrl4901, _ := z.DecInferLen(yyl4901, yybh4901.MaxInitLen, 40) - yyv4901 = make(map[string][]uint8, yyrl4901) - *v = yyv4901 + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string][]uint8, yyrl1) + *v = yyv1 } - var yymk4901 string - var yymv4901 []uint8 - var yymg4901 bool - if yybh4901.MapValueReset { - yymg4901 = true + var yymk1 string + var yymv1 []uint8 + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true } - if yyl4901 > 0 { - for yyj4901 := 0; yyj4901 < yyl4901; yyj4901++ { + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { z.DecSendContainerState(codecSelfer_containerMapKey1234) if r.TryDecodeAsNil() { - yymk4901 = "" + yymk1 = "" } else { - yymk4901 = string(r.DecodeString()) + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } } - if yymg4901 { - yymv4901 = yyv4901[yymk4901] + if yymg1 { + yymv1 = yyv1[yymk1] } else { - yymv4901 = nil + yymv1 = nil } z.DecSendContainerState(codecSelfer_containerMapValue1234) if r.TryDecodeAsNil() { - yymv4901 = nil + yymv1 = nil } else { - yyv4903 := &yymv4901 - yym4904 := z.DecBinary() - _ = yym4904 + yyv4 := &yymv1 + yym5 := z.DecBinary() + _ = yym5 if false { } else { - *yyv4903 = r.DecodeBytes(*(*[]byte)(yyv4903), false, false) + *yyv4 = r.DecodeBytes(*(*[]byte)(yyv4), false, false) } } - if yyv4901 != nil { - yyv4901[yymk4901] = yymv4901 + if yyv1 != nil { + yyv1[yymk1] = yymv1 } } - } else if yyl4901 < 0 { - for yyj4901 := 0; !r.CheckBreak(); yyj4901++ { + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { z.DecSendContainerState(codecSelfer_containerMapKey1234) if r.TryDecodeAsNil() { - yymk4901 = "" + yymk1 = "" } else { - yymk4901 = string(r.DecodeString()) + yyv6 := &yymk1 + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } - if yymg4901 { - yymv4901 = yyv4901[yymk4901] + if 
yymg1 { + yymv1 = yyv1[yymk1] } else { - yymv4901 = nil + yymv1 = nil } z.DecSendContainerState(codecSelfer_containerMapValue1234) if r.TryDecodeAsNil() { - yymv4901 = nil + yymv1 = nil } else { - yyv4906 := &yymv4901 - yym4907 := z.DecBinary() - _ = yym4907 + yyv8 := &yymv1 + yym9 := z.DecBinary() + _ = yym9 if false { } else { - *yyv4906 = r.DecodeBytes(*(*[]byte)(yyv4906), false, false) + *yyv8 = r.DecodeBytes(*(*[]byte)(yyv8), false, false) } } - if yyv4901 != nil { - yyv4901[yymk4901] = yymv4901 + if yyv1 != nil { + yyv1[yymk1] = yymv1 } } } // else len==0: TODO: Should we clear map entries? z.DecSendContainerState(codecSelfer_containerMapEnd1234) } +func (x codecSelfer1234) encSliceuint8(v []uint8, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(v)) +} + +func (x codecSelfer1234) decSliceuint8(v *[]uint8, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + *v = r.DecodeBytes(*((*[]byte)(v)), false, false) +} + func (x codecSelfer1234) encSliceSecret(v []Secret, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4908 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4909 := &yyv4908 - yy4909.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -63112,83 +73222,86 @@ func (x codecSelfer1234) decSliceSecret(v *[]Secret, d *codec1978.Decoder) { z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4910 := *v - yyh4910, yyl4910 := z.DecSliceHelperStart() - var yyc4910 bool - if yyl4910 == 0 { - if yyv4910 == nil { - yyv4910 = []Secret{} - yyc4910 = true - } else if len(yyv4910) != 0 { - yyv4910 = yyv4910[:0] - yyc4910 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Secret{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4910 > 0 { - var yyrr4910, yyrl4910 int - var yyrt4910 bool - if yyl4910 > cap(yyv4910) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4910 := len(yyv4910) > 0 - yyv24910 := yyv4910 - yyrl4910, yyrt4910 = z.DecInferLen(yyl4910, z.DecBasicHandle().MaxInitLen, 288) - if yyrt4910 { - if yyrl4910 <= cap(yyv4910) { - yyv4910 = yyv4910[:yyrl4910] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 288) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4910 = make([]Secret, yyrl4910) + yyv1 = make([]Secret, yyrl1) } } else { - yyv4910 = make([]Secret, yyrl4910) + yyv1 = make([]Secret, yyrl1) } - yyc4910 = true - yyrr4910 = len(yyv4910) - if yyrg4910 { - copy(yyv4910, yyv24910) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4910 != len(yyv4910) { - yyv4910 = yyv4910[:yyl4910] - yyc4910 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4910 := 0 - for ; yyj4910 < yyrr4910; yyj4910++ { - yyh4910.ElemContainerState(yyj4910) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4910[yyj4910] = Secret{} + yyv1[yyj1] = Secret{} } else { - yyv4911 := &yyv4910[yyj4910] - 
yyv4911.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4910 { - for ; yyj4910 < yyl4910; yyj4910++ { - yyv4910 = append(yyv4910, Secret{}) - yyh4910.ElemContainerState(yyj4910) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Secret{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4910[yyj4910] = Secret{} + yyv1[yyj1] = Secret{} } else { - yyv4912 := &yyv4910[yyj4910] - yyv4912.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4910 := 0 - for ; !r.CheckBreak(); yyj4910++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4910 >= len(yyv4910) { - yyv4910 = append(yyv4910, Secret{}) // var yyz4910 Secret - yyc4910 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Secret{}) // var yyz1 Secret + yyc1 = true } - yyh4910.ElemContainerState(yyj4910) - if yyj4910 < len(yyv4910) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4910[yyj4910] = Secret{} + yyv1[yyj1] = Secret{} } else { - yyv4913 := &yyv4910[yyj4910] - yyv4913.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -63196,17 +73309,17 @@ func (x codecSelfer1234) decSliceSecret(v *[]Secret, d *codec1978.Decoder) { } } - if yyj4910 < len(yyv4910) { - yyv4910 = yyv4910[:yyj4910] - yyc4910 = true - } else if yyj4910 == 0 && yyv4910 == nil { - yyv4910 = []Secret{} - yyc4910 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Secret{} + yyc1 = true } } - yyh4910.End() - if yyc4910 { - *v = yyv4910 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -63215,10 +73328,10 @@ func (x codecSelfer1234) encSliceConfigMap(v []ConfigMap, e *codec1978.Encoder) z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4914 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4915 := &yyv4914 - yy4915.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -63228,83 +73341,86 @@ func (x codecSelfer1234) decSliceConfigMap(v *[]ConfigMap, d *codec1978.Decoder) z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4916 := *v - yyh4916, yyl4916 := z.DecSliceHelperStart() - var yyc4916 bool - if yyl4916 == 0 { - if yyv4916 == nil { - yyv4916 = []ConfigMap{} - yyc4916 = true - } else if len(yyv4916) != 0 { - yyv4916 = yyv4916[:0] - yyc4916 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ConfigMap{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4916 > 0 { - var yyrr4916, yyrl4916 int - var yyrt4916 bool - if yyl4916 > cap(yyv4916) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4916 := len(yyv4916) > 0 - yyv24916 := yyv4916 - yyrl4916, yyrt4916 = z.DecInferLen(yyl4916, z.DecBasicHandle().MaxInitLen, 264) - if yyrt4916 { - if yyrl4916 <= cap(yyv4916) { - yyv4916 = yyv4916[:yyrl4916] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4916 = make([]ConfigMap, yyrl4916) + yyv1 = make([]ConfigMap, yyrl1) } } else { - yyv4916 = make([]ConfigMap, yyrl4916) + yyv1 = make([]ConfigMap, yyrl1) } - yyc4916 = true - yyrr4916 = 
len(yyv4916) - if yyrg4916 { - copy(yyv4916, yyv24916) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4916 != len(yyv4916) { - yyv4916 = yyv4916[:yyl4916] - yyc4916 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4916 := 0 - for ; yyj4916 < yyrr4916; yyj4916++ { - yyh4916.ElemContainerState(yyj4916) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4916[yyj4916] = ConfigMap{} + yyv1[yyj1] = ConfigMap{} } else { - yyv4917 := &yyv4916[yyj4916] - yyv4917.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4916 { - for ; yyj4916 < yyl4916; yyj4916++ { - yyv4916 = append(yyv4916, ConfigMap{}) - yyh4916.ElemContainerState(yyj4916) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ConfigMap{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4916[yyj4916] = ConfigMap{} + yyv1[yyj1] = ConfigMap{} } else { - yyv4918 := &yyv4916[yyj4916] - yyv4918.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4916 := 0 - for ; !r.CheckBreak(); yyj4916++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4916 >= len(yyv4916) { - yyv4916 = append(yyv4916, ConfigMap{}) // var yyz4916 ConfigMap - yyc4916 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ConfigMap{}) // var yyz1 ConfigMap + yyc1 = true } - yyh4916.ElemContainerState(yyj4916) - if yyj4916 < len(yyv4916) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4916[yyj4916] = ConfigMap{} + yyv1[yyj1] = ConfigMap{} } else { - yyv4919 := &yyv4916[yyj4916] - yyv4919.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -63312,17 +73428,17 @@ func (x codecSelfer1234) decSliceConfigMap(v *[]ConfigMap, d *codec1978.Decoder) } } - if yyj4916 < len(yyv4916) { - yyv4916 = yyv4916[:yyj4916] - yyc4916 = true - } else if yyj4916 == 0 && yyv4916 == nil { - yyv4916 = []ConfigMap{} - yyc4916 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ConfigMap{} + yyc1 = true } } - yyh4916.End() - if yyc4916 { - *v = yyv4916 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -63331,10 +73447,10 @@ func (x codecSelfer1234) encSliceComponentCondition(v []ComponentCondition, e *c z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4920 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4921 := &yyv4920 - yy4921.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -63344,83 +73460,86 @@ func (x codecSelfer1234) decSliceComponentCondition(v *[]ComponentCondition, d * z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4922 := *v - yyh4922, yyl4922 := z.DecSliceHelperStart() - var yyc4922 bool - if yyl4922 == 0 { - if yyv4922 == nil { - yyv4922 = []ComponentCondition{} - yyc4922 = true - } else if len(yyv4922) != 0 { - yyv4922 = yyv4922[:0] - yyc4922 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ComponentCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4922 > 0 { - var yyrr4922, yyrl4922 int - var yyrt4922 bool - if yyl4922 > cap(yyv4922) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = 
yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4922 := len(yyv4922) > 0 - yyv24922 := yyv4922 - yyrl4922, yyrt4922 = z.DecInferLen(yyl4922, z.DecBasicHandle().MaxInitLen, 64) - if yyrt4922 { - if yyrl4922 <= cap(yyv4922) { - yyv4922 = yyv4922[:yyrl4922] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4922 = make([]ComponentCondition, yyrl4922) + yyv1 = make([]ComponentCondition, yyrl1) } } else { - yyv4922 = make([]ComponentCondition, yyrl4922) + yyv1 = make([]ComponentCondition, yyrl1) } - yyc4922 = true - yyrr4922 = len(yyv4922) - if yyrg4922 { - copy(yyv4922, yyv24922) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4922 != len(yyv4922) { - yyv4922 = yyv4922[:yyl4922] - yyc4922 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4922 := 0 - for ; yyj4922 < yyrr4922; yyj4922++ { - yyh4922.ElemContainerState(yyj4922) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4922[yyj4922] = ComponentCondition{} + yyv1[yyj1] = ComponentCondition{} } else { - yyv4923 := &yyv4922[yyj4922] - yyv4923.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4922 { - for ; yyj4922 < yyl4922; yyj4922++ { - yyv4922 = append(yyv4922, ComponentCondition{}) - yyh4922.ElemContainerState(yyj4922) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ComponentCondition{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4922[yyj4922] = ComponentCondition{} + yyv1[yyj1] = ComponentCondition{} } else { - yyv4924 := &yyv4922[yyj4922] - yyv4924.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4922 := 0 - for ; !r.CheckBreak(); yyj4922++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4922 >= len(yyv4922) { - yyv4922 = append(yyv4922, ComponentCondition{}) // var yyz4922 ComponentCondition - yyc4922 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ComponentCondition{}) // var yyz1 ComponentCondition + yyc1 = true } - yyh4922.ElemContainerState(yyj4922) - if yyj4922 < len(yyv4922) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4922[yyj4922] = ComponentCondition{} + yyv1[yyj1] = ComponentCondition{} } else { - yyv4925 := &yyv4922[yyj4922] - yyv4925.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -63428,17 +73547,17 @@ func (x codecSelfer1234) decSliceComponentCondition(v *[]ComponentCondition, d * } } - if yyj4922 < len(yyv4922) { - yyv4922 = yyv4922[:yyj4922] - yyc4922 = true - } else if yyj4922 == 0 && yyv4922 == nil { - yyv4922 = []ComponentCondition{} - yyc4922 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ComponentCondition{} + yyc1 = true } } - yyh4922.End() - if yyc4922 { - *v = yyv4922 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -63447,10 +73566,10 @@ func (x codecSelfer1234) encSliceComponentStatus(v []ComponentStatus, e *codec19 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4926 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4927 := &yyv4926 - yy4927.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -63460,83 +73579,86 @@ func (x 
codecSelfer1234) decSliceComponentStatus(v *[]ComponentStatus, d *codec1 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4928 := *v - yyh4928, yyl4928 := z.DecSliceHelperStart() - var yyc4928 bool - if yyl4928 == 0 { - if yyv4928 == nil { - yyv4928 = []ComponentStatus{} - yyc4928 = true - } else if len(yyv4928) != 0 { - yyv4928 = yyv4928[:0] - yyc4928 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ComponentStatus{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4928 > 0 { - var yyrr4928, yyrl4928 int - var yyrt4928 bool - if yyl4928 > cap(yyv4928) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4928 := len(yyv4928) > 0 - yyv24928 := yyv4928 - yyrl4928, yyrt4928 = z.DecInferLen(yyl4928, z.DecBasicHandle().MaxInitLen, 280) - if yyrt4928 { - if yyrl4928 <= cap(yyv4928) { - yyv4928 = yyv4928[:yyrl4928] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4928 = make([]ComponentStatus, yyrl4928) + yyv1 = make([]ComponentStatus, yyrl1) } } else { - yyv4928 = make([]ComponentStatus, yyrl4928) + yyv1 = make([]ComponentStatus, yyrl1) } - yyc4928 = true - yyrr4928 = len(yyv4928) - if yyrg4928 { - copy(yyv4928, yyv24928) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4928 != len(yyv4928) { - yyv4928 = yyv4928[:yyl4928] - yyc4928 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4928 := 0 - for ; yyj4928 < yyrr4928; yyj4928++ { - yyh4928.ElemContainerState(yyj4928) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4928[yyj4928] = ComponentStatus{} + yyv1[yyj1] = ComponentStatus{} } else { - yyv4929 := &yyv4928[yyj4928] - yyv4929.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4928 { - for ; yyj4928 < yyl4928; yyj4928++ { - yyv4928 = append(yyv4928, ComponentStatus{}) - yyh4928.ElemContainerState(yyj4928) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ComponentStatus{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4928[yyj4928] = ComponentStatus{} + yyv1[yyj1] = ComponentStatus{} } else { - yyv4930 := &yyv4928[yyj4928] - yyv4930.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4928 := 0 - for ; !r.CheckBreak(); yyj4928++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4928 >= len(yyv4928) { - yyv4928 = append(yyv4928, ComponentStatus{}) // var yyz4928 ComponentStatus - yyc4928 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ComponentStatus{}) // var yyz1 ComponentStatus + yyc1 = true } - yyh4928.ElemContainerState(yyj4928) - if yyj4928 < len(yyv4928) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4928[yyj4928] = ComponentStatus{} + yyv1[yyj1] = ComponentStatus{} } else { - yyv4931 := &yyv4928[yyj4928] - yyv4931.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -63544,17 +73666,17 @@ func (x codecSelfer1234) decSliceComponentStatus(v *[]ComponentStatus, d *codec1 } } - if yyj4928 < len(yyv4928) { - yyv4928 = yyv4928[:yyj4928] - yyc4928 = true - } else if yyj4928 == 0 && yyv4928 == nil { - yyv4928 = []ComponentStatus{} - yyc4928 
= true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ComponentStatus{} + yyc1 = true } } - yyh4928.End() - if yyc4928 { - *v = yyv4928 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -63563,10 +73685,10 @@ func (x codecSelfer1234) encSliceDownwardAPIVolumeFile(v []DownwardAPIVolumeFile z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv4932 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4933 := &yyv4932 - yy4933.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -63576,83 +73698,86 @@ func (x codecSelfer1234) decSliceDownwardAPIVolumeFile(v *[]DownwardAPIVolumeFil z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv4934 := *v - yyh4934, yyl4934 := z.DecSliceHelperStart() - var yyc4934 bool - if yyl4934 == 0 { - if yyv4934 == nil { - yyv4934 = []DownwardAPIVolumeFile{} - yyc4934 = true - } else if len(yyv4934) != 0 { - yyv4934 = yyv4934[:0] - yyc4934 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []DownwardAPIVolumeFile{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl4934 > 0 { - var yyrr4934, yyrl4934 int - var yyrt4934 bool - if yyl4934 > cap(yyv4934) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg4934 := len(yyv4934) > 0 - yyv24934 := yyv4934 - yyrl4934, yyrt4934 = z.DecInferLen(yyl4934, z.DecBasicHandle().MaxInitLen, 40) - if yyrt4934 { - if yyrl4934 <= cap(yyv4934) { - yyv4934 = yyv4934[:yyrl4934] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv4934 = make([]DownwardAPIVolumeFile, yyrl4934) + yyv1 = make([]DownwardAPIVolumeFile, yyrl1) } } else { - yyv4934 = make([]DownwardAPIVolumeFile, yyrl4934) + yyv1 = make([]DownwardAPIVolumeFile, yyrl1) } - yyc4934 = true - yyrr4934 = len(yyv4934) - if yyrg4934 { - copy(yyv4934, yyv24934) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl4934 != len(yyv4934) { - yyv4934 = yyv4934[:yyl4934] - yyc4934 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj4934 := 0 - for ; yyj4934 < yyrr4934; yyj4934++ { - yyh4934.ElemContainerState(yyj4934) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4934[yyj4934] = DownwardAPIVolumeFile{} + yyv1[yyj1] = DownwardAPIVolumeFile{} } else { - yyv4935 := &yyv4934[yyj4934] - yyv4935.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt4934 { - for ; yyj4934 < yyl4934; yyj4934++ { - yyv4934 = append(yyv4934, DownwardAPIVolumeFile{}) - yyh4934.ElemContainerState(yyj4934) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, DownwardAPIVolumeFile{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv4934[yyj4934] = DownwardAPIVolumeFile{} + yyv1[yyj1] = DownwardAPIVolumeFile{} } else { - yyv4936 := &yyv4934[yyj4934] - yyv4936.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj4934 := 0 - for ; !r.CheckBreak(); yyj4934++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj4934 >= len(yyv4934) { - yyv4934 = 
append(yyv4934, DownwardAPIVolumeFile{}) // var yyz4934 DownwardAPIVolumeFile - yyc4934 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, DownwardAPIVolumeFile{}) // var yyz1 DownwardAPIVolumeFile + yyc1 = true } - yyh4934.ElemContainerState(yyj4934) - if yyj4934 < len(yyv4934) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv4934[yyj4934] = DownwardAPIVolumeFile{} + yyv1[yyj1] = DownwardAPIVolumeFile{} } else { - yyv4937 := &yyv4934[yyj4934] - yyv4937.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -63660,16 +73785,16 @@ func (x codecSelfer1234) decSliceDownwardAPIVolumeFile(v *[]DownwardAPIVolumeFil } } - if yyj4934 < len(yyv4934) { - yyv4934 = yyv4934[:yyj4934] - yyc4934 = true - } else if yyj4934 == 0 && yyv4934 == nil { - yyv4934 = []DownwardAPIVolumeFile{} - yyc4934 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []DownwardAPIVolumeFile{} + yyc1 = true } } - yyh4934.End() - if yyc4934 { - *v = yyv4934 + yyh1.End() + if yyc1 { + *v = yyv1 } } diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/types.go b/vendor/k8s.io/kubernetes/pkg/api/v1/types.go index cc8abfa958..a75a1d0f06 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/types.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/types.go @@ -17,11 +17,11 @@ limitations under the License. package v1 import ( - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util/intstr" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" ) // The comments for the structs and fields can be used from go-restful to @@ -65,6 +65,8 @@ import ( // ObjectMeta is metadata that all persisted resources must have, which includes all objects // users must create. +// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. +// +k8s:openapi-gen=false type ObjectMeta struct { // Name must be unique within a namespace. Is required when creating resources, although // some resources may allow a client to request the generation of an appropriate name @@ -118,7 +120,7 @@ type ObjectMeta struct { // Read-only. // More info: http://kubernetes.io/docs/user-guide/identifiers#uids // +optional - UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` + UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` // An opaque value that represents the internal version of this object that can // be used by clients to determine when objects have changed. May be used for optimistic @@ -147,7 +149,7 @@ type ObjectMeta struct { // Null for lists. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - CreationTimestamp unversioned.Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"` + CreationTimestamp metav1.Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"` // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This // field is set by the server when a graceful deletion is requested by the user, and is not @@ -167,7 +169,7 @@ type ObjectMeta struct { // Read-only. 
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - DeletionTimestamp *unversioned.Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` + DeletionTimestamp *metav1.Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` // Number of seconds allowed for this object to gracefully terminate before // it will be removed from the system. Only set when deletionTimestamp is also set. @@ -195,7 +197,7 @@ type ObjectMeta struct { // then an entry in this list will point to this controller, with the controller field set to true. // There cannot be more than one managing controller. // +optional - OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` + OwnerReferences []metav1.OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` // Must be empty before the object is deleted from the registry. Each entry // is an identifier for the responsible component that will remove the entry @@ -324,6 +326,14 @@ type VolumeSource struct { AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"` // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"` + // Items for all in one resources secrets, configmaps, and downward API + Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"` + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"` + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"` } // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. @@ -409,8 +419,25 @@ type PersistentVolumeSource struct { AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"` // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"` + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"` + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"` } +const ( + // AlphaStorageClassAnnotation represents the previous alpha storage class + // annotation. It's currently still used and will be held for backwards + // compatibility + AlphaStorageClassAnnotation = "volume.alpha.kubernetes.io/storage-class" + + // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. 
+ // It's currently still used and will be held for backwards compatibility + BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" +) + // +genclient=true // +nonNamespaced=true @@ -418,11 +445,11 @@ type PersistentVolumeSource struct { // It is analogous to a node. // More info: http://kubernetes.io/docs/user-guide/persistent-volumes type PersistentVolume struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines a specification of a persistent volume owned by the cluster. // Provisioned by an administrator. @@ -462,6 +489,10 @@ type PersistentVolumeSpec struct { // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy // +optional PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"` + // Name of StorageClass to which this persistent volume belongs. Empty value + // means that this volume does not belong to any StorageClass. + // +optional + StorageClassName string `json:"storageClassName,omitempty" protobuf:"bytes,6,opt,name=storageClassName"` } // PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes. @@ -496,11 +527,11 @@ type PersistentVolumeStatus struct { // PersistentVolumeList is a list of PersistentVolume items. type PersistentVolumeList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of persistent volumes. // More info: http://kubernetes.io/docs/user-guide/persistent-volumes Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"` @@ -510,11 +541,11 @@ type PersistentVolumeList struct { // PersistentVolumeClaim is a user's request for and claim to a persistent volume type PersistentVolumeClaim struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the desired characteristics of a volume requested by a pod author. // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims @@ -530,11 +561,11 @@ type PersistentVolumeClaim struct { // PersistentVolumeClaimList is a list of PersistentVolumeClaim items. type PersistentVolumeClaimList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. 
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // A list of persistent volume claims. // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"` @@ -549,7 +580,7 @@ type PersistentVolumeClaimSpec struct { AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` // A label query over volumes to consider for binding. // +optional - Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` // Resources represents the minimum resources the volume should have. // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources // +optional @@ -557,6 +588,10 @@ type PersistentVolumeClaimSpec struct { // VolumeName is the binding reference to the PersistentVolume backing this claim. // +optional VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"` + // Name of the StorageClass required by the claim. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1 + // +optional + StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"` } // PersistentVolumeClaimStatus is the current status of a persistent volume claim. @@ -922,8 +957,8 @@ type SecretVolumeSource struct { // key and content is the value. If specified, the listed keys will be // projected into the specified paths, and unlisted keys will not be // present. If a key is specified which is not present in the Secret, - // the volume setup will error. Paths must be relative and may not contain - // the '..' path or start with '..'. + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. // +optional Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` // Optional: mode bits to use on created files by default. Must be a @@ -933,12 +968,37 @@ type SecretVolumeSource struct { // mode, like fsGroup, and the result can be other mode bits set. // +optional DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"` + // Specify whether the Secret or it's keys must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } const ( SecretVolumeSourceDefaultMode int32 = 0644 ) +// Adapts a secret into a projected volume. +// +// The contents of the target Secret's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names. +// Note that this is identical to a secret volume source without the default +// mode. +type SecretProjection struct { + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. 
If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` +} + // Represents an NFS mount that lasts the lifetime of a pod. // NFS volumes do not support ownership management or SELinux relabeling. type NFSVolumeSource struct { @@ -983,6 +1043,10 @@ type ISCSIVolumeSource struct { // Defaults to false. // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` + // iSCSI target portal List. The portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + // +optional + Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"` } // Represents a Fibre Channel volume. @@ -1066,6 +1130,55 @@ type AzureDiskVolumeSource struct { ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"` } +// PortworxVolumeSource represents a Portworx volume resource. +type PortworxVolumeSource struct { + // VolumeID uniquely identifies a Portworx volume + VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` + // FSType represents the filesystem type to mount + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + +// ScaleIOVolumeSource represents a persistent ScaleIO volume +type ScaleIOVolumeSource struct { + // The host address of the ScaleIO API Gateway. + Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"` + // The name of the storage system as configured in ScaleIO. + System string `json:"system" protobuf:"bytes,2,opt,name=system"` + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + SecretRef *LocalObjectReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"` + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"` + // The name of the Protection Domain for the configured storage (defaults to "default"). + // +optional + ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"` + // The Storage Pool associated with the protection domain (defaults to "default"). + // +optional + StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"` + // Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). + // +optional + StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. + VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"` + // Filesystem type to mount. 
+ // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"` +} + // Adapts a ConfigMap into a volume. // // The contents of the target ConfigMap's Data field will be presented in a @@ -1079,8 +1192,8 @@ type ConfigMapVolumeSource struct { // key and content is the value. If specified, the listed keys will be // projected into the specified paths, and unlisted keys will not be // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error. Paths must be relative and may not contain - // the '..' path or start with '..'. + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. // +optional Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` // Optional: mode bits to use on created files by default. Must be a @@ -1090,12 +1203,67 @@ type ConfigMapVolumeSource struct { // mode, like fsGroup, and the result can be other mode bits set. // +optional DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"` + // Specify whether the ConfigMap or it's keys must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } const ( ConfigMapVolumeSourceDefaultMode int32 = 0644 ) +// Adapts a ConfigMap into a projected volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names, +// unless the items element is populated with specific mappings of keys to paths. +// Note that this is identical to a configmap volume source without the default +// mode. +type ConfigMapProjection struct { + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + // Specify whether the ConfigMap or it's keys must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` +} + +// Represents a projected volume source +type ProjectedVolumeSource struct { + // list of volume projections + Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"` + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. 
+ // +optional + DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"` +} + +// Projection that may be projected along with other supported volume types +type VolumeProjection struct { + // all types below are the supported types for projection into the same volume + + // information about the secret data to project + Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"` + // information about the downwardAPI data to project + DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" protobuf:"bytes,2,opt,name=downwardAPI"` + // information about the configMap data to project + ConfigMap *ConfigMapProjection `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"` +} + +const ( + ProjectedVolumeSourceDefaultMode int32 = 0644 +) + // Maps a string key to a path within a volume. type KeyToPath struct { // The key to project. @@ -1223,6 +1391,9 @@ type ConfigMapKeySelector struct { LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` // The key to select. Key string `json:"key" protobuf:"bytes,2,opt,name=key"` + // Specify whether the ConfigMap or it's key must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` } // SecretKeySelector selects a key of a Secret. @@ -1231,6 +1402,48 @@ type SecretKeySelector struct { LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` // The key of the secret to select from. Must be a valid secret key. Key string `json:"key" protobuf:"bytes,2,opt,name=key"` + // Specify whether the Secret or it's key must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` +} + +// EnvFromSource represents the source of a set of ConfigMaps +type EnvFromSource struct { + // An optional identifer to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + // +optional + Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"` + // The ConfigMap to select from + // +optional + ConfigMapRef *ConfigMapEnvSource `json:"configMapRef,omitempty" protobuf:"bytes,2,opt,name=configMapRef"` + // The Secret to select from + // +optional + SecretRef *SecretEnvSource `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"` +} + +// ConfigMapEnvSource selects a ConfigMap to populate the environment +// variables with. +// +// The contents of the target ConfigMap's Data field will represent the +// key-value pairs as environment variables. +type ConfigMapEnvSource struct { + // The ConfigMap to select from. + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // Specify whether the ConfigMap must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"` +} + +// SecretEnvSource selects a Secret to populate the environment +// variables with. +// +// The contents of the target Secret's Data field will represent the +// key-value pairs as environment variables. +type SecretEnvSource struct { + // The Secret to select from. 
+ LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // Specify whether the Secret must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"` } // HTTPHeader describes a custom header to be used in HTTP probes @@ -1332,6 +1545,19 @@ const ( PullIfNotPresent PullPolicy = "IfNotPresent" ) +// TerminationMessagePolicy describes how termination messages are retrieved from a container. +type TerminationMessagePolicy string + +const ( + // TerminationMessageReadFile is the default behavior and will set the container status message to + // the contents of the container's terminationMessagePath when the container exits. + TerminationMessageReadFile TerminationMessagePolicy = "File" + // TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs + // for the container status message when the container exits with an error and the + // terminationMessagePath has no contents. + TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError" +) + // Capability represent POSIX capabilities type type Capability string @@ -1409,6 +1635,14 @@ type Container struct { // Cannot be updated. // +optional Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` // List of environment variables to set in the container. // Cannot be updated. // +optional @@ -1441,10 +1675,21 @@ type Container struct { // Optional: Path at which the file to which the container's termination message // will be written is mounted into the container's filesystem. // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. // Defaults to /dev/termination-log. // Cannot be updated. // +optional TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` // Image pull policy. // One of Always, Never, IfNotPresent. // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. 
@@ -1543,7 +1788,7 @@ type ContainerStateWaiting struct { type ContainerStateRunning struct { // Time at which the container was last (re-)started // +optional - StartedAt unversioned.Time `json:"startedAt,omitempty" protobuf:"bytes,1,opt,name=startedAt"` + StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,1,opt,name=startedAt"` } // ContainerStateTerminated is a terminated state of a container. @@ -1561,10 +1806,10 @@ type ContainerStateTerminated struct { Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` // Time at which previous execution of the container started // +optional - StartedAt unversioned.Time `json:"startedAt,omitempty" protobuf:"bytes,5,opt,name=startedAt"` + StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,5,opt,name=startedAt"` // Time at which the container last terminated // +optional - FinishedAt unversioned.Time `json:"finishedAt,omitempty" protobuf:"bytes,6,opt,name=finishedAt"` + FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,6,opt,name=finishedAt"` // Container's ID in the format 'docker://' // +optional ContainerID string `json:"containerID,omitempty" protobuf:"bytes,7,opt,name=containerID"` @@ -1667,10 +1912,10 @@ type PodCondition struct { Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` // Last time we probed the condition. // +optional - LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` // Last time the condition transitioned from one status to another. // +optional - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` // Unique, one-word, CamelCase reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` @@ -1695,9 +1940,14 @@ const ( type DNSPolicy string const ( + // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS + // first, if it is available, then fall back on the default + // (as determined by kubelet) DNS settings. + DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet" + // DNSClusterFirst indicates that the pod should use cluster DNS - // first, if it is available, then fall back on the default (as - // determined by kubelet) DNS settings. + // first unless hostNetwork is true, if it is available, then + // fall back on the default (as determined by kubelet) DNS settings. DNSClusterFirst DNSPolicy = "ClusterFirst" // DNSDefault indicates that the pod should use the default (as @@ -1850,12 +2100,10 @@ type WeightedPodAffinityTerm struct { type PodAffinityTerm struct { // A label query over a set of resources, in this case pods. // +optional - LabelSelector *unversioned.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` // namespaces specifies which namespaces the labelSelector applies to (matches against); - // nil list means "this pod's namespace," empty list means "all namespaces" - // The json tag here is not "omitempty" since we need to distinguish nil and empty. 
- // See https://golang.org/pkg/encoding/json/#Marshal for more details. - Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"` + // null or empty list means "this pod's namespace" + Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,2,rep,name=namespaces"` // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching // the labelSelector in the specified namespaces, where co-located is defined as running on a node // whose value of the label with key topologyKey matches that of any node on which any of the @@ -1917,8 +2165,12 @@ type Taint struct { Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` // Required. The effect of the taint on pods // that do not tolerate the taint. - // Valid effects are NoSchedule and PreferNoSchedule. + // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"` + // TimeAdded represents the time at which the taint was added. + // It is only written for NoExecute taints. + // +optional + TimeAdded metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"` } type TaintEffect string @@ -1934,26 +2186,23 @@ const ( // onto the node entirely. Enforced by the scheduler. TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule" // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // Do not allow new pods to schedule onto the node unless they tolerate the taint, - // do not allow pods to start on Kubelet unless they tolerate the taint, - // but allow all already-running pods to continue running. - // Enforced by the scheduler and Kubelet. + // Like TaintEffectNoSchedule, but additionally do not allow pods submitted to + // Kubelet without going through the scheduler to start. + // Enforced by Kubelet and the scheduler. // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit" - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // Do not allow new pods to schedule onto the node unless they tolerate the taint, - // do not allow pods to start on Kubelet unless they tolerate the taint, - // and evict any already-running pods that do not tolerate the taint. - // Enforced by the scheduler and Kubelet. - // TaintEffectNoScheduleNoAdmitNoExecute = "NoScheduleNoAdmitNoExecute" + // Evict any already-running pods that do not tolerate the taint. + // Currently enforced by NodeController. + TaintEffectNoExecute TaintEffect = "NoExecute" ) // The pod this Toleration is attached to tolerates any taint that matches // the triple using the matching operator . type Toleration struct { - // Required. Key is the taint key that the toleration applies to. + // Key is the taint key that the toleration applies to. Empty means match all taint keys. + // If the key is empty, operator must be Exists; this combination means to match all values and all keys. // +optional Key string `json:"key,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` - // operator represents a key's relationship to the value. + // Operator represents a key's relationship to the value. // Valid operators are Exists and Equal. Defaults to Equal. // Exists is equivalent to wildcard for value, so that a pod can // tolerate all taints of a particular category. 
@@ -1964,11 +2213,15 @@ type Toleration struct { // +optional Value string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"` // Effect indicates the taint effect to match. Empty means match all taint effects. - // When specified, allowed values are NoSchedule and PreferNoSchedule. + // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. // +optional Effect TaintEffect `json:"effect,omitempty" protobuf:"bytes,4,opt,name=effect,casttype=TaintEffect"` - // TODO: For forgiveness (#1574), we'd eventually add at least a grace period - // here, and possibly an occurrence threshold and period. + // TolerationSeconds represents the period of time the toleration (which must be + // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + // it is not set, which means tolerate the taint forever (do not evict). Zero and + // negative values will be treated as 0 (evict immediately) by the system. + // +optional + TolerationSeconds *int64 `json:"tolerationSeconds,omitempty" protobuf:"varint,5,opt,name=tolerationSeconds"` } // A toleration operator is the set of operators that can be used in a toleration. @@ -2017,10 +2270,9 @@ type PodSpec struct { // of that value or the sum of the normal containers. Limits are applied to init containers // in a similar fashion. // Init containers cannot currently be added or removed. - // Init containers are in alpha state and may change without notice. // Cannot be updated. // More info: http://kubernetes.io/docs/user-guide/containers - InitContainers []Container `json:"-" patchStrategy:"merge" patchMergeKey:"name"` + InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"` // List of containers belonging to the pod. // Containers cannot currently be added or removed. // There must be at least one container in a Pod. @@ -2048,8 +2300,9 @@ type PodSpec struct { // +optional ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"` // Set DNS policy for containers within the pod. - // One of 'ClusterFirst' or 'Default'. + // One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. // Defaults to "ClusterFirst". + // To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. // +optional DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"` // NodeSelector is a selector which must be true for the pod to fit on a node. @@ -2067,6 +2320,9 @@ type PodSpec struct { // +k8s:conversion-gen=false // +optional DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"` + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + // +optional + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"` // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, // the scheduler simply schedules this pod onto that node, assuming that it fits resource @@ -2107,6 +2363,16 @@ type PodSpec struct { // If not specified, the pod will not have a domainname at all. 
// +optional Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"` + // If specified, the pod's scheduling constraints + // +optional + Affinity *Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"` + // If specified, the pod will be dispatched by specified scheduler. + // If not specified, the pod will be dispatched by default scheduler. + // +optional + SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"` + // If specified, the pod's tolerations. + // +optional + Tolerations []Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"` } // PodSecurityContext holds pod-level security attributes and common container settings. @@ -2153,6 +2419,18 @@ type PodSecurityContext struct { FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"` } +// PodQOSClass defines the supported qos classes of Pods. +type PodQOSClass string + +const ( + // PodQOSGuaranteed is the Guaranteed qos class. + PodQOSGuaranteed PodQOSClass = "Guaranteed" + // PodQOSBurstable is the Burstable qos class. + PodQOSBurstable PodQOSClass = "Burstable" + // PodQOSBestEffort is the BestEffort qos class. + PodQOSBestEffort PodQOSClass = "BestEffort" +) + // PodStatus represents information about the status of a pod. Status may trail the actual // state of a system. type PodStatus struct { @@ -2183,28 +2461,33 @@ type PodStatus struct { // RFC 3339 date and time at which the object was acknowledged by the Kubelet. // This is before the Kubelet pulled the container image(s) for the pod. // +optional - StartTime *unversioned.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"` + StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"` // The list has one entry per init container in the manifest. The most recent successful // init container will have ready = true, the most recently started container will have // startTime set. - // Init containers are in alpha state and may change without notice. // More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses - InitContainerStatuses []ContainerStatus `json:"-"` + InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"` + // The list has one entry per container in the manifest. Each entry is currently the output // of `docker inspect`. // More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses // +optional ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"` + // The Quality of Service (QOS) classification assigned to the pod based on resource requirements + // See PodQOSClass type for available QOS classes + // More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md + // +optional + QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` } // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded type PodStatusResult struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Most recently observed status of the pod. // This data may not be up to date. // Populated by the system. @@ -2219,11 +2502,11 @@ type PodStatusResult struct { // Pod is a collection of containers that can run on a host. This resource is created // by clients and scheduled onto hosts. type Pod struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the pod. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -2241,11 +2524,11 @@ type Pod struct { // PodList is a list of Pods. type PodList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of pods. // More info: http://kubernetes.io/docs/user-guide/pods @@ -2257,7 +2540,7 @@ type PodTemplateSpec struct { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the pod. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -2269,11 +2552,11 @@ type PodTemplateSpec struct { // PodTemplate describes a template for creating copies of a predefined pod. type PodTemplate struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Template defines the pods that will be created from this pod template. // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -2283,11 +2566,11 @@ type PodTemplate struct { // PodTemplateList is a list of PodTemplates. type PodTemplateList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. 
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of pod templates Items []PodTemplate `json:"items" protobuf:"bytes,2,rep,name=items"` @@ -2375,7 +2658,7 @@ type ReplicationControllerCondition struct { Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` // The last time the condition transitioned from one status to another. // +optional - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` // The reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` @@ -2388,13 +2671,13 @@ type ReplicationControllerCondition struct { // ReplicationController represents the configuration of a replication controller. type ReplicationController struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // If the Labels of a ReplicationController are empty, they are defaulted to // be the same as the Pod(s) that the replication controller manages. // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the replication controller. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -2412,11 +2695,11 @@ type ReplicationController struct { // ReplicationControllerList is a collection of replication controllers. type ReplicationControllerList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of replication controllers. // More info: http://kubernetes.io/docs/user-guide/replication-controller @@ -2491,7 +2774,7 @@ type LoadBalancerIngress struct { type ServiceSpec struct { // The list of ports that are exposed by this service. // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies - Ports []ServicePort `json:"ports" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"` + Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"` // Route service traffic to pods with label keys and values matching this // selector. If empty or not present, the service is assumed to have an @@ -2623,11 +2906,11 @@ type ServicePort struct { // (for example 3306) that the proxy listens on, and the selector that determines which pods // will answer requests sent through the proxy. type Service struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of a service. // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -2650,11 +2933,11 @@ const ( // ServiceList holds a list of services. type ServiceList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of services Items []Service `json:"items" protobuf:"bytes,2,rep,name=items"` @@ -2667,11 +2950,11 @@ type ServiceList struct { // * a principal that can be authenticated and authorized // * a set of secrets type ServiceAccount struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. // More info: http://kubernetes.io/docs/user-guide/secrets @@ -2684,15 +2967,20 @@ type ServiceAccount struct { // More info: http://kubernetes.io/docs/user-guide/secrets#manually-specifying-an-imagepullsecret // +optional ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"` + + // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. + // Can be overridden at the pod level. + // +optional + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,4,opt,name=automountServiceAccountToken"` } // ServiceAccountList is a list of ServiceAccount objects type ServiceAccountList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ServiceAccounts. // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts @@ -2714,11 +3002,11 @@ type ServiceAccountList struct { // }, // ] type Endpoints struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // The set of all endpoints is the union of all subsets. Addresses are placed into // subsets according to the IPs they share. A single address with multiple ports, @@ -2795,11 +3083,11 @@ type EndpointPort struct { // EndpointsList is a list of endpoints. 
type EndpointsList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of endpoints. Items []Endpoints `json:"items" protobuf:"bytes,2,rep,name=items"` @@ -2818,9 +3106,12 @@ type NodeSpec struct { // +optional ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"` // Unschedulable controls node schedulability of new pods. By default, node is schedulable. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration" + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration // +optional Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"` + // If specified, the node's taints. + // +optional + Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"` } // DaemonEndpoint contains information about a single Daemon endpoint. @@ -2939,7 +3230,7 @@ type PreferAvoidPodsEntry struct { PodSignature PodSignature `json:"podSignature" protobuf:"bytes,1,opt,name=podSignature"` // Time at which this entry was added to the list. // +optional - EvictionTime unversioned.Time `json:"evictionTime,omitempty" protobuf:"bytes,2,opt,name=evictionTime"` + EvictionTime metav1.Time `json:"evictionTime,omitempty" protobuf:"bytes,2,opt,name=evictionTime"` // (brief) reason why this entry was added to the list. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` @@ -2953,7 +3244,7 @@ type PreferAvoidPodsEntry struct { type PodSignature struct { // Reference to controller whose pods should avoid this node. // +optional - PodController *OwnerReference `json:"podController,omitempty" protobuf:"bytes,1,opt,name=podController"` + PodController *metav1.OwnerReference `json:"podController,omitempty" protobuf:"bytes,1,opt,name=podController"` } // Describe a container image @@ -2995,6 +3286,8 @@ const ( NodeDiskPressure NodeConditionType = "DiskPressure" // NodeNetworkUnavailable means that network for the node is not correctly configured. NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" + // NodeInodePressure means the kubelet is under pressure due to insufficient available inodes. + NodeInodePressure NodeConditionType = "InodePressure" ) // NodeCondition contains condition information for a node. @@ -3005,10 +3298,10 @@ type NodeCondition struct { Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` // Last time we got an update on a given condition. // +optional - LastHeartbeatTime unversioned.Time `json:"lastHeartbeatTime,omitempty" protobuf:"bytes,3,opt,name=lastHeartbeatTime"` + LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty" protobuf:"bytes,3,opt,name=lastHeartbeatTime"` // Last time the condition transit from one status to another. // +optional - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` // (brief) reason for the condition's last transition. 
// +optional Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` @@ -3026,6 +3319,8 @@ const ( NodeHostName NodeAddressType = "Hostname" NodeExternalIP NodeAddressType = "ExternalIP" NodeInternalIP NodeAddressType = "InternalIP" + NodeExternalDNS NodeAddressType = "ExternalDNS" + NodeInternalDNS NodeAddressType = "InternalDNS" ) // NodeAddress contains information for the node's address. @@ -3056,6 +3351,11 @@ const ( // Number of Pods that may be running on this Node: see ResourcePods ) +const ( + // Namespace prefix for opaque counted resources (alpha). + ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-" +) + // ResourceList is a set of (resource name, quantity) pairs. type ResourceList map[ResourceName]resource.Quantity @@ -3065,11 +3365,11 @@ type ResourceList map[ResourceName]resource.Quantity // Node is a worker node in Kubernetes. // Each node will have a unique identifier in the cache (i.e. in etcd). type Node struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of a node. // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -3086,22 +3386,23 @@ type Node struct { // NodeList is the whole list of all Nodes which have been registered with master. type NodeList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of nodes Items []Node `json:"items" protobuf:"bytes,2,rep,name=items"` } +// FinalizerName is the name identifying a finalizer during namespace lifecycle. type FinalizerName string -// These are internal finalizer values to Kubernetes, must be qualified name unless defined here +// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or +// in metav1. const ( FinalizerKubernetes FinalizerName = "kubernetes" - FinalizerOrphan string = "orphan" ) // NamespaceSpec describes the attributes on a Namespace. @@ -3136,11 +3437,11 @@ const ( // Namespace provides a scope for Names. // Use of multiple namespaces is optional. type Namespace struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of the Namespace. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -3155,11 +3456,11 @@ type Namespace struct { // NamespaceList is a list of Namespaces. type NamespaceList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. 
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of Namespace objects in the list. // More info: http://kubernetes.io/docs/user-guide/namespaces @@ -3169,26 +3470,43 @@ type NamespaceList struct { // Binding ties one object to another. // For example, a pod is bound to a node by a scheduler. type Binding struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // The target object that you want to bind to the standard object. Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"` } // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +// +k8s:openapi-gen=false type Preconditions struct { // Specifies the target UID. // +optional - UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` + UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` } +// DeletionPropagation decides if a deletion will propagate to the dependents of the object, and how the garbage collector will handle the propagation. +type DeletionPropagation string + +const ( + // Orphans the dependents. + DeletePropagationOrphan DeletionPropagation = "Orphan" + // Deletes the object from the key-value store, the garbage collector will delete the dependents in the background. + DeletePropagationBackground DeletionPropagation = "Background" + // The object exists in the key-value store until the garbage collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the key-value store. + // API sever will put the "DeletingDependents" finalizer on the object, and sets its deletionTimestamp. + // This policy is cascading, i.e., the dependents will be deleted with Foreground. + DeletePropagationForeground DeletionPropagation = "Foreground" +) + // DeleteOptions may be provided when deleting an API object +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +// +k8s:openapi-gen=false type DeleteOptions struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // The duration in seconds before the object should be deleted. Value must be non-negative integer. // The value zero indicates delete immediately. If this value is nil, the default grace period for the @@ -3202,25 +3520,26 @@ type DeleteOptions struct { // +optional Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"` + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. // Should the dependent objects be orphaned. If true/false, the "orphan" // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. // +optional OrphanDependents *bool `json:"orphanDependents,omitempty" protobuf:"varint,3,opt,name=orphanDependents"` -} -// ExportOptions is the query options to the standard REST get call. 
-type ExportOptions struct { - unversioned.TypeMeta `json:",inline"` - - // Should this value be exported. Export strips fields that a user can not specify. - Export bool `json:"export" protobuf:"varint,1,opt,name=export"` - // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace' - Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"` + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // +optional + PropagationPolicy *DeletionPropagation `protobuf:"bytes,4,opt,name=propagationPolicy,casttype=DeletionPropagation"` } // ListOptions is the query options to a standard REST list call. +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +// +k8s:openapi-gen=false type ListOptions struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // A selector to restrict the list of returned objects by their labels. // Defaults to everything. @@ -3236,6 +3555,10 @@ type ListOptions struct { Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"` // When specified with a watch call, shows changes that occur after that particular version of a resource. // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"` // Timeout for the list/watch call. @@ -3245,7 +3568,7 @@ type ListOptions struct { // PodLogOptions is the query options for a Pod's logs REST call. type PodLogOptions struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // The container for which to stream logs. Defaults to only container if there is one container in the pod. // +optional @@ -3267,7 +3590,7 @@ type PodLogOptions struct { // If this value is in the future, no logs will be returned. // Only one of sinceSeconds or sinceTime may be specified. // +optional - SinceTime *unversioned.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"` + SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"` // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line // of log output. Defaults to false. // +optional @@ -3288,7 +3611,7 @@ type PodLogOptions struct { // TODO: merge w/ PodExecOptions below for stdin, stdout, etc // and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY type PodAttachOptions struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Stdin if true, redirects the standard input stream of the pod for this call. // Defaults to false. 
@@ -3323,7 +3646,7 @@ type PodAttachOptions struct { // TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging // and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY type PodExecOptions struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Redirect the standard input stream of the pod for this call. // Defaults to false. @@ -3354,9 +3677,24 @@ type PodExecOptions struct { Command []string `json:"command" protobuf:"bytes,6,rep,name=command"` } +// PodPortForwardOptions is the query options to a Pod's port forward call +// when using WebSockets. +// The `port` query parameter must specify the port or +// ports (comma separated) to forward over. +// Port forwarding over SPDY does not use these options. It requires the port +// to be passed in the `port` header as part of request. +type PodPortForwardOptions struct { + metav1.TypeMeta `json:",inline"` + + // List of ports to forward + // Required when using WebSockets + // +optional + Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"` +} + // PodProxyOptions is the query options to a Pod's proxy call. type PodProxyOptions struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Path is the URL path to use for the current proxy request to pod. // +optional @@ -3365,7 +3703,7 @@ type PodProxyOptions struct { // NodeProxyOptions is the query options to a Node's proxy call. type NodeProxyOptions struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Path is the URL path to use for the current proxy request to node. // +optional @@ -3374,7 +3712,7 @@ type NodeProxyOptions struct { // ServiceProxyOptions is the query options to a Service's proxy call. type ServiceProxyOptions struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Path is the part of URLs that include service endpoints, suffixes, // and parameters to use for the current proxy request to service. @@ -3385,26 +3723,6 @@ type ServiceProxyOptions struct { Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` } -// OwnerReference contains enough information to let you identify an owning -// object. Currently, an owning object must be in the same namespace, so there -// is no namespace field. -type OwnerReference struct { - // API version of the referent. - APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` - // Kind of the referent. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names - Name string `json:"name" protobuf:"bytes,3,opt,name=name"` - // UID of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids - UID types.UID `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` - // If true, this reference points to the managing controller. - // +optional - Controller *bool `json:"controller,omitempty" protobuf:"varint,6,opt,name=controller"` -} - // ObjectReference contains enough information to let you inspect or modify the referred object. type ObjectReference struct { // Kind of the referent. @@ -3422,7 +3740,7 @@ type ObjectReference struct { // UID of the referent. 
// More info: http://kubernetes.io/docs/user-guide/identifiers#uids // +optional - UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` + UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` // API version of the referent. // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"` @@ -3455,7 +3773,7 @@ type LocalObjectReference struct { // SerializedReference is a reference to serialized object. type SerializedReference struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // The reference to an object in the system. // +optional Reference ObjectReference `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"` @@ -3484,10 +3802,10 @@ const ( // Event is a report of an event somewhere in the cluster. // TODO: Decide whether to store these separately or with the object they apply to. type Event struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` // The object that this event is about. InvolvedObject ObjectReference `json:"involvedObject" protobuf:"bytes,2,opt,name=involvedObject"` @@ -3509,11 +3827,11 @@ type Event struct { // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) // +optional - FirstTimestamp unversioned.Time `json:"firstTimestamp,omitempty" protobuf:"bytes,6,opt,name=firstTimestamp"` + FirstTimestamp metav1.Time `json:"firstTimestamp,omitempty" protobuf:"bytes,6,opt,name=firstTimestamp"` // The time at which the most recent occurrence of this event was recorded. // +optional - LastTimestamp unversioned.Time `json:"lastTimestamp,omitempty" protobuf:"bytes,7,opt,name=lastTimestamp"` + LastTimestamp metav1.Time `json:"lastTimestamp,omitempty" protobuf:"bytes,7,opt,name=lastTimestamp"` // The number of times this event has occurred. // +optional @@ -3526,11 +3844,11 @@ type Event struct { // EventList is a list of events. type EventList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of events Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"` @@ -3538,11 +3856,11 @@ type EventList struct { // List holds a list of objects, which may not be known by the server. type List struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of objects Items []runtime.RawExtension `json:"items" protobuf:"bytes,2,rep,name=items"` @@ -3592,11 +3910,11 @@ type LimitRangeSpec struct { // LimitRange sets resource usage limits for each kind of resource in a Namespace. 
type LimitRange struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the limits enforced. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -3606,11 +3924,11 @@ type LimitRange struct { // LimitRangeList is a list of LimitRange items. type LimitRangeList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of LimitRange objects. // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_limit_range.md @@ -3690,11 +4008,11 @@ type ResourceQuotaStatus struct { // ResourceQuota sets aggregate quota restrictions enforced per namespace type ResourceQuota struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the desired quota. // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -3709,11 +4027,11 @@ type ResourceQuota struct { // ResourceQuotaList is a list of ResourceQuota items. type ResourceQuotaList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of ResourceQuota objects. // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota @@ -3725,11 +4043,11 @@ type ResourceQuotaList struct { // Secret holds secret data of a certain type. The total bytes of the values in // the Data field must be less than MaxSecretSize bytes. type Secret struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN // or leading dot followed by valid DNS_SUBDOMAIN. 
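Most of the surrounding hunks are a mechanical migration from the old unversioned.* and local ObjectMeta metadata types to metav1 from k8s.io/apimachinery. A minimal sketch of what constructing an object and its list looks like after the migration, assuming the vendored import paths and a hypothetical ConfigMap name:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	// Object and list metadata now come from the metav1 package rather than
	// pkg/api/unversioned or the package-local ObjectMeta type.
	cm := v1.ConfigMap{
		TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "app-config", // hypothetical name
			Namespace: "default",
		},
		Data: map[string]string{"key": "value"},
	}

	list := v1.ConfigMapList{
		TypeMeta: metav1.TypeMeta{Kind: "ConfigMapList", APIVersion: "v1"},
		ListMeta: metav1.ListMeta{},
		Items:    []v1.ConfigMap{cm},
	}
	fmt.Printf("%d item(s)\n", len(list.Items))
}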
@@ -3790,6 +4108,35 @@ const ( // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets DockerConfigKey = ".dockercfg" + // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json + // + // Required fields: + // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file + SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson" + + // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets + DockerConfigJsonKey = ".dockerconfigjson" + + // SecretTypeBasicAuth contains data needed for basic authentication. + // + // Required at least one of fields: + // - Secret.Data["username"] - username used for authentication + // - Secret.Data["password"] - password or token needed for authentication + SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth" + + // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets + BasicAuthUsernameKey = "username" + // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets + BasicAuthPasswordKey = "password" + + // SecretTypeSSHAuth contains data needed for SSH authetication. + // + // Required field: + // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication + SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" + + // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets + SSHAuthPrivateKey = "ssh-privatekey" // SecretTypeTLS contains information about a TLS client or server secret. It // is primarily used with TLS termination of the Ingress resource, but may be // used in other types. @@ -3808,11 +4155,11 @@ const ( // SecretList is a list of Secret. type SecretList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of secret objects. // More info: http://kubernetes.io/docs/user-guide/secrets @@ -3823,11 +4170,11 @@ type SecretList struct { // ConfigMap holds configuration data for pods to consume. type ConfigMap struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Data contains the configuration data. // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot. @@ -3837,11 +4184,11 @@ type ConfigMap struct { // ConfigMapList is a resource containing a list of ConfigMap objects. type ConfigMapList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of ConfigMaps. 
Items []ConfigMap `json:"items" protobuf:"bytes,2,rep,name=items"` @@ -3878,11 +4225,11 @@ type ComponentCondition struct { // ComponentStatus (and ComponentStatusList) holds the cluster validation info. type ComponentStatus struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of component conditions observed // +optional @@ -3891,11 +4238,11 @@ type ComponentStatus struct { // Status of all the conditions for the component as a list of ComponentStatus objects. type ComponentStatusList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ComponentStatus objects. Items []ComponentStatus `json:"items" protobuf:"bytes,2,rep,name=items"` @@ -3939,6 +4286,15 @@ type DownwardAPIVolumeFile struct { Mode *int32 `json:"mode,omitempty" protobuf:"varint,4,opt,name=mode"` } +// Represents downward API info for projecting into a projected volume. +// Note that this is identical to a downwardAPI volume source without the default +// mode. +type DownwardAPIProjection struct { + // Items is a list of DownwardAPIVolume file + // +optional + Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"` +} + // SecurityContext holds security configuration that will be applied to a container. // Some fields are present in both SecurityContext and PodSecurityContext. When both // are set, the values in SecurityContext take precedence. @@ -3996,11 +4352,11 @@ type SELinuxOptions struct { // RangeAllocation is not a public type. type RangeAllocation struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Range is string that identifies the range represented by 'data'. Range string `json:"range" protobuf:"bytes,2,opt,name=range"` @@ -4011,4 +4367,14 @@ type RangeAllocation struct { const ( // "default-scheduler" is the name of default scheduler. DefaultSchedulerName = "default-scheduler" + + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // When the --hard-pod-affinity-weight scheduler flag is not specified, + // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. + DefaultHardPodAffinitySymmetricWeight int = 1 + + // When the --failure-domains scheduler flag is not specified, + // DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity. 
+ DefaultFailureDomains string = metav1.LabelHostname + "," + metav1.LabelZoneFailureDomain + "," + metav1.LabelZoneRegion ) diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/api/v1/types_swagger_doc_generated.go index 7322cea87e..75416d59a2 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/types_swagger_doc_generated.go @@ -180,9 +180,19 @@ func (ConfigMap) SwaggerDoc() map[string]string { return map_ConfigMap } +var map_ConfigMapEnvSource = map[string]string{ + "": "ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.", + "optional": "Specify whether the ConfigMap must be defined", +} + +func (ConfigMapEnvSource) SwaggerDoc() map[string]string { + return map_ConfigMapEnvSource +} + var map_ConfigMapKeySelector = map[string]string{ - "": "Selects a key from a ConfigMap.", - "key": "The key to select.", + "": "Selects a key from a ConfigMap.", + "key": "The key to select.", + "optional": "Specify whether the ConfigMap or it's key must be defined", } func (ConfigMapKeySelector) SwaggerDoc() map[string]string { @@ -199,10 +209,21 @@ func (ConfigMapList) SwaggerDoc() map[string]string { return map_ConfigMapList } +var map_ConfigMapProjection = map[string]string{ + "": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.", + "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "optional": "Specify whether the ConfigMap or it's keys must be defined", +} + +func (ConfigMapProjection) SwaggerDoc() map[string]string { + return map_ConfigMapProjection +} + var map_ConfigMapVolumeSource = map[string]string{ "": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.", - "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error. Paths must be relative and may not contain the '..' path or start with '..'.", + "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. 
If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "optional": "Specify whether the ConfigMap or it's keys must be defined", } func (ConfigMapVolumeSource) SwaggerDoc() map[string]string { @@ -210,25 +231,27 @@ func (ConfigMapVolumeSource) SwaggerDoc() map[string]string { } var map_Container = map[string]string{ - "": "A single application container that you want to run within a pod.", - "name": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", - "image": "Docker image name. More info: http://kubernetes.io/docs/user-guide/images", - "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands", - "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands", - "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", - "ports": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", - "env": "List of environment variables to set in the container. Cannot be updated.", - "resources": "Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources", - "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", - "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes", - "readinessProbe": "Periodic probe of container service readiness. 
Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes", - "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", - "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Defaults to /dev/termination-log. Cannot be updated.", - "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/images#updating-images", - "securityContext": "Security options the pod should run with. More info: http://releases.k8s.io/HEAD/docs/design/security_context.md", - "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", - "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", - "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", + "": "A single application container that you want to run within a pod.", + "name": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + "image": "Docker image name. More info: http://kubernetes.io/docs/user-guide/images", + "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands", + "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands", + "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "ports": "List of ports to expose from the container. 
Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "env": "List of environment variables to set in the container. Cannot be updated.", + "resources": "Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources", + "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes", + "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes", + "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", + "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/images#updating-images", + "securityContext": "Security options the pod should run with. More info: http://releases.k8s.io/HEAD/docs/design/security_context.md", + "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", } func (Container) SwaggerDoc() map[string]string { @@ -329,16 +352,26 @@ func (DaemonEndpoint) SwaggerDoc() map[string]string { } var map_DeleteOptions = map[string]string{ - "": "DeleteOptions may be provided when deleting an API object", + "": "DeleteOptions may be provided when deleting an API object DEPRECATED: This type has been moved to meta/v1 and will be removed soon.", "gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", "preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", - "orphanDependents": "Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list.", + "orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + "PropagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.", } func (DeleteOptions) SwaggerDoc() map[string]string { return map_DeleteOptions } +var map_DownwardAPIProjection = map[string]string{ + "": "Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.", + "items": "Items is a list of DownwardAPIVolume file", +} + +func (DownwardAPIProjection) SwaggerDoc() map[string]string { + return map_DownwardAPIProjection +} + var map_DownwardAPIVolumeFile = map[string]string{ "": "DownwardAPIVolumeFile represents information to create the file containing the pod field", "path": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'", @@ -424,6 +457,17 @@ func (EndpointsList) SwaggerDoc() map[string]string { return map_EndpointsList } +var map_EnvFromSource = map[string]string{ + "": "EnvFromSource represents the source of a set of ConfigMaps", + "prefix": "An optional identifer to prepend to each key in the ConfigMap. 
Must be a C_IDENTIFIER.", + "configMapRef": "The ConfigMap to select from", + "secretRef": "The Secret to select from", +} + +func (EnvFromSource) SwaggerDoc() map[string]string { + return map_EnvFromSource +} + var map_EnvVar = map[string]string{ "": "EnvVar represents an environment variable present in a Container.", "name": "Name of the environment variable. Must be a C_IDENTIFIER.", @@ -493,16 +537,6 @@ func (ExecAction) SwaggerDoc() map[string]string { return map_ExecAction } -var map_ExportOptions = map[string]string{ - "": "ExportOptions is the query options to the standard REST get call.", - "export": "Should this value be exported. Export strips fields that a user can not specify.", - "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'", -} - -func (ExportOptions) SwaggerDoc() map[string]string { - return map_ExportOptions -} - var map_FCVolumeSource = map[string]string{ "": "Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.", "targetWWNs": "Required: FC target worldwide names (WWNs)", @@ -623,6 +657,7 @@ var map_ISCSIVolumeSource = map[string]string{ "iscsiInterface": "Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.", "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#iscsi", "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", + "portals": "iSCSI target portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", } func (ISCSIVolumeSource) SwaggerDoc() map[string]string { @@ -704,11 +739,11 @@ func (List) SwaggerDoc() map[string]string { } var map_ListOptions = map[string]string{ - "": "ListOptions is the query options to a standard REST list call.", + "": "ListOptions is the query options to a standard REST list call. DEPRECATED: This type has been moved to meta/v1 and will be removed soon.", "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.", + "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", "timeoutSeconds": "Timeout for the list/watch call.", } @@ -901,7 +936,8 @@ var map_NodeSpec = map[string]string{ "podCIDR": "PodCIDR represents the pod IP range assigned to the node.", "externalID": "External ID of the node assigned by some machine database (e.g. a cloud provider). Deprecated.", "providerID": "ID of the node assigned by the cloud provider in the format: ://", - "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration\"", + "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration", + "taints": "If specified, the node's taints.", } func (NodeSpec) SwaggerDoc() map[string]string { @@ -955,7 +991,7 @@ func (ObjectFieldSelector) SwaggerDoc() map[string]string { } var map_ObjectMeta = map[string]string{ - "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", + "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon.", "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency", "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", @@ -992,19 +1028,6 @@ func (ObjectReference) SwaggerDoc() map[string]string { return map_ObjectReference } -var map_OwnerReference = map[string]string{ - "": "OwnerReference contains enough information to let you identify an owning object. 
Currently, an owning object must be in the same namespace, so there is no namespace field.", - "apiVersion": "API version of the referent.", - "kind": "Kind of the referent. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "uid": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", - "controller": "If true, this reference points to the managing controller.", -} - -func (OwnerReference) SwaggerDoc() map[string]string { - return map_OwnerReference -} - var map_PersistentVolume = map[string]string{ "": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: http://kubernetes.io/docs/user-guide/persistent-volumes", "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", @@ -1038,11 +1061,12 @@ func (PersistentVolumeClaimList) SwaggerDoc() map[string]string { } var map_PersistentVolumeClaimSpec = map[string]string{ - "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", - "accessModes": "AccessModes contains the desired access modes the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1", - "selector": "A label query over volumes to consider for binding.", - "resources": "Resources represents the minimum resources the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources", - "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", + "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", + "accessModes": "AccessModes contains the desired access modes the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1", + "selector": "A label query over volumes to consider for binding.", + "resources": "Resources represents the minimum resources the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources", + "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", + "storageClassName": "Name of the StorageClass required by the claim. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1", } func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { @@ -1099,6 +1123,8 @@ var map_PersistentVolumeSource = map[string]string{ "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", } func (PersistentVolumeSource) SwaggerDoc() map[string]string { @@ -1111,6 +1137,7 @@ var map_PersistentVolumeSpec = map[string]string{ "accessModes": "AccessModes contains all ways the volume can be mounted. 
More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes", "claimRef": "ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#binding", "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy", + "storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.", } func (PersistentVolumeSpec) SwaggerDoc() map[string]string { @@ -1162,7 +1189,7 @@ func (PodAffinity) SwaggerDoc() map[string]string { var map_PodAffinityTerm = map[string]string{ "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key tches that of any node on which a pod of the set of pods is running", "labelSelector": "A label query over a set of resources, in this case pods.", - "namespaces": "namespaces specifies which namespaces the labelSelector applies to (matches against); nil list means \"this pod's namespace,\" empty list means \"all namespaces\" The json tag here is not \"omitempty\" since we need to distinguish nil and empty. See https://golang.org/pkg/encoding/json/#Marshal for more details.", + "namespaces": "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"", "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as \"all topologies\" (\"all topologies\" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.", } @@ -1247,6 +1274,15 @@ func (PodLogOptions) SwaggerDoc() map[string]string { return map_PodLogOptions } +var map_PodPortForwardOptions = map[string]string{ + "": "PodPortForwardOptions is the query options to a Pod's port forward call when using WebSockets. The `port` query parameter must specify the port or ports (comma separated) to forward over. Port forwarding over SPDY does not use these options. 
It requires the port to be passed in the `port` header as part of request.", + "ports": "List of ports to forward Required when using WebSockets", +} + +func (PodPortForwardOptions) SwaggerDoc() map[string]string { + return map_PodPortForwardOptions +} + var map_PodProxyOptions = map[string]string{ "": "PodProxyOptions is the query options to a Pod's proxy call.", "path": "Path is the URL path to use for the current proxy request to pod.", @@ -1281,14 +1317,16 @@ func (PodSignature) SwaggerDoc() map[string]string { var map_PodSpec = map[string]string{ "": "PodSpec is a description of a pod.", "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: http://kubernetes.io/docs/user-guide/volumes", + "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers", "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers", "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: http://kubernetes.io/docs/user-guide/pod-states#restartpolicy", "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", - "dnsPolicy": "Set DNS policy for containers within the pod. One of 'ClusterFirst' or 'Default'. Defaults to \"ClusterFirst\".", + "dnsPolicy": "Set DNS policy for containers within the pod. One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. Defaults to \"ClusterFirst\". To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", "nodeSelector": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. 
More info: http://kubernetes.io/docs/user-guide/node-selection/README", "serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md", "serviceAccount": "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", + "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.", "nodeName": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.", "hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", "hostPID": "Use the host's pid namespace. Optional: Default to false.", @@ -1297,6 +1335,9 @@ var map_PodSpec = map[string]string{ "imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod", "hostname": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.", "subdomain": "If specified, the fully qualified Pod hostname will be \"...svc.\". If not specified, the pod will not have a domainname at all.", + "affinity": "If specified, the pod's scheduling constraints", + "schedulerName": "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", + "tolerations": "If specified, the pod's tolerations.", } func (PodSpec) SwaggerDoc() map[string]string { @@ -1304,15 +1345,17 @@ func (PodSpec) SwaggerDoc() map[string]string { } var map_PodStatus = map[string]string{ - "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system.", - "phase": "Current condition of the pod. More info: http://kubernetes.io/docs/user-guide/pod-states#pod-phase", - "conditions": "Current service state of pod. More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions", - "message": "A human readable message indicating details about why the pod is in this condition.", - "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'", - "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", - "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", - "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", - "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses", + "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system.", + "phase": "Current condition of the pod. 
More info: http://kubernetes.io/docs/user-guide/pod-states#pod-phase", + "conditions": "Current service state of pod. More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions", + "message": "A human readable message indicating details about why the pod is in this condition.", + "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'", + "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", + "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", + "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", + "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses", + "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses", + "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md", } func (PodStatus) SwaggerDoc() map[string]string { @@ -1359,6 +1402,17 @@ func (PodTemplateSpec) SwaggerDoc() map[string]string { return map_PodTemplateSpec } +var map_PortworxVolumeSource = map[string]string{ + "": "PortworxVolumeSource represents a Portworx volume resource.", + "volumeID": "VolumeID uniquely identifies a Portworx volume", + "fsType": "FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", +} + +func (PortworxVolumeSource) SwaggerDoc() map[string]string { + return map_PortworxVolumeSource +} + var map_Preconditions = map[string]string{ "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", "uid": "Specifies the target UID.", @@ -1403,6 +1457,16 @@ func (Probe) SwaggerDoc() map[string]string { return map_Probe } +var map_ProjectedVolumeSource = map[string]string{ + "": "Represents a projected volume source", + "sources": "list of volume projections", + "defaultMode": "Mode bits to use on created files by default. Must be a value between 0 and 0777. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", +} + +func (ProjectedVolumeSource) SwaggerDoc() map[string]string { + return map_ProjectedVolumeSource +} + var map_QuobyteVolumeSource = map[string]string{ "": "Represents a Quobyte mount that lasts the lifetime of a pod. 
Quobyte volumes do not support ownership management or SELinux relabeling.", "registry": "Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes", @@ -1577,6 +1641,24 @@ func (SELinuxOptions) SwaggerDoc() map[string]string { return map_SELinuxOptions } +var map_ScaleIOVolumeSource = map[string]string{ + "": "ScaleIOVolumeSource represents a persistent ScaleIO volume", + "gateway": "The host address of the ScaleIO API Gateway.", + "system": "The name of the storage system as configured in ScaleIO.", + "secretRef": "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.", + "sslEnabled": "Flag to enable/disable SSL communication with Gateway, default false", + "protectionDomain": "The name of the Protection Domain for the configured storage (defaults to \"default\").", + "storagePool": "The Storage Pool associated with the protection domain (defaults to \"default\").", + "storageMode": "Indicates whether the storage for a volume should be thick or thin (defaults to \"thin\").", + "volumeName": "The name of a volume already created in the ScaleIO system that is associated with this volume source.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", +} + +func (ScaleIOVolumeSource) SwaggerDoc() map[string]string { + return map_ScaleIOVolumeSource +} + var map_Secret = map[string]string{ "": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.", "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", @@ -1589,9 +1671,19 @@ func (Secret) SwaggerDoc() map[string]string { return map_Secret } +var map_SecretEnvSource = map[string]string{ + "": "SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables.", + "optional": "Specify whether the Secret must be defined", +} + +func (SecretEnvSource) SwaggerDoc() map[string]string { + return map_SecretEnvSource +} + var map_SecretKeySelector = map[string]string{ - "": "SecretKeySelector selects a key of a Secret.", - "key": "The key of the secret to select from. Must be a valid secret key.", + "": "SecretKeySelector selects a key of a Secret.", + "key": "The key of the secret to select from. Must be a valid secret key.", + "optional": "Specify whether the Secret or it's key must be defined", } func (SecretKeySelector) SwaggerDoc() map[string]string { @@ -1608,11 +1700,22 @@ func (SecretList) SwaggerDoc() map[string]string { return map_SecretList } +var map_SecretProjection = map[string]string{ + "": "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. 
Note that this is identical to a secret volume source without the default mode.", + "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "optional": "Specify whether the Secret or its key must be defined", +} + +func (SecretProjection) SwaggerDoc() map[string]string { + return map_SecretProjection +} + var map_SecretVolumeSource = map[string]string{ "": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.", "secretName": "Name of the secret in the pod's namespace to use. More info: http://kubernetes.io/docs/user-guide/volumes#secrets", - "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error. Paths must be relative and may not contain the '..' path or start with '..'.", + "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "optional": "Specify whether the Secret or it's keys must be defined", } func (SecretVolumeSource) SwaggerDoc() map[string]string { @@ -1654,10 +1757,11 @@ func (Service) SwaggerDoc() map[string]string { } var map_ServiceAccount = map[string]string{ - "": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: http://kubernetes.io/docs/user-guide/secrets", - "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. 
More info: http://kubernetes.io/docs/user-guide/secrets#manually-specifying-an-imagepullsecret", + "": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: http://kubernetes.io/docs/user-guide/secrets", + "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: http://kubernetes.io/docs/user-guide/secrets#manually-specifying-an-imagepullsecret", + "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.", } func (ServiceAccount) SwaggerDoc() map[string]string { @@ -1743,10 +1847,11 @@ func (TCPSocketAction) SwaggerDoc() map[string]string { } var map_Taint = map[string]string{ - "": "The node this Taint is attached to has the effect \"effect\" on any pod that that does not tolerate the Taint.", - "key": "Required. The taint key to be applied to a node.", - "value": "Required. The taint value corresponding to the taint key.", - "effect": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule and PreferNoSchedule.", + "": "The node this Taint is attached to has the effect \"effect\" on any pod that that does not tolerate the Taint.", + "key": "Required. The taint key to be applied to a node.", + "value": "Required. The taint value corresponding to the taint key.", + "effect": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.", + "timeAdded": "TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints.", } func (Taint) SwaggerDoc() map[string]string { @@ -1754,11 +1859,12 @@ func (Taint) SwaggerDoc() map[string]string { } var map_Toleration = map[string]string{ - "": "The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator .", - "key": "Required. Key is the taint key that the toleration applies to.", - "operator": "operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", - "value": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.", - "effect": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule and PreferNoSchedule.", + "": "The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator .", + "key": "Key is the taint key that the toleration applies to. Empty means match all taint keys. 
If the key is empty, operator must be Exists; this combination means to match all values and all keys.", + "operator": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", + "value": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.", + "effect": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.", + "tolerationSeconds": "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.", } func (Toleration) SwaggerDoc() map[string]string { @@ -1786,6 +1892,17 @@ func (VolumeMount) SwaggerDoc() map[string]string { return map_VolumeMount } +var map_VolumeProjection = map[string]string{ + "": "Projection that may be projected along with other supported volume types", + "secret": "information about the secret data to project", + "downwardAPI": "information about the downwardAPI data to project", + "configMap": "information about the configMap data to project", +} + +func (VolumeProjection) SwaggerDoc() map[string]string { + return map_VolumeProjection +} + var map_VolumeSource = map[string]string{ "": "Represents the source of a volume to mount. Only one of its members may be specified.", "hostPath": "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: http://kubernetes.io/docs/user-guide/volumes#hostpath", @@ -1811,6 +1928,9 @@ var map_VolumeSource = map[string]string{ "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "projected": "Items for all in one resources secrets, configmaps, and downward API", + "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", } func (VolumeSource) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.conversion.go index 8dd547c58d..04609041c3 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,11 +21,11 @@ limitations under the License. 
package v1 import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" - types "k8s.io/kubernetes/pkg/types" unsafe "unsafe" ) @@ -65,10 +65,14 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_api_ComponentStatusList_To_v1_ComponentStatusList, Convert_v1_ConfigMap_To_api_ConfigMap, Convert_api_ConfigMap_To_v1_ConfigMap, + Convert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource, + Convert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource, Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector, Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector, Convert_v1_ConfigMapList_To_api_ConfigMapList, Convert_api_ConfigMapList_To_v1_ConfigMapList, + Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection, + Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection, Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource, Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource, Convert_v1_Container_To_api_Container, @@ -91,6 +95,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint, Convert_v1_DeleteOptions_To_api_DeleteOptions, Convert_api_DeleteOptions_To_v1_DeleteOptions, + Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection, + Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection, Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile, Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile, Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource, @@ -107,6 +113,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_api_Endpoints_To_v1_Endpoints, Convert_v1_EndpointsList_To_api_EndpointsList, Convert_api_EndpointsList_To_v1_EndpointsList, + Convert_v1_EnvFromSource_To_api_EnvFromSource, + Convert_api_EnvFromSource_To_v1_EnvFromSource, Convert_v1_EnvVar_To_api_EnvVar, Convert_api_EnvVar_To_v1_EnvVar, Convert_v1_EnvVarSource_To_api_EnvVarSource, @@ -119,8 +127,6 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_api_EventSource_To_v1_EventSource, Convert_v1_ExecAction_To_api_ExecAction, Convert_api_ExecAction_To_v1_ExecAction, - Convert_v1_ExportOptions_To_api_ExportOptions, - Convert_api_ExportOptions_To_v1_ExportOptions, Convert_v1_FCVolumeSource_To_api_FCVolumeSource, Convert_api_FCVolumeSource_To_v1_FCVolumeSource, Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource, @@ -189,6 +195,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_api_NodeList_To_v1_NodeList, Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions, Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions, + Convert_v1_NodeResources_To_api_NodeResources, + Convert_api_NodeResources_To_v1_NodeResources, Convert_v1_NodeSelector_To_api_NodeSelector, Convert_api_NodeSelector_To_v1_NodeSelector, Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement, @@ -207,8 +215,6 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_api_ObjectMeta_To_v1_ObjectMeta, Convert_v1_ObjectReference_To_api_ObjectReference, Convert_api_ObjectReference_To_v1_ObjectReference, - Convert_v1_OwnerReference_To_api_OwnerReference, - Convert_api_OwnerReference_To_v1_OwnerReference, Convert_v1_PersistentVolume_To_api_PersistentVolume, 
Convert_api_PersistentVolume_To_v1_PersistentVolume, Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim, @@ -249,6 +255,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_api_PodList_To_v1_PodList, Convert_v1_PodLogOptions_To_api_PodLogOptions, Convert_api_PodLogOptions_To_v1_PodLogOptions, + Convert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions, + Convert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions, Convert_v1_PodProxyOptions_To_api_PodProxyOptions, Convert_api_PodProxyOptions_To_v1_PodProxyOptions, Convert_v1_PodSecurityContext_To_api_PodSecurityContext, @@ -267,6 +275,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_api_PodTemplateList_To_v1_PodTemplateList, Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec, Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec, + Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource, + Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource, Convert_v1_Preconditions_To_api_Preconditions, Convert_api_Preconditions_To_v1_Preconditions, Convert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry, @@ -275,6 +285,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm, Convert_v1_Probe_To_api_Probe, Convert_api_Probe_To_v1_Probe, + Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource, + Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource, Convert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource, Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource, Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource, @@ -305,12 +317,18 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_api_ResourceRequirements_To_v1_ResourceRequirements, Convert_v1_SELinuxOptions_To_api_SELinuxOptions, Convert_api_SELinuxOptions_To_v1_SELinuxOptions, + Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource, + Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource, Convert_v1_Secret_To_api_Secret, Convert_api_Secret_To_v1_Secret, + Convert_v1_SecretEnvSource_To_api_SecretEnvSource, + Convert_api_SecretEnvSource_To_v1_SecretEnvSource, Convert_v1_SecretKeySelector_To_api_SecretKeySelector, Convert_api_SecretKeySelector_To_v1_SecretKeySelector, Convert_v1_SecretList_To_api_SecretList, Convert_api_SecretList_To_v1_SecretList, + Convert_v1_SecretProjection_To_api_SecretProjection, + Convert_api_SecretProjection_To_v1_SecretProjection, Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource, Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource, Convert_v1_SecurityContext_To_api_SecurityContext, @@ -333,6 +351,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_api_ServiceSpec_To_v1_ServiceSpec, Convert_v1_ServiceStatus_To_api_ServiceStatus, Convert_api_ServiceStatus_To_v1_ServiceStatus, + Convert_v1_Sysctl_To_api_Sysctl, + Convert_api_Sysctl_To_v1_Sysctl, Convert_v1_TCPSocketAction_To_api_TCPSocketAction, Convert_api_TCPSocketAction_To_v1_TCPSocketAction, Convert_v1_Taint_To_api_Taint, @@ -343,6 +363,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_api_Volume_To_v1_Volume, Convert_v1_VolumeMount_To_api_VolumeMount, Convert_api_VolumeMount_To_v1_VolumeMount, + Convert_v1_VolumeProjection_To_api_VolumeProjection, + Convert_api_VolumeProjection_To_v1_VolumeProjection, Convert_v1_VolumeSource_To_api_VolumeSource, Convert_api_VolumeSource_To_v1_VolumeSource, Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource, @@ -485,9 +507,7 @@ func 
Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.Azure } func autoConvert_v1_Binding_To_api_Binding(in *Binding, out *api.Binding, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Target, &out.Target, s); err != nil { return err } @@ -499,9 +519,7 @@ func Convert_v1_Binding_To_api_Binding(in *Binding, out *api.Binding, s conversi } func autoConvert_api_Binding_To_v1_Binding(in *api.Binding, out *Binding, s conversion.Scope) error { - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Target, &out.Target, s); err != nil { return err } @@ -547,7 +565,11 @@ func Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *CephFSVolumeSou } func autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *CephFSVolumeSource, s conversion.Scope) error { - out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) + if in.Monitors == nil { + out.Monitors = make([]string, 0) + } else { + out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) + } out.Path = in.Path out.User = in.User out.SecretFile = in.SecretFile @@ -607,9 +629,7 @@ func Convert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCo } func autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in *ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta out.Conditions = *(*[]api.ComponentCondition)(unsafe.Pointer(&in.Conditions)) return nil } @@ -619,9 +639,7 @@ func Convert_v1_ComponentStatus_To_api_ComponentStatus(in *ComponentStatus, out } func autoConvert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, out *ComponentStatus, s conversion.Scope) error { - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta out.Conditions = *(*[]ComponentCondition)(unsafe.Pointer(&in.Conditions)) return nil } @@ -642,7 +660,11 @@ func Convert_v1_ComponentStatusList_To_api_ComponentStatusList(in *ComponentStat func autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *ComponentStatusList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]ComponentStatus)(unsafe.Pointer(&in.Items)) + if in.Items == nil { + out.Items = make([]ComponentStatus, 0) + } else { + out.Items = *(*[]ComponentStatus)(unsafe.Pointer(&in.Items)) + } return nil } @@ -651,9 +673,7 @@ func Convert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.Component } func autoConvert_v1_ConfigMap_To_api_ConfigMap(in *ConfigMap, out *api.ConfigMap, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) return nil } @@ -663,9 +683,7 @@ func Convert_v1_ConfigMap_To_api_ConfigMap(in *ConfigMap, out *api.ConfigMap, s } func autoConvert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *ConfigMap, s conversion.Scope) error { - if err := 
Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) return nil } @@ -674,11 +692,36 @@ func Convert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *ConfigMap, s return autoConvert_api_ConfigMap_To_v1_ConfigMap(in, out, s) } +func autoConvert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in *ConfigMapEnvSource, out *api.ConfigMapEnvSource, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in *ConfigMapEnvSource, out *api.ConfigMapEnvSource, s conversion.Scope) error { + return autoConvert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in, out, s) +} + +func autoConvert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *api.ConfigMapEnvSource, out *ConfigMapEnvSource, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *api.ConfigMapEnvSource, out *ConfigMapEnvSource, s conversion.Scope) error { + return autoConvert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in, out, s) +} + func autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error { if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { return err } out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) return nil } @@ -691,6 +734,7 @@ func autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.Con return err } out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) return nil } @@ -710,7 +754,11 @@ func Convert_v1_ConfigMapList_To_api_ConfigMapList(in *ConfigMapList, out *api.C func autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *ConfigMapList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]ConfigMap)(unsafe.Pointer(&in.Items)) + if in.Items == nil { + out.Items = make([]ConfigMap, 0) + } else { + out.Items = *(*[]ConfigMap)(unsafe.Pointer(&in.Items)) + } return nil } @@ -718,12 +766,39 @@ func Convert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *C return autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in, out, s) } +func autoConvert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in *ConfigMapProjection, out *api.ConfigMapProjection, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in *ConfigMapProjection, out *api.ConfigMapProjection, s conversion.Scope) error { + return autoConvert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in, out, s) +} + +func 
autoConvert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in *api.ConfigMapProjection, out *ConfigMapProjection, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in *api.ConfigMapProjection, out *ConfigMapProjection, s conversion.Scope) error { + return autoConvert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in, out, s) +} + func autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error { if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { return err } out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) return nil } @@ -737,6 +812,7 @@ func autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.C } out.Items = *(*[]KeyToPath)(unsafe.Pointer(&in.Items)) out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) return nil } @@ -751,6 +827,7 @@ func autoConvert_v1_Container_To_api_Container(in *Container, out *api.Container out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) out.WorkingDir = in.WorkingDir out.Ports = *(*[]api.ContainerPort)(unsafe.Pointer(&in.Ports)) + out.EnvFrom = *(*[]api.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) out.Env = *(*[]api.EnvVar)(unsafe.Pointer(&in.Env)) if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { return err @@ -760,6 +837,7 @@ func autoConvert_v1_Container_To_api_Container(in *Container, out *api.Container out.ReadinessProbe = (*api.Probe)(unsafe.Pointer(in.ReadinessProbe)) out.Lifecycle = (*api.Lifecycle)(unsafe.Pointer(in.Lifecycle)) out.TerminationMessagePath = in.TerminationMessagePath + out.TerminationMessagePolicy = api.TerminationMessagePolicy(in.TerminationMessagePolicy) out.ImagePullPolicy = api.PullPolicy(in.ImagePullPolicy) out.SecurityContext = (*api.SecurityContext)(unsafe.Pointer(in.SecurityContext)) out.Stdin = in.Stdin @@ -779,6 +857,7 @@ func autoConvert_api_Container_To_v1_Container(in *api.Container, out *Container out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) out.WorkingDir = in.WorkingDir out.Ports = *(*[]ContainerPort)(unsafe.Pointer(&in.Ports)) + out.EnvFrom = *(*[]EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) out.Env = *(*[]EnvVar)(unsafe.Pointer(&in.Env)) if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { return err @@ -788,6 +867,7 @@ func autoConvert_api_Container_To_v1_Container(in *api.Container, out *Container out.ReadinessProbe = (*Probe)(unsafe.Pointer(in.ReadinessProbe)) out.Lifecycle = (*Lifecycle)(unsafe.Pointer(in.Lifecycle)) out.TerminationMessagePath = in.TerminationMessagePath + out.TerminationMessagePolicy = TerminationMessagePolicy(in.TerminationMessagePolicy) out.ImagePullPolicy = PullPolicy(in.ImagePullPolicy) out.SecurityContext = (*SecurityContext)(unsafe.Pointer(in.SecurityContext)) out.Stdin = in.Stdin @@ -811,7 +891,11 @@ func 
Convert_v1_ContainerImage_To_api_ContainerImage(in *ContainerImage, out *ap } func autoConvert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out *ContainerImage, s conversion.Scope) error { - out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) + if in.Names == nil { + out.Names = make([]string, 0) + } else { + out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) + } out.SizeBytes = in.SizeBytes return nil } @@ -998,6 +1082,7 @@ func autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in *DeleteOptions, out *a out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) out.Preconditions = (*api.Preconditions)(unsafe.Pointer(in.Preconditions)) out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) + out.PropagationPolicy = (*api.DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) return nil } @@ -1009,6 +1094,7 @@ func autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, ou out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) out.Preconditions = (*Preconditions)(unsafe.Pointer(in.Preconditions)) out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) + out.PropagationPolicy = (*DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) return nil } @@ -1016,6 +1102,24 @@ func Convert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *D return autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in, out, s) } +func autoConvert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in *DownwardAPIProjection, out *api.DownwardAPIProjection, s conversion.Scope) error { + out.Items = *(*[]api.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in *DownwardAPIProjection, out *api.DownwardAPIProjection, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in, out, s) +} + +func autoConvert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *api.DownwardAPIProjection, out *DownwardAPIProjection, s conversion.Scope) error { + out.Items = *(*[]DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *api.DownwardAPIProjection, out *DownwardAPIProjection, s conversion.Scope) error { + return autoConvert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in, out, s) +} + func autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error { out.Path = in.Path out.FieldRef = (*api.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) @@ -1147,9 +1251,7 @@ func Convert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out } func autoConvert_v1_Endpoints_To_api_Endpoints(in *Endpoints, out *api.Endpoints, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta out.Subsets = *(*[]api.EndpointSubset)(unsafe.Pointer(&in.Subsets)) return nil } @@ -1159,10 +1261,12 @@ func Convert_v1_Endpoints_To_api_Endpoints(in *Endpoints, out *api.Endpoints, s } func autoConvert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *Endpoints, s conversion.Scope) error { - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err + out.ObjectMeta = in.ObjectMeta + if in.Subsets == nil { + out.Subsets = make([]EndpointSubset, 
0) + } else { + out.Subsets = *(*[]EndpointSubset)(unsafe.Pointer(&in.Subsets)) } - out.Subsets = *(*[]EndpointSubset)(unsafe.Pointer(&in.Subsets)) return nil } @@ -1182,7 +1286,11 @@ func Convert_v1_EndpointsList_To_api_EndpointsList(in *EndpointsList, out *api.E func autoConvert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *EndpointsList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]Endpoints)(unsafe.Pointer(&in.Items)) + if in.Items == nil { + out.Items = make([]Endpoints, 0) + } else { + out.Items = *(*[]Endpoints)(unsafe.Pointer(&in.Items)) + } return nil } @@ -1190,6 +1298,28 @@ func Convert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *E return autoConvert_api_EndpointsList_To_v1_EndpointsList(in, out, s) } +func autoConvert_v1_EnvFromSource_To_api_EnvFromSource(in *EnvFromSource, out *api.EnvFromSource, s conversion.Scope) error { + out.Prefix = in.Prefix + out.ConfigMapRef = (*api.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) + out.SecretRef = (*api.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) + return nil +} + +func Convert_v1_EnvFromSource_To_api_EnvFromSource(in *EnvFromSource, out *api.EnvFromSource, s conversion.Scope) error { + return autoConvert_v1_EnvFromSource_To_api_EnvFromSource(in, out, s) +} + +func autoConvert_api_EnvFromSource_To_v1_EnvFromSource(in *api.EnvFromSource, out *EnvFromSource, s conversion.Scope) error { + out.Prefix = in.Prefix + out.ConfigMapRef = (*ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) + out.SecretRef = (*SecretEnvSource)(unsafe.Pointer(in.SecretRef)) + return nil +} + +func Convert_api_EnvFromSource_To_v1_EnvFromSource(in *api.EnvFromSource, out *EnvFromSource, s conversion.Scope) error { + return autoConvert_api_EnvFromSource_To_v1_EnvFromSource(in, out, s) +} + func autoConvert_v1_EnvVar_To_api_EnvVar(in *EnvVar, out *api.EnvVar, s conversion.Scope) error { out.Name = in.Name out.Value = in.Value @@ -1237,9 +1367,7 @@ func Convert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *EnvV } func autoConvert_v1_Event_To_api_Event(in *Event, out *api.Event, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { return err } @@ -1260,9 +1388,7 @@ func Convert_v1_Event_To_api_Event(in *Event, out *api.Event, s conversion.Scope } func autoConvert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.Scope) error { - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { return err } @@ -1294,7 +1420,11 @@ func Convert_v1_EventList_To_api_EventList(in *EventList, out *api.EventList, s func autoConvert_api_EventList_To_v1_EventList(in *api.EventList, out *EventList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]Event)(unsafe.Pointer(&in.Items)) + if in.Items == nil { + out.Items = make([]Event, 0) + } else { + out.Items = *(*[]Event)(unsafe.Pointer(&in.Items)) + } return nil } @@ -1340,26 +1470,6 @@ func Convert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *ExecAction return autoConvert_api_ExecAction_To_v1_ExecAction(in, out, s) } -func 
autoConvert_v1_ExportOptions_To_api_ExportOptions(in *ExportOptions, out *api.ExportOptions, s conversion.Scope) error { - out.Export = in.Export - out.Exact = in.Exact - return nil -} - -func Convert_v1_ExportOptions_To_api_ExportOptions(in *ExportOptions, out *api.ExportOptions, s conversion.Scope) error { - return autoConvert_v1_ExportOptions_To_api_ExportOptions(in, out, s) -} - -func autoConvert_api_ExportOptions_To_v1_ExportOptions(in *api.ExportOptions, out *ExportOptions, s conversion.Scope) error { - out.Export = in.Export - out.Exact = in.Exact - return nil -} - -func Convert_api_ExportOptions_To_v1_ExportOptions(in *api.ExportOptions, out *ExportOptions, s conversion.Scope) error { - return autoConvert_api_ExportOptions_To_v1_ExportOptions(in, out, s) -} - func autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in *FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error { out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) out.Lun = (*int32)(unsafe.Pointer(in.Lun)) @@ -1373,7 +1483,11 @@ func Convert_v1_FCVolumeSource_To_api_FCVolumeSource(in *FCVolumeSource, out *ap } func autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *FCVolumeSource, s conversion.Scope) error { - out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) + if in.TargetWWNs == nil { + out.TargetWWNs = make([]string, 0) + } else { + out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) + } out.Lun = (*int32)(unsafe.Pointer(in.Lun)) out.FSType = in.FSType out.ReadOnly = in.ReadOnly @@ -1591,6 +1705,7 @@ func autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *ISCSIVolumeSo out.ISCSIInterface = in.ISCSIInterface out.FSType = in.FSType out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) return nil } @@ -1605,6 +1720,7 @@ func autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolu out.ISCSIInterface = in.ISCSIInterface out.FSType = in.FSType out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) return nil } @@ -1655,9 +1771,7 @@ func Convert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *Lifecycle, s } func autoConvert_v1_LimitRange_To_api_LimitRange(in *LimitRange, out *api.LimitRange, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -1669,9 +1783,7 @@ func Convert_v1_LimitRange_To_api_LimitRange(in *LimitRange, out *api.LimitRange } func autoConvert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *LimitRange, s conversion.Scope) error { - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -1722,7 +1834,11 @@ func Convert_v1_LimitRangeList_To_api_LimitRangeList(in *LimitRangeList, out *ap func autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *LimitRangeList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]LimitRange)(unsafe.Pointer(&in.Items)) + if in.Items == nil { + out.Items = make([]LimitRange, 0) + } else { + out.Items = *(*[]LimitRange)(unsafe.Pointer(&in.Items)) + } return nil } @@ -1740,7 +1856,11 @@ func 
Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *LimitRangeSpec, out *ap } func autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *LimitRangeSpec, s conversion.Scope) error { - out.Limits = *(*[]LimitRangeItem)(unsafe.Pointer(&in.Limits)) + if in.Limits == nil { + out.Limits = make([]LimitRangeItem, 0) + } else { + out.Limits = *(*[]LimitRangeItem)(unsafe.Pointer(&in.Limits)) + } return nil } @@ -1779,7 +1899,7 @@ func autoConvert_api_List_To_v1_List(in *api.List, out *List, s conversion.Scope } } } else { - out.Items = nil + out.Items = make([]runtime.RawExtension, 0) } return nil } @@ -1789,10 +1909,10 @@ func Convert_api_List_To_v1_List(in *api.List, out *List, s conversion.Scope) er } func autoConvert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error { - if err := api.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { + if err := meta_v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { return err } - if err := api.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { + if err := meta_v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { return err } out.Watch = in.Watch @@ -1806,10 +1926,10 @@ func Convert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOpt } func autoConvert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *ListOptions, s conversion.Scope) error { - if err := api.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { + if err := meta_v1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { return err } - if err := api.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { + if err := meta_v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { return err } out.Watch = in.Watch @@ -1901,9 +2021,7 @@ func Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, } func autoConvert_v1_Namespace_To_api_Namespace(in *Namespace, out *api.Namespace, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1_NamespaceSpec_To_api_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -1918,9 +2036,7 @@ func Convert_v1_Namespace_To_api_Namespace(in *Namespace, out *api.Namespace, s } func autoConvert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *Namespace, s conversion.Scope) error { - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_api_NamespaceSpec_To_v1_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -1946,7 +2062,11 @@ func Convert_v1_NamespaceList_To_api_NamespaceList(in *NamespaceList, out *api.N func autoConvert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *NamespaceList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]Namespace)(unsafe.Pointer(&in.Items)) + if in.Items == nil { + out.Items = make([]Namespace, 0) + } else { + out.Items = *(*[]Namespace)(unsafe.Pointer(&in.Items)) + } return nil } @@ -1991,9 +2111,7 @@ func Convert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, } func 
autoConvert_v1_Node_To_api_Node(in *Node, out *api.Node, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1_NodeSpec_To_api_NodeSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -2008,9 +2126,7 @@ func Convert_v1_Node_To_api_Node(in *Node, out *api.Node, s conversion.Scope) er } func autoConvert_api_Node_To_v1_Node(in *api.Node, out *Node, s conversion.Scope) error { - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_api_NodeSpec_To_v1_NodeSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -2126,7 +2242,11 @@ func Convert_v1_NodeList_To_api_NodeList(in *NodeList, out *api.NodeList, s conv func autoConvert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *NodeList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]Node)(unsafe.Pointer(&in.Items)) + if in.Items == nil { + out.Items = make([]Node, 0) + } else { + out.Items = *(*[]Node)(unsafe.Pointer(&in.Items)) + } return nil } @@ -2152,6 +2272,24 @@ func Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in *api.NodeProxyOption return autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in, out, s) } +func autoConvert_v1_NodeResources_To_api_NodeResources(in *NodeResources, out *api.NodeResources, s conversion.Scope) error { + out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +func Convert_v1_NodeResources_To_api_NodeResources(in *NodeResources, out *api.NodeResources, s conversion.Scope) error { + return autoConvert_v1_NodeResources_To_api_NodeResources(in, out, s) +} + +func autoConvert_api_NodeResources_To_v1_NodeResources(in *api.NodeResources, out *NodeResources, s conversion.Scope) error { + out.Capacity = *(*ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +func Convert_api_NodeResources_To_v1_NodeResources(in *api.NodeResources, out *NodeResources, s conversion.Scope) error { + return autoConvert_api_NodeResources_To_v1_NodeResources(in, out, s) +} + func autoConvert_v1_NodeSelector_To_api_NodeSelector(in *NodeSelector, out *api.NodeSelector, s conversion.Scope) error { out.NodeSelectorTerms = *(*[]api.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) return nil @@ -2162,7 +2300,11 @@ func Convert_v1_NodeSelector_To_api_NodeSelector(in *NodeSelector, out *api.Node } func autoConvert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *NodeSelector, s conversion.Scope) error { - out.NodeSelectorTerms = *(*[]NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) + if in.NodeSelectorTerms == nil { + out.NodeSelectorTerms = make([]NodeSelectorTerm, 0) + } else { + out.NodeSelectorTerms = *(*[]NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) + } return nil } @@ -2202,7 +2344,11 @@ func Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *NodeSelectorTerm, o } func autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *NodeSelectorTerm, s conversion.Scope) error { - out.MatchExpressions = *(*[]NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) + if in.MatchExpressions == nil { + out.MatchExpressions = make([]NodeSelectorRequirement, 0) + } else { + out.MatchExpressions = *(*[]NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) + } return nil } @@ -2215,6 +2361,7 @@ func 
autoConvert_v1_NodeSpec_To_api_NodeSpec(in *NodeSpec, out *api.NodeSpec, s out.ExternalID = in.ExternalID out.ProviderID = in.ProviderID out.Unschedulable = in.Unschedulable + out.Taints = *(*[]api.Taint)(unsafe.Pointer(&in.Taints)) return nil } @@ -2227,6 +2374,7 @@ func autoConvert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *NodeSpec, s out.ExternalID = in.ExternalID out.ProviderID = in.ProviderID out.Unschedulable = in.Unschedulable + out.Taints = *(*[]Taint)(unsafe.Pointer(&in.Taints)) return nil } @@ -2343,11 +2491,11 @@ func autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in *ObjectMeta, out *api.Object out.ResourceVersion = in.ResourceVersion out.Generation = in.Generation out.CreationTimestamp = in.CreationTimestamp - out.DeletionTimestamp = (*unversioned.Time)(unsafe.Pointer(in.DeletionTimestamp)) + out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) out.DeletionGracePeriodSeconds = (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) - out.OwnerReferences = *(*[]api.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) + out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) out.ClusterName = in.ClusterName return nil @@ -2366,11 +2514,11 @@ func autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *Object out.ResourceVersion = in.ResourceVersion out.Generation = in.Generation out.CreationTimestamp = in.CreationTimestamp - out.DeletionTimestamp = (*unversioned.Time)(unsafe.Pointer(in.DeletionTimestamp)) + out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) out.DeletionGracePeriodSeconds = (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) - out.OwnerReferences = *(*[]OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) + out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) out.ClusterName = in.ClusterName return nil @@ -2410,36 +2558,8 @@ func Convert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, return autoConvert_api_ObjectReference_To_v1_ObjectReference(in, out, s) } -func autoConvert_v1_OwnerReference_To_api_OwnerReference(in *OwnerReference, out *api.OwnerReference, s conversion.Scope) error { - out.APIVersion = in.APIVersion - out.Kind = in.Kind - out.Name = in.Name - out.UID = types.UID(in.UID) - out.Controller = (*bool)(unsafe.Pointer(in.Controller)) - return nil -} - -func Convert_v1_OwnerReference_To_api_OwnerReference(in *OwnerReference, out *api.OwnerReference, s conversion.Scope) error { - return autoConvert_v1_OwnerReference_To_api_OwnerReference(in, out, s) -} - -func autoConvert_api_OwnerReference_To_v1_OwnerReference(in *api.OwnerReference, out *OwnerReference, s conversion.Scope) error { - out.APIVersion = in.APIVersion - out.Kind = in.Kind - out.Name = in.Name - out.UID = types.UID(in.UID) - out.Controller = (*bool)(unsafe.Pointer(in.Controller)) - return nil -} - -func Convert_api_OwnerReference_To_v1_OwnerReference(in *api.OwnerReference, out *OwnerReference, s conversion.Scope) error { - return autoConvert_api_OwnerReference_To_v1_OwnerReference(in, out, s) -} - func 
autoConvert_v1_PersistentVolume_To_api_PersistentVolume(in *PersistentVolume, out *api.PersistentVolume, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -2454,9 +2574,7 @@ func Convert_v1_PersistentVolume_To_api_PersistentVolume(in *PersistentVolume, o } func autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *PersistentVolume, s conversion.Scope) error { - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -2471,9 +2589,7 @@ func Convert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolum } func autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *PersistentVolumeClaim, out *api.PersistentVolumeClaim, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -2488,9 +2604,7 @@ func Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *Persisten } func autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *PersistentVolumeClaim, s conversion.Scope) error { - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -2516,7 +2630,11 @@ func Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *P func autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *PersistentVolumeClaimList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) + if in.Items == nil { + out.Items = make([]PersistentVolumeClaim, 0) + } else { + out.Items = *(*[]PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) + } return nil } @@ -2526,11 +2644,12 @@ func Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *a func autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error { out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.Selector = (*unversioned.LabelSelector)(unsafe.Pointer(in.Selector)) + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { return err } out.VolumeName = in.VolumeName + out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) return nil } @@ -2540,11 +2659,12 @@ func Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *P func autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out 
*PersistentVolumeClaimSpec, s conversion.Scope) error { out.AccessModes = *(*[]PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.Selector = (*unversioned.LabelSelector)(unsafe.Pointer(in.Selector)) + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { return err } out.VolumeName = in.VolumeName + out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) return nil } @@ -2625,7 +2745,7 @@ func autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.Per } } } else { - out.Items = nil + out.Items = make([]PersistentVolume, 0) } return nil } @@ -2652,6 +2772,8 @@ func autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *Per out.Quobyte = (*api.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) out.AzureDisk = (*api.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) out.PhotonPersistentDisk = (*api.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.PortworxVolume = (*api.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*api.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) return nil } @@ -2677,6 +2799,8 @@ func autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api out.VsphereVolume = (*VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) out.AzureDisk = (*AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) out.PhotonPersistentDisk = (*PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.PortworxVolume = (*PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) return nil } @@ -2692,6 +2816,7 @@ func autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *Persist out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) out.ClaimRef = (*api.ObjectReference)(unsafe.Pointer(in.ClaimRef)) out.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) + out.StorageClassName = in.StorageClassName return nil } @@ -2707,6 +2832,7 @@ func autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.Per out.AccessModes = *(*[]PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) out.ClaimRef = (*ObjectReference)(unsafe.Pointer(in.ClaimRef)) out.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) + out.StorageClassName = in.StorageClassName return nil } @@ -2757,9 +2883,7 @@ func Convert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolu } func autoConvert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -2770,9 +2894,7 @@ func autoConvert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) er } func autoConvert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error { - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -2803,7 +2925,7 @@ func Convert_api_PodAffinity_To_v1_PodAffinity(in 
*api.PodAffinity, out *PodAffi } func autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error { - out.LabelSelector = (*unversioned.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) out.TopologyKey = in.TopologyKey return nil @@ -2814,7 +2936,7 @@ func Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *PodAffinityTerm, out } func autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *PodAffinityTerm, s conversion.Scope) error { - out.LabelSelector = (*unversioned.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) out.TopologyKey = in.TopologyKey return nil @@ -2918,7 +3040,11 @@ func autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out.Stderr = in.Stderr out.TTY = in.TTY out.Container = in.Container - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + if in.Command == nil { + out.Command = make([]string, 0) + } else { + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + } return nil } @@ -2957,7 +3083,7 @@ func autoConvert_api_PodList_To_v1_PodList(in *api.PodList, out *PodList, s conv } } } else { - out.Items = nil + out.Items = make([]Pod, 0) } return nil } @@ -2971,7 +3097,7 @@ func autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in *PodLogOptions, out *a out.Follow = in.Follow out.Previous = in.Previous out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) - out.SinceTime = (*unversioned.Time)(unsafe.Pointer(in.SinceTime)) + out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) out.Timestamps = in.Timestamps out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) @@ -2987,7 +3113,7 @@ func autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, ou out.Follow = in.Follow out.Previous = in.Previous out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) - out.SinceTime = (*unversioned.Time)(unsafe.Pointer(in.SinceTime)) + out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) out.Timestamps = in.Timestamps out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) @@ -2998,6 +3124,24 @@ func Convert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *P return autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in, out, s) } +func autoConvert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in *PodPortForwardOptions, out *api.PodPortForwardOptions, s conversion.Scope) error { + out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) + return nil +} + +func Convert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in *PodPortForwardOptions, out *api.PodPortForwardOptions, s conversion.Scope) error { + return autoConvert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in, out, s) +} + +func autoConvert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *api.PodPortForwardOptions, out *PodPortForwardOptions, s conversion.Scope) error { + out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) + return nil +} + +func Convert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *api.PodPortForwardOptions, out *PodPortForwardOptions, s conversion.Scope) error { + return 
autoConvert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in, out, s) +} + func autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions(in *PodProxyOptions, out *api.PodProxyOptions, s conversion.Scope) error { out.Path = in.Path return nil @@ -3038,7 +3182,7 @@ func autoConvert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecu } func autoConvert_v1_PodSignature_To_api_PodSignature(in *PodSignature, out *api.PodSignature, s conversion.Scope) error { - out.PodController = (*api.OwnerReference)(unsafe.Pointer(in.PodController)) + out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController)) return nil } @@ -3047,7 +3191,7 @@ func Convert_v1_PodSignature_To_api_PodSignature(in *PodSignature, out *api.PodS } func autoConvert_api_PodSignature_To_v1_PodSignature(in *api.PodSignature, out *PodSignature, s conversion.Scope) error { - out.PodController = (*OwnerReference)(unsafe.Pointer(in.PodController)) + out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController)) return nil } @@ -3076,6 +3220,7 @@ func autoConvert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conv out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) out.ServiceAccountName = in.ServiceAccountName // INFO: in.DeprecatedServiceAccount opted out of conversion generation + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) out.NodeName = in.NodeName // INFO: in.HostNetwork opted out of conversion generation // INFO: in.HostPID opted out of conversion generation @@ -3092,6 +3237,9 @@ func autoConvert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conv out.ImagePullSecrets = *(*[]api.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) out.Hostname = in.Hostname out.Subdomain = in.Subdomain + out.Affinity = (*api.Affinity)(unsafe.Pointer(in.Affinity)) + out.SchedulerName = in.SchedulerName + out.Tolerations = *(*[]api.Toleration)(unsafe.Pointer(&in.Tolerations)) return nil } @@ -3108,13 +3256,18 @@ func autoConvert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conv out.Volumes = nil } out.InitContainers = *(*[]Container)(unsafe.Pointer(&in.InitContainers)) - out.Containers = *(*[]Container)(unsafe.Pointer(&in.Containers)) + if in.Containers == nil { + out.Containers = make([]Container, 0) + } else { + out.Containers = *(*[]Container)(unsafe.Pointer(&in.Containers)) + } out.RestartPolicy = RestartPolicy(in.RestartPolicy) out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) out.DNSPolicy = DNSPolicy(in.DNSPolicy) out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) out.ServiceAccountName = in.ServiceAccountName + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) out.NodeName = in.NodeName if in.SecurityContext != nil { in, out := &in.SecurityContext, &out.SecurityContext @@ -3128,6 +3281,9 @@ func autoConvert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conv out.ImagePullSecrets = *(*[]LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) out.Hostname = in.Hostname out.Subdomain = in.Subdomain + out.Affinity = (*Affinity)(unsafe.Pointer(in.Affinity)) + out.SchedulerName = in.SchedulerName + out.Tolerations = *(*[]Toleration)(unsafe.Pointer(&in.Tolerations)) return nil } @@ -3138,9 +3294,10 @@ func autoConvert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus 
out.Reason = in.Reason out.HostIP = in.HostIP out.PodIP = in.PodIP - out.StartTime = (*unversioned.Time)(unsafe.Pointer(in.StartTime)) + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) out.InitContainerStatuses = *(*[]api.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) out.ContainerStatuses = *(*[]api.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) + out.QOSClass = api.PodQOSClass(in.QOSClass) return nil } @@ -3155,7 +3312,8 @@ func autoConvert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus out.Reason = in.Reason out.HostIP = in.HostIP out.PodIP = in.PodIP - out.StartTime = (*unversioned.Time)(unsafe.Pointer(in.StartTime)) + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) + out.QOSClass = PodQOSClass(in.QOSClass) out.InitContainerStatuses = *(*[]ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) out.ContainerStatuses = *(*[]ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) return nil @@ -3166,9 +3324,7 @@ func Convert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus, s } func autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil { return err } @@ -3176,9 +3332,7 @@ func autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, } func autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error { - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { return err } @@ -3186,9 +3340,7 @@ func autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResu } func autoConvert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemplate, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } @@ -3200,9 +3352,7 @@ func Convert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemp } func autoConvert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *PodTemplate, s conversion.Scope) error { - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } @@ -3244,7 +3394,7 @@ func autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateLi } } } else { - out.Items = nil + out.Items = make([]PodTemplate, 0) } return nil } @@ -3254,9 +3404,7 @@ func Convert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, } func autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := 
Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -3264,15 +3412,35 @@ func autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, } func autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error { - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { return err } return nil } +func autoConvert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in *PortworxVolumeSource, out *api.PortworxVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in *PortworxVolumeSource, out *api.PortworxVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in, out, s) +} + +func autoConvert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *api.PortworxVolumeSource, out *PortworxVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *api.PortworxVolumeSource, out *PortworxVolumeSource, s conversion.Scope) error { + return autoConvert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in, out, s) +} + func autoConvert_v1_Preconditions_To_api_Preconditions(in *Preconditions, out *api.Preconditions, s conversion.Scope) error { out.UID = (*types.UID)(unsafe.Pointer(in.UID)) return nil @@ -3375,6 +3543,30 @@ func Convert_api_Probe_To_v1_Probe(in *api.Probe, out *Probe, s conversion.Scope return autoConvert_api_Probe_To_v1_Probe(in, out, s) } +func autoConvert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in *ProjectedVolumeSource, out *api.ProjectedVolumeSource, s conversion.Scope) error { + out.Sources = *(*[]api.VolumeProjection)(unsafe.Pointer(&in.Sources)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +func Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in *ProjectedVolumeSource, out *api.ProjectedVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in, out, s) +} + +func autoConvert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *api.ProjectedVolumeSource, out *ProjectedVolumeSource, s conversion.Scope) error { + if in.Sources == nil { + out.Sources = make([]VolumeProjection, 0) + } else { + out.Sources = *(*[]VolumeProjection)(unsafe.Pointer(&in.Sources)) + } + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +func Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *api.ProjectedVolumeSource, out *ProjectedVolumeSource, s conversion.Scope) error { + return autoConvert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in, out, s) +} + func autoConvert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in *QuobyteVolumeSource, out *api.QuobyteVolumeSource, s conversion.Scope) error { out.Registry = in.Registry out.Volume = in.Volume @@ -3418,7 +3610,11 @@ func Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, out } func autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *RBDVolumeSource, s conversion.Scope) error { 
- out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + if in.CephMonitors == nil { + out.CephMonitors = make([]string, 0) + } else { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + } out.RBDImage = in.RBDImage out.FSType = in.FSType out.RBDPool = in.RBDPool @@ -3434,9 +3630,7 @@ func Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, } func autoConvert_v1_RangeAllocation_To_api_RangeAllocation(in *RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta out.Range = in.Range out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) return nil @@ -3447,11 +3641,13 @@ func Convert_v1_RangeAllocation_To_api_RangeAllocation(in *RangeAllocation, out } func autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *RangeAllocation, s conversion.Scope) error { - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta out.Range = in.Range - out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) + if in.Data == nil { + out.Data = make([]byte, 0) + } else { + out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) + } return nil } @@ -3460,9 +3656,7 @@ func Convert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, } func autoConvert_v1_ReplicationController_To_api_ReplicationController(in *ReplicationController, out *api.ReplicationController, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -3477,9 +3671,7 @@ func Convert_v1_ReplicationController_To_api_ReplicationController(in *Replicati } func autoConvert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *ReplicationController, s conversion.Scope) error { - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -3550,7 +3742,7 @@ func autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(i } } } else { - out.Items = nil + out.Items = make([]ReplicationController, 0) } return nil } @@ -3560,7 +3752,7 @@ func Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *a } func autoConvert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error { - if err := api.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -3578,7 +3770,7 @@ func autoConvert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(i } func autoConvert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *ReplicationControllerSpec, s conversion.Scope) error { - if err := api.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + if err := 
meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -3646,9 +3838,7 @@ func Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *api.Resou } func autoConvert_v1_ResourceQuota_To_api_ResourceQuota(in *ResourceQuota, out *api.ResourceQuota, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -3663,9 +3853,7 @@ func Convert_v1_ResourceQuota_To_api_ResourceQuota(in *ResourceQuota, out *api.R } func autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *ResourceQuota, s conversion.Scope) error { - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -3691,7 +3879,11 @@ func Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *ResourceQuotaList func autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *ResourceQuotaList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]ResourceQuota)(unsafe.Pointer(&in.Items)) + if in.Items == nil { + out.Items = make([]ResourceQuota, 0) + } else { + out.Items = *(*[]ResourceQuota)(unsafe.Pointer(&in.Items)) + } return nil } @@ -3783,10 +3975,44 @@ func Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out return autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in, out, s) } +func autoConvert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in *ScaleIOVolumeSource, out *api.ScaleIOVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in *ScaleIOVolumeSource, out *api.ScaleIOVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in, out, s) +} + +func autoConvert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *api.ScaleIOVolumeSource, out *ScaleIOVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *api.ScaleIOVolumeSource, out *ScaleIOVolumeSource, s conversion.Scope) error { + return autoConvert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in, out, s) +} + func autoConvert_v1_Secret_To_api_Secret(in *Secret, out *api.Secret, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + 
out.ObjectMeta = in.ObjectMeta out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) // INFO: in.StringData opted out of conversion generation out.Type = api.SecretType(in.Type) @@ -3794,9 +4020,7 @@ func autoConvert_v1_Secret_To_api_Secret(in *Secret, out *api.Secret, s conversi } func autoConvert_api_Secret_To_v1_Secret(in *api.Secret, out *Secret, s conversion.Scope) error { - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) out.Type = SecretType(in.Type) return nil @@ -3806,11 +4030,36 @@ func Convert_api_Secret_To_v1_Secret(in *api.Secret, out *Secret, s conversion.S return autoConvert_api_Secret_To_v1_Secret(in, out, s) } +func autoConvert_v1_SecretEnvSource_To_api_SecretEnvSource(in *SecretEnvSource, out *api.SecretEnvSource, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_v1_SecretEnvSource_To_api_SecretEnvSource(in *SecretEnvSource, out *api.SecretEnvSource, s conversion.Scope) error { + return autoConvert_v1_SecretEnvSource_To_api_SecretEnvSource(in, out, s) +} + +func autoConvert_api_SecretEnvSource_To_v1_SecretEnvSource(in *api.SecretEnvSource, out *SecretEnvSource, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_api_SecretEnvSource_To_v1_SecretEnvSource(in *api.SecretEnvSource, out *SecretEnvSource, s conversion.Scope) error { + return autoConvert_api_SecretEnvSource_To_v1_SecretEnvSource(in, out, s) +} + func autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in *SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error { if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { return err } out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) return nil } @@ -3823,6 +4072,7 @@ func autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKey return err } out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) return nil } @@ -3861,7 +4111,7 @@ func autoConvert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *Secret } } } else { - out.Items = nil + out.Items = make([]Secret, 0) } return nil } @@ -3870,10 +4120,37 @@ func Convert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *SecretList return autoConvert_api_SecretList_To_v1_SecretList(in, out, s) } +func autoConvert_v1_SecretProjection_To_api_SecretProjection(in *SecretProjection, out *api.SecretProjection, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_v1_SecretProjection_To_api_SecretProjection(in *SecretProjection, out *api.SecretProjection, s conversion.Scope) error { + return autoConvert_v1_SecretProjection_To_api_SecretProjection(in, out, s) +} + +func 
autoConvert_api_SecretProjection_To_v1_SecretProjection(in *api.SecretProjection, out *SecretProjection, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_api_SecretProjection_To_v1_SecretProjection(in *api.SecretProjection, out *SecretProjection, s conversion.Scope) error { + return autoConvert_api_SecretProjection_To_v1_SecretProjection(in, out, s) +} + func autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error { out.SecretName = in.SecretName out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) return nil } @@ -3885,6 +4162,7 @@ func autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretV out.SecretName = in.SecretName out.Items = *(*[]KeyToPath)(unsafe.Pointer(&in.Items)) out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) return nil } @@ -3943,9 +4221,7 @@ func Convert_api_SerializedReference_To_v1_SerializedReference(in *api.Serialize } func autoConvert_v1_Service_To_api_Service(in *Service, out *api.Service, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1_ServiceSpec_To_api_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -3960,9 +4236,7 @@ func Convert_v1_Service_To_api_Service(in *Service, out *api.Service, s conversi } func autoConvert_api_Service_To_v1_Service(in *api.Service, out *Service, s conversion.Scope) error { - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_api_ServiceSpec_To_v1_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -3977,11 +4251,10 @@ func Convert_api_Service_To_v1_Service(in *api.Service, out *Service, s conversi } func autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in *ServiceAccount, out *api.ServiceAccount, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta out.Secrets = *(*[]api.ObjectReference)(unsafe.Pointer(&in.Secrets)) out.ImagePullSecrets = *(*[]api.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) return nil } @@ -3990,11 +4263,10 @@ func Convert_v1_ServiceAccount_To_api_ServiceAccount(in *ServiceAccount, out *ap } func autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *ServiceAccount, s conversion.Scope) error { - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta out.Secrets = *(*[]ObjectReference)(unsafe.Pointer(&in.Secrets)) out.ImagePullSecrets = *(*[]LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) return nil } @@ 
-4014,7 +4286,11 @@ func Convert_v1_ServiceAccountList_To_api_ServiceAccountList(in *ServiceAccountL func autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *ServiceAccountList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]ServiceAccount)(unsafe.Pointer(&in.Items)) + if in.Items == nil { + out.Items = make([]ServiceAccount, 0) + } else { + out.Items = *(*[]ServiceAccount)(unsafe.Pointer(&in.Items)) + } return nil } @@ -4053,7 +4329,7 @@ func autoConvert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *Ser } } } else { - out.Items = nil + out.Items = make([]Service, 0) } return nil } @@ -4155,6 +4431,26 @@ func Convert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *S return autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in, out, s) } +func autoConvert_v1_Sysctl_To_api_Sysctl(in *Sysctl, out *api.Sysctl, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +func Convert_v1_Sysctl_To_api_Sysctl(in *Sysctl, out *api.Sysctl, s conversion.Scope) error { + return autoConvert_v1_Sysctl_To_api_Sysctl(in, out, s) +} + +func autoConvert_api_Sysctl_To_v1_Sysctl(in *api.Sysctl, out *Sysctl, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +func Convert_api_Sysctl_To_v1_Sysctl(in *api.Sysctl, out *Sysctl, s conversion.Scope) error { + return autoConvert_api_Sysctl_To_v1_Sysctl(in, out, s) +} + func autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in *TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error { out.Port = in.Port return nil @@ -4177,6 +4473,7 @@ func autoConvert_v1_Taint_To_api_Taint(in *Taint, out *api.Taint, s conversion.S out.Key = in.Key out.Value = in.Value out.Effect = api.TaintEffect(in.Effect) + out.TimeAdded = in.TimeAdded return nil } @@ -4188,6 +4485,7 @@ func autoConvert_api_Taint_To_v1_Taint(in *api.Taint, out *Taint, s conversion.S out.Key = in.Key out.Value = in.Value out.Effect = TaintEffect(in.Effect) + out.TimeAdded = in.TimeAdded return nil } @@ -4200,6 +4498,7 @@ func autoConvert_v1_Toleration_To_api_Toleration(in *Toleration, out *api.Tolera out.Operator = api.TolerationOperator(in.Operator) out.Value = in.Value out.Effect = api.TaintEffect(in.Effect) + out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) return nil } @@ -4212,6 +4511,7 @@ func autoConvert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *Tolera out.Operator = TolerationOperator(in.Operator) out.Value = in.Value out.Effect = TaintEffect(in.Effect) + out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) return nil } @@ -4267,6 +4567,28 @@ func Convert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *VolumeM return autoConvert_api_VolumeMount_To_v1_VolumeMount(in, out, s) } +func autoConvert_v1_VolumeProjection_To_api_VolumeProjection(in *VolumeProjection, out *api.VolumeProjection, s conversion.Scope) error { + out.Secret = (*api.SecretProjection)(unsafe.Pointer(in.Secret)) + out.DownwardAPI = (*api.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) + out.ConfigMap = (*api.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) + return nil +} + +func Convert_v1_VolumeProjection_To_api_VolumeProjection(in *VolumeProjection, out *api.VolumeProjection, s conversion.Scope) error { + return autoConvert_v1_VolumeProjection_To_api_VolumeProjection(in, out, s) +} + +func autoConvert_api_VolumeProjection_To_v1_VolumeProjection(in *api.VolumeProjection, out 
*VolumeProjection, s conversion.Scope) error { + out.Secret = (*SecretProjection)(unsafe.Pointer(in.Secret)) + out.DownwardAPI = (*DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) + out.ConfigMap = (*ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) + return nil +} + +func Convert_api_VolumeProjection_To_v1_VolumeProjection(in *api.VolumeProjection, out *VolumeProjection, s conversion.Scope) error { + return autoConvert_api_VolumeProjection_To_v1_VolumeProjection(in, out, s) +} + func autoConvert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.VolumeSource, s conversion.Scope) error { out.HostPath = (*api.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) out.EmptyDir = (*api.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) @@ -4291,6 +4613,9 @@ func autoConvert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api. out.Quobyte = (*api.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) out.AzureDisk = (*api.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) out.PhotonPersistentDisk = (*api.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.Projected = (*api.ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) + out.PortworxVolume = (*api.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*api.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) return nil } @@ -4322,6 +4647,9 @@ func autoConvert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out * out.VsphereVolume = (*VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) out.AzureDisk = (*AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) out.PhotonPersistentDisk = (*PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.Projected = (*ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) + out.PortworxVolume = (*PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.deepcopy.go index 29db773260..463e946800 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,10 +21,10 @@ limitations under the License. 
package v1 import ( - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" - types "k8s.io/kubernetes/pkg/types" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" reflect "reflect" ) @@ -50,8 +50,10 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ComponentStatus, InType: reflect.TypeOf(&ComponentStatus{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ComponentStatusList, InType: reflect.TypeOf(&ComponentStatusList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMap, InType: reflect.TypeOf(&ConfigMap{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapEnvSource, InType: reflect.TypeOf(&ConfigMapEnvSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapKeySelector, InType: reflect.TypeOf(&ConfigMapKeySelector{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapList, InType: reflect.TypeOf(&ConfigMapList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapProjection, InType: reflect.TypeOf(&ConfigMapProjection{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapVolumeSource, InType: reflect.TypeOf(&ConfigMapVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Container, InType: reflect.TypeOf(&Container{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerImage, InType: reflect.TypeOf(&ContainerImage{})}, @@ -63,6 +65,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerStatus, InType: reflect.TypeOf(&ContainerStatus{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DaemonEndpoint, InType: reflect.TypeOf(&DaemonEndpoint{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DeleteOptions, InType: reflect.TypeOf(&DeleteOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DownwardAPIProjection, InType: reflect.TypeOf(&DownwardAPIProjection{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DownwardAPIVolumeFile, InType: reflect.TypeOf(&DownwardAPIVolumeFile{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DownwardAPIVolumeSource, InType: reflect.TypeOf(&DownwardAPIVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EmptyDirVolumeSource, InType: reflect.TypeOf(&EmptyDirVolumeSource{})}, @@ -71,13 +74,13 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EndpointSubset, InType: reflect.TypeOf(&EndpointSubset{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Endpoints, InType: reflect.TypeOf(&Endpoints{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EndpointsList, InType: reflect.TypeOf(&EndpointsList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EnvFromSource, InType: reflect.TypeOf(&EnvFromSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EnvVar, InType: reflect.TypeOf(&EnvVar{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EnvVarSource, InType: reflect.TypeOf(&EnvVarSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Event, InType: reflect.TypeOf(&Event{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EventList, InType: reflect.TypeOf(&EventList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EventSource, InType: reflect.TypeOf(&EventSource{})}, conversion.GeneratedDeepCopyFunc{Fn: 
DeepCopy_v1_ExecAction, InType: reflect.TypeOf(&ExecAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ExportOptions, InType: reflect.TypeOf(&ExportOptions{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_FCVolumeSource, InType: reflect.TypeOf(&FCVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_FlexVolumeSource, InType: reflect.TypeOf(&FlexVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_FlockerVolumeSource, InType: reflect.TypeOf(&FlockerVolumeSource{})}, @@ -112,6 +115,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeDaemonEndpoints, InType: reflect.TypeOf(&NodeDaemonEndpoints{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeList, InType: reflect.TypeOf(&NodeList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeProxyOptions, InType: reflect.TypeOf(&NodeProxyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeResources, InType: reflect.TypeOf(&NodeResources{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSelector, InType: reflect.TypeOf(&NodeSelector{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSelectorRequirement, InType: reflect.TypeOf(&NodeSelectorRequirement{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSelectorTerm, InType: reflect.TypeOf(&NodeSelectorTerm{})}, @@ -121,7 +125,6 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectFieldSelector, InType: reflect.TypeOf(&ObjectFieldSelector{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectMeta, InType: reflect.TypeOf(&ObjectMeta{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectReference, InType: reflect.TypeOf(&ObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_OwnerReference, InType: reflect.TypeOf(&OwnerReference{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolume, InType: reflect.TypeOf(&PersistentVolume{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeClaim, InType: reflect.TypeOf(&PersistentVolumeClaim{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeClaimList, InType: reflect.TypeOf(&PersistentVolumeClaimList{})}, @@ -142,6 +145,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodExecOptions, InType: reflect.TypeOf(&PodExecOptions{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodList, InType: reflect.TypeOf(&PodList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodLogOptions, InType: reflect.TypeOf(&PodLogOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodPortForwardOptions, InType: reflect.TypeOf(&PodPortForwardOptions{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodProxyOptions, InType: reflect.TypeOf(&PodProxyOptions{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodSecurityContext, InType: reflect.TypeOf(&PodSecurityContext{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodSignature, InType: reflect.TypeOf(&PodSignature{})}, @@ -151,10 +155,12 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodTemplate, InType: reflect.TypeOf(&PodTemplate{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodTemplateList, InType: reflect.TypeOf(&PodTemplateList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodTemplateSpec, InType: reflect.TypeOf(&PodTemplateSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: 
DeepCopy_v1_PortworxVolumeSource, InType: reflect.TypeOf(&PortworxVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Preconditions, InType: reflect.TypeOf(&Preconditions{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PreferAvoidPodsEntry, InType: reflect.TypeOf(&PreferAvoidPodsEntry{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PreferredSchedulingTerm, InType: reflect.TypeOf(&PreferredSchedulingTerm{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Probe, InType: reflect.TypeOf(&Probe{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ProjectedVolumeSource, InType: reflect.TypeOf(&ProjectedVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_QuobyteVolumeSource, InType: reflect.TypeOf(&QuobyteVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_RBDVolumeSource, InType: reflect.TypeOf(&RBDVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_RangeAllocation, InType: reflect.TypeOf(&RangeAllocation{})}, @@ -170,9 +176,12 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceQuotaStatus, InType: reflect.TypeOf(&ResourceQuotaStatus{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceRequirements, InType: reflect.TypeOf(&ResourceRequirements{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SELinuxOptions, InType: reflect.TypeOf(&SELinuxOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ScaleIOVolumeSource, InType: reflect.TypeOf(&ScaleIOVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Secret, InType: reflect.TypeOf(&Secret{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretEnvSource, InType: reflect.TypeOf(&SecretEnvSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretKeySelector, InType: reflect.TypeOf(&SecretKeySelector{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretList, InType: reflect.TypeOf(&SecretList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretProjection, InType: reflect.TypeOf(&SecretProjection{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretVolumeSource, InType: reflect.TypeOf(&SecretVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecurityContext, InType: reflect.TypeOf(&SecurityContext{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SerializedReference, InType: reflect.TypeOf(&SerializedReference{})}, @@ -184,11 +193,13 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceProxyOptions, InType: reflect.TypeOf(&ServiceProxyOptions{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceSpec, InType: reflect.TypeOf(&ServiceSpec{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceStatus, InType: reflect.TypeOf(&ServiceStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Sysctl, InType: reflect.TypeOf(&Sysctl{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_TCPSocketAction, InType: reflect.TypeOf(&TCPSocketAction{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Taint, InType: reflect.TypeOf(&Taint{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Toleration, InType: reflect.TypeOf(&Toleration{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Volume, InType: reflect.TypeOf(&Volume{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VolumeMount, InType: reflect.TypeOf(&VolumeMount{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VolumeProjection, InType: reflect.TypeOf(&VolumeProjection{})}, 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VolumeSource, InType: reflect.TypeOf(&VolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VsphereVirtualDiskVolumeSource, InType: reflect.TypeOf(&VsphereVirtualDiskVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_WeightedPodAffinityTerm, InType: reflect.TypeOf(&WeightedPodAffinityTerm{})}, @@ -199,10 +210,7 @@ func DeepCopy_v1_AWSElasticBlockStoreVolumeSource(in interface{}, out interface{ { in := in.(*AWSElasticBlockStoreVolumeSource) out := out.(*AWSElasticBlockStoreVolumeSource) - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly + *out = *in return nil } } @@ -211,14 +219,13 @@ func DeepCopy_v1_Affinity(in interface{}, out interface{}, c *conversion.Cloner) { in := in.(*Affinity) out := out.(*Affinity) + *out = *in if in.NodeAffinity != nil { in, out := &in.NodeAffinity, &out.NodeAffinity *out = new(NodeAffinity) if err := DeepCopy_v1_NodeAffinity(*in, *out, c); err != nil { return err } - } else { - out.NodeAffinity = nil } if in.PodAffinity != nil { in, out := &in.PodAffinity, &out.PodAffinity @@ -226,8 +233,6 @@ func DeepCopy_v1_Affinity(in interface{}, out interface{}, c *conversion.Cloner) if err := DeepCopy_v1_PodAffinity(*in, *out, c); err != nil { return err } - } else { - out.PodAffinity = nil } if in.PodAntiAffinity != nil { in, out := &in.PodAntiAffinity, &out.PodAntiAffinity @@ -235,8 +240,6 @@ func DeepCopy_v1_Affinity(in interface{}, out interface{}, c *conversion.Cloner) if err := DeepCopy_v1_PodAntiAffinity(*in, *out, c); err != nil { return err } - } else { - out.PodAntiAffinity = nil } return nil } @@ -246,8 +249,7 @@ func DeepCopy_v1_AttachedVolume(in interface{}, out interface{}, c *conversion.C { in := in.(*AttachedVolume) out := out.(*AttachedVolume) - out.Name = in.Name - out.DevicePath = in.DevicePath + *out = *in return nil } } @@ -256,6 +258,7 @@ func DeepCopy_v1_AvoidPods(in interface{}, out interface{}, c *conversion.Cloner { in := in.(*AvoidPods) out := out.(*AvoidPods) + *out = *in if in.PreferAvoidPods != nil { in, out := &in.PreferAvoidPods, &out.PreferAvoidPods *out = make([]PreferAvoidPodsEntry, len(*in)) @@ -264,8 +267,6 @@ func DeepCopy_v1_AvoidPods(in interface{}, out interface{}, c *conversion.Cloner return err } } - } else { - out.PreferAvoidPods = nil } return nil } @@ -275,28 +276,21 @@ func DeepCopy_v1_AzureDiskVolumeSource(in interface{}, out interface{}, c *conve { in := in.(*AzureDiskVolumeSource) out := out.(*AzureDiskVolumeSource) - out.DiskName = in.DiskName - out.DataDiskURI = in.DataDiskURI + *out = *in if in.CachingMode != nil { in, out := &in.CachingMode, &out.CachingMode *out = new(AzureDataDiskCachingMode) **out = **in - } else { - out.CachingMode = nil } if in.FSType != nil { in, out := &in.FSType, &out.FSType *out = new(string) **out = **in - } else { - out.FSType = nil } if in.ReadOnly != nil { in, out := &in.ReadOnly, &out.ReadOnly *out = new(bool) **out = **in - } else { - out.ReadOnly = nil } return nil } @@ -306,9 +300,7 @@ func DeepCopy_v1_AzureFileVolumeSource(in interface{}, out interface{}, c *conve { in := in.(*AzureFileVolumeSource) out := out.(*AzureFileVolumeSource) - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly + *out = *in return nil } } @@ -317,11 +309,12 @@ func DeepCopy_v1_Binding(in interface{}, out interface{}, c *conversion.Cloner) { in := in.(*Binding) out := out.(*Binding) - out.TypeMeta = in.TypeMeta - if err := 
DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } - out.Target = in.Target return nil } } @@ -330,23 +323,16 @@ func DeepCopy_v1_Capabilities(in interface{}, out interface{}, c *conversion.Clo { in := in.(*Capabilities) out := out.(*Capabilities) + *out = *in if in.Add != nil { in, out := &in.Add, &out.Add *out = make([]Capability, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Add = nil + copy(*out, *in) } if in.Drop != nil { in, out := &in.Drop, &out.Drop *out = make([]Capability, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Drop = nil + copy(*out, *in) } return nil } @@ -356,24 +342,17 @@ func DeepCopy_v1_CephFSVolumeSource(in interface{}, out interface{}, c *conversi { in := in.(*CephFSVolumeSource) out := out.(*CephFSVolumeSource) + *out = *in if in.Monitors != nil { in, out := &in.Monitors, &out.Monitors *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Monitors = nil } - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef *out = new(LocalObjectReference) **out = **in - } else { - out.SecretRef = nil } - out.ReadOnly = in.ReadOnly return nil } } @@ -382,9 +361,7 @@ func DeepCopy_v1_CinderVolumeSource(in interface{}, out interface{}, c *conversi { in := in.(*CinderVolumeSource) out := out.(*CinderVolumeSource) - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly + *out = *in return nil } } @@ -393,10 +370,7 @@ func DeepCopy_v1_ComponentCondition(in interface{}, out interface{}, c *conversi { in := in.(*ComponentCondition) out := out.(*ComponentCondition) - out.Type = in.Type - out.Status = in.Status - out.Message = in.Message - out.Error = in.Error + *out = *in return nil } } @@ -405,18 +379,16 @@ func DeepCopy_v1_ComponentStatus(in interface{}, out interface{}, c *conversion. 
{ in := in.(*ComponentStatus) out := out.(*ComponentStatus) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]ComponentCondition, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Conditions = nil + copy(*out, *in) } return nil } @@ -426,8 +398,7 @@ func DeepCopy_v1_ComponentStatusList(in interface{}, out interface{}, c *convers { in := in.(*ComponentStatusList) out := out.(*ComponentStatusList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ComponentStatus, len(*in)) @@ -436,8 +407,6 @@ func DeepCopy_v1_ComponentStatusList(in interface{}, out interface{}, c *convers return err } } - } else { - out.Items = nil } return nil } @@ -447,9 +416,11 @@ func DeepCopy_v1_ConfigMap(in interface{}, out interface{}, c *conversion.Cloner { in := in.(*ConfigMap) out := out.(*ConfigMap) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } if in.Data != nil { in, out := &in.Data, &out.Data @@ -457,8 +428,20 @@ func DeepCopy_v1_ConfigMap(in interface{}, out interface{}, c *conversion.Cloner for key, val := range *in { (*out)[key] = val } - } else { - out.Data = nil + } + return nil + } +} + +func DeepCopy_v1_ConfigMapEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapEnvSource) + out := out.(*ConfigMapEnvSource) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in } return nil } @@ -468,8 +451,12 @@ func DeepCopy_v1_ConfigMapKeySelector(in interface{}, out interface{}, c *conver { in := in.(*ConfigMapKeySelector) out := out.(*ConfigMapKeySelector) - out.LocalObjectReference = in.LocalObjectReference - out.Key = in.Key + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } return nil } } @@ -478,8 +465,7 @@ func DeepCopy_v1_ConfigMapList(in interface{}, out interface{}, c *conversion.Cl { in := in.(*ConfigMapList) out := out.(*ConfigMapList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ConfigMap, len(*in)) @@ -488,8 +474,29 @@ func DeepCopy_v1_ConfigMapList(in interface{}, out interface{}, c *conversion.Cl return err } } - } else { - out.Items = nil + } + return nil + } +} + +func DeepCopy_v1_ConfigMapProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapProjection) + out := out.(*ConfigMapProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_v1_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in } return nil } @@ -499,7 +506,7 @@ func DeepCopy_v1_ConfigMapVolumeSource(in interface{}, out interface{}, c *conve { in := in.(*ConfigMapVolumeSource) out := 
out.(*ConfigMapVolumeSource) - out.LocalObjectReference = in.LocalObjectReference + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]KeyToPath, len(*in)) @@ -508,15 +515,16 @@ func DeepCopy_v1_ConfigMapVolumeSource(in interface{}, out interface{}, c *conve return err } } - } else { - out.Items = nil } if in.DefaultMode != nil { in, out := &in.DefaultMode, &out.DefaultMode *out = new(int32) **out = **in - } else { - out.DefaultMode = nil + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in } return nil } @@ -526,31 +534,30 @@ func DeepCopy_v1_Container(in interface{}, out interface{}, c *conversion.Cloner { in := in.(*Container) out := out.(*Container) - out.Name = in.Name - out.Image = in.Image + *out = *in if in.Command != nil { in, out := &in.Command, &out.Command *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Command = nil } if in.Args != nil { in, out := &in.Args, &out.Args *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Args = nil } - out.WorkingDir = in.WorkingDir if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]EnvFromSource, len(*in)) for i := range *in { - (*out)[i] = (*in)[i] + if err := DeepCopy_v1_EnvFromSource(&(*in)[i], &(*out)[i], c); err != nil { + return err + } } - } else { - out.Ports = nil } if in.Env != nil { in, out := &in.Env, &out.Env @@ -560,8 +567,6 @@ func DeepCopy_v1_Container(in interface{}, out interface{}, c *conversion.Cloner return err } } - } else { - out.Env = nil } if err := DeepCopy_v1_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { return err @@ -569,11 +574,7 @@ func DeepCopy_v1_Container(in interface{}, out interface{}, c *conversion.Cloner if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts *out = make([]VolumeMount, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.VolumeMounts = nil + copy(*out, *in) } if in.LivenessProbe != nil { in, out := &in.LivenessProbe, &out.LivenessProbe @@ -581,8 +582,6 @@ func DeepCopy_v1_Container(in interface{}, out interface{}, c *conversion.Cloner if err := DeepCopy_v1_Probe(*in, *out, c); err != nil { return err } - } else { - out.LivenessProbe = nil } if in.ReadinessProbe != nil { in, out := &in.ReadinessProbe, &out.ReadinessProbe @@ -590,8 +589,6 @@ func DeepCopy_v1_Container(in interface{}, out interface{}, c *conversion.Cloner if err := DeepCopy_v1_Probe(*in, *out, c); err != nil { return err } - } else { - out.ReadinessProbe = nil } if in.Lifecycle != nil { in, out := &in.Lifecycle, &out.Lifecycle @@ -599,23 +596,14 @@ func DeepCopy_v1_Container(in interface{}, out interface{}, c *conversion.Cloner if err := DeepCopy_v1_Lifecycle(*in, *out, c); err != nil { return err } - } else { - out.Lifecycle = nil } - out.TerminationMessagePath = in.TerminationMessagePath - out.ImagePullPolicy = in.ImagePullPolicy if in.SecurityContext != nil { in, out := &in.SecurityContext, &out.SecurityContext *out = new(SecurityContext) if err := DeepCopy_v1_SecurityContext(*in, *out, c); err != nil { return err } - } else { - out.SecurityContext = nil } - out.Stdin = in.Stdin - out.StdinOnce = in.StdinOnce - out.TTY = in.TTY return nil } } @@ -624,14 +612,12 @@ func DeepCopy_v1_ContainerImage(in interface{}, out interface{}, c *conversion.C { in := in.(*ContainerImage) out := out.(*ContainerImage) + *out = 
*in if in.Names != nil { in, out := &in.Names, &out.Names *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Names = nil } - out.SizeBytes = in.SizeBytes return nil } } @@ -640,11 +626,7 @@ func DeepCopy_v1_ContainerPort(in interface{}, out interface{}, c *conversion.Cl { in := in.(*ContainerPort) out := out.(*ContainerPort) - out.Name = in.Name - out.HostPort = in.HostPort - out.ContainerPort = in.ContainerPort - out.Protocol = in.Protocol - out.HostIP = in.HostIP + *out = *in return nil } } @@ -653,12 +635,11 @@ func DeepCopy_v1_ContainerState(in interface{}, out interface{}, c *conversion.C { in := in.(*ContainerState) out := out.(*ContainerState) + *out = *in if in.Waiting != nil { in, out := &in.Waiting, &out.Waiting *out = new(ContainerStateWaiting) **out = **in - } else { - out.Waiting = nil } if in.Running != nil { in, out := &in.Running, &out.Running @@ -666,8 +647,6 @@ func DeepCopy_v1_ContainerState(in interface{}, out interface{}, c *conversion.C if err := DeepCopy_v1_ContainerStateRunning(*in, *out, c); err != nil { return err } - } else { - out.Running = nil } if in.Terminated != nil { in, out := &in.Terminated, &out.Terminated @@ -675,8 +654,6 @@ func DeepCopy_v1_ContainerState(in interface{}, out interface{}, c *conversion.C if err := DeepCopy_v1_ContainerStateTerminated(*in, *out, c); err != nil { return err } - } else { - out.Terminated = nil } return nil } @@ -686,6 +663,7 @@ func DeepCopy_v1_ContainerStateRunning(in interface{}, out interface{}, c *conve { in := in.(*ContainerStateRunning) out := out.(*ContainerStateRunning) + *out = *in out.StartedAt = in.StartedAt.DeepCopy() return nil } @@ -695,13 +673,9 @@ func DeepCopy_v1_ContainerStateTerminated(in interface{}, out interface{}, c *co { in := in.(*ContainerStateTerminated) out := out.(*ContainerStateTerminated) - out.ExitCode = in.ExitCode - out.Signal = in.Signal - out.Reason = in.Reason - out.Message = in.Message + *out = *in out.StartedAt = in.StartedAt.DeepCopy() out.FinishedAt = in.FinishedAt.DeepCopy() - out.ContainerID = in.ContainerID return nil } } @@ -710,8 +684,7 @@ func DeepCopy_v1_ContainerStateWaiting(in interface{}, out interface{}, c *conve { in := in.(*ContainerStateWaiting) out := out.(*ContainerStateWaiting) - out.Reason = in.Reason - out.Message = in.Message + *out = *in return nil } } @@ -720,18 +693,13 @@ func DeepCopy_v1_ContainerStatus(in interface{}, out interface{}, c *conversion. 
{ in := in.(*ContainerStatus) out := out.(*ContainerStatus) - out.Name = in.Name + *out = *in if err := DeepCopy_v1_ContainerState(&in.State, &out.State, c); err != nil { return err } if err := DeepCopy_v1_ContainerState(&in.LastTerminationState, &out.LastTerminationState, c); err != nil { return err } - out.Ready = in.Ready - out.RestartCount = in.RestartCount - out.Image = in.Image - out.ImageID = in.ImageID - out.ContainerID = in.ContainerID return nil } } @@ -740,7 +708,7 @@ func DeepCopy_v1_DaemonEndpoint(in interface{}, out interface{}, c *conversion.C { in := in.(*DaemonEndpoint) out := out.(*DaemonEndpoint) - out.Port = in.Port + *out = *in return nil } } @@ -749,13 +717,11 @@ func DeepCopy_v1_DeleteOptions(in interface{}, out interface{}, c *conversion.Cl { in := in.(*DeleteOptions) out := out.(*DeleteOptions) - out.TypeMeta = in.TypeMeta + *out = *in if in.GracePeriodSeconds != nil { in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds *out = new(int64) **out = **in - } else { - out.GracePeriodSeconds = nil } if in.Preconditions != nil { in, out := &in.Preconditions, &out.Preconditions @@ -763,15 +729,34 @@ func DeepCopy_v1_DeleteOptions(in interface{}, out interface{}, c *conversion.Cl if err := DeepCopy_v1_Preconditions(*in, *out, c); err != nil { return err } - } else { - out.Preconditions = nil } if in.OrphanDependents != nil { in, out := &in.OrphanDependents, &out.OrphanDependents *out = new(bool) **out = **in - } else { - out.OrphanDependents = nil + } + if in.PropagationPolicy != nil { + in, out := &in.PropagationPolicy, &out.PropagationPolicy + *out = new(DeletionPropagation) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_DownwardAPIProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIProjection) + out := out.(*DownwardAPIProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + if err := DeepCopy_v1_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } } return nil } @@ -781,13 +766,11 @@ func DeepCopy_v1_DownwardAPIVolumeFile(in interface{}, out interface{}, c *conve { in := in.(*DownwardAPIVolumeFile) out := out.(*DownwardAPIVolumeFile) - out.Path = in.Path + *out = *in if in.FieldRef != nil { in, out := &in.FieldRef, &out.FieldRef *out = new(ObjectFieldSelector) **out = **in - } else { - out.FieldRef = nil } if in.ResourceFieldRef != nil { in, out := &in.ResourceFieldRef, &out.ResourceFieldRef @@ -795,15 +778,11 @@ func DeepCopy_v1_DownwardAPIVolumeFile(in interface{}, out interface{}, c *conve if err := DeepCopy_v1_ResourceFieldSelector(*in, *out, c); err != nil { return err } - } else { - out.ResourceFieldRef = nil } if in.Mode != nil { in, out := &in.Mode, &out.Mode *out = new(int32) **out = **in - } else { - out.Mode = nil } return nil } @@ -813,6 +792,7 @@ func DeepCopy_v1_DownwardAPIVolumeSource(in interface{}, out interface{}, c *con { in := in.(*DownwardAPIVolumeSource) out := out.(*DownwardAPIVolumeSource) + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DownwardAPIVolumeFile, len(*in)) @@ -821,15 +801,11 @@ func DeepCopy_v1_DownwardAPIVolumeSource(in interface{}, out interface{}, c *con return err } } - } else { - out.Items = nil } if in.DefaultMode != nil { in, out := &in.DefaultMode, &out.DefaultMode *out = new(int32) **out = **in - } else { - out.DefaultMode = nil } return nil } @@ -839,7 +815,7 @@ func 
DeepCopy_v1_EmptyDirVolumeSource(in interface{}, out interface{}, c *conver { in := in.(*EmptyDirVolumeSource) out := out.(*EmptyDirVolumeSource) - out.Medium = in.Medium + *out = *in return nil } } @@ -848,21 +824,16 @@ func DeepCopy_v1_EndpointAddress(in interface{}, out interface{}, c *conversion. { in := in.(*EndpointAddress) out := out.(*EndpointAddress) - out.IP = in.IP - out.Hostname = in.Hostname + *out = *in if in.NodeName != nil { in, out := &in.NodeName, &out.NodeName *out = new(string) **out = **in - } else { - out.NodeName = nil } if in.TargetRef != nil { in, out := &in.TargetRef, &out.TargetRef *out = new(ObjectReference) **out = **in - } else { - out.TargetRef = nil } return nil } @@ -872,9 +843,7 @@ func DeepCopy_v1_EndpointPort(in interface{}, out interface{}, c *conversion.Clo { in := in.(*EndpointPort) out := out.(*EndpointPort) - out.Name = in.Name - out.Port = in.Port - out.Protocol = in.Protocol + *out = *in return nil } } @@ -883,6 +852,7 @@ func DeepCopy_v1_EndpointSubset(in interface{}, out interface{}, c *conversion.C { in := in.(*EndpointSubset) out := out.(*EndpointSubset) + *out = *in if in.Addresses != nil { in, out := &in.Addresses, &out.Addresses *out = make([]EndpointAddress, len(*in)) @@ -891,8 +861,6 @@ func DeepCopy_v1_EndpointSubset(in interface{}, out interface{}, c *conversion.C return err } } - } else { - out.Addresses = nil } if in.NotReadyAddresses != nil { in, out := &in.NotReadyAddresses, &out.NotReadyAddresses @@ -902,17 +870,11 @@ func DeepCopy_v1_EndpointSubset(in interface{}, out interface{}, c *conversion.C return err } } - } else { - out.NotReadyAddresses = nil } if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]EndpointPort, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Ports = nil + copy(*out, *in) } return nil } @@ -922,9 +884,11 @@ func DeepCopy_v1_Endpoints(in interface{}, out interface{}, c *conversion.Cloner { in := in.(*Endpoints) out := out.(*Endpoints) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } if in.Subsets != nil { in, out := &in.Subsets, &out.Subsets @@ -934,8 +898,6 @@ func DeepCopy_v1_Endpoints(in interface{}, out interface{}, c *conversion.Cloner return err } } - } else { - out.Subsets = nil } return nil } @@ -945,8 +907,7 @@ func DeepCopy_v1_EndpointsList(in interface{}, out interface{}, c *conversion.Cl { in := in.(*EndpointsList) out := out.(*EndpointsList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Endpoints, len(*in)) @@ -955,8 +916,29 @@ func DeepCopy_v1_EndpointsList(in interface{}, out interface{}, c *conversion.Cl return err } } - } else { - out.Items = nil + } + return nil + } +} + +func DeepCopy_v1_EnvFromSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvFromSource) + out := out.(*EnvFromSource) + *out = *in + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + *out = new(ConfigMapEnvSource) + if err := DeepCopy_v1_ConfigMapEnvSource(*in, *out, c); err != nil { + return err + } + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretEnvSource) + if err := DeepCopy_v1_SecretEnvSource(*in, *out, c); err != nil { + return err + } } return nil } @@ -966,16 
+948,13 @@ func DeepCopy_v1_EnvVar(in interface{}, out interface{}, c *conversion.Cloner) e { in := in.(*EnvVar) out := out.(*EnvVar) - out.Name = in.Name - out.Value = in.Value + *out = *in if in.ValueFrom != nil { in, out := &in.ValueFrom, &out.ValueFrom *out = new(EnvVarSource) if err := DeepCopy_v1_EnvVarSource(*in, *out, c); err != nil { return err } - } else { - out.ValueFrom = nil } return nil } @@ -985,12 +964,11 @@ func DeepCopy_v1_EnvVarSource(in interface{}, out interface{}, c *conversion.Clo { in := in.(*EnvVarSource) out := out.(*EnvVarSource) + *out = *in if in.FieldRef != nil { in, out := &in.FieldRef, &out.FieldRef *out = new(ObjectFieldSelector) **out = **in - } else { - out.FieldRef = nil } if in.ResourceFieldRef != nil { in, out := &in.ResourceFieldRef, &out.ResourceFieldRef @@ -998,22 +976,20 @@ func DeepCopy_v1_EnvVarSource(in interface{}, out interface{}, c *conversion.Clo if err := DeepCopy_v1_ResourceFieldSelector(*in, *out, c); err != nil { return err } - } else { - out.ResourceFieldRef = nil } if in.ConfigMapKeyRef != nil { in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef *out = new(ConfigMapKeySelector) - **out = **in - } else { - out.ConfigMapKeyRef = nil + if err := DeepCopy_v1_ConfigMapKeySelector(*in, *out, c); err != nil { + return err + } } if in.SecretKeyRef != nil { in, out := &in.SecretKeyRef, &out.SecretKeyRef *out = new(SecretKeySelector) - **out = **in - } else { - out.SecretKeyRef = nil + if err := DeepCopy_v1_SecretKeySelector(*in, *out, c); err != nil { + return err + } } return nil } @@ -1023,18 +999,14 @@ func DeepCopy_v1_Event(in interface{}, out interface{}, c *conversion.Cloner) er { in := in.(*Event) out := out.(*Event) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } - out.InvolvedObject = in.InvolvedObject - out.Reason = in.Reason - out.Message = in.Message - out.Source = in.Source out.FirstTimestamp = in.FirstTimestamp.DeepCopy() out.LastTimestamp = in.LastTimestamp.DeepCopy() - out.Count = in.Count - out.Type = in.Type return nil } } @@ -1043,8 +1015,7 @@ func DeepCopy_v1_EventList(in interface{}, out interface{}, c *conversion.Cloner { in := in.(*EventList) out := out.(*EventList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Event, len(*in)) @@ -1053,8 +1024,6 @@ func DeepCopy_v1_EventList(in interface{}, out interface{}, c *conversion.Cloner return err } } - } else { - out.Items = nil } return nil } @@ -1064,8 +1033,7 @@ func DeepCopy_v1_EventSource(in interface{}, out interface{}, c *conversion.Clon { in := in.(*EventSource) out := out.(*EventSource) - out.Component = in.Component - out.Host = in.Host + *out = *in return nil } } @@ -1074,48 +1042,31 @@ func DeepCopy_v1_ExecAction(in interface{}, out interface{}, c *conversion.Clone { in := in.(*ExecAction) out := out.(*ExecAction) + *out = *in if in.Command != nil { in, out := &in.Command, &out.Command *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Command = nil } return nil } } -func DeepCopy_v1_ExportOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ExportOptions) - out := out.(*ExportOptions) - out.TypeMeta = in.TypeMeta - out.Export = in.Export - out.Exact = in.Exact - return nil - } -} - func DeepCopy_v1_FCVolumeSource(in 
interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*FCVolumeSource) out := out.(*FCVolumeSource) + *out = *in if in.TargetWWNs != nil { in, out := &in.TargetWWNs, &out.TargetWWNs *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.TargetWWNs = nil } if in.Lun != nil { in, out := &in.Lun, &out.Lun *out = new(int32) **out = **in - } else { - out.Lun = nil } - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly return nil } } @@ -1124,24 +1075,18 @@ func DeepCopy_v1_FlexVolumeSource(in interface{}, out interface{}, c *conversion { in := in.(*FlexVolumeSource) out := out.(*FlexVolumeSource) - out.Driver = in.Driver - out.FSType = in.FSType + *out = *in if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef *out = new(LocalObjectReference) **out = **in - } else { - out.SecretRef = nil } - out.ReadOnly = in.ReadOnly if in.Options != nil { in, out := &in.Options, &out.Options *out = make(map[string]string) for key, val := range *in { (*out)[key] = val } - } else { - out.Options = nil } return nil } @@ -1151,8 +1096,7 @@ func DeepCopy_v1_FlockerVolumeSource(in interface{}, out interface{}, c *convers { in := in.(*FlockerVolumeSource) out := out.(*FlockerVolumeSource) - out.DatasetName = in.DatasetName - out.DatasetUUID = in.DatasetUUID + *out = *in return nil } } @@ -1161,10 +1105,7 @@ func DeepCopy_v1_GCEPersistentDiskVolumeSource(in interface{}, out interface{}, { in := in.(*GCEPersistentDiskVolumeSource) out := out.(*GCEPersistentDiskVolumeSource) - out.PDName = in.PDName - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly + *out = *in return nil } } @@ -1173,9 +1114,7 @@ func DeepCopy_v1_GitRepoVolumeSource(in interface{}, out interface{}, c *convers { in := in.(*GitRepoVolumeSource) out := out.(*GitRepoVolumeSource) - out.Repository = in.Repository - out.Revision = in.Revision - out.Directory = in.Directory + *out = *in return nil } } @@ -1184,9 +1123,7 @@ func DeepCopy_v1_GlusterfsVolumeSource(in interface{}, out interface{}, c *conve { in := in.(*GlusterfsVolumeSource) out := out.(*GlusterfsVolumeSource) - out.EndpointsName = in.EndpointsName - out.Path = in.Path - out.ReadOnly = in.ReadOnly + *out = *in return nil } } @@ -1195,18 +1132,11 @@ func DeepCopy_v1_HTTPGetAction(in interface{}, out interface{}, c *conversion.Cl { in := in.(*HTTPGetAction) out := out.(*HTTPGetAction) - out.Path = in.Path - out.Port = in.Port - out.Host = in.Host - out.Scheme = in.Scheme + *out = *in if in.HTTPHeaders != nil { in, out := &in.HTTPHeaders, &out.HTTPHeaders *out = make([]HTTPHeader, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.HTTPHeaders = nil + copy(*out, *in) } return nil } @@ -1216,8 +1146,7 @@ func DeepCopy_v1_HTTPHeader(in interface{}, out interface{}, c *conversion.Clone { in := in.(*HTTPHeader) out := out.(*HTTPHeader) - out.Name = in.Name - out.Value = in.Value + *out = *in return nil } } @@ -1226,14 +1155,13 @@ func DeepCopy_v1_Handler(in interface{}, out interface{}, c *conversion.Cloner) { in := in.(*Handler) out := out.(*Handler) + *out = *in if in.Exec != nil { in, out := &in.Exec, &out.Exec *out = new(ExecAction) if err := DeepCopy_v1_ExecAction(*in, *out, c); err != nil { return err } - } else { - out.Exec = nil } if in.HTTPGet != nil { in, out := &in.HTTPGet, &out.HTTPGet @@ -1241,15 +1169,11 @@ func DeepCopy_v1_Handler(in interface{}, out interface{}, c *conversion.Cloner) if err := DeepCopy_v1_HTTPGetAction(*in, *out, c); err != nil { return err } - } else { - out.HTTPGet = 
nil } if in.TCPSocket != nil { in, out := &in.TCPSocket, &out.TCPSocket *out = new(TCPSocketAction) **out = **in - } else { - out.TCPSocket = nil } return nil } @@ -1259,7 +1183,7 @@ func DeepCopy_v1_HostPathVolumeSource(in interface{}, out interface{}, c *conver { in := in.(*HostPathVolumeSource) out := out.(*HostPathVolumeSource) - out.Path = in.Path + *out = *in return nil } } @@ -1268,12 +1192,12 @@ func DeepCopy_v1_ISCSIVolumeSource(in interface{}, out interface{}, c *conversio { in := in.(*ISCSIVolumeSource) out := out.(*ISCSIVolumeSource) - out.TargetPortal = in.TargetPortal - out.IQN = in.IQN - out.Lun = in.Lun - out.ISCSIInterface = in.ISCSIInterface - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } return nil } } @@ -1282,14 +1206,11 @@ func DeepCopy_v1_KeyToPath(in interface{}, out interface{}, c *conversion.Cloner { in := in.(*KeyToPath) out := out.(*KeyToPath) - out.Key = in.Key - out.Path = in.Path + *out = *in if in.Mode != nil { in, out := &in.Mode, &out.Mode *out = new(int32) **out = **in - } else { - out.Mode = nil } return nil } @@ -1299,14 +1220,13 @@ func DeepCopy_v1_Lifecycle(in interface{}, out interface{}, c *conversion.Cloner { in := in.(*Lifecycle) out := out.(*Lifecycle) + *out = *in if in.PostStart != nil { in, out := &in.PostStart, &out.PostStart *out = new(Handler) if err := DeepCopy_v1_Handler(*in, *out, c); err != nil { return err } - } else { - out.PostStart = nil } if in.PreStop != nil { in, out := &in.PreStop, &out.PreStop @@ -1314,8 +1234,6 @@ func DeepCopy_v1_Lifecycle(in interface{}, out interface{}, c *conversion.Cloner if err := DeepCopy_v1_Handler(*in, *out, c); err != nil { return err } - } else { - out.PreStop = nil } return nil } @@ -1325,9 +1243,11 @@ func DeepCopy_v1_LimitRange(in interface{}, out interface{}, c *conversion.Clone { in := in.(*LimitRange) out := out.(*LimitRange) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } if err := DeepCopy_v1_LimitRangeSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -1340,15 +1260,13 @@ func DeepCopy_v1_LimitRangeItem(in interface{}, out interface{}, c *conversion.C { in := in.(*LimitRangeItem) out := out.(*LimitRangeItem) - out.Type = in.Type + *out = *in if in.Max != nil { in, out := &in.Max, &out.Max *out = make(ResourceList) for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Max = nil } if in.Min != nil { in, out := &in.Min, &out.Min @@ -1356,8 +1274,6 @@ func DeepCopy_v1_LimitRangeItem(in interface{}, out interface{}, c *conversion.C for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Min = nil } if in.Default != nil { in, out := &in.Default, &out.Default @@ -1365,8 +1281,6 @@ func DeepCopy_v1_LimitRangeItem(in interface{}, out interface{}, c *conversion.C for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Default = nil } if in.DefaultRequest != nil { in, out := &in.DefaultRequest, &out.DefaultRequest @@ -1374,8 +1288,6 @@ func DeepCopy_v1_LimitRangeItem(in interface{}, out interface{}, c *conversion.C for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.DefaultRequest = nil } if in.MaxLimitRequestRatio != nil { in, out := &in.MaxLimitRequestRatio, 
&out.MaxLimitRequestRatio @@ -1383,8 +1295,6 @@ func DeepCopy_v1_LimitRangeItem(in interface{}, out interface{}, c *conversion.C for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.MaxLimitRequestRatio = nil } return nil } @@ -1394,8 +1304,7 @@ func DeepCopy_v1_LimitRangeList(in interface{}, out interface{}, c *conversion.C { in := in.(*LimitRangeList) out := out.(*LimitRangeList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]LimitRange, len(*in)) @@ -1404,8 +1313,6 @@ func DeepCopy_v1_LimitRangeList(in interface{}, out interface{}, c *conversion.C return err } } - } else { - out.Items = nil } return nil } @@ -1415,6 +1322,7 @@ func DeepCopy_v1_LimitRangeSpec(in interface{}, out interface{}, c *conversion.C { in := in.(*LimitRangeSpec) out := out.(*LimitRangeSpec) + *out = *in if in.Limits != nil { in, out := &in.Limits, &out.Limits *out = make([]LimitRangeItem, len(*in)) @@ -1423,8 +1331,6 @@ func DeepCopy_v1_LimitRangeSpec(in interface{}, out interface{}, c *conversion.C return err } } - } else { - out.Limits = nil } return nil } @@ -1434,18 +1340,17 @@ func DeepCopy_v1_List(in interface{}, out interface{}, c *conversion.Cloner) err { in := in.(*List) out := out.(*List) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]runtime.RawExtension, len(*in)) for i := range *in { - if err := runtime.DeepCopy_runtime_RawExtension(&(*in)[i], &(*out)[i], c); err != nil { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { return err + } else { + (*out)[i] = *newVal.(*runtime.RawExtension) } } - } else { - out.Items = nil } return nil } @@ -1455,17 +1360,11 @@ func DeepCopy_v1_ListOptions(in interface{}, out interface{}, c *conversion.Clon { in := in.(*ListOptions) out := out.(*ListOptions) - out.TypeMeta = in.TypeMeta - out.LabelSelector = in.LabelSelector - out.FieldSelector = in.FieldSelector - out.Watch = in.Watch - out.ResourceVersion = in.ResourceVersion + *out = *in if in.TimeoutSeconds != nil { in, out := &in.TimeoutSeconds, &out.TimeoutSeconds *out = new(int64) **out = **in - } else { - out.TimeoutSeconds = nil } return nil } @@ -1475,8 +1374,7 @@ func DeepCopy_v1_LoadBalancerIngress(in interface{}, out interface{}, c *convers { in := in.(*LoadBalancerIngress) out := out.(*LoadBalancerIngress) - out.IP = in.IP - out.Hostname = in.Hostname + *out = *in return nil } } @@ -1485,14 +1383,11 @@ func DeepCopy_v1_LoadBalancerStatus(in interface{}, out interface{}, c *conversi { in := in.(*LoadBalancerStatus) out := out.(*LoadBalancerStatus) + *out = *in if in.Ingress != nil { in, out := &in.Ingress, &out.Ingress *out = make([]LoadBalancerIngress, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Ingress = nil + copy(*out, *in) } return nil } @@ -1502,7 +1397,7 @@ func DeepCopy_v1_LocalObjectReference(in interface{}, out interface{}, c *conver { in := in.(*LocalObjectReference) out := out.(*LocalObjectReference) - out.Name = in.Name + *out = *in return nil } } @@ -1511,9 +1406,7 @@ func DeepCopy_v1_NFSVolumeSource(in interface{}, out interface{}, c *conversion. 
{ in := in.(*NFSVolumeSource) out := out.(*NFSVolumeSource) - out.Server = in.Server - out.Path = in.Path - out.ReadOnly = in.ReadOnly + *out = *in return nil } } @@ -1522,14 +1415,15 @@ func DeepCopy_v1_Namespace(in interface{}, out interface{}, c *conversion.Cloner { in := in.(*Namespace) out := out.(*Namespace) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } if err := DeepCopy_v1_NamespaceSpec(&in.Spec, &out.Spec, c); err != nil { return err } - out.Status = in.Status return nil } } @@ -1538,8 +1432,7 @@ func DeepCopy_v1_NamespaceList(in interface{}, out interface{}, c *conversion.Cl { in := in.(*NamespaceList) out := out.(*NamespaceList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Namespace, len(*in)) @@ -1548,8 +1441,6 @@ func DeepCopy_v1_NamespaceList(in interface{}, out interface{}, c *conversion.Cl return err } } - } else { - out.Items = nil } return nil } @@ -1559,14 +1450,11 @@ func DeepCopy_v1_NamespaceSpec(in interface{}, out interface{}, c *conversion.Cl { in := in.(*NamespaceSpec) out := out.(*NamespaceSpec) + *out = *in if in.Finalizers != nil { in, out := &in.Finalizers, &out.Finalizers *out = make([]FinalizerName, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Finalizers = nil + copy(*out, *in) } return nil } @@ -1576,7 +1464,7 @@ func DeepCopy_v1_NamespaceStatus(in interface{}, out interface{}, c *conversion. { in := in.(*NamespaceStatus) out := out.(*NamespaceStatus) - out.Phase = in.Phase + *out = *in return nil } } @@ -1585,11 +1473,15 @@ func DeepCopy_v1_Node(in interface{}, out interface{}, c *conversion.Cloner) err { in := in.(*Node) out := out.(*Node) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_NodeSpec(&in.Spec, &out.Spec, c); err != nil { return err } - out.Spec = in.Spec if err := DeepCopy_v1_NodeStatus(&in.Status, &out.Status, c); err != nil { return err } @@ -1601,8 +1493,7 @@ func DeepCopy_v1_NodeAddress(in interface{}, out interface{}, c *conversion.Clon { in := in.(*NodeAddress) out := out.(*NodeAddress) - out.Type = in.Type - out.Address = in.Address + *out = *in return nil } } @@ -1611,14 +1502,13 @@ func DeepCopy_v1_NodeAffinity(in interface{}, out interface{}, c *conversion.Clo { in := in.(*NodeAffinity) out := out.(*NodeAffinity) + *out = *in if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution *out = new(NodeSelector) if err := DeepCopy_v1_NodeSelector(*in, *out, c); err != nil { return err } - } else { - out.RequiredDuringSchedulingIgnoredDuringExecution = nil } if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution @@ -1628,8 +1518,6 @@ func DeepCopy_v1_NodeAffinity(in interface{}, out interface{}, c *conversion.Clo return err } } - } else { - out.PreferredDuringSchedulingIgnoredDuringExecution = nil } return nil } @@ -1639,12 
+1527,9 @@ func DeepCopy_v1_NodeCondition(in interface{}, out interface{}, c *conversion.Cl { in := in.(*NodeCondition) out := out.(*NodeCondition) - out.Type = in.Type - out.Status = in.Status + *out = *in out.LastHeartbeatTime = in.LastHeartbeatTime.DeepCopy() out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - out.Reason = in.Reason - out.Message = in.Message return nil } } @@ -1653,7 +1538,7 @@ func DeepCopy_v1_NodeDaemonEndpoints(in interface{}, out interface{}, c *convers { in := in.(*NodeDaemonEndpoints) out := out.(*NodeDaemonEndpoints) - out.KubeletEndpoint = in.KubeletEndpoint + *out = *in return nil } } @@ -1662,8 +1547,7 @@ func DeepCopy_v1_NodeList(in interface{}, out interface{}, c *conversion.Cloner) { in := in.(*NodeList) out := out.(*NodeList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Node, len(*in)) @@ -1672,8 +1556,6 @@ func DeepCopy_v1_NodeList(in interface{}, out interface{}, c *conversion.Cloner) return err } } - } else { - out.Items = nil } return nil } @@ -1683,8 +1565,23 @@ func DeepCopy_v1_NodeProxyOptions(in interface{}, out interface{}, c *conversion { in := in.(*NodeProxyOptions) out := out.(*NodeProxyOptions) - out.TypeMeta = in.TypeMeta - out.Path = in.Path + *out = *in + return nil + } +} + +func DeepCopy_v1_NodeResources(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeResources) + out := out.(*NodeResources) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } return nil } } @@ -1693,6 +1590,7 @@ func DeepCopy_v1_NodeSelector(in interface{}, out interface{}, c *conversion.Clo { in := in.(*NodeSelector) out := out.(*NodeSelector) + *out = *in if in.NodeSelectorTerms != nil { in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms *out = make([]NodeSelectorTerm, len(*in)) @@ -1701,8 +1599,6 @@ func DeepCopy_v1_NodeSelector(in interface{}, out interface{}, c *conversion.Clo return err } } - } else { - out.NodeSelectorTerms = nil } return nil } @@ -1712,14 +1608,11 @@ func DeepCopy_v1_NodeSelectorRequirement(in interface{}, out interface{}, c *con { in := in.(*NodeSelectorRequirement) out := out.(*NodeSelectorRequirement) - out.Key = in.Key - out.Operator = in.Operator + *out = *in if in.Values != nil { in, out := &in.Values, &out.Values *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Values = nil } return nil } @@ -1729,6 +1622,7 @@ func DeepCopy_v1_NodeSelectorTerm(in interface{}, out interface{}, c *conversion { in := in.(*NodeSelectorTerm) out := out.(*NodeSelectorTerm) + *out = *in if in.MatchExpressions != nil { in, out := &in.MatchExpressions, &out.MatchExpressions *out = make([]NodeSelectorRequirement, len(*in)) @@ -1737,8 +1631,6 @@ func DeepCopy_v1_NodeSelectorTerm(in interface{}, out interface{}, c *conversion return err } } - } else { - out.MatchExpressions = nil } return nil } @@ -1748,10 +1640,16 @@ func DeepCopy_v1_NodeSpec(in interface{}, out interface{}, c *conversion.Cloner) { in := in.(*NodeSpec) out := out.(*NodeSpec) - out.PodCIDR = in.PodCIDR - out.ExternalID = in.ExternalID - out.ProviderID = in.ProviderID - out.Unschedulable = in.Unschedulable + *out = *in + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]Taint, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Taint(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } 
+ } return nil } } @@ -1760,14 +1658,13 @@ func DeepCopy_v1_NodeStatus(in interface{}, out interface{}, c *conversion.Clone { in := in.(*NodeStatus) out := out.(*NodeStatus) + *out = *in if in.Capacity != nil { in, out := &in.Capacity, &out.Capacity *out = make(ResourceList) for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Capacity = nil } if in.Allocatable != nil { in, out := &in.Allocatable, &out.Allocatable @@ -1775,10 +1672,7 @@ func DeepCopy_v1_NodeStatus(in interface{}, out interface{}, c *conversion.Clone for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Allocatable = nil } - out.Phase = in.Phase if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]NodeCondition, len(*in)) @@ -1787,20 +1681,12 @@ func DeepCopy_v1_NodeStatus(in interface{}, out interface{}, c *conversion.Clone return err } } - } else { - out.Conditions = nil } if in.Addresses != nil { in, out := &in.Addresses, &out.Addresses *out = make([]NodeAddress, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Addresses = nil + copy(*out, *in) } - out.DaemonEndpoints = in.DaemonEndpoints - out.NodeInfo = in.NodeInfo if in.Images != nil { in, out := &in.Images, &out.Images *out = make([]ContainerImage, len(*in)) @@ -1809,26 +1695,16 @@ func DeepCopy_v1_NodeStatus(in interface{}, out interface{}, c *conversion.Clone return err } } - } else { - out.Images = nil } if in.VolumesInUse != nil { in, out := &in.VolumesInUse, &out.VolumesInUse *out = make([]UniqueVolumeName, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.VolumesInUse = nil + copy(*out, *in) } if in.VolumesAttached != nil { in, out := &in.VolumesAttached, &out.VolumesAttached *out = make([]AttachedVolume, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.VolumesAttached = nil + copy(*out, *in) } return nil } @@ -1838,16 +1714,7 @@ func DeepCopy_v1_NodeSystemInfo(in interface{}, out interface{}, c *conversion.C { in := in.(*NodeSystemInfo) out := out.(*NodeSystemInfo) - out.MachineID = in.MachineID - out.SystemUUID = in.SystemUUID - out.BootID = in.BootID - out.KernelVersion = in.KernelVersion - out.OSImage = in.OSImage - out.ContainerRuntimeVersion = in.ContainerRuntimeVersion - out.KubeletVersion = in.KubeletVersion - out.KubeProxyVersion = in.KubeProxyVersion - out.OperatingSystem = in.OperatingSystem - out.Architecture = in.Architecture + *out = *in return nil } } @@ -1856,8 +1723,7 @@ func DeepCopy_v1_ObjectFieldSelector(in interface{}, out interface{}, c *convers { in := in.(*ObjectFieldSelector) out := out.(*ObjectFieldSelector) - out.APIVersion = in.APIVersion - out.FieldPath = in.FieldPath + *out = *in return nil } } @@ -1866,27 +1732,17 @@ func DeepCopy_v1_ObjectMeta(in interface{}, out interface{}, c *conversion.Clone { in := in.(*ObjectMeta) out := out.(*ObjectMeta) - out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = in.Namespace - out.SelfLink = in.SelfLink - out.UID = in.UID - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation + *out = *in out.CreationTimestamp = in.CreationTimestamp.DeepCopy() if in.DeletionTimestamp != nil { in, out := &in.DeletionTimestamp, &out.DeletionTimestamp - *out = new(unversioned.Time) + *out = new(meta_v1.Time) **out = (*in).DeepCopy() - } else { - out.DeletionTimestamp = nil } if in.DeletionGracePeriodSeconds != nil { in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds *out = new(int64) **out = 
**in - } else { - out.DeletionGracePeriodSeconds = nil } if in.Labels != nil { in, out := &in.Labels, &out.Labels @@ -1894,8 +1750,6 @@ func DeepCopy_v1_ObjectMeta(in interface{}, out interface{}, c *conversion.Clone for key, val := range *in { (*out)[key] = val } - } else { - out.Labels = nil } if in.Annotations != nil { in, out := &in.Annotations, &out.Annotations @@ -1903,28 +1757,23 @@ func DeepCopy_v1_ObjectMeta(in interface{}, out interface{}, c *conversion.Clone for key, val := range *in { (*out)[key] = val } - } else { - out.Annotations = nil } if in.OwnerReferences != nil { in, out := &in.OwnerReferences, &out.OwnerReferences - *out = make([]OwnerReference, len(*in)) + *out = make([]meta_v1.OwnerReference, len(*in)) for i := range *in { - if err := DeepCopy_v1_OwnerReference(&(*in)[i], &(*out)[i], c); err != nil { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { return err + } else { + (*out)[i] = *newVal.(*meta_v1.OwnerReference) } } - } else { - out.OwnerReferences = nil } if in.Finalizers != nil { in, out := &in.Finalizers, &out.Finalizers *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Finalizers = nil } - out.ClusterName = in.ClusterName return nil } } @@ -1933,32 +1782,7 @@ func DeepCopy_v1_ObjectReference(in interface{}, out interface{}, c *conversion. { in := in.(*ObjectReference) out := out.(*ObjectReference) - out.Kind = in.Kind - out.Namespace = in.Namespace - out.Name = in.Name - out.UID = in.UID - out.APIVersion = in.APIVersion - out.ResourceVersion = in.ResourceVersion - out.FieldPath = in.FieldPath - return nil - } -} - -func DeepCopy_v1_OwnerReference(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*OwnerReference) - out := out.(*OwnerReference) - out.APIVersion = in.APIVersion - out.Kind = in.Kind - out.Name = in.Name - out.UID = in.UID - if in.Controller != nil { - in, out := &in.Controller, &out.Controller - *out = new(bool) - **out = **in - } else { - out.Controller = nil - } + *out = *in return nil } } @@ -1967,14 +1791,15 @@ func DeepCopy_v1_PersistentVolume(in interface{}, out interface{}, c *conversion { in := in.(*PersistentVolume) out := out.(*PersistentVolume) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } if err := DeepCopy_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, c); err != nil { return err } - out.Status = in.Status return nil } } @@ -1983,9 +1808,11 @@ func DeepCopy_v1_PersistentVolumeClaim(in interface{}, out interface{}, c *conve { in := in.(*PersistentVolumeClaim) out := out.(*PersistentVolumeClaim) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } if err := DeepCopy_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -2001,8 +1828,7 @@ func DeepCopy_v1_PersistentVolumeClaimList(in interface{}, out interface{}, c *c { in := in.(*PersistentVolumeClaimList) out := out.(*PersistentVolumeClaimList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PersistentVolumeClaim, len(*in)) @@ -2011,8 +1837,6 @@ func DeepCopy_v1_PersistentVolumeClaimList(in interface{}, 
out interface{}, c *c return err } } - } else { - out.Items = nil } return nil } @@ -2022,28 +1846,28 @@ func DeepCopy_v1_PersistentVolumeClaimSpec(in interface{}, out interface{}, c *c { in := in.(*PersistentVolumeClaimSpec) out := out.(*PersistentVolumeClaimSpec) + *out = *in if in.AccessModes != nil { in, out := &in.AccessModes, &out.AccessModes *out = make([]PersistentVolumeAccessMode, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.AccessModes = nil + copy(*out, *in) } if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*meta_v1.LabelSelector) } - } else { - out.Selector = nil } if err := DeepCopy_v1_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { return err } - out.VolumeName = in.VolumeName + if in.StorageClassName != nil { + in, out := &in.StorageClassName, &out.StorageClassName + *out = new(string) + **out = **in + } return nil } } @@ -2052,15 +1876,11 @@ func DeepCopy_v1_PersistentVolumeClaimStatus(in interface{}, out interface{}, c { in := in.(*PersistentVolumeClaimStatus) out := out.(*PersistentVolumeClaimStatus) - out.Phase = in.Phase + *out = *in if in.AccessModes != nil { in, out := &in.AccessModes, &out.AccessModes *out = make([]PersistentVolumeAccessMode, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.AccessModes = nil + copy(*out, *in) } if in.Capacity != nil { in, out := &in.Capacity, &out.Capacity @@ -2068,8 +1888,6 @@ func DeepCopy_v1_PersistentVolumeClaimStatus(in interface{}, out interface{}, c for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Capacity = nil } return nil } @@ -2079,8 +1897,7 @@ func DeepCopy_v1_PersistentVolumeClaimVolumeSource(in interface{}, out interface { in := in.(*PersistentVolumeClaimVolumeSource) out := out.(*PersistentVolumeClaimVolumeSource) - out.ClaimName = in.ClaimName - out.ReadOnly = in.ReadOnly + *out = *in return nil } } @@ -2089,8 +1906,7 @@ func DeepCopy_v1_PersistentVolumeList(in interface{}, out interface{}, c *conver { in := in.(*PersistentVolumeList) out := out.(*PersistentVolumeList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PersistentVolume, len(*in)) @@ -2099,8 +1915,6 @@ func DeepCopy_v1_PersistentVolumeList(in interface{}, out interface{}, c *conver return err } } - } else { - out.Items = nil } return nil } @@ -2110,40 +1924,31 @@ func DeepCopy_v1_PersistentVolumeSource(in interface{}, out interface{}, c *conv { in := in.(*PersistentVolumeSource) out := out.(*PersistentVolumeSource) + *out = *in if in.GCEPersistentDisk != nil { in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk *out = new(GCEPersistentDiskVolumeSource) **out = **in - } else { - out.GCEPersistentDisk = nil } if in.AWSElasticBlockStore != nil { in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore *out = new(AWSElasticBlockStoreVolumeSource) **out = **in - } else { - out.AWSElasticBlockStore = nil } if in.HostPath != nil { in, out := &in.HostPath, &out.HostPath *out = new(HostPathVolumeSource) **out = **in - } else { - out.HostPath = nil } if in.Glusterfs != nil { in, out := &in.Glusterfs, &out.Glusterfs *out = new(GlusterfsVolumeSource) **out = **in - } else { - out.Glusterfs = nil } if in.NFS != nil { in, out := 
&in.NFS, &out.NFS *out = new(NFSVolumeSource) **out = **in - } else { - out.NFS = nil } if in.RBD != nil { in, out := &in.RBD, &out.RBD @@ -2151,22 +1956,18 @@ func DeepCopy_v1_PersistentVolumeSource(in interface{}, out interface{}, c *conv if err := DeepCopy_v1_RBDVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.RBD = nil } if in.ISCSI != nil { in, out := &in.ISCSI, &out.ISCSI *out = new(ISCSIVolumeSource) - **out = **in - } else { - out.ISCSI = nil + if err := DeepCopy_v1_ISCSIVolumeSource(*in, *out, c); err != nil { + return err + } } if in.Cinder != nil { in, out := &in.Cinder, &out.Cinder *out = new(CinderVolumeSource) **out = **in - } else { - out.Cinder = nil } if in.CephFS != nil { in, out := &in.CephFS, &out.CephFS @@ -2174,8 +1975,6 @@ func DeepCopy_v1_PersistentVolumeSource(in interface{}, out interface{}, c *conv if err := DeepCopy_v1_CephFSVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.CephFS = nil } if in.FC != nil { in, out := &in.FC, &out.FC @@ -2183,15 +1982,11 @@ func DeepCopy_v1_PersistentVolumeSource(in interface{}, out interface{}, c *conv if err := DeepCopy_v1_FCVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.FC = nil } if in.Flocker != nil { in, out := &in.Flocker, &out.Flocker *out = new(FlockerVolumeSource) **out = **in - } else { - out.Flocker = nil } if in.FlexVolume != nil { in, out := &in.FlexVolume, &out.FlexVolume @@ -2199,29 +1994,21 @@ func DeepCopy_v1_PersistentVolumeSource(in interface{}, out interface{}, c *conv if err := DeepCopy_v1_FlexVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.FlexVolume = nil } if in.AzureFile != nil { in, out := &in.AzureFile, &out.AzureFile *out = new(AzureFileVolumeSource) **out = **in - } else { - out.AzureFile = nil } if in.VsphereVolume != nil { in, out := &in.VsphereVolume, &out.VsphereVolume *out = new(VsphereVirtualDiskVolumeSource) **out = **in - } else { - out.VsphereVolume = nil } if in.Quobyte != nil { in, out := &in.Quobyte, &out.Quobyte *out = new(QuobyteVolumeSource) **out = **in - } else { - out.Quobyte = nil } if in.AzureDisk != nil { in, out := &in.AzureDisk, &out.AzureDisk @@ -2229,15 +2016,23 @@ func DeepCopy_v1_PersistentVolumeSource(in interface{}, out interface{}, c *conv if err := DeepCopy_v1_AzureDiskVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.AzureDisk = nil } if in.PhotonPersistentDisk != nil { in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk *out = new(PhotonPersistentDiskVolumeSource) **out = **in - } else { - out.PhotonPersistentDisk = nil + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOVolumeSource) + if err := DeepCopy_v1_ScaleIOVolumeSource(*in, *out, c); err != nil { + return err + } } return nil } @@ -2247,14 +2042,13 @@ func DeepCopy_v1_PersistentVolumeSpec(in interface{}, out interface{}, c *conver { in := in.(*PersistentVolumeSpec) out := out.(*PersistentVolumeSpec) + *out = *in if in.Capacity != nil { in, out := &in.Capacity, &out.Capacity *out = make(ResourceList) for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Capacity = nil } if err := DeepCopy_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, c); err != nil { return err @@ -2262,20 +2056,13 @@ func DeepCopy_v1_PersistentVolumeSpec(in interface{}, out interface{}, c 
*conver if in.AccessModes != nil { in, out := &in.AccessModes, &out.AccessModes *out = make([]PersistentVolumeAccessMode, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.AccessModes = nil + copy(*out, *in) } if in.ClaimRef != nil { in, out := &in.ClaimRef, &out.ClaimRef *out = new(ObjectReference) **out = **in - } else { - out.ClaimRef = nil } - out.PersistentVolumeReclaimPolicy = in.PersistentVolumeReclaimPolicy return nil } } @@ -2284,9 +2071,7 @@ func DeepCopy_v1_PersistentVolumeStatus(in interface{}, out interface{}, c *conv { in := in.(*PersistentVolumeStatus) out := out.(*PersistentVolumeStatus) - out.Phase = in.Phase - out.Message = in.Message - out.Reason = in.Reason + *out = *in return nil } } @@ -2295,8 +2080,7 @@ func DeepCopy_v1_PhotonPersistentDiskVolumeSource(in interface{}, out interface{ { in := in.(*PhotonPersistentDiskVolumeSource) out := out.(*PhotonPersistentDiskVolumeSource) - out.PdID = in.PdID - out.FSType = in.FSType + *out = *in return nil } } @@ -2305,9 +2089,11 @@ func DeepCopy_v1_Pod(in interface{}, out interface{}, c *conversion.Cloner) erro { in := in.(*Pod) out := out.(*Pod) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } if err := DeepCopy_v1_PodSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -2323,6 +2109,7 @@ func DeepCopy_v1_PodAffinity(in interface{}, out interface{}, c *conversion.Clon { in := in.(*PodAffinity) out := out.(*PodAffinity) + *out = *in if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution *out = make([]PodAffinityTerm, len(*in)) @@ -2331,8 +2118,6 @@ func DeepCopy_v1_PodAffinity(in interface{}, out interface{}, c *conversion.Clon return err } } - } else { - out.RequiredDuringSchedulingIgnoredDuringExecution = nil } if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution @@ -2342,8 +2127,6 @@ func DeepCopy_v1_PodAffinity(in interface{}, out interface{}, c *conversion.Clon return err } } - } else { - out.PreferredDuringSchedulingIgnoredDuringExecution = nil } return nil } @@ -2353,23 +2136,20 @@ func DeepCopy_v1_PodAffinityTerm(in interface{}, out interface{}, c *conversion. { in := in.(*PodAffinityTerm) out := out.(*PodAffinityTerm) + *out = *in if in.LabelSelector != nil { in, out := &in.LabelSelector, &out.LabelSelector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*meta_v1.LabelSelector) } - } else { - out.LabelSelector = nil } if in.Namespaces != nil { in, out := &in.Namespaces, &out.Namespaces *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Namespaces = nil } - out.TopologyKey = in.TopologyKey return nil } } @@ -2378,6 +2158,7 @@ func DeepCopy_v1_PodAntiAffinity(in interface{}, out interface{}, c *conversion. 
{ in := in.(*PodAntiAffinity) out := out.(*PodAntiAffinity) + *out = *in if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution *out = make([]PodAffinityTerm, len(*in)) @@ -2386,8 +2167,6 @@ func DeepCopy_v1_PodAntiAffinity(in interface{}, out interface{}, c *conversion. return err } } - } else { - out.RequiredDuringSchedulingIgnoredDuringExecution = nil } if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution @@ -2397,8 +2176,6 @@ func DeepCopy_v1_PodAntiAffinity(in interface{}, out interface{}, c *conversion. return err } } - } else { - out.PreferredDuringSchedulingIgnoredDuringExecution = nil } return nil } @@ -2408,12 +2185,7 @@ func DeepCopy_v1_PodAttachOptions(in interface{}, out interface{}, c *conversion { in := in.(*PodAttachOptions) out := out.(*PodAttachOptions) - out.TypeMeta = in.TypeMeta - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container + *out = *in return nil } } @@ -2422,12 +2194,9 @@ func DeepCopy_v1_PodCondition(in interface{}, out interface{}, c *conversion.Clo { in := in.(*PodCondition) out := out.(*PodCondition) - out.Type = in.Type - out.Status = in.Status + *out = *in out.LastProbeTime = in.LastProbeTime.DeepCopy() out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - out.Reason = in.Reason - out.Message = in.Message return nil } } @@ -2436,18 +2205,11 @@ func DeepCopy_v1_PodExecOptions(in interface{}, out interface{}, c *conversion.C { in := in.(*PodExecOptions) out := out.(*PodExecOptions) - out.TypeMeta = in.TypeMeta - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container + *out = *in if in.Command != nil { in, out := &in.Command, &out.Command *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Command = nil } return nil } @@ -2457,8 +2219,7 @@ func DeepCopy_v1_PodList(in interface{}, out interface{}, c *conversion.Cloner) { in := in.(*PodList) out := out.(*PodList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Pod, len(*in)) @@ -2467,8 +2228,6 @@ func DeepCopy_v1_PodList(in interface{}, out interface{}, c *conversion.Cloner) return err } } - } else { - out.Items = nil } return nil } @@ -2478,38 +2237,40 @@ func DeepCopy_v1_PodLogOptions(in interface{}, out interface{}, c *conversion.Cl { in := in.(*PodLogOptions) out := out.(*PodLogOptions) - out.TypeMeta = in.TypeMeta - out.Container = in.Container - out.Follow = in.Follow - out.Previous = in.Previous + *out = *in if in.SinceSeconds != nil { in, out := &in.SinceSeconds, &out.SinceSeconds *out = new(int64) **out = **in - } else { - out.SinceSeconds = nil } if in.SinceTime != nil { in, out := &in.SinceTime, &out.SinceTime - *out = new(unversioned.Time) + *out = new(meta_v1.Time) **out = (*in).DeepCopy() - } else { - out.SinceTime = nil } - out.Timestamps = in.Timestamps if in.TailLines != nil { in, out := &in.TailLines, &out.TailLines *out = new(int64) **out = **in - } else { - out.TailLines = nil } if in.LimitBytes != nil { in, out := &in.LimitBytes, &out.LimitBytes *out = new(int64) **out = **in - } else { - out.LimitBytes = nil + } + return nil + } +} + +func DeepCopy_v1_PodPortForwardOptions(in interface{}, out 
interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPortForwardOptions) + out := out.(*PodPortForwardOptions) + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]int32, len(*in)) + copy(*out, *in) } return nil } @@ -2519,8 +2280,7 @@ func DeepCopy_v1_PodProxyOptions(in interface{}, out interface{}, c *conversion. { in := in.(*PodProxyOptions) out := out.(*PodProxyOptions) - out.TypeMeta = in.TypeMeta - out.Path = in.Path + *out = *in return nil } } @@ -2529,40 +2289,31 @@ func DeepCopy_v1_PodSecurityContext(in interface{}, out interface{}, c *conversi { in := in.(*PodSecurityContext) out := out.(*PodSecurityContext) + *out = *in if in.SELinuxOptions != nil { in, out := &in.SELinuxOptions, &out.SELinuxOptions *out = new(SELinuxOptions) **out = **in - } else { - out.SELinuxOptions = nil } if in.RunAsUser != nil { in, out := &in.RunAsUser, &out.RunAsUser *out = new(int64) **out = **in - } else { - out.RunAsUser = nil } if in.RunAsNonRoot != nil { in, out := &in.RunAsNonRoot, &out.RunAsNonRoot *out = new(bool) **out = **in - } else { - out.RunAsNonRoot = nil } if in.SupplementalGroups != nil { in, out := &in.SupplementalGroups, &out.SupplementalGroups *out = make([]int64, len(*in)) copy(*out, *in) - } else { - out.SupplementalGroups = nil } if in.FSGroup != nil { in, out := &in.FSGroup, &out.FSGroup *out = new(int64) **out = **in - } else { - out.FSGroup = nil } return nil } @@ -2572,14 +2323,14 @@ func DeepCopy_v1_PodSignature(in interface{}, out interface{}, c *conversion.Clo { in := in.(*PodSignature) out := out.(*PodSignature) + *out = *in if in.PodController != nil { in, out := &in.PodController, &out.PodController - *out = new(OwnerReference) - if err := DeepCopy_v1_OwnerReference(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*meta_v1.OwnerReference) } - } else { - out.PodController = nil } return nil } @@ -2589,6 +2340,7 @@ func DeepCopy_v1_PodSpec(in interface{}, out interface{}, c *conversion.Cloner) { in := in.(*PodSpec) out := out.(*PodSpec) + *out = *in if in.Volumes != nil { in, out := &in.Volumes, &out.Volumes *out = make([]Volume, len(*in)) @@ -2597,8 +2349,6 @@ func DeepCopy_v1_PodSpec(in interface{}, out interface{}, c *conversion.Cloner) return err } } - } else { - out.Volumes = nil } if in.InitContainers != nil { in, out := &in.InitContainers, &out.InitContainers @@ -2608,8 +2358,6 @@ func DeepCopy_v1_PodSpec(in interface{}, out interface{}, c *conversion.Cloner) return err } } - } else { - out.InitContainers = nil } if in.Containers != nil { in, out := &in.Containers, &out.Containers @@ -2619,60 +2367,57 @@ func DeepCopy_v1_PodSpec(in interface{}, out interface{}, c *conversion.Cloner) return err } } - } else { - out.Containers = nil } - out.RestartPolicy = in.RestartPolicy if in.TerminationGracePeriodSeconds != nil { in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds *out = new(int64) **out = **in - } else { - out.TerminationGracePeriodSeconds = nil } if in.ActiveDeadlineSeconds != nil { in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds *out = new(int64) **out = **in - } else { - out.ActiveDeadlineSeconds = nil } - out.DNSPolicy = in.DNSPolicy if in.NodeSelector != nil { in, out := &in.NodeSelector, &out.NodeSelector *out = make(map[string]string) for key, val := range *in { (*out)[key] = val } - } else { - out.NodeSelector = nil - } - out.ServiceAccountName = in.ServiceAccountName - out.DeprecatedServiceAccount = 
in.DeprecatedServiceAccount - out.NodeName = in.NodeName - out.HostNetwork = in.HostNetwork - out.HostPID = in.HostPID - out.HostIPC = in.HostIPC + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } if in.SecurityContext != nil { in, out := &in.SecurityContext, &out.SecurityContext *out = new(PodSecurityContext) if err := DeepCopy_v1_PodSecurityContext(*in, *out, c); err != nil { return err } - } else { - out.SecurityContext = nil } if in.ImagePullSecrets != nil { in, out := &in.ImagePullSecrets, &out.ImagePullSecrets *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(Affinity) + if err := DeepCopy_v1_Affinity(*in, *out, c); err != nil { + return err + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]Toleration, len(*in)) for i := range *in { - (*out)[i] = (*in)[i] + if err := DeepCopy_v1_Toleration(&(*in)[i], &(*out)[i], c); err != nil { + return err + } } - } else { - out.ImagePullSecrets = nil } - out.Hostname = in.Hostname - out.Subdomain = in.Subdomain return nil } } @@ -2681,7 +2426,7 @@ func DeepCopy_v1_PodStatus(in interface{}, out interface{}, c *conversion.Cloner { in := in.(*PodStatus) out := out.(*PodStatus) - out.Phase = in.Phase + *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]PodCondition, len(*in)) @@ -2690,19 +2435,11 @@ func DeepCopy_v1_PodStatus(in interface{}, out interface{}, c *conversion.Cloner return err } } - } else { - out.Conditions = nil } - out.Message = in.Message - out.Reason = in.Reason - out.HostIP = in.HostIP - out.PodIP = in.PodIP if in.StartTime != nil { in, out := &in.StartTime, &out.StartTime - *out = new(unversioned.Time) + *out = new(meta_v1.Time) **out = (*in).DeepCopy() - } else { - out.StartTime = nil } if in.InitContainerStatuses != nil { in, out := &in.InitContainerStatuses, &out.InitContainerStatuses @@ -2712,8 +2449,6 @@ func DeepCopy_v1_PodStatus(in interface{}, out interface{}, c *conversion.Cloner return err } } - } else { - out.InitContainerStatuses = nil } if in.ContainerStatuses != nil { in, out := &in.ContainerStatuses, &out.ContainerStatuses @@ -2723,8 +2458,6 @@ func DeepCopy_v1_PodStatus(in interface{}, out interface{}, c *conversion.Cloner return err } } - } else { - out.ContainerStatuses = nil } return nil } @@ -2734,9 +2467,11 @@ func DeepCopy_v1_PodStatusResult(in interface{}, out interface{}, c *conversion. 
{ in := in.(*PodStatusResult) out := out.(*PodStatusResult) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } if err := DeepCopy_v1_PodStatus(&in.Status, &out.Status, c); err != nil { return err @@ -2749,9 +2484,11 @@ func DeepCopy_v1_PodTemplate(in interface{}, out interface{}, c *conversion.Clon { in := in.(*PodTemplate) out := out.(*PodTemplate) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } if err := DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { return err @@ -2764,8 +2501,7 @@ func DeepCopy_v1_PodTemplateList(in interface{}, out interface{}, c *conversion. { in := in.(*PodTemplateList) out := out.(*PodTemplateList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodTemplate, len(*in)) @@ -2774,8 +2510,6 @@ func DeepCopy_v1_PodTemplateList(in interface{}, out interface{}, c *conversion. return err } } - } else { - out.Items = nil } return nil } @@ -2785,8 +2519,11 @@ func DeepCopy_v1_PodTemplateSpec(in interface{}, out interface{}, c *conversion. { in := in.(*PodTemplateSpec) out := out.(*PodTemplateSpec) - if err := DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } if err := DeepCopy_v1_PodSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -2795,16 +2532,24 @@ func DeepCopy_v1_PodTemplateSpec(in interface{}, out interface{}, c *conversion. 
} } +func DeepCopy_v1_PortworxVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PortworxVolumeSource) + out := out.(*PortworxVolumeSource) + *out = *in + return nil + } +} + func DeepCopy_v1_Preconditions(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*Preconditions) out := out.(*Preconditions) + *out = *in if in.UID != nil { in, out := &in.UID, &out.UID *out = new(types.UID) **out = **in - } else { - out.UID = nil } return nil } @@ -2814,12 +2559,11 @@ func DeepCopy_v1_PreferAvoidPodsEntry(in interface{}, out interface{}, c *conver { in := in.(*PreferAvoidPodsEntry) out := out.(*PreferAvoidPodsEntry) + *out = *in if err := DeepCopy_v1_PodSignature(&in.PodSignature, &out.PodSignature, c); err != nil { return err } out.EvictionTime = in.EvictionTime.DeepCopy() - out.Reason = in.Reason - out.Message = in.Message return nil } } @@ -2828,7 +2572,7 @@ func DeepCopy_v1_PreferredSchedulingTerm(in interface{}, out interface{}, c *con { in := in.(*PreferredSchedulingTerm) out := out.(*PreferredSchedulingTerm) - out.Weight = in.Weight + *out = *in if err := DeepCopy_v1_NodeSelectorTerm(&in.Preference, &out.Preference, c); err != nil { return err } @@ -2840,14 +2584,33 @@ func DeepCopy_v1_Probe(in interface{}, out interface{}, c *conversion.Cloner) er { in := in.(*Probe) out := out.(*Probe) + *out = *in if err := DeepCopy_v1_Handler(&in.Handler, &out.Handler, c); err != nil { return err } - out.InitialDelaySeconds = in.InitialDelaySeconds - out.TimeoutSeconds = in.TimeoutSeconds - out.PeriodSeconds = in.PeriodSeconds - out.SuccessThreshold = in.SuccessThreshold - out.FailureThreshold = in.FailureThreshold + return nil + } +} + +func DeepCopy_v1_ProjectedVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ProjectedVolumeSource) + out := out.(*ProjectedVolumeSource) + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]VolumeProjection, len(*in)) + for i := range *in { + if err := DeepCopy_v1_VolumeProjection(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } return nil } } @@ -2856,11 +2619,7 @@ func DeepCopy_v1_QuobyteVolumeSource(in interface{}, out interface{}, c *convers { in := in.(*QuobyteVolumeSource) out := out.(*QuobyteVolumeSource) - out.Registry = in.Registry - out.Volume = in.Volume - out.ReadOnly = in.ReadOnly - out.User = in.User - out.Group = in.Group + *out = *in return nil } } @@ -2869,26 +2628,17 @@ func DeepCopy_v1_RBDVolumeSource(in interface{}, out interface{}, c *conversion. { in := in.(*RBDVolumeSource) out := out.(*RBDVolumeSource) + *out = *in if in.CephMonitors != nil { in, out := &in.CephMonitors, &out.CephMonitors *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.CephMonitors = nil } - out.RBDImage = in.RBDImage - out.FSType = in.FSType - out.RBDPool = in.RBDPool - out.RadosUser = in.RadosUser - out.Keyring = in.Keyring if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef *out = new(LocalObjectReference) **out = **in - } else { - out.SecretRef = nil } - out.ReadOnly = in.ReadOnly return nil } } @@ -2897,17 +2647,16 @@ func DeepCopy_v1_RangeAllocation(in interface{}, out interface{}, c *conversion. 
{ in := in.(*RangeAllocation) out := out.(*RangeAllocation) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } - out.Range = in.Range if in.Data != nil { in, out := &in.Data, &out.Data *out = make([]byte, len(*in)) copy(*out, *in) - } else { - out.Data = nil } return nil } @@ -2917,9 +2666,11 @@ func DeepCopy_v1_ReplicationController(in interface{}, out interface{}, c *conve { in := in.(*ReplicationController) out := out.(*ReplicationController) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } if err := DeepCopy_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -2935,11 +2686,8 @@ func DeepCopy_v1_ReplicationControllerCondition(in interface{}, out interface{}, { in := in.(*ReplicationControllerCondition) out := out.(*ReplicationControllerCondition) - out.Type = in.Type - out.Status = in.Status + *out = *in out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - out.Reason = in.Reason - out.Message = in.Message return nil } } @@ -2948,8 +2696,7 @@ func DeepCopy_v1_ReplicationControllerList(in interface{}, out interface{}, c *c { in := in.(*ReplicationControllerList) out := out.(*ReplicationControllerList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicationController, len(*in)) @@ -2958,8 +2705,6 @@ func DeepCopy_v1_ReplicationControllerList(in interface{}, out interface{}, c *c return err } } - } else { - out.Items = nil } return nil } @@ -2969,22 +2714,18 @@ func DeepCopy_v1_ReplicationControllerSpec(in interface{}, out interface{}, c *c { in := in.(*ReplicationControllerSpec) out := out.(*ReplicationControllerSpec) + *out = *in if in.Replicas != nil { in, out := &in.Replicas, &out.Replicas *out = new(int32) **out = **in - } else { - out.Replicas = nil } - out.MinReadySeconds = in.MinReadySeconds if in.Selector != nil { in, out := &in.Selector, &out.Selector *out = make(map[string]string) for key, val := range *in { (*out)[key] = val } - } else { - out.Selector = nil } if in.Template != nil { in, out := &in.Template, &out.Template @@ -2992,8 +2733,6 @@ func DeepCopy_v1_ReplicationControllerSpec(in interface{}, out interface{}, c *c if err := DeepCopy_v1_PodTemplateSpec(*in, *out, c); err != nil { return err } - } else { - out.Template = nil } return nil } @@ -3003,11 +2742,7 @@ func DeepCopy_v1_ReplicationControllerStatus(in interface{}, out interface{}, c { in := in.(*ReplicationControllerStatus) out := out.(*ReplicationControllerStatus) - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ReadyReplicas = in.ReadyReplicas - out.AvailableReplicas = in.AvailableReplicas - out.ObservedGeneration = in.ObservedGeneration + *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]ReplicationControllerCondition, len(*in)) @@ -3016,8 +2751,6 @@ func DeepCopy_v1_ReplicationControllerStatus(in interface{}, out interface{}, c return err } } - } else { - out.Conditions = nil } return nil } @@ -3027,8 +2760,7 @@ func DeepCopy_v1_ResourceFieldSelector(in interface{}, out interface{}, c 
*conve { in := in.(*ResourceFieldSelector) out := out.(*ResourceFieldSelector) - out.ContainerName = in.ContainerName - out.Resource = in.Resource + *out = *in out.Divisor = in.Divisor.DeepCopy() return nil } @@ -3038,9 +2770,11 @@ func DeepCopy_v1_ResourceQuota(in interface{}, out interface{}, c *conversion.Cl { in := in.(*ResourceQuota) out := out.(*ResourceQuota) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } if err := DeepCopy_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -3056,8 +2790,7 @@ func DeepCopy_v1_ResourceQuotaList(in interface{}, out interface{}, c *conversio { in := in.(*ResourceQuotaList) out := out.(*ResourceQuotaList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ResourceQuota, len(*in)) @@ -3066,8 +2799,6 @@ func DeepCopy_v1_ResourceQuotaList(in interface{}, out interface{}, c *conversio return err } } - } else { - out.Items = nil } return nil } @@ -3077,23 +2808,18 @@ func DeepCopy_v1_ResourceQuotaSpec(in interface{}, out interface{}, c *conversio { in := in.(*ResourceQuotaSpec) out := out.(*ResourceQuotaSpec) + *out = *in if in.Hard != nil { in, out := &in.Hard, &out.Hard *out = make(ResourceList) for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Hard = nil } if in.Scopes != nil { in, out := &in.Scopes, &out.Scopes *out = make([]ResourceQuotaScope, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Scopes = nil + copy(*out, *in) } return nil } @@ -3103,14 +2829,13 @@ func DeepCopy_v1_ResourceQuotaStatus(in interface{}, out interface{}, c *convers { in := in.(*ResourceQuotaStatus) out := out.(*ResourceQuotaStatus) + *out = *in if in.Hard != nil { in, out := &in.Hard, &out.Hard *out = make(ResourceList) for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Hard = nil } if in.Used != nil { in, out := &in.Used, &out.Used @@ -3118,8 +2843,6 @@ func DeepCopy_v1_ResourceQuotaStatus(in interface{}, out interface{}, c *convers for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Used = nil } return nil } @@ -3129,14 +2852,13 @@ func DeepCopy_v1_ResourceRequirements(in interface{}, out interface{}, c *conver { in := in.(*ResourceRequirements) out := out.(*ResourceRequirements) + *out = *in if in.Limits != nil { in, out := &in.Limits, &out.Limits *out = make(ResourceList) for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Limits = nil } if in.Requests != nil { in, out := &in.Requests, &out.Requests @@ -3144,8 +2866,6 @@ func DeepCopy_v1_ResourceRequirements(in interface{}, out interface{}, c *conver for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Requests = nil } return nil } @@ -3155,10 +2875,21 @@ func DeepCopy_v1_SELinuxOptions(in interface{}, out interface{}, c *conversion.C { in := in.(*SELinuxOptions) out := out.(*SELinuxOptions) - out.User = in.User - out.Role = in.Role - out.Type = in.Type - out.Level = in.Level + *out = *in + return nil + } +} + +func DeepCopy_v1_ScaleIOVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleIOVolumeSource) + out := out.(*ScaleIOVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, 
&out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } return nil } } @@ -3167,9 +2898,11 @@ func DeepCopy_v1_Secret(in interface{}, out interface{}, c *conversion.Cloner) e { in := in.(*Secret) out := out.(*Secret) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } if in.Data != nil { in, out := &in.Data, &out.Data @@ -3181,8 +2914,6 @@ func DeepCopy_v1_Secret(in interface{}, out interface{}, c *conversion.Cloner) e (*out)[key] = *newVal.(*[]byte) } } - } else { - out.Data = nil } if in.StringData != nil { in, out := &in.StringData, &out.StringData @@ -3190,10 +2921,21 @@ func DeepCopy_v1_Secret(in interface{}, out interface{}, c *conversion.Cloner) e for key, val := range *in { (*out)[key] = val } - } else { - out.StringData = nil } - out.Type = in.Type + return nil + } +} + +func DeepCopy_v1_SecretEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretEnvSource) + out := out.(*SecretEnvSource) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } return nil } } @@ -3202,8 +2944,12 @@ func DeepCopy_v1_SecretKeySelector(in interface{}, out interface{}, c *conversio { in := in.(*SecretKeySelector) out := out.(*SecretKeySelector) - out.LocalObjectReference = in.LocalObjectReference - out.Key = in.Key + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } return nil } } @@ -3212,8 +2958,7 @@ func DeepCopy_v1_SecretList(in interface{}, out interface{}, c *conversion.Clone { in := in.(*SecretList) out := out.(*SecretList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Secret, len(*in)) @@ -3222,8 +2967,29 @@ func DeepCopy_v1_SecretList(in interface{}, out interface{}, c *conversion.Clone return err } } - } else { - out.Items = nil + } + return nil + } +} + +func DeepCopy_v1_SecretProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretProjection) + out := out.(*SecretProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_v1_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in } return nil } @@ -3233,7 +2999,7 @@ func DeepCopy_v1_SecretVolumeSource(in interface{}, out interface{}, c *conversi { in := in.(*SecretVolumeSource) out := out.(*SecretVolumeSource) - out.SecretName = in.SecretName + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]KeyToPath, len(*in)) @@ -3242,15 +3008,16 @@ func DeepCopy_v1_SecretVolumeSource(in interface{}, out interface{}, c *conversi return err } } - } else { - out.Items = nil } if in.DefaultMode != nil { in, out := &in.DefaultMode, &out.DefaultMode *out = new(int32) **out = **in - } else { - out.DefaultMode = nil + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in } return nil } @@ -3260,49 +3027,38 @@ func DeepCopy_v1_SecurityContext(in interface{}, out interface{}, c *conversion. 
{ in := in.(*SecurityContext) out := out.(*SecurityContext) + *out = *in if in.Capabilities != nil { in, out := &in.Capabilities, &out.Capabilities *out = new(Capabilities) if err := DeepCopy_v1_Capabilities(*in, *out, c); err != nil { return err } - } else { - out.Capabilities = nil } if in.Privileged != nil { in, out := &in.Privileged, &out.Privileged *out = new(bool) **out = **in - } else { - out.Privileged = nil } if in.SELinuxOptions != nil { in, out := &in.SELinuxOptions, &out.SELinuxOptions *out = new(SELinuxOptions) **out = **in - } else { - out.SELinuxOptions = nil } if in.RunAsUser != nil { in, out := &in.RunAsUser, &out.RunAsUser *out = new(int64) **out = **in - } else { - out.RunAsUser = nil } if in.RunAsNonRoot != nil { in, out := &in.RunAsNonRoot, &out.RunAsNonRoot *out = new(bool) **out = **in - } else { - out.RunAsNonRoot = nil } if in.ReadOnlyRootFilesystem != nil { in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem *out = new(bool) **out = **in - } else { - out.ReadOnlyRootFilesystem = nil } return nil } @@ -3312,8 +3068,7 @@ func DeepCopy_v1_SerializedReference(in interface{}, out interface{}, c *convers { in := in.(*SerializedReference) out := out.(*SerializedReference) - out.TypeMeta = in.TypeMeta - out.Reference = in.Reference + *out = *in return nil } } @@ -3322,9 +3077,11 @@ func DeepCopy_v1_Service(in interface{}, out interface{}, c *conversion.Cloner) { in := in.(*Service) out := out.(*Service) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } if err := DeepCopy_v1_ServiceSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -3340,27 +3097,26 @@ func DeepCopy_v1_ServiceAccount(in interface{}, out interface{}, c *conversion.C { in := in.(*ServiceAccount) out := out.(*ServiceAccount) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } if in.Secrets != nil { in, out := &in.Secrets, &out.Secrets *out = make([]ObjectReference, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Secrets = nil + copy(*out, *in) } if in.ImagePullSecrets != nil { in, out := &in.ImagePullSecrets, &out.ImagePullSecrets *out = make([]LocalObjectReference, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.ImagePullSecrets = nil + copy(*out, *in) + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in } return nil } @@ -3370,8 +3126,7 @@ func DeepCopy_v1_ServiceAccountList(in interface{}, out interface{}, c *conversi { in := in.(*ServiceAccountList) out := out.(*ServiceAccountList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ServiceAccount, len(*in)) @@ -3380,8 +3135,6 @@ func DeepCopy_v1_ServiceAccountList(in interface{}, out interface{}, c *conversi return err } } - } else { - out.Items = nil } return nil } @@ -3391,8 +3144,7 @@ func DeepCopy_v1_ServiceList(in interface{}, out interface{}, c *conversion.Clon { in := in.(*ServiceList) out := out.(*ServiceList) - out.TypeMeta = in.TypeMeta - out.ListMeta = 
in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Service, len(*in)) @@ -3401,8 +3153,6 @@ func DeepCopy_v1_ServiceList(in interface{}, out interface{}, c *conversion.Clon return err } } - } else { - out.Items = nil } return nil } @@ -3412,11 +3162,7 @@ func DeepCopy_v1_ServicePort(in interface{}, out interface{}, c *conversion.Clon { in := in.(*ServicePort) out := out.(*ServicePort) - out.Name = in.Name - out.Protocol = in.Protocol - out.Port = in.Port - out.TargetPort = in.TargetPort - out.NodePort = in.NodePort + *out = *in return nil } } @@ -3425,8 +3171,7 @@ func DeepCopy_v1_ServiceProxyOptions(in interface{}, out interface{}, c *convers { in := in.(*ServiceProxyOptions) out := out.(*ServiceProxyOptions) - out.TypeMeta = in.TypeMeta - out.Path = in.Path + *out = *in return nil } } @@ -3435,14 +3180,11 @@ func DeepCopy_v1_ServiceSpec(in interface{}, out interface{}, c *conversion.Clon { in := in.(*ServiceSpec) out := out.(*ServiceSpec) + *out = *in if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]ServicePort, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Ports = nil + copy(*out, *in) } if in.Selector != nil { in, out := &in.Selector, &out.Selector @@ -3450,35 +3192,22 @@ func DeepCopy_v1_ServiceSpec(in interface{}, out interface{}, c *conversion.Clon for key, val := range *in { (*out)[key] = val } - } else { - out.Selector = nil } - out.ClusterIP = in.ClusterIP - out.Type = in.Type if in.ExternalIPs != nil { in, out := &in.ExternalIPs, &out.ExternalIPs *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.ExternalIPs = nil } if in.DeprecatedPublicIPs != nil { in, out := &in.DeprecatedPublicIPs, &out.DeprecatedPublicIPs *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.DeprecatedPublicIPs = nil } - out.SessionAffinity = in.SessionAffinity - out.LoadBalancerIP = in.LoadBalancerIP if in.LoadBalancerSourceRanges != nil { in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.LoadBalancerSourceRanges = nil } - out.ExternalName = in.ExternalName return nil } } @@ -3487,6 +3216,7 @@ func DeepCopy_v1_ServiceStatus(in interface{}, out interface{}, c *conversion.Cl { in := in.(*ServiceStatus) out := out.(*ServiceStatus) + *out = *in if err := DeepCopy_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, c); err != nil { return err } @@ -3494,11 +3224,20 @@ func DeepCopy_v1_ServiceStatus(in interface{}, out interface{}, c *conversion.Cl } } +func DeepCopy_v1_Sysctl(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Sysctl) + out := out.(*Sysctl) + *out = *in + return nil + } +} + func DeepCopy_v1_TCPSocketAction(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*TCPSocketAction) out := out.(*TCPSocketAction) - out.Port = in.Port + *out = *in return nil } } @@ -3507,9 +3246,8 @@ func DeepCopy_v1_Taint(in interface{}, out interface{}, c *conversion.Cloner) er { in := in.(*Taint) out := out.(*Taint) - out.Key = in.Key - out.Value = in.Value - out.Effect = in.Effect + *out = *in + out.TimeAdded = in.TimeAdded.DeepCopy() return nil } } @@ -3518,10 +3256,12 @@ func DeepCopy_v1_Toleration(in interface{}, out interface{}, c *conversion.Clone { in := in.(*Toleration) out := out.(*Toleration) - out.Key = in.Key - out.Operator = in.Operator - out.Value = in.Value - out.Effect = in.Effect + *out = *in + if in.TolerationSeconds != nil { + in, out := 
&in.TolerationSeconds, &out.TolerationSeconds + *out = new(int64) + **out = **in + } return nil } } @@ -3530,7 +3270,7 @@ func DeepCopy_v1_Volume(in interface{}, out interface{}, c *conversion.Cloner) e { in := in.(*Volume) out := out.(*Volume) - out.Name = in.Name + *out = *in if err := DeepCopy_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, c); err != nil { return err } @@ -3542,10 +3282,37 @@ func DeepCopy_v1_VolumeMount(in interface{}, out interface{}, c *conversion.Clon { in := in.(*VolumeMount) out := out.(*VolumeMount) - out.Name = in.Name - out.ReadOnly = in.ReadOnly - out.MountPath = in.MountPath - out.SubPath = in.SubPath + *out = *in + return nil + } +} + +func DeepCopy_v1_VolumeProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeProjection) + out := out.(*VolumeProjection) + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretProjection) + if err := DeepCopy_v1_SecretProjection(*in, *out, c); err != nil { + return err + } + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIProjection) + if err := DeepCopy_v1_DownwardAPIProjection(*in, *out, c); err != nil { + return err + } + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapProjection) + if err := DeepCopy_v1_ConfigMapProjection(*in, *out, c); err != nil { + return err + } + } return nil } } @@ -3554,40 +3321,31 @@ func DeepCopy_v1_VolumeSource(in interface{}, out interface{}, c *conversion.Clo { in := in.(*VolumeSource) out := out.(*VolumeSource) + *out = *in if in.HostPath != nil { in, out := &in.HostPath, &out.HostPath *out = new(HostPathVolumeSource) **out = **in - } else { - out.HostPath = nil } if in.EmptyDir != nil { in, out := &in.EmptyDir, &out.EmptyDir *out = new(EmptyDirVolumeSource) **out = **in - } else { - out.EmptyDir = nil } if in.GCEPersistentDisk != nil { in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk *out = new(GCEPersistentDiskVolumeSource) **out = **in - } else { - out.GCEPersistentDisk = nil } if in.AWSElasticBlockStore != nil { in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore *out = new(AWSElasticBlockStoreVolumeSource) **out = **in - } else { - out.AWSElasticBlockStore = nil } if in.GitRepo != nil { in, out := &in.GitRepo, &out.GitRepo *out = new(GitRepoVolumeSource) **out = **in - } else { - out.GitRepo = nil } if in.Secret != nil { in, out := &in.Secret, &out.Secret @@ -3595,36 +3353,28 @@ func DeepCopy_v1_VolumeSource(in interface{}, out interface{}, c *conversion.Clo if err := DeepCopy_v1_SecretVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.Secret = nil } if in.NFS != nil { in, out := &in.NFS, &out.NFS *out = new(NFSVolumeSource) **out = **in - } else { - out.NFS = nil } if in.ISCSI != nil { in, out := &in.ISCSI, &out.ISCSI *out = new(ISCSIVolumeSource) - **out = **in - } else { - out.ISCSI = nil + if err := DeepCopy_v1_ISCSIVolumeSource(*in, *out, c); err != nil { + return err + } } if in.Glusterfs != nil { in, out := &in.Glusterfs, &out.Glusterfs *out = new(GlusterfsVolumeSource) **out = **in - } else { - out.Glusterfs = nil } if in.PersistentVolumeClaim != nil { in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim *out = new(PersistentVolumeClaimVolumeSource) **out = **in - } else { - out.PersistentVolumeClaim = nil } if in.RBD != nil { in, out := &in.RBD, &out.RBD @@ -3632,8 +3382,6 @@ func DeepCopy_v1_VolumeSource(in interface{}, out interface{}, c 
*conversion.Clo if err := DeepCopy_v1_RBDVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.RBD = nil } if in.FlexVolume != nil { in, out := &in.FlexVolume, &out.FlexVolume @@ -3641,15 +3389,11 @@ func DeepCopy_v1_VolumeSource(in interface{}, out interface{}, c *conversion.Clo if err := DeepCopy_v1_FlexVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.FlexVolume = nil } if in.Cinder != nil { in, out := &in.Cinder, &out.Cinder *out = new(CinderVolumeSource) **out = **in - } else { - out.Cinder = nil } if in.CephFS != nil { in, out := &in.CephFS, &out.CephFS @@ -3657,15 +3401,11 @@ func DeepCopy_v1_VolumeSource(in interface{}, out interface{}, c *conversion.Clo if err := DeepCopy_v1_CephFSVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.CephFS = nil } if in.Flocker != nil { in, out := &in.Flocker, &out.Flocker *out = new(FlockerVolumeSource) **out = **in - } else { - out.Flocker = nil } if in.DownwardAPI != nil { in, out := &in.DownwardAPI, &out.DownwardAPI @@ -3673,8 +3413,6 @@ func DeepCopy_v1_VolumeSource(in interface{}, out interface{}, c *conversion.Clo if err := DeepCopy_v1_DownwardAPIVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.DownwardAPI = nil } if in.FC != nil { in, out := &in.FC, &out.FC @@ -3682,15 +3420,11 @@ func DeepCopy_v1_VolumeSource(in interface{}, out interface{}, c *conversion.Clo if err := DeepCopy_v1_FCVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.FC = nil } if in.AzureFile != nil { in, out := &in.AzureFile, &out.AzureFile *out = new(AzureFileVolumeSource) **out = **in - } else { - out.AzureFile = nil } if in.ConfigMap != nil { in, out := &in.ConfigMap, &out.ConfigMap @@ -3698,22 +3432,16 @@ func DeepCopy_v1_VolumeSource(in interface{}, out interface{}, c *conversion.Clo if err := DeepCopy_v1_ConfigMapVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.ConfigMap = nil } if in.VsphereVolume != nil { in, out := &in.VsphereVolume, &out.VsphereVolume *out = new(VsphereVirtualDiskVolumeSource) **out = **in - } else { - out.VsphereVolume = nil } if in.Quobyte != nil { in, out := &in.Quobyte, &out.Quobyte *out = new(QuobyteVolumeSource) **out = **in - } else { - out.Quobyte = nil } if in.AzureDisk != nil { in, out := &in.AzureDisk, &out.AzureDisk @@ -3721,15 +3449,30 @@ func DeepCopy_v1_VolumeSource(in interface{}, out interface{}, c *conversion.Clo if err := DeepCopy_v1_AzureDiskVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.AzureDisk = nil } if in.PhotonPersistentDisk != nil { in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk *out = new(PhotonPersistentDiskVolumeSource) **out = **in - } else { - out.PhotonPersistentDisk = nil + } + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + *out = new(ProjectedVolumeSource) + if err := DeepCopy_v1_ProjectedVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOVolumeSource) + if err := DeepCopy_v1_ScaleIOVolumeSource(*in, *out, c); err != nil { + return err + } } return nil } @@ -3739,8 +3482,7 @@ func DeepCopy_v1_VsphereVirtualDiskVolumeSource(in interface{}, out interface{}, { in := in.(*VsphereVirtualDiskVolumeSource) out := out.(*VsphereVirtualDiskVolumeSource) - out.VolumePath = in.VolumePath - out.FSType = in.FSType 
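Aside for reviewers: the regenerated deep-copy functions above all follow the same shape now — start from a shallow `*out = *in`, then re-allocate only the pointer, slice, and map fields that are actually set, instead of the old field-by-field assignments with `else { out.X = nil }` branches. A minimal standalone sketch of that shape, using a hypothetical `Widget` type rather than the vendored API:

```go
// Illustrative sketch only; Widget is a hypothetical type, not part of this diff.
package main

import "fmt"

type Widget struct {
	Name     string
	Optional *bool    // pointer fields must be re-allocated
	Items    []string // slices of plain values can use copy()
}

// DeepCopyWidget starts with a shallow copy (*out = *in) and then replaces
// every pointer/slice field so that in and out share no memory.
func DeepCopyWidget(in, out *Widget) {
	*out = *in
	if in.Optional != nil {
		out.Optional = new(bool)
		*out.Optional = *in.Optional
	}
	if in.Items != nil {
		out.Items = make([]string, len(in.Items))
		copy(out.Items, in.Items)
	}
}

func main() {
	b := true
	src := Widget{Name: "w", Optional: &b, Items: []string{"a"}}
	var dst Widget
	DeepCopyWidget(&src, &dst)
	*src.Optional = false                 // does not affect dst
	fmt.Println(*dst.Optional, dst.Items) // true [a]
}
```

Because the shallow copy already leaves unset pointers nil, the old `else { out.X = nil }` branches become unnecessary, which is why the generated diff above is mostly deletions.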
+ *out = *in return nil } } @@ -3749,7 +3491,7 @@ func DeepCopy_v1_WeightedPodAffinityTerm(in interface{}, out interface{}, c *con { in := in.(*WeightedPodAffinityTerm) out := out.(*WeightedPodAffinityTerm) - out.Weight = in.Weight + *out = *in if err := DeepCopy_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, c); err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.defaults.go index c9f34d1999..121b39185c 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ limitations under the License. package v1 import ( - runtime "k8s.io/kubernetes/pkg/runtime" + runtime "k8s.io/apimachinery/pkg/runtime" ) // RegisterDefaults adds defaulters functions to the given scheme. @@ -137,6 +137,9 @@ func SetObjectDefaults_PersistentVolume(in *PersistentVolume) { if in.Spec.PersistentVolumeSource.AzureDisk != nil { SetDefaults_AzureDiskVolumeSource(in.Spec.PersistentVolumeSource.AzureDisk) } + if in.Spec.PersistentVolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(in.Spec.PersistentVolumeSource.ScaleIO) + } } func SetObjectDefaults_PersistentVolumeClaim(in *PersistentVolumeClaim) { @@ -190,6 +193,23 @@ func SetObjectDefaults_Pod(in *Pod) { if a.VolumeSource.AzureDisk != nil { SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) } + if a.VolumeSource.Projected != nil { + SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } } for i := range in.Spec.InitContainers { a := &in.Spec.InitContainers[i] @@ -321,6 +341,23 @@ func SetObjectDefaults_PodTemplate(in *PodTemplate) { if a.VolumeSource.AzureDisk != nil { SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) } + if a.VolumeSource.Projected != nil { + SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } } for i := range in.Template.Spec.InitContainers { a := &in.Template.Spec.InitContainers[i] @@ -446,6 +483,23 @@ func SetObjectDefaults_ReplicationController(in *ReplicationController) { if a.VolumeSource.AzureDisk != nil { SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) } + if a.VolumeSource.Projected != nil { + SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + 
SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } } for i := range in.Spec.Template.Spec.InitContainers { a := &in.Spec.Template.Spec.InitContainers[i] diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/api/validation/BUILD deleted file mode 100644 index 63169c20d2..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/BUILD +++ /dev/null @@ -1,87 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "events.go", - "schema.go", - "validation.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/endpoints:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/pod:go_default_library", - "//pkg/api/resource:go_default_library", - "//pkg/api/service:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/unversioned/validation:go_default_library", - "//pkg/api/util:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/capabilities:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/security/apparmor:go_default_library", - "//pkg/util/config:go_default_library", - "//pkg/util/errors:go_default_library", - "//pkg/util/intstr:go_default_library", - "//pkg/util/sets:go_default_library", - "//pkg/util/validation:go_default_library", - "//pkg/util/validation/field:go_default_library", - "//pkg/util/yaml:go_default_library", - "//vendor:github.com/emicklei/go-restful/swagger", - "//vendor:github.com/exponent-io/jsonpath", - "//vendor:github.com/golang/glog", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "events_test.go", - "schema_test.go", - "validation_test.go", - ], - data = [ - "testdata/v1/invalidPod.yaml", - "testdata/v1/invalidPod1.json", - "testdata/v1/invalidPod2.json", - "testdata/v1/invalidPod3.json", - "testdata/v1/invalidPod4.yaml", - "testdata/v1/validPod.yaml", - "//api/swagger-spec", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/resource:go_default_library", - "//pkg/api/service:go_default_library", - "//pkg/api/testapi:go_default_library", - "//pkg/api/testing:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/capabilities:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/security/apparmor:go_default_library", - "//pkg/util/intstr:go_default_library", - "//pkg/util/sets:go_default_library", - "//pkg/util/validation/field:go_default_library", - "//pkg/util/yaml:go_default_library", - "//vendor:github.com/ghodss/yaml", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go b/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go deleted file mode 100644 index f17a15cf9c..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
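Aside: the new hunks in zz_generated.defaults.go above walk every pod volume and descend into the Projected/ScaleIO sources only when the corresponding pointer is non-nil. A rough sketch of that walking pattern, with hypothetical `PodLike`/`ProjectedSource` types rather than the vendored structs:

```go
// Illustrative sketch only; the types here are stand-ins, not the vendored API.
package main

import "fmt"

type FieldSelector struct{ APIVersion string }

type ProjectedSource struct{ FieldRef *FieldSelector }

type Volume struct{ Projected []ProjectedSource }

type PodLike struct{ Volumes []Volume }

func setDefaultsFieldSelector(f *FieldSelector) {
	if f.APIVersion == "" {
		f.APIVersion = "v1"
	}
}

// setObjectDefaults visits every volume by index (taking addresses so the
// defaults stick) and only descends into sources that are actually set,
// mirroring the nil guards in the generated SetObjectDefaults_* walkers above.
func setObjectDefaults(p *PodLike) {
	for i := range p.Volumes {
		v := &p.Volumes[i]
		for j := range v.Projected {
			s := &v.Projected[j]
			if s.FieldRef != nil {
				setDefaultsFieldSelector(s.FieldRef)
			}
		}
	}
}

func main() {
	p := PodLike{Volumes: []Volume{{Projected: []ProjectedSource{{FieldRef: &FieldSelector{}}}}}}
	setObjectDefaults(&p)
	fmt.Println(p.Volumes[0].Projected[0].FieldRef.APIVersion) // v1
}
```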
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package validation has functions for validating the correctness of api -// objects and explaining what is wrong with them when they aren't valid. -package validation diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/events.go b/vendor/k8s.io/kubernetes/pkg/api/validation/events.go deleted file mode 100644 index 589fe919f2..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/events.go +++ /dev/null @@ -1,80 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - apiutil "k8s.io/kubernetes/pkg/api/util" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/util/validation" - "k8s.io/kubernetes/pkg/util/validation/field" -) - -// ValidateEvent makes sure that the event makes sense. 
-func ValidateEvent(event *api.Event) field.ErrorList { - allErrs := field.ErrorList{} - - // Make sure event.Namespace and the involvedObject.Namespace agree - if len(event.InvolvedObject.Namespace) == 0 { - // event.Namespace must also be empty (or "default", for compatibility with old clients) - if event.Namespace != api.NamespaceNone && event.Namespace != api.NamespaceDefault { - allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace")) - } - } else { - // event namespace must match - if event.Namespace != event.InvolvedObject.Namespace { - allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace")) - } - } - - // For kinds we recognize, make sure involvedObject.Namespace is set for namespaced kinds - if namespaced, err := isNamespacedKind(event.InvolvedObject.Kind, event.InvolvedObject.APIVersion); err == nil { - if namespaced && len(event.InvolvedObject.Namespace) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("involvedObject", "namespace"), fmt.Sprintf("required for kind %s", event.InvolvedObject.Kind))) - } - if !namespaced && len(event.InvolvedObject.Namespace) > 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, fmt.Sprintf("not allowed for kind %s", event.InvolvedObject.Kind))) - } - } - - for _, msg := range validation.IsDNS1123Subdomain(event.Namespace) { - allErrs = append(allErrs, field.Invalid(field.NewPath("namespace"), event.Namespace, msg)) - } - return allErrs -} - -// Check whether the kind in groupVersion is scoped at the root of the api hierarchy -func isNamespacedKind(kind, groupVersion string) (bool, error) { - group := apiutil.GetGroup(groupVersion) - g, err := registered.Group(group) - if err != nil { - return false, err - } - restMapping, err := g.RESTMapper.RESTMapping(unversioned.GroupKind{Group: group, Kind: kind}, apiutil.GetVersion(groupVersion)) - if err != nil { - return false, err - } - scopeName := restMapping.Scope.Name() - if scopeName == meta.RESTScopeNameNamespace { - return true, nil - } - return false, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/path/BUILD b/vendor/k8s.io/kubernetes/pkg/api/validation/path/BUILD deleted file mode 100644 index aa242f2245..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/path/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["name.go"], - tags = ["automanaged"], -) - -go_test( - name = "go_default_test", - srcs = ["name_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/path/name.go b/vendor/k8s.io/kubernetes/pkg/api/validation/path/name.go deleted file mode 100644 index 981d9bb465..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/path/name.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package path - -import ( - "fmt" - "strings" -) - -// NameMayNotBe specifies strings that cannot be used as names specified as path segments (like the REST API or etcd store) -var NameMayNotBe = []string{".", ".."} - -// NameMayNotContain specifies substrings that cannot be used in names specified as path segments (like the REST API or etcd store) -var NameMayNotContain = []string{"/", "%"} - -// IsValidPathSegmentName validates the name can be safely encoded as a path segment -func IsValidPathSegmentName(name string) []string { - for _, illegalName := range NameMayNotBe { - if name == illegalName { - return []string{fmt.Sprintf(`may not be '%s'`, illegalName)} - } - } - - for _, illegalContent := range NameMayNotContain { - if strings.Contains(name, illegalContent) { - return []string{fmt.Sprintf(`may not contain '%s'`, illegalContent)} - } - } - - return nil -} - -// IsValidPathSegmentPrefix validates the name can be used as a prefix for a name which will be encoded as a path segment -// It does not check for exact matches with disallowed names, since an arbitrary suffix might make the name valid -func IsValidPathSegmentPrefix(name string) []string { - for _, illegalContent := range NameMayNotContain { - if strings.Contains(name, illegalContent) { - return []string{fmt.Sprintf(`may not contain '%s'`, illegalContent)} - } - } - - return nil -} - -// ValidatePathSegmentName validates the name can be safely encoded as a path segment -func ValidatePathSegmentName(name string, prefix bool) []string { - if prefix { - return IsValidPathSegmentPrefix(name) - } else { - return IsValidPathSegmentName(name) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/schema.go b/vendor/k8s.io/kubernetes/pkg/api/validation/schema.go deleted file mode 100644 index f694baeaeb..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/schema.go +++ /dev/null @@ -1,435 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
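Aside: the deleted path/name.go above rejects names that cannot be encoded as a single path segment (REST API paths, etcd keys). A self-contained copy of that check with a small usage example, with the package-level names renamed locally:

```go
// Standalone copy of the path-segment check from the deleted
// pkg/api/validation/path/name.go shown above.
package main

import (
	"fmt"
	"strings"
)

var (
	nameMayNotBe      = []string{".", ".."}
	nameMayNotContain = []string{"/", "%"}
)

// isValidPathSegmentName returns the reasons a name cannot be safely used
// as a single path segment, or nil if it is acceptable.
func isValidPathSegmentName(name string) []string {
	for _, illegal := range nameMayNotBe {
		if name == illegal {
			return []string{fmt.Sprintf("may not be '%s'", illegal)}
		}
	}
	for _, illegal := range nameMayNotContain {
		if strings.Contains(name, illegal) {
			return []string{fmt.Sprintf("may not contain '%s'", illegal)}
		}
	}
	return nil
}

func main() {
	for _, n := range []string{"valid-name", "..", "a/b"} {
		fmt.Printf("%-12q -> %v\n", n, isValidPathSegmentName(n))
	}
}
```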
-*/ - -package validation - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "regexp" - "strings" - - "github.com/emicklei/go-restful/swagger" - ejson "github.com/exponent-io/jsonpath" - "github.com/golang/glog" - apiutil "k8s.io/kubernetes/pkg/api/util" - "k8s.io/kubernetes/pkg/runtime" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/yaml" -) - -type InvalidTypeError struct { - ExpectedKind reflect.Kind - ObservedKind reflect.Kind - FieldName string -} - -func (i *InvalidTypeError) Error() string { - return fmt.Sprintf("expected type %s, for field %s, got %s", i.ExpectedKind.String(), i.FieldName, i.ObservedKind.String()) -} - -func NewInvalidTypeError(expected reflect.Kind, observed reflect.Kind, fieldName string) error { - return &InvalidTypeError{expected, observed, fieldName} -} - -// TypeNotFoundError is returned when specified type -// can not found in schema -type TypeNotFoundError string - -func (tnfe TypeNotFoundError) Error() string { - return fmt.Sprintf("couldn't find type: %s", string(tnfe)) -} - -// Schema is an interface that knows how to validate an API object serialized to a byte array. -type Schema interface { - ValidateBytes(data []byte) error -} - -type NullSchema struct{} - -func (NullSchema) ValidateBytes(data []byte) error { return nil } - -type NoDoubleKeySchema struct{} - -func (NoDoubleKeySchema) ValidateBytes(data []byte) error { - var list []error = nil - if err := validateNoDuplicateKeys(data, "metadata", "labels"); err != nil { - list = append(list, err) - } - if err := validateNoDuplicateKeys(data, "metadata", "annotations"); err != nil { - list = append(list, err) - } - return utilerrors.NewAggregate(list) -} - -func validateNoDuplicateKeys(data []byte, path ...string) error { - r := ejson.NewDecoder(bytes.NewReader(data)) - // This is Go being unfriendly. The 'path ...string' comes in as a - // []string, and SeekTo takes ...interface{}, so we can't just pass - // the path straight in, we have to copy it. *sigh* - ifacePath := []interface{}{} - for ix := range path { - ifacePath = append(ifacePath, path[ix]) - } - found, err := r.SeekTo(ifacePath...) - if err != nil { - return err - } - if !found { - return nil - } - seen := map[string]bool{} - for { - tok, err := r.Token() - if err != nil { - return err - } - switch t := tok.(type) { - case json.Delim: - if t.String() == "}" { - return nil - } - case ejson.KeyString: - if seen[string(t)] { - return fmt.Errorf("duplicate key: %s", string(t)) - } else { - seen[string(t)] = true - } - } - } -} - -type ConjunctiveSchema []Schema - -func (c ConjunctiveSchema) ValidateBytes(data []byte) error { - var list []error = nil - schemas := []Schema(c) - for ix := range schemas { - if err := schemas[ix].ValidateBytes(data); err != nil { - list = append(list, err) - } - } - return utilerrors.NewAggregate(list) -} - -type SwaggerSchema struct { - api swagger.ApiDeclaration - delegate Schema // For delegating to other api groups -} - -func NewSwaggerSchemaFromBytes(data []byte, factory Schema) (Schema, error) { - schema := &SwaggerSchema{} - err := json.Unmarshal(data, &schema.api) - if err != nil { - return nil, err - } - schema.delegate = factory - return schema, nil -} - -// validateList unpacks a list and validate every item in the list. -// It return nil if every item is ok. -// Otherwise it return an error list contain errors of every item. 
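Aside: `ConjunctiveSchema` above composes several `Schema` implementations over the same bytes and aggregates their failures. A minimal sketch of the same idea with hypothetical schemas, using the standard library's `errors.Join` (Go 1.20+) in place of `utilerrors.NewAggregate`:

```go
// Illustrative sketch only; wellFormedJSON and requireKind are hypothetical
// schemas, and errors.Join stands in for utilerrors.NewAggregate.
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type Schema interface {
	ValidateBytes(data []byte) error
}

type wellFormedJSON struct{}

func (wellFormedJSON) ValidateBytes(data []byte) error {
	var v interface{}
	return json.Unmarshal(data, &v)
}

type requireKind struct{}

func (requireKind) ValidateBytes(data []byte) error {
	var obj map[string]interface{}
	if err := json.Unmarshal(data, &obj); err != nil {
		return err
	}
	if _, ok := obj["kind"].(string); !ok {
		return fmt.Errorf("kind not set")
	}
	return nil
}

// ConjunctiveSchema runs every schema and reports all failures at once.
type ConjunctiveSchema []Schema

func (c ConjunctiveSchema) ValidateBytes(data []byte) error {
	var errs []error
	for _, s := range c {
		if err := s.ValidateBytes(data); err != nil {
			errs = append(errs, err)
		}
	}
	return errors.Join(errs...) // nil when no schema failed
}

func main() {
	schema := ConjunctiveSchema{wellFormedJSON{}, requireKind{}}
	fmt.Println(schema.ValidateBytes([]byte(`{"kind":"Pod"}`)))       // <nil>
	fmt.Println(schema.ValidateBytes([]byte(`{"apiVersion":"v1"}`))) // kind not set
}
```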
-func (s *SwaggerSchema) validateList(obj map[string]interface{}) []error { - items, exists := obj["items"] - if !exists { - return []error{fmt.Errorf("no items field in %#v", obj)} - } - return s.validateItems(items) -} - -func (s *SwaggerSchema) validateItems(items interface{}) []error { - allErrs := []error{} - itemList, ok := items.([]interface{}) - if !ok { - return append(allErrs, fmt.Errorf("items isn't a slice")) - } - for i, item := range itemList { - fields, ok := item.(map[string]interface{}) - if !ok { - allErrs = append(allErrs, fmt.Errorf("items[%d] isn't a map[string]interface{}", i)) - continue - } - groupVersion := fields["apiVersion"] - if groupVersion == nil { - allErrs = append(allErrs, fmt.Errorf("items[%d].apiVersion not set", i)) - continue - } - itemVersion, ok := groupVersion.(string) - if !ok { - allErrs = append(allErrs, fmt.Errorf("items[%d].apiVersion isn't string type", i)) - continue - } - if len(itemVersion) == 0 { - allErrs = append(allErrs, fmt.Errorf("items[%d].apiVersion is empty", i)) - } - kind := fields["kind"] - if kind == nil { - allErrs = append(allErrs, fmt.Errorf("items[%d].kind not set", i)) - continue - } - itemKind, ok := kind.(string) - if !ok { - allErrs = append(allErrs, fmt.Errorf("items[%d].kind isn't string type", i)) - continue - } - if len(itemKind) == 0 { - allErrs = append(allErrs, fmt.Errorf("items[%d].kind is empty", i)) - } - version := apiutil.GetVersion(itemVersion) - errs := s.ValidateObject(item, "", version+"."+itemKind) - if len(errs) >= 1 { - allErrs = append(allErrs, errs...) - } - } - - return allErrs -} - -func (s *SwaggerSchema) ValidateBytes(data []byte) error { - var obj interface{} - out, err := yaml.ToJSON(data) - if err != nil { - return err - } - data = out - if err := json.Unmarshal(data, &obj); err != nil { - return err - } - fields, ok := obj.(map[string]interface{}) - if !ok { - return fmt.Errorf("error in unmarshaling data %s", string(data)) - } - groupVersion := fields["apiVersion"] - if groupVersion == nil { - return fmt.Errorf("apiVersion not set") - } - if _, ok := groupVersion.(string); !ok { - return fmt.Errorf("apiVersion isn't string type") - } - kind := fields["kind"] - if kind == nil { - return fmt.Errorf("kind not set") - } - if _, ok := kind.(string); !ok { - return fmt.Errorf("kind isn't string type") - } - if strings.HasSuffix(kind.(string), "List") { - return utilerrors.NewAggregate(s.validateList(fields)) - } - version := apiutil.GetVersion(groupVersion.(string)) - allErrs := s.ValidateObject(obj, "", version+"."+kind.(string)) - if len(allErrs) == 1 { - return allErrs[0] - } - return utilerrors.NewAggregate(allErrs) -} - -func (s *SwaggerSchema) ValidateObject(obj interface{}, fieldName, typeName string) []error { - allErrs := []error{} - models := s.api.Models - model, ok := models.At(typeName) - - // Verify the api version matches. This is required for nested types with differing api versions because - // s.api only has schema for 1 api version (the parent object type's version). - // e.g. an extensions/v1beta1 Template embedding a /v1 Service requires the schema for the extensions/v1beta1 - // api to delegate to the schema for the /v1 api. - // Only do this for !ok objects so that cross ApiVersion vendored types take precedence. 
- if !ok && s.delegate != nil { - fields, mapOk := obj.(map[string]interface{}) - if !mapOk { - return append(allErrs, fmt.Errorf("field %s: expected object of type map[string]interface{}, but the actual type is %T", fieldName, obj)) - } - if delegated, err := s.delegateIfDifferentApiVersion(runtime.Unstructured{Object: fields}); delegated { - if err != nil { - allErrs = append(allErrs, err) - } - return allErrs - } - } - - if !ok { - return append(allErrs, TypeNotFoundError(typeName)) - } - properties := model.Properties - if len(properties.List) == 0 { - // The object does not have any sub-fields. - return nil - } - fields, ok := obj.(map[string]interface{}) - if !ok { - return append(allErrs, fmt.Errorf("field %s: expected object of type map[string]interface{}, but the actual type is %T", fieldName, obj)) - } - if len(fieldName) > 0 { - fieldName = fieldName + "." - } - // handle required fields - for _, requiredKey := range model.Required { - if _, ok := fields[requiredKey]; !ok { - allErrs = append(allErrs, fmt.Errorf("field %s: is required", requiredKey)) - } - } - for key, value := range fields { - details, ok := properties.At(key) - - // Special case for runtime.RawExtension and runtime.Objects because they always fail to validate - // This is because the actual values will be of some sub-type (e.g. Deployment) not the expected - // super-type (RawExtension) - if s.isGenericArray(details) { - errs := s.validateItems(value) - if len(errs) > 0 { - allErrs = append(allErrs, errs...) - } - continue - } - if !ok { - allErrs = append(allErrs, fmt.Errorf("found invalid field %s for %s", key, typeName)) - continue - } - if details.Type == nil && details.Ref == nil { - allErrs = append(allErrs, fmt.Errorf("could not find the type of %s from object: %v", key, details)) - } - var fieldType string - if details.Type != nil { - fieldType = *details.Type - } else { - fieldType = *details.Ref - } - if value == nil { - glog.V(2).Infof("Skipping nil field: %s", key) - continue - } - errs := s.validateField(value, fieldName+key, fieldType, &details) - if len(errs) > 0 { - allErrs = append(allErrs, errs...) - } - } - return allErrs -} - -// delegateIfDifferentApiVersion delegates the validation of an object if its ApiGroup does not match the -// current SwaggerSchema. -// First return value is true if the validation was delegated (by a different ApiGroup SwaggerSchema) -// Second return value is the result of the delegated validation if performed. -func (s *SwaggerSchema) delegateIfDifferentApiVersion(obj runtime.Unstructured) (bool, error) { - // Never delegate objects in the same ApiVersion or we will get infinite recursion - if !s.isDifferentApiVersion(obj) { - return false, nil - } - - // Convert the object back into bytes so that we can pass it to the ValidateBytes function - m, err := json.Marshal(obj.Object) - if err != nil { - return true, err - } - - // Delegate validation of this object to the correct SwaggerSchema for its ApiGroup - return true, s.delegate.ValidateBytes(m) -} - -// isDifferentApiVersion Returns true if obj lives in a different ApiVersion than the SwaggerSchema does. -// The SwaggerSchema will not be able to process objects in different ApiVersions unless they are vendored. -func (s *SwaggerSchema) isDifferentApiVersion(obj runtime.Unstructured) bool { - groupVersion := obj.GetAPIVersion() - return len(groupVersion) > 0 && s.api.ApiVersion != groupVersion -} - -// isGenericArray Returns true if p is an array of generic Objects - either RawExtension or Object. 
-func (s *SwaggerSchema) isGenericArray(p swagger.ModelProperty) bool { - return p.DataTypeFields.Type != nil && - *p.DataTypeFields.Type == "array" && - p.Items != nil && - p.Items.Ref != nil && - (*p.Items.Ref == "runtime.RawExtension" || *p.Items.Ref == "runtime.Object") -} - -// This matches type name in the swagger spec, such as "v1.Binding". -var versionRegexp = regexp.MustCompile(`^(v.+|unversioned)\..*`) - -func (s *SwaggerSchema) validateField(value interface{}, fieldName, fieldType string, fieldDetails *swagger.ModelProperty) []error { - allErrs := []error{} - if reflect.TypeOf(value) == nil { - return append(allErrs, fmt.Errorf("unexpected nil value for field %v", fieldName)) - } - // TODO: caesarxuchao: because we have multiple group/versions and objects - // may reference objects in other group, the commented out way of checking - // if a filedType is a type defined by us is outdated. We use a hacky way - // for now. - // TODO: the type name in the swagger spec is something like "v1.Binding", - // and the "v1" is generated from the package name, not the groupVersion of - // the type. We need to fix go-restful to embed the group name in the type - // name, otherwise we couldn't handle identically named types in different - // groups correctly. - if versionRegexp.MatchString(fieldType) { - // if strings.HasPrefix(fieldType, apiVersion) { - return s.ValidateObject(value, fieldName, fieldType) - } - switch fieldType { - case "string": - // Be loose about what we accept for 'string' since we use IntOrString in a couple of places - _, isString := value.(string) - _, isNumber := value.(float64) - _, isInteger := value.(int) - if !isString && !isNumber && !isInteger { - return append(allErrs, NewInvalidTypeError(reflect.String, reflect.TypeOf(value).Kind(), fieldName)) - } - case "array": - arr, ok := value.([]interface{}) - if !ok { - return append(allErrs, NewInvalidTypeError(reflect.Array, reflect.TypeOf(value).Kind(), fieldName)) - } - var arrType string - if fieldDetails.Items.Ref == nil && fieldDetails.Items.Type == nil { - return append(allErrs, NewInvalidTypeError(reflect.Array, reflect.TypeOf(value).Kind(), fieldName)) - } - if fieldDetails.Items.Ref != nil { - arrType = *fieldDetails.Items.Ref - } else { - arrType = *fieldDetails.Items.Type - } - for ix := range arr { - errs := s.validateField(arr[ix], fmt.Sprintf("%s[%d]", fieldName, ix), arrType, nil) - if len(errs) > 0 { - allErrs = append(allErrs, errs...) - } - } - case "uint64": - case "int64": - case "integer": - _, isNumber := value.(float64) - _, isInteger := value.(int) - if !isNumber && !isInteger { - return append(allErrs, NewInvalidTypeError(reflect.Int, reflect.TypeOf(value).Kind(), fieldName)) - } - case "float64": - if _, ok := value.(float64); !ok { - return append(allErrs, NewInvalidTypeError(reflect.Float64, reflect.TypeOf(value).Kind(), fieldName)) - } - case "boolean": - if _, ok := value.(bool); !ok { - return append(allErrs, NewInvalidTypeError(reflect.Bool, reflect.TypeOf(value).Kind(), fieldName)) - } - // API servers before release 1.3 produce swagger spec with `type: "any"` as the fallback type, while newer servers produce spec with `type: "object"`. - // We have both here so that kubectl can work with both old and new api servers. 
- case "object": - case "any": - default: - return append(allErrs, fmt.Errorf("unexpected type: %v", fieldType)) - } - return allErrs -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go deleted file mode 100644 index 1c24a0d8c7..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go +++ /dev/null @@ -1,3737 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "encoding/json" - "fmt" - "net" - "os" - "path" - "reflect" - "regexp" - "strings" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/endpoints" - utilpod "k8s.io/kubernetes/pkg/api/pod" - "k8s.io/kubernetes/pkg/api/resource" - apiservice "k8s.io/kubernetes/pkg/api/service" - "k8s.io/kubernetes/pkg/api/unversioned" - unversionedvalidation "k8s.io/kubernetes/pkg/api/unversioned/validation" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/capabilities" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/security/apparmor" - utilconfig "k8s.io/kubernetes/pkg/util/config" - "k8s.io/kubernetes/pkg/util/intstr" - "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/util/validation" - "k8s.io/kubernetes/pkg/util/validation/field" -) - -// TODO: delete this global variable when we enable the validation of common -// fields by default. -var RepairMalformedUpdates bool = true - -const isNegativeErrorMsg string = `must be greater than or equal to 0` -const isInvalidQuotaResource string = `must be a standard resource for quota` -const fieldImmutableErrorMsg string = `field is immutable` -const isNotIntegerErrorMsg string = `must be an integer` - -var pdPartitionErrorMsg string = validation.InclusiveRangeError(1, 255) -var volumeModeErrorMsg string = "must be a number between 0 and 0777 (octal), both inclusive" - -const totalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB - -// BannedOwners is a black list of object that are not allowed to be owners. -var BannedOwners = map[unversioned.GroupVersionKind]struct{}{ - v1.SchemeGroupVersion.WithKind("Event"): {}, -} - -// ValidateHasLabel requires that api.ObjectMeta has a Label with key and expectedValue -func ValidateHasLabel(meta api.ObjectMeta, fldPath *field.Path, key, expectedValue string) field.ErrorList { - allErrs := field.ErrorList{} - actualValue, found := meta.Labels[key] - if !found { - allErrs = append(allErrs, field.Required(fldPath.Child("labels").Key(key), - fmt.Sprintf("must be '%s'", expectedValue))) - return allErrs - } - if actualValue != expectedValue { - allErrs = append(allErrs, field.Invalid(fldPath.Child("labels").Key(key), meta.Labels, - fmt.Sprintf("must be '%s'", expectedValue))) - } - return allErrs -} - -// ValidateAnnotations validates that a set of annotations are correctly defined. 
-func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - var totalSize int64 - for k, v := range annotations { - for _, msg := range validation.IsQualifiedName(strings.ToLower(k)) { - allErrs = append(allErrs, field.Invalid(fldPath, k, msg)) - } - totalSize += (int64)(len(k)) + (int64)(len(v)) - } - if totalSize > (int64)(totalAnnotationSizeLimitB) { - allErrs = append(allErrs, field.TooLong(fldPath, "", totalAnnotationSizeLimitB)) - } - return allErrs -} - -func ValidateDNS1123Label(value string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for _, msg := range validation.IsDNS1123Label(value) { - allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) - } - return allErrs -} - -// ValidateDNS1123Subdomain validates that a name is a proper DNS subdomain. -func ValidateDNS1123Subdomain(value string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for _, msg := range validation.IsDNS1123Subdomain(value) { - allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) - } - return allErrs -} - -func ValidatePodSpecificAnnotations(annotations map[string]string, spec *api.PodSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if annotations[api.AffinityAnnotationKey] != "" { - allErrs = append(allErrs, ValidateAffinityInPodAnnotations(annotations, fldPath)...) - } - - if annotations[api.TolerationsAnnotationKey] != "" { - allErrs = append(allErrs, ValidateTolerationsInPodAnnotations(annotations, fldPath)...) - } - - // TODO: remove these after we EOL the annotations. - if hostname, exists := annotations[utilpod.PodHostnameAnnotation]; exists { - allErrs = append(allErrs, ValidateDNS1123Label(hostname, fldPath.Key(utilpod.PodHostnameAnnotation))...) - } - if subdomain, exists := annotations[utilpod.PodSubdomainAnnotation]; exists { - allErrs = append(allErrs, ValidateDNS1123Label(subdomain, fldPath.Key(utilpod.PodSubdomainAnnotation))...) - } - - allErrs = append(allErrs, ValidateSeccompPodAnnotations(annotations, fldPath)...) - allErrs = append(allErrs, ValidateAppArmorPodAnnotations(annotations, spec, fldPath)...) - - sysctls, err := api.SysctlsFromPodAnnotation(annotations[api.SysctlsPodAnnotationKey]) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Key(api.SysctlsPodAnnotationKey), annotations[api.SysctlsPodAnnotationKey], err.Error())) - } else { - allErrs = append(allErrs, validateSysctls(sysctls, fldPath.Key(api.SysctlsPodAnnotationKey))...) - } - unsafeSysctls, err := api.SysctlsFromPodAnnotation(annotations[api.UnsafeSysctlsPodAnnotationKey]) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Key(api.UnsafeSysctlsPodAnnotationKey), annotations[api.UnsafeSysctlsPodAnnotationKey], err.Error())) - } else { - allErrs = append(allErrs, validateSysctls(unsafeSysctls, fldPath.Key(api.UnsafeSysctlsPodAnnotationKey))...) - } - inBoth := sysctlIntersection(sysctls, unsafeSysctls) - if len(inBoth) > 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Key(api.UnsafeSysctlsPodAnnotationKey), strings.Join(inBoth, ", "), "can not be safe and unsafe")) - } - - return allErrs -} - -func ValidatePodSpecificAnnotationUpdates(newPod, oldPod *api.Pod, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - newAnnotations := newPod.Annotations - oldAnnotations := oldPod.Annotations - for k, oldVal := range oldAnnotations { - if newAnnotations[k] == oldVal { - continue // No change. 
- } - if strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) { - allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not update AppArmor annotations")) - } - } - // Check for removals. - for k := range newAnnotations { - if _, ok := oldAnnotations[k]; ok { - continue // No change. - } - if strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) { - allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not remove AppArmor annotations")) - } - } - allErrs = append(allErrs, ValidatePodSpecificAnnotations(newAnnotations, &newPod.Spec, fldPath)...) - return allErrs -} - -func ValidateEndpointsSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - // TODO: remove this after we EOL the annotation. - hostnamesMap, exists := annotations[endpoints.PodHostnamesAnnotation] - if exists && !isValidHostnamesMap(hostnamesMap) { - allErrs = append(allErrs, field.Invalid(fldPath, endpoints.PodHostnamesAnnotation, - `must be a valid json representation of map[string(IP)][HostRecord] e.g. "{"10.245.1.6":{"HostName":"my-webserver"}}"`)) - } - - return allErrs -} - -func validateOwnerReference(ownerReference api.OwnerReference, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - gvk := unversioned.FromAPIVersionAndKind(ownerReference.APIVersion, ownerReference.Kind) - // gvk.Group is empty for the legacy group. - if len(gvk.Version) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("apiVersion"), ownerReference.APIVersion, "version must not be empty")) - } - if len(gvk.Kind) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("kind"), ownerReference.Kind, "kind must not be empty")) - } - if len(ownerReference.Name) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), ownerReference.Name, "name must not be empty")) - } - if len(ownerReference.UID) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), ownerReference.UID, "uid must not be empty")) - } - if _, ok := BannedOwners[gvk]; ok { - allErrs = append(allErrs, field.Invalid(fldPath, ownerReference, fmt.Sprintf("%s is disallowed from being an owner", gvk))) - } - return allErrs -} - -func ValidateOwnerReferences(ownerReferences []api.OwnerReference, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - controllerName := "" - for _, ref := range ownerReferences { - allErrs = append(allErrs, validateOwnerReference(ref, fldPath)...) - if ref.Controller != nil && *ref.Controller { - if controllerName != "" { - allErrs = append(allErrs, field.Invalid(fldPath, ownerReferences, - fmt.Sprintf("Only one reference can have Controller set to true. Found \"true\" in references for %v and %v", controllerName, ref.Name))) - } else { - controllerName = ref.Name - } - } - } - return allErrs -} - -// ValidateNameFunc validates that the provided name is valid for a given resource type. -// Not all resources have the same validation rules for names. Prefix is true -// if the name will have a value appended to it. If the name is not valid, -// this returns a list of descriptions of individual characteristics of the -// value that were not valid. Otherwise this returns an empty list or nil. -type ValidateNameFunc func(name string, prefix bool) []string - -// maskTrailingDash replaces the final character of a string with a subdomain safe -// value if is a dash. 
-func maskTrailingDash(name string) string { - if strings.HasSuffix(name, "-") { - return name[:len(name)-2] + "a" - } - return name -} - -// ValidatePodName can be used to check whether the given pod name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidatePodName = NameIsDNSSubdomain - -// ValidateReplicationControllerName can be used to check whether the given replication -// controller name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateReplicationControllerName = NameIsDNSSubdomain - -// ValidateServiceName can be used to check whether the given service name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateServiceName = NameIsDNS1035Label - -// ValidateNodeName can be used to check whether the given node name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateNodeName = NameIsDNSSubdomain - -// ValidateNamespaceName can be used to check whether the given namespace name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateNamespaceName = NameIsDNSLabel - -// ValidateLimitRangeName can be used to check whether the given limit range name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateLimitRangeName = NameIsDNSSubdomain - -// ValidateResourceQuotaName can be used to check whether the given -// resource quota name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateResourceQuotaName = NameIsDNSSubdomain - -// ValidateSecretName can be used to check whether the given secret name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateSecretName = NameIsDNSSubdomain - -// ValidateServiceAccountName can be used to check whether the given service account name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateServiceAccountName = NameIsDNSSubdomain - -// ValidateEndpointsName can be used to check whether the given endpoints name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateEndpointsName = NameIsDNSSubdomain - -// ValidateClusterName can be used to check whether the given cluster name is valid. -var ValidateClusterName = NameIsDNS1035Label - -// NameIsDNSSubdomain is a ValidateNameFunc for names that must be a DNS subdomain. -func NameIsDNSSubdomain(name string, prefix bool) []string { - if prefix { - name = maskTrailingDash(name) - } - return validation.IsDNS1123Subdomain(name) -} - -// NameIsDNSLabel is a ValidateNameFunc for names that must be a DNS 1123 label. -func NameIsDNSLabel(name string, prefix bool) []string { - if prefix { - name = maskTrailingDash(name) - } - return validation.IsDNS1123Label(name) -} - -// NameIsDNS1035Label is a ValidateNameFunc for names that must be a DNS 952 label. 
-func NameIsDNS1035Label(name string, prefix bool) []string { - if prefix { - name = maskTrailingDash(name) - } - return validation.IsDNS1035Label(name) -} - -// Validates that given value is not negative. -func ValidateNonnegativeField(value int64, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if value < 0 { - allErrs = append(allErrs, field.Invalid(fldPath, value, isNegativeErrorMsg)) - } - return allErrs -} - -// Validates that a Quantity is not negative -func ValidateNonnegativeQuantity(value resource.Quantity, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if value.Cmp(resource.Quantity{}) < 0 { - allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNegativeErrorMsg)) - } - return allErrs -} - -func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if !api.Semantic.DeepEqual(oldVal, newVal) { - allErrs = append(allErrs, field.Invalid(fldPath, newVal, fieldImmutableErrorMsg)) - } - return allErrs -} - -// ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already -// been performed. -// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before. -// TODO: Remove calls to this method scattered in validations of specific resources, e.g., ValidatePodUpdate. -func ValidateObjectMeta(meta *api.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if len(meta.GenerateName) != 0 { - for _, msg := range nameFn(meta.GenerateName, true) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("generateName"), meta.GenerateName, msg)) - } - } - // If the generated name validates, but the calculated value does not, it's a problem with generation, and we - // report it here. This may confuse users, but indicates a programming bug and still must be validated. - // If there are multiple fields out of which one is required then add an or as a separator - if len(meta.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("name"), "name or generateName is required")) - } else { - for _, msg := range nameFn(meta.Name, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), meta.Name, msg)) - } - } - if requiresNamespace { - if len(meta.Namespace) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), "")) - } else { - for _, msg := range ValidateNamespaceName(meta.Namespace, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), meta.Namespace, msg)) - } - } - } else { - if len(meta.Namespace) != 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("namespace"), "not allowed on this type")) - } - } - if len(meta.ClusterName) != 0 { - for _, msg := range ValidateClusterName(meta.ClusterName, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("clusterName"), meta.ClusterName, msg)) - } - } - allErrs = append(allErrs, ValidateNonnegativeField(meta.Generation, fldPath.Child("generation"))...) - allErrs = append(allErrs, unversionedvalidation.ValidateLabels(meta.Labels, fldPath.Child("labels"))...) - allErrs = append(allErrs, ValidateAnnotations(meta.Annotations, fldPath.Child("annotations"))...) - allErrs = append(allErrs, ValidateOwnerReferences(meta.OwnerReferences, fldPath.Child("ownerReferences"))...) 
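Aside: the `ValidateNameFunc` helpers above validate a name either as a final object name or as a `generateName` prefix, masking a trailing dash in the prefix case so that "web-" is still accepted as a prefix. A simplified standalone sketch — the DNS-1123 regexp and 253-character limit stand in for `validation.IsDNS1123Subdomain`, and the dash masking is simplified relative to the helper above:

```go
// Illustrative sketch only; a local stand-in, not the vendored helpers.
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var dns1123SubdomainRE = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)

// dns1123Subdomain returns the reasons a value is not a valid DNS-1123
// subdomain, or nil if it is valid.
func dns1123Subdomain(name string) []string {
	var msgs []string
	if len(name) > 253 {
		msgs = append(msgs, "must be no more than 253 characters")
	}
	if !dns1123SubdomainRE.MatchString(name) {
		msgs = append(msgs, "must consist of lower case alphanumeric characters, '-' or '.'")
	}
	return msgs
}

// maskTrailingDash (simplified) swaps a trailing '-' for a letter so a
// generateName prefix can be validated as if a suffix were appended.
func maskTrailingDash(name string) string {
	if strings.HasSuffix(name, "-") {
		return name[:len(name)-1] + "a"
	}
	return name
}

func nameIsDNSSubdomain(name string, prefix bool) []string {
	if prefix {
		name = maskTrailingDash(name)
	}
	return dns1123Subdomain(name)
}

func main() {
	fmt.Println(nameIsDNSSubdomain("web-", true))  // [] — valid as a prefix
	fmt.Println(nameIsDNSSubdomain("web-", false)) // invalid as a final name
}
```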
- for _, finalizer := range meta.Finalizers { - allErrs = append(allErrs, validateFinalizerName(finalizer, fldPath.Child("finalizers"))...) - } - return allErrs -} - -// ValidateObjectMetaUpdate validates an object's metadata when updated -func ValidateObjectMetaUpdate(newMeta, oldMeta *api.ObjectMeta, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if !RepairMalformedUpdates && newMeta.UID != oldMeta.UID { - allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), newMeta.UID, "field is immutable")) - } - // in the event it is left empty, set it, to allow clients more flexibility - // TODO: remove the following code that repairs the update request when we retire the clients that modify the immutable fields. - // Please do not copy this pattern elsewhere; validation functions should not be modifying the objects they are passed! - if RepairMalformedUpdates { - if len(newMeta.UID) == 0 { - newMeta.UID = oldMeta.UID - } - // ignore changes to timestamp - if oldMeta.CreationTimestamp.IsZero() { - oldMeta.CreationTimestamp = newMeta.CreationTimestamp - } else { - newMeta.CreationTimestamp = oldMeta.CreationTimestamp - } - // an object can never remove a deletion timestamp or clear/change grace period seconds - if !oldMeta.DeletionTimestamp.IsZero() { - newMeta.DeletionTimestamp = oldMeta.DeletionTimestamp - } - if oldMeta.DeletionGracePeriodSeconds != nil && newMeta.DeletionGracePeriodSeconds == nil { - newMeta.DeletionGracePeriodSeconds = oldMeta.DeletionGracePeriodSeconds - } - } - - // TODO: needs to check if newMeta==nil && oldMeta !=nil after the repair logic is removed. - if newMeta.DeletionGracePeriodSeconds != nil && (oldMeta.DeletionGracePeriodSeconds == nil || *newMeta.DeletionGracePeriodSeconds != *oldMeta.DeletionGracePeriodSeconds) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("deletionGracePeriodSeconds"), newMeta.DeletionGracePeriodSeconds, "field is immutable; may only be changed via deletion")) - } - if newMeta.DeletionTimestamp != nil && (oldMeta.DeletionTimestamp == nil || !newMeta.DeletionTimestamp.Equal(*oldMeta.DeletionTimestamp)) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("deletionTimestamp"), newMeta.DeletionTimestamp, "field is immutable; may only be changed via deletion")) - } - - // Finalizers cannot be added if the object is already being deleted. - if oldMeta.DeletionTimestamp != nil { - allErrs = append(allErrs, ValidateNoNewFinalizers(newMeta.Finalizers, oldMeta.Finalizers, fldPath.Child("finalizers"))...) - } - - // Reject updates that don't specify a resource version - if len(newMeta.ResourceVersion) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceVersion"), newMeta.ResourceVersion, "must be specified for an update")) - } - - // Generation shouldn't be decremented - if newMeta.Generation < oldMeta.Generation { - allErrs = append(allErrs, field.Invalid(fldPath.Child("generation"), newMeta.Generation, "must not be decremented")) - } - - allErrs = append(allErrs, ValidateImmutableField(newMeta.Name, oldMeta.Name, fldPath.Child("name"))...) - allErrs = append(allErrs, ValidateImmutableField(newMeta.Namespace, oldMeta.Namespace, fldPath.Child("namespace"))...) - allErrs = append(allErrs, ValidateImmutableField(newMeta.UID, oldMeta.UID, fldPath.Child("uid"))...) - allErrs = append(allErrs, ValidateImmutableField(newMeta.CreationTimestamp, oldMeta.CreationTimestamp, fldPath.Child("creationTimestamp"))...) 
- allErrs = append(allErrs, ValidateImmutableField(newMeta.ClusterName, oldMeta.ClusterName, fldPath.Child("clusterName"))...) - - allErrs = append(allErrs, unversionedvalidation.ValidateLabels(newMeta.Labels, fldPath.Child("labels"))...) - allErrs = append(allErrs, ValidateAnnotations(newMeta.Annotations, fldPath.Child("annotations"))...) - allErrs = append(allErrs, ValidateOwnerReferences(newMeta.OwnerReferences, fldPath.Child("ownerReferences"))...) - - return allErrs -} - -func ValidateNoNewFinalizers(newFinalizers []string, oldFinalizers []string, fldPath *field.Path) field.ErrorList { - const newFinalizersErrorMsg string = `no new finalizers can be added if the object is being deleted` - allErrs := field.ErrorList{} - extra := sets.NewString(newFinalizers...).Difference(sets.NewString(oldFinalizers...)) - if len(extra) != 0 { - allErrs = append(allErrs, field.Forbidden(fldPath, fmt.Sprintf("no new finalizers can be added if the object is being deleted, found new finalizers %#v", extra.List()))) - } - return allErrs -} - -func validateVolumes(volumes []api.Volume, fldPath *field.Path) (sets.String, field.ErrorList) { - allErrs := field.ErrorList{} - - allNames := sets.String{} - for i, vol := range volumes { - idxPath := fldPath.Index(i) - namePath := idxPath.Child("name") - el := validateVolumeSource(&vol.VolumeSource, idxPath) - if len(vol.Name) == 0 { - el = append(el, field.Required(namePath, "")) - } else { - el = append(el, ValidateDNS1123Label(vol.Name, namePath)...) - } - if allNames.Has(vol.Name) { - el = append(el, field.Duplicate(namePath, vol.Name)) - } - if len(el) == 0 { - allNames.Insert(vol.Name) - } else { - allErrs = append(allErrs, el...) - } - - } - return allNames, allErrs -} - -func validateVolumeSource(source *api.VolumeSource, fldPath *field.Path) field.ErrorList { - numVolumes := 0 - allErrs := field.ErrorList{} - if source.EmptyDir != nil { - numVolumes++ - // EmptyDirs have nothing to validate - } - if source.HostPath != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostPath"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateHostPathVolumeSource(source.HostPath, fldPath.Child("hostPath"))...) - } - } - if source.GitRepo != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("gitRepo"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateGitRepoVolumeSource(source.GitRepo, fldPath.Child("gitRepo"))...) - } - } - if source.GCEPersistentDisk != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("gcePersistentDisk"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateGCEPersistentDiskVolumeSource(source.GCEPersistentDisk, fldPath.Child("persistentDisk"))...) - } - } - if source.AWSElasticBlockStore != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("awsElasticBlockStore"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateAWSElasticBlockStoreVolumeSource(source.AWSElasticBlockStore, fldPath.Child("awsElasticBlockStore"))...) 
- } - } - if source.Secret != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("secret"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateSecretVolumeSource(source.Secret, fldPath.Child("secret"))...) - } - } - if source.NFS != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("nfs"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateNFSVolumeSource(source.NFS, fldPath.Child("nfs"))...) - } - } - if source.ISCSI != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("iscsi"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateISCSIVolumeSource(source.ISCSI, fldPath.Child("iscsi"))...) - } - } - if source.Glusterfs != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("glusterfs"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateGlusterfs(source.Glusterfs, fldPath.Child("glusterfs"))...) - } - } - if source.Flocker != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("flocker"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateFlockerVolumeSource(source.Flocker, fldPath.Child("flocker"))...) - } - } - if source.PersistentVolumeClaim != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("persistentVolumeClaim"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validatePersistentClaimVolumeSource(source.PersistentVolumeClaim, fldPath.Child("persistentVolumeClaim"))...) - } - } - if source.RBD != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("rbd"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateRBDVolumeSource(source.RBD, fldPath.Child("rbd"))...) - } - } - if source.Cinder != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("cinder"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateCinderVolumeSource(source.Cinder, fldPath.Child("cinder"))...) - } - } - if source.CephFS != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("cephFS"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateCephFSVolumeSource(source.CephFS, fldPath.Child("cephfs"))...) - } - } - if source.Quobyte != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("quobyte"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateQuobyteVolumeSource(source.Quobyte, fldPath.Child("quobyte"))...) - } - } - if source.DownwardAPI != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("downwarAPI"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateDownwardAPIVolumeSource(source.DownwardAPI, fldPath.Child("downwardAPI"))...) 
- } - } - if source.FC != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("fc"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateFCVolumeSource(source.FC, fldPath.Child("fc"))...) - } - } - if source.FlexVolume != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("flexVolume"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateFlexVolumeSource(source.FlexVolume, fldPath.Child("flexVolume"))...) - } - } - if source.ConfigMap != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("configMap"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateConfigMapVolumeSource(source.ConfigMap, fldPath.Child("configMap"))...) - } - } - if source.AzureFile != nil { - numVolumes++ - allErrs = append(allErrs, validateAzureFile(source.AzureFile, fldPath.Child("azureFile"))...) - } - if source.VsphereVolume != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("vsphereVolume"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateVsphereVolumeSource(source.VsphereVolume, fldPath.Child("vsphereVolume"))...) - } - } - if source.PhotonPersistentDisk != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("photonPersistentDisk"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validatePhotonPersistentDiskVolumeSource(source.PhotonPersistentDisk, fldPath.Child("photonPersistentDisk"))...) - } - } - if source.AzureDisk != nil { - numVolumes++ - allErrs = append(allErrs, validateAzureDisk(source.AzureDisk, fldPath.Child("azureDisk"))...) - } - - if numVolumes == 0 { - allErrs = append(allErrs, field.Required(fldPath, "must specify a volume type")) - } - - return allErrs -} - -func validateHostPathVolumeSource(hostPath *api.HostPathVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(hostPath.Path) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) - } - return allErrs -} - -func validateGitRepoVolumeSource(gitRepo *api.GitRepoVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(gitRepo.Repository) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("repository"), "")) - } - - pathErrs := validateLocalDescendingPath(gitRepo.Directory, fldPath.Child("directory")) - allErrs = append(allErrs, pathErrs...) 
- return allErrs -} - -func validateISCSIVolumeSource(iscsi *api.ISCSIVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(iscsi.TargetPortal) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("targetPortal"), "")) - } - if len(iscsi.IQN) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("iqn"), "")) - } - if iscsi.Lun < 0 || iscsi.Lun > 255 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), iscsi.Lun, validation.InclusiveRangeError(0, 255))) - } - return allErrs -} - -func validateFCVolumeSource(fc *api.FCVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(fc.TargetWWNs) < 1 { - allErrs = append(allErrs, field.Required(fldPath.Child("targetWWNs"), "")) - } - - if fc.Lun == nil { - allErrs = append(allErrs, field.Required(fldPath.Child("lun"), "")) - } else { - if *fc.Lun < 0 || *fc.Lun > 255 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), fc.Lun, validation.InclusiveRangeError(0, 255))) - } - } - return allErrs -} - -func validateGCEPersistentDiskVolumeSource(pd *api.GCEPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(pd.PDName) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("pdName"), "")) - } - if pd.Partition < 0 || pd.Partition > 255 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("partition"), pd.Partition, pdPartitionErrorMsg)) - } - return allErrs -} - -func validateAWSElasticBlockStoreVolumeSource(PD *api.AWSElasticBlockStoreVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(PD.VolumeID) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), "")) - } - if PD.Partition < 0 || PD.Partition > 255 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("partition"), PD.Partition, pdPartitionErrorMsg)) - } - return allErrs -} - -func validateSecretVolumeSource(secretSource *api.SecretVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(secretSource.SecretName) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), "")) - } - - secretMode := secretSource.DefaultMode - if secretMode != nil && (*secretMode > 0777 || *secretMode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *secretMode, volumeModeErrorMsg)) - } - - itemsPath := fldPath.Child("items") - for i, kp := range secretSource.Items { - itemPath := itemsPath.Index(i) - allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...) - } - return allErrs -} - -func validateConfigMapVolumeSource(configMapSource *api.ConfigMapVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(configMapSource.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) - } - - configMapMode := configMapSource.DefaultMode - if configMapMode != nil && (*configMapMode > 0777 || *configMapMode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *configMapMode, volumeModeErrorMsg)) - } - - itemsPath := fldPath.Child("items") - for i, kp := range configMapSource.Items { - itemPath := itemsPath.Index(i) - allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...) 
- } - return allErrs -} - -func validateKeyToPath(kp *api.KeyToPath, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(kp.Key) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("key"), "")) - } - if len(kp.Path) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) - } - allErrs = append(allErrs, validateLocalNonReservedPath(kp.Path, fldPath.Child("path"))...) - if kp.Mode != nil && (*kp.Mode > 0777 || *kp.Mode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *kp.Mode, volumeModeErrorMsg)) - } - - return allErrs -} - -func validatePersistentClaimVolumeSource(claim *api.PersistentVolumeClaimVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(claim.ClaimName) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("claimName"), "")) - } - return allErrs -} - -func validateNFSVolumeSource(nfs *api.NFSVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(nfs.Server) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("server"), "")) - } - if len(nfs.Path) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) - } - if !path.IsAbs(nfs.Path) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("path"), nfs.Path, "must be an absolute path")) - } - return allErrs -} - -func validateQuobyteVolumeSource(quobyte *api.QuobyteVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(quobyte.Registry) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("registry"), "must be a host:port pair or multiple pairs separated by commas")) - } else { - for _, hostPortPair := range strings.Split(quobyte.Registry, ",") { - if _, _, err := net.SplitHostPort(hostPortPair); err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("registry"), quobyte.Registry, "must be a host:port pair or multiple pairs separated by commas")) - } - } - } - - if len(quobyte.Volume) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("volume"), "")) - } - return allErrs -} - -func validateGlusterfs(glusterfs *api.GlusterfsVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(glusterfs.EndpointsName) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("endpoints"), "")) - } - if len(glusterfs.Path) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) - } - return allErrs -} - -func validateFlockerVolumeSource(flocker *api.FlockerVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(flocker.DatasetName) == 0 && len(flocker.DatasetUUID) == 0 { - //TODO: consider adding a RequiredOneOf() error for this and similar cases - allErrs = append(allErrs, field.Required(fldPath, "one of datasetName and datasetUUID is required")) - } - if len(flocker.DatasetName) != 0 && len(flocker.DatasetUUID) != 0 { - allErrs = append(allErrs, field.Invalid(fldPath, "resource", "datasetName and datasetUUID can not be specified simultaneously")) - } - if strings.Contains(flocker.DatasetName, "/") { - allErrs = append(allErrs, field.Invalid(fldPath.Child("datasetName"), flocker.DatasetName, "must not contain '/'")) - } - return allErrs -} - -var validDownwardAPIFieldPathExpressions = sets.NewString( - "metadata.name", - "metadata.namespace", - "metadata.labels", - "metadata.annotations") - -func validateDownwardAPIVolumeSource(downwardAPIVolume 
*api.DownwardAPIVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - downwardAPIMode := downwardAPIVolume.DefaultMode - if downwardAPIMode != nil && (*downwardAPIMode > 0777 || *downwardAPIMode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *downwardAPIMode, volumeModeErrorMsg)) - } - - for _, file := range downwardAPIVolume.Items { - if len(file.Path) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) - } - allErrs = append(allErrs, validateLocalNonReservedPath(file.Path, fldPath.Child("path"))...) - if file.FieldRef != nil { - allErrs = append(allErrs, validateObjectFieldSelector(file.FieldRef, &validDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...) - if file.ResourceFieldRef != nil { - allErrs = append(allErrs, field.Invalid(fldPath, "resource", "fieldRef and resourceFieldRef can not be specified simultaneously")) - } - } else if file.ResourceFieldRef != nil { - allErrs = append(allErrs, validateContainerResourceFieldSelector(file.ResourceFieldRef, &validContainerResourceFieldPathExpressions, fldPath.Child("resourceFieldRef"), true)...) - } else { - allErrs = append(allErrs, field.Required(fldPath, "one of fieldRef and resourceFieldRef is required")) - } - if file.Mode != nil && (*file.Mode > 0777 || *file.Mode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *file.Mode, volumeModeErrorMsg)) - } - } - return allErrs -} - -// This validate will make sure targetPath: -// 1. is not abs path -// 2. does not have any element which is ".." -func validateLocalDescendingPath(targetPath string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if path.IsAbs(targetPath) { - allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must be a relative path")) - } - - // TODO: this assumes the OS of apiserver & nodes are the same - parts := strings.Split(targetPath, string(os.PathSeparator)) - for _, item := range parts { - if item == ".." { - allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not contain '..'")) - break // even for `../../..`, one error is sufficient to make the point - } - } - return allErrs -} - -// This validate will make sure targetPath: -// 1. is not abs path -// 2. does not contain any '..' elements -// 3. does not start with '..' -func validateLocalNonReservedPath(targetPath string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, validateLocalDescendingPath(targetPath, fldPath)...) - // Don't report this error if the check for .. elements already caught it. 
- if strings.HasPrefix(targetPath, "..") && !strings.HasPrefix(targetPath, "../") { - allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not start with '..'")) - } - return allErrs -} - -func validateRBDVolumeSource(rbd *api.RBDVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(rbd.CephMonitors) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) - } - if len(rbd.RBDImage) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("image"), "")) - } - return allErrs -} - -func validateCinderVolumeSource(cd *api.CinderVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(cd.VolumeID) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), "")) - } - return allErrs -} - -func validateCephFSVolumeSource(cephfs *api.CephFSVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(cephfs.Monitors) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) - } - return allErrs -} - -func validateFlexVolumeSource(fv *api.FlexVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(fv.Driver) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("driver"), "")) - } - return allErrs -} - -func validateAzureFile(azure *api.AzureFileVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if azure.SecretName == "" { - allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), "")) - } - if azure.ShareName == "" { - allErrs = append(allErrs, field.Required(fldPath.Child("shareName"), "")) - } - return allErrs -} - -var supportedCachingModes = sets.NewString(string(api.AzureDataDiskCachingNone), string(api.AzureDataDiskCachingReadOnly), string(api.AzureDataDiskCachingReadWrite)) - -func validateAzureDisk(azure *api.AzureDiskVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if azure.DiskName == "" { - allErrs = append(allErrs, field.Required(fldPath.Child("diskName"), "")) - } - if azure.DataDiskURI == "" { - allErrs = append(allErrs, field.Required(fldPath.Child("diskURI"), "")) - } - if azure.CachingMode != nil && !supportedCachingModes.Has(string(*azure.CachingMode)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("cachingMode"), *azure.CachingMode, supportedCachingModes.List())) - } - return allErrs -} - -func validateVsphereVolumeSource(cd *api.VsphereVirtualDiskVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(cd.VolumePath) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("volumePath"), "")) - } - return allErrs -} - -func validatePhotonPersistentDiskVolumeSource(cd *api.PhotonPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(cd.PdID) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("pdID"), "")) - } - return allErrs -} - -// ValidatePersistentVolumeName checks that a name is appropriate for a -// PersistentVolumeName object. 
-var ValidatePersistentVolumeName = NameIsDNSSubdomain - -var supportedAccessModes = sets.NewString(string(api.ReadWriteOnce), string(api.ReadOnlyMany), string(api.ReadWriteMany)) - -var supportedReclaimPolicy = sets.NewString(string(api.PersistentVolumeReclaimDelete), string(api.PersistentVolumeReclaimRecycle), string(api.PersistentVolumeReclaimRetain)) - -func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList { - allErrs := ValidateObjectMeta(&pv.ObjectMeta, false, ValidatePersistentVolumeName, field.NewPath("metadata")) - - specPath := field.NewPath("spec") - if len(pv.Spec.AccessModes) == 0 { - allErrs = append(allErrs, field.Required(specPath.Child("accessModes"), "")) - } - for _, mode := range pv.Spec.AccessModes { - if !supportedAccessModes.Has(string(mode)) { - allErrs = append(allErrs, field.NotSupported(specPath.Child("accessModes"), mode, supportedAccessModes.List())) - } - } - - if len(pv.Spec.Capacity) == 0 { - allErrs = append(allErrs, field.Required(specPath.Child("capacity"), "")) - } - - if _, ok := pv.Spec.Capacity[api.ResourceStorage]; !ok || len(pv.Spec.Capacity) > 1 { - allErrs = append(allErrs, field.NotSupported(specPath.Child("capacity"), pv.Spec.Capacity, []string{string(api.ResourceStorage)})) - } - capPath := specPath.Child("capacity") - for r, qty := range pv.Spec.Capacity { - allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...) - } - if len(string(pv.Spec.PersistentVolumeReclaimPolicy)) > 0 { - if !supportedReclaimPolicy.Has(string(pv.Spec.PersistentVolumeReclaimPolicy)) { - allErrs = append(allErrs, field.NotSupported(specPath.Child("persistentVolumeReclaimPolicy"), pv.Spec.PersistentVolumeReclaimPolicy, supportedReclaimPolicy.List())) - } - } - - numVolumes := 0 - if pv.Spec.HostPath != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("hostPath"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateHostPathVolumeSource(pv.Spec.HostPath, specPath.Child("hostPath"))...) - } - } - if pv.Spec.GCEPersistentDisk != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("gcePersistentDisk"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateGCEPersistentDiskVolumeSource(pv.Spec.GCEPersistentDisk, specPath.Child("persistentDisk"))...) - } - } - if pv.Spec.AWSElasticBlockStore != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("awsElasticBlockStore"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateAWSElasticBlockStoreVolumeSource(pv.Spec.AWSElasticBlockStore, specPath.Child("awsElasticBlockStore"))...) - } - } - if pv.Spec.Glusterfs != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("glusterfs"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateGlusterfs(pv.Spec.Glusterfs, specPath.Child("glusterfs"))...) - } - } - if pv.Spec.Flocker != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("flocker"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateFlockerVolumeSource(pv.Spec.Flocker, specPath.Child("flocker"))...) 
- } - } - if pv.Spec.NFS != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("nfs"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateNFSVolumeSource(pv.Spec.NFS, specPath.Child("nfs"))...) - } - } - if pv.Spec.RBD != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("rbd"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateRBDVolumeSource(pv.Spec.RBD, specPath.Child("rbd"))...) - } - } - if pv.Spec.Quobyte != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("quobyte"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateQuobyteVolumeSource(pv.Spec.Quobyte, specPath.Child("quobyte"))...) - } - } - if pv.Spec.CephFS != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("cephFS"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateCephFSVolumeSource(pv.Spec.CephFS, specPath.Child("cephfs"))...) - } - } - if pv.Spec.ISCSI != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("iscsi"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateISCSIVolumeSource(pv.Spec.ISCSI, specPath.Child("iscsi"))...) - } - } - if pv.Spec.Cinder != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("cinder"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateCinderVolumeSource(pv.Spec.Cinder, specPath.Child("cinder"))...) - } - } - if pv.Spec.FC != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("fc"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateFCVolumeSource(pv.Spec.FC, specPath.Child("fc"))...) - } - } - if pv.Spec.FlexVolume != nil { - numVolumes++ - allErrs = append(allErrs, validateFlexVolumeSource(pv.Spec.FlexVolume, specPath.Child("flexVolume"))...) - } - if pv.Spec.AzureFile != nil { - numVolumes++ - allErrs = append(allErrs, validateAzureFile(pv.Spec.AzureFile, specPath.Child("azureFile"))...) - } - if pv.Spec.VsphereVolume != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("vsphereVolume"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateVsphereVolumeSource(pv.Spec.VsphereVolume, specPath.Child("vsphereVolume"))...) - } - } - if pv.Spec.PhotonPersistentDisk != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("photonPersistentDisk"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validatePhotonPersistentDiskVolumeSource(pv.Spec.PhotonPersistentDisk, specPath.Child("photonPersistentDisk"))...) - } - } - if pv.Spec.AzureDisk != nil { - numVolumes++ - allErrs = append(allErrs, validateAzureDisk(pv.Spec.AzureDisk, specPath.Child("azureDisk"))...) 
- } - - if numVolumes == 0 { - allErrs = append(allErrs, field.Required(specPath, "must specify a volume type")) - } - - // do not allow hostPath mounts of '/' to have a 'recycle' reclaim policy - if pv.Spec.HostPath != nil && path.Clean(pv.Spec.HostPath.Path) == "/" && pv.Spec.PersistentVolumeReclaimPolicy == api.PersistentVolumeReclaimRecycle { - allErrs = append(allErrs, field.Forbidden(specPath.Child("persistentVolumeReclaimPolicy"), "may not be 'recycle' for a hostPath mount of '/'")) - } - - return allErrs -} - -// ValidatePersistentVolumeUpdate tests to see if the update is legal for an end user to make. -// newPv is updated with fields that cannot be changed. -func ValidatePersistentVolumeUpdate(newPv, oldPv *api.PersistentVolume) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = ValidatePersistentVolume(newPv) - newPv.Status = oldPv.Status - return allErrs -} - -// ValidatePersistentVolumeStatusUpdate tests to see if the status update is legal for an end user to make. -// newPv is updated with fields that cannot be changed. -func ValidatePersistentVolumeStatusUpdate(newPv, oldPv *api.PersistentVolume) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newPv.ObjectMeta, &oldPv.ObjectMeta, field.NewPath("metadata")) - if len(newPv.ResourceVersion) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), "")) - } - newPv.Spec = oldPv.Spec - return allErrs -} - -// ValidatePersistentVolumeClaim validates a PersistentVolumeClaim -func ValidatePersistentVolumeClaim(pvc *api.PersistentVolumeClaim) field.ErrorList { - allErrs := ValidateObjectMeta(&pvc.ObjectMeta, true, ValidatePersistentVolumeName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidatePersistentVolumeClaimSpec(&pvc.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidatePersistentVolumeClaimSpec validates a PersistentVolumeClaimSpec -func ValidatePersistentVolumeClaimSpec(spec *api.PersistentVolumeClaimSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(spec.AccessModes) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("accessModes"), "at least 1 access mode is required")) - } - if spec.Selector != nil { - allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) - } - for _, mode := range spec.AccessModes { - if mode != api.ReadWriteOnce && mode != api.ReadOnlyMany && mode != api.ReadWriteMany { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("accessModes"), mode, supportedAccessModes.List())) - } - } - storageValue, ok := spec.Resources.Requests[api.ResourceStorage] - if !ok { - allErrs = append(allErrs, field.Required(fldPath.Child("resources").Key(string(api.ResourceStorage)), "")) - } else { - allErrs = append(allErrs, ValidateResourceQuantityValue(string(api.ResourceStorage), storageValue, fldPath.Child("resources").Key(string(api.ResourceStorage)))...) - } - return allErrs -} - -// ValidatePersistentVolumeClaimUpdate validates an update to a PeristentVolumeClaim -func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *api.PersistentVolumeClaim) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidatePersistentVolumeClaim(newPvc)...) - // PVController needs to update PVC.Spec w/ VolumeName. - // Claims are immutable in order to enforce quota, range limits, etc. without gaming the system. 
- if len(oldPvc.Spec.VolumeName) == 0 { - // volumeName changes are allowed once. - // Reset back to empty string after equality check - oldPvc.Spec.VolumeName = newPvc.Spec.VolumeName - defer func() { oldPvc.Spec.VolumeName = "" }() - } - // changes to Spec are not allowed, but updates to label/annotations are OK. - // no-op updates pass validation. - if !api.Semantic.DeepEqual(newPvc.Spec, oldPvc.Spec) { - allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "field is immutable after creation")) - } - newPvc.Status = oldPvc.Status - return allErrs -} - -// ValidatePersistentVolumeClaimStatusUpdate validates an update to status of a PeristentVolumeClaim -func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *api.PersistentVolumeClaim) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata")) - if len(newPvc.ResourceVersion) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), "")) - } - if len(newPvc.Spec.AccessModes) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("Spec", "accessModes"), "")) - } - capPath := field.NewPath("status", "capacity") - for r, qty := range newPvc.Status.Capacity { - allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...) - } - newPvc.Spec = oldPvc.Spec - return allErrs -} - -var supportedPortProtocols = sets.NewString(string(api.ProtocolTCP), string(api.ProtocolUDP)) - -func validateContainerPorts(ports []api.ContainerPort, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - allNames := sets.String{} - for i, port := range ports { - idxPath := fldPath.Index(i) - if len(port.Name) > 0 { - if msgs := validation.IsValidPortName(port.Name); len(msgs) != 0 { - for i = range msgs { - allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), port.Name, msgs[i])) - } - } else if allNames.Has(port.Name) { - allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), port.Name)) - } else { - allNames.Insert(port.Name) - } - } - if port.ContainerPort == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("containerPort"), "")) - } else { - for _, msg := range validation.IsValidPortNum(int(port.ContainerPort)) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, msg)) - } - } - if port.HostPort != 0 { - for _, msg := range validation.IsValidPortNum(int(port.HostPort)) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("hostPort"), port.HostPort, msg)) - } - } - if len(port.Protocol) == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("protocol"), "")) - } else if !supportedPortProtocols.Has(string(port.Protocol)) { - allErrs = append(allErrs, field.NotSupported(idxPath.Child("protocol"), port.Protocol, supportedPortProtocols.List())) - } - } - return allErrs -} - -func validateEnv(vars []api.EnvVar, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - for i, ev := range vars { - idxPath := fldPath.Index(i) - if len(ev.Name) == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("name"), "")) - } else { - for _, msg := range validation.IsCIdentifier(ev.Name) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), ev.Name, msg)) - } - } - allErrs = append(allErrs, validateEnvVarValueFrom(ev, idxPath.Child("valueFrom"))...) 
- } - return allErrs -} - -var validFieldPathExpressionsEnv = sets.NewString("metadata.name", "metadata.namespace", "spec.nodeName", "spec.serviceAccountName", "status.podIP") -var validContainerResourceFieldPathExpressions = sets.NewString("limits.cpu", "limits.memory", "requests.cpu", "requests.memory") - -func validateEnvVarValueFrom(ev api.EnvVar, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if ev.ValueFrom == nil { - return allErrs - } - - numSources := 0 - - if ev.ValueFrom.FieldRef != nil { - numSources++ - allErrs = append(allErrs, validateObjectFieldSelector(ev.ValueFrom.FieldRef, &validFieldPathExpressionsEnv, fldPath.Child("fieldRef"))...) - } - if ev.ValueFrom.ResourceFieldRef != nil { - numSources++ - allErrs = append(allErrs, validateContainerResourceFieldSelector(ev.ValueFrom.ResourceFieldRef, &validContainerResourceFieldPathExpressions, fldPath.Child("resourceFieldRef"), false)...) - } - if ev.ValueFrom.ConfigMapKeyRef != nil { - numSources++ - allErrs = append(allErrs, validateConfigMapKeySelector(ev.ValueFrom.ConfigMapKeyRef, fldPath.Child("configMapKeyRef"))...) - } - if ev.ValueFrom.SecretKeyRef != nil { - numSources++ - allErrs = append(allErrs, validateSecretKeySelector(ev.ValueFrom.SecretKeyRef, fldPath.Child("secretKeyRef"))...) - } - - if len(ev.Value) != 0 { - if numSources != 0 { - allErrs = append(allErrs, field.Invalid(fldPath, "", "may not be specified when `value` is not empty")) - } - } else if numSources != 1 { - allErrs = append(allErrs, field.Invalid(fldPath, "", "may not have more than one field specified at a time")) - } - - return allErrs -} - -func validateObjectFieldSelector(fs *api.ObjectFieldSelector, expressions *sets.String, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if len(fs.APIVersion) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("apiVersion"), "")) - } else if len(fs.FieldPath) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("fieldPath"), "")) - } else { - internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "") - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldPath"), fs.FieldPath, fmt.Sprintf("error converting fieldPath: %v", err))) - } else if !expressions.Has(internalFieldPath) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("fieldPath"), internalFieldPath, expressions.List())) - } - } - - return allErrs -} - -func validateContainerResourceFieldSelector(fs *api.ResourceFieldSelector, expressions *sets.String, fldPath *field.Path, volume bool) field.ErrorList { - allErrs := field.ErrorList{} - - if volume && len(fs.ContainerName) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("containerName"), "")) - } else if len(fs.Resource) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("resource"), "")) - } else if !expressions.Has(fs.Resource) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("resource"), fs.Resource, expressions.List())) - } - allErrs = append(allErrs, validateContainerResourceDivisor(fs.Resource, fs.Divisor, fldPath)...) 
- return allErrs -} - -var validContainerResourceDivisorForCPU = sets.NewString("1m", "1") -var validContainerResourceDivisorForMemory = sets.NewString("1", "1k", "1M", "1G", "1T", "1P", "1E", "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei") - -func validateContainerResourceDivisor(rName string, divisor resource.Quantity, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - unsetDivisor := resource.Quantity{} - if unsetDivisor.Cmp(divisor) == 0 { - return allErrs - } - switch rName { - case "limits.cpu", "requests.cpu": - if !validContainerResourceDivisorForCPU.Has(divisor.String()) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1m and 1 are supported with the cpu resource")) - } - case "limits.memory", "requests.memory": - if !validContainerResourceDivisorForMemory.Has(divisor.String()) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1, 1k, 1M, 1G, 1T, 1P, 1E, 1Ki, 1Mi, 1Gi, 1Ti, 1Pi, 1Ei are supported with the memory resource")) - } - } - return allErrs -} - -func validateConfigMapKeySelector(s *api.ConfigMapKeySelector, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if len(s.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) - } - if len(s.Key) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("key"), "")) - } else { - for _, msg := range validation.IsConfigMapKey(s.Key) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, msg)) - } - } - - return allErrs -} - -func validateSecretKeySelector(s *api.SecretKeySelector, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if len(s.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) - } - if len(s.Key) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("key"), "")) - } else { - for _, msg := range validation.IsConfigMapKey(s.Key) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, msg)) - } - } - - return allErrs -} - -func validateVolumeMounts(mounts []api.VolumeMount, volumes sets.String, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - mountpoints := sets.NewString() - - for i, mnt := range mounts { - idxPath := fldPath.Index(i) - if len(mnt.Name) == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("name"), "")) - } else if !volumes.Has(mnt.Name) { - allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), mnt.Name)) - } - if len(mnt.MountPath) == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("mountPath"), "")) - } - if mountpoints.Has(mnt.MountPath) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must be unique")) - } - mountpoints.Insert(mnt.MountPath) - if len(mnt.SubPath) > 0 { - allErrs = append(allErrs, validateLocalDescendingPath(mnt.SubPath, fldPath.Child("subPath"))...) - } - } - return allErrs -} - -func validateProbe(probe *api.Probe, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if probe == nil { - return allErrs - } - allErrs = append(allErrs, validateHandler(&probe.Handler, fldPath)...) - - allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.InitialDelaySeconds), fldPath.Child("initialDelaySeconds"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.TimeoutSeconds), fldPath.Child("timeoutSeconds"))...) 
- allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.PeriodSeconds), fldPath.Child("periodSeconds"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.SuccessThreshold), fldPath.Child("successThreshold"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.FailureThreshold), fldPath.Child("failureThreshold"))...) - return allErrs -} - -// AccumulateUniqueHostPorts extracts each HostPort of each Container, -// accumulating the results and returning an error if any ports conflict. -func AccumulateUniqueHostPorts(containers []api.Container, accumulator *sets.String, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - for ci, ctr := range containers { - idxPath := fldPath.Index(ci) - portsPath := idxPath.Child("ports") - for pi := range ctr.Ports { - idxPath := portsPath.Index(pi) - port := ctr.Ports[pi].HostPort - if port == 0 { - continue - } - str := fmt.Sprintf("%d/%s", port, ctr.Ports[pi].Protocol) - if accumulator.Has(str) { - allErrs = append(allErrs, field.Duplicate(idxPath.Child("hostPort"), str)) - } else { - accumulator.Insert(str) - } - } - } - return allErrs -} - -// checkHostPortConflicts checks for colliding Port.HostPort values across -// a slice of containers. -func checkHostPortConflicts(containers []api.Container, fldPath *field.Path) field.ErrorList { - allPorts := sets.String{} - return AccumulateUniqueHostPorts(containers, &allPorts, fldPath) -} - -func validateExecAction(exec *api.ExecAction, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - if len(exec.Command) == 0 { - allErrors = append(allErrors, field.Required(fldPath.Child("command"), "")) - } - return allErrors -} - -var supportedHTTPSchemes = sets.NewString(string(api.URISchemeHTTP), string(api.URISchemeHTTPS)) - -func validateHTTPGetAction(http *api.HTTPGetAction, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - if len(http.Path) == 0 { - allErrors = append(allErrors, field.Required(fldPath.Child("path"), "")) - } - allErrors = append(allErrors, ValidatePortNumOrName(http.Port, fldPath.Child("port"))...) 
- if !supportedHTTPSchemes.Has(string(http.Scheme)) { - allErrors = append(allErrors, field.NotSupported(fldPath.Child("scheme"), http.Scheme, supportedHTTPSchemes.List())) - } - for _, header := range http.HTTPHeaders { - for _, msg := range validation.IsHTTPHeaderName(header.Name) { - allErrors = append(allErrors, field.Invalid(fldPath.Child("httpHeaders"), header.Name, msg)) - } - } - return allErrors -} - -func ValidatePortNumOrName(port intstr.IntOrString, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if port.Type == intstr.Int { - for _, msg := range validation.IsValidPortNum(port.IntValue()) { - allErrs = append(allErrs, field.Invalid(fldPath, port.IntValue(), msg)) - } - } else if port.Type == intstr.String { - for _, msg := range validation.IsValidPortName(port.StrVal) { - allErrs = append(allErrs, field.Invalid(fldPath, port.StrVal, msg)) - } - } else { - allErrs = append(allErrs, field.InternalError(fldPath, fmt.Errorf("unknown type: %v", port.Type))) - } - return allErrs -} - -func validateTCPSocketAction(tcp *api.TCPSocketAction, fldPath *field.Path) field.ErrorList { - return ValidatePortNumOrName(tcp.Port, fldPath.Child("port")) -} - -func validateHandler(handler *api.Handler, fldPath *field.Path) field.ErrorList { - numHandlers := 0 - allErrors := field.ErrorList{} - if handler.Exec != nil { - if numHandlers > 0 { - allErrors = append(allErrors, field.Forbidden(fldPath.Child("exec"), "may not specify more than 1 handler type")) - } else { - numHandlers++ - allErrors = append(allErrors, validateExecAction(handler.Exec, fldPath.Child("exec"))...) - } - } - if handler.HTTPGet != nil { - if numHandlers > 0 { - allErrors = append(allErrors, field.Forbidden(fldPath.Child("httpGet"), "may not specify more than 1 handler type")) - } else { - numHandlers++ - allErrors = append(allErrors, validateHTTPGetAction(handler.HTTPGet, fldPath.Child("httpGet"))...) - } - } - if handler.TCPSocket != nil { - if numHandlers > 0 { - allErrors = append(allErrors, field.Forbidden(fldPath.Child("tcpSocket"), "may not specify more than 1 handler type")) - } else { - numHandlers++ - allErrors = append(allErrors, validateTCPSocketAction(handler.TCPSocket, fldPath.Child("tcpSocket"))...) - } - } - if numHandlers == 0 { - allErrors = append(allErrors, field.Required(fldPath, "must specify a handler type")) - } - return allErrors -} - -func validateLifecycle(lifecycle *api.Lifecycle, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if lifecycle.PostStart != nil { - allErrs = append(allErrs, validateHandler(lifecycle.PostStart, fldPath.Child("postStart"))...) - } - if lifecycle.PreStop != nil { - allErrs = append(allErrs, validateHandler(lifecycle.PreStop, fldPath.Child("preStop"))...) 
- } - return allErrs -} - -var supportedPullPolicies = sets.NewString(string(api.PullAlways), string(api.PullIfNotPresent), string(api.PullNever)) - -func validatePullPolicy(policy api.PullPolicy, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - - switch policy { - case api.PullAlways, api.PullIfNotPresent, api.PullNever: - break - case "": - allErrors = append(allErrors, field.Required(fldPath, "")) - default: - allErrors = append(allErrors, field.NotSupported(fldPath, policy, supportedPullPolicies.List())) - } - - return allErrors -} - -func validateInitContainers(containers, otherContainers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - if len(containers) > 0 { - allErrs = append(allErrs, validateContainers(containers, volumes, fldPath)...) - } - - allNames := sets.String{} - for _, ctr := range otherContainers { - allNames.Insert(ctr.Name) - } - for i, ctr := range containers { - idxPath := fldPath.Index(i) - if allNames.Has(ctr.Name) { - allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), ctr.Name)) - } - if len(ctr.Name) > 0 { - allNames.Insert(ctr.Name) - } - if ctr.Lifecycle != nil { - allErrs = append(allErrs, field.Invalid(idxPath.Child("lifecycle"), ctr.Lifecycle, "must not be set for init containers")) - } - if ctr.LivenessProbe != nil { - allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe"), ctr.LivenessProbe, "must not be set for init containers")) - } - if ctr.ReadinessProbe != nil { - allErrs = append(allErrs, field.Invalid(idxPath.Child("readinessProbe"), ctr.ReadinessProbe, "must not be set for init containers")) - } - } - return allErrs -} - -func validateContainers(containers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if len(containers) == 0 { - return append(allErrs, field.Required(fldPath, "")) - } - - allNames := sets.String{} - for i, ctr := range containers { - idxPath := fldPath.Index(i) - namePath := idxPath.Child("name") - if len(ctr.Name) == 0 { - allErrs = append(allErrs, field.Required(namePath, "")) - } else { - allErrs = append(allErrs, ValidateDNS1123Label(ctr.Name, namePath)...) - } - if allNames.Has(ctr.Name) { - allErrs = append(allErrs, field.Duplicate(namePath, ctr.Name)) - } else { - allNames.Insert(ctr.Name) - } - if len(ctr.Image) == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("image"), "")) - } - if ctr.Lifecycle != nil { - allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, idxPath.Child("lifecycle"))...) - } - allErrs = append(allErrs, validateProbe(ctr.LivenessProbe, idxPath.Child("livenessProbe"))...) - // Liveness-specific validation - if ctr.LivenessProbe != nil && ctr.LivenessProbe.SuccessThreshold != 1 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe", "successThreshold"), ctr.LivenessProbe.SuccessThreshold, "must be 1")) - } - - allErrs = append(allErrs, validateProbe(ctr.ReadinessProbe, idxPath.Child("readinessProbe"))...) - allErrs = append(allErrs, validateContainerPorts(ctr.Ports, idxPath.Child("ports"))...) - allErrs = append(allErrs, validateEnv(ctr.Env, idxPath.Child("env"))...) - allErrs = append(allErrs, validateVolumeMounts(ctr.VolumeMounts, volumes, idxPath.Child("volumeMounts"))...) - allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, idxPath.Child("imagePullPolicy"))...) - allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, idxPath.Child("resources"))...) 
- allErrs = append(allErrs, ValidateSecurityContext(ctr.SecurityContext, idxPath.Child("securityContext"))...) - } - // Check for colliding ports across all containers. - allErrs = append(allErrs, checkHostPortConflicts(containers, fldPath)...) - - return allErrs -} - -func validateRestartPolicy(restartPolicy *api.RestartPolicy, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - switch *restartPolicy { - case api.RestartPolicyAlways, api.RestartPolicyOnFailure, api.RestartPolicyNever: - break - case "": - allErrors = append(allErrors, field.Required(fldPath, "")) - default: - validValues := []string{string(api.RestartPolicyAlways), string(api.RestartPolicyOnFailure), string(api.RestartPolicyNever)} - allErrors = append(allErrors, field.NotSupported(fldPath, *restartPolicy, validValues)) - } - - return allErrors -} - -func validateDNSPolicy(dnsPolicy *api.DNSPolicy, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - switch *dnsPolicy { - case api.DNSClusterFirst, api.DNSDefault: - break - case "": - allErrors = append(allErrors, field.Required(fldPath, "")) - default: - validValues := []string{string(api.DNSClusterFirst), string(api.DNSDefault)} - allErrors = append(allErrors, field.NotSupported(fldPath, dnsPolicy, validValues)) - } - return allErrors -} - -func validateHostNetwork(hostNetwork bool, containers []api.Container, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - if hostNetwork { - for i, container := range containers { - portsPath := fldPath.Index(i).Child("ports") - for i, port := range container.Ports { - idxPath := portsPath.Index(i) - if port.HostPort != port.ContainerPort { - allErrors = append(allErrors, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, "must match `hostPort` when `hostNetwork` is true")) - } - } - } - } - return allErrors -} - -// validateImagePullSecrets checks to make sure the pull secrets are well -// formed. Right now, we only expect name to be set (it's the only field). If -// this ever changes and someone decides to set those fields, we'd like to -// know. -func validateImagePullSecrets(imagePullSecrets []api.LocalObjectReference, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - for i, currPullSecret := range imagePullSecrets { - idxPath := fldPath.Index(i) - strippedRef := api.LocalObjectReference{Name: currPullSecret.Name} - if !reflect.DeepEqual(strippedRef, currPullSecret) { - allErrors = append(allErrors, field.Invalid(idxPath, currPullSecret, "only name may be set")) - } - } - return allErrors -} - -func validateTaintEffect(effect *api.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList { - if !allowEmpty && len(*effect) == 0 { - return field.ErrorList{field.Required(fldPath, "")} - } - - allErrors := field.ErrorList{} - switch *effect { - // TODO: Replace next line with subsequent commented-out line when implement TaintEffectNoScheduleNoAdmit, TaintEffectNoScheduleNoAdmitNoExecute. - case api.TaintEffectNoSchedule, api.TaintEffectPreferNoSchedule: - // case api.TaintEffectNoSchedule, api.TaintEffectPreferNoSchedule, api.TaintEffectNoScheduleNoAdmit, api.TaintEffectNoScheduleNoAdmitNoExecute: - default: - validValues := []string{ - string(api.TaintEffectNoSchedule), - string(api.TaintEffectPreferNoSchedule), - // TODO: Uncomment this block when implement TaintEffectNoScheduleNoAdmit, TaintEffectNoScheduleNoAdmitNoExecute. 
- // string(api.TaintEffectNoScheduleNoAdmit), - // string(api.TaintEffectNoScheduleNoAdmitNoExecute), - } - allErrors = append(allErrors, field.NotSupported(fldPath, effect, validValues)) - } - return allErrors -} - -// validateTolerations tests if given tolerations have valid data. -func validateTolerations(tolerations []api.Toleration, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - for i, toleration := range tolerations { - idxPath := fldPath.Index(i) - // validate the toleration key - allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(toleration.Key, idxPath.Child("key"))...) - - // validate toleration operator and value - switch toleration.Operator { - case api.TolerationOpEqual, "": - if errs := validation.IsValidLabelValue(toleration.Value); len(errs) != 0 { - allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Value, strings.Join(errs, ";"))) - } - case api.TolerationOpExists: - if len(toleration.Value) > 0 { - allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration, "value must be empty when `operator` is 'Exists'")) - } - default: - validValues := []string{string(api.TolerationOpEqual), string(api.TolerationOpExists)} - allErrors = append(allErrors, field.NotSupported(idxPath.Child("operator"), toleration.Operator, validValues)) - } - - // validate toleration effect - if len(toleration.Effect) > 0 { - allErrors = append(allErrors, validateTaintEffect(&toleration.Effect, true, idxPath.Child("effect"))...) - } - } - return allErrors -} - -// ValidatePod tests if required fields in the pod are set. -func ValidatePod(pod *api.Pod) field.ErrorList { - fldPath := field.NewPath("metadata") - allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, fldPath) - allErrs = append(allErrs, ValidatePodSpecificAnnotations(pod.ObjectMeta.Annotations, &pod.Spec, fldPath.Child("annotations"))...) - allErrs = append(allErrs, ValidatePodSpec(&pod.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidatePodSpec tests that the specified PodSpec has valid data. -// This includes checking formatting and uniqueness. It also canonicalizes the -// structure by setting default values and implementing any backwards-compatibility -// tricks. -func ValidatePodSpec(spec *api.PodSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - allVolumes, vErrs := validateVolumes(spec.Volumes, fldPath.Child("volumes")) - allErrs = append(allErrs, vErrs...) - allErrs = append(allErrs, validateContainers(spec.Containers, allVolumes, fldPath.Child("containers"))...) - allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, allVolumes, fldPath.Child("initContainers"))...) - allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...) - allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...) - allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...) - allErrs = append(allErrs, ValidatePodSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"))...) - allErrs = append(allErrs, validateImagePullSecrets(spec.ImagePullSecrets, fldPath.Child("imagePullSecrets"))...) 
- if len(spec.ServiceAccountName) > 0 { - for _, msg := range ValidateServiceAccountName(spec.ServiceAccountName, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceAccountName"), spec.ServiceAccountName, msg)) - } - } - - if len(spec.NodeName) > 0 { - for _, msg := range ValidateNodeName(spec.NodeName, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), spec.NodeName, msg)) - } - } - - if spec.ActiveDeadlineSeconds != nil { - if *spec.ActiveDeadlineSeconds <= 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("activeDeadlineSeconds"), spec.ActiveDeadlineSeconds, "must be greater than 0")) - } - } - - if len(spec.Hostname) > 0 { - allErrs = append(allErrs, ValidateDNS1123Label(spec.Hostname, fldPath.Child("hostname"))...) - } - - if len(spec.Subdomain) > 0 { - allErrs = append(allErrs, ValidateDNS1123Label(spec.Subdomain, fldPath.Child("subdomain"))...) - } - - return allErrs -} - -// ValidateNodeSelectorRequirement tests that the specified NodeSelectorRequirement fields has valid data -func ValidateNodeSelectorRequirement(rq api.NodeSelectorRequirement, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - switch rq.Operator { - case api.NodeSelectorOpIn, api.NodeSelectorOpNotIn: - if len(rq.Values) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'")) - } - case api.NodeSelectorOpExists, api.NodeSelectorOpDoesNotExist: - if len(rq.Values) > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) - } - - case api.NodeSelectorOpGt, api.NodeSelectorOpLt: - if len(rq.Values) != 1 { - allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified single value when `operator` is 'Lt' or 'Gt'")) - } - default: - allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), rq.Operator, "not a valid selector operator")) - } - allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(rq.Key, fldPath.Child("key"))...) - return allErrs -} - -// ValidateNodeSelectorTerm tests that the specified node selector term has valid data -func ValidateNodeSelectorTerm(term api.NodeSelectorTerm, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if len(term.MatchExpressions) == 0 { - return append(allErrs, field.Required(fldPath.Child("matchExpressions"), "must have at least one node selector requirement")) - } - for j, req := range term.MatchExpressions { - allErrs = append(allErrs, ValidateNodeSelectorRequirement(req, fldPath.Child("matchExpressions").Index(j))...) - } - return allErrs -} - -// ValidateNodeSelector tests that the specified nodeSelector fields has valid data -func ValidateNodeSelector(nodeSelector *api.NodeSelector, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - termFldPath := fldPath.Child("nodeSelectorTerms") - if len(nodeSelector.NodeSelectorTerms) == 0 { - return append(allErrs, field.Required(termFldPath, "must have at least one node selector term")) - } - - for i, term := range nodeSelector.NodeSelectorTerms { - allErrs = append(allErrs, ValidateNodeSelectorTerm(term, termFldPath.Index(i))...) 
- } - - return allErrs -} - -// ValidateAvoidPodsInNodeAnnotations tests that the serialized AvoidPods in Node.Annotations has valid data -func ValidateAvoidPodsInNodeAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - avoids, err := api.GetAvoidPodsFromNodeAnnotations(annotations) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), api.PreferAvoidPodsAnnotationKey, err.Error())) - return allErrs - } - - if len(avoids.PreferAvoidPods) != 0 { - for i, pa := range avoids.PreferAvoidPods { - idxPath := fldPath.Child(api.PreferAvoidPodsAnnotationKey).Index(i) - allErrs = append(allErrs, validatePreferAvoidPodsEntry(pa, idxPath)...) - } - } - - return allErrs -} - -// validatePreferAvoidPodsEntry tests if given PreferAvoidPodsEntry has valid data. -func validatePreferAvoidPodsEntry(avoidPodEntry api.PreferAvoidPodsEntry, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - if avoidPodEntry.PodSignature.PodController == nil { - allErrors = append(allErrors, field.Required(fldPath.Child("PodSignature"), "")) - } else { - if *(avoidPodEntry.PodSignature.PodController.Controller) != true { - allErrors = append(allErrors, - field.Invalid(fldPath.Child("PodSignature").Child("PodController").Child("Controller"), - *(avoidPodEntry.PodSignature.PodController.Controller), "must point to a controller")) - } - } - return allErrors -} - -// ValidatePreferredSchedulingTerms tests that the specified SoftNodeAffinity fields has valid data -func ValidatePreferredSchedulingTerms(terms []api.PreferredSchedulingTerm, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - for i, term := range terms { - if term.Weight <= 0 || term.Weight > 100 { - allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("weight"), term.Weight, "must be in the range 1-100")) - } - - allErrs = append(allErrs, ValidateNodeSelectorTerm(term.Preference, fldPath.Index(i).Child("preference"))...) - } - return allErrs -} - -// validatePodAffinityTerm tests that the specified podAffinityTerm fields have valid data -func validatePodAffinityTerm(podAffinityTerm api.PodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, fldPath.Child("matchExpressions"))...) - for _, name := range podAffinityTerm.Namespaces { - for _, msg := range ValidateNamespaceName(name, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), name, msg)) - } - } - if !allowEmptyTopologyKey && len(podAffinityTerm.TopologyKey) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("topologyKey"), "can only be empty for PreferredDuringScheduling pod anti affinity")) - } - if len(podAffinityTerm.TopologyKey) != 0 { - allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(podAffinityTerm.TopologyKey, fldPath.Child("topologyKey"))...) - } - return allErrs -} - -// validatePodAffinityTerms tests that the specified podAffinityTerms fields have valid data -func validatePodAffinityTerms(podAffinityTerms []api.PodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for i, podAffinityTerm := range podAffinityTerms { - allErrs = append(allErrs, validatePodAffinityTerm(podAffinityTerm, allowEmptyTopologyKey, fldPath.Index(i))...) 
- } - return allErrs -} - -// validateWeightedPodAffinityTerms tests that the specified weightedPodAffinityTerms fields have valid data -func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []api.WeightedPodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for j, weightedTerm := range weightedPodAffinityTerms { - if weightedTerm.Weight <= 0 || weightedTerm.Weight > 100 { - allErrs = append(allErrs, field.Invalid(fldPath.Index(j).Child("weight"), weightedTerm.Weight, "must be in the range 1-100")) - } - allErrs = append(allErrs, validatePodAffinityTerm(weightedTerm.PodAffinityTerm, allowEmptyTopologyKey, fldPath.Index(j).Child("podAffinityTerm"))...) - } - return allErrs -} - -// validatePodAntiAffinity tests that the specified podAntiAffinity fields have valid data -func validatePodAntiAffinity(podAntiAffinity *api.PodAntiAffinity, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - // TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented. - // if podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil { - // allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution, false, - // fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) - //} - if podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { - // empty topologyKey is not allowed for hard pod anti-affinity - allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, false, - fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) - } - if podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil { - // empty topologyKey is allowed for soft pod anti-affinity - allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, true, - fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) - } - return allErrs -} - -// validatePodAffinity tests that the specified podAffinity fields have valid data -func validatePodAffinity(podAffinity *api.PodAffinity, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - // TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented. - // if podAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil { - // allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingRequiredDuringExecution, false, - // fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) - //} - if podAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { - // empty topologyKey is not allowed for hard pod affinity - allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution, false, - fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) - } - if podAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil { - // empty topologyKey is not allowed for soft pod affinity - allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAffinity.PreferredDuringSchedulingIgnoredDuringExecution, false, - fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) 
- } - return allErrs -} - -// ValidateAffinityInPodAnnotations tests that the serialized Affinity in Pod.Annotations has valid data -func ValidateAffinityInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - affinity, err := api.GetAffinityFromPodAnnotations(annotations) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath, api.AffinityAnnotationKey, err.Error())) - return allErrs - } - if affinity == nil { - return allErrs - } - - affinityFldPath := fldPath.Child(api.AffinityAnnotationKey) - if affinity.NodeAffinity != nil { - na := affinity.NodeAffinity - naFldPath := affinityFldPath.Child("nodeAffinity") - // TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented. - // if na.RequiredDuringSchedulingRequiredDuringExecution != nil { - // allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, naFldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) - // } - - if na.RequiredDuringSchedulingIgnoredDuringExecution != nil { - allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, naFldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) - } - - if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, naFldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) - } - } - if affinity.PodAffinity != nil { - allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, affinityFldPath.Child("podAffinity"))...) - } - if affinity.PodAntiAffinity != nil { - allErrs = append(allErrs, validatePodAntiAffinity(affinity.PodAntiAffinity, affinityFldPath.Child("podAntiAffinity"))...) - } - - return allErrs -} - -// ValidateTolerationsInPodAnnotations tests that the serialized tolerations in Pod.Annotations has valid data -func ValidateTolerationsInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - tolerations, err := api.GetTolerationsFromPodAnnotations(annotations) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath, api.TolerationsAnnotationKey, err.Error())) - return allErrs - } - if len(tolerations) > 0 { - allErrs = append(allErrs, validateTolerations(tolerations, fldPath.Child(api.TolerationsAnnotationKey))...) - } - - return allErrs -} - -func ValidateSeccompProfile(p string, fldPath *field.Path) field.ErrorList { - if p == "docker/default" { - return nil - } - if p == "unconfined" { - return nil - } - if strings.HasPrefix(p, "localhost/") { - return validateLocalDescendingPath(strings.TrimPrefix(p, "localhost/"), fldPath) - } - return field.ErrorList{field.Invalid(fldPath, p, "must be a valid seccomp profile")} -} - -func ValidateSeccompPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if p, exists := annotations[api.SeccompPodAnnotationKey]; exists { - allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(api.SeccompPodAnnotationKey))...) - } - for k, p := range annotations { - if strings.HasPrefix(k, api.SeccompContainerAnnotationKeyPrefix) { - allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(k))...) 
- } - } - - return allErrs -} - -func ValidateAppArmorPodAnnotations(annotations map[string]string, spec *api.PodSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for k, p := range annotations { - if !strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) { - continue - } - if !utilconfig.DefaultFeatureGate.AppArmor() { - allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "AppArmor is disabled by feature-gate")) - continue - } - containerName := strings.TrimPrefix(k, apparmor.ContainerAnnotationKeyPrefix) - if !podSpecHasContainer(spec, containerName) { - allErrs = append(allErrs, field.Invalid(fldPath.Key(k), containerName, "container not found")) - } - - if err := apparmor.ValidateProfileFormat(p); err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Key(k), p, err.Error())) - } - } - - return allErrs -} - -func podSpecHasContainer(spec *api.PodSpec, containerName string) bool { - for _, c := range spec.InitContainers { - if c.Name == containerName { - return true - } - } - for _, c := range spec.Containers { - if c.Name == containerName { - return true - } - } - return false -} - -const ( - // a sysctl segment regex, concatenated with dots to form a sysctl name - SysctlSegmentFmt string = "[a-z0-9]([-_a-z0-9]*[a-z0-9])?" - - // a sysctl name regex - SysctlFmt string = "(" + SysctlSegmentFmt + "\\.)*" + SysctlSegmentFmt - - // the maximal length of a sysctl name - SysctlMaxLength int = 253 -) - -var sysctlRegexp = regexp.MustCompile("^" + SysctlFmt + "$") - -// IsValidSysctlName checks that the given string is a valid sysctl name, -// i.e. matches SysctlFmt. -func IsValidSysctlName(name string) bool { - if len(name) > SysctlMaxLength { - return false - } - return sysctlRegexp.MatchString(name) -} - -func validateSysctls(sysctls []api.Sysctl, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for i, s := range sysctls { - if len(s.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("name"), "")) - } else if !IsValidSysctlName(s.Name) { - allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("name"), s.Name, fmt.Sprintf("must have at most %d characters and match regex %s", SysctlMaxLength, SysctlFmt))) - } - } - return allErrs -} - -// ValidatePodSecurityContext test that the specified PodSecurityContext has valid data. -func ValidatePodSecurityContext(securityContext *api.PodSecurityContext, spec *api.PodSpec, specPath, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if securityContext != nil { - allErrs = append(allErrs, validateHostNetwork(securityContext.HostNetwork, spec.Containers, specPath.Child("containers"))...) 
- if securityContext.FSGroup != nil { - for _, msg := range validation.IsValidGroupId(*securityContext.FSGroup) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("fsGroup"), *(securityContext.FSGroup), msg)) - } - } - if securityContext.RunAsUser != nil { - for _, msg := range validation.IsValidUserId(*securityContext.RunAsUser) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *(securityContext.RunAsUser), msg)) - } - } - for g, gid := range securityContext.SupplementalGroups { - for _, msg := range validation.IsValidGroupId(gid) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("supplementalGroups").Index(g), gid, msg)) - } - } - } - - return allErrs -} - -func ValidateContainerUpdates(newContainers, oldContainers []api.Container, fldPath *field.Path) (allErrs field.ErrorList, stop bool) { - allErrs = field.ErrorList{} - if len(newContainers) != len(oldContainers) { - //TODO: Pinpoint the specific container that causes the invalid error after we have strategic merge diff - allErrs = append(allErrs, field.Forbidden(fldPath, "pod updates may not add or remove containers")) - return allErrs, true - } - - // validate updated container images - for i, ctr := range newContainers { - if len(ctr.Image) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("image"), "")) - } - } - return allErrs, false -} - -// ValidatePodUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields -// that cannot be changed. -func ValidatePodUpdate(newPod, oldPod *api.Pod) field.ErrorList { - fldPath := field.NewPath("metadata") - allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath) - allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"))...) - specPath := field.NewPath("spec") - - // validate updateable fields: - // 1. containers[*].image - // 2. initContainers[*].image - // 3. spec.activeDeadlineSeconds - - containerErrs, stop := ValidateContainerUpdates(newPod.Spec.Containers, oldPod.Spec.Containers, specPath.Child("containers")) - allErrs = append(allErrs, containerErrs...) - if stop { - return allErrs - } - containerErrs, stop = ValidateContainerUpdates(newPod.Spec.InitContainers, oldPod.Spec.InitContainers, specPath.Child("initContainers")) - allErrs = append(allErrs, containerErrs...) - if stop { - return allErrs - } - - // validate updated spec.activeDeadlineSeconds. two types of updates are allowed: - // 1. from nil to a positive value - // 2. 
from a positive value to a lesser, non-negative value - if newPod.Spec.ActiveDeadlineSeconds != nil { - newActiveDeadlineSeconds := *newPod.Spec.ActiveDeadlineSeconds - if newActiveDeadlineSeconds < 0 { - allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newActiveDeadlineSeconds, isNegativeErrorMsg)) - return allErrs - } - if oldPod.Spec.ActiveDeadlineSeconds != nil { - oldActiveDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds - if oldActiveDeadlineSeconds < newActiveDeadlineSeconds { - allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newActiveDeadlineSeconds, "must be less than or equal to previous value")) - return allErrs - } - } - } else if oldPod.Spec.ActiveDeadlineSeconds != nil { - allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newPod.Spec.ActiveDeadlineSeconds, "must not update from a positive integer to nil value")) - } - - // handle updateable fields by munging those fields prior to deep equal comparison. - mungedPod := *newPod - // munge containers[*].image - var newContainers []api.Container - for ix, container := range mungedPod.Spec.Containers { - container.Image = oldPod.Spec.Containers[ix].Image - newContainers = append(newContainers, container) - } - mungedPod.Spec.Containers = newContainers - // munge initContainers[*].image - var newInitContainers []api.Container - for ix, container := range mungedPod.Spec.InitContainers { - container.Image = oldPod.Spec.InitContainers[ix].Image - newInitContainers = append(newInitContainers, container) - } - mungedPod.Spec.InitContainers = newInitContainers - // munge spec.activeDeadlineSeconds - mungedPod.Spec.ActiveDeadlineSeconds = nil - if oldPod.Spec.ActiveDeadlineSeconds != nil { - activeDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds - mungedPod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds - } - if !api.Semantic.DeepEqual(mungedPod.Spec, oldPod.Spec) { - //TODO: Pinpoint the specific field that causes the invalid error after we have strategic merge diff - allErrs = append(allErrs, field.Forbidden(specPath, "pod updates may not change fields other than `containers[*].image` or `spec.activeDeadlineSeconds`")) - } - - return allErrs -} - -// ValidatePodStatusUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields -// that cannot be changed. -func ValidatePodStatusUpdate(newPod, oldPod *api.Pod) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, field.NewPath("metadata")) - - // TODO: allow change when bindings are properly decoupled from pods - if newPod.Spec.NodeName != oldPod.Spec.NodeName { - allErrs = append(allErrs, field.Forbidden(field.NewPath("status", "nodeName"), "may not be changed directly")) - } - - // For status update we ignore changes to pod spec. - newPod.Spec = oldPod.Spec - - return allErrs -} - -// ValidatePodBinding tests if required fields in the pod binding are legal. -func ValidatePodBinding(binding *api.Binding) field.ErrorList { - allErrs := field.ErrorList{} - - if len(binding.Target.Kind) != 0 && binding.Target.Kind != "Node" { - // TODO: When validation becomes versioned, this gets more complicated. - allErrs = append(allErrs, field.NotSupported(field.NewPath("target", "kind"), binding.Target.Kind, []string{"Node", ""})) - } - if len(binding.Target.Name) == 0 { - // TODO: When validation becomes versioned, this gets more complicated. 
- allErrs = append(allErrs, field.Required(field.NewPath("target", "name"), "")) - } - - return allErrs -} - -// ValidatePodTemplate tests if required fields in the pod template are set. -func ValidatePodTemplate(pod *api.PodTemplate) field.ErrorList { - allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidatePodTemplateSpec(&pod.Template, field.NewPath("template"))...) - return allErrs -} - -// ValidatePodTemplateUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields -// that cannot be changed. -func ValidatePodTemplateUpdate(newPod, oldPod *api.PodTemplate) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&oldPod.ObjectMeta, &newPod.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidatePodTemplateSpec(&newPod.Template, field.NewPath("template"))...) - return allErrs -} - -var supportedSessionAffinityType = sets.NewString(string(api.ServiceAffinityClientIP), string(api.ServiceAffinityNone)) -var supportedServiceType = sets.NewString(string(api.ServiceTypeClusterIP), string(api.ServiceTypeNodePort), - string(api.ServiceTypeLoadBalancer), string(api.ServiceTypeExternalName)) - -// ValidateService tests if required fields/annotations of a Service are valid. -func ValidateService(service *api.Service) field.ErrorList { - allErrs := validateServiceFields(service) - allErrs = append(allErrs, validateServiceAnnotations(service, nil)...) - return allErrs -} - -// validateServiceFields tests if required fields in the service are set. -func validateServiceFields(service *api.Service) field.ErrorList { - allErrs := ValidateObjectMeta(&service.ObjectMeta, true, ValidateServiceName, field.NewPath("metadata")) - - specPath := field.NewPath("spec") - isHeadlessService := service.Spec.ClusterIP == api.ClusterIPNone - if len(service.Spec.Ports) == 0 && !isHeadlessService && service.Spec.Type != api.ServiceTypeExternalName { - allErrs = append(allErrs, field.Required(specPath.Child("ports"), "")) - } - switch service.Spec.Type { - case api.ServiceTypeLoadBalancer: - for ix := range service.Spec.Ports { - port := &service.Spec.Ports[ix] - // This is a workaround for broken cloud environments that - // over-open firewalls. Hopefully it can go away when more clouds - // understand containers better. - if port.Port == 10250 { - portPath := specPath.Child("ports").Index(ix) - allErrs = append(allErrs, field.Invalid(portPath, port.Port, "may not expose port 10250 externally since it is used by kubelet")) - } - } - if service.Spec.ClusterIP == "None" { - allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "may not be set to 'None' for LoadBalancer services")) - } - case api.ServiceTypeExternalName: - if service.Spec.ClusterIP != "" { - allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "must be empty for ExternalName services")) - } - if len(service.Spec.ExternalName) > 0 { - allErrs = append(allErrs, ValidateDNS1123Subdomain(service.Spec.ExternalName, specPath.Child("externalName"))...) - } else { - allErrs = append(allErrs, field.Required(specPath.Child("externalName"), "")) - } - } - - allPortNames := sets.String{} - portsPath := specPath.Child("ports") - for i := range service.Spec.Ports { - portPath := portsPath.Index(i) - allErrs = append(allErrs, validateServicePort(&service.Spec.Ports[i], len(service.Spec.Ports) > 1, isHeadlessService, &allPortNames, portPath)...) 
- } - - if service.Spec.Selector != nil { - allErrs = append(allErrs, unversionedvalidation.ValidateLabels(service.Spec.Selector, specPath.Child("selector"))...) - } - - if len(service.Spec.SessionAffinity) == 0 { - allErrs = append(allErrs, field.Required(specPath.Child("sessionAffinity"), "")) - } else if !supportedSessionAffinityType.Has(string(service.Spec.SessionAffinity)) { - allErrs = append(allErrs, field.NotSupported(specPath.Child("sessionAffinity"), service.Spec.SessionAffinity, supportedSessionAffinityType.List())) - } - - if api.IsServiceIPSet(service) { - if ip := net.ParseIP(service.Spec.ClusterIP); ip == nil { - allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "must be empty, 'None', or a valid IP address")) - } - } - - ipPath := specPath.Child("externalIPs") - for i, ip := range service.Spec.ExternalIPs { - idxPath := ipPath.Index(i) - if msgs := validation.IsValidIP(ip); len(msgs) != 0 { - for i := range msgs { - allErrs = append(allErrs, field.Invalid(idxPath, ip, msgs[i])) - } - } else { - allErrs = append(allErrs, validateNonSpecialIP(ip, idxPath)...) - } - } - - if len(service.Spec.Type) == 0 { - allErrs = append(allErrs, field.Required(specPath.Child("type"), "")) - } else if !supportedServiceType.Has(string(service.Spec.Type)) { - allErrs = append(allErrs, field.NotSupported(specPath.Child("type"), service.Spec.Type, supportedServiceType.List())) - } - - if service.Spec.Type == api.ServiceTypeLoadBalancer { - portsPath := specPath.Child("ports") - includeProtocols := sets.NewString() - for i := range service.Spec.Ports { - portPath := portsPath.Index(i) - if !supportedPortProtocols.Has(string(service.Spec.Ports[i].Protocol)) { - allErrs = append(allErrs, field.Invalid(portPath.Child("protocol"), service.Spec.Ports[i].Protocol, "cannot create an external load balancer with non-TCP/UDP ports")) - } else { - includeProtocols.Insert(string(service.Spec.Ports[i].Protocol)) - } - } - if includeProtocols.Len() > 1 { - allErrs = append(allErrs, field.Invalid(portsPath, service.Spec.Ports, "cannot create an external load balancer with mix protocols")) - } - } - - if service.Spec.Type == api.ServiceTypeClusterIP { - portsPath := specPath.Child("ports") - for i := range service.Spec.Ports { - portPath := portsPath.Index(i) - if service.Spec.Ports[i].NodePort != 0 { - allErrs = append(allErrs, field.Invalid(portPath.Child("nodePort"), service.Spec.Ports[i].NodePort, "may not be used when `type` is 'ClusterIP'")) - } - } - } - - // Check for duplicate NodePorts, considering (protocol,port) pairs - portsPath = specPath.Child("ports") - nodePorts := make(map[api.ServicePort]bool) - for i := range service.Spec.Ports { - port := &service.Spec.Ports[i] - if port.NodePort == 0 { - continue - } - portPath := portsPath.Index(i) - var key api.ServicePort - key.Protocol = port.Protocol - key.NodePort = port.NodePort - _, found := nodePorts[key] - if found { - allErrs = append(allErrs, field.Duplicate(portPath.Child("nodePort"), port.NodePort)) - } - nodePorts[key] = true - } - - // Validate SourceRange field and annotation - _, ok := service.Annotations[apiservice.AnnotationLoadBalancerSourceRangesKey] - if len(service.Spec.LoadBalancerSourceRanges) > 0 || ok { - var fieldPath *field.Path - var val string - if len(service.Spec.LoadBalancerSourceRanges) > 0 { - fieldPath = specPath.Child("LoadBalancerSourceRanges") - val = fmt.Sprintf("%v", service.Spec.LoadBalancerSourceRanges) - } else { - fieldPath = field.NewPath("metadata", 
"annotations").Key(apiservice.AnnotationLoadBalancerSourceRangesKey) - val = service.Annotations[apiservice.AnnotationLoadBalancerSourceRangesKey] - } - if service.Spec.Type != api.ServiceTypeLoadBalancer { - allErrs = append(allErrs, field.Invalid(fieldPath, "", "may only be used when `type` is 'LoadBalancer'")) - } - _, err := apiservice.GetLoadBalancerSourceRanges(service) - if err != nil { - allErrs = append(allErrs, field.Invalid(fieldPath, val, "must be a list of IP ranges. For example, 10.240.0.0/24,10.250.0.0/24 ")) - } - } - return allErrs -} - -func validateServicePort(sp *api.ServicePort, requireName, isHeadlessService bool, allNames *sets.String, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if requireName && len(sp.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) - } else if len(sp.Name) != 0 { - allErrs = append(allErrs, ValidateDNS1123Label(sp.Name, fldPath.Child("name"))...) - if allNames.Has(sp.Name) { - allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), sp.Name)) - } else { - allNames.Insert(sp.Name) - } - } - - for _, msg := range validation.IsValidPortNum(int(sp.Port)) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), sp.Port, msg)) - } - - if len(sp.Protocol) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), "")) - } else if !supportedPortProtocols.Has(string(sp.Protocol)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), sp.Protocol, supportedPortProtocols.List())) - } - - allErrs = append(allErrs, ValidatePortNumOrName(sp.TargetPort, fldPath.Child("targetPort"))...) - - // in the v1 API, targetPorts on headless services were tolerated. - // once we have version-specific validation, we can reject this on newer API versions, but until then, we have to tolerate it for compatibility. - // - // if isHeadlessService { - // if sp.TargetPort.Type == intstr.String || (sp.TargetPort.Type == intstr.Int && sp.Port != sp.TargetPort.IntValue()) { - // allErrs = append(allErrs, field.Invalid(fldPath.Child("targetPort"), sp.TargetPort, "must be equal to the value of 'port' when clusterIP = None")) - // } - // } - - return allErrs -} - -func validateServiceAnnotations(service *api.Service, oldService *api.Service) (allErrs field.ErrorList) { - // 2 annotations went from alpha to beta in 1.5: healthcheck-nodeport and - // external-traffic. The user cannot mix these. All updates to the alpha - // annotation are disallowed. The user must change both alpha annotations - // to beta before making any modifications, even though the system continues - // to respect the alpha version. 
- hcAlpha, healthCheckAlphaOk := service.Annotations[apiservice.AlphaAnnotationHealthCheckNodePort] - onlyLocalAlpha, onlyLocalAlphaOk := service.Annotations[apiservice.AlphaAnnotationExternalTraffic] - - _, healthCheckBetaOk := service.Annotations[apiservice.BetaAnnotationHealthCheckNodePort] - _, onlyLocalBetaOk := service.Annotations[apiservice.BetaAnnotationExternalTraffic] - - var oldHealthCheckAlpha, oldOnlyLocalAlpha string - var oldHealthCheckAlphaOk, oldOnlyLocalAlphaOk bool - if oldService != nil { - oldHealthCheckAlpha, oldHealthCheckAlphaOk = oldService.Annotations[apiservice.AlphaAnnotationHealthCheckNodePort] - oldOnlyLocalAlpha, oldOnlyLocalAlphaOk = oldService.Annotations[apiservice.AlphaAnnotationExternalTraffic] - } - hcValueChanged := oldHealthCheckAlphaOk && healthCheckAlphaOk && oldHealthCheckAlpha != hcAlpha - hcValueNew := !oldHealthCheckAlphaOk && healthCheckAlphaOk - hcValueGone := !healthCheckAlphaOk && !healthCheckBetaOk && oldHealthCheckAlphaOk - onlyLocalHCMismatch := onlyLocalBetaOk && healthCheckAlphaOk - - // On upgrading to a 1.5 cluster, the user is locked in at the current - // alpha setting, till they modify the Service such that the pair of - // annotations are both beta. Basically this means we need to: - // Disallow updates to the alpha annotation. - // Disallow creating a Service with the alpha annotation. - // Disallow removing both alpha annotations. Removing the health-check - // annotation is rejected at a later stage anyway, so if we allow removing - // just onlyLocal we might leak the port. - // Disallow a single field from transitioning to beta. Mismatched annotations - // cause confusion. - // Ignore changes to the fields if they're both transitioning to beta. - // Allow modifications to Services in fields other than the alpha annotation. 
- - if hcValueNew || hcValueChanged || hcValueGone || onlyLocalHCMismatch { - fieldPath := field.NewPath("metadata", "annotations").Key(apiservice.AlphaAnnotationHealthCheckNodePort) - msg := fmt.Sprintf("please replace the alpha annotation with the beta version %v", - apiservice.BetaAnnotationHealthCheckNodePort) - allErrs = append(allErrs, field.Invalid(fieldPath, apiservice.AlphaAnnotationHealthCheckNodePort, msg)) - } - - onlyLocalValueChanged := oldOnlyLocalAlphaOk && onlyLocalAlphaOk && oldOnlyLocalAlpha != onlyLocalAlpha - onlyLocalValueNew := !oldOnlyLocalAlphaOk && onlyLocalAlphaOk - onlyLocalValueGone := !onlyLocalAlphaOk && !onlyLocalBetaOk && oldOnlyLocalAlphaOk - hcOnlyLocalMismatch := onlyLocalAlphaOk && healthCheckBetaOk - - if onlyLocalValueNew || onlyLocalValueChanged || onlyLocalValueGone || hcOnlyLocalMismatch { - fieldPath := field.NewPath("metadata", "annotations").Key(apiservice.AlphaAnnotationExternalTraffic) - msg := fmt.Sprintf("please replace the alpha annotation with the beta version %v", - apiservice.BetaAnnotationExternalTraffic) - allErrs = append(allErrs, field.Invalid(fieldPath, apiservice.AlphaAnnotationExternalTraffic, msg)) - } - return -} - -// ValidateServiceUpdate tests if required fields in the service are set during an update -func ValidateServiceUpdate(service, oldService *api.Service) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata")) - - // ClusterIP should be immutable for services using it (every type other than ExternalName) - // which do not have ClusterIP assigned yet (empty string value) - if service.Spec.Type != api.ServiceTypeExternalName { - if oldService.Spec.Type != api.ServiceTypeExternalName && oldService.Spec.ClusterIP != "" { - allErrs = append(allErrs, ValidateImmutableField(service.Spec.ClusterIP, oldService.Spec.ClusterIP, field.NewPath("spec", "clusterIP"))...) - } - } - - allErrs = append(allErrs, validateServiceFields(service)...) - allErrs = append(allErrs, validateServiceAnnotations(service, oldService)...) - return allErrs -} - -// ValidateServiceStatusUpdate tests if required fields in the Service are set when updating status. -func ValidateServiceStatusUpdate(service, oldService *api.Service) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateLoadBalancerStatus(&service.Status.LoadBalancer, field.NewPath("status", "loadBalancer"))...) - return allErrs -} - -// ValidateReplicationController tests if required fields in the replication controller are set. -func ValidateReplicationController(controller *api.ReplicationController) field.ErrorList { - allErrs := ValidateObjectMeta(&controller.ObjectMeta, true, ValidateReplicationControllerName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidateReplicationControllerUpdate tests if required fields in the replication controller are set. -func ValidateReplicationControllerUpdate(controller, oldController *api.ReplicationController) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"))...) 
- return allErrs -} - -// ValidateReplicationControllerStatusUpdate tests if required fields in the replication controller are set. -func ValidateReplicationControllerStatusUpdate(controller, oldController *api.ReplicationController) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata")) - statusPath := field.NewPath("status") - allErrs = append(allErrs, ValidateNonnegativeField(int64(controller.Status.Replicas), statusPath.Child("replicas"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(controller.Status.FullyLabeledReplicas), statusPath.Child("fullyLabeledReplicas"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(controller.Status.ReadyReplicas), statusPath.Child("readyReplicas"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(controller.Status.AvailableReplicas), statusPath.Child("availableReplicas"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(controller.Status.ObservedGeneration), statusPath.Child("observedGeneration"))...) - return allErrs -} - -// Validates that the given selector is non-empty. -func ValidateNonEmptySelector(selectorMap map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - selector := labels.Set(selectorMap).AsSelector() - if selector.Empty() { - allErrs = append(allErrs, field.Required(fldPath, "")) - } - return allErrs -} - -// Validates the given template and ensures that it is in accordance with the desired selector and replicas. -func ValidatePodTemplateSpecForRC(template *api.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if template == nil { - allErrs = append(allErrs, field.Required(fldPath, "")) - } else { - selector := labels.Set(selectorMap).AsSelector() - if !selector.Empty() { - // Verify that the RC selector matches the labels in template. - labels := labels.Set(template.Labels) - if !selector.Matches(labels) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`")) - } - } - allErrs = append(allErrs, ValidatePodTemplateSpec(template, fldPath)...) - if replicas > 1 { - allErrs = append(allErrs, ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...) - } - // RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec(). - if template.Spec.RestartPolicy != api.RestartPolicyAlways { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)})) - } - } - return allErrs -} - -// ValidateReplicationControllerSpec tests if required fields in the replication controller spec are set. -func ValidateReplicationControllerSpec(spec *api.ReplicationControllerSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...) - allErrs = append(allErrs, ValidateNonEmptySelector(spec.Selector, fldPath.Child("selector"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...) - allErrs = append(allErrs, ValidatePodTemplateSpecForRC(spec.Template, spec.Selector, spec.Replicas, fldPath.Child("template"))...) 
- return allErrs -} - -// ValidatePodTemplateSpec validates the spec of a pod template -func ValidatePodTemplateSpec(spec *api.PodTemplateSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.Labels, fldPath.Child("labels"))...) - allErrs = append(allErrs, ValidateAnnotations(spec.Annotations, fldPath.Child("annotations"))...) - allErrs = append(allErrs, ValidatePodSpecificAnnotations(spec.Annotations, &spec.Spec, fldPath.Child("annotations"))...) - allErrs = append(allErrs, ValidatePodSpec(&spec.Spec, fldPath.Child("spec"))...) - return allErrs -} - -func ValidateReadOnlyPersistentDisks(volumes []api.Volume, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for i := range volumes { - vol := &volumes[i] - idxPath := fldPath.Index(i) - if vol.GCEPersistentDisk != nil { - if vol.GCEPersistentDisk.ReadOnly == false { - allErrs = append(allErrs, field.Invalid(idxPath.Child("gcePersistentDisk", "readOnly"), false, "must be true for replicated pods > 1; GCE PD can only be mounted on multiple machines if it is read-only")) - } - } - // TODO: What to do for AWS? It doesn't support replicas - } - return allErrs -} - -// validateTaints tests if given taints have valid data. -func validateTaints(taints []api.Taint, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - - uniqueTaints := map[api.TaintEffect]sets.String{} - - for i, currTaint := range taints { - idxPath := fldPath.Index(i) - // validate the taint key - allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(currTaint.Key, idxPath.Child("key"))...) - // validate the taint value - if errs := validation.IsValidLabelValue(currTaint.Value); len(errs) != 0 { - allErrors = append(allErrors, field.Invalid(idxPath.Child("value"), currTaint.Value, strings.Join(errs, ";"))) - } - // validate the taint effect - allErrors = append(allErrors, validateTaintEffect(&currTaint.Effect, false, idxPath.Child("effect"))...) - - // validate if taint is unique by - if len(uniqueTaints[currTaint.Effect]) > 0 && uniqueTaints[currTaint.Effect].Has(currTaint.Key) { - duplicatedError := field.Duplicate(idxPath, currTaint) - duplicatedError.Detail = "taints must be unique by key and effect pair" - allErrors = append(allErrors, duplicatedError) - continue - } - - // add taint to existingTaints for uniqueness check - if len(uniqueTaints[currTaint.Effect]) == 0 { - uniqueTaints[currTaint.Effect] = sets.String{} - } - uniqueTaints[currTaint.Effect].Insert(currTaint.Key) - } - return allErrors -} - -// ValidateTaintsInNodeAnnotations tests that the serialized taints in Node.Annotations has valid data -func ValidateTaintsInNodeAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - taints, err := api.GetTaintsFromNodeAnnotations(annotations) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath, api.TaintsAnnotationKey, err.Error())) - return allErrs - } - if len(taints) > 0 { - allErrs = append(allErrs, validateTaints(taints, fldPath.Child(api.TaintsAnnotationKey))...) - } - - return allErrs -} - -func ValidateNodeSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if annotations[api.PreferAvoidPodsAnnotationKey] != "" { - allErrs = append(allErrs, ValidateAvoidPodsInNodeAnnotations(annotations, fldPath)...) 
- } - if annotations[api.TaintsAnnotationKey] != "" { - allErrs = append(allErrs, ValidateTaintsInNodeAnnotations(annotations, fldPath)...) - } - return allErrs -} - -// ValidateNode tests if required fields in the node are set. -func ValidateNode(node *api.Node) field.ErrorList { - fldPath := field.NewPath("metadata") - allErrs := ValidateObjectMeta(&node.ObjectMeta, false, ValidateNodeName, fldPath) - allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...) - - // Only validate spec. All status fields are optional and can be updated later. - - // external ID is required. - if len(node.Spec.ExternalID) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("spec", "externalID"), "")) - } - - // TODO(rjnagal): Ignore PodCIDR till its completely implemented. - return allErrs -} - -// ValidateNodeUpdate tests to make sure a node update can be applied. Modifies oldNode. -func ValidateNodeUpdate(node, oldNode *api.Node) field.ErrorList { - fldPath := field.NewPath("metadata") - allErrs := ValidateObjectMetaUpdate(&node.ObjectMeta, &oldNode.ObjectMeta, fldPath) - allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...) - - // TODO: Enable the code once we have better api object.status update model. Currently, - // anyone can update node status. - // if !api.Semantic.DeepEqual(node.Status, api.NodeStatus{}) { - // allErrs = append(allErrs, field.Invalid("status", node.Status, "must be empty")) - // } - - // Validate resource quantities in capacity. - for k, v := range node.Status.Capacity { - resPath := field.NewPath("status", "capacity", string(k)) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) - } - // Validate resource quantities in allocatable. - for k, v := range node.Status.Allocatable { - resPath := field.NewPath("status", "allocatable", string(k)) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) - } - - // Validte no duplicate addresses in node status. - addresses := make(map[api.NodeAddress]bool) - for i, address := range node.Status.Addresses { - if _, ok := addresses[address]; ok { - allErrs = append(allErrs, field.Duplicate(field.NewPath("status", "addresses").Index(i), address)) - } - addresses[address] = true - } - - if len(oldNode.Spec.PodCIDR) == 0 { - // Allow the controller manager to assign a CIDR to a node if it doesn't have one. - oldNode.Spec.PodCIDR = node.Spec.PodCIDR - } else { - if oldNode.Spec.PodCIDR != node.Spec.PodCIDR { - allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "podCIDR"), "node updates may not change podCIDR except from \"\" to valid")) - } - } - // TODO: move reset function to its own location - // Ignore metadata changes now that they have been tested - oldNode.ObjectMeta = node.ObjectMeta - // Allow users to update capacity - oldNode.Status.Capacity = node.Status.Capacity - // Allow users to unschedule node - oldNode.Spec.Unschedulable = node.Spec.Unschedulable - // Clear status - oldNode.Status = node.Status - - // TODO: Add a 'real' error type for this error and provide print actual diffs. - if !api.Semantic.DeepEqual(oldNode, node) { - glog.V(4).Infof("Update failed validation %#v vs %#v", oldNode, node) - allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "node updates may only change labels or capacity")) - } - - return allErrs -} - -// Validate compute resource typename. 
-// Refer to docs/design/resources.md for more details. -func validateResourceName(value string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for _, msg := range validation.IsQualifiedName(value) { - allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) - } - if len(allErrs) != 0 { - return allErrs - } - - if len(strings.Split(value, "/")) == 1 { - if !api.IsStandardResourceName(value) { - return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource type or fully qualified")) - } - } - - return field.ErrorList{} -} - -// Validate container resource name -// Refer to docs/design/resources.md for more details. -func validateContainerResourceName(value string, fldPath *field.Path) field.ErrorList { - allErrs := validateResourceName(value, fldPath) - if len(strings.Split(value, "/")) == 1 { - if !api.IsStandardContainerResourceName(value) { - return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource for containers")) - } - } - return field.ErrorList{} -} - -// Validate resource names that can go in a resource quota -// Refer to docs/design/resources.md for more details. -func ValidateResourceQuotaResourceName(value string, fldPath *field.Path) field.ErrorList { - allErrs := validateResourceName(value, fldPath) - if len(strings.Split(value, "/")) == 1 { - if !api.IsStandardQuotaResourceName(value) { - return append(allErrs, field.Invalid(fldPath, value, isInvalidQuotaResource)) - } - } - return field.ErrorList{} -} - -// Validate limit range types -func validateLimitRangeTypeName(value string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for _, msg := range validation.IsQualifiedName(value) { - allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) - } - if len(allErrs) != 0 { - return allErrs - } - - if len(strings.Split(value, "/")) == 1 { - if !api.IsStandardLimitRangeType(value) { - return append(allErrs, field.Invalid(fldPath, value, "must be a standard limit type or fully qualified")) - } - } - - return allErrs -} - -// Validate limit range resource name -// limit types (other than Pod/Container) could contain storage not just cpu or memory -func validateLimitRangeResourceName(limitType api.LimitType, value string, fldPath *field.Path) field.ErrorList { - switch limitType { - case api.LimitTypePod, api.LimitTypeContainer: - return validateContainerResourceName(value, fldPath) - default: - return validateResourceName(value, fldPath) - } -} - -// ValidateLimitRange tests if required fields in the LimitRange are set. -func ValidateLimitRange(limitRange *api.LimitRange) field.ErrorList { - allErrs := ValidateObjectMeta(&limitRange.ObjectMeta, true, ValidateLimitRangeName, field.NewPath("metadata")) - - // ensure resource names are properly qualified per docs/design/resources.md - limitTypeSet := map[api.LimitType]bool{} - fldPath := field.NewPath("spec", "limits") - for i := range limitRange.Spec.Limits { - idxPath := fldPath.Index(i) - limit := &limitRange.Spec.Limits[i] - allErrs = append(allErrs, validateLimitRangeTypeName(string(limit.Type), idxPath.Child("type"))...) 
- - _, found := limitTypeSet[limit.Type] - if found { - allErrs = append(allErrs, field.Duplicate(idxPath.Child("type"), limit.Type)) - } - limitTypeSet[limit.Type] = true - - keys := sets.String{} - min := map[string]resource.Quantity{} - max := map[string]resource.Quantity{} - defaults := map[string]resource.Quantity{} - defaultRequests := map[string]resource.Quantity{} - maxLimitRequestRatios := map[string]resource.Quantity{} - - for k, q := range limit.Max { - allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("max").Key(string(k)))...) - keys.Insert(string(k)) - max[string(k)] = q - } - for k, q := range limit.Min { - allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("min").Key(string(k)))...) - keys.Insert(string(k)) - min[string(k)] = q - } - - if limit.Type == api.LimitTypePod { - if len(limit.Default) > 0 { - allErrs = append(allErrs, field.Forbidden(idxPath.Child("default"), "may not be specified when `type` is 'Pod'")) - } - if len(limit.DefaultRequest) > 0 { - allErrs = append(allErrs, field.Forbidden(idxPath.Child("defaultRequest"), "may not be specified when `type` is 'Pod'")) - } - } else { - for k, q := range limit.Default { - allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("default").Key(string(k)))...) - keys.Insert(string(k)) - defaults[string(k)] = q - } - for k, q := range limit.DefaultRequest { - allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("defaultRequest").Key(string(k)))...) - keys.Insert(string(k)) - defaultRequests[string(k)] = q - } - } - - if limit.Type == api.LimitTypePersistentVolumeClaim { - _, minQuantityFound := limit.Min[api.ResourceStorage] - _, maxQuantityFound := limit.Max[api.ResourceStorage] - if !minQuantityFound && !maxQuantityFound { - allErrs = append(allErrs, field.Required(idxPath.Child("limits"), "either minimum or maximum storage value is required, but neither was provided")) - } - } - - for k, q := range limit.MaxLimitRequestRatio { - allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("maxLimitRequestRatio").Key(string(k)))...) 
- keys.Insert(string(k)) - maxLimitRequestRatios[string(k)] = q - } - - for k := range keys { - minQuantity, minQuantityFound := min[k] - maxQuantity, maxQuantityFound := max[k] - defaultQuantity, defaultQuantityFound := defaults[k] - defaultRequestQuantity, defaultRequestQuantityFound := defaultRequests[k] - maxRatio, maxRatioFound := maxLimitRequestRatios[k] - - if minQuantityFound && maxQuantityFound && minQuantity.Cmp(maxQuantity) > 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("min").Key(string(k)), minQuantity, fmt.Sprintf("min value %s is greater than max value %s", minQuantity.String(), maxQuantity.String()))) - } - - if defaultRequestQuantityFound && minQuantityFound && minQuantity.Cmp(defaultRequestQuantity) > 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("min value %s is greater than default request value %s", minQuantity.String(), defaultRequestQuantity.String()))) - } - - if defaultRequestQuantityFound && maxQuantityFound && defaultRequestQuantity.Cmp(maxQuantity) > 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than max value %s", defaultRequestQuantity.String(), maxQuantity.String()))) - } - - if defaultRequestQuantityFound && defaultQuantityFound && defaultRequestQuantity.Cmp(defaultQuantity) > 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than default limit value %s", defaultRequestQuantity.String(), defaultQuantity.String()))) - } - - if defaultQuantityFound && minQuantityFound && minQuantity.Cmp(defaultQuantity) > 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("default").Key(string(k)), minQuantity, fmt.Sprintf("min value %s is greater than default value %s", minQuantity.String(), defaultQuantity.String()))) - } - - if defaultQuantityFound && maxQuantityFound && defaultQuantity.Cmp(maxQuantity) > 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("default").Key(string(k)), maxQuantity, fmt.Sprintf("default value %s is greater than max value %s", defaultQuantity.String(), maxQuantity.String()))) - } - if maxRatioFound && maxRatio.Cmp(*resource.NewQuantity(1, resource.DecimalSI)) < 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is less than 1", maxRatio.String()))) - } - if maxRatioFound && minQuantityFound && maxQuantityFound { - maxRatioValue := float64(maxRatio.Value()) - minQuantityValue := minQuantity.Value() - maxQuantityValue := maxQuantity.Value() - if maxRatio.Value() < resource.MaxMilliValue && minQuantityValue < resource.MaxMilliValue && maxQuantityValue < resource.MaxMilliValue { - maxRatioValue = float64(maxRatio.MilliValue()) / 1000 - minQuantityValue = minQuantity.MilliValue() - maxQuantityValue = maxQuantity.MilliValue() - } - maxRatioLimit := float64(maxQuantityValue) / float64(minQuantityValue) - if maxRatioValue > maxRatioLimit { - allErrs = append(allErrs, field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is greater than max/min = %f", maxRatio.String(), maxRatioLimit))) - } - } - } - } - - return allErrs -} - -// ValidateServiceAccount tests if required fields in the ServiceAccount are set. 
-func ValidateServiceAccount(serviceAccount *api.ServiceAccount) field.ErrorList { - allErrs := ValidateObjectMeta(&serviceAccount.ObjectMeta, true, ValidateServiceAccountName, field.NewPath("metadata")) - return allErrs -} - -// ValidateServiceAccountUpdate tests if required fields in the ServiceAccount are set. -func ValidateServiceAccountUpdate(newServiceAccount, oldServiceAccount *api.ServiceAccount) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newServiceAccount.ObjectMeta, &oldServiceAccount.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateServiceAccount(newServiceAccount)...) - return allErrs -} - -// ValidateSecret tests if required fields in the Secret are set. -func ValidateSecret(secret *api.Secret) field.ErrorList { - allErrs := ValidateObjectMeta(&secret.ObjectMeta, true, ValidateSecretName, field.NewPath("metadata")) - - dataPath := field.NewPath("data") - totalSize := 0 - for key, value := range secret.Data { - for _, msg := range validation.IsConfigMapKey(key) { - allErrs = append(allErrs, field.Invalid(dataPath.Key(key), key, msg)) - } - totalSize += len(value) - } - if totalSize > api.MaxSecretSize { - allErrs = append(allErrs, field.TooLong(dataPath, "", api.MaxSecretSize)) - } - - switch secret.Type { - case api.SecretTypeServiceAccountToken: - // Only require Annotations[kubernetes.io/service-account.name] - // Additional fields (like Annotations[kubernetes.io/service-account.uid] and Data[token]) might be contributed later by a controller loop - if value := secret.Annotations[api.ServiceAccountNameKey]; len(value) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("metadata", "annotations").Key(api.ServiceAccountNameKey), "")) - } - case api.SecretTypeOpaque, "": - // no-op - case api.SecretTypeDockercfg: - dockercfgBytes, exists := secret.Data[api.DockerConfigKey] - if !exists { - allErrs = append(allErrs, field.Required(dataPath.Key(api.DockerConfigKey), "")) - break - } - - // make sure that the content is well-formed json. - if err := json.Unmarshal(dockercfgBytes, &map[string]interface{}{}); err != nil { - allErrs = append(allErrs, field.Invalid(dataPath.Key(api.DockerConfigKey), "", err.Error())) - } - case api.SecretTypeDockerConfigJson: - dockerConfigJsonBytes, exists := secret.Data[api.DockerConfigJsonKey] - if !exists { - allErrs = append(allErrs, field.Required(dataPath.Key(api.DockerConfigJsonKey), "")) - break - } - - // make sure that the content is well-formed json. 
- if err := json.Unmarshal(dockerConfigJsonBytes, &map[string]interface{}{}); err != nil { - allErrs = append(allErrs, field.Invalid(dataPath.Key(api.DockerConfigJsonKey), "", err.Error())) - } - case api.SecretTypeBasicAuth: - _, usernameFieldExists := secret.Data[api.BasicAuthUsernameKey] - _, passwordFieldExists := secret.Data[api.BasicAuthPasswordKey] - - // username or password might be empty, but the field must be present - if !usernameFieldExists && !passwordFieldExists { - allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(api.BasicAuthUsernameKey), "")) - allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(api.BasicAuthPasswordKey), "")) - break - } - case api.SecretTypeSSHAuth: - if len(secret.Data[api.SSHAuthPrivateKey]) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(api.SSHAuthPrivateKey), "")) - break - } - - case api.SecretTypeTLS: - if _, exists := secret.Data[api.TLSCertKey]; !exists { - allErrs = append(allErrs, field.Required(dataPath.Key(api.TLSCertKey), "")) - } - if _, exists := secret.Data[api.TLSPrivateKeyKey]; !exists { - allErrs = append(allErrs, field.Required(dataPath.Key(api.TLSPrivateKeyKey), "")) - } - // TODO: Verify that the key matches the cert. - default: - // no-op - } - - return allErrs -} - -// ValidateSecretUpdate tests if required fields in the Secret are set. -func ValidateSecretUpdate(newSecret, oldSecret *api.Secret) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newSecret.ObjectMeta, &oldSecret.ObjectMeta, field.NewPath("metadata")) - - if len(newSecret.Type) == 0 { - newSecret.Type = oldSecret.Type - } - - allErrs = append(allErrs, ValidateImmutableField(newSecret.Type, oldSecret.Type, field.NewPath("type"))...) - - allErrs = append(allErrs, ValidateSecret(newSecret)...) - return allErrs -} - -// ValidateConfigMapName can be used to check whether the given ConfigMap name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateConfigMapName = NameIsDNSSubdomain - -// ValidateConfigMap tests whether required fields in the ConfigMap are set. -func ValidateConfigMap(cfg *api.ConfigMap) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, ValidateObjectMeta(&cfg.ObjectMeta, true, ValidateConfigMapName, field.NewPath("metadata"))...) - - totalSize := 0 - - for key, value := range cfg.Data { - for _, msg := range validation.IsConfigMapKey(key) { - allErrs = append(allErrs, field.Invalid(field.NewPath("data").Key(key), key, msg)) - } - totalSize += len(value) - } - if totalSize > api.MaxSecretSize { - allErrs = append(allErrs, field.TooLong(field.NewPath("data"), "", api.MaxSecretSize)) - } - - return allErrs -} - -// ValidateConfigMapUpdate tests if required fields in the ConfigMap are set. -func ValidateConfigMapUpdate(newCfg, oldCfg *api.ConfigMap) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, ValidateObjectMetaUpdate(&newCfg.ObjectMeta, &oldCfg.ObjectMeta, field.NewPath("metadata"))...) - allErrs = append(allErrs, ValidateConfigMap(newCfg)...) - - return allErrs -} - -func validateBasicResource(quantity resource.Quantity, fldPath *field.Path) field.ErrorList { - if quantity.Value() < 0 { - return field.ErrorList{field.Invalid(fldPath, quantity.Value(), "must be a valid resource quantity")} - } - return field.ErrorList{} -} - -// Validates resource requirement spec. 
-func ValidateResourceRequirements(requirements *api.ResourceRequirements, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - limPath := fldPath.Child("limits") - reqPath := fldPath.Child("requests") - for resourceName, quantity := range requirements.Limits { - fldPath := limPath.Key(string(resourceName)) - // Validate resource name. - allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...) - - // Validate resource quantity. - allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...) - - // Check that request <= limit. - requestQuantity, exists := requirements.Requests[resourceName] - if exists { - // For GPUs, not only requests can't exceed limits, they also can't be lower, i.e. must be equal. - if resourceName == api.ResourceNvidiaGPU && quantity.Cmp(requestQuantity) != 0 { - allErrs = append(allErrs, field.Invalid(reqPath, requestQuantity.String(), fmt.Sprintf("must be equal to %s limit", api.ResourceNvidiaGPU))) - } else if quantity.Cmp(requestQuantity) < 0 { - allErrs = append(allErrs, field.Invalid(limPath, quantity.String(), fmt.Sprintf("must be greater than or equal to %s request", resourceName))) - } - } - } - for resourceName, quantity := range requirements.Requests { - fldPath := reqPath.Key(string(resourceName)) - // Validate resource name. - allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...) - // Validate resource quantity. - allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...) - } - - return allErrs -} - -// validateResourceQuotaScopes ensures that each enumerated hard resource constraint is valid for set of scopes -func validateResourceQuotaScopes(resourceQuotaSpec *api.ResourceQuotaSpec, fld *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(resourceQuotaSpec.Scopes) == 0 { - return allErrs - } - hardLimits := sets.NewString() - for k := range resourceQuotaSpec.Hard { - hardLimits.Insert(string(k)) - } - fldPath := fld.Child("scopes") - scopeSet := sets.NewString() - for _, scope := range resourceQuotaSpec.Scopes { - if !api.IsStandardResourceQuotaScope(string(scope)) { - allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "unsupported scope")) - } - for _, k := range hardLimits.List() { - if api.IsStandardQuotaResourceName(k) && !api.IsResourceQuotaScopeValidForResource(scope, k) { - allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "unsupported scope applied to resource")) - } - } - scopeSet.Insert(string(scope)) - } - invalidScopePairs := []sets.String{ - sets.NewString(string(api.ResourceQuotaScopeBestEffort), string(api.ResourceQuotaScopeNotBestEffort)), - sets.NewString(string(api.ResourceQuotaScopeTerminating), string(api.ResourceQuotaScopeNotTerminating)), - } - for _, invalidScopePair := range invalidScopePairs { - if scopeSet.HasAll(invalidScopePair.List()...) { - allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "conflicting scopes")) - } - } - return allErrs -} - -// ValidateResourceQuota tests if required fields in the ResourceQuota are set. -func ValidateResourceQuota(resourceQuota *api.ResourceQuota) field.ErrorList { - allErrs := ValidateObjectMeta(&resourceQuota.ObjectMeta, true, ValidateResourceQuotaName, field.NewPath("metadata")) - - allErrs = append(allErrs, ValidateResourceQuotaSpec(&resourceQuota.Spec, field.NewPath("spec"))...) 
- allErrs = append(allErrs, ValidateResourceQuotaStatus(&resourceQuota.Status, field.NewPath("status"))...) - - return allErrs -} - -func ValidateResourceQuotaStatus(status *api.ResourceQuotaStatus, fld *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - fldPath := fld.Child("hard") - for k, v := range status.Hard { - resPath := fldPath.Key(string(k)) - allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) - } - fldPath = fld.Child("used") - for k, v := range status.Used { - resPath := fldPath.Key(string(k)) - allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) - } - - return allErrs -} - -func ValidateResourceQuotaSpec(resourceQuotaSpec *api.ResourceQuotaSpec, fld *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - fldPath := fld.Child("hard") - for k, v := range resourceQuotaSpec.Hard { - resPath := fldPath.Key(string(k)) - allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) - } - allErrs = append(allErrs, validateResourceQuotaScopes(resourceQuotaSpec, fld)...) - - return allErrs -} - -// ValidateResourceQuantityValue enforces that specified quantity is valid for specified resource -func ValidateResourceQuantityValue(resource string, value resource.Quantity, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, ValidateNonnegativeQuantity(value, fldPath)...) - if api.IsIntegerResourceName(resource) { - if value.MilliValue()%int64(1000) != int64(0) { - allErrs = append(allErrs, field.Invalid(fldPath, value, isNotIntegerErrorMsg)) - } - } - return allErrs -} - -// ValidateResourceQuotaUpdate tests to see if the update is legal for an end user to make. -// newResourceQuota is updated with fields that cannot be changed. -func ValidateResourceQuotaUpdate(newResourceQuota, oldResourceQuota *api.ResourceQuota) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateResourceQuotaSpec(&newResourceQuota.Spec, field.NewPath("spec"))...) - - // ensure scopes cannot change, and that resources are still valid for scope - fldPath := field.NewPath("spec", "scopes") - oldScopes := sets.NewString() - newScopes := sets.NewString() - for _, scope := range newResourceQuota.Spec.Scopes { - newScopes.Insert(string(scope)) - } - for _, scope := range oldResourceQuota.Spec.Scopes { - oldScopes.Insert(string(scope)) - } - if !oldScopes.Equal(newScopes) { - allErrs = append(allErrs, field.Invalid(fldPath, newResourceQuota.Spec.Scopes, "field is immutable")) - } - - newResourceQuota.Status = oldResourceQuota.Status - return allErrs -} - -// ValidateResourceQuotaStatusUpdate tests to see if the status update is legal for an end user to make. -// newResourceQuota is updated with fields that cannot be changed. 
-func ValidateResourceQuotaStatusUpdate(newResourceQuota, oldResourceQuota *api.ResourceQuota) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata")) - if len(newResourceQuota.ResourceVersion) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), "")) - } - fldPath := field.NewPath("status", "hard") - for k, v := range newResourceQuota.Status.Hard { - resPath := fldPath.Key(string(k)) - allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) - } - fldPath = field.NewPath("status", "used") - for k, v := range newResourceQuota.Status.Used { - resPath := fldPath.Key(string(k)) - allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) - } - newResourceQuota.Spec = oldResourceQuota.Spec - return allErrs -} - -// ValidateNamespace tests if required fields are set. -func ValidateNamespace(namespace *api.Namespace) field.ErrorList { - allErrs := ValidateObjectMeta(&namespace.ObjectMeta, false, ValidateNamespaceName, field.NewPath("metadata")) - for i := range namespace.Spec.Finalizers { - allErrs = append(allErrs, validateFinalizerName(string(namespace.Spec.Finalizers[i]), field.NewPath("spec", "finalizers"))...) - } - return allErrs -} - -// Validate finalizer names -func validateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for _, msg := range validation.IsQualifiedName(stringValue) { - allErrs = append(allErrs, field.Invalid(fldPath, stringValue, msg)) - } - if len(allErrs) != 0 { - return allErrs - } - - if len(strings.Split(stringValue, "/")) == 1 { - if !api.IsStandardFinalizerName(stringValue) { - return append(allErrs, field.Invalid(fldPath, stringValue, "name is neither a standard finalizer name nor is it fully qualified")) - } - } - - return field.ErrorList{} -} - -// ValidateNamespaceUpdate tests to make sure a namespace update can be applied. -// newNamespace is updated with fields that cannot be changed -func ValidateNamespaceUpdate(newNamespace *api.Namespace, oldNamespace *api.Namespace) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) - newNamespace.Spec.Finalizers = oldNamespace.Spec.Finalizers - newNamespace.Status = oldNamespace.Status - return allErrs -} - -// ValidateNamespaceStatusUpdate tests to see if the update is legal for an end user to make. newNamespace is updated with fields -// that cannot be changed. 
-func ValidateNamespaceStatusUpdate(newNamespace, oldNamespace *api.Namespace) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) - newNamespace.Spec = oldNamespace.Spec - if newNamespace.DeletionTimestamp.IsZero() { - if newNamespace.Status.Phase != api.NamespaceActive { - allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Active' if `deletionTimestamp` is empty")) - } - } else { - if newNamespace.Status.Phase != api.NamespaceTerminating { - allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Terminating' if `deletionTimestamp` is not empty")) - } - } - return allErrs -} - -// ValidateNamespaceFinalizeUpdate tests to see if the update is legal for an end user to make. -// newNamespace is updated with fields that cannot be changed. -func ValidateNamespaceFinalizeUpdate(newNamespace, oldNamespace *api.Namespace) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) - - fldPath := field.NewPath("spec", "finalizers") - for i := range newNamespace.Spec.Finalizers { - idxPath := fldPath.Index(i) - allErrs = append(allErrs, validateFinalizerName(string(newNamespace.Spec.Finalizers[i]), idxPath)...) - } - newNamespace.Status = oldNamespace.Status - return allErrs -} - -// Construct lookup map of old subset IPs to NodeNames. -func updateEpAddrToNodeNameMap(ipToNodeName map[string]string, addresses []api.EndpointAddress) { - for n := range addresses { - if addresses[n].NodeName == nil { - continue - } - ipToNodeName[addresses[n].IP] = *addresses[n].NodeName - } -} - -// Build a map across all subsets of IP -> NodeName -func buildEndpointAddressNodeNameMap(subsets []api.EndpointSubset) map[string]string { - ipToNodeName := make(map[string]string) - for i := range subsets { - updateEpAddrToNodeNameMap(ipToNodeName, subsets[i].Addresses) - updateEpAddrToNodeNameMap(ipToNodeName, subsets[i].NotReadyAddresses) - } - return ipToNodeName -} - -func validateEpAddrNodeNameTransition(addr *api.EndpointAddress, ipToNodeName map[string]string, fldPath *field.Path) field.ErrorList { - errList := field.ErrorList{} - existingNodeName, found := ipToNodeName[addr.IP] - if !found { - return errList - } - if addr.NodeName == nil || *addr.NodeName == existingNodeName { - return errList - } - // NodeName entry found for this endpoint IP, but user is attempting to change NodeName - return append(errList, field.Forbidden(fldPath, fmt.Sprintf("Cannot change NodeName for %s to %s", addr.IP, *addr.NodeName))) -} - -// ValidateEndpoints tests if required fields are set. -func ValidateEndpoints(endpoints *api.Endpoints) field.ErrorList { - allErrs := ValidateObjectMeta(&endpoints.ObjectMeta, true, ValidateEndpointsName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(endpoints.Annotations, field.NewPath("annotations"))...) - allErrs = append(allErrs, validateEndpointSubsets(endpoints.Subsets, []api.EndpointSubset{}, field.NewPath("subsets"))...) 
- return allErrs -} - -func validateEndpointSubsets(subsets []api.EndpointSubset, oldSubsets []api.EndpointSubset, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - ipToNodeName := buildEndpointAddressNodeNameMap(oldSubsets) - for i := range subsets { - ss := &subsets[i] - idxPath := fldPath.Index(i) - - if len(ss.Addresses) == 0 && len(ss.NotReadyAddresses) == 0 { - //TODO: consider adding a RequiredOneOf() error for this and similar cases - allErrs = append(allErrs, field.Required(idxPath, "must specify `addresses` or `notReadyAddresses`")) - } - if len(ss.Ports) == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("ports"), "")) - } - for addr := range ss.Addresses { - allErrs = append(allErrs, validateEndpointAddress(&ss.Addresses[addr], idxPath.Child("addresses").Index(addr), ipToNodeName)...) - } - for addr := range ss.NotReadyAddresses { - allErrs = append(allErrs, validateEndpointAddress(&ss.NotReadyAddresses[addr], idxPath.Child("notReadyAddresses").Index(addr), ipToNodeName)...) - } - for port := range ss.Ports { - allErrs = append(allErrs, validateEndpointPort(&ss.Ports[port], len(ss.Ports) > 1, idxPath.Child("ports").Index(port))...) - } - } - - return allErrs -} - -func validateEndpointAddress(address *api.EndpointAddress, fldPath *field.Path, ipToNodeName map[string]string) field.ErrorList { - allErrs := field.ErrorList{} - for _, msg := range validation.IsValidIP(address.IP) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), address.IP, msg)) - } - if len(address.Hostname) > 0 { - allErrs = append(allErrs, ValidateDNS1123Label(address.Hostname, fldPath.Child("hostname"))...) - } - // During endpoint update, verify that NodeName is a DNS subdomain and transition rules allow the update - if address.NodeName != nil { - for _, msg := range ValidateNodeName(*address.NodeName, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), *address.NodeName, msg)) - } - } - allErrs = append(allErrs, validateEpAddrNodeNameTransition(address, ipToNodeName, fldPath.Child("nodeName"))...) - if len(allErrs) > 0 { - return allErrs - } - allErrs = append(allErrs, validateNonSpecialIP(address.IP, fldPath.Child("ip"))...) - return allErrs -} - -func validateNonSpecialIP(ipAddress string, fldPath *field.Path) field.ErrorList { - // We disallow some IPs as endpoints or external-ips. Specifically, - // unspecified and loopback addresses are nonsensical and link-local - // addresses tend to be used for node-centric purposes (e.g. metadata - // service). 
- allErrs := field.ErrorList{} - ip := net.ParseIP(ipAddress) - if ip == nil { - allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "must be a valid IP address")) - return allErrs - } - if ip.IsUnspecified() { - allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be unspecified (0.0.0.0)")) - } - if ip.IsLoopback() { - allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the loopback range (127.0.0.0/8)")) - } - if ip.IsLinkLocalUnicast() { - allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the link-local range (169.254.0.0/16)")) - } - if ip.IsLinkLocalMulticast() { - allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the link-local multicast range (224.0.0.0/24)")) - } - return allErrs -} - -func validateEndpointPort(port *api.EndpointPort, requireName bool, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if requireName && len(port.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) - } else if len(port.Name) != 0 { - allErrs = append(allErrs, ValidateDNS1123Label(port.Name, fldPath.Child("name"))...) - } - for _, msg := range validation.IsValidPortNum(int(port.Port)) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), port.Port, msg)) - } - if len(port.Protocol) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), "")) - } else if !supportedPortProtocols.Has(string(port.Protocol)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), port.Protocol, supportedPortProtocols.List())) - } - return allErrs -} - -// ValidateEndpointsUpdate tests to make sure an endpoints update can be applied. -func ValidateEndpointsUpdate(newEndpoints, oldEndpoints *api.Endpoints) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newEndpoints.ObjectMeta, &oldEndpoints.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, validateEndpointSubsets(newEndpoints.Subsets, oldEndpoints.Subsets, field.NewPath("subsets"))...) - allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(newEndpoints.Annotations, field.NewPath("annotations"))...) 
- return allErrs -} - -// ValidateSecurityContext ensure the security context contains valid settings -func ValidateSecurityContext(sc *api.SecurityContext, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - //this should only be true for testing since SecurityContext is defaulted by the api - if sc == nil { - return allErrs - } - - if sc.Privileged != nil { - if *sc.Privileged && !capabilities.Get().AllowPrivileged { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("privileged"), "disallowed by policy")) - } - } - - if sc.RunAsUser != nil { - if *sc.RunAsUser < 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *sc.RunAsUser, isNegativeErrorMsg)) - } - } - return allErrs -} - -func ValidatePodLogOptions(opts *api.PodLogOptions) field.ErrorList { - allErrs := field.ErrorList{} - if opts.TailLines != nil && *opts.TailLines < 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("tailLines"), *opts.TailLines, isNegativeErrorMsg)) - } - if opts.LimitBytes != nil && *opts.LimitBytes < 1 { - allErrs = append(allErrs, field.Invalid(field.NewPath("limitBytes"), *opts.LimitBytes, "must be greater than 0")) - } - switch { - case opts.SinceSeconds != nil && opts.SinceTime != nil: - allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "at most one of `sinceTime` or `sinceSeconds` may be specified")) - case opts.SinceSeconds != nil: - if *opts.SinceSeconds < 1 { - allErrs = append(allErrs, field.Invalid(field.NewPath("sinceSeconds"), *opts.SinceSeconds, "must be greater than 0")) - } - } - return allErrs -} - -// ValidateLoadBalancerStatus validates required fields on a LoadBalancerStatus -func ValidateLoadBalancerStatus(status *api.LoadBalancerStatus, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for i, ingress := range status.Ingress { - idxPath := fldPath.Child("ingress").Index(i) - if len(ingress.IP) > 0 { - if isIP := (net.ParseIP(ingress.IP) != nil); !isIP { - allErrs = append(allErrs, field.Invalid(idxPath.Child("ip"), ingress.IP, "must be a valid IP address")) - } - } - if len(ingress.Hostname) > 0 { - for _, msg := range validation.IsDNS1123Subdomain(ingress.Hostname) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, msg)) - } - if isIP := (net.ParseIP(ingress.Hostname) != nil); isIP { - allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, "must be a DNS name, not an IP address")) - } - } - } - return allErrs -} - -// TODO: remove this after we EOL the annotation that carries it. 
-func isValidHostnamesMap(serializedPodHostNames string) bool { - if len(serializedPodHostNames) == 0 { - return false - } - podHostNames := map[string]endpoints.HostRecord{} - err := json.Unmarshal([]byte(serializedPodHostNames), &podHostNames) - if err != nil { - return false - } - - for ip, hostRecord := range podHostNames { - if len(validation.IsDNS1123Label(hostRecord.HostName)) != 0 { - return false - } - if net.ParseIP(ip) == nil { - return false - } - } - return true -} - -func sysctlIntersection(a []api.Sysctl, b []api.Sysctl) []string { - lookup := make(map[string]struct{}, len(a)) - result := []string{} - for i := range a { - lookup[a[i].Name] = struct{}{} - } - for i := range b { - if _, found := lookup[b[i].Name]; found { - result = append(result, b[i].Name) - } - } - return result -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go index 2a559f17b4..c018bcc4ee 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,12 +21,12 @@ limitations under the License. package api import ( - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" - fields "k8s.io/kubernetes/pkg/fields" - labels "k8s.io/kubernetes/pkg/labels" - runtime "k8s.io/kubernetes/pkg/runtime" - types "k8s.io/kubernetes/pkg/types" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + fields "k8s.io/apimachinery/pkg/fields" + labels "k8s.io/apimachinery/pkg/labels" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" reflect "reflect" ) @@ -52,8 +52,10 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ComponentStatus, InType: reflect.TypeOf(&ComponentStatus{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ComponentStatusList, InType: reflect.TypeOf(&ComponentStatusList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMap, InType: reflect.TypeOf(&ConfigMap{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapEnvSource, InType: reflect.TypeOf(&ConfigMapEnvSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapKeySelector, InType: reflect.TypeOf(&ConfigMapKeySelector{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapList, InType: reflect.TypeOf(&ConfigMapList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapProjection, InType: reflect.TypeOf(&ConfigMapProjection{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapVolumeSource, InType: reflect.TypeOf(&ConfigMapVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Container, InType: reflect.TypeOf(&Container{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerImage, InType: reflect.TypeOf(&ContainerImage{})}, @@ -66,6 +68,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConversionError, InType: reflect.TypeOf(&ConversionError{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DaemonEndpoint, InType: reflect.TypeOf(&DaemonEndpoint{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeleteOptions, 
InType: reflect.TypeOf(&DeleteOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DownwardAPIProjection, InType: reflect.TypeOf(&DownwardAPIProjection{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DownwardAPIVolumeFile, InType: reflect.TypeOf(&DownwardAPIVolumeFile{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DownwardAPIVolumeSource, InType: reflect.TypeOf(&DownwardAPIVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EmptyDirVolumeSource, InType: reflect.TypeOf(&EmptyDirVolumeSource{})}, @@ -74,13 +77,13 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointSubset, InType: reflect.TypeOf(&EndpointSubset{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Endpoints, InType: reflect.TypeOf(&Endpoints{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointsList, InType: reflect.TypeOf(&EndpointsList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EnvFromSource, InType: reflect.TypeOf(&EnvFromSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EnvVar, InType: reflect.TypeOf(&EnvVar{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EnvVarSource, InType: reflect.TypeOf(&EnvVarSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Event, InType: reflect.TypeOf(&Event{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EventList, InType: reflect.TypeOf(&EventList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EventSource, InType: reflect.TypeOf(&EventSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ExecAction, InType: reflect.TypeOf(&ExecAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ExportOptions, InType: reflect.TypeOf(&ExportOptions{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_FCVolumeSource, InType: reflect.TypeOf(&FCVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_FlexVolumeSource, InType: reflect.TypeOf(&FlexVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_FlockerVolumeSource, InType: reflect.TypeOf(&FlockerVolumeSource{})}, @@ -125,7 +128,6 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ObjectFieldSelector, InType: reflect.TypeOf(&ObjectFieldSelector{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ObjectMeta, InType: reflect.TypeOf(&ObjectMeta{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ObjectReference, InType: reflect.TypeOf(&ObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_OwnerReference, InType: reflect.TypeOf(&OwnerReference{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolume, InType: reflect.TypeOf(&PersistentVolume{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaim, InType: reflect.TypeOf(&PersistentVolumeClaim{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaimList, InType: reflect.TypeOf(&PersistentVolumeClaimList{})}, @@ -146,6 +148,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodExecOptions, InType: reflect.TypeOf(&PodExecOptions{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodList, InType: reflect.TypeOf(&PodList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodLogOptions, InType: reflect.TypeOf(&PodLogOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodPortForwardOptions, InType: reflect.TypeOf(&PodPortForwardOptions{})}, conversion.GeneratedDeepCopyFunc{Fn: 
DeepCopy_api_PodProxyOptions, InType: reflect.TypeOf(&PodProxyOptions{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodSecurityContext, InType: reflect.TypeOf(&PodSecurityContext{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodSignature, InType: reflect.TypeOf(&PodSignature{})}, @@ -155,10 +158,12 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodTemplate, InType: reflect.TypeOf(&PodTemplate{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodTemplateList, InType: reflect.TypeOf(&PodTemplateList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodTemplateSpec, InType: reflect.TypeOf(&PodTemplateSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PortworxVolumeSource, InType: reflect.TypeOf(&PortworxVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Preconditions, InType: reflect.TypeOf(&Preconditions{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PreferAvoidPodsEntry, InType: reflect.TypeOf(&PreferAvoidPodsEntry{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PreferredSchedulingTerm, InType: reflect.TypeOf(&PreferredSchedulingTerm{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Probe, InType: reflect.TypeOf(&Probe{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ProjectedVolumeSource, InType: reflect.TypeOf(&ProjectedVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_QuobyteVolumeSource, InType: reflect.TypeOf(&QuobyteVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_RBDVolumeSource, InType: reflect.TypeOf(&RBDVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_RangeAllocation, InType: reflect.TypeOf(&RangeAllocation{})}, @@ -174,9 +179,12 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceQuotaStatus, InType: reflect.TypeOf(&ResourceQuotaStatus{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceRequirements, InType: reflect.TypeOf(&ResourceRequirements{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SELinuxOptions, InType: reflect.TypeOf(&SELinuxOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ScaleIOVolumeSource, InType: reflect.TypeOf(&ScaleIOVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Secret, InType: reflect.TypeOf(&Secret{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretEnvSource, InType: reflect.TypeOf(&SecretEnvSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretKeySelector, InType: reflect.TypeOf(&SecretKeySelector{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretList, InType: reflect.TypeOf(&SecretList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretProjection, InType: reflect.TypeOf(&SecretProjection{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretVolumeSource, InType: reflect.TypeOf(&SecretVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecurityContext, InType: reflect.TypeOf(&SecurityContext{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SerializedReference, InType: reflect.TypeOf(&SerializedReference{})}, @@ -194,6 +202,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Toleration, InType: reflect.TypeOf(&Toleration{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Volume, InType: reflect.TypeOf(&Volume{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VolumeMount, InType: 
reflect.TypeOf(&VolumeMount{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VolumeProjection, InType: reflect.TypeOf(&VolumeProjection{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VolumeSource, InType: reflect.TypeOf(&VolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VsphereVirtualDiskVolumeSource, InType: reflect.TypeOf(&VsphereVirtualDiskVolumeSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_WeightedPodAffinityTerm, InType: reflect.TypeOf(&WeightedPodAffinityTerm{})}, @@ -204,10 +213,7 @@ func DeepCopy_api_AWSElasticBlockStoreVolumeSource(in interface{}, out interface { in := in.(*AWSElasticBlockStoreVolumeSource) out := out.(*AWSElasticBlockStoreVolumeSource) - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly + *out = *in return nil } } @@ -216,14 +222,13 @@ func DeepCopy_api_Affinity(in interface{}, out interface{}, c *conversion.Cloner { in := in.(*Affinity) out := out.(*Affinity) + *out = *in if in.NodeAffinity != nil { in, out := &in.NodeAffinity, &out.NodeAffinity *out = new(NodeAffinity) if err := DeepCopy_api_NodeAffinity(*in, *out, c); err != nil { return err } - } else { - out.NodeAffinity = nil } if in.PodAffinity != nil { in, out := &in.PodAffinity, &out.PodAffinity @@ -231,8 +236,6 @@ func DeepCopy_api_Affinity(in interface{}, out interface{}, c *conversion.Cloner if err := DeepCopy_api_PodAffinity(*in, *out, c); err != nil { return err } - } else { - out.PodAffinity = nil } if in.PodAntiAffinity != nil { in, out := &in.PodAntiAffinity, &out.PodAntiAffinity @@ -240,8 +243,6 @@ func DeepCopy_api_Affinity(in interface{}, out interface{}, c *conversion.Cloner if err := DeepCopy_api_PodAntiAffinity(*in, *out, c); err != nil { return err } - } else { - out.PodAntiAffinity = nil } return nil } @@ -251,8 +252,7 @@ func DeepCopy_api_AttachedVolume(in interface{}, out interface{}, c *conversion. 
{ in := in.(*AttachedVolume) out := out.(*AttachedVolume) - out.Name = in.Name - out.DevicePath = in.DevicePath + *out = *in return nil } } @@ -261,6 +261,7 @@ func DeepCopy_api_AvoidPods(in interface{}, out interface{}, c *conversion.Clone { in := in.(*AvoidPods) out := out.(*AvoidPods) + *out = *in if in.PreferAvoidPods != nil { in, out := &in.PreferAvoidPods, &out.PreferAvoidPods *out = make([]PreferAvoidPodsEntry, len(*in)) @@ -269,8 +270,6 @@ func DeepCopy_api_AvoidPods(in interface{}, out interface{}, c *conversion.Clone return err } } - } else { - out.PreferAvoidPods = nil } return nil } @@ -280,28 +279,21 @@ func DeepCopy_api_AzureDiskVolumeSource(in interface{}, out interface{}, c *conv { in := in.(*AzureDiskVolumeSource) out := out.(*AzureDiskVolumeSource) - out.DiskName = in.DiskName - out.DataDiskURI = in.DataDiskURI + *out = *in if in.CachingMode != nil { in, out := &in.CachingMode, &out.CachingMode *out = new(AzureDataDiskCachingMode) **out = **in - } else { - out.CachingMode = nil } if in.FSType != nil { in, out := &in.FSType, &out.FSType *out = new(string) **out = **in - } else { - out.FSType = nil } if in.ReadOnly != nil { in, out := &in.ReadOnly, &out.ReadOnly *out = new(bool) **out = **in - } else { - out.ReadOnly = nil } return nil } @@ -311,9 +303,7 @@ func DeepCopy_api_AzureFileVolumeSource(in interface{}, out interface{}, c *conv { in := in.(*AzureFileVolumeSource) out := out.(*AzureFileVolumeSource) - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly + *out = *in return nil } } @@ -322,11 +312,12 @@ func DeepCopy_api_Binding(in interface{}, out interface{}, c *conversion.Cloner) { in := in.(*Binding) out := out.(*Binding) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } - out.Target = in.Target return nil } } @@ -335,23 +326,16 @@ func DeepCopy_api_Capabilities(in interface{}, out interface{}, c *conversion.Cl { in := in.(*Capabilities) out := out.(*Capabilities) + *out = *in if in.Add != nil { in, out := &in.Add, &out.Add *out = make([]Capability, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Add = nil + copy(*out, *in) } if in.Drop != nil { in, out := &in.Drop, &out.Drop *out = make([]Capability, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Drop = nil + copy(*out, *in) } return nil } @@ -361,24 +345,17 @@ func DeepCopy_api_CephFSVolumeSource(in interface{}, out interface{}, c *convers { in := in.(*CephFSVolumeSource) out := out.(*CephFSVolumeSource) + *out = *in if in.Monitors != nil { in, out := &in.Monitors, &out.Monitors *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Monitors = nil } - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef *out = new(LocalObjectReference) **out = **in - } else { - out.SecretRef = nil } - out.ReadOnly = in.ReadOnly return nil } } @@ -387,9 +364,7 @@ func DeepCopy_api_CinderVolumeSource(in interface{}, out interface{}, c *convers { in := in.(*CinderVolumeSource) out := out.(*CinderVolumeSource) - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly + *out = *in return nil } } @@ -398,10 +373,7 @@ func DeepCopy_api_ComponentCondition(in interface{}, out interface{}, c *convers { in := 
in.(*ComponentCondition) out := out.(*ComponentCondition) - out.Type = in.Type - out.Status = in.Status - out.Message = in.Message - out.Error = in.Error + *out = *in return nil } } @@ -410,18 +382,16 @@ func DeepCopy_api_ComponentStatus(in interface{}, out interface{}, c *conversion { in := in.(*ComponentStatus) out := out.(*ComponentStatus) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]ComponentCondition, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Conditions = nil + copy(*out, *in) } return nil } @@ -431,8 +401,7 @@ func DeepCopy_api_ComponentStatusList(in interface{}, out interface{}, c *conver { in := in.(*ComponentStatusList) out := out.(*ComponentStatusList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ComponentStatus, len(*in)) @@ -441,8 +410,6 @@ func DeepCopy_api_ComponentStatusList(in interface{}, out interface{}, c *conver return err } } - } else { - out.Items = nil } return nil } @@ -452,9 +419,11 @@ func DeepCopy_api_ConfigMap(in interface{}, out interface{}, c *conversion.Clone { in := in.(*ConfigMap) out := out.(*ConfigMap) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if in.Data != nil { in, out := &in.Data, &out.Data @@ -462,8 +431,20 @@ func DeepCopy_api_ConfigMap(in interface{}, out interface{}, c *conversion.Clone for key, val := range *in { (*out)[key] = val } - } else { - out.Data = nil + } + return nil + } +} + +func DeepCopy_api_ConfigMapEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapEnvSource) + out := out.(*ConfigMapEnvSource) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in } return nil } @@ -473,8 +454,12 @@ func DeepCopy_api_ConfigMapKeySelector(in interface{}, out interface{}, c *conve { in := in.(*ConfigMapKeySelector) out := out.(*ConfigMapKeySelector) - out.LocalObjectReference = in.LocalObjectReference - out.Key = in.Key + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } return nil } } @@ -483,8 +468,7 @@ func DeepCopy_api_ConfigMapList(in interface{}, out interface{}, c *conversion.C { in := in.(*ConfigMapList) out := out.(*ConfigMapList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ConfigMap, len(*in)) @@ -493,8 +477,29 @@ func DeepCopy_api_ConfigMapList(in interface{}, out interface{}, c *conversion.C return err } } - } else { - out.Items = nil + } + return nil + } +} + +func DeepCopy_api_ConfigMapProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapProjection) + out := out.(*ConfigMapProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + 
} + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in } return nil } @@ -504,7 +509,7 @@ func DeepCopy_api_ConfigMapVolumeSource(in interface{}, out interface{}, c *conv { in := in.(*ConfigMapVolumeSource) out := out.(*ConfigMapVolumeSource) - out.LocalObjectReference = in.LocalObjectReference + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]KeyToPath, len(*in)) @@ -513,15 +518,16 @@ func DeepCopy_api_ConfigMapVolumeSource(in interface{}, out interface{}, c *conv return err } } - } else { - out.Items = nil } if in.DefaultMode != nil { in, out := &in.DefaultMode, &out.DefaultMode *out = new(int32) **out = **in - } else { - out.DefaultMode = nil + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in } return nil } @@ -531,31 +537,30 @@ func DeepCopy_api_Container(in interface{}, out interface{}, c *conversion.Clone { in := in.(*Container) out := out.(*Container) - out.Name = in.Name - out.Image = in.Image + *out = *in if in.Command != nil { in, out := &in.Command, &out.Command *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Command = nil } if in.Args != nil { in, out := &in.Args, &out.Args *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Args = nil } - out.WorkingDir = in.WorkingDir if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]EnvFromSource, len(*in)) for i := range *in { - (*out)[i] = (*in)[i] + if err := DeepCopy_api_EnvFromSource(&(*in)[i], &(*out)[i], c); err != nil { + return err + } } - } else { - out.Ports = nil } if in.Env != nil { in, out := &in.Env, &out.Env @@ -565,8 +570,6 @@ func DeepCopy_api_Container(in interface{}, out interface{}, c *conversion.Clone return err } } - } else { - out.Env = nil } if err := DeepCopy_api_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { return err @@ -574,11 +577,7 @@ func DeepCopy_api_Container(in interface{}, out interface{}, c *conversion.Clone if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts *out = make([]VolumeMount, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.VolumeMounts = nil + copy(*out, *in) } if in.LivenessProbe != nil { in, out := &in.LivenessProbe, &out.LivenessProbe @@ -586,8 +585,6 @@ func DeepCopy_api_Container(in interface{}, out interface{}, c *conversion.Clone if err := DeepCopy_api_Probe(*in, *out, c); err != nil { return err } - } else { - out.LivenessProbe = nil } if in.ReadinessProbe != nil { in, out := &in.ReadinessProbe, &out.ReadinessProbe @@ -595,8 +592,6 @@ func DeepCopy_api_Container(in interface{}, out interface{}, c *conversion.Clone if err := DeepCopy_api_Probe(*in, *out, c); err != nil { return err } - } else { - out.ReadinessProbe = nil } if in.Lifecycle != nil { in, out := &in.Lifecycle, &out.Lifecycle @@ -604,23 +599,14 @@ func DeepCopy_api_Container(in interface{}, out interface{}, c *conversion.Clone if err := DeepCopy_api_Lifecycle(*in, *out, c); err != nil { return err } - } else { - out.Lifecycle = nil } - out.TerminationMessagePath = in.TerminationMessagePath - out.ImagePullPolicy = in.ImagePullPolicy if in.SecurityContext != nil { in, out := &in.SecurityContext, &out.SecurityContext *out = new(SecurityContext) if err := DeepCopy_api_SecurityContext(*in, *out, c); err != nil { return err } - } else { - 
out.SecurityContext = nil } - out.Stdin = in.Stdin - out.StdinOnce = in.StdinOnce - out.TTY = in.TTY return nil } } @@ -629,14 +615,12 @@ func DeepCopy_api_ContainerImage(in interface{}, out interface{}, c *conversion. { in := in.(*ContainerImage) out := out.(*ContainerImage) + *out = *in if in.Names != nil { in, out := &in.Names, &out.Names *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Names = nil } - out.SizeBytes = in.SizeBytes return nil } } @@ -645,11 +629,7 @@ func DeepCopy_api_ContainerPort(in interface{}, out interface{}, c *conversion.C { in := in.(*ContainerPort) out := out.(*ContainerPort) - out.Name = in.Name - out.HostPort = in.HostPort - out.ContainerPort = in.ContainerPort - out.Protocol = in.Protocol - out.HostIP = in.HostIP + *out = *in return nil } } @@ -658,12 +638,11 @@ func DeepCopy_api_ContainerState(in interface{}, out interface{}, c *conversion. { in := in.(*ContainerState) out := out.(*ContainerState) + *out = *in if in.Waiting != nil { in, out := &in.Waiting, &out.Waiting *out = new(ContainerStateWaiting) **out = **in - } else { - out.Waiting = nil } if in.Running != nil { in, out := &in.Running, &out.Running @@ -671,8 +650,6 @@ func DeepCopy_api_ContainerState(in interface{}, out interface{}, c *conversion. if err := DeepCopy_api_ContainerStateRunning(*in, *out, c); err != nil { return err } - } else { - out.Running = nil } if in.Terminated != nil { in, out := &in.Terminated, &out.Terminated @@ -680,8 +657,6 @@ func DeepCopy_api_ContainerState(in interface{}, out interface{}, c *conversion. if err := DeepCopy_api_ContainerStateTerminated(*in, *out, c); err != nil { return err } - } else { - out.Terminated = nil } return nil } @@ -691,6 +666,7 @@ func DeepCopy_api_ContainerStateRunning(in interface{}, out interface{}, c *conv { in := in.(*ContainerStateRunning) out := out.(*ContainerStateRunning) + *out = *in out.StartedAt = in.StartedAt.DeepCopy() return nil } @@ -700,13 +676,9 @@ func DeepCopy_api_ContainerStateTerminated(in interface{}, out interface{}, c *c { in := in.(*ContainerStateTerminated) out := out.(*ContainerStateTerminated) - out.ExitCode = in.ExitCode - out.Signal = in.Signal - out.Reason = in.Reason - out.Message = in.Message + *out = *in out.StartedAt = in.StartedAt.DeepCopy() out.FinishedAt = in.FinishedAt.DeepCopy() - out.ContainerID = in.ContainerID return nil } } @@ -715,8 +687,7 @@ func DeepCopy_api_ContainerStateWaiting(in interface{}, out interface{}, c *conv { in := in.(*ContainerStateWaiting) out := out.(*ContainerStateWaiting) - out.Reason = in.Reason - out.Message = in.Message + *out = *in return nil } } @@ -725,18 +696,13 @@ func DeepCopy_api_ContainerStatus(in interface{}, out interface{}, c *conversion { in := in.(*ContainerStatus) out := out.(*ContainerStatus) - out.Name = in.Name + *out = *in if err := DeepCopy_api_ContainerState(&in.State, &out.State, c); err != nil { return err } if err := DeepCopy_api_ContainerState(&in.LastTerminationState, &out.LastTerminationState, c); err != nil { return err } - out.Ready = in.Ready - out.RestartCount = in.RestartCount - out.Image = in.Image - out.ImageID = in.ImageID - out.ContainerID = in.ContainerID return nil } } @@ -745,21 +711,23 @@ func DeepCopy_api_ConversionError(in interface{}, out interface{}, c *conversion { in := in.(*ConversionError) out := out.(*ConversionError) - if in.In == nil { - out.In = nil - } else if newVal, err := c.DeepCopy(&in.In); err != nil { - return err - } else { - out.In = *newVal.(*interface{}) + *out = *in + // in.In is kind 'Interface' + 
if in.In != nil { + if newVal, err := c.DeepCopy(&in.In); err != nil { + return err + } else { + out.In = *newVal.(*interface{}) + } } - if in.Out == nil { - out.Out = nil - } else if newVal, err := c.DeepCopy(&in.Out); err != nil { - return err - } else { - out.Out = *newVal.(*interface{}) + // in.Out is kind 'Interface' + if in.Out != nil { + if newVal, err := c.DeepCopy(&in.Out); err != nil { + return err + } else { + out.Out = *newVal.(*interface{}) + } } - out.Message = in.Message return nil } } @@ -768,7 +736,7 @@ func DeepCopy_api_DaemonEndpoint(in interface{}, out interface{}, c *conversion. { in := in.(*DaemonEndpoint) out := out.(*DaemonEndpoint) - out.Port = in.Port + *out = *in return nil } } @@ -777,13 +745,11 @@ func DeepCopy_api_DeleteOptions(in interface{}, out interface{}, c *conversion.C { in := in.(*DeleteOptions) out := out.(*DeleteOptions) - out.TypeMeta = in.TypeMeta + *out = *in if in.GracePeriodSeconds != nil { in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds *out = new(int64) **out = **in - } else { - out.GracePeriodSeconds = nil } if in.Preconditions != nil { in, out := &in.Preconditions, &out.Preconditions @@ -791,15 +757,34 @@ func DeepCopy_api_DeleteOptions(in interface{}, out interface{}, c *conversion.C if err := DeepCopy_api_Preconditions(*in, *out, c); err != nil { return err } - } else { - out.Preconditions = nil } if in.OrphanDependents != nil { in, out := &in.OrphanDependents, &out.OrphanDependents *out = new(bool) **out = **in - } else { - out.OrphanDependents = nil + } + if in.PropagationPolicy != nil { + in, out := &in.PropagationPolicy, &out.PropagationPolicy + *out = new(DeletionPropagation) + **out = **in + } + return nil + } +} + +func DeepCopy_api_DownwardAPIProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIProjection) + out := out.(*DownwardAPIProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + if err := DeepCopy_api_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } } return nil } @@ -809,13 +794,11 @@ func DeepCopy_api_DownwardAPIVolumeFile(in interface{}, out interface{}, c *conv { in := in.(*DownwardAPIVolumeFile) out := out.(*DownwardAPIVolumeFile) - out.Path = in.Path + *out = *in if in.FieldRef != nil { in, out := &in.FieldRef, &out.FieldRef *out = new(ObjectFieldSelector) **out = **in - } else { - out.FieldRef = nil } if in.ResourceFieldRef != nil { in, out := &in.ResourceFieldRef, &out.ResourceFieldRef @@ -823,15 +806,11 @@ func DeepCopy_api_DownwardAPIVolumeFile(in interface{}, out interface{}, c *conv if err := DeepCopy_api_ResourceFieldSelector(*in, *out, c); err != nil { return err } - } else { - out.ResourceFieldRef = nil } if in.Mode != nil { in, out := &in.Mode, &out.Mode *out = new(int32) **out = **in - } else { - out.Mode = nil } return nil } @@ -841,6 +820,7 @@ func DeepCopy_api_DownwardAPIVolumeSource(in interface{}, out interface{}, c *co { in := in.(*DownwardAPIVolumeSource) out := out.(*DownwardAPIVolumeSource) + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DownwardAPIVolumeFile, len(*in)) @@ -849,15 +829,11 @@ func DeepCopy_api_DownwardAPIVolumeSource(in interface{}, out interface{}, c *co return err } } - } else { - out.Items = nil } if in.DefaultMode != nil { in, out := &in.DefaultMode, &out.DefaultMode *out = new(int32) **out = **in - } else { - out.DefaultMode = nil } return nil } @@ 
-867,7 +843,7 @@ func DeepCopy_api_EmptyDirVolumeSource(in interface{}, out interface{}, c *conve { in := in.(*EmptyDirVolumeSource) out := out.(*EmptyDirVolumeSource) - out.Medium = in.Medium + *out = *in return nil } } @@ -876,21 +852,16 @@ func DeepCopy_api_EndpointAddress(in interface{}, out interface{}, c *conversion { in := in.(*EndpointAddress) out := out.(*EndpointAddress) - out.IP = in.IP - out.Hostname = in.Hostname + *out = *in if in.NodeName != nil { in, out := &in.NodeName, &out.NodeName *out = new(string) **out = **in - } else { - out.NodeName = nil } if in.TargetRef != nil { in, out := &in.TargetRef, &out.TargetRef *out = new(ObjectReference) **out = **in - } else { - out.TargetRef = nil } return nil } @@ -900,9 +871,7 @@ func DeepCopy_api_EndpointPort(in interface{}, out interface{}, c *conversion.Cl { in := in.(*EndpointPort) out := out.(*EndpointPort) - out.Name = in.Name - out.Port = in.Port - out.Protocol = in.Protocol + *out = *in return nil } } @@ -911,6 +880,7 @@ func DeepCopy_api_EndpointSubset(in interface{}, out interface{}, c *conversion. { in := in.(*EndpointSubset) out := out.(*EndpointSubset) + *out = *in if in.Addresses != nil { in, out := &in.Addresses, &out.Addresses *out = make([]EndpointAddress, len(*in)) @@ -919,8 +889,6 @@ func DeepCopy_api_EndpointSubset(in interface{}, out interface{}, c *conversion. return err } } - } else { - out.Addresses = nil } if in.NotReadyAddresses != nil { in, out := &in.NotReadyAddresses, &out.NotReadyAddresses @@ -930,17 +898,11 @@ func DeepCopy_api_EndpointSubset(in interface{}, out interface{}, c *conversion. return err } } - } else { - out.NotReadyAddresses = nil } if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]EndpointPort, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Ports = nil + copy(*out, *in) } return nil } @@ -950,9 +912,11 @@ func DeepCopy_api_Endpoints(in interface{}, out interface{}, c *conversion.Clone { in := in.(*Endpoints) out := out.(*Endpoints) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if in.Subsets != nil { in, out := &in.Subsets, &out.Subsets @@ -962,8 +926,6 @@ func DeepCopy_api_Endpoints(in interface{}, out interface{}, c *conversion.Clone return err } } - } else { - out.Subsets = nil } return nil } @@ -973,8 +935,7 @@ func DeepCopy_api_EndpointsList(in interface{}, out interface{}, c *conversion.C { in := in.(*EndpointsList) out := out.(*EndpointsList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Endpoints, len(*in)) @@ -983,8 +944,29 @@ func DeepCopy_api_EndpointsList(in interface{}, out interface{}, c *conversion.C return err } } - } else { - out.Items = nil + } + return nil + } +} + +func DeepCopy_api_EnvFromSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvFromSource) + out := out.(*EnvFromSource) + *out = *in + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + *out = new(ConfigMapEnvSource) + if err := DeepCopy_api_ConfigMapEnvSource(*in, *out, c); err != nil { + return err + } + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretEnvSource) + if err := DeepCopy_api_SecretEnvSource(*in, *out, c); err != nil { + return err + } } return 
nil } @@ -994,16 +976,13 @@ func DeepCopy_api_EnvVar(in interface{}, out interface{}, c *conversion.Cloner) { in := in.(*EnvVar) out := out.(*EnvVar) - out.Name = in.Name - out.Value = in.Value + *out = *in if in.ValueFrom != nil { in, out := &in.ValueFrom, &out.ValueFrom *out = new(EnvVarSource) if err := DeepCopy_api_EnvVarSource(*in, *out, c); err != nil { return err } - } else { - out.ValueFrom = nil } return nil } @@ -1013,12 +992,11 @@ func DeepCopy_api_EnvVarSource(in interface{}, out interface{}, c *conversion.Cl { in := in.(*EnvVarSource) out := out.(*EnvVarSource) + *out = *in if in.FieldRef != nil { in, out := &in.FieldRef, &out.FieldRef *out = new(ObjectFieldSelector) **out = **in - } else { - out.FieldRef = nil } if in.ResourceFieldRef != nil { in, out := &in.ResourceFieldRef, &out.ResourceFieldRef @@ -1026,22 +1004,20 @@ func DeepCopy_api_EnvVarSource(in interface{}, out interface{}, c *conversion.Cl if err := DeepCopy_api_ResourceFieldSelector(*in, *out, c); err != nil { return err } - } else { - out.ResourceFieldRef = nil } if in.ConfigMapKeyRef != nil { in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef *out = new(ConfigMapKeySelector) - **out = **in - } else { - out.ConfigMapKeyRef = nil + if err := DeepCopy_api_ConfigMapKeySelector(*in, *out, c); err != nil { + return err + } } if in.SecretKeyRef != nil { in, out := &in.SecretKeyRef, &out.SecretKeyRef *out = new(SecretKeySelector) - **out = **in - } else { - out.SecretKeyRef = nil + if err := DeepCopy_api_SecretKeySelector(*in, *out, c); err != nil { + return err + } } return nil } @@ -1051,18 +1027,14 @@ func DeepCopy_api_Event(in interface{}, out interface{}, c *conversion.Cloner) e { in := in.(*Event) out := out.(*Event) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } - out.InvolvedObject = in.InvolvedObject - out.Reason = in.Reason - out.Message = in.Message - out.Source = in.Source out.FirstTimestamp = in.FirstTimestamp.DeepCopy() out.LastTimestamp = in.LastTimestamp.DeepCopy() - out.Count = in.Count - out.Type = in.Type return nil } } @@ -1071,8 +1043,7 @@ func DeepCopy_api_EventList(in interface{}, out interface{}, c *conversion.Clone { in := in.(*EventList) out := out.(*EventList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Event, len(*in)) @@ -1081,8 +1052,6 @@ func DeepCopy_api_EventList(in interface{}, out interface{}, c *conversion.Clone return err } } - } else { - out.Items = nil } return nil } @@ -1092,8 +1061,7 @@ func DeepCopy_api_EventSource(in interface{}, out interface{}, c *conversion.Clo { in := in.(*EventSource) out := out.(*EventSource) - out.Component = in.Component - out.Host = in.Host + *out = *in return nil } } @@ -1102,48 +1070,31 @@ func DeepCopy_api_ExecAction(in interface{}, out interface{}, c *conversion.Clon { in := in.(*ExecAction) out := out.(*ExecAction) + *out = *in if in.Command != nil { in, out := &in.Command, &out.Command *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Command = nil } return nil } } -func DeepCopy_api_ExportOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ExportOptions) - out := out.(*ExportOptions) - out.TypeMeta = in.TypeMeta - out.Export = in.Export - out.Exact = in.Exact - return nil - } -} - func 
DeepCopy_api_FCVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*FCVolumeSource) out := out.(*FCVolumeSource) + *out = *in if in.TargetWWNs != nil { in, out := &in.TargetWWNs, &out.TargetWWNs *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.TargetWWNs = nil } if in.Lun != nil { in, out := &in.Lun, &out.Lun *out = new(int32) **out = **in - } else { - out.Lun = nil } - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly return nil } } @@ -1152,24 +1103,18 @@ func DeepCopy_api_FlexVolumeSource(in interface{}, out interface{}, c *conversio { in := in.(*FlexVolumeSource) out := out.(*FlexVolumeSource) - out.Driver = in.Driver - out.FSType = in.FSType + *out = *in if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef *out = new(LocalObjectReference) **out = **in - } else { - out.SecretRef = nil } - out.ReadOnly = in.ReadOnly if in.Options != nil { in, out := &in.Options, &out.Options *out = make(map[string]string) for key, val := range *in { (*out)[key] = val } - } else { - out.Options = nil } return nil } @@ -1179,8 +1124,7 @@ func DeepCopy_api_FlockerVolumeSource(in interface{}, out interface{}, c *conver { in := in.(*FlockerVolumeSource) out := out.(*FlockerVolumeSource) - out.DatasetName = in.DatasetName - out.DatasetUUID = in.DatasetUUID + *out = *in return nil } } @@ -1189,10 +1133,7 @@ func DeepCopy_api_GCEPersistentDiskVolumeSource(in interface{}, out interface{}, { in := in.(*GCEPersistentDiskVolumeSource) out := out.(*GCEPersistentDiskVolumeSource) - out.PDName = in.PDName - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly + *out = *in return nil } } @@ -1201,9 +1142,7 @@ func DeepCopy_api_GitRepoVolumeSource(in interface{}, out interface{}, c *conver { in := in.(*GitRepoVolumeSource) out := out.(*GitRepoVolumeSource) - out.Repository = in.Repository - out.Revision = in.Revision - out.Directory = in.Directory + *out = *in return nil } } @@ -1212,9 +1151,7 @@ func DeepCopy_api_GlusterfsVolumeSource(in interface{}, out interface{}, c *conv { in := in.(*GlusterfsVolumeSource) out := out.(*GlusterfsVolumeSource) - out.EndpointsName = in.EndpointsName - out.Path = in.Path - out.ReadOnly = in.ReadOnly + *out = *in return nil } } @@ -1223,18 +1160,11 @@ func DeepCopy_api_HTTPGetAction(in interface{}, out interface{}, c *conversion.C { in := in.(*HTTPGetAction) out := out.(*HTTPGetAction) - out.Path = in.Path - out.Port = in.Port - out.Host = in.Host - out.Scheme = in.Scheme + *out = *in if in.HTTPHeaders != nil { in, out := &in.HTTPHeaders, &out.HTTPHeaders *out = make([]HTTPHeader, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.HTTPHeaders = nil + copy(*out, *in) } return nil } @@ -1244,8 +1174,7 @@ func DeepCopy_api_HTTPHeader(in interface{}, out interface{}, c *conversion.Clon { in := in.(*HTTPHeader) out := out.(*HTTPHeader) - out.Name = in.Name - out.Value = in.Value + *out = *in return nil } } @@ -1254,14 +1183,13 @@ func DeepCopy_api_Handler(in interface{}, out interface{}, c *conversion.Cloner) { in := in.(*Handler) out := out.(*Handler) + *out = *in if in.Exec != nil { in, out := &in.Exec, &out.Exec *out = new(ExecAction) if err := DeepCopy_api_ExecAction(*in, *out, c); err != nil { return err } - } else { - out.Exec = nil } if in.HTTPGet != nil { in, out := &in.HTTPGet, &out.HTTPGet @@ -1269,15 +1197,11 @@ func DeepCopy_api_Handler(in interface{}, out interface{}, c *conversion.Cloner) if err := DeepCopy_api_HTTPGetAction(*in, *out, c); err != nil { 
return err } - } else { - out.HTTPGet = nil } if in.TCPSocket != nil { in, out := &in.TCPSocket, &out.TCPSocket *out = new(TCPSocketAction) **out = **in - } else { - out.TCPSocket = nil } return nil } @@ -1287,7 +1211,7 @@ func DeepCopy_api_HostPathVolumeSource(in interface{}, out interface{}, c *conve { in := in.(*HostPathVolumeSource) out := out.(*HostPathVolumeSource) - out.Path = in.Path + *out = *in return nil } } @@ -1296,12 +1220,12 @@ func DeepCopy_api_ISCSIVolumeSource(in interface{}, out interface{}, c *conversi { in := in.(*ISCSIVolumeSource) out := out.(*ISCSIVolumeSource) - out.TargetPortal = in.TargetPortal - out.IQN = in.IQN - out.Lun = in.Lun - out.ISCSIInterface = in.ISCSIInterface - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } return nil } } @@ -1310,14 +1234,11 @@ func DeepCopy_api_KeyToPath(in interface{}, out interface{}, c *conversion.Clone { in := in.(*KeyToPath) out := out.(*KeyToPath) - out.Key = in.Key - out.Path = in.Path + *out = *in if in.Mode != nil { in, out := &in.Mode, &out.Mode *out = new(int32) **out = **in - } else { - out.Mode = nil } return nil } @@ -1327,14 +1248,13 @@ func DeepCopy_api_Lifecycle(in interface{}, out interface{}, c *conversion.Clone { in := in.(*Lifecycle) out := out.(*Lifecycle) + *out = *in if in.PostStart != nil { in, out := &in.PostStart, &out.PostStart *out = new(Handler) if err := DeepCopy_api_Handler(*in, *out, c); err != nil { return err } - } else { - out.PostStart = nil } if in.PreStop != nil { in, out := &in.PreStop, &out.PreStop @@ -1342,8 +1262,6 @@ func DeepCopy_api_Lifecycle(in interface{}, out interface{}, c *conversion.Clone if err := DeepCopy_api_Handler(*in, *out, c); err != nil { return err } - } else { - out.PreStop = nil } return nil } @@ -1353,9 +1271,11 @@ func DeepCopy_api_LimitRange(in interface{}, out interface{}, c *conversion.Clon { in := in.(*LimitRange) out := out.(*LimitRange) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_api_LimitRangeSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -1368,15 +1288,13 @@ func DeepCopy_api_LimitRangeItem(in interface{}, out interface{}, c *conversion. { in := in.(*LimitRangeItem) out := out.(*LimitRangeItem) - out.Type = in.Type + *out = *in if in.Max != nil { in, out := &in.Max, &out.Max *out = make(ResourceList) for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Max = nil } if in.Min != nil { in, out := &in.Min, &out.Min @@ -1384,8 +1302,6 @@ func DeepCopy_api_LimitRangeItem(in interface{}, out interface{}, c *conversion. for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Min = nil } if in.Default != nil { in, out := &in.Default, &out.Default @@ -1393,8 +1309,6 @@ func DeepCopy_api_LimitRangeItem(in interface{}, out interface{}, c *conversion. for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Default = nil } if in.DefaultRequest != nil { in, out := &in.DefaultRequest, &out.DefaultRequest @@ -1402,8 +1316,6 @@ func DeepCopy_api_LimitRangeItem(in interface{}, out interface{}, c *conversion. 
for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.DefaultRequest = nil } if in.MaxLimitRequestRatio != nil { in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio @@ -1411,8 +1323,6 @@ func DeepCopy_api_LimitRangeItem(in interface{}, out interface{}, c *conversion. for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.MaxLimitRequestRatio = nil } return nil } @@ -1422,8 +1332,7 @@ func DeepCopy_api_LimitRangeList(in interface{}, out interface{}, c *conversion. { in := in.(*LimitRangeList) out := out.(*LimitRangeList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]LimitRange, len(*in)) @@ -1432,8 +1341,6 @@ func DeepCopy_api_LimitRangeList(in interface{}, out interface{}, c *conversion. return err } } - } else { - out.Items = nil } return nil } @@ -1443,6 +1350,7 @@ func DeepCopy_api_LimitRangeSpec(in interface{}, out interface{}, c *conversion. { in := in.(*LimitRangeSpec) out := out.(*LimitRangeSpec) + *out = *in if in.Limits != nil { in, out := &in.Limits, &out.Limits *out = make([]LimitRangeItem, len(*in)) @@ -1451,8 +1359,6 @@ func DeepCopy_api_LimitRangeSpec(in interface{}, out interface{}, c *conversion. return err } } - } else { - out.Limits = nil } return nil } @@ -1462,8 +1368,7 @@ func DeepCopy_api_List(in interface{}, out interface{}, c *conversion.Cloner) er { in := in.(*List) out := out.(*List) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]runtime.Object, len(*in)) @@ -1474,8 +1379,6 @@ func DeepCopy_api_List(in interface{}, out interface{}, c *conversion.Cloner) er (*out)[i] = *newVal.(*runtime.Object) } } - } else { - out.Items = nil } return nil } @@ -1485,29 +1388,27 @@ func DeepCopy_api_ListOptions(in interface{}, out interface{}, c *conversion.Clo { in := in.(*ListOptions) out := out.(*ListOptions) - out.TypeMeta = in.TypeMeta - if in.LabelSelector == nil { - out.LabelSelector = nil - } else if newVal, err := c.DeepCopy(&in.LabelSelector); err != nil { - return err - } else { - out.LabelSelector = *newVal.(*labels.Selector) + *out = *in + // in.LabelSelector is kind 'Interface' + if in.LabelSelector != nil { + if newVal, err := c.DeepCopy(&in.LabelSelector); err != nil { + return err + } else { + out.LabelSelector = *newVal.(*labels.Selector) + } } - if in.FieldSelector == nil { - out.FieldSelector = nil - } else if newVal, err := c.DeepCopy(&in.FieldSelector); err != nil { - return err - } else { - out.FieldSelector = *newVal.(*fields.Selector) + // in.FieldSelector is kind 'Interface' + if in.FieldSelector != nil { + if newVal, err := c.DeepCopy(&in.FieldSelector); err != nil { + return err + } else { + out.FieldSelector = *newVal.(*fields.Selector) + } } - out.Watch = in.Watch - out.ResourceVersion = in.ResourceVersion if in.TimeoutSeconds != nil { in, out := &in.TimeoutSeconds, &out.TimeoutSeconds *out = new(int64) **out = **in - } else { - out.TimeoutSeconds = nil } return nil } @@ -1517,8 +1418,7 @@ func DeepCopy_api_LoadBalancerIngress(in interface{}, out interface{}, c *conver { in := in.(*LoadBalancerIngress) out := out.(*LoadBalancerIngress) - out.IP = in.IP - out.Hostname = in.Hostname + *out = *in return nil } } @@ -1527,14 +1427,11 @@ func DeepCopy_api_LoadBalancerStatus(in interface{}, out interface{}, c *convers { in := in.(*LoadBalancerStatus) out := out.(*LoadBalancerStatus) + *out = *in if in.Ingress != nil { in, 
out := &in.Ingress, &out.Ingress *out = make([]LoadBalancerIngress, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Ingress = nil + copy(*out, *in) } return nil } @@ -1544,7 +1441,7 @@ func DeepCopy_api_LocalObjectReference(in interface{}, out interface{}, c *conve { in := in.(*LocalObjectReference) out := out.(*LocalObjectReference) - out.Name = in.Name + *out = *in return nil } } @@ -1553,9 +1450,7 @@ func DeepCopy_api_NFSVolumeSource(in interface{}, out interface{}, c *conversion { in := in.(*NFSVolumeSource) out := out.(*NFSVolumeSource) - out.Server = in.Server - out.Path = in.Path - out.ReadOnly = in.ReadOnly + *out = *in return nil } } @@ -1564,14 +1459,15 @@ func DeepCopy_api_Namespace(in interface{}, out interface{}, c *conversion.Clone { in := in.(*Namespace) out := out.(*Namespace) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_api_NamespaceSpec(&in.Spec, &out.Spec, c); err != nil { return err } - out.Status = in.Status return nil } } @@ -1580,8 +1476,7 @@ func DeepCopy_api_NamespaceList(in interface{}, out interface{}, c *conversion.C { in := in.(*NamespaceList) out := out.(*NamespaceList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Namespace, len(*in)) @@ -1590,8 +1485,6 @@ func DeepCopy_api_NamespaceList(in interface{}, out interface{}, c *conversion.C return err } } - } else { - out.Items = nil } return nil } @@ -1601,14 +1494,11 @@ func DeepCopy_api_NamespaceSpec(in interface{}, out interface{}, c *conversion.C { in := in.(*NamespaceSpec) out := out.(*NamespaceSpec) + *out = *in if in.Finalizers != nil { in, out := &in.Finalizers, &out.Finalizers *out = make([]FinalizerName, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Finalizers = nil + copy(*out, *in) } return nil } @@ -1618,7 +1508,7 @@ func DeepCopy_api_NamespaceStatus(in interface{}, out interface{}, c *conversion { in := in.(*NamespaceStatus) out := out.(*NamespaceStatus) - out.Phase = in.Phase + *out = *in return nil } } @@ -1627,11 +1517,15 @@ func DeepCopy_api_Node(in interface{}, out interface{}, c *conversion.Cloner) er { in := in.(*Node) out := out.(*Node) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_NodeSpec(&in.Spec, &out.Spec, c); err != nil { return err } - out.Spec = in.Spec if err := DeepCopy_api_NodeStatus(&in.Status, &out.Status, c); err != nil { return err } @@ -1643,8 +1537,7 @@ func DeepCopy_api_NodeAddress(in interface{}, out interface{}, c *conversion.Clo { in := in.(*NodeAddress) out := out.(*NodeAddress) - out.Type = in.Type - out.Address = in.Address + *out = *in return nil } } @@ -1653,14 +1546,13 @@ func DeepCopy_api_NodeAffinity(in interface{}, out interface{}, c *conversion.Cl { in := in.(*NodeAffinity) out := out.(*NodeAffinity) + *out = *in if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution *out = new(NodeSelector) if err := 
DeepCopy_api_NodeSelector(*in, *out, c); err != nil { return err } - } else { - out.RequiredDuringSchedulingIgnoredDuringExecution = nil } if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution @@ -1670,8 +1562,6 @@ func DeepCopy_api_NodeAffinity(in interface{}, out interface{}, c *conversion.Cl return err } } - } else { - out.PreferredDuringSchedulingIgnoredDuringExecution = nil } return nil } @@ -1681,12 +1571,9 @@ func DeepCopy_api_NodeCondition(in interface{}, out interface{}, c *conversion.C { in := in.(*NodeCondition) out := out.(*NodeCondition) - out.Type = in.Type - out.Status = in.Status + *out = *in out.LastHeartbeatTime = in.LastHeartbeatTime.DeepCopy() out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - out.Reason = in.Reason - out.Message = in.Message return nil } } @@ -1695,7 +1582,7 @@ func DeepCopy_api_NodeDaemonEndpoints(in interface{}, out interface{}, c *conver { in := in.(*NodeDaemonEndpoints) out := out.(*NodeDaemonEndpoints) - out.KubeletEndpoint = in.KubeletEndpoint + *out = *in return nil } } @@ -1704,8 +1591,7 @@ func DeepCopy_api_NodeList(in interface{}, out interface{}, c *conversion.Cloner { in := in.(*NodeList) out := out.(*NodeList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Node, len(*in)) @@ -1714,8 +1600,6 @@ func DeepCopy_api_NodeList(in interface{}, out interface{}, c *conversion.Cloner return err } } - } else { - out.Items = nil } return nil } @@ -1725,8 +1609,7 @@ func DeepCopy_api_NodeProxyOptions(in interface{}, out interface{}, c *conversio { in := in.(*NodeProxyOptions) out := out.(*NodeProxyOptions) - out.TypeMeta = in.TypeMeta - out.Path = in.Path + *out = *in return nil } } @@ -1735,14 +1618,13 @@ func DeepCopy_api_NodeResources(in interface{}, out interface{}, c *conversion.C { in := in.(*NodeResources) out := out.(*NodeResources) + *out = *in if in.Capacity != nil { in, out := &in.Capacity, &out.Capacity *out = make(ResourceList) for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Capacity = nil } return nil } @@ -1752,6 +1634,7 @@ func DeepCopy_api_NodeSelector(in interface{}, out interface{}, c *conversion.Cl { in := in.(*NodeSelector) out := out.(*NodeSelector) + *out = *in if in.NodeSelectorTerms != nil { in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms *out = make([]NodeSelectorTerm, len(*in)) @@ -1760,8 +1643,6 @@ func DeepCopy_api_NodeSelector(in interface{}, out interface{}, c *conversion.Cl return err } } - } else { - out.NodeSelectorTerms = nil } return nil } @@ -1771,14 +1652,11 @@ func DeepCopy_api_NodeSelectorRequirement(in interface{}, out interface{}, c *co { in := in.(*NodeSelectorRequirement) out := out.(*NodeSelectorRequirement) - out.Key = in.Key - out.Operator = in.Operator + *out = *in if in.Values != nil { in, out := &in.Values, &out.Values *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Values = nil } return nil } @@ -1788,6 +1666,7 @@ func DeepCopy_api_NodeSelectorTerm(in interface{}, out interface{}, c *conversio { in := in.(*NodeSelectorTerm) out := out.(*NodeSelectorTerm) + *out = *in if in.MatchExpressions != nil { in, out := &in.MatchExpressions, &out.MatchExpressions *out = make([]NodeSelectorRequirement, len(*in)) @@ -1796,8 +1675,6 @@ func DeepCopy_api_NodeSelectorTerm(in interface{}, out interface{}, c *conversio return err } } - } else { 
- out.MatchExpressions = nil } return nil } @@ -1807,10 +1684,16 @@ func DeepCopy_api_NodeSpec(in interface{}, out interface{}, c *conversion.Cloner { in := in.(*NodeSpec) out := out.(*NodeSpec) - out.PodCIDR = in.PodCIDR - out.ExternalID = in.ExternalID - out.ProviderID = in.ProviderID - out.Unschedulable = in.Unschedulable + *out = *in + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]Taint, len(*in)) + for i := range *in { + if err := DeepCopy_api_Taint(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } return nil } } @@ -1819,14 +1702,13 @@ func DeepCopy_api_NodeStatus(in interface{}, out interface{}, c *conversion.Clon { in := in.(*NodeStatus) out := out.(*NodeStatus) + *out = *in if in.Capacity != nil { in, out := &in.Capacity, &out.Capacity *out = make(ResourceList) for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Capacity = nil } if in.Allocatable != nil { in, out := &in.Allocatable, &out.Allocatable @@ -1834,10 +1716,7 @@ func DeepCopy_api_NodeStatus(in interface{}, out interface{}, c *conversion.Clon for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Allocatable = nil } - out.Phase = in.Phase if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]NodeCondition, len(*in)) @@ -1846,20 +1725,12 @@ func DeepCopy_api_NodeStatus(in interface{}, out interface{}, c *conversion.Clon return err } } - } else { - out.Conditions = nil } if in.Addresses != nil { in, out := &in.Addresses, &out.Addresses *out = make([]NodeAddress, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Addresses = nil + copy(*out, *in) } - out.DaemonEndpoints = in.DaemonEndpoints - out.NodeInfo = in.NodeInfo if in.Images != nil { in, out := &in.Images, &out.Images *out = make([]ContainerImage, len(*in)) @@ -1868,26 +1739,16 @@ func DeepCopy_api_NodeStatus(in interface{}, out interface{}, c *conversion.Clon return err } } - } else { - out.Images = nil } if in.VolumesInUse != nil { in, out := &in.VolumesInUse, &out.VolumesInUse *out = make([]UniqueVolumeName, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.VolumesInUse = nil + copy(*out, *in) } if in.VolumesAttached != nil { in, out := &in.VolumesAttached, &out.VolumesAttached *out = make([]AttachedVolume, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.VolumesAttached = nil + copy(*out, *in) } return nil } @@ -1897,16 +1758,7 @@ func DeepCopy_api_NodeSystemInfo(in interface{}, out interface{}, c *conversion. 
{ in := in.(*NodeSystemInfo) out := out.(*NodeSystemInfo) - out.MachineID = in.MachineID - out.SystemUUID = in.SystemUUID - out.BootID = in.BootID - out.KernelVersion = in.KernelVersion - out.OSImage = in.OSImage - out.ContainerRuntimeVersion = in.ContainerRuntimeVersion - out.KubeletVersion = in.KubeletVersion - out.KubeProxyVersion = in.KubeProxyVersion - out.OperatingSystem = in.OperatingSystem - out.Architecture = in.Architecture + *out = *in return nil } } @@ -1915,8 +1767,7 @@ func DeepCopy_api_ObjectFieldSelector(in interface{}, out interface{}, c *conver { in := in.(*ObjectFieldSelector) out := out.(*ObjectFieldSelector) - out.APIVersion = in.APIVersion - out.FieldPath = in.FieldPath + *out = *in return nil } } @@ -1925,27 +1776,17 @@ func DeepCopy_api_ObjectMeta(in interface{}, out interface{}, c *conversion.Clon { in := in.(*ObjectMeta) out := out.(*ObjectMeta) - out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = in.Namespace - out.SelfLink = in.SelfLink - out.UID = in.UID - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation + *out = *in out.CreationTimestamp = in.CreationTimestamp.DeepCopy() if in.DeletionTimestamp != nil { in, out := &in.DeletionTimestamp, &out.DeletionTimestamp - *out = new(unversioned.Time) + *out = new(v1.Time) **out = (*in).DeepCopy() - } else { - out.DeletionTimestamp = nil } if in.DeletionGracePeriodSeconds != nil { in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds *out = new(int64) **out = **in - } else { - out.DeletionGracePeriodSeconds = nil } if in.Labels != nil { in, out := &in.Labels, &out.Labels @@ -1953,8 +1794,6 @@ func DeepCopy_api_ObjectMeta(in interface{}, out interface{}, c *conversion.Clon for key, val := range *in { (*out)[key] = val } - } else { - out.Labels = nil } if in.Annotations != nil { in, out := &in.Annotations, &out.Annotations @@ -1962,28 +1801,23 @@ func DeepCopy_api_ObjectMeta(in interface{}, out interface{}, c *conversion.Clon for key, val := range *in { (*out)[key] = val } - } else { - out.Annotations = nil } if in.OwnerReferences != nil { in, out := &in.OwnerReferences, &out.OwnerReferences - *out = make([]OwnerReference, len(*in)) + *out = make([]v1.OwnerReference, len(*in)) for i := range *in { - if err := DeepCopy_api_OwnerReference(&(*in)[i], &(*out)[i], c); err != nil { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { return err + } else { + (*out)[i] = *newVal.(*v1.OwnerReference) } } - } else { - out.OwnerReferences = nil } if in.Finalizers != nil { in, out := &in.Finalizers, &out.Finalizers *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Finalizers = nil } - out.ClusterName = in.ClusterName return nil } } @@ -1992,32 +1826,7 @@ func DeepCopy_api_ObjectReference(in interface{}, out interface{}, c *conversion { in := in.(*ObjectReference) out := out.(*ObjectReference) - out.Kind = in.Kind - out.Namespace = in.Namespace - out.Name = in.Name - out.UID = in.UID - out.APIVersion = in.APIVersion - out.ResourceVersion = in.ResourceVersion - out.FieldPath = in.FieldPath - return nil - } -} - -func DeepCopy_api_OwnerReference(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*OwnerReference) - out := out.(*OwnerReference) - out.APIVersion = in.APIVersion - out.Kind = in.Kind - out.Name = in.Name - out.UID = in.UID - if in.Controller != nil { - in, out := &in.Controller, &out.Controller - *out = new(bool) - **out = **in - } else { - out.Controller = nil - } + *out = *in return nil } } @@ -2026,14 +1835,15 
@@ func DeepCopy_api_PersistentVolume(in interface{}, out interface{}, c *conversio { in := in.(*PersistentVolume) out := out.(*PersistentVolume) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_api_PersistentVolumeSpec(&in.Spec, &out.Spec, c); err != nil { return err } - out.Status = in.Status return nil } } @@ -2042,9 +1852,11 @@ func DeepCopy_api_PersistentVolumeClaim(in interface{}, out interface{}, c *conv { in := in.(*PersistentVolumeClaim) out := out.(*PersistentVolumeClaim) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_api_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -2060,8 +1872,7 @@ func DeepCopy_api_PersistentVolumeClaimList(in interface{}, out interface{}, c * { in := in.(*PersistentVolumeClaimList) out := out.(*PersistentVolumeClaimList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PersistentVolumeClaim, len(*in)) @@ -2070,8 +1881,6 @@ func DeepCopy_api_PersistentVolumeClaimList(in interface{}, out interface{}, c * return err } } - } else { - out.Items = nil } return nil } @@ -2081,28 +1890,28 @@ func DeepCopy_api_PersistentVolumeClaimSpec(in interface{}, out interface{}, c * { in := in.(*PersistentVolumeClaimSpec) out := out.(*PersistentVolumeClaimSpec) + *out = *in if in.AccessModes != nil { in, out := &in.AccessModes, &out.AccessModes *out = make([]PersistentVolumeAccessMode, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.AccessModes = nil + copy(*out, *in) } if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*v1.LabelSelector) } - } else { - out.Selector = nil } if err := DeepCopy_api_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { return err } - out.VolumeName = in.VolumeName + if in.StorageClassName != nil { + in, out := &in.StorageClassName, &out.StorageClassName + *out = new(string) + **out = **in + } return nil } } @@ -2111,15 +1920,11 @@ func DeepCopy_api_PersistentVolumeClaimStatus(in interface{}, out interface{}, c { in := in.(*PersistentVolumeClaimStatus) out := out.(*PersistentVolumeClaimStatus) - out.Phase = in.Phase + *out = *in if in.AccessModes != nil { in, out := &in.AccessModes, &out.AccessModes *out = make([]PersistentVolumeAccessMode, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.AccessModes = nil + copy(*out, *in) } if in.Capacity != nil { in, out := &in.Capacity, &out.Capacity @@ -2127,8 +1932,6 @@ func DeepCopy_api_PersistentVolumeClaimStatus(in interface{}, out interface{}, c for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Capacity = nil } return nil } @@ -2138,8 +1941,7 @@ func DeepCopy_api_PersistentVolumeClaimVolumeSource(in interface{}, out interfac { in := in.(*PersistentVolumeClaimVolumeSource) out := 
out.(*PersistentVolumeClaimVolumeSource) - out.ClaimName = in.ClaimName - out.ReadOnly = in.ReadOnly + *out = *in return nil } } @@ -2148,8 +1950,7 @@ func DeepCopy_api_PersistentVolumeList(in interface{}, out interface{}, c *conve { in := in.(*PersistentVolumeList) out := out.(*PersistentVolumeList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PersistentVolume, len(*in)) @@ -2158,8 +1959,6 @@ func DeepCopy_api_PersistentVolumeList(in interface{}, out interface{}, c *conve return err } } - } else { - out.Items = nil } return nil } @@ -2169,40 +1968,31 @@ func DeepCopy_api_PersistentVolumeSource(in interface{}, out interface{}, c *con { in := in.(*PersistentVolumeSource) out := out.(*PersistentVolumeSource) + *out = *in if in.GCEPersistentDisk != nil { in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk *out = new(GCEPersistentDiskVolumeSource) **out = **in - } else { - out.GCEPersistentDisk = nil } if in.AWSElasticBlockStore != nil { in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore *out = new(AWSElasticBlockStoreVolumeSource) **out = **in - } else { - out.AWSElasticBlockStore = nil } if in.HostPath != nil { in, out := &in.HostPath, &out.HostPath *out = new(HostPathVolumeSource) **out = **in - } else { - out.HostPath = nil } if in.Glusterfs != nil { in, out := &in.Glusterfs, &out.Glusterfs *out = new(GlusterfsVolumeSource) **out = **in - } else { - out.Glusterfs = nil } if in.NFS != nil { in, out := &in.NFS, &out.NFS *out = new(NFSVolumeSource) **out = **in - } else { - out.NFS = nil } if in.RBD != nil { in, out := &in.RBD, &out.RBD @@ -2210,22 +2000,18 @@ func DeepCopy_api_PersistentVolumeSource(in interface{}, out interface{}, c *con if err := DeepCopy_api_RBDVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.RBD = nil } if in.Quobyte != nil { in, out := &in.Quobyte, &out.Quobyte *out = new(QuobyteVolumeSource) **out = **in - } else { - out.Quobyte = nil } if in.ISCSI != nil { in, out := &in.ISCSI, &out.ISCSI *out = new(ISCSIVolumeSource) - **out = **in - } else { - out.ISCSI = nil + if err := DeepCopy_api_ISCSIVolumeSource(*in, *out, c); err != nil { + return err + } } if in.FlexVolume != nil { in, out := &in.FlexVolume, &out.FlexVolume @@ -2233,15 +2019,11 @@ func DeepCopy_api_PersistentVolumeSource(in interface{}, out interface{}, c *con if err := DeepCopy_api_FlexVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.FlexVolume = nil } if in.Cinder != nil { in, out := &in.Cinder, &out.Cinder *out = new(CinderVolumeSource) **out = **in - } else { - out.Cinder = nil } if in.CephFS != nil { in, out := &in.CephFS, &out.CephFS @@ -2249,8 +2031,6 @@ func DeepCopy_api_PersistentVolumeSource(in interface{}, out interface{}, c *con if err := DeepCopy_api_CephFSVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.CephFS = nil } if in.FC != nil { in, out := &in.FC, &out.FC @@ -2258,29 +2038,21 @@ func DeepCopy_api_PersistentVolumeSource(in interface{}, out interface{}, c *con if err := DeepCopy_api_FCVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.FC = nil } if in.Flocker != nil { in, out := &in.Flocker, &out.Flocker *out = new(FlockerVolumeSource) **out = **in - } else { - out.Flocker = nil } if in.AzureFile != nil { in, out := &in.AzureFile, &out.AzureFile *out = new(AzureFileVolumeSource) **out = **in - } else { - out.AzureFile = nil } if in.VsphereVolume != nil { in, out := &in.VsphereVolume, 
&out.VsphereVolume *out = new(VsphereVirtualDiskVolumeSource) **out = **in - } else { - out.VsphereVolume = nil } if in.AzureDisk != nil { in, out := &in.AzureDisk, &out.AzureDisk @@ -2288,15 +2060,23 @@ func DeepCopy_api_PersistentVolumeSource(in interface{}, out interface{}, c *con if err := DeepCopy_api_AzureDiskVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.AzureDisk = nil } if in.PhotonPersistentDisk != nil { in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk *out = new(PhotonPersistentDiskVolumeSource) **out = **in - } else { - out.PhotonPersistentDisk = nil + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOVolumeSource) + if err := DeepCopy_api_ScaleIOVolumeSource(*in, *out, c); err != nil { + return err + } } return nil } @@ -2306,14 +2086,13 @@ func DeepCopy_api_PersistentVolumeSpec(in interface{}, out interface{}, c *conve { in := in.(*PersistentVolumeSpec) out := out.(*PersistentVolumeSpec) + *out = *in if in.Capacity != nil { in, out := &in.Capacity, &out.Capacity *out = make(ResourceList) for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Capacity = nil } if err := DeepCopy_api_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, c); err != nil { return err @@ -2321,20 +2100,13 @@ func DeepCopy_api_PersistentVolumeSpec(in interface{}, out interface{}, c *conve if in.AccessModes != nil { in, out := &in.AccessModes, &out.AccessModes *out = make([]PersistentVolumeAccessMode, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.AccessModes = nil + copy(*out, *in) } if in.ClaimRef != nil { in, out := &in.ClaimRef, &out.ClaimRef *out = new(ObjectReference) **out = **in - } else { - out.ClaimRef = nil } - out.PersistentVolumeReclaimPolicy = in.PersistentVolumeReclaimPolicy return nil } } @@ -2343,9 +2115,7 @@ func DeepCopy_api_PersistentVolumeStatus(in interface{}, out interface{}, c *con { in := in.(*PersistentVolumeStatus) out := out.(*PersistentVolumeStatus) - out.Phase = in.Phase - out.Message = in.Message - out.Reason = in.Reason + *out = *in return nil } } @@ -2354,8 +2124,7 @@ func DeepCopy_api_PhotonPersistentDiskVolumeSource(in interface{}, out interface { in := in.(*PhotonPersistentDiskVolumeSource) out := out.(*PhotonPersistentDiskVolumeSource) - out.PdID = in.PdID - out.FSType = in.FSType + *out = *in return nil } } @@ -2364,9 +2133,11 @@ func DeepCopy_api_Pod(in interface{}, out interface{}, c *conversion.Cloner) err { in := in.(*Pod) out := out.(*Pod) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_api_PodSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -2382,6 +2153,7 @@ func DeepCopy_api_PodAffinity(in interface{}, out interface{}, c *conversion.Clo { in := in.(*PodAffinity) out := out.(*PodAffinity) + *out = *in if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution *out = make([]PodAffinityTerm, len(*in)) @@ -2390,8 +2162,6 @@ func DeepCopy_api_PodAffinity(in interface{}, out interface{}, c *conversion.Clo return err } } - } else { 
- out.RequiredDuringSchedulingIgnoredDuringExecution = nil } if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution @@ -2401,8 +2171,6 @@ func DeepCopy_api_PodAffinity(in interface{}, out interface{}, c *conversion.Clo return err } } - } else { - out.PreferredDuringSchedulingIgnoredDuringExecution = nil } return nil } @@ -2412,23 +2180,20 @@ func DeepCopy_api_PodAffinityTerm(in interface{}, out interface{}, c *conversion { in := in.(*PodAffinityTerm) out := out.(*PodAffinityTerm) + *out = *in if in.LabelSelector != nil { in, out := &in.LabelSelector, &out.LabelSelector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*v1.LabelSelector) } - } else { - out.LabelSelector = nil } if in.Namespaces != nil { in, out := &in.Namespaces, &out.Namespaces *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Namespaces = nil } - out.TopologyKey = in.TopologyKey return nil } } @@ -2437,6 +2202,7 @@ func DeepCopy_api_PodAntiAffinity(in interface{}, out interface{}, c *conversion { in := in.(*PodAntiAffinity) out := out.(*PodAntiAffinity) + *out = *in if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution *out = make([]PodAffinityTerm, len(*in)) @@ -2445,8 +2211,6 @@ func DeepCopy_api_PodAntiAffinity(in interface{}, out interface{}, c *conversion return err } } - } else { - out.RequiredDuringSchedulingIgnoredDuringExecution = nil } if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution @@ -2456,8 +2220,6 @@ func DeepCopy_api_PodAntiAffinity(in interface{}, out interface{}, c *conversion return err } } - } else { - out.PreferredDuringSchedulingIgnoredDuringExecution = nil } return nil } @@ -2467,12 +2229,7 @@ func DeepCopy_api_PodAttachOptions(in interface{}, out interface{}, c *conversio { in := in.(*PodAttachOptions) out := out.(*PodAttachOptions) - out.TypeMeta = in.TypeMeta - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container + *out = *in return nil } } @@ -2481,12 +2238,9 @@ func DeepCopy_api_PodCondition(in interface{}, out interface{}, c *conversion.Cl { in := in.(*PodCondition) out := out.(*PodCondition) - out.Type = in.Type - out.Status = in.Status + *out = *in out.LastProbeTime = in.LastProbeTime.DeepCopy() out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - out.Reason = in.Reason - out.Message = in.Message return nil } } @@ -2495,18 +2249,11 @@ func DeepCopy_api_PodExecOptions(in interface{}, out interface{}, c *conversion. 
{ in := in.(*PodExecOptions) out := out.(*PodExecOptions) - out.TypeMeta = in.TypeMeta - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container + *out = *in if in.Command != nil { in, out := &in.Command, &out.Command *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Command = nil } return nil } @@ -2516,8 +2263,7 @@ func DeepCopy_api_PodList(in interface{}, out interface{}, c *conversion.Cloner) { in := in.(*PodList) out := out.(*PodList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Pod, len(*in)) @@ -2526,8 +2272,6 @@ func DeepCopy_api_PodList(in interface{}, out interface{}, c *conversion.Cloner) return err } } - } else { - out.Items = nil } return nil } @@ -2537,38 +2281,40 @@ func DeepCopy_api_PodLogOptions(in interface{}, out interface{}, c *conversion.C { in := in.(*PodLogOptions) out := out.(*PodLogOptions) - out.TypeMeta = in.TypeMeta - out.Container = in.Container - out.Follow = in.Follow - out.Previous = in.Previous + *out = *in if in.SinceSeconds != nil { in, out := &in.SinceSeconds, &out.SinceSeconds *out = new(int64) **out = **in - } else { - out.SinceSeconds = nil } if in.SinceTime != nil { in, out := &in.SinceTime, &out.SinceTime - *out = new(unversioned.Time) + *out = new(v1.Time) **out = (*in).DeepCopy() - } else { - out.SinceTime = nil } - out.Timestamps = in.Timestamps if in.TailLines != nil { in, out := &in.TailLines, &out.TailLines *out = new(int64) **out = **in - } else { - out.TailLines = nil } if in.LimitBytes != nil { in, out := &in.LimitBytes, &out.LimitBytes *out = new(int64) **out = **in - } else { - out.LimitBytes = nil + } + return nil + } +} + +func DeepCopy_api_PodPortForwardOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPortForwardOptions) + out := out.(*PodPortForwardOptions) + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]int32, len(*in)) + copy(*out, *in) } return nil } @@ -2578,8 +2324,7 @@ func DeepCopy_api_PodProxyOptions(in interface{}, out interface{}, c *conversion { in := in.(*PodProxyOptions) out := out.(*PodProxyOptions) - out.TypeMeta = in.TypeMeta - out.Path = in.Path + *out = *in return nil } } @@ -2588,43 +2333,31 @@ func DeepCopy_api_PodSecurityContext(in interface{}, out interface{}, c *convers { in := in.(*PodSecurityContext) out := out.(*PodSecurityContext) - out.HostNetwork = in.HostNetwork - out.HostPID = in.HostPID - out.HostIPC = in.HostIPC + *out = *in if in.SELinuxOptions != nil { in, out := &in.SELinuxOptions, &out.SELinuxOptions *out = new(SELinuxOptions) **out = **in - } else { - out.SELinuxOptions = nil } if in.RunAsUser != nil { in, out := &in.RunAsUser, &out.RunAsUser *out = new(int64) **out = **in - } else { - out.RunAsUser = nil } if in.RunAsNonRoot != nil { in, out := &in.RunAsNonRoot, &out.RunAsNonRoot *out = new(bool) **out = **in - } else { - out.RunAsNonRoot = nil } if in.SupplementalGroups != nil { in, out := &in.SupplementalGroups, &out.SupplementalGroups *out = make([]int64, len(*in)) copy(*out, *in) - } else { - out.SupplementalGroups = nil } if in.FSGroup != nil { in, out := &in.FSGroup, &out.FSGroup *out = new(int64) **out = **in - } else { - out.FSGroup = nil } return nil } @@ -2634,14 +2367,14 @@ func DeepCopy_api_PodSignature(in interface{}, out interface{}, c *conversion.Cl { in := in.(*PodSignature) out := out.(*PodSignature) + *out = *in if 
in.PodController != nil { in, out := &in.PodController, &out.PodController - *out = new(OwnerReference) - if err := DeepCopy_api_OwnerReference(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*v1.OwnerReference) } - } else { - out.PodController = nil } return nil } @@ -2651,6 +2384,7 @@ func DeepCopy_api_PodSpec(in interface{}, out interface{}, c *conversion.Cloner) { in := in.(*PodSpec) out := out.(*PodSpec) + *out = *in if in.Volumes != nil { in, out := &in.Volumes, &out.Volumes *out = make([]Volume, len(*in)) @@ -2659,8 +2393,6 @@ func DeepCopy_api_PodSpec(in interface{}, out interface{}, c *conversion.Cloner) return err } } - } else { - out.Volumes = nil } if in.InitContainers != nil { in, out := &in.InitContainers, &out.InitContainers @@ -2670,8 +2402,6 @@ func DeepCopy_api_PodSpec(in interface{}, out interface{}, c *conversion.Cloner) return err } } - } else { - out.InitContainers = nil } if in.Containers != nil { in, out := &in.Containers, &out.Containers @@ -2681,56 +2411,57 @@ func DeepCopy_api_PodSpec(in interface{}, out interface{}, c *conversion.Cloner) return err } } - } else { - out.Containers = nil } - out.RestartPolicy = in.RestartPolicy if in.TerminationGracePeriodSeconds != nil { in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds *out = new(int64) **out = **in - } else { - out.TerminationGracePeriodSeconds = nil } if in.ActiveDeadlineSeconds != nil { in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds *out = new(int64) **out = **in - } else { - out.ActiveDeadlineSeconds = nil } - out.DNSPolicy = in.DNSPolicy if in.NodeSelector != nil { in, out := &in.NodeSelector, &out.NodeSelector *out = make(map[string]string) for key, val := range *in { (*out)[key] = val } - } else { - out.NodeSelector = nil } - out.ServiceAccountName = in.ServiceAccountName - out.NodeName = in.NodeName + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } if in.SecurityContext != nil { in, out := &in.SecurityContext, &out.SecurityContext *out = new(PodSecurityContext) if err := DeepCopy_api_PodSecurityContext(*in, *out, c); err != nil { return err } - } else { - out.SecurityContext = nil } if in.ImagePullSecrets != nil { in, out := &in.ImagePullSecrets, &out.ImagePullSecrets *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(Affinity) + if err := DeepCopy_api_Affinity(*in, *out, c); err != nil { + return err + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]Toleration, len(*in)) for i := range *in { - (*out)[i] = (*in)[i] + if err := DeepCopy_api_Toleration(&(*in)[i], &(*out)[i], c); err != nil { + return err + } } - } else { - out.ImagePullSecrets = nil } - out.Hostname = in.Hostname - out.Subdomain = in.Subdomain return nil } } @@ -2739,7 +2470,7 @@ func DeepCopy_api_PodStatus(in interface{}, out interface{}, c *conversion.Clone { in := in.(*PodStatus) out := out.(*PodStatus) - out.Phase = in.Phase + *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]PodCondition, len(*in)) @@ -2748,19 +2479,11 @@ func DeepCopy_api_PodStatus(in interface{}, out interface{}, c *conversion.Clone return err } } - } else { - out.Conditions = nil } - out.Message = in.Message - out.Reason = in.Reason - out.HostIP = 
in.HostIP - out.PodIP = in.PodIP if in.StartTime != nil { in, out := &in.StartTime, &out.StartTime - *out = new(unversioned.Time) + *out = new(v1.Time) **out = (*in).DeepCopy() - } else { - out.StartTime = nil } if in.InitContainerStatuses != nil { in, out := &in.InitContainerStatuses, &out.InitContainerStatuses @@ -2770,8 +2493,6 @@ func DeepCopy_api_PodStatus(in interface{}, out interface{}, c *conversion.Clone return err } } - } else { - out.InitContainerStatuses = nil } if in.ContainerStatuses != nil { in, out := &in.ContainerStatuses, &out.ContainerStatuses @@ -2781,8 +2502,6 @@ func DeepCopy_api_PodStatus(in interface{}, out interface{}, c *conversion.Clone return err } } - } else { - out.ContainerStatuses = nil } return nil } @@ -2792,9 +2511,11 @@ func DeepCopy_api_PodStatusResult(in interface{}, out interface{}, c *conversion { in := in.(*PodStatusResult) out := out.(*PodStatusResult) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_api_PodStatus(&in.Status, &out.Status, c); err != nil { return err @@ -2807,9 +2528,11 @@ func DeepCopy_api_PodTemplate(in interface{}, out interface{}, c *conversion.Clo { in := in.(*PodTemplate) out := out.(*PodTemplate) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { return err @@ -2822,8 +2545,7 @@ func DeepCopy_api_PodTemplateList(in interface{}, out interface{}, c *conversion { in := in.(*PodTemplateList) out := out.(*PodTemplateList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodTemplate, len(*in)) @@ -2832,8 +2554,6 @@ func DeepCopy_api_PodTemplateList(in interface{}, out interface{}, c *conversion return err } } - } else { - out.Items = nil } return nil } @@ -2843,8 +2563,11 @@ func DeepCopy_api_PodTemplateSpec(in interface{}, out interface{}, c *conversion { in := in.(*PodTemplateSpec) out := out.(*PodTemplateSpec) - if err := DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_api_PodSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -2853,16 +2576,24 @@ func DeepCopy_api_PodTemplateSpec(in interface{}, out interface{}, c *conversion } } +func DeepCopy_api_PortworxVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PortworxVolumeSource) + out := out.(*PortworxVolumeSource) + *out = *in + return nil + } +} + func DeepCopy_api_Preconditions(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*Preconditions) out := out.(*Preconditions) + *out = *in if in.UID != nil { in, out := &in.UID, &out.UID *out = new(types.UID) **out = **in - } else { - out.UID = nil } return nil } @@ -2872,12 +2603,11 @@ func DeepCopy_api_PreferAvoidPodsEntry(in interface{}, out interface{}, c *conve { in := in.(*PreferAvoidPodsEntry) out := out.(*PreferAvoidPodsEntry) + *out = *in if err := 
DeepCopy_api_PodSignature(&in.PodSignature, &out.PodSignature, c); err != nil { return err } out.EvictionTime = in.EvictionTime.DeepCopy() - out.Reason = in.Reason - out.Message = in.Message return nil } } @@ -2886,7 +2616,7 @@ func DeepCopy_api_PreferredSchedulingTerm(in interface{}, out interface{}, c *co { in := in.(*PreferredSchedulingTerm) out := out.(*PreferredSchedulingTerm) - out.Weight = in.Weight + *out = *in if err := DeepCopy_api_NodeSelectorTerm(&in.Preference, &out.Preference, c); err != nil { return err } @@ -2898,14 +2628,33 @@ func DeepCopy_api_Probe(in interface{}, out interface{}, c *conversion.Cloner) e { in := in.(*Probe) out := out.(*Probe) + *out = *in if err := DeepCopy_api_Handler(&in.Handler, &out.Handler, c); err != nil { return err } - out.InitialDelaySeconds = in.InitialDelaySeconds - out.TimeoutSeconds = in.TimeoutSeconds - out.PeriodSeconds = in.PeriodSeconds - out.SuccessThreshold = in.SuccessThreshold - out.FailureThreshold = in.FailureThreshold + return nil + } +} + +func DeepCopy_api_ProjectedVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ProjectedVolumeSource) + out := out.(*ProjectedVolumeSource) + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]VolumeProjection, len(*in)) + for i := range *in { + if err := DeepCopy_api_VolumeProjection(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } return nil } } @@ -2914,11 +2663,7 @@ func DeepCopy_api_QuobyteVolumeSource(in interface{}, out interface{}, c *conver { in := in.(*QuobyteVolumeSource) out := out.(*QuobyteVolumeSource) - out.Registry = in.Registry - out.Volume = in.Volume - out.ReadOnly = in.ReadOnly - out.User = in.User - out.Group = in.Group + *out = *in return nil } } @@ -2927,26 +2672,17 @@ func DeepCopy_api_RBDVolumeSource(in interface{}, out interface{}, c *conversion { in := in.(*RBDVolumeSource) out := out.(*RBDVolumeSource) + *out = *in if in.CephMonitors != nil { in, out := &in.CephMonitors, &out.CephMonitors *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.CephMonitors = nil } - out.RBDImage = in.RBDImage - out.FSType = in.FSType - out.RBDPool = in.RBDPool - out.RadosUser = in.RadosUser - out.Keyring = in.Keyring if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef *out = new(LocalObjectReference) **out = **in - } else { - out.SecretRef = nil } - out.ReadOnly = in.ReadOnly return nil } } @@ -2955,17 +2691,16 @@ func DeepCopy_api_RangeAllocation(in interface{}, out interface{}, c *conversion { in := in.(*RangeAllocation) out := out.(*RangeAllocation) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } - out.Range = in.Range if in.Data != nil { in, out := &in.Data, &out.Data *out = make([]byte, len(*in)) copy(*out, *in) - } else { - out.Data = nil } return nil } @@ -2975,9 +2710,11 @@ func DeepCopy_api_ReplicationController(in interface{}, out interface{}, c *conv { in := in.(*ReplicationController) out := out.(*ReplicationController) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + 
out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_api_ReplicationControllerSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -2993,11 +2730,8 @@ func DeepCopy_api_ReplicationControllerCondition(in interface{}, out interface{} { in := in.(*ReplicationControllerCondition) out := out.(*ReplicationControllerCondition) - out.Type = in.Type - out.Status = in.Status + *out = *in out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - out.Reason = in.Reason - out.Message = in.Message return nil } } @@ -3006,8 +2740,7 @@ func DeepCopy_api_ReplicationControllerList(in interface{}, out interface{}, c * { in := in.(*ReplicationControllerList) out := out.(*ReplicationControllerList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicationController, len(*in)) @@ -3016,8 +2749,6 @@ func DeepCopy_api_ReplicationControllerList(in interface{}, out interface{}, c * return err } } - } else { - out.Items = nil } return nil } @@ -3027,16 +2758,13 @@ func DeepCopy_api_ReplicationControllerSpec(in interface{}, out interface{}, c * { in := in.(*ReplicationControllerSpec) out := out.(*ReplicationControllerSpec) - out.Replicas = in.Replicas - out.MinReadySeconds = in.MinReadySeconds + *out = *in if in.Selector != nil { in, out := &in.Selector, &out.Selector *out = make(map[string]string) for key, val := range *in { (*out)[key] = val } - } else { - out.Selector = nil } if in.Template != nil { in, out := &in.Template, &out.Template @@ -3044,8 +2772,6 @@ func DeepCopy_api_ReplicationControllerSpec(in interface{}, out interface{}, c * if err := DeepCopy_api_PodTemplateSpec(*in, *out, c); err != nil { return err } - } else { - out.Template = nil } return nil } @@ -3055,11 +2781,7 @@ func DeepCopy_api_ReplicationControllerStatus(in interface{}, out interface{}, c { in := in.(*ReplicationControllerStatus) out := out.(*ReplicationControllerStatus) - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ReadyReplicas = in.ReadyReplicas - out.AvailableReplicas = in.AvailableReplicas - out.ObservedGeneration = in.ObservedGeneration + *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]ReplicationControllerCondition, len(*in)) @@ -3068,8 +2790,6 @@ func DeepCopy_api_ReplicationControllerStatus(in interface{}, out interface{}, c return err } } - } else { - out.Conditions = nil } return nil } @@ -3079,8 +2799,7 @@ func DeepCopy_api_ResourceFieldSelector(in interface{}, out interface{}, c *conv { in := in.(*ResourceFieldSelector) out := out.(*ResourceFieldSelector) - out.ContainerName = in.ContainerName - out.Resource = in.Resource + *out = *in out.Divisor = in.Divisor.DeepCopy() return nil } @@ -3090,9 +2809,11 @@ func DeepCopy_api_ResourceQuota(in interface{}, out interface{}, c *conversion.C { in := in.(*ResourceQuota) out := out.(*ResourceQuota) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_api_ResourceQuotaSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -3108,8 +2829,7 @@ func DeepCopy_api_ResourceQuotaList(in interface{}, out interface{}, c *conversi { in := in.(*ResourceQuotaList) out := out.(*ResourceQuotaList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil 
{ in, out := &in.Items, &out.Items *out = make([]ResourceQuota, len(*in)) @@ -3118,8 +2838,6 @@ func DeepCopy_api_ResourceQuotaList(in interface{}, out interface{}, c *conversi return err } } - } else { - out.Items = nil } return nil } @@ -3129,23 +2847,18 @@ func DeepCopy_api_ResourceQuotaSpec(in interface{}, out interface{}, c *conversi { in := in.(*ResourceQuotaSpec) out := out.(*ResourceQuotaSpec) + *out = *in if in.Hard != nil { in, out := &in.Hard, &out.Hard *out = make(ResourceList) for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Hard = nil } if in.Scopes != nil { in, out := &in.Scopes, &out.Scopes *out = make([]ResourceQuotaScope, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Scopes = nil + copy(*out, *in) } return nil } @@ -3155,14 +2868,13 @@ func DeepCopy_api_ResourceQuotaStatus(in interface{}, out interface{}, c *conver { in := in.(*ResourceQuotaStatus) out := out.(*ResourceQuotaStatus) + *out = *in if in.Hard != nil { in, out := &in.Hard, &out.Hard *out = make(ResourceList) for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Hard = nil } if in.Used != nil { in, out := &in.Used, &out.Used @@ -3170,8 +2882,6 @@ func DeepCopy_api_ResourceQuotaStatus(in interface{}, out interface{}, c *conver for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Used = nil } return nil } @@ -3181,14 +2891,13 @@ func DeepCopy_api_ResourceRequirements(in interface{}, out interface{}, c *conve { in := in.(*ResourceRequirements) out := out.(*ResourceRequirements) + *out = *in if in.Limits != nil { in, out := &in.Limits, &out.Limits *out = make(ResourceList) for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Limits = nil } if in.Requests != nil { in, out := &in.Requests, &out.Requests @@ -3196,8 +2905,6 @@ func DeepCopy_api_ResourceRequirements(in interface{}, out interface{}, c *conve for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.Requests = nil } return nil } @@ -3207,10 +2914,21 @@ func DeepCopy_api_SELinuxOptions(in interface{}, out interface{}, c *conversion. 
{ in := in.(*SELinuxOptions) out := out.(*SELinuxOptions) - out.User = in.User - out.Role = in.Role - out.Type = in.Type - out.Level = in.Level + *out = *in + return nil + } +} + +func DeepCopy_api_ScaleIOVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleIOVolumeSource) + out := out.(*ScaleIOVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } return nil } } @@ -3219,9 +2937,11 @@ func DeepCopy_api_Secret(in interface{}, out interface{}, c *conversion.Cloner) { in := in.(*Secret) out := out.(*Secret) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if in.Data != nil { in, out := &in.Data, &out.Data @@ -3233,10 +2953,21 @@ func DeepCopy_api_Secret(in interface{}, out interface{}, c *conversion.Cloner) (*out)[key] = *newVal.(*[]byte) } } - } else { - out.Data = nil } - out.Type = in.Type + return nil + } +} + +func DeepCopy_api_SecretEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretEnvSource) + out := out.(*SecretEnvSource) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } return nil } } @@ -3245,8 +2976,12 @@ func DeepCopy_api_SecretKeySelector(in interface{}, out interface{}, c *conversi { in := in.(*SecretKeySelector) out := out.(*SecretKeySelector) - out.LocalObjectReference = in.LocalObjectReference - out.Key = in.Key + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } return nil } } @@ -3255,8 +2990,7 @@ func DeepCopy_api_SecretList(in interface{}, out interface{}, c *conversion.Clon { in := in.(*SecretList) out := out.(*SecretList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Secret, len(*in)) @@ -3265,8 +2999,29 @@ func DeepCopy_api_SecretList(in interface{}, out interface{}, c *conversion.Clon return err } } - } else { - out.Items = nil + } + return nil + } +} + +func DeepCopy_api_SecretProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretProjection) + out := out.(*SecretProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in } return nil } @@ -3276,7 +3031,7 @@ func DeepCopy_api_SecretVolumeSource(in interface{}, out interface{}, c *convers { in := in.(*SecretVolumeSource) out := out.(*SecretVolumeSource) - out.SecretName = in.SecretName + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]KeyToPath, len(*in)) @@ -3285,15 +3040,16 @@ func DeepCopy_api_SecretVolumeSource(in interface{}, out interface{}, c *convers return err } } - } else { - out.Items = nil } if in.DefaultMode != nil { in, out := &in.DefaultMode, &out.DefaultMode *out = new(int32) **out = **in - } else { - out.DefaultMode = nil + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in } return nil } 
@@ -3303,49 +3059,38 @@ func DeepCopy_api_SecurityContext(in interface{}, out interface{}, c *conversion { in := in.(*SecurityContext) out := out.(*SecurityContext) + *out = *in if in.Capabilities != nil { in, out := &in.Capabilities, &out.Capabilities *out = new(Capabilities) if err := DeepCopy_api_Capabilities(*in, *out, c); err != nil { return err } - } else { - out.Capabilities = nil } if in.Privileged != nil { in, out := &in.Privileged, &out.Privileged *out = new(bool) **out = **in - } else { - out.Privileged = nil } if in.SELinuxOptions != nil { in, out := &in.SELinuxOptions, &out.SELinuxOptions *out = new(SELinuxOptions) **out = **in - } else { - out.SELinuxOptions = nil } if in.RunAsUser != nil { in, out := &in.RunAsUser, &out.RunAsUser *out = new(int64) **out = **in - } else { - out.RunAsUser = nil } if in.RunAsNonRoot != nil { in, out := &in.RunAsNonRoot, &out.RunAsNonRoot *out = new(bool) **out = **in - } else { - out.RunAsNonRoot = nil } if in.ReadOnlyRootFilesystem != nil { in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem *out = new(bool) **out = **in - } else { - out.ReadOnlyRootFilesystem = nil } return nil } @@ -3355,8 +3100,7 @@ func DeepCopy_api_SerializedReference(in interface{}, out interface{}, c *conver { in := in.(*SerializedReference) out := out.(*SerializedReference) - out.TypeMeta = in.TypeMeta - out.Reference = in.Reference + *out = *in return nil } } @@ -3365,9 +3109,11 @@ func DeepCopy_api_Service(in interface{}, out interface{}, c *conversion.Cloner) { in := in.(*Service) out := out.(*Service) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_api_ServiceSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -3383,27 +3129,26 @@ func DeepCopy_api_ServiceAccount(in interface{}, out interface{}, c *conversion. 
{ in := in.(*ServiceAccount) out := out.(*ServiceAccount) - out.TypeMeta = in.TypeMeta - if err := DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if in.Secrets != nil { in, out := &in.Secrets, &out.Secrets *out = make([]ObjectReference, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Secrets = nil + copy(*out, *in) } if in.ImagePullSecrets != nil { in, out := &in.ImagePullSecrets, &out.ImagePullSecrets *out = make([]LocalObjectReference, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.ImagePullSecrets = nil + copy(*out, *in) + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in } return nil } @@ -3413,8 +3158,7 @@ func DeepCopy_api_ServiceAccountList(in interface{}, out interface{}, c *convers { in := in.(*ServiceAccountList) out := out.(*ServiceAccountList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ServiceAccount, len(*in)) @@ -3423,8 +3167,6 @@ func DeepCopy_api_ServiceAccountList(in interface{}, out interface{}, c *convers return err } } - } else { - out.Items = nil } return nil } @@ -3434,8 +3176,7 @@ func DeepCopy_api_ServiceList(in interface{}, out interface{}, c *conversion.Clo { in := in.(*ServiceList) out := out.(*ServiceList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Service, len(*in)) @@ -3444,8 +3185,6 @@ func DeepCopy_api_ServiceList(in interface{}, out interface{}, c *conversion.Clo return err } } - } else { - out.Items = nil } return nil } @@ -3455,11 +3194,7 @@ func DeepCopy_api_ServicePort(in interface{}, out interface{}, c *conversion.Clo { in := in.(*ServicePort) out := out.(*ServicePort) - out.Name = in.Name - out.Protocol = in.Protocol - out.Port = in.Port - out.TargetPort = in.TargetPort - out.NodePort = in.NodePort + *out = *in return nil } } @@ -3468,8 +3203,7 @@ func DeepCopy_api_ServiceProxyOptions(in interface{}, out interface{}, c *conver { in := in.(*ServiceProxyOptions) out := out.(*ServiceProxyOptions) - out.TypeMeta = in.TypeMeta - out.Path = in.Path + *out = *in return nil } } @@ -3478,15 +3212,11 @@ func DeepCopy_api_ServiceSpec(in interface{}, out interface{}, c *conversion.Clo { in := in.(*ServiceSpec) out := out.(*ServiceSpec) - out.Type = in.Type + *out = *in if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]ServicePort, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Ports = nil + copy(*out, *in) } if in.Selector != nil { in, out := &in.Selector, &out.Selector @@ -3494,26 +3224,16 @@ func DeepCopy_api_ServiceSpec(in interface{}, out interface{}, c *conversion.Clo for key, val := range *in { (*out)[key] = val } - } else { - out.Selector = nil } - out.ClusterIP = in.ClusterIP - out.ExternalName = in.ExternalName if in.ExternalIPs != nil { in, out := &in.ExternalIPs, &out.ExternalIPs *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.ExternalIPs = nil } - out.LoadBalancerIP = in.LoadBalancerIP - out.SessionAffinity = in.SessionAffinity if in.LoadBalancerSourceRanges != nil { in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges *out = make([]string, 
len(*in)) copy(*out, *in) - } else { - out.LoadBalancerSourceRanges = nil } return nil } @@ -3523,6 +3243,7 @@ func DeepCopy_api_ServiceStatus(in interface{}, out interface{}, c *conversion.C { in := in.(*ServiceStatus) out := out.(*ServiceStatus) + *out = *in if err := DeepCopy_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, c); err != nil { return err } @@ -3534,8 +3255,7 @@ func DeepCopy_api_Sysctl(in interface{}, out interface{}, c *conversion.Cloner) { in := in.(*Sysctl) out := out.(*Sysctl) - out.Name = in.Name - out.Value = in.Value + *out = *in return nil } } @@ -3544,7 +3264,7 @@ func DeepCopy_api_TCPSocketAction(in interface{}, out interface{}, c *conversion { in := in.(*TCPSocketAction) out := out.(*TCPSocketAction) - out.Port = in.Port + *out = *in return nil } } @@ -3553,9 +3273,8 @@ func DeepCopy_api_Taint(in interface{}, out interface{}, c *conversion.Cloner) e { in := in.(*Taint) out := out.(*Taint) - out.Key = in.Key - out.Value = in.Value - out.Effect = in.Effect + *out = *in + out.TimeAdded = in.TimeAdded.DeepCopy() return nil } } @@ -3564,10 +3283,12 @@ func DeepCopy_api_Toleration(in interface{}, out interface{}, c *conversion.Clon { in := in.(*Toleration) out := out.(*Toleration) - out.Key = in.Key - out.Operator = in.Operator - out.Value = in.Value - out.Effect = in.Effect + *out = *in + if in.TolerationSeconds != nil { + in, out := &in.TolerationSeconds, &out.TolerationSeconds + *out = new(int64) + **out = **in + } return nil } } @@ -3576,7 +3297,7 @@ func DeepCopy_api_Volume(in interface{}, out interface{}, c *conversion.Cloner) { in := in.(*Volume) out := out.(*Volume) - out.Name = in.Name + *out = *in if err := DeepCopy_api_VolumeSource(&in.VolumeSource, &out.VolumeSource, c); err != nil { return err } @@ -3588,10 +3309,37 @@ func DeepCopy_api_VolumeMount(in interface{}, out interface{}, c *conversion.Clo { in := in.(*VolumeMount) out := out.(*VolumeMount) - out.Name = in.Name - out.ReadOnly = in.ReadOnly - out.MountPath = in.MountPath - out.SubPath = in.SubPath + *out = *in + return nil + } +} + +func DeepCopy_api_VolumeProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeProjection) + out := out.(*VolumeProjection) + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretProjection) + if err := DeepCopy_api_SecretProjection(*in, *out, c); err != nil { + return err + } + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIProjection) + if err := DeepCopy_api_DownwardAPIProjection(*in, *out, c); err != nil { + return err + } + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapProjection) + if err := DeepCopy_api_ConfigMapProjection(*in, *out, c); err != nil { + return err + } + } return nil } } @@ -3600,40 +3348,31 @@ func DeepCopy_api_VolumeSource(in interface{}, out interface{}, c *conversion.Cl { in := in.(*VolumeSource) out := out.(*VolumeSource) + *out = *in if in.HostPath != nil { in, out := &in.HostPath, &out.HostPath *out = new(HostPathVolumeSource) **out = **in - } else { - out.HostPath = nil } if in.EmptyDir != nil { in, out := &in.EmptyDir, &out.EmptyDir *out = new(EmptyDirVolumeSource) **out = **in - } else { - out.EmptyDir = nil } if in.GCEPersistentDisk != nil { in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk *out = new(GCEPersistentDiskVolumeSource) **out = **in - } else { - out.GCEPersistentDisk = nil } if in.AWSElasticBlockStore != nil { in, out := 
&in.AWSElasticBlockStore, &out.AWSElasticBlockStore *out = new(AWSElasticBlockStoreVolumeSource) **out = **in - } else { - out.AWSElasticBlockStore = nil } if in.GitRepo != nil { in, out := &in.GitRepo, &out.GitRepo *out = new(GitRepoVolumeSource) **out = **in - } else { - out.GitRepo = nil } if in.Secret != nil { in, out := &in.Secret, &out.Secret @@ -3641,36 +3380,28 @@ func DeepCopy_api_VolumeSource(in interface{}, out interface{}, c *conversion.Cl if err := DeepCopy_api_SecretVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.Secret = nil } if in.NFS != nil { in, out := &in.NFS, &out.NFS *out = new(NFSVolumeSource) **out = **in - } else { - out.NFS = nil } if in.ISCSI != nil { in, out := &in.ISCSI, &out.ISCSI *out = new(ISCSIVolumeSource) - **out = **in - } else { - out.ISCSI = nil + if err := DeepCopy_api_ISCSIVolumeSource(*in, *out, c); err != nil { + return err + } } if in.Glusterfs != nil { in, out := &in.Glusterfs, &out.Glusterfs *out = new(GlusterfsVolumeSource) **out = **in - } else { - out.Glusterfs = nil } if in.PersistentVolumeClaim != nil { in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim *out = new(PersistentVolumeClaimVolumeSource) **out = **in - } else { - out.PersistentVolumeClaim = nil } if in.RBD != nil { in, out := &in.RBD, &out.RBD @@ -3678,15 +3409,11 @@ func DeepCopy_api_VolumeSource(in interface{}, out interface{}, c *conversion.Cl if err := DeepCopy_api_RBDVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.RBD = nil } if in.Quobyte != nil { in, out := &in.Quobyte, &out.Quobyte *out = new(QuobyteVolumeSource) **out = **in - } else { - out.Quobyte = nil } if in.FlexVolume != nil { in, out := &in.FlexVolume, &out.FlexVolume @@ -3694,15 +3421,11 @@ func DeepCopy_api_VolumeSource(in interface{}, out interface{}, c *conversion.Cl if err := DeepCopy_api_FlexVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.FlexVolume = nil } if in.Cinder != nil { in, out := &in.Cinder, &out.Cinder *out = new(CinderVolumeSource) **out = **in - } else { - out.Cinder = nil } if in.CephFS != nil { in, out := &in.CephFS, &out.CephFS @@ -3710,15 +3433,11 @@ func DeepCopy_api_VolumeSource(in interface{}, out interface{}, c *conversion.Cl if err := DeepCopy_api_CephFSVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.CephFS = nil } if in.Flocker != nil { in, out := &in.Flocker, &out.Flocker *out = new(FlockerVolumeSource) **out = **in - } else { - out.Flocker = nil } if in.DownwardAPI != nil { in, out := &in.DownwardAPI, &out.DownwardAPI @@ -3726,8 +3445,6 @@ func DeepCopy_api_VolumeSource(in interface{}, out interface{}, c *conversion.Cl if err := DeepCopy_api_DownwardAPIVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.DownwardAPI = nil } if in.FC != nil { in, out := &in.FC, &out.FC @@ -3735,15 +3452,11 @@ func DeepCopy_api_VolumeSource(in interface{}, out interface{}, c *conversion.Cl if err := DeepCopy_api_FCVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.FC = nil } if in.AzureFile != nil { in, out := &in.AzureFile, &out.AzureFile *out = new(AzureFileVolumeSource) **out = **in - } else { - out.AzureFile = nil } if in.ConfigMap != nil { in, out := &in.ConfigMap, &out.ConfigMap @@ -3751,15 +3464,11 @@ func DeepCopy_api_VolumeSource(in interface{}, out interface{}, c *conversion.Cl if err := DeepCopy_api_ConfigMapVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.ConfigMap = nil } if in.VsphereVolume != nil { in, out := 
&in.VsphereVolume, &out.VsphereVolume *out = new(VsphereVirtualDiskVolumeSource) **out = **in - } else { - out.VsphereVolume = nil } if in.AzureDisk != nil { in, out := &in.AzureDisk, &out.AzureDisk @@ -3767,15 +3476,30 @@ func DeepCopy_api_VolumeSource(in interface{}, out interface{}, c *conversion.Cl if err := DeepCopy_api_AzureDiskVolumeSource(*in, *out, c); err != nil { return err } - } else { - out.AzureDisk = nil } if in.PhotonPersistentDisk != nil { in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk *out = new(PhotonPersistentDiskVolumeSource) **out = **in - } else { - out.PhotonPersistentDisk = nil + } + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + *out = new(ProjectedVolumeSource) + if err := DeepCopy_api_ProjectedVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOVolumeSource) + if err := DeepCopy_api_ScaleIOVolumeSource(*in, *out, c); err != nil { + return err + } } return nil } @@ -3785,8 +3509,7 @@ func DeepCopy_api_VsphereVirtualDiskVolumeSource(in interface{}, out interface{} { in := in.(*VsphereVirtualDiskVolumeSource) out := out.(*VsphereVirtualDiskVolumeSource) - out.VolumePath = in.VolumePath - out.FSType = in.FSType + *out = *in return nil } } @@ -3795,7 +3518,7 @@ func DeepCopy_api_WeightedPodAffinityTerm(in interface{}, out interface{}, c *co { in := in.(*WeightedPodAffinityTerm) out := out.(*WeightedPodAffinityTerm) - out.Weight = in.Weight + *out = *in if err := DeepCopy_api_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, c); err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/pkg/apimachinery/BUILD b/vendor/k8s.io/kubernetes/pkg/apimachinery/BUILD deleted file mode 100644 index 53a7c57c40..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apimachinery/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "types.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api/meta:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/runtime:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["types_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = ["//pkg/api/unversioned:go_default_library"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/apimachinery/announced/BUILD b/vendor/k8s.io/kubernetes/pkg/apimachinery/announced/BUILD deleted file mode 100644 index 0ff3d6790d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apimachinery/announced/BUILD +++ /dev/null @@ -1,38 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "announced.go", - "group_factory.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apimachinery:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/runtime:go_default_library", - 
"//pkg/util/sets:go_default_library", - "//vendor:github.com/golang/glog", - ], -) - -go_test( - name = "go_default_test", - srcs = ["announced_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = ["//pkg/util/sets:go_default_library"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/apimachinery/registered/BUILD b/vendor/k8s.io/kubernetes/pkg/apimachinery/registered/BUILD deleted file mode 100644 index f477d10fff..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apimachinery/registered/BUILD +++ /dev/null @@ -1,35 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["registered.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api/meta:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apimachinery:go_default_library", - "//pkg/util/sets:go_default_library", - "//vendor:github.com/golang/glog", - ], -) - -go_test( - name = "go_default_test", - srcs = ["registered_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/apimachinery:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/apimachinery/registered/registered.go b/vendor/k8s.io/kubernetes/pkg/apimachinery/registered/registered.go deleted file mode 100644 index 5b61b4b069..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apimachinery/registered/registered.go +++ /dev/null @@ -1,403 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package to keep track of API Versions that can be registered and are enabled in api.Scheme. -package registered - -import ( - "fmt" - "os" - "sort" - "strings" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery" - "k8s.io/kubernetes/pkg/util/sets" -) - -var ( - DefaultAPIRegistrationManager = NewOrDie(os.Getenv("KUBE_API_VERSIONS")) -) - -// APIRegistrationManager provides the concept of what API groups are enabled. -// -// TODO: currently, it also provides a "registered" concept. But it's wrong to -// have both concepts in the same object. Therefore the "announced" package is -// going to take over the registered concept. After all the install packages -// are switched to using the announce package instead of this package, then we -// can combine the registered/enabled concepts in this object. Simplifying this -// isn't easy right now because there are so many callers of this package. -type APIRegistrationManager struct { - // registeredGroupVersions stores all API group versions for which RegisterGroup is called. 
- registeredVersions map[unversioned.GroupVersion]struct{} - - // thirdPartyGroupVersions are API versions which are dynamically - // registered (and unregistered) via API calls to the apiserver - thirdPartyGroupVersions []unversioned.GroupVersion - - // enabledVersions represents all enabled API versions. It should be a - // subset of registeredVersions. Please call EnableVersions() to add - // enabled versions. - enabledVersions map[unversioned.GroupVersion]struct{} - - // map of group meta for all groups. - groupMetaMap map[string]*apimachinery.GroupMeta - - // envRequestedVersions represents the versions requested via the - // KUBE_API_VERSIONS environment variable. The install package of each group - // checks this list before add their versions to the latest package and - // Scheme. This list is small and order matters, so represent as a slice - envRequestedVersions []unversioned.GroupVersion -} - -// NewAPIRegistrationManager constructs a new manager. The argument ought to be -// the value of the KUBE_API_VERSIONS env var, or a value of this which you -// wish to test. -func NewAPIRegistrationManager(kubeAPIVersions string) (*APIRegistrationManager, error) { - m := &APIRegistrationManager{ - registeredVersions: map[unversioned.GroupVersion]struct{}{}, - thirdPartyGroupVersions: []unversioned.GroupVersion{}, - enabledVersions: map[unversioned.GroupVersion]struct{}{}, - groupMetaMap: map[string]*apimachinery.GroupMeta{}, - envRequestedVersions: []unversioned.GroupVersion{}, - } - - if len(kubeAPIVersions) != 0 { - for _, version := range strings.Split(kubeAPIVersions, ",") { - gv, err := unversioned.ParseGroupVersion(version) - if err != nil { - return nil, fmt.Errorf("invalid api version: %s in KUBE_API_VERSIONS: %s.", - version, kubeAPIVersions) - } - m.envRequestedVersions = append(m.envRequestedVersions, gv) - } - } - return m, nil -} - -func NewOrDie(kubeAPIVersions string) *APIRegistrationManager { - m, err := NewAPIRegistrationManager(kubeAPIVersions) - if err != nil { - glog.Fatalf("Could not construct version manager: %v (KUBE_API_VERSIONS=%q)", err, kubeAPIVersions) - } - return m -} - -// People are calling global functions. Let them continue to do that (for now). 
-var ( - ValidateEnvRequestedVersions = DefaultAPIRegistrationManager.ValidateEnvRequestedVersions - AllPreferredGroupVersions = DefaultAPIRegistrationManager.AllPreferredGroupVersions - RESTMapper = DefaultAPIRegistrationManager.RESTMapper - GroupOrDie = DefaultAPIRegistrationManager.GroupOrDie - AddThirdPartyAPIGroupVersions = DefaultAPIRegistrationManager.AddThirdPartyAPIGroupVersions - IsThirdPartyAPIGroupVersion = DefaultAPIRegistrationManager.IsThirdPartyAPIGroupVersion - RegisteredGroupVersions = DefaultAPIRegistrationManager.RegisteredGroupVersions - IsRegisteredVersion = DefaultAPIRegistrationManager.IsRegisteredVersion - IsRegistered = DefaultAPIRegistrationManager.IsRegistered - Group = DefaultAPIRegistrationManager.Group - EnabledVersionsForGroup = DefaultAPIRegistrationManager.EnabledVersionsForGroup - EnabledVersions = DefaultAPIRegistrationManager.EnabledVersions - IsEnabledVersion = DefaultAPIRegistrationManager.IsEnabledVersion - IsAllowedVersion = DefaultAPIRegistrationManager.IsAllowedVersion - EnableVersions = DefaultAPIRegistrationManager.EnableVersions - RegisterGroup = DefaultAPIRegistrationManager.RegisterGroup - RegisterVersions = DefaultAPIRegistrationManager.RegisterVersions - InterfacesFor = DefaultAPIRegistrationManager.InterfacesFor -) - -// RegisterVersions adds the given group versions to the list of registered group versions. -func (m *APIRegistrationManager) RegisterVersions(availableVersions []unversioned.GroupVersion) { - for _, v := range availableVersions { - m.registeredVersions[v] = struct{}{} - } -} - -// RegisterGroup adds the given group to the list of registered groups. -func (m *APIRegistrationManager) RegisterGroup(groupMeta apimachinery.GroupMeta) error { - groupName := groupMeta.GroupVersion.Group - if _, found := m.groupMetaMap[groupName]; found { - return fmt.Errorf("group %v is already registered", m.groupMetaMap) - } - m.groupMetaMap[groupName] = &groupMeta - return nil -} - -// EnableVersions adds the versions for the given group to the list of enabled versions. -// Note that the caller should call RegisterGroup before calling this method. -// The caller of this function is responsible to add the versions to scheme and RESTMapper. -func (m *APIRegistrationManager) EnableVersions(versions ...unversioned.GroupVersion) error { - var unregisteredVersions []unversioned.GroupVersion - for _, v := range versions { - if _, found := m.registeredVersions[v]; !found { - unregisteredVersions = append(unregisteredVersions, v) - } - m.enabledVersions[v] = struct{}{} - } - if len(unregisteredVersions) != 0 { - return fmt.Errorf("Please register versions before enabling them: %v", unregisteredVersions) - } - return nil -} - -// IsAllowedVersion returns if the version is allowed by the KUBE_API_VERSIONS -// environment variable. If the environment variable is empty, then it always -// returns true. -func (m *APIRegistrationManager) IsAllowedVersion(v unversioned.GroupVersion) bool { - if len(m.envRequestedVersions) == 0 { - return true - } - for _, envGV := range m.envRequestedVersions { - if v == envGV { - return true - } - } - return false -} - -// IsEnabledVersion returns if a version is enabled. -func (m *APIRegistrationManager) IsEnabledVersion(v unversioned.GroupVersion) bool { - _, found := m.enabledVersions[v] - return found -} - -// EnabledVersions returns all enabled versions. 
Groups are randomly ordered, but versions within groups -// are priority order from best to worst -func (m *APIRegistrationManager) EnabledVersions() []unversioned.GroupVersion { - ret := []unversioned.GroupVersion{} - for _, groupMeta := range m.groupMetaMap { - for _, version := range groupMeta.GroupVersions { - if m.IsEnabledVersion(version) { - ret = append(ret, version) - } - } - } - return ret -} - -// EnabledVersionsForGroup returns all enabled versions for a group in order of best to worst -func (m *APIRegistrationManager) EnabledVersionsForGroup(group string) []unversioned.GroupVersion { - groupMeta, ok := m.groupMetaMap[group] - if !ok { - return []unversioned.GroupVersion{} - } - - ret := []unversioned.GroupVersion{} - for _, version := range groupMeta.GroupVersions { - if m.IsEnabledVersion(version) { - ret = append(ret, version) - } - } - return ret -} - -// Group returns the metadata of a group if the group is registered, otherwise -// an error is returned. -func (m *APIRegistrationManager) Group(group string) (*apimachinery.GroupMeta, error) { - groupMeta, found := m.groupMetaMap[group] - if !found { - return nil, fmt.Errorf("group %v has not been registered", group) - } - groupMetaCopy := *groupMeta - return &groupMetaCopy, nil -} - -// IsRegistered takes a string and determines if it's one of the registered groups -func (m *APIRegistrationManager) IsRegistered(group string) bool { - _, found := m.groupMetaMap[group] - return found -} - -// IsRegisteredVersion returns if a version is registered. -func (m *APIRegistrationManager) IsRegisteredVersion(v unversioned.GroupVersion) bool { - _, found := m.registeredVersions[v] - return found -} - -// RegisteredGroupVersions returns all registered group versions. -func (m *APIRegistrationManager) RegisteredGroupVersions() []unversioned.GroupVersion { - ret := []unversioned.GroupVersion{} - for groupVersion := range m.registeredVersions { - ret = append(ret, groupVersion) - } - return ret -} - -// IsThirdPartyAPIGroupVersion returns true if the api version is a user-registered group/version. -func (m *APIRegistrationManager) IsThirdPartyAPIGroupVersion(gv unversioned.GroupVersion) bool { - for ix := range m.thirdPartyGroupVersions { - if m.thirdPartyGroupVersions[ix] == gv { - return true - } - } - return false -} - -// AddThirdPartyAPIGroupVersions sets the list of third party versions, -// registers them in the API machinery and enables them. -// Skips GroupVersions that are already registered. -// Returns the list of GroupVersions that were skipped. -func (m *APIRegistrationManager) AddThirdPartyAPIGroupVersions(gvs ...unversioned.GroupVersion) []unversioned.GroupVersion { - filteredGVs := []unversioned.GroupVersion{} - skippedGVs := []unversioned.GroupVersion{} - for ix := range gvs { - if !m.IsRegisteredVersion(gvs[ix]) { - filteredGVs = append(filteredGVs, gvs[ix]) - } else { - glog.V(3).Infof("Skipping %s, because its already registered", gvs[ix].String()) - skippedGVs = append(skippedGVs, gvs[ix]) - } - } - if len(filteredGVs) == 0 { - return skippedGVs - } - m.RegisterVersions(filteredGVs) - m.EnableVersions(filteredGVs...) - m.thirdPartyGroupVersions = append(m.thirdPartyGroupVersions, filteredGVs...) 
- - return skippedGVs -} - -// InterfacesFor is a union meta.VersionInterfacesFunc func for all registered types -func (m *APIRegistrationManager) InterfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { - groupMeta, err := m.Group(version.Group) - if err != nil { - return nil, err - } - return groupMeta.InterfacesFor(version) -} - -// TODO: This is an expedient function, because we don't check if a Group is -// supported throughout the code base. We will abandon this function and -// checking the error returned by the Group() function. -func (m *APIRegistrationManager) GroupOrDie(group string) *apimachinery.GroupMeta { - groupMeta, found := m.groupMetaMap[group] - if !found { - if group == "" { - panic("The legacy v1 API is not registered.") - } else { - panic(fmt.Sprintf("Group %s is not registered.", group)) - } - } - groupMetaCopy := *groupMeta - return &groupMetaCopy -} - -// RESTMapper returns a union RESTMapper of all known types with priorities chosen in the following order: -// 1. if KUBE_API_VERSIONS is specified, then KUBE_API_VERSIONS in order, OR -// 1. legacy kube group preferred version, extensions preferred version, metrics perferred version, legacy -// kube any version, extensions any version, metrics any version, all other groups alphabetical preferred version, -// all other groups alphabetical. -func (m *APIRegistrationManager) RESTMapper(versionPatterns ...unversioned.GroupVersion) meta.RESTMapper { - unionMapper := meta.MultiRESTMapper{} - unionedGroups := sets.NewString() - for enabledVersion := range m.enabledVersions { - if !unionedGroups.Has(enabledVersion.Group) { - unionedGroups.Insert(enabledVersion.Group) - groupMeta := m.groupMetaMap[enabledVersion.Group] - unionMapper = append(unionMapper, groupMeta.RESTMapper) - } - } - - if len(versionPatterns) != 0 { - resourcePriority := []unversioned.GroupVersionResource{} - kindPriority := []unversioned.GroupVersionKind{} - for _, versionPriority := range versionPatterns { - resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource)) - kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind)) - } - - return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} - } - - if len(m.envRequestedVersions) != 0 { - resourcePriority := []unversioned.GroupVersionResource{} - kindPriority := []unversioned.GroupVersionKind{} - - for _, versionPriority := range m.envRequestedVersions { - resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource)) - kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind)) - } - - return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} - } - - prioritizedGroups := []string{"", "extensions", "metrics"} - resourcePriority, kindPriority := m.prioritiesForGroups(prioritizedGroups...) - - prioritizedGroupsSet := sets.NewString(prioritizedGroups...) - remainingGroups := sets.String{} - for enabledVersion := range m.enabledVersions { - if !prioritizedGroupsSet.Has(enabledVersion.Group) { - remainingGroups.Insert(enabledVersion.Group) - } - } - - remainingResourcePriority, remainingKindPriority := m.prioritiesForGroups(remainingGroups.List()...) - resourcePriority = append(resourcePriority, remainingResourcePriority...) - kindPriority = append(kindPriority, remainingKindPriority...) 
- - return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} -} - -// prioritiesForGroups returns the resource and kind priorities for a PriorityRESTMapper, preferring the preferred version of each group first, -// then any non-preferred version of the group second. -func (m *APIRegistrationManager) prioritiesForGroups(groups ...string) ([]unversioned.GroupVersionResource, []unversioned.GroupVersionKind) { - resourcePriority := []unversioned.GroupVersionResource{} - kindPriority := []unversioned.GroupVersionKind{} - - for _, group := range groups { - availableVersions := m.EnabledVersionsForGroup(group) - if len(availableVersions) > 0 { - resourcePriority = append(resourcePriority, availableVersions[0].WithResource(meta.AnyResource)) - kindPriority = append(kindPriority, availableVersions[0].WithKind(meta.AnyKind)) - } - } - for _, group := range groups { - resourcePriority = append(resourcePriority, unversioned.GroupVersionResource{Group: group, Version: meta.AnyVersion, Resource: meta.AnyResource}) - kindPriority = append(kindPriority, unversioned.GroupVersionKind{Group: group, Version: meta.AnyVersion, Kind: meta.AnyKind}) - } - - return resourcePriority, kindPriority -} - -// AllPreferredGroupVersions returns the preferred versions of all registered -// groups in the form of "group1/version1,group2/version2,..." -func (m *APIRegistrationManager) AllPreferredGroupVersions() string { - if len(m.groupMetaMap) == 0 { - return "" - } - var defaults []string - for _, groupMeta := range m.groupMetaMap { - defaults = append(defaults, groupMeta.GroupVersion.String()) - } - sort.Strings(defaults) - return strings.Join(defaults, ",") -} - -// ValidateEnvRequestedVersions returns a list of versions that are requested in -// the KUBE_API_VERSIONS environment variable, but not enabled. -func (m *APIRegistrationManager) ValidateEnvRequestedVersions() []unversioned.GroupVersion { - var missingVersions []unversioned.GroupVersion - for _, v := range m.envRequestedVersions { - if _, found := m.enabledVersions[v]; !found { - missingVersions = append(missingVersions, v) - } - } - return missingVersions -} diff --git a/vendor/k8s.io/kubernetes/pkg/apimachinery/types.go b/vendor/k8s.io/kubernetes/pkg/apimachinery/types.go deleted file mode 100644 index c9146080a2..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apimachinery/types.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apimachinery - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// GroupMeta stores the metadata of a group. -type GroupMeta struct { - // GroupVersion represents the preferred version of the group. - GroupVersion unversioned.GroupVersion - - // GroupVersions is Group + all versions in that group. 
- GroupVersions []unversioned.GroupVersion - - // Codec is the default codec for serializing output that should use - // the preferred version. Use this Codec when writing to - // disk, a data store that is not dynamically versioned, or in tests. - // This codec can decode any object that the schema is aware of. - Codec runtime.Codec - - // SelfLinker can set or get the SelfLink field of all API types. - // TODO: when versioning changes, make this part of each API definition. - // TODO(lavalamp): Combine SelfLinker & ResourceVersioner interfaces, force all uses - // to go through the InterfacesFor method below. - SelfLinker runtime.SelfLinker - - // RESTMapper provides the default mapping between REST paths and the objects declared in api.Scheme and all known - // versions. - RESTMapper meta.RESTMapper - - // InterfacesFor returns the default Codec and ResourceVersioner for a given version - // string, or an error if the version is not known. - // TODO: make this stop being a func pointer and always use the default - // function provided below once every place that populates this field has been changed. - InterfacesFor func(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) - - // InterfacesByVersion stores the per-version interfaces. - InterfacesByVersion map[unversioned.GroupVersion]*meta.VersionInterfaces -} - -// DefaultInterfacesFor returns the default Codec and ResourceVersioner for a given version -// string, or an error if the version is not known. -// TODO: Remove the "Default" prefix. -func (gm *GroupMeta) DefaultInterfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { - if v, ok := gm.InterfacesByVersion[version]; ok { - return v, nil - } - return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, gm.GroupVersions) -} - -// AddVersionInterfaces adds the given version to the group. Only call during -// init, after that GroupMeta objects should be immutable. Not thread safe. -// (If you use this, be sure to set .InterfacesFor = .DefaultInterfacesFor) -// TODO: remove the "Interfaces" suffix and make this also maintain the -// .GroupVersions member. -func (gm *GroupMeta) AddVersionInterfaces(version unversioned.GroupVersion, interfaces *meta.VersionInterfaces) error { - if e, a := gm.GroupVersion.Group, version.Group; a != e { - return fmt.Errorf("got a version in group %v, but am in group %v", a, e) - } - if gm.InterfacesByVersion == nil { - gm.InterfacesByVersion = make(map[unversioned.GroupVersion]*meta.VersionInterfaces) - } - gm.InterfacesByVersion[version] = interfaces - - // TODO: refactor to make the below error not possible, this function - // should *set* GroupVersions rather than depend on it. 
- for _, v := range gm.GroupVersions { - if v == version { - return nil - } - } - return fmt.Errorf("added a version interface without the corresponding version %v being in the list %#v", version, gm.GroupVersions) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD index 8df04f8b7f..83de7a50b8 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -15,19 +12,34 @@ go_library( srcs = [ "doc.go", "register.go", - "types.generated.go", "types.go", "zz_generated.deepcopy.go", ], tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/resource:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//pkg/util/intstr:go_default_library", - "//vendor:github.com/ugorji/go/codec", + "//pkg/apis/extensions:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/apps/install:all-srcs", + "//pkg/apis/apps/v1beta1:all-srcs", + "//pkg/apis/apps/validation:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/apps/OWNERS new file mode 100755 index 0000000000..1cdc56eec0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/OWNERS @@ -0,0 +1,21 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- deads2k +- caesarxuchao +- bprashanth +- pmorie +- sttts +- saad-ali +- ncdc +- timstclair +- timothysc +- dims +- errordeveloper +- mml +- m1093782566 +- mbohlool +- david-mcmahon +- kevin-wangzefeng +- jianhuiz diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go index 2f123702e3..bca1ff4ef0 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go @@ -15,7 +15,5 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package,register -// +k8s:openapi-gen=true -// +groupName=apps.k8s.io package apps diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/apps/install/BUILD index f066040df5..4afc6bd70a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/install/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -15,8 +12,24 @@ go_library( srcs = ["install.go"], tags = ["automanaged"], deps = [ - "//pkg/apimachinery/announced:go_default_library", + "//pkg/api:go_default_library", "//pkg/apis/apps:go_default_library", "//pkg/apis/apps/v1beta1:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/announced", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/registered", + "//vendor:k8s.io/apimachinery/pkg/runtime", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/install/install.go index 9b89e3bbd0..344ddcbf7e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/install/install.go @@ -19,12 +19,20 @@ limitations under the License. package install import ( - "k8s.io/kubernetes/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/apps/v1beta1" ) func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { if err := announced.NewGroupMetaFactory( &announced.GroupMetaFactoryArgs{ GroupName: apps.GroupName, @@ -35,7 +43,7 @@ func init() { announced.VersionToSchemeFunc{ v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, }, - ).Announce().RegisterAndEnable(); err != nil { + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { panic(err) } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go index 41d929ad60..dae876f37b 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go @@ -17,9 +17,9 @@ limitations under the License. 
package apps import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/pkg/apis/extensions" ) var ( @@ -31,15 +31,15 @@ var ( const GroupName = "apps" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} // Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { +func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } // Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { +func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } @@ -47,10 +47,12 @@ func Resource(resource string) unversioned.GroupResource { func addKnownTypes(scheme *runtime.Scheme) error { // TODO this will get cleaned up with the scheme types are fixed scheme.AddKnownTypes(SchemeGroupVersion, + &extensions.Deployment{}, + &extensions.DeploymentList{}, + &extensions.DeploymentRollback{}, + &extensions.Scale{}, &StatefulSet{}, &StatefulSetList{}, - &api.ListOptions{}, - &api.DeleteOptions{}, ) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/types.generated.go deleted file mode 100644 index 444717153e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/types.generated.go +++ /dev/null @@ -1,1628 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. 
-// ************************************************************ - -package apps - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg2_api "k8s.io/kubernetes/pkg/api" - pkg4_resource "k8s.io/kubernetes/pkg/api/resource" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg3_types "k8s.io/kubernetes/pkg/types" - pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg2_api.ObjectMeta - var v1 pkg4_resource.Quantity - var v2 pkg1_unversioned.TypeMeta - var v3 pkg3_types.UID - var v4 pkg5_intstr.IntOrString - var v5 time.Time - _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 - } -} - -func (x *StatefulSet) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yy10.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.ObjectMeta - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy13 := &x.Spec - yy13.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.Spec - yy14.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Status - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *StatefulSet) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct19 := r.ContainerType() - if yyct19 == codecSelferValueTypeMap1234 { - yyl19 := r.ReadMapStart() - if yyl19 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl19, d) - } - } else if yyct19 == codecSelferValueTypeArray1234 { - yyl19 := r.ReadArrayStart() - if yyl19 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl19, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *StatefulSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys20Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys20Slc - var yyhl20 bool = l >= 0 - for yyj20 := 0; ; yyj20++ { - if yyhl20 { - if yyj20 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys20Slc = r.DecodeBytes(yys20Slc, true, true) - yys20 := string(yys20Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys20 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv23 := &x.ObjectMeta - yyv23.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = StatefulSetSpec{} - } else { - yyv24 := &x.Spec - yyv24.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = StatefulSetStatus{} - } else { - yyv25 
:= &x.Status - yyv25.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys20) - } // end switch yys20 - } // end for yyj20 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *StatefulSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj26 int - var yyb26 bool - var yyhl26 bool = l >= 0 - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv29 := &x.ObjectMeta - yyv29.CodecDecodeSelf(d) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = StatefulSetSpec{} - } else { - yyv30 := &x.Spec - yyv30.CodecDecodeSelf(d) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = StatefulSetStatus{} - } else { - yyv31 := &x.Status - yyv31.CodecDecodeSelf(d) - } - for { - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj26-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *StatefulSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym32 := z.EncBinary() - _ = yym32 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep33 := !z.EncBinary() - yy2arr33 := z.EncBasicHandle().StructToArray - var yyq33 [5]bool - _, _, _ = yysep33, yyq33, yy2arr33 - const yyr33 bool = false - yyq33[0] = x.Replicas != 0 - yyq33[1] = x.Selector != nil - yyq33[3] = len(x.VolumeClaimTemplates) != 0 - var yynn33 int - if yyr33 || yy2arr33 { - r.EncodeArrayStart(5) - } else { - yynn33 = 2 - for _, b := range yyq33 { - if b { - yynn33++ - } - } - r.EncodeMapStart(yynn33) - yynn33 = 0 - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[0] { - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq33[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym38 := z.EncBinary() - _ = yym38 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq33[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym39 := z.EncBinary() - _ = yym39 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy41 := &x.Template - yy41.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy42 := &x.Template - yy42.CodecEncodeSelf(e) - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[3] { - if x.VolumeClaimTemplates == nil { - r.EncodeNil() - } else { - yym44 := z.EncBinary() - _ = yym44 - if false { - } else { - h.encSliceapi_PersistentVolumeClaim(([]pkg2_api.PersistentVolumeClaim)(x.VolumeClaimTemplates), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq33[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeClaimTemplates")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VolumeClaimTemplates == nil { - r.EncodeNil() - } else { - yym45 := z.EncBinary() - _ = yym45 - if false { - } else { - h.encSliceapi_PersistentVolumeClaim(([]pkg2_api.PersistentVolumeClaim)(x.VolumeClaimTemplates), e) - } - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym47 := z.EncBinary() - _ = yym47 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym48 := z.EncBinary() - _ = yym48 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *StatefulSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym49 := z.DecBinary() - _ = yym49 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct50 := r.ContainerType() - if yyct50 == codecSelferValueTypeMap1234 { - yyl50 := r.ReadMapStart() - if yyl50 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl50, d) - } - } else if yyct50 == codecSelferValueTypeArray1234 { - yyl50 := r.ReadArrayStart() - if yyl50 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl50, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *StatefulSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys51Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys51Slc - var yyhl51 bool = l >= 0 - for yyj51 := 0; ; yyj51++ { - if yyhl51 { - if yyj51 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys51Slc = r.DecodeBytes(yys51Slc, true, true) - yys51 := string(yys51Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys51 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym54 := z.DecBinary() - _ = yym54 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv55 := &x.Template - yyv55.CodecDecodeSelf(d) - } - case "volumeClaimTemplates": - if r.TryDecodeAsNil() { - x.VolumeClaimTemplates = nil - } else { - yyv56 := &x.VolumeClaimTemplates - yym57 := z.DecBinary() - _ = yym57 - if false { - } else { - h.decSliceapi_PersistentVolumeClaim((*[]pkg2_api.PersistentVolumeClaim)(yyv56), d) - } - } - case "serviceName": - if r.TryDecodeAsNil() { - x.ServiceName = "" - } else { - x.ServiceName = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys51) - } // end switch yys51 - } // end for yyj51 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *StatefulSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj59 int - var yyb59 bool - var yyhl59 bool = l >= 0 - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l - } else { - yyb59 = r.CheckBreak() - } - if yyb59 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l - } else { - yyb59 = r.CheckBreak() - } - if yyb59 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym62 := z.DecBinary() - _ = yym62 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l - } else { - yyb59 = r.CheckBreak() - } - if yyb59 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv63 := &x.Template - yyv63.CodecDecodeSelf(d) - } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l - } else { - yyb59 = r.CheckBreak() - } 
- if yyb59 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeClaimTemplates = nil - } else { - yyv64 := &x.VolumeClaimTemplates - yym65 := z.DecBinary() - _ = yym65 - if false { - } else { - h.decSliceapi_PersistentVolumeClaim((*[]pkg2_api.PersistentVolumeClaim)(yyv64), d) - } - } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l - } else { - yyb59 = r.CheckBreak() - } - if yyb59 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServiceName = "" - } else { - x.ServiceName = string(r.DecodeString()) - } - for { - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l - } else { - yyb59 = r.CheckBreak() - } - if yyb59 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj59-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *StatefulSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym67 := z.EncBinary() - _ = yym67 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep68 := !z.EncBinary() - yy2arr68 := z.EncBasicHandle().StructToArray - var yyq68 [2]bool - _, _, _ = yysep68, yyq68, yy2arr68 - const yyr68 bool = false - yyq68[0] = x.ObservedGeneration != nil - var yynn68 int - if yyr68 || yy2arr68 { - r.EncodeArrayStart(2) - } else { - yynn68 = 1 - for _, b := range yyq68 { - if b { - yynn68++ - } - } - r.EncodeMapStart(yynn68) - yynn68 = 0 - } - if yyr68 || yy2arr68 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq68[0] { - if x.ObservedGeneration == nil { - r.EncodeNil() - } else { - yy70 := *x.ObservedGeneration - yym71 := z.EncBinary() - _ = yym71 - if false { - } else { - r.EncodeInt(int64(yy70)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq68[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ObservedGeneration == nil { - r.EncodeNil() - } else { - yy72 := *x.ObservedGeneration - yym73 := z.EncBinary() - _ = yym73 - if false { - } else { - r.EncodeInt(int64(yy72)) - } - } - } - } - if yyr68 || yy2arr68 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym75 := z.EncBinary() - _ = yym75 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym76 := z.EncBinary() - _ = yym76 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr68 || yy2arr68 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *StatefulSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym77 := z.DecBinary() - _ = yym77 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct78 := r.ContainerType() - if yyct78 == codecSelferValueTypeMap1234 { - yyl78 := r.ReadMapStart() - if yyl78 == 0 { - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl78, d) - } - } else if yyct78 == codecSelferValueTypeArray1234 { - yyl78 := r.ReadArrayStart() - if yyl78 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl78, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *StatefulSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys79Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys79Slc - var yyhl79 bool = l >= 0 - for yyj79 := 0; ; yyj79++ { - if yyhl79 { - if yyj79 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys79Slc = r.DecodeBytes(yys79Slc, true, true) - yys79 := string(yys79Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys79 { - case "observedGeneration": - if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } - } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym81 := z.DecBinary() - _ = yym81 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } - } - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys79) - } // end switch yys79 - } // end for yyj79 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *StatefulSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj83 int - var yyb83 bool - var yyhl83 bool = l >= 0 - yyj83++ - if yyhl83 { - yyb83 = yyj83 > l - } else { - yyb83 = r.CheckBreak() - } - if yyb83 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } - } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym85 := z.DecBinary() - _ = yym85 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } - } - yyj83++ - if yyhl83 { - yyb83 = yyj83 > l - } else { - yyb83 = r.CheckBreak() - } - if yyb83 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - for { - yyj83++ - if yyhl83 { - yyb83 = yyj83 > l - } else { - yyb83 = r.CheckBreak() - } - if yyb83 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj83-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *StatefulSetList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym87 := z.EncBinary() - _ = yym87 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep88 := !z.EncBinary() - yy2arr88 := z.EncBasicHandle().StructToArray - var yyq88 [4]bool - _, _, _ = yysep88, yyq88, yy2arr88 - const yyr88 bool = false - yyq88[0] = x.Kind != "" - yyq88[1] = x.APIVersion != "" - yyq88[2] 
= true - var yynn88 int - if yyr88 || yy2arr88 { - r.EncodeArrayStart(4) - } else { - yynn88 = 1 - for _, b := range yyq88 { - if b { - yynn88++ - } - } - r.EncodeMapStart(yynn88) - yynn88 = 0 - } - if yyr88 || yy2arr88 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq88[0] { - yym90 := z.EncBinary() - _ = yym90 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq88[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym91 := z.EncBinary() - _ = yym91 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr88 || yy2arr88 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq88[1] { - yym93 := z.EncBinary() - _ = yym93 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq88[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym94 := z.EncBinary() - _ = yym94 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr88 || yy2arr88 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq88[2] { - yy96 := &x.ListMeta - yym97 := z.EncBinary() - _ = yym97 - if false { - } else if z.HasExtensions() && z.EncExt(yy96) { - } else { - z.EncFallback(yy96) - } - } else { - r.EncodeNil() - } - } else { - if yyq88[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy98 := &x.ListMeta - yym99 := z.EncBinary() - _ = yym99 - if false { - } else if z.HasExtensions() && z.EncExt(yy98) { - } else { - z.EncFallback(yy98) - } - } - } - if yyr88 || yy2arr88 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym101 := z.EncBinary() - _ = yym101 - if false { - } else { - h.encSliceStatefulSet(([]StatefulSet)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym102 := z.EncBinary() - _ = yym102 - if false { - } else { - h.encSliceStatefulSet(([]StatefulSet)(x.Items), e) - } - } - } - if yyr88 || yy2arr88 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *StatefulSetList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym103 := z.DecBinary() - _ = yym103 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct104 := r.ContainerType() - if yyct104 == codecSelferValueTypeMap1234 { - yyl104 := r.ReadMapStart() - if yyl104 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl104, d) - } - } else if yyct104 == codecSelferValueTypeArray1234 { - yyl104 := r.ReadArrayStart() - if yyl104 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl104, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *StatefulSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys105Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys105Slc - var yyhl105 bool = l >= 0 - for yyj105 := 0; ; yyj105++ { - if yyhl105 { - if yyj105 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys105Slc = r.DecodeBytes(yys105Slc, true, true) - yys105 := string(yys105Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys105 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv108 := &x.ListMeta - yym109 := z.DecBinary() - _ = yym109 - if false { - } else if z.HasExtensions() && z.DecExt(yyv108) { - } else { - z.DecFallback(yyv108, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv110 := &x.Items - yym111 := z.DecBinary() - _ = yym111 - if false { - } else { - h.decSliceStatefulSet((*[]StatefulSet)(yyv110), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys105) - } // end switch yys105 - } // end for yyj105 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *StatefulSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj112 int - var yyb112 bool - var yyhl112 bool = l >= 0 - yyj112++ - if yyhl112 { - yyb112 = yyj112 > l - } else { - yyb112 = r.CheckBreak() - } - if yyb112 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj112++ - if yyhl112 { - yyb112 = yyj112 > l - } else { - yyb112 = r.CheckBreak() - } - if yyb112 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj112++ - if yyhl112 { - yyb112 = yyj112 > l - } else { - yyb112 = r.CheckBreak() - } - if yyb112 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv115 := &x.ListMeta - yym116 := z.DecBinary() - _ = yym116 - if false { - } else if z.HasExtensions() && z.DecExt(yyv115) { - } else { - z.DecFallback(yyv115, false) - } - } - yyj112++ - if yyhl112 { - yyb112 = yyj112 > l - } else { - yyb112 = r.CheckBreak() - } - if yyb112 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv117 := &x.Items - yym118 := z.DecBinary() - _ = yym118 - if false { - } else { - h.decSliceStatefulSet((*[]StatefulSet)(yyv117), d) - } - } - for { - 
yyj112++ - if yyhl112 { - yyb112 = yyj112 > l - } else { - yyb112 = r.CheckBreak() - } - if yyb112 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj112-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceapi_PersistentVolumeClaim(v []pkg2_api.PersistentVolumeClaim, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv119 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy120 := &yyv119 - yy120.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceapi_PersistentVolumeClaim(v *[]pkg2_api.PersistentVolumeClaim, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv121 := *v - yyh121, yyl121 := z.DecSliceHelperStart() - var yyc121 bool - if yyl121 == 0 { - if yyv121 == nil { - yyv121 = []pkg2_api.PersistentVolumeClaim{} - yyc121 = true - } else if len(yyv121) != 0 { - yyv121 = yyv121[:0] - yyc121 = true - } - } else if yyl121 > 0 { - var yyrr121, yyrl121 int - var yyrt121 bool - if yyl121 > cap(yyv121) { - - yyrg121 := len(yyv121) > 0 - yyv2121 := yyv121 - yyrl121, yyrt121 = z.DecInferLen(yyl121, z.DecBasicHandle().MaxInitLen, 368) - if yyrt121 { - if yyrl121 <= cap(yyv121) { - yyv121 = yyv121[:yyrl121] - } else { - yyv121 = make([]pkg2_api.PersistentVolumeClaim, yyrl121) - } - } else { - yyv121 = make([]pkg2_api.PersistentVolumeClaim, yyrl121) - } - yyc121 = true - yyrr121 = len(yyv121) - if yyrg121 { - copy(yyv121, yyv2121) - } - } else if yyl121 != len(yyv121) { - yyv121 = yyv121[:yyl121] - yyc121 = true - } - yyj121 := 0 - for ; yyj121 < yyrr121; yyj121++ { - yyh121.ElemContainerState(yyj121) - if r.TryDecodeAsNil() { - yyv121[yyj121] = pkg2_api.PersistentVolumeClaim{} - } else { - yyv122 := &yyv121[yyj121] - yyv122.CodecDecodeSelf(d) - } - - } - if yyrt121 { - for ; yyj121 < yyl121; yyj121++ { - yyv121 = append(yyv121, pkg2_api.PersistentVolumeClaim{}) - yyh121.ElemContainerState(yyj121) - if r.TryDecodeAsNil() { - yyv121[yyj121] = pkg2_api.PersistentVolumeClaim{} - } else { - yyv123 := &yyv121[yyj121] - yyv123.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj121 := 0 - for ; !r.CheckBreak(); yyj121++ { - - if yyj121 >= len(yyv121) { - yyv121 = append(yyv121, pkg2_api.PersistentVolumeClaim{}) // var yyz121 pkg2_api.PersistentVolumeClaim - yyc121 = true - } - yyh121.ElemContainerState(yyj121) - if yyj121 < len(yyv121) { - if r.TryDecodeAsNil() { - yyv121[yyj121] = pkg2_api.PersistentVolumeClaim{} - } else { - yyv124 := &yyv121[yyj121] - yyv124.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj121 < len(yyv121) { - yyv121 = yyv121[:yyj121] - yyc121 = true - } else if yyj121 == 0 && yyv121 == nil { - yyv121 = []pkg2_api.PersistentVolumeClaim{} - yyc121 = true - } - } - yyh121.End() - if yyc121 { - *v = yyv121 - } -} - -func (x codecSelfer1234) encSliceStatefulSet(v []StatefulSet, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv125 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy126 := &yyv125 - yy126.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceStatefulSet(v *[]StatefulSet, d *codec1978.Decoder) { - 
var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv127 := *v - yyh127, yyl127 := z.DecSliceHelperStart() - var yyc127 bool - if yyl127 == 0 { - if yyv127 == nil { - yyv127 = []StatefulSet{} - yyc127 = true - } else if len(yyv127) != 0 { - yyv127 = yyv127[:0] - yyc127 = true - } - } else if yyl127 > 0 { - var yyrr127, yyrl127 int - var yyrt127 bool - if yyl127 > cap(yyv127) { - - yyrg127 := len(yyv127) > 0 - yyv2127 := yyv127 - yyrl127, yyrt127 = z.DecInferLen(yyl127, z.DecBasicHandle().MaxInitLen, 776) - if yyrt127 { - if yyrl127 <= cap(yyv127) { - yyv127 = yyv127[:yyrl127] - } else { - yyv127 = make([]StatefulSet, yyrl127) - } - } else { - yyv127 = make([]StatefulSet, yyrl127) - } - yyc127 = true - yyrr127 = len(yyv127) - if yyrg127 { - copy(yyv127, yyv2127) - } - } else if yyl127 != len(yyv127) { - yyv127 = yyv127[:yyl127] - yyc127 = true - } - yyj127 := 0 - for ; yyj127 < yyrr127; yyj127++ { - yyh127.ElemContainerState(yyj127) - if r.TryDecodeAsNil() { - yyv127[yyj127] = StatefulSet{} - } else { - yyv128 := &yyv127[yyj127] - yyv128.CodecDecodeSelf(d) - } - - } - if yyrt127 { - for ; yyj127 < yyl127; yyj127++ { - yyv127 = append(yyv127, StatefulSet{}) - yyh127.ElemContainerState(yyj127) - if r.TryDecodeAsNil() { - yyv127[yyj127] = StatefulSet{} - } else { - yyv129 := &yyv127[yyj127] - yyv129.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj127 := 0 - for ; !r.CheckBreak(); yyj127++ { - - if yyj127 >= len(yyv127) { - yyv127 = append(yyv127, StatefulSet{}) // var yyz127 StatefulSet - yyc127 = true - } - yyh127.ElemContainerState(yyj127) - if yyj127 < len(yyv127) { - if r.TryDecodeAsNil() { - yyv127[yyj127] = StatefulSet{} - } else { - yyv130 := &yyv127[yyj127] - yyv130.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj127 < len(yyv127) { - yyv127 = yyv127[:yyj127] - yyc127 = true - } else if yyj127 == 0 && yyv127 == nil { - yyv127 = []StatefulSet{} - yyc127 = true - } - } - yyh127.End() - if yyc127 { - *v = yyv127 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go index b7cafa1820..7e3eea615e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go @@ -17,8 +17,8 @@ limitations under the License. package apps import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" ) // +genclient=true @@ -30,18 +30,18 @@ import ( // The StatefulSet guarantees that a given network identity will always // map to the same storage identity. type StatefulSet struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - api.ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Spec defines the desired identities of pods in this set. // +optional - Spec StatefulSetSpec `json:"spec,omitempty"` + Spec StatefulSetSpec // Status is the current status of Pods in this StatefulSet. This data // may be out of date by some window of time. // +optional - Status StatefulSetStatus `json:"status,omitempty"` + Status StatefulSetStatus } // A StatefulSetSpec is the specification of a StatefulSet. @@ -52,19 +52,19 @@ type StatefulSetSpec struct { // If unspecified, defaults to 1. // TODO: Consider a rename of this field. // +optional - Replicas int32 `json:"replicas,omitempty"` + Replicas int32 // Selector is a label query over pods that should match the replica count. // If empty, defaulted to labels on the pod template. 
// More info: http://kubernetes.io/docs/user-guide/labels#label-selectors // +optional - Selector *unversioned.LabelSelector `json:"selector,omitempty"` + Selector *metav1.LabelSelector // Template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet // will fulfill this Template, but have a unique identity from the rest // of the StatefulSet. - Template api.PodTemplateSpec `json:"template"` + Template api.PodTemplateSpec // VolumeClaimTemplates is a list of claims that pods are allowed to reference. // The StatefulSet controller is responsible for mapping network identities to @@ -74,30 +74,30 @@ type StatefulSetSpec struct { // any volumes in the template, with the same name. // TODO: Define the behavior if a claim already exists with the same name. // +optional - VolumeClaimTemplates []api.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty"` + VolumeClaimTemplates []api.PersistentVolumeClaim // ServiceName is the name of the service that governs this StatefulSet. // This service must exist before the StatefulSet, and is responsible for // the network identity of the set. Pods get DNS/hostnames that follow the // pattern: pod-specific-string.serviceName.default.svc.cluster.local // where "pod-specific-string" is managed by the StatefulSet controller. - ServiceName string `json:"serviceName"` + ServiceName string } // StatefulSetStatus represents the current state of a StatefulSet. type StatefulSetStatus struct { - // most recent generation observed by this autoscaler. + // most recent generation observed by this StatefulSet. // +optional - ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + ObservedGeneration *int64 // Replicas is the number of actual replicas. - Replicas int32 `json:"replicas"` + Replicas int32 } // StatefulSetList is a collection of StatefulSets. 
type StatefulSetList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` - Items []StatefulSet `json:"items"` + metav1.ListMeta + Items []StatefulSet } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/BUILD index 840f44ca0c..9c4dd6d11b 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/BUILD @@ -4,10 +4,8 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", "go_test", - "cgo_library", ) go_library( @@ -28,16 +26,47 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/resource:go_default_library", - "//pkg/api/unversioned:go_default_library", "//pkg/api/v1:go_default_library", "//pkg/apis/apps:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//pkg/util/intstr:go_default_library", - "//pkg/watch/versioned:go_default_library", + "//pkg/apis/extensions:go_default_library", "//vendor:github.com/gogo/protobuf/proto", + "//vendor:github.com/gogo/protobuf/sortkeys", "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/api/resource", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/util/intstr", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +go_test( + name = "go_default_xtest", + srcs = ["defaults_test.go"], + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + "//pkg/api/install:go_default_library", + "//pkg/api/v1:go_default_library", + "//pkg/apis/apps/install:go_default_library", + "//pkg/apis/apps/v1beta1:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/api/equality", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/util/intstr", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/conversion.go index 1ab94d8bd5..6735517fae 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/conversion.go @@ -19,12 +19,14 @@ package v1beta1 import ( "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/apis/extensions" ) func addConversionFuncs(scheme *runtime.Scheme) error { @@ -35,21 +37,49 @@ func addConversionFuncs(scheme *runtime.Scheme) error { err := scheme.AddConversionFuncs( Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec, Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec, + // extensions + // TODO: below conversions should be dropped in favor of auto-generated + // ones, see 
https://github.com/kubernetes/kubernetes/issues/39865 + Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, + Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, + Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, + Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, + Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy, + Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy, + Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, + Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, ) if err != nil { return err } - return api.Scheme.AddFieldLabelConversionFunc("apps/v1beta1", "StatefulSet", + // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. + err = scheme.AddFieldLabelConversionFunc("apps/v1beta1", "StatefulSet", func(label, value string) (string, string, error) { switch label { case "metadata.name", "metadata.namespace", "status.successful": return label, value, nil default: - return "", "", fmt.Errorf("field label not supported: %s", label) + return "", "", fmt.Errorf("field label not supported for StatefulSet: %s", label) } - }, - ) + }) + if err != nil { + return err + } + err = api.Scheme.AddFieldLabelConversionFunc("apps/v1beta1", "Deployment", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", "metadata.namespace": + return label, value, nil + default: + return "", "", fmt.Errorf("field label %q not supported for Deployment", label) + } + }) + if err != nil { + return err + } + + return nil } func Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error { @@ -58,7 +88,7 @@ func Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *StatefulSetSpec } if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) + *out = new(metav1.LabelSelector) if err := s.Convert(*in, *out, 0); err != nil { return err } @@ -88,7 +118,7 @@ func Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.StatefulSe *out.Replicas = in.Replicas if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) + *out = new(metav1.LabelSelector) if err := s.Convert(*in, *out, 0); err != nil { return err } @@ -112,3 +142,156 @@ func Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.StatefulSe out.ServiceName = in.ServiceName return nil } + +func Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { + out.Replicas = int32(in.Replicas) + + out.Selector = nil + out.TargetSelector = "" + if in.Selector != nil { + if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { + out.Selector = in.Selector.MatchLabels + } + + selector, err := metav1.LabelSelectorAsSelector(in.Selector) + if err != nil { + return fmt.Errorf("invalid label selector: %v", err) + } + out.TargetSelector = selector.String() + } + return nil +} + +func Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + + // Normally when 2 fields map to the same internal value we favor the old field, since + // old clients can't be expected to know about new fields but clients that know about the + // new field can be expected to know about the old field (though that's not quite true,
due + // to kubectl apply). However, these fields are readonly, so any non-nil value should work. + if in.TargetSelector != "" { + labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) + if err != nil { + out.Selector = nil + return fmt.Errorf("failed to parse target selector: %v", err) + } + out.Selector = labelSelector + } else if in.Selector != nil { + out.Selector = new(metav1.LabelSelector) + selector := make(map[string]string) + for key, val := range in.Selector { + selector[key] = val + } + out.Selector.MatchLabels = selector + } else { + out.Selector = nil + } + return nil +} + +func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { + if in.Replicas != nil { + out.Replicas = *in.Replicas + } + out.Selector = in.Selector + if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + return err + } + out.RevisionHistoryLimit = in.RevisionHistoryLimit + out.MinReadySeconds = in.MinReadySeconds + out.Paused = in.Paused + if in.RollbackTo != nil { + out.RollbackTo = new(extensions.RollbackConfig) + out.RollbackTo.Revision = in.RollbackTo.Revision + } else { + out.RollbackTo = nil + } + if in.ProgressDeadlineSeconds != nil { + out.ProgressDeadlineSeconds = new(int32) + *out.ProgressDeadlineSeconds = *in.ProgressDeadlineSeconds + } + return nil +} + +func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error { + out.Replicas = &in.Replicas + out.Selector = in.Selector + if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + return err + } + if in.RevisionHistoryLimit != nil { + out.RevisionHistoryLimit = new(int32) + *out.RevisionHistoryLimit = int32(*in.RevisionHistoryLimit) + } + out.MinReadySeconds = int32(in.MinReadySeconds) + out.Paused = in.Paused + if in.RollbackTo != nil { + out.RollbackTo = new(RollbackConfig) + out.RollbackTo.Revision = int64(in.RollbackTo.Revision) + } else { + out.RollbackTo = nil + } + if in.ProgressDeadlineSeconds != nil { + out.ProgressDeadlineSeconds = new(int32) + *out.ProgressDeadlineSeconds = *in.ProgressDeadlineSeconds + } + return nil +} + +func Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error { + out.Type = DeploymentStrategyType(in.Type) + if in.RollingUpdate != nil { + out.RollingUpdate = new(RollingUpdateDeployment) + if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { + out.Type = extensions.DeploymentStrategyType(in.Type) + if in.RollingUpdate != nil { + out.RollingUpdate = new(extensions.RollingUpdateDeployment) + if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != 
nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { + if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { + return err + } + if err := s.Convert(in.MaxSurge, &out.MaxSurge, 0); err != nil { + return err + } + return nil +} + +func Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error { + if out.MaxUnavailable == nil { + out.MaxUnavailable = &intstr.IntOrString{} + } + if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil { + return err + } + if out.MaxSurge == nil { + out.MaxSurge = &intstr.IntOrString{} + } + if err := s.Convert(&in.MaxSurge, out.MaxSurge, 0); err != nil { + return err + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/defaults.go index f9cbe701d6..004cecd3f0 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/defaults.go @@ -17,14 +17,16 @@ limitations under the License. package v1beta1 import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" ) func addDefaultingFuncs(scheme *runtime.Scheme) error { RegisterDefaults(scheme) return scheme.AddDefaultingFuncs( SetDefaults_StatefulSet, + SetDefaults_Deployment, ) } @@ -32,7 +34,7 @@ func SetDefaults_StatefulSet(obj *StatefulSet) { labels := obj.Spec.Template.Labels if labels != nil { if obj.Spec.Selector == nil { - obj.Spec.Selector = &unversioned.LabelSelector{ + obj.Spec.Selector = &metav1.LabelSelector{ MatchLabels: labels, } } @@ -45,3 +47,57 @@ func SetDefaults_StatefulSet(obj *StatefulSet) { *obj.Spec.Replicas = 1 } } + +// SetDefaults_Deployment sets additional defaults compared to its counterpart +// in extensions. These addons are: +// - MaxUnavailable during rolling update set to 25% (1 in extensions) +// - MaxSurge value during rolling update set to 25% (1 in extensions) +// - RevisionHistoryLimit set to 2 (not set in extensions) +// - ProgressDeadlineSeconds set to 600s (not set in extensions) +func SetDefaults_Deployment(obj *Deployment) { + // Default labels and selector to labels from pod template spec. + labels := obj.Spec.Template.Labels + + if labels != nil { + if obj.Spec.Selector == nil { + obj.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels} + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } + // Set DeploymentSpec.Replicas to 1 if it is not set. + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } + strategy := &obj.Spec.Strategy + // Set default DeploymentStrategyType as RollingUpdate. + if strategy.Type == "" { + strategy.Type = RollingUpdateDeploymentStrategyType + } + if strategy.Type == RollingUpdateDeploymentStrategyType { + if strategy.RollingUpdate == nil { + rollingUpdate := RollingUpdateDeployment{} + strategy.RollingUpdate = &rollingUpdate + } + if strategy.RollingUpdate.MaxUnavailable == nil { + // Set default MaxUnavailable as 25% by default. 
+ maxUnavailable := intstr.FromString("25%") + strategy.RollingUpdate.MaxUnavailable = &maxUnavailable + } + if strategy.RollingUpdate.MaxSurge == nil { + // Set default MaxSurge as 25% by default. + maxSurge := intstr.FromString("25%") + strategy.RollingUpdate.MaxSurge = &maxSurge + } + } + if obj.Spec.RevisionHistoryLimit == nil { + obj.Spec.RevisionHistoryLimit = new(int32) + *obj.Spec.RevisionHistoryLimit = 2 + } + if obj.Spec.ProgressDeadlineSeconds == nil { + obj.Spec.ProgressDeadlineSeconds = new(int32) + *obj.Spec.ProgressDeadlineSeconds = 600 + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/doc.go index 6e3d417615..b60cd06cae 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/doc.go @@ -18,6 +18,5 @@ limitations under the License. // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/apps // +k8s:openapi-gen=true // +k8s:defaulter-gen=TypeMeta -// +groupName=apps.k8s.io package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/generated.pb.go index fda97f8af5..bcd8752dbb 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/generated.pb.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -25,6 +25,18 @@ limitations under the License. k8s.io/kubernetes/pkg/apis/apps/v1beta1/generated.proto It has these top-level messages: + Deployment + DeploymentCondition + DeploymentList + DeploymentRollback + DeploymentSpec + DeploymentStatus + DeploymentStrategy + RollbackConfig + RollingUpdateDeployment + Scale + ScaleSpec + ScaleStatus StatefulSet StatefulSetList StatefulSetSpec @@ -36,11 +48,15 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" import strings "strings" import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" import io "io" @@ -53,29 +69,89 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. 
const _ = proto.GoGoProtoPackageIsVersion1 +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } +func (*DeploymentRollback) ProtoMessage() {} +func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } +func (*RollbackConfig) ProtoMessage() {} +func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } +func (*RollingUpdateDeployment) ProtoMessage() {} +func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + func (m *StatefulSet) Reset() { *m = StatefulSet{} } func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{3} } +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func init() { + proto.RegisterType((*Deployment)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1beta1.Deployment") + proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1beta1.DeploymentCondition") + proto.RegisterType((*DeploymentList)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1beta1.DeploymentList") + proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1beta1.DeploymentRollback") + proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1beta1.DeploymentSpec") + proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1beta1.DeploymentStatus") + proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1beta1.DeploymentStrategy") + proto.RegisterType((*RollbackConfig)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1beta1.RollbackConfig") + proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1beta1.RollingUpdateDeployment") + proto.RegisterType((*Scale)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1beta1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1beta1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1beta1.ScaleStatus") proto.RegisterType((*StatefulSet)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1beta1.StatefulSet") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1beta1.StatefulSetList") proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1beta1.StatefulSetSpec") proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1beta1.StatefulSetStatus") } -func (m *StatefulSet) Marshal() (data []byte, err error) { +func (m *Deployment) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -85,7 +161,7 @@ func (m *StatefulSet) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *StatefulSet) MarshalTo(data []byte) (int, error) { +func (m *Deployment) MarshalTo(data []byte) (int, error) { var i int _ = i var l int @@ -117,7 +193,7 @@ func (m *StatefulSet) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *StatefulSetList) Marshal() (data []byte, err error) { +func (m *DeploymentCondition) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -127,19 +203,69 @@ func (m *StatefulSetList) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *StatefulSetList) MarshalTo(data []byte) (int, error) { +func (m *DeploymentCondition) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastUpdateTime.Size())) + n4, err := m.LastUpdateTime.MarshalTo(data[i:]) if err != nil { return 0, err } i 
+= n4 + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n5, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *DeploymentList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -155,7 +281,7 @@ func (m *StatefulSetList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *StatefulSetSpec) Marshal() (data []byte, err error) { +func (m *DeploymentRollback) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -165,7 +291,54 @@ func (m *StatefulSetSpec) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *StatefulSetSpec) MarshalTo(data []byte) (int, error) { +func (m *DeploymentRollback) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + if len(m.UpdatedAnnotations) > 0 { + for k := range m.UpdatedAnnotations { + data[i] = 0x12 + i++ + v := m.UpdatedAnnotations[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size())) + n7, err := m.RollbackTo.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + return i, nil +} + +func (m *DeploymentSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentSpec) MarshalTo(data []byte) (int, error) { var i int _ = i var l int @@ -179,23 +352,95 @@ func (m *StatefulSetSpec) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n5, err := m.Selector.MarshalTo(data[i:]) + n8, err := m.Selector.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n5 + i += n8 } data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n6, err := m.Template.MarshalTo(data[i:]) + n9, err := m.Template.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n6 - if len(m.VolumeClaimTemplates) > 0 { - for _, msg := range m.VolumeClaimTemplates { - data[i] = 0x22 + i += n9 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Strategy.Size())) + n10, err := m.Strategy.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.RevisionHistoryLimit)) + } + data[i] = 0x38 + i++ + if m.Paused { + data[i] 
= 1 + } else { + data[i] = 0 + } + i++ + if m.RollbackTo != nil { + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size())) + n11, err := m.RollbackTo.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.ProgressDeadlineSeconds != nil { + data[i] = 0x48 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ProgressDeadlineSeconds)) + } + return i, nil +} + +func (m *DeploymentStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.UpdatedReplicas)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AvailableReplicas)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0x32 i++ i = encodeVarintGenerated(data, i, uint64(msg.Size())) n, err := msg.MarshalTo(data[i:]) @@ -205,14 +450,13 @@ func (m *StatefulSetSpec) MarshalTo(data []byte) (int, error) { i += n } } - data[i] = 0x2a + data[i] = 0x38 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ServiceName))) - i += copy(data[i:], m.ServiceName) + i = encodeVarintGenerated(data, i, uint64(m.ReadyReplicas)) return i, nil } -func (m *StatefulSetStatus) Marshal() (data []byte, err error) { +func (m *DeploymentStrategy) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -222,176 +466,2739 @@ func (m *StatefulSetStatus) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *StatefulSetStatus) MarshalTo(data []byte) (int, error) { +func (m *DeploymentStrategy) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l - if m.ObservedGeneration != nil { - data[i] = 0x8 + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.RollingUpdate != nil { + data[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration)) + i = encodeVarintGenerated(data, i, uint64(m.RollingUpdate.Size())) + n12, err := m.RollingUpdate.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 } - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) return i, nil } -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *RollbackConfig) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + 
if err != nil { + return nil, err } - data[offset] = uint8(v) - return offset + 1 + return data[:n], nil } -func (m *StatefulSet) Size() (n int) { + +func (m *RollbackConfig) MarshalTo(data []byte) (int, error) { + var i int + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Revision)) + return i, nil } -func (m *StatefulSetList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *RollingUpdateDeployment) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err } - return n + return data[:n], nil } -func (m *StatefulSetSpec) Size() (n int) { +func (m *RollingUpdateDeployment) MarshalTo(data []byte) (int, error) { + var i int + _ = i var l int _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.MaxUnavailable != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxUnavailable.Size())) + n13, err := m.MaxUnavailable.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.VolumeClaimTemplates) > 0 { - for _, e := range m.VolumeClaimTemplates { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.MaxSurge != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxSurge.Size())) + n14, err := m.MaxSurge.MarshalTo(data[i:]) + if err != nil { + return 0, err } + i += n14 } - l = len(m.ServiceName) - n += 1 + l + sovGenerated(uint64(l)) - return n + return i, nil } -func (m *StatefulSetStatus) Size() (n int) { - var l int - _ = l - if m.ObservedGeneration != nil { - n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) +func (m *Scale) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err } - n += 1 + sovGenerated(uint64(m.Replicas)) - return n + return data[:n], nil } -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +func (m *Scale) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n15, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n15 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n16, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n16 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n17, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n17 + return i, nil } -func (this *StatefulSet) String() string { - if this == nil { - return "nil" + +func (m *ScaleSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return 
nil, err } - s := strings.Join([]string{`&StatefulSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return data[:n], nil } -func (this *StatefulSetList) String() string { - if this == nil { - return "nil" + +func (m *ScaleSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + return i, nil +} + +func (m *ScaleStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err } - s := strings.Join([]string{`&StatefulSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return data[:n], nil } -func (this *StatefulSetSpec) String() string { - if this == nil { - return "nil" + +func (m *ScaleStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k := range m.Selector { + data[i] = 0x12 + i++ + v := m.Selector[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } } - s := strings.Join([]string{`&StatefulSetSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_kubernetes_pkg_api_unversioned.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_kubernetes_pkg_api_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, - `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `}`, - }, "") - return s + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.TargetSelector))) + i += copy(data[i:], m.TargetSelector) + return i, nil } -func (this *StatefulSetStatus) String() string { - if this == nil { - return "nil" + +func (m *StatefulSet) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err } - s := strings.Join([]string{`&StatefulSetStatus{`, - `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `}`, - }, "") - return s + return data[:n], nil } -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if 
rv.IsNil() { - return "nil" + +func (m *StatefulSet) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n18, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + i += n18 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n19, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n19 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n20, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n20 + return i, nil +} + +func (m *StatefulSetList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StatefulSetList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n21, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n21 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StatefulSetSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StatefulSetSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n22, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n22 + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n23, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n23 + if len(m.VolumeClaimTemplates) > 0 { + for _, msg := range m.VolumeClaimTemplates { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ServiceName))) + i += copy(data[i:], m.ServiceName) + return i, nil +} + +func (m *StatefulSetStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StatefulSetStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ObservedGeneration != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration)) + } + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = 
uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *Deployment) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentRollback) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UpdatedAnnotations) > 0 { + for k, v := range m.UpdatedAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + if m.RollbackTo != nil { + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ProgressDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + } + return n +} + +func (m *DeploymentStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + return n +} + +func (m *DeploymentStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollbackConfig) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Revision)) + return n +} + 
+func (m *RollingUpdateDeployment) Size() (n int) { + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Scale) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleSpec) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + return n +} + +func (m *ScaleStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.TargetSelector) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StatefulSetSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeClaimTemplates) > 0 { + for _, e := range m.VolumeClaimTemplates { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetStatus) Size() (n int) { + var l int + _ = l + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + n += 1 + sovGenerated(uint64(m.Replicas)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Deployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Deployment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + 
strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentRollback) String() string { + if this == nil { + return "nil" + } + keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) + for k := range this.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + mapStringForUpdatedAnnotations := "map[string]string{" + for _, k := range keysForUpdatedAnnotations { + mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) + } + mapStringForUpdatedAnnotations += "}" + s := strings.Join([]string{`&DeploymentRollback{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, + `RollbackTo:` + strings.Replace(strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `RollbackTo:` + strings.Replace(fmt.Sprintf("%v", this.RollbackTo), "RollbackConfig", "RollbackConfig", 1) + `,`, + `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `}`, + }, "") + return s +} +func (this 
*DeploymentStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollbackConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollbackConfig{`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDeployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDeployment{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Scale) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Scale{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleSpec{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleStatus) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} 
+func (this *StatefulSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_kubernetes_pkg_api_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetStatus{`, + `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Deployment) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Deployment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentRollback) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.UpdatedAnnotations == nil { + m.UpdatedAnnotations = make(map[string]string) + } + m.UpdatedAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = 
&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Strategy.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollbackTo == nil { + m.RollbackTo = &RollbackConfig{} + } + if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressDeadlineSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b 
< 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStrategy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentStrategyType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDeployment{} + } + if err := m.RollingUpdate.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollbackConfig) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for 
iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxSurge == nil { + m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + 
if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scale) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scale: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 
{ + return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + 
iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Selector == nil { + m.Selector = make(map[string]string) + } + m.Selector[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetSelector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } func (m *StatefulSet) Unmarshal(data []byte) error { l := len(data) @@ -720,7 +3527,7 @@ func (m *StatefulSetSpec) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { return err @@ -1032,45 +3839,101 @@ var ( ) var fileDescriptorGenerated = []byte{ - // 627 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x93, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xc7, 0xe3, 0xa4, 0x29, 0x61, 0x53, 0xbe, 0x96, 0x0a, 0x45, 0x11, 0x72, 0xab, 0x5c, 0x08, - 0x52, 0xbb, 0x56, 0x4a, 0x2b, 0x2a, 0x8e, 0x46, 0x02, 0x21, 0x01, 0x45, 0x0e, 0xaa, 0xa0, 0x08, - 0xa4, 0xb5, 0x33, 0x0d, 0x4b, 0x6c, 0xaf, 0xe5, 0x1d, 0xe7, 0xcc, 0x85, 0x03, 0x37, 0xde, 0x82, - 0x47, 0xe0, 0x15, 0x2a, 0x71, 0xe9, 0x91, 0x53, 0x45, 0xc3, 0x8b, 0x20, 0x6f, 0x36, 0x1f, 0x34, - 0x49, 0xa9, 0x7a, 0xf3, 0xec, 0xce, 0xff, 0x37, 0x33, 0xff, 0x1d, 0x93, 0x87, 0xbd, 0x5d, 0xc5, - 0x84, 0x74, 0x7a, 0x99, 0x0f, 0x69, 0x0c, 0x08, 0xca, 0x49, 0x7a, 0x5d, 0x87, 0x27, 0x42, 0x39, - 0x3c, 0x49, 0x94, 0xd3, 0x6f, 0xf9, 0x80, 0xbc, 0xe5, 0x74, 0x21, 0x86, 0x94, 0x23, 0x74, 0x58, - 0x92, 0x4a, 0x94, 0xf4, 0xde, 0x50, 0xc8, 0x26, 0x42, 0x96, 0xf4, 0xba, 0x2c, 0x17, 0xb2, 0x5c, - 0xc8, 0x8c, 0xb0, 0xbe, 0xd9, 0x15, 0xf8, 0x31, 0xf3, 0x59, 0x20, 0x23, 0xa7, 0x2b, 0xbb, 0xd2, - 0xd1, 0x7a, 0x3f, 0x3b, 0xd4, 0x91, 0x0e, 0xf4, 0xd7, 0x90, 0x5b, 0xdf, 0x5a, 0xd8, 0x90, 0x93, - 0x82, 0x92, 0x59, 0x1a, 0xc0, 0xd9, 0x5e, 0xea, 0x3b, 0x8b, 0x35, 0x59, 0xdc, 0x87, 0x54, 0x09, - 0x19, 0x43, 0x67, 0x46, 0xb6, 0xb1, 
0x58, 0xd6, 0x9f, 0x19, 0xb8, 0xbe, 0x39, 0x3f, 0x3b, 0xcd, - 0x62, 0x14, 0xd1, 0x6c, 0x4f, 0xad, 0xf9, 0xe9, 0x19, 0x8a, 0xd0, 0x11, 0x31, 0x2a, 0x4c, 0xcf, - 0x4a, 0x1a, 0xdf, 0x8b, 0xa4, 0xda, 0x46, 0x8e, 0x70, 0x98, 0x85, 0x6d, 0x40, 0xfa, 0x86, 0x54, - 0x22, 0x40, 0xde, 0xe1, 0xc8, 0x6b, 0xd6, 0xba, 0xd5, 0xac, 0x6e, 0x35, 0xd9, 0x42, 0xd7, 0x59, - 0xbf, 0xc5, 0xf6, 0xfc, 0x4f, 0x10, 0xe0, 0x0b, 0x40, 0xee, 0xd2, 0xa3, 0x93, 0xb5, 0xc2, 0xe0, - 0x64, 0x8d, 0x4c, 0xce, 0xbc, 0x31, 0x8d, 0x1e, 0x90, 0x25, 0x95, 0x40, 0x50, 0x2b, 0x6a, 0xea, - 0x2e, 0xbb, 0xe0, 0x5b, 0xb2, 0xa9, 0xee, 0xda, 0x09, 0x04, 0xee, 0x8a, 0xa9, 0xb2, 0x94, 0x47, - 0x9e, 0x66, 0x52, 0x9f, 0x2c, 0x2b, 0xe4, 0x98, 0xa9, 0x5a, 0x49, 0xd3, 0x1f, 0x5d, 0x8a, 0xae, - 0x09, 0xee, 0x75, 0xc3, 0x5f, 0x1e, 0xc6, 0x9e, 0x21, 0x37, 0x7e, 0x5a, 0xe4, 0xc6, 0x54, 0xf6, - 0x73, 0xa1, 0x90, 0xbe, 0x9f, 0x71, 0xcb, 0x39, 0xc7, 0xad, 0xa9, 0xbd, 0x60, 0xb9, 0x5c, 0x9b, - 0x76, 0xd3, 0x94, 0xab, 0x8c, 0x4e, 0xa6, 0x2c, 0x7b, 0x4b, 0xca, 0x02, 0x21, 0x52, 0xb5, 0xe2, - 0x7a, 0xa9, 0x59, 0xdd, 0xda, 0xbe, 0xcc, 0x54, 0xee, 0x35, 0x53, 0xa0, 0xfc, 0x2c, 0x47, 0x79, - 0x43, 0x62, 0xe3, 0x47, 0xe9, 0x9f, 0x69, 0x72, 0x2f, 0x69, 0x93, 0x54, 0x52, 0x48, 0x42, 0x11, - 0x70, 0xa5, 0xa7, 0x29, 0xbb, 0x2b, 0x79, 0x63, 0x9e, 0x39, 0xf3, 0xc6, 0xb7, 0xf4, 0x03, 0xa9, - 0x28, 0x08, 0x21, 0x40, 0x99, 0x9a, 0xf7, 0xdc, 0xbe, 0xe8, 0xdc, 0xdc, 0x87, 0xb0, 0x6d, 0xb4, - 0x43, 0xfe, 0x28, 0xf2, 0xc6, 0x4c, 0xfa, 0x8e, 0x54, 0x10, 0xa2, 0x24, 0xe4, 0x08, 0xe6, 0x45, - 0x37, 0xcf, 0xdf, 0xc2, 0x57, 0xb2, 0xf3, 0xda, 0x08, 0xf4, 0x92, 0x8c, 0x5d, 0x1d, 0x9d, 0x7a, - 0x63, 0x20, 0xfd, 0x62, 0x91, 0xd5, 0xbe, 0x0c, 0xb3, 0x08, 0x1e, 0x87, 0x5c, 0x44, 0xa3, 0x0c, - 0x55, 0x5b, 0xd2, 0x2e, 0x3f, 0xf8, 0x4f, 0xa5, 0x7c, 0x14, 0x85, 0x10, 0xe3, 0xfe, 0x84, 0xe1, - 0xde, 0x35, 0xf5, 0x56, 0xf7, 0xe7, 0x80, 0xbd, 0xb9, 0xe5, 0xe8, 0x0e, 0xa9, 0x2a, 0x48, 0xfb, - 0x22, 0x80, 0x97, 0x3c, 0x82, 0x5a, 0x79, 0xdd, 0x6a, 0x5e, 0x75, 0x6f, 0x1b, 0x50, 0xb5, 0x3d, - 0xb9, 0xf2, 0xa6, 0xf3, 0x1a, 0x5f, 0x2d, 0x72, 0x6b, 0x66, 0x6b, 0xe9, 0x13, 0x42, 0xa5, 0x9f, - 0xa7, 0x41, 0xe7, 0xe9, 0xf0, 0x17, 0x17, 0x32, 0xd6, 0xaf, 0x58, 0x72, 0xef, 0x0c, 0x4e, 0xd6, - 0xe8, 0xde, 0xcc, 0xad, 0x37, 0x47, 0x41, 0x37, 0xa6, 0x76, 0xa0, 0xa8, 0x77, 0x60, 0x6c, 0xe5, - 0xec, 0x1e, 0xb8, 0xf7, 0x8f, 0x4e, 0xed, 0xc2, 0xf1, 0xa9, 0x5d, 0xf8, 0x75, 0x6a, 0x17, 0x3e, - 0x0f, 0x6c, 0xeb, 0x68, 0x60, 0x5b, 0xc7, 0x03, 0xdb, 0xfa, 0x3d, 0xb0, 0xad, 0x6f, 0x7f, 0xec, - 0xc2, 0xc1, 0x15, 0xb3, 0x92, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x28, 0x58, 0x61, 0xec, 0xf5, - 0x05, 0x00, 0x00, + // 1525 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe4, 0x58, 0xcb, 0x6f, 0x5b, 0xc5, + 0x17, 0xce, 0x4d, 0xec, 0xc4, 0x99, 0x34, 0x4e, 0x33, 0xc9, 0xaf, 0xf1, 0x2f, 0x45, 0x4e, 0xe5, + 0x45, 0x1f, 0xa8, 0xbd, 0xa6, 0x69, 0xa1, 0x8f, 0x40, 0x45, 0xdc, 0x96, 0x52, 0x94, 0xd0, 0x6a, + 0xec, 0x54, 0xb4, 0x14, 0x89, 0xb1, 0x3d, 0xbd, 0x9d, 0xfa, 0xbe, 0x74, 0x67, 0x6c, 0xc5, 0x3b, + 0x36, 0x2c, 0x90, 0x58, 0xb0, 0x62, 0x87, 0xd8, 0x23, 0x24, 0x76, 0xfc, 0x0d, 0x11, 0x6c, 0xba, + 0x44, 0x2c, 0x22, 0xe2, 0xfe, 0x17, 0x5d, 0xa1, 0x99, 0x3b, 0xf7, 0xe5, 0x6b, 0x27, 0x8e, 0x11, + 0xdd, 0xb0, 0xf3, 0x9d, 0x39, 0xdf, 0x77, 0xce, 0xcc, 0x7c, 0xe7, 0xcc, 0x19, 0x83, 0x6b, 0xad, + 0xeb, 0x4c, 0xa7, 0x4e, 0xb9, 0xd5, 0xae, 0x13, 0xcf, 0x26, 0x9c, 0xb0, 0xb2, 0xdb, 0x32, 0xca, + 0xd8, 0xa5, 0xac, 0x8c, 0x5d, 0x97, 0x95, 0x3b, 0x97, 0xeb, 0x84, 0xe3, 0xcb, 0x65, 0x83, 
0xd8, + 0xc4, 0xc3, 0x9c, 0x34, 0x75, 0xd7, 0x73, 0xb8, 0x03, 0xcf, 0xf9, 0x40, 0x3d, 0x02, 0xea, 0x6e, + 0xcb, 0xd0, 0x05, 0x50, 0x17, 0x40, 0x5d, 0x01, 0x57, 0x2f, 0x19, 0x94, 0x3f, 0x6f, 0xd7, 0xf5, + 0x86, 0x63, 0x95, 0x0d, 0xc7, 0x70, 0xca, 0x12, 0x5f, 0x6f, 0x3f, 0x93, 0x5f, 0xf2, 0x43, 0xfe, + 0xf2, 0x79, 0x57, 0xaf, 0xaa, 0x80, 0xb0, 0x4b, 0x2d, 0xdc, 0x78, 0x4e, 0x6d, 0xe2, 0x75, 0xa3, + 0x90, 0x2c, 0xc2, 0x71, 0xb9, 0x93, 0x8a, 0x66, 0xb5, 0x3c, 0x0c, 0xe5, 0xb5, 0x6d, 0x4e, 0x2d, + 0x92, 0x02, 0xbc, 0x77, 0x14, 0x80, 0x35, 0x9e, 0x13, 0x0b, 0xa7, 0x70, 0x57, 0x86, 0xe1, 0xda, + 0x9c, 0x9a, 0x65, 0x6a, 0x73, 0xc6, 0xbd, 0x14, 0x28, 0xb6, 0x26, 0x46, 0xbc, 0x0e, 0xf1, 0xa2, + 0x05, 0x91, 0x5d, 0x6c, 0xb9, 0x26, 0x19, 0xb4, 0xa6, 0x8b, 0x43, 0x8f, 0x66, 0x90, 0xf5, 0x07, + 0x87, 0x1c, 0x24, 0xd9, 0xe5, 0xc4, 0x66, 0xd4, 0xb1, 0x87, 0x1e, 0x67, 0xe9, 0xe7, 0x49, 0x00, + 0xee, 0x10, 0xd7, 0x74, 0xba, 0x16, 0xb1, 0x39, 0xfc, 0x12, 0xe4, 0xc4, 0x56, 0x37, 0x31, 0xc7, + 0x05, 0xed, 0x8c, 0x76, 0x7e, 0x6e, 0xfd, 0x1d, 0x5d, 0x1d, 0x78, 0x7c, 0xe5, 0xd1, 0x91, 0x0b, + 0x6b, 0xbd, 0x73, 0x59, 0x7f, 0x50, 0x7f, 0x41, 0x1a, 0x7c, 0x9b, 0x70, 0x5c, 0x81, 0x7b, 0xfb, + 0x6b, 0x13, 0xbd, 0xfd, 0x35, 0x10, 0x8d, 0xa1, 0x90, 0x15, 0x3e, 0x06, 0x19, 0xe6, 0x92, 0x46, + 0x61, 0x52, 0xb2, 0x5f, 0xd3, 0x47, 0x94, 0x93, 0x1e, 0x05, 0x59, 0x75, 0x49, 0xa3, 0x72, 0x42, + 0x39, 0xc9, 0x88, 0x2f, 0x24, 0x29, 0x21, 0x06, 0xd3, 0x8c, 0x63, 0xde, 0x66, 0x85, 0x29, 0x49, + 0x7e, 0x63, 0x1c, 0x72, 0x49, 0x50, 0xc9, 0x2b, 0xfa, 0x69, 0xff, 0x1b, 0x29, 0xe2, 0xd2, 0xc1, + 0x14, 0x58, 0x8a, 0x8c, 0x6f, 0x3b, 0x76, 0x93, 0x72, 0xea, 0xd8, 0x70, 0x03, 0x64, 0x78, 0xd7, + 0x25, 0x72, 0xcf, 0x66, 0x2b, 0xe7, 0x82, 0xe0, 0x6a, 0x5d, 0x97, 0xbc, 0xde, 0x5f, 0x5b, 0x19, + 0x00, 0x11, 0x53, 0x48, 0x82, 0xe0, 0xa3, 0x30, 0xee, 0x49, 0x09, 0xbf, 0x95, 0x74, 0xfe, 0x7a, + 0x7f, 0xed, 0x50, 0x49, 0xe8, 0x21, 0x67, 0x32, 0x58, 0x78, 0x16, 0x4c, 0x7b, 0x04, 0x33, 0xc7, + 0x2e, 0x64, 0x24, 0x6f, 0xb8, 0x28, 0x24, 0x47, 0x91, 0x9a, 0x85, 0x17, 0xc0, 0x8c, 0x45, 0x18, + 0xc3, 0x06, 0x29, 0x64, 0xa5, 0xe1, 0x82, 0x32, 0x9c, 0xd9, 0xf6, 0x87, 0x51, 0x30, 0x0f, 0x5f, + 0x80, 0xbc, 0x89, 0x19, 0xdf, 0x71, 0x9b, 0x98, 0x93, 0x1a, 0xb5, 0x48, 0x61, 0x5a, 0x6e, 0xf5, + 0xdb, 0xa3, 0xa9, 0x44, 0x20, 0x2a, 0xa7, 0x14, 0x7b, 0x7e, 0x2b, 0xc1, 0x84, 0xfa, 0x98, 0x61, + 0x07, 0x40, 0x31, 0x52, 0xf3, 0xb0, 0xcd, 0xfc, 0x2d, 0x13, 0xfe, 0x66, 0x8e, 0xed, 0x6f, 0x55, + 0xf9, 0x83, 0x5b, 0x29, 0x36, 0x34, 0xc0, 0x43, 0x69, 0x4f, 0x03, 0xf9, 0xe8, 0xc0, 0xb6, 0x28, + 0xe3, 0xf0, 0x69, 0x2a, 0x2d, 0xf4, 0xd1, 0x02, 0x10, 0x68, 0x99, 0x14, 0x27, 0x55, 0x10, 0xb9, + 0x60, 0x24, 0x96, 0x12, 0x9f, 0x81, 0x2c, 0xe5, 0xc4, 0x12, 0xc7, 0x3f, 0x75, 0x7e, 0x6e, 0xfd, + 0xca, 0x18, 0xb2, 0xad, 0xcc, 0x2b, 0xfe, 0xec, 0x7d, 0xc1, 0x84, 0x7c, 0xc2, 0xd2, 0xb7, 0x53, + 0x00, 0x46, 0x46, 0xc8, 0x31, 0xcd, 0x3a, 0x6e, 0xb4, 0xe0, 0x19, 0x90, 0xb1, 0xb1, 0x15, 0xa8, + 0x35, 0x4c, 0xa5, 0x4f, 0xb1, 0x45, 0x90, 0x9c, 0x81, 0x3f, 0x6a, 0x00, 0xb6, 0xe5, 0x51, 0x34, + 0x37, 0x6d, 0xdb, 0xe1, 0x58, 0xec, 0x4e, 0x10, 0x60, 0x75, 0x8c, 0x00, 0x03, 0xdf, 0xfa, 0x4e, + 0x8a, 0xf5, 0xae, 0xcd, 0xbd, 0x6e, 0x74, 0x4a, 0x69, 0x03, 0x34, 0x20, 0x14, 0xd8, 0x02, 0xc0, + 0x53, 0x9c, 0x35, 0x47, 0x25, 0xfc, 0xe8, 0xd5, 0x24, 0x08, 0xe7, 0xb6, 0x63, 0x3f, 0xa3, 0x46, + 0x54, 0xb2, 0x50, 0x48, 0x89, 0x62, 0xf4, 0xab, 0x77, 0xc1, 0xca, 0x90, 0xb8, 0xe1, 0x49, 0x30, + 0xd5, 0x22, 0x5d, 0x7f, 0x2b, 0x91, 0xf8, 0x09, 0x97, 0x41, 0xb6, 0x83, 0xcd, 0x36, 0xf1, 0xb3, + 0x19, 0xf9, 0x1f, 
0x37, 0x27, 0xaf, 0x6b, 0xa5, 0x3f, 0xb3, 0x71, 0x65, 0x89, 0xca, 0x05, 0xcf, + 0x83, 0x9c, 0x47, 0x5c, 0x93, 0x36, 0x30, 0x93, 0x1c, 0xd9, 0xca, 0x09, 0xa1, 0x12, 0xa4, 0xc6, + 0x50, 0x38, 0x0b, 0xbf, 0x00, 0x39, 0x46, 0x4c, 0xd2, 0xe0, 0x8e, 0xa7, 0x8a, 0xe7, 0x95, 0x11, + 0x35, 0x88, 0xeb, 0xc4, 0xac, 0x2a, 0xa8, 0x4f, 0x1f, 0x7c, 0xa1, 0x90, 0x12, 0x7e, 0x0e, 0x72, + 0x9c, 0x58, 0xae, 0x89, 0x39, 0x51, 0xbb, 0x79, 0x69, 0xf8, 0x6e, 0x0a, 0xda, 0x87, 0x4e, 0xb3, + 0xa6, 0x00, 0xb2, 0x22, 0x87, 0x0a, 0x0f, 0x46, 0x51, 0x48, 0x08, 0x29, 0xc8, 0x31, 0x2e, 0xae, + 0x1d, 0xa3, 0x2b, 0x6b, 0xd1, 0xdc, 0xfa, 0xc6, 0x58, 0xb5, 0xd9, 0xa7, 0x88, 0x5c, 0x05, 0x23, + 0x28, 0xa4, 0x87, 0x9b, 0x60, 0xc1, 0xa2, 0x36, 0x22, 0xb8, 0xd9, 0xad, 0x92, 0x86, 0x63, 0x37, + 0x99, 0x2c, 0x6a, 0xd9, 0xca, 0x8a, 0x02, 0x2d, 0x6c, 0x27, 0xa7, 0x51, 0xbf, 0x3d, 0xdc, 0x02, + 0xcb, 0x1e, 0xe9, 0x50, 0x71, 0x71, 0x7e, 0x4c, 0x19, 0x77, 0xbc, 0xee, 0x16, 0xb5, 0x28, 0x97, + 0xa5, 0x2e, 0x5b, 0x29, 0xf4, 0xf6, 0xd7, 0x96, 0xd1, 0x80, 0x79, 0x34, 0x10, 0x25, 0xaa, 0xb0, + 0x8b, 0xdb, 0x8c, 0x34, 0x65, 0xe9, 0xca, 0x45, 0x55, 0xf8, 0xa1, 0x1c, 0x45, 0x6a, 0x16, 0x1a, + 0x09, 0x41, 0xe7, 0xfe, 0x99, 0xa0, 0xf3, 0xc3, 0xc5, 0x0c, 0x77, 0xc0, 0x8a, 0xeb, 0x39, 0x86, + 0x47, 0x18, 0xbb, 0x43, 0x70, 0xd3, 0xa4, 0x36, 0x09, 0x76, 0x6a, 0x56, 0xae, 0xf0, 0x74, 0x6f, + 0x7f, 0x6d, 0xe5, 0xe1, 0x60, 0x13, 0x34, 0x0c, 0x5b, 0xfa, 0x3e, 0x03, 0x4e, 0xf6, 0xdf, 0xa3, + 0xf0, 0x13, 0x00, 0x9d, 0xba, 0xec, 0x7d, 0x9a, 0xf7, 0xfc, 0xce, 0x83, 0x3a, 0xb6, 0x14, 0xfa, + 0x54, 0x94, 0xf1, 0x0f, 0x52, 0x16, 0x68, 0x00, 0x0a, 0x5e, 0x8c, 0xa5, 0xca, 0xa4, 0x0c, 0x34, + 0xd4, 0xc1, 0x80, 0x74, 0xd9, 0x04, 0x0b, 0xaa, 0x6a, 0x04, 0x93, 0x52, 0xd6, 0x31, 0x1d, 0xec, + 0x24, 0xa7, 0x51, 0xbf, 0x3d, 0xbc, 0x07, 0x16, 0x71, 0x07, 0x53, 0x13, 0xd7, 0x4d, 0x12, 0x92, + 0x64, 0x24, 0xc9, 0xff, 0x15, 0xc9, 0xe2, 0x66, 0xbf, 0x01, 0x4a, 0x63, 0xe0, 0x36, 0x58, 0x6a, + 0xdb, 0x69, 0x2a, 0x5f, 0x97, 0xa7, 0x15, 0xd5, 0xd2, 0x4e, 0xda, 0x04, 0x0d, 0xc2, 0x41, 0x17, + 0x80, 0x46, 0x70, 0xe5, 0xb3, 0xc2, 0xb4, 0xac, 0xc9, 0xef, 0x8f, 0x91, 0x4f, 0x61, 0xdf, 0x10, + 0xd5, 0xbf, 0x70, 0x88, 0xa1, 0x98, 0x0f, 0xb8, 0x01, 0xe6, 0x3d, 0x91, 0x21, 0x61, 0xe8, 0x33, + 0x32, 0xf4, 0xff, 0x29, 0xd8, 0x3c, 0x8a, 0x4f, 0xa2, 0xa4, 0x6d, 0xe9, 0x77, 0x2d, 0x7e, 0x09, + 0x05, 0x29, 0x0b, 0x6f, 0x26, 0x5a, 0xa6, 0xb3, 0x7d, 0x2d, 0xd3, 0xa9, 0x34, 0x22, 0xd6, 0x31, + 0x75, 0xc1, 0xbc, 0x10, 0x34, 0xb5, 0x0d, 0xff, 0x10, 0x55, 0x41, 0xfc, 0xf0, 0x58, 0xe9, 0x12, + 0xa2, 0x63, 0xd7, 0xe8, 0xa2, 0x5c, 0x4d, 0x7c, 0x12, 0x25, 0x3d, 0x95, 0x6e, 0x81, 0x7c, 0x32, + 0xd7, 0x7c, 0x5d, 0xfa, 0x89, 0xaf, 0x94, 0x1d, 0xd3, 0xa5, 0x3f, 0x8e, 0x42, 0x8b, 0xd2, 0x2b, + 0x0d, 0xac, 0x0c, 0xf1, 0x0e, 0x4d, 0x90, 0xb7, 0xf0, 0x6e, 0x4c, 0x07, 0x47, 0xf6, 0xe0, 0xe2, + 0xf5, 0xa1, 0xfb, 0xaf, 0x0f, 0xfd, 0xbe, 0xcd, 0x1f, 0x78, 0x55, 0xee, 0x51, 0xdb, 0xa8, 0x40, + 0xd1, 0x5f, 0x6d, 0x27, 0xb8, 0x50, 0x1f, 0x37, 0x7c, 0x02, 0x72, 0x16, 0xde, 0xad, 0xb6, 0x3d, + 0x23, 0xd8, 0xbf, 0xe3, 0xfb, 0x91, 0xb7, 0xc9, 0xb6, 0x62, 0x41, 0x21, 0x5f, 0xe9, 0x87, 0x49, + 0x90, 0xad, 0x36, 0xb0, 0x49, 0xde, 0xc0, 0x8b, 0xa2, 0x96, 0x78, 0x51, 0xac, 0x8f, 0xac, 0x01, + 0x19, 0xdf, 0xd0, 0xc7, 0xc4, 0xd3, 0xbe, 0xc7, 0xc4, 0xd5, 0x63, 0xf2, 0x1e, 0xfe, 0x8e, 0xb8, + 0x01, 0x66, 0x43, 0xf7, 0x89, 0xc2, 0xa6, 0x1d, 0x55, 0xd8, 0x4a, 0x3f, 0x4d, 0x82, 0xb9, 0x98, + 0x8b, 0xe3, 0xa1, 0xa1, 0x9b, 0xe8, 0x22, 0x44, 0xe5, 0xa8, 0x8c, 0xb3, 0x30, 0x3d, 0xe8, 0x20, + 0xfc, 0xe6, 0x2d, 0xba, 0x90, 0xd3, 0x8d, 
0xc5, 0x2d, 0x90, 0xe7, 0xd8, 0x33, 0x08, 0x0f, 0xe6, + 0xe4, 0x86, 0xce, 0x46, 0xcf, 0x80, 0x5a, 0x62, 0x16, 0xf5, 0x59, 0xaf, 0x6e, 0x80, 0xf9, 0x84, + 0xb3, 0x63, 0x75, 0x5c, 0xbf, 0x88, 0xcd, 0xe2, 0x98, 0x93, 0x67, 0x6d, 0xb3, 0x4a, 0xde, 0xc4, + 0xfb, 0xf6, 0x49, 0x42, 0x8d, 0xd7, 0x47, 0xdf, 0xdc, 0x28, 0xca, 0xa1, 0x9a, 0xac, 0xf7, 0x69, + 0xf2, 0xe6, 0x58, 0xec, 0x87, 0x2b, 0xf3, 0x37, 0x0d, 0x2c, 0xc4, 0xac, 0xdf, 0xc0, 0xf3, 0xe7, + 0x71, 0xf2, 0xf9, 0x73, 0x75, 0x9c, 0x45, 0x0d, 0x79, 0xff, 0xfc, 0x3a, 0x95, 0x58, 0xcc, 0x7f, + 0xa8, 0xe3, 0xfe, 0x5a, 0x03, 0xcb, 0x1d, 0xc7, 0x6c, 0x5b, 0xe4, 0xb6, 0x89, 0xa9, 0x15, 0x58, + 0x88, 0xfe, 0xe5, 0x88, 0x37, 0xa6, 0xf4, 0x44, 0x3c, 0x46, 0x19, 0x27, 0x36, 0x7f, 0x14, 0x71, + 0x54, 0xde, 0x52, 0xfe, 0x96, 0x1f, 0x0d, 0x20, 0x46, 0x03, 0xdd, 0xc1, 0x77, 0xc1, 0x9c, 0x68, + 0xe4, 0x68, 0x83, 0x88, 0xd7, 0xa5, 0xfa, 0x7f, 0x61, 0x49, 0x11, 0xcd, 0x55, 0xa3, 0x29, 0x14, + 0xb7, 0x2b, 0x7d, 0xa3, 0x81, 0xc5, 0x94, 0x66, 0xe1, 0x47, 0x87, 0x74, 0x93, 0xa7, 0xfe, 0xad, + 0x4e, 0xb2, 0x72, 0x61, 0xef, 0xa0, 0x38, 0xf1, 0xf2, 0xa0, 0x38, 0xf1, 0xc7, 0x41, 0x71, 0xe2, + 0xab, 0x5e, 0x51, 0xdb, 0xeb, 0x15, 0xb5, 0x97, 0xbd, 0xa2, 0xf6, 0x57, 0xaf, 0xa8, 0x7d, 0xf7, + 0xaa, 0x38, 0xf1, 0x64, 0x46, 0x29, 0xf2, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd6, 0xb9, 0xde, + 0x1a, 0x56, 0x15, 0x00, 0x00, } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/generated.proto index 4d4b054b33..8ca77b2e6a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,15 +21,247 @@ syntax = 'proto2'; package k8s.io.kubernetes.pkg.apis.apps.v1beta1; -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; +import "k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; +// Deployment enables declarative updates for Pods and ReplicaSets. +message Deployment { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the Deployment. + // +optional + optional DeploymentSpec spec = 2; + + // Most recently observed status of the Deployment. + // +optional + optional DeploymentStatus status = 3; +} + +// DeploymentCondition describes the state of a deployment at a certain point. +message DeploymentCondition { + // Type of deployment condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. 
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6;
+
+  // Last time the condition transitioned from one status to another.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7;
+
+  // The reason for the condition's last transition.
+  optional string reason = 4;
+
+  // A human readable message indicating details about the transition.
+  optional string message = 5;
+}
+
+// DeploymentList is a list of Deployments.
+message DeploymentList {
+  // Standard list metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of Deployments.
+  repeated Deployment items = 2;
+}
+
+// DeploymentRollback stores the information required to rollback a deployment.
+message DeploymentRollback {
+  // Required: This must match the Name of a deployment.
+  optional string name = 1;
+
+  // The annotations to be updated to a deployment
+  // +optional
+  map<string, string> updatedAnnotations = 2;
+
+  // The config of this deployment rollback.
+  optional RollbackConfig rollbackTo = 3;
+}
+
+// DeploymentSpec is the specification of the desired behavior of the Deployment.
+message DeploymentSpec {
+  // Number of desired pods. This is a pointer to distinguish between explicit
+  // zero and not specified. Defaults to 1.
+  // +optional
+  optional int32 replicas = 1;
+
+  // Label selector for pods. Existing ReplicaSets whose pods are
+  // selected by this will be the ones affected by this deployment.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+  // Template describes the pods that will be created.
+  optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3;
+
+  // The deployment strategy to use to replace existing pods with new ones.
+  // +optional
+  optional DeploymentStrategy strategy = 4;
+
+  // Minimum number of seconds for which a newly created pod should be ready
+  // without any of its container crashing, for it to be considered available.
+  // Defaults to 0 (pod will be considered available as soon as it is ready)
+  // +optional
+  optional int32 minReadySeconds = 5;
+
+  // The number of old ReplicaSets to retain to allow rollback.
+  // This is a pointer to distinguish between explicit zero and not specified.
+  // Defaults to 2.
+  // +optional
+  optional int32 revisionHistoryLimit = 6;
+
+  // Indicates that the deployment is paused.
+  // +optional
+  optional bool paused = 7;
+
+  // The config this deployment is rolling back to. Will be cleared after rollback is done.
+  // +optional
+  optional RollbackConfig rollbackTo = 8;
+
+  // The maximum time in seconds for a deployment to make progress before it
+  // is considered to be failed. The deployment controller will continue to
+  // process failed deployments and a condition with a ProgressDeadlineExceeded
+  // reason will be surfaced in the deployment status. Once autoRollback is
+  // implemented, the deployment controller will automatically rollback failed
+  // deployments. Note that progress will not be estimated during the time a
+  // deployment is paused. Defaults to 600s.
+  optional int32 progressDeadlineSeconds = 9;
+}
+
+// DeploymentStatus is the most recently observed status of the Deployment.
+message DeploymentStatus {
+  // The generation observed by the deployment controller.
+  // +optional
+  optional int64 observedGeneration = 1;
+
+  // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+  // +optional
+  optional int32 replicas = 2;
+
+  // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+  // +optional
+  optional int32 updatedReplicas = 3;
+
+  // Total number of ready pods targeted by this deployment.
+  // +optional
+  optional int32 readyReplicas = 7;
+
+  // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+  // +optional
+  optional int32 availableReplicas = 4;
+
+  // Total number of unavailable pods targeted by this deployment.
+  // +optional
+  optional int32 unavailableReplicas = 5;
+
+  // Represents the latest available observations of a deployment's current state.
+  repeated DeploymentCondition conditions = 6;
+}
+
+// DeploymentStrategy describes how to replace existing pods with new ones.
+message DeploymentStrategy {
+  // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+  // +optional
+  optional string type = 1;
+
+  // Rolling update config params. Present only if DeploymentStrategyType =
+  // RollingUpdate.
+  // ---
+  // TODO: Update this to follow our convention for oneOf, whatever we decide it
+  // to be.
+  // +optional
+  optional RollingUpdateDeployment rollingUpdate = 2;
+}
+
+message RollbackConfig {
+  // The revision to rollback to. If set to 0, rollback to the last revision.
+  // +optional
+  optional int64 revision = 1;
+}
+
+// Spec to control the desired behavior of rolling update.
+message RollingUpdateDeployment {
+  // The maximum number of pods that can be unavailable during the update.
+  // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+  // Absolute number is calculated from percentage by rounding down.
+  // This can not be 0 if MaxSurge is 0.
+  // Defaults to 25%.
+  // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods
+  // immediately when the rolling update starts. Once new pods are ready, old RC
+  // can be scaled down further, followed by scaling up the new RC, ensuring
+  // that the total number of pods available at all times during the update is at
+  // least 70% of desired pods.
+  // +optional
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
+
+  // The maximum number of pods that can be scheduled above the desired number of
+  // pods.
+  // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+  // This can not be 0 if MaxUnavailable is 0.
+  // Absolute number is calculated from percentage by rounding up.
+  // Defaults to 25%.
+  // Example: when this is set to 30%, the new RC can be scaled up immediately when
+  // the rolling update starts, such that the total number of old and new pods do not exceed
+  // 130% of desired pods. Once old pods have been killed,
+  // new RC can be scaled up further, ensuring that total number of pods running
+  // at any time during the update is at most 130% of desired pods.
+  // +optional
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
+}
+
+// Scale represents a scaling request for a resource.
+message Scale {
+  // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
+  // +optional
+  optional ScaleSpec spec = 2;
+
+  // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.
+  // +optional
+  optional ScaleStatus status = 3;
+}
+
+// ScaleSpec describes the attributes of a scale subresource
+message ScaleSpec {
+  // desired number of instances for the scaled object.
+  // +optional
+  optional int32 replicas = 1;
+}
+
+// ScaleStatus represents the current status of a scale subresource.
+message ScaleStatus {
+  // actual number of observed instances of the scaled object.
+  optional int32 replicas = 1;
+
+  // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
+  // +optional
+  map<string, string> selector = 2;
+
+  // label selector for pods that should match the replicas count. This is a serialized
+  // version of both map-based and more expressive set-based selectors. This is done to
+  // avoid introspection in the clients. The string will be in the same format as the
+  // query-param syntax. If the target type only supports map-based selectors, both this
+  // field and map-based selector field are populated.
+  // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
+  // +optional
+  optional string targetSelector = 3;
+}
+
 // StatefulSet represents a set of pods with consistent identities.
 // Identities are defined as:
 //  - Network: A single stable DNS and hostname.
@@ -38,7 +270,7 @@ option go_package = "v1beta1";
 // map to the same storage identity.
 message StatefulSet {
   // +optional
-  optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
 
   // Spec defines the desired identities of pods in this set.
   // +optional
@@ -53,7 +285,7 @@ message StatefulSet {
 // StatefulSetList is a collection of StatefulSets.
 message StatefulSetList {
   // +optional
-  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
 
   repeated StatefulSet items = 2;
 }
@@ -72,7 +304,7 @@ message StatefulSetSpec {
   // If empty, defaulted to labels on the pod template.
   // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
   // +optional
-  optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 2;
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
 
   // Template is the object that describes the pod that will be created if
   // insufficient replicas are detected. Each pod stamped out by the StatefulSet
@@ -100,7 +332,7 @@ message StatefulSetSpec {
 
 // StatefulSetStatus represents the current state of a StatefulSet.
 message StatefulSetStatus {
-  // most recent generation observed by this autoscaler.
+  // most recent generation observed by this StatefulSet.
   // +optional
   optional int64 observedGeneration = 1;
 
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/register.go
index 07659fd7ca..6e618e1d8f 100644
--- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/register.go
+++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/register.go
@@ -17,17 +17,21 @@ limitations under the License.
package v1beta1 import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName is the group name use in this package const GroupName = "apps" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1beta1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} var ( SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) @@ -37,15 +41,13 @@ var ( // Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, + &Deployment{}, + &DeploymentList{}, + &DeploymentRollback{}, + &Scale{}, &StatefulSet{}, &StatefulSetList{}, - &v1.ListOptions{}, - &v1.DeleteOptions{}, - &v1.ExportOptions{}, ) - versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } - -func (obj *StatefulSet) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *StatefulSetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/types.generated.go index f9ef225187..fb153f62a4 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/types.generated.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/types.generated.go @@ -25,11 +25,11 @@ import ( "errors" "fmt" codec1978 "github.com/ugorji/go/codec" - pkg4_resource "k8s.io/kubernetes/pkg/api/resource" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg2_v1 "k8s.io/kubernetes/pkg/api/v1" - pkg3_types "k8s.io/kubernetes/pkg/types" - pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" + pkg4_resource "k8s.io/apimachinery/pkg/api/resource" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + pkg5_intstr "k8s.io/apimachinery/pkg/util/intstr" + pkg3_v1 "k8s.io/kubernetes/pkg/api/v1" "reflect" "runtime" time "time" @@ -66,16 +66,16 @@ func init() { } if false { // reference the types, but skip this branch at build/run time var v0 pkg4_resource.Quantity - var v1 pkg1_unversioned.TypeMeta - var v2 pkg2_v1.ObjectMeta - var v3 pkg3_types.UID - var v4 pkg5_intstr.IntOrString + var v1 pkg1_v1.TypeMeta + var v2 pkg2_types.UID + var v3 pkg5_intstr.IntOrString + var v4 pkg3_v1.PodTemplateSpec var v5 time.Time _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 } } -func (x *StatefulSet) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -89,17 +89,13 @@ func (x *StatefulSet) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool + var yyq2 [1]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true + 
yyq2[0] = x.Replicas != 0 var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) + r.EncodeArrayStart(1) } else { yynn2 = 0 for _, b := range yyq2 { @@ -117,100 +113,24 @@ func (x *StatefulSet) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym4 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + r.EncodeInt(int64(x.Replicas)) } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeInt(0) } } else { if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym5 := z.EncBinary() _ = yym5 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + r.EncodeInt(int64(x.Replicas)) } } } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yy10.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.ObjectMeta - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy13 := &x.Spec - yy13.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.Spec - yy14.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Status - yy17.CodecEncodeSelf(e) - } - } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -220,29 +140,29 @@ func (x *StatefulSet) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *StatefulSet) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym18 := z.DecBinary() - _ = yym18 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct19 := r.ContainerType() - if yyct19 == codecSelferValueTypeMap1234 { - yyl19 := r.ReadMapStart() - if yyl19 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl19, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct19 == codecSelferValueTypeArray1234 { - yyl19 := r.ReadArrayStart() - if yyl19 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl19, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -250,16 +170,16 @@ func (x *StatefulSet) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *StatefulSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys20Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys20Slc - var yyhl20 bool = l >= 0 - for yyj20 := 0; ; yyj20++ { - if yyhl20 { - if yyj20 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -268,323 +188,184 @@ func (x *StatefulSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys20Slc = r.DecodeBytes(yys20Slc, true, true) - yys20 := string(yys20Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys20 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv23 := &x.ObjectMeta - yyv23.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = StatefulSetSpec{} - } else { - yyv24 := &x.Spec - yyv24.CodecDecodeSelf(d) - } - case "status": + switch yys3 { + case "replicas": if r.TryDecodeAsNil() { - x.Status = StatefulSetStatus{} + x.Replicas = 0 } else { - yyv25 := &x.Status - yyv25.CodecDecodeSelf(d) + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } } default: - z.DecStructFieldNotFound(-1, yys20) - } // end switch yys20 - } // end for yyj20 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *StatefulSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj26 int - var yyb26 bool - var yyhl26 bool = l >= 0 - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - 
x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb26 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb26 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv29 := &x.ObjectMeta - yyv29.CodecDecodeSelf(d) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = StatefulSetSpec{} - } else { - yyv30 := &x.Spec - yyv30.CodecDecodeSelf(d) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = StatefulSetStatus{} + x.Replicas = 0 } else { - yyv31 := &x.Status - yyv31.CodecDecodeSelf(d) + yyv7 := &x.Replicas + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } } for { - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb26 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb26 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj26-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *StatefulSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym32 := z.EncBinary() - _ = yym32 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep33 := !z.EncBinary() - yy2arr33 := z.EncBasicHandle().StructToArray - var yyq33 [5]bool - _, _, _ = yysep33, yyq33, yy2arr33 - const yyr33 bool = false - yyq33[0] = x.Replicas != nil - yyq33[1] = x.Selector != nil - yyq33[3] = len(x.VolumeClaimTemplates) != 0 - var yynn33 int - if yyr33 || yy2arr33 { - r.EncodeArrayStart(5) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = len(x.Selector) != 0 + yyq2[2] = x.TargetSelector != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) } else { - yynn33 = 2 - for _, b := range yyq33 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn33++ + yynn2++ } } - r.EncodeMapStart(yynn33) - yynn33 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr33 || yy2arr33 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[0] { - if x.Replicas == nil { - r.EncodeNil() - } else { - yy35 := *x.Replicas - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - r.EncodeInt(int64(yy35)) - } - } + yym4 := z.EncBinary() + _ = yym4 + if false { } else { - r.EncodeNil() + r.EncodeInt(int64(x.Replicas)) } } else { - if yyq33[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Replicas == nil { - r.EncodeNil() - } else { - yy37 := *x.Replicas - yym38 := z.EncBinary() - _ = yym38 - if false { - } else { - r.EncodeInt(int64(yy37)) - } - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) } } - if yyr33 || yy2arr33 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[1] { + if yyq2[1] { if x.Selector == nil { r.EncodeNil() } else { - yym40 := z.EncBinary() - _ = yym40 + yym7 := z.EncBinary() + _ = yym7 if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { } else { - z.EncFallback(x.Selector) + z.F.EncMapStringStringV(x.Selector, false, e) } } } else { r.EncodeNil() } } else { - if yyq33[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("selector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Selector == nil { r.EncodeNil() } else { - yym41 := z.EncBinary() - _ = yym41 + yym8 := z.EncBinary() + _ = yym8 if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { } else { - z.EncFallback(x.Selector) + z.F.EncMapStringStringV(x.Selector, false, e) } } } } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy43 := &x.Template - yy43.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy44 := &x.Template - yy44.CodecEncodeSelf(e) - } - if yyr33 || yy2arr33 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[3] { - if x.VolumeClaimTemplates == nil { - r.EncodeNil() + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { } else { - yym46 := z.EncBinary() - _ = yym46 - if false { - } else { - h.encSlicev1_PersistentVolumeClaim(([]pkg2_v1.PersistentVolumeClaim)(x.VolumeClaimTemplates), e) - } + r.EncodeString(codecSelferC_UTF81234, string(x.TargetSelector)) } } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq33[3] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeClaimTemplates")) + r.EncodeString(codecSelferC_UTF81234, string("targetSelector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VolumeClaimTemplates == nil { - r.EncodeNil() + yym11 := z.EncBinary() + _ = yym11 + if false { } else { - yym47 := z.EncBinary() - _ = yym47 - if false { - } else { - h.encSlicev1_PersistentVolumeClaim(([]pkg2_v1.PersistentVolumeClaim)(x.VolumeClaimTemplates), e) - } + r.EncodeString(codecSelferC_UTF81234, string(x.TargetSelector)) } } } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym49 := z.EncBinary() - _ = yym49 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym50 := z.EncBinary() - _ = yym50 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) - } - } - if 
yyr33 || yy2arr33 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -593,29 +374,29 @@ func (x *StatefulSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *StatefulSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym51 := z.DecBinary() - _ = yym51 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct52 := r.ContainerType() - if yyct52 == codecSelferValueTypeMap1234 { - yyl52 := r.ReadMapStart() - if yyl52 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl52, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct52 == codecSelferValueTypeArray1234 { - yyl52 := r.ReadArrayStart() - if yyl52 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl52, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -623,16 +404,16 @@ func (x *StatefulSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *StatefulSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys53Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys53Slc - var yyhl53 bool = l >= 0 - for yyj53 := 0; ; yyj53++ { - if yyhl53 { - if yyj53 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -641,292 +422,4930 @@ func (x *StatefulSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys53Slc = r.DecodeBytes(yys53Slc, true, true) - yys53 := string(yys53Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys53 { + switch yys3 { case "replicas": if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } + x.Replicas = 0 } else { - if x.Replicas == nil { - x.Replicas = new(int32) - } - yym55 := z.DecBinary() - _ = yym55 + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 if false { } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) } } case "selector": if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } + x.Selector = nil } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym57 := z.DecBinary() - _ = yym57 + yyv6 := &x.Selector + yym7 := z.DecBinary() + _ = yym7 if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.F.DecMapStringStringX(yyv6, false, d) + } + } + case "targetSelector": + if r.TryDecodeAsNil() { + x.TargetSelector = "" + } else { + yyv8 := &x.TargetSelector + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, 
yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv11 := &x.Replicas + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(yyv11)) = int32(r.DecodeInt(32)) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Selector = nil + } else { + yyv13 := &x.Selector + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecMapStringStringX(yyv13, false, d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetSelector = "" + } else { + yyv15 := &x.TargetSelector + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { 
+ z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = 
r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ScaleSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ScaleStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ScaleSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ScaleStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *StatefulSet) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StatefulSet) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StatefulSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = StatefulSetSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = StatefulSetStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StatefulSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + 
*((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = StatefulSetSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = StatefulSetStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *StatefulSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != nil + yyq2[1] = x.Selector != nil + yyq2[3] = len(x.VolumeClaimTemplates) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Replicas == nil { + r.EncodeNil() + } else { + yy4 := *x.Replicas + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Replicas == nil { + r.EncodeNil() + } else { + yy6 := *x.Replicas + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym10 := 
z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.Template + yy12.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.Template + yy14.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.VolumeClaimTemplates == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encSlicev1_PersistentVolumeClaim(([]pkg3_v1.PersistentVolumeClaim)(x.VolumeClaimTemplates), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeClaimTemplates")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VolumeClaimTemplates == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + h.encSlicev1_PersistentVolumeClaim(([]pkg3_v1.PersistentVolumeClaim)(x.VolumeClaimTemplates), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("serviceName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StatefulSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StatefulSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if 
x.Replicas == nil { + x.Replicas = new(int32) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = pkg3_v1.PodTemplateSpec{} + } else { + yyv8 := &x.Template + yyv8.CodecDecodeSelf(d) + } + case "volumeClaimTemplates": + if r.TryDecodeAsNil() { + x.VolumeClaimTemplates = nil + } else { + yyv9 := &x.VolumeClaimTemplates + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicev1_PersistentVolumeClaim((*[]pkg3_v1.PersistentVolumeClaim)(yyv9), d) + } + } + case "serviceName": + if r.TryDecodeAsNil() { + x.ServiceName = "" + } else { + yyv11 := &x.ServiceName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StatefulSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = pkg3_v1.PodTemplateSpec{} + } else { + yyv18 := &x.Template + yyv18.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeClaimTemplates = nil + } else { + yyv19 := &x.VolumeClaimTemplates + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicev1_PersistentVolumeClaim((*[]pkg3_v1.PersistentVolumeClaim)(yyv19), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ServiceName = "" + } else { + yyv21 := &x.ServiceName + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *StatefulSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ObservedGeneration != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy4 := *x.ObservedGeneration + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy6 := *x.ObservedGeneration + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StatefulSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StatefulSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + 
z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "observedGeneration": + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv6 := &x.Replicas + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StatefulSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv11 := &x.Replicas + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(yyv11)) = int32(r.DecodeInt(32)) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *StatefulSetList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceStatefulSet(([]StatefulSet)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceStatefulSet(([]StatefulSet)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StatefulSetList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StatefulSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var 
yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceStatefulSet((*[]StatefulSet)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StatefulSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceStatefulSet((*[]StatefulSet)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, 
"") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Deployment) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + 
yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Deployment) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Deployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = DeploymentSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = DeploymentStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + 
x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = DeploymentSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = DeploymentStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [9]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != nil + yyq2[1] = x.Selector != nil + yyq2[3] = true + yyq2[4] = x.MinReadySeconds != 0 + yyq2[5] = x.RevisionHistoryLimit != nil + yyq2[6] = x.Paused != false + yyq2[7] = x.RollbackTo != nil + yyq2[8] = x.ProgressDeadlineSeconds != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(9) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Replicas == nil { + r.EncodeNil() + } else { + yy4 := *x.Replicas + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Replicas == nil { + r.EncodeNil() + } else { + yy6 := *x.Replicas + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { 
+ if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.Template + yy12.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.Template + yy14.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy17 := &x.Strategy + yy17.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("strategy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy19 := &x.Strategy + yy19.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.RevisionHistoryLimit == nil { + r.EncodeNil() + } else { + yy25 := *x.RevisionHistoryLimit + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeInt(int64(yy25)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("revisionHistoryLimit")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RevisionHistoryLimit == nil { + r.EncodeNil() + } else { + yy27 := *x.RevisionHistoryLimit + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeInt(int64(yy27)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym30 := z.EncBinary() + _ = yym30 + if false { + } else { + r.EncodeBool(bool(x.Paused)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("paused")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeBool(bool(x.Paused)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.RollbackTo == nil { + r.EncodeNil() + } else { + x.RollbackTo.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RollbackTo == nil { + r.EncodeNil() + } 
else { + x.RollbackTo.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.ProgressDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy36 := *x.ProgressDeadlineSeconds + yym37 := z.EncBinary() + _ = yym37 + if false { + } else { + r.EncodeInt(int64(yy36)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("progressDeadlineSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ProgressDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy38 := *x.ProgressDeadlineSeconds + yym39 := z.EncBinary() + _ = yym39 + if false { + } else { + r.EncodeInt(int64(yy38)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { } else { z.DecFallback(x.Selector, false) } } - case "template": + case "template": + if r.TryDecodeAsNil() { + x.Template = pkg3_v1.PodTemplateSpec{} + } else { + yyv8 := &x.Template + yyv8.CodecDecodeSelf(d) + } + case "strategy": + if r.TryDecodeAsNil() { + x.Strategy = DeploymentStrategy{} + } else { + yyv9 := &x.Strategy + yyv9.CodecDecodeSelf(d) + } + case "minReadySeconds": + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv10 := &x.MinReadySeconds + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "revisionHistoryLimit": + if 
r.TryDecodeAsNil() { + if x.RevisionHistoryLimit != nil { + x.RevisionHistoryLimit = nil + } + } else { + if x.RevisionHistoryLimit == nil { + x.RevisionHistoryLimit = new(int32) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + case "paused": + if r.TryDecodeAsNil() { + x.Paused = false + } else { + yyv14 := &x.Paused + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + case "rollbackTo": + if r.TryDecodeAsNil() { + if x.RollbackTo != nil { + x.RollbackTo = nil + } + } else { + if x.RollbackTo == nil { + x.RollbackTo = new(RollbackConfig) + } + x.RollbackTo.CodecDecodeSelf(d) + } + case "progressDeadlineSeconds": + if r.TryDecodeAsNil() { + if x.ProgressDeadlineSeconds != nil { + x.ProgressDeadlineSeconds = nil + } + } else { + if x.ProgressDeadlineSeconds == nil { + x.ProgressDeadlineSeconds = new(int32) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(x.ProgressDeadlineSeconds)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj19 int + var yyb19 bool + var yyhl19 bool = l >= 0 + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = pkg3_v1.PodTemplateSpec{} + } else { + yyv24 := &x.Template + yyv24.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Strategy = DeploymentStrategy{} + } else { + yyv25 := &x.Strategy + yyv25.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv26 := 
&x.MinReadySeconds + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*int32)(yyv26)) = int32(r.DecodeInt(32)) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RevisionHistoryLimit != nil { + x.RevisionHistoryLimit = nil + } + } else { + if x.RevisionHistoryLimit == nil { + x.RevisionHistoryLimit = new(int32) + } + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Paused = false + } else { + yyv30 := &x.Paused + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*bool)(yyv30)) = r.DecodeBool() + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RollbackTo != nil { + x.RollbackTo = nil + } + } else { + if x.RollbackTo == nil { + x.RollbackTo = new(RollbackConfig) + } + x.RollbackTo.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ProgressDeadlineSeconds != nil { + x.ProgressDeadlineSeconds = nil + } + } else { + if x.ProgressDeadlineSeconds == nil { + x.ProgressDeadlineSeconds = new(int32) + } + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*int32)(x.ProgressDeadlineSeconds)) = int32(r.DecodeInt(32)) + } + } + for { + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj19-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentRollback) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[3] = len(x.UpdatedAnnotations) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.UpdatedAnnotations == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("updatedAnnotations")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.UpdatedAnnotations == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy16 := &x.RollbackTo + yy16.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy18 := &x.RollbackTo + yy18.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentRollback) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentRollback) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = 
h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "updatedAnnotations": + if r.TryDecodeAsNil() { + x.UpdatedAnnotations = nil + } else { + yyv10 := &x.UpdatedAnnotations + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + z.F.DecMapStringStringX(yyv10, false, d) + } + } + case "rollbackTo": + if r.TryDecodeAsNil() { + x.RollbackTo = RollbackConfig{} + } else { + yyv12 := &x.RollbackTo + yyv12.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentRollback) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv14 := &x.Kind + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv16 := &x.APIVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv18 := &x.Name + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UpdatedAnnotations = nil + } else { + yyv20 := &x.UpdatedAnnotations + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + z.F.DecMapStringStringX(yyv20, false, d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } 
else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RollbackTo = RollbackConfig{} + } else { + yyv22 := &x.RollbackTo + yyv22.CodecDecodeSelf(d) + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RollbackConfig) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Revision != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Revision)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("revision")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Revision)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RollbackConfig) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RollbackConfig) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "revision": + if r.TryDecodeAsNil() { + x.Revision = 0 + } else { + yyv4 := &x.Revision + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } + } 
+ default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RollbackConfig) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Revision = 0 + } else { + yyv7 := &x.Revision + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int64)(yyv7)) = int64(r.DecodeInt(64)) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentStrategy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Type != "" + yyq2[1] = x.RollingUpdate != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Type.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.RollingUpdate == nil { + r.EncodeNil() + } else { + x.RollingUpdate.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rollingUpdate")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RollingUpdate == nil { + r.EncodeNil() + } else { + x.RollingUpdate.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentStrategy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "rollingUpdate": + if r.TryDecodeAsNil() { + if x.RollingUpdate != nil { + x.RollingUpdate = nil + } + } else { + if x.RollingUpdate == nil { + x.RollingUpdate = new(RollingUpdateDeployment) + } + x.RollingUpdate.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentStrategy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv7 := &x.Type + yyv7.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RollingUpdate != nil { + x.RollingUpdate = nil + } + } else { + if x.RollingUpdate == nil { + x.RollingUpdate = new(RollingUpdateDeployment) + } + x.RollingUpdate.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x DeploymentStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *DeploymentStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *RollingUpdateDeployment) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := 
!z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.MaxUnavailable != nil + yyq2[1] = x.MaxSurge != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.MaxUnavailable == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { + } else if !yym4 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxUnavailable) + } else { + z.EncFallback(x.MaxUnavailable) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("maxUnavailable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MaxUnavailable == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxUnavailable) + } else { + z.EncFallback(x.MaxUnavailable) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.MaxSurge == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxSurge) { + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxSurge) + } else { + z.EncFallback(x.MaxSurge) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("maxSurge")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MaxSurge == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxSurge) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxSurge) + } else { + z.EncFallback(x.MaxSurge) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RollingUpdateDeployment) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RollingUpdateDeployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if 
r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "maxUnavailable": + if r.TryDecodeAsNil() { + if x.MaxUnavailable != nil { + x.MaxUnavailable = nil + } + } else { + if x.MaxUnavailable == nil { + x.MaxUnavailable = new(pkg5_intstr.IntOrString) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxUnavailable) + } else { + z.DecFallback(x.MaxUnavailable, false) + } + } + case "maxSurge": + if r.TryDecodeAsNil() { + if x.MaxSurge != nil { + x.MaxSurge = nil + } + } else { + if x.MaxSurge == nil { + x.MaxSurge = new(pkg5_intstr.IntOrString) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxSurge) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxSurge) + } else { + z.DecFallback(x.MaxSurge, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RollingUpdateDeployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.MaxUnavailable != nil { + x.MaxUnavailable = nil + } + } else { + if x.MaxUnavailable == nil { + x.MaxUnavailable = new(pkg5_intstr.IntOrString) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxUnavailable) + } else { + z.DecFallback(x.MaxUnavailable, false) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.MaxSurge != nil { + x.MaxSurge = nil + } + } else { + if x.MaxSurge == nil { + x.MaxSurge = new(pkg5_intstr.IntOrString) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxSurge) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxSurge) + } else { + z.DecFallback(x.MaxSurge, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ObservedGeneration != 0 + 
yyq2[1] = x.Replicas != 0 + yyq2[2] = x.UpdatedReplicas != 0 + yyq2[3] = x.ReadyReplicas != 0 + yyq2[4] = x.AvailableReplicas != 0 + yyq2[5] = x.UnavailableReplicas != 0 + yyq2[6] = len(x.Conditions) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.UpdatedReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("updatedReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.UpdatedReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.ReadyReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readyReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.ReadyReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(x.UnavailableReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("unavailableReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.UnavailableReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + h.encSliceDeploymentCondition(([]DeploymentCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + h.encSliceDeploymentCondition(([]DeploymentCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "observedGeneration": if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} + x.ObservedGeneration = 0 } else { - yyv58 := &x.Template - yyv58.CodecDecodeSelf(d) + yyv4 := &x.ObservedGeneration + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } } - case "volumeClaimTemplates": + case "replicas": if r.TryDecodeAsNil() { - x.VolumeClaimTemplates = nil + x.Replicas = 0 } else { - yyv59 := &x.VolumeClaimTemplates - yym60 := z.DecBinary() - _ = yym60 + yyv6 := &x.Replicas + yym7 := z.DecBinary() + _ = yym7 if false { } else { - h.decSlicev1_PersistentVolumeClaim((*[]pkg2_v1.PersistentVolumeClaim)(yyv59), d) + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) } } - case "serviceName": + case "updatedReplicas": if r.TryDecodeAsNil() { - x.ServiceName = "" + x.UpdatedReplicas = 0 + } else { + yyv8 := &x.UpdatedReplicas + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "readyReplicas": + if r.TryDecodeAsNil() { 
+ x.ReadyReplicas = 0 + } else { + yyv10 := &x.ReadyReplicas + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "availableReplicas": + if r.TryDecodeAsNil() { + x.AvailableReplicas = 0 + } else { + yyv12 := &x.AvailableReplicas + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(yyv12)) = int32(r.DecodeInt(32)) + } + } + case "unavailableReplicas": + if r.TryDecodeAsNil() { + x.UnavailableReplicas = 0 + } else { + yyv14 := &x.UnavailableReplicas + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } + } + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil } else { - x.ServiceName = string(r.DecodeString()) + yyv16 := &x.Conditions + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + h.decSliceDeploymentCondition((*[]DeploymentCondition)(yyv16), d) + } } default: - z.DecStructFieldNotFound(-1, yys53) - } // end switch yys53 - } // end for yyj53 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *StatefulSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *DeploymentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj62 int - var yyb62 bool - var yyhl62 bool = l >= 0 - yyj62++ - if yyhl62 { - yyb62 = yyj62 > l + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb62 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb62 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } + x.ObservedGeneration = 0 } else { - if x.Replicas == nil { - x.Replicas = new(int32) - } - yym64 := z.DecBinary() - _ = yym64 + yyv19 := &x.ObservedGeneration + yym20 := z.DecBinary() + _ = yym20 if false { } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + *((*int64)(yyv19)) = int64(r.DecodeInt(64)) } } - yyj62++ - if yyhl62 { - yyb62 = yyj62 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb62 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb62 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil + x.Replicas = 0 + } else { + yyv21 := &x.Replicas + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UpdatedReplicas = 0 + } else { + yyv23 := &x.UpdatedReplicas + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) } - yym66 := z.DecBinary() - _ = yym66 + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.ReadyReplicas = 0 + } else { + yyv25 := &x.ReadyReplicas + yym26 := z.DecBinary() + _ = yym26 if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { } else { - z.DecFallback(x.Selector, false) + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) } } - yyj62++ - if yyhl62 { - yyb62 = yyj62 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb62 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb62 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} + x.AvailableReplicas = 0 } else { - yyv67 := &x.Template - yyv67.CodecDecodeSelf(d) + yyv27 := &x.AvailableReplicas + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(yyv27)) = int32(r.DecodeInt(32)) + } } - yyj62++ - if yyhl62 { - yyb62 = yyj62 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb62 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb62 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.VolumeClaimTemplates = nil + x.UnavailableReplicas = 0 } else { - yyv68 := &x.VolumeClaimTemplates - yym69 := z.DecBinary() - _ = yym69 + yyv29 := &x.UnavailableReplicas + yym30 := z.DecBinary() + _ = yym30 if false { } else { - h.decSlicev1_PersistentVolumeClaim((*[]pkg2_v1.PersistentVolumeClaim)(yyv68), d) + *((*int32)(yyv29)) = int32(r.DecodeInt(32)) } } - yyj62++ - if yyhl62 { - yyb62 = yyj62 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb62 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb62 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ServiceName = "" + x.Conditions = nil } else { - x.ServiceName = string(r.DecodeString()) + yyv31 := &x.Conditions + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + h.decSliceDeploymentCondition((*[]DeploymentCondition)(yyv31), d) + } } for { - yyj62++ - if yyhl62 { - yyb62 = yyj62 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb62 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb62 { + if yyb18 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj62-1, "") + z.DecStructFieldNotFound(yyj18-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *StatefulSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { +func (x DeploymentConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *DeploymentConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *DeploymentCondition) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym71 := z.EncBinary() - _ = yym71 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && 
z.EncExt(x) { } else { - yysep72 := !z.EncBinary() - yy2arr72 := z.EncBasicHandle().StructToArray - var yyq72 [2]bool - _, _, _ = yysep72, yyq72, yy2arr72 - const yyr72 bool = false - yyq72[0] = x.ObservedGeneration != nil - var yynn72 int - if yyr72 || yy2arr72 { - r.EncodeArrayStart(2) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) } else { - yynn72 = 1 - for _, b := range yyq72 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn72++ + yynn2++ } } - r.EncodeMapStart(yynn72) - yynn72 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr72 || yy2arr72 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq72[0] { - if x.ObservedGeneration == nil { - r.EncodeNil() + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf7 := &x.Status + yysf7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf8 := &x.Status + yysf8.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastUpdateTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) } else { - yy74 := *x.ObservedGeneration - yym75 := z.EncBinary() - _ = yym75 - if false { - } else { - r.EncodeInt(int64(yy74)) - } + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq72[0] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + r.EncodeString(codecSelferC_UTF81234, string("lastUpdateTime")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ObservedGeneration == nil { - r.EncodeNil() + yy12 := &x.LastUpdateTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.LastTransitionTime + yym18 := z.EncBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } 
else if !yym18 && z.IsJSONHandle() { + z.EncJSONMarshal(yy17) + } else { + z.EncFallback(yy17) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { } else { - yy76 := *x.ObservedGeneration - yym77 := z.EncBinary() - _ = yym77 - if false { - } else { - r.EncodeInt(int64(yy76)) - } + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) } } } - if yyr72 || yy2arr72 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym79 := z.EncBinary() - _ = yym79 - if false { + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } } else { - r.EncodeInt(int64(x.Replicas)) + r.EncodeString(codecSelferC_UTF81234, "") } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym80 := z.EncBinary() - _ = yym80 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } } } - if yyr72 || yy2arr72 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -935,29 +5354,29 @@ func (x *StatefulSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *StatefulSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *DeploymentCondition) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym81 := z.DecBinary() - _ = yym81 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct82 := r.ContainerType() - if yyct82 == codecSelferValueTypeMap1234 { - yyl82 := r.ReadMapStart() - if yyl82 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl82, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct82 == codecSelferValueTypeArray1234 { - yyl82 := r.ReadArrayStart() - if yyl82 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl82, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -965,16 +5384,16 @@ func (x *StatefulSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *StatefulSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *DeploymentCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := 
codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys83Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys83Slc - var yyhl83 bool = l >= 0 - for yyj83 := 0; ; yyj83++ { - if yyhl83 { - if yyj83 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -983,142 +5402,282 @@ func (x *StatefulSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys83Slc = r.DecodeBytes(yys83Slc, true, true) - yys83 := string(yys83Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys83 { - case "observedGeneration": + switch yys3 { + case "type": if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "lastUpdateTime": + if r.TryDecodeAsNil() { + x.LastUpdateTime = pkg1_v1.Time{} + } else { + yyv6 := &x.LastUpdateTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) } + } + case "lastTransitionTime": + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg1_v1.Time{} } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) } - yym85 := z.DecBinary() - _ = yym85 + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv10 := &x.Reason + yym11 := z.DecBinary() + _ = yym11 if false { } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + *((*string)(yyv10)) = r.DecodeString() } } - case "replicas": + case "message": if r.TryDecodeAsNil() { - x.Replicas = 0 + x.Message = "" } else { - x.Replicas = int32(r.DecodeInt(32)) + yyv12 := &x.Message + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys83) - } // end switch yys83 - } // end for yyj83 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *StatefulSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *DeploymentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj87 int - var yyb87 bool - var yyhl87 bool = l >= 0 - yyj87++ - if yyhl87 { - yyb87 = yyj87 > l + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb87 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb87 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil + x.Type 
= "" + } else { + yyv15 := &x.Type + yyv15.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv16 := &x.Status + yyv16.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastUpdateTime = pkg1_v1.Time{} + } else { + yyv17 := &x.LastUpdateTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg1_v1.Time{} + } else { + yyv19 := &x.LastTransitionTime + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if yym20 { + z.DecBinaryUnmarshal(yyv19) + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) + } else { + z.DecFallback(yyv19, false) } - yym89 := z.DecBinary() - _ = yym89 + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv21 := &x.Reason + yym22 := z.DecBinary() + _ = yym22 if false { } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + *((*string)(yyv21)) = r.DecodeString() } } - yyj87++ - if yyhl87 { - yyb87 = yyj87 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb87 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb87 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Replicas = 0 + x.Message = "" } else { - x.Replicas = int32(r.DecodeInt(32)) + yyv23 := &x.Message + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } } for { - yyj87++ - if yyhl87 { - yyb87 = yyj87 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb87 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb87 { + if yyb14 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj87-1, "") + z.DecStructFieldNotFound(yyj14-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *StatefulSetList) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym91 := z.EncBinary() - _ = yym91 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep92 := !z.EncBinary() - yy2arr92 := 
z.EncBasicHandle().StructToArray - var yyq92 [4]bool - _, _, _ = yysep92, yyq92, yy2arr92 - const yyr92 bool = false - yyq92[0] = x.Kind != "" - yyq92[1] = x.APIVersion != "" - yyq92[2] = true - var yynn92 int - if yyr92 || yy2arr92 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn92 = 1 - for _, b := range yyq92 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn92++ + yynn2++ } } - r.EncodeMapStart(yynn92) - yynn92 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr92 || yy2arr92 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq92[0] { - yym94 := z.EncBinary() - _ = yym94 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -1127,23 +5686,23 @@ func (x *StatefulSetList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq92[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym95 := z.EncBinary() - _ = yym95 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr92 || yy2arr92 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq92[1] { - yym97 := z.EncBinary() - _ = yym97 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -1152,57 +5711,57 @@ func (x *StatefulSetList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq92[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym98 := z.EncBinary() - _ = yym98 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr92 || yy2arr92 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq92[2] { - yy100 := &x.ListMeta - yym101 := z.EncBinary() - _ = yym101 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy100) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy100) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq92[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy102 := &x.ListMeta - yym103 := z.EncBinary() - _ = yym103 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy102) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy102) + z.EncFallback(yy12) } } } - if yyr92 || yy2arr92 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym105 := z.EncBinary() - _ = yym105 + yym15 := z.EncBinary() + _ = yym15 if false { } else { - 
h.encSliceStatefulSet(([]StatefulSet)(x.Items), e) + h.encSliceDeployment(([]Deployment)(x.Items), e) } } } else { @@ -1212,15 +5771,15 @@ func (x *StatefulSetList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym106 := z.EncBinary() - _ = yym106 + yym16 := z.EncBinary() + _ = yym16 if false { } else { - h.encSliceStatefulSet(([]StatefulSet)(x.Items), e) + h.encSliceDeployment(([]Deployment)(x.Items), e) } } } - if yyr92 || yy2arr92 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -1229,29 +5788,29 @@ func (x *StatefulSetList) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *StatefulSetList) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *DeploymentList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym107 := z.DecBinary() - _ = yym107 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct108 := r.ContainerType() - if yyct108 == codecSelferValueTypeMap1234 { - yyl108 := r.ReadMapStart() - if yyl108 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl108, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct108 == codecSelferValueTypeArray1234 { - yyl108 := r.ReadArrayStart() - if yyl108 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl108, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -1259,16 +5818,16 @@ func (x *StatefulSetList) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *StatefulSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *DeploymentList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys109Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys109Slc - var yyhl109 bool = l >= 0 - for yyj109 := 0; ; yyj109++ { - if yyhl109 { - if yyj109 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -1277,68 +5836,80 @@ func (x *StatefulSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys109Slc = r.DecodeBytes(yys109Slc, true, true) - yys109 := string(yys109Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys109 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.ListMeta = 
pkg1_v1.ListMeta{} } else { - yyv112 := &x.ListMeta - yym113 := z.DecBinary() - _ = yym113 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv112) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv112, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv114 := &x.Items - yym115 := z.DecBinary() - _ = yym115 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceStatefulSet((*[]StatefulSet)(yyv114), d) + h.decSliceDeployment((*[]Deployment)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys109) - } // end switch yys109 - } // end for yyj109 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *StatefulSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *DeploymentList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj116 int - var yyb116 bool - var yyhl116 bool = l >= 0 - yyj116++ - if yyhl116 { - yyb116 = yyj116 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb116 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb116 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1346,15 +5917,21 @@ func (x *StatefulSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj116++ - if yyhl116 { - yyb116 = yyj116 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb116 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb116 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1362,38 +5939,44 @@ func (x *StatefulSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj116++ - if yyhl116 { - yyb116 = yyj116 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb116 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb116 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv119 := &x.ListMeta - yym120 := z.DecBinary() - _ = yym120 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv119) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv119, false) + z.DecFallback(yyv17, false) } } - yyj116++ - if yyhl116 { - yyb116 = yyj116 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb116 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb116 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1401,125 +5984,128 @@ func (x *StatefulSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Items = nil } else { - yyv121 := &x.Items - yym122 := 
z.DecBinary() - _ = yym122 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceStatefulSet((*[]StatefulSet)(yyv121), d) + h.decSliceDeployment((*[]Deployment)(yyv19), d) } } for { - yyj116++ - if yyhl116 { - yyb116 = yyj116 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb116 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb116 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj116-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) encSlicev1_PersistentVolumeClaim(v []pkg2_v1.PersistentVolumeClaim, e *codec1978.Encoder) { +func (x codecSelfer1234) encSlicev1_PersistentVolumeClaim(v []pkg3_v1.PersistentVolumeClaim, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv123 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy124 := &yyv123 - yy124.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSlicev1_PersistentVolumeClaim(v *[]pkg2_v1.PersistentVolumeClaim, d *codec1978.Decoder) { +func (x codecSelfer1234) decSlicev1_PersistentVolumeClaim(v *[]pkg3_v1.PersistentVolumeClaim, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv125 := *v - yyh125, yyl125 := z.DecSliceHelperStart() - var yyc125 bool - if yyl125 == 0 { - if yyv125 == nil { - yyv125 = []pkg2_v1.PersistentVolumeClaim{} - yyc125 = true - } else if len(yyv125) != 0 { - yyv125 = yyv125[:0] - yyc125 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg3_v1.PersistentVolumeClaim{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl125 > 0 { - var yyrr125, yyrl125 int - var yyrt125 bool - if yyl125 > cap(yyv125) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg125 := len(yyv125) > 0 - yyv2125 := yyv125 - yyrl125, yyrt125 = z.DecInferLen(yyl125, z.DecBasicHandle().MaxInitLen, 368) - if yyrt125 { - if yyrl125 <= cap(yyv125) { - yyv125 = yyv125[:yyrl125] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 376) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv125 = make([]pkg2_v1.PersistentVolumeClaim, yyrl125) + yyv1 = make([]pkg3_v1.PersistentVolumeClaim, yyrl1) } } else { - yyv125 = make([]pkg2_v1.PersistentVolumeClaim, yyrl125) + yyv1 = make([]pkg3_v1.PersistentVolumeClaim, yyrl1) } - yyc125 = true - yyrr125 = len(yyv125) - if yyrg125 { - copy(yyv125, yyv2125) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl125 != len(yyv125) { - yyv125 = yyv125[:yyl125] - yyc125 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj125 := 0 - for ; yyj125 < yyrr125; yyj125++ { - yyh125.ElemContainerState(yyj125) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv125[yyj125] = pkg2_v1.PersistentVolumeClaim{} + yyv1[yyj1] = pkg3_v1.PersistentVolumeClaim{} } else { - yyv126 := &yyv125[yyj125] - yyv126.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + 
yyv2.CodecDecodeSelf(d) } } - if yyrt125 { - for ; yyj125 < yyl125; yyj125++ { - yyv125 = append(yyv125, pkg2_v1.PersistentVolumeClaim{}) - yyh125.ElemContainerState(yyj125) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, pkg3_v1.PersistentVolumeClaim{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv125[yyj125] = pkg2_v1.PersistentVolumeClaim{} + yyv1[yyj1] = pkg3_v1.PersistentVolumeClaim{} } else { - yyv127 := &yyv125[yyj125] - yyv127.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj125 := 0 - for ; !r.CheckBreak(); yyj125++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj125 >= len(yyv125) { - yyv125 = append(yyv125, pkg2_v1.PersistentVolumeClaim{}) // var yyz125 pkg2_v1.PersistentVolumeClaim - yyc125 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, pkg3_v1.PersistentVolumeClaim{}) // var yyz1 pkg3_v1.PersistentVolumeClaim + yyc1 = true } - yyh125.ElemContainerState(yyj125) - if yyj125 < len(yyv125) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv125[yyj125] = pkg2_v1.PersistentVolumeClaim{} + yyv1[yyj1] = pkg3_v1.PersistentVolumeClaim{} } else { - yyv128 := &yyv125[yyj125] - yyv128.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -1527,17 +6113,17 @@ func (x codecSelfer1234) decSlicev1_PersistentVolumeClaim(v *[]pkg2_v1.Persisten } } - if yyj125 < len(yyv125) { - yyv125 = yyv125[:yyj125] - yyc125 = true - } else if yyj125 == 0 && yyv125 == nil { - yyv125 = []pkg2_v1.PersistentVolumeClaim{} - yyc125 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg3_v1.PersistentVolumeClaim{} + yyc1 = true } } - yyh125.End() - if yyc125 { - *v = yyv125 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -1546,10 +6132,10 @@ func (x codecSelfer1234) encSliceStatefulSet(v []StatefulSet, e *codec1978.Encod z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv129 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy130 := &yyv129 - yy130.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -1559,83 +6145,324 @@ func (x codecSelfer1234) decSliceStatefulSet(v *[]StatefulSet, d *codec1978.Deco z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv131 := *v - yyh131, yyl131 := z.DecSliceHelperStart() - var yyc131 bool - if yyl131 == 0 { - if yyv131 == nil { - yyv131 = []StatefulSet{} - yyc131 = true - } else if len(yyv131) != 0 { - yyv131 = yyv131[:0] - yyc131 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []StatefulSet{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 856) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]StatefulSet, yyrl1) + } + } else { + yyv1 = make([]StatefulSet, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + 
yyv1[yyj1] = StatefulSet{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, StatefulSet{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = StatefulSet{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, StatefulSet{}) // var yyz1 StatefulSet + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = StatefulSet{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []StatefulSet{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceDeploymentCondition(v []DeploymentCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceDeploymentCondition(v *[]DeploymentCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []DeploymentCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]DeploymentCondition, yyrl1) + } + } else { + yyv1 = make([]DeploymentCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = DeploymentCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, DeploymentCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = DeploymentCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, DeploymentCondition{}) // var yyz1 DeploymentCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = DeploymentCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []DeploymentCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceDeployment(v []Deployment, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + 
r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Deployment{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl131 > 0 { - var yyrr131, yyrl131 int - var yyrt131 bool - if yyl131 > cap(yyv131) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg131 := len(yyv131) > 0 - yyv2131 := yyv131 - yyrl131, yyrt131 = z.DecInferLen(yyl131, z.DecBasicHandle().MaxInitLen, 800) - if yyrt131 { - if yyrl131 <= cap(yyv131) { - yyv131 = yyv131[:yyrl131] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 920) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv131 = make([]StatefulSet, yyrl131) + yyv1 = make([]Deployment, yyrl1) } } else { - yyv131 = make([]StatefulSet, yyrl131) + yyv1 = make([]Deployment, yyrl1) } - yyc131 = true - yyrr131 = len(yyv131) - if yyrg131 { - copy(yyv131, yyv2131) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl131 != len(yyv131) { - yyv131 = yyv131[:yyl131] - yyc131 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj131 := 0 - for ; yyj131 < yyrr131; yyj131++ { - yyh131.ElemContainerState(yyj131) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv131[yyj131] = StatefulSet{} + yyv1[yyj1] = Deployment{} } else { - yyv132 := &yyv131[yyj131] - yyv132.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt131 { - for ; yyj131 < yyl131; yyj131++ { - yyv131 = append(yyv131, StatefulSet{}) - yyh131.ElemContainerState(yyj131) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Deployment{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv131[yyj131] = StatefulSet{} + yyv1[yyj1] = Deployment{} } else { - yyv133 := &yyv131[yyj131] - yyv133.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj131 := 0 - for ; !r.CheckBreak(); yyj131++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj131 >= len(yyv131) { - yyv131 = append(yyv131, StatefulSet{}) // var yyz131 StatefulSet - yyc131 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Deployment{}) // var yyz1 Deployment + yyc1 = true } - yyh131.ElemContainerState(yyj131) - if yyj131 < len(yyv131) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv131[yyj131] = StatefulSet{} + yyv1[yyj1] = Deployment{} } else { - yyv134 := &yyv131[yyj131] - yyv134.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -1643,16 +6470,16 @@ func (x codecSelfer1234) decSliceStatefulSet(v *[]StatefulSet, d *codec1978.Deco } } - if yyj131 < len(yyv131) { - yyv131 = yyv131[:yyj131] - yyc131 = true - } else if yyj131 == 0 && yyv131 == nil { - yyv131 = []StatefulSet{} - yyc131 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Deployment{} + yyc1 = true } } 
- yyh131.End() - if yyc131 { - *v = yyv131 + yyh1.End() + if yyc1 { + *v = yyv1 } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/types.go index c7b179f2c6..c76d4a5369 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/types.go @@ -17,10 +17,61 @@ limitations under the License. package v1beta1 import ( - "k8s.io/kubernetes/pkg/api/unversioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/kubernetes/pkg/api/v1" ) +const ( + // StatefulSetInitAnnotation if present, and set to false, indicates that a Pod's readiness should be ignored. + StatefulSetInitAnnotation = "pod.alpha.kubernetes.io/initialized" +) + +// ScaleSpec describes the attributes of a scale subresource +type ScaleSpec struct { + // desired number of instances for the scaled object. + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` +} + +// ScaleStatus represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` + + // label selector for pods that should match the replicas count. This is a serialized + // version of both map-based and more expressive set-based selectors. This is done to + // avoid introspection in the clients. The string will be in the same format as the + // query-param syntax. If the target type only supports map-based selectors, both this + // field and map-based selector field are populated. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` +} + +// +genclient=true +// +noMethods=true + +// Scale represents a scaling request for a resource. +type Scale struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // +optional + Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + // +genclient=true // StatefulSet represents a set of pods with consistent identities. @@ -30,9 +81,9 @@ import ( // The StatefulSet guarantees that a given network identity will always // map to the same storage identity. type StatefulSet struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the desired identities of pods in this set. 
// +optional @@ -58,7 +109,7 @@ type StatefulSetSpec struct { // If empty, defaulted to labels on the pod template. // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors // +optional - Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // Template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet @@ -86,7 +137,7 @@ type StatefulSetSpec struct { // StatefulSetStatus represents the current state of a StatefulSet. type StatefulSetStatus struct { - // most recent generation observed by this autoscaler. + // most recent generation observed by this StatefulSet. // +optional ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` @@ -96,8 +147,229 @@ type StatefulSetStatus struct { // StatefulSetList is a collection of StatefulSets. type StatefulSetList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true + +// Deployment enables declarative updates for Pods and ReplicaSets. +type Deployment struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the Deployment. + // +optional + Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the Deployment. + // +optional + Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +type DeploymentSpec struct { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + + // Template describes the pods that will be created. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 2. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` + + // Indicates that the deployment is paused. 
+ // +optional + Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` + + // The config this deployment is rolling back to. Will be cleared after rollback is done. + // +optional + RollbackTo *RollbackConfig `json:"rollbackTo,omitempty" protobuf:"bytes,8,opt,name=rollbackTo"` + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Once autoRollback is + // implemented, the deployment controller will automatically rollback failed + // deployments. Note that progress will not be estimated during the time a + // deployment is paused. Defaults to 600s. + ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` +} + +// DeploymentRollback stores the information required to rollback a deployment. +type DeploymentRollback struct { + metav1.TypeMeta `json:",inline"` + // Required: This must match the Name of a deployment. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // The annotations to be updated to a deployment + // +optional + UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"` + // The config of this deployment rollback. + RollbackTo RollbackConfig `json:"rollbackTo" protobuf:"bytes,3,opt,name=rollbackTo"` +} + +type RollbackConfig struct { + // The revision to rollback to. If set to 0, rollback to the last revision. + // +optional + Revision int64 `json:"revision,omitempty" protobuf:"varint,1,opt,name=revision"` +} + +const ( + // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added + // to existing RCs (and label key that is added to its pods) to prevent the existing RCs + // to select new pods (and old pods being selected by new RC). + DefaultDeploymentUniqueLabelKey string = "pod-template-hash" +) + +// DeploymentStrategy describes how to replace existing pods with new ones. +type DeploymentStrategy struct { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +type DeploymentStrategyType string + +const ( + // Kill all existing pods before creating new ones. + RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" + + // Replace the old RCs by new one using rolling update i.e. gradually scale down the old RCs and scale up the new one. + RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" +) + +// Spec to control the desired behavior of rolling update. +type RollingUpdateDeployment struct { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 25%. 
+ // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // Defaults to 25%. + // Example: when this is set to 30%, the new RC can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is at most 130% of desired pods. // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"` + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` +} + +// DeploymentStatus is the most recently observed status of the Deployment. +type DeploymentStatus struct { + // The generation observed by the deployment controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` + + // Total number of ready pods targeted by this deployment. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` + + // Total number of unavailable pods targeted by this deployment. + // +optional + UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` + + // Represents the latest available observations of a deployment's current state. + Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` +} + +type DeploymentConditionType string + +// These are valid conditions of a deployment. +const ( + // Available means the deployment is available, i.e. at least the minimum available + // replicas required are up and running for at least minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // Progressing means the deployment is progressing. Progress for a deployment is + // considered when a new replica set is created or adopted, and when new pods scale + // up or old pods scale down. 
Progress is not estimated for paused deployments or + // when progressDeadlineSeconds is not specified. + DeploymentProgressing DeploymentConditionType = "Progressing" + // ReplicaFailure is added in a deployment when one of its pods fails to be created + // or deleted. + DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + // The last time this condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// DeploymentList is a list of Deployments. +type DeploymentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Deployments. + Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/types_swagger_doc_generated.go index 837181c1c0..44e9f3e45e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/types_swagger_doc_generated.go @@ -27,6 +27,143 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE +var map_Deployment = map[string]string{ + "": "Deployment enables declarative updates for Pods and ReplicaSets.", + "metadata": "Standard object metadata.", + "spec": "Specification of the desired behavior of the Deployment.", + "status": "Most recently observed status of the Deployment.", +} + +func (Deployment) SwaggerDoc() map[string]string { + return map_Deployment +} + +var map_DeploymentCondition = map[string]string{ + "": "DeploymentCondition describes the state of a deployment at a certain point.", + "type": "Type of deployment condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DeploymentCondition) SwaggerDoc() map[string]string { + return map_DeploymentCondition +} + +var map_DeploymentList = map[string]string{ + "": "DeploymentList is a list of Deployments.", + "metadata": "Standard list metadata.", + "items": "Items is the list of Deployments.", +} + +func (DeploymentList) 
SwaggerDoc() map[string]string { + return map_DeploymentList +} + +var map_DeploymentRollback = map[string]string{ + "": "DeploymentRollback stores the information required to rollback a deployment.", + "name": "Required: This must match the Name of a deployment.", + "updatedAnnotations": "The annotations to be updated to a deployment", + "rollbackTo": "The config of this deployment rollback.", +} + +func (DeploymentRollback) SwaggerDoc() map[string]string { + return map_DeploymentRollback +} + +var map_DeploymentSpec = map[string]string{ + "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", + "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", + "template": "Template describes the pods that will be created.", + "strategy": "The deployment strategy to use to replace existing pods with new ones.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 2.", + "paused": "Indicates that the deployment is paused.", + "rollbackTo": "The config this deployment is rolling back to. Will be cleared after rollback is done.", + "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Once autoRollback is implemented, the deployment controller will automatically rollback failed deployments. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", +} + +func (DeploymentSpec) SwaggerDoc() map[string]string { + return map_DeploymentSpec +} + +var map_DeploymentStatus = map[string]string{ + "": "DeploymentStatus is the most recently observed status of the Deployment.", + "observedGeneration": "The generation observed by the deployment controller.", + "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", + "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "readyReplicas": "Total number of ready pods targeted by this deployment.", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", + "unavailableReplicas": "Total number of unavailable pods targeted by this deployment.", + "conditions": "Represents the latest available observations of a deployment's current state.", +} + +func (DeploymentStatus) SwaggerDoc() map[string]string { + return map_DeploymentStatus +} + +var map_DeploymentStrategy = map[string]string{ + "": "DeploymentStrategy describes how to replace existing pods with new ones.", + "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", + "rollingUpdate": "Rolling update config params. 
Present only if DeploymentStrategyType = RollingUpdate.", +} + +func (DeploymentStrategy) SwaggerDoc() map[string]string { + return map_DeploymentStrategy +} + +var map_RollbackConfig = map[string]string{ + "revision": "The revision to rollback to. If set to 0, rollbck to the last revision.", +} + +func (RollbackConfig) SwaggerDoc() map[string]string { + return map_RollbackConfig +} + +var map_RollingUpdateDeployment = map[string]string{ + "": "Spec to control the desired behavior of rolling update.", + "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", +} + +func (RollingUpdateDeployment) SwaggerDoc() map[string]string { + return map_RollingUpdateDeployment +} + +var map_Scale = map[string]string{ + "": "Scale represents a scaling request for a resource.", + "metadata": "Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.", +} + +func (Scale) SwaggerDoc() map[string]string { + return map_Scale +} + +var map_ScaleSpec = map[string]string{ + "": "ScaleSpec describes the attributes of a scale subresource", + "replicas": "desired number of instances for the scaled object.", +} + +func (ScaleSpec) SwaggerDoc() map[string]string { + return map_ScaleSpec +} + +var map_ScaleStatus = map[string]string{ + "": "ScaleStatus represents the current status of a scale subresource.", + "replicas": "actual number of observed instances of the scaled object.", + "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. 
More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", +} + +func (ScaleStatus) SwaggerDoc() map[string]string { + return map_ScaleStatus +} + var map_StatefulSet = map[string]string{ "": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", "spec": "Spec defines the desired identities of pods in this set.", @@ -60,7 +197,7 @@ func (StatefulSetSpec) SwaggerDoc() map[string]string { var map_StatefulSetStatus = map[string]string{ "": "StatefulSetStatus represents the current state of a StatefulSet.", - "observedGeneration": "most recent generation observed by this autoscaler.", + "observedGeneration": "most recent generation observed by this StatefulSet.", "replicas": "Replicas is the number of actual replicas.", } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.conversion.go index 1fff844f00..470832d23f 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,12 +21,12 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" + api_v1 "k8s.io/kubernetes/pkg/api/v1" apps "k8s.io/kubernetes/pkg/apis/apps" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" unsafe "unsafe" ) @@ -50,10 +50,7 @@ func RegisterConversions(scheme *runtime.Scheme) error { } func autoConvert_v1beta1_StatefulSet_To_apps_StatefulSet(in *StatefulSet, out *apps.StatefulSet, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -68,10 +65,7 @@ func Convert_v1beta1_StatefulSet_To_apps_StatefulSet(in *StatefulSet, out *apps. } func autoConvert_apps_StatefulSet_To_v1beta1_StatefulSet(in *apps.StatefulSet, out *StatefulSet, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -116,7 +110,7 @@ func autoConvert_apps_StatefulSetList_To_v1beta1_StatefulSetList(in *apps.Statef } } } else { - out.Items = nil + out.Items = make([]StatefulSet, 0) } return nil } @@ -126,11 +120,11 @@ func Convert_apps_StatefulSetList_To_v1beta1_StatefulSetList(in *apps.StatefulSe } func autoConvert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error { - if err := api.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } - out.Selector = (*unversioned.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } out.VolumeClaimTemplates = *(*[]api.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates)) @@ -139,14 +133,14 @@ func autoConvert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *StatefulSet } func autoConvert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.StatefulSetSpec, out *StatefulSetSpec, s conversion.Scope) error { - if err := api.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } - out.Selector = (*unversioned.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - out.VolumeClaimTemplates = *(*[]v1.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates)) + out.VolumeClaimTemplates = *(*[]api_v1.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates)) out.ServiceName = in.ServiceName return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.deepcopy.go index 26cda10bf4..d694c44381 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,10 +21,11 @@ limitations under the License. 
package v1beta1 import ( - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" + api_v1 "k8s.io/kubernetes/pkg/api/v1" reflect "reflect" ) @@ -36,6 +37,18 @@ func init() { // to allow building arbitrary schemes. func RegisterDeepCopies(scheme *runtime.Scheme) error { return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Deployment, InType: reflect.TypeOf(&Deployment{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentCondition, InType: reflect.TypeOf(&DeploymentCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentList, InType: reflect.TypeOf(&DeploymentList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentRollback, InType: reflect.TypeOf(&DeploymentRollback{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentSpec, InType: reflect.TypeOf(&DeploymentSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentStatus, InType: reflect.TypeOf(&DeploymentStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentStrategy, InType: reflect.TypeOf(&DeploymentStrategy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollbackConfig, InType: reflect.TypeOf(&RollbackConfig{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollingUpdateDeployment, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Scale, InType: reflect.TypeOf(&Scale{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StatefulSet, InType: reflect.TypeOf(&StatefulSet{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StatefulSetList, InType: reflect.TypeOf(&StatefulSetList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StatefulSetSpec, InType: reflect.TypeOf(&StatefulSetSpec{})}, @@ -43,13 +56,227 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { ) } +func DeepCopy_v1beta1_Deployment(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Deployment) + out := out.(*Deployment) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_DeploymentStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentCondition) + out := out.(*DeploymentCondition) + *out = *in + out.LastUpdateTime = in.LastUpdateTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_v1beta1_DeploymentList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentList) + out := out.(*DeploymentList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, 
len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_Deployment(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentRollback(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentRollback) + out := out.(*DeploymentRollback) + *out = *in + if in.UpdatedAnnotations != nil { + in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentSpec) + out := out.(*DeploymentSpec) + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, c); err != nil { + return err + } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.RollbackTo != nil { + in, out := &in.RollbackTo, &out.RollbackTo + *out = new(RollbackConfig) + **out = **in + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentStatus) + out := out.(*DeploymentStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_DeploymentCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentStrategy) + out := out.(*DeploymentStrategy) + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDeployment) + if err := DeepCopy_v1beta1_RollingUpdateDeployment(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1beta1_RollbackConfig(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollbackConfig) + out := out.(*RollbackConfig) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_RollingUpdateDeployment(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollingUpdateDeployment) + out := out.(*RollingUpdateDeployment) + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in + } + return nil + } +} + +func DeepCopy_v1beta1_Scale(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Scale) + out := out.(*Scale) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if 
err := DeepCopy_v1beta1_ScaleStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_ScaleSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleSpec) + out := out.(*ScaleSpec) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_ScaleStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleStatus) + out := out.(*ScaleStatus) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + func DeepCopy_v1beta1_StatefulSet(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*StatefulSet) out := out.(*StatefulSet) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_v1beta1_StatefulSetSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -65,8 +292,7 @@ func DeepCopy_v1beta1_StatefulSetList(in interface{}, out interface{}, c *conver { in := in.(*StatefulSetList) out := out.(*StatefulSetList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StatefulSet, len(*in)) @@ -75,8 +301,6 @@ func DeepCopy_v1beta1_StatefulSetList(in interface{}, out interface{}, c *conver return err } } - } else { - out.Items = nil } return nil } @@ -86,37 +310,32 @@ func DeepCopy_v1beta1_StatefulSetSpec(in interface{}, out interface{}, c *conver { in := in.(*StatefulSetSpec) out := out.(*StatefulSetSpec) + *out = *in if in.Replicas != nil { in, out := &in.Replicas, &out.Replicas *out = new(int32) **out = **in - } else { - out.Replicas = nil } if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*v1.LabelSelector) } - } else { - out.Selector = nil } - if err := v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { return err } if in.VolumeClaimTemplates != nil { in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]v1.PersistentVolumeClaim, len(*in)) + *out = make([]api_v1.PersistentVolumeClaim, len(*in)) for i := range *in { - if err := v1.DeepCopy_v1_PersistentVolumeClaim(&(*in)[i], &(*out)[i], c); err != nil { + if err := api_v1.DeepCopy_v1_PersistentVolumeClaim(&(*in)[i], &(*out)[i], c); err != nil { return err } } - } else { - out.VolumeClaimTemplates = nil } - out.ServiceName = in.ServiceName return nil } } @@ -125,14 +344,12 @@ func DeepCopy_v1beta1_StatefulSetStatus(in interface{}, out interface{}, c *conv { in := in.(*StatefulSetStatus) out := out.(*StatefulSetStatus) + *out = *in if in.ObservedGeneration != nil { in, out := &in.ObservedGeneration, &out.ObservedGeneration *out = new(int64) **out = **in - } else { - out.ObservedGeneration = nil } - out.Replicas = in.Replicas return nil } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.defaults.go index 
8493cd3a64..7d83113c8e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,19 +21,162 @@ limitations under the License. package v1beta1 import ( + runtime "k8s.io/apimachinery/pkg/runtime" v1 "k8s.io/kubernetes/pkg/api/v1" - runtime "k8s.io/kubernetes/pkg/runtime" ) // RegisterDefaults adds defaulters functions to the given scheme. // Public to allow building arbitrary schemes. // All generated defaulters are covering - they call all nested defaulters. func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&Deployment{}, func(obj interface{}) { SetObjectDefaults_Deployment(obj.(*Deployment)) }) + scheme.AddTypeDefaultingFunc(&DeploymentList{}, func(obj interface{}) { SetObjectDefaults_DeploymentList(obj.(*DeploymentList)) }) scheme.AddTypeDefaultingFunc(&StatefulSet{}, func(obj interface{}) { SetObjectDefaults_StatefulSet(obj.(*StatefulSet)) }) scheme.AddTypeDefaultingFunc(&StatefulSetList{}, func(obj interface{}) { SetObjectDefaults_StatefulSetList(obj.(*StatefulSetList)) }) return nil } +func SetObjectDefaults_Deployment(in *Deployment) { + SetDefaults_Deployment(in) + v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + v1.SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet 
!= nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_DeploymentList(in *DeploymentList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Deployment(a) + } +} + func SetObjectDefaults_StatefulSet(in *StatefulSet) { SetDefaults_StatefulSet(in) v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) @@ -64,6 +207,23 @@ func SetObjectDefaults_StatefulSet(in *StatefulSet) { if a.VolumeSource.AzureDisk != nil { v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } } for i := range in.Spec.Template.Spec.InitContainers { a := &in.Spec.Template.Spec.InitContainers[i] diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go index c9d148140b..26cff14990 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,10 +21,10 @@ limitations under the License. 
package apps import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" reflect "reflect" ) @@ -47,9 +47,11 @@ func DeepCopy_apps_StatefulSet(in interface{}, out interface{}, c *conversion.Cl { in := in.(*StatefulSet) out := out.(*StatefulSet) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_apps_StatefulSetSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -65,8 +67,7 @@ func DeepCopy_apps_StatefulSetList(in interface{}, out interface{}, c *conversio { in := in.(*StatefulSetList) out := out.(*StatefulSetList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StatefulSet, len(*in)) @@ -75,8 +76,6 @@ func DeepCopy_apps_StatefulSetList(in interface{}, out interface{}, c *conversio return err } } - } else { - out.Items = nil } return nil } @@ -86,15 +85,14 @@ func DeepCopy_apps_StatefulSetSpec(in interface{}, out interface{}, c *conversio { in := in.(*StatefulSetSpec) out := out.(*StatefulSetSpec) - out.Replicas = in.Replicas + *out = *in if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*v1.LabelSelector) } - } else { - out.Selector = nil } if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { return err @@ -107,10 +105,7 @@ func DeepCopy_apps_StatefulSetSpec(in interface{}, out interface{}, c *conversio return err } } - } else { - out.VolumeClaimTemplates = nil } - out.ServiceName = in.ServiceName return nil } } @@ -119,14 +114,12 @@ func DeepCopy_apps_StatefulSetStatus(in interface{}, out interface{}, c *convers { in := in.(*StatefulSetStatus) out := out.(*StatefulSetStatus) + *out = *in if in.ObservedGeneration != nil { in, out := &in.ObservedGeneration, &out.ObservedGeneration *out = new(int64) **out = **in - } else { - out.ObservedGeneration = nil } - out.Replicas = in.Replicas return nil } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/authentication/BUILD index d12e144d53..9fc60f952b 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -15,17 +12,32 @@ go_library( srcs = [ "doc.go", "register.go", - "types.generated.go", "types.go", "zz_generated.deepcopy.go", ], tags = ["automanaged"], deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + 
"//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/authentication/install:all-srcs", + "//pkg/apis/authentication/v1:all-srcs", + "//pkg/apis/authentication/v1beta1:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/authentication/OWNERS new file mode 100755 index 0000000000..4135522b21 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/OWNERS @@ -0,0 +1,9 @@ +reviewers: +- liggitt +- lavalamp +- wojtek-t +- deads2k +- sttts +- timothysc +- mbohlool +- jianhuiz diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/doc.go index 35cb18de8b..7a8a65b77f 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/doc.go @@ -16,5 +16,4 @@ limitations under the License. // +k8s:deepcopy-gen=package,register // +groupName=authentication.k8s.io -// +k8s:openapi-gen=true package authentication diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/authentication/install/BUILD index 0373e0e036..20a29fe1b0 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/install/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -15,9 +12,26 @@ go_library( srcs = ["install.go"], tags = ["automanaged"], deps = [ - "//pkg/apimachinery/announced:go_default_library", + "//pkg/api:go_default_library", "//pkg/apis/authentication:go_default_library", + "//pkg/apis/authentication/v1:go_default_library", "//pkg/apis/authentication/v1beta1:go_default_library", - "//pkg/util/sets:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/announced", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/registered", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/util/sets", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/install/install.go index 48796b9a09..2fa00ec6b1 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/install/install.go @@ -19,25 +19,35 @@ limitations under the License. 
package install import ( - "k8s.io/kubernetes/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/authentication" + "k8s.io/kubernetes/pkg/apis/authentication/v1" "k8s.io/kubernetes/pkg/apis/authentication/v1beta1" - "k8s.io/kubernetes/pkg/util/sets" ) func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { if err := announced.NewGroupMetaFactory( &announced.GroupMetaFactoryArgs{ GroupName: authentication.GroupName, - VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version}, + VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version, v1beta1.SchemeGroupVersion.Version}, ImportPrefix: "k8s.io/kubernetes/pkg/apis/authentication", RootScopedKinds: sets.NewString("TokenReview"), AddInternalObjectsToScheme: authentication.AddToScheme, }, announced.VersionToSchemeFunc{ v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, + v1.SchemeGroupVersion.Version: v1.AddToScheme, }, - ).Announce().RegisterAndEnable(); err != nil { + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { panic(err) } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/register.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/register.go index 09384b19f5..b0ac3c28bf 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/register.go @@ -17,24 +17,23 @@ limitations under the License. package authentication import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName is the group name use in this package const GroupName = "authentication.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} // Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { +func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } // Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { +func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } @@ -45,10 +44,6 @@ var ( func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &api.ListOptions{}, - &api.DeleteOptions{}, - &api.ExportOptions{}, - &TokenReview{}, ) return nil diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/types.generated.go deleted file mode 100644 index ea0b8f0e32..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/types.generated.go +++ /dev/null @@ -1,2394 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package authentication - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg2_api "k8s.io/kubernetes/pkg/api" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg3_types "k8s.io/kubernetes/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg2_api.ObjectMeta - var v1 pkg1_unversioned.TypeMeta - var v2 pkg3_types.UID - var v3 time.Time - _, _, _, _ = v0, v1, v2, v3 - } -} - -func (x *TokenReview) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [19]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = x.Name != "" - yyq2[3] = x.GenerateName != "" - yyq2[4] = x.Namespace != "" - yyq2[5] = x.SelfLink != "" - yyq2[6] = x.UID != "" - yyq2[7] = x.ResourceVersion != "" - yyq2[8] = x.Generation != 0 - yyq2[9] = true - yyq2[10] = x.ObjectMeta.DeletionTimestamp != nil && x.DeletionTimestamp != nil - yyq2[11] = x.ObjectMeta.DeletionGracePeriodSeconds != nil && x.DeletionGracePeriodSeconds != nil - yyq2[12] = len(x.Labels) != 0 - yyq2[13] = len(x.Annotations) != 0 - yyq2[14] = len(x.OwnerReferences) != 0 - yyq2[15] = len(x.Finalizers) != 0 - yyq2[16] = x.ClusterName != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(19) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if 
false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("generateName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selfLink")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeInt(int64(x.Generation)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("generation")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym29 := 
z.EncBinary() - _ = yym29 - if false { - } else { - r.EncodeInt(int64(x.Generation)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - yy31 := &x.CreationTimestamp - yym32 := z.EncBinary() - _ = yym32 - if false { - } else if z.HasExtensions() && z.EncExt(yy31) { - } else if yym32 { - z.EncBinaryMarshal(yy31) - } else if !yym32 && z.IsJSONHandle() { - z.EncJSONMarshal(yy31) - } else { - z.EncFallback(yy31) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("creationTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy33 := &x.CreationTimestamp - yym34 := z.EncBinary() - _ = yym34 - if false { - } else if z.HasExtensions() && z.EncExt(yy33) { - } else if yym34 { - z.EncBinaryMarshal(yy33) - } else if !yym34 && z.IsJSONHandle() { - z.EncJSONMarshal(yy33) - } else { - z.EncFallback(yy33) - } - } - } - var yyn35 bool - if x.ObjectMeta.DeletionTimestamp == nil { - yyn35 = true - goto LABEL35 - } - LABEL35: - if yyr2 || yy2arr2 { - if yyn35 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.DeletionTimestamp == nil { - r.EncodeNil() - } else { - yym36 := z.EncBinary() - _ = yym36 - if false { - } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { - } else if yym36 { - z.EncBinaryMarshal(x.DeletionTimestamp) - } else if !yym36 && z.IsJSONHandle() { - z.EncJSONMarshal(x.DeletionTimestamp) - } else { - z.EncFallback(x.DeletionTimestamp) - } - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletionTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn35 { - r.EncodeNil() - } else { - if x.DeletionTimestamp == nil { - r.EncodeNil() - } else { - yym37 := z.EncBinary() - _ = yym37 - if false { - } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { - } else if yym37 { - z.EncBinaryMarshal(x.DeletionTimestamp) - } else if !yym37 && z.IsJSONHandle() { - z.EncJSONMarshal(x.DeletionTimestamp) - } else { - z.EncFallback(x.DeletionTimestamp) - } - } - } - } - } - var yyn38 bool - if x.ObjectMeta.DeletionGracePeriodSeconds == nil { - yyn38 = true - goto LABEL38 - } - LABEL38: - if yyr2 || yy2arr2 { - if yyn38 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.DeletionGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy39 := *x.DeletionGracePeriodSeconds - yym40 := z.EncBinary() - _ = yym40 - if false { - } else { - r.EncodeInt(int64(yy39)) - } - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletionGracePeriodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn38 { - r.EncodeNil() - } else { - if x.DeletionGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy41 := *x.DeletionGracePeriodSeconds - yym42 := z.EncBinary() - _ = yym42 - if false { - } else { - r.EncodeInt(int64(yy41)) - } - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - if x.Labels == nil { - r.EncodeNil() - } else { - yym44 := z.EncBinary() - _ = yym44 - if false { - } else { - z.F.EncMapStringStringV(x.Labels, false, e) - 
} - } - } else { - r.EncodeNil() - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("labels")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Labels == nil { - r.EncodeNil() - } else { - yym45 := z.EncBinary() - _ = yym45 - if false { - } else { - z.F.EncMapStringStringV(x.Labels, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - if x.Annotations == nil { - r.EncodeNil() - } else { - yym47 := z.EncBinary() - _ = yym47 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("annotations")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Annotations == nil { - r.EncodeNil() - } else { - yym48 := z.EncBinary() - _ = yym48 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[14] { - if x.OwnerReferences == nil { - r.EncodeNil() - } else { - yym50 := z.EncBinary() - _ = yym50 - if false { - } else { - h.encSliceapi_OwnerReference(([]pkg2_api.OwnerReference)(x.OwnerReferences), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ownerReferences")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.OwnerReferences == nil { - r.EncodeNil() - } else { - yym51 := z.EncBinary() - _ = yym51 - if false { - } else { - h.encSliceapi_OwnerReference(([]pkg2_api.OwnerReference)(x.OwnerReferences), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[15] { - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym53 := z.EncBinary() - _ = yym53 - if false { - } else { - z.F.EncSliceStringV(x.Finalizers, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("finalizers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym54 := z.EncBinary() - _ = yym54 - if false { - } else { - z.F.EncSliceStringV(x.Finalizers, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[16] { - yym56 := z.EncBinary() - _ = yym56 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym57 := z.EncBinary() - _ = yym57 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy59 := &x.Spec - yy59.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy60 := &x.Spec - 
yy60.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy62 := &x.Status - yy62.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy63 := &x.Status - yy63.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *TokenReview) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym64 := z.DecBinary() - _ = yym64 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct65 := r.ContainerType() - if yyct65 == codecSelferValueTypeMap1234 { - yyl65 := r.ReadMapStart() - if yyl65 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl65, d) - } - } else if yyct65 == codecSelferValueTypeArray1234 { - yyl65 := r.ReadArrayStart() - if yyl65 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl65, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *TokenReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys66Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys66Slc - var yyhl66 bool = l >= 0 - for yyj66 := 0; ; yyj66++ { - if yyhl66 { - if yyj66 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys66Slc = r.DecodeBytes(yys66Slc, true, true) - yys66 := string(yys66Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys66 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "generateName": - if r.TryDecodeAsNil() { - x.GenerateName = "" - } else { - x.GenerateName = string(r.DecodeString()) - } - case "namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - case "selfLink": - if r.TryDecodeAsNil() { - x.SelfLink = "" - } else { - x.SelfLink = string(r.DecodeString()) - } - case "uid": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg3_types.UID(r.DecodeString()) - } - case "resourceVersion": - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - case "generation": - if r.TryDecodeAsNil() { - x.Generation = 0 - } else { - x.Generation = int64(r.DecodeInt(64)) - } - case "creationTimestamp": - if r.TryDecodeAsNil() { - x.CreationTimestamp = pkg1_unversioned.Time{} - } else { - yyv76 := &x.CreationTimestamp - yym77 := z.DecBinary() - _ = yym77 - if false { - } else if z.HasExtensions() && z.DecExt(yyv76) { - } else if yym77 { - z.DecBinaryUnmarshal(yyv76) - } else if !yym77 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv76) - } else { - z.DecFallback(yyv76, false) - } - } - case "deletionTimestamp": - if x.ObjectMeta.DeletionTimestamp == nil { - 
x.ObjectMeta.DeletionTimestamp = new(pkg1_unversioned.Time) - } - if r.TryDecodeAsNil() { - if x.DeletionTimestamp != nil { - x.DeletionTimestamp = nil - } - } else { - if x.DeletionTimestamp == nil { - x.DeletionTimestamp = new(pkg1_unversioned.Time) - } - yym79 := z.DecBinary() - _ = yym79 - if false { - } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym79 { - z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym79 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.DeletionTimestamp) - } else { - z.DecFallback(x.DeletionTimestamp, false) - } - } - case "deletionGracePeriodSeconds": - if x.ObjectMeta.DeletionGracePeriodSeconds == nil { - x.ObjectMeta.DeletionGracePeriodSeconds = new(int64) - } - if r.TryDecodeAsNil() { - if x.DeletionGracePeriodSeconds != nil { - x.DeletionGracePeriodSeconds = nil - } - } else { - if x.DeletionGracePeriodSeconds == nil { - x.DeletionGracePeriodSeconds = new(int64) - } - yym81 := z.DecBinary() - _ = yym81 - if false { - } else { - *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - case "labels": - if r.TryDecodeAsNil() { - x.Labels = nil - } else { - yyv82 := &x.Labels - yym83 := z.DecBinary() - _ = yym83 - if false { - } else { - z.F.DecMapStringStringX(yyv82, false, d) - } - } - case "annotations": - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv84 := &x.Annotations - yym85 := z.DecBinary() - _ = yym85 - if false { - } else { - z.F.DecMapStringStringX(yyv84, false, d) - } - } - case "ownerReferences": - if r.TryDecodeAsNil() { - x.OwnerReferences = nil - } else { - yyv86 := &x.OwnerReferences - yym87 := z.DecBinary() - _ = yym87 - if false { - } else { - h.decSliceapi_OwnerReference((*[]pkg2_api.OwnerReference)(yyv86), d) - } - } - case "finalizers": - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv88 := &x.Finalizers - yym89 := z.DecBinary() - _ = yym89 - if false { - } else { - z.F.DecSliceStringX(yyv88, false, d) - } - } - case "clusterName": - if r.TryDecodeAsNil() { - x.ClusterName = "" - } else { - x.ClusterName = string(r.DecodeString()) - } - case "Spec": - if r.TryDecodeAsNil() { - x.Spec = TokenReviewSpec{} - } else { - yyv91 := &x.Spec - yyv91.CodecDecodeSelf(d) - } - case "Status": - if r.TryDecodeAsNil() { - x.Status = TokenReviewStatus{} - } else { - yyv92 := &x.Status - yyv92.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys66) - } // end switch yys66 - } // end for yyj66 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *TokenReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj93 int - var yyb93 bool - var yyhl93 bool = l >= 0 - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.GenerateName = "" - } else { - x.GenerateName = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SelfLink = "" - } else { - x.SelfLink = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg3_types.UID(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Generation = 0 - } else { - x.Generation = int64(r.DecodeInt(64)) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CreationTimestamp = pkg1_unversioned.Time{} - } else { - yyv103 := &x.CreationTimestamp - yym104 := z.DecBinary() - _ = yym104 - if false { - } else if z.HasExtensions() && z.DecExt(yyv103) { - } else if yym104 { - z.DecBinaryUnmarshal(yyv103) - } else if !yym104 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv103) - } else { - z.DecFallback(yyv103, false) - } - } - if x.ObjectMeta.DeletionTimestamp == nil { - x.ObjectMeta.DeletionTimestamp = new(pkg1_unversioned.Time) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DeletionTimestamp != nil { - x.DeletionTimestamp = nil - } - } else { - if x.DeletionTimestamp == nil { - x.DeletionTimestamp = new(pkg1_unversioned.Time) - } - yym106 := z.DecBinary() - _ = yym106 - if false { - } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym106 { - 
z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym106 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.DeletionTimestamp) - } else { - z.DecFallback(x.DeletionTimestamp, false) - } - } - if x.ObjectMeta.DeletionGracePeriodSeconds == nil { - x.ObjectMeta.DeletionGracePeriodSeconds = new(int64) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DeletionGracePeriodSeconds != nil { - x.DeletionGracePeriodSeconds = nil - } - } else { - if x.DeletionGracePeriodSeconds == nil { - x.DeletionGracePeriodSeconds = new(int64) - } - yym108 := z.DecBinary() - _ = yym108 - if false { - } else { - *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Labels = nil - } else { - yyv109 := &x.Labels - yym110 := z.DecBinary() - _ = yym110 - if false { - } else { - z.F.DecMapStringStringX(yyv109, false, d) - } - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv111 := &x.Annotations - yym112 := z.DecBinary() - _ = yym112 - if false { - } else { - z.F.DecMapStringStringX(yyv111, false, d) - } - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.OwnerReferences = nil - } else { - yyv113 := &x.OwnerReferences - yym114 := z.DecBinary() - _ = yym114 - if false { - } else { - h.decSliceapi_OwnerReference((*[]pkg2_api.OwnerReference)(yyv113), d) - } - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv115 := &x.Finalizers - yym116 := z.DecBinary() - _ = yym116 - if false { - } else { - z.F.DecSliceStringX(yyv115, false, d) - } - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterName = "" - } else { - x.ClusterName = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = TokenReviewSpec{} - } else { - yyv118 := &x.Spec - yyv118.CodecDecodeSelf(d) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = TokenReviewStatus{} - } else { - yyv119 := &x.Status - yyv119.CodecDecodeSelf(d) - } - for { - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj93-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *TokenReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym120 := z.EncBinary() - _ = yym120 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep121 := !z.EncBinary() - yy2arr121 := z.EncBasicHandle().StructToArray - var yyq121 [1]bool - _, _, _ = yysep121, yyq121, yy2arr121 - const yyr121 bool = false - var yynn121 int - if yyr121 || yy2arr121 { - r.EncodeArrayStart(1) - } else { - yynn121 = 1 - for _, b := range yyq121 { - if b { - yynn121++ - } - } - r.EncodeMapStart(yynn121) - yynn121 = 0 - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym123 := z.EncBinary() - _ = yym123 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Token)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Token")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym124 := z.EncBinary() - _ = yym124 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Token)) - } - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *TokenReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym125 := z.DecBinary() - _ = yym125 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct126 := r.ContainerType() - if yyct126 == codecSelferValueTypeMap1234 { - yyl126 := r.ReadMapStart() - if yyl126 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl126, d) - } - } else if yyct126 == codecSelferValueTypeArray1234 { - yyl126 := r.ReadArrayStart() - if yyl126 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl126, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *TokenReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys127Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys127Slc - var yyhl127 bool = l >= 0 - for yyj127 := 0; ; yyj127++ { - if yyhl127 { - if yyj127 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys127Slc = r.DecodeBytes(yys127Slc, true, true) - yys127 := string(yys127Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys127 { - case "Token": - if r.TryDecodeAsNil() { - x.Token = "" - } else { - x.Token = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys127) - } // end switch yys127 - } // end for yyj127 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x 
*TokenReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj129 int - var yyb129 bool - var yyhl129 bool = l >= 0 - yyj129++ - if yyhl129 { - yyb129 = yyj129 > l - } else { - yyb129 = r.CheckBreak() - } - if yyb129 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Token = "" - } else { - x.Token = string(r.DecodeString()) - } - for { - yyj129++ - if yyhl129 { - yyb129 = yyj129 > l - } else { - yyb129 = r.CheckBreak() - } - if yyb129 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj129-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *TokenReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym131 := z.EncBinary() - _ = yym131 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep132 := !z.EncBinary() - yy2arr132 := z.EncBasicHandle().StructToArray - var yyq132 [3]bool - _, _, _ = yysep132, yyq132, yy2arr132 - const yyr132 bool = false - var yynn132 int - if yyr132 || yy2arr132 { - r.EncodeArrayStart(3) - } else { - yynn132 = 3 - for _, b := range yyq132 { - if b { - yynn132++ - } - } - r.EncodeMapStart(yynn132) - yynn132 = 0 - } - if yyr132 || yy2arr132 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym134 := z.EncBinary() - _ = yym134 - if false { - } else { - r.EncodeBool(bool(x.Authenticated)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Authenticated")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym135 := z.EncBinary() - _ = yym135 - if false { - } else { - r.EncodeBool(bool(x.Authenticated)) - } - } - if yyr132 || yy2arr132 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy137 := &x.User - yy137.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("User")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy138 := &x.User - yy138.CodecEncodeSelf(e) - } - if yyr132 || yy2arr132 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym140 := z.EncBinary() - _ = yym140 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Error)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Error")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym141 := z.EncBinary() - _ = yym141 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Error)) - } - } - if yyr132 || yy2arr132 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *TokenReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym142 := z.DecBinary() - _ = yym142 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct143 := r.ContainerType() - if yyct143 == codecSelferValueTypeMap1234 { - yyl143 := r.ReadMapStart() - if yyl143 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - 
x.codecDecodeSelfFromMap(yyl143, d) - } - } else if yyct143 == codecSelferValueTypeArray1234 { - yyl143 := r.ReadArrayStart() - if yyl143 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl143, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *TokenReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys144Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys144Slc - var yyhl144 bool = l >= 0 - for yyj144 := 0; ; yyj144++ { - if yyhl144 { - if yyj144 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys144Slc = r.DecodeBytes(yys144Slc, true, true) - yys144 := string(yys144Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys144 { - case "Authenticated": - if r.TryDecodeAsNil() { - x.Authenticated = false - } else { - x.Authenticated = bool(r.DecodeBool()) - } - case "User": - if r.TryDecodeAsNil() { - x.User = UserInfo{} - } else { - yyv146 := &x.User - yyv146.CodecDecodeSelf(d) - } - case "Error": - if r.TryDecodeAsNil() { - x.Error = "" - } else { - x.Error = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys144) - } // end switch yys144 - } // end for yyj144 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *TokenReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj148 int - var yyb148 bool - var yyhl148 bool = l >= 0 - yyj148++ - if yyhl148 { - yyb148 = yyj148 > l - } else { - yyb148 = r.CheckBreak() - } - if yyb148 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Authenticated = false - } else { - x.Authenticated = bool(r.DecodeBool()) - } - yyj148++ - if yyhl148 { - yyb148 = yyj148 > l - } else { - yyb148 = r.CheckBreak() - } - if yyb148 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.User = UserInfo{} - } else { - yyv150 := &x.User - yyv150.CodecDecodeSelf(d) - } - yyj148++ - if yyhl148 { - yyb148 = yyj148 > l - } else { - yyb148 = r.CheckBreak() - } - if yyb148 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Error = "" - } else { - x.Error = string(r.DecodeString()) - } - for { - yyj148++ - if yyhl148 { - yyb148 = yyj148 > l - } else { - yyb148 = r.CheckBreak() - } - if yyb148 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj148-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *UserInfo) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym152 := z.EncBinary() - _ = yym152 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep153 := !z.EncBinary() - yy2arr153 := z.EncBasicHandle().StructToArray - var yyq153 [4]bool - _, _, _ = yysep153, yyq153, yy2arr153 - const yyr153 bool = false - var yynn153 int - if yyr153 || 
yy2arr153 { - r.EncodeArrayStart(4) - } else { - yynn153 = 4 - for _, b := range yyq153 { - if b { - yynn153++ - } - } - r.EncodeMapStart(yynn153) - yynn153 = 0 - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym155 := z.EncBinary() - _ = yym155 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Username)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Username")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym156 := z.EncBinary() - _ = yym156 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Username)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym158 := z.EncBinary() - _ = yym158 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("UID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym159 := z.EncBinary() - _ = yym159 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Groups == nil { - r.EncodeNil() - } else { - yym161 := z.EncBinary() - _ = yym161 - if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Groups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Groups == nil { - r.EncodeNil() - } else { - yym162 := z.EncBinary() - _ = yym162 - if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Extra == nil { - r.EncodeNil() - } else { - yym164 := z.EncBinary() - _ = yym164 - if false { - } else { - h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Extra")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Extra == nil { - r.EncodeNil() - } else { - yym165 := z.EncBinary() - _ = yym165 - if false { - } else { - h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *UserInfo) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym166 := z.DecBinary() - _ = yym166 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct167 := r.ContainerType() - if yyct167 == codecSelferValueTypeMap1234 { - yyl167 := r.ReadMapStart() - if yyl167 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl167, d) - } - } else if yyct167 == codecSelferValueTypeArray1234 { - yyl167 := r.ReadArrayStart() - if yyl167 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl167, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *UserInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r 
:= codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys168Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys168Slc - var yyhl168 bool = l >= 0 - for yyj168 := 0; ; yyj168++ { - if yyhl168 { - if yyj168 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys168Slc = r.DecodeBytes(yys168Slc, true, true) - yys168 := string(yys168Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys168 { - case "Username": - if r.TryDecodeAsNil() { - x.Username = "" - } else { - x.Username = string(r.DecodeString()) - } - case "UID": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = string(r.DecodeString()) - } - case "Groups": - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv171 := &x.Groups - yym172 := z.DecBinary() - _ = yym172 - if false { - } else { - z.F.DecSliceStringX(yyv171, false, d) - } - } - case "Extra": - if r.TryDecodeAsNil() { - x.Extra = nil - } else { - yyv173 := &x.Extra - yym174 := z.DecBinary() - _ = yym174 - if false { - } else { - h.decMapstringExtraValue((*map[string]ExtraValue)(yyv173), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys168) - } // end switch yys168 - } // end for yyj168 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *UserInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj175 int - var yyb175 bool - var yyhl175 bool = l >= 0 - yyj175++ - if yyhl175 { - yyb175 = yyj175 > l - } else { - yyb175 = r.CheckBreak() - } - if yyb175 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Username = "" - } else { - x.Username = string(r.DecodeString()) - } - yyj175++ - if yyhl175 { - yyb175 = yyj175 > l - } else { - yyb175 = r.CheckBreak() - } - if yyb175 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = string(r.DecodeString()) - } - yyj175++ - if yyhl175 { - yyb175 = yyj175 > l - } else { - yyb175 = r.CheckBreak() - } - if yyb175 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv178 := &x.Groups - yym179 := z.DecBinary() - _ = yym179 - if false { - } else { - z.F.DecSliceStringX(yyv178, false, d) - } - } - yyj175++ - if yyhl175 { - yyb175 = yyj175 > l - } else { - yyb175 = r.CheckBreak() - } - if yyb175 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Extra = nil - } else { - yyv180 := &x.Extra - yym181 := z.DecBinary() - _ = yym181 - if false { - } else { - h.decMapstringExtraValue((*map[string]ExtraValue)(yyv180), d) - } - } - for { - yyj175++ - if yyhl175 { - yyb175 = yyj175 > l - } else { - yyb175 = r.CheckBreak() - } - if yyb175 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj175-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ExtraValue) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - 
r.EncodeNil() - } else { - yym182 := z.EncBinary() - _ = yym182 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - h.encExtraValue((ExtraValue)(x), e) - } - } -} - -func (x *ExtraValue) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym183 := z.DecBinary() - _ = yym183 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - h.decExtraValue((*ExtraValue)(x), d) - } -} - -func (x codecSelfer1234) encSliceapi_OwnerReference(v []pkg2_api.OwnerReference, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv184 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy185 := &yyv184 - yy185.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceapi_OwnerReference(v *[]pkg2_api.OwnerReference, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv186 := *v - yyh186, yyl186 := z.DecSliceHelperStart() - var yyc186 bool - if yyl186 == 0 { - if yyv186 == nil { - yyv186 = []pkg2_api.OwnerReference{} - yyc186 = true - } else if len(yyv186) != 0 { - yyv186 = yyv186[:0] - yyc186 = true - } - } else if yyl186 > 0 { - var yyrr186, yyrl186 int - var yyrt186 bool - if yyl186 > cap(yyv186) { - - yyrg186 := len(yyv186) > 0 - yyv2186 := yyv186 - yyrl186, yyrt186 = z.DecInferLen(yyl186, z.DecBasicHandle().MaxInitLen, 72) - if yyrt186 { - if yyrl186 <= cap(yyv186) { - yyv186 = yyv186[:yyrl186] - } else { - yyv186 = make([]pkg2_api.OwnerReference, yyrl186) - } - } else { - yyv186 = make([]pkg2_api.OwnerReference, yyrl186) - } - yyc186 = true - yyrr186 = len(yyv186) - if yyrg186 { - copy(yyv186, yyv2186) - } - } else if yyl186 != len(yyv186) { - yyv186 = yyv186[:yyl186] - yyc186 = true - } - yyj186 := 0 - for ; yyj186 < yyrr186; yyj186++ { - yyh186.ElemContainerState(yyj186) - if r.TryDecodeAsNil() { - yyv186[yyj186] = pkg2_api.OwnerReference{} - } else { - yyv187 := &yyv186[yyj186] - yyv187.CodecDecodeSelf(d) - } - - } - if yyrt186 { - for ; yyj186 < yyl186; yyj186++ { - yyv186 = append(yyv186, pkg2_api.OwnerReference{}) - yyh186.ElemContainerState(yyj186) - if r.TryDecodeAsNil() { - yyv186[yyj186] = pkg2_api.OwnerReference{} - } else { - yyv188 := &yyv186[yyj186] - yyv188.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj186 := 0 - for ; !r.CheckBreak(); yyj186++ { - - if yyj186 >= len(yyv186) { - yyv186 = append(yyv186, pkg2_api.OwnerReference{}) // var yyz186 pkg2_api.OwnerReference - yyc186 = true - } - yyh186.ElemContainerState(yyj186) - if yyj186 < len(yyv186) { - if r.TryDecodeAsNil() { - yyv186[yyj186] = pkg2_api.OwnerReference{} - } else { - yyv189 := &yyv186[yyj186] - yyv189.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj186 < len(yyv186) { - yyv186 = yyv186[:yyj186] - yyc186 = true - } else if yyj186 == 0 && yyv186 == nil { - yyv186 = []pkg2_api.OwnerReference{} - yyc186 = true - } - } - yyh186.End() - if yyc186 { - *v = yyv186 - } -} - -func (x codecSelfer1234) encMapstringExtraValue(v map[string]ExtraValue, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk190, yyv190 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym191 := z.EncBinary() - _ = yym191 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(yyk190)) - } - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyv190 == nil { - r.EncodeNil() - } else { - yyv190.CodecEncodeSelf(e) - } - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decMapstringExtraValue(v *map[string]ExtraValue, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv192 := *v - yyl192 := r.ReadMapStart() - yybh192 := z.DecBasicHandle() - if yyv192 == nil { - yyrl192, _ := z.DecInferLen(yyl192, yybh192.MaxInitLen, 40) - yyv192 = make(map[string]ExtraValue, yyrl192) - *v = yyv192 - } - var yymk192 string - var yymv192 ExtraValue - var yymg192 bool - if yybh192.MapValueReset { - yymg192 = true - } - if yyl192 > 0 { - for yyj192 := 0; yyj192 < yyl192; yyj192++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk192 = "" - } else { - yymk192 = string(r.DecodeString()) - } - - if yymg192 { - yymv192 = yyv192[yymk192] - } else { - yymv192 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv192 = nil - } else { - yyv194 := &yymv192 - yyv194.CodecDecodeSelf(d) - } - - if yyv192 != nil { - yyv192[yymk192] = yymv192 - } - } - } else if yyl192 < 0 { - for yyj192 := 0; !r.CheckBreak(); yyj192++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk192 = "" - } else { - yymk192 = string(r.DecodeString()) - } - - if yymg192 { - yymv192 = yyv192[yymk192] - } else { - yymv192 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv192 = nil - } else { - yyv196 := &yymv192 - yyv196.CodecDecodeSelf(d) - } - - if yyv192 != nil { - yyv192[yymk192] = yymv192 - } - } - } // else len==0: TODO: Should we clear map entries? 
- z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) encExtraValue(v ExtraValue, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv197 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym198 := z.EncBinary() - _ = yym198 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyv197)) - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decExtraValue(v *ExtraValue, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv199 := *v - yyh199, yyl199 := z.DecSliceHelperStart() - var yyc199 bool - if yyl199 == 0 { - if yyv199 == nil { - yyv199 = []string{} - yyc199 = true - } else if len(yyv199) != 0 { - yyv199 = yyv199[:0] - yyc199 = true - } - } else if yyl199 > 0 { - var yyrr199, yyrl199 int - var yyrt199 bool - if yyl199 > cap(yyv199) { - - yyrl199, yyrt199 = z.DecInferLen(yyl199, z.DecBasicHandle().MaxInitLen, 16) - if yyrt199 { - if yyrl199 <= cap(yyv199) { - yyv199 = yyv199[:yyrl199] - } else { - yyv199 = make([]string, yyrl199) - } - } else { - yyv199 = make([]string, yyrl199) - } - yyc199 = true - yyrr199 = len(yyv199) - } else if yyl199 != len(yyv199) { - yyv199 = yyv199[:yyl199] - yyc199 = true - } - yyj199 := 0 - for ; yyj199 < yyrr199; yyj199++ { - yyh199.ElemContainerState(yyj199) - if r.TryDecodeAsNil() { - yyv199[yyj199] = "" - } else { - yyv199[yyj199] = string(r.DecodeString()) - } - - } - if yyrt199 { - for ; yyj199 < yyl199; yyj199++ { - yyv199 = append(yyv199, "") - yyh199.ElemContainerState(yyj199) - if r.TryDecodeAsNil() { - yyv199[yyj199] = "" - } else { - yyv199[yyj199] = string(r.DecodeString()) - } - - } - } - - } else { - yyj199 := 0 - for ; !r.CheckBreak(); yyj199++ { - - if yyj199 >= len(yyv199) { - yyv199 = append(yyv199, "") // var yyz199 string - yyc199 = true - } - yyh199.ElemContainerState(yyj199) - if yyj199 < len(yyv199) { - if r.TryDecodeAsNil() { - yyv199[yyj199] = "" - } else { - yyv199[yyj199] = string(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj199 < len(yyv199) { - yyv199 = yyv199[:yyj199] - yyc199 = true - } else if yyj199 == 0 && yyv199 == nil { - yyv199 = []string{} - yyc199 = true - } - } - yyh199.End() - if yyc199 { - *v = yyv199 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/types.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/types.go index 16d7e8c2ae..9c1e66b7bb 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/types.go @@ -17,8 +17,7 @@ limitations under the License. package authentication import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( @@ -42,10 +41,10 @@ const ( // TokenReview attempts to authenticate a token to a known user. 
type TokenReview struct { - unversioned.TypeMeta - // ObjectMeta fulfills the meta.ObjectMetaAccessor interface so that the stock + metav1.TypeMeta + // ObjectMeta fulfills the metav1.ObjectMetaAccessor interface so that the stock // REST handler paths work - api.ObjectMeta + metav1.ObjectMeta // Spec holds information about the request being evaluated Spec TokenReviewSpec diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/BUILD new file mode 100644 index 0000000000..387b663e3f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/BUILD @@ -0,0 +1,47 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "conversion.go", + "defaults.go", + "doc.go", + "generated.pb.go", + "register.go", + "types.go", + "types_swagger_doc_generated.go", + "zz_generated.conversion.go", + "zz_generated.deepcopy.go", + "zz_generated.defaults.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/authentication:go_default_library", + "//vendor:github.com/gogo/protobuf/proto", + "//vendor:github.com/gogo/protobuf/sortkeys", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/conversion.go new file mode 100644 index 0000000000..2ff5732d6d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/conversion.go @@ -0,0 +1,26 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + return scheme.AddConversionFuncs() +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/defaults.go new file mode 100644 index 0000000000..d63d917543 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/defaults.go @@ -0,0 +1,25 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return scheme.AddDefaultingFuncs() +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/doc.go new file mode 100644 index 0000000000..0c6ce0ea4d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/authentication +// +groupName=authentication.k8s.io +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta +package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/generated.pb.go new file mode 100644 index 0000000000..6366c71ae9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/generated.pb.go @@ -0,0 +1,1281 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/authentication/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/authentication/v1/generated.proto + + It has these top-level messages: + ExtraValue + TokenReview + TokenReviewSpec + TokenReviewStatus + UserInfo +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *TokenReview) Reset() { *m = TokenReview{} } +func (*TokenReview) ProtoMessage() {} +func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } +func (*TokenReviewSpec) ProtoMessage() {} +func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } +func (*TokenReviewStatus) ProtoMessage() {} +func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *UserInfo) Reset() { *m = UserInfo{} } +func (*UserInfo) ProtoMessage() {} +func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func init() { + proto.RegisterType((*ExtraValue)(nil), "k8s.io.kubernetes.pkg.apis.authentication.v1.ExtraValue") + proto.RegisterType((*TokenReview)(nil), "k8s.io.kubernetes.pkg.apis.authentication.v1.TokenReview") + proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.kubernetes.pkg.apis.authentication.v1.TokenReviewSpec") + proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.kubernetes.pkg.apis.authentication.v1.TokenReviewStatus") + proto.RegisterType((*UserInfo)(nil), "k8s.io.kubernetes.pkg.apis.authentication.v1.UserInfo") +} +func (m ExtraValue) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m ExtraValue) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *TokenReview) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *TokenReview) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *TokenReviewSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *TokenReviewSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Token))) + i += copy(data[i:], m.Token) + return i, nil +} + +func (m *TokenReviewStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err 
!= nil { + return nil, err + } + return data[:n], nil +} + +func (m *TokenReviewStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Authenticated { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.User.Size())) + n4, err := m.User.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Error))) + i += copy(data[i:], m.Error) + return i, nil +} + +func (m *UserInfo) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *UserInfo) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Username))) + i += copy(data[i:], m.Username) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.UID))) + i += copy(data[i:], m.UID) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Extra) > 0 { + for k := range m.Extra { + data[i] = 0x22 + i++ + v := m.Extra[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n5, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + } + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m ExtraValue) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TokenReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TokenReviewSpec) Size() (n int) { + var l int + _ = l + l = len(m.Token) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TokenReviewStatus) Size() (n int) { + var l int + _ = l + n += 2 + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *UserInfo) Size() (n int) { + var l int + _ = l + l = len(m.Username) + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *TokenReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TokenReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReviewSpec{`, + `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `}`, + }, "") + return s +} +func (this *TokenReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReviewStatus{`, + `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, + `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *UserInfo) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&UserInfo{`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ExtraValue) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) 
+ } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReview) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReviewSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReviewStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Authenticated", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Authenticated = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.User.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserInfo) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + m.Extra[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 655 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x53, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0xb5, 0xf3, 0x53, 0x25, 0x93, 0xaf, 0x1f, 0x65, 0x24, 0xa4, 0x28, 0x12, 0x4e, 0x14, 0x58, + 0x74, 0x51, 0xc6, 0xa4, 0xa0, 0x52, 0x15, 0x10, 0xaa, 0x45, 0x85, 0xba, 0x00, 0xa4, 0x81, 0x22, + 0xc4, 0x06, 0x26, 0xce, 0xad, 0x63, 0x52, 0xff, 0x68, 0x3c, 0x36, 0xed, 0xae, 0x8f, 0xc0, 0x92, + 0x25, 0xaf, 0xc1, 0x1b, 0x74, 0x47, 0x77, 0xb0, 0x40, 0x15, 0x0d, 0x2f, 0x82, 0x66, 0x3c, 0xd4, + 0x2e, 0x69, 0x85, 0xda, 0xdd, 0xcc, 0x99, 0x7b, 0xce, 0xbd, 0xe7, 0xde, 0xb9, 0xe8, 0xc1, 0x64, + 0x35, 0x21, 0x7e, 0x64, 0x4f, 0xd2, 0x21, 0xf0, 0x10, 0x04, 0x24, 0x76, 0x3c, 0xf1, 0x6c, 0x16, + 0xfb, 0x89, 0xcd, 0x52, 0x31, 0x86, 0x50, 0xf8, 0x2e, 0x13, 0x7e, 0x14, 0xda, 0xd9, 0xc0, 0xf6, + 0x20, 0x04, 0xce, 0x04, 0x8c, 0x48, 0xcc, 0x23, 0x11, 0xe1, 0xa5, 0x9c, 0x4d, 0x0a, 0x36, 0x89, + 0x27, 0x1e, 0x91, 0x6c, 0x72, 0x9a, 0x4d, 0xb2, 0x41, 0xe7, 0x96, 0xe7, 0x8b, 0x71, 0x3a, 0x24, + 0x6e, 0x14, 0xd8, 0x5e, 0xe4, 0x45, 0xb6, 0x12, 0x19, 0xa6, 0xdb, 0xea, 0xa6, 0x2e, 0xea, 0x94, + 0x8b, 0x77, 0xee, 0xea, 0xd2, 0x58, 0xec, 0x07, 0xcc, 0x1d, 0xfb, 0x21, 0xf0, 0xbd, 0xa2, 0xb8, + 0x00, 0x04, 0x3b, 0xa3, 0xa4, 0x8e, 0x7d, 0x1e, 0x8b, 0xa7, 0xa1, 0xf0, 0x03, 0x98, 0x21, 0xac, + 0xfc, 0x8b, 0x90, 0xb8, 0x63, 0x08, 0xd8, 0x0c, 0xef, 0xce, 0x79, 0xbc, 0x54, 0xf8, 0x3b, 0xb6, + 0x1f, 0x8a, 0x44, 0xf0, 0x19, 0x52, 0xc9, 0x53, 0x02, 0x3c, 0x03, 0x5e, 0x18, 0x82, 0x5d, 0x16, + 0xc4, 0x3b, 0x70, 0x86, 0xa7, 0xfe, 0x3d, 0x84, 0x36, 0x76, 0x05, 0x67, 0xaf, 0xd8, 0x4e, 0x0a, + 0xb8, 0x8b, 0xea, 0xbe, 0x80, 0x20, 0x69, 0x9b, 0xbd, 0xea, 0x62, 0xd3, 0x69, 0x4e, 0x8f, 0xba, + 0xf5, 0x4d, 0x09, 0xd0, 0x1c, 0x5f, 0x6b, 0x7c, 0xfa, 0xdc, 0x35, 0xf6, 0x7f, 0xf4, 0x8c, 0xfe, + 0x97, 0x0a, 0x6a, 0xbd, 0x8c, 0x26, 0x10, 0x52, 0xc8, 0x7c, 0xf8, 0x80, 0xdf, 0xa1, 0x86, 0xec, + 0xdb, 0x88, 0x09, 0xd6, 0x36, 0x7b, 0xe6, 0x62, 0x6b, 0xf9, 0x36, 0xd1, 0x23, 0x2c, 0xdb, 0x28, + 0x86, 0x28, 0xa3, 0x49, 0x36, 0x20, 0xcf, 0x87, 0xef, 0xc1, 0x15, 0x4f, 0x41, 0x30, 0x07, 0x1f, + 0x1c, 0x75, 0x8d, 0xe9, 0x51, 0x17, 0x15, 0x18, 0x3d, 0x51, 0xc5, 0x6f, 0x51, 0x2d, 0x89, 0xc1, + 0x6d, 0x57, 0x94, 0xfa, 0x43, 0x72, 0x91, 0x0f, 0x42, 0x4a, 0xa5, 0xbe, 0x88, 0xc1, 0x75, 0xfe, + 0xd3, 0xa9, 0x6a, 0xf2, 0x46, 0x95, 0x30, 0xf6, 0xd0, 0x5c, 0x22, 0x98, 0x48, 0x93, 0x76, 0x55, + 0xa5, 
0x78, 0x74, 0xf9, 0x14, 0x4a, 0xc6, 0xf9, 0x5f, 0x27, 0x99, 0xcb, 0xef, 0x54, 0xcb, 0xf7, + 0x57, 0xd0, 0x95, 0xbf, 0xea, 0xc1, 0x37, 0x50, 0x5d, 0x48, 0x48, 0xf5, 0xae, 0xe9, 0xcc, 0x6b, + 0x66, 0x3d, 0x8f, 0xcb, 0xdf, 0xfa, 0x5f, 0x4d, 0x74, 0x75, 0x26, 0x0b, 0xbe, 0x8f, 0xe6, 0x4b, + 0xc5, 0xc0, 0x48, 0x49, 0x34, 0x9c, 0x6b, 0x5a, 0x62, 0x7e, 0xbd, 0xfc, 0x48, 0x4f, 0xc7, 0xe2, + 0xd7, 0xa8, 0x96, 0x26, 0xc0, 0x75, 0x53, 0x57, 0x2e, 0xe6, 0x78, 0x2b, 0x01, 0xbe, 0x19, 0x6e, + 0x47, 0x45, 0x37, 0x25, 0x42, 0x95, 0xa2, 0x74, 0x04, 0x9c, 0x47, 0x5c, 0x35, 0xb3, 0xe4, 0x68, + 0x43, 0x82, 0x34, 0x7f, 0xeb, 0x7f, 0xab, 0xa0, 0xc6, 0x1f, 0x15, 0xbc, 0x84, 0x1a, 0x92, 0x19, + 0xb2, 0x00, 0x74, 0x1b, 0x16, 0x34, 0x49, 0xc5, 0x48, 0x9c, 0x9e, 0x44, 0xe0, 0xeb, 0xa8, 0x9a, + 0xfa, 0x23, 0x55, 0x78, 0xd3, 0x69, 0xe9, 0xc0, 0xea, 0xd6, 0xe6, 0x63, 0x2a, 0x71, 0xdc, 0x47, + 0x73, 0x1e, 0x8f, 0xd2, 0x58, 0x0e, 0x53, 0xfe, 0x65, 0x24, 0xe7, 0xf0, 0x44, 0x21, 0x54, 0xbf, + 0xe0, 0x6d, 0x54, 0x07, 0xf9, 0xf9, 0xdb, 0xb5, 0x5e, 0x75, 0xb1, 0xb5, 0xbc, 0x7e, 0x39, 0xf7, + 0x44, 0x2d, 0xd0, 0x46, 0x28, 0xf8, 0x5e, 0xc9, 0xa5, 0xc4, 0x68, 0x2e, 0xdf, 0xe1, 0x7a, 0xc9, + 0x54, 0x0c, 0x5e, 0x40, 0xd5, 0x09, 0xec, 0xe5, 0x0e, 0xa9, 0x3c, 0xe2, 0x67, 0xa8, 0x9e, 0xc9, + 0xfd, 0xd3, 0x53, 0x58, 0xbd, 0x58, 0x1d, 0xc5, 0xfe, 0xd2, 0x5c, 0x66, 0xad, 0xb2, 0x6a, 0x3a, + 0x37, 0x0f, 0x8e, 0x2d, 0xe3, 0xf0, 0xd8, 0x32, 0xbe, 0x1f, 0x5b, 0xc6, 0xfe, 0xd4, 0x32, 0x0f, + 0xa6, 0x96, 0x79, 0x38, 0xb5, 0xcc, 0x9f, 0x53, 0xcb, 0xfc, 0xf8, 0xcb, 0x32, 0xde, 0x54, 0xb2, + 0xc1, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbb, 0x6b, 0x11, 0x20, 0xa4, 0x05, 0x00, 0x00, +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/generated.proto new file mode 100644 index 0000000000..ea5203d37b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/generated.proto @@ -0,0 +1,100 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.authentication.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message ExtraValue { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// TokenReview attempts to authenticate a token to a known user. +// Note: TokenReview requests may be cached by the webhook token authenticator +// plugin in the kube-apiserver. 
+message TokenReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated + optional TokenReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request can be authenticated. + // +optional + optional TokenReviewStatus status = 3; +} + +// TokenReviewSpec is a description of the token authentication request. +message TokenReviewSpec { + // Token is the opaque bearer token. + // +optional + optional string token = 1; +} + +// TokenReviewStatus is the result of the token authentication request. +message TokenReviewStatus { + // Authenticated indicates that the token was associated with a known user. + // +optional + optional bool authenticated = 1; + + // User is the UserInfo associated with the provided token. + // +optional + optional UserInfo user = 2; + + // Error indicates that the token couldn't be checked + // +optional + optional string error = 3; +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +message UserInfo { + // The name that uniquely identifies this user among all active users. + // +optional + optional string username = 1; + + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + // +optional + optional string uid = 2; + + // The names of groups this user is a part of. + // +optional + repeated string groups = 3; + + // Any additional information provided by the authenticator. + // +optional + map<string, ExtraValue> extra = 4; +} + diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/register.go new file mode 100644 index 0000000000..8661169af4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/register.go @@ -0,0 +1,48 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "authentication.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &TokenReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/types.go new file mode 100644 index 0000000000..e6ff587056 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/types.go @@ -0,0 +1,91 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient=true +// +nonNamespaced=true +// +noMethods=true + +// TokenReview attempts to authenticate a token to a known user. +// Note: TokenReview requests may be cached by the webhook token authenticator +// plugin in the kube-apiserver. +type TokenReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated + Spec TokenReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request can be authenticated. + // +optional + Status TokenReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// TokenReviewSpec is a description of the token authentication request. +type TokenReviewSpec struct { + // Token is the opaque bearer token. + // +optional + Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` +} + +// TokenReviewStatus is the result of the token authentication request. +type TokenReviewStatus struct { + // Authenticated indicates that the token was associated with a known user. + // +optional + Authenticated bool `json:"authenticated,omitempty" protobuf:"varint,1,opt,name=authenticated"` + // User is the UserInfo associated with the provided token. + // +optional + User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // Error indicates that the token couldn't be checked + // +optional + Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +type UserInfo struct { + // The name that uniquely identifies this user among all active users. + // +optional + Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"` + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + // +optional + UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"` + // The names of groups this user is a part of. + // +optional + Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` + // Any additional information provided by the authenticator. 
+ // +optional + Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,4,rep,name=extra"` +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type ExtraValue []string + +func (t ExtraValue) String() string { + return fmt.Sprintf("%v", []string(t)) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..bb235e4eaa --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/types_swagger_doc_generated.go @@ -0,0 +1,72 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_TokenReview = map[string]string{ + "": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", + "spec": "Spec holds information about the request being evaluated", + "status": "Status is filled in by the server and indicates whether the request can be authenticated.", +} + +func (TokenReview) SwaggerDoc() map[string]string { + return map_TokenReview +} + +var map_TokenReviewSpec = map[string]string{ + "": "TokenReviewSpec is a description of the token authentication request.", + "token": "Token is the opaque bearer token.", +} + +func (TokenReviewSpec) SwaggerDoc() map[string]string { + return map_TokenReviewSpec +} + +var map_TokenReviewStatus = map[string]string{ + "": "TokenReviewStatus is the result of the token authentication request.", + "authenticated": "Authenticated indicates that the token was associated with a known user.", + "user": "User is the UserInfo associated with the provided token.", + "error": "Error indicates that the token couldn't be checked", +} + +func (TokenReviewStatus) SwaggerDoc() map[string]string { + return map_TokenReviewStatus +} + +var map_UserInfo = map[string]string{ + "": "UserInfo holds the information about the user needed to implement the user.Info interface.", + "username": "The name that uniquely identifies this user among all active users.", + "uid": "A unique value that identifies this user across time. 
If this user is deleted and another user by the same name is added, they will have different UIDs.", + "groups": "The names of groups this user is a part of.", + "extra": "Any additional information provided by the authenticator.", +} + +func (UserInfo) SwaggerDoc() map[string]string { + return map_UserInfo +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.conversion.go new file mode 100644 index 0000000000..2b10a91b81 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.conversion.go @@ -0,0 +1,145 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + authentication "k8s.io/kubernetes/pkg/apis/authentication" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_TokenReview_To_authentication_TokenReview, + Convert_authentication_TokenReview_To_v1_TokenReview, + Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec, + Convert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec, + Convert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus, + Convert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus, + Convert_v1_UserInfo_To_authentication_UserInfo, + Convert_authentication_UserInfo_To_v1_UserInfo, + ) +} + +func autoConvert_v1_TokenReview_To_authentication_TokenReview(in *TokenReview, out *authentication.TokenReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_TokenReview_To_authentication_TokenReview(in *TokenReview, out *authentication.TokenReview, s conversion.Scope) error { + return autoConvert_v1_TokenReview_To_authentication_TokenReview(in, out, s) +} + +func autoConvert_authentication_TokenReview_To_v1_TokenReview(in *authentication.TokenReview, out *TokenReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func 
Convert_authentication_TokenReview_To_v1_TokenReview(in *authentication.TokenReview, out *TokenReview, s conversion.Scope) error { + return autoConvert_authentication_TokenReview_To_v1_TokenReview(in, out, s) +} + +func autoConvert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error { + out.Token = in.Token + return nil +} + +func Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error { + return autoConvert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in, out, s) +} + +func autoConvert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { + out.Token = in.Token + return nil +} + +func Convert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { + return autoConvert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(in, out, s) +} + +func autoConvert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *TokenReviewStatus, out *authentication.TokenReviewStatus, s conversion.Scope) error { + out.Authenticated = in.Authenticated + if err := Convert_v1_UserInfo_To_authentication_UserInfo(&in.User, &out.User, s); err != nil { + return err + } + out.Error = in.Error + return nil +} + +func Convert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *TokenReviewStatus, out *authentication.TokenReviewStatus, s conversion.Scope) error { + return autoConvert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(in, out, s) +} + +func autoConvert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(in *authentication.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { + out.Authenticated = in.Authenticated + if err := Convert_authentication_UserInfo_To_v1_UserInfo(&in.User, &out.User, s); err != nil { + return err + } + out.Error = in.Error + return nil +} + +func Convert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(in *authentication.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { + return autoConvert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(in, out, s) +} + +func autoConvert_v1_UserInfo_To_authentication_UserInfo(in *UserInfo, out *authentication.UserInfo, s conversion.Scope) error { + out.Username = in.Username + out.UID = in.UID + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]authentication.ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_v1_UserInfo_To_authentication_UserInfo(in *UserInfo, out *authentication.UserInfo, s conversion.Scope) error { + return autoConvert_v1_UserInfo_To_authentication_UserInfo(in, out, s) +} + +func autoConvert_authentication_UserInfo_To_v1_UserInfo(in *authentication.UserInfo, out *UserInfo, s conversion.Scope) error { + out.Username = in.Username + out.UID = in.UID + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_authentication_UserInfo_To_v1_UserInfo(in *authentication.UserInfo, out *UserInfo, s conversion.Scope) error { + return autoConvert_authentication_UserInfo_To_v1_UserInfo(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.deepcopy.go new 
file mode 100644 index 0000000000..0bc5640671 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.deepcopy.go @@ -0,0 +1,106 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_TokenReview, InType: reflect.TypeOf(&TokenReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_TokenReviewSpec, InType: reflect.TypeOf(&TokenReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_TokenReviewStatus, InType: reflect.TypeOf(&TokenReviewStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_UserInfo, InType: reflect.TypeOf(&UserInfo{})}, + ) +} + +func DeepCopy_v1_TokenReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TokenReview) + out := out.(*TokenReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_TokenReviewStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_TokenReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TokenReviewSpec) + out := out.(*TokenReviewSpec) + *out = *in + return nil + } +} + +func DeepCopy_v1_TokenReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TokenReviewStatus) + out := out.(*TokenReviewStatus) + *out = *in + if err := DeepCopy_v1_UserInfo(&in.User, &out.User, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_UserInfo(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*UserInfo) + out := out.(*UserInfo) + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*ExtraValue) + } + } + } + return nil + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.defaults.go new file mode 100644 index 0000000000..6df448eb9f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.defaults.go @@ -0,0 
+1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/BUILD index c47917d64b..d853556943 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -23,17 +20,31 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.conversion.go", "zz_generated.deepcopy.go", + "zz_generated.defaults.go", ], tags = ["automanaged"], deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/apis/authentication:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", "//vendor:github.com/gogo/protobuf/proto", "//vendor:github.com/gogo/protobuf/sortkeys", "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/types", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/conversion.go index a0b7051c97..51f3adfc71 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/conversion.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) func addConversionFuncs(scheme *runtime.Scheme) error { diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/defaults.go index bcd5e07d9c..1a4566479f 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/defaults.go @@ -17,7 +17,7 @@ limitations under the License. 
package v1beta1 import ( - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) func addDefaultingFuncs(scheme *runtime.Scheme) error { diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/generated.pb.go index b210d3b4e2..808cd0a289 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/generated.pb.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -390,7 +390,7 @@ func (this *TokenReview) String() string { return "nil" } s := strings.Join([]string{`&TokenReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1236,46 +1236,47 @@ var ( ) var fileDescriptorGenerated = []byte{ - // 642 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x93, 0xcf, 0x6e, 0x13, 0x31, - 0x10, 0xc6, 0xb3, 0xf9, 0x53, 0x12, 0x87, 0x42, 0xb1, 0x84, 0x14, 0x45, 0x62, 0x13, 0x85, 0x4b, - 0x90, 0x5a, 0xaf, 0x52, 0x09, 0xa8, 0x5a, 0x71, 0xe8, 0xaa, 0x05, 0xf5, 0x80, 0x90, 0x5c, 0x8a, - 0x10, 0x12, 0x07, 0x27, 0x99, 0x6e, 0x97, 0x6d, 0xd6, 0x2b, 0xaf, 0x9d, 0xd2, 0x5b, 0x1f, 0x81, - 0x23, 0x47, 0xde, 0x83, 0x17, 0xe8, 0xb1, 0x07, 0x0e, 0x1c, 0x50, 0x45, 0xc2, 0x8b, 0x20, 0x7b, - 0x4d, 0x93, 0x36, 0xcd, 0x81, 0xf6, 0xb6, 0xfe, 0x3c, 0xf3, 0x9b, 0x6f, 0xc6, 0x3b, 0x68, 0x33, - 0x5a, 0x4b, 0x49, 0xc8, 0xbd, 0x48, 0x75, 0x41, 0xc4, 0x20, 0x21, 0xf5, 0x92, 0x28, 0xf0, 0x58, - 0x12, 0xa6, 0x1e, 0x53, 0xf2, 0x00, 0x62, 0x19, 0xf6, 0x98, 0x0c, 0x79, 0xec, 0x0d, 0x3b, 0x5d, - 0x90, 0xac, 0xe3, 0x05, 0x10, 0x83, 0x60, 0x12, 0xfa, 0x24, 0x11, 0x5c, 0x72, 0xdc, 0xc9, 0x10, - 0x64, 0x82, 0x20, 0x49, 0x14, 0x10, 0x8d, 0x20, 0x97, 0x11, 0xc4, 0x22, 0xea, 0x2b, 0x41, 0x28, - 0x0f, 0x54, 0x97, 0xf4, 0xf8, 0xc0, 0x0b, 0x78, 0xc0, 0x3d, 0x43, 0xea, 0xaa, 0x7d, 0x73, 0x32, - 0x07, 0xf3, 0x95, 0x55, 0xa8, 0xaf, 0xce, 0x35, 0xe9, 0x09, 0x48, 0xb9, 0x12, 0x3d, 0xb8, 0xea, - 0xaa, 0xfe, 0x74, 0x7e, 0x8e, 0x8a, 0x87, 0x20, 0xd2, 0x90, 0xc7, 0xd0, 0x9f, 0x49, 0x5b, 0x9e, - 0x9f, 0x36, 0x9c, 0x69, 0xbd, 0xbe, 0x72, 0x7d, 0xb4, 0x50, 0xb1, 0x0c, 0x07, 0xb3, 0x9e, 0x3a, - 0xd7, 0x87, 0x2b, 0x19, 0x1e, 0x7a, 0x61, 0x2c, 0x53, 0x29, 0xae, 0xa6, 0xb4, 0x9e, 0x23, 0xb4, - 0xfd, 0x59, 0x0a, 0xf6, 0x8e, 0x1d, 0x2a, 0xc0, 0x0d, 0x54, 0x0a, 0x25, 0x0c, 0xd2, 0x9a, 0xd3, - 0x2c, 0xb4, 0x2b, 0x7e, 0x65, 0x7c, 0xde, 0x28, 0xed, 0x68, 0x81, 0x66, 0xfa, 0x7a, 0xf9, 0xeb, - 0xb7, 0x46, 0xee, 0xe4, 0x57, 0x33, 0xd7, 0xfa, 0x9e, 0x47, 0xd5, 0xb7, 0x3c, 0x82, 0x98, 0xc2, - 0x30, 0x84, 0x23, 0xfc, 0x1e, 0x95, 0x07, 0x20, 0x59, 0x9f, 0x49, 0x56, 0x73, 0x9a, 0x4e, 0xbb, - 0xba, 0xda, 0x26, 0x73, 0x1f, 0x8e, 0x0c, 0x3b, 0xe4, 0x4d, 0xf7, 0x13, 0xf4, 0xe4, 0x6b, 0x90, - 0xcc, 0xc7, 0xa7, 0xe7, 0x8d, 0xdc, 
0xf8, 0xbc, 0x81, 0x26, 0x1a, 0xbd, 0xa0, 0xe1, 0x3e, 0x2a, - 0xa6, 0x09, 0xf4, 0x6a, 0x79, 0x43, 0xf5, 0xc9, 0x7f, 0xff, 0x0e, 0x64, 0xca, 0xe7, 0x6e, 0x02, - 0x3d, 0xff, 0xae, 0xad, 0x57, 0xd4, 0x27, 0x6a, 0xe8, 0xf8, 0x10, 0x2d, 0xa4, 0x92, 0x49, 0x95, - 0xd6, 0x0a, 0xa6, 0xce, 0xd6, 0x2d, 0xeb, 0x18, 0x96, 0x7f, 0xcf, 0x56, 0x5a, 0xc8, 0xce, 0xd4, - 0xd6, 0x68, 0x3d, 0x43, 0xf7, 0xaf, 0x98, 0xc2, 0x8f, 0x51, 0x49, 0x6a, 0xc9, 0x4c, 0xaf, 0xe2, - 0x2f, 0xda, 0xcc, 0x52, 0x16, 0x97, 0xdd, 0xb5, 0x7e, 0x38, 0xe8, 0xc1, 0x4c, 0x15, 0xbc, 0x81, - 0x16, 0xa7, 0x1c, 0x41, 0xdf, 0x20, 0xca, 0xfe, 0x43, 0x8b, 0x58, 0xdc, 0x9c, 0xbe, 0xa4, 0x97, - 0x63, 0xf1, 0x47, 0x54, 0x54, 0x29, 0x08, 0x3b, 0xde, 0x8d, 0x1b, 0xb4, 0xbd, 0x97, 0x82, 0xd8, - 0x89, 0xf7, 0xf9, 0x64, 0xae, 0x5a, 0xa1, 0x06, 0xab, 0xdb, 0x02, 0x21, 0xb8, 0x30, 0x63, 0x9d, - 0x6a, 0x6b, 0x5b, 0x8b, 0x34, 0xbb, 0x6b, 0x8d, 0xf2, 0xa8, 0xfc, 0x8f, 0x82, 0x97, 0x51, 0x59, - 0x67, 0xc6, 0x6c, 0x00, 0x76, 0x16, 0x4b, 0x36, 0xc9, 0xc4, 0x68, 0x9d, 0x5e, 0x44, 0xe0, 0x47, - 0xa8, 0xa0, 0xc2, 0xbe, 0x71, 0x5f, 0xf1, 0xab, 0x36, 0xb0, 0xb0, 0xb7, 0xb3, 0x45, 0xb5, 0x8e, - 0x5b, 0x68, 0x21, 0x10, 0x5c, 0x25, 0xfa, 0x59, 0xf5, 0x2f, 0x8d, 0xf4, 0x63, 0xbc, 0x32, 0x0a, - 0xb5, 0x37, 0x38, 0x42, 0x25, 0xd0, 0x3b, 0x50, 0x2b, 0x36, 0x0b, 0xed, 0xea, 0xea, 0xcb, 0x5b, - 0x8c, 0x80, 0x98, 0x65, 0xda, 0x8e, 0xa5, 0x38, 0x9e, 0x6a, 0x55, 0x6b, 0x34, 0xab, 0x51, 0x3f, - 0xb2, 0x0b, 0x67, 0x62, 0xf0, 0x12, 0x2a, 0x44, 0x70, 0x9c, 0xb5, 0x49, 0xf5, 0x27, 0xde, 0x45, - 0xa5, 0xa1, 0xde, 0x45, 0xfb, 0x1e, 0x2f, 0x6e, 0x60, 0x66, 0xb2, 0xd0, 0x34, 0x63, 0xad, 0xe7, - 0xd7, 0x1c, 0xff, 0xc9, 0xe9, 0xc8, 0xcd, 0x9d, 0x8d, 0xdc, 0xdc, 0xcf, 0x91, 0x9b, 0x3b, 0x19, - 0xbb, 0xce, 0xe9, 0xd8, 0x75, 0xce, 0xc6, 0xae, 0xf3, 0x7b, 0xec, 0x3a, 0x5f, 0xfe, 0xb8, 0xb9, - 0x0f, 0x77, 0x2c, 0xe0, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xca, 0xd0, 0x35, 0x8c, 0xb5, 0x05, - 0x00, 0x00, + // 668 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x53, 0x4d, 0x6f, 0xd3, 0x4a, + 0x14, 0x8d, 0xf3, 0xd1, 0x97, 0x4c, 0x5e, 0xdf, 0xeb, 0x1b, 0xe9, 0x49, 0x51, 0x24, 0x9c, 0x28, + 0x6c, 0x8a, 0x54, 0xc6, 0xa4, 0xa0, 0x52, 0xb5, 0x62, 0x51, 0xab, 0x05, 0x75, 0x81, 0x90, 0xa6, + 0x94, 0x05, 0x12, 0x12, 0x13, 0xe7, 0xd6, 0x31, 0x8e, 0x3f, 0x34, 0x1e, 0xa7, 0xed, 0xae, 0x3f, + 0x81, 0x25, 0x4b, 0xfe, 0x0b, 0x9b, 0x2e, 0xbb, 0x60, 0xc1, 0x02, 0x55, 0x24, 0xfc, 0x11, 0x34, + 0xe3, 0xa1, 0x76, 0x49, 0x2b, 0x44, 0xbb, 0xf3, 0x9c, 0x7b, 0xcf, 0xb9, 0xf7, 0xdc, 0xeb, 0x8b, + 0xb6, 0xfc, 0xf5, 0x84, 0x78, 0x91, 0xe5, 0xa7, 0x03, 0xe0, 0x21, 0x08, 0x48, 0xac, 0xd8, 0x77, + 0x2d, 0x16, 0x7b, 0x89, 0xc5, 0x52, 0x31, 0x82, 0x50, 0x78, 0x0e, 0x13, 0x5e, 0x14, 0x5a, 0x93, + 0xfe, 0x00, 0x04, 0xeb, 0x5b, 0x2e, 0x84, 0xc0, 0x99, 0x80, 0x21, 0x89, 0x79, 0x24, 0x22, 0xdc, + 0xcf, 0x24, 0x48, 0x2e, 0x41, 0x62, 0xdf, 0x25, 0x52, 0x82, 0x5c, 0x96, 0x20, 0x5a, 0xa2, 0x7d, + 0xdf, 0xf5, 0xc4, 0x28, 0x1d, 0x10, 0x27, 0x0a, 0x2c, 0x37, 0x72, 0x23, 0x4b, 0x29, 0x0d, 0xd2, + 0x03, 0xf5, 0x52, 0x0f, 0xf5, 0x95, 0x55, 0x68, 0x3f, 0xd2, 0x4d, 0xb2, 0xd8, 0x0b, 0x98, 0x33, + 0xf2, 0x42, 0xe0, 0xc7, 0x79, 0x9b, 0x01, 0x08, 0x66, 0x4d, 0xe6, 0xfa, 0x6a, 0x5b, 0xd7, 0xb1, + 0x78, 0x1a, 0x0a, 0x2f, 0x80, 0x39, 0xc2, 0xda, 0xef, 0x08, 0x89, 0x33, 0x82, 0x80, 0xcd, 0xf1, + 0x1e, 0x5e, 0xc7, 0x4b, 0x85, 0x37, 0xb6, 0xbc, 0x50, 0x24, 0x82, 0xcf, 0x91, 0x0a, 0x9e, 0x12, + 0xe0, 0x13, 0xe0, 0xb9, 0x21, 0x38, 0x62, 0x41, 0x3c, 0x86, 0xab, 0x3c, 0xad, 0x5c, 0xbb, 0xae, + 
0x2b, 0xb2, 0x7b, 0x8f, 0x11, 0xda, 0x39, 0x12, 0x9c, 0xbd, 0x62, 0xe3, 0x14, 0x70, 0x07, 0xd5, + 0x3c, 0x01, 0x41, 0xd2, 0x32, 0xba, 0x95, 0xe5, 0x86, 0xdd, 0x98, 0x9d, 0x77, 0x6a, 0xbb, 0x12, + 0xa0, 0x19, 0xbe, 0x51, 0xff, 0xf0, 0xb1, 0x53, 0x3a, 0xf9, 0xda, 0x2d, 0xf5, 0x3e, 0x95, 0x51, + 0xf3, 0x65, 0xe4, 0x43, 0x48, 0x61, 0xe2, 0xc1, 0x21, 0x7e, 0x8b, 0xea, 0x72, 0xca, 0x43, 0x26, + 0x58, 0xcb, 0xe8, 0x1a, 0xcb, 0xcd, 0xd5, 0x07, 0x44, 0x6f, 0xbd, 0x68, 0x3a, 0xdf, 0xbb, 0xcc, + 0x26, 0x93, 0x3e, 0x79, 0x31, 0x78, 0x07, 0x8e, 0x78, 0x0e, 0x82, 0xd9, 0xf8, 0xf4, 0xbc, 0x53, + 0x9a, 0x9d, 0x77, 0x50, 0x8e, 0xd1, 0x0b, 0x55, 0x3c, 0x44, 0xd5, 0x24, 0x06, 0xa7, 0x55, 0x56, + 0xea, 0x36, 0xf9, 0xe3, 0x7f, 0x8a, 0x14, 0xfa, 0xdd, 0x8b, 0xc1, 0xb1, 0xff, 0xd6, 0xf5, 0xaa, + 0xf2, 0x45, 0x95, 0x3a, 0x1e, 0xa3, 0x85, 0x44, 0x30, 0x91, 0x26, 0xad, 0x8a, 0xaa, 0xb3, 0x7d, + 0xcb, 0x3a, 0x4a, 0xcb, 0xfe, 0x47, 0x57, 0x5a, 0xc8, 0xde, 0x54, 0xd7, 0xe8, 0xad, 0xa1, 0x7f, + 0x7f, 0x69, 0x0a, 0xdf, 0x45, 0x35, 0x21, 0x21, 0x35, 0xc5, 0x86, 0xbd, 0xa8, 0x99, 0xb5, 0x2c, + 0x2f, 0x8b, 0xf5, 0x3e, 0x1b, 0xe8, 0xbf, 0xb9, 0x2a, 0x78, 0x13, 0x2d, 0x16, 0x3a, 0x82, 0xa1, + 0x92, 0xa8, 0xdb, 0xff, 0x6b, 0x89, 0xc5, 0xad, 0x62, 0x90, 0x5e, 0xce, 0xc5, 0x6f, 0x50, 0x35, + 0x4d, 0x80, 0xeb, 0xf1, 0x6e, 0xde, 0xc0, 0xf6, 0x7e, 0x02, 0x7c, 0x37, 0x3c, 0x88, 0xf2, 0xb9, + 0x4a, 0x84, 0x2a, 0x59, 0x69, 0x0b, 0x38, 0x8f, 0xb8, 0x1a, 0x6b, 0xc1, 0xd6, 0x8e, 0x04, 0x69, + 0x16, 0xeb, 0x4d, 0xcb, 0xa8, 0xfe, 0x53, 0x05, 0xaf, 0xa0, 0xba, 0x64, 0x86, 0x2c, 0x00, 0x3d, + 0x8b, 0x25, 0x4d, 0x52, 0x39, 0x12, 0xa7, 0x17, 0x19, 0xf8, 0x0e, 0xaa, 0xa4, 0xde, 0x50, 0x75, + 0xdf, 0xb0, 0x9b, 0x3a, 0xb1, 0xb2, 0xbf, 0xbb, 0x4d, 0x25, 0x8e, 0x7b, 0x68, 0xc1, 0xe5, 0x51, + 0x1a, 0xcb, 0xb5, 0xca, 0x5f, 0x1b, 0xc9, 0x65, 0x3c, 0x53, 0x08, 0xd5, 0x11, 0xec, 0xa3, 0x1a, + 0xc8, 0x5b, 0x68, 0x55, 0xbb, 0x95, 0xe5, 0xe6, 0xea, 0xd3, 0x5b, 0x8c, 0x80, 0xa8, 0xa3, 0xda, + 0x09, 0x05, 0x3f, 0x2e, 0x58, 0x95, 0x18, 0xcd, 0x6a, 0xb4, 0x0f, 0xf5, 0xe1, 0xa9, 0x1c, 0xbc, + 0x84, 0x2a, 0x3e, 0x1c, 0x67, 0x36, 0xa9, 0xfc, 0xc4, 0x7b, 0xa8, 0x36, 0x91, 0x37, 0xa9, 0xf7, + 0xf1, 0xe4, 0x06, 0xcd, 0xe4, 0x87, 0x4d, 0x33, 0xad, 0x8d, 0xf2, 0xba, 0x61, 0xdf, 0x3b, 0x9d, + 0x9a, 0xa5, 0xb3, 0xa9, 0x59, 0xfa, 0x32, 0x35, 0x4b, 0x27, 0x33, 0xd3, 0x38, 0x9d, 0x99, 0xc6, + 0xd9, 0xcc, 0x34, 0xbe, 0xcd, 0x4c, 0xe3, 0xfd, 0x77, 0xb3, 0xf4, 0xfa, 0x2f, 0x2d, 0xf0, 0x23, + 0x00, 0x00, 0xff, 0xff, 0xb9, 0x87, 0xc6, 0x94, 0xfa, 0x05, 0x00, 0x00, } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/generated.proto index f36e28161b..cbc050970a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/generated.proto +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -21,11 +21,12 @@ syntax = 'proto2'; package k8s.io.kubernetes.pkg.apis.authentication.v1beta1; -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; @@ -44,7 +45,7 @@ message ExtraValue { // plugin in the kube-apiserver. message TokenReview { // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional TokenReviewSpec spec = 2; diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/register.go index 53488da1fa..ddaa197021 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/register.go @@ -17,16 +17,21 @@ limitations under the License. package v1beta1 import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName is the group name use in this package const GroupName = "authentication.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1beta1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} var ( SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) @@ -36,11 +41,8 @@ var ( // Adds the list of known types to api.Scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &v1.ListOptions{}, - &v1.DeleteOptions{}, - &v1.ExportOptions{}, - &TokenReview{}, ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/types.generated.go index 83982830b5..b8990af180 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/types.generated.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/types.generated.go @@ -25,9 +25,8 @@ import ( "errors" "fmt" codec1978 "github.com/ugorji/go/codec" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg2_v1 "k8s.io/kubernetes/pkg/api/v1" - pkg3_types "k8s.io/kubernetes/pkg/types" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" "reflect" "runtime" time "time" @@ -63,11 +62,10 @@ func init() { panic(err) } if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_unversioned.TypeMeta - var v1 pkg2_v1.ObjectMeta - var v2 pkg3_types.UID - var v3 time.Time - _, _, _, _ = v0, v1, v2, v3 + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 } } @@ -159,7 +157,13 @@ func (x *TokenReview) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[2] { yy10 := &x.ObjectMeta - yy10.CodecEncodeSelf(e) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } @@ -168,26 +172,32 @@ func (x *TokenReview) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.ObjectMeta - yy11.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy13 := &x.Spec - yy13.CodecEncodeSelf(e) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.Spec - yy14.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[4] { - yy16 := &x.Status - yy16.CodecEncodeSelf(e) + yy20 := &x.Status + yy20.CodecEncodeSelf(e) } else { r.EncodeNil() } @@ -196,8 +206,8 @@ func (x *TokenReview) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Status - yy17.CodecEncodeSelf(e) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } if yyr2 || yy2arr2 { @@ -213,25 +223,25 @@ func (x *TokenReview) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym18 := z.DecBinary() - _ = yym18 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct19 := r.ContainerType() - if yyct19 == codecSelferValueTypeMap1234 { - 
yyl19 := r.ReadMapStart() - if yyl19 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl19, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct19 == codecSelferValueTypeArray1234 { - yyl19 := r.ReadArrayStart() - if yyl19 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl19, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -243,12 +253,12 @@ func (x *TokenReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys20Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys20Slc - var yyhl20 bool = l >= 0 - for yyj20 := 0; ; yyj20++ { - if yyhl20 { - if yyj20 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -257,47 +267,65 @@ func (x *TokenReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys20Slc = r.DecodeBytes(yys20Slc, true, true) - yys20 := string(yys20Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys20 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv23 := &x.ObjectMeta - yyv23.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = TokenReviewSpec{} } else { - yyv24 := &x.Spec - yyv24.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = TokenReviewStatus{} } else { - yyv25 := &x.Status - yyv25.CodecDecodeSelf(d) + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys20) - } // end switch yys20 - } // end for yyj20 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -305,16 +333,16 @@ func (x *TokenReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj26 int - var yyb26 bool - var yyhl26 bool = l >= 0 - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb26 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb26 { + if yyb12 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -322,15 +350,21 @@ func (x *TokenReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb26 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb26 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -338,32 +372,44 @@ func (x *TokenReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb26 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb26 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv29 := &x.ObjectMeta - yyv29.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb26 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb26 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -371,16 +417,16 @@ func (x *TokenReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Spec = TokenReviewSpec{} } else { - yyv30 := &x.Spec - yyv30.CodecDecodeSelf(d) + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb26 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb26 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -388,21 +434,21 @@ func (x *TokenReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Status = TokenReviewStatus{} } else { - yyv31 := &x.Status - yyv31.CodecDecodeSelf(d) + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) } for { - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb26 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb26 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj26-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -414,35 +460,35 @@ func (x *TokenReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym32 := z.EncBinary() - _ = yym32 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep33 := !z.EncBinary() - yy2arr33 := z.EncBasicHandle().StructToArray - var yyq33 [1]bool - _, _, _ = yysep33, yyq33, yy2arr33 - const yyr33 bool = false - yyq33[0] = x.Token != "" - var yynn33 int - if yyr33 || yy2arr33 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 
[1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Token != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn33 = 0 - for _, b := range yyq33 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn33++ + yynn2++ } } - r.EncodeMapStart(yynn33) - yynn33 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr33 || yy2arr33 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[0] { - yym35 := z.EncBinary() - _ = yym35 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Token)) @@ -451,19 +497,19 @@ func (x *TokenReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq33[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("token")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym36 := z.EncBinary() - _ = yym36 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Token)) } } } - if yyr33 || yy2arr33 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -476,25 +522,25 @@ func (x *TokenReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym37 := z.DecBinary() - _ = yym37 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct38 := r.ContainerType() - if yyct38 == codecSelferValueTypeMap1234 { - yyl38 := r.ReadMapStart() - if yyl38 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl38, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct38 == codecSelferValueTypeArray1234 { - yyl38 := r.ReadArrayStart() - if yyl38 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl38, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -506,12 +552,12 @@ func (x *TokenReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys39Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys39Slc - var yyhl39 bool = l >= 0 - for yyj39 := 0; ; yyj39++ { - if yyhl39 { - if yyj39 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -520,20 +566,26 @@ func (x *TokenReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys39Slc = r.DecodeBytes(yys39Slc, true, true) - yys39 := string(yys39Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys39 { + switch yys3 { case "token": if r.TryDecodeAsNil() { x.Token = "" } else { - x.Token = string(r.DecodeString()) + yyv4 := &x.Token + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = 
r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys39) - } // end switch yys39 - } // end for yyj39 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -541,16 +593,16 @@ func (x *TokenReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj41 int - var yyb41 bool - var yyhl41 bool = l >= 0 - yyj41++ - if yyhl41 { - yyb41 = yyj41 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb41 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb41 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -558,20 +610,26 @@ func (x *TokenReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Token = "" } else { - x.Token = string(r.DecodeString()) + yyv7 := &x.Token + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } } for { - yyj41++ - if yyhl41 { - yyb41 = yyj41 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb41 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb41 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj41-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -583,37 +641,37 @@ func (x *TokenReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym43 := z.EncBinary() - _ = yym43 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep44 := !z.EncBinary() - yy2arr44 := z.EncBasicHandle().StructToArray - var yyq44 [3]bool - _, _, _ = yysep44, yyq44, yy2arr44 - const yyr44 bool = false - yyq44[0] = x.Authenticated != false - yyq44[1] = true - yyq44[2] = x.Error != "" - var yynn44 int - if yyr44 || yy2arr44 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Authenticated != false + yyq2[1] = true + yyq2[2] = x.Error != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn44 = 0 - for _, b := range yyq44 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn44++ + yynn2++ } } - r.EncodeMapStart(yynn44) - yynn44 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr44 || yy2arr44 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq44[0] { - yym46 := z.EncBinary() - _ = yym46 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeBool(bool(x.Authenticated)) @@ -622,40 +680,40 @@ func (x *TokenReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq44[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("authenticated")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym47 := z.EncBinary() - _ = yym47 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeBool(bool(x.Authenticated)) } } } - if yyr44 || yy2arr44 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq44[1] { - yy49 := &x.User - yy49.CodecEncodeSelf(e) + if yyq2[1] { + yy7 := &x.User + yy7.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq44[1] { + if yyq2[1] { 
z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("user")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy50 := &x.User - yy50.CodecEncodeSelf(e) + yy9 := &x.User + yy9.CodecEncodeSelf(e) } } - if yyr44 || yy2arr44 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq44[2] { - yym52 := z.EncBinary() - _ = yym52 + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Error)) @@ -664,19 +722,19 @@ func (x *TokenReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq44[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("error")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym53 := z.EncBinary() - _ = yym53 + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Error)) } } } - if yyr44 || yy2arr44 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -689,25 +747,25 @@ func (x *TokenReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym54 := z.DecBinary() - _ = yym54 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct55 := r.ContainerType() - if yyct55 == codecSelferValueTypeMap1234 { - yyl55 := r.ReadMapStart() - if yyl55 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl55, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct55 == codecSelferValueTypeArray1234 { - yyl55 := r.ReadArrayStart() - if yyl55 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl55, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -719,12 +777,12 @@ func (x *TokenReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys56Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys56Slc - var yyhl56 bool = l >= 0 - for yyj56 := 0; ; yyj56++ { - if yyhl56 { - if yyj56 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -733,33 +791,45 @@ func (x *TokenReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys56Slc = r.DecodeBytes(yys56Slc, true, true) - yys56 := string(yys56Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys56 { + switch yys3 { case "authenticated": if r.TryDecodeAsNil() { x.Authenticated = false } else { - x.Authenticated = bool(r.DecodeBool()) + yyv4 := &x.Authenticated + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*bool)(yyv4)) = r.DecodeBool() + } } case "user": if r.TryDecodeAsNil() { x.User = 
UserInfo{} } else { - yyv58 := &x.User - yyv58.CodecDecodeSelf(d) + yyv6 := &x.User + yyv6.CodecDecodeSelf(d) } case "error": if r.TryDecodeAsNil() { x.Error = "" } else { - x.Error = string(r.DecodeString()) + yyv7 := &x.Error + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys56) - } // end switch yys56 - } // end for yyj56 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -767,16 +837,16 @@ func (x *TokenReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj60 int - var yyb60 bool - var yyhl60 bool = l >= 0 - yyj60++ - if yyhl60 { - yyb60 = yyj60 > l + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb60 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb60 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -784,15 +854,21 @@ func (x *TokenReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.Authenticated = false } else { - x.Authenticated = bool(r.DecodeBool()) + yyv10 := &x.Authenticated + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } } - yyj60++ - if yyhl60 { - yyb60 = yyj60 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb60 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb60 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -800,16 +876,16 @@ func (x *TokenReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.User = UserInfo{} } else { - yyv62 := &x.User - yyv62.CodecDecodeSelf(d) + yyv12 := &x.User + yyv12.CodecDecodeSelf(d) } - yyj60++ - if yyhl60 { - yyb60 = yyj60 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb60 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb60 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -817,20 +893,26 @@ func (x *TokenReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.Error = "" } else { - x.Error = string(r.DecodeString()) + yyv13 := &x.Error + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } for { - yyj60++ - if yyhl60 { - yyb60 = yyj60 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb60 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb60 { + if yyb9 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj60-1, "") + z.DecStructFieldNotFound(yyj9-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -842,38 +924,38 @@ func (x *UserInfo) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym64 := z.EncBinary() - _ = yym64 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep65 := !z.EncBinary() - yy2arr65 := z.EncBasicHandle().StructToArray - var yyq65 [4]bool - _, _, _ = yysep65, yyq65, yy2arr65 - const yyr65 bool = false - yyq65[0] = x.Username != "" - yyq65[1] = x.UID != "" - yyq65[2] = len(x.Groups) != 0 - yyq65[3] = len(x.Extra) != 0 - var yynn65 int - if yyr65 || yy2arr65 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 
+ const yyr2 bool = false + yyq2[0] = x.Username != "" + yyq2[1] = x.UID != "" + yyq2[2] = len(x.Groups) != 0 + yyq2[3] = len(x.Extra) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn65 = 0 - for _, b := range yyq65 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn65++ + yynn2++ } } - r.EncodeMapStart(yynn65) - yynn65 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr65 || yy2arr65 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq65[0] { - yym67 := z.EncBinary() - _ = yym67 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Username)) @@ -882,23 +964,23 @@ func (x *UserInfo) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq65[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("username")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym68 := z.EncBinary() - _ = yym68 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Username)) } } } - if yyr65 || yy2arr65 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq65[1] { - yym70 := z.EncBinary() - _ = yym70 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.UID)) @@ -907,26 +989,26 @@ func (x *UserInfo) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq65[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("uid")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym71 := z.EncBinary() - _ = yym71 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.UID)) } } } - if yyr65 || yy2arr65 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq65[2] { + if yyq2[2] { if x.Groups == nil { r.EncodeNil() } else { - yym73 := z.EncBinary() - _ = yym73 + yym10 := z.EncBinary() + _ = yym10 if false { } else { z.F.EncSliceStringV(x.Groups, false, e) @@ -936,15 +1018,15 @@ func (x *UserInfo) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq65[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("groups")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Groups == nil { r.EncodeNil() } else { - yym74 := z.EncBinary() - _ = yym74 + yym11 := z.EncBinary() + _ = yym11 if false { } else { z.F.EncSliceStringV(x.Groups, false, e) @@ -952,14 +1034,14 @@ func (x *UserInfo) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr65 || yy2arr65 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq65[3] { + if yyq2[3] { if x.Extra == nil { r.EncodeNil() } else { - yym76 := z.EncBinary() - _ = yym76 + yym13 := z.EncBinary() + _ = yym13 if false { } else { h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) @@ -969,15 +1051,15 @@ func (x *UserInfo) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq65[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("extra")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Extra == nil { r.EncodeNil() } else { - yym77 := 
z.EncBinary() - _ = yym77 + yym14 := z.EncBinary() + _ = yym14 if false { } else { h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) @@ -985,7 +1067,7 @@ func (x *UserInfo) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr65 || yy2arr65 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -998,25 +1080,25 @@ func (x *UserInfo) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym78 := z.DecBinary() - _ = yym78 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct79 := r.ContainerType() - if yyct79 == codecSelferValueTypeMap1234 { - yyl79 := r.ReadMapStart() - if yyl79 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl79, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct79 == codecSelferValueTypeArray1234 { - yyl79 := r.ReadArrayStart() - if yyl79 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl79, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -1028,12 +1110,12 @@ func (x *UserInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys80Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys80Slc - var yyhl80 bool = l >= 0 - for yyj80 := 0; ; yyj80++ { - if yyhl80 { - if yyj80 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -1042,50 +1124,62 @@ func (x *UserInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys80Slc = r.DecodeBytes(yys80Slc, true, true) - yys80 := string(yys80Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys80 { + switch yys3 { case "username": if r.TryDecodeAsNil() { x.Username = "" } else { - x.Username = string(r.DecodeString()) + yyv4 := &x.Username + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "uid": if r.TryDecodeAsNil() { x.UID = "" } else { - x.UID = string(r.DecodeString()) + yyv6 := &x.UID + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "groups": if r.TryDecodeAsNil() { x.Groups = nil } else { - yyv83 := &x.Groups - yym84 := z.DecBinary() - _ = yym84 + yyv8 := &x.Groups + yym9 := z.DecBinary() + _ = yym9 if false { } else { - z.F.DecSliceStringX(yyv83, false, d) + z.F.DecSliceStringX(yyv8, false, d) } } case "extra": if r.TryDecodeAsNil() { x.Extra = nil } else { - yyv85 := &x.Extra - yym86 := z.DecBinary() - _ = yym86 + yyv10 := &x.Extra + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decMapstringExtraValue((*map[string]ExtraValue)(yyv85), d) + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys80) - } // end switch yys80 - } // end 
for yyj80 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -1093,16 +1187,16 @@ func (x *UserInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj87 int - var yyb87 bool - var yyhl87 bool = l >= 0 - yyj87++ - if yyhl87 { - yyb87 = yyj87 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb87 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb87 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1110,15 +1204,21 @@ func (x *UserInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Username = "" } else { - x.Username = string(r.DecodeString()) + yyv13 := &x.Username + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj87++ - if yyhl87 { - yyb87 = yyj87 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb87 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb87 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1126,15 +1226,21 @@ func (x *UserInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.UID = "" } else { - x.UID = string(r.DecodeString()) + yyv15 := &x.UID + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj87++ - if yyhl87 { - yyb87 = yyj87 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb87 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb87 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1142,21 +1248,21 @@ func (x *UserInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Groups = nil } else { - yyv90 := &x.Groups - yym91 := z.DecBinary() - _ = yym91 + yyv17 := &x.Groups + yym18 := z.DecBinary() + _ = yym18 if false { } else { - z.F.DecSliceStringX(yyv90, false, d) + z.F.DecSliceStringX(yyv17, false, d) } } - yyj87++ - if yyhl87 { - yyb87 = yyj87 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb87 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb87 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1164,26 +1270,26 @@ func (x *UserInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Extra = nil } else { - yyv92 := &x.Extra - yym93 := z.DecBinary() - _ = yym93 + yyv19 := &x.Extra + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decMapstringExtraValue((*map[string]ExtraValue)(yyv92), d) + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv19), d) } } for { - yyj87++ - if yyhl87 { - yyb87 = yyj87 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb87 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb87 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj87-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -1195,8 +1301,8 @@ func (x ExtraValue) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym94 := z.EncBinary() - _ = yym94 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -1209,8 +1315,8 @@ func (x *ExtraValue) CodecDecodeSelf(d *codec1978.Decoder) { var h 
codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym95 := z.DecBinary() - _ = yym95 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -1223,19 +1329,19 @@ func (x codecSelfer1234) encMapstringExtraValue(v map[string]ExtraValue, e *code z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeMapStart(len(v)) - for yyk96, yyv96 := range v { + for yyk1, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym97 := z.EncBinary() - _ = yym97 + yym2 := z.EncBinary() + _ = yym2 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(yyk96)) + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) } z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyv96 == nil { + if yyv1 == nil { r.EncodeNil() } else { - yyv96.CodecEncodeSelf(e) + yyv1.CodecEncodeSelf(e) } } z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -1246,70 +1352,82 @@ func (x codecSelfer1234) decMapstringExtraValue(v *map[string]ExtraValue, d *cod z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv98 := *v - yyl98 := r.ReadMapStart() - yybh98 := z.DecBasicHandle() - if yyv98 == nil { - yyrl98, _ := z.DecInferLen(yyl98, yybh98.MaxInitLen, 40) - yyv98 = make(map[string]ExtraValue, yyrl98) - *v = yyv98 + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string]ExtraValue, yyrl1) + *v = yyv1 } - var yymk98 string - var yymv98 ExtraValue - var yymg98 bool - if yybh98.MapValueReset { - yymg98 = true + var yymk1 string + var yymv1 ExtraValue + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true } - if yyl98 > 0 { - for yyj98 := 0; yyj98 < yyl98; yyj98++ { + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { z.DecSendContainerState(codecSelfer_containerMapKey1234) if r.TryDecodeAsNil() { - yymk98 = "" + yymk1 = "" } else { - yymk98 = string(r.DecodeString()) + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } } - if yymg98 { - yymv98 = yyv98[yymk98] + if yymg1 { + yymv1 = yyv1[yymk1] } else { - yymv98 = nil + yymv1 = nil } z.DecSendContainerState(codecSelfer_containerMapValue1234) if r.TryDecodeAsNil() { - yymv98 = nil + yymv1 = nil } else { - yyv100 := &yymv98 - yyv100.CodecDecodeSelf(d) + yyv4 := &yymv1 + yyv4.CodecDecodeSelf(d) } - if yyv98 != nil { - yyv98[yymk98] = yymv98 + if yyv1 != nil { + yyv1[yymk1] = yymv1 } } - } else if yyl98 < 0 { - for yyj98 := 0; !r.CheckBreak(); yyj98++ { + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { z.DecSendContainerState(codecSelfer_containerMapKey1234) if r.TryDecodeAsNil() { - yymk98 = "" + yymk1 = "" } else { - yymk98 = string(r.DecodeString()) + yyv5 := &yymk1 + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } } - if yymg98 { - yymv98 = yyv98[yymk98] + if yymg1 { + yymv1 = yyv1[yymk1] } else { - yymv98 = nil + yymv1 = nil } z.DecSendContainerState(codecSelfer_containerMapValue1234) if r.TryDecodeAsNil() { - yymv98 = nil + yymv1 = nil } else { - yyv102 := &yymv98 - yyv102.CodecDecodeSelf(d) + yyv7 := &yymv1 + yyv7.CodecDecodeSelf(d) } - if yyv98 != nil { - yyv98[yymk98] = yymv98 + if yyv1 != nil { + yyv1[yymk1] = yymv1 } } } // else len==0: TODO: Should we clear map entries? 
@@ -1321,13 +1439,13 @@ func (x codecSelfer1234) encExtraValue(v ExtraValue, e *codec1978.Encoder) { z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv103 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym104 := z.EncBinary() - _ = yym104 + yym2 := z.EncBinary() + _ = yym2 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(yyv103)) + r.EncodeString(codecSelferC_UTF81234, string(yyv1)) } } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) @@ -1338,75 +1456,96 @@ func (x codecSelfer1234) decExtraValue(v *ExtraValue, d *codec1978.Decoder) { z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv105 := *v - yyh105, yyl105 := z.DecSliceHelperStart() - var yyc105 bool - if yyl105 == 0 { - if yyv105 == nil { - yyv105 = []string{} - yyc105 = true - } else if len(yyv105) != 0 { - yyv105 = yyv105[:0] - yyc105 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl105 > 0 { - var yyrr105, yyrl105 int - var yyrt105 bool - if yyl105 > cap(yyv105) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrl105, yyrt105 = z.DecInferLen(yyl105, z.DecBasicHandle().MaxInitLen, 16) - if yyrt105 { - if yyrl105 <= cap(yyv105) { - yyv105 = yyv105[:yyrl105] + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv105 = make([]string, yyrl105) + yyv1 = make([]string, yyrl1) } } else { - yyv105 = make([]string, yyrl105) + yyv1 = make([]string, yyrl1) } - yyc105 = true - yyrr105 = len(yyv105) - } else if yyl105 != len(yyv105) { - yyv105 = yyv105[:yyl105] - yyc105 = true + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj105 := 0 - for ; yyj105 < yyrr105; yyj105++ { - yyh105.ElemContainerState(yyj105) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv105[yyj105] = "" + yyv1[yyj1] = "" } else { - yyv105[yyj105] = string(r.DecodeString()) + yyv2 := &yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } } } - if yyrt105 { - for ; yyj105 < yyl105; yyj105++ { - yyv105 = append(yyv105, "") - yyh105.ElemContainerState(yyj105) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv105[yyj105] = "" + yyv1[yyj1] = "" } else { - yyv105[yyj105] = string(r.DecodeString()) + yyv4 := &yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } } } } else { - yyj105 := 0 - for ; !r.CheckBreak(); yyj105++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj105 >= len(yyv105) { - yyv105 = append(yyv105, "") // var yyz105 string - yyc105 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 string + yyc1 = true } - yyh105.ElemContainerState(yyj105) - if yyj105 < len(yyv105) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv105[yyj105] = "" + yyv1[yyj1] = "" } else { - yyv105[yyj105] = string(r.DecodeString()) + yyv6 := &yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + 
*((*string)(yyv6)) = r.DecodeString() + } } } else { @@ -1414,16 +1553,16 @@ func (x codecSelfer1234) decExtraValue(v *ExtraValue, d *codec1978.Decoder) { } } - if yyj105 < len(yyv105) { - yyv105 = yyv105[:yyj105] - yyc105 = true - } else if yyj105 == 0 && yyv105 == nil { - yyv105 = []string{} - yyc105 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []string{} + yyc1 = true } } - yyh105.End() - if yyc105 { - *v = yyv105 + yyh1.End() + if yyc1 { + *v = yyv1 } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/types.go index 3cab98f7a8..57c96e3bc4 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/types.go @@ -19,8 +19,7 @@ package v1beta1 import ( "fmt" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // +genclient=true @@ -31,9 +30,9 @@ import ( // Note: TokenReview requests may be cached by the webhook token authenticator // plugin in the kube-apiserver. type TokenReview struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec holds information about the request being evaluated Spec TokenReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/zz_generated.conversion.go index e848c4943a..69f0fc8a71 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,9 +21,9 @@ limitations under the License. package v1beta1 import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" authentication "k8s.io/kubernetes/pkg/apis/authentication" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" unsafe "unsafe" ) @@ -47,10 +47,7 @@ func RegisterConversions(scheme *runtime.Scheme) error { } func autoConvert_v1beta1_TokenReview_To_authentication_TokenReview(in *TokenReview, out *authentication.TokenReview, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -65,10 +62,7 @@ func Convert_v1beta1_TokenReview_To_authentication_TokenReview(in *TokenReview, } func autoConvert_authentication_TokenReview_To_v1beta1_TokenReview(in *authentication.TokenReview, out *TokenReview, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/zz_generated.deepcopy.go index 01cbc76e4c..01260cc133 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,9 +21,9 @@ limitations under the License. package v1beta1 import ( - v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" reflect "reflect" ) @@ -46,11 +46,12 @@ func DeepCopy_v1beta1_TokenReview(in interface{}, out interface{}, c *conversion { in := in.(*TokenReview) out := out.(*TokenReview) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } - out.Spec = in.Spec if err := DeepCopy_v1beta1_TokenReviewStatus(&in.Status, &out.Status, c); err != nil { return err } @@ -62,7 +63,7 @@ func DeepCopy_v1beta1_TokenReviewSpec(in interface{}, out interface{}, c *conver { in := in.(*TokenReviewSpec) out := out.(*TokenReviewSpec) - out.Token = in.Token + *out = *in return nil } } @@ -71,11 +72,10 @@ func DeepCopy_v1beta1_TokenReviewStatus(in interface{}, out interface{}, c *conv { in := in.(*TokenReviewStatus) out := out.(*TokenReviewStatus) - out.Authenticated = in.Authenticated + *out = *in if err := DeepCopy_v1beta1_UserInfo(&in.User, &out.User, c); err != nil { return err } - out.Error = in.Error return nil } } @@ -84,14 +84,11 @@ func DeepCopy_v1beta1_UserInfo(in interface{}, out interface{}, c *conversion.Cl { in := in.(*UserInfo) out := out.(*UserInfo) - out.Username = in.Username - out.UID = in.UID + *out = *in if in.Groups != nil { in, out := &in.Groups, &out.Groups *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Groups = nil } if in.Extra != nil { in, out := &in.Extra, &out.Extra @@ -103,8 +100,6 @@ func DeepCopy_v1beta1_UserInfo(in interface{}, out interface{}, c *conversion.Cl (*out)[key] = *newVal.(*ExtraValue) } } - } else { - out.Extra = nil } return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/zz_generated.defaults.go new file mode 100644 index 0000000000..e24e70be38 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/zz_generated.deepcopy.go index 7caf7b0c48..ec322c5f88 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,9 +21,9 @@ limitations under the License. package authentication import ( - api "k8s.io/kubernetes/pkg/api" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" reflect "reflect" ) @@ -46,11 +46,12 @@ func DeepCopy_authentication_TokenReview(in interface{}, out interface{}, c *con { in := in.(*TokenReview) out := out.(*TokenReview) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } - out.Spec = in.Spec if err := DeepCopy_authentication_TokenReviewStatus(&in.Status, &out.Status, c); err != nil { return err } @@ -62,7 +63,7 @@ func DeepCopy_authentication_TokenReviewSpec(in interface{}, out interface{}, c { in := in.(*TokenReviewSpec) out := out.(*TokenReviewSpec) - out.Token = in.Token + *out = *in return nil } } @@ -71,11 +72,10 @@ func DeepCopy_authentication_TokenReviewStatus(in interface{}, out interface{}, { in := in.(*TokenReviewStatus) out := out.(*TokenReviewStatus) - out.Authenticated = in.Authenticated + *out = *in if err := DeepCopy_authentication_UserInfo(&in.User, &out.User, c); err != nil { return err } - out.Error = in.Error return nil } } @@ -84,14 +84,11 @@ func DeepCopy_authentication_UserInfo(in interface{}, out interface{}, c *conver { in := in.(*UserInfo) out := out.(*UserInfo) - out.Username = in.Username - out.UID = in.UID + *out = *in if in.Groups != nil { in, out := &in.Groups, &out.Groups *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Groups = nil } if in.Extra != nil { in, out := &in.Extra, &out.Extra @@ -103,8 +100,6 @@ func DeepCopy_authentication_UserInfo(in interface{}, out interface{}, c *conver (*out)[key] = *newVal.(*ExtraValue) } } - } else { - out.Extra = nil } return nil } diff --git 
a/vendor/k8s.io/kubernetes/pkg/apis/authorization/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/authorization/BUILD index d12e144d53..326294e3d7 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -15,17 +12,33 @@ go_library( srcs = [ "doc.go", "register.go", - "types.generated.go", "types.go", "zz_generated.deepcopy.go", ], tags = ["automanaged"], deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/authorization/install:all-srcs", + "//pkg/apis/authorization/v1:all-srcs", + "//pkg/apis/authorization/v1beta1:all-srcs", + "//pkg/apis/authorization/validation:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/authorization/OWNERS new file mode 100755 index 0000000000..2fef50443b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/OWNERS @@ -0,0 +1,17 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- liggitt +- nikhiljindal +- erictune +- sttts +- ncdc +- timothysc +- dims +- mml +- mbohlool +- david-mcmahon +- jianhuiz diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go index 4e2deb516e..df9046f03c 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go @@ -15,7 +15,5 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package,register -// +k8s:openapi-gen=true - // +groupName=authorization.k8s.io package authorization diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/BUILD index d9ca640a42..ac8d4d4bb7 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -15,9 +12,26 @@ go_library( srcs = ["install.go"], tags = ["automanaged"], deps = [ - "//pkg/apimachinery/announced:go_default_library", + "//pkg/api:go_default_library", "//pkg/apis/authorization:go_default_library", + "//pkg/apis/authorization/v1:go_default_library", "//pkg/apis/authorization/v1beta1:go_default_library", - "//pkg/util/sets:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/announced", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/registered", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/util/sets", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/install.go index c2356d3ebd..1b82992f64 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/install.go @@ -19,25 +19,35 @@ limitations under the License. 
package install import ( - "k8s.io/kubernetes/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/authorization" + "k8s.io/kubernetes/pkg/apis/authorization/v1" "k8s.io/kubernetes/pkg/apis/authorization/v1beta1" - "k8s.io/kubernetes/pkg/util/sets" ) func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { if err := announced.NewGroupMetaFactory( &announced.GroupMetaFactoryArgs{ GroupName: authorization.GroupName, - VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version}, + VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version, v1beta1.SchemeGroupVersion.Version}, ImportPrefix: "k8s.io/kubernetes/pkg/apis/authorization", RootScopedKinds: sets.NewString("SubjectAccessReview", "SelfSubjectAccessReview"), AddInternalObjectsToScheme: authorization.AddToScheme, }, announced.VersionToSchemeFunc{ v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, + v1.SchemeGroupVersion.Version: v1.AddToScheme, }, - ).Announce().RegisterAndEnable(); err != nil { + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { panic(err) } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/register.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/register.go index 59d92fa7e5..5693885e4e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/register.go @@ -17,23 +17,23 @@ limitations under the License. package authorization import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName is the group name use in this package const GroupName = "authorization.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} // Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { +func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } // Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { +func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.generated.go deleted file mode 100644 index c3a10de56d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.generated.go +++ /dev/null @@ -1,5607 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package authorization - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg2_api "k8s.io/kubernetes/pkg/api" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg3_types "k8s.io/kubernetes/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg2_api.ObjectMeta - var v1 pkg1_unversioned.TypeMeta - var v2 pkg3_types.UID - var v3 time.Time - _, _, _, _ = v0, v1, v2, v3 - } -} - -func (x *SubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [19]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = x.Name != "" - yyq2[3] = x.GenerateName != "" - yyq2[4] = x.Namespace != "" - yyq2[5] = x.SelfLink != "" - yyq2[6] = x.UID != "" - yyq2[7] = x.ResourceVersion != "" - yyq2[8] = x.Generation != 0 - yyq2[9] = true - yyq2[10] = x.ObjectMeta.DeletionTimestamp != nil && x.DeletionTimestamp != nil - yyq2[11] = x.ObjectMeta.DeletionGracePeriodSeconds != nil && x.DeletionGracePeriodSeconds != nil - yyq2[12] = len(x.Labels) != 0 - yyq2[13] = len(x.Annotations) != 0 - yyq2[14] = len(x.OwnerReferences) != 0 - yyq2[15] = len(x.Finalizers) != 0 - yyq2[16] = x.ClusterName != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(19) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } 
- } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("generateName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selfLink")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - 
yym22 := z.EncBinary() - _ = yym22 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeInt(int64(x.Generation)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("generation")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - r.EncodeInt(int64(x.Generation)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - yy31 := &x.CreationTimestamp - yym32 := z.EncBinary() - _ = yym32 - if false { - } else if z.HasExtensions() && z.EncExt(yy31) { - } else if yym32 { - z.EncBinaryMarshal(yy31) - } else if !yym32 && z.IsJSONHandle() { - z.EncJSONMarshal(yy31) - } else { - z.EncFallback(yy31) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("creationTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy33 := &x.CreationTimestamp - yym34 := z.EncBinary() - _ = yym34 - if false { - } else if z.HasExtensions() && z.EncExt(yy33) { - } else if yym34 { - z.EncBinaryMarshal(yy33) - } else if !yym34 && z.IsJSONHandle() { - z.EncJSONMarshal(yy33) - } else { - z.EncFallback(yy33) - } - } - } - var yyn35 bool - if x.ObjectMeta.DeletionTimestamp == nil { - yyn35 = true - goto LABEL35 - } - LABEL35: - if yyr2 || yy2arr2 { - if yyn35 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.DeletionTimestamp == nil { - r.EncodeNil() - } else { - yym36 := z.EncBinary() - _ = yym36 - if false { - } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { - } else if yym36 { - z.EncBinaryMarshal(x.DeletionTimestamp) - } else if !yym36 && z.IsJSONHandle() { - z.EncJSONMarshal(x.DeletionTimestamp) - } else { - z.EncFallback(x.DeletionTimestamp) - } - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletionTimestamp")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn35 { - r.EncodeNil() - } else { - if x.DeletionTimestamp == nil { - r.EncodeNil() - } else { - yym37 := z.EncBinary() - _ = yym37 - if false { - } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { - } else if yym37 { - z.EncBinaryMarshal(x.DeletionTimestamp) - } else if !yym37 && z.IsJSONHandle() { - z.EncJSONMarshal(x.DeletionTimestamp) - } else { - z.EncFallback(x.DeletionTimestamp) - } - } - } - } - } - var yyn38 bool - if x.ObjectMeta.DeletionGracePeriodSeconds == nil { - yyn38 = true - goto LABEL38 - } - LABEL38: - if yyr2 || yy2arr2 { - if yyn38 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.DeletionGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy39 := *x.DeletionGracePeriodSeconds - yym40 := z.EncBinary() - _ = yym40 - if false { - } else { - r.EncodeInt(int64(yy39)) - } - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletionGracePeriodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn38 { - r.EncodeNil() - } else { - if x.DeletionGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy41 := *x.DeletionGracePeriodSeconds - yym42 := z.EncBinary() - _ = yym42 - if false { - } else { - r.EncodeInt(int64(yy41)) - } - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - if x.Labels == nil { - r.EncodeNil() - } else { - yym44 := z.EncBinary() - _ = yym44 - if false { - } else { - z.F.EncMapStringStringV(x.Labels, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("labels")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Labels == nil { - r.EncodeNil() - } else { - yym45 := z.EncBinary() - _ = yym45 - if false { - } else { - z.F.EncMapStringStringV(x.Labels, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - if x.Annotations == nil { - r.EncodeNil() - } else { - yym47 := z.EncBinary() - _ = yym47 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("annotations")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Annotations == nil { - r.EncodeNil() - } else { - yym48 := z.EncBinary() - _ = yym48 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[14] { - if x.OwnerReferences == nil { - r.EncodeNil() - } else { - yym50 := z.EncBinary() - _ = yym50 - if false { - } else { - h.encSliceapi_OwnerReference(([]pkg2_api.OwnerReference)(x.OwnerReferences), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ownerReferences")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.OwnerReferences == nil { - r.EncodeNil() - } else { - yym51 := z.EncBinary() - _ = yym51 - if false { - } else { - 
h.encSliceapi_OwnerReference(([]pkg2_api.OwnerReference)(x.OwnerReferences), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[15] { - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym53 := z.EncBinary() - _ = yym53 - if false { - } else { - z.F.EncSliceStringV(x.Finalizers, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("finalizers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym54 := z.EncBinary() - _ = yym54 - if false { - } else { - z.F.EncSliceStringV(x.Finalizers, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[16] { - yym56 := z.EncBinary() - _ = yym56 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym57 := z.EncBinary() - _ = yym57 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy59 := &x.Spec - yy59.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy60 := &x.Spec - yy60.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy62 := &x.Status - yy62.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy63 := &x.Status - yy63.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym64 := z.DecBinary() - _ = yym64 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct65 := r.ContainerType() - if yyct65 == codecSelferValueTypeMap1234 { - yyl65 := r.ReadMapStart() - if yyl65 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl65, d) - } - } else if yyct65 == codecSelferValueTypeArray1234 { - yyl65 := r.ReadArrayStart() - if yyl65 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl65, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys66Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys66Slc - var yyhl66 bool = l >= 0 - for yyj66 := 0; ; yyj66++ { - if yyhl66 { - if yyj66 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys66Slc = r.DecodeBytes(yys66Slc, true, true) - yys66 := string(yys66Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys66 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "generateName": - if r.TryDecodeAsNil() { - x.GenerateName = "" - } else { - x.GenerateName = string(r.DecodeString()) - } - case "namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - case "selfLink": - if r.TryDecodeAsNil() { - x.SelfLink = "" - } else { - x.SelfLink = string(r.DecodeString()) - } - case "uid": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg3_types.UID(r.DecodeString()) - } - case "resourceVersion": - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - case "generation": - if r.TryDecodeAsNil() { - x.Generation = 0 - } else { - x.Generation = int64(r.DecodeInt(64)) - } - case "creationTimestamp": - if r.TryDecodeAsNil() { - x.CreationTimestamp = pkg1_unversioned.Time{} - } else { - yyv76 := &x.CreationTimestamp - yym77 := z.DecBinary() - _ = yym77 - if false { - } else if z.HasExtensions() && z.DecExt(yyv76) { - } else if yym77 { - z.DecBinaryUnmarshal(yyv76) - } else if !yym77 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv76) - } else { - z.DecFallback(yyv76, false) - } - } - case "deletionTimestamp": - if x.ObjectMeta.DeletionTimestamp == nil { - x.ObjectMeta.DeletionTimestamp = new(pkg1_unversioned.Time) - } - if r.TryDecodeAsNil() { - if x.DeletionTimestamp != nil { - x.DeletionTimestamp = nil - } - } else { - if x.DeletionTimestamp == nil { - x.DeletionTimestamp = new(pkg1_unversioned.Time) - } - yym79 := z.DecBinary() - _ = yym79 - if false { - } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym79 { - z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym79 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.DeletionTimestamp) - } else { - z.DecFallback(x.DeletionTimestamp, false) - } - } - case "deletionGracePeriodSeconds": - if x.ObjectMeta.DeletionGracePeriodSeconds == nil { - x.ObjectMeta.DeletionGracePeriodSeconds = new(int64) - } - if r.TryDecodeAsNil() { - if x.DeletionGracePeriodSeconds != nil { - x.DeletionGracePeriodSeconds = nil - } - } else { - if x.DeletionGracePeriodSeconds == nil { - x.DeletionGracePeriodSeconds = new(int64) - } - yym81 := z.DecBinary() - _ = yym81 - if false { - } else { - *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - case "labels": - if r.TryDecodeAsNil() { - x.Labels = nil - } else { - yyv82 := &x.Labels - yym83 := z.DecBinary() - _ = yym83 - if false { - } else { - z.F.DecMapStringStringX(yyv82, false, d) - } - } - case "annotations": - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv84 := &x.Annotations - yym85 := z.DecBinary() - _ = yym85 - if false { - } else { - z.F.DecMapStringStringX(yyv84, false, d) - } - } - case "ownerReferences": - if r.TryDecodeAsNil() { - x.OwnerReferences = nil - } else { - yyv86 := &x.OwnerReferences - yym87 := z.DecBinary() - _ = yym87 - if false { - } else { - h.decSliceapi_OwnerReference((*[]pkg2_api.OwnerReference)(yyv86), d) - } - } - case 
"finalizers": - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv88 := &x.Finalizers - yym89 := z.DecBinary() - _ = yym89 - if false { - } else { - z.F.DecSliceStringX(yyv88, false, d) - } - } - case "clusterName": - if r.TryDecodeAsNil() { - x.ClusterName = "" - } else { - x.ClusterName = string(r.DecodeString()) - } - case "Spec": - if r.TryDecodeAsNil() { - x.Spec = SubjectAccessReviewSpec{} - } else { - yyv91 := &x.Spec - yyv91.CodecDecodeSelf(d) - } - case "Status": - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv92 := &x.Status - yyv92.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys66) - } // end switch yys66 - } // end for yyj66 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj93 int - var yyb93 bool - var yyhl93 bool = l >= 0 - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.GenerateName = "" - } else { - x.GenerateName = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SelfLink = "" - } else { - x.SelfLink = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg3_types.UID(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Generation = 0 - } else { - x.Generation = int64(r.DecodeInt(64)) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CreationTimestamp = pkg1_unversioned.Time{} - } else { - yyv103 := &x.CreationTimestamp - yym104 := z.DecBinary() - _ = yym104 - if false { - } else if z.HasExtensions() && z.DecExt(yyv103) { - } else if yym104 { - z.DecBinaryUnmarshal(yyv103) - } else if !yym104 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv103) - } else { - z.DecFallback(yyv103, false) - } - } - if x.ObjectMeta.DeletionTimestamp == nil { - x.ObjectMeta.DeletionTimestamp = new(pkg1_unversioned.Time) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DeletionTimestamp != nil { - x.DeletionTimestamp = nil - } - } else { - if x.DeletionTimestamp == nil { - x.DeletionTimestamp = new(pkg1_unversioned.Time) - } - yym106 := z.DecBinary() - _ = yym106 - if false { - } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym106 { - z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym106 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.DeletionTimestamp) - } else { - z.DecFallback(x.DeletionTimestamp, false) - } - } - if x.ObjectMeta.DeletionGracePeriodSeconds == nil { - x.ObjectMeta.DeletionGracePeriodSeconds = new(int64) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DeletionGracePeriodSeconds != nil { - x.DeletionGracePeriodSeconds = nil - } - } else { - if x.DeletionGracePeriodSeconds == nil { - x.DeletionGracePeriodSeconds = new(int64) - } - yym108 := z.DecBinary() - _ = yym108 - if false { - } else { - *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Labels = nil - } else { - yyv109 := &x.Labels - yym110 := z.DecBinary() - _ = yym110 - if false { - } else { - z.F.DecMapStringStringX(yyv109, false, d) - } - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv111 := &x.Annotations - yym112 := z.DecBinary() - _ = yym112 - if false { - } else { - z.F.DecMapStringStringX(yyv111, false, d) - } - } 
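For orientation (not part of the diff itself): the deleted lines above appear to be ugorji codecgen output for the authorization types, and the codecDecodeSelfFromMap body earlier in this hunk decodes a struct by reading each map key and switching on the field name, routing unknown keys to DecStructFieldNotFound. A minimal hand-written sketch of that shape, using only encoding/json and a hypothetical two-field struct (`review` and `decodeFromMap` are illustrative names, not code from this repository):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// review is a hypothetical stand-in for the generated type; only the
// key-switch shape of codecDecodeSelfFromMap is being illustrated.
type review struct {
	Kind        string
	ClusterName string
}

func decodeFromMap(data []byte, x *review) error {
	var fields map[string]json.RawMessage
	if err := json.Unmarshal(data, &fields); err != nil {
		return err
	}
	for key, raw := range fields {
		switch key { // mirrors the switch on the decoded key (yys66) above
		case "kind":
			if err := json.Unmarshal(raw, &x.Kind); err != nil {
				return err
			}
		case "clusterName":
			if err := json.Unmarshal(raw, &x.ClusterName); err != nil {
				return err
			}
		default:
			// the generated code reports unknown keys via DecStructFieldNotFound;
			// here they are simply skipped
		}
	}
	return nil
}

func main() {
	var r review
	if err := decodeFromMap([]byte(`{"kind":"SubjectAccessReview","clusterName":"c1"}`), &r); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", r)
}
```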
- yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.OwnerReferences = nil - } else { - yyv113 := &x.OwnerReferences - yym114 := z.DecBinary() - _ = yym114 - if false { - } else { - h.decSliceapi_OwnerReference((*[]pkg2_api.OwnerReference)(yyv113), d) - } - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv115 := &x.Finalizers - yym116 := z.DecBinary() - _ = yym116 - if false { - } else { - z.F.DecSliceStringX(yyv115, false, d) - } - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterName = "" - } else { - x.ClusterName = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = SubjectAccessReviewSpec{} - } else { - yyv118 := &x.Spec - yyv118.CodecDecodeSelf(d) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv119 := &x.Status - yyv119.CodecDecodeSelf(d) - } - for { - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj93-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SelfSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym120 := z.EncBinary() - _ = yym120 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep121 := !z.EncBinary() - yy2arr121 := z.EncBasicHandle().StructToArray - var yyq121 [19]bool - _, _, _ = yysep121, yyq121, yy2arr121 - const yyr121 bool = false - yyq121[0] = x.Kind != "" - yyq121[1] = x.APIVersion != "" - yyq121[2] = x.Name != "" - yyq121[3] = x.GenerateName != "" - yyq121[4] = x.Namespace != "" - yyq121[5] = x.SelfLink != "" - yyq121[6] = x.UID != "" - yyq121[7] = x.ResourceVersion != "" - yyq121[8] = x.Generation != 0 - yyq121[9] = true - yyq121[10] = x.ObjectMeta.DeletionTimestamp != nil && x.DeletionTimestamp != nil - yyq121[11] = x.ObjectMeta.DeletionGracePeriodSeconds != nil && x.DeletionGracePeriodSeconds != nil - yyq121[12] = len(x.Labels) != 0 - yyq121[13] = len(x.Annotations) != 0 - yyq121[14] = len(x.OwnerReferences) != 0 - yyq121[15] = len(x.Finalizers) != 0 - yyq121[16] = x.ClusterName != "" - var yynn121 int - if yyr121 || yy2arr121 { - r.EncodeArrayStart(19) - } else { - yynn121 = 2 - for _, b := range yyq121 { - if b { - yynn121++ - } - } 
- r.EncodeMapStart(yynn121) - yynn121 = 0 - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq121[0] { - yym123 := z.EncBinary() - _ = yym123 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq121[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym124 := z.EncBinary() - _ = yym124 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq121[1] { - yym126 := z.EncBinary() - _ = yym126 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq121[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym127 := z.EncBinary() - _ = yym127 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq121[2] { - yym129 := z.EncBinary() - _ = yym129 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq121[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym130 := z.EncBinary() - _ = yym130 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq121[3] { - yym132 := z.EncBinary() - _ = yym132 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq121[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("generateName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym133 := z.EncBinary() - _ = yym133 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) - } - } - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq121[4] { - yym135 := z.EncBinary() - _ = yym135 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq121[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym136 := z.EncBinary() - _ = yym136 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq121[5] { - yym138 := z.EncBinary() - _ = yym138 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq121[5] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selfLink")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym139 := z.EncBinary() - _ = yym139 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) - } - } - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq121[6] { - yym141 := z.EncBinary() - _ = yym141 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq121[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym142 := z.EncBinary() - _ = yym142 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq121[7] { - yym144 := z.EncBinary() - _ = yym144 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq121[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym145 := z.EncBinary() - _ = yym145 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq121[8] { - yym147 := z.EncBinary() - _ = yym147 - if false { - } else { - r.EncodeInt(int64(x.Generation)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq121[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("generation")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym148 := z.EncBinary() - _ = yym148 - if false { - } else { - r.EncodeInt(int64(x.Generation)) - } - } - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq121[9] { - yy150 := &x.CreationTimestamp - yym151 := z.EncBinary() - _ = yym151 - if false { - } else if z.HasExtensions() && z.EncExt(yy150) { - } else if yym151 { - z.EncBinaryMarshal(yy150) - } else if !yym151 && z.IsJSONHandle() { - z.EncJSONMarshal(yy150) - } else { - z.EncFallback(yy150) - } - } else { - r.EncodeNil() - } - } else { - if yyq121[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("creationTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy152 := &x.CreationTimestamp - yym153 := z.EncBinary() - _ = yym153 - if false { - } else if z.HasExtensions() && z.EncExt(yy152) { - } else if yym153 { - z.EncBinaryMarshal(yy152) - } else if !yym153 && z.IsJSONHandle() { - z.EncJSONMarshal(yy152) - } else { - z.EncFallback(yy152) - } - } - } - var yyn154 bool - if x.ObjectMeta.DeletionTimestamp == nil { - yyn154 = true - goto LABEL154 - } - LABEL154: - if yyr121 || yy2arr121 { - if yyn154 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq121[10] { - if x.DeletionTimestamp == nil { - r.EncodeNil() - } else { - yym155 := z.EncBinary() - _ = yym155 - 
if false { - } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { - } else if yym155 { - z.EncBinaryMarshal(x.DeletionTimestamp) - } else if !yym155 && z.IsJSONHandle() { - z.EncJSONMarshal(x.DeletionTimestamp) - } else { - z.EncFallback(x.DeletionTimestamp) - } - } - } else { - r.EncodeNil() - } - } - } else { - if yyq121[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletionTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn154 { - r.EncodeNil() - } else { - if x.DeletionTimestamp == nil { - r.EncodeNil() - } else { - yym156 := z.EncBinary() - _ = yym156 - if false { - } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { - } else if yym156 { - z.EncBinaryMarshal(x.DeletionTimestamp) - } else if !yym156 && z.IsJSONHandle() { - z.EncJSONMarshal(x.DeletionTimestamp) - } else { - z.EncFallback(x.DeletionTimestamp) - } - } - } - } - } - var yyn157 bool - if x.ObjectMeta.DeletionGracePeriodSeconds == nil { - yyn157 = true - goto LABEL157 - } - LABEL157: - if yyr121 || yy2arr121 { - if yyn157 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq121[11] { - if x.DeletionGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy158 := *x.DeletionGracePeriodSeconds - yym159 := z.EncBinary() - _ = yym159 - if false { - } else { - r.EncodeInt(int64(yy158)) - } - } - } else { - r.EncodeNil() - } - } - } else { - if yyq121[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletionGracePeriodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn157 { - r.EncodeNil() - } else { - if x.DeletionGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy160 := *x.DeletionGracePeriodSeconds - yym161 := z.EncBinary() - _ = yym161 - if false { - } else { - r.EncodeInt(int64(yy160)) - } - } - } - } - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq121[12] { - if x.Labels == nil { - r.EncodeNil() - } else { - yym163 := z.EncBinary() - _ = yym163 - if false { - } else { - z.F.EncMapStringStringV(x.Labels, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq121[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("labels")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Labels == nil { - r.EncodeNil() - } else { - yym164 := z.EncBinary() - _ = yym164 - if false { - } else { - z.F.EncMapStringStringV(x.Labels, false, e) - } - } - } - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq121[13] { - if x.Annotations == nil { - r.EncodeNil() - } else { - yym166 := z.EncBinary() - _ = yym166 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq121[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("annotations")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Annotations == nil { - r.EncodeNil() - } else { - yym167 := z.EncBinary() - _ = yym167 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq121[14] { - if x.OwnerReferences == nil { - r.EncodeNil() - } else { - yym169 := 
z.EncBinary() - _ = yym169 - if false { - } else { - h.encSliceapi_OwnerReference(([]pkg2_api.OwnerReference)(x.OwnerReferences), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq121[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ownerReferences")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.OwnerReferences == nil { - r.EncodeNil() - } else { - yym170 := z.EncBinary() - _ = yym170 - if false { - } else { - h.encSliceapi_OwnerReference(([]pkg2_api.OwnerReference)(x.OwnerReferences), e) - } - } - } - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq121[15] { - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym172 := z.EncBinary() - _ = yym172 - if false { - } else { - z.F.EncSliceStringV(x.Finalizers, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq121[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("finalizers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym173 := z.EncBinary() - _ = yym173 - if false { - } else { - z.F.EncSliceStringV(x.Finalizers, false, e) - } - } - } - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq121[16] { - yym175 := z.EncBinary() - _ = yym175 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq121[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym176 := z.EncBinary() - _ = yym176 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) - } - } - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy178 := &x.Spec - yy178.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy179 := &x.Spec - yy179.CodecEncodeSelf(e) - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy181 := &x.Status - yy181.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy182 := &x.Status - yy182.CodecEncodeSelf(e) - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SelfSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym183 := z.DecBinary() - _ = yym183 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct184 := r.ContainerType() - if yyct184 == codecSelferValueTypeMap1234 { - yyl184 := r.ReadMapStart() - if yyl184 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl184, d) - } - } else if yyct184 == codecSelferValueTypeArray1234 { - yyl184 := r.ReadArrayStart() - if yyl184 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } 
else { - x.codecDecodeSelfFromArray(yyl184, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SelfSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys185Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys185Slc - var yyhl185 bool = l >= 0 - for yyj185 := 0; ; yyj185++ { - if yyhl185 { - if yyj185 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys185Slc = r.DecodeBytes(yys185Slc, true, true) - yys185 := string(yys185Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys185 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "generateName": - if r.TryDecodeAsNil() { - x.GenerateName = "" - } else { - x.GenerateName = string(r.DecodeString()) - } - case "namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - case "selfLink": - if r.TryDecodeAsNil() { - x.SelfLink = "" - } else { - x.SelfLink = string(r.DecodeString()) - } - case "uid": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg3_types.UID(r.DecodeString()) - } - case "resourceVersion": - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - case "generation": - if r.TryDecodeAsNil() { - x.Generation = 0 - } else { - x.Generation = int64(r.DecodeInt(64)) - } - case "creationTimestamp": - if r.TryDecodeAsNil() { - x.CreationTimestamp = pkg1_unversioned.Time{} - } else { - yyv195 := &x.CreationTimestamp - yym196 := z.DecBinary() - _ = yym196 - if false { - } else if z.HasExtensions() && z.DecExt(yyv195) { - } else if yym196 { - z.DecBinaryUnmarshal(yyv195) - } else if !yym196 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv195) - } else { - z.DecFallback(yyv195, false) - } - } - case "deletionTimestamp": - if x.ObjectMeta.DeletionTimestamp == nil { - x.ObjectMeta.DeletionTimestamp = new(pkg1_unversioned.Time) - } - if r.TryDecodeAsNil() { - if x.DeletionTimestamp != nil { - x.DeletionTimestamp = nil - } - } else { - if x.DeletionTimestamp == nil { - x.DeletionTimestamp = new(pkg1_unversioned.Time) - } - yym198 := z.DecBinary() - _ = yym198 - if false { - } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym198 { - z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym198 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.DeletionTimestamp) - } else { - z.DecFallback(x.DeletionTimestamp, false) - } - } - case "deletionGracePeriodSeconds": - if x.ObjectMeta.DeletionGracePeriodSeconds == nil { - x.ObjectMeta.DeletionGracePeriodSeconds = new(int64) - } - if r.TryDecodeAsNil() { - if x.DeletionGracePeriodSeconds != nil { - x.DeletionGracePeriodSeconds = nil - } - } else { - if x.DeletionGracePeriodSeconds == nil { - x.DeletionGracePeriodSeconds = new(int64) - } - yym200 := z.DecBinary() - _ = yym200 - if false { - } else { - *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - case "labels": - if r.TryDecodeAsNil() { - x.Labels = nil - } else { - yyv201 := &x.Labels - yym202 := z.DecBinary() - _ = 
yym202 - if false { - } else { - z.F.DecMapStringStringX(yyv201, false, d) - } - } - case "annotations": - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv203 := &x.Annotations - yym204 := z.DecBinary() - _ = yym204 - if false { - } else { - z.F.DecMapStringStringX(yyv203, false, d) - } - } - case "ownerReferences": - if r.TryDecodeAsNil() { - x.OwnerReferences = nil - } else { - yyv205 := &x.OwnerReferences - yym206 := z.DecBinary() - _ = yym206 - if false { - } else { - h.decSliceapi_OwnerReference((*[]pkg2_api.OwnerReference)(yyv205), d) - } - } - case "finalizers": - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv207 := &x.Finalizers - yym208 := z.DecBinary() - _ = yym208 - if false { - } else { - z.F.DecSliceStringX(yyv207, false, d) - } - } - case "clusterName": - if r.TryDecodeAsNil() { - x.ClusterName = "" - } else { - x.ClusterName = string(r.DecodeString()) - } - case "Spec": - if r.TryDecodeAsNil() { - x.Spec = SelfSubjectAccessReviewSpec{} - } else { - yyv210 := &x.Spec - yyv210.CodecDecodeSelf(d) - } - case "Status": - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv211 := &x.Status - yyv211.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys185) - } // end switch yys185 - } // end for yyj185 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SelfSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj212 int - var yyb212 bool - var yyhl212 bool = l >= 0 - yyj212++ - if yyhl212 { - yyb212 = yyj212 > l - } else { - yyb212 = r.CheckBreak() - } - if yyb212 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj212++ - if yyhl212 { - yyb212 = yyj212 > l - } else { - yyb212 = r.CheckBreak() - } - if yyb212 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj212++ - if yyhl212 { - yyb212 = yyj212 > l - } else { - yyb212 = r.CheckBreak() - } - if yyb212 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj212++ - if yyhl212 { - yyb212 = yyj212 > l - } else { - yyb212 = r.CheckBreak() - } - if yyb212 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.GenerateName = "" - } else { - x.GenerateName = string(r.DecodeString()) - } - yyj212++ - if yyhl212 { - yyb212 = yyj212 > l - } else { - yyb212 = r.CheckBreak() - } - if yyb212 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - yyj212++ - if yyhl212 { - yyb212 = yyj212 > l - } else { - yyb212 = r.CheckBreak() - } - if yyb212 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.SelfLink = "" - } else { - x.SelfLink = string(r.DecodeString()) - } - yyj212++ - if yyhl212 { - yyb212 = yyj212 > l - } else { - yyb212 = r.CheckBreak() - } - if yyb212 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg3_types.UID(r.DecodeString()) - } - yyj212++ - if yyhl212 { - yyb212 = yyj212 > l - } else { - yyb212 = r.CheckBreak() - } - if yyb212 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - yyj212++ - if yyhl212 { - yyb212 = yyj212 > l - } else { - yyb212 = r.CheckBreak() - } - if yyb212 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Generation = 0 - } else { - x.Generation = int64(r.DecodeInt(64)) - } - yyj212++ - if yyhl212 { - yyb212 = yyj212 > l - } else { - yyb212 = r.CheckBreak() - } - if yyb212 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CreationTimestamp = pkg1_unversioned.Time{} - } else { - yyv222 := &x.CreationTimestamp - yym223 := z.DecBinary() - _ = yym223 - if false { - } else if z.HasExtensions() && z.DecExt(yyv222) { - } else if yym223 { - z.DecBinaryUnmarshal(yyv222) - } else if !yym223 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv222) - } else { - z.DecFallback(yyv222, false) - } - } - if x.ObjectMeta.DeletionTimestamp == nil { - x.ObjectMeta.DeletionTimestamp = new(pkg1_unversioned.Time) - } - yyj212++ - if yyhl212 { - yyb212 = yyj212 > l - } else { - yyb212 = r.CheckBreak() - } - if yyb212 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DeletionTimestamp != nil { - x.DeletionTimestamp = nil - } - } else { - if x.DeletionTimestamp == nil { - x.DeletionTimestamp = new(pkg1_unversioned.Time) - } - yym225 := z.DecBinary() - _ = yym225 - if false { - } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym225 { - z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym225 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.DeletionTimestamp) - } else { - z.DecFallback(x.DeletionTimestamp, false) - } - } - if x.ObjectMeta.DeletionGracePeriodSeconds == nil { - x.ObjectMeta.DeletionGracePeriodSeconds = new(int64) - } - yyj212++ - if yyhl212 { - yyb212 = yyj212 > l - } else { - yyb212 = r.CheckBreak() - } - if yyb212 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DeletionGracePeriodSeconds != nil { - x.DeletionGracePeriodSeconds = nil - } - } else { - if x.DeletionGracePeriodSeconds == nil { - x.DeletionGracePeriodSeconds = new(int64) - } - yym227 := z.DecBinary() - _ = yym227 - if false { - } else { - *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj212++ - if yyhl212 { - yyb212 = yyj212 > l - } else { - yyb212 = r.CheckBreak() - } - if yyb212 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
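The codecDecodeSelfFromArray bodies in this hunk consume fields strictly by position (kind, apiVersion, name, ...), checking the declared length or CheckBreak before each element and draining any surplus through DecStructFieldNotFound. A minimal positional-decode sketch of the same idea, with a hypothetical three-field struct and encoding/json (illustration only, not part of the diff):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// review is a hypothetical three-field stand-in; only the positional shape
// of codecDecodeSelfFromArray is being illustrated.
type review struct {
	Kind       string
	APIVersion string
	Name       string
}

func decodeFromArray(data []byte, x *review) error {
	var elems []json.RawMessage
	if err := json.Unmarshal(data, &elems); err != nil {
		return err
	}
	// Fields are filled strictly by position, like the yyj element counter above.
	targets := []interface{}{&x.Kind, &x.APIVersion, &x.Name}
	for i, raw := range elems {
		if i >= len(targets) {
			// the generated code drains extra elements via DecStructFieldNotFound
			break
		}
		if err := json.Unmarshal(raw, targets[i]); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	var r review
	if err := decodeFromArray([]byte(`["SelfSubjectAccessReview","v1","foo"]`), &r); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", r)
}
```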
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Labels = nil - } else { - yyv228 := &x.Labels - yym229 := z.DecBinary() - _ = yym229 - if false { - } else { - z.F.DecMapStringStringX(yyv228, false, d) - } - } - yyj212++ - if yyhl212 { - yyb212 = yyj212 > l - } else { - yyb212 = r.CheckBreak() - } - if yyb212 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv230 := &x.Annotations - yym231 := z.DecBinary() - _ = yym231 - if false { - } else { - z.F.DecMapStringStringX(yyv230, false, d) - } - } - yyj212++ - if yyhl212 { - yyb212 = yyj212 > l - } else { - yyb212 = r.CheckBreak() - } - if yyb212 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.OwnerReferences = nil - } else { - yyv232 := &x.OwnerReferences - yym233 := z.DecBinary() - _ = yym233 - if false { - } else { - h.decSliceapi_OwnerReference((*[]pkg2_api.OwnerReference)(yyv232), d) - } - } - yyj212++ - if yyhl212 { - yyb212 = yyj212 > l - } else { - yyb212 = r.CheckBreak() - } - if yyb212 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv234 := &x.Finalizers - yym235 := z.DecBinary() - _ = yym235 - if false { - } else { - z.F.DecSliceStringX(yyv234, false, d) - } - } - yyj212++ - if yyhl212 { - yyb212 = yyj212 > l - } else { - yyb212 = r.CheckBreak() - } - if yyb212 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterName = "" - } else { - x.ClusterName = string(r.DecodeString()) - } - yyj212++ - if yyhl212 { - yyb212 = yyj212 > l - } else { - yyb212 = r.CheckBreak() - } - if yyb212 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = SelfSubjectAccessReviewSpec{} - } else { - yyv237 := &x.Spec - yyv237.CodecDecodeSelf(d) - } - yyj212++ - if yyhl212 { - yyb212 = yyj212 > l - } else { - yyb212 = r.CheckBreak() - } - if yyb212 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv238 := &x.Status - yyv238.CodecDecodeSelf(d) - } - for { - yyj212++ - if yyhl212 { - yyb212 = yyj212 > l - } else { - yyb212 = r.CheckBreak() - } - if yyb212 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj212-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LocalSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym239 := z.EncBinary() - _ = yym239 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep240 := !z.EncBinary() - yy2arr240 := z.EncBasicHandle().StructToArray - var yyq240 [19]bool - _, _, _ = yysep240, yyq240, yy2arr240 - const yyr240 bool = false - yyq240[0] = x.Kind != "" - yyq240[1] = x.APIVersion != "" - yyq240[2] = x.Name != "" - 
yyq240[3] = x.GenerateName != "" - yyq240[4] = x.Namespace != "" - yyq240[5] = x.SelfLink != "" - yyq240[6] = x.UID != "" - yyq240[7] = x.ResourceVersion != "" - yyq240[8] = x.Generation != 0 - yyq240[9] = true - yyq240[10] = x.ObjectMeta.DeletionTimestamp != nil && x.DeletionTimestamp != nil - yyq240[11] = x.ObjectMeta.DeletionGracePeriodSeconds != nil && x.DeletionGracePeriodSeconds != nil - yyq240[12] = len(x.Labels) != 0 - yyq240[13] = len(x.Annotations) != 0 - yyq240[14] = len(x.OwnerReferences) != 0 - yyq240[15] = len(x.Finalizers) != 0 - yyq240[16] = x.ClusterName != "" - var yynn240 int - if yyr240 || yy2arr240 { - r.EncodeArrayStart(19) - } else { - yynn240 = 2 - for _, b := range yyq240 { - if b { - yynn240++ - } - } - r.EncodeMapStart(yynn240) - yynn240 = 0 - } - if yyr240 || yy2arr240 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq240[0] { - yym242 := z.EncBinary() - _ = yym242 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq240[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym243 := z.EncBinary() - _ = yym243 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr240 || yy2arr240 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq240[1] { - yym245 := z.EncBinary() - _ = yym245 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq240[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym246 := z.EncBinary() - _ = yym246 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr240 || yy2arr240 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq240[2] { - yym248 := z.EncBinary() - _ = yym248 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq240[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym249 := z.EncBinary() - _ = yym249 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr240 || yy2arr240 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq240[3] { - yym251 := z.EncBinary() - _ = yym251 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq240[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("generateName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym252 := z.EncBinary() - _ = yym252 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) - } - } - } - if yyr240 || yy2arr240 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq240[4] { - yym254 := z.EncBinary() - _ = yym254 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } 
else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq240[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym255 := z.EncBinary() - _ = yym255 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - } - if yyr240 || yy2arr240 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq240[5] { - yym257 := z.EncBinary() - _ = yym257 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq240[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selfLink")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym258 := z.EncBinary() - _ = yym258 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) - } - } - } - if yyr240 || yy2arr240 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq240[6] { - yym260 := z.EncBinary() - _ = yym260 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq240[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym261 := z.EncBinary() - _ = yym261 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - } - if yyr240 || yy2arr240 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq240[7] { - yym263 := z.EncBinary() - _ = yym263 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq240[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym264 := z.EncBinary() - _ = yym264 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } - } - if yyr240 || yy2arr240 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq240[8] { - yym266 := z.EncBinary() - _ = yym266 - if false { - } else { - r.EncodeInt(int64(x.Generation)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq240[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("generation")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym267 := z.EncBinary() - _ = yym267 - if false { - } else { - r.EncodeInt(int64(x.Generation)) - } - } - } - if yyr240 || yy2arr240 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq240[9] { - yy269 := &x.CreationTimestamp - yym270 := z.EncBinary() - _ = yym270 - if false { - } else if z.HasExtensions() && z.EncExt(yy269) { - } else if yym270 { - z.EncBinaryMarshal(yy269) - } else if !yym270 && z.IsJSONHandle() { - z.EncJSONMarshal(yy269) - } else { - z.EncFallback(yy269) - } - } else { - r.EncodeNil() - } - } else { - if yyq240[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("creationTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy271 := &x.CreationTimestamp - yym272 := z.EncBinary() - _ = yym272 - if false { - } else if z.HasExtensions() && z.EncExt(yy271) { - } else if yym272 { - z.EncBinaryMarshal(yy271) - } else if !yym272 && z.IsJSONHandle() { - z.EncJSONMarshal(yy271) - } else { - z.EncFallback(yy271) - } - } - } - var yyn273 bool - if x.ObjectMeta.DeletionTimestamp == nil { - yyn273 = true - goto LABEL273 - } - LABEL273: - if yyr240 || yy2arr240 { - if yyn273 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq240[10] { - if x.DeletionTimestamp == nil { - r.EncodeNil() - } else { - yym274 := z.EncBinary() - _ = yym274 - if false { - } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { - } else if yym274 { - z.EncBinaryMarshal(x.DeletionTimestamp) - } else if !yym274 && z.IsJSONHandle() { - z.EncJSONMarshal(x.DeletionTimestamp) - } else { - z.EncFallback(x.DeletionTimestamp) - } - } - } else { - r.EncodeNil() - } - } - } else { - if yyq240[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletionTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn273 { - r.EncodeNil() - } else { - if x.DeletionTimestamp == nil { - r.EncodeNil() - } else { - yym275 := z.EncBinary() - _ = yym275 - if false { - } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { - } else if yym275 { - z.EncBinaryMarshal(x.DeletionTimestamp) - } else if !yym275 && z.IsJSONHandle() { - z.EncJSONMarshal(x.DeletionTimestamp) - } else { - z.EncFallback(x.DeletionTimestamp) - } - } - } - } - } - var yyn276 bool - if x.ObjectMeta.DeletionGracePeriodSeconds == nil { - yyn276 = true - goto LABEL276 - } - LABEL276: - if yyr240 || yy2arr240 { - if yyn276 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq240[11] { - if x.DeletionGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy277 := *x.DeletionGracePeriodSeconds - yym278 := z.EncBinary() - _ = yym278 - if false { - } else { - r.EncodeInt(int64(yy277)) - } - } - } else { - r.EncodeNil() - } - } - } else { - if yyq240[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletionGracePeriodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn276 { - r.EncodeNil() - } else { - if x.DeletionGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy279 := *x.DeletionGracePeriodSeconds - yym280 := z.EncBinary() - _ = yym280 - if false { - } else { - r.EncodeInt(int64(yy279)) - } - } - } - } - } - if yyr240 || yy2arr240 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq240[12] { - if x.Labels == nil { - r.EncodeNil() - } else { - yym282 := z.EncBinary() - _ = yym282 - if false { - } else { - z.F.EncMapStringStringV(x.Labels, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq240[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("labels")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Labels == nil { - r.EncodeNil() - } else { - yym283 := z.EncBinary() - _ = yym283 - if false { - } else { - z.F.EncMapStringStringV(x.Labels, false, e) - } - } - } - } - if yyr240 || yy2arr240 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq240[13] { - if x.Annotations == nil { - r.EncodeNil() - 
} else { - yym285 := z.EncBinary() - _ = yym285 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq240[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("annotations")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Annotations == nil { - r.EncodeNil() - } else { - yym286 := z.EncBinary() - _ = yym286 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } - } - if yyr240 || yy2arr240 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq240[14] { - if x.OwnerReferences == nil { - r.EncodeNil() - } else { - yym288 := z.EncBinary() - _ = yym288 - if false { - } else { - h.encSliceapi_OwnerReference(([]pkg2_api.OwnerReference)(x.OwnerReferences), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq240[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ownerReferences")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.OwnerReferences == nil { - r.EncodeNil() - } else { - yym289 := z.EncBinary() - _ = yym289 - if false { - } else { - h.encSliceapi_OwnerReference(([]pkg2_api.OwnerReference)(x.OwnerReferences), e) - } - } - } - } - if yyr240 || yy2arr240 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq240[15] { - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym291 := z.EncBinary() - _ = yym291 - if false { - } else { - z.F.EncSliceStringV(x.Finalizers, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq240[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("finalizers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym292 := z.EncBinary() - _ = yym292 - if false { - } else { - z.F.EncSliceStringV(x.Finalizers, false, e) - } - } - } - } - if yyr240 || yy2arr240 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq240[16] { - yym294 := z.EncBinary() - _ = yym294 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq240[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym295 := z.EncBinary() - _ = yym295 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) - } - } - } - if yyr240 || yy2arr240 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy297 := &x.Spec - yy297.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy298 := &x.Spec - yy298.CodecEncodeSelf(e) - } - if yyr240 || yy2arr240 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy300 := &x.Status - yy300.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy301 := &x.Status - yy301.CodecEncodeSelf(e) - } - if yyr240 || yy2arr240 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LocalSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym302 := z.DecBinary() - _ = yym302 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct303 := r.ContainerType() - if yyct303 == codecSelferValueTypeMap1234 { - yyl303 := r.ReadMapStart() - if yyl303 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl303, d) - } - } else if yyct303 == codecSelferValueTypeArray1234 { - yyl303 := r.ReadArrayStart() - if yyl303 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl303, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LocalSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys304Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys304Slc - var yyhl304 bool = l >= 0 - for yyj304 := 0; ; yyj304++ { - if yyhl304 { - if yyj304 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys304Slc = r.DecodeBytes(yys304Slc, true, true) - yys304 := string(yys304Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys304 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "generateName": - if r.TryDecodeAsNil() { - x.GenerateName = "" - } else { - x.GenerateName = string(r.DecodeString()) - } - case "namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - case "selfLink": - if r.TryDecodeAsNil() { - x.SelfLink = "" - } else { - x.SelfLink = string(r.DecodeString()) - } - case "uid": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg3_types.UID(r.DecodeString()) - } - case "resourceVersion": - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - case "generation": - if r.TryDecodeAsNil() { - x.Generation = 0 - } else { - x.Generation = int64(r.DecodeInt(64)) - } - case "creationTimestamp": - if r.TryDecodeAsNil() { - x.CreationTimestamp = pkg1_unversioned.Time{} - } else { - yyv314 := &x.CreationTimestamp - yym315 := z.DecBinary() - _ = yym315 - if false { - } else if z.HasExtensions() && z.DecExt(yyv314) { - } else if yym315 { - z.DecBinaryUnmarshal(yyv314) - } else if !yym315 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv314) - } else { - z.DecFallback(yyv314, false) - } - } - case "deletionTimestamp": - if x.ObjectMeta.DeletionTimestamp == nil { - x.ObjectMeta.DeletionTimestamp = new(pkg1_unversioned.Time) - } - if r.TryDecodeAsNil() { - if x.DeletionTimestamp != nil { - x.DeletionTimestamp = nil - } - } else { - if x.DeletionTimestamp == nil { - x.DeletionTimestamp = new(pkg1_unversioned.Time) - } - yym317 := z.DecBinary() - _ = yym317 - if false { - } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym317 { - z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym317 && 
z.IsJSONHandle() { - z.DecJSONUnmarshal(x.DeletionTimestamp) - } else { - z.DecFallback(x.DeletionTimestamp, false) - } - } - case "deletionGracePeriodSeconds": - if x.ObjectMeta.DeletionGracePeriodSeconds == nil { - x.ObjectMeta.DeletionGracePeriodSeconds = new(int64) - } - if r.TryDecodeAsNil() { - if x.DeletionGracePeriodSeconds != nil { - x.DeletionGracePeriodSeconds = nil - } - } else { - if x.DeletionGracePeriodSeconds == nil { - x.DeletionGracePeriodSeconds = new(int64) - } - yym319 := z.DecBinary() - _ = yym319 - if false { - } else { - *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - case "labels": - if r.TryDecodeAsNil() { - x.Labels = nil - } else { - yyv320 := &x.Labels - yym321 := z.DecBinary() - _ = yym321 - if false { - } else { - z.F.DecMapStringStringX(yyv320, false, d) - } - } - case "annotations": - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv322 := &x.Annotations - yym323 := z.DecBinary() - _ = yym323 - if false { - } else { - z.F.DecMapStringStringX(yyv322, false, d) - } - } - case "ownerReferences": - if r.TryDecodeAsNil() { - x.OwnerReferences = nil - } else { - yyv324 := &x.OwnerReferences - yym325 := z.DecBinary() - _ = yym325 - if false { - } else { - h.decSliceapi_OwnerReference((*[]pkg2_api.OwnerReference)(yyv324), d) - } - } - case "finalizers": - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv326 := &x.Finalizers - yym327 := z.DecBinary() - _ = yym327 - if false { - } else { - z.F.DecSliceStringX(yyv326, false, d) - } - } - case "clusterName": - if r.TryDecodeAsNil() { - x.ClusterName = "" - } else { - x.ClusterName = string(r.DecodeString()) - } - case "Spec": - if r.TryDecodeAsNil() { - x.Spec = SubjectAccessReviewSpec{} - } else { - yyv329 := &x.Spec - yyv329.CodecDecodeSelf(d) - } - case "Status": - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv330 := &x.Status - yyv330.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys304) - } // end switch yys304 - } // end for yyj304 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LocalSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj331 int - var yyb331 bool - var yyhl331 bool = l >= 0 - yyj331++ - if yyhl331 { - yyb331 = yyj331 > l - } else { - yyb331 = r.CheckBreak() - } - if yyb331 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj331++ - if yyhl331 { - yyb331 = yyj331 > l - } else { - yyb331 = r.CheckBreak() - } - if yyb331 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj331++ - if yyhl331 { - yyb331 = yyj331 > l - } else { - yyb331 = r.CheckBreak() - } - if yyb331 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj331++ - if yyhl331 { - yyb331 = yyj331 > l - } else { - yyb331 = r.CheckBreak() - } - if yyb331 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.GenerateName = "" - } else { - x.GenerateName = string(r.DecodeString()) - } - yyj331++ - if yyhl331 { - yyb331 = yyj331 > l - } else { - yyb331 = r.CheckBreak() - } - if yyb331 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - yyj331++ - if yyhl331 { - yyb331 = yyj331 > l - } else { - yyb331 = r.CheckBreak() - } - if yyb331 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SelfLink = "" - } else { - x.SelfLink = string(r.DecodeString()) - } - yyj331++ - if yyhl331 { - yyb331 = yyj331 > l - } else { - yyb331 = r.CheckBreak() - } - if yyb331 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg3_types.UID(r.DecodeString()) - } - yyj331++ - if yyhl331 { - yyb331 = yyj331 > l - } else { - yyb331 = r.CheckBreak() - } - if yyb331 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - yyj331++ - if yyhl331 { - yyb331 = yyj331 > l - } else { - yyb331 = r.CheckBreak() - } - if yyb331 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Generation = 0 - } else { - x.Generation = int64(r.DecodeInt(64)) - } - yyj331++ - if yyhl331 { - yyb331 = yyj331 > l - } else { - yyb331 = r.CheckBreak() - } - if yyb331 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CreationTimestamp = pkg1_unversioned.Time{} - } else { - yyv341 := &x.CreationTimestamp - yym342 := z.DecBinary() - _ = yym342 - if false { - } else if z.HasExtensions() && z.DecExt(yyv341) { - } else if yym342 { - z.DecBinaryUnmarshal(yyv341) - } else if !yym342 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv341) - } else { - z.DecFallback(yyv341, false) - } - } - if x.ObjectMeta.DeletionTimestamp == nil { - x.ObjectMeta.DeletionTimestamp = new(pkg1_unversioned.Time) - } - yyj331++ - if yyhl331 { - yyb331 = yyj331 > l - } else { - yyb331 = r.CheckBreak() - } - if yyb331 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DeletionTimestamp != nil { - x.DeletionTimestamp = nil - } - } else { - if x.DeletionTimestamp == nil { - x.DeletionTimestamp = new(pkg1_unversioned.Time) - } - yym344 := z.DecBinary() - _ = yym344 - if false { - } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym344 { - z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym344 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.DeletionTimestamp) - } else { - z.DecFallback(x.DeletionTimestamp, false) - } - } - if x.ObjectMeta.DeletionGracePeriodSeconds == nil { - x.ObjectMeta.DeletionGracePeriodSeconds = new(int64) - } - yyj331++ - if yyhl331 { - yyb331 = yyj331 > l - } else { - 
yyb331 = r.CheckBreak() - } - if yyb331 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DeletionGracePeriodSeconds != nil { - x.DeletionGracePeriodSeconds = nil - } - } else { - if x.DeletionGracePeriodSeconds == nil { - x.DeletionGracePeriodSeconds = new(int64) - } - yym346 := z.DecBinary() - _ = yym346 - if false { - } else { - *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj331++ - if yyhl331 { - yyb331 = yyj331 > l - } else { - yyb331 = r.CheckBreak() - } - if yyb331 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Labels = nil - } else { - yyv347 := &x.Labels - yym348 := z.DecBinary() - _ = yym348 - if false { - } else { - z.F.DecMapStringStringX(yyv347, false, d) - } - } - yyj331++ - if yyhl331 { - yyb331 = yyj331 > l - } else { - yyb331 = r.CheckBreak() - } - if yyb331 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv349 := &x.Annotations - yym350 := z.DecBinary() - _ = yym350 - if false { - } else { - z.F.DecMapStringStringX(yyv349, false, d) - } - } - yyj331++ - if yyhl331 { - yyb331 = yyj331 > l - } else { - yyb331 = r.CheckBreak() - } - if yyb331 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.OwnerReferences = nil - } else { - yyv351 := &x.OwnerReferences - yym352 := z.DecBinary() - _ = yym352 - if false { - } else { - h.decSliceapi_OwnerReference((*[]pkg2_api.OwnerReference)(yyv351), d) - } - } - yyj331++ - if yyhl331 { - yyb331 = yyj331 > l - } else { - yyb331 = r.CheckBreak() - } - if yyb331 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv353 := &x.Finalizers - yym354 := z.DecBinary() - _ = yym354 - if false { - } else { - z.F.DecSliceStringX(yyv353, false, d) - } - } - yyj331++ - if yyhl331 { - yyb331 = yyj331 > l - } else { - yyb331 = r.CheckBreak() - } - if yyb331 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterName = "" - } else { - x.ClusterName = string(r.DecodeString()) - } - yyj331++ - if yyhl331 { - yyb331 = yyj331 > l - } else { - yyb331 = r.CheckBreak() - } - if yyb331 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = SubjectAccessReviewSpec{} - } else { - yyv356 := &x.Spec - yyv356.CodecDecodeSelf(d) - } - yyj331++ - if yyhl331 { - yyb331 = yyj331 > l - } else { - yyb331 = r.CheckBreak() - } - if yyb331 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv357 := &x.Status - yyv357.CodecDecodeSelf(d) - } - for { - yyj331++ - if yyhl331 { - yyb331 = yyj331 > l - } else { - yyb331 = r.CheckBreak() - } - if yyb331 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj331-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym358 := z.EncBinary() - _ = yym358 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep359 := !z.EncBinary() - yy2arr359 := z.EncBasicHandle().StructToArray - var yyq359 [7]bool - _, _, _ = yysep359, yyq359, yy2arr359 - const yyr359 bool = false - var yynn359 int - if yyr359 || yy2arr359 { - r.EncodeArrayStart(7) - } else { - yynn359 = 7 - for _, b := range yyq359 { - if b { - yynn359++ - } - } - r.EncodeMapStart(yynn359) - yynn359 = 0 - } - if yyr359 || yy2arr359 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym361 := z.EncBinary() - _ = yym361 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Namespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym362 := z.EncBinary() - _ = yym362 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - if yyr359 || yy2arr359 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym364 := z.EncBinary() - _ = yym364 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Verb")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym365 := z.EncBinary() - _ = yym365 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) - } - } - if yyr359 || yy2arr359 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym367 := z.EncBinary() - _ = yym367 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Group)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Group")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym368 := z.EncBinary() - _ = yym368 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Group)) - } - } - if yyr359 || yy2arr359 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym370 := z.EncBinary() - _ = yym370 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Version)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Version")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym371 := z.EncBinary() - _ = yym371 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Version)) - } - } - if yyr359 || yy2arr359 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym373 := z.EncBinary() - _ = yym373 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Resource")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym374 := z.EncBinary() - _ = yym374 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) - } - } - if yyr359 || yy2arr359 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym376 := z.EncBinary() - _ = yym376 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Subresource")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym377 := z.EncBinary() - _ = yym377 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) - } - } - if yyr359 || yy2arr359 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym379 := z.EncBinary() - _ = yym379 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym380 := z.EncBinary() - _ = yym380 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr359 || yy2arr359 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym381 := z.DecBinary() - _ = yym381 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct382 := r.ContainerType() - if yyct382 == codecSelferValueTypeMap1234 { - yyl382 := r.ReadMapStart() - if yyl382 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl382, d) - } - } else if yyct382 == codecSelferValueTypeArray1234 { - yyl382 := r.ReadArrayStart() - if yyl382 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl382, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys383Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys383Slc - var yyhl383 bool = l >= 0 - for yyj383 := 0; ; yyj383++ { - if yyhl383 { - if yyj383 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys383Slc = r.DecodeBytes(yys383Slc, true, true) - yys383 := string(yys383Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys383 { - case "Namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - case "Verb": - if r.TryDecodeAsNil() { - x.Verb = "" - } else { - x.Verb = string(r.DecodeString()) - } - case "Group": - if r.TryDecodeAsNil() { - x.Group = "" - } else { - x.Group = string(r.DecodeString()) - } - case "Version": - if r.TryDecodeAsNil() { - x.Version = "" - } else { - x.Version = string(r.DecodeString()) - } - case "Resource": - if r.TryDecodeAsNil() { - x.Resource = "" - } else { - x.Resource = string(r.DecodeString()) - } - case "Subresource": - if r.TryDecodeAsNil() { - x.Subresource = "" - } else { - x.Subresource = string(r.DecodeString()) - } - case "Name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys383) - } // end switch yys383 - } // end for yyj383 - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj391 int - var yyb391 bool - var yyhl391 bool = l >= 0 - yyj391++ - if yyhl391 { - yyb391 = yyj391 > l - } else { - yyb391 = r.CheckBreak() - } - if yyb391 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - yyj391++ - if yyhl391 { - yyb391 = yyj391 > l - } else { - yyb391 = r.CheckBreak() - } - if yyb391 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Verb = "" - } else { - x.Verb = string(r.DecodeString()) - } - yyj391++ - if yyhl391 { - yyb391 = yyj391 > l - } else { - yyb391 = r.CheckBreak() - } - if yyb391 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Group = "" - } else { - x.Group = string(r.DecodeString()) - } - yyj391++ - if yyhl391 { - yyb391 = yyj391 > l - } else { - yyb391 = r.CheckBreak() - } - if yyb391 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Version = "" - } else { - x.Version = string(r.DecodeString()) - } - yyj391++ - if yyhl391 { - yyb391 = yyj391 > l - } else { - yyb391 = r.CheckBreak() - } - if yyb391 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Resource = "" - } else { - x.Resource = string(r.DecodeString()) - } - yyj391++ - if yyhl391 { - yyb391 = yyj391 > l - } else { - yyb391 = r.CheckBreak() - } - if yyb391 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Subresource = "" - } else { - x.Subresource = string(r.DecodeString()) - } - yyj391++ - if yyhl391 { - yyb391 = yyj391 > l - } else { - yyb391 = r.CheckBreak() - } - if yyb391 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - for { - yyj391++ - if yyhl391 { - yyb391 = yyj391 > l - } else { - yyb391 = r.CheckBreak() - } - if yyb391 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj391-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NonResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym399 := z.EncBinary() - _ = yym399 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep400 := !z.EncBinary() - yy2arr400 := z.EncBasicHandle().StructToArray - var yyq400 [2]bool - _, _, _ = yysep400, yyq400, yy2arr400 - const yyr400 bool = false - var yynn400 int - if yyr400 || yy2arr400 { - r.EncodeArrayStart(2) - } else { - yynn400 = 2 - for _, b := range yyq400 { - if b { - 
yynn400++ - } - } - r.EncodeMapStart(yynn400) - yynn400 = 0 - } - if yyr400 || yy2arr400 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym402 := z.EncBinary() - _ = yym402 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym403 := z.EncBinary() - _ = yym403 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr400 || yy2arr400 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym405 := z.EncBinary() - _ = yym405 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Verb")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym406 := z.EncBinary() - _ = yym406 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) - } - } - if yyr400 || yy2arr400 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NonResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym407 := z.DecBinary() - _ = yym407 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct408 := r.ContainerType() - if yyct408 == codecSelferValueTypeMap1234 { - yyl408 := r.ReadMapStart() - if yyl408 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl408, d) - } - } else if yyct408 == codecSelferValueTypeArray1234 { - yyl408 := r.ReadArrayStart() - if yyl408 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl408, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NonResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys409Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys409Slc - var yyhl409 bool = l >= 0 - for yyj409 := 0; ; yyj409++ { - if yyhl409 { - if yyj409 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys409Slc = r.DecodeBytes(yys409Slc, true, true) - yys409 := string(yys409Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys409 { - case "Path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "Verb": - if r.TryDecodeAsNil() { - x.Verb = "" - } else { - x.Verb = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys409) - } // end switch yys409 - } // end for yyj409 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NonResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj412 int - var yyb412 bool - var yyhl412 bool = l >= 0 - yyj412++ - if yyhl412 { - yyb412 = yyj412 > l - } else { - yyb412 = r.CheckBreak() - } - if yyb412 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj412++ - if yyhl412 { - yyb412 = yyj412 > l - } else { - yyb412 = r.CheckBreak() - } - if yyb412 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Verb = "" - } else { - x.Verb = string(r.DecodeString()) - } - for { - yyj412++ - if yyhl412 { - yyb412 = yyj412 > l - } else { - yyb412 = r.CheckBreak() - } - if yyb412 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj412-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym415 := z.EncBinary() - _ = yym415 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep416 := !z.EncBinary() - yy2arr416 := z.EncBasicHandle().StructToArray - var yyq416 [5]bool - _, _, _ = yysep416, yyq416, yy2arr416 - const yyr416 bool = false - var yynn416 int - if yyr416 || yy2arr416 { - r.EncodeArrayStart(5) - } else { - yynn416 = 5 - for _, b := range yyq416 { - if b { - yynn416++ - } - } - r.EncodeMapStart(yynn416) - yynn416 = 0 - } - if yyr416 || yy2arr416 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.ResourceAttributes == nil { - r.EncodeNil() - } else { - x.ResourceAttributes.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ResourceAttributes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ResourceAttributes == nil { - r.EncodeNil() - } else { - x.ResourceAttributes.CodecEncodeSelf(e) - } - } - if yyr416 || yy2arr416 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.NonResourceAttributes == nil { - r.EncodeNil() - } else { - x.NonResourceAttributes.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("NonResourceAttributes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NonResourceAttributes == nil { - r.EncodeNil() - } else { - x.NonResourceAttributes.CodecEncodeSelf(e) - } - } - if yyr416 || yy2arr416 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym420 := z.EncBinary() - _ = yym420 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("User")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym421 := z.EncBinary() - _ = yym421 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } - if yyr416 || yy2arr416 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Groups == nil { - r.EncodeNil() - } else { - yym423 := z.EncBinary() - _ = yym423 - if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Groups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Groups == nil { - r.EncodeNil() - } else { - yym424 := z.EncBinary() - _ = yym424 - if false 
{ - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } - if yyr416 || yy2arr416 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Extra == nil { - r.EncodeNil() - } else { - yym426 := z.EncBinary() - _ = yym426 - if false { - } else { - h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Extra")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Extra == nil { - r.EncodeNil() - } else { - yym427 := z.EncBinary() - _ = yym427 - if false { - } else { - h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) - } - } - } - if yyr416 || yy2arr416 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym428 := z.DecBinary() - _ = yym428 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct429 := r.ContainerType() - if yyct429 == codecSelferValueTypeMap1234 { - yyl429 := r.ReadMapStart() - if yyl429 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl429, d) - } - } else if yyct429 == codecSelferValueTypeArray1234 { - yyl429 := r.ReadArrayStart() - if yyl429 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl429, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys430Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys430Slc - var yyhl430 bool = l >= 0 - for yyj430 := 0; ; yyj430++ { - if yyhl430 { - if yyj430 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys430Slc = r.DecodeBytes(yys430Slc, true, true) - yys430 := string(yys430Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys430 { - case "ResourceAttributes": - if r.TryDecodeAsNil() { - if x.ResourceAttributes != nil { - x.ResourceAttributes = nil - } - } else { - if x.ResourceAttributes == nil { - x.ResourceAttributes = new(ResourceAttributes) - } - x.ResourceAttributes.CodecDecodeSelf(d) - } - case "NonResourceAttributes": - if r.TryDecodeAsNil() { - if x.NonResourceAttributes != nil { - x.NonResourceAttributes = nil - } - } else { - if x.NonResourceAttributes == nil { - x.NonResourceAttributes = new(NonResourceAttributes) - } - x.NonResourceAttributes.CodecDecodeSelf(d) - } - case "User": - if r.TryDecodeAsNil() { - x.User = "" - } else { - x.User = string(r.DecodeString()) - } - case "Groups": - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv434 := &x.Groups - yym435 := z.DecBinary() - _ = yym435 - if false { - } else { - z.F.DecSliceStringX(yyv434, false, d) - } - } - case "Extra": - if r.TryDecodeAsNil() { - x.Extra = nil - } else { - yyv436 := &x.Extra - yym437 := z.DecBinary() - _ = yym437 - if false { - } else { - h.decMapstringExtraValue((*map[string]ExtraValue)(yyv436), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys430) - } // end switch yys430 - } // end for yyj430 - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj438 int - var yyb438 bool - var yyhl438 bool = l >= 0 - yyj438++ - if yyhl438 { - yyb438 = yyj438 > l - } else { - yyb438 = r.CheckBreak() - } - if yyb438 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ResourceAttributes != nil { - x.ResourceAttributes = nil - } - } else { - if x.ResourceAttributes == nil { - x.ResourceAttributes = new(ResourceAttributes) - } - x.ResourceAttributes.CodecDecodeSelf(d) - } - yyj438++ - if yyhl438 { - yyb438 = yyj438 > l - } else { - yyb438 = r.CheckBreak() - } - if yyb438 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NonResourceAttributes != nil { - x.NonResourceAttributes = nil - } - } else { - if x.NonResourceAttributes == nil { - x.NonResourceAttributes = new(NonResourceAttributes) - } - x.NonResourceAttributes.CodecDecodeSelf(d) - } - yyj438++ - if yyhl438 { - yyb438 = yyj438 > l - } else { - yyb438 = r.CheckBreak() - } - if yyb438 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.User = "" - } else { - x.User = string(r.DecodeString()) - } - yyj438++ - if yyhl438 { - yyb438 = yyj438 > l - } else { - yyb438 = r.CheckBreak() - } - if yyb438 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv442 := &x.Groups - yym443 := z.DecBinary() - _ = yym443 - if false { - } else { - z.F.DecSliceStringX(yyv442, false, d) - } - } - yyj438++ - if yyhl438 { - yyb438 = yyj438 > l - } else { - yyb438 = r.CheckBreak() - } - if yyb438 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Extra = nil - } else { - yyv444 := &x.Extra - yym445 := z.DecBinary() - _ = yym445 - if false { - } else { - h.decMapstringExtraValue((*map[string]ExtraValue)(yyv444), d) - } - } - for { - yyj438++ - if yyhl438 { - yyb438 = yyj438 > l - } else { - yyb438 = r.CheckBreak() - } - if yyb438 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj438-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ExtraValue) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym446 := z.EncBinary() - _ = yym446 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - h.encExtraValue((ExtraValue)(x), e) - } - } -} - -func (x *ExtraValue) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym447 := z.DecBinary() - _ = yym447 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - h.decExtraValue((*ExtraValue)(x), d) - } -} - -func (x *SelfSubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym448 := z.EncBinary() - _ = yym448 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep449 := !z.EncBinary() - yy2arr449 := z.EncBasicHandle().StructToArray - var yyq449 [2]bool - _, _, _ = yysep449, yyq449, yy2arr449 - const yyr449 bool = false - var yynn449 int - if yyr449 || yy2arr449 { - r.EncodeArrayStart(2) - } else { - yynn449 = 2 - for _, b := range yyq449 { - if b { - yynn449++ - } - } - r.EncodeMapStart(yynn449) - yynn449 = 0 - } - if yyr449 || yy2arr449 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.ResourceAttributes == nil { - r.EncodeNil() - } else { - x.ResourceAttributes.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ResourceAttributes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ResourceAttributes == nil { - r.EncodeNil() - } else { - x.ResourceAttributes.CodecEncodeSelf(e) - } - } - if yyr449 || yy2arr449 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.NonResourceAttributes == nil { - r.EncodeNil() - } else { - x.NonResourceAttributes.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("NonResourceAttributes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NonResourceAttributes == nil { - r.EncodeNil() - } else { - x.NonResourceAttributes.CodecEncodeSelf(e) - } - } - if yyr449 || yy2arr449 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SelfSubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym452 := z.DecBinary() - _ = yym452 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct453 := r.ContainerType() - if yyct453 == codecSelferValueTypeMap1234 { - yyl453 := r.ReadMapStart() - if yyl453 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl453, d) - } - } else if yyct453 == codecSelferValueTypeArray1234 { - yyl453 := r.ReadArrayStart() - if yyl453 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl453, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys454Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys454Slc - var yyhl454 bool = l >= 0 - for yyj454 := 0; ; yyj454++ { - if yyhl454 { - if yyj454 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys454Slc = r.DecodeBytes(yys454Slc, true, true) - yys454 := string(yys454Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys454 { - case "ResourceAttributes": - if r.TryDecodeAsNil() { - if x.ResourceAttributes != nil { - x.ResourceAttributes = nil - } - } else { - if x.ResourceAttributes == nil { - x.ResourceAttributes = new(ResourceAttributes) - } - x.ResourceAttributes.CodecDecodeSelf(d) - } - case "NonResourceAttributes": - if r.TryDecodeAsNil() { - 
if x.NonResourceAttributes != nil { - x.NonResourceAttributes = nil - } - } else { - if x.NonResourceAttributes == nil { - x.NonResourceAttributes = new(NonResourceAttributes) - } - x.NonResourceAttributes.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys454) - } // end switch yys454 - } // end for yyj454 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj457 int - var yyb457 bool - var yyhl457 bool = l >= 0 - yyj457++ - if yyhl457 { - yyb457 = yyj457 > l - } else { - yyb457 = r.CheckBreak() - } - if yyb457 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ResourceAttributes != nil { - x.ResourceAttributes = nil - } - } else { - if x.ResourceAttributes == nil { - x.ResourceAttributes = new(ResourceAttributes) - } - x.ResourceAttributes.CodecDecodeSelf(d) - } - yyj457++ - if yyhl457 { - yyb457 = yyj457 > l - } else { - yyb457 = r.CheckBreak() - } - if yyb457 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NonResourceAttributes != nil { - x.NonResourceAttributes = nil - } - } else { - if x.NonResourceAttributes == nil { - x.NonResourceAttributes = new(NonResourceAttributes) - } - x.NonResourceAttributes.CodecDecodeSelf(d) - } - for { - yyj457++ - if yyhl457 { - yyb457 = yyj457 > l - } else { - yyb457 = r.CheckBreak() - } - if yyb457 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj457-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SubjectAccessReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym460 := z.EncBinary() - _ = yym460 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep461 := !z.EncBinary() - yy2arr461 := z.EncBasicHandle().StructToArray - var yyq461 [3]bool - _, _, _ = yysep461, yyq461, yy2arr461 - const yyr461 bool = false - var yynn461 int - if yyr461 || yy2arr461 { - r.EncodeArrayStart(3) - } else { - yynn461 = 3 - for _, b := range yyq461 { - if b { - yynn461++ - } - } - r.EncodeMapStart(yynn461) - yynn461 = 0 - } - if yyr461 || yy2arr461 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym463 := z.EncBinary() - _ = yym463 - if false { - } else { - r.EncodeBool(bool(x.Allowed)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Allowed")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym464 := z.EncBinary() - _ = yym464 - if false { - } else { - r.EncodeBool(bool(x.Allowed)) - } - } - if yyr461 || yy2arr461 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym466 := z.EncBinary() - _ = yym466 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym467 := z.EncBinary() - _ = yym467 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - if yyr461 || yy2arr461 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym469 := z.EncBinary() - _ = yym469 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EvaluationError)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("EvaluationError")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym470 := z.EncBinary() - _ = yym470 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EvaluationError)) - } - } - if yyr461 || yy2arr461 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SubjectAccessReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym471 := z.DecBinary() - _ = yym471 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct472 := r.ContainerType() - if yyct472 == codecSelferValueTypeMap1234 { - yyl472 := r.ReadMapStart() - if yyl472 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl472, d) - } - } else if yyct472 == codecSelferValueTypeArray1234 { - yyl472 := r.ReadArrayStart() - if yyl472 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl472, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SubjectAccessReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys473Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys473Slc - var yyhl473 bool = l >= 0 - for yyj473 := 0; ; yyj473++ { - if yyhl473 { - if yyj473 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys473Slc = r.DecodeBytes(yys473Slc, true, true) - yys473 := string(yys473Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys473 { - case "Allowed": - if r.TryDecodeAsNil() { - x.Allowed = false - } else { - x.Allowed = bool(r.DecodeBool()) - } - case "Reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "EvaluationError": - if r.TryDecodeAsNil() { - x.EvaluationError = "" - } else { - x.EvaluationError = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys473) - } // end switch yys473 - } // end for yyj473 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SubjectAccessReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj477 int - var yyb477 bool - var yyhl477 bool = l >= 0 - yyj477++ - if yyhl477 { - yyb477 = yyj477 > l - } else { - yyb477 = r.CheckBreak() - } - if yyb477 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Allowed = false - } else { - x.Allowed = bool(r.DecodeBool()) - } - yyj477++ - if yyhl477 { - yyb477 = yyj477 > l - } else { - yyb477 = r.CheckBreak() - } - if yyb477 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj477++ - if yyhl477 { - yyb477 = yyj477 > l - } else { - yyb477 = r.CheckBreak() - } - if yyb477 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EvaluationError = "" - } else { - x.EvaluationError = string(r.DecodeString()) - } - for { - yyj477++ - if yyhl477 { - yyb477 = yyj477 > l - } else { - yyb477 = r.CheckBreak() - } - if yyb477 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj477-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceapi_OwnerReference(v []pkg2_api.OwnerReference, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv481 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy482 := &yyv481 - yy482.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceapi_OwnerReference(v *[]pkg2_api.OwnerReference, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv483 := *v - yyh483, yyl483 := z.DecSliceHelperStart() - var yyc483 bool - if yyl483 == 0 { - if yyv483 == nil { - yyv483 = []pkg2_api.OwnerReference{} - yyc483 = true - } else if len(yyv483) != 0 { - yyv483 = yyv483[:0] - yyc483 = true - } - } else if yyl483 > 0 { - var yyrr483, yyrl483 int - var yyrt483 bool - if yyl483 > cap(yyv483) { - - yyrg483 := len(yyv483) > 0 - yyv2483 := yyv483 - yyrl483, yyrt483 = z.DecInferLen(yyl483, z.DecBasicHandle().MaxInitLen, 72) - if yyrt483 { - if yyrl483 <= cap(yyv483) { - yyv483 = yyv483[:yyrl483] - } else { - yyv483 = make([]pkg2_api.OwnerReference, yyrl483) - } - } else { - yyv483 = make([]pkg2_api.OwnerReference, yyrl483) - } - yyc483 = true - yyrr483 = len(yyv483) - if yyrg483 { - copy(yyv483, yyv2483) - } - } else if yyl483 != len(yyv483) { - yyv483 = yyv483[:yyl483] - yyc483 = true - } - yyj483 := 0 - for ; yyj483 < yyrr483; yyj483++ { - yyh483.ElemContainerState(yyj483) - if r.TryDecodeAsNil() { - yyv483[yyj483] = pkg2_api.OwnerReference{} - } else { - yyv484 := &yyv483[yyj483] - yyv484.CodecDecodeSelf(d) - } - - } - if yyrt483 { - for ; yyj483 < yyl483; yyj483++ { - yyv483 = append(yyv483, pkg2_api.OwnerReference{}) - yyh483.ElemContainerState(yyj483) - if r.TryDecodeAsNil() { - yyv483[yyj483] = pkg2_api.OwnerReference{} - } else { - yyv485 := &yyv483[yyj483] - yyv485.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj483 := 0 - for ; !r.CheckBreak(); yyj483++ { - - if yyj483 >= len(yyv483) { - yyv483 = append(yyv483, pkg2_api.OwnerReference{}) // var yyz483 pkg2_api.OwnerReference - yyc483 = true - } - yyh483.ElemContainerState(yyj483) - if yyj483 < len(yyv483) { - if r.TryDecodeAsNil() { - yyv483[yyj483] = pkg2_api.OwnerReference{} - } else { - yyv486 := &yyv483[yyj483] - yyv486.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj483 < len(yyv483) { - yyv483 = yyv483[:yyj483] - yyc483 = true - } else if yyj483 == 0 && yyv483 == nil { - yyv483 = []pkg2_api.OwnerReference{} - yyc483 = true - } - } - yyh483.End() - if yyc483 { - *v = yyv483 - } -} - -func (x codecSelfer1234) encMapstringExtraValue(v map[string]ExtraValue, e 
*codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk487, yyv487 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym488 := z.EncBinary() - _ = yym488 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyk487)) - } - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyv487 == nil { - r.EncodeNil() - } else { - yyv487.CodecEncodeSelf(e) - } - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decMapstringExtraValue(v *map[string]ExtraValue, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv489 := *v - yyl489 := r.ReadMapStart() - yybh489 := z.DecBasicHandle() - if yyv489 == nil { - yyrl489, _ := z.DecInferLen(yyl489, yybh489.MaxInitLen, 40) - yyv489 = make(map[string]ExtraValue, yyrl489) - *v = yyv489 - } - var yymk489 string - var yymv489 ExtraValue - var yymg489 bool - if yybh489.MapValueReset { - yymg489 = true - } - if yyl489 > 0 { - for yyj489 := 0; yyj489 < yyl489; yyj489++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk489 = "" - } else { - yymk489 = string(r.DecodeString()) - } - - if yymg489 { - yymv489 = yyv489[yymk489] - } else { - yymv489 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv489 = nil - } else { - yyv491 := &yymv489 - yyv491.CodecDecodeSelf(d) - } - - if yyv489 != nil { - yyv489[yymk489] = yymv489 - } - } - } else if yyl489 < 0 { - for yyj489 := 0; !r.CheckBreak(); yyj489++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk489 = "" - } else { - yymk489 = string(r.DecodeString()) - } - - if yymg489 { - yymv489 = yyv489[yymk489] - } else { - yymv489 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv489 = nil - } else { - yyv493 := &yymv489 - yyv493.CodecDecodeSelf(d) - } - - if yyv489 != nil { - yyv489[yymk489] = yymv489 - } - } - } // else len==0: TODO: Should we clear map entries? 
- z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) encExtraValue(v ExtraValue, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv494 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym495 := z.EncBinary() - _ = yym495 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyv494)) - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decExtraValue(v *ExtraValue, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv496 := *v - yyh496, yyl496 := z.DecSliceHelperStart() - var yyc496 bool - if yyl496 == 0 { - if yyv496 == nil { - yyv496 = []string{} - yyc496 = true - } else if len(yyv496) != 0 { - yyv496 = yyv496[:0] - yyc496 = true - } - } else if yyl496 > 0 { - var yyrr496, yyrl496 int - var yyrt496 bool - if yyl496 > cap(yyv496) { - - yyrl496, yyrt496 = z.DecInferLen(yyl496, z.DecBasicHandle().MaxInitLen, 16) - if yyrt496 { - if yyrl496 <= cap(yyv496) { - yyv496 = yyv496[:yyrl496] - } else { - yyv496 = make([]string, yyrl496) - } - } else { - yyv496 = make([]string, yyrl496) - } - yyc496 = true - yyrr496 = len(yyv496) - } else if yyl496 != len(yyv496) { - yyv496 = yyv496[:yyl496] - yyc496 = true - } - yyj496 := 0 - for ; yyj496 < yyrr496; yyj496++ { - yyh496.ElemContainerState(yyj496) - if r.TryDecodeAsNil() { - yyv496[yyj496] = "" - } else { - yyv496[yyj496] = string(r.DecodeString()) - } - - } - if yyrt496 { - for ; yyj496 < yyl496; yyj496++ { - yyv496 = append(yyv496, "") - yyh496.ElemContainerState(yyj496) - if r.TryDecodeAsNil() { - yyv496[yyj496] = "" - } else { - yyv496[yyj496] = string(r.DecodeString()) - } - - } - } - - } else { - yyj496 := 0 - for ; !r.CheckBreak(); yyj496++ { - - if yyj496 >= len(yyv496) { - yyv496 = append(yyv496, "") // var yyz496 string - yyc496 = true - } - yyh496.ElemContainerState(yyj496) - if yyj496 < len(yyv496) { - if r.TryDecodeAsNil() { - yyv496[yyj496] = "" - } else { - yyv496[yyj496] = string(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj496 < len(yyv496) { - yyv496 = yyv496[:yyj496] - yyc496 = true - } else if yyj496 == 0 && yyv496 == nil { - yyv496 = []string{} - yyc496 = true - } - } - yyh496.End() - if yyc496 { - *v = yyv496 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.go index 545d1fa82a..d8ccfaf358 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.go @@ -17,8 +17,7 @@ limitations under the License. package authorization import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // +genclient=true @@ -28,8 +27,8 @@ import ( // SubjectAccessReview checks whether or not a user or group can perform an action. Not filling in a // spec.namespace means "in all namespaces". type SubjectAccessReview struct { - unversioned.TypeMeta - api.ObjectMeta + metav1.TypeMeta + metav1.ObjectMeta // Spec holds information about the request being evaluated Spec SubjectAccessReviewSpec @@ -46,8 +45,8 @@ type SubjectAccessReview struct { // spec.namespace means "in all namespaces". 
Self is a special case, because users should always be able // to check whether they can perform an action type SelfSubjectAccessReview struct { - unversioned.TypeMeta - api.ObjectMeta + metav1.TypeMeta + metav1.ObjectMeta // Spec holds information about the request being evaluated. Spec SelfSubjectAccessReviewSpec @@ -63,8 +62,8 @@ type SelfSubjectAccessReview struct { // Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions // checking. type LocalSubjectAccessReview struct { - unversioned.TypeMeta - api.ObjectMeta + metav1.TypeMeta + metav1.ObjectMeta // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace // you made the request against. If empty, it is defaulted. diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/BUILD new file mode 100644 index 0000000000..4ad249d585 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/BUILD @@ -0,0 +1,50 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "conversion.go", + "defaults.go", + "doc.go", + "generated.pb.go", + "register.go", + "types.generated.go", + "types.go", + "types_swagger_doc_generated.go", + "zz_generated.conversion.go", + "zz_generated.deepcopy.go", + "zz_generated.defaults.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/authorization:go_default_library", + "//vendor:github.com/gogo/protobuf/proto", + "//vendor:github.com/gogo/protobuf/sortkeys", + "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/types", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/conversion.go new file mode 100644 index 0000000000..2ff5732d6d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/conversion.go @@ -0,0 +1,26 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + return scheme.AddConversionFuncs() +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/defaults.go new file mode 100644 index 0000000000..d63d917543 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/defaults.go @@ -0,0 +1,25 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return scheme.AddDefaultingFuncs() +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/doc.go new file mode 100644 index 0000000000..e68328bc6c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/authorization +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=authorization.k8s.io +package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/generated.pb.go new file mode 100644 index 0000000000..cb2d2c61ac --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/generated.pb.go @@ -0,0 +1,2344 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/authorization/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/pkg/apis/authorization/v1/generated.proto + + It has these top-level messages: + ExtraValue + LocalSubjectAccessReview + NonResourceAttributes + ResourceAttributes + SelfSubjectAccessReview + SelfSubjectAccessReviewSpec + SubjectAccessReview + SubjectAccessReviewSpec + SubjectAccessReviewStatus +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } +func (*LocalSubjectAccessReview) ProtoMessage() {} +func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } +func (*NonResourceAttributes) ProtoMessage() {} +func (*NonResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } +func (*ResourceAttributes) ProtoMessage() {} +func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } +func (*SelfSubjectAccessReview) ProtoMessage() {} +func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } +func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} +func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{5} +} + +func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } +func (*SubjectAccessReview) ProtoMessage() {} +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } +func (*SubjectAccessReviewSpec) ProtoMessage() {} +func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } +func (*SubjectAccessReviewStatus) ProtoMessage() {} +func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{8} +} + +func init() { + proto.RegisterType((*ExtraValue)(nil), "k8s.io.kubernetes.pkg.apis.authorization.v1.ExtraValue") + proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.kubernetes.pkg.apis.authorization.v1.LocalSubjectAccessReview") + proto.RegisterType((*NonResourceAttributes)(nil), "k8s.io.kubernetes.pkg.apis.authorization.v1.NonResourceAttributes") + proto.RegisterType((*ResourceAttributes)(nil), "k8s.io.kubernetes.pkg.apis.authorization.v1.ResourceAttributes") + proto.RegisterType((*SelfSubjectAccessReview)(nil), 
"k8s.io.kubernetes.pkg.apis.authorization.v1.SelfSubjectAccessReview") + proto.RegisterType((*SelfSubjectAccessReviewSpec)(nil), "k8s.io.kubernetes.pkg.apis.authorization.v1.SelfSubjectAccessReviewSpec") + proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.kubernetes.pkg.apis.authorization.v1.SubjectAccessReview") + proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.kubernetes.pkg.apis.authorization.v1.SubjectAccessReviewSpec") + proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.kubernetes.pkg.apis.authorization.v1.SubjectAccessReviewStatus") +} +func (m ExtraValue) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m ExtraValue) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *LocalSubjectAccessReview) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LocalSubjectAccessReview) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *NonResourceAttributes) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NonResourceAttributes) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Verb))) + i += copy(data[i:], m.Verb) + return i, nil +} + +func (m *ResourceAttributes) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceAttributes) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) + i += copy(data[i:], m.Namespace) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Verb))) + i += copy(data[i:], m.Verb) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Version))) + i += copy(data[i:], m.Version) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Resource))) + i += copy(data[i:], m.Resource) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Subresource))) 
+ i += copy(data[i:], m.Subresource) + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + return i, nil +} + +func (m *SelfSubjectAccessReview) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SelfSubjectAccessReview) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n6, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + return i, nil +} + +func (m *SelfSubjectAccessReviewSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SelfSubjectAccessReviewSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ResourceAttributes != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ResourceAttributes.Size())) + n7, err := m.ResourceAttributes.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.NonResourceAttributes != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NonResourceAttributes.Size())) + n8, err := m.NonResourceAttributes.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func (m *SubjectAccessReview) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SubjectAccessReview) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n10, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n11, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + return i, nil +} + +func (m *SubjectAccessReviewSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SubjectAccessReviewSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ResourceAttributes != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ResourceAttributes.Size())) + n12, err := m.ResourceAttributes.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.NonResourceAttributes != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NonResourceAttributes.Size())) + n13, err := m.NonResourceAttributes.MarshalTo(data[i:]) + if err != nil { + 
return 0, err + } + i += n13 + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.User))) + i += copy(data[i:], m.User) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + data[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Extra) > 0 { + for k := range m.Extra { + data[i] = 0x2a + i++ + v := m.Extra[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n14, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n14 + } + } + return i, nil +} + +func (m *SubjectAccessReviewStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SubjectAccessReviewStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Allowed { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.EvaluationError))) + i += copy(data[i:], m.EvaluationError) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m ExtraValue) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LocalSubjectAccessReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NonResourceAttributes) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceAttributes) Size() (n int) { + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subresource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + 
return n +} + +func (m *SelfSubjectAccessReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectAccessReviewSpec) Size() (n int) { + var l int + _ = l + if m.ResourceAttributes != nil { + l = m.ResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NonResourceAttributes != nil { + l = m.NonResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SubjectAccessReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SubjectAccessReviewSpec) Size() (n int) { + var l int + _ = l + if m.ResourceAttributes != nil { + l = m.ResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NonResourceAttributes != nil { + l = m.NonResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SubjectAccessReviewStatus) Size() (n int) { + var l int + _ = l + n += 2 + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.EvaluationError) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LocalSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalSubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NonResourceAttributes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NonResourceAttributes{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceAttributes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceAttributes{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this 
*SelfSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectAccessReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, + `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReviewSpec) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&SubjectAccessReviewSpec{`, + `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReviewStatus{`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ExtraValue) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF 
+ } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LocalSubjectAccessReview) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourceAttributes) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourceAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAttributes) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAttributes: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresource = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectAccessReview) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectAccessReviewSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceAttributes == nil { + m.ResourceAttributes = &ResourceAttributes{} + } + if err := m.ResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NonResourceAttributes == nil { + m.NonResourceAttributes = &NonResourceAttributes{} + } + if err := m.NonResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReview) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReviewSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceAttributes == nil { + m.ResourceAttributes = &ResourceAttributes{} + } + if err := m.ResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NonResourceAttributes == nil { + m.NonResourceAttributes = &NonResourceAttributes{} + } + if err := m.NonResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + m.Extra[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReviewStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 904 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x56, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0xfa, 0x5f, 0xec, 0x09, 0x90, 0x32, 0x55, 0xc9, 0x36, 0x48, 0xb6, 0x65, 0x10, 0x0a, + 0xa2, 0xec, 0x92, 0xf2, 0xa7, 0x55, 0x39, 0xa0, 0xac, 0x08, 0x7f, 0x24, 0xda, 0xa2, 0x89, 0xc8, + 0x01, 0x2e, 0x8c, 0x37, 0x2f, 0xf6, 0xd6, 0xde, 0x9d, 0x65, 0x66, 0x76, 0xdb, 0x70, 0xea, 0x8d, + 0x2b, 0x12, 0x17, 0x8e, 0x7c, 0x05, 0x3e, 0x00, 0x9c, 0x73, 0xa3, 0x07, 0x24, 0x38, 0x20, 0x8b, + 0x2c, 0x17, 0x3e, 0x06, 0x9a, 0xd9, 0x89, 0x37, 0xc6, 0x6b, 0x2a, 0x43, 0x25, 0x7a, 0xe8, 0x6d, + 0xe7, 0xbd, 0xdf, 0xef, 0xbd, 0xdf, 
0xbc, 0x79, 0xb3, 0x6f, 0xd0, 0xdb, 0xe3, 0xeb, 0xc2, 0x09, + 0x98, 0x3b, 0x4e, 0x06, 0xc0, 0x23, 0x90, 0x20, 0xdc, 0x78, 0x3c, 0x74, 0x69, 0x1c, 0x08, 0x97, + 0x26, 0x72, 0xc4, 0x78, 0xf0, 0x25, 0x95, 0x01, 0x8b, 0xdc, 0x74, 0xc7, 0x1d, 0x42, 0x04, 0x9c, + 0x4a, 0x38, 0x74, 0x62, 0xce, 0x24, 0xc3, 0xaf, 0xe4, 0x64, 0xa7, 0x20, 0x3b, 0xf1, 0x78, 0xe8, + 0x28, 0xb2, 0x33, 0x47, 0x76, 0xd2, 0x9d, 0xad, 0x57, 0x87, 0x81, 0x1c, 0x25, 0x03, 0xc7, 0x67, + 0xa1, 0x3b, 0x64, 0x43, 0xe6, 0xea, 0x18, 0x83, 0xe4, 0x48, 0xaf, 0xf4, 0x42, 0x7f, 0xe5, 0xb1, + 0xb7, 0xde, 0x30, 0xc2, 0x68, 0x1c, 0x84, 0xd4, 0x1f, 0x05, 0x11, 0xf0, 0xe3, 0x42, 0x5a, 0x08, + 0x92, 0x96, 0x28, 0xda, 0x72, 0x97, 0xb1, 0x78, 0x12, 0xc9, 0x20, 0x84, 0x05, 0xc2, 0x5b, 0x0f, + 0x23, 0x08, 0x7f, 0x04, 0x21, 0x5d, 0xe0, 0xbd, 0xbe, 0x8c, 0x97, 0xc8, 0x60, 0xe2, 0x06, 0x91, + 0x14, 0x92, 0x2f, 0x90, 0xce, 0xed, 0x49, 0x00, 0x4f, 0x81, 0x17, 0x1b, 0x82, 0x7b, 0x34, 0x8c, + 0x27, 0x50, 0xb6, 0xa7, 0x2b, 0x4b, 0x8f, 0xa8, 0x04, 0xdd, 0xbf, 0x86, 0xd0, 0xde, 0x3d, 0xc9, + 0xe9, 0x01, 0x9d, 0x24, 0x80, 0xbb, 0xa8, 0x11, 0x48, 0x08, 0x85, 0x6d, 0xf5, 0x6a, 0xdb, 0x6d, + 0xaf, 0x9d, 0x4d, 0xbb, 0x8d, 0x0f, 0x95, 0x81, 0xe4, 0xf6, 0x1b, 0xad, 0x6f, 0xbf, 0xeb, 0x56, + 0xee, 0xff, 0xd6, 0xab, 0xf4, 0x7f, 0xae, 0x22, 0xfb, 0x23, 0xe6, 0xd3, 0xc9, 0x7e, 0x32, 0xb8, + 0x03, 0xbe, 0xdc, 0xf5, 0x7d, 0x10, 0x82, 0x40, 0x1a, 0xc0, 0x5d, 0xfc, 0x39, 0x6a, 0xa9, 0x92, + 0x1f, 0x52, 0x49, 0x6d, 0xab, 0x67, 0x6d, 0xaf, 0x5f, 0x7d, 0xcd, 0x31, 0x87, 0x7f, 0xbe, 0x02, + 0xc5, 0xf1, 0x2b, 0xb4, 0x93, 0xee, 0x38, 0xb7, 0x75, 0xac, 0x9b, 0x20, 0xa9, 0x87, 0x4f, 0xa6, + 0xdd, 0x4a, 0x36, 0xed, 0xa2, 0xc2, 0x46, 0x66, 0x51, 0xf1, 0x11, 0xaa, 0x8b, 0x18, 0x7c, 0xbb, + 0xaa, 0xa3, 0xbf, 0xeb, 0xac, 0xd0, 0x5a, 0x4e, 0x89, 0xe2, 0xfd, 0x18, 0x7c, 0xef, 0x29, 0x93, + 0xb1, 0xae, 0x56, 0x44, 0xc7, 0xc7, 0x11, 0x6a, 0x0a, 0x49, 0x65, 0x22, 0xec, 0x9a, 0xce, 0xf4, + 0xde, 0x7f, 0xce, 0xa4, 0xa3, 0x79, 0xcf, 0x98, 0x5c, 0xcd, 0x7c, 0x4d, 0x4c, 0x96, 0xfe, 0x67, + 0xe8, 0xd2, 0x2d, 0x16, 0x11, 0x10, 0x2c, 0xe1, 0x3e, 0xec, 0x4a, 0xc9, 0x83, 0x41, 0x22, 0x41, + 0xe0, 0x1e, 0xaa, 0xc7, 0x54, 0x8e, 0x74, 0x39, 0xdb, 0x85, 0xd4, 0x8f, 0xa9, 0x1c, 0x11, 0xed, + 0x51, 0x88, 0x14, 0xf8, 0x40, 0x97, 0xe4, 0x1c, 0xe2, 0x00, 0xf8, 0x80, 0x68, 0x4f, 0xff, 0xc7, + 0x2a, 0xc2, 0x25, 0xa1, 0x5d, 0xd4, 0x8e, 0x68, 0x08, 0x22, 0xa6, 0x3e, 0x98, 0xf8, 0xcf, 0x1a, + 0x76, 0xfb, 0xd6, 0x99, 0x83, 0x14, 0x98, 0x87, 0x67, 0xc2, 0x2f, 0xa0, 0xc6, 0x90, 0xb3, 0x24, + 0xd6, 0x55, 0x6b, 0x7b, 0x4f, 0x1b, 0x48, 0xe3, 0x7d, 0x65, 0x24, 0xb9, 0x0f, 0xbf, 0x8c, 0xd6, + 0x52, 0xe0, 0x22, 0x60, 0x91, 0x5d, 0xd7, 0xb0, 0x0d, 0x03, 0x5b, 0x3b, 0xc8, 0xcd, 0xe4, 0xcc, + 0x8f, 0xaf, 0xa0, 0x16, 0x37, 0xc2, 0xed, 0x86, 0xc6, 0x5e, 0x30, 0xd8, 0xd6, 0xd9, 0x86, 0xc8, + 0x0c, 0x81, 0xdf, 0x44, 0xeb, 0x22, 0x19, 0xcc, 0x08, 0x4d, 0x4d, 0xb8, 0x68, 0x08, 0xeb, 0xfb, + 0x85, 0x8b, 0x9c, 0xc7, 0xa9, 0x6d, 0xa9, 0x3d, 0xda, 0x6b, 0xf3, 0xdb, 0x52, 0x25, 0x20, 0xda, + 0xd3, 0xff, 0xa5, 0x8a, 0x36, 0xf7, 0x61, 0x72, 0xf4, 0xff, 0xf4, 0xfc, 0x9d, 0xb9, 0x9e, 0xff, + 0x60, 0xb5, 0x4e, 0x2c, 0x57, 0xfd, 0xd8, 0xf4, 0xfd, 0x0f, 0x55, 0xf4, 0xfc, 0x3f, 0x68, 0xc4, + 0x5f, 0x59, 0x08, 0xf3, 0x85, 0xd6, 0x35, 0x85, 0x7e, 0x67, 0x25, 0x71, 0x8b, 0x37, 0xc0, 0x7b, + 0x2e, 0x9b, 0x76, 0x4b, 0x6e, 0x06, 0x29, 0x49, 0x89, 0xbf, 0xb1, 0xd0, 0xa5, 0xa8, 0xec, 0x8a, + 0x9a, 0x73, 0xf1, 0x56, 0x12, 0x53, 0x7a, 0xd9, 0xbd, 0xcb, 0xd9, 0xb4, 0x5b, 0xfe, 0x1f, 0x20, + 0xe5, 0xb9, 0xfb, 0x3f, 0x55, 0xd1, 0xc5, 0x27, 0x7f, 0xe2, 
0x47, 0xd9, 0x91, 0x7f, 0xd6, 0xd1, + 0xe6, 0x93, 0x6e, 0xfc, 0x57, 0xdd, 0x38, 0x1b, 0x10, 0xb5, 0xf9, 0x3f, 0xe9, 0x27, 0x02, 0xb8, + 0x19, 0x10, 0x7d, 0xd4, 0xd4, 0x43, 0x40, 0xd8, 0x75, 0xfd, 0xd4, 0x40, 0xea, 0x04, 0xf4, 0x74, + 0x10, 0xc4, 0x78, 0xb0, 0x44, 0x0d, 0x50, 0x6f, 0x13, 0xbb, 0xd1, 0xab, 0x6d, 0xaf, 0x5f, 0xbd, + 0xfd, 0x28, 0x5a, 0xcb, 0xd1, 0xaf, 0x9d, 0xbd, 0x48, 0xf2, 0xe3, 0x62, 0x2a, 0x69, 0x1b, 0xc9, + 0x93, 0x6d, 0x7d, 0x61, 0x5e, 0x44, 0x1a, 0x83, 0x2f, 0xa0, 0xda, 0x18, 0x8e, 0xf3, 0xa9, 0x48, + 0xd4, 0x27, 0xbe, 0x89, 0x1a, 0xa9, 0x7a, 0x2c, 0x99, 0x02, 0x5f, 0x5b, 0x49, 0x55, 0xf1, 0xd6, + 0x22, 0x79, 0x94, 0x1b, 0xd5, 0xeb, 0x56, 0xff, 0x7b, 0x0b, 0x5d, 0x5e, 0xda, 0xa0, 0x6a, 0x4c, + 0xd2, 0xc9, 0x84, 0xdd, 0x85, 0x43, 0x2d, 0xa3, 0x55, 0x8c, 0xc9, 0xdd, 0xdc, 0x4c, 0xce, 0xfc, + 0xf8, 0x25, 0xd4, 0xe4, 0x40, 0x05, 0x8b, 0xcc, 0x68, 0x9e, 0xf5, 0x36, 0xd1, 0x56, 0x62, 0xbc, + 0x78, 0x17, 0x6d, 0x80, 0x4a, 0xaf, 0x75, 0xed, 0x71, 0xce, 0xb8, 0x39, 0xaa, 0x4d, 0x43, 0xd8, + 0xd8, 0x9b, 0x77, 0x93, 0xbf, 0xe3, 0xbd, 0x17, 0x4f, 0x4e, 0x3b, 0x95, 0x07, 0xa7, 0x9d, 0xca, + 0xaf, 0xa7, 0x9d, 0xca, 0xfd, 0xac, 0x63, 0x9d, 0x64, 0x1d, 0xeb, 0x41, 0xd6, 0xb1, 0x7e, 0xcf, + 0x3a, 0xd6, 0xd7, 0x7f, 0x74, 0x2a, 0x9f, 0x56, 0xd3, 0x9d, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, + 0x8a, 0xc4, 0x1f, 0xd5, 0x30, 0x0c, 0x00, 0x00, +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/generated.proto new file mode 100644 index 0000000000..7036b8eb33 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/generated.proto @@ -0,0 +1,185 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.authorization.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message ExtraValue { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. +// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions +// checking. +message LocalSubjectAccessReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated. 
spec.namespace must be equal to the namespace + // you made the request against. If empty, it is defaulted. + optional SubjectAccessReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + optional SubjectAccessReviewStatus status = 3; +} + +// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface +message NonResourceAttributes { + // Path is the URL path of the request + // +optional + optional string path = 1; + + // Verb is the standard HTTP verb + // +optional + optional string verb = 2; +} + +// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface +message ResourceAttributes { + // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces + // "" (empty) is defaulted for LocalSubjectAccessReviews + // "" (empty) is empty for cluster-scoped resources + // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + // +optional + optional string namespace = 1; + + // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. + // +optional + optional string verb = 2; + + // Group is the API Group of the Resource. "*" means all. + // +optional + optional string group = 3; + + // Version is the API Version of the Resource. "*" means all. + // +optional + optional string version = 4; + + // Resource is one of the existing resource types. "*" means all. + // +optional + optional string resource = 5; + + // Subresource is one of the existing resource types. "" means none. + // +optional + optional string subresource = 6; + + // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + // +optional + optional string name = 7; +} + +// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a +// spec.namespace means "in all namespaces". Self is a special case, because users should always be able +// to check whether they can perform an action +message SelfSubjectAccessReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated. user and groups must be empty + optional SelfSubjectAccessReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + optional SubjectAccessReviewStatus status = 3; +} + +// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +message SelfSubjectAccessReviewSpec { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + optional ResourceAttributes resourceAttributes = 1; + + // NonResourceAttributes describes information for a non-resource access request + // +optional + optional NonResourceAttributes nonResourceAttributes = 2; +} + +// SubjectAccessReview checks whether or not a user or group can perform an action. 
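The messages above pin down the request shapes: a spec carries exactly one of resourceAttributes or nonResourceAttributes, SelfSubjectAccessReview leaves user and groups to be filled in from the caller, and LocalSubjectAccessReview is namespace-scoped with spec.namespace matching the object's namespace. A hedged Go sketch of those constraints, using the vendored package path from this diff; field values and the authzv1 alias are illustrative:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	authzv1 "k8s.io/kubernetes/pkg/apis/authorization/v1" // vendored path from this diff
)

// buildReviews shows the shape of the request objects defined above.
func buildReviews() (authzv1.SelfSubjectAccessReview, authzv1.LocalSubjectAccessReview) {
	// SelfSubjectAccessReview: user/groups come from the caller, so only one
	// of the attribute structs is set.
	self := authzv1.SelfSubjectAccessReview{
		Spec: authzv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authzv1.ResourceAttributes{
				Namespace: "default",
				Verb:      "list",
				Resource:  "pods",
			},
		},
	}

	// LocalSubjectAccessReview: namespace-scoped, so spec.namespace must match
	// the namespace the request is made against.
	local := authzv1.LocalSubjectAccessReview{
		ObjectMeta: metav1.ObjectMeta{Namespace: "team-a"},
		Spec: authzv1.SubjectAccessReviewSpec{
			User: "jane",
			ResourceAttributes: &authzv1.ResourceAttributes{
				Namespace: "team-a",
				Verb:      "get",
				Resource:  "configmaps",
			},
		},
	}
	return self, local
}

func main() { _, _ = buildReviews() }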
+message SubjectAccessReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated + optional SubjectAccessReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + optional SubjectAccessReviewStatus status = 3; +} + +// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +message SubjectAccessReviewSpec { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + optional ResourceAttributes resourceAttributes = 1; + + // NonResourceAttributes describes information for a non-resource access request + // +optional + optional NonResourceAttributes nonResourceAttributes = 2; + + // User is the user you're testing for. + // If you specify "User" but not "Groups", then is it interpreted as "What if User were not a member of any groups + // +optional + optional string verb = 3; + + // Groups is the groups you're testing for. + // +optional + repeated string groups = 4; + + // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer + // it needs a reflection here. + // +optional + map extra = 5; +} + +// SubjectAccessReviewStatus +message SubjectAccessReviewStatus { + // Allowed is required. True if the action would be allowed, false otherwise. + optional bool allowed = 1; + + // Reason is optional. It indicates why a request was allowed or denied. + // +optional + optional string reason = 2; + + // EvaluationError is an indication that some error occurred during the authorization check. + // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. + // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request. + // +optional + optional string evaluationError = 3; +} + diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/register.go new file mode 100644 index 0000000000..909bc0a7db --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &SelfSubjectAccessReview{}, + &SubjectAccessReview{}, + &LocalSubjectAccessReview{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} + +func (obj *LocalSubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } +func (obj *SubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } +func (obj *SelfSubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/types.generated.go new file mode 100644 index 0000000000..2528afa8e8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/types.generated.go @@ -0,0 +1,3233 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package v1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 + } +} + +func (x *SubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = SubjectAccessReviewSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = SubjectAccessReviewSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SelfSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SelfSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SelfSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { 
+ yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = SelfSubjectAccessReviewSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SelfSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = SelfSubjectAccessReviewSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LocalSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, 
_, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LocalSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LocalSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = SubjectAccessReviewSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LocalSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 
= r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = SubjectAccessReviewSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Namespace != "" + yyq2[1] = x.Verb != "" + yyq2[2] = x.Group != "" + yyq2[3] = x.Version != "" + yyq2[4] = x.Resource != "" + yyq2[5] = x.Subresource != "" + yyq2[6] = x.Name != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("verb")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Group)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("group")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Group)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Version)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("version")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Version)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subresource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == 
codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv4 := &x.Namespace + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "verb": + if r.TryDecodeAsNil() { + x.Verb = "" + } else { + yyv6 := &x.Verb + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "group": + if r.TryDecodeAsNil() { + x.Group = "" + } else { + yyv8 := &x.Group + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "version": + if r.TryDecodeAsNil() { + x.Version = "" + } else { + yyv10 := &x.Version + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "resource": + if r.TryDecodeAsNil() { + x.Resource = "" + } else { + yyv12 := &x.Resource + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "subresource": + if r.TryDecodeAsNil() { + x.Subresource = "" + } else { + yyv14 := &x.Subresource + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv16 := &x.Name + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv19 := &x.Namespace + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.Verb = "" + } else { + yyv21 := &x.Verb + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Group = "" + } else { + yyv23 := &x.Group + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Version = "" + } else { + yyv25 := &x.Version + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Resource = "" + } else { + yyv27 := &x.Resource + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subresource = "" + } else { + yyv29 := &x.Subresource + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv31 := &x.Name + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NonResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Path != "" + yyq2[1] = x.Verb != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("verb")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NonResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NonResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "verb": + if r.TryDecodeAsNil() { + x.Verb = "" + } else { + yyv6 := &x.Verb + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NonResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv9 := &x.Path + yym10 := 
z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Verb = "" + } else { + yyv11 := &x.Verb + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ResourceAttributes != nil + yyq2[1] = x.NonResourceAttributes != nil + yyq2[2] = x.User != "" + yyq2[3] = len(x.Groups) != 0 + yyq2[4] = len(x.Extra) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.ResourceAttributes == nil { + r.EncodeNil() + } else { + x.ResourceAttributes.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceAttributes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceAttributes == nil { + r.EncodeNil() + } else { + x.ResourceAttributes.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.NonResourceAttributes == nil { + r.EncodeNil() + } else { + x.NonResourceAttributes.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nonResourceAttributes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NonResourceAttributes == nil { + r.EncodeNil() + } else { + x.NonResourceAttributes.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("user")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Groups == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + 
if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("groups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Groups == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Extra == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("extra")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Extra == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "resourceAttributes": + if r.TryDecodeAsNil() { + if x.ResourceAttributes != nil { + x.ResourceAttributes = nil + } + } else { + if x.ResourceAttributes == nil { + x.ResourceAttributes = new(ResourceAttributes) + } + x.ResourceAttributes.CodecDecodeSelf(d) + } + case "nonResourceAttributes": + if r.TryDecodeAsNil() { + if x.NonResourceAttributes != nil { + x.NonResourceAttributes = nil + } + } else { + if x.NonResourceAttributes == nil { + x.NonResourceAttributes = new(NonResourceAttributes) + } + x.NonResourceAttributes.CodecDecodeSelf(d) + } + case "user": + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv6 := &x.User + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "groups": + if r.TryDecodeAsNil() { + x.Groups = 
nil + } else { + yyv8 := &x.Groups + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceStringX(yyv8, false, d) + } + } + case "extra": + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv10 := &x.Extra + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ResourceAttributes != nil { + x.ResourceAttributes = nil + } + } else { + if x.ResourceAttributes == nil { + x.ResourceAttributes = new(ResourceAttributes) + } + x.ResourceAttributes.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NonResourceAttributes != nil { + x.NonResourceAttributes = nil + } + } else { + if x.NonResourceAttributes == nil { + x.NonResourceAttributes = new(NonResourceAttributes) + } + x.NonResourceAttributes.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv15 := &x.User + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Groups = nil + } else { + yyv17 := &x.Groups + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + z.F.DecSliceStringX(yyv17, false, d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv19 := &x.Extra + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ExtraValue) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + 
} else { + h.encExtraValue((ExtraValue)(x), e) + } + } +} + +func (x *ExtraValue) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decExtraValue((*ExtraValue)(x), d) + } +} + +func (x *SelfSubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ResourceAttributes != nil + yyq2[1] = x.NonResourceAttributes != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.ResourceAttributes == nil { + r.EncodeNil() + } else { + x.ResourceAttributes.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceAttributes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceAttributes == nil { + r.EncodeNil() + } else { + x.ResourceAttributes.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.NonResourceAttributes == nil { + r.EncodeNil() + } else { + x.NonResourceAttributes.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nonResourceAttributes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NonResourceAttributes == nil { + r.EncodeNil() + } else { + x.NonResourceAttributes.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SelfSubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + 
break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "resourceAttributes": + if r.TryDecodeAsNil() { + if x.ResourceAttributes != nil { + x.ResourceAttributes = nil + } + } else { + if x.ResourceAttributes == nil { + x.ResourceAttributes = new(ResourceAttributes) + } + x.ResourceAttributes.CodecDecodeSelf(d) + } + case "nonResourceAttributes": + if r.TryDecodeAsNil() { + if x.NonResourceAttributes != nil { + x.NonResourceAttributes = nil + } + } else { + if x.NonResourceAttributes == nil { + x.NonResourceAttributes = new(NonResourceAttributes) + } + x.NonResourceAttributes.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ResourceAttributes != nil { + x.ResourceAttributes = nil + } + } else { + if x.ResourceAttributes == nil { + x.ResourceAttributes = new(ResourceAttributes) + } + x.ResourceAttributes.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NonResourceAttributes != nil { + x.NonResourceAttributes = nil + } + } else { + if x.NonResourceAttributes == nil { + x.NonResourceAttributes = new(NonResourceAttributes) + } + x.NonResourceAttributes.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SubjectAccessReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Reason != "" + yyq2[2] = x.EvaluationError != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.Allowed)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("allowed")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = 
yym5 + if false { + } else { + r.EncodeBool(bool(x.Allowed)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.EvaluationError)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("evaluationError")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.EvaluationError)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SubjectAccessReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SubjectAccessReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "allowed": + if r.TryDecodeAsNil() { + x.Allowed = false + } else { + yyv4 := &x.Allowed + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*bool)(yyv4)) = r.DecodeBool() + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv6 := &x.Reason + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "evaluationError": + if r.TryDecodeAsNil() { + x.EvaluationError = "" + } else { + yyv8 := &x.EvaluationError + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + 
z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SubjectAccessReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Allowed = false + } else { + yyv11 := &x.Allowed + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv13 := &x.Reason + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.EvaluationError = "" + } else { + yyv15 := &x.EvaluationError + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encMapstringExtraValue(v map[string]ExtraValue, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decMapstringExtraValue(v *map[string]ExtraValue, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string]ExtraValue, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 ExtraValue + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv4 := &yymv1 + 
yyv4.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv5 := &yymk1 + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv7 := &yymv1 + yyv7.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encExtraValue(v ExtraValue, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyv1)) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decExtraValue(v *ExtraValue, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]string, yyrl1) + } + } else { + yyv1 = make([]string, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 string + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv6 := &yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/types.go new file mode 100644 index 0000000000..38c314ffcf --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/types.go @@ -0,0 +1,176 @@ +/* 
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient=true
+// +nonNamespaced=true
+// +noMethods=true
+
+// SubjectAccessReview checks whether or not a user or group can perform an action.
+type SubjectAccessReview struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec holds information about the request being evaluated
+	Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is filled in by the server and indicates whether the request is allowed or not
+	// +optional
+	Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +genclient=true
+// +nonNamespaced=true
+// +noMethods=true
+
+// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a
+// spec.namespace means "in all namespaces". Self is a special case, because users should always be able
+// to check whether they can perform an action
+type SelfSubjectAccessReview struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec holds information about the request being evaluated. user and groups must be empty
+	Spec SelfSubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is filled in by the server and indicates whether the request is allowed or not
+	// +optional
+	Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +genclient=true
+// +noMethods=true
+
+// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace.
+// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions
+// checking.
+type LocalSubjectAccessReview struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace
+	// you made the request against. If empty, it is defaulted.
+	Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is filled in by the server and indicates whether the request is allowed or not
+	// +optional
+	Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface
+type ResourceAttributes struct {
+	// Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces
+	// "" (empty) is defaulted for LocalSubjectAccessReviews
+	// "" (empty) is empty for cluster-scoped resources
+	// "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview
+	// +optional
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`
+	// Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all.
+	// +optional
+	Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"`
+	// Group is the API Group of the Resource. "*" means all.
+	// +optional
+	Group string `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"`
+	// Version is the API Version of the Resource. "*" means all.
+	// +optional
+	Version string `json:"version,omitempty" protobuf:"bytes,4,opt,name=version"`
+	// Resource is one of the existing resource types. "*" means all.
+	// +optional
+	Resource string `json:"resource,omitempty" protobuf:"bytes,5,opt,name=resource"`
+	// Subresource is one of the existing resource types. "" means none.
+	// +optional
+	Subresource string `json:"subresource,omitempty" protobuf:"bytes,6,opt,name=subresource"`
+	// Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all.
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"`
+}
+
+// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface
+type NonResourceAttributes struct {
+	// Path is the URL path of the request
+	// +optional
+	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
+	// Verb is the standard HTTP verb
+	// +optional
+	Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"`
+}
+
+// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes
+// and NonResourceAuthorizationAttributes must be set
+type SubjectAccessReviewSpec struct {
+	// ResourceAuthorizationAttributes describes information for a resource access request
+	// +optional
+	ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"`
+	// NonResourceAttributes describes information for a non-resource access request
+	// +optional
+	NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"`
+
+	// User is the user you're testing for.
+	// If you specify "User" but not "Groups", then is it interpreted as "What if User were not a member of any groups
+	// +optional
+	User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=verb"`
+	// Groups is the groups you're testing for.
+	// +optional
+	Groups []string `json:"groups,omitempty" protobuf:"bytes,4,rep,name=groups"`
+	// Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer
+	// it needs a reflection here.
+	// +optional
+	Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,5,rep,name=extra"`
+}
+
+// ExtraValue masks the value so protobuf can generate
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type ExtraValue []string
+
+func (t ExtraValue) String() string {
+	return fmt.Sprintf("%v", []string(t))
+}
+
+// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes
+// and NonResourceAuthorizationAttributes must be set
+type SelfSubjectAccessReviewSpec struct {
+	// ResourceAuthorizationAttributes describes information for a resource access request
+	// +optional
+	ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"`
+	// NonResourceAttributes describes information for a non-resource access request
+	// +optional
+	NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"`
+}
+
+// SubjectAccessReviewStatus
+type SubjectAccessReviewStatus struct {
+	// Allowed is required. True if the action would be allowed, false otherwise.
+	Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"`
+	// Reason is optional. It indicates why a request was allowed or denied.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"`
+	// EvaluationError is an indication that some error occurred during the authorization check.
+	// It is entirely possible to get an error and be able to continue determine authorization status in spite of it.
+	// For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.
+	// +optional
+	EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,3,opt,name=evaluationError"`
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/types_swagger_doc_generated.go
new file mode 100644
index 0000000000..33c0035b48
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/types_swagger_doc_generated.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_LocalSubjectAccessReview = map[string]string{
+	"": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.",
+	"spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against.
If empty, it is defaulted.", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (LocalSubjectAccessReview) SwaggerDoc() map[string]string { + return map_LocalSubjectAccessReview +} + +var map_NonResourceAttributes = map[string]string{ + "": "NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface", + "path": "Path is the URL path of the request", + "verb": "Verb is the standard HTTP verb", +} + +func (NonResourceAttributes) SwaggerDoc() map[string]string { + return map_NonResourceAttributes +} + +var map_ResourceAttributes = map[string]string{ + "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", + "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", + "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "group": "Group is the API Group of the Resource. \"*\" means all.", + "version": "Version is the API Version of the Resource. \"*\" means all.", + "resource": "Resource is one of the existing resource types. \"*\" means all.", + "subresource": "Subresource is one of the existing resource types. \"\" means none.", + "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", +} + +func (ResourceAttributes) SwaggerDoc() map[string]string { + return map_ResourceAttributes +} + +var map_SelfSubjectAccessReview = map[string]string{ + "": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action", + "spec": "Spec holds information about the request being evaluated. user and groups must be empty", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (SelfSubjectAccessReview) SwaggerDoc() map[string]string { + return map_SelfSubjectAccessReview +} + +var map_SelfSubjectAccessReviewSpec = map[string]string{ + "": "SelfSubjectAccessReviewSpec is a description of the access request. 
Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", + "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", + "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", +} + +func (SelfSubjectAccessReviewSpec) SwaggerDoc() map[string]string { + return map_SelfSubjectAccessReviewSpec +} + +var map_SubjectAccessReview = map[string]string{ + "": "SubjectAccessReview checks whether or not a user or group can perform an action.", + "spec": "Spec holds information about the request being evaluated", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (SubjectAccessReview) SwaggerDoc() map[string]string { + return map_SubjectAccessReview +} + +var map_SubjectAccessReviewSpec = map[string]string{ + "": "SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", + "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", + "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", + "user": "User is the user you're testing for. If you specify \"User\" but not \"Groups\", then is it interpreted as \"What if User were not a member of any groups", + "groups": "Groups is the groups you're testing for.", + "extra": "Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.", +} + +func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { + return map_SubjectAccessReviewSpec +} + +var map_SubjectAccessReviewStatus = map[string]string{ + "": "SubjectAccessReviewStatus", + "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "reason": "Reason is optional. It indicates why a request was allowed or denied.", + "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.", +} + +func (SubjectAccessReviewStatus) SwaggerDoc() map[string]string { + return map_SubjectAccessReviewStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/zz_generated.conversion.go new file mode 100644 index 0000000000..c81227ca2f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/zz_generated.conversion.go @@ -0,0 +1,263 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + authorization "k8s.io/kubernetes/pkg/apis/authorization" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview, + Convert_authorization_LocalSubjectAccessReview_To_v1_LocalSubjectAccessReview, + Convert_v1_NonResourceAttributes_To_authorization_NonResourceAttributes, + Convert_authorization_NonResourceAttributes_To_v1_NonResourceAttributes, + Convert_v1_ResourceAttributes_To_authorization_ResourceAttributes, + Convert_authorization_ResourceAttributes_To_v1_ResourceAttributes, + Convert_v1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview, + Convert_authorization_SelfSubjectAccessReview_To_v1_SelfSubjectAccessReview, + Convert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec, + Convert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec, + Convert_v1_SubjectAccessReview_To_authorization_SubjectAccessReview, + Convert_authorization_SubjectAccessReview_To_v1_SubjectAccessReview, + Convert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec, + Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec, + Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus, + Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus, + ) +} + +func autoConvert_v1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error { + return autoConvert_v1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in, out, s) +} + +func autoConvert_authorization_LocalSubjectAccessReview_To_v1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authorization_LocalSubjectAccessReview_To_v1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error { + return autoConvert_authorization_LocalSubjectAccessReview_To_v1_LocalSubjectAccessReview(in, out, s) +} + +func 
autoConvert_v1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error { + out.Path = in.Path + out.Verb = in.Verb + return nil +} + +func Convert_v1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error { + return autoConvert_v1_NonResourceAttributes_To_authorization_NonResourceAttributes(in, out, s) +} + +func autoConvert_authorization_NonResourceAttributes_To_v1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error { + out.Path = in.Path + out.Verb = in.Verb + return nil +} + +func Convert_authorization_NonResourceAttributes_To_v1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error { + return autoConvert_authorization_NonResourceAttributes_To_v1_NonResourceAttributes(in, out, s) +} + +func autoConvert_v1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Verb = in.Verb + out.Group = in.Group + out.Version = in.Version + out.Resource = in.Resource + out.Subresource = in.Subresource + out.Name = in.Name + return nil +} + +func Convert_v1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error { + return autoConvert_v1_ResourceAttributes_To_authorization_ResourceAttributes(in, out, s) +} + +func autoConvert_authorization_ResourceAttributes_To_v1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Verb = in.Verb + out.Group = in.Group + out.Version = in.Version + out.Resource = in.Resource + out.Subresource = in.Subresource + out.Name = in.Name + return nil +} + +func Convert_authorization_ResourceAttributes_To_v1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s conversion.Scope) error { + return autoConvert_authorization_ResourceAttributes_To_v1_ResourceAttributes(in, out, s) +} + +func autoConvert_v1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error { + return autoConvert_v1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in, out, s) +} + +func autoConvert_authorization_SelfSubjectAccessReview_To_v1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *SelfSubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := 
Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authorization_SelfSubjectAccessReview_To_v1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *SelfSubjectAccessReview, s conversion.Scope) error { + return autoConvert_authorization_SelfSubjectAccessReview_To_v1_SelfSubjectAccessReview(in, out, s) +} + +func autoConvert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error { + out.ResourceAttributes = (*authorization.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) + out.NonResourceAttributes = (*authorization.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) + return nil +} + +func Convert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error { + out.ResourceAttributes = (*ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) + out.NonResourceAttributes = (*NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) + return nil +} + +func Convert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_v1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error { + return autoConvert_v1_SubjectAccessReview_To_authorization_SubjectAccessReview(in, out, s) +} + +func autoConvert_authorization_SubjectAccessReview_To_v1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authorization_SubjectAccessReview_To_v1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error { + return autoConvert_authorization_SubjectAccessReview_To_v1_SubjectAccessReview(in, out, s) +} + +func 
autoConvert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error { + out.ResourceAttributes = (*authorization.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) + out.NonResourceAttributes = (*authorization.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) + out.User = in.User + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]authorization.ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error { + out.ResourceAttributes = (*ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) + out.NonResourceAttributes = (*NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) + out.User = in.User + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error { + out.Allowed = in.Allowed + out.Reason = in.Reason + out.EvaluationError = in.EvaluationError + return nil +} + +func Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error { + return autoConvert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in, out, s) +} + +func autoConvert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error { + out.Allowed = in.Allowed + out.Reason = in.Reason + out.EvaluationError = in.EvaluationError + return nil +} + +func Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error { + return autoConvert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..1f3199a358 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/zz_generated.deepcopy.go @@ -0,0 +1,179 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
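// Illustrative sketch, not part of the vendored files: the generated Convert_*
// functions above avoid per-element copies by reinterpreting the v1 slice and
// map fields as their internal equivalents through unsafe.Pointer, which is
// only sound because the paired type definitions share an identical memory
// layout. The stand-in names below (ExternalValue, InternalValue, externalSpec,
// internalSpec, convert) are assumptions for illustration only.
package main

import (
	"fmt"
	"unsafe"
)

// ExternalValue and InternalValue mirror v1.ExtraValue and
// authorization.ExtraValue: distinct named types, same underlying layout.
type ExternalValue []string
type InternalValue []string

type externalSpec struct {
	User  string
	Extra map[string]ExternalValue
}

type internalSpec struct {
	User  string
	Extra map[string]InternalValue
}

// convert mimics what conversion-gen emits: the map header is reinterpreted
// in place instead of copying every key and value.
func convert(in *externalSpec, out *internalSpec) {
	out.User = in.User
	out.Extra = *(*map[string]InternalValue)(unsafe.Pointer(&in.Extra))
}

func main() {
	in := externalSpec{
		User:  "jane",
		Extra: map[string]ExternalValue{"scopes": {"read", "write"}},
	}
	var out internalSpec
	convert(&in, &out)
	fmt.Println(out.User, out.Extra) // jane map[scopes:[read write]]
}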
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LocalSubjectAccessReview, InType: reflect.TypeOf(&LocalSubjectAccessReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NonResourceAttributes, InType: reflect.TypeOf(&NonResourceAttributes{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceAttributes, InType: reflect.TypeOf(&ResourceAttributes{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SelfSubjectAccessReview, InType: reflect.TypeOf(&SelfSubjectAccessReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SelfSubjectAccessReviewSpec, InType: reflect.TypeOf(&SelfSubjectAccessReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SubjectAccessReview, InType: reflect.TypeOf(&SubjectAccessReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SubjectAccessReviewSpec, InType: reflect.TypeOf(&SubjectAccessReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SubjectAccessReviewStatus, InType: reflect.TypeOf(&SubjectAccessReviewStatus{})}, + ) +} + +func DeepCopy_v1_LocalSubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LocalSubjectAccessReview) + out := out.(*LocalSubjectAccessReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_NonResourceAttributes(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NonResourceAttributes) + out := out.(*NonResourceAttributes) + *out = *in + return nil + } +} + +func DeepCopy_v1_ResourceAttributes(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceAttributes) + out := out.(*ResourceAttributes) + *out = *in + return nil + } +} + +func DeepCopy_v1_SelfSubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SelfSubjectAccessReview) + out := out.(*SelfSubjectAccessReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_SelfSubjectAccessReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SelfSubjectAccessReviewSpec) + out := out.(*SelfSubjectAccessReviewSpec) + *out = 
*in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + **out = **in + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_SubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectAccessReview) + out := out.(*SubjectAccessReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_SubjectAccessReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectAccessReviewSpec) + out := out.(*SubjectAccessReviewSpec) + *out = *in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + **out = **in + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + **out = **in + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*ExtraValue) + } + } + } + return nil + } +} + +func DeepCopy_v1_SubjectAccessReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectAccessReviewStatus) + out := out.(*SubjectAccessReviewStatus) + *out = *in + return nil + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/zz_generated.defaults.go new file mode 100644 index 0000000000..6df448eb9f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/BUILD index 328b31fe49..4ad249d585 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -23,18 +20,31 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.conversion.go", "zz_generated.deepcopy.go", + "zz_generated.defaults.go", ], tags = ["automanaged"], deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/apis/authorization:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//pkg/watch/versioned:go_default_library", "//vendor:github.com/gogo/protobuf/proto", "//vendor:github.com/gogo/protobuf/sortkeys", "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/types", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion.go index fa88791257..c401383656 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) func addConversionFuncs(scheme *runtime.Scheme) error { diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/defaults.go index 57dd337dac..cb49b06ac2 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/defaults.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) func addDefaultingFuncs(scheme *runtime.Scheme) error { diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/generated.pb.go index 062f999ac9..e35be352b7 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/generated.pb.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -670,7 +670,7 @@ func (this *LocalSubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&LocalSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -709,7 +709,7 @@ func (this *SelfSubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -732,7 +732,7 @@ func (this *SubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&SubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2283,60 +2283,62 @@ var ( ) var fileDescriptorGenerated = []byte{ - // 874 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x56, 0x41, 0x8f, 0xdb, 0x44, - 0x14, 0x8e, 0x93, 0x78, 0x37, 0x99, 0x05, 0xb6, 0x4c, 0x55, 0xd6, 0x0d, 0x92, 0x13, 0x05, 0x09, - 0x6d, 0xa5, 0xd6, 0x66, 0x57, 0xaa, 0xa8, 0x2a, 0x0e, 0xac, 0xc5, 0xaa, 0xaa, 0xa0, 0x05, 0xcd, - 0xc2, 0x0a, 0xc1, 0x69, 0xec, 0x7d, 0x4d, 0x4c, 0x12, 0x8f, 0x35, 0x33, 0x76, 0x59, 0x4e, 0xfd, - 0x01, 0x1c, 0x38, 0xf6, 0xc8, 0x5f, 0xe0, 0x0f, 0x70, 0x65, 0x8f, 0xe5, 0x82, 0x40, 0x42, 0x11, - 0x6b, 0xfe, 0x05, 0x27, 0xe4, 0xf1, 0x24, 0x6e, 0x36, 0x0e, 0x28, 0xb0, 0x42, 0x1c, 0x7a, 0xf3, - 0xbc, 0xf7, 0xbd, 0xf7, 0xbe, 0x99, 0xf9, 0xc6, 0xef, 0xa1, 0x77, 0x47, 0x77, 0x84, 0x13, 0x32, - 0x77, 0x94, 0xf8, 0xc0, 0x23, 0x90, 0x20, 0xdc, 0x78, 0x34, 0x70, 0x69, 0x1c, 0x0a, 0x97, 0x26, - 0x72, 0xc8, 0x78, 0xf8, 0x15, 0x95, 0x21, 0x8b, 0xdc, 0x74, 0xcf, 0x07, 0x49, 0xf7, 0xdc, 0x01, - 0x44, 0xc0, 0xa9, 0x84, 0x13, 0x27, 0xe6, 0x4c, 0x32, 0xfc, 0x56, 0x91, 0xc1, 0x29, 0x33, 0x38, - 0xf1, 0x68, 0xe0, 0xe4, 0x19, 0x9c, 0x85, 0x0c, 0x8e, 0xce, 0xd0, 0xb9, 0x35, 0x08, 0xe5, 0x30, - 0xf1, 0x9d, 0x80, 0x4d, 0xdc, 0x01, 0x1b, 0x30, 0x57, 0x25, 0xf2, 0x93, 0x47, 0x6a, 
0xa5, 0x16, - 0xea, 0xab, 0x28, 0xd0, 0xd9, 0x5f, 0x49, 0xd1, 0xe5, 0x20, 0x58, 0xc2, 0x03, 0xb8, 0x48, 0xaa, - 0x73, 0x7b, 0x75, 0x4c, 0x12, 0xa5, 0xc0, 0x45, 0xc8, 0x22, 0x38, 0x59, 0x0a, 0xbb, 0xb9, 0x3a, - 0x2c, 0x5d, 0xda, 0x79, 0xe7, 0x56, 0x35, 0x9a, 0x27, 0x91, 0x0c, 0x27, 0xcb, 0x9c, 0xf6, 0xaa, - 0xe1, 0x89, 0x0c, 0xc7, 0x6e, 0x18, 0x49, 0x21, 0xf9, 0xc5, 0x90, 0xfe, 0xdb, 0x08, 0x1d, 0x7e, - 0x29, 0x39, 0x3d, 0xa6, 0xe3, 0x04, 0x70, 0x17, 0x99, 0xa1, 0x84, 0x89, 0xb0, 0x8c, 0x5e, 0x63, - 0xb7, 0xed, 0xb5, 0xb3, 0x69, 0xd7, 0xbc, 0x9f, 0x1b, 0x48, 0x61, 0xbf, 0xdb, 0x7a, 0xfa, 0x6d, - 0xb7, 0xf6, 0xe4, 0xd7, 0x5e, 0xad, 0xff, 0x53, 0x1d, 0x59, 0x1f, 0xb0, 0x80, 0x8e, 0x8f, 0x12, - 0xff, 0x0b, 0x08, 0xe4, 0x41, 0x10, 0x80, 0x10, 0x04, 0xd2, 0x10, 0x1e, 0xe3, 0x4f, 0x51, 0x6b, - 0x02, 0x92, 0x9e, 0x50, 0x49, 0x2d, 0xa3, 0x67, 0xec, 0x6e, 0xed, 0xef, 0x3a, 0x2b, 0x2f, 0xd1, - 0x49, 0xf7, 0x9c, 0x0f, 0x55, 0x8e, 0x07, 0x20, 0xa9, 0x87, 0xcf, 0xa6, 0xdd, 0x5a, 0x36, 0xed, - 0xa2, 0xd2, 0x46, 0xe6, 0xd9, 0xf0, 0x08, 0x35, 0x45, 0x0c, 0x81, 0x55, 0x57, 0x59, 0xef, 0x3b, - 0xeb, 0x4a, 0xc3, 0xa9, 0xa0, 0x7b, 0x14, 0x43, 0xe0, 0xbd, 0xa4, 0xcb, 0x36, 0xf3, 0x15, 0x51, - 0x45, 0xb0, 0x40, 0x1b, 0x42, 0x52, 0x99, 0x08, 0xab, 0xa1, 0xca, 0xbd, 0x7f, 0x39, 0xe5, 0x54, - 0x4a, 0xef, 0x15, 0x5d, 0x70, 0xa3, 0x58, 0x13, 0x5d, 0xaa, 0xff, 0x39, 0xba, 0xf6, 0x90, 0x45, - 0x44, 0xeb, 0xee, 0x40, 0x4a, 0x1e, 0xfa, 0x89, 0x04, 0x81, 0x7b, 0xa8, 0x19, 0x53, 0x39, 0x54, - 0x07, 0xda, 0x2e, 0xf9, 0x7e, 0x44, 0xe5, 0x90, 0x28, 0x4f, 0x8e, 0x48, 0x81, 0xfb, 0xea, 0x70, - 0x9e, 0x43, 0x1c, 0x03, 0xf7, 0x89, 0xf2, 0xf4, 0xbf, 0xaf, 0x23, 0x5c, 0x91, 0xda, 0x45, 0xed, - 0x88, 0x4e, 0x40, 0xc4, 0x34, 0x00, 0x9d, 0xff, 0x55, 0x1d, 0xdd, 0x7e, 0x38, 0x73, 0x90, 0x12, - 0xf3, 0xf7, 0x95, 0xf0, 0x1b, 0xc8, 0x1c, 0x70, 0x96, 0xc4, 0xea, 0xe8, 0xda, 0xde, 0xcb, 0x1a, - 0x62, 0xde, 0xcb, 0x8d, 0xa4, 0xf0, 0xe1, 0x1b, 0x68, 0x53, 0x3f, 0x15, 0xab, 0xa9, 0x60, 0xdb, - 0x1a, 0xb6, 0x79, 0x5c, 0x98, 0xc9, 0xcc, 0x8f, 0x6f, 0xa2, 0xd6, 0xec, 0x2d, 0x5a, 0xa6, 0xc2, - 0x5e, 0xd1, 0xd8, 0xd6, 0x6c, 0x43, 0x64, 0x8e, 0xc0, 0xb7, 0xd1, 0x96, 0x48, 0xfc, 0x79, 0xc0, - 0x86, 0x0a, 0xb8, 0xaa, 0x03, 0xb6, 0x8e, 0x4a, 0x17, 0x79, 0x1e, 0x97, 0x6f, 0x2b, 0xdf, 0xa3, - 0xb5, 0xb9, 0xb8, 0xad, 0xfc, 0x08, 0x88, 0xf2, 0xf4, 0x7f, 0xa9, 0xa3, 0x9d, 0x23, 0x18, 0x3f, - 0xfa, 0x6f, 0x55, 0xcf, 0x16, 0x54, 0xff, 0xe0, 0x1f, 0xc8, 0xb0, 0x9a, 0xf2, 0xff, 0x4b, 0xf9, - 0x3f, 0xd4, 0xd1, 0xeb, 0x7f, 0x41, 0x14, 0x7f, 0x6d, 0x20, 0xcc, 0x97, 0xc4, 0xab, 0x8f, 0xfa, - 0xbd, 0xf5, 0x19, 0x2e, 0x3f, 0x04, 0xef, 0xb5, 0x6c, 0xda, 0xad, 0x78, 0x20, 0xa4, 0xa2, 0x2e, - 0x7e, 0x6a, 0xa0, 0x6b, 0x51, 0xd5, 0x4b, 0xd5, 0xd7, 0x74, 0x6f, 0x7d, 0x46, 0x95, 0x0f, 0xdf, - 0xbb, 0x9e, 0x4d, 0xbb, 0xd5, 0xff, 0x04, 0x52, 0x4d, 0xa0, 0xff, 0x63, 0x1d, 0x5d, 0x7d, 0xf1, - 0x5f, 0xbe, 0x5c, 0x75, 0xfe, 0xd1, 0x44, 0x3b, 0x2f, 0x94, 0xf9, 0x2f, 0x95, 0x39, 0x6f, 0x1c, - 0x8d, 0xc5, 0x3f, 0xec, 0x27, 0x02, 0xb8, 0x6e, 0x1c, 0xbd, 0x59, 0xe3, 0x68, 0xaa, 0x19, 0x04, - 0xe5, 0x57, 0xa1, 0x9a, 0x86, 0x98, 0x75, 0x8d, 0x53, 0x64, 0x42, 0x3e, 0xb3, 0x58, 0x66, 0xaf, - 0xb1, 0xbb, 0xb5, 0xff, 0xf1, 0xa5, 0x89, 0xcd, 0x51, 0xa3, 0xd0, 0x61, 0x24, 0xf9, 0x69, 0xd9, - 0xb0, 0x94, 0x8d, 0x14, 0x15, 0x3b, 0xa9, 0x1e, 0x97, 0x14, 0x06, 0x5f, 0x41, 0x8d, 0x11, 0x9c, - 0x16, 0x0d, 0x93, 0xe4, 0x9f, 0x98, 0x20, 0x33, 0xcd, 0x27, 0x29, 0x7d, 0xd0, 0xef, 0xac, 0x4f, - 0xad, 0x9c, 0xc6, 0x48, 0x91, 0xea, 0x6e, 0xfd, 0x8e, 0xd1, 0xff, 0xce, 0x40, 0xd7, 0x57, 0x4a, - 0x36, 0x6f, 
0xa3, 0x74, 0x3c, 0x66, 0x8f, 0xe1, 0x44, 0x71, 0x69, 0x95, 0x6d, 0xf4, 0xa0, 0x30, - 0x93, 0x99, 0x1f, 0xbf, 0x89, 0x36, 0x38, 0x50, 0xc1, 0x22, 0xdd, 0xba, 0xe7, 0x6a, 0x27, 0xca, - 0x4a, 0xb4, 0x17, 0x1f, 0xa0, 0x6d, 0xc8, 0xcb, 0x2b, 0x72, 0x87, 0x9c, 0x33, 0xae, 0xaf, 0x6c, - 0x47, 0x07, 0x6c, 0x1f, 0x2e, 0xba, 0xc9, 0x45, 0xbc, 0x77, 0xe3, 0xec, 0xdc, 0xae, 0x3d, 0x3b, - 0xb7, 0x6b, 0x3f, 0x9f, 0xdb, 0xb5, 0x27, 0x99, 0x6d, 0x9c, 0x65, 0xb6, 0xf1, 0x2c, 0xb3, 0x8d, - 0xdf, 0x32, 0xdb, 0xf8, 0xe6, 0x77, 0xbb, 0xf6, 0xd9, 0xa6, 0xde, 0xf4, 0x9f, 0x01, 0x00, 0x00, - 0xff, 0xff, 0x0c, 0xa4, 0x8c, 0xef, 0x24, 0x0c, 0x00, 0x00, + // 904 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x56, 0x4f, 0x6f, 0xdc, 0x44, + 0x14, 0x5f, 0xef, 0x9f, 0x64, 0x77, 0x02, 0xa4, 0x4c, 0x55, 0xe2, 0x06, 0x69, 0x77, 0xb5, 0x48, + 0x28, 0x95, 0x8a, 0xdd, 0x94, 0x7f, 0x55, 0xc5, 0x81, 0x58, 0x44, 0x55, 0x05, 0x2d, 0x68, 0x02, + 0x39, 0xc0, 0x85, 0xb1, 0xf3, 0xba, 0x6b, 0x76, 0xed, 0xb1, 0x66, 0xc6, 0x6e, 0xc3, 0xa9, 0x1f, + 0x80, 0x03, 0xc7, 0x1e, 0xf9, 0x0a, 0x7c, 0x01, 0xae, 0xe4, 0xd8, 0x23, 0x48, 0x68, 0x45, 0xcc, + 0xb7, 0xe0, 0x84, 0x66, 0x3c, 0xbb, 0xce, 0xb2, 0x0e, 0xd5, 0x42, 0x11, 0x1c, 0x72, 0xb3, 0xdf, + 0xfb, 0xbd, 0xf7, 0x7e, 0xef, 0xcd, 0x9b, 0x79, 0x0f, 0xbd, 0x3f, 0xbe, 0x25, 0x9c, 0x90, 0xb9, + 0xe3, 0xd4, 0x07, 0x1e, 0x83, 0x04, 0xe1, 0x26, 0xe3, 0xa1, 0x4b, 0x93, 0x50, 0xb8, 0x34, 0x95, + 0x23, 0xc6, 0xc3, 0xaf, 0xa9, 0x0c, 0x59, 0xec, 0x66, 0xbb, 0x3e, 0x48, 0xba, 0xeb, 0x0e, 0x21, + 0x06, 0x4e, 0x25, 0x1c, 0x39, 0x09, 0x67, 0x92, 0xe1, 0x1b, 0x85, 0x07, 0xa7, 0xf4, 0xe0, 0x24, + 0xe3, 0xa1, 0xa3, 0x3c, 0x38, 0x0b, 0x1e, 0x1c, 0xe3, 0x61, 0xfb, 0x8d, 0x61, 0x28, 0x47, 0xa9, + 0xef, 0x04, 0x2c, 0x72, 0x87, 0x6c, 0xc8, 0x5c, 0xed, 0xc8, 0x4f, 0x1f, 0xe8, 0x3f, 0xfd, 0xa3, + 0xbf, 0x8a, 0x00, 0xdb, 0x6f, 0x19, 0x8a, 0x34, 0x09, 0x23, 0x1a, 0x8c, 0xc2, 0x18, 0xf8, 0x71, + 0x49, 0x32, 0x02, 0x49, 0xdd, 0x6c, 0x89, 0xd6, 0xb6, 0x7b, 0x9e, 0x15, 0x4f, 0x63, 0x19, 0x46, + 0xb0, 0x64, 0xf0, 0xce, 0xb3, 0x0c, 0x44, 0x30, 0x82, 0x88, 0x2e, 0xd9, 0xbd, 0x79, 0x9e, 0x5d, + 0x2a, 0xc3, 0x89, 0x1b, 0xc6, 0x52, 0x48, 0xbe, 0x64, 0x74, 0x26, 0x27, 0x01, 0x3c, 0x03, 0x5e, + 0x26, 0x04, 0x8f, 0x68, 0x94, 0x4c, 0xa0, 0x2a, 0xa7, 0xeb, 0xe7, 0x1e, 0x56, 0x05, 0x7a, 0xf0, + 0x2e, 0x42, 0xfb, 0x8f, 0x24, 0xa7, 0x87, 0x74, 0x92, 0x02, 0xee, 0xa1, 0x56, 0x28, 0x21, 0x12, + 0xb6, 0xd5, 0x6f, 0xec, 0x74, 0xbc, 0x4e, 0x3e, 0xed, 0xb5, 0xee, 0x2a, 0x01, 0x29, 0xe4, 0xb7, + 0xdb, 0x4f, 0xbe, 0xeb, 0xd5, 0x1e, 0xff, 0xd2, 0xaf, 0x0d, 0xa6, 0x75, 0x64, 0x7f, 0xc4, 0x02, + 0x3a, 0x39, 0x48, 0xfd, 0xaf, 0x20, 0x90, 0x7b, 0x41, 0x00, 0x42, 0x10, 0xc8, 0x42, 0x78, 0x88, + 0xbf, 0x44, 0x6d, 0x55, 0xf2, 0x23, 0x2a, 0xa9, 0x6d, 0xf5, 0xad, 0x9d, 0x8d, 0x9b, 0x37, 0x1c, + 0xd3, 0x01, 0x67, 0x2b, 0x50, 0xf6, 0x80, 0x42, 0x3b, 0xd9, 0xae, 0xf3, 0xb1, 0xf6, 0x75, 0x0f, + 0x24, 0xf5, 0xf0, 0xc9, 0xb4, 0x57, 0xcb, 0xa7, 0x3d, 0x54, 0xca, 0xc8, 0xdc, 0x2b, 0x1e, 0xa3, + 0xa6, 0x48, 0x20, 0xb0, 0xeb, 0xda, 0xfb, 0x5d, 0x67, 0xd5, 0xfe, 0x72, 0x2a, 0x68, 0x1f, 0x24, + 0x10, 0x78, 0x2f, 0x98, 0xb0, 0x4d, 0xf5, 0x47, 0x74, 0x10, 0x2c, 0xd0, 0x9a, 0x90, 0x54, 0xa6, + 0xc2, 0x6e, 0xe8, 0x70, 0x1f, 0x3e, 0x9f, 0x70, 0xda, 0xa5, 0xf7, 0x92, 0x09, 0xb8, 0x56, 0xfc, + 0x13, 0x13, 0x6a, 0xf0, 0x05, 0xba, 0x72, 0x9f, 0xc5, 0x04, 0x04, 0x4b, 0x79, 0x00, 0x7b, 0x52, + 0xf2, 0xd0, 0x4f, 0x25, 0x08, 0xdc, 0x47, 0xcd, 0x84, 0xca, 0x91, 0x2e, 0x6c, 0xa7, 0xe4, 0xfb, + 0x09, 0x95, 0x23, 0xa2, 
0x35, 0x0a, 0x91, 0x01, 0xf7, 0x75, 0x71, 0xce, 0x20, 0x0e, 0x81, 0xfb, + 0x44, 0x6b, 0x06, 0x3f, 0xd4, 0x11, 0xae, 0x70, 0xed, 0xa2, 0x4e, 0x4c, 0x23, 0x10, 0x09, 0x0d, + 0xc0, 0xf8, 0x7f, 0xd9, 0x58, 0x77, 0xee, 0xcf, 0x14, 0xa4, 0xc4, 0x3c, 0x3b, 0x12, 0x7e, 0x0d, + 0xb5, 0x86, 0x9c, 0xa5, 0x89, 0x2e, 0x5d, 0xc7, 0x7b, 0xd1, 0x40, 0x5a, 0x77, 0x94, 0x90, 0x14, + 0x3a, 0x7c, 0x0d, 0xad, 0x67, 0xc0, 0x45, 0xc8, 0x62, 0xbb, 0xa9, 0x61, 0x9b, 0x06, 0xb6, 0x7e, + 0x58, 0x88, 0xc9, 0x4c, 0x8f, 0xaf, 0xa3, 0x36, 0x37, 0xc4, 0xed, 0x96, 0xc6, 0x5e, 0x32, 0xd8, + 0xf6, 0x2c, 0x21, 0x32, 0x47, 0xe0, 0xb7, 0xd1, 0x86, 0x48, 0xfd, 0xb9, 0xc1, 0x9a, 0x36, 0xb8, + 0x6c, 0x0c, 0x36, 0x0e, 0x4a, 0x15, 0x39, 0x8b, 0x53, 0x69, 0xa9, 0x1c, 0xed, 0xf5, 0xc5, 0xb4, + 0x54, 0x09, 0x88, 0xd6, 0x0c, 0x4e, 0xeb, 0x68, 0xeb, 0x00, 0x26, 0x0f, 0xfe, 0x9b, 0xee, 0x67, + 0x0b, 0xdd, 0x7f, 0xef, 0x6f, 0xb4, 0x63, 0x35, 0xf5, 0xff, 0xd7, 0x0d, 0xf8, 0xb1, 0x8e, 0x5e, + 0xfd, 0x0b, 0xa2, 0xf8, 0x1b, 0x0b, 0x61, 0xbe, 0xd4, 0xc4, 0xa6, 0xe4, 0x1f, 0xac, 0xce, 0x70, + 0xf9, 0x42, 0x78, 0xaf, 0xe4, 0xd3, 0x5e, 0xc5, 0x45, 0x21, 0x15, 0x71, 0xf1, 0x13, 0x0b, 0x5d, + 0x89, 0xab, 0x6e, 0xac, 0x39, 0xa6, 0x3b, 0xab, 0x33, 0xaa, 0x7c, 0x00, 0xbc, 0xab, 0xf9, 0xb4, + 0x57, 0xfd, 0x36, 0x90, 0x6a, 0x02, 0x83, 0x9f, 0xeb, 0xe8, 0xf2, 0xc5, 0x3b, 0xfd, 0xef, 0x74, + 0xe9, 0xef, 0x4d, 0xb4, 0x75, 0xd1, 0xa1, 0xff, 0xb0, 0x43, 0xe7, 0x83, 0xa4, 0xb1, 0xf8, 0xe2, + 0x7e, 0x26, 0x80, 0x9b, 0x41, 0xd2, 0x9f, 0x0d, 0x92, 0xa6, 0xde, 0x4d, 0x90, 0x3a, 0x0a, 0x3d, + 0x44, 0xc4, 0x6c, 0x8a, 0x1c, 0xa3, 0x16, 0xa8, 0x5d, 0xc6, 0x6e, 0xf5, 0x1b, 0x3b, 0x1b, 0x37, + 0x3f, 0x7d, 0x6e, 0xcd, 0xe6, 0xe8, 0x15, 0x69, 0x3f, 0x96, 0xfc, 0xb8, 0x1c, 0x60, 0x5a, 0x46, + 0x8a, 0x88, 0xdb, 0x99, 0x59, 0xa3, 0x34, 0x06, 0x5f, 0x42, 0x8d, 0x31, 0x1c, 0x17, 0x03, 0x94, + 0xa8, 0x4f, 0x4c, 0x50, 0x2b, 0x53, 0x1b, 0x96, 0x29, 0xf4, 0x7b, 0xab, 0x53, 0x2b, 0xb7, 0x34, + 0x52, 0xb8, 0xba, 0x5d, 0xbf, 0x65, 0x0d, 0xbe, 0xb7, 0xd0, 0xd5, 0x73, 0x5b, 0x56, 0x8d, 0x55, + 0x3a, 0x99, 0xb0, 0x87, 0x70, 0xa4, 0xb9, 0xb4, 0xcb, 0xb1, 0xba, 0x57, 0x88, 0xc9, 0x4c, 0x8f, + 0x5f, 0x47, 0x6b, 0x1c, 0xa8, 0x60, 0xb1, 0x19, 0xe5, 0xf3, 0x6e, 0x27, 0x5a, 0x4a, 0x8c, 0x16, + 0xef, 0xa1, 0x4d, 0x50, 0xe1, 0x35, 0xb9, 0x7d, 0xce, 0x19, 0x37, 0x47, 0xb6, 0x65, 0x0c, 0x36, + 0xf7, 0x17, 0xd5, 0xe4, 0xcf, 0x78, 0xef, 0xda, 0xc9, 0x69, 0xb7, 0xf6, 0xf4, 0xb4, 0x5b, 0xfb, + 0xe9, 0xb4, 0x5b, 0x7b, 0x9c, 0x77, 0xad, 0x93, 0xbc, 0x6b, 0x3d, 0xcd, 0xbb, 0xd6, 0xaf, 0x79, + 0xd7, 0xfa, 0xf6, 0xb7, 0x6e, 0xed, 0xf3, 0x75, 0x93, 0xf4, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x38, 0xbb, 0x77, 0xc4, 0x79, 0x0c, 0x00, 0x00, } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/generated.proto index b3df897785..6b77db4623 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/generated.proto +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -21,11 +21,12 @@ syntax = 'proto2'; package k8s.io.kubernetes.pkg.apis.authorization.v1beta1; -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; @@ -44,7 +45,7 @@ message ExtraValue { // checking. message LocalSubjectAccessReview { // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace // you made the request against. If empty, it is defaulted. @@ -105,7 +106,7 @@ message ResourceAttributes { // to check whether they can perform an action message SelfSubjectAccessReview { // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. user and groups must be empty optional SelfSubjectAccessReviewSpec spec = 2; @@ -130,7 +131,7 @@ message SelfSubjectAccessReviewSpec { // SubjectAccessReview checks whether or not a user or group can perform an action. message SubjectAccessReview { // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional SubjectAccessReviewSpec spec = 2; diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/register.go index 6463599412..66549ed832 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/register.go @@ -17,17 +17,21 @@ limitations under the License. package v1beta1 import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName is the group name use in this package const GroupName = "authorization.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1beta1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} var ( SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) @@ -37,19 +41,15 @@ var ( // Adds the list of known types to api.Scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &v1.ListOptions{}, - &v1.DeleteOptions{}, - &v1.ExportOptions{}, - &SelfSubjectAccessReview{}, &SubjectAccessReview{}, &LocalSubjectAccessReview{}, ) - versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } -func (obj *LocalSubjectAccessReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *SubjectAccessReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *SelfSubjectAccessReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *LocalSubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } +func (obj *SubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } +func (obj *SelfSubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.generated.go index 0e640dea72..939603c58a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.generated.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.generated.go @@ -25,9 +25,8 @@ import ( "errors" "fmt" codec1978 "github.com/ugorji/go/codec" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg2_v1 "k8s.io/kubernetes/pkg/api/v1" - pkg3_types "k8s.io/kubernetes/pkg/types" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" "reflect" "runtime" time "time" @@ -63,11 +62,10 @@ func init() { panic(err) } if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_unversioned.TypeMeta - var v1 pkg2_v1.ObjectMeta - var v2 pkg3_types.UID - var v3 time.Time - _, _, _, _ = v0, v1, v2, v3 + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 } } @@ -159,7 +157,13 @@ func (x *SubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[2] { yy10 := &x.ObjectMeta - yy10.CodecEncodeSelf(e) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } @@ -168,26 +172,32 @@ func (x *SubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.ObjectMeta - yy11.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy13 := &x.Spec - yy13.CodecEncodeSelf(e) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.Spec - yy14.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[4] { - yy16 := &x.Status - yy16.CodecEncodeSelf(e) + yy20 := &x.Status + yy20.CodecEncodeSelf(e) } else { r.EncodeNil() } @@ -196,8 +206,8 @@ func 
(x *SubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Status - yy17.CodecEncodeSelf(e) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } if yyr2 || yy2arr2 { @@ -213,25 +223,25 @@ func (x *SubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym18 := z.DecBinary() - _ = yym18 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct19 := r.ContainerType() - if yyct19 == codecSelferValueTypeMap1234 { - yyl19 := r.ReadMapStart() - if yyl19 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl19, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct19 == codecSelferValueTypeArray1234 { - yyl19 := r.ReadArrayStart() - if yyl19 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl19, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -243,12 +253,12 @@ func (x *SubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys20Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys20Slc - var yyhl20 bool = l >= 0 - for yyj20 := 0; ; yyj20++ { - if yyhl20 { - if yyj20 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -257,47 +267,65 @@ func (x *SubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys20Slc = r.DecodeBytes(yys20Slc, true, true) - yys20 := string(yys20Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys20 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv23 := &x.ObjectMeta - yyv23.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = SubjectAccessReviewSpec{} } else { - yyv24 := &x.Spec - yyv24.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = SubjectAccessReviewStatus{} } else { - yyv25 := &x.Status - yyv25.CodecDecodeSelf(d) + yyv11 := &x.Status + 
yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys20) - } // end switch yys20 - } // end for yyj20 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -305,16 +333,16 @@ func (x *SubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj26 int - var yyb26 bool - var yyhl26 bool = l >= 0 - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb26 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb26 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -322,15 +350,21 @@ func (x *SubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb26 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb26 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -338,32 +372,44 @@ func (x *SubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb26 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb26 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv29 := &x.ObjectMeta - yyv29.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb26 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb26 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -371,16 +417,16 @@ func (x *SubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Spec = SubjectAccessReviewSpec{} } else { - yyv30 := &x.Spec - yyv30.CodecDecodeSelf(d) + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb26 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb26 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -388,21 +434,21 @@ func (x *SubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Status = SubjectAccessReviewStatus{} } else { - yyv31 := &x.Status - yyv31.CodecDecodeSelf(d) + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) } for { - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb26 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb26 { + if yyb12 { break } 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj26-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -414,38 +460,38 @@ func (x *SelfSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym32 := z.EncBinary() - _ = yym32 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep33 := !z.EncBinary() - yy2arr33 := z.EncBasicHandle().StructToArray - var yyq33 [5]bool - _, _, _ = yysep33, yyq33, yy2arr33 - const yyr33 bool = false - yyq33[0] = x.Kind != "" - yyq33[1] = x.APIVersion != "" - yyq33[2] = true - yyq33[4] = true - var yynn33 int - if yyr33 || yy2arr33 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn33 = 1 - for _, b := range yyq33 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn33++ + yynn2++ } } - r.EncodeMapStart(yynn33) - yynn33 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr33 || yy2arr33 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[0] { - yym35 := z.EncBinary() - _ = yym35 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -454,23 +500,23 @@ func (x *SelfSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq33[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym36 := z.EncBinary() - _ = yym36 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr33 || yy2arr33 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[1] { - yym38 := z.EncBinary() - _ = yym38 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -479,64 +525,76 @@ func (x *SelfSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq33[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym39 := z.EncBinary() - _ = yym39 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr33 || yy2arr33 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[2] { - yy41 := &x.ObjectMeta - yy41.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq33[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy42 := &x.ObjectMeta - yy42.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + 
_ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr33 || yy2arr33 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy44 := &x.Spec - yy44.CodecEncodeSelf(e) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy45 := &x.Spec - yy45.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } - if yyr33 || yy2arr33 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[4] { - yy47 := &x.Status - yy47.CodecEncodeSelf(e) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq33[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy48 := &x.Status - yy48.CodecEncodeSelf(e) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } - if yyr33 || yy2arr33 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -549,25 +607,25 @@ func (x *SelfSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym49 := z.DecBinary() - _ = yym49 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct50 := r.ContainerType() - if yyct50 == codecSelferValueTypeMap1234 { - yyl50 := r.ReadMapStart() - if yyl50 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl50, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct50 == codecSelferValueTypeArray1234 { - yyl50 := r.ReadArrayStart() - if yyl50 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl50, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -579,12 +637,12 @@ func (x *SelfSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys51Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys51Slc - var yyhl51 bool = l >= 0 - for yyj51 := 0; ; yyj51++ { - if yyhl51 { - if yyj51 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -593,47 +651,65 @@ func (x *SelfSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Dec } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys51Slc = r.DecodeBytes(yys51Slc, true, true) - yys51 := string(yys51Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys51 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) 
= r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv54 := &x.ObjectMeta - yyv54.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = SelfSubjectAccessReviewSpec{} } else { - yyv55 := &x.Spec - yyv55.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = SubjectAccessReviewStatus{} } else { - yyv56 := &x.Status - yyv56.CodecDecodeSelf(d) + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys51) - } // end switch yys51 - } // end for yyj51 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -641,16 +717,16 @@ func (x *SelfSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.D var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj57 int - var yyb57 bool - var yyhl57 bool = l >= 0 - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb57 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb57 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -658,15 +734,21 @@ func (x *SelfSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb57 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb57 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -674,32 +756,44 @@ func (x *SelfSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb57 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb57 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv60 := &x.ObjectMeta - yyv60.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb57 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb57 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -707,16 +801,16 @@ func (x 
*SelfSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Spec = SelfSubjectAccessReviewSpec{} } else { - yyv61 := &x.Spec - yyv61.CodecDecodeSelf(d) + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb57 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb57 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -724,21 +818,21 @@ func (x *SelfSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Status = SubjectAccessReviewStatus{} } else { - yyv62 := &x.Status - yyv62.CodecDecodeSelf(d) + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) } for { - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb57 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb57 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj57-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -750,38 +844,38 @@ func (x *LocalSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym63 := z.EncBinary() - _ = yym63 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep64 := !z.EncBinary() - yy2arr64 := z.EncBasicHandle().StructToArray - var yyq64 [5]bool - _, _, _ = yysep64, yyq64, yy2arr64 - const yyr64 bool = false - yyq64[0] = x.Kind != "" - yyq64[1] = x.APIVersion != "" - yyq64[2] = true - yyq64[4] = true - var yynn64 int - if yyr64 || yy2arr64 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn64 = 1 - for _, b := range yyq64 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn64++ + yynn2++ } } - r.EncodeMapStart(yynn64) - yynn64 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr64 || yy2arr64 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq64[0] { - yym66 := z.EncBinary() - _ = yym66 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -790,23 +884,23 @@ func (x *LocalSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq64[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym67 := z.EncBinary() - _ = yym67 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr64 || yy2arr64 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq64[1] { - yym69 := z.EncBinary() - _ = yym69 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -815,64 +909,76 @@ func (x *LocalSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq64[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) 
r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym70 := z.EncBinary() - _ = yym70 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr64 || yy2arr64 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq64[2] { - yy72 := &x.ObjectMeta - yy72.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq64[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy73 := &x.ObjectMeta - yy73.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr64 || yy2arr64 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy75 := &x.Spec - yy75.CodecEncodeSelf(e) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy76 := &x.Spec - yy76.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } - if yyr64 || yy2arr64 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq64[4] { - yy78 := &x.Status - yy78.CodecEncodeSelf(e) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq64[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy79 := &x.Status - yy79.CodecEncodeSelf(e) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } - if yyr64 || yy2arr64 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -885,25 +991,25 @@ func (x *LocalSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym80 := z.DecBinary() - _ = yym80 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct81 := r.ContainerType() - if yyct81 == codecSelferValueTypeMap1234 { - yyl81 := r.ReadMapStart() - if yyl81 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl81, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct81 == codecSelferValueTypeArray1234 { - yyl81 := r.ReadArrayStart() - if yyl81 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl81, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -915,12 +1021,12 @@ func (x *LocalSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.De var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, 
_ = h, z, r - var yys82Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys82Slc - var yyhl82 bool = l >= 0 - for yyj82 := 0; ; yyj82++ { - if yyhl82 { - if yyj82 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -929,47 +1035,65 @@ func (x *LocalSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.De } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys82Slc = r.DecodeBytes(yys82Slc, true, true) - yys82 := string(yys82Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys82 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv85 := &x.ObjectMeta - yyv85.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = SubjectAccessReviewSpec{} } else { - yyv86 := &x.Spec - yyv86.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = SubjectAccessReviewStatus{} } else { - yyv87 := &x.Status - yyv87.CodecDecodeSelf(d) + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys82) - } // end switch yys82 - } // end for yyj82 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -977,16 +1101,16 @@ func (x *LocalSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978. var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj88 int - var yyb88 bool - var yyhl88 bool = l >= 0 - yyj88++ - if yyhl88 { - yyb88 = yyj88 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb88 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb88 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -994,15 +1118,21 @@ func (x *LocalSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978. if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj88++ - if yyhl88 { - yyb88 = yyj88 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb88 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb88 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1010,32 +1140,44 @@ func (x *LocalSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978. 
if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj88++ - if yyhl88 { - yyb88 = yyj88 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb88 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb88 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv91 := &x.ObjectMeta - yyv91.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj88++ - if yyhl88 { - yyb88 = yyj88 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb88 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb88 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1043,16 +1185,16 @@ func (x *LocalSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978. if r.TryDecodeAsNil() { x.Spec = SubjectAccessReviewSpec{} } else { - yyv92 := &x.Spec - yyv92.CodecDecodeSelf(d) + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) } - yyj88++ - if yyhl88 { - yyb88 = yyj88 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb88 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb88 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1060,21 +1202,21 @@ func (x *LocalSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978. if r.TryDecodeAsNil() { x.Status = SubjectAccessReviewStatus{} } else { - yyv93 := &x.Status - yyv93.CodecDecodeSelf(d) + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) } for { - yyj88++ - if yyhl88 { - yyb88 = yyj88 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb88 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb88 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj88-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -1086,41 +1228,41 @@ func (x *ResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym94 := z.EncBinary() - _ = yym94 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep95 := !z.EncBinary() - yy2arr95 := z.EncBasicHandle().StructToArray - var yyq95 [7]bool - _, _, _ = yysep95, yyq95, yy2arr95 - const yyr95 bool = false - yyq95[0] = x.Namespace != "" - yyq95[1] = x.Verb != "" - yyq95[2] = x.Group != "" - yyq95[3] = x.Version != "" - yyq95[4] = x.Resource != "" - yyq95[5] = x.Subresource != "" - yyq95[6] = x.Name != "" - var yynn95 int - if yyr95 || yy2arr95 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Namespace != "" + yyq2[1] = x.Verb != "" + yyq2[2] = x.Group != "" + yyq2[3] = x.Version != "" + yyq2[4] = x.Resource != "" + yyq2[5] = x.Subresource != "" + yyq2[6] = x.Name != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(7) } else { - yynn95 = 0 - for _, b := range yyq95 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn95++ + yynn2++ } } - r.EncodeMapStart(yynn95) - yynn95 = 0 + r.EncodeMapStart(yynn2) + 
yynn2 = 0 } - if yyr95 || yy2arr95 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq95[0] { - yym97 := z.EncBinary() - _ = yym97 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) @@ -1129,23 +1271,23 @@ func (x *ResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq95[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("namespace")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym98 := z.EncBinary() - _ = yym98 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) } } } - if yyr95 || yy2arr95 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq95[1] { - yym100 := z.EncBinary() - _ = yym100 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) @@ -1154,23 +1296,23 @@ func (x *ResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq95[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("verb")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym101 := z.EncBinary() - _ = yym101 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) } } } - if yyr95 || yy2arr95 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq95[2] { - yym103 := z.EncBinary() - _ = yym103 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Group)) @@ -1179,23 +1321,23 @@ func (x *ResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq95[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("group")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym104 := z.EncBinary() - _ = yym104 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Group)) } } } - if yyr95 || yy2arr95 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq95[3] { - yym106 := z.EncBinary() - _ = yym106 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Version)) @@ -1204,23 +1346,23 @@ func (x *ResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq95[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("version")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym107 := z.EncBinary() - _ = yym107 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Version)) } } } - if yyr95 || yy2arr95 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq95[4] { - yym109 := z.EncBinary() - _ = yym109 + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) @@ -1229,23 +1371,23 @@ func (x *ResourceAttributes) 
CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq95[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("resource")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym110 := z.EncBinary() - _ = yym110 + yym17 := z.EncBinary() + _ = yym17 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) } } } - if yyr95 || yy2arr95 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq95[5] { - yym112 := z.EncBinary() - _ = yym112 + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) @@ -1254,23 +1396,23 @@ func (x *ResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq95[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("subresource")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym113 := z.EncBinary() - _ = yym113 + yym20 := z.EncBinary() + _ = yym20 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) } } } - if yyr95 || yy2arr95 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq95[6] { - yym115 := z.EncBinary() - _ = yym115 + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) @@ -1279,19 +1421,19 @@ func (x *ResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq95[6] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("name")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym116 := z.EncBinary() - _ = yym116 + yym23 := z.EncBinary() + _ = yym23 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } } - if yyr95 || yy2arr95 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -1304,25 +1446,25 @@ func (x *ResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym117 := z.DecBinary() - _ = yym117 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct118 := r.ContainerType() - if yyct118 == codecSelferValueTypeMap1234 { - yyl118 := r.ReadMapStart() - if yyl118 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl118, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct118 == codecSelferValueTypeArray1234 { - yyl118 := r.ReadArrayStart() - if yyl118 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl118, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -1334,12 +1476,12 @@ func (x *ResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys119Slc = 
z.DecScratchBuffer() // default slice to decode into - _ = yys119Slc - var yyhl119 bool = l >= 0 - for yyj119 := 0; ; yyj119++ { - if yyhl119 { - if yyj119 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -1348,56 +1490,98 @@ func (x *ResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys119Slc = r.DecodeBytes(yys119Slc, true, true) - yys119 := string(yys119Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys119 { + switch yys3 { case "namespace": if r.TryDecodeAsNil() { x.Namespace = "" } else { - x.Namespace = string(r.DecodeString()) + yyv4 := &x.Namespace + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "verb": if r.TryDecodeAsNil() { x.Verb = "" } else { - x.Verb = string(r.DecodeString()) + yyv6 := &x.Verb + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "group": if r.TryDecodeAsNil() { x.Group = "" } else { - x.Group = string(r.DecodeString()) + yyv8 := &x.Group + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } case "version": if r.TryDecodeAsNil() { x.Version = "" } else { - x.Version = string(r.DecodeString()) + yyv10 := &x.Version + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } case "resource": if r.TryDecodeAsNil() { x.Resource = "" } else { - x.Resource = string(r.DecodeString()) + yyv12 := &x.Resource + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } case "subresource": if r.TryDecodeAsNil() { x.Subresource = "" } else { - x.Subresource = string(r.DecodeString()) + yyv14 := &x.Subresource + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } case "name": if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv16 := &x.Name + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys119) - } // end switch yys119 - } // end for yyj119 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -1405,16 +1589,16 @@ func (x *ResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj127 int - var yyb127 bool - var yyhl127 bool = l >= 0 - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb127 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb127 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1422,15 +1606,21 @@ func (x *ResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.Namespace = "" } else { - x.Namespace = string(r.DecodeString()) + yyv19 := &x.Namespace + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l + yyj18++ + if 
yyhl18 { + yyb18 = yyj18 > l } else { - yyb127 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb127 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1438,15 +1628,21 @@ func (x *ResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.Verb = "" } else { - x.Verb = string(r.DecodeString()) + yyv21 := &x.Verb + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb127 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb127 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1454,15 +1650,21 @@ func (x *ResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.Group = "" } else { - x.Group = string(r.DecodeString()) + yyv23 := &x.Group + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb127 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb127 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1470,15 +1672,21 @@ func (x *ResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.Version = "" } else { - x.Version = string(r.DecodeString()) + yyv25 := &x.Version + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb127 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb127 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1486,15 +1694,21 @@ func (x *ResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.Resource = "" } else { - x.Resource = string(r.DecodeString()) + yyv27 := &x.Resource + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb127 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb127 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1502,15 +1716,21 @@ func (x *ResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.Subresource = "" } else { - x.Subresource = string(r.DecodeString()) + yyv29 := &x.Subresource + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb127 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb127 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1518,20 +1738,26 @@ func (x *ResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv31 := &x.Name + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } } for { - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb127 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb127 { + if yyb18 { break } 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj127-1, "") + z.DecStructFieldNotFound(yyj18-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -1543,36 +1769,36 @@ func (x *NonResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym135 := z.EncBinary() - _ = yym135 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep136 := !z.EncBinary() - yy2arr136 := z.EncBasicHandle().StructToArray - var yyq136 [2]bool - _, _, _ = yysep136, yyq136, yy2arr136 - const yyr136 bool = false - yyq136[0] = x.Path != "" - yyq136[1] = x.Verb != "" - var yynn136 int - if yyr136 || yy2arr136 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Path != "" + yyq2[1] = x.Verb != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn136 = 0 - for _, b := range yyq136 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn136++ + yynn2++ } } - r.EncodeMapStart(yynn136) - yynn136 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr136 || yy2arr136 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq136[0] { - yym138 := z.EncBinary() - _ = yym138 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Path)) @@ -1581,23 +1807,23 @@ func (x *NonResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq136[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("path")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym139 := z.EncBinary() - _ = yym139 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Path)) } } } - if yyr136 || yy2arr136 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq136[1] { - yym141 := z.EncBinary() - _ = yym141 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) @@ -1606,19 +1832,19 @@ func (x *NonResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq136[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("verb")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym142 := z.EncBinary() - _ = yym142 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) } } } - if yyr136 || yy2arr136 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -1631,25 +1857,25 @@ func (x *NonResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym143 := z.DecBinary() - _ = yym143 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct144 := r.ContainerType() - if yyct144 == codecSelferValueTypeMap1234 { - yyl144 := r.ReadMapStart() - if yyl144 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl144, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct144 == codecSelferValueTypeArray1234 { - yyl144 := r.ReadArrayStart() - if yyl144 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl144, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -1661,12 +1887,12 @@ func (x *NonResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys145Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys145Slc - var yyhl145 bool = l >= 0 - for yyj145 := 0; ; yyj145++ { - if yyhl145 { - if yyj145 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -1675,26 +1901,38 @@ func (x *NonResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decod } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys145Slc = r.DecodeBytes(yys145Slc, true, true) - yys145 := string(yys145Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys145 { + switch yys3 { case "path": if r.TryDecodeAsNil() { x.Path = "" } else { - x.Path = string(r.DecodeString()) + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "verb": if r.TryDecodeAsNil() { x.Verb = "" } else { - x.Verb = string(r.DecodeString()) + yyv6 := &x.Verb + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys145) - } // end switch yys145 - } // end for yyj145 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -1702,16 +1940,16 @@ func (x *NonResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj148 int - var yyb148 bool - var yyhl148 bool = l >= 0 - yyj148++ - if yyhl148 { - yyb148 = yyj148 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb148 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb148 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1719,15 +1957,21 @@ func (x *NonResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.Path = "" } else { - x.Path = string(r.DecodeString()) + yyv9 := &x.Path + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } } - yyj148++ - if yyhl148 { - yyb148 = yyj148 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb148 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb148 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1735,20 +1979,26 @@ func (x *NonResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.Verb = "" } else { - x.Verb = string(r.DecodeString()) + yyv11 := &x.Verb + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + 
*((*string)(yyv11)) = r.DecodeString() + } } for { - yyj148++ - if yyhl148 { - yyb148 = yyj148 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb148 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb148 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj148-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -1760,37 +2010,37 @@ func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym151 := z.EncBinary() - _ = yym151 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep152 := !z.EncBinary() - yy2arr152 := z.EncBasicHandle().StructToArray - var yyq152 [5]bool - _, _, _ = yysep152, yyq152, yy2arr152 - const yyr152 bool = false - yyq152[0] = x.ResourceAttributes != nil - yyq152[1] = x.NonResourceAttributes != nil - yyq152[2] = x.User != "" - yyq152[3] = len(x.Groups) != 0 - yyq152[4] = len(x.Extra) != 0 - var yynn152 int - if yyr152 || yy2arr152 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ResourceAttributes != nil + yyq2[1] = x.NonResourceAttributes != nil + yyq2[2] = x.User != "" + yyq2[3] = len(x.Groups) != 0 + yyq2[4] = len(x.Extra) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn152 = 0 - for _, b := range yyq152 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn152++ + yynn2++ } } - r.EncodeMapStart(yynn152) - yynn152 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr152 || yy2arr152 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq152[0] { + if yyq2[0] { if x.ResourceAttributes == nil { r.EncodeNil() } else { @@ -1800,7 +2050,7 @@ func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq152[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("resourceAttributes")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -1811,9 +2061,9 @@ func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr152 || yy2arr152 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq152[1] { + if yyq2[1] { if x.NonResourceAttributes == nil { r.EncodeNil() } else { @@ -1823,7 +2073,7 @@ func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq152[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nonResourceAttributes")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -1834,11 +2084,11 @@ func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr152 || yy2arr152 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq152[2] { - yym156 := z.EncBinary() - _ = yym156 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.User)) @@ -1847,26 +2097,26 @@ func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq152[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, 
string("user")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym157 := z.EncBinary() - _ = yym157 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.User)) } } } - if yyr152 || yy2arr152 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq152[3] { + if yyq2[3] { if x.Groups == nil { r.EncodeNil() } else { - yym159 := z.EncBinary() - _ = yym159 + yym13 := z.EncBinary() + _ = yym13 if false { } else { z.F.EncSliceStringV(x.Groups, false, e) @@ -1876,15 +2126,15 @@ func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq152[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("group")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Groups == nil { r.EncodeNil() } else { - yym160 := z.EncBinary() - _ = yym160 + yym14 := z.EncBinary() + _ = yym14 if false { } else { z.F.EncSliceStringV(x.Groups, false, e) @@ -1892,14 +2142,14 @@ func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr152 || yy2arr152 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq152[4] { + if yyq2[4] { if x.Extra == nil { r.EncodeNil() } else { - yym162 := z.EncBinary() - _ = yym162 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) @@ -1909,15 +2159,15 @@ func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq152[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("extra")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Extra == nil { r.EncodeNil() } else { - yym163 := z.EncBinary() - _ = yym163 + yym17 := z.EncBinary() + _ = yym17 if false { } else { h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) @@ -1925,7 +2175,7 @@ func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr152 || yy2arr152 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -1938,25 +2188,25 @@ func (x *SubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym164 := z.DecBinary() - _ = yym164 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct165 := r.ContainerType() - if yyct165 == codecSelferValueTypeMap1234 { - yyl165 := r.ReadMapStart() - if yyl165 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl165, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct165 == codecSelferValueTypeArray1234 { - yyl165 := r.ReadArrayStart() - if yyl165 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl165, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -1968,12 +2218,12 @@ func (x *SubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Dec var h codecSelfer1234 z, 
r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys166Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys166Slc - var yyhl166 bool = l >= 0 - for yyj166 := 0; ; yyj166++ { - if yyhl166 { - if yyj166 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -1982,10 +2232,10 @@ func (x *SubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Dec } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys166Slc = r.DecodeBytes(yys166Slc, true, true) - yys166 := string(yys166Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys166 { + switch yys3 { case "resourceAttributes": if r.TryDecodeAsNil() { if x.ResourceAttributes != nil { @@ -2012,36 +2262,42 @@ func (x *SubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.User = "" } else { - x.User = string(r.DecodeString()) + yyv6 := &x.User + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "group": if r.TryDecodeAsNil() { x.Groups = nil } else { - yyv170 := &x.Groups - yym171 := z.DecBinary() - _ = yym171 + yyv8 := &x.Groups + yym9 := z.DecBinary() + _ = yym9 if false { } else { - z.F.DecSliceStringX(yyv170, false, d) + z.F.DecSliceStringX(yyv8, false, d) } } case "extra": if r.TryDecodeAsNil() { x.Extra = nil } else { - yyv172 := &x.Extra - yym173 := z.DecBinary() - _ = yym173 + yyv10 := &x.Extra + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decMapstringExtraValue((*map[string]ExtraValue)(yyv172), d) + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys166) - } // end switch yys166 - } // end for yyj166 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -2049,16 +2305,16 @@ func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.D var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj174 int - var yyb174 bool - var yyhl174 bool = l >= 0 - yyj174++ - if yyhl174 { - yyb174 = yyj174 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb174 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb174 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2073,13 +2329,13 @@ func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.D } x.ResourceAttributes.CodecDecodeSelf(d) } - yyj174++ - if yyhl174 { - yyb174 = yyj174 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb174 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb174 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2094,13 +2350,13 @@ func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.D } x.NonResourceAttributes.CodecDecodeSelf(d) } - yyj174++ - if yyhl174 { - yyb174 = yyj174 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb174 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb174 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2108,15 +2364,21 @@ func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { 
x.User = "" } else { - x.User = string(r.DecodeString()) + yyv15 := &x.User + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj174++ - if yyhl174 { - yyb174 = yyj174 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb174 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb174 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2124,21 +2386,21 @@ func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Groups = nil } else { - yyv178 := &x.Groups - yym179 := z.DecBinary() - _ = yym179 + yyv17 := &x.Groups + yym18 := z.DecBinary() + _ = yym18 if false { } else { - z.F.DecSliceStringX(yyv178, false, d) + z.F.DecSliceStringX(yyv17, false, d) } } - yyj174++ - if yyhl174 { - yyb174 = yyj174 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb174 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb174 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2146,26 +2408,26 @@ func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Extra = nil } else { - yyv180 := &x.Extra - yym181 := z.DecBinary() - _ = yym181 + yyv19 := &x.Extra + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decMapstringExtraValue((*map[string]ExtraValue)(yyv180), d) + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv19), d) } } for { - yyj174++ - if yyhl174 { - yyb174 = yyj174 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb174 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb174 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj174-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -2177,8 +2439,8 @@ func (x ExtraValue) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym182 := z.EncBinary() - _ = yym182 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -2191,8 +2453,8 @@ func (x *ExtraValue) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym183 := z.DecBinary() - _ = yym183 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -2207,34 +2469,34 @@ func (x *SelfSubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym184 := z.EncBinary() - _ = yym184 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep185 := !z.EncBinary() - yy2arr185 := z.EncBasicHandle().StructToArray - var yyq185 [2]bool - _, _, _ = yysep185, yyq185, yy2arr185 - const yyr185 bool = false - yyq185[0] = x.ResourceAttributes != nil - yyq185[1] = x.NonResourceAttributes != nil - var yynn185 int - if yyr185 || yy2arr185 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ResourceAttributes != nil + yyq2[1] = x.NonResourceAttributes != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn185 = 0 - for _, b := range yyq185 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn185++ + yynn2++ } } - r.EncodeMapStart(yynn185) - yynn185 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr185 || yy2arr185 { + if yyr2 || 
yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq185[0] { + if yyq2[0] { if x.ResourceAttributes == nil { r.EncodeNil() } else { @@ -2244,7 +2506,7 @@ func (x *SelfSubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq185[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("resourceAttributes")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -2255,9 +2517,9 @@ func (x *SelfSubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr185 || yy2arr185 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq185[1] { + if yyq2[1] { if x.NonResourceAttributes == nil { r.EncodeNil() } else { @@ -2267,7 +2529,7 @@ func (x *SelfSubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq185[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nonResourceAttributes")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -2278,7 +2540,7 @@ func (x *SelfSubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr185 || yy2arr185 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -2291,25 +2553,25 @@ func (x *SelfSubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym188 := z.DecBinary() - _ = yym188 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct189 := r.ContainerType() - if yyct189 == codecSelferValueTypeMap1234 { - yyl189 := r.ReadMapStart() - if yyl189 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl189, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct189 == codecSelferValueTypeArray1234 { - yyl189 := r.ReadArrayStart() - if yyl189 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl189, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -2321,12 +2583,12 @@ func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys190Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys190Slc - var yyhl190 bool = l >= 0 - for yyj190 := 0; ; yyj190++ { - if yyhl190 { - if yyj190 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -2335,10 +2597,10 @@ func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978 } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys190Slc = r.DecodeBytes(yys190Slc, true, true) - yys190 := string(yys190Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys190 { + switch yys3 { case "resourceAttributes": if 
r.TryDecodeAsNil() { if x.ResourceAttributes != nil { @@ -2362,9 +2624,9 @@ func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978 x.NonResourceAttributes.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys190) - } // end switch yys190 - } // end for yyj190 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -2372,16 +2634,16 @@ func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec19 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj193 int - var yyb193 bool - var yyhl193 bool = l >= 0 - yyj193++ - if yyhl193 { - yyb193 = yyj193 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb193 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb193 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2396,13 +2658,13 @@ func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec19 } x.ResourceAttributes.CodecDecodeSelf(d) } - yyj193++ - if yyhl193 { - yyb193 = yyj193 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb193 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb193 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2418,17 +2680,17 @@ func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec19 x.NonResourceAttributes.CodecDecodeSelf(d) } for { - yyj193++ - if yyhl193 { - yyb193 = yyj193 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb193 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb193 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj193-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -2440,35 +2702,35 @@ func (x *SubjectAccessReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym196 := z.EncBinary() - _ = yym196 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep197 := !z.EncBinary() - yy2arr197 := z.EncBasicHandle().StructToArray - var yyq197 [3]bool - _, _, _ = yysep197, yyq197, yy2arr197 - const yyr197 bool = false - yyq197[1] = x.Reason != "" - yyq197[2] = x.EvaluationError != "" - var yynn197 int - if yyr197 || yy2arr197 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Reason != "" + yyq2[2] = x.EvaluationError != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn197 = 1 - for _, b := range yyq197 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn197++ + yynn2++ } } - r.EncodeMapStart(yynn197) - yynn197 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr197 || yy2arr197 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym199 := z.EncBinary() - _ = yym199 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeBool(bool(x.Allowed)) @@ -2477,18 +2739,18 @@ func (x *SubjectAccessReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("allowed")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym200 := z.EncBinary() - _ = yym200 + yym5 := z.EncBinary() + _ = yym5 if false { } else { 
r.EncodeBool(bool(x.Allowed)) } } - if yyr197 || yy2arr197 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq197[1] { - yym202 := z.EncBinary() - _ = yym202 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) @@ -2497,23 +2759,23 @@ func (x *SubjectAccessReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq197[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("reason")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym203 := z.EncBinary() - _ = yym203 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) } } } - if yyr197 || yy2arr197 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq197[2] { - yym205 := z.EncBinary() - _ = yym205 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.EvaluationError)) @@ -2522,19 +2784,19 @@ func (x *SubjectAccessReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq197[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("evaluationError")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym206 := z.EncBinary() - _ = yym206 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.EvaluationError)) } } } - if yyr197 || yy2arr197 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -2547,25 +2809,25 @@ func (x *SubjectAccessReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym207 := z.DecBinary() - _ = yym207 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct208 := r.ContainerType() - if yyct208 == codecSelferValueTypeMap1234 { - yyl208 := r.ReadMapStart() - if yyl208 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl208, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct208 == codecSelferValueTypeArray1234 { - yyl208 := r.ReadArrayStart() - if yyl208 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl208, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -2577,12 +2839,12 @@ func (x *SubjectAccessReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.D var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys209Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys209Slc - var yyhl209 bool = l >= 0 - for yyj209 := 0; ; yyj209++ { - if yyhl209 { - if yyj209 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -2591,32 +2853,50 @@ func (x 
*SubjectAccessReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.D } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys209Slc = r.DecodeBytes(yys209Slc, true, true) - yys209 := string(yys209Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys209 { + switch yys3 { case "allowed": if r.TryDecodeAsNil() { x.Allowed = false } else { - x.Allowed = bool(r.DecodeBool()) + yyv4 := &x.Allowed + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*bool)(yyv4)) = r.DecodeBool() + } } case "reason": if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv6 := &x.Reason + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "evaluationError": if r.TryDecodeAsNil() { x.EvaluationError = "" } else { - x.EvaluationError = string(r.DecodeString()) + yyv8 := &x.EvaluationError + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys209) - } // end switch yys209 - } // end for yyj209 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -2624,16 +2904,16 @@ func (x *SubjectAccessReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj213 int - var yyb213 bool - var yyhl213 bool = l >= 0 - yyj213++ - if yyhl213 { - yyb213 = yyj213 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb213 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb213 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2641,15 +2921,21 @@ func (x *SubjectAccessReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978 if r.TryDecodeAsNil() { x.Allowed = false } else { - x.Allowed = bool(r.DecodeBool()) + yyv11 := &x.Allowed + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } } - yyj213++ - if yyhl213 { - yyb213 = yyj213 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb213 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb213 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2657,15 +2943,21 @@ func (x *SubjectAccessReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978 if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv13 := &x.Reason + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj213++ - if yyhl213 { - yyb213 = yyj213 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb213 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb213 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2673,20 +2965,26 @@ func (x *SubjectAccessReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978 if r.TryDecodeAsNil() { x.EvaluationError = "" } else { - x.EvaluationError = string(r.DecodeString()) + yyv15 := &x.EvaluationError + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } for { - yyj213++ - if yyhl213 { - yyb213 = yyj213 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb213 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb213 { + if yyb10 { break } 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj213-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -2696,19 +2994,19 @@ func (x codecSelfer1234) encMapstringExtraValue(v map[string]ExtraValue, e *code z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeMapStart(len(v)) - for yyk217, yyv217 := range v { + for yyk1, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym218 := z.EncBinary() - _ = yym218 + yym2 := z.EncBinary() + _ = yym2 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(yyk217)) + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) } z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyv217 == nil { + if yyv1 == nil { r.EncodeNil() } else { - yyv217.CodecEncodeSelf(e) + yyv1.CodecEncodeSelf(e) } } z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -2719,70 +3017,82 @@ func (x codecSelfer1234) decMapstringExtraValue(v *map[string]ExtraValue, d *cod z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv219 := *v - yyl219 := r.ReadMapStart() - yybh219 := z.DecBasicHandle() - if yyv219 == nil { - yyrl219, _ := z.DecInferLen(yyl219, yybh219.MaxInitLen, 40) - yyv219 = make(map[string]ExtraValue, yyrl219) - *v = yyv219 - } - var yymk219 string - var yymv219 ExtraValue - var yymg219 bool - if yybh219.MapValueReset { - yymg219 = true - } - if yyl219 > 0 { - for yyj219 := 0; yyj219 < yyl219; yyj219++ { + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string]ExtraValue, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 ExtraValue + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { z.DecSendContainerState(codecSelfer_containerMapKey1234) if r.TryDecodeAsNil() { - yymk219 = "" + yymk1 = "" } else { - yymk219 = string(r.DecodeString()) + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } } - if yymg219 { - yymv219 = yyv219[yymk219] + if yymg1 { + yymv1 = yyv1[yymk1] } else { - yymv219 = nil + yymv1 = nil } z.DecSendContainerState(codecSelfer_containerMapValue1234) if r.TryDecodeAsNil() { - yymv219 = nil + yymv1 = nil } else { - yyv221 := &yymv219 - yyv221.CodecDecodeSelf(d) + yyv4 := &yymv1 + yyv4.CodecDecodeSelf(d) } - if yyv219 != nil { - yyv219[yymk219] = yymv219 + if yyv1 != nil { + yyv1[yymk1] = yymv1 } } - } else if yyl219 < 0 { - for yyj219 := 0; !r.CheckBreak(); yyj219++ { + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { z.DecSendContainerState(codecSelfer_containerMapKey1234) if r.TryDecodeAsNil() { - yymk219 = "" + yymk1 = "" } else { - yymk219 = string(r.DecodeString()) + yyv5 := &yymk1 + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } } - if yymg219 { - yymv219 = yyv219[yymk219] + if yymg1 { + yymv1 = yyv1[yymk1] } else { - yymv219 = nil + yymv1 = nil } z.DecSendContainerState(codecSelfer_containerMapValue1234) if r.TryDecodeAsNil() { - yymv219 = nil + yymv1 = nil } else { - yyv223 := &yymv219 - yyv223.CodecDecodeSelf(d) + yyv7 := &yymv1 + yyv7.CodecDecodeSelf(d) } - if yyv219 != nil { - yyv219[yymk219] = yymv219 + if yyv1 != nil { + yyv1[yymk1] = yymv1 } } } // else len==0: TODO: Should we clear map entries? 
@@ -2794,13 +3104,13 @@ func (x codecSelfer1234) encExtraValue(v ExtraValue, e *codec1978.Encoder) { z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv224 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym225 := z.EncBinary() - _ = yym225 + yym2 := z.EncBinary() + _ = yym2 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(yyv224)) + r.EncodeString(codecSelferC_UTF81234, string(yyv1)) } } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) @@ -2811,75 +3121,96 @@ func (x codecSelfer1234) decExtraValue(v *ExtraValue, d *codec1978.Decoder) { z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv226 := *v - yyh226, yyl226 := z.DecSliceHelperStart() - var yyc226 bool - if yyl226 == 0 { - if yyv226 == nil { - yyv226 = []string{} - yyc226 = true - } else if len(yyv226) != 0 { - yyv226 = yyv226[:0] - yyc226 = true - } - } else if yyl226 > 0 { - var yyrr226, yyrl226 int - var yyrt226 bool - if yyl226 > cap(yyv226) { + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrl226, yyrt226 = z.DecInferLen(yyl226, z.DecBasicHandle().MaxInitLen, 16) - if yyrt226 { - if yyrl226 <= cap(yyv226) { - yyv226 = yyv226[:yyrl226] + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv226 = make([]string, yyrl226) + yyv1 = make([]string, yyrl1) } } else { - yyv226 = make([]string, yyrl226) + yyv1 = make([]string, yyrl1) } - yyc226 = true - yyrr226 = len(yyv226) - } else if yyl226 != len(yyv226) { - yyv226 = yyv226[:yyl226] - yyc226 = true + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj226 := 0 - for ; yyj226 < yyrr226; yyj226++ { - yyh226.ElemContainerState(yyj226) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv226[yyj226] = "" + yyv1[yyj1] = "" } else { - yyv226[yyj226] = string(r.DecodeString()) + yyv2 := &yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } } } - if yyrt226 { - for ; yyj226 < yyl226; yyj226++ { - yyv226 = append(yyv226, "") - yyh226.ElemContainerState(yyj226) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv226[yyj226] = "" + yyv1[yyj1] = "" } else { - yyv226[yyj226] = string(r.DecodeString()) + yyv4 := &yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } } } } else { - yyj226 := 0 - for ; !r.CheckBreak(); yyj226++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj226 >= len(yyv226) { - yyv226 = append(yyv226, "") // var yyz226 string - yyc226 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 string + yyc1 = true } - yyh226.ElemContainerState(yyj226) - if yyj226 < len(yyv226) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv226[yyj226] = "" + yyv1[yyj1] = "" } else { - yyv226[yyj226] = string(r.DecodeString()) + yyv6 := &yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + 
*((*string)(yyv6)) = r.DecodeString() + } } } else { @@ -2887,16 +3218,16 @@ func (x codecSelfer1234) decExtraValue(v *ExtraValue, d *codec1978.Decoder) { } } - if yyj226 < len(yyv226) { - yyv226 = yyv226[:yyj226] - yyc226 = true - } else if yyj226 == 0 && yyv226 == nil { - yyv226 = []string{} - yyc226 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []string{} + yyc1 = true } } - yyh226.End() - if yyc226 { - *v = yyv226 + yyh1.End() + if yyc1 { + *v = yyv1 } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.go index dccc6cc5aa..8a1727423b 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.go @@ -19,8 +19,7 @@ package v1beta1 import ( "fmt" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // +genclient=true @@ -29,9 +28,9 @@ import ( // SubjectAccessReview checks whether or not a user or group can perform an action. type SubjectAccessReview struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec holds information about the request being evaluated Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` @@ -49,9 +48,9 @@ type SubjectAccessReview struct { // spec.namespace means "in all namespaces". Self is a special case, because users should always be able // to check whether they can perform an action type SelfSubjectAccessReview struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec holds information about the request being evaluated. user and groups must be empty Spec SelfSubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` @@ -68,9 +67,9 @@ type SelfSubjectAccessReview struct { // Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions // checking. type LocalSubjectAccessReview struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace // you made the request against. If empty, it is defaulted. diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.conversion.go index 1f212ba1a3..92ed181ae1 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -21,9 +21,9 @@ limitations under the License. package v1beta1 import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" authorization "k8s.io/kubernetes/pkg/apis/authorization" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" unsafe "unsafe" ) @@ -55,10 +55,7 @@ func RegisterConversions(scheme *runtime.Scheme) error { } func autoConvert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -73,10 +70,7 @@ func Convert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAcces } func autoConvert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -141,10 +135,7 @@ func Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in * } func autoConvert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -159,10 +150,7 @@ func Convert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessR } func autoConvert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *SelfSubjectAccessReview, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -197,10 +185,7 @@ func Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAcc } func autoConvert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -215,10 +200,7 @@ func Convert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in } func autoConvert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go index c365c8db0a..2bbd414ad2 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,9 +21,9 @@ limitations under the License. package v1beta1 import ( - v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" reflect "reflect" ) @@ -50,14 +50,15 @@ func DeepCopy_v1beta1_LocalSubjectAccessReview(in interface{}, out interface{}, { in := in.(*LocalSubjectAccessReview) out := out.(*LocalSubjectAccessReview) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { return err } - out.Status = in.Status return nil } } @@ -66,8 +67,7 @@ func DeepCopy_v1beta1_NonResourceAttributes(in interface{}, out interface{}, c * { in := in.(*NonResourceAttributes) out := out.(*NonResourceAttributes) - out.Path = in.Path - out.Verb = in.Verb + *out = *in return nil } } @@ -76,13 +76,7 @@ func DeepCopy_v1beta1_ResourceAttributes(in interface{}, out interface{}, c *con { in := in.(*ResourceAttributes) out := out.(*ResourceAttributes) - out.Namespace = in.Namespace - out.Verb = in.Verb - out.Group = in.Group - out.Version = in.Version - out.Resource = in.Resource - out.Subresource = in.Subresource - out.Name = in.Name + *out = *in return nil } } @@ -91,14 +85,15 @@ func DeepCopy_v1beta1_SelfSubjectAccessReview(in interface{}, out interface{}, c { in := in.(*SelfSubjectAccessReview) out := out.(*SelfSubjectAccessReview) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := 
DeepCopy_v1beta1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { return err } - out.Status = in.Status return nil } } @@ -107,19 +102,16 @@ func DeepCopy_v1beta1_SelfSubjectAccessReviewSpec(in interface{}, out interface{ { in := in.(*SelfSubjectAccessReviewSpec) out := out.(*SelfSubjectAccessReviewSpec) + *out = *in if in.ResourceAttributes != nil { in, out := &in.ResourceAttributes, &out.ResourceAttributes *out = new(ResourceAttributes) **out = **in - } else { - out.ResourceAttributes = nil } if in.NonResourceAttributes != nil { in, out := &in.NonResourceAttributes, &out.NonResourceAttributes *out = new(NonResourceAttributes) **out = **in - } else { - out.NonResourceAttributes = nil } return nil } @@ -129,14 +121,15 @@ func DeepCopy_v1beta1_SubjectAccessReview(in interface{}, out interface{}, c *co { in := in.(*SubjectAccessReview) out := out.(*SubjectAccessReview) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { return err } - out.Status = in.Status return nil } } @@ -145,27 +138,21 @@ func DeepCopy_v1beta1_SubjectAccessReviewSpec(in interface{}, out interface{}, c { in := in.(*SubjectAccessReviewSpec) out := out.(*SubjectAccessReviewSpec) + *out = *in if in.ResourceAttributes != nil { in, out := &in.ResourceAttributes, &out.ResourceAttributes *out = new(ResourceAttributes) **out = **in - } else { - out.ResourceAttributes = nil } if in.NonResourceAttributes != nil { in, out := &in.NonResourceAttributes, &out.NonResourceAttributes *out = new(NonResourceAttributes) **out = **in - } else { - out.NonResourceAttributes = nil } - out.User = in.User if in.Groups != nil { in, out := &in.Groups, &out.Groups *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Groups = nil } if in.Extra != nil { in, out := &in.Extra, &out.Extra @@ -177,8 +164,6 @@ func DeepCopy_v1beta1_SubjectAccessReviewSpec(in interface{}, out interface{}, c (*out)[key] = *newVal.(*ExtraValue) } } - } else { - out.Extra = nil } return nil } @@ -188,9 +173,7 @@ func DeepCopy_v1beta1_SubjectAccessReviewStatus(in interface{}, out interface{}, { in := in.(*SubjectAccessReviewStatus) out := out.(*SubjectAccessReviewStatus) - out.Allowed = in.Allowed - out.Reason = in.Reason - out.EvaluationError = in.EvaluationError + *out = *in return nil } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.defaults.go new file mode 100644 index 0000000000..e24e70be38 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
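The regenerated deepcopy functions above replace field-by-field copies with a whole-struct assignment (*out = *in) followed by fix-ups of the pointer and slice fields; ObjectMeta itself still goes through the cloner (c.DeepCopy) because metav1.ObjectMeta carries its own pointer, map, and slice fields. A hedged sketch of that idiom as a standalone helper, assuming it lives alongside the v1beta1 types from this package (the Extra map, which the generated code handles via c.DeepCopy, is left out for brevity):

// deepCopySpecSketch mirrors what DeepCopy_v1beta1_SubjectAccessReviewSpec now
// does: *out = *in copies the plain value fields (User, ...), then each
// non-nil pointer or slice field is re-allocated so the copy does not alias
// the input.
func deepCopySpecSketch(in *SubjectAccessReviewSpec) *SubjectAccessReviewSpec {
	out := new(SubjectAccessReviewSpec)
	*out = *in
	if in.ResourceAttributes != nil {
		out.ResourceAttributes = new(ResourceAttributes)
		*out.ResourceAttributes = *in.ResourceAttributes
	}
	if in.NonResourceAttributes != nil {
		out.NonResourceAttributes = new(NonResourceAttributes)
		*out.NonResourceAttributes = *in.NonResourceAttributes
	}
	if in.Groups != nil {
		out.Groups = make([]string, len(in.Groups))
		copy(out.Groups, in.Groups)
	}
	return out
}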
+*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/zz_generated.deepcopy.go index bf80861467..19ccebdaaa 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,9 +21,9 @@ limitations under the License. package authorization import ( - api "k8s.io/kubernetes/pkg/api" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" reflect "reflect" ) @@ -50,14 +50,15 @@ func DeepCopy_authorization_LocalSubjectAccessReview(in interface{}, out interfa { in := in.(*LocalSubjectAccessReview) out := out.(*LocalSubjectAccessReview) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { return err } - out.Status = in.Status return nil } } @@ -66,8 +67,7 @@ func DeepCopy_authorization_NonResourceAttributes(in interface{}, out interface{ { in := in.(*NonResourceAttributes) out := out.(*NonResourceAttributes) - out.Path = in.Path - out.Verb = in.Verb + *out = *in return nil } } @@ -76,13 +76,7 @@ func DeepCopy_authorization_ResourceAttributes(in interface{}, out interface{}, { in := in.(*ResourceAttributes) out := out.(*ResourceAttributes) - out.Namespace = in.Namespace - out.Verb = in.Verb - out.Group = in.Group - out.Version = in.Version - out.Resource = in.Resource - out.Subresource = in.Subresource - out.Name = in.Name + *out = *in return nil } } @@ -91,14 +85,15 @@ func DeepCopy_authorization_SelfSubjectAccessReview(in interface{}, out interfac { in := in.(*SelfSubjectAccessReview) out := out.(*SelfSubjectAccessReview) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_authorization_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { return err } - out.Status = in.Status return nil } } @@ -107,19 +102,16 @@ func DeepCopy_authorization_SelfSubjectAccessReviewSpec(in interface{}, out inte { in := in.(*SelfSubjectAccessReviewSpec) out := out.(*SelfSubjectAccessReviewSpec) + *out = *in if in.ResourceAttributes != nil { in, out := &in.ResourceAttributes, &out.ResourceAttributes *out = new(ResourceAttributes) **out = **in - } else { - 
out.ResourceAttributes = nil } if in.NonResourceAttributes != nil { in, out := &in.NonResourceAttributes, &out.NonResourceAttributes *out = new(NonResourceAttributes) **out = **in - } else { - out.NonResourceAttributes = nil } return nil } @@ -129,14 +121,15 @@ func DeepCopy_authorization_SubjectAccessReview(in interface{}, out interface{}, { in := in.(*SubjectAccessReview) out := out.(*SubjectAccessReview) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { return err } - out.Status = in.Status return nil } } @@ -145,27 +138,21 @@ func DeepCopy_authorization_SubjectAccessReviewSpec(in interface{}, out interfac { in := in.(*SubjectAccessReviewSpec) out := out.(*SubjectAccessReviewSpec) + *out = *in if in.ResourceAttributes != nil { in, out := &in.ResourceAttributes, &out.ResourceAttributes *out = new(ResourceAttributes) **out = **in - } else { - out.ResourceAttributes = nil } if in.NonResourceAttributes != nil { in, out := &in.NonResourceAttributes, &out.NonResourceAttributes *out = new(NonResourceAttributes) **out = **in - } else { - out.NonResourceAttributes = nil } - out.User = in.User if in.Groups != nil { in, out := &in.Groups, &out.Groups *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Groups = nil } if in.Extra != nil { in, out := &in.Extra, &out.Extra @@ -177,8 +164,6 @@ func DeepCopy_authorization_SubjectAccessReviewSpec(in interface{}, out interfac (*out)[key] = *newVal.(*ExtraValue) } } - } else { - out.Extra = nil } return nil } @@ -188,9 +173,7 @@ func DeepCopy_authorization_SubjectAccessReviewStatus(in interface{}, out interf { in := in.(*SubjectAccessReviewStatus) out := out.(*SubjectAccessReviewStatus) - out.Allowed = in.Allowed - out.Reason = in.Reason - out.EvaluationError = in.EvaluationError + *out = *in return nil } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/BUILD index d12e144d53..b3db9c986e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/BUILD @@ -4,28 +4,44 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( name = "go_default_library", srcs = [ + "annotations.go", "doc.go", "register.go", - "types.generated.go", "types.go", "zz_generated.deepcopy.go", ], tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/api/resource", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/autoscaling/install:all-srcs", + "//pkg/apis/autoscaling/v1:all-srcs", + "//pkg/apis/autoscaling/v2alpha1:all-srcs", + 
"//pkg/apis/autoscaling/validation:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/OWNERS new file mode 100755 index 0000000000..9fbc54e93a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/OWNERS @@ -0,0 +1,20 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- caesarxuchao +- erictune +- sttts +- ncdc +- timothysc +- piosz +- dims +- errordeveloper +- madhusudancs +- krousey +- mml +- mbohlool +- david-mcmahon +- jianhuiz diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/annotations.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/annotations.go new file mode 100644 index 0000000000..4c377561bb --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/annotations.go @@ -0,0 +1,30 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaling + +// MetricSpecsAnnotation is the annotation which holds non-CPU-utilization HPA metric +// specs when converting the `Metrics` field from autoscaling/v2alpha1 +const MetricSpecsAnnotation = "autoscaling.alpha.kubernetes.io/metrics" + +// MetricStatusesAnnotation is the annotation which holds non-CPU-utilization HPA metric +// statuses when converting the `CurrentMetrics` field from autoscaling/v2alpha1 +const MetricStatusesAnnotation = "autoscaling.alpha.kubernetes.io/current-metrics" + +// DefaultCPUUtilization is the default value for CPU utilization, provided no other +// metrics are present. This is here because it's used by both the v2alpha1 defaulting +// logic, and the pseudo-defaulting done in v1 conversion. +const DefaultCPUUtilization = 80 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go index 7550f9d2b2..2c770186ba 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go @@ -15,6 +15,5 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package,register -// +k8s:openapi-gen=true package autoscaling diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/BUILD index 5bde6801d9..cb6a73502d 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -15,8 +12,25 @@ go_library( srcs = ["install.go"], tags = ["automanaged"], deps = [ - "//pkg/apimachinery/announced:go_default_library", + "//pkg/api:go_default_library", "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/autoscaling/v1:go_default_library", + "//pkg/apis/autoscaling/v2alpha1:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/announced", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/registered", + "//vendor:k8s.io/apimachinery/pkg/runtime", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/install.go index 5d498baa54..3625e246c9 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/install.go @@ -19,23 +19,33 @@ limitations under the License. package install import ( - "k8s.io/kubernetes/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/autoscaling/v1" + "k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1" ) func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { if err := announced.NewGroupMetaFactory( &announced.GroupMetaFactoryArgs{ GroupName: autoscaling.GroupName, - VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version}, + VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version, v2alpha1.SchemeGroupVersion.Version}, ImportPrefix: "k8s.io/kubernetes/pkg/apis/autoscaling", AddInternalObjectsToScheme: autoscaling.AddToScheme, }, announced.VersionToSchemeFunc{ - v1.SchemeGroupVersion.Version: v1.AddToScheme, + v1.SchemeGroupVersion.Version: v1.AddToScheme, + v2alpha1.SchemeGroupVersion.Version: v2alpha1.AddToScheme, }, - ).Announce().RegisterAndEnable(); err != nil { + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { panic(err) } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go index 0319dad2e6..2bcea84b9b 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go @@ -17,24 +17,23 @@ limitations under the License. 
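install.go now exposes the registration logic as an explicit Install(groupFactoryRegistry, registry, scheme) function instead of running it only from init(). A small sketch of how a caller uses it; the api.* globals are the same ones the init() in this hunk passes, and the example package/function names are illustrative:

package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/autoscaling/install"
)

// registerAutoscaling registers the autoscaling group (v1 preferred, then
// v2alpha1, per VersionPreferenceOrder above) into the default registry and
// scheme, exactly as the package's init() does; callers that maintain their
// own scheme and registry can pass those instead.
func registerAutoscaling() {
	install.Install(api.GroupFactoryRegistry, api.Registry, api.Scheme)
}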
package autoscaling import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName is the group name use in this package const GroupName = "autoscaling" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} // Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { +func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } // Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { +func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } @@ -49,8 +48,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &Scale{}, &HorizontalPodAutoscaler{}, &HorizontalPodAutoscalerList{}, - &api.ListOptions{}, - &api.DeleteOptions{}, ) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.generated.go deleted file mode 100644 index ecca35d7f2..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.generated.go +++ /dev/null @@ -1,2656 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package autoscaling - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg2_api "k8s.io/kubernetes/pkg/api" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg3_types "k8s.io/kubernetes/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg2_api.ObjectMeta - var v1 pkg1_unversioned.TypeMeta - var v2 pkg3_types.UID - var v3 time.Time - _, _, _, _ = v0, v1, v2, v3 - } -} - -func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yy10.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.ObjectMeta - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy13 := &x.Spec - yy13.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.Spec - yy14.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Status - 
yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct19 := r.ContainerType() - if yyct19 == codecSelferValueTypeMap1234 { - yyl19 := r.ReadMapStart() - if yyl19 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl19, d) - } - } else if yyct19 == codecSelferValueTypeArray1234 { - yyl19 := r.ReadArrayStart() - if yyl19 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl19, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys20Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys20Slc - var yyhl20 bool = l >= 0 - for yyj20 := 0; ; yyj20++ { - if yyhl20 { - if yyj20 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys20Slc = r.DecodeBytes(yys20Slc, true, true) - yys20 := string(yys20Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys20 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv23 := &x.ObjectMeta - yyv23.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ScaleSpec{} - } else { - yyv24 := &x.Spec - yyv24.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ScaleStatus{} - } else { - yyv25 := &x.Status - yyv25.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys20) - } // end switch yys20 - } // end for yyj20 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj26 int - var yyb26 bool - var yyhl26 bool = l >= 0 - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = 
pkg2_api.ObjectMeta{} - } else { - yyv29 := &x.ObjectMeta - yyv29.CodecDecodeSelf(d) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ScaleSpec{} - } else { - yyv30 := &x.Spec - yyv30.CodecDecodeSelf(d) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ScaleStatus{} - } else { - yyv31 := &x.Status - yyv31.CodecDecodeSelf(d) - } - for { - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj26-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym32 := z.EncBinary() - _ = yym32 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep33 := !z.EncBinary() - yy2arr33 := z.EncBasicHandle().StructToArray - var yyq33 [1]bool - _, _, _ = yysep33, yyq33, yy2arr33 - const yyr33 bool = false - yyq33[0] = x.Replicas != 0 - var yynn33 int - if yyr33 || yy2arr33 { - r.EncodeArrayStart(1) - } else { - yynn33 = 0 - for _, b := range yyq33 { - if b { - yynn33++ - } - } - r.EncodeMapStart(yynn33) - yynn33 = 0 - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[0] { - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq33[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym37 := z.DecBinary() - _ = yym37 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct38 := r.ContainerType() - if yyct38 == codecSelferValueTypeMap1234 { - yyl38 := r.ReadMapStart() - if yyl38 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl38, d) - } - } else if yyct38 == codecSelferValueTypeArray1234 { - yyl38 := r.ReadArrayStart() - if yyl38 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl38, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys39Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys39Slc - var yyhl39 bool = l >= 0 - for yyj39 := 0; ; yyj39++ { - if 
yyhl39 { - if yyj39 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys39Slc = r.DecodeBytes(yys39Slc, true, true) - yys39 := string(yys39Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys39 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys39) - } // end switch yys39 - } // end for yyj39 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj41 int - var yyb41 bool - var yyhl41 bool = l >= 0 - yyj41++ - if yyhl41 { - yyb41 = yyj41 > l - } else { - yyb41 = r.CheckBreak() - } - if yyb41 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - for { - yyj41++ - if yyhl41 { - yyb41 = yyj41 > l - } else { - yyb41 = r.CheckBreak() - } - if yyb41 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj41-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym43 := z.EncBinary() - _ = yym43 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep44 := !z.EncBinary() - yy2arr44 := z.EncBasicHandle().StructToArray - var yyq44 [2]bool - _, _, _ = yysep44, yyq44, yy2arr44 - const yyr44 bool = false - yyq44[1] = x.Selector != "" - var yynn44 int - if yyr44 || yy2arr44 { - r.EncodeArrayStart(2) - } else { - yynn44 = 1 - for _, b := range yyq44 { - if b { - yynn44++ - } - } - r.EncodeMapStart(yynn44) - yynn44 = 0 - } - if yyr44 || yy2arr44 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym46 := z.EncBinary() - _ = yym46 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym47 := z.EncBinary() - _ = yym47 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr44 || yy2arr44 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq44[1] { - yym49 := z.EncBinary() - _ = yym49 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Selector)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq44[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym50 := z.EncBinary() - _ = yym50 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Selector)) - } - } - } - if yyr44 || yy2arr44 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym51 := z.DecBinary() - _ = yym51 - 
if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct52 := r.ContainerType() - if yyct52 == codecSelferValueTypeMap1234 { - yyl52 := r.ReadMapStart() - if yyl52 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl52, d) - } - } else if yyct52 == codecSelferValueTypeArray1234 { - yyl52 := r.ReadArrayStart() - if yyl52 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl52, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys53Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys53Slc - var yyhl53 bool = l >= 0 - for yyj53 := 0; ; yyj53++ { - if yyhl53 { - if yyj53 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys53Slc = r.DecodeBytes(yys53Slc, true, true) - yys53 := string(yys53Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys53 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "selector": - if r.TryDecodeAsNil() { - x.Selector = "" - } else { - x.Selector = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys53) - } // end switch yys53 - } // end for yyj53 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj56 int - var yyb56 bool - var yyhl56 bool = l >= 0 - yyj56++ - if yyhl56 { - yyb56 = yyj56 > l - } else { - yyb56 = r.CheckBreak() - } - if yyb56 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - yyj56++ - if yyhl56 { - yyb56 = yyj56 > l - } else { - yyb56 = r.CheckBreak() - } - if yyb56 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Selector = "" - } else { - x.Selector = string(r.DecodeString()) - } - for { - yyj56++ - if yyhl56 { - yyb56 = yyj56 > l - } else { - yyb56 = r.CheckBreak() - } - if yyb56 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj56-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CrossVersionObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym59 := z.EncBinary() - _ = yym59 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep60 := !z.EncBinary() - yy2arr60 := z.EncBasicHandle().StructToArray - var yyq60 [3]bool - _, _, _ = yysep60, yyq60, yy2arr60 - const yyr60 bool = false - yyq60[2] = x.APIVersion != "" - var yynn60 int - if yyr60 || yy2arr60 { - r.EncodeArrayStart(3) - } else { - yynn60 = 2 - for _, b := range yyq60 { - if b { - yynn60++ - } - } - r.EncodeMapStart(yynn60) - yynn60 = 0 - } - if yyr60 || yy2arr60 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) 
- yym62 := z.EncBinary() - _ = yym62 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym63 := z.EncBinary() - _ = yym63 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - if yyr60 || yy2arr60 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym65 := z.EncBinary() - _ = yym65 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym66 := z.EncBinary() - _ = yym66 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr60 || yy2arr60 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq60[2] { - yym68 := z.EncBinary() - _ = yym68 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq60[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym69 := z.EncBinary() - _ = yym69 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr60 || yy2arr60 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CrossVersionObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym70 := z.DecBinary() - _ = yym70 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct71 := r.ContainerType() - if yyct71 == codecSelferValueTypeMap1234 { - yyl71 := r.ReadMapStart() - if yyl71 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl71, d) - } - } else if yyct71 == codecSelferValueTypeArray1234 { - yyl71 := r.ReadArrayStart() - if yyl71 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl71, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CrossVersionObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys72Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys72Slc - var yyhl72 bool = l >= 0 - for yyj72 := 0; ; yyj72++ { - if yyhl72 { - if yyj72 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys72Slc = r.DecodeBytes(yys72Slc, true, true) - yys72 := string(yys72Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys72 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - 
z.DecStructFieldNotFound(-1, yys72) - } // end switch yys72 - } // end for yyj72 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CrossVersionObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj76 int - var yyb76 bool - var yyhl76 bool = l >= 0 - yyj76++ - if yyhl76 { - yyb76 = yyj76 > l - } else { - yyb76 = r.CheckBreak() - } - if yyb76 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj76++ - if yyhl76 { - yyb76 = yyj76 > l - } else { - yyb76 = r.CheckBreak() - } - if yyb76 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj76++ - if yyhl76 { - yyb76 = yyj76 > l - } else { - yyb76 = r.CheckBreak() - } - if yyb76 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj76++ - if yyhl76 { - yyb76 = yyj76 > l - } else { - yyb76 = r.CheckBreak() - } - if yyb76 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj76-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym80 := z.EncBinary() - _ = yym80 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep81 := !z.EncBinary() - yy2arr81 := z.EncBasicHandle().StructToArray - var yyq81 [4]bool - _, _, _ = yysep81, yyq81, yy2arr81 - const yyr81 bool = false - yyq81[1] = x.MinReplicas != nil - yyq81[3] = x.TargetCPUUtilizationPercentage != nil - var yynn81 int - if yyr81 || yy2arr81 { - r.EncodeArrayStart(4) - } else { - yynn81 = 2 - for _, b := range yyq81 { - if b { - yynn81++ - } - } - r.EncodeMapStart(yynn81) - yynn81 = 0 - } - if yyr81 || yy2arr81 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy83 := &x.ScaleTargetRef - yy83.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("scaleTargetRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy84 := &x.ScaleTargetRef - yy84.CodecEncodeSelf(e) - } - if yyr81 || yy2arr81 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq81[1] { - if x.MinReplicas == nil { - r.EncodeNil() - } else { - yy86 := *x.MinReplicas - yym87 := z.EncBinary() - _ = yym87 - if false { - } else { - r.EncodeInt(int64(yy86)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq81[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MinReplicas == nil { - r.EncodeNil() - } else { - yy88 := *x.MinReplicas - yym89 := z.EncBinary() - _ = yym89 - if false { - } else { - r.EncodeInt(int64(yy88)) - } - } - } - } - if yyr81 || yy2arr81 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym91 := z.EncBinary() - _ = yym91 - if false { - } else { - r.EncodeInt(int64(x.MaxReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym92 := z.EncBinary() - _ = yym92 - if false { - } else { - r.EncodeInt(int64(x.MaxReplicas)) - } - } - if yyr81 || yy2arr81 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq81[3] { - if x.TargetCPUUtilizationPercentage == nil { - r.EncodeNil() - } else { - yy94 := *x.TargetCPUUtilizationPercentage - yym95 := z.EncBinary() - _ = yym95 - if false { - } else { - r.EncodeInt(int64(yy94)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq81[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetCPUUtilizationPercentage")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TargetCPUUtilizationPercentage == nil { - r.EncodeNil() - } else { - yy96 := *x.TargetCPUUtilizationPercentage - yym97 := z.EncBinary() - _ = yym97 - if false { - } else { - r.EncodeInt(int64(yy96)) - } - } - } - } - if yyr81 || yy2arr81 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscalerSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym98 := z.DecBinary() - _ = yym98 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct99 := r.ContainerType() - if yyct99 == codecSelferValueTypeMap1234 { - yyl99 := r.ReadMapStart() - if yyl99 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl99, d) - } - } else if yyct99 == codecSelferValueTypeArray1234 { - yyl99 := r.ReadArrayStart() - if yyl99 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl99, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys100Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys100Slc - var yyhl100 bool = l >= 0 - for yyj100 := 0; ; yyj100++ { - if yyhl100 { - if yyj100 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys100Slc = r.DecodeBytes(yys100Slc, true, true) - yys100 := string(yys100Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys100 { - case "scaleTargetRef": - if r.TryDecodeAsNil() { - x.ScaleTargetRef = CrossVersionObjectReference{} - } else { - yyv101 := &x.ScaleTargetRef - yyv101.CodecDecodeSelf(d) - } - case "minReplicas": - if r.TryDecodeAsNil() { - if x.MinReplicas != nil { - x.MinReplicas = nil - } - } else { - if x.MinReplicas == nil { - x.MinReplicas = new(int32) - } - yym103 := z.DecBinary() - _ = yym103 - if false { - } else { - *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) - } - } - case "maxReplicas": - if r.TryDecodeAsNil() { - x.MaxReplicas = 0 - } else { - x.MaxReplicas = int32(r.DecodeInt(32)) - } - case "targetCPUUtilizationPercentage": - if r.TryDecodeAsNil() { - if 
x.TargetCPUUtilizationPercentage != nil { - x.TargetCPUUtilizationPercentage = nil - } - } else { - if x.TargetCPUUtilizationPercentage == nil { - x.TargetCPUUtilizationPercentage = new(int32) - } - yym106 := z.DecBinary() - _ = yym106 - if false { - } else { - *((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys100) - } // end switch yys100 - } // end for yyj100 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj107 int - var yyb107 bool - var yyhl107 bool = l >= 0 - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ScaleTargetRef = CrossVersionObjectReference{} - } else { - yyv108 := &x.ScaleTargetRef - yyv108.CodecDecodeSelf(d) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.MinReplicas != nil { - x.MinReplicas = nil - } - } else { - if x.MinReplicas == nil { - x.MinReplicas = new(int32) - } - yym110 := z.DecBinary() - _ = yym110 - if false { - } else { - *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) - } - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxReplicas = 0 - } else { - x.MaxReplicas = int32(r.DecodeInt(32)) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TargetCPUUtilizationPercentage != nil { - x.TargetCPUUtilizationPercentage = nil - } - } else { - if x.TargetCPUUtilizationPercentage == nil { - x.TargetCPUUtilizationPercentage = new(int32) - } - yym113 := z.DecBinary() - _ = yym113 - if false { - } else { - *((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) - } - } - for { - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj107-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym114 := z.EncBinary() - _ = yym114 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep115 := !z.EncBinary() - yy2arr115 := z.EncBasicHandle().StructToArray - var yyq115 [5]bool - _, _, _ = yysep115, yyq115, yy2arr115 - const yyr115 bool = false - yyq115[0] = x.ObservedGeneration != nil - yyq115[1] = x.LastScaleTime != nil - yyq115[4] = x.CurrentCPUUtilizationPercentage != nil - var yynn115 int - if yyr115 || 
yy2arr115 { - r.EncodeArrayStart(5) - } else { - yynn115 = 2 - for _, b := range yyq115 { - if b { - yynn115++ - } - } - r.EncodeMapStart(yynn115) - yynn115 = 0 - } - if yyr115 || yy2arr115 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq115[0] { - if x.ObservedGeneration == nil { - r.EncodeNil() - } else { - yy117 := *x.ObservedGeneration - yym118 := z.EncBinary() - _ = yym118 - if false { - } else { - r.EncodeInt(int64(yy117)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq115[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ObservedGeneration == nil { - r.EncodeNil() - } else { - yy119 := *x.ObservedGeneration - yym120 := z.EncBinary() - _ = yym120 - if false { - } else { - r.EncodeInt(int64(yy119)) - } - } - } - } - if yyr115 || yy2arr115 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq115[1] { - if x.LastScaleTime == nil { - r.EncodeNil() - } else { - yym122 := z.EncBinary() - _ = yym122 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { - } else if yym122 { - z.EncBinaryMarshal(x.LastScaleTime) - } else if !yym122 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScaleTime) - } else { - z.EncFallback(x.LastScaleTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq115[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastScaleTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LastScaleTime == nil { - r.EncodeNil() - } else { - yym123 := z.EncBinary() - _ = yym123 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { - } else if yym123 { - z.EncBinaryMarshal(x.LastScaleTime) - } else if !yym123 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScaleTime) - } else { - z.EncFallback(x.LastScaleTime) - } - } - } - } - if yyr115 || yy2arr115 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym125 := z.EncBinary() - _ = yym125 - if false { - } else { - r.EncodeInt(int64(x.CurrentReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym126 := z.EncBinary() - _ = yym126 - if false { - } else { - r.EncodeInt(int64(x.CurrentReplicas)) - } - } - if yyr115 || yy2arr115 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym128 := z.EncBinary() - _ = yym128 - if false { - } else { - r.EncodeInt(int64(x.DesiredReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("desiredReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym129 := z.EncBinary() - _ = yym129 - if false { - } else { - r.EncodeInt(int64(x.DesiredReplicas)) - } - } - if yyr115 || yy2arr115 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq115[4] { - if x.CurrentCPUUtilizationPercentage == nil { - r.EncodeNil() - } else { - yy131 := *x.CurrentCPUUtilizationPercentage - yym132 := z.EncBinary() - _ = yym132 - if false { - } else { - r.EncodeInt(int64(yy131)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq115[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentCPUUtilizationPercentage")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CurrentCPUUtilizationPercentage == nil { - r.EncodeNil() - } else { - yy133 := *x.CurrentCPUUtilizationPercentage - yym134 := z.EncBinary() - _ = yym134 - if false { - } else { - r.EncodeInt(int64(yy133)) - } - } - } - } - if yyr115 || yy2arr115 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscalerStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym135 := z.DecBinary() - _ = yym135 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct136 := r.ContainerType() - if yyct136 == codecSelferValueTypeMap1234 { - yyl136 := r.ReadMapStart() - if yyl136 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl136, d) - } - } else if yyct136 == codecSelferValueTypeArray1234 { - yyl136 := r.ReadArrayStart() - if yyl136 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl136, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys137Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys137Slc - var yyhl137 bool = l >= 0 - for yyj137 := 0; ; yyj137++ { - if yyhl137 { - if yyj137 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys137Slc = r.DecodeBytes(yys137Slc, true, true) - yys137 := string(yys137Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys137 { - case "observedGeneration": - if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } - } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym139 := z.DecBinary() - _ = yym139 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } - } - case "lastScaleTime": - if r.TryDecodeAsNil() { - if x.LastScaleTime != nil { - x.LastScaleTime = nil - } - } else { - if x.LastScaleTime == nil { - x.LastScaleTime = new(pkg1_unversioned.Time) - } - yym141 := z.DecBinary() - _ = yym141 - if false { - } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { - } else if yym141 { - z.DecBinaryUnmarshal(x.LastScaleTime) - } else if !yym141 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScaleTime) - } else { - z.DecFallback(x.LastScaleTime, false) - } - } - case "currentReplicas": - if r.TryDecodeAsNil() { - x.CurrentReplicas = 0 - } else { - x.CurrentReplicas = int32(r.DecodeInt(32)) - } - case "desiredReplicas": - if r.TryDecodeAsNil() { - x.DesiredReplicas = 0 - } else { - x.DesiredReplicas = int32(r.DecodeInt(32)) - } - case "currentCPUUtilizationPercentage": - if r.TryDecodeAsNil() { - if x.CurrentCPUUtilizationPercentage != nil { - x.CurrentCPUUtilizationPercentage = nil - } - } else { - if x.CurrentCPUUtilizationPercentage == nil { - x.CurrentCPUUtilizationPercentage = new(int32) - } - yym145 := z.DecBinary() - _ = yym145 - if false { - } else { - *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys137) - } // end switch yys137 - } // end for 
yyj137 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj146 int - var yyb146 bool - var yyhl146 bool = l >= 0 - yyj146++ - if yyhl146 { - yyb146 = yyj146 > l - } else { - yyb146 = r.CheckBreak() - } - if yyb146 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } - } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym148 := z.DecBinary() - _ = yym148 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } - } - yyj146++ - if yyhl146 { - yyb146 = yyj146 > l - } else { - yyb146 = r.CheckBreak() - } - if yyb146 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LastScaleTime != nil { - x.LastScaleTime = nil - } - } else { - if x.LastScaleTime == nil { - x.LastScaleTime = new(pkg1_unversioned.Time) - } - yym150 := z.DecBinary() - _ = yym150 - if false { - } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { - } else if yym150 { - z.DecBinaryUnmarshal(x.LastScaleTime) - } else if !yym150 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScaleTime) - } else { - z.DecFallback(x.LastScaleTime, false) - } - } - yyj146++ - if yyhl146 { - yyb146 = yyj146 > l - } else { - yyb146 = r.CheckBreak() - } - if yyb146 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentReplicas = 0 - } else { - x.CurrentReplicas = int32(r.DecodeInt(32)) - } - yyj146++ - if yyhl146 { - yyb146 = yyj146 > l - } else { - yyb146 = r.CheckBreak() - } - if yyb146 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DesiredReplicas = 0 - } else { - x.DesiredReplicas = int32(r.DecodeInt(32)) - } - yyj146++ - if yyhl146 { - yyb146 = yyj146 > l - } else { - yyb146 = r.CheckBreak() - } - if yyb146 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CurrentCPUUtilizationPercentage != nil { - x.CurrentCPUUtilizationPercentage = nil - } - } else { - if x.CurrentCPUUtilizationPercentage == nil { - x.CurrentCPUUtilizationPercentage = new(int32) - } - yym154 := z.DecBinary() - _ = yym154 - if false { - } else { - *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) - } - } - for { - yyj146++ - if yyhl146 { - yyb146 = yyj146 > l - } else { - yyb146 = r.CheckBreak() - } - if yyb146 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj146-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym155 := z.EncBinary() - _ = yym155 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep156 := !z.EncBinary() - 
yy2arr156 := z.EncBasicHandle().StructToArray - var yyq156 [5]bool - _, _, _ = yysep156, yyq156, yy2arr156 - const yyr156 bool = false - yyq156[0] = x.Kind != "" - yyq156[1] = x.APIVersion != "" - yyq156[2] = true - yyq156[3] = true - yyq156[4] = true - var yynn156 int - if yyr156 || yy2arr156 { - r.EncodeArrayStart(5) - } else { - yynn156 = 0 - for _, b := range yyq156 { - if b { - yynn156++ - } - } - r.EncodeMapStart(yynn156) - yynn156 = 0 - } - if yyr156 || yy2arr156 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq156[0] { - yym158 := z.EncBinary() - _ = yym158 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq156[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym159 := z.EncBinary() - _ = yym159 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr156 || yy2arr156 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq156[1] { - yym161 := z.EncBinary() - _ = yym161 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq156[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym162 := z.EncBinary() - _ = yym162 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr156 || yy2arr156 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq156[2] { - yy164 := &x.ObjectMeta - yy164.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq156[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy165 := &x.ObjectMeta - yy165.CodecEncodeSelf(e) - } - } - if yyr156 || yy2arr156 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq156[3] { - yy167 := &x.Spec - yy167.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq156[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy168 := &x.Spec - yy168.CodecEncodeSelf(e) - } - } - if yyr156 || yy2arr156 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq156[4] { - yy170 := &x.Status - yy170.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq156[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy171 := &x.Status - yy171.CodecEncodeSelf(e) - } - } - if yyr156 || yy2arr156 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscaler) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym172 := z.DecBinary() - _ = yym172 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct173 := r.ContainerType() - if yyct173 == 
codecSelferValueTypeMap1234 { - yyl173 := r.ReadMapStart() - if yyl173 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl173, d) - } - } else if yyct173 == codecSelferValueTypeArray1234 { - yyl173 := r.ReadArrayStart() - if yyl173 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl173, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscaler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys174Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys174Slc - var yyhl174 bool = l >= 0 - for yyj174 := 0; ; yyj174++ { - if yyhl174 { - if yyj174 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys174Slc = r.DecodeBytes(yys174Slc, true, true) - yys174 := string(yys174Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys174 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv177 := &x.ObjectMeta - yyv177.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = HorizontalPodAutoscalerSpec{} - } else { - yyv178 := &x.Spec - yyv178.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = HorizontalPodAutoscalerStatus{} - } else { - yyv179 := &x.Status - yyv179.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys174) - } // end switch yys174 - } // end for yyj174 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj180 int - var yyb180 bool - var yyhl180 bool = l >= 0 - yyj180++ - if yyhl180 { - yyb180 = yyj180 > l - } else { - yyb180 = r.CheckBreak() - } - if yyb180 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj180++ - if yyhl180 { - yyb180 = yyj180 > l - } else { - yyb180 = r.CheckBreak() - } - if yyb180 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj180++ - if yyhl180 { - yyb180 = yyj180 > l - } else { - yyb180 = r.CheckBreak() - } - if yyb180 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv183 := &x.ObjectMeta - yyv183.CodecDecodeSelf(d) - } - yyj180++ - if yyhl180 { - yyb180 = yyj180 > l - } else { - yyb180 = r.CheckBreak() - } - if yyb180 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = 
HorizontalPodAutoscalerSpec{} - } else { - yyv184 := &x.Spec - yyv184.CodecDecodeSelf(d) - } - yyj180++ - if yyhl180 { - yyb180 = yyj180 > l - } else { - yyb180 = r.CheckBreak() - } - if yyb180 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = HorizontalPodAutoscalerStatus{} - } else { - yyv185 := &x.Status - yyv185.CodecDecodeSelf(d) - } - for { - yyj180++ - if yyhl180 { - yyb180 = yyj180 > l - } else { - yyb180 = r.CheckBreak() - } - if yyb180 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj180-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym186 := z.EncBinary() - _ = yym186 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep187 := !z.EncBinary() - yy2arr187 := z.EncBasicHandle().StructToArray - var yyq187 [4]bool - _, _, _ = yysep187, yyq187, yy2arr187 - const yyr187 bool = false - yyq187[0] = x.Kind != "" - yyq187[1] = x.APIVersion != "" - yyq187[2] = true - var yynn187 int - if yyr187 || yy2arr187 { - r.EncodeArrayStart(4) - } else { - yynn187 = 1 - for _, b := range yyq187 { - if b { - yynn187++ - } - } - r.EncodeMapStart(yynn187) - yynn187 = 0 - } - if yyr187 || yy2arr187 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq187[0] { - yym189 := z.EncBinary() - _ = yym189 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq187[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym190 := z.EncBinary() - _ = yym190 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr187 || yy2arr187 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq187[1] { - yym192 := z.EncBinary() - _ = yym192 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq187[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym193 := z.EncBinary() - _ = yym193 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr187 || yy2arr187 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq187[2] { - yy195 := &x.ListMeta - yym196 := z.EncBinary() - _ = yym196 - if false { - } else if z.HasExtensions() && z.EncExt(yy195) { - } else { - z.EncFallback(yy195) - } - } else { - r.EncodeNil() - } - } else { - if yyq187[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy197 := &x.ListMeta - yym198 := z.EncBinary() - _ = yym198 - if false { - } else if z.HasExtensions() && z.EncExt(yy197) { - } else { - z.EncFallback(yy197) - } - } - } - if yyr187 || yy2arr187 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym200 := z.EncBinary() - _ = yym200 - if false { - } else { - h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym201 := z.EncBinary() - _ = yym201 - if false { - } else { - h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) - } - } - } - if yyr187 || yy2arr187 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscalerList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym202 := z.DecBinary() - _ = yym202 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct203 := r.ContainerType() - if yyct203 == codecSelferValueTypeMap1234 { - yyl203 := r.ReadMapStart() - if yyl203 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl203, d) - } - } else if yyct203 == codecSelferValueTypeArray1234 { - yyl203 := r.ReadArrayStart() - if yyl203 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl203, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys204Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys204Slc - var yyhl204 bool = l >= 0 - for yyj204 := 0; ; yyj204++ { - if yyhl204 { - if yyj204 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys204Slc = r.DecodeBytes(yys204Slc, true, true) - yys204 := string(yys204Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys204 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv207 := &x.ListMeta - yym208 := z.DecBinary() - _ = yym208 - if false { - } else if z.HasExtensions() && z.DecExt(yyv207) { - } else { - z.DecFallback(yyv207, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv209 := &x.Items - yym210 := z.DecBinary() - _ = yym210 - if false { - } else { - h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv209), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys204) - } // end switch yys204 - } // end for yyj204 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj211 int - var yyb211 bool - var yyhl211 bool = l >= 0 - yyj211++ - if yyhl211 { - yyb211 = yyj211 > l - } else { - yyb211 = r.CheckBreak() - } - if yyb211 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj211++ - if yyhl211 { - yyb211 = yyj211 > l - } else { - yyb211 = r.CheckBreak() - } - if yyb211 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj211++ - if yyhl211 { - yyb211 = yyj211 > l - } else { - yyb211 = r.CheckBreak() - } - if yyb211 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv214 := &x.ListMeta - yym215 := z.DecBinary() - _ = yym215 - if false { - } else if z.HasExtensions() && z.DecExt(yyv214) { - } else { - z.DecFallback(yyv214, false) - } - } - yyj211++ - if yyhl211 { - yyb211 = yyj211 > l - } else { - yyb211 = r.CheckBreak() - } - if yyb211 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv216 := &x.Items - yym217 := z.DecBinary() - _ = yym217 - if false { - } else { - h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv216), d) - } - } - for { - yyj211++ - if yyhl211 { - yyb211 = yyj211 > l - } else { - yyb211 = r.CheckBreak() - } - if yyb211 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj211-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutoscaler, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv218 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy219 := &yyv218 - yy219.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutoscaler, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv220 := *v - yyh220, yyl220 := z.DecSliceHelperStart() - var yyc220 bool - if yyl220 == 0 { - if yyv220 == nil { - yyv220 = []HorizontalPodAutoscaler{} - yyc220 = true - } else if len(yyv220) != 0 { - yyv220 = yyv220[:0] - yyc220 = true - } - } else if yyl220 > 0 { - var yyrr220, yyrl220 int - var yyrt220 bool - if yyl220 > cap(yyv220) { - - yyrg220 := len(yyv220) > 0 - yyv2220 := yyv220 - yyrl220, yyrt220 = z.DecInferLen(yyl220, z.DecBasicHandle().MaxInitLen, 360) - if yyrt220 { - if yyrl220 <= cap(yyv220) { - yyv220 = yyv220[:yyrl220] - } else { - yyv220 = make([]HorizontalPodAutoscaler, yyrl220) - } - } else { - yyv220 = make([]HorizontalPodAutoscaler, yyrl220) - } - yyc220 = true - yyrr220 = len(yyv220) - if yyrg220 { - copy(yyv220, yyv2220) - } - } else if yyl220 != len(yyv220) { - yyv220 = yyv220[:yyl220] - yyc220 = true - } - yyj220 := 0 - for ; yyj220 < yyrr220; yyj220++ { - yyh220.ElemContainerState(yyj220) - if r.TryDecodeAsNil() { - yyv220[yyj220] = HorizontalPodAutoscaler{} - } else { - yyv221 := &yyv220[yyj220] - yyv221.CodecDecodeSelf(d) - } - - } - if yyrt220 { - for ; 
yyj220 < yyl220; yyj220++ { - yyv220 = append(yyv220, HorizontalPodAutoscaler{}) - yyh220.ElemContainerState(yyj220) - if r.TryDecodeAsNil() { - yyv220[yyj220] = HorizontalPodAutoscaler{} - } else { - yyv222 := &yyv220[yyj220] - yyv222.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj220 := 0 - for ; !r.CheckBreak(); yyj220++ { - - if yyj220 >= len(yyv220) { - yyv220 = append(yyv220, HorizontalPodAutoscaler{}) // var yyz220 HorizontalPodAutoscaler - yyc220 = true - } - yyh220.ElemContainerState(yyj220) - if yyj220 < len(yyv220) { - if r.TryDecodeAsNil() { - yyv220[yyj220] = HorizontalPodAutoscaler{} - } else { - yyv223 := &yyv220[yyj220] - yyv223.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj220 < len(yyv220) { - yyv220 = yyv220[:yyj220] - yyc220 = true - } else if yyj220 == 0 && yyv220 == nil { - yyv220 = []HorizontalPodAutoscaler{} - yyc220 = true - } - } - yyh220.End() - if yyc220 { - *v = yyv220 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go index 94cd54cb1a..65cdb54beb 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go @@ -17,119 +17,289 @@ limitations under the License. package autoscaling import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" ) // Scale represents a scaling request for a resource. type Scale struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. // +optional - api.ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. // +optional - Spec ScaleSpec `json:"spec,omitempty"` + Spec ScaleSpec // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. // +optional - Status ScaleStatus `json:"status,omitempty"` + Status ScaleStatus } // ScaleSpec describes the attributes of a scale subresource. type ScaleSpec struct { // desired number of instances for the scaled object. // +optional - Replicas int32 `json:"replicas,omitempty"` + Replicas int32 } // ScaleStatus represents the current status of a scale subresource. type ScaleStatus struct { // actual number of observed instances of the scaled object. - Replicas int32 `json:"replicas"` + Replicas int32 // label query over pods that should match the replicas count. This is same // as the label selector but in the string format to avoid introspection // by clients. The string will be in the same format as the query-param syntax. // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors // +optional - Selector string `json:"selector,omitempty"` + Selector string } // CrossVersionObjectReference contains enough information to let you identify the referred resource. 
type CrossVersionObjectReference struct { // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" - Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + Kind string // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names - Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + Name string // API version of the referent // +optional - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` + APIVersion string } -// specification of a horizontal pod autoscaler. +// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. type HorizontalPodAutoscalerSpec struct { - // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption - // and will set the desired number of pods by using its Scale subresource. - ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef"` - // lower limit for the number of pods that can be set by the autoscaler, default 1. + // ScaleTargetRef points to the target resource to scale, and is used to identify the pods for which metrics + // should be collected, as well as to actually change the replica count. + ScaleTargetRef CrossVersionObjectReference + // MinReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. + // It defaults to 1 pod. // +optional - MinReplicas *int32 `json:"minReplicas,omitempty"` - // upper limit for the number of pods that can be set by the autoscaler. It cannot be smaller than MinReplicas. - MaxReplicas int32 `json:"maxReplicas"` - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; - // if not specified the default autoscaling policy will be used. + MinReplicas *int32 + // MaxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. + // It cannot be less than minReplicas. + MaxReplicas int32 + // Metrics contains the specifications used to calculate the + // desired replica count (the maximum replica count across all metrics will + // be used). The desired replica count is calculated by multiplying the + // ratio between the current value and the target value by the current + // number of pods. Ergo, metrics used must decrease as the pod count is + // increased, and vice-versa. See the individual metric source types for + // more information about how each type of metric must respond. // +optional - TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty"` + Metrics []MetricSpec } -// current status of a horizontal pod autoscaler +// MetricSourceType indicates the type of metric. +type MetricSourceType string + +var ( + // ObjectMetricSourceType is a metric describing a kubernetes object + // (for example, hits-per-second on an Ingress object). + ObjectMetricSourceType MetricSourceType = "Object" + // PodsMetricSourceType is a metric describing each pod in the current scale + // target (for example, transactions-processed-per-second). The values + // will be averaged together before being compared to the target value. + PodsMetricSourceType MetricSourceType = "Pods" + // ResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing each pod in the current + // scale target (e.g. CPU or memory). 
Such metrics are built in to + Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ResourceMetricSourceType MetricSourceType = "Resource" +) + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +type MetricSpec struct { + // Type is the type of metric source. It should match one of the fields below. + Type MetricSourceType + + // Object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricSource + // Pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricSource + // Resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricSource +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricSource struct { + // Target is the described Kubernetes object. + Target CrossVersionObjectReference + + // MetricName is the name of the metric in question. + MetricName string + // TargetValue is the target value of the metric (as a quantity). + TargetValue resource.Quantity +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +type PodsMetricSource struct { + // MetricName is the name of the metric in question + MetricName string + // TargetAverageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + TargetAverageValue resource.Quantity +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ResourceMetricSource struct { + // Name is the name of the resource in question. + Name api.ResourceName + // TargetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + TargetAverageUtilization *int32 + // TargetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + TargetAverageValue *resource.Quantity +} + +// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. 
type HorizontalPodAutoscalerStatus struct { - // most recent generation observed by this autoscaler. + // ObservedGeneration is the most recent generation observed by this autoscaler. // +optional - ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + ObservedGeneration *int64 - // last time the HorizontalPodAutoscaler scaled the number of pods; + // LastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, // used by the autoscaler to control how often the number of pods is changed. // +optional - LastScaleTime *unversioned.Time `json:"lastScaleTime,omitempty"` + LastScaleTime *metav1.Time + + // CurrentReplicas is current number of replicas of pods managed by this autoscaler, + // as last seen by the autoscaler. + CurrentReplicas int32 + + // DesiredReplicas is the desired number of replicas of pods managed by this autoscaler, + // as last calculated by the autoscaler. + DesiredReplicas int32 - // current number of replicas of pods managed by this autoscaler. - CurrentReplicas int32 `json:"currentReplicas"` + // CurrentMetrics is the last read state of the metrics used by this autoscaler. + CurrentMetrics []MetricStatus +} + +// MetricStatus describes the last-read state of a single metric. +type MetricStatus struct { + // Type is the type of metric source. It will match one of the fields below. + Type MetricSourceType - // desired number of replicas of pods managed by this autoscaler. - DesiredReplicas int32 `json:"desiredReplicas"` + // Object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricStatus + // Pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricStatus + // Resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricStatus +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricStatus struct { + // Target is the described Kubernetes object. + Target CrossVersionObjectReference + + // MetricName is the name of the metric in question. + MetricName string + // CurrentValue is the current value of the metric (as a quantity). + CurrentValue resource.Quantity +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +type PodsMetricStatus struct { + // MetricName is the name of the metric in question + MetricName string + // CurrentAverageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + CurrentAverageValue resource.Quantity +} - // current average CPU utilization over all pods, represented as a percentage of requested CPU, - // e.g. 70 means that an average pod is using now 70% of its requested CPU. 
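The MetricSpec/MetricStatus pairing added above only describes the scaling rule in prose: each configured metric proposes a replica count equal to the current count multiplied by the ratio of the observed value to its target, and the autoscaler adopts the largest proposal. The standalone Go sketch below illustrates that arithmetic under those stated assumptions; the helper names are hypothetical and it deliberately omits the tolerance, readiness, and missing-metric handling the real controller performs, so it is not the vendored implementation.

package main

import (
	"fmt"
	"math"
)

// desiredReplicas is a hypothetical helper: for a single metric it scales the
// current replica count by the ratio of the observed value to the target value.
func desiredReplicas(currentReplicas int32, currentValue, targetValue float64) int32 {
	if targetValue == 0 {
		// The real controller rejects a zero target; returning the current count keeps the sketch total.
		return currentReplicas
	}
	return int32(math.Ceil(float64(currentReplicas) * currentValue / targetValue))
}

func main() {
	current := int32(4)
	// Two metrics are configured; the maximum proposal across all metrics wins.
	proposals := []int32{
		desiredReplicas(current, 80, 50),   // resource metric: 80% observed vs. 50% target -> 7
		desiredReplicas(current, 100, 200), // pods metric well under its target -> 2
	}
	winner := proposals[0]
	for _, p := range proposals[1:] {
		if p > winner {
			winner = p
		}
	}
	fmt.Println("desired replicas:", winner) // prints 7
}

Because the highest proposal wins, adding extra metrics can only hold replicas up or push them higher, which is why each metric must decrease as pods are added.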
+// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ResourceMetricStatus struct { + // Name is the name of the resource in question. + Name api.ResourceName + // CurrentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. // +optional - CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty"` + CurrentAverageUtilization *int32 + // CurrentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + CurrentAverageValue resource.Quantity } // +genclient=true -// configuration of a horizontal pod autoscaler. +// HorizontalPodAutoscaler is the configuration for a horizontal pod +// autoscaler, which automatically manages the replica count of any resource +// implementing the scale subresource based on the metrics specified. type HorizontalPodAutoscaler struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta + // Metadata is the standard object metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - api.ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta - // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // Spec is the specification for the behaviour of the autoscaler. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. // +optional - Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty"` + Spec HorizontalPodAutoscalerSpec - // current information about the autoscaler. + // Status is the current information about the autoscaler. // +optional - Status HorizontalPodAutoscalerStatus `json:"status,omitempty"` + Status HorizontalPodAutoscalerStatus } -// list of horizontal pod autoscaler objects. +// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects. type HorizontalPodAutoscalerList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta + // Metadata is the standard list metadata. // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta - // list of horizontal pod autoscaler objects. - Items []HorizontalPodAutoscaler `json:"items"` + // Items is the list of horizontal pod autoscaler objects. 
+ Items []HorizontalPodAutoscaler } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/BUILD index 04ef35ab65..c29d975262 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/BUILD @@ -4,15 +4,14 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", "go_test", - "cgo_library", ) go_library( name = "go_default_library", srcs = [ + "conversion.go", "defaults.go", "doc.go", "generated.pb.go", @@ -26,15 +25,17 @@ go_library( ], tags = ["automanaged"], deps = [ - "//pkg/api/unversioned:go_default_library", + "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", "//pkg/apis/autoscaling:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//pkg/watch/versioned:go_default_library", "//vendor:github.com/gogo/protobuf/proto", "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/api/resource", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/types", ], ) @@ -47,6 +48,19 @@ go_test( "//pkg/api/install:go_default_library", "//pkg/apis/autoscaling/install:go_default_library", "//pkg/apis/autoscaling/v1:go_default_library", - "//pkg/runtime:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/runtime", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion.go new file mode 100644 index 0000000000..b8096af807 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion.go @@ -0,0 +1,244 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "encoding/json" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/autoscaling" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + err := scheme.AddConversionFuncs( + Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler, + Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler, + Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec, + Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec, + Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus, + Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus, + ) + if err != nil { + return err + } + + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { + if err := autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in, out, s); err != nil { + return err + } + + otherMetrics := make([]MetricSpec, 0, len(in.Spec.Metrics)) + for _, metric := range in.Spec.Metrics { + if metric.Type == autoscaling.ResourceMetricSourceType && metric.Resource != nil && metric.Resource.Name == api.ResourceCPU && metric.Resource.TargetAverageUtilization != nil { + continue + } + + convMetric := MetricSpec{} + if err := Convert_autoscaling_MetricSpec_To_v1_MetricSpec(&metric, &convMetric, s); err != nil { + return err + } + otherMetrics = append(otherMetrics, convMetric) + } + + // NB: we need to save the status even if it maps to a CPU utilization status in order to save the raw value as well + currentMetrics := make([]MetricStatus, len(in.Status.CurrentMetrics)) + for i, currentMetric := range in.Status.CurrentMetrics { + if err := Convert_autoscaling_MetricStatus_To_v1_MetricStatus(¤tMetric, ¤tMetrics[i], s); err != nil { + return err + } + } + + if len(otherMetrics) > 0 || len(in.Status.CurrentMetrics) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)+2) + if old != nil { + for k, v := range old { + out.Annotations[k] = v + } + } + } + + if len(otherMetrics) > 0 { + otherMetricsEnc, err := json.Marshal(otherMetrics) + if err != nil { + return err + } + out.Annotations[autoscaling.MetricSpecsAnnotation] = string(otherMetricsEnc) + } + + if len(in.Status.CurrentMetrics) > 0 { + currentMetricsEnc, err := json.Marshal(currentMetrics) + if err != nil { + return err + } + out.Annotations[autoscaling.MetricStatusesAnnotation] = string(currentMetricsEnc) + } + + return nil +} + +func Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { + if err := autoConvert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s); err != nil { + return err + } + + if otherMetricsEnc, hasOtherMetrics := out.Annotations[autoscaling.MetricSpecsAnnotation]; hasOtherMetrics { + var otherMetrics []MetricSpec + if err := json.Unmarshal([]byte(otherMetricsEnc), &otherMetrics); err != nil { + return err + } + + // the normal Spec conversion could have populated out.Spec.Metrics with a single element, so deal with that + outMetrics := make([]autoscaling.MetricSpec, len(otherMetrics)+len(out.Spec.Metrics)) + for i, metric := range 
otherMetrics { + if err := Convert_v1_MetricSpec_To_autoscaling_MetricSpec(&metric, &outMetrics[i], s); err != nil { + return err + } + } + if out.Spec.Metrics != nil { + outMetrics[len(otherMetrics)] = out.Spec.Metrics[0] + } + out.Spec.Metrics = outMetrics + delete(out.Annotations, autoscaling.MetricSpecsAnnotation) + } + + if currentMetricsEnc, hasCurrentMetrics := out.Annotations[autoscaling.MetricStatusesAnnotation]; hasCurrentMetrics { + // ignore any existing status values -- the ones here have more information + var currentMetrics []MetricStatus + if err := json.Unmarshal([]byte(currentMetricsEnc), ¤tMetrics); err != nil { + return err + } + + out.Status.CurrentMetrics = make([]autoscaling.MetricStatus, len(currentMetrics)) + for i, currentMetric := range currentMetrics { + if err := Convert_v1_MetricStatus_To_autoscaling_MetricStatus(¤tMetric, &out.Status.CurrentMetrics[i], s); err != nil { + return err + } + } + delete(out.Annotations, autoscaling.MetricStatusesAnnotation) + } + + // autoscaling/v1 formerly had an implicit default applied in the controller. In v2alpha1, we apply it explicitly. + // We apply it here, explicitly, since we have access to the full set of metrics from the annotation. + if len(out.Spec.Metrics) == 0 { + // no other metrics, no explicit CPU value set + out.Spec.Metrics = []autoscaling.MetricSpec{ + { + Type: autoscaling.ResourceMetricSourceType, + Resource: &autoscaling.ResourceMetricSource{ + Name: api.ResourceCPU, + }, + }, + } + out.Spec.Metrics[0].Resource.TargetAverageUtilization = new(int32) + *out.Spec.Metrics[0].Resource.TargetAverageUtilization = autoscaling.DefaultCPUUtilization + } + + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { + return err + } + + out.MinReplicas = in.MinReplicas + out.MaxReplicas = in.MaxReplicas + + for _, metric := range in.Metrics { + if metric.Type == autoscaling.ResourceMetricSourceType && metric.Resource != nil && metric.Resource.Name == api.ResourceCPU { + if metric.Resource.TargetAverageUtilization != nil { + out.TargetCPUUtilizationPercentage = new(int32) + *out.TargetCPUUtilizationPercentage = *metric.Resource.TargetAverageUtilization + } + break + } + } + + return nil +} + +func Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { + return err + } + + out.MinReplicas = in.MinReplicas + out.MaxReplicas = in.MaxReplicas + + if in.TargetCPUUtilizationPercentage != nil { + out.Metrics = []autoscaling.MetricSpec{ + { + Type: autoscaling.ResourceMetricSourceType, + Resource: &autoscaling.ResourceMetricSource{ + Name: api.ResourceCPU, + }, + }, + } + out.Metrics[0].Resource.TargetAverageUtilization = new(int32) + *out.Metrics[0].Resource.TargetAverageUtilization = *in.TargetCPUUtilizationPercentage + } + + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { + 
out.ObservedGeneration = in.ObservedGeneration + out.LastScaleTime = in.LastScaleTime + + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + + for _, metric := range in.CurrentMetrics { + if metric.Type == autoscaling.ResourceMetricSourceType && metric.Resource != nil && metric.Resource.Name == api.ResourceCPU { + if metric.Resource.CurrentAverageUtilization != nil { + + out.CurrentCPUUtilizationPercentage = new(int32) + *out.CurrentCPUUtilizationPercentage = *metric.Resource.CurrentAverageUtilization + } + } + } + return nil +} + +func Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.LastScaleTime = in.LastScaleTime + + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + + if in.CurrentCPUUtilizationPercentage != nil { + out.CurrentMetrics = []autoscaling.MetricStatus{ + { + Type: autoscaling.ResourceMetricSourceType, + Resource: &autoscaling.ResourceMetricStatus{ + Name: api.ResourceCPU, + }, + }, + } + out.CurrentMetrics[0].Resource.CurrentAverageUtilization = new(int32) + *out.CurrentMetrics[0].Resource.CurrentAverageUtilization = *in.CurrentCPUUtilizationPercentage + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/defaults.go index e81d954905..d423ad1253 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/defaults.go @@ -17,7 +17,7 @@ limitations under the License. package v1 import ( - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) func addDefaultingFuncs(scheme *runtime.Scheme) error { @@ -32,4 +32,7 @@ func SetDefaults_HorizontalPodAutoscaler(obj *HorizontalPodAutoscaler) { minReplicas := int32(1) obj.Spec.MinReplicas = &minReplicas } + + // NB: we apply a default for CPU utilization in conversion because + // we need access to the annotations to properly apply the default. } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.pb.go index 99e32efd72..ee4ca5313e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.pb.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -30,6 +30,14 @@ limitations under the License. 
HorizontalPodAutoscalerList HorizontalPodAutoscalerSpec HorizontalPodAutoscalerStatus + MetricSpec + MetricStatus + ObjectMetricSource + ObjectMetricStatus + PodsMetricSource + PodsMetricStatus + ResourceMetricSource + ResourceMetricStatus Scale ScaleSpec ScaleStatus @@ -40,7 +48,10 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" +import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" import strings "strings" import reflect "reflect" @@ -84,17 +95,49 @@ func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func init() { proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.CrossVersionObjectReference") @@ -102,6 +145,14 @@ func init() { 
proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerList") proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerSpec") proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerStatus") + proto.RegisterType((*MetricSpec)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.MetricSpec") + proto.RegisterType((*MetricStatus)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.MetricStatus") + proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.ObjectMetricSource") + proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.ObjectMetricStatus") + proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.PodsMetricSource") + proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.PodsMetricStatus") + proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.ResourceMetricSource") + proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.ResourceMetricStatus") proto.RegisterType((*Scale)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.ScaleStatus") @@ -299,6 +350,318 @@ func (m *HorizontalPodAutoscalerStatus) MarshalTo(data []byte) (int, error) { return i, nil } +func (m *MetricSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *MetricSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.Object != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Object.Size())) + n7, err := m.Object.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.Pods != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Pods.Size())) + n8, err := m.Pods.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.Resource != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Resource.Size())) + n9, err := m.Resource.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} + +func (m *MetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *MetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.Object != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Object.Size())) + n10, err := m.Object.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if m.Pods != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Pods.Size())) + n11, err := m.Pods.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.Resource != nil { + data[i] 
= 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Resource.Size())) + n12, err := m.Resource.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} + +func (m *ObjectMetricSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectMetricSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) + n13, err := m.Target.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetValue.Size())) + n14, err := m.TargetValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n14 + return i, nil +} + +func (m *ObjectMetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectMetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) + n15, err := m.Target.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n15 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentValue.Size())) + n16, err := m.CurrentValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n16 + return i, nil +} + +func (m *PodsMetricSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodsMetricSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetAverageValue.Size())) + n17, err := m.TargetAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n17 + return i, nil +} + +func (m *PodsMetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodsMetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentAverageValue.Size())) + n18, err := m.CurrentAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n18 + return i, nil +} + +func (m *ResourceMetricSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceMetricSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, 
i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + if m.TargetAverageUtilization != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetAverageValue.Size())) + n19, err := m.TargetAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n19 + } + return i, nil +} + +func (m *ResourceMetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceMetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + if m.CurrentAverageUtilization != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.CurrentAverageUtilization)) + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentAverageValue.Size())) + n20, err := m.CurrentAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n20 + return i, nil +} + func (m *Scale) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -317,27 +680,27 @@ func (m *Scale) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(data[i:]) + n21, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n7 + i += n21 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n8, err := m.Spec.MarshalTo(data[i:]) + n22, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n8 + i += n22 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n9, err := m.Status.MarshalTo(data[i:]) + n23, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n9 + i += n23 return i, nil } @@ -485,22 +848,134 @@ func (m *HorizontalPodAutoscalerStatus) Size() (n int) { return n } -func (m *Scale) Size() (n int) { +func (m *MetricSpec) Size() (n int) { var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() + l = len(m.Type) n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *ScaleSpec) Size() (n int) { +func (m *MetricStatus) Size() (n int) { var l int _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ObjectMetricSource) Size() (n int) { + var l int + _ = l + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TargetValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectMetricStatus) Size() (n int) { 
+ var l int + _ = l + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodsMetricSource) Size() (n int) { + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodsMetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceMetricSource) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ResourceMetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.CurrentAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.CurrentAverageUtilization)) + } + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Scale) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleSpec) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) return n } @@ -543,7 +1018,7 @@ func (this *HorizontalPodAutoscaler) String() string { return "nil" } s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -555,7 +1030,7 @@ func (this *HorizontalPodAutoscalerList) String() string { return "nil" } s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -580,7 +1055,7 @@ func (this *HorizontalPodAutoscalerStatus) String() string { } s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", 
this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, `CurrentCPUUtilizationPercentage:` + valueToStringGenerated(this.CurrentCPUUtilizationPercentage) + `,`, @@ -588,12 +1063,108 @@ func (this *HorizontalPodAutoscalerStatus) String() string { }, "") return s } +func (this *MetricSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricSpec{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricStatus{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectMetricSource{`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectMetricStatus{`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricSource{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `TargetAverageValue:` + strings.Replace(strings.Replace(this.TargetAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricStatus{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&ResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *Scale) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -614,22 +1185,1205 @@ func (this *ScaleStatus) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ScaleStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, HorizontalPodAutoscaler{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ScaleTargetRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MinReplicas = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) + } + m.MaxReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MaxReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetCPUUtilizationPercentage", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetCPUUtilizationPercentage = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ObservedGeneration = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScaleTime == nil { + m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.LastScaleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.CurrentReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) + } + m.DesiredReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.DesiredReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentCPUUtilizationPercentage", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrentCPUUtilizationPercentage = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricSource{} + } + if err := m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricSource{} + } + if err := m.Pods.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricSource{} + } + if err := m.Resource.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricStatus{} + } + if err := m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricStatus{} + } + if err := m.Pods.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricStatus{} + } + if err := m.Resource.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMetricSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
ObjectMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { +func (m *ObjectMetricStatus) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -652,17 +2406,17 @@ func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") + return fmt.Errorf("proto: ObjectMetricStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ObjectMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -672,24 +2426,25 @@ func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { } b := data[iNdEx] 
iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(data[iNdEx:postIndex]) + if err := m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -714,13 +2469,13 @@ func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.MetricName = string(data[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -730,20 +2485,21 @@ func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersion = string(data[iNdEx:postIndex]) + if err := m.CurrentValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -766,7 +2522,7 @@ func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { } return nil } -func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { +func (m *PodsMetricSource) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -789,17 +2545,17 @@ func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") + return fmt.Errorf("proto: PodsMetricSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodsMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -809,55 +2565,24 @@ func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := 
m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } + m.MetricName = string(data[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -881,7 +2606,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.TargetAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -906,7 +2631,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { } return nil } -func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { +func (m *PodsMetricStatus) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -929,17 +2654,17 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") + return fmt.Errorf("proto: PodsMetricStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodsMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -949,25 +2674,24 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } + m.MetricName = string(data[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -991,8 +2715,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, HorizontalPodAutoscaler{}) - if err := 
m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.CurrentAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1017,7 +2740,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { } return nil } -func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { +func (m *ResourceMetricSource) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -1040,17 +2763,17 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceMetricSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1060,25 +2783,24 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ScaleTargetRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } + m.Name = k8s_io_kubernetes_pkg_api_v1.ResourceName(data[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageUtilization", wireType) } var v int32 for shift := uint(0); ; shift += 7 { @@ -1095,12 +2817,12 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { break } } - m.MinReplicas = &v + m.TargetAverageUtilization = &v case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) } - m.MaxReplicas = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1110,31 +2832,25 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - m.MaxReplicas |= (int32(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetCPUUtilizationPercentage", wireType) + if msglen < 0 { + return ErrInvalidLengthGenerated } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF } - m.TargetCPUUtilizationPercentage = &v + if 
m.TargetAverageValue == nil { + m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + } + if err := m.TargetAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -1156,7 +2872,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { } return nil } -func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { +func (m *ResourceMetricStatus) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -1179,37 +2895,17 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceMetricStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ObservedGeneration = &v - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1219,30 +2915,26 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.LastScaleTime == nil { - m.LastScaleTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} - } - if err := m.LastScaleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } + m.Name = k8s_io_kubernetes_pkg_api_v1.ResourceName(data[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageUtilization", wireType) } - m.CurrentReplicas = 0 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1252,16 +2944,17 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) + m.CurrentAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) } - m.DesiredReplicas = 0 + var msglen int for shift := uint(0); ; shift += 
7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1271,31 +2964,22 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - m.DesiredReplicas |= (int32(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentCPUUtilizationPercentage", wireType) + if msglen < 0 { + return ErrInvalidLengthGenerated } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF } - m.CurrentCPUUtilizationPercentage = &v + if err := m.CurrentAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -1730,58 +3414,85 @@ var ( ) var fileDescriptorGenerated = []byte{ - // 835 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x55, 0xcd, 0x6f, 0xdc, 0x44, - 0x14, 0x5f, 0xef, 0x47, 0x95, 0x8e, 0x49, 0x02, 0x83, 0xd4, 0xae, 0x52, 0x61, 0x47, 0x0b, 0x87, - 0x20, 0x5a, 0x5b, 0xbb, 0xa2, 0x88, 0x1e, 0xe3, 0xa0, 0xd2, 0x8a, 0x86, 0x46, 0x93, 0xb6, 0x42, - 0x48, 0x20, 0xcd, 0xda, 0xaf, 0xee, 0x74, 0xd7, 0x1e, 0x6b, 0x66, 0xbc, 0x42, 0x3d, 0x71, 0xe2, - 0xcc, 0x85, 0x03, 0xff, 0x0e, 0xa7, 0xdc, 0xe8, 0x91, 0xd3, 0x8a, 0x18, 0xf1, 0x5f, 0x70, 0x40, - 0x9e, 0x9d, 0x7a, 0xbf, 0xe2, 0x6d, 0x23, 0xe8, 0x6d, 0x67, 0xde, 0xef, 0xe3, 0xcd, 0x7b, 0xcf, - 0x6f, 0xd1, 0x9d, 0xd1, 0xe7, 0xd2, 0x63, 0xdc, 0x1f, 0xe5, 0x43, 0x10, 0x29, 0x28, 0x90, 0x7e, - 0x36, 0x8a, 0x7d, 0x9a, 0x31, 0xe9, 0xd3, 0x5c, 0x71, 0x19, 0xd2, 0x31, 0x4b, 0x63, 0x7f, 0xd2, - 0xf7, 0x63, 0x48, 0x41, 0x50, 0x05, 0x91, 0x97, 0x09, 0xae, 0x38, 0xfe, 0x78, 0x46, 0xf5, 0xe6, - 0x54, 0x2f, 0x1b, 0xc5, 0x5e, 0x49, 0xf5, 0x16, 0xa8, 0xde, 0xa4, 0xbf, 0x77, 0x2b, 0x66, 0xea, - 0x59, 0x3e, 0xf4, 0x42, 0x9e, 0xf8, 0x31, 0x8f, 0xb9, 0xaf, 0x15, 0x86, 0xf9, 0x53, 0x7d, 0xd2, - 0x07, 0xfd, 0x6b, 0xa6, 0xbc, 0x37, 0xa8, 0x4d, 0xca, 0x17, 0x20, 0x79, 0x2e, 0x42, 0x58, 0xcd, - 0x66, 0xef, 0x76, 0x3d, 0x27, 0x4f, 0x27, 0x20, 0x24, 0xe3, 0x29, 0x44, 0x6b, 0xb4, 0x9b, 0xf5, - 0xb4, 0xf5, 0x27, 0xef, 0xdd, 0xba, 0x18, 0x2d, 0xf2, 0x54, 0xb1, 0x64, 0x3d, 0xa7, 0xfe, 0xc5, - 0xf0, 0x5c, 0xb1, 0xb1, 0xcf, 0x52, 0x25, 0x95, 0x58, 0xa5, 0xf4, 0x7e, 0xb1, 0xd0, 0x8d, 0x23, - 0xc1, 0xa5, 0x7c, 0x32, 0x4b, 0xf9, 0xe1, 0xf0, 0x39, 0x84, 0x8a, 0xc0, 0x53, 0x10, 0x90, 0x86, - 0x80, 0xf7, 0x51, 0x7b, 0xc4, 0xd2, 0xa8, 0x6b, 0xed, 0x5b, 0x07, 0x57, 0x83, 0x77, 0xce, 0xa6, - 0x6e, 0xa3, 0x98, 0xba, 0xed, 0xaf, 0x58, 0x1a, 0x11, 0x1d, 0x29, 0x11, 0x29, 0x4d, 0xa0, 0xdb, - 0x5c, 0x46, 0x7c, 0x4d, 0x13, 0x20, 0x3a, 0x82, 0x07, 0x08, 0xd1, 0x8c, 0x19, 0x83, 0x6e, 0x4b, - 0xe3, 0xb0, 0xc1, 0xa1, 0xc3, 0x93, 0xfb, 0x26, 0x42, 0x16, 0x50, 0xbd, 0xdf, 0x9b, 0xe8, 0xfa, - 0x3d, 0x2e, 0xd8, 0x0b, 0x9e, 0x2a, 0x3a, 0x3e, 0xe1, 0xd1, 0xa1, 0xe9, 0x30, 0x08, 0xfc, 0x0d, - 0xda, 0x4a, 0x40, 0xd1, 0x88, 0x2a, 0xaa, 0xf3, 0xb2, 0x07, 0x07, 0x5e, 0xed, 0x6c, 0x78, 0x93, - 0xbe, 0x37, 0x7b, 0xd4, 0x31, 0x28, 0x3a, 0xf7, 0x9d, 0xdf, 0x91, 0x4a, 0x0d, 0x3f, 0x43, 0x6d, - 0x99, 0x41, 0xa8, 0xdf, 0x62, 0x0f, 0xee, 0x7a, 0x6f, 0x3c, 0x71, 0x5e, 0x4d, 0xae, 0xa7, 0x19, - 0x84, 0xf3, 0x9a, 0x94, 0x27, 0xa2, 0x1d, 0x70, 
0x86, 0xae, 0x48, 0x45, 0x55, 0x2e, 0x75, 0x3d, - 0xec, 0xc1, 0xbd, 0xff, 0xc1, 0x4b, 0xeb, 0x05, 0x3b, 0xc6, 0xed, 0xca, 0xec, 0x4c, 0x8c, 0x4f, - 0xef, 0x6f, 0x0b, 0xdd, 0xa8, 0x61, 0x3e, 0x60, 0x52, 0xe1, 0xef, 0xd6, 0xaa, 0xea, 0x6f, 0xa8, - 0xea, 0xc2, 0x8c, 0x7b, 0x25, 0x5d, 0x17, 0xf7, 0x5d, 0x63, 0xbd, 0xf5, 0xea, 0x66, 0xa1, 0xb4, - 0x31, 0xea, 0x30, 0x05, 0x89, 0xec, 0x36, 0xf7, 0x5b, 0x07, 0xf6, 0x20, 0xf8, 0xef, 0xef, 0x0d, - 0xb6, 0x8d, 0x5d, 0xe7, 0x7e, 0x29, 0x4c, 0x66, 0xfa, 0xbd, 0x7f, 0x9a, 0xb5, 0xef, 0x2c, 0xeb, - 0x8f, 0x7f, 0xb2, 0xd0, 0x8e, 0x3e, 0x3e, 0xa2, 0x22, 0x86, 0x72, 0xd4, 0xcd, 0x73, 0x2f, 0xd3, - 0xee, 0x0d, 0x9f, 0x4c, 0x70, 0xcd, 0xa4, 0xb5, 0x73, 0xba, 0xe4, 0x42, 0x56, 0x5c, 0x71, 0x1f, - 0xd9, 0x09, 0x4b, 0x09, 0x64, 0x63, 0x16, 0x52, 0xa9, 0x67, 0xae, 0x13, 0xec, 0x16, 0x53, 0xd7, - 0x3e, 0x9e, 0x5f, 0x93, 0x45, 0x0c, 0xbe, 0x8d, 0xec, 0x84, 0xfe, 0x50, 0x51, 0x5a, 0x9a, 0xf2, - 0xbe, 0xf1, 0xb3, 0x8f, 0xe7, 0x21, 0xb2, 0x88, 0xc3, 0xcf, 0x91, 0xa3, 0xb4, 0xed, 0xd1, 0xc9, - 0xe3, 0xc7, 0x8a, 0x8d, 0xd9, 0x0b, 0xaa, 0x18, 0x4f, 0x4f, 0x40, 0x84, 0x90, 0x2a, 0x1a, 0x43, - 0xb7, 0xad, 0x95, 0x7a, 0xc5, 0xd4, 0x75, 0x1e, 0x6d, 0x44, 0x92, 0xd7, 0x28, 0xf5, 0x7e, 0x6b, - 0xa1, 0x0f, 0x36, 0x0e, 0x28, 0xbe, 0x8b, 0x30, 0x1f, 0x4a, 0x10, 0x13, 0x88, 0xbe, 0x9c, 0x6d, - 0xa3, 0x72, 0x2d, 0x94, 0x3d, 0x68, 0x05, 0xd7, 0x8a, 0xa9, 0x8b, 0x1f, 0xae, 0x45, 0xc9, 0x05, - 0x0c, 0x1c, 0xa1, 0xed, 0x31, 0x95, 0x6a, 0x56, 0x65, 0x66, 0x36, 0x90, 0x3d, 0xf8, 0xe4, 0x0d, - 0xa7, 0xb6, 0xa4, 0x04, 0xef, 0x15, 0x53, 0x77, 0xfb, 0xc1, 0xa2, 0x0a, 0x59, 0x16, 0xc5, 0x87, - 0x68, 0x37, 0xcc, 0x85, 0x80, 0x54, 0xad, 0x94, 0xfd, 0xba, 0x29, 0xfb, 0xee, 0xd1, 0x72, 0x98, - 0xac, 0xe2, 0x4b, 0x89, 0x08, 0x24, 0x13, 0x10, 0x55, 0x12, 0xed, 0x65, 0x89, 0x2f, 0x96, 0xc3, - 0x64, 0x15, 0x8f, 0x13, 0xe4, 0x1a, 0xd5, 0xda, 0x16, 0x76, 0xb4, 0xe4, 0x87, 0xc5, 0xd4, 0x75, - 0x8f, 0x36, 0x43, 0xc9, 0xeb, 0xb4, 0x7a, 0xbf, 0x36, 0x51, 0x47, 0x97, 0xe0, 0x2d, 0xee, 0xda, - 0x27, 0x4b, 0xbb, 0xf6, 0xd3, 0x4b, 0x7c, 0x7c, 0x3a, 0xb3, 0xda, 0xcd, 0xfa, 0xfd, 0xca, 0x66, - 0xfd, 0xec, 0xd2, 0xca, 0x9b, 0xf7, 0xe8, 0x1d, 0x74, 0xb5, 0x4a, 0x00, 0xdf, 0x44, 0x5b, 0xe2, - 0x55, 0x4f, 0x2d, 0xdd, 0x80, 0x6a, 0x07, 0x56, 0xcd, 0xac, 0x10, 0x3d, 0x86, 0xec, 0x05, 0x87, - 0xcb, 0x91, 0x4b, 0xb4, 0x84, 0x31, 0x84, 0x8a, 0x0b, 0xf3, 0x5f, 0x5b, 0xa1, 0x4f, 0xcd, 0x3d, - 0xa9, 0x10, 0xc1, 0x47, 0x67, 0xe7, 0x4e, 0xe3, 0xe5, 0xb9, 0xd3, 0xf8, 0xe3, 0xdc, 0x69, 0xfc, - 0x58, 0x38, 0xd6, 0x59, 0xe1, 0x58, 0x2f, 0x0b, 0xc7, 0xfa, 0xb3, 0x70, 0xac, 0x9f, 0xff, 0x72, - 0x1a, 0xdf, 0x36, 0x27, 0xfd, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x45, 0xec, 0x9a, 0x15, 0x8e, - 0x09, 0x00, 0x00, + // 1279 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x57, 0xcd, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xda, 0x4e, 0x94, 0xce, 0xa6, 0x1f, 0x4c, 0xaa, 0xd4, 0x4d, 0xa8, 0x37, 0x5a, 0x38, + 0xb4, 0xa8, 0xec, 0x12, 0x13, 0x2a, 0x22, 0x84, 0x50, 0x6c, 0x54, 0x5a, 0x51, 0xb7, 0x61, 0xe2, + 0x46, 0x7c, 0x09, 0x31, 0x59, 0x4f, 0x9d, 0x69, 0xbc, 0x1f, 0x9a, 0x1d, 0x5b, 0x4d, 0x24, 0x24, + 0x4e, 0x9c, 0xb9, 0x70, 0x46, 0xf0, 0x4f, 0x70, 0x2e, 0x12, 0x52, 0x8e, 0xbd, 0xc1, 0xc9, 0x22, + 0x0b, 0x37, 0xc4, 0x3f, 0x50, 0x71, 0x40, 0x3b, 0x3b, 0x5e, 0xef, 0xda, 0x5e, 0x27, 0x4e, 0xd3, + 0x22, 0x6e, 0xbb, 0x33, 0xef, 0xfd, 0x7e, 0xef, 0xfd, 0xe6, 0xcd, 0x9b, 0x19, 0xb0, 0xb6, 0xfb, + 0xb6, 0x6f, 0x50, 0xd7, 0xdc, 0x6d, 0x6f, 0x13, 0xe6, 0x10, 0x4e, 0x7c, 0xd3, 0xdb, 0x6d, 0x9a, + 0xd8, 
0xa3, 0xbe, 0x89, 0xdb, 0xdc, 0xf5, 0x2d, 0xdc, 0xa2, 0x4e, 0xd3, 0xec, 0xac, 0x98, 0x4d, + 0xe2, 0x10, 0x86, 0x39, 0x69, 0x18, 0x1e, 0x73, 0xb9, 0x0b, 0xaf, 0x45, 0xae, 0x46, 0xdf, 0xd5, + 0xf0, 0x76, 0x9b, 0x46, 0xe8, 0x6a, 0x24, 0x5c, 0x8d, 0xce, 0xca, 0xe2, 0xeb, 0x4d, 0xca, 0x77, + 0xda, 0xdb, 0x86, 0xe5, 0xda, 0x66, 0xd3, 0x6d, 0xba, 0xa6, 0x40, 0xd8, 0x6e, 0x3f, 0x10, 0x7f, + 0xe2, 0x47, 0x7c, 0x45, 0xc8, 0x8b, 0xab, 0x32, 0x28, 0xec, 0x51, 0x1b, 0x5b, 0x3b, 0xd4, 0x21, + 0x6c, 0xaf, 0x17, 0x96, 0xc9, 0x88, 0xef, 0xb6, 0x99, 0x45, 0x06, 0xe3, 0x19, 0xeb, 0xe5, 0x9b, + 0x36, 0xe1, 0x78, 0x44, 0x16, 0x8b, 0x66, 0x96, 0x17, 0x6b, 0x3b, 0x9c, 0xda, 0xc3, 0x34, 0x37, + 0x8e, 0x72, 0xf0, 0xad, 0x1d, 0x62, 0xe3, 0x21, 0xbf, 0x37, 0xb3, 0xfc, 0xda, 0x9c, 0xb6, 0x4c, + 0xea, 0x70, 0x9f, 0xb3, 0x71, 0x39, 0xf9, 0x84, 0x75, 0x08, 0xeb, 0x27, 0x44, 0x1e, 0x61, 0xdb, + 0x6b, 0x91, 0x51, 0x39, 0x5d, 0xcf, 0x5c, 0xd4, 0x11, 0xd6, 0xfa, 0x77, 0x0a, 0x58, 0xaa, 0x32, + 0xd7, 0xf7, 0xb7, 0x08, 0xf3, 0xa9, 0xeb, 0xdc, 0xdb, 0x7e, 0x48, 0x2c, 0x8e, 0xc8, 0x03, 0xc2, + 0x88, 0x63, 0x11, 0xb8, 0x0c, 0x0a, 0xbb, 0xd4, 0x69, 0x14, 0x95, 0x65, 0xe5, 0xea, 0x99, 0xca, + 0xdc, 0x41, 0x57, 0x9b, 0x0a, 0xba, 0x5a, 0xe1, 0x43, 0xea, 0x34, 0x90, 0x98, 0x09, 0x2d, 0x1c, + 0x6c, 0x93, 0x62, 0x2e, 0x6d, 0x71, 0x17, 0xdb, 0x04, 0x89, 0x19, 0x58, 0x06, 0x00, 0x7b, 0x54, + 0x12, 0x14, 0xf3, 0xc2, 0x0e, 0x4a, 0x3b, 0xb0, 0xbe, 0x71, 0x5b, 0xce, 0xa0, 0x84, 0x95, 0xfe, + 0x6b, 0x0e, 0x5c, 0xba, 0xe5, 0x32, 0xba, 0xef, 0x3a, 0x1c, 0xb7, 0x36, 0xdc, 0xc6, 0xba, 0x2c, + 0x2a, 0xc2, 0xe0, 0x97, 0x60, 0x36, 0x5c, 0xd0, 0x06, 0xe6, 0x58, 0xc4, 0xa5, 0x96, 0xdf, 0x30, + 0x64, 0x39, 0x26, 0xf5, 0xed, 0x17, 0x64, 0x68, 0x6d, 0x74, 0x56, 0x8c, 0x28, 0xb9, 0x1a, 0xe1, + 0xb8, 0xcf, 0xdf, 0x1f, 0x43, 0x31, 0x2a, 0xdc, 0x01, 0x05, 0xdf, 0x23, 0x96, 0xc8, 0x49, 0x2d, + 0xdf, 0x34, 0x8e, 0x5d, 0xec, 0x46, 0x46, 0xcc, 0x9b, 0x1e, 0xb1, 0xfa, 0xda, 0x84, 0x7f, 0x48, + 0x30, 0x40, 0x0f, 0xcc, 0xf8, 0x1c, 0xf3, 0xb6, 0x2f, 0x74, 0x51, 0xcb, 0xb7, 0x4e, 0x81, 0x4b, + 0xe0, 0x55, 0xce, 0x49, 0xb6, 0x99, 0xe8, 0x1f, 0x49, 0x1e, 0xfd, 0x4f, 0x05, 0x2c, 0x65, 0x78, + 0xde, 0xa1, 0x3e, 0x87, 0x9f, 0x0f, 0xa9, 0x6b, 0x1c, 0x4f, 0xdd, 0xd0, 0x5b, 0x68, 0x7b, 0x41, + 0x32, 0xcf, 0xf6, 0x46, 0x12, 0xca, 0x36, 0xc1, 0x34, 0xe5, 0xc4, 0xf6, 0x8b, 0xb9, 0xe5, 0xfc, + 0x55, 0xb5, 0x5c, 0x79, 0xf6, 0x74, 0x2b, 0x67, 0x25, 0xdd, 0xf4, 0xed, 0x10, 0x18, 0x45, 0xf8, + 0xfa, 0x3f, 0xb9, 0xcc, 0x34, 0x43, 0xf9, 0xe1, 0x37, 0x0a, 0x38, 0x27, 0x7e, 0xeb, 0x98, 0x35, + 0x49, 0x58, 0xf1, 0x32, 0xdb, 0x49, 0x56, 0x7b, 0xcc, 0xce, 0xa9, 0x2c, 0xc8, 0xb0, 0xce, 0x6d, + 0xa6, 0x58, 0xd0, 0x00, 0x2b, 0x5c, 0x01, 0xaa, 0x4d, 0x1d, 0x44, 0xbc, 0x16, 0xb5, 0xb0, 0x2f, + 0x4a, 0x6e, 0xba, 0x72, 0x3e, 0xe8, 0x6a, 0x6a, 0xad, 0x3f, 0x8c, 0x92, 0x36, 0xf0, 0x2d, 0xa0, + 0xda, 0xf8, 0x51, 0xec, 0x92, 0x17, 0x2e, 0xf3, 0x92, 0x4f, 0xad, 0xf5, 0xa7, 0x50, 0xd2, 0x0e, + 0x3e, 0x04, 0x25, 0x2e, 0x68, 0xab, 0x1b, 0xf7, 0xef, 0x73, 0xda, 0xa2, 0xfb, 0x98, 0x53, 0xd7, + 0xd9, 0x20, 0xcc, 0x22, 0x0e, 0xc7, 0x4d, 0x52, 0x2c, 0x08, 0x24, 0x3d, 0xe8, 0x6a, 0xa5, 0xfa, + 0x58, 0x4b, 0x74, 0x04, 0x92, 0xfe, 0x38, 0x0f, 0xae, 0x8c, 0xad, 0x4f, 0x78, 0x13, 0x40, 0x77, + 0x5b, 0xf4, 0xb5, 0xc6, 0x07, 0x51, 0x53, 0x0a, 0xbb, 0x43, 0xb8, 0x06, 0xf9, 0xca, 0x42, 0xd0, + 0xd5, 0xe0, 0xbd, 0xa1, 0x59, 0x34, 0xc2, 0x03, 0x5a, 0xe0, 0x6c, 0x0b, 0xfb, 0x3c, 0x52, 0x99, + 0xca, 0x46, 0xa4, 0x96, 0x5f, 0x3b, 0x5e, 0xd1, 0x86, 0x1e, 0x95, 0x97, 0x82, 0xae, 0x76, 0xf6, + 0x4e, 0x12, 0x04, 0xa5, 0x31, 
0xe1, 0x3a, 0x38, 0x6f, 0xb5, 0x19, 0x23, 0x0e, 0x1f, 0x50, 0xfd, + 0x92, 0x54, 0xfd, 0x7c, 0x35, 0x3d, 0x8d, 0x06, 0xed, 0x43, 0x88, 0x06, 0xf1, 0x29, 0x23, 0x8d, + 0x18, 0xa2, 0x90, 0x86, 0x78, 0x3f, 0x3d, 0x8d, 0x06, 0xed, 0xa1, 0x0d, 0x34, 0x89, 0x9a, 0xb9, + 0x82, 0xd3, 0x02, 0xf2, 0x95, 0xa0, 0xab, 0x69, 0xd5, 0xf1, 0xa6, 0xe8, 0x28, 0x2c, 0xfd, 0xaf, + 0x1c, 0x00, 0x35, 0xc2, 0x19, 0xb5, 0xc4, 0x8e, 0x59, 0x05, 0x05, 0xbe, 0xe7, 0x11, 0x79, 0x14, + 0x2c, 0xf7, 0x9a, 0x59, 0x7d, 0xcf, 0x23, 0x4f, 0xbb, 0xda, 0x05, 0x69, 0x29, 0x8e, 0xe7, 0x70, + 0x0c, 0x09, 0x6b, 0x88, 0xc1, 0x8c, 0x2b, 0x76, 0x86, 0x5c, 0x97, 0x77, 0x27, 0xd8, 0x5e, 0x71, + 0x6f, 0x8e, 0x81, 0x2b, 0x20, 0xec, 0x68, 0x72, 0xab, 0x49, 0x60, 0xf8, 0x09, 0x28, 0x78, 0x6e, + 0xa3, 0xd7, 0x41, 0xdf, 0x99, 0x80, 0x60, 0xc3, 0x6d, 0xf8, 0x29, 0xf8, 0xd9, 0x30, 0xa3, 0x70, + 0x14, 0x09, 0x48, 0x48, 0xc1, 0x6c, 0xef, 0xca, 0x21, 0x56, 0x4b, 0x2d, 0xbf, 0x37, 0x01, 0x3c, + 0x92, 0xae, 0x29, 0x8a, 0xb9, 0xb0, 0x33, 0xf6, 0x66, 0x50, 0x0c, 0xaf, 0xff, 0x9d, 0x03, 0x73, + 0xd2, 0x30, 0xda, 0x20, 0xff, 0xb1, 0xde, 0xd1, 0x29, 0xf2, 0xdc, 0xf4, 0x8e, 0xe0, 0x9f, 0xab, + 0xde, 0x11, 0x45, 0x96, 0xde, 0xdf, 0xe7, 0x00, 0x1c, 0x2e, 0x30, 0xe8, 0x80, 0x99, 0xa8, 0xb5, + 0x9d, 0xf2, 0x71, 0x10, 0x1f, 0xc7, 0xb2, 0xf3, 0x4b, 0x96, 0xf0, 0x72, 0x64, 0x0b, 0xfe, 0xbb, + 0xfd, 0x4b, 0x54, 0x7c, 0x39, 0xa9, 0xc5, 0x33, 0x28, 0x61, 0x05, 0x09, 0x50, 0x23, 0xef, 0x2d, + 0xdc, 0x6a, 0x13, 0xb9, 0x0e, 0x63, 0x4f, 0x69, 0xa3, 0x97, 0xb6, 0xf1, 0x51, 0x1b, 0x3b, 0x9c, + 0xf2, 0xbd, 0xfe, 0x79, 0x51, 0xef, 0x43, 0xa1, 0x24, 0xae, 0xfe, 0xe3, 0xa0, 0x42, 0x51, 0x5d, + 0xfe, 0x1f, 0x14, 0xda, 0x01, 0x73, 0xb2, 0xbb, 0x3d, 0x8b, 0x44, 0x17, 0x25, 0xcb, 0x5c, 0x35, + 0x81, 0x85, 0x52, 0xc8, 0xfa, 0xcf, 0x0a, 0xb8, 0x30, 0xd8, 0x46, 0x06, 0x42, 0x56, 0x8e, 0x15, + 0xf2, 0x3e, 0x80, 0x51, 0xc2, 0xeb, 0x1d, 0xc2, 0x70, 0x93, 0x44, 0x81, 0xe7, 0x4e, 0x14, 0xf8, + 0xa2, 0xe4, 0x82, 0xf5, 0x21, 0x44, 0x34, 0x82, 0x45, 0xff, 0x25, 0x9d, 0x44, 0xb4, 0xce, 0x27, + 0x49, 0xe2, 0x2b, 0x30, 0x2f, 0xd5, 0x39, 0x85, 0x2c, 0x96, 0x24, 0xd9, 0x7c, 0x75, 0x18, 0x12, + 0x8d, 0xe2, 0xd1, 0x7f, 0xca, 0x81, 0x8b, 0xa3, 0x9a, 0x2e, 0xac, 0xc9, 0x47, 0x4a, 0x94, 0xc5, + 0x5a, 0xf2, 0x91, 0xf2, 0xb4, 0xab, 0x5d, 0x1b, 0xf7, 0x64, 0x8a, 0xbb, 0x4a, 0xe2, 0x45, 0xf3, + 0x31, 0x28, 0xa6, 0x54, 0x4c, 0x9c, 0x9f, 0xf2, 0x02, 0xf7, 0x72, 0xd0, 0xd5, 0x8a, 0xf5, 0x0c, + 0x1b, 0x94, 0xe9, 0x0d, 0x3b, 0x23, 0xab, 0xe0, 0x64, 0xe5, 0xbb, 0x30, 0x41, 0x05, 0x3c, 0x1e, + 0x56, 0x2e, 0xaa, 0x82, 0x53, 0x56, 0xee, 0x33, 0x70, 0x39, 0xbd, 0x70, 0xc3, 0xd2, 0x5d, 0x09, + 0xba, 0xda, 0xe5, 0x6a, 0x96, 0x11, 0xca, 0xf6, 0xcf, 0xaa, 0xbe, 0xfc, 0x0b, 0xaa, 0xbe, 0x1f, + 0x72, 0x60, 0x5a, 0x5c, 0x19, 0x5f, 0xc0, 0x0b, 0x75, 0x2b, 0xf5, 0x42, 0x5d, 0x9d, 0xa0, 0x05, + 0x8b, 0x08, 0x33, 0xdf, 0xa3, 0x5f, 0x0c, 0xbc, 0x47, 0x6f, 0x4c, 0x8c, 0x3c, 0xfe, 0xf5, 0xb9, + 0x06, 0xce, 0xc4, 0x01, 0xc0, 0xeb, 0xe1, 0x69, 0x2f, 0xef, 0xc2, 0x8a, 0x58, 0xfb, 0xf8, 0xe9, + 0x18, 0x5f, 0x82, 0x63, 0x0b, 0x9d, 0x02, 0x35, 0xc1, 0x30, 0x99, 0x73, 0x68, 0xed, 0x93, 0x16, + 0xb1, 0xb8, 0xcb, 0xe4, 0x11, 0x12, 0x5b, 0x6f, 0xca, 0x71, 0x14, 0x5b, 0x54, 0x5e, 0x3d, 0x38, + 0x2c, 0x4d, 0x3d, 0x39, 0x2c, 0x4d, 0xfd, 0x76, 0x58, 0x9a, 0xfa, 0x3a, 0x28, 0x29, 0x07, 0x41, + 0x49, 0x79, 0x12, 0x94, 0x94, 0xdf, 0x83, 0x92, 0xf2, 0xed, 0x1f, 0xa5, 0xa9, 0x4f, 0x73, 0x9d, + 0x95, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x61, 0xfc, 0xd4, 0x31, 0x3f, 0x13, 0x00, 0x00, } diff --git 
a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto index 56d19a9cc9..4953624af6 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,11 +21,13 @@ syntax = 'proto2'; package k8s.io.kubernetes.pkg.apis.autoscaling.v1; -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; @@ -47,7 +49,7 @@ message CrossVersionObjectReference { message HorizontalPodAutoscaler { // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. // +optional @@ -62,7 +64,7 @@ message HorizontalPodAutoscaler { message HorizontalPodAutoscalerList { // Standard list metadata. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // list of horizontal pod autoscaler objects. repeated HorizontalPodAutoscaler items = 2; @@ -96,7 +98,7 @@ message HorizontalPodAutoscalerStatus { // last time the HorizontalPodAutoscaler scaled the number of pods; // used by the autoscaler to control how often the number of pods is changed. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastScaleTime = 2; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; // current number of replicas of pods managed by this autoscaler. optional int32 currentReplicas = 3; @@ -110,11 +112,160 @@ message HorizontalPodAutoscalerStatus { optional int32 currentCPUUtilizationPercentage = 5; } +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +message MetricSpec { + // type is the type of metric source. It should match one of the fields below. + optional string type = 1; + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + optional ObjectMetricSource object = 2; + + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. 
+ // +optional + optional PodsMetricSource pods = 3; + + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ResourceMetricSource resource = 4; +} + +// MetricStatus describes the last-read state of a single metric. +message MetricStatus { + // type is the type of metric source. It will match one of the fields below. + optional string type = 1; + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + optional ObjectMetricStatus object = 2; + + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + optional PodsMetricStatus pods = 3; + + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ResourceMetricStatus resource = 4; +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +message ObjectMetricSource { + // target is the described Kubernetes object. + optional CrossVersionObjectReference target = 1; + + // metricName is the name of the metric in question. + optional string metricName = 2; + + // targetValue is the target value of the metric (as a quantity). + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +message ObjectMetricStatus { + // target is the described Kubernetes object. + optional CrossVersionObjectReference target = 1; + + // metricName is the name of the metric in question. + optional string metricName = 2; + + // currentValue is the current value of the metric (as a quantity). + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +message PodsMetricSource { + // metricName is the name of the metric in question + optional string metricName = 1; + + // targetAverageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). 
+message PodsMetricStatus { + // metricName is the name of the metric in question + optional string metricName = 1; + + // currentAverageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +message ResourceMetricSource { + // name is the name of the resource in question. + optional string name = 1; + + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + optional int32 targetAverageUtilization = 2; + + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +message ResourceMetricStatus { + // name is the name of the resource in question. + optional string name = 1; + + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + optional int32 currentAverageUtilization = 2; + + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; +} + // Scale represents a scaling request for a resource. message Scale { // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
// +optional diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go index 1563a42a58..aaa225261c 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go @@ -17,20 +17,24 @@ limitations under the License. package v1 import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName is the group name use in this package const GroupName = "autoscaling" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) AddToScheme = SchemeBuilder.AddToScheme ) @@ -40,10 +44,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { &HorizontalPodAutoscaler{}, &HorizontalPodAutoscalerList{}, &Scale{}, - &v1.ListOptions{}, - &v1.DeleteOptions{}, - &v1.ExportOptions{}, ) - versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.generated.go index 9b03510f5f..7bf45928cd 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.generated.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.generated.go @@ -25,9 +25,10 @@ import ( "errors" "fmt" codec1978 "github.com/ugorji/go/codec" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg2_v1 "k8s.io/kubernetes/pkg/api/v1" - pkg3_types "k8s.io/kubernetes/pkg/types" + pkg3_resource "k8s.io/apimachinery/pkg/api/resource" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + pkg4_v1 "k8s.io/kubernetes/pkg/api/v1" "reflect" "runtime" time "time" @@ -63,11 +64,12 @@ func init() { panic(err) } if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_unversioned.Time - var v1 pkg2_v1.ObjectMeta - var v2 pkg3_types.UID - var v3 time.Time - _, _, _, _ = v0, v1, v2, v3 + var v0 pkg3_resource.Quantity + var v1 pkg1_v1.Time + var v2 pkg2_types.UID + var v3 pkg4_v1.ResourceName + var v4 time.Time + _, _, _, _, _ = v0, v1, v2, v3, v4 } } @@ -178,25 +180,25 @@ func (x *CrossVersionObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym12 := z.DecBinary() - _ = yym12 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct13 := r.ContainerType() - if yyct13 == codecSelferValueTypeMap1234 { - yyl13 := r.ReadMapStart() - if yyl13 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl13, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct13 == codecSelferValueTypeArray1234 { - yyl13 := r.ReadArrayStart() - if yyl13 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl13, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -208,12 +210,12 @@ func (x *CrossVersionObjectReference) codecDecodeSelfFromMap(l int, d *codec1978 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys14Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys14Slc - var yyhl14 bool = l >= 0 - for yyj14 := 0; ; yyj14++ { - if yyhl14 { - if yyj14 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -222,32 +224,50 @@ func (x *CrossVersionObjectReference) codecDecodeSelfFromMap(l int, d *codec1978 } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys14Slc = r.DecodeBytes(yys14Slc, true, true) - yys14 := string(yys14Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys14 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "name": if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv6 := &x.Name + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv8 := &x.APIVersion + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys14) - } // end switch yys14 - } // end for yyj14 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -255,16 +275,16 @@ func (x *CrossVersionObjectReference) codecDecodeSelfFromArray(l int, d *codec19 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj18 int - var yyb18 bool - var yyhl18 bool = l >= 0 - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb18 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb18 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -272,15 +292,21 @@ func (x *CrossVersionObjectReference) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb18 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb18 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -288,15 +314,21 @@ func (x 
*CrossVersionObjectReference) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv13 := &x.Name + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb18 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb18 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -304,20 +336,26 @@ func (x *CrossVersionObjectReference) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } for { - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb18 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb18 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj18-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -329,81 +367,81 @@ func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym22 := z.EncBinary() - _ = yym22 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep23 := !z.EncBinary() - yy2arr23 := z.EncBasicHandle().StructToArray - var yyq23 [4]bool - _, _, _ = yysep23, yyq23, yy2arr23 - const yyr23 bool = false - yyq23[1] = x.MinReplicas != nil - yyq23[3] = x.TargetCPUUtilizationPercentage != nil - var yynn23 int - if yyr23 || yy2arr23 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.MinReplicas != nil + yyq2[3] = x.TargetCPUUtilizationPercentage != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn23 = 2 - for _, b := range yyq23 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn23++ + yynn2++ } } - r.EncodeMapStart(yynn23) - yynn23 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr23 || yy2arr23 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy25 := &x.ScaleTargetRef - yy25.CodecEncodeSelf(e) + yy4 := &x.ScaleTargetRef + yy4.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("scaleTargetRef")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy26 := &x.ScaleTargetRef - yy26.CodecEncodeSelf(e) + yy6 := &x.ScaleTargetRef + yy6.CodecEncodeSelf(e) } - if yyr23 || yy2arr23 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq23[1] { + if yyq2[1] { if x.MinReplicas == nil { r.EncodeNil() } else { - yy28 := *x.MinReplicas - yym29 := z.EncBinary() - _ = yym29 + yy9 := *x.MinReplicas + yym10 := z.EncBinary() + _ = yym10 if false { } else { - r.EncodeInt(int64(yy28)) + r.EncodeInt(int64(yy9)) } } } else { r.EncodeNil() } } else { - if yyq23[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("minReplicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.MinReplicas == nil { r.EncodeNil() } else { - yy30 := *x.MinReplicas - yym31 := z.EncBinary() - _ = 
yym31 + yy11 := *x.MinReplicas + yym12 := z.EncBinary() + _ = yym12 if false { } else { - r.EncodeInt(int64(yy30)) + r.EncodeInt(int64(yy11)) } } } } - if yyr23 || yy2arr23 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym33 := z.EncBinary() - _ = yym33 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeInt(int64(x.MaxReplicas)) @@ -412,49 +450,49 @@ func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("maxReplicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym34 := z.EncBinary() - _ = yym34 + yym15 := z.EncBinary() + _ = yym15 if false { } else { r.EncodeInt(int64(x.MaxReplicas)) } } - if yyr23 || yy2arr23 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq23[3] { + if yyq2[3] { if x.TargetCPUUtilizationPercentage == nil { r.EncodeNil() } else { - yy36 := *x.TargetCPUUtilizationPercentage - yym37 := z.EncBinary() - _ = yym37 + yy17 := *x.TargetCPUUtilizationPercentage + yym18 := z.EncBinary() + _ = yym18 if false { } else { - r.EncodeInt(int64(yy36)) + r.EncodeInt(int64(yy17)) } } } else { r.EncodeNil() } } else { - if yyq23[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("targetCPUUtilizationPercentage")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.TargetCPUUtilizationPercentage == nil { r.EncodeNil() } else { - yy38 := *x.TargetCPUUtilizationPercentage - yym39 := z.EncBinary() - _ = yym39 + yy19 := *x.TargetCPUUtilizationPercentage + yym20 := z.EncBinary() + _ = yym20 if false { } else { - r.EncodeInt(int64(yy38)) + r.EncodeInt(int64(yy19)) } } } } - if yyr23 || yy2arr23 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -467,25 +505,25 @@ func (x *HorizontalPodAutoscalerSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym40 := z.DecBinary() - _ = yym40 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct41 := r.ContainerType() - if yyct41 == codecSelferValueTypeMap1234 { - yyl41 := r.ReadMapStart() - if yyl41 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl41, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct41 == codecSelferValueTypeArray1234 { - yyl41 := r.ReadArrayStart() - if yyl41 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl41, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -497,12 +535,12 @@ func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys42Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys42Slc - var yyhl42 bool = l >= 0 - for yyj42 := 0; ; yyj42++ { - if yyhl42 { - if yyj42 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 
0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -511,16 +549,16 @@ func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978 } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys42Slc = r.DecodeBytes(yys42Slc, true, true) - yys42 := string(yys42Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys42 { + switch yys3 { case "scaleTargetRef": if r.TryDecodeAsNil() { x.ScaleTargetRef = CrossVersionObjectReference{} } else { - yyv43 := &x.ScaleTargetRef - yyv43.CodecDecodeSelf(d) + yyv4 := &x.ScaleTargetRef + yyv4.CodecDecodeSelf(d) } case "minReplicas": if r.TryDecodeAsNil() { @@ -531,8 +569,8 @@ func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978 if x.MinReplicas == nil { x.MinReplicas = new(int32) } - yym45 := z.DecBinary() - _ = yym45 + yym6 := z.DecBinary() + _ = yym6 if false { } else { *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) @@ -542,7 +580,13 @@ func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978 if r.TryDecodeAsNil() { x.MaxReplicas = 0 } else { - x.MaxReplicas = int32(r.DecodeInt(32)) + yyv7 := &x.MaxReplicas + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } } case "targetCPUUtilizationPercentage": if r.TryDecodeAsNil() { @@ -553,17 +597,17 @@ func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978 if x.TargetCPUUtilizationPercentage == nil { x.TargetCPUUtilizationPercentage = new(int32) } - yym48 := z.DecBinary() - _ = yym48 + yym10 := z.DecBinary() + _ = yym10 if false { } else { *((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) } } default: - z.DecStructFieldNotFound(-1, yys42) - } // end switch yys42 - } // end for yyj42 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -571,16 +615,16 @@ func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec19 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj49 int - var yyb49 bool - var yyhl49 bool = l >= 0 - yyj49++ - if yyhl49 { - yyb49 = yyj49 > l + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb49 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb49 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -588,16 +632,16 @@ func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { x.ScaleTargetRef = CrossVersionObjectReference{} } else { - yyv50 := &x.ScaleTargetRef - yyv50.CodecDecodeSelf(d) + yyv12 := &x.ScaleTargetRef + yyv12.CodecDecodeSelf(d) } - yyj49++ - if yyhl49 { - yyb49 = yyj49 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb49 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb49 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -610,20 +654,20 @@ func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec19 if x.MinReplicas == nil { x.MinReplicas = new(int32) } - yym52 := z.DecBinary() - _ = yym52 + yym14 := z.DecBinary() + _ = yym14 if false { } else { *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) } } - yyj49++ - if yyhl49 { - yyb49 = yyj49 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb49 = r.CheckBreak() + 
yyb11 = r.CheckBreak() } - if yyb49 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -631,15 +675,21 @@ func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { x.MaxReplicas = 0 } else { - x.MaxReplicas = int32(r.DecodeInt(32)) + yyv15 := &x.MaxReplicas + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(yyv15)) = int32(r.DecodeInt(32)) + } } - yyj49++ - if yyhl49 { - yyb49 = yyj49 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb49 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb49 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -652,25 +702,25 @@ func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec19 if x.TargetCPUUtilizationPercentage == nil { x.TargetCPUUtilizationPercentage = new(int32) } - yym55 := z.DecBinary() - _ = yym55 + yym18 := z.DecBinary() + _ = yym18 if false { } else { *((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) } } for { - yyj49++ - if yyhl49 { - yyb49 = yyj49 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb49 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb49 { + if yyb11 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj49-1, "") + z.DecStructFieldNotFound(yyj11-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -682,80 +732,80 @@ func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym56 := z.EncBinary() - _ = yym56 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep57 := !z.EncBinary() - yy2arr57 := z.EncBasicHandle().StructToArray - var yyq57 [5]bool - _, _, _ = yysep57, yyq57, yy2arr57 - const yyr57 bool = false - yyq57[0] = x.ObservedGeneration != nil - yyq57[1] = x.LastScaleTime != nil - yyq57[4] = x.CurrentCPUUtilizationPercentage != nil - var yynn57 int - if yyr57 || yy2arr57 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ObservedGeneration != nil + yyq2[1] = x.LastScaleTime != nil + yyq2[4] = x.CurrentCPUUtilizationPercentage != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn57 = 2 - for _, b := range yyq57 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn57++ + yynn2++ } } - r.EncodeMapStart(yynn57) - yynn57 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr57 || yy2arr57 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq57[0] { + if yyq2[0] { if x.ObservedGeneration == nil { r.EncodeNil() } else { - yy59 := *x.ObservedGeneration - yym60 := z.EncBinary() - _ = yym60 + yy4 := *x.ObservedGeneration + yym5 := z.EncBinary() + _ = yym5 if false { } else { - r.EncodeInt(int64(yy59)) + r.EncodeInt(int64(yy4)) } } } else { r.EncodeNil() } } else { - if yyq57[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.ObservedGeneration == nil { r.EncodeNil() } else { - yy61 := *x.ObservedGeneration - yym62 := z.EncBinary() - _ = yym62 + yy6 := *x.ObservedGeneration + yym7 := z.EncBinary() + _ = yym7 if false { } else { - r.EncodeInt(int64(yy61)) + r.EncodeInt(int64(yy6)) } } } } - if yyr57 || yy2arr57 
{ + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq57[1] { + if yyq2[1] { if x.LastScaleTime == nil { r.EncodeNil() } else { - yym64 := z.EncBinary() - _ = yym64 + yym9 := z.EncBinary() + _ = yym9 if false { } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { - } else if yym64 { + } else if yym9 { z.EncBinaryMarshal(x.LastScaleTime) - } else if !yym64 && z.IsJSONHandle() { + } else if !yym9 && z.IsJSONHandle() { z.EncJSONMarshal(x.LastScaleTime) } else { z.EncFallback(x.LastScaleTime) @@ -765,20 +815,20 @@ func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq57[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("lastScaleTime")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.LastScaleTime == nil { r.EncodeNil() } else { - yym65 := z.EncBinary() - _ = yym65 + yym10 := z.EncBinary() + _ = yym10 if false { } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { - } else if yym65 { + } else if yym10 { z.EncBinaryMarshal(x.LastScaleTime) - } else if !yym65 && z.IsJSONHandle() { + } else if !yym10 && z.IsJSONHandle() { z.EncJSONMarshal(x.LastScaleTime) } else { z.EncFallback(x.LastScaleTime) @@ -786,10 +836,10 @@ func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr57 || yy2arr57 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym67 := z.EncBinary() - _ = yym67 + yym12 := z.EncBinary() + _ = yym12 if false { } else { r.EncodeInt(int64(x.CurrentReplicas)) @@ -798,17 +848,17 @@ func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("currentReplicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym68 := z.EncBinary() - _ = yym68 + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeInt(int64(x.CurrentReplicas)) } } - if yyr57 || yy2arr57 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym70 := z.EncBinary() - _ = yym70 + yym15 := z.EncBinary() + _ = yym15 if false { } else { r.EncodeInt(int64(x.DesiredReplicas)) @@ -817,49 +867,49 @@ func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("desiredReplicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym71 := z.EncBinary() - _ = yym71 + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeInt(int64(x.DesiredReplicas)) } } - if yyr57 || yy2arr57 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq57[4] { + if yyq2[4] { if x.CurrentCPUUtilizationPercentage == nil { r.EncodeNil() } else { - yy73 := *x.CurrentCPUUtilizationPercentage - yym74 := z.EncBinary() - _ = yym74 + yy18 := *x.CurrentCPUUtilizationPercentage + yym19 := z.EncBinary() + _ = yym19 if false { } else { - r.EncodeInt(int64(yy73)) + r.EncodeInt(int64(yy18)) } } } else { r.EncodeNil() } } else { - if yyq57[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("currentCPUUtilizationPercentage")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.CurrentCPUUtilizationPercentage == nil { r.EncodeNil() } else { - yy75 := *x.CurrentCPUUtilizationPercentage 
- yym76 := z.EncBinary() - _ = yym76 + yy20 := *x.CurrentCPUUtilizationPercentage + yym21 := z.EncBinary() + _ = yym21 if false { } else { - r.EncodeInt(int64(yy75)) + r.EncodeInt(int64(yy20)) } } } } - if yyr57 || yy2arr57 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -872,25 +922,25 @@ func (x *HorizontalPodAutoscalerStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym77 := z.DecBinary() - _ = yym77 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct78 := r.ContainerType() - if yyct78 == codecSelferValueTypeMap1234 { - yyl78 := r.ReadMapStart() - if yyl78 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl78, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct78 == codecSelferValueTypeArray1234 { - yyl78 := r.ReadArrayStart() - if yyl78 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl78, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -902,12 +952,12 @@ func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec19 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys79Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys79Slc - var yyhl79 bool = l >= 0 - for yyj79 := 0; ; yyj79++ { - if yyhl79 { - if yyj79 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -916,10 +966,10 @@ func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec19 } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys79Slc = r.DecodeBytes(yys79Slc, true, true) - yys79 := string(yys79Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys79 { + switch yys3 { case "observedGeneration": if r.TryDecodeAsNil() { if x.ObservedGeneration != nil { @@ -929,8 +979,8 @@ func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec19 if x.ObservedGeneration == nil { x.ObservedGeneration = new(int64) } - yym81 := z.DecBinary() - _ = yym81 + yym5 := z.DecBinary() + _ = yym5 if false { } else { *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) @@ -943,15 +993,15 @@ func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec19 } } else { if x.LastScaleTime == nil { - x.LastScaleTime = new(pkg1_unversioned.Time) + x.LastScaleTime = new(pkg1_v1.Time) } - yym83 := z.DecBinary() - _ = yym83 + yym7 := z.DecBinary() + _ = yym7 if false { } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { - } else if yym83 { + } else if yym7 { z.DecBinaryUnmarshal(x.LastScaleTime) - } else if !yym83 && z.IsJSONHandle() { + } else if !yym7 && z.IsJSONHandle() { z.DecJSONUnmarshal(x.LastScaleTime) } else { z.DecFallback(x.LastScaleTime, false) @@ -961,13 +1011,25 @@ func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec19 if 
r.TryDecodeAsNil() { x.CurrentReplicas = 0 } else { - x.CurrentReplicas = int32(r.DecodeInt(32)) + yyv8 := &x.CurrentReplicas + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } } case "desiredReplicas": if r.TryDecodeAsNil() { x.DesiredReplicas = 0 } else { - x.DesiredReplicas = int32(r.DecodeInt(32)) + yyv10 := &x.DesiredReplicas + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } } case "currentCPUUtilizationPercentage": if r.TryDecodeAsNil() { @@ -978,17 +1040,17 @@ func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec19 if x.CurrentCPUUtilizationPercentage == nil { x.CurrentCPUUtilizationPercentage = new(int32) } - yym87 := z.DecBinary() - _ = yym87 + yym13 := z.DecBinary() + _ = yym13 if false { } else { *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) } } default: - z.DecStructFieldNotFound(-1, yys79) - } // end switch yys79 - } // end for yyj79 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -996,16 +1058,16 @@ func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj88 int - var yyb88 bool - var yyhl88 bool = l >= 0 - yyj88++ - if yyhl88 { - yyb88 = yyj88 > l + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb88 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb88 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1018,20 +1080,20 @@ func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec if x.ObservedGeneration == nil { x.ObservedGeneration = new(int64) } - yym90 := z.DecBinary() - _ = yym90 + yym16 := z.DecBinary() + _ = yym16 if false { } else { *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) } } - yyj88++ - if yyhl88 { - yyb88 = yyj88 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb88 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb88 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1042,27 +1104,27 @@ func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec } } else { if x.LastScaleTime == nil { - x.LastScaleTime = new(pkg1_unversioned.Time) + x.LastScaleTime = new(pkg1_v1.Time) } - yym92 := z.DecBinary() - _ = yym92 + yym18 := z.DecBinary() + _ = yym18 if false { } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { - } else if yym92 { + } else if yym18 { z.DecBinaryUnmarshal(x.LastScaleTime) - } else if !yym92 && z.IsJSONHandle() { + } else if !yym18 && z.IsJSONHandle() { z.DecJSONUnmarshal(x.LastScaleTime) } else { z.DecFallback(x.LastScaleTime, false) } } - yyj88++ - if yyhl88 { - yyb88 = yyj88 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb88 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb88 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1070,15 +1132,21 @@ func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec if r.TryDecodeAsNil() { x.CurrentReplicas = 0 } else { - x.CurrentReplicas = int32(r.DecodeInt(32)) + yyv19 := &x.CurrentReplicas + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int32)(yyv19)) = int32(r.DecodeInt(32)) + } } - yyj88++ - if yyhl88 { - yyb88 = yyj88 > l + yyj14++ + 
if yyhl14 { + yyb14 = yyj14 > l } else { - yyb88 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb88 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1086,15 +1154,21 @@ func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec if r.TryDecodeAsNil() { x.DesiredReplicas = 0 } else { - x.DesiredReplicas = int32(r.DecodeInt(32)) + yyv21 := &x.DesiredReplicas + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } } - yyj88++ - if yyhl88 { - yyb88 = yyj88 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb88 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb88 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1107,25 +1181,25 @@ func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec if x.CurrentCPUUtilizationPercentage == nil { x.CurrentCPUUtilizationPercentage = new(int32) } - yym96 := z.DecBinary() - _ = yym96 + yym24 := z.DecBinary() + _ = yym24 if false { } else { *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) } } for { - yyj88++ - if yyhl88 { - yyb88 = yyj88 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb88 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb88 { + if yyb14 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj88-1, "") + z.DecStructFieldNotFound(yyj14-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -1137,39 +1211,39 @@ func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym97 := z.EncBinary() - _ = yym97 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep98 := !z.EncBinary() - yy2arr98 := z.EncBasicHandle().StructToArray - var yyq98 [5]bool - _, _, _ = yysep98, yyq98, yy2arr98 - const yyr98 bool = false - yyq98[0] = x.Kind != "" - yyq98[1] = x.APIVersion != "" - yyq98[2] = true - yyq98[3] = true - yyq98[4] = true - var yynn98 int - if yyr98 || yy2arr98 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn98 = 0 - for _, b := range yyq98 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn98++ + yynn2++ } } - r.EncodeMapStart(yynn98) - yynn98 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr98 || yy2arr98 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq98[0] { - yym100 := z.EncBinary() - _ = yym100 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -1178,23 +1252,23 @@ func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq98[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym101 := z.EncBinary() - _ = yym101 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr98 || yy2arr98 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if 
yyq98[1] { - yym103 := z.EncBinary() - _ = yym103 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -1203,70 +1277,82 @@ func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq98[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym104 := z.EncBinary() - _ = yym104 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr98 || yy2arr98 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq98[2] { - yy106 := &x.ObjectMeta - yy106.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq98[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy107 := &x.ObjectMeta - yy107.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr98 || yy2arr98 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq98[3] { - yy109 := &x.Spec - yy109.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq98[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy110 := &x.Spec - yy110.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } } - if yyr98 || yy2arr98 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq98[4] { - yy112 := &x.Status - yy112.CodecEncodeSelf(e) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq98[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy113 := &x.Status - yy113.CodecEncodeSelf(e) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } - if yyr98 || yy2arr98 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -1279,25 +1365,25 @@ func (x *HorizontalPodAutoscaler) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym114 := z.DecBinary() - _ = yym114 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct115 := r.ContainerType() - if yyct115 == codecSelferValueTypeMap1234 { - yyl115 := r.ReadMapStart() - if yyl115 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl115, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct115 == 
codecSelferValueTypeArray1234 { - yyl115 := r.ReadArrayStart() - if yyl115 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl115, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -1309,12 +1395,12 @@ func (x *HorizontalPodAutoscaler) codecDecodeSelfFromMap(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys116Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys116Slc - var yyhl116 bool = l >= 0 - for yyj116 := 0; ; yyj116++ { - if yyhl116 { - if yyj116 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -1323,47 +1409,65 @@ func (x *HorizontalPodAutoscaler) codecDecodeSelfFromMap(l int, d *codec1978.Dec } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys116Slc = r.DecodeBytes(yys116Slc, true, true) - yys116 := string(yys116Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys116 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv119 := &x.ObjectMeta - yyv119.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = HorizontalPodAutoscalerSpec{} } else { - yyv120 := &x.Spec - yyv120.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = HorizontalPodAutoscalerStatus{} } else { - yyv121 := &x.Status - yyv121.CodecDecodeSelf(d) + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys116) - } // end switch yys116 - } // end for yyj116 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -1371,16 +1475,16 @@ func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.D var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj122 int - var yyb122 bool - var yyhl122 bool = l >= 0 - yyj122++ - if yyhl122 { - yyb122 = yyj122 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb122 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb122 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1388,15 +1492,21 @@ func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ 
= yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj122++ - if yyhl122 { - yyb122 = yyj122 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb122 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb122 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1404,32 +1514,44 @@ func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj122++ - if yyhl122 { - yyb122 = yyj122 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb122 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb122 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv125 := &x.ObjectMeta - yyv125.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj122++ - if yyhl122 { - yyb122 = yyj122 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb122 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb122 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1437,16 +1559,16 @@ func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Spec = HorizontalPodAutoscalerSpec{} } else { - yyv126 := &x.Spec - yyv126.CodecDecodeSelf(d) + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) } - yyj122++ - if yyhl122 { - yyb122 = yyj122 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb122 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb122 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1454,21 +1576,21 @@ func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Status = HorizontalPodAutoscalerStatus{} } else { - yyv127 := &x.Status - yyv127.CodecDecodeSelf(d) + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) } for { - yyj122++ - if yyhl122 { - yyb122 = yyj122 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb122 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb122 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj122-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -1480,37 +1602,37 @@ func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym128 := z.EncBinary() - _ = yym128 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep129 := !z.EncBinary() - yy2arr129 := z.EncBasicHandle().StructToArray - var yyq129 [4]bool - _, _, _ = yysep129, yyq129, yy2arr129 - const yyr129 bool = false - yyq129[0] = x.Kind != "" - yyq129[1] = x.APIVersion != "" - yyq129[2] = true - var yynn129 int - if yyr129 || yy2arr129 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var 
yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn129 = 1 - for _, b := range yyq129 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn129++ + yynn2++ } } - r.EncodeMapStart(yynn129) - yynn129 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr129 || yy2arr129 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq129[0] { - yym131 := z.EncBinary() - _ = yym131 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -1519,23 +1641,23 @@ func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq129[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym132 := z.EncBinary() - _ = yym132 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr129 || yy2arr129 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq129[1] { - yym134 := z.EncBinary() - _ = yym134 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -1544,54 +1666,54 @@ func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq129[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym135 := z.EncBinary() - _ = yym135 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr129 || yy2arr129 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq129[2] { - yy137 := &x.ListMeta - yym138 := z.EncBinary() - _ = yym138 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy137) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy137) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq129[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy139 := &x.ListMeta - yym140 := z.EncBinary() - _ = yym140 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy139) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy139) + z.EncFallback(yy12) } } } - if yyr129 || yy2arr129 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym142 := z.EncBinary() - _ = yym142 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) @@ -1604,15 +1726,15 @@ func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym143 := z.EncBinary() - _ = yym143 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) } } } - if yyr129 || yy2arr129 { + if yyr2 || yy2arr2 { 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -1625,25 +1747,25 @@ func (x *HorizontalPodAutoscalerList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym144 := z.DecBinary() - _ = yym144 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct145 := r.ContainerType() - if yyct145 == codecSelferValueTypeMap1234 { - yyl145 := r.ReadMapStart() - if yyl145 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl145, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct145 == codecSelferValueTypeArray1234 { - yyl145 := r.ReadArrayStart() - if yyl145 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl145, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -1655,12 +1777,12 @@ func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromMap(l int, d *codec1978 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys146Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys146Slc - var yyhl146 bool = l >= 0 - for yyj146 := 0; ; yyj146++ { - if yyhl146 { - if yyj146 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -1669,51 +1791,63 @@ func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromMap(l int, d *codec1978 } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys146Slc = r.DecodeBytes(yys146Slc, true, true) - yys146 := string(yys146Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys146 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv149 := &x.ListMeta - yym150 := z.DecBinary() - _ = yym150 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv149) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv149, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv151 := &x.Items - yym152 := z.DecBinary() - _ = yym152 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv151), d) + h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys146) - } // end switch yys146 - } // end for yyj146 + 
z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -1721,16 +1855,16 @@ func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec19 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj153 int - var yyb153 bool - var yyhl153 bool = l >= 0 - yyj153++ - if yyhl153 { - yyb153 = yyj153 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb153 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb153 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1738,15 +1872,21 @@ func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj153++ - if yyhl153 { - yyb153 = yyj153 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb153 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb153 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1754,38 +1894,44 @@ func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj153++ - if yyhl153 { - yyb153 = yyj153 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb153 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb153 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv156 := &x.ListMeta - yym157 := z.DecBinary() - _ = yym157 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv156) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv156, false) + z.DecFallback(yyv17, false) } } - yyj153++ - if yyhl153 { - yyb153 = yyj153 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb153 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb153 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1793,26 +1939,26 @@ func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { x.Items = nil } else { - yyv158 := &x.Items - yym159 := z.DecBinary() - _ = yym159 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv158), d) + h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv19), d) } } for { - yyj153++ - if yyhl153 { - yyb153 = yyj153 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb153 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb153 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj153-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -1824,39 +1970,39 @@ func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym160 := z.EncBinary() - 
_ = yym160 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep161 := !z.EncBinary() - yy2arr161 := z.EncBasicHandle().StructToArray - var yyq161 [5]bool - _, _, _ = yysep161, yyq161, yy2arr161 - const yyr161 bool = false - yyq161[0] = x.Kind != "" - yyq161[1] = x.APIVersion != "" - yyq161[2] = true - yyq161[3] = true - yyq161[4] = true - var yynn161 int - if yyr161 || yy2arr161 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn161 = 0 - for _, b := range yyq161 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn161++ + yynn2++ } } - r.EncodeMapStart(yynn161) - yynn161 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr161 || yy2arr161 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq161[0] { - yym163 := z.EncBinary() - _ = yym163 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -1865,23 +2011,23 @@ func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq161[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym164 := z.EncBinary() - _ = yym164 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr161 || yy2arr161 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq161[1] { - yym166 := z.EncBinary() - _ = yym166 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -1890,70 +2036,82 @@ func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq161[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym167 := z.EncBinary() - _ = yym167 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr161 || yy2arr161 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq161[2] { - yy169 := &x.ObjectMeta - yy169.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq161[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy170 := &x.ObjectMeta - yy170.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr161 || yy2arr161 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq161[3] { - yy172 := &x.Spec - yy172.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := 
&x.Spec + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq161[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy173 := &x.Spec - yy173.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } } - if yyr161 || yy2arr161 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq161[4] { - yy175 := &x.Status - yy175.CodecEncodeSelf(e) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq161[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy176 := &x.Status - yy176.CodecEncodeSelf(e) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } - if yyr161 || yy2arr161 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -1966,25 +2124,25 @@ func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym177 := z.DecBinary() - _ = yym177 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct178 := r.ContainerType() - if yyct178 == codecSelferValueTypeMap1234 { - yyl178 := r.ReadMapStart() - if yyl178 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl178, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct178 == codecSelferValueTypeArray1234 { - yyl178 := r.ReadArrayStart() - if yyl178 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl178, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -1996,12 +2154,12 @@ func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys179Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys179Slc - var yyhl179 bool = l >= 0 - for yyj179 := 0; ; yyj179++ { - if yyhl179 { - if yyj179 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -2010,47 +2168,65 @@ func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys179Slc = r.DecodeBytes(yys179Slc, true, true) - yys179 := string(yys179Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys179 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } 
else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv182 := &x.ObjectMeta - yyv182.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = ScaleSpec{} } else { - yyv183 := &x.Spec - yyv183.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = ScaleStatus{} } else { - yyv184 := &x.Status - yyv184.CodecDecodeSelf(d) + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys179) - } // end switch yys179 - } // end for yyj179 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -2058,16 +2234,16 @@ func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj185 int - var yyb185 bool - var yyhl185 bool = l >= 0 - yyj185++ - if yyhl185 { - yyb185 = yyj185 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb185 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb185 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2075,15 +2251,21 @@ func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj185++ - if yyhl185 { - yyb185 = yyj185 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb185 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb185 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2091,32 +2273,44 @@ func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj185++ - if yyhl185 { - yyb185 = yyj185 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb185 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb185 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv188 := &x.ObjectMeta - yyv188.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj185++ - if yyhl185 { - yyb185 = yyj185 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb185 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb185 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2124,16 +2318,16 @@ func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Spec = ScaleSpec{} } else { - yyv189 := &x.Spec - yyv189.CodecDecodeSelf(d) + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) } - yyj185++ - if 
yyhl185 { - yyb185 = yyj185 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb185 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb185 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2141,21 +2335,21 @@ func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Status = ScaleStatus{} } else { - yyv190 := &x.Status - yyv190.CodecDecodeSelf(d) + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) } for { - yyj185++ - if yyhl185 { - yyb185 = yyj185 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb185 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb185 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj185-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -2167,35 +2361,35 @@ func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym191 := z.EncBinary() - _ = yym191 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep192 := !z.EncBinary() - yy2arr192 := z.EncBasicHandle().StructToArray - var yyq192 [1]bool - _, _, _ = yysep192, yyq192, yy2arr192 - const yyr192 bool = false - yyq192[0] = x.Replicas != 0 - var yynn192 int - if yyr192 || yy2arr192 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn192 = 0 - for _, b := range yyq192 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn192++ + yynn2++ } } - r.EncodeMapStart(yynn192) - yynn192 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr192 || yy2arr192 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq192[0] { - yym194 := z.EncBinary() - _ = yym194 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeInt(int64(x.Replicas)) @@ -2204,19 +2398,19 @@ func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq192[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("replicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym195 := z.EncBinary() - _ = yym195 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeInt(int64(x.Replicas)) } } } - if yyr192 || yy2arr192 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -2229,25 +2423,25 @@ func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym196 := z.DecBinary() - _ = yym196 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct197 := r.ContainerType() - if yyct197 == codecSelferValueTypeMap1234 { - yyl197 := r.ReadMapStart() - if yyl197 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl197, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct197 == codecSelferValueTypeArray1234 { - yyl197 := r.ReadArrayStart() - if yyl197 == 0 { + } else if yyct2 == 
codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl197, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -2259,12 +2453,12 @@ func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys198Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys198Slc - var yyhl198 bool = l >= 0 - for yyj198 := 0; ; yyj198++ { - if yyhl198 { - if yyj198 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -2273,20 +2467,26 @@ func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys198Slc = r.DecodeBytes(yys198Slc, true, true) - yys198 := string(yys198Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys198 { + switch yys3 { case "replicas": if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int32(r.DecodeInt(32)) + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } } default: - z.DecStructFieldNotFound(-1, yys198) - } // end switch yys198 - } // end for yyj198 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -2294,16 +2494,16 @@ func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj200 int - var yyb200 bool - var yyhl200 bool = l >= 0 - yyj200++ - if yyhl200 { - yyb200 = yyj200 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb200 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb200 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2311,20 +2511,26 @@ func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int32(r.DecodeInt(32)) + yyv7 := &x.Replicas + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } } for { - yyj200++ - if yyhl200 { - yyb200 = yyj200 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb200 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb200 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj200-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -2336,34 +2542,34 @@ func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym202 := z.EncBinary() - _ = yym202 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep203 := !z.EncBinary() - yy2arr203 := z.EncBasicHandle().StructToArray - var yyq203 [2]bool - _, _, _ = yysep203, yyq203, yy2arr203 - const yyr203 bool = false - yyq203[1] = x.Selector != "" - var yynn203 int - if yyr203 || yy2arr203 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + 
_, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Selector != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn203 = 1 - for _, b := range yyq203 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn203++ + yynn2++ } } - r.EncodeMapStart(yynn203) - yynn203 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr203 || yy2arr203 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym205 := z.EncBinary() - _ = yym205 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeInt(int64(x.Replicas)) @@ -2372,18 +2578,18 @@ func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("replicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym206 := z.EncBinary() - _ = yym206 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeInt(int64(x.Replicas)) } } - if yyr203 || yy2arr203 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq203[1] { - yym208 := z.EncBinary() - _ = yym208 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Selector)) @@ -2392,19 +2598,19 @@ func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq203[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("selector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym209 := z.EncBinary() - _ = yym209 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Selector)) } } } - if yyr203 || yy2arr203 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -2417,25 +2623,25 @@ func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym210 := z.DecBinary() - _ = yym210 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct211 := r.ContainerType() - if yyct211 == codecSelferValueTypeMap1234 { - yyl211 := r.ReadMapStart() - if yyl211 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl211, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct211 == codecSelferValueTypeArray1234 { - yyl211 := r.ReadArrayStart() - if yyl211 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl211, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -2447,12 +2653,12 @@ func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys212Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys212Slc - var yyhl212 bool = l >= 0 - for yyj212 := 0; ; yyj212++ { - if yyhl212 { - if yyj212 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if 
yyhl3 { + if yyj3 >= l { break } } else { @@ -2461,26 +2667,38 @@ func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys212Slc = r.DecodeBytes(yys212Slc, true, true) - yys212 := string(yys212Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys212 { + switch yys3 { case "replicas": if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int32(r.DecodeInt(32)) + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } } case "selector": if r.TryDecodeAsNil() { x.Selector = "" } else { - x.Selector = string(r.DecodeString()) + yyv6 := &x.Selector + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys212) - } // end switch yys212 - } // end for yyj212 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -2488,16 +2706,16 @@ func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj215 int - var yyb215 bool - var yyhl215 bool = l >= 0 - yyj215++ - if yyhl215 { - yyb215 = yyj215 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb215 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb215 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2505,15 +2723,21 @@ func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int32(r.DecodeInt(32)) + yyv9 := &x.Replicas + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*int32)(yyv9)) = int32(r.DecodeInt(32)) + } } - yyj215++ - if yyhl215 { - yyb215 = yyj215 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb215 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb215 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2521,20 +2745,2353 @@ func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Selector = "" } else { - x.Selector = string(r.DecodeString()) + yyv11 := &x.Selector + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x MetricSourceType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *MetricSourceType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *MetricSpec) CodecEncodeSelf(e *codec1978.Encoder) { + 
var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Object != nil + yyq2[2] = x.Pods != nil + yyq2[3] = x.Resource != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("object")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("pods")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *MetricSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *MetricSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "object": + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricSource) + } + x.Object.CodecDecodeSelf(d) + } + case "pods": + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricSource) + } + x.Pods.CodecDecodeSelf(d) + } + case "resource": + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricSource) + } + x.Resource.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *MetricSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv9 := &x.Type + yyv9.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricSource) + } + x.Object.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricSource) + } + x.Pods.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricSource) + } + x.Resource.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ObjectMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, 
_, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.Target + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("target")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.Target + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.TargetValue + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.TargetValue + yym15 := z.EncBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.EncExt(yy14) { + } else if !yym15 && z.IsJSONHandle() { + z.EncJSONMarshal(yy14) + } else { + z.EncFallback(yy14) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ObjectMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ObjectMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = 
r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "target": + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv4 := &x.Target + yyv4.CodecDecodeSelf(d) + } + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv5 := &x.MetricName + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "targetValue": + if r.TryDecodeAsNil() { + x.TargetValue = pkg3_resource.Quantity{} + } else { + yyv7 := &x.TargetValue + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ObjectMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv10 := &x.Target + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv11 := &x.MetricName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetValue = pkg3_resource.Quantity{} + } else { + yyv13 := &x.TargetValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(yyv13) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) + } else { + z.DecFallback(yyv13, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodsMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 
:= z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.TargetAverageValue + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.TargetAverageValue + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodsMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodsMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv4 := &x.MetricName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "targetAverageValue": + if r.TryDecodeAsNil() { + x.TargetAverageValue = pkg3_resource.Quantity{} + } else { + yyv6 := &x.TargetAverageValue + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodsMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + 
var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv9 := &x.MetricName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetAverageValue = pkg3_resource.Quantity{} + } else { + yyv11 := &x.TargetAverageValue + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) + } else { + z.DecFallback(yyv11, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.TargetAverageUtilization != nil + yyq2[2] = x.TargetAverageValue != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf4 := &x.Name + yysf4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf5 := &x.Name + yysf5.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.TargetAverageUtilization == nil { + r.EncodeNil() + } else { + yy7 := *x.TargetAverageUtilization + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetAverageUtilization")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TargetAverageUtilization == nil { + r.EncodeNil() + } else { + yy9 := *x.TargetAverageUtilization + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.TargetAverageValue == nil { + r.EncodeNil() + } else { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.EncExt(x.TargetAverageValue) { + } else if !yym12 
&& z.IsJSONHandle() { + z.EncJSONMarshal(x.TargetAverageValue) + } else { + z.EncFallback(x.TargetAverageValue) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TargetAverageValue == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(x.TargetAverageValue) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(x.TargetAverageValue) + } else { + z.EncFallback(x.TargetAverageValue) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yyv4.CodecDecodeSelf(d) + } + case "targetAverageUtilization": + if r.TryDecodeAsNil() { + if x.TargetAverageUtilization != nil { + x.TargetAverageUtilization = nil + } + } else { + if x.TargetAverageUtilization == nil { + x.TargetAverageUtilization = new(int32) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int32)(x.TargetAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + case "targetAverageValue": + if r.TryDecodeAsNil() { + if x.TargetAverageValue != nil { + x.TargetAverageValue = nil + } + } else { + if x.TargetAverageValue == nil { + x.TargetAverageValue = new(pkg3_resource.Quantity) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(x.TargetAverageValue) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.TargetAverageValue) + } else { + z.DecFallback(x.TargetAverageValue, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + 
var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv10 := &x.Name + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TargetAverageUtilization != nil { + x.TargetAverageUtilization = nil + } + } else { + if x.TargetAverageUtilization == nil { + x.TargetAverageUtilization = new(int32) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(x.TargetAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TargetAverageValue != nil { + x.TargetAverageValue = nil + } + } else { + if x.TargetAverageValue == nil { + x.TargetAverageValue = new(pkg3_resource.Quantity) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(x.TargetAverageValue) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.TargetAverageValue) + } else { + z.DecFallback(x.TargetAverageValue, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *MetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Object != nil + yyq2[2] = x.Pods != nil + yyq2[3] = x.Resource != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("object")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("pods")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *MetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *MetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "object": + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricStatus) + } + x.Object.CodecDecodeSelf(d) + } + case "pods": + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricStatus) + } + x.Pods.CodecDecodeSelf(d) + } + case "resource": + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricStatus) + } + x.Resource.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *MetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = 
h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv9 := &x.Type + yyv9.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricStatus) + } + x.Object.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricStatus) + } + x.Pods.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricStatus) + } + x.Resource.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ObjectMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.Target + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("target")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.Target + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.CurrentValue + yym13 := 
z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.CurrentValue + yym15 := z.EncBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.EncExt(yy14) { + } else if !yym15 && z.IsJSONHandle() { + z.EncJSONMarshal(yy14) + } else { + z.EncFallback(yy14) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ObjectMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ObjectMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "target": + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv4 := &x.Target + yyv4.CodecDecodeSelf(d) + } + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv5 := &x.MetricName + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "currentValue": + if r.TryDecodeAsNil() { + x.CurrentValue = pkg3_resource.Quantity{} + } else { + yyv7 := &x.CurrentValue + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ObjectMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + 
} else { + yyv10 := &x.Target + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv11 := &x.MetricName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentValue = pkg3_resource.Quantity{} + } else { + yyv13 := &x.CurrentValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(yyv13) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) + } else { + z.DecFallback(yyv13, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodsMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.CurrentAverageValue + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.CurrentAverageValue + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodsMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, 
z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodsMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv4 := &x.MetricName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "currentAverageValue": + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg3_resource.Quantity{} + } else { + yyv6 := &x.CurrentAverageValue + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodsMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv9 := &x.MetricName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg3_resource.Quantity{} + } else { + yyv11 := &x.CurrentAverageValue + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) + } else { + z.DecFallback(yyv11, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, 
r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.CurrentAverageUtilization != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf4 := &x.Name + yysf4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf5 := &x.Name + yysf5.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.CurrentAverageUtilization == nil { + r.EncodeNil() + } else { + yy7 := *x.CurrentAverageUtilization + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentAverageUtilization")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CurrentAverageUtilization == nil { + r.EncodeNil() + } else { + yy9 := *x.CurrentAverageUtilization + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.CurrentAverageValue + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.CurrentAverageValue + yym15 := z.EncBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.EncExt(yy14) { + } else if !yym15 && z.IsJSONHandle() { + z.EncJSONMarshal(yy14) + } else { + z.EncFallback(yy14) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yyv4.CodecDecodeSelf(d) + } + case "currentAverageUtilization": + if r.TryDecodeAsNil() { + if x.CurrentAverageUtilization != nil { + x.CurrentAverageUtilization = nil + } + } else { + if x.CurrentAverageUtilization == nil { + x.CurrentAverageUtilization = new(int32) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int32)(x.CurrentAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + case "currentAverageValue": + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg3_resource.Quantity{} + } else { + yyv7 := &x.CurrentAverageValue + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv10 := &x.Name + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CurrentAverageUtilization != nil { + x.CurrentAverageUtilization = nil + } + } else { + if x.CurrentAverageUtilization == nil { + x.CurrentAverageUtilization = new(int32) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(x.CurrentAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg3_resource.Quantity{} + } else { + yyv13 := &x.CurrentAverageValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(yyv13) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) + } else { + z.DecFallback(yyv13, false) + } } for { - yyj215++ - if yyhl215 { - yyb215 = yyj215 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb215 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb215 { + if yyb9 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj215-1, "") + z.DecStructFieldNotFound(yyj9-1, "") } 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -2544,10 +5101,10 @@ func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutosc z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv218 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy219 := &yyv218 - yy219.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -2557,83 +5114,86 @@ func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutos z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv220 := *v - yyh220, yyl220 := z.DecSliceHelperStart() - var yyc220 bool - if yyl220 == 0 { - if yyv220 == nil { - yyv220 = []HorizontalPodAutoscaler{} - yyc220 = true - } else if len(yyv220) != 0 { - yyv220 = yyv220[:0] - yyc220 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []HorizontalPodAutoscaler{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl220 > 0 { - var yyrr220, yyrl220 int - var yyrt220 bool - if yyl220 > cap(yyv220) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg220 := len(yyv220) > 0 - yyv2220 := yyv220 - yyrl220, yyrt220 = z.DecInferLen(yyl220, z.DecBasicHandle().MaxInitLen, 360) - if yyrt220 { - if yyrl220 <= cap(yyv220) { - yyv220 = yyv220[:yyrl220] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 360) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv220 = make([]HorizontalPodAutoscaler, yyrl220) + yyv1 = make([]HorizontalPodAutoscaler, yyrl1) } } else { - yyv220 = make([]HorizontalPodAutoscaler, yyrl220) + yyv1 = make([]HorizontalPodAutoscaler, yyrl1) } - yyc220 = true - yyrr220 = len(yyv220) - if yyrg220 { - copy(yyv220, yyv2220) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl220 != len(yyv220) { - yyv220 = yyv220[:yyl220] - yyc220 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj220 := 0 - for ; yyj220 < yyrr220; yyj220++ { - yyh220.ElemContainerState(yyj220) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv220[yyj220] = HorizontalPodAutoscaler{} + yyv1[yyj1] = HorizontalPodAutoscaler{} } else { - yyv221 := &yyv220[yyj220] - yyv221.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt220 { - for ; yyj220 < yyl220; yyj220++ { - yyv220 = append(yyv220, HorizontalPodAutoscaler{}) - yyh220.ElemContainerState(yyj220) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, HorizontalPodAutoscaler{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv220[yyj220] = HorizontalPodAutoscaler{} + yyv1[yyj1] = HorizontalPodAutoscaler{} } else { - yyv222 := &yyv220[yyj220] - yyv222.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj220 := 0 - for ; !r.CheckBreak(); yyj220++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj220 >= len(yyv220) { - yyv220 = append(yyv220, HorizontalPodAutoscaler{}) // var yyz220 HorizontalPodAutoscaler - yyc220 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, HorizontalPodAutoscaler{}) // var yyz1 HorizontalPodAutoscaler + yyc1 = true } - 
yyh220.ElemContainerState(yyj220) - if yyj220 < len(yyv220) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv220[yyj220] = HorizontalPodAutoscaler{} + yyv1[yyj1] = HorizontalPodAutoscaler{} } else { - yyv223 := &yyv220[yyj220] - yyv223.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -2641,16 +5201,16 @@ func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutos } } - if yyj220 < len(yyv220) { - yyv220 = yyv220[:yyj220] - yyc220 = true - } else if yyj220 == 0 && yyv220 == nil { - yyv220 = []HorizontalPodAutoscaler{} - yyc220 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []HorizontalPodAutoscaler{} + yyc1 = true } } - yyh220.End() - if yyc220 { - *v = yyv220 + yyh1.End() + if yyc1 { + *v = yyv1 } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.go index a47891c1b0..81402bac23 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.go @@ -17,7 +17,8 @@ limitations under the License. package v1 import ( - "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/api/v1" ) @@ -57,7 +58,7 @@ type HorizontalPodAutoscalerStatus struct { // last time the HorizontalPodAutoscaler scaled the number of pods; // used by the autoscaler to control how often the number of pods is changed. // +optional - LastScaleTime *unversioned.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` + LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` // current number of replicas of pods managed by this autoscaler. CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` @@ -75,10 +76,10 @@ type HorizontalPodAutoscalerStatus struct { // configuration of a horizontal pod autoscaler. type HorizontalPodAutoscaler struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. // +optional @@ -91,10 +92,10 @@ type HorizontalPodAutoscaler struct { // list of horizontal pod autoscaler objects. type HorizontalPodAutoscalerList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // list of horizontal pod autoscaler objects. Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` @@ -102,10 +103,10 @@ type HorizontalPodAutoscalerList struct { // Scale represents a scaling request for a resource. type Scale struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. 
// +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. // +optional @@ -135,3 +136,161 @@ type ScaleStatus struct { // +optional Selector string `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` } + +// the types below are used in the alpha metrics annotation + +// MetricSourceType indicates the type of metric. +type MetricSourceType string + +var ( + // ObjectMetricSourceType is a metric describing a kubernetes object + // (for example, hits-per-second on an Ingress object). + ObjectMetricSourceType MetricSourceType = "Object" + // PodsMetricSourceType is a metric describing each pod in the current scale + // target (for example, transactions-processed-per-second). The values + // will be averaged together before being compared to the target value. + PodsMetricSourceType MetricSourceType = "Pods" + // ResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ResourceMetricSourceType MetricSourceType = "Resource" +) + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +type MetricSpec struct { + // type is the type of metric source. It should match one of the fields below. + Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricSource struct { + // target is the described Kubernetes object. + Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` + + // metricName is the name of the metric in question. + MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // targetValue is the target value of the metric (as a quantity). 
+ TargetValue resource.Quantity `json:"targetValue" protobuf:"bytes,3,name=targetValue"` +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +type PodsMetricSource struct { + // metricName is the name of the metric in question + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // targetAverageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + TargetAverageValue resource.Quantity `json:"targetAverageValue" protobuf:"bytes,2,name=targetAverageValue"` +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ResourceMetricSource struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` +} + +// MetricStatus describes the last-read state of a single metric. +type MetricStatus struct { + // type is the type of metric source. It will match one of the fields below. + Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). 
+type ObjectMetricStatus struct { + // target is the described Kubernetes object. + Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` + + // metricName is the name of the metric in question. + MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // currentValue is the current value of the metric (as a quantity). + CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +type PodsMetricStatus struct { + // metricName is the name of the metric in question + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // currentAverageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,2,name=currentAverageValue"` +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ResourceMetricStatus struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go index 91b3f041c2..01d205f87e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go @@ -84,6 +84,94 @@ func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { return map_HorizontalPodAutoscalerStatus } +var map_MetricSpec = map[string]string{ + "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "type": "type is the type of metric source. It should match one of the fields below.", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). 
The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", +} + +func (MetricSpec) SwaggerDoc() map[string]string { + return map_MetricSpec +} + +var map_MetricStatus = map[string]string{ + "": "MetricStatus describes the last-read state of a single metric.", + "type": "type is the type of metric source. It will match one of the fields below.", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", +} + +func (MetricStatus) SwaggerDoc() map[string]string { + return map_MetricStatus +} + +var map_ObjectMetricSource = map[string]string{ + "": "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "target": "target is the described Kubernetes object.", + "metricName": "metricName is the name of the metric in question.", + "targetValue": "targetValue is the target value of the metric (as a quantity).", +} + +func (ObjectMetricSource) SwaggerDoc() map[string]string { + return map_ObjectMetricSource +} + +var map_ObjectMetricStatus = map[string]string{ + "": "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "target": "target is the described Kubernetes object.", + "metricName": "metricName is the name of the metric in question.", + "currentValue": "currentValue is the current value of the metric (as a quantity).", +} + +func (ObjectMetricStatus) SwaggerDoc() map[string]string { + return map_ObjectMetricStatus +} + +var map_PodsMetricSource = map[string]string{ + "": "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). 
The values will be averaged together before being compared to the target value.", + "metricName": "metricName is the name of the metric in question", + "targetAverageValue": "targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity)", +} + +func (PodsMetricSource) SwaggerDoc() map[string]string { + return map_PodsMetricSource +} + +var map_PodsMetricStatus = map[string]string{ + "": "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).", + "metricName": "metricName is the name of the metric in question", + "currentAverageValue": "currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)", +} + +func (PodsMetricStatus) SwaggerDoc() map[string]string { + return map_PodsMetricStatus +} + +var map_ResourceMetricSource = map[string]string{ + "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", +} + +func (ResourceMetricSource) SwaggerDoc() map[string]string { + return map_ResourceMetricSource +} + +var map_ResourceMetricStatus = map[string]string{ + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", + "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", + "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. 
It will always be set, regardless of the corresponding metric specification.", +} + +func (ResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ResourceMetricStatus +} + var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", "metadata": "Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.conversion.go index 4f339e6d1e..681f194032 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,10 +21,13 @@ limitations under the License. package v1 import ( - unversioned "k8s.io/kubernetes/pkg/api/unversioned" + resource "k8s.io/apimachinery/pkg/api/resource" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/kubernetes/pkg/api" + api_v1 "k8s.io/kubernetes/pkg/api/v1" autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" unsafe "unsafe" ) @@ -46,6 +49,22 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec, Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus, Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus, + Convert_v1_MetricSpec_To_autoscaling_MetricSpec, + Convert_autoscaling_MetricSpec_To_v1_MetricSpec, + Convert_v1_MetricStatus_To_autoscaling_MetricStatus, + Convert_autoscaling_MetricStatus_To_v1_MetricStatus, + Convert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource, + Convert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource, + Convert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus, + Convert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus, + Convert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource, + Convert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource, + Convert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus, + Convert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus, + Convert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource, + Convert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource, + Convert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus, + Convert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus, Convert_v1_Scale_To_autoscaling_Scale, Convert_autoscaling_Scale_To_v1_Scale, Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec, @@ -78,10 +97,7 @@ func Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectRef } func autoConvert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -91,15 +107,8 @@ func autoConvert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscal return nil } -func Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { - return autoConvert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s) -} - func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -109,13 +118,19 @@ func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscal return nil } -func Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { - return autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in, out, s) -} - func autoConvert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]autoscaling.HorizontalPodAutoscaler)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]autoscaling.HorizontalPodAutoscaler, len(*in)) + for i := range *in { + if err := Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -125,7 +140,17 @@ func Convert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscal func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]HorizontalPodAutoscaler)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + if err := Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]HorizontalPodAutoscaler, 0) + } return nil } @@ -139,59 +164,224 @@ func autoConvert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAuto } out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) out.MaxReplicas = in.MaxReplicas - out.TargetCPUUtilizationPercentage = (*int32)(unsafe.Pointer(in.TargetCPUUtilizationPercentage)) + // WARNING: in.TargetCPUUtilizationPercentage requires manual conversion: does not exist in peer-type return nil } -func Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s 
conversion.Scope) error { - return autoConvert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in, out, s) -} - func autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { if err := Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { return err } out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) out.MaxReplicas = in.MaxReplicas - out.TargetCPUUtilizationPercentage = (*int32)(unsafe.Pointer(in.TargetCPUUtilizationPercentage)) + // WARNING: in.Metrics requires manual conversion: does not exist in peer-type return nil } -func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { - return autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in, out, s) -} - func autoConvert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) - out.LastScaleTime = (*unversioned.Time)(unsafe.Pointer(in.LastScaleTime)) + out.LastScaleTime = (*meta_v1.Time)(unsafe.Pointer(in.LastScaleTime)) out.CurrentReplicas = in.CurrentReplicas out.DesiredReplicas = in.DesiredReplicas - out.CurrentCPUUtilizationPercentage = (*int32)(unsafe.Pointer(in.CurrentCPUUtilizationPercentage)) + // WARNING: in.CurrentCPUUtilizationPercentage requires manual conversion: does not exist in peer-type return nil } -func Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { - return autoConvert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in, out, s) -} - func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) - out.LastScaleTime = (*unversioned.Time)(unsafe.Pointer(in.LastScaleTime)) + out.LastScaleTime = (*meta_v1.Time)(unsafe.Pointer(in.LastScaleTime)) out.CurrentReplicas = in.CurrentReplicas out.DesiredReplicas = in.DesiredReplicas - out.CurrentCPUUtilizationPercentage = (*int32)(unsafe.Pointer(in.CurrentCPUUtilizationPercentage)) + // WARNING: in.CurrentMetrics requires manual conversion: does not exist in peer-type return nil } -func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { - return autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in, out, s) +func autoConvert_v1_MetricSpec_To_autoscaling_MetricSpec(in *MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error { + out.Type = autoscaling.MetricSourceType(in.Type) + out.Object = (*autoscaling.ObjectMetricSource)(unsafe.Pointer(in.Object)) + out.Pods = (*autoscaling.PodsMetricSource)(unsafe.Pointer(in.Pods)) + out.Resource = 
(*autoscaling.ResourceMetricSource)(unsafe.Pointer(in.Resource)) + return nil } -func autoConvert_v1_Scale_To_autoscaling_Scale(in *Scale, out *autoscaling.Scale, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { +func Convert_v1_MetricSpec_To_autoscaling_MetricSpec(in *MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error { + return autoConvert_v1_MetricSpec_To_autoscaling_MetricSpec(in, out, s) +} + +func autoConvert_autoscaling_MetricSpec_To_v1_MetricSpec(in *autoscaling.MetricSpec, out *MetricSpec, s conversion.Scope) error { + out.Type = MetricSourceType(in.Type) + out.Object = (*ObjectMetricSource)(unsafe.Pointer(in.Object)) + out.Pods = (*PodsMetricSource)(unsafe.Pointer(in.Pods)) + out.Resource = (*ResourceMetricSource)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_autoscaling_MetricSpec_To_v1_MetricSpec(in *autoscaling.MetricSpec, out *MetricSpec, s conversion.Scope) error { + return autoConvert_autoscaling_MetricSpec_To_v1_MetricSpec(in, out, s) +} + +func autoConvert_v1_MetricStatus_To_autoscaling_MetricStatus(in *MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error { + out.Type = autoscaling.MetricSourceType(in.Type) + out.Object = (*autoscaling.ObjectMetricStatus)(unsafe.Pointer(in.Object)) + out.Pods = (*autoscaling.PodsMetricStatus)(unsafe.Pointer(in.Pods)) + out.Resource = (*autoscaling.ResourceMetricStatus)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_v1_MetricStatus_To_autoscaling_MetricStatus(in *MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error { + return autoConvert_v1_MetricStatus_To_autoscaling_MetricStatus(in, out, s) +} + +func autoConvert_autoscaling_MetricStatus_To_v1_MetricStatus(in *autoscaling.MetricStatus, out *MetricStatus, s conversion.Scope) error { + out.Type = MetricSourceType(in.Type) + out.Object = (*ObjectMetricStatus)(unsafe.Pointer(in.Object)) + out.Pods = (*PodsMetricStatus)(unsafe.Pointer(in.Pods)) + out.Resource = (*ResourceMetricStatus)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_autoscaling_MetricStatus_To_v1_MetricStatus(in *autoscaling.MetricStatus, out *MetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_MetricStatus_To_v1_MetricStatus(in, out, s) +} + +func autoConvert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error { + if err := Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.TargetValue = in.TargetValue + return nil +} + +func Convert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error { + return autoConvert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in, out, s) +} + +func autoConvert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *ObjectMetricSource, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.TargetValue = in.TargetValue + return nil +} + +func Convert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *ObjectMetricSource, 
s conversion.Scope) error { + return autoConvert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource(in, out, s) +} + +func autoConvert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error { + if err := Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.CurrentValue = in.CurrentValue + return nil +} + +func Convert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error { + return autoConvert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *ObjectMetricStatus, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { return err } + out.MetricName = in.MetricName + out.CurrentValue = in.CurrentValue + return nil +} + +func Convert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *ObjectMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus(in, out, s) +} + +func autoConvert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error { + out.MetricName = in.MetricName + out.TargetAverageValue = in.TargetAverageValue + return nil +} + +func Convert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error { + return autoConvert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource(in, out, s) +} + +func autoConvert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *PodsMetricSource, s conversion.Scope) error { + out.MetricName = in.MetricName + out.TargetAverageValue = in.TargetAverageValue + return nil +} + +func Convert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *PodsMetricSource, s conversion.Scope) error { + return autoConvert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource(in, out, s) +} + +func autoConvert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error { + out.MetricName = in.MetricName + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error { + return autoConvert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *PodsMetricStatus, s conversion.Scope) error { + out.MetricName = in.MetricName + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *PodsMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus(in, out, s) +} + +func autoConvert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *ResourceMetricSource, out 
*autoscaling.ResourceMetricSource, s conversion.Scope) error { + out.Name = api.ResourceName(in.Name) + out.TargetAverageUtilization = (*int32)(unsafe.Pointer(in.TargetAverageUtilization)) + out.TargetAverageValue = (*resource.Quantity)(unsafe.Pointer(in.TargetAverageValue)) + return nil +} + +func Convert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error { + return autoConvert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in, out, s) +} + +func autoConvert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *ResourceMetricSource, s conversion.Scope) error { + out.Name = api_v1.ResourceName(in.Name) + out.TargetAverageUtilization = (*int32)(unsafe.Pointer(in.TargetAverageUtilization)) + out.TargetAverageValue = (*resource.Quantity)(unsafe.Pointer(in.TargetAverageValue)) + return nil +} + +func Convert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *ResourceMetricSource, s conversion.Scope) error { + return autoConvert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource(in, out, s) +} + +func autoConvert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error { + out.Name = api.ResourceName(in.Name) + out.CurrentAverageUtilization = (*int32)(unsafe.Pointer(in.CurrentAverageUtilization)) + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error { + return autoConvert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *ResourceMetricStatus, s conversion.Scope) error { + out.Name = api_v1.ResourceName(in.Name) + out.CurrentAverageUtilization = (*int32)(unsafe.Pointer(in.CurrentAverageUtilization)) + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *ResourceMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus(in, out, s) +} + +func autoConvert_v1_Scale_To_autoscaling_Scale(in *Scale, out *autoscaling.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta if err := Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -206,10 +396,7 @@ func Convert_v1_Scale_To_autoscaling_Scale(in *Scale, out *autoscaling.Scale, s } func autoConvert_autoscaling_Scale_To_v1_Scale(in *autoscaling.Scale, out *Scale, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go index 70ed6d0279..07047c2b06 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,10 +21,10 @@ limitations under the License. package v1 import ( - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - api_v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" + resource "k8s.io/apimachinery/pkg/api/resource" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" reflect "reflect" ) @@ -41,6 +41,14 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HorizontalPodAutoscalerList, InType: reflect.TypeOf(&HorizontalPodAutoscalerList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HorizontalPodAutoscalerSpec, InType: reflect.TypeOf(&HorizontalPodAutoscalerSpec{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HorizontalPodAutoscalerStatus, InType: reflect.TypeOf(&HorizontalPodAutoscalerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_MetricSpec, InType: reflect.TypeOf(&MetricSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_MetricStatus, InType: reflect.TypeOf(&MetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectMetricSource, InType: reflect.TypeOf(&ObjectMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectMetricStatus, InType: reflect.TypeOf(&ObjectMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodsMetricSource, InType: reflect.TypeOf(&PodsMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodsMetricStatus, InType: reflect.TypeOf(&PodsMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceMetricSource, InType: reflect.TypeOf(&ResourceMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceMetricStatus, InType: reflect.TypeOf(&ResourceMetricStatus{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Scale, InType: reflect.TypeOf(&Scale{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, @@ -51,9 +59,7 @@ func DeepCopy_v1_CrossVersionObjectReference(in interface{}, out interface{}, c { in := in.(*CrossVersionObjectReference) out := out.(*CrossVersionObjectReference) - out.Kind = in.Kind - out.Name = in.Name - out.APIVersion = in.APIVersion + *out = *in return nil } } @@ -62,9 +68,11 @@ func DeepCopy_v1_HorizontalPodAutoscaler(in interface{}, out interface{}, c *con { in := in.(*HorizontalPodAutoscaler) out := out.(*HorizontalPodAutoscaler) - out.TypeMeta = in.TypeMeta - if err := 
api_v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } if err := DeepCopy_v1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -80,8 +88,7 @@ func DeepCopy_v1_HorizontalPodAutoscalerList(in interface{}, out interface{}, c { in := in.(*HorizontalPodAutoscalerList) out := out.(*HorizontalPodAutoscalerList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]HorizontalPodAutoscaler, len(*in)) @@ -90,8 +97,6 @@ func DeepCopy_v1_HorizontalPodAutoscalerList(in interface{}, out interface{}, c return err } } - } else { - out.Items = nil } return nil } @@ -101,21 +106,16 @@ func DeepCopy_v1_HorizontalPodAutoscalerSpec(in interface{}, out interface{}, c { in := in.(*HorizontalPodAutoscalerSpec) out := out.(*HorizontalPodAutoscalerSpec) - out.ScaleTargetRef = in.ScaleTargetRef + *out = *in if in.MinReplicas != nil { in, out := &in.MinReplicas, &out.MinReplicas *out = new(int32) **out = **in - } else { - out.MinReplicas = nil } - out.MaxReplicas = in.MaxReplicas if in.TargetCPUUtilizationPercentage != nil { in, out := &in.TargetCPUUtilizationPercentage, &out.TargetCPUUtilizationPercentage *out = new(int32) **out = **in - } else { - out.TargetCPUUtilizationPercentage = nil } return nil } @@ -125,43 +125,170 @@ func DeepCopy_v1_HorizontalPodAutoscalerStatus(in interface{}, out interface{}, { in := in.(*HorizontalPodAutoscalerStatus) out := out.(*HorizontalPodAutoscalerStatus) + *out = *in if in.ObservedGeneration != nil { in, out := &in.ObservedGeneration, &out.ObservedGeneration *out = new(int64) **out = **in - } else { - out.ObservedGeneration = nil } if in.LastScaleTime != nil { in, out := &in.LastScaleTime, &out.LastScaleTime - *out = new(unversioned.Time) + *out = new(meta_v1.Time) **out = (*in).DeepCopy() - } else { - out.LastScaleTime = nil } - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas if in.CurrentCPUUtilizationPercentage != nil { in, out := &in.CurrentCPUUtilizationPercentage, &out.CurrentCPUUtilizationPercentage *out = new(int32) **out = **in - } else { - out.CurrentCPUUtilizationPercentage = nil } return nil } } +func DeepCopy_v1_MetricSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*MetricSpec) + out := out.(*MetricSpec) + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricSource) + if err := DeepCopy_v1_ObjectMetricSource(*in, *out, c); err != nil { + return err + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricSource) + if err := DeepCopy_v1_PodsMetricSource(*in, *out, c); err != nil { + return err + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricSource) + if err := DeepCopy_v1_ResourceMetricSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_MetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*MetricStatus) + out := out.(*MetricStatus) + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricStatus) + if err := DeepCopy_v1_ObjectMetricStatus(*in, *out, c); err != nil { + return err + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = 
new(PodsMetricStatus) + if err := DeepCopy_v1_PodsMetricStatus(*in, *out, c); err != nil { + return err + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricStatus) + if err := DeepCopy_v1_ResourceMetricStatus(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_ObjectMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMetricSource) + out := out.(*ObjectMetricSource) + *out = *in + out.TargetValue = in.TargetValue.DeepCopy() + return nil + } +} + +func DeepCopy_v1_ObjectMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMetricStatus) + out := out.(*ObjectMetricStatus) + *out = *in + out.CurrentValue = in.CurrentValue.DeepCopy() + return nil + } +} + +func DeepCopy_v1_PodsMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodsMetricSource) + out := out.(*PodsMetricSource) + *out = *in + out.TargetAverageValue = in.TargetAverageValue.DeepCopy() + return nil + } +} + +func DeepCopy_v1_PodsMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodsMetricStatus) + out := out.(*PodsMetricStatus) + *out = *in + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return nil + } +} + +func DeepCopy_v1_ResourceMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceMetricSource) + out := out.(*ResourceMetricSource) + *out = *in + if in.TargetAverageUtilization != nil { + in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization + *out = new(int32) + **out = **in + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + *out = new(resource.Quantity) + **out = (*in).DeepCopy() + } + return nil + } +} + +func DeepCopy_v1_ResourceMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceMetricStatus) + out := out.(*ResourceMetricStatus) + *out = *in + if in.CurrentAverageUtilization != nil { + in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization + *out = new(int32) + **out = **in + } + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return nil + } +} + func DeepCopy_v1_Scale(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*Scale) out := out.(*Scale) - out.TypeMeta = in.TypeMeta - if err := api_v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } - out.Spec = in.Spec - out.Status = in.Status return nil } } @@ -170,7 +297,7 @@ func DeepCopy_v1_ScaleSpec(in interface{}, out interface{}, c *conversion.Cloner { in := in.(*ScaleSpec) out := out.(*ScaleSpec) - out.Replicas = in.Replicas + *out = *in return nil } } @@ -179,8 +306,7 @@ func DeepCopy_v1_ScaleStatus(in interface{}, out interface{}, c *conversion.Clon { in := in.(*ScaleStatus) out := out.(*ScaleStatus) - out.Replicas = in.Replicas - out.Selector = in.Selector + *out = *in return nil } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.defaults.go index 6d43b1a244..af20e98849 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.defaults.go +++ 
b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ limitations under the License. package v1 import ( - runtime "k8s.io/kubernetes/pkg/runtime" + runtime "k8s.io/apimachinery/pkg/runtime" ) // RegisterDefaults adds defaulters functions to the given scheme. diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/BUILD new file mode 100644 index 0000000000..98d475a295 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/BUILD @@ -0,0 +1,50 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "defaults.go", + "doc.go", + "generated.pb.go", + "register.go", + "types.generated.go", + "types.go", + "types_swagger_doc_generated.go", + "zz_generated.conversion.go", + "zz_generated.deepcopy.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + "//pkg/api/v1:go_default_library", + "//pkg/apis/autoscaling:go_default_library", + "//vendor:github.com/gogo/protobuf/proto", + "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/api/resource", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/types", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/defaults.go new file mode 100644 index 0000000000..8764a9f42c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/defaults.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v2alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apis/autoscaling" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return scheme.AddDefaultingFuncs( + SetDefaults_HorizontalPodAutoscaler, + ) +} + +func SetDefaults_HorizontalPodAutoscaler(obj *HorizontalPodAutoscaler) { + if obj.Spec.MinReplicas == nil { + minReplicas := int32(1) + obj.Spec.MinReplicas = &minReplicas + } + + if len(obj.Spec.Metrics) == 0 { + utilizationDefaultVal := int32(autoscaling.DefaultCPUUtilization) + obj.Spec.Metrics = []MetricSpec{ + { + Type: ResourceMetricSourceType, + Resource: &ResourceMetricSource{ + Name: v1.ResourceCPU, + TargetAverageUtilization: &utilizationDefaultVal, + }, + }, + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/doc.go new file mode 100644 index 0000000000..043e77e49a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/autoscaling +// +k8s:openapi-gen=true + +package v2alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.pb.go new file mode 100644 index 0000000000..acce4fd716 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.pb.go @@ -0,0 +1,3062 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v2alpha1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.proto + + It has these top-level messages: + CrossVersionObjectReference + HorizontalPodAutoscaler + HorizontalPodAutoscalerList + HorizontalPodAutoscalerSpec + HorizontalPodAutoscalerStatus + MetricSpec + MetricStatus + ObjectMetricSource + ObjectMetricStatus + PodsMetricSource + PodsMetricStatus + ResourceMetricSource + ResourceMetricStatus +*/ +package v2alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } +func (*CrossVersionObjectReference) ProtoMessage() {} +func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{0} +} + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } +func (*HorizontalPodAutoscalerList) ProtoMessage() {} +func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + +func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } +func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} +func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{3} +} + +func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } +func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} +func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{4} +} + +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } 
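// Illustrative sketch (not part of the generated code in this file): the
// defaulting added in defaults.go above fills in MinReplicas and a default
// CPU-utilization metric whenever a v2alpha1 HorizontalPodAutoscaler spec
// leaves them empty. The function name exampleDefaultedHPA is hypothetical;
// the types, SetDefaults_HorizontalPodAutoscaler, v1.ResourceCPU and
// autoscaling.DefaultCPUUtilization are the identifiers used by defaults.go.
func exampleDefaultedHPA() *HorizontalPodAutoscaler {
	hpa := &HorizontalPodAutoscaler{
		Spec: HorizontalPodAutoscalerSpec{
			ScaleTargetRef: CrossVersionObjectReference{Kind: "Deployment", Name: "web"},
			MaxReplicas:    10,
			// MinReplicas and Metrics are intentionally left unset.
		},
	}
	SetDefaults_HorizontalPodAutoscaler(hpa)
	// After defaulting, *hpa.Spec.MinReplicas == 1 and hpa.Spec.Metrics holds a
	// single entry of type ResourceMetricSourceType for v1.ResourceCPU whose
	// TargetAverageUtilization equals int32(autoscaling.DefaultCPUUtilization).
	return hpa
}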
+func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func init() { + proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v2alpha1.CrossVersionObjectReference") + proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscaler") + proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerList") + proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerSpec") + proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerStatus") + proto.RegisterType((*MetricSpec)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v2alpha1.MetricSpec") + proto.RegisterType((*MetricStatus)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v2alpha1.MetricStatus") + proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v2alpha1.ObjectMetricSource") + proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v2alpha1.ObjectMetricStatus") + proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v2alpha1.PodsMetricSource") + proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v2alpha1.PodsMetricStatus") + proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v2alpha1.ResourceMetricSource") + proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v2alpha1.ResourceMetricStatus") +} +func (m *CrossVersionObjectReference) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CrossVersionObjectReference) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + return i, nil +} + +func (m *HorizontalPodAutoscaler) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscaler) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := 
m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *HorizontalPodAutoscalerList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HorizontalPodAutoscalerSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ScaleTargetRef.Size())) + n5, err := m.ScaleTargetRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + if m.MinReplicas != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.MinReplicas)) + } + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxReplicas)) + if len(m.Metrics) > 0 { + for _, msg := range m.Metrics { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HorizontalPodAutoscalerStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ObservedGeneration != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastScaleTime.Size())) + n6, err := m.LastScaleTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + } + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentReplicas)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DesiredReplicas)) + if len(m.CurrentMetrics) > 0 { + for _, msg := range m.CurrentMetrics { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MetricSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *MetricSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], 
m.Type) + if m.Object != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Object.Size())) + n7, err := m.Object.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.Pods != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Pods.Size())) + n8, err := m.Pods.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.Resource != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Resource.Size())) + n9, err := m.Resource.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} + +func (m *MetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *MetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.Object != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Object.Size())) + n10, err := m.Object.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if m.Pods != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Pods.Size())) + n11, err := m.Pods.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.Resource != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Resource.Size())) + n12, err := m.Resource.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} + +func (m *ObjectMetricSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectMetricSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) + n13, err := m.Target.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetValue.Size())) + n14, err := m.TargetValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n14 + return i, nil +} + +func (m *ObjectMetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectMetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) + n15, err := m.Target.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n15 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentValue.Size())) + n16, err := m.CurrentValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n16 + return i, nil +} + +func (m *PodsMetricSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + 
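// A note on the hand-rolled protobuf wire format used by the MarshalTo methods
// in this file (an illustrative sketch, not generated code; encodeKey and
// appendVarint are hypothetical helpers): every field begins with a key byte
// equal to (field_number << 3) | wire_type, so 0xa is field 1 with wire type 2
// (length-delimited), 0x12 is field 2/type 2, 0x10 is field 2/type 0 (varint),
// and 0x1a is field 3/type 2. Length-delimited fields are then written as a
// varint length followed by the raw bytes, which is what the calls to
// encodeVarintGenerated plus copy or a nested MarshalTo produce above.
func encodeKey(fieldNumber, wireType int) byte {
	// A single key byte is enough for field numbers up to 15.
	return byte(fieldNumber<<3 | wireType)
}

func appendVarint(buf []byte, v uint64) []byte {
	// Base-128 varint: 7 bits per byte, high bit set on every byte but the
	// last, mirroring encodeVarintGenerated defined later in this file.
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}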
+func (m *PodsMetricSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetAverageValue.Size())) + n17, err := m.TargetAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n17 + return i, nil +} + +func (m *PodsMetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodsMetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentAverageValue.Size())) + n18, err := m.CurrentAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n18 + return i, nil +} + +func (m *ResourceMetricSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceMetricSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + if m.TargetAverageUtilization != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetAverageValue.Size())) + n19, err := m.TargetAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n19 + } + return i, nil +} + +func (m *ResourceMetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceMetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + if m.CurrentAverageUtilization != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.CurrentAverageUtilization)) + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentAverageValue.Size())) + n20, err := m.CurrentAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n20 + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m 
*CrossVersionObjectReference) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscaler) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscalerList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + var l int + _ = l + l = m.ScaleTargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.MinReplicas != nil { + n += 1 + sovGenerated(uint64(*m.MinReplicas)) + } + n += 1 + sovGenerated(uint64(m.MaxReplicas)) + if len(m.Metrics) > 0 { + for _, e := range m.Metrics { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + var l int + _ = l + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + l = m.LastScaleTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.DesiredReplicas)) + if len(m.CurrentMetrics) > 0 { + for _, e := range m.CurrentMetrics { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MetricSpec) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ObjectMetricSource) Size() (n int) { + var l int + _ = l + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TargetValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectMetricStatus) Size() (n int) { + var l int + _ = l + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodsMetricSource) Size() (n int) { + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodsMetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceMetricSource) Size() 
(n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ResourceMetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.CurrentAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.CurrentAverageUtilization)) + } + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CrossVersionObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CrossVersionObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscaler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscaler{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, + `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, + `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, + `Metrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Metrics), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, + `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, + `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, + `CurrentMetrics:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentMetrics), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricSpec{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricStatus{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectMetricSource{`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectMetricStatus{`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricSource{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `TargetAverageValue:` + strings.Replace(strings.Replace(this.TargetAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricStatus{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), 
"Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + 
return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) 
+ if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, HorizontalPodAutoscaler{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ScaleTargetRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MinReplicas = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) + } + m.MaxReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MaxReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metrics = append(m.Metrics, MetricSpec{}) + if err := m.Metrics[len(m.Metrics)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ObservedGeneration = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScaleTime == nil { + m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.LastScaleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.CurrentReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) + } + m.DesiredReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.DesiredReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CurrentMetrics = append(m.CurrentMetrics, MetricStatus{}) + if err := m.CurrentMetrics[len(m.CurrentMetrics)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = 
&ObjectMetricSource{} + } + if err := m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricSource{} + } + if err := m.Pods.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricSource{} + } + if err := m.Resource.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricStatus{} + } + if err := m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricStatus{} + } + if err := m.Pods.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricStatus{} + } + if err := m.Resource.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMetricSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMetricStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodsMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodsMetricStatus: wiretype end group for non-group") + 
} + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_kubernetes_pkg_api_v1.ResourceName(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetAverageUtilization = &v + case 3: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetAverageValue == nil { + m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + } + if err := m.TargetAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_kubernetes_pkg_api_v1.ResourceName(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrentAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 1208 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x57, 0x5b, 0x6f, 0x1b, 0x45, + 0x14, 0x8e, 0x2f, 0x49, 0xc3, 0x38, 0x37, 0x26, 0x55, 0xea, 0x26, 0xd4, 0x8e, 0xf6, 0xa9, 0x54, + 0xb0, 0x4b, 0x4c, 0x41, 0x54, 0x08, 0xa1, 0xd8, 0x5c, 0x5a, 0x11, 0xa7, 0x61, 0x1a, 0x2a, 0x04, + 0x48, 0x30, 0x59, 0x4f, 0x9c, 0x21, 0xde, 0x8b, 0x76, 0x66, 0xad, 0x26, 0x52, 0x25, 0x7e, 0x00, + 0x0f, 0xbc, 0xf0, 0x13, 0x90, 0xf8, 0x07, 0x3c, 0x83, 0x84, 0x94, 0xc7, 0xf2, 0xc6, 0x93, 0x45, + 0xdc, 0x37, 0x7e, 0x42, 0x25, 0x2e, 0xda, 0x99, 0xf1, 0x5e, 0xbc, 0x5e, 0x13, 0x87, 0xb4, 0x82, + 0x37, 0xef, 0xcc, 0x39, 0xdf, 0x77, 0xce, 0xf9, 0xce, 0x9c, 0x19, 0x83, 0xb7, 0x0f, 0xdf, 0x60, + 0x3a, 0x75, 0x8c, 0x43, 0x7f, 0x8f, 0x78, 0x36, 0xe1, 0x84, 0x19, 0xee, 0x61, 0xdb, 0xc0, 0x2e, + 0x65, 0x06, 0xf6, 0xb9, 0xc3, 0x4c, 0xdc, 0xa1, 0x76, 0xdb, 0xe8, 0xd6, 0x70, 0xc7, 0x3d, 0xc0, + 0x1b, 0x46, 0x9b, 0xd8, 0xc4, 0xc3, 0x9c, 0xb4, 0x74, 0xd7, 0x73, 0xb8, 0x03, 0x0d, 0x09, 0xa0, + 0x47, 0x00, 0xba, 0x7b, 0xd8, 0xd6, 0x03, 0x00, 0x3d, 0x06, 0xa0, 0x0f, 0x00, 0x56, 0x5f, 0x6e, + 0x53, 0x7e, 0xe0, 0xef, 0xe9, 0xa6, 0x63, 0x19, 0x6d, 0xa7, 0xed, 0x18, 0x02, 0x67, 0xcf, 0xdf, + 0x17, 0x5f, 0xe2, 0x43, 0xfc, 0x92, 0xf8, 0xab, 0x37, 
0x55, 0x80, 0xd8, 0xa5, 0x16, 0x36, 0x0f, + 0xa8, 0x4d, 0xbc, 0xa3, 0x41, 0x88, 0x86, 0x47, 0x98, 0xe3, 0x7b, 0x26, 0x19, 0x8e, 0x6a, 0xac, + 0x17, 0x33, 0x2c, 0xc2, 0xb1, 0xd1, 0x4d, 0xe5, 0xb2, 0x6a, 0x64, 0x79, 0x79, 0xbe, 0xcd, 0xa9, + 0x95, 0xa6, 0x79, 0xfd, 0x9f, 0x1c, 0x98, 0x79, 0x40, 0x2c, 0x9c, 0xf2, 0x7b, 0x35, 0xcb, 0xcf, + 0xe7, 0xb4, 0x63, 0x50, 0x9b, 0x33, 0xee, 0x8d, 0xcb, 0x89, 0x11, 0xaf, 0x4b, 0xbc, 0x28, 0x21, + 0xf2, 0x00, 0x5b, 0x6e, 0x87, 0x8c, 0xca, 0xe9, 0xa5, 0x4c, 0x81, 0x47, 0x59, 0xdf, 0x3a, 0x6b, + 0x3b, 0xa4, 0x5c, 0xb5, 0x6f, 0x73, 0x60, 0xad, 0xe1, 0x39, 0x8c, 0xdd, 0x27, 0x1e, 0xa3, 0x8e, + 0x7d, 0x77, 0xef, 0x4b, 0x62, 0x72, 0x44, 0xf6, 0x89, 0x47, 0x6c, 0x93, 0xc0, 0x75, 0x50, 0x3c, + 0xa4, 0x76, 0xab, 0x9c, 0x5b, 0xcf, 0x5d, 0x7f, 0xae, 0x3e, 0x77, 0xd2, 0xab, 0x4e, 0xf5, 0x7b, + 0xd5, 0xe2, 0x07, 0xd4, 0x6e, 0x21, 0xb1, 0x13, 0x58, 0xd8, 0xd8, 0x22, 0xe5, 0x7c, 0xd2, 0x62, + 0x1b, 0x5b, 0x04, 0x89, 0x1d, 0x58, 0x03, 0x00, 0xbb, 0x54, 0x11, 0x94, 0x0b, 0xc2, 0x0e, 0x2a, + 0x3b, 0xb0, 0xb9, 0x73, 0x47, 0xed, 0xa0, 0x98, 0x95, 0xf6, 0x38, 0x0f, 0xae, 0xdc, 0x76, 0x3c, + 0x7a, 0xec, 0xd8, 0x1c, 0x77, 0x76, 0x9c, 0xd6, 0xa6, 0xca, 0x83, 0x78, 0xf0, 0x0b, 0x30, 0x1b, + 0xf4, 0x42, 0x0b, 0x73, 0x2c, 0xe2, 0x2a, 0xd5, 0x5e, 0xd1, 0x55, 0x3f, 0xc7, 0xa5, 0x89, 0x3a, + 0x3a, 0xb0, 0xd6, 0xbb, 0x1b, 0xba, 0x4c, 0xae, 0x49, 0x38, 0x8e, 0xf8, 0xa3, 0x35, 0x14, 0xa2, + 0x42, 0x1b, 0x14, 0x99, 0x4b, 0x4c, 0x91, 0x53, 0xa9, 0xb6, 0xa5, 0x4f, 0x78, 0x5a, 0xf4, 0x8c, + 0xc8, 0xef, 0xb9, 0xc4, 0x8c, 0x2a, 0x14, 0x7c, 0x21, 0xc1, 0x03, 0xbb, 0x60, 0x86, 0x71, 0xcc, + 0x7d, 0x26, 0xaa, 0x53, 0xaa, 0x6d, 0x5f, 0x18, 0xa3, 0x40, 0xad, 0x2f, 0x28, 0xce, 0x19, 0xf9, + 0x8d, 0x14, 0x9b, 0xf6, 0x7b, 0x0e, 0xac, 0x65, 0x78, 0x6e, 0x51, 0xc6, 0xe1, 0x67, 0xa9, 0x4a, + 0xeb, 0x67, 0xab, 0x74, 0xe0, 0x2d, 0xea, 0xbc, 0xa4, 0x98, 0x67, 0x07, 0x2b, 0xb1, 0x2a, 0x5b, + 0x60, 0x9a, 0x72, 0x62, 0xb1, 0x72, 0x7e, 0xbd, 0x70, 0xbd, 0x54, 0xbb, 0x7d, 0x51, 0x49, 0xd7, + 0xe7, 0x15, 0xe9, 0xf4, 0x9d, 0x00, 0x1e, 0x49, 0x16, 0xed, 0xcf, 0x7c, 0x66, 0xb2, 0x81, 0x14, + 0xf0, 0xeb, 0x1c, 0x58, 0x10, 0x9f, 0xbb, 0xd8, 0x6b, 0x93, 0xe0, 0x0c, 0xa8, 0x9c, 0x27, 0xd7, + 0x7f, 0xcc, 0x89, 0xaa, 0xaf, 0xa8, 0xe0, 0x16, 0xee, 0x25, 0xb8, 0xd0, 0x10, 0x37, 0xdc, 0x00, + 0x25, 0x8b, 0xda, 0x88, 0xb8, 0x1d, 0x6a, 0x62, 0x26, 0x5a, 0x71, 0xba, 0xbe, 0xd8, 0xef, 0x55, + 0x4b, 0xcd, 0x68, 0x19, 0xc5, 0x6d, 0xe0, 0x6b, 0xa0, 0x64, 0xe1, 0x07, 0xa1, 0x4b, 0x41, 0xb8, + 0x2c, 0x2b, 0xbe, 0x52, 0x33, 0xda, 0x42, 0x71, 0x3b, 0xb8, 0x0f, 0x2e, 0x59, 0x84, 0x7b, 0xd4, + 0x64, 0xe5, 0xa2, 0x50, 0xe2, 0xcd, 0x89, 0x13, 0x6e, 0x0a, 0x7f, 0xd1, 0xdf, 0x8b, 0x8a, 0xef, + 0x92, 0x5c, 0x63, 0x68, 0x00, 0xae, 0xfd, 0x52, 0x00, 0xd7, 0xc6, 0xf6, 0x29, 0x7c, 0x0f, 0x40, + 0x67, 0x4f, 0x8c, 0xc9, 0xd6, 0xfb, 0x72, 0x50, 0x05, 0x13, 0x23, 0x50, 0xa1, 0x50, 0x5f, 0xe9, + 0xf7, 0xaa, 0xf0, 0x6e, 0x6a, 0x17, 0x8d, 0xf0, 0x80, 0x26, 0x98, 0xef, 0x60, 0xc6, 0x65, 0x85, + 0xa9, 0x1a, 0x4e, 0xa5, 0xda, 0x8d, 0xb3, 0x35, 0x6f, 0xe0, 0x51, 0x7f, 0xbe, 0xdf, 0xab, 0xce, + 0x6f, 0xc5, 0x41, 0x50, 0x12, 0x13, 0x6e, 0x82, 0x45, 0xd3, 0xf7, 0x3c, 0x62, 0xf3, 0xa1, 0x8a, + 0x5f, 0x51, 0x15, 0x58, 0x6c, 0x24, 0xb7, 0xd1, 0xb0, 0x7d, 0x00, 0xd1, 0x22, 0x8c, 0x7a, 0xa4, + 0x15, 0x42, 0x14, 0x93, 0x10, 0xef, 0x24, 0xb7, 0xd1, 0xb0, 0x3d, 0x7c, 0x08, 0x16, 0x14, 0xaa, + 0xaa, 0x77, 0x79, 0x5a, 0x68, 0xf8, 0xd6, 0x79, 0x35, 0x94, 0x13, 0x23, 0xec, 0xd2, 0x46, 0x02, + 0x1c, 0x0d, 0x91, 0x69, 0x7f, 0xe4, 0x01, 0x88, 0xc4, 0x87, 0x37, 0x41, 0x91, 
0x1f, 0xb9, 0x44, + 0x5d, 0x17, 0xeb, 0x83, 0x51, 0xb7, 0x7b, 0xe4, 0x92, 0x27, 0xbd, 0xea, 0x92, 0xb2, 0x14, 0xb7, + 0x7f, 0xb0, 0x86, 0x84, 0x35, 0x6c, 0x83, 0x19, 0x47, 0x9c, 0x12, 0xa5, 0x53, 0x63, 0xe2, 0xd8, + 0xc3, 0x29, 0x1e, 0xc2, 0xd7, 0x41, 0x30, 0xef, 0xd4, 0xe1, 0x53, 0xf0, 0xf0, 0x73, 0x50, 0x74, + 0x9d, 0xd6, 0x60, 0xca, 0x6e, 0x4e, 0x4c, 0xb3, 0xe3, 0xb4, 0x58, 0x82, 0x64, 0x36, 0xc8, 0x2e, + 0x58, 0x45, 0x02, 0x18, 0x3a, 0x60, 0x76, 0xf0, 0xba, 0x11, 0x4a, 0x96, 0x6a, 0xef, 0x4e, 0x4c, + 0x82, 0x14, 0x40, 0x82, 0x68, 0x2e, 0x98, 0xa1, 0x83, 0x1d, 0x14, 0x92, 0x68, 0x7f, 0xe5, 0xc1, + 0x5c, 0x5c, 0xb8, 0xff, 0x86, 0x02, 0xb2, 0x87, 0x9e, 0xb2, 0x02, 0x92, 0xe4, 0x19, 0x28, 0x20, + 0x89, 0xb2, 0x14, 0xf8, 0x2e, 0x0f, 0x60, 0xba, 0xfd, 0x20, 0x07, 0x33, 0x5c, 0xcc, 0xf2, 0xa7, + 0x72, 0x89, 0x84, 0x17, 0xba, 0xba, 0x2f, 0x14, 0x57, 0xf0, 0xd4, 0x92, 0xd3, 0x76, 0x3b, 0x7a, + 0x92, 0x85, 0x4f, 0x9d, 0x66, 0xb8, 0x83, 0x62, 0x56, 0x90, 0x80, 0x92, 0xf4, 0xbe, 0x8f, 0x3b, + 0x3e, 0x51, 0xca, 0x8c, 0xbd, 0xe7, 0xf5, 0x41, 0xf2, 0xfa, 0x87, 0x3e, 0xb6, 0x39, 0xe5, 0x47, + 0xd1, 0x2d, 0xb3, 0x1b, 0x41, 0xa1, 0x38, 0xae, 0xf6, 0xfd, 0x70, 0x9d, 0x64, 0xbf, 0xfe, 0x7f, + 0xea, 0x74, 0x00, 0xe6, 0xd4, 0xf0, 0xfb, 0x37, 0x85, 0xba, 0xac, 0x58, 0xe6, 0x1a, 0x31, 0x2c, + 0x94, 0x40, 0xd6, 0x7e, 0xca, 0x81, 0xa5, 0xe1, 0x51, 0x33, 0x14, 0x72, 0xee, 0x4c, 0x21, 0x1f, + 0x03, 0x28, 0x13, 0xde, 0xec, 0x12, 0x0f, 0xb7, 0x89, 0x0c, 0x3c, 0x7f, 0xae, 0xc0, 0x57, 0x15, + 0x17, 0xdc, 0x4d, 0x21, 0xa2, 0x11, 0x2c, 0xda, 0xcf, 0xc9, 0x24, 0xa4, 0xda, 0xe7, 0x49, 0xe2, + 0x21, 0x58, 0x56, 0xd5, 0xb9, 0x80, 0x2c, 0xd6, 0x14, 0xd9, 0x72, 0x23, 0x0d, 0x89, 0x46, 0xf1, + 0x68, 0x3f, 0xe4, 0xc1, 0xe5, 0x51, 0x23, 0x19, 0x36, 0xd5, 0x1f, 0x1f, 0x99, 0xc5, 0xad, 0xf8, + 0x1f, 0x9f, 0x27, 0xbd, 0xea, 0x8b, 0xe3, 0xfe, 0xc1, 0x85, 0x13, 0x26, 0xf6, 0x2f, 0xe9, 0x63, + 0x50, 0x4e, 0x54, 0xf1, 0x23, 0x4e, 0x3b, 0xf4, 0x58, 0xbe, 0x80, 0xe4, 0xe3, 0xef, 0x85, 0x7e, + 0xaf, 0x5a, 0xde, 0xcd, 0xb0, 0x41, 0x99, 0xde, 0xb0, 0x3b, 0xb2, 0x0b, 0xce, 0xd7, 0xbe, 0x2b, + 0x13, 0x74, 0xc0, 0x8f, 0xe9, 0xca, 0xc9, 0x2e, 0xb8, 0xe0, 0xca, 0x7d, 0x0a, 0xae, 0x26, 0x85, + 0x4b, 0x97, 0xee, 0x5a, 0xbf, 0x57, 0xbd, 0xda, 0xc8, 0x32, 0x42, 0xd9, 0xfe, 0x59, 0xdd, 0x57, + 0x78, 0x36, 0xdd, 0x57, 0xbf, 0x71, 0x72, 0x5a, 0x99, 0x7a, 0x74, 0x5a, 0x99, 0xfa, 0xf5, 0xb4, + 0x32, 0xf5, 0x55, 0xbf, 0x92, 0x3b, 0xe9, 0x57, 0x72, 0x8f, 0xfa, 0x95, 0xdc, 0x6f, 0xfd, 0x4a, + 0xee, 0x9b, 0xc7, 0x95, 0xa9, 0x4f, 0x66, 0x07, 0x83, 0xf0, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x95, 0xf2, 0xec, 0x8a, 0x16, 0x12, 0x00, 0x00, +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.proto new file mode 100644 index 0000000000..27b99c2242 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.proto @@ -0,0 +1,275 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.autoscaling.v2alpha1; + +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v2alpha1"; + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +message CrossVersionObjectReference { + // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + optional string kind = 1; + + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + optional string name = 2; + + // API version of the referent + // +optional + optional string apiVersion = 3; +} + +// HorizontalPodAutoscaler is the configuration for a horizontal pod +// autoscaler, which automatically manages the replica count of any resource +// implementing the scale subresource based on the metrics specified. +message HorizontalPodAutoscaler { + // metadata is the standard object metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the specification for the behaviour of the autoscaler. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + optional HorizontalPodAutoscalerSpec spec = 2; + + // status is the current information about the autoscaler. + // +optional + optional HorizontalPodAutoscalerStatus status = 3; +} + +// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects. +message HorizontalPodAutoscalerList { + // metadata is the standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of horizontal pod autoscaler objects. + repeated HorizontalPodAutoscaler items = 2; +} + +// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. +message HorizontalPodAutoscalerSpec { + // scaleTargetRef points to the target resource to scale, and is used to identify the pods for which metrics + // should be collected, as well as to actually change the replica count. + optional CrossVersionObjectReference scaleTargetRef = 1; + + // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. + // It defaults to 1 pod. + // +optional + optional int32 minReplicas = 2; + + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. + // It cannot be less than minReplicas. + optional int32 maxReplicas = 3; + + // metrics contains the specifications to use to calculate the + // desired replica count (the maximum replica count across all metrics will + // be used). The desired replica count is calculated by multiplying the + // ratio between the target value and the current value by the current + // number of pods.
Ergo, metrics used must decrease as the pod count is + // increased, and vice-versa. See the individual metric source types for + // more information about how each type of metric must respond. + // +optional + repeated MetricSpec metrics = 4; +} + +// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. +message HorizontalPodAutoscalerStatus { + // observedGeneration is the most recent generation observed by this autoscaler. + // +optional + optional int64 observedGeneration = 1; + + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, + // used by the autoscaler to control how often the number of pods is changed. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + + // currentReplicas is current number of replicas of pods managed by this autoscaler, + // as last seen by the autoscaler. + optional int32 currentReplicas = 3; + + // desiredReplicas is the desired number of replicas of pods managed by this autoscaler, + // as last calculated by the autoscaler. + optional int32 desiredReplicas = 4; + + // currentMetrics is the last read state of the metrics used by this autoscaler. + repeated MetricStatus currentMetrics = 5; +} + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +message MetricSpec { + // type is the type of metric source. It should match one of the fields below. + optional string type = 1; + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + optional ObjectMetricSource object = 2; + + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + optional PodsMetricSource pods = 3; + + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ResourceMetricSource resource = 4; +} + +// MetricStatus describes the last-read state of a single metric. +message MetricStatus { + // type is the type of metric source. It will match one of the fields below. + optional string type = 1; + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + optional ObjectMetricStatus object = 2; + + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + optional PodsMetricStatus pods = 3; + + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. 
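The metrics comment in HorizontalPodAutoscalerSpec above boils down to proportional scaling: the desired replica count is the current count scaled by the ratio of the observed metric value to its target. The standalone Go sketch below only illustrates that sentence; the helper name and percentage-style inputs are assumptions, and it deliberately omits the real controller's tolerance band and minReplicas/maxReplicas clamping, so it is not the upstream autoscaler implementation.

```go
package main

import (
	"fmt"
	"math"
)

// desiredReplicas illustrates the rule described in the proto comment:
// desired = ceil(currentValue / targetValue * currentReplicas).
// This is a sketch only; the actual controller also applies a tolerance
// band and clamps the result to minReplicas/maxReplicas.
func desiredReplicas(currentReplicas int32, currentValue, targetValue float64) int32 {
	return int32(math.Ceil(currentValue / targetValue * float64(currentReplicas)))
}

func main() {
	// 4 pods observed at 90% average utilization against a 60% target:
	// ceil(90/60 * 4) = 6 desired replicas.
	fmt.Println(desiredReplicas(4, 90, 60))
}
```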
+ // +optional + optional ResourceMetricStatus resource = 4; +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +message ObjectMetricSource { + // target is the described Kubernetes object. + optional CrossVersionObjectReference target = 1; + + // metricName is the name of the metric in question. + optional string metricName = 2; + + // targetValue is the target value of the metric (as a quantity). + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +message ObjectMetricStatus { + // target is the described Kubernetes object. + optional CrossVersionObjectReference target = 1; + + // metricName is the name of the metric in question. + optional string metricName = 2; + + // currentValue is the current value of the metric (as a quantity). + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +message PodsMetricSource { + // metricName is the name of the metric in question + optional string metricName = 1; + + // targetAverageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +message PodsMetricStatus { + // metricName is the name of the metric in question + optional string metricName = 1; + + // currentAverageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +message ResourceMetricSource { + // name is the name of the resource in question. + optional string name = 1; + + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + optional int32 targetAverageUtilization = 2; + + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g.
CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +message ResourceMetricStatus { + // name is the name of the resource in question. + optional string name = 1; + + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + optional int32 currentAverageUtilization = 2; + + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; +} + diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/register.go new file mode 100644 index 0000000000..2fc437f939 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/register.go @@ -0,0 +1,44 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "autoscaling" + +// SchemeGroupVersion is the group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2alpha1"} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &HorizontalPodAutoscaler{}, + &HorizontalPodAutoscalerList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/types.generated.go new file mode 100644 index 0000000000..f734a161ba --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/types.generated.go @@ -0,0 +1,4621 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
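register.go in the hunk above exposes SchemeBuilder and AddToScheme so that callers can register the new v2alpha1 kinds into a runtime.Scheme. The following is only a hedged usage sketch, assuming the vendored import path and the apimachinery API as shipped here; it is a standalone illustration, not part of this vendor drop.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	v2alpha1 "k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1"
)

func main() {
	// Register the autoscaling/v2alpha1 kinds into a fresh scheme; this is
	// what the exported AddToScheme from register.go is intended for.
	scheme := runtime.NewScheme()
	if err := v2alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// Once addKnownTypes has run, the scheme recognizes the registered kind.
	gvk := v2alpha1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler")
	fmt.Println(scheme.Recognizes(gvk)) // true
}
```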
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package v2alpha1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_resource "k8s.io/apimachinery/pkg/api/resource" + pkg3_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg4_types "k8s.io/apimachinery/pkg/types" + pkg2_v1 "k8s.io/kubernetes/pkg/api/v1" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_resource.Quantity + var v1 pkg3_v1.Time + var v2 pkg4_types.UID + var v3 pkg2_v1.ResourceName + var v4 time.Time + _, _, _, _, _ = v0, v1, v2, v3, v4 + } +} + +func (x *CrossVersionObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CrossVersionObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CrossVersionObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv6 := &x.Name + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv8 := &x.APIVersion + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CrossVersionObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv13 := &x.Name + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.MinReplicas != nil + yyq2[3] = len(x.Metrics) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.ScaleTargetRef + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleTargetRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ScaleTargetRef + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.MinReplicas == nil { + r.EncodeNil() + } else { + yy9 := *x.MinReplicas + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MinReplicas == nil { + r.EncodeNil() + } else { + yy11 := *x.MinReplicas + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeInt(int64(yy11)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.MaxReplicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("maxReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeInt(int64(x.MaxReplicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Metrics == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encSliceMetricSpec(([]MetricSpec)(x.Metrics), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metrics")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Metrics == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + h.encSliceMetricSpec(([]MetricSpec)(x.Metrics), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscalerSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "scaleTargetRef": + if r.TryDecodeAsNil() { + x.ScaleTargetRef = CrossVersionObjectReference{} + } else { + yyv4 := &x.ScaleTargetRef + yyv4.CodecDecodeSelf(d) + } + case "minReplicas": + if r.TryDecodeAsNil() { + if x.MinReplicas != nil { + x.MinReplicas = nil + } + } else { + if x.MinReplicas == nil { + x.MinReplicas = new(int32) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) + } + } + case "maxReplicas": + if r.TryDecodeAsNil() { + x.MaxReplicas = 0 + } else { + yyv7 := &x.MaxReplicas + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } + } + case "metrics": + if r.TryDecodeAsNil() { + x.Metrics = nil + } else { + yyv9 := &x.Metrics + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceMetricSpec((*[]MetricSpec)(yyv9), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscalerSpec) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ScaleTargetRef = CrossVersionObjectReference{} + } else { + yyv12 := &x.ScaleTargetRef + yyv12.CodecDecodeSelf(d) + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.MinReplicas != nil { + x.MinReplicas = nil + } + } else { + if x.MinReplicas == nil { + x.MinReplicas = new(int32) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MaxReplicas = 0 + } else { + yyv15 := &x.MaxReplicas + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(yyv15)) = int32(r.DecodeInt(32)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Metrics = nil + } else { + yyv17 := &x.Metrics + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + h.decSliceMetricSpec((*[]MetricSpec)(yyv17), d) + } + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x MetricSourceType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *MetricSourceType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *MetricSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Object != nil + yyq2[2] = x.Pods != nil + yyq2[3] = x.Resource != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + 
r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("object")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("pods")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *MetricSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *MetricSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) 
+ } + case "object": + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricSource) + } + x.Object.CodecDecodeSelf(d) + } + case "pods": + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricSource) + } + x.Pods.CodecDecodeSelf(d) + } + case "resource": + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricSource) + } + x.Resource.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *MetricSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv9 := &x.Type + yyv9.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricSource) + } + x.Object.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricSource) + } + x.Pods.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricSource) + } + x.Resource.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ObjectMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.Target + 
yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("target")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.Target + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.TargetValue + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.TargetValue + yym15 := z.EncBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.EncExt(yy14) { + } else if !yym15 && z.IsJSONHandle() { + z.EncJSONMarshal(yy14) + } else { + z.EncFallback(yy14) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ObjectMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ObjectMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "target": + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv4 := &x.Target + yyv4.CodecDecodeSelf(d) + } + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv5 := &x.MetricName + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "targetValue": + if r.TryDecodeAsNil() { + x.TargetValue = pkg1_resource.Quantity{} + } else { + yyv7 
:= &x.TargetValue + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ObjectMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv10 := &x.Target + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv11 := &x.MetricName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetValue = pkg1_resource.Quantity{} + } else { + yyv13 := &x.TargetValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(yyv13) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) + } else { + z.DecFallback(yyv13, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodsMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.TargetAverageValue + yym8 := z.EncBinary() + _ = yym8 + if false 
{ + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.TargetAverageValue + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodsMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodsMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv4 := &x.MetricName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "targetAverageValue": + if r.TryDecodeAsNil() { + x.TargetAverageValue = pkg1_resource.Quantity{} + } else { + yyv6 := &x.TargetAverageValue + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodsMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv9 := &x.MetricName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else 
{ + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetAverageValue = pkg1_resource.Quantity{} + } else { + yyv11 := &x.TargetAverageValue + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) + } else { + z.DecFallback(yyv11, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.TargetAverageUtilization != nil + yyq2[2] = x.TargetAverageValue != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf4 := &x.Name + yysf4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf5 := &x.Name + yysf5.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.TargetAverageUtilization == nil { + r.EncodeNil() + } else { + yy7 := *x.TargetAverageUtilization + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetAverageUtilization")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TargetAverageUtilization == nil { + r.EncodeNil() + } else { + yy9 := *x.TargetAverageUtilization + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.TargetAverageValue == nil { + r.EncodeNil() + } else { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.EncExt(x.TargetAverageValue) { + } else if !yym12 && z.IsJSONHandle() { + z.EncJSONMarshal(x.TargetAverageValue) + } else { + z.EncFallback(x.TargetAverageValue) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TargetAverageValue == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(x.TargetAverageValue) { + } else if !yym13 && z.IsJSONHandle() { + 
z.EncJSONMarshal(x.TargetAverageValue) + } else { + z.EncFallback(x.TargetAverageValue) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yyv4.CodecDecodeSelf(d) + } + case "targetAverageUtilization": + if r.TryDecodeAsNil() { + if x.TargetAverageUtilization != nil { + x.TargetAverageUtilization = nil + } + } else { + if x.TargetAverageUtilization == nil { + x.TargetAverageUtilization = new(int32) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int32)(x.TargetAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + case "targetAverageValue": + if r.TryDecodeAsNil() { + if x.TargetAverageValue != nil { + x.TargetAverageValue = nil + } + } else { + if x.TargetAverageValue == nil { + x.TargetAverageValue = new(pkg1_resource.Quantity) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(x.TargetAverageValue) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.TargetAverageValue) + } else { + z.DecFallback(x.TargetAverageValue, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv10 := &x.Name + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TargetAverageUtilization != nil { + x.TargetAverageUtilization = nil + } + } else { + if x.TargetAverageUtilization == nil { + x.TargetAverageUtilization = new(int32) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(x.TargetAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TargetAverageValue != nil { + x.TargetAverageValue = nil + } + } else { + if x.TargetAverageValue == nil { + x.TargetAverageValue = new(pkg1_resource.Quantity) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(x.TargetAverageValue) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.TargetAverageValue) + } else { + z.DecFallback(x.TargetAverageValue, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ObservedGeneration != nil + yyq2[1] = x.LastScaleTime != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy4 := *x.ObservedGeneration + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy6 := *x.ObservedGeneration + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.LastScaleTime == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { + } else if yym9 { + z.EncBinaryMarshal(x.LastScaleTime) + } else if !yym9 && z.IsJSONHandle() { + z.EncJSONMarshal(x.LastScaleTime) + } else { + z.EncFallback(x.LastScaleTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastScaleTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LastScaleTime == nil { + 
r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { + } else if yym10 { + z.EncBinaryMarshal(x.LastScaleTime) + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(x.LastScaleTime) + } else { + z.EncFallback(x.LastScaleTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeInt(int64(x.CurrentReplicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.CurrentReplicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeInt(int64(x.DesiredReplicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("desiredReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.DesiredReplicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.CurrentMetrics == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + h.encSliceMetricStatus(([]MetricStatus)(x.CurrentMetrics), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentMetrics")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CurrentMetrics == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encSliceMetricStatus(([]MetricStatus)(x.CurrentMetrics), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscalerStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) 
+ switch yys3 { + case "observedGeneration": + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + case "lastScaleTime": + if r.TryDecodeAsNil() { + if x.LastScaleTime != nil { + x.LastScaleTime = nil + } + } else { + if x.LastScaleTime == nil { + x.LastScaleTime = new(pkg3_v1.Time) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { + } else if yym7 { + z.DecBinaryUnmarshal(x.LastScaleTime) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.LastScaleTime) + } else { + z.DecFallback(x.LastScaleTime, false) + } + } + case "currentReplicas": + if r.TryDecodeAsNil() { + x.CurrentReplicas = 0 + } else { + yyv8 := &x.CurrentReplicas + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "desiredReplicas": + if r.TryDecodeAsNil() { + x.DesiredReplicas = 0 + } else { + yyv10 := &x.DesiredReplicas + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "currentMetrics": + if r.TryDecodeAsNil() { + x.CurrentMetrics = nil + } else { + yyv12 := &x.CurrentMetrics + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + h.decSliceMetricStatus((*[]MetricStatus)(yyv12), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LastScaleTime != nil { + x.LastScaleTime = nil + } + } else { + if x.LastScaleTime == nil { + x.LastScaleTime = new(pkg3_v1.Time) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { + } else if yym18 { + z.DecBinaryUnmarshal(x.LastScaleTime) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.LastScaleTime) + } else { + z.DecFallback(x.LastScaleTime, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentReplicas = 0 + } else { + yyv19 := &x.CurrentReplicas + yym20 := z.DecBinary() + _ = 
yym20 + if false { + } else { + *((*int32)(yyv19)) = int32(r.DecodeInt(32)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DesiredReplicas = 0 + } else { + yyv21 := &x.DesiredReplicas + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentMetrics = nil + } else { + yyv23 := &x.CurrentMetrics + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + h.decSliceMetricStatus((*[]MetricStatus)(yyv23), d) + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *MetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Object != nil + yyq2[2] = x.Pods != nil + yyq2[3] = x.Resource != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("object")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("pods")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if 
yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *MetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *MetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "object": + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricStatus) + } + x.Object.CodecDecodeSelf(d) + } + case "pods": + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricStatus) + } + x.Pods.CodecDecodeSelf(d) + } + case "resource": + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricStatus) + } + x.Resource.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *MetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv9 := &x.Type + yyv9.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if 
x.Object == nil { + x.Object = new(ObjectMetricStatus) + } + x.Object.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricStatus) + } + x.Pods.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricStatus) + } + x.Resource.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ObjectMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.Target + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("target")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.Target + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.CurrentValue + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.CurrentValue + yym15 := z.EncBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.EncExt(yy14) { + } else if !yym15 && z.IsJSONHandle() { + z.EncJSONMarshal(yy14) + } else { + z.EncFallback(yy14) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } 
else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ObjectMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ObjectMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "target": + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv4 := &x.Target + yyv4.CodecDecodeSelf(d) + } + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv5 := &x.MetricName + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "currentValue": + if r.TryDecodeAsNil() { + x.CurrentValue = pkg1_resource.Quantity{} + } else { + yyv7 := &x.CurrentValue + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ObjectMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv10 := &x.Target + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv11 := &x.MetricName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) 
+ if r.TryDecodeAsNil() { + x.CurrentValue = pkg1_resource.Quantity{} + } else { + yyv13 := &x.CurrentValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(yyv13) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) + } else { + z.DecFallback(yyv13, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodsMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.CurrentAverageValue + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.CurrentAverageValue + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodsMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodsMetricStatus) codecDecodeSelfFromMap(l int, d 
*codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv4 := &x.MetricName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "currentAverageValue": + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg1_resource.Quantity{} + } else { + yyv6 := &x.CurrentAverageValue + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodsMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv9 := &x.MetricName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg1_resource.Quantity{} + } else { + yyv11 := &x.CurrentAverageValue + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) + } else { + z.DecFallback(yyv11, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.CurrentAverageUtilization != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf4 := &x.Name + 
yysf4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf5 := &x.Name + yysf5.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.CurrentAverageUtilization == nil { + r.EncodeNil() + } else { + yy7 := *x.CurrentAverageUtilization + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentAverageUtilization")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CurrentAverageUtilization == nil { + r.EncodeNil() + } else { + yy9 := *x.CurrentAverageUtilization + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.CurrentAverageValue + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.CurrentAverageValue + yym15 := z.EncBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.EncExt(yy14) { + } else if !yym15 && z.IsJSONHandle() { + z.EncJSONMarshal(yy14) + } else { + z.EncFallback(yy14) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yyv4.CodecDecodeSelf(d) + } + case "currentAverageUtilization": + if r.TryDecodeAsNil() { + if 
x.CurrentAverageUtilization != nil { + x.CurrentAverageUtilization = nil + } + } else { + if x.CurrentAverageUtilization == nil { + x.CurrentAverageUtilization = new(int32) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int32)(x.CurrentAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + case "currentAverageValue": + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg1_resource.Quantity{} + } else { + yyv7 := &x.CurrentAverageValue + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv10 := &x.Name + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CurrentAverageUtilization != nil { + x.CurrentAverageUtilization = nil + } + } else { + if x.CurrentAverageUtilization == nil { + x.CurrentAverageUtilization = new(int32) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(x.CurrentAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg1_resource.Quantity{} + } else { + yyv13 := &x.CurrentAverageValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(yyv13) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) + } else { + z.DecFallback(yyv13, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 
+ } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscaler) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscaler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg3_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = HorizontalPodAutoscalerSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = HorizontalPodAutoscalerStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg3_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } 
+ if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = HorizontalPodAutoscalerSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = HorizontalPodAutoscalerStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if 
yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscalerList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg3_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 
= yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg3_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceMetricSpec(v []MetricSpec, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceMetricSpec(v *[]MetricSpec, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []MetricSpec{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]MetricSpec, yyrl1) + } + } else { + yyv1 = make([]MetricSpec, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = MetricSpec{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) 
+ } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, MetricSpec{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = MetricSpec{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, MetricSpec{}) // var yyz1 MetricSpec + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = MetricSpec{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []MetricSpec{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceMetricStatus(v []MetricStatus, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceMetricStatus(v *[]MetricStatus, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []MetricStatus{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]MetricStatus, yyrl1) + } + } else { + yyv1 = make([]MetricStatus, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = MetricStatus{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, MetricStatus{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = MetricStatus{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, MetricStatus{}) // var yyz1 MetricStatus + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = MetricStatus{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []MetricStatus{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutoscaler, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutoscaler, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []HorizontalPodAutoscaler{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 392) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]HorizontalPodAutoscaler, yyrl1) + } + } else { + yyv1 = make([]HorizontalPodAutoscaler, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HorizontalPodAutoscaler{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, HorizontalPodAutoscaler{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HorizontalPodAutoscaler{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, HorizontalPodAutoscaler{}) // var yyz1 HorizontalPodAutoscaler + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = HorizontalPodAutoscaler{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []HorizontalPodAutoscaler{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/types.go new file mode 100644 index 0000000000..6f75afb189 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/types.go @@ -0,0 +1,269 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/pkg/api/v1" +) + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. 
+type CrossVersionObjectReference struct { + // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + // API version of the referent + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` +} + +// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. +type HorizontalPodAutoscalerSpec struct { + // scaleTargetRef points to the target resource to scale, and is used to identify the pods for which metrics + // should be collected, as well as to actually change the replica count. + ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` + // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. + // It defaults to 1 pod. + // +optional + MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. + // It cannot be less than minReplicas. + MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` + // metrics contains the specifications used to calculate the + // desired replica count (the maximum replica count across all metrics will + // be used). The desired replica count is calculated by multiplying the + // ratio between the target value and the current value by the current + // number of pods. Ergo, metrics used must decrease as the pod count is + // increased, and vice-versa. See the individual metric source types for + // more information about how each type of metric must respond. + // +optional + Metrics []MetricSpec `json:"metrics,omitempty" protobuf:"bytes,4,rep,name=metrics"` +} + +// MetricSourceType indicates the type of metric. +type MetricSourceType string + +var ( + // ObjectMetricSourceType is a metric describing a kubernetes object + // (for example, hits-per-second on an Ingress object). + ObjectMetricSourceType MetricSourceType = "Object" + // PodsMetricSourceType is a metric describing each pod in the current scale + // target (for example, transactions-processed-per-second). The values + // will be averaged together before being compared to the target value. + PodsMetricSourceType MetricSourceType = "Pods" + // ResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ResourceMetricSourceType MetricSourceType = "Resource" +) + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +type MetricSpec struct { + // type is the type of metric source. It should match one of the fields below. + Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). 
+ // +optional + Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricSource struct { + // target is the described Kubernetes object. + Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` + + // metricName is the name of the metric in question. + MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // targetValue is the target value of the metric (as a quantity). + TargetValue resource.Quantity `json:"targetValue" protobuf:"bytes,3,name=targetValue"` +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +type PodsMetricSource struct { + // metricName is the name of the metric in question + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // targetAverageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + TargetAverageValue resource.Quantity `json:"targetAverageValue" protobuf:"bytes,2,name=targetAverageValue"` +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ResourceMetricSource struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. 
+ // +optional + TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` +} + +// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. +type HorizontalPodAutoscalerStatus struct { + // observedGeneration is the most recent generation observed by this autoscaler. + // +optional + ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, + // used by the autoscaler to control how often the number of pods is changed. + // +optional + LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` + + // currentReplicas is current number of replicas of pods managed by this autoscaler, + // as last seen by the autoscaler. + CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` + + // desiredReplicas is the desired number of replicas of pods managed by this autoscaler, + // as last calculated by the autoscaler. + DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` + + // currentMetrics is the last read state of the metrics used by this autoscaler. + CurrentMetrics []MetricStatus `json:"currentMetrics" protobuf:"bytes,5,rep,name=currentMetrics"` +} + +// MetricStatus describes the last-read state of a single metric. +type MetricStatus struct { + // type is the type of metric source. It will match one of the fields below. + Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricStatus struct { + // target is the described Kubernetes object. + Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` + + // metricName is the name of the metric in question. + MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // currentValue is the current value of the metric (as a quantity). + CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). 
+type PodsMetricStatus struct { + // metricName is the name of the metric in question + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // currentAverageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,2,name=currentAverageValue"` +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ResourceMetricStatus struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` +} + +// +genclient=true + +// HorizontalPodAutoscaler is the configuration for a horizontal pod +// autoscaler, which automatically manages the replica count of any resource +// implementing the scale subresource based on the metrics specified. +type HorizontalPodAutoscaler struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard object metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the specification for the behaviour of the autoscaler. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // status is the current information about the autoscaler. + // +optional + Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects. +type HorizontalPodAutoscalerList struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of horizontal pod autoscaler objects. 
+ Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..6030e2dc33 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/types_swagger_doc_generated.go @@ -0,0 +1,175 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_CrossVersionObjectReference = map[string]string{ + "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", + "kind": "Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "apiVersion": "API version of the referent", +} + +func (CrossVersionObjectReference) SwaggerDoc() map[string]string { + return map_CrossVersionObjectReference +} + +var map_HorizontalPodAutoscaler = map[string]string{ + "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", + "metadata": "metadata is the standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "spec is the specification for the behaviour of the autoscaler. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", + "status": "status is the current information about the autoscaler.", +} + +func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscaler +} + +var map_HorizontalPodAutoscalerList = map[string]string{ + "": "HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.", + "metadata": "metadata is the standard list metadata.", + "items": "items is the list of horizontal pod autoscaler objects.", +} + +func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerList +} + +var map_HorizontalPodAutoscalerSpec = map[string]string{ + "": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", + "scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to identify the pods for which metrics should be collected, as well as to actually change the replica count.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod.", + "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less than minReplicas.", + "metrics": "metrics contains the specifications used to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated by multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond.", +} + +func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerSpec +} + +var map_HorizontalPodAutoscalerStatus = map[string]string{ + "": "HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.", + "observedGeneration": "observedGeneration is the most recent generation observed by this autoscaler.", + "lastScaleTime": "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, used by the autoscaler to control how often the number of pods is changed.", + "currentReplicas": "currentReplicas is current number of replicas of pods managed by this autoscaler, as last seen by the autoscaler.", + "desiredReplicas": "desiredReplicas is the desired number of replicas of pods managed by this autoscaler, as last calculated by the autoscaler.", + "currentMetrics": "currentMetrics is the last read state of the metrics used by this autoscaler.", +} + +func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerStatus +} + +var map_MetricSpec = map[string]string{ + "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "type": "type is the type of metric source. It should match one of the fields below.", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). 
The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", +} + +func (MetricSpec) SwaggerDoc() map[string]string { + return map_MetricSpec +} + +var map_MetricStatus = map[string]string{ + "": "MetricStatus describes the last-read state of a single metric.", + "type": "type is the type of metric source. It will match one of the fields below.", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", +} + +func (MetricStatus) SwaggerDoc() map[string]string { + return map_MetricStatus +} + +var map_ObjectMetricSource = map[string]string{ + "": "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "target": "target is the described Kubernetes object.", + "metricName": "metricName is the name of the metric in question.", + "targetValue": "targetValue is the target value of the metric (as a quantity).", +} + +func (ObjectMetricSource) SwaggerDoc() map[string]string { + return map_ObjectMetricSource +} + +var map_ObjectMetricStatus = map[string]string{ + "": "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "target": "target is the described Kubernetes object.", + "metricName": "metricName is the name of the metric in question.", + "currentValue": "currentValue is the current value of the metric (as a quantity).", +} + +func (ObjectMetricStatus) SwaggerDoc() map[string]string { + return map_ObjectMetricStatus +} + +var map_PodsMetricSource = map[string]string{ + "": "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). 
The values will be averaged together before being compared to the target value.", + "metricName": "metricName is the name of the metric in question", + "targetAverageValue": "targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity)", +} + +func (PodsMetricSource) SwaggerDoc() map[string]string { + return map_PodsMetricSource +} + +var map_PodsMetricStatus = map[string]string{ + "": "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).", + "metricName": "metricName is the name of the metric in question", + "currentAverageValue": "currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)", +} + +func (PodsMetricStatus) SwaggerDoc() map[string]string { + return map_PodsMetricStatus +} + +var map_ResourceMetricSource = map[string]string{ + "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", +} + +func (ResourceMetricSource) SwaggerDoc() map[string]string { + return map_ResourceMetricSource +} + +var map_ResourceMetricStatus = map[string]string{ + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", + "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", + "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. 
It will always be set, regardless of the corresponding metric specification.", +} + +func (ResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ResourceMetricStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/zz_generated.conversion.go new file mode 100644 index 0000000000..95f4b4847b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/zz_generated.conversion.go @@ -0,0 +1,387 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v2alpha1 + +import ( + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/kubernetes/pkg/api" + api_v1 "k8s.io/kubernetes/pkg/api/v1" + autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference, + Convert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference, + Convert_v2alpha1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler, + Convert_autoscaling_HorizontalPodAutoscaler_To_v2alpha1_HorizontalPodAutoscaler, + Convert_v2alpha1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList, + Convert_autoscaling_HorizontalPodAutoscalerList_To_v2alpha1_HorizontalPodAutoscalerList, + Convert_v2alpha1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec, + Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2alpha1_HorizontalPodAutoscalerSpec, + Convert_v2alpha1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus, + Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2alpha1_HorizontalPodAutoscalerStatus, + Convert_v2alpha1_MetricSpec_To_autoscaling_MetricSpec, + Convert_autoscaling_MetricSpec_To_v2alpha1_MetricSpec, + Convert_v2alpha1_MetricStatus_To_autoscaling_MetricStatus, + Convert_autoscaling_MetricStatus_To_v2alpha1_MetricStatus, + Convert_v2alpha1_ObjectMetricSource_To_autoscaling_ObjectMetricSource, + Convert_autoscaling_ObjectMetricSource_To_v2alpha1_ObjectMetricSource, + Convert_v2alpha1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus, + Convert_autoscaling_ObjectMetricStatus_To_v2alpha1_ObjectMetricStatus, + Convert_v2alpha1_PodsMetricSource_To_autoscaling_PodsMetricSource, + Convert_autoscaling_PodsMetricSource_To_v2alpha1_PodsMetricSource, + Convert_v2alpha1_PodsMetricStatus_To_autoscaling_PodsMetricStatus, + Convert_autoscaling_PodsMetricStatus_To_v2alpha1_PodsMetricStatus, + Convert_v2alpha1_ResourceMetricSource_To_autoscaling_ResourceMetricSource, + Convert_autoscaling_ResourceMetricSource_To_v2alpha1_ResourceMetricSource, + Convert_v2alpha1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus, + Convert_autoscaling_ResourceMetricStatus_To_v2alpha1_ResourceMetricStatus, + ) +} + +func autoConvert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Name = in.Name + out.APIVersion = in.APIVersion + return nil +} + +func Convert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { + return autoConvert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in, out, s) +} + +func autoConvert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *CrossVersionObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Name = in.Name + out.APIVersion = in.APIVersion + return nil +} + +func Convert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *CrossVersionObjectReference, s conversion.Scope) error { + return autoConvert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(in, out, s) +} + +func autoConvert_v2alpha1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { + 
out.ObjectMeta = in.ObjectMeta + if err := Convert_v2alpha1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v2alpha1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v2alpha1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { + return autoConvert_v2alpha1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2alpha1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2alpha1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2alpha1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscaler_To_v2alpha1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2alpha1_HorizontalPodAutoscaler(in, out, s) +} + +func autoConvert_v2alpha1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]autoscaling.HorizontalPodAutoscaler)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v2alpha1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { + return autoConvert_v2alpha1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v2alpha1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]HorizontalPodAutoscaler, 0) + } else { + out.Items = *(*[]HorizontalPodAutoscaler)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscalerList_To_v2alpha1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v2alpha1_HorizontalPodAutoscalerList(in, out, s) +} + +func autoConvert_v2alpha1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { + return err + } + out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) + out.MaxReplicas = in.MaxReplicas + out.Metrics = *(*[]autoscaling.MetricSpec)(unsafe.Pointer(&in.Metrics)) + return nil +} + +func 
Convert_v2alpha1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { + return autoConvert_v2alpha1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v2alpha1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { + return err + } + out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) + out.MaxReplicas = in.MaxReplicas + out.Metrics = *(*[]MetricSpec)(unsafe.Pointer(&in.Metrics)) + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2alpha1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v2alpha1_HorizontalPodAutoscalerSpec(in, out, s) +} + +func autoConvert_v2alpha1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { + out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) + out.LastScaleTime = (*v1.Time)(unsafe.Pointer(in.LastScaleTime)) + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + out.CurrentMetrics = *(*[]autoscaling.MetricStatus)(unsafe.Pointer(&in.CurrentMetrics)) + return nil +} + +func Convert_v2alpha1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { + return autoConvert_v2alpha1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v2alpha1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { + out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) + out.LastScaleTime = (*v1.Time)(unsafe.Pointer(in.LastScaleTime)) + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + if in.CurrentMetrics == nil { + out.CurrentMetrics = make([]MetricStatus, 0) + } else { + out.CurrentMetrics = *(*[]MetricStatus)(unsafe.Pointer(&in.CurrentMetrics)) + } + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2alpha1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v2alpha1_HorizontalPodAutoscalerStatus(in, out, s) +} + +func autoConvert_v2alpha1_MetricSpec_To_autoscaling_MetricSpec(in *MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error { + out.Type = autoscaling.MetricSourceType(in.Type) + out.Object = (*autoscaling.ObjectMetricSource)(unsafe.Pointer(in.Object)) + out.Pods = (*autoscaling.PodsMetricSource)(unsafe.Pointer(in.Pods)) + out.Resource = (*autoscaling.ResourceMetricSource)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_v2alpha1_MetricSpec_To_autoscaling_MetricSpec(in *MetricSpec, out 
*autoscaling.MetricSpec, s conversion.Scope) error { + return autoConvert_v2alpha1_MetricSpec_To_autoscaling_MetricSpec(in, out, s) +} + +func autoConvert_autoscaling_MetricSpec_To_v2alpha1_MetricSpec(in *autoscaling.MetricSpec, out *MetricSpec, s conversion.Scope) error { + out.Type = MetricSourceType(in.Type) + out.Object = (*ObjectMetricSource)(unsafe.Pointer(in.Object)) + out.Pods = (*PodsMetricSource)(unsafe.Pointer(in.Pods)) + out.Resource = (*ResourceMetricSource)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_autoscaling_MetricSpec_To_v2alpha1_MetricSpec(in *autoscaling.MetricSpec, out *MetricSpec, s conversion.Scope) error { + return autoConvert_autoscaling_MetricSpec_To_v2alpha1_MetricSpec(in, out, s) +} + +func autoConvert_v2alpha1_MetricStatus_To_autoscaling_MetricStatus(in *MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error { + out.Type = autoscaling.MetricSourceType(in.Type) + out.Object = (*autoscaling.ObjectMetricStatus)(unsafe.Pointer(in.Object)) + out.Pods = (*autoscaling.PodsMetricStatus)(unsafe.Pointer(in.Pods)) + out.Resource = (*autoscaling.ResourceMetricStatus)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_v2alpha1_MetricStatus_To_autoscaling_MetricStatus(in *MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error { + return autoConvert_v2alpha1_MetricStatus_To_autoscaling_MetricStatus(in, out, s) +} + +func autoConvert_autoscaling_MetricStatus_To_v2alpha1_MetricStatus(in *autoscaling.MetricStatus, out *MetricStatus, s conversion.Scope) error { + out.Type = MetricSourceType(in.Type) + out.Object = (*ObjectMetricStatus)(unsafe.Pointer(in.Object)) + out.Pods = (*PodsMetricStatus)(unsafe.Pointer(in.Pods)) + out.Resource = (*ResourceMetricStatus)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_autoscaling_MetricStatus_To_v2alpha1_MetricStatus(in *autoscaling.MetricStatus, out *MetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_MetricStatus_To_v2alpha1_MetricStatus(in, out, s) +} + +func autoConvert_v2alpha1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error { + if err := Convert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.TargetValue = in.TargetValue + return nil +} + +func Convert_v2alpha1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error { + return autoConvert_v2alpha1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in, out, s) +} + +func autoConvert_autoscaling_ObjectMetricSource_To_v2alpha1_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *ObjectMetricSource, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.TargetValue = in.TargetValue + return nil +} + +func Convert_autoscaling_ObjectMetricSource_To_v2alpha1_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *ObjectMetricSource, s conversion.Scope) error { + return autoConvert_autoscaling_ObjectMetricSource_To_v2alpha1_ObjectMetricSource(in, out, s) +} + +func autoConvert_v2alpha1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) 
error { + if err := Convert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.CurrentValue = in.CurrentValue + return nil +} + +func Convert_v2alpha1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error { + return autoConvert_v2alpha1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_ObjectMetricStatus_To_v2alpha1_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *ObjectMetricStatus, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.CurrentValue = in.CurrentValue + return nil +} + +func Convert_autoscaling_ObjectMetricStatus_To_v2alpha1_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *ObjectMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_ObjectMetricStatus_To_v2alpha1_ObjectMetricStatus(in, out, s) +} + +func autoConvert_v2alpha1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error { + out.MetricName = in.MetricName + out.TargetAverageValue = in.TargetAverageValue + return nil +} + +func Convert_v2alpha1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error { + return autoConvert_v2alpha1_PodsMetricSource_To_autoscaling_PodsMetricSource(in, out, s) +} + +func autoConvert_autoscaling_PodsMetricSource_To_v2alpha1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *PodsMetricSource, s conversion.Scope) error { + out.MetricName = in.MetricName + out.TargetAverageValue = in.TargetAverageValue + return nil +} + +func Convert_autoscaling_PodsMetricSource_To_v2alpha1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *PodsMetricSource, s conversion.Scope) error { + return autoConvert_autoscaling_PodsMetricSource_To_v2alpha1_PodsMetricSource(in, out, s) +} + +func autoConvert_v2alpha1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error { + out.MetricName = in.MetricName + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_v2alpha1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error { + return autoConvert_v2alpha1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_PodsMetricStatus_To_v2alpha1_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *PodsMetricStatus, s conversion.Scope) error { + out.MetricName = in.MetricName + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_autoscaling_PodsMetricStatus_To_v2alpha1_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *PodsMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_PodsMetricStatus_To_v2alpha1_PodsMetricStatus(in, out, s) +} + +func autoConvert_v2alpha1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error { + out.Name = api.ResourceName(in.Name) + out.TargetAverageUtilization = 
(*int32)(unsafe.Pointer(in.TargetAverageUtilization)) + out.TargetAverageValue = (*resource.Quantity)(unsafe.Pointer(in.TargetAverageValue)) + return nil +} + +func Convert_v2alpha1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error { + return autoConvert_v2alpha1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in, out, s) +} + +func autoConvert_autoscaling_ResourceMetricSource_To_v2alpha1_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *ResourceMetricSource, s conversion.Scope) error { + out.Name = api_v1.ResourceName(in.Name) + out.TargetAverageUtilization = (*int32)(unsafe.Pointer(in.TargetAverageUtilization)) + out.TargetAverageValue = (*resource.Quantity)(unsafe.Pointer(in.TargetAverageValue)) + return nil +} + +func Convert_autoscaling_ResourceMetricSource_To_v2alpha1_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *ResourceMetricSource, s conversion.Scope) error { + return autoConvert_autoscaling_ResourceMetricSource_To_v2alpha1_ResourceMetricSource(in, out, s) +} + +func autoConvert_v2alpha1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error { + out.Name = api.ResourceName(in.Name) + out.CurrentAverageUtilization = (*int32)(unsafe.Pointer(in.CurrentAverageUtilization)) + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_v2alpha1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error { + return autoConvert_v2alpha1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_ResourceMetricStatus_To_v2alpha1_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *ResourceMetricStatus, s conversion.Scope) error { + out.Name = api_v1.ResourceName(in.Name) + out.CurrentAverageUtilization = (*int32)(unsafe.Pointer(in.CurrentAverageUtilization)) + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_autoscaling_ResourceMetricStatus_To_v2alpha1_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *ResourceMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_ResourceMetricStatus_To_v2alpha1_ResourceMetricStatus(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..1953eaea66 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/zz_generated.deepcopy.go @@ -0,0 +1,285 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package v2alpha1 + +import ( + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_CrossVersionObjectReference, InType: reflect.TypeOf(&CrossVersionObjectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_HorizontalPodAutoscaler, InType: reflect.TypeOf(&HorizontalPodAutoscaler{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_HorizontalPodAutoscalerList, InType: reflect.TypeOf(&HorizontalPodAutoscalerList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_HorizontalPodAutoscalerSpec, InType: reflect.TypeOf(&HorizontalPodAutoscalerSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_HorizontalPodAutoscalerStatus, InType: reflect.TypeOf(&HorizontalPodAutoscalerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_MetricSpec, InType: reflect.TypeOf(&MetricSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_MetricStatus, InType: reflect.TypeOf(&MetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_ObjectMetricSource, InType: reflect.TypeOf(&ObjectMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_ObjectMetricStatus, InType: reflect.TypeOf(&ObjectMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_PodsMetricSource, InType: reflect.TypeOf(&PodsMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_PodsMetricStatus, InType: reflect.TypeOf(&PodsMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_ResourceMetricSource, InType: reflect.TypeOf(&ResourceMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_ResourceMetricStatus, InType: reflect.TypeOf(&ResourceMetricStatus{})}, + ) +} + +func DeepCopy_v2alpha1_CrossVersionObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CrossVersionObjectReference) + out := out.(*CrossVersionObjectReference) + *out = *in + return nil + } +} + +func DeepCopy_v2alpha1_HorizontalPodAutoscaler(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscaler) + out := out.(*HorizontalPodAutoscaler) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v2alpha1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v2alpha1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v2alpha1_HorizontalPodAutoscalerList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscalerList) + out := out.(*HorizontalPodAutoscalerList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + if err := DeepCopy_v2alpha1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func 
DeepCopy_v2alpha1_HorizontalPodAutoscalerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscalerSpec) + out := out.(*HorizontalPodAutoscalerSpec) + *out = *in + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = **in + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]MetricSpec, len(*in)) + for i := range *in { + if err := DeepCopy_v2alpha1_MetricSpec(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v2alpha1_HorizontalPodAutoscalerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscalerStatus) + out := out.(*HorizontalPodAutoscalerStatus) + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.LastScaleTime != nil { + in, out := &in.LastScaleTime, &out.LastScaleTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + if in.CurrentMetrics != nil { + in, out := &in.CurrentMetrics, &out.CurrentMetrics + *out = make([]MetricStatus, len(*in)) + for i := range *in { + if err := DeepCopy_v2alpha1_MetricStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v2alpha1_MetricSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*MetricSpec) + out := out.(*MetricSpec) + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricSource) + if err := DeepCopy_v2alpha1_ObjectMetricSource(*in, *out, c); err != nil { + return err + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricSource) + if err := DeepCopy_v2alpha1_PodsMetricSource(*in, *out, c); err != nil { + return err + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricSource) + if err := DeepCopy_v2alpha1_ResourceMetricSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v2alpha1_MetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*MetricStatus) + out := out.(*MetricStatus) + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricStatus) + if err := DeepCopy_v2alpha1_ObjectMetricStatus(*in, *out, c); err != nil { + return err + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricStatus) + if err := DeepCopy_v2alpha1_PodsMetricStatus(*in, *out, c); err != nil { + return err + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricStatus) + if err := DeepCopy_v2alpha1_ResourceMetricStatus(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v2alpha1_ObjectMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMetricSource) + out := out.(*ObjectMetricSource) + *out = *in + out.TargetValue = in.TargetValue.DeepCopy() + return nil + } +} + +func DeepCopy_v2alpha1_ObjectMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMetricStatus) + out := out.(*ObjectMetricStatus) + *out = *in + out.CurrentValue = in.CurrentValue.DeepCopy() + return nil + } +} + +func DeepCopy_v2alpha1_PodsMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodsMetricSource) + out 
:= out.(*PodsMetricSource) + *out = *in + out.TargetAverageValue = in.TargetAverageValue.DeepCopy() + return nil + } +} + +func DeepCopy_v2alpha1_PodsMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodsMetricStatus) + out := out.(*PodsMetricStatus) + *out = *in + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return nil + } +} + +func DeepCopy_v2alpha1_ResourceMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceMetricSource) + out := out.(*ResourceMetricSource) + *out = *in + if in.TargetAverageUtilization != nil { + in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization + *out = new(int32) + **out = **in + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + *out = new(resource.Quantity) + **out = (*in).DeepCopy() + } + return nil + } +} + +func DeepCopy_v2alpha1_ResourceMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceMetricStatus) + out := out.(*ResourceMetricStatus) + *out = *in + if in.CurrentAverageUtilization != nil { + in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization + *out = new(int32) + **out = **in + } + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return nil + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go index dde5db7e46..8639502a97 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,10 +21,10 @@ limitations under the License. 
package autoscaling import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" reflect "reflect" ) @@ -41,6 +41,14 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_HorizontalPodAutoscalerList, InType: reflect.TypeOf(&HorizontalPodAutoscalerList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_HorizontalPodAutoscalerSpec, InType: reflect.TypeOf(&HorizontalPodAutoscalerSpec{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_HorizontalPodAutoscalerStatus, InType: reflect.TypeOf(&HorizontalPodAutoscalerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_MetricSpec, InType: reflect.TypeOf(&MetricSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_MetricStatus, InType: reflect.TypeOf(&MetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ObjectMetricSource, InType: reflect.TypeOf(&ObjectMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ObjectMetricStatus, InType: reflect.TypeOf(&ObjectMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_PodsMetricSource, InType: reflect.TypeOf(&PodsMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_PodsMetricStatus, InType: reflect.TypeOf(&PodsMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ResourceMetricSource, InType: reflect.TypeOf(&ResourceMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ResourceMetricStatus, InType: reflect.TypeOf(&ResourceMetricStatus{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_Scale, InType: reflect.TypeOf(&Scale{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, @@ -51,9 +59,7 @@ func DeepCopy_autoscaling_CrossVersionObjectReference(in interface{}, out interf { in := in.(*CrossVersionObjectReference) out := out.(*CrossVersionObjectReference) - out.Kind = in.Kind - out.Name = in.Name - out.APIVersion = in.APIVersion + *out = *in return nil } } @@ -62,9 +68,11 @@ func DeepCopy_autoscaling_HorizontalPodAutoscaler(in interface{}, out interface{ { in := in.(*HorizontalPodAutoscaler) out := out.(*HorizontalPodAutoscaler) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -80,8 +88,7 @@ func DeepCopy_autoscaling_HorizontalPodAutoscalerList(in interface{}, out interf { in := in.(*HorizontalPodAutoscalerList) out := out.(*HorizontalPodAutoscalerList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]HorizontalPodAutoscaler, len(*in)) @@ -90,8 +97,6 @@ func DeepCopy_autoscaling_HorizontalPodAutoscalerList(in interface{}, out interf return err } } - } else { 
- out.Items = nil } return nil } @@ -101,21 +106,20 @@ func DeepCopy_autoscaling_HorizontalPodAutoscalerSpec(in interface{}, out interf { in := in.(*HorizontalPodAutoscalerSpec) out := out.(*HorizontalPodAutoscalerSpec) - out.ScaleTargetRef = in.ScaleTargetRef + *out = *in if in.MinReplicas != nil { in, out := &in.MinReplicas, &out.MinReplicas *out = new(int32) **out = **in - } else { - out.MinReplicas = nil } - out.MaxReplicas = in.MaxReplicas - if in.TargetCPUUtilizationPercentage != nil { - in, out := &in.TargetCPUUtilizationPercentage, &out.TargetCPUUtilizationPercentage - *out = new(int32) - **out = **in - } else { - out.TargetCPUUtilizationPercentage = nil + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]MetricSpec, len(*in)) + for i := range *in { + if err := DeepCopy_autoscaling_MetricSpec(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } } return nil } @@ -125,29 +129,160 @@ func DeepCopy_autoscaling_HorizontalPodAutoscalerStatus(in interface{}, out inte { in := in.(*HorizontalPodAutoscalerStatus) out := out.(*HorizontalPodAutoscalerStatus) + *out = *in if in.ObservedGeneration != nil { in, out := &in.ObservedGeneration, &out.ObservedGeneration *out = new(int64) **out = **in - } else { - out.ObservedGeneration = nil } if in.LastScaleTime != nil { in, out := &in.LastScaleTime, &out.LastScaleTime - *out = new(unversioned.Time) + *out = new(v1.Time) **out = (*in).DeepCopy() - } else { - out.LastScaleTime = nil } - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas - if in.CurrentCPUUtilizationPercentage != nil { - in, out := &in.CurrentCPUUtilizationPercentage, &out.CurrentCPUUtilizationPercentage + if in.CurrentMetrics != nil { + in, out := &in.CurrentMetrics, &out.CurrentMetrics + *out = make([]MetricStatus, len(*in)) + for i := range *in { + if err := DeepCopy_autoscaling_MetricStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_autoscaling_MetricSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*MetricSpec) + out := out.(*MetricSpec) + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricSource) + if err := DeepCopy_autoscaling_ObjectMetricSource(*in, *out, c); err != nil { + return err + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricSource) + if err := DeepCopy_autoscaling_PodsMetricSource(*in, *out, c); err != nil { + return err + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricSource) + if err := DeepCopy_autoscaling_ResourceMetricSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_autoscaling_MetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*MetricStatus) + out := out.(*MetricStatus) + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricStatus) + if err := DeepCopy_autoscaling_ObjectMetricStatus(*in, *out, c); err != nil { + return err + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricStatus) + if err := DeepCopy_autoscaling_PodsMetricStatus(*in, *out, c); err != nil { + return err + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricStatus) + if err := DeepCopy_autoscaling_ResourceMetricStatus(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func 
DeepCopy_autoscaling_ObjectMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMetricSource) + out := out.(*ObjectMetricSource) + *out = *in + out.TargetValue = in.TargetValue.DeepCopy() + return nil + } +} + +func DeepCopy_autoscaling_ObjectMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMetricStatus) + out := out.(*ObjectMetricStatus) + *out = *in + out.CurrentValue = in.CurrentValue.DeepCopy() + return nil + } +} + +func DeepCopy_autoscaling_PodsMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodsMetricSource) + out := out.(*PodsMetricSource) + *out = *in + out.TargetAverageValue = in.TargetAverageValue.DeepCopy() + return nil + } +} + +func DeepCopy_autoscaling_PodsMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodsMetricStatus) + out := out.(*PodsMetricStatus) + *out = *in + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return nil + } +} + +func DeepCopy_autoscaling_ResourceMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceMetricSource) + out := out.(*ResourceMetricSource) + *out = *in + if in.TargetAverageUtilization != nil { + in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization + *out = new(int32) + **out = **in + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + *out = new(resource.Quantity) + **out = (*in).DeepCopy() + } + return nil + } +} + +func DeepCopy_autoscaling_ResourceMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceMetricStatus) + out := out.(*ResourceMetricStatus) + *out = *in + if in.CurrentAverageUtilization != nil { + in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization *out = new(int32) **out = **in - } else { - out.CurrentCPUUtilizationPercentage = nil } + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() return nil } } @@ -156,12 +291,12 @@ func DeepCopy_autoscaling_Scale(in interface{}, out interface{}, c *conversion.C { in := in.(*Scale) out := out.(*Scale) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } - out.Spec = in.Spec - out.Status = in.Status return nil } } @@ -170,7 +305,7 @@ func DeepCopy_autoscaling_ScaleSpec(in interface{}, out interface{}, c *conversi { in := in.(*ScaleSpec) out := out.(*ScaleSpec) - out.Replicas = in.Replicas + *out = *in return nil } } @@ -179,8 +314,7 @@ func DeepCopy_autoscaling_ScaleStatus(in interface{}, out interface{}, c *conver { in := in.(*ScaleStatus) out := out.(*ScaleStatus) - out.Replicas = in.Replicas - out.Selector = in.Selector + *out = *in return nil } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/batch/BUILD index 8df04f8b7f..f0fc826f32 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -15,19 +12,34 @@ go_library( srcs = [ "doc.go", "register.go", - "types.generated.go", "types.go", "zz_generated.deepcopy.go", ], tags = ["automanaged"], deps = [ 
"//pkg/api:go_default_library", - "//pkg/api/resource:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//pkg/util/intstr:go_default_library", - "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/batch/install:all-srcs", + "//pkg/apis/batch/v1:all-srcs", + "//pkg/apis/batch/v2alpha1:all-srcs", + "//pkg/apis/batch/validation:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/batch/OWNERS new file mode 100755 index 0000000000..502f907711 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/OWNERS @@ -0,0 +1,19 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- caesarxuchao +- erictune +- sttts +- saad-ali +- ncdc +- timothysc +- soltysh +- dims +- errordeveloper +- mml +- mbohlool +- david-mcmahon +- jianhuiz diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/doc.go index f8774e80ad..c6b203cd8e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/doc.go @@ -15,6 +15,5 @@ limitations under the License. */ // +k8s:deepcopy-gen=package,register -// +k8s:openapi-gen=true package batch diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/batch/install/BUILD index 4fd4efc709..a3bacba72e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/install/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -15,9 +12,25 @@ go_library( srcs = ["install.go"], tags = ["automanaged"], deps = [ - "//pkg/apimachinery/announced:go_default_library", + "//pkg/api:go_default_library", "//pkg/apis/batch:go_default_library", "//pkg/apis/batch/v1:go_default_library", "//pkg/apis/batch/v2alpha1:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/announced", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/registered", + "//vendor:k8s.io/apimachinery/pkg/runtime", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/install/install.go index d10932cefb..90befc2e21 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/install/install.go @@ -19,13 +19,21 @@ limitations under the License. 
package install import ( - "k8s.io/kubernetes/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/batch/v1" "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" ) func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { if err := announced.NewGroupMetaFactory( &announced.GroupMetaFactoryArgs{ GroupName: batch.GroupName, @@ -37,7 +45,7 @@ func init() { v1.SchemeGroupVersion.Version: v1.AddToScheme, v2alpha1.SchemeGroupVersion.Version: v2alpha1.AddToScheme, }, - ).Announce().RegisterAndEnable(); err != nil { + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { panic(err) } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/register.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/register.go index 56cc0ec99a..4601ca4ec1 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/register.go @@ -17,24 +17,23 @@ limitations under the License. package batch import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName is the group name use in this package const GroupName = "batch" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} // Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { +func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } // Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { +func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } @@ -51,8 +50,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &JobTemplate{}, &CronJob{}, &CronJobList{}, - &api.ListOptions{}, - &api.DeleteOptions{}, ) scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJob"), &CronJob{}) scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJobList"), &CronJobList{}) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/types.generated.go deleted file mode 100644 index c0aaee07bb..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/types.generated.go +++ /dev/null @@ -1,4676 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package batch - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg2_api "k8s.io/kubernetes/pkg/api" - pkg4_resource "k8s.io/kubernetes/pkg/api/resource" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg3_types "k8s.io/kubernetes/pkg/types" - pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg2_api.ObjectMeta - var v1 pkg4_resource.Quantity - var v2 pkg1_unversioned.TypeMeta - var v3 pkg3_types.UID - var v4 pkg5_intstr.IntOrString - var v5 time.Time - _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 - } -} - -func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yy10.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.ObjectMeta - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy13 := &x.Spec - yy13.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.Spec - yy14.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Status - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Job) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct19 := r.ContainerType() - if yyct19 == codecSelferValueTypeMap1234 { - yyl19 := r.ReadMapStart() - if yyl19 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl19, d) - } - } else if yyct19 == codecSelferValueTypeArray1234 { - yyl19 := r.ReadArrayStart() - if yyl19 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl19, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys20Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys20Slc - var yyhl20 bool = l >= 0 - for yyj20 := 0; ; yyj20++ { - if yyhl20 { - if yyj20 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys20Slc = r.DecodeBytes(yys20Slc, true, true) - yys20 := string(yys20Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys20 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv23 := &x.ObjectMeta - yyv23.CodecDecodeSelf(d) - } - 
case "spec": - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv24 := &x.Spec - yyv24.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = JobStatus{} - } else { - yyv25 := &x.Status - yyv25.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys20) - } // end switch yys20 - } // end for yyj20 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj26 int - var yyb26 bool - var yyhl26 bool = l >= 0 - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv29 := &x.ObjectMeta - yyv29.CodecDecodeSelf(d) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv30 := &x.Spec - yyv30.CodecDecodeSelf(d) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = JobStatus{} - } else { - yyv31 := &x.Status - yyv31.CodecDecodeSelf(d) - } - for { - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj26-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym32 := z.EncBinary() - _ = yym32 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep33 := !z.EncBinary() - yy2arr33 := z.EncBasicHandle().StructToArray - var yyq33 [4]bool - _, _, _ = yysep33, yyq33, yy2arr33 - const yyr33 bool = false - yyq33[0] = x.Kind != "" - yyq33[1] = x.APIVersion != "" - yyq33[2] = true - var yynn33 int - if yyr33 || yy2arr33 { - r.EncodeArrayStart(4) - } else { - yynn33 = 1 - for _, b := range yyq33 { - if b { - yynn33++ - } - } - r.EncodeMapStart(yynn33) - yynn33 = 0 - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[0] { - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, 
string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq33[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[1] { - yym38 := z.EncBinary() - _ = yym38 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq33[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym39 := z.EncBinary() - _ = yym39 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[2] { - yy41 := &x.ListMeta - yym42 := z.EncBinary() - _ = yym42 - if false { - } else if z.HasExtensions() && z.EncExt(yy41) { - } else { - z.EncFallback(yy41) - } - } else { - r.EncodeNil() - } - } else { - if yyq33[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy43 := &x.ListMeta - yym44 := z.EncBinary() - _ = yym44 - if false { - } else if z.HasExtensions() && z.EncExt(yy43) { - } else { - z.EncFallback(yy43) - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym46 := z.EncBinary() - _ = yym46 - if false { - } else { - h.encSliceJob(([]Job)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym47 := z.EncBinary() - _ = yym47 - if false { - } else { - h.encSliceJob(([]Job)(x.Items), e) - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym48 := z.DecBinary() - _ = yym48 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct49 := r.ContainerType() - if yyct49 == codecSelferValueTypeMap1234 { - yyl49 := r.ReadMapStart() - if yyl49 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl49, d) - } - } else if yyct49 == codecSelferValueTypeArray1234 { - yyl49 := r.ReadArrayStart() - if yyl49 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl49, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys50Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys50Slc - var yyhl50 bool = l >= 0 - for yyj50 := 0; ; yyj50++ { - 
if yyhl50 { - if yyj50 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys50Slc = r.DecodeBytes(yys50Slc, true, true) - yys50 := string(yys50Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys50 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv53 := &x.ListMeta - yym54 := z.DecBinary() - _ = yym54 - if false { - } else if z.HasExtensions() && z.DecExt(yyv53) { - } else { - z.DecFallback(yyv53, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv55 := &x.Items - yym56 := z.DecBinary() - _ = yym56 - if false { - } else { - h.decSliceJob((*[]Job)(yyv55), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys50) - } // end switch yys50 - } // end for yyj50 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj57 int - var yyb57 bool - var yyhl57 bool = l >= 0 - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv60 := &x.ListMeta - yym61 := z.DecBinary() - _ = yym61 - if false { - } else if z.HasExtensions() && z.DecExt(yyv60) { - } else { - z.DecFallback(yyv60, false) - } - } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv62 := &x.Items - yym63 := z.DecBinary() - _ = yym63 - if false { - } else { - h.decSliceJob((*[]Job)(yyv62), d) - } - } - for { - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj57-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobTemplate) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym64 := z.EncBinary() - _ = yym64 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep65 := !z.EncBinary() - 
yy2arr65 := z.EncBasicHandle().StructToArray - var yyq65 [4]bool - _, _, _ = yysep65, yyq65, yy2arr65 - const yyr65 bool = false - yyq65[0] = x.Kind != "" - yyq65[1] = x.APIVersion != "" - yyq65[2] = true - yyq65[3] = true - var yynn65 int - if yyr65 || yy2arr65 { - r.EncodeArrayStart(4) - } else { - yynn65 = 0 - for _, b := range yyq65 { - if b { - yynn65++ - } - } - r.EncodeMapStart(yynn65) - yynn65 = 0 - } - if yyr65 || yy2arr65 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq65[0] { - yym67 := z.EncBinary() - _ = yym67 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq65[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym68 := z.EncBinary() - _ = yym68 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr65 || yy2arr65 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq65[1] { - yym70 := z.EncBinary() - _ = yym70 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq65[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym71 := z.EncBinary() - _ = yym71 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr65 || yy2arr65 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq65[2] { - yy73 := &x.ObjectMeta - yy73.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq65[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy74 := &x.ObjectMeta - yy74.CodecEncodeSelf(e) - } - } - if yyr65 || yy2arr65 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq65[3] { - yy76 := &x.Template - yy76.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq65[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy77 := &x.Template - yy77.CodecEncodeSelf(e) - } - } - if yyr65 || yy2arr65 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobTemplate) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym78 := z.DecBinary() - _ = yym78 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct79 := r.ContainerType() - if yyct79 == codecSelferValueTypeMap1234 { - yyl79 := r.ReadMapStart() - if yyl79 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl79, d) - } - } else if yyct79 == codecSelferValueTypeArray1234 { - yyl79 := r.ReadArrayStart() - if yyl79 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl79, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobTemplate) 
codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys80Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys80Slc - var yyhl80 bool = l >= 0 - for yyj80 := 0; ; yyj80++ { - if yyhl80 { - if yyj80 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys80Slc = r.DecodeBytes(yys80Slc, true, true) - yys80 := string(yys80Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys80 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv83 := &x.ObjectMeta - yyv83.CodecDecodeSelf(d) - } - case "template": - if r.TryDecodeAsNil() { - x.Template = JobTemplateSpec{} - } else { - yyv84 := &x.Template - yyv84.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys80) - } // end switch yys80 - } // end for yyj80 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj85 int - var yyb85 bool - var yyhl85 bool = l >= 0 - yyj85++ - if yyhl85 { - yyb85 = yyj85 > l - } else { - yyb85 = r.CheckBreak() - } - if yyb85 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj85++ - if yyhl85 { - yyb85 = yyj85 > l - } else { - yyb85 = r.CheckBreak() - } - if yyb85 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj85++ - if yyhl85 { - yyb85 = yyj85 > l - } else { - yyb85 = r.CheckBreak() - } - if yyb85 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv88 := &x.ObjectMeta - yyv88.CodecDecodeSelf(d) - } - yyj85++ - if yyhl85 { - yyb85 = yyj85 > l - } else { - yyb85 = r.CheckBreak() - } - if yyb85 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = JobTemplateSpec{} - } else { - yyv89 := &x.Template - yyv89.CodecDecodeSelf(d) - } - for { - yyj85++ - if yyhl85 { - yyb85 = yyj85 > l - } else { - yyb85 = r.CheckBreak() - } - if yyb85 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj85-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym90 := z.EncBinary() - _ = yym90 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep91 := !z.EncBinary() - yy2arr91 := z.EncBasicHandle().StructToArray - var yyq91 
[2]bool - _, _, _ = yysep91, yyq91, yy2arr91 - const yyr91 bool = false - yyq91[0] = true - yyq91[1] = true - var yynn91 int - if yyr91 || yy2arr91 { - r.EncodeArrayStart(2) - } else { - yynn91 = 0 - for _, b := range yyq91 { - if b { - yynn91++ - } - } - r.EncodeMapStart(yynn91) - yynn91 = 0 - } - if yyr91 || yy2arr91 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq91[0] { - yy93 := &x.ObjectMeta - yy93.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq91[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy94 := &x.ObjectMeta - yy94.CodecEncodeSelf(e) - } - } - if yyr91 || yy2arr91 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq91[1] { - yy96 := &x.Spec - yy96.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq91[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy97 := &x.Spec - yy97.CodecEncodeSelf(e) - } - } - if yyr91 || yy2arr91 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym98 := z.DecBinary() - _ = yym98 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct99 := r.ContainerType() - if yyct99 == codecSelferValueTypeMap1234 { - yyl99 := r.ReadMapStart() - if yyl99 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl99, d) - } - } else if yyct99 == codecSelferValueTypeArray1234 { - yyl99 := r.ReadArrayStart() - if yyl99 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl99, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys100Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys100Slc - var yyhl100 bool = l >= 0 - for yyj100 := 0; ; yyj100++ { - if yyhl100 { - if yyj100 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys100Slc = r.DecodeBytes(yys100Slc, true, true) - yys100 := string(yys100Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys100 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv101 := &x.ObjectMeta - yyv101.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv102 := &x.Spec - yyv102.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys100) - } // end switch yys100 - } // end for yyj100 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj103 int - var yyb103 bool - var yyhl103 bool = l >= 0 - yyj103++ - if yyhl103 { - yyb103 = yyj103 > l - } else { - yyb103 = r.CheckBreak() - } - if yyb103 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv104 := &x.ObjectMeta - yyv104.CodecDecodeSelf(d) - } - yyj103++ - if yyhl103 { - yyb103 = yyj103 > l - } else { - yyb103 = r.CheckBreak() - } - if yyb103 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv105 := &x.Spec - yyv105.CodecDecodeSelf(d) - } - for { - yyj103++ - if yyhl103 { - yyb103 = yyj103 > l - } else { - yyb103 = r.CheckBreak() - } - if yyb103 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj103-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym106 := z.EncBinary() - _ = yym106 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep107 := !z.EncBinary() - yy2arr107 := z.EncBasicHandle().StructToArray - var yyq107 [6]bool - _, _, _ = yysep107, yyq107, yy2arr107 - const yyr107 bool = false - yyq107[0] = x.Parallelism != nil - yyq107[1] = x.Completions != nil - yyq107[2] = x.ActiveDeadlineSeconds != nil - yyq107[3] = x.Selector != nil - yyq107[4] = x.ManualSelector != nil - var yynn107 int - if yyr107 || yy2arr107 { - r.EncodeArrayStart(6) - } else { - yynn107 = 1 - for _, b := range yyq107 { - if b { - yynn107++ - } - } - r.EncodeMapStart(yynn107) - yynn107 = 0 - } - if yyr107 || yy2arr107 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq107[0] { - if x.Parallelism == nil { - r.EncodeNil() - } else { - yy109 := *x.Parallelism - yym110 := z.EncBinary() - _ = yym110 - if false { - } else { - r.EncodeInt(int64(yy109)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq107[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("parallelism")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Parallelism == nil { - r.EncodeNil() - } else { - yy111 := *x.Parallelism - yym112 := z.EncBinary() - _ = yym112 - if false { - } else { - r.EncodeInt(int64(yy111)) - } - } - } - } - if yyr107 || yy2arr107 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq107[1] { - if x.Completions == nil { - r.EncodeNil() - } else { - yy114 := *x.Completions - yym115 := z.EncBinary() - _ = yym115 - if false { - } else { - r.EncodeInt(int64(yy114)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq107[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("completions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Completions == nil { - r.EncodeNil() - } else { - yy116 := *x.Completions - yym117 := z.EncBinary() - _ = yym117 - if false { - } else { - r.EncodeInt(int64(yy116)) - } - } - } - } - if yyr107 || yy2arr107 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq107[2] { - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy119 := *x.ActiveDeadlineSeconds - yym120 := z.EncBinary() - _ = yym120 - if false { - } else { - r.EncodeInt(int64(yy119)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq107[2] { 
- z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy121 := *x.ActiveDeadlineSeconds - yym122 := z.EncBinary() - _ = yym122 - if false { - } else { - r.EncodeInt(int64(yy121)) - } - } - } - } - if yyr107 || yy2arr107 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq107[3] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym124 := z.EncBinary() - _ = yym124 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq107[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym125 := z.EncBinary() - _ = yym125 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr107 || yy2arr107 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq107[4] { - if x.ManualSelector == nil { - r.EncodeNil() - } else { - yy127 := *x.ManualSelector - yym128 := z.EncBinary() - _ = yym128 - if false { - } else { - r.EncodeBool(bool(yy127)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq107[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("manualSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ManualSelector == nil { - r.EncodeNil() - } else { - yy129 := *x.ManualSelector - yym130 := z.EncBinary() - _ = yym130 - if false { - } else { - r.EncodeBool(bool(yy129)) - } - } - } - } - if yyr107 || yy2arr107 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy132 := &x.Template - yy132.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy133 := &x.Template - yy133.CodecEncodeSelf(e) - } - if yyr107 || yy2arr107 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym134 := z.DecBinary() - _ = yym134 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct135 := r.ContainerType() - if yyct135 == codecSelferValueTypeMap1234 { - yyl135 := r.ReadMapStart() - if yyl135 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl135, d) - } - } else if yyct135 == codecSelferValueTypeArray1234 { - yyl135 := r.ReadArrayStart() - if yyl135 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl135, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys136Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys136Slc - var yyhl136 bool = l >= 0 - for yyj136 := 0; ; yyj136++ { - 
if yyhl136 { - if yyj136 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys136Slc = r.DecodeBytes(yys136Slc, true, true) - yys136 := string(yys136Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys136 { - case "parallelism": - if r.TryDecodeAsNil() { - if x.Parallelism != nil { - x.Parallelism = nil - } - } else { - if x.Parallelism == nil { - x.Parallelism = new(int32) - } - yym138 := z.DecBinary() - _ = yym138 - if false { - } else { - *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) - } - } - case "completions": - if r.TryDecodeAsNil() { - if x.Completions != nil { - x.Completions = nil - } - } else { - if x.Completions == nil { - x.Completions = new(int32) - } - yym140 := z.DecBinary() - _ = yym140 - if false { - } else { - *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) - } - } - case "activeDeadlineSeconds": - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym142 := z.DecBinary() - _ = yym142 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym144 := z.DecBinary() - _ = yym144 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "manualSelector": - if r.TryDecodeAsNil() { - if x.ManualSelector != nil { - x.ManualSelector = nil - } - } else { - if x.ManualSelector == nil { - x.ManualSelector = new(bool) - } - yym146 := z.DecBinary() - _ = yym146 - if false { - } else { - *((*bool)(x.ManualSelector)) = r.DecodeBool() - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv147 := &x.Template - yyv147.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys136) - } // end switch yys136 - } // end for yyj136 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj148 int - var yyb148 bool - var yyhl148 bool = l >= 0 - yyj148++ - if yyhl148 { - yyb148 = yyj148 > l - } else { - yyb148 = r.CheckBreak() - } - if yyb148 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Parallelism != nil { - x.Parallelism = nil - } - } else { - if x.Parallelism == nil { - x.Parallelism = new(int32) - } - yym150 := z.DecBinary() - _ = yym150 - if false { - } else { - *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) - } - } - yyj148++ - if yyhl148 { - yyb148 = yyj148 > l - } else { - yyb148 = r.CheckBreak() - } - if yyb148 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Completions != nil { - x.Completions = nil - } - } else { - if x.Completions == nil { - x.Completions = new(int32) - } - yym152 := z.DecBinary() - _ = yym152 - if false { - } else { - *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) - } - } - yyj148++ - if yyhl148 { - yyb148 = yyj148 > l - } else { - 
yyb148 = r.CheckBreak() - } - if yyb148 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym154 := z.DecBinary() - _ = yym154 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj148++ - if yyhl148 { - yyb148 = yyj148 > l - } else { - yyb148 = r.CheckBreak() - } - if yyb148 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym156 := z.DecBinary() - _ = yym156 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj148++ - if yyhl148 { - yyb148 = yyj148 > l - } else { - yyb148 = r.CheckBreak() - } - if yyb148 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ManualSelector != nil { - x.ManualSelector = nil - } - } else { - if x.ManualSelector == nil { - x.ManualSelector = new(bool) - } - yym158 := z.DecBinary() - _ = yym158 - if false { - } else { - *((*bool)(x.ManualSelector)) = r.DecodeBool() - } - } - yyj148++ - if yyhl148 { - yyb148 = yyj148 > l - } else { - yyb148 = r.CheckBreak() - } - if yyb148 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv159 := &x.Template - yyv159.CodecDecodeSelf(d) - } - for { - yyj148++ - if yyhl148 { - yyb148 = yyj148 > l - } else { - yyb148 = r.CheckBreak() - } - if yyb148 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj148-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym160 := z.EncBinary() - _ = yym160 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep161 := !z.EncBinary() - yy2arr161 := z.EncBasicHandle().StructToArray - var yyq161 [6]bool - _, _, _ = yysep161, yyq161, yy2arr161 - const yyr161 bool = false - yyq161[0] = len(x.Conditions) != 0 - yyq161[1] = x.StartTime != nil - yyq161[2] = x.CompletionTime != nil - yyq161[3] = x.Active != 0 - yyq161[4] = x.Succeeded != 0 - yyq161[5] = x.Failed != 0 - var yynn161 int - if yyr161 || yy2arr161 { - r.EncodeArrayStart(6) - } else { - yynn161 = 0 - for _, b := range yyq161 { - if b { - yynn161++ - } - } - r.EncodeMapStart(yynn161) - yynn161 = 0 - } - if yyr161 || yy2arr161 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq161[0] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym163 := z.EncBinary() - _ = yym163 - if false { - } else { - h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq161[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym164 := z.EncBinary() - _ = yym164 - if false { - } else { - h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) - } - } - } - } - if yyr161 || yy2arr161 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq161[1] { - if x.StartTime == nil { - r.EncodeNil() - } else { - yym166 := z.EncBinary() - _ = yym166 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym166 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym166 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq161[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.StartTime == nil { - r.EncodeNil() - } else { - yym167 := z.EncBinary() - _ = yym167 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym167 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym167 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } - } - if yyr161 || yy2arr161 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq161[2] { - if x.CompletionTime == nil { - r.EncodeNil() - } else { - yym169 := z.EncBinary() - _ = yym169 - if false { - } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { - } else if yym169 { - z.EncBinaryMarshal(x.CompletionTime) - } else if !yym169 && z.IsJSONHandle() { - z.EncJSONMarshal(x.CompletionTime) - } else { - z.EncFallback(x.CompletionTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq161[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("completionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CompletionTime == nil { - r.EncodeNil() - } else { - yym170 := z.EncBinary() - _ = yym170 - if false { - } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { - } else if yym170 { - z.EncBinaryMarshal(x.CompletionTime) - } else if !yym170 && z.IsJSONHandle() { - z.EncJSONMarshal(x.CompletionTime) - } else { - z.EncFallback(x.CompletionTime) - } - } - } - } - if yyr161 || yy2arr161 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq161[3] { - yym172 := z.EncBinary() - _ = yym172 - if false { - } else { - r.EncodeInt(int64(x.Active)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq161[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("active")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym173 := z.EncBinary() - _ = yym173 - if false { - } else { - r.EncodeInt(int64(x.Active)) - } - } - } - if yyr161 || yy2arr161 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq161[4] { - yym175 := z.EncBinary() - _ = yym175 - if false { - } else { - r.EncodeInt(int64(x.Succeeded)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq161[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("succeeded")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym176 := z.EncBinary() - _ = yym176 - if false { - } else { - r.EncodeInt(int64(x.Succeeded)) - } - } - } - if yyr161 || 
yy2arr161 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq161[5] { - yym178 := z.EncBinary() - _ = yym178 - if false { - } else { - r.EncodeInt(int64(x.Failed)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq161[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("failed")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym179 := z.EncBinary() - _ = yym179 - if false { - } else { - r.EncodeInt(int64(x.Failed)) - } - } - } - if yyr161 || yy2arr161 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym180 := z.DecBinary() - _ = yym180 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct181 := r.ContainerType() - if yyct181 == codecSelferValueTypeMap1234 { - yyl181 := r.ReadMapStart() - if yyl181 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl181, d) - } - } else if yyct181 == codecSelferValueTypeArray1234 { - yyl181 := r.ReadArrayStart() - if yyl181 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl181, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys182Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys182Slc - var yyhl182 bool = l >= 0 - for yyj182 := 0; ; yyj182++ { - if yyhl182 { - if yyj182 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys182Slc = r.DecodeBytes(yys182Slc, true, true) - yys182 := string(yys182Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys182 { - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv183 := &x.Conditions - yym184 := z.DecBinary() - _ = yym184 - if false { - } else { - h.decSliceJobCondition((*[]JobCondition)(yyv183), d) - } - } - case "startTime": - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg1_unversioned.Time) - } - yym186 := z.DecBinary() - _ = yym186 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym186 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym186 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - case "completionTime": - if r.TryDecodeAsNil() { - if x.CompletionTime != nil { - x.CompletionTime = nil - } - } else { - if x.CompletionTime == nil { - x.CompletionTime = new(pkg1_unversioned.Time) - } - yym188 := z.DecBinary() - _ = yym188 - if false { - } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { - } else if yym188 { - z.DecBinaryUnmarshal(x.CompletionTime) - } else if !yym188 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.CompletionTime) - } else { - z.DecFallback(x.CompletionTime, false) - } - } - case "active": - if r.TryDecodeAsNil() { - x.Active = 0 - } else { - x.Active = int32(r.DecodeInt(32)) - } - case "succeeded": - if r.TryDecodeAsNil() { - x.Succeeded = 0 - } else { - x.Succeeded = 
int32(r.DecodeInt(32)) - } - case "failed": - if r.TryDecodeAsNil() { - x.Failed = 0 - } else { - x.Failed = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys182) - } // end switch yys182 - } // end for yyj182 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj192 int - var yyb192 bool - var yyhl192 bool = l >= 0 - yyj192++ - if yyhl192 { - yyb192 = yyj192 > l - } else { - yyb192 = r.CheckBreak() - } - if yyb192 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv193 := &x.Conditions - yym194 := z.DecBinary() - _ = yym194 - if false { - } else { - h.decSliceJobCondition((*[]JobCondition)(yyv193), d) - } - } - yyj192++ - if yyhl192 { - yyb192 = yyj192 > l - } else { - yyb192 = r.CheckBreak() - } - if yyb192 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg1_unversioned.Time) - } - yym196 := z.DecBinary() - _ = yym196 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym196 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym196 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - yyj192++ - if yyhl192 { - yyb192 = yyj192 > l - } else { - yyb192 = r.CheckBreak() - } - if yyb192 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CompletionTime != nil { - x.CompletionTime = nil - } - } else { - if x.CompletionTime == nil { - x.CompletionTime = new(pkg1_unversioned.Time) - } - yym198 := z.DecBinary() - _ = yym198 - if false { - } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { - } else if yym198 { - z.DecBinaryUnmarshal(x.CompletionTime) - } else if !yym198 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.CompletionTime) - } else { - z.DecFallback(x.CompletionTime, false) - } - } - yyj192++ - if yyhl192 { - yyb192 = yyj192 > l - } else { - yyb192 = r.CheckBreak() - } - if yyb192 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Active = 0 - } else { - x.Active = int32(r.DecodeInt(32)) - } - yyj192++ - if yyhl192 { - yyb192 = yyj192 > l - } else { - yyb192 = r.CheckBreak() - } - if yyb192 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Succeeded = 0 - } else { - x.Succeeded = int32(r.DecodeInt(32)) - } - yyj192++ - if yyhl192 { - yyb192 = yyj192 > l - } else { - yyb192 = r.CheckBreak() - } - if yyb192 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Failed = 0 - } else { - x.Failed = int32(r.DecodeInt(32)) - } - for { - yyj192++ - if yyhl192 { - yyb192 = yyj192 > l - } else { - yyb192 = r.CheckBreak() - } - if yyb192 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj192-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x JobConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym202 := z.EncBinary() - _ = yym202 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *JobConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym203 := z.DecBinary() - _ = yym203 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym204 := z.EncBinary() - _ = yym204 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep205 := !z.EncBinary() - yy2arr205 := z.EncBasicHandle().StructToArray - var yyq205 [6]bool - _, _, _ = yysep205, yyq205, yy2arr205 - const yyr205 bool = false - yyq205[2] = true - yyq205[3] = true - yyq205[4] = x.Reason != "" - yyq205[5] = x.Message != "" - var yynn205 int - if yyr205 || yy2arr205 { - r.EncodeArrayStart(6) - } else { - yynn205 = 2 - for _, b := range yyq205 { - if b { - yynn205++ - } - } - r.EncodeMapStart(yynn205) - yynn205 = 0 - } - if yyr205 || yy2arr205 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr205 || yy2arr205 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym208 := z.EncBinary() - _ = yym208 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym209 := z.EncBinary() - _ = yym209 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } - } - if yyr205 || yy2arr205 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq205[2] { - yy211 := &x.LastProbeTime - yym212 := z.EncBinary() - _ = yym212 - if false { - } else if z.HasExtensions() && z.EncExt(yy211) { - } else if yym212 { - z.EncBinaryMarshal(yy211) - } else if !yym212 && z.IsJSONHandle() { - z.EncJSONMarshal(yy211) - } else { - z.EncFallback(yy211) - } - } else { - r.EncodeNil() - } - } else { - if yyq205[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy213 := &x.LastProbeTime - yym214 := z.EncBinary() - _ = yym214 - if false { - } else if z.HasExtensions() && z.EncExt(yy213) { - } else if yym214 { - z.EncBinaryMarshal(yy213) - } else if !yym214 && z.IsJSONHandle() { - z.EncJSONMarshal(yy213) - } else { - z.EncFallback(yy213) - } - } - } - if yyr205 || yy2arr205 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq205[3] { - yy216 := &x.LastTransitionTime - yym217 := z.EncBinary() - _ = yym217 - if false { - } else if z.HasExtensions() && z.EncExt(yy216) { - } else if yym217 { - z.EncBinaryMarshal(yy216) - } else if !yym217 && z.IsJSONHandle() { - z.EncJSONMarshal(yy216) - } else { - z.EncFallback(yy216) - } - } else { - r.EncodeNil() - } - } else { - if yyq205[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy218 := &x.LastTransitionTime - yym219 := z.EncBinary() - _ = yym219 - if false { - } else if z.HasExtensions() && z.EncExt(yy218) { - } else if yym219 { - z.EncBinaryMarshal(yy218) - } else if !yym219 && z.IsJSONHandle() { - z.EncJSONMarshal(yy218) - } else { - z.EncFallback(yy218) - } - } - } - if yyr205 || yy2arr205 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq205[4] { - yym221 := z.EncBinary() - _ = yym221 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq205[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym222 := z.EncBinary() - _ = yym222 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr205 || yy2arr205 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq205[5] { - yym224 := z.EncBinary() - _ = yym224 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq205[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym225 := z.EncBinary() - _ = yym225 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr205 || yy2arr205 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym226 := z.DecBinary() - _ = yym226 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct227 := r.ContainerType() - if yyct227 == codecSelferValueTypeMap1234 { - yyl227 := r.ReadMapStart() - if yyl227 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl227, d) - } - } else if yyct227 == codecSelferValueTypeArray1234 { - yyl227 := r.ReadArrayStart() - if yyl227 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl227, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys228Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys228Slc - var yyhl228 bool = l >= 0 - for yyj228 := 0; ; yyj228++ { - if yyhl228 { - if yyj228 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys228Slc = r.DecodeBytes(yys228Slc, true, true) - yys228 := string(yys228Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys228 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = JobConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg2_api.ConditionStatus(r.DecodeString()) - } - case "lastProbeTime": - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg1_unversioned.Time{} - } else { - yyv231 := &x.LastProbeTime - yym232 := z.DecBinary() - _ = yym232 - if false { - } else if z.HasExtensions() && z.DecExt(yyv231) { - } else if yym232 { - z.DecBinaryUnmarshal(yyv231) - } else if !yym232 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv231) - } else { - z.DecFallback(yyv231, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} - } else { - yyv233 := &x.LastTransitionTime - yym234 := z.DecBinary() - _ = yym234 - if false { - } else if z.HasExtensions() && z.DecExt(yyv233) { - } else if yym234 { - z.DecBinaryUnmarshal(yyv233) - } else if !yym234 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv233) - } else { - z.DecFallback(yyv233, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys228) - } // end switch yys228 - } // end for yyj228 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj237 int - var yyb237 bool - var yyhl237 bool = l >= 0 - yyj237++ - if yyhl237 { - yyb237 = yyj237 > l - } else { - yyb237 = r.CheckBreak() - } - if yyb237 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = JobConditionType(r.DecodeString()) - } - yyj237++ - if yyhl237 { - yyb237 = yyj237 > l - } else { - yyb237 = r.CheckBreak() - } - if yyb237 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg2_api.ConditionStatus(r.DecodeString()) - } - yyj237++ - if yyhl237 { - yyb237 = yyj237 > l - } else { - yyb237 = r.CheckBreak() - } - if yyb237 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg1_unversioned.Time{} - } else { - yyv240 := &x.LastProbeTime - yym241 := z.DecBinary() - _ = yym241 - if false { - } else if z.HasExtensions() && z.DecExt(yyv240) { - } else if yym241 { - z.DecBinaryUnmarshal(yyv240) - } else if !yym241 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv240) - } else { - z.DecFallback(yyv240, false) - } - } - yyj237++ - if yyhl237 { - yyb237 = yyj237 > l - } else { - yyb237 = r.CheckBreak() - } - if yyb237 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} - } 
else { - yyv242 := &x.LastTransitionTime - yym243 := z.DecBinary() - _ = yym243 - if false { - } else if z.HasExtensions() && z.DecExt(yyv242) { - } else if yym243 { - z.DecBinaryUnmarshal(yyv242) - } else if !yym243 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv242) - } else { - z.DecFallback(yyv242, false) - } - } - yyj237++ - if yyhl237 { - yyb237 = yyj237 > l - } else { - yyb237 = r.CheckBreak() - } - if yyb237 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj237++ - if yyhl237 { - yyb237 = yyj237 > l - } else { - yyb237 = r.CheckBreak() - } - if yyb237 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj237++ - if yyhl237 { - yyb237 = yyj237 > l - } else { - yyb237 = r.CheckBreak() - } - if yyb237 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj237-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CronJob) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym246 := z.EncBinary() - _ = yym246 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep247 := !z.EncBinary() - yy2arr247 := z.EncBasicHandle().StructToArray - var yyq247 [5]bool - _, _, _ = yysep247, yyq247, yy2arr247 - const yyr247 bool = false - yyq247[0] = x.Kind != "" - yyq247[1] = x.APIVersion != "" - yyq247[2] = true - yyq247[3] = true - yyq247[4] = true - var yynn247 int - if yyr247 || yy2arr247 { - r.EncodeArrayStart(5) - } else { - yynn247 = 0 - for _, b := range yyq247 { - if b { - yynn247++ - } - } - r.EncodeMapStart(yynn247) - yynn247 = 0 - } - if yyr247 || yy2arr247 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq247[0] { - yym249 := z.EncBinary() - _ = yym249 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq247[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym250 := z.EncBinary() - _ = yym250 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr247 || yy2arr247 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq247[1] { - yym252 := z.EncBinary() - _ = yym252 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq247[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym253 := z.EncBinary() - _ = yym253 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr247 || yy2arr247 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq247[2] { - yy255 := &x.ObjectMeta - yy255.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq247[2] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy256 := &x.ObjectMeta - yy256.CodecEncodeSelf(e) - } - } - if yyr247 || yy2arr247 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq247[3] { - yy258 := &x.Spec - yy258.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq247[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy259 := &x.Spec - yy259.CodecEncodeSelf(e) - } - } - if yyr247 || yy2arr247 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq247[4] { - yy261 := &x.Status - yy261.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq247[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy262 := &x.Status - yy262.CodecEncodeSelf(e) - } - } - if yyr247 || yy2arr247 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CronJob) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym263 := z.DecBinary() - _ = yym263 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct264 := r.ContainerType() - if yyct264 == codecSelferValueTypeMap1234 { - yyl264 := r.ReadMapStart() - if yyl264 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl264, d) - } - } else if yyct264 == codecSelferValueTypeArray1234 { - yyl264 := r.ReadArrayStart() - if yyl264 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl264, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CronJob) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys265Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys265Slc - var yyhl265 bool = l >= 0 - for yyj265 := 0; ; yyj265++ { - if yyhl265 { - if yyj265 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys265Slc = r.DecodeBytes(yys265Slc, true, true) - yys265 := string(yys265Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys265 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv268 := &x.ObjectMeta - yyv268.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = CronJobSpec{} - } else { - yyv269 := &x.Spec - yyv269.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = CronJobStatus{} - } else { - yyv270 := &x.Status - yyv270.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys265) - } // end switch yys265 - } // end for yyj265 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CronJob) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj271 int - var yyb271 bool - var yyhl271 bool = l >= 0 - yyj271++ - if yyhl271 { - yyb271 = yyj271 > l - } else { - yyb271 = r.CheckBreak() - } - if yyb271 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj271++ - if yyhl271 { - yyb271 = yyj271 > l - } else { - yyb271 = r.CheckBreak() - } - if yyb271 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj271++ - if yyhl271 { - yyb271 = yyj271 > l - } else { - yyb271 = r.CheckBreak() - } - if yyb271 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv274 := &x.ObjectMeta - yyv274.CodecDecodeSelf(d) - } - yyj271++ - if yyhl271 { - yyb271 = yyj271 > l - } else { - yyb271 = r.CheckBreak() - } - if yyb271 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = CronJobSpec{} - } else { - yyv275 := &x.Spec - yyv275.CodecDecodeSelf(d) - } - yyj271++ - if yyhl271 { - yyb271 = yyj271 > l - } else { - yyb271 = r.CheckBreak() - } - if yyb271 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = CronJobStatus{} - } else { - yyv276 := &x.Status - yyv276.CodecDecodeSelf(d) - } - for { - yyj271++ - if yyhl271 { - yyb271 = yyj271 > l - } else { - yyb271 = r.CheckBreak() - } - if yyb271 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj271-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CronJobList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym277 := z.EncBinary() - _ = yym277 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep278 := !z.EncBinary() - yy2arr278 := z.EncBasicHandle().StructToArray - var yyq278 [4]bool - _, _, _ = yysep278, yyq278, yy2arr278 - const yyr278 bool = false - yyq278[0] = x.Kind != "" - yyq278[1] = x.APIVersion != "" - yyq278[2] = true - var yynn278 int - if yyr278 || yy2arr278 { - r.EncodeArrayStart(4) - } else { - yynn278 = 1 - for _, b := range yyq278 { - if b { - yynn278++ - } - } - r.EncodeMapStart(yynn278) - yynn278 = 0 - } - if yyr278 || yy2arr278 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq278[0] { - yym280 := z.EncBinary() - _ = yym280 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq278[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym281 := z.EncBinary() - _ = yym281 - if false { - } else { 
- r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr278 || yy2arr278 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq278[1] { - yym283 := z.EncBinary() - _ = yym283 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq278[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym284 := z.EncBinary() - _ = yym284 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr278 || yy2arr278 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq278[2] { - yy286 := &x.ListMeta - yym287 := z.EncBinary() - _ = yym287 - if false { - } else if z.HasExtensions() && z.EncExt(yy286) { - } else { - z.EncFallback(yy286) - } - } else { - r.EncodeNil() - } - } else { - if yyq278[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy288 := &x.ListMeta - yym289 := z.EncBinary() - _ = yym289 - if false { - } else if z.HasExtensions() && z.EncExt(yy288) { - } else { - z.EncFallback(yy288) - } - } - } - if yyr278 || yy2arr278 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym291 := z.EncBinary() - _ = yym291 - if false { - } else { - h.encSliceCronJob(([]CronJob)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym292 := z.EncBinary() - _ = yym292 - if false { - } else { - h.encSliceCronJob(([]CronJob)(x.Items), e) - } - } - } - if yyr278 || yy2arr278 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CronJobList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym293 := z.DecBinary() - _ = yym293 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct294 := r.ContainerType() - if yyct294 == codecSelferValueTypeMap1234 { - yyl294 := r.ReadMapStart() - if yyl294 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl294, d) - } - } else if yyct294 == codecSelferValueTypeArray1234 { - yyl294 := r.ReadArrayStart() - if yyl294 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl294, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CronJobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys295Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys295Slc - var yyhl295 bool = l >= 0 - for yyj295 := 0; ; yyj295++ { - if yyhl295 { - if yyj295 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys295Slc = r.DecodeBytes(yys295Slc, true, true) - yys295 := string(yys295Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys295 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv298 := &x.ListMeta - yym299 := z.DecBinary() - _ = yym299 - if false { - } else if z.HasExtensions() && z.DecExt(yyv298) { - } else { - z.DecFallback(yyv298, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv300 := &x.Items - yym301 := z.DecBinary() - _ = yym301 - if false { - } else { - h.decSliceCronJob((*[]CronJob)(yyv300), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys295) - } // end switch yys295 - } // end for yyj295 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CronJobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj302 int - var yyb302 bool - var yyhl302 bool = l >= 0 - yyj302++ - if yyhl302 { - yyb302 = yyj302 > l - } else { - yyb302 = r.CheckBreak() - } - if yyb302 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj302++ - if yyhl302 { - yyb302 = yyj302 > l - } else { - yyb302 = r.CheckBreak() - } - if yyb302 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj302++ - if yyhl302 { - yyb302 = yyj302 > l - } else { - yyb302 = r.CheckBreak() - } - if yyb302 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv305 := &x.ListMeta - yym306 := z.DecBinary() - _ = yym306 - if false { - } else if z.HasExtensions() && z.DecExt(yyv305) { - } else { - z.DecFallback(yyv305, false) - } - } - yyj302++ - if yyhl302 { - yyb302 = yyj302 > l - } else { - yyb302 = r.CheckBreak() - } - if yyb302 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv307 := &x.Items - yym308 := z.DecBinary() - _ = yym308 - if false { - } else { - h.decSliceCronJob((*[]CronJob)(yyv307), d) - } - } - for { - yyj302++ - if yyhl302 { - yyb302 = yyj302 > l - } else { - yyb302 = r.CheckBreak() - } - if yyb302 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj302-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CronJobSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym309 := z.EncBinary() - _ = yym309 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep310 := !z.EncBinary() - yy2arr310 := z.EncBasicHandle().StructToArray - var yyq310 [5]bool - _, _, _ = yysep310, yyq310, yy2arr310 - const yyr310 bool = false - yyq310[1] = 
x.StartingDeadlineSeconds != nil - yyq310[2] = x.ConcurrencyPolicy != "" - yyq310[3] = x.Suspend != nil - var yynn310 int - if yyr310 || yy2arr310 { - r.EncodeArrayStart(5) - } else { - yynn310 = 2 - for _, b := range yyq310 { - if b { - yynn310++ - } - } - r.EncodeMapStart(yynn310) - yynn310 = 0 - } - if yyr310 || yy2arr310 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym312 := z.EncBinary() - _ = yym312 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Schedule)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("schedule")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym313 := z.EncBinary() - _ = yym313 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Schedule)) - } - } - if yyr310 || yy2arr310 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq310[1] { - if x.StartingDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy315 := *x.StartingDeadlineSeconds - yym316 := z.EncBinary() - _ = yym316 - if false { - } else { - r.EncodeInt(int64(yy315)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq310[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startingDeadlineSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.StartingDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy317 := *x.StartingDeadlineSeconds - yym318 := z.EncBinary() - _ = yym318 - if false { - } else { - r.EncodeInt(int64(yy317)) - } - } - } - } - if yyr310 || yy2arr310 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq310[2] { - x.ConcurrencyPolicy.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq310[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrencyPolicy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.ConcurrencyPolicy.CodecEncodeSelf(e) - } - } - if yyr310 || yy2arr310 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq310[3] { - if x.Suspend == nil { - r.EncodeNil() - } else { - yy321 := *x.Suspend - yym322 := z.EncBinary() - _ = yym322 - if false { - } else { - r.EncodeBool(bool(yy321)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq310[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("suspend")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Suspend == nil { - r.EncodeNil() - } else { - yy323 := *x.Suspend - yym324 := z.EncBinary() - _ = yym324 - if false { - } else { - r.EncodeBool(bool(yy323)) - } - } - } - } - if yyr310 || yy2arr310 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy326 := &x.JobTemplate - yy326.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("jobTemplate")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy327 := &x.JobTemplate - yy327.CodecEncodeSelf(e) - } - if yyr310 || yy2arr310 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CronJobSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym328 := z.DecBinary() - _ = yym328 - if false { - } 
else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct329 := r.ContainerType() - if yyct329 == codecSelferValueTypeMap1234 { - yyl329 := r.ReadMapStart() - if yyl329 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl329, d) - } - } else if yyct329 == codecSelferValueTypeArray1234 { - yyl329 := r.ReadArrayStart() - if yyl329 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl329, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CronJobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys330Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys330Slc - var yyhl330 bool = l >= 0 - for yyj330 := 0; ; yyj330++ { - if yyhl330 { - if yyj330 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys330Slc = r.DecodeBytes(yys330Slc, true, true) - yys330 := string(yys330Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys330 { - case "schedule": - if r.TryDecodeAsNil() { - x.Schedule = "" - } else { - x.Schedule = string(r.DecodeString()) - } - case "startingDeadlineSeconds": - if r.TryDecodeAsNil() { - if x.StartingDeadlineSeconds != nil { - x.StartingDeadlineSeconds = nil - } - } else { - if x.StartingDeadlineSeconds == nil { - x.StartingDeadlineSeconds = new(int64) - } - yym333 := z.DecBinary() - _ = yym333 - if false { - } else { - *((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - case "concurrencyPolicy": - if r.TryDecodeAsNil() { - x.ConcurrencyPolicy = "" - } else { - x.ConcurrencyPolicy = ConcurrencyPolicy(r.DecodeString()) - } - case "suspend": - if r.TryDecodeAsNil() { - if x.Suspend != nil { - x.Suspend = nil - } - } else { - if x.Suspend == nil { - x.Suspend = new(bool) - } - yym336 := z.DecBinary() - _ = yym336 - if false { - } else { - *((*bool)(x.Suspend)) = r.DecodeBool() - } - } - case "jobTemplate": - if r.TryDecodeAsNil() { - x.JobTemplate = JobTemplateSpec{} - } else { - yyv337 := &x.JobTemplate - yyv337.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys330) - } // end switch yys330 - } // end for yyj330 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CronJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj338 int - var yyb338 bool - var yyhl338 bool = l >= 0 - yyj338++ - if yyhl338 { - yyb338 = yyj338 > l - } else { - yyb338 = r.CheckBreak() - } - if yyb338 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Schedule = "" - } else { - x.Schedule = string(r.DecodeString()) - } - yyj338++ - if yyhl338 { - yyb338 = yyj338 > l - } else { - yyb338 = r.CheckBreak() - } - if yyb338 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.StartingDeadlineSeconds != nil { - x.StartingDeadlineSeconds = nil - } - } else { - if x.StartingDeadlineSeconds == nil { - x.StartingDeadlineSeconds = new(int64) - } - yym341 := z.DecBinary() - _ = yym341 - if false { - } else { - *((*int64)(x.StartingDeadlineSeconds)) = 
int64(r.DecodeInt(64)) - } - } - yyj338++ - if yyhl338 { - yyb338 = yyj338 > l - } else { - yyb338 = r.CheckBreak() - } - if yyb338 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrencyPolicy = "" - } else { - x.ConcurrencyPolicy = ConcurrencyPolicy(r.DecodeString()) - } - yyj338++ - if yyhl338 { - yyb338 = yyj338 > l - } else { - yyb338 = r.CheckBreak() - } - if yyb338 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Suspend != nil { - x.Suspend = nil - } - } else { - if x.Suspend == nil { - x.Suspend = new(bool) - } - yym344 := z.DecBinary() - _ = yym344 - if false { - } else { - *((*bool)(x.Suspend)) = r.DecodeBool() - } - } - yyj338++ - if yyhl338 { - yyb338 = yyj338 > l - } else { - yyb338 = r.CheckBreak() - } - if yyb338 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.JobTemplate = JobTemplateSpec{} - } else { - yyv345 := &x.JobTemplate - yyv345.CodecDecodeSelf(d) - } - for { - yyj338++ - if yyhl338 { - yyb338 = yyj338 > l - } else { - yyb338 = r.CheckBreak() - } - if yyb338 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj338-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ConcurrencyPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym346 := z.EncBinary() - _ = yym346 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ConcurrencyPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym347 := z.DecBinary() - _ = yym347 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *CronJobStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym348 := z.EncBinary() - _ = yym348 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep349 := !z.EncBinary() - yy2arr349 := z.EncBasicHandle().StructToArray - var yyq349 [2]bool - _, _, _ = yysep349, yyq349, yy2arr349 - const yyr349 bool = false - yyq349[0] = len(x.Active) != 0 - yyq349[1] = x.LastScheduleTime != nil - var yynn349 int - if yyr349 || yy2arr349 { - r.EncodeArrayStart(2) - } else { - yynn349 = 0 - for _, b := range yyq349 { - if b { - yynn349++ - } - } - r.EncodeMapStart(yynn349) - yynn349 = 0 - } - if yyr349 || yy2arr349 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq349[0] { - if x.Active == nil { - r.EncodeNil() - } else { - yym351 := z.EncBinary() - _ = yym351 - if false { - } else { - h.encSliceapi_ObjectReference(([]pkg2_api.ObjectReference)(x.Active), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq349[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("active")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Active == nil { - r.EncodeNil() - } else { - yym352 := z.EncBinary() - _ = yym352 - if 
false { - } else { - h.encSliceapi_ObjectReference(([]pkg2_api.ObjectReference)(x.Active), e) - } - } - } - } - if yyr349 || yy2arr349 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq349[1] { - if x.LastScheduleTime == nil { - r.EncodeNil() - } else { - yym354 := z.EncBinary() - _ = yym354 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) { - } else if yym354 { - z.EncBinaryMarshal(x.LastScheduleTime) - } else if !yym354 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScheduleTime) - } else { - z.EncFallback(x.LastScheduleTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq349[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastScheduleTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LastScheduleTime == nil { - r.EncodeNil() - } else { - yym355 := z.EncBinary() - _ = yym355 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) { - } else if yym355 { - z.EncBinaryMarshal(x.LastScheduleTime) - } else if !yym355 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScheduleTime) - } else { - z.EncFallback(x.LastScheduleTime) - } - } - } - } - if yyr349 || yy2arr349 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CronJobStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym356 := z.DecBinary() - _ = yym356 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct357 := r.ContainerType() - if yyct357 == codecSelferValueTypeMap1234 { - yyl357 := r.ReadMapStart() - if yyl357 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl357, d) - } - } else if yyct357 == codecSelferValueTypeArray1234 { - yyl357 := r.ReadArrayStart() - if yyl357 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl357, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CronJobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys358Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys358Slc - var yyhl358 bool = l >= 0 - for yyj358 := 0; ; yyj358++ { - if yyhl358 { - if yyj358 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys358Slc = r.DecodeBytes(yys358Slc, true, true) - yys358 := string(yys358Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys358 { - case "active": - if r.TryDecodeAsNil() { - x.Active = nil - } else { - yyv359 := &x.Active - yym360 := z.DecBinary() - _ = yym360 - if false { - } else { - h.decSliceapi_ObjectReference((*[]pkg2_api.ObjectReference)(yyv359), d) - } - } - case "lastScheduleTime": - if r.TryDecodeAsNil() { - if x.LastScheduleTime != nil { - x.LastScheduleTime = nil - } - } else { - if x.LastScheduleTime == nil { - x.LastScheduleTime = new(pkg1_unversioned.Time) - } - yym362 := z.DecBinary() - _ = yym362 - if false { - } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) { - } else if yym362 { - z.DecBinaryUnmarshal(x.LastScheduleTime) - } else if !yym362 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScheduleTime) - } else { - 
z.DecFallback(x.LastScheduleTime, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys358) - } // end switch yys358 - } // end for yyj358 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CronJobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj363 int - var yyb363 bool - var yyhl363 bool = l >= 0 - yyj363++ - if yyhl363 { - yyb363 = yyj363 > l - } else { - yyb363 = r.CheckBreak() - } - if yyb363 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Active = nil - } else { - yyv364 := &x.Active - yym365 := z.DecBinary() - _ = yym365 - if false { - } else { - h.decSliceapi_ObjectReference((*[]pkg2_api.ObjectReference)(yyv364), d) - } - } - yyj363++ - if yyhl363 { - yyb363 = yyj363 > l - } else { - yyb363 = r.CheckBreak() - } - if yyb363 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LastScheduleTime != nil { - x.LastScheduleTime = nil - } - } else { - if x.LastScheduleTime == nil { - x.LastScheduleTime = new(pkg1_unversioned.Time) - } - yym367 := z.DecBinary() - _ = yym367 - if false { - } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) { - } else if yym367 { - z.DecBinaryUnmarshal(x.LastScheduleTime) - } else if !yym367 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScheduleTime) - } else { - z.DecFallback(x.LastScheduleTime, false) - } - } - for { - yyj363++ - if yyhl363 { - yyb363 = yyj363 > l - } else { - yyb363 = r.CheckBreak() - } - if yyb363 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj363-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv368 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy369 := &yyv368 - yy369.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv370 := *v - yyh370, yyl370 := z.DecSliceHelperStart() - var yyc370 bool - if yyl370 == 0 { - if yyv370 == nil { - yyv370 = []Job{} - yyc370 = true - } else if len(yyv370) != 0 { - yyv370 = yyv370[:0] - yyc370 = true - } - } else if yyl370 > 0 { - var yyrr370, yyrl370 int - var yyrt370 bool - if yyl370 > cap(yyv370) { - - yyrg370 := len(yyv370) > 0 - yyv2370 := yyv370 - yyrl370, yyrt370 = z.DecInferLen(yyl370, z.DecBasicHandle().MaxInitLen, 800) - if yyrt370 { - if yyrl370 <= cap(yyv370) { - yyv370 = yyv370[:yyrl370] - } else { - yyv370 = make([]Job, yyrl370) - } - } else { - yyv370 = make([]Job, yyrl370) - } - yyc370 = true - yyrr370 = len(yyv370) - if yyrg370 { - copy(yyv370, yyv2370) - } - } else if yyl370 != len(yyv370) { - yyv370 = yyv370[:yyl370] - yyc370 = true - } - yyj370 := 0 - for ; yyj370 < yyrr370; yyj370++ { - yyh370.ElemContainerState(yyj370) - if r.TryDecodeAsNil() { - yyv370[yyj370] = Job{} - } else { - yyv371 := &yyv370[yyj370] - yyv371.CodecDecodeSelf(d) - } - - } - if yyrt370 { - for ; yyj370 < yyl370; yyj370++ { - 
yyv370 = append(yyv370, Job{}) - yyh370.ElemContainerState(yyj370) - if r.TryDecodeAsNil() { - yyv370[yyj370] = Job{} - } else { - yyv372 := &yyv370[yyj370] - yyv372.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj370 := 0 - for ; !r.CheckBreak(); yyj370++ { - - if yyj370 >= len(yyv370) { - yyv370 = append(yyv370, Job{}) // var yyz370 Job - yyc370 = true - } - yyh370.ElemContainerState(yyj370) - if yyj370 < len(yyv370) { - if r.TryDecodeAsNil() { - yyv370[yyj370] = Job{} - } else { - yyv373 := &yyv370[yyj370] - yyv373.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj370 < len(yyv370) { - yyv370 = yyv370[:yyj370] - yyc370 = true - } else if yyj370 == 0 && yyv370 == nil { - yyv370 = []Job{} - yyc370 = true - } - } - yyh370.End() - if yyc370 { - *v = yyv370 - } -} - -func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv374 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy375 := &yyv374 - yy375.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv376 := *v - yyh376, yyl376 := z.DecSliceHelperStart() - var yyc376 bool - if yyl376 == 0 { - if yyv376 == nil { - yyv376 = []JobCondition{} - yyc376 = true - } else if len(yyv376) != 0 { - yyv376 = yyv376[:0] - yyc376 = true - } - } else if yyl376 > 0 { - var yyrr376, yyrl376 int - var yyrt376 bool - if yyl376 > cap(yyv376) { - - yyrg376 := len(yyv376) > 0 - yyv2376 := yyv376 - yyrl376, yyrt376 = z.DecInferLen(yyl376, z.DecBasicHandle().MaxInitLen, 112) - if yyrt376 { - if yyrl376 <= cap(yyv376) { - yyv376 = yyv376[:yyrl376] - } else { - yyv376 = make([]JobCondition, yyrl376) - } - } else { - yyv376 = make([]JobCondition, yyrl376) - } - yyc376 = true - yyrr376 = len(yyv376) - if yyrg376 { - copy(yyv376, yyv2376) - } - } else if yyl376 != len(yyv376) { - yyv376 = yyv376[:yyl376] - yyc376 = true - } - yyj376 := 0 - for ; yyj376 < yyrr376; yyj376++ { - yyh376.ElemContainerState(yyj376) - if r.TryDecodeAsNil() { - yyv376[yyj376] = JobCondition{} - } else { - yyv377 := &yyv376[yyj376] - yyv377.CodecDecodeSelf(d) - } - - } - if yyrt376 { - for ; yyj376 < yyl376; yyj376++ { - yyv376 = append(yyv376, JobCondition{}) - yyh376.ElemContainerState(yyj376) - if r.TryDecodeAsNil() { - yyv376[yyj376] = JobCondition{} - } else { - yyv378 := &yyv376[yyj376] - yyv378.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj376 := 0 - for ; !r.CheckBreak(); yyj376++ { - - if yyj376 >= len(yyv376) { - yyv376 = append(yyv376, JobCondition{}) // var yyz376 JobCondition - yyc376 = true - } - yyh376.ElemContainerState(yyj376) - if yyj376 < len(yyv376) { - if r.TryDecodeAsNil() { - yyv376[yyj376] = JobCondition{} - } else { - yyv379 := &yyv376[yyj376] - yyv379.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj376 < len(yyv376) { - yyv376 = yyv376[:yyj376] - yyc376 = true - } else if yyj376 == 0 && yyv376 == nil { - yyv376 = []JobCondition{} - yyc376 = true - } - } - yyh376.End() - if yyc376 { - *v = yyv376 - } -} - -func (x codecSelfer1234) encSliceCronJob(v []CronJob, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv380 := range v { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy381 := &yyv380 - yy381.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCronJob(v *[]CronJob, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv382 := *v - yyh382, yyl382 := z.DecSliceHelperStart() - var yyc382 bool - if yyl382 == 0 { - if yyv382 == nil { - yyv382 = []CronJob{} - yyc382 = true - } else if len(yyv382) != 0 { - yyv382 = yyv382[:0] - yyc382 = true - } - } else if yyl382 > 0 { - var yyrr382, yyrl382 int - var yyrt382 bool - if yyl382 > cap(yyv382) { - - yyrg382 := len(yyv382) > 0 - yyv2382 := yyv382 - yyrl382, yyrt382 = z.DecInferLen(yyl382, z.DecBasicHandle().MaxInitLen, 1048) - if yyrt382 { - if yyrl382 <= cap(yyv382) { - yyv382 = yyv382[:yyrl382] - } else { - yyv382 = make([]CronJob, yyrl382) - } - } else { - yyv382 = make([]CronJob, yyrl382) - } - yyc382 = true - yyrr382 = len(yyv382) - if yyrg382 { - copy(yyv382, yyv2382) - } - } else if yyl382 != len(yyv382) { - yyv382 = yyv382[:yyl382] - yyc382 = true - } - yyj382 := 0 - for ; yyj382 < yyrr382; yyj382++ { - yyh382.ElemContainerState(yyj382) - if r.TryDecodeAsNil() { - yyv382[yyj382] = CronJob{} - } else { - yyv383 := &yyv382[yyj382] - yyv383.CodecDecodeSelf(d) - } - - } - if yyrt382 { - for ; yyj382 < yyl382; yyj382++ { - yyv382 = append(yyv382, CronJob{}) - yyh382.ElemContainerState(yyj382) - if r.TryDecodeAsNil() { - yyv382[yyj382] = CronJob{} - } else { - yyv384 := &yyv382[yyj382] - yyv384.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj382 := 0 - for ; !r.CheckBreak(); yyj382++ { - - if yyj382 >= len(yyv382) { - yyv382 = append(yyv382, CronJob{}) // var yyz382 CronJob - yyc382 = true - } - yyh382.ElemContainerState(yyj382) - if yyj382 < len(yyv382) { - if r.TryDecodeAsNil() { - yyv382[yyj382] = CronJob{} - } else { - yyv385 := &yyv382[yyj382] - yyv385.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj382 < len(yyv382) { - yyv382 = yyv382[:yyj382] - yyc382 = true - } else if yyj382 == 0 && yyv382 == nil { - yyv382 = []CronJob{} - yyc382 = true - } - } - yyh382.End() - if yyc382 { - *v = yyv382 - } -} - -func (x codecSelfer1234) encSliceapi_ObjectReference(v []pkg2_api.ObjectReference, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv386 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy387 := &yyv386 - yy387.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceapi_ObjectReference(v *[]pkg2_api.ObjectReference, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv388 := *v - yyh388, yyl388 := z.DecSliceHelperStart() - var yyc388 bool - if yyl388 == 0 { - if yyv388 == nil { - yyv388 = []pkg2_api.ObjectReference{} - yyc388 = true - } else if len(yyv388) != 0 { - yyv388 = yyv388[:0] - yyc388 = true - } - } else if yyl388 > 0 { - var yyrr388, yyrl388 int - var yyrt388 bool - if yyl388 > cap(yyv388) { - - yyrg388 := len(yyv388) > 0 - yyv2388 := yyv388 - yyrl388, yyrt388 = z.DecInferLen(yyl388, z.DecBasicHandle().MaxInitLen, 112) - if yyrt388 { - if yyrl388 <= cap(yyv388) { - yyv388 = yyv388[:yyrl388] - } else { - yyv388 = make([]pkg2_api.ObjectReference, yyrl388) - } - } else { - yyv388 = make([]pkg2_api.ObjectReference, yyrl388) - } - yyc388 = 
true - yyrr388 = len(yyv388) - if yyrg388 { - copy(yyv388, yyv2388) - } - } else if yyl388 != len(yyv388) { - yyv388 = yyv388[:yyl388] - yyc388 = true - } - yyj388 := 0 - for ; yyj388 < yyrr388; yyj388++ { - yyh388.ElemContainerState(yyj388) - if r.TryDecodeAsNil() { - yyv388[yyj388] = pkg2_api.ObjectReference{} - } else { - yyv389 := &yyv388[yyj388] - yyv389.CodecDecodeSelf(d) - } - - } - if yyrt388 { - for ; yyj388 < yyl388; yyj388++ { - yyv388 = append(yyv388, pkg2_api.ObjectReference{}) - yyh388.ElemContainerState(yyj388) - if r.TryDecodeAsNil() { - yyv388[yyj388] = pkg2_api.ObjectReference{} - } else { - yyv390 := &yyv388[yyj388] - yyv390.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj388 := 0 - for ; !r.CheckBreak(); yyj388++ { - - if yyj388 >= len(yyv388) { - yyv388 = append(yyv388, pkg2_api.ObjectReference{}) // var yyz388 pkg2_api.ObjectReference - yyc388 = true - } - yyh388.ElemContainerState(yyj388) - if yyj388 < len(yyv388) { - if r.TryDecodeAsNil() { - yyv388[yyj388] = pkg2_api.ObjectReference{} - } else { - yyv391 := &yyv388[yyj388] - yyv391.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj388 < len(yyv388) { - yyv388 = yyv388[:yyj388] - yyc388 = true - } else if yyj388 == 0 && yyv388 == nil { - yyv388 = []pkg2_api.ObjectReference{} - yyc388 = true - } - } - yyh388.End() - if yyc388 { - *v = yyv388 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go index 8f919c265b..2113dca18a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go @@ -17,55 +17,55 @@ limitations under the License. package batch import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" ) // +genclient=true // Job represents the configuration of a single job. type Job struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - api.ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Spec is a structure defining the expected behavior of a job. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // +optional - Spec JobSpec `json:"spec,omitempty"` + Spec JobSpec // Status is a structure describing current status of a job. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // +optional - Status JobStatus `json:"status,omitempty"` + Status JobStatus } // JobList is a collection of jobs. type JobList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // Standard list metadata // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta // Items is the list of Job. - Items []Job `json:"items"` + Items []Job } // JobTemplate describes a template for creating copies of a predefined pod. type JobTemplate struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // Standard object's metadata. 
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - api.ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Template defines jobs that will be created from this template // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // +optional - Template JobTemplateSpec `json:"template,omitempty"` + Template JobTemplateSpec } // JobTemplateSpec describes the data a Job should have when created from a template @@ -73,12 +73,12 @@ type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - api.ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Specification of the desired behavior of the job. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // +optional - Spec JobSpec `json:"spec,omitempty"` + Spec JobSpec } // JobSpec describes how the job execution will look like. @@ -89,7 +89,7 @@ type JobSpec struct { // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), // i.e. when the work left to do is less than max parallelism. // +optional - Parallelism *int32 `json:"parallelism,omitempty"` + Parallelism *int32 // Completions specifies the desired number of successfully finished pods the // job should be run with. Setting to nil means that the success of any @@ -97,17 +97,17 @@ type JobSpec struct { // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. // +optional - Completions *int32 `json:"completions,omitempty"` + Completions *int32 // Optional duration in seconds relative to the startTime that the job may be active // before the system tries to terminate it; value must be positive integer // +optional - ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"` + ActiveDeadlineSeconds *int64 // Selector is a label query over pods that should match the pod count. // Normally, the system sets this field for you. // +optional - Selector *unversioned.LabelSelector `json:"selector,omitempty"` + Selector *metav1.LabelSelector // ManualSelector controls generation of pod labels and pod selectors. // Leave `manualSelector` unset unless you are certain what you are doing. @@ -119,11 +119,11 @@ type JobSpec struct { // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` // API. // +optional - ManualSelector *bool `json:"manualSelector,omitempty"` + ManualSelector *bool // Template is the object that describes the pod that will be created when // executing a job. - Template api.PodTemplateSpec `json:"template"` + Template api.PodTemplateSpec } // JobStatus represents the current state of a Job. @@ -131,31 +131,31 @@ type JobStatus struct { // Conditions represent the latest available observations of an object's current state. // +optional - Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + Conditions []JobCondition // StartTime represents time when the job was acknowledged by the Job Manager. // It is not guaranteed to be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. // +optional - StartTime *unversioned.Time `json:"startTime,omitempty"` + StartTime *metav1.Time // CompletionTime represents time when the job was completed. 
It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. // +optional - CompletionTime *unversioned.Time `json:"completionTime,omitempty"` + CompletionTime *metav1.Time // Active is the number of actively running pods. // +optional - Active int32 `json:"active,omitempty"` + Active int32 // Succeeded is the number of pods which reached Phase Succeeded. // +optional - Succeeded int32 `json:"succeeded,omitempty"` + Succeeded int32 // Failed is the number of pods which reached Phase Failed. // +optional - Failed int32 `json:"failed,omitempty"` + Failed int32 } type JobConditionType string @@ -171,79 +171,89 @@ const ( // JobCondition describes current state of a job. type JobCondition struct { // Type of job condition, Complete or Failed. - Type JobConditionType `json:"type"` + Type JobConditionType // Status of the condition, one of True, False, Unknown. - Status api.ConditionStatus `json:"status"` + Status api.ConditionStatus // Last time the condition was checked. // +optional - LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty"` + LastProbeTime metav1.Time // Last time the condition transit from one status to another. // +optional - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"` + LastTransitionTime metav1.Time // (brief) reason for the condition's last transition. // +optional - Reason string `json:"reason,omitempty"` + Reason string // Human readable message indicating details about last transition. // +optional - Message string `json:"message,omitempty"` + Message string } // +genclient=true // CronJob represents the configuration of a single cron job. type CronJob struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - api.ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Spec is a structure defining the expected behavior of a job, including the schedule. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // +optional - Spec CronJobSpec `json:"spec,omitempty"` + Spec CronJobSpec // Status is a structure describing current status of a job. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // +optional - Status CronJobStatus `json:"status,omitempty"` + Status CronJobStatus } // CronJobList is a collection of cron jobs. type CronJobList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // Standard list metadata // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta // Items is the list of CronJob. - Items []CronJob `json:"items"` + Items []CronJob } // CronJobSpec describes how the job execution will look like and when it will actually run. type CronJobSpec struct { // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - Schedule string `json:"schedule"` + Schedule string // Optional deadline in seconds for starting the job if it misses scheduled // time for any reason. Missed jobs executions will be counted as failed ones. // +optional - StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty"` + StartingDeadlineSeconds *int64 // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. 
// +optional - ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` + ConcurrencyPolicy ConcurrencyPolicy // Suspend flag tells the controller to suspend subsequent executions, it does // not apply to already started executions. Defaults to false. // +optional - Suspend *bool `json:"suspend,omitempty"` + Suspend *bool // JobTemplate is the object that describes the job that will be created when // executing a CronJob. - JobTemplate JobTemplateSpec `json:"jobTemplate"` + JobTemplate JobTemplateSpec + + // The number of successful finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + SuccessfulJobsHistoryLimit *int32 + + // The number of failed finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + FailedJobsHistoryLimit *int32 } // ConcurrencyPolicy describes how the job will be handled. @@ -268,9 +278,9 @@ const ( type CronJobStatus struct { // Active holds pointers to currently running jobs. // +optional - Active []api.ObjectReference `json:"active,omitempty"` + Active []api.ObjectReference // LastScheduleTime keeps information of when was the last time the job was successfully scheduled. // +optional - LastScheduleTime *unversioned.Time `json:"lastScheduleTime,omitempty"` + LastScheduleTime *metav1.Time } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/BUILD index 2011e9176f..5637d0c379 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/BUILD @@ -4,10 +4,8 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", "go_test", - "cgo_library", ) go_library( @@ -28,17 +26,17 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/resource:go_default_library", - "//pkg/api/unversioned:go_default_library", "//pkg/api/v1:go_default_library", "//pkg/apis/batch:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//pkg/util/intstr:go_default_library", - "//pkg/watch/versioned:go_default_library", "//vendor:github.com/gogo/protobuf/proto", "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/api/resource", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/util/intstr", ], ) @@ -52,6 +50,20 @@ go_test( "//pkg/api/v1:go_default_library", "//pkg/apis/batch/install:go_default_library", "//pkg/apis/batch/v1:go_default_library", - "//pkg/runtime:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/runtime", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go index e996bad27a..bd952b0e80 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go @@ -19,11 +19,10 @@ package v1 import ( "fmt" - 
"k8s.io/kubernetes/pkg/api" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" v1 "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" ) func addConversionFuncs(scheme *runtime.Scheme) error { @@ -36,13 +35,13 @@ func addConversionFuncs(scheme *runtime.Scheme) error { return err } - return api.Scheme.AddFieldLabelConversionFunc("batch/v1", "Job", + return scheme.AddFieldLabelConversionFunc("batch/v1", "Job", func(label, value string) (string, string, error) { switch label { case "metadata.name", "metadata.namespace", "status.successful": return label, value, nil default: - return "", "", fmt.Errorf("field label not supported: %s", label) + return "", "", fmt.Errorf("field label %q not supported for Job", label) } }, ) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/defaults.go index 46685ef8a9..3603247f24 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/defaults.go @@ -17,7 +17,7 @@ limitations under the License. package v1 import ( - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) func addDefaultingFuncs(scheme *runtime.Scheme) error { diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.pb.go index f5d0adcbc1..c3f5dd7c4c 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.pb.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -37,7 +37,8 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" import strings "strings" @@ -467,7 +468,7 @@ func (this *Job) String() string { return "nil" } s := strings.Join([]string{`&Job{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "JobSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "JobStatus", "JobStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -481,8 +482,8 @@ func (this *JobCondition) String() string { s := strings.Join([]string{`&JobCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -494,7 +495,7 @@ func (this *JobList) String() string { return "nil" } s := strings.Join([]string{`&JobList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Job", "Job", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -508,7 +509,7 @@ func (this *JobSpec) String() string { `Parallelism:` + valueToStringGenerated(this.Parallelism) + `,`, `Completions:` + valueToStringGenerated(this.Completions) + `,`, `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_kubernetes_pkg_api_unversioned.LabelSelector", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, `ManualSelector:` + valueToStringGenerated(this.ManualSelector) + `,`, `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `}`, @@ -521,8 +522,8 @@ func (this *JobStatus) String() string { } s := strings.Join([]string{`&JobStatus{`, `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "JobCondition", 
"JobCondition", 1), `&`, ``, 1) + `,`, - `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1) + `,`, - `CompletionTime:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTime), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1) + `,`, + `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `CompletionTime:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, `Active:` + fmt.Sprintf("%v", this.Active) + `,`, `Succeeded:` + fmt.Sprintf("%v", this.Succeeded) + `,`, `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, @@ -1131,7 +1132,7 @@ func (m *JobSpec) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { return err @@ -1296,7 +1297,7 @@ func (m *JobStatus) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.StartTime == nil { - m.StartTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil { return err @@ -1329,7 +1330,7 @@ func (m *JobStatus) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.CompletionTime == nil { - m.CompletionTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + m.CompletionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.CompletionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { return err @@ -1519,59 +1520,61 @@ var ( ) var fileDescriptorGenerated = []byte{ - // 863 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x54, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0xce, 0x8f, 0xa6, 0x4d, 0xa6, 0x3f, 0x76, 0x19, 0xa9, 0x52, 0xc8, 0x21, 0x59, 0x05, 0x84, - 0x16, 0xb1, 0x1d, 0x2b, 0x65, 0x91, 0x10, 0x07, 0x24, 0x5c, 0x84, 0x44, 0xd5, 0xb2, 0xd5, 0xa4, - 0x42, 0x2b, 0x10, 0x48, 0x63, 0xfb, 0x6d, 0x3a, 0xd4, 0xf6, 0x58, 0x9e, 0x71, 0xd0, 0xde, 0xb8, - 0x71, 0xe5, 0xaf, 0x01, 0xf1, 0x1f, 0xf4, 0xb8, 0xe2, 0xc4, 0x29, 0xa2, 0xe6, 0xbf, 0xd8, 0x13, - 0xf2, 0x78, 0x62, 0x3b, 0x4d, 0x1a, 0xa5, 0xdc, 0x3c, 0x6f, 0xbe, 0xef, 0x7b, 0xcf, 0xef, 0x7d, - 0xf3, 0xd0, 0xc7, 0xd7, 0x9f, 0x4a, 0xc2, 0x85, 0x75, 0x9d, 0x38, 0x10, 0x87, 0xa0, 0x40, 0x5a, - 0xd1, 0xf5, 0xc4, 0x62, 0x11, 0x97, 0x96, 0xc3, 0x94, 0x7b, 0x65, 0x4d, 0x47, 0xd6, 0x04, 0x42, - 0x88, 0x99, 0x02, 0x8f, 0x44, 0xb1, 0x50, 0x02, 0xbf, 0x97, 0x93, 0x48, 0x49, 0x22, 0xd1, 0xf5, - 0x84, 0x64, 0x24, 0xa2, 0x49, 0x64, 0x3a, 0xea, 0x1d, 0x4d, 0xb8, 0xba, 0x4a, 0x1c, 0xe2, 0x8a, - 0xc0, 0x9a, 0x88, 0x89, 0xb0, 0x34, 0xd7, 0x49, 0x5e, 0xe9, 0x93, 0x3e, 0xe8, 0xaf, 0x5c, 0xb3, - 0x77, 0x7c, 0x6f, 0x21, 0x56, 0x0c, 0x52, 0x24, 0xb1, 0x0b, 0x77, 0xeb, 0xe8, 0x7d, 0x72, 0x3f, - 0x27, 0x09, 0xa7, 0x10, 0x4b, 0x2e, 0x42, 0xf0, 0x96, 0x68, 0xcf, 0xee, 0xa7, 0x2d, 0xff, 0x6c, - 0xef, 0x68, 0x35, 0x3a, 0x4e, 0x42, 0xc5, 0x83, 0xe5, 0x9a, 0x46, 0xab, 0xe1, 0x89, 0xe2, 0xbe, - 0xc5, 0x43, 0x25, 0x55, 0x7c, 0x97, 0x32, 0xfc, 0xb5, 0x81, 0x9a, 0xa7, 0xc2, 0xc1, 0x2f, 0x51, - 0x3b, 0x00, 0xc5, 0x3c, 0xa6, 0x58, 0xb7, 0xfe, 0xa4, 0xfe, 0x74, 0xf7, 0xf8, 0x29, 0xb9, 0xb7, - 0xd3, 0x64, 0x3a, 0x22, 0x2f, 0x9c, 0x9f, 0xc0, 0x55, 0xe7, 0xa0, 0x98, 0x8d, 0x6f, 0x66, 0x83, - 
0x5a, 0x3a, 0x1b, 0xa0, 0x32, 0x46, 0x0b, 0x35, 0xfc, 0x0d, 0xda, 0x92, 0x11, 0xb8, 0xdd, 0x86, - 0x56, 0x7d, 0x46, 0x36, 0x98, 0x1f, 0x39, 0x15, 0xce, 0x38, 0x02, 0xd7, 0xde, 0x33, 0xca, 0x5b, - 0xd9, 0x89, 0x6a, 0x1d, 0xfc, 0x2d, 0xda, 0x96, 0x8a, 0xa9, 0x44, 0x76, 0x9b, 0x5a, 0x91, 0x6c, - 0xac, 0xa8, 0x59, 0xf6, 0x81, 0xd1, 0xdc, 0xce, 0xcf, 0xd4, 0xa8, 0x0d, 0xff, 0x6a, 0xa2, 0xbd, - 0x53, 0xe1, 0x9c, 0x88, 0xd0, 0xe3, 0x8a, 0x8b, 0x10, 0x3f, 0x47, 0x5b, 0xea, 0x75, 0x04, 0xba, - 0x1d, 0x1d, 0xfb, 0xc9, 0xbc, 0x94, 0xcb, 0xd7, 0x11, 0xbc, 0x9d, 0x0d, 0x1e, 0x57, 0xb1, 0x59, - 0x8c, 0x6a, 0x74, 0xa5, 0xbc, 0x86, 0xe6, 0x7d, 0xbe, 0x98, 0xee, 0xed, 0x6c, 0xb0, 0xd6, 0x02, - 0xa4, 0xd0, 0x5c, 0x2c, 0x0f, 0x5f, 0xa1, 0x7d, 0x9f, 0x49, 0x75, 0x11, 0x0b, 0x07, 0x2e, 0x79, - 0x00, 0xe6, 0xef, 0x3f, 0x5a, 0x33, 0xa5, 0x8a, 0x0f, 0x49, 0x46, 0xb1, 0x0f, 0x4d, 0x2d, 0xfb, - 0x67, 0x55, 0x25, 0xba, 0x28, 0x8c, 0x7f, 0x46, 0x38, 0x0b, 0x5c, 0xc6, 0x2c, 0x94, 0xf9, 0xdf, - 0x65, 0xe9, 0xb6, 0x1e, 0x9e, 0xae, 0x67, 0xd2, 0xe1, 0xb3, 0x25, 0x39, 0xba, 0x22, 0x05, 0xfe, - 0x00, 0x6d, 0xc7, 0xc0, 0xa4, 0x08, 0xbb, 0x2d, 0xdd, 0xba, 0x62, 0x52, 0x54, 0x47, 0xa9, 0xb9, - 0xc5, 0x1f, 0xa2, 0x9d, 0x00, 0xa4, 0x64, 0x13, 0xe8, 0x6e, 0x6b, 0xe0, 0x23, 0x03, 0xdc, 0x39, - 0xcf, 0xc3, 0x74, 0x7e, 0x3f, 0xfc, 0xa3, 0x8e, 0x76, 0x4e, 0x85, 0x73, 0xc6, 0xa5, 0xc2, 0x3f, - 0x2c, 0x59, 0xdc, 0xda, 0xf0, 0x6f, 0x32, 0xba, 0x76, 0xfa, 0x63, 0x93, 0xa8, 0x3d, 0x8f, 0x54, - 0x7c, 0x7e, 0x8e, 0x5a, 0x5c, 0x41, 0x90, 0xcd, 0xbd, 0xb9, 0xfe, 0xf9, 0x2c, 0xda, 0xd2, 0xde, - 0x37, 0xa2, 0xad, 0xaf, 0x33, 0x3a, 0xcd, 0x55, 0x86, 0x7f, 0x36, 0x75, 0xe5, 0x99, 0xf1, 0xf1, - 0x08, 0xed, 0x46, 0x2c, 0x66, 0xbe, 0x0f, 0x3e, 0x97, 0x81, 0x2e, 0xbe, 0x65, 0x3f, 0x4a, 0x67, - 0x83, 0xdd, 0x8b, 0x32, 0x4c, 0xab, 0x98, 0x8c, 0xe2, 0x8a, 0x20, 0xf2, 0x21, 0xeb, 0x6e, 0xee, - 0x45, 0x43, 0x39, 0x29, 0xc3, 0xb4, 0x8a, 0xc1, 0x2f, 0xd0, 0x21, 0x73, 0x15, 0x9f, 0xc2, 0x97, - 0xc0, 0x3c, 0x9f, 0x87, 0x30, 0x06, 0x57, 0x84, 0x5e, 0xfe, 0xce, 0x9a, 0xf6, 0xbb, 0xe9, 0x6c, - 0x70, 0xf8, 0xc5, 0x2a, 0x00, 0x5d, 0xcd, 0xc3, 0x3f, 0xa2, 0xb6, 0x04, 0x1f, 0x5c, 0x25, 0x62, - 0x63, 0x9f, 0xe7, 0x9b, 0x36, 0x9c, 0x39, 0xe0, 0x8f, 0x0d, 0xd7, 0xde, 0xcb, 0x3a, 0x3e, 0x3f, - 0xd1, 0x42, 0x13, 0x7f, 0x86, 0x0e, 0x02, 0x16, 0x26, 0xac, 0x40, 0x6a, 0xdf, 0xb4, 0x6d, 0x9c, - 0xce, 0x06, 0x07, 0xe7, 0x0b, 0x37, 0xf4, 0x0e, 0x12, 0x7f, 0x8f, 0xda, 0x0a, 0x82, 0xc8, 0x67, - 0x2a, 0x37, 0xd1, 0xee, 0xf1, 0xd1, 0xfa, 0x7d, 0x77, 0x21, 0xbc, 0x4b, 0x43, 0xd0, 0xab, 0xa9, - 0xb0, 0xc2, 0x3c, 0x4a, 0x0b, 0xc1, 0xe1, 0xef, 0x4d, 0xd4, 0x29, 0x16, 0x0e, 0x06, 0x84, 0xdc, - 0xf9, 0xa3, 0x96, 0xdd, 0xba, 0x76, 0xc7, 0x68, 0x53, 0x77, 0x14, 0xeb, 0xa0, 0xdc, 0xb2, 0x45, - 0x48, 0xd2, 0x8a, 0x30, 0x7e, 0x89, 0x3a, 0x52, 0xb1, 0x58, 0xe9, 0xd7, 0xda, 0x78, 0xf8, 0x6b, - 0xdd, 0x4f, 0x67, 0x83, 0xce, 0x78, 0xae, 0x40, 0x4b, 0x31, 0x3c, 0x41, 0x07, 0xa5, 0x4f, 0xfe, - 0xef, 0xee, 0xd1, 0x43, 0x39, 0x59, 0x90, 0xa1, 0x77, 0x64, 0xb3, 0x05, 0x90, 0x3b, 0x49, 0xdb, - 0xa5, 0x55, 0x2e, 0x80, 0xdc, 0x76, 0xd4, 0xdc, 0x62, 0x0b, 0x75, 0x64, 0xe2, 0xba, 0x00, 0x1e, - 0x78, 0x7a, 0xe6, 0x2d, 0xfb, 0x1d, 0x03, 0xed, 0x8c, 0xe7, 0x17, 0xb4, 0xc4, 0x64, 0xc2, 0xaf, - 0x18, 0xf7, 0xc1, 0xd3, 0xb3, 0xae, 0x08, 0x7f, 0xa5, 0xa3, 0xd4, 0xdc, 0xda, 0xef, 0xdf, 0xdc, - 0xf6, 0x6b, 0x6f, 0x6e, 0xfb, 0xb5, 0xbf, 0x6f, 0xfb, 0xb5, 0x5f, 0xd2, 0x7e, 0xfd, 0x26, 0xed, - 0xd7, 0xdf, 0xa4, 0xfd, 0xfa, 0x3f, 0x69, 0xbf, 0xfe, 0xdb, 0xbf, 0xfd, 0xda, 0x77, 0x8d, 0xe9, - 0xe8, 0xbf, 0x00, 0x00, 
0x00, 0xff, 0xff, 0x9a, 0x45, 0x86, 0xd9, 0xb8, 0x08, 0x00, 0x00, + // 885 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x54, 0xdd, 0x6e, 0xe3, 0x44, + 0x14, 0xce, 0x4f, 0xd3, 0x26, 0x93, 0xb6, 0xbb, 0x8c, 0x54, 0x29, 0xf4, 0x22, 0x59, 0x05, 0x84, + 0x0a, 0xda, 0xb5, 0x49, 0xbb, 0x42, 0x88, 0x0b, 0x24, 0x5c, 0x84, 0x44, 0xd5, 0xb2, 0xd5, 0xa4, + 0x02, 0x89, 0x1f, 0x89, 0xb1, 0x7d, 0x9a, 0x0e, 0xb5, 0x3d, 0x96, 0x67, 0x12, 0xd1, 0x3b, 0xde, + 0x00, 0x1e, 0x06, 0x21, 0x1e, 0xa1, 0x97, 0xbd, 0xe4, 0x2a, 0xa2, 0xe6, 0x2d, 0xf6, 0x0a, 0xcd, + 0x78, 0xfc, 0x93, 0x4d, 0x0a, 0xd9, 0xbd, 0xb3, 0xcf, 0x7c, 0xdf, 0x37, 0x67, 0xce, 0xf9, 0xce, + 0x41, 0x47, 0xd7, 0x1f, 0x0b, 0x8b, 0x71, 0xfb, 0x7a, 0xea, 0x42, 0x12, 0x81, 0x04, 0x61, 0xc7, + 0xd7, 0x13, 0x9b, 0xc6, 0x4c, 0xd8, 0x2e, 0x95, 0xde, 0x95, 0x3d, 0x1b, 0xd9, 0x13, 0x88, 0x20, + 0xa1, 0x12, 0x7c, 0x2b, 0x4e, 0xb8, 0xe4, 0xf8, 0x9d, 0x8c, 0x64, 0x95, 0x24, 0x2b, 0xbe, 0x9e, + 0x58, 0x8a, 0x64, 0x69, 0x92, 0x35, 0x1b, 0xed, 0x3f, 0x9b, 0x30, 0x79, 0x35, 0x75, 0x2d, 0x8f, + 0x87, 0xf6, 0x84, 0x4f, 0xb8, 0xad, 0xb9, 0xee, 0xf4, 0x52, 0xff, 0xe9, 0x1f, 0xfd, 0x95, 0x69, + 0xee, 0x3f, 0x37, 0x89, 0xd0, 0x98, 0x85, 0xd4, 0xbb, 0x62, 0x11, 0x24, 0x37, 0x65, 0x2a, 0x21, + 0x48, 0xba, 0x22, 0x93, 0x7d, 0xfb, 0x21, 0x56, 0x32, 0x8d, 0x24, 0x0b, 0x61, 0x89, 0xf0, 0xd1, + 0xff, 0x11, 0x84, 0x77, 0x05, 0x21, 0x5d, 0xe2, 0x1d, 0x3d, 0xc4, 0x9b, 0x4a, 0x16, 0xd8, 0x2c, + 0x92, 0x42, 0x26, 0x4b, 0xa4, 0xca, 0x9b, 0x04, 0x24, 0x33, 0x48, 0xca, 0x07, 0xc1, 0xcf, 0x34, + 0x8c, 0x03, 0x58, 0xf5, 0xa6, 0xa7, 0x0f, 0xb6, 0x64, 0x05, 0x7a, 0xf8, 0x6b, 0x03, 0x35, 0x4f, + 0xb8, 0x8b, 0x7f, 0x44, 0x6d, 0x55, 0x24, 0x9f, 0x4a, 0xda, 0xab, 0x3f, 0xa9, 0x1f, 0x74, 0x0f, + 0x3f, 0xb4, 0x4c, 0x9b, 0xaa, 0x39, 0x97, 0x8d, 0x52, 0x68, 0x6b, 0x36, 0xb2, 0x5e, 0xb8, 0x3f, + 0x81, 0x27, 0xcf, 0x40, 0x52, 0x07, 0xdf, 0xce, 0x07, 0xb5, 0x74, 0x3e, 0x40, 0x65, 0x8c, 0x14, + 0xaa, 0xf8, 0x2b, 0xb4, 0x21, 0x62, 0xf0, 0x7a, 0x0d, 0xad, 0xfe, 0xd4, 0x5a, 0xc3, 0x04, 0xd6, + 0x09, 0x77, 0xc7, 0x31, 0x78, 0xce, 0xb6, 0x51, 0xde, 0x50, 0x7f, 0x44, 0xeb, 0xe0, 0xaf, 0xd1, + 0xa6, 0x90, 0x54, 0x4e, 0x45, 0xaf, 0xa9, 0x15, 0xad, 0xb5, 0x15, 0x35, 0xcb, 0xd9, 0x35, 0x9a, + 0x9b, 0xd9, 0x3f, 0x31, 0x6a, 0xc3, 0xbb, 0x26, 0xda, 0x3e, 0xe1, 0xee, 0x31, 0x8f, 0x7c, 0x26, + 0x19, 0x8f, 0xf0, 0x73, 0xb4, 0x21, 0x6f, 0x62, 0xd0, 0x65, 0xe9, 0x38, 0x4f, 0xf2, 0x54, 0x2e, + 0x6e, 0x62, 0x78, 0x39, 0x1f, 0x3c, 0xae, 0x62, 0x55, 0x8c, 0x68, 0x74, 0x25, 0xbd, 0x86, 0xe6, + 0x7d, 0xba, 0x78, 0xdd, 0xcb, 0xf9, 0xe0, 0x3f, 0x1b, 0x65, 0x15, 0x9a, 0x8b, 0xe9, 0xe1, 0x09, + 0xda, 0x09, 0xa8, 0x90, 0xe7, 0x09, 0x77, 0xe1, 0x82, 0x85, 0x60, 0x5e, 0xff, 0xc1, 0x7a, 0xdd, + 0x52, 0x0c, 0x67, 0xcf, 0xa4, 0xb2, 0x73, 0x5a, 0x15, 0x22, 0x8b, 0xba, 0x78, 0x86, 0xb0, 0x0a, + 0x5c, 0x24, 0x34, 0x12, 0xd9, 0xe3, 0xd4, 0x6d, 0x1b, 0xaf, 0x7d, 0xdb, 0xbe, 0xb9, 0x0d, 0x9f, + 0x2e, 0xa9, 0x91, 0x15, 0x37, 0xe0, 0xf7, 0xd0, 0x66, 0x02, 0x54, 0xf0, 0xa8, 0xd7, 0xd2, 0x85, + 0x2b, 0xfa, 0x44, 0x74, 0x94, 0x98, 0x53, 0xfc, 0x3e, 0xda, 0x0a, 0x41, 0x08, 0x3a, 0x81, 0xde, + 0xa6, 0x06, 0x3e, 0x32, 0xc0, 0xad, 0xb3, 0x2c, 0x4c, 0xf2, 0xf3, 0xe1, 0x1f, 0x75, 0xb4, 0x75, + 0xc2, 0xdd, 0x53, 0x26, 0x24, 0xfe, 0x7e, 0xc9, 0xe8, 0xd6, 0x7a, 0x8f, 0x51, 0x6c, 0x6d, 0xf3, + 0xc7, 0xe6, 0x9e, 0x76, 0x1e, 0xa9, 0x98, 0xfc, 0x0c, 0xb5, 0x98, 0x84, 0x50, 0x35, 0xbd, 0x79, + 0xd0, 0x3d, 0x3c, 0x58, 0xd7, 0x93, 0xce, 0x8e, 0x11, 0x6d, 0x7d, 0xa9, 0xe8, 0x24, 0x53, 0x19, + 0xfe, 
0xd9, 0xd4, 0x89, 0x2b, 0xd7, 0xe3, 0x11, 0xea, 0xc6, 0x34, 0xa1, 0x41, 0x00, 0x01, 0x13, + 0xa1, 0xce, 0xbd, 0xe5, 0x3c, 0x4a, 0xe7, 0x83, 0xee, 0x79, 0x19, 0x26, 0x55, 0x8c, 0xa2, 0x78, + 0x5c, 0xed, 0x09, 0x55, 0xdc, 0xcc, 0x88, 0x86, 0x72, 0x5c, 0x86, 0x49, 0x15, 0x83, 0x5f, 0xa0, + 0x3d, 0xea, 0x49, 0x36, 0x83, 0xcf, 0x81, 0xfa, 0x01, 0x8b, 0x60, 0x0c, 0x1e, 0x8f, 0xfc, 0x6c, + 0xc8, 0x9a, 0xce, 0xdb, 0xe9, 0x7c, 0xb0, 0xf7, 0xd9, 0x2a, 0x00, 0x59, 0xcd, 0xc3, 0x3f, 0xa0, + 0xb6, 0x80, 0x00, 0x3c, 0xc9, 0x13, 0x63, 0x9e, 0xa3, 0x35, 0xeb, 0x4d, 0x5d, 0x08, 0xc6, 0x86, + 0xea, 0x6c, 0xab, 0x82, 0xe7, 0x7f, 0xa4, 0x90, 0xc4, 0x9f, 0xa0, 0xdd, 0x90, 0x46, 0x53, 0x5a, + 0x20, 0xb5, 0x6b, 0xda, 0x0e, 0x4e, 0xe7, 0x83, 0xdd, 0xb3, 0x85, 0x13, 0xf2, 0x0a, 0x12, 0x7f, + 0x87, 0xda, 0x12, 0xc2, 0x38, 0xa0, 0x32, 0xb3, 0x50, 0xf7, 0xf0, 0xd9, 0xc3, 0xfd, 0x52, 0x29, + 0x9d, 0x73, 0xff, 0xc2, 0x10, 0xf4, 0x5a, 0x2a, 0x9c, 0x90, 0x47, 0x49, 0x21, 0x38, 0xfc, 0xbd, + 0x89, 0x3a, 0xc5, 0xb2, 0xc1, 0x80, 0x90, 0x97, 0x0f, 0xb4, 0xe8, 0xd5, 0xb5, 0x39, 0x46, 0xeb, + 0x9a, 0xa3, 0x58, 0x05, 0xe5, 0x86, 0x2d, 0x42, 0x82, 0x54, 0x84, 0xf1, 0x37, 0xa8, 0x23, 0x24, + 0x4d, 0xa4, 0x1e, 0xd5, 0xc6, 0x6b, 0x8f, 0xea, 0x4e, 0x3a, 0x1f, 0x74, 0xc6, 0xb9, 0x00, 0x29, + 0xb5, 0xf0, 0x25, 0xda, 0x2d, 0x5d, 0xf2, 0x86, 0x6b, 0x47, 0xb7, 0xe4, 0x78, 0x41, 0x85, 0xbc, + 0xa2, 0xaa, 0x86, 0x3f, 0xb3, 0x91, 0xf6, 0x4a, 0xab, 0x1c, 0xfe, 0xcc, 0x73, 0xc4, 0x9c, 0x62, + 0x1b, 0x75, 0xc4, 0xd4, 0xf3, 0x00, 0x7c, 0xf0, 0x75, 0xc7, 0x5b, 0xce, 0x5b, 0x06, 0xda, 0x19, + 0xe7, 0x07, 0xa4, 0xc4, 0x28, 0xe1, 0x4b, 0xca, 0x02, 0xf0, 0x75, 0xa7, 0x2b, 0xc2, 0x5f, 0xe8, + 0x28, 0x31, 0xa7, 0xce, 0xbb, 0xb7, 0xf7, 0xfd, 0xda, 0xdd, 0x7d, 0xbf, 0xf6, 0xd7, 0x7d, 0xbf, + 0xf6, 0x4b, 0xda, 0xaf, 0xdf, 0xa6, 0xfd, 0xfa, 0x5d, 0xda, 0xaf, 0xff, 0x9d, 0xf6, 0xeb, 0xbf, + 0xfd, 0xd3, 0xaf, 0x7d, 0xdb, 0x98, 0x8d, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xe7, 0x0a, + 0x8d, 0xf7, 0x08, 0x00, 0x00, } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto index 17a6add05a..283da1c8dc 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,11 +21,12 @@ syntax = 'proto2'; package k8s.io.kubernetes.pkg.apis.batch.v1; -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; @@ -35,7 +36,7 @@ message Job { // Standard object's metadata. 
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec is a structure defining the expected behavior of a job. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -58,11 +59,11 @@ message JobCondition { // Last time the condition was checked. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; // Last time the condition transit from one status to another. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // (brief) reason for the condition's last transition. // +optional @@ -78,7 +79,7 @@ message JobList { // Standard list metadata // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Job. repeated Job items = 2; @@ -112,7 +113,7 @@ message JobSpec { // Normally, the system sets this field for you. // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 4; + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // ManualSelector controls generation of pod labels and pod selectors. // Leave `manualSelector` unset unless you are certain what you are doing. @@ -144,13 +145,13 @@ message JobStatus { // It is not guaranteed to be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 2; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2; // CompletionTime represents time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time completionTime = 3; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3; // Active is the number of actively running pods. // +optional diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/register.go index 2d59089ba3..4ba570d1b5 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/register.go @@ -17,17 +17,21 @@ limitations under the License. 
package v1 import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName is the group name use in this package const GroupName = "batch" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} var ( SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) @@ -39,10 +43,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Job{}, &JobList{}, - &v1.ListOptions{}, - &v1.DeleteOptions{}, - &v1.ExportOptions{}, ) - versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.generated.go index 0b6d641da2..1c1bb67171 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.generated.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.generated.go @@ -25,11 +25,11 @@ import ( "errors" "fmt" codec1978 "github.com/ugorji/go/codec" - pkg4_resource "k8s.io/kubernetes/pkg/api/resource" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg2_v1 "k8s.io/kubernetes/pkg/api/v1" - pkg3_types "k8s.io/kubernetes/pkg/types" - pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" + pkg4_resource "k8s.io/apimachinery/pkg/api/resource" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + pkg5_intstr "k8s.io/apimachinery/pkg/util/intstr" + pkg3_v1 "k8s.io/kubernetes/pkg/api/v1" "reflect" "runtime" time "time" @@ -66,10 +66,10 @@ func init() { } if false { // reference the types, but skip this branch at build/run time var v0 pkg4_resource.Quantity - var v1 pkg1_unversioned.TypeMeta - var v2 pkg2_v1.ObjectMeta - var v3 pkg3_types.UID - var v4 pkg5_intstr.IntOrString + var v1 pkg1_v1.TypeMeta + var v2 pkg2_types.UID + var v3 pkg5_intstr.IntOrString + var v4 pkg3_v1.PodTemplateSpec var v5 time.Time _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 } @@ -164,7 +164,13 @@ func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[2] { yy10 := &x.ObjectMeta - yy10.CodecEncodeSelf(e) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } @@ -173,15 +179,21 @@ func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.ObjectMeta - yy11.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[3] { - yy13 := 
&x.Spec - yy13.CodecEncodeSelf(e) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } @@ -190,15 +202,15 @@ func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.Spec - yy14.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[4] { - yy16 := &x.Status - yy16.CodecEncodeSelf(e) + yy20 := &x.Status + yy20.CodecEncodeSelf(e) } else { r.EncodeNil() } @@ -207,8 +219,8 @@ func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Status - yy17.CodecEncodeSelf(e) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } if yyr2 || yy2arr2 { @@ -224,25 +236,25 @@ func (x *Job) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym18 := z.DecBinary() - _ = yym18 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct19 := r.ContainerType() - if yyct19 == codecSelferValueTypeMap1234 { - yyl19 := r.ReadMapStart() - if yyl19 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl19, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct19 == codecSelferValueTypeArray1234 { - yyl19 := r.ReadArrayStart() - if yyl19 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl19, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -254,12 +266,12 @@ func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys20Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys20Slc - var yyhl20 bool = l >= 0 - for yyj20 := 0; ; yyj20++ { - if yyhl20 { - if yyj20 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -268,47 +280,65 @@ func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys20Slc = r.DecodeBytes(yys20Slc, true, true) - yys20 := string(yys20Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys20 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + 
x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv23 := &x.ObjectMeta - yyv23.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = JobSpec{} } else { - yyv24 := &x.Spec - yyv24.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = JobStatus{} } else { - yyv25 := &x.Status - yyv25.CodecDecodeSelf(d) + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys20) - } // end switch yys20 - } // end for yyj20 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -316,16 +346,16 @@ func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj26 int - var yyb26 bool - var yyhl26 bool = l >= 0 - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb26 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb26 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -333,15 +363,21 @@ func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb26 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb26 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -349,32 +385,44 @@ func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb26 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb26 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv29 := &x.ObjectMeta - yyv29.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb26 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb26 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -382,16 +430,16 @@ func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Spec = JobSpec{} } else { - yyv30 := &x.Spec - yyv30.CodecDecodeSelf(d) + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb26 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb26 { + if yyb12 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -399,21 +447,21 @@ func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Status = JobStatus{} } else { - yyv31 := &x.Status - yyv31.CodecDecodeSelf(d) + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) } for { - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb26 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb26 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj26-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -425,37 +473,37 @@ func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym32 := z.EncBinary() - _ = yym32 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep33 := !z.EncBinary() - yy2arr33 := z.EncBasicHandle().StructToArray - var yyq33 [4]bool - _, _, _ = yysep33, yyq33, yy2arr33 - const yyr33 bool = false - yyq33[0] = x.Kind != "" - yyq33[1] = x.APIVersion != "" - yyq33[2] = true - var yynn33 int - if yyr33 || yy2arr33 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn33 = 1 - for _, b := range yyq33 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn33++ + yynn2++ } } - r.EncodeMapStart(yynn33) - yynn33 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr33 || yy2arr33 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[0] { - yym35 := z.EncBinary() - _ = yym35 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -464,23 +512,23 @@ func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq33[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym36 := z.EncBinary() - _ = yym36 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr33 || yy2arr33 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[1] { - yym38 := z.EncBinary() - _ = yym38 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -489,54 +537,54 @@ func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq33[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym39 := z.EncBinary() - _ = yym39 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr33 || yy2arr33 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[2] { - yy41 := &x.ListMeta - yym42 := z.EncBinary() - _ = yym42 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - 
} else if z.HasExtensions() && z.EncExt(yy41) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy41) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq33[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy43 := &x.ListMeta - yym44 := z.EncBinary() - _ = yym44 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy43) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy43) + z.EncFallback(yy12) } } } - if yyr33 || yy2arr33 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym46 := z.EncBinary() - _ = yym46 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceJob(([]Job)(x.Items), e) @@ -549,15 +597,15 @@ func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym47 := z.EncBinary() - _ = yym47 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceJob(([]Job)(x.Items), e) } } } - if yyr33 || yy2arr33 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -570,25 +618,25 @@ func (x *JobList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym48 := z.DecBinary() - _ = yym48 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct49 := r.ContainerType() - if yyct49 == codecSelferValueTypeMap1234 { - yyl49 := r.ReadMapStart() - if yyl49 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl49, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct49 == codecSelferValueTypeArray1234 { - yyl49 := r.ReadArrayStart() - if yyl49 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl49, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -600,12 +648,12 @@ func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys50Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys50Slc - var yyhl50 bool = l >= 0 - for yyj50 := 0; ; yyj50++ { - if yyhl50 { - if yyj50 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -614,51 +662,63 @@ func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys50Slc = r.DecodeBytes(yys50Slc, true, true) - yys50 := string(yys50Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys50 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if 
false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv53 := &x.ListMeta - yym54 := z.DecBinary() - _ = yym54 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv53) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv53, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv55 := &x.Items - yym56 := z.DecBinary() - _ = yym56 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceJob((*[]Job)(yyv55), d) + h.decSliceJob((*[]Job)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys50) - } // end switch yys50 - } // end for yyj50 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -666,16 +726,16 @@ func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj57 int - var yyb57 bool - var yyhl57 bool = l >= 0 - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb57 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb57 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -683,15 +743,21 @@ func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb57 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb57 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -699,38 +765,44 @@ func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb57 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb57 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv60 := &x.ListMeta - yym61 := z.DecBinary() - _ = yym61 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv60) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv60, false) + z.DecFallback(yyv17, false) } } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb57 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb57 { + if yyb12 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -738,26 +810,26 @@ func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Items = nil } else { - yyv62 := &x.Items - yym63 := z.DecBinary() - _ = yym63 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceJob((*[]Job)(yyv62), d) + h.decSliceJob((*[]Job)(yyv19), d) } } for { - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb57 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb57 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj57-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -769,147 +841,147 @@ func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym64 := z.EncBinary() - _ = yym64 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep65 := !z.EncBinary() - yy2arr65 := z.EncBasicHandle().StructToArray - var yyq65 [6]bool - _, _, _ = yysep65, yyq65, yy2arr65 - const yyr65 bool = false - yyq65[0] = x.Parallelism != nil - yyq65[1] = x.Completions != nil - yyq65[2] = x.ActiveDeadlineSeconds != nil - yyq65[3] = x.Selector != nil - yyq65[4] = x.ManualSelector != nil - var yynn65 int - if yyr65 || yy2arr65 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Parallelism != nil + yyq2[1] = x.Completions != nil + yyq2[2] = x.ActiveDeadlineSeconds != nil + yyq2[3] = x.Selector != nil + yyq2[4] = x.ManualSelector != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(6) } else { - yynn65 = 1 - for _, b := range yyq65 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn65++ + yynn2++ } } - r.EncodeMapStart(yynn65) - yynn65 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr65 || yy2arr65 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq65[0] { + if yyq2[0] { if x.Parallelism == nil { r.EncodeNil() } else { - yy67 := *x.Parallelism - yym68 := z.EncBinary() - _ = yym68 + yy4 := *x.Parallelism + yym5 := z.EncBinary() + _ = yym5 if false { } else { - r.EncodeInt(int64(yy67)) + r.EncodeInt(int64(yy4)) } } } else { r.EncodeNil() } } else { - if yyq65[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("parallelism")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Parallelism == nil { r.EncodeNil() } else { - yy69 := *x.Parallelism - yym70 := z.EncBinary() - _ = yym70 + yy6 := *x.Parallelism + yym7 := z.EncBinary() + _ = yym7 if false { } else { - r.EncodeInt(int64(yy69)) + r.EncodeInt(int64(yy6)) } } } } - if yyr65 || yy2arr65 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq65[1] { + if yyq2[1] { if x.Completions == nil { r.EncodeNil() } else { - yy72 := *x.Completions - yym73 := z.EncBinary() - _ = yym73 + yy9 := *x.Completions + yym10 := z.EncBinary() + _ = yym10 if false { } else { - r.EncodeInt(int64(yy72)) + r.EncodeInt(int64(yy9)) } } } else { r.EncodeNil() } } else { - if yyq65[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("completions")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if 
x.Completions == nil { r.EncodeNil() } else { - yy74 := *x.Completions - yym75 := z.EncBinary() - _ = yym75 + yy11 := *x.Completions + yym12 := z.EncBinary() + _ = yym12 if false { } else { - r.EncodeInt(int64(yy74)) + r.EncodeInt(int64(yy11)) } } } } - if yyr65 || yy2arr65 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq65[2] { + if yyq2[2] { if x.ActiveDeadlineSeconds == nil { r.EncodeNil() } else { - yy77 := *x.ActiveDeadlineSeconds - yym78 := z.EncBinary() - _ = yym78 + yy14 := *x.ActiveDeadlineSeconds + yym15 := z.EncBinary() + _ = yym15 if false { } else { - r.EncodeInt(int64(yy77)) + r.EncodeInt(int64(yy14)) } } } else { r.EncodeNil() } } else { - if yyq65[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.ActiveDeadlineSeconds == nil { r.EncodeNil() } else { - yy79 := *x.ActiveDeadlineSeconds - yym80 := z.EncBinary() - _ = yym80 + yy16 := *x.ActiveDeadlineSeconds + yym17 := z.EncBinary() + _ = yym17 if false { } else { - r.EncodeInt(int64(yy79)) + r.EncodeInt(int64(yy16)) } } } } - if yyr65 || yy2arr65 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq65[3] { + if yyq2[3] { if x.Selector == nil { r.EncodeNil() } else { - yym82 := z.EncBinary() - _ = yym82 + yym19 := z.EncBinary() + _ = yym19 if false { } else if z.HasExtensions() && z.EncExt(x.Selector) { } else { @@ -920,15 +992,15 @@ func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq65[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("selector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Selector == nil { r.EncodeNil() } else { - yym83 := z.EncBinary() - _ = yym83 + yym20 := z.EncBinary() + _ = yym20 if false { } else if z.HasExtensions() && z.EncExt(x.Selector) { } else { @@ -937,53 +1009,53 @@ func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr65 || yy2arr65 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq65[4] { + if yyq2[4] { if x.ManualSelector == nil { r.EncodeNil() } else { - yy85 := *x.ManualSelector - yym86 := z.EncBinary() - _ = yym86 + yy22 := *x.ManualSelector + yym23 := z.EncBinary() + _ = yym23 if false { } else { - r.EncodeBool(bool(yy85)) + r.EncodeBool(bool(yy22)) } } } else { r.EncodeNil() } } else { - if yyq65[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("manualSelector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.ManualSelector == nil { r.EncodeNil() } else { - yy87 := *x.ManualSelector - yym88 := z.EncBinary() - _ = yym88 + yy24 := *x.ManualSelector + yym25 := z.EncBinary() + _ = yym25 if false { } else { - r.EncodeBool(bool(yy87)) + r.EncodeBool(bool(yy24)) } } } } - if yyr65 || yy2arr65 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy90 := &x.Template - yy90.CodecEncodeSelf(e) + yy27 := &x.Template + yy27.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("template")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy91 := &x.Template - yy91.CodecEncodeSelf(e) + yy29 := &x.Template + yy29.CodecEncodeSelf(e) } - if yyr65 || yy2arr65 { + 
if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -996,25 +1068,25 @@ func (x *JobSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym92 := z.DecBinary() - _ = yym92 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct93 := r.ContainerType() - if yyct93 == codecSelferValueTypeMap1234 { - yyl93 := r.ReadMapStart() - if yyl93 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl93, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct93 == codecSelferValueTypeArray1234 { - yyl93 := r.ReadArrayStart() - if yyl93 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl93, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -1026,12 +1098,12 @@ func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys94Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys94Slc - var yyhl94 bool = l >= 0 - for yyj94 := 0; ; yyj94++ { - if yyhl94 { - if yyj94 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -1040,10 +1112,10 @@ func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys94Slc = r.DecodeBytes(yys94Slc, true, true) - yys94 := string(yys94Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys94 { + switch yys3 { case "parallelism": if r.TryDecodeAsNil() { if x.Parallelism != nil { @@ -1053,8 +1125,8 @@ func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.Parallelism == nil { x.Parallelism = new(int32) } - yym96 := z.DecBinary() - _ = yym96 + yym5 := z.DecBinary() + _ = yym5 if false { } else { *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) @@ -1069,8 +1141,8 @@ func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.Completions == nil { x.Completions = new(int32) } - yym98 := z.DecBinary() - _ = yym98 + yym7 := z.DecBinary() + _ = yym7 if false { } else { *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) @@ -1085,8 +1157,8 @@ func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.ActiveDeadlineSeconds == nil { x.ActiveDeadlineSeconds = new(int64) } - yym100 := z.DecBinary() - _ = yym100 + yym9 := z.DecBinary() + _ = yym9 if false { } else { *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) @@ -1099,10 +1171,10 @@ func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } else { if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) + x.Selector = new(pkg1_v1.LabelSelector) } - yym102 := z.DecBinary() - _ = yym102 + yym11 := z.DecBinary() + _ = yym11 if false { } else if z.HasExtensions() && z.DecExt(x.Selector) { } else { @@ -1118,8 +1190,8 @@ func (x 
*JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.ManualSelector == nil { x.ManualSelector = new(bool) } - yym104 := z.DecBinary() - _ = yym104 + yym13 := z.DecBinary() + _ = yym13 if false { } else { *((*bool)(x.ManualSelector)) = r.DecodeBool() @@ -1127,15 +1199,15 @@ func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } case "template": if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} + x.Template = pkg3_v1.PodTemplateSpec{} } else { - yyv105 := &x.Template - yyv105.CodecDecodeSelf(d) + yyv14 := &x.Template + yyv14.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys94) - } // end switch yys94 - } // end for yyj94 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -1143,16 +1215,16 @@ func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj106 int - var yyb106 bool - var yyhl106 bool = l >= 0 - yyj106++ - if yyhl106 { - yyb106 = yyj106 > l + var yyj15 int + var yyb15 bool + var yyhl15 bool = l >= 0 + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb106 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb106 { + if yyb15 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1165,20 +1237,20 @@ func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.Parallelism == nil { x.Parallelism = new(int32) } - yym108 := z.DecBinary() - _ = yym108 + yym17 := z.DecBinary() + _ = yym17 if false { } else { *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) } } - yyj106++ - if yyhl106 { - yyb106 = yyj106 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb106 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb106 { + if yyb15 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1191,20 +1263,20 @@ func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.Completions == nil { x.Completions = new(int32) } - yym110 := z.DecBinary() - _ = yym110 + yym19 := z.DecBinary() + _ = yym19 if false { } else { *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) } } - yyj106++ - if yyhl106 { - yyb106 = yyj106 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb106 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb106 { + if yyb15 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1217,20 +1289,20 @@ func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.ActiveDeadlineSeconds == nil { x.ActiveDeadlineSeconds = new(int64) } - yym112 := z.DecBinary() - _ = yym112 + yym21 := z.DecBinary() + _ = yym21 if false { } else { *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) } } - yyj106++ - if yyhl106 { - yyb106 = yyj106 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb106 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb106 { + if yyb15 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1241,23 +1313,23 @@ func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } } else { if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) + x.Selector = new(pkg1_v1.LabelSelector) } - yym114 := z.DecBinary() - _ = yym114 + yym23 := z.DecBinary() + _ = yym23 if false { } else if z.HasExtensions() && z.DecExt(x.Selector) { } else { z.DecFallback(x.Selector, false) } } - yyj106++ - if yyhl106 { - yyb106 = yyj106 > l + yyj15++ + if yyhl15 
{ + yyb15 = yyj15 > l } else { - yyb106 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb106 { + if yyb15 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1270,42 +1342,42 @@ func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.ManualSelector == nil { x.ManualSelector = new(bool) } - yym116 := z.DecBinary() - _ = yym116 + yym25 := z.DecBinary() + _ = yym25 if false { } else { *((*bool)(x.ManualSelector)) = r.DecodeBool() } } - yyj106++ - if yyhl106 { - yyb106 = yyj106 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb106 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb106 { + if yyb15 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} + x.Template = pkg3_v1.PodTemplateSpec{} } else { - yyv117 := &x.Template - yyv117.CodecDecodeSelf(d) + yyv26 := &x.Template + yyv26.CodecDecodeSelf(d) } for { - yyj106++ - if yyhl106 { - yyb106 = yyj106 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb106 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb106 { + if yyb15 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj106-1, "") + z.DecStructFieldNotFound(yyj15-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -1317,43 +1389,43 @@ func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym118 := z.EncBinary() - _ = yym118 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep119 := !z.EncBinary() - yy2arr119 := z.EncBasicHandle().StructToArray - var yyq119 [6]bool - _, _, _ = yysep119, yyq119, yy2arr119 - const yyr119 bool = false - yyq119[0] = len(x.Conditions) != 0 - yyq119[1] = x.StartTime != nil - yyq119[2] = x.CompletionTime != nil - yyq119[3] = x.Active != 0 - yyq119[4] = x.Succeeded != 0 - yyq119[5] = x.Failed != 0 - var yynn119 int - if yyr119 || yy2arr119 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Conditions) != 0 + yyq2[1] = x.StartTime != nil + yyq2[2] = x.CompletionTime != nil + yyq2[3] = x.Active != 0 + yyq2[4] = x.Succeeded != 0 + yyq2[5] = x.Failed != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(6) } else { - yynn119 = 0 - for _, b := range yyq119 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn119++ + yynn2++ } } - r.EncodeMapStart(yynn119) - yynn119 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr119 || yy2arr119 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq119[0] { + if yyq2[0] { if x.Conditions == nil { r.EncodeNil() } else { - yym121 := z.EncBinary() - _ = yym121 + yym4 := z.EncBinary() + _ = yym4 if false { } else { h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) @@ -1363,15 +1435,15 @@ func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq119[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("conditions")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Conditions == nil { r.EncodeNil() } else { - yym122 := z.EncBinary() - _ = yym122 + yym5 := z.EncBinary() + _ = yym5 if false { } else { h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) @@ -1379,19 
+1451,19 @@ func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr119 || yy2arr119 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq119[1] { + if yyq2[1] { if x.StartTime == nil { r.EncodeNil() } else { - yym124 := z.EncBinary() - _ = yym124 + yym7 := z.EncBinary() + _ = yym7 if false { } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym124 { + } else if yym7 { z.EncBinaryMarshal(x.StartTime) - } else if !yym124 && z.IsJSONHandle() { + } else if !yym7 && z.IsJSONHandle() { z.EncJSONMarshal(x.StartTime) } else { z.EncFallback(x.StartTime) @@ -1401,20 +1473,20 @@ func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq119[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("startTime")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.StartTime == nil { r.EncodeNil() } else { - yym125 := z.EncBinary() - _ = yym125 + yym8 := z.EncBinary() + _ = yym8 if false { } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym125 { + } else if yym8 { z.EncBinaryMarshal(x.StartTime) - } else if !yym125 && z.IsJSONHandle() { + } else if !yym8 && z.IsJSONHandle() { z.EncJSONMarshal(x.StartTime) } else { z.EncFallback(x.StartTime) @@ -1422,19 +1494,19 @@ func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr119 || yy2arr119 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq119[2] { + if yyq2[2] { if x.CompletionTime == nil { r.EncodeNil() } else { - yym127 := z.EncBinary() - _ = yym127 + yym10 := z.EncBinary() + _ = yym10 if false { } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { - } else if yym127 { + } else if yym10 { z.EncBinaryMarshal(x.CompletionTime) - } else if !yym127 && z.IsJSONHandle() { + } else if !yym10 && z.IsJSONHandle() { z.EncJSONMarshal(x.CompletionTime) } else { z.EncFallback(x.CompletionTime) @@ -1444,20 +1516,20 @@ func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq119[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("completionTime")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.CompletionTime == nil { r.EncodeNil() } else { - yym128 := z.EncBinary() - _ = yym128 + yym11 := z.EncBinary() + _ = yym11 if false { } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { - } else if yym128 { + } else if yym11 { z.EncBinaryMarshal(x.CompletionTime) - } else if !yym128 && z.IsJSONHandle() { + } else if !yym11 && z.IsJSONHandle() { z.EncJSONMarshal(x.CompletionTime) } else { z.EncFallback(x.CompletionTime) @@ -1465,11 +1537,11 @@ func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr119 || yy2arr119 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq119[3] { - yym130 := z.EncBinary() - _ = yym130 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeInt(int64(x.Active)) @@ -1478,23 +1550,23 @@ func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq119[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("active")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym131 := z.EncBinary() - _ = yym131 + yym14 := z.EncBinary() + _ = yym14 if false { } else { 
r.EncodeInt(int64(x.Active)) } } } - if yyr119 || yy2arr119 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq119[4] { - yym133 := z.EncBinary() - _ = yym133 + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeInt(int64(x.Succeeded)) @@ -1503,23 +1575,23 @@ func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq119[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("succeeded")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym134 := z.EncBinary() - _ = yym134 + yym17 := z.EncBinary() + _ = yym17 if false { } else { r.EncodeInt(int64(x.Succeeded)) } } } - if yyr119 || yy2arr119 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq119[5] { - yym136 := z.EncBinary() - _ = yym136 + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 if false { } else { r.EncodeInt(int64(x.Failed)) @@ -1528,19 +1600,19 @@ func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq119[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("failed")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym137 := z.EncBinary() - _ = yym137 + yym20 := z.EncBinary() + _ = yym20 if false { } else { r.EncodeInt(int64(x.Failed)) } } } - if yyr119 || yy2arr119 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -1553,25 +1625,25 @@ func (x *JobStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym138 := z.DecBinary() - _ = yym138 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct139 := r.ContainerType() - if yyct139 == codecSelferValueTypeMap1234 { - yyl139 := r.ReadMapStart() - if yyl139 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl139, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct139 == codecSelferValueTypeArray1234 { - yyl139 := r.ReadArrayStart() - if yyl139 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl139, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -1583,12 +1655,12 @@ func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys140Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys140Slc - var yyhl140 bool = l >= 0 - for yyj140 := 0; ; yyj140++ { - if yyhl140 { - if yyj140 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -1597,20 +1669,20 @@ func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys140Slc = r.DecodeBytes(yys140Slc, true, true) - yys140 := string(yys140Slc) + yys3Slc = 
r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys140 { + switch yys3 { case "conditions": if r.TryDecodeAsNil() { x.Conditions = nil } else { - yyv141 := &x.Conditions - yym142 := z.DecBinary() - _ = yym142 + yyv4 := &x.Conditions + yym5 := z.DecBinary() + _ = yym5 if false { } else { - h.decSliceJobCondition((*[]JobCondition)(yyv141), d) + h.decSliceJobCondition((*[]JobCondition)(yyv4), d) } } case "startTime": @@ -1620,15 +1692,15 @@ func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } else { if x.StartTime == nil { - x.StartTime = new(pkg1_unversioned.Time) + x.StartTime = new(pkg1_v1.Time) } - yym144 := z.DecBinary() - _ = yym144 + yym7 := z.DecBinary() + _ = yym7 if false { } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym144 { + } else if yym7 { z.DecBinaryUnmarshal(x.StartTime) - } else if !yym144 && z.IsJSONHandle() { + } else if !yym7 && z.IsJSONHandle() { z.DecJSONUnmarshal(x.StartTime) } else { z.DecFallback(x.StartTime, false) @@ -1641,15 +1713,15 @@ func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } else { if x.CompletionTime == nil { - x.CompletionTime = new(pkg1_unversioned.Time) + x.CompletionTime = new(pkg1_v1.Time) } - yym146 := z.DecBinary() - _ = yym146 + yym9 := z.DecBinary() + _ = yym9 if false { } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { - } else if yym146 { + } else if yym9 { z.DecBinaryUnmarshal(x.CompletionTime) - } else if !yym146 && z.IsJSONHandle() { + } else if !yym9 && z.IsJSONHandle() { z.DecJSONUnmarshal(x.CompletionTime) } else { z.DecFallback(x.CompletionTime, false) @@ -1659,24 +1731,42 @@ func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Active = 0 } else { - x.Active = int32(r.DecodeInt(32)) + yyv10 := &x.Active + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } } case "succeeded": if r.TryDecodeAsNil() { x.Succeeded = 0 } else { - x.Succeeded = int32(r.DecodeInt(32)) + yyv12 := &x.Succeeded + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(yyv12)) = int32(r.DecodeInt(32)) + } } case "failed": if r.TryDecodeAsNil() { x.Failed = 0 } else { - x.Failed = int32(r.DecodeInt(32)) + yyv14 := &x.Failed + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } } default: - z.DecStructFieldNotFound(-1, yys140) - } // end switch yys140 - } // end for yyj140 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -1684,16 +1774,16 @@ func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj150 int - var yyb150 bool - var yyhl150 bool = l >= 0 - yyj150++ - if yyhl150 { - yyb150 = yyj150 > l + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb150 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb150 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1701,21 +1791,21 @@ func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Conditions = nil } else { - yyv151 := &x.Conditions - yym152 := z.DecBinary() - _ = yym152 + yyv17 := &x.Conditions + yym18 := z.DecBinary() + _ = yym18 if 
false { } else { - h.decSliceJobCondition((*[]JobCondition)(yyv151), d) + h.decSliceJobCondition((*[]JobCondition)(yyv17), d) } } - yyj150++ - if yyhl150 { - yyb150 = yyj150 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb150 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb150 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1726,27 +1816,27 @@ func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } } else { if x.StartTime == nil { - x.StartTime = new(pkg1_unversioned.Time) + x.StartTime = new(pkg1_v1.Time) } - yym154 := z.DecBinary() - _ = yym154 + yym20 := z.DecBinary() + _ = yym20 if false { } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym154 { + } else if yym20 { z.DecBinaryUnmarshal(x.StartTime) - } else if !yym154 && z.IsJSONHandle() { + } else if !yym20 && z.IsJSONHandle() { z.DecJSONUnmarshal(x.StartTime) } else { z.DecFallback(x.StartTime, false) } } - yyj150++ - if yyhl150 { - yyb150 = yyj150 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb150 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb150 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1757,27 +1847,27 @@ func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } } else { if x.CompletionTime == nil { - x.CompletionTime = new(pkg1_unversioned.Time) + x.CompletionTime = new(pkg1_v1.Time) } - yym156 := z.DecBinary() - _ = yym156 + yym22 := z.DecBinary() + _ = yym22 if false { } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { - } else if yym156 { + } else if yym22 { z.DecBinaryUnmarshal(x.CompletionTime) - } else if !yym156 && z.IsJSONHandle() { + } else if !yym22 && z.IsJSONHandle() { z.DecJSONUnmarshal(x.CompletionTime) } else { z.DecFallback(x.CompletionTime, false) } } - yyj150++ - if yyhl150 { - yyb150 = yyj150 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb150 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb150 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1785,15 +1875,21 @@ func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Active = 0 } else { - x.Active = int32(r.DecodeInt(32)) + yyv23 := &x.Active + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } } - yyj150++ - if yyhl150 { - yyb150 = yyj150 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb150 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb150 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1801,15 +1897,21 @@ func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Succeeded = 0 } else { - x.Succeeded = int32(r.DecodeInt(32)) + yyv25 := &x.Succeeded + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } } - yyj150++ - if yyhl150 { - yyb150 = yyj150 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb150 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb150 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1817,20 +1919,26 @@ func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Failed = 0 } else { - x.Failed = int32(r.DecodeInt(32)) + yyv27 := &x.Failed + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(yyv27)) = int32(r.DecodeInt(32)) + } } 
for { - yyj150++ - if yyhl150 { - yyb150 = yyj150 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb150 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb150 { + if yyb16 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj150-1, "") + z.DecStructFieldNotFound(yyj16-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -1839,8 +1947,8 @@ func (x JobConditionType) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym160 := z.EncBinary() - _ = yym160 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -1852,8 +1960,8 @@ func (x *JobConditionType) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym161 := z.DecBinary() - _ = yym161 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -1868,34 +1976,34 @@ func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym162 := z.EncBinary() - _ = yym162 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep163 := !z.EncBinary() - yy2arr163 := z.EncBasicHandle().StructToArray - var yyq163 [6]bool - _, _, _ = yysep163, yyq163, yy2arr163 - const yyr163 bool = false - yyq163[2] = true - yyq163[3] = true - yyq163[4] = x.Reason != "" - yyq163[5] = x.Message != "" - var yynn163 int - if yyr163 || yy2arr163 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(6) } else { - yynn163 = 2 - for _, b := range yyq163 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn163++ + yynn2++ } } - r.EncodeMapStart(yynn163) - yynn163 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr163 || yy2arr163 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) x.Type.CodecEncodeSelf(e) } else { @@ -1904,106 +2012,96 @@ func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Type.CodecEncodeSelf(e) } - if yyr163 || yy2arr163 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym166 := z.EncBinary() - _ = yym166 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } + yysf7 := &x.Status + yysf7.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym167 := z.EncBinary() - _ = yym167 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } + yysf8 := &x.Status + yysf8.CodecEncodeSelf(e) } - if yyr163 || yy2arr163 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq163[2] { - yy169 := &x.LastProbeTime - yym170 := z.EncBinary() - _ = yym170 + if yyq2[2] { + yy10 := &x.LastProbeTime + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy169) { - } else if yym170 { - z.EncBinaryMarshal(yy169) - } else if 
!yym170 && z.IsJSONHandle() { - z.EncJSONMarshal(yy169) + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) } else { - z.EncFallback(yy169) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq163[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy171 := &x.LastProbeTime - yym172 := z.EncBinary() - _ = yym172 + yy12 := &x.LastProbeTime + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy171) { - } else if yym172 { - z.EncBinaryMarshal(yy171) - } else if !yym172 && z.IsJSONHandle() { - z.EncJSONMarshal(yy171) + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) } else { - z.EncFallback(yy171) + z.EncFallback(yy12) } } } - if yyr163 || yy2arr163 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq163[3] { - yy174 := &x.LastTransitionTime - yym175 := z.EncBinary() - _ = yym175 + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 if false { - } else if z.HasExtensions() && z.EncExt(yy174) { - } else if yym175 { - z.EncBinaryMarshal(yy174) - } else if !yym175 && z.IsJSONHandle() { - z.EncJSONMarshal(yy174) + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) } else { - z.EncFallback(yy174) + z.EncFallback(yy15) } } else { r.EncodeNil() } } else { - if yyq163[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy176 := &x.LastTransitionTime - yym177 := z.EncBinary() - _ = yym177 + yy17 := &x.LastTransitionTime + yym18 := z.EncBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.EncExt(yy176) { - } else if yym177 { - z.EncBinaryMarshal(yy176) - } else if !yym177 && z.IsJSONHandle() { - z.EncJSONMarshal(yy176) + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } else if !yym18 && z.IsJSONHandle() { + z.EncJSONMarshal(yy17) } else { - z.EncFallback(yy176) + z.EncFallback(yy17) } } } - if yyr163 || yy2arr163 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq163[4] { - yym179 := z.EncBinary() - _ = yym179 + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) @@ -2012,23 +2110,23 @@ func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq163[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("reason")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym180 := z.EncBinary() - _ = yym180 + yym21 := z.EncBinary() + _ = yym21 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) } } } - if yyr163 || yy2arr163 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq163[5] { - yym182 := z.EncBinary() - _ = yym182 + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 if false { } 
else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) @@ -2037,19 +2135,19 @@ func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq163[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("message")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym183 := z.EncBinary() - _ = yym183 + yym24 := z.EncBinary() + _ = yym24 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) } } } - if yyr163 || yy2arr163 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -2062,25 +2160,25 @@ func (x *JobCondition) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym184 := z.DecBinary() - _ = yym184 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct185 := r.ContainerType() - if yyct185 == codecSelferValueTypeMap1234 { - yyl185 := r.ReadMapStart() - if yyl185 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl185, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct185 == codecSelferValueTypeArray1234 { - yyl185 := r.ReadArrayStart() - if yyl185 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl185, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -2092,12 +2190,12 @@ func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys186Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys186Slc - var yyhl186 bool = l >= 0 - for yyj186 := 0; ; yyj186++ { - if yyhl186 { - if yyj186 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -2106,72 +2204,86 @@ func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys186Slc = r.DecodeBytes(yys186Slc, true, true) - yys186 := string(yys186Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys186 { + switch yys3 { case "type": if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = JobConditionType(r.DecodeString()) + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = "" } else { - x.Status = pkg2_v1.ConditionStatus(r.DecodeString()) + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) } case "lastProbeTime": if r.TryDecodeAsNil() { - x.LastProbeTime = pkg1_unversioned.Time{} + x.LastProbeTime = pkg1_v1.Time{} } else { - yyv189 := &x.LastProbeTime - yym190 := z.DecBinary() - _ = yym190 + yyv6 := &x.LastProbeTime + yym7 := z.DecBinary() + _ = yym7 if false { - } else if z.HasExtensions() && z.DecExt(yyv189) { - } else if yym190 { - z.DecBinaryUnmarshal(yyv189) - } else if !yym190 && z.IsJSONHandle() { - 
z.DecJSONUnmarshal(yyv189) + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) } else { - z.DecFallback(yyv189, false) + z.DecFallback(yyv6, false) } } case "lastTransitionTime": if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} + x.LastTransitionTime = pkg1_v1.Time{} } else { - yyv191 := &x.LastTransitionTime - yym192 := z.DecBinary() - _ = yym192 + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv191) { - } else if yym192 { - z.DecBinaryUnmarshal(yyv191) - } else if !yym192 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv191) + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) } else { - z.DecFallback(yyv191, false) + z.DecFallback(yyv8, false) } } case "reason": if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv10 := &x.Reason + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } case "message": if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv12 := &x.Message + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys186) - } // end switch yys186 - } // end for yyj186 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -2179,16 +2291,16 @@ func (x *JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj195 int - var yyb195 bool - var yyhl195 bool = l >= 0 - yyj195++ - if yyhl195 { - yyb195 = yyj195 > l + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb195 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb195 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2196,15 +2308,16 @@ func (x *JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = JobConditionType(r.DecodeString()) + yyv15 := &x.Type + yyv15.CodecDecodeSelf(d) } - yyj195++ - if yyhl195 { - yyb195 = yyj195 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb195 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb195 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2212,69 +2325,70 @@ func (x *JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Status = "" } else { - x.Status = pkg2_v1.ConditionStatus(r.DecodeString()) + yyv16 := &x.Status + yyv16.CodecDecodeSelf(d) } - yyj195++ - if yyhl195 { - yyb195 = yyj195 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb195 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb195 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.LastProbeTime = pkg1_unversioned.Time{} + x.LastProbeTime = pkg1_v1.Time{} } else { - yyv198 := &x.LastProbeTime - yym199 := z.DecBinary() - _ = yym199 + yyv17 := &x.LastProbeTime + yym18 := z.DecBinary() + _ = yym18 if false { - } else if 
z.HasExtensions() && z.DecExt(yyv198) { - } else if yym199 { - z.DecBinaryUnmarshal(yyv198) - } else if !yym199 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv198) + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) } else { - z.DecFallback(yyv198, false) + z.DecFallback(yyv17, false) } } - yyj195++ - if yyhl195 { - yyb195 = yyj195 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb195 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb195 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} + x.LastTransitionTime = pkg1_v1.Time{} } else { - yyv200 := &x.LastTransitionTime - yym201 := z.DecBinary() - _ = yym201 + yyv19 := &x.LastTransitionTime + yym20 := z.DecBinary() + _ = yym20 if false { - } else if z.HasExtensions() && z.DecExt(yyv200) { - } else if yym201 { - z.DecBinaryUnmarshal(yyv200) - } else if !yym201 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv200) + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if yym20 { + z.DecBinaryUnmarshal(yyv19) + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) } else { - z.DecFallback(yyv200, false) + z.DecFallback(yyv19, false) } } - yyj195++ - if yyhl195 { - yyb195 = yyj195 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb195 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb195 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2282,15 +2396,21 @@ func (x *JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv21 := &x.Reason + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } } - yyj195++ - if yyhl195 { - yyb195 = yyj195 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb195 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb195 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2298,20 +2418,26 @@ func (x *JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv23 := &x.Message + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } } for { - yyj195++ - if yyhl195 { - yyb195 = yyj195 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb195 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb195 { + if yyb14 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj195-1, "") + z.DecStructFieldNotFound(yyj14-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -2321,10 +2447,10 @@ func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) { z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv204 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy205 := &yyv204 - yy205.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -2334,83 +2460,86 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv206 := *v - 
yyh206, yyl206 := z.DecSliceHelperStart() - var yyc206 bool - if yyl206 == 0 { - if yyv206 == nil { - yyv206 = []Job{} - yyc206 = true - } else if len(yyv206) != 0 { - yyv206 = yyv206[:0] - yyc206 = true - } - } else if yyl206 > 0 { - var yyrr206, yyrl206 int - var yyrt206 bool - if yyl206 > cap(yyv206) { + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Job{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg206 := len(yyv206) > 0 - yyv2206 := yyv206 - yyrl206, yyrt206 = z.DecInferLen(yyl206, z.DecBasicHandle().MaxInitLen, 824) - if yyrt206 { - if yyrl206 <= cap(yyv206) { - yyv206 = yyv206[:yyrl206] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 880) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv206 = make([]Job, yyrl206) + yyv1 = make([]Job, yyrl1) } } else { - yyv206 = make([]Job, yyrl206) + yyv1 = make([]Job, yyrl1) } - yyc206 = true - yyrr206 = len(yyv206) - if yyrg206 { - copy(yyv206, yyv2206) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl206 != len(yyv206) { - yyv206 = yyv206[:yyl206] - yyc206 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj206 := 0 - for ; yyj206 < yyrr206; yyj206++ { - yyh206.ElemContainerState(yyj206) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv206[yyj206] = Job{} + yyv1[yyj1] = Job{} } else { - yyv207 := &yyv206[yyj206] - yyv207.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt206 { - for ; yyj206 < yyl206; yyj206++ { - yyv206 = append(yyv206, Job{}) - yyh206.ElemContainerState(yyj206) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Job{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv206[yyj206] = Job{} + yyv1[yyj1] = Job{} } else { - yyv208 := &yyv206[yyj206] - yyv208.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj206 := 0 - for ; !r.CheckBreak(); yyj206++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj206 >= len(yyv206) { - yyv206 = append(yyv206, Job{}) // var yyz206 Job - yyc206 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Job{}) // var yyz1 Job + yyc1 = true } - yyh206.ElemContainerState(yyj206) - if yyj206 < len(yyv206) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv206[yyj206] = Job{} + yyv1[yyj1] = Job{} } else { - yyv209 := &yyv206[yyj206] - yyv209.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -2418,17 +2547,17 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { } } - if yyj206 < len(yyv206) { - yyv206 = yyv206[:yyj206] - yyc206 = true - } else if yyj206 == 0 && yyv206 == nil { - yyv206 = []Job{} - yyc206 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Job{} + yyc1 = true } } - yyh206.End() - if yyc206 { - *v = yyv206 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -2437,10 +2566,10 @@ func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Enc z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv210 := range v { + for _, yyv1 := range v { 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy211 := &yyv210 - yy211.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -2450,83 +2579,86 @@ func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.De z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv212 := *v - yyh212, yyl212 := z.DecSliceHelperStart() - var yyc212 bool - if yyl212 == 0 { - if yyv212 == nil { - yyv212 = []JobCondition{} - yyc212 = true - } else if len(yyv212) != 0 { - yyv212 = yyv212[:0] - yyc212 = true - } - } else if yyl212 > 0 { - var yyrr212, yyrl212 int - var yyrt212 bool - if yyl212 > cap(yyv212) { + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []JobCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg212 := len(yyv212) > 0 - yyv2212 := yyv212 - yyrl212, yyrt212 = z.DecInferLen(yyl212, z.DecBasicHandle().MaxInitLen, 112) - if yyrt212 { - if yyrl212 <= cap(yyv212) { - yyv212 = yyv212[:yyrl212] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv212 = make([]JobCondition, yyrl212) + yyv1 = make([]JobCondition, yyrl1) } } else { - yyv212 = make([]JobCondition, yyrl212) + yyv1 = make([]JobCondition, yyrl1) } - yyc212 = true - yyrr212 = len(yyv212) - if yyrg212 { - copy(yyv212, yyv2212) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl212 != len(yyv212) { - yyv212 = yyv212[:yyl212] - yyc212 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj212 := 0 - for ; yyj212 < yyrr212; yyj212++ { - yyh212.ElemContainerState(yyj212) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv212[yyj212] = JobCondition{} + yyv1[yyj1] = JobCondition{} } else { - yyv213 := &yyv212[yyj212] - yyv213.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt212 { - for ; yyj212 < yyl212; yyj212++ { - yyv212 = append(yyv212, JobCondition{}) - yyh212.ElemContainerState(yyj212) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, JobCondition{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv212[yyj212] = JobCondition{} + yyv1[yyj1] = JobCondition{} } else { - yyv214 := &yyv212[yyj212] - yyv214.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj212 := 0 - for ; !r.CheckBreak(); yyj212++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj212 >= len(yyv212) { - yyv212 = append(yyv212, JobCondition{}) // var yyz212 JobCondition - yyc212 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, JobCondition{}) // var yyz1 JobCondition + yyc1 = true } - yyh212.ElemContainerState(yyj212) - if yyj212 < len(yyv212) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv212[yyj212] = JobCondition{} + yyv1[yyj1] = JobCondition{} } else { - yyv215 := &yyv212[yyj212] - yyv215.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -2534,16 +2666,16 @@ func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.De } } - if yyj212 < len(yyv212) { - yyv212 = yyv212[:yyj212] - yyc212 
= true - } else if yyj212 == 0 && yyv212 == nil { - yyv212 = []JobCondition{} - yyc212 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []JobCondition{} + yyc1 = true } } - yyh212.End() - if yyc212 { - *v = yyv212 + yyh1.End() + if yyc1 { + *v = yyv1 } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.go index af179bcce6..c59c5d832b 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1 import ( - "k8s.io/kubernetes/pkg/api/unversioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/api/v1" ) @@ -25,11 +25,11 @@ import ( // Job represents the configuration of a single job. type Job struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec is a structure defining the expected behavior of a job. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -44,11 +44,11 @@ type Job struct { // JobList is a collection of jobs. type JobList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of Job. Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"` @@ -83,7 +83,7 @@ type JobSpec struct { // Normally, the system sets this field for you. // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors // +optional - Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` // ManualSelector controls generation of pod labels and pod selectors. // Leave `manualSelector` unset unless you are certain what you are doing. @@ -116,13 +116,13 @@ type JobStatus struct { // It is not guaranteed to be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. // +optional - StartTime *unversioned.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` + StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` // CompletionTime represents time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. // +optional - CompletionTime *unversioned.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` + CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` // Active is the number of actively running pods. 
// +optional @@ -155,10 +155,10 @@ type JobCondition struct { Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` // Last time the condition was checked. // +optional - LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` // Last time the condition transit from one status to another. // +optional - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` // (brief) reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.conversion.go index d47abdc43f..e07cc76c53 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,12 +21,12 @@ limitations under the License. package v1 import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" api_v1 "k8s.io/kubernetes/pkg/api/v1" batch "k8s.io/kubernetes/pkg/apis/batch" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" unsafe "unsafe" ) @@ -52,10 +52,7 @@ func RegisterConversions(scheme *runtime.Scheme) error { } func autoConvert_v1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -70,10 +67,7 @@ func Convert_v1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) er } func autoConvert_batch_Job_To_v1_Job(in *batch.Job, out *Job, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_batch_JobSpec_To_v1_JobSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -146,7 +140,7 @@ func autoConvert_batch_JobList_To_v1_JobList(in *batch.JobList, out *JobList, s } } } else { - out.Items = nil + out.Items = make([]Job, 0) } return nil } @@ -159,7 +153,7 @@ func autoConvert_v1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s out.Parallelism = (*int32)(unsafe.Pointer(in.Parallelism)) out.Completions = (*int32)(unsafe.Pointer(in.Completions)) out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) - out.Selector = (*unversioned.LabelSelector)(unsafe.Pointer(in.Selector)) + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) out.ManualSelector = (*bool)(unsafe.Pointer(in.ManualSelector)) if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err @@ -171,7 +165,7 @@ func autoConvert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *JobSpec, s out.Parallelism = (*int32)(unsafe.Pointer(in.Parallelism)) out.Completions = (*int32)(unsafe.Pointer(in.Completions)) out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) - out.Selector = (*unversioned.LabelSelector)(unsafe.Pointer(in.Selector)) + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) out.ManualSelector = (*bool)(unsafe.Pointer(in.ManualSelector)) if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err @@ -181,8 +175,8 @@ func autoConvert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *JobSpec, s func autoConvert_v1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { out.Conditions = *(*[]batch.JobCondition)(unsafe.Pointer(&in.Conditions)) - out.StartTime = (*unversioned.Time)(unsafe.Pointer(in.StartTime)) - out.CompletionTime = (*unversioned.Time)(unsafe.Pointer(in.CompletionTime)) + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) + out.CompletionTime = (*meta_v1.Time)(unsafe.Pointer(in.CompletionTime)) out.Active = in.Active out.Succeeded = in.Succeeded out.Failed = in.Failed @@ -195,8 +189,8 @@ func Convert_v1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus func autoConvert_batch_JobStatus_To_v1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { out.Conditions = *(*[]JobCondition)(unsafe.Pointer(&in.Conditions)) - out.StartTime = (*unversioned.Time)(unsafe.Pointer(in.StartTime)) - out.CompletionTime = (*unversioned.Time)(unsafe.Pointer(in.CompletionTime)) + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) + out.CompletionTime = (*meta_v1.Time)(unsafe.Pointer(in.CompletionTime)) out.Active = in.Active out.Succeeded = in.Succeeded out.Failed = in.Failed diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.deepcopy.go index 162373788d..a5f0163a4e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -21,10 +21,10 @@ limitations under the License. package v1 import ( - unversioned "k8s.io/kubernetes/pkg/api/unversioned" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" api_v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" reflect "reflect" ) @@ -48,9 +48,11 @@ func DeepCopy_v1_Job(in interface{}, out interface{}, c *conversion.Cloner) erro { in := in.(*Job) out := out.(*Job) - out.TypeMeta = in.TypeMeta - if err := api_v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) } if err := DeepCopy_v1_JobSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -66,12 +68,9 @@ func DeepCopy_v1_JobCondition(in interface{}, out interface{}, c *conversion.Clo { in := in.(*JobCondition) out := out.(*JobCondition) - out.Type = in.Type - out.Status = in.Status + *out = *in out.LastProbeTime = in.LastProbeTime.DeepCopy() out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - out.Reason = in.Reason - out.Message = in.Message return nil } } @@ -80,8 +79,7 @@ func DeepCopy_v1_JobList(in interface{}, out interface{}, c *conversion.Cloner) { in := in.(*JobList) out := out.(*JobList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Job, len(*in)) @@ -90,8 +88,6 @@ func DeepCopy_v1_JobList(in interface{}, out interface{}, c *conversion.Cloner) return err } } - } else { - out.Items = nil } return nil } @@ -101,42 +97,34 @@ func DeepCopy_v1_JobSpec(in interface{}, out interface{}, c *conversion.Cloner) { in := in.(*JobSpec) out := out.(*JobSpec) + *out = *in if in.Parallelism != nil { in, out := &in.Parallelism, &out.Parallelism *out = new(int32) **out = **in - } else { - out.Parallelism = nil } if in.Completions != nil { in, out := &in.Completions, &out.Completions *out = new(int32) **out = **in - } else { - out.Completions = nil } if in.ActiveDeadlineSeconds != nil { in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds *out = new(int64) **out = **in - } else { - out.ActiveDeadlineSeconds = nil } if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*meta_v1.LabelSelector) } - } else { - out.Selector = nil } if in.ManualSelector != nil { in, out := &in.ManualSelector, &out.ManualSelector *out = new(bool) **out = **in - } else { - out.ManualSelector = nil } if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { return err @@ -149,6 +137,7 @@ func DeepCopy_v1_JobStatus(in interface{}, out interface{}, c *conversion.Cloner { in := in.(*JobStatus) out := out.(*JobStatus) + *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]JobCondition, len(*in)) @@ -157,26 +146,17 @@ func DeepCopy_v1_JobStatus(in interface{}, out interface{}, c *conversion.Cloner return err } } - } else { - out.Conditions = nil } if in.StartTime != nil { in, out := &in.StartTime, &out.StartTime - *out = new(unversioned.Time) + *out = new(meta_v1.Time) **out = (*in).DeepCopy() - } else { - out.StartTime = nil } 
if in.CompletionTime != nil { in, out := &in.CompletionTime, &out.CompletionTime - *out = new(unversioned.Time) + *out = new(meta_v1.Time) **out = (*in).DeepCopy() - } else { - out.CompletionTime = nil } - out.Active = in.Active - out.Succeeded = in.Succeeded - out.Failed = in.Failed return nil } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.defaults.go index 94d0bf7953..b291d5f896 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,8 +21,8 @@ limitations under the License. package v1 import ( + runtime "k8s.io/apimachinery/pkg/runtime" api_v1 "k8s.io/kubernetes/pkg/api/v1" - runtime "k8s.io/kubernetes/pkg/runtime" ) // RegisterDefaults adds defaulters functions to the given scheme. @@ -64,6 +64,23 @@ func SetObjectDefaults_Job(in *Job) { if a.VolumeSource.AzureDisk != nil { api_v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) } + if a.VolumeSource.Projected != nil { + api_v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + api_v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + api_v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } } for i := range in.Spec.Template.Spec.InitContainers { a := &in.Spec.Template.Spec.InitContainers[i] diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/BUILD index ab90933993..5381d008dd 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/BUILD @@ -4,10 +4,8 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", "go_test", - "cgo_library", ) go_library( @@ -28,17 +26,18 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/resource:go_default_library", - "//pkg/api/unversioned:go_default_library", "//pkg/api/v1:go_default_library", "//pkg/apis/batch:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//pkg/util/intstr:go_default_library", - "//pkg/watch/versioned:go_default_library", + "//pkg/apis/batch/v1:go_default_library", "//vendor:github.com/gogo/protobuf/proto", "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/api/resource", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/util/intstr", ], ) @@ -49,9 +48,21 @@ go_test( deps = [ "//pkg/api:go_default_library", "//pkg/api/install:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/apis/batch/install:go_default_library", "//pkg/apis/batch/v2alpha1:go_default_library", - 
"//pkg/runtime:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/runtime", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion.go index b5f7faf0f1..2393fdca97 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion.go @@ -19,32 +19,21 @@ package v2alpha1 import ( "fmt" - "k8s.io/kubernetes/pkg/api" - v1 "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions - err := scheme.AddConversionFuncs( - Convert_batch_JobSpec_To_v2alpha1_JobSpec, - Convert_v2alpha1_JobSpec_To_batch_JobSpec, - ) - if err != nil { - return err - } - + var err error // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. - for _, kind := range []string{"Job", "JobTemplate", "CronJob"} { - err = api.Scheme.AddFieldLabelConversionFunc("batch/v2alpha1", kind, + for _, k := range []string{"Job", "JobTemplate", "CronJob"} { + kind := k // don't close over range variables + err = scheme.AddFieldLabelConversionFunc("batch/v2alpha1", kind, func(label, value string) (string, string, error) { switch label { case "metadata.name", "metadata.namespace", "status.successful": return label, value, nil default: - return "", "", fmt.Errorf("field label not supported: %s", label) + return "", "", fmt.Errorf("field label %q not supported for %q", label, kind) } }) if err != nil { @@ -53,39 +42,3 @@ func addConversionFuncs(scheme *runtime.Scheme) error { } return nil } - -func Convert_batch_JobSpec_To_v2alpha1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { - out.Parallelism = in.Parallelism - out.Completions = in.Completions - out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds - out.Selector = in.Selector - if in.ManualSelector != nil { - out.ManualSelector = new(bool) - *out.ManualSelector = *in.ManualSelector - } else { - out.ManualSelector = nil - } - - if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_v2alpha1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { - out.Parallelism = in.Parallelism - out.Completions = in.Completions - out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds - out.Selector = in.Selector - if in.ManualSelector != nil { - out.ManualSelector = new(bool) - *out.ManualSelector = *in.ManualSelector - } else { - out.ManualSelector = nil - } - - if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults.go index d645b64e5c..6da07cc7d2 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults.go @@ -17,36 +17,16 @@ limitations under the License. 
package v2alpha1 import ( - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) func addDefaultingFuncs(scheme *runtime.Scheme) error { RegisterDefaults(scheme) return scheme.AddDefaultingFuncs( - SetDefaults_Job, SetDefaults_CronJob, ) } -func SetDefaults_Job(obj *Job) { - // For a non-parallel job, you can leave both `.spec.completions` and - // `.spec.parallelism` unset. When both are unset, both are defaulted to 1. - if obj.Spec.Completions == nil && obj.Spec.Parallelism == nil { - obj.Spec.Completions = new(int32) - *obj.Spec.Completions = 1 - obj.Spec.Parallelism = new(int32) - *obj.Spec.Parallelism = 1 - } - if obj.Spec.Parallelism == nil { - obj.Spec.Parallelism = new(int32) - *obj.Spec.Parallelism = 1 - } - labels := obj.Spec.Template.Labels - if labels != nil && len(obj.Labels) == 0 { - obj.Labels = labels - } -} - func SetDefaults_CronJob(obj *CronJob) { if obj.Spec.ConcurrencyPolicy == "" { obj.Spec.ConcurrencyPolicy = AllowConcurrent diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.pb.go index 6dc65a871c..04601cc690 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.pb.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -29,11 +29,6 @@ limitations under the License. CronJobList CronJobSpec CronJobStatus - Job - JobCondition - JobList - JobSpec - JobStatus JobTemplate JobTemplateSpec */ @@ -43,7 +38,8 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" import strings "strings" @@ -76,44 +72,19 @@ func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } func (*CronJobStatus) ProtoMessage() {} func (*CronJobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -func (m *Job) Reset() { *m = Job{} } -func (*Job) ProtoMessage() {} -func (*Job) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *JobCondition) Reset() { *m = JobCondition{} } -func (*JobCondition) ProtoMessage() {} -func (*JobCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *JobList) Reset() { *m = JobList{} } -func (*JobList) ProtoMessage() {} -func (*JobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *JobSpec) Reset() { *m = JobSpec{} } -func (*JobSpec) ProtoMessage() {} -func (*JobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *JobStatus) Reset() { *m = JobStatus{} } -func (*JobStatus) ProtoMessage() {} -func (*JobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - func (m *JobTemplate) Reset() { *m = JobTemplate{} } func (*JobTemplate) ProtoMessage() {} -func (*JobTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*JobTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } func (*JobTemplateSpec) ProtoMessage() {} -func 
(*JobTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*JobTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func init() { proto.RegisterType((*CronJob)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.CronJob") proto.RegisterType((*CronJobList)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.CronJobList") proto.RegisterType((*CronJobSpec)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.CronJobSpec") proto.RegisterType((*CronJobStatus)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.CronJobStatus") - proto.RegisterType((*Job)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.Job") - proto.RegisterType((*JobCondition)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobCondition") - proto.RegisterType((*JobList)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobList") - proto.RegisterType((*JobSpec)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobSpec") - proto.RegisterType((*JobStatus)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobStatus") proto.RegisterType((*JobTemplate)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobTemplate") proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobTemplateSpec") } @@ -243,6 +214,16 @@ func (m *CronJobSpec) MarshalTo(data []byte) (int, error) { return 0, err } i += n5 + if m.SuccessfulJobsHistoryLimit != nil { + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.SuccessfulJobsHistoryLimit)) + } + if m.FailedJobsHistoryLimit != nil { + data[i] = 0x38 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.FailedJobsHistoryLimit)) + } return i, nil } @@ -286,256 +267,6 @@ func (m *CronJobStatus) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *Job) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Job) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n7 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n8, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n8 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n9, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n9 - return i, nil -} - -func (m *JobCondition) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *JobCondition) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i += copy(data[i:], m.Status) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) - n10, err := m.LastProbeTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n10 - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n11, err := m.LastTransitionTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n11 - data[i] = 0x2a - i++ - i = 
encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - return i, nil -} - -func (m *JobList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *JobList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n12, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n12 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *JobSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *JobSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Parallelism != nil { - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.Parallelism)) - } - if m.Completions != nil { - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.Completions)) - } - if m.ActiveDeadlineSeconds != nil { - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.ActiveDeadlineSeconds)) - } - if m.Selector != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n13, err := m.Selector.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n13 - } - if m.ManualSelector != nil { - data[i] = 0x28 - i++ - if *m.ManualSelector { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - } - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n14, err := m.Template.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n14 - return i, nil -} - -func (m *JobStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *JobStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.StartTime != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size())) - n15, err := m.StartTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n15 - } - if m.CompletionTime != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.CompletionTime.Size())) - n16, err := m.CompletionTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n16 - } - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Active)) - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Succeeded)) - data[i] = 0x30 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Failed)) - return i, nil -} - func (m *JobTemplate) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -554,19 +285,19 @@ func (m *JobTemplate) 
MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n17, err := m.ObjectMeta.MarshalTo(data[i:]) + n7, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n17 + i += n7 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n18, err := m.Template.MarshalTo(data[i:]) + n8, err := m.Template.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n18 + i += n8 return i, nil } @@ -588,19 +319,19 @@ func (m *JobTemplateSpec) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n19, err := m.ObjectMeta.MarshalTo(data[i:]) + n9, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n19 + i += n9 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n20, err := m.Spec.MarshalTo(data[i:]) + n10, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n20 + i += n10 return i, nil } @@ -672,6 +403,12 @@ func (m *CronJobSpec) Size() (n int) { } l = m.JobTemplate.Size() n += 1 + l + sovGenerated(uint64(l)) + if m.SuccessfulJobsHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.SuccessfulJobsHistoryLimit)) + } + if m.FailedJobsHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.FailedJobsHistoryLimit)) + } return n } @@ -691,156 +428,65 @@ func (m *CronJobStatus) Size() (n int) { return n } -func (m *Job) Size() (n int) { +func (m *JobTemplate) Size() (n int) { var l int _ = l l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() + l = m.Template.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *JobCondition) Size() (n int) { +func (m *JobTemplateSpec) Size() (n int) { var l int _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastProbeTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) + l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) + l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *JobList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break } } return n } - -func (m *JobSpec) Size() (n int) { - var l int - _ = l - if m.Parallelism != nil { - n += 1 + sovGenerated(uint64(*m.Parallelism)) +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CronJob) String() string { + if this == nil { + return "nil" } - if m.Completions != nil { - n += 1 + sovGenerated(uint64(*m.Completions)) + s := strings.Join([]string{`&CronJob{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronJobSpec", "CronJobSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CronJobStatus", "CronJobStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CronJobList) String() string { + if 
this == nil { + return "nil" } - if m.ActiveDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ManualSelector != nil { - n += 2 - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *JobStatus) Size() (n int) { - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.StartTime != nil { - l = m.StartTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CompletionTime != nil { - l = m.CompletionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 1 + sovGenerated(uint64(m.Active)) - n += 1 + sovGenerated(uint64(m.Succeeded)) - n += 1 + sovGenerated(uint64(m.Failed)) - return n -} - -func (m *JobTemplate) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *JobTemplateSpec) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *CronJob) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CronJob{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronJobSpec", "CronJobSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CronJobStatus", "CronJobStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CronJobList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CronJobList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CronJob", "CronJob", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CronJobSpec) String() string { - if this == nil { - return "nil" + s := strings.Join([]string{`&CronJobList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CronJob", "CronJob", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CronJobSpec) String() string { + if this == nil { + return "nil" } s := strings.Join([]string{`&CronJobSpec{`, `Schedule:` + fmt.Sprintf("%v", this.Schedule) + `,`, @@ -848,6 +494,8 @@ func (this *CronJobSpec) String() string { `ConcurrencyPolicy:` + fmt.Sprintf("%v", this.ConcurrencyPolicy) + `,`, `Suspend:` + valueToStringGenerated(this.Suspend) + `,`, `JobTemplate:` + strings.Replace(strings.Replace(this.JobTemplate.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, + `SuccessfulJobsHistoryLimit:` + valueToStringGenerated(this.SuccessfulJobsHistoryLimit) + `,`, + `FailedJobsHistoryLimit:` + 
valueToStringGenerated(this.FailedJobsHistoryLimit) + `,`, `}`, }, "") return s @@ -858,75 +506,7 @@ func (this *CronJobStatus) String() string { } s := strings.Join([]string{`&CronJobStatus{`, `Active:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Active), "ObjectReference", "k8s_io_kubernetes_pkg_api_v1.ObjectReference", 1), `&`, ``, 1) + `,`, - `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Job) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Job{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "JobSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "JobStatus", "JobStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *JobCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *JobList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Job", "Job", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *JobSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobSpec{`, - `Parallelism:` + valueToStringGenerated(this.Parallelism) + `,`, - `Completions:` + valueToStringGenerated(this.Completions) + `,`, - `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_kubernetes_pkg_api_unversioned.LabelSelector", 1) + `,`, - `ManualSelector:` + valueToStringGenerated(this.ManualSelector) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *JobStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobStatus{`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "JobCondition", "JobCondition", 1), `&`, ``, 1) + `,`, - `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1) + `,`, - `CompletionTime:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTime), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1) + `,`, - `Active:` + fmt.Sprintf("%v", this.Active) + `,`, - `Succeeded:` + fmt.Sprintf("%v", 
this.Succeeded) + `,`, - `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, + `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, `}`, }, "") return s @@ -936,7 +516,7 @@ func (this *JobTemplate) String() string { return "nil" } s := strings.Join([]string{`&JobTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -947,8 +527,8 @@ func (this *JobTemplateSpec) String() string { return "nil" } s := strings.Join([]string{`&JobTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "JobSpec", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "k8s_io_kubernetes_pkg_apis_batch_v1.JobSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -968,773 +548,31 @@ func (m *CronJob) Unmarshal(data []byte) error { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CronJob: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CronJob: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen 
int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CronJobList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CronJobList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CronJobList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CronJob{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CronJobSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 
0x7) - if wireType == 4 { - return fmt.Errorf("proto: CronJobSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CronJobSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schedule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Schedule = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartingDeadlineSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.StartingDeadlineSeconds = &v - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConcurrencyPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConcurrencyPolicy = ConcurrencyPolicy(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Suspend = &b - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JobTemplate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.JobTemplate.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CronJobStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - 
if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CronJobStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CronJobStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Active = append(m.Active, k8s_io_kubernetes_pkg_api_v1.ObjectReference{}) - if err := m.Active[len(m.Active)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastScheduleTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LastScheduleTime == nil { - m.LastScheduleTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} - } - if err := m.LastScheduleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Job) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Job: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Job: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobCondition) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = JobConditionType(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + if shift >= 64 { + return ErrIntOverflowGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Status = 
k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJob: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJob: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1758,13 +596,13 @@ func (m *JobCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1788,15 +626,15 @@ func (m *JobCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1806,49 +644,21 @@ func (m *JobCondition) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err } - m.Message = string(data[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -1871,7 +681,7 @@ func (m *JobCondition) Unmarshal(data []byte) error { } return nil } -func (m *JobList) Unmarshal(data []byte) error { +func (m *CronJobList) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -1894,10 +704,10 @@ func (m *JobList) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return 
fmt.Errorf("proto: JobList: wiretype end group for non-group") + return fmt.Errorf("proto: CronJobList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JobList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CronJobList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1956,7 +766,7 @@ func (m *JobList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Job{}) + m.Items = append(m.Items, CronJob{}) if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } @@ -1982,7 +792,7 @@ func (m *JobList) Unmarshal(data []byte) error { } return nil } -func (m *JobSpec) Unmarshal(data []byte) error { +func (m *CronJobSpec) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -2005,17 +815,17 @@ func (m *JobSpec) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JobSpec: wiretype end group for non-group") + return fmt.Errorf("proto: CronJobSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JobSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CronJobSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schedule", wireType) } - var v int32 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2025,17 +835,26 @@ func (m *JobSpec) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - m.Parallelism = &v + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schedule = string(data[iNdEx:postIndex]) + iNdEx = postIndex case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Completions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartingDeadlineSeconds", wireType) } - var v int32 + var v int64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2045,17 +864,17 @@ func (m *JobSpec) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - m.Completions = &v + m.StartingDeadlineSeconds = &v case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConcurrencyPolicy", wireType) } - var v int64 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2065,15 +884,45 @@ func (m *JobSpec) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - m.ActiveDeadlineSeconds = &v + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConcurrencyPolicy = 
ConcurrencyPolicy(data[iNdEx:postIndex]) + iNdEx = postIndex case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Suspend = &b + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field JobTemplate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2097,18 +946,15 @@ func (m *JobSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} - } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.JobTemplate.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ManualSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SuccessfulJobsHistoryLimit", wireType) } - var v int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2118,18 +964,17 @@ func (m *JobSpec) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.ManualSelector = &b - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + m.SuccessfulJobsHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailedJobsHistoryLimit", wireType) } - var msglen int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2139,22 +984,12 @@ func (m *JobSpec) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.FailedJobsHistoryLimit = &v default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -2176,7 +1011,7 @@ func (m *JobSpec) Unmarshal(data []byte) error { } return nil } -func (m *JobStatus) Unmarshal(data []byte) error { +func (m *CronJobStatus) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -2199,46 +1034,15 @@ func (m *JobStatus) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JobStatus: wiretype end group for non-group") + return fmt.Errorf("proto: CronJobStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JobStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CronJobStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, JobCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2262,16 +1066,14 @@ func (m *JobStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.StartTime == nil { - m.StartTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} - } - if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + m.Active = append(m.Active, k8s_io_kubernetes_pkg_api_v1.ObjectReference{}) + if err := m.Active[len(m.Active)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastScheduleTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2295,70 +1097,13 @@ func (m *JobStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.CompletionTime == nil { - m.CompletionTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + if m.LastScheduleTime == nil { + m.LastScheduleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } - if err := m.CompletionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LastScheduleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) - } - m.Active = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Active |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) - } - m.Succeeded = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Succeeded |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) - } - m.Failed = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Failed |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -2706,77 +1451,55 @@ var ( ) var fileDescriptorGenerated = []byte{ - // 1141 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x56, 0x4b, 0x6f, 0xe4, 0x44, - 0x10, 0x8e, 0x67, 0x92, 0x79, 0xf4, 0x6c, 0x5e, 0x0d, 0xd1, 0x0e, 0x41, 0x9a, 0x89, 0x46, 0x02, - 0x65, 0x61, 0x63, 0x2b, 0xa3, 0x00, 
0xcb, 0x1e, 0x90, 0xd6, 0x41, 0x48, 0x44, 0x89, 0x36, 0xea, - 0xc9, 0x42, 0x04, 0x0b, 0xa2, 0xc7, 0xee, 0xcc, 0x78, 0x63, 0xbb, 0x8d, 0xbb, 0x1d, 0x94, 0x1b, - 0x67, 0x4e, 0x48, 0xfc, 0x00, 0x7e, 0x07, 0x12, 0x1c, 0x38, 0x20, 0xe5, 0xb8, 0x20, 0x21, 0x71, - 0x1a, 0x11, 0xf3, 0x2f, 0x72, 0x42, 0x6e, 0xb7, 0x1f, 0xf3, 0x0a, 0x99, 0xa0, 0x45, 0xe2, 0xe6, - 0xae, 0xae, 0xef, 0xeb, 0xea, 0xaa, 0xaf, 0xab, 0x0c, 0xde, 0x3d, 0x7d, 0xc0, 0x54, 0x8b, 0x6a, - 0xa7, 0x41, 0x97, 0xf8, 0x2e, 0xe1, 0x84, 0x69, 0xde, 0x69, 0x4f, 0xc3, 0x9e, 0xc5, 0xb4, 0x2e, - 0xe6, 0x46, 0x5f, 0x3b, 0x6b, 0x63, 0xdb, 0xeb, 0xe3, 0x6d, 0xad, 0x47, 0x5c, 0xe2, 0x63, 0x4e, - 0x4c, 0xd5, 0xf3, 0x29, 0xa7, 0xf0, 0x5e, 0x0c, 0x55, 0x33, 0xa8, 0xea, 0x9d, 0xf6, 0xd4, 0x08, - 0xaa, 0x0a, 0xa8, 0x9a, 0x40, 0xd7, 0xb7, 0x7a, 0x16, 0xef, 0x07, 0x5d, 0xd5, 0xa0, 0x8e, 0xd6, - 0xa3, 0x3d, 0xaa, 0x09, 0x86, 0x6e, 0x70, 0x22, 0x56, 0x62, 0x21, 0xbe, 0x62, 0xe6, 0xf5, 0xf6, - 0xd4, 0xa0, 0x34, 0x9f, 0x30, 0x1a, 0xf8, 0x06, 0x19, 0x8d, 0x66, 0xfd, 0xad, 0xe9, 0x98, 0xc0, - 0x3d, 0x23, 0x3e, 0xb3, 0xa8, 0x4b, 0xcc, 0x31, 0xd8, 0xfd, 0xe9, 0xb0, 0xb3, 0xb1, 0x2b, 0xaf, - 0x6f, 0x4d, 0xf6, 0xf6, 0x03, 0x97, 0x5b, 0xce, 0x78, 0x4c, 0xdb, 0x93, 0xdd, 0x03, 0x6e, 0xd9, - 0x9a, 0xe5, 0x72, 0xc6, 0xfd, 0x51, 0x48, 0xeb, 0xfb, 0x02, 0x28, 0xef, 0xfa, 0xd4, 0xdd, 0xa3, - 0x5d, 0x78, 0x0c, 0x2a, 0x0e, 0xe1, 0xd8, 0xc4, 0x1c, 0xd7, 0x95, 0x0d, 0x65, 0xb3, 0xd6, 0xde, - 0x54, 0xa7, 0xe6, 0x5c, 0x3d, 0xdb, 0x56, 0x1f, 0x77, 0x9f, 0x11, 0x83, 0x1f, 0x10, 0x8e, 0x75, - 0x78, 0x31, 0x68, 0xce, 0x85, 0x83, 0x26, 0xc8, 0x6c, 0x28, 0x65, 0x83, 0xc7, 0x60, 0x9e, 0x79, - 0xc4, 0xa8, 0x17, 0x04, 0xeb, 0xdb, 0xea, 0x8d, 0x2b, 0xa9, 0xca, 0xd8, 0x3a, 0x1e, 0x31, 0xf4, - 0x3b, 0xf2, 0x8c, 0xf9, 0x68, 0x85, 0x04, 0x23, 0xfc, 0x02, 0x94, 0x18, 0xc7, 0x3c, 0x60, 0xf5, - 0xa2, 0xe0, 0x7e, 0x70, 0x0b, 0x6e, 0x81, 0xd7, 0x97, 0x24, 0x7b, 0x29, 0x5e, 0x23, 0xc9, 0xdb, - 0xfa, 0x45, 0x01, 0x35, 0xe9, 0xb9, 0x6f, 0x31, 0x0e, 0x3f, 0x1b, 0xcb, 0x92, 0x76, 0x4d, 0x96, - 0x72, 0x5a, 0x50, 0x23, 0xb8, 0x48, 0xd6, 0x8a, 0x3c, 0xaa, 0x92, 0x58, 0x72, 0xa9, 0xfa, 0x18, - 0x2c, 0x58, 0x9c, 0x38, 0xac, 0x5e, 0xd8, 0x28, 0x6e, 0xd6, 0xda, 0xed, 0xd9, 0xef, 0xa3, 0x2f, - 0x4a, 0xfa, 0x85, 0x0f, 0x23, 0x22, 0x14, 0xf3, 0xb5, 0xbe, 0x29, 0xa6, 0xf7, 0x88, 0xf2, 0x07, - 0xef, 0x83, 0x0a, 0x33, 0xfa, 0xc4, 0x0c, 0x6c, 0x22, 0xee, 0x51, 0xcd, 0xc2, 0xea, 0x48, 0x3b, - 0x4a, 0x3d, 0xe0, 0x13, 0x70, 0x97, 0x71, 0xec, 0x73, 0xcb, 0xed, 0xbd, 0x4f, 0xb0, 0x69, 0x5b, - 0x2e, 0xe9, 0x10, 0x83, 0xba, 0x26, 0x13, 0x45, 0x2d, 0xea, 0xaf, 0x86, 0x83, 0xe6, 0xdd, 0xce, - 0x64, 0x17, 0x34, 0x0d, 0x0b, 0x9f, 0x82, 0x55, 0x83, 0xba, 0x46, 0xe0, 0xfb, 0xc4, 0x35, 0xce, - 0x0f, 0xa9, 0x6d, 0x19, 0xe7, 0xa2, 0x92, 0x55, 0x5d, 0x95, 0xd1, 0xac, 0xee, 0x8e, 0x3a, 0x5c, - 0x4d, 0x32, 0xa2, 0x71, 0x22, 0xf8, 0x1a, 0x28, 0xb3, 0x80, 0x79, 0xc4, 0x35, 0xeb, 0xf3, 0x1b, - 0xca, 0x66, 0x45, 0xaf, 0x85, 0x83, 0x66, 0xb9, 0x13, 0x9b, 0x50, 0xb2, 0x07, 0xbf, 0x04, 0xb5, - 0x67, 0xb4, 0x7b, 0x44, 0x1c, 0xcf, 0xc6, 0x9c, 0xd4, 0x17, 0x44, 0x51, 0x1f, 0xce, 0x90, 0xf8, - 0xbd, 0x0c, 0x2d, 0x84, 0xfa, 0x92, 0x0c, 0xbd, 0x96, 0xdb, 0x40, 0xf9, 0x33, 0x5a, 0xbf, 0x2b, - 0x60, 0x71, 0x48, 0x7e, 0xf0, 0x09, 0x28, 0x61, 0x83, 0x5b, 0x67, 0x51, 0x31, 0xa2, 0xc2, 0x6f, - 0xdd, 0xe4, 0xe9, 0x21, 0x72, 0x42, 0xa2, 0x0b, 0x93, 0x4c, 0xbd, 0x8f, 0x04, 0x09, 0x92, 0x64, - 0xd0, 0x01, 0x2b, 0x36, 0x66, 0x3c, 0xa9, 0xe8, 0x91, 0xe5, 0x10, 0x91, 0x8b, 0x5a, 0xfb, 0xcd, - 0x1b, 0xaa, 0x36, 0x82, 0xe8, 0x2f, 0x87, 0x83, 0xe6, 0xca, 
0xfe, 0x08, 0x11, 0x1a, 0xa3, 0x6e, - 0x7d, 0x57, 0x00, 0xc5, 0x17, 0xdb, 0x4a, 0x8e, 0x86, 0x5a, 0x49, 0x7b, 0xb6, 0x2a, 0x4d, 0x6d, - 0x23, 0x4f, 0x47, 0xda, 0xc8, 0xce, 0x8c, 0xbc, 0xd7, 0xb7, 0x90, 0xdf, 0x8a, 0xe0, 0xce, 0x1e, - 0xed, 0xee, 0x52, 0xd7, 0xb4, 0xb8, 0x45, 0x5d, 0xb8, 0x03, 0xe6, 0xf9, 0xb9, 0x97, 0xbc, 0xbb, - 0x8d, 0x24, 0xa0, 0xa3, 0x73, 0x8f, 0x5c, 0x0d, 0x9a, 0x2b, 0x79, 0xdf, 0xc8, 0x86, 0x84, 0x37, - 0xfc, 0x28, 0x0d, 0xb2, 0x20, 0x70, 0xef, 0x0d, 0x1f, 0x77, 0x35, 0x68, 0x5e, 0x3b, 0x5d, 0xd4, - 0x94, 0x73, 0x38, 0x3c, 0xd8, 0x07, 0x8b, 0x51, 0x21, 0x0f, 0x7d, 0xda, 0x8d, 0x05, 0x52, 0x9c, - 0x5d, 0x20, 0x6b, 0x32, 0x96, 0xc5, 0xfd, 0x3c, 0x13, 0x1a, 0x26, 0x86, 0x5f, 0x01, 0x18, 0x19, - 0x8e, 0x7c, 0xec, 0xb2, 0xf8, 0x76, 0xb7, 0xd4, 0xe3, 0xba, 0x3c, 0x0e, 0xee, 0x8f, 0xd1, 0xa1, - 0x09, 0x47, 0xc0, 0xd7, 0x41, 0xc9, 0x27, 0x98, 0x51, 0x57, 0xbc, 0xee, 0x6a, 0x56, 0x29, 0x24, - 0xac, 0x48, 0xee, 0xc2, 0x7b, 0xa0, 0xec, 0x10, 0xc6, 0x70, 0x8f, 0xd4, 0x4b, 0xc2, 0x71, 0x59, - 0x3a, 0x96, 0x0f, 0x62, 0x33, 0x4a, 0xf6, 0x5b, 0x3f, 0x29, 0xa0, 0xfc, 0x1f, 0xcd, 0x84, 0xce, - 0xf0, 0x4c, 0x50, 0x67, 0x13, 0xe7, 0x94, 0x79, 0xf0, 0x43, 0x51, 0xc4, 0x2f, 0x66, 0xc1, 0x36, - 0xa8, 0x79, 0xd8, 0xc7, 0xb6, 0x4d, 0x6c, 0x8b, 0x39, 0xe2, 0x0a, 0x0b, 0xfa, 0x72, 0xd4, 0xc1, - 0x0e, 0x33, 0x33, 0xca, 0xfb, 0x44, 0x10, 0x83, 0x3a, 0x9e, 0x4d, 0xa2, 0x1c, 0xc7, 0x8a, 0x94, - 0x90, 0xdd, 0xcc, 0x8c, 0xf2, 0x3e, 0xf0, 0x31, 0x58, 0x8b, 0xbb, 0xd2, 0xe8, 0x04, 0x29, 0x8a, - 0x09, 0xf2, 0x4a, 0x38, 0x68, 0xae, 0x3d, 0x9a, 0xe4, 0x80, 0x26, 0xe3, 0xe0, 0xe7, 0xa0, 0xc2, - 0x88, 0x4d, 0x0c, 0x4e, 0x7d, 0x29, 0xa2, 0x9d, 0x9b, 0xa6, 0x1d, 0x77, 0x89, 0xdd, 0x91, 0x58, - 0xfd, 0x8e, 0x18, 0x7a, 0x72, 0x85, 0x52, 0x4e, 0xf8, 0x10, 0x2c, 0x39, 0xd8, 0x0d, 0x70, 0xea, - 0x29, 0xd4, 0x53, 0xd1, 0x61, 0x38, 0x68, 0x2e, 0x1d, 0x0c, 0xed, 0xa0, 0x11, 0x4f, 0xf8, 0x29, - 0xa8, 0xf0, 0x64, 0xa2, 0x94, 0x44, 0x6c, 0xff, 0xd0, 0xd1, 0x0f, 0xa9, 0x39, 0x34, 0x44, 0x52, - 0x41, 0xa4, 0x13, 0x24, 0x25, 0x6c, 0xfd, 0x58, 0x04, 0xd5, 0x6c, 0x74, 0x9c, 0x02, 0x60, 0x24, - 0x4f, 0x9b, 0xc9, 0xf1, 0xf1, 0xce, 0x6c, 0x1a, 0x49, 0x5b, 0x43, 0xd6, 0x7d, 0x53, 0x13, 0x43, - 0x39, 0x7a, 0x78, 0x0c, 0xaa, 0x62, 0x98, 0x8b, 0x97, 0x5b, 0x98, 0xfd, 0xe5, 0x2e, 0x86, 0x83, - 0x66, 0xb5, 0x93, 0x30, 0xa0, 0x8c, 0x0c, 0xf6, 0xc0, 0x52, 0xa6, 0x96, 0xdb, 0xf6, 0x21, 0x51, - 0x9a, 0xdd, 0x21, 0x1a, 0x34, 0x42, 0x1b, 0x35, 0x03, 0x39, 0x6a, 0xe7, 0x85, 0x6a, 0xa7, 0xcd, - 0x4e, 0x0d, 0x54, 0x59, 0x60, 0x18, 0x84, 0x98, 0xc4, 0x14, 0x95, 0x5f, 0xd0, 0x57, 0xa5, 0x6b, - 0xb5, 0x93, 0x6c, 0xa0, 0xcc, 0x27, 0x22, 0x3e, 0xc1, 0x96, 0x4d, 0x4c, 0x51, 0xf1, 0x1c, 0xf1, - 0x07, 0xc2, 0x8a, 0xe4, 0x6e, 0xeb, 0x57, 0x05, 0xe4, 0x7f, 0x0d, 0x5e, 0xe0, 0xb4, 0xec, 0xe7, - 0x54, 0x58, 0xf8, 0xd7, 0xff, 0x35, 0xd7, 0x49, 0xf2, 0x67, 0x05, 0x2c, 0x8f, 0xf8, 0xff, 0xdf, - 0xfe, 0x02, 0xf4, 0x37, 0x2e, 0x2e, 0x1b, 0x73, 0xcf, 0x2f, 0x1b, 0x73, 0x7f, 0x5c, 0x36, 0xe6, - 0xbe, 0x0e, 0x1b, 0xca, 0x45, 0xd8, 0x50, 0x9e, 0x87, 0x0d, 0xe5, 0xcf, 0xb0, 0xa1, 0x7c, 0xfb, - 0x57, 0x63, 0xee, 0x93, 0x4a, 0xc2, 0xf3, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x63, 0x4c, 0xf3, - 0xee, 0xc9, 0x0e, 0x00, 0x00, + // 799 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x4f, 0xe3, 0x46, + 0x18, 0xc7, 0xe3, 0x90, 0x37, 0x26, 0xa5, 0x05, 0xb7, 0x82, 0x28, 0x95, 0x9c, 0x28, 0x52, 0xa5, + 0x14, 0x81, 0x5d, 0x42, 0x85, 0x68, 0x6f, 0x35, 0x55, 0xd5, 0x22, 0xfa, 0x22, 0x07, 0xd4, 0xaa, + 0x42, 
0x15, 0x63, 0xe7, 0x49, 0x32, 0xc4, 0x6f, 0xf5, 0x8c, 0xa3, 0xe6, 0xd6, 0x8f, 0xd0, 0x6f, + 0xd1, 0x6f, 0xb1, 0x97, 0xdd, 0x03, 0x47, 0x0e, 0x7b, 0xd8, 0xbd, 0x44, 0x8b, 0xf7, 0x5b, 0xec, + 0x69, 0xe5, 0x89, 0x13, 0x07, 0x1c, 0x2f, 0x61, 0x57, 0xe2, 0xe6, 0x19, 0x3f, 0xff, 0xdf, 0x3c, + 0xcf, 0xf3, 0x7f, 0x66, 0xd0, 0x37, 0x83, 0x43, 0x2a, 0x13, 0x47, 0x19, 0xf8, 0x3a, 0x78, 0x36, + 0x30, 0xa0, 0x8a, 0x3b, 0xe8, 0x29, 0xd8, 0x25, 0x54, 0xd1, 0x31, 0x33, 0xfa, 0xca, 0xb0, 0x85, + 0x4d, 0xb7, 0x8f, 0xf7, 0x94, 0x1e, 0xd8, 0xe0, 0x61, 0x06, 0x1d, 0xd9, 0xf5, 0x1c, 0xe6, 0x88, + 0x5f, 0x4e, 0xa4, 0x72, 0x2c, 0x95, 0xdd, 0x41, 0x4f, 0x0e, 0xa5, 0x32, 0x97, 0xca, 0x53, 0x69, + 0x75, 0xb7, 0x47, 0x58, 0xdf, 0xd7, 0x65, 0xc3, 0xb1, 0x94, 0x9e, 0xd3, 0x73, 0x14, 0x4e, 0xd0, + 0xfd, 0x2e, 0x5f, 0xf1, 0x05, 0xff, 0x9a, 0x90, 0xab, 0x5f, 0x47, 0x49, 0x61, 0x97, 0x58, 0xd8, + 0xe8, 0x13, 0x1b, 0xbc, 0x51, 0x9c, 0x96, 0x05, 0x0c, 0x2b, 0xc3, 0x44, 0x3e, 0x55, 0x25, 0x4d, + 0xe5, 0xf9, 0x36, 0x23, 0x16, 0x24, 0x04, 0x07, 0xf7, 0x09, 0xa8, 0xd1, 0x07, 0x0b, 0x27, 0x74, + 0xfb, 0x69, 0x3a, 0x9f, 0x11, 0x53, 0x21, 0x36, 0xa3, 0xcc, 0x4b, 0x88, 0xe6, 0x6a, 0xa2, 0xe0, + 0x0d, 0xc1, 0x8b, 0x0b, 0x82, 0x7f, 0xb0, 0xe5, 0x9a, 0xb0, 0xa8, 0xa6, 0x9d, 0x54, 0x7b, 0x16, + 0x45, 0xef, 0xdf, 0x6f, 0x66, 0x42, 0xd4, 0xf8, 0x3f, 0x8b, 0x8a, 0x47, 0x9e, 0x63, 0x1f, 0x3b, + 0xba, 0x78, 0x81, 0x4a, 0x61, 0x77, 0x3b, 0x98, 0xe1, 0x8a, 0x50, 0x17, 0x9a, 0xe5, 0xd6, 0x57, + 0x72, 0xe4, 0xf2, 0x7c, 0xb1, 0xb1, 0xcf, 0x61, 0xb4, 0x3c, 0xdc, 0x93, 0x7f, 0xd5, 0x2f, 0xc1, + 0x60, 0x3f, 0x03, 0xc3, 0xaa, 0x78, 0x35, 0xae, 0x65, 0x82, 0x71, 0x0d, 0xc5, 0x7b, 0xda, 0x8c, + 0x2a, 0xfe, 0x81, 0x72, 0xd4, 0x05, 0xa3, 0x92, 0xe5, 0xf4, 0x03, 0x79, 0xe9, 0x19, 0x92, 0xa3, + 0x1c, 0xdb, 0x2e, 0x18, 0xea, 0x47, 0xd1, 0x19, 0xb9, 0x70, 0xa5, 0x71, 0xa2, 0x78, 0x81, 0x0a, + 0x94, 0x61, 0xe6, 0xd3, 0xca, 0x0a, 0x67, 0x1f, 0xbe, 0x07, 0x9b, 0xeb, 0xd5, 0x8f, 0x23, 0x7a, + 0x61, 0xb2, 0xd6, 0x22, 0x6e, 0xe3, 0x99, 0x80, 0xca, 0x51, 0xe4, 0x09, 0xa1, 0x4c, 0x3c, 0x4f, + 0x74, 0x4b, 0x5e, 0xae, 0x5b, 0xa1, 0x9a, 0xf7, 0x6a, 0x3d, 0x3a, 0xa9, 0x34, 0xdd, 0x99, 0xeb, + 0xd4, 0xef, 0x28, 0x4f, 0x18, 0x58, 0xb4, 0x92, 0xad, 0xaf, 0x34, 0xcb, 0xad, 0xd6, 0xc3, 0xcb, + 0x51, 0xd7, 0x22, 0x7c, 0xfe, 0xa7, 0x10, 0xa4, 0x4d, 0x78, 0x8d, 0x27, 0xb9, 0x59, 0x19, 0x61, + 0xfb, 0xc4, 0x1d, 0x54, 0x0a, 0x07, 0xbd, 0xe3, 0x9b, 0xc0, 0xcb, 0x58, 0x8d, 0xd3, 0x6a, 0x47, + 0xfb, 0xda, 0x2c, 0x42, 0x3c, 0x43, 0x5b, 0x94, 0x61, 0x8f, 0x11, 0xbb, 0xf7, 0x3d, 0xe0, 0x8e, + 0x49, 0x6c, 0x68, 0x83, 0xe1, 0xd8, 0x1d, 0xca, 0x3d, 0x5d, 0x51, 0x3f, 0x0f, 0xc6, 0xb5, 0xad, + 0xf6, 0xe2, 0x10, 0x2d, 0x4d, 0x2b, 0x9e, 0xa3, 0x0d, 0xc3, 0xb1, 0x0d, 0xdf, 0xf3, 0xc0, 0x36, + 0x46, 0xbf, 0x39, 0x26, 0x31, 0x46, 0xdc, 0xc8, 0x55, 0x55, 0x8e, 0xb2, 0xd9, 0x38, 0xba, 0x1b, + 0xf0, 0x66, 0xd1, 0xa6, 0x96, 0x04, 0x89, 0x5f, 0xa0, 0x22, 0xf5, 0xa9, 0x0b, 0x76, 0xa7, 0x92, + 0xab, 0x0b, 0xcd, 0x92, 0x5a, 0x0e, 0xc6, 0xb5, 0x62, 0x7b, 0xb2, 0xa5, 0x4d, 0xff, 0x89, 0x7f, + 0xa3, 0xf2, 0xa5, 0xa3, 0x9f, 0x82, 0xe5, 0x9a, 0x98, 0x41, 0x25, 0xcf, 0x3d, 0xfd, 0xf6, 0x01, + 0x8d, 0x3f, 0x8e, 0xd5, 0x7c, 0x4e, 0x3f, 0x8d, 0x52, 0x2f, 0xcf, 0xfd, 0xd0, 0xe6, 0xcf, 0x10, + 0xff, 0x42, 0x55, 0xea, 0x1b, 0x06, 0x50, 0xda, 0xf5, 0xcd, 0x63, 0x47, 0xa7, 0x3f, 0x12, 0xca, + 0x1c, 0x6f, 0x74, 0x42, 0x2c, 0xc2, 0x2a, 0x85, 0xba, 0xd0, 0xcc, 0xab, 0x52, 0x30, 0xae, 0x55, + 0xdb, 0xa9, 0x51, 0xda, 0x3b, 0x08, 0xa2, 0x86, 0x36, 0xbb, 0x98, 0x98, 0xd0, 0x49, 0xb0, 0x8b, + 0x9c, 0x5d, 0x0d, 0xc6, 0xb5, 
0xcd, 0x1f, 0x16, 0x46, 0x68, 0x29, 0xca, 0xc6, 0x73, 0x01, 0xad, + 0xdd, 0xba, 0x31, 0xe2, 0x19, 0x2a, 0x60, 0x83, 0x91, 0x61, 0x38, 0x40, 0xe1, 0xb0, 0xee, 0xa6, + 0xf7, 0x2c, 0x7e, 0x2d, 0x34, 0xe8, 0x42, 0x68, 0x12, 0xc4, 0x17, 0xee, 0x3b, 0x0e, 0xd1, 0x22, + 0x98, 0x68, 0xa2, 0x75, 0x13, 0x53, 0x36, 0x9d, 0xc2, 0x53, 0x62, 0x01, 0xf7, 0xaf, 0xdc, 0xda, + 0x5e, 0xee, 0xa2, 0x85, 0x0a, 0xf5, 0xb3, 0x60, 0x5c, 0x5b, 0x3f, 0xb9, 0xc3, 0xd1, 0x12, 0xe4, + 0xc6, 0x4b, 0x01, 0xcd, 0xfb, 0xf4, 0x08, 0x8f, 0x61, 0x1f, 0x95, 0xd8, 0x74, 0xd8, 0xb2, 0x1f, + 0x3c, 0x6c, 0xb3, 0x5b, 0x3b, 0x9b, 0xb4, 0x19, 0xbd, 0xf1, 0x54, 0x40, 0x9f, 0xdc, 0x89, 0x7f, + 0x84, 0xfa, 0x7e, 0xb9, 0xf5, 0xd8, 0xef, 0x2c, 0x51, 0x1b, 0xaf, 0x2a, 0xed, 0x89, 0x57, 0xb7, + 0xaf, 0x6e, 0xa4, 0xcc, 0xf5, 0x8d, 0x94, 0x79, 0x71, 0x23, 0x65, 0xfe, 0x0d, 0x24, 0xe1, 0x2a, + 0x90, 0x84, 0xeb, 0x40, 0x12, 0x5e, 0x05, 0x92, 0xf0, 0xdf, 0x6b, 0x29, 0xf3, 0x67, 0x69, 0xda, + 0x9d, 0xb7, 0x01, 0x00, 0x00, 0xff, 0xff, 0x32, 0x5e, 0xac, 0x56, 0xd9, 0x08, 0x00, 0x00, } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto index 8f4995a5c7..4f51d6161a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,11 +21,13 @@ syntax = 'proto2'; package k8s.io.kubernetes.pkg.apis.batch.v2alpha1; -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; +import "k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v2alpha1"; @@ -35,7 +37,7 @@ message CronJob { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec is a structure defining the expected behavior of a job, including the schedule. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -53,7 +55,7 @@ message CronJobList { // Standard list metadata // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of CronJob. repeated CronJob items = 2; @@ -81,6 +83,16 @@ message CronJobSpec { // JobTemplate is the object that describes the job that will be created when // executing a CronJob. optional JobTemplateSpec jobTemplate = 5; + + // The number of successful finished jobs to retain. 
+ // This is a pointer to distinguish between explicit zero and not specified. + // +optional + optional int32 successfulJobsHistoryLimit = 6; + + // The number of failed finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + optional int32 failedJobsHistoryLimit = 7; } // CronJobStatus represents the current state of a cron job. @@ -91,142 +103,7 @@ message CronJobStatus { // LastScheduleTime keeps information of when was the last time the job was successfully scheduled. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastScheduleTime = 4; -} - -// Job represents the configuration of a single job. -message Job { - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - optional JobSpec spec = 2; - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - optional JobStatus status = 3; -} - -// JobCondition describes current state of a job. -message JobCondition { - // Type of job condition, Complete or Failed. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time the condition was checked. - // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; - - // Last time the condition transit from one status to another. - // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; - - // (brief) reason for the condition's last transition. - // +optional - optional string reason = 5; - - // Human readable message indicating details about last transition. - // +optional - optional string message = 6; -} - -// JobList is a collection of jobs. -message JobList { - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of Job. - repeated Job items = 2; -} - -// JobSpec describes how the job execution will look like. -message JobSpec { - // Parallelism specifies the maximum desired number of pods the job should - // run at any given time. The actual number of pods running in steady state will - // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), - // i.e. when the work left to do is less than max parallelism. - // More info: http://kubernetes.io/docs/user-guide/jobs - // +optional - optional int32 parallelism = 1; - - // Completions specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any - // pod signals the success of all pods, and allows parallelism to have any positive - // value. Setting to 1 means that parallelism is limited to 1 and the success of that - // pod signals the success of the job. 
- // More info: http://kubernetes.io/docs/user-guide/jobs - // +optional - optional int32 completions = 2; - - // Optional duration in seconds relative to the startTime that the job may be active - // before the system tries to terminate it; value must be positive integer - // +optional - optional int64 activeDeadlineSeconds = 3; - - // Selector is a label query over pods that should match the pod count. - // Normally, the system sets this field for you. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors - // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 4; - - // ManualSelector controls generation of pod labels and pod selectors. - // Leave `manualSelector` unset unless you are certain what you are doing. - // When false or unset, the system pick labels unique to this job - // and appends those labels to the pod template. When true, - // the user is responsible for picking unique labels and specifying - // the selector. Failure to pick a unique label may cause this - // and other jobs to not function correctly. However, You may see - // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` - // API. - // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md - // +optional - optional bool manualSelector = 5; - - // Template is the object that describes the pod that will be created when - // executing a job. - // More info: http://kubernetes.io/docs/user-guide/jobs - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6; -} - -// JobStatus represents the current state of a Job. -message JobStatus { - // Conditions represent the latest available observations of an object's current state. - // More info: http://kubernetes.io/docs/user-guide/jobs - // +optional - repeated JobCondition conditions = 1; - - // StartTime represents time when the job was acknowledged by the Job Manager. - // It is not guaranteed to be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 2; - - // CompletionTime represents time when the job was completed. It is not guaranteed to - // be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time completionTime = 3; - - // Active is the number of actively running pods. - // +optional - optional int32 active = 4; - - // Succeeded is the number of pods which reached Phase Succeeded. - // +optional - optional int32 succeeded = 5; - - // Failed is the number of pods which reached Phase Failed. - // +optional - optional int32 failed = 6; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; } // JobTemplate describes a template for creating copies of a predefined pod. @@ -234,7 +111,7 @@ message JobTemplate { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Template defines jobs that will be created from this template // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -247,11 +124,11 @@ message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. 
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the job. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // +optional - optional JobSpec spec = 2; + optional k8s.io.kubernetes.pkg.apis.batch.v1.JobSpec spec = 2; } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/register.go index 236fa8a147..5286ca4a08 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/register.go @@ -17,17 +17,21 @@ limitations under the License. package v2alpha1 import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName is the group name use in this package const GroupName = "batch" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v2alpha1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} var ( SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) @@ -37,16 +41,12 @@ var ( // Adds the list of known types to api.Scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &Job{}, - &JobList{}, &JobTemplate{}, &CronJob{}, &CronJobList{}, - &v1.ListOptions{}, - &v1.DeleteOptions{}, ) scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJob"), &CronJob{}) scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJobList"), &CronJobList{}) - versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.generated.go index 88fc0d7a77..b4adb6142f 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.generated.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.generated.go @@ -25,11 +25,12 @@ import ( "errors" "fmt" codec1978 "github.com/ugorji/go/codec" - pkg4_resource "k8s.io/kubernetes/pkg/api/resource" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg2_v1 "k8s.io/kubernetes/pkg/api/v1" - pkg3_types "k8s.io/kubernetes/pkg/types" - pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" + pkg5_resource "k8s.io/apimachinery/pkg/api/resource" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + pkg6_intstr "k8s.io/apimachinery/pkg/util/intstr" + pkg4_v1 "k8s.io/kubernetes/pkg/api/v1" + pkg3_v1 "k8s.io/kubernetes/pkg/apis/batch/v1" "reflect" "runtime" time "time" @@ -65,17 +66,18 @@ func init() { panic(err) } if false { // reference the types, but skip this branch at build/run time - var v0 pkg4_resource.Quantity - var v1 pkg1_unversioned.TypeMeta - var v2 pkg2_v1.ObjectMeta - var v3 pkg3_types.UID - var v4 pkg5_intstr.IntOrString - var v5 time.Time - _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 + var v0 pkg5_resource.Quantity + var v1 pkg1_v1.TypeMeta + var v2 pkg2_types.UID + var v3 pkg6_intstr.IntOrString + var v4 pkg4_v1.PodTemplateSpec + var v5 pkg3_v1.JobSpec + var v6 time.Time + _, _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5, v6 } } -func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *JobTemplate) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -89,17 +91,16 @@ func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool + var yyq2 [4]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = x.Kind != "" yyq2[1] = x.APIVersion != "" yyq2[2] = true yyq2[3] = true - yyq2[4] = true var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) + r.EncodeArrayStart(4) } else { yynn2 = 0 for _, b := range yyq2 { @@ -164,7 +165,13 @@ func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[2] { yy10 := &x.ObjectMeta - yy10.CodecEncodeSelf(e) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } @@ -173,41 +180,30 @@ func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.ObjectMeta - yy11.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + 
z.EncFallback(yy12) + } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[3] { - yy13 := &x.Spec - yy13.CodecEncodeSelf(e) + yy15 := &x.Template + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.Spec - yy14.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) + r.EncodeString(codecSelferC_UTF81234, string("template")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Status + yy17 := &x.Template yy17.CodecEncodeSelf(e) } } @@ -220,29 +216,29 @@ func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *Job) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *JobTemplate) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym18 := z.DecBinary() - _ = yym18 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct19 := r.ContainerType() - if yyct19 == codecSelferValueTypeMap1234 { - yyl19 := r.ReadMapStart() - if yyl19 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl19, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct19 == codecSelferValueTypeArray1234 { - yyl19 := r.ReadArrayStart() - if yyl19 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl19, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -250,16 +246,16 @@ func (x *Job) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *JobTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys20Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys20Slc - var yyhl20 bool = l >= 0 - for yyj20 := 0; ; yyj20++ { - if yyhl20 { - if yyj20 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -268,414 +264,75 @@ func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys20Slc = r.DecodeBytes(yys20Slc, true, true) - yys20 := string(yys20Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys20 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = 
pkg2_v1.ObjectMeta{} - } else { - yyv23 := &x.ObjectMeta - yyv23.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv24 := &x.Spec - yyv24.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = JobStatus{} - } else { - yyv25 := &x.Status - yyv25.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys20) - } // end switch yys20 - } // end for yyj20 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj26 int - var yyb26 bool - var yyhl26 bool = l >= 0 - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv29 := &x.ObjectMeta - yyv29.CodecDecodeSelf(d) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv30 := &x.Spec - yyv30.CodecDecodeSelf(d) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = JobStatus{} - } else { - yyv31 := &x.Status - yyv31.CodecDecodeSelf(d) - } - for { - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj26-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym32 := z.EncBinary() - _ = yym32 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep33 := !z.EncBinary() - yy2arr33 := z.EncBasicHandle().StructToArray - var yyq33 [4]bool - _, _, _ = yysep33, yyq33, yy2arr33 - const yyr33 bool = false - yyq33[0] = x.Kind != "" - yyq33[1] = x.APIVersion != "" - yyq33[2] = true - var yynn33 int - if yyr33 || yy2arr33 { - r.EncodeArrayStart(4) - } else { - yynn33 = 1 - for _, b := range yyq33 { - if b { - yynn33++ - } - } - r.EncodeMapStart(yynn33) - yynn33 = 0 - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[0] { - yym35 := 
z.EncBinary() - _ = yym35 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq33[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[1] { - yym38 := z.EncBinary() - _ = yym38 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq33[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym39 := z.EncBinary() - _ = yym39 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[2] { - yy41 := &x.ListMeta - yym42 := z.EncBinary() - _ = yym42 - if false { - } else if z.HasExtensions() && z.EncExt(yy41) { - } else { - z.EncFallback(yy41) - } - } else { - r.EncodeNil() - } - } else { - if yyq33[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy43 := &x.ListMeta - yym44 := z.EncBinary() - _ = yym44 - if false { - } else if z.HasExtensions() && z.EncExt(yy43) { - } else { - z.EncFallback(yy43) - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym46 := z.EncBinary() - _ = yym46 - if false { - } else { - h.encSliceJob(([]Job)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { } else { - yym47 := z.EncBinary() - _ = yym47 - if false { - } else { - h.encSliceJob(([]Job)(x.Items), e) - } + *((*string)(yyv4)) = r.DecodeString() } } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym48 := z.DecBinary() - _ = yym48 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct49 := r.ContainerType() - if yyct49 == codecSelferValueTypeMap1234 { - yyl49 := r.ReadMapStart() - if yyl49 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl49, d) - } - } else if yyct49 == codecSelferValueTypeArray1234 { - yyl49 := r.ReadArrayStart() - if yyl49 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl49, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys50Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys50Slc - var yyhl50 bool = l >= 0 - for yyj50 := 0; ; yyj50++ { - if yyhl50 { - if yyj50 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys50Slc = r.DecodeBytes(yys50Slc, true, true) - yys50 := string(yys50Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys50 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv53 := &x.ListMeta - yym54 := z.DecBinary() - _ = yym54 + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv53) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv53, false) + z.DecFallback(yyv8, false) } } - case "items": + case "template": if r.TryDecodeAsNil() { - x.Items = nil + x.Template = JobTemplateSpec{} } else { - yyv55 := &x.Items - yym56 := z.DecBinary() - _ = yym56 - if false { - } else { - h.decSliceJob((*[]Job)(yyv55), d) - } + yyv10 := &x.Template + yyv10.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys50) - } // end switch yys50 - } // end for yyj50 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *JobTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj57 int - var yyb57 bool - var yyhl57 bool = l >= 0 - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb57 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb57 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -683,15 +340,21 @@ func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb57 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb57 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -699,344 +362,44 @@ func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb57 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb57 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv60 := &x.ListMeta - yym61 := z.DecBinary() - _ = yym61 + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 if false { - } else if z.HasExtensions() && z.DecExt(yyv60) { + } else if z.HasExtensions() && z.DecExt(yyv16) { } else { - z.DecFallback(yyv60, false) + z.DecFallback(yyv16, false) } } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb57 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb57 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv62 := &x.Items - yym63 := z.DecBinary() - _ = yym63 - if false { - } else { - h.decSliceJob((*[]Job)(yyv62), d) - } - } - for { - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj57-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobTemplate) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym64 := z.EncBinary() - _ = yym64 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep65 := !z.EncBinary() - yy2arr65 := z.EncBasicHandle().StructToArray - var yyq65 [4]bool - _, _, _ = yysep65, yyq65, yy2arr65 - const yyr65 bool = false - yyq65[0] = x.Kind != "" - yyq65[1] = x.APIVersion != "" - yyq65[2] = true - yyq65[3] = true - var yynn65 int - if yyr65 || yy2arr65 { - r.EncodeArrayStart(4) - } else { - yynn65 = 0 - for _, b := range yyq65 { - if b { - yynn65++ - } - } - r.EncodeMapStart(yynn65) - yynn65 = 0 - } - if yyr65 || yy2arr65 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq65[0] { - yym67 := z.EncBinary() - _ = yym67 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq65[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym68 := z.EncBinary() - _ = yym68 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr65 || yy2arr65 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq65[1] { - yym70 := z.EncBinary() - _ = yym70 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq65[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym71 := z.EncBinary() - _ = yym71 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr65 || yy2arr65 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq65[2] { - yy73 := &x.ObjectMeta - yy73.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq65[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy74 := &x.ObjectMeta - yy74.CodecEncodeSelf(e) - } - } - if yyr65 || yy2arr65 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq65[3] { - yy76 := &x.Template - yy76.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq65[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy77 := &x.Template - yy77.CodecEncodeSelf(e) - } - } - if yyr65 || yy2arr65 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobTemplate) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym78 := z.DecBinary() - _ = yym78 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct79 := r.ContainerType() - if yyct79 == codecSelferValueTypeMap1234 { - yyl79 := r.ReadMapStart() - if yyl79 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl79, d) - } - } else if yyct79 == codecSelferValueTypeArray1234 { - yyl79 := r.ReadArrayStart() - if yyl79 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl79, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys80Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys80Slc - var yyhl80 bool = l >= 0 - for yyj80 := 0; ; yyj80++ { - if yyhl80 { - if yyj80 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys80Slc = r.DecodeBytes(yys80Slc, true, true) - yys80 := string(yys80Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys80 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv83 := &x.ObjectMeta - yyv83.CodecDecodeSelf(d) - } - case "template": - if r.TryDecodeAsNil() { - x.Template = JobTemplateSpec{} - } else { - yyv84 := &x.Template - yyv84.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys80) - } // end switch yys80 - } // end for yyj80 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj85 int - var yyb85 bool - var yyhl85 bool = l >= 0 - yyj85++ - if yyhl85 { - yyb85 = yyj85 > l - } else { - yyb85 = r.CheckBreak() - } - if yyb85 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj85++ - if yyhl85 { - yyb85 = yyj85 > l - } else { - yyb85 = r.CheckBreak() - } - if yyb85 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj85++ - if yyhl85 { - yyb85 = yyj85 > l - } else { - yyb85 = r.CheckBreak() - } - if yyb85 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv88 := &x.ObjectMeta - yyv88.CodecDecodeSelf(d) - } - yyj85++ - if yyhl85 { - yyb85 = yyj85 > l - } else { - yyb85 = r.CheckBreak() - } - if yyb85 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1044,1518 +407,104 @@ func (x *JobTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Template = JobTemplateSpec{} } else { - yyv89 := &x.Template - yyv89.CodecDecodeSelf(d) + yyv18 := &x.Template + yyv18.CodecDecodeSelf(d) } for { - yyj85++ - if yyhl85 { - yyb85 = yyj85 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb85 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb85 { + if yyb11 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj85-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym90 := z.EncBinary() - _ = yym90 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep91 := !z.EncBinary() - yy2arr91 := z.EncBasicHandle().StructToArray - var yyq91 [2]bool - _, _, _ = yysep91, yyq91, yy2arr91 - const yyr91 bool = false - yyq91[0] = true - yyq91[1] = true - var yynn91 int - if yyr91 || yy2arr91 { - r.EncodeArrayStart(2) - } else { - yynn91 = 0 - for _, b := range yyq91 { - if b { - yynn91++ - } - } - r.EncodeMapStart(yynn91) - yynn91 = 0 - } - if yyr91 || yy2arr91 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq91[0] { - yy93 := &x.ObjectMeta - yy93.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq91[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy94 := &x.ObjectMeta - yy94.CodecEncodeSelf(e) - } - } - if yyr91 || yy2arr91 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq91[1] { - yy96 := &x.Spec - yy96.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq91[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy97 := &x.Spec - yy97.CodecEncodeSelf(e) - } - } - if yyr91 || yy2arr91 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym98 := z.DecBinary() - _ = yym98 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct99 := r.ContainerType() - if yyct99 == codecSelferValueTypeMap1234 { - yyl99 := r.ReadMapStart() - if yyl99 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - 
x.codecDecodeSelfFromMap(yyl99, d) - } - } else if yyct99 == codecSelferValueTypeArray1234 { - yyl99 := r.ReadArrayStart() - if yyl99 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl99, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys100Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys100Slc - var yyhl100 bool = l >= 0 - for yyj100 := 0; ; yyj100++ { - if yyhl100 { - if yyj100 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys100Slc = r.DecodeBytes(yys100Slc, true, true) - yys100 := string(yys100Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys100 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv101 := &x.ObjectMeta - yyv101.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv102 := &x.Spec - yyv102.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys100) - } // end switch yys100 - } // end for yyj100 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj103 int - var yyb103 bool - var yyhl103 bool = l >= 0 - yyj103++ - if yyhl103 { - yyb103 = yyj103 > l - } else { - yyb103 = r.CheckBreak() - } - if yyb103 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv104 := &x.ObjectMeta - yyv104.CodecDecodeSelf(d) - } - yyj103++ - if yyhl103 { - yyb103 = yyj103 > l - } else { - yyb103 = r.CheckBreak() - } - if yyb103 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv105 := &x.Spec - yyv105.CodecDecodeSelf(d) - } - for { - yyj103++ - if yyhl103 { - yyb103 = yyj103 > l - } else { - yyb103 = r.CheckBreak() - } - if yyb103 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj103-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym106 := z.EncBinary() - _ = yym106 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep107 := !z.EncBinary() - yy2arr107 := z.EncBasicHandle().StructToArray - var yyq107 [6]bool - _, _, _ = yysep107, yyq107, yy2arr107 - const yyr107 bool = false - yyq107[0] = x.Parallelism != nil - yyq107[1] = x.Completions != nil - yyq107[2] = x.ActiveDeadlineSeconds != nil - yyq107[3] = x.Selector != nil - yyq107[4] = x.ManualSelector != nil - var yynn107 int - if yyr107 || yy2arr107 { - r.EncodeArrayStart(6) - } else { - yynn107 = 1 - for _, b := range yyq107 { - if b { - yynn107++ - } - } - r.EncodeMapStart(yynn107) - yynn107 = 0 - } - if yyr107 || yy2arr107 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq107[0] { - if x.Parallelism == nil { - r.EncodeNil() - } else { - yy109 := *x.Parallelism - yym110 := z.EncBinary() - _ = yym110 - if false { - } else { - r.EncodeInt(int64(yy109)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq107[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("parallelism")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Parallelism == nil { - r.EncodeNil() - } else { - yy111 := *x.Parallelism - yym112 := z.EncBinary() - _ = yym112 - if false { - } else { - r.EncodeInt(int64(yy111)) - } - } - } - } - if yyr107 || yy2arr107 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq107[1] { - if x.Completions == nil { - r.EncodeNil() - } else { - yy114 := *x.Completions - yym115 := z.EncBinary() - _ = yym115 - if false { - } else { - r.EncodeInt(int64(yy114)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq107[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("completions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Completions == nil { - r.EncodeNil() - } else { - yy116 := *x.Completions - yym117 := z.EncBinary() - _ = yym117 - if false { - } else { - r.EncodeInt(int64(yy116)) - } - } - } - } - if yyr107 || yy2arr107 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq107[2] { - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy119 := *x.ActiveDeadlineSeconds - yym120 := z.EncBinary() - _ = yym120 - if false { - } else { - r.EncodeInt(int64(yy119)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq107[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy121 := *x.ActiveDeadlineSeconds - yym122 := z.EncBinary() - _ = yym122 - if false { - } else { - r.EncodeInt(int64(yy121)) - } - } - } - } - if yyr107 || yy2arr107 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq107[3] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym124 := z.EncBinary() - _ = yym124 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq107[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym125 := z.EncBinary() - _ = yym125 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr107 || yy2arr107 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq107[4] { - if x.ManualSelector == nil { - r.EncodeNil() - } else { - yy127 := *x.ManualSelector - yym128 := z.EncBinary() - _ = yym128 - if false { - } else { - r.EncodeBool(bool(yy127)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq107[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("manualSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ManualSelector == nil { - r.EncodeNil() - } else { - yy129 := 
*x.ManualSelector - yym130 := z.EncBinary() - _ = yym130 - if false { - } else { - r.EncodeBool(bool(yy129)) - } - } - } - } - if yyr107 || yy2arr107 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy132 := &x.Template - yy132.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy133 := &x.Template - yy133.CodecEncodeSelf(e) - } - if yyr107 || yy2arr107 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym134 := z.DecBinary() - _ = yym134 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct135 := r.ContainerType() - if yyct135 == codecSelferValueTypeMap1234 { - yyl135 := r.ReadMapStart() - if yyl135 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl135, d) - } - } else if yyct135 == codecSelferValueTypeArray1234 { - yyl135 := r.ReadArrayStart() - if yyl135 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl135, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys136Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys136Slc - var yyhl136 bool = l >= 0 - for yyj136 := 0; ; yyj136++ { - if yyhl136 { - if yyj136 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys136Slc = r.DecodeBytes(yys136Slc, true, true) - yys136 := string(yys136Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys136 { - case "parallelism": - if r.TryDecodeAsNil() { - if x.Parallelism != nil { - x.Parallelism = nil - } - } else { - if x.Parallelism == nil { - x.Parallelism = new(int32) - } - yym138 := z.DecBinary() - _ = yym138 - if false { - } else { - *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) - } - } - case "completions": - if r.TryDecodeAsNil() { - if x.Completions != nil { - x.Completions = nil - } - } else { - if x.Completions == nil { - x.Completions = new(int32) - } - yym140 := z.DecBinary() - _ = yym140 - if false { - } else { - *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) - } - } - case "activeDeadlineSeconds": - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym142 := z.DecBinary() - _ = yym142 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym144 := z.DecBinary() - _ = yym144 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "manualSelector": - if r.TryDecodeAsNil() { - if x.ManualSelector != nil { - x.ManualSelector = nil - } - } else { - if x.ManualSelector == nil { - 
x.ManualSelector = new(bool) - } - yym146 := z.DecBinary() - _ = yym146 - if false { - } else { - *((*bool)(x.ManualSelector)) = r.DecodeBool() - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} - } else { - yyv147 := &x.Template - yyv147.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys136) - } // end switch yys136 - } // end for yyj136 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj148 int - var yyb148 bool - var yyhl148 bool = l >= 0 - yyj148++ - if yyhl148 { - yyb148 = yyj148 > l - } else { - yyb148 = r.CheckBreak() - } - if yyb148 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Parallelism != nil { - x.Parallelism = nil - } - } else { - if x.Parallelism == nil { - x.Parallelism = new(int32) - } - yym150 := z.DecBinary() - _ = yym150 - if false { - } else { - *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) - } - } - yyj148++ - if yyhl148 { - yyb148 = yyj148 > l - } else { - yyb148 = r.CheckBreak() - } - if yyb148 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Completions != nil { - x.Completions = nil - } - } else { - if x.Completions == nil { - x.Completions = new(int32) - } - yym152 := z.DecBinary() - _ = yym152 - if false { - } else { - *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) - } - } - yyj148++ - if yyhl148 { - yyb148 = yyj148 > l - } else { - yyb148 = r.CheckBreak() - } - if yyb148 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym154 := z.DecBinary() - _ = yym154 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj148++ - if yyhl148 { - yyb148 = yyj148 > l - } else { - yyb148 = r.CheckBreak() - } - if yyb148 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym156 := z.DecBinary() - _ = yym156 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj148++ - if yyhl148 { - yyb148 = yyj148 > l - } else { - yyb148 = r.CheckBreak() - } - if yyb148 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ManualSelector != nil { - x.ManualSelector = nil - } - } else { - if x.ManualSelector == nil { - x.ManualSelector = new(bool) - } - yym158 := z.DecBinary() - _ = yym158 - if false { - } else { - *((*bool)(x.ManualSelector)) = r.DecodeBool() - } - } - yyj148++ - if yyhl148 { - yyb148 = yyj148 > l - } else { - yyb148 = r.CheckBreak() - } - if yyb148 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} - } else { - yyv159 := &x.Template - yyv159.CodecDecodeSelf(d) - } - for { - yyj148++ - if yyhl148 { - yyb148 = yyj148 > l - } else { - yyb148 = r.CheckBreak() - } - if yyb148 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj148-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym160 := z.EncBinary() - _ = yym160 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep161 := !z.EncBinary() - yy2arr161 := z.EncBasicHandle().StructToArray - var yyq161 [6]bool - _, _, _ = yysep161, yyq161, yy2arr161 - const yyr161 bool = false - yyq161[0] = len(x.Conditions) != 0 - yyq161[1] = x.StartTime != nil - yyq161[2] = x.CompletionTime != nil - yyq161[3] = x.Active != 0 - yyq161[4] = x.Succeeded != 0 - yyq161[5] = x.Failed != 0 - var yynn161 int - if yyr161 || yy2arr161 { - r.EncodeArrayStart(6) - } else { - yynn161 = 0 - for _, b := range yyq161 { - if b { - yynn161++ - } - } - r.EncodeMapStart(yynn161) - yynn161 = 0 - } - if yyr161 || yy2arr161 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq161[0] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym163 := z.EncBinary() - _ = yym163 - if false { - } else { - h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq161[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym164 := z.EncBinary() - _ = yym164 - if false { - } else { - h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) - } - } - } - } - if yyr161 || yy2arr161 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq161[1] { - if x.StartTime == nil { - r.EncodeNil() - } else { - yym166 := z.EncBinary() - _ = yym166 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym166 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym166 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq161[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.StartTime == nil { - r.EncodeNil() - } else { - yym167 := z.EncBinary() - _ = yym167 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym167 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym167 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } - } - if yyr161 || yy2arr161 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq161[2] { - if x.CompletionTime == nil { - r.EncodeNil() - } else { - yym169 := z.EncBinary() - _ = yym169 - if false { - } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { - } else if yym169 { - z.EncBinaryMarshal(x.CompletionTime) - } else if !yym169 && z.IsJSONHandle() { - z.EncJSONMarshal(x.CompletionTime) - } else { - 
z.EncFallback(x.CompletionTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq161[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("completionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CompletionTime == nil { - r.EncodeNil() - } else { - yym170 := z.EncBinary() - _ = yym170 - if false { - } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { - } else if yym170 { - z.EncBinaryMarshal(x.CompletionTime) - } else if !yym170 && z.IsJSONHandle() { - z.EncJSONMarshal(x.CompletionTime) - } else { - z.EncFallback(x.CompletionTime) - } - } - } - } - if yyr161 || yy2arr161 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq161[3] { - yym172 := z.EncBinary() - _ = yym172 - if false { - } else { - r.EncodeInt(int64(x.Active)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq161[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("active")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym173 := z.EncBinary() - _ = yym173 - if false { - } else { - r.EncodeInt(int64(x.Active)) - } - } - } - if yyr161 || yy2arr161 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq161[4] { - yym175 := z.EncBinary() - _ = yym175 - if false { - } else { - r.EncodeInt(int64(x.Succeeded)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq161[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("succeeded")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym176 := z.EncBinary() - _ = yym176 - if false { - } else { - r.EncodeInt(int64(x.Succeeded)) - } - } - } - if yyr161 || yy2arr161 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq161[5] { - yym178 := z.EncBinary() - _ = yym178 - if false { - } else { - r.EncodeInt(int64(x.Failed)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq161[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("failed")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym179 := z.EncBinary() - _ = yym179 - if false { - } else { - r.EncodeInt(int64(x.Failed)) - } - } - } - if yyr161 || yy2arr161 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym180 := z.DecBinary() - _ = yym180 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct181 := r.ContainerType() - if yyct181 == codecSelferValueTypeMap1234 { - yyl181 := r.ReadMapStart() - if yyl181 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl181, d) - } - } else if yyct181 == codecSelferValueTypeArray1234 { - yyl181 := r.ReadArrayStart() - if yyl181 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl181, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys182Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys182Slc - var yyhl182 bool = l >= 0 - 
for yyj182 := 0; ; yyj182++ { - if yyhl182 { - if yyj182 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys182Slc = r.DecodeBytes(yys182Slc, true, true) - yys182 := string(yys182Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys182 { - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv183 := &x.Conditions - yym184 := z.DecBinary() - _ = yym184 - if false { - } else { - h.decSliceJobCondition((*[]JobCondition)(yyv183), d) - } - } - case "startTime": - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg1_unversioned.Time) - } - yym186 := z.DecBinary() - _ = yym186 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym186 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym186 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - case "completionTime": - if r.TryDecodeAsNil() { - if x.CompletionTime != nil { - x.CompletionTime = nil - } - } else { - if x.CompletionTime == nil { - x.CompletionTime = new(pkg1_unversioned.Time) - } - yym188 := z.DecBinary() - _ = yym188 - if false { - } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { - } else if yym188 { - z.DecBinaryUnmarshal(x.CompletionTime) - } else if !yym188 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.CompletionTime) - } else { - z.DecFallback(x.CompletionTime, false) - } - } - case "active": - if r.TryDecodeAsNil() { - x.Active = 0 - } else { - x.Active = int32(r.DecodeInt(32)) - } - case "succeeded": - if r.TryDecodeAsNil() { - x.Succeeded = 0 - } else { - x.Succeeded = int32(r.DecodeInt(32)) - } - case "failed": - if r.TryDecodeAsNil() { - x.Failed = 0 - } else { - x.Failed = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys182) - } // end switch yys182 - } // end for yyj182 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj192 int - var yyb192 bool - var yyhl192 bool = l >= 0 - yyj192++ - if yyhl192 { - yyb192 = yyj192 > l - } else { - yyb192 = r.CheckBreak() - } - if yyb192 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv193 := &x.Conditions - yym194 := z.DecBinary() - _ = yym194 - if false { - } else { - h.decSliceJobCondition((*[]JobCondition)(yyv193), d) - } - } - yyj192++ - if yyhl192 { - yyb192 = yyj192 > l - } else { - yyb192 = r.CheckBreak() - } - if yyb192 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg1_unversioned.Time) - } - yym196 := z.DecBinary() - _ = yym196 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym196 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym196 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - yyj192++ - if yyhl192 { - yyb192 = yyj192 > l - } else { - yyb192 = r.CheckBreak() - } - if yyb192 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CompletionTime != nil { - x.CompletionTime = nil - } - } else { - if x.CompletionTime == nil { - x.CompletionTime = new(pkg1_unversioned.Time) - } - yym198 := z.DecBinary() - _ = yym198 - if false { - } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { - } else if yym198 { - z.DecBinaryUnmarshal(x.CompletionTime) - } else if !yym198 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.CompletionTime) - } else { - z.DecFallback(x.CompletionTime, false) - } - } - yyj192++ - if yyhl192 { - yyb192 = yyj192 > l - } else { - yyb192 = r.CheckBreak() - } - if yyb192 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Active = 0 - } else { - x.Active = int32(r.DecodeInt(32)) - } - yyj192++ - if yyhl192 { - yyb192 = yyj192 > l - } else { - yyb192 = r.CheckBreak() - } - if yyb192 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Succeeded = 0 - } else { - x.Succeeded = int32(r.DecodeInt(32)) - } - yyj192++ - if yyhl192 { - yyb192 = yyj192 > l - } else { - yyb192 = r.CheckBreak() - } - if yyb192 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Failed = 0 - } else { - x.Failed = int32(r.DecodeInt(32)) - } - for { - yyj192++ - if yyhl192 { - yyb192 = yyj192 > l - } else { - yyb192 = r.CheckBreak() - } - if yyb192 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj192-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x JobConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym202 := z.EncBinary() - _ = yym202 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *JobConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym203 := z.DecBinary() - _ = yym203 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() + z.DecStructFieldNotFound(yyj11-1, "") } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *JobTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym204 := z.EncBinary() - _ = yym204 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep205 := !z.EncBinary() - yy2arr205 := z.EncBasicHandle().StructToArray - var yyq205 [6]bool - _, _, _ = yysep205, yyq205, yy2arr205 - const yyr205 bool = false - yyq205[2] = true - yyq205[3] = true - yyq205[4] = x.Reason != "" - yyq205[5] = x.Message != "" - var yynn205 int - if yyr205 || yy2arr205 { - r.EncodeArrayStart(6) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true 
+ var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) } else { - yynn205 = 2 - for _, b := range yyq205 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn205++ + yynn2++ } } - r.EncodeMapStart(yynn205) - yynn205 = 0 - } - if yyr205 || yy2arr205 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr205 || yy2arr205 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym208 := z.EncBinary() - _ = yym208 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym209 := z.EncBinary() - _ = yym209 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr205 || yy2arr205 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq205[2] { - yy211 := &x.LastProbeTime - yym212 := z.EncBinary() - _ = yym212 + if yyq2[0] { + yy4 := &x.ObjectMeta + yym5 := z.EncBinary() + _ = yym5 if false { - } else if z.HasExtensions() && z.EncExt(yy211) { - } else if yym212 { - z.EncBinaryMarshal(yy211) - } else if !yym212 && z.IsJSONHandle() { - z.EncJSONMarshal(yy211) + } else if z.HasExtensions() && z.EncExt(yy4) { } else { - z.EncFallback(yy211) + z.EncFallback(yy4) } } else { r.EncodeNil() } } else { - if yyq205[2] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy213 := &x.LastProbeTime - yym214 := z.EncBinary() - _ = yym214 + yy6 := &x.ObjectMeta + yym7 := z.EncBinary() + _ = yym7 if false { - } else if z.HasExtensions() && z.EncExt(yy213) { - } else if yym214 { - z.EncBinaryMarshal(yy213) - } else if !yym214 && z.IsJSONHandle() { - z.EncJSONMarshal(yy213) + } else if z.HasExtensions() && z.EncExt(yy6) { } else { - z.EncFallback(yy213) + z.EncFallback(yy6) } } } - if yyr205 || yy2arr205 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq205[3] { - yy216 := &x.LastTransitionTime - yym217 := z.EncBinary() - _ = yym217 - if false { - } else if z.HasExtensions() && z.EncExt(yy216) { - } else if yym217 { - z.EncBinaryMarshal(yy216) - } else if !yym217 && z.IsJSONHandle() { - z.EncJSONMarshal(yy216) - } else { - z.EncFallback(yy216) - } + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq205[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy218 := &x.LastTransitionTime - yym219 := z.EncBinary() - _ = yym219 - if false { - } else if z.HasExtensions() && z.EncExt(yy218) { - } else if yym219 { - z.EncBinaryMarshal(yy218) - } else if !yym219 && z.IsJSONHandle() { - z.EncJSONMarshal(yy218) - } else { - z.EncFallback(yy218) - } - } - } - if yyr205 || yy2arr205 
{ - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq205[4] { - yym221 := z.EncBinary() - _ = yym221 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq205[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym222 := z.EncBinary() - _ = yym222 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr205 || yy2arr205 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq205[5] { - yym224 := z.EncBinary() - _ = yym224 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq205[5] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) + r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym225 := z.EncBinary() - _ = yym225 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) } } - if yyr205 || yy2arr205 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -2564,29 +513,29 @@ func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *JobCondition) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *JobTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym226 := z.DecBinary() - _ = yym226 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct227 := r.ContainerType() - if yyct227 == codecSelferValueTypeMap1234 { - yyl227 := r.ReadMapStart() - if yyl227 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl227, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct227 == codecSelferValueTypeArray1234 { - yyl227 := r.ReadArrayStart() - if yyl227 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl227, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -2594,16 +543,16 @@ func (x *JobCondition) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *JobTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys228Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys228Slc - var yyhl228 bool = l >= 0 - for yyj228 := 0; ; yyj228++ { - if yyhl228 { - if yyj228 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -2612,212 +561,96 @@ func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys228Slc = r.DecodeBytes(yys228Slc, true, true) - yys228 := string(yys228Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys228 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = JobConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg2_v1.ConditionStatus(r.DecodeString()) - } - case "lastProbeTime": - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg1_unversioned.Time{} - } else { - yyv231 := &x.LastProbeTime - yym232 := z.DecBinary() - _ = yym232 - if false { - } else if z.HasExtensions() && z.DecExt(yyv231) { - } else if yym232 { - z.DecBinaryUnmarshal(yyv231) - } else if !yym232 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv231) - } else { - z.DecFallback(yyv231, false) - } - } - case "lastTransitionTime": + switch yys3 { + case "metadata": if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv233 := &x.LastTransitionTime - yym234 := z.DecBinary() - _ = yym234 + yyv4 := &x.ObjectMeta + yym5 := z.DecBinary() + _ = yym5 if false { - } else if z.HasExtensions() && z.DecExt(yyv233) { - } else if yym234 { - z.DecBinaryUnmarshal(yyv233) - } else if !yym234 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv233) + } else if z.HasExtensions() && z.DecExt(yyv4) { } else { - z.DecFallback(yyv233, false) + z.DecFallback(yyv4, false) } } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": + case "spec": if r.TryDecodeAsNil() { - x.Message = "" + x.Spec = pkg3_v1.JobSpec{} } else { - x.Message = string(r.DecodeString()) + yyv6 := &x.Spec + yyv6.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys228) - } // end switch yys228 - } // end for yyj228 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *JobTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj237 int - var yyb237 bool - var yyhl237 bool = l >= 0 - yyj237++ - if yyhl237 { - yyb237 = yyj237 > l - } else { - yyb237 = r.CheckBreak() - } - if yyb237 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = JobConditionType(r.DecodeString()) - } - yyj237++ - if yyhl237 { - yyb237 = yyj237 > l - } else { - yyb237 = r.CheckBreak() - } - if yyb237 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg2_v1.ConditionStatus(r.DecodeString()) - } - yyj237++ - if yyhl237 { - yyb237 = yyj237 > l - } else { - yyb237 = r.CheckBreak() - } - if yyb237 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg1_unversioned.Time{} + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyv240 := &x.LastProbeTime - 
yym241 := z.DecBinary() - _ = yym241 - if false { - } else if z.HasExtensions() && z.DecExt(yyv240) { - } else if yym241 { - z.DecBinaryUnmarshal(yyv240) - } else if !yym241 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv240) - } else { - z.DecFallback(yyv240, false) - } - } - yyj237++ - if yyhl237 { - yyb237 = yyj237 > l - } else { - yyb237 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb237 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv242 := &x.LastTransitionTime - yym243 := z.DecBinary() - _ = yym243 + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv242) { - } else if yym243 { - z.DecBinaryUnmarshal(yyv242) - } else if !yym243 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv242) + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv242, false) + z.DecFallback(yyv8, false) } } - yyj237++ - if yyhl237 { - yyb237 = yyj237 > l - } else { - yyb237 = r.CheckBreak() - } - if yyb237 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj237++ - if yyhl237 { - yyb237 = yyj237 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb237 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb237 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Message = "" + x.Spec = pkg3_v1.JobSpec{} } else { - x.Message = string(r.DecodeString()) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } for { - yyj237++ - if yyhl237 { - yyb237 = yyj237 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb237 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb237 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj237-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -2829,39 +662,39 @@ func (x *CronJob) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym246 := z.EncBinary() - _ = yym246 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep247 := !z.EncBinary() - yy2arr247 := z.EncBasicHandle().StructToArray - var yyq247 [5]bool - _, _, _ = yysep247, yyq247, yy2arr247 - const yyr247 bool = false - yyq247[0] = x.Kind != "" - yyq247[1] = x.APIVersion != "" - yyq247[2] = true - yyq247[3] = true - yyq247[4] = true - var yynn247 int - if yyr247 || yy2arr247 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn247 = 0 - for _, b := range yyq247 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn247++ + yynn2++ } } - r.EncodeMapStart(yynn247) - yynn247 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr247 || yy2arr247 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq247[0] { - yym249 := 
z.EncBinary() - _ = yym249 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -2870,23 +703,23 @@ func (x *CronJob) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq247[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym250 := z.EncBinary() - _ = yym250 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr247 || yy2arr247 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq247[1] { - yym252 := z.EncBinary() - _ = yym252 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -2895,70 +728,82 @@ func (x *CronJob) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq247[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym253 := z.EncBinary() - _ = yym253 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr247 || yy2arr247 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq247[2] { - yy255 := &x.ObjectMeta - yy255.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq247[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy256 := &x.ObjectMeta - yy256.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr247 || yy2arr247 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq247[3] { - yy258 := &x.Spec - yy258.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq247[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy259 := &x.Spec - yy259.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } } - if yyr247 || yy2arr247 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq247[4] { - yy261 := &x.Status - yy261.CodecEncodeSelf(e) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq247[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy262 := &x.Status - yy262.CodecEncodeSelf(e) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } - if yyr247 || yy2arr247 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -2971,25 +816,25 @@ func (x *CronJob) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym263 := z.DecBinary() - _ = yym263 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct264 := r.ContainerType() - if yyct264 == codecSelferValueTypeMap1234 { - yyl264 := r.ReadMapStart() - if yyl264 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl264, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct264 == codecSelferValueTypeArray1234 { - yyl264 := r.ReadArrayStart() - if yyl264 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl264, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -3001,12 +846,12 @@ func (x *CronJob) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys265Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys265Slc - var yyhl265 bool = l >= 0 - for yyj265 := 0; ; yyj265++ { - if yyhl265 { - if yyj265 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -3015,47 +860,65 @@ func (x *CronJob) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys265Slc = r.DecodeBytes(yys265Slc, true, true) - yys265 := string(yys265Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys265 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv268 := &x.ObjectMeta - yyv268.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = CronJobSpec{} } else { - yyv269 := &x.Spec - yyv269.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = CronJobStatus{} } else { - yyv270 := &x.Status - yyv270.CodecDecodeSelf(d) + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys265) - } // end switch yys265 - } // end for yyj265 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -3063,16 +926,16 @@ func (x *CronJob) codecDecodeSelfFromArray(l int, d 
*codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj271 int - var yyb271 bool - var yyhl271 bool = l >= 0 - yyj271++ - if yyhl271 { - yyb271 = yyj271 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb271 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb271 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3080,15 +943,21 @@ func (x *CronJob) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj271++ - if yyhl271 { - yyb271 = yyj271 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb271 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb271 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3096,32 +965,44 @@ func (x *CronJob) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj271++ - if yyhl271 { - yyb271 = yyj271 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb271 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb271 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv274 := &x.ObjectMeta - yyv274.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj271++ - if yyhl271 { - yyb271 = yyj271 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb271 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb271 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3129,16 +1010,16 @@ func (x *CronJob) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Spec = CronJobSpec{} } else { - yyv275 := &x.Spec - yyv275.CodecDecodeSelf(d) + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) } - yyj271++ - if yyhl271 { - yyb271 = yyj271 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb271 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb271 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3146,21 +1027,21 @@ func (x *CronJob) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Status = CronJobStatus{} } else { - yyv276 := &x.Status - yyv276.CodecDecodeSelf(d) + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) } for { - yyj271++ - if yyhl271 { - yyb271 = yyj271 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb271 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb271 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj271-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -3172,37 +1053,37 @@ func (x *CronJobList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym277 := z.EncBinary() - _ = 
yym277 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep278 := !z.EncBinary() - yy2arr278 := z.EncBasicHandle().StructToArray - var yyq278 [4]bool - _, _, _ = yysep278, yyq278, yy2arr278 - const yyr278 bool = false - yyq278[0] = x.Kind != "" - yyq278[1] = x.APIVersion != "" - yyq278[2] = true - var yynn278 int - if yyr278 || yy2arr278 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn278 = 1 - for _, b := range yyq278 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn278++ + yynn2++ } } - r.EncodeMapStart(yynn278) - yynn278 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr278 || yy2arr278 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq278[0] { - yym280 := z.EncBinary() - _ = yym280 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -3211,23 +1092,23 @@ func (x *CronJobList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq278[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym281 := z.EncBinary() - _ = yym281 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr278 || yy2arr278 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq278[1] { - yym283 := z.EncBinary() - _ = yym283 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -3236,54 +1117,54 @@ func (x *CronJobList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq278[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym284 := z.EncBinary() - _ = yym284 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr278 || yy2arr278 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq278[2] { - yy286 := &x.ListMeta - yym287 := z.EncBinary() - _ = yym287 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy286) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy286) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq278[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy288 := &x.ListMeta - yym289 := z.EncBinary() - _ = yym289 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy288) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy288) + z.EncFallback(yy12) } } } - if yyr278 || yy2arr278 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if 
x.Items == nil { r.EncodeNil() } else { - yym291 := z.EncBinary() - _ = yym291 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceCronJob(([]CronJob)(x.Items), e) @@ -3296,15 +1177,15 @@ func (x *CronJobList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym292 := z.EncBinary() - _ = yym292 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceCronJob(([]CronJob)(x.Items), e) } } } - if yyr278 || yy2arr278 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -3317,25 +1198,25 @@ func (x *CronJobList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym293 := z.DecBinary() - _ = yym293 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct294 := r.ContainerType() - if yyct294 == codecSelferValueTypeMap1234 { - yyl294 := r.ReadMapStart() - if yyl294 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl294, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct294 == codecSelferValueTypeArray1234 { - yyl294 := r.ReadArrayStart() - if yyl294 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl294, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -3347,12 +1228,12 @@ func (x *CronJobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys295Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys295Slc - var yyhl295 bool = l >= 0 - for yyj295 := 0; ; yyj295++ { - if yyhl295 { - if yyj295 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -3361,51 +1242,63 @@ func (x *CronJobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys295Slc = r.DecodeBytes(yys295Slc, true, true) - yys295 := string(yys295Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys295 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv298 := &x.ListMeta - yym299 := z.DecBinary() - _ = yym299 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv298) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv298, false) + 
z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv300 := &x.Items - yym301 := z.DecBinary() - _ = yym301 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceCronJob((*[]CronJob)(yyv300), d) + h.decSliceCronJob((*[]CronJob)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys295) - } // end switch yys295 - } // end for yyj295 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -3413,16 +1306,16 @@ func (x *CronJobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj302 int - var yyb302 bool - var yyhl302 bool = l >= 0 - yyj302++ - if yyhl302 { - yyb302 = yyj302 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb302 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb302 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3430,15 +1323,21 @@ func (x *CronJobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj302++ - if yyhl302 { - yyb302 = yyj302 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb302 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb302 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3446,38 +1345,44 @@ func (x *CronJobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj302++ - if yyhl302 { - yyb302 = yyj302 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb302 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb302 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv305 := &x.ListMeta - yym306 := z.DecBinary() - _ = yym306 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv305) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv305, false) + z.DecFallback(yyv17, false) } } - yyj302++ - if yyhl302 { - yyb302 = yyj302 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb302 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb302 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3485,26 +1390,26 @@ func (x *CronJobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Items = nil } else { - yyv307 := &x.Items - yym308 := z.DecBinary() - _ = yym308 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceCronJob((*[]CronJob)(yyv307), d) + h.decSliceCronJob((*[]CronJob)(yyv19), d) } } for { - yyj302++ - if yyhl302 { - yyb302 = yyj302 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb302 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb302 { + if yyb12 { break } 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj302-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -3516,36 +1421,38 @@ func (x *CronJobSpec) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym309 := z.EncBinary() - _ = yym309 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep310 := !z.EncBinary() - yy2arr310 := z.EncBasicHandle().StructToArray - var yyq310 [5]bool - _, _, _ = yysep310, yyq310, yy2arr310 - const yyr310 bool = false - yyq310[1] = x.StartingDeadlineSeconds != nil - yyq310[2] = x.ConcurrencyPolicy != "" - yyq310[3] = x.Suspend != nil - var yynn310 int - if yyr310 || yy2arr310 { - r.EncodeArrayStart(5) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.StartingDeadlineSeconds != nil + yyq2[2] = x.ConcurrencyPolicy != "" + yyq2[3] = x.Suspend != nil + yyq2[5] = x.SuccessfulJobsHistoryLimit != nil + yyq2[6] = x.FailedJobsHistoryLimit != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) } else { - yynn310 = 2 - for _, b := range yyq310 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn310++ + yynn2++ } } - r.EncodeMapStart(yynn310) - yynn310 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr310 || yy2arr310 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym312 := z.EncBinary() - _ = yym312 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Schedule)) @@ -3554,110 +1461,180 @@ func (x *CronJobSpec) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("schedule")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym313 := z.EncBinary() - _ = yym313 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Schedule)) } } - if yyr310 || yy2arr310 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq310[1] { + if yyq2[1] { if x.StartingDeadlineSeconds == nil { r.EncodeNil() } else { - yy315 := *x.StartingDeadlineSeconds - yym316 := z.EncBinary() - _ = yym316 + yy7 := *x.StartingDeadlineSeconds + yym8 := z.EncBinary() + _ = yym8 if false { } else { - r.EncodeInt(int64(yy315)) + r.EncodeInt(int64(yy7)) } } } else { r.EncodeNil() } } else { - if yyq310[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("startingDeadlineSeconds")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.StartingDeadlineSeconds == nil { r.EncodeNil() } else { - yy317 := *x.StartingDeadlineSeconds - yym318 := z.EncBinary() - _ = yym318 + yy9 := *x.StartingDeadlineSeconds + yym10 := z.EncBinary() + _ = yym10 if false { } else { - r.EncodeInt(int64(yy317)) + r.EncodeInt(int64(yy9)) } } } } - if yyr310 || yy2arr310 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq310[2] { + if yyq2[2] { x.ConcurrencyPolicy.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq310[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("concurrencyPolicy")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) x.ConcurrencyPolicy.CodecEncodeSelf(e) } } - if yyr310 || yy2arr310 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq310[3] { + if yyq2[3] { if x.Suspend == nil { r.EncodeNil() } else { - yy321 := *x.Suspend - yym322 := z.EncBinary() - _ = yym322 + yy15 := *x.Suspend + yym16 := z.EncBinary() + _ = yym16 if false { } else { - r.EncodeBool(bool(yy321)) + r.EncodeBool(bool(yy15)) } } } else { r.EncodeNil() } } else { - if yyq310[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("suspend")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Suspend == nil { r.EncodeNil() } else { - yy323 := *x.Suspend - yym324 := z.EncBinary() - _ = yym324 + yy17 := *x.Suspend + yym18 := z.EncBinary() + _ = yym18 if false { } else { - r.EncodeBool(bool(yy323)) + r.EncodeBool(bool(yy17)) } } } } - if yyr310 || yy2arr310 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy326 := &x.JobTemplate - yy326.CodecEncodeSelf(e) + yy20 := &x.JobTemplate + yy20.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("jobTemplate")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy327 := &x.JobTemplate - yy327.CodecEncodeSelf(e) + yy22 := &x.JobTemplate + yy22.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.SuccessfulJobsHistoryLimit == nil { + r.EncodeNil() + } else { + yy25 := *x.SuccessfulJobsHistoryLimit + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeInt(int64(yy25)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("successfulJobsHistoryLimit")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SuccessfulJobsHistoryLimit == nil { + r.EncodeNil() + } else { + yy27 := *x.SuccessfulJobsHistoryLimit + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeInt(int64(yy27)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.FailedJobsHistoryLimit == nil { + r.EncodeNil() + } else { + yy30 := *x.FailedJobsHistoryLimit + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeInt(int64(yy30)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("failedJobsHistoryLimit")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FailedJobsHistoryLimit == nil { + r.EncodeNil() + } else { + yy32 := *x.FailedJobsHistoryLimit + yym33 := z.EncBinary() + _ = yym33 + if false { + } else { + r.EncodeInt(int64(yy32)) + } + } + } } - if yyr310 || yy2arr310 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -3670,25 +1647,25 @@ func (x *CronJobSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym328 := z.DecBinary() - _ = yym328 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct329 := r.ContainerType() - if yyct329 == 
codecSelferValueTypeMap1234 { - yyl329 := r.ReadMapStart() - if yyl329 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl329, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct329 == codecSelferValueTypeArray1234 { - yyl329 := r.ReadArrayStart() - if yyl329 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl329, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -3700,12 +1677,12 @@ func (x *CronJobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys330Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys330Slc - var yyhl330 bool = l >= 0 - for yyj330 := 0; ; yyj330++ { - if yyhl330 { - if yyj330 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -3714,15 +1691,21 @@ func (x *CronJobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys330Slc = r.DecodeBytes(yys330Slc, true, true) - yys330 := string(yys330Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys330 { + switch yys3 { case "schedule": if r.TryDecodeAsNil() { x.Schedule = "" } else { - x.Schedule = string(r.DecodeString()) + yyv4 := &x.Schedule + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "startingDeadlineSeconds": if r.TryDecodeAsNil() { @@ -3733,8 +1716,8 @@ func (x *CronJobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.StartingDeadlineSeconds == nil { x.StartingDeadlineSeconds = new(int64) } - yym333 := z.DecBinary() - _ = yym333 + yym7 := z.DecBinary() + _ = yym7 if false { } else { *((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64)) @@ -3744,7 +1727,8 @@ func (x *CronJobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ConcurrencyPolicy = "" } else { - x.ConcurrencyPolicy = ConcurrencyPolicy(r.DecodeString()) + yyv8 := &x.ConcurrencyPolicy + yyv8.CodecDecodeSelf(d) } case "suspend": if r.TryDecodeAsNil() { @@ -3755,8 +1739,8 @@ func (x *CronJobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.Suspend == nil { x.Suspend = new(bool) } - yym336 := z.DecBinary() - _ = yym336 + yym10 := z.DecBinary() + _ = yym10 if false { } else { *((*bool)(x.Suspend)) = r.DecodeBool() @@ -3766,13 +1750,45 @@ func (x *CronJobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.JobTemplate = JobTemplateSpec{} } else { - yyv337 := &x.JobTemplate - yyv337.CodecDecodeSelf(d) + yyv11 := &x.JobTemplate + yyv11.CodecDecodeSelf(d) + } + case "successfulJobsHistoryLimit": + if r.TryDecodeAsNil() { + if x.SuccessfulJobsHistoryLimit != nil { + x.SuccessfulJobsHistoryLimit = nil + } + } else { + if x.SuccessfulJobsHistoryLimit == nil { + x.SuccessfulJobsHistoryLimit = new(int32) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(x.SuccessfulJobsHistoryLimit)) 
= int32(r.DecodeInt(32)) + } + } + case "failedJobsHistoryLimit": + if r.TryDecodeAsNil() { + if x.FailedJobsHistoryLimit != nil { + x.FailedJobsHistoryLimit = nil + } + } else { + if x.FailedJobsHistoryLimit == nil { + x.FailedJobsHistoryLimit = new(int32) + } + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(x.FailedJobsHistoryLimit)) = int32(r.DecodeInt(32)) + } } default: - z.DecStructFieldNotFound(-1, yys330) - } // end switch yys330 - } // end for yyj330 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -3780,16 +1796,16 @@ func (x *CronJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj338 int - var yyb338 bool - var yyhl338 bool = l >= 0 - yyj338++ - if yyhl338 { - yyb338 = yyj338 > l + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb338 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb338 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3797,15 +1813,21 @@ func (x *CronJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Schedule = "" } else { - x.Schedule = string(r.DecodeString()) + yyv17 := &x.Schedule + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } } - yyj338++ - if yyhl338 { - yyb338 = yyj338 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb338 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb338 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3818,20 +1840,20 @@ func (x *CronJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.StartingDeadlineSeconds == nil { x.StartingDeadlineSeconds = new(int64) } - yym341 := z.DecBinary() - _ = yym341 + yym20 := z.DecBinary() + _ = yym20 if false { } else { *((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64)) } } - yyj338++ - if yyhl338 { - yyb338 = yyj338 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb338 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb338 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3839,15 +1861,16 @@ func (x *CronJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ConcurrencyPolicy = "" } else { - x.ConcurrencyPolicy = ConcurrencyPolicy(r.DecodeString()) + yyv21 := &x.ConcurrencyPolicy + yyv21.CodecDecodeSelf(d) } - yyj338++ - if yyhl338 { - yyb338 = yyj338 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb338 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb338 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3860,20 +1883,20 @@ func (x *CronJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.Suspend == nil { x.Suspend = new(bool) } - yym344 := z.DecBinary() - _ = yym344 + yym23 := z.DecBinary() + _ = yym23 if false { } else { *((*bool)(x.Suspend)) = r.DecodeBool() } } - yyj338++ - if yyhl338 { - yyb338 = yyj338 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb338 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb338 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3881,21 +1904,73 @@ func (x *CronJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.JobTemplate = 
JobTemplateSpec{} } else { - yyv345 := &x.JobTemplate - yyv345.CodecDecodeSelf(d) + yyv24 := &x.JobTemplate + yyv24.CodecDecodeSelf(d) + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SuccessfulJobsHistoryLimit != nil { + x.SuccessfulJobsHistoryLimit = nil + } + } else { + if x.SuccessfulJobsHistoryLimit == nil { + x.SuccessfulJobsHistoryLimit = new(int32) + } + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(x.SuccessfulJobsHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FailedJobsHistoryLimit != nil { + x.FailedJobsHistoryLimit = nil + } + } else { + if x.FailedJobsHistoryLimit == nil { + x.FailedJobsHistoryLimit = new(int32) + } + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(x.FailedJobsHistoryLimit)) = int32(r.DecodeInt(32)) + } } for { - yyj338++ - if yyhl338 { - yyb338 = yyj338 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb338 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb338 { + if yyb16 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj338-1, "") + z.DecStructFieldNotFound(yyj16-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -3904,8 +1979,8 @@ func (x ConcurrencyPolicy) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym346 := z.EncBinary() - _ = yym346 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -3917,8 +1992,8 @@ func (x *ConcurrencyPolicy) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym347 := z.DecBinary() - _ = yym347 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -3933,77 +2008,77 @@ func (x *CronJobStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym348 := z.EncBinary() - _ = yym348 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep349 := !z.EncBinary() - yy2arr349 := z.EncBasicHandle().StructToArray - var yyq349 [2]bool - _, _, _ = yysep349, yyq349, yy2arr349 - const yyr349 bool = false - yyq349[0] = len(x.Active) != 0 - yyq349[1] = x.LastScheduleTime != nil - var yynn349 int - if yyr349 || yy2arr349 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Active) != 0 + yyq2[1] = x.LastScheduleTime != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn349 = 0 - for _, b := range yyq349 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn349++ + yynn2++ } } - r.EncodeMapStart(yynn349) - yynn349 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr349 || yy2arr349 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq349[0] { + if yyq2[0] { if x.Active == nil { r.EncodeNil() } else { - yym351 := z.EncBinary() - _ = 
yym351 + yym4 := z.EncBinary() + _ = yym4 if false { } else { - h.encSlicev1_ObjectReference(([]pkg2_v1.ObjectReference)(x.Active), e) + h.encSlicev1_ObjectReference(([]pkg4_v1.ObjectReference)(x.Active), e) } } } else { r.EncodeNil() } } else { - if yyq349[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("active")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Active == nil { r.EncodeNil() } else { - yym352 := z.EncBinary() - _ = yym352 + yym5 := z.EncBinary() + _ = yym5 if false { } else { - h.encSlicev1_ObjectReference(([]pkg2_v1.ObjectReference)(x.Active), e) + h.encSlicev1_ObjectReference(([]pkg4_v1.ObjectReference)(x.Active), e) } } } } - if yyr349 || yy2arr349 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq349[1] { + if yyq2[1] { if x.LastScheduleTime == nil { r.EncodeNil() } else { - yym354 := z.EncBinary() - _ = yym354 + yym7 := z.EncBinary() + _ = yym7 if false { } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) { - } else if yym354 { + } else if yym7 { z.EncBinaryMarshal(x.LastScheduleTime) - } else if !yym354 && z.IsJSONHandle() { + } else if !yym7 && z.IsJSONHandle() { z.EncJSONMarshal(x.LastScheduleTime) } else { z.EncFallback(x.LastScheduleTime) @@ -4013,20 +2088,20 @@ func (x *CronJobStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq349[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("lastScheduleTime")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.LastScheduleTime == nil { r.EncodeNil() } else { - yym355 := z.EncBinary() - _ = yym355 + yym8 := z.EncBinary() + _ = yym8 if false { } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) { - } else if yym355 { + } else if yym8 { z.EncBinaryMarshal(x.LastScheduleTime) - } else if !yym355 && z.IsJSONHandle() { + } else if !yym8 && z.IsJSONHandle() { z.EncJSONMarshal(x.LastScheduleTime) } else { z.EncFallback(x.LastScheduleTime) @@ -4034,7 +2109,7 @@ func (x *CronJobStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr349 || yy2arr349 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -4047,25 +2122,25 @@ func (x *CronJobStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym356 := z.DecBinary() - _ = yym356 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct357 := r.ContainerType() - if yyct357 == codecSelferValueTypeMap1234 { - yyl357 := r.ReadMapStart() - if yyl357 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl357, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct357 == codecSelferValueTypeArray1234 { - yyl357 := r.ReadArrayStart() - if yyl357 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl357, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -4077,12 +2152,12 @@ func (x *CronJobStatus) codecDecodeSelfFromMap(l int, d 
*codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys358Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys358Slc - var yyhl358 bool = l >= 0 - for yyj358 := 0; ; yyj358++ { - if yyhl358 { - if yyj358 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -4091,20 +2166,20 @@ func (x *CronJobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys358Slc = r.DecodeBytes(yys358Slc, true, true) - yys358 := string(yys358Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys358 { + switch yys3 { case "active": if r.TryDecodeAsNil() { x.Active = nil } else { - yyv359 := &x.Active - yym360 := z.DecBinary() - _ = yym360 + yyv4 := &x.Active + yym5 := z.DecBinary() + _ = yym5 if false { } else { - h.decSlicev1_ObjectReference((*[]pkg2_v1.ObjectReference)(yyv359), d) + h.decSlicev1_ObjectReference((*[]pkg4_v1.ObjectReference)(yyv4), d) } } case "lastScheduleTime": @@ -4114,24 +2189,24 @@ func (x *CronJobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } else { if x.LastScheduleTime == nil { - x.LastScheduleTime = new(pkg1_unversioned.Time) + x.LastScheduleTime = new(pkg1_v1.Time) } - yym362 := z.DecBinary() - _ = yym362 + yym7 := z.DecBinary() + _ = yym7 if false { } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) { - } else if yym362 { + } else if yym7 { z.DecBinaryUnmarshal(x.LastScheduleTime) - } else if !yym362 && z.IsJSONHandle() { + } else if !yym7 && z.IsJSONHandle() { z.DecJSONUnmarshal(x.LastScheduleTime) } else { z.DecFallback(x.LastScheduleTime, false) } } default: - z.DecStructFieldNotFound(-1, yys358) - } // end switch yys358 - } // end for yyj358 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -4139,16 +2214,16 @@ func (x *CronJobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj363 int - var yyb363 bool - var yyhl363 bool = l >= 0 - yyj363++ - if yyhl363 { - yyb363 = yyj363 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb363 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb363 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4156,21 +2231,21 @@ func (x *CronJobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Active = nil } else { - yyv364 := &x.Active - yym365 := z.DecBinary() - _ = yym365 + yyv9 := &x.Active + yym10 := z.DecBinary() + _ = yym10 if false { } else { - h.decSlicev1_ObjectReference((*[]pkg2_v1.ObjectReference)(yyv364), d) + h.decSlicev1_ObjectReference((*[]pkg4_v1.ObjectReference)(yyv9), d) } } - yyj363++ - if yyhl363 { - yyb363 = yyj363 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb363 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb363 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4181,277 +2256,45 @@ func (x *CronJobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } } else { if x.LastScheduleTime == nil { - x.LastScheduleTime = new(pkg1_unversioned.Time) + 
x.LastScheduleTime = new(pkg1_v1.Time) } - yym367 := z.DecBinary() - _ = yym367 + yym12 := z.DecBinary() + _ = yym12 if false { } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) { - } else if yym367 { + } else if yym12 { z.DecBinaryUnmarshal(x.LastScheduleTime) - } else if !yym367 && z.IsJSONHandle() { + } else if !yym12 && z.IsJSONHandle() { z.DecJSONUnmarshal(x.LastScheduleTime) } else { z.DecFallback(x.LastScheduleTime, false) } } for { - yyj363++ - if yyhl363 { - yyb363 = yyj363 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb363 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb363 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj363-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv368 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy369 := &yyv368 - yy369.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv370 := *v - yyh370, yyl370 := z.DecSliceHelperStart() - var yyc370 bool - if yyl370 == 0 { - if yyv370 == nil { - yyv370 = []Job{} - yyc370 = true - } else if len(yyv370) != 0 { - yyv370 = yyv370[:0] - yyc370 = true - } - } else if yyl370 > 0 { - var yyrr370, yyrl370 int - var yyrt370 bool - if yyl370 > cap(yyv370) { - - yyrg370 := len(yyv370) > 0 - yyv2370 := yyv370 - yyrl370, yyrt370 = z.DecInferLen(yyl370, z.DecBasicHandle().MaxInitLen, 824) - if yyrt370 { - if yyrl370 <= cap(yyv370) { - yyv370 = yyv370[:yyrl370] - } else { - yyv370 = make([]Job, yyrl370) - } - } else { - yyv370 = make([]Job, yyrl370) - } - yyc370 = true - yyrr370 = len(yyv370) - if yyrg370 { - copy(yyv370, yyv2370) - } - } else if yyl370 != len(yyv370) { - yyv370 = yyv370[:yyl370] - yyc370 = true - } - yyj370 := 0 - for ; yyj370 < yyrr370; yyj370++ { - yyh370.ElemContainerState(yyj370) - if r.TryDecodeAsNil() { - yyv370[yyj370] = Job{} - } else { - yyv371 := &yyv370[yyj370] - yyv371.CodecDecodeSelf(d) - } - - } - if yyrt370 { - for ; yyj370 < yyl370; yyj370++ { - yyv370 = append(yyv370, Job{}) - yyh370.ElemContainerState(yyj370) - if r.TryDecodeAsNil() { - yyv370[yyj370] = Job{} - } else { - yyv372 := &yyv370[yyj370] - yyv372.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj370 := 0 - for ; !r.CheckBreak(); yyj370++ { - - if yyj370 >= len(yyv370) { - yyv370 = append(yyv370, Job{}) // var yyz370 Job - yyc370 = true - } - yyh370.ElemContainerState(yyj370) - if yyj370 < len(yyv370) { - if r.TryDecodeAsNil() { - yyv370[yyj370] = Job{} - } else { - yyv373 := &yyv370[yyj370] - yyv373.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj370 < len(yyv370) { - yyv370 = yyv370[:yyj370] - yyc370 = true - } else if yyj370 == 0 && yyv370 == nil { - yyv370 = []Job{} - yyc370 = true - } - } - yyh370.End() - if yyc370 { - *v = yyv370 - } -} - -func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv374 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy375 := &yyv374 
- yy375.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv376 := *v - yyh376, yyl376 := z.DecSliceHelperStart() - var yyc376 bool - if yyl376 == 0 { - if yyv376 == nil { - yyv376 = []JobCondition{} - yyc376 = true - } else if len(yyv376) != 0 { - yyv376 = yyv376[:0] - yyc376 = true - } - } else if yyl376 > 0 { - var yyrr376, yyrl376 int - var yyrt376 bool - if yyl376 > cap(yyv376) { - - yyrg376 := len(yyv376) > 0 - yyv2376 := yyv376 - yyrl376, yyrt376 = z.DecInferLen(yyl376, z.DecBasicHandle().MaxInitLen, 112) - if yyrt376 { - if yyrl376 <= cap(yyv376) { - yyv376 = yyv376[:yyrl376] - } else { - yyv376 = make([]JobCondition, yyrl376) - } - } else { - yyv376 = make([]JobCondition, yyrl376) - } - yyc376 = true - yyrr376 = len(yyv376) - if yyrg376 { - copy(yyv376, yyv2376) - } - } else if yyl376 != len(yyv376) { - yyv376 = yyv376[:yyl376] - yyc376 = true - } - yyj376 := 0 - for ; yyj376 < yyrr376; yyj376++ { - yyh376.ElemContainerState(yyj376) - if r.TryDecodeAsNil() { - yyv376[yyj376] = JobCondition{} - } else { - yyv377 := &yyv376[yyj376] - yyv377.CodecDecodeSelf(d) - } - - } - if yyrt376 { - for ; yyj376 < yyl376; yyj376++ { - yyv376 = append(yyv376, JobCondition{}) - yyh376.ElemContainerState(yyj376) - if r.TryDecodeAsNil() { - yyv376[yyj376] = JobCondition{} - } else { - yyv378 := &yyv376[yyj376] - yyv378.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj376 := 0 - for ; !r.CheckBreak(); yyj376++ { - - if yyj376 >= len(yyv376) { - yyv376 = append(yyv376, JobCondition{}) // var yyz376 JobCondition - yyc376 = true - } - yyh376.ElemContainerState(yyj376) - if yyj376 < len(yyv376) { - if r.TryDecodeAsNil() { - yyv376[yyj376] = JobCondition{} - } else { - yyv379 := &yyv376[yyj376] - yyv379.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj376 < len(yyv376) { - yyv376 = yyv376[:yyj376] - yyc376 = true - } else if yyj376 == 0 && yyv376 == nil { - yyv376 = []JobCondition{} - yyc376 = true - } - } - yyh376.End() - if yyc376 { - *v = yyv376 - } -} - func (x codecSelfer1234) encSliceCronJob(v []CronJob, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv380 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy381 := &yyv380 - yy381.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -4461,83 +2304,86 @@ func (x codecSelfer1234) decSliceCronJob(v *[]CronJob, d *codec1978.Decoder) { z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv382 := *v - yyh382, yyl382 := z.DecSliceHelperStart() - var yyc382 bool - if yyl382 == 0 { - if yyv382 == nil { - yyv382 = []CronJob{} - yyc382 = true - } else if len(yyv382) != 0 { - yyv382 = yyv382[:0] - yyc382 = true - } - } else if yyl382 > 0 { - var yyrr382, yyrl382 int - var yyrt382 bool - if yyl382 > cap(yyv382) { - - yyrg382 := len(yyv382) > 0 - yyv2382 := yyv382 - yyrl382, yyrt382 = z.DecInferLen(yyl382, z.DecBasicHandle().MaxInitLen, 1072) - if yyrt382 { - if yyrl382 <= cap(yyv382) { - yyv382 = yyv382[:yyrl382] - } else { - yyv382 = make([]CronJob, yyrl382) - } - } else { - yyv382 = make([]CronJob, yyrl382) - } - yyc382 = true - yyrr382 = len(yyv382) - if yyrg382 { - copy(yyv382, yyv2382) - } - } else if 
yyl382 != len(yyv382) { - yyv382 = yyv382[:yyl382] - yyc382 = true - } - yyj382 := 0 - for ; yyj382 < yyrr382; yyj382++ { - yyh382.ElemContainerState(yyj382) + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []CronJob{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 1144) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]CronJob, yyrl1) + } + } else { + yyv1 = make([]CronJob, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv382[yyj382] = CronJob{} + yyv1[yyj1] = CronJob{} } else { - yyv383 := &yyv382[yyj382] - yyv383.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt382 { - for ; yyj382 < yyl382; yyj382++ { - yyv382 = append(yyv382, CronJob{}) - yyh382.ElemContainerState(yyj382) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, CronJob{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv382[yyj382] = CronJob{} + yyv1[yyj1] = CronJob{} } else { - yyv384 := &yyv382[yyj382] - yyv384.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj382 := 0 - for ; !r.CheckBreak(); yyj382++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj382 >= len(yyv382) { - yyv382 = append(yyv382, CronJob{}) // var yyz382 CronJob - yyc382 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, CronJob{}) // var yyz1 CronJob + yyc1 = true } - yyh382.ElemContainerState(yyj382) - if yyj382 < len(yyv382) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv382[yyj382] = CronJob{} + yyv1[yyj1] = CronJob{} } else { - yyv385 := &yyv382[yyj382] - yyv385.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -4545,115 +2391,118 @@ func (x codecSelfer1234) decSliceCronJob(v *[]CronJob, d *codec1978.Decoder) { } } - if yyj382 < len(yyv382) { - yyv382 = yyv382[:yyj382] - yyc382 = true - } else if yyj382 == 0 && yyv382 == nil { - yyv382 = []CronJob{} - yyc382 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []CronJob{} + yyc1 = true } } - yyh382.End() - if yyc382 { - *v = yyv382 + yyh1.End() + if yyc1 { + *v = yyv1 } } -func (x codecSelfer1234) encSlicev1_ObjectReference(v []pkg2_v1.ObjectReference, e *codec1978.Encoder) { +func (x codecSelfer1234) encSlicev1_ObjectReference(v []pkg4_v1.ObjectReference, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv386 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy387 := &yyv386 - yy387.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSlicev1_ObjectReference(v *[]pkg2_v1.ObjectReference, d *codec1978.Decoder) { +func (x codecSelfer1234) decSlicev1_ObjectReference(v *[]pkg4_v1.ObjectReference, d *codec1978.Decoder) { var h codecSelfer1234 z, r 
:= codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv388 := *v - yyh388, yyl388 := z.DecSliceHelperStart() - var yyc388 bool - if yyl388 == 0 { - if yyv388 == nil { - yyv388 = []pkg2_v1.ObjectReference{} - yyc388 = true - } else if len(yyv388) != 0 { - yyv388 = yyv388[:0] - yyc388 = true - } - } else if yyl388 > 0 { - var yyrr388, yyrl388 int - var yyrt388 bool - if yyl388 > cap(yyv388) { - - yyrg388 := len(yyv388) > 0 - yyv2388 := yyv388 - yyrl388, yyrt388 = z.DecInferLen(yyl388, z.DecBasicHandle().MaxInitLen, 112) - if yyrt388 { - if yyrl388 <= cap(yyv388) { - yyv388 = yyv388[:yyrl388] - } else { - yyv388 = make([]pkg2_v1.ObjectReference, yyrl388) - } - } else { - yyv388 = make([]pkg2_v1.ObjectReference, yyrl388) - } - yyc388 = true - yyrr388 = len(yyv388) - if yyrg388 { - copy(yyv388, yyv2388) - } - } else if yyl388 != len(yyv388) { - yyv388 = yyv388[:yyl388] - yyc388 = true - } - yyj388 := 0 - for ; yyj388 < yyrr388; yyj388++ { - yyh388.ElemContainerState(yyj388) + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg4_v1.ObjectReference{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]pkg4_v1.ObjectReference, yyrl1) + } + } else { + yyv1 = make([]pkg4_v1.ObjectReference, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv388[yyj388] = pkg2_v1.ObjectReference{} + yyv1[yyj1] = pkg4_v1.ObjectReference{} } else { - yyv389 := &yyv388[yyj388] - yyv389.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt388 { - for ; yyj388 < yyl388; yyj388++ { - yyv388 = append(yyv388, pkg2_v1.ObjectReference{}) - yyh388.ElemContainerState(yyj388) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, pkg4_v1.ObjectReference{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv388[yyj388] = pkg2_v1.ObjectReference{} + yyv1[yyj1] = pkg4_v1.ObjectReference{} } else { - yyv390 := &yyv388[yyj388] - yyv390.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj388 := 0 - for ; !r.CheckBreak(); yyj388++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj388 >= len(yyv388) { - yyv388 = append(yyv388, pkg2_v1.ObjectReference{}) // var yyz388 pkg2_v1.ObjectReference - yyc388 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, pkg4_v1.ObjectReference{}) // var yyz1 pkg4_v1.ObjectReference + yyc1 = true } - yyh388.ElemContainerState(yyj388) - if yyj388 < len(yyv388) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv388[yyj388] = pkg2_v1.ObjectReference{} + yyv1[yyj1] = pkg4_v1.ObjectReference{} } else { - yyv391 := &yyv388[yyj388] - yyv391.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -4661,16 +2510,16 @@ func (x codecSelfer1234) decSlicev1_ObjectReference(v *[]pkg2_v1.ObjectReference } } - if yyj388 < len(yyv388) { - yyv388 = yyv388[:yyj388] - yyc388 = true - } else if yyj388 == 0 && yyv388 == nil { - yyv388 = 
[]pkg2_v1.ObjectReference{} - yyc388 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg4_v1.ObjectReference{} + yyc1 = true } } - yyh388.End() - if yyc388 { - *v = yyv388 + yyh1.End() + if yyc1 { + *v = yyv1 } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go index 73aaa7e36d..3c1fdf22a3 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go @@ -17,50 +17,18 @@ limitations under the License. package v2alpha1 import ( - "k8s.io/kubernetes/pkg/api/unversioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/api/v1" + batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1" ) -// +genclient=true - -// Job represents the configuration of a single job. -type Job struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// JobList is a collection of jobs. -type JobList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of Job. - Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"` -} - // JobTemplate describes a template for creating copies of a predefined pod. type JobTemplate struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Template defines jobs that will be created from this template // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -73,136 +41,23 @@ type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the job. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // +optional - Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// JobSpec describes how the job execution will look like. -type JobSpec struct { - - // Parallelism specifies the maximum desired number of pods the job should - // run at any given time. 
The actual number of pods running in steady state will - // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), - // i.e. when the work left to do is less than max parallelism. - // More info: http://kubernetes.io/docs/user-guide/jobs - // +optional - Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` - - // Completions specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any - // pod signals the success of all pods, and allows parallelism to have any positive - // value. Setting to 1 means that parallelism is limited to 1 and the success of that - // pod signals the success of the job. - // More info: http://kubernetes.io/docs/user-guide/jobs - // +optional - Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"` - - // Optional duration in seconds relative to the startTime that the job may be active - // before the system tries to terminate it; value must be positive integer - // +optional - ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"` - - // Selector is a label query over pods that should match the pod count. - // Normally, the system sets this field for you. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors - // +optional - Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` - - // ManualSelector controls generation of pod labels and pod selectors. - // Leave `manualSelector` unset unless you are certain what you are doing. - // When false or unset, the system pick labels unique to this job - // and appends those labels to the pod template. When true, - // the user is responsible for picking unique labels and specifying - // the selector. Failure to pick a unique label may cause this - // and other jobs to not function correctly. However, You may see - // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` - // API. - // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md - // +optional - ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` - - // Template is the object that describes the pod that will be created when - // executing a job. - // More info: http://kubernetes.io/docs/user-guide/jobs - Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` -} - -// JobStatus represents the current state of a Job. -type JobStatus struct { - - // Conditions represent the latest available observations of an object's current state. - // More info: http://kubernetes.io/docs/user-guide/jobs - // +optional - Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` - - // StartTime represents time when the job was acknowledged by the Job Manager. - // It is not guaranteed to be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - // +optional - StartTime *unversioned.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` - - // CompletionTime represents time when the job was completed. It is not guaranteed to - // be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. 
- // +optional - CompletionTime *unversioned.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` - - // Active is the number of actively running pods. - // +optional - Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` - - // Succeeded is the number of pods which reached Phase Succeeded. - // +optional - Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"` - - // Failed is the number of pods which reached Phase Failed. - // +optional - Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` -} - -type JobConditionType string - -// These are valid conditions of a job. -const ( - // JobComplete means the job has completed its execution. - JobComplete JobConditionType = "Complete" - // JobFailed means the job has failed its execution. - JobFailed JobConditionType = "Failed" -) - -// JobCondition describes current state of a job. -type JobCondition struct { - // Type of job condition, Complete or Failed. - Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` - // Last time the condition was checked. - // +optional - LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` - // Last time the condition transit from one status to another. - // +optional - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` - // (brief) reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - // Human readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` + Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } // +genclient=true // CronJob represents the configuration of a single cron job. type CronJob struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec is a structure defining the expected behavior of a job, including the schedule. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -217,11 +72,11 @@ type CronJob struct { // CronJobList is a collection of cron jobs. type CronJobList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of CronJob. Items []CronJob `json:"items" protobuf:"bytes,2,rep,name=items"` @@ -250,6 +105,16 @@ type CronJobSpec struct { // JobTemplate is the object that describes the job that will be created when // executing a CronJob. 
JobTemplate JobTemplateSpec `json:"jobTemplate" protobuf:"bytes,5,opt,name=jobTemplate"` + + // The number of successful finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty" protobuf:"varint,6,opt,name=successfulJobsHistoryLimit"` + + // The number of failed finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty" protobuf:"varint,7,opt,name=failedJobsHistoryLimit"` } // ConcurrencyPolicy describes how the job will be handled. @@ -278,5 +143,5 @@ type CronJobStatus struct { // LastScheduleTime keeps information of when was the last time the job was successfully scheduled. // +optional - LastScheduleTime *unversioned.Time `json:"lastScheduleTime,omitempty" protobuf:"bytes,4,opt,name=lastScheduleTime"` + LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty" protobuf:"bytes,4,opt,name=lastScheduleTime"` } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go index 6e8c40a0fe..c0b53b8e02 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go @@ -49,12 +49,14 @@ func (CronJobList) SwaggerDoc() map[string]string { } var map_CronJobSpec = map[string]string{ - "": "CronJobSpec describes how the job execution will look like and when it will actually run.", - "schedule": "Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", - "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.", - "suspend": "Suspend flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", - "jobTemplate": "JobTemplate is the object that describes the job that will be created when executing a CronJob.", + "": "CronJobSpec describes how the job execution will look like and when it will actually run.", + "schedule": "Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", + "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", + "concurrencyPolicy": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.", + "suspend": "Suspend flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", + "jobTemplate": "JobTemplate is the object that describes the job that will be created when executing a CronJob.", + "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.", + "failedJobsHistoryLimit": "The number of failed finished jobs to retain. 
This is a pointer to distinguish between explicit zero and not specified.", } func (CronJobSpec) SwaggerDoc() map[string]string { @@ -71,69 +73,6 @@ func (CronJobStatus) SwaggerDoc() map[string]string { return map_CronJobStatus } -var map_Job = map[string]string{ - "": "Job represents the configuration of a single job.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", -} - -func (Job) SwaggerDoc() map[string]string { - return map_Job -} - -var map_JobCondition = map[string]string{ - "": "JobCondition describes current state of a job.", - "type": "Type of job condition, Complete or Failed.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastProbeTime": "Last time the condition was checked.", - "lastTransitionTime": "Last time the condition transit from one status to another.", - "reason": "(brief) reason for the condition's last transition.", - "message": "Human readable message indicating details about last transition.", -} - -func (JobCondition) SwaggerDoc() map[string]string { - return map_JobCondition -} - -var map_JobList = map[string]string{ - "": "JobList is a collection of jobs.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "items": "Items is the list of Job.", -} - -func (JobList) SwaggerDoc() map[string]string { - return map_JobList -} - -var map_JobSpec = map[string]string{ - "": "JobSpec describes how the job execution will look like.", - "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://kubernetes.io/docs/user-guide/jobs", - "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://kubernetes.io/docs/user-guide/jobs", - "activeDeadlineSeconds": "Optional duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer", - "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", - "manualSelector": "ManualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. 
However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md", - "template": "Template is the object that describes the pod that will be created when executing a job. More info: http://kubernetes.io/docs/user-guide/jobs", -} - -func (JobSpec) SwaggerDoc() map[string]string { - return map_JobSpec -} - -var map_JobStatus = map[string]string{ - "": "JobStatus represents the current state of a Job.", - "conditions": "Conditions represent the latest available observations of an object's current state. More info: http://kubernetes.io/docs/user-guide/jobs", - "startTime": "StartTime represents time when the job was acknowledged by the Job Manager. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", - "completionTime": "CompletionTime represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", - "active": "Active is the number of actively running pods.", - "succeeded": "Succeeded is the number of pods which reached Phase Succeeded.", - "failed": "Failed is the number of pods which reached Phase Failed.", -} - -func (JobStatus) SwaggerDoc() map[string]string { - return map_JobStatus -} - var map_JobTemplate = map[string]string{ "": "JobTemplate describes a template for creating copies of a predefined pod.", "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.conversion.go index 01f760c52f..8cd52b6efc 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,12 +21,13 @@ limitations under the License. 
package v2alpha1 import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" + api_v1 "k8s.io/kubernetes/pkg/api/v1" batch "k8s.io/kubernetes/pkg/apis/batch" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" + batch_v1 "k8s.io/kubernetes/pkg/apis/batch/v1" unsafe "unsafe" ) @@ -46,16 +47,6 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_batch_CronJobSpec_To_v2alpha1_CronJobSpec, Convert_v2alpha1_CronJobStatus_To_batch_CronJobStatus, Convert_batch_CronJobStatus_To_v2alpha1_CronJobStatus, - Convert_v2alpha1_Job_To_batch_Job, - Convert_batch_Job_To_v2alpha1_Job, - Convert_v2alpha1_JobCondition_To_batch_JobCondition, - Convert_batch_JobCondition_To_v2alpha1_JobCondition, - Convert_v2alpha1_JobList_To_batch_JobList, - Convert_batch_JobList_To_v2alpha1_JobList, - Convert_v2alpha1_JobSpec_To_batch_JobSpec, - Convert_batch_JobSpec_To_v2alpha1_JobSpec, - Convert_v2alpha1_JobStatus_To_batch_JobStatus, - Convert_batch_JobStatus_To_v2alpha1_JobStatus, Convert_v2alpha1_JobTemplate_To_batch_JobTemplate, Convert_batch_JobTemplate_To_v2alpha1_JobTemplate, Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec, @@ -64,10 +55,7 @@ func RegisterConversions(scheme *runtime.Scheme) error { } func autoConvert_v2alpha1_CronJob_To_batch_CronJob(in *CronJob, out *batch.CronJob, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v2alpha1_CronJobSpec_To_batch_CronJobSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -82,10 +70,7 @@ func Convert_v2alpha1_CronJob_To_batch_CronJob(in *CronJob, out *batch.CronJob, } func autoConvert_batch_CronJob_To_v2alpha1_CronJob(in *batch.CronJob, out *CronJob, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_batch_CronJobSpec_To_v2alpha1_CronJobSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -130,7 +115,7 @@ func autoConvert_batch_CronJobList_To_v2alpha1_CronJobList(in *batch.CronJobList } } } else { - out.Items = nil + out.Items = make([]CronJob, 0) } return nil } @@ -147,6 +132,8 @@ func autoConvert_v2alpha1_CronJobSpec_To_batch_CronJobSpec(in *CronJobSpec, out if err := Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil { return err } + out.SuccessfulJobsHistoryLimit = (*int32)(unsafe.Pointer(in.SuccessfulJobsHistoryLimit)) + out.FailedJobsHistoryLimit = (*int32)(unsafe.Pointer(in.FailedJobsHistoryLimit)) return nil } @@ -162,6 +149,8 @@ func autoConvert_batch_CronJobSpec_To_v2alpha1_CronJobSpec(in *batch.CronJobSpec if err := Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil { return err } + out.SuccessfulJobsHistoryLimit = (*int32)(unsafe.Pointer(in.SuccessfulJobsHistoryLimit)) + out.FailedJobsHistoryLimit = (*int32)(unsafe.Pointer(in.FailedJobsHistoryLimit)) return nil } @@ -171,7 +160,7 @@ func Convert_batch_CronJobSpec_To_v2alpha1_CronJobSpec(in *batch.CronJobSpec, ou func autoConvert_v2alpha1_CronJobStatus_To_batch_CronJobStatus(in *CronJobStatus, out *batch.CronJobStatus, s conversion.Scope) error { out.Active = *(*[]api.ObjectReference)(unsafe.Pointer(&in.Active)) - out.LastScheduleTime = (*unversioned.Time)(unsafe.Pointer(in.LastScheduleTime)) + out.LastScheduleTime = (*v1.Time)(unsafe.Pointer(in.LastScheduleTime)) return nil } @@ -180,8 +169,8 @@ func Convert_v2alpha1_CronJobStatus_To_batch_CronJobStatus(in *CronJobStatus, ou } func autoConvert_batch_CronJobStatus_To_v2alpha1_CronJobStatus(in *batch.CronJobStatus, out *CronJobStatus, s conversion.Scope) error { - out.Active = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.Active)) - out.LastScheduleTime = (*unversioned.Time)(unsafe.Pointer(in.LastScheduleTime)) + out.Active = *(*[]api_v1.ObjectReference)(unsafe.Pointer(&in.Active)) + out.LastScheduleTime = (*v1.Time)(unsafe.Pointer(in.LastScheduleTime)) return nil } @@ -189,167 +178,8 @@ func Convert_batch_CronJobStatus_To_v2alpha1_CronJobStatus(in *batch.CronJobStat return autoConvert_batch_CronJobStatus_To_v2alpha1_CronJobStatus(in, out, s) } -func autoConvert_v2alpha1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v2alpha1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v2alpha1_JobStatus_To_batch_JobStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v2alpha1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { - return autoConvert_v2alpha1_Job_To_batch_Job(in, out, s) -} - -func autoConvert_batch_Job_To_v2alpha1_Job(in *batch.Job, out *Job, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_batch_JobSpec_To_v2alpha1_JobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_batch_JobStatus_To_v2alpha1_JobStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_batch_Job_To_v2alpha1_Job(in *batch.Job, out *Job, s conversion.Scope) error { - return autoConvert_batch_Job_To_v2alpha1_Job(in, out, s) -} - -func autoConvert_v2alpha1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { - out.Type = batch.JobConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_v2alpha1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { - return autoConvert_v2alpha1_JobCondition_To_batch_JobCondition(in, out, s) -} - -func autoConvert_batch_JobCondition_To_v2alpha1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { - out.Type = JobConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_batch_JobCondition_To_v2alpha1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { - return autoConvert_batch_JobCondition_To_v2alpha1_JobCondition(in, out, s) -} - -func autoConvert_v2alpha1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]batch.Job, len(*in)) - for i := range *in { - if err := Convert_v2alpha1_Job_To_batch_Job(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v2alpha1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { - return autoConvert_v2alpha1_JobList_To_batch_JobList(in, out, s) -} - -func autoConvert_batch_JobList_To_v2alpha1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Job, len(*in)) - for i := range *in { - if err := Convert_batch_Job_To_v2alpha1_Job(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_batch_JobList_To_v2alpha1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { - return autoConvert_batch_JobList_To_v2alpha1_JobList(in, out, s) -} - -func autoConvert_v2alpha1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { - out.Parallelism = (*int32)(unsafe.Pointer(in.Parallelism)) - out.Completions = (*int32)(unsafe.Pointer(in.Completions)) - out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) - out.Selector = (*unversioned.LabelSelector)(unsafe.Pointer(in.Selector)) - out.ManualSelector = (*bool)(unsafe.Pointer(in.ManualSelector)) - if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func autoConvert_batch_JobSpec_To_v2alpha1_JobSpec(in *batch.JobSpec, out *JobSpec, s 
conversion.Scope) error { - out.Parallelism = (*int32)(unsafe.Pointer(in.Parallelism)) - out.Completions = (*int32)(unsafe.Pointer(in.Completions)) - out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) - out.Selector = (*unversioned.LabelSelector)(unsafe.Pointer(in.Selector)) - out.ManualSelector = (*bool)(unsafe.Pointer(in.ManualSelector)) - if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func autoConvert_v2alpha1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { - out.Conditions = *(*[]batch.JobCondition)(unsafe.Pointer(&in.Conditions)) - out.StartTime = (*unversioned.Time)(unsafe.Pointer(in.StartTime)) - out.CompletionTime = (*unversioned.Time)(unsafe.Pointer(in.CompletionTime)) - out.Active = in.Active - out.Succeeded = in.Succeeded - out.Failed = in.Failed - return nil -} - -func Convert_v2alpha1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { - return autoConvert_v2alpha1_JobStatus_To_batch_JobStatus(in, out, s) -} - -func autoConvert_batch_JobStatus_To_v2alpha1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { - out.Conditions = *(*[]JobCondition)(unsafe.Pointer(&in.Conditions)) - out.StartTime = (*unversioned.Time)(unsafe.Pointer(in.StartTime)) - out.CompletionTime = (*unversioned.Time)(unsafe.Pointer(in.CompletionTime)) - out.Active = in.Active - out.Succeeded = in.Succeeded - out.Failed = in.Failed - return nil -} - -func Convert_batch_JobStatus_To_v2alpha1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { - return autoConvert_batch_JobStatus_To_v2alpha1_JobStatus(in, out, s) -} - func autoConvert_v2alpha1_JobTemplate_To_batch_JobTemplate(in *JobTemplate, out *batch.JobTemplate, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } @@ -361,10 +191,7 @@ func Convert_v2alpha1_JobTemplate_To_batch_JobTemplate(in *JobTemplate, out *bat } func autoConvert_batch_JobTemplate_To_v2alpha1_JobTemplate(in *batch.JobTemplate, out *JobTemplate, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } @@ -376,11 +203,8 @@ func Convert_batch_JobTemplate_To_v2alpha1_JobTemplate(in *batch.JobTemplate, ou } func autoConvert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in *JobTemplateSpec, out *batch.JobTemplateSpec, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v2alpha1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { + out.ObjectMeta = in.ObjectMeta + if err := batch_v1.Convert_v1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { return err } return nil @@ -391,11 +215,8 @@ func Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in *JobTemplateSp } func autoConvert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in *batch.JobTemplateSpec, out *JobTemplateSpec, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_batch_JobSpec_To_v2alpha1_JobSpec(&in.Spec, &out.Spec, s); err != nil { + out.ObjectMeta = in.ObjectMeta + if err := batch_v1.Convert_batch_JobSpec_To_v1_JobSpec(&in.Spec, &out.Spec, s); err != nil { return err } return nil diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.deepcopy.go index d3d6847c56..2635965218 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,10 +21,11 @@ limitations under the License. package v2alpha1 import ( - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api_v1 "k8s.io/kubernetes/pkg/api/v1" + batch_v1 "k8s.io/kubernetes/pkg/apis/batch/v1" reflect "reflect" ) @@ -40,11 +41,6 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_CronJobList, InType: reflect.TypeOf(&CronJobList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_CronJobSpec, InType: reflect.TypeOf(&CronJobSpec{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_CronJobStatus, InType: reflect.TypeOf(&CronJobStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_Job, InType: reflect.TypeOf(&Job{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_JobCondition, InType: reflect.TypeOf(&JobCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_JobList, InType: reflect.TypeOf(&JobList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_JobSpec, InType: reflect.TypeOf(&JobSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_JobStatus, InType: reflect.TypeOf(&JobStatus{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_JobTemplate, InType: reflect.TypeOf(&JobTemplate{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_JobTemplateSpec, InType: reflect.TypeOf(&JobTemplateSpec{})}, ) @@ -54,9 +50,11 @@ func DeepCopy_v2alpha1_CronJob(in interface{}, out interface{}, c *conversion.Cl { in := in.(*CronJob) out := out.(*CronJob) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := 
c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_v2alpha1_CronJobSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -72,8 +70,7 @@ func DeepCopy_v2alpha1_CronJobList(in interface{}, out interface{}, c *conversio { in := in.(*CronJobList) out := out.(*CronJobList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CronJob, len(*in)) @@ -82,8 +79,6 @@ func DeepCopy_v2alpha1_CronJobList(in interface{}, out interface{}, c *conversio return err } } - } else { - out.Items = nil } return nil } @@ -93,25 +88,30 @@ func DeepCopy_v2alpha1_CronJobSpec(in interface{}, out interface{}, c *conversio { in := in.(*CronJobSpec) out := out.(*CronJobSpec) - out.Schedule = in.Schedule + *out = *in if in.StartingDeadlineSeconds != nil { in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds *out = new(int64) **out = **in - } else { - out.StartingDeadlineSeconds = nil } - out.ConcurrencyPolicy = in.ConcurrencyPolicy if in.Suspend != nil { in, out := &in.Suspend, &out.Suspend *out = new(bool) **out = **in - } else { - out.Suspend = nil } if err := DeepCopy_v2alpha1_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, c); err != nil { return err } + if in.SuccessfulJobsHistoryLimit != nil { + in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit + *out = new(int32) + **out = **in + } + if in.FailedJobsHistoryLimit != nil { + in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit + *out = new(int32) + **out = **in + } return nil } } @@ -120,159 +120,17 @@ func DeepCopy_v2alpha1_CronJobStatus(in interface{}, out interface{}, c *convers { in := in.(*CronJobStatus) out := out.(*CronJobStatus) + *out = *in if in.Active != nil { in, out := &in.Active, &out.Active - *out = make([]v1.ObjectReference, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Active = nil + *out = make([]api_v1.ObjectReference, len(*in)) + copy(*out, *in) } if in.LastScheduleTime != nil { in, out := &in.LastScheduleTime, &out.LastScheduleTime - *out = new(unversioned.Time) - **out = (*in).DeepCopy() - } else { - out.LastScheduleTime = nil - } - return nil - } -} - -func DeepCopy_v2alpha1_Job(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Job) - out := out.(*Job) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v2alpha1_JobSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v2alpha1_JobStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v2alpha1_JobCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*JobCondition) - out := out.(*JobCondition) - out.Type = in.Type - out.Status = in.Status - out.LastProbeTime = in.LastProbeTime.DeepCopy() - out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - out.Reason = in.Reason - out.Message = in.Message - return nil - } -} - -func DeepCopy_v2alpha1_JobList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*JobList) - out := out.(*JobList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Job, len(*in)) - for i := range *in { - if err := DeepCopy_v2alpha1_Job(&(*in)[i], &(*out)[i], c); err != nil { - 
return err - } - } - } else { - out.Items = nil - } - return nil - } -} - -func DeepCopy_v2alpha1_JobSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*JobSpec) - out := out.(*JobSpec) - if in.Parallelism != nil { - in, out := &in.Parallelism, &out.Parallelism - *out = new(int32) - **out = **in - } else { - out.Parallelism = nil - } - if in.Completions != nil { - in, out := &in.Completions, &out.Completions - *out = new(int32) - **out = **in - } else { - out.Completions = nil - } - if in.ActiveDeadlineSeconds != nil { - in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - *out = new(int64) - **out = **in - } else { - out.ActiveDeadlineSeconds = nil - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.Selector = nil - } - if in.ManualSelector != nil { - in, out := &in.ManualSelector, &out.ManualSelector - *out = new(bool) - **out = **in - } else { - out.ManualSelector = nil - } - if err := v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v2alpha1_JobStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*JobStatus) - out := out.(*JobStatus) - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]JobCondition, len(*in)) - for i := range *in { - if err := DeepCopy_v2alpha1_JobCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - if in.StartTime != nil { - in, out := &in.StartTime, &out.StartTime - *out = new(unversioned.Time) + *out = new(v1.Time) **out = (*in).DeepCopy() - } else { - out.StartTime = nil } - if in.CompletionTime != nil { - in, out := &in.CompletionTime, &out.CompletionTime - *out = new(unversioned.Time) - **out = (*in).DeepCopy() - } else { - out.CompletionTime = nil - } - out.Active = in.Active - out.Succeeded = in.Succeeded - out.Failed = in.Failed return nil } } @@ -281,9 +139,11 @@ func DeepCopy_v2alpha1_JobTemplate(in interface{}, out interface{}, c *conversio { in := in.(*JobTemplate) out := out.(*JobTemplate) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_v2alpha1_JobTemplateSpec(&in.Template, &out.Template, c); err != nil { return err @@ -296,10 +156,13 @@ func DeepCopy_v2alpha1_JobTemplateSpec(in interface{}, out interface{}, c *conve { in := in.(*JobTemplateSpec) out := out.(*JobTemplateSpec) - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } - if err := DeepCopy_v2alpha1_JobSpec(&in.Spec, &out.Spec, c); err != nil { + if err := batch_v1.DeepCopy_v1_JobSpec(&in.Spec, &out.Spec, c); err != nil { return err } return nil diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.defaults.go index 6d3927c326..fab636ed8c 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.defaults.go +++ 
b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,8 +21,8 @@ limitations under the License. package v2alpha1 import ( + runtime "k8s.io/apimachinery/pkg/runtime" v1 "k8s.io/kubernetes/pkg/api/v1" - runtime "k8s.io/kubernetes/pkg/runtime" ) // RegisterDefaults adds defaulters functions to the given scheme. @@ -31,8 +31,6 @@ import ( func RegisterDefaults(scheme *runtime.Scheme) error { scheme.AddTypeDefaultingFunc(&CronJob{}, func(obj interface{}) { SetObjectDefaults_CronJob(obj.(*CronJob)) }) scheme.AddTypeDefaultingFunc(&CronJobList{}, func(obj interface{}) { SetObjectDefaults_CronJobList(obj.(*CronJobList)) }) - scheme.AddTypeDefaultingFunc(&Job{}, func(obj interface{}) { SetObjectDefaults_Job(obj.(*Job)) }) - scheme.AddTypeDefaultingFunc(&JobList{}, func(obj interface{}) { SetObjectDefaults_JobList(obj.(*JobList)) }) scheme.AddTypeDefaultingFunc(&JobTemplate{}, func(obj interface{}) { SetObjectDefaults_JobTemplate(obj.(*JobTemplate)) }) return nil } @@ -67,6 +65,23 @@ func SetObjectDefaults_CronJob(in *CronJob) { if a.VolumeSource.AzureDisk != nil { v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } } for i := range in.Spec.JobTemplate.Spec.Template.Spec.InitContainers { a := &in.Spec.JobTemplate.Spec.Template.Spec.InitContainers[i] @@ -161,130 +176,6 @@ func SetObjectDefaults_CronJobList(in *CronJobList) { } } -func SetObjectDefaults_Job(in *Job) { - SetDefaults_Job(in) - v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) - for i := range in.Spec.Template.Spec.Volumes { - a := &in.Spec.Template.Spec.Volumes[i] - v1.SetDefaults_Volume(a) - if a.VolumeSource.Secret != nil { - v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) - } - if a.VolumeSource.ISCSI != nil { - v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) - } - if a.VolumeSource.RBD != nil { - v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) - } - if a.VolumeSource.DownwardAPI != nil { - v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) - for j := range a.VolumeSource.DownwardAPI.Items { - b := &a.VolumeSource.DownwardAPI.Items[j] - if b.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.FieldRef) - } - } - } - if a.VolumeSource.ConfigMap != nil { - v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) - } - if a.VolumeSource.AzureDisk != nil { - v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) - } - } - for i := range in.Spec.Template.Spec.InitContainers { - a := &in.Spec.Template.Spec.InitContainers[i] - v1.SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - v1.SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - 
v1.SetDefaults_ResourceList(&a.Resources.Limits) - v1.SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - v1.SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - v1.SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } - for i := range in.Spec.Template.Spec.Containers { - a := &in.Spec.Template.Spec.Containers[i] - v1.SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - v1.SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - v1.SetDefaults_ResourceList(&a.Resources.Limits) - v1.SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - v1.SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - v1.SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } -} - -func SetObjectDefaults_JobList(in *JobList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_Job(a) - } -} - func SetObjectDefaults_JobTemplate(in *JobTemplate) { v1.SetDefaults_PodSpec(&in.Template.Spec.Template.Spec) for i := range in.Template.Spec.Template.Spec.Volumes { @@ -314,6 +205,23 @@ func SetObjectDefaults_JobTemplate(in *JobTemplate) { if a.VolumeSource.AzureDisk != nil { v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } } for i := range in.Template.Spec.Template.Spec.InitContainers { a := &in.Template.Spec.Template.Spec.InitContainers[i] diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/zz_generated.deepcopy.go index e9a54781da..8e3ff29e75 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,10 +21,10 @@ limitations under the License. package batch import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" reflect "reflect" ) @@ -54,9 +54,11 @@ func DeepCopy_batch_CronJob(in interface{}, out interface{}, c *conversion.Clone { in := in.(*CronJob) out := out.(*CronJob) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_batch_CronJobSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -72,8 +74,7 @@ func DeepCopy_batch_CronJobList(in interface{}, out interface{}, c *conversion.C { in := in.(*CronJobList) out := out.(*CronJobList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CronJob, len(*in)) @@ -82,8 +83,6 @@ func DeepCopy_batch_CronJobList(in interface{}, out interface{}, c *conversion.C return err } } - } else { - out.Items = nil } return nil } @@ -93,25 +92,30 @@ func DeepCopy_batch_CronJobSpec(in interface{}, out interface{}, c *conversion.C { in := in.(*CronJobSpec) out := out.(*CronJobSpec) - out.Schedule = in.Schedule + *out = *in if in.StartingDeadlineSeconds != nil { in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds *out = new(int64) **out = **in - } else { - out.StartingDeadlineSeconds = nil } - out.ConcurrencyPolicy = in.ConcurrencyPolicy if in.Suspend != nil { in, out := &in.Suspend, &out.Suspend *out = new(bool) **out = **in - } else { - out.Suspend = nil } if err := DeepCopy_batch_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, c); err != nil { return err } + if in.SuccessfulJobsHistoryLimit != nil { + in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit + *out = new(int32) + **out = **in + } + if in.FailedJobsHistoryLimit != nil { + in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit + *out = new(int32) + **out = **in + } return nil } } @@ -120,21 +124,16 @@ func DeepCopy_batch_CronJobStatus(in interface{}, out interface{}, c *conversion { in := in.(*CronJobStatus) out := out.(*CronJobStatus) + *out = *in if in.Active != nil { in, out := &in.Active, &out.Active *out = make([]api.ObjectReference, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Active = nil + copy(*out, *in) } if in.LastScheduleTime != nil { in, out := &in.LastScheduleTime, &out.LastScheduleTime - *out = new(unversioned.Time) + *out = new(v1.Time) **out = (*in).DeepCopy() - } else { - out.LastScheduleTime = nil } return nil } @@ -144,9 +143,11 @@ func DeepCopy_batch_Job(in interface{}, out interface{}, c *conversion.Cloner) e { in := in.(*Job) out := out.(*Job) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_batch_JobSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ 
-162,12 +163,9 @@ func DeepCopy_batch_JobCondition(in interface{}, out interface{}, c *conversion. { in := in.(*JobCondition) out := out.(*JobCondition) - out.Type = in.Type - out.Status = in.Status + *out = *in out.LastProbeTime = in.LastProbeTime.DeepCopy() out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - out.Reason = in.Reason - out.Message = in.Message return nil } } @@ -176,8 +174,7 @@ func DeepCopy_batch_JobList(in interface{}, out interface{}, c *conversion.Clone { in := in.(*JobList) out := out.(*JobList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Job, len(*in)) @@ -186,8 +183,6 @@ func DeepCopy_batch_JobList(in interface{}, out interface{}, c *conversion.Clone return err } } - } else { - out.Items = nil } return nil } @@ -197,42 +192,34 @@ func DeepCopy_batch_JobSpec(in interface{}, out interface{}, c *conversion.Clone { in := in.(*JobSpec) out := out.(*JobSpec) + *out = *in if in.Parallelism != nil { in, out := &in.Parallelism, &out.Parallelism *out = new(int32) **out = **in - } else { - out.Parallelism = nil } if in.Completions != nil { in, out := &in.Completions, &out.Completions *out = new(int32) **out = **in - } else { - out.Completions = nil } if in.ActiveDeadlineSeconds != nil { in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds *out = new(int64) **out = **in - } else { - out.ActiveDeadlineSeconds = nil } if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*v1.LabelSelector) } - } else { - out.Selector = nil } if in.ManualSelector != nil { in, out := &in.ManualSelector, &out.ManualSelector *out = new(bool) **out = **in - } else { - out.ManualSelector = nil } if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { return err @@ -245,6 +232,7 @@ func DeepCopy_batch_JobStatus(in interface{}, out interface{}, c *conversion.Clo { in := in.(*JobStatus) out := out.(*JobStatus) + *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]JobCondition, len(*in)) @@ -253,26 +241,17 @@ func DeepCopy_batch_JobStatus(in interface{}, out interface{}, c *conversion.Clo return err } } - } else { - out.Conditions = nil } if in.StartTime != nil { in, out := &in.StartTime, &out.StartTime - *out = new(unversioned.Time) + *out = new(v1.Time) **out = (*in).DeepCopy() - } else { - out.StartTime = nil } if in.CompletionTime != nil { in, out := &in.CompletionTime, &out.CompletionTime - *out = new(unversioned.Time) + *out = new(v1.Time) **out = (*in).DeepCopy() - } else { - out.CompletionTime = nil } - out.Active = in.Active - out.Succeeded = in.Succeeded - out.Failed = in.Failed return nil } } @@ -281,9 +260,11 @@ func DeepCopy_batch_JobTemplate(in interface{}, out interface{}, c *conversion.C { in := in.(*JobTemplate) out := out.(*JobTemplate) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_batch_JobTemplateSpec(&in.Template, &out.Template, c); err != nil { return err @@ -296,8 +277,11 @@ func DeepCopy_batch_JobTemplateSpec(in interface{}, out interface{}, c 
*conversi { in := in.(*JobTemplateSpec) out := out.(*JobTemplateSpec) - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_batch_JobSpec(&in.Spec, &out.Spec, c); err != nil { return err diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/certificates/BUILD index d12e144d53..b1f242beee 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/BUILD @@ -4,28 +4,41 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( name = "go_default_library", srcs = [ "doc.go", + "helpers.go", "register.go", - "types.generated.go", "types.go", "zz_generated.deepcopy.go", ], tags = ["automanaged"], deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/certificates/install:all-srcs", + "//pkg/apis/certificates/v1beta1:all-srcs", + "//pkg/apis/certificates/validation:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/certificates/OWNERS new file mode 100755 index 0000000000..c67bd1172a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/OWNERS @@ -0,0 +1,14 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- deads2k +- caesarxuchao +- liggitt +- sttts +- timothysc +- dims +- errordeveloper +- mbohlool +- david-mcmahon +- jianhuiz diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go index 3a1b77095a..5bd58b490f 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go @@ -15,7 +15,5 @@ limitations under the License. */ // +k8s:deepcopy-gen=package,register -// +k8s:openapi-gen=true - // +groupName=certificates.k8s.io package certificates diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/helpers.go new file mode 100644 index 0000000000..2608e40762 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/helpers.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package certificates + +import ( + "crypto/x509" + "encoding/pem" + "errors" +) + +// ParseCSR extracts the CSR from the API object and decodes it. +func ParseCSR(obj *CertificateSigningRequest) (*x509.CertificateRequest, error) { + // extract PEM from request object + pemBytes := obj.Spec.Request + block, _ := pem.Decode(pemBytes) + if block == nil || block.Type != "CERTIFICATE REQUEST" { + return nil, errors.New("PEM block type must be CERTIFICATE REQUEST") + } + csr, err := x509.ParseCertificateRequest(block.Bytes) + if err != nil { + return nil, err + } + return csr, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/BUILD index 0c2e83b8f1..1ee09b9f02 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -15,9 +12,25 @@ go_library( srcs = ["install.go"], tags = ["automanaged"], deps = [ - "//pkg/apimachinery/announced:go_default_library", + "//pkg/api:go_default_library", "//pkg/apis/certificates:go_default_library", - "//pkg/apis/certificates/v1alpha1:go_default_library", - "//pkg/util/sets:go_default_library", + "//pkg/apis/certificates/v1beta1:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/announced", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/registered", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/util/sets", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/install.go index 0210b0daa1..73844eeb6f 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/install.go @@ -19,25 +19,33 @@ limitations under the License. 
package install import ( - "k8s.io/kubernetes/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/certificates" - "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/apis/certificates/v1beta1" ) func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { if err := announced.NewGroupMetaFactory( &announced.GroupMetaFactoryArgs{ GroupName: certificates.GroupName, - VersionPreferenceOrder: []string{v1alpha1.SchemeGroupVersion.Version}, + VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version}, ImportPrefix: "k8s.io/kubernetes/pkg/apis/certificates", RootScopedKinds: sets.NewString("CertificateSigningRequest"), AddInternalObjectsToScheme: certificates.AddToScheme, }, announced.VersionToSchemeFunc{ - v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme, + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, }, - ).Announce().RegisterAndEnable(); err != nil { + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { panic(err) } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/register.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/register.go index 884044114c..f9d228d00e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/register.go @@ -17,9 +17,8 @@ limitations under the License. 
package certificates import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) var ( @@ -31,15 +30,15 @@ var ( const GroupName = "certificates.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} // Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { +func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } // Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { +func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } @@ -48,11 +47,9 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &CertificateSigningRequest{}, &CertificateSigningRequestList{}, - &api.ListOptions{}, - &api.DeleteOptions{}, ) return nil } -func (obj *CertificateSigningRequest) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *CertificateSigningRequestList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *CertificateSigningRequest) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } +func (obj *CertificateSigningRequestList) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/types.generated.go deleted file mode 100644 index 6f5c513bc0..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/types.generated.go +++ /dev/null @@ -1,1957 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. 
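// Editorial note (not part of the vendored diff): Kind and Resource above just
// qualify a bare name with the certificates.k8s.io group. A tiny illustrative
// check, assuming the vendored package is importable as shown; the printed
// forms come from the schema types' String methods.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/certificates"
)

func main() {
	// Expected to print something like:
	//   CertificateSigningRequest.certificates.k8s.io
	//   certificatesigningrequests.certificates.k8s.io
	fmt.Println(certificates.Kind("CertificateSigningRequest"))
	fmt.Println(certificates.Resource("certificatesigningrequests"))
}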
-// ************************************************************ - -package certificates - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg2_api "k8s.io/kubernetes/pkg/api" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg3_types "k8s.io/kubernetes/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg2_api.ObjectMeta - var v1 pkg1_unversioned.TypeMeta - var v2 pkg3_types.UID - var v3 time.Time - _, _, _, _ = v0, v1, v2, v3 - } -} - -func (x *CertificateSigningRequest) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := 
&x.ObjectMeta - yy10.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.ObjectMeta - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy13 := &x.Spec - yy13.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.Spec - yy14.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Status - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CertificateSigningRequest) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct19 := r.ContainerType() - if yyct19 == codecSelferValueTypeMap1234 { - yyl19 := r.ReadMapStart() - if yyl19 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl19, d) - } - } else if yyct19 == codecSelferValueTypeArray1234 { - yyl19 := r.ReadArrayStart() - if yyl19 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl19, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CertificateSigningRequest) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys20Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys20Slc - var yyhl20 bool = l >= 0 - for yyj20 := 0; ; yyj20++ { - if yyhl20 { - if yyj20 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys20Slc = r.DecodeBytes(yys20Slc, true, true) - yys20 := string(yys20Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys20 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv23 := &x.ObjectMeta - yyv23.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = CertificateSigningRequestSpec{} - } else { - yyv24 := &x.Spec - yyv24.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = CertificateSigningRequestStatus{} - } else { - yyv25 := &x.Status - yyv25.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys20) - } // end switch yys20 - } // end 
for yyj20 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CertificateSigningRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj26 int - var yyb26 bool - var yyhl26 bool = l >= 0 - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv29 := &x.ObjectMeta - yyv29.CodecDecodeSelf(d) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = CertificateSigningRequestSpec{} - } else { - yyv30 := &x.Spec - yyv30.CodecDecodeSelf(d) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = CertificateSigningRequestStatus{} - } else { - yyv31 := &x.Status - yyv31.CodecDecodeSelf(d) - } - for { - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj26-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CertificateSigningRequestSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym32 := z.EncBinary() - _ = yym32 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep33 := !z.EncBinary() - yy2arr33 := z.EncBasicHandle().StructToArray - var yyq33 [4]bool - _, _, _ = yysep33, yyq33, yy2arr33 - const yyr33 bool = false - yyq33[1] = x.Username != "" - yyq33[2] = x.UID != "" - yyq33[3] = len(x.Groups) != 0 - var yynn33 int - if yyr33 || yy2arr33 { - r.EncodeArrayStart(4) - } else { - yynn33 = 1 - for _, b := range yyq33 { - if b { - yynn33++ - } - } - r.EncodeMapStart(yynn33) - yynn33 = 0 - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Request == nil { - r.EncodeNil() - } else { - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Request)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("request")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Request == nil { - r.EncodeNil() - } else { - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Request)) - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[1] { - yym38 := z.EncBinary() - _ = yym38 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Username)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq33[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("username")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym39 := z.EncBinary() - _ = yym39 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Username)) - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[2] { - yym41 := z.EncBinary() - _ = yym41 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq33[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym42 := z.EncBinary() - _ = yym42 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[3] { - if x.Groups == nil { - r.EncodeNil() - } else { - yym44 := z.EncBinary() - _ = yym44 - if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq33[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("groups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Groups == nil { - r.EncodeNil() - } else { - yym45 := z.EncBinary() - _ = yym45 - if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CertificateSigningRequestSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym46 := z.DecBinary() - _ = yym46 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct47 := r.ContainerType() - if yyct47 == codecSelferValueTypeMap1234 { - yyl47 := r.ReadMapStart() - if yyl47 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl47, d) - } - } else if yyct47 == codecSelferValueTypeArray1234 { - yyl47 := r.ReadArrayStart() - if yyl47 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl47, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CertificateSigningRequestSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys48Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys48Slc - var yyhl48 bool = l >= 0 - for yyj48 := 0; ; yyj48++ { - if yyhl48 { - if yyj48 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys48Slc = r.DecodeBytes(yys48Slc, true, true) - yys48 := string(yys48Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys48 { - case "request": - if r.TryDecodeAsNil() { - x.Request = nil - } else { - yyv49 := &x.Request - yym50 := z.DecBinary() - _ = yym50 - if false { - } else { - *yyv49 = r.DecodeBytes(*(*[]byte)(yyv49), false, false) - } - } - case "username": - if r.TryDecodeAsNil() { - x.Username = "" - } else { - x.Username = string(r.DecodeString()) - } - case "uid": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = string(r.DecodeString()) - } - case "groups": - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv53 := &x.Groups - yym54 := z.DecBinary() - _ = yym54 - if false { - } else { - z.F.DecSliceStringX(yyv53, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys48) - } // end switch yys48 - } // end for yyj48 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CertificateSigningRequestSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj55 int - var yyb55 bool - var yyhl55 bool = l >= 0 - yyj55++ - if yyhl55 { - yyb55 = yyj55 > l - } else { - yyb55 = r.CheckBreak() - } - if yyb55 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Request = nil - } else { - yyv56 := &x.Request - yym57 := z.DecBinary() - _ = yym57 - if false { - } else { - *yyv56 = r.DecodeBytes(*(*[]byte)(yyv56), false, false) - } - } - yyj55++ - if yyhl55 { - yyb55 = yyj55 > l - } else { - yyb55 = r.CheckBreak() - } - if yyb55 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Username = "" - } else { - x.Username = string(r.DecodeString()) - } - yyj55++ - if yyhl55 { - yyb55 = yyj55 > l - } else { - yyb55 = r.CheckBreak() - } - if yyb55 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = string(r.DecodeString()) - } - yyj55++ - if yyhl55 { - yyb55 = yyj55 > l - } else { - yyb55 = r.CheckBreak() - } - if yyb55 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv60 := &x.Groups - yym61 := z.DecBinary() - _ = yym61 - if false { - } else { - z.F.DecSliceStringX(yyv60, false, d) - } - } - for { - yyj55++ - if yyhl55 { - yyb55 = yyj55 > l - } else { - yyb55 = r.CheckBreak() - } - if yyb55 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj55-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CertificateSigningRequestStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym62 := z.EncBinary() - _ = yym62 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep63 := !z.EncBinary() - yy2arr63 := z.EncBasicHandle().StructToArray - var yyq63 [2]bool - _, _, _ = yysep63, yyq63, yy2arr63 - const yyr63 bool = false - yyq63[0] = 
len(x.Conditions) != 0 - yyq63[1] = len(x.Certificate) != 0 - var yynn63 int - if yyr63 || yy2arr63 { - r.EncodeArrayStart(2) - } else { - yynn63 = 0 - for _, b := range yyq63 { - if b { - yynn63++ - } - } - r.EncodeMapStart(yynn63) - yynn63 = 0 - } - if yyr63 || yy2arr63 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq63[0] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym65 := z.EncBinary() - _ = yym65 - if false { - } else { - h.encSliceCertificateSigningRequestCondition(([]CertificateSigningRequestCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq63[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym66 := z.EncBinary() - _ = yym66 - if false { - } else { - h.encSliceCertificateSigningRequestCondition(([]CertificateSigningRequestCondition)(x.Conditions), e) - } - } - } - } - if yyr63 || yy2arr63 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq63[1] { - if x.Certificate == nil { - r.EncodeNil() - } else { - yym68 := z.EncBinary() - _ = yym68 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Certificate)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq63[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("certificate")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Certificate == nil { - r.EncodeNil() - } else { - yym69 := z.EncBinary() - _ = yym69 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Certificate)) - } - } - } - } - if yyr63 || yy2arr63 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CertificateSigningRequestStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym70 := z.DecBinary() - _ = yym70 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct71 := r.ContainerType() - if yyct71 == codecSelferValueTypeMap1234 { - yyl71 := r.ReadMapStart() - if yyl71 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl71, d) - } - } else if yyct71 == codecSelferValueTypeArray1234 { - yyl71 := r.ReadArrayStart() - if yyl71 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl71, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CertificateSigningRequestStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys72Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys72Slc - var yyhl72 bool = l >= 0 - for yyj72 := 0; ; yyj72++ { - if yyhl72 { - if yyj72 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys72Slc = r.DecodeBytes(yys72Slc, true, true) - yys72 := string(yys72Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys72 { - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv73 := &x.Conditions - yym74 := z.DecBinary() - _ = yym74 - if false { - } 
else { - h.decSliceCertificateSigningRequestCondition((*[]CertificateSigningRequestCondition)(yyv73), d) - } - } - case "certificate": - if r.TryDecodeAsNil() { - x.Certificate = nil - } else { - yyv75 := &x.Certificate - yym76 := z.DecBinary() - _ = yym76 - if false { - } else { - *yyv75 = r.DecodeBytes(*(*[]byte)(yyv75), false, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys72) - } // end switch yys72 - } // end for yyj72 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CertificateSigningRequestStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj77 int - var yyb77 bool - var yyhl77 bool = l >= 0 - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv78 := &x.Conditions - yym79 := z.DecBinary() - _ = yym79 - if false { - } else { - h.decSliceCertificateSigningRequestCondition((*[]CertificateSigningRequestCondition)(yyv78), d) - } - } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Certificate = nil - } else { - yyv80 := &x.Certificate - yym81 := z.DecBinary() - _ = yym81 - if false { - } else { - *yyv80 = r.DecodeBytes(*(*[]byte)(yyv80), false, false) - } - } - for { - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj77-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x RequestConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym82 := z.EncBinary() - _ = yym82 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *RequestConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym83 := z.DecBinary() - _ = yym83 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *CertificateSigningRequestCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym84 := z.EncBinary() - _ = yym84 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep85 := !z.EncBinary() - yy2arr85 := z.EncBasicHandle().StructToArray - var yyq85 [4]bool - _, _, _ = yysep85, yyq85, yy2arr85 - const yyr85 bool = false - yyq85[1] = x.Reason != "" - yyq85[2] = x.Message != "" - yyq85[3] = true - var yynn85 int - if yyr85 || yy2arr85 { - r.EncodeArrayStart(4) - } else { - yynn85 = 1 - for _, b := range yyq85 { - if b { - yynn85++ - } - } - r.EncodeMapStart(yynn85) - yynn85 = 0 - } - if yyr85 || yy2arr85 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr85 || yy2arr85 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq85[1] { - yym88 := z.EncBinary() - _ = yym88 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq85[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym89 := z.EncBinary() - _ = yym89 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr85 || yy2arr85 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq85[2] { - yym91 := z.EncBinary() - _ = yym91 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq85[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym92 := z.EncBinary() - _ = yym92 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr85 || yy2arr85 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq85[3] { - yy94 := &x.LastUpdateTime - yym95 := z.EncBinary() - _ = yym95 - if false { - } else if z.HasExtensions() && z.EncExt(yy94) { - } else if yym95 { - z.EncBinaryMarshal(yy94) - } else if !yym95 && z.IsJSONHandle() { - z.EncJSONMarshal(yy94) - } else { - z.EncFallback(yy94) - } - } else { - r.EncodeNil() - } - } else { - if yyq85[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastUpdateTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy96 := &x.LastUpdateTime - yym97 := z.EncBinary() - _ = yym97 - if false { - } else if z.HasExtensions() && z.EncExt(yy96) { - } else if yym97 { - z.EncBinaryMarshal(yy96) - } else if !yym97 && z.IsJSONHandle() { - z.EncJSONMarshal(yy96) - } else { - z.EncFallback(yy96) - } - } - } - if yyr85 || yy2arr85 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CertificateSigningRequestCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym98 := z.DecBinary() - _ = yym98 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct99 := r.ContainerType() - if yyct99 == codecSelferValueTypeMap1234 { - yyl99 := r.ReadMapStart() - if yyl99 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl99, d) - } - } else if yyct99 == codecSelferValueTypeArray1234 { - yyl99 := r.ReadArrayStart() - if yyl99 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl99, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CertificateSigningRequestCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys100Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys100Slc 
- var yyhl100 bool = l >= 0 - for yyj100 := 0; ; yyj100++ { - if yyhl100 { - if yyj100 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys100Slc = r.DecodeBytes(yys100Slc, true, true) - yys100 := string(yys100Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys100 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = RequestConditionType(r.DecodeString()) - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - case "lastUpdateTime": - if r.TryDecodeAsNil() { - x.LastUpdateTime = pkg1_unversioned.Time{} - } else { - yyv104 := &x.LastUpdateTime - yym105 := z.DecBinary() - _ = yym105 - if false { - } else if z.HasExtensions() && z.DecExt(yyv104) { - } else if yym105 { - z.DecBinaryUnmarshal(yyv104) - } else if !yym105 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv104) - } else { - z.DecFallback(yyv104, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys100) - } // end switch yys100 - } // end for yyj100 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CertificateSigningRequestCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj106 int - var yyb106 bool - var yyhl106 bool = l >= 0 - yyj106++ - if yyhl106 { - yyb106 = yyj106 > l - } else { - yyb106 = r.CheckBreak() - } - if yyb106 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = RequestConditionType(r.DecodeString()) - } - yyj106++ - if yyhl106 { - yyb106 = yyj106 > l - } else { - yyb106 = r.CheckBreak() - } - if yyb106 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj106++ - if yyhl106 { - yyb106 = yyj106 > l - } else { - yyb106 = r.CheckBreak() - } - if yyb106 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - yyj106++ - if yyhl106 { - yyb106 = yyj106 > l - } else { - yyb106 = r.CheckBreak() - } - if yyb106 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastUpdateTime = pkg1_unversioned.Time{} - } else { - yyv110 := &x.LastUpdateTime - yym111 := z.DecBinary() - _ = yym111 - if false { - } else if z.HasExtensions() && z.DecExt(yyv110) { - } else if yym111 { - z.DecBinaryUnmarshal(yyv110) - } else if !yym111 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv110) - } else { - z.DecFallback(yyv110, false) - } - } - for { - yyj106++ - if yyhl106 { - yyb106 = yyj106 > l - } else { - yyb106 = r.CheckBreak() - } - if yyb106 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj106-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CertificateSigningRequestList) CodecEncodeSelf(e 
*codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym112 := z.EncBinary() - _ = yym112 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep113 := !z.EncBinary() - yy2arr113 := z.EncBasicHandle().StructToArray - var yyq113 [4]bool - _, _, _ = yysep113, yyq113, yy2arr113 - const yyr113 bool = false - yyq113[0] = x.Kind != "" - yyq113[1] = x.APIVersion != "" - yyq113[2] = true - yyq113[3] = len(x.Items) != 0 - var yynn113 int - if yyr113 || yy2arr113 { - r.EncodeArrayStart(4) - } else { - yynn113 = 0 - for _, b := range yyq113 { - if b { - yynn113++ - } - } - r.EncodeMapStart(yynn113) - yynn113 = 0 - } - if yyr113 || yy2arr113 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq113[0] { - yym115 := z.EncBinary() - _ = yym115 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq113[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym116 := z.EncBinary() - _ = yym116 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr113 || yy2arr113 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq113[1] { - yym118 := z.EncBinary() - _ = yym118 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq113[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym119 := z.EncBinary() - _ = yym119 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr113 || yy2arr113 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq113[2] { - yy121 := &x.ListMeta - yym122 := z.EncBinary() - _ = yym122 - if false { - } else if z.HasExtensions() && z.EncExt(yy121) { - } else { - z.EncFallback(yy121) - } - } else { - r.EncodeNil() - } - } else { - if yyq113[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy123 := &x.ListMeta - yym124 := z.EncBinary() - _ = yym124 - if false { - } else if z.HasExtensions() && z.EncExt(yy123) { - } else { - z.EncFallback(yy123) - } - } - } - if yyr113 || yy2arr113 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq113[3] { - if x.Items == nil { - r.EncodeNil() - } else { - yym126 := z.EncBinary() - _ = yym126 - if false { - } else { - h.encSliceCertificateSigningRequest(([]CertificateSigningRequest)(x.Items), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq113[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym127 := z.EncBinary() - _ = yym127 - if false { - } else { - h.encSliceCertificateSigningRequest(([]CertificateSigningRequest)(x.Items), e) - } - } - } - } - if yyr113 || yy2arr113 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CertificateSigningRequestList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym128 := z.DecBinary() - _ = yym128 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct129 := r.ContainerType() - if yyct129 == codecSelferValueTypeMap1234 { - yyl129 := r.ReadMapStart() - if yyl129 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl129, d) - } - } else if yyct129 == codecSelferValueTypeArray1234 { - yyl129 := r.ReadArrayStart() - if yyl129 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl129, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CertificateSigningRequestList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys130Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys130Slc - var yyhl130 bool = l >= 0 - for yyj130 := 0; ; yyj130++ { - if yyhl130 { - if yyj130 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys130Slc = r.DecodeBytes(yys130Slc, true, true) - yys130 := string(yys130Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys130 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv133 := &x.ListMeta - yym134 := z.DecBinary() - _ = yym134 - if false { - } else if z.HasExtensions() && z.DecExt(yyv133) { - } else { - z.DecFallback(yyv133, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv135 := &x.Items - yym136 := z.DecBinary() - _ = yym136 - if false { - } else { - h.decSliceCertificateSigningRequest((*[]CertificateSigningRequest)(yyv135), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys130) - } // end switch yys130 - } // end for yyj130 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CertificateSigningRequestList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj137 int - var yyb137 bool - var yyhl137 bool = l >= 0 - yyj137++ - if yyhl137 { - yyb137 = yyj137 > l - } else { - yyb137 = r.CheckBreak() - } - if yyb137 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj137++ - if yyhl137 { - yyb137 = yyj137 > l - } else { - yyb137 = r.CheckBreak() - } - if yyb137 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj137++ - if yyhl137 { - yyb137 = yyj137 > l - } else { - yyb137 = r.CheckBreak() - } - if yyb137 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv140 := &x.ListMeta - yym141 := z.DecBinary() - _ = yym141 - if false { - } else if z.HasExtensions() && z.DecExt(yyv140) { - } else { - z.DecFallback(yyv140, false) - } - } - yyj137++ - if yyhl137 { - yyb137 = yyj137 > l - } else { - yyb137 = r.CheckBreak() - } - if yyb137 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv142 := &x.Items - yym143 := z.DecBinary() - _ = yym143 - if false { - } else { - h.decSliceCertificateSigningRequest((*[]CertificateSigningRequest)(yyv142), d) - } - } - for { - yyj137++ - if yyhl137 { - yyb137 = yyj137 > l - } else { - yyb137 = r.CheckBreak() - } - if yyb137 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj137-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceCertificateSigningRequestCondition(v []CertificateSigningRequestCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv144 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy145 := &yyv144 - yy145.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCertificateSigningRequestCondition(v *[]CertificateSigningRequestCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv146 := *v - yyh146, yyl146 := z.DecSliceHelperStart() - var yyc146 bool - if yyl146 == 0 { - if yyv146 == nil { - yyv146 = []CertificateSigningRequestCondition{} - yyc146 = true - } else if len(yyv146) != 0 { - yyv146 = yyv146[:0] - yyc146 = true - } - } else if yyl146 > 0 { - var yyrr146, yyrl146 int - var yyrt146 bool - if yyl146 > cap(yyv146) { - - yyrg146 := len(yyv146) > 0 - yyv2146 := yyv146 - yyrl146, yyrt146 = z.DecInferLen(yyl146, z.DecBasicHandle().MaxInitLen, 72) - if yyrt146 { - if yyrl146 <= cap(yyv146) { - yyv146 = yyv146[:yyrl146] - } else { - yyv146 = make([]CertificateSigningRequestCondition, yyrl146) - } - } else { - yyv146 = make([]CertificateSigningRequestCondition, yyrl146) - } - yyc146 = true - yyrr146 = len(yyv146) - if yyrg146 { - copy(yyv146, yyv2146) - } - } else if yyl146 != len(yyv146) { - yyv146 = yyv146[:yyl146] - yyc146 = true - } - yyj146 := 0 - for ; yyj146 < yyrr146; yyj146++ { - yyh146.ElemContainerState(yyj146) - if r.TryDecodeAsNil() { - yyv146[yyj146] = CertificateSigningRequestCondition{} - } else { - yyv147 := &yyv146[yyj146] - yyv147.CodecDecodeSelf(d) - } - - } - if yyrt146 { - for ; yyj146 < yyl146; yyj146++ { - yyv146 = append(yyv146, CertificateSigningRequestCondition{}) - yyh146.ElemContainerState(yyj146) - if r.TryDecodeAsNil() { - yyv146[yyj146] = CertificateSigningRequestCondition{} - } else { - yyv148 := &yyv146[yyj146] - yyv148.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj146 := 0 - for ; !r.CheckBreak(); yyj146++ { - - if yyj146 >= len(yyv146) { - yyv146 = append(yyv146, CertificateSigningRequestCondition{}) // var yyz146 CertificateSigningRequestCondition - yyc146 = true - } - yyh146.ElemContainerState(yyj146) - if yyj146 < len(yyv146) { - if r.TryDecodeAsNil() { - yyv146[yyj146] = 
CertificateSigningRequestCondition{} - } else { - yyv149 := &yyv146[yyj146] - yyv149.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj146 < len(yyv146) { - yyv146 = yyv146[:yyj146] - yyc146 = true - } else if yyj146 == 0 && yyv146 == nil { - yyv146 = []CertificateSigningRequestCondition{} - yyc146 = true - } - } - yyh146.End() - if yyc146 { - *v = yyv146 - } -} - -func (x codecSelfer1234) encSliceCertificateSigningRequest(v []CertificateSigningRequest, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv150 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy151 := &yyv150 - yy151.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCertificateSigningRequest(v *[]CertificateSigningRequest, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv152 := *v - yyh152, yyl152 := z.DecSliceHelperStart() - var yyc152 bool - if yyl152 == 0 { - if yyv152 == nil { - yyv152 = []CertificateSigningRequest{} - yyc152 = true - } else if len(yyv152) != 0 { - yyv152 = yyv152[:0] - yyc152 = true - } - } else if yyl152 > 0 { - var yyrr152, yyrl152 int - var yyrt152 bool - if yyl152 > cap(yyv152) { - - yyrg152 := len(yyv152) > 0 - yyv2152 := yyv152 - yyrl152, yyrt152 = z.DecInferLen(yyl152, z.DecBasicHandle().MaxInitLen, 384) - if yyrt152 { - if yyrl152 <= cap(yyv152) { - yyv152 = yyv152[:yyrl152] - } else { - yyv152 = make([]CertificateSigningRequest, yyrl152) - } - } else { - yyv152 = make([]CertificateSigningRequest, yyrl152) - } - yyc152 = true - yyrr152 = len(yyv152) - if yyrg152 { - copy(yyv152, yyv2152) - } - } else if yyl152 != len(yyv152) { - yyv152 = yyv152[:yyl152] - yyc152 = true - } - yyj152 := 0 - for ; yyj152 < yyrr152; yyj152++ { - yyh152.ElemContainerState(yyj152) - if r.TryDecodeAsNil() { - yyv152[yyj152] = CertificateSigningRequest{} - } else { - yyv153 := &yyv152[yyj152] - yyv153.CodecDecodeSelf(d) - } - - } - if yyrt152 { - for ; yyj152 < yyl152; yyj152++ { - yyv152 = append(yyv152, CertificateSigningRequest{}) - yyh152.ElemContainerState(yyj152) - if r.TryDecodeAsNil() { - yyv152[yyj152] = CertificateSigningRequest{} - } else { - yyv154 := &yyv152[yyj152] - yyv154.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj152 := 0 - for ; !r.CheckBreak(); yyj152++ { - - if yyj152 >= len(yyv152) { - yyv152 = append(yyv152, CertificateSigningRequest{}) // var yyz152 CertificateSigningRequest - yyc152 = true - } - yyh152.ElemContainerState(yyj152) - if yyj152 < len(yyv152) { - if r.TryDecodeAsNil() { - yyv152[yyj152] = CertificateSigningRequest{} - } else { - yyv155 := &yyv152[yyj152] - yyv155.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj152 < len(yyv152) { - yyv152 = yyv152[:yyj152] - yyc152 = true - } else if yyj152 == 0 && yyv152 == nil { - yyv152 = []CertificateSigningRequest{} - yyc152 = true - } - } - yyh152.End() - if yyc152 { - *v = yyv152 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/types.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/types.go index e589ce74d7..4a7884a1ea 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/types.go @@ -16,54 +16,68 @@ limitations under the License. 
package certificates -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" -) +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient=true // +nonNamespaced=true // Describes a certificate signing request type CertificateSigningRequest struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - api.ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // The certificate request itself and any additional information. // +optional - Spec CertificateSigningRequestSpec `json:"spec,omitempty"` + Spec CertificateSigningRequestSpec // Derived information about the request. // +optional - Status CertificateSigningRequestStatus `json:"status,omitempty"` + Status CertificateSigningRequestStatus } // This information is immutable after the request is created. Only the Request -// and ExtraInfo fields can be set on creation, other fields are derived by +// and Usages fields can be set on creation, other fields are derived by // Kubernetes and cannot be modified by users. type CertificateSigningRequestSpec struct { // Base64-encoded PKCS#10 CSR data - Request []byte `json:"request"` + Request []byte + + // usages specifies a set of usage contexts the key will be + // valid for. + // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + Usages []KeyUsage - // Information about the requesting user (if relevant) - // See user.Info interface for details + // Information about the requesting user. + // See user.Info interface for details. // +optional - Username string `json:"username,omitempty"` + Username string + // UID information about the requesting user. + // See user.Info interface for details. // +optional - UID string `json:"uid,omitempty"` + UID string + // Group information about the requesting user. + // See user.Info interface for details. // +optional - Groups []string `json:"groups,omitempty"` + Groups []string + // Extra information about the requesting user. + // See user.Info interface for details. + // +optional + Extra map[string]ExtraValue } +// ExtraValue masks the value so protobuf can generate +type ExtraValue []string + type CertificateSigningRequestStatus struct { // Conditions applied to the request, such as approval or denial. // +optional - Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty"` + Conditions []CertificateSigningRequestCondition // If request was approved, the controller will place the issued certificate here. // +optional - Certificate []byte `json:"certificate,omitempty"` + Certificate []byte } type RequestConditionType string @@ -76,23 +90,54 @@ const ( type CertificateSigningRequestCondition struct { // request approval state, currently Approved or Denied. 
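// Editorial note (not part of the vendored diff): the internal Spec above
// drops the JSON tags (internal types are converted, not serialized directly)
// and gains Usages and Extra. A hedged sketch of populating it, using the
// KeyUsage constants defined further down; the PEM payload and names are
// placeholders, not values taken from this patch.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/apis/certificates"
)

func main() {
	csr := certificates.CertificateSigningRequest{
		ObjectMeta: metav1.ObjectMeta{Name: "node-csr-example"},
		Spec: certificates.CertificateSigningRequestSpec{
			// PEM-encoded PKCS#10 data would go here.
			Request: []byte("-----BEGIN CERTIFICATE REQUEST-----\n...\n-----END CERTIFICATE REQUEST-----\n"),
			Usages: []certificates.KeyUsage{
				certificates.UsageDigitalSignature,
				certificates.UsageKeyEncipherment,
				certificates.UsageClientAuth,
			},
			Username: "system:node:node-1",
			Groups:   []string{"system:nodes"},
			Extra:    map[string]certificates.ExtraValue{"scopes": {"signing"}},
		},
	}
	fmt.Printf("%s requests %d usages\n", csr.Name, len(csr.Spec.Usages))
}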
- Type RequestConditionType `json:"type"` + Type RequestConditionType // brief reason for the request state // +optional - Reason string `json:"reason,omitempty"` + Reason string // human readable message with details about the request state // +optional - Message string `json:"message,omitempty"` + Message string // timestamp for the last update to this condition // +optional - LastUpdateTime unversioned.Time `json:"lastUpdateTime,omitempty"` + LastUpdateTime metav1.Time } type CertificateSigningRequestList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta // +optional - Items []CertificateSigningRequest `json:"items,omitempty"` + Items []CertificateSigningRequest } + +// KeyUsages specifies valid usage contexts for keys. +// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 +// https://tools.ietf.org/html/rfc5280#section-4.2.1.12 +type KeyUsage string + +const ( + UsageSigning KeyUsage = "signing" + UsageDigitalSignature KeyUsage = "digital signature" + UsageContentCommittment KeyUsage = "content committment" + UsageKeyEncipherment KeyUsage = "key encipherment" + UsageKeyAgreement KeyUsage = "key agreement" + UsageDataEncipherment KeyUsage = "data encipherment" + UsageCertSign KeyUsage = "cert sign" + UsageCRLSign KeyUsage = "crl sign" + UsageEncipherOnly KeyUsage = "encipher only" + UsageDecipherOnly KeyUsage = "decipher only" + UsageAny KeyUsage = "any" + UsageServerAuth KeyUsage = "server auth" + UsageClientAuth KeyUsage = "client auth" + UsageCodeSigning KeyUsage = "code signing" + UsageEmailProtection KeyUsage = "email protection" + UsageSMIME KeyUsage = "s/mime" + UsageIPsecEndSystem KeyUsage = "ipsec end system" + UsageIPsecTunnel KeyUsage = "ipsec tunnel" + UsageIPsecUser KeyUsage = "ipsec user" + UsageTimestamping KeyUsage = "timestamping" + UsageOCSPSigning KeyUsage = "ocsp signing" + UsageMicrosoftSGC KeyUsage = "microsoft sgc" + UsageNetscapSGC KeyUsage = "netscape sgc" +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/BUILD deleted file mode 100644 index b05ca4b77c..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/BUILD +++ /dev/null @@ -1,39 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/apis/certificates:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//pkg/watch/versioned:go_default_library", - "//vendor:github.com/gogo/protobuf/proto", - "//vendor:github.com/ugorji/go/codec", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/conversion.go deleted file mode 100644 index 5275811b88..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/conversion.go +++ /dev/null @@ -1,39 +0,0 @@ -/* 
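// Editorial note (not part of the vendored diff): the KeyUsage strings above
// are defined in this package but interpreted elsewhere; the signing
// controller is not part of this patch. Purely as an illustration, and under
// the assumption that a signer would map them onto crypto/x509 flags, a
// partial translation might look like this:
package main

import (
	"crypto/x509"
	"fmt"
)

// Partial, illustrative mapping from KeyUsage strings to x509 flags; the
// concrete mapping is an assumption here, not something this patch defines.
var keyUsageBits = map[string]x509.KeyUsage{
	"signing":           x509.KeyUsageDigitalSignature,
	"digital signature": x509.KeyUsageDigitalSignature,
	"key encipherment":  x509.KeyUsageKeyEncipherment,
	"cert sign":         x509.KeyUsageCertSign,
	"crl sign":          x509.KeyUsageCRLSign,
}

var extKeyUsages = map[string]x509.ExtKeyUsage{
	"server auth": x509.ExtKeyUsageServerAuth,
	"client auth": x509.ExtKeyUsageClientAuth,
}

func main() {
	requested := []string{"digital signature", "key encipherment", "client auth"}
	var ku x509.KeyUsage
	var eku []x509.ExtKeyUsage
	for _, u := range requested {
		if bit, ok := keyUsageBits[u]; ok {
			ku |= bit
		}
		if e, ok := extKeyUsages[u]; ok {
			eku = append(eku, e)
		}
	}
	fmt.Printf("keyUsage bits: %b, extended usages: %v\n", ku, eku)
}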
-Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/runtime" -) - -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions here. Currently there are none. - - return api.Scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.String(), "CertificateSigningRequest", - func(label, value string) (string, string, error) { - switch label { - case "metadata.name": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }, - ) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/doc.go deleted file mode 100644 index 0e0b1ceabc..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/certificates -// +k8s:openapi-gen=true -// +k8s:defaulter-gen=TypeMeta - -// +groupName=certificates.k8s.io -package v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.pb.go deleted file mode 100644 index 164ab9a110..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.pb.go +++ /dev/null @@ -1,1325 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.proto -// DO NOT EDIT! - -/* - Package v1alpha1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.proto - - It has these top-level messages: - CertificateSigningRequest - CertificateSigningRequestCondition - CertificateSigningRequestList - CertificateSigningRequestSpec - CertificateSigningRequestStatus -*/ -package v1alpha1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 - -func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} } -func (*CertificateSigningRequest) ProtoMessage() {} -func (*CertificateSigningRequest) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} -} - -func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} } -func (*CertificateSigningRequestCondition) ProtoMessage() {} -func (*CertificateSigningRequestCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} -} - -func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} } -func (*CertificateSigningRequestList) ProtoMessage() {} -func (*CertificateSigningRequestList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} -} - -func (m *CertificateSigningRequestSpec) Reset() { *m = CertificateSigningRequestSpec{} } -func (*CertificateSigningRequestSpec) ProtoMessage() {} -func (*CertificateSigningRequestSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} -} - -func (m *CertificateSigningRequestStatus) Reset() { *m = CertificateSigningRequestStatus{} } -func (*CertificateSigningRequestStatus) ProtoMessage() {} -func (*CertificateSigningRequestStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} -} - -func init() { - proto.RegisterType((*CertificateSigningRequest)(nil), "k8s.io.kubernetes.pkg.apis.certificates.v1alpha1.CertificateSigningRequest") - proto.RegisterType((*CertificateSigningRequestCondition)(nil), "k8s.io.kubernetes.pkg.apis.certificates.v1alpha1.CertificateSigningRequestCondition") - proto.RegisterType((*CertificateSigningRequestList)(nil), "k8s.io.kubernetes.pkg.apis.certificates.v1alpha1.CertificateSigningRequestList") - proto.RegisterType((*CertificateSigningRequestSpec)(nil), "k8s.io.kubernetes.pkg.apis.certificates.v1alpha1.CertificateSigningRequestSpec") - proto.RegisterType((*CertificateSigningRequestStatus)(nil), "k8s.io.kubernetes.pkg.apis.certificates.v1alpha1.CertificateSigningRequestStatus") -} -func (m *CertificateSigningRequest) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *CertificateSigningRequest) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, 
err - } - i += n2 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *CertificateSigningRequestCondition) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *CertificateSigningRequestCondition) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastUpdateTime.Size())) - n4, err := m.LastUpdateTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n4 - return i, nil -} - -func (m *CertificateSigningRequestList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *CertificateSigningRequestList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n5 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *CertificateSigningRequestSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *CertificateSigningRequestSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Request != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Request))) - i += copy(data[i:], m.Request) - } - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Username))) - i += copy(data[i:], m.Username) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.UID))) - i += copy(data[i:], m.UID) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - data[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - return i, nil -} - -func (m *CertificateSigningRequestStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *CertificateSigningRequestStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Certificate != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Certificate))) - 
i += copy(data[i:], m.Certificate) - } - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *CertificateSigningRequest) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CertificateSigningRequestCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastUpdateTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CertificateSigningRequestList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *CertificateSigningRequestSpec) Size() (n int) { - var l int - _ = l - if m.Request != nil { - l = len(m.Request) - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Username) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *CertificateSigningRequestStatus) Size() (n int) { - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Certificate != nil { - l = len(m.Certificate) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *CertificateSigningRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CertificateSigningRequest{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CertificateSigningRequestSpec", "CertificateSigningRequestSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CertificateSigningRequestStatus", "CertificateSigningRequestStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CertificateSigningRequestCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CertificateSigningRequestCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, 
- `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CertificateSigningRequestList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CertificateSigningRequestList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CertificateSigningRequest", "CertificateSigningRequest", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CertificateSigningRequestSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CertificateSigningRequestSpec{`, - `Request:` + valueToStringGenerated(this.Request) + `,`, - `Username:` + fmt.Sprintf("%v", this.Username) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, - `}`, - }, "") - return s -} -func (this *CertificateSigningRequestStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CertificateSigningRequestStatus{`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "CertificateSigningRequestCondition", "CertificateSigningRequestCondition", 1), `&`, ``, 1) + `,`, - `Certificate:` + valueToStringGenerated(this.Certificate) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *CertificateSigningRequest) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateSigningRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateSigningRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 
0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CertificateSigningRequestCondition) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateSigningRequestCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateSigningRequestCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = RequestConditionType(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - 
stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastUpdateTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CertificateSigningRequestList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateSigningRequestList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateSigningRequestList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CertificateSigningRequest{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - 
} - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateSigningRequestSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateSigningRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Request = append(m.Request[:0], data[iNdEx:postIndex]...) - if m.Request == nil { - m.Request = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Username = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, string(data[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return 
ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CertificateSigningRequestStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateSigningRequestStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateSigningRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, CertificateSigningRequestCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Certificate = append(m.Certificate[:0], data[iNdEx:postIndex]...) 
- if m.Certificate == nil { - m.Certificate = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -var fileDescriptorGenerated = []byte{ - // 692 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4a, - 0x14, 0x8e, 0x93, 0x34, 0x4d, 0x26, 0xbd, 0xed, 0xd5, 0xe8, 0xea, 0x2a, 0x37, 0x52, 0x9d, 0x2a, - 0xba, 0xa0, 0x00, 0xed, 0x98, 0x54, 0x42, 0xea, 0x12, 0xb9, 0x48, 0xa8, 0xa2, 0x55, 0xc5, 0xb4, - 0x95, 0x10, 0x12, 0x8b, 0x89, 0x73, 0xea, 0x0e, 0x69, 0x6c, 0xd7, 0x33, 0x8e, 0xd4, 0x1d, 0x4b, - 0x96, 0x3c, 0x01, 0xaf, 0xc1, 0x2b, 0x74, 0xd9, 0x25, 0xab, 0x40, 0xd3, 0x17, 0x60, 0xcd, 0x0a, - 0x79, 0x32, 0x4e, 0x4c, 0x52, 0x17, 0x90, 0xba, 0xcb, 0xf9, 0xe6, 0x9c, 0xef, 0x3b, 0x3f, 0x9f, - 0x83, 0x9e, 0xf6, 0xb6, 0x04, 0xe1, 0xbe, 0xd5, 0x8b, 0x3a, 0x10, 0x7a, 0x20, 0x41, 0x58, 0x41, - 0xcf, 0xb5, 0x58, 0xc0, 0x85, 0xe5, 0x40, 0x28, 0xf9, 0x31, 0x77, 0x58, 0x8c, 0x0e, 0xda, 0xec, - 0x34, 0x38, 0x61, 0x6d, 0xcb, 0x05, 0x0f, 0x42, 0x26, 0xa1, 0x4b, 0x82, 0xd0, 0x97, 0x3e, 0x7e, - 0x3c, 0x66, 0x20, 0x53, 0x06, 0x12, 0xf4, 0x5c, 0x12, 0x33, 0x90, 0x34, 0x03, 0x49, 0x18, 0xea, - 0x1b, 0x2e, 0x97, 0x27, 0x51, 0x87, 0x38, 0x7e, 0xdf, 0x72, 0x7d, 0xd7, 0xb7, 0x14, 0x51, 0x27, - 0x3a, 0x56, 0x91, 0x0a, 0xd4, 
0xaf, 0xb1, 0x40, 0x7d, 0x33, 0xb3, 0x45, 0x2b, 0x04, 0xe1, 0x47, - 0xa1, 0x03, 0xb3, 0x4d, 0xd5, 0x9f, 0x64, 0xd7, 0x44, 0xde, 0x00, 0x42, 0xc1, 0x7d, 0x0f, 0xba, - 0x73, 0x65, 0xeb, 0xd9, 0x65, 0x83, 0xb9, 0xc9, 0xeb, 0x1b, 0x37, 0x67, 0x87, 0x91, 0x27, 0x79, - 0x7f, 0xbe, 0xa7, 0xf6, 0xcd, 0xe9, 0x91, 0xe4, 0xa7, 0x16, 0xf7, 0xa4, 0x90, 0xe1, 0x6c, 0x49, - 0xf3, 0x3a, 0x8f, 0xfe, 0xdb, 0x9e, 0xee, 0xf0, 0x80, 0xbb, 0x1e, 0xf7, 0x5c, 0x0a, 0x67, 0x11, - 0x08, 0x89, 0x5f, 0xa1, 0x72, 0x1f, 0x24, 0xeb, 0x32, 0xc9, 0x6a, 0xc6, 0x9a, 0xd1, 0xaa, 0x6e, - 0xb6, 0x48, 0xe6, 0x31, 0xc8, 0xa0, 0x4d, 0xf6, 0x3b, 0x6f, 0xc1, 0x91, 0x7b, 0x20, 0x99, 0x8d, - 0x2f, 0x86, 0x8d, 0xdc, 0x68, 0xd8, 0x40, 0x53, 0x8c, 0x4e, 0xd8, 0xf0, 0x19, 0x2a, 0x8a, 0x00, - 0x9c, 0x5a, 0x5e, 0xb1, 0xee, 0x93, 0x3f, 0x3d, 0x31, 0xc9, 0x6c, 0xfa, 0x20, 0x00, 0xc7, 0x5e, - 0xd2, 0xe2, 0xc5, 0x38, 0xa2, 0x4a, 0x0a, 0x9f, 0xa3, 0x92, 0x90, 0x4c, 0x46, 0xa2, 0x56, 0x50, - 0xa2, 0x2f, 0xef, 0x52, 0x54, 0x11, 0xdb, 0xcb, 0x5a, 0xb6, 0x34, 0x8e, 0xa9, 0x16, 0x6c, 0x7e, - 0xcc, 0xa3, 0x66, 0x66, 0xed, 0xb6, 0xef, 0x75, 0xb9, 0xe4, 0xbe, 0x87, 0xb7, 0x50, 0x51, 0x9e, - 0x07, 0xa0, 0x56, 0x5d, 0xb1, 0xff, 0x4f, 0x66, 0x38, 0x3c, 0x0f, 0xe0, 0xfb, 0xb0, 0xf1, 0xcf, - 0x6c, 0x7e, 0x8c, 0x53, 0x55, 0x81, 0xef, 0xa3, 0x52, 0x08, 0x4c, 0xf8, 0x9e, 0x5a, 0x68, 0x65, - 0xda, 0x08, 0x55, 0x28, 0xd5, 0xaf, 0xf8, 0x01, 0x5a, 0xec, 0x83, 0x10, 0xcc, 0x05, 0xb5, 0x84, - 0x8a, 0xbd, 0xa2, 0x13, 0x17, 0xf7, 0xc6, 0x30, 0x4d, 0xde, 0x71, 0x0f, 0x2d, 0x9f, 0x32, 0x21, - 0x8f, 0x82, 0x2e, 0x93, 0x70, 0xc8, 0xfb, 0x50, 0x2b, 0xaa, 0xb5, 0x3d, 0xba, 0xc5, 0x01, 0x29, - 0xe7, 0x93, 0xb8, 0xc4, 0xfe, 0x57, 0xd3, 0x2f, 0xef, 0xfe, 0x44, 0x45, 0x67, 0xa8, 0x9b, 0xdf, - 0x0c, 0xb4, 0x9a, 0xb9, 0xa0, 0x5d, 0x2e, 0x24, 0x7e, 0x33, 0x67, 0x45, 0xeb, 0x37, 0x1b, 0x89, - 0xcb, 0x95, 0x23, 0xff, 0xd6, 0xcd, 0x94, 0x13, 0x24, 0xe5, 0xc7, 0x00, 0x2d, 0x70, 0x09, 0x7d, - 0x51, 0xcb, 0xaf, 0x15, 0x5a, 0xd5, 0xcd, 0x17, 0x77, 0xe8, 0x0d, 0xfb, 0x2f, 0xad, 0xbb, 0xb0, - 0x13, 0x2b, 0xd0, 0xb1, 0x50, 0xf3, 0xd3, 0x6d, 0x23, 0xc7, 0xb6, 0xc5, 0xf7, 0xd0, 0x62, 0x38, - 0x0e, 0xd5, 0xc4, 0x4b, 0x76, 0x35, 0x3e, 0x94, 0xce, 0xa0, 0xc9, 0x1b, 0x5e, 0x47, 0xe5, 0x48, - 0x40, 0xe8, 0xb1, 0x3e, 0xe8, 0xeb, 0x4f, 0x06, 0x3d, 0xd2, 0x38, 0x9d, 0x64, 0xe0, 0x55, 0x54, - 0x88, 0x78, 0x57, 0x5f, 0xbf, 0xaa, 0x13, 0x0b, 0x47, 0x3b, 0xcf, 0x68, 0x8c, 0xe3, 0x26, 0x2a, - 0xb9, 0xa1, 0x1f, 0x05, 0xa2, 0x56, 0x5c, 0x2b, 0xb4, 0x2a, 0x36, 0x8a, 0x4d, 0xf4, 0x5c, 0x21, - 0x54, 0xbf, 0x34, 0xbf, 0x18, 0xa8, 0xf1, 0x8b, 0x2f, 0x01, 0xbf, 0x37, 0x10, 0x72, 0x12, 0xa3, - 0x8a, 0x9a, 0xa1, 0xb6, 0x7a, 0x78, 0x87, 0x5b, 0x9d, 0x7c, 0x05, 0xd3, 0x3f, 0x9a, 0x09, 0x24, - 0x68, 0x4a, 0x1b, 0xb7, 0x51, 0x35, 0xc5, 0xad, 0x56, 0xb4, 0x64, 0xaf, 0x8c, 0x86, 0x8d, 0x6a, - 0x8a, 0x9c, 0xa6, 0x73, 0xec, 0x87, 0x17, 0x57, 0x66, 0xee, 0xf2, 0xca, 0xcc, 0x7d, 0xbe, 0x32, - 0x73, 0xef, 0x46, 0xa6, 0x71, 0x31, 0x32, 0x8d, 0xcb, 0x91, 0x69, 0x7c, 0x1d, 0x99, 0xc6, 0x87, - 0x6b, 0x33, 0xf7, 0xba, 0x9c, 0x74, 0xf8, 0x23, 0x00, 0x00, 0xff, 0xff, 0x29, 0x0b, 0xef, 0x6d, - 0xe0, 0x06, 0x00, 0x00, -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.proto deleted file mode 100644 index 5c576a2e29..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.proto +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.certificates.v1alpha1; - -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// Describes a certificate signing request -message CertificateSigningRequest { - // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // The certificate request itself and any additional information. - // +optional - optional CertificateSigningRequestSpec spec = 2; - - // Derived information about the request. - // +optional - optional CertificateSigningRequestStatus status = 3; -} - -message CertificateSigningRequestCondition { - // request approval state, currently Approved or Denied. - optional string type = 1; - - // brief reason for the request state - // +optional - optional string reason = 2; - - // human readable message with details about the request state - // +optional - optional string message = 3; - - // timestamp for the last update to this condition - // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastUpdateTime = 4; -} - -message CertificateSigningRequestList { - // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - repeated CertificateSigningRequest items = 2; -} - -// This information is immutable after the request is created. Only the Request -// and ExtraInfo fields can be set on creation, other fields are derived by -// Kubernetes and cannot be modified by users. -message CertificateSigningRequestSpec { - // Base64-encoded PKCS#10 CSR data - optional bytes request = 1; - - // Information about the requesting user (if relevant) - // See user.Info interface for details - // +optional - optional string username = 2; - - // +optional - optional string uid = 3; - - // +optional - repeated string groups = 4; -} - -message CertificateSigningRequestStatus { - // Conditions applied to the request, such as approval or denial. - // +optional - repeated CertificateSigningRequestCondition conditions = 1; - - // If request was approved, the controller will place the issued certificate here. - // +optional - optional bytes certificate = 2; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/register.go deleted file mode 100644 index c17dd08a97..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/register.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" -) - -// GroupName is the group name use in this package -const GroupName = "certificates.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1alpha1"} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addConversionFuncs) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &CertificateSigningRequest{}, - &CertificateSigningRequestList{}, - &v1.ListOptions{}, - &v1.DeleteOptions{}, - &v1.ExportOptions{}, - ) - - // Add the watch version that applies - versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} - -func (obj *CertificateSigningRequest) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *CertificateSigningRequestList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/types.generated.go deleted file mode 100644 index 5c6832b702..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/types.generated.go +++ /dev/null @@ -1,1950 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. 
-// ************************************************************ - -package v1alpha1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg2_v1 "k8s.io/kubernetes/pkg/api/v1" - pkg3_types "k8s.io/kubernetes/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_unversioned.TypeMeta - var v1 pkg2_v1.ObjectMeta - var v2 pkg3_types.UID - var v3 time.Time - _, _, _, _ = v0, v1, v2, v3 - } -} - -func (x *CertificateSigningRequest) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := 
&x.ObjectMeta - yy10.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.ObjectMeta - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy13 := &x.Spec - yy13.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.Spec - yy14.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Status - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CertificateSigningRequest) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct19 := r.ContainerType() - if yyct19 == codecSelferValueTypeMap1234 { - yyl19 := r.ReadMapStart() - if yyl19 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl19, d) - } - } else if yyct19 == codecSelferValueTypeArray1234 { - yyl19 := r.ReadArrayStart() - if yyl19 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl19, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CertificateSigningRequest) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys20Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys20Slc - var yyhl20 bool = l >= 0 - for yyj20 := 0; ; yyj20++ { - if yyhl20 { - if yyj20 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys20Slc = r.DecodeBytes(yys20Slc, true, true) - yys20 := string(yys20Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys20 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv23 := &x.ObjectMeta - yyv23.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = CertificateSigningRequestSpec{} - } else { - yyv24 := &x.Spec - yyv24.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = CertificateSigningRequestStatus{} - } else { - yyv25 := &x.Status - yyv25.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys20) - } // end switch yys20 - } // end 
for yyj20 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CertificateSigningRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj26 int - var yyb26 bool - var yyhl26 bool = l >= 0 - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv29 := &x.ObjectMeta - yyv29.CodecDecodeSelf(d) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = CertificateSigningRequestSpec{} - } else { - yyv30 := &x.Spec - yyv30.CodecDecodeSelf(d) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = CertificateSigningRequestStatus{} - } else { - yyv31 := &x.Status - yyv31.CodecDecodeSelf(d) - } - for { - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj26-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CertificateSigningRequestSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym32 := z.EncBinary() - _ = yym32 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep33 := !z.EncBinary() - yy2arr33 := z.EncBasicHandle().StructToArray - var yyq33 [4]bool - _, _, _ = yysep33, yyq33, yy2arr33 - const yyr33 bool = false - yyq33[1] = x.Username != "" - yyq33[2] = x.UID != "" - yyq33[3] = len(x.Groups) != 0 - var yynn33 int - if yyr33 || yy2arr33 { - r.EncodeArrayStart(4) - } else { - yynn33 = 1 - for _, b := range yyq33 { - if b { - yynn33++ - } - } - r.EncodeMapStart(yynn33) - yynn33 = 0 - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Request == nil { - r.EncodeNil() - } else { - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Request)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("request")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Request == nil { - r.EncodeNil() - } else { - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Request)) - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[1] { - yym38 := z.EncBinary() - _ = yym38 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Username)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq33[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("username")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym39 := z.EncBinary() - _ = yym39 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Username)) - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[2] { - yym41 := z.EncBinary() - _ = yym41 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq33[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym42 := z.EncBinary() - _ = yym42 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[3] { - if x.Groups == nil { - r.EncodeNil() - } else { - yym44 := z.EncBinary() - _ = yym44 - if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq33[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("groups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Groups == nil { - r.EncodeNil() - } else { - yym45 := z.EncBinary() - _ = yym45 - if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CertificateSigningRequestSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym46 := z.DecBinary() - _ = yym46 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct47 := r.ContainerType() - if yyct47 == codecSelferValueTypeMap1234 { - yyl47 := r.ReadMapStart() - if yyl47 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl47, d) - } - } else if yyct47 == codecSelferValueTypeArray1234 { - yyl47 := r.ReadArrayStart() - if yyl47 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl47, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CertificateSigningRequestSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys48Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys48Slc - var yyhl48 bool = l >= 0 - for yyj48 := 0; ; yyj48++ { - if yyhl48 { - if yyj48 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys48Slc = r.DecodeBytes(yys48Slc, true, true) - yys48 := string(yys48Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys48 { - case "request": - if r.TryDecodeAsNil() { - x.Request = nil - } else { - yyv49 := &x.Request - yym50 := z.DecBinary() - _ = yym50 - if false { - } else { - *yyv49 = r.DecodeBytes(*(*[]byte)(yyv49), false, false) - } - } - case "username": - if r.TryDecodeAsNil() { - x.Username = "" - } else { - x.Username = string(r.DecodeString()) - } - case "uid": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = string(r.DecodeString()) - } - case "groups": - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv53 := &x.Groups - yym54 := z.DecBinary() - _ = yym54 - if false { - } else { - z.F.DecSliceStringX(yyv53, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys48) - } // end switch yys48 - } // end for yyj48 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CertificateSigningRequestSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj55 int - var yyb55 bool - var yyhl55 bool = l >= 0 - yyj55++ - if yyhl55 { - yyb55 = yyj55 > l - } else { - yyb55 = r.CheckBreak() - } - if yyb55 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Request = nil - } else { - yyv56 := &x.Request - yym57 := z.DecBinary() - _ = yym57 - if false { - } else { - *yyv56 = r.DecodeBytes(*(*[]byte)(yyv56), false, false) - } - } - yyj55++ - if yyhl55 { - yyb55 = yyj55 > l - } else { - yyb55 = r.CheckBreak() - } - if yyb55 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Username = "" - } else { - x.Username = string(r.DecodeString()) - } - yyj55++ - if yyhl55 { - yyb55 = yyj55 > l - } else { - yyb55 = r.CheckBreak() - } - if yyb55 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = string(r.DecodeString()) - } - yyj55++ - if yyhl55 { - yyb55 = yyj55 > l - } else { - yyb55 = r.CheckBreak() - } - if yyb55 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv60 := &x.Groups - yym61 := z.DecBinary() - _ = yym61 - if false { - } else { - z.F.DecSliceStringX(yyv60, false, d) - } - } - for { - yyj55++ - if yyhl55 { - yyb55 = yyj55 > l - } else { - yyb55 = r.CheckBreak() - } - if yyb55 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj55-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CertificateSigningRequestStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym62 := z.EncBinary() - _ = yym62 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep63 := !z.EncBinary() - yy2arr63 := z.EncBasicHandle().StructToArray - var yyq63 [2]bool - _, _, _ = yysep63, yyq63, yy2arr63 - const yyr63 bool = false - yyq63[0] = 
len(x.Conditions) != 0 - yyq63[1] = len(x.Certificate) != 0 - var yynn63 int - if yyr63 || yy2arr63 { - r.EncodeArrayStart(2) - } else { - yynn63 = 0 - for _, b := range yyq63 { - if b { - yynn63++ - } - } - r.EncodeMapStart(yynn63) - yynn63 = 0 - } - if yyr63 || yy2arr63 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq63[0] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym65 := z.EncBinary() - _ = yym65 - if false { - } else { - h.encSliceCertificateSigningRequestCondition(([]CertificateSigningRequestCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq63[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym66 := z.EncBinary() - _ = yym66 - if false { - } else { - h.encSliceCertificateSigningRequestCondition(([]CertificateSigningRequestCondition)(x.Conditions), e) - } - } - } - } - if yyr63 || yy2arr63 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq63[1] { - if x.Certificate == nil { - r.EncodeNil() - } else { - yym68 := z.EncBinary() - _ = yym68 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Certificate)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq63[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("certificate")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Certificate == nil { - r.EncodeNil() - } else { - yym69 := z.EncBinary() - _ = yym69 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Certificate)) - } - } - } - } - if yyr63 || yy2arr63 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CertificateSigningRequestStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym70 := z.DecBinary() - _ = yym70 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct71 := r.ContainerType() - if yyct71 == codecSelferValueTypeMap1234 { - yyl71 := r.ReadMapStart() - if yyl71 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl71, d) - } - } else if yyct71 == codecSelferValueTypeArray1234 { - yyl71 := r.ReadArrayStart() - if yyl71 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl71, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CertificateSigningRequestStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys72Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys72Slc - var yyhl72 bool = l >= 0 - for yyj72 := 0; ; yyj72++ { - if yyhl72 { - if yyj72 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys72Slc = r.DecodeBytes(yys72Slc, true, true) - yys72 := string(yys72Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys72 { - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv73 := &x.Conditions - yym74 := z.DecBinary() - _ = yym74 - if false { - } 
else { - h.decSliceCertificateSigningRequestCondition((*[]CertificateSigningRequestCondition)(yyv73), d) - } - } - case "certificate": - if r.TryDecodeAsNil() { - x.Certificate = nil - } else { - yyv75 := &x.Certificate - yym76 := z.DecBinary() - _ = yym76 - if false { - } else { - *yyv75 = r.DecodeBytes(*(*[]byte)(yyv75), false, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys72) - } // end switch yys72 - } // end for yyj72 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CertificateSigningRequestStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj77 int - var yyb77 bool - var yyhl77 bool = l >= 0 - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv78 := &x.Conditions - yym79 := z.DecBinary() - _ = yym79 - if false { - } else { - h.decSliceCertificateSigningRequestCondition((*[]CertificateSigningRequestCondition)(yyv78), d) - } - } - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Certificate = nil - } else { - yyv80 := &x.Certificate - yym81 := z.DecBinary() - _ = yym81 - if false { - } else { - *yyv80 = r.DecodeBytes(*(*[]byte)(yyv80), false, false) - } - } - for { - yyj77++ - if yyhl77 { - yyb77 = yyj77 > l - } else { - yyb77 = r.CheckBreak() - } - if yyb77 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj77-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x RequestConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym82 := z.EncBinary() - _ = yym82 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *RequestConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym83 := z.DecBinary() - _ = yym83 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *CertificateSigningRequestCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym84 := z.EncBinary() - _ = yym84 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep85 := !z.EncBinary() - yy2arr85 := z.EncBasicHandle().StructToArray - var yyq85 [4]bool - _, _, _ = yysep85, yyq85, yy2arr85 - const yyr85 bool = false - yyq85[1] = x.Reason != "" - yyq85[2] = x.Message != "" - yyq85[3] = true - var yynn85 int - if yyr85 || yy2arr85 { - r.EncodeArrayStart(4) - } else { - yynn85 = 1 - for _, b := range yyq85 { - if b { - yynn85++ - } - } - r.EncodeMapStart(yynn85) - yynn85 = 0 - } - if yyr85 || yy2arr85 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr85 || yy2arr85 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq85[1] { - yym88 := z.EncBinary() - _ = yym88 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq85[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym89 := z.EncBinary() - _ = yym89 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr85 || yy2arr85 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq85[2] { - yym91 := z.EncBinary() - _ = yym91 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq85[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym92 := z.EncBinary() - _ = yym92 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr85 || yy2arr85 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq85[3] { - yy94 := &x.LastUpdateTime - yym95 := z.EncBinary() - _ = yym95 - if false { - } else if z.HasExtensions() && z.EncExt(yy94) { - } else if yym95 { - z.EncBinaryMarshal(yy94) - } else if !yym95 && z.IsJSONHandle() { - z.EncJSONMarshal(yy94) - } else { - z.EncFallback(yy94) - } - } else { - r.EncodeNil() - } - } else { - if yyq85[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastUpdateTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy96 := &x.LastUpdateTime - yym97 := z.EncBinary() - _ = yym97 - if false { - } else if z.HasExtensions() && z.EncExt(yy96) { - } else if yym97 { - z.EncBinaryMarshal(yy96) - } else if !yym97 && z.IsJSONHandle() { - z.EncJSONMarshal(yy96) - } else { - z.EncFallback(yy96) - } - } - } - if yyr85 || yy2arr85 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CertificateSigningRequestCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym98 := z.DecBinary() - _ = yym98 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct99 := r.ContainerType() - if yyct99 == codecSelferValueTypeMap1234 { - yyl99 := r.ReadMapStart() - if yyl99 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl99, d) - } - } else if yyct99 == codecSelferValueTypeArray1234 { - yyl99 := r.ReadArrayStart() - if yyl99 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl99, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CertificateSigningRequestCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys100Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys100Slc 
- var yyhl100 bool = l >= 0 - for yyj100 := 0; ; yyj100++ { - if yyhl100 { - if yyj100 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys100Slc = r.DecodeBytes(yys100Slc, true, true) - yys100 := string(yys100Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys100 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = RequestConditionType(r.DecodeString()) - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - case "lastUpdateTime": - if r.TryDecodeAsNil() { - x.LastUpdateTime = pkg1_unversioned.Time{} - } else { - yyv104 := &x.LastUpdateTime - yym105 := z.DecBinary() - _ = yym105 - if false { - } else if z.HasExtensions() && z.DecExt(yyv104) { - } else if yym105 { - z.DecBinaryUnmarshal(yyv104) - } else if !yym105 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv104) - } else { - z.DecFallback(yyv104, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys100) - } // end switch yys100 - } // end for yyj100 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CertificateSigningRequestCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj106 int - var yyb106 bool - var yyhl106 bool = l >= 0 - yyj106++ - if yyhl106 { - yyb106 = yyj106 > l - } else { - yyb106 = r.CheckBreak() - } - if yyb106 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = RequestConditionType(r.DecodeString()) - } - yyj106++ - if yyhl106 { - yyb106 = yyj106 > l - } else { - yyb106 = r.CheckBreak() - } - if yyb106 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj106++ - if yyhl106 { - yyb106 = yyj106 > l - } else { - yyb106 = r.CheckBreak() - } - if yyb106 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - yyj106++ - if yyhl106 { - yyb106 = yyj106 > l - } else { - yyb106 = r.CheckBreak() - } - if yyb106 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastUpdateTime = pkg1_unversioned.Time{} - } else { - yyv110 := &x.LastUpdateTime - yym111 := z.DecBinary() - _ = yym111 - if false { - } else if z.HasExtensions() && z.DecExt(yyv110) { - } else if yym111 { - z.DecBinaryUnmarshal(yyv110) - } else if !yym111 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv110) - } else { - z.DecFallback(yyv110, false) - } - } - for { - yyj106++ - if yyhl106 { - yyb106 = yyj106 > l - } else { - yyb106 = r.CheckBreak() - } - if yyb106 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj106-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CertificateSigningRequestList) CodecEncodeSelf(e 
*codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym112 := z.EncBinary() - _ = yym112 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep113 := !z.EncBinary() - yy2arr113 := z.EncBasicHandle().StructToArray - var yyq113 [4]bool - _, _, _ = yysep113, yyq113, yy2arr113 - const yyr113 bool = false - yyq113[0] = x.Kind != "" - yyq113[1] = x.APIVersion != "" - yyq113[2] = true - var yynn113 int - if yyr113 || yy2arr113 { - r.EncodeArrayStart(4) - } else { - yynn113 = 1 - for _, b := range yyq113 { - if b { - yynn113++ - } - } - r.EncodeMapStart(yynn113) - yynn113 = 0 - } - if yyr113 || yy2arr113 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq113[0] { - yym115 := z.EncBinary() - _ = yym115 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq113[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym116 := z.EncBinary() - _ = yym116 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr113 || yy2arr113 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq113[1] { - yym118 := z.EncBinary() - _ = yym118 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq113[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym119 := z.EncBinary() - _ = yym119 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr113 || yy2arr113 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq113[2] { - yy121 := &x.ListMeta - yym122 := z.EncBinary() - _ = yym122 - if false { - } else if z.HasExtensions() && z.EncExt(yy121) { - } else { - z.EncFallback(yy121) - } - } else { - r.EncodeNil() - } - } else { - if yyq113[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy123 := &x.ListMeta - yym124 := z.EncBinary() - _ = yym124 - if false { - } else if z.HasExtensions() && z.EncExt(yy123) { - } else { - z.EncFallback(yy123) - } - } - } - if yyr113 || yy2arr113 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym126 := z.EncBinary() - _ = yym126 - if false { - } else { - h.encSliceCertificateSigningRequest(([]CertificateSigningRequest)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym127 := z.EncBinary() - _ = yym127 - if false { - } else { - h.encSliceCertificateSigningRequest(([]CertificateSigningRequest)(x.Items), e) - } - } - } - if yyr113 || yy2arr113 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CertificateSigningRequestList) 
CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym128 := z.DecBinary() - _ = yym128 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct129 := r.ContainerType() - if yyct129 == codecSelferValueTypeMap1234 { - yyl129 := r.ReadMapStart() - if yyl129 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl129, d) - } - } else if yyct129 == codecSelferValueTypeArray1234 { - yyl129 := r.ReadArrayStart() - if yyl129 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl129, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CertificateSigningRequestList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys130Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys130Slc - var yyhl130 bool = l >= 0 - for yyj130 := 0; ; yyj130++ { - if yyhl130 { - if yyj130 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys130Slc = r.DecodeBytes(yys130Slc, true, true) - yys130 := string(yys130Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys130 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv133 := &x.ListMeta - yym134 := z.DecBinary() - _ = yym134 - if false { - } else if z.HasExtensions() && z.DecExt(yyv133) { - } else { - z.DecFallback(yyv133, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv135 := &x.Items - yym136 := z.DecBinary() - _ = yym136 - if false { - } else { - h.decSliceCertificateSigningRequest((*[]CertificateSigningRequest)(yyv135), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys130) - } // end switch yys130 - } // end for yyj130 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CertificateSigningRequestList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj137 int - var yyb137 bool - var yyhl137 bool = l >= 0 - yyj137++ - if yyhl137 { - yyb137 = yyj137 > l - } else { - yyb137 = r.CheckBreak() - } - if yyb137 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj137++ - if yyhl137 { - yyb137 = yyj137 > l - } else { - yyb137 = r.CheckBreak() - } - if yyb137 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj137++ - if yyhl137 { - yyb137 = yyj137 > l - } else { - yyb137 = r.CheckBreak() - } - if yyb137 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else 
{ - yyv140 := &x.ListMeta - yym141 := z.DecBinary() - _ = yym141 - if false { - } else if z.HasExtensions() && z.DecExt(yyv140) { - } else { - z.DecFallback(yyv140, false) - } - } - yyj137++ - if yyhl137 { - yyb137 = yyj137 > l - } else { - yyb137 = r.CheckBreak() - } - if yyb137 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv142 := &x.Items - yym143 := z.DecBinary() - _ = yym143 - if false { - } else { - h.decSliceCertificateSigningRequest((*[]CertificateSigningRequest)(yyv142), d) - } - } - for { - yyj137++ - if yyhl137 { - yyb137 = yyj137 > l - } else { - yyb137 = r.CheckBreak() - } - if yyb137 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj137-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceCertificateSigningRequestCondition(v []CertificateSigningRequestCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv144 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy145 := &yyv144 - yy145.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCertificateSigningRequestCondition(v *[]CertificateSigningRequestCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv146 := *v - yyh146, yyl146 := z.DecSliceHelperStart() - var yyc146 bool - if yyl146 == 0 { - if yyv146 == nil { - yyv146 = []CertificateSigningRequestCondition{} - yyc146 = true - } else if len(yyv146) != 0 { - yyv146 = yyv146[:0] - yyc146 = true - } - } else if yyl146 > 0 { - var yyrr146, yyrl146 int - var yyrt146 bool - if yyl146 > cap(yyv146) { - - yyrg146 := len(yyv146) > 0 - yyv2146 := yyv146 - yyrl146, yyrt146 = z.DecInferLen(yyl146, z.DecBasicHandle().MaxInitLen, 72) - if yyrt146 { - if yyrl146 <= cap(yyv146) { - yyv146 = yyv146[:yyrl146] - } else { - yyv146 = make([]CertificateSigningRequestCondition, yyrl146) - } - } else { - yyv146 = make([]CertificateSigningRequestCondition, yyrl146) - } - yyc146 = true - yyrr146 = len(yyv146) - if yyrg146 { - copy(yyv146, yyv2146) - } - } else if yyl146 != len(yyv146) { - yyv146 = yyv146[:yyl146] - yyc146 = true - } - yyj146 := 0 - for ; yyj146 < yyrr146; yyj146++ { - yyh146.ElemContainerState(yyj146) - if r.TryDecodeAsNil() { - yyv146[yyj146] = CertificateSigningRequestCondition{} - } else { - yyv147 := &yyv146[yyj146] - yyv147.CodecDecodeSelf(d) - } - - } - if yyrt146 { - for ; yyj146 < yyl146; yyj146++ { - yyv146 = append(yyv146, CertificateSigningRequestCondition{}) - yyh146.ElemContainerState(yyj146) - if r.TryDecodeAsNil() { - yyv146[yyj146] = CertificateSigningRequestCondition{} - } else { - yyv148 := &yyv146[yyj146] - yyv148.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj146 := 0 - for ; !r.CheckBreak(); yyj146++ { - - if yyj146 >= len(yyv146) { - yyv146 = append(yyv146, CertificateSigningRequestCondition{}) // var yyz146 CertificateSigningRequestCondition - yyc146 = true - } - yyh146.ElemContainerState(yyj146) - if yyj146 < len(yyv146) { - if r.TryDecodeAsNil() { - yyv146[yyj146] = CertificateSigningRequestCondition{} - } else { - yyv149 := &yyv146[yyj146] - yyv149.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj146 < 
len(yyv146) { - yyv146 = yyv146[:yyj146] - yyc146 = true - } else if yyj146 == 0 && yyv146 == nil { - yyv146 = []CertificateSigningRequestCondition{} - yyc146 = true - } - } - yyh146.End() - if yyc146 { - *v = yyv146 - } -} - -func (x codecSelfer1234) encSliceCertificateSigningRequest(v []CertificateSigningRequest, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv150 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy151 := &yyv150 - yy151.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCertificateSigningRequest(v *[]CertificateSigningRequest, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv152 := *v - yyh152, yyl152 := z.DecSliceHelperStart() - var yyc152 bool - if yyl152 == 0 { - if yyv152 == nil { - yyv152 = []CertificateSigningRequest{} - yyc152 = true - } else if len(yyv152) != 0 { - yyv152 = yyv152[:0] - yyc152 = true - } - } else if yyl152 > 0 { - var yyrr152, yyrl152 int - var yyrt152 bool - if yyl152 > cap(yyv152) { - - yyrg152 := len(yyv152) > 0 - yyv2152 := yyv152 - yyrl152, yyrt152 = z.DecInferLen(yyl152, z.DecBasicHandle().MaxInitLen, 384) - if yyrt152 { - if yyrl152 <= cap(yyv152) { - yyv152 = yyv152[:yyrl152] - } else { - yyv152 = make([]CertificateSigningRequest, yyrl152) - } - } else { - yyv152 = make([]CertificateSigningRequest, yyrl152) - } - yyc152 = true - yyrr152 = len(yyv152) - if yyrg152 { - copy(yyv152, yyv2152) - } - } else if yyl152 != len(yyv152) { - yyv152 = yyv152[:yyl152] - yyc152 = true - } - yyj152 := 0 - for ; yyj152 < yyrr152; yyj152++ { - yyh152.ElemContainerState(yyj152) - if r.TryDecodeAsNil() { - yyv152[yyj152] = CertificateSigningRequest{} - } else { - yyv153 := &yyv152[yyj152] - yyv153.CodecDecodeSelf(d) - } - - } - if yyrt152 { - for ; yyj152 < yyl152; yyj152++ { - yyv152 = append(yyv152, CertificateSigningRequest{}) - yyh152.ElemContainerState(yyj152) - if r.TryDecodeAsNil() { - yyv152[yyj152] = CertificateSigningRequest{} - } else { - yyv154 := &yyv152[yyj152] - yyv154.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj152 := 0 - for ; !r.CheckBreak(); yyj152++ { - - if yyj152 >= len(yyv152) { - yyv152 = append(yyv152, CertificateSigningRequest{}) // var yyz152 CertificateSigningRequest - yyc152 = true - } - yyh152.ElemContainerState(yyj152) - if yyj152 < len(yyv152) { - if r.TryDecodeAsNil() { - yyv152[yyj152] = CertificateSigningRequest{} - } else { - yyv155 := &yyv152[yyj152] - yyv155.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj152 < len(yyv152) { - yyv152 = yyv152[:yyj152] - yyc152 = true - } else if yyj152 == 0 && yyv152 == nil { - yyv152 = []CertificateSigningRequest{} - yyc152 = true - } - } - yyh152.End() - if yyc152 { - *v = yyv152 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/types.go deleted file mode 100644 index 54de065ae5..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/types.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" -) - -// +genclient=true -// +nonNamespaced=true - -// Describes a certificate signing request -type CertificateSigningRequest struct { - unversioned.TypeMeta `json:",inline"` - // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // The certificate request itself and any additional information. - // +optional - Spec CertificateSigningRequestSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Derived information about the request. - // +optional - Status CertificateSigningRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// This information is immutable after the request is created. Only the Request -// and ExtraInfo fields can be set on creation, other fields are derived by -// Kubernetes and cannot be modified by users. -type CertificateSigningRequestSpec struct { - // Base64-encoded PKCS#10 CSR data - Request []byte `json:"request" protobuf:"bytes,1,opt,name=request"` - - // Information about the requesting user (if relevant) - // See user.Info interface for details - // +optional - Username string `json:"username,omitempty" protobuf:"bytes,2,opt,name=username"` - // +optional - UID string `json:"uid,omitempty" protobuf:"bytes,3,opt,name=uid"` - // +optional - Groups []string `json:"groups,omitempty" protobuf:"bytes,4,rep,name=groups"` -} - -type CertificateSigningRequestStatus struct { - // Conditions applied to the request, such as approval or denial. - // +optional - Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` - - // If request was approved, the controller will place the issued certificate here. - // +optional - Certificate []byte `json:"certificate,omitempty" protobuf:"bytes,2,opt,name=certificate"` -} - -type RequestConditionType string - -// These are the possible conditions for a certificate request. -const ( - CertificateApproved RequestConditionType = "Approved" - CertificateDenied RequestConditionType = "Denied" -) - -type CertificateSigningRequestCondition struct { - // request approval state, currently Approved or Denied. 
- Type RequestConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=RequestConditionType"` - // brief reason for the request state - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` - // human readable message with details about the request state - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` - // timestamp for the last update to this condition - // +optional - LastUpdateTime unversioned.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,4,opt,name=lastUpdateTime"` -} - -type CertificateSigningRequestList struct { - unversioned.TypeMeta `json:",inline"` - // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - Items []CertificateSigningRequest `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index cf66d07467..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_CertificateSigningRequest = map[string]string{ - "": "Describes a certificate signing request", - "spec": "The certificate request itself and any additional information.", - "status": "Derived information about the request.", -} - -func (CertificateSigningRequest) SwaggerDoc() map[string]string { - return map_CertificateSigningRequest -} - -var map_CertificateSigningRequestCondition = map[string]string{ - "type": "request approval state, currently Approved or Denied.", - "reason": "brief reason for the request state", - "message": "human readable message with details about the request state", - "lastUpdateTime": "timestamp for the last update to this condition", -} - -func (CertificateSigningRequestCondition) SwaggerDoc() map[string]string { - return map_CertificateSigningRequestCondition -} - -var map_CertificateSigningRequestSpec = map[string]string{ - "": "This information is immutable after the request is created. 
Only the Request and ExtraInfo fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users.", - "request": "Base64-encoded PKCS#10 CSR data", - "username": "Information about the requesting user (if relevant) See user.Info interface for details", -} - -func (CertificateSigningRequestSpec) SwaggerDoc() map[string]string { - return map_CertificateSigningRequestSpec -} - -var map_CertificateSigningRequestStatus = map[string]string{ - "conditions": "Conditions applied to the request, such as approval or denial.", - "certificate": "If request was approved, the controller will place the issued certificate here.", -} - -func (CertificateSigningRequestStatus) SwaggerDoc() map[string]string { - return map_CertificateSigningRequestStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/zz_generated.conversion.go deleted file mode 100644 index 8b93358ed5..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/zz_generated.conversion.go +++ /dev/null @@ -1,173 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1alpha1 - -import ( - certificates "k8s.io/kubernetes/pkg/apis/certificates" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1alpha1_CertificateSigningRequest_To_certificates_CertificateSigningRequest, - Convert_certificates_CertificateSigningRequest_To_v1alpha1_CertificateSigningRequest, - Convert_v1alpha1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition, - Convert_certificates_CertificateSigningRequestCondition_To_v1alpha1_CertificateSigningRequestCondition, - Convert_v1alpha1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList, - Convert_certificates_CertificateSigningRequestList_To_v1alpha1_CertificateSigningRequestList, - Convert_v1alpha1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec, - Convert_certificates_CertificateSigningRequestSpec_To_v1alpha1_CertificateSigningRequestSpec, - Convert_v1alpha1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus, - Convert_certificates_CertificateSigningRequestStatus_To_v1alpha1_CertificateSigningRequestStatus, - ) -} - -func autoConvert_v1alpha1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in *CertificateSigningRequest, out *certificates.CertificateSigningRequest, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v1alpha1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1alpha1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in *CertificateSigningRequest, out *certificates.CertificateSigningRequest, s conversion.Scope) error { - return autoConvert_v1alpha1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in, out, s) -} - -func autoConvert_certificates_CertificateSigningRequest_To_v1alpha1_CertificateSigningRequest(in *certificates.CertificateSigningRequest, out *CertificateSigningRequest, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_certificates_CertificateSigningRequestSpec_To_v1alpha1_CertificateSigningRequestSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_certificates_CertificateSigningRequestStatus_To_v1alpha1_CertificateSigningRequestStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_certificates_CertificateSigningRequest_To_v1alpha1_CertificateSigningRequest(in *certificates.CertificateSigningRequest, out *CertificateSigningRequest, s conversion.Scope) error { - return autoConvert_certificates_CertificateSigningRequest_To_v1alpha1_CertificateSigningRequest(in, out, s) -} - -func autoConvert_v1alpha1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in *CertificateSigningRequestCondition, out *certificates.CertificateSigningRequestCondition, s conversion.Scope) error { - out.Type = certificates.RequestConditionType(in.Type) - out.Reason = in.Reason - out.Message = in.Message - out.LastUpdateTime = in.LastUpdateTime - return nil -} - -func Convert_v1alpha1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in *CertificateSigningRequestCondition, out *certificates.CertificateSigningRequestCondition, s conversion.Scope) error { - return autoConvert_v1alpha1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in, out, s) -} - -func autoConvert_certificates_CertificateSigningRequestCondition_To_v1alpha1_CertificateSigningRequestCondition(in *certificates.CertificateSigningRequestCondition, out *CertificateSigningRequestCondition, s conversion.Scope) error { - out.Type = RequestConditionType(in.Type) - out.Reason = in.Reason - out.Message = in.Message - out.LastUpdateTime = in.LastUpdateTime - return nil -} - -func Convert_certificates_CertificateSigningRequestCondition_To_v1alpha1_CertificateSigningRequestCondition(in *certificates.CertificateSigningRequestCondition, out *CertificateSigningRequestCondition, s conversion.Scope) error { - return autoConvert_certificates_CertificateSigningRequestCondition_To_v1alpha1_CertificateSigningRequestCondition(in, out, s) -} - -func autoConvert_v1alpha1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in *CertificateSigningRequestList, out *certificates.CertificateSigningRequestList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]certificates.CertificateSigningRequest)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1alpha1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in *CertificateSigningRequestList, out *certificates.CertificateSigningRequestList, s conversion.Scope) error { - return autoConvert_v1alpha1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in, out, s) -} - -func autoConvert_certificates_CertificateSigningRequestList_To_v1alpha1_CertificateSigningRequestList(in *certificates.CertificateSigningRequestList, out *CertificateSigningRequestList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]CertificateSigningRequest)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_certificates_CertificateSigningRequestList_To_v1alpha1_CertificateSigningRequestList(in *certificates.CertificateSigningRequestList, out *CertificateSigningRequestList, s conversion.Scope) error { - return 
autoConvert_certificates_CertificateSigningRequestList_To_v1alpha1_CertificateSigningRequestList(in, out, s) -} - -func autoConvert_v1alpha1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in *CertificateSigningRequestSpec, out *certificates.CertificateSigningRequestSpec, s conversion.Scope) error { - out.Request = *(*[]byte)(unsafe.Pointer(&in.Request)) - out.Username = in.Username - out.UID = in.UID - out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) - return nil -} - -func Convert_v1alpha1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in *CertificateSigningRequestSpec, out *certificates.CertificateSigningRequestSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in, out, s) -} - -func autoConvert_certificates_CertificateSigningRequestSpec_To_v1alpha1_CertificateSigningRequestSpec(in *certificates.CertificateSigningRequestSpec, out *CertificateSigningRequestSpec, s conversion.Scope) error { - out.Request = *(*[]byte)(unsafe.Pointer(&in.Request)) - out.Username = in.Username - out.UID = in.UID - out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) - return nil -} - -func Convert_certificates_CertificateSigningRequestSpec_To_v1alpha1_CertificateSigningRequestSpec(in *certificates.CertificateSigningRequestSpec, out *CertificateSigningRequestSpec, s conversion.Scope) error { - return autoConvert_certificates_CertificateSigningRequestSpec_To_v1alpha1_CertificateSigningRequestSpec(in, out, s) -} - -func autoConvert_v1alpha1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in *CertificateSigningRequestStatus, out *certificates.CertificateSigningRequestStatus, s conversion.Scope) error { - out.Conditions = *(*[]certificates.CertificateSigningRequestCondition)(unsafe.Pointer(&in.Conditions)) - out.Certificate = *(*[]byte)(unsafe.Pointer(&in.Certificate)) - return nil -} - -func Convert_v1alpha1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in *CertificateSigningRequestStatus, out *certificates.CertificateSigningRequestStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in, out, s) -} - -func autoConvert_certificates_CertificateSigningRequestStatus_To_v1alpha1_CertificateSigningRequestStatus(in *certificates.CertificateSigningRequestStatus, out *CertificateSigningRequestStatus, s conversion.Scope) error { - out.Conditions = *(*[]CertificateSigningRequestCondition)(unsafe.Pointer(&in.Conditions)) - out.Certificate = *(*[]byte)(unsafe.Pointer(&in.Certificate)) - return nil -} - -func Convert_certificates_CertificateSigningRequestStatus_To_v1alpha1_CertificateSigningRequestStatus(in *certificates.CertificateSigningRequestStatus, out *CertificateSigningRequestStatus, s conversion.Scope) error { - return autoConvert_certificates_CertificateSigningRequestStatus_To_v1alpha1_CertificateSigningRequestStatus(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index ce2e16969e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,145 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1alpha1 - -import ( - v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_CertificateSigningRequest, InType: reflect.TypeOf(&CertificateSigningRequest{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_CertificateSigningRequestCondition, InType: reflect.TypeOf(&CertificateSigningRequestCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_CertificateSigningRequestList, InType: reflect.TypeOf(&CertificateSigningRequestList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_CertificateSigningRequestSpec, InType: reflect.TypeOf(&CertificateSigningRequestSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_CertificateSigningRequestStatus, InType: reflect.TypeOf(&CertificateSigningRequestStatus{})}, - ) -} - -func DeepCopy_v1alpha1_CertificateSigningRequest(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CertificateSigningRequest) - out := out.(*CertificateSigningRequest) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1alpha1_CertificateSigningRequestSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1alpha1_CertificateSigningRequestStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1alpha1_CertificateSigningRequestCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CertificateSigningRequestCondition) - out := out.(*CertificateSigningRequestCondition) - out.Type = in.Type - out.Reason = in.Reason - out.Message = in.Message - out.LastUpdateTime = in.LastUpdateTime.DeepCopy() - return nil - } -} - -func DeepCopy_v1alpha1_CertificateSigningRequestList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CertificateSigningRequestList) - out := out.(*CertificateSigningRequestList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CertificateSigningRequest, len(*in)) - for i := range *in { - if err := DeepCopy_v1alpha1_CertificateSigningRequest(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil - } -} - -func DeepCopy_v1alpha1_CertificateSigningRequestSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CertificateSigningRequestSpec) - out := 
out.(*CertificateSigningRequestSpec) - if in.Request != nil { - in, out := &in.Request, &out.Request - *out = make([]byte, len(*in)) - copy(*out, *in) - } else { - out.Request = nil - } - out.Username = in.Username - out.UID = in.UID - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]string, len(*in)) - copy(*out, *in) - } else { - out.Groups = nil - } - return nil - } -} - -func DeepCopy_v1alpha1_CertificateSigningRequestStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CertificateSigningRequestStatus) - out := out.(*CertificateSigningRequestStatus) - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]CertificateSigningRequestCondition, len(*in)) - for i := range *in { - if err := DeepCopy_v1alpha1_CertificateSigningRequestCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - if in.Certificate != nil { - in, out := &in.Certificate, &out.Certificate - *out = make([]byte, len(*in)) - copy(*out, *in) - } else { - out.Certificate = nil - } - return nil - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/BUILD new file mode 100644 index 0000000000..345ced162d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/BUILD @@ -0,0 +1,51 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "conversion.go", + "defaults.go", + "doc.go", + "generated.pb.go", + "helpers.go", + "register.go", + "types.generated.go", + "types.go", + "types_swagger_doc_generated.go", + "zz_generated.conversion.go", + "zz_generated.deepcopy.go", + "zz_generated.defaults.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/certificates:go_default_library", + "//vendor:github.com/gogo/protobuf/proto", + "//vendor:github.com/gogo/protobuf/sortkeys", + "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/types", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/conversion.go new file mode 100644 index 0000000000..b9cf0b0163 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/conversion.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions here. Currently there are none. + + return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.String(), "CertificateSigningRequest", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/defaults.go new file mode 100644 index 0000000000..cd6a29d339 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/defaults.go @@ -0,0 +1,31 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import "k8s.io/apimachinery/pkg/runtime" + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + RegisterDefaults(scheme) + return scheme.AddDefaultingFuncs( + SetDefaults_CertificateSigningRequestSpec, + ) +} +func SetDefaults_CertificateSigningRequestSpec(obj *CertificateSigningRequestSpec) { + if obj.Usages == nil { + obj.Usages = []KeyUsage{UsageDigitalSignature, UsageKeyEncipherment} + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/doc.go new file mode 100644 index 0000000000..703467121f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/certificates +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=certificates.k8s.io +package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/generated.pb.go new file mode 100644 index 0000000000..b37d9abe6c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/generated.pb.go @@ -0,0 +1,1674 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/certificates/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/certificates/v1beta1/generated.proto + + It has these top-level messages: + CertificateSigningRequest + CertificateSigningRequestCondition + CertificateSigningRequestList + CertificateSigningRequestSpec + CertificateSigningRequestStatus + ExtraValue +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} } +func (*CertificateSigningRequest) ProtoMessage() {} +func (*CertificateSigningRequest) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{0} +} + +func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} } +func (*CertificateSigningRequestCondition) ProtoMessage() {} +func (*CertificateSigningRequestCondition) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} } +func (*CertificateSigningRequestList) ProtoMessage() {} +func (*CertificateSigningRequestList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + +func (m *CertificateSigningRequestSpec) Reset() { *m = CertificateSigningRequestSpec{} } +func (*CertificateSigningRequestSpec) ProtoMessage() {} +func (*CertificateSigningRequestSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{3} +} + +func (m *CertificateSigningRequestStatus) Reset() { *m = CertificateSigningRequestStatus{} } +func (*CertificateSigningRequestStatus) ProtoMessage() {} +func (*CertificateSigningRequestStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{4} +} + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func init() { + proto.RegisterType((*CertificateSigningRequest)(nil), "k8s.io.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequest") + proto.RegisterType((*CertificateSigningRequestCondition)(nil), "k8s.io.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequestCondition") + proto.RegisterType((*CertificateSigningRequestList)(nil), "k8s.io.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequestList") + proto.RegisterType((*CertificateSigningRequestSpec)(nil), 
"k8s.io.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequestSpec") + proto.RegisterType((*CertificateSigningRequestStatus)(nil), "k8s.io.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequestStatus") + proto.RegisterType((*ExtraValue)(nil), "k8s.io.kubernetes.pkg.apis.certificates.v1beta1.ExtraValue") +} +func (m *CertificateSigningRequest) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CertificateSigningRequest) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *CertificateSigningRequestCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CertificateSigningRequestCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastUpdateTime.Size())) + n4, err := m.LastUpdateTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + return i, nil +} + +func (m *CertificateSigningRequestList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CertificateSigningRequestList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CertificateSigningRequestSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CertificateSigningRequestSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Request != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Request))) + i += copy(data[i:], m.Request) + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Username))) + i += copy(data[i:], m.Username) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, 
uint64(len(m.UID))) + i += copy(data[i:], m.UID) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + data[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Usages) > 0 { + for _, s := range m.Usages { + data[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Extra) > 0 { + for k := range m.Extra { + data[i] = 0x32 + i++ + v := m.Extra[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n6, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + } + } + return i, nil +} + +func (m *CertificateSigningRequestStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CertificateSigningRequestStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Certificate != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Certificate))) + i += copy(data[i:], m.Certificate) + } + return i, nil +} + +func (m ExtraValue) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m ExtraValue) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *CertificateSigningRequest) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CertificateSigningRequestCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CertificateSigningRequestList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CertificateSigningRequestSpec) Size() (n int) { + var l int + _ = l + if m.Request != nil { + l = len(m.Request) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Username) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Usages) > 0 { + for _, s := range m.Usages { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *CertificateSigningRequestStatus) Size() (n int) { + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Certificate != nil { + l = len(m.Certificate) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m ExtraValue) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CertificateSigningRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CertificateSigningRequest{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CertificateSigningRequestSpec", "CertificateSigningRequestSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CertificateSigningRequestStatus", "CertificateSigningRequestStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CertificateSigningRequestCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CertificateSigningRequestCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CertificateSigningRequestList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CertificateSigningRequestList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.Items), "CertificateSigningRequest", "CertificateSigningRequest", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CertificateSigningRequestSpec) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&CertificateSigningRequestSpec{`, + `Request:` + valueToStringGenerated(this.Request) + `,`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Usages:` + fmt.Sprintf("%v", this.Usages) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func (this *CertificateSigningRequestStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CertificateSigningRequestStatus{`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "CertificateSigningRequestCondition", "CertificateSigningRequestCondition", 1), `&`, ``, 1) + `,`, + `Certificate:` + valueToStringGenerated(this.Certificate) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CertificateSigningRequest) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateSigningRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateSigningRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil 
{ + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CertificateSigningRequestCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateSigningRequestCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateSigningRequestCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = RequestConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CertificateSigningRequestList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateSigningRequestList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateSigningRequestList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CertificateSigningRequest{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex 
:= iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateSigningRequestSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateSigningRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Request = append(m.Request[:0], data[iNdEx:postIndex]...) + if m.Request == nil { + m.Request = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usages", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } 
+ } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Usages = append(m.Usages, KeyUsage(data[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + m.Extra[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CertificateSigningRequestStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateSigningRequestStatus: wiretype end group 
for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateSigningRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, CertificateSigningRequestCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Certificate = append(m.Certificate[:0], data[iNdEx:postIndex]...) + if m.Certificate == nil { + m.Certificate = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExtraValue) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func 
skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 839 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x54, 0xcf, 0x8f, 0xdb, 0x44, + 0x14, 0x8e, 0xf3, 0x6b, 0x93, 0xc9, 0xb2, 0xad, 0x46, 0xa8, 0x32, 0x2b, 0xd5, 0x5e, 0x59, 0x80, + 0xb6, 0xa8, 0xd8, 0x64, 0x41, 0xb0, 0x2a, 0x07, 0x24, 0x97, 0x0a, 0x15, 0x5a, 0x7e, 0xcc, 0x36, + 0x48, 0x20, 0x0e, 0x4c, 0x9c, 0x57, 0xef, 0x34, 0xf1, 0x0f, 0x3c, 0xe3, 0x68, 0x73, 0x41, 0xbd, + 0x71, 0xe5, 0xc8, 0x05, 0x89, 0x3f, 0x67, 0x8f, 0x3d, 0x72, 0x40, 0x11, 0x1b, 0x4e, 0x5c, 0xf8, + 0x03, 0x7a, 0x42, 0x33, 0x9e, 0xc4, 0x66, 0xd3, 0xd0, 0x56, 0xca, 0xcd, 0xf3, 0xcd, 0xf7, 0xbe, + 0xf7, 0xde, 0xf7, 0x9e, 0x07, 0x7d, 0x34, 0x3e, 0xe6, 0x2e, 0x4b, 0xbc, 0x71, 0x3e, 0x84, 0x2c, + 0x06, 0x01, 0xdc, 0x4b, 0xc7, 0xa1, 0x47, 0x53, 0xc6, 0xbd, 0x00, 0x32, 0xc1, 0x1e, 0xb2, 0x80, + 0x4a, 0x74, 0xda, 0x1f, 0x82, 0xa0, 0x7d, 0x2f, 0x84, 0x18, 0x32, 0x2a, 0x60, 0xe4, 0xa6, 0x59, + 0x22, 0x12, 0xec, 0x15, 0x02, 0x6e, 0x29, 0xe0, 0xa6, 0xe3, 0xd0, 0x95, 0x02, 0x6e, 0x55, 0xc0, + 0xd5, 0x02, 0xfb, 0x6f, 0x87, 0x4c, 0x9c, 0xe6, 0x43, 0x37, 0x48, 0x22, 0x2f, 0x4c, 0xc2, 0xc4, + 0x53, 0x3a, 0xc3, 0xfc, 0xa1, 0x3a, 0xa9, 0x83, 0xfa, 0x2a, 0xf4, 0xf7, 0xdf, 0xd3, 0x05, 0xd2, + 0x94, 0x45, 0x34, 0x38, 0x65, 0x31, 0x64, 0xb3, 0xb2, 0xc4, 0x08, 0x04, 0xf5, 0xa6, 0x6b, 0x55, + 0xed, 0x7b, 0x9b, 0xa2, 0xb2, 0x3c, 0x16, 0x2c, 0x82, 0xb5, 0x80, 0xf7, 0x9f, 0x17, 0xc0, 0x83, + 0x53, 0x88, 0xe8, 0x5a, 0xdc, 0xbb, 0x9b, 0xe2, 0x72, 0xc1, 0x26, 0x1e, 0x8b, 0x05, 0x17, 0xd9, + 0x5a, 0x50, 0xa5, 0x27, 0x0e, 0xd9, 0x14, 
0xb2, 0xb2, 0x21, 0x38, 0xa3, 0x51, 0x3a, 0x81, 0x67, + 0xf5, 0x74, 0x73, 0xe3, 0xa8, 0x9e, 0xc1, 0x76, 0xfe, 0xae, 0xa3, 0xd7, 0x6e, 0x97, 0xfe, 0x9f, + 0xb0, 0x30, 0x66, 0x71, 0x48, 0xe0, 0x87, 0x1c, 0xb8, 0xc0, 0xdf, 0xa3, 0x8e, 0xb4, 0x6e, 0x44, + 0x05, 0x35, 0x8d, 0x03, 0xe3, 0xb0, 0x77, 0xf4, 0x8e, 0xab, 0x07, 0x59, 0xed, 0xa4, 0x1c, 0xa5, + 0x64, 0xbb, 0xd3, 0xbe, 0xfb, 0xc5, 0xf0, 0x11, 0x04, 0xe2, 0x3e, 0x08, 0xea, 0xe3, 0xf3, 0xb9, + 0x5d, 0x5b, 0xcc, 0x6d, 0x54, 0x62, 0x64, 0xa5, 0x8a, 0x53, 0xd4, 0xe4, 0x29, 0x04, 0x66, 0x5d, + 0xa9, 0x7f, 0xee, 0xbe, 0xe4, 0x9a, 0xb8, 0x1b, 0x6b, 0x3f, 0x49, 0x21, 0xf0, 0x77, 0x75, 0xee, + 0xa6, 0x3c, 0x11, 0x95, 0x09, 0x9f, 0xa1, 0x36, 0x17, 0x54, 0xe4, 0xdc, 0x6c, 0xa8, 0x9c, 0x5f, + 0x6e, 0x31, 0xa7, 0xd2, 0xf5, 0xf7, 0x74, 0xd6, 0x76, 0x71, 0x26, 0x3a, 0x9f, 0xf3, 0x6b, 0x1d, + 0x39, 0x1b, 0x63, 0x6f, 0x27, 0xf1, 0x88, 0x09, 0x96, 0xc4, 0xf8, 0x18, 0x35, 0xc5, 0x2c, 0x05, + 0x65, 0x78, 0xd7, 0x7f, 0x7d, 0xd9, 0xc2, 0x83, 0x59, 0x0a, 0x4f, 0xe7, 0xf6, 0xab, 0x97, 0xf9, + 0x12, 0x27, 0x2a, 0x02, 0xbf, 0x89, 0xda, 0x19, 0x50, 0x9e, 0xc4, 0xca, 0xce, 0x6e, 0x59, 0x08, + 0x51, 0x28, 0xd1, 0xb7, 0xf8, 0x06, 0xda, 0x89, 0x80, 0x73, 0x1a, 0x82, 0xf2, 0xa0, 0xeb, 0x5f, + 0xd1, 0xc4, 0x9d, 0xfb, 0x05, 0x4c, 0x96, 0xf7, 0xf8, 0x11, 0xda, 0x9b, 0x50, 0x2e, 0x06, 0xe9, + 0x88, 0x0a, 0x78, 0xc0, 0x22, 0x30, 0x9b, 0xca, 0xb5, 0xb7, 0x5e, 0x6c, 0x0f, 0x64, 0x84, 0x7f, + 0x4d, 0xab, 0xef, 0xdd, 0xfb, 0x8f, 0x12, 0xb9, 0xa4, 0xec, 0xfc, 0x63, 0xa0, 0xeb, 0x1b, 0xfd, + 0xb9, 0xc7, 0xb8, 0xc0, 0xdf, 0xad, 0xed, 0xa3, 0xfb, 0x62, 0x75, 0xc8, 0x68, 0xb5, 0x8d, 0x57, + 0x75, 0x2d, 0x9d, 0x25, 0x52, 0xd9, 0xc5, 0x04, 0xb5, 0x98, 0x80, 0x88, 0x9b, 0xf5, 0x83, 0xc6, + 0x61, 0xef, 0xe8, 0xd3, 0xed, 0x2d, 0x86, 0xff, 0x8a, 0x4e, 0xdb, 0xba, 0x2b, 0x13, 0x90, 0x22, + 0x8f, 0xb3, 0x68, 0xfc, 0x4f, 0xc3, 0x72, 0x65, 0xf1, 0x1b, 0x68, 0x27, 0x2b, 0x8e, 0xaa, 0xdf, + 0x5d, 0xbf, 0x27, 0xa7, 0xa4, 0x19, 0x64, 0x79, 0x87, 0x6f, 0xa2, 0x4e, 0xce, 0x21, 0x8b, 0x69, + 0x04, 0x7a, 0xf4, 0xab, 0x3e, 0x07, 0x1a, 0x27, 0x2b, 0x06, 0xbe, 0x8e, 0x1a, 0x39, 0x1b, 0xe9, + 0xd1, 0xf7, 0x34, 0xb1, 0x31, 0xb8, 0xfb, 0x31, 0x91, 0x38, 0x76, 0x50, 0x3b, 0xcc, 0x92, 0x3c, + 0xe5, 0x66, 0xf3, 0xa0, 0x71, 0xd8, 0xf5, 0x91, 0xdc, 0xa0, 0x4f, 0x14, 0x42, 0xf4, 0x0d, 0x3e, + 0x42, 0x9d, 0x31, 0xcc, 0x06, 0x6a, 0x85, 0x5a, 0x8a, 0x75, 0x4d, 0xb2, 0x14, 0xc0, 0x9f, 0xce, + 0xed, 0xce, 0x67, 0xfa, 0x96, 0xac, 0x78, 0xf8, 0x47, 0xd4, 0x82, 0x33, 0x91, 0x51, 0xb3, 0xad, + 0xec, 0xfd, 0x66, 0xbb, 0xff, 0xba, 0x7b, 0x47, 0x6a, 0xdf, 0x89, 0x45, 0x36, 0x2b, 0xdd, 0x56, + 0x18, 0x29, 0xd2, 0xee, 0xe7, 0x08, 0x95, 0x1c, 0x7c, 0x15, 0x35, 0xc6, 0x30, 0x2b, 0x7e, 0x32, + 0x22, 0x3f, 0xf1, 0x57, 0xa8, 0x35, 0xa5, 0x93, 0x1c, 0xf4, 0x5b, 0xf4, 0xe1, 0x4b, 0xd7, 0xa7, + 0xd4, 0xbf, 0x96, 0x12, 0xa4, 0x50, 0xba, 0x55, 0x3f, 0x36, 0x9c, 0xb9, 0x81, 0xec, 0xe7, 0xbc, + 0x18, 0xf8, 0x27, 0x03, 0xa1, 0x60, 0xf9, 0x43, 0x73, 0xd3, 0x50, 0x06, 0x9d, 0x6c, 0xcf, 0xa0, + 0xd5, 0x63, 0x51, 0xbe, 0xc6, 0x2b, 0x88, 0x93, 0x4a, 0x6a, 0xdc, 0x47, 0xbd, 0x8a, 0xb4, 0xb2, + 0x62, 0xd7, 0xbf, 0xb2, 0x98, 0xdb, 0xbd, 0x8a, 0x38, 0xa9, 0x72, 0x9c, 0x0f, 0xb4, 0xaf, 0xaa, + 0x73, 0x6c, 0x2f, 0x7f, 0x22, 0x43, 0xad, 0x45, 0xf7, 0xf2, 0xd2, 0xdf, 0xea, 0xfc, 0xf2, 0x9b, + 0x5d, 0x7b, 0xfc, 0xc7, 0x41, 0xcd, 0xbf, 0x71, 0x7e, 0x61, 0xd5, 0x9e, 0x5c, 0x58, 0xb5, 0xdf, + 0x2f, 0xac, 0xda, 0xe3, 0x85, 0x65, 0x9c, 0x2f, 0x2c, 0xe3, 0xc9, 0xc2, 0x32, 0xfe, 0x5c, 0x58, + 0xc6, 0xcf, 0x7f, 0x59, 0xb5, 0x6f, 0x77, 0x74, 0x67, 0xff, 0x06, 
0x00, 0x00, 0xff, 0xff, 0x07, + 0x0c, 0x3b, 0x3a, 0x80, 0x08, 0x00, 0x00, +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/generated.proto new file mode 100644 index 0000000000..215dc39a42 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/generated.proto @@ -0,0 +1,124 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.certificates.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// Describes a certificate signing request +message CertificateSigningRequest { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The certificate request itself and any additional information. + // +optional + optional CertificateSigningRequestSpec spec = 2; + + // Derived information about the request. + // +optional + optional CertificateSigningRequestStatus status = 3; +} + +message CertificateSigningRequestCondition { + // request approval state, currently Approved or Denied. + optional string type = 1; + + // brief reason for the request state + // +optional + optional string reason = 2; + + // human readable message with details about the request state + // +optional + optional string message = 3; + + // timestamp for the last update to this condition + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; +} + +message CertificateSigningRequestList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated CertificateSigningRequest items = 2; +} + +// This information is immutable after the request is created. Only the Request +// and Usages fields can be set on creation, other fields are derived by +// Kubernetes and cannot be modified by users. +message CertificateSigningRequestSpec { + // Base64-encoded PKCS#10 CSR data + optional bytes request = 1; + + // allowedUsages specifies a set of usage contexts the key will be + // valid for. + // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + repeated string usages = 5; + + // Information about the requesting user. + // See user.Info interface for details. + // +optional + optional string username = 2; + + // UID information about the requesting user. + // See user.Info interface for details. 
+ // +optional + optional string uid = 3; + + // Group information about the requesting user. + // See user.Info interface for details. + // +optional + repeated string groups = 4; + + // Extra information about the requesting user. + // See user.Info interface for details. + // +optional + map<string, ExtraValue> extra = 6; +} + +message CertificateSigningRequestStatus { + // Conditions applied to the request, such as approval or denial. + // +optional + repeated CertificateSigningRequestCondition conditions = 1; + + // If request was approved, the controller will place the issued certificate here. + // +optional + optional bytes certificate = 2; +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message ExtraValue { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/helpers.go new file mode 100644 index 0000000000..1375063c10 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/helpers.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "crypto/x509" + "encoding/pem" + "errors" +) + +// ParseCSR extracts the CSR from the API object and decodes it. +func ParseCSR(obj *CertificateSigningRequest) (*x509.CertificateRequest, error) { + // extract PEM from request object + pemBytes := obj.Spec.Request + block, _ := pem.Decode(pemBytes) + if block == nil || block.Type != "CERTIFICATE REQUEST" { + return nil, errors.New("PEM block type must be CERTIFICATE REQUEST") + } + csr, err := x509.ParseCertificateRequest(block.Bytes) + if err != nil { + return nil, err + } + return csr, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/register.go new file mode 100644 index 0000000000..6574de971b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/register.go @@ -0,0 +1,59 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "certificates.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addConversionFuncs, addDefaultingFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CertificateSigningRequest{}, + &CertificateSigningRequestList{}, + ) + + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} + +func (obj *CertificateSigningRequest) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } +func (obj *CertificateSigningRequestList) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/types.generated.go new file mode 100644 index 0000000000..50d908bd7c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/types.generated.go @@ -0,0 +1,2624 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +package v1beta1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 + } +} + +func (x *CertificateSigningRequest) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && 
z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CertificateSigningRequest) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CertificateSigningRequest) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } 
+ } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = CertificateSigningRequestSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = CertificateSigningRequestStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CertificateSigningRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = CertificateSigningRequestSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = CertificateSigningRequestStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CertificateSigningRequestSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = len(x.Usages) != 0 + yyq2[2] = 
x.Username != "" + yyq2[3] = x.UID != "" + yyq2[4] = len(x.Groups) != 0 + yyq2[5] = len(x.Extra) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Request == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Request)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("request")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Request == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Request)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Usages == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceKeyUsage(([]KeyUsage)(x.Usages), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("usages")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Usages == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceKeyUsage(([]KeyUsage)(x.Usages), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Username)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("username")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Username)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("uid")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Groups == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("groups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Groups == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.Extra == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("extra")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Extra == nil { + r.EncodeNil() + } else { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CertificateSigningRequestSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CertificateSigningRequestSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "request": + if r.TryDecodeAsNil() { + x.Request = nil + } else { + yyv4 := &x.Request + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *yyv4 = r.DecodeBytes(*(*[]byte)(yyv4), false, false) + } + } + case "usages": + if r.TryDecodeAsNil() { + x.Usages = nil + } else { + yyv6 := &x.Usages + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceKeyUsage((*[]KeyUsage)(yyv6), d) + } + } + case "username": + if r.TryDecodeAsNil() { + x.Username = "" + } else { + yyv8 := &x.Username + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "uid": + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv10 := &x.UID + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "groups": + if r.TryDecodeAsNil() { + x.Groups = nil + } else { + yyv12 := &x.Groups + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + z.F.DecSliceStringX(yyv12, false, d) + } + } + case "extra": + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv14 := &x.Extra + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv14), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end 
switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CertificateSigningRequestSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Request = nil + } else { + yyv17 := &x.Request + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *yyv17 = r.DecodeBytes(*(*[]byte)(yyv17), false, false) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Usages = nil + } else { + yyv19 := &x.Usages + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceKeyUsage((*[]KeyUsage)(yyv19), d) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Username = "" + } else { + yyv21 := &x.Username + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv23 := &x.UID + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Groups = nil + } else { + yyv25 := &x.Groups + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + z.F.DecSliceStringX(yyv25, false, d) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv27 := &x.Extra + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv27), d) + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj16-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ExtraValue) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + h.encExtraValue((ExtraValue)(x), e) + } + } +} + +func (x *ExtraValue) 
CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decExtraValue((*ExtraValue)(x), d) + } +} + +func (x *CertificateSigningRequestStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Conditions) != 0 + yyq2[1] = len(x.Certificate) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceCertificateSigningRequestCondition(([]CertificateSigningRequestCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceCertificateSigningRequestCondition(([]CertificateSigningRequestCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Certificate == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Certificate)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("certificate")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Certificate == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Certificate)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CertificateSigningRequestStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CertificateSigningRequestStatus) codecDecodeSelfFromMap(l 
int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv4 := &x.Conditions + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceCertificateSigningRequestCondition((*[]CertificateSigningRequestCondition)(yyv4), d) + } + } + case "certificate": + if r.TryDecodeAsNil() { + x.Certificate = nil + } else { + yyv6 := &x.Certificate + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *yyv6 = r.DecodeBytes(*(*[]byte)(yyv6), false, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CertificateSigningRequestStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv9 := &x.Conditions + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceCertificateSigningRequestCondition((*[]CertificateSigningRequestCondition)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Certificate = nil + } else { + yyv11 := &x.Certificate + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *yyv11 = r.DecodeBytes(*(*[]byte)(yyv11), false, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x RequestConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *RequestConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *CertificateSigningRequestCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if 
z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Reason != "" + yyq2[2] = x.Message != "" + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy13 := &x.LastUpdateTime + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(yy13) { + } else if yym14 { + z.EncBinaryMarshal(yy13) + } else if !yym14 && z.IsJSONHandle() { + z.EncJSONMarshal(yy13) + } else { + z.EncFallback(yy13) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastUpdateTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy15 := &x.LastUpdateTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CertificateSigningRequestCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } 
else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CertificateSigningRequestCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv5 := &x.Reason + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv7 := &x.Message + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + case "lastUpdateTime": + if r.TryDecodeAsNil() { + x.LastUpdateTime = pkg1_v1.Time{} + } else { + yyv9 := &x.LastUpdateTime + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if yym10 { + z.DecBinaryUnmarshal(yyv9) + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv9) + } else { + z.DecFallback(yyv9, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CertificateSigningRequestCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv12 := &x.Type + yyv12.CodecDecodeSelf(d) + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv13 := &x.Reason + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv15 := &x.Message + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastUpdateTime = pkg1_v1.Time{} + } else { + yyv17 := &x.LastUpdateTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CertificateSigningRequestList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := 
z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceCertificateSigningRequest(([]CertificateSigningRequest)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceCertificateSigningRequest(([]CertificateSigningRequest)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CertificateSigningRequestList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CertificateSigningRequestList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceCertificateSigningRequest((*[]CertificateSigningRequest)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CertificateSigningRequestList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceCertificateSigningRequest((*[]CertificateSigningRequest)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x KeyUsage) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *KeyUsage) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x codecSelfer1234) encSliceKeyUsage(v []KeyUsage, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yyv1.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceKeyUsage(v *[]KeyUsage, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []KeyUsage{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + 
if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]KeyUsage, yyrl1) + } + } else { + yyv1 = make([]KeyUsage, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 KeyUsage + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []KeyUsage{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encMapstringExtraValue(v map[string]ExtraValue, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decMapstringExtraValue(v *map[string]ExtraValue, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string]ExtraValue, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 ExtraValue + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv4 := &yymv1 + yyv4.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv5 := &yymk1 + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv7 := &yymv1 + yyv7.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? 
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encExtraValue(v ExtraValue, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyv1)) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decExtraValue(v *ExtraValue, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]string, yyrl1) + } + } else { + yyv1 = make([]string, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 string + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv6 := &yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceCertificateSigningRequestCondition(v []CertificateSigningRequestCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCertificateSigningRequestCondition(v *[]CertificateSigningRequestCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []CertificateSigningRequestCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + 
yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]CertificateSigningRequestCondition, yyrl1) + } + } else { + yyv1 = make([]CertificateSigningRequestCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CertificateSigningRequestCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, CertificateSigningRequestCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CertificateSigningRequestCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, CertificateSigningRequestCondition{}) // var yyz1 CertificateSigningRequestCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = CertificateSigningRequestCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []CertificateSigningRequestCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceCertificateSigningRequest(v []CertificateSigningRequest, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCertificateSigningRequest(v *[]CertificateSigningRequest, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []CertificateSigningRequest{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 416) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]CertificateSigningRequest, yyrl1) + } + } else { + yyv1 = make([]CertificateSigningRequest, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CertificateSigningRequest{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, CertificateSigningRequest{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CertificateSigningRequest{} + } else { 
+ yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, CertificateSigningRequest{}) // var yyz1 CertificateSigningRequest + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = CertificateSigningRequest{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []CertificateSigningRequest{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/types.go new file mode 100644 index 0000000000..a9149ba8df --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/types.go @@ -0,0 +1,152 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient=true +// +nonNamespaced=true + +// Describes a certificate signing request +type CertificateSigningRequest struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The certificate request itself and any additional information. + // +optional + Spec CertificateSigningRequestSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Derived information about the request. + // +optional + Status CertificateSigningRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// This information is immutable after the request is created. Only the Request +// and Usages fields can be set on creation, other fields are derived by +// Kubernetes and cannot be modified by users. +type CertificateSigningRequestSpec struct { + // Base64-encoded PKCS#10 CSR data + Request []byte `json:"request" protobuf:"bytes,1,opt,name=request"` + + // allowedUsages specifies a set of usage contexts the key will be + // valid for. + // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + Usages []KeyUsage `json:"usages,omitempty" protobuf:"bytes,5,opt,name=keyUsage"` + + // Information about the requesting user. + // See user.Info interface for details. + // +optional + Username string `json:"username,omitempty" protobuf:"bytes,2,opt,name=username"` + // UID information about the requesting user. + // See user.Info interface for details. + // +optional + UID string `json:"uid,omitempty" protobuf:"bytes,3,opt,name=uid"` + // Group information about the requesting user. + // See user.Info interface for details. + // +optional + Groups []string `json:"groups,omitempty" protobuf:"bytes,4,rep,name=groups"` + // Extra information about the requesting user. + // See user.Info interface for details. 
+ // +optional + Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,6,rep,name=extra"` +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type ExtraValue []string + +func (t ExtraValue) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +type CertificateSigningRequestStatus struct { + // Conditions applied to the request, such as approval or denial. + // +optional + Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` + + // If request was approved, the controller will place the issued certificate here. + // +optional + Certificate []byte `json:"certificate,omitempty" protobuf:"bytes,2,opt,name=certificate"` +} + +type RequestConditionType string + +// These are the possible conditions for a certificate request. +const ( + CertificateApproved RequestConditionType = "Approved" + CertificateDenied RequestConditionType = "Denied" +) + +type CertificateSigningRequestCondition struct { + // request approval state, currently Approved or Denied. + Type RequestConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=RequestConditionType"` + // brief reason for the request state + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` + // human readable message with details about the request state + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` + // timestamp for the last update to this condition + // +optional + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,4,opt,name=lastUpdateTime"` +} + +type CertificateSigningRequestList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []CertificateSigningRequest `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// KeyUsages specifies valid usage contexts for keys. 
+// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 +// https://tools.ietf.org/html/rfc5280#section-4.2.1.12 +type KeyUsage string + +const ( + UsageSigning KeyUsage = "signing" + UsageDigitalSignature KeyUsage = "digital signature" + UsageContentCommittment KeyUsage = "content committment" + UsageKeyEncipherment KeyUsage = "key encipherment" + UsageKeyAgreement KeyUsage = "key agreement" + UsageDataEncipherment KeyUsage = "data encipherment" + UsageCertSign KeyUsage = "cert sign" + UsageCRLSign KeyUsage = "crl sign" + UsageEncipherOnly KeyUsage = "encipher only" + UsageDecipherOnly KeyUsage = "decipher only" + UsageAny KeyUsage = "any" + UsageServerAuth KeyUsage = "server auth" + UsageClientAuth KeyUsage = "client auth" + UsageCodeSigning KeyUsage = "code signing" + UsageEmailProtection KeyUsage = "email protection" + UsageSMIME KeyUsage = "s/mime" + UsageIPsecEndSystem KeyUsage = "ipsec end system" + UsageIPsecTunnel KeyUsage = "ipsec tunnel" + UsageIPsecUser KeyUsage = "ipsec user" + UsageTimestamping KeyUsage = "timestamping" + UsageOCSPSigning KeyUsage = "ocsp signing" + UsageMicrosoftSGC KeyUsage = "microsoft sgc" + UsageNetscapSGC KeyUsage = "netscape sgc" +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..4fd91df061 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,74 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
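The generated codec, swagger-doc, and conversion functions in this patch all operate on the v1beta1 types defined above (CertificateSigningRequest, CertificateSigningRequestSpec, KeyUsage, and friends). As a rough sketch of how a caller might populate one of these objects — the object name, the placeholder PEM bytes, and the import alias below are illustrative assumptions, not anything taken from this patch:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	certsv1beta1 "k8s.io/kubernetes/pkg/apis/certificates/v1beta1"
)

func main() {
	// csrPEM would normally hold a real PEM-encoded PKCS#10 request; this is a placeholder.
	csrPEM := []byte("-----BEGIN CERTIFICATE REQUEST-----\n...\n-----END CERTIFICATE REQUEST-----\n")

	csr := certsv1beta1.CertificateSigningRequest{
		ObjectMeta: metav1.ObjectMeta{Name: "example-node-csr"},
		Spec: certsv1beta1.CertificateSigningRequestSpec{
			// Only Request and Usages may be set on creation; the user fields
			// (Username, UID, Groups, Extra) are filled in by the API server.
			Request: csrPEM,
			Usages: []certsv1beta1.KeyUsage{
				certsv1beta1.UsageDigitalSignature,
				certsv1beta1.UsageKeyEncipherment,
				certsv1beta1.UsageClientAuth,
			},
		},
	}

	fmt.Println(csr.Name, len(csr.Spec.Usages))
}

This mirrors the spec's documented contract: the request bytes and the intended key usages are the only client-supplied fields, with everything else derived by Kubernetes.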
+// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_CertificateSigningRequest = map[string]string{ + "": "Describes a certificate signing request", + "spec": "The certificate request itself and any additional information.", + "status": "Derived information about the request.", +} + +func (CertificateSigningRequest) SwaggerDoc() map[string]string { + return map_CertificateSigningRequest +} + +var map_CertificateSigningRequestCondition = map[string]string{ + "type": "request approval state, currently Approved or Denied.", + "reason": "brief reason for the request state", + "message": "human readable message with details about the request state", + "lastUpdateTime": "timestamp for the last update to this condition", +} + +func (CertificateSigningRequestCondition) SwaggerDoc() map[string]string { + return map_CertificateSigningRequestCondition +} + +var map_CertificateSigningRequestSpec = map[string]string{ + "": "This information is immutable after the request is created. Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users.", + "request": "Base64-encoded PKCS#10 CSR data", + "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12", + "username": "Information about the requesting user. See user.Info interface for details.", + "uid": "UID information about the requesting user. See user.Info interface for details.", + "groups": "Group information about the requesting user. See user.Info interface for details.", + "extra": "Extra information about the requesting user. See user.Info interface for details.", +} + +func (CertificateSigningRequestSpec) SwaggerDoc() map[string]string { + return map_CertificateSigningRequestSpec +} + +var map_CertificateSigningRequestStatus = map[string]string{ + "conditions": "Conditions applied to the request, such as approval or denial.", + "certificate": "If request was approved, the controller will place the issued certificate here.", +} + +func (CertificateSigningRequestStatus) SwaggerDoc() map[string]string { + return map_CertificateSigningRequestStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/zz_generated.conversion.go new file mode 100644 index 0000000000..0ec3bf4c97 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/zz_generated.conversion.go @@ -0,0 +1,179 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! 
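The types_swagger_doc_generated.go file added above exposes the per-field documentation through SwaggerDoc() methods, which go-restful's swagger support discovers on the model types. A minimal, hedged illustration of reading those maps directly (the variable names and lookups are mine; the method and keys come from the generated file):

package main

import (
	"fmt"

	certsv1beta1 "k8s.io/kubernetes/pkg/apis/certificates/v1beta1"
)

func main() {
	// SwaggerDoc maps the empty key to the type-level description and each
	// JSON field name to its doc string.
	docs := certsv1beta1.CertificateSigningRequestSpec{}.SwaggerDoc()
	fmt.Println(docs[""])        // type-level description
	fmt.Println(docs["request"]) // "Base64-encoded PKCS#10 CSR data"
}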
+ +package v1beta1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + certificates "k8s.io/kubernetes/pkg/apis/certificates" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest, + Convert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest, + Convert_v1beta1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition, + Convert_certificates_CertificateSigningRequestCondition_To_v1beta1_CertificateSigningRequestCondition, + Convert_v1beta1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList, + Convert_certificates_CertificateSigningRequestList_To_v1beta1_CertificateSigningRequestList, + Convert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec, + Convert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec, + Convert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus, + Convert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus, + ) +} + +func autoConvert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in *CertificateSigningRequest, out *certificates.CertificateSigningRequest, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in *CertificateSigningRequest, out *certificates.CertificateSigningRequest, s conversion.Scope) error { + return autoConvert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in, out, s) +} + +func autoConvert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest(in *certificates.CertificateSigningRequest, out *CertificateSigningRequest, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest(in *certificates.CertificateSigningRequest, out *CertificateSigningRequest, s conversion.Scope) error { + return autoConvert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest(in, out, s) +} + +func autoConvert_v1beta1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in *CertificateSigningRequestCondition, out *certificates.CertificateSigningRequestCondition, s conversion.Scope) error { + out.Type = certificates.RequestConditionType(in.Type) + out.Reason = in.Reason + 
out.Message = in.Message + out.LastUpdateTime = in.LastUpdateTime + return nil +} + +func Convert_v1beta1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in *CertificateSigningRequestCondition, out *certificates.CertificateSigningRequestCondition, s conversion.Scope) error { + return autoConvert_v1beta1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in, out, s) +} + +func autoConvert_certificates_CertificateSigningRequestCondition_To_v1beta1_CertificateSigningRequestCondition(in *certificates.CertificateSigningRequestCondition, out *CertificateSigningRequestCondition, s conversion.Scope) error { + out.Type = RequestConditionType(in.Type) + out.Reason = in.Reason + out.Message = in.Message + out.LastUpdateTime = in.LastUpdateTime + return nil +} + +func Convert_certificates_CertificateSigningRequestCondition_To_v1beta1_CertificateSigningRequestCondition(in *certificates.CertificateSigningRequestCondition, out *CertificateSigningRequestCondition, s conversion.Scope) error { + return autoConvert_certificates_CertificateSigningRequestCondition_To_v1beta1_CertificateSigningRequestCondition(in, out, s) +} + +func autoConvert_v1beta1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in *CertificateSigningRequestList, out *certificates.CertificateSigningRequestList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]certificates.CertificateSigningRequest)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in *CertificateSigningRequestList, out *certificates.CertificateSigningRequestList, s conversion.Scope) error { + return autoConvert_v1beta1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in, out, s) +} + +func autoConvert_certificates_CertificateSigningRequestList_To_v1beta1_CertificateSigningRequestList(in *certificates.CertificateSigningRequestList, out *CertificateSigningRequestList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]CertificateSigningRequest, 0) + } else { + out.Items = *(*[]CertificateSigningRequest)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_certificates_CertificateSigningRequestList_To_v1beta1_CertificateSigningRequestList(in *certificates.CertificateSigningRequestList, out *CertificateSigningRequestList, s conversion.Scope) error { + return autoConvert_certificates_CertificateSigningRequestList_To_v1beta1_CertificateSigningRequestList(in, out, s) +} + +func autoConvert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in *CertificateSigningRequestSpec, out *certificates.CertificateSigningRequestSpec, s conversion.Scope) error { + out.Request = *(*[]byte)(unsafe.Pointer(&in.Request)) + out.Usages = *(*[]certificates.KeyUsage)(unsafe.Pointer(&in.Usages)) + out.Username = in.Username + out.UID = in.UID + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]certificates.ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in *CertificateSigningRequestSpec, out *certificates.CertificateSigningRequestSpec, s conversion.Scope) error { + return autoConvert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in, out, s) +} + +func 
autoConvert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec(in *certificates.CertificateSigningRequestSpec, out *CertificateSigningRequestSpec, s conversion.Scope) error { + if in.Request == nil { + out.Request = make([]byte, 0) + } else { + out.Request = *(*[]byte)(unsafe.Pointer(&in.Request)) + } + out.Usages = *(*[]KeyUsage)(unsafe.Pointer(&in.Usages)) + out.Username = in.Username + out.UID = in.UID + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec(in *certificates.CertificateSigningRequestSpec, out *CertificateSigningRequestSpec, s conversion.Scope) error { + return autoConvert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec(in, out, s) +} + +func autoConvert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in *CertificateSigningRequestStatus, out *certificates.CertificateSigningRequestStatus, s conversion.Scope) error { + out.Conditions = *(*[]certificates.CertificateSigningRequestCondition)(unsafe.Pointer(&in.Conditions)) + out.Certificate = *(*[]byte)(unsafe.Pointer(&in.Certificate)) + return nil +} + +func Convert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in *CertificateSigningRequestStatus, out *certificates.CertificateSigningRequestStatus, s conversion.Scope) error { + return autoConvert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in, out, s) +} + +func autoConvert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus(in *certificates.CertificateSigningRequestStatus, out *CertificateSigningRequestStatus, s conversion.Scope) error { + out.Conditions = *(*[]CertificateSigningRequestCondition)(unsafe.Pointer(&in.Conditions)) + out.Certificate = *(*[]byte)(unsafe.Pointer(&in.Certificate)) + return nil +} + +func Convert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus(in *certificates.CertificateSigningRequestStatus, out *CertificateSigningRequestStatus, s conversion.Scope) error { + return autoConvert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..800cdee477 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,150 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CertificateSigningRequest, InType: reflect.TypeOf(&CertificateSigningRequest{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CertificateSigningRequestCondition, InType: reflect.TypeOf(&CertificateSigningRequestCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CertificateSigningRequestList, InType: reflect.TypeOf(&CertificateSigningRequestList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CertificateSigningRequestSpec, InType: reflect.TypeOf(&CertificateSigningRequestSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CertificateSigningRequestStatus, InType: reflect.TypeOf(&CertificateSigningRequestStatus{})}, + ) +} + +func DeepCopy_v1beta1_CertificateSigningRequest(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequest) + out := out.(*CertificateSigningRequest) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_CertificateSigningRequestSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_CertificateSigningRequestStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_CertificateSigningRequestCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequestCondition) + out := out.(*CertificateSigningRequestCondition) + *out = *in + out.LastUpdateTime = in.LastUpdateTime.DeepCopy() + return nil + } +} + +func DeepCopy_v1beta1_CertificateSigningRequestList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequestList) + out := out.(*CertificateSigningRequestList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CertificateSigningRequest, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_CertificateSigningRequest(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_CertificateSigningRequestSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequestSpec) + out := out.(*CertificateSigningRequestSpec) + *out = *in + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Usages != nil { + in, out := &in.Usages, &out.Usages + *out = make([]KeyUsage, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*ExtraValue) + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_CertificateSigningRequestStatus(in interface{}, 
out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequestStatus) + out := out.(*CertificateSigningRequestStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]CertificateSigningRequestCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_CertificateSigningRequestCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return nil + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/zz_generated.defaults.go new file mode 100644 index 0000000000..3c5ae03627 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/zz_generated.defaults.go @@ -0,0 +1,47 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&CertificateSigningRequest{}, func(obj interface{}) { SetObjectDefaults_CertificateSigningRequest(obj.(*CertificateSigningRequest)) }) + scheme.AddTypeDefaultingFunc(&CertificateSigningRequestList{}, func(obj interface{}) { + SetObjectDefaults_CertificateSigningRequestList(obj.(*CertificateSigningRequestList)) + }) + return nil +} + +func SetObjectDefaults_CertificateSigningRequest(in *CertificateSigningRequest) { + SetDefaults_CertificateSigningRequestSpec(&in.Spec) +} + +func SetObjectDefaults_CertificateSigningRequestList(in *CertificateSigningRequestList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_CertificateSigningRequest(a) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/zz_generated.deepcopy.go index 764271bfa5..8769028913 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,9 +21,9 @@ limitations under the License. 
package certificates import ( - api "k8s.io/kubernetes/pkg/api" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" reflect "reflect" ) @@ -47,9 +47,11 @@ func DeepCopy_certificates_CertificateSigningRequest(in interface{}, out interfa { in := in.(*CertificateSigningRequest) out := out.(*CertificateSigningRequest) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_certificates_CertificateSigningRequestSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -65,9 +67,7 @@ func DeepCopy_certificates_CertificateSigningRequestCondition(in interface{}, ou { in := in.(*CertificateSigningRequestCondition) out := out.(*CertificateSigningRequestCondition) - out.Type = in.Type - out.Reason = in.Reason - out.Message = in.Message + *out = *in out.LastUpdateTime = in.LastUpdateTime.DeepCopy() return nil } @@ -77,8 +77,7 @@ func DeepCopy_certificates_CertificateSigningRequestList(in interface{}, out int { in := in.(*CertificateSigningRequestList) out := out.(*CertificateSigningRequestList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CertificateSigningRequest, len(*in)) @@ -87,8 +86,6 @@ func DeepCopy_certificates_CertificateSigningRequestList(in interface{}, out int return err } } - } else { - out.Items = nil } return nil } @@ -98,21 +95,32 @@ func DeepCopy_certificates_CertificateSigningRequestSpec(in interface{}, out int { in := in.(*CertificateSigningRequestSpec) out := out.(*CertificateSigningRequestSpec) + *out = *in if in.Request != nil { in, out := &in.Request, &out.Request *out = make([]byte, len(*in)) copy(*out, *in) - } else { - out.Request = nil } - out.Username = in.Username - out.UID = in.UID + if in.Usages != nil { + in, out := &in.Usages, &out.Usages + *out = make([]KeyUsage, len(*in)) + copy(*out, *in) + } if in.Groups != nil { in, out := &in.Groups, &out.Groups *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Groups = nil + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*ExtraValue) + } + } } return nil } @@ -122,6 +130,7 @@ func DeepCopy_certificates_CertificateSigningRequestStatus(in interface{}, out i { in := in.(*CertificateSigningRequestStatus) out := out.(*CertificateSigningRequestStatus) + *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]CertificateSigningRequestCondition, len(*in)) @@ -130,15 +139,11 @@ func DeepCopy_certificates_CertificateSigningRequestStatus(in interface{}, out i return err } } - } else { - out.Conditions = nil } if in.Certificate != nil { in, out := &in.Certificate, &out.Certificate *out = make([]byte, len(*in)) copy(*out, *in) - } else { - out.Certificate = nil } return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/BUILD deleted file mode 100644 index d76601bc3e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/BUILD +++ /dev/null 
@@ -1,40 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "helpers.go", - "register.go", - "types.generated.go", - "types.go", - "zz_generated.deepcopy.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/config:go_default_library", - "//pkg/util/net:go_default_library", - "//vendor:github.com/ugorji/go/codec", - ], -) - -go_test( - name = "go_default_test", - srcs = ["helpers_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = ["//vendor:github.com/spf13/pflag"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/doc.go deleted file mode 100644 index 98ea9b555d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package,register -// +k8s:openapi-gen=true - -package componentconfig diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/helpers.go deleted file mode 100644 index 43b625b7ec..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/helpers.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package componentconfig - -import ( - "fmt" - "net" - - utilnet "k8s.io/kubernetes/pkg/util/net" -) - -// used for validating command line opts -// TODO(mikedanese): remove these when we remove command line flags - -type IPVar struct { - Val *string -} - -func (v IPVar) Set(s string) error { - if net.ParseIP(s) == nil { - return fmt.Errorf("%q is not a valid IP address", s) - } - if v.Val == nil { - // it's okay to panic here since this is programmer error - panic("the string pointer passed into IPVar should not be nil") - } - *v.Val = s - return nil -} - -func (v IPVar) String() string { - if v.Val == nil { - return "" - } - return *v.Val -} - -func (v IPVar) Type() string { - return "ip" -} - -func (m *ProxyMode) Set(s string) error { - *m = ProxyMode(s) - return nil -} - -func (m *ProxyMode) String() string { - if m != nil { - return string(*m) - } - return "" -} - -func (m *ProxyMode) Type() string { - return "ProxyMode" -} - -type PortRangeVar struct { - Val *string -} - -func (v PortRangeVar) Set(s string) error { - if _, err := utilnet.ParsePortRange(s); err != nil { - return fmt.Errorf("%q is not a valid port range: %v", s, err) - } - if v.Val == nil { - // it's okay to panic here since this is programmer error - panic("the string pointer passed into PortRangeVar should not be nil") - } - *v.Val = s - return nil -} - -func (v PortRangeVar) String() string { - if v.Val == nil { - return "" - } - return *v.Val -} - -func (v PortRangeVar) Type() string { - return "port-range" -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/BUILD deleted file mode 100644 index b783e08a18..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/BUILD +++ /dev/null @@ -1,22 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["install.go"], - tags = ["automanaged"], - deps = [ - "//pkg/apimachinery/announced:go_default_library", - "//pkg/apis/componentconfig:go_default_library", - "//pkg/apis/componentconfig/v1alpha1:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/install.go deleted file mode 100644 index 9bd194165b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/install.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the experimental API group, making it available as -// an option to all of the API encoding/decoding machinery. 
-package install - -import ( - "k8s.io/kubernetes/pkg/apimachinery/announced" - "k8s.io/kubernetes/pkg/apis/componentconfig" - "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1" -) - -func init() { - if err := announced.NewGroupMetaFactory( - &announced.GroupMetaFactoryArgs{ - GroupName: componentconfig.GroupName, - VersionPreferenceOrder: []string{v1alpha1.SchemeGroupVersion.Version}, - ImportPrefix: "k8s.io/kubernetes/pkg/apis/componentconfig", - AddInternalObjectsToScheme: componentconfig.AddToScheme, - }, - announced.VersionToSchemeFunc{ - v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme, - }, - ).Announce().RegisterAndEnable(); err != nil { - panic(err) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/register.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/register.go deleted file mode 100644 index 77dc749bb1..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/register.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package componentconfig - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// GroupName is the group name use in this package -const GroupName = "componentconfig" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -func addKnownTypes(scheme *runtime.Scheme) error { - // TODO this will get cleaned up with the scheme types are fixed - scheme.AddKnownTypes(SchemeGroupVersion, - &KubeProxyConfiguration{}, - &KubeSchedulerConfiguration{}, - &KubeletConfiguration{}, - ) - return nil -} - -func (obj *KubeProxyConfiguration) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *KubeSchedulerConfiguration) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *KubeletConfiguration) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.generated.go deleted file mode 100644 index c872496ea7..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.generated.go +++ /dev/null @@ -1,12866 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package componentconfig - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg2_config "k8s.io/kubernetes/pkg/util/config" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_unversioned.TypeMeta - var v1 pkg2_config.ConfigurationMap - var v2 time.Duration - _, _, _ = v0, v1, v2 - } -} - -func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [23]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(23) - } else { - yynn2 = 21 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - 
if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.BindAddress)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("bindAddress")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.BindAddress)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterCIDR)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterCIDR")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterCIDR)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HealthzBindAddress)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("healthzBindAddress")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HealthzBindAddress)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(x.HealthzPort)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("healthzPort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(x.HealthzPort)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostnameOverride)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostnameOverride")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostnameOverride)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.IPTablesMasqueradeBit == nil { - r.EncodeNil() - } else { - yy25 := *x.IPTablesMasqueradeBit - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeInt(int64(yy25)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iptablesMasqueradeBit")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.IPTablesMasqueradeBit == nil { - r.EncodeNil() - } else { 
- yy27 := *x.IPTablesMasqueradeBit - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeInt(int64(yy27)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy30 := &x.IPTablesSyncPeriod - yym31 := z.EncBinary() - _ = yym31 - if false { - } else if z.HasExtensions() && z.EncExt(yy30) { - } else if !yym31 && z.IsJSONHandle() { - z.EncJSONMarshal(yy30) - } else { - z.EncFallback(yy30) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iptablesSyncPeriodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy32 := &x.IPTablesSyncPeriod - yym33 := z.EncBinary() - _ = yym33 - if false { - } else if z.HasExtensions() && z.EncExt(yy32) { - } else if !yym33 && z.IsJSONHandle() { - z.EncJSONMarshal(yy32) - } else { - z.EncFallback(yy32) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy35 := &x.IPTablesMinSyncPeriod - yym36 := z.EncBinary() - _ = yym36 - if false { - } else if z.HasExtensions() && z.EncExt(yy35) { - } else if !yym36 && z.IsJSONHandle() { - z.EncJSONMarshal(yy35) - } else { - z.EncFallback(yy35) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iptablesMinSyncPeriodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy37 := &x.IPTablesMinSyncPeriod - yym38 := z.EncBinary() - _ = yym38 - if false { - } else if z.HasExtensions() && z.EncExt(yy37) { - } else if !yym38 && z.IsJSONHandle() { - z.EncJSONMarshal(yy37) - } else { - z.EncFallback(yy37) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym40 := z.EncBinary() - _ = yym40 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeconfigPath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeconfigPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym41 := z.EncBinary() - _ = yym41 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeconfigPath)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym43 := z.EncBinary() - _ = yym43 - if false { - } else { - r.EncodeBool(bool(x.MasqueradeAll)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("masqueradeAll")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym44 := z.EncBinary() - _ = yym44 - if false { - } else { - r.EncodeBool(bool(x.MasqueradeAll)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym46 := z.EncBinary() - _ = yym46 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Master)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("master")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym47 := z.EncBinary() - _ = yym47 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Master)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.OOMScoreAdj == nil { - r.EncodeNil() - } else { - yy49 := *x.OOMScoreAdj - yym50 := z.EncBinary() - _ = yym50 - if false { - } else { - r.EncodeInt(int64(yy49)) - } - } - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("oomScoreAdj")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.OOMScoreAdj == nil { - r.EncodeNil() - } else { - yy51 := *x.OOMScoreAdj - yym52 := z.EncBinary() - _ = yym52 - if false { - } else { - r.EncodeInt(int64(yy51)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Mode.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("mode")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Mode.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym55 := z.EncBinary() - _ = yym55 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PortRange)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("portRange")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym56 := z.EncBinary() - _ = yym56 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PortRange)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym58 := z.EncBinary() - _ = yym58 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceContainer)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceContainer")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym59 := z.EncBinary() - _ = yym59 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceContainer)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy61 := &x.UDPIdleTimeout - yym62 := z.EncBinary() - _ = yym62 - if false { - } else if z.HasExtensions() && z.EncExt(yy61) { - } else if !yym62 && z.IsJSONHandle() { - z.EncJSONMarshal(yy61) - } else { - z.EncFallback(yy61) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("udpTimeoutMilliseconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy63 := &x.UDPIdleTimeout - yym64 := z.EncBinary() - _ = yym64 - if false { - } else if z.HasExtensions() && z.EncExt(yy63) { - } else if !yym64 && z.IsJSONHandle() { - z.EncJSONMarshal(yy63) - } else { - z.EncFallback(yy63) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym66 := z.EncBinary() - _ = yym66 - if false { - } else { - r.EncodeInt(int64(x.ConntrackMax)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conntrackMax")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym67 := z.EncBinary() - _ = yym67 - if false { - } else { - r.EncodeInt(int64(x.ConntrackMax)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym69 := z.EncBinary() - _ = yym69 - if false { - } else { - r.EncodeInt(int64(x.ConntrackMaxPerCore)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conntrackMaxPerCore")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym70 := z.EncBinary() - _ = yym70 - if false { - } else { - r.EncodeInt(int64(x.ConntrackMaxPerCore)) - } - } - if yyr2 || 
yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym72 := z.EncBinary() - _ = yym72 - if false { - } else { - r.EncodeInt(int64(x.ConntrackMin)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conntrackMin")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym73 := z.EncBinary() - _ = yym73 - if false { - } else { - r.EncodeInt(int64(x.ConntrackMin)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy75 := &x.ConntrackTCPEstablishedTimeout - yym76 := z.EncBinary() - _ = yym76 - if false { - } else if z.HasExtensions() && z.EncExt(yy75) { - } else if !yym76 && z.IsJSONHandle() { - z.EncJSONMarshal(yy75) - } else { - z.EncFallback(yy75) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conntrackTCPEstablishedTimeout")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy77 := &x.ConntrackTCPEstablishedTimeout - yym78 := z.EncBinary() - _ = yym78 - if false { - } else if z.HasExtensions() && z.EncExt(yy77) { - } else if !yym78 && z.IsJSONHandle() { - z.EncJSONMarshal(yy77) - } else { - z.EncFallback(yy77) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy80 := &x.ConntrackTCPCloseWaitTimeout - yym81 := z.EncBinary() - _ = yym81 - if false { - } else if z.HasExtensions() && z.EncExt(yy80) { - } else if !yym81 && z.IsJSONHandle() { - z.EncJSONMarshal(yy80) - } else { - z.EncFallback(yy80) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conntrackTCPCloseWaitTimeout")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy82 := &x.ConntrackTCPCloseWaitTimeout - yym83 := z.EncBinary() - _ = yym83 - if false { - } else if z.HasExtensions() && z.EncExt(yy82) { - } else if !yym83 && z.IsJSONHandle() { - z.EncJSONMarshal(yy82) - } else { - z.EncFallback(yy82) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *KubeProxyConfiguration) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym84 := z.DecBinary() - _ = yym84 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct85 := r.ContainerType() - if yyct85 == codecSelferValueTypeMap1234 { - yyl85 := r.ReadMapStart() - if yyl85 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl85, d) - } - } else if yyct85 == codecSelferValueTypeArray1234 { - yyl85 := r.ReadArrayStart() - if yyl85 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl85, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *KubeProxyConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys86Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys86Slc - var yyhl86 bool = l >= 0 - for yyj86 := 0; ; yyj86++ { - if yyhl86 { - if yyj86 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys86Slc = r.DecodeBytes(yys86Slc, true, true) - yys86 := 
string(yys86Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys86 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "bindAddress": - if r.TryDecodeAsNil() { - x.BindAddress = "" - } else { - x.BindAddress = string(r.DecodeString()) - } - case "clusterCIDR": - if r.TryDecodeAsNil() { - x.ClusterCIDR = "" - } else { - x.ClusterCIDR = string(r.DecodeString()) - } - case "healthzBindAddress": - if r.TryDecodeAsNil() { - x.HealthzBindAddress = "" - } else { - x.HealthzBindAddress = string(r.DecodeString()) - } - case "healthzPort": - if r.TryDecodeAsNil() { - x.HealthzPort = 0 - } else { - x.HealthzPort = int32(r.DecodeInt(32)) - } - case "hostnameOverride": - if r.TryDecodeAsNil() { - x.HostnameOverride = "" - } else { - x.HostnameOverride = string(r.DecodeString()) - } - case "iptablesMasqueradeBit": - if r.TryDecodeAsNil() { - if x.IPTablesMasqueradeBit != nil { - x.IPTablesMasqueradeBit = nil - } - } else { - if x.IPTablesMasqueradeBit == nil { - x.IPTablesMasqueradeBit = new(int32) - } - yym95 := z.DecBinary() - _ = yym95 - if false { - } else { - *((*int32)(x.IPTablesMasqueradeBit)) = int32(r.DecodeInt(32)) - } - } - case "iptablesSyncPeriodSeconds": - if r.TryDecodeAsNil() { - x.IPTablesSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv96 := &x.IPTablesSyncPeriod - yym97 := z.DecBinary() - _ = yym97 - if false { - } else if z.HasExtensions() && z.DecExt(yyv96) { - } else if !yym97 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv96) - } else { - z.DecFallback(yyv96, false) - } - } - case "iptablesMinSyncPeriodSeconds": - if r.TryDecodeAsNil() { - x.IPTablesMinSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv98 := &x.IPTablesMinSyncPeriod - yym99 := z.DecBinary() - _ = yym99 - if false { - } else if z.HasExtensions() && z.DecExt(yyv98) { - } else if !yym99 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv98) - } else { - z.DecFallback(yyv98, false) - } - } - case "kubeconfigPath": - if r.TryDecodeAsNil() { - x.KubeconfigPath = "" - } else { - x.KubeconfigPath = string(r.DecodeString()) - } - case "masqueradeAll": - if r.TryDecodeAsNil() { - x.MasqueradeAll = false - } else { - x.MasqueradeAll = bool(r.DecodeBool()) - } - case "master": - if r.TryDecodeAsNil() { - x.Master = "" - } else { - x.Master = string(r.DecodeString()) - } - case "oomScoreAdj": - if r.TryDecodeAsNil() { - if x.OOMScoreAdj != nil { - x.OOMScoreAdj = nil - } - } else { - if x.OOMScoreAdj == nil { - x.OOMScoreAdj = new(int32) - } - yym104 := z.DecBinary() - _ = yym104 - if false { - } else { - *((*int32)(x.OOMScoreAdj)) = int32(r.DecodeInt(32)) - } - } - case "mode": - if r.TryDecodeAsNil() { - x.Mode = "" - } else { - x.Mode = ProxyMode(r.DecodeString()) - } - case "portRange": - if r.TryDecodeAsNil() { - x.PortRange = "" - } else { - x.PortRange = string(r.DecodeString()) - } - case "resourceContainer": - if r.TryDecodeAsNil() { - x.ResourceContainer = "" - } else { - x.ResourceContainer = string(r.DecodeString()) - } - case "udpTimeoutMilliseconds": - if r.TryDecodeAsNil() { - x.UDPIdleTimeout = pkg1_unversioned.Duration{} - } else { - yyv108 := &x.UDPIdleTimeout - yym109 := z.DecBinary() - _ = yym109 - if false { - } else if z.HasExtensions() && z.DecExt(yyv108) { - } else if !yym109 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv108) - } else { - z.DecFallback(yyv108, false) - } - } - case 
"conntrackMax": - if r.TryDecodeAsNil() { - x.ConntrackMax = 0 - } else { - x.ConntrackMax = int32(r.DecodeInt(32)) - } - case "conntrackMaxPerCore": - if r.TryDecodeAsNil() { - x.ConntrackMaxPerCore = 0 - } else { - x.ConntrackMaxPerCore = int32(r.DecodeInt(32)) - } - case "conntrackMin": - if r.TryDecodeAsNil() { - x.ConntrackMin = 0 - } else { - x.ConntrackMin = int32(r.DecodeInt(32)) - } - case "conntrackTCPEstablishedTimeout": - if r.TryDecodeAsNil() { - x.ConntrackTCPEstablishedTimeout = pkg1_unversioned.Duration{} - } else { - yyv113 := &x.ConntrackTCPEstablishedTimeout - yym114 := z.DecBinary() - _ = yym114 - if false { - } else if z.HasExtensions() && z.DecExt(yyv113) { - } else if !yym114 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv113) - } else { - z.DecFallback(yyv113, false) - } - } - case "conntrackTCPCloseWaitTimeout": - if r.TryDecodeAsNil() { - x.ConntrackTCPCloseWaitTimeout = pkg1_unversioned.Duration{} - } else { - yyv115 := &x.ConntrackTCPCloseWaitTimeout - yym116 := z.DecBinary() - _ = yym116 - if false { - } else if z.HasExtensions() && z.DecExt(yyv115) { - } else if !yym116 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv115) - } else { - z.DecFallback(yyv115, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys86) - } // end switch yys86 - } // end for yyj86 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj117 int - var yyb117 bool - var yyhl117 bool = l >= 0 - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.BindAddress = "" - } else { - x.BindAddress = string(r.DecodeString()) - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterCIDR = "" - } else { - x.ClusterCIDR = string(r.DecodeString()) - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HealthzBindAddress = "" - } else { - x.HealthzBindAddress = string(r.DecodeString()) - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HealthzPort = 0 - } else { - x.HealthzPort = int32(r.DecodeInt(32)) - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostnameOverride = "" - } else { - x.HostnameOverride = string(r.DecodeString()) - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.IPTablesMasqueradeBit != nil { - x.IPTablesMasqueradeBit = nil - } - } else { - if x.IPTablesMasqueradeBit == nil { - x.IPTablesMasqueradeBit = new(int32) - } - yym126 := z.DecBinary() - _ = yym126 - if false { - } else { - *((*int32)(x.IPTablesMasqueradeBit)) = int32(r.DecodeInt(32)) - } - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.IPTablesSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv127 := &x.IPTablesSyncPeriod - yym128 := z.DecBinary() - _ = yym128 - if false { - } else if z.HasExtensions() && z.DecExt(yyv127) { - } else if !yym128 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv127) - } else { - z.DecFallback(yyv127, false) - } - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.IPTablesMinSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv129 := &x.IPTablesMinSyncPeriod - yym130 := z.DecBinary() - _ = yym130 - if false { - } else if z.HasExtensions() && z.DecExt(yyv129) { - } else if !yym130 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv129) - } else { - z.DecFallback(yyv129, false) - } - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeconfigPath = "" - } else { - x.KubeconfigPath = string(r.DecodeString()) - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MasqueradeAll = false - } else { - x.MasqueradeAll = bool(r.DecodeBool()) - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Master = "" - } else { - x.Master = string(r.DecodeString()) - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - if x.OOMScoreAdj != nil { - x.OOMScoreAdj = nil - } - } else { - if x.OOMScoreAdj == nil { - x.OOMScoreAdj = new(int32) - } - yym135 := z.DecBinary() - _ = yym135 - if false { - } else { - *((*int32)(x.OOMScoreAdj)) = int32(r.DecodeInt(32)) - } - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Mode = "" - } else { - x.Mode = ProxyMode(r.DecodeString()) - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PortRange = "" - } else { - x.PortRange = string(r.DecodeString()) - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceContainer = "" - } else { - x.ResourceContainer = string(r.DecodeString()) - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UDPIdleTimeout = pkg1_unversioned.Duration{} - } else { - yyv139 := &x.UDPIdleTimeout - yym140 := z.DecBinary() - _ = yym140 - if false { - } else if z.HasExtensions() && z.DecExt(yyv139) { - } else if !yym140 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv139) - } else { - z.DecFallback(yyv139, false) - } - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConntrackMax = 0 - } else { - x.ConntrackMax = int32(r.DecodeInt(32)) - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConntrackMaxPerCore = 0 - } else { - x.ConntrackMaxPerCore = int32(r.DecodeInt(32)) - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConntrackMin = 0 - } else { - x.ConntrackMin = int32(r.DecodeInt(32)) - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConntrackTCPEstablishedTimeout = pkg1_unversioned.Duration{} - } else { - yyv144 := &x.ConntrackTCPEstablishedTimeout - yym145 := z.DecBinary() - _ = yym145 - if false { - } else if z.HasExtensions() && z.DecExt(yyv144) { - } else if !yym145 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv144) - } else { - z.DecFallback(yyv144, false) - } - } - yyj117++ - if 
yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConntrackTCPCloseWaitTimeout = pkg1_unversioned.Duration{} - } else { - yyv146 := &x.ConntrackTCPCloseWaitTimeout - yym147 := z.DecBinary() - _ = yym147 - if false { - } else if z.HasExtensions() && z.DecExt(yyv146) { - } else if !yym147 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv146) - } else { - z.DecFallback(yyv146, false) - } - } - for { - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj117-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ProxyMode) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym148 := z.EncBinary() - _ = yym148 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ProxyMode) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym149 := z.DecBinary() - _ = yym149 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x HairpinMode) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym150 := z.EncBinary() - _ = yym150 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *HairpinMode) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym151 := z.DecBinary() - _ = yym151 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym152 := z.EncBinary() - _ = yym152 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep153 := !z.EncBinary() - yy2arr153 := z.EncBasicHandle().StructToArray - var yyq153 [114]bool - _, _, _ = yysep153, yyq153, yy2arr153 - const yyr153 bool = false - yyq153[0] = x.Kind != "" - yyq153[1] = x.APIVersion != "" - yyq153[55] = x.CloudProvider != "" - yyq153[56] = x.CloudConfigFile != "" - yyq153[57] = x.KubeletCgroups != "" - yyq153[58] = x.ExperimentalCgroupsPerQOS != false - yyq153[59] = x.CgroupDriver != "" - yyq153[60] = x.RuntimeCgroups != "" - yyq153[61] = x.SystemCgroups != "" - yyq153[62] = x.CgroupRoot != "" - yyq153[66] = true - yyq153[67] = x.RktPath != "" - yyq153[68] = x.ExperimentalMounterPath != "" - yyq153[69] = x.RktAPIEndpoint != "" - yyq153[70] = x.RktStage1Image != "" - yyq153[89] = true - yyq153[90] = x.NodeIP != "" - yyq153[94] = x.EvictionHard != "" - yyq153[95] = x.EvictionSoft != "" - yyq153[96] = x.EvictionSoftGracePeriod != "" - yyq153[97] = true - yyq153[98] = x.EvictionMaxPodGracePeriod != 0 - yyq153[99] = x.EvictionMinimumReclaim != "" - yyq153[109] = len(x.AllowedUnsafeSysctls) != 0 - yyq153[111] = x.EnableCRI != false - yyq153[112] = x.ExperimentalFailSwapOn 
!= false - yyq153[113] = x.ExperimentalCheckNodeCapabilitiesBeforeMount != false - var yynn153 int - if yyr153 || yy2arr153 { - r.EncodeArrayStart(114) - } else { - yynn153 = 87 - for _, b := range yyq153 { - if b { - yynn153++ - } - } - r.EncodeMapStart(yynn153) - yynn153 = 0 - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[0] { - yym155 := z.EncBinary() - _ = yym155 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq153[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym156 := z.EncBinary() - _ = yym156 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[1] { - yym158 := z.EncBinary() - _ = yym158 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq153[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym159 := z.EncBinary() - _ = yym159 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym161 := z.EncBinary() - _ = yym161 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodManifestPath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podManifestPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym162 := z.EncBinary() - _ = yym162 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodManifestPath)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy164 := &x.SyncFrequency - yym165 := z.EncBinary() - _ = yym165 - if false { - } else if z.HasExtensions() && z.EncExt(yy164) { - } else if !yym165 && z.IsJSONHandle() { - z.EncJSONMarshal(yy164) - } else { - z.EncFallback(yy164) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("syncFrequency")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy166 := &x.SyncFrequency - yym167 := z.EncBinary() - _ = yym167 - if false { - } else if z.HasExtensions() && z.EncExt(yy166) { - } else if !yym167 && z.IsJSONHandle() { - z.EncJSONMarshal(yy166) - } else { - z.EncFallback(yy166) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy169 := &x.FileCheckFrequency - yym170 := z.EncBinary() - _ = yym170 - if false { - } else if z.HasExtensions() && z.EncExt(yy169) { - } else if !yym170 && z.IsJSONHandle() { - z.EncJSONMarshal(yy169) - } else { - z.EncFallback(yy169) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fileCheckFrequency")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy171 := &x.FileCheckFrequency - yym172 := z.EncBinary() - _ = yym172 - if false { - } else if z.HasExtensions() && z.EncExt(yy171) { - } else if 
!yym172 && z.IsJSONHandle() { - z.EncJSONMarshal(yy171) - } else { - z.EncFallback(yy171) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy174 := &x.HTTPCheckFrequency - yym175 := z.EncBinary() - _ = yym175 - if false { - } else if z.HasExtensions() && z.EncExt(yy174) { - } else if !yym175 && z.IsJSONHandle() { - z.EncJSONMarshal(yy174) - } else { - z.EncFallback(yy174) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("httpCheckFrequency")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy176 := &x.HTTPCheckFrequency - yym177 := z.EncBinary() - _ = yym177 - if false { - } else if z.HasExtensions() && z.EncExt(yy176) { - } else if !yym177 && z.IsJSONHandle() { - z.EncJSONMarshal(yy176) - } else { - z.EncFallback(yy176) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym179 := z.EncBinary() - _ = yym179 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ManifestURL)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("manifestURL")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym180 := z.EncBinary() - _ = yym180 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ManifestURL)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym182 := z.EncBinary() - _ = yym182 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ManifestURLHeader)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("manifestURLHeader")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym183 := z.EncBinary() - _ = yym183 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ManifestURLHeader)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym185 := z.EncBinary() - _ = yym185 - if false { - } else { - r.EncodeBool(bool(x.EnableServer)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enableServer")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym186 := z.EncBinary() - _ = yym186 - if false { - } else { - r.EncodeBool(bool(x.EnableServer)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym188 := z.EncBinary() - _ = yym188 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Address)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("address")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym189 := z.EncBinary() - _ = yym189 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Address)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym191 := z.EncBinary() - _ = yym191 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym192 := z.EncBinary() - _ = yym192 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } - if yyr153 || yy2arr153 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym194 := z.EncBinary() - _ = yym194 - if false { - } else { - r.EncodeInt(int64(x.ReadOnlyPort)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnlyPort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym195 := z.EncBinary() - _ = yym195 - if false { - } else { - r.EncodeInt(int64(x.ReadOnlyPort)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym197 := z.EncBinary() - _ = yym197 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TLSCertFile)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tlsCertFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym198 := z.EncBinary() - _ = yym198 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TLSCertFile)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym200 := z.EncBinary() - _ = yym200 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TLSPrivateKeyFile)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tlsPrivateKeyFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym201 := z.EncBinary() - _ = yym201 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TLSPrivateKeyFile)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym203 := z.EncBinary() - _ = yym203 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CertDirectory)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("certDirectory")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym204 := z.EncBinary() - _ = yym204 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CertDirectory)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy206 := &x.Authentication - yy206.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("authentication")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy207 := &x.Authentication - yy207.CodecEncodeSelf(e) - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy209 := &x.Authorization - yy209.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("authorization")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy210 := &x.Authorization - yy210.CodecEncodeSelf(e) - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym212 := z.EncBinary() - _ = yym212 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostnameOverride)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostnameOverride")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym213 := z.EncBinary() - _ = yym213 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostnameOverride)) - } - } - if yyr153 || yy2arr153 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym215 := z.EncBinary() - _ = yym215 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodInfraContainerImage)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podInfraContainerImage")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym216 := z.EncBinary() - _ = yym216 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodInfraContainerImage)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym218 := z.EncBinary() - _ = yym218 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DockerEndpoint)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("dockerEndpoint")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym219 := z.EncBinary() - _ = yym219 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DockerEndpoint)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym221 := z.EncBinary() - _ = yym221 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RootDirectory)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rootDirectory")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym222 := z.EncBinary() - _ = yym222 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RootDirectory)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym224 := z.EncBinary() - _ = yym224 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SeccompProfileRoot)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("seccompProfileRoot")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym225 := z.EncBinary() - _ = yym225 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SeccompProfileRoot)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym227 := z.EncBinary() - _ = yym227 - if false { - } else { - r.EncodeBool(bool(x.AllowPrivileged)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("allowPrivileged")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym228 := z.EncBinary() - _ = yym228 - if false { - } else { - r.EncodeBool(bool(x.AllowPrivileged)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.HostNetworkSources == nil { - r.EncodeNil() - } else { - yym230 := z.EncBinary() - _ = yym230 - if false { - } else { - z.F.EncSliceStringV(x.HostNetworkSources, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostNetworkSources")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HostNetworkSources == nil { - r.EncodeNil() - } else { - yym231 := z.EncBinary() - _ = yym231 - if false { - } else { - z.F.EncSliceStringV(x.HostNetworkSources, false, e) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.HostPIDSources == nil { - r.EncodeNil() - } else { - 
yym233 := z.EncBinary() - _ = yym233 - if false { - } else { - z.F.EncSliceStringV(x.HostPIDSources, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPIDSources")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HostPIDSources == nil { - r.EncodeNil() - } else { - yym234 := z.EncBinary() - _ = yym234 - if false { - } else { - z.F.EncSliceStringV(x.HostPIDSources, false, e) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.HostIPCSources == nil { - r.EncodeNil() - } else { - yym236 := z.EncBinary() - _ = yym236 - if false { - } else { - z.F.EncSliceStringV(x.HostIPCSources, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostIPCSources")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HostIPCSources == nil { - r.EncodeNil() - } else { - yym237 := z.EncBinary() - _ = yym237 - if false { - } else { - z.F.EncSliceStringV(x.HostIPCSources, false, e) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym239 := z.EncBinary() - _ = yym239 - if false { - } else { - r.EncodeInt(int64(x.RegistryPullQPS)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("registryPullQPS")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym240 := z.EncBinary() - _ = yym240 - if false { - } else { - r.EncodeInt(int64(x.RegistryPullQPS)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym242 := z.EncBinary() - _ = yym242 - if false { - } else { - r.EncodeInt(int64(x.RegistryBurst)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("registryBurst")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym243 := z.EncBinary() - _ = yym243 - if false { - } else { - r.EncodeInt(int64(x.RegistryBurst)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym245 := z.EncBinary() - _ = yym245 - if false { - } else { - r.EncodeInt(int64(x.EventRecordQPS)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("eventRecordQPS")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym246 := z.EncBinary() - _ = yym246 - if false { - } else { - r.EncodeInt(int64(x.EventRecordQPS)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym248 := z.EncBinary() - _ = yym248 - if false { - } else { - r.EncodeInt(int64(x.EventBurst)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("eventBurst")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym249 := z.EncBinary() - _ = yym249 - if false { - } else { - r.EncodeInt(int64(x.EventBurst)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym251 := z.EncBinary() - _ = yym251 - if false { - } else { - r.EncodeBool(bool(x.EnableDebuggingHandlers)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enableDebuggingHandlers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - 
yym252 := z.EncBinary() - _ = yym252 - if false { - } else { - r.EncodeBool(bool(x.EnableDebuggingHandlers)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy254 := &x.MinimumGCAge - yym255 := z.EncBinary() - _ = yym255 - if false { - } else if z.HasExtensions() && z.EncExt(yy254) { - } else if !yym255 && z.IsJSONHandle() { - z.EncJSONMarshal(yy254) - } else { - z.EncFallback(yy254) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minimumGCAge")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy256 := &x.MinimumGCAge - yym257 := z.EncBinary() - _ = yym257 - if false { - } else if z.HasExtensions() && z.EncExt(yy256) { - } else if !yym257 && z.IsJSONHandle() { - z.EncJSONMarshal(yy256) - } else { - z.EncFallback(yy256) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym259 := z.EncBinary() - _ = yym259 - if false { - } else { - r.EncodeInt(int64(x.MaxPerPodContainerCount)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxPerPodContainerCount")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym260 := z.EncBinary() - _ = yym260 - if false { - } else { - r.EncodeInt(int64(x.MaxPerPodContainerCount)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym262 := z.EncBinary() - _ = yym262 - if false { - } else { - r.EncodeInt(int64(x.MaxContainerCount)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxContainerCount")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym263 := z.EncBinary() - _ = yym263 - if false { - } else { - r.EncodeInt(int64(x.MaxContainerCount)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym265 := z.EncBinary() - _ = yym265 - if false { - } else { - r.EncodeInt(int64(x.CAdvisorPort)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cAdvisorPort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym266 := z.EncBinary() - _ = yym266 - if false { - } else { - r.EncodeInt(int64(x.CAdvisorPort)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym268 := z.EncBinary() - _ = yym268 - if false { - } else { - r.EncodeInt(int64(x.HealthzPort)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("healthzPort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym269 := z.EncBinary() - _ = yym269 - if false { - } else { - r.EncodeInt(int64(x.HealthzPort)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym271 := z.EncBinary() - _ = yym271 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HealthzBindAddress)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("healthzBindAddress")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym272 := z.EncBinary() - _ = yym272 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HealthzBindAddress)) - } - } - if yyr153 || yy2arr153 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym274 := z.EncBinary() - _ = yym274 - if false { - } else { - r.EncodeInt(int64(x.OOMScoreAdj)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("oomScoreAdj")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym275 := z.EncBinary() - _ = yym275 - if false { - } else { - r.EncodeInt(int64(x.OOMScoreAdj)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym277 := z.EncBinary() - _ = yym277 - if false { - } else { - r.EncodeBool(bool(x.RegisterNode)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("registerNode")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym278 := z.EncBinary() - _ = yym278 - if false { - } else { - r.EncodeBool(bool(x.RegisterNode)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym280 := z.EncBinary() - _ = yym280 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterDomain)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterDomain")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym281 := z.EncBinary() - _ = yym281 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterDomain)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym283 := z.EncBinary() - _ = yym283 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MasterServiceNamespace)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("masterServiceNamespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym284 := z.EncBinary() - _ = yym284 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MasterServiceNamespace)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym286 := z.EncBinary() - _ = yym286 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterDNS)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterDNS")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym287 := z.EncBinary() - _ = yym287 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterDNS)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy289 := &x.StreamingConnectionIdleTimeout - yym290 := z.EncBinary() - _ = yym290 - if false { - } else if z.HasExtensions() && z.EncExt(yy289) { - } else if !yym290 && z.IsJSONHandle() { - z.EncJSONMarshal(yy289) - } else { - z.EncFallback(yy289) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("streamingConnectionIdleTimeout")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy291 := &x.StreamingConnectionIdleTimeout - yym292 := z.EncBinary() - _ = yym292 - if false { - } else if z.HasExtensions() && z.EncExt(yy291) { - } else if !yym292 && z.IsJSONHandle() { - z.EncJSONMarshal(yy291) - } else { - z.EncFallback(yy291) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy294 := 
&x.NodeStatusUpdateFrequency - yym295 := z.EncBinary() - _ = yym295 - if false { - } else if z.HasExtensions() && z.EncExt(yy294) { - } else if !yym295 && z.IsJSONHandle() { - z.EncJSONMarshal(yy294) - } else { - z.EncFallback(yy294) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeStatusUpdateFrequency")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy296 := &x.NodeStatusUpdateFrequency - yym297 := z.EncBinary() - _ = yym297 - if false { - } else if z.HasExtensions() && z.EncExt(yy296) { - } else if !yym297 && z.IsJSONHandle() { - z.EncJSONMarshal(yy296) - } else { - z.EncFallback(yy296) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy299 := &x.ImageMinimumGCAge - yym300 := z.EncBinary() - _ = yym300 - if false { - } else if z.HasExtensions() && z.EncExt(yy299) { - } else if !yym300 && z.IsJSONHandle() { - z.EncJSONMarshal(yy299) - } else { - z.EncFallback(yy299) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("imageMinimumGCAge")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy301 := &x.ImageMinimumGCAge - yym302 := z.EncBinary() - _ = yym302 - if false { - } else if z.HasExtensions() && z.EncExt(yy301) { - } else if !yym302 && z.IsJSONHandle() { - z.EncJSONMarshal(yy301) - } else { - z.EncFallback(yy301) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym304 := z.EncBinary() - _ = yym304 - if false { - } else { - r.EncodeInt(int64(x.ImageGCHighThresholdPercent)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("imageGCHighThresholdPercent")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym305 := z.EncBinary() - _ = yym305 - if false { - } else { - r.EncodeInt(int64(x.ImageGCHighThresholdPercent)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym307 := z.EncBinary() - _ = yym307 - if false { - } else { - r.EncodeInt(int64(x.ImageGCLowThresholdPercent)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("imageGCLowThresholdPercent")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym308 := z.EncBinary() - _ = yym308 - if false { - } else { - r.EncodeInt(int64(x.ImageGCLowThresholdPercent)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym310 := z.EncBinary() - _ = yym310 - if false { - } else { - r.EncodeInt(int64(x.LowDiskSpaceThresholdMB)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lowDiskSpaceThresholdMB")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym311 := z.EncBinary() - _ = yym311 - if false { - } else { - r.EncodeInt(int64(x.LowDiskSpaceThresholdMB)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy313 := &x.VolumeStatsAggPeriod - yym314 := z.EncBinary() - _ = yym314 - if false { - } else if z.HasExtensions() && z.EncExt(yy313) { - } else if !yym314 && z.IsJSONHandle() { - z.EncJSONMarshal(yy313) - } else { - z.EncFallback(yy313) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("volumeStatsAggPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy315 := &x.VolumeStatsAggPeriod - yym316 := z.EncBinary() - _ = yym316 - if false { - } else if z.HasExtensions() && z.EncExt(yy315) { - } else if !yym316 && z.IsJSONHandle() { - z.EncJSONMarshal(yy315) - } else { - z.EncFallback(yy315) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym318 := z.EncBinary() - _ = yym318 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NetworkPluginName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("networkPluginName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym319 := z.EncBinary() - _ = yym319 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NetworkPluginName)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym321 := z.EncBinary() - _ = yym321 - if false { - } else { - r.EncodeInt(int64(x.NetworkPluginMTU)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("networkPluginMTU")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym322 := z.EncBinary() - _ = yym322 - if false { - } else { - r.EncodeInt(int64(x.NetworkPluginMTU)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym324 := z.EncBinary() - _ = yym324 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NetworkPluginDir)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("networkPluginDir")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym325 := z.EncBinary() - _ = yym325 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NetworkPluginDir)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym327 := z.EncBinary() - _ = yym327 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CNIConfDir)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cniConfDir")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym328 := z.EncBinary() - _ = yym328 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CNIConfDir)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym330 := z.EncBinary() - _ = yym330 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CNIBinDir)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cniBinDir")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym331 := z.EncBinary() - _ = yym331 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CNIBinDir)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym333 := z.EncBinary() - _ = yym333 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumePluginDir)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumePluginDir")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym334 := z.EncBinary() - _ = yym334 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.VolumePluginDir)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[55] { - yym336 := z.EncBinary() - _ = yym336 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CloudProvider)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq153[55] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cloudProvider")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym337 := z.EncBinary() - _ = yym337 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CloudProvider)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[56] { - yym339 := z.EncBinary() - _ = yym339 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CloudConfigFile)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq153[56] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cloudConfigFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym340 := z.EncBinary() - _ = yym340 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CloudConfigFile)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[57] { - yym342 := z.EncBinary() - _ = yym342 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeletCgroups)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq153[57] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeletCgroups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym343 := z.EncBinary() - _ = yym343 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeletCgroups)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[58] { - yym345 := z.EncBinary() - _ = yym345 - if false { - } else { - r.EncodeBool(bool(x.ExperimentalCgroupsPerQOS)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq153[58] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("experimentalCgroupsPerQOS")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym346 := z.EncBinary() - _ = yym346 - if false { - } else { - r.EncodeBool(bool(x.ExperimentalCgroupsPerQOS)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[59] { - yym348 := z.EncBinary() - _ = yym348 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CgroupDriver)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq153[59] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cgroupDriver")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym349 := z.EncBinary() - _ = yym349 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CgroupDriver)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[60] { - yym351 := z.EncBinary() - _ = yym351 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RuntimeCgroups)) - } - } else { - 
r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq153[60] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runtimeCgroups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym352 := z.EncBinary() - _ = yym352 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RuntimeCgroups)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[61] { - yym354 := z.EncBinary() - _ = yym354 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SystemCgroups)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq153[61] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("systemCgroups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym355 := z.EncBinary() - _ = yym355 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SystemCgroups)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[62] { - yym357 := z.EncBinary() - _ = yym357 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CgroupRoot)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq153[62] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cgroupRoot")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym358 := z.EncBinary() - _ = yym358 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CgroupRoot)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym360 := z.EncBinary() - _ = yym360 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntime)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerRuntime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym361 := z.EncBinary() - _ = yym361 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntime)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym363 := z.EncBinary() - _ = yym363 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RemoteRuntimeEndpoint)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("remoteRuntimeEndpoint")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym364 := z.EncBinary() - _ = yym364 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RemoteRuntimeEndpoint)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym366 := z.EncBinary() - _ = yym366 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RemoteImageEndpoint)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("remoteImageEndpoint")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym367 := z.EncBinary() - _ = yym367 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RemoteImageEndpoint)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[66] { - yy369 := &x.RuntimeRequestTimeout - 
yym370 := z.EncBinary() - _ = yym370 - if false { - } else if z.HasExtensions() && z.EncExt(yy369) { - } else if !yym370 && z.IsJSONHandle() { - z.EncJSONMarshal(yy369) - } else { - z.EncFallback(yy369) - } - } else { - r.EncodeNil() - } - } else { - if yyq153[66] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runtimeRequestTimeout")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy371 := &x.RuntimeRequestTimeout - yym372 := z.EncBinary() - _ = yym372 - if false { - } else if z.HasExtensions() && z.EncExt(yy371) { - } else if !yym372 && z.IsJSONHandle() { - z.EncJSONMarshal(yy371) - } else { - z.EncFallback(yy371) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[67] { - yym374 := z.EncBinary() - _ = yym374 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RktPath)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq153[67] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rktPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym375 := z.EncBinary() - _ = yym375 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RktPath)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[68] { - yym377 := z.EncBinary() - _ = yym377 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ExperimentalMounterPath)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq153[68] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("experimentalMounterPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym378 := z.EncBinary() - _ = yym378 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ExperimentalMounterPath)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[69] { - yym380 := z.EncBinary() - _ = yym380 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RktAPIEndpoint)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq153[69] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rktAPIEndpoint")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym381 := z.EncBinary() - _ = yym381 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RktAPIEndpoint)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[70] { - yym383 := z.EncBinary() - _ = yym383 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RktStage1Image)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq153[70] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rktStage1Image")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym384 := z.EncBinary() - _ = yym384 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RktStage1Image)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym386 := z.EncBinary() - _ = yym386 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.LockFilePath)) 
- } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lockFilePath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym387 := z.EncBinary() - _ = yym387 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.LockFilePath)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym389 := z.EncBinary() - _ = yym389 - if false { - } else { - r.EncodeBool(bool(x.ExitOnLockContention)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("exitOnLockContention")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym390 := z.EncBinary() - _ = yym390 - if false { - } else { - r.EncodeBool(bool(x.ExitOnLockContention)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym392 := z.EncBinary() - _ = yym392 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HairpinMode)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hairpinMode")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym393 := z.EncBinary() - _ = yym393 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HairpinMode)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym395 := z.EncBinary() - _ = yym395 - if false { - } else { - r.EncodeBool(bool(x.BabysitDaemons)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("babysitDaemons")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym396 := z.EncBinary() - _ = yym396 - if false { - } else { - r.EncodeBool(bool(x.BabysitDaemons)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym398 := z.EncBinary() - _ = yym398 - if false { - } else { - r.EncodeInt(int64(x.MaxPods)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxPods")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym399 := z.EncBinary() - _ = yym399 - if false { - } else { - r.EncodeInt(int64(x.MaxPods)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym401 := z.EncBinary() - _ = yym401 - if false { - } else { - r.EncodeInt(int64(x.NvidiaGPUs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nvidiaGPUs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym402 := z.EncBinary() - _ = yym402 - if false { - } else { - r.EncodeInt(int64(x.NvidiaGPUs)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym404 := z.EncBinary() - _ = yym404 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DockerExecHandlerName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("dockerExecHandlerName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym405 := z.EncBinary() - _ = yym405 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DockerExecHandlerName)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym407 := 
z.EncBinary() - _ = yym407 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podCIDR")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym408 := z.EncBinary() - _ = yym408 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym410 := z.EncBinary() - _ = yym410 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResolverConfig)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resolvConf")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym411 := z.EncBinary() - _ = yym411 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResolverConfig)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym413 := z.EncBinary() - _ = yym413 - if false { - } else { - r.EncodeBool(bool(x.CPUCFSQuota)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cpuCFSQuota")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym414 := z.EncBinary() - _ = yym414 - if false { - } else { - r.EncodeBool(bool(x.CPUCFSQuota)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym416 := z.EncBinary() - _ = yym416 - if false { - } else { - r.EncodeBool(bool(x.Containerized)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerized")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym417 := z.EncBinary() - _ = yym417 - if false { - } else { - r.EncodeBool(bool(x.Containerized)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym419 := z.EncBinary() - _ = yym419 - if false { - } else { - r.EncodeInt(int64(x.MaxOpenFiles)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxOpenFiles")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym420 := z.EncBinary() - _ = yym420 - if false { - } else { - r.EncodeInt(int64(x.MaxOpenFiles)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym422 := z.EncBinary() - _ = yym422 - if false { - } else { - r.EncodeBool(bool(x.ReconcileCIDR)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reconcileCIDR")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym423 := z.EncBinary() - _ = yym423 - if false { - } else { - r.EncodeBool(bool(x.ReconcileCIDR)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym425 := z.EncBinary() - _ = yym425 - if false { - } else { - r.EncodeBool(bool(x.RegisterSchedulable)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("registerSchedulable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym426 := z.EncBinary() - _ = yym426 - if false { - } else { - r.EncodeBool(bool(x.RegisterSchedulable)) - } - } - if yyr153 || yy2arr153 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym428 := z.EncBinary() - _ = yym428 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("contentType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym429 := z.EncBinary() - _ = yym429 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym431 := z.EncBinary() - _ = yym431 - if false { - } else { - r.EncodeInt(int64(x.KubeAPIQPS)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeAPIQPS")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym432 := z.EncBinary() - _ = yym432 - if false { - } else { - r.EncodeInt(int64(x.KubeAPIQPS)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym434 := z.EncBinary() - _ = yym434 - if false { - } else { - r.EncodeInt(int64(x.KubeAPIBurst)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeAPIBurst")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym435 := z.EncBinary() - _ = yym435 - if false { - } else { - r.EncodeInt(int64(x.KubeAPIBurst)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym437 := z.EncBinary() - _ = yym437 - if false { - } else { - r.EncodeBool(bool(x.SerializeImagePulls)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serializeImagePulls")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym438 := z.EncBinary() - _ = yym438 - if false { - } else { - r.EncodeBool(bool(x.SerializeImagePulls)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[89] { - yy440 := &x.OutOfDiskTransitionFrequency - yym441 := z.EncBinary() - _ = yym441 - if false { - } else if z.HasExtensions() && z.EncExt(yy440) { - } else if !yym441 && z.IsJSONHandle() { - z.EncJSONMarshal(yy440) - } else { - z.EncFallback(yy440) - } - } else { - r.EncodeNil() - } - } else { - if yyq153[89] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("outOfDiskTransitionFrequency")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy442 := &x.OutOfDiskTransitionFrequency - yym443 := z.EncBinary() - _ = yym443 - if false { - } else if z.HasExtensions() && z.EncExt(yy442) { - } else if !yym443 && z.IsJSONHandle() { - z.EncJSONMarshal(yy442) - } else { - z.EncFallback(yy442) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[90] { - yym445 := z.EncBinary() - _ = yym445 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NodeIP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq153[90] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeIP")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym446 := z.EncBinary() - _ = yym446 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NodeIP)) - } - } - } - if yyr153 || 
yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.NodeLabels == nil { - r.EncodeNil() - } else { - yym448 := z.EncBinary() - _ = yym448 - if false { - } else { - z.F.EncMapStringStringV(x.NodeLabels, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeLabels")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NodeLabels == nil { - r.EncodeNil() - } else { - yym449 := z.EncBinary() - _ = yym449 - if false { - } else { - z.F.EncMapStringStringV(x.NodeLabels, false, e) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym451 := z.EncBinary() - _ = yym451 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NonMasqueradeCIDR)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nonMasqueradeCIDR")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym452 := z.EncBinary() - _ = yym452 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NonMasqueradeCIDR)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym454 := z.EncBinary() - _ = yym454 - if false { - } else { - r.EncodeBool(bool(x.EnableCustomMetrics)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enableCustomMetrics")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym455 := z.EncBinary() - _ = yym455 - if false { - } else { - r.EncodeBool(bool(x.EnableCustomMetrics)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[94] { - yym457 := z.EncBinary() - _ = yym457 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EvictionHard)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq153[94] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("evictionHard")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym458 := z.EncBinary() - _ = yym458 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EvictionHard)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[95] { - yym460 := z.EncBinary() - _ = yym460 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EvictionSoft)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq153[95] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("evictionSoft")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym461 := z.EncBinary() - _ = yym461 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EvictionSoft)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[96] { - yym463 := z.EncBinary() - _ = yym463 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EvictionSoftGracePeriod)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq153[96] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("evictionSoftGracePeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym464 := 
z.EncBinary() - _ = yym464 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EvictionSoftGracePeriod)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[97] { - yy466 := &x.EvictionPressureTransitionPeriod - yym467 := z.EncBinary() - _ = yym467 - if false { - } else if z.HasExtensions() && z.EncExt(yy466) { - } else if !yym467 && z.IsJSONHandle() { - z.EncJSONMarshal(yy466) - } else { - z.EncFallback(yy466) - } - } else { - r.EncodeNil() - } - } else { - if yyq153[97] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("evictionPressureTransitionPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy468 := &x.EvictionPressureTransitionPeriod - yym469 := z.EncBinary() - _ = yym469 - if false { - } else if z.HasExtensions() && z.EncExt(yy468) { - } else if !yym469 && z.IsJSONHandle() { - z.EncJSONMarshal(yy468) - } else { - z.EncFallback(yy468) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[98] { - yym471 := z.EncBinary() - _ = yym471 - if false { - } else { - r.EncodeInt(int64(x.EvictionMaxPodGracePeriod)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq153[98] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("evictionMaxPodGracePeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym472 := z.EncBinary() - _ = yym472 - if false { - } else { - r.EncodeInt(int64(x.EvictionMaxPodGracePeriod)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[99] { - yym474 := z.EncBinary() - _ = yym474 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EvictionMinimumReclaim)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq153[99] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("evictionMinimumReclaim")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym475 := z.EncBinary() - _ = yym475 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EvictionMinimumReclaim)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym477 := z.EncBinary() - _ = yym477 - if false { - } else { - r.EncodeBool(bool(x.ExperimentalKernelMemcgNotification)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("experimentalKernelMemcgNotification")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym478 := z.EncBinary() - _ = yym478 - if false { - } else { - r.EncodeBool(bool(x.ExperimentalKernelMemcgNotification)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym480 := z.EncBinary() - _ = yym480 - if false { - } else { - r.EncodeInt(int64(x.PodsPerCore)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podsPerCore")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym481 := z.EncBinary() - _ = yym481 - if false { - } else { - r.EncodeInt(int64(x.PodsPerCore)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym483 := z.EncBinary() - _ = yym483 - if false { - } else { - 
r.EncodeBool(bool(x.EnableControllerAttachDetach)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enableControllerAttachDetach")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym484 := z.EncBinary() - _ = yym484 - if false { - } else { - r.EncodeBool(bool(x.EnableControllerAttachDetach)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.SystemReserved == nil { - r.EncodeNil() - } else { - yym486 := z.EncBinary() - _ = yym486 - if false { - } else if z.HasExtensions() && z.EncExt(x.SystemReserved) { - } else { - h.encconfig_ConfigurationMap((pkg2_config.ConfigurationMap)(x.SystemReserved), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("systemReserved")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SystemReserved == nil { - r.EncodeNil() - } else { - yym487 := z.EncBinary() - _ = yym487 - if false { - } else if z.HasExtensions() && z.EncExt(x.SystemReserved) { - } else { - h.encconfig_ConfigurationMap((pkg2_config.ConfigurationMap)(x.SystemReserved), e) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.KubeReserved == nil { - r.EncodeNil() - } else { - yym489 := z.EncBinary() - _ = yym489 - if false { - } else if z.HasExtensions() && z.EncExt(x.KubeReserved) { - } else { - h.encconfig_ConfigurationMap((pkg2_config.ConfigurationMap)(x.KubeReserved), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeReserved")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.KubeReserved == nil { - r.EncodeNil() - } else { - yym490 := z.EncBinary() - _ = yym490 - if false { - } else if z.HasExtensions() && z.EncExt(x.KubeReserved) { - } else { - h.encconfig_ConfigurationMap((pkg2_config.ConfigurationMap)(x.KubeReserved), e) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym492 := z.EncBinary() - _ = yym492 - if false { - } else { - r.EncodeBool(bool(x.ProtectKernelDefaults)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("protectKernelDefaults")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym493 := z.EncBinary() - _ = yym493 - if false { - } else { - r.EncodeBool(bool(x.ProtectKernelDefaults)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym495 := z.EncBinary() - _ = yym495 - if false { - } else { - r.EncodeBool(bool(x.MakeIPTablesUtilChains)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("makeIPTablesUtilChains")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym496 := z.EncBinary() - _ = yym496 - if false { - } else { - r.EncodeBool(bool(x.MakeIPTablesUtilChains)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym498 := z.EncBinary() - _ = yym498 - if false { - } else { - r.EncodeInt(int64(x.IPTablesMasqueradeBit)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iptablesMasqueradeBit")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym499 := z.EncBinary() - _ = yym499 - if 
false { - } else { - r.EncodeInt(int64(x.IPTablesMasqueradeBit)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym501 := z.EncBinary() - _ = yym501 - if false { - } else { - r.EncodeInt(int64(x.IPTablesDropBit)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iptablesDropBit")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym502 := z.EncBinary() - _ = yym502 - if false { - } else { - r.EncodeInt(int64(x.IPTablesDropBit)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[109] { - if x.AllowedUnsafeSysctls == nil { - r.EncodeNil() - } else { - yym504 := z.EncBinary() - _ = yym504 - if false { - } else { - z.F.EncSliceStringV(x.AllowedUnsafeSysctls, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq153[109] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("experimentalAllowedUnsafeSysctls")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AllowedUnsafeSysctls == nil { - r.EncodeNil() - } else { - yym505 := z.EncBinary() - _ = yym505 - if false { - } else { - z.F.EncSliceStringV(x.AllowedUnsafeSysctls, false, e) - } - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym507 := z.EncBinary() - _ = yym507 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FeatureGates)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("featureGates")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym508 := z.EncBinary() - _ = yym508 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FeatureGates)) - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[111] { - yym510 := z.EncBinary() - _ = yym510 - if false { - } else { - r.EncodeBool(bool(x.EnableCRI)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq153[111] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enableCRI")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym511 := z.EncBinary() - _ = yym511 - if false { - } else { - r.EncodeBool(bool(x.EnableCRI)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[112] { - yym513 := z.EncBinary() - _ = yym513 - if false { - } else { - r.EncodeBool(bool(x.ExperimentalFailSwapOn)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq153[112] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("experimentalFailSwapOn")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym514 := z.EncBinary() - _ = yym514 - if false { - } else { - r.EncodeBool(bool(x.ExperimentalFailSwapOn)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq153[113] { - yym516 := z.EncBinary() - _ = yym516 - if false { - } else { - r.EncodeBool(bool(x.ExperimentalCheckNodeCapabilitiesBeforeMount)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq153[113] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ExperimentalCheckNodeCapabilitiesBeforeMount")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym517 := z.EncBinary() - _ = yym517 - if false { - } else { - r.EncodeBool(bool(x.ExperimentalCheckNodeCapabilitiesBeforeMount)) - } - } - } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *KubeletConfiguration) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym518 := z.DecBinary() - _ = yym518 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct519 := r.ContainerType() - if yyct519 == codecSelferValueTypeMap1234 { - yyl519 := r.ReadMapStart() - if yyl519 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl519, d) - } - } else if yyct519 == codecSelferValueTypeArray1234 { - yyl519 := r.ReadArrayStart() - if yyl519 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl519, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys520Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys520Slc - var yyhl520 bool = l >= 0 - for yyj520 := 0; ; yyj520++ { - if yyhl520 { - if yyj520 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys520Slc = r.DecodeBytes(yys520Slc, true, true) - yys520 := string(yys520Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys520 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "podManifestPath": - if r.TryDecodeAsNil() { - x.PodManifestPath = "" - } else { - x.PodManifestPath = string(r.DecodeString()) - } - case "syncFrequency": - if r.TryDecodeAsNil() { - x.SyncFrequency = pkg1_unversioned.Duration{} - } else { - yyv524 := &x.SyncFrequency - yym525 := z.DecBinary() - _ = yym525 - if false { - } else if z.HasExtensions() && z.DecExt(yyv524) { - } else if !yym525 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv524) - } else { - z.DecFallback(yyv524, false) - } - } - case "fileCheckFrequency": - if r.TryDecodeAsNil() { - x.FileCheckFrequency = pkg1_unversioned.Duration{} - } else { - yyv526 := &x.FileCheckFrequency - yym527 := z.DecBinary() - _ = yym527 - if false { - } else if z.HasExtensions() && z.DecExt(yyv526) { - } else if !yym527 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv526) - } else { - z.DecFallback(yyv526, false) - } - } - case "httpCheckFrequency": - if r.TryDecodeAsNil() { - x.HTTPCheckFrequency = pkg1_unversioned.Duration{} - } else { - yyv528 := &x.HTTPCheckFrequency - yym529 := z.DecBinary() - _ = yym529 - if false { - } else if z.HasExtensions() && z.DecExt(yyv528) { - } else if !yym529 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv528) - } else { - z.DecFallback(yyv528, false) - } - } - case "manifestURL": - if r.TryDecodeAsNil() { - x.ManifestURL = "" - } else { - x.ManifestURL = string(r.DecodeString()) - } - case "manifestURLHeader": - if r.TryDecodeAsNil() { - x.ManifestURLHeader = "" - } else { - x.ManifestURLHeader = 
string(r.DecodeString()) - } - case "enableServer": - if r.TryDecodeAsNil() { - x.EnableServer = false - } else { - x.EnableServer = bool(r.DecodeBool()) - } - case "address": - if r.TryDecodeAsNil() { - x.Address = "" - } else { - x.Address = string(r.DecodeString()) - } - case "port": - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - case "readOnlyPort": - if r.TryDecodeAsNil() { - x.ReadOnlyPort = 0 - } else { - x.ReadOnlyPort = int32(r.DecodeInt(32)) - } - case "tlsCertFile": - if r.TryDecodeAsNil() { - x.TLSCertFile = "" - } else { - x.TLSCertFile = string(r.DecodeString()) - } - case "tlsPrivateKeyFile": - if r.TryDecodeAsNil() { - x.TLSPrivateKeyFile = "" - } else { - x.TLSPrivateKeyFile = string(r.DecodeString()) - } - case "certDirectory": - if r.TryDecodeAsNil() { - x.CertDirectory = "" - } else { - x.CertDirectory = string(r.DecodeString()) - } - case "authentication": - if r.TryDecodeAsNil() { - x.Authentication = KubeletAuthentication{} - } else { - yyv539 := &x.Authentication - yyv539.CodecDecodeSelf(d) - } - case "authorization": - if r.TryDecodeAsNil() { - x.Authorization = KubeletAuthorization{} - } else { - yyv540 := &x.Authorization - yyv540.CodecDecodeSelf(d) - } - case "hostnameOverride": - if r.TryDecodeAsNil() { - x.HostnameOverride = "" - } else { - x.HostnameOverride = string(r.DecodeString()) - } - case "podInfraContainerImage": - if r.TryDecodeAsNil() { - x.PodInfraContainerImage = "" - } else { - x.PodInfraContainerImage = string(r.DecodeString()) - } - case "dockerEndpoint": - if r.TryDecodeAsNil() { - x.DockerEndpoint = "" - } else { - x.DockerEndpoint = string(r.DecodeString()) - } - case "rootDirectory": - if r.TryDecodeAsNil() { - x.RootDirectory = "" - } else { - x.RootDirectory = string(r.DecodeString()) - } - case "seccompProfileRoot": - if r.TryDecodeAsNil() { - x.SeccompProfileRoot = "" - } else { - x.SeccompProfileRoot = string(r.DecodeString()) - } - case "allowPrivileged": - if r.TryDecodeAsNil() { - x.AllowPrivileged = false - } else { - x.AllowPrivileged = bool(r.DecodeBool()) - } - case "hostNetworkSources": - if r.TryDecodeAsNil() { - x.HostNetworkSources = nil - } else { - yyv547 := &x.HostNetworkSources - yym548 := z.DecBinary() - _ = yym548 - if false { - } else { - z.F.DecSliceStringX(yyv547, false, d) - } - } - case "hostPIDSources": - if r.TryDecodeAsNil() { - x.HostPIDSources = nil - } else { - yyv549 := &x.HostPIDSources - yym550 := z.DecBinary() - _ = yym550 - if false { - } else { - z.F.DecSliceStringX(yyv549, false, d) - } - } - case "hostIPCSources": - if r.TryDecodeAsNil() { - x.HostIPCSources = nil - } else { - yyv551 := &x.HostIPCSources - yym552 := z.DecBinary() - _ = yym552 - if false { - } else { - z.F.DecSliceStringX(yyv551, false, d) - } - } - case "registryPullQPS": - if r.TryDecodeAsNil() { - x.RegistryPullQPS = 0 - } else { - x.RegistryPullQPS = int32(r.DecodeInt(32)) - } - case "registryBurst": - if r.TryDecodeAsNil() { - x.RegistryBurst = 0 - } else { - x.RegistryBurst = int32(r.DecodeInt(32)) - } - case "eventRecordQPS": - if r.TryDecodeAsNil() { - x.EventRecordQPS = 0 - } else { - x.EventRecordQPS = int32(r.DecodeInt(32)) - } - case "eventBurst": - if r.TryDecodeAsNil() { - x.EventBurst = 0 - } else { - x.EventBurst = int32(r.DecodeInt(32)) - } - case "enableDebuggingHandlers": - if r.TryDecodeAsNil() { - x.EnableDebuggingHandlers = false - } else { - x.EnableDebuggingHandlers = bool(r.DecodeBool()) - } - case "minimumGCAge": - if r.TryDecodeAsNil() { - x.MinimumGCAge = 
pkg1_unversioned.Duration{} - } else { - yyv558 := &x.MinimumGCAge - yym559 := z.DecBinary() - _ = yym559 - if false { - } else if z.HasExtensions() && z.DecExt(yyv558) { - } else if !yym559 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv558) - } else { - z.DecFallback(yyv558, false) - } - } - case "maxPerPodContainerCount": - if r.TryDecodeAsNil() { - x.MaxPerPodContainerCount = 0 - } else { - x.MaxPerPodContainerCount = int32(r.DecodeInt(32)) - } - case "maxContainerCount": - if r.TryDecodeAsNil() { - x.MaxContainerCount = 0 - } else { - x.MaxContainerCount = int32(r.DecodeInt(32)) - } - case "cAdvisorPort": - if r.TryDecodeAsNil() { - x.CAdvisorPort = 0 - } else { - x.CAdvisorPort = int32(r.DecodeInt(32)) - } - case "healthzPort": - if r.TryDecodeAsNil() { - x.HealthzPort = 0 - } else { - x.HealthzPort = int32(r.DecodeInt(32)) - } - case "healthzBindAddress": - if r.TryDecodeAsNil() { - x.HealthzBindAddress = "" - } else { - x.HealthzBindAddress = string(r.DecodeString()) - } - case "oomScoreAdj": - if r.TryDecodeAsNil() { - x.OOMScoreAdj = 0 - } else { - x.OOMScoreAdj = int32(r.DecodeInt(32)) - } - case "registerNode": - if r.TryDecodeAsNil() { - x.RegisterNode = false - } else { - x.RegisterNode = bool(r.DecodeBool()) - } - case "clusterDomain": - if r.TryDecodeAsNil() { - x.ClusterDomain = "" - } else { - x.ClusterDomain = string(r.DecodeString()) - } - case "masterServiceNamespace": - if r.TryDecodeAsNil() { - x.MasterServiceNamespace = "" - } else { - x.MasterServiceNamespace = string(r.DecodeString()) - } - case "clusterDNS": - if r.TryDecodeAsNil() { - x.ClusterDNS = "" - } else { - x.ClusterDNS = string(r.DecodeString()) - } - case "streamingConnectionIdleTimeout": - if r.TryDecodeAsNil() { - x.StreamingConnectionIdleTimeout = pkg1_unversioned.Duration{} - } else { - yyv570 := &x.StreamingConnectionIdleTimeout - yym571 := z.DecBinary() - _ = yym571 - if false { - } else if z.HasExtensions() && z.DecExt(yyv570) { - } else if !yym571 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv570) - } else { - z.DecFallback(yyv570, false) - } - } - case "nodeStatusUpdateFrequency": - if r.TryDecodeAsNil() { - x.NodeStatusUpdateFrequency = pkg1_unversioned.Duration{} - } else { - yyv572 := &x.NodeStatusUpdateFrequency - yym573 := z.DecBinary() - _ = yym573 - if false { - } else if z.HasExtensions() && z.DecExt(yyv572) { - } else if !yym573 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv572) - } else { - z.DecFallback(yyv572, false) - } - } - case "imageMinimumGCAge": - if r.TryDecodeAsNil() { - x.ImageMinimumGCAge = pkg1_unversioned.Duration{} - } else { - yyv574 := &x.ImageMinimumGCAge - yym575 := z.DecBinary() - _ = yym575 - if false { - } else if z.HasExtensions() && z.DecExt(yyv574) { - } else if !yym575 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv574) - } else { - z.DecFallback(yyv574, false) - } - } - case "imageGCHighThresholdPercent": - if r.TryDecodeAsNil() { - x.ImageGCHighThresholdPercent = 0 - } else { - x.ImageGCHighThresholdPercent = int32(r.DecodeInt(32)) - } - case "imageGCLowThresholdPercent": - if r.TryDecodeAsNil() { - x.ImageGCLowThresholdPercent = 0 - } else { - x.ImageGCLowThresholdPercent = int32(r.DecodeInt(32)) - } - case "lowDiskSpaceThresholdMB": - if r.TryDecodeAsNil() { - x.LowDiskSpaceThresholdMB = 0 - } else { - x.LowDiskSpaceThresholdMB = int32(r.DecodeInt(32)) - } - case "volumeStatsAggPeriod": - if r.TryDecodeAsNil() { - x.VolumeStatsAggPeriod = pkg1_unversioned.Duration{} - } else { - yyv579 := &x.VolumeStatsAggPeriod - yym580 := z.DecBinary() - _ = yym580 - if 
false { - } else if z.HasExtensions() && z.DecExt(yyv579) { - } else if !yym580 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv579) - } else { - z.DecFallback(yyv579, false) - } - } - case "networkPluginName": - if r.TryDecodeAsNil() { - x.NetworkPluginName = "" - } else { - x.NetworkPluginName = string(r.DecodeString()) - } - case "networkPluginMTU": - if r.TryDecodeAsNil() { - x.NetworkPluginMTU = 0 - } else { - x.NetworkPluginMTU = int32(r.DecodeInt(32)) - } - case "networkPluginDir": - if r.TryDecodeAsNil() { - x.NetworkPluginDir = "" - } else { - x.NetworkPluginDir = string(r.DecodeString()) - } - case "cniConfDir": - if r.TryDecodeAsNil() { - x.CNIConfDir = "" - } else { - x.CNIConfDir = string(r.DecodeString()) - } - case "cniBinDir": - if r.TryDecodeAsNil() { - x.CNIBinDir = "" - } else { - x.CNIBinDir = string(r.DecodeString()) - } - case "volumePluginDir": - if r.TryDecodeAsNil() { - x.VolumePluginDir = "" - } else { - x.VolumePluginDir = string(r.DecodeString()) - } - case "cloudProvider": - if r.TryDecodeAsNil() { - x.CloudProvider = "" - } else { - x.CloudProvider = string(r.DecodeString()) - } - case "cloudConfigFile": - if r.TryDecodeAsNil() { - x.CloudConfigFile = "" - } else { - x.CloudConfigFile = string(r.DecodeString()) - } - case "kubeletCgroups": - if r.TryDecodeAsNil() { - x.KubeletCgroups = "" - } else { - x.KubeletCgroups = string(r.DecodeString()) - } - case "experimentalCgroupsPerQOS": - if r.TryDecodeAsNil() { - x.ExperimentalCgroupsPerQOS = false - } else { - x.ExperimentalCgroupsPerQOS = bool(r.DecodeBool()) - } - case "cgroupDriver": - if r.TryDecodeAsNil() { - x.CgroupDriver = "" - } else { - x.CgroupDriver = string(r.DecodeString()) - } - case "runtimeCgroups": - if r.TryDecodeAsNil() { - x.RuntimeCgroups = "" - } else { - x.RuntimeCgroups = string(r.DecodeString()) - } - case "systemCgroups": - if r.TryDecodeAsNil() { - x.SystemCgroups = "" - } else { - x.SystemCgroups = string(r.DecodeString()) - } - case "cgroupRoot": - if r.TryDecodeAsNil() { - x.CgroupRoot = "" - } else { - x.CgroupRoot = string(r.DecodeString()) - } - case "containerRuntime": - if r.TryDecodeAsNil() { - x.ContainerRuntime = "" - } else { - x.ContainerRuntime = string(r.DecodeString()) - } - case "remoteRuntimeEndpoint": - if r.TryDecodeAsNil() { - x.RemoteRuntimeEndpoint = "" - } else { - x.RemoteRuntimeEndpoint = string(r.DecodeString()) - } - case "remoteImageEndpoint": - if r.TryDecodeAsNil() { - x.RemoteImageEndpoint = "" - } else { - x.RemoteImageEndpoint = string(r.DecodeString()) - } - case "runtimeRequestTimeout": - if r.TryDecodeAsNil() { - x.RuntimeRequestTimeout = pkg1_unversioned.Duration{} - } else { - yyv598 := &x.RuntimeRequestTimeout - yym599 := z.DecBinary() - _ = yym599 - if false { - } else if z.HasExtensions() && z.DecExt(yyv598) { - } else if !yym599 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv598) - } else { - z.DecFallback(yyv598, false) - } - } - case "rktPath": - if r.TryDecodeAsNil() { - x.RktPath = "" - } else { - x.RktPath = string(r.DecodeString()) - } - case "experimentalMounterPath": - if r.TryDecodeAsNil() { - x.ExperimentalMounterPath = "" - } else { - x.ExperimentalMounterPath = string(r.DecodeString()) - } - case "rktAPIEndpoint": - if r.TryDecodeAsNil() { - x.RktAPIEndpoint = "" - } else { - x.RktAPIEndpoint = string(r.DecodeString()) - } - case "rktStage1Image": - if r.TryDecodeAsNil() { - x.RktStage1Image = "" - } else { - x.RktStage1Image = string(r.DecodeString()) - } - case "lockFilePath": - if r.TryDecodeAsNil() { - x.LockFilePath = "" - } 
else { - x.LockFilePath = string(r.DecodeString()) - } - case "exitOnLockContention": - if r.TryDecodeAsNil() { - x.ExitOnLockContention = false - } else { - x.ExitOnLockContention = bool(r.DecodeBool()) - } - case "hairpinMode": - if r.TryDecodeAsNil() { - x.HairpinMode = "" - } else { - x.HairpinMode = string(r.DecodeString()) - } - case "babysitDaemons": - if r.TryDecodeAsNil() { - x.BabysitDaemons = false - } else { - x.BabysitDaemons = bool(r.DecodeBool()) - } - case "maxPods": - if r.TryDecodeAsNil() { - x.MaxPods = 0 - } else { - x.MaxPods = int32(r.DecodeInt(32)) - } - case "nvidiaGPUs": - if r.TryDecodeAsNil() { - x.NvidiaGPUs = 0 - } else { - x.NvidiaGPUs = int32(r.DecodeInt(32)) - } - case "dockerExecHandlerName": - if r.TryDecodeAsNil() { - x.DockerExecHandlerName = "" - } else { - x.DockerExecHandlerName = string(r.DecodeString()) - } - case "podCIDR": - if r.TryDecodeAsNil() { - x.PodCIDR = "" - } else { - x.PodCIDR = string(r.DecodeString()) - } - case "resolvConf": - if r.TryDecodeAsNil() { - x.ResolverConfig = "" - } else { - x.ResolverConfig = string(r.DecodeString()) - } - case "cpuCFSQuota": - if r.TryDecodeAsNil() { - x.CPUCFSQuota = false - } else { - x.CPUCFSQuota = bool(r.DecodeBool()) - } - case "containerized": - if r.TryDecodeAsNil() { - x.Containerized = false - } else { - x.Containerized = bool(r.DecodeBool()) - } - case "maxOpenFiles": - if r.TryDecodeAsNil() { - x.MaxOpenFiles = 0 - } else { - x.MaxOpenFiles = int64(r.DecodeInt(64)) - } - case "reconcileCIDR": - if r.TryDecodeAsNil() { - x.ReconcileCIDR = false - } else { - x.ReconcileCIDR = bool(r.DecodeBool()) - } - case "registerSchedulable": - if r.TryDecodeAsNil() { - x.RegisterSchedulable = false - } else { - x.RegisterSchedulable = bool(r.DecodeBool()) - } - case "contentType": - if r.TryDecodeAsNil() { - x.ContentType = "" - } else { - x.ContentType = string(r.DecodeString()) - } - case "kubeAPIQPS": - if r.TryDecodeAsNil() { - x.KubeAPIQPS = 0 - } else { - x.KubeAPIQPS = int32(r.DecodeInt(32)) - } - case "kubeAPIBurst": - if r.TryDecodeAsNil() { - x.KubeAPIBurst = 0 - } else { - x.KubeAPIBurst = int32(r.DecodeInt(32)) - } - case "serializeImagePulls": - if r.TryDecodeAsNil() { - x.SerializeImagePulls = false - } else { - x.SerializeImagePulls = bool(r.DecodeBool()) - } - case "outOfDiskTransitionFrequency": - if r.TryDecodeAsNil() { - x.OutOfDiskTransitionFrequency = pkg1_unversioned.Duration{} - } else { - yyv622 := &x.OutOfDiskTransitionFrequency - yym623 := z.DecBinary() - _ = yym623 - if false { - } else if z.HasExtensions() && z.DecExt(yyv622) { - } else if !yym623 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv622) - } else { - z.DecFallback(yyv622, false) - } - } - case "nodeIP": - if r.TryDecodeAsNil() { - x.NodeIP = "" - } else { - x.NodeIP = string(r.DecodeString()) - } - case "nodeLabels": - if r.TryDecodeAsNil() { - x.NodeLabels = nil - } else { - yyv625 := &x.NodeLabels - yym626 := z.DecBinary() - _ = yym626 - if false { - } else { - z.F.DecMapStringStringX(yyv625, false, d) - } - } - case "nonMasqueradeCIDR": - if r.TryDecodeAsNil() { - x.NonMasqueradeCIDR = "" - } else { - x.NonMasqueradeCIDR = string(r.DecodeString()) - } - case "enableCustomMetrics": - if r.TryDecodeAsNil() { - x.EnableCustomMetrics = false - } else { - x.EnableCustomMetrics = bool(r.DecodeBool()) - } - case "evictionHard": - if r.TryDecodeAsNil() { - x.EvictionHard = "" - } else { - x.EvictionHard = string(r.DecodeString()) - } - case "evictionSoft": - if r.TryDecodeAsNil() { - x.EvictionSoft = "" - } else { - 
x.EvictionSoft = string(r.DecodeString()) - } - case "evictionSoftGracePeriod": - if r.TryDecodeAsNil() { - x.EvictionSoftGracePeriod = "" - } else { - x.EvictionSoftGracePeriod = string(r.DecodeString()) - } - case "evictionPressureTransitionPeriod": - if r.TryDecodeAsNil() { - x.EvictionPressureTransitionPeriod = pkg1_unversioned.Duration{} - } else { - yyv632 := &x.EvictionPressureTransitionPeriod - yym633 := z.DecBinary() - _ = yym633 - if false { - } else if z.HasExtensions() && z.DecExt(yyv632) { - } else if !yym633 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv632) - } else { - z.DecFallback(yyv632, false) - } - } - case "evictionMaxPodGracePeriod": - if r.TryDecodeAsNil() { - x.EvictionMaxPodGracePeriod = 0 - } else { - x.EvictionMaxPodGracePeriod = int32(r.DecodeInt(32)) - } - case "evictionMinimumReclaim": - if r.TryDecodeAsNil() { - x.EvictionMinimumReclaim = "" - } else { - x.EvictionMinimumReclaim = string(r.DecodeString()) - } - case "experimentalKernelMemcgNotification": - if r.TryDecodeAsNil() { - x.ExperimentalKernelMemcgNotification = false - } else { - x.ExperimentalKernelMemcgNotification = bool(r.DecodeBool()) - } - case "podsPerCore": - if r.TryDecodeAsNil() { - x.PodsPerCore = 0 - } else { - x.PodsPerCore = int32(r.DecodeInt(32)) - } - case "enableControllerAttachDetach": - if r.TryDecodeAsNil() { - x.EnableControllerAttachDetach = false - } else { - x.EnableControllerAttachDetach = bool(r.DecodeBool()) - } - case "systemReserved": - if r.TryDecodeAsNil() { - x.SystemReserved = nil - } else { - yyv639 := &x.SystemReserved - yym640 := z.DecBinary() - _ = yym640 - if false { - } else if z.HasExtensions() && z.DecExt(yyv639) { - } else { - h.decconfig_ConfigurationMap((*pkg2_config.ConfigurationMap)(yyv639), d) - } - } - case "kubeReserved": - if r.TryDecodeAsNil() { - x.KubeReserved = nil - } else { - yyv641 := &x.KubeReserved - yym642 := z.DecBinary() - _ = yym642 - if false { - } else if z.HasExtensions() && z.DecExt(yyv641) { - } else { - h.decconfig_ConfigurationMap((*pkg2_config.ConfigurationMap)(yyv641), d) - } - } - case "protectKernelDefaults": - if r.TryDecodeAsNil() { - x.ProtectKernelDefaults = false - } else { - x.ProtectKernelDefaults = bool(r.DecodeBool()) - } - case "makeIPTablesUtilChains": - if r.TryDecodeAsNil() { - x.MakeIPTablesUtilChains = false - } else { - x.MakeIPTablesUtilChains = bool(r.DecodeBool()) - } - case "iptablesMasqueradeBit": - if r.TryDecodeAsNil() { - x.IPTablesMasqueradeBit = 0 - } else { - x.IPTablesMasqueradeBit = int32(r.DecodeInt(32)) - } - case "iptablesDropBit": - if r.TryDecodeAsNil() { - x.IPTablesDropBit = 0 - } else { - x.IPTablesDropBit = int32(r.DecodeInt(32)) - } - case "experimentalAllowedUnsafeSysctls": - if r.TryDecodeAsNil() { - x.AllowedUnsafeSysctls = nil - } else { - yyv647 := &x.AllowedUnsafeSysctls - yym648 := z.DecBinary() - _ = yym648 - if false { - } else { - z.F.DecSliceStringX(yyv647, false, d) - } - } - case "featureGates": - if r.TryDecodeAsNil() { - x.FeatureGates = "" - } else { - x.FeatureGates = string(r.DecodeString()) - } - case "enableCRI": - if r.TryDecodeAsNil() { - x.EnableCRI = false - } else { - x.EnableCRI = bool(r.DecodeBool()) - } - case "experimentalFailSwapOn": - if r.TryDecodeAsNil() { - x.ExperimentalFailSwapOn = false - } else { - x.ExperimentalFailSwapOn = bool(r.DecodeBool()) - } - case "ExperimentalCheckNodeCapabilitiesBeforeMount": - if r.TryDecodeAsNil() { - x.ExperimentalCheckNodeCapabilitiesBeforeMount = false - } else { - x.ExperimentalCheckNodeCapabilitiesBeforeMount = 
bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys520) - } // end switch yys520 - } // end for yyj520 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj653 int - var yyb653 bool - var yyhl653 bool = l >= 0 - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodManifestPath = "" - } else { - x.PodManifestPath = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SyncFrequency = pkg1_unversioned.Duration{} - } else { - yyv657 := &x.SyncFrequency - yym658 := z.DecBinary() - _ = yym658 - if false { - } else if z.HasExtensions() && z.DecExt(yyv657) { - } else if !yym658 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv657) - } else { - z.DecFallback(yyv657, false) - } - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FileCheckFrequency = pkg1_unversioned.Duration{} - } else { - yyv659 := &x.FileCheckFrequency - yym660 := z.DecBinary() - _ = yym660 - if false { - } else if z.HasExtensions() && z.DecExt(yyv659) { - } else if !yym660 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv659) - } else { - z.DecFallback(yyv659, false) - } - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HTTPCheckFrequency = pkg1_unversioned.Duration{} - } else { - yyv661 := &x.HTTPCheckFrequency - yym662 := z.DecBinary() - _ = yym662 - if false { - } else if z.HasExtensions() && z.DecExt(yyv661) { - } else if !yym662 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv661) - } else { - z.DecFallback(yyv661, false) - } - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ManifestURL = "" - } else { - x.ManifestURL = string(r.DecodeString()) - } - yyj653++ 
- if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ManifestURLHeader = "" - } else { - x.ManifestURLHeader = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EnableServer = false - } else { - x.EnableServer = bool(r.DecodeBool()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Address = "" - } else { - x.Address = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnlyPort = 0 - } else { - x.ReadOnlyPort = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TLSCertFile = "" - } else { - x.TLSCertFile = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TLSPrivateKeyFile = "" - } else { - x.TLSPrivateKeyFile = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CertDirectory = "" - } else { - x.CertDirectory = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Authentication = KubeletAuthentication{} - } else { - yyv672 := &x.Authentication - yyv672.CodecDecodeSelf(d) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Authorization = KubeletAuthorization{} - } else { - yyv673 := &x.Authorization - yyv673.CodecDecodeSelf(d) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l 
- } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostnameOverride = "" - } else { - x.HostnameOverride = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodInfraContainerImage = "" - } else { - x.PodInfraContainerImage = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DockerEndpoint = "" - } else { - x.DockerEndpoint = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RootDirectory = "" - } else { - x.RootDirectory = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SeccompProfileRoot = "" - } else { - x.SeccompProfileRoot = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AllowPrivileged = false - } else { - x.AllowPrivileged = bool(r.DecodeBool()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostNetworkSources = nil - } else { - yyv680 := &x.HostNetworkSources - yym681 := z.DecBinary() - _ = yym681 - if false { - } else { - z.F.DecSliceStringX(yyv680, false, d) - } - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostPIDSources = nil - } else { - yyv682 := &x.HostPIDSources - yym683 := z.DecBinary() - _ = yym683 - if false { - } else { - z.F.DecSliceStringX(yyv682, false, d) - } - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostIPCSources = nil - } else { - yyv684 := &x.HostIPCSources - yym685 := z.DecBinary() - _ = yym685 - if false { - } else { - z.F.DecSliceStringX(yyv684, false, d) - } - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RegistryPullQPS = 0 - } else { - x.RegistryPullQPS = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RegistryBurst = 0 - } else { - x.RegistryBurst = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EventRecordQPS = 0 - } else { - x.EventRecordQPS = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EventBurst = 0 - } else { - x.EventBurst = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EnableDebuggingHandlers = false - } else { - x.EnableDebuggingHandlers = bool(r.DecodeBool()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinimumGCAge = pkg1_unversioned.Duration{} - } else { - yyv691 := &x.MinimumGCAge - yym692 := z.DecBinary() - _ = yym692 - if false { - } else if z.HasExtensions() && z.DecExt(yyv691) { - } else if !yym692 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv691) - } else { - z.DecFallback(yyv691, false) - } - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxPerPodContainerCount = 0 - } else { - x.MaxPerPodContainerCount = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxContainerCount = 0 - } else { - x.MaxContainerCount = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CAdvisorPort = 0 - } else { - x.CAdvisorPort = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - 
x.HealthzPort = 0 - } else { - x.HealthzPort = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HealthzBindAddress = "" - } else { - x.HealthzBindAddress = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.OOMScoreAdj = 0 - } else { - x.OOMScoreAdj = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RegisterNode = false - } else { - x.RegisterNode = bool(r.DecodeBool()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterDomain = "" - } else { - x.ClusterDomain = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MasterServiceNamespace = "" - } else { - x.MasterServiceNamespace = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterDNS = "" - } else { - x.ClusterDNS = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.StreamingConnectionIdleTimeout = pkg1_unversioned.Duration{} - } else { - yyv703 := &x.StreamingConnectionIdleTimeout - yym704 := z.DecBinary() - _ = yym704 - if false { - } else if z.HasExtensions() && z.DecExt(yyv703) { - } else if !yym704 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv703) - } else { - z.DecFallback(yyv703, false) - } - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeStatusUpdateFrequency = pkg1_unversioned.Duration{} - } else { - yyv705 := &x.NodeStatusUpdateFrequency - yym706 := z.DecBinary() - _ = yym706 - if false { - } else if z.HasExtensions() && z.DecExt(yyv705) { - } else if !yym706 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv705) - } else { - z.DecFallback(yyv705, false) - } - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - 
return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ImageMinimumGCAge = pkg1_unversioned.Duration{} - } else { - yyv707 := &x.ImageMinimumGCAge - yym708 := z.DecBinary() - _ = yym708 - if false { - } else if z.HasExtensions() && z.DecExt(yyv707) { - } else if !yym708 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv707) - } else { - z.DecFallback(yyv707, false) - } - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ImageGCHighThresholdPercent = 0 - } else { - x.ImageGCHighThresholdPercent = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ImageGCLowThresholdPercent = 0 - } else { - x.ImageGCLowThresholdPercent = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LowDiskSpaceThresholdMB = 0 - } else { - x.LowDiskSpaceThresholdMB = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeStatsAggPeriod = pkg1_unversioned.Duration{} - } else { - yyv712 := &x.VolumeStatsAggPeriod - yym713 := z.DecBinary() - _ = yym713 - if false { - } else if z.HasExtensions() && z.DecExt(yyv712) { - } else if !yym713 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv712) - } else { - z.DecFallback(yyv712, false) - } - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NetworkPluginName = "" - } else { - x.NetworkPluginName = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NetworkPluginMTU = 0 - } else { - x.NetworkPluginMTU = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NetworkPluginDir = "" - } else { - x.NetworkPluginDir = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CNIConfDir = "" - } else { - x.CNIConfDir = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - 
yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CNIBinDir = "" - } else { - x.CNIBinDir = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumePluginDir = "" - } else { - x.VolumePluginDir = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CloudProvider = "" - } else { - x.CloudProvider = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CloudConfigFile = "" - } else { - x.CloudConfigFile = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeletCgroups = "" - } else { - x.KubeletCgroups = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExperimentalCgroupsPerQOS = false - } else { - x.ExperimentalCgroupsPerQOS = bool(r.DecodeBool()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CgroupDriver = "" - } else { - x.CgroupDriver = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RuntimeCgroups = "" - } else { - x.RuntimeCgroups = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SystemCgroups = "" - } else { - x.SystemCgroups = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CgroupRoot = "" - } else { - x.CgroupRoot = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = 
r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerRuntime = "" - } else { - x.ContainerRuntime = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RemoteRuntimeEndpoint = "" - } else { - x.RemoteRuntimeEndpoint = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RemoteImageEndpoint = "" - } else { - x.RemoteImageEndpoint = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RuntimeRequestTimeout = pkg1_unversioned.Duration{} - } else { - yyv731 := &x.RuntimeRequestTimeout - yym732 := z.DecBinary() - _ = yym732 - if false { - } else if z.HasExtensions() && z.DecExt(yyv731) { - } else if !yym732 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv731) - } else { - z.DecFallback(yyv731, false) - } - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RktPath = "" - } else { - x.RktPath = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExperimentalMounterPath = "" - } else { - x.ExperimentalMounterPath = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RktAPIEndpoint = "" - } else { - x.RktAPIEndpoint = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RktStage1Image = "" - } else { - x.RktStage1Image = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LockFilePath = "" - } else { - x.LockFilePath = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExitOnLockContention = false - } else { - x.ExitOnLockContention = bool(r.DecodeBool()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HairpinMode = "" - } else { - x.HairpinMode = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.BabysitDaemons = false - } else { - x.BabysitDaemons = bool(r.DecodeBool()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxPods = 0 - } else { - x.MaxPods = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NvidiaGPUs = 0 - } else { - x.NvidiaGPUs = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DockerExecHandlerName = "" - } else { - x.DockerExecHandlerName = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodCIDR = "" - } else { - x.PodCIDR = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResolverConfig = "" - } else { - x.ResolverConfig = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CPUCFSQuota = false - } else { - x.CPUCFSQuota = bool(r.DecodeBool()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Containerized = false - } else { - x.Containerized = bool(r.DecodeBool()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - 
x.MaxOpenFiles = 0 - } else { - x.MaxOpenFiles = int64(r.DecodeInt(64)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReconcileCIDR = false - } else { - x.ReconcileCIDR = bool(r.DecodeBool()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RegisterSchedulable = false - } else { - x.RegisterSchedulable = bool(r.DecodeBool()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContentType = "" - } else { - x.ContentType = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeAPIQPS = 0 - } else { - x.KubeAPIQPS = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeAPIBurst = 0 - } else { - x.KubeAPIBurst = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SerializeImagePulls = false - } else { - x.SerializeImagePulls = bool(r.DecodeBool()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.OutOfDiskTransitionFrequency = pkg1_unversioned.Duration{} - } else { - yyv755 := &x.OutOfDiskTransitionFrequency - yym756 := z.DecBinary() - _ = yym756 - if false { - } else if z.HasExtensions() && z.DecExt(yyv755) { - } else if !yym756 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv755) - } else { - z.DecFallback(yyv755, false) - } - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeIP = "" - } else { - x.NodeIP = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeLabels = nil - } else { - yyv758 := &x.NodeLabels - yym759 := z.DecBinary() - _ = yym759 - if false { - } else { - z.F.DecMapStringStringX(yyv758, false, d) - } - } - 
yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NonMasqueradeCIDR = "" - } else { - x.NonMasqueradeCIDR = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EnableCustomMetrics = false - } else { - x.EnableCustomMetrics = bool(r.DecodeBool()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EvictionHard = "" - } else { - x.EvictionHard = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EvictionSoft = "" - } else { - x.EvictionSoft = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EvictionSoftGracePeriod = "" - } else { - x.EvictionSoftGracePeriod = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EvictionPressureTransitionPeriod = pkg1_unversioned.Duration{} - } else { - yyv765 := &x.EvictionPressureTransitionPeriod - yym766 := z.DecBinary() - _ = yym766 - if false { - } else if z.HasExtensions() && z.DecExt(yyv765) { - } else if !yym766 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv765) - } else { - z.DecFallback(yyv765, false) - } - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EvictionMaxPodGracePeriod = 0 - } else { - x.EvictionMaxPodGracePeriod = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EvictionMinimumReclaim = "" - } else { - x.EvictionMinimumReclaim = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExperimentalKernelMemcgNotification = false - } else { - x.ExperimentalKernelMemcgNotification = bool(r.DecodeBool()) - } - yyj653++ - if yyhl653 { - yyb653 = 
yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodsPerCore = 0 - } else { - x.PodsPerCore = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EnableControllerAttachDetach = false - } else { - x.EnableControllerAttachDetach = bool(r.DecodeBool()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SystemReserved = nil - } else { - yyv772 := &x.SystemReserved - yym773 := z.DecBinary() - _ = yym773 - if false { - } else if z.HasExtensions() && z.DecExt(yyv772) { - } else { - h.decconfig_ConfigurationMap((*pkg2_config.ConfigurationMap)(yyv772), d) - } - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeReserved = nil - } else { - yyv774 := &x.KubeReserved - yym775 := z.DecBinary() - _ = yym775 - if false { - } else if z.HasExtensions() && z.DecExt(yyv774) { - } else { - h.decconfig_ConfigurationMap((*pkg2_config.ConfigurationMap)(yyv774), d) - } - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ProtectKernelDefaults = false - } else { - x.ProtectKernelDefaults = bool(r.DecodeBool()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MakeIPTablesUtilChains = false - } else { - x.MakeIPTablesUtilChains = bool(r.DecodeBool()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.IPTablesMasqueradeBit = 0 - } else { - x.IPTablesMasqueradeBit = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.IPTablesDropBit = 0 - } else { - x.IPTablesDropBit = int32(r.DecodeInt(32)) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AllowedUnsafeSysctls = nil - } else { - yyv780 := &x.AllowedUnsafeSysctls - yym781 := z.DecBinary() - _ = yym781 
- if false { - } else { - z.F.DecSliceStringX(yyv780, false, d) - } - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FeatureGates = "" - } else { - x.FeatureGates = string(r.DecodeString()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EnableCRI = false - } else { - x.EnableCRI = bool(r.DecodeBool()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExperimentalFailSwapOn = false - } else { - x.ExperimentalFailSwapOn = bool(r.DecodeBool()) - } - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExperimentalCheckNodeCapabilitiesBeforeMount = false - } else { - x.ExperimentalCheckNodeCapabilitiesBeforeMount = bool(r.DecodeBool()) - } - for { - yyj653++ - if yyhl653 { - yyb653 = yyj653 > l - } else { - yyb653 = r.CheckBreak() - } - if yyb653 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj653-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x KubeletAuthorizationMode) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym786 := z.EncBinary() - _ = yym786 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *KubeletAuthorizationMode) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym787 := z.DecBinary() - _ = yym787 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *KubeletAuthorization) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym788 := z.EncBinary() - _ = yym788 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep789 := !z.EncBinary() - yy2arr789 := z.EncBasicHandle().StructToArray - var yyq789 [2]bool - _, _, _ = yysep789, yyq789, yy2arr789 - const yyr789 bool = false - var yynn789 int - if yyr789 || yy2arr789 { - r.EncodeArrayStart(2) - } else { - yynn789 = 2 - for _, b := range yyq789 { - if b { - yynn789++ - } - } - r.EncodeMapStart(yynn789) - yynn789 = 0 - } - if yyr789 || yy2arr789 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Mode.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("mode")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Mode.CodecEncodeSelf(e) - } - if yyr789 || yy2arr789 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy792 := &x.Webhook - yy792.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("webhook")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy793 := &x.Webhook - yy793.CodecEncodeSelf(e) - } - if yyr789 || yy2arr789 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *KubeletAuthorization) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym794 := z.DecBinary() - _ = yym794 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct795 := r.ContainerType() - if yyct795 == codecSelferValueTypeMap1234 { - yyl795 := r.ReadMapStart() - if yyl795 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl795, d) - } - } else if yyct795 == codecSelferValueTypeArray1234 { - yyl795 := r.ReadArrayStart() - if yyl795 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl795, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *KubeletAuthorization) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys796Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys796Slc - var yyhl796 bool = l >= 0 - for yyj796 := 0; ; yyj796++ { - if yyhl796 { - if yyj796 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys796Slc = r.DecodeBytes(yys796Slc, true, true) - yys796 := string(yys796Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys796 { - case "mode": - if r.TryDecodeAsNil() { - x.Mode = "" - } else { - x.Mode = KubeletAuthorizationMode(r.DecodeString()) - } - case "webhook": - if r.TryDecodeAsNil() { - x.Webhook = KubeletWebhookAuthorization{} - } else { - yyv798 := &x.Webhook - yyv798.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys796) - } // end switch yys796 - } // end for yyj796 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *KubeletAuthorization) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj799 int - var yyb799 bool - var yyhl799 bool = l >= 0 - yyj799++ - if yyhl799 { - yyb799 = yyj799 > l - } else { - yyb799 = r.CheckBreak() - } - if yyb799 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Mode = "" - } else { - x.Mode = KubeletAuthorizationMode(r.DecodeString()) - } - yyj799++ - if yyhl799 { - yyb799 = yyj799 > l - } else { - yyb799 = r.CheckBreak() - } - if yyb799 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Webhook = KubeletWebhookAuthorization{} - } else { - yyv801 := &x.Webhook - yyv801.CodecDecodeSelf(d) - } - for { - yyj799++ - if yyhl799 { - yyb799 = yyj799 > l - } else { - yyb799 = r.CheckBreak() - } - if yyb799 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - 
z.DecStructFieldNotFound(yyj799-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *KubeletWebhookAuthorization) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym802 := z.EncBinary() - _ = yym802 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep803 := !z.EncBinary() - yy2arr803 := z.EncBasicHandle().StructToArray - var yyq803 [2]bool - _, _, _ = yysep803, yyq803, yy2arr803 - const yyr803 bool = false - var yynn803 int - if yyr803 || yy2arr803 { - r.EncodeArrayStart(2) - } else { - yynn803 = 2 - for _, b := range yyq803 { - if b { - yynn803++ - } - } - r.EncodeMapStart(yynn803) - yynn803 = 0 - } - if yyr803 || yy2arr803 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy805 := &x.CacheAuthorizedTTL - yym806 := z.EncBinary() - _ = yym806 - if false { - } else if z.HasExtensions() && z.EncExt(yy805) { - } else if !yym806 && z.IsJSONHandle() { - z.EncJSONMarshal(yy805) - } else { - z.EncFallback(yy805) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cacheAuthorizedTTL")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy807 := &x.CacheAuthorizedTTL - yym808 := z.EncBinary() - _ = yym808 - if false { - } else if z.HasExtensions() && z.EncExt(yy807) { - } else if !yym808 && z.IsJSONHandle() { - z.EncJSONMarshal(yy807) - } else { - z.EncFallback(yy807) - } - } - if yyr803 || yy2arr803 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy810 := &x.CacheUnauthorizedTTL - yym811 := z.EncBinary() - _ = yym811 - if false { - } else if z.HasExtensions() && z.EncExt(yy810) { - } else if !yym811 && z.IsJSONHandle() { - z.EncJSONMarshal(yy810) - } else { - z.EncFallback(yy810) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cacheUnauthorizedTTL")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy812 := &x.CacheUnauthorizedTTL - yym813 := z.EncBinary() - _ = yym813 - if false { - } else if z.HasExtensions() && z.EncExt(yy812) { - } else if !yym813 && z.IsJSONHandle() { - z.EncJSONMarshal(yy812) - } else { - z.EncFallback(yy812) - } - } - if yyr803 || yy2arr803 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *KubeletWebhookAuthorization) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym814 := z.DecBinary() - _ = yym814 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct815 := r.ContainerType() - if yyct815 == codecSelferValueTypeMap1234 { - yyl815 := r.ReadMapStart() - if yyl815 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl815, d) - } - } else if yyct815 == codecSelferValueTypeArray1234 { - yyl815 := r.ReadArrayStart() - if yyl815 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl815, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *KubeletWebhookAuthorization) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys816Slc = z.DecScratchBuffer() 
// default slice to decode into - _ = yys816Slc - var yyhl816 bool = l >= 0 - for yyj816 := 0; ; yyj816++ { - if yyhl816 { - if yyj816 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys816Slc = r.DecodeBytes(yys816Slc, true, true) - yys816 := string(yys816Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys816 { - case "cacheAuthorizedTTL": - if r.TryDecodeAsNil() { - x.CacheAuthorizedTTL = pkg1_unversioned.Duration{} - } else { - yyv817 := &x.CacheAuthorizedTTL - yym818 := z.DecBinary() - _ = yym818 - if false { - } else if z.HasExtensions() && z.DecExt(yyv817) { - } else if !yym818 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv817) - } else { - z.DecFallback(yyv817, false) - } - } - case "cacheUnauthorizedTTL": - if r.TryDecodeAsNil() { - x.CacheUnauthorizedTTL = pkg1_unversioned.Duration{} - } else { - yyv819 := &x.CacheUnauthorizedTTL - yym820 := z.DecBinary() - _ = yym820 - if false { - } else if z.HasExtensions() && z.DecExt(yyv819) { - } else if !yym820 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv819) - } else { - z.DecFallback(yyv819, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys816) - } // end switch yys816 - } // end for yyj816 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *KubeletWebhookAuthorization) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj821 int - var yyb821 bool - var yyhl821 bool = l >= 0 - yyj821++ - if yyhl821 { - yyb821 = yyj821 > l - } else { - yyb821 = r.CheckBreak() - } - if yyb821 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CacheAuthorizedTTL = pkg1_unversioned.Duration{} - } else { - yyv822 := &x.CacheAuthorizedTTL - yym823 := z.DecBinary() - _ = yym823 - if false { - } else if z.HasExtensions() && z.DecExt(yyv822) { - } else if !yym823 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv822) - } else { - z.DecFallback(yyv822, false) - } - } - yyj821++ - if yyhl821 { - yyb821 = yyj821 > l - } else { - yyb821 = r.CheckBreak() - } - if yyb821 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CacheUnauthorizedTTL = pkg1_unversioned.Duration{} - } else { - yyv824 := &x.CacheUnauthorizedTTL - yym825 := z.DecBinary() - _ = yym825 - if false { - } else if z.HasExtensions() && z.DecExt(yyv824) { - } else if !yym825 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv824) - } else { - z.DecFallback(yyv824, false) - } - } - for { - yyj821++ - if yyhl821 { - yyb821 = yyj821 > l - } else { - yyb821 = r.CheckBreak() - } - if yyb821 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj821-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *KubeletAuthentication) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym826 := z.EncBinary() - _ = yym826 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep827 := !z.EncBinary() - yy2arr827 := z.EncBasicHandle().StructToArray - var yyq827 [3]bool - _, _, _ = yysep827, yyq827, yy2arr827 - const yyr827 bool = false - var yynn827 
int - if yyr827 || yy2arr827 { - r.EncodeArrayStart(3) - } else { - yynn827 = 3 - for _, b := range yyq827 { - if b { - yynn827++ - } - } - r.EncodeMapStart(yynn827) - yynn827 = 0 - } - if yyr827 || yy2arr827 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy829 := &x.X509 - yy829.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("x509")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy830 := &x.X509 - yy830.CodecEncodeSelf(e) - } - if yyr827 || yy2arr827 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy832 := &x.Webhook - yy832.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("webhook")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy833 := &x.Webhook - yy833.CodecEncodeSelf(e) - } - if yyr827 || yy2arr827 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy835 := &x.Anonymous - yy835.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("anonymous")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy836 := &x.Anonymous - yy836.CodecEncodeSelf(e) - } - if yyr827 || yy2arr827 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *KubeletAuthentication) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym837 := z.DecBinary() - _ = yym837 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct838 := r.ContainerType() - if yyct838 == codecSelferValueTypeMap1234 { - yyl838 := r.ReadMapStart() - if yyl838 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl838, d) - } - } else if yyct838 == codecSelferValueTypeArray1234 { - yyl838 := r.ReadArrayStart() - if yyl838 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl838, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *KubeletAuthentication) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys839Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys839Slc - var yyhl839 bool = l >= 0 - for yyj839 := 0; ; yyj839++ { - if yyhl839 { - if yyj839 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys839Slc = r.DecodeBytes(yys839Slc, true, true) - yys839 := string(yys839Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys839 { - case "x509": - if r.TryDecodeAsNil() { - x.X509 = KubeletX509Authentication{} - } else { - yyv840 := &x.X509 - yyv840.CodecDecodeSelf(d) - } - case "webhook": - if r.TryDecodeAsNil() { - x.Webhook = KubeletWebhookAuthentication{} - } else { - yyv841 := &x.Webhook - yyv841.CodecDecodeSelf(d) - } - case "anonymous": - if r.TryDecodeAsNil() { - x.Anonymous = KubeletAnonymousAuthentication{} - } else { - yyv842 := &x.Anonymous - yyv842.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys839) - } // end switch yys839 - } // end for yyj839 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) 
-} - -func (x *KubeletAuthentication) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj843 int - var yyb843 bool - var yyhl843 bool = l >= 0 - yyj843++ - if yyhl843 { - yyb843 = yyj843 > l - } else { - yyb843 = r.CheckBreak() - } - if yyb843 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.X509 = KubeletX509Authentication{} - } else { - yyv844 := &x.X509 - yyv844.CodecDecodeSelf(d) - } - yyj843++ - if yyhl843 { - yyb843 = yyj843 > l - } else { - yyb843 = r.CheckBreak() - } - if yyb843 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Webhook = KubeletWebhookAuthentication{} - } else { - yyv845 := &x.Webhook - yyv845.CodecDecodeSelf(d) - } - yyj843++ - if yyhl843 { - yyb843 = yyj843 > l - } else { - yyb843 = r.CheckBreak() - } - if yyb843 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Anonymous = KubeletAnonymousAuthentication{} - } else { - yyv846 := &x.Anonymous - yyv846.CodecDecodeSelf(d) - } - for { - yyj843++ - if yyhl843 { - yyb843 = yyj843 > l - } else { - yyb843 = r.CheckBreak() - } - if yyb843 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj843-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *KubeletX509Authentication) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym847 := z.EncBinary() - _ = yym847 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep848 := !z.EncBinary() - yy2arr848 := z.EncBasicHandle().StructToArray - var yyq848 [1]bool - _, _, _ = yysep848, yyq848, yy2arr848 - const yyr848 bool = false - var yynn848 int - if yyr848 || yy2arr848 { - r.EncodeArrayStart(1) - } else { - yynn848 = 1 - for _, b := range yyq848 { - if b { - yynn848++ - } - } - r.EncodeMapStart(yynn848) - yynn848 = 0 - } - if yyr848 || yy2arr848 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym850 := z.EncBinary() - _ = yym850 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClientCAFile)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clientCAFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym851 := z.EncBinary() - _ = yym851 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClientCAFile)) - } - } - if yyr848 || yy2arr848 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *KubeletX509Authentication) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym852 := z.DecBinary() - _ = yym852 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct853 := r.ContainerType() - if yyct853 == codecSelferValueTypeMap1234 { - yyl853 := r.ReadMapStart() - if yyl853 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - 
x.codecDecodeSelfFromMap(yyl853, d) - } - } else if yyct853 == codecSelferValueTypeArray1234 { - yyl853 := r.ReadArrayStart() - if yyl853 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl853, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *KubeletX509Authentication) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys854Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys854Slc - var yyhl854 bool = l >= 0 - for yyj854 := 0; ; yyj854++ { - if yyhl854 { - if yyj854 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys854Slc = r.DecodeBytes(yys854Slc, true, true) - yys854 := string(yys854Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys854 { - case "clientCAFile": - if r.TryDecodeAsNil() { - x.ClientCAFile = "" - } else { - x.ClientCAFile = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys854) - } // end switch yys854 - } // end for yyj854 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *KubeletX509Authentication) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj856 int - var yyb856 bool - var yyhl856 bool = l >= 0 - yyj856++ - if yyhl856 { - yyb856 = yyj856 > l - } else { - yyb856 = r.CheckBreak() - } - if yyb856 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClientCAFile = "" - } else { - x.ClientCAFile = string(r.DecodeString()) - } - for { - yyj856++ - if yyhl856 { - yyb856 = yyj856 > l - } else { - yyb856 = r.CheckBreak() - } - if yyb856 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj856-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *KubeletWebhookAuthentication) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym858 := z.EncBinary() - _ = yym858 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep859 := !z.EncBinary() - yy2arr859 := z.EncBasicHandle().StructToArray - var yyq859 [2]bool - _, _, _ = yysep859, yyq859, yy2arr859 - const yyr859 bool = false - var yynn859 int - if yyr859 || yy2arr859 { - r.EncodeArrayStart(2) - } else { - yynn859 = 2 - for _, b := range yyq859 { - if b { - yynn859++ - } - } - r.EncodeMapStart(yynn859) - yynn859 = 0 - } - if yyr859 || yy2arr859 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym861 := z.EncBinary() - _ = yym861 - if false { - } else { - r.EncodeBool(bool(x.Enabled)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enabled")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym862 := z.EncBinary() - _ = yym862 - if false { - } else { - r.EncodeBool(bool(x.Enabled)) - } - } - if yyr859 || yy2arr859 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy864 := &x.CacheTTL - yym865 := z.EncBinary() - _ = yym865 - if false { - } else if z.HasExtensions() && z.EncExt(yy864) { - } else if !yym865 && 
z.IsJSONHandle() { - z.EncJSONMarshal(yy864) - } else { - z.EncFallback(yy864) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cacheTTL")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy866 := &x.CacheTTL - yym867 := z.EncBinary() - _ = yym867 - if false { - } else if z.HasExtensions() && z.EncExt(yy866) { - } else if !yym867 && z.IsJSONHandle() { - z.EncJSONMarshal(yy866) - } else { - z.EncFallback(yy866) - } - } - if yyr859 || yy2arr859 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *KubeletWebhookAuthentication) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym868 := z.DecBinary() - _ = yym868 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct869 := r.ContainerType() - if yyct869 == codecSelferValueTypeMap1234 { - yyl869 := r.ReadMapStart() - if yyl869 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl869, d) - } - } else if yyct869 == codecSelferValueTypeArray1234 { - yyl869 := r.ReadArrayStart() - if yyl869 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl869, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *KubeletWebhookAuthentication) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys870Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys870Slc - var yyhl870 bool = l >= 0 - for yyj870 := 0; ; yyj870++ { - if yyhl870 { - if yyj870 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys870Slc = r.DecodeBytes(yys870Slc, true, true) - yys870 := string(yys870Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys870 { - case "enabled": - if r.TryDecodeAsNil() { - x.Enabled = false - } else { - x.Enabled = bool(r.DecodeBool()) - } - case "cacheTTL": - if r.TryDecodeAsNil() { - x.CacheTTL = pkg1_unversioned.Duration{} - } else { - yyv872 := &x.CacheTTL - yym873 := z.DecBinary() - _ = yym873 - if false { - } else if z.HasExtensions() && z.DecExt(yyv872) { - } else if !yym873 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv872) - } else { - z.DecFallback(yyv872, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys870) - } // end switch yys870 - } // end for yyj870 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *KubeletWebhookAuthentication) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj874 int - var yyb874 bool - var yyhl874 bool = l >= 0 - yyj874++ - if yyhl874 { - yyb874 = yyj874 > l - } else { - yyb874 = r.CheckBreak() - } - if yyb874 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Enabled = false - } else { - x.Enabled = bool(r.DecodeBool()) - } - yyj874++ - if yyhl874 { - yyb874 = yyj874 > l - } else { - yyb874 = r.CheckBreak() - } - if yyb874 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CacheTTL = pkg1_unversioned.Duration{} - } else { - yyv876 := &x.CacheTTL - yym877 := z.DecBinary() - _ = yym877 - if false { - } else if z.HasExtensions() && z.DecExt(yyv876) { - } else if !yym877 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv876) - } else { - z.DecFallback(yyv876, false) - } - } - for { - yyj874++ - if yyhl874 { - yyb874 = yyj874 > l - } else { - yyb874 = r.CheckBreak() - } - if yyb874 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj874-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *KubeletAnonymousAuthentication) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym878 := z.EncBinary() - _ = yym878 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep879 := !z.EncBinary() - yy2arr879 := z.EncBasicHandle().StructToArray - var yyq879 [1]bool - _, _, _ = yysep879, yyq879, yy2arr879 - const yyr879 bool = false - var yynn879 int - if yyr879 || yy2arr879 { - r.EncodeArrayStart(1) - } else { - yynn879 = 1 - for _, b := range yyq879 { - if b { - yynn879++ - } - } - r.EncodeMapStart(yynn879) - yynn879 = 0 - } - if yyr879 || yy2arr879 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym881 := z.EncBinary() - _ = yym881 - if false { - } else { - r.EncodeBool(bool(x.Enabled)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enabled")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym882 := z.EncBinary() - _ = yym882 - if false { - } else { - r.EncodeBool(bool(x.Enabled)) - } - } - if yyr879 || yy2arr879 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *KubeletAnonymousAuthentication) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym883 := z.DecBinary() - _ = yym883 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct884 := r.ContainerType() - if yyct884 == codecSelferValueTypeMap1234 { - yyl884 := r.ReadMapStart() - if yyl884 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl884, d) - } - } else if yyct884 == codecSelferValueTypeArray1234 { - yyl884 := r.ReadArrayStart() - if yyl884 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl884, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *KubeletAnonymousAuthentication) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys885Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys885Slc - var yyhl885 bool = l >= 0 - for yyj885 := 0; ; yyj885++ { - if yyhl885 { - if yyj885 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys885Slc = r.DecodeBytes(yys885Slc, true, true) - yys885 := string(yys885Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys885 { - case "enabled": - if r.TryDecodeAsNil() { - x.Enabled = false - } else { 
- x.Enabled = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys885) - } // end switch yys885 - } // end for yyj885 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *KubeletAnonymousAuthentication) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj887 int - var yyb887 bool - var yyhl887 bool = l >= 0 - yyj887++ - if yyhl887 { - yyb887 = yyj887 > l - } else { - yyb887 = r.CheckBreak() - } - if yyb887 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Enabled = false - } else { - x.Enabled = bool(r.DecodeBool()) - } - for { - yyj887++ - if yyhl887 { - yyb887 = yyj887 > l - } else { - yyb887 = r.CheckBreak() - } - if yyb887 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj887-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym889 := z.EncBinary() - _ = yym889 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep890 := !z.EncBinary() - yy2arr890 := z.EncBasicHandle().StructToArray - var yyq890 [14]bool - _, _, _ = yysep890, yyq890, yy2arr890 - const yyr890 bool = false - yyq890[0] = x.Kind != "" - yyq890[1] = x.APIVersion != "" - var yynn890 int - if yyr890 || yy2arr890 { - r.EncodeArrayStart(14) - } else { - yynn890 = 12 - for _, b := range yyq890 { - if b { - yynn890++ - } - } - r.EncodeMapStart(yynn890) - yynn890 = 0 - } - if yyr890 || yy2arr890 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq890[0] { - yym892 := z.EncBinary() - _ = yym892 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq890[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym893 := z.EncBinary() - _ = yym893 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr890 || yy2arr890 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq890[1] { - yym895 := z.EncBinary() - _ = yym895 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq890[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym896 := z.EncBinary() - _ = yym896 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr890 || yy2arr890 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym898 := z.EncBinary() - _ = yym898 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym899 := z.EncBinary() - _ = yym899 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } 
- if yyr890 || yy2arr890 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym901 := z.EncBinary() - _ = yym901 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Address)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("address")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym902 := z.EncBinary() - _ = yym902 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Address)) - } - } - if yyr890 || yy2arr890 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym904 := z.EncBinary() - _ = yym904 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.AlgorithmProvider)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("algorithmProvider")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym905 := z.EncBinary() - _ = yym905 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.AlgorithmProvider)) - } - } - if yyr890 || yy2arr890 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym907 := z.EncBinary() - _ = yym907 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PolicyConfigFile)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("policyConfigFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym908 := z.EncBinary() - _ = yym908 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PolicyConfigFile)) - } - } - if yyr890 || yy2arr890 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym910 := z.EncBinary() - _ = yym910 - if false { - } else { - r.EncodeBool(bool(x.EnableProfiling)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enableProfiling")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym911 := z.EncBinary() - _ = yym911 - if false { - } else { - r.EncodeBool(bool(x.EnableProfiling)) - } - } - if yyr890 || yy2arr890 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym913 := z.EncBinary() - _ = yym913 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("contentType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym914 := z.EncBinary() - _ = yym914 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) - } - } - if yyr890 || yy2arr890 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym916 := z.EncBinary() - _ = yym916 - if false { - } else { - r.EncodeFloat32(float32(x.KubeAPIQPS)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeAPIQPS")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym917 := z.EncBinary() - _ = yym917 - if false { - } else { - r.EncodeFloat32(float32(x.KubeAPIQPS)) - } - } - if yyr890 || yy2arr890 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym919 := z.EncBinary() - _ = yym919 - if false { - } else { - r.EncodeInt(int64(x.KubeAPIBurst)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeAPIBurst")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym920 := z.EncBinary() - _ = yym920 - if false { - } else { - r.EncodeInt(int64(x.KubeAPIBurst)) - } - } - if yyr890 || yy2arr890 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym922 := z.EncBinary() - _ = yym922 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("schedulerName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym923 := z.EncBinary() - _ = yym923 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName)) - } - } - if yyr890 || yy2arr890 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym925 := z.EncBinary() - _ = yym925 - if false { - } else { - r.EncodeInt(int64(x.HardPodAffinitySymmetricWeight)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hardPodAffinitySymmetricWeight")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym926 := z.EncBinary() - _ = yym926 - if false { - } else { - r.EncodeInt(int64(x.HardPodAffinitySymmetricWeight)) - } - } - if yyr890 || yy2arr890 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym928 := z.EncBinary() - _ = yym928 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FailureDomains)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("failureDomains")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym929 := z.EncBinary() - _ = yym929 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FailureDomains)) - } - } - if yyr890 || yy2arr890 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy931 := &x.LeaderElection - yy931.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("leaderElection")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy932 := &x.LeaderElection - yy932.CodecEncodeSelf(e) - } - if yyr890 || yy2arr890 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *KubeSchedulerConfiguration) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym933 := z.DecBinary() - _ = yym933 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct934 := r.ContainerType() - if yyct934 == codecSelferValueTypeMap1234 { - yyl934 := r.ReadMapStart() - if yyl934 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl934, d) - } - } else if yyct934 == codecSelferValueTypeArray1234 { - yyl934 := r.ReadArrayStart() - if yyl934 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl934, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *KubeSchedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys935Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys935Slc - var yyhl935 bool = l >= 0 - for yyj935 := 0; ; yyj935++ { - if yyhl935 { - 
if yyj935 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys935Slc = r.DecodeBytes(yys935Slc, true, true) - yys935 := string(yys935Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys935 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "port": - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - case "address": - if r.TryDecodeAsNil() { - x.Address = "" - } else { - x.Address = string(r.DecodeString()) - } - case "algorithmProvider": - if r.TryDecodeAsNil() { - x.AlgorithmProvider = "" - } else { - x.AlgorithmProvider = string(r.DecodeString()) - } - case "policyConfigFile": - if r.TryDecodeAsNil() { - x.PolicyConfigFile = "" - } else { - x.PolicyConfigFile = string(r.DecodeString()) - } - case "enableProfiling": - if r.TryDecodeAsNil() { - x.EnableProfiling = false - } else { - x.EnableProfiling = bool(r.DecodeBool()) - } - case "contentType": - if r.TryDecodeAsNil() { - x.ContentType = "" - } else { - x.ContentType = string(r.DecodeString()) - } - case "kubeAPIQPS": - if r.TryDecodeAsNil() { - x.KubeAPIQPS = 0 - } else { - x.KubeAPIQPS = float32(r.DecodeFloat(true)) - } - case "kubeAPIBurst": - if r.TryDecodeAsNil() { - x.KubeAPIBurst = 0 - } else { - x.KubeAPIBurst = int32(r.DecodeInt(32)) - } - case "schedulerName": - if r.TryDecodeAsNil() { - x.SchedulerName = "" - } else { - x.SchedulerName = string(r.DecodeString()) - } - case "hardPodAffinitySymmetricWeight": - if r.TryDecodeAsNil() { - x.HardPodAffinitySymmetricWeight = 0 - } else { - x.HardPodAffinitySymmetricWeight = int(r.DecodeInt(codecSelferBitsize1234)) - } - case "failureDomains": - if r.TryDecodeAsNil() { - x.FailureDomains = "" - } else { - x.FailureDomains = string(r.DecodeString()) - } - case "leaderElection": - if r.TryDecodeAsNil() { - x.LeaderElection = LeaderElectionConfiguration{} - } else { - yyv949 := &x.LeaderElection - yyv949.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys935) - } // end switch yys935 - } // end for yyj935 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj950 int - var yyb950 bool - var yyhl950 bool = l >= 0 - yyj950++ - if yyhl950 { - yyb950 = yyj950 > l - } else { - yyb950 = r.CheckBreak() - } - if yyb950 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj950++ - if yyhl950 { - yyb950 = yyj950 > l - } else { - yyb950 = r.CheckBreak() - } - if yyb950 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj950++ - if yyhl950 { - yyb950 = yyj950 > l - } else { - yyb950 = r.CheckBreak() - } - if yyb950 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = 0 - } else 
{ - x.Port = int32(r.DecodeInt(32)) - } - yyj950++ - if yyhl950 { - yyb950 = yyj950 > l - } else { - yyb950 = r.CheckBreak() - } - if yyb950 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Address = "" - } else { - x.Address = string(r.DecodeString()) - } - yyj950++ - if yyhl950 { - yyb950 = yyj950 > l - } else { - yyb950 = r.CheckBreak() - } - if yyb950 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AlgorithmProvider = "" - } else { - x.AlgorithmProvider = string(r.DecodeString()) - } - yyj950++ - if yyhl950 { - yyb950 = yyj950 > l - } else { - yyb950 = r.CheckBreak() - } - if yyb950 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PolicyConfigFile = "" - } else { - x.PolicyConfigFile = string(r.DecodeString()) - } - yyj950++ - if yyhl950 { - yyb950 = yyj950 > l - } else { - yyb950 = r.CheckBreak() - } - if yyb950 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EnableProfiling = false - } else { - x.EnableProfiling = bool(r.DecodeBool()) - } - yyj950++ - if yyhl950 { - yyb950 = yyj950 > l - } else { - yyb950 = r.CheckBreak() - } - if yyb950 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContentType = "" - } else { - x.ContentType = string(r.DecodeString()) - } - yyj950++ - if yyhl950 { - yyb950 = yyj950 > l - } else { - yyb950 = r.CheckBreak() - } - if yyb950 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeAPIQPS = 0 - } else { - x.KubeAPIQPS = float32(r.DecodeFloat(true)) - } - yyj950++ - if yyhl950 { - yyb950 = yyj950 > l - } else { - yyb950 = r.CheckBreak() - } - if yyb950 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeAPIBurst = 0 - } else { - x.KubeAPIBurst = int32(r.DecodeInt(32)) - } - yyj950++ - if yyhl950 { - yyb950 = yyj950 > l - } else { - yyb950 = r.CheckBreak() - } - if yyb950 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SchedulerName = "" - } else { - x.SchedulerName = string(r.DecodeString()) - } - yyj950++ - if yyhl950 { - yyb950 = yyj950 > l - } else { - yyb950 = r.CheckBreak() - } - if yyb950 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HardPodAffinitySymmetricWeight = 0 - } else { - x.HardPodAffinitySymmetricWeight = int(r.DecodeInt(codecSelferBitsize1234)) - } - yyj950++ - if yyhl950 { - yyb950 = yyj950 > l - } else { - yyb950 = r.CheckBreak() - } - if yyb950 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FailureDomains = "" - } else { - x.FailureDomains = 
string(r.DecodeString()) - } - yyj950++ - if yyhl950 { - yyb950 = yyj950 > l - } else { - yyb950 = r.CheckBreak() - } - if yyb950 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LeaderElection = LeaderElectionConfiguration{} - } else { - yyv964 := &x.LeaderElection - yyv964.CodecDecodeSelf(d) - } - for { - yyj950++ - if yyhl950 { - yyb950 = yyj950 > l - } else { - yyb950 = r.CheckBreak() - } - if yyb950 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj950-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LeaderElectionConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym965 := z.EncBinary() - _ = yym965 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep966 := !z.EncBinary() - yy2arr966 := z.EncBasicHandle().StructToArray - var yyq966 [4]bool - _, _, _ = yysep966, yyq966, yy2arr966 - const yyr966 bool = false - var yynn966 int - if yyr966 || yy2arr966 { - r.EncodeArrayStart(4) - } else { - yynn966 = 4 - for _, b := range yyq966 { - if b { - yynn966++ - } - } - r.EncodeMapStart(yynn966) - yynn966 = 0 - } - if yyr966 || yy2arr966 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym968 := z.EncBinary() - _ = yym968 - if false { - } else { - r.EncodeBool(bool(x.LeaderElect)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("leaderElect")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym969 := z.EncBinary() - _ = yym969 - if false { - } else { - r.EncodeBool(bool(x.LeaderElect)) - } - } - if yyr966 || yy2arr966 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy971 := &x.LeaseDuration - yym972 := z.EncBinary() - _ = yym972 - if false { - } else if z.HasExtensions() && z.EncExt(yy971) { - } else if !yym972 && z.IsJSONHandle() { - z.EncJSONMarshal(yy971) - } else { - z.EncFallback(yy971) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("leaseDuration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy973 := &x.LeaseDuration - yym974 := z.EncBinary() - _ = yym974 - if false { - } else if z.HasExtensions() && z.EncExt(yy973) { - } else if !yym974 && z.IsJSONHandle() { - z.EncJSONMarshal(yy973) - } else { - z.EncFallback(yy973) - } - } - if yyr966 || yy2arr966 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy976 := &x.RenewDeadline - yym977 := z.EncBinary() - _ = yym977 - if false { - } else if z.HasExtensions() && z.EncExt(yy976) { - } else if !yym977 && z.IsJSONHandle() { - z.EncJSONMarshal(yy976) - } else { - z.EncFallback(yy976) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("renewDeadline")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy978 := &x.RenewDeadline - yym979 := z.EncBinary() - _ = yym979 - if false { - } else if z.HasExtensions() && z.EncExt(yy978) { - } else if !yym979 && z.IsJSONHandle() { - z.EncJSONMarshal(yy978) - } else { - z.EncFallback(yy978) - } - } - if yyr966 || yy2arr966 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy981 := &x.RetryPeriod - yym982 := z.EncBinary() 
- _ = yym982 - if false { - } else if z.HasExtensions() && z.EncExt(yy981) { - } else if !yym982 && z.IsJSONHandle() { - z.EncJSONMarshal(yy981) - } else { - z.EncFallback(yy981) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("retryPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy983 := &x.RetryPeriod - yym984 := z.EncBinary() - _ = yym984 - if false { - } else if z.HasExtensions() && z.EncExt(yy983) { - } else if !yym984 && z.IsJSONHandle() { - z.EncJSONMarshal(yy983) - } else { - z.EncFallback(yy983) - } - } - if yyr966 || yy2arr966 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LeaderElectionConfiguration) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym985 := z.DecBinary() - _ = yym985 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct986 := r.ContainerType() - if yyct986 == codecSelferValueTypeMap1234 { - yyl986 := r.ReadMapStart() - if yyl986 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl986, d) - } - } else if yyct986 == codecSelferValueTypeArray1234 { - yyl986 := r.ReadArrayStart() - if yyl986 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl986, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LeaderElectionConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys987Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys987Slc - var yyhl987 bool = l >= 0 - for yyj987 := 0; ; yyj987++ { - if yyhl987 { - if yyj987 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys987Slc = r.DecodeBytes(yys987Slc, true, true) - yys987 := string(yys987Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys987 { - case "leaderElect": - if r.TryDecodeAsNil() { - x.LeaderElect = false - } else { - x.LeaderElect = bool(r.DecodeBool()) - } - case "leaseDuration": - if r.TryDecodeAsNil() { - x.LeaseDuration = pkg1_unversioned.Duration{} - } else { - yyv989 := &x.LeaseDuration - yym990 := z.DecBinary() - _ = yym990 - if false { - } else if z.HasExtensions() && z.DecExt(yyv989) { - } else if !yym990 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv989) - } else { - z.DecFallback(yyv989, false) - } - } - case "renewDeadline": - if r.TryDecodeAsNil() { - x.RenewDeadline = pkg1_unversioned.Duration{} - } else { - yyv991 := &x.RenewDeadline - yym992 := z.DecBinary() - _ = yym992 - if false { - } else if z.HasExtensions() && z.DecExt(yyv991) { - } else if !yym992 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv991) - } else { - z.DecFallback(yyv991, false) - } - } - case "retryPeriod": - if r.TryDecodeAsNil() { - x.RetryPeriod = pkg1_unversioned.Duration{} - } else { - yyv993 := &x.RetryPeriod - yym994 := z.DecBinary() - _ = yym994 - if false { - } else if z.HasExtensions() && z.DecExt(yyv993) { - } else if !yym994 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv993) - } else { - z.DecFallback(yyv993, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys987) - } // end switch yys987 - } // end for yyj987 - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LeaderElectionConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj995 int - var yyb995 bool - var yyhl995 bool = l >= 0 - yyj995++ - if yyhl995 { - yyb995 = yyj995 > l - } else { - yyb995 = r.CheckBreak() - } - if yyb995 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LeaderElect = false - } else { - x.LeaderElect = bool(r.DecodeBool()) - } - yyj995++ - if yyhl995 { - yyb995 = yyj995 > l - } else { - yyb995 = r.CheckBreak() - } - if yyb995 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LeaseDuration = pkg1_unversioned.Duration{} - } else { - yyv997 := &x.LeaseDuration - yym998 := z.DecBinary() - _ = yym998 - if false { - } else if z.HasExtensions() && z.DecExt(yyv997) { - } else if !yym998 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv997) - } else { - z.DecFallback(yyv997, false) - } - } - yyj995++ - if yyhl995 { - yyb995 = yyj995 > l - } else { - yyb995 = r.CheckBreak() - } - if yyb995 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RenewDeadline = pkg1_unversioned.Duration{} - } else { - yyv999 := &x.RenewDeadline - yym1000 := z.DecBinary() - _ = yym1000 - if false { - } else if z.HasExtensions() && z.DecExt(yyv999) { - } else if !yym1000 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv999) - } else { - z.DecFallback(yyv999, false) - } - } - yyj995++ - if yyhl995 { - yyb995 = yyj995 > l - } else { - yyb995 = r.CheckBreak() - } - if yyb995 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RetryPeriod = pkg1_unversioned.Duration{} - } else { - yyv1001 := &x.RetryPeriod - yym1002 := z.DecBinary() - _ = yym1002 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1001) { - } else if !yym1002 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1001) - } else { - z.DecFallback(yyv1001, false) - } - } - for { - yyj995++ - if yyhl995 { - yyb995 = yyj995 > l - } else { - yyb995 = r.CheckBreak() - } - if yyb995 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj995-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1003 := z.EncBinary() - _ = yym1003 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1004 := !z.EncBinary() - yy2arr1004 := z.EncBasicHandle().StructToArray - var yyq1004 [61]bool - _, _, _ = yysep1004, yyq1004, yy2arr1004 - const yyr1004 bool = false - yyq1004[0] = x.Kind != "" - yyq1004[1] = x.APIVersion != "" - var yynn1004 int - if yyr1004 || yy2arr1004 { - r.EncodeArrayStart(61) - } else { - yynn1004 = 59 - for _, b := range yyq1004 { - if b { - yynn1004++ - } - } - r.EncodeMapStart(yynn1004) - yynn1004 = 0 - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if 
yyq1004[0] { - yym1006 := z.EncBinary() - _ = yym1006 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1004[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1007 := z.EncBinary() - _ = yym1007 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1004[1] { - yym1009 := z.EncBinary() - _ = yym1009 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1004[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1010 := z.EncBinary() - _ = yym1010 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1012 := z.EncBinary() - _ = yym1012 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1013 := z.EncBinary() - _ = yym1013 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1015 := z.EncBinary() - _ = yym1015 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Address)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("address")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1016 := z.EncBinary() - _ = yym1016 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Address)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1018 := z.EncBinary() - _ = yym1018 - if false { - } else { - r.EncodeBool(bool(x.UseServiceAccountCredentials)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("useServiceAccountCredentials")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1019 := z.EncBinary() - _ = yym1019 - if false { - } else { - r.EncodeBool(bool(x.UseServiceAccountCredentials)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1021 := z.EncBinary() - _ = yym1021 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CloudProvider)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cloudProvider")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1022 := z.EncBinary() - _ = yym1022 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CloudProvider)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1024 := z.EncBinary() - _ = yym1024 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CloudConfigFile)) - } - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cloudConfigFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1025 := z.EncBinary() - _ = yym1025 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CloudConfigFile)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1027 := z.EncBinary() - _ = yym1027 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentEndpointSyncs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrentEndpointSyncs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1028 := z.EncBinary() - _ = yym1028 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentEndpointSyncs)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1030 := z.EncBinary() - _ = yym1030 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentRSSyncs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrentRSSyncs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1031 := z.EncBinary() - _ = yym1031 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentRSSyncs)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1033 := z.EncBinary() - _ = yym1033 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentRCSyncs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrentRCSyncs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1034 := z.EncBinary() - _ = yym1034 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentRCSyncs)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1036 := z.EncBinary() - _ = yym1036 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentServiceSyncs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrentServiceSyncs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1037 := z.EncBinary() - _ = yym1037 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentServiceSyncs)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1039 := z.EncBinary() - _ = yym1039 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentResourceQuotaSyncs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrentResourceQuotaSyncs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1040 := z.EncBinary() - _ = yym1040 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentResourceQuotaSyncs)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1042 := z.EncBinary() - _ = yym1042 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentDeploymentSyncs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrentDeploymentSyncs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1043 := z.EncBinary() - _ = yym1043 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentDeploymentSyncs)) - } - } - if yyr1004 || yy2arr1004 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1045 := z.EncBinary() - _ = yym1045 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentDaemonSetSyncs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrentDaemonSetSyncs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1046 := z.EncBinary() - _ = yym1046 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentDaemonSetSyncs)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1048 := z.EncBinary() - _ = yym1048 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentJobSyncs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrentJobSyncs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1049 := z.EncBinary() - _ = yym1049 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentJobSyncs)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1051 := z.EncBinary() - _ = yym1051 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentNamespaceSyncs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrentNamespaceSyncs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1052 := z.EncBinary() - _ = yym1052 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentNamespaceSyncs)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1054 := z.EncBinary() - _ = yym1054 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentSATokenSyncs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrentSATokenSyncs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1055 := z.EncBinary() - _ = yym1055 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentSATokenSyncs)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1057 := z.EncBinary() - _ = yym1057 - if false { - } else { - r.EncodeInt(int64(x.LookupCacheSizeForRC)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lookupCacheSizeForRC")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1058 := z.EncBinary() - _ = yym1058 - if false { - } else { - r.EncodeInt(int64(x.LookupCacheSizeForRC)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1060 := z.EncBinary() - _ = yym1060 - if false { - } else { - r.EncodeInt(int64(x.LookupCacheSizeForRS)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lookupCacheSizeForRS")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1061 := z.EncBinary() - _ = yym1061 - if false { - } else { - r.EncodeInt(int64(x.LookupCacheSizeForRS)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1063 := z.EncBinary() - _ = yym1063 - if false { - } else { - r.EncodeInt(int64(x.LookupCacheSizeForDaemonSet)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lookupCacheSizeForDaemonSet")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1064 := z.EncBinary() - _ = yym1064 - if false { - } else { - r.EncodeInt(int64(x.LookupCacheSizeForDaemonSet)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1066 := &x.ServiceSyncPeriod - yym1067 := z.EncBinary() - _ = yym1067 - if false { - } else if z.HasExtensions() && z.EncExt(yy1066) { - } else if !yym1067 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1066) - } else { - z.EncFallback(yy1066) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceSyncPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1068 := &x.ServiceSyncPeriod - yym1069 := z.EncBinary() - _ = yym1069 - if false { - } else if z.HasExtensions() && z.EncExt(yy1068) { - } else if !yym1069 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1068) - } else { - z.EncFallback(yy1068) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1071 := &x.NodeSyncPeriod - yym1072 := z.EncBinary() - _ = yym1072 - if false { - } else if z.HasExtensions() && z.EncExt(yy1071) { - } else if !yym1072 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1071) - } else { - z.EncFallback(yy1071) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeSyncPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1073 := &x.NodeSyncPeriod - yym1074 := z.EncBinary() - _ = yym1074 - if false { - } else if z.HasExtensions() && z.EncExt(yy1073) { - } else if !yym1074 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1073) - } else { - z.EncFallback(yy1073) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1076 := &x.RouteReconciliationPeriod - yym1077 := z.EncBinary() - _ = yym1077 - if false { - } else if z.HasExtensions() && z.EncExt(yy1076) { - } else if !yym1077 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1076) - } else { - z.EncFallback(yy1076) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("routeReconciliationPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1078 := &x.RouteReconciliationPeriod - yym1079 := z.EncBinary() - _ = yym1079 - if false { - } else if z.HasExtensions() && z.EncExt(yy1078) { - } else if !yym1079 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1078) - } else { - z.EncFallback(yy1078) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1081 := &x.ResourceQuotaSyncPeriod - yym1082 := z.EncBinary() - _ = yym1082 - if false { - } else if z.HasExtensions() && z.EncExt(yy1081) { - } else if !yym1082 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1081) - } else { - z.EncFallback(yy1081) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceQuotaSyncPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1083 := &x.ResourceQuotaSyncPeriod - yym1084 := z.EncBinary() - _ = yym1084 - if false { - } else if z.HasExtensions() && z.EncExt(yy1083) { - } else if !yym1084 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1083) - } else { - z.EncFallback(yy1083) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1086 := &x.NamespaceSyncPeriod - yym1087 := z.EncBinary() - _ = yym1087 - if 
false { - } else if z.HasExtensions() && z.EncExt(yy1086) { - } else if !yym1087 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1086) - } else { - z.EncFallback(yy1086) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespaceSyncPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1088 := &x.NamespaceSyncPeriod - yym1089 := z.EncBinary() - _ = yym1089 - if false { - } else if z.HasExtensions() && z.EncExt(yy1088) { - } else if !yym1089 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1088) - } else { - z.EncFallback(yy1088) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1091 := &x.PVClaimBinderSyncPeriod - yym1092 := z.EncBinary() - _ = yym1092 - if false { - } else if z.HasExtensions() && z.EncExt(yy1091) { - } else if !yym1092 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1091) - } else { - z.EncFallback(yy1091) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("pvClaimBinderSyncPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1093 := &x.PVClaimBinderSyncPeriod - yym1094 := z.EncBinary() - _ = yym1094 - if false { - } else if z.HasExtensions() && z.EncExt(yy1093) { - } else if !yym1094 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1093) - } else { - z.EncFallback(yy1093) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1096 := &x.MinResyncPeriod - yym1097 := z.EncBinary() - _ = yym1097 - if false { - } else if z.HasExtensions() && z.EncExt(yy1096) { - } else if !yym1097 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1096) - } else { - z.EncFallback(yy1096) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minResyncPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1098 := &x.MinResyncPeriod - yym1099 := z.EncBinary() - _ = yym1099 - if false { - } else if z.HasExtensions() && z.EncExt(yy1098) { - } else if !yym1099 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1098) - } else { - z.EncFallback(yy1098) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1101 := z.EncBinary() - _ = yym1101 - if false { - } else { - r.EncodeInt(int64(x.TerminatedPodGCThreshold)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("terminatedPodGCThreshold")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1102 := z.EncBinary() - _ = yym1102 - if false { - } else { - r.EncodeInt(int64(x.TerminatedPodGCThreshold)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1104 := &x.HorizontalPodAutoscalerSyncPeriod - yym1105 := z.EncBinary() - _ = yym1105 - if false { - } else if z.HasExtensions() && z.EncExt(yy1104) { - } else if !yym1105 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1104) - } else { - z.EncFallback(yy1104) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("horizontalPodAutoscalerSyncPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1106 := &x.HorizontalPodAutoscalerSyncPeriod - yym1107 := z.EncBinary() - _ = yym1107 - if false { - } else if z.HasExtensions() && z.EncExt(yy1106) { - } else if !yym1107 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1106) - 
} else { - z.EncFallback(yy1106) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1109 := &x.DeploymentControllerSyncPeriod - yym1110 := z.EncBinary() - _ = yym1110 - if false { - } else if z.HasExtensions() && z.EncExt(yy1109) { - } else if !yym1110 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1109) - } else { - z.EncFallback(yy1109) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deploymentControllerSyncPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1111 := &x.DeploymentControllerSyncPeriod - yym1112 := z.EncBinary() - _ = yym1112 - if false { - } else if z.HasExtensions() && z.EncExt(yy1111) { - } else if !yym1112 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1111) - } else { - z.EncFallback(yy1111) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1114 := &x.PodEvictionTimeout - yym1115 := z.EncBinary() - _ = yym1115 - if false { - } else if z.HasExtensions() && z.EncExt(yy1114) { - } else if !yym1115 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1114) - } else { - z.EncFallback(yy1114) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podEvictionTimeout")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1116 := &x.PodEvictionTimeout - yym1117 := z.EncBinary() - _ = yym1117 - if false { - } else if z.HasExtensions() && z.EncExt(yy1116) { - } else if !yym1117 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1116) - } else { - z.EncFallback(yy1116) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1119 := z.EncBinary() - _ = yym1119 - if false { - } else { - r.EncodeFloat32(float32(x.DeletingPodsQps)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletingPodsQps")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1120 := z.EncBinary() - _ = yym1120 - if false { - } else { - r.EncodeFloat32(float32(x.DeletingPodsQps)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1122 := z.EncBinary() - _ = yym1122 - if false { - } else { - r.EncodeInt(int64(x.DeletingPodsBurst)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletingPodsBurst")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1123 := z.EncBinary() - _ = yym1123 - if false { - } else { - r.EncodeInt(int64(x.DeletingPodsBurst)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1125 := &x.NodeMonitorGracePeriod - yym1126 := z.EncBinary() - _ = yym1126 - if false { - } else if z.HasExtensions() && z.EncExt(yy1125) { - } else if !yym1126 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1125) - } else { - z.EncFallback(yy1125) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeMonitorGracePeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1127 := &x.NodeMonitorGracePeriod - yym1128 := z.EncBinary() - _ = yym1128 - if false { - } else if z.HasExtensions() && z.EncExt(yy1127) { - } else if !yym1128 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1127) - } else { - z.EncFallback(yy1127) - } - } - if yyr1004 || yy2arr1004 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1130 := z.EncBinary() - _ = yym1130 - if false { - } else { - r.EncodeInt(int64(x.RegisterRetryCount)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("registerRetryCount")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1131 := z.EncBinary() - _ = yym1131 - if false { - } else { - r.EncodeInt(int64(x.RegisterRetryCount)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1133 := &x.NodeStartupGracePeriod - yym1134 := z.EncBinary() - _ = yym1134 - if false { - } else if z.HasExtensions() && z.EncExt(yy1133) { - } else if !yym1134 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1133) - } else { - z.EncFallback(yy1133) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeStartupGracePeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1135 := &x.NodeStartupGracePeriod - yym1136 := z.EncBinary() - _ = yym1136 - if false { - } else if z.HasExtensions() && z.EncExt(yy1135) { - } else if !yym1136 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1135) - } else { - z.EncFallback(yy1135) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1138 := &x.NodeMonitorPeriod - yym1139 := z.EncBinary() - _ = yym1139 - if false { - } else if z.HasExtensions() && z.EncExt(yy1138) { - } else if !yym1139 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1138) - } else { - z.EncFallback(yy1138) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeMonitorPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1140 := &x.NodeMonitorPeriod - yym1141 := z.EncBinary() - _ = yym1141 - if false { - } else if z.HasExtensions() && z.EncExt(yy1140) { - } else if !yym1141 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1140) - } else { - z.EncFallback(yy1140) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1143 := z.EncBinary() - _ = yym1143 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountKeyFile)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceAccountKeyFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1144 := z.EncBinary() - _ = yym1144 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountKeyFile)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1146 := z.EncBinary() - _ = yym1146 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterSigningCertFile)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterSigningCertFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1147 := z.EncBinary() - _ = yym1147 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterSigningCertFile)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1149 := z.EncBinary() - _ = yym1149 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterSigningKeyFile)) - } - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterSigningKeyFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1150 := z.EncBinary() - _ = yym1150 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterSigningKeyFile)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1152 := z.EncBinary() - _ = yym1152 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ApproveAllKubeletCSRsForGroup)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("approveAllKubeletCSRsForGroup")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1153 := z.EncBinary() - _ = yym1153 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ApproveAllKubeletCSRsForGroup)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1155 := z.EncBinary() - _ = yym1155 - if false { - } else { - r.EncodeBool(bool(x.EnableProfiling)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enableProfiling")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1156 := z.EncBinary() - _ = yym1156 - if false { - } else { - r.EncodeBool(bool(x.EnableProfiling)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1158 := z.EncBinary() - _ = yym1158 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1159 := z.EncBinary() - _ = yym1159 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1161 := z.EncBinary() - _ = yym1161 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterCIDR)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterCIDR")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1162 := z.EncBinary() - _ = yym1162 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterCIDR)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1164 := z.EncBinary() - _ = yym1164 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceCIDR)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceCIDR")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1165 := z.EncBinary() - _ = yym1165 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceCIDR)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1167 := z.EncBinary() - _ = yym1167 - if false { - } else { - r.EncodeInt(int64(x.NodeCIDRMaskSize)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeCIDRMaskSize")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1168 := z.EncBinary() - _ = yym1168 
- if false { - } else { - r.EncodeInt(int64(x.NodeCIDRMaskSize)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1170 := z.EncBinary() - _ = yym1170 - if false { - } else { - r.EncodeBool(bool(x.AllocateNodeCIDRs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("allocateNodeCIDRs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1171 := z.EncBinary() - _ = yym1171 - if false { - } else { - r.EncodeBool(bool(x.AllocateNodeCIDRs)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1173 := z.EncBinary() - _ = yym1173 - if false { - } else { - r.EncodeBool(bool(x.ConfigureCloudRoutes)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("configureCloudRoutes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1174 := z.EncBinary() - _ = yym1174 - if false { - } else { - r.EncodeBool(bool(x.ConfigureCloudRoutes)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1176 := z.EncBinary() - _ = yym1176 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RootCAFile)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rootCAFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1177 := z.EncBinary() - _ = yym1177 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RootCAFile)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1179 := z.EncBinary() - _ = yym1179 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("contentType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1180 := z.EncBinary() - _ = yym1180 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1182 := z.EncBinary() - _ = yym1182 - if false { - } else { - r.EncodeFloat32(float32(x.KubeAPIQPS)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeAPIQPS")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1183 := z.EncBinary() - _ = yym1183 - if false { - } else { - r.EncodeFloat32(float32(x.KubeAPIQPS)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1185 := z.EncBinary() - _ = yym1185 - if false { - } else { - r.EncodeInt(int64(x.KubeAPIBurst)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeAPIBurst")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1186 := z.EncBinary() - _ = yym1186 - if false { - } else { - r.EncodeInt(int64(x.KubeAPIBurst)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1188 := &x.LeaderElection - yy1188.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("leaderElection")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1189 := &x.LeaderElection - yy1189.CodecEncodeSelf(e) - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1191 := &x.VolumeConfiguration - yy1191.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeConfiguration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1192 := &x.VolumeConfiguration - yy1192.CodecEncodeSelf(e) - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1194 := &x.ControllerStartInterval - yym1195 := z.EncBinary() - _ = yym1195 - if false { - } else if z.HasExtensions() && z.EncExt(yy1194) { - } else if !yym1195 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1194) - } else { - z.EncFallback(yy1194) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("controllerStartInterval")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1196 := &x.ControllerStartInterval - yym1197 := z.EncBinary() - _ = yym1197 - if false { - } else if z.HasExtensions() && z.EncExt(yy1196) { - } else if !yym1197 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1196) - } else { - z.EncFallback(yy1196) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1199 := z.EncBinary() - _ = yym1199 - if false { - } else { - r.EncodeBool(bool(x.EnableGarbageCollector)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enableGarbageCollector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1200 := z.EncBinary() - _ = yym1200 - if false { - } else { - r.EncodeBool(bool(x.EnableGarbageCollector)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1202 := z.EncBinary() - _ = yym1202 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentGCSyncs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrentGCSyncs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1203 := z.EncBinary() - _ = yym1203 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentGCSyncs)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1205 := z.EncBinary() - _ = yym1205 - if false { - } else { - r.EncodeFloat32(float32(x.NodeEvictionRate)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeEvictionRate")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1206 := z.EncBinary() - _ = yym1206 - if false { - } else { - r.EncodeFloat32(float32(x.NodeEvictionRate)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1208 := z.EncBinary() - _ = yym1208 - if false { - } else { - r.EncodeFloat32(float32(x.SecondaryNodeEvictionRate)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secondaryNodeEvictionRate")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1209 := z.EncBinary() - _ = yym1209 - if false { - } else { - r.EncodeFloat32(float32(x.SecondaryNodeEvictionRate)) - } - } - if yyr1004 || yy2arr1004 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1211 := z.EncBinary() - _ = yym1211 - if false { - } else { - r.EncodeInt(int64(x.LargeClusterSizeThreshold)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("largeClusterSizeThreshold")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1212 := z.EncBinary() - _ = yym1212 - if false { - } else { - r.EncodeInt(int64(x.LargeClusterSizeThreshold)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1214 := z.EncBinary() - _ = yym1214 - if false { - } else { - r.EncodeFloat32(float32(x.UnhealthyZoneThreshold)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("unhealthyZoneThreshold")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1215 := z.EncBinary() - _ = yym1215 - if false { - } else { - r.EncodeFloat32(float32(x.UnhealthyZoneThreshold)) - } - } - if yyr1004 || yy2arr1004 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *KubeControllerManagerConfiguration) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1216 := z.DecBinary() - _ = yym1216 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1217 := r.ContainerType() - if yyct1217 == codecSelferValueTypeMap1234 { - yyl1217 := r.ReadMapStart() - if yyl1217 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1217, d) - } - } else if yyct1217 == codecSelferValueTypeArray1234 { - yyl1217 := r.ReadArrayStart() - if yyl1217 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1217, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1218Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1218Slc - var yyhl1218 bool = l >= 0 - for yyj1218 := 0; ; yyj1218++ { - if yyhl1218 { - if yyj1218 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1218Slc = r.DecodeBytes(yys1218Slc, true, true) - yys1218 := string(yys1218Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1218 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "port": - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - case "address": - if r.TryDecodeAsNil() { - x.Address = "" - } else { - x.Address = string(r.DecodeString()) - } - case "useServiceAccountCredentials": - if r.TryDecodeAsNil() { - x.UseServiceAccountCredentials = false - } else { - x.UseServiceAccountCredentials = bool(r.DecodeBool()) - } - case "cloudProvider": - if r.TryDecodeAsNil() { - x.CloudProvider = "" - } else { - x.CloudProvider = string(r.DecodeString()) - } - case "cloudConfigFile": - if r.TryDecodeAsNil() { - x.CloudConfigFile = "" - 
} else { - x.CloudConfigFile = string(r.DecodeString()) - } - case "concurrentEndpointSyncs": - if r.TryDecodeAsNil() { - x.ConcurrentEndpointSyncs = 0 - } else { - x.ConcurrentEndpointSyncs = int32(r.DecodeInt(32)) - } - case "concurrentRSSyncs": - if r.TryDecodeAsNil() { - x.ConcurrentRSSyncs = 0 - } else { - x.ConcurrentRSSyncs = int32(r.DecodeInt(32)) - } - case "concurrentRCSyncs": - if r.TryDecodeAsNil() { - x.ConcurrentRCSyncs = 0 - } else { - x.ConcurrentRCSyncs = int32(r.DecodeInt(32)) - } - case "concurrentServiceSyncs": - if r.TryDecodeAsNil() { - x.ConcurrentServiceSyncs = 0 - } else { - x.ConcurrentServiceSyncs = int32(r.DecodeInt(32)) - } - case "concurrentResourceQuotaSyncs": - if r.TryDecodeAsNil() { - x.ConcurrentResourceQuotaSyncs = 0 - } else { - x.ConcurrentResourceQuotaSyncs = int32(r.DecodeInt(32)) - } - case "concurrentDeploymentSyncs": - if r.TryDecodeAsNil() { - x.ConcurrentDeploymentSyncs = 0 - } else { - x.ConcurrentDeploymentSyncs = int32(r.DecodeInt(32)) - } - case "concurrentDaemonSetSyncs": - if r.TryDecodeAsNil() { - x.ConcurrentDaemonSetSyncs = 0 - } else { - x.ConcurrentDaemonSetSyncs = int32(r.DecodeInt(32)) - } - case "concurrentJobSyncs": - if r.TryDecodeAsNil() { - x.ConcurrentJobSyncs = 0 - } else { - x.ConcurrentJobSyncs = int32(r.DecodeInt(32)) - } - case "concurrentNamespaceSyncs": - if r.TryDecodeAsNil() { - x.ConcurrentNamespaceSyncs = 0 - } else { - x.ConcurrentNamespaceSyncs = int32(r.DecodeInt(32)) - } - case "concurrentSATokenSyncs": - if r.TryDecodeAsNil() { - x.ConcurrentSATokenSyncs = 0 - } else { - x.ConcurrentSATokenSyncs = int32(r.DecodeInt(32)) - } - case "lookupCacheSizeForRC": - if r.TryDecodeAsNil() { - x.LookupCacheSizeForRC = 0 - } else { - x.LookupCacheSizeForRC = int32(r.DecodeInt(32)) - } - case "lookupCacheSizeForRS": - if r.TryDecodeAsNil() { - x.LookupCacheSizeForRS = 0 - } else { - x.LookupCacheSizeForRS = int32(r.DecodeInt(32)) - } - case "lookupCacheSizeForDaemonSet": - if r.TryDecodeAsNil() { - x.LookupCacheSizeForDaemonSet = 0 - } else { - x.LookupCacheSizeForDaemonSet = int32(r.DecodeInt(32)) - } - case "serviceSyncPeriod": - if r.TryDecodeAsNil() { - x.ServiceSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv1239 := &x.ServiceSyncPeriod - yym1240 := z.DecBinary() - _ = yym1240 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1239) { - } else if !yym1240 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1239) - } else { - z.DecFallback(yyv1239, false) - } - } - case "nodeSyncPeriod": - if r.TryDecodeAsNil() { - x.NodeSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv1241 := &x.NodeSyncPeriod - yym1242 := z.DecBinary() - _ = yym1242 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1241) { - } else if !yym1242 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1241) - } else { - z.DecFallback(yyv1241, false) - } - } - case "routeReconciliationPeriod": - if r.TryDecodeAsNil() { - x.RouteReconciliationPeriod = pkg1_unversioned.Duration{} - } else { - yyv1243 := &x.RouteReconciliationPeriod - yym1244 := z.DecBinary() - _ = yym1244 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1243) { - } else if !yym1244 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1243) - } else { - z.DecFallback(yyv1243, false) - } - } - case "resourceQuotaSyncPeriod": - if r.TryDecodeAsNil() { - x.ResourceQuotaSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv1245 := &x.ResourceQuotaSyncPeriod - yym1246 := z.DecBinary() - _ = yym1246 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1245) { - } else if 
!yym1246 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1245) - } else { - z.DecFallback(yyv1245, false) - } - } - case "namespaceSyncPeriod": - if r.TryDecodeAsNil() { - x.NamespaceSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv1247 := &x.NamespaceSyncPeriod - yym1248 := z.DecBinary() - _ = yym1248 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1247) { - } else if !yym1248 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1247) - } else { - z.DecFallback(yyv1247, false) - } - } - case "pvClaimBinderSyncPeriod": - if r.TryDecodeAsNil() { - x.PVClaimBinderSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv1249 := &x.PVClaimBinderSyncPeriod - yym1250 := z.DecBinary() - _ = yym1250 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1249) { - } else if !yym1250 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1249) - } else { - z.DecFallback(yyv1249, false) - } - } - case "minResyncPeriod": - if r.TryDecodeAsNil() { - x.MinResyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv1251 := &x.MinResyncPeriod - yym1252 := z.DecBinary() - _ = yym1252 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1251) { - } else if !yym1252 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1251) - } else { - z.DecFallback(yyv1251, false) - } - } - case "terminatedPodGCThreshold": - if r.TryDecodeAsNil() { - x.TerminatedPodGCThreshold = 0 - } else { - x.TerminatedPodGCThreshold = int32(r.DecodeInt(32)) - } - case "horizontalPodAutoscalerSyncPeriod": - if r.TryDecodeAsNil() { - x.HorizontalPodAutoscalerSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv1254 := &x.HorizontalPodAutoscalerSyncPeriod - yym1255 := z.DecBinary() - _ = yym1255 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1254) { - } else if !yym1255 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1254) - } else { - z.DecFallback(yyv1254, false) - } - } - case "deploymentControllerSyncPeriod": - if r.TryDecodeAsNil() { - x.DeploymentControllerSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv1256 := &x.DeploymentControllerSyncPeriod - yym1257 := z.DecBinary() - _ = yym1257 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1256) { - } else if !yym1257 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1256) - } else { - z.DecFallback(yyv1256, false) - } - } - case "podEvictionTimeout": - if r.TryDecodeAsNil() { - x.PodEvictionTimeout = pkg1_unversioned.Duration{} - } else { - yyv1258 := &x.PodEvictionTimeout - yym1259 := z.DecBinary() - _ = yym1259 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1258) { - } else if !yym1259 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1258) - } else { - z.DecFallback(yyv1258, false) - } - } - case "deletingPodsQps": - if r.TryDecodeAsNil() { - x.DeletingPodsQps = 0 - } else { - x.DeletingPodsQps = float32(r.DecodeFloat(true)) - } - case "deletingPodsBurst": - if r.TryDecodeAsNil() { - x.DeletingPodsBurst = 0 - } else { - x.DeletingPodsBurst = int32(r.DecodeInt(32)) - } - case "nodeMonitorGracePeriod": - if r.TryDecodeAsNil() { - x.NodeMonitorGracePeriod = pkg1_unversioned.Duration{} - } else { - yyv1262 := &x.NodeMonitorGracePeriod - yym1263 := z.DecBinary() - _ = yym1263 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1262) { - } else if !yym1263 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1262) - } else { - z.DecFallback(yyv1262, false) - } - } - case "registerRetryCount": - if r.TryDecodeAsNil() { - x.RegisterRetryCount = 0 - } else { - x.RegisterRetryCount = int32(r.DecodeInt(32)) - } - case "nodeStartupGracePeriod": - if 
r.TryDecodeAsNil() { - x.NodeStartupGracePeriod = pkg1_unversioned.Duration{} - } else { - yyv1265 := &x.NodeStartupGracePeriod - yym1266 := z.DecBinary() - _ = yym1266 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1265) { - } else if !yym1266 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1265) - } else { - z.DecFallback(yyv1265, false) - } - } - case "nodeMonitorPeriod": - if r.TryDecodeAsNil() { - x.NodeMonitorPeriod = pkg1_unversioned.Duration{} - } else { - yyv1267 := &x.NodeMonitorPeriod - yym1268 := z.DecBinary() - _ = yym1268 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1267) { - } else if !yym1268 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1267) - } else { - z.DecFallback(yyv1267, false) - } - } - case "serviceAccountKeyFile": - if r.TryDecodeAsNil() { - x.ServiceAccountKeyFile = "" - } else { - x.ServiceAccountKeyFile = string(r.DecodeString()) - } - case "clusterSigningCertFile": - if r.TryDecodeAsNil() { - x.ClusterSigningCertFile = "" - } else { - x.ClusterSigningCertFile = string(r.DecodeString()) - } - case "clusterSigningKeyFile": - if r.TryDecodeAsNil() { - x.ClusterSigningKeyFile = "" - } else { - x.ClusterSigningKeyFile = string(r.DecodeString()) - } - case "approveAllKubeletCSRsForGroup": - if r.TryDecodeAsNil() { - x.ApproveAllKubeletCSRsForGroup = "" - } else { - x.ApproveAllKubeletCSRsForGroup = string(r.DecodeString()) - } - case "enableProfiling": - if r.TryDecodeAsNil() { - x.EnableProfiling = false - } else { - x.EnableProfiling = bool(r.DecodeBool()) - } - case "clusterName": - if r.TryDecodeAsNil() { - x.ClusterName = "" - } else { - x.ClusterName = string(r.DecodeString()) - } - case "clusterCIDR": - if r.TryDecodeAsNil() { - x.ClusterCIDR = "" - } else { - x.ClusterCIDR = string(r.DecodeString()) - } - case "serviceCIDR": - if r.TryDecodeAsNil() { - x.ServiceCIDR = "" - } else { - x.ServiceCIDR = string(r.DecodeString()) - } - case "nodeCIDRMaskSize": - if r.TryDecodeAsNil() { - x.NodeCIDRMaskSize = 0 - } else { - x.NodeCIDRMaskSize = int32(r.DecodeInt(32)) - } - case "allocateNodeCIDRs": - if r.TryDecodeAsNil() { - x.AllocateNodeCIDRs = false - } else { - x.AllocateNodeCIDRs = bool(r.DecodeBool()) - } - case "configureCloudRoutes": - if r.TryDecodeAsNil() { - x.ConfigureCloudRoutes = false - } else { - x.ConfigureCloudRoutes = bool(r.DecodeBool()) - } - case "rootCAFile": - if r.TryDecodeAsNil() { - x.RootCAFile = "" - } else { - x.RootCAFile = string(r.DecodeString()) - } - case "contentType": - if r.TryDecodeAsNil() { - x.ContentType = "" - } else { - x.ContentType = string(r.DecodeString()) - } - case "kubeAPIQPS": - if r.TryDecodeAsNil() { - x.KubeAPIQPS = 0 - } else { - x.KubeAPIQPS = float32(r.DecodeFloat(true)) - } - case "kubeAPIBurst": - if r.TryDecodeAsNil() { - x.KubeAPIBurst = 0 - } else { - x.KubeAPIBurst = int32(r.DecodeInt(32)) - } - case "leaderElection": - if r.TryDecodeAsNil() { - x.LeaderElection = LeaderElectionConfiguration{} - } else { - yyv1284 := &x.LeaderElection - yyv1284.CodecDecodeSelf(d) - } - case "volumeConfiguration": - if r.TryDecodeAsNil() { - x.VolumeConfiguration = VolumeConfiguration{} - } else { - yyv1285 := &x.VolumeConfiguration - yyv1285.CodecDecodeSelf(d) - } - case "controllerStartInterval": - if r.TryDecodeAsNil() { - x.ControllerStartInterval = pkg1_unversioned.Duration{} - } else { - yyv1286 := &x.ControllerStartInterval - yym1287 := z.DecBinary() - _ = yym1287 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1286) { - } else if !yym1287 && z.IsJSONHandle() { - 
z.DecJSONUnmarshal(yyv1286) - } else { - z.DecFallback(yyv1286, false) - } - } - case "enableGarbageCollector": - if r.TryDecodeAsNil() { - x.EnableGarbageCollector = false - } else { - x.EnableGarbageCollector = bool(r.DecodeBool()) - } - case "concurrentGCSyncs": - if r.TryDecodeAsNil() { - x.ConcurrentGCSyncs = 0 - } else { - x.ConcurrentGCSyncs = int32(r.DecodeInt(32)) - } - case "nodeEvictionRate": - if r.TryDecodeAsNil() { - x.NodeEvictionRate = 0 - } else { - x.NodeEvictionRate = float32(r.DecodeFloat(true)) - } - case "secondaryNodeEvictionRate": - if r.TryDecodeAsNil() { - x.SecondaryNodeEvictionRate = 0 - } else { - x.SecondaryNodeEvictionRate = float32(r.DecodeFloat(true)) - } - case "largeClusterSizeThreshold": - if r.TryDecodeAsNil() { - x.LargeClusterSizeThreshold = 0 - } else { - x.LargeClusterSizeThreshold = int32(r.DecodeInt(32)) - } - case "unhealthyZoneThreshold": - if r.TryDecodeAsNil() { - x.UnhealthyZoneThreshold = 0 - } else { - x.UnhealthyZoneThreshold = float32(r.DecodeFloat(true)) - } - default: - z.DecStructFieldNotFound(-1, yys1218) - } // end switch yys1218 - } // end for yyj1218 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1294 int - var yyb1294 bool - var yyhl1294 bool = l >= 0 - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Address = "" - } else { - x.Address = string(r.DecodeString()) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UseServiceAccountCredentials = false - } else { - x.UseServiceAccountCredentials = bool(r.DecodeBool()) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CloudProvider = "" - } else { - x.CloudProvider = string(r.DecodeString()) - } - yyj1294++ - if 
yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CloudConfigFile = "" - } else { - x.CloudConfigFile = string(r.DecodeString()) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrentEndpointSyncs = 0 - } else { - x.ConcurrentEndpointSyncs = int32(r.DecodeInt(32)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrentRSSyncs = 0 - } else { - x.ConcurrentRSSyncs = int32(r.DecodeInt(32)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrentRCSyncs = 0 - } else { - x.ConcurrentRCSyncs = int32(r.DecodeInt(32)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrentServiceSyncs = 0 - } else { - x.ConcurrentServiceSyncs = int32(r.DecodeInt(32)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrentResourceQuotaSyncs = 0 - } else { - x.ConcurrentResourceQuotaSyncs = int32(r.DecodeInt(32)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrentDeploymentSyncs = 0 - } else { - x.ConcurrentDeploymentSyncs = int32(r.DecodeInt(32)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrentDaemonSetSyncs = 0 - } else { - x.ConcurrentDaemonSetSyncs = int32(r.DecodeInt(32)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrentJobSyncs = 0 - } else { - x.ConcurrentJobSyncs = int32(r.DecodeInt(32)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.ConcurrentNamespaceSyncs = 0 - } else { - x.ConcurrentNamespaceSyncs = int32(r.DecodeInt(32)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrentSATokenSyncs = 0 - } else { - x.ConcurrentSATokenSyncs = int32(r.DecodeInt(32)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LookupCacheSizeForRC = 0 - } else { - x.LookupCacheSizeForRC = int32(r.DecodeInt(32)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LookupCacheSizeForRS = 0 - } else { - x.LookupCacheSizeForRS = int32(r.DecodeInt(32)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LookupCacheSizeForDaemonSet = 0 - } else { - x.LookupCacheSizeForDaemonSet = int32(r.DecodeInt(32)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServiceSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv1315 := &x.ServiceSyncPeriod - yym1316 := z.DecBinary() - _ = yym1316 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1315) { - } else if !yym1316 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1315) - } else { - z.DecFallback(yyv1315, false) - } - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv1317 := &x.NodeSyncPeriod - yym1318 := z.DecBinary() - _ = yym1318 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1317) { - } else if !yym1318 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1317) - } else { - z.DecFallback(yyv1317, false) - } - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RouteReconciliationPeriod = pkg1_unversioned.Duration{} - } else { - yyv1319 := &x.RouteReconciliationPeriod - yym1320 := z.DecBinary() - _ = yym1320 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1319) { - } else if !yym1320 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1319) - } else { - z.DecFallback(yyv1319, false) - } - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceQuotaSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv1321 := &x.ResourceQuotaSyncPeriod - yym1322 := z.DecBinary() - _ = yym1322 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1321) { - } else if !yym1322 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1321) - } else { - z.DecFallback(yyv1321, false) - } - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NamespaceSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv1323 := &x.NamespaceSyncPeriod - yym1324 := z.DecBinary() - _ = yym1324 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1323) { - } else if !yym1324 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1323) - } else { - z.DecFallback(yyv1323, false) - } - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PVClaimBinderSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv1325 := &x.PVClaimBinderSyncPeriod - yym1326 := z.DecBinary() - _ = yym1326 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1325) { - } else if !yym1326 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1325) - } else { - z.DecFallback(yyv1325, false) - } - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinResyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv1327 := &x.MinResyncPeriod - yym1328 := z.DecBinary() - _ = yym1328 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1327) { - } else if !yym1328 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1327) - } else { - z.DecFallback(yyv1327, false) - } - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TerminatedPodGCThreshold = 0 - } else { - x.TerminatedPodGCThreshold = int32(r.DecodeInt(32)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HorizontalPodAutoscalerSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv1330 := &x.HorizontalPodAutoscalerSyncPeriod - yym1331 := z.DecBinary() - _ = yym1331 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1330) { - } else if !yym1331 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1330) - } else { - z.DecFallback(yyv1330, false) - } - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DeploymentControllerSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv1332 := &x.DeploymentControllerSyncPeriod - yym1333 := z.DecBinary() - _ = yym1333 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1332) { - } else if !yym1333 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1332) - } else { - z.DecFallback(yyv1332, false) - } - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodEvictionTimeout = pkg1_unversioned.Duration{} - } else { - yyv1334 := &x.PodEvictionTimeout - yym1335 := z.DecBinary() - _ = yym1335 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1334) { - } else if !yym1335 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1334) - } else { - z.DecFallback(yyv1334, false) - } - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DeletingPodsQps = 0 - } else { - x.DeletingPodsQps = float32(r.DecodeFloat(true)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DeletingPodsBurst = 0 - } else { - x.DeletingPodsBurst = int32(r.DecodeInt(32)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeMonitorGracePeriod = pkg1_unversioned.Duration{} - } else { - yyv1338 := &x.NodeMonitorGracePeriod - yym1339 := z.DecBinary() - _ = yym1339 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1338) { - } else if !yym1339 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1338) - } else { - z.DecFallback(yyv1338, false) - } - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RegisterRetryCount = 0 - } else { - x.RegisterRetryCount = int32(r.DecodeInt(32)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeStartupGracePeriod = pkg1_unversioned.Duration{} - } else { - yyv1341 := &x.NodeStartupGracePeriod - yym1342 := z.DecBinary() - _ = yym1342 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1341) { - } else if !yym1342 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1341) - } else { - z.DecFallback(yyv1341, false) - } - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeMonitorPeriod = pkg1_unversioned.Duration{} - } else { - yyv1343 := &x.NodeMonitorPeriod - yym1344 := z.DecBinary() - _ = yym1344 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1343) { - } else if !yym1344 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1343) - } else { - z.DecFallback(yyv1343, false) - } - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServiceAccountKeyFile = "" - } else { - x.ServiceAccountKeyFile = string(r.DecodeString()) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterSigningCertFile = "" - } else { - x.ClusterSigningCertFile = string(r.DecodeString()) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterSigningKeyFile = "" - } else { - x.ClusterSigningKeyFile = string(r.DecodeString()) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ApproveAllKubeletCSRsForGroup = "" - } else { - x.ApproveAllKubeletCSRsForGroup = string(r.DecodeString()) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EnableProfiling = false - } else { - x.EnableProfiling = bool(r.DecodeBool()) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterName = "" - } else { - x.ClusterName = string(r.DecodeString()) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterCIDR = "" - } else { - x.ClusterCIDR = string(r.DecodeString()) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServiceCIDR = "" - } else { - x.ServiceCIDR = string(r.DecodeString()) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeCIDRMaskSize = 0 - } else { - x.NodeCIDRMaskSize = int32(r.DecodeInt(32)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AllocateNodeCIDRs = false - } else { - x.AllocateNodeCIDRs = bool(r.DecodeBool()) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConfigureCloudRoutes = false - } else { - x.ConfigureCloudRoutes = bool(r.DecodeBool()) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RootCAFile = "" - } else { - x.RootCAFile = string(r.DecodeString()) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContentType = "" - } else { - x.ContentType = string(r.DecodeString()) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeAPIQPS = 0 - } else { - x.KubeAPIQPS = float32(r.DecodeFloat(true)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeAPIBurst = 0 - } else { - x.KubeAPIBurst = int32(r.DecodeInt(32)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LeaderElection = LeaderElectionConfiguration{} - } else { - yyv1360 := &x.LeaderElection - yyv1360.CodecDecodeSelf(d) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeConfiguration = VolumeConfiguration{} - } else { - yyv1361 := &x.VolumeConfiguration - yyv1361.CodecDecodeSelf(d) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ControllerStartInterval = pkg1_unversioned.Duration{} - } else { - yyv1362 := &x.ControllerStartInterval - yym1363 := z.DecBinary() - _ = yym1363 - if false { - } else if 
z.HasExtensions() && z.DecExt(yyv1362) { - } else if !yym1363 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1362) - } else { - z.DecFallback(yyv1362, false) - } - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EnableGarbageCollector = false - } else { - x.EnableGarbageCollector = bool(r.DecodeBool()) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrentGCSyncs = 0 - } else { - x.ConcurrentGCSyncs = int32(r.DecodeInt(32)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeEvictionRate = 0 - } else { - x.NodeEvictionRate = float32(r.DecodeFloat(true)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SecondaryNodeEvictionRate = 0 - } else { - x.SecondaryNodeEvictionRate = float32(r.DecodeFloat(true)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LargeClusterSizeThreshold = 0 - } else { - x.LargeClusterSizeThreshold = int32(r.DecodeInt(32)) - } - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UnhealthyZoneThreshold = 0 - } else { - x.UnhealthyZoneThreshold = float32(r.DecodeFloat(true)) - } - for { - yyj1294++ - if yyhl1294 { - yyb1294 = yyj1294 > l - } else { - yyb1294 = r.CheckBreak() - } - if yyb1294 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1294-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *VolumeConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1370 := z.EncBinary() - _ = yym1370 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1371 := !z.EncBinary() - yy2arr1371 := z.EncBasicHandle().StructToArray - var yyq1371 [4]bool - _, _, _ = yysep1371, yyq1371, yy2arr1371 - const yyr1371 bool = false - var yynn1371 int - if yyr1371 || yy2arr1371 { - r.EncodeArrayStart(4) - } else { - yynn1371 = 4 - for _, b := range yyq1371 { - if b { - yynn1371++ - } - } - r.EncodeMapStart(yynn1371) - yynn1371 = 0 - } - if yyr1371 || yy2arr1371 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1373 := z.EncBinary() - _ = yym1373 - if false { - } else { - 
r.EncodeBool(bool(x.EnableHostPathProvisioning)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enableHostPathProvisioning")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1374 := z.EncBinary() - _ = yym1374 - if false { - } else { - r.EncodeBool(bool(x.EnableHostPathProvisioning)) - } - } - if yyr1371 || yy2arr1371 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1376 := z.EncBinary() - _ = yym1376 - if false { - } else { - r.EncodeBool(bool(x.EnableDynamicProvisioning)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enableDynamicProvisioning")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1377 := z.EncBinary() - _ = yym1377 - if false { - } else { - r.EncodeBool(bool(x.EnableDynamicProvisioning)) - } - } - if yyr1371 || yy2arr1371 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1379 := &x.PersistentVolumeRecyclerConfiguration - yy1379.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("persitentVolumeRecyclerConfiguration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1380 := &x.PersistentVolumeRecyclerConfiguration - yy1380.CodecEncodeSelf(e) - } - if yyr1371 || yy2arr1371 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1382 := z.EncBinary() - _ = yym1382 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FlexVolumePluginDir)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flexVolumePluginDir")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1383 := z.EncBinary() - _ = yym1383 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FlexVolumePluginDir)) - } - } - if yyr1371 || yy2arr1371 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *VolumeConfiguration) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1384 := z.DecBinary() - _ = yym1384 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1385 := r.ContainerType() - if yyct1385 == codecSelferValueTypeMap1234 { - yyl1385 := r.ReadMapStart() - if yyl1385 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1385, d) - } - } else if yyct1385 == codecSelferValueTypeArray1234 { - yyl1385 := r.ReadArrayStart() - if yyl1385 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1385, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *VolumeConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1386Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1386Slc - var yyhl1386 bool = l >= 0 - for yyj1386 := 0; ; yyj1386++ { - if yyhl1386 { - if yyj1386 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1386Slc = r.DecodeBytes(yys1386Slc, true, true) - yys1386 := string(yys1386Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1386 { - case "enableHostPathProvisioning": - if r.TryDecodeAsNil() { - x.EnableHostPathProvisioning = false - } else { - x.EnableHostPathProvisioning = bool(r.DecodeBool()) - } - case "enableDynamicProvisioning": - if r.TryDecodeAsNil() { - x.EnableDynamicProvisioning = false - } else { - x.EnableDynamicProvisioning = bool(r.DecodeBool()) - } - case "persitentVolumeRecyclerConfiguration": - if r.TryDecodeAsNil() { - x.PersistentVolumeRecyclerConfiguration = PersistentVolumeRecyclerConfiguration{} - } else { - yyv1389 := &x.PersistentVolumeRecyclerConfiguration - yyv1389.CodecDecodeSelf(d) - } - case "flexVolumePluginDir": - if r.TryDecodeAsNil() { - x.FlexVolumePluginDir = "" - } else { - x.FlexVolumePluginDir = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys1386) - } // end switch yys1386 - } // end for yyj1386 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *VolumeConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1391 int - var yyb1391 bool - var yyhl1391 bool = l >= 0 - yyj1391++ - if yyhl1391 { - yyb1391 = yyj1391 > l - } else { - yyb1391 = r.CheckBreak() - } - if yyb1391 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EnableHostPathProvisioning = false - } else { - x.EnableHostPathProvisioning = bool(r.DecodeBool()) - } - yyj1391++ - if yyhl1391 { - yyb1391 = yyj1391 > l - } else { - yyb1391 = r.CheckBreak() - } - if yyb1391 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EnableDynamicProvisioning = false - } else { - x.EnableDynamicProvisioning = bool(r.DecodeBool()) - } - yyj1391++ - if yyhl1391 { - yyb1391 = yyj1391 > l - } else { - yyb1391 = r.CheckBreak() - } - if yyb1391 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PersistentVolumeRecyclerConfiguration = PersistentVolumeRecyclerConfiguration{} - } else { - yyv1394 := &x.PersistentVolumeRecyclerConfiguration - yyv1394.CodecDecodeSelf(d) - } - yyj1391++ - if yyhl1391 { - yyb1391 = yyj1391 > l - } else { - yyb1391 = r.CheckBreak() - } - if yyb1391 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FlexVolumePluginDir = "" - } else { - x.FlexVolumePluginDir = string(r.DecodeString()) - } - for { - yyj1391++ - if yyhl1391 { - yyb1391 = yyj1391 > l - } else { - yyb1391 = r.CheckBreak() - } - if yyb1391 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1391-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeRecyclerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1396 := z.EncBinary() - _ = yym1396 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1397 := !z.EncBinary() - yy2arr1397 := z.EncBasicHandle().StructToArray - var yyq1397 [7]bool - _, _, _ = yysep1397, 
yyq1397, yy2arr1397 - const yyr1397 bool = false - var yynn1397 int - if yyr1397 || yy2arr1397 { - r.EncodeArrayStart(7) - } else { - yynn1397 = 7 - for _, b := range yyq1397 { - if b { - yynn1397++ - } - } - r.EncodeMapStart(yynn1397) - yynn1397 = 0 - } - if yyr1397 || yy2arr1397 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1399 := z.EncBinary() - _ = yym1399 - if false { - } else { - r.EncodeInt(int64(x.MaximumRetry)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maximumRetry")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1400 := z.EncBinary() - _ = yym1400 - if false { - } else { - r.EncodeInt(int64(x.MaximumRetry)) - } - } - if yyr1397 || yy2arr1397 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1402 := z.EncBinary() - _ = yym1402 - if false { - } else { - r.EncodeInt(int64(x.MinimumTimeoutNFS)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minimumTimeoutNFS")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1403 := z.EncBinary() - _ = yym1403 - if false { - } else { - r.EncodeInt(int64(x.MinimumTimeoutNFS)) - } - } - if yyr1397 || yy2arr1397 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1405 := z.EncBinary() - _ = yym1405 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodTemplateFilePathNFS)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podTemplateFilePathNFS")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1406 := z.EncBinary() - _ = yym1406 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodTemplateFilePathNFS)) - } - } - if yyr1397 || yy2arr1397 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1408 := z.EncBinary() - _ = yym1408 - if false { - } else { - r.EncodeInt(int64(x.IncrementTimeoutNFS)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("incrementTimeoutNFS")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1409 := z.EncBinary() - _ = yym1409 - if false { - } else { - r.EncodeInt(int64(x.IncrementTimeoutNFS)) - } - } - if yyr1397 || yy2arr1397 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1411 := z.EncBinary() - _ = yym1411 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodTemplateFilePathHostPath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podTemplateFilePathHostPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1412 := z.EncBinary() - _ = yym1412 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodTemplateFilePathHostPath)) - } - } - if yyr1397 || yy2arr1397 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1414 := z.EncBinary() - _ = yym1414 - if false { - } else { - r.EncodeInt(int64(x.MinimumTimeoutHostPath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minimumTimeoutHostPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1415 := z.EncBinary() - _ = yym1415 - if false { - } else { - r.EncodeInt(int64(x.MinimumTimeoutHostPath)) - } - } - if yyr1397 || yy2arr1397 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1417 := z.EncBinary() - _ = yym1417 - if false { - } else { - r.EncodeInt(int64(x.IncrementTimeoutHostPath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("incrementTimeoutHostPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1418 := z.EncBinary() - _ = yym1418 - if false { - } else { - r.EncodeInt(int64(x.IncrementTimeoutHostPath)) - } - } - if yyr1397 || yy2arr1397 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeRecyclerConfiguration) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1419 := z.DecBinary() - _ = yym1419 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1420 := r.ContainerType() - if yyct1420 == codecSelferValueTypeMap1234 { - yyl1420 := r.ReadMapStart() - if yyl1420 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1420, d) - } - } else if yyct1420 == codecSelferValueTypeArray1234 { - yyl1420 := r.ReadArrayStart() - if yyl1420 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1420, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1421Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1421Slc - var yyhl1421 bool = l >= 0 - for yyj1421 := 0; ; yyj1421++ { - if yyhl1421 { - if yyj1421 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1421Slc = r.DecodeBytes(yys1421Slc, true, true) - yys1421 := string(yys1421Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1421 { - case "maximumRetry": - if r.TryDecodeAsNil() { - x.MaximumRetry = 0 - } else { - x.MaximumRetry = int32(r.DecodeInt(32)) - } - case "minimumTimeoutNFS": - if r.TryDecodeAsNil() { - x.MinimumTimeoutNFS = 0 - } else { - x.MinimumTimeoutNFS = int32(r.DecodeInt(32)) - } - case "podTemplateFilePathNFS": - if r.TryDecodeAsNil() { - x.PodTemplateFilePathNFS = "" - } else { - x.PodTemplateFilePathNFS = string(r.DecodeString()) - } - case "incrementTimeoutNFS": - if r.TryDecodeAsNil() { - x.IncrementTimeoutNFS = 0 - } else { - x.IncrementTimeoutNFS = int32(r.DecodeInt(32)) - } - case "podTemplateFilePathHostPath": - if r.TryDecodeAsNil() { - x.PodTemplateFilePathHostPath = "" - } else { - x.PodTemplateFilePathHostPath = string(r.DecodeString()) - } - case "minimumTimeoutHostPath": - if r.TryDecodeAsNil() { - x.MinimumTimeoutHostPath = 0 - } else { - x.MinimumTimeoutHostPath = int32(r.DecodeInt(32)) - } - case "incrementTimeoutHostPath": - if r.TryDecodeAsNil() { - x.IncrementTimeoutHostPath = 0 - } else { - x.IncrementTimeoutHostPath = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys1421) - } // end switch yys1421 - } // end for yyj1421 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1429 int - var yyb1429 bool - var yyhl1429 bool = l >= 0 - yyj1429++ - if yyhl1429 { - yyb1429 = yyj1429 > l - } else { - yyb1429 = r.CheckBreak() - } - if yyb1429 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaximumRetry = 0 - } else { - x.MaximumRetry = int32(r.DecodeInt(32)) - } - yyj1429++ - if yyhl1429 { - yyb1429 = yyj1429 > l - } else { - yyb1429 = r.CheckBreak() - } - if yyb1429 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinimumTimeoutNFS = 0 - } else { - x.MinimumTimeoutNFS = int32(r.DecodeInt(32)) - } - yyj1429++ - if yyhl1429 { - yyb1429 = yyj1429 > l - } else { - yyb1429 = r.CheckBreak() - } - if yyb1429 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodTemplateFilePathNFS = "" - } else { - x.PodTemplateFilePathNFS = string(r.DecodeString()) - } - yyj1429++ - if yyhl1429 { - yyb1429 = yyj1429 > l - } else { - yyb1429 = r.CheckBreak() - } - if yyb1429 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.IncrementTimeoutNFS = 0 - } else { - x.IncrementTimeoutNFS = int32(r.DecodeInt(32)) - } - yyj1429++ - if yyhl1429 { - yyb1429 = yyj1429 > l - } else { - yyb1429 = r.CheckBreak() - } - if yyb1429 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodTemplateFilePathHostPath = "" - } else { - x.PodTemplateFilePathHostPath = string(r.DecodeString()) - } - yyj1429++ - if yyhl1429 { - yyb1429 = yyj1429 > l - } else { - yyb1429 = r.CheckBreak() - } - if yyb1429 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinimumTimeoutHostPath = 0 - } else { - x.MinimumTimeoutHostPath = int32(r.DecodeInt(32)) - } - yyj1429++ - if yyhl1429 { - yyb1429 = yyj1429 > l - } else { - yyb1429 = r.CheckBreak() - } - if yyb1429 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.IncrementTimeoutHostPath = 0 - } else { - x.IncrementTimeoutHostPath = int32(r.DecodeInt(32)) - } - for { - yyj1429++ - if yyhl1429 { - yyb1429 = yyj1429 > l - } else { - yyb1429 = r.CheckBreak() - } - if yyb1429 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1429-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encconfig_ConfigurationMap(v pkg2_config.ConfigurationMap, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk1437, yyv1437 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym1438 := z.EncBinary() - _ = yym1438 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyk1437)) - } - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1439 := z.EncBinary() - _ = yym1439 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(yyv1437)) - } - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decconfig_ConfigurationMap(v *pkg2_config.ConfigurationMap, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1440 := *v - yyl1440 := r.ReadMapStart() - yybh1440 := z.DecBasicHandle() - if yyv1440 == nil { - yyrl1440, _ := z.DecInferLen(yyl1440, yybh1440.MaxInitLen, 32) - yyv1440 = make(map[string]string, yyrl1440) - *v = yyv1440 - } - var yymk1440 string - var yymv1440 string - var yymg1440 bool - if yybh1440.MapValueReset { - } - if yyl1440 > 0 { - for yyj1440 := 0; yyj1440 < yyl1440; yyj1440++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1440 = "" - } else { - yymk1440 = string(r.DecodeString()) - } - - if yymg1440 { - yymv1440 = yyv1440[yymk1440] - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1440 = "" - } else { - yymv1440 = string(r.DecodeString()) - } - - if yyv1440 != nil { - yyv1440[yymk1440] = yymv1440 - } - } - } else if yyl1440 < 0 { - for yyj1440 := 0; !r.CheckBreak(); yyj1440++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1440 = "" - } else { - yymk1440 = string(r.DecodeString()) - } - - if yymg1440 { - yymv1440 = yyv1440[yymk1440] - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1440 = "" - } else { - yymv1440 = string(r.DecodeString()) - } - - if yyv1440 != nil { - yyv1440[yymk1440] = yymv1440 - } - } - } // else len==0: TODO: Should we clear map entries? - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go deleted file mode 100644 index 78a07ec246..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go +++ /dev/null @@ -1,814 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package componentconfig - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - utilconfig "k8s.io/kubernetes/pkg/util/config" -) - -type KubeProxyConfiguration struct { - unversioned.TypeMeta - - // bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 - // for all interfaces) - BindAddress string `json:"bindAddress"` - // clusterCIDR is the CIDR range of the pods in the cluster. It is used to - // bridge traffic coming from outside of the cluster. If not provided, - // no off-cluster bridging will be performed. - ClusterCIDR string `json:"clusterCIDR"` - // healthzBindAddress is the IP address for the health check server to serve on, - // defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces) - HealthzBindAddress string `json:"healthzBindAddress"` - // healthzPort is the port to bind the health check server. Use 0 to disable. 
- HealthzPort int32 `json:"healthzPort"` - // hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname. - HostnameOverride string `json:"hostnameOverride"` - // iptablesMasqueradeBit is the bit of the iptables fwmark space to use for SNAT if using - // the pure iptables proxy mode. Values must be within the range [0, 31]. - IPTablesMasqueradeBit *int32 `json:"iptablesMasqueradeBit"` - // iptablesSyncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m', - // '2h22m'). Must be greater than 0. - IPTablesSyncPeriod unversioned.Duration `json:"iptablesSyncPeriodSeconds"` - // iptablesMinSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m', - // '2h22m'). - IPTablesMinSyncPeriod unversioned.Duration `json:"iptablesMinSyncPeriodSeconds"` - // kubeconfigPath is the path to the kubeconfig file with authorization information (the - // master location is set by the master flag). - KubeconfigPath string `json:"kubeconfigPath"` - // masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. - MasqueradeAll bool `json:"masqueradeAll"` - // master is the address of the Kubernetes API server (overrides any value in kubeconfig) - Master string `json:"master"` - // oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within - // the range [-1000, 1000] - OOMScoreAdj *int32 `json:"oomScoreAdj"` - // mode specifies which proxy mode to use. - Mode ProxyMode `json:"mode"` - // portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed - // in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen. - PortRange string `json:"portRange"` - // resourceContainer is the absolute name of the resource-only container to create and run - // the Kube-proxy in (Default: /kube-proxy). - ResourceContainer string `json:"resourceContainer"` - // udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s'). - // Must be greater than 0. Only applicable for proxyMode=userspace. - UDPIdleTimeout unversioned.Duration `json:"udpTimeoutMilliseconds"` - // conntrackMax is the maximum number of NAT connections to track (0 to - // leave as-is). This takes precedence over conntrackMaxPerCore and conntrackMin. - ConntrackMax int32 `json:"conntrackMax"` - // conntrackMaxPerCore is the maximum number of NAT connections to track - // per CPU core (0 to leave the limit as-is and ignore conntrackMin). - ConntrackMaxPerCore int32 `json:"conntrackMaxPerCore"` - // conntrackMin is the minimum value of connect-tracking records to allocate, - // regardless of conntrackMaxPerCore (set conntrackMaxPerCore=0 to leave the limit as-is). - ConntrackMin int32 `json:"conntrackMin"` - // conntrackTCPEstablishedTimeout is how long an idle TCP connection will be kept open - // (e.g. '2s'). Must be greater than 0. - ConntrackTCPEstablishedTimeout unversioned.Duration `json:"conntrackTCPEstablishedTimeout"` - // conntrackTCPCloseWaitTimeout is how long an idle conntrack entry - // in CLOSE_WAIT state will remain in the conntrack - // table. (e.g. '60s'). Must be greater than 0 to set. - ConntrackTCPCloseWaitTimeout unversioned.Duration `json:"conntrackTCPCloseWaitTimeout"` -} - -// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables' -// (newer, faster). If blank, look at the Node object on the Kubernetes API and respect the -// 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. 
Otherwise use the -// best-available proxy (currently iptables, but may change in future versions). If the -// iptables proxy is selected, regardless of how, but the system's kernel or iptables -// versions are insufficient, this always falls back to the userspace proxy. -type ProxyMode string - -const ( - ProxyModeUserspace ProxyMode = "userspace" - ProxyModeIPTables ProxyMode = "iptables" -) - -// HairpinMode denotes how the kubelet should configure networking to handle -// hairpin packets. -type HairpinMode string - -// Enum settings for different ways to handle hairpin packets. -const ( - // Set the hairpin flag on the veth of containers in the respective - // container runtime. - HairpinVeth = "hairpin-veth" - // Make the container bridge promiscuous. This will force it to accept - // hairpin packets, even if the flag isn't set on ports of the bridge. - PromiscuousBridge = "promiscuous-bridge" - // Neither of the above. If the kubelet is started in this hairpin mode - // and kube-proxy is running in iptables mode, hairpin packets will be - // dropped by the container bridge. - HairpinNone = "none" -) - -// TODO: curate the ordering and structure of this config object -type KubeletConfiguration struct { - unversioned.TypeMeta - - // podManifestPath is the path to the directory containing pod manifests to - // run, or the path to a single manifest file - PodManifestPath string `json:"podManifestPath"` - // syncFrequency is the max period between synchronizing running - // containers and config - SyncFrequency unversioned.Duration `json:"syncFrequency"` - // fileCheckFrequency is the duration between checking config files for - // new data - FileCheckFrequency unversioned.Duration `json:"fileCheckFrequency"` - // httpCheckFrequency is the duration between checking http for new data - HTTPCheckFrequency unversioned.Duration `json:"httpCheckFrequency"` - // manifestURL is the URL for accessing the container manifest - ManifestURL string `json:"manifestURL"` - // manifestURLHeader is the HTTP header to use when accessing the manifest - // URL, with the key separated from the value with a ':', as in 'key:value' - ManifestURLHeader string `json:"manifestURLHeader"` - // enableServer enables the Kubelet's server - EnableServer bool `json:"enableServer"` - // address is the IP address for the Kubelet to serve on (set to 0.0.0.0 - // for all interfaces) - Address string `json:"address"` - // port is the port for the Kubelet to serve on. - Port int32 `json:"port"` - // readOnlyPort is the read-only port for the Kubelet to serve on with - // no authentication/authorization (set to 0 to disable) - ReadOnlyPort int32 `json:"readOnlyPort"` - // tlsCertFile is the file containing x509 Certificate for HTTPS. (CA cert, - // if any, concatenated after server cert). If tlsCertFile and - // tlsPrivateKeyFile are not provided, a self-signed certificate - // and key are generated for the public address and saved to the directory - // passed to certDir. - TLSCertFile string `json:"tlsCertFile"` - // tlsPrivateKeyFile is the ile containing x509 private key matching - // tlsCertFile. - TLSPrivateKeyFile string `json:"tlsPrivateKeyFile"` - // certDirectory is the directory where the TLS certs are located (by - // default /var/run/kubernetes). If tlsCertFile and tlsPrivateKeyFile - // are provided, this flag will be ignored. 
- CertDirectory string `json:"certDirectory"` - // authentication specifies how requests to the Kubelet's server are authenticated - Authentication KubeletAuthentication `json:"authentication"` - // authorization specifies how requests to the Kubelet's server are authorized - Authorization KubeletAuthorization `json:"authorization"` - // hostnameOverride is the hostname used to identify the kubelet instead - // of the actual hostname. - HostnameOverride string `json:"hostnameOverride"` - // podInfraContainerImage is the image whose network/ipc namespaces - // containers in each pod will use. - PodInfraContainerImage string `json:"podInfraContainerImage"` - // dockerEndpoint is the path to the docker endpoint to communicate with. - DockerEndpoint string `json:"dockerEndpoint"` - // rootDirectory is the directory path to place kubelet files (volume - // mounts,etc). - RootDirectory string `json:"rootDirectory"` - // seccompProfileRoot is the directory path for seccomp profiles. - SeccompProfileRoot string `json:"seccompProfileRoot"` - // allowPrivileged enables containers to request privileged mode. - // Defaults to false. - AllowPrivileged bool `json:"allowPrivileged"` - // hostNetworkSources is a comma-separated list of sources from which the - // Kubelet allows pods to use of host network. Defaults to "*". Valid - // options are "file", "http", "api", and "*" (all sources). - HostNetworkSources []string `json:"hostNetworkSources"` - // hostPIDSources is a comma-separated list of sources from which the - // Kubelet allows pods to use the host pid namespace. Defaults to "*". - HostPIDSources []string `json:"hostPIDSources"` - // hostIPCSources is a comma-separated list of sources from which the - // Kubelet allows pods to use the host ipc namespace. Defaults to "*". - HostIPCSources []string `json:"hostIPCSources"` - // registryPullQPS is the limit of registry pulls per second. If 0, - // unlimited. Set to 0 for no limit. Defaults to 5.0. - RegistryPullQPS int32 `json:"registryPullQPS"` - // registryBurst is the maximum size of a bursty pulls, temporarily allows - // pulls to burst to this number, while still not exceeding registryQps. - // Only used if registryQPS > 0. - RegistryBurst int32 `json:"registryBurst"` - // eventRecordQPS is the maximum event creations per second. If 0, there - // is no limit enforced. - EventRecordQPS int32 `json:"eventRecordQPS"` - // eventBurst is the maximum size of a bursty event records, temporarily - // allows event records to burst to this number, while still not exceeding - // event-qps. Only used if eventQps > 0 - EventBurst int32 `json:"eventBurst"` - // enableDebuggingHandlers enables server endpoints for log collection - // and local running of containers and commands - EnableDebuggingHandlers bool `json:"enableDebuggingHandlers"` - // minimumGCAge is the minimum age for a finished container before it is - // garbage collected. - MinimumGCAge unversioned.Duration `json:"minimumGCAge"` - // maxPerPodContainerCount is the maximum number of old instances to - // retain per container. Each container takes up some disk space. - MaxPerPodContainerCount int32 `json:"maxPerPodContainerCount"` - // maxContainerCount is the maximum number of old instances of containers - // to retain globally. Each container takes up some disk space. 
- MaxContainerCount int32 `json:"maxContainerCount"` - // cAdvisorPort is the port of the localhost cAdvisor endpoint - CAdvisorPort int32 `json:"cAdvisorPort"` - // healthzPort is the port of the localhost healthz endpoint - HealthzPort int32 `json:"healthzPort"` - // healthzBindAddress is the IP address for the healthz server to serve - // on. - HealthzBindAddress string `json:"healthzBindAddress"` - // oomScoreAdj is The oom-score-adj value for kubelet process. Values - // must be within the range [-1000, 1000]. - OOMScoreAdj int32 `json:"oomScoreAdj"` - // registerNode enables automatic registration with the apiserver. - RegisterNode bool `json:"registerNode"` - // clusterDomain is the DNS domain for this cluster. If set, kubelet will - // configure all containers to search this domain in addition to the - // host's search domains. - ClusterDomain string `json:"clusterDomain"` - // masterServiceNamespace is The namespace from which the kubernetes - // master services should be injected into pods. - MasterServiceNamespace string `json:"masterServiceNamespace"` - // clusterDNS is the IP address for a cluster DNS server. If set, kubelet - // will configure all containers to use this for DNS resolution in - // addition to the host's DNS servers - ClusterDNS string `json:"clusterDNS"` - // streamingConnectionIdleTimeout is the maximum time a streaming connection - // can be idle before the connection is automatically closed. - StreamingConnectionIdleTimeout unversioned.Duration `json:"streamingConnectionIdleTimeout"` - // nodeStatusUpdateFrequency is the frequency that kubelet posts node - // status to master. Note: be cautious when changing the constant, it - // must work with nodeMonitorGracePeriod in nodecontroller. - NodeStatusUpdateFrequency unversioned.Duration `json:"nodeStatusUpdateFrequency"` - // imageMinimumGCAge is the minimum age for an unused image before it is - // garbage collected. - ImageMinimumGCAge unversioned.Duration `json:"imageMinimumGCAge"` - // imageGCHighThresholdPercent is the percent of disk usage after which - // image garbage collection is always run. - ImageGCHighThresholdPercent int32 `json:"imageGCHighThresholdPercent"` - // imageGCLowThresholdPercent is the percent of disk usage before which - // image garbage collection is never run. Lowest disk usage to garbage - // collect to. - ImageGCLowThresholdPercent int32 `json:"imageGCLowThresholdPercent"` - // lowDiskSpaceThresholdMB is the absolute free disk space, in MB, to - // maintain. When disk space falls below this threshold, new pods would - // be rejected. - LowDiskSpaceThresholdMB int32 `json:"lowDiskSpaceThresholdMB"` - // How frequently to calculate and cache volume disk usage for all pods - VolumeStatsAggPeriod unversioned.Duration `json:"volumeStatsAggPeriod"` - // networkPluginName is the name of the network plugin to be invoked for - // various events in kubelet/pod lifecycle - NetworkPluginName string `json:"networkPluginName"` - // networkPluginMTU is the MTU to be passed to the network plugin, - // and overrides the default MTU for cases where it cannot be automatically - // computed (such as IPSEC). 
- NetworkPluginMTU int32 `json:"networkPluginMTU"` - // networkPluginDir is the full path of the directory in which to search - // for network plugins (and, for backwards-compat, CNI config files) - NetworkPluginDir string `json:"networkPluginDir"` - // CNIConfDir is the full path of the directory in which to search for - // CNI config files - CNIConfDir string `json:"cniConfDir"` - // CNIBinDir is the full path of the directory in which to search for - // CNI plugin binaries - CNIBinDir string `json:"cniBinDir"` - // volumePluginDir is the full path of the directory in which to search - // for additional third party volume plugins - VolumePluginDir string `json:"volumePluginDir"` - // cloudProvider is the provider for cloud services. - // +optional - CloudProvider string `json:"cloudProvider,omitempty"` - // cloudConfigFile is the path to the cloud provider configuration file. - // +optional - CloudConfigFile string `json:"cloudConfigFile,omitempty"` - // KubeletCgroups is the absolute name of cgroups to isolate the kubelet in. - // +optional - KubeletCgroups string `json:"kubeletCgroups,omitempty"` - // Enable QoS based Cgroup hierarchy: top level cgroups for QoS Classes - // And all Burstable and BestEffort pods are brought up under their - // specific top level QoS cgroup. - // +optional - ExperimentalCgroupsPerQOS bool `json:"experimentalCgroupsPerQOS,omitempty"` - // driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd) - // +optional - CgroupDriver string `json:"cgroupDriver,omitempty"` - // Cgroups that container runtime is expected to be isolated in. - // +optional - RuntimeCgroups string `json:"runtimeCgroups,omitempty"` - // SystemCgroups is absolute name of cgroups in which to place - // all non-kernel processes that are not already in a container. Empty - // for no container. Rolling back the flag requires a reboot. - // +optional - SystemCgroups string `json:"systemCgroups,omitempty"` - // CgroupRoot is the root cgroup to use for pods. - // If ExperimentalCgroupsPerQOS is enabled, this is the root of the QoS cgroup hierarchy. - // +optional - CgroupRoot string `json:"cgroupRoot,omitempty"` - // containerRuntime is the container runtime to use. - ContainerRuntime string `json:"containerRuntime"` - // remoteRuntimeEndpoint is the endpoint of remote runtime service - RemoteRuntimeEndpoint string `json:"remoteRuntimeEndpoint"` - // remoteImageEndpoint is the endpoint of remote image service - RemoteImageEndpoint string `json:"remoteImageEndpoint"` - // runtimeRequestTimeout is the timeout for all runtime requests except long running - // requests - pull, logs, exec and attach. - // +optional - RuntimeRequestTimeout unversioned.Duration `json:"runtimeRequestTimeout,omitempty"` - // rktPath is the path of rkt binary. Leave empty to use the first rkt in - // $PATH. - // +optional - RktPath string `json:"rktPath,omitempty"` - // experimentalMounterPath is the path of mounter binary. Leave empty to use the default mount path - ExperimentalMounterPath string `json:"experimentalMounterPath,omitempty"` - // rktApiEndpoint is the endpoint of the rkt API service to communicate with. - // +optional - RktAPIEndpoint string `json:"rktAPIEndpoint,omitempty"` - // rktStage1Image is the image to use as stage1. Local paths and - // http/https URLs are supported. - // +optional - RktStage1Image string `json:"rktStage1Image,omitempty"` - // lockFilePath is the path that kubelet will use to as a lock file. 
- // It uses this file as a lock to synchronize with other kubelet processes - // that may be running. - LockFilePath string `json:"lockFilePath"` - // ExitOnLockContention is a flag that signifies to the kubelet that it is running - // in "bootstrap" mode. This requires that 'LockFilePath' has been set. - // This will cause the kubelet to listen to inotify events on the lock file, - // releasing it and exiting when another process tries to open that file. - ExitOnLockContention bool `json:"exitOnLockContention"` - // How should the kubelet configure the container bridge for hairpin packets. - // Setting this flag allows endpoints in a Service to loadbalance back to - // themselves if they should try to access their own Service. Values: - // "promiscuous-bridge": make the container bridge promiscuous. - // "hairpin-veth": set the hairpin flag on container veth interfaces. - // "none": do nothing. - // Generally, one must set --hairpin-mode=veth-flag to achieve hairpin NAT, - // because promiscous-bridge assumes the existence of a container bridge named cbr0. - HairpinMode string `json:"hairpinMode"` - // The node has babysitter process monitoring docker and kubelet. - BabysitDaemons bool `json:"babysitDaemons"` - // maxPods is the number of pods that can run on this Kubelet. - MaxPods int32 `json:"maxPods"` - // nvidiaGPUs is the number of NVIDIA GPU devices on this node. - NvidiaGPUs int32 `json:"nvidiaGPUs"` - // dockerExecHandlerName is the handler to use when executing a command - // in a container. Valid values are 'native' and 'nsenter'. Defaults to - // 'native'. - DockerExecHandlerName string `json:"dockerExecHandlerName"` - // The CIDR to use for pod IP addresses, only used in standalone mode. - // In cluster mode, this is obtained from the master. - PodCIDR string `json:"podCIDR"` - // ResolverConfig is the resolver configuration file used as the basis - // for the container DNS resolution configuration."), [] - ResolverConfig string `json:"resolvConf"` - // cpuCFSQuota is Enable CPU CFS quota enforcement for containers that - // specify CPU limits - CPUCFSQuota bool `json:"cpuCFSQuota"` - // containerized should be set to true if kubelet is running in a container. - Containerized bool `json:"containerized"` - // maxOpenFiles is Number of files that can be opened by Kubelet process. - MaxOpenFiles int64 `json:"maxOpenFiles"` - // reconcileCIDR is Reconcile node CIDR with the CIDR specified by the - // API server. Won't have any effect if register-node is false. - ReconcileCIDR bool `json:"reconcileCIDR"` - // registerSchedulable tells the kubelet to register the node as - // schedulable. Won't have any effect if register-node is false. - RegisterSchedulable bool `json:"registerSchedulable"` - // contentType is contentType of requests sent to apiserver. - ContentType string `json:"contentType"` - // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver - KubeAPIQPS int32 `json:"kubeAPIQPS"` - // kubeAPIBurst is the burst to allow while talking with kubernetes - // apiserver - KubeAPIBurst int32 `json:"kubeAPIBurst"` - // serializeImagePulls when enabled, tells the Kubelet to pull images one - // at a time. We recommend *not* changing the default value on nodes that - // run docker daemon with version < 1.9 or an Aufs storage backend. - // Issue #10959 has more details. 
- SerializeImagePulls bool `json:"serializeImagePulls"` - // outOfDiskTransitionFrequency is duration for which the kubelet has to - // wait before transitioning out of out-of-disk node condition status. - // +optional - OutOfDiskTransitionFrequency unversioned.Duration `json:"outOfDiskTransitionFrequency,omitempty"` - // nodeIP is IP address of the node. If set, kubelet will use this IP - // address for the node. - // +optional - NodeIP string `json:"nodeIP,omitempty"` - // nodeLabels to add when registering the node in the cluster. - NodeLabels map[string]string `json:"nodeLabels"` - // nonMasqueradeCIDR configures masquerading: traffic to IPs outside this range will use IP masquerade. - NonMasqueradeCIDR string `json:"nonMasqueradeCIDR"` - // enable gathering custom metrics. - EnableCustomMetrics bool `json:"enableCustomMetrics"` - // Comma-delimited list of hard eviction expressions. For example, 'memory.available<300Mi'. - // +optional - EvictionHard string `json:"evictionHard,omitempty"` - // Comma-delimited list of soft eviction expressions. For example, 'memory.available<300Mi'. - // +optional - EvictionSoft string `json:"evictionSoft,omitempty"` - // Comma-delimeted list of grace periods for each soft eviction signal. For example, 'memory.available=30s'. - // +optional - EvictionSoftGracePeriod string `json:"evictionSoftGracePeriod,omitempty"` - // Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition. - // +optional - EvictionPressureTransitionPeriod unversioned.Duration `json:"evictionPressureTransitionPeriod,omitempty"` - // Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. - // +optional - EvictionMaxPodGracePeriod int32 `json:"evictionMaxPodGracePeriod,omitempty"` - // Comma-delimited list of minimum reclaims (e.g. imagefs.available=2Gi) that describes the minimum amount of resource the kubelet will reclaim when performing a pod eviction if that resource is under pressure. - // +optional - EvictionMinimumReclaim string `json:"evictionMinimumReclaim,omitempty"` - // If enabled, the kubelet will integrate with the kernel memcg notification to determine if memory eviction thresholds are crossed rather than polling. - // +optional - ExperimentalKernelMemcgNotification bool `json:"experimentalKernelMemcgNotification"` - // Maximum number of pods per core. Cannot exceed MaxPods - PodsPerCore int32 `json:"podsPerCore"` - // enableControllerAttachDetach enables the Attach/Detach controller to - // manage attachment/detachment of volumes scheduled to this node, and - // disables kubelet from executing any attach/detach operations - EnableControllerAttachDetach bool `json:"enableControllerAttachDetach"` - // A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs - // that describe resources reserved for non-kubernetes components. - // Currently only cpu and memory are supported. [default=none] - // See http://kubernetes.io/docs/user-guide/compute-resources for more detail. - SystemReserved utilconfig.ConfigurationMap `json:"systemReserved"` - // A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs - // that describe resources reserved for kubernetes system components. - // Currently only cpu and memory are supported. [default=none] - // See http://kubernetes.io/docs/user-guide/compute-resources for more detail. 
- KubeReserved utilconfig.ConfigurationMap `json:"kubeReserved"` - // Default behaviour for kernel tuning - ProtectKernelDefaults bool `json:"protectKernelDefaults"` - // If true, Kubelet ensures a set of iptables rules are present on host. - // These rules will serve as utility for various components, e.g. kube-proxy. - // The rules will be created based on IPTablesMasqueradeBit and IPTablesDropBit. - MakeIPTablesUtilChains bool `json:"makeIPTablesUtilChains"` - // iptablesMasqueradeBit is the bit of the iptables fwmark space to use for SNAT - // Values must be within the range [0, 31]. - // Warning: Please match the value of corresponding parameter in kube-proxy - // TODO: clean up IPTablesMasqueradeBit in kube-proxy - IPTablesMasqueradeBit int32 `json:"iptablesMasqueradeBit"` - // iptablesDropBit is the bit of the iptables fwmark space to use for dropping packets. Kubelet will ensure iptables mark and drop rules. - // Values must be within the range [0, 31]. Must be different from IPTablesMasqueradeBit - IPTablesDropBit int32 `json:"iptablesDropBit"` - // Whitelist of unsafe sysctls or sysctl patterns (ending in *). - // +optional - AllowedUnsafeSysctls []string `json:"experimentalAllowedUnsafeSysctls,omitempty"` - // featureGates is a string of comma-separated key=value pairs that describe feature - // gates for alpha/experimental features. - FeatureGates string `json:"featureGates"` - // Enable Container Runtime Interface (CRI) integration. - // +optional - EnableCRI bool `json:"enableCRI,omitempty"` - // TODO(#34726:1.8.0): Remove the opt-in for failing when swap is enabled. - // Tells the Kubelet to fail to start if swap is enabled on the node. - ExperimentalFailSwapOn bool `json:"experimentalFailSwapOn,omitempty"` - // This flag, if set, enables a check prior to mount operations to verify that the required components - // (binaries, etc.) to mount the volume are available on the underlying node. If the check is enabled - // and fails the mount operation fails. - ExperimentalCheckNodeCapabilitiesBeforeMount bool `json:"ExperimentalCheckNodeCapabilitiesBeforeMount,omitempty"` -} - -type KubeletAuthorizationMode string - -const ( - // KubeletAuthorizationModeAlwaysAllow authorizes all authenticated requests - KubeletAuthorizationModeAlwaysAllow KubeletAuthorizationMode = "AlwaysAllow" - // KubeletAuthorizationModeWebhook uses the SubjectAccessReview API to determine authorization - KubeletAuthorizationModeWebhook KubeletAuthorizationMode = "Webhook" -) - -type KubeletAuthorization struct { - // mode is the authorization mode to apply to requests to the kubelet server. - // Valid values are AlwaysAllow and Webhook. - // Webhook mode uses the SubjectAccessReview API to determine authorization. - Mode KubeletAuthorizationMode `json:"mode"` - - // webhook contains settings related to Webhook authorization. - Webhook KubeletWebhookAuthorization `json:"webhook"` -} - -type KubeletWebhookAuthorization struct { - // cacheAuthorizedTTL is the duration to cache 'authorized' responses from the webhook authorizer. - CacheAuthorizedTTL unversioned.Duration `json:"cacheAuthorizedTTL"` - // cacheUnauthorizedTTL is the duration to cache 'unauthorized' responses from the webhook authorizer. 
- CacheUnauthorizedTTL unversioned.Duration `json:"cacheUnauthorizedTTL"` -} - -type KubeletAuthentication struct { - // x509 contains settings related to x509 client certificate authentication - X509 KubeletX509Authentication `json:"x509"` - // webhook contains settings related to webhook bearer token authentication - Webhook KubeletWebhookAuthentication `json:"webhook"` - // anonymous contains settings related to anonymous authentication - Anonymous KubeletAnonymousAuthentication `json:"anonymous"` -} - -type KubeletX509Authentication struct { - // clientCAFile is the path to a PEM-encoded certificate bundle. If set, any request presenting a client certificate - // signed by one of the authorities in the bundle is authenticated with a username corresponding to the CommonName, - // and groups corresponding to the Organization in the client certificate. - ClientCAFile string `json:"clientCAFile"` -} - -type KubeletWebhookAuthentication struct { - // enabled allows bearer token authentication backed by the tokenreviews.authentication.k8s.io API - Enabled bool `json:"enabled"` - // cacheTTL enables caching of authentication results - CacheTTL unversioned.Duration `json:"cacheTTL"` -} - -type KubeletAnonymousAuthentication struct { - // enabled allows anonymous requests to the kubelet server. - // Requests that are not rejected by another authentication method are treated as anonymous requests. - // Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated. - Enabled bool `json:"enabled"` -} - -type KubeSchedulerConfiguration struct { - unversioned.TypeMeta - - // port is the port that the scheduler's http service runs on. - Port int32 `json:"port"` - // address is the IP address to serve on. - Address string `json:"address"` - // algorithmProvider is the scheduling algorithm provider to use. - AlgorithmProvider string `json:"algorithmProvider"` - // policyConfigFile is the filepath to the scheduler policy configuration. - PolicyConfigFile string `json:"policyConfigFile"` - // enableProfiling enables profiling via web interface. - EnableProfiling bool `json:"enableProfiling"` - // contentType is contentType of requests sent to apiserver. - ContentType string `json:"contentType"` - // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver. - KubeAPIQPS float32 `json:"kubeAPIQPS"` - // kubeAPIBurst is the QPS burst to use while talking with kubernetes apiserver. - KubeAPIBurst int32 `json:"kubeAPIBurst"` - // schedulerName is name of the scheduler, used to select which pods - // will be processed by this scheduler, based on pod's annotation with - // key 'scheduler.alpha.kubernetes.io/name'. - SchedulerName string `json:"schedulerName"` - // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule - // corresponding to every RequiredDuringScheduling affinity rule. - // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100. - HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"` - // Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity. - FailureDomains string `json:"failureDomains"` - // leaderElection defines the configuration of leader election client. 
- LeaderElection LeaderElectionConfiguration `json:"leaderElection"` -} - -// LeaderElectionConfiguration defines the configuration of leader election -// clients for components that can run with leader election enabled. -type LeaderElectionConfiguration struct { - // leaderElect enables a leader election client to gain leadership - // before executing the main loop. Enable this when running replicated - // components for high availability. - LeaderElect bool `json:"leaderElect"` - // leaseDuration is the duration that non-leader candidates will wait - // after observing a leadership renewal until attempting to acquire - // leadership of a led but unrenewed leader slot. This is effectively the - // maximum duration that a leader can be stopped before it is replaced - // by another candidate. This is only applicable if leader election is - // enabled. - LeaseDuration unversioned.Duration `json:"leaseDuration"` - // renewDeadline is the interval between attempts by the acting master to - // renew a leadership slot before it stops leading. This must be less - // than or equal to the lease duration. This is only applicable if leader - // election is enabled. - RenewDeadline unversioned.Duration `json:"renewDeadline"` - // retryPeriod is the duration the clients should wait between attempting - // acquisition and renewal of a leadership. This is only applicable if - // leader election is enabled. - RetryPeriod unversioned.Duration `json:"retryPeriod"` -} - -type KubeControllerManagerConfiguration struct { - unversioned.TypeMeta - - // port is the port that the controller-manager's http service runs on. - Port int32 `json:"port"` - // address is the IP address to serve on (set to 0.0.0.0 for all interfaces). - Address string `json:"address"` - // useServiceAccountCredentials indicates whether controllers should be run with - // individual service account credentials. - UseServiceAccountCredentials bool `json:"useServiceAccountCredentials"` - // cloudProvider is the provider for cloud services. - CloudProvider string `json:"cloudProvider"` - // cloudConfigFile is the path to the cloud provider configuration file. - CloudConfigFile string `json:"cloudConfigFile"` - // concurrentEndpointSyncs is the number of endpoint syncing operations - // that will be done concurrently. Larger number = faster endpoint updating, - // but more CPU (and network) load. - ConcurrentEndpointSyncs int32 `json:"concurrentEndpointSyncs"` - // concurrentRSSyncs is the number of replica sets that are allowed to sync - // concurrently. Larger number = more responsive replica management, but more - // CPU (and network) load. - ConcurrentRSSyncs int32 `json:"concurrentRSSyncs"` - // concurrentRCSyncs is the number of replication controllers that are - // allowed to sync concurrently. Larger number = more responsive replica - // management, but more CPU (and network) load. - ConcurrentRCSyncs int32 `json:"concurrentRCSyncs"` - // concurrentServiceSyncs is the number of services that are - // allowed to sync concurrently. Larger number = more responsive service - // management, but more CPU (and network) load. - ConcurrentServiceSyncs int32 `json:"concurrentServiceSyncs"` - // concurrentResourceQuotaSyncs is the number of resource quotas that are - // allowed to sync concurrently. Larger number = more responsive quota - // management, but more CPU (and network) load. 
- ConcurrentResourceQuotaSyncs int32 `json:"concurrentResourceQuotaSyncs"` - // concurrentDeploymentSyncs is the number of deployment objects that are - // allowed to sync concurrently. Larger number = more responsive deployments, - // but more CPU (and network) load. - ConcurrentDeploymentSyncs int32 `json:"concurrentDeploymentSyncs"` - // concurrentDaemonSetSyncs is the number of daemonset objects that are - // allowed to sync concurrently. Larger number = more responsive daemonset, - // but more CPU (and network) load. - ConcurrentDaemonSetSyncs int32 `json:"concurrentDaemonSetSyncs"` - // concurrentJobSyncs is the number of job objects that are - // allowed to sync concurrently. Larger number = more responsive jobs, - // but more CPU (and network) load. - ConcurrentJobSyncs int32 `json:"concurrentJobSyncs"` - // concurrentNamespaceSyncs is the number of namespace objects that are - // allowed to sync concurrently. - ConcurrentNamespaceSyncs int32 `json:"concurrentNamespaceSyncs"` - // concurrentSATokenSyncs is the number of service account token syncing operations - // that will be done concurrently. - ConcurrentSATokenSyncs int32 `json:"concurrentSATokenSyncs"` - // lookupCacheSizeForRC is the size of lookup cache for replication controllers. - // Larger number = more responsive replica management, but more MEM load. - LookupCacheSizeForRC int32 `json:"lookupCacheSizeForRC"` - // lookupCacheSizeForRS is the size of lookup cache for replicatsets. - // Larger number = more responsive replica management, but more MEM load. - LookupCacheSizeForRS int32 `json:"lookupCacheSizeForRS"` - // lookupCacheSizeForDaemonSet is the size of lookup cache for daemonsets. - // Larger number = more responsive daemonset, but more MEM load. - LookupCacheSizeForDaemonSet int32 `json:"lookupCacheSizeForDaemonSet"` - // serviceSyncPeriod is the period for syncing services with their external - // load balancers. - ServiceSyncPeriod unversioned.Duration `json:"serviceSyncPeriod"` - // nodeSyncPeriod is the period for syncing nodes from cloudprovider. Longer - // periods will result in fewer calls to cloud provider, but may delay addition - // of new nodes to cluster. - NodeSyncPeriod unversioned.Duration `json:"nodeSyncPeriod"` - // routeReconciliationPeriod is the period for reconciling routes created for Nodes by cloud provider.. - RouteReconciliationPeriod unversioned.Duration `json:"routeReconciliationPeriod"` - // resourceQuotaSyncPeriod is the period for syncing quota usage status - // in the system. - ResourceQuotaSyncPeriod unversioned.Duration `json:"resourceQuotaSyncPeriod"` - // namespaceSyncPeriod is the period for syncing namespace life-cycle - // updates. - NamespaceSyncPeriod unversioned.Duration `json:"namespaceSyncPeriod"` - // pvClaimBinderSyncPeriod is the period for syncing persistent volumes - // and persistent volume claims. - PVClaimBinderSyncPeriod unversioned.Duration `json:"pvClaimBinderSyncPeriod"` - // minResyncPeriod is the resync period in reflectors; will be random between - // minResyncPeriod and 2*minResyncPeriod. - MinResyncPeriod unversioned.Duration `json:"minResyncPeriod"` - // terminatedPodGCThreshold is the number of terminated pods that can exist - // before the terminated pod garbage collector starts deleting terminated pods. - // If <= 0, the terminated pod garbage collector is disabled. 
- TerminatedPodGCThreshold int32 `json:"terminatedPodGCThreshold"` - // horizontalPodAutoscalerSyncPeriod is the period for syncing the number of - // pods in horizontal pod autoscaler. - HorizontalPodAutoscalerSyncPeriod unversioned.Duration `json:"horizontalPodAutoscalerSyncPeriod"` - // deploymentControllerSyncPeriod is the period for syncing the deployments. - DeploymentControllerSyncPeriod unversioned.Duration `json:"deploymentControllerSyncPeriod"` - // podEvictionTimeout is the grace period for deleting pods on failed nodes. - PodEvictionTimeout unversioned.Duration `json:"podEvictionTimeout"` - // DEPRECATED: deletingPodsQps is the number of nodes per second on which pods are deleted in - // case of node failure. - DeletingPodsQps float32 `json:"deletingPodsQps"` - // DEPRECATED: deletingPodsBurst is the number of nodes on which pods are bursty deleted in - // case of node failure. For more details look into RateLimiter. - DeletingPodsBurst int32 `json:"deletingPodsBurst"` - // nodeMontiorGracePeriod is the amount of time which we allow a running node to be - // unresponsive before marking it unhealthy. Must be N times more than kubelet's - // nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet - // to post node status. - NodeMonitorGracePeriod unversioned.Duration `json:"nodeMonitorGracePeriod"` - // registerRetryCount is the number of retries for initial node registration. - // Retry interval equals node-sync-period. - RegisterRetryCount int32 `json:"registerRetryCount"` - // nodeStartupGracePeriod is the amount of time which we allow starting a node to - // be unresponsive before marking it unhealthy. - NodeStartupGracePeriod unversioned.Duration `json:"nodeStartupGracePeriod"` - // nodeMonitorPeriod is the period for syncing NodeStatus in NodeController. - NodeMonitorPeriod unversioned.Duration `json:"nodeMonitorPeriod"` - // serviceAccountKeyFile is the filename containing a PEM-encoded private RSA key - // used to sign service account tokens. - ServiceAccountKeyFile string `json:"serviceAccountKeyFile"` - // clusterSigningCertFile is the filename containing a PEM-encoded - // X509 CA certificate used to issue cluster-scoped certificates - ClusterSigningCertFile string `json:"clusterSigningCertFile"` - // clusterSigningCertFile is the filename containing a PEM-encoded - // RSA or ECDSA private key used to issue cluster-scoped certificates - ClusterSigningKeyFile string `json:"clusterSigningKeyFile"` - // approveAllKubeletCSRs tells the CSR controller to approve all CSRs originating - // from the kubelet bootstrapping group automatically. - // WARNING: this grants all users with access to the certificates API group - // the ability to create credentials for any user that has access to the boostrapping - // user's credentials. - ApproveAllKubeletCSRsForGroup string `json:"approveAllKubeletCSRsForGroup"` - // enableProfiling enables profiling via web interface host:port/debug/pprof/ - EnableProfiling bool `json:"enableProfiling"` - // clusterName is the instance prefix for the cluster. - ClusterName string `json:"clusterName"` - // clusterCIDR is CIDR Range for Pods in cluster. - ClusterCIDR string `json:"clusterCIDR"` - // serviceCIDR is CIDR Range for Services in cluster. - ServiceCIDR string `json:"serviceCIDR"` - // NodeCIDRMaskSize is the mask size for node cidr in cluster. 
- NodeCIDRMaskSize int32 `json:"nodeCIDRMaskSize"` - // allocateNodeCIDRs enables CIDRs for Pods to be allocated and, if - // ConfigureCloudRoutes is true, to be set on the cloud provider. - AllocateNodeCIDRs bool `json:"allocateNodeCIDRs"` - // configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs - // to be configured on the cloud provider. - ConfigureCloudRoutes bool `json:"configureCloudRoutes"` - // rootCAFile is the root certificate authority will be included in service - // account's token secret. This must be a valid PEM-encoded CA bundle. - RootCAFile string `json:"rootCAFile"` - // contentType is contentType of requests sent to apiserver. - ContentType string `json:"contentType"` - // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver. - KubeAPIQPS float32 `json:"kubeAPIQPS"` - // kubeAPIBurst is the burst to use while talking with kubernetes apiserver. - KubeAPIBurst int32 `json:"kubeAPIBurst"` - // leaderElection defines the configuration of leader election client. - LeaderElection LeaderElectionConfiguration `json:"leaderElection"` - // volumeConfiguration holds configuration for volume related features. - VolumeConfiguration VolumeConfiguration `json:"volumeConfiguration"` - // How long to wait between starting controller managers - ControllerStartInterval unversioned.Duration `json:"controllerStartInterval"` - // enables the generic garbage collector. MUST be synced with the - // corresponding flag of the kube-apiserver. WARNING: the generic garbage - // collector is an alpha feature. - EnableGarbageCollector bool `json:"enableGarbageCollector"` - // concurrentGCSyncs is the number of garbage collector workers that are - // allowed to sync concurrently. - ConcurrentGCSyncs int32 `json:"concurrentGCSyncs"` - // nodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is healthy - NodeEvictionRate float32 `json:"nodeEvictionRate"` - // secondaryNodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is unhealty - SecondaryNodeEvictionRate float32 `json:"secondaryNodeEvictionRate"` - // secondaryNodeEvictionRate is implicitly overridden to 0 for clusters smaller than or equal to largeClusterSizeThreshold - LargeClusterSizeThreshold int32 `json:"largeClusterSizeThreshold"` - // Zone is treated as unhealthy in nodeEvictionRate and secondaryNodeEvictionRate when at least - // unhealthyZoneThreshold (no less than 3) of Nodes in the zone are NotReady - UnhealthyZoneThreshold float32 `json:"unhealthyZoneThreshold"` -} - -// VolumeConfiguration contains *all* enumerated flags meant to configure all volume -// plugins. From this config, the controller-manager binary will create many instances of -// volume.VolumeConfig, each containing only the configuration needed for that plugin which -// are then passed to the appropriate plugin. The ControllerManager binary is the only part -// of the code which knows what plugins are supported and which flags correspond to each plugin. -type VolumeConfiguration struct { - // enableHostPathProvisioning enables HostPath PV provisioning when running without a - // cloud provider. This allows testing and development of provisioning features. HostPath - // provisioning is not supported in any way, won't work in a multi-node cluster, and - // should not be used for anything other than testing or development. 
- EnableHostPathProvisioning bool `json:"enableHostPathProvisioning"` - // enableDynamicProvisioning enables the provisioning of volumes when running within an environment - // that supports dynamic provisioning. Defaults to true. - EnableDynamicProvisioning bool `json:"enableDynamicProvisioning"` - // persistentVolumeRecyclerConfiguration holds configuration for persistent volume plugins. - PersistentVolumeRecyclerConfiguration PersistentVolumeRecyclerConfiguration `json:"persitentVolumeRecyclerConfiguration"` - // volumePluginDir is the full path of the directory in which the flex - // volume plugin should search for additional third party volume plugins - FlexVolumePluginDir string `json:"flexVolumePluginDir"` -} - -type PersistentVolumeRecyclerConfiguration struct { - // maximumRetry is number of retries the PV recycler will execute on failure to recycle - // PV. - MaximumRetry int32 `json:"maximumRetry"` - // minimumTimeoutNFS is the minimum ActiveDeadlineSeconds to use for an NFS Recycler - // pod. - MinimumTimeoutNFS int32 `json:"minimumTimeoutNFS"` - // podTemplateFilePathNFS is the file path to a pod definition used as a template for - // NFS persistent volume recycling - PodTemplateFilePathNFS string `json:"podTemplateFilePathNFS"` - // incrementTimeoutNFS is the increment of time added per Gi to ActiveDeadlineSeconds - // for an NFS scrubber pod. - IncrementTimeoutNFS int32 `json:"incrementTimeoutNFS"` - // podTemplateFilePathHostPath is the file path to a pod definition used as a template for - // HostPath persistent volume recycling. This is for development and testing only and - // will not work in a multi-node cluster. - PodTemplateFilePathHostPath string `json:"podTemplateFilePathHostPath"` - // minimumTimeoutHostPath is the minimum ActiveDeadlineSeconds to use for a HostPath - // Recycler pod. This is for development and testing only and will not work in a multi-node - // cluster. - MinimumTimeoutHostPath int32 `json:"minimumTimeoutHostPath"` - // incrementTimeoutHostPath is the increment of time added per Gi to ActiveDeadlineSeconds - // for a HostPath scrubber pod. This is for development and testing only and will not work - // in a multi-node cluster. 
- IncrementTimeoutHostPath int32 `json:"incrementTimeoutHostPath"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/BUILD deleted file mode 100644 index c8edcb54b1..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/BUILD +++ /dev/null @@ -1,36 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "defaults.go", - "doc.go", - "register.go", - "types.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apis/componentconfig:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/kubelet/qos:go_default_library", - "//pkg/kubelet/types:go_default_library", - "//pkg/master/ports:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/config:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go deleted file mode 100644 index 7cf8834b44..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go +++ /dev/null @@ -1,422 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "path/filepath" - "runtime" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/kubelet/qos" - kubetypes "k8s.io/kubernetes/pkg/kubelet/types" - "k8s.io/kubernetes/pkg/master/ports" - kruntime "k8s.io/kubernetes/pkg/runtime" -) - -const ( - defaultRootDir = "/var/lib/kubelet" - - // When these values are updated, also update test/e2e/framework/util.go - defaultPodInfraContainerImageName = "gcr.io/google_containers/pause" - defaultPodInfraContainerImageVersion = "3.0" - defaultPodInfraContainerImage = defaultPodInfraContainerImageName + - "-" + runtime.GOARCH + ":" + - defaultPodInfraContainerImageVersion - - // From pkg/kubelet/rkt/rkt.go to avoid circular import - defaultRktAPIServiceEndpoint = "localhost:15441" - - AutoDetectCloudProvider = "auto-detect" - - defaultIPTablesMasqueradeBit = 14 - defaultIPTablesDropBit = 15 -) - -var zeroDuration = unversioned.Duration{} - -func addDefaultingFuncs(scheme *kruntime.Scheme) error { - RegisterDefaults(scheme) - return scheme.AddDefaultingFuncs( - SetDefaults_KubeProxyConfiguration, - SetDefaults_KubeSchedulerConfiguration, - SetDefaults_LeaderElectionConfiguration, - SetDefaults_KubeletConfiguration, - ) -} - -func SetDefaults_KubeProxyConfiguration(obj *KubeProxyConfiguration) { - if obj.BindAddress == "" { - obj.BindAddress = "0.0.0.0" - } - if obj.HealthzPort == 0 { - obj.HealthzPort = 10249 - } - if obj.HealthzBindAddress == "" { - obj.HealthzBindAddress = "127.0.0.1" - } - if obj.OOMScoreAdj == nil { - temp := int32(qos.KubeProxyOOMScoreAdj) - obj.OOMScoreAdj = &temp - } - if obj.ResourceContainer == "" { - obj.ResourceContainer = "/kube-proxy" - } - if obj.IPTablesSyncPeriod.Duration == 0 { - obj.IPTablesSyncPeriod = unversioned.Duration{Duration: 30 * time.Second} - } - zero := unversioned.Duration{} - if obj.UDPIdleTimeout == zero { - obj.UDPIdleTimeout = unversioned.Duration{Duration: 250 * time.Millisecond} - } - // If ConntrackMax is set, respect it. - if obj.ConntrackMax == 0 { - // If ConntrackMax is *not* set, use per-core scaling. - if obj.ConntrackMaxPerCore == 0 { - obj.ConntrackMaxPerCore = 32 * 1024 - } - if obj.ConntrackMin == 0 { - obj.ConntrackMin = 128 * 1024 - } - } - if obj.IPTablesMasqueradeBit == nil { - temp := int32(14) - obj.IPTablesMasqueradeBit = &temp - } - if obj.ConntrackTCPEstablishedTimeout == zero { - obj.ConntrackTCPEstablishedTimeout = unversioned.Duration{Duration: 24 * time.Hour} // 1 day (1/5 default) - } - if obj.ConntrackTCPCloseWaitTimeout == zero { - // See https://github.com/kubernetes/kubernetes/issues/32551. - // - // CLOSE_WAIT conntrack state occurs when the the Linux kernel - // sees a FIN from the remote server. Note: this is a half-close - // condition that persists as long as the local side keeps the - // socket open. The condition is rare as it is typical in most - // protocols for both sides to issue a close; this typically - // occurs when the local socket is lazily garbage collected. - // - // If the CLOSE_WAIT conntrack entry expires, then FINs from the - // local socket will not be properly SNAT'd and will not reach the - // remote server (if the connection was subject to SNAT). If the - // remote timeouts for FIN_WAIT* states exceed the CLOSE_WAIT - // timeout, then there will be an inconsistency in the state of - // the connection and a new connection reusing the SNAT (src, - // port) pair may be rejected by the remote side with RST. 
This - // can cause new calls to connect(2) to return with ECONNREFUSED. - // - // We set CLOSE_WAIT to one hour by default to better match - // typical server timeouts. - obj.ConntrackTCPCloseWaitTimeout = unversioned.Duration{Duration: 1 * time.Hour} - } -} - -func SetDefaults_KubeSchedulerConfiguration(obj *KubeSchedulerConfiguration) { - if obj.Port == 0 { - obj.Port = ports.SchedulerPort - } - if obj.Address == "" { - obj.Address = "0.0.0.0" - } - if obj.AlgorithmProvider == "" { - obj.AlgorithmProvider = "DefaultProvider" - } - if obj.ContentType == "" { - obj.ContentType = "application/vnd.kubernetes.protobuf" - } - if obj.KubeAPIQPS == 0 { - obj.KubeAPIQPS = 50.0 - } - if obj.KubeAPIBurst == 0 { - obj.KubeAPIBurst = 100 - } - if obj.SchedulerName == "" { - obj.SchedulerName = api.DefaultSchedulerName - } - if obj.HardPodAffinitySymmetricWeight == 0 { - obj.HardPodAffinitySymmetricWeight = api.DefaultHardPodAffinitySymmetricWeight - } - if obj.FailureDomains == "" { - obj.FailureDomains = api.DefaultFailureDomains - } -} - -func SetDefaults_LeaderElectionConfiguration(obj *LeaderElectionConfiguration) { - zero := unversioned.Duration{} - if obj.LeaseDuration == zero { - obj.LeaseDuration = unversioned.Duration{Duration: 15 * time.Second} - } - if obj.RenewDeadline == zero { - obj.RenewDeadline = unversioned.Duration{Duration: 10 * time.Second} - } - if obj.RetryPeriod == zero { - obj.RetryPeriod = unversioned.Duration{Duration: 2 * time.Second} - } -} - -func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) { - if obj.Authentication.Anonymous.Enabled == nil { - obj.Authentication.Anonymous.Enabled = boolVar(true) - } - if obj.Authentication.Webhook.Enabled == nil { - obj.Authentication.Webhook.Enabled = boolVar(false) - } - if obj.Authentication.Webhook.CacheTTL == zeroDuration { - obj.Authentication.Webhook.CacheTTL = unversioned.Duration{Duration: 2 * time.Minute} - } - if obj.Authorization.Mode == "" { - obj.Authorization.Mode = KubeletAuthorizationModeAlwaysAllow - } - if obj.Authorization.Webhook.CacheAuthorizedTTL == zeroDuration { - obj.Authorization.Webhook.CacheAuthorizedTTL = unversioned.Duration{Duration: 5 * time.Minute} - } - if obj.Authorization.Webhook.CacheUnauthorizedTTL == zeroDuration { - obj.Authorization.Webhook.CacheUnauthorizedTTL = unversioned.Duration{Duration: 30 * time.Second} - } - - if obj.Address == "" { - obj.Address = "0.0.0.0" - } - if obj.CloudProvider == "" { - obj.CloudProvider = AutoDetectCloudProvider - } - if obj.CAdvisorPort == 0 { - obj.CAdvisorPort = 4194 - } - if obj.VolumeStatsAggPeriod == zeroDuration { - obj.VolumeStatsAggPeriod = unversioned.Duration{Duration: time.Minute} - } - if obj.CertDirectory == "" { - obj.CertDirectory = "/var/run/kubernetes" - } - if obj.ExperimentalCgroupsPerQOS == nil { - obj.ExperimentalCgroupsPerQOS = boolVar(false) - } - if obj.ContainerRuntime == "" { - obj.ContainerRuntime = "docker" - } - if obj.RuntimeRequestTimeout == zeroDuration { - obj.RuntimeRequestTimeout = unversioned.Duration{Duration: 2 * time.Minute} - } - if obj.CPUCFSQuota == nil { - obj.CPUCFSQuota = boolVar(true) - } - if obj.DockerExecHandlerName == "" { - obj.DockerExecHandlerName = "native" - } - if obj.DockerEndpoint == "" && runtime.GOOS != "windows" { - obj.DockerEndpoint = "unix:///var/run/docker.sock" - } - if obj.EventBurst == 0 { - obj.EventBurst = 10 - } - if obj.EventRecordQPS == nil { - temp := int32(5) - obj.EventRecordQPS = &temp - } - if obj.EnableControllerAttachDetach == nil { - 
obj.EnableControllerAttachDetach = boolVar(true) - } - if obj.EnableDebuggingHandlers == nil { - obj.EnableDebuggingHandlers = boolVar(true) - } - if obj.EnableServer == nil { - obj.EnableServer = boolVar(true) - } - if obj.FileCheckFrequency == zeroDuration { - obj.FileCheckFrequency = unversioned.Duration{Duration: 20 * time.Second} - } - if obj.HealthzBindAddress == "" { - obj.HealthzBindAddress = "127.0.0.1" - } - if obj.HealthzPort == 0 { - obj.HealthzPort = 10248 - } - if obj.HostNetworkSources == nil { - obj.HostNetworkSources = []string{kubetypes.AllSource} - } - if obj.HostPIDSources == nil { - obj.HostPIDSources = []string{kubetypes.AllSource} - } - if obj.HostIPCSources == nil { - obj.HostIPCSources = []string{kubetypes.AllSource} - } - if obj.HTTPCheckFrequency == zeroDuration { - obj.HTTPCheckFrequency = unversioned.Duration{Duration: 20 * time.Second} - } - if obj.ImageMinimumGCAge == zeroDuration { - obj.ImageMinimumGCAge = unversioned.Duration{Duration: 2 * time.Minute} - } - if obj.ImageGCHighThresholdPercent == nil { - temp := int32(90) - obj.ImageGCHighThresholdPercent = &temp - } - if obj.ImageGCLowThresholdPercent == nil { - temp := int32(80) - obj.ImageGCLowThresholdPercent = &temp - } - if obj.LowDiskSpaceThresholdMB == 0 { - obj.LowDiskSpaceThresholdMB = 256 - } - if obj.MasterServiceNamespace == "" { - obj.MasterServiceNamespace = api.NamespaceDefault - } - if obj.MaxContainerCount == nil { - temp := int32(-1) - obj.MaxContainerCount = &temp - } - if obj.MaxPerPodContainerCount == 0 { - obj.MaxPerPodContainerCount = 1 - } - if obj.MaxOpenFiles == 0 { - obj.MaxOpenFiles = 1000000 - } - if obj.MaxPods == 0 { - obj.MaxPods = 110 - } - if obj.MinimumGCAge == zeroDuration { - obj.MinimumGCAge = unversioned.Duration{Duration: 0} - } - if obj.NonMasqueradeCIDR == "" { - obj.NonMasqueradeCIDR = "10.0.0.0/8" - } - if obj.VolumePluginDir == "" { - obj.VolumePluginDir = "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/" - } - if obj.NodeStatusUpdateFrequency == zeroDuration { - obj.NodeStatusUpdateFrequency = unversioned.Duration{Duration: 10 * time.Second} - } - if obj.OOMScoreAdj == nil { - temp := int32(qos.KubeletOOMScoreAdj) - obj.OOMScoreAdj = &temp - } - if obj.PodInfraContainerImage == "" { - obj.PodInfraContainerImage = defaultPodInfraContainerImage - } - if obj.Port == 0 { - obj.Port = ports.KubeletPort - } - if obj.ReadOnlyPort == 0 { - obj.ReadOnlyPort = ports.KubeletReadOnlyPort - } - if obj.RegisterNode == nil { - obj.RegisterNode = boolVar(true) - } - if obj.RegisterSchedulable == nil { - obj.RegisterSchedulable = boolVar(true) - } - if obj.RegistryBurst == 0 { - obj.RegistryBurst = 10 - } - if obj.RegistryPullQPS == nil { - temp := int32(5) - obj.RegistryPullQPS = &temp - } - if obj.ResolverConfig == "" { - obj.ResolverConfig = kubetypes.ResolvConfDefault - } - if obj.RktAPIEndpoint == "" { - obj.RktAPIEndpoint = defaultRktAPIServiceEndpoint - } - if obj.RootDirectory == "" { - obj.RootDirectory = defaultRootDir - } - if obj.SerializeImagePulls == nil { - obj.SerializeImagePulls = boolVar(true) - } - if obj.SeccompProfileRoot == "" { - obj.SeccompProfileRoot = filepath.Join(defaultRootDir, "seccomp") - } - if obj.StreamingConnectionIdleTimeout == zeroDuration { - obj.StreamingConnectionIdleTimeout = unversioned.Duration{Duration: 4 * time.Hour} - } - if obj.SyncFrequency == zeroDuration { - obj.SyncFrequency = unversioned.Duration{Duration: 1 * time.Minute} - } - if obj.ReconcileCIDR == nil { - obj.ReconcileCIDR = boolVar(true) - } - if obj.ContentType == 
"" { - obj.ContentType = "application/vnd.kubernetes.protobuf" - } - if obj.KubeAPIQPS == nil { - temp := int32(5) - obj.KubeAPIQPS = &temp - } - if obj.KubeAPIBurst == 0 { - obj.KubeAPIBurst = 10 - } - if obj.OutOfDiskTransitionFrequency == zeroDuration { - obj.OutOfDiskTransitionFrequency = unversioned.Duration{Duration: 5 * time.Minute} - } - if string(obj.HairpinMode) == "" { - obj.HairpinMode = PromiscuousBridge - } - if obj.EvictionHard == nil { - temp := "memory.available<100Mi" - obj.EvictionHard = &temp - } - if obj.EvictionPressureTransitionPeriod == zeroDuration { - obj.EvictionPressureTransitionPeriod = unversioned.Duration{Duration: 5 * time.Minute} - } - if obj.ExperimentalKernelMemcgNotification == nil { - obj.ExperimentalKernelMemcgNotification = boolVar(false) - } - if obj.SystemReserved == nil { - obj.SystemReserved = make(map[string]string) - } - if obj.KubeReserved == nil { - obj.KubeReserved = make(map[string]string) - } - if obj.MakeIPTablesUtilChains == nil { - obj.MakeIPTablesUtilChains = boolVar(true) - } - if obj.IPTablesMasqueradeBit == nil { - temp := int32(defaultIPTablesMasqueradeBit) - obj.IPTablesMasqueradeBit = &temp - } - if obj.IPTablesDropBit == nil { - temp := int32(defaultIPTablesDropBit) - obj.IPTablesDropBit = &temp - } - if obj.ExperimentalCgroupsPerQOS == nil { - temp := false - obj.ExperimentalCgroupsPerQOS = &temp - } - if obj.CgroupDriver == "" { - obj.CgroupDriver = "cgroupfs" - } - // NOTE: this is for backwards compatibility with earlier releases where cgroup-root was optional. - // if cgroups per qos is not enabled, and cgroup-root is not specified, we need to default to the - // container runtime default and not default to the root cgroup. - if obj.ExperimentalCgroupsPerQOS != nil { - if *obj.ExperimentalCgroupsPerQOS { - if obj.CgroupRoot == "" { - obj.CgroupRoot = "/" - } - } - } -} - -func boolVar(b bool) *bool { - return &b -} - -var ( - defaultCfg = KubeletConfiguration{} -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go deleted file mode 100644 index 0ae15b67c8..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/componentconfig -// +k8s:openapi-gen=true -// +k8s:defaulter-gen=TypeMeta - -package v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/register.go deleted file mode 100644 index 7017d9e98b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/register.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// GroupName is the group name use in this package -const GroupName = "componentconfig" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1alpha1"} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) - AddToScheme = SchemeBuilder.AddToScheme -) - -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &KubeProxyConfiguration{}, - &KubeSchedulerConfiguration{}, - &KubeletConfiguration{}, - ) - return nil -} - -func (obj *KubeProxyConfiguration) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *KubeSchedulerConfiguration) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *KubeletConfiguration) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go deleted file mode 100644 index 7c192a17bf..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go +++ /dev/null @@ -1,570 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import "k8s.io/kubernetes/pkg/api/unversioned" - -type KubeProxyConfiguration struct { - unversioned.TypeMeta - - // bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 - // for all interfaces) - BindAddress string `json:"bindAddress"` - // clusterCIDR is the CIDR range of the pods in the cluster. It is used to - // bridge traffic coming from outside of the cluster. If not provided, - // no off-cluster bridging will be performed. - ClusterCIDR string `json:"clusterCIDR"` - // healthzBindAddress is the IP address for the health check server to serve on, - // defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces) - HealthzBindAddress string `json:"healthzBindAddress"` - // healthzPort is the port to bind the health check server. Use 0 to disable. - HealthzPort int32 `json:"healthzPort"` - // hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname. - HostnameOverride string `json:"hostnameOverride"` - // iptablesMasqueradeBit is the bit of the iptables fwmark space to use for SNAT if using - // the pure iptables proxy mode. Values must be within the range [0, 31]. 
- IPTablesMasqueradeBit *int32 `json:"iptablesMasqueradeBit"` - // iptablesSyncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m', - // '2h22m'). Must be greater than 0. - IPTablesSyncPeriod unversioned.Duration `json:"iptablesSyncPeriodSeconds"` - // iptablesMinSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m', - // '2h22m'). - IPTablesMinSyncPeriod unversioned.Duration `json:"iptablesMinSyncPeriodSeconds"` - // kubeconfigPath is the path to the kubeconfig file with authorization information (the - // master location is set by the master flag). - KubeconfigPath string `json:"kubeconfigPath"` - // masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. - MasqueradeAll bool `json:"masqueradeAll"` - // master is the address of the Kubernetes API server (overrides any value in kubeconfig) - Master string `json:"master"` - // oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within - // the range [-1000, 1000] - OOMScoreAdj *int32 `json:"oomScoreAdj"` - // mode specifies which proxy mode to use. - Mode ProxyMode `json:"mode"` - // portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed - // in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen. - PortRange string `json:"portRange"` - // resourceContainer is the bsolute name of the resource-only container to create and run - // the Kube-proxy in (Default: /kube-proxy). - ResourceContainer string `json:"resourceContainer"` - // udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s'). - // Must be greater than 0. Only applicable for proxyMode=userspace. - UDPIdleTimeout unversioned.Duration `json:"udpTimeoutMilliseconds"` - // conntrackMax is the maximum number of NAT connections to track (0 to - // leave as-is). This takes precedence over conntrackMaxPerCore and conntrackMin. - ConntrackMax int32 `json:"conntrackMax"` - // conntrackMaxPerCore is the maximum number of NAT connections to track - // per CPU core (0 to leave the limit as-is and ignore conntrackMin). - ConntrackMaxPerCore int32 `json:"conntrackMaxPerCore"` - // conntrackMin is the minimum value of connect-tracking records to allocate, - // regardless of conntrackMaxPerCore (set conntrackMaxPerCore=0 to leave the limit as-is). - ConntrackMin int32 `json:"conntrackMin"` - // conntrackTCPEstablishedTimeout is how long an idle TCP connection - // will be kept open (e.g. '2s'). Must be greater than 0. - ConntrackTCPEstablishedTimeout unversioned.Duration `json:"conntrackTCPEstablishedTimeout"` - // conntrackTCPCloseWaitTimeout is how long an idle conntrack entry - // in CLOSE_WAIT state will remain in the conntrack - // table. (e.g. '60s'). Must be greater than 0 to set. - ConntrackTCPCloseWaitTimeout unversioned.Duration `json:"conntrackTCPCloseWaitTimeout"` -} - -// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables' -// (experimental). If blank, look at the Node object on the Kubernetes API and respect the -// 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the -// best-available proxy (currently userspace, but may change in future versions). If the -// iptables proxy is selected, regardless of how, but the system's kernel or iptables -// versions are insufficient, this always falls back to the userspace proxy. 
-type ProxyMode string - -const ( - ProxyModeUserspace ProxyMode = "userspace" - ProxyModeIPTables ProxyMode = "iptables" -) - -type KubeSchedulerConfiguration struct { - unversioned.TypeMeta - - // port is the port that the scheduler's http service runs on. - Port int `json:"port"` - // address is the IP address to serve on. - Address string `json:"address"` - // algorithmProvider is the scheduling algorithm provider to use. - AlgorithmProvider string `json:"algorithmProvider"` - // policyConfigFile is the filepath to the scheduler policy configuration. - PolicyConfigFile string `json:"policyConfigFile"` - // enableProfiling enables profiling via web interface. - EnableProfiling *bool `json:"enableProfiling"` - // contentType is contentType of requests sent to apiserver. - ContentType string `json:"contentType"` - // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver. - KubeAPIQPS float32 `json:"kubeAPIQPS"` - // kubeAPIBurst is the QPS burst to use while talking with kubernetes apiserver. - KubeAPIBurst int `json:"kubeAPIBurst"` - // schedulerName is name of the scheduler, used to select which pods - // will be processed by this scheduler, based on pod's annotation with - // key 'scheduler.alpha.kubernetes.io/name'. - SchedulerName string `json:"schedulerName"` - // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule - // corresponding to every RequiredDuringScheduling affinity rule. - // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100. - HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"` - // Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity. - FailureDomains string `json:"failureDomains"` - // leaderElection defines the configuration of leader election client. - LeaderElection LeaderElectionConfiguration `json:"leaderElection"` -} - -// HairpinMode denotes how the kubelet should configure networking to handle -// hairpin packets. -type HairpinMode string - -// Enum settings for different ways to handle hairpin packets. -const ( - // Set the hairpin flag on the veth of containers in the respective - // container runtime. - HairpinVeth = "hairpin-veth" - // Make the container bridge promiscuous. This will force it to accept - // hairpin packets, even if the flag isn't set on ports of the bridge. - PromiscuousBridge = "promiscuous-bridge" - // Neither of the above. If the kubelet is started in this hairpin mode - // and kube-proxy is running in iptables mode, hairpin packets will be - // dropped by the container bridge. - HairpinNone = "none" -) - -// LeaderElectionConfiguration defines the configuration of leader election -// clients for components that can run with leader election enabled. -type LeaderElectionConfiguration struct { - // leaderElect enables a leader election client to gain leadership - // before executing the main loop. Enable this when running replicated - // components for high availability. - LeaderElect *bool `json:"leaderElect"` - // leaseDuration is the duration that non-leader candidates will wait - // after observing a leadership renewal until attempting to acquire - // leadership of a led but unrenewed leader slot. This is effectively the - // maximum duration that a leader can be stopped before it is replaced - // by another candidate. This is only applicable if leader election is - // enabled. 
- LeaseDuration unversioned.Duration `json:"leaseDuration"` - // renewDeadline is the interval between attempts by the acting master to - // renew a leadership slot before it stops leading. This must be less - // than or equal to the lease duration. This is only applicable if leader - // election is enabled. - RenewDeadline unversioned.Duration `json:"renewDeadline"` - // retryPeriod is the duration the clients should wait between attempting - // acquisition and renewal of a leadership. This is only applicable if - // leader election is enabled. - RetryPeriod unversioned.Duration `json:"retryPeriod"` -} - -type KubeletConfiguration struct { - unversioned.TypeMeta - - // podManifestPath is the path to the directory containing pod manifests to - // run, or the path to a single manifest file - PodManifestPath string `json:"podManifestPath"` - // syncFrequency is the max period between synchronizing running - // containers and config - SyncFrequency unversioned.Duration `json:"syncFrequency"` - // fileCheckFrequency is the duration between checking config files for - // new data - FileCheckFrequency unversioned.Duration `json:"fileCheckFrequency"` - // httpCheckFrequency is the duration between checking http for new data - HTTPCheckFrequency unversioned.Duration `json:"httpCheckFrequency"` - // manifestURL is the URL for accessing the container manifest - ManifestURL string `json:"manifestURL"` - // manifestURLHeader is the HTTP header to use when accessing the manifest - // URL, with the key separated from the value with a ':', as in 'key:value' - ManifestURLHeader string `json:"manifestURLHeader"` - // enableServer enables the Kubelet's server - EnableServer *bool `json:"enableServer"` - // address is the IP address for the Kubelet to serve on (set to 0.0.0.0 - // for all interfaces) - Address string `json:"address"` - // port is the port for the Kubelet to serve on. - Port int32 `json:"port"` - // readOnlyPort is the read-only port for the Kubelet to serve on with - // no authentication/authorization (set to 0 to disable) - ReadOnlyPort int32 `json:"readOnlyPort"` - // tlsCertFile is the file containing x509 Certificate for HTTPS. (CA cert, - // if any, concatenated after server cert). If tlsCertFile and - // tlsPrivateKeyFile are not provided, a self-signed certificate - // and key are generated for the public address and saved to the directory - // passed to certDir. - TLSCertFile string `json:"tlsCertFile"` - // tlsPrivateKeyFile is the ile containing x509 private key matching - // tlsCertFile. - TLSPrivateKeyFile string `json:"tlsPrivateKeyFile"` - // certDirectory is the directory where the TLS certs are located (by - // default /var/run/kubernetes). If tlsCertFile and tlsPrivateKeyFile - // are provided, this flag will be ignored. - CertDirectory string `json:"certDirectory"` - // authentication specifies how requests to the Kubelet's server are authenticated - Authentication KubeletAuthentication `json:"authentication"` - // authorization specifies how requests to the Kubelet's server are authorized - Authorization KubeletAuthorization `json:"authorization"` - // hostnameOverride is the hostname used to identify the kubelet instead - // of the actual hostname. - HostnameOverride string `json:"hostnameOverride"` - // podInfraContainerImage is the image whose network/ipc namespaces - // containers in each pod will use. - PodInfraContainerImage string `json:"podInfraContainerImage"` - // dockerEndpoint is the path to the docker endpoint to communicate with. 
- DockerEndpoint string `json:"dockerEndpoint"` - // rootDirectory is the directory path to place kubelet files (volume - // mounts,etc). - RootDirectory string `json:"rootDirectory"` - // seccompProfileRoot is the directory path for seccomp profiles. - SeccompProfileRoot string `json:"seccompProfileRoot"` - // allowPrivileged enables containers to request privileged mode. - // Defaults to false. - AllowPrivileged *bool `json:"allowPrivileged"` - // hostNetworkSources is a comma-separated list of sources from which the - // Kubelet allows pods to use of host network. Defaults to "*". Valid - // options are "file", "http", "api", and "*" (all sources). - HostNetworkSources []string `json:"hostNetworkSources"` - // hostPIDSources is a comma-separated list of sources from which the - // Kubelet allows pods to use the host pid namespace. Defaults to "*". - HostPIDSources []string `json:"hostPIDSources"` - // hostIPCSources is a comma-separated list of sources from which the - // Kubelet allows pods to use the host ipc namespace. Defaults to "*". - HostIPCSources []string `json:"hostIPCSources"` - // registryPullQPS is the limit of registry pulls per second. If 0, - // unlimited. Set to 0 for no limit. Defaults to 5.0. - RegistryPullQPS *int32 `json:"registryPullQPS"` - // registryBurst is the maximum size of a bursty pulls, temporarily allows - // pulls to burst to this number, while still not exceeding registryQps. - // Only used if registryQPS > 0. - RegistryBurst int32 `json:"registryBurst"` - // eventRecordQPS is the maximum event creations per second. If 0, there - // is no limit enforced. - EventRecordQPS *int32 `json:"eventRecordQPS"` - // eventBurst is the maximum size of a bursty event records, temporarily - // allows event records to burst to this number, while still not exceeding - // event-qps. Only used if eventQps > 0 - EventBurst int32 `json:"eventBurst"` - // enableDebuggingHandlers enables server endpoints for log collection - // and local running of containers and commands - EnableDebuggingHandlers *bool `json:"enableDebuggingHandlers"` - // minimumGCAge is the minimum age for a finished container before it is - // garbage collected. - MinimumGCAge unversioned.Duration `json:"minimumGCAge"` - // maxPerPodContainerCount is the maximum number of old instances to - // retain per container. Each container takes up some disk space. - MaxPerPodContainerCount int32 `json:"maxPerPodContainerCount"` - // maxContainerCount is the maximum number of old instances of containers - // to retain globally. Each container takes up some disk space. - MaxContainerCount *int32 `json:"maxContainerCount"` - // cAdvisorPort is the port of the localhost cAdvisor endpoint - CAdvisorPort int32 `json:"cAdvisorPort"` - // healthzPort is the port of the localhost healthz endpoint - HealthzPort int32 `json:"healthzPort"` - // healthzBindAddress is the IP address for the healthz server to serve - // on. - HealthzBindAddress string `json:"healthzBindAddress"` - // oomScoreAdj is The oom-score-adj value for kubelet process. Values - // must be within the range [-1000, 1000]. - OOMScoreAdj *int32 `json:"oomScoreAdj"` - // registerNode enables automatic registration with the apiserver. - RegisterNode *bool `json:"registerNode"` - // clusterDomain is the DNS domain for this cluster. If set, kubelet will - // configure all containers to search this domain in addition to the - // host's search domains. 
- ClusterDomain string `json:"clusterDomain"` - // masterServiceNamespace is The namespace from which the kubernetes - // master services should be injected into pods. - MasterServiceNamespace string `json:"masterServiceNamespace"` - // clusterDNS is the IP address for a cluster DNS server. If set, kubelet - // will configure all containers to use this for DNS resolution in - // addition to the host's DNS servers - ClusterDNS string `json:"clusterDNS"` - // streamingConnectionIdleTimeout is the maximum time a streaming connection - // can be idle before the connection is automatically closed. - StreamingConnectionIdleTimeout unversioned.Duration `json:"streamingConnectionIdleTimeout"` - // nodeStatusUpdateFrequency is the frequency that kubelet posts node - // status to master. Note: be cautious when changing the constant, it - // must work with nodeMonitorGracePeriod in nodecontroller. - NodeStatusUpdateFrequency unversioned.Duration `json:"nodeStatusUpdateFrequency"` - // imageMinimumGCAge is the minimum age for an unused image before it is - // garbage collected. - ImageMinimumGCAge unversioned.Duration `json:"imageMinimumGCAge"` - // imageGCHighThresholdPercent is the percent of disk usage after which - // image garbage collection is always run. The percent is calculated as - // this field value out of 100. - ImageGCHighThresholdPercent *int32 `json:"imageGCHighThresholdPercent"` - // imageGCLowThresholdPercent is the percent of disk usage before which - // image garbage collection is never run. Lowest disk usage to garbage - // collect to. The percent is calculated as this field value out of 100. - ImageGCLowThresholdPercent *int32 `json:"imageGCLowThresholdPercent"` - // lowDiskSpaceThresholdMB is the absolute free disk space, in MB, to - // maintain. When disk space falls below this threshold, new pods would - // be rejected. - LowDiskSpaceThresholdMB int32 `json:"lowDiskSpaceThresholdMB"` - // How frequently to calculate and cache volume disk usage for all pods - VolumeStatsAggPeriod unversioned.Duration `json:"volumeStatsAggPeriod"` - // networkPluginName is the name of the network plugin to be invoked for - // various events in kubelet/pod lifecycle - NetworkPluginName string `json:"networkPluginName"` - // networkPluginDir is the full path of the directory in which to search - // for network plugins (and, for backwards-compat, CNI config files) - NetworkPluginDir string `json:"networkPluginDir"` - // CNIConfDir is the full path of the directory in which to search for - // CNI config files - CNIConfDir string `json:"cniConfDir"` - // CNIBinDir is the full path of the directory in which to search for - // CNI plugin binaries - CNIBinDir string `json:"cniBinDir"` - // networkPluginMTU is the MTU to be passed to the network plugin, - // and overrides the default MTU for cases where it cannot be automatically - // computed (such as IPSEC). - NetworkPluginMTU int32 `json:"networkPluginMTU"` - // volumePluginDir is the full path of the directory in which to search - // for additional third party volume plugins - VolumePluginDir string `json:"volumePluginDir"` - // cloudProvider is the provider for cloud services. - CloudProvider string `json:"cloudProvider"` - // cloudConfigFile is the path to the cloud provider configuration file. - CloudConfigFile string `json:"cloudConfigFile"` - // kubeletCgroups is the absolute name of cgroups to isolate the kubelet in. 
- KubeletCgroups string `json:"kubeletCgroups"` - // runtimeCgroups are cgroups that container runtime is expected to be isolated in. - RuntimeCgroups string `json:"runtimeCgroups"` - // systemCgroups is absolute name of cgroups in which to place - // all non-kernel processes that are not already in a container. Empty - // for no container. Rolling back the flag requires a reboot. - SystemCgroups string `json:"systemCgroups"` - // cgroupRoot is the root cgroup to use for pods. This is handled by the - // container runtime on a best effort basis. - CgroupRoot string `json:"cgroupRoot"` - // Enable QoS based Cgroup hierarchy: top level cgroups for QoS Classes - // And all Burstable and BestEffort pods are brought up under their - // specific top level QoS cgroup. - // +optional - ExperimentalCgroupsPerQOS *bool `json:"experimentalCgroupsPerQOS,omitempty"` - // driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd) - // +optional - CgroupDriver string `json:"cgroupDriver,omitempty"` - // containerRuntime is the container runtime to use. - ContainerRuntime string `json:"containerRuntime"` - // remoteRuntimeEndpoint is the endpoint of remote runtime service - RemoteRuntimeEndpoint string `json:"remoteRuntimeEndpoint"` - // remoteImageEndpoint is the endpoint of remote image service - RemoteImageEndpoint string `json:"remoteImageEndpoint"` - // runtimeRequestTimeout is the timeout for all runtime requests except long running - // requests - pull, logs, exec and attach. - RuntimeRequestTimeout unversioned.Duration `json:"runtimeRequestTimeout"` - // rktPath is the path of rkt binary. Leave empty to use the first rkt in - // $PATH. - RktPath string `json:"rktPath"` - // experimentalMounterPath is the path to mounter binary. If not set, kubelet will attempt to use mount - // binary that is available via $PATH, - ExperimentalMounterPath string `json:"experimentalMounterPath,omitempty"` - // rktApiEndpoint is the endpoint of the rkt API service to communicate with. - RktAPIEndpoint string `json:"rktAPIEndpoint"` - // rktStage1Image is the image to use as stage1. Local paths and - // http/https URLs are supported. - RktStage1Image string `json:"rktStage1Image"` - // lockFilePath is the path that kubelet will use to as a lock file. - // It uses this file as a lock to synchronize with other kubelet processes - // that may be running. - LockFilePath *string `json:"lockFilePath"` - // ExitOnLockContention is a flag that signifies to the kubelet that it is running - // in "bootstrap" mode. This requires that 'LockFilePath' has been set. - // This will cause the kubelet to listen to inotify events on the lock file, - // releasing it and exiting when another process tries to open that file. - ExitOnLockContention bool `json:"exitOnLockContention"` - // How should the kubelet configure the container bridge for hairpin packets. - // Setting this flag allows endpoints in a Service to loadbalance back to - // themselves if they should try to access their own Service. Values: - // "promiscuous-bridge": make the container bridge promiscuous. - // "hairpin-veth": set the hairpin flag on container veth interfaces. - // "none": do nothing. - // Generally, one must set --hairpin-mode=veth-flag to achieve hairpin NAT, - // because promiscous-bridge assumes the existence of a container bridge named cbr0. - HairpinMode string `json:"hairpinMode"` - // The node has babysitter process monitoring docker and kubelet. 
- BabysitDaemons bool `json:"babysitDaemons"` - // maxPods is the number of pods that can run on this Kubelet. - MaxPods int32 `json:"maxPods"` - // nvidiaGPUs is the number of NVIDIA GPU devices on this node. - NvidiaGPUs int32 `json:"nvidiaGPUs"` - // dockerExecHandlerName is the handler to use when executing a command - // in a container. Valid values are 'native' and 'nsenter'. Defaults to - // 'native'. - DockerExecHandlerName string `json:"dockerExecHandlerName"` - // The CIDR to use for pod IP addresses, only used in standalone mode. - // In cluster mode, this is obtained from the master. - PodCIDR string `json:"podCIDR"` - // ResolverConfig is the resolver configuration file used as the basis - // for the container DNS resolution configuration."), [] - ResolverConfig string `json:"resolvConf"` - // cpuCFSQuota is Enable CPU CFS quota enforcement for containers that - // specify CPU limits - CPUCFSQuota *bool `json:"cpuCFSQuota"` - // containerized should be set to true if kubelet is running in a container. - Containerized *bool `json:"containerized"` - // maxOpenFiles is Number of files that can be opened by Kubelet process. - MaxOpenFiles int64 `json:"maxOpenFiles"` - // reconcileCIDR is Reconcile node CIDR with the CIDR specified by the - // API server. Won't have any effect if register-node is false. - ReconcileCIDR *bool `json:"reconcileCIDR"` - // registerSchedulable tells the kubelet to register the node as - // schedulable. Won't have any effect if register-node is false. - RegisterSchedulable *bool `json:"registerSchedulable"` - // contentType is contentType of requests sent to apiserver. - ContentType string `json:"contentType"` - // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver - KubeAPIQPS *int32 `json:"kubeAPIQPS"` - // kubeAPIBurst is the burst to allow while talking with kubernetes - // apiserver - KubeAPIBurst int32 `json:"kubeAPIBurst"` - // serializeImagePulls when enabled, tells the Kubelet to pull images one - // at a time. We recommend *not* changing the default value on nodes that - // run docker daemon with version < 1.9 or an Aufs storage backend. - // Issue #10959 has more details. - SerializeImagePulls *bool `json:"serializeImagePulls"` - // outOfDiskTransitionFrequency is duration for which the kubelet has to - // wait before transitioning out of out-of-disk node condition status. - OutOfDiskTransitionFrequency unversioned.Duration `json:"outOfDiskTransitionFrequency"` - // nodeIP is IP address of the node. If set, kubelet will use this IP - // address for the node. - NodeIP string `json:"nodeIP"` - // nodeLabels to add when registering the node in the cluster. - NodeLabels map[string]string `json:"nodeLabels"` - // nonMasqueradeCIDR configures masquerading: traffic to IPs outside this range will use IP masquerade. - NonMasqueradeCIDR string `json:"nonMasqueradeCIDR"` - // enable gathering custom metrics. - EnableCustomMetrics bool `json:"enableCustomMetrics"` - // Comma-delimited list of hard eviction expressions. For example, 'memory.available<300Mi'. - EvictionHard *string `json:"evictionHard"` - // Comma-delimited list of soft eviction expressions. For example, 'memory.available<300Mi'. - EvictionSoft string `json:"evictionSoft"` - // Comma-delimeted list of grace periods for each soft eviction signal. For example, 'memory.available=30s'. - EvictionSoftGracePeriod string `json:"evictionSoftGracePeriod"` - // Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition. 
- EvictionPressureTransitionPeriod unversioned.Duration `json:"evictionPressureTransitionPeriod"` - // Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. - EvictionMaxPodGracePeriod int32 `json:"evictionMaxPodGracePeriod"` - // Comma-delimited list of minimum reclaims (e.g. imagefs.available=2Gi) that describes the minimum amount of resource the kubelet will reclaim when performing a pod eviction if that resource is under pressure. - EvictionMinimumReclaim string `json:"evictionMinimumReclaim"` - // If enabled, the kubelet will integrate with the kernel memcg notification to determine if memory eviction thresholds are crossed rather than polling. - ExperimentalKernelMemcgNotification *bool `json:"experimentalKernelMemcgNotification"` - // Maximum number of pods per core. Cannot exceed MaxPods - PodsPerCore int32 `json:"podsPerCore"` - // enableControllerAttachDetach enables the Attach/Detach controller to - // manage attachment/detachment of volumes scheduled to this node, and - // disables kubelet from executing any attach/detach operations - EnableControllerAttachDetach *bool `json:"enableControllerAttachDetach"` - // A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs - // that describe resources reserved for non-kubernetes components. - // Currently only cpu and memory are supported. [default=none] - // See http://kubernetes.io/docs/user-guide/compute-resources for more detail. - SystemReserved map[string]string `json:"systemReserved"` - // A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs - // that describe resources reserved for kubernetes system components. - // Currently only cpu and memory are supported. [default=none] - // See http://kubernetes.io/docs/user-guide/compute-resources for more detail. - KubeReserved map[string]string `json:"kubeReserved"` - // Default behaviour for kernel tuning - ProtectKernelDefaults bool `json:"protectKernelDefaults"` - // If true, Kubelet ensures a set of iptables rules are present on host. - // These rules will serve as utility rules for various components, e.g. KubeProxy. - // The rules will be created based on IPTablesMasqueradeBit and IPTablesDropBit. - MakeIPTablesUtilChains *bool `json:"makeIPTablesUtilChains"` - // iptablesMasqueradeBit is the bit of the iptables fwmark space to mark for SNAT - // Values must be within the range [0, 31]. Must be different from other mark bits. - // Warning: Please match the value of corresponding parameter in kube-proxy - // TODO: clean up IPTablesMasqueradeBit in kube-proxy - IPTablesMasqueradeBit *int32 `json:"iptablesMasqueradeBit"` - // iptablesDropBit is the bit of the iptables fwmark space to mark for dropping packets. - // Values must be within the range [0, 31]. Must be different from other mark bits. - IPTablesDropBit *int32 `json:"iptablesDropBit"` - // Whitelist of unsafe sysctls or sysctl patterns (ending in *). Use these at your own risk. - // Resource isolation might be lacking and pod might influence each other on the same node. - // +optional - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty"` - // featureGates is a string of comma-separated key=value pairs that describe feature - // gates for alpha/experimental features. - FeatureGates string `json:"featureGates,omitempty"` - // Enable Container Runtime Interface (CRI) integration. 
- // +optional - EnableCRI bool `json:"enableCRI,omitempty"` - // TODO(#34726:1.8.0): Remove the opt-in for failing when swap is enabled. - // Tells the Kubelet to fail to start if swap is enabled on the node. - ExperimentalFailSwapOn bool `json:"experimentalFailSwapOn,omitempty"` - // This flag, if set, enables a check prior to mount operations to verify that the required components - // (binaries, etc.) to mount the volume are available on the underlying node. If the check is enabled - // and fails the mount operation fails. - ExperimentalCheckNodeCapabilitiesBeforeMount bool `json:"ExperimentalCheckNodeCapabilitiesBeforeMount,omitempty"` -} - -type KubeletAuthorizationMode string - -const ( - // KubeletAuthorizationModeAlwaysAllow authorizes all authenticated requests - KubeletAuthorizationModeAlwaysAllow KubeletAuthorizationMode = "AlwaysAllow" - // KubeletAuthorizationModeWebhook uses the SubjectAccessReview API to determine authorization - KubeletAuthorizationModeWebhook KubeletAuthorizationMode = "Webhook" -) - -type KubeletAuthorization struct { - // mode is the authorization mode to apply to requests to the kubelet server. - // Valid values are AlwaysAllow and Webhook. - // Webhook mode uses the SubjectAccessReview API to determine authorization. - Mode KubeletAuthorizationMode `json:"mode"` - - // webhook contains settings related to Webhook authorization. - Webhook KubeletWebhookAuthorization `json:"webhook"` -} - -type KubeletWebhookAuthorization struct { - // cacheAuthorizedTTL is the duration to cache 'authorized' responses from the webhook authorizer. - CacheAuthorizedTTL unversioned.Duration `json:"cacheAuthorizedTTL"` - // cacheUnauthorizedTTL is the duration to cache 'unauthorized' responses from the webhook authorizer. - CacheUnauthorizedTTL unversioned.Duration `json:"cacheUnauthorizedTTL"` -} - -type KubeletAuthentication struct { - // x509 contains settings related to x509 client certificate authentication - X509 KubeletX509Authentication `json:"x509"` - // webhook contains settings related to webhook bearer token authentication - Webhook KubeletWebhookAuthentication `json:"webhook"` - // anonymous contains settings related to anonymous authentication - Anonymous KubeletAnonymousAuthentication `json:"anonymous"` -} - -type KubeletX509Authentication struct { - // clientCAFile is the path to a PEM-encoded certificate bundle. If set, any request presenting a client certificate - // signed by one of the authorities in the bundle is authenticated with a username corresponding to the CommonName, - // and groups corresponding to the Organization in the client certificate. - ClientCAFile string `json:"clientCAFile"` -} - -type KubeletWebhookAuthentication struct { - // enabled allows bearer token authentication backed by the tokenreviews.authentication.k8s.io API - Enabled *bool `json:"enabled"` - // cacheTTL enables caching of authentication results - CacheTTL unversioned.Duration `json:"cacheTTL"` -} - -type KubeletAnonymousAuthentication struct { - // enabled allows anonymous requests to the kubelet server. - // Requests that are not rejected by another authentication method are treated as anonymous requests. - // Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated. 
- Enabled *bool `json:"enabled"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go deleted file mode 100644 index b76653e914..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go +++ /dev/null @@ -1,681 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1alpha1 - -import ( - api "k8s.io/kubernetes/pkg/api" - componentconfig "k8s.io/kubernetes/pkg/apis/componentconfig" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" - config "k8s.io/kubernetes/pkg/util/config" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration, - Convert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration, - Convert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration, - Convert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration, - Convert_v1alpha1_KubeletAnonymousAuthentication_To_componentconfig_KubeletAnonymousAuthentication, - Convert_componentconfig_KubeletAnonymousAuthentication_To_v1alpha1_KubeletAnonymousAuthentication, - Convert_v1alpha1_KubeletAuthentication_To_componentconfig_KubeletAuthentication, - Convert_componentconfig_KubeletAuthentication_To_v1alpha1_KubeletAuthentication, - Convert_v1alpha1_KubeletAuthorization_To_componentconfig_KubeletAuthorization, - Convert_componentconfig_KubeletAuthorization_To_v1alpha1_KubeletAuthorization, - Convert_v1alpha1_KubeletConfiguration_To_componentconfig_KubeletConfiguration, - Convert_componentconfig_KubeletConfiguration_To_v1alpha1_KubeletConfiguration, - Convert_v1alpha1_KubeletWebhookAuthentication_To_componentconfig_KubeletWebhookAuthentication, - Convert_componentconfig_KubeletWebhookAuthentication_To_v1alpha1_KubeletWebhookAuthentication, - Convert_v1alpha1_KubeletWebhookAuthorization_To_componentconfig_KubeletWebhookAuthorization, - Convert_componentconfig_KubeletWebhookAuthorization_To_v1alpha1_KubeletWebhookAuthorization, - Convert_v1alpha1_KubeletX509Authentication_To_componentconfig_KubeletX509Authentication, - Convert_componentconfig_KubeletX509Authentication_To_v1alpha1_KubeletX509Authentication, - Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration, - Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration, - ) -} - -func 
autoConvert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *componentconfig.KubeProxyConfiguration, s conversion.Scope) error { - out.BindAddress = in.BindAddress - out.ClusterCIDR = in.ClusterCIDR - out.HealthzBindAddress = in.HealthzBindAddress - out.HealthzPort = in.HealthzPort - out.HostnameOverride = in.HostnameOverride - out.IPTablesMasqueradeBit = (*int32)(unsafe.Pointer(in.IPTablesMasqueradeBit)) - out.IPTablesSyncPeriod = in.IPTablesSyncPeriod - out.IPTablesMinSyncPeriod = in.IPTablesMinSyncPeriod - out.KubeconfigPath = in.KubeconfigPath - out.MasqueradeAll = in.MasqueradeAll - out.Master = in.Master - out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj)) - out.Mode = componentconfig.ProxyMode(in.Mode) - out.PortRange = in.PortRange - out.ResourceContainer = in.ResourceContainer - out.UDPIdleTimeout = in.UDPIdleTimeout - out.ConntrackMax = in.ConntrackMax - out.ConntrackMaxPerCore = in.ConntrackMaxPerCore - out.ConntrackMin = in.ConntrackMin - out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout - out.ConntrackTCPCloseWaitTimeout = in.ConntrackTCPCloseWaitTimeout - return nil -} - -func Convert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *componentconfig.KubeProxyConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration(in, out, s) -} - -func autoConvert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *componentconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error { - out.BindAddress = in.BindAddress - out.ClusterCIDR = in.ClusterCIDR - out.HealthzBindAddress = in.HealthzBindAddress - out.HealthzPort = in.HealthzPort - out.HostnameOverride = in.HostnameOverride - out.IPTablesMasqueradeBit = (*int32)(unsafe.Pointer(in.IPTablesMasqueradeBit)) - out.IPTablesSyncPeriod = in.IPTablesSyncPeriod - out.IPTablesMinSyncPeriod = in.IPTablesMinSyncPeriod - out.KubeconfigPath = in.KubeconfigPath - out.MasqueradeAll = in.MasqueradeAll - out.Master = in.Master - out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj)) - out.Mode = ProxyMode(in.Mode) - out.PortRange = in.PortRange - out.ResourceContainer = in.ResourceContainer - out.UDPIdleTimeout = in.UDPIdleTimeout - out.ConntrackMax = in.ConntrackMax - out.ConntrackMaxPerCore = in.ConntrackMaxPerCore - out.ConntrackMin = in.ConntrackMin - out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout - out.ConntrackTCPCloseWaitTimeout = in.ConntrackTCPCloseWaitTimeout - return nil -} - -func Convert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *componentconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error { - return autoConvert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in, out, s) -} - -func autoConvert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration(in *KubeSchedulerConfiguration, out *componentconfig.KubeSchedulerConfiguration, s conversion.Scope) error { - out.Port = int32(in.Port) - out.Address = in.Address - out.AlgorithmProvider = in.AlgorithmProvider - out.PolicyConfigFile = in.PolicyConfigFile - if err := api.Convert_Pointer_bool_To_bool(&in.EnableProfiling, &out.EnableProfiling, s); err != nil { - return err - } - out.ContentType = in.ContentType - out.KubeAPIQPS = in.KubeAPIQPS - out.KubeAPIBurst = 
int32(in.KubeAPIBurst) - out.SchedulerName = in.SchedulerName - out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight - out.FailureDomains = in.FailureDomains - if err := Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { - return err - } - return nil -} - -func Convert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration(in *KubeSchedulerConfiguration, out *componentconfig.KubeSchedulerConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration(in, out, s) -} - -func autoConvert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in *componentconfig.KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, s conversion.Scope) error { - out.Port = int(in.Port) - out.Address = in.Address - out.AlgorithmProvider = in.AlgorithmProvider - out.PolicyConfigFile = in.PolicyConfigFile - if err := api.Convert_bool_To_Pointer_bool(&in.EnableProfiling, &out.EnableProfiling, s); err != nil { - return err - } - out.ContentType = in.ContentType - out.KubeAPIQPS = in.KubeAPIQPS - out.KubeAPIBurst = int(in.KubeAPIBurst) - out.SchedulerName = in.SchedulerName - out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight - out.FailureDomains = in.FailureDomains - if err := Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { - return err - } - return nil -} - -func Convert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in *componentconfig.KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, s conversion.Scope) error { - return autoConvert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in, out, s) -} - -func autoConvert_v1alpha1_KubeletAnonymousAuthentication_To_componentconfig_KubeletAnonymousAuthentication(in *KubeletAnonymousAuthentication, out *componentconfig.KubeletAnonymousAuthentication, s conversion.Scope) error { - if err := api.Convert_Pointer_bool_To_bool(&in.Enabled, &out.Enabled, s); err != nil { - return err - } - return nil -} - -func Convert_v1alpha1_KubeletAnonymousAuthentication_To_componentconfig_KubeletAnonymousAuthentication(in *KubeletAnonymousAuthentication, out *componentconfig.KubeletAnonymousAuthentication, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeletAnonymousAuthentication_To_componentconfig_KubeletAnonymousAuthentication(in, out, s) -} - -func autoConvert_componentconfig_KubeletAnonymousAuthentication_To_v1alpha1_KubeletAnonymousAuthentication(in *componentconfig.KubeletAnonymousAuthentication, out *KubeletAnonymousAuthentication, s conversion.Scope) error { - if err := api.Convert_bool_To_Pointer_bool(&in.Enabled, &out.Enabled, s); err != nil { - return err - } - return nil -} - -func Convert_componentconfig_KubeletAnonymousAuthentication_To_v1alpha1_KubeletAnonymousAuthentication(in *componentconfig.KubeletAnonymousAuthentication, out *KubeletAnonymousAuthentication, s conversion.Scope) error { - return autoConvert_componentconfig_KubeletAnonymousAuthentication_To_v1alpha1_KubeletAnonymousAuthentication(in, out, s) -} - -func autoConvert_v1alpha1_KubeletAuthentication_To_componentconfig_KubeletAuthentication(in *KubeletAuthentication, out *componentconfig.KubeletAuthentication, s conversion.Scope) error { - if err 
:= Convert_v1alpha1_KubeletX509Authentication_To_componentconfig_KubeletX509Authentication(&in.X509, &out.X509, s); err != nil { - return err - } - if err := Convert_v1alpha1_KubeletWebhookAuthentication_To_componentconfig_KubeletWebhookAuthentication(&in.Webhook, &out.Webhook, s); err != nil { - return err - } - if err := Convert_v1alpha1_KubeletAnonymousAuthentication_To_componentconfig_KubeletAnonymousAuthentication(&in.Anonymous, &out.Anonymous, s); err != nil { - return err - } - return nil -} - -func Convert_v1alpha1_KubeletAuthentication_To_componentconfig_KubeletAuthentication(in *KubeletAuthentication, out *componentconfig.KubeletAuthentication, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeletAuthentication_To_componentconfig_KubeletAuthentication(in, out, s) -} - -func autoConvert_componentconfig_KubeletAuthentication_To_v1alpha1_KubeletAuthentication(in *componentconfig.KubeletAuthentication, out *KubeletAuthentication, s conversion.Scope) error { - if err := Convert_componentconfig_KubeletX509Authentication_To_v1alpha1_KubeletX509Authentication(&in.X509, &out.X509, s); err != nil { - return err - } - if err := Convert_componentconfig_KubeletWebhookAuthentication_To_v1alpha1_KubeletWebhookAuthentication(&in.Webhook, &out.Webhook, s); err != nil { - return err - } - if err := Convert_componentconfig_KubeletAnonymousAuthentication_To_v1alpha1_KubeletAnonymousAuthentication(&in.Anonymous, &out.Anonymous, s); err != nil { - return err - } - return nil -} - -func Convert_componentconfig_KubeletAuthentication_To_v1alpha1_KubeletAuthentication(in *componentconfig.KubeletAuthentication, out *KubeletAuthentication, s conversion.Scope) error { - return autoConvert_componentconfig_KubeletAuthentication_To_v1alpha1_KubeletAuthentication(in, out, s) -} - -func autoConvert_v1alpha1_KubeletAuthorization_To_componentconfig_KubeletAuthorization(in *KubeletAuthorization, out *componentconfig.KubeletAuthorization, s conversion.Scope) error { - out.Mode = componentconfig.KubeletAuthorizationMode(in.Mode) - if err := Convert_v1alpha1_KubeletWebhookAuthorization_To_componentconfig_KubeletWebhookAuthorization(&in.Webhook, &out.Webhook, s); err != nil { - return err - } - return nil -} - -func Convert_v1alpha1_KubeletAuthorization_To_componentconfig_KubeletAuthorization(in *KubeletAuthorization, out *componentconfig.KubeletAuthorization, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeletAuthorization_To_componentconfig_KubeletAuthorization(in, out, s) -} - -func autoConvert_componentconfig_KubeletAuthorization_To_v1alpha1_KubeletAuthorization(in *componentconfig.KubeletAuthorization, out *KubeletAuthorization, s conversion.Scope) error { - out.Mode = KubeletAuthorizationMode(in.Mode) - if err := Convert_componentconfig_KubeletWebhookAuthorization_To_v1alpha1_KubeletWebhookAuthorization(&in.Webhook, &out.Webhook, s); err != nil { - return err - } - return nil -} - -func Convert_componentconfig_KubeletAuthorization_To_v1alpha1_KubeletAuthorization(in *componentconfig.KubeletAuthorization, out *KubeletAuthorization, s conversion.Scope) error { - return autoConvert_componentconfig_KubeletAuthorization_To_v1alpha1_KubeletAuthorization(in, out, s) -} - -func autoConvert_v1alpha1_KubeletConfiguration_To_componentconfig_KubeletConfiguration(in *KubeletConfiguration, out *componentconfig.KubeletConfiguration, s conversion.Scope) error { - out.PodManifestPath = in.PodManifestPath - out.SyncFrequency = in.SyncFrequency - out.FileCheckFrequency = in.FileCheckFrequency - 
out.HTTPCheckFrequency = in.HTTPCheckFrequency - out.ManifestURL = in.ManifestURL - out.ManifestURLHeader = in.ManifestURLHeader - if err := api.Convert_Pointer_bool_To_bool(&in.EnableServer, &out.EnableServer, s); err != nil { - return err - } - out.Address = in.Address - out.Port = in.Port - out.ReadOnlyPort = in.ReadOnlyPort - out.TLSCertFile = in.TLSCertFile - out.TLSPrivateKeyFile = in.TLSPrivateKeyFile - out.CertDirectory = in.CertDirectory - if err := Convert_v1alpha1_KubeletAuthentication_To_componentconfig_KubeletAuthentication(&in.Authentication, &out.Authentication, s); err != nil { - return err - } - if err := Convert_v1alpha1_KubeletAuthorization_To_componentconfig_KubeletAuthorization(&in.Authorization, &out.Authorization, s); err != nil { - return err - } - out.HostnameOverride = in.HostnameOverride - out.PodInfraContainerImage = in.PodInfraContainerImage - out.DockerEndpoint = in.DockerEndpoint - out.RootDirectory = in.RootDirectory - out.SeccompProfileRoot = in.SeccompProfileRoot - if err := api.Convert_Pointer_bool_To_bool(&in.AllowPrivileged, &out.AllowPrivileged, s); err != nil { - return err - } - out.HostNetworkSources = *(*[]string)(unsafe.Pointer(&in.HostNetworkSources)) - out.HostPIDSources = *(*[]string)(unsafe.Pointer(&in.HostPIDSources)) - out.HostIPCSources = *(*[]string)(unsafe.Pointer(&in.HostIPCSources)) - if err := api.Convert_Pointer_int32_To_int32(&in.RegistryPullQPS, &out.RegistryPullQPS, s); err != nil { - return err - } - out.RegistryBurst = in.RegistryBurst - if err := api.Convert_Pointer_int32_To_int32(&in.EventRecordQPS, &out.EventRecordQPS, s); err != nil { - return err - } - out.EventBurst = in.EventBurst - if err := api.Convert_Pointer_bool_To_bool(&in.EnableDebuggingHandlers, &out.EnableDebuggingHandlers, s); err != nil { - return err - } - out.MinimumGCAge = in.MinimumGCAge - out.MaxPerPodContainerCount = in.MaxPerPodContainerCount - if err := api.Convert_Pointer_int32_To_int32(&in.MaxContainerCount, &out.MaxContainerCount, s); err != nil { - return err - } - out.CAdvisorPort = in.CAdvisorPort - out.HealthzPort = in.HealthzPort - out.HealthzBindAddress = in.HealthzBindAddress - if err := api.Convert_Pointer_int32_To_int32(&in.OOMScoreAdj, &out.OOMScoreAdj, s); err != nil { - return err - } - if err := api.Convert_Pointer_bool_To_bool(&in.RegisterNode, &out.RegisterNode, s); err != nil { - return err - } - out.ClusterDomain = in.ClusterDomain - out.MasterServiceNamespace = in.MasterServiceNamespace - out.ClusterDNS = in.ClusterDNS - out.StreamingConnectionIdleTimeout = in.StreamingConnectionIdleTimeout - out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency - out.ImageMinimumGCAge = in.ImageMinimumGCAge - if err := api.Convert_Pointer_int32_To_int32(&in.ImageGCHighThresholdPercent, &out.ImageGCHighThresholdPercent, s); err != nil { - return err - } - if err := api.Convert_Pointer_int32_To_int32(&in.ImageGCLowThresholdPercent, &out.ImageGCLowThresholdPercent, s); err != nil { - return err - } - out.LowDiskSpaceThresholdMB = in.LowDiskSpaceThresholdMB - out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod - out.NetworkPluginName = in.NetworkPluginName - out.NetworkPluginDir = in.NetworkPluginDir - out.CNIConfDir = in.CNIConfDir - out.CNIBinDir = in.CNIBinDir - out.NetworkPluginMTU = in.NetworkPluginMTU - out.VolumePluginDir = in.VolumePluginDir - out.CloudProvider = in.CloudProvider - out.CloudConfigFile = in.CloudConfigFile - out.KubeletCgroups = in.KubeletCgroups - out.RuntimeCgroups = in.RuntimeCgroups - out.SystemCgroups = 
in.SystemCgroups - out.CgroupRoot = in.CgroupRoot - if err := api.Convert_Pointer_bool_To_bool(&in.ExperimentalCgroupsPerQOS, &out.ExperimentalCgroupsPerQOS, s); err != nil { - return err - } - out.CgroupDriver = in.CgroupDriver - out.ContainerRuntime = in.ContainerRuntime - out.RemoteRuntimeEndpoint = in.RemoteRuntimeEndpoint - out.RemoteImageEndpoint = in.RemoteImageEndpoint - out.RuntimeRequestTimeout = in.RuntimeRequestTimeout - out.RktPath = in.RktPath - out.ExperimentalMounterPath = in.ExperimentalMounterPath - out.RktAPIEndpoint = in.RktAPIEndpoint - out.RktStage1Image = in.RktStage1Image - if err := api.Convert_Pointer_string_To_string(&in.LockFilePath, &out.LockFilePath, s); err != nil { - return err - } - out.ExitOnLockContention = in.ExitOnLockContention - out.HairpinMode = in.HairpinMode - out.BabysitDaemons = in.BabysitDaemons - out.MaxPods = in.MaxPods - out.NvidiaGPUs = in.NvidiaGPUs - out.DockerExecHandlerName = in.DockerExecHandlerName - out.PodCIDR = in.PodCIDR - out.ResolverConfig = in.ResolverConfig - if err := api.Convert_Pointer_bool_To_bool(&in.CPUCFSQuota, &out.CPUCFSQuota, s); err != nil { - return err - } - if err := api.Convert_Pointer_bool_To_bool(&in.Containerized, &out.Containerized, s); err != nil { - return err - } - out.MaxOpenFiles = in.MaxOpenFiles - if err := api.Convert_Pointer_bool_To_bool(&in.ReconcileCIDR, &out.ReconcileCIDR, s); err != nil { - return err - } - if err := api.Convert_Pointer_bool_To_bool(&in.RegisterSchedulable, &out.RegisterSchedulable, s); err != nil { - return err - } - out.ContentType = in.ContentType - if err := api.Convert_Pointer_int32_To_int32(&in.KubeAPIQPS, &out.KubeAPIQPS, s); err != nil { - return err - } - out.KubeAPIBurst = in.KubeAPIBurst - if err := api.Convert_Pointer_bool_To_bool(&in.SerializeImagePulls, &out.SerializeImagePulls, s); err != nil { - return err - } - out.OutOfDiskTransitionFrequency = in.OutOfDiskTransitionFrequency - out.NodeIP = in.NodeIP - out.NodeLabels = *(*map[string]string)(unsafe.Pointer(&in.NodeLabels)) - out.NonMasqueradeCIDR = in.NonMasqueradeCIDR - out.EnableCustomMetrics = in.EnableCustomMetrics - if err := api.Convert_Pointer_string_To_string(&in.EvictionHard, &out.EvictionHard, s); err != nil { - return err - } - out.EvictionSoft = in.EvictionSoft - out.EvictionSoftGracePeriod = in.EvictionSoftGracePeriod - out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod - out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod - out.EvictionMinimumReclaim = in.EvictionMinimumReclaim - if err := api.Convert_Pointer_bool_To_bool(&in.ExperimentalKernelMemcgNotification, &out.ExperimentalKernelMemcgNotification, s); err != nil { - return err - } - out.PodsPerCore = in.PodsPerCore - if err := api.Convert_Pointer_bool_To_bool(&in.EnableControllerAttachDetach, &out.EnableControllerAttachDetach, s); err != nil { - return err - } - out.SystemReserved = *(*config.ConfigurationMap)(unsafe.Pointer(&in.SystemReserved)) - out.KubeReserved = *(*config.ConfigurationMap)(unsafe.Pointer(&in.KubeReserved)) - out.ProtectKernelDefaults = in.ProtectKernelDefaults - if err := api.Convert_Pointer_bool_To_bool(&in.MakeIPTablesUtilChains, &out.MakeIPTablesUtilChains, s); err != nil { - return err - } - if err := api.Convert_Pointer_int32_To_int32(&in.IPTablesMasqueradeBit, &out.IPTablesMasqueradeBit, s); err != nil { - return err - } - if err := api.Convert_Pointer_int32_To_int32(&in.IPTablesDropBit, &out.IPTablesDropBit, s); err != nil { - return err - } - out.AllowedUnsafeSysctls = 
*(*[]string)(unsafe.Pointer(&in.AllowedUnsafeSysctls)) - out.FeatureGates = in.FeatureGates - out.EnableCRI = in.EnableCRI - out.ExperimentalFailSwapOn = in.ExperimentalFailSwapOn - out.ExperimentalCheckNodeCapabilitiesBeforeMount = in.ExperimentalCheckNodeCapabilitiesBeforeMount - return nil -} - -func Convert_v1alpha1_KubeletConfiguration_To_componentconfig_KubeletConfiguration(in *KubeletConfiguration, out *componentconfig.KubeletConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeletConfiguration_To_componentconfig_KubeletConfiguration(in, out, s) -} - -func autoConvert_componentconfig_KubeletConfiguration_To_v1alpha1_KubeletConfiguration(in *componentconfig.KubeletConfiguration, out *KubeletConfiguration, s conversion.Scope) error { - out.PodManifestPath = in.PodManifestPath - out.SyncFrequency = in.SyncFrequency - out.FileCheckFrequency = in.FileCheckFrequency - out.HTTPCheckFrequency = in.HTTPCheckFrequency - out.ManifestURL = in.ManifestURL - out.ManifestURLHeader = in.ManifestURLHeader - if err := api.Convert_bool_To_Pointer_bool(&in.EnableServer, &out.EnableServer, s); err != nil { - return err - } - out.Address = in.Address - out.Port = in.Port - out.ReadOnlyPort = in.ReadOnlyPort - out.TLSCertFile = in.TLSCertFile - out.TLSPrivateKeyFile = in.TLSPrivateKeyFile - out.CertDirectory = in.CertDirectory - if err := Convert_componentconfig_KubeletAuthentication_To_v1alpha1_KubeletAuthentication(&in.Authentication, &out.Authentication, s); err != nil { - return err - } - if err := Convert_componentconfig_KubeletAuthorization_To_v1alpha1_KubeletAuthorization(&in.Authorization, &out.Authorization, s); err != nil { - return err - } - out.HostnameOverride = in.HostnameOverride - out.PodInfraContainerImage = in.PodInfraContainerImage - out.DockerEndpoint = in.DockerEndpoint - out.RootDirectory = in.RootDirectory - out.SeccompProfileRoot = in.SeccompProfileRoot - if err := api.Convert_bool_To_Pointer_bool(&in.AllowPrivileged, &out.AllowPrivileged, s); err != nil { - return err - } - out.HostNetworkSources = *(*[]string)(unsafe.Pointer(&in.HostNetworkSources)) - out.HostPIDSources = *(*[]string)(unsafe.Pointer(&in.HostPIDSources)) - out.HostIPCSources = *(*[]string)(unsafe.Pointer(&in.HostIPCSources)) - if err := api.Convert_int32_To_Pointer_int32(&in.RegistryPullQPS, &out.RegistryPullQPS, s); err != nil { - return err - } - out.RegistryBurst = in.RegistryBurst - if err := api.Convert_int32_To_Pointer_int32(&in.EventRecordQPS, &out.EventRecordQPS, s); err != nil { - return err - } - out.EventBurst = in.EventBurst - if err := api.Convert_bool_To_Pointer_bool(&in.EnableDebuggingHandlers, &out.EnableDebuggingHandlers, s); err != nil { - return err - } - out.MinimumGCAge = in.MinimumGCAge - out.MaxPerPodContainerCount = in.MaxPerPodContainerCount - if err := api.Convert_int32_To_Pointer_int32(&in.MaxContainerCount, &out.MaxContainerCount, s); err != nil { - return err - } - out.CAdvisorPort = in.CAdvisorPort - out.HealthzPort = in.HealthzPort - out.HealthzBindAddress = in.HealthzBindAddress - if err := api.Convert_int32_To_Pointer_int32(&in.OOMScoreAdj, &out.OOMScoreAdj, s); err != nil { - return err - } - if err := api.Convert_bool_To_Pointer_bool(&in.RegisterNode, &out.RegisterNode, s); err != nil { - return err - } - out.ClusterDomain = in.ClusterDomain - out.MasterServiceNamespace = in.MasterServiceNamespace - out.ClusterDNS = in.ClusterDNS - out.StreamingConnectionIdleTimeout = in.StreamingConnectionIdleTimeout - out.NodeStatusUpdateFrequency = 
in.NodeStatusUpdateFrequency - out.ImageMinimumGCAge = in.ImageMinimumGCAge - if err := api.Convert_int32_To_Pointer_int32(&in.ImageGCHighThresholdPercent, &out.ImageGCHighThresholdPercent, s); err != nil { - return err - } - if err := api.Convert_int32_To_Pointer_int32(&in.ImageGCLowThresholdPercent, &out.ImageGCLowThresholdPercent, s); err != nil { - return err - } - out.LowDiskSpaceThresholdMB = in.LowDiskSpaceThresholdMB - out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod - out.NetworkPluginName = in.NetworkPluginName - out.NetworkPluginMTU = in.NetworkPluginMTU - out.NetworkPluginDir = in.NetworkPluginDir - out.CNIConfDir = in.CNIConfDir - out.CNIBinDir = in.CNIBinDir - out.VolumePluginDir = in.VolumePluginDir - out.CloudProvider = in.CloudProvider - out.CloudConfigFile = in.CloudConfigFile - out.KubeletCgroups = in.KubeletCgroups - if err := api.Convert_bool_To_Pointer_bool(&in.ExperimentalCgroupsPerQOS, &out.ExperimentalCgroupsPerQOS, s); err != nil { - return err - } - out.CgroupDriver = in.CgroupDriver - out.RuntimeCgroups = in.RuntimeCgroups - out.SystemCgroups = in.SystemCgroups - out.CgroupRoot = in.CgroupRoot - out.ContainerRuntime = in.ContainerRuntime - out.RemoteRuntimeEndpoint = in.RemoteRuntimeEndpoint - out.RemoteImageEndpoint = in.RemoteImageEndpoint - out.RuntimeRequestTimeout = in.RuntimeRequestTimeout - out.RktPath = in.RktPath - out.ExperimentalMounterPath = in.ExperimentalMounterPath - out.RktAPIEndpoint = in.RktAPIEndpoint - out.RktStage1Image = in.RktStage1Image - if err := api.Convert_string_To_Pointer_string(&in.LockFilePath, &out.LockFilePath, s); err != nil { - return err - } - out.ExitOnLockContention = in.ExitOnLockContention - out.HairpinMode = in.HairpinMode - out.BabysitDaemons = in.BabysitDaemons - out.MaxPods = in.MaxPods - out.NvidiaGPUs = in.NvidiaGPUs - out.DockerExecHandlerName = in.DockerExecHandlerName - out.PodCIDR = in.PodCIDR - out.ResolverConfig = in.ResolverConfig - if err := api.Convert_bool_To_Pointer_bool(&in.CPUCFSQuota, &out.CPUCFSQuota, s); err != nil { - return err - } - if err := api.Convert_bool_To_Pointer_bool(&in.Containerized, &out.Containerized, s); err != nil { - return err - } - out.MaxOpenFiles = in.MaxOpenFiles - if err := api.Convert_bool_To_Pointer_bool(&in.ReconcileCIDR, &out.ReconcileCIDR, s); err != nil { - return err - } - if err := api.Convert_bool_To_Pointer_bool(&in.RegisterSchedulable, &out.RegisterSchedulable, s); err != nil { - return err - } - out.ContentType = in.ContentType - if err := api.Convert_int32_To_Pointer_int32(&in.KubeAPIQPS, &out.KubeAPIQPS, s); err != nil { - return err - } - out.KubeAPIBurst = in.KubeAPIBurst - if err := api.Convert_bool_To_Pointer_bool(&in.SerializeImagePulls, &out.SerializeImagePulls, s); err != nil { - return err - } - out.OutOfDiskTransitionFrequency = in.OutOfDiskTransitionFrequency - out.NodeIP = in.NodeIP - out.NodeLabels = *(*map[string]string)(unsafe.Pointer(&in.NodeLabels)) - out.NonMasqueradeCIDR = in.NonMasqueradeCIDR - out.EnableCustomMetrics = in.EnableCustomMetrics - if err := api.Convert_string_To_Pointer_string(&in.EvictionHard, &out.EvictionHard, s); err != nil { - return err - } - out.EvictionSoft = in.EvictionSoft - out.EvictionSoftGracePeriod = in.EvictionSoftGracePeriod - out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod - out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod - out.EvictionMinimumReclaim = in.EvictionMinimumReclaim - if err := api.Convert_bool_To_Pointer_bool(&in.ExperimentalKernelMemcgNotification, 
&out.ExperimentalKernelMemcgNotification, s); err != nil { - return err - } - out.PodsPerCore = in.PodsPerCore - if err := api.Convert_bool_To_Pointer_bool(&in.EnableControllerAttachDetach, &out.EnableControllerAttachDetach, s); err != nil { - return err - } - out.SystemReserved = *(*map[string]string)(unsafe.Pointer(&in.SystemReserved)) - out.KubeReserved = *(*map[string]string)(unsafe.Pointer(&in.KubeReserved)) - out.ProtectKernelDefaults = in.ProtectKernelDefaults - if err := api.Convert_bool_To_Pointer_bool(&in.MakeIPTablesUtilChains, &out.MakeIPTablesUtilChains, s); err != nil { - return err - } - if err := api.Convert_int32_To_Pointer_int32(&in.IPTablesMasqueradeBit, &out.IPTablesMasqueradeBit, s); err != nil { - return err - } - if err := api.Convert_int32_To_Pointer_int32(&in.IPTablesDropBit, &out.IPTablesDropBit, s); err != nil { - return err - } - out.AllowedUnsafeSysctls = *(*[]string)(unsafe.Pointer(&in.AllowedUnsafeSysctls)) - out.FeatureGates = in.FeatureGates - out.EnableCRI = in.EnableCRI - out.ExperimentalFailSwapOn = in.ExperimentalFailSwapOn - out.ExperimentalCheckNodeCapabilitiesBeforeMount = in.ExperimentalCheckNodeCapabilitiesBeforeMount - return nil -} - -func Convert_componentconfig_KubeletConfiguration_To_v1alpha1_KubeletConfiguration(in *componentconfig.KubeletConfiguration, out *KubeletConfiguration, s conversion.Scope) error { - return autoConvert_componentconfig_KubeletConfiguration_To_v1alpha1_KubeletConfiguration(in, out, s) -} - -func autoConvert_v1alpha1_KubeletWebhookAuthentication_To_componentconfig_KubeletWebhookAuthentication(in *KubeletWebhookAuthentication, out *componentconfig.KubeletWebhookAuthentication, s conversion.Scope) error { - if err := api.Convert_Pointer_bool_To_bool(&in.Enabled, &out.Enabled, s); err != nil { - return err - } - out.CacheTTL = in.CacheTTL - return nil -} - -func Convert_v1alpha1_KubeletWebhookAuthentication_To_componentconfig_KubeletWebhookAuthentication(in *KubeletWebhookAuthentication, out *componentconfig.KubeletWebhookAuthentication, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeletWebhookAuthentication_To_componentconfig_KubeletWebhookAuthentication(in, out, s) -} - -func autoConvert_componentconfig_KubeletWebhookAuthentication_To_v1alpha1_KubeletWebhookAuthentication(in *componentconfig.KubeletWebhookAuthentication, out *KubeletWebhookAuthentication, s conversion.Scope) error { - if err := api.Convert_bool_To_Pointer_bool(&in.Enabled, &out.Enabled, s); err != nil { - return err - } - out.CacheTTL = in.CacheTTL - return nil -} - -func Convert_componentconfig_KubeletWebhookAuthentication_To_v1alpha1_KubeletWebhookAuthentication(in *componentconfig.KubeletWebhookAuthentication, out *KubeletWebhookAuthentication, s conversion.Scope) error { - return autoConvert_componentconfig_KubeletWebhookAuthentication_To_v1alpha1_KubeletWebhookAuthentication(in, out, s) -} - -func autoConvert_v1alpha1_KubeletWebhookAuthorization_To_componentconfig_KubeletWebhookAuthorization(in *KubeletWebhookAuthorization, out *componentconfig.KubeletWebhookAuthorization, s conversion.Scope) error { - out.CacheAuthorizedTTL = in.CacheAuthorizedTTL - out.CacheUnauthorizedTTL = in.CacheUnauthorizedTTL - return nil -} - -func Convert_v1alpha1_KubeletWebhookAuthorization_To_componentconfig_KubeletWebhookAuthorization(in *KubeletWebhookAuthorization, out *componentconfig.KubeletWebhookAuthorization, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeletWebhookAuthorization_To_componentconfig_KubeletWebhookAuthorization(in, out, 
s) -} - -func autoConvert_componentconfig_KubeletWebhookAuthorization_To_v1alpha1_KubeletWebhookAuthorization(in *componentconfig.KubeletWebhookAuthorization, out *KubeletWebhookAuthorization, s conversion.Scope) error { - out.CacheAuthorizedTTL = in.CacheAuthorizedTTL - out.CacheUnauthorizedTTL = in.CacheUnauthorizedTTL - return nil -} - -func Convert_componentconfig_KubeletWebhookAuthorization_To_v1alpha1_KubeletWebhookAuthorization(in *componentconfig.KubeletWebhookAuthorization, out *KubeletWebhookAuthorization, s conversion.Scope) error { - return autoConvert_componentconfig_KubeletWebhookAuthorization_To_v1alpha1_KubeletWebhookAuthorization(in, out, s) -} - -func autoConvert_v1alpha1_KubeletX509Authentication_To_componentconfig_KubeletX509Authentication(in *KubeletX509Authentication, out *componentconfig.KubeletX509Authentication, s conversion.Scope) error { - out.ClientCAFile = in.ClientCAFile - return nil -} - -func Convert_v1alpha1_KubeletX509Authentication_To_componentconfig_KubeletX509Authentication(in *KubeletX509Authentication, out *componentconfig.KubeletX509Authentication, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeletX509Authentication_To_componentconfig_KubeletX509Authentication(in, out, s) -} - -func autoConvert_componentconfig_KubeletX509Authentication_To_v1alpha1_KubeletX509Authentication(in *componentconfig.KubeletX509Authentication, out *KubeletX509Authentication, s conversion.Scope) error { - out.ClientCAFile = in.ClientCAFile - return nil -} - -func Convert_componentconfig_KubeletX509Authentication_To_v1alpha1_KubeletX509Authentication(in *componentconfig.KubeletX509Authentication, out *KubeletX509Authentication, s conversion.Scope) error { - return autoConvert_componentconfig_KubeletX509Authentication_To_v1alpha1_KubeletX509Authentication(in, out, s) -} - -func autoConvert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(in *LeaderElectionConfiguration, out *componentconfig.LeaderElectionConfiguration, s conversion.Scope) error { - if err := api.Convert_Pointer_bool_To_bool(&in.LeaderElect, &out.LeaderElect, s); err != nil { - return err - } - out.LeaseDuration = in.LeaseDuration - out.RenewDeadline = in.RenewDeadline - out.RetryPeriod = in.RetryPeriod - return nil -} - -func Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(in *LeaderElectionConfiguration, out *componentconfig.LeaderElectionConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(in, out, s) -} - -func autoConvert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in *componentconfig.LeaderElectionConfiguration, out *LeaderElectionConfiguration, s conversion.Scope) error { - if err := api.Convert_bool_To_Pointer_bool(&in.LeaderElect, &out.LeaderElect, s); err != nil { - return err - } - out.LeaseDuration = in.LeaseDuration - out.RenewDeadline = in.RenewDeadline - out.RetryPeriod = in.RetryPeriod - return nil -} - -func Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in *componentconfig.LeaderElectionConfiguration, out *LeaderElectionConfiguration, s conversion.Scope) error { - return autoConvert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go 
b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index a725d641e2..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,527 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1alpha1 - -import ( - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_KubeProxyConfiguration, InType: reflect.TypeOf(&KubeProxyConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_KubeSchedulerConfiguration, InType: reflect.TypeOf(&KubeSchedulerConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_KubeletAnonymousAuthentication, InType: reflect.TypeOf(&KubeletAnonymousAuthentication{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_KubeletAuthentication, InType: reflect.TypeOf(&KubeletAuthentication{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_KubeletAuthorization, InType: reflect.TypeOf(&KubeletAuthorization{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_KubeletConfiguration, InType: reflect.TypeOf(&KubeletConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_KubeletWebhookAuthentication, InType: reflect.TypeOf(&KubeletWebhookAuthentication{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_KubeletWebhookAuthorization, InType: reflect.TypeOf(&KubeletWebhookAuthorization{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_KubeletX509Authentication, InType: reflect.TypeOf(&KubeletX509Authentication{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_LeaderElectionConfiguration, InType: reflect.TypeOf(&LeaderElectionConfiguration{})}, - ) -} - -func DeepCopy_v1alpha1_KubeProxyConfiguration(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*KubeProxyConfiguration) - out := out.(*KubeProxyConfiguration) - out.TypeMeta = in.TypeMeta - out.BindAddress = in.BindAddress - out.ClusterCIDR = in.ClusterCIDR - out.HealthzBindAddress = in.HealthzBindAddress - out.HealthzPort = in.HealthzPort - out.HostnameOverride = in.HostnameOverride - if in.IPTablesMasqueradeBit != nil { - in, out := &in.IPTablesMasqueradeBit, &out.IPTablesMasqueradeBit - *out = new(int32) - **out = **in - } else { - out.IPTablesMasqueradeBit = nil - } - out.IPTablesSyncPeriod = in.IPTablesSyncPeriod - out.IPTablesMinSyncPeriod = in.IPTablesMinSyncPeriod - out.KubeconfigPath = in.KubeconfigPath - out.MasqueradeAll = 
in.MasqueradeAll - out.Master = in.Master - if in.OOMScoreAdj != nil { - in, out := &in.OOMScoreAdj, &out.OOMScoreAdj - *out = new(int32) - **out = **in - } else { - out.OOMScoreAdj = nil - } - out.Mode = in.Mode - out.PortRange = in.PortRange - out.ResourceContainer = in.ResourceContainer - out.UDPIdleTimeout = in.UDPIdleTimeout - out.ConntrackMax = in.ConntrackMax - out.ConntrackMaxPerCore = in.ConntrackMaxPerCore - out.ConntrackMin = in.ConntrackMin - out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout - out.ConntrackTCPCloseWaitTimeout = in.ConntrackTCPCloseWaitTimeout - return nil - } -} - -func DeepCopy_v1alpha1_KubeSchedulerConfiguration(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*KubeSchedulerConfiguration) - out := out.(*KubeSchedulerConfiguration) - out.TypeMeta = in.TypeMeta - out.Port = in.Port - out.Address = in.Address - out.AlgorithmProvider = in.AlgorithmProvider - out.PolicyConfigFile = in.PolicyConfigFile - if in.EnableProfiling != nil { - in, out := &in.EnableProfiling, &out.EnableProfiling - *out = new(bool) - **out = **in - } else { - out.EnableProfiling = nil - } - out.ContentType = in.ContentType - out.KubeAPIQPS = in.KubeAPIQPS - out.KubeAPIBurst = in.KubeAPIBurst - out.SchedulerName = in.SchedulerName - out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight - out.FailureDomains = in.FailureDomains - if err := DeepCopy_v1alpha1_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1alpha1_KubeletAnonymousAuthentication(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*KubeletAnonymousAuthentication) - out := out.(*KubeletAnonymousAuthentication) - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } else { - out.Enabled = nil - } - return nil - } -} - -func DeepCopy_v1alpha1_KubeletAuthentication(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*KubeletAuthentication) - out := out.(*KubeletAuthentication) - out.X509 = in.X509 - if err := DeepCopy_v1alpha1_KubeletWebhookAuthentication(&in.Webhook, &out.Webhook, c); err != nil { - return err - } - if err := DeepCopy_v1alpha1_KubeletAnonymousAuthentication(&in.Anonymous, &out.Anonymous, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1alpha1_KubeletAuthorization(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*KubeletAuthorization) - out := out.(*KubeletAuthorization) - out.Mode = in.Mode - out.Webhook = in.Webhook - return nil - } -} - -func DeepCopy_v1alpha1_KubeletConfiguration(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*KubeletConfiguration) - out := out.(*KubeletConfiguration) - out.TypeMeta = in.TypeMeta - out.PodManifestPath = in.PodManifestPath - out.SyncFrequency = in.SyncFrequency - out.FileCheckFrequency = in.FileCheckFrequency - out.HTTPCheckFrequency = in.HTTPCheckFrequency - out.ManifestURL = in.ManifestURL - out.ManifestURLHeader = in.ManifestURLHeader - if in.EnableServer != nil { - in, out := &in.EnableServer, &out.EnableServer - *out = new(bool) - **out = **in - } else { - out.EnableServer = nil - } - out.Address = in.Address - out.Port = in.Port - out.ReadOnlyPort = in.ReadOnlyPort - out.TLSCertFile = in.TLSCertFile - out.TLSPrivateKeyFile = in.TLSPrivateKeyFile - out.CertDirectory = in.CertDirectory - if err := 
DeepCopy_v1alpha1_KubeletAuthentication(&in.Authentication, &out.Authentication, c); err != nil { - return err - } - out.Authorization = in.Authorization - out.HostnameOverride = in.HostnameOverride - out.PodInfraContainerImage = in.PodInfraContainerImage - out.DockerEndpoint = in.DockerEndpoint - out.RootDirectory = in.RootDirectory - out.SeccompProfileRoot = in.SeccompProfileRoot - if in.AllowPrivileged != nil { - in, out := &in.AllowPrivileged, &out.AllowPrivileged - *out = new(bool) - **out = **in - } else { - out.AllowPrivileged = nil - } - if in.HostNetworkSources != nil { - in, out := &in.HostNetworkSources, &out.HostNetworkSources - *out = make([]string, len(*in)) - copy(*out, *in) - } else { - out.HostNetworkSources = nil - } - if in.HostPIDSources != nil { - in, out := &in.HostPIDSources, &out.HostPIDSources - *out = make([]string, len(*in)) - copy(*out, *in) - } else { - out.HostPIDSources = nil - } - if in.HostIPCSources != nil { - in, out := &in.HostIPCSources, &out.HostIPCSources - *out = make([]string, len(*in)) - copy(*out, *in) - } else { - out.HostIPCSources = nil - } - if in.RegistryPullQPS != nil { - in, out := &in.RegistryPullQPS, &out.RegistryPullQPS - *out = new(int32) - **out = **in - } else { - out.RegistryPullQPS = nil - } - out.RegistryBurst = in.RegistryBurst - if in.EventRecordQPS != nil { - in, out := &in.EventRecordQPS, &out.EventRecordQPS - *out = new(int32) - **out = **in - } else { - out.EventRecordQPS = nil - } - out.EventBurst = in.EventBurst - if in.EnableDebuggingHandlers != nil { - in, out := &in.EnableDebuggingHandlers, &out.EnableDebuggingHandlers - *out = new(bool) - **out = **in - } else { - out.EnableDebuggingHandlers = nil - } - out.MinimumGCAge = in.MinimumGCAge - out.MaxPerPodContainerCount = in.MaxPerPodContainerCount - if in.MaxContainerCount != nil { - in, out := &in.MaxContainerCount, &out.MaxContainerCount - *out = new(int32) - **out = **in - } else { - out.MaxContainerCount = nil - } - out.CAdvisorPort = in.CAdvisorPort - out.HealthzPort = in.HealthzPort - out.HealthzBindAddress = in.HealthzBindAddress - if in.OOMScoreAdj != nil { - in, out := &in.OOMScoreAdj, &out.OOMScoreAdj - *out = new(int32) - **out = **in - } else { - out.OOMScoreAdj = nil - } - if in.RegisterNode != nil { - in, out := &in.RegisterNode, &out.RegisterNode - *out = new(bool) - **out = **in - } else { - out.RegisterNode = nil - } - out.ClusterDomain = in.ClusterDomain - out.MasterServiceNamespace = in.MasterServiceNamespace - out.ClusterDNS = in.ClusterDNS - out.StreamingConnectionIdleTimeout = in.StreamingConnectionIdleTimeout - out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency - out.ImageMinimumGCAge = in.ImageMinimumGCAge - if in.ImageGCHighThresholdPercent != nil { - in, out := &in.ImageGCHighThresholdPercent, &out.ImageGCHighThresholdPercent - *out = new(int32) - **out = **in - } else { - out.ImageGCHighThresholdPercent = nil - } - if in.ImageGCLowThresholdPercent != nil { - in, out := &in.ImageGCLowThresholdPercent, &out.ImageGCLowThresholdPercent - *out = new(int32) - **out = **in - } else { - out.ImageGCLowThresholdPercent = nil - } - out.LowDiskSpaceThresholdMB = in.LowDiskSpaceThresholdMB - out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod - out.NetworkPluginName = in.NetworkPluginName - out.NetworkPluginDir = in.NetworkPluginDir - out.CNIConfDir = in.CNIConfDir - out.CNIBinDir = in.CNIBinDir - out.NetworkPluginMTU = in.NetworkPluginMTU - out.VolumePluginDir = in.VolumePluginDir - out.CloudProvider = in.CloudProvider - out.CloudConfigFile = 
in.CloudConfigFile - out.KubeletCgroups = in.KubeletCgroups - out.RuntimeCgroups = in.RuntimeCgroups - out.SystemCgroups = in.SystemCgroups - out.CgroupRoot = in.CgroupRoot - if in.ExperimentalCgroupsPerQOS != nil { - in, out := &in.ExperimentalCgroupsPerQOS, &out.ExperimentalCgroupsPerQOS - *out = new(bool) - **out = **in - } else { - out.ExperimentalCgroupsPerQOS = nil - } - out.CgroupDriver = in.CgroupDriver - out.ContainerRuntime = in.ContainerRuntime - out.RemoteRuntimeEndpoint = in.RemoteRuntimeEndpoint - out.RemoteImageEndpoint = in.RemoteImageEndpoint - out.RuntimeRequestTimeout = in.RuntimeRequestTimeout - out.RktPath = in.RktPath - out.ExperimentalMounterPath = in.ExperimentalMounterPath - out.RktAPIEndpoint = in.RktAPIEndpoint - out.RktStage1Image = in.RktStage1Image - if in.LockFilePath != nil { - in, out := &in.LockFilePath, &out.LockFilePath - *out = new(string) - **out = **in - } else { - out.LockFilePath = nil - } - out.ExitOnLockContention = in.ExitOnLockContention - out.HairpinMode = in.HairpinMode - out.BabysitDaemons = in.BabysitDaemons - out.MaxPods = in.MaxPods - out.NvidiaGPUs = in.NvidiaGPUs - out.DockerExecHandlerName = in.DockerExecHandlerName - out.PodCIDR = in.PodCIDR - out.ResolverConfig = in.ResolverConfig - if in.CPUCFSQuota != nil { - in, out := &in.CPUCFSQuota, &out.CPUCFSQuota - *out = new(bool) - **out = **in - } else { - out.CPUCFSQuota = nil - } - if in.Containerized != nil { - in, out := &in.Containerized, &out.Containerized - *out = new(bool) - **out = **in - } else { - out.Containerized = nil - } - out.MaxOpenFiles = in.MaxOpenFiles - if in.ReconcileCIDR != nil { - in, out := &in.ReconcileCIDR, &out.ReconcileCIDR - *out = new(bool) - **out = **in - } else { - out.ReconcileCIDR = nil - } - if in.RegisterSchedulable != nil { - in, out := &in.RegisterSchedulable, &out.RegisterSchedulable - *out = new(bool) - **out = **in - } else { - out.RegisterSchedulable = nil - } - out.ContentType = in.ContentType - if in.KubeAPIQPS != nil { - in, out := &in.KubeAPIQPS, &out.KubeAPIQPS - *out = new(int32) - **out = **in - } else { - out.KubeAPIQPS = nil - } - out.KubeAPIBurst = in.KubeAPIBurst - if in.SerializeImagePulls != nil { - in, out := &in.SerializeImagePulls, &out.SerializeImagePulls - *out = new(bool) - **out = **in - } else { - out.SerializeImagePulls = nil - } - out.OutOfDiskTransitionFrequency = in.OutOfDiskTransitionFrequency - out.NodeIP = in.NodeIP - if in.NodeLabels != nil { - in, out := &in.NodeLabels, &out.NodeLabels - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } else { - out.NodeLabels = nil - } - out.NonMasqueradeCIDR = in.NonMasqueradeCIDR - out.EnableCustomMetrics = in.EnableCustomMetrics - if in.EvictionHard != nil { - in, out := &in.EvictionHard, &out.EvictionHard - *out = new(string) - **out = **in - } else { - out.EvictionHard = nil - } - out.EvictionSoft = in.EvictionSoft - out.EvictionSoftGracePeriod = in.EvictionSoftGracePeriod - out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod - out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod - out.EvictionMinimumReclaim = in.EvictionMinimumReclaim - if in.ExperimentalKernelMemcgNotification != nil { - in, out := &in.ExperimentalKernelMemcgNotification, &out.ExperimentalKernelMemcgNotification - *out = new(bool) - **out = **in - } else { - out.ExperimentalKernelMemcgNotification = nil - } - out.PodsPerCore = in.PodsPerCore - if in.EnableControllerAttachDetach != nil { - in, out := &in.EnableControllerAttachDetach, 
&out.EnableControllerAttachDetach - *out = new(bool) - **out = **in - } else { - out.EnableControllerAttachDetach = nil - } - if in.SystemReserved != nil { - in, out := &in.SystemReserved, &out.SystemReserved - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } else { - out.SystemReserved = nil - } - if in.KubeReserved != nil { - in, out := &in.KubeReserved, &out.KubeReserved - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } else { - out.KubeReserved = nil - } - out.ProtectKernelDefaults = in.ProtectKernelDefaults - if in.MakeIPTablesUtilChains != nil { - in, out := &in.MakeIPTablesUtilChains, &out.MakeIPTablesUtilChains - *out = new(bool) - **out = **in - } else { - out.MakeIPTablesUtilChains = nil - } - if in.IPTablesMasqueradeBit != nil { - in, out := &in.IPTablesMasqueradeBit, &out.IPTablesMasqueradeBit - *out = new(int32) - **out = **in - } else { - out.IPTablesMasqueradeBit = nil - } - if in.IPTablesDropBit != nil { - in, out := &in.IPTablesDropBit, &out.IPTablesDropBit - *out = new(int32) - **out = **in - } else { - out.IPTablesDropBit = nil - } - if in.AllowedUnsafeSysctls != nil { - in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } else { - out.AllowedUnsafeSysctls = nil - } - out.FeatureGates = in.FeatureGates - out.EnableCRI = in.EnableCRI - out.ExperimentalFailSwapOn = in.ExperimentalFailSwapOn - out.ExperimentalCheckNodeCapabilitiesBeforeMount = in.ExperimentalCheckNodeCapabilitiesBeforeMount - return nil - } -} - -func DeepCopy_v1alpha1_KubeletWebhookAuthentication(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*KubeletWebhookAuthentication) - out := out.(*KubeletWebhookAuthentication) - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } else { - out.Enabled = nil - } - out.CacheTTL = in.CacheTTL - return nil - } -} - -func DeepCopy_v1alpha1_KubeletWebhookAuthorization(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*KubeletWebhookAuthorization) - out := out.(*KubeletWebhookAuthorization) - out.CacheAuthorizedTTL = in.CacheAuthorizedTTL - out.CacheUnauthorizedTTL = in.CacheUnauthorizedTTL - return nil - } -} - -func DeepCopy_v1alpha1_KubeletX509Authentication(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*KubeletX509Authentication) - out := out.(*KubeletX509Authentication) - out.ClientCAFile = in.ClientCAFile - return nil - } -} - -func DeepCopy_v1alpha1_LeaderElectionConfiguration(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LeaderElectionConfiguration) - out := out.(*LeaderElectionConfiguration) - if in.LeaderElect != nil { - in, out := &in.LeaderElect, &out.LeaderElect - *out = new(bool) - **out = **in - } else { - out.LeaderElect = nil - } - out.LeaseDuration = in.LeaseDuration - out.RenewDeadline = in.RenewDeadline - out.RetryPeriod = in.RetryPeriod - return nil - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.defaults.go deleted file mode 100644 index a0cf834df5..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.defaults.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by defaulter-gen. Do not edit it manually! - -package v1alpha1 - -import ( - runtime "k8s.io/kubernetes/pkg/runtime" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&KubeProxyConfiguration{}, func(obj interface{}) { SetObjectDefaults_KubeProxyConfiguration(obj.(*KubeProxyConfiguration)) }) - scheme.AddTypeDefaultingFunc(&KubeSchedulerConfiguration{}, func(obj interface{}) { SetObjectDefaults_KubeSchedulerConfiguration(obj.(*KubeSchedulerConfiguration)) }) - scheme.AddTypeDefaultingFunc(&KubeletConfiguration{}, func(obj interface{}) { SetObjectDefaults_KubeletConfiguration(obj.(*KubeletConfiguration)) }) - return nil -} - -func SetObjectDefaults_KubeProxyConfiguration(in *KubeProxyConfiguration) { - SetDefaults_KubeProxyConfiguration(in) -} - -func SetObjectDefaults_KubeSchedulerConfiguration(in *KubeSchedulerConfiguration) { - SetDefaults_KubeSchedulerConfiguration(in) - SetDefaults_LeaderElectionConfiguration(&in.LeaderElection) -} - -func SetObjectDefaults_KubeletConfiguration(in *KubeletConfiguration) { - SetDefaults_KubeletConfiguration(in) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/zz_generated.deepcopy.go deleted file mode 100644 index 9c536b6845..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/zz_generated.deepcopy.go +++ /dev/null @@ -1,482 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package componentconfig - -import ( - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" - config "k8s.io/kubernetes/pkg/util/config" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_componentconfig_IPVar, InType: reflect.TypeOf(&IPVar{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_componentconfig_KubeControllerManagerConfiguration, InType: reflect.TypeOf(&KubeControllerManagerConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_componentconfig_KubeProxyConfiguration, InType: reflect.TypeOf(&KubeProxyConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_componentconfig_KubeSchedulerConfiguration, InType: reflect.TypeOf(&KubeSchedulerConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_componentconfig_KubeletAnonymousAuthentication, InType: reflect.TypeOf(&KubeletAnonymousAuthentication{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_componentconfig_KubeletAuthentication, InType: reflect.TypeOf(&KubeletAuthentication{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_componentconfig_KubeletAuthorization, InType: reflect.TypeOf(&KubeletAuthorization{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_componentconfig_KubeletConfiguration, InType: reflect.TypeOf(&KubeletConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_componentconfig_KubeletWebhookAuthentication, InType: reflect.TypeOf(&KubeletWebhookAuthentication{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_componentconfig_KubeletWebhookAuthorization, InType: reflect.TypeOf(&KubeletWebhookAuthorization{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_componentconfig_KubeletX509Authentication, InType: reflect.TypeOf(&KubeletX509Authentication{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_componentconfig_LeaderElectionConfiguration, InType: reflect.TypeOf(&LeaderElectionConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_componentconfig_PersistentVolumeRecyclerConfiguration, InType: reflect.TypeOf(&PersistentVolumeRecyclerConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_componentconfig_PortRangeVar, InType: reflect.TypeOf(&PortRangeVar{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_componentconfig_VolumeConfiguration, InType: reflect.TypeOf(&VolumeConfiguration{})}, - ) -} - -func DeepCopy_componentconfig_IPVar(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*IPVar) - out := out.(*IPVar) - if in.Val != nil { - in, out := &in.Val, &out.Val - *out = new(string) - **out = **in - } else { - out.Val = nil - } - return nil - } -} - -func DeepCopy_componentconfig_KubeControllerManagerConfiguration(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*KubeControllerManagerConfiguration) - out := out.(*KubeControllerManagerConfiguration) - out.TypeMeta = in.TypeMeta - out.Port = in.Port - out.Address = in.Address - out.UseServiceAccountCredentials = in.UseServiceAccountCredentials - out.CloudProvider = in.CloudProvider - out.CloudConfigFile = in.CloudConfigFile - out.ConcurrentEndpointSyncs = in.ConcurrentEndpointSyncs - out.ConcurrentRSSyncs = in.ConcurrentRSSyncs - out.ConcurrentRCSyncs = in.ConcurrentRCSyncs - out.ConcurrentServiceSyncs = in.ConcurrentServiceSyncs - out.ConcurrentResourceQuotaSyncs = in.ConcurrentResourceQuotaSyncs - out.ConcurrentDeploymentSyncs = in.ConcurrentDeploymentSyncs - out.ConcurrentDaemonSetSyncs = in.ConcurrentDaemonSetSyncs - out.ConcurrentJobSyncs = in.ConcurrentJobSyncs - out.ConcurrentNamespaceSyncs = in.ConcurrentNamespaceSyncs - out.ConcurrentSATokenSyncs = 
in.ConcurrentSATokenSyncs - out.LookupCacheSizeForRC = in.LookupCacheSizeForRC - out.LookupCacheSizeForRS = in.LookupCacheSizeForRS - out.LookupCacheSizeForDaemonSet = in.LookupCacheSizeForDaemonSet - out.ServiceSyncPeriod = in.ServiceSyncPeriod - out.NodeSyncPeriod = in.NodeSyncPeriod - out.RouteReconciliationPeriod = in.RouteReconciliationPeriod - out.ResourceQuotaSyncPeriod = in.ResourceQuotaSyncPeriod - out.NamespaceSyncPeriod = in.NamespaceSyncPeriod - out.PVClaimBinderSyncPeriod = in.PVClaimBinderSyncPeriod - out.MinResyncPeriod = in.MinResyncPeriod - out.TerminatedPodGCThreshold = in.TerminatedPodGCThreshold - out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod - out.DeploymentControllerSyncPeriod = in.DeploymentControllerSyncPeriod - out.PodEvictionTimeout = in.PodEvictionTimeout - out.DeletingPodsQps = in.DeletingPodsQps - out.DeletingPodsBurst = in.DeletingPodsBurst - out.NodeMonitorGracePeriod = in.NodeMonitorGracePeriod - out.RegisterRetryCount = in.RegisterRetryCount - out.NodeStartupGracePeriod = in.NodeStartupGracePeriod - out.NodeMonitorPeriod = in.NodeMonitorPeriod - out.ServiceAccountKeyFile = in.ServiceAccountKeyFile - out.ClusterSigningCertFile = in.ClusterSigningCertFile - out.ClusterSigningKeyFile = in.ClusterSigningKeyFile - out.ApproveAllKubeletCSRsForGroup = in.ApproveAllKubeletCSRsForGroup - out.EnableProfiling = in.EnableProfiling - out.ClusterName = in.ClusterName - out.ClusterCIDR = in.ClusterCIDR - out.ServiceCIDR = in.ServiceCIDR - out.NodeCIDRMaskSize = in.NodeCIDRMaskSize - out.AllocateNodeCIDRs = in.AllocateNodeCIDRs - out.ConfigureCloudRoutes = in.ConfigureCloudRoutes - out.RootCAFile = in.RootCAFile - out.ContentType = in.ContentType - out.KubeAPIQPS = in.KubeAPIQPS - out.KubeAPIBurst = in.KubeAPIBurst - out.LeaderElection = in.LeaderElection - out.VolumeConfiguration = in.VolumeConfiguration - out.ControllerStartInterval = in.ControllerStartInterval - out.EnableGarbageCollector = in.EnableGarbageCollector - out.ConcurrentGCSyncs = in.ConcurrentGCSyncs - out.NodeEvictionRate = in.NodeEvictionRate - out.SecondaryNodeEvictionRate = in.SecondaryNodeEvictionRate - out.LargeClusterSizeThreshold = in.LargeClusterSizeThreshold - out.UnhealthyZoneThreshold = in.UnhealthyZoneThreshold - return nil - } -} - -func DeepCopy_componentconfig_KubeProxyConfiguration(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*KubeProxyConfiguration) - out := out.(*KubeProxyConfiguration) - out.TypeMeta = in.TypeMeta - out.BindAddress = in.BindAddress - out.ClusterCIDR = in.ClusterCIDR - out.HealthzBindAddress = in.HealthzBindAddress - out.HealthzPort = in.HealthzPort - out.HostnameOverride = in.HostnameOverride - if in.IPTablesMasqueradeBit != nil { - in, out := &in.IPTablesMasqueradeBit, &out.IPTablesMasqueradeBit - *out = new(int32) - **out = **in - } else { - out.IPTablesMasqueradeBit = nil - } - out.IPTablesSyncPeriod = in.IPTablesSyncPeriod - out.IPTablesMinSyncPeriod = in.IPTablesMinSyncPeriod - out.KubeconfigPath = in.KubeconfigPath - out.MasqueradeAll = in.MasqueradeAll - out.Master = in.Master - if in.OOMScoreAdj != nil { - in, out := &in.OOMScoreAdj, &out.OOMScoreAdj - *out = new(int32) - **out = **in - } else { - out.OOMScoreAdj = nil - } - out.Mode = in.Mode - out.PortRange = in.PortRange - out.ResourceContainer = in.ResourceContainer - out.UDPIdleTimeout = in.UDPIdleTimeout - out.ConntrackMax = in.ConntrackMax - out.ConntrackMaxPerCore = in.ConntrackMaxPerCore - out.ConntrackMin = in.ConntrackMin - 
out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout - out.ConntrackTCPCloseWaitTimeout = in.ConntrackTCPCloseWaitTimeout - return nil - } -} - -func DeepCopy_componentconfig_KubeSchedulerConfiguration(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*KubeSchedulerConfiguration) - out := out.(*KubeSchedulerConfiguration) - out.TypeMeta = in.TypeMeta - out.Port = in.Port - out.Address = in.Address - out.AlgorithmProvider = in.AlgorithmProvider - out.PolicyConfigFile = in.PolicyConfigFile - out.EnableProfiling = in.EnableProfiling - out.ContentType = in.ContentType - out.KubeAPIQPS = in.KubeAPIQPS - out.KubeAPIBurst = in.KubeAPIBurst - out.SchedulerName = in.SchedulerName - out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight - out.FailureDomains = in.FailureDomains - out.LeaderElection = in.LeaderElection - return nil - } -} - -func DeepCopy_componentconfig_KubeletAnonymousAuthentication(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*KubeletAnonymousAuthentication) - out := out.(*KubeletAnonymousAuthentication) - out.Enabled = in.Enabled - return nil - } -} - -func DeepCopy_componentconfig_KubeletAuthentication(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*KubeletAuthentication) - out := out.(*KubeletAuthentication) - out.X509 = in.X509 - out.Webhook = in.Webhook - out.Anonymous = in.Anonymous - return nil - } -} - -func DeepCopy_componentconfig_KubeletAuthorization(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*KubeletAuthorization) - out := out.(*KubeletAuthorization) - out.Mode = in.Mode - out.Webhook = in.Webhook - return nil - } -} - -func DeepCopy_componentconfig_KubeletConfiguration(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*KubeletConfiguration) - out := out.(*KubeletConfiguration) - out.TypeMeta = in.TypeMeta - out.PodManifestPath = in.PodManifestPath - out.SyncFrequency = in.SyncFrequency - out.FileCheckFrequency = in.FileCheckFrequency - out.HTTPCheckFrequency = in.HTTPCheckFrequency - out.ManifestURL = in.ManifestURL - out.ManifestURLHeader = in.ManifestURLHeader - out.EnableServer = in.EnableServer - out.Address = in.Address - out.Port = in.Port - out.ReadOnlyPort = in.ReadOnlyPort - out.TLSCertFile = in.TLSCertFile - out.TLSPrivateKeyFile = in.TLSPrivateKeyFile - out.CertDirectory = in.CertDirectory - out.Authentication = in.Authentication - out.Authorization = in.Authorization - out.HostnameOverride = in.HostnameOverride - out.PodInfraContainerImage = in.PodInfraContainerImage - out.DockerEndpoint = in.DockerEndpoint - out.RootDirectory = in.RootDirectory - out.SeccompProfileRoot = in.SeccompProfileRoot - out.AllowPrivileged = in.AllowPrivileged - if in.HostNetworkSources != nil { - in, out := &in.HostNetworkSources, &out.HostNetworkSources - *out = make([]string, len(*in)) - copy(*out, *in) - } else { - out.HostNetworkSources = nil - } - if in.HostPIDSources != nil { - in, out := &in.HostPIDSources, &out.HostPIDSources - *out = make([]string, len(*in)) - copy(*out, *in) - } else { - out.HostPIDSources = nil - } - if in.HostIPCSources != nil { - in, out := &in.HostIPCSources, &out.HostIPCSources - *out = make([]string, len(*in)) - copy(*out, *in) - } else { - out.HostIPCSources = nil - } - out.RegistryPullQPS = in.RegistryPullQPS - out.RegistryBurst = in.RegistryBurst - out.EventRecordQPS = in.EventRecordQPS - out.EventBurst = in.EventBurst - out.EnableDebuggingHandlers = 
in.EnableDebuggingHandlers - out.MinimumGCAge = in.MinimumGCAge - out.MaxPerPodContainerCount = in.MaxPerPodContainerCount - out.MaxContainerCount = in.MaxContainerCount - out.CAdvisorPort = in.CAdvisorPort - out.HealthzPort = in.HealthzPort - out.HealthzBindAddress = in.HealthzBindAddress - out.OOMScoreAdj = in.OOMScoreAdj - out.RegisterNode = in.RegisterNode - out.ClusterDomain = in.ClusterDomain - out.MasterServiceNamespace = in.MasterServiceNamespace - out.ClusterDNS = in.ClusterDNS - out.StreamingConnectionIdleTimeout = in.StreamingConnectionIdleTimeout - out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency - out.ImageMinimumGCAge = in.ImageMinimumGCAge - out.ImageGCHighThresholdPercent = in.ImageGCHighThresholdPercent - out.ImageGCLowThresholdPercent = in.ImageGCLowThresholdPercent - out.LowDiskSpaceThresholdMB = in.LowDiskSpaceThresholdMB - out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod - out.NetworkPluginName = in.NetworkPluginName - out.NetworkPluginMTU = in.NetworkPluginMTU - out.NetworkPluginDir = in.NetworkPluginDir - out.CNIConfDir = in.CNIConfDir - out.CNIBinDir = in.CNIBinDir - out.VolumePluginDir = in.VolumePluginDir - out.CloudProvider = in.CloudProvider - out.CloudConfigFile = in.CloudConfigFile - out.KubeletCgroups = in.KubeletCgroups - out.ExperimentalCgroupsPerQOS = in.ExperimentalCgroupsPerQOS - out.CgroupDriver = in.CgroupDriver - out.RuntimeCgroups = in.RuntimeCgroups - out.SystemCgroups = in.SystemCgroups - out.CgroupRoot = in.CgroupRoot - out.ContainerRuntime = in.ContainerRuntime - out.RemoteRuntimeEndpoint = in.RemoteRuntimeEndpoint - out.RemoteImageEndpoint = in.RemoteImageEndpoint - out.RuntimeRequestTimeout = in.RuntimeRequestTimeout - out.RktPath = in.RktPath - out.ExperimentalMounterPath = in.ExperimentalMounterPath - out.RktAPIEndpoint = in.RktAPIEndpoint - out.RktStage1Image = in.RktStage1Image - out.LockFilePath = in.LockFilePath - out.ExitOnLockContention = in.ExitOnLockContention - out.HairpinMode = in.HairpinMode - out.BabysitDaemons = in.BabysitDaemons - out.MaxPods = in.MaxPods - out.NvidiaGPUs = in.NvidiaGPUs - out.DockerExecHandlerName = in.DockerExecHandlerName - out.PodCIDR = in.PodCIDR - out.ResolverConfig = in.ResolverConfig - out.CPUCFSQuota = in.CPUCFSQuota - out.Containerized = in.Containerized - out.MaxOpenFiles = in.MaxOpenFiles - out.ReconcileCIDR = in.ReconcileCIDR - out.RegisterSchedulable = in.RegisterSchedulable - out.ContentType = in.ContentType - out.KubeAPIQPS = in.KubeAPIQPS - out.KubeAPIBurst = in.KubeAPIBurst - out.SerializeImagePulls = in.SerializeImagePulls - out.OutOfDiskTransitionFrequency = in.OutOfDiskTransitionFrequency - out.NodeIP = in.NodeIP - if in.NodeLabels != nil { - in, out := &in.NodeLabels, &out.NodeLabels - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } else { - out.NodeLabels = nil - } - out.NonMasqueradeCIDR = in.NonMasqueradeCIDR - out.EnableCustomMetrics = in.EnableCustomMetrics - out.EvictionHard = in.EvictionHard - out.EvictionSoft = in.EvictionSoft - out.EvictionSoftGracePeriod = in.EvictionSoftGracePeriod - out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod - out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod - out.EvictionMinimumReclaim = in.EvictionMinimumReclaim - out.ExperimentalKernelMemcgNotification = in.ExperimentalKernelMemcgNotification - out.PodsPerCore = in.PodsPerCore - out.EnableControllerAttachDetach = in.EnableControllerAttachDetach - if in.SystemReserved != nil { - in, out := &in.SystemReserved, 
&out.SystemReserved - *out = make(config.ConfigurationMap) - for key, val := range *in { - (*out)[key] = val - } - } else { - out.SystemReserved = nil - } - if in.KubeReserved != nil { - in, out := &in.KubeReserved, &out.KubeReserved - *out = make(config.ConfigurationMap) - for key, val := range *in { - (*out)[key] = val - } - } else { - out.KubeReserved = nil - } - out.ProtectKernelDefaults = in.ProtectKernelDefaults - out.MakeIPTablesUtilChains = in.MakeIPTablesUtilChains - out.IPTablesMasqueradeBit = in.IPTablesMasqueradeBit - out.IPTablesDropBit = in.IPTablesDropBit - if in.AllowedUnsafeSysctls != nil { - in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } else { - out.AllowedUnsafeSysctls = nil - } - out.FeatureGates = in.FeatureGates - out.EnableCRI = in.EnableCRI - out.ExperimentalFailSwapOn = in.ExperimentalFailSwapOn - out.ExperimentalCheckNodeCapabilitiesBeforeMount = in.ExperimentalCheckNodeCapabilitiesBeforeMount - return nil - } -} - -func DeepCopy_componentconfig_KubeletWebhookAuthentication(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*KubeletWebhookAuthentication) - out := out.(*KubeletWebhookAuthentication) - out.Enabled = in.Enabled - out.CacheTTL = in.CacheTTL - return nil - } -} - -func DeepCopy_componentconfig_KubeletWebhookAuthorization(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*KubeletWebhookAuthorization) - out := out.(*KubeletWebhookAuthorization) - out.CacheAuthorizedTTL = in.CacheAuthorizedTTL - out.CacheUnauthorizedTTL = in.CacheUnauthorizedTTL - return nil - } -} - -func DeepCopy_componentconfig_KubeletX509Authentication(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*KubeletX509Authentication) - out := out.(*KubeletX509Authentication) - out.ClientCAFile = in.ClientCAFile - return nil - } -} - -func DeepCopy_componentconfig_LeaderElectionConfiguration(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LeaderElectionConfiguration) - out := out.(*LeaderElectionConfiguration) - out.LeaderElect = in.LeaderElect - out.LeaseDuration = in.LeaseDuration - out.RenewDeadline = in.RenewDeadline - out.RetryPeriod = in.RetryPeriod - return nil - } -} - -func DeepCopy_componentconfig_PersistentVolumeRecyclerConfiguration(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PersistentVolumeRecyclerConfiguration) - out := out.(*PersistentVolumeRecyclerConfiguration) - out.MaximumRetry = in.MaximumRetry - out.MinimumTimeoutNFS = in.MinimumTimeoutNFS - out.PodTemplateFilePathNFS = in.PodTemplateFilePathNFS - out.IncrementTimeoutNFS = in.IncrementTimeoutNFS - out.PodTemplateFilePathHostPath = in.PodTemplateFilePathHostPath - out.MinimumTimeoutHostPath = in.MinimumTimeoutHostPath - out.IncrementTimeoutHostPath = in.IncrementTimeoutHostPath - return nil - } -} - -func DeepCopy_componentconfig_PortRangeVar(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PortRangeVar) - out := out.(*PortRangeVar) - if in.Val != nil { - in, out := &in.Val, &out.Val - *out = new(string) - **out = **in - } else { - out.Val = nil - } - return nil - } -} - -func DeepCopy_componentconfig_VolumeConfiguration(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*VolumeConfiguration) - out := out.(*VolumeConfiguration) - out.EnableHostPathProvisioning = in.EnableHostPathProvisioning - out.EnableDynamicProvisioning = 
in.EnableDynamicProvisioning - out.PersistentVolumeRecyclerConfiguration = in.PersistentVolumeRecyclerConfiguration - out.FlexVolumePluginDir = in.FlexVolumePluginDir - return nil - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD index d5025da72a..b1024ddcf0 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD @@ -4,10 +4,15 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", "go_test", - "cgo_library", +) + +go_test( + name = "go_default_test", + srcs = ["helpers_test.go"], + library = ":go_default_library", + tags = ["automanaged"], ) go_library( @@ -16,29 +21,35 @@ go_library( "doc.go", "helpers.go", "register.go", - "types.generated.go", "types.go", "zz_generated.deepcopy.go", ], tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/resource:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apis/autoscaling:go_default_library", - "//pkg/apis/batch:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//pkg/util/intstr:go_default_library", - "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/api/resource", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/util/intstr", ], ) -go_test( - name = "go_default_test", - srcs = ["helpers_test.go"], - library = "go_default_library", +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/extensions/install:all-srcs", + "//pkg/apis/extensions/v1beta1:all-srcs", + "//pkg/apis/extensions/validation:all-srcs", + ], tags = ["automanaged"], - deps = [], ) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/extensions/OWNERS new file mode 100755 index 0000000000..494763a693 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/OWNERS @@ -0,0 +1,41 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- nikhiljindal +- bprashanth +- erictune +- pmorie +- sttts +- kargakis +- saad-ali +- janetkuo +- justinsb +- ncdc +- timstclair +- mwielgus +- timothysc +- soltysh +- piosz +- dims +- errordeveloper +- madhusudancs +- rootfs +- jszczepkowski +- mml +- resouer +- mbohlool +- david-mcmahon +- therc +- pweil- +- tmrts +- mqliang +- lukaszo +- jianhuiz diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go index 6b51a43e8b..2bbb71d057 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go @@ -15,6 +15,5 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package,register -// +k8s:openapi-gen=true package extensions diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/BUILD index fe2428727c..53b3892fbf 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -15,9 +12,25 @@ go_library( srcs = ["install.go"], tags = ["automanaged"], deps = [ - "//pkg/apimachinery/announced:go_default_library", + "//pkg/api:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/apis/extensions/v1beta1:go_default_library", - "//pkg/util/sets:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/announced", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/registered", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/util/sets", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/install.go index 7381d7ce79..53f870dda2 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/install.go @@ -19,13 +19,21 @@ limitations under the License. package install import ( - "k8s.io/kubernetes/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" - "k8s.io/kubernetes/pkg/util/sets" ) func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { if err := announced.NewGroupMetaFactory( &announced.GroupMetaFactoryArgs{ GroupName: extensions.GroupName, @@ -37,7 +45,7 @@ func init() { announced.VersionToSchemeFunc{ v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, }, - ).Announce().RegisterAndEnable(); err != nil { + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { panic(err) } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go index 31fc20a8ae..5983636c22 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go @@ -17,26 +17,23 @@ limitations under the License. 
package extensions import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/autoscaling" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName is the group name use in this package const GroupName = "extensions" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} // Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { +func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } // Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { +func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } @@ -52,11 +49,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &Deployment{}, &DeploymentList{}, &DeploymentRollback{}, - &autoscaling.HorizontalPodAutoscaler{}, - &autoscaling.HorizontalPodAutoscalerList{}, - &batch.Job{}, - &batch.JobList{}, - &batch.JobTemplate{}, &ReplicationControllerDummy{}, &Scale{}, &ThirdPartyResource{}, @@ -67,11 +59,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ThirdPartyResourceDataList{}, &Ingress{}, &IngressList{}, - &api.ListOptions{}, - &api.DeleteOptions{}, &ReplicaSet{}, &ReplicaSetList{}, - &api.ExportOptions{}, &PodSecurityPolicy{}, &PodSecurityPolicyList{}, &NetworkPolicy{}, diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.generated.go deleted file mode 100644 index 8815ac362e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.generated.go +++ /dev/null @@ -1,19454 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. 
-// ************************************************************ - -package extensions - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg2_api "k8s.io/kubernetes/pkg/api" - pkg4_resource "k8s.io/kubernetes/pkg/api/resource" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg3_types "k8s.io/kubernetes/pkg/types" - pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg2_api.ObjectMeta - var v1 pkg4_resource.Quantity - var v2 pkg1_unversioned.LabelSelector - var v3 pkg3_types.UID - var v4 pkg5_intstr.IntOrString - var v5 time.Time - _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 - } -} - -func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Replicas != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym6 := z.DecBinary() - _ = yym6 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct7 := r.ContainerType() - if yyct7 == codecSelferValueTypeMap1234 { - yyl7 := r.ReadMapStart() - if yyl7 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl7, d) - } - } else if yyct7 == codecSelferValueTypeArray1234 { - yyl7 := 
r.ReadArrayStart() - if yyl7 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl7, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys8Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys8Slc - var yyhl8 bool = l >= 0 - for yyj8 := 0; ; yyj8++ { - if yyhl8 { - if yyj8 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys8Slc = r.DecodeBytes(yys8Slc, true, true) - yys8 := string(yys8Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys8 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys8) - } // end switch yys8 - } // end for yyj8 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep13 := !z.EncBinary() - yy2arr13 := z.EncBasicHandle().StructToArray - var yyq13 [2]bool - _, _, _ = yysep13, yyq13, yy2arr13 - const yyr13 bool = false - yyq13[1] = x.Selector != nil - var yynn13 int - if yyr13 || yy2arr13 { - r.EncodeArrayStart(2) - } else { - yynn13 = 1 - for _, b := range yyq13 { - if b { - yynn13++ - } - } - r.EncodeMapStart(yynn13) - yynn13 = 0 - } - if yyr13 || yy2arr13 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr13 || yy2arr13 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq13[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq13[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr13 || yy2arr13 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym20 := z.DecBinary() - _ = yym20 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct21 := r.ContainerType() - if yyct21 == codecSelferValueTypeMap1234 { - yyl21 := r.ReadMapStart() - if yyl21 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl21, d) - } - } else if yyct21 == codecSelferValueTypeArray1234 { - yyl21 := r.ReadArrayStart() - if yyl21 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl21, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys22Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys22Slc - var yyhl22 bool = l >= 0 - for yyj22 := 0; ; yyj22++ { - if yyhl22 { - if yyj22 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys22Slc = r.DecodeBytes(yys22Slc, true, true) - yys22 := string(yys22Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys22 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym25 := z.DecBinary() - _ = yym25 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys22) - } // end switch yys22 - } // end for yyj22 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj26 int - var yyb26 bool - var yyhl26 bool = l >= 0 - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym29 := z.DecBinary() - _ = yym29 - if false { - } else if z.HasExtensions() && 
z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - for { - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj26-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym30 := z.EncBinary() - _ = yym30 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep31 := !z.EncBinary() - yy2arr31 := z.EncBasicHandle().StructToArray - var yyq31 [5]bool - _, _, _ = yysep31, yyq31, yy2arr31 - const yyr31 bool = false - yyq31[0] = x.Kind != "" - yyq31[1] = x.APIVersion != "" - yyq31[2] = true - yyq31[3] = true - yyq31[4] = true - var yynn31 int - if yyr31 || yy2arr31 { - r.EncodeArrayStart(5) - } else { - yynn31 = 0 - for _, b := range yyq31 { - if b { - yynn31++ - } - } - r.EncodeMapStart(yynn31) - yynn31 = 0 - } - if yyr31 || yy2arr31 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq31[0] { - yym33 := z.EncBinary() - _ = yym33 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq31[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym34 := z.EncBinary() - _ = yym34 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr31 || yy2arr31 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq31[1] { - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq31[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym37 := z.EncBinary() - _ = yym37 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr31 || yy2arr31 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq31[2] { - yy39 := &x.ObjectMeta - yy39.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq31[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy40 := &x.ObjectMeta - yy40.CodecEncodeSelf(e) - } - } - if yyr31 || yy2arr31 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq31[3] { - yy42 := &x.Spec - yy42.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq31[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy43 := &x.Spec - yy43.CodecEncodeSelf(e) - } - } - if yyr31 || yy2arr31 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq31[4] { - yy45 := &x.Status - yy45.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq31[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy46 := &x.Status - yy46.CodecEncodeSelf(e) - } - } - if yyr31 || yy2arr31 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym47 := z.DecBinary() - _ = yym47 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct48 := r.ContainerType() - if yyct48 == codecSelferValueTypeMap1234 { - yyl48 := r.ReadMapStart() - if yyl48 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl48, d) - } - } else if yyct48 == codecSelferValueTypeArray1234 { - yyl48 := r.ReadArrayStart() - if yyl48 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl48, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys49Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys49Slc - var yyhl49 bool = l >= 0 - for yyj49 := 0; ; yyj49++ { - if yyhl49 { - if yyj49 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys49Slc = r.DecodeBytes(yys49Slc, true, true) - yys49 := string(yys49Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys49 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv52 := &x.ObjectMeta - yyv52.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ScaleSpec{} - } else { - yyv53 := &x.Spec - yyv53.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ScaleStatus{} - } else { - yyv54 := &x.Status - yyv54.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys49) - } // end switch yys49 - } // end for yyj49 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj55 int - var yyb55 bool - var yyhl55 bool = l >= 0 - yyj55++ - if yyhl55 { - yyb55 = yyj55 > l - } else { - yyb55 = r.CheckBreak() - } - if yyb55 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj55++ - if yyhl55 { - yyb55 = yyj55 > l - } else { - yyb55 = r.CheckBreak() - } - if yyb55 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj55++ - if yyhl55 { - yyb55 = yyj55 > l - } else { - yyb55 = r.CheckBreak() - } - if yyb55 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv58 := &x.ObjectMeta - yyv58.CodecDecodeSelf(d) - } - yyj55++ - if yyhl55 { - yyb55 = yyj55 > l - } else { - yyb55 = r.CheckBreak() - } - if yyb55 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ScaleSpec{} - } else { - yyv59 := &x.Spec - yyv59.CodecDecodeSelf(d) - } - yyj55++ - if yyhl55 { - yyb55 = yyj55 > l - } else { - yyb55 = r.CheckBreak() - } - if yyb55 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ScaleStatus{} - } else { - yyv60 := &x.Status - yyv60.CodecDecodeSelf(d) - } - for { - yyj55++ - if yyhl55 { - yyb55 = yyj55 > l - } else { - yyb55 = r.CheckBreak() - } - if yyb55 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj55-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicationControllerDummy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym61 := z.EncBinary() - _ = yym61 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep62 := !z.EncBinary() - yy2arr62 := z.EncBasicHandle().StructToArray - var yyq62 [2]bool - _, _, _ = yysep62, yyq62, yy2arr62 - const yyr62 bool = false - yyq62[0] = x.Kind != "" - yyq62[1] = x.APIVersion != "" - var yynn62 int - if yyr62 || yy2arr62 { - r.EncodeArrayStart(2) - } else { - yynn62 = 0 - for _, b := range yyq62 { - if b { - yynn62++ - } - } - r.EncodeMapStart(yynn62) - yynn62 = 0 - } - if yyr62 || yy2arr62 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq62[0] { - yym64 := z.EncBinary() - _ = yym64 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq62[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym65 := z.EncBinary() - _ = yym65 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr62 || yy2arr62 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq62[1] { - yym67 := z.EncBinary() - _ = yym67 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq62[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym68 := z.EncBinary() - _ = yym68 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr62 || yy2arr62 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicationControllerDummy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym69 := z.DecBinary() - _ = yym69 - if false { - } else if 
z.HasExtensions() && z.DecExt(x) { - } else { - yyct70 := r.ContainerType() - if yyct70 == codecSelferValueTypeMap1234 { - yyl70 := r.ReadMapStart() - if yyl70 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl70, d) - } - } else if yyct70 == codecSelferValueTypeArray1234 { - yyl70 := r.ReadArrayStart() - if yyl70 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl70, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicationControllerDummy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys71Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys71Slc - var yyhl71 bool = l >= 0 - for yyj71 := 0; ; yyj71++ { - if yyhl71 { - if yyj71 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys71Slc = r.DecodeBytes(yys71Slc, true, true) - yys71 := string(yys71Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys71 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys71) - } // end switch yys71 - } // end for yyj71 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicationControllerDummy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj74 int - var yyb74 bool - var yyhl74 bool = l >= 0 - yyj74++ - if yyhl74 { - yyb74 = yyj74 > l - } else { - yyb74 = r.CheckBreak() - } - if yyb74 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj74++ - if yyhl74 { - yyb74 = yyj74 > l - } else { - yyb74 = r.CheckBreak() - } - if yyb74 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj74++ - if yyhl74 { - yyb74 = yyj74 > l - } else { - yyb74 = r.CheckBreak() - } - if yyb74 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj74-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CustomMetricTarget) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym77 := z.EncBinary() - _ = yym77 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep78 := !z.EncBinary() - yy2arr78 := z.EncBasicHandle().StructToArray - var yyq78 [2]bool - _, _, _ = yysep78, yyq78, yy2arr78 - const yyr78 bool = false - var yynn78 int - if yyr78 || yy2arr78 { - r.EncodeArrayStart(2) - } else { - yynn78 = 2 - for _, b := range yyq78 { - if b { - yynn78++ - } - } - r.EncodeMapStart(yynn78) - yynn78 = 0 - } - if yyr78 || yy2arr78 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym80 := z.EncBinary() - _ = yym80 - 
if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym81 := z.EncBinary() - _ = yym81 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr78 || yy2arr78 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy83 := &x.TargetValue - yym84 := z.EncBinary() - _ = yym84 - if false { - } else if z.HasExtensions() && z.EncExt(yy83) { - } else if !yym84 && z.IsJSONHandle() { - z.EncJSONMarshal(yy83) - } else { - z.EncFallback(yy83) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy85 := &x.TargetValue - yym86 := z.EncBinary() - _ = yym86 - if false { - } else if z.HasExtensions() && z.EncExt(yy85) { - } else if !yym86 && z.IsJSONHandle() { - z.EncJSONMarshal(yy85) - } else { - z.EncFallback(yy85) - } - } - if yyr78 || yy2arr78 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CustomMetricTarget) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym87 := z.DecBinary() - _ = yym87 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct88 := r.ContainerType() - if yyct88 == codecSelferValueTypeMap1234 { - yyl88 := r.ReadMapStart() - if yyl88 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl88, d) - } - } else if yyct88 == codecSelferValueTypeArray1234 { - yyl88 := r.ReadArrayStart() - if yyl88 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl88, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CustomMetricTarget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys89Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys89Slc - var yyhl89 bool = l >= 0 - for yyj89 := 0; ; yyj89++ { - if yyhl89 { - if yyj89 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys89Slc = r.DecodeBytes(yys89Slc, true, true) - yys89 := string(yys89Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys89 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "value": - if r.TryDecodeAsNil() { - x.TargetValue = pkg4_resource.Quantity{} - } else { - yyv91 := &x.TargetValue - yym92 := z.DecBinary() - _ = yym92 - if false { - } else if z.HasExtensions() && z.DecExt(yyv91) { - } else if !yym92 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv91) - } else { - z.DecFallback(yyv91, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys89) - } // end switch yys89 - } // end for yyj89 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CustomMetricTarget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj93 int - var yyb93 bool - var yyhl93 bool = l >= 0 - yyj93++ 
- if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetValue = pkg4_resource.Quantity{} - } else { - yyv95 := &x.TargetValue - yym96 := z.DecBinary() - _ = yym96 - if false { - } else if z.HasExtensions() && z.DecExt(yyv95) { - } else if !yym96 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv95) - } else { - z.DecFallback(yyv95, false) - } - } - for { - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj93-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CustomMetricTargetList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym97 := z.EncBinary() - _ = yym97 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep98 := !z.EncBinary() - yy2arr98 := z.EncBasicHandle().StructToArray - var yyq98 [1]bool - _, _, _ = yysep98, yyq98, yy2arr98 - const yyr98 bool = false - var yynn98 int - if yyr98 || yy2arr98 { - r.EncodeArrayStart(1) - } else { - yynn98 = 1 - for _, b := range yyq98 { - if b { - yynn98++ - } - } - r.EncodeMapStart(yynn98) - yynn98 = 0 - } - if yyr98 || yy2arr98 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym100 := z.EncBinary() - _ = yym100 - if false { - } else { - h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym101 := z.EncBinary() - _ = yym101 - if false { - } else { - h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e) - } - } - } - if yyr98 || yy2arr98 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CustomMetricTargetList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym102 := z.DecBinary() - _ = yym102 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct103 := r.ContainerType() - if yyct103 == codecSelferValueTypeMap1234 { - yyl103 := r.ReadMapStart() - if yyl103 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl103, d) - } - } else if yyct103 == codecSelferValueTypeArray1234 { - yyl103 := r.ReadArrayStart() - if yyl103 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl103, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CustomMetricTargetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys104Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys104Slc - var yyhl104 bool = l >= 0 - for yyj104 := 0; ; yyj104++ { - if yyhl104 { - if yyj104 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys104Slc = r.DecodeBytes(yys104Slc, true, true) - yys104 := string(yys104Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys104 { - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv105 := &x.Items - yym106 := z.DecBinary() - _ = yym106 - if false { - } else { - h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv105), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys104) - } // end switch yys104 - } // end for yyj104 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CustomMetricTargetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj107 int - var yyb107 bool - var yyhl107 bool = l >= 0 - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv108 := &x.Items - yym109 := z.DecBinary() - _ = yym109 - if false { - } else { - h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv108), d) - } - } - for { - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj107-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CustomMetricCurrentStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym110 := z.EncBinary() - _ = yym110 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep111 := !z.EncBinary() - yy2arr111 := z.EncBasicHandle().StructToArray - var yyq111 [2]bool - _, _, _ = yysep111, yyq111, yy2arr111 - const yyr111 bool = false - var yynn111 int - if yyr111 || yy2arr111 { - r.EncodeArrayStart(2) - } else { - yynn111 = 2 - for _, b := range yyq111 { - if b { - yynn111++ - } - } - r.EncodeMapStart(yynn111) - yynn111 = 0 - } - if yyr111 || yy2arr111 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym113 := z.EncBinary() - _ = yym113 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym114 := z.EncBinary() - _ = yym114 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr111 || yy2arr111 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy116 := &x.CurrentValue - yym117 := z.EncBinary() - _ = yym117 - if false { - } else if z.HasExtensions() && z.EncExt(yy116) { - } else if !yym117 && z.IsJSONHandle() { - z.EncJSONMarshal(yy116) - } else { - z.EncFallback(yy116) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy118 := &x.CurrentValue - yym119 := z.EncBinary() - _ = yym119 - if false { - } else if z.HasExtensions() && z.EncExt(yy118) { - } else if !yym119 && z.IsJSONHandle() { - z.EncJSONMarshal(yy118) - } else { - z.EncFallback(yy118) - } - } - if yyr111 || yy2arr111 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CustomMetricCurrentStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym120 := z.DecBinary() - _ = yym120 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct121 := r.ContainerType() - if yyct121 == codecSelferValueTypeMap1234 { - yyl121 := r.ReadMapStart() - if yyl121 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl121, d) - } - } else if yyct121 == codecSelferValueTypeArray1234 { - yyl121 := r.ReadArrayStart() - if yyl121 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl121, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CustomMetricCurrentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys122Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys122Slc - var yyhl122 bool = l >= 0 - for yyj122 := 0; ; yyj122++ { - if yyhl122 { - if yyj122 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys122Slc = r.DecodeBytes(yys122Slc, true, true) - yys122 := string(yys122Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys122 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "value": - if r.TryDecodeAsNil() { - x.CurrentValue = pkg4_resource.Quantity{} - } else { - yyv124 := &x.CurrentValue - yym125 := z.DecBinary() - _ = yym125 - if false { - } else if z.HasExtensions() && z.DecExt(yyv124) { - } else if !yym125 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv124) - } else { - z.DecFallback(yyv124, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys122) - } // end switch yys122 - } // end for yyj122 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CustomMetricCurrentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj126 int - var yyb126 bool - var yyhl126 bool = l >= 0 - yyj126++ - if yyhl126 { - yyb126 = yyj126 > l - } else { - yyb126 = r.CheckBreak() - } - if yyb126 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj126++ - if yyhl126 { - yyb126 = yyj126 > l - } else { - yyb126 = r.CheckBreak() - } - if yyb126 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentValue = pkg4_resource.Quantity{} - } else { - yyv128 := &x.CurrentValue - yym129 := z.DecBinary() - _ = yym129 - if false { - } else if z.HasExtensions() 
&& z.DecExt(yyv128) { - } else if !yym129 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv128) - } else { - z.DecFallback(yyv128, false) - } - } - for { - yyj126++ - if yyhl126 { - yyb126 = yyj126 > l - } else { - yyb126 = r.CheckBreak() - } - if yyb126 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj126-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CustomMetricCurrentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym130 := z.EncBinary() - _ = yym130 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep131 := !z.EncBinary() - yy2arr131 := z.EncBasicHandle().StructToArray - var yyq131 [1]bool - _, _, _ = yysep131, yyq131, yy2arr131 - const yyr131 bool = false - var yynn131 int - if yyr131 || yy2arr131 { - r.EncodeArrayStart(1) - } else { - yynn131 = 1 - for _, b := range yyq131 { - if b { - yynn131++ - } - } - r.EncodeMapStart(yynn131) - yynn131 = 0 - } - if yyr131 || yy2arr131 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym133 := z.EncBinary() - _ = yym133 - if false { - } else { - h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym134 := z.EncBinary() - _ = yym134 - if false { - } else { - h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e) - } - } - } - if yyr131 || yy2arr131 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CustomMetricCurrentStatusList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym135 := z.DecBinary() - _ = yym135 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct136 := r.ContainerType() - if yyct136 == codecSelferValueTypeMap1234 { - yyl136 := r.ReadMapStart() - if yyl136 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl136, d) - } - } else if yyct136 == codecSelferValueTypeArray1234 { - yyl136 := r.ReadArrayStart() - if yyl136 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl136, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys137Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys137Slc - var yyhl137 bool = l >= 0 - for yyj137 := 0; ; yyj137++ { - if yyhl137 { - if yyj137 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys137Slc = r.DecodeBytes(yys137Slc, true, true) - yys137 := string(yys137Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys137 { - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv138 := &x.Items - yym139 := z.DecBinary() - _ = 
yym139 - if false { - } else { - h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv138), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys137) - } // end switch yys137 - } // end for yyj137 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj140 int - var yyb140 bool - var yyhl140 bool = l >= 0 - yyj140++ - if yyhl140 { - yyb140 = yyj140 > l - } else { - yyb140 = r.CheckBreak() - } - if yyb140 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv141 := &x.Items - yym142 := z.DecBinary() - _ = yym142 - if false { - } else { - h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv141), d) - } - } - for { - yyj140++ - if yyhl140 { - yyb140 = yyj140 > l - } else { - yyb140 = r.CheckBreak() - } - if yyb140 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj140-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ThirdPartyResource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym143 := z.EncBinary() - _ = yym143 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep144 := !z.EncBinary() - yy2arr144 := z.EncBasicHandle().StructToArray - var yyq144 [5]bool - _, _, _ = yysep144, yyq144, yy2arr144 - const yyr144 bool = false - yyq144[0] = x.Kind != "" - yyq144[1] = x.APIVersion != "" - yyq144[2] = true - yyq144[3] = x.Description != "" - yyq144[4] = len(x.Versions) != 0 - var yynn144 int - if yyr144 || yy2arr144 { - r.EncodeArrayStart(5) - } else { - yynn144 = 0 - for _, b := range yyq144 { - if b { - yynn144++ - } - } - r.EncodeMapStart(yynn144) - yynn144 = 0 - } - if yyr144 || yy2arr144 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq144[0] { - yym146 := z.EncBinary() - _ = yym146 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq144[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym147 := z.EncBinary() - _ = yym147 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr144 || yy2arr144 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq144[1] { - yym149 := z.EncBinary() - _ = yym149 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq144[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym150 := z.EncBinary() - _ = yym150 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr144 || yy2arr144 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq144[2] { - yy152 := &x.ObjectMeta - yy152.CodecEncodeSelf(e) - } else { - r.EncodeNil() 
- } - } else { - if yyq144[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy153 := &x.ObjectMeta - yy153.CodecEncodeSelf(e) - } - } - if yyr144 || yy2arr144 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq144[3] { - yym155 := z.EncBinary() - _ = yym155 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Description)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq144[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("description")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym156 := z.EncBinary() - _ = yym156 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Description)) - } - } - } - if yyr144 || yy2arr144 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq144[4] { - if x.Versions == nil { - r.EncodeNil() - } else { - yym158 := z.EncBinary() - _ = yym158 - if false { - } else { - h.encSliceAPIVersion(([]APIVersion)(x.Versions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq144[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("versions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Versions == nil { - r.EncodeNil() - } else { - yym159 := z.EncBinary() - _ = yym159 - if false { - } else { - h.encSliceAPIVersion(([]APIVersion)(x.Versions), e) - } - } - } - } - if yyr144 || yy2arr144 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ThirdPartyResource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym160 := z.DecBinary() - _ = yym160 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct161 := r.ContainerType() - if yyct161 == codecSelferValueTypeMap1234 { - yyl161 := r.ReadMapStart() - if yyl161 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl161, d) - } - } else if yyct161 == codecSelferValueTypeArray1234 { - yyl161 := r.ReadArrayStart() - if yyl161 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl161, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ThirdPartyResource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys162Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys162Slc - var yyhl162 bool = l >= 0 - for yyj162 := 0; ; yyj162++ { - if yyhl162 { - if yyj162 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys162Slc = r.DecodeBytes(yys162Slc, true, true) - yys162 := string(yys162Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys162 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { 
- yyv165 := &x.ObjectMeta - yyv165.CodecDecodeSelf(d) - } - case "description": - if r.TryDecodeAsNil() { - x.Description = "" - } else { - x.Description = string(r.DecodeString()) - } - case "versions": - if r.TryDecodeAsNil() { - x.Versions = nil - } else { - yyv167 := &x.Versions - yym168 := z.DecBinary() - _ = yym168 - if false { - } else { - h.decSliceAPIVersion((*[]APIVersion)(yyv167), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys162) - } // end switch yys162 - } // end for yyj162 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ThirdPartyResource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj169 int - var yyb169 bool - var yyhl169 bool = l >= 0 - yyj169++ - if yyhl169 { - yyb169 = yyj169 > l - } else { - yyb169 = r.CheckBreak() - } - if yyb169 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj169++ - if yyhl169 { - yyb169 = yyj169 > l - } else { - yyb169 = r.CheckBreak() - } - if yyb169 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj169++ - if yyhl169 { - yyb169 = yyj169 > l - } else { - yyb169 = r.CheckBreak() - } - if yyb169 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv172 := &x.ObjectMeta - yyv172.CodecDecodeSelf(d) - } - yyj169++ - if yyhl169 { - yyb169 = yyj169 > l - } else { - yyb169 = r.CheckBreak() - } - if yyb169 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Description = "" - } else { - x.Description = string(r.DecodeString()) - } - yyj169++ - if yyhl169 { - yyb169 = yyj169 > l - } else { - yyb169 = r.CheckBreak() - } - if yyb169 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Versions = nil - } else { - yyv174 := &x.Versions - yym175 := z.DecBinary() - _ = yym175 - if false { - } else { - h.decSliceAPIVersion((*[]APIVersion)(yyv174), d) - } - } - for { - yyj169++ - if yyhl169 { - yyb169 = yyj169 > l - } else { - yyb169 = r.CheckBreak() - } - if yyb169 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj169-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ThirdPartyResourceList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym176 := z.EncBinary() - _ = yym176 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep177 := !z.EncBinary() - yy2arr177 := z.EncBasicHandle().StructToArray - var yyq177 [4]bool - _, _, _ = yysep177, yyq177, yy2arr177 - const yyr177 bool = false - yyq177[0] = x.Kind != "" - yyq177[1] = x.APIVersion != "" - yyq177[2] = true - var yynn177 int - if yyr177 || yy2arr177 { - r.EncodeArrayStart(4) - } 
else { - yynn177 = 1 - for _, b := range yyq177 { - if b { - yynn177++ - } - } - r.EncodeMapStart(yynn177) - yynn177 = 0 - } - if yyr177 || yy2arr177 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq177[0] { - yym179 := z.EncBinary() - _ = yym179 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq177[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym180 := z.EncBinary() - _ = yym180 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr177 || yy2arr177 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq177[1] { - yym182 := z.EncBinary() - _ = yym182 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq177[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym183 := z.EncBinary() - _ = yym183 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr177 || yy2arr177 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq177[2] { - yy185 := &x.ListMeta - yym186 := z.EncBinary() - _ = yym186 - if false { - } else if z.HasExtensions() && z.EncExt(yy185) { - } else { - z.EncFallback(yy185) - } - } else { - r.EncodeNil() - } - } else { - if yyq177[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy187 := &x.ListMeta - yym188 := z.EncBinary() - _ = yym188 - if false { - } else if z.HasExtensions() && z.EncExt(yy187) { - } else { - z.EncFallback(yy187) - } - } - } - if yyr177 || yy2arr177 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym190 := z.EncBinary() - _ = yym190 - if false { - } else { - h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym191 := z.EncBinary() - _ = yym191 - if false { - } else { - h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e) - } - } - } - if yyr177 || yy2arr177 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ThirdPartyResourceList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym192 := z.DecBinary() - _ = yym192 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct193 := r.ContainerType() - if yyct193 == codecSelferValueTypeMap1234 { - yyl193 := r.ReadMapStart() - if yyl193 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl193, d) - } - } else if yyct193 == codecSelferValueTypeArray1234 { - yyl193 := r.ReadArrayStart() - if yyl193 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl193, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ThirdPartyResourceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys194Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys194Slc - var yyhl194 bool = l >= 0 - for yyj194 := 0; ; yyj194++ { - if yyhl194 { - if yyj194 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys194Slc = r.DecodeBytes(yys194Slc, true, true) - yys194 := string(yys194Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys194 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv197 := &x.ListMeta - yym198 := z.DecBinary() - _ = yym198 - if false { - } else if z.HasExtensions() && z.DecExt(yyv197) { - } else { - z.DecFallback(yyv197, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv199 := &x.Items - yym200 := z.DecBinary() - _ = yym200 - if false { - } else { - h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv199), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys194) - } // end switch yys194 - } // end for yyj194 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ThirdPartyResourceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj201 int - var yyb201 bool - var yyhl201 bool = l >= 0 - yyj201++ - if yyhl201 { - yyb201 = yyj201 > l - } else { - yyb201 = r.CheckBreak() - } - if yyb201 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj201++ - if yyhl201 { - yyb201 = yyj201 > l - } else { - yyb201 = r.CheckBreak() - } - if yyb201 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj201++ - if yyhl201 { - yyb201 = yyj201 > l - } else { - yyb201 = r.CheckBreak() - } - if yyb201 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv204 := &x.ListMeta - yym205 := z.DecBinary() - _ = yym205 - if false { - } else if z.HasExtensions() && z.DecExt(yyv204) { - } else { - z.DecFallback(yyv204, false) - } - } - yyj201++ - if yyhl201 { - yyb201 = yyj201 > l - } else { - yyb201 = r.CheckBreak() - } - if yyb201 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv206 := &x.Items - yym207 := z.DecBinary() - _ = yym207 - if false { - } else { - 
h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv206), d) - } - } - for { - yyj201++ - if yyhl201 { - yyb201 = yyj201 > l - } else { - yyb201 = r.CheckBreak() - } - if yyb201 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj201-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *APIVersion) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym208 := z.EncBinary() - _ = yym208 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep209 := !z.EncBinary() - yy2arr209 := z.EncBasicHandle().StructToArray - var yyq209 [1]bool - _, _, _ = yysep209, yyq209, yy2arr209 - const yyr209 bool = false - yyq209[0] = x.Name != "" - var yynn209 int - if yyr209 || yy2arr209 { - r.EncodeArrayStart(1) - } else { - yynn209 = 0 - for _, b := range yyq209 { - if b { - yynn209++ - } - } - r.EncodeMapStart(yynn209) - yynn209 = 0 - } - if yyr209 || yy2arr209 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq209[0] { - yym211 := z.EncBinary() - _ = yym211 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq209[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym212 := z.EncBinary() - _ = yym212 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr209 || yy2arr209 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *APIVersion) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym213 := z.DecBinary() - _ = yym213 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct214 := r.ContainerType() - if yyct214 == codecSelferValueTypeMap1234 { - yyl214 := r.ReadMapStart() - if yyl214 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl214, d) - } - } else if yyct214 == codecSelferValueTypeArray1234 { - yyl214 := r.ReadArrayStart() - if yyl214 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl214, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *APIVersion) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys215Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys215Slc - var yyhl215 bool = l >= 0 - for yyj215 := 0; ; yyj215++ { - if yyhl215 { - if yyj215 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys215Slc = r.DecodeBytes(yys215Slc, true, true) - yys215 := string(yys215Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys215 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys215) - } // end switch yys215 - } // end for yyj215 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x 
*APIVersion) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj217 int - var yyb217 bool - var yyhl217 bool = l >= 0 - yyj217++ - if yyhl217 { - yyb217 = yyj217 > l - } else { - yyb217 = r.CheckBreak() - } - if yyb217 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - for { - yyj217++ - if yyhl217 { - yyb217 = yyj217 > l - } else { - yyb217 = r.CheckBreak() - } - if yyb217 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj217-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ThirdPartyResourceData) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym219 := z.EncBinary() - _ = yym219 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep220 := !z.EncBinary() - yy2arr220 := z.EncBasicHandle().StructToArray - var yyq220 [4]bool - _, _, _ = yysep220, yyq220, yy2arr220 - const yyr220 bool = false - yyq220[0] = x.Kind != "" - yyq220[1] = x.APIVersion != "" - yyq220[2] = true - yyq220[3] = len(x.Data) != 0 - var yynn220 int - if yyr220 || yy2arr220 { - r.EncodeArrayStart(4) - } else { - yynn220 = 0 - for _, b := range yyq220 { - if b { - yynn220++ - } - } - r.EncodeMapStart(yynn220) - yynn220 = 0 - } - if yyr220 || yy2arr220 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq220[0] { - yym222 := z.EncBinary() - _ = yym222 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq220[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym223 := z.EncBinary() - _ = yym223 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr220 || yy2arr220 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq220[1] { - yym225 := z.EncBinary() - _ = yym225 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq220[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym226 := z.EncBinary() - _ = yym226 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr220 || yy2arr220 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq220[2] { - yy228 := &x.ObjectMeta - yy228.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq220[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy229 := &x.ObjectMeta - yy229.CodecEncodeSelf(e) - } - } - if yyr220 || yy2arr220 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq220[3] { - if x.Data == nil { - r.EncodeNil() - } else { - yym231 := z.EncBinary() - _ = yym231 - if false { 
- } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq220[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("data")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Data == nil { - r.EncodeNil() - } else { - yym232 := z.EncBinary() - _ = yym232 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) - } - } - } - } - if yyr220 || yy2arr220 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ThirdPartyResourceData) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym233 := z.DecBinary() - _ = yym233 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct234 := r.ContainerType() - if yyct234 == codecSelferValueTypeMap1234 { - yyl234 := r.ReadMapStart() - if yyl234 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl234, d) - } - } else if yyct234 == codecSelferValueTypeArray1234 { - yyl234 := r.ReadArrayStart() - if yyl234 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl234, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ThirdPartyResourceData) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys235Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys235Slc - var yyhl235 bool = l >= 0 - for yyj235 := 0; ; yyj235++ { - if yyhl235 { - if yyj235 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys235Slc = r.DecodeBytes(yys235Slc, true, true) - yys235 := string(yys235Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys235 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv238 := &x.ObjectMeta - yyv238.CodecDecodeSelf(d) - } - case "data": - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv239 := &x.Data - yym240 := z.DecBinary() - _ = yym240 - if false { - } else { - *yyv239 = r.DecodeBytes(*(*[]byte)(yyv239), false, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys235) - } // end switch yys235 - } // end for yyj235 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ThirdPartyResourceData) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj241 int - var yyb241 bool - var yyhl241 bool = l >= 0 - yyj241++ - if yyhl241 { - yyb241 = yyj241 > l - } else { - yyb241 = r.CheckBreak() - } - if yyb241 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj241++ - if yyhl241 { - yyb241 = yyj241 > l - } else { - yyb241 = r.CheckBreak() - } - if yyb241 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj241++ - if yyhl241 { - yyb241 = yyj241 > l - } else { - yyb241 = r.CheckBreak() - } - if yyb241 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv244 := &x.ObjectMeta - yyv244.CodecDecodeSelf(d) - } - yyj241++ - if yyhl241 { - yyb241 = yyj241 > l - } else { - yyb241 = r.CheckBreak() - } - if yyb241 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv245 := &x.Data - yym246 := z.DecBinary() - _ = yym246 - if false { - } else { - *yyv245 = r.DecodeBytes(*(*[]byte)(yyv245), false, false) - } - } - for { - yyj241++ - if yyhl241 { - yyb241 = yyj241 > l - } else { - yyb241 = r.CheckBreak() - } - if yyb241 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj241-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Deployment) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym247 := z.EncBinary() - _ = yym247 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep248 := !z.EncBinary() - yy2arr248 := z.EncBasicHandle().StructToArray - var yyq248 [5]bool - _, _, _ = yysep248, yyq248, yy2arr248 - const yyr248 bool = false - yyq248[0] = x.Kind != "" - yyq248[1] = x.APIVersion != "" - yyq248[2] = true - yyq248[3] = true - yyq248[4] = true - var yynn248 int - if yyr248 || yy2arr248 { - r.EncodeArrayStart(5) - } else { - yynn248 = 0 - for _, b := range yyq248 { - if b { - yynn248++ - } - } - r.EncodeMapStart(yynn248) - yynn248 = 0 - } - if yyr248 || yy2arr248 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq248[0] { - yym250 := z.EncBinary() - _ = yym250 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq248[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym251 := z.EncBinary() - _ = yym251 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr248 || yy2arr248 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq248[1] { - yym253 := z.EncBinary() - _ = yym253 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq248[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym254 := z.EncBinary() - _ = yym254 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr248 || yy2arr248 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq248[2] { - yy256 := &x.ObjectMeta - yy256.CodecEncodeSelf(e) - } 
else { - r.EncodeNil() - } - } else { - if yyq248[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy257 := &x.ObjectMeta - yy257.CodecEncodeSelf(e) - } - } - if yyr248 || yy2arr248 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq248[3] { - yy259 := &x.Spec - yy259.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq248[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy260 := &x.Spec - yy260.CodecEncodeSelf(e) - } - } - if yyr248 || yy2arr248 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq248[4] { - yy262 := &x.Status - yy262.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq248[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy263 := &x.Status - yy263.CodecEncodeSelf(e) - } - } - if yyr248 || yy2arr248 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Deployment) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym264 := z.DecBinary() - _ = yym264 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct265 := r.ContainerType() - if yyct265 == codecSelferValueTypeMap1234 { - yyl265 := r.ReadMapStart() - if yyl265 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl265, d) - } - } else if yyct265 == codecSelferValueTypeArray1234 { - yyl265 := r.ReadArrayStart() - if yyl265 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl265, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Deployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys266Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys266Slc - var yyhl266 bool = l >= 0 - for yyj266 := 0; ; yyj266++ { - if yyhl266 { - if yyj266 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys266Slc = r.DecodeBytes(yys266Slc, true, true) - yys266 := string(yys266Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys266 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv269 := &x.ObjectMeta - yyv269.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = DeploymentSpec{} - } else { - yyv270 := &x.Spec - yyv270.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = DeploymentStatus{} - } else { - yyv271 := &x.Status - yyv271.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys266) - } // end switch yys266 - } // end for yyj266 - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj272 int - var yyb272 bool - var yyhl272 bool = l >= 0 - yyj272++ - if yyhl272 { - yyb272 = yyj272 > l - } else { - yyb272 = r.CheckBreak() - } - if yyb272 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj272++ - if yyhl272 { - yyb272 = yyj272 > l - } else { - yyb272 = r.CheckBreak() - } - if yyb272 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj272++ - if yyhl272 { - yyb272 = yyj272 > l - } else { - yyb272 = r.CheckBreak() - } - if yyb272 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv275 := &x.ObjectMeta - yyv275.CodecDecodeSelf(d) - } - yyj272++ - if yyhl272 { - yyb272 = yyj272 > l - } else { - yyb272 = r.CheckBreak() - } - if yyb272 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = DeploymentSpec{} - } else { - yyv276 := &x.Spec - yyv276.CodecDecodeSelf(d) - } - yyj272++ - if yyhl272 { - yyb272 = yyj272 > l - } else { - yyb272 = r.CheckBreak() - } - if yyb272 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = DeploymentStatus{} - } else { - yyv277 := &x.Status - yyv277.CodecDecodeSelf(d) - } - for { - yyj272++ - if yyhl272 { - yyb272 = yyj272 > l - } else { - yyb272 = r.CheckBreak() - } - if yyb272 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj272-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym278 := z.EncBinary() - _ = yym278 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep279 := !z.EncBinary() - yy2arr279 := z.EncBasicHandle().StructToArray - var yyq279 [9]bool - _, _, _ = yysep279, yyq279, yy2arr279 - const yyr279 bool = false - yyq279[0] = x.Replicas != 0 - yyq279[1] = x.Selector != nil - yyq279[3] = true - yyq279[4] = x.MinReadySeconds != 0 - yyq279[5] = x.RevisionHistoryLimit != nil - yyq279[6] = x.Paused != false - yyq279[7] = x.RollbackTo != nil - yyq279[8] = x.ProgressDeadlineSeconds != nil - var yynn279 int - if yyr279 || yy2arr279 { - r.EncodeArrayStart(9) - } else { - yynn279 = 1 - for _, b := range yyq279 { - if b { - yynn279++ - } - } - r.EncodeMapStart(yynn279) - yynn279 = 0 - } - if yyr279 || yy2arr279 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq279[0] { - yym281 := z.EncBinary() - _ = yym281 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq279[0] { 
- z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym282 := z.EncBinary() - _ = yym282 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - } - if yyr279 || yy2arr279 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq279[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym284 := z.EncBinary() - _ = yym284 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq279[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym285 := z.EncBinary() - _ = yym285 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr279 || yy2arr279 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy287 := &x.Template - yy287.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy288 := &x.Template - yy288.CodecEncodeSelf(e) - } - if yyr279 || yy2arr279 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq279[3] { - yy290 := &x.Strategy - yy290.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq279[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("strategy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy291 := &x.Strategy - yy291.CodecEncodeSelf(e) - } - } - if yyr279 || yy2arr279 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq279[4] { - yym293 := z.EncBinary() - _ = yym293 - if false { - } else { - r.EncodeInt(int64(x.MinReadySeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq279[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym294 := z.EncBinary() - _ = yym294 - if false { - } else { - r.EncodeInt(int64(x.MinReadySeconds)) - } - } - } - if yyr279 || yy2arr279 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq279[5] { - if x.RevisionHistoryLimit == nil { - r.EncodeNil() - } else { - yy296 := *x.RevisionHistoryLimit - yym297 := z.EncBinary() - _ = yym297 - if false { - } else { - r.EncodeInt(int64(yy296)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq279[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("revisionHistoryLimit")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RevisionHistoryLimit == nil { - r.EncodeNil() - } else { - yy298 := *x.RevisionHistoryLimit - yym299 := z.EncBinary() - _ = yym299 - if false { - } else { - r.EncodeInt(int64(yy298)) - } - } - } - } - if yyr279 || yy2arr279 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq279[6] { - yym301 := z.EncBinary() - _ = yym301 - if false { - } else { - r.EncodeBool(bool(x.Paused)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq279[6] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("paused")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym302 := z.EncBinary() - _ = yym302 - if false { - } else { - r.EncodeBool(bool(x.Paused)) - } - } - } - if yyr279 || yy2arr279 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq279[7] { - if x.RollbackTo == nil { - r.EncodeNil() - } else { - x.RollbackTo.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq279[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RollbackTo == nil { - r.EncodeNil() - } else { - x.RollbackTo.CodecEncodeSelf(e) - } - } - } - if yyr279 || yy2arr279 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq279[8] { - if x.ProgressDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy305 := *x.ProgressDeadlineSeconds - yym306 := z.EncBinary() - _ = yym306 - if false { - } else { - r.EncodeInt(int64(yy305)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq279[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("progressDeadlineSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ProgressDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy307 := *x.ProgressDeadlineSeconds - yym308 := z.EncBinary() - _ = yym308 - if false { - } else { - r.EncodeInt(int64(yy307)) - } - } - } - } - if yyr279 || yy2arr279 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym309 := z.DecBinary() - _ = yym309 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct310 := r.ContainerType() - if yyct310 == codecSelferValueTypeMap1234 { - yyl310 := r.ReadMapStart() - if yyl310 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl310, d) - } - } else if yyct310 == codecSelferValueTypeArray1234 { - yyl310 := r.ReadArrayStart() - if yyl310 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl310, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys311Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys311Slc - var yyhl311 bool = l >= 0 - for yyj311 := 0; ; yyj311++ { - if yyhl311 { - if yyj311 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys311Slc = r.DecodeBytes(yys311Slc, true, true) - yys311 := string(yys311Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys311 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym314 := z.DecBinary() - _ = yym314 - if false { - } else 
if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv315 := &x.Template - yyv315.CodecDecodeSelf(d) - } - case "strategy": - if r.TryDecodeAsNil() { - x.Strategy = DeploymentStrategy{} - } else { - yyv316 := &x.Strategy - yyv316.CodecDecodeSelf(d) - } - case "minReadySeconds": - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - x.MinReadySeconds = int32(r.DecodeInt(32)) - } - case "revisionHistoryLimit": - if r.TryDecodeAsNil() { - if x.RevisionHistoryLimit != nil { - x.RevisionHistoryLimit = nil - } - } else { - if x.RevisionHistoryLimit == nil { - x.RevisionHistoryLimit = new(int32) - } - yym319 := z.DecBinary() - _ = yym319 - if false { - } else { - *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) - } - } - case "paused": - if r.TryDecodeAsNil() { - x.Paused = false - } else { - x.Paused = bool(r.DecodeBool()) - } - case "rollbackTo": - if r.TryDecodeAsNil() { - if x.RollbackTo != nil { - x.RollbackTo = nil - } - } else { - if x.RollbackTo == nil { - x.RollbackTo = new(RollbackConfig) - } - x.RollbackTo.CodecDecodeSelf(d) - } - case "progressDeadlineSeconds": - if r.TryDecodeAsNil() { - if x.ProgressDeadlineSeconds != nil { - x.ProgressDeadlineSeconds = nil - } - } else { - if x.ProgressDeadlineSeconds == nil { - x.ProgressDeadlineSeconds = new(int32) - } - yym323 := z.DecBinary() - _ = yym323 - if false { - } else { - *((*int32)(x.ProgressDeadlineSeconds)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys311) - } // end switch yys311 - } // end for yyj311 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj324 int - var yyb324 bool - var yyhl324 bool = l >= 0 - yyj324++ - if yyhl324 { - yyb324 = yyj324 > l - } else { - yyb324 = r.CheckBreak() - } - if yyb324 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - yyj324++ - if yyhl324 { - yyb324 = yyj324 > l - } else { - yyb324 = r.CheckBreak() - } - if yyb324 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym327 := z.DecBinary() - _ = yym327 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj324++ - if yyhl324 { - yyb324 = yyj324 > l - } else { - yyb324 = r.CheckBreak() - } - if yyb324 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv328 := &x.Template - yyv328.CodecDecodeSelf(d) - } - yyj324++ - if yyhl324 { - yyb324 = yyj324 > l - } else { - yyb324 = r.CheckBreak() - } - if yyb324 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Strategy = 
DeploymentStrategy{} - } else { - yyv329 := &x.Strategy - yyv329.CodecDecodeSelf(d) - } - yyj324++ - if yyhl324 { - yyb324 = yyj324 > l - } else { - yyb324 = r.CheckBreak() - } - if yyb324 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - x.MinReadySeconds = int32(r.DecodeInt(32)) - } - yyj324++ - if yyhl324 { - yyb324 = yyj324 > l - } else { - yyb324 = r.CheckBreak() - } - if yyb324 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RevisionHistoryLimit != nil { - x.RevisionHistoryLimit = nil - } - } else { - if x.RevisionHistoryLimit == nil { - x.RevisionHistoryLimit = new(int32) - } - yym332 := z.DecBinary() - _ = yym332 - if false { - } else { - *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) - } - } - yyj324++ - if yyhl324 { - yyb324 = yyj324 > l - } else { - yyb324 = r.CheckBreak() - } - if yyb324 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Paused = false - } else { - x.Paused = bool(r.DecodeBool()) - } - yyj324++ - if yyhl324 { - yyb324 = yyj324 > l - } else { - yyb324 = r.CheckBreak() - } - if yyb324 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RollbackTo != nil { - x.RollbackTo = nil - } - } else { - if x.RollbackTo == nil { - x.RollbackTo = new(RollbackConfig) - } - x.RollbackTo.CodecDecodeSelf(d) - } - yyj324++ - if yyhl324 { - yyb324 = yyj324 > l - } else { - yyb324 = r.CheckBreak() - } - if yyb324 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ProgressDeadlineSeconds != nil { - x.ProgressDeadlineSeconds = nil - } - } else { - if x.ProgressDeadlineSeconds == nil { - x.ProgressDeadlineSeconds = new(int32) - } - yym336 := z.DecBinary() - _ = yym336 - if false { - } else { - *((*int32)(x.ProgressDeadlineSeconds)) = int32(r.DecodeInt(32)) - } - } - for { - yyj324++ - if yyhl324 { - yyb324 = yyj324 > l - } else { - yyb324 = r.CheckBreak() - } - if yyb324 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj324-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentRollback) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym337 := z.EncBinary() - _ = yym337 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep338 := !z.EncBinary() - yy2arr338 := z.EncBasicHandle().StructToArray - var yyq338 [5]bool - _, _, _ = yysep338, yyq338, yy2arr338 - const yyr338 bool = false - yyq338[0] = x.Kind != "" - yyq338[1] = x.APIVersion != "" - yyq338[3] = len(x.UpdatedAnnotations) != 0 - var yynn338 int - if yyr338 || yy2arr338 { - r.EncodeArrayStart(5) - } else { - yynn338 = 2 - for _, b := range yyq338 { - if b { - yynn338++ - } - } - r.EncodeMapStart(yynn338) - yynn338 = 0 - } - if yyr338 || yy2arr338 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq338[0] { - yym340 := z.EncBinary() - _ = 
yym340 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq338[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym341 := z.EncBinary() - _ = yym341 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr338 || yy2arr338 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq338[1] { - yym343 := z.EncBinary() - _ = yym343 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq338[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym344 := z.EncBinary() - _ = yym344 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr338 || yy2arr338 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym346 := z.EncBinary() - _ = yym346 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym347 := z.EncBinary() - _ = yym347 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr338 || yy2arr338 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq338[3] { - if x.UpdatedAnnotations == nil { - r.EncodeNil() - } else { - yym349 := z.EncBinary() - _ = yym349 - if false { - } else { - z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq338[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("updatedAnnotations")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.UpdatedAnnotations == nil { - r.EncodeNil() - } else { - yym350 := z.EncBinary() - _ = yym350 - if false { - } else { - z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) - } - } - } - } - if yyr338 || yy2arr338 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy352 := &x.RollbackTo - yy352.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy353 := &x.RollbackTo - yy353.CodecEncodeSelf(e) - } - if yyr338 || yy2arr338 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentRollback) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym354 := z.DecBinary() - _ = yym354 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct355 := r.ContainerType() - if yyct355 == codecSelferValueTypeMap1234 { - yyl355 := r.ReadMapStart() - if yyl355 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl355, d) - } - } else if yyct355 == codecSelferValueTypeArray1234 { - yyl355 := r.ReadArrayStart() - if 
yyl355 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl355, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentRollback) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys356Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys356Slc - var yyhl356 bool = l >= 0 - for yyj356 := 0; ; yyj356++ { - if yyhl356 { - if yyj356 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys356Slc = r.DecodeBytes(yys356Slc, true, true) - yys356 := string(yys356Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys356 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "updatedAnnotations": - if r.TryDecodeAsNil() { - x.UpdatedAnnotations = nil - } else { - yyv360 := &x.UpdatedAnnotations - yym361 := z.DecBinary() - _ = yym361 - if false { - } else { - z.F.DecMapStringStringX(yyv360, false, d) - } - } - case "rollbackTo": - if r.TryDecodeAsNil() { - x.RollbackTo = RollbackConfig{} - } else { - yyv362 := &x.RollbackTo - yyv362.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys356) - } // end switch yys356 - } // end for yyj356 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentRollback) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj363 int - var yyb363 bool - var yyhl363 bool = l >= 0 - yyj363++ - if yyhl363 { - yyb363 = yyj363 > l - } else { - yyb363 = r.CheckBreak() - } - if yyb363 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj363++ - if yyhl363 { - yyb363 = yyj363 > l - } else { - yyb363 = r.CheckBreak() - } - if yyb363 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj363++ - if yyhl363 { - yyb363 = yyj363 > l - } else { - yyb363 = r.CheckBreak() - } - if yyb363 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj363++ - if yyhl363 { - yyb363 = yyj363 > l - } else { - yyb363 = r.CheckBreak() - } - if yyb363 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UpdatedAnnotations = nil - } else { - yyv367 := &x.UpdatedAnnotations - yym368 := z.DecBinary() - _ = yym368 - if false { - } else { - z.F.DecMapStringStringX(yyv367, false, d) - } - } - yyj363++ - if yyhl363 { - yyb363 = yyj363 > l - } else { - yyb363 = r.CheckBreak() - } - if yyb363 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RollbackTo = RollbackConfig{} - } else { - yyv369 := &x.RollbackTo - yyv369.CodecDecodeSelf(d) - } - for { - yyj363++ - if yyhl363 { - yyb363 = yyj363 > l - } else { - yyb363 = r.CheckBreak() - } - if yyb363 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj363-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RollbackConfig) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym370 := z.EncBinary() - _ = yym370 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep371 := !z.EncBinary() - yy2arr371 := z.EncBasicHandle().StructToArray - var yyq371 [1]bool - _, _, _ = yysep371, yyq371, yy2arr371 - const yyr371 bool = false - yyq371[0] = x.Revision != 0 - var yynn371 int - if yyr371 || yy2arr371 { - r.EncodeArrayStart(1) - } else { - yynn371 = 0 - for _, b := range yyq371 { - if b { - yynn371++ - } - } - r.EncodeMapStart(yynn371) - yynn371 = 0 - } - if yyr371 || yy2arr371 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq371[0] { - yym373 := z.EncBinary() - _ = yym373 - if false { - } else { - r.EncodeInt(int64(x.Revision)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq371[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("revision")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym374 := z.EncBinary() - _ = yym374 - if false { - } else { - r.EncodeInt(int64(x.Revision)) - } - } - } - if yyr371 || yy2arr371 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RollbackConfig) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym375 := z.DecBinary() - _ = yym375 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct376 := r.ContainerType() - if yyct376 == codecSelferValueTypeMap1234 { - yyl376 := r.ReadMapStart() - if yyl376 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl376, d) - } - } else if yyct376 == codecSelferValueTypeArray1234 { - yyl376 := r.ReadArrayStart() - if yyl376 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl376, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RollbackConfig) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys377Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys377Slc - var yyhl377 bool = l >= 0 - for yyj377 := 0; ; yyj377++ { - if yyhl377 { - if yyj377 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys377Slc = r.DecodeBytes(yys377Slc, true, true) - yys377 := string(yys377Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys377 { - case "revision": - if r.TryDecodeAsNil() { - x.Revision = 0 - } else { - x.Revision = int64(r.DecodeInt(64)) - } - default: - 
z.DecStructFieldNotFound(-1, yys377) - } // end switch yys377 - } // end for yyj377 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RollbackConfig) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj379 int - var yyb379 bool - var yyhl379 bool = l >= 0 - yyj379++ - if yyhl379 { - yyb379 = yyj379 > l - } else { - yyb379 = r.CheckBreak() - } - if yyb379 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Revision = 0 - } else { - x.Revision = int64(r.DecodeInt(64)) - } - for { - yyj379++ - if yyhl379 { - yyb379 = yyj379 > l - } else { - yyb379 = r.CheckBreak() - } - if yyb379 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj379-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentStrategy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym381 := z.EncBinary() - _ = yym381 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep382 := !z.EncBinary() - yy2arr382 := z.EncBasicHandle().StructToArray - var yyq382 [2]bool - _, _, _ = yysep382, yyq382, yy2arr382 - const yyr382 bool = false - yyq382[0] = x.Type != "" - yyq382[1] = x.RollingUpdate != nil - var yynn382 int - if yyr382 || yy2arr382 { - r.EncodeArrayStart(2) - } else { - yynn382 = 0 - for _, b := range yyq382 { - if b { - yynn382++ - } - } - r.EncodeMapStart(yynn382) - yynn382 = 0 - } - if yyr382 || yy2arr382 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq382[0] { - x.Type.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq382[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - } - if yyr382 || yy2arr382 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq382[1] { - if x.RollingUpdate == nil { - r.EncodeNil() - } else { - x.RollingUpdate.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq382[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rollingUpdate")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RollingUpdate == nil { - r.EncodeNil() - } else { - x.RollingUpdate.CodecEncodeSelf(e) - } - } - } - if yyr382 || yy2arr382 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentStrategy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym385 := z.DecBinary() - _ = yym385 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct386 := r.ContainerType() - if yyct386 == codecSelferValueTypeMap1234 { - yyl386 := r.ReadMapStart() - if yyl386 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl386, d) - } - } else if yyct386 == codecSelferValueTypeArray1234 { - yyl386 := r.ReadArrayStart() - if yyl386 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl386, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys387Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys387Slc - var yyhl387 bool = l >= 0 - for yyj387 := 0; ; yyj387++ { - if yyhl387 { - if yyj387 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys387Slc = r.DecodeBytes(yys387Slc, true, true) - yys387 := string(yys387Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys387 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = DeploymentStrategyType(r.DecodeString()) - } - case "rollingUpdate": - if r.TryDecodeAsNil() { - if x.RollingUpdate != nil { - x.RollingUpdate = nil - } - } else { - if x.RollingUpdate == nil { - x.RollingUpdate = new(RollingUpdateDeployment) - } - x.RollingUpdate.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys387) - } // end switch yys387 - } // end for yyj387 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentStrategy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj390 int - var yyb390 bool - var yyhl390 bool = l >= 0 - yyj390++ - if yyhl390 { - yyb390 = yyj390 > l - } else { - yyb390 = r.CheckBreak() - } - if yyb390 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = DeploymentStrategyType(r.DecodeString()) - } - yyj390++ - if yyhl390 { - yyb390 = yyj390 > l - } else { - yyb390 = r.CheckBreak() - } - if yyb390 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RollingUpdate != nil { - x.RollingUpdate = nil - } - } else { - if x.RollingUpdate == nil { - x.RollingUpdate = new(RollingUpdateDeployment) - } - x.RollingUpdate.CodecDecodeSelf(d) - } - for { - yyj390++ - if yyhl390 { - yyb390 = yyj390 > l - } else { - yyb390 = r.CheckBreak() - } - if yyb390 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj390-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x DeploymentStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym393 := z.EncBinary() - _ = yym393 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *DeploymentStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym394 := z.DecBinary() - _ = yym394 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *RollingUpdateDeployment) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym395 := z.EncBinary() - _ 
= yym395 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep396 := !z.EncBinary() - yy2arr396 := z.EncBasicHandle().StructToArray - var yyq396 [2]bool - _, _, _ = yysep396, yyq396, yy2arr396 - const yyr396 bool = false - yyq396[0] = true - yyq396[1] = true - var yynn396 int - if yyr396 || yy2arr396 { - r.EncodeArrayStart(2) - } else { - yynn396 = 0 - for _, b := range yyq396 { - if b { - yynn396++ - } - } - r.EncodeMapStart(yynn396) - yynn396 = 0 - } - if yyr396 || yy2arr396 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq396[0] { - yy398 := &x.MaxUnavailable - yym399 := z.EncBinary() - _ = yym399 - if false { - } else if z.HasExtensions() && z.EncExt(yy398) { - } else if !yym399 && z.IsJSONHandle() { - z.EncJSONMarshal(yy398) - } else { - z.EncFallback(yy398) - } - } else { - r.EncodeNil() - } - } else { - if yyq396[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxUnavailable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy400 := &x.MaxUnavailable - yym401 := z.EncBinary() - _ = yym401 - if false { - } else if z.HasExtensions() && z.EncExt(yy400) { - } else if !yym401 && z.IsJSONHandle() { - z.EncJSONMarshal(yy400) - } else { - z.EncFallback(yy400) - } - } - } - if yyr396 || yy2arr396 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq396[1] { - yy403 := &x.MaxSurge - yym404 := z.EncBinary() - _ = yym404 - if false { - } else if z.HasExtensions() && z.EncExt(yy403) { - } else if !yym404 && z.IsJSONHandle() { - z.EncJSONMarshal(yy403) - } else { - z.EncFallback(yy403) - } - } else { - r.EncodeNil() - } - } else { - if yyq396[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxSurge")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy405 := &x.MaxSurge - yym406 := z.EncBinary() - _ = yym406 - if false { - } else if z.HasExtensions() && z.EncExt(yy405) { - } else if !yym406 && z.IsJSONHandle() { - z.EncJSONMarshal(yy405) - } else { - z.EncFallback(yy405) - } - } - } - if yyr396 || yy2arr396 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RollingUpdateDeployment) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym407 := z.DecBinary() - _ = yym407 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct408 := r.ContainerType() - if yyct408 == codecSelferValueTypeMap1234 { - yyl408 := r.ReadMapStart() - if yyl408 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl408, d) - } - } else if yyct408 == codecSelferValueTypeArray1234 { - yyl408 := r.ReadArrayStart() - if yyl408 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl408, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RollingUpdateDeployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys409Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys409Slc - var yyhl409 bool = l >= 0 - for yyj409 := 0; ; yyj409++ { - if yyhl409 { - if yyj409 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys409Slc = r.DecodeBytes(yys409Slc, true, true) - yys409 := string(yys409Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys409 { - case "maxUnavailable": - if r.TryDecodeAsNil() { - x.MaxUnavailable = pkg5_intstr.IntOrString{} - } else { - yyv410 := &x.MaxUnavailable - yym411 := z.DecBinary() - _ = yym411 - if false { - } else if z.HasExtensions() && z.DecExt(yyv410) { - } else if !yym411 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv410) - } else { - z.DecFallback(yyv410, false) - } - } - case "maxSurge": - if r.TryDecodeAsNil() { - x.MaxSurge = pkg5_intstr.IntOrString{} - } else { - yyv412 := &x.MaxSurge - yym413 := z.DecBinary() - _ = yym413 - if false { - } else if z.HasExtensions() && z.DecExt(yyv412) { - } else if !yym413 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv412) - } else { - z.DecFallback(yyv412, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys409) - } // end switch yys409 - } // end for yyj409 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RollingUpdateDeployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj414 int - var yyb414 bool - var yyhl414 bool = l >= 0 - yyj414++ - if yyhl414 { - yyb414 = yyj414 > l - } else { - yyb414 = r.CheckBreak() - } - if yyb414 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxUnavailable = pkg5_intstr.IntOrString{} - } else { - yyv415 := &x.MaxUnavailable - yym416 := z.DecBinary() - _ = yym416 - if false { - } else if z.HasExtensions() && z.DecExt(yyv415) { - } else if !yym416 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv415) - } else { - z.DecFallback(yyv415, false) - } - } - yyj414++ - if yyhl414 { - yyb414 = yyj414 > l - } else { - yyb414 = r.CheckBreak() - } - if yyb414 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxSurge = pkg5_intstr.IntOrString{} - } else { - yyv417 := &x.MaxSurge - yym418 := z.DecBinary() - _ = yym418 - if false { - } else if z.HasExtensions() && z.DecExt(yyv417) { - } else if !yym418 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv417) - } else { - z.DecFallback(yyv417, false) - } - } - for { - yyj414++ - if yyhl414 { - yyb414 = yyj414 > l - } else { - yyb414 = r.CheckBreak() - } - if yyb414 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj414-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym419 := z.EncBinary() - _ = yym419 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep420 := !z.EncBinary() - yy2arr420 := z.EncBasicHandle().StructToArray - var yyq420 [6]bool - _, _, _ = yysep420, yyq420, yy2arr420 - const yyr420 bool = false - yyq420[0] = x.ObservedGeneration != 0 - yyq420[1] = x.Replicas != 0 - yyq420[2] = x.UpdatedReplicas != 0 - yyq420[3] = x.AvailableReplicas != 0 - yyq420[4] = x.UnavailableReplicas != 0 - yyq420[5] = len(x.Conditions) != 0 - var yynn420 int - if yyr420 || yy2arr420 { - r.EncodeArrayStart(6) - } else { - yynn420 = 0 
- for _, b := range yyq420 { - if b { - yynn420++ - } - } - r.EncodeMapStart(yynn420) - yynn420 = 0 - } - if yyr420 || yy2arr420 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq420[0] { - yym422 := z.EncBinary() - _ = yym422 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq420[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym423 := z.EncBinary() - _ = yym423 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } - } - if yyr420 || yy2arr420 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq420[1] { - yym425 := z.EncBinary() - _ = yym425 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq420[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym426 := z.EncBinary() - _ = yym426 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - } - if yyr420 || yy2arr420 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq420[2] { - yym428 := z.EncBinary() - _ = yym428 - if false { - } else { - r.EncodeInt(int64(x.UpdatedReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq420[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("updatedReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym429 := z.EncBinary() - _ = yym429 - if false { - } else { - r.EncodeInt(int64(x.UpdatedReplicas)) - } - } - } - if yyr420 || yy2arr420 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq420[3] { - yym431 := z.EncBinary() - _ = yym431 - if false { - } else { - r.EncodeInt(int64(x.AvailableReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq420[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym432 := z.EncBinary() - _ = yym432 - if false { - } else { - r.EncodeInt(int64(x.AvailableReplicas)) - } - } - } - if yyr420 || yy2arr420 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq420[4] { - yym434 := z.EncBinary() - _ = yym434 - if false { - } else { - r.EncodeInt(int64(x.UnavailableReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq420[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("unavailableReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym435 := z.EncBinary() - _ = yym435 - if false { - } else { - r.EncodeInt(int64(x.UnavailableReplicas)) - } - } - } - if yyr420 || yy2arr420 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq420[5] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym437 := z.EncBinary() - _ = yym437 - if false { - } else { - h.encSliceDeploymentCondition(([]DeploymentCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq420[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == 
nil { - r.EncodeNil() - } else { - yym438 := z.EncBinary() - _ = yym438 - if false { - } else { - h.encSliceDeploymentCondition(([]DeploymentCondition)(x.Conditions), e) - } - } - } - } - if yyr420 || yy2arr420 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym439 := z.DecBinary() - _ = yym439 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct440 := r.ContainerType() - if yyct440 == codecSelferValueTypeMap1234 { - yyl440 := r.ReadMapStart() - if yyl440 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl440, d) - } - } else if yyct440 == codecSelferValueTypeArray1234 { - yyl440 := r.ReadArrayStart() - if yyl440 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl440, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys441Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys441Slc - var yyhl441 bool = l >= 0 - for yyj441 := 0; ; yyj441++ { - if yyhl441 { - if yyj441 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys441Slc = r.DecodeBytes(yys441Slc, true, true) - yys441 := string(yys441Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys441 { - case "observedGeneration": - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) - } - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "updatedReplicas": - if r.TryDecodeAsNil() { - x.UpdatedReplicas = 0 - } else { - x.UpdatedReplicas = int32(r.DecodeInt(32)) - } - case "availableReplicas": - if r.TryDecodeAsNil() { - x.AvailableReplicas = 0 - } else { - x.AvailableReplicas = int32(r.DecodeInt(32)) - } - case "unavailableReplicas": - if r.TryDecodeAsNil() { - x.UnavailableReplicas = 0 - } else { - x.UnavailableReplicas = int32(r.DecodeInt(32)) - } - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv447 := &x.Conditions - yym448 := z.DecBinary() - _ = yym448 - if false { - } else { - h.decSliceDeploymentCondition((*[]DeploymentCondition)(yyv447), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys441) - } // end switch yys441 - } // end for yyj441 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj449 int - var yyb449 bool - var yyhl449 bool = l >= 0 - yyj449++ - if yyhl449 { - yyb449 = yyj449 > l - } else { - yyb449 = r.CheckBreak() - } - if yyb449 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) - } - yyj449++ - if yyhl449 { - yyb449 = yyj449 > l - } else { - yyb449 = r.CheckBreak() - } 
- if yyb449 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - yyj449++ - if yyhl449 { - yyb449 = yyj449 > l - } else { - yyb449 = r.CheckBreak() - } - if yyb449 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UpdatedReplicas = 0 - } else { - x.UpdatedReplicas = int32(r.DecodeInt(32)) - } - yyj449++ - if yyhl449 { - yyb449 = yyj449 > l - } else { - yyb449 = r.CheckBreak() - } - if yyb449 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AvailableReplicas = 0 - } else { - x.AvailableReplicas = int32(r.DecodeInt(32)) - } - yyj449++ - if yyhl449 { - yyb449 = yyj449 > l - } else { - yyb449 = r.CheckBreak() - } - if yyb449 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UnavailableReplicas = 0 - } else { - x.UnavailableReplicas = int32(r.DecodeInt(32)) - } - yyj449++ - if yyhl449 { - yyb449 = yyj449 > l - } else { - yyb449 = r.CheckBreak() - } - if yyb449 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv455 := &x.Conditions - yym456 := z.DecBinary() - _ = yym456 - if false { - } else { - h.decSliceDeploymentCondition((*[]DeploymentCondition)(yyv455), d) - } - } - for { - yyj449++ - if yyhl449 { - yyb449 = yyj449 > l - } else { - yyb449 = r.CheckBreak() - } - if yyb449 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj449-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x DeploymentConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym457 := z.EncBinary() - _ = yym457 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *DeploymentConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym458 := z.DecBinary() - _ = yym458 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *DeploymentCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym459 := z.EncBinary() - _ = yym459 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep460 := !z.EncBinary() - yy2arr460 := z.EncBasicHandle().StructToArray - var yyq460 [6]bool - _, _, _ = yysep460, yyq460, yy2arr460 - const yyr460 bool = false - yyq460[2] = true - yyq460[3] = true - yyq460[4] = x.Reason != "" - yyq460[5] = x.Message != "" - var yynn460 int - if yyr460 || yy2arr460 { - r.EncodeArrayStart(6) - } else { - yynn460 = 2 - for _, b := range yyq460 { - if b { - yynn460++ - } - } - r.EncodeMapStart(yynn460) - yynn460 = 0 - } - if yyr460 || yy2arr460 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr460 || yy2arr460 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym463 := z.EncBinary() - _ = yym463 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym464 := z.EncBinary() - _ = yym464 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } - } - if yyr460 || yy2arr460 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq460[2] { - yy466 := &x.LastUpdateTime - yym467 := z.EncBinary() - _ = yym467 - if false { - } else if z.HasExtensions() && z.EncExt(yy466) { - } else if yym467 { - z.EncBinaryMarshal(yy466) - } else if !yym467 && z.IsJSONHandle() { - z.EncJSONMarshal(yy466) - } else { - z.EncFallback(yy466) - } - } else { - r.EncodeNil() - } - } else { - if yyq460[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastUpdateTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy468 := &x.LastUpdateTime - yym469 := z.EncBinary() - _ = yym469 - if false { - } else if z.HasExtensions() && z.EncExt(yy468) { - } else if yym469 { - z.EncBinaryMarshal(yy468) - } else if !yym469 && z.IsJSONHandle() { - z.EncJSONMarshal(yy468) - } else { - z.EncFallback(yy468) - } - } - } - if yyr460 || yy2arr460 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq460[3] { - yy471 := &x.LastTransitionTime - yym472 := z.EncBinary() - _ = yym472 - if false { - } else if z.HasExtensions() && z.EncExt(yy471) { - } else if yym472 { - z.EncBinaryMarshal(yy471) - } else if !yym472 && z.IsJSONHandle() { - z.EncJSONMarshal(yy471) - } else { - z.EncFallback(yy471) - } - } else { - r.EncodeNil() - } - } else { - if yyq460[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy473 := &x.LastTransitionTime - yym474 := z.EncBinary() - _ = yym474 - if false { - } else if z.HasExtensions() && z.EncExt(yy473) { - } else if yym474 { - z.EncBinaryMarshal(yy473) - } else if !yym474 && z.IsJSONHandle() { - z.EncJSONMarshal(yy473) - } else { - z.EncFallback(yy473) - } - } - } - if yyr460 || yy2arr460 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq460[4] { - yym476 := z.EncBinary() - _ = yym476 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq460[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym477 := z.EncBinary() - _ = yym477 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr460 || yy2arr460 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if 
yyq460[5] { - yym479 := z.EncBinary() - _ = yym479 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq460[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym480 := z.EncBinary() - _ = yym480 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr460 || yy2arr460 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym481 := z.DecBinary() - _ = yym481 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct482 := r.ContainerType() - if yyct482 == codecSelferValueTypeMap1234 { - yyl482 := r.ReadMapStart() - if yyl482 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl482, d) - } - } else if yyct482 == codecSelferValueTypeArray1234 { - yyl482 := r.ReadArrayStart() - if yyl482 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl482, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys483Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys483Slc - var yyhl483 bool = l >= 0 - for yyj483 := 0; ; yyj483++ { - if yyhl483 { - if yyj483 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys483Slc = r.DecodeBytes(yys483Slc, true, true) - yys483 := string(yys483Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys483 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = DeploymentConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg2_api.ConditionStatus(r.DecodeString()) - } - case "lastUpdateTime": - if r.TryDecodeAsNil() { - x.LastUpdateTime = pkg1_unversioned.Time{} - } else { - yyv486 := &x.LastUpdateTime - yym487 := z.DecBinary() - _ = yym487 - if false { - } else if z.HasExtensions() && z.DecExt(yyv486) { - } else if yym487 { - z.DecBinaryUnmarshal(yyv486) - } else if !yym487 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv486) - } else { - z.DecFallback(yyv486, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} - } else { - yyv488 := &x.LastTransitionTime - yym489 := z.DecBinary() - _ = yym489 - if false { - } else if z.HasExtensions() && z.DecExt(yyv488) { - } else if yym489 { - z.DecBinaryUnmarshal(yyv488) - } else if !yym489 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv488) - } else { - z.DecFallback(yyv488, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys483) - } // end switch yys483 - } // end 
for yyj483 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj492 int - var yyb492 bool - var yyhl492 bool = l >= 0 - yyj492++ - if yyhl492 { - yyb492 = yyj492 > l - } else { - yyb492 = r.CheckBreak() - } - if yyb492 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = DeploymentConditionType(r.DecodeString()) - } - yyj492++ - if yyhl492 { - yyb492 = yyj492 > l - } else { - yyb492 = r.CheckBreak() - } - if yyb492 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg2_api.ConditionStatus(r.DecodeString()) - } - yyj492++ - if yyhl492 { - yyb492 = yyj492 > l - } else { - yyb492 = r.CheckBreak() - } - if yyb492 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastUpdateTime = pkg1_unversioned.Time{} - } else { - yyv495 := &x.LastUpdateTime - yym496 := z.DecBinary() - _ = yym496 - if false { - } else if z.HasExtensions() && z.DecExt(yyv495) { - } else if yym496 { - z.DecBinaryUnmarshal(yyv495) - } else if !yym496 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv495) - } else { - z.DecFallback(yyv495, false) - } - } - yyj492++ - if yyhl492 { - yyb492 = yyj492 > l - } else { - yyb492 = r.CheckBreak() - } - if yyb492 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} - } else { - yyv497 := &x.LastTransitionTime - yym498 := z.DecBinary() - _ = yym498 - if false { - } else if z.HasExtensions() && z.DecExt(yyv497) { - } else if yym498 { - z.DecBinaryUnmarshal(yyv497) - } else if !yym498 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv497) - } else { - z.DecFallback(yyv497, false) - } - } - yyj492++ - if yyhl492 { - yyb492 = yyj492 > l - } else { - yyb492 = r.CheckBreak() - } - if yyb492 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj492++ - if yyhl492 { - yyb492 = yyj492 > l - } else { - yyb492 = r.CheckBreak() - } - if yyb492 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj492++ - if yyhl492 { - yyb492 = yyj492 > l - } else { - yyb492 = r.CheckBreak() - } - if yyb492 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj492-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym501 := z.EncBinary() - _ = yym501 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep502 := 
!z.EncBinary() - yy2arr502 := z.EncBasicHandle().StructToArray - var yyq502 [4]bool - _, _, _ = yysep502, yyq502, yy2arr502 - const yyr502 bool = false - yyq502[0] = x.Kind != "" - yyq502[1] = x.APIVersion != "" - yyq502[2] = true - var yynn502 int - if yyr502 || yy2arr502 { - r.EncodeArrayStart(4) - } else { - yynn502 = 1 - for _, b := range yyq502 { - if b { - yynn502++ - } - } - r.EncodeMapStart(yynn502) - yynn502 = 0 - } - if yyr502 || yy2arr502 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq502[0] { - yym504 := z.EncBinary() - _ = yym504 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq502[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym505 := z.EncBinary() - _ = yym505 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr502 || yy2arr502 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq502[1] { - yym507 := z.EncBinary() - _ = yym507 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq502[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym508 := z.EncBinary() - _ = yym508 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr502 || yy2arr502 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq502[2] { - yy510 := &x.ListMeta - yym511 := z.EncBinary() - _ = yym511 - if false { - } else if z.HasExtensions() && z.EncExt(yy510) { - } else { - z.EncFallback(yy510) - } - } else { - r.EncodeNil() - } - } else { - if yyq502[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy512 := &x.ListMeta - yym513 := z.EncBinary() - _ = yym513 - if false { - } else if z.HasExtensions() && z.EncExt(yy512) { - } else { - z.EncFallback(yy512) - } - } - } - if yyr502 || yy2arr502 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym515 := z.EncBinary() - _ = yym515 - if false { - } else { - h.encSliceDeployment(([]Deployment)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym516 := z.EncBinary() - _ = yym516 - if false { - } else { - h.encSliceDeployment(([]Deployment)(x.Items), e) - } - } - } - if yyr502 || yy2arr502 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym517 := z.DecBinary() - _ = yym517 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct518 := r.ContainerType() - if yyct518 == codecSelferValueTypeMap1234 { - yyl518 := r.ReadMapStart() - if yyl518 == 0 
{ - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl518, d) - } - } else if yyct518 == codecSelferValueTypeArray1234 { - yyl518 := r.ReadArrayStart() - if yyl518 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl518, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys519Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys519Slc - var yyhl519 bool = l >= 0 - for yyj519 := 0; ; yyj519++ { - if yyhl519 { - if yyj519 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys519Slc = r.DecodeBytes(yys519Slc, true, true) - yys519 := string(yys519Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys519 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv522 := &x.ListMeta - yym523 := z.DecBinary() - _ = yym523 - if false { - } else if z.HasExtensions() && z.DecExt(yyv522) { - } else { - z.DecFallback(yyv522, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv524 := &x.Items - yym525 := z.DecBinary() - _ = yym525 - if false { - } else { - h.decSliceDeployment((*[]Deployment)(yyv524), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys519) - } // end switch yys519 - } // end for yyj519 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj526 int - var yyb526 bool - var yyhl526 bool = l >= 0 - yyj526++ - if yyhl526 { - yyb526 = yyj526 > l - } else { - yyb526 = r.CheckBreak() - } - if yyb526 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj526++ - if yyhl526 { - yyb526 = yyj526 > l - } else { - yyb526 = r.CheckBreak() - } - if yyb526 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj526++ - if yyhl526 { - yyb526 = yyj526 > l - } else { - yyb526 = r.CheckBreak() - } - if yyb526 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv529 := &x.ListMeta - yym530 := z.DecBinary() - _ = yym530 - if false { - } else if z.HasExtensions() && z.DecExt(yyv529) { - } else { - z.DecFallback(yyv529, false) - } - } - yyj526++ - if yyhl526 { - yyb526 = yyj526 > l - } else { - yyb526 = r.CheckBreak() - } - if yyb526 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv531 := &x.Items - yym532 := z.DecBinary() - _ = yym532 - if false { - } else { - h.decSliceDeployment((*[]Deployment)(yyv531), d) - } - } - for { - yyj526++ - if yyhl526 { - yyb526 = yyj526 > l - } else { - yyb526 = r.CheckBreak() - } - if yyb526 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj526-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym533 := z.EncBinary() - _ = yym533 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep534 := !z.EncBinary() - yy2arr534 := z.EncBasicHandle().StructToArray - var yyq534 [2]bool - _, _, _ = yysep534, yyq534, yy2arr534 - const yyr534 bool = false - yyq534[0] = x.Selector != nil - var yynn534 int - if yyr534 || yy2arr534 { - r.EncodeArrayStart(2) - } else { - yynn534 = 1 - for _, b := range yyq534 { - if b { - yynn534++ - } - } - r.EncodeMapStart(yynn534) - yynn534 = 0 - } - if yyr534 || yy2arr534 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq534[0] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym536 := z.EncBinary() - _ = yym536 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq534[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym537 := z.EncBinary() - _ = yym537 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr534 || yy2arr534 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy539 := &x.Template - yy539.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy540 := &x.Template - yy540.CodecEncodeSelf(e) - } - if yyr534 || yy2arr534 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym541 := z.DecBinary() - _ = yym541 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct542 := r.ContainerType() - if yyct542 == codecSelferValueTypeMap1234 { - yyl542 := r.ReadMapStart() - if yyl542 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl542, d) - } - } else if yyct542 == codecSelferValueTypeArray1234 { - yyl542 := r.ReadArrayStart() - if yyl542 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl542, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys543Slc = 
z.DecScratchBuffer() // default slice to decode into - _ = yys543Slc - var yyhl543 bool = l >= 0 - for yyj543 := 0; ; yyj543++ { - if yyhl543 { - if yyj543 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys543Slc = r.DecodeBytes(yys543Slc, true, true) - yys543 := string(yys543Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys543 { - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym545 := z.DecBinary() - _ = yym545 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv546 := &x.Template - yyv546.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys543) - } // end switch yys543 - } // end for yyj543 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DaemonSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj547 int - var yyb547 bool - var yyhl547 bool = l >= 0 - yyj547++ - if yyhl547 { - yyb547 = yyj547 > l - } else { - yyb547 = r.CheckBreak() - } - if yyb547 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym549 := z.DecBinary() - _ = yym549 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj547++ - if yyhl547 { - yyb547 = yyj547 > l - } else { - yyb547 = r.CheckBreak() - } - if yyb547 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv550 := &x.Template - yyv550.CodecDecodeSelf(d) - } - for { - yyj547++ - if yyhl547 { - yyb547 = yyj547 > l - } else { - yyb547 = r.CheckBreak() - } - if yyb547 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj547-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym551 := z.EncBinary() - _ = yym551 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep552 := !z.EncBinary() - yy2arr552 := z.EncBasicHandle().StructToArray - var yyq552 [4]bool - _, _, _ = yysep552, yyq552, yy2arr552 - const yyr552 bool = false - var yynn552 int - if yyr552 || yy2arr552 { - r.EncodeArrayStart(4) - } else { - yynn552 = 4 - for _, b := range yyq552 { - if b { - yynn552++ - } - } - r.EncodeMapStart(yynn552) - yynn552 = 0 - } - if yyr552 || yy2arr552 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym554 := z.EncBinary() - _ = yym554 - if false { - } else { - r.EncodeInt(int64(x.CurrentNumberScheduled)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("currentNumberScheduled")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym555 := z.EncBinary() - _ = yym555 - if false { - } else { - r.EncodeInt(int64(x.CurrentNumberScheduled)) - } - } - if yyr552 || yy2arr552 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym557 := z.EncBinary() - _ = yym557 - if false { - } else { - r.EncodeInt(int64(x.NumberMisscheduled)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("numberMisscheduled")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym558 := z.EncBinary() - _ = yym558 - if false { - } else { - r.EncodeInt(int64(x.NumberMisscheduled)) - } - } - if yyr552 || yy2arr552 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym560 := z.EncBinary() - _ = yym560 - if false { - } else { - r.EncodeInt(int64(x.DesiredNumberScheduled)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("desiredNumberScheduled")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym561 := z.EncBinary() - _ = yym561 - if false { - } else { - r.EncodeInt(int64(x.DesiredNumberScheduled)) - } - } - if yyr552 || yy2arr552 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym563 := z.EncBinary() - _ = yym563 - if false { - } else { - r.EncodeInt(int64(x.NumberReady)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("numberReady")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym564 := z.EncBinary() - _ = yym564 - if false { - } else { - r.EncodeInt(int64(x.NumberReady)) - } - } - if yyr552 || yy2arr552 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym565 := z.DecBinary() - _ = yym565 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct566 := r.ContainerType() - if yyct566 == codecSelferValueTypeMap1234 { - yyl566 := r.ReadMapStart() - if yyl566 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl566, d) - } - } else if yyct566 == codecSelferValueTypeArray1234 { - yyl566 := r.ReadArrayStart() - if yyl566 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl566, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys567Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys567Slc - var yyhl567 bool = l >= 0 - for yyj567 := 0; ; yyj567++ { - if yyhl567 { - if yyj567 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys567Slc = r.DecodeBytes(yys567Slc, true, true) - yys567 := string(yys567Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys567 { - case "currentNumberScheduled": - if r.TryDecodeAsNil() { - x.CurrentNumberScheduled = 0 - } else { - x.CurrentNumberScheduled = int32(r.DecodeInt(32)) - } - case "numberMisscheduled": - if 
r.TryDecodeAsNil() { - x.NumberMisscheduled = 0 - } else { - x.NumberMisscheduled = int32(r.DecodeInt(32)) - } - case "desiredNumberScheduled": - if r.TryDecodeAsNil() { - x.DesiredNumberScheduled = 0 - } else { - x.DesiredNumberScheduled = int32(r.DecodeInt(32)) - } - case "numberReady": - if r.TryDecodeAsNil() { - x.NumberReady = 0 - } else { - x.NumberReady = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys567) - } // end switch yys567 - } // end for yyj567 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DaemonSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj572 int - var yyb572 bool - var yyhl572 bool = l >= 0 - yyj572++ - if yyhl572 { - yyb572 = yyj572 > l - } else { - yyb572 = r.CheckBreak() - } - if yyb572 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentNumberScheduled = 0 - } else { - x.CurrentNumberScheduled = int32(r.DecodeInt(32)) - } - yyj572++ - if yyhl572 { - yyb572 = yyj572 > l - } else { - yyb572 = r.CheckBreak() - } - if yyb572 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NumberMisscheduled = 0 - } else { - x.NumberMisscheduled = int32(r.DecodeInt(32)) - } - yyj572++ - if yyhl572 { - yyb572 = yyj572 > l - } else { - yyb572 = r.CheckBreak() - } - if yyb572 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DesiredNumberScheduled = 0 - } else { - x.DesiredNumberScheduled = int32(r.DecodeInt(32)) - } - yyj572++ - if yyhl572 { - yyb572 = yyj572 > l - } else { - yyb572 = r.CheckBreak() - } - if yyb572 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NumberReady = 0 - } else { - x.NumberReady = int32(r.DecodeInt(32)) - } - for { - yyj572++ - if yyhl572 { - yyb572 = yyj572 > l - } else { - yyb572 = r.CheckBreak() - } - if yyb572 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj572-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonSet) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym577 := z.EncBinary() - _ = yym577 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep578 := !z.EncBinary() - yy2arr578 := z.EncBasicHandle().StructToArray - var yyq578 [5]bool - _, _, _ = yysep578, yyq578, yy2arr578 - const yyr578 bool = false - yyq578[0] = x.Kind != "" - yyq578[1] = x.APIVersion != "" - yyq578[2] = true - yyq578[3] = true - yyq578[4] = true - var yynn578 int - if yyr578 || yy2arr578 { - r.EncodeArrayStart(5) - } else { - yynn578 = 0 - for _, b := range yyq578 { - if b { - yynn578++ - } - } - r.EncodeMapStart(yynn578) - yynn578 = 0 - } - if yyr578 || yy2arr578 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq578[0] { - yym580 := z.EncBinary() - _ = yym580 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - 
r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq578[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym581 := z.EncBinary() - _ = yym581 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr578 || yy2arr578 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq578[1] { - yym583 := z.EncBinary() - _ = yym583 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq578[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym584 := z.EncBinary() - _ = yym584 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr578 || yy2arr578 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq578[2] { - yy586 := &x.ObjectMeta - yy586.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq578[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy587 := &x.ObjectMeta - yy587.CodecEncodeSelf(e) - } - } - if yyr578 || yy2arr578 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq578[3] { - yy589 := &x.Spec - yy589.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq578[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy590 := &x.Spec - yy590.CodecEncodeSelf(e) - } - } - if yyr578 || yy2arr578 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq578[4] { - yy592 := &x.Status - yy592.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq578[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy593 := &x.Status - yy593.CodecEncodeSelf(e) - } - } - if yyr578 || yy2arr578 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonSet) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym594 := z.DecBinary() - _ = yym594 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct595 := r.ContainerType() - if yyct595 == codecSelferValueTypeMap1234 { - yyl595 := r.ReadMapStart() - if yyl595 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl595, d) - } - } else if yyct595 == codecSelferValueTypeArray1234 { - yyl595 := r.ReadArrayStart() - if yyl595 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl595, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys596Slc = z.DecScratchBuffer() // default slice to 
decode into - _ = yys596Slc - var yyhl596 bool = l >= 0 - for yyj596 := 0; ; yyj596++ { - if yyhl596 { - if yyj596 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys596Slc = r.DecodeBytes(yys596Slc, true, true) - yys596 := string(yys596Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys596 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv599 := &x.ObjectMeta - yyv599.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = DaemonSetSpec{} - } else { - yyv600 := &x.Spec - yyv600.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = DaemonSetStatus{} - } else { - yyv601 := &x.Status - yyv601.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys596) - } // end switch yys596 - } // end for yyj596 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DaemonSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj602 int - var yyb602 bool - var yyhl602 bool = l >= 0 - yyj602++ - if yyhl602 { - yyb602 = yyj602 > l - } else { - yyb602 = r.CheckBreak() - } - if yyb602 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj602++ - if yyhl602 { - yyb602 = yyj602 > l - } else { - yyb602 = r.CheckBreak() - } - if yyb602 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj602++ - if yyhl602 { - yyb602 = yyj602 > l - } else { - yyb602 = r.CheckBreak() - } - if yyb602 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv605 := &x.ObjectMeta - yyv605.CodecDecodeSelf(d) - } - yyj602++ - if yyhl602 { - yyb602 = yyj602 > l - } else { - yyb602 = r.CheckBreak() - } - if yyb602 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = DaemonSetSpec{} - } else { - yyv606 := &x.Spec - yyv606.CodecDecodeSelf(d) - } - yyj602++ - if yyhl602 { - yyb602 = yyj602 > l - } else { - yyb602 = r.CheckBreak() - } - if yyb602 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = DaemonSetStatus{} - } else { - yyv607 := &x.Status - yyv607.CodecDecodeSelf(d) - } - for { - yyj602++ - if yyhl602 { - yyb602 = yyj602 > l - } else { - yyb602 = r.CheckBreak() - } - if yyb602 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj602-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonSetList) CodecEncodeSelf(e 
*codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym608 := z.EncBinary() - _ = yym608 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep609 := !z.EncBinary() - yy2arr609 := z.EncBasicHandle().StructToArray - var yyq609 [4]bool - _, _, _ = yysep609, yyq609, yy2arr609 - const yyr609 bool = false - yyq609[0] = x.Kind != "" - yyq609[1] = x.APIVersion != "" - yyq609[2] = true - var yynn609 int - if yyr609 || yy2arr609 { - r.EncodeArrayStart(4) - } else { - yynn609 = 1 - for _, b := range yyq609 { - if b { - yynn609++ - } - } - r.EncodeMapStart(yynn609) - yynn609 = 0 - } - if yyr609 || yy2arr609 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq609[0] { - yym611 := z.EncBinary() - _ = yym611 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq609[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym612 := z.EncBinary() - _ = yym612 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr609 || yy2arr609 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq609[1] { - yym614 := z.EncBinary() - _ = yym614 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq609[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym615 := z.EncBinary() - _ = yym615 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr609 || yy2arr609 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq609[2] { - yy617 := &x.ListMeta - yym618 := z.EncBinary() - _ = yym618 - if false { - } else if z.HasExtensions() && z.EncExt(yy617) { - } else { - z.EncFallback(yy617) - } - } else { - r.EncodeNil() - } - } else { - if yyq609[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy619 := &x.ListMeta - yym620 := z.EncBinary() - _ = yym620 - if false { - } else if z.HasExtensions() && z.EncExt(yy619) { - } else { - z.EncFallback(yy619) - } - } - } - if yyr609 || yy2arr609 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym622 := z.EncBinary() - _ = yym622 - if false { - } else { - h.encSliceDaemonSet(([]DaemonSet)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym623 := z.EncBinary() - _ = yym623 - if false { - } else { - h.encSliceDaemonSet(([]DaemonSet)(x.Items), e) - } - } - } - if yyr609 || yy2arr609 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonSetList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym624 := z.DecBinary() - _ = yym624 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct625 := r.ContainerType() - if yyct625 == codecSelferValueTypeMap1234 { - yyl625 := r.ReadMapStart() - if yyl625 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl625, d) - } - } else if yyct625 == codecSelferValueTypeArray1234 { - yyl625 := r.ReadArrayStart() - if yyl625 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl625, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys626Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys626Slc - var yyhl626 bool = l >= 0 - for yyj626 := 0; ; yyj626++ { - if yyhl626 { - if yyj626 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys626Slc = r.DecodeBytes(yys626Slc, true, true) - yys626 := string(yys626Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys626 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv629 := &x.ListMeta - yym630 := z.DecBinary() - _ = yym630 - if false { - } else if z.HasExtensions() && z.DecExt(yyv629) { - } else { - z.DecFallback(yyv629, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv631 := &x.Items - yym632 := z.DecBinary() - _ = yym632 - if false { - } else { - h.decSliceDaemonSet((*[]DaemonSet)(yyv631), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys626) - } // end switch yys626 - } // end for yyj626 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DaemonSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj633 int - var yyb633 bool - var yyhl633 bool = l >= 0 - yyj633++ - if yyhl633 { - yyb633 = yyj633 > l - } else { - yyb633 = r.CheckBreak() - } - if yyb633 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj633++ - if yyhl633 { - yyb633 = yyj633 > l - } else { - yyb633 = r.CheckBreak() - } - if yyb633 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj633++ - if yyhl633 { - yyb633 = yyj633 > l - } else { - yyb633 = r.CheckBreak() - } - if yyb633 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv636 := &x.ListMeta - yym637 := z.DecBinary() - _ = yym637 - if false { - } else if z.HasExtensions() && z.DecExt(yyv636) { - } 
else { - z.DecFallback(yyv636, false) - } - } - yyj633++ - if yyhl633 { - yyb633 = yyj633 > l - } else { - yyb633 = r.CheckBreak() - } - if yyb633 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv638 := &x.Items - yym639 := z.DecBinary() - _ = yym639 - if false { - } else { - h.decSliceDaemonSet((*[]DaemonSet)(yyv638), d) - } - } - for { - yyj633++ - if yyhl633 { - yyb633 = yyj633 > l - } else { - yyb633 = r.CheckBreak() - } - if yyb633 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj633-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ThirdPartyResourceDataList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym640 := z.EncBinary() - _ = yym640 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep641 := !z.EncBinary() - yy2arr641 := z.EncBasicHandle().StructToArray - var yyq641 [4]bool - _, _, _ = yysep641, yyq641, yy2arr641 - const yyr641 bool = false - yyq641[0] = x.Kind != "" - yyq641[1] = x.APIVersion != "" - yyq641[2] = true - var yynn641 int - if yyr641 || yy2arr641 { - r.EncodeArrayStart(4) - } else { - yynn641 = 1 - for _, b := range yyq641 { - if b { - yynn641++ - } - } - r.EncodeMapStart(yynn641) - yynn641 = 0 - } - if yyr641 || yy2arr641 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq641[0] { - yym643 := z.EncBinary() - _ = yym643 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq641[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym644 := z.EncBinary() - _ = yym644 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr641 || yy2arr641 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq641[1] { - yym646 := z.EncBinary() - _ = yym646 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq641[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym647 := z.EncBinary() - _ = yym647 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr641 || yy2arr641 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq641[2] { - yy649 := &x.ListMeta - yym650 := z.EncBinary() - _ = yym650 - if false { - } else if z.HasExtensions() && z.EncExt(yy649) { - } else { - z.EncFallback(yy649) - } - } else { - r.EncodeNil() - } - } else { - if yyq641[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy651 := &x.ListMeta - yym652 := z.EncBinary() - _ = yym652 - if false { - } else if z.HasExtensions() && z.EncExt(yy651) { - } else { - z.EncFallback(yy651) - } - } - } - if yyr641 || yy2arr641 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym654 := z.EncBinary() - _ = yym654 - if false { - } else { - h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym655 := z.EncBinary() - _ = yym655 - if false { - } else { - h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e) - } - } - } - if yyr641 || yy2arr641 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ThirdPartyResourceDataList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym656 := z.DecBinary() - _ = yym656 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct657 := r.ContainerType() - if yyct657 == codecSelferValueTypeMap1234 { - yyl657 := r.ReadMapStart() - if yyl657 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl657, d) - } - } else if yyct657 == codecSelferValueTypeArray1234 { - yyl657 := r.ReadArrayStart() - if yyl657 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl657, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ThirdPartyResourceDataList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys658Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys658Slc - var yyhl658 bool = l >= 0 - for yyj658 := 0; ; yyj658++ { - if yyhl658 { - if yyj658 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys658Slc = r.DecodeBytes(yys658Slc, true, true) - yys658 := string(yys658Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys658 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv661 := &x.ListMeta - yym662 := z.DecBinary() - _ = yym662 - if false { - } else if z.HasExtensions() && z.DecExt(yyv661) { - } else { - z.DecFallback(yyv661, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv663 := &x.Items - yym664 := z.DecBinary() - _ = yym664 - if false { - } else { - h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv663), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys658) - } // end switch yys658 - } // end for yyj658 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ThirdPartyResourceDataList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj665 int - var yyb665 bool - var yyhl665 bool = l >= 0 - yyj665++ - if yyhl665 { - yyb665 = yyj665 > l - } else { - yyb665 = r.CheckBreak() - } - if yyb665 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj665++ - if yyhl665 { - yyb665 = yyj665 > l - } else { - yyb665 = r.CheckBreak() - } - if yyb665 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj665++ - if yyhl665 { - yyb665 = yyj665 > l - } else { - yyb665 = r.CheckBreak() - } - if yyb665 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv668 := &x.ListMeta - yym669 := z.DecBinary() - _ = yym669 - if false { - } else if z.HasExtensions() && z.DecExt(yyv668) { - } else { - z.DecFallback(yyv668, false) - } - } - yyj665++ - if yyhl665 { - yyb665 = yyj665 > l - } else { - yyb665 = r.CheckBreak() - } - if yyb665 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv670 := &x.Items - yym671 := z.DecBinary() - _ = yym671 - if false { - } else { - h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv670), d) - } - } - for { - yyj665++ - if yyhl665 { - yyb665 = yyj665 > l - } else { - yyb665 = r.CheckBreak() - } - if yyb665 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj665-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Ingress) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym672 := z.EncBinary() - _ = yym672 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep673 := !z.EncBinary() - yy2arr673 := z.EncBasicHandle().StructToArray - var yyq673 [5]bool - _, _, _ = yysep673, yyq673, yy2arr673 - const yyr673 bool = false - yyq673[0] = x.Kind != "" - yyq673[1] = x.APIVersion != "" - yyq673[2] = true - yyq673[3] = true - yyq673[4] = true - var yynn673 int - if yyr673 || yy2arr673 { - r.EncodeArrayStart(5) - } else { - yynn673 = 0 - for _, b := range yyq673 { - if b { - yynn673++ - } - } - r.EncodeMapStart(yynn673) - yynn673 = 0 - } - if yyr673 || yy2arr673 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq673[0] { - yym675 := z.EncBinary() - _ = yym675 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq673[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym676 := z.EncBinary() - _ = yym676 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr673 || yy2arr673 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq673[1] { - yym678 := z.EncBinary() - _ = yym678 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq673[1] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym679 := z.EncBinary() - _ = yym679 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr673 || yy2arr673 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq673[2] { - yy681 := &x.ObjectMeta - yy681.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq673[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy682 := &x.ObjectMeta - yy682.CodecEncodeSelf(e) - } - } - if yyr673 || yy2arr673 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq673[3] { - yy684 := &x.Spec - yy684.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq673[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy685 := &x.Spec - yy685.CodecEncodeSelf(e) - } - } - if yyr673 || yy2arr673 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq673[4] { - yy687 := &x.Status - yy687.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq673[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy688 := &x.Status - yy688.CodecEncodeSelf(e) - } - } - if yyr673 || yy2arr673 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Ingress) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym689 := z.DecBinary() - _ = yym689 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct690 := r.ContainerType() - if yyct690 == codecSelferValueTypeMap1234 { - yyl690 := r.ReadMapStart() - if yyl690 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl690, d) - } - } else if yyct690 == codecSelferValueTypeArray1234 { - yyl690 := r.ReadArrayStart() - if yyl690 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl690, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Ingress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys691Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys691Slc - var yyhl691 bool = l >= 0 - for yyj691 := 0; ; yyj691++ { - if yyhl691 { - if yyj691 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys691Slc = r.DecodeBytes(yys691Slc, true, true) - yys691 := string(yys691Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys691 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = 
pkg2_api.ObjectMeta{} - } else { - yyv694 := &x.ObjectMeta - yyv694.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = IngressSpec{} - } else { - yyv695 := &x.Spec - yyv695.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = IngressStatus{} - } else { - yyv696 := &x.Status - yyv696.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys691) - } // end switch yys691 - } // end for yyj691 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Ingress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj697 int - var yyb697 bool - var yyhl697 bool = l >= 0 - yyj697++ - if yyhl697 { - yyb697 = yyj697 > l - } else { - yyb697 = r.CheckBreak() - } - if yyb697 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj697++ - if yyhl697 { - yyb697 = yyj697 > l - } else { - yyb697 = r.CheckBreak() - } - if yyb697 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj697++ - if yyhl697 { - yyb697 = yyj697 > l - } else { - yyb697 = r.CheckBreak() - } - if yyb697 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv700 := &x.ObjectMeta - yyv700.CodecDecodeSelf(d) - } - yyj697++ - if yyhl697 { - yyb697 = yyj697 > l - } else { - yyb697 = r.CheckBreak() - } - if yyb697 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = IngressSpec{} - } else { - yyv701 := &x.Spec - yyv701.CodecDecodeSelf(d) - } - yyj697++ - if yyhl697 { - yyb697 = yyj697 > l - } else { - yyb697 = r.CheckBreak() - } - if yyb697 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = IngressStatus{} - } else { - yyv702 := &x.Status - yyv702.CodecDecodeSelf(d) - } - for { - yyj697++ - if yyhl697 { - yyb697 = yyj697 > l - } else { - yyb697 = r.CheckBreak() - } - if yyb697 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj697-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym703 := z.EncBinary() - _ = yym703 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep704 := !z.EncBinary() - yy2arr704 := z.EncBasicHandle().StructToArray - var yyq704 [4]bool - _, _, _ = yysep704, yyq704, yy2arr704 - const yyr704 bool = false - yyq704[0] = x.Kind != "" - yyq704[1] = x.APIVersion != "" - yyq704[2] = true - var yynn704 int - if yyr704 || yy2arr704 { - r.EncodeArrayStart(4) - } else { - yynn704 = 1 - for _, b := range yyq704 { - if b { - yynn704++ - } - } - r.EncodeMapStart(yynn704) - yynn704 = 0 - } - if yyr704 || 
yy2arr704 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq704[0] { - yym706 := z.EncBinary() - _ = yym706 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq704[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym707 := z.EncBinary() - _ = yym707 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr704 || yy2arr704 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq704[1] { - yym709 := z.EncBinary() - _ = yym709 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq704[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym710 := z.EncBinary() - _ = yym710 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr704 || yy2arr704 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq704[2] { - yy712 := &x.ListMeta - yym713 := z.EncBinary() - _ = yym713 - if false { - } else if z.HasExtensions() && z.EncExt(yy712) { - } else { - z.EncFallback(yy712) - } - } else { - r.EncodeNil() - } - } else { - if yyq704[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy714 := &x.ListMeta - yym715 := z.EncBinary() - _ = yym715 - if false { - } else if z.HasExtensions() && z.EncExt(yy714) { - } else { - z.EncFallback(yy714) - } - } - } - if yyr704 || yy2arr704 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym717 := z.EncBinary() - _ = yym717 - if false { - } else { - h.encSliceIngress(([]Ingress)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym718 := z.EncBinary() - _ = yym718 - if false { - } else { - h.encSliceIngress(([]Ingress)(x.Items), e) - } - } - } - if yyr704 || yy2arr704 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym719 := z.DecBinary() - _ = yym719 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct720 := r.ContainerType() - if yyct720 == codecSelferValueTypeMap1234 { - yyl720 := r.ReadMapStart() - if yyl720 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl720, d) - } - } else if yyct720 == codecSelferValueTypeArray1234 { - yyl720 := r.ReadArrayStart() - if yyl720 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl720, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressList) 
codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys721Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys721Slc - var yyhl721 bool = l >= 0 - for yyj721 := 0; ; yyj721++ { - if yyhl721 { - if yyj721 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys721Slc = r.DecodeBytes(yys721Slc, true, true) - yys721 := string(yys721Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys721 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv724 := &x.ListMeta - yym725 := z.DecBinary() - _ = yym725 - if false { - } else if z.HasExtensions() && z.DecExt(yyv724) { - } else { - z.DecFallback(yyv724, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv726 := &x.Items - yym727 := z.DecBinary() - _ = yym727 - if false { - } else { - h.decSliceIngress((*[]Ingress)(yyv726), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys721) - } // end switch yys721 - } // end for yyj721 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj728 int - var yyb728 bool - var yyhl728 bool = l >= 0 - yyj728++ - if yyhl728 { - yyb728 = yyj728 > l - } else { - yyb728 = r.CheckBreak() - } - if yyb728 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj728++ - if yyhl728 { - yyb728 = yyj728 > l - } else { - yyb728 = r.CheckBreak() - } - if yyb728 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj728++ - if yyhl728 { - yyb728 = yyj728 > l - } else { - yyb728 = r.CheckBreak() - } - if yyb728 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv731 := &x.ListMeta - yym732 := z.DecBinary() - _ = yym732 - if false { - } else if z.HasExtensions() && z.DecExt(yyv731) { - } else { - z.DecFallback(yyv731, false) - } - } - yyj728++ - if yyhl728 { - yyb728 = yyj728 > l - } else { - yyb728 = r.CheckBreak() - } - if yyb728 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv733 := &x.Items - yym734 := z.DecBinary() - _ = yym734 - if false { - } else { - h.decSliceIngress((*[]Ingress)(yyv733), d) - } - } - for { - yyj728++ - if yyhl728 { - yyb728 = yyj728 > l - } else { - yyb728 = r.CheckBreak() - } - if yyb728 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj728-1, "") - } - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym735 := z.EncBinary() - _ = yym735 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep736 := !z.EncBinary() - yy2arr736 := z.EncBasicHandle().StructToArray - var yyq736 [3]bool - _, _, _ = yysep736, yyq736, yy2arr736 - const yyr736 bool = false - yyq736[0] = x.Backend != nil - yyq736[1] = len(x.TLS) != 0 - yyq736[2] = len(x.Rules) != 0 - var yynn736 int - if yyr736 || yy2arr736 { - r.EncodeArrayStart(3) - } else { - yynn736 = 0 - for _, b := range yyq736 { - if b { - yynn736++ - } - } - r.EncodeMapStart(yynn736) - yynn736 = 0 - } - if yyr736 || yy2arr736 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq736[0] { - if x.Backend == nil { - r.EncodeNil() - } else { - x.Backend.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq736[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("backend")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Backend == nil { - r.EncodeNil() - } else { - x.Backend.CodecEncodeSelf(e) - } - } - } - if yyr736 || yy2arr736 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq736[1] { - if x.TLS == nil { - r.EncodeNil() - } else { - yym739 := z.EncBinary() - _ = yym739 - if false { - } else { - h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq736[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tls")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TLS == nil { - r.EncodeNil() - } else { - yym740 := z.EncBinary() - _ = yym740 - if false { - } else { - h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e) - } - } - } - } - if yyr736 || yy2arr736 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq736[2] { - if x.Rules == nil { - r.EncodeNil() - } else { - yym742 := z.EncBinary() - _ = yym742 - if false { - } else { - h.encSliceIngressRule(([]IngressRule)(x.Rules), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq736[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rules")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Rules == nil { - r.EncodeNil() - } else { - yym743 := z.EncBinary() - _ = yym743 - if false { - } else { - h.encSliceIngressRule(([]IngressRule)(x.Rules), e) - } - } - } - } - if yyr736 || yy2arr736 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym744 := z.DecBinary() - _ = yym744 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct745 := r.ContainerType() - if yyct745 == codecSelferValueTypeMap1234 { - yyl745 := r.ReadMapStart() - if yyl745 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl745, d) - } - } else if yyct745 == codecSelferValueTypeArray1234 { - yyl745 := r.ReadArrayStart() - if yyl745 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl745, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys746Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys746Slc - var yyhl746 bool = l >= 0 - for yyj746 := 0; ; yyj746++ { - if yyhl746 { - if yyj746 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys746Slc = r.DecodeBytes(yys746Slc, true, true) - yys746 := string(yys746Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys746 { - case "backend": - if r.TryDecodeAsNil() { - if x.Backend != nil { - x.Backend = nil - } - } else { - if x.Backend == nil { - x.Backend = new(IngressBackend) - } - x.Backend.CodecDecodeSelf(d) - } - case "tls": - if r.TryDecodeAsNil() { - x.TLS = nil - } else { - yyv748 := &x.TLS - yym749 := z.DecBinary() - _ = yym749 - if false { - } else { - h.decSliceIngressTLS((*[]IngressTLS)(yyv748), d) - } - } - case "rules": - if r.TryDecodeAsNil() { - x.Rules = nil - } else { - yyv750 := &x.Rules - yym751 := z.DecBinary() - _ = yym751 - if false { - } else { - h.decSliceIngressRule((*[]IngressRule)(yyv750), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys746) - } // end switch yys746 - } // end for yyj746 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj752 int - var yyb752 bool - var yyhl752 bool = l >= 0 - yyj752++ - if yyhl752 { - yyb752 = yyj752 > l - } else { - yyb752 = r.CheckBreak() - } - if yyb752 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Backend != nil { - x.Backend = nil - } - } else { - if x.Backend == nil { - x.Backend = new(IngressBackend) - } - x.Backend.CodecDecodeSelf(d) - } - yyj752++ - if yyhl752 { - yyb752 = yyj752 > l - } else { - yyb752 = r.CheckBreak() - } - if yyb752 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TLS = nil - } else { - yyv754 := &x.TLS - yym755 := z.DecBinary() - _ = yym755 - if false { - } else { - h.decSliceIngressTLS((*[]IngressTLS)(yyv754), d) - } - } - yyj752++ - if yyhl752 { - yyb752 = yyj752 > l - } else { - yyb752 = r.CheckBreak() - } - if yyb752 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rules = nil - } else { - yyv756 := &x.Rules - yym757 := z.DecBinary() - _ = yym757 - if false { - } else { - h.decSliceIngressRule((*[]IngressRule)(yyv756), d) - } - } - for { - yyj752++ - if yyhl752 { - yyb752 = yyj752 > l - } else { - yyb752 = r.CheckBreak() - } - if yyb752 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj752-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressTLS) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if 
x == nil { - r.EncodeNil() - } else { - yym758 := z.EncBinary() - _ = yym758 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep759 := !z.EncBinary() - yy2arr759 := z.EncBasicHandle().StructToArray - var yyq759 [2]bool - _, _, _ = yysep759, yyq759, yy2arr759 - const yyr759 bool = false - yyq759[0] = len(x.Hosts) != 0 - yyq759[1] = x.SecretName != "" - var yynn759 int - if yyr759 || yy2arr759 { - r.EncodeArrayStart(2) - } else { - yynn759 = 0 - for _, b := range yyq759 { - if b { - yynn759++ - } - } - r.EncodeMapStart(yynn759) - yynn759 = 0 - } - if yyr759 || yy2arr759 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq759[0] { - if x.Hosts == nil { - r.EncodeNil() - } else { - yym761 := z.EncBinary() - _ = yym761 - if false { - } else { - z.F.EncSliceStringV(x.Hosts, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq759[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hosts")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Hosts == nil { - r.EncodeNil() - } else { - yym762 := z.EncBinary() - _ = yym762 - if false { - } else { - z.F.EncSliceStringV(x.Hosts, false, e) - } - } - } - } - if yyr759 || yy2arr759 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq759[1] { - yym764 := z.EncBinary() - _ = yym764 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq759[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym765 := z.EncBinary() - _ = yym765 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) - } - } - } - if yyr759 || yy2arr759 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressTLS) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym766 := z.DecBinary() - _ = yym766 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct767 := r.ContainerType() - if yyct767 == codecSelferValueTypeMap1234 { - yyl767 := r.ReadMapStart() - if yyl767 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl767, d) - } - } else if yyct767 == codecSelferValueTypeArray1234 { - yyl767 := r.ReadArrayStart() - if yyl767 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl767, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressTLS) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys768Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys768Slc - var yyhl768 bool = l >= 0 - for yyj768 := 0; ; yyj768++ { - if yyhl768 { - if yyj768 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys768Slc = r.DecodeBytes(yys768Slc, true, true) - yys768 := string(yys768Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys768 { - case "hosts": - if r.TryDecodeAsNil() { - x.Hosts = nil - } else { - 
yyv769 := &x.Hosts - yym770 := z.DecBinary() - _ = yym770 - if false { - } else { - z.F.DecSliceStringX(yyv769, false, d) - } - } - case "secretName": - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - x.SecretName = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys768) - } // end switch yys768 - } // end for yyj768 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressTLS) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj772 int - var yyb772 bool - var yyhl772 bool = l >= 0 - yyj772++ - if yyhl772 { - yyb772 = yyj772 > l - } else { - yyb772 = r.CheckBreak() - } - if yyb772 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hosts = nil - } else { - yyv773 := &x.Hosts - yym774 := z.DecBinary() - _ = yym774 - if false { - } else { - z.F.DecSliceStringX(yyv773, false, d) - } - } - yyj772++ - if yyhl772 { - yyb772 = yyj772 > l - } else { - yyb772 = r.CheckBreak() - } - if yyb772 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - x.SecretName = string(r.DecodeString()) - } - for { - yyj772++ - if yyhl772 { - yyb772 = yyj772 > l - } else { - yyb772 = r.CheckBreak() - } - if yyb772 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj772-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym776 := z.EncBinary() - _ = yym776 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep777 := !z.EncBinary() - yy2arr777 := z.EncBasicHandle().StructToArray - var yyq777 [1]bool - _, _, _ = yysep777, yyq777, yy2arr777 - const yyr777 bool = false - yyq777[0] = true - var yynn777 int - if yyr777 || yy2arr777 { - r.EncodeArrayStart(1) - } else { - yynn777 = 0 - for _, b := range yyq777 { - if b { - yynn777++ - } - } - r.EncodeMapStart(yynn777) - yynn777 = 0 - } - if yyr777 || yy2arr777 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq777[0] { - yy779 := &x.LoadBalancer - yy779.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq777[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("loadBalancer")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy780 := &x.LoadBalancer - yy780.CodecEncodeSelf(e) - } - } - if yyr777 || yy2arr777 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym781 := z.DecBinary() - _ = yym781 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct782 := r.ContainerType() - if yyct782 == codecSelferValueTypeMap1234 { - yyl782 := r.ReadMapStart() - if yyl782 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl782, d) - } - } else if yyct782 == 
codecSelferValueTypeArray1234 { - yyl782 := r.ReadArrayStart() - if yyl782 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl782, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys783Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys783Slc - var yyhl783 bool = l >= 0 - for yyj783 := 0; ; yyj783++ { - if yyhl783 { - if yyj783 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys783Slc = r.DecodeBytes(yys783Slc, true, true) - yys783 := string(yys783Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys783 { - case "loadBalancer": - if r.TryDecodeAsNil() { - x.LoadBalancer = pkg2_api.LoadBalancerStatus{} - } else { - yyv784 := &x.LoadBalancer - yyv784.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys783) - } // end switch yys783 - } // end for yyj783 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj785 int - var yyb785 bool - var yyhl785 bool = l >= 0 - yyj785++ - if yyhl785 { - yyb785 = yyj785 > l - } else { - yyb785 = r.CheckBreak() - } - if yyb785 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LoadBalancer = pkg2_api.LoadBalancerStatus{} - } else { - yyv786 := &x.LoadBalancer - yyv786.CodecDecodeSelf(d) - } - for { - yyj785++ - if yyhl785 { - yyb785 = yyj785 > l - } else { - yyb785 = r.CheckBreak() - } - if yyb785 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj785-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressRule) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym787 := z.EncBinary() - _ = yym787 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep788 := !z.EncBinary() - yy2arr788 := z.EncBasicHandle().StructToArray - var yyq788 [2]bool - _, _, _ = yysep788, yyq788, yy2arr788 - const yyr788 bool = false - yyq788[0] = x.Host != "" - yyq788[1] = x.IngressRuleValue.HTTP != nil && x.HTTP != nil - var yynn788 int - if yyr788 || yy2arr788 { - r.EncodeArrayStart(2) - } else { - yynn788 = 0 - for _, b := range yyq788 { - if b { - yynn788++ - } - } - r.EncodeMapStart(yynn788) - yynn788 = 0 - } - if yyr788 || yy2arr788 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq788[0] { - yym790 := z.EncBinary() - _ = yym790 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq788[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("host")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym791 := z.EncBinary() - _ = yym791 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } - } - var yyn792 bool - if 
x.IngressRuleValue.HTTP == nil { - yyn792 = true - goto LABEL792 - } - LABEL792: - if yyr788 || yy2arr788 { - if yyn792 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq788[1] { - if x.HTTP == nil { - r.EncodeNil() - } else { - x.HTTP.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq788[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("http")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn792 { - r.EncodeNil() - } else { - if x.HTTP == nil { - r.EncodeNil() - } else { - x.HTTP.CodecEncodeSelf(e) - } - } - } - } - if yyr788 || yy2arr788 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressRule) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym793 := z.DecBinary() - _ = yym793 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct794 := r.ContainerType() - if yyct794 == codecSelferValueTypeMap1234 { - yyl794 := r.ReadMapStart() - if yyl794 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl794, d) - } - } else if yyct794 == codecSelferValueTypeArray1234 { - yyl794 := r.ReadArrayStart() - if yyl794 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl794, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys795Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys795Slc - var yyhl795 bool = l >= 0 - for yyj795 := 0; ; yyj795++ { - if yyhl795 { - if yyj795 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys795Slc = r.DecodeBytes(yys795Slc, true, true) - yys795 := string(yys795Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys795 { - case "host": - if r.TryDecodeAsNil() { - x.Host = "" - } else { - x.Host = string(r.DecodeString()) - } - case "http": - if x.IngressRuleValue.HTTP == nil { - x.IngressRuleValue.HTTP = new(HTTPIngressRuleValue) - } - if r.TryDecodeAsNil() { - if x.HTTP != nil { - x.HTTP = nil - } - } else { - if x.HTTP == nil { - x.HTTP = new(HTTPIngressRuleValue) - } - x.HTTP.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys795) - } // end switch yys795 - } // end for yyj795 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj798 int - var yyb798 bool - var yyhl798 bool = l >= 0 - yyj798++ - if yyhl798 { - yyb798 = yyj798 > l - } else { - yyb798 = r.CheckBreak() - } - if yyb798 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Host = "" - } else { - x.Host = string(r.DecodeString()) - } - if x.IngressRuleValue.HTTP == nil { - x.IngressRuleValue.HTTP = new(HTTPIngressRuleValue) - } - yyj798++ - if yyhl798 { - yyb798 = yyj798 > l - } else { - yyb798 = 
r.CheckBreak() - } - if yyb798 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HTTP != nil { - x.HTTP = nil - } - } else { - if x.HTTP == nil { - x.HTTP = new(HTTPIngressRuleValue) - } - x.HTTP.CodecDecodeSelf(d) - } - for { - yyj798++ - if yyhl798 { - yyb798 = yyj798 > l - } else { - yyb798 = r.CheckBreak() - } - if yyb798 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj798-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym801 := z.EncBinary() - _ = yym801 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep802 := !z.EncBinary() - yy2arr802 := z.EncBasicHandle().StructToArray - var yyq802 [1]bool - _, _, _ = yysep802, yyq802, yy2arr802 - const yyr802 bool = false - yyq802[0] = x.HTTP != nil - var yynn802 int - if yyr802 || yy2arr802 { - r.EncodeArrayStart(1) - } else { - yynn802 = 0 - for _, b := range yyq802 { - if b { - yynn802++ - } - } - r.EncodeMapStart(yynn802) - yynn802 = 0 - } - if yyr802 || yy2arr802 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq802[0] { - if x.HTTP == nil { - r.EncodeNil() - } else { - x.HTTP.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq802[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("http")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HTTP == nil { - r.EncodeNil() - } else { - x.HTTP.CodecEncodeSelf(e) - } - } - } - if yyr802 || yy2arr802 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym804 := z.DecBinary() - _ = yym804 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct805 := r.ContainerType() - if yyct805 == codecSelferValueTypeMap1234 { - yyl805 := r.ReadMapStart() - if yyl805 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl805, d) - } - } else if yyct805 == codecSelferValueTypeArray1234 { - yyl805 := r.ReadArrayStart() - if yyl805 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl805, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys806Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys806Slc - var yyhl806 bool = l >= 0 - for yyj806 := 0; ; yyj806++ { - if yyhl806 { - if yyj806 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys806Slc = r.DecodeBytes(yys806Slc, true, true) - yys806 := string(yys806Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys806 { - case "http": - if r.TryDecodeAsNil() { - if x.HTTP != nil { - x.HTTP = nil - } - } else { - if x.HTTP == nil 
{ - x.HTTP = new(HTTPIngressRuleValue) - } - x.HTTP.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys806) - } // end switch yys806 - } // end for yyj806 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj808 int - var yyb808 bool - var yyhl808 bool = l >= 0 - yyj808++ - if yyhl808 { - yyb808 = yyj808 > l - } else { - yyb808 = r.CheckBreak() - } - if yyb808 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HTTP != nil { - x.HTTP = nil - } - } else { - if x.HTTP == nil { - x.HTTP = new(HTTPIngressRuleValue) - } - x.HTTP.CodecDecodeSelf(d) - } - for { - yyj808++ - if yyhl808 { - yyb808 = yyj808 > l - } else { - yyb808 = r.CheckBreak() - } - if yyb808 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj808-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HTTPIngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym810 := z.EncBinary() - _ = yym810 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep811 := !z.EncBinary() - yy2arr811 := z.EncBasicHandle().StructToArray - var yyq811 [1]bool - _, _, _ = yysep811, yyq811, yy2arr811 - const yyr811 bool = false - var yynn811 int - if yyr811 || yy2arr811 { - r.EncodeArrayStart(1) - } else { - yynn811 = 1 - for _, b := range yyq811 { - if b { - yynn811++ - } - } - r.EncodeMapStart(yynn811) - yynn811 = 0 - } - if yyr811 || yy2arr811 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Paths == nil { - r.EncodeNil() - } else { - yym813 := z.EncBinary() - _ = yym813 - if false { - } else { - h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("paths")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Paths == nil { - r.EncodeNil() - } else { - yym814 := z.EncBinary() - _ = yym814 - if false { - } else { - h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e) - } - } - } - if yyr811 || yy2arr811 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HTTPIngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym815 := z.DecBinary() - _ = yym815 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct816 := r.ContainerType() - if yyct816 == codecSelferValueTypeMap1234 { - yyl816 := r.ReadMapStart() - if yyl816 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl816, d) - } - } else if yyct816 == codecSelferValueTypeArray1234 { - yyl816 := r.ReadArrayStart() - if yyl816 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl816, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HTTPIngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { 
- var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys817Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys817Slc - var yyhl817 bool = l >= 0 - for yyj817 := 0; ; yyj817++ { - if yyhl817 { - if yyj817 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys817Slc = r.DecodeBytes(yys817Slc, true, true) - yys817 := string(yys817Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys817 { - case "paths": - if r.TryDecodeAsNil() { - x.Paths = nil - } else { - yyv818 := &x.Paths - yym819 := z.DecBinary() - _ = yym819 - if false { - } else { - h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv818), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys817) - } // end switch yys817 - } // end for yyj817 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HTTPIngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj820 int - var yyb820 bool - var yyhl820 bool = l >= 0 - yyj820++ - if yyhl820 { - yyb820 = yyj820 > l - } else { - yyb820 = r.CheckBreak() - } - if yyb820 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Paths = nil - } else { - yyv821 := &x.Paths - yym822 := z.DecBinary() - _ = yym822 - if false { - } else { - h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv821), d) - } - } - for { - yyj820++ - if yyhl820 { - yyb820 = yyj820 > l - } else { - yyb820 = r.CheckBreak() - } - if yyb820 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj820-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HTTPIngressPath) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym823 := z.EncBinary() - _ = yym823 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep824 := !z.EncBinary() - yy2arr824 := z.EncBasicHandle().StructToArray - var yyq824 [2]bool - _, _, _ = yysep824, yyq824, yy2arr824 - const yyr824 bool = false - yyq824[0] = x.Path != "" - var yynn824 int - if yyr824 || yy2arr824 { - r.EncodeArrayStart(2) - } else { - yynn824 = 1 - for _, b := range yyq824 { - if b { - yynn824++ - } - } - r.EncodeMapStart(yynn824) - yynn824 = 0 - } - if yyr824 || yy2arr824 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq824[0] { - yym826 := z.EncBinary() - _ = yym826 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq824[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym827 := z.EncBinary() - _ = yym827 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - } - if yyr824 || yy2arr824 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy829 := &x.Backend - yy829.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("backend")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - 
yy830 := &x.Backend - yy830.CodecEncodeSelf(e) - } - if yyr824 || yy2arr824 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HTTPIngressPath) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym831 := z.DecBinary() - _ = yym831 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct832 := r.ContainerType() - if yyct832 == codecSelferValueTypeMap1234 { - yyl832 := r.ReadMapStart() - if yyl832 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl832, d) - } - } else if yyct832 == codecSelferValueTypeArray1234 { - yyl832 := r.ReadArrayStart() - if yyl832 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl832, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HTTPIngressPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys833Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys833Slc - var yyhl833 bool = l >= 0 - for yyj833 := 0; ; yyj833++ { - if yyhl833 { - if yyj833 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys833Slc = r.DecodeBytes(yys833Slc, true, true) - yys833 := string(yys833Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys833 { - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "backend": - if r.TryDecodeAsNil() { - x.Backend = IngressBackend{} - } else { - yyv835 := &x.Backend - yyv835.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys833) - } // end switch yys833 - } // end for yyj833 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HTTPIngressPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj836 int - var yyb836 bool - var yyhl836 bool = l >= 0 - yyj836++ - if yyhl836 { - yyb836 = yyj836 > l - } else { - yyb836 = r.CheckBreak() - } - if yyb836 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj836++ - if yyhl836 { - yyb836 = yyj836 > l - } else { - yyb836 = r.CheckBreak() - } - if yyb836 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Backend = IngressBackend{} - } else { - yyv838 := &x.Backend - yyv838.CodecDecodeSelf(d) - } - for { - yyj836++ - if yyhl836 { - yyb836 = yyj836 > l - } else { - yyb836 = r.CheckBreak() - } - if yyb836 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj836-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressBackend) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym839 := z.EncBinary() - _ = yym839 - if false { - } else if z.HasExtensions() && 
z.EncExt(x) { - } else { - yysep840 := !z.EncBinary() - yy2arr840 := z.EncBasicHandle().StructToArray - var yyq840 [2]bool - _, _, _ = yysep840, yyq840, yy2arr840 - const yyr840 bool = false - var yynn840 int - if yyr840 || yy2arr840 { - r.EncodeArrayStart(2) - } else { - yynn840 = 2 - for _, b := range yyq840 { - if b { - yynn840++ - } - } - r.EncodeMapStart(yynn840) - yynn840 = 0 - } - if yyr840 || yy2arr840 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym842 := z.EncBinary() - _ = yym842 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym843 := z.EncBinary() - _ = yym843 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) - } - } - if yyr840 || yy2arr840 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy845 := &x.ServicePort - yym846 := z.EncBinary() - _ = yym846 - if false { - } else if z.HasExtensions() && z.EncExt(yy845) { - } else if !yym846 && z.IsJSONHandle() { - z.EncJSONMarshal(yy845) - } else { - z.EncFallback(yy845) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("servicePort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy847 := &x.ServicePort - yym848 := z.EncBinary() - _ = yym848 - if false { - } else if z.HasExtensions() && z.EncExt(yy847) { - } else if !yym848 && z.IsJSONHandle() { - z.EncJSONMarshal(yy847) - } else { - z.EncFallback(yy847) - } - } - if yyr840 || yy2arr840 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressBackend) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym849 := z.DecBinary() - _ = yym849 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct850 := r.ContainerType() - if yyct850 == codecSelferValueTypeMap1234 { - yyl850 := r.ReadMapStart() - if yyl850 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl850, d) - } - } else if yyct850 == codecSelferValueTypeArray1234 { - yyl850 := r.ReadArrayStart() - if yyl850 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl850, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressBackend) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys851Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys851Slc - var yyhl851 bool = l >= 0 - for yyj851 := 0; ; yyj851++ { - if yyhl851 { - if yyj851 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys851Slc = r.DecodeBytes(yys851Slc, true, true) - yys851 := string(yys851Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys851 { - case "serviceName": - if r.TryDecodeAsNil() { - x.ServiceName = "" - } else { - x.ServiceName = string(r.DecodeString()) - } - case "servicePort": - if r.TryDecodeAsNil() { - x.ServicePort = pkg5_intstr.IntOrString{} - } else { - yyv853 := &x.ServicePort - 
yym854 := z.DecBinary() - _ = yym854 - if false { - } else if z.HasExtensions() && z.DecExt(yyv853) { - } else if !yym854 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv853) - } else { - z.DecFallback(yyv853, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys851) - } // end switch yys851 - } // end for yyj851 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressBackend) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj855 int - var yyb855 bool - var yyhl855 bool = l >= 0 - yyj855++ - if yyhl855 { - yyb855 = yyj855 > l - } else { - yyb855 = r.CheckBreak() - } - if yyb855 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServiceName = "" - } else { - x.ServiceName = string(r.DecodeString()) - } - yyj855++ - if yyhl855 { - yyb855 = yyj855 > l - } else { - yyb855 = r.CheckBreak() - } - if yyb855 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServicePort = pkg5_intstr.IntOrString{} - } else { - yyv857 := &x.ServicePort - yym858 := z.DecBinary() - _ = yym858 - if false { - } else if z.HasExtensions() && z.DecExt(yyv857) { - } else if !yym858 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv857) - } else { - z.DecFallback(yyv857, false) - } - } - for { - yyj855++ - if yyhl855 { - yyb855 = yyj855 > l - } else { - yyb855 = r.CheckBreak() - } - if yyb855 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj855-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicaSet) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym859 := z.EncBinary() - _ = yym859 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep860 := !z.EncBinary() - yy2arr860 := z.EncBasicHandle().StructToArray - var yyq860 [5]bool - _, _, _ = yysep860, yyq860, yy2arr860 - const yyr860 bool = false - yyq860[0] = x.Kind != "" - yyq860[1] = x.APIVersion != "" - yyq860[2] = true - yyq860[3] = true - yyq860[4] = true - var yynn860 int - if yyr860 || yy2arr860 { - r.EncodeArrayStart(5) - } else { - yynn860 = 0 - for _, b := range yyq860 { - if b { - yynn860++ - } - } - r.EncodeMapStart(yynn860) - yynn860 = 0 - } - if yyr860 || yy2arr860 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq860[0] { - yym862 := z.EncBinary() - _ = yym862 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq860[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym863 := z.EncBinary() - _ = yym863 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr860 || yy2arr860 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq860[1] { - yym865 := z.EncBinary() - _ = yym865 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq860[1] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym866 := z.EncBinary() - _ = yym866 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr860 || yy2arr860 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq860[2] { - yy868 := &x.ObjectMeta - yy868.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq860[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy869 := &x.ObjectMeta - yy869.CodecEncodeSelf(e) - } - } - if yyr860 || yy2arr860 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq860[3] { - yy871 := &x.Spec - yy871.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq860[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy872 := &x.Spec - yy872.CodecEncodeSelf(e) - } - } - if yyr860 || yy2arr860 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq860[4] { - yy874 := &x.Status - yy874.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq860[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy875 := &x.Status - yy875.CodecEncodeSelf(e) - } - } - if yyr860 || yy2arr860 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicaSet) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym876 := z.DecBinary() - _ = yym876 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct877 := r.ContainerType() - if yyct877 == codecSelferValueTypeMap1234 { - yyl877 := r.ReadMapStart() - if yyl877 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl877, d) - } - } else if yyct877 == codecSelferValueTypeArray1234 { - yyl877 := r.ReadArrayStart() - if yyl877 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl877, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicaSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys878Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys878Slc - var yyhl878 bool = l >= 0 - for yyj878 := 0; ; yyj878++ { - if yyhl878 { - if yyj878 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys878Slc = r.DecodeBytes(yys878Slc, true, true) - yys878 := string(yys878Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys878 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = 
pkg2_api.ObjectMeta{} - } else { - yyv881 := &x.ObjectMeta - yyv881.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ReplicaSetSpec{} - } else { - yyv882 := &x.Spec - yyv882.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ReplicaSetStatus{} - } else { - yyv883 := &x.Status - yyv883.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys878) - } // end switch yys878 - } // end for yyj878 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicaSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj884 int - var yyb884 bool - var yyhl884 bool = l >= 0 - yyj884++ - if yyhl884 { - yyb884 = yyj884 > l - } else { - yyb884 = r.CheckBreak() - } - if yyb884 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj884++ - if yyhl884 { - yyb884 = yyj884 > l - } else { - yyb884 = r.CheckBreak() - } - if yyb884 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj884++ - if yyhl884 { - yyb884 = yyj884 > l - } else { - yyb884 = r.CheckBreak() - } - if yyb884 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv887 := &x.ObjectMeta - yyv887.CodecDecodeSelf(d) - } - yyj884++ - if yyhl884 { - yyb884 = yyj884 > l - } else { - yyb884 = r.CheckBreak() - } - if yyb884 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ReplicaSetSpec{} - } else { - yyv888 := &x.Spec - yyv888.CodecDecodeSelf(d) - } - yyj884++ - if yyhl884 { - yyb884 = yyj884 > l - } else { - yyb884 = r.CheckBreak() - } - if yyb884 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ReplicaSetStatus{} - } else { - yyv889 := &x.Status - yyv889.CodecDecodeSelf(d) - } - for { - yyj884++ - if yyhl884 { - yyb884 = yyj884 > l - } else { - yyb884 = r.CheckBreak() - } - if yyb884 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj884-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicaSetList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym890 := z.EncBinary() - _ = yym890 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep891 := !z.EncBinary() - yy2arr891 := z.EncBasicHandle().StructToArray - var yyq891 [4]bool - _, _, _ = yysep891, yyq891, yy2arr891 - const yyr891 bool = false - yyq891[0] = x.Kind != "" - yyq891[1] = x.APIVersion != "" - yyq891[2] = true - var yynn891 int - if yyr891 || yy2arr891 { - r.EncodeArrayStart(4) - } else { - yynn891 = 1 - for _, b := range yyq891 { - if b { - yynn891++ - } - } - r.EncodeMapStart(yynn891) - yynn891 = 0 - } 
- if yyr891 || yy2arr891 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq891[0] { - yym893 := z.EncBinary() - _ = yym893 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq891[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym894 := z.EncBinary() - _ = yym894 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr891 || yy2arr891 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq891[1] { - yym896 := z.EncBinary() - _ = yym896 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq891[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym897 := z.EncBinary() - _ = yym897 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr891 || yy2arr891 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq891[2] { - yy899 := &x.ListMeta - yym900 := z.EncBinary() - _ = yym900 - if false { - } else if z.HasExtensions() && z.EncExt(yy899) { - } else { - z.EncFallback(yy899) - } - } else { - r.EncodeNil() - } - } else { - if yyq891[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy901 := &x.ListMeta - yym902 := z.EncBinary() - _ = yym902 - if false { - } else if z.HasExtensions() && z.EncExt(yy901) { - } else { - z.EncFallback(yy901) - } - } - } - if yyr891 || yy2arr891 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym904 := z.EncBinary() - _ = yym904 - if false { - } else { - h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym905 := z.EncBinary() - _ = yym905 - if false { - } else { - h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e) - } - } - } - if yyr891 || yy2arr891 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicaSetList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym906 := z.DecBinary() - _ = yym906 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct907 := r.ContainerType() - if yyct907 == codecSelferValueTypeMap1234 { - yyl907 := r.ReadMapStart() - if yyl907 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl907, d) - } - } else if yyct907 == codecSelferValueTypeArray1234 { - yyl907 := r.ReadArrayStart() - if yyl907 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl907, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x 
*ReplicaSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys908Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys908Slc - var yyhl908 bool = l >= 0 - for yyj908 := 0; ; yyj908++ { - if yyhl908 { - if yyj908 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys908Slc = r.DecodeBytes(yys908Slc, true, true) - yys908 := string(yys908Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys908 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv911 := &x.ListMeta - yym912 := z.DecBinary() - _ = yym912 - if false { - } else if z.HasExtensions() && z.DecExt(yyv911) { - } else { - z.DecFallback(yyv911, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv913 := &x.Items - yym914 := z.DecBinary() - _ = yym914 - if false { - } else { - h.decSliceReplicaSet((*[]ReplicaSet)(yyv913), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys908) - } // end switch yys908 - } // end for yyj908 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicaSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj915 int - var yyb915 bool - var yyhl915 bool = l >= 0 - yyj915++ - if yyhl915 { - yyb915 = yyj915 > l - } else { - yyb915 = r.CheckBreak() - } - if yyb915 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj915++ - if yyhl915 { - yyb915 = yyj915 > l - } else { - yyb915 = r.CheckBreak() - } - if yyb915 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj915++ - if yyhl915 { - yyb915 = yyj915 > l - } else { - yyb915 = r.CheckBreak() - } - if yyb915 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv918 := &x.ListMeta - yym919 := z.DecBinary() - _ = yym919 - if false { - } else if z.HasExtensions() && z.DecExt(yyv918) { - } else { - z.DecFallback(yyv918, false) - } - } - yyj915++ - if yyhl915 { - yyb915 = yyj915 > l - } else { - yyb915 = r.CheckBreak() - } - if yyb915 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv920 := &x.Items - yym921 := z.DecBinary() - _ = yym921 - if false { - } else { - h.decSliceReplicaSet((*[]ReplicaSet)(yyv920), d) - } - } - for { - yyj915++ - if yyhl915 { - yyb915 = yyj915 > l - } else { - yyb915 = r.CheckBreak() - } - if yyb915 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj915-1, "") 
- } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicaSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym922 := z.EncBinary() - _ = yym922 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep923 := !z.EncBinary() - yy2arr923 := z.EncBasicHandle().StructToArray - var yyq923 [4]bool - _, _, _ = yysep923, yyq923, yy2arr923 - const yyr923 bool = false - yyq923[1] = x.MinReadySeconds != 0 - yyq923[2] = x.Selector != nil - yyq923[3] = true - var yynn923 int - if yyr923 || yy2arr923 { - r.EncodeArrayStart(4) - } else { - yynn923 = 1 - for _, b := range yyq923 { - if b { - yynn923++ - } - } - r.EncodeMapStart(yynn923) - yynn923 = 0 - } - if yyr923 || yy2arr923 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym925 := z.EncBinary() - _ = yym925 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym926 := z.EncBinary() - _ = yym926 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr923 || yy2arr923 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq923[1] { - yym928 := z.EncBinary() - _ = yym928 - if false { - } else { - r.EncodeInt(int64(x.MinReadySeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq923[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym929 := z.EncBinary() - _ = yym929 - if false { - } else { - r.EncodeInt(int64(x.MinReadySeconds)) - } - } - } - if yyr923 || yy2arr923 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq923[2] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym931 := z.EncBinary() - _ = yym931 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq923[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym932 := z.EncBinary() - _ = yym932 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr923 || yy2arr923 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq923[3] { - yy934 := &x.Template - yy934.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq923[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy935 := &x.Template - yy935.CodecEncodeSelf(e) - } - } - if yyr923 || yy2arr923 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicaSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym936 := z.DecBinary() - _ = yym936 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct937 := 
r.ContainerType() - if yyct937 == codecSelferValueTypeMap1234 { - yyl937 := r.ReadMapStart() - if yyl937 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl937, d) - } - } else if yyct937 == codecSelferValueTypeArray1234 { - yyl937 := r.ReadArrayStart() - if yyl937 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl937, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicaSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys938Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys938Slc - var yyhl938 bool = l >= 0 - for yyj938 := 0; ; yyj938++ { - if yyhl938 { - if yyj938 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys938Slc = r.DecodeBytes(yys938Slc, true, true) - yys938 := string(yys938Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys938 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "minReadySeconds": - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - x.MinReadySeconds = int32(r.DecodeInt(32)) - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym942 := z.DecBinary() - _ = yym942 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv943 := &x.Template - yyv943.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys938) - } // end switch yys938 - } // end for yyj938 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicaSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj944 int - var yyb944 bool - var yyhl944 bool = l >= 0 - yyj944++ - if yyhl944 { - yyb944 = yyj944 > l - } else { - yyb944 = r.CheckBreak() - } - if yyb944 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - yyj944++ - if yyhl944 { - yyb944 = yyj944 > l - } else { - yyb944 = r.CheckBreak() - } - if yyb944 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - x.MinReadySeconds = int32(r.DecodeInt(32)) - } - yyj944++ - if yyhl944 { - yyb944 = yyj944 > l - } else { - yyb944 = r.CheckBreak() - } - if yyb944 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym948 := z.DecBinary() - _ = yym948 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj944++ - 
if yyhl944 { - yyb944 = yyj944 > l - } else { - yyb944 = r.CheckBreak() - } - if yyb944 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv949 := &x.Template - yyv949.CodecDecodeSelf(d) - } - for { - yyj944++ - if yyhl944 { - yyb944 = yyj944 > l - } else { - yyb944 = r.CheckBreak() - } - if yyb944 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj944-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym950 := z.EncBinary() - _ = yym950 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep951 := !z.EncBinary() - yy2arr951 := z.EncBasicHandle().StructToArray - var yyq951 [6]bool - _, _, _ = yysep951, yyq951, yy2arr951 - const yyr951 bool = false - yyq951[1] = x.FullyLabeledReplicas != 0 - yyq951[2] = x.ReadyReplicas != 0 - yyq951[3] = x.AvailableReplicas != 0 - yyq951[4] = x.ObservedGeneration != 0 - yyq951[5] = len(x.Conditions) != 0 - var yynn951 int - if yyr951 || yy2arr951 { - r.EncodeArrayStart(6) - } else { - yynn951 = 1 - for _, b := range yyq951 { - if b { - yynn951++ - } - } - r.EncodeMapStart(yynn951) - yynn951 = 0 - } - if yyr951 || yy2arr951 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym953 := z.EncBinary() - _ = yym953 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym954 := z.EncBinary() - _ = yym954 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr951 || yy2arr951 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq951[1] { - yym956 := z.EncBinary() - _ = yym956 - if false { - } else { - r.EncodeInt(int64(x.FullyLabeledReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq951[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym957 := z.EncBinary() - _ = yym957 - if false { - } else { - r.EncodeInt(int64(x.FullyLabeledReplicas)) - } - } - } - if yyr951 || yy2arr951 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq951[2] { - yym959 := z.EncBinary() - _ = yym959 - if false { - } else { - r.EncodeInt(int64(x.ReadyReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq951[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readyReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym960 := z.EncBinary() - _ = yym960 - if false { - } else { - r.EncodeInt(int64(x.ReadyReplicas)) - } - } - } - if yyr951 || yy2arr951 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq951[3] { - yym962 := z.EncBinary() - _ = yym962 - if false { - } else { - r.EncodeInt(int64(x.AvailableReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq951[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("availableReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym963 := z.EncBinary() - _ = yym963 - if false { - } else { - r.EncodeInt(int64(x.AvailableReplicas)) - } - } - } - if yyr951 || yy2arr951 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq951[4] { - yym965 := z.EncBinary() - _ = yym965 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq951[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym966 := z.EncBinary() - _ = yym966 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } - } - if yyr951 || yy2arr951 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq951[5] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym968 := z.EncBinary() - _ = yym968 - if false { - } else { - h.encSliceReplicaSetCondition(([]ReplicaSetCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq951[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym969 := z.EncBinary() - _ = yym969 - if false { - } else { - h.encSliceReplicaSetCondition(([]ReplicaSetCondition)(x.Conditions), e) - } - } - } - } - if yyr951 || yy2arr951 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicaSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym970 := z.DecBinary() - _ = yym970 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct971 := r.ContainerType() - if yyct971 == codecSelferValueTypeMap1234 { - yyl971 := r.ReadMapStart() - if yyl971 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl971, d) - } - } else if yyct971 == codecSelferValueTypeArray1234 { - yyl971 := r.ReadArrayStart() - if yyl971 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl971, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicaSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys972Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys972Slc - var yyhl972 bool = l >= 0 - for yyj972 := 0; ; yyj972++ { - if yyhl972 { - if yyj972 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys972Slc = r.DecodeBytes(yys972Slc, true, true) - yys972 := string(yys972Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys972 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "fullyLabeledReplicas": - if r.TryDecodeAsNil() { - x.FullyLabeledReplicas = 0 - } else { - x.FullyLabeledReplicas = int32(r.DecodeInt(32)) - } - case "readyReplicas": - if r.TryDecodeAsNil() { - x.ReadyReplicas = 0 - } else { - x.ReadyReplicas = int32(r.DecodeInt(32)) - } - case 
"availableReplicas": - if r.TryDecodeAsNil() { - x.AvailableReplicas = 0 - } else { - x.AvailableReplicas = int32(r.DecodeInt(32)) - } - case "observedGeneration": - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) - } - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv978 := &x.Conditions - yym979 := z.DecBinary() - _ = yym979 - if false { - } else { - h.decSliceReplicaSetCondition((*[]ReplicaSetCondition)(yyv978), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys972) - } // end switch yys972 - } // end for yyj972 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj980 int - var yyb980 bool - var yyhl980 bool = l >= 0 - yyj980++ - if yyhl980 { - yyb980 = yyj980 > l - } else { - yyb980 = r.CheckBreak() - } - if yyb980 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - yyj980++ - if yyhl980 { - yyb980 = yyj980 > l - } else { - yyb980 = r.CheckBreak() - } - if yyb980 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FullyLabeledReplicas = 0 - } else { - x.FullyLabeledReplicas = int32(r.DecodeInt(32)) - } - yyj980++ - if yyhl980 { - yyb980 = yyj980 > l - } else { - yyb980 = r.CheckBreak() - } - if yyb980 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadyReplicas = 0 - } else { - x.ReadyReplicas = int32(r.DecodeInt(32)) - } - yyj980++ - if yyhl980 { - yyb980 = yyj980 > l - } else { - yyb980 = r.CheckBreak() - } - if yyb980 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AvailableReplicas = 0 - } else { - x.AvailableReplicas = int32(r.DecodeInt(32)) - } - yyj980++ - if yyhl980 { - yyb980 = yyj980 > l - } else { - yyb980 = r.CheckBreak() - } - if yyb980 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) - } - yyj980++ - if yyhl980 { - yyb980 = yyj980 > l - } else { - yyb980 = r.CheckBreak() - } - if yyb980 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv986 := &x.Conditions - yym987 := z.DecBinary() - _ = yym987 - if false { - } else { - h.decSliceReplicaSetCondition((*[]ReplicaSetCondition)(yyv986), d) - } - } - for { - yyj980++ - if yyhl980 { - yyb980 = yyj980 > l - } else { - yyb980 = r.CheckBreak() - } - if yyb980 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj980-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ReplicaSetConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym988 := z.EncBinary() - _ = yym988 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ReplicaSetConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym989 := z.DecBinary() - _ = yym989 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ReplicaSetCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym990 := z.EncBinary() - _ = yym990 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep991 := !z.EncBinary() - yy2arr991 := z.EncBasicHandle().StructToArray - var yyq991 [5]bool - _, _, _ = yysep991, yyq991, yy2arr991 - const yyr991 bool = false - yyq991[2] = true - yyq991[3] = x.Reason != "" - yyq991[4] = x.Message != "" - var yynn991 int - if yyr991 || yy2arr991 { - r.EncodeArrayStart(5) - } else { - yynn991 = 2 - for _, b := range yyq991 { - if b { - yynn991++ - } - } - r.EncodeMapStart(yynn991) - yynn991 = 0 - } - if yyr991 || yy2arr991 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr991 || yy2arr991 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym994 := z.EncBinary() - _ = yym994 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym995 := z.EncBinary() - _ = yym995 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } - } - if yyr991 || yy2arr991 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq991[2] { - yy997 := &x.LastTransitionTime - yym998 := z.EncBinary() - _ = yym998 - if false { - } else if z.HasExtensions() && z.EncExt(yy997) { - } else if yym998 { - z.EncBinaryMarshal(yy997) - } else if !yym998 && z.IsJSONHandle() { - z.EncJSONMarshal(yy997) - } else { - z.EncFallback(yy997) - } - } else { - r.EncodeNil() - } - } else { - if yyq991[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy999 := &x.LastTransitionTime - yym1000 := z.EncBinary() - _ = yym1000 - if false { - } else if z.HasExtensions() && z.EncExt(yy999) { - } else if yym1000 { - z.EncBinaryMarshal(yy999) - } else if !yym1000 && z.IsJSONHandle() { - z.EncJSONMarshal(yy999) - } else { - z.EncFallback(yy999) - } - } - } - if yyr991 || yy2arr991 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq991[3] { - yym1002 := z.EncBinary() - _ = yym1002 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq991[3] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1003 := z.EncBinary() - _ = yym1003 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr991 || yy2arr991 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq991[4] { - yym1005 := z.EncBinary() - _ = yym1005 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq991[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1006 := z.EncBinary() - _ = yym1006 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr991 || yy2arr991 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicaSetCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1007 := z.DecBinary() - _ = yym1007 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1008 := r.ContainerType() - if yyct1008 == codecSelferValueTypeMap1234 { - yyl1008 := r.ReadMapStart() - if yyl1008 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1008, d) - } - } else if yyct1008 == codecSelferValueTypeArray1234 { - yyl1008 := r.ReadArrayStart() - if yyl1008 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1008, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicaSetCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1009Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1009Slc - var yyhl1009 bool = l >= 0 - for yyj1009 := 0; ; yyj1009++ { - if yyhl1009 { - if yyj1009 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1009Slc = r.DecodeBytes(yys1009Slc, true, true) - yys1009 := string(yys1009Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1009 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ReplicaSetConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg2_api.ConditionStatus(r.DecodeString()) - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} - } else { - yyv1012 := &x.LastTransitionTime - yym1013 := z.DecBinary() - _ = yym1013 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1012) { - } else if yym1013 { - z.DecBinaryUnmarshal(yyv1012) - } else if !yym1013 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1012) - } else { - z.DecFallback(yyv1012, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys1009) - } // end 
switch yys1009 - } // end for yyj1009 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicaSetCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1016 int - var yyb1016 bool - var yyhl1016 bool = l >= 0 - yyj1016++ - if yyhl1016 { - yyb1016 = yyj1016 > l - } else { - yyb1016 = r.CheckBreak() - } - if yyb1016 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ReplicaSetConditionType(r.DecodeString()) - } - yyj1016++ - if yyhl1016 { - yyb1016 = yyj1016 > l - } else { - yyb1016 = r.CheckBreak() - } - if yyb1016 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg2_api.ConditionStatus(r.DecodeString()) - } - yyj1016++ - if yyhl1016 { - yyb1016 = yyj1016 > l - } else { - yyb1016 = r.CheckBreak() - } - if yyb1016 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} - } else { - yyv1019 := &x.LastTransitionTime - yym1020 := z.DecBinary() - _ = yym1020 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1019) { - } else if yym1020 { - z.DecBinaryUnmarshal(yyv1019) - } else if !yym1020 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1019) - } else { - z.DecFallback(yyv1019, false) - } - } - yyj1016++ - if yyhl1016 { - yyb1016 = yyj1016 > l - } else { - yyb1016 = r.CheckBreak() - } - if yyb1016 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj1016++ - if yyhl1016 { - yyb1016 = yyj1016 > l - } else { - yyb1016 = r.CheckBreak() - } - if yyb1016 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj1016++ - if yyhl1016 { - yyb1016 = yyj1016 > l - } else { - yyb1016 = r.CheckBreak() - } - if yyb1016 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1016-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodSecurityPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1023 := z.EncBinary() - _ = yym1023 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1024 := !z.EncBinary() - yy2arr1024 := z.EncBasicHandle().StructToArray - var yyq1024 [4]bool - _, _, _ = yysep1024, yyq1024, yy2arr1024 - const yyr1024 bool = false - yyq1024[0] = x.Kind != "" - yyq1024[1] = x.APIVersion != "" - yyq1024[2] = true - yyq1024[3] = true - var yynn1024 int - if yyr1024 || yy2arr1024 { - r.EncodeArrayStart(4) - } else { - yynn1024 = 0 - for _, b := range yyq1024 { - if b { - yynn1024++ - } - } - r.EncodeMapStart(yynn1024) - yynn1024 = 0 - } - if yyr1024 || yy2arr1024 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1024[0] { - yym1026 := z.EncBinary() - _ = yym1026 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1024[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1027 := z.EncBinary() - _ = yym1027 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr1024 || yy2arr1024 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1024[1] { - yym1029 := z.EncBinary() - _ = yym1029 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1024[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1030 := z.EncBinary() - _ = yym1030 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr1024 || yy2arr1024 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1024[2] { - yy1032 := &x.ObjectMeta - yy1032.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq1024[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1033 := &x.ObjectMeta - yy1033.CodecEncodeSelf(e) - } - } - if yyr1024 || yy2arr1024 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1024[3] { - yy1035 := &x.Spec - yy1035.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq1024[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1036 := &x.Spec - yy1036.CodecEncodeSelf(e) - } - } - if yyr1024 || yy2arr1024 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSecurityPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1037 := z.DecBinary() - _ = yym1037 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1038 := r.ContainerType() - if yyct1038 == codecSelferValueTypeMap1234 { - yyl1038 := r.ReadMapStart() - if yyl1038 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1038, d) - } - } else if yyct1038 == codecSelferValueTypeArray1234 { - yyl1038 := r.ReadArrayStart() - if yyl1038 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1038, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSecurityPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1039Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1039Slc - var yyhl1039 bool = l >= 0 - for yyj1039 := 0; ; yyj1039++ { - if yyhl1039 { - if yyj1039 >= l { - break - } - } else { - if r.CheckBreak() { - 
break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1039Slc = r.DecodeBytes(yys1039Slc, true, true) - yys1039 := string(yys1039Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1039 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv1042 := &x.ObjectMeta - yyv1042.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PodSecurityPolicySpec{} - } else { - yyv1043 := &x.Spec - yyv1043.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys1039) - } // end switch yys1039 - } // end for yyj1039 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSecurityPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1044 int - var yyb1044 bool - var yyhl1044 bool = l >= 0 - yyj1044++ - if yyhl1044 { - yyb1044 = yyj1044 > l - } else { - yyb1044 = r.CheckBreak() - } - if yyb1044 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj1044++ - if yyhl1044 { - yyb1044 = yyj1044 > l - } else { - yyb1044 = r.CheckBreak() - } - if yyb1044 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj1044++ - if yyhl1044 { - yyb1044 = yyj1044 > l - } else { - yyb1044 = r.CheckBreak() - } - if yyb1044 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv1047 := &x.ObjectMeta - yyv1047.CodecDecodeSelf(d) - } - yyj1044++ - if yyhl1044 { - yyb1044 = yyj1044 > l - } else { - yyb1044 = r.CheckBreak() - } - if yyb1044 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PodSecurityPolicySpec{} - } else { - yyv1048 := &x.Spec - yyv1048.CodecDecodeSelf(d) - } - for { - yyj1044++ - if yyhl1044 { - yyb1044 = yyj1044 > l - } else { - yyb1044 = r.CheckBreak() - } - if yyb1044 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1044-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1049 := z.EncBinary() - _ = yym1049 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1050 := !z.EncBinary() - yy2arr1050 := z.EncBasicHandle().StructToArray - var yyq1050 [14]bool - _, _, _ = yysep1050, yyq1050, yy2arr1050 - const yyr1050 bool = false - yyq1050[0] = x.Privileged != false - yyq1050[1] = len(x.DefaultAddCapabilities) != 0 - yyq1050[2] = len(x.RequiredDropCapabilities) != 0 - yyq1050[3] = 
len(x.AllowedCapabilities) != 0 - yyq1050[4] = len(x.Volumes) != 0 - yyq1050[5] = x.HostNetwork != false - yyq1050[6] = len(x.HostPorts) != 0 - yyq1050[7] = x.HostPID != false - yyq1050[8] = x.HostIPC != false - yyq1050[13] = x.ReadOnlyRootFilesystem != false - var yynn1050 int - if yyr1050 || yy2arr1050 { - r.EncodeArrayStart(14) - } else { - yynn1050 = 4 - for _, b := range yyq1050 { - if b { - yynn1050++ - } - } - r.EncodeMapStart(yynn1050) - yynn1050 = 0 - } - if yyr1050 || yy2arr1050 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1050[0] { - yym1052 := z.EncBinary() - _ = yym1052 - if false { - } else { - r.EncodeBool(bool(x.Privileged)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq1050[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("privileged")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1053 := z.EncBinary() - _ = yym1053 - if false { - } else { - r.EncodeBool(bool(x.Privileged)) - } - } - } - if yyr1050 || yy2arr1050 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1050[1] { - if x.DefaultAddCapabilities == nil { - r.EncodeNil() - } else { - yym1055 := z.EncBinary() - _ = yym1055 - if false { - } else { - h.encSliceapi_Capability(([]pkg2_api.Capability)(x.DefaultAddCapabilities), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1050[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("defaultAddCapabilities")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DefaultAddCapabilities == nil { - r.EncodeNil() - } else { - yym1056 := z.EncBinary() - _ = yym1056 - if false { - } else { - h.encSliceapi_Capability(([]pkg2_api.Capability)(x.DefaultAddCapabilities), e) - } - } - } - } - if yyr1050 || yy2arr1050 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1050[2] { - if x.RequiredDropCapabilities == nil { - r.EncodeNil() - } else { - yym1058 := z.EncBinary() - _ = yym1058 - if false { - } else { - h.encSliceapi_Capability(([]pkg2_api.Capability)(x.RequiredDropCapabilities), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1050[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("requiredDropCapabilities")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RequiredDropCapabilities == nil { - r.EncodeNil() - } else { - yym1059 := z.EncBinary() - _ = yym1059 - if false { - } else { - h.encSliceapi_Capability(([]pkg2_api.Capability)(x.RequiredDropCapabilities), e) - } - } - } - } - if yyr1050 || yy2arr1050 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1050[3] { - if x.AllowedCapabilities == nil { - r.EncodeNil() - } else { - yym1061 := z.EncBinary() - _ = yym1061 - if false { - } else { - h.encSliceapi_Capability(([]pkg2_api.Capability)(x.AllowedCapabilities), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1050[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("allowedCapabilities")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AllowedCapabilities == nil { - r.EncodeNil() - } else { - yym1062 := z.EncBinary() - _ = yym1062 - if false { - } else { - h.encSliceapi_Capability(([]pkg2_api.Capability)(x.AllowedCapabilities), e) - } - } - } - } - if yyr1050 || yy2arr1050 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1050[4] { - if x.Volumes == nil { - r.EncodeNil() - } else { - yym1064 := z.EncBinary() - _ = yym1064 - if false { - } else { - h.encSliceFSType(([]FSType)(x.Volumes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1050[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Volumes == nil { - r.EncodeNil() - } else { - yym1065 := z.EncBinary() - _ = yym1065 - if false { - } else { - h.encSliceFSType(([]FSType)(x.Volumes), e) - } - } - } - } - if yyr1050 || yy2arr1050 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1050[5] { - yym1067 := z.EncBinary() - _ = yym1067 - if false { - } else { - r.EncodeBool(bool(x.HostNetwork)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq1050[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostNetwork")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1068 := z.EncBinary() - _ = yym1068 - if false { - } else { - r.EncodeBool(bool(x.HostNetwork)) - } - } - } - if yyr1050 || yy2arr1050 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1050[6] { - if x.HostPorts == nil { - r.EncodeNil() - } else { - yym1070 := z.EncBinary() - _ = yym1070 - if false { - } else { - h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1050[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPorts")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HostPorts == nil { - r.EncodeNil() - } else { - yym1071 := z.EncBinary() - _ = yym1071 - if false { - } else { - h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) - } - } - } - } - if yyr1050 || yy2arr1050 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1050[7] { - yym1073 := z.EncBinary() - _ = yym1073 - if false { - } else { - r.EncodeBool(bool(x.HostPID)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq1050[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1074 := z.EncBinary() - _ = yym1074 - if false { - } else { - r.EncodeBool(bool(x.HostPID)) - } - } - } - if yyr1050 || yy2arr1050 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1050[8] { - yym1076 := z.EncBinary() - _ = yym1076 - if false { - } else { - r.EncodeBool(bool(x.HostIPC)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq1050[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostIPC")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1077 := z.EncBinary() - _ = yym1077 - if false { - } else { - r.EncodeBool(bool(x.HostIPC)) - } - } - } - if yyr1050 || yy2arr1050 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1079 := &x.SELinux - yy1079.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("seLinux")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1080 := &x.SELinux - yy1080.CodecEncodeSelf(e) - } - if yyr1050 || yy2arr1050 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1082 := &x.RunAsUser - yy1082.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1083 := &x.RunAsUser - yy1083.CodecEncodeSelf(e) - } - if yyr1050 || yy2arr1050 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1085 := &x.SupplementalGroups - yy1085.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("supplementalGroups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1086 := &x.SupplementalGroups - yy1086.CodecEncodeSelf(e) - } - if yyr1050 || yy2arr1050 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1088 := &x.FSGroup - yy1088.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsGroup")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1089 := &x.FSGroup - yy1089.CodecEncodeSelf(e) - } - if yyr1050 || yy2arr1050 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1050[13] { - yym1091 := z.EncBinary() - _ = yym1091 - if false { - } else { - r.EncodeBool(bool(x.ReadOnlyRootFilesystem)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq1050[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnlyRootFilesystem")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1092 := z.EncBinary() - _ = yym1092 - if false { - } else { - r.EncodeBool(bool(x.ReadOnlyRootFilesystem)) - } - } - } - if yyr1050 || yy2arr1050 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSecurityPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1093 := z.DecBinary() - _ = yym1093 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1094 := r.ContainerType() - if yyct1094 == codecSelferValueTypeMap1234 { - yyl1094 := r.ReadMapStart() - if yyl1094 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1094, d) - } - } else if yyct1094 == codecSelferValueTypeArray1234 { - yyl1094 := r.ReadArrayStart() - if yyl1094 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1094, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSecurityPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1095Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1095Slc - var yyhl1095 bool = l >= 0 - for yyj1095 := 0; ; yyj1095++ { - if yyhl1095 { - if yyj1095 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1095Slc = r.DecodeBytes(yys1095Slc, true, true) - yys1095 := string(yys1095Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1095 { - case "privileged": - if r.TryDecodeAsNil() { - x.Privileged = false - } else { - x.Privileged = bool(r.DecodeBool()) - } - case 
"defaultAddCapabilities": - if r.TryDecodeAsNil() { - x.DefaultAddCapabilities = nil - } else { - yyv1097 := &x.DefaultAddCapabilities - yym1098 := z.DecBinary() - _ = yym1098 - if false { - } else { - h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv1097), d) - } - } - case "requiredDropCapabilities": - if r.TryDecodeAsNil() { - x.RequiredDropCapabilities = nil - } else { - yyv1099 := &x.RequiredDropCapabilities - yym1100 := z.DecBinary() - _ = yym1100 - if false { - } else { - h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv1099), d) - } - } - case "allowedCapabilities": - if r.TryDecodeAsNil() { - x.AllowedCapabilities = nil - } else { - yyv1101 := &x.AllowedCapabilities - yym1102 := z.DecBinary() - _ = yym1102 - if false { - } else { - h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv1101), d) - } - } - case "volumes": - if r.TryDecodeAsNil() { - x.Volumes = nil - } else { - yyv1103 := &x.Volumes - yym1104 := z.DecBinary() - _ = yym1104 - if false { - } else { - h.decSliceFSType((*[]FSType)(yyv1103), d) - } - } - case "hostNetwork": - if r.TryDecodeAsNil() { - x.HostNetwork = false - } else { - x.HostNetwork = bool(r.DecodeBool()) - } - case "hostPorts": - if r.TryDecodeAsNil() { - x.HostPorts = nil - } else { - yyv1106 := &x.HostPorts - yym1107 := z.DecBinary() - _ = yym1107 - if false { - } else { - h.decSliceHostPortRange((*[]HostPortRange)(yyv1106), d) - } - } - case "hostPID": - if r.TryDecodeAsNil() { - x.HostPID = false - } else { - x.HostPID = bool(r.DecodeBool()) - } - case "hostIPC": - if r.TryDecodeAsNil() { - x.HostIPC = false - } else { - x.HostIPC = bool(r.DecodeBool()) - } - case "seLinux": - if r.TryDecodeAsNil() { - x.SELinux = SELinuxStrategyOptions{} - } else { - yyv1110 := &x.SELinux - yyv1110.CodecDecodeSelf(d) - } - case "runAsUser": - if r.TryDecodeAsNil() { - x.RunAsUser = RunAsUserStrategyOptions{} - } else { - yyv1111 := &x.RunAsUser - yyv1111.CodecDecodeSelf(d) - } - case "supplementalGroups": - if r.TryDecodeAsNil() { - x.SupplementalGroups = SupplementalGroupsStrategyOptions{} - } else { - yyv1112 := &x.SupplementalGroups - yyv1112.CodecDecodeSelf(d) - } - case "fsGroup": - if r.TryDecodeAsNil() { - x.FSGroup = FSGroupStrategyOptions{} - } else { - yyv1113 := &x.FSGroup - yyv1113.CodecDecodeSelf(d) - } - case "readOnlyRootFilesystem": - if r.TryDecodeAsNil() { - x.ReadOnlyRootFilesystem = false - } else { - x.ReadOnlyRootFilesystem = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys1095) - } // end switch yys1095 - } // end for yyj1095 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1115 int - var yyb1115 bool - var yyhl1115 bool = l >= 0 - yyj1115++ - if yyhl1115 { - yyb1115 = yyj1115 > l - } else { - yyb1115 = r.CheckBreak() - } - if yyb1115 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Privileged = false - } else { - x.Privileged = bool(r.DecodeBool()) - } - yyj1115++ - if yyhl1115 { - yyb1115 = yyj1115 > l - } else { - yyb1115 = r.CheckBreak() - } - if yyb1115 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DefaultAddCapabilities = nil - } else { - yyv1117 := &x.DefaultAddCapabilities - 
yym1118 := z.DecBinary() - _ = yym1118 - if false { - } else { - h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv1117), d) - } - } - yyj1115++ - if yyhl1115 { - yyb1115 = yyj1115 > l - } else { - yyb1115 = r.CheckBreak() - } - if yyb1115 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RequiredDropCapabilities = nil - } else { - yyv1119 := &x.RequiredDropCapabilities - yym1120 := z.DecBinary() - _ = yym1120 - if false { - } else { - h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv1119), d) - } - } - yyj1115++ - if yyhl1115 { - yyb1115 = yyj1115 > l - } else { - yyb1115 = r.CheckBreak() - } - if yyb1115 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AllowedCapabilities = nil - } else { - yyv1121 := &x.AllowedCapabilities - yym1122 := z.DecBinary() - _ = yym1122 - if false { - } else { - h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv1121), d) - } - } - yyj1115++ - if yyhl1115 { - yyb1115 = yyj1115 > l - } else { - yyb1115 = r.CheckBreak() - } - if yyb1115 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Volumes = nil - } else { - yyv1123 := &x.Volumes - yym1124 := z.DecBinary() - _ = yym1124 - if false { - } else { - h.decSliceFSType((*[]FSType)(yyv1123), d) - } - } - yyj1115++ - if yyhl1115 { - yyb1115 = yyj1115 > l - } else { - yyb1115 = r.CheckBreak() - } - if yyb1115 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostNetwork = false - } else { - x.HostNetwork = bool(r.DecodeBool()) - } - yyj1115++ - if yyhl1115 { - yyb1115 = yyj1115 > l - } else { - yyb1115 = r.CheckBreak() - } - if yyb1115 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostPorts = nil - } else { - yyv1126 := &x.HostPorts - yym1127 := z.DecBinary() - _ = yym1127 - if false { - } else { - h.decSliceHostPortRange((*[]HostPortRange)(yyv1126), d) - } - } - yyj1115++ - if yyhl1115 { - yyb1115 = yyj1115 > l - } else { - yyb1115 = r.CheckBreak() - } - if yyb1115 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostPID = false - } else { - x.HostPID = bool(r.DecodeBool()) - } - yyj1115++ - if yyhl1115 { - yyb1115 = yyj1115 > l - } else { - yyb1115 = r.CheckBreak() - } - if yyb1115 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostIPC = false - } else { - x.HostIPC = bool(r.DecodeBool()) - } - yyj1115++ - if yyhl1115 { - yyb1115 = yyj1115 > l - } else { - yyb1115 = r.CheckBreak() - } - if yyb1115 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SELinux = SELinuxStrategyOptions{} - } else { - yyv1130 := &x.SELinux - yyv1130.CodecDecodeSelf(d) - } - yyj1115++ - if yyhl1115 { - yyb1115 = yyj1115 > l - } else { - yyb1115 = r.CheckBreak() - } - if yyb1115 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RunAsUser = RunAsUserStrategyOptions{} - } else { - yyv1131 := &x.RunAsUser - yyv1131.CodecDecodeSelf(d) - } - yyj1115++ - if yyhl1115 { - yyb1115 = yyj1115 > l - } else { - yyb1115 = r.CheckBreak() - } - if yyb1115 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SupplementalGroups = SupplementalGroupsStrategyOptions{} - } else { - yyv1132 := &x.SupplementalGroups - yyv1132.CodecDecodeSelf(d) - } - yyj1115++ - if yyhl1115 { - yyb1115 = yyj1115 > l - } else { - yyb1115 = r.CheckBreak() - } - if yyb1115 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSGroup = FSGroupStrategyOptions{} - } else { - yyv1133 := &x.FSGroup - yyv1133.CodecDecodeSelf(d) - } - yyj1115++ - if yyhl1115 { - yyb1115 = yyj1115 > l - } else { - yyb1115 = r.CheckBreak() - } - if yyb1115 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnlyRootFilesystem = false - } else { - x.ReadOnlyRootFilesystem = bool(r.DecodeBool()) - } - for { - yyj1115++ - if yyhl1115 { - yyb1115 = yyj1115 > l - } else { - yyb1115 = r.CheckBreak() - } - if yyb1115 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1115-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HostPortRange) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1135 := z.EncBinary() - _ = yym1135 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1136 := !z.EncBinary() - yy2arr1136 := z.EncBasicHandle().StructToArray - var yyq1136 [2]bool - _, _, _ = yysep1136, yyq1136, yy2arr1136 - const yyr1136 bool = false - var yynn1136 int - if yyr1136 || yy2arr1136 { - r.EncodeArrayStart(2) - } else { - yynn1136 = 2 - for _, b := range yyq1136 { - if b { - yynn1136++ - } - } - r.EncodeMapStart(yynn1136) - yynn1136 = 0 - } - if yyr1136 || yy2arr1136 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1138 := z.EncBinary() - _ = yym1138 - if false { - } else { - r.EncodeInt(int64(x.Min)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("min")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1139 := z.EncBinary() - _ = yym1139 - if false { - } else { - r.EncodeInt(int64(x.Min)) - } - } - if yyr1136 || yy2arr1136 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1141 := z.EncBinary() - _ = yym1141 - if false { - } else { - r.EncodeInt(int64(x.Max)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("max")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1142 := z.EncBinary() - _ = yym1142 - if false { - } else { - r.EncodeInt(int64(x.Max)) - } - } - if yyr1136 || yy2arr1136 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x 
*HostPortRange) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1143 := z.DecBinary() - _ = yym1143 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1144 := r.ContainerType() - if yyct1144 == codecSelferValueTypeMap1234 { - yyl1144 := r.ReadMapStart() - if yyl1144 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1144, d) - } - } else if yyct1144 == codecSelferValueTypeArray1234 { - yyl1144 := r.ReadArrayStart() - if yyl1144 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1144, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HostPortRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1145Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1145Slc - var yyhl1145 bool = l >= 0 - for yyj1145 := 0; ; yyj1145++ { - if yyhl1145 { - if yyj1145 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1145Slc = r.DecodeBytes(yys1145Slc, true, true) - yys1145 := string(yys1145Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1145 { - case "min": - if r.TryDecodeAsNil() { - x.Min = 0 - } else { - x.Min = int(r.DecodeInt(codecSelferBitsize1234)) - } - case "max": - if r.TryDecodeAsNil() { - x.Max = 0 - } else { - x.Max = int(r.DecodeInt(codecSelferBitsize1234)) - } - default: - z.DecStructFieldNotFound(-1, yys1145) - } // end switch yys1145 - } // end for yyj1145 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HostPortRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1148 int - var yyb1148 bool - var yyhl1148 bool = l >= 0 - yyj1148++ - if yyhl1148 { - yyb1148 = yyj1148 > l - } else { - yyb1148 = r.CheckBreak() - } - if yyb1148 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Min = 0 - } else { - x.Min = int(r.DecodeInt(codecSelferBitsize1234)) - } - yyj1148++ - if yyhl1148 { - yyb1148 = yyj1148 > l - } else { - yyb1148 = r.CheckBreak() - } - if yyb1148 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Max = 0 - } else { - x.Max = int(r.DecodeInt(codecSelferBitsize1234)) - } - for { - yyj1148++ - if yyhl1148 { - yyb1148 = yyj1148 > l - } else { - yyb1148 = r.CheckBreak() - } - if yyb1148 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1148-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x FSType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1151 := z.EncBinary() - _ = yym1151 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *FSType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1152 := z.DecBinary() - _ = 
yym1152 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *SELinuxStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1153 := z.EncBinary() - _ = yym1153 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1154 := !z.EncBinary() - yy2arr1154 := z.EncBasicHandle().StructToArray - var yyq1154 [2]bool - _, _, _ = yysep1154, yyq1154, yy2arr1154 - const yyr1154 bool = false - yyq1154[1] = x.SELinuxOptions != nil - var yynn1154 int - if yyr1154 || yy2arr1154 { - r.EncodeArrayStart(2) - } else { - yynn1154 = 1 - for _, b := range yyq1154 { - if b { - yynn1154++ - } - } - r.EncodeMapStart(yynn1154) - yynn1154 = 0 - } - if yyr1154 || yy2arr1154 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Rule.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rule")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Rule.CodecEncodeSelf(e) - } - if yyr1154 || yy2arr1154 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1154[1] { - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq1154[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } - } - if yyr1154 || yy2arr1154 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SELinuxStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1157 := z.DecBinary() - _ = yym1157 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1158 := r.ContainerType() - if yyct1158 == codecSelferValueTypeMap1234 { - yyl1158 := r.ReadMapStart() - if yyl1158 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1158, d) - } - } else if yyct1158 == codecSelferValueTypeArray1234 { - yyl1158 := r.ReadArrayStart() - if yyl1158 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1158, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SELinuxStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1159Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1159Slc - var yyhl1159 bool = l >= 0 - for yyj1159 := 0; ; yyj1159++ { - if yyhl1159 { - if yyj1159 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1159Slc = r.DecodeBytes(yys1159Slc, true, true) - yys1159 := string(yys1159Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1159 { - case "rule": - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = SELinuxStrategy(r.DecodeString()) - } - case "seLinuxOptions": - if 
r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(pkg2_api.SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys1159) - } // end switch yys1159 - } // end for yyj1159 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SELinuxStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1162 int - var yyb1162 bool - var yyhl1162 bool = l >= 0 - yyj1162++ - if yyhl1162 { - yyb1162 = yyj1162 > l - } else { - yyb1162 = r.CheckBreak() - } - if yyb1162 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = SELinuxStrategy(r.DecodeString()) - } - yyj1162++ - if yyhl1162 { - yyb1162 = yyj1162 > l - } else { - yyb1162 = r.CheckBreak() - } - if yyb1162 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(pkg2_api.SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - for { - yyj1162++ - if yyhl1162 { - yyb1162 = yyj1162 > l - } else { - yyb1162 = r.CheckBreak() - } - if yyb1162 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1162-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x SELinuxStrategy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1165 := z.EncBinary() - _ = yym1165 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *SELinuxStrategy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1166 := z.DecBinary() - _ = yym1166 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *RunAsUserStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1167 := z.EncBinary() - _ = yym1167 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1168 := !z.EncBinary() - yy2arr1168 := z.EncBasicHandle().StructToArray - var yyq1168 [2]bool - _, _, _ = yysep1168, yyq1168, yy2arr1168 - const yyr1168 bool = false - yyq1168[1] = len(x.Ranges) != 0 - var yynn1168 int - if yyr1168 || yy2arr1168 { - r.EncodeArrayStart(2) - } else { - yynn1168 = 1 - for _, b := range yyq1168 { - if b { - yynn1168++ - } - } - r.EncodeMapStart(yynn1168) - yynn1168 = 0 - } - if yyr1168 || yy2arr1168 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Rule.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rule")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Rule.CodecEncodeSelf(e) - } - if yyr1168 || yy2arr1168 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1168[1] { - if 
x.Ranges == nil { - r.EncodeNil() - } else { - yym1171 := z.EncBinary() - _ = yym1171 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1168[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ranges")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ranges == nil { - r.EncodeNil() - } else { - yym1172 := z.EncBinary() - _ = yym1172 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } - } - if yyr1168 || yy2arr1168 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RunAsUserStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1173 := z.DecBinary() - _ = yym1173 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1174 := r.ContainerType() - if yyct1174 == codecSelferValueTypeMap1234 { - yyl1174 := r.ReadMapStart() - if yyl1174 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1174, d) - } - } else if yyct1174 == codecSelferValueTypeArray1234 { - yyl1174 := r.ReadArrayStart() - if yyl1174 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1174, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RunAsUserStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1175Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1175Slc - var yyhl1175 bool = l >= 0 - for yyj1175 := 0; ; yyj1175++ { - if yyhl1175 { - if yyj1175 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1175Slc = r.DecodeBytes(yys1175Slc, true, true) - yys1175 := string(yys1175Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1175 { - case "rule": - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = RunAsUserStrategy(r.DecodeString()) - } - case "ranges": - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv1177 := &x.Ranges - yym1178 := z.DecBinary() - _ = yym1178 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv1177), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys1175) - } // end switch yys1175 - } // end for yyj1175 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RunAsUserStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1179 int - var yyb1179 bool - var yyhl1179 bool = l >= 0 - yyj1179++ - if yyhl1179 { - yyb1179 = yyj1179 > l - } else { - yyb1179 = r.CheckBreak() - } - if yyb1179 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = RunAsUserStrategy(r.DecodeString()) - } - yyj1179++ - if yyhl1179 { - yyb1179 = yyj1179 > l - } else { - yyb1179 = r.CheckBreak() - } - if yyb1179 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv1181 := &x.Ranges - yym1182 := z.DecBinary() - _ = yym1182 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv1181), d) - } - } - for { - yyj1179++ - if yyhl1179 { - yyb1179 = yyj1179 > l - } else { - yyb1179 = r.CheckBreak() - } - if yyb1179 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1179-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IDRange) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1183 := z.EncBinary() - _ = yym1183 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1184 := !z.EncBinary() - yy2arr1184 := z.EncBasicHandle().StructToArray - var yyq1184 [2]bool - _, _, _ = yysep1184, yyq1184, yy2arr1184 - const yyr1184 bool = false - var yynn1184 int - if yyr1184 || yy2arr1184 { - r.EncodeArrayStart(2) - } else { - yynn1184 = 2 - for _, b := range yyq1184 { - if b { - yynn1184++ - } - } - r.EncodeMapStart(yynn1184) - yynn1184 = 0 - } - if yyr1184 || yy2arr1184 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1186 := z.EncBinary() - _ = yym1186 - if false { - } else { - r.EncodeInt(int64(x.Min)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("min")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1187 := z.EncBinary() - _ = yym1187 - if false { - } else { - r.EncodeInt(int64(x.Min)) - } - } - if yyr1184 || yy2arr1184 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1189 := z.EncBinary() - _ = yym1189 - if false { - } else { - r.EncodeInt(int64(x.Max)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("max")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1190 := z.EncBinary() - _ = yym1190 - if false { - } else { - r.EncodeInt(int64(x.Max)) - } - } - if yyr1184 || yy2arr1184 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IDRange) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1191 := z.DecBinary() - _ = yym1191 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1192 := r.ContainerType() - if yyct1192 == codecSelferValueTypeMap1234 { - yyl1192 := r.ReadMapStart() - if yyl1192 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1192, d) - } - } else if yyct1192 == codecSelferValueTypeArray1234 { - yyl1192 := r.ReadArrayStart() - if yyl1192 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1192, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IDRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1193Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1193Slc - var yyhl1193 bool = l >= 0 - for yyj1193 := 0; ; yyj1193++ { - if yyhl1193 { - if yyj1193 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1193Slc = r.DecodeBytes(yys1193Slc, true, true) - yys1193 := string(yys1193Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1193 { - case "min": - if r.TryDecodeAsNil() { - x.Min = 0 - } else { - x.Min = int64(r.DecodeInt(64)) - } - case "max": - if r.TryDecodeAsNil() { - x.Max = 0 - } else { - x.Max = int64(r.DecodeInt(64)) - } - default: - z.DecStructFieldNotFound(-1, yys1193) - } // end switch yys1193 - } // end for yyj1193 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IDRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1196 int - var yyb1196 bool - var yyhl1196 bool = l >= 0 - yyj1196++ - if yyhl1196 { - yyb1196 = yyj1196 > l - } else { - yyb1196 = r.CheckBreak() - } - if yyb1196 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Min = 0 - } else { - x.Min = int64(r.DecodeInt(64)) - } - yyj1196++ - if yyhl1196 { - yyb1196 = yyj1196 > l - } else { - yyb1196 = r.CheckBreak() - } - if yyb1196 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Max = 0 - } else { - x.Max = int64(r.DecodeInt(64)) - } - for { - yyj1196++ - if yyhl1196 { - yyb1196 = yyj1196 > l - } else { - yyb1196 = r.CheckBreak() - } - if yyb1196 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1196-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x RunAsUserStrategy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1199 := z.EncBinary() - _ = yym1199 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *RunAsUserStrategy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1200 := z.DecBinary() - _ = yym1200 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *FSGroupStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1201 := z.EncBinary() - _ = yym1201 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1202 := !z.EncBinary() - yy2arr1202 := z.EncBasicHandle().StructToArray - var yyq1202 [2]bool - _, _, _ = yysep1202, yyq1202, yy2arr1202 - const yyr1202 bool = false - yyq1202[0] = x.Rule != "" - yyq1202[1] = len(x.Ranges) != 0 - var yynn1202 int - if yyr1202 || yy2arr1202 { - r.EncodeArrayStart(2) - } else { - yynn1202 = 0 - for _, b := range yyq1202 { - if b { - yynn1202++ - } - } - r.EncodeMapStart(yynn1202) - yynn1202 = 0 - } - if yyr1202 || yy2arr1202 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1202[0] { - x.Rule.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1202[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rule")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Rule.CodecEncodeSelf(e) - } - } - if yyr1202 || yy2arr1202 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1202[1] { - if x.Ranges == nil { - r.EncodeNil() - } else { - yym1205 := z.EncBinary() - _ = yym1205 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1202[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ranges")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ranges == nil { - r.EncodeNil() - } else { - yym1206 := z.EncBinary() - _ = yym1206 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } - } - if yyr1202 || yy2arr1202 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *FSGroupStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1207 := z.DecBinary() - _ = yym1207 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1208 := r.ContainerType() - if yyct1208 == codecSelferValueTypeMap1234 { - yyl1208 := r.ReadMapStart() - if yyl1208 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1208, d) - } - } else if yyct1208 == codecSelferValueTypeArray1234 { - yyl1208 := r.ReadArrayStart() - if yyl1208 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1208, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *FSGroupStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1209Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1209Slc - var yyhl1209 bool = l >= 0 - for yyj1209 := 0; ; yyj1209++ { - if yyhl1209 { - if yyj1209 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1209Slc = r.DecodeBytes(yys1209Slc, true, true) - yys1209 := string(yys1209Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1209 { - case "rule": - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = FSGroupStrategyType(r.DecodeString()) - } - case "ranges": - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv1211 := &x.Ranges - yym1212 := z.DecBinary() - _ = yym1212 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv1211), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys1209) - } // end switch yys1209 - } // end for yyj1209 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *FSGroupStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1213 int - var yyb1213 bool - var yyhl1213 bool = l >= 0 - yyj1213++ - if yyhl1213 { - yyb1213 = yyj1213 > l - } else { - yyb1213 = r.CheckBreak() - } - if yyb1213 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = FSGroupStrategyType(r.DecodeString()) - } - yyj1213++ - if yyhl1213 { - yyb1213 = 
yyj1213 > l - } else { - yyb1213 = r.CheckBreak() - } - if yyb1213 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv1215 := &x.Ranges - yym1216 := z.DecBinary() - _ = yym1216 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv1215), d) - } - } - for { - yyj1213++ - if yyhl1213 { - yyb1213 = yyj1213 > l - } else { - yyb1213 = r.CheckBreak() - } - if yyb1213 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1213-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x FSGroupStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1217 := z.EncBinary() - _ = yym1217 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *FSGroupStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1218 := z.DecBinary() - _ = yym1218 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *SupplementalGroupsStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1219 := z.EncBinary() - _ = yym1219 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1220 := !z.EncBinary() - yy2arr1220 := z.EncBasicHandle().StructToArray - var yyq1220 [2]bool - _, _, _ = yysep1220, yyq1220, yy2arr1220 - const yyr1220 bool = false - yyq1220[0] = x.Rule != "" - yyq1220[1] = len(x.Ranges) != 0 - var yynn1220 int - if yyr1220 || yy2arr1220 { - r.EncodeArrayStart(2) - } else { - yynn1220 = 0 - for _, b := range yyq1220 { - if b { - yynn1220++ - } - } - r.EncodeMapStart(yynn1220) - yynn1220 = 0 - } - if yyr1220 || yy2arr1220 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1220[0] { - x.Rule.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1220[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rule")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Rule.CodecEncodeSelf(e) - } - } - if yyr1220 || yy2arr1220 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1220[1] { - if x.Ranges == nil { - r.EncodeNil() - } else { - yym1223 := z.EncBinary() - _ = yym1223 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1220[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ranges")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ranges == nil { - r.EncodeNil() - } else { - yym1224 := z.EncBinary() - _ = yym1224 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } - } - if yyr1220 || yy2arr1220 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SupplementalGroupsStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1225 := z.DecBinary() - _ = yym1225 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1226 := r.ContainerType() - if yyct1226 == codecSelferValueTypeMap1234 { - yyl1226 := r.ReadMapStart() - if yyl1226 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1226, d) - } - } else if yyct1226 == codecSelferValueTypeArray1234 { - yyl1226 := r.ReadArrayStart() - if yyl1226 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1226, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1227Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1227Slc - var yyhl1227 bool = l >= 0 - for yyj1227 := 0; ; yyj1227++ { - if yyhl1227 { - if yyj1227 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1227Slc = r.DecodeBytes(yys1227Slc, true, true) - yys1227 := string(yys1227Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1227 { - case "rule": - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = SupplementalGroupsStrategyType(r.DecodeString()) - } - case "ranges": - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv1229 := &x.Ranges - yym1230 := z.DecBinary() - _ = yym1230 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv1229), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys1227) - } // end switch yys1227 - } // end for yyj1227 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1231 int - var yyb1231 bool - var yyhl1231 bool = l >= 0 - yyj1231++ - if yyhl1231 { - yyb1231 = yyj1231 > l - } else { - yyb1231 = r.CheckBreak() - } - if yyb1231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = SupplementalGroupsStrategyType(r.DecodeString()) - } - yyj1231++ - if yyhl1231 { - yyb1231 = yyj1231 > l - } else { - yyb1231 = r.CheckBreak() - } - if yyb1231 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv1233 := &x.Ranges - yym1234 := z.DecBinary() - _ = yym1234 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv1233), d) - } - } - for { - yyj1231++ - if yyhl1231 { - yyb1231 = yyj1231 > l - } else { - yyb1231 = r.CheckBreak() - } - if yyb1231 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1231-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x SupplementalGroupsStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1235 := z.EncBinary() - _ = yym1235 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) 
- } -} - -func (x *SupplementalGroupsStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1236 := z.DecBinary() - _ = yym1236 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *PodSecurityPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1237 := z.EncBinary() - _ = yym1237 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1238 := !z.EncBinary() - yy2arr1238 := z.EncBasicHandle().StructToArray - var yyq1238 [4]bool - _, _, _ = yysep1238, yyq1238, yy2arr1238 - const yyr1238 bool = false - yyq1238[0] = x.Kind != "" - yyq1238[1] = x.APIVersion != "" - yyq1238[2] = true - var yynn1238 int - if yyr1238 || yy2arr1238 { - r.EncodeArrayStart(4) - } else { - yynn1238 = 1 - for _, b := range yyq1238 { - if b { - yynn1238++ - } - } - r.EncodeMapStart(yynn1238) - yynn1238 = 0 - } - if yyr1238 || yy2arr1238 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1238[0] { - yym1240 := z.EncBinary() - _ = yym1240 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1238[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1241 := z.EncBinary() - _ = yym1241 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr1238 || yy2arr1238 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1238[1] { - yym1243 := z.EncBinary() - _ = yym1243 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1238[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1244 := z.EncBinary() - _ = yym1244 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr1238 || yy2arr1238 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1238[2] { - yy1246 := &x.ListMeta - yym1247 := z.EncBinary() - _ = yym1247 - if false { - } else if z.HasExtensions() && z.EncExt(yy1246) { - } else { - z.EncFallback(yy1246) - } - } else { - r.EncodeNil() - } - } else { - if yyq1238[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1248 := &x.ListMeta - yym1249 := z.EncBinary() - _ = yym1249 - if false { - } else if z.HasExtensions() && z.EncExt(yy1248) { - } else { - z.EncFallback(yy1248) - } - } - } - if yyr1238 || yy2arr1238 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym1251 := z.EncBinary() - _ = yym1251 - if false { - } else { - h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if 
x.Items == nil { - r.EncodeNil() - } else { - yym1252 := z.EncBinary() - _ = yym1252 - if false { - } else { - h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) - } - } - } - if yyr1238 || yy2arr1238 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSecurityPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1253 := z.DecBinary() - _ = yym1253 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1254 := r.ContainerType() - if yyct1254 == codecSelferValueTypeMap1234 { - yyl1254 := r.ReadMapStart() - if yyl1254 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1254, d) - } - } else if yyct1254 == codecSelferValueTypeArray1234 { - yyl1254 := r.ReadArrayStart() - if yyl1254 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1254, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSecurityPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1255Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1255Slc - var yyhl1255 bool = l >= 0 - for yyj1255 := 0; ; yyj1255++ { - if yyhl1255 { - if yyj1255 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1255Slc = r.DecodeBytes(yys1255Slc, true, true) - yys1255 := string(yys1255Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1255 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv1258 := &x.ListMeta - yym1259 := z.DecBinary() - _ = yym1259 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1258) { - } else { - z.DecFallback(yyv1258, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv1260 := &x.Items - yym1261 := z.DecBinary() - _ = yym1261 - if false { - } else { - h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv1260), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys1255) - } // end switch yys1255 - } // end for yyj1255 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSecurityPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1262 int - var yyb1262 bool - var yyhl1262 bool = l >= 0 - yyj1262++ - if yyhl1262 { - yyb1262 = yyj1262 > l - } else { - yyb1262 = r.CheckBreak() - } - if yyb1262 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj1262++ - if yyhl1262 { - yyb1262 = yyj1262 > l - } else { - yyb1262 = r.CheckBreak() - } - if yyb1262 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj1262++ - if yyhl1262 { - yyb1262 = yyj1262 > l - } else { - yyb1262 = r.CheckBreak() - } - if yyb1262 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv1265 := &x.ListMeta - yym1266 := z.DecBinary() - _ = yym1266 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1265) { - } else { - z.DecFallback(yyv1265, false) - } - } - yyj1262++ - if yyhl1262 { - yyb1262 = yyj1262 > l - } else { - yyb1262 = r.CheckBreak() - } - if yyb1262 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv1267 := &x.Items - yym1268 := z.DecBinary() - _ = yym1268 - if false { - } else { - h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv1267), d) - } - } - for { - yyj1262++ - if yyhl1262 { - yyb1262 = yyj1262 > l - } else { - yyb1262 = r.CheckBreak() - } - if yyb1262 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1262-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1269 := z.EncBinary() - _ = yym1269 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1270 := !z.EncBinary() - yy2arr1270 := z.EncBasicHandle().StructToArray - var yyq1270 [4]bool - _, _, _ = yysep1270, yyq1270, yy2arr1270 - const yyr1270 bool = false - yyq1270[0] = x.Kind != "" - yyq1270[1] = x.APIVersion != "" - yyq1270[2] = true - yyq1270[3] = true - var yynn1270 int - if yyr1270 || yy2arr1270 { - r.EncodeArrayStart(4) - } else { - yynn1270 = 0 - for _, b := range yyq1270 { - if b { - yynn1270++ - } - } - r.EncodeMapStart(yynn1270) - yynn1270 = 0 - } - if yyr1270 || yy2arr1270 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1270[0] { - yym1272 := z.EncBinary() - _ = yym1272 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1270[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1273 := z.EncBinary() - _ = yym1273 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr1270 || yy2arr1270 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1270[1] { - yym1275 := z.EncBinary() - _ = yym1275 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1270[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1276 := z.EncBinary() - _ = yym1276 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr1270 || yy2arr1270 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1270[2] { - yy1278 := 
&x.ObjectMeta - yy1278.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq1270[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1279 := &x.ObjectMeta - yy1279.CodecEncodeSelf(e) - } - } - if yyr1270 || yy2arr1270 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1270[3] { - yy1281 := &x.Spec - yy1281.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq1270[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1282 := &x.Spec - yy1282.CodecEncodeSelf(e) - } - } - if yyr1270 || yy2arr1270 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1283 := z.DecBinary() - _ = yym1283 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1284 := r.ContainerType() - if yyct1284 == codecSelferValueTypeMap1234 { - yyl1284 := r.ReadMapStart() - if yyl1284 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1284, d) - } - } else if yyct1284 == codecSelferValueTypeArray1234 { - yyl1284 := r.ReadArrayStart() - if yyl1284 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1284, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1285Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1285Slc - var yyhl1285 bool = l >= 0 - for yyj1285 := 0; ; yyj1285++ { - if yyhl1285 { - if yyj1285 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1285Slc = r.DecodeBytes(yys1285Slc, true, true) - yys1285 := string(yys1285Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1285 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv1288 := &x.ObjectMeta - yyv1288.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = NetworkPolicySpec{} - } else { - yyv1289 := &x.Spec - yyv1289.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys1285) - } // end switch yys1285 - } // end for yyj1285 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1290 int - var yyb1290 bool - var yyhl1290 bool = l >= 0 - yyj1290++ - if yyhl1290 { - yyb1290 = yyj1290 > l - } else { - yyb1290 = r.CheckBreak() - } - if yyb1290 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj1290++ - if yyhl1290 { - yyb1290 = yyj1290 > l - } else { - yyb1290 = r.CheckBreak() - } - if yyb1290 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj1290++ - if yyhl1290 { - yyb1290 = yyj1290 > l - } else { - yyb1290 = r.CheckBreak() - } - if yyb1290 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv1293 := &x.ObjectMeta - yyv1293.CodecDecodeSelf(d) - } - yyj1290++ - if yyhl1290 { - yyb1290 = yyj1290 > l - } else { - yyb1290 = r.CheckBreak() - } - if yyb1290 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = NetworkPolicySpec{} - } else { - yyv1294 := &x.Spec - yyv1294.CodecDecodeSelf(d) - } - for { - yyj1290++ - if yyhl1290 { - yyb1290 = yyj1290 > l - } else { - yyb1290 = r.CheckBreak() - } - if yyb1290 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1290-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1295 := z.EncBinary() - _ = yym1295 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1296 := !z.EncBinary() - yy2arr1296 := z.EncBasicHandle().StructToArray - var yyq1296 [2]bool - _, _, _ = yysep1296, yyq1296, yy2arr1296 - const yyr1296 bool = false - yyq1296[1] = len(x.Ingress) != 0 - var yynn1296 int - if yyr1296 || yy2arr1296 { - r.EncodeArrayStart(2) - } else { - yynn1296 = 1 - for _, b := range yyq1296 { - if b { - yynn1296++ - } - } - r.EncodeMapStart(yynn1296) - yynn1296 = 0 - } - if yyr1296 || yy2arr1296 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1298 := &x.PodSelector - yym1299 := z.EncBinary() - _ = yym1299 - if false { - } else if z.HasExtensions() && z.EncExt(yy1298) { - } else { - z.EncFallback(yy1298) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1300 := &x.PodSelector - yym1301 := z.EncBinary() - _ = yym1301 - if false { - } else if z.HasExtensions() && z.EncExt(yy1300) { - } else { - z.EncFallback(yy1300) - } - } - if yyr1296 || yy2arr1296 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1296[1] { - if x.Ingress == nil { - r.EncodeNil() - } else { - yym1303 := z.EncBinary() - _ = yym1303 - if false { - } else { - h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1296[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ingress")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ingress == nil { - r.EncodeNil() - } else { - yym1304 := z.EncBinary() - _ = yym1304 - if 
false { - } else { - h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e) - } - } - } - } - if yyr1296 || yy2arr1296 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1305 := z.DecBinary() - _ = yym1305 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1306 := r.ContainerType() - if yyct1306 == codecSelferValueTypeMap1234 { - yyl1306 := r.ReadMapStart() - if yyl1306 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1306, d) - } - } else if yyct1306 == codecSelferValueTypeArray1234 { - yyl1306 := r.ReadArrayStart() - if yyl1306 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1306, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1307Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1307Slc - var yyhl1307 bool = l >= 0 - for yyj1307 := 0; ; yyj1307++ { - if yyhl1307 { - if yyj1307 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1307Slc = r.DecodeBytes(yys1307Slc, true, true) - yys1307 := string(yys1307Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1307 { - case "podSelector": - if r.TryDecodeAsNil() { - x.PodSelector = pkg1_unversioned.LabelSelector{} - } else { - yyv1308 := &x.PodSelector - yym1309 := z.DecBinary() - _ = yym1309 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1308) { - } else { - z.DecFallback(yyv1308, false) - } - } - case "ingress": - if r.TryDecodeAsNil() { - x.Ingress = nil - } else { - yyv1310 := &x.Ingress - yym1311 := z.DecBinary() - _ = yym1311 - if false { - } else { - h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv1310), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys1307) - } // end switch yys1307 - } // end for yyj1307 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1312 int - var yyb1312 bool - var yyhl1312 bool = l >= 0 - yyj1312++ - if yyhl1312 { - yyb1312 = yyj1312 > l - } else { - yyb1312 = r.CheckBreak() - } - if yyb1312 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodSelector = pkg1_unversioned.LabelSelector{} - } else { - yyv1313 := &x.PodSelector - yym1314 := z.DecBinary() - _ = yym1314 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1313) { - } else { - z.DecFallback(yyv1313, false) - } - } - yyj1312++ - if yyhl1312 { - yyb1312 = yyj1312 > l - } else { - yyb1312 = r.CheckBreak() - } - if yyb1312 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ingress = nil - } else { - yyv1315 := &x.Ingress - yym1316 := 
z.DecBinary() - _ = yym1316 - if false { - } else { - h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv1315), d) - } - } - for { - yyj1312++ - if yyhl1312 { - yyb1312 = yyj1312 > l - } else { - yyb1312 = r.CheckBreak() - } - if yyb1312 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1312-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicyIngressRule) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1317 := z.EncBinary() - _ = yym1317 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1318 := !z.EncBinary() - yy2arr1318 := z.EncBasicHandle().StructToArray - var yyq1318 [2]bool - _, _, _ = yysep1318, yyq1318, yy2arr1318 - const yyr1318 bool = false - yyq1318[0] = len(x.Ports) != 0 - yyq1318[1] = len(x.From) != 0 - var yynn1318 int - if yyr1318 || yy2arr1318 { - r.EncodeArrayStart(2) - } else { - yynn1318 = 0 - for _, b := range yyq1318 { - if b { - yynn1318++ - } - } - r.EncodeMapStart(yynn1318) - yynn1318 = 0 - } - if yyr1318 || yy2arr1318 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1318[0] { - if x.Ports == nil { - r.EncodeNil() - } else { - yym1320 := z.EncBinary() - _ = yym1320 - if false { - } else { - h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1318[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ports")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ports == nil { - r.EncodeNil() - } else { - yym1321 := z.EncBinary() - _ = yym1321 - if false { - } else { - h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e) - } - } - } - } - if yyr1318 || yy2arr1318 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1318[1] { - if x.From == nil { - r.EncodeNil() - } else { - yym1323 := z.EncBinary() - _ = yym1323 - if false { - } else { - h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1318[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("from")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.From == nil { - r.EncodeNil() - } else { - yym1324 := z.EncBinary() - _ = yym1324 - if false { - } else { - h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e) - } - } - } - } - if yyr1318 || yy2arr1318 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicyIngressRule) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1325 := z.DecBinary() - _ = yym1325 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1326 := r.ContainerType() - if yyct1326 == codecSelferValueTypeMap1234 { - yyl1326 := r.ReadMapStart() - if yyl1326 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1326, d) - } - } else if yyct1326 == codecSelferValueTypeArray1234 { - yyl1326 := r.ReadArrayStart() - if yyl1326 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
x.codecDecodeSelfFromArray(yyl1326, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicyIngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1327Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1327Slc - var yyhl1327 bool = l >= 0 - for yyj1327 := 0; ; yyj1327++ { - if yyhl1327 { - if yyj1327 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1327Slc = r.DecodeBytes(yys1327Slc, true, true) - yys1327 := string(yys1327Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1327 { - case "ports": - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv1328 := &x.Ports - yym1329 := z.DecBinary() - _ = yym1329 - if false { - } else { - h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv1328), d) - } - } - case "from": - if r.TryDecodeAsNil() { - x.From = nil - } else { - yyv1330 := &x.From - yym1331 := z.DecBinary() - _ = yym1331 - if false { - } else { - h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv1330), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys1327) - } // end switch yys1327 - } // end for yyj1327 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicyIngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1332 int - var yyb1332 bool - var yyhl1332 bool = l >= 0 - yyj1332++ - if yyhl1332 { - yyb1332 = yyj1332 > l - } else { - yyb1332 = r.CheckBreak() - } - if yyb1332 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv1333 := &x.Ports - yym1334 := z.DecBinary() - _ = yym1334 - if false { - } else { - h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv1333), d) - } - } - yyj1332++ - if yyhl1332 { - yyb1332 = yyj1332 > l - } else { - yyb1332 = r.CheckBreak() - } - if yyb1332 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.From = nil - } else { - yyv1335 := &x.From - yym1336 := z.DecBinary() - _ = yym1336 - if false { - } else { - h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv1335), d) - } - } - for { - yyj1332++ - if yyhl1332 { - yyb1332 = yyj1332 > l - } else { - yyb1332 = r.CheckBreak() - } - if yyb1332 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1332-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicyPort) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1337 := z.EncBinary() - _ = yym1337 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1338 := !z.EncBinary() - yy2arr1338 := z.EncBasicHandle().StructToArray - var yyq1338 [2]bool - _, _, _ = yysep1338, yyq1338, yy2arr1338 - const yyr1338 bool = false - yyq1338[0] = x.Protocol != nil - yyq1338[1] = x.Port != nil - var yynn1338 int - if yyr1338 || yy2arr1338 { - r.EncodeArrayStart(2) - } else { - yynn1338 = 0 - for _, b := range yyq1338 { - if b { - yynn1338++ - } - } - 
r.EncodeMapStart(yynn1338) - yynn1338 = 0 - } - if yyr1338 || yy2arr1338 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1338[0] { - if x.Protocol == nil { - r.EncodeNil() - } else { - yy1340 := *x.Protocol - yym1341 := z.EncBinary() - _ = yym1341 - if false { - } else if z.HasExtensions() && z.EncExt(yy1340) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yy1340)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1338[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("protocol")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Protocol == nil { - r.EncodeNil() - } else { - yy1342 := *x.Protocol - yym1343 := z.EncBinary() - _ = yym1343 - if false { - } else if z.HasExtensions() && z.EncExt(yy1342) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yy1342)) - } - } - } - } - if yyr1338 || yy2arr1338 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1338[1] { - if x.Port == nil { - r.EncodeNil() - } else { - yym1345 := z.EncBinary() - _ = yym1345 - if false { - } else if z.HasExtensions() && z.EncExt(x.Port) { - } else if !yym1345 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Port) - } else { - z.EncFallback(x.Port) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1338[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Port == nil { - r.EncodeNil() - } else { - yym1346 := z.EncBinary() - _ = yym1346 - if false { - } else if z.HasExtensions() && z.EncExt(x.Port) { - } else if !yym1346 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Port) - } else { - z.EncFallback(x.Port) - } - } - } - } - if yyr1338 || yy2arr1338 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicyPort) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1347 := z.DecBinary() - _ = yym1347 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1348 := r.ContainerType() - if yyct1348 == codecSelferValueTypeMap1234 { - yyl1348 := r.ReadMapStart() - if yyl1348 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1348, d) - } - } else if yyct1348 == codecSelferValueTypeArray1234 { - yyl1348 := r.ReadArrayStart() - if yyl1348 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1348, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicyPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1349Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1349Slc - var yyhl1349 bool = l >= 0 - for yyj1349 := 0; ; yyj1349++ { - if yyhl1349 { - if yyj1349 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1349Slc = r.DecodeBytes(yys1349Slc, true, true) - yys1349 := string(yys1349Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1349 { - case "protocol": - if r.TryDecodeAsNil() { - if x.Protocol != nil { - x.Protocol = nil - } - } else { - if x.Protocol 
== nil { - x.Protocol = new(pkg2_api.Protocol) - } - x.Protocol.CodecDecodeSelf(d) - } - case "port": - if r.TryDecodeAsNil() { - if x.Port != nil { - x.Port = nil - } - } else { - if x.Port == nil { - x.Port = new(pkg5_intstr.IntOrString) - } - yym1352 := z.DecBinary() - _ = yym1352 - if false { - } else if z.HasExtensions() && z.DecExt(x.Port) { - } else if !yym1352 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Port) - } else { - z.DecFallback(x.Port, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys1349) - } // end switch yys1349 - } // end for yyj1349 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicyPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1353 int - var yyb1353 bool - var yyhl1353 bool = l >= 0 - yyj1353++ - if yyhl1353 { - yyb1353 = yyj1353 > l - } else { - yyb1353 = r.CheckBreak() - } - if yyb1353 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Protocol != nil { - x.Protocol = nil - } - } else { - if x.Protocol == nil { - x.Protocol = new(pkg2_api.Protocol) - } - x.Protocol.CodecDecodeSelf(d) - } - yyj1353++ - if yyhl1353 { - yyb1353 = yyj1353 > l - } else { - yyb1353 = r.CheckBreak() - } - if yyb1353 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Port != nil { - x.Port = nil - } - } else { - if x.Port == nil { - x.Port = new(pkg5_intstr.IntOrString) - } - yym1356 := z.DecBinary() - _ = yym1356 - if false { - } else if z.HasExtensions() && z.DecExt(x.Port) { - } else if !yym1356 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Port) - } else { - z.DecFallback(x.Port, false) - } - } - for { - yyj1353++ - if yyhl1353 { - yyb1353 = yyj1353 > l - } else { - yyb1353 = r.CheckBreak() - } - if yyb1353 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1353-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicyPeer) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1357 := z.EncBinary() - _ = yym1357 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1358 := !z.EncBinary() - yy2arr1358 := z.EncBasicHandle().StructToArray - var yyq1358 [2]bool - _, _, _ = yysep1358, yyq1358, yy2arr1358 - const yyr1358 bool = false - yyq1358[0] = x.PodSelector != nil - yyq1358[1] = x.NamespaceSelector != nil - var yynn1358 int - if yyr1358 || yy2arr1358 { - r.EncodeArrayStart(2) - } else { - yynn1358 = 0 - for _, b := range yyq1358 { - if b { - yynn1358++ - } - } - r.EncodeMapStart(yynn1358) - yynn1358 = 0 - } - if yyr1358 || yy2arr1358 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1358[0] { - if x.PodSelector == nil { - r.EncodeNil() - } else { - yym1360 := z.EncBinary() - _ = yym1360 - if false { - } else if z.HasExtensions() && z.EncExt(x.PodSelector) { - } else { - z.EncFallback(x.PodSelector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1358[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podSelector")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PodSelector == nil { - r.EncodeNil() - } else { - yym1361 := z.EncBinary() - _ = yym1361 - if false { - } else if z.HasExtensions() && z.EncExt(x.PodSelector) { - } else { - z.EncFallback(x.PodSelector) - } - } - } - } - if yyr1358 || yy2arr1358 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1358[1] { - if x.NamespaceSelector == nil { - r.EncodeNil() - } else { - yym1363 := z.EncBinary() - _ = yym1363 - if false { - } else if z.HasExtensions() && z.EncExt(x.NamespaceSelector) { - } else { - z.EncFallback(x.NamespaceSelector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1358[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespaceSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NamespaceSelector == nil { - r.EncodeNil() - } else { - yym1364 := z.EncBinary() - _ = yym1364 - if false { - } else if z.HasExtensions() && z.EncExt(x.NamespaceSelector) { - } else { - z.EncFallback(x.NamespaceSelector) - } - } - } - } - if yyr1358 || yy2arr1358 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicyPeer) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1365 := z.DecBinary() - _ = yym1365 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1366 := r.ContainerType() - if yyct1366 == codecSelferValueTypeMap1234 { - yyl1366 := r.ReadMapStart() - if yyl1366 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1366, d) - } - } else if yyct1366 == codecSelferValueTypeArray1234 { - yyl1366 := r.ReadArrayStart() - if yyl1366 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1366, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicyPeer) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1367Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1367Slc - var yyhl1367 bool = l >= 0 - for yyj1367 := 0; ; yyj1367++ { - if yyhl1367 { - if yyj1367 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1367Slc = r.DecodeBytes(yys1367Slc, true, true) - yys1367 := string(yys1367Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1367 { - case "podSelector": - if r.TryDecodeAsNil() { - if x.PodSelector != nil { - x.PodSelector = nil - } - } else { - if x.PodSelector == nil { - x.PodSelector = new(pkg1_unversioned.LabelSelector) - } - yym1369 := z.DecBinary() - _ = yym1369 - if false { - } else if z.HasExtensions() && z.DecExt(x.PodSelector) { - } else { - z.DecFallback(x.PodSelector, false) - } - } - case "namespaceSelector": - if r.TryDecodeAsNil() { - if x.NamespaceSelector != nil { - x.NamespaceSelector = nil - } - } else { - if x.NamespaceSelector == nil { - x.NamespaceSelector = new(pkg1_unversioned.LabelSelector) - } - yym1371 := z.DecBinary() - _ = yym1371 - if false { - } else if z.HasExtensions() && z.DecExt(x.NamespaceSelector) { - } else { - z.DecFallback(x.NamespaceSelector, false) - } - } - default: - 
z.DecStructFieldNotFound(-1, yys1367) - } // end switch yys1367 - } // end for yyj1367 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicyPeer) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1372 int - var yyb1372 bool - var yyhl1372 bool = l >= 0 - yyj1372++ - if yyhl1372 { - yyb1372 = yyj1372 > l - } else { - yyb1372 = r.CheckBreak() - } - if yyb1372 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PodSelector != nil { - x.PodSelector = nil - } - } else { - if x.PodSelector == nil { - x.PodSelector = new(pkg1_unversioned.LabelSelector) - } - yym1374 := z.DecBinary() - _ = yym1374 - if false { - } else if z.HasExtensions() && z.DecExt(x.PodSelector) { - } else { - z.DecFallback(x.PodSelector, false) - } - } - yyj1372++ - if yyhl1372 { - yyb1372 = yyj1372 > l - } else { - yyb1372 = r.CheckBreak() - } - if yyb1372 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NamespaceSelector != nil { - x.NamespaceSelector = nil - } - } else { - if x.NamespaceSelector == nil { - x.NamespaceSelector = new(pkg1_unversioned.LabelSelector) - } - yym1376 := z.DecBinary() - _ = yym1376 - if false { - } else if z.HasExtensions() && z.DecExt(x.NamespaceSelector) { - } else { - z.DecFallback(x.NamespaceSelector, false) - } - } - for { - yyj1372++ - if yyhl1372 { - yyb1372 = yyj1372 > l - } else { - yyb1372 = r.CheckBreak() - } - if yyb1372 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1372-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1377 := z.EncBinary() - _ = yym1377 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1378 := !z.EncBinary() - yy2arr1378 := z.EncBasicHandle().StructToArray - var yyq1378 [4]bool - _, _, _ = yysep1378, yyq1378, yy2arr1378 - const yyr1378 bool = false - yyq1378[0] = x.Kind != "" - yyq1378[1] = x.APIVersion != "" - yyq1378[2] = true - var yynn1378 int - if yyr1378 || yy2arr1378 { - r.EncodeArrayStart(4) - } else { - yynn1378 = 1 - for _, b := range yyq1378 { - if b { - yynn1378++ - } - } - r.EncodeMapStart(yynn1378) - yynn1378 = 0 - } - if yyr1378 || yy2arr1378 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1378[0] { - yym1380 := z.EncBinary() - _ = yym1380 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1378[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1381 := z.EncBinary() - _ = yym1381 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr1378 || yy2arr1378 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1378[1] { - yym1383 := z.EncBinary() - _ = yym1383 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } 
else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1378[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1384 := z.EncBinary() - _ = yym1384 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr1378 || yy2arr1378 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1378[2] { - yy1386 := &x.ListMeta - yym1387 := z.EncBinary() - _ = yym1387 - if false { - } else if z.HasExtensions() && z.EncExt(yy1386) { - } else { - z.EncFallback(yy1386) - } - } else { - r.EncodeNil() - } - } else { - if yyq1378[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1388 := &x.ListMeta - yym1389 := z.EncBinary() - _ = yym1389 - if false { - } else if z.HasExtensions() && z.EncExt(yy1388) { - } else { - z.EncFallback(yy1388) - } - } - } - if yyr1378 || yy2arr1378 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym1391 := z.EncBinary() - _ = yym1391 - if false { - } else { - h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym1392 := z.EncBinary() - _ = yym1392 - if false { - } else { - h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e) - } - } - } - if yyr1378 || yy2arr1378 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1393 := z.DecBinary() - _ = yym1393 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1394 := r.ContainerType() - if yyct1394 == codecSelferValueTypeMap1234 { - yyl1394 := r.ReadMapStart() - if yyl1394 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1394, d) - } - } else if yyct1394 == codecSelferValueTypeArray1234 { - yyl1394 := r.ReadArrayStart() - if yyl1394 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1394, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1395Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1395Slc - var yyhl1395 bool = l >= 0 - for yyj1395 := 0; ; yyj1395++ { - if yyhl1395 { - if yyj1395 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1395Slc = r.DecodeBytes(yys1395Slc, true, true) - yys1395 := string(yys1395Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1395 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { 
- x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv1398 := &x.ListMeta - yym1399 := z.DecBinary() - _ = yym1399 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1398) { - } else { - z.DecFallback(yyv1398, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv1400 := &x.Items - yym1401 := z.DecBinary() - _ = yym1401 - if false { - } else { - h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv1400), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys1395) - } // end switch yys1395 - } // end for yyj1395 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1402 int - var yyb1402 bool - var yyhl1402 bool = l >= 0 - yyj1402++ - if yyhl1402 { - yyb1402 = yyj1402 > l - } else { - yyb1402 = r.CheckBreak() - } - if yyb1402 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj1402++ - if yyhl1402 { - yyb1402 = yyj1402 > l - } else { - yyb1402 = r.CheckBreak() - } - if yyb1402 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj1402++ - if yyhl1402 { - yyb1402 = yyj1402 > l - } else { - yyb1402 = r.CheckBreak() - } - if yyb1402 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv1405 := &x.ListMeta - yym1406 := z.DecBinary() - _ = yym1406 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1405) { - } else { - z.DecFallback(yyv1405, false) - } - } - yyj1402++ - if yyhl1402 { - yyb1402 = yyj1402 > l - } else { - yyb1402 = r.CheckBreak() - } - if yyb1402 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv1407 := &x.Items - yym1408 := z.DecBinary() - _ = yym1408 - if false { - } else { - h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv1407), d) - } - } - for { - yyj1402++ - if yyhl1402 { - yyb1402 = yyj1402 > l - } else { - yyb1402 = r.CheckBreak() - } - if yyb1402 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1402-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceCustomMetricTarget(v []CustomMetricTarget, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1409 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1410 := &yyv1409 - yy1410.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1411 := *v - yyh1411, yyl1411 := 
z.DecSliceHelperStart() - var yyc1411 bool - if yyl1411 == 0 { - if yyv1411 == nil { - yyv1411 = []CustomMetricTarget{} - yyc1411 = true - } else if len(yyv1411) != 0 { - yyv1411 = yyv1411[:0] - yyc1411 = true - } - } else if yyl1411 > 0 { - var yyrr1411, yyrl1411 int - var yyrt1411 bool - if yyl1411 > cap(yyv1411) { - - yyrg1411 := len(yyv1411) > 0 - yyv21411 := yyv1411 - yyrl1411, yyrt1411 = z.DecInferLen(yyl1411, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1411 { - if yyrl1411 <= cap(yyv1411) { - yyv1411 = yyv1411[:yyrl1411] - } else { - yyv1411 = make([]CustomMetricTarget, yyrl1411) - } - } else { - yyv1411 = make([]CustomMetricTarget, yyrl1411) - } - yyc1411 = true - yyrr1411 = len(yyv1411) - if yyrg1411 { - copy(yyv1411, yyv21411) - } - } else if yyl1411 != len(yyv1411) { - yyv1411 = yyv1411[:yyl1411] - yyc1411 = true - } - yyj1411 := 0 - for ; yyj1411 < yyrr1411; yyj1411++ { - yyh1411.ElemContainerState(yyj1411) - if r.TryDecodeAsNil() { - yyv1411[yyj1411] = CustomMetricTarget{} - } else { - yyv1412 := &yyv1411[yyj1411] - yyv1412.CodecDecodeSelf(d) - } - - } - if yyrt1411 { - for ; yyj1411 < yyl1411; yyj1411++ { - yyv1411 = append(yyv1411, CustomMetricTarget{}) - yyh1411.ElemContainerState(yyj1411) - if r.TryDecodeAsNil() { - yyv1411[yyj1411] = CustomMetricTarget{} - } else { - yyv1413 := &yyv1411[yyj1411] - yyv1413.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1411 := 0 - for ; !r.CheckBreak(); yyj1411++ { - - if yyj1411 >= len(yyv1411) { - yyv1411 = append(yyv1411, CustomMetricTarget{}) // var yyz1411 CustomMetricTarget - yyc1411 = true - } - yyh1411.ElemContainerState(yyj1411) - if yyj1411 < len(yyv1411) { - if r.TryDecodeAsNil() { - yyv1411[yyj1411] = CustomMetricTarget{} - } else { - yyv1414 := &yyv1411[yyj1411] - yyv1414.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1411 < len(yyv1411) { - yyv1411 = yyv1411[:yyj1411] - yyc1411 = true - } else if yyj1411 == 0 && yyv1411 == nil { - yyv1411 = []CustomMetricTarget{} - yyc1411 = true - } - } - yyh1411.End() - if yyc1411 { - *v = yyv1411 - } -} - -func (x codecSelfer1234) encSliceCustomMetricCurrentStatus(v []CustomMetricCurrentStatus, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1415 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1416 := &yyv1415 - yy1416.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurrentStatus, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1417 := *v - yyh1417, yyl1417 := z.DecSliceHelperStart() - var yyc1417 bool - if yyl1417 == 0 { - if yyv1417 == nil { - yyv1417 = []CustomMetricCurrentStatus{} - yyc1417 = true - } else if len(yyv1417) != 0 { - yyv1417 = yyv1417[:0] - yyc1417 = true - } - } else if yyl1417 > 0 { - var yyrr1417, yyrl1417 int - var yyrt1417 bool - if yyl1417 > cap(yyv1417) { - - yyrg1417 := len(yyv1417) > 0 - yyv21417 := yyv1417 - yyrl1417, yyrt1417 = z.DecInferLen(yyl1417, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1417 { - if yyrl1417 <= cap(yyv1417) { - yyv1417 = yyv1417[:yyrl1417] - } else { - yyv1417 = make([]CustomMetricCurrentStatus, yyrl1417) - } - } else { - yyv1417 = make([]CustomMetricCurrentStatus, yyrl1417) - } - yyc1417 = true - yyrr1417 = len(yyv1417) - if yyrg1417 { - copy(yyv1417, yyv21417) - } - } else if yyl1417 != 
len(yyv1417) { - yyv1417 = yyv1417[:yyl1417] - yyc1417 = true - } - yyj1417 := 0 - for ; yyj1417 < yyrr1417; yyj1417++ { - yyh1417.ElemContainerState(yyj1417) - if r.TryDecodeAsNil() { - yyv1417[yyj1417] = CustomMetricCurrentStatus{} - } else { - yyv1418 := &yyv1417[yyj1417] - yyv1418.CodecDecodeSelf(d) - } - - } - if yyrt1417 { - for ; yyj1417 < yyl1417; yyj1417++ { - yyv1417 = append(yyv1417, CustomMetricCurrentStatus{}) - yyh1417.ElemContainerState(yyj1417) - if r.TryDecodeAsNil() { - yyv1417[yyj1417] = CustomMetricCurrentStatus{} - } else { - yyv1419 := &yyv1417[yyj1417] - yyv1419.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1417 := 0 - for ; !r.CheckBreak(); yyj1417++ { - - if yyj1417 >= len(yyv1417) { - yyv1417 = append(yyv1417, CustomMetricCurrentStatus{}) // var yyz1417 CustomMetricCurrentStatus - yyc1417 = true - } - yyh1417.ElemContainerState(yyj1417) - if yyj1417 < len(yyv1417) { - if r.TryDecodeAsNil() { - yyv1417[yyj1417] = CustomMetricCurrentStatus{} - } else { - yyv1420 := &yyv1417[yyj1417] - yyv1420.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1417 < len(yyv1417) { - yyv1417 = yyv1417[:yyj1417] - yyc1417 = true - } else if yyj1417 == 0 && yyv1417 == nil { - yyv1417 = []CustomMetricCurrentStatus{} - yyc1417 = true - } - } - yyh1417.End() - if yyc1417 { - *v = yyv1417 - } -} - -func (x codecSelfer1234) encSliceAPIVersion(v []APIVersion, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1421 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1422 := &yyv1421 - yy1422.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1423 := *v - yyh1423, yyl1423 := z.DecSliceHelperStart() - var yyc1423 bool - if yyl1423 == 0 { - if yyv1423 == nil { - yyv1423 = []APIVersion{} - yyc1423 = true - } else if len(yyv1423) != 0 { - yyv1423 = yyv1423[:0] - yyc1423 = true - } - } else if yyl1423 > 0 { - var yyrr1423, yyrl1423 int - var yyrt1423 bool - if yyl1423 > cap(yyv1423) { - - yyrg1423 := len(yyv1423) > 0 - yyv21423 := yyv1423 - yyrl1423, yyrt1423 = z.DecInferLen(yyl1423, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1423 { - if yyrl1423 <= cap(yyv1423) { - yyv1423 = yyv1423[:yyrl1423] - } else { - yyv1423 = make([]APIVersion, yyrl1423) - } - } else { - yyv1423 = make([]APIVersion, yyrl1423) - } - yyc1423 = true - yyrr1423 = len(yyv1423) - if yyrg1423 { - copy(yyv1423, yyv21423) - } - } else if yyl1423 != len(yyv1423) { - yyv1423 = yyv1423[:yyl1423] - yyc1423 = true - } - yyj1423 := 0 - for ; yyj1423 < yyrr1423; yyj1423++ { - yyh1423.ElemContainerState(yyj1423) - if r.TryDecodeAsNil() { - yyv1423[yyj1423] = APIVersion{} - } else { - yyv1424 := &yyv1423[yyj1423] - yyv1424.CodecDecodeSelf(d) - } - - } - if yyrt1423 { - for ; yyj1423 < yyl1423; yyj1423++ { - yyv1423 = append(yyv1423, APIVersion{}) - yyh1423.ElemContainerState(yyj1423) - if r.TryDecodeAsNil() { - yyv1423[yyj1423] = APIVersion{} - } else { - yyv1425 := &yyv1423[yyj1423] - yyv1425.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1423 := 0 - for ; !r.CheckBreak(); yyj1423++ { - - if yyj1423 >= len(yyv1423) { - yyv1423 = append(yyv1423, APIVersion{}) // var yyz1423 APIVersion - yyc1423 = true - } - yyh1423.ElemContainerState(yyj1423) - if yyj1423 < len(yyv1423) { - if 
r.TryDecodeAsNil() { - yyv1423[yyj1423] = APIVersion{} - } else { - yyv1426 := &yyv1423[yyj1423] - yyv1426.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1423 < len(yyv1423) { - yyv1423 = yyv1423[:yyj1423] - yyc1423 = true - } else if yyj1423 == 0 && yyv1423 == nil { - yyv1423 = []APIVersion{} - yyc1423 = true - } - } - yyh1423.End() - if yyc1423 { - *v = yyv1423 - } -} - -func (x codecSelfer1234) encSliceThirdPartyResource(v []ThirdPartyResource, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1427 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1428 := &yyv1427 - yy1428.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1429 := *v - yyh1429, yyl1429 := z.DecSliceHelperStart() - var yyc1429 bool - if yyl1429 == 0 { - if yyv1429 == nil { - yyv1429 = []ThirdPartyResource{} - yyc1429 = true - } else if len(yyv1429) != 0 { - yyv1429 = yyv1429[:0] - yyc1429 = true - } - } else if yyl1429 > 0 { - var yyrr1429, yyrl1429 int - var yyrt1429 bool - if yyl1429 > cap(yyv1429) { - - yyrg1429 := len(yyv1429) > 0 - yyv21429 := yyv1429 - yyrl1429, yyrt1429 = z.DecInferLen(yyl1429, z.DecBasicHandle().MaxInitLen, 296) - if yyrt1429 { - if yyrl1429 <= cap(yyv1429) { - yyv1429 = yyv1429[:yyrl1429] - } else { - yyv1429 = make([]ThirdPartyResource, yyrl1429) - } - } else { - yyv1429 = make([]ThirdPartyResource, yyrl1429) - } - yyc1429 = true - yyrr1429 = len(yyv1429) - if yyrg1429 { - copy(yyv1429, yyv21429) - } - } else if yyl1429 != len(yyv1429) { - yyv1429 = yyv1429[:yyl1429] - yyc1429 = true - } - yyj1429 := 0 - for ; yyj1429 < yyrr1429; yyj1429++ { - yyh1429.ElemContainerState(yyj1429) - if r.TryDecodeAsNil() { - yyv1429[yyj1429] = ThirdPartyResource{} - } else { - yyv1430 := &yyv1429[yyj1429] - yyv1430.CodecDecodeSelf(d) - } - - } - if yyrt1429 { - for ; yyj1429 < yyl1429; yyj1429++ { - yyv1429 = append(yyv1429, ThirdPartyResource{}) - yyh1429.ElemContainerState(yyj1429) - if r.TryDecodeAsNil() { - yyv1429[yyj1429] = ThirdPartyResource{} - } else { - yyv1431 := &yyv1429[yyj1429] - yyv1431.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1429 := 0 - for ; !r.CheckBreak(); yyj1429++ { - - if yyj1429 >= len(yyv1429) { - yyv1429 = append(yyv1429, ThirdPartyResource{}) // var yyz1429 ThirdPartyResource - yyc1429 = true - } - yyh1429.ElemContainerState(yyj1429) - if yyj1429 < len(yyv1429) { - if r.TryDecodeAsNil() { - yyv1429[yyj1429] = ThirdPartyResource{} - } else { - yyv1432 := &yyv1429[yyj1429] - yyv1432.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1429 < len(yyv1429) { - yyv1429 = yyv1429[:yyj1429] - yyc1429 = true - } else if yyj1429 == 0 && yyv1429 == nil { - yyv1429 = []ThirdPartyResource{} - yyc1429 = true - } - } - yyh1429.End() - if yyc1429 { - *v = yyv1429 - } -} - -func (x codecSelfer1234) encSliceDeploymentCondition(v []DeploymentCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1433 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1434 := &yyv1433 - yy1434.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func 
(x codecSelfer1234) decSliceDeploymentCondition(v *[]DeploymentCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1435 := *v - yyh1435, yyl1435 := z.DecSliceHelperStart() - var yyc1435 bool - if yyl1435 == 0 { - if yyv1435 == nil { - yyv1435 = []DeploymentCondition{} - yyc1435 = true - } else if len(yyv1435) != 0 { - yyv1435 = yyv1435[:0] - yyc1435 = true - } - } else if yyl1435 > 0 { - var yyrr1435, yyrl1435 int - var yyrt1435 bool - if yyl1435 > cap(yyv1435) { - - yyrg1435 := len(yyv1435) > 0 - yyv21435 := yyv1435 - yyrl1435, yyrt1435 = z.DecInferLen(yyl1435, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1435 { - if yyrl1435 <= cap(yyv1435) { - yyv1435 = yyv1435[:yyrl1435] - } else { - yyv1435 = make([]DeploymentCondition, yyrl1435) - } - } else { - yyv1435 = make([]DeploymentCondition, yyrl1435) - } - yyc1435 = true - yyrr1435 = len(yyv1435) - if yyrg1435 { - copy(yyv1435, yyv21435) - } - } else if yyl1435 != len(yyv1435) { - yyv1435 = yyv1435[:yyl1435] - yyc1435 = true - } - yyj1435 := 0 - for ; yyj1435 < yyrr1435; yyj1435++ { - yyh1435.ElemContainerState(yyj1435) - if r.TryDecodeAsNil() { - yyv1435[yyj1435] = DeploymentCondition{} - } else { - yyv1436 := &yyv1435[yyj1435] - yyv1436.CodecDecodeSelf(d) - } - - } - if yyrt1435 { - for ; yyj1435 < yyl1435; yyj1435++ { - yyv1435 = append(yyv1435, DeploymentCondition{}) - yyh1435.ElemContainerState(yyj1435) - if r.TryDecodeAsNil() { - yyv1435[yyj1435] = DeploymentCondition{} - } else { - yyv1437 := &yyv1435[yyj1435] - yyv1437.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1435 := 0 - for ; !r.CheckBreak(); yyj1435++ { - - if yyj1435 >= len(yyv1435) { - yyv1435 = append(yyv1435, DeploymentCondition{}) // var yyz1435 DeploymentCondition - yyc1435 = true - } - yyh1435.ElemContainerState(yyj1435) - if yyj1435 < len(yyv1435) { - if r.TryDecodeAsNil() { - yyv1435[yyj1435] = DeploymentCondition{} - } else { - yyv1438 := &yyv1435[yyj1435] - yyv1438.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1435 < len(yyv1435) { - yyv1435 = yyv1435[:yyj1435] - yyc1435 = true - } else if yyj1435 == 0 && yyv1435 == nil { - yyv1435 = []DeploymentCondition{} - yyc1435 = true - } - } - yyh1435.End() - if yyc1435 { - *v = yyv1435 - } -} - -func (x codecSelfer1234) encSliceDeployment(v []Deployment, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1439 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1440 := &yyv1439 - yy1440.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1441 := *v - yyh1441, yyl1441 := z.DecSliceHelperStart() - var yyc1441 bool - if yyl1441 == 0 { - if yyv1441 == nil { - yyv1441 = []Deployment{} - yyc1441 = true - } else if len(yyv1441) != 0 { - yyv1441 = yyv1441[:0] - yyc1441 = true - } - } else if yyl1441 > 0 { - var yyrr1441, yyrl1441 int - var yyrt1441 bool - if yyl1441 > cap(yyv1441) { - - yyrg1441 := len(yyv1441) > 0 - yyv21441 := yyv1441 - yyrl1441, yyrt1441 = z.DecInferLen(yyl1441, z.DecBasicHandle().MaxInitLen, 832) - if yyrt1441 { - if yyrl1441 <= cap(yyv1441) { - yyv1441 = yyv1441[:yyrl1441] - } else { - yyv1441 = make([]Deployment, yyrl1441) - } - } else { - yyv1441 = make([]Deployment, 
yyrl1441) - } - yyc1441 = true - yyrr1441 = len(yyv1441) - if yyrg1441 { - copy(yyv1441, yyv21441) - } - } else if yyl1441 != len(yyv1441) { - yyv1441 = yyv1441[:yyl1441] - yyc1441 = true - } - yyj1441 := 0 - for ; yyj1441 < yyrr1441; yyj1441++ { - yyh1441.ElemContainerState(yyj1441) - if r.TryDecodeAsNil() { - yyv1441[yyj1441] = Deployment{} - } else { - yyv1442 := &yyv1441[yyj1441] - yyv1442.CodecDecodeSelf(d) - } - - } - if yyrt1441 { - for ; yyj1441 < yyl1441; yyj1441++ { - yyv1441 = append(yyv1441, Deployment{}) - yyh1441.ElemContainerState(yyj1441) - if r.TryDecodeAsNil() { - yyv1441[yyj1441] = Deployment{} - } else { - yyv1443 := &yyv1441[yyj1441] - yyv1443.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1441 := 0 - for ; !r.CheckBreak(); yyj1441++ { - - if yyj1441 >= len(yyv1441) { - yyv1441 = append(yyv1441, Deployment{}) // var yyz1441 Deployment - yyc1441 = true - } - yyh1441.ElemContainerState(yyj1441) - if yyj1441 < len(yyv1441) { - if r.TryDecodeAsNil() { - yyv1441[yyj1441] = Deployment{} - } else { - yyv1444 := &yyv1441[yyj1441] - yyv1444.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1441 < len(yyv1441) { - yyv1441 = yyv1441[:yyj1441] - yyc1441 = true - } else if yyj1441 == 0 && yyv1441 == nil { - yyv1441 = []Deployment{} - yyc1441 = true - } - } - yyh1441.End() - if yyc1441 { - *v = yyv1441 - } -} - -func (x codecSelfer1234) encSliceDaemonSet(v []DaemonSet, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1445 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1446 := &yyv1445 - yy1446.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1447 := *v - yyh1447, yyl1447 := z.DecSliceHelperStart() - var yyc1447 bool - if yyl1447 == 0 { - if yyv1447 == nil { - yyv1447 = []DaemonSet{} - yyc1447 = true - } else if len(yyv1447) != 0 { - yyv1447 = yyv1447[:0] - yyc1447 = true - } - } else if yyl1447 > 0 { - var yyrr1447, yyrl1447 int - var yyrt1447 bool - if yyl1447 > cap(yyv1447) { - - yyrg1447 := len(yyv1447) > 0 - yyv21447 := yyv1447 - yyrl1447, yyrt1447 = z.DecInferLen(yyl1447, z.DecBasicHandle().MaxInitLen, 728) - if yyrt1447 { - if yyrl1447 <= cap(yyv1447) { - yyv1447 = yyv1447[:yyrl1447] - } else { - yyv1447 = make([]DaemonSet, yyrl1447) - } - } else { - yyv1447 = make([]DaemonSet, yyrl1447) - } - yyc1447 = true - yyrr1447 = len(yyv1447) - if yyrg1447 { - copy(yyv1447, yyv21447) - } - } else if yyl1447 != len(yyv1447) { - yyv1447 = yyv1447[:yyl1447] - yyc1447 = true - } - yyj1447 := 0 - for ; yyj1447 < yyrr1447; yyj1447++ { - yyh1447.ElemContainerState(yyj1447) - if r.TryDecodeAsNil() { - yyv1447[yyj1447] = DaemonSet{} - } else { - yyv1448 := &yyv1447[yyj1447] - yyv1448.CodecDecodeSelf(d) - } - - } - if yyrt1447 { - for ; yyj1447 < yyl1447; yyj1447++ { - yyv1447 = append(yyv1447, DaemonSet{}) - yyh1447.ElemContainerState(yyj1447) - if r.TryDecodeAsNil() { - yyv1447[yyj1447] = DaemonSet{} - } else { - yyv1449 := &yyv1447[yyj1447] - yyv1449.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1447 := 0 - for ; !r.CheckBreak(); yyj1447++ { - - if yyj1447 >= len(yyv1447) { - yyv1447 = append(yyv1447, DaemonSet{}) // var yyz1447 DaemonSet - yyc1447 = true - } - yyh1447.ElemContainerState(yyj1447) - if yyj1447 < 
len(yyv1447) { - if r.TryDecodeAsNil() { - yyv1447[yyj1447] = DaemonSet{} - } else { - yyv1450 := &yyv1447[yyj1447] - yyv1450.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1447 < len(yyv1447) { - yyv1447 = yyv1447[:yyj1447] - yyc1447 = true - } else if yyj1447 == 0 && yyv1447 == nil { - yyv1447 = []DaemonSet{} - yyc1447 = true - } - } - yyh1447.End() - if yyc1447 { - *v = yyv1447 - } -} - -func (x codecSelfer1234) encSliceThirdPartyResourceData(v []ThirdPartyResourceData, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1451 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1452 := &yyv1451 - yy1452.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceData, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1453 := *v - yyh1453, yyl1453 := z.DecSliceHelperStart() - var yyc1453 bool - if yyl1453 == 0 { - if yyv1453 == nil { - yyv1453 = []ThirdPartyResourceData{} - yyc1453 = true - } else if len(yyv1453) != 0 { - yyv1453 = yyv1453[:0] - yyc1453 = true - } - } else if yyl1453 > 0 { - var yyrr1453, yyrl1453 int - var yyrt1453 bool - if yyl1453 > cap(yyv1453) { - - yyrg1453 := len(yyv1453) > 0 - yyv21453 := yyv1453 - yyrl1453, yyrt1453 = z.DecInferLen(yyl1453, z.DecBasicHandle().MaxInitLen, 280) - if yyrt1453 { - if yyrl1453 <= cap(yyv1453) { - yyv1453 = yyv1453[:yyrl1453] - } else { - yyv1453 = make([]ThirdPartyResourceData, yyrl1453) - } - } else { - yyv1453 = make([]ThirdPartyResourceData, yyrl1453) - } - yyc1453 = true - yyrr1453 = len(yyv1453) - if yyrg1453 { - copy(yyv1453, yyv21453) - } - } else if yyl1453 != len(yyv1453) { - yyv1453 = yyv1453[:yyl1453] - yyc1453 = true - } - yyj1453 := 0 - for ; yyj1453 < yyrr1453; yyj1453++ { - yyh1453.ElemContainerState(yyj1453) - if r.TryDecodeAsNil() { - yyv1453[yyj1453] = ThirdPartyResourceData{} - } else { - yyv1454 := &yyv1453[yyj1453] - yyv1454.CodecDecodeSelf(d) - } - - } - if yyrt1453 { - for ; yyj1453 < yyl1453; yyj1453++ { - yyv1453 = append(yyv1453, ThirdPartyResourceData{}) - yyh1453.ElemContainerState(yyj1453) - if r.TryDecodeAsNil() { - yyv1453[yyj1453] = ThirdPartyResourceData{} - } else { - yyv1455 := &yyv1453[yyj1453] - yyv1455.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1453 := 0 - for ; !r.CheckBreak(); yyj1453++ { - - if yyj1453 >= len(yyv1453) { - yyv1453 = append(yyv1453, ThirdPartyResourceData{}) // var yyz1453 ThirdPartyResourceData - yyc1453 = true - } - yyh1453.ElemContainerState(yyj1453) - if yyj1453 < len(yyv1453) { - if r.TryDecodeAsNil() { - yyv1453[yyj1453] = ThirdPartyResourceData{} - } else { - yyv1456 := &yyv1453[yyj1453] - yyv1456.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1453 < len(yyv1453) { - yyv1453 = yyv1453[:yyj1453] - yyc1453 = true - } else if yyj1453 == 0 && yyv1453 == nil { - yyv1453 = []ThirdPartyResourceData{} - yyc1453 = true - } - } - yyh1453.End() - if yyc1453 { - *v = yyv1453 - } -} - -func (x codecSelfer1234) encSliceIngress(v []Ingress, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1457 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1458 := &yyv1457 - yy1458.CodecEncodeSelf(e) - } - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1459 := *v - yyh1459, yyl1459 := z.DecSliceHelperStart() - var yyc1459 bool - if yyl1459 == 0 { - if yyv1459 == nil { - yyv1459 = []Ingress{} - yyc1459 = true - } else if len(yyv1459) != 0 { - yyv1459 = yyv1459[:0] - yyc1459 = true - } - } else if yyl1459 > 0 { - var yyrr1459, yyrl1459 int - var yyrt1459 bool - if yyl1459 > cap(yyv1459) { - - yyrg1459 := len(yyv1459) > 0 - yyv21459 := yyv1459 - yyrl1459, yyrt1459 = z.DecInferLen(yyl1459, z.DecBasicHandle().MaxInitLen, 336) - if yyrt1459 { - if yyrl1459 <= cap(yyv1459) { - yyv1459 = yyv1459[:yyrl1459] - } else { - yyv1459 = make([]Ingress, yyrl1459) - } - } else { - yyv1459 = make([]Ingress, yyrl1459) - } - yyc1459 = true - yyrr1459 = len(yyv1459) - if yyrg1459 { - copy(yyv1459, yyv21459) - } - } else if yyl1459 != len(yyv1459) { - yyv1459 = yyv1459[:yyl1459] - yyc1459 = true - } - yyj1459 := 0 - for ; yyj1459 < yyrr1459; yyj1459++ { - yyh1459.ElemContainerState(yyj1459) - if r.TryDecodeAsNil() { - yyv1459[yyj1459] = Ingress{} - } else { - yyv1460 := &yyv1459[yyj1459] - yyv1460.CodecDecodeSelf(d) - } - - } - if yyrt1459 { - for ; yyj1459 < yyl1459; yyj1459++ { - yyv1459 = append(yyv1459, Ingress{}) - yyh1459.ElemContainerState(yyj1459) - if r.TryDecodeAsNil() { - yyv1459[yyj1459] = Ingress{} - } else { - yyv1461 := &yyv1459[yyj1459] - yyv1461.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1459 := 0 - for ; !r.CheckBreak(); yyj1459++ { - - if yyj1459 >= len(yyv1459) { - yyv1459 = append(yyv1459, Ingress{}) // var yyz1459 Ingress - yyc1459 = true - } - yyh1459.ElemContainerState(yyj1459) - if yyj1459 < len(yyv1459) { - if r.TryDecodeAsNil() { - yyv1459[yyj1459] = Ingress{} - } else { - yyv1462 := &yyv1459[yyj1459] - yyv1462.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1459 < len(yyv1459) { - yyv1459 = yyv1459[:yyj1459] - yyc1459 = true - } else if yyj1459 == 0 && yyv1459 == nil { - yyv1459 = []Ingress{} - yyc1459 = true - } - } - yyh1459.End() - if yyc1459 { - *v = yyv1459 - } -} - -func (x codecSelfer1234) encSliceIngressTLS(v []IngressTLS, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1463 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1464 := &yyv1463 - yy1464.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1465 := *v - yyh1465, yyl1465 := z.DecSliceHelperStart() - var yyc1465 bool - if yyl1465 == 0 { - if yyv1465 == nil { - yyv1465 = []IngressTLS{} - yyc1465 = true - } else if len(yyv1465) != 0 { - yyv1465 = yyv1465[:0] - yyc1465 = true - } - } else if yyl1465 > 0 { - var yyrr1465, yyrl1465 int - var yyrt1465 bool - if yyl1465 > cap(yyv1465) { - - yyrg1465 := len(yyv1465) > 0 - yyv21465 := yyv1465 - yyrl1465, yyrt1465 = z.DecInferLen(yyl1465, z.DecBasicHandle().MaxInitLen, 40) - if yyrt1465 { - if yyrl1465 <= cap(yyv1465) { - yyv1465 = yyv1465[:yyrl1465] - } else { - yyv1465 = make([]IngressTLS, yyrl1465) - } - } else { - yyv1465 = make([]IngressTLS, yyrl1465) - } - yyc1465 = true - yyrr1465 = len(yyv1465) - if yyrg1465 { - 
copy(yyv1465, yyv21465) - } - } else if yyl1465 != len(yyv1465) { - yyv1465 = yyv1465[:yyl1465] - yyc1465 = true - } - yyj1465 := 0 - for ; yyj1465 < yyrr1465; yyj1465++ { - yyh1465.ElemContainerState(yyj1465) - if r.TryDecodeAsNil() { - yyv1465[yyj1465] = IngressTLS{} - } else { - yyv1466 := &yyv1465[yyj1465] - yyv1466.CodecDecodeSelf(d) - } - - } - if yyrt1465 { - for ; yyj1465 < yyl1465; yyj1465++ { - yyv1465 = append(yyv1465, IngressTLS{}) - yyh1465.ElemContainerState(yyj1465) - if r.TryDecodeAsNil() { - yyv1465[yyj1465] = IngressTLS{} - } else { - yyv1467 := &yyv1465[yyj1465] - yyv1467.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1465 := 0 - for ; !r.CheckBreak(); yyj1465++ { - - if yyj1465 >= len(yyv1465) { - yyv1465 = append(yyv1465, IngressTLS{}) // var yyz1465 IngressTLS - yyc1465 = true - } - yyh1465.ElemContainerState(yyj1465) - if yyj1465 < len(yyv1465) { - if r.TryDecodeAsNil() { - yyv1465[yyj1465] = IngressTLS{} - } else { - yyv1468 := &yyv1465[yyj1465] - yyv1468.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1465 < len(yyv1465) { - yyv1465 = yyv1465[:yyj1465] - yyc1465 = true - } else if yyj1465 == 0 && yyv1465 == nil { - yyv1465 = []IngressTLS{} - yyc1465 = true - } - } - yyh1465.End() - if yyc1465 { - *v = yyv1465 - } -} - -func (x codecSelfer1234) encSliceIngressRule(v []IngressRule, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1469 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1470 := &yyv1469 - yy1470.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1471 := *v - yyh1471, yyl1471 := z.DecSliceHelperStart() - var yyc1471 bool - if yyl1471 == 0 { - if yyv1471 == nil { - yyv1471 = []IngressRule{} - yyc1471 = true - } else if len(yyv1471) != 0 { - yyv1471 = yyv1471[:0] - yyc1471 = true - } - } else if yyl1471 > 0 { - var yyrr1471, yyrl1471 int - var yyrt1471 bool - if yyl1471 > cap(yyv1471) { - - yyrg1471 := len(yyv1471) > 0 - yyv21471 := yyv1471 - yyrl1471, yyrt1471 = z.DecInferLen(yyl1471, z.DecBasicHandle().MaxInitLen, 24) - if yyrt1471 { - if yyrl1471 <= cap(yyv1471) { - yyv1471 = yyv1471[:yyrl1471] - } else { - yyv1471 = make([]IngressRule, yyrl1471) - } - } else { - yyv1471 = make([]IngressRule, yyrl1471) - } - yyc1471 = true - yyrr1471 = len(yyv1471) - if yyrg1471 { - copy(yyv1471, yyv21471) - } - } else if yyl1471 != len(yyv1471) { - yyv1471 = yyv1471[:yyl1471] - yyc1471 = true - } - yyj1471 := 0 - for ; yyj1471 < yyrr1471; yyj1471++ { - yyh1471.ElemContainerState(yyj1471) - if r.TryDecodeAsNil() { - yyv1471[yyj1471] = IngressRule{} - } else { - yyv1472 := &yyv1471[yyj1471] - yyv1472.CodecDecodeSelf(d) - } - - } - if yyrt1471 { - for ; yyj1471 < yyl1471; yyj1471++ { - yyv1471 = append(yyv1471, IngressRule{}) - yyh1471.ElemContainerState(yyj1471) - if r.TryDecodeAsNil() { - yyv1471[yyj1471] = IngressRule{} - } else { - yyv1473 := &yyv1471[yyj1471] - yyv1473.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1471 := 0 - for ; !r.CheckBreak(); yyj1471++ { - - if yyj1471 >= len(yyv1471) { - yyv1471 = append(yyv1471, IngressRule{}) // var yyz1471 IngressRule - yyc1471 = true - } - yyh1471.ElemContainerState(yyj1471) - if yyj1471 < len(yyv1471) { - if r.TryDecodeAsNil() { - yyv1471[yyj1471] = 
IngressRule{} - } else { - yyv1474 := &yyv1471[yyj1471] - yyv1474.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1471 < len(yyv1471) { - yyv1471 = yyv1471[:yyj1471] - yyc1471 = true - } else if yyj1471 == 0 && yyv1471 == nil { - yyv1471 = []IngressRule{} - yyc1471 = true - } - } - yyh1471.End() - if yyc1471 { - *v = yyv1471 - } -} - -func (x codecSelfer1234) encSliceHTTPIngressPath(v []HTTPIngressPath, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1475 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1476 := &yyv1475 - yy1476.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1477 := *v - yyh1477, yyl1477 := z.DecSliceHelperStart() - var yyc1477 bool - if yyl1477 == 0 { - if yyv1477 == nil { - yyv1477 = []HTTPIngressPath{} - yyc1477 = true - } else if len(yyv1477) != 0 { - yyv1477 = yyv1477[:0] - yyc1477 = true - } - } else if yyl1477 > 0 { - var yyrr1477, yyrl1477 int - var yyrt1477 bool - if yyl1477 > cap(yyv1477) { - - yyrg1477 := len(yyv1477) > 0 - yyv21477 := yyv1477 - yyrl1477, yyrt1477 = z.DecInferLen(yyl1477, z.DecBasicHandle().MaxInitLen, 64) - if yyrt1477 { - if yyrl1477 <= cap(yyv1477) { - yyv1477 = yyv1477[:yyrl1477] - } else { - yyv1477 = make([]HTTPIngressPath, yyrl1477) - } - } else { - yyv1477 = make([]HTTPIngressPath, yyrl1477) - } - yyc1477 = true - yyrr1477 = len(yyv1477) - if yyrg1477 { - copy(yyv1477, yyv21477) - } - } else if yyl1477 != len(yyv1477) { - yyv1477 = yyv1477[:yyl1477] - yyc1477 = true - } - yyj1477 := 0 - for ; yyj1477 < yyrr1477; yyj1477++ { - yyh1477.ElemContainerState(yyj1477) - if r.TryDecodeAsNil() { - yyv1477[yyj1477] = HTTPIngressPath{} - } else { - yyv1478 := &yyv1477[yyj1477] - yyv1478.CodecDecodeSelf(d) - } - - } - if yyrt1477 { - for ; yyj1477 < yyl1477; yyj1477++ { - yyv1477 = append(yyv1477, HTTPIngressPath{}) - yyh1477.ElemContainerState(yyj1477) - if r.TryDecodeAsNil() { - yyv1477[yyj1477] = HTTPIngressPath{} - } else { - yyv1479 := &yyv1477[yyj1477] - yyv1479.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1477 := 0 - for ; !r.CheckBreak(); yyj1477++ { - - if yyj1477 >= len(yyv1477) { - yyv1477 = append(yyv1477, HTTPIngressPath{}) // var yyz1477 HTTPIngressPath - yyc1477 = true - } - yyh1477.ElemContainerState(yyj1477) - if yyj1477 < len(yyv1477) { - if r.TryDecodeAsNil() { - yyv1477[yyj1477] = HTTPIngressPath{} - } else { - yyv1480 := &yyv1477[yyj1477] - yyv1480.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1477 < len(yyv1477) { - yyv1477 = yyv1477[:yyj1477] - yyc1477 = true - } else if yyj1477 == 0 && yyv1477 == nil { - yyv1477 = []HTTPIngressPath{} - yyc1477 = true - } - } - yyh1477.End() - if yyc1477 { - *v = yyv1477 - } -} - -func (x codecSelfer1234) encSliceReplicaSet(v []ReplicaSet, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1481 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1482 := &yyv1481 - yy1482.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1483 := *v - yyh1483, yyl1483 := z.DecSliceHelperStart() - var yyc1483 bool - if yyl1483 == 0 { - if yyv1483 == nil { - yyv1483 = []ReplicaSet{} - yyc1483 = true - } else if len(yyv1483) != 0 { - yyv1483 = yyv1483[:0] - yyc1483 = true - } - } else if yyl1483 > 0 { - var yyrr1483, yyrl1483 int - var yyrt1483 bool - if yyl1483 > cap(yyv1483) { - - yyrg1483 := len(yyv1483) > 0 - yyv21483 := yyv1483 - yyrl1483, yyrt1483 = z.DecInferLen(yyl1483, z.DecBasicHandle().MaxInitLen, 768) - if yyrt1483 { - if yyrl1483 <= cap(yyv1483) { - yyv1483 = yyv1483[:yyrl1483] - } else { - yyv1483 = make([]ReplicaSet, yyrl1483) - } - } else { - yyv1483 = make([]ReplicaSet, yyrl1483) - } - yyc1483 = true - yyrr1483 = len(yyv1483) - if yyrg1483 { - copy(yyv1483, yyv21483) - } - } else if yyl1483 != len(yyv1483) { - yyv1483 = yyv1483[:yyl1483] - yyc1483 = true - } - yyj1483 := 0 - for ; yyj1483 < yyrr1483; yyj1483++ { - yyh1483.ElemContainerState(yyj1483) - if r.TryDecodeAsNil() { - yyv1483[yyj1483] = ReplicaSet{} - } else { - yyv1484 := &yyv1483[yyj1483] - yyv1484.CodecDecodeSelf(d) - } - - } - if yyrt1483 { - for ; yyj1483 < yyl1483; yyj1483++ { - yyv1483 = append(yyv1483, ReplicaSet{}) - yyh1483.ElemContainerState(yyj1483) - if r.TryDecodeAsNil() { - yyv1483[yyj1483] = ReplicaSet{} - } else { - yyv1485 := &yyv1483[yyj1483] - yyv1485.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1483 := 0 - for ; !r.CheckBreak(); yyj1483++ { - - if yyj1483 >= len(yyv1483) { - yyv1483 = append(yyv1483, ReplicaSet{}) // var yyz1483 ReplicaSet - yyc1483 = true - } - yyh1483.ElemContainerState(yyj1483) - if yyj1483 < len(yyv1483) { - if r.TryDecodeAsNil() { - yyv1483[yyj1483] = ReplicaSet{} - } else { - yyv1486 := &yyv1483[yyj1483] - yyv1486.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1483 < len(yyv1483) { - yyv1483 = yyv1483[:yyj1483] - yyc1483 = true - } else if yyj1483 == 0 && yyv1483 == nil { - yyv1483 = []ReplicaSet{} - yyc1483 = true - } - } - yyh1483.End() - if yyc1483 { - *v = yyv1483 - } -} - -func (x codecSelfer1234) encSliceReplicaSetCondition(v []ReplicaSetCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1487 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1488 := &yyv1487 - yy1488.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceReplicaSetCondition(v *[]ReplicaSetCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1489 := *v - yyh1489, yyl1489 := z.DecSliceHelperStart() - var yyc1489 bool - if yyl1489 == 0 { - if yyv1489 == nil { - yyv1489 = []ReplicaSetCondition{} - yyc1489 = true - } else if len(yyv1489) != 0 { - yyv1489 = yyv1489[:0] - yyc1489 = true - } - } else if yyl1489 > 0 { - var yyrr1489, yyrl1489 int - var yyrt1489 bool - if yyl1489 > cap(yyv1489) { - - yyrg1489 := len(yyv1489) > 0 - yyv21489 := yyv1489 - yyrl1489, yyrt1489 = z.DecInferLen(yyl1489, z.DecBasicHandle().MaxInitLen, 88) - if yyrt1489 { - if yyrl1489 <= cap(yyv1489) { - yyv1489 = yyv1489[:yyrl1489] - } else { - yyv1489 = make([]ReplicaSetCondition, yyrl1489) - } - } else { - yyv1489 = make([]ReplicaSetCondition, yyrl1489) - } - yyc1489 = true - yyrr1489 = len(yyv1489) - if yyrg1489 { - copy(yyv1489, yyv21489) - } - } else if yyl1489 != len(yyv1489) { - 
yyv1489 = yyv1489[:yyl1489] - yyc1489 = true - } - yyj1489 := 0 - for ; yyj1489 < yyrr1489; yyj1489++ { - yyh1489.ElemContainerState(yyj1489) - if r.TryDecodeAsNil() { - yyv1489[yyj1489] = ReplicaSetCondition{} - } else { - yyv1490 := &yyv1489[yyj1489] - yyv1490.CodecDecodeSelf(d) - } - - } - if yyrt1489 { - for ; yyj1489 < yyl1489; yyj1489++ { - yyv1489 = append(yyv1489, ReplicaSetCondition{}) - yyh1489.ElemContainerState(yyj1489) - if r.TryDecodeAsNil() { - yyv1489[yyj1489] = ReplicaSetCondition{} - } else { - yyv1491 := &yyv1489[yyj1489] - yyv1491.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1489 := 0 - for ; !r.CheckBreak(); yyj1489++ { - - if yyj1489 >= len(yyv1489) { - yyv1489 = append(yyv1489, ReplicaSetCondition{}) // var yyz1489 ReplicaSetCondition - yyc1489 = true - } - yyh1489.ElemContainerState(yyj1489) - if yyj1489 < len(yyv1489) { - if r.TryDecodeAsNil() { - yyv1489[yyj1489] = ReplicaSetCondition{} - } else { - yyv1492 := &yyv1489[yyj1489] - yyv1492.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1489 < len(yyv1489) { - yyv1489 = yyv1489[:yyj1489] - yyc1489 = true - } else if yyj1489 == 0 && yyv1489 == nil { - yyv1489 = []ReplicaSetCondition{} - yyc1489 = true - } - } - yyh1489.End() - if yyc1489 { - *v = yyv1489 - } -} - -func (x codecSelfer1234) encSliceapi_Capability(v []pkg2_api.Capability, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1493 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1494 := z.EncBinary() - _ = yym1494 - if false { - } else if z.HasExtensions() && z.EncExt(yyv1493) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyv1493)) - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceapi_Capability(v *[]pkg2_api.Capability, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1495 := *v - yyh1495, yyl1495 := z.DecSliceHelperStart() - var yyc1495 bool - if yyl1495 == 0 { - if yyv1495 == nil { - yyv1495 = []pkg2_api.Capability{} - yyc1495 = true - } else if len(yyv1495) != 0 { - yyv1495 = yyv1495[:0] - yyc1495 = true - } - } else if yyl1495 > 0 { - var yyrr1495, yyrl1495 int - var yyrt1495 bool - if yyl1495 > cap(yyv1495) { - - yyrl1495, yyrt1495 = z.DecInferLen(yyl1495, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1495 { - if yyrl1495 <= cap(yyv1495) { - yyv1495 = yyv1495[:yyrl1495] - } else { - yyv1495 = make([]pkg2_api.Capability, yyrl1495) - } - } else { - yyv1495 = make([]pkg2_api.Capability, yyrl1495) - } - yyc1495 = true - yyrr1495 = len(yyv1495) - } else if yyl1495 != len(yyv1495) { - yyv1495 = yyv1495[:yyl1495] - yyc1495 = true - } - yyj1495 := 0 - for ; yyj1495 < yyrr1495; yyj1495++ { - yyh1495.ElemContainerState(yyj1495) - if r.TryDecodeAsNil() { - yyv1495[yyj1495] = "" - } else { - yyv1495[yyj1495] = pkg2_api.Capability(r.DecodeString()) - } - - } - if yyrt1495 { - for ; yyj1495 < yyl1495; yyj1495++ { - yyv1495 = append(yyv1495, "") - yyh1495.ElemContainerState(yyj1495) - if r.TryDecodeAsNil() { - yyv1495[yyj1495] = "" - } else { - yyv1495[yyj1495] = pkg2_api.Capability(r.DecodeString()) - } - - } - } - - } else { - yyj1495 := 0 - for ; !r.CheckBreak(); yyj1495++ { - - if yyj1495 >= len(yyv1495) { - yyv1495 = append(yyv1495, "") // var yyz1495 pkg2_api.Capability - yyc1495 = true - } - yyh1495.ElemContainerState(yyj1495) - if yyj1495 < len(yyv1495) { - if 
r.TryDecodeAsNil() { - yyv1495[yyj1495] = "" - } else { - yyv1495[yyj1495] = pkg2_api.Capability(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj1495 < len(yyv1495) { - yyv1495 = yyv1495[:yyj1495] - yyc1495 = true - } else if yyj1495 == 0 && yyv1495 == nil { - yyv1495 = []pkg2_api.Capability{} - yyc1495 = true - } - } - yyh1495.End() - if yyc1495 { - *v = yyv1495 - } -} - -func (x codecSelfer1234) encSliceFSType(v []FSType, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1499 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1499.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1500 := *v - yyh1500, yyl1500 := z.DecSliceHelperStart() - var yyc1500 bool - if yyl1500 == 0 { - if yyv1500 == nil { - yyv1500 = []FSType{} - yyc1500 = true - } else if len(yyv1500) != 0 { - yyv1500 = yyv1500[:0] - yyc1500 = true - } - } else if yyl1500 > 0 { - var yyrr1500, yyrl1500 int - var yyrt1500 bool - if yyl1500 > cap(yyv1500) { - - yyrl1500, yyrt1500 = z.DecInferLen(yyl1500, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1500 { - if yyrl1500 <= cap(yyv1500) { - yyv1500 = yyv1500[:yyrl1500] - } else { - yyv1500 = make([]FSType, yyrl1500) - } - } else { - yyv1500 = make([]FSType, yyrl1500) - } - yyc1500 = true - yyrr1500 = len(yyv1500) - } else if yyl1500 != len(yyv1500) { - yyv1500 = yyv1500[:yyl1500] - yyc1500 = true - } - yyj1500 := 0 - for ; yyj1500 < yyrr1500; yyj1500++ { - yyh1500.ElemContainerState(yyj1500) - if r.TryDecodeAsNil() { - yyv1500[yyj1500] = "" - } else { - yyv1500[yyj1500] = FSType(r.DecodeString()) - } - - } - if yyrt1500 { - for ; yyj1500 < yyl1500; yyj1500++ { - yyv1500 = append(yyv1500, "") - yyh1500.ElemContainerState(yyj1500) - if r.TryDecodeAsNil() { - yyv1500[yyj1500] = "" - } else { - yyv1500[yyj1500] = FSType(r.DecodeString()) - } - - } - } - - } else { - yyj1500 := 0 - for ; !r.CheckBreak(); yyj1500++ { - - if yyj1500 >= len(yyv1500) { - yyv1500 = append(yyv1500, "") // var yyz1500 FSType - yyc1500 = true - } - yyh1500.ElemContainerState(yyj1500) - if yyj1500 < len(yyv1500) { - if r.TryDecodeAsNil() { - yyv1500[yyj1500] = "" - } else { - yyv1500[yyj1500] = FSType(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj1500 < len(yyv1500) { - yyv1500 = yyv1500[:yyj1500] - yyc1500 = true - } else if yyj1500 == 0 && yyv1500 == nil { - yyv1500 = []FSType{} - yyc1500 = true - } - } - yyh1500.End() - if yyc1500 { - *v = yyv1500 - } -} - -func (x codecSelfer1234) encSliceHostPortRange(v []HostPortRange, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1504 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1505 := &yyv1504 - yy1505.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1506 := *v - yyh1506, yyl1506 := z.DecSliceHelperStart() - var yyc1506 bool - if yyl1506 == 0 { - if yyv1506 == nil { - yyv1506 = []HostPortRange{} - yyc1506 = true - } else if len(yyv1506) != 0 { - 
yyv1506 = yyv1506[:0] - yyc1506 = true - } - } else if yyl1506 > 0 { - var yyrr1506, yyrl1506 int - var yyrt1506 bool - if yyl1506 > cap(yyv1506) { - - yyrg1506 := len(yyv1506) > 0 - yyv21506 := yyv1506 - yyrl1506, yyrt1506 = z.DecInferLen(yyl1506, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1506 { - if yyrl1506 <= cap(yyv1506) { - yyv1506 = yyv1506[:yyrl1506] - } else { - yyv1506 = make([]HostPortRange, yyrl1506) - } - } else { - yyv1506 = make([]HostPortRange, yyrl1506) - } - yyc1506 = true - yyrr1506 = len(yyv1506) - if yyrg1506 { - copy(yyv1506, yyv21506) - } - } else if yyl1506 != len(yyv1506) { - yyv1506 = yyv1506[:yyl1506] - yyc1506 = true - } - yyj1506 := 0 - for ; yyj1506 < yyrr1506; yyj1506++ { - yyh1506.ElemContainerState(yyj1506) - if r.TryDecodeAsNil() { - yyv1506[yyj1506] = HostPortRange{} - } else { - yyv1507 := &yyv1506[yyj1506] - yyv1507.CodecDecodeSelf(d) - } - - } - if yyrt1506 { - for ; yyj1506 < yyl1506; yyj1506++ { - yyv1506 = append(yyv1506, HostPortRange{}) - yyh1506.ElemContainerState(yyj1506) - if r.TryDecodeAsNil() { - yyv1506[yyj1506] = HostPortRange{} - } else { - yyv1508 := &yyv1506[yyj1506] - yyv1508.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1506 := 0 - for ; !r.CheckBreak(); yyj1506++ { - - if yyj1506 >= len(yyv1506) { - yyv1506 = append(yyv1506, HostPortRange{}) // var yyz1506 HostPortRange - yyc1506 = true - } - yyh1506.ElemContainerState(yyj1506) - if yyj1506 < len(yyv1506) { - if r.TryDecodeAsNil() { - yyv1506[yyj1506] = HostPortRange{} - } else { - yyv1509 := &yyv1506[yyj1506] - yyv1509.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1506 < len(yyv1506) { - yyv1506 = yyv1506[:yyj1506] - yyc1506 = true - } else if yyj1506 == 0 && yyv1506 == nil { - yyv1506 = []HostPortRange{} - yyc1506 = true - } - } - yyh1506.End() - if yyc1506 { - *v = yyv1506 - } -} - -func (x codecSelfer1234) encSliceIDRange(v []IDRange, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1510 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1511 := &yyv1510 - yy1511.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1512 := *v - yyh1512, yyl1512 := z.DecSliceHelperStart() - var yyc1512 bool - if yyl1512 == 0 { - if yyv1512 == nil { - yyv1512 = []IDRange{} - yyc1512 = true - } else if len(yyv1512) != 0 { - yyv1512 = yyv1512[:0] - yyc1512 = true - } - } else if yyl1512 > 0 { - var yyrr1512, yyrl1512 int - var yyrt1512 bool - if yyl1512 > cap(yyv1512) { - - yyrg1512 := len(yyv1512) > 0 - yyv21512 := yyv1512 - yyrl1512, yyrt1512 = z.DecInferLen(yyl1512, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1512 { - if yyrl1512 <= cap(yyv1512) { - yyv1512 = yyv1512[:yyrl1512] - } else { - yyv1512 = make([]IDRange, yyrl1512) - } - } else { - yyv1512 = make([]IDRange, yyrl1512) - } - yyc1512 = true - yyrr1512 = len(yyv1512) - if yyrg1512 { - copy(yyv1512, yyv21512) - } - } else if yyl1512 != len(yyv1512) { - yyv1512 = yyv1512[:yyl1512] - yyc1512 = true - } - yyj1512 := 0 - for ; yyj1512 < yyrr1512; yyj1512++ { - yyh1512.ElemContainerState(yyj1512) - if r.TryDecodeAsNil() { - yyv1512[yyj1512] = IDRange{} - } else { - yyv1513 := &yyv1512[yyj1512] - yyv1513.CodecDecodeSelf(d) - } - - } - if yyrt1512 { - for ; yyj1512 < yyl1512; yyj1512++ { - 
yyv1512 = append(yyv1512, IDRange{}) - yyh1512.ElemContainerState(yyj1512) - if r.TryDecodeAsNil() { - yyv1512[yyj1512] = IDRange{} - } else { - yyv1514 := &yyv1512[yyj1512] - yyv1514.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1512 := 0 - for ; !r.CheckBreak(); yyj1512++ { - - if yyj1512 >= len(yyv1512) { - yyv1512 = append(yyv1512, IDRange{}) // var yyz1512 IDRange - yyc1512 = true - } - yyh1512.ElemContainerState(yyj1512) - if yyj1512 < len(yyv1512) { - if r.TryDecodeAsNil() { - yyv1512[yyj1512] = IDRange{} - } else { - yyv1515 := &yyv1512[yyj1512] - yyv1515.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1512 < len(yyv1512) { - yyv1512 = yyv1512[:yyj1512] - yyc1512 = true - } else if yyj1512 == 0 && yyv1512 == nil { - yyv1512 = []IDRange{} - yyc1512 = true - } - } - yyh1512.End() - if yyc1512 { - *v = yyv1512 - } -} - -func (x codecSelfer1234) encSlicePodSecurityPolicy(v []PodSecurityPolicy, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1516 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1517 := &yyv1516 - yy1517.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1518 := *v - yyh1518, yyl1518 := z.DecSliceHelperStart() - var yyc1518 bool - if yyl1518 == 0 { - if yyv1518 == nil { - yyv1518 = []PodSecurityPolicy{} - yyc1518 = true - } else if len(yyv1518) != 0 { - yyv1518 = yyv1518[:0] - yyc1518 = true - } - } else if yyl1518 > 0 { - var yyrr1518, yyrl1518 int - var yyrt1518 bool - if yyl1518 > cap(yyv1518) { - - yyrg1518 := len(yyv1518) > 0 - yyv21518 := yyv1518 - yyrl1518, yyrt1518 = z.DecInferLen(yyl1518, z.DecBasicHandle().MaxInitLen, 552) - if yyrt1518 { - if yyrl1518 <= cap(yyv1518) { - yyv1518 = yyv1518[:yyrl1518] - } else { - yyv1518 = make([]PodSecurityPolicy, yyrl1518) - } - } else { - yyv1518 = make([]PodSecurityPolicy, yyrl1518) - } - yyc1518 = true - yyrr1518 = len(yyv1518) - if yyrg1518 { - copy(yyv1518, yyv21518) - } - } else if yyl1518 != len(yyv1518) { - yyv1518 = yyv1518[:yyl1518] - yyc1518 = true - } - yyj1518 := 0 - for ; yyj1518 < yyrr1518; yyj1518++ { - yyh1518.ElemContainerState(yyj1518) - if r.TryDecodeAsNil() { - yyv1518[yyj1518] = PodSecurityPolicy{} - } else { - yyv1519 := &yyv1518[yyj1518] - yyv1519.CodecDecodeSelf(d) - } - - } - if yyrt1518 { - for ; yyj1518 < yyl1518; yyj1518++ { - yyv1518 = append(yyv1518, PodSecurityPolicy{}) - yyh1518.ElemContainerState(yyj1518) - if r.TryDecodeAsNil() { - yyv1518[yyj1518] = PodSecurityPolicy{} - } else { - yyv1520 := &yyv1518[yyj1518] - yyv1520.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1518 := 0 - for ; !r.CheckBreak(); yyj1518++ { - - if yyj1518 >= len(yyv1518) { - yyv1518 = append(yyv1518, PodSecurityPolicy{}) // var yyz1518 PodSecurityPolicy - yyc1518 = true - } - yyh1518.ElemContainerState(yyj1518) - if yyj1518 < len(yyv1518) { - if r.TryDecodeAsNil() { - yyv1518[yyj1518] = PodSecurityPolicy{} - } else { - yyv1521 := &yyv1518[yyj1518] - yyv1521.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1518 < len(yyv1518) { - yyv1518 = yyv1518[:yyj1518] - yyc1518 = true - } else if yyj1518 == 0 && yyv1518 == nil { - yyv1518 = []PodSecurityPolicy{} - yyc1518 = true - } - } - yyh1518.End() - if yyc1518 { - *v = 
yyv1518 - } -} - -func (x codecSelfer1234) encSliceNetworkPolicyIngressRule(v []NetworkPolicyIngressRule, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1522 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1523 := &yyv1522 - yy1523.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNetworkPolicyIngressRule(v *[]NetworkPolicyIngressRule, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1524 := *v - yyh1524, yyl1524 := z.DecSliceHelperStart() - var yyc1524 bool - if yyl1524 == 0 { - if yyv1524 == nil { - yyv1524 = []NetworkPolicyIngressRule{} - yyc1524 = true - } else if len(yyv1524) != 0 { - yyv1524 = yyv1524[:0] - yyc1524 = true - } - } else if yyl1524 > 0 { - var yyrr1524, yyrl1524 int - var yyrt1524 bool - if yyl1524 > cap(yyv1524) { - - yyrg1524 := len(yyv1524) > 0 - yyv21524 := yyv1524 - yyrl1524, yyrt1524 = z.DecInferLen(yyl1524, z.DecBasicHandle().MaxInitLen, 48) - if yyrt1524 { - if yyrl1524 <= cap(yyv1524) { - yyv1524 = yyv1524[:yyrl1524] - } else { - yyv1524 = make([]NetworkPolicyIngressRule, yyrl1524) - } - } else { - yyv1524 = make([]NetworkPolicyIngressRule, yyrl1524) - } - yyc1524 = true - yyrr1524 = len(yyv1524) - if yyrg1524 { - copy(yyv1524, yyv21524) - } - } else if yyl1524 != len(yyv1524) { - yyv1524 = yyv1524[:yyl1524] - yyc1524 = true - } - yyj1524 := 0 - for ; yyj1524 < yyrr1524; yyj1524++ { - yyh1524.ElemContainerState(yyj1524) - if r.TryDecodeAsNil() { - yyv1524[yyj1524] = NetworkPolicyIngressRule{} - } else { - yyv1525 := &yyv1524[yyj1524] - yyv1525.CodecDecodeSelf(d) - } - - } - if yyrt1524 { - for ; yyj1524 < yyl1524; yyj1524++ { - yyv1524 = append(yyv1524, NetworkPolicyIngressRule{}) - yyh1524.ElemContainerState(yyj1524) - if r.TryDecodeAsNil() { - yyv1524[yyj1524] = NetworkPolicyIngressRule{} - } else { - yyv1526 := &yyv1524[yyj1524] - yyv1526.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1524 := 0 - for ; !r.CheckBreak(); yyj1524++ { - - if yyj1524 >= len(yyv1524) { - yyv1524 = append(yyv1524, NetworkPolicyIngressRule{}) // var yyz1524 NetworkPolicyIngressRule - yyc1524 = true - } - yyh1524.ElemContainerState(yyj1524) - if yyj1524 < len(yyv1524) { - if r.TryDecodeAsNil() { - yyv1524[yyj1524] = NetworkPolicyIngressRule{} - } else { - yyv1527 := &yyv1524[yyj1524] - yyv1527.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1524 < len(yyv1524) { - yyv1524 = yyv1524[:yyj1524] - yyc1524 = true - } else if yyj1524 == 0 && yyv1524 == nil { - yyv1524 = []NetworkPolicyIngressRule{} - yyc1524 = true - } - } - yyh1524.End() - if yyc1524 { - *v = yyv1524 - } -} - -func (x codecSelfer1234) encSliceNetworkPolicyPort(v []NetworkPolicyPort, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1528 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1529 := &yyv1528 - yy1529.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNetworkPolicyPort(v *[]NetworkPolicyPort, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1530 := *v - yyh1530, yyl1530 := z.DecSliceHelperStart() - var yyc1530 bool - if yyl1530 == 0 { - if yyv1530 == nil { - 
yyv1530 = []NetworkPolicyPort{} - yyc1530 = true - } else if len(yyv1530) != 0 { - yyv1530 = yyv1530[:0] - yyc1530 = true - } - } else if yyl1530 > 0 { - var yyrr1530, yyrl1530 int - var yyrt1530 bool - if yyl1530 > cap(yyv1530) { - - yyrg1530 := len(yyv1530) > 0 - yyv21530 := yyv1530 - yyrl1530, yyrt1530 = z.DecInferLen(yyl1530, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1530 { - if yyrl1530 <= cap(yyv1530) { - yyv1530 = yyv1530[:yyrl1530] - } else { - yyv1530 = make([]NetworkPolicyPort, yyrl1530) - } - } else { - yyv1530 = make([]NetworkPolicyPort, yyrl1530) - } - yyc1530 = true - yyrr1530 = len(yyv1530) - if yyrg1530 { - copy(yyv1530, yyv21530) - } - } else if yyl1530 != len(yyv1530) { - yyv1530 = yyv1530[:yyl1530] - yyc1530 = true - } - yyj1530 := 0 - for ; yyj1530 < yyrr1530; yyj1530++ { - yyh1530.ElemContainerState(yyj1530) - if r.TryDecodeAsNil() { - yyv1530[yyj1530] = NetworkPolicyPort{} - } else { - yyv1531 := &yyv1530[yyj1530] - yyv1531.CodecDecodeSelf(d) - } - - } - if yyrt1530 { - for ; yyj1530 < yyl1530; yyj1530++ { - yyv1530 = append(yyv1530, NetworkPolicyPort{}) - yyh1530.ElemContainerState(yyj1530) - if r.TryDecodeAsNil() { - yyv1530[yyj1530] = NetworkPolicyPort{} - } else { - yyv1532 := &yyv1530[yyj1530] - yyv1532.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1530 := 0 - for ; !r.CheckBreak(); yyj1530++ { - - if yyj1530 >= len(yyv1530) { - yyv1530 = append(yyv1530, NetworkPolicyPort{}) // var yyz1530 NetworkPolicyPort - yyc1530 = true - } - yyh1530.ElemContainerState(yyj1530) - if yyj1530 < len(yyv1530) { - if r.TryDecodeAsNil() { - yyv1530[yyj1530] = NetworkPolicyPort{} - } else { - yyv1533 := &yyv1530[yyj1530] - yyv1533.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1530 < len(yyv1530) { - yyv1530 = yyv1530[:yyj1530] - yyc1530 = true - } else if yyj1530 == 0 && yyv1530 == nil { - yyv1530 = []NetworkPolicyPort{} - yyc1530 = true - } - } - yyh1530.End() - if yyc1530 { - *v = yyv1530 - } -} - -func (x codecSelfer1234) encSliceNetworkPolicyPeer(v []NetworkPolicyPeer, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1534 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1535 := &yyv1534 - yy1535.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNetworkPolicyPeer(v *[]NetworkPolicyPeer, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1536 := *v - yyh1536, yyl1536 := z.DecSliceHelperStart() - var yyc1536 bool - if yyl1536 == 0 { - if yyv1536 == nil { - yyv1536 = []NetworkPolicyPeer{} - yyc1536 = true - } else if len(yyv1536) != 0 { - yyv1536 = yyv1536[:0] - yyc1536 = true - } - } else if yyl1536 > 0 { - var yyrr1536, yyrl1536 int - var yyrt1536 bool - if yyl1536 > cap(yyv1536) { - - yyrg1536 := len(yyv1536) > 0 - yyv21536 := yyv1536 - yyrl1536, yyrt1536 = z.DecInferLen(yyl1536, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1536 { - if yyrl1536 <= cap(yyv1536) { - yyv1536 = yyv1536[:yyrl1536] - } else { - yyv1536 = make([]NetworkPolicyPeer, yyrl1536) - } - } else { - yyv1536 = make([]NetworkPolicyPeer, yyrl1536) - } - yyc1536 = true - yyrr1536 = len(yyv1536) - if yyrg1536 { - copy(yyv1536, yyv21536) - } - } else if yyl1536 != len(yyv1536) { - yyv1536 = yyv1536[:yyl1536] - yyc1536 = true - } - yyj1536 := 0 - for ; yyj1536 < yyrr1536; yyj1536++ { - yyh1536.ElemContainerState(yyj1536) - if 
r.TryDecodeAsNil() { - yyv1536[yyj1536] = NetworkPolicyPeer{} - } else { - yyv1537 := &yyv1536[yyj1536] - yyv1537.CodecDecodeSelf(d) - } - - } - if yyrt1536 { - for ; yyj1536 < yyl1536; yyj1536++ { - yyv1536 = append(yyv1536, NetworkPolicyPeer{}) - yyh1536.ElemContainerState(yyj1536) - if r.TryDecodeAsNil() { - yyv1536[yyj1536] = NetworkPolicyPeer{} - } else { - yyv1538 := &yyv1536[yyj1536] - yyv1538.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1536 := 0 - for ; !r.CheckBreak(); yyj1536++ { - - if yyj1536 >= len(yyv1536) { - yyv1536 = append(yyv1536, NetworkPolicyPeer{}) // var yyz1536 NetworkPolicyPeer - yyc1536 = true - } - yyh1536.ElemContainerState(yyj1536) - if yyj1536 < len(yyv1536) { - if r.TryDecodeAsNil() { - yyv1536[yyj1536] = NetworkPolicyPeer{} - } else { - yyv1539 := &yyv1536[yyj1536] - yyv1539.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1536 < len(yyv1536) { - yyv1536 = yyv1536[:yyj1536] - yyc1536 = true - } else if yyj1536 == 0 && yyv1536 == nil { - yyv1536 = []NetworkPolicyPeer{} - yyc1536 = true - } - } - yyh1536.End() - if yyc1536 { - *v = yyv1536 - } -} - -func (x codecSelfer1234) encSliceNetworkPolicy(v []NetworkPolicy, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1540 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1541 := &yyv1540 - yy1541.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNetworkPolicy(v *[]NetworkPolicy, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1542 := *v - yyh1542, yyl1542 := z.DecSliceHelperStart() - var yyc1542 bool - if yyl1542 == 0 { - if yyv1542 == nil { - yyv1542 = []NetworkPolicy{} - yyc1542 = true - } else if len(yyv1542) != 0 { - yyv1542 = yyv1542[:0] - yyc1542 = true - } - } else if yyl1542 > 0 { - var yyrr1542, yyrl1542 int - var yyrt1542 bool - if yyl1542 > cap(yyv1542) { - - yyrg1542 := len(yyv1542) > 0 - yyv21542 := yyv1542 - yyrl1542, yyrt1542 = z.DecInferLen(yyl1542, z.DecBasicHandle().MaxInitLen, 312) - if yyrt1542 { - if yyrl1542 <= cap(yyv1542) { - yyv1542 = yyv1542[:yyrl1542] - } else { - yyv1542 = make([]NetworkPolicy, yyrl1542) - } - } else { - yyv1542 = make([]NetworkPolicy, yyrl1542) - } - yyc1542 = true - yyrr1542 = len(yyv1542) - if yyrg1542 { - copy(yyv1542, yyv21542) - } - } else if yyl1542 != len(yyv1542) { - yyv1542 = yyv1542[:yyl1542] - yyc1542 = true - } - yyj1542 := 0 - for ; yyj1542 < yyrr1542; yyj1542++ { - yyh1542.ElemContainerState(yyj1542) - if r.TryDecodeAsNil() { - yyv1542[yyj1542] = NetworkPolicy{} - } else { - yyv1543 := &yyv1542[yyj1542] - yyv1543.CodecDecodeSelf(d) - } - - } - if yyrt1542 { - for ; yyj1542 < yyl1542; yyj1542++ { - yyv1542 = append(yyv1542, NetworkPolicy{}) - yyh1542.ElemContainerState(yyj1542) - if r.TryDecodeAsNil() { - yyv1542[yyj1542] = NetworkPolicy{} - } else { - yyv1544 := &yyv1542[yyj1542] - yyv1544.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1542 := 0 - for ; !r.CheckBreak(); yyj1542++ { - - if yyj1542 >= len(yyv1542) { - yyv1542 = append(yyv1542, NetworkPolicy{}) // var yyz1542 NetworkPolicy - yyc1542 = true - } - yyh1542.ElemContainerState(yyj1542) - if yyj1542 < len(yyv1542) { - if r.TryDecodeAsNil() { - yyv1542[yyj1542] = NetworkPolicy{} - } else { - yyv1545 := &yyv1542[yyj1542] - yyv1545.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if 
yyj1542 < len(yyv1542) { - yyv1542 = yyv1542[:yyj1542] - yyc1542 = true - } else if yyj1542 == 0 && yyv1542 == nil { - yyv1542 = []NetworkPolicy{} - yyc1542 = true - } - } - yyh1542.End() - if yyc1542 { - *v = yyv1542 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go index 3010ae3544..c51bcfe4a3 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go @@ -29,10 +29,10 @@ support is experimental. package extensions import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/util/intstr" ) const ( @@ -46,18 +46,18 @@ const ( type ScaleSpec struct { // desired number of instances for the scaled object. // +optional - Replicas int32 `json:"replicas,omitempty"` + Replicas int32 } // represents the current status of a scale subresource. type ScaleStatus struct { // actual number of observed instances of the scaled object. - Replicas int32 `json:"replicas"` + Replicas int32 // label query over pods that should match the replicas count. // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors // +optional - Selector *unversioned.LabelSelector `json:"selector,omitempty"` + Selector *metav1.LabelSelector } // +genclient=true @@ -65,46 +65,46 @@ type ScaleStatus struct { // represents a scaling request for a resource. type Scale struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. // +optional - api.ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. // +optional - Spec ScaleSpec `json:"spec,omitempty"` + Spec ScaleSpec // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. // +optional - Status ScaleStatus `json:"status,omitempty"` + Status ScaleStatus } // Dummy definition type ReplicationControllerDummy struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta } // Alpha-level support for Custom Metrics in HPA (as annotations). type CustomMetricTarget struct { // Custom Metric name. - Name string `json:"name"` + Name string // Custom Metric value (average). - TargetValue resource.Quantity `json:"value"` + TargetValue resource.Quantity } type CustomMetricTargetList struct { - Items []CustomMetricTarget `json:"items"` + Items []CustomMetricTarget } type CustomMetricCurrentStatus struct { // Custom Metric name. - Name string `json:"name"` + Name string // Custom Metric value (average). - CurrentValue resource.Quantity `json:"value"` + CurrentValue resource.Quantity } type CustomMetricCurrentStatusList struct { - Items []CustomMetricCurrentStatus `json:"items"` + Items []CustomMetricCurrentStatus } // +genclient=true @@ -113,103 +113,103 @@ type CustomMetricCurrentStatusList struct { // A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource // types to the API. It consists of one or more Versions of the api. 
type ThirdPartyResource struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // Standard object metadata // +optional - api.ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Description is the description of this object. // +optional - Description string `json:"description,omitempty"` + Description string // Versions are versions for this third party object - Versions []APIVersion `json:"versions,omitempty"` + Versions []APIVersion } type ThirdPartyResourceList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // Standard list metadata. // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta // Items is the list of horizontal pod autoscalers. - Items []ThirdPartyResource `json:"items"` + Items []ThirdPartyResource } // An APIVersion represents a single concrete version of an object model. -// TODO: we should consider merge this struct with GroupVersion in unversioned.go +// TODO: we should consider merge this struct with GroupVersion in metav1.go type APIVersion struct { // Name of this version (e.g. 'v1'). - Name string `json:"name,omitempty"` + Name string } // An internal object, used for versioned storage in etcd. Not exposed to the end user. type ThirdPartyResourceData struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // Standard object metadata. // +optional - api.ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Data is the raw JSON data for this data. // +optional - Data []byte `json:"data,omitempty"` + Data []byte } // +genclient=true type Deployment struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - api.ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Specification of the desired behavior of the Deployment. // +optional - Spec DeploymentSpec `json:"spec,omitempty"` + Spec DeploymentSpec // Most recently observed status of the Deployment. // +optional - Status DeploymentStatus `json:"status,omitempty"` + Status DeploymentStatus } type DeploymentSpec struct { // Number of desired pods. This is a pointer to distinguish between explicit // zero and not specified. Defaults to 1. // +optional - Replicas int32 `json:"replicas,omitempty"` + Replicas int32 // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // +optional - Selector *unversioned.LabelSelector `json:"selector,omitempty"` + Selector *metav1.LabelSelector // Template describes the pods that will be created. - Template api.PodTemplateSpec `json:"template"` + Template api.PodTemplateSpec // The deployment strategy to use to replace existing pods with new ones. // +optional - Strategy DeploymentStrategy `json:"strategy,omitempty"` + Strategy DeploymentStrategy // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) // +optional - MinReadySeconds int32 `json:"minReadySeconds,omitempty"` + MinReadySeconds int32 // The number of old ReplicaSets to retain to allow rollback. // This is a pointer to distinguish between explicit zero and not specified. // +optional - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` + RevisionHistoryLimit *int32 // Indicates that the deployment is paused and will not be processed by the // deployment controller. 
// +optional - Paused bool `json:"paused,omitempty"` + Paused bool // The config this deployment is rolling back to. Will be cleared after rollback is done. // +optional - RollbackTo *RollbackConfig `json:"rollbackTo,omitempty"` + RollbackTo *RollbackConfig // The maximum time in seconds for a deployment to make progress before it // is considered to be failed. The deployment controller will continue to @@ -218,25 +218,25 @@ type DeploymentSpec struct { // implemented, the deployment controller will automatically rollback failed // deployments. Note that progress will not be estimated during the time a // deployment is paused. This is not set by default. - ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"` + ProgressDeadlineSeconds *int32 } // DeploymentRollback stores the information required to rollback a deployment. type DeploymentRollback struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // Required: This must match the Name of a deployment. - Name string `json:"name"` + Name string // The annotations to be updated to a deployment // +optional - UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty"` + UpdatedAnnotations map[string]string // The config of this deployment rollback. - RollbackTo RollbackConfig `json:"rollbackTo"` + RollbackTo RollbackConfig } type RollbackConfig struct { // The revision to rollback to. If set to 0, rollbck to the last revision. // +optional - Revision int64 `json:"revision,omitempty"` + Revision int64 } const ( @@ -249,7 +249,7 @@ const ( type DeploymentStrategy struct { // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. // +optional - Type DeploymentStrategyType `json:"type,omitempty"` + Type DeploymentStrategyType // Rolling update config params. Present only if DeploymentStrategyType = // RollingUpdate. @@ -257,7 +257,7 @@ type DeploymentStrategy struct { // TODO: Update this to follow our convention for oneOf, whatever we decide it // to be. // +optional - RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty"` + RollingUpdate *RollingUpdateDeployment } type DeploymentStrategyType string @@ -274,7 +274,7 @@ const ( type RollingUpdateDeployment struct { // The maximum number of pods that can be unavailable during the update. // Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). - // Absolute number is calculated from percentage by rounding up. + // Absolute number is calculated from percentage by rounding down. // This can not be 0 if MaxSurge is 0. // By default, a fixed value of 1 is used. // Example: when this is set to 30%, the old RC can be scaled down by 30% @@ -283,7 +283,7 @@ type RollingUpdateDeployment struct { // that at least 70% of original number of pods are available at all times // during the update. // +optional - MaxUnavailable intstr.IntOrString `json:"maxUnavailable,omitempty"` + MaxUnavailable intstr.IntOrString // The maximum number of pods that can be scheduled above the original number of // pods. @@ -296,32 +296,36 @@ type RollingUpdateDeployment struct { // new RC can be scaled up further, ensuring that total number of pods running // at any time during the update is atmost 130% of original pods. // +optional - MaxSurge intstr.IntOrString `json:"maxSurge,omitempty"` + MaxSurge intstr.IntOrString } type DeploymentStatus struct { // The generation observed by the deployment controller. 
// +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty"` + ObservedGeneration int64 // Total number of non-terminated pods targeted by this deployment (their labels match the selector). // +optional - Replicas int32 `json:"replicas,omitempty"` + Replicas int32 // Total number of non-terminated pods targeted by this deployment that have the desired template spec. // +optional - UpdatedReplicas int32 `json:"updatedReplicas,omitempty"` + UpdatedReplicas int32 + + // Total number of ready pods targeted by this deployment. + // +optional + ReadyReplicas int32 // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. // +optional - AvailableReplicas int32 `json:"availableReplicas,omitempty"` + AvailableReplicas int32 // Total number of unavailable pods targeted by this deployment. // +optional - UnavailableReplicas int32 `json:"unavailableReplicas,omitempty"` + UnavailableReplicas int32 // Represents the latest available observations of a deployment's current state. - Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + Conditions []DeploymentCondition } type DeploymentConditionType string @@ -344,42 +348,41 @@ const ( // DeploymentCondition describes the state of a deployment at a certain point. type DeploymentCondition struct { // Type of deployment condition. - Type DeploymentConditionType `json:"type"` + Type DeploymentConditionType // Status of the condition, one of True, False, Unknown. - Status api.ConditionStatus `json:"status"` + Status api.ConditionStatus // The last time this condition was updated. - LastUpdateTime unversioned.Time `json:"lastUpdateTime,omitempty"` + LastUpdateTime metav1.Time // Last time the condition transitioned from one status to another. - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"` + LastTransitionTime metav1.Time // The reason for the condition's last transition. - Reason string `json:"reason,omitempty"` + Reason string // A human readable message indicating details about the transition. - Message string `json:"message,omitempty"` + Message string } type DeploymentList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta // Items is the list of deployments. - Items []Deployment `json:"items"` + Items []Deployment } -// TODO(madhusudancs): Uncomment while implementing DaemonSet updates. -/* Commenting out for v1.2. We are planning to bring these types back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting the types out. type DaemonSetUpdateStrategy struct { - // Type of daemon set update. Only "RollingUpdate" is supported at this time. Default is RollingUpdate. -// +optional - Type DaemonSetUpdateStrategyType `json:"type,omitempty"` + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". + // Default is OnDelete. + // +optional + Type DaemonSetUpdateStrategyType - // Rolling update config params. Present only if DaemonSetUpdateStrategy = - // RollingUpdate. + // Rolling update config params. Present only if type = "RollingUpdate". //--- // TODO: Update this to follow our convention for oneOf, whatever we decide it // to be. Same as DeploymentStrategy.RollingUpdate. 
-// +optional - RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty"` + // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + RollingUpdate *RollingUpdateDaemonSet } type DaemonSetUpdateStrategyType string @@ -387,6 +390,9 @@ type DaemonSetUpdateStrategyType string const ( // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" + + // Replace the old daemons only when it's killed + OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete" ) // Spec to control the desired behavior of daemon set rolling update. @@ -397,129 +403,141 @@ type RollingUpdateDaemonSet struct { // number is calculated from percentage by rounding up. // This cannot be 0. // Default value is 1. - // Example: when this is set to 30%, 30% of the currently running DaemonSet - // pods can be stopped for an update at any given time. The update starts - // by stopping at most 30% of the currently running DaemonSet pods and then - // brings up new DaemonSet pods in their place. Once the new pods are ready, - // it then proceeds onto other DaemonSet pods, thus ensuring that at least - // 70% of original number of DaemonSet pods are available at all times - // during the update. -// +optional - MaxUnavailable intstr.IntOrString `json:"maxUnavailable,omitempty"` - - // Minimum number of seconds for which a newly created DaemonSet pod should - // be ready without any of its container crashing, for it to be considered - // available. Defaults to 0 (pod will be considered available as soon as it - // is ready). -// +optional - MinReadySeconds int `json:"minReadySeconds,omitempty"` + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + MaxUnavailable intstr.IntOrString } -*/ // DaemonSetSpec is the specification of a daemon set. type DaemonSetSpec struct { - // Selector is a label query over pods that are managed by the daemon set. + // A label query over pods that are managed by the daemon set. // Must match in order to be controlled. // If empty, defaulted to labels on Pod template. // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors // +optional - Selector *unversioned.LabelSelector `json:"selector,omitempty"` + Selector *metav1.LabelSelector - // Template is the object that describes the pod that will be created. + // An object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template - Template api.PodTemplateSpec `json:"template"` + Template api.PodTemplateSpec + + // An update strategy to replace existing DaemonSet pods with new pods. 
+ // +optional + UpdateStrategy DaemonSetUpdateStrategy - // TODO(madhusudancs): Uncomment while implementing DaemonSet updates. - /* Commenting out for v1.2. We are planning to bring these fields back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting these fields out. - // Update strategy to replace existing DaemonSet pods with new pods. + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). // +optional - UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty"` + MinReadySeconds int32 - // Label key that is added to DaemonSet pods to distinguish between old and - // new pod templates during DaemonSet update. - // Users can set this to an empty string to indicate that the system should - // not add any label. If unspecified, system uses - // DefaultDaemonSetUniqueLabelKey("daemonset.kubernetes.io/podTemplateHash"). - // Value of this key is hash of DaemonSetSpec.PodTemplateSpec. - // No label is added if this is set to empty string. + // A sequence number representing a specific generation of the template. + // Populated by the system. It can be set only during the creation. // +optional - UniqueLabelKey string `json:"uniqueLabelKey,omitempty"` - */ + TemplateGeneration int64 } -const ( - // DefaultDaemonSetUniqueLabelKey is the default key of the labels that is added - // to daemon set pods to distinguish between old and new pod templates during - // DaemonSet update. See DaemonSetSpec's UniqueLabelKey field for more information. - DefaultDaemonSetUniqueLabelKey string = "daemonset.kubernetes.io/podTemplateHash" -) - // DaemonSetStatus represents the current status of a daemon set. type DaemonSetStatus struct { - // CurrentNumberScheduled is the number of nodes that are running at least 1 + // The number of nodes that are running at least 1 // daemon pod and are supposed to run the daemon pod. - CurrentNumberScheduled int32 `json:"currentNumberScheduled"` + CurrentNumberScheduled int32 - // NumberMisscheduled is the number of nodes that are running the daemon pod, but are + // The number of nodes that are running the daemon pod, but are // not supposed to run the daemon pod. - NumberMisscheduled int32 `json:"numberMisscheduled"` + NumberMisscheduled int32 - // DesiredNumberScheduled is the total number of nodes that should be running the daemon + // The total number of nodes that should be running the daemon // pod (including nodes correctly running the daemon pod). - DesiredNumberScheduled int32 `json:"desiredNumberScheduled"` + DesiredNumberScheduled int32 - // NumberReady is the number of nodes that should be running the daemon pod and have one + // The number of nodes that should be running the daemon pod and have one // or more of the daemon pod running and ready. - NumberReady int32 `json:"numberReady"` + NumberReady int32 + + // The most recent generation observed by the daemon set controller. 
+ // +optional + ObservedGeneration int64 + + // The total number of nodes that are running updated daemon pod + // +optional + UpdatedNumberScheduled int32 + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + NumberAvailable int32 + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + NumberUnavailable int32 } // +genclient=true // DaemonSet represents the configuration of a daemon set. type DaemonSet struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - api.ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta - // Spec defines the desired behavior of this daemon set. + // The desired behavior of this daemon set. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // +optional - Spec DaemonSetSpec `json:"spec,omitempty"` + Spec DaemonSetSpec - // Status is the current status of this daemon set. This data may be + // The current status of this daemon set. This data may be // out of date by some window of time. // Populated by the system. // Read-only. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // +optional - Status DaemonSetStatus `json:"status,omitempty"` + Status DaemonSetStatus } +const ( + // DaemonSetTemplateGenerationKey is the key of the labels that is added + // to daemon set pods to distinguish between old and new pod templates + // during DaemonSet template update. + DaemonSetTemplateGenerationKey string = "pod-template-generation" +) + // DaemonSetList is a collection of daemon sets. type DaemonSetList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta - // Items is a list of daemon sets. - Items []DaemonSet `json:"items"` + // A list of daemon sets. + Items []DaemonSet } type ThirdPartyResourceDataList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // Standard list metadata // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta // Items is a list of third party objects - Items []ThirdPartyResourceData `json:"items"` + Items []ThirdPartyResourceData } // +genclient=true @@ -529,33 +547,33 @@ type ThirdPartyResourceDataList struct { // externally-reachable urls, load balance traffic, terminate SSL, offer name // based virtual hosting etc. type Ingress struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - api.ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Spec is the desired state of the Ingress. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // +optional - Spec IngressSpec `json:"spec,omitempty"` + Spec IngressSpec // Status is the current state of the Ingress. 
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // +optional - Status IngressStatus `json:"status,omitempty"` + Status IngressStatus } // IngressList is a collection of Ingress. type IngressList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta // Items is the list of Ingress. - Items []Ingress `json:"items"` + Items []Ingress } // IngressSpec describes the Ingress the user wishes to exist. @@ -565,7 +583,7 @@ type IngressSpec struct { // is optional to allow the loadbalancer controller or defaulting logic to // specify a global default. // +optional - Backend *IngressBackend `json:"backend,omitempty"` + Backend *IngressBackend // TLS configuration. Currently the Ingress only supports a single TLS // port, 443. If multiple members of this list specify different hosts, they @@ -573,12 +591,12 @@ type IngressSpec struct { // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +optional - TLS []IngressTLS `json:"tls,omitempty"` + TLS []IngressTLS // A list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. // +optional - Rules []IngressRule `json:"rules,omitempty"` + Rules []IngressRule // TODO: Add the ability to specify load-balancer IP through claims } @@ -589,14 +607,14 @@ type IngressTLS struct { // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +optional - Hosts []string `json:"hosts,omitempty"` + Hosts []string // SecretName is the name of the secret used to terminate SSL traffic on 443. // Field is left optional to allow SSL routing based on SNI hostname alone. // If the SNI host in a listener conflicts with the "Host" header field used // by an IngressRule, the SNI host is used for termination and value of the // Host header is used for routing. // +optional - SecretName string `json:"secretName,omitempty"` + SecretName string // TODO: Consider specifying different modes of termination, protocols etc. } @@ -604,7 +622,7 @@ type IngressTLS struct { type IngressStatus struct { // LoadBalancer contains the current status of the load-balancer. // +optional - LoadBalancer api.LoadBalancerStatus `json:"loadBalancer,omitempty"` + LoadBalancer api.LoadBalancerStatus } // IngressRule represents the rules mapping the paths under a specified host to @@ -624,14 +642,14 @@ type IngressRule struct { // If the host is unspecified, the Ingress routes all traffic based on the // specified IngressRuleValue. // +optional - Host string `json:"host,omitempty"` + Host string // IngressRuleValue represents a rule to route requests for this IngressRule. // If unspecified, the rule defaults to a http catch-all. Whether that sends // just traffic matching the host to the default backend or all traffic to the // default backend, is left to the controller fulfilling the Ingress. Http is // currently the only supported IngressRuleValue. // +optional - IngressRuleValue `json:",inline,omitempty"` + IngressRuleValue } // IngressRuleValue represents a rule to apply against incoming requests. If the @@ -646,7 +664,7 @@ type IngressRuleValue struct { // usable by a loadbalancer, like http keep-alive. 
// +optional - HTTP *HTTPIngressRuleValue `json:"http,omitempty"` + HTTP *HTTPIngressRuleValue } // HTTPIngressRuleValue is a list of http selectors pointing to backends. @@ -656,7 +674,7 @@ type IngressRuleValue struct { // or '#'. type HTTPIngressRuleValue struct { // A collection of paths that map requests to backends. - Paths []HTTPIngressPath `json:"paths"` + Paths []HTTPIngressPath // TODO: Consider adding fields for ingress-type specific global // options usable by a loadbalancer, like http keep-alive. } @@ -672,47 +690,47 @@ type HTTPIngressPath struct { // a '/'. If unspecified, the path defaults to a catch all sending // traffic to the backend. // +optional - Path string `json:"path,omitempty"` + Path string // Backend defines the referenced service endpoint to which the traffic // will be forwarded to. - Backend IngressBackend `json:"backend"` + Backend IngressBackend } // IngressBackend describes all endpoints for a given service and port. type IngressBackend struct { // Specifies the name of the referenced service. - ServiceName string `json:"serviceName"` + ServiceName string // Specifies the port of the referenced service. - ServicePort intstr.IntOrString `json:"servicePort"` + ServicePort intstr.IntOrString } // +genclient=true // ReplicaSet represents the configuration of a replica set. type ReplicaSet struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - api.ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Spec defines the desired behavior of this ReplicaSet. // +optional - Spec ReplicaSetSpec `json:"spec,omitempty"` + Spec ReplicaSetSpec // Status is the current status of this ReplicaSet. This data may be // out of date by some window of time. // +optional - Status ReplicaSetStatus `json:"status,omitempty"` + Status ReplicaSetStatus } // ReplicaSetList is a collection of ReplicaSets. type ReplicaSetList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta - Items []ReplicaSet `json:"items"` + Items []ReplicaSet } // ReplicaSetSpec is the specification of a ReplicaSet. @@ -720,51 +738,51 @@ type ReplicaSetList struct { // a Template set. type ReplicaSetSpec struct { // Replicas is the number of desired replicas. - Replicas int32 `json:"replicas"` + Replicas int32 // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) // +optional - MinReadySeconds int32 `json:"minReadySeconds,omitempty"` + MinReadySeconds int32 // Selector is a label query over pods that should match the replica count. // Must match in order to be controlled. // If empty, defaulted to labels on pod template. // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors // +optional - Selector *unversioned.LabelSelector `json:"selector,omitempty"` + Selector *metav1.LabelSelector // Template is the object that describes the pod that will be created if // insufficient replicas are detected. // +optional - Template api.PodTemplateSpec `json:"template,omitempty"` + Template api.PodTemplateSpec } // ReplicaSetStatus represents the current status of a ReplicaSet. type ReplicaSetStatus struct { // Replicas is the number of actual replicas. - Replicas int32 `json:"replicas"` + Replicas int32 // The number of pods that have labels matching the labels of the pod template of the replicaset. 
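[Editor's note] For orientation only, not part of the patch: the Ingress, IngressRule, HTTPIngressRuleValue, HTTPIngressPath and IngressBackend fields shown above compose as in the following sketch, which routes an illustrative host to an assumed "echo" Service on port 80 using the internal extensions types.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/kubernetes/pkg/apis/extensions"
)

// exampleIngress routes http://echo.example.com/ to the "echo" service on port 80.
// Host, service name and port are made-up values for illustration.
func exampleIngress() *extensions.Ingress {
	return &extensions.Ingress{
		ObjectMeta: metav1.ObjectMeta{Name: "echo", Namespace: "default"},
		Spec: extensions.IngressSpec{
			Rules: []extensions.IngressRule{{
				Host: "echo.example.com",
				IngressRuleValue: extensions.IngressRuleValue{
					HTTP: &extensions.HTTPIngressRuleValue{
						Paths: []extensions.HTTPIngressPath{{
							Path: "/",
							Backend: extensions.IngressBackend{
								ServiceName: "echo",
								ServicePort: intstr.FromInt(80),
							},
						}},
					},
				},
			}},
		},
	}
}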
// +optional - FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty"` + FullyLabeledReplicas int32 // The number of ready replicas for this replica set. // +optional - ReadyReplicas int32 `json:"readyReplicas,omitempty"` + ReadyReplicas int32 // The number of available replicas (ready for at least minReadySeconds) for this replica set. // +optional - AvailableReplicas int32 `json:"availableReplicas,omitempty"` + AvailableReplicas int32 // ObservedGeneration is the most recent generation observed by the controller. // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty"` + ObservedGeneration int64 // Represents the latest available observations of a replica set's current state. // +optional - Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + Conditions []ReplicaSetCondition } type ReplicaSetConditionType string @@ -780,18 +798,18 @@ const ( // ReplicaSetCondition describes the state of a replica set at a certain point. type ReplicaSetCondition struct { // Type of replica set condition. - Type ReplicaSetConditionType `json:"type"` + Type ReplicaSetConditionType // Status of the condition, one of True, False, Unknown. - Status api.ConditionStatus `json:"status"` + Status api.ConditionStatus // The last time the condition transitioned from one status to another. // +optional - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"` + LastTransitionTime metav1.Time // The reason for the condition's last transition. // +optional - Reason string `json:"reason,omitempty"` + Reason string // A human readable message indicating details about the transition. // +optional - Message string `json:"message,omitempty"` + Message string } // +genclient=true @@ -800,74 +818,74 @@ type ReplicaSetCondition struct { // PodSecurityPolicy governs the ability to make requests that affect the SecurityContext // that will be applied to a pod and container. type PodSecurityPolicy struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - api.ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Spec defines the policy enforced. // +optional - Spec PodSecurityPolicySpec `json:"spec,omitempty"` + Spec PodSecurityPolicySpec } // PodSecurityPolicySpec defines the policy enforced. type PodSecurityPolicySpec struct { // Privileged determines if a pod can request to be run as privileged. // +optional - Privileged bool `json:"privileged,omitempty"` + Privileged bool // DefaultAddCapabilities is the default set of capabilities that will be added to the container // unless the pod spec specifically drops the capability. You may not list a capability in both // DefaultAddCapabilities and RequiredDropCapabilities. // +optional - DefaultAddCapabilities []api.Capability `json:"defaultAddCapabilities,omitempty"` + DefaultAddCapabilities []api.Capability // RequiredDropCapabilities are the capabilities that will be dropped from the container. These // are required to be dropped and cannot be added. // +optional - RequiredDropCapabilities []api.Capability `json:"requiredDropCapabilities,omitempty"` + RequiredDropCapabilities []api.Capability // AllowedCapabilities is a list of capabilities that can be requested to add to the container. // Capabilities in this field may be added at the pod author's discretion. // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. 
// +optional - AllowedCapabilities []api.Capability `json:"allowedCapabilities,omitempty"` + AllowedCapabilities []api.Capability // Volumes is a white list of allowed volume plugins. Empty indicates that all plugins // may be used. // +optional - Volumes []FSType `json:"volumes,omitempty"` + Volumes []FSType // HostNetwork determines if the policy allows the use of HostNetwork in the pod spec. // +optional - HostNetwork bool `json:"hostNetwork,omitempty"` + HostNetwork bool // HostPorts determines which host port ranges are allowed to be exposed. // +optional - HostPorts []HostPortRange `json:"hostPorts,omitempty"` + HostPorts []HostPortRange // HostPID determines if the policy allows the use of HostPID in the pod spec. // +optional - HostPID bool `json:"hostPID,omitempty"` + HostPID bool // HostIPC determines if the policy allows the use of HostIPC in the pod spec. // +optional - HostIPC bool `json:"hostIPC,omitempty"` + HostIPC bool // SELinux is the strategy that will dictate the allowable labels that may be set. - SELinux SELinuxStrategyOptions `json:"seLinux"` + SELinux SELinuxStrategyOptions // RunAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - RunAsUser RunAsUserStrategyOptions `json:"runAsUser"` + RunAsUser RunAsUserStrategyOptions // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups"` + SupplementalGroups SupplementalGroupsStrategyOptions // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. - FSGroup FSGroupStrategyOptions `json:"fsGroup"` + FSGroup FSGroupStrategyOptions // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file // system. If the container specifically requests to run with a non-read only root file system // the PSP should deny the pod. // If set to false the container may run with a read only root file system if it wishes but it // will not be forced to. // +optional - ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty"` + ReadOnlyRootFilesystem bool } // HostPortRange defines a range of host ports that will be enabled by a policy // for pods to use. It requires both the start and end to be defined. type HostPortRange struct { // Min is the start of the range, inclusive. - Min int `json:"min"` + Min int // Max is the end of the range, inclusive. - Max int `json:"max"` + Max int } // FSType gives strong typing to different file systems that are used by volumes. @@ -897,17 +915,20 @@ var ( Quobyte FSType = "quobyte" AzureDisk FSType = "azureDisk" PhotonPersistentDisk FSType = "photonPersistentDisk" + Projected FSType = "projected" + PortworxVolume FSType = "portworxVolume" + ScaleIO FSType = "scaleIO" All FSType = "*" ) // SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. type SELinuxStrategyOptions struct { // Rule is the strategy that will dictate the allowable labels that may be set. 
- Rule SELinuxStrategy `json:"rule"` + Rule SELinuxStrategy // seLinuxOptions required to run as; required for MustRunAs // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context // +optional - SELinuxOptions *api.SELinuxOptions `json:"seLinuxOptions,omitempty"` + SELinuxOptions *api.SELinuxOptions } // SELinuxStrategy denotes strategy types for generating SELinux options for a @@ -924,18 +945,18 @@ const ( // RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. type RunAsUserStrategyOptions struct { // Rule is the strategy that will dictate the allowable RunAsUser values that may be set. - Rule RunAsUserStrategy `json:"rule"` + Rule RunAsUserStrategy // Ranges are the allowed ranges of uids that may be used. // +optional - Ranges []IDRange `json:"ranges,omitempty"` + Ranges []IDRange } // IDRange provides a min/max of an allowed range of IDs. type IDRange struct { // Min is the start of the range, inclusive. - Min int64 `json:"min"` + Min int64 // Max is the end of the range, inclusive. - Max int64 `json:"max"` + Max int64 } // RunAsUserStrategy denotes strategy types for generating RunAsUser values for a @@ -955,11 +976,11 @@ const ( type FSGroupStrategyOptions struct { // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext. // +optional - Rule FSGroupStrategyType `json:"rule,omitempty"` + Rule FSGroupStrategyType // Ranges are the allowed ranges of fs groups. If you would like to force a single // fs group then supply a single range with the same start and end. // +optional - Ranges []IDRange `json:"ranges,omitempty"` + Ranges []IDRange } // FSGroupStrategyType denotes strategy types for generating FSGroup values for a @@ -977,11 +998,11 @@ const ( type SupplementalGroupsStrategyOptions struct { // Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. // +optional - Rule SupplementalGroupsStrategyType `json:"rule,omitempty"` + Rule SupplementalGroupsStrategyType // Ranges are the allowed ranges of supplemental groups. If you would like to force a single // supplemental group then supply a single range with the same start and end. // +optional - Ranges []IDRange `json:"ranges,omitempty"` + Ranges []IDRange } // SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental @@ -997,23 +1018,23 @@ const ( // PodSecurityPolicyList is a list of PodSecurityPolicy objects. type PodSecurityPolicyList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta - Items []PodSecurityPolicy `json:"items"` + Items []PodSecurityPolicy } // +genclient=true type NetworkPolicy struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - api.ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Specification of the desired behavior for this NetworkPolicy. // +optional - Spec NetworkPolicySpec `json:"spec,omitempty"` + Spec NetworkPolicySpec } type NetworkPolicySpec struct { @@ -1022,7 +1043,7 @@ type NetworkPolicySpec struct { // same set of pods. In this case, the ingress rules for each are combined additively. // This field is NOT optional and follows standard label selector semantics. // An empty podSelector matches all pods in this namespace. - PodSelector unversioned.LabelSelector `json:"podSelector"` + PodSelector metav1.LabelSelector // List of ingress rules to be applied to the selected pods. 
// Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, @@ -1033,7 +1054,7 @@ type NetworkPolicySpec struct { // If this field is present and contains at least one rule, this policy allows any traffic // which matches at least one of the ingress rules in this list. // +optional - Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty"` + Ingress []NetworkPolicyIngressRule } // This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. @@ -1046,7 +1067,7 @@ type NetworkPolicyIngressRule struct { // only if the traffic matches at least one port in the list. // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. // +optional - Ports []NetworkPolicyPort `json:"ports,omitempty"` + Ports []NetworkPolicyPort // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. @@ -1056,14 +1077,14 @@ type NetworkPolicyIngressRule struct { // traffic matches at least one item in the from list. // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. // +optional - From []NetworkPolicyPeer `json:"from,omitempty"` + From []NetworkPolicyPeer } type NetworkPolicyPort struct { // Optional. The protocol (TCP or UDP) which traffic must match. // If not specified, this field defaults to TCP. // +optional - Protocol *api.Protocol `json:"protocol,omitempty"` + Protocol *api.Protocol // If specified, the port on the given protocol. This can // either be a numerical or named port on a pod. If this field is not provided, @@ -1071,7 +1092,7 @@ type NetworkPolicyPort struct { // If present, only traffic on the specified protocol AND port // will be matched. // +optional - Port *intstr.IntOrString `json:"port,omitempty"` + Port *intstr.IntOrString } type NetworkPolicyPeer struct { @@ -1082,7 +1103,7 @@ type NetworkPolicyPeer struct { // If not provided, this selector selects no pods. // If present but empty, this selector selects all pods in this namespace. // +optional - PodSelector *unversioned.LabelSelector `json:"podSelector,omitempty"` + PodSelector *metav1.LabelSelector // Selects Namespaces using cluster scoped-labels. This // matches all pods in all namespaces selected by this label selector. @@ -1090,14 +1111,14 @@ type NetworkPolicyPeer struct { // If omitted, this selector selects no namespaces. // If present but empty, this selector selects all namespaces. // +optional - NamespaceSelector *unversioned.LabelSelector `json:"namespaceSelector,omitempty"` + NamespaceSelector *metav1.LabelSelector } // NetworkPolicyList is a list of NetworkPolicy objects. 
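[Editor's note] The NetworkPolicy fields above (PodSelector, Ingress rules, NetworkPolicyPeer, NetworkPolicyPort) compose roughly as in this sketch, which is not part of the patch. The labels, namespace and port are made up; api.ProtocolTCP is the TCP constant from the internal pkg/api package. Whether non-matching traffic is actually blocked still depends on the namespace isolation setting described in the comments above.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/extensions"
)

// allowFrontendToDB describes a rule letting pods labelled role=frontend
// reach pods labelled app=db on TCP 6379.
func allowFrontendToDB() *extensions.NetworkPolicy {
	tcp := api.ProtocolTCP
	redisPort := intstr.FromInt(6379)
	return &extensions.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "allow-frontend", Namespace: "default"},
		Spec: extensions.NetworkPolicySpec{
			// Pods this policy applies to; an empty selector would match all
			// pods in the namespace.
			PodSelector: metav1.LabelSelector{MatchLabels: map[string]string{"app": "db"}},
			Ingress: []extensions.NetworkPolicyIngressRule{{
				From: []extensions.NetworkPolicyPeer{{
					PodSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"role": "frontend"}},
				}},
				Ports: []extensions.NetworkPolicyPort{{Protocol: &tcp, Port: &redisPort}},
			}},
		},
	}
}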
type NetworkPolicyList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta - Items []NetworkPolicy `json:"items"` + Items []NetworkPolicy } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD index 95f62d1389..fb58592ca1 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD @@ -4,10 +4,8 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", "go_test", - "cgo_library", ) go_library( @@ -28,40 +26,48 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/resource:go_default_library", - "//pkg/api/unversioned:go_default_library", "//pkg/api/v1:go_default_library", - "//pkg/apis/autoscaling:go_default_library", - "//pkg/apis/batch:go_default_library", "//pkg/apis/extensions:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//pkg/util/intstr:go_default_library", - "//pkg/watch/versioned:go_default_library", "//vendor:github.com/gogo/protobuf/proto", "//vendor:github.com/gogo/protobuf/sortkeys", "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/api/resource", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/util/intstr", ], ) go_test( name = "go_default_xtest", - srcs = [ - "conversion_test.go", - "defaults_test.go", - ], + srcs = ["defaults_test.go"], tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", "//pkg/api/install:go_default_library", - "//pkg/api/resource:go_default_library", - "//pkg/api/unversioned:go_default_library", "//pkg/api/v1:go_default_library", - "//pkg/apis/batch:go_default_library", "//pkg/apis/extensions/install:go_default_library", "//pkg/apis/extensions/v1beta1:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/intstr:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/api/equality", + "//vendor:k8s.io/apimachinery/pkg/api/resource", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/util/intstr", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go index 0c1c37d047..38b8c2ac8f 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go @@ -19,15 +19,12 @@ package v1beta1 import ( "fmt" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" v1 "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/apis/autoscaling" - "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/extensions" - 
"k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/intstr" ) func addConversionFuncs(scheme *runtime.Scheme) error { @@ -41,16 +38,10 @@ func addConversionFuncs(scheme *runtime.Scheme) error { Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy, Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, + Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet, + Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet, Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec, Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec, - // autoscaling - Convert_autoscaling_CrossVersionObjectReference_To_v1beta1_SubresourceReference, - Convert_v1beta1_SubresourceReference_To_autoscaling_CrossVersionObjectReference, - Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec, - Convert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec, - // batch - Convert_batch_JobSpec_To_v1beta1_JobSpec, - Convert_v1beta1_JobSpec_To_batch_JobSpec, ) if err != nil { return err @@ -59,7 +50,7 @@ func addConversionFuncs(scheme *runtime.Scheme) error { // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. for _, k := range []string{"DaemonSet", "Deployment", "Ingress"} { kind := k // don't close over range variables - err = api.Scheme.AddFieldLabelConversionFunc("extensions/v1beta1", kind, + err = scheme.AddFieldLabelConversionFunc("extensions/v1beta1", kind, func(label, value string) (string, string, error) { switch label { case "metadata.name", "metadata.namespace": @@ -74,16 +65,7 @@ func addConversionFuncs(scheme *runtime.Scheme) error { } } - return api.Scheme.AddFieldLabelConversionFunc("extensions/v1beta1", "Job", - func(label, value string) (string, string, error) { - switch label { - case "metadata.name", "metadata.namespace", "status.successful": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }, - ) + return nil } func Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { @@ -96,7 +78,7 @@ func Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleS out.Selector = in.Selector.MatchLabels } - selector, err := unversioned.LabelSelectorAsSelector(in.Selector) + selector, err := metav1.LabelSelectorAsSelector(in.Selector) if err != nil { return fmt.Errorf("invalid label selector: %v", err) } @@ -113,14 +95,14 @@ func Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *ScaleStatus, out // new field can be expected to know about the old field (though that's not quite true, due // to kubectl apply). However, these fields are readonly, so any non-nil value should work. 
if in.TargetSelector != "" { - labelSelector, err := unversioned.ParseToLabelSelector(in.TargetSelector) + labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) if err != nil { out.Selector = nil return fmt.Errorf("failed to parse target selector: %v", err) } out.Selector = labelSelector } else if in.Selector != nil { - out.Selector = new(unversioned.LabelSelector) + out.Selector = new(metav1.LabelSelector) selector := make(map[string]string) for key, val := range in.Selector { selector[key] = val @@ -239,125 +221,42 @@ func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployme return nil } -func Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *ReplicaSetSpec, s conversion.Scope) error { - out.Replicas = new(int32) - *out.Replicas = int32(in.Replicas) - out.MinReadySeconds = in.MinReadySeconds - out.Selector = in.Selector - if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { +func Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *RollingUpdateDaemonSet, s conversion.Scope) error { + if out.MaxUnavailable == nil { + out.MaxUnavailable = &intstr.IntOrString{} + } + if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil { return err } return nil } -func Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { - if in.Replicas != nil { - out.Replicas = *in.Replicas - } - out.MinReadySeconds = in.MinReadySeconds - out.Selector = in.Selector - if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { +func Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error { + if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { return err } return nil } -func Convert_batch_JobSpec_To_v1beta1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { - out.Parallelism = in.Parallelism - out.Completions = in.Completions - out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds +func Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *ReplicaSetSpec, s conversion.Scope) error { + out.Replicas = new(int32) + *out.Replicas = int32(in.Replicas) + out.MinReadySeconds = in.MinReadySeconds out.Selector = in.Selector - // BEGIN non-standard conversion - // autoSelector has opposite meaning as manualSelector. - // in both cases, unset means false, and unset is always preferred to false. - // unset vs set-false distinction is not preserved. - manualSelector := in.ManualSelector != nil && *in.ManualSelector - autoSelector := !manualSelector - if autoSelector { - out.AutoSelector = new(bool) - *out.AutoSelector = true - } else { - out.AutoSelector = nil - } - // END non-standard conversion - if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil } -func Convert_v1beta1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { - out.Parallelism = in.Parallelism - out.Completions = in.Completions - out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds - out.Selector = in.Selector - // BEGIN non-standard conversion - // autoSelector has opposite meaning as manualSelector. 
- // in both cases, unset means false, and unset is always preferred to false. - // unset vs set-false distinction is not preserved. - autoSelector := bool(in.AutoSelector != nil && *in.AutoSelector) - manualSelector := !autoSelector - if manualSelector { - out.ManualSelector = new(bool) - *out.ManualSelector = true - } else { - out.ManualSelector = nil +func Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { + if in.Replicas != nil { + out.Replicas = *in.Replicas } - // END non-standard conversion - + out.MinReadySeconds = in.MinReadySeconds + out.Selector = in.Selector if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil } - -func Convert_autoscaling_CrossVersionObjectReference_To_v1beta1_SubresourceReference(in *autoscaling.CrossVersionObjectReference, out *SubresourceReference, s conversion.Scope) error { - out.Kind = in.Kind - out.Name = in.Name - out.APIVersion = in.APIVersion - out.Subresource = "scale" - return nil -} - -func Convert_v1beta1_SubresourceReference_To_autoscaling_CrossVersionObjectReference(in *SubresourceReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { - out.Kind = in.Kind - out.Name = in.Name - out.APIVersion = in.APIVersion - return nil -} - -func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { - if err := Convert_autoscaling_CrossVersionObjectReference_To_v1beta1_SubresourceReference(&in.ScaleTargetRef, &out.ScaleRef, s); err != nil { - return err - } - if in.MinReplicas != nil { - out.MinReplicas = new(int32) - *out.MinReplicas = *in.MinReplicas - } else { - out.MinReplicas = nil - } - out.MaxReplicas = in.MaxReplicas - if in.TargetCPUUtilizationPercentage != nil { - out.CPUUtilization = &CPUTargetUtilization{TargetPercentage: *in.TargetCPUUtilizationPercentage} - } - return nil -} - -func Convert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { - if err := Convert_v1beta1_SubresourceReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleRef, &out.ScaleTargetRef, s); err != nil { - return err - } - if in.MinReplicas != nil { - out.MinReplicas = new(int32) - *out.MinReplicas = int32(*in.MinReplicas) - } else { - out.MinReplicas = nil - } - out.MaxReplicas = int32(in.MaxReplicas) - if in.CPUUtilization != nil { - out.TargetCPUUtilizationPercentage = new(int32) - *out.TargetCPUUtilizationPercentage = int32(in.CPUUtilization.TargetPercentage) - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go index 5b17d7ea11..5f14d8160e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go @@ -17,10 +17,10 @@ limitations under the License. 
package v1beta1 import ( - "k8s.io/kubernetes/pkg/api/unversioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/intstr" ) func addDefaultingFuncs(scheme *runtime.Scheme) error { @@ -28,8 +28,6 @@ func addDefaultingFuncs(scheme *runtime.Scheme) error { return scheme.AddDefaultingFuncs( SetDefaults_DaemonSet, SetDefaults_Deployment, - SetDefaults_Job, - SetDefaults_HorizontalPodAutoscaler, SetDefaults_ReplicaSet, SetDefaults_NetworkPolicy, ) @@ -41,7 +39,7 @@ func SetDefaults_DaemonSet(obj *DaemonSet) { // TODO: support templates defined elsewhere when we support them in the API if labels != nil { if obj.Spec.Selector == nil { - obj.Spec.Selector = &unversioned.LabelSelector{ + obj.Spec.Selector = &metav1.LabelSelector{ MatchLabels: labels, } } @@ -49,6 +47,21 @@ func SetDefaults_DaemonSet(obj *DaemonSet) { obj.Labels = labels } } + updateStrategy := &obj.Spec.UpdateStrategy + if updateStrategy.Type == "" { + updateStrategy.Type = OnDeleteDaemonSetStrategyType + } + if updateStrategy.Type == RollingUpdateDaemonSetStrategyType { + if updateStrategy.RollingUpdate == nil { + rollingUpdate := RollingUpdateDaemonSet{} + updateStrategy.RollingUpdate = &rollingUpdate + } + if updateStrategy.RollingUpdate.MaxUnavailable == nil { + // Set default MaxUnavailable as 1 by default. + maxUnavailable := intstr.FromInt(1) + updateStrategy.RollingUpdate.MaxUnavailable = &maxUnavailable + } + } } func SetDefaults_Deployment(obj *Deployment) { @@ -57,7 +70,7 @@ func SetDefaults_Deployment(obj *Deployment) { if labels != nil { if obj.Spec.Selector == nil { - obj.Spec.Selector = &unversioned.LabelSelector{MatchLabels: labels} + obj.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels} } if len(obj.Labels) == 0 { obj.Labels = labels @@ -91,57 +104,13 @@ func SetDefaults_Deployment(obj *Deployment) { } } -func SetDefaults_Job(obj *Job) { - labels := obj.Spec.Template.Labels - // TODO: support templates defined elsewhere when we support them in the API - if labels != nil { - // if an autoselector is requested, we'll build the selector later with controller-uid and job-name - autoSelector := bool(obj.Spec.AutoSelector != nil && *obj.Spec.AutoSelector) - - // otherwise, we are using a manual selector - manualSelector := !autoSelector - - // and default behavior for an unspecified manual selector is to use the pod template labels - if manualSelector && obj.Spec.Selector == nil { - obj.Spec.Selector = &unversioned.LabelSelector{ - MatchLabels: labels, - } - } - if len(obj.Labels) == 0 { - obj.Labels = labels - } - } - // For a non-parallel job, you can leave both `.spec.completions` and - // `.spec.parallelism` unset. When both are unset, both are defaulted to 1. 
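[Editor's note] A quick illustration, outside the patch itself, of the new SetDefaults_DaemonSet behaviour added earlier in this file: an empty UpdateStrategy.Type defaults to OnDelete, and choosing the RollingUpdate type without MaxUnavailable yields a default of 1. The sketch assumes only the v1beta1 symbols introduced by this diff.

package main

import (
	"fmt"

	extv1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
)

func main() {
	// Caller opts into rolling updates but leaves the sub-struct unset.
	ds := &extv1beta1.DaemonSet{}
	ds.Spec.UpdateStrategy.Type = extv1beta1.RollingUpdateDaemonSetStrategyType

	// The defaulting function shown above allocates RollingUpdate and
	// sets MaxUnavailable to intstr.FromInt(1).
	extv1beta1.SetDefaults_DaemonSet(ds)
	fmt.Println(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable.String()) // "1"

	// With no Type at all, the strategy defaults to OnDelete.
	ds2 := &extv1beta1.DaemonSet{}
	extv1beta1.SetDefaults_DaemonSet(ds2)
	fmt.Println(ds2.Spec.UpdateStrategy.Type)
}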
- if obj.Spec.Completions == nil && obj.Spec.Parallelism == nil { - obj.Spec.Completions = new(int32) - *obj.Spec.Completions = 1 - obj.Spec.Parallelism = new(int32) - *obj.Spec.Parallelism = 1 - } - if obj.Spec.Parallelism == nil { - obj.Spec.Parallelism = new(int32) - *obj.Spec.Parallelism = 1 - } -} - -func SetDefaults_HorizontalPodAutoscaler(obj *HorizontalPodAutoscaler) { - if obj.Spec.MinReplicas == nil { - minReplicas := int32(1) - obj.Spec.MinReplicas = &minReplicas - } - if obj.Spec.CPUUtilization == nil { - obj.Spec.CPUUtilization = &CPUTargetUtilization{TargetPercentage: 80} - } -} - func SetDefaults_ReplicaSet(obj *ReplicaSet) { labels := obj.Spec.Template.Labels // TODO: support templates defined elsewhere when we support them in the API if labels != nil { if obj.Spec.Selector == nil { - obj.Spec.Selector = &unversioned.LabelSelector{ + obj.Spec.Selector = &metav1.LabelSelector{ MatchLabels: labels, } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.pb.go index 88f3559acf..053cf10dbf 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,7 +26,6 @@ limitations under the License. It has these top-level messages: APIVersion - CPUTargetUtilization CustomMetricCurrentStatus CustomMetricCurrentStatusList CustomMetricTarget @@ -35,6 +34,7 @@ limitations under the License. DaemonSetList DaemonSetSpec DaemonSetStatus + DaemonSetUpdateStrategy Deployment DeploymentCondition DeploymentList @@ -42,14 +42,9 @@ limitations under the License. DeploymentSpec DeploymentStatus DeploymentStrategy - ExportOptions FSGroupStrategyOptions HTTPIngressPath HTTPIngressRuleValue - HorizontalPodAutoscaler - HorizontalPodAutoscalerList - HorizontalPodAutoscalerSpec - HorizontalPodAutoscalerStatus HostPortRange IDRange Ingress @@ -60,11 +55,6 @@ limitations under the License. IngressSpec IngressStatus IngressTLS - Job - JobCondition - JobList - JobSpec - JobStatus NetworkPolicy NetworkPolicyIngressRule NetworkPolicyList @@ -81,13 +71,13 @@ limitations under the License. 
ReplicaSetStatus ReplicationControllerDummy RollbackConfig + RollingUpdateDaemonSet RollingUpdateDeployment RunAsUserStrategyOptions SELinuxStrategyOptions Scale ScaleSpec ScaleStatus - SubresourceReference SupplementalGroupsStrategyOptions ThirdPartyResource ThirdPartyResourceData @@ -100,10 +90,11 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" -import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" -import k8s_io_kubernetes_pkg_util_intstr "k8s.io/kubernetes/pkg/util/intstr" +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" import strings "strings" import reflect "reflect" @@ -124,45 +115,45 @@ func (m *APIVersion) Reset() { *m = APIVersion{} } func (*APIVersion) ProtoMessage() {} func (*APIVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -func (m *CPUTargetUtilization) Reset() { *m = CPUTargetUtilization{} } -func (*CPUTargetUtilization) ProtoMessage() {} -func (*CPUTargetUtilization) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - func (m *CustomMetricCurrentStatus) Reset() { *m = CustomMetricCurrentStatus{} } func (*CustomMetricCurrentStatus) ProtoMessage() {} func (*CustomMetricCurrentStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} + return fileDescriptorGenerated, []int{1} } func (m *CustomMetricCurrentStatusList) Reset() { *m = CustomMetricCurrentStatusList{} } func (*CustomMetricCurrentStatusList) ProtoMessage() {} func (*CustomMetricCurrentStatusList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} + return fileDescriptorGenerated, []int{2} } func (m *CustomMetricTarget) Reset() { *m = CustomMetricTarget{} } func (*CustomMetricTarget) ProtoMessage() {} -func (*CustomMetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*CustomMetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *CustomMetricTargetList) Reset() { *m = CustomMetricTargetList{} } func (*CustomMetricTargetList) ProtoMessage() {} -func (*CustomMetricTargetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*CustomMetricTargetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} @@ -192,233 +183,184 @@ func (m *DeploymentStrategy) Reset() { *m = DeploymentStrateg func (*DeploymentStrategy) ProtoMessage() {} func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } -func (m *ExportOptions) Reset() { *m = ExportOptions{} } -func (*ExportOptions) ProtoMessage() {} -func (*ExportOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } - func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (*HTTPIngressPath) ProtoMessage() {} -func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } func (*HTTPIngressRuleValue) ProtoMessage() {} -func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } - -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{21} -} - -func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } -func (*HorizontalPodAutoscalerList) ProtoMessage() {} -func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{22} -} - -func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } -func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} -func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{23} -} - -func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } -func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} -func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{24} -} +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *HostPortRange) Reset() { *m = HostPortRange{} } func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } func (m *IDRange) Reset() { *m = IDRange{} } func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) 
ProtoMessage() {} -func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} -func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} -func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} -func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} -func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} -func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} -func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} -func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } - -func (m *Job) Reset() { *m = Job{} } -func (*Job) ProtoMessage() {} -func (*Job) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } - -func (m *JobCondition) Reset() { *m = JobCondition{} } -func (*JobCondition) ProtoMessage() {} -func (*JobCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } - -func (m *JobList) Reset() { *m = JobList{} } -func (*JobList) ProtoMessage() {} -func (*JobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } - -func (m *JobSpec) Reset() { *m = JobSpec{} } -func (*JobSpec) ProtoMessage() {} -func (*JobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } - -func (m *JobStatus) Reset() { *m = JobStatus{} } -func (*JobStatus) ProtoMessage() {} -func (*JobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } func (*NetworkPolicy) ProtoMessage() {} -func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func 
(*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{41} + return fileDescriptorGenerated, []int{31} } func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } func (*NetworkPolicyList) ProtoMessage() {} -func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } func (*NetworkPolicyPeer) ProtoMessage() {} -func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } func (*NetworkPolicyPort) ProtoMessage() {} -func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } func (*NetworkPolicySpec) ProtoMessage() {} -func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{42} } func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} } func (*ReplicationControllerDummy) ProtoMessage() {} func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{54} + return fileDescriptorGenerated, []int{44} } func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } +func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } + +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{56} + return fileDescriptorGenerated, []int{47} } func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } func (*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{57} + return fileDescriptorGenerated, []int{48} } func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } - -func (m *SubresourceReference) Reset() { *m = SubresourceReference{} } -func (*SubresourceReference) ProtoMessage() {} -func (*SubresourceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{63} + return fileDescriptorGenerated, []int{53} } func (m *ThirdPartyResource) Reset() { *m = ThirdPartyResource{} } func (*ThirdPartyResource) ProtoMessage() {} -func 
(*ThirdPartyResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } +func (*ThirdPartyResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } func (m *ThirdPartyResourceData) Reset() { *m = ThirdPartyResourceData{} } func (*ThirdPartyResourceData) ProtoMessage() {} -func (*ThirdPartyResourceData) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } +func (*ThirdPartyResourceData) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } func (m *ThirdPartyResourceDataList) Reset() { *m = ThirdPartyResourceDataList{} } func (*ThirdPartyResourceDataList) ProtoMessage() {} func (*ThirdPartyResourceDataList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{66} + return fileDescriptorGenerated, []int{56} } func (m *ThirdPartyResourceList) Reset() { *m = ThirdPartyResourceList{} } func (*ThirdPartyResourceList) ProtoMessage() {} -func (*ThirdPartyResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } +func (*ThirdPartyResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } func init() { proto.RegisterType((*APIVersion)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.APIVersion") - proto.RegisterType((*CPUTargetUtilization)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.CPUTargetUtilization") proto.RegisterType((*CustomMetricCurrentStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.CustomMetricCurrentStatus") proto.RegisterType((*CustomMetricCurrentStatusList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.CustomMetricCurrentStatusList") proto.RegisterType((*CustomMetricTarget)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.CustomMetricTarget") @@ -427,6 +369,7 @@ func init() { proto.RegisterType((*DaemonSetList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetList") proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetSpec") proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetStatus") + proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetUpdateStrategy") proto.RegisterType((*Deployment)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.Deployment") proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DeploymentCondition") proto.RegisterType((*DeploymentList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DeploymentList") @@ -434,14 +377,9 @@ func init() { proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DeploymentSpec") proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DeploymentStatus") proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DeploymentStrategy") - proto.RegisterType((*ExportOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ExportOptions") proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.FSGroupStrategyOptions") proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HTTPIngressPath") proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HTTPIngressRuleValue") - proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HorizontalPodAutoscaler") - 
proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HorizontalPodAutoscalerList") - proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HorizontalPodAutoscalerSpec") - proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HorizontalPodAutoscalerStatus") proto.RegisterType((*HostPortRange)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HostPortRange") proto.RegisterType((*IDRange)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IDRange") proto.RegisterType((*Ingress)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.Ingress") @@ -452,11 +390,6 @@ func init() { proto.RegisterType((*IngressSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressSpec") proto.RegisterType((*IngressStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressStatus") proto.RegisterType((*IngressTLS)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressTLS") - proto.RegisterType((*Job)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.Job") - proto.RegisterType((*JobCondition)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.JobCondition") - proto.RegisterType((*JobList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.JobList") - proto.RegisterType((*JobSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.JobSpec") - proto.RegisterType((*JobStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.JobStatus") proto.RegisterType((*NetworkPolicy)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicy") proto.RegisterType((*NetworkPolicyIngressRule)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyIngressRule") proto.RegisterType((*NetworkPolicyList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyList") @@ -473,13 +406,13 @@ func init() { proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetStatus") proto.RegisterType((*ReplicationControllerDummy)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ReplicationControllerDummy") proto.RegisterType((*RollbackConfig)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.RollbackConfig") + proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.RollingUpdateDaemonSet") proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.RollingUpdateDeployment") proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.RunAsUserStrategyOptions") proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.SELinuxStrategyOptions") proto.RegisterType((*Scale)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ScaleStatus") - proto.RegisterType((*SubresourceReference)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.SubresourceReference") proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.SupplementalGroupsStrategyOptions") proto.RegisterType((*ThirdPartyResource)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ThirdPartyResource") proto.RegisterType((*ThirdPartyResourceData)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ThirdPartyResourceData") @@ -508,27 
+441,6 @@ func (m *APIVersion) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *CPUTargetUtilization) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *CPUTargetUtilization) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.TargetPercentage)) - return i, nil -} - func (m *CustomMetricCurrentStatus) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -762,6 +674,20 @@ func (m *DaemonSetSpec) MarshalTo(data []byte) (int, error) { return 0, err } i += n8 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.UpdateStrategy.Size())) + n9, err := m.UpdateStrategy.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TemplateGeneration)) return i, nil } @@ -792,6 +718,50 @@ func (m *DaemonSetStatus) MarshalTo(data []byte) (int, error) { data[i] = 0x20 i++ i = encodeVarintGenerated(data, i, uint64(m.NumberReady)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(m.UpdatedNumberScheduled)) + data[i] = 0x38 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NumberAvailable)) + data[i] = 0x40 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NumberUnavailable)) + return i, nil +} + +func (m *DaemonSetUpdateStrategy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DaemonSetUpdateStrategy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.RollingUpdate != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RollingUpdate.Size())) + n10, err := m.RollingUpdate.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + } return i, nil } @@ -813,27 +783,27 @@ func (m *Deployment) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(data[i:]) + n11, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n9 + i += n11 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(data[i:]) + n12, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n10 + i += n12 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n11, err := m.Status.MarshalTo(data[i:]) + n13, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n11 + i += n13 return i, nil } @@ -871,19 +841,19 @@ func (m *DeploymentCondition) MarshalTo(data []byte) (int, error) { data[i] = 0x32 i++ i = encodeVarintGenerated(data, i, uint64(m.LastUpdateTime.Size())) - n12, err := m.LastUpdateTime.MarshalTo(data[i:]) + n14, err := m.LastUpdateTime.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n12 + i += n14 data[i] = 0x3a i++ i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n13, err := 
m.LastTransitionTime.MarshalTo(data[i:]) + n15, err := m.LastTransitionTime.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n13 + i += n15 return i, nil } @@ -905,11 +875,11 @@ func (m *DeploymentList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n14, err := m.ListMeta.MarshalTo(data[i:]) + n16, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n14 + i += n16 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -964,11 +934,11 @@ func (m *DeploymentRollback) MarshalTo(data []byte) (int, error) { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size())) - n15, err := m.RollbackTo.MarshalTo(data[i:]) + n17, err := m.RollbackTo.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n15 + i += n17 return i, nil } @@ -996,28 +966,28 @@ func (m *DeploymentSpec) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n16, err := m.Selector.MarshalTo(data[i:]) + n18, err := m.Selector.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n16 + i += n18 } data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n17, err := m.Template.MarshalTo(data[i:]) + n19, err := m.Template.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n17 + i += n19 data[i] = 0x22 i++ i = encodeVarintGenerated(data, i, uint64(m.Strategy.Size())) - n18, err := m.Strategy.MarshalTo(data[i:]) + n20, err := m.Strategy.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n18 + i += n20 data[i] = 0x28 i++ i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) @@ -1038,11 +1008,11 @@ func (m *DeploymentSpec) MarshalTo(data []byte) (int, error) { data[i] = 0x42 i++ i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size())) - n19, err := m.RollbackTo.MarshalTo(data[i:]) + n21, err := m.RollbackTo.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n19 + i += n21 } if m.ProgressDeadlineSeconds != nil { data[i] = 0x48 @@ -1094,6 +1064,9 @@ func (m *DeploymentStatus) MarshalTo(data []byte) (int, error) { i += n } } + data[i] = 0x38 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ReadyReplicas)) return i, nil } @@ -1120,46 +1093,12 @@ func (m *DeploymentStrategy) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.RollingUpdate.Size())) - n20, err := m.RollingUpdate.MarshalTo(data[i:]) + n22, err := m.RollingUpdate.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n20 - } - return i, nil -} - -func (m *ExportOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ExportOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - if m.Export { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x10 - i++ - if m.Exact { - data[i] = 1 - } else { - data[i] = 0 + i += n22 } - i++ return i, nil } @@ -1219,11 +1158,11 @@ func (m *HTTPIngressPath) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Backend.Size())) - n21, err := m.Backend.MarshalTo(data[i:]) + n23, err := m.Backend.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n21 + i += n23 return i, nil } @@ -1257,7 +1196,55 @@ func (m *HTTPIngressRuleValue) MarshalTo(data []byte) 
(int, error) { return i, nil } -func (m *HorizontalPodAutoscaler) Marshal() (data []byte, err error) { +func (m *HostPortRange) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HostPortRange) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Min)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Max)) + return i, nil +} + +func (m *IDRange) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IDRange) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Min)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Max)) + return i, nil +} + +func (m *Ingress) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -1267,7 +1254,7 @@ func (m *HorizontalPodAutoscaler) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *HorizontalPodAutoscaler) MarshalTo(data []byte) (int, error) { +func (m *Ingress) MarshalTo(data []byte) (int, error) { var i int _ = i var l int @@ -1275,31 +1262,61 @@ func (m *HorizontalPodAutoscaler) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n22, err := m.ObjectMeta.MarshalTo(data[i:]) + n24, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n22 + i += n24 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n23, err := m.Spec.MarshalTo(data[i:]) + n25, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n23 + i += n25 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n24, err := m.Status.MarshalTo(data[i:]) + n26, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n24 + i += n26 + return i, nil +} + +func (m *IngressBackend) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IngressBackend) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ServiceName))) + i += copy(data[i:], m.ServiceName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ServicePort.Size())) + n27, err := m.ServicePort.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n27 return i, nil } -func (m *HorizontalPodAutoscalerList) Marshal() (data []byte, err error) { +func (m *IngressList) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -1309,7 +1326,7 @@ func (m *HorizontalPodAutoscalerList) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *HorizontalPodAutoscalerList) MarshalTo(data []byte) (int, error) { +func (m *IngressList) MarshalTo(data []byte) (int, error) { var i int _ = i var l int @@ -1317,11 +1334,11 @@ func (m *HorizontalPodAutoscalerList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n25, err := 
m.ListMeta.MarshalTo(data[i:]) + n28, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n25 + i += n28 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -1337,7 +1354,7 @@ func (m *HorizontalPodAutoscalerList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *HorizontalPodAutoscalerSpec) Marshal() (data []byte, err error) { +func (m *IngressRule) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -1347,41 +1364,27 @@ func (m *HorizontalPodAutoscalerSpec) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *HorizontalPodAutoscalerSpec) MarshalTo(data []byte) (int, error) { +func (m *IngressRule) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ScaleRef.Size())) - n26, err := m.ScaleRef.MarshalTo(data[i:]) + i = encodeVarintGenerated(data, i, uint64(len(m.Host))) + i += copy(data[i:], m.Host) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.IngressRuleValue.Size())) + n29, err := m.IngressRuleValue.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n26 - if m.MinReplicas != nil { - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.MinReplicas)) - } - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.MaxReplicas)) - if m.CPUUtilization != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.CPUUtilization.Size())) - n27, err := m.CPUUtilization.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n27 - } + i += n29 return i, nil } -func (m *HorizontalPodAutoscalerStatus) Marshal() (data []byte, err error) { +func (m *IngressRuleValue) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -1391,41 +1394,77 @@ func (m *HorizontalPodAutoscalerStatus) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *HorizontalPodAutoscalerStatus) MarshalTo(data []byte) (int, error) { +func (m *IngressRuleValue) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l - if m.ObservedGeneration != nil { - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - data[i] = 0x12 + if m.HTTP != nil { + data[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.LastScaleTime.Size())) - n28, err := m.LastScaleTime.MarshalTo(data[i:]) + i = encodeVarintGenerated(data, i, uint64(m.HTTP.Size())) + n30, err := m.HTTP.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n28 + i += n30 } - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.CurrentReplicas)) - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(m.DesiredReplicas)) - if m.CurrentCPUUtilizationPercentage != nil { - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.CurrentCPUUtilizationPercentage)) + return i, nil +} + +func (m *IngressSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IngressSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Backend != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Backend.Size())) + n31, err := m.Backend.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n31 + } + if len(m.TLS) > 
0 { + for _, msg := range m.TLS { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } } return i, nil } -func (m *HostPortRange) Marshal() (data []byte, err error) { +func (m *IngressStatus) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -1435,21 +1474,23 @@ func (m *HostPortRange) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *HostPortRange) MarshalTo(data []byte) (int, error) { +func (m *IngressStatus) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Min)) - data[i] = 0x10 + data[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.Max)) + i = encodeVarintGenerated(data, i, uint64(m.LoadBalancer.Size())) + n32, err := m.LoadBalancer.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n32 return i, nil } -func (m *IDRange) Marshal() (data []byte, err error) { +func (m *IngressTLS) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -1459,21 +1500,34 @@ func (m *IDRange) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *IDRange) MarshalTo(data []byte) (int, error) { +func (m *IngressTLS) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Min)) - data[i] = 0x10 + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Max)) + i = encodeVarintGenerated(data, i, uint64(len(m.SecretName))) + i += copy(data[i:], m.SecretName) return i, nil } -func (m *Ingress) Marshal() (data []byte, err error) { +func (m *NetworkPolicy) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -1483,7 +1537,7 @@ func (m *Ingress) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *Ingress) MarshalTo(data []byte) (int, error) { +func (m *NetworkPolicy) MarshalTo(data []byte) (int, error) { var i int _ = i var l int @@ -1491,31 +1545,23 @@ func (m *Ingress) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n29, err := m.ObjectMeta.MarshalTo(data[i:]) + n33, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n29 + i += n33 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n30, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n30 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n31, err := m.Status.MarshalTo(data[i:]) + n34, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n31 + i += n34 return i, nil } -func (m *IngressBackend) Marshal() (data []byte, err error) { +func (m *NetworkPolicyIngressRule) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) 
@@ -1525,27 +1571,39 @@ func (m *IngressBackend) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *IngressBackend) MarshalTo(data []byte) (int, error) { +func (m *NetworkPolicyIngressRule) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ServiceName))) - i += copy(data[i:], m.ServiceName) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ServicePort.Size())) - n32, err := m.ServicePort.MarshalTo(data[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.From) > 0 { + for _, msg := range m.From { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } } - i += n32 return i, nil } -func (m *IngressList) Marshal() (data []byte, err error) { +func (m *NetworkPolicyList) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -1555,7 +1613,7 @@ func (m *IngressList) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *IngressList) MarshalTo(data []byte) (int, error) { +func (m *NetworkPolicyList) MarshalTo(data []byte) (int, error) { var i int _ = i var l int @@ -1563,11 +1621,11 @@ func (m *IngressList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n33, err := m.ListMeta.MarshalTo(data[i:]) + n35, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n33 + i += n35 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -1583,7 +1641,7 @@ func (m *IngressList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *IngressRule) Marshal() (data []byte, err error) { +func (m *NetworkPolicyPeer) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -1593,27 +1651,35 @@ func (m *IngressRule) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *IngressRule) MarshalTo(data []byte) (int, error) { +func (m *NetworkPolicyPeer) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Host))) - i += copy(data[i:], m.Host) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.IngressRuleValue.Size())) - n34, err := m.IngressRuleValue.MarshalTo(data[i:]) - if err != nil { - return 0, err + if m.PodSelector != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodSelector.Size())) + n36, err := m.PodSelector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n36 + } + if m.NamespaceSelector != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NamespaceSelector.Size())) + n37, err := m.NamespaceSelector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n37 } - i += n34 return i, nil } -func (m *IngressRuleValue) Marshal() (data []byte, err error) { +func (m *NetworkPolicyPort) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -1623,25 +1689,31 @@ func (m *IngressRuleValue) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *IngressRuleValue) 
MarshalTo(data []byte) (int, error) { +func (m *NetworkPolicyPort) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l - if m.HTTP != nil { + if m.Protocol != nil { data[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.HTTP.Size())) - n35, err := m.HTTP.MarshalTo(data[i:]) + i = encodeVarintGenerated(data, i, uint64(len(*m.Protocol))) + i += copy(data[i:], *m.Protocol) + } + if m.Port != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Port.Size())) + n38, err := m.Port.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n35 + i += n38 } return i, nil } -func (m *IngressSpec) Marshal() (data []byte, err error) { +func (m *NetworkPolicySpec) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -1651,23 +1723,21 @@ func (m *IngressSpec) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *IngressSpec) MarshalTo(data []byte) (int, error) { +func (m *NetworkPolicySpec) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l - if m.Backend != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.Backend.Size())) - n36, err := m.Backend.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n36 + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodSelector.Size())) + n39, err := m.PodSelector.MarshalTo(data[i:]) + if err != nil { + return 0, err } - if len(m.TLS) > 0 { - for _, msg := range m.TLS { + i += n39 + if len(m.Ingress) > 0 { + for _, msg := range m.Ingress { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(msg.Size())) @@ -1678,22 +1748,10 @@ func (m *IngressSpec) MarshalTo(data []byte) (int, error) { i += n } } - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } return i, nil } -func (m *IngressStatus) Marshal() (data []byte, err error) { +func (m *PodSecurityPolicy) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -1703,23 +1761,31 @@ func (m *IngressStatus) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *IngressStatus) MarshalTo(data []byte) (int, error) { +func (m *PodSecurityPolicy) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.LoadBalancer.Size())) - n37, err := m.LoadBalancer.MarshalTo(data[i:]) + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n40, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n40 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n41, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n37 + i += n41 return i, nil } -func (m *IngressTLS) Marshal() (data []byte, err error) { +func (m *PodSecurityPolicyList) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -1729,16 +1795,62 @@ func (m *IngressTLS) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *IngressTLS) MarshalTo(data []byte) (int, error) { +func (m *PodSecurityPolicyList) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l - if len(m.Hosts) > 0 { - for _, s := range m.Hosts { - data[i] = 0xa + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, 
uint64(m.ListMeta.Size())) + n42, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n42 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 i++ - l = len(s) + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodSecurityPolicySpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodSecurityPolicySpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Privileged { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if len(m.DefaultAddCapabilities) > 0 { + for _, s := range m.DefaultAddCapabilities { + data[i] = 0x12 + i++ + l = len(s) for l >= 1<<7 { data[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 @@ -1749,14 +1861,131 @@ func (m *IngressTLS) MarshalTo(data []byte) (int, error) { i += copy(data[i:], s) } } - data[i] = 0x12 + if len(m.RequiredDropCapabilities) > 0 { + for _, s := range m.RequiredDropCapabilities { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.AllowedCapabilities) > 0 { + for _, s := range m.AllowedCapabilities { + data[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + data[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x30 + i++ + if m.HostNetwork { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if len(m.HostPorts) > 0 { + for _, msg := range m.HostPorts { + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x40 + i++ + if m.HostPID { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x48 + i++ + if m.HostIPC { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SELinux.Size())) + n43, err := m.SELinux.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n43 + data[i] = 0x5a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RunAsUser.Size())) + n44, err := m.RunAsUser.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n44 + data[i] = 0x62 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SupplementalGroups.Size())) + n45, err := m.SupplementalGroups.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n45 + data[i] = 0x6a + i++ + i = encodeVarintGenerated(data, i, uint64(m.FSGroup.Size())) + n46, err := m.FSGroup.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n46 + data[i] = 0x70 + i++ + if m.ReadOnlyRootFilesystem { + data[i] = 1 + } else { + data[i] = 0 + } i++ - i = encodeVarintGenerated(data, i, uint64(len(m.SecretName))) - i += copy(data[i:], m.SecretName) return i, nil } -func (m *Job) Marshal() (data []byte, err error) { +func (m *ReplicaSet) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -1766,7 +1995,7 @@ func (m *Job) 
Marshal() (data []byte, err error) { return data[:n], nil } -func (m *Job) MarshalTo(data []byte) (int, error) { +func (m *ReplicaSet) MarshalTo(data []byte) (int, error) { var i int _ = i var l int @@ -1774,31 +2003,31 @@ func (m *Job) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n38, err := m.ObjectMeta.MarshalTo(data[i:]) + n47, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n38 + i += n47 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n39, err := m.Spec.MarshalTo(data[i:]) + n48, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n39 + i += n48 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n40, err := m.Status.MarshalTo(data[i:]) + n49, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n40 + i += n49 return i, nil } -func (m *JobCondition) Marshal() (data []byte, err error) { +func (m *ReplicaSetCondition) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -1808,7 +2037,7 @@ func (m *JobCondition) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *JobCondition) MarshalTo(data []byte) (int, error) { +func (m *ReplicaSetCondition) MarshalTo(data []byte) (int, error) { var i int _ = i var l int @@ -1823,32 +2052,24 @@ func (m *JobCondition) MarshalTo(data []byte) (int, error) { i += copy(data[i:], m.Status) data[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) - n41, err := m.LastProbeTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n41 - data[i] = 0x22 - i++ i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n42, err := m.LastTransitionTime.MarshalTo(data[i:]) + n50, err := m.LastTransitionTime.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n42 - data[i] = 0x2a + i += n50 + data[i] = 0x22 i++ i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) i += copy(data[i:], m.Reason) - data[i] = 0x32 + data[i] = 0x2a i++ i = encodeVarintGenerated(data, i, uint64(len(m.Message))) i += copy(data[i:], m.Message) return i, nil } -func (m *JobList) Marshal() (data []byte, err error) { +func (m *ReplicaSetList) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -1858,7 +2079,7 @@ func (m *JobList) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *JobList) MarshalTo(data []byte) (int, error) { +func (m *ReplicaSetList) MarshalTo(data []byte) (int, error) { var i int _ = i var l int @@ -1866,11 +2087,11 @@ func (m *JobList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n43, err := m.ListMeta.MarshalTo(data[i:]) + n51, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n43 + i += n51 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -1886,7 +2107,7 @@ func (m *JobList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *JobSpec) Marshal() (data []byte, err error) { +func (m *ReplicaSetSpec) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -1896,58 +2117,41 @@ func (m *JobSpec) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *JobSpec) MarshalTo(data []byte) (int, error) { +func (m *ReplicaSetSpec) MarshalTo(data []byte) (int, 
error) { var i int _ = i var l int _ = l - if m.Parallelism != nil { + if m.Replicas != nil { data[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(*m.Parallelism)) - } - if m.Completions != nil { - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.Completions)) - } - if m.ActiveDeadlineSeconds != nil { - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.ActiveDeadlineSeconds)) + i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) } if m.Selector != nil { - data[i] = 0x22 + data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n44, err := m.Selector.MarshalTo(data[i:]) + n52, err := m.Selector.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n44 - } - if m.AutoSelector != nil { - data[i] = 0x28 - i++ - if *m.AutoSelector { - data[i] = 1 - } else { - data[i] = 0 - } - i++ + i += n52 } - data[i] = 0x32 + data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n45, err := m.Template.MarshalTo(data[i:]) + n53, err := m.Template.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n45 + i += n53 + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) return i, nil } -func (m *JobStatus) Marshal() (data []byte, err error) { +func (m *ReplicaSetStatus) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -1957,14 +2161,29 @@ func (m *JobStatus) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *JobStatus) MarshalTo(data []byte) (int, error) { +func (m *ReplicaSetStatus) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FullyLabeledReplicas)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ReadyReplicas)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AvailableReplicas)) if len(m.Conditions) > 0 { for _, msg := range m.Conditions { - data[i] = 0xa + data[i] = 0x32 i++ i = encodeVarintGenerated(data, i, uint64(msg.Size())) n, err := msg.MarshalTo(data[i:]) @@ -1974,39 +2193,10 @@ func (m *JobStatus) MarshalTo(data []byte) (int, error) { i += n } } - if m.StartTime != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size())) - n46, err := m.StartTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n46 - } - if m.CompletionTime != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.CompletionTime.Size())) - n47, err := m.CompletionTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n47 - } - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Active)) - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Succeeded)) - data[i] = 0x30 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Failed)) return i, nil } -func (m *NetworkPolicy) Marshal() (data []byte, err error) { +func (m *ReplicationControllerDummy) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -2016,31 +2206,15 @@ func (m *NetworkPolicy) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *NetworkPolicy) MarshalTo(data []byte) (int, error) { +func (m *ReplicationControllerDummy) MarshalTo(data []byte) (int, error) { var i 
int _ = i var l int _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n48, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n48 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n49, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n49 return i, nil } -func (m *NetworkPolicyIngressRule) Marshal() (data []byte, err error) { +func (m *RollbackConfig) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -2050,39 +2224,18 @@ func (m *NetworkPolicyIngressRule) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *NetworkPolicyIngressRule) MarshalTo(data []byte) (int, error) { +func (m *RollbackConfig) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.From) > 0 { - for _, msg := range m.From { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Revision)) return i, nil } -func (m *NetworkPolicyList) Marshal() (data []byte, err error) { +func (m *RollingUpdateDaemonSet) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -2092,35 +2245,25 @@ func (m *NetworkPolicyList) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *NetworkPolicyList) MarshalTo(data []byte) (int, error) { +func (m *RollingUpdateDaemonSet) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n50, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n50 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n + if m.MaxUnavailable != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxUnavailable.Size())) + n54, err := m.MaxUnavailable.MarshalTo(data[i:]) + if err != nil { + return 0, err } + i += n54 } return i, nil } -func (m *NetworkPolicyPeer) Marshal() (data []byte, err error) { +func (m *RollingUpdateDeployment) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -2130,35 +2273,35 @@ func (m *NetworkPolicyPeer) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *NetworkPolicyPeer) MarshalTo(data []byte) (int, error) { +func (m *RollingUpdateDeployment) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l - if m.PodSelector != nil { + if m.MaxUnavailable != nil { data[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.PodSelector.Size())) - n51, err := m.PodSelector.MarshalTo(data[i:]) + i = encodeVarintGenerated(data, i, uint64(m.MaxUnavailable.Size())) + n55, err := m.MaxUnavailable.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n51 + i += n55 } - if m.NamespaceSelector != nil { + if m.MaxSurge != nil { data[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, 
uint64(m.NamespaceSelector.Size())) - n52, err := m.NamespaceSelector.MarshalTo(data[i:]) + i = encodeVarintGenerated(data, i, uint64(m.MaxSurge.Size())) + n56, err := m.MaxSurge.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n52 + i += n56 } return i, nil } -func (m *NetworkPolicyPort) Marshal() (data []byte, err error) { +func (m *RunAsUserStrategyOptions) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -2168,31 +2311,31 @@ func (m *NetworkPolicyPort) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *NetworkPolicyPort) MarshalTo(data []byte) (int, error) { +func (m *RunAsUserStrategyOptions) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l - if m.Protocol != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(*m.Protocol))) - i += copy(data[i:], *m.Protocol) - } - if m.Port != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Port.Size())) - n53, err := m.Port.MarshalTo(data[i:]) - if err != nil { - return 0, err + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) + i += copy(data[i:], m.Rule) + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n } - i += n53 } return i, nil } -func (m *NetworkPolicySpec) Marshal() (data []byte, err error) { +func (m *SELinuxStrategyOptions) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -2202,35 +2345,29 @@ func (m *NetworkPolicySpec) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *NetworkPolicySpec) MarshalTo(data []byte) (int, error) { +func (m *SELinuxStrategyOptions) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.PodSelector.Size())) - n54, err := m.PodSelector.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n54 - if len(m.Ingress) > 0 { - for _, msg := range m.Ingress { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n + i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) + i += copy(data[i:], m.Rule) + if m.SELinuxOptions != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size())) + n57, err := m.SELinuxOptions.MarshalTo(data[i:]) + if err != nil { + return 0, err } + i += n57 } return i, nil } -func (m *PodSecurityPolicy) Marshal() (data []byte, err error) { +func (m *Scale) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -2240,7 +2377,7 @@ func (m *PodSecurityPolicy) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *PodSecurityPolicy) MarshalTo(data []byte) (int, error) { +func (m *Scale) MarshalTo(data []byte) (int, error) { var i int _ = i var l int @@ -2248,23 +2385,31 @@ func (m *PodSecurityPolicy) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n55, err := m.ObjectMeta.MarshalTo(data[i:]) + n58, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n55 + i += n58 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n56, err := 
m.Spec.MarshalTo(data[i:]) + n59, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n59 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n60, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n56 + i += n60 return i, nil } -func (m *PodSecurityPolicyList) Marshal() (data []byte, err error) { +func (m *ScaleSpec) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -2274,21 +2419,80 @@ func (m *PodSecurityPolicyList) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *PodSecurityPolicyList) MarshalTo(data []byte) (int, error) { +func (m *ScaleSpec) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + data[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n57, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + return i, nil +} + +func (m *ScaleStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) if err != nil { - return 0, err + return nil, err } - i += n57 - if len(m.Items) > 0 { - for _, msg := range m.Items { + return data[:n], nil +} + +func (m *ScaleStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k := range m.Selector { + data[i] = 0x12 + i++ + v := m.Selector[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.TargetSelector))) + i += copy(data[i:], m.TargetSelector) + return i, nil +} + +func (m *SupplementalGroupsStrategyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SupplementalGroupsStrategyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) + i += copy(data[i:], m.Rule) + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(msg.Size())) @@ -2302,7 +2506,7 @@ func (m *PodSecurityPolicyList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *PodSecurityPolicySpec) Marshal() (data []byte, err error) { +func (m *ThirdPartyResource) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -2312,159 +2516,39 @@ func (m *PodSecurityPolicySpec) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *PodSecurityPolicySpec) MarshalTo(data []byte) (int, error) { +func (m *ThirdPartyResource) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0x8 + data[i] = 0xa i++ - if m.Privileged { - data[i] = 1 - } else { - data[i] = 0 + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n61, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err } + i += n61 + data[i] 
= 0x12 i++ - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - data[i] = 0x12 + i = encodeVarintGenerated(data, i, uint64(len(m.Description))) + i += copy(data[i:], m.Description) + if len(m.Versions) > 0 { + for _, msg := range m.Versions { + data[i] = 0x1a i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) + i += n } } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - data[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - data[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - data[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - data[i] = 0x30 - i++ - if m.HostNetwork { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - if len(m.HostPorts) > 0 { - for _, msg := range m.HostPorts { - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - data[i] = 0x40 - i++ - if m.HostPID { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x48 - i++ - if m.HostIPC { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x52 - i++ - i = encodeVarintGenerated(data, i, uint64(m.SELinux.Size())) - n58, err := m.SELinux.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n58 - data[i] = 0x5a - i++ - i = encodeVarintGenerated(data, i, uint64(m.RunAsUser.Size())) - n59, err := m.RunAsUser.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n59 - data[i] = 0x62 - i++ - i = encodeVarintGenerated(data, i, uint64(m.SupplementalGroups.Size())) - n60, err := m.SupplementalGroups.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n60 - data[i] = 0x6a - i++ - i = encodeVarintGenerated(data, i, uint64(m.FSGroup.Size())) - n61, err := m.FSGroup.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n61 - data[i] = 0x70 - i++ - if m.ReadOnlyRootFilesystem { - data[i] = 1 - } else { - data[i] = 0 - } - i++ return i, nil } -func (m *ReplicaSet) Marshal() (data []byte, err error) { +func (m *ThirdPartyResourceData) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -2474,7 +2558,7 @@ func (m *ReplicaSet) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *ReplicaSet) MarshalTo(data []byte) (int, error) { +func (m *ThirdPartyResourceData) MarshalTo(data []byte) (int, error) { var i int _ = i var l int @@ -2487,26 +2571,16 @@ func (m *ReplicaSet) MarshalTo(data []byte) (int, error) { return 0, err } i += n62 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n63, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n63 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n64, err := 
m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err + if m.Data != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Data))) + i += copy(data[i:], m.Data) } - i += n64 return i, nil } -func (m *ReplicaSetCondition) Marshal() (data []byte, err error) { +func (m *ThirdPartyResourceDataList) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -2516,39 +2590,35 @@ func (m *ReplicaSetCondition) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *ReplicaSetCondition) MarshalTo(data []byte) (int, error) { +func (m *ThirdPartyResourceDataList) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i += copy(data[i:], m.Status) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n65, err := m.LastTransitionTime.MarshalTo(data[i:]) + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n63, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n65 - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) + i += n63 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } -func (m *ReplicaSetList) Marshal() (data []byte, err error) { +func (m *ThirdPartyResourceList) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -2558,7 +2628,7 @@ func (m *ReplicaSetList) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *ReplicaSetList) MarshalTo(data []byte) (int, error) { +func (m *ThirdPartyResourceList) MarshalTo(data []byte) (int, error) { var i int _ = i var l int @@ -2566,11 +2636,11 @@ func (m *ReplicaSetList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n66, err := m.ListMeta.MarshalTo(data[i:]) + n64, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n66 + i += n64 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -2586,618 +2656,397 @@ func (m *ReplicaSetList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *ReplicaSetSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = 
uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - return data[:n], nil + data[offset] = uint8(v) + return offset + 1 } - -func (m *ReplicaSetSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i +func (m *APIVersion) Size() (n int) { var l int _ = l - if m.Replicas != nil { - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n67, err := m.Selector.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n67 - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n68, err := m.Template.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n68 - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) - return i, nil + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ReplicaSetStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil +func (m *CustomMetricCurrentStatus) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ReplicaSetStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i +func (m *CustomMetricCurrentStatusList) Size() (n int) { var l int _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.FullyLabeledReplicas)) - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ReadyReplicas)) - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - return i, nil -} - -func (m *ReplicationControllerDummy) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil + return n } -func (m *ReplicationControllerDummy) MarshalTo(data []byte) (int, error) { - var i int - _ = i +func (m *CustomMetricTarget) Size() (n int) { var l int _ = l - return i, nil + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TargetValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *RollbackConfig) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err +func (m *CustomMetricTargetList) Size() (n int) { + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return data[:n], nil + return n } -func (m *RollbackConfig) MarshalTo(data []byte) (int, error) { - var i int - _ = i +func (m *DaemonSet) Size() (n int) { var l int _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Revision)) - return i, nil + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = 
m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *RollingUpdateDeployment) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err +func (m *DaemonSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return data[:n], nil + return n } -func (m *RollingUpdateDeployment) MarshalTo(data []byte) (int, error) { - var i int - _ = i +func (m *DaemonSetSpec) Size() (n int) { var l int _ = l - if m.MaxUnavailable != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.MaxUnavailable.Size())) - n69, err := m.MaxUnavailable.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n69 - } - if m.MaxSurge != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.MaxSurge.Size())) - n70, err := m.MaxSurge.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n70 + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - return i, nil + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + n += 1 + sovGenerated(uint64(m.TemplateGeneration)) + return n } -func (m *RunAsUserStrategyOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil +func (m *DaemonSetStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) + n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberReady)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberAvailable)) + n += 1 + sovGenerated(uint64(m.NumberUnavailable)) + return n } -func (m *RunAsUserStrategyOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i +func (m *DaemonSetUpdateStrategy) Size() (n int) { var l int _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) - i += copy(data[i:], m.Rule) - if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) } - return i, nil + return n } -func (m *SELinuxStrategyOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil +func (m *Deployment) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *SELinuxStrategyOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i +func (m *DeploymentCondition) Size() (n int) { var l int _ = 
l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) - i += copy(data[i:], m.Rule) - if m.SELinuxOptions != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size())) - n71, err := m.SELinuxOptions.MarshalTo(data[i:]) - if err != nil { - return 0, err + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i += n71 } - return i, nil + return n } -func (m *Scale) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err +func (m *DeploymentRollback) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UpdatedAnnotations) > 0 { + for k, v := range m.UpdatedAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - return data[:n], nil + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *Scale) MarshalTo(data []byte) (int, error) { - var i int - _ = i +func (m *DeploymentSpec) Size() (n int) { var l int _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n72, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n72 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n73, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) } - i += n73 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n74, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i += n74 - return i, nil -} - -func (m *ScaleSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) } - return data[:n], nil -} - -func (m *ScaleSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) - return i, nil -} - -func (m *ScaleStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err + n += 2 + if m.RollbackTo != nil { + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) } - return data[:n], nil + if m.ProgressDeadlineSeconds != nil { + n += 1 + 
sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + } + return n } -func (m *ScaleStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i +func (m *DeploymentStatus) Size() (n int) { var l int _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) - if len(m.Selector) > 0 { - for k := range m.Selector { - data[i] = 0x12 - i++ - v := m.Selector[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.TargetSelector))) - i += copy(data[i:], m.TargetSelector) - return i, nil -} - -func (m *SubresourceReference) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + return n } -func (m *SubresourceReference) MarshalTo(data []byte) (int, error) { - var i int - _ = i +func (m *DeploymentStrategy) Size() (n int) { var l int _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) - i += copy(data[i:], m.APIVersion) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Subresource))) - i += copy(data[i:], m.Subresource) - return i, nil -} - -func (m *SupplementalGroupsStrategyOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) } - return data[:n], nil + return n } -func (m *SupplementalGroupsStrategyOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i +func (m *FSGroupStrategyOptions) Size() (n int) { var l int _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) - i += copy(data[i:], m.Rule) + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - return i, nil + return n } -func (m *ThirdPartyResource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil +func (m *HTTPIngressPath) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + 
sovGenerated(uint64(l)) + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ThirdPartyResource) MarshalTo(data []byte) (int, error) { - var i int - _ = i +func (m *HTTPIngressRuleValue) Size() (n int) { var l int _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n75, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n75 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Description))) - i += copy(data[i:], m.Description) - if len(m.Versions) > 0 { - for _, msg := range m.Versions { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - return i, nil -} - -func (m *ThirdPartyResourceData) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil + return n } -func (m *ThirdPartyResourceData) MarshalTo(data []byte) (int, error) { - var i int - _ = i +func (m *HostPortRange) Size() (n int) { var l int _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n76, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n76 - if m.Data != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Data))) - i += copy(data[i:], m.Data) - } - return i, nil + n += 1 + sovGenerated(uint64(m.Min)) + n += 1 + sovGenerated(uint64(m.Max)) + return n } -func (m *ThirdPartyResourceDataList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil +func (m *IDRange) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Min)) + n += 1 + sovGenerated(uint64(m.Max)) + return n } -func (m *ThirdPartyResourceDataList) MarshalTo(data []byte) (int, error) { - var i int - _ = i +func (m *Ingress) Size() (n int) { var l int _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n77, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n77 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ThirdPartyResourceList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ThirdPartyResourceList) MarshalTo(data []byte) (int, error) { - var i int - _ = i +func (m *IngressBackend) Size() (n int) { var l int _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n78, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n78 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := 
msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ServicePort.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *APIVersion) Size() (n int) { +func (m *IngressList) Size() (n int) { var l int _ = l - l = len(m.Name) + l = m.ListMeta.Size() n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *CPUTargetUtilization) Size() (n int) { +func (m *IngressRule) Size() (n int) { var l int _ = l - n += 1 + sovGenerated(uint64(m.TargetPercentage)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = m.IngressRuleValue.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *CustomMetricCurrentStatus) Size() (n int) { +func (m *IngressRuleValue) Size() (n int) { var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.CurrentValue.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.HTTP != nil { + l = m.HTTP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *CustomMetricCurrentStatusList) Size() (n int) { +func (m *IngressSpec) Size() (n int) { var l int _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { + if m.Backend != nil { + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.TLS) > 0 { + for _, e := range m.TLS { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } @@ -3205,41 +3054,57 @@ func (m *CustomMetricCurrentStatusList) Size() (n int) { return n } -func (m *CustomMetricTarget) Size() (n int) { +func (m *IngressStatus) Size() (n int) { var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.TargetValue.Size() + l = m.LoadBalancer.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *CustomMetricTargetList) Size() (n int) { +func (m *IngressTLS) Size() (n int) { var l int _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) n += 1 + l + sovGenerated(uint64(l)) } } + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *DaemonSet) Size() (n int) { +func (m *NetworkPolicy) Size() (n int) { var l int _ = l l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *DaemonSetList) Size() (n int) { +func (m *NetworkPolicyIngressRule) Size() (n int) { + var l int + _ = l + if len(m.Ports) > 0 
{ + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.From) > 0 { + for _, e := range m.From { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyList) Size() (n int) { var l int _ = l l = m.ListMeta.Size() @@ -3253,59 +3118,59 @@ func (m *DaemonSetList) Size() (n int) { return n } -func (m *DaemonSetSpec) Size() (n int) { +func (m *NetworkPolicyPeer) Size() (n int) { var l int _ = l - if m.Selector != nil { - l = m.Selector.Size() + if m.PodSelector != nil { + l = m.PodSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *DaemonSetStatus) Size() (n int) { +func (m *NetworkPolicyPort) Size() (n int) { var l int _ = l - n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) - n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberReady)) + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *Deployment) Size() (n int) { +func (m *NetworkPolicySpec) Size() (n int) { var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() + l = m.PodSelector.Size() n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *DeploymentCondition) Size() (n int) { +func (m *PodSecurityPolicy) Size() (n int) { var l int _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastUpdateTime.Size() + l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() + l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *DeploymentList) Size() (n int) { +func (m *PodSecurityPolicyList) Size() (n int) { var l int _ = l l = m.ListMeta.Size() @@ -3319,97 +3184,90 @@ func (m *DeploymentList) Size() (n int) { return n } -func (m *DeploymentRollback) Size() (n int) { +func (m *PodSecurityPolicySpec) Size() (n int) { var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.UpdatedAnnotations) > 0 { - for k, v := range m.UpdatedAnnotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + n += 2 + if len(m.DefaultAddCapabilities) > 0 { + for _, s := range m.DefaultAddCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } } - l = m.RollbackTo.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DeploymentSpec) Size() (n int) { - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) + if len(m.RequiredDropCapabilities) > 0 { + for _, s := range m.RequiredDropCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) + if 
len(m.AllowedCapabilities) > 0 { + for _, s := range m.AllowedCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Strategy.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } n += 2 - if m.RollbackTo != nil { - l = m.RollbackTo.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ProgressDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) - } - return n -} - -func (m *DeploymentStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) - n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { + if len(m.HostPorts) > 0 { + for _, e := range m.HostPorts { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } + n += 2 + n += 2 + l = m.SELinux.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.RunAsUser.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.SupplementalGroups.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FSGroup.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } -func (m *DeploymentStrategy) Size() (n int) { +func (m *ReplicaSet) Size() (n int) { var l int _ = l - l = len(m.Type) + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } -func (m *ExportOptions) Size() (n int) { +func (m *ReplicaSetCondition) Size() (n int) { var l int _ = l - n += 2 - n += 2 + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *FSGroupStrategyOptions) Size() (n int) { +func (m *ReplicaSetList) Size() (n int) { var l int _ = l - l = len(m.Rule) + l = m.ListMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { + if len(m.Items) > 0 { + for _, e := range m.Items { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } @@ -3417,21 +3275,32 @@ func (m *FSGroupStrategyOptions) Size() (n int) { return n } -func (m *HTTPIngressPath) Size() (n int) { +func (m *ReplicaSetSpec) Size() (n int) { var l int _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Backend.Size() + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) return n } -func (m *HTTPIngressRuleValue) Size() (n int) { +func (m *ReplicaSetStatus) Size() (n int) { var l int _ = l - if len(m.Paths) > 0 { - for _, e := range m.Paths { + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + 
sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } @@ -3439,83 +3308,70 @@ func (m *HTTPIngressRuleValue) Size() (n int) { return n } -func (m *HorizontalPodAutoscaler) Size() (n int) { +func (m *ReplicationControllerDummy) Size() (n int) { var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *HorizontalPodAutoscalerList) Size() (n int) { +func (m *RollbackConfig) Size() (n int) { var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } + n += 1 + sovGenerated(uint64(m.Revision)) return n } -func (m *HorizontalPodAutoscalerSpec) Size() (n int) { +func (m *RollingUpdateDaemonSet) Size() (n int) { var l int _ = l - l = m.ScaleRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.MinReplicas != nil { - n += 1 + sovGenerated(uint64(*m.MinReplicas)) - } - n += 1 + sovGenerated(uint64(m.MaxReplicas)) - if m.CPUUtilization != nil { - l = m.CPUUtilization.Size() + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *HorizontalPodAutoscalerStatus) Size() (n int) { +func (m *RollingUpdateDeployment) Size() (n int) { var l int _ = l - if m.ObservedGeneration != nil { - n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - l = m.LastScaleTime.Size() + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() n += 1 + l + sovGenerated(uint64(l)) } - n += 1 + sovGenerated(uint64(m.CurrentReplicas)) - n += 1 + sovGenerated(uint64(m.DesiredReplicas)) - if m.CurrentCPUUtilizationPercentage != nil { - n += 1 + sovGenerated(uint64(*m.CurrentCPUUtilizationPercentage)) + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *HostPortRange) Size() (n int) { +func (m *RunAsUserStrategyOptions) Size() (n int) { var l int _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *IDRange) Size() (n int) { +func (m *SELinuxStrategyOptions) Size() (n int) { var l int _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *Ingress) Size() (n int) { +func (m *Scale) Size() (n int) { var l int _ = l l = m.ObjectMeta.Size() @@ -3527,23 +3383,37 @@ func (m *Ingress) Size() (n int) { return n } -func (m *IngressBackend) Size() (n int) { +func (m *ScaleSpec) Size() (n int) { var l int _ = l - l = len(m.ServiceName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ServicePort.Size() + n += 1 + sovGenerated(uint64(m.Replicas)) + return n +} + +func (m *ScaleStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k, v := 
range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.TargetSelector) n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *IngressList) Size() (n int) { +func (m *SupplementalGroupsStrategyOptions) Size() (n int) { var l int _ = l - l = m.ListMeta.Size() + l = len(m.Rule) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } @@ -3551,41 +3421,41 @@ func (m *IngressList) Size() (n int) { return n } -func (m *IngressRule) Size() (n int) { +func (m *ThirdPartyResource) Size() (n int) { var l int _ = l - l = len(m.Host) + l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - l = m.IngressRuleValue.Size() + l = len(m.Description) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *IngressRuleValue) Size() (n int) { +func (m *ThirdPartyResourceData) Size() (n int) { var l int _ = l - if m.HTTP != nil { - l = m.HTTP.Size() + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Data != nil { + l = len(m.Data) n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *IngressSpec) Size() (n int) { +func (m *ThirdPartyResourceDataList) Size() (n int) { var l int _ = l - if m.Backend != nil { - l = m.Backend.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.TLS) > 0 { - for _, e := range m.TLS { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Rules) > 0 { - for _, e := range m.Rules { + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } @@ -3593,2529 +3463,744 @@ func (m *IngressSpec) Size() (n int) { return n } -func (m *IngressStatus) Size() (n int) { +func (m *ThirdPartyResourceList) Size() (n int) { var l int _ = l - l = m.LoadBalancer.Size() + l = m.ListMeta.Size() n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *IngressTLS) Size() (n int) { - var l int - _ = l - if len(m.Hosts) > 0 { - for _, s := range m.Hosts { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break } } - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) return n } - -func (m *Job) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *JobCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastProbeTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } - -func (m *JobList) Size() (n int) { - 
var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *APIVersion) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&APIVersion{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s } - -func (m *JobSpec) Size() (n int) { - var l int - _ = l - if m.Parallelism != nil { - n += 1 + sovGenerated(uint64(*m.Parallelism)) +func (this *CustomMetricCurrentStatus) String() string { + if this == nil { + return "nil" } - if m.Completions != nil { - n += 1 + sovGenerated(uint64(*m.Completions)) + s := strings.Join([]string{`&CustomMetricCurrentStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomMetricCurrentStatusList) String() string { + if this == nil { + return "nil" } - if m.ActiveDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + s := strings.Join([]string{`&CustomMetricCurrentStatusList{`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CustomMetricCurrentStatus", "CustomMetricCurrentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomMetricTarget) String() string { + if this == nil { + return "nil" } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&CustomMetricTarget{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomMetricTargetList) String() string { + if this == nil { + return "nil" } - if m.AutoSelector != nil { - n += 2 + s := strings.Join([]string{`&CustomMetricTargetList{`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CustomMetricTarget", "CustomMetricTarget", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSet) String() string { + if this == nil { + return "nil" } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&DaemonSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *JobStatus) Size() (n int) { - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *DaemonSetList) String() string { + if this == nil { + return "nil" } - if m.StartTime != nil { - l = m.StartTime.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&DaemonSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetSpec) String() string { + if this == nil { + return "nil" } - if m.CompletionTime != nil { - l = m.CompletionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&DaemonSetSpec{`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetStatus) String() string { + if this == nil { + return "nil" } - n += 1 + sovGenerated(uint64(m.Active)) - n += 1 + sovGenerated(uint64(m.Succeeded)) - n += 1 + sovGenerated(uint64(m.Failed)) - return n + s := strings.Join([]string{`&DaemonSetStatus{`, + `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, + `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, + `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, + `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`, + `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, + `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicy) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (this *DaemonSetUpdateStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyIngressRule) Size() (n int) { - var l int - _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *Deployment) String() string { + if this == nil { + return "nil" } - if len(m.From) > 0 { - for _, e := range m.From { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&Deployment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", 
this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *DeploymentList) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&DeploymentList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyPeer) Size() (n int) { - var l int - _ = l - if m.PodSelector != nil { - l = m.PodSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (this *DeploymentRollback) String() string { + if this == nil { + return "nil" } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) + for k := range this.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) } - return n + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + mapStringForUpdatedAnnotations := "map[string]string{" + for _, k := range keysForUpdatedAnnotations { + mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) + } + mapStringForUpdatedAnnotations += "}" + s := strings.Join([]string{`&DeploymentRollback{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, + `RollbackTo:` + strings.Replace(strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyPort) Size() (n int) { - var l int - _ = l - if m.Protocol != nil { - l = len(*m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) +func (this *DeploymentSpec) String() string { + if this == nil { + return "nil" } - if m.Port != nil { - l = m.Port.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&DeploymentSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `RollbackTo:` + strings.Replace(fmt.Sprintf("%v", this.RollbackTo), "RollbackConfig", 
"RollbackConfig", 1) + `,`, + `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStatus) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&DeploymentStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicySpec) Size() (n int) { - var l int - _ = l - l = m.PodSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ingress) > 0 { - for _, e := range m.Ingress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityPolicy) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (this *FSGroupStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FSGroupStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityPolicyList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *HTTPIngressPath) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&HTTPIngressPath{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityPolicySpec) Size() (n int) { - var l int - _ = l - n += 2 - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *HTTPIngressRuleValue) String() string { + if this == nil { + return "nil" } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&HTTPIngressRuleValue{`, + `Paths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Paths), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HostPortRange) String() string { + if this == nil { + return "nil" } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - l = len(s) - n += 1 + l + 
sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&HostPortRange{`, + `Min:` + fmt.Sprintf("%v", this.Min) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `}`, + }, "") + return s +} +func (this *IDRange) String() string { + if this == nil { + return "nil" } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&IDRange{`, + `Min:` + fmt.Sprintf("%v", this.Min) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `}`, + }, "") + return s +} +func (this *Ingress) String() string { + if this == nil { + return "nil" } - n += 2 - if len(m.HostPorts) > 0 { - for _, e := range m.HostPorts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&Ingress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressBackend) String() string { + if this == nil { + return "nil" } - n += 2 - n += 2 - l = m.SELinux.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.RunAsUser.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.SupplementalGroups.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FSGroup.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n + s := strings.Join([]string{`&IngressBackend{`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `ServicePort:` + strings.Replace(strings.Replace(this.ServicePort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSet) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (this *IngressList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Ingress", "Ingress", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSetCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (this *IngressRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressRule{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSetList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = 
e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *IngressRuleValue) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&IngressRuleValue{`, + `HTTP:` + strings.Replace(fmt.Sprintf("%v", this.HTTP), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSetSpec) Size() (n int) { - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) +func (this *IngressSpec) String() string { + if this == nil { + return "nil" } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - return n -} - -func (m *ReplicaSetStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ReplicationControllerDummy) Size() (n int) { - var l int - _ = l - return n -} - -func (m *RollbackConfig) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Revision)) - return n -} - -func (m *RollingUpdateDeployment) Size() (n int) { - var l int - _ = l - if m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.MaxSurge != nil { - l = m.MaxSurge.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *RunAsUserStrategyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *SELinuxStrategyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *Scale) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ScaleSpec) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - return n -} - -func (m *ScaleStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.TargetSelector) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SubresourceReference) Size() (n int) { - var l int - _ = l - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Subresource) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SupplementalGroupsStrategyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - 
for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ThirdPartyResource) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Description) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Versions) > 0 { - for _, e := range m.Versions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ThirdPartyResourceData) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ThirdPartyResourceDataList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ThirdPartyResourceList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + s := strings.Join([]string{`&IngressSpec{`, + `Backend:` + strings.Replace(fmt.Sprintf("%v", this.Backend), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TLS), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "IngressRule", "IngressRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } -func (this *APIVersion) String() string { +func (this *IngressStatus) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&APIVersion{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + s := strings.Join([]string{`&IngressStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "k8s_io_kubernetes_pkg_api_v1.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *CPUTargetUtilization) String() string { +func (this *IngressTLS) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&CPUTargetUtilization{`, - `TargetPercentage:` + fmt.Sprintf("%v", this.TargetPercentage) + `,`, + s := strings.Join([]string{`&IngressTLS{`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, `}`, }, "") return s } -func (this *CustomMetricCurrentStatus) String() string { +func (this *NetworkPolicy) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&CustomMetricCurrentStatus{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_kubernetes_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&NetworkPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this 
*CustomMetricCurrentStatusList) String() string { +func (this *NetworkPolicyIngressRule) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&CustomMetricCurrentStatusList{`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CustomMetricCurrentStatus", "CustomMetricCurrentStatus", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&NetworkPolicyIngressRule{`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *CustomMetricTarget) String() string { +func (this *NetworkPolicyList) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&CustomMetricTarget{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_kubernetes_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&NetworkPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *CustomMetricTargetList) String() string { +func (this *NetworkPolicyPeer) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&CustomMetricTargetList{`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CustomMetricTarget", "CustomMetricTarget", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&NetworkPolicyPeer{`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, `}`, }, "") return s } -func (this *DaemonSet) String() string { +func (this *NetworkPolicyPort) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&NetworkPolicyPort{`, + `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, `}`, }, "") return s } -func (this *DaemonSetList) String() string { +func (this *NetworkPolicySpec) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, + s := 
strings.Join([]string{`&NetworkPolicySpec{`, + `PodSelector:` + strings.Replace(strings.Replace(this.PodSelector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *DaemonSetSpec) String() string { +func (this *PodSecurityPolicy) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_kubernetes_pkg_api_unversioned.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&PodSecurityPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *DaemonSetStatus) String() string { +func (this *PodSecurityPolicyList) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&DaemonSetStatus{`, - `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, - `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, - `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, - `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, + s := strings.Join([]string{`&PodSecurityPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *Deployment) String() string { +func (this *PodSecurityPolicySpec) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&PodSecurityPolicySpec{`, + `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, + `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, + `RequiredDropCapabilities:` + fmt.Sprintf("%v", this.RequiredDropCapabilities) + `,`, + `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, + `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, + `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, + `HostPorts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostPorts), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + `,`, + `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, + `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, + `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", 
"SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, + `RunAsUser:` + strings.Replace(strings.Replace(this.RunAsUser.String(), "RunAsUserStrategyOptions", "RunAsUserStrategyOptions", 1), `&`, ``, 1) + `,`, + `SupplementalGroups:` + strings.Replace(strings.Replace(this.SupplementalGroups.String(), "SupplementalGroupsStrategyOptions", "SupplementalGroupsStrategyOptions", 1), `&`, ``, 1) + `,`, + `FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`, + `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, `}`, }, "") return s } -func (this *DeploymentCondition) String() string { +func (this *ReplicaSet) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&DeploymentCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&ReplicaSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *DeploymentList) String() string { +func (this *ReplicaSetCondition) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&ReplicaSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, }, "") return s } -func (this *DeploymentRollback) String() string { +func (this *ReplicaSetList) String() string { if this == nil { return "nil" } - keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) - for k := range this.UpdatedAnnotations { - keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) - mapStringForUpdatedAnnotations := "map[string]string{" - for _, k := range keysForUpdatedAnnotations { - mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) - } - mapStringForUpdatedAnnotations += "}" - s := strings.Join([]string{`&DeploymentRollback{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + 
`,`, - `RollbackTo:` + strings.Replace(strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&ReplicaSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *DeploymentSpec) String() string { +func (this *ReplicaSetSpec) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&DeploymentSpec{`, + s := strings.Join([]string{`&ReplicaSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_kubernetes_pkg_api_unversioned.LabelSelector", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, - `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, - `RollbackTo:` + strings.Replace(fmt.Sprintf("%v", this.RollbackTo), "RollbackConfig", "RollbackConfig", 1) + `,`, - `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, `}`, }, "") return s } -func (this *DeploymentStatus) String() string { +func (this *ReplicaSetStatus) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&DeploymentStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + s := strings.Join([]string{`&ReplicaSetStatus{`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentStrategy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ExportOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExportOptions{`, - `Export:` + fmt.Sprintf("%v", this.Export) + `,`, - `Exact:` + fmt.Sprintf("%v", this.Exact) + `,`, - `}`, - }, "") - return s -} -func (this *FSGroupStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&FSGroupStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *HTTPIngressPath) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HTTPIngressPath{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *HTTPIngressRuleValue) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HTTPIngressRuleValue{`, - `Paths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Paths), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscaler) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *HorizontalPodAutoscalerList) String() string { +func (this *ReplicationControllerDummy) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&ReplicationControllerDummy{`, `}`, }, "") return s } -func (this *HorizontalPodAutoscalerSpec) String() string { +func (this *RollbackConfig) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, - `ScaleRef:` + strings.Replace(strings.Replace(this.ScaleRef.String(), "SubresourceReference", "SubresourceReference", 1), `&`, ``, 1) + `,`, - `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, - `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, - `CPUUtilization:` + strings.Replace(fmt.Sprintf("%v", this.CPUUtilization), "CPUTargetUtilization", "CPUTargetUtilization", 1) + `,`, + s := strings.Join([]string{`&RollbackConfig{`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, `}`, }, "") return s } -func (this *HorizontalPodAutoscalerStatus) String() string { +func (this *RollingUpdateDaemonSet) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, - `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1) + `,`, - `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, - 
`DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, - `CurrentCPUUtilizationPercentage:` + valueToStringGenerated(this.CurrentCPUUtilizationPercentage) + `,`, + s := strings.Join([]string{`&RollingUpdateDaemonSet{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, `}`, }, "") return s } -func (this *HostPortRange) String() string { +func (this *RollingUpdateDeployment) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&HostPortRange{`, - `Min:` + fmt.Sprintf("%v", this.Min) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + s := strings.Join([]string{`&RollingUpdateDeployment{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, `}`, }, "") return s } -func (this *IDRange) String() string { +func (this *RunAsUserStrategyOptions) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&IDRange{`, - `Min:` + fmt.Sprintf("%v", this.Min) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + s := strings.Join([]string{`&RunAsUserStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *Ingress) String() string { +func (this *SELinuxStrategyOptions) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&Ingress{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&SELinuxStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "k8s_io_kubernetes_pkg_api_v1.SELinuxOptions", 1) + `,`, `}`, }, "") return s } -func (this *IngressBackend) String() string { +func (this *Scale) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&IngressBackend{`, - `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `ServicePort:` + strings.Replace(strings.Replace(this.ServicePort.String(), "IntOrString", "k8s_io_kubernetes_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&Scale{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *IngressList) String() string { +func (this *ScaleSpec) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&IngressList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", 
"k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Ingress", "Ingress", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&ScaleSpec{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `}`, }, "") return s } -func (this *IngressRule) String() string { +func (this *ScaleStatus) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&IngressRule{`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressRuleValue) String() string { - if this == nil { - return "nil" + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) } - s := strings.Join([]string{`&IngressRuleValue{`, - `HTTP:` + strings.Replace(fmt.Sprintf("%v", this.HTTP), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressSpec) String() string { - if this == nil { - return "nil" + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) } - s := strings.Join([]string{`&IngressSpec{`, - `Backend:` + strings.Replace(fmt.Sprintf("%v", this.Backend), "IngressBackend", "IngressBackend", 1) + `,`, - `TLS:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TLS), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "IngressRule", "IngressRule", 1), `&`, ``, 1) + `,`, + mapStringForSelector += "}" + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, `}`, }, "") return s } -func (this *IngressStatus) String() string { +func (this *SupplementalGroupsStrategyOptions) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "k8s_io_kubernetes_pkg_api_v1.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *IngressTLS) String() string { +func (this *ThirdPartyResource) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&IngressTLS{`, - `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + s := strings.Join([]string{`&ThirdPartyResource{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "APIVersion", "APIVersion", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *Job) String() string { +func (this *ThirdPartyResourceData) String() string { if this == 
nil { return "nil" } - s := strings.Join([]string{`&Job{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "JobSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "JobStatus", "JobStatus", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&ThirdPartyResourceData{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + valueToStringGenerated(this.Data) + `,`, `}`, }, "") return s -} -func (this *JobCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *JobList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Job", "Job", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *JobSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobSpec{`, - `Parallelism:` + valueToStringGenerated(this.Parallelism) + `,`, - `Completions:` + valueToStringGenerated(this.Completions) + `,`, - `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_kubernetes_pkg_api_unversioned.LabelSelector", 1) + `,`, - `AutoSelector:` + valueToStringGenerated(this.AutoSelector) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *JobStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobStatus{`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "JobCondition", "JobCondition", 1), `&`, ``, 1) + `,`, - `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1) + `,`, - `CompletionTime:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTime), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1) + `,`, - `Active:` + fmt.Sprintf("%v", this.Active) + `,`, - `Succeeded:` + fmt.Sprintf("%v", this.Succeeded) + `,`, - `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), 
"ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyIngressRule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicyIngressRule{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, - `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyPeer) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicyPeer{`, - `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "k8s_io_kubernetes_pkg_api_unversioned.LabelSelector", 1) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_kubernetes_pkg_api_unversioned.LabelSelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyPort) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicyPort{`, - `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, - `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "k8s_io_kubernetes_pkg_util_intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicySpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicySpec{`, - `PodSelector:` + strings.Replace(strings.Replace(this.PodSelector.String(), "LabelSelector", "k8s_io_kubernetes_pkg_api_unversioned.LabelSelector", 1), `&`, ``, 1) + `,`, - `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSecurityPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicyList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSecurityPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicySpec) String() string { - if this == nil { - return "nil" - } - s 
:= strings.Join([]string{`&PodSecurityPolicySpec{`, - `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, - `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, - `RequiredDropCapabilities:` + fmt.Sprintf("%v", this.RequiredDropCapabilities) + `,`, - `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, - `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, - `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPorts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostPorts), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + `,`, - `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, - `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, - `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, - `RunAsUser:` + strings.Replace(strings.Replace(this.RunAsUser.String(), "RunAsUserStrategyOptions", "RunAsUserStrategyOptions", 1), `&`, ``, 1) + `,`, - `SupplementalGroups:` + strings.Replace(strings.Replace(this.SupplementalGroups.String(), "SupplementalGroupsStrategyOptions", "SupplementalGroupsStrategyOptions", 1), `&`, ``, 1) + `,`, - `FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`, - `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_kubernetes_pkg_api_unversioned.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - 
`MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicationControllerDummy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicationControllerDummy{`, - `}`, - }, "") - return s -} -func (this *RollbackConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollbackConfig{`, - `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, - `}`, - }, "") - return s -} -func (this *RollingUpdateDeployment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_kubernetes_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_kubernetes_pkg_util_intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RunAsUserStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RunAsUserStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *SELinuxStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SELinuxStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "k8s_io_kubernetes_pkg_api_v1.SELinuxOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Scale) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ScaleSpec{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleStatus) String() string { - if this == nil { - return "nil" - } - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: 
%v,", k, this.Selector[k]) - } - mapStringForSelector += "}" - s := strings.Join([]string{`&ScaleStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `Selector:` + mapStringForSelector + `,`, - `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, - `}`, - }, "") - return s -} -func (this *SubresourceReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SubresourceReference{`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, - `}`, - }, "") - return s -} -func (this *SupplementalGroupsStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ThirdPartyResource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ThirdPartyResource{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Description:` + fmt.Sprintf("%v", this.Description) + `,`, - `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "APIVersion", "APIVersion", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ThirdPartyResourceData) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ThirdPartyResourceData{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + valueToStringGenerated(this.Data) + `,`, - `}`, - }, "") - return s -} -func (this *ThirdPartyResourceDataList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ThirdPartyResourceDataList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ThirdPartyResourceData", "ThirdPartyResourceData", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ThirdPartyResourceList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ThirdPartyResourceList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ThirdPartyResource", "ThirdPartyResource", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *APIVersion) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := 
int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: APIVersion: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: APIVersion: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CPUTargetUtilization) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CPUTargetUtilization: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CPUTargetUtilization: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetPercentage", wireType) - } - m.TargetPercentage = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.TargetPercentage |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomMetricCurrentStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomMetricCurrentStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricCurrentStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.CurrentValue.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomMetricCurrentStatusList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomMetricCurrentStatusList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricCurrentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CustomMetricCurrentStatus{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomMetricTarget) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - 
} - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomMetricTarget: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricTarget: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TargetValue.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomMetricTargetList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomMetricTargetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricTargetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CustomMetricTarget{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if 
iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSet) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire 
type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, DaemonSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} - } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) - } - m.CurrentNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) - } - m.NumberMisscheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.NumberMisscheduled |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) - } - m.DesiredNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) - } - m.NumberReady = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.NumberReady |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF +} +func (this *ThirdPartyResourceDataList) String() string { + if this == nil { + return "nil" } - return nil + s := strings.Join([]string{`&ThirdPartyResourceDataList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ThirdPartyResourceData", 
"ThirdPartyResourceData", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } -func (m *Deployment) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Deployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } +func (this *ThirdPartyResourceList) String() string { + if this == nil { + return "nil" } - - if iNdEx > l { - return io.ErrUnexpectedEOF + s := strings.Join([]string{`&ThirdPartyResourceList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ThirdPartyResource", "ThirdPartyResource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" } - return nil + pv := 
reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) } -func (m *DeploymentCondition) Unmarshal(data []byte) error { +func (m *APIVersion) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -6138,102 +4223,15 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") + return fmt.Errorf("proto: APIVersion: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: APIVersion: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DeploymentConditionType(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6258,13 +4256,63 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(data[iNdEx:postIndex]) + m.Name = string(data[iNdEx:postIndex]) iNdEx = postIndex - case 6: + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*CustomMetricCurrentStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomMetricCurrentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomMetricCurrentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6274,25 +4322,24 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastUpdateTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(data[iNdEx:postIndex]) iNdEx = postIndex - case 7: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6316,7 +4363,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.CurrentValue.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -6341,7 +4388,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { } return nil } -func (m *DeploymentList) Unmarshal(data []byte) error { +func (m *CustomMetricCurrentStatusList) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -6364,43 +4411,13 @@ func (m *DeploymentList) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") + return fmt.Errorf("proto: CustomMetricCurrentStatusList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CustomMetricCurrentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } 
- if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } @@ -6426,7 +4443,7 @@ func (m *DeploymentList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Deployment{}) + m.Items = append(m.Items, CustomMetricCurrentStatus{}) if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } @@ -6452,7 +4469,7 @@ func (m *DeploymentList) Unmarshal(data []byte) error { } return nil } -func (m *DeploymentRollback) Unmarshal(data []byte) error { +func (m *CustomMetricTarget) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -6475,10 +4492,10 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") + return fmt.Errorf("proto: CustomMetricTarget: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CustomMetricTarget: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -6512,7 +4529,7 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6536,22 +4553,65 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + if err := m.TargetValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err } - var stringLenmapkey uint64 + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomMetricTargetList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomMetricTargetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomMetricTargetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6561,22 +4621,78 @@ func (m *DeploymentRollback) 
Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CustomMetricTarget{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSet) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6586,12 +4702,27 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6601,29 +4732,25 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + postIndex := iNdEx + msglen + if postIndex > l { return io.ErrUnexpectedEOF } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - if m.UpdatedAnnotations == nil { - m.UpdatedAnnotations = make(map[string]string) + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err } - m.UpdatedAnnotations[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6647,7 +4774,7 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -6672,7 +4799,7 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { } return nil } -func (m *DeploymentSpec) Unmarshal(data []byte) error { +func (m *DaemonSetList) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -6695,17 +4822,17 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6715,15 +4842,25 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.Replicas = &v + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6747,16 +4884,64 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} - } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, DaemonSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) 
+ if wireType == 4 { + return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6780,13 +4965,16 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6810,15 +4998,15 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Strategy.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) } - m.MinReadySeconds = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6828,56 +5016,27 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } + if msglen < 0 { + return ErrInvalidLengthGenerated } - m.RevisionHistoryLimit = &v - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + if err := m.UpdateStrategy.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err } - m.Paused = bool(v != 0) - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) } - var msglen int + m.MinReadySeconds = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6887,30 +5046,16 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + 
m.MinReadySeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollbackTo == nil { - m.RollbackTo = &RollbackConfig{} - } - if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TemplateGeneration", wireType) } - var v int32 + m.TemplateGeneration = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6920,12 +5065,11 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + m.TemplateGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - m.ProgressDeadlineSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -6947,7 +5091,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { } return nil } -func (m *DeploymentStatus) Unmarshal(data []byte) error { +func (m *DaemonSetStatus) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -6970,17 +5114,36 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) + } + m.CurrentNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) } - m.ObservedGeneration = 0 + m.NumberMisscheduled = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6990,16 +5153,16 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.NumberMisscheduled |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - case 2: + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) } - m.Replicas = 0 + m.DesiredNumberScheduled = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7009,16 +5172,16 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - case 3: + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field 
UpdatedReplicas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) } - m.UpdatedReplicas = 0 + m.NumberReady = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7028,16 +5191,16 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.NumberReady |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - case 4: + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) } - m.AvailableReplicas = 0 + m.ObservedGeneration = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7047,16 +5210,16 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.ObservedGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - case 5: + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) } - m.UnavailableReplicas = 0 + m.UpdatedNumberScheduled = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7066,16 +5229,16 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) } - var msglen int + m.NumberAvailable = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7085,23 +5248,30 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + m.NumberAvailable |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) } - m.Conditions = append(m.Conditions, DeploymentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err + m.NumberUnavailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.NumberUnavailable |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -7123,7 +5293,7 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { } return nil } -func (m *DeploymentStrategy) Unmarshal(data []byte) error { +func (m *DaemonSetUpdateStrategy) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -7146,10 +5316,10 @@ func (m *DeploymentStrategy) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for 
non-group") + return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -7179,7 +5349,7 @@ func (m *DeploymentStrategy) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = DeploymentStrategyType(data[iNdEx:postIndex]) + m.Type = DaemonSetUpdateStrategyType(data[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -7208,7 +5378,7 @@ func (m *DeploymentStrategy) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateDeployment{} + m.RollingUpdate = &RollingUpdateDaemonSet{} } if err := m.RollingUpdate.Unmarshal(data[iNdEx:postIndex]); err != nil { return err @@ -7235,7 +5405,7 @@ func (m *DeploymentStrategy) Unmarshal(data []byte) error { } return nil } -func (m *ExportOptions) Unmarshal(data []byte) error { +func (m *Deployment) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -7258,37 +5428,17 @@ func (m *ExportOptions) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExportOptions: wiretype end group for non-group") + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExportOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Export", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Export = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7298,67 +5448,27 @@ func (m *ExportOptions) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.Exact = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FSGroupStrategyOptions) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + postIndex := iNdEx + msglen + if postIndex > l { return io.ErrUnexpectedEOF } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := 
int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7368,24 +5478,25 @@ func (m *FSGroupStrategyOptions) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = FSGroupStrategyType(data[iNdEx:postIndex]) + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7409,8 +5520,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -7435,7 +5545,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(data []byte) error { } return nil } -func (m *HTTPIngressPath) Unmarshal(data []byte) error { +func (m *DeploymentCondition) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -7458,15 +5568,15 @@ func (m *HTTPIngressPath) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7491,13 +5601,13 @@ func (m *HTTPIngressPath) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(data[iNdEx:postIndex]) + m.Type = DeploymentConditionType(data[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7507,77 +5617,26 @@ func (m *HTTPIngressPath) Unmarshal(data []byte) error { } b := data[iNdEx] 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Backend.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } + m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPIngressRuleValue) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7587,78 +5646,26 @@ func (m *HTTPIngressRuleValue) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Paths = append(m.Paths, HTTPIngressPath{}) - if err := m.Paths[len(m.Paths)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } + m.Reason = string(data[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch 
fieldNum { - case 1: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7668,25 +5675,24 @@ func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } + m.Message = string(data[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7710,13 +5716,13 @@ func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LastUpdateTime.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7740,7 +5746,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -7765,7 +5771,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { } return nil } -func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { +func (m *DeploymentList) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -7788,10 +5794,10 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -7850,7 +5856,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, HorizontalPodAutoscaler{}) + m.Items = append(m.Items, Deployment{}) if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } @@ -7876,7 +5882,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { } return nil } -func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { +func (m *DeploymentRollback) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -7899,15 +5905,44 
@@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7931,15 +5966,22 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ScaleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - var v int32 + var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7949,17 +5991,22 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - m.MinReplicas = &v - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF } - m.MaxReplicas = 0 + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7969,14 +6016,44 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - m.MaxReplicas |= (int32(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - case 4: + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.UpdatedAnnotations == nil { + m.UpdatedAnnotations = make(map[string]string) + } + m.UpdatedAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CPUUtilization", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8000,10 +6077,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.CPUUtilization == nil { - m.CPUUtilization = &CPUTargetUtilization{} - } - if err := m.CPUUtilization.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -8028,7 +6102,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { } return nil } -func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { +func (m *DeploymentSpec) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -8041,27 +6115,80 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); 
err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } - var v int64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8071,15 +6198,25 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.ObservedGeneration = &v - case 2: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8103,18 +6240,15 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.LastScaleTime == nil { - m.LastScaleTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} - } - if err := m.LastScaleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Strategy.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) } - m.CurrentReplicas = 0 + m.MinReadySeconds = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8124,16 +6258,16 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - case 4: + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) } - m.DesiredReplicas = 0 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8143,16 +6277,17 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - m.DesiredReplicas |= (int32(b) & 0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - case 5: + m.RevisionHistoryLimit = &v + case 7: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentCPUUtilizationPercentage", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) } - var v int32 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8162,67 +6297,17 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.CurrentCPUUtilizationPercentage = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*HostPortRange) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + m.Paused = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) } - m.Min = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8232,16 +6317,30 @@ func (m *HostPortRange) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - m.Min |= (int32(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - case 2: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollbackTo == nil { + m.RollbackTo = &RollbackConfig{} + } + if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) } - m.Max = 0 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8251,11 +6350,12 @@ func (m *HostPortRange) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - m.Max |= (int32(b) & 0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } + m.ProgressDeadlineSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -8277,7 +6377,7 @@ func (m *HostPortRange) Unmarshal(data []byte) error { } return nil } -func (m *IDRange) Unmarshal(data []byte) error { +func (m *DeploymentStatus) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -8300,17 +6400,17 @@ func (m *IDRange) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IDRange: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) } - m.Min = 0 + m.ObservedGeneration = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8320,16 +6420,16 @@ func (m *IDRange) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - m.Min |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d 
for field Max", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) } - m.Max = 0 + m.Replicas = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8339,66 +6439,35 @@ func (m *IDRange) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - m.Max |= (int64(b) & 0x7F) << shift + m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Ingress) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Ingress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) } - var msglen int + m.AvailableReplicas = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8408,25 +6477,33 @@ func (m *Ingress) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + m.AvailableReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - case 2: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8450,15 +6527,16 @@ func (m *Ingress) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := 
m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) } - var msglen int + m.ReadyReplicas = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8468,22 +6546,11 @@ func (m *Ingress) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + m.ReadyReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -8505,7 +6572,7 @@ func (m *Ingress) Unmarshal(data []byte) error { } return nil } -func (m *IngressBackend) Unmarshal(data []byte) error { +func (m *DeploymentStrategy) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -8528,15 +6595,15 @@ func (m *IngressBackend) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8561,11 +6628,11 @@ func (m *IngressBackend) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ServiceName = string(data[iNdEx:postIndex]) + m.Type = DeploymentStrategyType(data[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8589,7 +6656,10 @@ func (m *IngressBackend) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ServicePort.Unmarshal(data[iNdEx:postIndex]); err != nil { + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDeployment{} + } + if err := m.RollingUpdate.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -8614,7 +6684,7 @@ func (m *IngressBackend) Unmarshal(data []byte) error { } return nil } -func (m *IngressList) Unmarshal(data []byte) error { +func (m *FSGroupStrategyOptions) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -8637,17 +6707,17 @@ func (m *IngressList) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressList: wiretype end group for non-group") + return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressList: illegal tag %d 
(wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8657,25 +6727,24 @@ func (m *IngressList) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } + m.Rule = FSGroupStrategyType(data[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8699,8 +6768,8 @@ func (m *IngressList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Ingress{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -8725,7 +6794,7 @@ func (m *IngressList) Unmarshal(data []byte) error { } return nil } -func (m *IngressRule) Unmarshal(data []byte) error { +func (m *HTTPIngressPath) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -8748,15 +6817,15 @@ func (m *IngressRule) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") + return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8781,91 +6850,11 @@ func (m *IngressRule) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(data[iNdEx:postIndex]) + m.Path = string(data[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.IngressRuleValue.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = 
postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressRuleValue) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8889,10 +6878,7 @@ func (m *IngressRuleValue) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.HTTP == nil { - m.HTTP = &HTTPIngressRuleValue{} - } - if err := m.HTTP.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Backend.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -8917,7 +6903,7 @@ func (m *IngressRuleValue) Unmarshal(data []byte) error { } return nil } -func (m *IngressSpec) Unmarshal(data []byte) error { +func (m *HTTPIngressRuleValue) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -8926,93 +6912,29 @@ func (m *IngressSpec) Unmarshal(data []byte) error { for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Backend == nil { - m.Backend = &IngressBackend{} - } - if err := m.Backend.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 
{ - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.TLS = append(m.TLS, IngressTLS{}) - if err := m.TLS[len(m.TLS)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 3: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9036,8 +6958,8 @@ func (m *IngressSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, IngressRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + m.Paths = append(m.Paths, HTTPIngressPath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9062,7 +6984,7 @@ func (m *IngressSpec) Unmarshal(data []byte) error { } return nil } -func (m *IngressStatus) Unmarshal(data []byte) error { +func (m *HostPortRange) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -9085,17 +7007,17 @@ func (m *IngressStatus) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) } - var msglen int + m.Min = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9105,22 +7027,30 @@ func (m *IngressStatus) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + m.Min |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) } - if err := m.LoadBalancer.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Max |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -9142,7 +7072,7 @@ func (m *IngressStatus) Unmarshal(data []byte) error { } return nil } -func (m *IngressTLS) Unmarshal(data []byte) error { +func (m *IDRange) Unmarshal(data []byte) error { l := len(data) 
iNdEx := 0 for iNdEx < l { @@ -9165,17 +7095,17 @@ func (m *IngressTLS) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + return fmt.Errorf("proto: IDRange: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) } - var stringLen uint64 + m.Min = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9185,26 +7115,16 @@ func (m *IngressTLS) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.Min |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hosts = append(m.Hosts, string(data[iNdEx:postIndex])) - iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) } - var stringLen uint64 + m.Max = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9214,21 +7134,11 @@ func (m *IngressTLS) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.Max |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SecretName = string(data[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -9250,7 +7160,7 @@ func (m *IngressTLS) Unmarshal(data []byte) error { } return nil } -func (m *Job) Unmarshal(data []byte) error { +func (m *Ingress) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -9273,10 +7183,10 @@ func (m *Job) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Job: wiretype end group for non-group") + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Job: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -9390,7 +7300,7 @@ func (m *Job) Unmarshal(data []byte) error { } return nil } -func (m *JobCondition) Unmarshal(data []byte) error { +func (m *IngressBackend) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -9413,15 +7323,15 @@ func (m *JobCondition) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JobCondition: wiretype end group for non-group") + return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JobCondition: illegal tag %d (wire type 
%d)", fieldNum, wire) + return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -9446,13 +7356,13 @@ func (m *JobCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = JobConditionType(data[iNdEx:postIndex]) + m.ServiceName = string(data[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9462,24 +7372,75 @@ func (m *JobCondition) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) + if err := m.ServicePort.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9503,13 +7464,13 @@ func (m *JobCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9533,13 +7494,64 @@ func (m *JobCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := 
m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, Ingress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressRule) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -9564,13 +7576,13 @@ func (m *JobCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(data[iNdEx:postIndex]) + m.Host = string(data[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9580,20 +7592,21 @@ func (m *JobCondition) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(data[iNdEx:postIndex]) + if err := m.IngressRuleValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -9616,7 +7629,7 @@ func (m *JobCondition) Unmarshal(data []byte) error { } return nil } -func (m *JobList) Unmarshal(data []byte) error { +func (m *IngressRuleValue) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -9639,15 +7652,15 @@ func (m *JobList) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JobList: wiretype end group for non-group") + return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JobList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong 
wireType = %d for field HTTP", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9671,38 +7684,10 @@ func (m *JobList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF + if m.HTTP == nil { + m.HTTP = &HTTPIngressRuleValue{} } - m.Items = append(m.Items, Job{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.HTTP.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9727,7 +7712,7 @@ func (m *JobList) Unmarshal(data []byte) error { } return nil } -func (m *JobSpec) Unmarshal(data []byte) error { +func (m *IngressSpec) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -9750,17 +7735,17 @@ func (m *JobSpec) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JobSpec: wiretype end group for non-group") + return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JobSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9770,55 +7755,28 @@ func (m *JobSpec) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.Parallelism = &v - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Completions", wireType) + if msglen < 0 { + return ErrInvalidLengthGenerated } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF } - m.Completions = &v - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + if m.Backend == nil { + m.Backend = &IngressBackend{} } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } + if err := m.Backend.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err } - m.ActiveDeadlineSeconds = &v - case 4: + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field Selector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9842,37 +7800,14 @@ func (m *JobSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} - } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + m.TLS = append(m.TLS, IngressTLS{}) + if err := m.TLS[len(m.TLS)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AutoSelector", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.AutoSelector = &b - case 6: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9896,7 +7831,8 @@ func (m *JobSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + m.Rules = append(m.Rules, IngressRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9921,7 +7857,7 @@ func (m *JobSpec) Unmarshal(data []byte) error { } return nil } -func (m *JobStatus) Unmarshal(data []byte) error { +func (m *IngressStatus) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -9944,15 +7880,15 @@ func (m *JobStatus) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JobStatus: wiretype end group for non-group") + return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JobStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9976,49 +7912,65 @@ func (m *JobStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, JobCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LoadBalancer.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err } - if msglen < 0 { + if skippy < 0 { return ErrInvalidLengthGenerated } - 
postIndex := iNdEx + msglen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.StartTime == nil { - m.StartTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressTLS) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 3: + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10028,68 +7980,26 @@ func (m *JobStatus) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CompletionTime == nil { - m.CompletionTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} - } - if err := m.CompletionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) - } - m.Active = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Active |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) - } - m.Succeeded = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Succeeded |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } + return io.ErrUnexpectedEOF } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) + m.Hosts = append(m.Hosts, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) } - m.Failed = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10099,11 +8009,21 @@ func (m *JobStatus) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - m.Failed |= (int32(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(data[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -10514,7 +8434,7 @@ func (m *NetworkPolicyPeer) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.PodSelector == nil { - m.PodSelector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} + m.PodSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.PodSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { return err @@ -10547,7 +8467,7 @@ func (m *NetworkPolicyPeer) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.NamespaceSelector == nil { - m.NamespaceSelector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} + m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.NamespaceSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { return err @@ -10660,7 +8580,7 @@ func (m *NetworkPolicyPort) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.Port == nil { - m.Port = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{} + m.Port = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.Port.Unmarshal(data[iNdEx:postIndex]); err != nil { return err @@ -11959,7 +9879,7 @@ func (m *ReplicaSetSpec) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { return err @@ -12330,6 +10250,89 @@ func (m *RollbackConfig) Unmarshal(data []byte) error { } return nil } +func (m *RollingUpdateDaemonSet) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 @@ -12386,7 +10389,7 @@ func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(data[iNdEx:postIndex]); err != nil { return err @@ -12419,7 +10422,7 @@ func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{} + m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(data[iNdEx:postIndex]); err != nil { return err @@ -13086,172 +11089,6 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { } return nil } -func (m *SubresourceReference) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SubresourceReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SubresourceReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subresource = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *SupplementalGroupsStrategyOptions) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 @@ -13941,250 +11778,216 @@ var ( ) var fileDescriptorGenerated = []byte{ - // 3908 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe4, 0x5b, 0x5d, 0x6c, 0x24, 0xd9, - 0x55, 0x9e, 0xea, 0x76, 0xdb, 0xed, 0xd3, 0x63, 0x8f, 0xe7, 0x8e, 0xc7, 0xd3, 0xeb, 0xdd, 0x75, - 0x4f, 0x6a, 0xc5, 0x66, 0x22, 0x76, 0xdb, 0xcc, 0xb0, 0x13, 0x36, 0xbb, 0x9b, 0x4d, 0xdc, 0xf6, - 0xfc, 0x62, 0xcf, 0x74, 0x6e, 0xdb, 0x93, 0x21, 0xd9, 0x6c, 0xa8, 0xee, 0xba, 0x6e, 0xd7, 0xba, - 0xfe, 0xb6, 0xea, 0x96, 0xd7, 0x9d, 0x08, 0x25, 0x12, 0xe2, 0x35, 0xe4, 0x0d, 0x24, 0xe0, 0x81, - 0x07, 0xc4, 0x13, 0x11, 0x11, 0x48, 0x91, 0x78, 0x02, 0x04, 0x62, 0x91, 0x40, 0x04, 0x24, 0x04, - 0x0f, 0xd0, 0xcb, 0x3a, 0x42, 0x11, 0xef, 0x08, 0xa4, 0xe1, 0x05, 0xdd, 0x5b, 0xb7, 0x7e, 0xbb, - 0xaa, 0xc7, 0xd5, 0xfe, 0x91, 0x10, 0x6f, 0xdd, 0xf7, 0x9e, 0xf3, 0x9d, 0x73, 0x4f, 0x9d, 0x3a, - 0xe7, 0xdc, 0x7b, 0x4f, 0xc1, 0x17, 0xf7, 0xdf, 0x74, 0x9b, 0x9a, 0xb5, 0xba, 0xef, 0x75, 0x89, - 0x63, 0x12, 0x4a, 0xdc, 0x55, 0x7b, 0xbf, 0xbf, 0xaa, 0xd8, 0x9a, 0xbb, 0x4a, 0x0e, 0x29, 0x31, - 0x5d, 0xcd, 0x32, 0xdd, 0xd5, 0x83, 0x9b, 0x5d, 0x42, 0x95, 0x9b, 0xab, 0x7d, 0x62, 0x12, 0x47, - 0xa1, 0x44, 0x6d, 0xda, 0x8e, 0x45, 0x2d, 0xf4, 0xba, 0xcf, 0xde, 0x8c, 0xd8, 0x9b, 0xf6, 0x7e, - 0xbf, 0xc9, 0xd8, 0x9b, 0x11, 0x7b, 0x53, 0xb0, 0x2f, 0xbf, 0xde, 0xd7, 0xe8, 0x9e, 0xd7, 0x6d, - 0xf6, 0x2c, 0x63, 0xb5, 0x6f, 0xf5, 0xad, 0x55, 0x8e, 0xd2, 0xf5, 0x76, 0xf9, 0x3f, 0xfe, 0x87, - 0xff, 0xf2, 0xd1, 0x97, 0x6f, 0xe5, 0x2a, 0xb7, 0xea, 0x10, 0xd7, 0xf2, 0x9c, 0x1e, 0x49, 0x6b, - 0xb4, 0x7c, 0x3b, 0x9f, 0xc7, 0x33, 0x0f, 0x88, 0xc3, 0x14, 0x22, 0xea, 0x08, 0xdb, 0x6b, 0xf9, - 0x6c, 0x07, 0x23, 0xcb, 0x5e, 0x7e, 0x3d, 0x9b, 0xda, 0xf1, 0x4c, 0xaa, 0x19, 0xa3, 0x3a, 0xdd, - 0xcc, 0x26, 0xf7, 0xa8, 0xa6, 0xaf, 0x6a, 0x26, 0x75, 0xa9, 0x93, 0x66, 0x91, 0x9b, 0x00, 0x6b, - 0xed, 0x07, 0x4f, 0x7c, 0x7d, 0xd1, 0x75, 0x98, 0x32, 0x15, 0x83, 0xd4, 0xa5, 0xeb, 0xd2, 0x8d, - 0xd9, 0xd6, 0xc5, 0x8f, 0x87, 0x8d, 0x0b, 0x47, 0xc3, 0xc6, 0xd4, 0x23, 0xc5, 0x20, 0x98, 0xcf, - 0xc8, 0xef, 0xc1, 0xe2, 0x7a, 0x7b, 0x67, 0x5b, 0x71, 0xfa, 0x84, 0xee, 0x50, 0x4d, 0xd7, 0xbe, - 0xa5, 0x50, 0xc6, 0xb9, 0x01, 0x0b, 0x94, 0x0f, 0xb6, 0x89, 0xd3, 0x23, 0x26, 0x55, 0xfa, 0x3e, - 0x4a, 0xa5, 0x55, 0x17, 0x28, 0x0b, 0xdb, 0xa9, 0x79, 0x3c, 0xc2, 0x21, 0xff, 0x86, 0x04, 0x2f, - 0xac, 0x7b, 0x2e, 0xb5, 0x8c, 0x2d, 0x42, 0x1d, 0xad, 0xb7, 0xee, 0x39, 0x0e, 0x31, 0x69, 0x87, - 0x2a, 0xd4, 0x73, 0x9f, 
0xaf, 0x1d, 0x7a, 0x0a, 0x95, 0x03, 0x45, 0xf7, 0x48, 0xbd, 0x74, 0x5d, - 0xba, 0x51, 0xbb, 0xf5, 0x5a, 0x33, 0xd7, 0x6d, 0x9a, 0xc1, 0x83, 0x6d, 0x7e, 0xc5, 0x53, 0x4c, - 0xaa, 0xd1, 0x41, 0x6b, 0x51, 0x00, 0x5e, 0x14, 0x52, 0x9f, 0x30, 0x24, 0xec, 0x03, 0xca, 0xdf, - 0x93, 0xe0, 0xe5, 0x5c, 0xcd, 0x36, 0x35, 0x97, 0x22, 0x03, 0x2a, 0x1a, 0x25, 0x86, 0x5b, 0x97, - 0xae, 0x97, 0x6f, 0xd4, 0x6e, 0xdd, 0x6f, 0x16, 0x72, 0xd9, 0x66, 0x2e, 0x78, 0x6b, 0x4e, 0xe8, - 0x55, 0x79, 0xc0, 0xe0, 0xb1, 0x2f, 0x45, 0xfe, 0x75, 0x09, 0x50, 0x9c, 0xc7, 0xb7, 0xee, 0x31, - 0x6c, 0xf4, 0xd5, 0x93, 0xd8, 0xe8, 0x8a, 0x00, 0xac, 0xf9, 0xe2, 0x12, 0x26, 0xfa, 0xae, 0x04, - 0x4b, 0xa3, 0x1a, 0x71, 0xdb, 0xec, 0x26, 0x6d, 0xb3, 0x76, 0x02, 0xdb, 0xf8, 0xa8, 0x39, 0x46, - 0xf9, 0x83, 0x12, 0xcc, 0x6e, 0x28, 0xc4, 0xb0, 0xcc, 0x0e, 0xa1, 0xe8, 0x29, 0x54, 0x0d, 0x42, - 0x15, 0x55, 0xa1, 0x0a, 0xb7, 0x47, 0xed, 0xd6, 0x8d, 0x31, 0x8b, 0x3d, 0xb8, 0xd9, 0x7c, 0xdc, - 0xfd, 0x80, 0xf4, 0xe8, 0x16, 0xa1, 0x4a, 0x0b, 0x09, 0x7c, 0x88, 0xc6, 0x70, 0x88, 0x86, 0xde, - 0x87, 0x29, 0xd7, 0x26, 0x3d, 0x61, 0xc2, 0x77, 0x0a, 0x2e, 0x27, 0xd4, 0xb0, 0x63, 0x93, 0x5e, - 0xf4, 0x8c, 0xd8, 0x3f, 0xcc, 0x71, 0xd1, 0x2e, 0x4c, 0xbb, 0xfc, 0xe1, 0xd7, 0xcb, 0x5c, 0xc2, - 0xbb, 0x13, 0x4b, 0xf0, 0x5d, 0x68, 0x5e, 0xc8, 0x98, 0xf6, 0xff, 0x63, 0x81, 0x2e, 0xff, 0xad, - 0x04, 0x73, 0x21, 0x2d, 0x7f, 0x52, 0xdf, 0x18, 0xb1, 0xd9, 0xea, 0x18, 0x9b, 0xc5, 0x22, 0x5d, - 0x93, 0xb1, 0x73, 0xd3, 0x2d, 0x08, 0x61, 0xd5, 0x60, 0x24, 0x66, 0xb8, 0x6f, 0x04, 0x8e, 0x50, - 0xe2, 0x8e, 0xf0, 0xe6, 0xa4, 0xeb, 0xca, 0x79, 0xfe, 0x7f, 0x13, 0x5f, 0x0f, 0xb3, 0x27, 0x7a, - 0x1f, 0xaa, 0x2e, 0xd1, 0x49, 0x8f, 0x5a, 0x8e, 0x58, 0xcf, 0x1b, 0xc7, 0x5d, 0x8f, 0xd2, 0x25, - 0x7a, 0x47, 0xf0, 0xb6, 0x2e, 0xb2, 0x05, 0x05, 0xff, 0x70, 0x88, 0x89, 0xbe, 0x0e, 0x55, 0x4a, - 0x0c, 0x5b, 0x57, 0x68, 0xf0, 0x42, 0xbd, 0x3e, 0xde, 0xc7, 0xda, 0x96, 0xba, 0x2d, 0x18, 0xf8, - 0xe3, 0x0f, 0xad, 0x15, 0x8c, 0xe2, 0x10, 0x50, 0xfe, 0x8b, 0x12, 0x5c, 0x4a, 0x3d, 0x4a, 0xf4, - 0x04, 0x96, 0x7a, 0x7e, 0x78, 0x78, 0xe4, 0x19, 0x5d, 0xe2, 0x74, 0x7a, 0x7b, 0x44, 0xf5, 0x74, - 0xa2, 0x8a, 0x70, 0xbb, 0x22, 0xf0, 0x96, 0xd6, 0x33, 0xa9, 0x70, 0x0e, 0x37, 0x7a, 0x08, 0xc8, - 0xe4, 0x43, 0x5b, 0x9a, 0xeb, 0x86, 0x98, 0x25, 0x8e, 0xb9, 0x2c, 0x30, 0xd1, 0xa3, 0x11, 0x0a, - 0x9c, 0xc1, 0xc5, 0x74, 0x54, 0x89, 0xab, 0x39, 0x44, 0x4d, 0xeb, 0x58, 0x4e, 0xea, 0xb8, 0x91, - 0x49, 0x85, 0x73, 0xb8, 0xd1, 0x6d, 0xa8, 0xf9, 0xd2, 0x30, 0x51, 0xd4, 0x41, 0x7d, 0x8a, 0x83, - 0x85, 0x21, 0xe9, 0x51, 0x34, 0x85, 0xe3, 0x74, 0xf2, 0x1f, 0x96, 0x00, 0x36, 0x88, 0xad, 0x5b, - 0x03, 0x83, 0x98, 0x67, 0x19, 0x16, 0xbe, 0x99, 0x08, 0x0b, 0x5f, 0x2c, 0xea, 0xdc, 0xa1, 0x8a, - 0xb9, 0x71, 0xa1, 0x9f, 0x8a, 0x0b, 0x5f, 0x9a, 0x5c, 0xc4, 0xf8, 0xc0, 0xf0, 0x93, 0x32, 0x5c, - 0x89, 0x88, 0xd7, 0x2d, 0x53, 0xd5, 0x78, 0x9a, 0x7f, 0x1b, 0xa6, 0xe8, 0xc0, 0x0e, 0xd2, 0xcb, - 0x67, 0x03, 0x15, 0xb7, 0x07, 0x36, 0x79, 0x36, 0x6c, 0x5c, 0xcb, 0x60, 0x61, 0x53, 0x98, 0x33, - 0xa1, 0x27, 0xa1, 0xf6, 0x25, 0xce, 0xfe, 0x6e, 0x52, 0xf8, 0xb3, 0x61, 0x63, 0x6c, 0x75, 0xd4, - 0x0c, 0x31, 0x93, 0xca, 0xa2, 0x57, 0x61, 0xda, 0x21, 0x8a, 0x6b, 0x99, 0xdc, 0x23, 0x66, 0xa3, - 0x45, 0x61, 0x3e, 0x8a, 0xc5, 0x2c, 0xfa, 0x1c, 0xcc, 0x18, 0xc4, 0x75, 0x59, 0x69, 0x52, 0xe1, - 0x84, 0x97, 0x04, 0xe1, 0xcc, 0x96, 0x3f, 0x8c, 0x83, 0x79, 0xb4, 0x0f, 0xf3, 0xba, 0xe2, 0xd2, - 0x1d, 0x5b, 0x55, 0x28, 0xd9, 0xd6, 0x0c, 0x52, 0x9f, 0xe6, 0x06, 0xff, 0xd9, 0x63, 0x06, 0x0f, - 0xc6, 0xd2, 0x5a, 0x12, 0xf0, 0xf3, 0x9b, 0x09, 
0x28, 0x9c, 0x82, 0x46, 0x1f, 0x01, 0x62, 0x23, - 0xdb, 0x8e, 0x62, 0xba, 0xbe, 0xcd, 0x98, 0xc0, 0x99, 0xe2, 0x02, 0xc3, 0xf7, 0x74, 0x73, 0x04, - 0x0e, 0x67, 0x88, 0x90, 0xff, 0x4e, 0x82, 0xf9, 0xe8, 0x91, 0x9d, 0x47, 0xfc, 0x7f, 0x3f, 0x19, - 0xff, 0xbf, 0x30, 0xb1, 0xff, 0xe6, 0x24, 0x80, 0xdf, 0x2c, 0x03, 0x8a, 0x88, 0xb0, 0xa5, 0xeb, - 0x5d, 0xa5, 0xb7, 0x7f, 0x8c, 0xaa, 0xe8, 0xf7, 0x24, 0x40, 0x1e, 0x7f, 0x24, 0xea, 0x9a, 0x69, - 0x5a, 0x94, 0x57, 0xb5, 0x81, 0x9a, 0xbf, 0x34, 0xb1, 0x9a, 0x81, 0x06, 0xcd, 0x9d, 0x11, 0xec, - 0x3b, 0x26, 0x75, 0x06, 0xd1, 0x23, 0x1b, 0x25, 0xc0, 0x19, 0x0a, 0xa1, 0x0f, 0x01, 0x1c, 0x81, - 0xb9, 0x6d, 0x89, 0x28, 0x50, 0x34, 0xd0, 0x04, 0x4a, 0xad, 0x5b, 0xe6, 0xae, 0xd6, 0x8f, 0x62, - 0x1a, 0x0e, 0x81, 0x71, 0x4c, 0xc8, 0xf2, 0x1d, 0xb8, 0x96, 0xa3, 0x3d, 0x5a, 0x80, 0xf2, 0x3e, - 0x19, 0xf8, 0x66, 0xc5, 0xec, 0x27, 0x5a, 0x8c, 0x57, 0x97, 0xb3, 0xa2, 0x34, 0x7c, 0xab, 0xf4, - 0xa6, 0x24, 0xff, 0xb4, 0x12, 0x77, 0x36, 0x9e, 0x9c, 0x6f, 0x40, 0xd5, 0x21, 0xb6, 0xae, 0xf5, - 0x14, 0x57, 0x64, 0x2f, 0x9e, 0x66, 0xb1, 0x18, 0xc3, 0xe1, 0x6c, 0x22, 0x8d, 0x97, 0xce, 0x38, - 0x8d, 0x97, 0x4f, 0x39, 0x8d, 0x23, 0x0b, 0xaa, 0x2e, 0x65, 0x9b, 0xae, 0xbe, 0x9f, 0xb3, 0x8a, - 0x17, 0xc0, 0xf1, 0xb8, 0xed, 0x03, 0x45, 0x02, 0x83, 0x11, 0x1c, 0x0a, 0x41, 0x6b, 0x70, 0xc9, - 0xd0, 0x4c, 0x9e, 0xfc, 0x3a, 0xa4, 0x67, 0x99, 0xaa, 0xcb, 0x03, 0x5e, 0xa5, 0x75, 0x4d, 0x30, - 0x5d, 0xda, 0x4a, 0x4e, 0xe3, 0x34, 0x3d, 0xda, 0x84, 0x45, 0x87, 0x1c, 0x68, 0x4c, 0x8d, 0xfb, - 0x9a, 0x4b, 0x2d, 0x67, 0xb0, 0xa9, 0x19, 0x1a, 0xe5, 0x61, 0xb0, 0xd2, 0xaa, 0x1f, 0x0d, 0x1b, - 0x8b, 0x38, 0x63, 0x1e, 0x67, 0x72, 0xb1, 0x08, 0x6d, 0x2b, 0x9e, 0x4b, 0x54, 0x1e, 0xd5, 0xaa, - 0x51, 0x84, 0x6e, 0xf3, 0x51, 0x2c, 0x66, 0x91, 0x91, 0xf0, 0xee, 0xea, 0x69, 0x78, 0xf7, 0x7c, - 0xbe, 0x67, 0xa3, 0x1d, 0xb8, 0x66, 0x3b, 0x56, 0xdf, 0x21, 0xae, 0xbb, 0x41, 0x14, 0x55, 0xd7, - 0x4c, 0x12, 0xd8, 0x6b, 0x96, 0xaf, 0xf3, 0xc5, 0xa3, 0x61, 0xe3, 0x5a, 0x3b, 0x9b, 0x04, 0xe7, - 0xf1, 0xca, 0x9f, 0x94, 0x61, 0x21, 0x9d, 0x69, 0x59, 0x7d, 0x65, 0x75, 0x5d, 0xe2, 0x1c, 0x10, - 0xf5, 0x9e, 0xbf, 0x07, 0xd7, 0x2c, 0x93, 0x7b, 0x7d, 0x39, 0x0a, 0x02, 0x8f, 0x47, 0x28, 0x70, - 0x06, 0x17, 0x7a, 0x2d, 0xf6, 0xde, 0xf8, 0x15, 0x5a, 0xe8, 0x0d, 0x19, 0xef, 0xce, 0x1a, 0x5c, - 0x12, 0x81, 0x24, 0x98, 0x14, 0x65, 0x58, 0xe8, 0x0d, 0x3b, 0xc9, 0x69, 0x9c, 0xa6, 0x47, 0xf7, - 0xe0, 0xb2, 0x72, 0xa0, 0x68, 0xba, 0xd2, 0xd5, 0x49, 0x08, 0xe2, 0x97, 0x5f, 0x2f, 0x08, 0x90, - 0xcb, 0x6b, 0x69, 0x02, 0x3c, 0xca, 0x83, 0xb6, 0xe0, 0x8a, 0x67, 0x8e, 0x42, 0xf9, 0xde, 0xf9, - 0xa2, 0x80, 0xba, 0xb2, 0x33, 0x4a, 0x82, 0xb3, 0xf8, 0xd0, 0x01, 0x40, 0x2f, 0x28, 0x0a, 0xdc, - 0xfa, 0x34, 0x0f, 0xd6, 0xad, 0x89, 0xdf, 0xad, 0xb0, 0xbe, 0x88, 0x42, 0x62, 0x38, 0xe4, 0xe2, - 0x98, 0x24, 0xf9, 0xef, 0xa5, 0x78, 0x9a, 0x09, 0xde, 0x40, 0xf4, 0x56, 0xa2, 0x3a, 0x7a, 0x35, - 0x55, 0x1d, 0x2d, 0x8d, 0x72, 0xc4, 0x8a, 0xa3, 0xef, 0xc0, 0x1c, 0xf3, 0x4c, 0xcd, 0xec, 0xfb, - 0x4f, 0x43, 0x84, 0xb9, 0xbb, 0x13, 0x78, 0x7f, 0x88, 0x11, 0x4b, 0x97, 0x97, 0x8f, 0x86, 0x8d, - 0xb9, 0xc4, 0x24, 0x4e, 0xca, 0x93, 0xdf, 0x83, 0xb9, 0x3b, 0x87, 0xb6, 0xe5, 0xd0, 0xc7, 0xb6, - 0x9f, 0x6a, 0x5e, 0x85, 0x69, 0xc2, 0x07, 0xf8, 0x7a, 0x62, 0x2f, 0xad, 0x4f, 0x86, 0xc5, 0x2c, - 0x7a, 0x05, 0x2a, 0xe4, 0x50, 0xe9, 0x51, 0xae, 0x71, 0x35, 0x4a, 0xcc, 0x77, 0xd8, 0x20, 0xf6, - 0xe7, 0xe4, 0x1f, 0x4a, 0xb0, 0x74, 0xb7, 0x73, 0xcf, 0xb1, 0x3c, 0x3b, 0x58, 0x7c, 0x20, 0xe7, - 0x17, 0x60, 0xca, 0xf1, 0xf4, 0xc0, 0x6a, 0xaf, 0x04, 0x56, 0xc3, 0x9e, 
0xce, 0xac, 0x76, 0x25, - 0xc5, 0xe5, 0x9b, 0x8c, 0x31, 0xa0, 0xf7, 0x61, 0xda, 0x51, 0xcc, 0x3e, 0x09, 0xd2, 0xf4, 0xe7, - 0x0b, 0xda, 0xea, 0xc1, 0x06, 0x66, 0xec, 0xb1, 0x7a, 0x91, 0xa3, 0x61, 0x81, 0x2a, 0xff, 0x8e, - 0x04, 0x97, 0xee, 0x6f, 0x6f, 0xb7, 0x1f, 0x98, 0xfc, 0x35, 0x6f, 0x2b, 0x74, 0x8f, 0x55, 0x12, - 0xb6, 0x42, 0xf7, 0xd2, 0x95, 0x04, 0x9b, 0xc3, 0x7c, 0x06, 0xed, 0xc1, 0x0c, 0x0b, 0x2f, 0xc4, - 0x54, 0x27, 0xdc, 0x07, 0x08, 0x71, 0x2d, 0x1f, 0x24, 0x2a, 0x52, 0xc5, 0x00, 0x0e, 0xe0, 0xe5, - 0x6f, 0xc3, 0x62, 0x4c, 0x3d, 0x66, 0x2f, 0x7e, 0x1e, 0x83, 0x7a, 0x50, 0x61, 0x9a, 0x04, 0xa7, - 0x2d, 0x45, 0x0f, 0x0f, 0x52, 0x4b, 0x8e, 0x1e, 0x28, 0xfb, 0xe7, 0x62, 0x1f, 0x5b, 0xfe, 0xa7, - 0x12, 0x5c, 0xbb, 0x6f, 0x39, 0xda, 0xb7, 0x2c, 0x93, 0x2a, 0x7a, 0xdb, 0x52, 0xd7, 0x3c, 0x6a, - 0xb9, 0x3d, 0x45, 0x27, 0xce, 0x19, 0xee, 0xb0, 0xf4, 0xc4, 0x0e, 0xeb, 0x61, 0xd1, 0x95, 0x65, - 0xeb, 0x9b, 0xbb, 0xdd, 0xa2, 0xa9, 0xed, 0xd6, 0xe6, 0x29, 0xc9, 0x1b, 0xbf, 0xf7, 0xfa, 0x0f, - 0x09, 0x5e, 0xcc, 0xe1, 0x3c, 0x8f, 0x12, 0x7d, 0x3f, 0x59, 0xa2, 0xdf, 0x3d, 0x9d, 0x35, 0xe7, - 0xd4, 0xeb, 0xff, 0x53, 0xca, 0x5d, 0x2b, 0xaf, 0x10, 0x3f, 0x84, 0x2a, 0xff, 0x87, 0xc9, 0xae, - 0x58, 0xeb, 0x7a, 0x41, 0x7d, 0x3a, 0x5e, 0x37, 0x38, 0xc6, 0xc4, 0x64, 0x97, 0x38, 0xc4, 0xec, - 0x91, 0x58, 0xf1, 0x24, 0xc0, 0x71, 0x28, 0x06, 0xdd, 0x84, 0x1a, 0x2f, 0x86, 0x12, 0xf9, 0xf5, - 0xd2, 0xd1, 0xb0, 0x51, 0xdb, 0x8a, 0x86, 0x71, 0x9c, 0x06, 0xdd, 0x86, 0x9a, 0xa1, 0x1c, 0xa6, - 0xb2, 0x6b, 0x78, 0x2e, 0xb1, 0x15, 0x4d, 0xe1, 0x38, 0x1d, 0xfa, 0x0e, 0xcc, 0xf7, 0x6c, 0x2f, - 0x76, 0x8a, 0x2e, 0xaa, 0xc3, 0xa2, 0x4b, 0xcc, 0x3a, 0x90, 0x6f, 0x21, 0xb6, 0xf1, 0x5c, 0x6f, - 0xef, 0xc4, 0xc6, 0x70, 0x4a, 0x9c, 0xfc, 0xe7, 0x65, 0x78, 0x79, 0xac, 0x8f, 0xa2, 0xbb, 0x63, - 0xaa, 0x96, 0xa5, 0x02, 0x15, 0x8b, 0x0a, 0x73, 0x6c, 0xff, 0xc9, 0xcd, 0xcd, 0x77, 0xb7, 0xa5, - 0xe2, 0xbb, 0x5b, 0x9e, 0xc2, 0x36, 0xe3, 0x28, 0x38, 0x09, 0xca, 0x2a, 0x1d, 0x71, 0xba, 0x95, - 0x57, 0xe9, 0xac, 0x27, 0xa7, 0x71, 0x9a, 0x9e, 0x41, 0x88, 0xc3, 0xa7, 0x54, 0x9d, 0x13, 0x42, - 0x6c, 0x24, 0xa7, 0x71, 0x9a, 0x1e, 0x19, 0xd0, 0x10, 0xa8, 0x49, 0xf3, 0xc7, 0x6e, 0x46, 0xfc, - 0x7a, 0xe7, 0x95, 0xa3, 0x61, 0xa3, 0xb1, 0x3e, 0x9e, 0x14, 0x3f, 0x0f, 0x4b, 0xde, 0x82, 0xb9, - 0xfb, 0x96, 0x4b, 0xdb, 0x2c, 0x25, 0xb3, 0xbc, 0x85, 0x5e, 0x86, 0xb2, 0xa1, 0x99, 0x62, 0x43, - 0x55, 0x13, 0x6a, 0x97, 0x99, 0xf3, 0xb2, 0x71, 0x3e, 0xad, 0x1c, 0x0a, 0xbf, 0x8e, 0xa6, 0x95, - 0x43, 0xcc, 0xc6, 0xe5, 0x7b, 0x30, 0x23, 0xf2, 0x62, 0x1c, 0xa8, 0x3c, 0x1e, 0xa8, 0x9c, 0x01, - 0xf4, 0xfb, 0x25, 0x98, 0x11, 0x69, 0xe4, 0x0c, 0x13, 0xc2, 0x7b, 0x89, 0x84, 0xf0, 0xd6, 0x64, - 0xa9, 0x36, 0x37, 0x01, 0xa8, 0xa9, 0x04, 0xf0, 0xce, 0x84, 0xf8, 0xe3, 0x03, 0xfe, 0x0f, 0x24, - 0x98, 0x4f, 0x26, 0x7d, 0x16, 0x51, 0xd8, 0x3b, 0xa4, 0xf5, 0xc8, 0xa3, 0xe8, 0xdc, 0x22, 0x8c, - 0x28, 0x9d, 0x68, 0x0a, 0xc7, 0xe9, 0x10, 0x09, 0xd9, 0x98, 0x3b, 0x08, 0xa3, 0x34, 0x73, 0x94, - 0xf6, 0xa8, 0xa6, 0x37, 0xfd, 0x6b, 0xc1, 0xe6, 0x03, 0x93, 0x3e, 0x76, 0x3a, 0xd4, 0xd1, 0xcc, - 0xfe, 0x88, 0x18, 0xee, 0x59, 0x71, 0x5c, 0xf9, 0xaf, 0x25, 0xa8, 0x09, 0x85, 0xcf, 0x23, 0x23, - 0x7d, 0x3d, 0x99, 0x91, 0x3e, 0x3f, 0x61, 0x3d, 0x95, 0x9d, 0x81, 0x7e, 0x14, 0xad, 0x85, 0x55, - 0x50, 0xac, 0xc0, 0xdb, 0xb3, 0x5c, 0x9a, 0x2e, 0xf0, 0xd8, 0x2b, 0x86, 0xf9, 0x0c, 0xfa, 0x35, - 0x09, 0x16, 0xb4, 0x54, 0xcd, 0x25, 0x4c, 0xfd, 0xa5, 0xc9, 0x54, 0x0b, 0x61, 0xa2, 0xcb, 0xd2, - 0xf4, 0x0c, 0x1e, 0x11, 0x29, 0x7b, 0x30, 0x42, 0x85, 0x14, 0x98, 0xda, 0xa3, 0xd4, 0x9e, 0x30, - 
0x57, 0x66, 0x55, 0x93, 0xad, 0x2a, 0x5f, 0xfe, 0xf6, 0x76, 0x1b, 0x73, 0x68, 0xf9, 0x07, 0xa5, - 0xd0, 0x60, 0x1d, 0xff, 0x1d, 0x09, 0xeb, 0x5d, 0xe9, 0x34, 0xea, 0xdd, 0x5a, 0x56, 0xad, 0x8b, - 0x9e, 0x42, 0x99, 0xea, 0x93, 0x1e, 0x1b, 0x0a, 0x09, 0xdb, 0x9b, 0x9d, 0x28, 0x4e, 0x6d, 0x6f, - 0x76, 0x30, 0x83, 0x44, 0xdf, 0x84, 0x0a, 0xdb, 0x4d, 0xb0, 0x57, 0xbc, 0x3c, 0x79, 0x08, 0x61, - 0xf6, 0x8a, 0x3c, 0x8c, 0xfd, 0x73, 0xb1, 0x8f, 0x2b, 0x7f, 0x1b, 0xe6, 0x12, 0x71, 0x00, 0x7d, - 0x00, 0x17, 0x75, 0x4b, 0x51, 0x5b, 0x8a, 0xae, 0x98, 0x3d, 0x12, 0xdc, 0x4b, 0xfd, 0xdc, 0xf8, - 0x88, 0xb8, 0x19, 0xe3, 0x10, 0xf1, 0x24, 0xbc, 0xb0, 0x8e, 0xcf, 0xe1, 0x04, 0xb6, 0xac, 0x00, - 0x44, 0xab, 0x47, 0x0d, 0xa8, 0x30, 0x17, 0xf6, 0x77, 0x06, 0xb3, 0xad, 0x59, 0xa6, 0x2b, 0xf3, - 0x6c, 0x17, 0xfb, 0xe3, 0xe8, 0x16, 0x80, 0x4b, 0x7a, 0x0e, 0xa1, 0x3c, 0xec, 0xf8, 0xc7, 0xf4, - 0x61, 0x00, 0xee, 0x84, 0x33, 0x38, 0x46, 0x25, 0xff, 0x56, 0x09, 0xca, 0x0f, 0xad, 0xee, 0x19, - 0x06, 0xf9, 0xa7, 0x89, 0x20, 0x5f, 0xf4, 0xfd, 0x7f, 0x68, 0x75, 0x73, 0x03, 0xfc, 0x2f, 0xa7, - 0x02, 0xfc, 0x9b, 0x13, 0x60, 0x8f, 0x0f, 0xee, 0xff, 0x50, 0x86, 0x8b, 0x0f, 0xad, 0x6e, 0x74, - 0x85, 0xf2, 0x46, 0xe2, 0x90, 0xe0, 0x7a, 0xea, 0x90, 0x60, 0x21, 0x4e, 0x7b, 0x0e, 0x77, 0x27, - 0x7b, 0x7e, 0x61, 0xd6, 0x76, 0xac, 0xae, 0x5f, 0x98, 0x95, 0x8b, 0x17, 0x66, 0x57, 0x85, 0x2e, - 0xbc, 0x38, 0x0b, 0x91, 0x70, 0x12, 0x38, 0xe7, 0x96, 0x63, 0xea, 0xcc, 0x6f, 0x39, 0x62, 0xd7, - 0x43, 0x95, 0xe3, 0x5e, 0x0f, 0x4d, 0x8f, 0xbf, 0x1e, 0x92, 0xff, 0x4c, 0x82, 0x99, 0x87, 0x56, - 0xf7, 0x3c, 0x92, 0xdf, 0x57, 0x93, 0xc9, 0xef, 0x56, 0x71, 0x07, 0xcd, 0x49, 0x7c, 0x7f, 0x54, - 0xe6, 0x6b, 0xe0, 0x31, 0xfc, 0x26, 0xd4, 0x6c, 0xc5, 0x51, 0x74, 0x9d, 0xe8, 0x9a, 0x6b, 0x88, - 0xd2, 0x91, 0xef, 0x79, 0xda, 0xd1, 0x30, 0x8e, 0xd3, 0x30, 0x96, 0x9e, 0x65, 0xd8, 0x3a, 0x09, - 0x2e, 0x4a, 0x42, 0x96, 0xf5, 0x68, 0x18, 0xc7, 0x69, 0xd0, 0x63, 0xb8, 0xaa, 0xf4, 0xa8, 0x76, - 0x40, 0xd2, 0x87, 0xad, 0x65, 0x5e, 0x42, 0xbe, 0x70, 0x34, 0x6c, 0x5c, 0x5d, 0xcb, 0x22, 0xc0, - 0xd9, 0x7c, 0x89, 0x5b, 0x81, 0xa9, 0x33, 0xb8, 0x15, 0x78, 0x03, 0x2e, 0x2a, 0x1e, 0xb5, 0x82, - 0x19, 0xee, 0x3f, 0xd5, 0xd6, 0x02, 0x0b, 0xb9, 0x6b, 0xb1, 0x71, 0x9c, 0xa0, 0x4a, 0xdc, 0x25, - 0x4c, 0x9f, 0x76, 0x4b, 0xc0, 0x9f, 0x96, 0x61, 0x36, 0x0c, 0x3a, 0xc8, 0x4a, 0x9c, 0x7f, 0xfa, - 0xc7, 0x3d, 0x6f, 0x17, 0xf7, 0x90, 0x63, 0x1f, 0x7c, 0xa2, 0xa7, 0x30, 0xeb, 0x52, 0xc5, 0xa1, - 0x93, 0xee, 0xe1, 0xe6, 0x8e, 0x86, 0x8d, 0xd9, 0x4e, 0x80, 0x80, 0x23, 0x30, 0xd4, 0x87, 0xf9, - 0xc8, 0x57, 0x26, 0x8d, 0x44, 0xfe, 0xa6, 0x37, 0x01, 0x83, 0x53, 0xb0, 0x2c, 0x1c, 0xf8, 0xde, - 0x24, 0x36, 0x76, 0x61, 0x38, 0xf0, 0x5d, 0x0f, 0x8b, 0x59, 0xb4, 0x0a, 0xb3, 0xae, 0xd7, 0xeb, - 0x11, 0xa2, 0x12, 0x55, 0x6c, 0xd8, 0x2e, 0x0b, 0xd2, 0xd9, 0x4e, 0x30, 0x81, 0x23, 0x1a, 0x06, - 0xbc, 0xab, 0x68, 0x3a, 0x51, 0xc5, 0x25, 0x49, 0x08, 0x7c, 0x97, 0x8f, 0x62, 0x31, 0xcb, 0x9b, - 0x6e, 0x1e, 0x11, 0xfa, 0x91, 0xe5, 0xec, 0xb7, 0x2d, 0x5d, 0xeb, 0x0d, 0xce, 0x30, 0x73, 0x76, - 0x13, 0x99, 0xf3, 0xcb, 0x05, 0x5d, 0x23, 0xa1, 0x65, 0x5e, 0x0e, 0x95, 0xff, 0x5d, 0x82, 0x7a, - 0x82, 0x32, 0x5e, 0x4e, 0x13, 0xa8, 0xd8, 0x96, 0x43, 0x03, 0xe7, 0x3c, 0x91, 0x06, 0x6c, 0xef, - 0x11, 0x3b, 0x8d, 0x64, 0xb0, 0xd8, 0x47, 0x67, 0xeb, 0xdc, 0x75, 0x2c, 0x43, 0x04, 0xc9, 0x93, - 0x49, 0x21, 0xc4, 0x89, 0xd6, 0x79, 0xd7, 0xb1, 0x0c, 0xcc, 0xb1, 0xe5, 0x7f, 0x94, 0xe0, 0x72, - 0x82, 0xf2, 0x3c, 0xc2, 0xbf, 0x92, 0x0c, 0xff, 0xef, 0x9c, 0x64, 0x65, 0x39, 0x89, 0xe0, 0xbf, - 0xd3, 0xeb, 0x62, 0x16, 
0x40, 0x7d, 0xa8, 0xd9, 0x96, 0xda, 0x39, 0x8d, 0xde, 0x29, 0x3f, 0x91, - 0x44, 0x60, 0x38, 0x8e, 0x8c, 0x06, 0x70, 0xd9, 0x54, 0x0c, 0xe2, 0xda, 0x4a, 0x8f, 0x74, 0x4e, - 0xe3, 0x8e, 0xf7, 0xea, 0xd1, 0xb0, 0x71, 0xf9, 0x51, 0x1a, 0x12, 0x8f, 0x4a, 0x91, 0xff, 0x78, - 0x64, 0xe5, 0x96, 0x43, 0xd1, 0x57, 0xa0, 0xca, 0x7b, 0x63, 0x7b, 0x96, 0x2e, 0x8a, 0xb4, 0xdb, - 0xec, 0xe1, 0xb4, 0xc5, 0xd8, 0xb3, 0x61, 0xe3, 0x67, 0xc6, 0x96, 0x5a, 0x01, 0x21, 0x0e, 0x61, - 0xd0, 0x26, 0x4c, 0xd9, 0x93, 0x6f, 0xc8, 0xf9, 0x0e, 0x8c, 0xef, 0xc2, 0x39, 0x8a, 0xfc, 0x9f, - 0x69, 0xb5, 0x79, 0x0e, 0xdf, 0x3f, 0xbd, 0x07, 0x16, 0x9e, 0x00, 0xe4, 0x3e, 0x34, 0x07, 0x66, - 0xc4, 0x7e, 0x54, 0x38, 0xe6, 0xbd, 0x93, 0x38, 0x66, 0x7c, 0x0f, 0x15, 0x16, 0x5d, 0xc1, 0x60, - 0x20, 0x88, 0xbf, 0x7f, 0x5c, 0xa1, 0x9e, 0xe7, 0x68, 0x74, 0x70, 0xe6, 0xb1, 0x73, 0x37, 0x11, - 0x3b, 0x37, 0x0a, 0x2e, 0x70, 0x44, 0xd3, 0xdc, 0xf8, 0xf9, 0xaf, 0x12, 0x5c, 0x1d, 0xa1, 0x3e, - 0x8f, 0xd8, 0x42, 0x92, 0xb1, 0xe5, 0xcb, 0x27, 0x5d, 0x61, 0x4e, 0x7c, 0xf9, 0x18, 0x32, 0xd6, - 0xc7, 0x5d, 0xf6, 0x16, 0x80, 0xed, 0x68, 0x07, 0x9a, 0x4e, 0xfa, 0xa2, 0x7f, 0xb1, 0x1a, 0x3d, - 0x93, 0x76, 0x38, 0x83, 0x63, 0x54, 0xe8, 0x57, 0x60, 0x49, 0x25, 0xbb, 0x8a, 0xa7, 0xd3, 0x35, - 0x55, 0x5d, 0x57, 0x6c, 0xa5, 0xab, 0xe9, 0x1a, 0xd5, 0xc4, 0x25, 0xe0, 0x6c, 0xeb, 0x8e, 0xdf, - 0x57, 0x98, 0x45, 0xf1, 0x6c, 0xd8, 0xf8, 0xec, 0xf8, 0x4d, 0x52, 0x40, 0x3c, 0xc0, 0x39, 0x42, - 0xd0, 0xaf, 0x4a, 0x50, 0x77, 0xc8, 0x87, 0x9e, 0xe6, 0x10, 0x75, 0xc3, 0xb1, 0xec, 0x84, 0x06, - 0x65, 0xae, 0xc1, 0xbd, 0xa3, 0x61, 0xa3, 0x8e, 0x73, 0x68, 0x8a, 0xe8, 0x90, 0x2b, 0x08, 0x51, - 0xb8, 0xa2, 0xe8, 0xba, 0xf5, 0x11, 0x49, 0x5a, 0x60, 0x8a, 0xcb, 0x6f, 0x1d, 0x0d, 0x1b, 0x57, - 0xd6, 0x46, 0xa7, 0x8b, 0x88, 0xce, 0x82, 0x47, 0xab, 0x30, 0x73, 0x60, 0xe9, 0x9e, 0x41, 0xdc, - 0x7a, 0x85, 0x4b, 0x62, 0x91, 0x76, 0xe6, 0x89, 0x3f, 0xf4, 0x8c, 0x15, 0x3a, 0x1d, 0xbe, 0x63, - 0x0d, 0xa8, 0xd0, 0x6d, 0xa8, 0xed, 0x59, 0x2e, 0x15, 0xef, 0x3a, 0x2f, 0x8b, 0xaa, 0x51, 0x70, - 0xb9, 0x1f, 0x4d, 0xe1, 0x38, 0x1d, 0x32, 0x60, 0x76, 0x4f, 0x9c, 0x68, 0xbb, 0xf5, 0x99, 0x89, - 0xf2, 0x5e, 0xe2, 0x44, 0x3c, 0xaa, 0xdb, 0x82, 0x61, 0x17, 0x47, 0x12, 0xd8, 0xbe, 0x8f, 0xff, - 0x79, 0xb0, 0xc1, 0x3b, 0x4e, 0xaa, 0x51, 0x08, 0xba, 0xef, 0x0f, 0xe3, 0x60, 0x3e, 0x20, 0x7d, - 0xd0, 0x5e, 0xe7, 0x0d, 0x22, 0x29, 0xd2, 0x07, 0xed, 0x75, 0x1c, 0xcc, 0x23, 0x1b, 0x66, 0x5c, - 0xb2, 0xa9, 0x99, 0xde, 0x61, 0x1d, 0xf8, 0xab, 0x7b, 0xa7, 0xe8, 0xc5, 0xd5, 0x1d, 0xce, 0x9d, - 0xba, 0x2d, 0x8f, 0x24, 0x8a, 0x79, 0x1c, 0x88, 0x41, 0x87, 0x30, 0xeb, 0x78, 0xe6, 0x9a, 0xbb, - 0xe3, 0x12, 0xa7, 0x5e, 0xe3, 0x32, 0x8b, 0x46, 0x65, 0x1c, 0xf0, 0xa7, 0xa5, 0x86, 0x16, 0x0c, - 0x29, 0x70, 0x24, 0x0c, 0xfd, 0xb6, 0x04, 0xc8, 0xf5, 0x6c, 0x5b, 0x27, 0x06, 0x31, 0xa9, 0xa2, - 0xf3, 0x0b, 0x7b, 0xb7, 0x7e, 0x91, 0xeb, 0xd0, 0x2e, 0x7c, 0x61, 0x97, 0x06, 0x4a, 0x2b, 0x13, - 0x1e, 0x00, 0x8c, 0x92, 0xe2, 0x0c, 0x3d, 0xd8, 0xa3, 0xd8, 0x75, 0xf9, 0xef, 0xfa, 0xdc, 0x44, - 0x8f, 0x22, 0xbb, 0x71, 0x21, 0x7a, 0x14, 0x62, 0x1e, 0x07, 0x62, 0xd0, 0x13, 0x58, 0x72, 0x88, - 0xa2, 0x3e, 0x36, 0xf5, 0x01, 0xb6, 0x2c, 0x7a, 0x57, 0xd3, 0x89, 0x3b, 0x70, 0x29, 0x31, 0xea, - 0xf3, 0xdc, 0x6d, 0xc2, 0x06, 0x68, 0x9c, 0x49, 0x85, 0x73, 0xb8, 0x79, 0x27, 0xb3, 0xb8, 0x67, - 0x3a, 0xdb, 0x0f, 0x1c, 0x4e, 0xd6, 0xc9, 0x1c, 0xa9, 0x78, 0x66, 0x9d, 0xcc, 0x31, 0x11, 0xe3, - 0xcf, 0xdf, 0xfe, 0xab, 0x04, 0x57, 0x22, 0xe2, 0x63, 0x77, 0x32, 0x67, 0xb0, 0x9c, 0xc3, 0x69, - 0x5c, 0xf6, 0x19, 0x59, 0xf9, 0x3c, 0xcf, 0xc8, 
0x4e, 0xab, 0x85, 0x9a, 0x37, 0x17, 0x47, 0x56, - 0xfc, 0x3f, 0xd0, 0x5c, 0x1c, 0x29, 0x9b, 0x53, 0xc8, 0xfc, 0x49, 0x29, 0xbe, 0xa2, 0xff, 0x4f, - 0x1d, 0xac, 0x19, 0x0d, 0xa5, 0x53, 0xc5, 0x1a, 0x4a, 0xe5, 0x7f, 0x29, 0xc3, 0x42, 0xfa, 0xa5, - 0x4d, 0x34, 0x32, 0x4a, 0xcf, 0x6d, 0x64, 0x6c, 0xc3, 0xe2, 0xae, 0xa7, 0xeb, 0x03, 0x6e, 0x90, - 0xd8, 0x05, 0xbd, 0x7f, 0xf6, 0xf8, 0x92, 0xe0, 0x5c, 0xbc, 0x9b, 0x41, 0x83, 0x33, 0x39, 0x73, - 0x9a, 0x32, 0xcb, 0x13, 0x35, 0x65, 0xbe, 0x0d, 0x73, 0x0e, 0xff, 0xf6, 0x24, 0xd9, 0x37, 0x10, - 0x1e, 0x8e, 0xe3, 0xf8, 0x24, 0x4e, 0xd2, 0x66, 0x37, 0x58, 0x56, 0x26, 0x68, 0xb0, 0x3c, 0x8d, - 0x8e, 0xc8, 0x8c, 0xd8, 0xf7, 0xdc, 0x8e, 0xc8, 0x97, 0x60, 0x59, 0xb0, 0xb1, 0xff, 0xeb, 0x96, - 0x49, 0x1d, 0x4b, 0xd7, 0x89, 0xb3, 0xe1, 0x19, 0xc6, 0x40, 0x7e, 0x17, 0xe6, 0x93, 0x6d, 0xb9, - 0xfe, 0x93, 0xf7, 0x3b, 0x85, 0x45, 0x83, 0x41, 0xec, 0xc9, 0xfb, 0xe3, 0x38, 0xa4, 0x90, 0x3f, - 0x91, 0xe0, 0x5a, 0x4e, 0x67, 0x23, 0xfa, 0x00, 0xe6, 0x0d, 0xe5, 0x30, 0xd6, 0x32, 0x2a, 0x42, - 0x4b, 0xd1, 0x5d, 0x36, 0x3f, 0x3b, 0xdc, 0x4a, 0x20, 0xe1, 0x14, 0x32, 0x4f, 0xb8, 0xca, 0x61, - 0xc7, 0x73, 0xfa, 0x64, 0xc2, 0xbd, 0x3c, 0x7f, 0x7d, 0xb7, 0x04, 0x06, 0x0e, 0xd1, 0xe4, 0x1f, - 0x4a, 0x50, 0xcf, 0xab, 0xbe, 0xd0, 0xed, 0x44, 0x87, 0xe4, 0x67, 0x52, 0x1d, 0x92, 0x97, 0x47, - 0xf8, 0xce, 0xa9, 0x3f, 0xf2, 0x47, 0x12, 0x2c, 0x65, 0x57, 0xa9, 0xe8, 0xe7, 0x13, 0x1a, 0x37, - 0x52, 0x1a, 0x5f, 0x4a, 0x71, 0x09, 0x7d, 0xf7, 0x60, 0x5e, 0xd4, 0xb2, 0x02, 0xe6, 0x18, 0x9f, - 0xa8, 0x1e, 0x84, 0x85, 0x72, 0x50, 0x95, 0xf1, 0xe7, 0x98, 0x1c, 0xc3, 0x29, 0x5c, 0xf9, 0x77, - 0x4b, 0x50, 0xe1, 0x6d, 0x43, 0x67, 0x58, 0x42, 0x7d, 0x2d, 0x51, 0x42, 0x15, 0xbd, 0x58, 0xe4, - 0xda, 0xe5, 0x56, 0x4f, 0xdd, 0x54, 0xf5, 0xf4, 0xd6, 0x44, 0xe8, 0xe3, 0x0b, 0xa7, 0x2f, 0xc0, - 0x6c, 0xa8, 0x44, 0xb1, 0x40, 0xcd, 0xca, 0xd4, 0x5a, 0x4c, 0x44, 0xc1, 0x30, 0x7f, 0x90, 0xc8, - 0x94, 0x93, 0x7c, 0x4b, 0x1d, 0x93, 0xdd, 0x0c, 0x52, 0xa5, 0xff, 0xb9, 0x4d, 0xd4, 0xf8, 0x37, - 0x9a, 0x41, 0xdf, 0x85, 0x79, 0xff, 0x83, 0xf4, 0xf0, 0x0c, 0xad, 0xcc, 0xbd, 0x37, 0xfc, 0x8c, - 0x6b, 0x3b, 0x31, 0x8b, 0x53, 0xd4, 0xcb, 0x6f, 0xc3, 0x5c, 0x42, 0x58, 0xa1, 0xaf, 0x63, 0xfe, - 0x52, 0x82, 0xc5, 0xac, 0x56, 0x45, 0x74, 0x1d, 0xa6, 0xf6, 0x35, 0xd1, 0x5b, 0x11, 0xeb, 0x47, - 0xf9, 0x45, 0xcd, 0x54, 0x31, 0x9f, 0x09, 0x3f, 0x6e, 0x2a, 0xe5, 0x7e, 0xdc, 0x74, 0x0b, 0x40, - 0xb1, 0x35, 0xf1, 0x91, 0xbf, 0x58, 0x55, 0xe8, 0xbc, 0xd1, 0xe7, 0xff, 0x38, 0x46, 0xc5, 0x3b, - 0x90, 0x22, 0x7d, 0x44, 0x59, 0x18, 0xb5, 0x06, 0xc5, 0x54, 0x8d, 0xd3, 0xc9, 0x7f, 0x25, 0xc1, - 0x67, 0x9e, 0xbb, 0x83, 0x43, 0xad, 0x44, 0x78, 0x68, 0xa6, 0xc2, 0xc3, 0x4a, 0x3e, 0xc0, 0x39, - 0x76, 0x7f, 0x7f, 0xaf, 0x04, 0x68, 0x7b, 0x4f, 0x73, 0xd4, 0xb6, 0xe2, 0xd0, 0x01, 0x16, 0x0b, - 0x3c, 0xc3, 0x80, 0x71, 0x1b, 0x6a, 0x2a, 0x71, 0x7b, 0x8e, 0xc6, 0x8d, 0x24, 0x1e, 0x67, 0x68, - 0xf1, 0x8d, 0x68, 0x0a, 0xc7, 0xe9, 0x50, 0x1f, 0xaa, 0xa2, 0x56, 0x0c, 0x5a, 0x58, 0x8a, 0x16, - 0xbe, 0x91, 0x07, 0x44, 0xef, 0x87, 0x18, 0x70, 0x71, 0x08, 0x2e, 0x7f, 0x5f, 0x82, 0xa5, 0x51, - 0x83, 0x6c, 0xf8, 0x0d, 0x1a, 0x67, 0x65, 0x94, 0x97, 0x60, 0x8a, 0xa3, 0x32, 0x6b, 0x5c, 0xf4, - 0x4f, 0xc2, 0x99, 0x44, 0xcc, 0x47, 0xe5, 0x9f, 0x4a, 0xb0, 0x9c, 0xad, 0xd2, 0x79, 0xec, 0x37, - 0x3e, 0x48, 0xee, 0x37, 0x8a, 0x9e, 0x2a, 0x64, 0x2b, 0x9e, 0xb3, 0xf7, 0xf8, 0x24, 0xd3, 0xf8, - 0xe7, 0xb1, 0xca, 0xdd, 0xe4, 0x2a, 0xd7, 0x4e, 0xbc, 0xca, 0xec, 0x15, 0xb6, 0x3e, 0xf7, 0xf1, - 0xa7, 0x2b, 0x17, 0x7e, 0xfc, 0xe9, 0xca, 0x85, 0x7f, 0xfe, 0x74, 0xe5, 
0xc2, 0x77, 0x8f, 0x56, - 0xa4, 0x8f, 0x8f, 0x56, 0xa4, 0x1f, 0x1f, 0xad, 0x48, 0xff, 0x76, 0xb4, 0x22, 0x7d, 0xff, 0x27, - 0x2b, 0x17, 0xbe, 0x36, 0x23, 0x30, 0xff, 0x37, 0x00, 0x00, 0xff, 0xff, 0x18, 0x07, 0xfd, 0xe8, - 0x4d, 0x46, 0x00, 0x00, + // 3369 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe4, 0x5b, 0xdb, 0x6f, 0x1b, 0xc7, + 0xd5, 0xf7, 0x8a, 0xa4, 0x45, 0x1d, 0x59, 0x92, 0x35, 0x72, 0x64, 0x46, 0x49, 0x44, 0x67, 0x3f, + 0x7c, 0x89, 0xf3, 0x7d, 0x09, 0xf5, 0xc5, 0xf9, 0x9c, 0x26, 0x4e, 0xe2, 0x44, 0x94, 0x7c, 0x51, + 0x21, 0xc9, 0xcc, 0x90, 0x32, 0x1a, 0xe7, 0xd6, 0x15, 0x39, 0xa2, 0xd6, 0xde, 0x5b, 0x76, 0x67, + 0x15, 0x11, 0x41, 0xdb, 0x00, 0x45, 0xf3, 0x58, 0xb4, 0x2f, 0x45, 0x0a, 0xa4, 0x8f, 0x7d, 0xe8, + 0x4b, 0x9b, 0x3c, 0xb4, 0x69, 0xff, 0x82, 0xfa, 0xa1, 0x28, 0x52, 0xa0, 0x05, 0x8a, 0x22, 0x15, + 0x6a, 0x05, 0xcd, 0x3f, 0xd0, 0xe6, 0xc5, 0x4f, 0xc5, 0xcc, 0xce, 0xde, 0x77, 0x65, 0x93, 0xb2, + 0x89, 0x02, 0x7d, 0xe3, 0xce, 0x9c, 0xf3, 0x3b, 0x97, 0x39, 0x73, 0xe6, 0xcc, 0x85, 0xf0, 0xd2, + 0x8d, 0xe7, 0x9c, 0x9a, 0x6a, 0x2e, 0xdc, 0x70, 0x37, 0x89, 0x6d, 0x10, 0x4a, 0x9c, 0x05, 0xeb, + 0x46, 0x77, 0x41, 0xb1, 0x54, 0x67, 0x81, 0xec, 0x52, 0x62, 0x38, 0xaa, 0x69, 0x38, 0x0b, 0x3b, + 0x4f, 0x6f, 0x12, 0xaa, 0x3c, 0xbd, 0xd0, 0x25, 0x06, 0xb1, 0x15, 0x4a, 0x3a, 0x35, 0xcb, 0x36, + 0xa9, 0x89, 0x9e, 0xf2, 0xd8, 0x6b, 0x21, 0x7b, 0xcd, 0xba, 0xd1, 0xad, 0x31, 0xf6, 0x5a, 0xc8, + 0x5e, 0x13, 0xec, 0x73, 0x4f, 0x75, 0x55, 0xba, 0xed, 0x6e, 0xd6, 0xda, 0xa6, 0xbe, 0xd0, 0x35, + 0xbb, 0xe6, 0x02, 0x47, 0xd9, 0x74, 0xb7, 0xf8, 0x17, 0xff, 0xe0, 0xbf, 0x3c, 0xf4, 0xb9, 0xff, + 0x17, 0xca, 0x29, 0x96, 0xaa, 0x2b, 0xed, 0x6d, 0xd5, 0x20, 0x76, 0xcf, 0x57, 0x6f, 0xc1, 0x26, + 0x8e, 0xe9, 0xda, 0x6d, 0x92, 0xd4, 0xe9, 0x40, 0x2e, 0x67, 0x41, 0x27, 0x54, 0x59, 0xd8, 0x49, + 0x59, 0x32, 0xb7, 0x90, 0xc7, 0x65, 0xbb, 0x06, 0x55, 0xf5, 0xb4, 0x98, 0x67, 0xef, 0xc4, 0xe0, + 0xb4, 0xb7, 0x89, 0xae, 0xa4, 0xf8, 0x9e, 0xc9, 0xe3, 0x73, 0xa9, 0xaa, 0x2d, 0xa8, 0x06, 0x75, + 0xa8, 0x7d, 0x90, 0x4d, 0x0e, 0xb1, 0x77, 0x88, 0x1d, 0x1d, 0x25, 0x45, 0xb7, 0x34, 0x92, 0x65, + 0xd3, 0x93, 0xb9, 0x83, 0x9b, 0x41, 0x2d, 0xd7, 0x00, 0x16, 0x1b, 0x2b, 0x57, 0x89, 0xcd, 0xc6, + 0x0c, 0x9d, 0x82, 0xa2, 0xa1, 0xe8, 0xa4, 0x22, 0x9d, 0x92, 0x4e, 0x8f, 0xd5, 0x8f, 0xdd, 0xdc, + 0xab, 0x1e, 0xd9, 0xdf, 0xab, 0x16, 0xd7, 0x15, 0x9d, 0x60, 0xde, 0x23, 0xff, 0x58, 0x82, 0x07, + 0x97, 0x5c, 0x87, 0x9a, 0xfa, 0x1a, 0xa1, 0xb6, 0xda, 0x5e, 0x72, 0x6d, 0x9b, 0x18, 0xb4, 0x49, + 0x15, 0xea, 0x3a, 0x77, 0xe6, 0x47, 0xd7, 0xa0, 0xb4, 0xa3, 0x68, 0x2e, 0xa9, 0x8c, 0x9c, 0x92, + 0x4e, 0x8f, 0x9f, 0xa9, 0xd5, 0x44, 0x2c, 0x45, 0x1d, 0xe3, 0x47, 0x53, 0xcd, 0x1f, 0xed, 0xda, + 0xab, 0xae, 0x62, 0x50, 0x95, 0xf6, 0xea, 0x27, 0x04, 0xe4, 0x31, 0x21, 0xf7, 0x2a, 0xc3, 0xc2, + 0x1e, 0xa4, 0xfc, 0x7d, 0x09, 0x1e, 0xc9, 0xd5, 0x6d, 0x55, 0x75, 0x28, 0xd2, 0xa1, 0xa4, 0x52, + 0xa2, 0x3b, 0x15, 0xe9, 0x54, 0xe1, 0xf4, 0xf8, 0x99, 0xcb, 0xb5, 0xbe, 0x22, 0xb9, 0x96, 0x0b, + 0x5e, 0x9f, 0x10, 0x7a, 0x95, 0x56, 0x18, 0x3c, 0xf6, 0xa4, 0xc8, 0x3f, 0x94, 0x00, 0x45, 0x79, + 0x5a, 0x8a, 0xdd, 0x25, 0xf4, 0x2e, 0xbc, 0xf4, 0xda, 0xe1, 0xbc, 0x34, 0x23, 0x20, 0xc7, 0x3d, + 0x81, 0x31, 0x27, 0xbd, 0x2f, 0xc1, 0x6c, 0x5a, 0x27, 0xee, 0x9d, 0xad, 0xb8, 0x77, 0x16, 0x0f, + 0xe1, 0x1d, 0x0f, 0x35, 0xc7, 0x2d, 0xbf, 0x1c, 0x81, 0xb1, 0x65, 0x85, 0xe8, 0xa6, 0xd1, 0x24, + 0x14, 0x7d, 0x13, 0xca, 0x6c, 0x7a, 0x76, 0x14, 0xaa, 0x70, 0x8f, 0x8c, 0x9f, 0xf9, 0xbf, 0x83, + 0xcc, 0x75, 0x6a, 0x8c, 
0xba, 0xb6, 0xf3, 0x74, 0xed, 0xca, 0xe6, 0x75, 0xd2, 0xa6, 0x6b, 0x84, + 0x2a, 0x75, 0x24, 0xe4, 0x40, 0xd8, 0x86, 0x03, 0x54, 0xf4, 0x16, 0x14, 0x1d, 0x8b, 0xb4, 0x85, + 0x33, 0x5f, 0xec, 0xd3, 0xac, 0x40, 0xd3, 0xa6, 0x45, 0xda, 0xe1, 0x68, 0xb1, 0x2f, 0xcc, 0x71, + 0xd1, 0x16, 0x1c, 0x75, 0x78, 0x18, 0x54, 0x0a, 0x5c, 0xc2, 0xf9, 0x81, 0x25, 0x78, 0xc1, 0x34, + 0x29, 0x64, 0x1c, 0xf5, 0xbe, 0xb1, 0x40, 0x97, 0x7f, 0x27, 0xc1, 0x44, 0x40, 0xcb, 0x47, 0xec, + 0x8d, 0x94, 0xef, 0x6a, 0x77, 0xe7, 0x3b, 0xc6, 0xcd, 0x3d, 0x77, 0x5c, 0xc8, 0x2a, 0xfb, 0x2d, + 0x11, 0xbf, 0xbd, 0xe9, 0xc7, 0xc3, 0x08, 0x8f, 0x87, 0xe7, 0x06, 0x35, 0x2b, 0x27, 0x0c, 0xbe, + 0x28, 0x44, 0xcc, 0x61, 0xee, 0x44, 0x6f, 0x42, 0xd9, 0x21, 0x1a, 0x69, 0x53, 0xd3, 0x16, 0xe6, + 0x3c, 0x73, 0x97, 0xe6, 0x28, 0x9b, 0x44, 0x6b, 0x0a, 0xd6, 0xfa, 0x31, 0x66, 0x8f, 0xff, 0x85, + 0x03, 0x48, 0xf4, 0x3a, 0x94, 0x29, 0xd1, 0x2d, 0x4d, 0xa1, 0xfe, 0xc4, 0x7a, 0x2a, 0xdf, 0x24, + 0x06, 0xdb, 0x30, 0x3b, 0x2d, 0xc1, 0xc0, 0x07, 0x3f, 0x70, 0x96, 0xdf, 0x8a, 0x03, 0x40, 0xf4, + 0x81, 0x04, 0x93, 0xae, 0xd5, 0x61, 0xa4, 0x94, 0x25, 0xd8, 0x6e, 0x4f, 0x44, 0xc3, 0xc5, 0x41, + 0xdd, 0xb6, 0x11, 0x43, 0xab, 0xcf, 0x0a, 0xe1, 0x93, 0xf1, 0x76, 0x9c, 0x90, 0x8a, 0x16, 0x61, + 0x4a, 0x57, 0x0d, 0x4c, 0x94, 0x4e, 0xaf, 0x49, 0xda, 0xa6, 0xd1, 0x71, 0x2a, 0xc5, 0x53, 0xd2, + 0xe9, 0x52, 0xfd, 0xa4, 0x00, 0x98, 0x5a, 0x8b, 0x77, 0xe3, 0x24, 0x3d, 0xfa, 0x3a, 0x20, 0xdf, + 0xae, 0x4b, 0xde, 0x7a, 0xa1, 0x9a, 0x46, 0xa5, 0x74, 0x4a, 0x3a, 0x5d, 0xa8, 0xcf, 0x09, 0x14, + 0xd4, 0x4a, 0x51, 0xe0, 0x0c, 0x2e, 0xf9, 0x9f, 0x45, 0x98, 0x4a, 0x04, 0x38, 0xba, 0x0a, 0xb3, + 0x6d, 0x2f, 0x7d, 0xae, 0xbb, 0xfa, 0x26, 0xb1, 0x9b, 0xed, 0x6d, 0xd2, 0x71, 0x35, 0xd2, 0xe1, + 0xa3, 0x5e, 0xaa, 0xcf, 0x0b, 0x19, 0xb3, 0x4b, 0x99, 0x54, 0x38, 0x87, 0x9b, 0xe9, 0x6d, 0xf0, + 0xa6, 0x35, 0xd5, 0x71, 0x02, 0xcc, 0x11, 0x8e, 0x19, 0xe8, 0xbd, 0x9e, 0xa2, 0xc0, 0x19, 0x5c, + 0x4c, 0xc7, 0x0e, 0x71, 0x54, 0x9b, 0x74, 0x92, 0x3a, 0x16, 0xe2, 0x3a, 0x2e, 0x67, 0x52, 0xe1, + 0x1c, 0x6e, 0x74, 0x16, 0xc6, 0x3d, 0x69, 0xdc, 0xe3, 0x62, 0x68, 0x82, 0x84, 0xbd, 0x1e, 0x76, + 0xe1, 0x28, 0x1d, 0x33, 0xcd, 0xdc, 0xe4, 0x55, 0x40, 0x27, 0x7f, 0x48, 0xae, 0xa4, 0x28, 0x70, + 0x06, 0x17, 0x33, 0xcd, 0x8b, 0x99, 0x94, 0x69, 0x47, 0xe3, 0xa6, 0x6d, 0x64, 0x52, 0xe1, 0x1c, + 0x6e, 0x16, 0x79, 0x9e, 0xca, 0x8b, 0x3b, 0x8a, 0xaa, 0x29, 0x9b, 0x1a, 0xa9, 0x8c, 0xc6, 0x23, + 0x6f, 0x3d, 0xde, 0x8d, 0x93, 0xf4, 0xe8, 0x12, 0x4c, 0x7b, 0x4d, 0x1b, 0x86, 0x12, 0x80, 0x94, + 0x39, 0xc8, 0x83, 0x02, 0x64, 0x7a, 0x3d, 0x49, 0x80, 0xd3, 0x3c, 0xf2, 0x5f, 0x24, 0x38, 0x99, + 0x33, 0x93, 0xd0, 0xcb, 0x50, 0xa4, 0x3d, 0xcb, 0x5f, 0x7f, 0xff, 0xd7, 0xcf, 0xe8, 0xad, 0x9e, + 0x45, 0x6e, 0xef, 0x55, 0x1f, 0xca, 0x61, 0x63, 0xdd, 0x98, 0x33, 0xa2, 0x6f, 0xc3, 0x84, 0x6d, + 0x6a, 0x9a, 0x6a, 0x74, 0x3d, 0x12, 0x91, 0x4d, 0x2e, 0xf4, 0x39, 0xd3, 0x71, 0x14, 0x23, 0xcc, + 0x96, 0xd3, 0xfb, 0x7b, 0xd5, 0x89, 0x58, 0x1f, 0x8e, 0x8b, 0x93, 0x7f, 0x3d, 0x02, 0xb0, 0x4c, + 0x2c, 0xcd, 0xec, 0xe9, 0xc4, 0x18, 0xc6, 0x0a, 0xfa, 0x76, 0x6c, 0x05, 0x7d, 0xa9, 0xdf, 0x8c, + 0x16, 0xa8, 0x9a, 0xbb, 0x84, 0x76, 0x13, 0x4b, 0xe8, 0xcb, 0x83, 0x8b, 0x38, 0x78, 0x0d, 0xbd, + 0x55, 0x80, 0x99, 0x90, 0x78, 0xc9, 0x34, 0x3a, 0x2a, 0x9f, 0x13, 0x2f, 0xc4, 0x62, 0xe2, 0xf1, + 0x44, 0x4c, 0x9c, 0xcc, 0x60, 0x89, 0xc4, 0xc3, 0xd5, 0x40, 0xfb, 0x11, 0xce, 0x7e, 0x3e, 0x2e, + 0xfc, 0xf6, 0x5e, 0xf5, 0xc0, 0xa2, 0xbc, 0x16, 0x60, 0xc6, 0x95, 0x45, 0x8f, 0xc1, 0x51, 0x9b, + 0x28, 0x8e, 0x69, 0xf0, 0x34, 0x31, 0x16, 0x1a, 
0x85, 0x79, 0x2b, 0x16, 0xbd, 0xe8, 0x09, 0x18, + 0xd5, 0x89, 0xe3, 0x28, 0x5d, 0xc2, 0x33, 0xc2, 0x58, 0x7d, 0x4a, 0x10, 0x8e, 0xae, 0x79, 0xcd, + 0xd8, 0xef, 0x47, 0xd7, 0x61, 0x52, 0x53, 0x1c, 0x11, 0xda, 0x2d, 0x55, 0x27, 0x7c, 0xce, 0x8f, + 0x9f, 0xf9, 0x9f, 0xbb, 0x8b, 0x18, 0xc6, 0x11, 0xae, 0x44, 0xab, 0x31, 0x24, 0x9c, 0x40, 0x46, + 0x3b, 0x80, 0x58, 0x4b, 0xcb, 0x56, 0x0c, 0xc7, 0x73, 0x19, 0x93, 0x37, 0xda, 0xb7, 0xbc, 0x20, + 0xbf, 0xad, 0xa6, 0xd0, 0x70, 0x86, 0x04, 0xf9, 0xf7, 0x12, 0x4c, 0x86, 0x03, 0x36, 0x84, 0x42, + 0xe9, 0xad, 0x78, 0xa1, 0xf4, 0xfc, 0xc0, 0xc1, 0x9b, 0x53, 0x29, 0x7d, 0x58, 0x00, 0x14, 0x12, + 0xb1, 0xd4, 0xb0, 0xa9, 0xb4, 0x6f, 0xdc, 0xc5, 0x3e, 0xe2, 0xa7, 0x12, 0x20, 0x91, 0xac, 0x17, + 0x0d, 0xc3, 0xa4, 0x3c, 0xff, 0xfb, 0x6a, 0xbe, 0x36, 0xb0, 0x9a, 0xbe, 0x06, 0xb5, 0x8d, 0x14, + 0xf6, 0x05, 0x83, 0xda, 0xbd, 0x70, 0xc4, 0xd2, 0x04, 0x38, 0x43, 0x21, 0xf4, 0x0e, 0x80, 0x2d, + 0x30, 0x5b, 0xa6, 0x48, 0x01, 0x2f, 0x0d, 0x90, 0x4d, 0x19, 0xc0, 0x92, 0x69, 0x6c, 0xa9, 0xdd, + 0x30, 0xa1, 0xe1, 0x00, 0x18, 0x47, 0x84, 0xcc, 0x5d, 0x80, 0x93, 0x39, 0xda, 0xa3, 0xe3, 0x50, + 0xb8, 0x41, 0x7a, 0x9e, 0x5b, 0x31, 0xfb, 0x89, 0x4e, 0x44, 0xf7, 0x63, 0x63, 0x62, 0x2b, 0x75, + 0x6e, 0xe4, 0x39, 0x49, 0xfe, 0xb2, 0x14, 0x8d, 0x35, 0x5e, 0xc5, 0x9e, 0x86, 0xb2, 0x4d, 0x2c, + 0x4d, 0x6d, 0x2b, 0x8e, 0xa8, 0x67, 0x78, 0x41, 0x8a, 0x45, 0x1b, 0x0e, 0x7a, 0x63, 0xf5, 0xee, + 0xc8, 0xfd, 0xad, 0x77, 0x0b, 0xf7, 0xba, 0xde, 0x35, 0xa1, 0xec, 0xf8, 0x85, 0x6e, 0x91, 0x83, + 0x2f, 0x1e, 0x22, 0x67, 0x8b, 0x1a, 0x37, 0x10, 0x18, 0x54, 0xb7, 0x81, 0x90, 0xac, 0xba, 0xb6, + 0xd4, 0x67, 0x5d, 0xbb, 0x0a, 0x27, 0x6c, 0xb2, 0xa3, 0x32, 0x35, 0x2e, 0xab, 0x0e, 0x35, 0xed, + 0xde, 0xaa, 0xaa, 0xab, 0x54, 0x94, 0x3d, 0x95, 0xfd, 0xbd, 0xea, 0x09, 0x9c, 0xd1, 0x8f, 0x33, + 0xb9, 0x58, 0x76, 0xb6, 0x14, 0xd7, 0x21, 0x1d, 0x9e, 0xd2, 0xca, 0x61, 0x76, 0x6e, 0xf0, 0x56, + 0x2c, 0x7a, 0x91, 0x1e, 0x0b, 0xee, 0xf2, 0xbd, 0x08, 0xee, 0xc9, 0xfc, 0xc0, 0x46, 0x1b, 0x70, + 0xd2, 0xb2, 0xcd, 0xae, 0x4d, 0x1c, 0x67, 0x99, 0x28, 0x1d, 0x4d, 0x35, 0x88, 0xef, 0xaf, 0x31, + 0x6e, 0xe7, 0x43, 0xfb, 0x7b, 0xd5, 0x93, 0x8d, 0x6c, 0x12, 0x9c, 0xc7, 0x2b, 0x7f, 0x54, 0x84, + 0xe3, 0xc9, 0x55, 0x36, 0xa7, 0x2a, 0x95, 0x06, 0xaa, 0x4a, 0x9f, 0x8c, 0x4c, 0x1b, 0xaf, 0x64, + 0x0f, 0xa2, 0x21, 0x63, 0xea, 0x2c, 0xc2, 0x94, 0xc8, 0x23, 0x7e, 0xa7, 0xa8, 0xcb, 0x83, 0x68, + 0xd8, 0x88, 0x77, 0xe3, 0x24, 0x3d, 0xab, 0x35, 0xc3, 0x12, 0xd2, 0x07, 0x29, 0xc6, 0x6b, 0xcd, + 0xc5, 0x24, 0x01, 0x4e, 0xf3, 0xa0, 0x35, 0x98, 0x71, 0x8d, 0x34, 0x94, 0x17, 0x9d, 0x0f, 0x09, + 0xa8, 0x99, 0x8d, 0x34, 0x09, 0xce, 0xe2, 0x43, 0x3b, 0x00, 0x6d, 0xbf, 0x20, 0x70, 0x2a, 0x47, + 0x79, 0xae, 0xae, 0x0f, 0x3c, 0xb7, 0x82, 0xda, 0x22, 0xcc, 0x88, 0x41, 0x93, 0x83, 0x23, 0x92, + 0xd0, 0x0b, 0x30, 0x61, 0xf3, 0x8d, 0x87, 0x6f, 0x80, 0x57, 0xbc, 0x3f, 0x20, 0xd8, 0x26, 0x70, + 0xb4, 0x13, 0xc7, 0x69, 0xe5, 0x3f, 0x48, 0xd1, 0x25, 0x2a, 0x28, 0xb5, 0xcf, 0xc5, 0xca, 0xaa, + 0xc7, 0x12, 0x65, 0xd5, 0x6c, 0x9a, 0x23, 0x52, 0x55, 0x7d, 0x27, 0xbb, 0xca, 0xbe, 0x78, 0xa8, + 0x2a, 0x3b, 0x5c, 0x6a, 0xef, 0x5c, 0x66, 0x7f, 0x22, 0xc1, 0xec, 0xc5, 0xe6, 0x25, 0xdb, 0x74, + 0x2d, 0x5f, 0xbd, 0x2b, 0x96, 0xe7, 0xab, 0xaf, 0x41, 0xd1, 0x76, 0x35, 0xdf, 0xae, 0xff, 0xf2, + 0xed, 0xc2, 0xae, 0xc6, 0xec, 0x9a, 0x49, 0x70, 0x79, 0x46, 0x31, 0x06, 0xf4, 0x16, 0x1c, 0xb5, + 0x15, 0xa3, 0x4b, 0xfc, 0x45, 0xf8, 0xd9, 0x3e, 0xad, 0x59, 0x59, 0xc6, 0x8c, 0x3d, 0x52, 0x0a, + 0x72, 0x34, 0x2c, 0x50, 0xe5, 0x9f, 0x48, 0x30, 0x75, 0xb9, 0xd5, 0x6a, 
0xac, 0x18, 0x7c, 0x16, + 0x37, 0x14, 0xba, 0xcd, 0xea, 0x04, 0x4b, 0xa1, 0xdb, 0xc9, 0x3a, 0x81, 0xf5, 0x61, 0xde, 0x83, + 0xb6, 0x61, 0x94, 0x65, 0x0f, 0x62, 0x74, 0x06, 0x2c, 0xf1, 0x85, 0xb8, 0xba, 0x07, 0x12, 0xd6, + 0x9f, 0xa2, 0x01, 0xfb, 0xf0, 0xf2, 0x7b, 0x70, 0x22, 0xa2, 0x1e, 0xf3, 0x17, 0x3f, 0x9d, 0x44, + 0x6d, 0x28, 0x31, 0x4d, 0xfc, 0xb3, 0xc7, 0x7e, 0x8f, 0xd0, 0x12, 0x26, 0x87, 0x75, 0x14, 0xfb, + 0x72, 0xb0, 0x87, 0x2d, 0xaf, 0xc1, 0xc4, 0x65, 0xd3, 0xa1, 0x0d, 0xd3, 0xa6, 0xdc, 0x6d, 0xe8, + 0x11, 0x28, 0xe8, 0xaa, 0x21, 0x56, 0xe9, 0x71, 0xc1, 0x53, 0x60, 0xeb, 0x08, 0x6b, 0xe7, 0xdd, + 0xca, 0xae, 0xc8, 0x46, 0x61, 0xb7, 0xb2, 0x8b, 0x59, 0xbb, 0x7c, 0x09, 0x46, 0xc5, 0x70, 0x44, + 0x81, 0x0a, 0x07, 0x03, 0x15, 0x32, 0x80, 0x7e, 0x31, 0x02, 0xa3, 0x42, 0xfb, 0x21, 0x6c, 0xe6, + 0xde, 0x88, 0x6d, 0xe6, 0xce, 0x0d, 0x36, 0xd2, 0xb9, 0x3b, 0xb9, 0x4e, 0x62, 0x27, 0xf7, 0xe2, + 0x80, 0xf8, 0x07, 0x6f, 0xe3, 0x3e, 0x96, 0x60, 0x32, 0x1e, 0x73, 0xe8, 0x2c, 0x8c, 0xb3, 0x35, + 0x45, 0x6d, 0x93, 0xf5, 0xb0, 0x28, 0x0e, 0x0e, 0x56, 0x9a, 0x61, 0x17, 0x8e, 0xd2, 0xa1, 0x6e, + 0xc0, 0xc6, 0xc2, 0x42, 0x38, 0x25, 0xdf, 0xe5, 0x2e, 0x55, 0xb5, 0x9a, 0x77, 0x5f, 0x53, 0x5b, + 0x31, 0xe8, 0x15, 0xbb, 0x49, 0x6d, 0xd5, 0xe8, 0xa6, 0x04, 0xf1, 0x18, 0x8b, 0x22, 0xcb, 0x37, + 0x25, 0x18, 0x17, 0x2a, 0x0f, 0x61, 0x4b, 0xf2, 0x7a, 0x7c, 0x4b, 0xf2, 0xec, 0x80, 0xf3, 0x39, + 0x7b, 0x3f, 0xf2, 0x69, 0x68, 0x0a, 0x9b, 0xc1, 0x2c, 0xc1, 0x6c, 0x9b, 0x0e, 0x4d, 0x26, 0x18, + 0x36, 0xd7, 0x30, 0xef, 0x41, 0xdf, 0x93, 0xe0, 0xb8, 0x9a, 0x98, 0xf3, 0xc2, 0xd7, 0x2f, 0x0f, + 0xa6, 0x5a, 0x00, 0x53, 0xaf, 0x08, 0x79, 0xc7, 0x93, 0x3d, 0x38, 0x25, 0x52, 0x76, 0x21, 0x45, + 0x85, 0x14, 0x28, 0x6e, 0x53, 0x6a, 0x89, 0x41, 0x58, 0x1a, 0x3c, 0xf3, 0x84, 0x2a, 0x95, 0xb9, + 0xf9, 0xad, 0x56, 0x03, 0x73, 0x68, 0xf9, 0xe7, 0x23, 0x81, 0xc3, 0x9a, 0xde, 0x24, 0x09, 0xf2, + 0xad, 0x74, 0x2f, 0xf2, 0xed, 0x78, 0x56, 0xae, 0x45, 0xdf, 0x80, 0x02, 0xd5, 0x06, 0xdd, 0x94, + 0x0a, 0x09, 0xad, 0xd5, 0x66, 0x98, 0xb0, 0x5a, 0xab, 0x4d, 0xcc, 0x20, 0xd1, 0xdb, 0x50, 0x62, + 0xab, 0x19, 0x9b, 0xe3, 0x85, 0xc1, 0x73, 0x08, 0xf3, 0x57, 0x18, 0x61, 0xec, 0xcb, 0xc1, 0x1e, + 0xae, 0xfc, 0x1e, 0x4c, 0xc4, 0x12, 0x01, 0xba, 0x0e, 0xc7, 0x34, 0x53, 0xe9, 0xd4, 0x15, 0x4d, + 0x31, 0xda, 0xc4, 0x4e, 0xa6, 0xc6, 0xec, 0xfd, 0xcc, 0x6a, 0x84, 0x43, 0x24, 0x94, 0xe0, 0x02, + 0x31, 0xda, 0x87, 0x63, 0xd8, 0xb2, 0x02, 0x10, 0x5a, 0x8f, 0xaa, 0x50, 0x62, 0x21, 0xec, 0xad, + 0x4c, 0x63, 0xf5, 0x31, 0xa6, 0x2b, 0x8b, 0x6c, 0x07, 0x7b, 0xed, 0xe8, 0x0c, 0x80, 0x43, 0xda, + 0x36, 0xa1, 0x3c, 0xef, 0x78, 0x27, 0x40, 0x41, 0x06, 0x6e, 0x06, 0x3d, 0x38, 0x42, 0x25, 0xff, + 0x49, 0x82, 0x89, 0x75, 0x42, 0xdf, 0x35, 0xed, 0x1b, 0x0d, 0x53, 0x53, 0xdb, 0xbd, 0x21, 0xe4, + 0xfd, 0xcd, 0x58, 0xde, 0x7f, 0xa5, 0xcf, 0x31, 0x8b, 0x69, 0x9b, 0x97, 0xfd, 0xe5, 0xbf, 0x4b, + 0x50, 0x89, 0x51, 0x46, 0xd3, 0x04, 0x81, 0x92, 0x65, 0xda, 0xd4, 0x5f, 0xe3, 0x0f, 0xa5, 0x01, + 0x4b, 0xa9, 0x91, 0x55, 0x9e, 0xc1, 0x62, 0x0f, 0x9d, 0xd9, 0xb9, 0x65, 0x9b, 0xba, 0x88, 0xfb, + 0xc3, 0x49, 0x21, 0xc4, 0x0e, 0xed, 0xbc, 0x68, 0x9b, 0x3a, 0xe6, 0xd8, 0xf2, 0x1f, 0x25, 0x98, + 0x8e, 0x51, 0x0e, 0x21, 0xa5, 0x2b, 0xf1, 0x94, 0xfe, 0xe2, 0x61, 0x0c, 0xcb, 0x49, 0xec, 0x5f, + 0x25, 0xcd, 0x62, 0x0e, 0x40, 0x5b, 0x30, 0x6e, 0x99, 0x9d, 0xe6, 0x3d, 0xb8, 0x99, 0x9b, 0x62, + 0x2b, 0x64, 0x23, 0xc4, 0xc2, 0x51, 0x60, 0xb4, 0x0b, 0xd3, 0x86, 0xa2, 0x13, 0xc7, 0x52, 0xda, + 0xa4, 0x79, 0x0f, 0xce, 0x45, 0x1e, 0xe0, 0xb7, 0x05, 0x49, 0x44, 0x9c, 0x16, 0x22, 0xff, 0x2a, + 
0x65, 0xb7, 0x69, 0x53, 0xf4, 0x2a, 0x94, 0xf9, 0x23, 0x89, 0xb6, 0xa9, 0x89, 0xa5, 0xed, 0x2c, + 0x1b, 0x9a, 0x86, 0x68, 0xbb, 0xbd, 0x57, 0xfd, 0xef, 0x03, 0x8f, 0x75, 0x7d, 0x42, 0x1c, 0xc0, + 0xa0, 0x75, 0x28, 0x5a, 0x87, 0x29, 0x33, 0xf8, 0xc2, 0xc2, 0x6b, 0x0b, 0x8e, 0x23, 0xff, 0x23, + 0xa9, 0x38, 0x5f, 0x5e, 0xae, 0xdf, 0xb3, 0x01, 0x0b, 0xca, 0x9a, 0xdc, 0x41, 0xb3, 0x61, 0x54, + 0xac, 0xb2, 0x22, 0x2e, 0x2f, 0x1d, 0x26, 0x2e, 0xa3, 0x2b, 0x43, 0xb0, 0x89, 0xf0, 0x1b, 0x7d, + 0x41, 0xf2, 0x5f, 0x25, 0x98, 0xe6, 0x0a, 0xb5, 0x5d, 0x5b, 0xa5, 0xbd, 0xa1, 0x65, 0xd0, 0xad, + 0x58, 0x06, 0x5d, 0xee, 0xd3, 0xd0, 0x94, 0xc6, 0xb9, 0x59, 0xf4, 0x73, 0x09, 0x1e, 0x48, 0x51, + 0x0f, 0x21, 0xc3, 0x90, 0x78, 0x86, 0x79, 0xe5, 0xb0, 0x06, 0xe6, 0x64, 0x99, 0x9b, 0x90, 0x61, + 0x1e, 0x0f, 0xdc, 0x33, 0x00, 0x96, 0xad, 0xee, 0xa8, 0x1a, 0xe9, 0x8a, 0xcb, 0xe0, 0x72, 0x38, + 0x24, 0x8d, 0xa0, 0x07, 0x47, 0xa8, 0xd0, 0xb7, 0x60, 0xb6, 0x43, 0xb6, 0x14, 0x57, 0xa3, 0x8b, + 0x9d, 0xce, 0x92, 0x62, 0x29, 0x9b, 0xaa, 0xa6, 0x52, 0x55, 0xec, 0xb0, 0xc7, 0xea, 0x17, 0xbc, + 0x4b, 0xda, 0x2c, 0x8a, 0xdb, 0x7b, 0xd5, 0xc7, 0x0f, 0xbe, 0x98, 0xf1, 0x89, 0x7b, 0x38, 0x47, + 0x08, 0xfa, 0xae, 0x04, 0x15, 0x9b, 0xbc, 0xe3, 0xaa, 0x36, 0xe9, 0x2c, 0xdb, 0xa6, 0x15, 0xd3, + 0xa0, 0xc0, 0x35, 0xb8, 0xb4, 0xbf, 0x57, 0xad, 0xe0, 0x1c, 0x9a, 0x7e, 0x74, 0xc8, 0x15, 0x84, + 0x28, 0xcc, 0x28, 0x9a, 0x66, 0xbe, 0x4b, 0xe2, 0x1e, 0x28, 0x72, 0xf9, 0xf5, 0xfd, 0xbd, 0xea, + 0xcc, 0x62, 0xba, 0xbb, 0x1f, 0xd1, 0x59, 0xf0, 0x68, 0x01, 0x46, 0x77, 0x4c, 0xcd, 0xd5, 0x89, + 0x53, 0x29, 0x71, 0x49, 0x2c, 0xe3, 0x8e, 0x5e, 0xf5, 0x9a, 0x6e, 0xef, 0x55, 0x8f, 0x5e, 0x6c, + 0xf2, 0xa3, 0x0f, 0x9f, 0x8a, 0xed, 0xd1, 0x58, 0xcd, 0x24, 0xa6, 0x3c, 0x3f, 0x77, 0x2d, 0x87, + 0x39, 0xe6, 0x72, 0xd8, 0x85, 0xa3, 0x74, 0x48, 0x87, 0xb1, 0x6d, 0xb1, 0x6f, 0x77, 0x2a, 0xa3, + 0x03, 0xad, 0x7e, 0xb1, 0x7d, 0x7f, 0x7d, 0x5a, 0x88, 0x1c, 0xf3, 0x9b, 0x1d, 0x1c, 0x4a, 0x40, + 0x4f, 0xc0, 0x28, 0xff, 0x58, 0x59, 0xe6, 0xa7, 0xb5, 0xe5, 0x30, 0x13, 0x5d, 0xf6, 0x9a, 0xb1, + 0xdf, 0xef, 0x93, 0xae, 0x34, 0x96, 0xf8, 0xe1, 0x6a, 0x82, 0x74, 0xa5, 0xb1, 0x84, 0xfd, 0x7e, + 0x64, 0xc1, 0xa8, 0x43, 0x56, 0x55, 0xc3, 0xdd, 0xad, 0xc0, 0x40, 0xd7, 0xc5, 0xcd, 0x0b, 0x9c, + 0x3b, 0x71, 0x14, 0x15, 0x4a, 0x14, 0xfd, 0xd8, 0x17, 0x83, 0x76, 0x61, 0xcc, 0x76, 0x8d, 0x45, + 0x67, 0xc3, 0x21, 0x76, 0x65, 0x9c, 0xcb, 0xec, 0x37, 0x39, 0x63, 0x9f, 0x3f, 0x29, 0x35, 0xf0, + 0x60, 0x40, 0x81, 0x43, 0x61, 0xe8, 0x23, 0x09, 0x90, 0xe3, 0x5a, 0x96, 0x46, 0x74, 0x62, 0x50, + 0x45, 0xe3, 0xa7, 0x61, 0x4e, 0xe5, 0x18, 0xd7, 0xa1, 0xd1, 0xaf, 0xdd, 0x29, 0xa0, 0xa4, 0x32, + 0xc1, 0x51, 0x73, 0x9a, 0x14, 0x67, 0xe8, 0xc1, 0x86, 0x62, 0xcb, 0xe1, 0xbf, 0x2b, 0x13, 0x03, + 0x0d, 0x45, 0xf6, 0xa9, 0x60, 0x38, 0x14, 0xa2, 0x1f, 0xfb, 0x62, 0xd0, 0x55, 0x98, 0xb5, 0x89, + 0xd2, 0xb9, 0x62, 0x68, 0x3d, 0x6c, 0x9a, 0xf4, 0xa2, 0xaa, 0x11, 0xa7, 0xe7, 0x50, 0xa2, 0x57, + 0x26, 0x79, 0xd8, 0x04, 0x4f, 0x2e, 0x70, 0x26, 0x15, 0xce, 0xe1, 0xe6, 0x2f, 0x01, 0xc4, 0x19, + 0xec, 0x70, 0xde, 0xd2, 0x1d, 0xee, 0x25, 0x40, 0xa8, 0xea, 0x7d, 0x7b, 0x09, 0x10, 0x11, 0x71, + 0xf0, 0x11, 0xd2, 0x57, 0x23, 0x30, 0x13, 0x12, 0xdf, 0xf5, 0x4b, 0x80, 0x0c, 0x96, 0x21, 0xbc, + 0x04, 0xc8, 0xbe, 0x4a, 0x2f, 0xdc, 0xef, 0xab, 0xf4, 0xfb, 0xf0, 0x02, 0x81, 0xdf, 0xce, 0x87, + 0x4e, 0xfc, 0xf7, 0xbf, 0x9d, 0x0f, 0x75, 0xcd, 0x29, 0x67, 0x7e, 0x33, 0x12, 0x35, 0xe8, 0x3f, + 0xe8, 0x0a, 0xf8, 0xf0, 0x2f, 0x0d, 0xe5, 0xcf, 0x0b, 0x70, 0x3c, 0x39, 0x63, 0x63, 0x37, 0x81, + 0xd2, 0x1d, 0x6f, 0x02, 
0x1b, 0x70, 0x62, 0xcb, 0xd5, 0xb4, 0x1e, 0x77, 0x48, 0xe4, 0x3a, 0xd0, + 0x3b, 0xb5, 0x7f, 0x58, 0x70, 0x9e, 0xb8, 0x98, 0x41, 0x83, 0x33, 0x39, 0x73, 0x6e, 0x35, 0x0b, + 0x03, 0xdd, 0x6a, 0xa6, 0x2e, 0xd5, 0x8a, 0x77, 0x7f, 0xa9, 0x96, 0x7d, 0x43, 0x59, 0x1a, 0xe0, + 0x86, 0xf2, 0x5e, 0x5c, 0x29, 0x66, 0x24, 0xbe, 0x3b, 0x5d, 0x29, 0xca, 0x0f, 0xc3, 0x9c, 0x60, + 0x63, 0xdf, 0x4b, 0xa6, 0x41, 0x6d, 0x53, 0xd3, 0x88, 0xbd, 0xec, 0xea, 0x7a, 0x4f, 0x3e, 0x0f, + 0x93, 0xf1, 0x7b, 0x6d, 0x6f, 0xe4, 0xbd, 0xab, 0x76, 0x71, 0x97, 0x12, 0x19, 0x79, 0xaf, 0x1d, + 0x07, 0x14, 0xf2, 0x07, 0x12, 0xcc, 0x66, 0xbf, 0xa1, 0x43, 0x1a, 0x4c, 0xea, 0xca, 0x6e, 0xf4, + 0x11, 0xa1, 0x34, 0xe0, 0x8e, 0x1b, 0xed, 0xef, 0x55, 0x27, 0xd7, 0x62, 0x58, 0x38, 0x81, 0x2d, + 0x7f, 0x21, 0xc1, 0xc9, 0x9c, 0x6b, 0xc6, 0xe1, 0x6a, 0x82, 0xae, 0x41, 0x59, 0x57, 0x76, 0x9b, + 0xae, 0xdd, 0x25, 0x03, 0x9f, 0x31, 0xf0, 0x5c, 0xb2, 0x26, 0x50, 0x70, 0x80, 0x27, 0x7f, 0x22, + 0x41, 0x25, 0xaf, 0x1e, 0x44, 0x67, 0x63, 0x17, 0xa2, 0x8f, 0x26, 0x2e, 0x44, 0xa7, 0x53, 0x7c, + 0x43, 0xba, 0x0e, 0xfd, 0x54, 0x82, 0xd9, 0xec, 0xba, 0x19, 0x3d, 0x13, 0xd3, 0xb8, 0x9a, 0xd0, + 0x78, 0x2a, 0xc1, 0x25, 0xf4, 0xdd, 0x86, 0x49, 0x51, 0x5d, 0x0b, 0x18, 0xe1, 0xe5, 0x27, 0x0f, + 0xce, 0xaa, 0x02, 0xcc, 0xaf, 0x13, 0xf9, 0x48, 0xc6, 0xdb, 0x70, 0x02, 0x57, 0xfe, 0xd9, 0x08, + 0x94, 0x9a, 0x6d, 0x45, 0x23, 0x43, 0x28, 0xea, 0xae, 0xc5, 0x8a, 0xba, 0x7e, 0xdf, 0xf9, 0x73, + 0x2d, 0x73, 0xeb, 0xb9, 0xcd, 0x44, 0x3d, 0x77, 0x6e, 0x20, 0xf4, 0x83, 0x4b, 0xb9, 0xe7, 0x61, + 0x2c, 0x50, 0xa2, 0xbf, 0xd5, 0x43, 0xfe, 0x78, 0x04, 0xc6, 0x23, 0x22, 0xfa, 0x5c, 0x7b, 0x76, + 0x62, 0xab, 0xf7, 0x20, 0x7f, 0x29, 0x8a, 0xc8, 0xae, 0xf9, 0xeb, 0xb7, 0xf7, 0x86, 0x2e, 0x7c, + 0x0b, 0x95, 0x5e, 0xd6, 0xcf, 0xc3, 0x24, 0xe5, 0xff, 0xb0, 0x09, 0xce, 0xf8, 0x0a, 0x3c, 0x8a, + 0x83, 0x97, 0x99, 0xad, 0x58, 0x2f, 0x4e, 0x50, 0xcf, 0xbd, 0x00, 0x13, 0x31, 0x61, 0x7d, 0x3d, + 0x79, 0xfb, 0xad, 0x04, 0x8f, 0xde, 0x71, 0x4f, 0x86, 0xea, 0xb1, 0xe9, 0x55, 0x4b, 0x4c, 0xaf, + 0xf9, 0x7c, 0x80, 0x21, 0x3e, 0x96, 0xf8, 0xd1, 0x08, 0xa0, 0xd6, 0xb6, 0x6a, 0x77, 0x1a, 0x8a, + 0x4d, 0x7b, 0x58, 0xfc, 0x8f, 0x6a, 0x08, 0x13, 0xee, 0x2c, 0x8c, 0x77, 0x88, 0xd3, 0xb6, 0x55, + 0xee, 0x2c, 0xb1, 0x57, 0x08, 0xce, 0x41, 0x96, 0xc3, 0x2e, 0x1c, 0xa5, 0x43, 0x5d, 0x28, 0xef, + 0x78, 0xff, 0xd4, 0xf3, 0x6f, 0xde, 0xfa, 0x2d, 0x66, 0xc3, 0xff, 0xfa, 0x85, 0xf1, 0x25, 0x1a, + 0x1c, 0x1c, 0x80, 0xcb, 0x1f, 0x4a, 0x30, 0x9b, 0x76, 0xcc, 0x32, 0x53, 0xfd, 0xfe, 0x3b, 0xe7, + 0x61, 0x28, 0x72, 0x74, 0xe6, 0x95, 0x63, 0xde, 0x89, 0x37, 0x93, 0x8c, 0x79, 0xab, 0xfc, 0xa5, + 0x04, 0x73, 0xd9, 0xaa, 0x0d, 0x61, 0x2b, 0x71, 0x3d, 0xbe, 0x95, 0xe8, 0xf7, 0xd8, 0x20, 0x5b, + 0xef, 0x9c, 0x6d, 0xc5, 0x5e, 0xe6, 0x18, 0x0c, 0xc1, 0xc8, 0xad, 0xb8, 0x91, 0x8b, 0x87, 0x36, + 0x32, 0xdb, 0xc0, 0xfa, 0x13, 0x37, 0x6f, 0xcd, 0x1f, 0xf9, 0xec, 0xd6, 0xfc, 0x91, 0x3f, 0xdf, + 0x9a, 0x3f, 0xf2, 0xfe, 0xfe, 0xbc, 0x74, 0x73, 0x7f, 0x5e, 0xfa, 0x6c, 0x7f, 0x5e, 0xfa, 0xdb, + 0xfe, 0xbc, 0xf4, 0x83, 0x2f, 0xe6, 0x8f, 0x5c, 0x1b, 0x15, 0x98, 0xff, 0x0a, 0x00, 0x00, 0xff, + 0xff, 0x56, 0x18, 0xbd, 0xf5, 0xb1, 0x3c, 0x00, 0x00, } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto index da5d6d3bd7..8ba21bc9b4 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2016 The 
Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,11 +21,13 @@ syntax = 'proto2'; package k8s.io.kubernetes.pkg.apis.extensions.v1beta1; -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; @@ -37,18 +39,12 @@ message APIVersion { optional string name = 1; } -message CPUTargetUtilization { - // fraction of the requested CPU that should be utilized/used, - // e.g. 70 means that 70% of the requested CPU should be in use. - optional int32 targetPercentage = 1; -} - message CustomMetricCurrentStatus { // Custom Metric name. optional string name = 1; // Custom Metric value (average). - optional k8s.io.kubernetes.pkg.api.resource.Quantity value = 2; + optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; } message CustomMetricCurrentStatusList { @@ -61,7 +57,7 @@ message CustomMetricTarget { optional string name = 1; // Custom Metric value (average). - optional k8s.io.kubernetes.pkg.api.resource.Quantity value = 2; + optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; } message CustomMetricTargetList { @@ -73,14 +69,14 @@ message DaemonSet { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec defines the desired behavior of this daemon set. + // The desired behavior of this daemon set. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // +optional optional DaemonSetSpec spec = 2; - // Status is the current status of this daemon set. This data may be + // The current status of this daemon set. This data may be // out of date by some window of time. // Populated by the system. // Read-only. @@ -94,56 +90,107 @@ message DaemonSetList { // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of daemon sets. + // A list of daemon sets. repeated DaemonSet items = 2; } // DaemonSetSpec is the specification of a daemon set. message DaemonSetSpec { - // Selector is a label query over pods that are managed by the daemon set. + // A label query over pods that are managed by the daemon set. // Must match in order to be controlled. // If empty, defaulted to labels on Pod template. 
// More info: http://kubernetes.io/docs/user-guide/labels#label-selectors // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; - // Template is the object that describes the pod that will be created. + // An object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 2; + + // An update strategy to replace existing DaemonSet pods with new pods. + // +optional + optional DaemonSetUpdateStrategy updateStrategy = 3; + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + optional int32 minReadySeconds = 4; + + // A sequence number representing a specific generation of the template. + // Populated by the system. It can be set only during the creation. + // +optional + optional int64 templateGeneration = 5; } // DaemonSetStatus represents the current status of a daemon set. message DaemonSetStatus { - // CurrentNumberScheduled is the number of nodes that are running at least 1 + // The number of nodes that are running at least 1 // daemon pod and are supposed to run the daemon pod. // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md optional int32 currentNumberScheduled = 1; - // NumberMisscheduled is the number of nodes that are running the daemon pod, but are + // The number of nodes that are running the daemon pod, but are // not supposed to run the daemon pod. // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md optional int32 numberMisscheduled = 2; - // DesiredNumberScheduled is the total number of nodes that should be running the daemon + // The total number of nodes that should be running the daemon // pod (including nodes correctly running the daemon pod). // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md optional int32 desiredNumberScheduled = 3; - // NumberReady is the number of nodes that should be running the daemon pod and have one + // The number of nodes that should be running the daemon pod and have one // or more of the daemon pod running and ready. optional int32 numberReady = 4; + + // The most recent generation observed by the daemon set controller. + // +optional + optional int64 observedGeneration = 5; + + // The total number of nodes that are running updated daemon pod + // +optional + optional int32 updatedNumberScheduled = 6; + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + optional int32 numberAvailable = 7; + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + optional int32 numberUnavailable = 8; +} + +message DaemonSetUpdateStrategy { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". + // Default is OnDelete. + // +optional + optional string type = 1; + + // Rolling update config params. 
Present only if type = "RollingUpdate". + // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as DeploymentStrategy.RollingUpdate. + // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + optional RollingUpdateDaemonSet rollingUpdate = 2; } // Deployment enables declarative updates for Pods and ReplicaSets. message Deployment { // Standard object metadata. // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the Deployment. // +optional @@ -163,10 +210,10 @@ message DeploymentCondition { optional string status = 2; // The last time this condition was updated. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastUpdateTime = 6; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; // Last time the condition transitioned from one status to another. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 7; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; // The reason for the condition's last transition. optional string reason = 4; @@ -179,7 +226,7 @@ message DeploymentCondition { message DeploymentList { // Standard list metadata. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Deployments. repeated Deployment items = 2; @@ -208,7 +255,7 @@ message DeploymentSpec { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 2; + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; @@ -261,6 +308,10 @@ message DeploymentStatus { // +optional optional int32 updatedReplicas = 3; + // Total number of ready pods targeted by this deployment. + // +optional + optional int32 readyReplicas = 7; + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. // +optional optional int32 availableReplicas = 4; @@ -288,15 +339,6 @@ message DeploymentStrategy { optional RollingUpdateDeployment rollingUpdate = 2; } -// ExportOptions is the query options to the standard REST get call. -message ExportOptions { - // Should this value be exported. Export strips fields that a user can not specify. - optional bool export = 1; - - // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace' - optional bool exact = 2; -} - // FSGroupStrategyOptions defines the strategy type and options used to create the strategy. message FSGroupStrategyOptions { // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext. @@ -337,73 +379,6 @@ message HTTPIngressRuleValue { repeated HTTPIngressPath paths = 1; } -// configuration of a horizontal pod autoscaler. -message HorizontalPodAutoscaler { - // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. 
- // +optional - optional HorizontalPodAutoscalerSpec spec = 2; - - // current information about the autoscaler. - // +optional - optional HorizontalPodAutoscalerStatus status = 3; -} - -// list of horizontal pod autoscaler objects. -message HorizontalPodAutoscalerList { - // Standard list metadata. - // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // list of horizontal pod autoscaler objects. - repeated HorizontalPodAutoscaler items = 2; -} - -// specification of a horizontal pod autoscaler. -message HorizontalPodAutoscalerSpec { - // reference to Scale subresource; horizontal pod autoscaler will learn the current resource consumption from its status, - // and will set the desired number of pods by modifying its spec. - optional SubresourceReference scaleRef = 1; - - // lower limit for the number of pods that can be set by the autoscaler, default 1. - // +optional - optional int32 minReplicas = 2; - - // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. - optional int32 maxReplicas = 3; - - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; - // if not specified it defaults to the target CPU utilization at 80% of the requested resources. - // +optional - optional CPUTargetUtilization cpuUtilization = 4; -} - -// current status of a horizontal pod autoscaler -message HorizontalPodAutoscalerStatus { - // most recent generation observed by this autoscaler. - // +optional - optional int64 observedGeneration = 1; - - // last time the HorizontalPodAutoscaler scaled the number of pods; - // used by the autoscaler to control how often the number of pods is changed. - // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastScaleTime = 2; - - // current number of replicas of pods managed by this autoscaler. - optional int32 currentReplicas = 3; - - // desired number of replicas of pods managed by this autoscaler. - optional int32 desiredReplicas = 4; - - // current average CPU utilization over all pods, represented as a percentage of requested CPU, - // e.g. 70 means that an average pod is using now 70% of its requested CPU. - // +optional - optional int32 currentCPUUtilizationPercentage = 5; -} - // Host Port Range defines a range of host ports that will be enabled by a policy // for pods to use. It requires both the start and end to be defined. message HostPortRange { @@ -431,7 +406,7 @@ message Ingress { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec is the desired state of the Ingress. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -450,7 +425,7 @@ message IngressBackend { optional string serviceName = 1; // Specifies the port of the referenced service. - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString servicePort = 2; + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; } // IngressList is a collection of Ingress. @@ -458,7 +433,7 @@ message IngressList { // Standard object's metadata. 
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Ingress. repeated Ingress items = 2; @@ -549,143 +524,11 @@ message IngressTLS { optional string secretName = 2; } -// Job represents the configuration of a single job. -// DEPRECATED: extensions/v1beta1.Job is deprecated, use batch/v1.Job instead. -message Job { - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - optional JobSpec spec = 2; - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - optional JobStatus status = 3; -} - -// JobCondition describes current state of a job. -message JobCondition { - // Type of job condition, Complete or Failed. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time the condition was checked. - // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; - - // Last time the condition transit from one status to another. - // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; - - // (brief) reason for the condition's last transition. - // +optional - optional string reason = 5; - - // Human readable message indicating details about last transition. - // +optional - optional string message = 6; -} - -// JobList is a collection of jobs. -// DEPRECATED: extensions/v1beta1.JobList is deprecated, use batch/v1.JobList instead. -message JobList { - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of Job. - repeated Job items = 2; -} - -// JobSpec describes how the job execution will look like. -message JobSpec { - // Parallelism specifies the maximum desired number of pods the job should - // run at any given time. The actual number of pods running in steady state will - // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), - // i.e. when the work left to do is less than max parallelism. - // More info: http://kubernetes.io/docs/user-guide/jobs - // +optional - optional int32 parallelism = 1; - - // Completions specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any - // pod signals the success of all pods, and allows parallelism to have any positive - // value. Setting to 1 means that parallelism is limited to 1 and the success of that - // pod signals the success of the job. 
- // More info: http://kubernetes.io/docs/user-guide/jobs - // +optional - optional int32 completions = 2; - - // Optional duration in seconds relative to the startTime that the job may be active - // before the system tries to terminate it; value must be positive integer - // +optional - optional int64 activeDeadlineSeconds = 3; - - // Selector is a label query over pods that should match the pod count. - // Normally, the system sets this field for you. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors - // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 4; - - // AutoSelector controls generation of pod labels and pod selectors. - // It was not present in the original extensions/v1beta1 Job definition, but exists - // to allow conversion from batch/v1 Jobs, where it corresponds to, but has the opposite - // meaning as, ManualSelector. - // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md - // +optional - optional bool autoSelector = 5; - - // Template is the object that describes the pod that will be created when - // executing a job. - // More info: http://kubernetes.io/docs/user-guide/jobs - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6; -} - -// JobStatus represents the current state of a Job. -message JobStatus { - // Conditions represent the latest available observations of an object's current state. - // More info: http://kubernetes.io/docs/user-guide/jobs - // +optional - repeated JobCondition conditions = 1; - - // StartTime represents time when the job was acknowledged by the Job Manager. - // It is not guaranteed to be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 2; - - // CompletionTime represents time when the job was completed. It is not guaranteed to - // be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time completionTime = 3; - - // Active is the number of actively running pods. - // +optional - optional int32 active = 4; - - // Succeeded is the number of pods which reached Phase Succeeded. - // +optional - optional int32 succeeded = 5; - - // Failed is the number of pods which reached Phase Failed. - // +optional - optional int32 failed = 6; -} - message NetworkPolicy { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior for this NetworkPolicy. // +optional @@ -720,7 +563,7 @@ message NetworkPolicyList { // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of schema objects. repeated NetworkPolicy items = 2; @@ -732,7 +575,7 @@ message NetworkPolicyPeer { // If not provided, this selector selects no pods. // If present but empty, this selector selects all pods in this namespace. 
// +optional - optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector podSelector = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; // Selects Namespaces using cluster scoped-labels. This // matches all pods in all namespaces selected by this label selector. @@ -740,7 +583,7 @@ message NetworkPolicyPeer { // If omitted, this selector selects no namespaces. // If present but empty, this selector selects all namespaces. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector namespaceSelector = 2; + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; } message NetworkPolicyPort { @@ -755,7 +598,7 @@ message NetworkPolicyPort { // If present, only traffic on the specified protocol AND port // will be matched. // +optional - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString port = 2; + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; } message NetworkPolicySpec { @@ -764,7 +607,7 @@ message NetworkPolicySpec { // same set of pods. In this case, the ingress rules for each are combined additively. // This field is NOT optional and follows standard label selector semantics. // An empty podSelector matches all pods in this namespace. - optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector podSelector = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; // List of ingress rules to be applied to the selected pods. // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, @@ -784,7 +627,7 @@ message PodSecurityPolicy { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec defines the policy enforced. // +optional @@ -796,7 +639,7 @@ message PodSecurityPolicyList { // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of schema objects. repeated PodSecurityPolicy items = 2; @@ -873,7 +716,7 @@ message ReplicaSet { // be the same as the Pod(s) that the ReplicaSet manages. // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -899,7 +742,7 @@ message ReplicaSetCondition { // The last time the condition transitioned from one status to another. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 3; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -915,7 +758,7 @@ message ReplicaSetList { // Standard list metadata. 
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ReplicaSets. // More info: http://kubernetes.io/docs/user-guide/replication-controller @@ -942,7 +785,7 @@ message ReplicaSetSpec { // Label keys and values that must match in order to be controlled by this replica set. // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 2; + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template is the object that describes the pod that will be created if // insufficient replicas are detected. @@ -988,11 +831,31 @@ message RollbackConfig { optional int64 revision = 1; } +// Spec to control the desired behavior of daemon set rolling update. +message RollingUpdateDaemonSet { + // The maximum number of DaemonSet pods that can be unavailable during the + // update. Value can be an absolute number (ex: 5) or a percentage of total + // number of DaemonSet pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // This cannot be 0. + // Default value is 1. + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; +} + // Spec to control the desired behavior of rolling update. message RollingUpdateDeployment { // The maximum number of pods that can be unavailable during the update. // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // Absolute number is calculated from percentage by rounding up. + // Absolute number is calculated from percentage by rounding down. // This can not be 0 if MaxSurge is 0. // By default, a fixed value of 1 is used. // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods @@ -1001,7 +864,7 @@ message RollingUpdateDeployment { // that the total number of pods available at all times during the update is at // least 70% of desired pods. // +optional - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of pods that can be scheduled above the desired number of // pods. @@ -1015,7 +878,7 @@ message RollingUpdateDeployment { // new RC can be scaled up further, ensuring that total number of pods running // at any time during the update is atmost 130% of desired pods. // +optional - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString maxSurge = 2; + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // Run A sUser Strategy Options defines the strategy type and any options used to create the strategy. 
@@ -1043,7 +906,7 @@ message SELinuxStrategyOptions { message Scale { // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. // +optional @@ -1080,25 +943,6 @@ message ScaleStatus { optional string targetSelector = 3; } -// SubresourceReference contains enough information to let you inspect or modify the referred subresource. -message SubresourceReference { - // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - // +optional - optional string kind = 1; - - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names - // +optional - optional string name = 2; - - // API version of the referent - // +optional - optional string apiVersion = 3; - - // Subresource name of the referent - // +optional - optional string subresource = 4; -} - // SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. message SupplementalGroupsStrategyOptions { // Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. @@ -1116,7 +960,7 @@ message SupplementalGroupsStrategyOptions { message ThirdPartyResource { // Standard object metadata // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Description is the description of this object. // +optional @@ -1131,7 +975,7 @@ message ThirdPartyResource { message ThirdPartyResourceData { // Standard object metadata. // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Data is the raw JSON data for this data. // +optional @@ -1143,7 +987,7 @@ message ThirdPartyResourceDataList { // Standard list metadata // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of ThirdpartyResourceData. repeated ThirdPartyResourceData items = 2; @@ -1153,7 +997,7 @@ message ThirdPartyResourceDataList { message ThirdPartyResourceList { // Standard list metadata. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of ThirdPartyResources. repeated ThirdPartyResource items = 2; diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/register.go index 6d8bb24e99..fa5b2e1b24 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/register.go @@ -17,17 +17,21 @@ limitations under the License. 
package v1beta1 import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName is the group name use in this package const GroupName = "extensions" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1beta1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} var ( SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) @@ -40,10 +44,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &Deployment{}, &DeploymentList{}, &DeploymentRollback{}, - &HorizontalPodAutoscaler{}, - &HorizontalPodAutoscalerList{}, - &Job{}, - &JobList{}, &ReplicationControllerDummy{}, &Scale{}, &ThirdPartyResource{}, @@ -54,9 +54,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ThirdPartyResourceDataList{}, &Ingress{}, &IngressList{}, - &v1.ListOptions{}, - &v1.DeleteOptions{}, - &v1.ExportOptions{}, &ReplicaSet{}, &ReplicaSetList{}, &PodSecurityPolicy{}, @@ -65,6 +62,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &NetworkPolicyList{}, ) // Add the watch version that applies - versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.generated.go index 49b2fa78f7..fc147e0ca0 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.generated.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.generated.go @@ -25,11 +25,11 @@ import ( "errors" "fmt" codec1978 "github.com/ugorji/go/codec" - pkg4_resource "k8s.io/kubernetes/pkg/api/resource" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg2_v1 "k8s.io/kubernetes/pkg/api/v1" - pkg3_types "k8s.io/kubernetes/pkg/types" - pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" + pkg3_resource "k8s.io/apimachinery/pkg/api/resource" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + pkg5_intstr "k8s.io/apimachinery/pkg/util/intstr" + pkg4_v1 "k8s.io/kubernetes/pkg/api/v1" "reflect" "runtime" time "time" @@ -65,11 +65,11 @@ func init() { panic(err) } if false { // reference the types, but skip this branch at build/run time - var v0 pkg4_resource.Quantity - var v1 pkg1_unversioned.TypeMeta - var v2 pkg2_v1.ObjectMeta - var v3 pkg3_types.UID - var v4 pkg5_intstr.IntOrString + var v0 pkg3_resource.Quantity + var v1 pkg1_v1.TypeMeta + var v2 pkg2_types.UID + var v3 pkg5_intstr.IntOrString + var v4 pkg4_v1.PodTemplateSpec var v5 time.Time _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 } @@ -144,25 +144,25 @@ func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym6 := z.DecBinary() - _ = yym6 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct7 := r.ContainerType() - if yyct7 == codecSelferValueTypeMap1234 
{ - yyl7 := r.ReadMapStart() - if yyl7 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl7, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct7 == codecSelferValueTypeArray1234 { - yyl7 := r.ReadArrayStart() - if yyl7 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl7, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -174,12 +174,12 @@ func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys8Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys8Slc - var yyhl8 bool = l >= 0 - for yyj8 := 0; ; yyj8++ { - if yyhl8 { - if yyj8 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -188,20 +188,26 @@ func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys8Slc = r.DecodeBytes(yys8Slc, true, true) - yys8 := string(yys8Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys8 { + switch yys3 { case "replicas": if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int32(r.DecodeInt(32)) + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } } default: - z.DecStructFieldNotFound(-1, yys8) - } // end switch yys8 - } // end for yyj8 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -209,16 +215,16 @@ func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb10 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb10 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -226,20 +232,26 @@ func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int32(r.DecodeInt(32)) + yyv7 := &x.Replicas + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } } for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb10 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb10 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -251,35 +263,35 @@ func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym12 := z.EncBinary() - _ = yym12 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() 
&& z.EncExt(x) { } else { - yysep13 := !z.EncBinary() - yy2arr13 := z.EncBasicHandle().StructToArray - var yyq13 [3]bool - _, _, _ = yysep13, yyq13, yy2arr13 - const yyr13 bool = false - yyq13[1] = len(x.Selector) != 0 - yyq13[2] = x.TargetSelector != "" - var yynn13 int - if yyr13 || yy2arr13 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = len(x.Selector) != 0 + yyq2[2] = x.TargetSelector != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn13 = 1 - for _, b := range yyq13 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn13++ + yynn2++ } } - r.EncodeMapStart(yynn13) - yynn13 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr13 || yy2arr13 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym15 := z.EncBinary() - _ = yym15 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeInt(int64(x.Replicas)) @@ -288,21 +300,21 @@ func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("replicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeInt(int64(x.Replicas)) } } - if yyr13 || yy2arr13 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq13[1] { + if yyq2[1] { if x.Selector == nil { r.EncodeNil() } else { - yym18 := z.EncBinary() - _ = yym18 + yym7 := z.EncBinary() + _ = yym7 if false { } else { z.F.EncMapStringStringV(x.Selector, false, e) @@ -312,15 +324,15 @@ func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq13[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("selector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Selector == nil { r.EncodeNil() } else { - yym19 := z.EncBinary() - _ = yym19 + yym8 := z.EncBinary() + _ = yym8 if false { } else { z.F.EncMapStringStringV(x.Selector, false, e) @@ -328,11 +340,11 @@ func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr13 || yy2arr13 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq13[2] { - yym21 := z.EncBinary() - _ = yym21 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.TargetSelector)) @@ -341,19 +353,19 @@ func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq13[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("targetSelector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym22 := z.EncBinary() - _ = yym22 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.TargetSelector)) } } } - if yyr13 || yy2arr13 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -366,25 +378,25 @@ func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym23 := z.DecBinary() - _ = yym23 + yym1 := z.DecBinary() + _ = yym1 if false { } else if 
z.HasExtensions() && z.DecExt(x) { } else { - yyct24 := r.ContainerType() - if yyct24 == codecSelferValueTypeMap1234 { - yyl24 := r.ReadMapStart() - if yyl24 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl24, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct24 == codecSelferValueTypeArray1234 { - yyl24 := r.ReadArrayStart() - if yyl24 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl24, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -396,12 +408,12 @@ func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys25Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys25Slc - var yyhl25 bool = l >= 0 - for yyj25 := 0; ; yyj25++ { - if yyhl25 { - if yyj25 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -410,38 +422,50 @@ func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys25Slc = r.DecodeBytes(yys25Slc, true, true) - yys25 := string(yys25Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys25 { + switch yys3 { case "replicas": if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int32(r.DecodeInt(32)) + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } } case "selector": if r.TryDecodeAsNil() { x.Selector = nil } else { - yyv27 := &x.Selector - yym28 := z.DecBinary() - _ = yym28 + yyv6 := &x.Selector + yym7 := z.DecBinary() + _ = yym7 if false { } else { - z.F.DecMapStringStringX(yyv27, false, d) + z.F.DecMapStringStringX(yyv6, false, d) } } case "targetSelector": if r.TryDecodeAsNil() { x.TargetSelector = "" } else { - x.TargetSelector = string(r.DecodeString()) + yyv8 := &x.TargetSelector + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys25) - } // end switch yys25 - } // end for yyj25 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -449,16 +473,16 @@ func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj30 int - var yyb30 bool - var yyhl30 bool = l >= 0 - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb30 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb30 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -466,15 +490,21 @@ func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int32(r.DecodeInt(32)) + yyv11 := &x.Replicas + yym12 := 
z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(yyv11)) = int32(r.DecodeInt(32)) + } } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb30 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb30 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -482,21 +512,21 @@ func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Selector = nil } else { - yyv32 := &x.Selector - yym33 := z.DecBinary() - _ = yym33 + yyv13 := &x.Selector + yym14 := z.DecBinary() + _ = yym14 if false { } else { - z.F.DecMapStringStringX(yyv32, false, d) + z.F.DecMapStringStringX(yyv13, false, d) } } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb30 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb30 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -504,20 +534,26 @@ func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.TargetSelector = "" } else { - x.TargetSelector = string(r.DecodeString()) + yyv15 := &x.TargetSelector + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } for { - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb30 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb30 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj30-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -529,39 +565,39 @@ func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym35 := z.EncBinary() - _ = yym35 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep36 := !z.EncBinary() - yy2arr36 := z.EncBasicHandle().StructToArray - var yyq36 [5]bool - _, _, _ = yysep36, yyq36, yy2arr36 - const yyr36 bool = false - yyq36[0] = x.Kind != "" - yyq36[1] = x.APIVersion != "" - yyq36[2] = true - yyq36[3] = true - yyq36[4] = true - var yynn36 int - if yyr36 || yy2arr36 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn36 = 0 - for _, b := range yyq36 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn36++ + yynn2++ } } - r.EncodeMapStart(yynn36) - yynn36 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr36 || yy2arr36 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq36[0] { - yym38 := z.EncBinary() - _ = yym38 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -570,23 +606,23 @@ func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq36[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym39 := z.EncBinary() - _ = yym39 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - 
if yyr36 || yy2arr36 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq36[1] { - yym41 := z.EncBinary() - _ = yym41 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -595,70 +631,82 @@ func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq36[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym42 := z.EncBinary() - _ = yym42 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr36 || yy2arr36 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq36[2] { - yy44 := &x.ObjectMeta - yy44.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq36[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy45 := &x.ObjectMeta - yy45.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr36 || yy2arr36 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq36[3] { - yy47 := &x.Spec - yy47.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq36[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy48 := &x.Spec - yy48.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } } - if yyr36 || yy2arr36 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq36[4] { - yy50 := &x.Status - yy50.CodecEncodeSelf(e) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq36[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy51 := &x.Status - yy51.CodecEncodeSelf(e) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } - if yyr36 || yy2arr36 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -671,25 +719,25 @@ func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym52 := z.DecBinary() - _ = yym52 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct53 := r.ContainerType() - if yyct53 == codecSelferValueTypeMap1234 { - yyl53 := r.ReadMapStart() - if yyl53 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl53, d) + x.codecDecodeSelfFromMap(yyl2, d) } - 
} else if yyct53 == codecSelferValueTypeArray1234 { - yyl53 := r.ReadArrayStart() - if yyl53 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl53, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -701,12 +749,12 @@ func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys54Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys54Slc - var yyhl54 bool = l >= 0 - for yyj54 := 0; ; yyj54++ { - if yyhl54 { - if yyj54 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -715,47 +763,65 @@ func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys54Slc = r.DecodeBytes(yys54Slc, true, true) - yys54 := string(yys54Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys54 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv57 := &x.ObjectMeta - yyv57.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = ScaleSpec{} } else { - yyv58 := &x.Spec - yyv58.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = ScaleStatus{} } else { - yyv59 := &x.Status - yyv59.CodecDecodeSelf(d) + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys54) - } // end switch yys54 - } // end for yyj54 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -763,16 +829,16 @@ func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj60 int - var yyb60 bool - var yyhl60 bool = l >= 0 - yyj60++ - if yyhl60 { - yyb60 = yyj60 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb60 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb60 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -780,15 +846,21 @@ func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj60++ - if 
yyhl60 { - yyb60 = yyj60 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb60 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb60 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -796,32 +868,44 @@ func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj60++ - if yyhl60 { - yyb60 = yyj60 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb60 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb60 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv63 := &x.ObjectMeta - yyv63.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj60++ - if yyhl60 { - yyb60 = yyj60 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb60 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb60 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -829,16 +913,16 @@ func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Spec = ScaleSpec{} } else { - yyv64 := &x.Spec - yyv64.CodecDecodeSelf(d) + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) } - yyj60++ - if yyhl60 { - yyb60 = yyj60 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb60 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb60 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -846,21 +930,21 @@ func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Status = ScaleStatus{} } else { - yyv65 := &x.Status - yyv65.CodecDecodeSelf(d) + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) } for { - yyj60++ - if yyhl60 { - yyb60 = yyj60 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb60 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb60 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj60-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -872,36 +956,36 @@ func (x *ReplicationControllerDummy) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym66 := z.EncBinary() - _ = yym66 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep67 := !z.EncBinary() - yy2arr67 := z.EncBasicHandle().StructToArray - var yyq67 [2]bool - _, _, _ = yysep67, yyq67, yy2arr67 - const yyr67 bool = false - yyq67[0] = x.Kind != "" - yyq67[1] = x.APIVersion != "" - var yynn67 int - if yyr67 || yy2arr67 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn67 = 0 - for _, b := range yyq67 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn67++ + yynn2++ } } - r.EncodeMapStart(yynn67) - yynn67 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 
} - if yyr67 || yy2arr67 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq67[0] { - yym69 := z.EncBinary() - _ = yym69 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -910,23 +994,23 @@ func (x *ReplicationControllerDummy) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq67[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym70 := z.EncBinary() - _ = yym70 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr67 || yy2arr67 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq67[1] { - yym72 := z.EncBinary() - _ = yym72 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -935,19 +1019,19 @@ func (x *ReplicationControllerDummy) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq67[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym73 := z.EncBinary() - _ = yym73 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr67 || yy2arr67 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -960,25 +1044,25 @@ func (x *ReplicationControllerDummy) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym74 := z.DecBinary() - _ = yym74 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct75 := r.ContainerType() - if yyct75 == codecSelferValueTypeMap1234 { - yyl75 := r.ReadMapStart() - if yyl75 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl75, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct75 == codecSelferValueTypeArray1234 { - yyl75 := r.ReadArrayStart() - if yyl75 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl75, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -990,12 +1074,12 @@ func (x *ReplicationControllerDummy) codecDecodeSelfFromMap(l int, d *codec1978. var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys76Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys76Slc - var yyhl76 bool = l >= 0 - for yyj76 := 0; ; yyj76++ { - if yyhl76 { - if yyj76 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -1004,26 +1088,38 @@ func (x *ReplicationControllerDummy) codecDecodeSelfFromMap(l int, d *codec1978. 
} } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys76Slc = r.DecodeBytes(yys76Slc, true, true) - yys76 := string(yys76Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys76 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys76) - } // end switch yys76 - } // end for yyj76 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -1031,16 +1127,16 @@ func (x *ReplicationControllerDummy) codecDecodeSelfFromArray(l int, d *codec197 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj79 int - var yyb79 bool - var yyhl79 bool = l >= 0 - yyj79++ - if yyhl79 { - yyb79 = yyj79 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb79 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb79 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1048,15 +1144,21 @@ func (x *ReplicationControllerDummy) codecDecodeSelfFromArray(l int, d *codec197 if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv9 := &x.Kind + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } } - yyj79++ - if yyhl79 { - yyb79 = yyj79 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb79 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb79 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1064,159 +1166,107 @@ func (x *ReplicationControllerDummy) codecDecodeSelfFromArray(l int, d *codec197 if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv11 := &x.APIVersion + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } for { - yyj79++ - if yyhl79 { - yyb79 = yyj79 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb79 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb79 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj79-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *SubresourceReference) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *CustomMetricTarget) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym82 := z.EncBinary() - _ = yym82 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep83 := !z.EncBinary() - yy2arr83 := z.EncBasicHandle().StructToArray - var yyq83 [4]bool - _, _, _ = yysep83, yyq83, yy2arr83 - const yyr83 bool = false - yyq83[0] = x.Kind != "" - yyq83[1] = x.Name != "" - yyq83[2] = x.APIVersion != "" - yyq83[3] = x.Subresource != "" - var yynn83 int - if yyr83 || yy2arr83 { - r.EncodeArrayStart(4) + yysep2 := 
!z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) } else { - yynn83 = 0 - for _, b := range yyq83 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn83++ + yynn2++ } } - r.EncodeMapStart(yynn83) - yynn83 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr83 || yy2arr83 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq83[0] { - yym85 := z.EncBinary() - _ = yym85 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } + yym4 := z.EncBinary() + _ = yym4 + if false { } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } else { - if yyq83[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym86 := z.EncBinary() - _ = yym86 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr83 || yy2arr83 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq83[1] { - yym88 := z.EncBinary() - _ = yym88 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq83[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym89 := z.EncBinary() - _ = yym89 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } - if yyr83 || yy2arr83 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq83[2] { - yym91 := z.EncBinary() - _ = yym91 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } + yy7 := &x.TargetValue + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) } else { - r.EncodeString(codecSelferC_UTF81234, "") + z.EncFallback(yy7) } } else { - if yyq83[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym92 := z.EncBinary() - _ = yym92 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr83 || yy2arr83 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq83[3] { - yym94 := z.EncBinary() - _ = yym94 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.TargetValue + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if 
yyq83[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subresource")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym95 := z.EncBinary() - _ = yym95 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) - } + z.EncFallback(yy9) } } - if yyr83 || yy2arr83 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -1225,29 +1275,29 @@ func (x *SubresourceReference) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *SubresourceReference) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *CustomMetricTarget) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym96 := z.DecBinary() - _ = yym96 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct97 := r.ContainerType() - if yyct97 == codecSelferValueTypeMap1234 { - yyl97 := r.ReadMapStart() - if yyl97 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl97, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct97 == codecSelferValueTypeArray1234 { - yyl97 := r.ReadArrayStart() - if yyl97 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl97, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -1255,16 +1305,16 @@ func (x *SubresourceReference) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *SubresourceReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *CustomMetricTarget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys98Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys98Slc - var yyhl98 bool = l >= 0 - for yyj98 := 0; ; yyj98++ { - if yyhl98 { - if yyj98 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -1273,71 +1323,58 @@ func (x *SubresourceReference) codecDecodeSelfFromMap(l int, d *codec1978.Decode } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys98Slc = r.DecodeBytes(yys98Slc, true, true) - yys98 := string(yys98Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys98 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } + switch yys3 { case "name": if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } - case "subresource": + case "value": if r.TryDecodeAsNil() { - x.Subresource = "" + x.TargetValue = pkg3_resource.Quantity{} } else { - x.Subresource = string(r.DecodeString()) + yyv6 := &x.TargetValue + yym7 := z.DecBinary() + 
_ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } } default: - z.DecStructFieldNotFound(-1, yys98) - } // end switch yys98 - } // end for yyj98 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *SubresourceReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *CustomMetricTarget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj103 int - var yyb103 bool - var yyhl103 bool = l >= 0 - yyj103++ - if yyhl103 { - yyb103 = yyj103 > l - } else { - yyb103 = r.CheckBreak() - } - if yyb103 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj103++ - if yyhl103 { - yyb103 = yyj103 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb103 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb103 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1345,106 +1382,113 @@ func (x *SubresourceReference) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) - } - yyj103++ - if yyhl103 { - yyb103 = yyj103 > l - } else { - yyb103 = r.CheckBreak() - } - if yyb103 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) + yyv9 := &x.Name + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } } - yyj103++ - if yyhl103 { - yyb103 = yyj103 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb103 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb103 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Subresource = "" + x.TargetValue = pkg3_resource.Quantity{} } else { - x.Subresource = string(r.DecodeString()) + yyv11 := &x.TargetValue + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) + } else { + z.DecFallback(yyv11, false) + } } for { - yyj103++ - if yyhl103 { - yyb103 = yyj103 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb103 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb103 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj103-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *CPUTargetUtilization) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *CustomMetricTargetList) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym108 := z.EncBinary() - _ = yym108 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep109 := !z.EncBinary() - yy2arr109 := 
z.EncBasicHandle().StructToArray - var yyq109 [1]bool - _, _, _ = yysep109, yyq109, yy2arr109 - const yyr109 bool = false - var yynn109 int - if yyr109 || yy2arr109 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn109 = 1 - for _, b := range yyq109 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn109++ + yynn2++ } } - r.EncodeMapStart(yynn109) - yynn109 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr109 || yy2arr109 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym111 := z.EncBinary() - _ = yym111 - if false { + if x.Items == nil { + r.EncodeNil() } else { - r.EncodeInt(int64(x.TargetPercentage)) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e) + } } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetPercentage")) + r.EncodeString(codecSelferC_UTF81234, string("items")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym112 := z.EncBinary() - _ = yym112 - if false { + if x.Items == nil { + r.EncodeNil() } else { - r.EncodeInt(int64(x.TargetPercentage)) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e) + } } } - if yyr109 || yy2arr109 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -1453,29 +1497,29 @@ func (x *CPUTargetUtilization) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *CPUTargetUtilization) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *CustomMetricTargetList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym113 := z.DecBinary() - _ = yym113 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct114 := r.ContainerType() - if yyct114 == codecSelferValueTypeMap1234 { - yyl114 := r.ReadMapStart() - if yyl114 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl114, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct114 == codecSelferValueTypeArray1234 { - yyl114 := r.ReadArrayStart() - if yyl114 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl114, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -1483,16 +1527,16 @@ func (x *CPUTargetUtilization) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *CPUTargetUtilization) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *CustomMetricTargetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys115Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys115Slc - var yyhl115 bool = l >= 0 - for yyj115 := 0; ; yyj115++ { - if yyhl115 { - if yyj115 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 
bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -1501,96 +1545,108 @@ func (x *CPUTargetUtilization) codecDecodeSelfFromMap(l int, d *codec1978.Decode } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys115Slc = r.DecodeBytes(yys115Slc, true, true) - yys115 := string(yys115Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys115 { - case "targetPercentage": + switch yys3 { + case "items": if r.TryDecodeAsNil() { - x.TargetPercentage = 0 + x.Items = nil } else { - x.TargetPercentage = int32(r.DecodeInt(32)) + yyv4 := &x.Items + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv4), d) + } } default: - z.DecStructFieldNotFound(-1, yys115) - } // end switch yys115 - } // end for yyj115 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *CPUTargetUtilization) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *CustomMetricTargetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj117 int - var yyb117 bool - var yyhl117 bool = l >= 0 - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb117 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb117 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.TargetPercentage = 0 + x.Items = nil } else { - x.TargetPercentage = int32(r.DecodeInt(32)) + yyv7 := &x.Items + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv7), d) + } } for { - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb117 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb117 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj117-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *CustomMetricTarget) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *CustomMetricCurrentStatus) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym119 := z.EncBinary() - _ = yym119 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep120 := !z.EncBinary() - yy2arr120 := z.EncBasicHandle().StructToArray - var yyq120 [2]bool - _, _, _ = yysep120, yyq120, yy2arr120 - const yyr120 bool = false - var yynn120 int - if yyr120 || yy2arr120 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn120 = 2 - for _, b := range yyq120 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn120++ + yynn2++ } } - r.EncodeMapStart(yynn120) - yynn120 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr120 || yy2arr120 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym122 := 
z.EncBinary() - _ = yym122 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) @@ -1599,41 +1655,41 @@ func (x *CustomMetricTarget) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("name")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym123 := z.EncBinary() - _ = yym123 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } - if yyr120 || yy2arr120 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy125 := &x.TargetValue - yym126 := z.EncBinary() - _ = yym126 + yy7 := &x.CurrentValue + yym8 := z.EncBinary() + _ = yym8 if false { - } else if z.HasExtensions() && z.EncExt(yy125) { - } else if !yym126 && z.IsJSONHandle() { - z.EncJSONMarshal(yy125) + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) } else { - z.EncFallback(yy125) + z.EncFallback(yy7) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("value")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy127 := &x.TargetValue - yym128 := z.EncBinary() - _ = yym128 + yy9 := &x.CurrentValue + yym10 := z.EncBinary() + _ = yym10 if false { - } else if z.HasExtensions() && z.EncExt(yy127) { - } else if !yym128 && z.IsJSONHandle() { - z.EncJSONMarshal(yy127) + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) } else { - z.EncFallback(yy127) + z.EncFallback(yy9) } } - if yyr120 || yy2arr120 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -1642,29 +1698,29 @@ func (x *CustomMetricTarget) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *CustomMetricTarget) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *CustomMetricCurrentStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym129 := z.DecBinary() - _ = yym129 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct130 := r.ContainerType() - if yyct130 == codecSelferValueTypeMap1234 { - yyl130 := r.ReadMapStart() - if yyl130 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl130, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct130 == codecSelferValueTypeArray1234 { - yyl130 := r.ReadArrayStart() - if yyl130 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl130, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -1672,16 +1728,16 @@ func (x *CustomMetricTarget) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *CustomMetricTarget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *CustomMetricCurrentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys131Slc = z.DecScratchBuffer() // default slice to decode into - _ = 
yys131Slc - var yyhl131 bool = l >= 0 - for yyj131 := 0; ; yyj131++ { - if yyhl131 { - if yyj131 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -1690,52 +1746,58 @@ func (x *CustomMetricTarget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys131Slc = r.DecodeBytes(yys131Slc, true, true) - yys131 := string(yys131Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys131 { + switch yys3 { case "name": if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "value": if r.TryDecodeAsNil() { - x.TargetValue = pkg4_resource.Quantity{} + x.CurrentValue = pkg3_resource.Quantity{} } else { - yyv133 := &x.TargetValue - yym134 := z.DecBinary() - _ = yym134 + yyv6 := &x.CurrentValue + yym7 := z.DecBinary() + _ = yym7 if false { - } else if z.HasExtensions() && z.DecExt(yyv133) { - } else if !yym134 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv133) + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) } else { - z.DecFallback(yyv133, false) + z.DecFallback(yyv6, false) } } default: - z.DecStructFieldNotFound(-1, yys131) - } // end switch yys131 - } // end for yyj131 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *CustomMetricTarget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *CustomMetricCurrentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj135 int - var yyb135 bool - var yyhl135 bool = l >= 0 - yyj135++ - if yyhl135 { - yyb135 = yyj135 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb135 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb135 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1743,89 +1805,95 @@ func (x *CustomMetricTarget) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv9 := &x.Name + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } } - yyj135++ - if yyhl135 { - yyb135 = yyj135 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb135 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb135 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.TargetValue = pkg4_resource.Quantity{} + x.CurrentValue = pkg3_resource.Quantity{} } else { - yyv137 := &x.TargetValue - yym138 := z.DecBinary() - _ = yym138 + yyv11 := &x.CurrentValue + yym12 := z.DecBinary() + _ = yym12 if false { - } else if z.HasExtensions() && z.DecExt(yyv137) { - } else if !yym138 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv137) + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) } else { - z.DecFallback(yyv137, false) + z.DecFallback(yyv11, false) } } for { 
- yyj135++ - if yyhl135 { - yyb135 = yyj135 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb135 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb135 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj135-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *CustomMetricTargetList) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *CustomMetricCurrentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym139 := z.EncBinary() - _ = yym139 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep140 := !z.EncBinary() - yy2arr140 := z.EncBasicHandle().StructToArray - var yyq140 [1]bool - _, _, _ = yysep140, yyq140, yy2arr140 - const yyr140 bool = false - var yynn140 int - if yyr140 || yy2arr140 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn140 = 1 - for _, b := range yyq140 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn140++ + yynn2++ } } - r.EncodeMapStart(yynn140) - yynn140 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr140 || yy2arr140 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym142 := z.EncBinary() - _ = yym142 + yym4 := z.EncBinary() + _ = yym4 if false { } else { - h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e) + h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e) } } } else { @@ -1835,15 +1903,15 @@ func (x *CustomMetricTargetList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym143 := z.EncBinary() - _ = yym143 + yym5 := z.EncBinary() + _ = yym5 if false { } else { - h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e) + h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e) } } } - if yyr140 || yy2arr140 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -1852,29 +1920,29 @@ func (x *CustomMetricTargetList) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *CustomMetricTargetList) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *CustomMetricCurrentStatusList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym144 := z.DecBinary() - _ = yym144 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct145 := r.ContainerType() - if yyct145 == codecSelferValueTypeMap1234 { - yyl145 := r.ReadMapStart() - if yyl145 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl145, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct145 == codecSelferValueTypeArray1234 { - yyl145 := r.ReadArrayStart() - if yyl145 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - 
x.codecDecodeSelfFromArray(yyl145, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -1882,16 +1950,16 @@ func (x *CustomMetricTargetList) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *CustomMetricTargetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys146Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys146Slc - var yyhl146 bool = l >= 0 - for yyj146 := 0; ; yyj146++ { - if yyhl146 { - if yyj146 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -1900,43 +1968,43 @@ func (x *CustomMetricTargetList) codecDecodeSelfFromMap(l int, d *codec1978.Deco } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys146Slc = r.DecodeBytes(yys146Slc, true, true) - yys146 := string(yys146Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys146 { + switch yys3 { case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv147 := &x.Items - yym148 := z.DecBinary() - _ = yym148 + yyv4 := &x.Items + yym5 := z.DecBinary() + _ = yym5 if false { } else { - h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv147), d) + h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv4), d) } } default: - z.DecStructFieldNotFound(-1, yys146) - } // end switch yys146 - } // end for yyj146 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *CustomMetricTargetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj149 int - var yyb149 bool - var yyhl149 bool = l >= 0 - yyj149++ - if yyhl149 { - yyb149 = yyj149 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb149 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb149 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1944,138 +2012,234 @@ func (x *CustomMetricTargetList) codecDecodeSelfFromArray(l int, d *codec1978.De if r.TryDecodeAsNil() { x.Items = nil } else { - yyv150 := &x.Items - yym151 := z.DecBinary() - _ = yym151 + yyv7 := &x.Items + yym8 := z.DecBinary() + _ = yym8 if false { } else { - h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv150), d) + h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv7), d) } } for { - yyj149++ - if yyhl149 { - yyb149 = yyj149 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb149 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb149 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj149-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *CustomMetricCurrentStatus) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ThirdPartyResource) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil 
{ r.EncodeNil() } else { - yym152 := z.EncBinary() - _ = yym152 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep153 := !z.EncBinary() - yy2arr153 := z.EncBasicHandle().StructToArray - var yyq153 [2]bool - _, _, _ = yysep153, yyq153, yy2arr153 - const yyr153 bool = false - var yynn153 int - if yyr153 || yy2arr153 { - r.EncodeArrayStart(2) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = x.Description != "" + yyq2[4] = len(x.Versions) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) } else { - yynn153 = 2 - for _, b := range yyq153 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn153++ + yynn2++ } } - r.EncodeMapStart(yynn153) - yynn153 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr153 || yy2arr153 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym155 := z.EncBinary() - _ = yym155 - if false { + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + r.EncodeString(codecSelferC_UTF81234, "") } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym156 := z.EncBinary() - _ = yym156 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } } } - if yyr153 || yy2arr153 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy158 := &x.CurrentValue - yym159 := z.EncBinary() - _ = yym159 - if false { - } else if z.HasExtensions() && z.EncExt(yy158) { - } else if !yym159 && z.IsJSONHandle() { - z.EncJSONMarshal(yy158) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } } else { - z.EncFallback(yy158) + r.EncodeString(codecSelferC_UTF81234, "") } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy160 := &x.CurrentValue - yym161 := z.EncBinary() - _ = yym161 - if false { - } else if z.HasExtensions() && z.EncExt(yy160) { - } else if !yym161 && z.IsJSONHandle() { - z.EncJSONMarshal(yy160) + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { - z.EncFallback(yy160) + r.EncodeNil() + } + } else { + if 
yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr153 || yy2arr153 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Description)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("description")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Description)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Versions == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + h.encSliceAPIVersion(([]APIVersion)(x.Versions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("versions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Versions == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encSliceAPIVersion(([]APIVersion)(x.Versions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) } } } } -func (x *CustomMetricCurrentStatus) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ThirdPartyResource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym162 := z.DecBinary() - _ = yym162 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct163 := r.ContainerType() - if yyct163 == codecSelferValueTypeMap1234 { - yyl163 := r.ReadMapStart() - if yyl163 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl163, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct163 == codecSelferValueTypeArray1234 { - yyl163 := r.ReadArrayStart() - if yyl163 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl163, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -2083,16 +2247,16 @@ func (x *CustomMetricCurrentStatus) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *CustomMetricCurrentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ThirdPartyResource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys164Slc = z.DecScratchBuffer() // 
default slice to decode into - _ = yys164Slc - var yyhl164 bool = l >= 0 - for yyj164 := 0; ; yyj164++ { - if yyhl164 { - if yyj164 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -2101,142 +2265,334 @@ func (x *CustomMetricCurrentStatus) codecDecodeSelfFromMap(l int, d *codec1978.D } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys164Slc = r.DecodeBytes(yys164Slc, true, true) - yys164 := string(yys164Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys164 { - case "name": + switch yys3 { + case "kind": if r.TryDecodeAsNil() { - x.Name = "" + x.Kind = "" } else { - x.Name = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } - case "value": + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "description": if r.TryDecodeAsNil() { - x.CurrentValue = pkg4_resource.Quantity{} + x.Description = "" + } else { + yyv10 := &x.Description + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "versions": + if r.TryDecodeAsNil() { + x.Versions = nil } else { - yyv166 := &x.CurrentValue - yym167 := z.DecBinary() - _ = yym167 + yyv12 := &x.Versions + yym13 := z.DecBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.DecExt(yyv166) { - } else if !yym167 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv166) } else { - z.DecFallback(yyv166, false) + h.decSliceAPIVersion((*[]APIVersion)(yyv12), d) } } default: - z.DecStructFieldNotFound(-1, yys164) - } // end switch yys164 - } // end for yyj164 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *CustomMetricCurrentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *ThirdPartyResource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj168 int - var yyb168 bool - var yyhl168 bool = l >= 0 - yyj168++ - if yyhl168 { - yyb168 = yyj168 > l + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb168 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb168 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Name = "" + x.Kind = "" + } else { + yyv15 := &x.Kind + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = 
"" + } else { + yyv17 := &x.APIVersion + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv19 := &x.ObjectMeta + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else { + z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Description = "" } else { - x.Name = string(r.DecodeString()) + yyv21 := &x.Description + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } } - yyj168++ - if yyhl168 { - yyb168 = yyj168 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb168 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb168 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.CurrentValue = pkg4_resource.Quantity{} + x.Versions = nil } else { - yyv170 := &x.CurrentValue - yym171 := z.DecBinary() - _ = yym171 + yyv23 := &x.Versions + yym24 := z.DecBinary() + _ = yym24 if false { - } else if z.HasExtensions() && z.DecExt(yyv170) { - } else if !yym171 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv170) } else { - z.DecFallback(yyv170, false) + h.decSliceAPIVersion((*[]APIVersion)(yyv23), d) } } for { - yyj168++ - if yyhl168 { - yyb168 = yyj168 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb168 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb168 { + if yyb14 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj168-1, "") + z.DecStructFieldNotFound(yyj14-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *CustomMetricCurrentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ThirdPartyResourceList) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym172 := z.EncBinary() - _ = yym172 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep173 := !z.EncBinary() - yy2arr173 := z.EncBasicHandle().StructToArray - var yyq173 [1]bool - _, _, _ = yysep173, yyq173, yy2arr173 - const yyr173 bool = false - var yynn173 int - if yyr173 || yy2arr173 { - r.EncodeArrayStart(1) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) } else { - yynn173 = 1 - for _, b := range yyq173 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn173++ + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, 
string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) } } - r.EncodeMapStart(yynn173) - yynn173 = 0 } - if yyr173 || yy2arr173 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym175 := z.EncBinary() - _ = yym175 + yym15 := z.EncBinary() + _ = yym15 if false { } else { - h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e) + h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e) } } } else { @@ -2246,15 +2602,15 @@ func (x *CustomMetricCurrentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym176 := z.EncBinary() - _ = yym176 + yym16 := z.EncBinary() + _ = yym16 if false { } else { - h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e) + h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e) } } } - if yyr173 || yy2arr173 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -2263,29 +2619,29 @@ func (x *CustomMetricCurrentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *CustomMetricCurrentStatusList) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ThirdPartyResourceList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym177 := z.DecBinary() - _ = yym177 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct178 := r.ContainerType() - if yyct178 == codecSelferValueTypeMap1234 { - yyl178 := r.ReadMapStart() - if yyl178 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl178, d) + 
x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct178 == codecSelferValueTypeArray1234 { - yyl178 := r.ReadArrayStart() - if yyl178 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl178, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -2293,16 +2649,16 @@ func (x *CustomMetricCurrentStatusList) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ThirdPartyResourceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys179Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys179Slc - var yyhl179 bool = l >= 0 - for yyj179 := 0; ; yyj179++ { - if yyhl179 { - if yyj179 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -2311,195 +2667,235 @@ func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromMap(l int, d *codec19 } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys179Slc = r.DecodeBytes(yys179Slc, true, true) - yys179 := string(yys179Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys179 { + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv180 := &x.Items - yym181 := z.DecBinary() - _ = yym181 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv180), d) + h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys179) - } // end switch yys179 - } // end for yyj179 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *ThirdPartyResourceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj182 int - var yyb182 bool - var yyhl182 bool = l >= 0 - yyj182++ - if yyhl182 { - yyb182 = yyj182 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb182 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb182 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if 
r.TryDecodeAsNil() { - x.Items = nil + x.Kind = "" } else { - yyv183 := &x.Items - yym184 := z.DecBinary() - _ = yym184 + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 if false { } else { - h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv183), d) + *((*string)(yyv13)) = r.DecodeString() } } - for { - yyj182++ - if yyhl182 { - yyb182 = yyj182 > l - } else { - yyb182 = r.CheckBreak() - } - if yyb182 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj182-1, "") + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" } else { - yym185 := z.EncBinary() - _ = yym185 + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 if false { - } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep186 := !z.EncBinary() - yy2arr186 := z.EncBasicHandle().StructToArray - var yyq186 [4]bool - _, _, _ = yysep186, yyq186, yy2arr186 - const yyr186 bool = false - yyq186[1] = x.MinReplicas != nil - yyq186[3] = x.CPUUtilization != nil - var yynn186 int - if yyr186 || yy2arr186 { - r.EncodeArrayStart(4) + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *APIVersion) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) } else { - yynn186 = 2 - for _, b := range yyq186 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn186++ - } - } - r.EncodeMapStart(yynn186) - yynn186 = 0 - } - if yyr186 || yy2arr186 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy188 := &x.ScaleRef - yy188.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("scaleRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy189 := &x.ScaleRef - yy189.CodecEncodeSelf(e) - } - if yyr186 || yy2arr186 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq186[1] { - if x.MinReplicas == nil { - r.EncodeNil() - } else { - yy191 := *x.MinReplicas - yym192 := z.EncBinary() - _ = yym192 - if false { - } else { - r.EncodeInt(int64(yy191)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq186[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MinReplicas == nil { - r.EncodeNil() - } else { - yy193 := *x.MinReplicas - yym194 := z.EncBinary() - _ = yym194 - if false { - } else { - r.EncodeInt(int64(yy193)) - } + yynn2++ } } + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr186 || yy2arr186 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym196 := z.EncBinary() - _ = yym196 - if false { - } else { - r.EncodeInt(int64(x.MaxReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym197 := z.EncBinary() - _ = yym197 - if false { - } else { - r.EncodeInt(int64(x.MaxReplicas)) - } - } - if yyr186 || yy2arr186 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq186[3] { - if x.CPUUtilization == nil { - r.EncodeNil() + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { } else { - x.CPUUtilization.CodecEncodeSelf(e) + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq186[3] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cpuUtilization")) + r.EncodeString(codecSelferC_UTF81234, string("name")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CPUUtilization == nil { - r.EncodeNil() + yym5 := z.EncBinary() + _ = yym5 + if false { } else { - x.CPUUtilization.CodecEncodeSelf(e) + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } } - if yyr186 || yy2arr186 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -2508,29 +2904,29 @@ func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *HorizontalPodAutoscalerSpec) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *APIVersion) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym199 := z.DecBinary() - _ = yym199 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct200 := r.ContainerType() - if yyct200 == codecSelferValueTypeMap1234 { - yyl200 := r.ReadMapStart() - if yyl200 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl200, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct200 == 
codecSelferValueTypeArray1234 { - yyl200 := r.ReadArrayStart() - if yyl200 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl200, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -2538,16 +2934,16 @@ func (x *HorizontalPodAutoscalerSpec) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *APIVersion) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys201Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys201Slc - var yyhl201 bool = l >= 0 - for yyj201 := 0; ; yyj201++ { - if yyhl201 { - if yyj201 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -2556,345 +2952,221 @@ func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978 } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys201Slc = r.DecodeBytes(yys201Slc, true, true) - yys201 := string(yys201Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys201 { - case "scaleRef": - if r.TryDecodeAsNil() { - x.ScaleRef = SubresourceReference{} - } else { - yyv202 := &x.ScaleRef - yyv202.CodecDecodeSelf(d) - } - case "minReplicas": + switch yys3 { + case "name": if r.TryDecodeAsNil() { - if x.MinReplicas != nil { - x.MinReplicas = nil - } + x.Name = "" } else { - if x.MinReplicas == nil { - x.MinReplicas = new(int32) - } - yym204 := z.DecBinary() - _ = yym204 + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 if false { } else { - *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) - } - } - case "maxReplicas": - if r.TryDecodeAsNil() { - x.MaxReplicas = 0 - } else { - x.MaxReplicas = int32(r.DecodeInt(32)) - } - case "cpuUtilization": - if r.TryDecodeAsNil() { - if x.CPUUtilization != nil { - x.CPUUtilization = nil - } - } else { - if x.CPUUtilization == nil { - x.CPUUtilization = new(CPUTargetUtilization) + *((*string)(yyv4)) = r.DecodeString() } - x.CPUUtilization.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys201) - } // end switch yys201 - } // end for yyj201 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *APIVersion) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj207 int - var yyb207 bool - var yyhl207 bool = l >= 0 - yyj207++ - if yyhl207 { - yyb207 = yyj207 > l - } else { - yyb207 = r.CheckBreak() - } - if yyb207 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ScaleRef = SubresourceReference{} - } else { - yyv208 := &x.ScaleRef - yyv208.CodecDecodeSelf(d) - } - yyj207++ - if yyhl207 { - yyb207 = yyj207 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb207 = r.CheckBreak() + 
yyb6 = r.CheckBreak() } - if yyb207 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.MinReplicas != nil { - x.MinReplicas = nil - } + x.Name = "" } else { - if x.MinReplicas == nil { - x.MinReplicas = new(int32) - } - yym210 := z.DecBinary() - _ = yym210 + yyv7 := &x.Name + yym8 := z.DecBinary() + _ = yym8 if false { } else { - *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) - } - } - yyj207++ - if yyhl207 { - yyb207 = yyj207 > l - } else { - yyb207 = r.CheckBreak() - } - if yyb207 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxReplicas = 0 - } else { - x.MaxReplicas = int32(r.DecodeInt(32)) - } - yyj207++ - if yyhl207 { - yyb207 = yyj207 > l - } else { - yyb207 = r.CheckBreak() - } - if yyb207 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CPUUtilization != nil { - x.CPUUtilization = nil + *((*string)(yyv7)) = r.DecodeString() } - } else { - if x.CPUUtilization == nil { - x.CPUUtilization = new(CPUTargetUtilization) - } - x.CPUUtilization.CodecDecodeSelf(d) } for { - yyj207++ - if yyhl207 { - yyb207 = yyj207 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb207 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb207 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj207-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ThirdPartyResourceData) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym213 := z.EncBinary() - _ = yym213 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep214 := !z.EncBinary() - yy2arr214 := z.EncBasicHandle().StructToArray - var yyq214 [5]bool - _, _, _ = yysep214, yyq214, yy2arr214 - const yyr214 bool = false - yyq214[0] = x.ObservedGeneration != nil - yyq214[1] = x.LastScaleTime != nil - yyq214[4] = x.CurrentCPUUtilizationPercentage != nil - var yynn214 int - if yyr214 || yy2arr214 { - r.EncodeArrayStart(5) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = len(x.Data) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) } else { - yynn214 = 2 - for _, b := range yyq214 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn214++ + yynn2++ } } - r.EncodeMapStart(yynn214) - yynn214 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr214 || yy2arr214 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq214[0] { - if x.ObservedGeneration == nil { - r.EncodeNil() + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { } else { - yy216 := *x.ObservedGeneration - yym217 := z.EncBinary() - _ = yym217 - if false { - } else { - r.EncodeInt(int64(yy216)) - } + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, "") } } 
else { - if yyq214[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ObservedGeneration == nil { - r.EncodeNil() + yym5 := z.EncBinary() + _ = yym5 + if false { } else { - yy218 := *x.ObservedGeneration - yym219 := z.EncBinary() - _ = yym219 - if false { - } else { - r.EncodeInt(int64(yy218)) - } + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr214 || yy2arr214 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq214[1] { - if x.LastScaleTime == nil { - r.EncodeNil() + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { } else { - yym221 := z.EncBinary() - _ = yym221 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { - } else if yym221 { - z.EncBinaryMarshal(x.LastScaleTime) - } else if !yym221 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScaleTime) - } else { - z.EncFallback(x.LastScaleTime) - } + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq214[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastScaleTime")) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LastScaleTime == nil { - r.EncodeNil() + yym8 := z.EncBinary() + _ = yym8 + if false { } else { - yym222 := z.EncBinary() - _ = yym222 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { - } else if yym222 { - z.EncBinaryMarshal(x.LastScaleTime) - } else if !yym222 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScaleTime) - } else { - z.EncFallback(x.LastScaleTime) - } + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr214 || yy2arr214 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym224 := z.EncBinary() - _ = yym224 - if false { - } else { - r.EncodeInt(int64(x.CurrentReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym225 := z.EncBinary() - _ = yym225 - if false { - } else { - r.EncodeInt(int64(x.CurrentReplicas)) - } - } - if yyr214 || yy2arr214 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym227 := z.EncBinary() - _ = yym227 - if false { + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { - r.EncodeInt(int64(x.DesiredReplicas)) + r.EncodeNil() } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("desiredReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym228 := z.EncBinary() - _ = yym228 - if false { - } else { - r.EncodeInt(int64(x.DesiredReplicas)) + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + 
z.EncFallback(yy12) + } } } - if yyr214 || yy2arr214 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq214[4] { - if x.CurrentCPUUtilizationPercentage == nil { + if yyq2[3] { + if x.Data == nil { r.EncodeNil() } else { - yy230 := *x.CurrentCPUUtilizationPercentage - yym231 := z.EncBinary() - _ = yym231 + yym15 := z.EncBinary() + _ = yym15 if false { } else { - r.EncodeInt(int64(yy230)) + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) } } } else { r.EncodeNil() } } else { - if yyq214[4] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentCPUUtilizationPercentage")) + r.EncodeString(codecSelferC_UTF81234, string("data")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CurrentCPUUtilizationPercentage == nil { + if x.Data == nil { r.EncodeNil() } else { - yy232 := *x.CurrentCPUUtilizationPercentage - yym233 := z.EncBinary() - _ = yym233 + yym16 := z.EncBinary() + _ = yym16 if false { } else { - r.EncodeInt(int64(yy232)) + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) } } } } - if yyr214 || yy2arr214 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -2903,29 +3175,29 @@ func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *HorizontalPodAutoscalerStatus) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ThirdPartyResourceData) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym234 := z.DecBinary() - _ = yym234 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct235 := r.ContainerType() - if yyct235 == codecSelferValueTypeMap1234 { - yyl235 := r.ReadMapStart() - if yyl235 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl235, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct235 == codecSelferValueTypeArray1234 { - yyl235 := r.ReadArrayStart() - if yyl235 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl235, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -2933,16 +3205,16 @@ func (x *HorizontalPodAutoscalerStatus) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ThirdPartyResourceData) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys236Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys236Slc - var yyhl236 bool = l >= 0 - for yyj236 := 0; ; yyj236++ { - if yyhl236 { - if yyj236 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -2951,260 +3223,218 @@ func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec19 } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys236Slc = r.DecodeBytes(yys236Slc, true, true) 
- yys236 := string(yys236Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys236 { - case "observedGeneration": + switch yys3 { + case "kind": if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } + x.Kind = "" } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym238 := z.DecBinary() - _ = yym238 + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 if false { } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + *((*string)(yyv4)) = r.DecodeString() } } - case "lastScaleTime": + case "apiVersion": if r.TryDecodeAsNil() { - if x.LastScaleTime != nil { - x.LastScaleTime = nil - } + x.APIVersion = "" } else { - if x.LastScaleTime == nil { - x.LastScaleTime = new(pkg1_unversioned.Time) - } - yym240 := z.DecBinary() - _ = yym240 + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 if false { - } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { - } else if yym240 { - z.DecBinaryUnmarshal(x.LastScaleTime) - } else if !yym240 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScaleTime) } else { - z.DecFallback(x.LastScaleTime, false) + *((*string)(yyv6)) = r.DecodeString() } } - case "currentReplicas": - if r.TryDecodeAsNil() { - x.CurrentReplicas = 0 - } else { - x.CurrentReplicas = int32(r.DecodeInt(32)) - } - case "desiredReplicas": + case "metadata": if r.TryDecodeAsNil() { - x.DesiredReplicas = 0 + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - x.DesiredReplicas = int32(r.DecodeInt(32)) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } - case "currentCPUUtilizationPercentage": + case "data": if r.TryDecodeAsNil() { - if x.CurrentCPUUtilizationPercentage != nil { - x.CurrentCPUUtilizationPercentage = nil - } + x.Data = nil } else { - if x.CurrentCPUUtilizationPercentage == nil { - x.CurrentCPUUtilizationPercentage = new(int32) - } - yym244 := z.DecBinary() - _ = yym244 + yyv10 := &x.Data + yym11 := z.DecBinary() + _ = yym11 if false { } else { - *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) + *yyv10 = r.DecodeBytes(*(*[]byte)(yyv10), false, false) } } default: - z.DecStructFieldNotFound(-1, yys236) - } // end switch yys236 - } // end for yyj236 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *ThirdPartyResourceData) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj245 int - var yyb245 bool - var yyhl245 bool = l >= 0 - yyj245++ - if yyhl245 { - yyb245 = yyj245 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb245 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb245 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } + x.Kind = "" } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym247 := z.DecBinary() - _ = yym247 + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 if false { 
} else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + *((*string)(yyv13)) = r.DecodeString() } } - yyj245++ - if yyhl245 { - yyb245 = yyj245 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb245 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb245 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.LastScaleTime != nil { - x.LastScaleTime = nil - } + x.APIVersion = "" } else { - if x.LastScaleTime == nil { - x.LastScaleTime = new(pkg1_unversioned.Time) - } - yym249 := z.DecBinary() - _ = yym249 + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 if false { - } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { - } else if yym249 { - z.DecBinaryUnmarshal(x.LastScaleTime) - } else if !yym249 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScaleTime) } else { - z.DecFallback(x.LastScaleTime, false) + *((*string)(yyv15)) = r.DecodeString() } } - yyj245++ - if yyhl245 { - yyb245 = yyj245 > l - } else { - yyb245 = r.CheckBreak() - } - if yyb245 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentReplicas = 0 - } else { - x.CurrentReplicas = int32(r.DecodeInt(32)) - } - yyj245++ - if yyhl245 { - yyb245 = yyj245 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb245 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb245 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.DesiredReplicas = 0 + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - x.DesiredReplicas = int32(r.DecodeInt(32)) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj245++ - if yyhl245 { - yyb245 = yyj245 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb245 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb245 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.CurrentCPUUtilizationPercentage != nil { - x.CurrentCPUUtilizationPercentage = nil - } + x.Data = nil } else { - if x.CurrentCPUUtilizationPercentage == nil { - x.CurrentCPUUtilizationPercentage = new(int32) - } - yym253 := z.DecBinary() - _ = yym253 + yyv19 := &x.Data + yym20 := z.DecBinary() + _ = yym20 if false { } else { - *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) + *yyv19 = r.DecodeBytes(*(*[]byte)(yyv19), false, false) } } for { - yyj245++ - if yyhl245 { - yyb245 = yyj245 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb245 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb245 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj245-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *Deployment) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym254 := z.EncBinary() - _ = yym254 + yym1 := z.EncBinary() + _ = yym1 if false { } else if 
z.HasExtensions() && z.EncExt(x) { } else { - yysep255 := !z.EncBinary() - yy2arr255 := z.EncBasicHandle().StructToArray - var yyq255 [5]bool - _, _, _ = yysep255, yyq255, yy2arr255 - const yyr255 bool = false - yyq255[0] = x.Kind != "" - yyq255[1] = x.APIVersion != "" - yyq255[2] = true - yyq255[3] = true - yyq255[4] = true - var yynn255 int - if yyr255 || yy2arr255 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn255 = 0 - for _, b := range yyq255 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn255++ + yynn2++ } } - r.EncodeMapStart(yynn255) - yynn255 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr255 || yy2arr255 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq255[0] { - yym257 := z.EncBinary() - _ = yym257 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -3213,23 +3443,23 @@ func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq255[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym258 := z.EncBinary() - _ = yym258 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr255 || yy2arr255 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq255[1] { - yym260 := z.EncBinary() - _ = yym260 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -3238,70 +3468,82 @@ func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq255[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym261 := z.EncBinary() - _ = yym261 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr255 || yy2arr255 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq255[2] { - yy263 := &x.ObjectMeta - yy263.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq255[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy264 := &x.ObjectMeta - yy264.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr255 || yy2arr255 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq255[3] { - yy266 := &x.Spec - yy266.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) 
} else { r.EncodeNil() } } else { - if yyq255[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy267 := &x.Spec - yy267.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } } - if yyr255 || yy2arr255 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq255[4] { - yy269 := &x.Status - yy269.CodecEncodeSelf(e) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq255[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy270 := &x.Status - yy270.CodecEncodeSelf(e) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } - if yyr255 || yy2arr255 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -3310,29 +3552,29 @@ func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *HorizontalPodAutoscaler) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *Deployment) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym271 := z.DecBinary() - _ = yym271 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct272 := r.ContainerType() - if yyct272 == codecSelferValueTypeMap1234 { - yyl272 := r.ReadMapStart() - if yyl272 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl272, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct272 == codecSelferValueTypeArray1234 { - yyl272 := r.ReadArrayStart() - if yyl272 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl272, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -3340,16 +3582,16 @@ func (x *HorizontalPodAutoscaler) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *HorizontalPodAutoscaler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *Deployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys273Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys273Slc - var yyhl273 bool = l >= 0 - for yyj273 := 0; ; yyj273++ { - if yyhl273 { - if yyj273 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -3358,64 +3600,82 @@ func (x *HorizontalPodAutoscaler) codecDecodeSelfFromMap(l int, d *codec1978.Dec } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys273Slc = r.DecodeBytes(yys273Slc, true, true) - yys273 := string(yys273Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys273 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + 
yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv276 := &x.ObjectMeta - yyv276.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { - x.Spec = HorizontalPodAutoscalerSpec{} + x.Spec = DeploymentSpec{} } else { - yyv277 := &x.Spec - yyv277.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { - x.Status = HorizontalPodAutoscalerStatus{} + x.Status = DeploymentStatus{} } else { - yyv278 := &x.Status - yyv278.CodecDecodeSelf(d) + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys273) - } // end switch yys273 - } // end for yyj273 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj279 int - var yyb279 bool - var yyhl279 bool = l >= 0 - yyj279++ - if yyhl279 { - yyb279 = yyj279 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb279 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb279 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3423,15 +3683,21 @@ func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj279++ - if yyhl279 { - yyb279 = yyj279 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb279 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb279 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3439,246 +3705,398 @@ func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj279++ - if yyhl279 { - yyb279 = yyj279 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb279 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb279 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv282 := &x.ObjectMeta - yyv282.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj279++ - if 
yyhl279 { - yyb279 = yyj279 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb279 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb279 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Spec = HorizontalPodAutoscalerSpec{} + x.Spec = DeploymentSpec{} } else { - yyv283 := &x.Spec - yyv283.CodecDecodeSelf(d) + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) } - yyj279++ - if yyhl279 { - yyb279 = yyj279 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb279 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb279 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Status = HorizontalPodAutoscalerStatus{} + x.Status = DeploymentStatus{} } else { - yyv284 := &x.Status - yyv284.CodecDecodeSelf(d) + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) } for { - yyj279++ - if yyhl279 { - yyb279 = yyj279 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb279 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb279 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj279-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym285 := z.EncBinary() - _ = yym285 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep286 := !z.EncBinary() - yy2arr286 := z.EncBasicHandle().StructToArray - var yyq286 [4]bool - _, _, _ = yysep286, yyq286, yy2arr286 - const yyr286 bool = false - yyq286[0] = x.Kind != "" - yyq286[1] = x.APIVersion != "" - yyq286[2] = true - var yynn286 int - if yyr286 || yy2arr286 { - r.EncodeArrayStart(4) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [9]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != nil + yyq2[1] = x.Selector != nil + yyq2[3] = true + yyq2[4] = x.MinReadySeconds != 0 + yyq2[5] = x.RevisionHistoryLimit != nil + yyq2[6] = x.Paused != false + yyq2[7] = x.RollbackTo != nil + yyq2[8] = x.ProgressDeadlineSeconds != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(9) } else { - yynn286 = 1 - for _, b := range yyq286 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn286++ + yynn2++ } } - r.EncodeMapStart(yynn286) - yynn286 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr286 || yy2arr286 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq286[0] { - yym288 := z.EncBinary() - _ = yym288 - if false { + if yyq2[0] { + if x.Replicas == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + yy4 := *x.Replicas + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeNil() } } else { - if yyq286[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym289 := z.EncBinary() - _ = yym289 - if false { + if x.Replicas == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + yy6 := *x.Replicas + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } } } } - if yyr286 || yy2arr286 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq286[1] { - yym291 := z.EncBinary() - _ = yym291 - if false { + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeNil() } } else { - if yyq286[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + r.EncodeString(codecSelferC_UTF81234, string("selector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym292 := z.EncBinary() - _ = yym292 - if false { + if x.Selector == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } } } } - if yyr286 || yy2arr286 { + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.Template + yy12.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.Template + yy14.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy17 := &x.Strategy + yy17.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("strategy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy19 := &x.Strategy + yy19.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq286[2] { - yy294 := &x.ListMeta - yym295 := z.EncBinary() - _ = yym295 + if yyq2[4] { + yym22 := z.EncBinary() + _ = yym22 if false { - } else if z.HasExtensions() && z.EncExt(yy294) { } else { - z.EncFallback(yy294) + r.EncodeInt(int64(x.MinReadySeconds)) } } else { - r.EncodeNil() + r.EncodeInt(0) } } else { - if yyq286[2] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) + r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy296 := &x.ListMeta - yym297 := z.EncBinary() - _ = yym297 + yym23 := z.EncBinary() + _ = yym23 if false { - } else if z.HasExtensions() && z.EncExt(yy296) { } else { - z.EncFallback(yy296) + r.EncodeInt(int64(x.MinReadySeconds)) } } } - if yyr286 || yy2arr286 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() + if yyq2[5] { + if x.RevisionHistoryLimit == nil { + r.EncodeNil() + } else { + yy25 := *x.RevisionHistoryLimit + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + 
r.EncodeInt(int64(yy25)) + } + } } else { - yym299 := z.EncBinary() - _ = yym299 + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("revisionHistoryLimit")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RevisionHistoryLimit == nil { + r.EncodeNil() + } else { + yy27 := *x.RevisionHistoryLimit + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeInt(int64(yy27)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym30 := z.EncBinary() + _ = yym30 if false { } else { - h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) + r.EncodeBool(bool(x.Paused)) } + } else { + r.EncodeBool(false) } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym300 := z.EncBinary() - _ = yym300 + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("paused")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym31 := z.EncBinary() + _ = yym31 if false { } else { - h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) + r.EncodeBool(bool(x.Paused)) } } } - if yyr286 || yy2arr286 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.RollbackTo == nil { + r.EncodeNil() + } else { + x.RollbackTo.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RollbackTo == nil { + r.EncodeNil() + } else { + x.RollbackTo.CodecEncodeSelf(e) + } + } } - } - } -} - -func (x *HorizontalPodAutoscalerList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym301 := z.DecBinary() - _ = yym301 - if false { - } else if z.HasExtensions() && z.DecExt(x) { + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.ProgressDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy36 := *x.ProgressDeadlineSeconds + yym37 := z.EncBinary() + _ = yym37 + if false { + } else { + r.EncodeInt(int64(yy36)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("progressDeadlineSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ProgressDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy38 := *x.ProgressDeadlineSeconds + yym39 := z.EncBinary() + _ = yym39 + if false { + } else { + r.EncodeInt(int64(yy38)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if 
z.HasExtensions() && z.DecExt(x) { } else { - yyct302 := r.ContainerType() - if yyct302 == codecSelferValueTypeMap1234 { - yyl302 := r.ReadMapStart() - if yyl302 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl302, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct302 == codecSelferValueTypeArray1234 { - yyl302 := r.ReadArrayStart() - if yyl302 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl302, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -3686,16 +4104,16 @@ func (x *HorizontalPodAutoscalerList) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *DeploymentSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys303Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys303Slc - var yyhl303 bool = l >= 0 - for yyj303 := 0; ; yyj303++ { - if yyhl303 { - if yyj303 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -3704,194 +4122,396 @@ func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromMap(l int, d *codec1978 } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys303Slc = r.DecodeBytes(yys303Slc, true, true) - yys303 := string(yys303Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys303 { - case "kind": + switch yys3 { + case "replicas": if r.TryDecodeAsNil() { - x.Kind = "" + if x.Replicas != nil { + x.Replicas = nil + } } else { - x.Kind = string(r.DecodeString()) + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } } - case "apiVersion": + case "selector": if r.TryDecodeAsNil() { - x.APIVersion = "" + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = pkg4_v1.PodTemplateSpec{} } else { - x.APIVersion = string(r.DecodeString()) + yyv8 := &x.Template + yyv8.CodecDecodeSelf(d) } - case "metadata": + case "strategy": + if r.TryDecodeAsNil() { + x.Strategy = DeploymentStrategy{} + } else { + yyv9 := &x.Strategy + yyv9.CodecDecodeSelf(d) + } + case "minReadySeconds": if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.MinReadySeconds = 0 } else { - yyv306 := &x.ListMeta - yym307 := z.DecBinary() - _ = yym307 + yyv10 := &x.MinReadySeconds + yym11 := z.DecBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.DecExt(yyv306) { } else { - z.DecFallback(yyv306, false) + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) } } - case "items": + case "revisionHistoryLimit": if r.TryDecodeAsNil() { - x.Items = nil + if 
x.RevisionHistoryLimit != nil { + x.RevisionHistoryLimit = nil + } + } else { + if x.RevisionHistoryLimit == nil { + x.RevisionHistoryLimit = new(int32) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + case "paused": + if r.TryDecodeAsNil() { + x.Paused = false + } else { + yyv14 := &x.Paused + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + case "rollbackTo": + if r.TryDecodeAsNil() { + if x.RollbackTo != nil { + x.RollbackTo = nil + } + } else { + if x.RollbackTo == nil { + x.RollbackTo = new(RollbackConfig) + } + x.RollbackTo.CodecDecodeSelf(d) + } + case "progressDeadlineSeconds": + if r.TryDecodeAsNil() { + if x.ProgressDeadlineSeconds != nil { + x.ProgressDeadlineSeconds = nil + } } else { - yyv308 := &x.Items - yym309 := z.DecBinary() - _ = yym309 + if x.ProgressDeadlineSeconds == nil { + x.ProgressDeadlineSeconds = new(int32) + } + yym18 := z.DecBinary() + _ = yym18 if false { } else { - h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv308), d) + *((*int32)(x.ProgressDeadlineSeconds)) = int32(r.DecodeInt(32)) } } default: - z.DecStructFieldNotFound(-1, yys303) - } // end switch yys303 - } // end for yyj303 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *DeploymentSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj310 int - var yyb310 bool - var yyhl310 bool = l >= 0 - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + var yyj19 int + var yyb19 bool + var yyhl19 bool = l >= 0 + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb310 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb310 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Kind = "" + if x.Replicas != nil { + x.Replicas = nil + } } else { - x.Kind = string(r.DecodeString()) + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } } - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb310 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb310 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.APIVersion = "" + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = pkg4_v1.PodTemplateSpec{} + } else { + yyv24 := &x.Template + yyv24.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Strategy = DeploymentStrategy{} } else { - x.APIVersion = string(r.DecodeString()) + yyv25 := &x.Strategy + yyv25.CodecDecodeSelf(d) } - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb310 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb310 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.MinReadySeconds = 0 } else { - yyv313 := &x.ListMeta - yym314 := z.DecBinary() - _ = yym314 + yyv26 := &x.MinReadySeconds + yym27 := z.DecBinary() + _ = yym27 if false { - } else if z.HasExtensions() && z.DecExt(yyv313) { } else { - z.DecFallback(yyv313, false) + *((*int32)(yyv26)) = int32(r.DecodeInt(32)) } } - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb310 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb310 { + if yyb19 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Items = nil + if x.RevisionHistoryLimit != nil { + x.RevisionHistoryLimit = nil + } + } else { + if x.RevisionHistoryLimit == nil { + x.RevisionHistoryLimit = new(int32) + } + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Paused = false + } else { + yyv30 := &x.Paused + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*bool)(yyv30)) = r.DecodeBool() + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyv315 := &x.Items - yym316 := z.DecBinary() - _ = yym316 + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RollbackTo != nil { + x.RollbackTo = nil + } + } else { + if x.RollbackTo == nil { + x.RollbackTo = new(RollbackConfig) + } + x.RollbackTo.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ProgressDeadlineSeconds != nil { + x.ProgressDeadlineSeconds = nil + } + } else { + if x.ProgressDeadlineSeconds == nil { + x.ProgressDeadlineSeconds = new(int32) + } + yym34 := z.DecBinary() + _ = yym34 if false { } else { - h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv315), d) + *((*int32)(x.ProgressDeadlineSeconds)) = int32(r.DecodeInt(32)) } } for { - yyj310++ - if yyhl310 { - yyb310 = yyj310 > l + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l } else { - yyb310 = r.CheckBreak() + yyb19 = r.CheckBreak() } - if yyb310 { + if yyb19 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj310-1, "") + z.DecStructFieldNotFound(yyj19-1, "") } 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ThirdPartyResource) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *DeploymentRollback) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym317 := z.EncBinary() - _ = yym317 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep318 := !z.EncBinary() - yy2arr318 := z.EncBasicHandle().StructToArray - var yyq318 [5]bool - _, _, _ = yysep318, yyq318, yy2arr318 - const yyr318 bool = false - yyq318[0] = x.Kind != "" - yyq318[1] = x.APIVersion != "" - yyq318[2] = true - yyq318[3] = x.Description != "" - yyq318[4] = len(x.Versions) != 0 - var yynn318 int - if yyr318 || yy2arr318 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[3] = len(x.UpdatedAnnotations) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn318 = 0 - for _, b := range yyq318 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn318++ + yynn2++ } } - r.EncodeMapStart(yynn318) - yynn318 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr318 || yy2arr318 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq318[0] { - yym320 := z.EncBinary() - _ = yym320 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -3900,23 +4520,23 @@ func (x *ThirdPartyResource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq318[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym321 := z.EncBinary() - _ = yym321 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr318 || yy2arr318 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq318[1] { - yym323 := z.EncBinary() - _ = yym323 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -3925,94 +4545,82 @@ func (x *ThirdPartyResource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq318[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym324 := z.EncBinary() - _ = yym324 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr318 || yy2arr318 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq318[2] { - yy326 := &x.ObjectMeta - yy326.CodecEncodeSelf(e) + yym10 := z.EncBinary() + _ = yym10 + if false { } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } else { - if yyq318[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy327 := &x.ObjectMeta - yy327.CodecEncodeSelf(e) - } - } - if yyr318 || yy2arr318 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq318[3] { - yym329 := z.EncBinary() - _ = yym329 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Description)) - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq318[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("description")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym330 := z.EncBinary() - _ = yym330 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Description)) - } + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } - if yyr318 || yy2arr318 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq318[4] { - if x.Versions == nil { + if yyq2[3] { + if x.UpdatedAnnotations == nil { r.EncodeNil() } else { - yym332 := z.EncBinary() - _ = yym332 + yym13 := z.EncBinary() + _ = yym13 if false { } else { - h.encSliceAPIVersion(([]APIVersion)(x.Versions), e) + z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) } } } else { r.EncodeNil() } } else { - if yyq318[4] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("versions")) + r.EncodeString(codecSelferC_UTF81234, string("updatedAnnotations")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Versions == nil { + if x.UpdatedAnnotations == nil { r.EncodeNil() } else { - yym333 := z.EncBinary() - _ = yym333 + yym14 := z.EncBinary() + _ = yym14 if false { } else { - h.encSliceAPIVersion(([]APIVersion)(x.Versions), e) + z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) } } } } - if yyr318 || yy2arr318 { + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy16 := &x.RollbackTo + yy16.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy18 := &x.RollbackTo + yy18.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -4021,29 +4629,29 @@ func (x *ThirdPartyResource) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ThirdPartyResource) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *DeploymentRollback) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym334 := z.DecBinary() - _ = yym334 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct335 := r.ContainerType() - if yyct335 == codecSelferValueTypeMap1234 { - yyl335 := r.ReadMapStart() - if yyl335 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl335, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct335 == codecSelferValueTypeArray1234 { - yyl335 := r.ReadArrayStart() - if yyl335 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl335, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -4051,16 +4659,16 @@ func (x *ThirdPartyResource) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ThirdPartyResource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *DeploymentRollback) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys336Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys336Slc - var yyhl336 bool = l >= 0 - for yyj336 := 0; ; yyj336++ { - if yyhl336 { - if yyj336 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -4069,68 +4677,86 @@ func (x *ThirdPartyResource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys336Slc = r.DecodeBytes(yys336Slc, true, true) - yys336 := string(yys336Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys336 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv339 := &x.ObjectMeta - yyv339.CodecDecodeSelf(d) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } - case "description": + case "name": if r.TryDecodeAsNil() { - x.Description = "" + x.Name = "" } else { - x.Description = string(r.DecodeString()) + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } - case "versions": + case "updatedAnnotations": if r.TryDecodeAsNil() { - x.Versions = nil + x.UpdatedAnnotations = nil } else { - yyv341 := &x.Versions - yym342 := z.DecBinary() - _ = yym342 + yyv10 := &x.UpdatedAnnotations + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceAPIVersion((*[]APIVersion)(yyv341), d) + z.F.DecMapStringStringX(yyv10, false, d) } } + case "rollbackTo": + if r.TryDecodeAsNil() { + x.RollbackTo = RollbackConfig{} + } else { + yyv12 := &x.RollbackTo + yyv12.CodecDecodeSelf(d) + } default: - z.DecStructFieldNotFound(-1, yys336) - } // end switch yys336 - } // end for yyj336 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ThirdPartyResource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *DeploymentRollback) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj343 int - var yyb343 bool - var yyhl343 bool = l >= 0 - yyj343++ - if yyhl343 { - yyb343 = yyj343 > l + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb343 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb343 { + if yyb13 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4138,15 +4764,21 @@ func (x *ThirdPartyResource) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv14 := &x.Kind + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } - yyj343++ - if yyhl343 { - yyb343 = yyj343 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb343 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb343 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4154,219 +4786,148 @@ func (x *ThirdPartyResource) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv16 := &x.APIVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } } - yyj343++ - if yyhl343 { - yyb343 = yyj343 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb343 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb343 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.Name = "" } else { - yyv346 := &x.ObjectMeta - yyv346.CodecDecodeSelf(d) + yyv18 := &x.Name + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } } - yyj343++ - if yyhl343 { - yyb343 = yyj343 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb343 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb343 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Description = "" + x.UpdatedAnnotations = nil } else { - x.Description = string(r.DecodeString()) + yyv20 := &x.UpdatedAnnotations + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + z.F.DecMapStringStringX(yyv20, false, d) + } } - yyj343++ - if yyhl343 { - yyb343 = yyj343 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb343 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb343 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Versions = nil + x.RollbackTo = RollbackConfig{} } else { - yyv348 := &x.Versions - yym349 := z.DecBinary() - _ = yym349 - if false { - } else { - h.decSliceAPIVersion((*[]APIVersion)(yyv348), d) - } + yyv22 := &x.RollbackTo + yyv22.CodecDecodeSelf(d) } for { - yyj343++ - if yyhl343 { - yyb343 = yyj343 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb343 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb343 { + if yyb13 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj343-1, "") + z.DecStructFieldNotFound(yyj13-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ThirdPartyResourceList) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *RollbackConfig) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym350 := z.EncBinary() - _ = yym350 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep351 := !z.EncBinary() - yy2arr351 := 
z.EncBasicHandle().StructToArray - var yyq351 [4]bool - _, _, _ = yysep351, yyq351, yy2arr351 - const yyr351 bool = false - yyq351[0] = x.Kind != "" - yyq351[1] = x.APIVersion != "" - yyq351[2] = true - var yynn351 int - if yyr351 || yy2arr351 { - r.EncodeArrayStart(4) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Revision != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) } else { - yynn351 = 1 - for _, b := range yyq351 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn351++ - } - } - r.EncodeMapStart(yynn351) - yynn351 = 0 - } - if yyr351 || yy2arr351 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq351[0] { - yym353 := z.EncBinary() - _ = yym353 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq351[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym354 := z.EncBinary() - _ = yym354 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr351 || yy2arr351 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq351[1] { - yym356 := z.EncBinary() - _ = yym356 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq351[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym357 := z.EncBinary() - _ = yym357 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + yynn2++ } } + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr351 || yy2arr351 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq351[2] { - yy359 := &x.ListMeta - yym360 := z.EncBinary() - _ = yym360 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { - } else if z.HasExtensions() && z.EncExt(yy359) { } else { - z.EncFallback(yy359) + r.EncodeInt(int64(x.Revision)) } } else { - r.EncodeNil() + r.EncodeInt(0) } } else { - if yyq351[2] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) + r.EncodeString(codecSelferC_UTF81234, string("revision")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy361 := &x.ListMeta - yym362 := z.EncBinary() - _ = yym362 - if false { - } else if z.HasExtensions() && z.EncExt(yy361) { - } else { - z.EncFallback(yy361) - } - } - } - if yyr351 || yy2arr351 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym364 := z.EncBinary() - _ = yym364 - if false { - } else { - h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym365 := z.EncBinary() - _ = yym365 + yym5 := z.EncBinary() + _ = yym5 if false { } else { - h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e) + r.EncodeInt(int64(x.Revision)) } } 
} - if yyr351 || yy2arr351 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -4375,29 +4936,29 @@ func (x *ThirdPartyResourceList) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ThirdPartyResourceList) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *RollbackConfig) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym366 := z.DecBinary() - _ = yym366 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct367 := r.ContainerType() - if yyct367 == codecSelferValueTypeMap1234 { - yyl367 := r.ReadMapStart() - if yyl367 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl367, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct367 == codecSelferValueTypeArray1234 { - yyl367 := r.ReadArrayStart() - if yyl367 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl367, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -4405,16 +4966,16 @@ func (x *ThirdPartyResourceList) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ThirdPartyResourceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *RollbackConfig) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys368Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys368Slc - var yyhl368 bool = l >= 0 - for yyj368 := 0; ; yyj368++ { - if yyhl368 { - if yyj368 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -4423,211 +4984,145 @@ func (x *ThirdPartyResourceList) codecDecodeSelfFromMap(l int, d *codec1978.Deco } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys368Slc = r.DecodeBytes(yys368Slc, true, true) - yys368 := string(yys368Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys368 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv371 := &x.ListMeta - yym372 := z.DecBinary() - _ = yym372 - if false { - } else if z.HasExtensions() && z.DecExt(yyv371) { - } else { - z.DecFallback(yyv371, false) - } - } - case "items": + switch yys3 { + case "revision": if r.TryDecodeAsNil() { - x.Items = nil + x.Revision = 0 } else { - yyv373 := &x.Items - yym374 := z.DecBinary() - _ = yym374 + yyv4 := &x.Revision + yym5 := z.DecBinary() + _ = yym5 if false { } else { - h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv373), d) + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) } } default: - z.DecStructFieldNotFound(-1, yys368) - } // end switch yys368 - } // end for yyj368 + z.DecStructFieldNotFound(-1, yys3) 
+ } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ThirdPartyResourceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *RollbackConfig) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj375 int - var yyb375 bool - var yyhl375 bool = l >= 0 - yyj375++ - if yyhl375 { - yyb375 = yyj375 > l - } else { - yyb375 = r.CheckBreak() - } - if yyb375 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj375++ - if yyhl375 { - yyb375 = yyj375 > l - } else { - yyb375 = r.CheckBreak() - } - if yyb375 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj375++ - if yyhl375 { - yyb375 = yyj375 > l - } else { - yyb375 = r.CheckBreak() - } - if yyb375 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyv378 := &x.ListMeta - yym379 := z.DecBinary() - _ = yym379 - if false { - } else if z.HasExtensions() && z.DecExt(yyv378) { - } else { - z.DecFallback(yyv378, false) - } - } - yyj375++ - if yyhl375 { - yyb375 = yyj375 > l - } else { - yyb375 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb375 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Items = nil + x.Revision = 0 } else { - yyv380 := &x.Items - yym381 := z.DecBinary() - _ = yym381 + yyv7 := &x.Revision + yym8 := z.DecBinary() + _ = yym8 if false { } else { - h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv380), d) + *((*int64)(yyv7)) = int64(r.DecodeInt(64)) } } for { - yyj375++ - if yyhl375 { - yyb375 = yyj375 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb375 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb375 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj375-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *APIVersion) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *DeploymentStrategy) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym382 := z.EncBinary() - _ = yym382 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep383 := !z.EncBinary() - yy2arr383 := z.EncBasicHandle().StructToArray - var yyq383 [1]bool - _, _, _ = yysep383, yyq383, yy2arr383 - const yyr383 bool = false - yyq383[0] = x.Name != "" - var yynn383 int - if yyr383 || yy2arr383 { - r.EncodeArrayStart(1) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Type != "" + yyq2[1] = x.RollingUpdate != nil + var yynn2 int + if yyr2 || yy2arr2 { 
+ r.EncodeArrayStart(2) } else { - yynn383 = 0 - for _, b := range yyq383 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn383++ + yynn2++ } } - r.EncodeMapStart(yynn383) - yynn383 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr383 || yy2arr383 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq383[0] { - yym385 := z.EncBinary() - _ = yym385 - if false { + if yyq2[0] { + x.Type.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.RollingUpdate == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + x.RollingUpdate.CodecEncodeSelf(e) } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeNil() } } else { - if yyq383[0] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) + r.EncodeString(codecSelferC_UTF81234, string("rollingUpdate")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym386 := z.EncBinary() - _ = yym386 - if false { + if x.RollingUpdate == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + x.RollingUpdate.CodecEncodeSelf(e) } } } - if yyr383 || yy2arr383 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -4636,29 +5131,29 @@ func (x *APIVersion) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *APIVersion) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *DeploymentStrategy) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym387 := z.DecBinary() - _ = yym387 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct388 := r.ContainerType() - if yyct388 == codecSelferValueTypeMap1234 { - yyl388 := r.ReadMapStart() - if yyl388 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl388, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct388 == codecSelferValueTypeArray1234 { - yyl388 := r.ReadArrayStart() - if yyl388 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl388, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -4666,16 +5161,16 @@ func (x *APIVersion) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *APIVersion) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *DeploymentStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys389Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys389Slc - var yyhl389 bool = l >= 0 - for yyj389 := 0; ; yyj389++ { - if yyhl389 { - if yyj389 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = 
yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -4684,197 +5179,233 @@ func (x *APIVersion) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys389Slc = r.DecodeBytes(yys389Slc, true, true) - yys389 := string(yys389Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys389 { - case "name": + switch yys3 { + case "type": if r.TryDecodeAsNil() { - x.Name = "" + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "rollingUpdate": + if r.TryDecodeAsNil() { + if x.RollingUpdate != nil { + x.RollingUpdate = nil + } } else { - x.Name = string(r.DecodeString()) + if x.RollingUpdate == nil { + x.RollingUpdate = new(RollingUpdateDeployment) + } + x.RollingUpdate.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys389) - } // end switch yys389 - } // end for yyj389 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *APIVersion) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *DeploymentStrategy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj391 int - var yyb391 bool - var yyhl391 bool = l >= 0 - yyj391++ - if yyhl391 { - yyb391 = yyj391 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb391 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb391 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Name = "" + x.Type = "" + } else { + yyv7 := &x.Type + yyv7.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RollingUpdate != nil { + x.RollingUpdate = nil + } } else { - x.Name = string(r.DecodeString()) + if x.RollingUpdate == nil { + x.RollingUpdate = new(RollingUpdateDeployment) + } + x.RollingUpdate.CodecDecodeSelf(d) } for { - yyj391++ - if yyhl391 { - yyb391 = yyj391 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb391 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb391 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj391-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ThirdPartyResourceData) CodecEncodeSelf(e *codec1978.Encoder) { +func (x DeploymentStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *DeploymentStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x 
*RollingUpdateDeployment) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym393 := z.EncBinary() - _ = yym393 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep394 := !z.EncBinary() - yy2arr394 := z.EncBasicHandle().StructToArray - var yyq394 [4]bool - _, _, _ = yysep394, yyq394, yy2arr394 - const yyr394 bool = false - yyq394[0] = x.Kind != "" - yyq394[1] = x.APIVersion != "" - yyq394[2] = true - yyq394[3] = len(x.Data) != 0 - var yynn394 int - if yyr394 || yy2arr394 { - r.EncodeArrayStart(4) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.MaxUnavailable != nil + yyq2[1] = x.MaxSurge != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) } else { - yynn394 = 0 - for _, b := range yyq394 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn394++ - } - } - r.EncodeMapStart(yynn394) - yynn394 = 0 - } - if yyr394 || yy2arr394 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq394[0] { - yym396 := z.EncBinary() - _ = yym396 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq394[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym397 := z.EncBinary() - _ = yym397 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + yynn2++ } } + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr394 || yy2arr394 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq394[1] { - yym399 := z.EncBinary() - _ = yym399 - if false { + if yyq2[0] { + if x.MaxUnavailable == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { + } else if !yym4 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxUnavailable) + } else { + z.EncFallback(x.MaxUnavailable) + } } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeNil() } } else { - if yyq394[1] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + r.EncodeString(codecSelferC_UTF81234, string("maxUnavailable")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym400 := z.EncBinary() - _ = yym400 - if false { + if x.MaxUnavailable == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxUnavailable) + } else { + z.EncFallback(x.MaxUnavailable) + } } } } - if yyr394 || yy2arr394 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq394[2] { - yy402 := &x.ObjectMeta - yy402.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq394[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy403 := &x.ObjectMeta - yy403.CodecEncodeSelf(e) - } - } - if 
yyr394 || yy2arr394 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq394[3] { - if x.Data == nil { + if yyq2[1] { + if x.MaxSurge == nil { r.EncodeNil() } else { - yym405 := z.EncBinary() - _ = yym405 + yym7 := z.EncBinary() + _ = yym7 if false { + } else if z.HasExtensions() && z.EncExt(x.MaxSurge) { + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxSurge) } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) + z.EncFallback(x.MaxSurge) } } } else { r.EncodeNil() } } else { - if yyq394[3] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("data")) + r.EncodeString(codecSelferC_UTF81234, string("maxSurge")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Data == nil { + if x.MaxSurge == nil { r.EncodeNil() } else { - yym406 := z.EncBinary() - _ = yym406 + yym8 := z.EncBinary() + _ = yym8 if false { + } else if z.HasExtensions() && z.EncExt(x.MaxSurge) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxSurge) } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) + z.EncFallback(x.MaxSurge) } } } } - if yyr394 || yy2arr394 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -4883,29 +5414,29 @@ func (x *ThirdPartyResourceData) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ThirdPartyResourceData) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *RollingUpdateDeployment) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym407 := z.DecBinary() - _ = yym407 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct408 := r.ContainerType() - if yyct408 == codecSelferValueTypeMap1234 { - yyl408 := r.ReadMapStart() - if yyl408 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl408, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct408 == codecSelferValueTypeArray1234 { - yyl408 := r.ReadArrayStart() - if yyl408 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl408, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -4913,16 +5444,16 @@ func (x *ThirdPartyResourceData) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ThirdPartyResourceData) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *RollingUpdateDeployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys409Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys409Slc - var yyhl409 bool = l >= 0 - for yyj409 := 0; ; yyj409++ { - if yyhl409 { - if yyj409 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -4931,279 +5462,357 @@ func (x *ThirdPartyResourceData) codecDecodeSelfFromMap(l int, d *codec1978.Deco } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys409Slc = 
r.DecodeBytes(yys409Slc, true, true) - yys409 := string(yys409Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys409 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": + switch yys3 { + case "maxUnavailable": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + if x.MaxUnavailable != nil { + x.MaxUnavailable = nil + } } else { - yyv412 := &x.ObjectMeta - yyv412.CodecDecodeSelf(d) + if x.MaxUnavailable == nil { + x.MaxUnavailable = new(pkg5_intstr.IntOrString) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxUnavailable) + } else { + z.DecFallback(x.MaxUnavailable, false) + } } - case "data": + case "maxSurge": if r.TryDecodeAsNil() { - x.Data = nil + if x.MaxSurge != nil { + x.MaxSurge = nil + } } else { - yyv413 := &x.Data - yym414 := z.DecBinary() - _ = yym414 + if x.MaxSurge == nil { + x.MaxSurge = new(pkg5_intstr.IntOrString) + } + yym7 := z.DecBinary() + _ = yym7 if false { + } else if z.HasExtensions() && z.DecExt(x.MaxSurge) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxSurge) } else { - *yyv413 = r.DecodeBytes(*(*[]byte)(yyv413), false, false) + z.DecFallback(x.MaxSurge, false) } } default: - z.DecStructFieldNotFound(-1, yys409) - } // end switch yys409 - } // end for yyj409 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ThirdPartyResourceData) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *RollingUpdateDeployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj415 int - var yyb415 bool - var yyhl415 bool = l >= 0 - yyj415++ - if yyhl415 { - yyb415 = yyj415 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb415 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb415 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Kind = "" + if x.MaxUnavailable != nil { + x.MaxUnavailable = nil + } } else { - x.Kind = string(r.DecodeString()) + if x.MaxUnavailable == nil { + x.MaxUnavailable = new(pkg5_intstr.IntOrString) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxUnavailable) + } else { + z.DecFallback(x.MaxUnavailable, false) + } } - yyj415++ - if yyhl415 { - yyb415 = yyj415 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb415 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb415 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj415++ - if yyhl415 { - yyb415 = yyj415 > l + if x.MaxSurge != nil { + x.MaxSurge = nil + } } else { - yyb415 = r.CheckBreak() - } - if yyb415 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv418 := &x.ObjectMeta - yyv418.CodecDecodeSelf(d) - } - yyj415++ - if yyhl415 { - yyb415 = yyj415 > l - } else { - yyb415 = r.CheckBreak() - } - if yyb415 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv419 := &x.Data - yym420 := z.DecBinary() - _ = yym420 - if false { - } else { - *yyv419 = r.DecodeBytes(*(*[]byte)(yyv419), false, false) - } + if x.MaxSurge == nil { + x.MaxSurge = new(pkg5_intstr.IntOrString) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxSurge) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxSurge) + } else { + z.DecFallback(x.MaxSurge, false) + } } for { - yyj415++ - if yyhl415 { - yyb415 = yyj415 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb415 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb415 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj415-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *Deployment) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *DeploymentStatus) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym421 := z.EncBinary() - _ = yym421 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep422 := !z.EncBinary() - yy2arr422 := z.EncBasicHandle().StructToArray - var yyq422 [5]bool - _, _, _ = yysep422, yyq422, yy2arr422 - const yyr422 bool = false - yyq422[0] = x.Kind != "" - yyq422[1] = x.APIVersion != "" - yyq422[2] = true - yyq422[3] = true - yyq422[4] = true - var yynn422 int - if yyr422 || yy2arr422 { - r.EncodeArrayStart(5) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ObservedGeneration != 0 + yyq2[1] = x.Replicas != 0 + yyq2[2] = x.UpdatedReplicas != 0 + yyq2[3] = x.ReadyReplicas != 0 + yyq2[4] = x.AvailableReplicas != 0 + yyq2[5] = x.UnavailableReplicas != 0 + yyq2[6] = len(x.Conditions) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) } else { - yynn422 = 0 - for _, b := range yyq422 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn422++ + yynn2++ } } - r.EncodeMapStart(yynn422) - yynn422 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr422 || yy2arr422 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq422[0] { - yym424 := z.EncBinary() - _ = yym424 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + r.EncodeInt(int64(x.ObservedGeneration)) } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeInt(0) } } else { - if yyq422[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym425 := z.EncBinary() - _ = yym425 + yym5 := 
z.EncBinary() + _ = yym5 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + r.EncodeInt(int64(x.ObservedGeneration)) } } } - if yyr422 || yy2arr422 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq422[1] { - yym427 := z.EncBinary() - _ = yym427 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + r.EncodeInt(int64(x.Replicas)) } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeInt(0) } } else { - if yyq422[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym428 := z.EncBinary() - _ = yym428 + yym8 := z.EncBinary() + _ = yym8 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + r.EncodeInt(int64(x.Replicas)) } } } - if yyr422 || yy2arr422 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq422[2] { - yy430 := &x.ObjectMeta - yy430.CodecEncodeSelf(e) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.UpdatedReplicas)) + } } else { - r.EncodeNil() + r.EncodeInt(0) } } else { - if yyq422[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) + r.EncodeString(codecSelferC_UTF81234, string("updatedReplicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy431 := &x.ObjectMeta - yy431.CodecEncodeSelf(e) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.UpdatedReplicas)) + } } } - if yyr422 || yy2arr422 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq422[3] { - yy433 := &x.Spec - yy433.CodecEncodeSelf(e) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.ReadyReplicas)) + } } else { - r.EncodeNil() + r.EncodeInt(0) } } else { - if yyq422[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) + r.EncodeString(codecSelferC_UTF81234, string("readyReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.ReadyReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(x.UnavailableReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("unavailableReplicas")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy434 := &x.Spec - yy434.CodecEncodeSelf(e) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.UnavailableReplicas)) + } } } - if yyr422 || yy2arr422 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq422[4] { - yy436 := &x.Status - yy436.CodecEncodeSelf(e) + if yyq2[6] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + h.encSliceDeploymentCondition(([]DeploymentCondition)(x.Conditions), e) + } + } } else { r.EncodeNil() } } else { - if yyq422[4] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy437 := &x.Status - yy437.CodecEncodeSelf(e) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + h.encSliceDeploymentCondition(([]DeploymentCondition)(x.Conditions), e) + } + } } } - if yyr422 || yy2arr422 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -5212,29 +5821,29 @@ func (x *Deployment) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *Deployment) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *DeploymentStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym438 := z.DecBinary() - _ = yym438 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct439 := r.ContainerType() - if yyct439 == codecSelferValueTypeMap1234 { - yyl439 := r.ReadMapStart() - if yyl439 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl439, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct439 == codecSelferValueTypeArray1234 { - yyl439 := r.ReadArrayStart() - if yyl439 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl439, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -5242,16 +5851,16 @@ func (x *Deployment) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *Deployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *DeploymentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys440Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys440Slc - var yyhl440 bool = l >= 0 - for yyj440 := 0; ; yyj440++ { - if yyhl440 { - if yyj440 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -5260,3485 +5869,483 @@ func (x *Deployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys440Slc = r.DecodeBytes(yys440Slc, true, true) - yys440 := string(yys440Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 
:= string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys440 { - case "kind": + switch yys3 { + case "observedGeneration": if r.TryDecodeAsNil() { - x.Kind = "" + x.ObservedGeneration = 0 } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.ObservedGeneration + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } } - case "apiVersion": + case "replicas": if r.TryDecodeAsNil() { - x.APIVersion = "" + x.Replicas = 0 } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.Replicas + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } } - case "metadata": + case "updatedReplicas": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.UpdatedReplicas = 0 } else { - yyv443 := &x.ObjectMeta - yyv443.CodecDecodeSelf(d) + yyv8 := &x.UpdatedReplicas + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } } - case "spec": + case "readyReplicas": if r.TryDecodeAsNil() { - x.Spec = DeploymentSpec{} + x.ReadyReplicas = 0 } else { - yyv444 := &x.Spec - yyv444.CodecDecodeSelf(d) + yyv10 := &x.ReadyReplicas + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } } - case "status": + case "availableReplicas": if r.TryDecodeAsNil() { - x.Status = DeploymentStatus{} + x.AvailableReplicas = 0 + } else { + yyv12 := &x.AvailableReplicas + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(yyv12)) = int32(r.DecodeInt(32)) + } + } + case "unavailableReplicas": + if r.TryDecodeAsNil() { + x.UnavailableReplicas = 0 + } else { + yyv14 := &x.UnavailableReplicas + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } + } + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil } else { - yyv445 := &x.Status - yyv445.CodecDecodeSelf(d) + yyv16 := &x.Conditions + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + h.decSliceDeploymentCondition((*[]DeploymentCondition)(yyv16), d) + } } default: - z.DecStructFieldNotFound(-1, yys440) - } // end switch yys440 - } // end for yyj440 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *DeploymentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj446 int - var yyb446 bool - var yyhl446 bool = l >= 0 - yyj446++ - if yyhl446 { - yyb446 = yyj446 > l + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb446 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb446 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Kind = "" + x.ObservedGeneration = 0 } else { - x.Kind = string(r.DecodeString()) + yyv19 := &x.ObservedGeneration + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int64)(yyv19)) = int64(r.DecodeInt(64)) + } } - yyj446++ - if yyhl446 { - yyb446 = yyj446 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb446 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb446 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) 
return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.APIVersion = "" + x.Replicas = 0 } else { - x.APIVersion = string(r.DecodeString()) + yyv21 := &x.Replicas + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } } - yyj446++ - if yyhl446 { - yyb446 = yyj446 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb446 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb446 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.UpdatedReplicas = 0 } else { - yyv449 := &x.ObjectMeta - yyv449.CodecDecodeSelf(d) + yyv23 := &x.UpdatedReplicas + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } } - yyj446++ - if yyhl446 { - yyb446 = yyj446 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb446 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb446 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Spec = DeploymentSpec{} + x.ReadyReplicas = 0 } else { - yyv450 := &x.Spec - yyv450.CodecDecodeSelf(d) + yyv25 := &x.ReadyReplicas + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } } - yyj446++ - if yyhl446 { - yyb446 = yyj446 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb446 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb446 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Status = DeploymentStatus{} + x.AvailableReplicas = 0 + } else { + yyv27 := &x.AvailableReplicas + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(yyv27)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyv451 := &x.Status - yyv451.CodecDecodeSelf(d) + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UnavailableReplicas = 0 + } else { + yyv29 := &x.UnavailableReplicas + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*int32)(yyv29)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv31 := &x.Conditions + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + h.decSliceDeploymentCondition((*[]DeploymentCondition)(yyv31), d) + } } for { - yyj446++ - if yyhl446 { - yyb446 = yyj446 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb446 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb446 { + if yyb18 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj446-1, "") + z.DecStructFieldNotFound(yyj18-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) { +func (x DeploymentConditionType) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 
z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - if x == nil { - r.EncodeNil() + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { } else { - yym452 := z.EncBinary() - _ = yym452 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep453 := !z.EncBinary() - yy2arr453 := z.EncBasicHandle().StructToArray - var yyq453 [9]bool - _, _, _ = yysep453, yyq453, yy2arr453 - const yyr453 bool = false - yyq453[0] = x.Replicas != nil - yyq453[1] = x.Selector != nil - yyq453[3] = true - yyq453[4] = x.MinReadySeconds != 0 - yyq453[5] = x.RevisionHistoryLimit != nil - yyq453[6] = x.Paused != false - yyq453[7] = x.RollbackTo != nil - yyq453[8] = x.ProgressDeadlineSeconds != nil - var yynn453 int - if yyr453 || yy2arr453 { - r.EncodeArrayStart(9) - } else { - yynn453 = 1 - for _, b := range yyq453 { - if b { - yynn453++ - } - } - r.EncodeMapStart(yynn453) - yynn453 = 0 - } - if yyr453 || yy2arr453 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq453[0] { - if x.Replicas == nil { - r.EncodeNil() - } else { - yy455 := *x.Replicas - yym456 := z.EncBinary() - _ = yym456 - if false { - } else { - r.EncodeInt(int64(yy455)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq453[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Replicas == nil { - r.EncodeNil() - } else { - yy457 := *x.Replicas - yym458 := z.EncBinary() - _ = yym458 - if false { - } else { - r.EncodeInt(int64(yy457)) - } - } - } - } - if yyr453 || yy2arr453 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq453[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym460 := z.EncBinary() - _ = yym460 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq453[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym461 := z.EncBinary() - _ = yym461 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr453 || yy2arr453 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy463 := &x.Template - yy463.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy464 := &x.Template - yy464.CodecEncodeSelf(e) - } - if yyr453 || yy2arr453 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq453[3] { - yy466 := &x.Strategy - yy466.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq453[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("strategy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy467 := &x.Strategy - yy467.CodecEncodeSelf(e) - } - } - if yyr453 || yy2arr453 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq453[4] { - yym469 := z.EncBinary() - _ = yym469 - if false { - } else { - r.EncodeInt(int64(x.MinReadySeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq453[4] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym470 := z.EncBinary() - _ = yym470 - if false { - } else { - r.EncodeInt(int64(x.MinReadySeconds)) - } - } - } - if yyr453 || yy2arr453 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq453[5] { - if x.RevisionHistoryLimit == nil { - r.EncodeNil() - } else { - yy472 := *x.RevisionHistoryLimit - yym473 := z.EncBinary() - _ = yym473 - if false { - } else { - r.EncodeInt(int64(yy472)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq453[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("revisionHistoryLimit")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RevisionHistoryLimit == nil { - r.EncodeNil() - } else { - yy474 := *x.RevisionHistoryLimit - yym475 := z.EncBinary() - _ = yym475 - if false { - } else { - r.EncodeInt(int64(yy474)) - } - } - } - } - if yyr453 || yy2arr453 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq453[6] { - yym477 := z.EncBinary() - _ = yym477 - if false { - } else { - r.EncodeBool(bool(x.Paused)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq453[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("paused")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym478 := z.EncBinary() - _ = yym478 - if false { - } else { - r.EncodeBool(bool(x.Paused)) - } - } - } - if yyr453 || yy2arr453 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq453[7] { - if x.RollbackTo == nil { - r.EncodeNil() - } else { - x.RollbackTo.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq453[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RollbackTo == nil { - r.EncodeNil() - } else { - x.RollbackTo.CodecEncodeSelf(e) - } - } - } - if yyr453 || yy2arr453 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq453[8] { - if x.ProgressDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy481 := *x.ProgressDeadlineSeconds - yym482 := z.EncBinary() - _ = yym482 - if false { - } else { - r.EncodeInt(int64(yy481)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq453[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("progressDeadlineSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ProgressDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy483 := *x.ProgressDeadlineSeconds - yym484 := z.EncBinary() - _ = yym484 - if false { - } else { - r.EncodeInt(int64(yy483)) - } - } - } - } - if yyr453 || yy2arr453 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym485 := z.DecBinary() - _ = yym485 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct486 := r.ContainerType() - if yyct486 == codecSelferValueTypeMap1234 { - yyl486 := r.ReadMapStart() - if yyl486 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } 
else { - x.codecDecodeSelfFromMap(yyl486, d) - } - } else if yyct486 == codecSelferValueTypeArray1234 { - yyl486 := r.ReadArrayStart() - if yyl486 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl486, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys487Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys487Slc - var yyhl487 bool = l >= 0 - for yyj487 := 0; ; yyj487++ { - if yyhl487 { - if yyj487 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys487Slc = r.DecodeBytes(yys487Slc, true, true) - yys487 := string(yys487Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys487 { - case "replicas": - if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } - } else { - if x.Replicas == nil { - x.Replicas = new(int32) - } - yym489 := z.DecBinary() - _ = yym489 - if false { - } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym491 := z.DecBinary() - _ = yym491 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} - } else { - yyv492 := &x.Template - yyv492.CodecDecodeSelf(d) - } - case "strategy": - if r.TryDecodeAsNil() { - x.Strategy = DeploymentStrategy{} - } else { - yyv493 := &x.Strategy - yyv493.CodecDecodeSelf(d) - } - case "minReadySeconds": - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - x.MinReadySeconds = int32(r.DecodeInt(32)) - } - case "revisionHistoryLimit": - if r.TryDecodeAsNil() { - if x.RevisionHistoryLimit != nil { - x.RevisionHistoryLimit = nil - } - } else { - if x.RevisionHistoryLimit == nil { - x.RevisionHistoryLimit = new(int32) - } - yym496 := z.DecBinary() - _ = yym496 - if false { - } else { - *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) - } - } - case "paused": - if r.TryDecodeAsNil() { - x.Paused = false - } else { - x.Paused = bool(r.DecodeBool()) - } - case "rollbackTo": - if r.TryDecodeAsNil() { - if x.RollbackTo != nil { - x.RollbackTo = nil - } - } else { - if x.RollbackTo == nil { - x.RollbackTo = new(RollbackConfig) - } - x.RollbackTo.CodecDecodeSelf(d) - } - case "progressDeadlineSeconds": - if r.TryDecodeAsNil() { - if x.ProgressDeadlineSeconds != nil { - x.ProgressDeadlineSeconds = nil - } - } else { - if x.ProgressDeadlineSeconds == nil { - x.ProgressDeadlineSeconds = new(int32) - } - yym500 := z.DecBinary() - _ = yym500 - if false { - } else { - *((*int32)(x.ProgressDeadlineSeconds)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys487) - } // end switch yys487 - } // end for yyj487 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj501 int - var yyb501 bool - var yyhl501 bool = l >= 0 - yyj501++ - if yyhl501 { - yyb501 = yyj501 > l - } else { - yyb501 = 
r.CheckBreak() - } - if yyb501 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } - } else { - if x.Replicas == nil { - x.Replicas = new(int32) - } - yym503 := z.DecBinary() - _ = yym503 - if false { - } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) - } - } - yyj501++ - if yyhl501 { - yyb501 = yyj501 > l - } else { - yyb501 = r.CheckBreak() - } - if yyb501 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym505 := z.DecBinary() - _ = yym505 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj501++ - if yyhl501 { - yyb501 = yyj501 > l - } else { - yyb501 = r.CheckBreak() - } - if yyb501 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} - } else { - yyv506 := &x.Template - yyv506.CodecDecodeSelf(d) - } - yyj501++ - if yyhl501 { - yyb501 = yyj501 > l - } else { - yyb501 = r.CheckBreak() - } - if yyb501 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Strategy = DeploymentStrategy{} - } else { - yyv507 := &x.Strategy - yyv507.CodecDecodeSelf(d) - } - yyj501++ - if yyhl501 { - yyb501 = yyj501 > l - } else { - yyb501 = r.CheckBreak() - } - if yyb501 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - x.MinReadySeconds = int32(r.DecodeInt(32)) - } - yyj501++ - if yyhl501 { - yyb501 = yyj501 > l - } else { - yyb501 = r.CheckBreak() - } - if yyb501 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RevisionHistoryLimit != nil { - x.RevisionHistoryLimit = nil - } - } else { - if x.RevisionHistoryLimit == nil { - x.RevisionHistoryLimit = new(int32) - } - yym510 := z.DecBinary() - _ = yym510 - if false { - } else { - *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) - } - } - yyj501++ - if yyhl501 { - yyb501 = yyj501 > l - } else { - yyb501 = r.CheckBreak() - } - if yyb501 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Paused = false - } else { - x.Paused = bool(r.DecodeBool()) - } - yyj501++ - if yyhl501 { - yyb501 = yyj501 > l - } else { - yyb501 = r.CheckBreak() - } - if yyb501 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RollbackTo != nil { - x.RollbackTo = nil - } - } else { - if x.RollbackTo == nil { - x.RollbackTo = new(RollbackConfig) - } - x.RollbackTo.CodecDecodeSelf(d) - } - yyj501++ - if yyhl501 { - yyb501 = yyj501 > l - } else { - yyb501 = r.CheckBreak() - } - if yyb501 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ProgressDeadlineSeconds != nil { - x.ProgressDeadlineSeconds = nil - } - } else { - if x.ProgressDeadlineSeconds == nil { - x.ProgressDeadlineSeconds = new(int32) - } - yym514 := z.DecBinary() - _ = yym514 - if false { - } else { - *((*int32)(x.ProgressDeadlineSeconds)) = int32(r.DecodeInt(32)) - } - } - for { - yyj501++ - if yyhl501 { - yyb501 = yyj501 > l - } else { - yyb501 = r.CheckBreak() - } - if yyb501 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj501-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentRollback) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym515 := z.EncBinary() - _ = yym515 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep516 := !z.EncBinary() - yy2arr516 := z.EncBasicHandle().StructToArray - var yyq516 [5]bool - _, _, _ = yysep516, yyq516, yy2arr516 - const yyr516 bool = false - yyq516[0] = x.Kind != "" - yyq516[1] = x.APIVersion != "" - yyq516[3] = len(x.UpdatedAnnotations) != 0 - var yynn516 int - if yyr516 || yy2arr516 { - r.EncodeArrayStart(5) - } else { - yynn516 = 2 - for _, b := range yyq516 { - if b { - yynn516++ - } - } - r.EncodeMapStart(yynn516) - yynn516 = 0 - } - if yyr516 || yy2arr516 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq516[0] { - yym518 := z.EncBinary() - _ = yym518 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq516[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym519 := z.EncBinary() - _ = yym519 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr516 || yy2arr516 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq516[1] { - yym521 := z.EncBinary() - _ = yym521 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq516[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym522 := z.EncBinary() - _ = yym522 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr516 || yy2arr516 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym524 := z.EncBinary() - _ = yym524 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym525 := z.EncBinary() - _ = yym525 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr516 || yy2arr516 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq516[3] { - if x.UpdatedAnnotations == nil { - r.EncodeNil() - } else { - yym527 := z.EncBinary() - _ = yym527 - if false { - 
} else { - z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq516[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("updatedAnnotations")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.UpdatedAnnotations == nil { - r.EncodeNil() - } else { - yym528 := z.EncBinary() - _ = yym528 - if false { - } else { - z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) - } - } - } - } - if yyr516 || yy2arr516 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy530 := &x.RollbackTo - yy530.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy531 := &x.RollbackTo - yy531.CodecEncodeSelf(e) - } - if yyr516 || yy2arr516 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentRollback) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym532 := z.DecBinary() - _ = yym532 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct533 := r.ContainerType() - if yyct533 == codecSelferValueTypeMap1234 { - yyl533 := r.ReadMapStart() - if yyl533 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl533, d) - } - } else if yyct533 == codecSelferValueTypeArray1234 { - yyl533 := r.ReadArrayStart() - if yyl533 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl533, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentRollback) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys534Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys534Slc - var yyhl534 bool = l >= 0 - for yyj534 := 0; ; yyj534++ { - if yyhl534 { - if yyj534 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys534Slc = r.DecodeBytes(yys534Slc, true, true) - yys534 := string(yys534Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys534 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "updatedAnnotations": - if r.TryDecodeAsNil() { - x.UpdatedAnnotations = nil - } else { - yyv538 := &x.UpdatedAnnotations - yym539 := z.DecBinary() - _ = yym539 - if false { - } else { - z.F.DecMapStringStringX(yyv538, false, d) - } - } - case "rollbackTo": - if r.TryDecodeAsNil() { - x.RollbackTo = RollbackConfig{} - } else { - yyv540 := &x.RollbackTo - yyv540.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys534) - } // end switch yys534 - } // end for yyj534 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentRollback) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj541 int - var yyb541 bool - var yyhl541 bool = l >= 0 - yyj541++ - if yyhl541 { - yyb541 = yyj541 > l - } else { - yyb541 = r.CheckBreak() - } - if yyb541 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj541++ - if yyhl541 { - yyb541 = yyj541 > l - } else { - yyb541 = r.CheckBreak() - } - if yyb541 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj541++ - if yyhl541 { - yyb541 = yyj541 > l - } else { - yyb541 = r.CheckBreak() - } - if yyb541 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj541++ - if yyhl541 { - yyb541 = yyj541 > l - } else { - yyb541 = r.CheckBreak() - } - if yyb541 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UpdatedAnnotations = nil - } else { - yyv545 := &x.UpdatedAnnotations - yym546 := z.DecBinary() - _ = yym546 - if false { - } else { - z.F.DecMapStringStringX(yyv545, false, d) - } - } - yyj541++ - if yyhl541 { - yyb541 = yyj541 > l - } else { - yyb541 = r.CheckBreak() - } - if yyb541 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RollbackTo = RollbackConfig{} - } else { - yyv547 := &x.RollbackTo - yyv547.CodecDecodeSelf(d) - } - for { - yyj541++ - if yyhl541 { - yyb541 = yyj541 > l - } else { - yyb541 = r.CheckBreak() - } - if yyb541 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj541-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RollbackConfig) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym548 := z.EncBinary() - _ = yym548 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep549 := !z.EncBinary() - yy2arr549 := z.EncBasicHandle().StructToArray - var yyq549 [1]bool - _, _, _ = yysep549, yyq549, yy2arr549 - const yyr549 bool = false - yyq549[0] = x.Revision != 0 - var yynn549 int - if yyr549 || yy2arr549 { - r.EncodeArrayStart(1) - } else { - yynn549 = 0 - for _, b := range yyq549 { - if b { - yynn549++ - } - } - r.EncodeMapStart(yynn549) - yynn549 = 0 - } - if yyr549 || yy2arr549 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq549[0] { - yym551 := z.EncBinary() - _ = yym551 - if false { - } else { - r.EncodeInt(int64(x.Revision)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq549[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("revision")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym552 := z.EncBinary() - _ = yym552 - if false { - } else { - r.EncodeInt(int64(x.Revision)) - } - } - } - if yyr549 || yy2arr549 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RollbackConfig) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym553 := z.DecBinary() - _ = yym553 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct554 := r.ContainerType() - if yyct554 == codecSelferValueTypeMap1234 { - yyl554 := r.ReadMapStart() - if yyl554 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl554, d) - } - } else if yyct554 == codecSelferValueTypeArray1234 { - yyl554 := r.ReadArrayStart() - if yyl554 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl554, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RollbackConfig) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys555Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys555Slc - var yyhl555 bool = l >= 0 - for yyj555 := 0; ; yyj555++ { - if yyhl555 { - if yyj555 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys555Slc = r.DecodeBytes(yys555Slc, true, true) - yys555 := string(yys555Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys555 { - case "revision": - if r.TryDecodeAsNil() { - x.Revision = 0 - } else { - x.Revision = int64(r.DecodeInt(64)) - } - default: - z.DecStructFieldNotFound(-1, yys555) - } // end switch yys555 - } // end for yyj555 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RollbackConfig) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj557 int - var yyb557 bool - var yyhl557 bool = l >= 0 - yyj557++ - if yyhl557 { - yyb557 = yyj557 > l - } else { - yyb557 = r.CheckBreak() - } - if yyb557 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Revision = 0 - } else { - x.Revision = int64(r.DecodeInt(64)) - } - for { - yyj557++ - if yyhl557 { - yyb557 = yyj557 > l - } else { - yyb557 = r.CheckBreak() - } - if yyb557 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj557-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentStrategy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym559 := z.EncBinary() - _ = yym559 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep560 := !z.EncBinary() - yy2arr560 := z.EncBasicHandle().StructToArray - var yyq560 [2]bool - _, _, _ = yysep560, yyq560, yy2arr560 - const yyr560 bool = false - yyq560[0] = x.Type != "" - yyq560[1] = x.RollingUpdate != nil - var yynn560 int - if yyr560 || yy2arr560 { - r.EncodeArrayStart(2) - } else { - yynn560 = 0 - for _, b := range yyq560 { - if b { - yynn560++ - } - } - r.EncodeMapStart(yynn560) - yynn560 = 0 - } - if yyr560 || yy2arr560 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq560[0] { - 
x.Type.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq560[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - } - if yyr560 || yy2arr560 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq560[1] { - if x.RollingUpdate == nil { - r.EncodeNil() - } else { - x.RollingUpdate.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq560[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rollingUpdate")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RollingUpdate == nil { - r.EncodeNil() - } else { - x.RollingUpdate.CodecEncodeSelf(e) - } - } - } - if yyr560 || yy2arr560 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentStrategy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym563 := z.DecBinary() - _ = yym563 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct564 := r.ContainerType() - if yyct564 == codecSelferValueTypeMap1234 { - yyl564 := r.ReadMapStart() - if yyl564 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl564, d) - } - } else if yyct564 == codecSelferValueTypeArray1234 { - yyl564 := r.ReadArrayStart() - if yyl564 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl564, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys565Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys565Slc - var yyhl565 bool = l >= 0 - for yyj565 := 0; ; yyj565++ { - if yyhl565 { - if yyj565 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys565Slc = r.DecodeBytes(yys565Slc, true, true) - yys565 := string(yys565Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys565 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = DeploymentStrategyType(r.DecodeString()) - } - case "rollingUpdate": - if r.TryDecodeAsNil() { - if x.RollingUpdate != nil { - x.RollingUpdate = nil - } - } else { - if x.RollingUpdate == nil { - x.RollingUpdate = new(RollingUpdateDeployment) - } - x.RollingUpdate.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys565) - } // end switch yys565 - } // end for yyj565 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentStrategy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj568 int - var yyb568 bool - var yyhl568 bool = l >= 0 - yyj568++ - if yyhl568 { - yyb568 = yyj568 > l - } else { - yyb568 = r.CheckBreak() - } - if yyb568 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = 
DeploymentStrategyType(r.DecodeString()) - } - yyj568++ - if yyhl568 { - yyb568 = yyj568 > l - } else { - yyb568 = r.CheckBreak() - } - if yyb568 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RollingUpdate != nil { - x.RollingUpdate = nil - } - } else { - if x.RollingUpdate == nil { - x.RollingUpdate = new(RollingUpdateDeployment) - } - x.RollingUpdate.CodecDecodeSelf(d) - } - for { - yyj568++ - if yyhl568 { - yyb568 = yyj568 > l - } else { - yyb568 = r.CheckBreak() - } - if yyb568 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj568-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x DeploymentStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym571 := z.EncBinary() - _ = yym571 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *DeploymentStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym572 := z.DecBinary() - _ = yym572 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *RollingUpdateDeployment) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym573 := z.EncBinary() - _ = yym573 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep574 := !z.EncBinary() - yy2arr574 := z.EncBasicHandle().StructToArray - var yyq574 [2]bool - _, _, _ = yysep574, yyq574, yy2arr574 - const yyr574 bool = false - yyq574[0] = x.MaxUnavailable != nil - yyq574[1] = x.MaxSurge != nil - var yynn574 int - if yyr574 || yy2arr574 { - r.EncodeArrayStart(2) - } else { - yynn574 = 0 - for _, b := range yyq574 { - if b { - yynn574++ - } - } - r.EncodeMapStart(yynn574) - yynn574 = 0 - } - if yyr574 || yy2arr574 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq574[0] { - if x.MaxUnavailable == nil { - r.EncodeNil() - } else { - yym576 := z.EncBinary() - _ = yym576 - if false { - } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { - } else if !yym576 && z.IsJSONHandle() { - z.EncJSONMarshal(x.MaxUnavailable) - } else { - z.EncFallback(x.MaxUnavailable) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq574[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxUnavailable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MaxUnavailable == nil { - r.EncodeNil() - } else { - yym577 := z.EncBinary() - _ = yym577 - if false { - } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { - } else if !yym577 && z.IsJSONHandle() { - z.EncJSONMarshal(x.MaxUnavailable) - } else { - z.EncFallback(x.MaxUnavailable) - } - } - } - } - if yyr574 || yy2arr574 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq574[1] { - if x.MaxSurge == nil { - r.EncodeNil() - } else { - yym579 := z.EncBinary() - _ = yym579 - if false { - } else if z.HasExtensions() && z.EncExt(x.MaxSurge) { - } else if !yym579 && z.IsJSONHandle() { - z.EncJSONMarshal(x.MaxSurge) - } else { - z.EncFallback(x.MaxSurge) - } - } - } else { - 
r.EncodeNil() - } - } else { - if yyq574[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxSurge")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MaxSurge == nil { - r.EncodeNil() - } else { - yym580 := z.EncBinary() - _ = yym580 - if false { - } else if z.HasExtensions() && z.EncExt(x.MaxSurge) { - } else if !yym580 && z.IsJSONHandle() { - z.EncJSONMarshal(x.MaxSurge) - } else { - z.EncFallback(x.MaxSurge) - } - } - } - } - if yyr574 || yy2arr574 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RollingUpdateDeployment) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym581 := z.DecBinary() - _ = yym581 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct582 := r.ContainerType() - if yyct582 == codecSelferValueTypeMap1234 { - yyl582 := r.ReadMapStart() - if yyl582 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl582, d) - } - } else if yyct582 == codecSelferValueTypeArray1234 { - yyl582 := r.ReadArrayStart() - if yyl582 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl582, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RollingUpdateDeployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys583Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys583Slc - var yyhl583 bool = l >= 0 - for yyj583 := 0; ; yyj583++ { - if yyhl583 { - if yyj583 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys583Slc = r.DecodeBytes(yys583Slc, true, true) - yys583 := string(yys583Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys583 { - case "maxUnavailable": - if r.TryDecodeAsNil() { - if x.MaxUnavailable != nil { - x.MaxUnavailable = nil - } - } else { - if x.MaxUnavailable == nil { - x.MaxUnavailable = new(pkg5_intstr.IntOrString) - } - yym585 := z.DecBinary() - _ = yym585 - if false { - } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { - } else if !yym585 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.MaxUnavailable) - } else { - z.DecFallback(x.MaxUnavailable, false) - } - } - case "maxSurge": - if r.TryDecodeAsNil() { - if x.MaxSurge != nil { - x.MaxSurge = nil - } - } else { - if x.MaxSurge == nil { - x.MaxSurge = new(pkg5_intstr.IntOrString) - } - yym587 := z.DecBinary() - _ = yym587 - if false { - } else if z.HasExtensions() && z.DecExt(x.MaxSurge) { - } else if !yym587 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.MaxSurge) - } else { - z.DecFallback(x.MaxSurge, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys583) - } // end switch yys583 - } // end for yyj583 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RollingUpdateDeployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj588 int - var yyb588 bool - var yyhl588 bool = l >= 0 - yyj588++ - if yyhl588 { - yyb588 = yyj588 > l - } else { - yyb588 = r.CheckBreak() - } - if yyb588 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.MaxUnavailable != nil { - x.MaxUnavailable = nil - } - } else { - if x.MaxUnavailable == nil { - x.MaxUnavailable = new(pkg5_intstr.IntOrString) - } - yym590 := z.DecBinary() - _ = yym590 - if false { - } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { - } else if !yym590 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.MaxUnavailable) - } else { - z.DecFallback(x.MaxUnavailable, false) - } - } - yyj588++ - if yyhl588 { - yyb588 = yyj588 > l - } else { - yyb588 = r.CheckBreak() - } - if yyb588 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.MaxSurge != nil { - x.MaxSurge = nil - } - } else { - if x.MaxSurge == nil { - x.MaxSurge = new(pkg5_intstr.IntOrString) - } - yym592 := z.DecBinary() - _ = yym592 - if false { - } else if z.HasExtensions() && z.DecExt(x.MaxSurge) { - } else if !yym592 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.MaxSurge) - } else { - z.DecFallback(x.MaxSurge, false) - } - } - for { - yyj588++ - if yyhl588 { - yyb588 = yyj588 > l - } else { - yyb588 = r.CheckBreak() - } - if yyb588 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj588-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym593 := z.EncBinary() - _ = yym593 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep594 := !z.EncBinary() - yy2arr594 := z.EncBasicHandle().StructToArray - var yyq594 [6]bool - _, _, _ = yysep594, yyq594, yy2arr594 - const yyr594 bool = false - yyq594[0] = x.ObservedGeneration != 0 - yyq594[1] = x.Replicas != 0 - yyq594[2] = x.UpdatedReplicas != 0 - yyq594[3] = x.AvailableReplicas != 0 - yyq594[4] = x.UnavailableReplicas != 0 - yyq594[5] = len(x.Conditions) != 0 - var yynn594 int - if yyr594 || yy2arr594 { - r.EncodeArrayStart(6) - } else { - yynn594 = 0 - for _, b := range yyq594 { - if b { - yynn594++ - } - } - r.EncodeMapStart(yynn594) - yynn594 = 0 - } - if yyr594 || yy2arr594 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq594[0] { - yym596 := z.EncBinary() - _ = yym596 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq594[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym597 := z.EncBinary() - _ = yym597 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } - } - if yyr594 || yy2arr594 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq594[1] { - yym599 := z.EncBinary() - _ = yym599 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq594[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym600 := z.EncBinary() - _ = yym600 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - } - if yyr594 || 
yy2arr594 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq594[2] { - yym602 := z.EncBinary() - _ = yym602 - if false { - } else { - r.EncodeInt(int64(x.UpdatedReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq594[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("updatedReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym603 := z.EncBinary() - _ = yym603 - if false { - } else { - r.EncodeInt(int64(x.UpdatedReplicas)) - } - } - } - if yyr594 || yy2arr594 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq594[3] { - yym605 := z.EncBinary() - _ = yym605 - if false { - } else { - r.EncodeInt(int64(x.AvailableReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq594[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym606 := z.EncBinary() - _ = yym606 - if false { - } else { - r.EncodeInt(int64(x.AvailableReplicas)) - } - } - } - if yyr594 || yy2arr594 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq594[4] { - yym608 := z.EncBinary() - _ = yym608 - if false { - } else { - r.EncodeInt(int64(x.UnavailableReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq594[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("unavailableReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym609 := z.EncBinary() - _ = yym609 - if false { - } else { - r.EncodeInt(int64(x.UnavailableReplicas)) - } - } - } - if yyr594 || yy2arr594 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq594[5] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym611 := z.EncBinary() - _ = yym611 - if false { - } else { - h.encSliceDeploymentCondition(([]DeploymentCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq594[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym612 := z.EncBinary() - _ = yym612 - if false { - } else { - h.encSliceDeploymentCondition(([]DeploymentCondition)(x.Conditions), e) - } - } - } - } - if yyr594 || yy2arr594 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym613 := z.DecBinary() - _ = yym613 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct614 := r.ContainerType() - if yyct614 == codecSelferValueTypeMap1234 { - yyl614 := r.ReadMapStart() - if yyl614 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl614, d) - } - } else if yyct614 == codecSelferValueTypeArray1234 { - yyl614 := r.ReadArrayStart() - if yyl614 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl614, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys615Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys615Slc - var yyhl615 bool = l >= 0 - for yyj615 := 0; ; yyj615++ { - if yyhl615 { - if yyj615 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys615Slc = r.DecodeBytes(yys615Slc, true, true) - yys615 := string(yys615Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys615 { - case "observedGeneration": - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) - } - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "updatedReplicas": - if r.TryDecodeAsNil() { - x.UpdatedReplicas = 0 - } else { - x.UpdatedReplicas = int32(r.DecodeInt(32)) - } - case "availableReplicas": - if r.TryDecodeAsNil() { - x.AvailableReplicas = 0 - } else { - x.AvailableReplicas = int32(r.DecodeInt(32)) - } - case "unavailableReplicas": - if r.TryDecodeAsNil() { - x.UnavailableReplicas = 0 - } else { - x.UnavailableReplicas = int32(r.DecodeInt(32)) - } - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv621 := &x.Conditions - yym622 := z.DecBinary() - _ = yym622 - if false { - } else { - h.decSliceDeploymentCondition((*[]DeploymentCondition)(yyv621), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys615) - } // end switch yys615 - } // end for yyj615 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj623 int - var yyb623 bool - var yyhl623 bool = l >= 0 - yyj623++ - if yyhl623 { - yyb623 = yyj623 > l - } else { - yyb623 = r.CheckBreak() - } - if yyb623 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) - } - yyj623++ - if yyhl623 { - yyb623 = yyj623 > l - } else { - yyb623 = r.CheckBreak() - } - if yyb623 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - yyj623++ - if yyhl623 { - yyb623 = yyj623 > l - } else { - yyb623 = r.CheckBreak() - } - if yyb623 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UpdatedReplicas = 0 - } else { - x.UpdatedReplicas = int32(r.DecodeInt(32)) - } - yyj623++ - if yyhl623 { - yyb623 = yyj623 > l - } else { - yyb623 = r.CheckBreak() - } - if yyb623 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AvailableReplicas = 0 - } else { - x.AvailableReplicas = int32(r.DecodeInt(32)) - } - yyj623++ - if yyhl623 { - yyb623 = yyj623 > l - } else { - yyb623 = r.CheckBreak() - } - if yyb623 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UnavailableReplicas = 0 - } else 
{ - x.UnavailableReplicas = int32(r.DecodeInt(32)) - } - yyj623++ - if yyhl623 { - yyb623 = yyj623 > l - } else { - yyb623 = r.CheckBreak() - } - if yyb623 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv629 := &x.Conditions - yym630 := z.DecBinary() - _ = yym630 - if false { - } else { - h.decSliceDeploymentCondition((*[]DeploymentCondition)(yyv629), d) - } - } - for { - yyj623++ - if yyhl623 { - yyb623 = yyj623 > l - } else { - yyb623 = r.CheckBreak() - } - if yyb623 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj623-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x DeploymentConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym631 := z.EncBinary() - _ = yym631 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *DeploymentConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym632 := z.DecBinary() - _ = yym632 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *DeploymentCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym633 := z.EncBinary() - _ = yym633 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep634 := !z.EncBinary() - yy2arr634 := z.EncBasicHandle().StructToArray - var yyq634 [6]bool - _, _, _ = yysep634, yyq634, yy2arr634 - const yyr634 bool = false - yyq634[2] = true - yyq634[3] = true - yyq634[4] = x.Reason != "" - yyq634[5] = x.Message != "" - var yynn634 int - if yyr634 || yy2arr634 { - r.EncodeArrayStart(6) - } else { - yynn634 = 2 - for _, b := range yyq634 { - if b { - yynn634++ - } - } - r.EncodeMapStart(yynn634) - yynn634 = 0 - } - if yyr634 || yy2arr634 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr634 || yy2arr634 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym637 := z.EncBinary() - _ = yym637 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym638 := z.EncBinary() - _ = yym638 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } - } - if yyr634 || yy2arr634 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq634[2] { - yy640 := &x.LastUpdateTime - yym641 := z.EncBinary() - _ = yym641 - if false { - } else if z.HasExtensions() && z.EncExt(yy640) { - } else if yym641 { - z.EncBinaryMarshal(yy640) - } else if !yym641 && z.IsJSONHandle() { - 
z.EncJSONMarshal(yy640) - } else { - z.EncFallback(yy640) - } - } else { - r.EncodeNil() - } - } else { - if yyq634[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastUpdateTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy642 := &x.LastUpdateTime - yym643 := z.EncBinary() - _ = yym643 - if false { - } else if z.HasExtensions() && z.EncExt(yy642) { - } else if yym643 { - z.EncBinaryMarshal(yy642) - } else if !yym643 && z.IsJSONHandle() { - z.EncJSONMarshal(yy642) - } else { - z.EncFallback(yy642) - } - } - } - if yyr634 || yy2arr634 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq634[3] { - yy645 := &x.LastTransitionTime - yym646 := z.EncBinary() - _ = yym646 - if false { - } else if z.HasExtensions() && z.EncExt(yy645) { - } else if yym646 { - z.EncBinaryMarshal(yy645) - } else if !yym646 && z.IsJSONHandle() { - z.EncJSONMarshal(yy645) - } else { - z.EncFallback(yy645) - } - } else { - r.EncodeNil() - } - } else { - if yyq634[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy647 := &x.LastTransitionTime - yym648 := z.EncBinary() - _ = yym648 - if false { - } else if z.HasExtensions() && z.EncExt(yy647) { - } else if yym648 { - z.EncBinaryMarshal(yy647) - } else if !yym648 && z.IsJSONHandle() { - z.EncJSONMarshal(yy647) - } else { - z.EncFallback(yy647) - } - } - } - if yyr634 || yy2arr634 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq634[4] { - yym650 := z.EncBinary() - _ = yym650 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq634[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym651 := z.EncBinary() - _ = yym651 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr634 || yy2arr634 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq634[5] { - yym653 := z.EncBinary() - _ = yym653 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq634[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym654 := z.EncBinary() - _ = yym654 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr634 || yy2arr634 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym655 := z.DecBinary() - _ = yym655 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct656 := r.ContainerType() - if yyct656 == codecSelferValueTypeMap1234 { - yyl656 := r.ReadMapStart() - if yyl656 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl656, d) - } - } else if yyct656 == codecSelferValueTypeArray1234 { - yyl656 := 
r.ReadArrayStart() - if yyl656 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl656, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys657Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys657Slc - var yyhl657 bool = l >= 0 - for yyj657 := 0; ; yyj657++ { - if yyhl657 { - if yyj657 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys657Slc = r.DecodeBytes(yys657Slc, true, true) - yys657 := string(yys657Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys657 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = DeploymentConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg2_v1.ConditionStatus(r.DecodeString()) - } - case "lastUpdateTime": - if r.TryDecodeAsNil() { - x.LastUpdateTime = pkg1_unversioned.Time{} - } else { - yyv660 := &x.LastUpdateTime - yym661 := z.DecBinary() - _ = yym661 - if false { - } else if z.HasExtensions() && z.DecExt(yyv660) { - } else if yym661 { - z.DecBinaryUnmarshal(yyv660) - } else if !yym661 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv660) - } else { - z.DecFallback(yyv660, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} - } else { - yyv662 := &x.LastTransitionTime - yym663 := z.DecBinary() - _ = yym663 - if false { - } else if z.HasExtensions() && z.DecExt(yyv662) { - } else if yym663 { - z.DecBinaryUnmarshal(yyv662) - } else if !yym663 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv662) - } else { - z.DecFallback(yyv662, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys657) - } // end switch yys657 - } // end for yyj657 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj666 int - var yyb666 bool - var yyhl666 bool = l >= 0 - yyj666++ - if yyhl666 { - yyb666 = yyj666 > l - } else { - yyb666 = r.CheckBreak() - } - if yyb666 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = DeploymentConditionType(r.DecodeString()) - } - yyj666++ - if yyhl666 { - yyb666 = yyj666 > l - } else { - yyb666 = r.CheckBreak() - } - if yyb666 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg2_v1.ConditionStatus(r.DecodeString()) - } - yyj666++ - if yyhl666 { - yyb666 = yyj666 > l - } else { - yyb666 = r.CheckBreak() - } - if yyb666 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - 
x.LastUpdateTime = pkg1_unversioned.Time{} - } else { - yyv669 := &x.LastUpdateTime - yym670 := z.DecBinary() - _ = yym670 - if false { - } else if z.HasExtensions() && z.DecExt(yyv669) { - } else if yym670 { - z.DecBinaryUnmarshal(yyv669) - } else if !yym670 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv669) - } else { - z.DecFallback(yyv669, false) - } - } - yyj666++ - if yyhl666 { - yyb666 = yyj666 > l - } else { - yyb666 = r.CheckBreak() - } - if yyb666 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} - } else { - yyv671 := &x.LastTransitionTime - yym672 := z.DecBinary() - _ = yym672 - if false { - } else if z.HasExtensions() && z.DecExt(yyv671) { - } else if yym672 { - z.DecBinaryUnmarshal(yyv671) - } else if !yym672 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv671) - } else { - z.DecFallback(yyv671, false) - } - } - yyj666++ - if yyhl666 { - yyb666 = yyj666 > l - } else { - yyb666 = r.CheckBreak() - } - if yyb666 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj666++ - if yyhl666 { - yyb666 = yyj666 > l - } else { - yyb666 = r.CheckBreak() - } - if yyb666 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj666++ - if yyhl666 { - yyb666 = yyj666 > l - } else { - yyb666 = r.CheckBreak() - } - if yyb666 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj666-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym675 := z.EncBinary() - _ = yym675 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep676 := !z.EncBinary() - yy2arr676 := z.EncBasicHandle().StructToArray - var yyq676 [4]bool - _, _, _ = yysep676, yyq676, yy2arr676 - const yyr676 bool = false - yyq676[0] = x.Kind != "" - yyq676[1] = x.APIVersion != "" - yyq676[2] = true - var yynn676 int - if yyr676 || yy2arr676 { - r.EncodeArrayStart(4) - } else { - yynn676 = 1 - for _, b := range yyq676 { - if b { - yynn676++ - } - } - r.EncodeMapStart(yynn676) - yynn676 = 0 - } - if yyr676 || yy2arr676 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq676[0] { - yym678 := z.EncBinary() - _ = yym678 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq676[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym679 := z.EncBinary() - _ = yym679 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr676 || yy2arr676 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq676[1] { - yym681 := z.EncBinary() - _ = yym681 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, 
string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq676[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym682 := z.EncBinary() - _ = yym682 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr676 || yy2arr676 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq676[2] { - yy684 := &x.ListMeta - yym685 := z.EncBinary() - _ = yym685 - if false { - } else if z.HasExtensions() && z.EncExt(yy684) { - } else { - z.EncFallback(yy684) - } - } else { - r.EncodeNil() - } - } else { - if yyq676[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy686 := &x.ListMeta - yym687 := z.EncBinary() - _ = yym687 - if false { - } else if z.HasExtensions() && z.EncExt(yy686) { - } else { - z.EncFallback(yy686) - } - } - } - if yyr676 || yy2arr676 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym689 := z.EncBinary() - _ = yym689 - if false { - } else { - h.encSliceDeployment(([]Deployment)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym690 := z.EncBinary() - _ = yym690 - if false { - } else { - h.encSliceDeployment(([]Deployment)(x.Items), e) - } - } - } - if yyr676 || yy2arr676 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym691 := z.DecBinary() - _ = yym691 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct692 := r.ContainerType() - if yyct692 == codecSelferValueTypeMap1234 { - yyl692 := r.ReadMapStart() - if yyl692 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl692, d) - } - } else if yyct692 == codecSelferValueTypeArray1234 { - yyl692 := r.ReadArrayStart() - if yyl692 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl692, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys693Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys693Slc - var yyhl693 bool = l >= 0 - for yyj693 := 0; ; yyj693++ { - if yyhl693 { - if yyj693 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys693Slc = r.DecodeBytes(yys693Slc, true, true) - yys693 := string(yys693Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys693 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = 
string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv696 := &x.ListMeta - yym697 := z.DecBinary() - _ = yym697 - if false { - } else if z.HasExtensions() && z.DecExt(yyv696) { - } else { - z.DecFallback(yyv696, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv698 := &x.Items - yym699 := z.DecBinary() - _ = yym699 - if false { - } else { - h.decSliceDeployment((*[]Deployment)(yyv698), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys693) - } // end switch yys693 - } // end for yyj693 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj700 int - var yyb700 bool - var yyhl700 bool = l >= 0 - yyj700++ - if yyhl700 { - yyb700 = yyj700 > l - } else { - yyb700 = r.CheckBreak() - } - if yyb700 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj700++ - if yyhl700 { - yyb700 = yyj700 > l - } else { - yyb700 = r.CheckBreak() - } - if yyb700 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj700++ - if yyhl700 { - yyb700 = yyj700 > l - } else { - yyb700 = r.CheckBreak() - } - if yyb700 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv703 := &x.ListMeta - yym704 := z.DecBinary() - _ = yym704 - if false { - } else if z.HasExtensions() && z.DecExt(yyv703) { - } else { - z.DecFallback(yyv703, false) - } - } - yyj700++ - if yyhl700 { - yyb700 = yyj700 > l - } else { - yyb700 = r.CheckBreak() - } - if yyb700 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv705 := &x.Items - yym706 := z.DecBinary() - _ = yym706 - if false { - } else { - h.decSliceDeployment((*[]Deployment)(yyv705), d) - } - } - for { - yyj700++ - if yyhl700 { - yyb700 = yyj700 > l - } else { - yyb700 = r.CheckBreak() - } - if yyb700 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj700-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym707 := z.EncBinary() - _ = yym707 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep708 := !z.EncBinary() - yy2arr708 := z.EncBasicHandle().StructToArray - var yyq708 [2]bool - _, _, _ = yysep708, yyq708, yy2arr708 - const yyr708 bool = false - yyq708[0] = x.Selector != nil - var yynn708 int - if yyr708 || yy2arr708 { - r.EncodeArrayStart(2) - } else { - yynn708 = 1 - for _, b := range yyq708 { - if b { - yynn708++ - } - } - r.EncodeMapStart(yynn708) - yynn708 = 0 - } - if yyr708 || yy2arr708 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq708[0] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym710 := z.EncBinary() - _ = yym710 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq708[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym711 := z.EncBinary() - _ = yym711 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr708 || yy2arr708 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy713 := &x.Template - yy713.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy714 := &x.Template - yy714.CodecEncodeSelf(e) - } - if yyr708 || yy2arr708 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym715 := z.DecBinary() - _ = yym715 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct716 := r.ContainerType() - if yyct716 == codecSelferValueTypeMap1234 { - yyl716 := r.ReadMapStart() - if yyl716 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl716, d) - } - } else if yyct716 == codecSelferValueTypeArray1234 { - yyl716 := r.ReadArrayStart() - if yyl716 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl716, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys717Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys717Slc - var yyhl717 bool = l >= 0 - for yyj717 := 0; ; yyj717++ { - if yyhl717 { - if yyj717 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys717Slc = r.DecodeBytes(yys717Slc, true, true) - yys717 := string(yys717Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys717 { - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym719 := z.DecBinary() - _ = yym719 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} - } else { - yyv720 := &x.Template - yyv720.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys717) - } // end switch yys717 - } // end for yyj717 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) + r.EncodeString(codecSelferC_UTF81234, string(x)) + } } -func (x *DaemonSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *DeploymentConditionType) 
CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj721 int - var yyb721 bool - var yyhl721 bool = l >= 0 - yyj721++ - if yyhl721 { - yyb721 = yyj721 > l - } else { - yyb721 = r.CheckBreak() - } - if yyb721 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym723 := z.DecBinary() - _ = yym723 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj721++ - if yyhl721 { - yyb721 = yyj721 > l - } else { - yyb721 = r.CheckBreak() - } - if yyb721 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { } else { - yyv724 := &x.Template - yyv724.CodecDecodeSelf(d) - } - for { - yyj721++ - if yyhl721 { - yyb721 = yyj721 > l - } else { - yyb721 = r.CheckBreak() - } - if yyb721 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj721-1, "") + *((*string)(x)) = r.DecodeString() } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *DaemonSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *DeploymentCondition) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym725 := z.EncBinary() - _ = yym725 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep726 := !z.EncBinary() - yy2arr726 := z.EncBasicHandle().StructToArray - var yyq726 [4]bool - _, _, _ = yysep726, yyq726, yy2arr726 - const yyr726 bool = false - var yynn726 int - if yyr726 || yy2arr726 { - r.EncodeArrayStart(4) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) } else { - yynn726 = 4 - for _, b := range yyq726 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn726++ + yynn2++ } } - r.EncodeMapStart(yynn726) - yynn726 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr726 || yy2arr726 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym728 := z.EncBinary() - _ = yym728 - if false { - } else { - r.EncodeInt(int64(x.CurrentNumberScheduled)) - } + x.Type.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentNumberScheduled")) + r.EncodeString(codecSelferC_UTF81234, string("type")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym729 := z.EncBinary() - _ = yym729 - if false { - } else { - r.EncodeInt(int64(x.CurrentNumberScheduled)) - } + x.Type.CodecEncodeSelf(e) } - if yyr726 || yy2arr726 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym731 := z.EncBinary() - _ = yym731 - if false { - } else { - r.EncodeInt(int64(x.NumberMisscheduled)) - } + 
yysf7 := &x.Status + yysf7.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("numberMisscheduled")) + r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym732 := z.EncBinary() - _ = yym732 - if false { + yysf8 := &x.Status + yysf8.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastUpdateTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } } else { - r.EncodeInt(int64(x.NumberMisscheduled)) + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastUpdateTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastUpdateTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } } } - if yyr726 || yy2arr726 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym734 := z.EncBinary() - _ = yym734 - if false { + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } } else { - r.EncodeInt(int64(x.DesiredNumberScheduled)) + r.EncodeNil() } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("desiredNumberScheduled")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym735 := z.EncBinary() - _ = yym735 - if false { - } else { - r.EncodeInt(int64(x.DesiredNumberScheduled)) + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.LastTransitionTime + yym18 := z.EncBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } else if !yym18 && z.IsJSONHandle() { + z.EncJSONMarshal(yy17) + } else { + z.EncFallback(yy17) + } } } - if yyr726 || yy2arr726 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym737 := z.EncBinary() - _ = yym737 - if false { + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } } else { - r.EncodeInt(int64(x.NumberReady)) + r.EncodeString(codecSelferC_UTF81234, "") } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("numberReady")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym738 := z.EncBinary() - _ = yym738 - if false { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if 
false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } } else { - r.EncodeInt(int64(x.NumberReady)) + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } } } - if yyr726 || yy2arr726 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -8747,29 +6354,29 @@ func (x *DaemonSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *DaemonSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *DeploymentCondition) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym739 := z.DecBinary() - _ = yym739 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct740 := r.ContainerType() - if yyct740 == codecSelferValueTypeMap1234 { - yyl740 := r.ReadMapStart() - if yyl740 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl740, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct740 == codecSelferValueTypeArray1234 { - yyl740 := r.ReadArrayStart() - if yyl740 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl740, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -8777,16 +6384,16 @@ func (x *DaemonSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *DaemonSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *DeploymentCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys741Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys741Slc - var yyhl741 bool = l >= 0 - for yyj741 := 0; ; yyj741++ { - if yyhl741 { - if yyj741 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -8795,168 +6402,282 @@ func (x *DaemonSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys741Slc = r.DecodeBytes(yys741Slc, true, true) - yys741 := string(yys741Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys741 { - case "currentNumberScheduled": + switch yys3 { + case "type": if r.TryDecodeAsNil() { - x.CurrentNumberScheduled = 0 + x.Type = "" } else { - x.CurrentNumberScheduled = int32(r.DecodeInt(32)) + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) } - case "numberMisscheduled": + case "status": if 
r.TryDecodeAsNil() { - x.NumberMisscheduled = 0 + x.Status = "" } else { - x.NumberMisscheduled = int32(r.DecodeInt(32)) + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) } - case "desiredNumberScheduled": + case "lastUpdateTime": if r.TryDecodeAsNil() { - x.DesiredNumberScheduled = 0 + x.LastUpdateTime = pkg1_v1.Time{} } else { - x.DesiredNumberScheduled = int32(r.DecodeInt(32)) + yyv6 := &x.LastUpdateTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } } - case "numberReady": + case "lastTransitionTime": if r.TryDecodeAsNil() { - x.NumberReady = 0 + x.LastTransitionTime = pkg1_v1.Time{} + } else { + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv10 := &x.Reason + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" } else { - x.NumberReady = int32(r.DecodeInt(32)) + yyv12 := &x.Message + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys741) - } // end switch yys741 - } // end for yyj741 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *DaemonSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *DeploymentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj746 int - var yyb746 bool - var yyhl746 bool = l >= 0 - yyj746++ - if yyhl746 { - yyb746 = yyj746 > l + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb746 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb746 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.CurrentNumberScheduled = 0 + x.Type = "" } else { - x.CurrentNumberScheduled = int32(r.DecodeInt(32)) + yyv15 := &x.Type + yyv15.CodecDecodeSelf(d) } - yyj746++ - if yyhl746 { - yyb746 = yyj746 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb746 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb746 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.NumberMisscheduled = 0 + x.Status = "" } else { - x.NumberMisscheduled = int32(r.DecodeInt(32)) + yyv16 := &x.Status + yyv16.CodecDecodeSelf(d) } - yyj746++ - if yyhl746 { - yyb746 = yyj746 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb746 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb746 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.DesiredNumberScheduled = 0 + x.LastUpdateTime = pkg1_v1.Time{} } else { - 
x.DesiredNumberScheduled = int32(r.DecodeInt(32)) + yyv17 := &x.LastUpdateTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } } - yyj746++ - if yyhl746 { - yyb746 = yyj746 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb746 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb746 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.NumberReady = 0 + x.LastTransitionTime = pkg1_v1.Time{} + } else { + yyv19 := &x.LastTransitionTime + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if yym20 { + z.DecBinaryUnmarshal(yyv19) + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) + } else { + z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv21 := &x.Reason + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" } else { - x.NumberReady = int32(r.DecodeInt(32)) + yyv23 := &x.Message + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } } for { - yyj746++ - if yyhl746 { - yyb746 = yyj746 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb746 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb746 { + if yyb14 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj746-1, "") + z.DecStructFieldNotFound(yyj14-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *DaemonSet) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym751 := z.EncBinary() - _ = yym751 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep752 := !z.EncBinary() - yy2arr752 := z.EncBasicHandle().StructToArray - var yyq752 [5]bool - _, _, _ = yysep752, yyq752, yy2arr752 - const yyr752 bool = false - yyq752[0] = x.Kind != "" - yyq752[1] = x.APIVersion != "" - yyq752[2] = true - yyq752[3] = true - yyq752[4] = true - var yynn752 int - if yyr752 || yy2arr752 { - r.EncodeArrayStart(5) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) } else { - yynn752 = 0 - for _, b := range yyq752 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn752++ + yynn2++ } } - r.EncodeMapStart(yynn752) - yynn752 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - 
if yyr752 || yy2arr752 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq752[0] { - yym754 := z.EncBinary() - _ = yym754 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -8965,23 +6686,23 @@ func (x *DaemonSet) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq752[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym755 := z.EncBinary() - _ = yym755 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr752 || yy2arr752 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq752[1] { - yym757 := z.EncBinary() - _ = yym757 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -8990,70 +6711,75 @@ func (x *DaemonSet) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq752[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym758 := z.EncBinary() - _ = yym758 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr752 || yy2arr752 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq752[2] { - yy760 := &x.ObjectMeta - yy760.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq752[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy761 := &x.ObjectMeta - yy761.CodecEncodeSelf(e) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr752 || yy2arr752 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq752[3] { - yy763 := &x.Spec - yy763.CodecEncodeSelf(e) - } else { + if x.Items == nil { r.EncodeNil() - } - } else { - if yyq752[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy764 := &x.Spec - yy764.CodecEncodeSelf(e) - } - } - if yyr752 || yy2arr752 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq752[4] { - yy766 := &x.Status - yy766.CodecEncodeSelf(e) } else { - r.EncodeNil() + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceDeployment(([]Deployment)(x.Items), e) + } } } else { - if yyq752[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy767 := &x.Status - yy767.CodecEncodeSelf(e) + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceDeployment(([]Deployment)(x.Items), e) + } } } - if yyr752 || yy2arr752 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -9062,29 +6788,29 @@ func (x *DaemonSet) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *DaemonSet) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *DeploymentList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym768 := z.DecBinary() - _ = yym768 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct769 := r.ContainerType() - if yyct769 == codecSelferValueTypeMap1234 { - yyl769 := r.ReadMapStart() - if yyl769 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl769, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct769 == codecSelferValueTypeArray1234 { - yyl769 := r.ReadArrayStart() - if yyl769 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl769, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -9092,16 +6818,16 @@ func (x *DaemonSet) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *DaemonSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *DeploymentList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys770Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys770Slc - var yyhl770 bool = l >= 0 - for yyj770 := 0; ; yyj770++ { - if yyhl770 { - if yyj770 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -9110,64 +6836,80 @@ func (x *DaemonSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys770Slc = r.DecodeBytes(yys770Slc, true, true) - yys770 := string(yys770Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys770 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv773 := &x.ObjectMeta - yyv773.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = DaemonSetSpec{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv774 := &x.Spec - yyv774.CodecDecodeSelf(d) + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if 
false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } - case "status": + case "items": if r.TryDecodeAsNil() { - x.Status = DaemonSetStatus{} + x.Items = nil } else { - yyv775 := &x.Status - yyv775.CodecDecodeSelf(d) + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceDeployment((*[]Deployment)(yyv10), d) + } } default: - z.DecStructFieldNotFound(-1, yys770) - } // end switch yys770 - } // end for yyj770 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *DaemonSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *DeploymentList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj776 int - var yyb776 bool - var yyhl776 bool = l >= 0 - yyj776++ - if yyhl776 { - yyb776 = yyj776 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb776 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb776 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9175,15 +6917,21 @@ func (x *DaemonSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj776++ - if yyhl776 { - yyb776 = yyj776 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb776 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb776 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9191,215 +6939,146 @@ func (x *DaemonSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) - } - yyj776++ - if yyhl776 { - yyb776 = yyj776 > l - } else { - yyb776 = r.CheckBreak() - } - if yyb776 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv779 := &x.ObjectMeta - yyv779.CodecDecodeSelf(d) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj776++ - if yyhl776 { - yyb776 = yyj776 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb776 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb776 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Spec = DaemonSetSpec{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv780 := &x.Spec - yyv780.CodecDecodeSelf(d) + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj776++ - if yyhl776 { - yyb776 = yyj776 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb776 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb776 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Status = DaemonSetStatus{} + x.Items = nil } else { - yyv781 := &x.Status - 
yyv781.CodecDecodeSelf(d) + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceDeployment((*[]Deployment)(yyv19), d) + } } for { - yyj776++ - if yyhl776 { - yyb776 = yyj776 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb776 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb776 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj776-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *DaemonSetList) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *DaemonSetUpdateStrategy) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym782 := z.EncBinary() - _ = yym782 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep783 := !z.EncBinary() - yy2arr783 := z.EncBasicHandle().StructToArray - var yyq783 [4]bool - _, _, _ = yysep783, yyq783, yy2arr783 - const yyr783 bool = false - yyq783[0] = x.Kind != "" - yyq783[1] = x.APIVersion != "" - yyq783[2] = true - var yynn783 int - if yyr783 || yy2arr783 { - r.EncodeArrayStart(4) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Type != "" + yyq2[1] = x.RollingUpdate != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) } else { - yynn783 = 1 - for _, b := range yyq783 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn783++ - } - } - r.EncodeMapStart(yynn783) - yynn783 = 0 - } - if yyr783 || yy2arr783 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq783[0] { - yym785 := z.EncBinary() - _ = yym785 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq783[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym786 := z.EncBinary() - _ = yym786 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + yynn2++ } } + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr783 || yy2arr783 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq783[1] { - yym788 := z.EncBinary() - _ = yym788 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } + if yyq2[0] { + x.Type.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq783[1] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + r.EncodeString(codecSelferC_UTF81234, string("type")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym789 := z.EncBinary() - _ = yym789 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } + x.Type.CodecEncodeSelf(e) } } - if yyr783 || yy2arr783 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq783[2] { - yy791 := &x.ListMeta - yym792 := z.EncBinary() - _ = yym792 - if false { - } else if z.HasExtensions() && z.EncExt(yy791) { + if yyq2[1] { + if x.RollingUpdate == nil { + r.EncodeNil() } else { - z.EncFallback(yy791) + x.RollingUpdate.CodecEncodeSelf(e) } } else 
{ r.EncodeNil() } } else { - if yyq783[2] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) + r.EncodeString(codecSelferC_UTF81234, string("rollingUpdate")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy793 := &x.ListMeta - yym794 := z.EncBinary() - _ = yym794 - if false { - } else if z.HasExtensions() && z.EncExt(yy793) { - } else { - z.EncFallback(yy793) - } - } - } - if yyr783 || yy2arr783 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym796 := z.EncBinary() - _ = yym796 - if false { - } else { - h.encSliceDaemonSet(([]DaemonSet)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym797 := z.EncBinary() - _ = yym797 - if false { + if x.RollingUpdate == nil { + r.EncodeNil() } else { - h.encSliceDaemonSet(([]DaemonSet)(x.Items), e) + x.RollingUpdate.CodecEncodeSelf(e) } } } - if yyr783 || yy2arr783 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -9408,29 +7087,29 @@ func (x *DaemonSetList) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *DaemonSetList) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *DaemonSetUpdateStrategy) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym798 := z.DecBinary() - _ = yym798 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct799 := r.ContainerType() - if yyct799 == codecSelferValueTypeMap1234 { - yyl799 := r.ReadMapStart() - if yyl799 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl799, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct799 == codecSelferValueTypeArray1234 { - yyl799 := r.ReadArrayStart() - if yyl799 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl799, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -9438,16 +7117,16 @@ func (x *DaemonSetList) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *DaemonSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *DaemonSetUpdateStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys800Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys800Slc - var yyhl800 bool = l >= 0 - for yyj800 := 0; ; yyj800++ { - if yyhl800 { - if yyj800 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -9456,294 +7135,193 @@ func (x *DaemonSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys800Slc = r.DecodeBytes(yys800Slc, true, true) - yys800 := string(yys800Slc) + yys3Slc = 
r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys800 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": + switch yys3 { + case "type": if r.TryDecodeAsNil() { - x.APIVersion = "" + x.Type = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) } - case "metadata": + case "rollingUpdate": if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv803 := &x.ListMeta - yym804 := z.DecBinary() - _ = yym804 - if false { - } else if z.HasExtensions() && z.DecExt(yyv803) { - } else { - z.DecFallback(yyv803, false) + if x.RollingUpdate != nil { + x.RollingUpdate = nil } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil } else { - yyv805 := &x.Items - yym806 := z.DecBinary() - _ = yym806 - if false { - } else { - h.decSliceDaemonSet((*[]DaemonSet)(yyv805), d) + if x.RollingUpdate == nil { + x.RollingUpdate = new(RollingUpdateDaemonSet) } + x.RollingUpdate.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys800) - } // end switch yys800 - } // end for yyj800 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *DaemonSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *DaemonSetUpdateStrategy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj807 int - var yyb807 bool - var yyhl807 bool = l >= 0 - yyj807++ - if yyhl807 { - yyb807 = yyj807 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb807 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb807 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Kind = "" + x.Type = "" } else { - x.Kind = string(r.DecodeString()) + yyv7 := &x.Type + yyv7.CodecDecodeSelf(d) } - yyj807++ - if yyhl807 { - yyb807 = yyj807 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb807 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb807 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj807++ - if yyhl807 { - yyb807 = yyj807 > l + if x.RollingUpdate != nil { + x.RollingUpdate = nil + } } else { - yyb807 = r.CheckBreak() - } - if yyb807 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return + if x.RollingUpdate == nil { + x.RollingUpdate = new(RollingUpdateDaemonSet) + } + x.RollingUpdate.CodecDecodeSelf(d) } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv810 := &x.ListMeta - yym811 := z.DecBinary() - _ = yym811 - if false { - } else if z.HasExtensions() && z.DecExt(yyv810) { + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - z.DecFallback(yyv810, false) + yyb6 = r.CheckBreak() + } + if yyb6 { + break } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") } - yyj807++ - if yyhl807 { - yyb807 = yyj807 > l + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + 
+func (x DaemonSetUpdateStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { } else { - yyb807 = r.CheckBreak() - } - if yyb807 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return + r.EncodeString(codecSelferC_UTF81234, string(x)) } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil +} + +func (x *DaemonSetUpdateStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { } else { - yyv812 := &x.Items - yym813 := z.DecBinary() - _ = yym813 - if false { - } else { - h.decSliceDaemonSet((*[]DaemonSet)(yyv812), d) - } - } - for { - yyj807++ - if yyhl807 { - yyb807 = yyj807 > l - } else { - yyb807 = r.CheckBreak() - } - if yyb807 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj807-1, "") + *((*string)(x)) = r.DecodeString() } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ThirdPartyResourceDataList) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *RollingUpdateDaemonSet) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym814 := z.EncBinary() - _ = yym814 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep815 := !z.EncBinary() - yy2arr815 := z.EncBasicHandle().StructToArray - var yyq815 [4]bool - _, _, _ = yysep815, yyq815, yy2arr815 - const yyr815 bool = false - yyq815[0] = x.Kind != "" - yyq815[1] = x.APIVersion != "" - yyq815[2] = true - var yynn815 int - if yyr815 || yy2arr815 { - r.EncodeArrayStart(4) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.MaxUnavailable != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) } else { - yynn815 = 1 - for _, b := range yyq815 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn815++ - } - } - r.EncodeMapStart(yynn815) - yynn815 = 0 - } - if yyr815 || yy2arr815 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq815[0] { - yym817 := z.EncBinary() - _ = yym817 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq815[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym818 := z.EncBinary() - _ = yym818 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr815 || yy2arr815 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq815[1] { - yym820 := z.EncBinary() - _ = yym820 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq815[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym821 := 
z.EncBinary() - _ = yym821 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + yynn2++ } } + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr815 || yy2arr815 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq815[2] { - yy823 := &x.ListMeta - yym824 := z.EncBinary() - _ = yym824 - if false { - } else if z.HasExtensions() && z.EncExt(yy823) { + if yyq2[0] { + if x.MaxUnavailable == nil { + r.EncodeNil() } else { - z.EncFallback(yy823) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { + } else if !yym4 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxUnavailable) + } else { + z.EncFallback(x.MaxUnavailable) + } } } else { r.EncodeNil() } } else { - if yyq815[2] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) + r.EncodeString(codecSelferC_UTF81234, string("maxUnavailable")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy825 := &x.ListMeta - yym826 := z.EncBinary() - _ = yym826 - if false { - } else if z.HasExtensions() && z.EncExt(yy825) { - } else { - z.EncFallback(yy825) - } - } - } - if yyr815 || yy2arr815 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym828 := z.EncBinary() - _ = yym828 - if false { - } else { - h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym829 := z.EncBinary() - _ = yym829 - if false { + if x.MaxUnavailable == nil { + r.EncodeNil() } else { - h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxUnavailable) + } else { + z.EncFallback(x.MaxUnavailable) + } } } } - if yyr815 || yy2arr815 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -9752,29 +7330,29 @@ func (x *ThirdPartyResourceDataList) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ThirdPartyResourceDataList) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *RollingUpdateDaemonSet) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym830 := z.DecBinary() - _ = yym830 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct831 := r.ContainerType() - if yyct831 == codecSelferValueTypeMap1234 { - yyl831 := r.ReadMapStart() - if yyl831 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl831, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct831 == codecSelferValueTypeArray1234 { - yyl831 := r.ReadArrayStart() - if yyl831 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl831, d) + 
x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -9782,16 +7360,16 @@ func (x *ThirdPartyResourceDataList) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ThirdPartyResourceDataList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *RollingUpdateDaemonSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys832Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys832Slc - var yyhl832 bool = l >= 0 - for yyj832 := 0; ; yyj832++ { - if yyhl832 { - if yyj832 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -9800,291 +7378,236 @@ func (x *ThirdPartyResourceDataList) codecDecodeSelfFromMap(l int, d *codec1978. } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys832Slc = r.DecodeBytes(yys832Slc, true, true) - yys832 := string(yys832Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys832 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": + switch yys3 { + case "maxUnavailable": if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv835 := &x.ListMeta - yym836 := z.DecBinary() - _ = yym836 - if false { - } else if z.HasExtensions() && z.DecExt(yyv835) { - } else { - z.DecFallback(yyv835, false) + if x.MaxUnavailable != nil { + x.MaxUnavailable = nil } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil } else { - yyv837 := &x.Items - yym838 := z.DecBinary() - _ = yym838 + if x.MaxUnavailable == nil { + x.MaxUnavailable = new(pkg5_intstr.IntOrString) + } + yym5 := z.DecBinary() + _ = yym5 if false { + } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxUnavailable) } else { - h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv837), d) + z.DecFallback(x.MaxUnavailable, false) } } default: - z.DecStructFieldNotFound(-1, yys832) - } // end switch yys832 - } // end for yyj832 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ThirdPartyResourceDataList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *RollingUpdateDaemonSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj839 int - var yyb839 bool - var yyhl839 bool = l >= 0 - yyj839++ - if yyhl839 { - yyb839 = yyj839 > l - } else { - yyb839 = r.CheckBreak() - } - if yyb839 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj839++ - if yyhl839 { - yyb839 = yyj839 > l - } else { - yyb839 = r.CheckBreak() - } - if yyb839 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - 
x.APIVersion = string(r.DecodeString()) - } - yyj839++ - if yyhl839 { - yyb839 = yyj839 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb839 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb839 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv842 := &x.ListMeta - yym843 := z.DecBinary() - _ = yym843 - if false { - } else if z.HasExtensions() && z.DecExt(yyv842) { - } else { - z.DecFallback(yyv842, false) + if x.MaxUnavailable != nil { + x.MaxUnavailable = nil } - } - yyj839++ - if yyhl839 { - yyb839 = yyj839 > l - } else { - yyb839 = r.CheckBreak() - } - if yyb839 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil } else { - yyv844 := &x.Items - yym845 := z.DecBinary() - _ = yym845 + if x.MaxUnavailable == nil { + x.MaxUnavailable = new(pkg5_intstr.IntOrString) + } + yym8 := z.DecBinary() + _ = yym8 if false { + } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxUnavailable) } else { - h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv844), d) + z.DecFallback(x.MaxUnavailable, false) } } for { - yyj839++ - if yyhl839 { - yyb839 = yyj839 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb839 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb839 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj839-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *DaemonSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym846 := z.EncBinary() - _ = yym846 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep847 := !z.EncBinary() - yy2arr847 := z.EncBasicHandle().StructToArray - var yyq847 [5]bool - _, _, _ = yysep847, yyq847, yy2arr847 - const yyr847 bool = false - yyq847[0] = x.Kind != "" - yyq847[1] = x.APIVersion != "" - yyq847[2] = true - yyq847[3] = true - yyq847[4] = true - var yynn847 int - if yyr847 || yy2arr847 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Selector != nil + yyq2[2] = true + yyq2[3] = x.MinReadySeconds != 0 + yyq2[4] = x.TemplateGeneration != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn847 = 0 - for _, b := range yyq847 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn847++ + yynn2++ } } - r.EncodeMapStart(yynn847) - yynn847 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr847 || yy2arr847 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq847[0] { - yym849 := z.EncBinary() - _ = yym849 - if false { + if yyq2[0] { + if x.Selector == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } } } else 
{ - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeNil() } } else { - if yyq847[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) + r.EncodeString(codecSelferC_UTF81234, string("selector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym850 := z.EncBinary() - _ = yym850 - if false { + if x.Selector == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } } } } - if yyr847 || yy2arr847 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq847[1] { - yym852 := z.EncBinary() - _ = yym852 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } + yy7 := &x.Template + yy7.CodecEncodeSelf(e) } else { - if yyq847[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym853 := z.EncBinary() - _ = yym853 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.Template + yy9.CodecEncodeSelf(e) } - if yyr847 || yy2arr847 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq847[2] { - yy855 := &x.ObjectMeta - yy855.CodecEncodeSelf(e) + if yyq2[2] { + yy12 := &x.UpdateStrategy + yy12.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq847[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) + r.EncodeString(codecSelferC_UTF81234, string("updateStrategy")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy856 := &x.ObjectMeta - yy856.CodecEncodeSelf(e) + yy14 := &x.UpdateStrategy + yy14.CodecEncodeSelf(e) } } - if yyr847 || yy2arr847 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq847[3] { - yy858 := &x.Spec - yy858.CodecEncodeSelf(e) + if yyq2[3] { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } } else { - r.EncodeNil() + r.EncodeInt(0) } } else { - if yyq847[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) + r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy859 := &x.Spec - yy859.CodecEncodeSelf(e) + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } } } - if yyr847 || yy2arr847 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq847[4] { - yy861 := &x.Status - yy861.CodecEncodeSelf(e) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.TemplateGeneration)) + } } else { - r.EncodeNil() + r.EncodeInt(0) } } else { - if yyq847[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) + r.EncodeString(codecSelferC_UTF81234, 
string("templateGeneration")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy862 := &x.Status - yy862.CodecEncodeSelf(e) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeInt(int64(x.TemplateGeneration)) + } } } - if yyr847 || yy2arr847 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -10093,29 +7616,29 @@ func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *Job) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *DaemonSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym863 := z.DecBinary() - _ = yym863 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct864 := r.ContainerType() - if yyct864 == codecSelferValueTypeMap1234 { - yyl864 := r.ReadMapStart() - if yyl864 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl864, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct864 == codecSelferValueTypeArray1234 { - yyl864 := r.ReadArrayStart() - if yyl864 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl864, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -10123,16 +7646,16 @@ func (x *Job) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *DaemonSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys865Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys865Slc - var yyhl865 bool = l >= 0 - for yyj865 := 0; ; yyj865++ { - if yyhl865 { - if yyj865 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -10141,296 +7664,411 @@ func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys865Slc = r.DecodeBytes(yys865Slc, true, true) - yys865 := string(yys865Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys865 { - case "kind": + switch yys3 { + case "selector": if r.TryDecodeAsNil() { - x.Kind = "" + if x.Selector != nil { + x.Selector = nil + } } else { - x.Kind = string(r.DecodeString()) + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } } - case "apiVersion": + case "template": if r.TryDecodeAsNil() { - x.APIVersion = "" + x.Template = pkg4_v1.PodTemplateSpec{} } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.Template + yyv6.CodecDecodeSelf(d) } - case "metadata": + case "updateStrategy": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.UpdateStrategy = DaemonSetUpdateStrategy{} } else { - yyv868 := &x.ObjectMeta - yyv868.CodecDecodeSelf(d) 
+ yyv7 := &x.UpdateStrategy + yyv7.CodecDecodeSelf(d) } - case "spec": + case "minReadySeconds": if r.TryDecodeAsNil() { - x.Spec = JobSpec{} + x.MinReadySeconds = 0 } else { - yyv869 := &x.Spec - yyv869.CodecDecodeSelf(d) + yyv8 := &x.MinReadySeconds + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } } - case "status": + case "templateGeneration": if r.TryDecodeAsNil() { - x.Status = JobStatus{} + x.TemplateGeneration = 0 } else { - yyv870 := &x.Status - yyv870.CodecDecodeSelf(d) + yyv10 := &x.TemplateGeneration + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int64)(yyv10)) = int64(r.DecodeInt(64)) + } } default: - z.DecStructFieldNotFound(-1, yys865) - } // end switch yys865 - } // end for yyj865 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *DaemonSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj871 int - var yyb871 bool - var yyhl871 bool = l >= 0 - yyj871++ - if yyhl871 { - yyb871 = yyj871 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb871 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb871 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Kind = "" + if x.Selector != nil { + x.Selector = nil + } } else { - x.Kind = string(r.DecodeString()) + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } } - yyj871++ - if yyhl871 { - yyb871 = yyj871 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb871 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb871 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.APIVersion = "" + x.Template = pkg4_v1.PodTemplateSpec{} } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.Template + yyv15.CodecDecodeSelf(d) } - yyj871++ - if yyhl871 { - yyb871 = yyj871 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb871 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb871 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.UpdateStrategy = DaemonSetUpdateStrategy{} } else { - yyv874 := &x.ObjectMeta - yyv874.CodecDecodeSelf(d) + yyv16 := &x.UpdateStrategy + yyv16.CodecDecodeSelf(d) } - yyj871++ - if yyhl871 { - yyb871 = yyj871 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb871 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb871 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Spec = JobSpec{} + x.MinReadySeconds = 0 } else { - yyv875 := &x.Spec - yyv875.CodecDecodeSelf(d) + yyv17 := &x.MinReadySeconds + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + 
} } - yyj871++ - if yyhl871 { - yyb871 = yyj871 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb871 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb871 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Status = JobStatus{} + x.TemplateGeneration = 0 } else { - yyv876 := &x.Status - yyv876.CodecDecodeSelf(d) + yyv19 := &x.TemplateGeneration + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int64)(yyv19)) = int64(r.DecodeInt(64)) + } } for { - yyj871++ - if yyhl871 { - yyb871 = yyj871 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb871 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb871 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj871-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *DaemonSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym877 := z.EncBinary() - _ = yym877 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep878 := !z.EncBinary() - yy2arr878 := z.EncBasicHandle().StructToArray - var yyq878 [4]bool - _, _, _ = yysep878, yyq878, yy2arr878 - const yyr878 bool = false - yyq878[0] = x.Kind != "" - yyq878[1] = x.APIVersion != "" - yyq878[2] = true - var yynn878 int - if yyr878 || yy2arr878 { - r.EncodeArrayStart(4) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[4] = x.ObservedGeneration != 0 + yyq2[5] = x.UpdatedNumberScheduled != 0 + yyq2[6] = x.NumberAvailable != 0 + yyq2[7] = x.NumberUnavailable != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(8) } else { - yynn878 = 1 - for _, b := range yyq878 { + yynn2 = 4 + for _, b := range yyq2 { if b { - yynn878++ + yynn2++ } } - r.EncodeMapStart(yynn878) - yynn878 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.CurrentNumberScheduled)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentNumberScheduled")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.CurrentNumberScheduled)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.NumberMisscheduled)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("numberMisscheduled")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.NumberMisscheduled)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.DesiredNumberScheduled)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("desiredNumberScheduled")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.DesiredNumberScheduled)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.NumberReady)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("numberReady")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.NumberReady)) + } } - if yyr878 || yy2arr878 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq878[0] { - yym880 := z.EncBinary() - _ = yym880 + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + r.EncodeInt(int64(x.ObservedGeneration)) } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeInt(0) } } else { - if yyq878[0] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym881 := z.EncBinary() - _ = yym881 + yym17 := z.EncBinary() + _ = yym17 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + r.EncodeInt(int64(x.ObservedGeneration)) } } } - if yyr878 || yy2arr878 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq878[1] { - yym883 := z.EncBinary() - _ = yym883 + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + r.EncodeInt(int64(x.UpdatedNumberScheduled)) } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeInt(0) } } else { - if yyq878[1] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + r.EncodeString(codecSelferC_UTF81234, string("updatedNumberScheduled")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym884 := z.EncBinary() - _ = yym884 + yym20 := z.EncBinary() + _ = yym20 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + r.EncodeInt(int64(x.UpdatedNumberScheduled)) } } } - if yyr878 || yy2arr878 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq878[2] { - yy886 := &x.ListMeta - yym887 := z.EncBinary() - _ = yym887 + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 if false { - } else if z.HasExtensions() && z.EncExt(yy886) { } else { - z.EncFallback(yy886) + r.EncodeInt(int64(x.NumberAvailable)) } } else { - r.EncodeNil() + r.EncodeInt(0) } } else { - if yyq878[2] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) + r.EncodeString(codecSelferC_UTF81234, string("numberAvailable")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy888 := &x.ListMeta - yym889 := z.EncBinary() - _ = yym889 + yym23 := z.EncBinary() + _ = yym23 if false { - } else if z.HasExtensions() && z.EncExt(yy888) { } else { - z.EncFallback(yy888) + r.EncodeInt(int64(x.NumberAvailable)) } } } - if yyr878 || yy2arr878 { + if yyr2 || yy2arr2 { 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym891 := z.EncBinary() - _ = yym891 + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 if false { } else { - h.encSliceJob(([]Job)(x.Items), e) + r.EncodeInt(int64(x.NumberUnavailable)) } + } else { + r.EncodeInt(0) } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym892 := z.EncBinary() - _ = yym892 + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("numberUnavailable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := z.EncBinary() + _ = yym26 if false { } else { - h.encSliceJob(([]Job)(x.Items), e) + r.EncodeInt(int64(x.NumberUnavailable)) } } } - if yyr878 || yy2arr878 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -10439,29 +8077,29 @@ func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *JobList) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *DaemonSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym893 := z.DecBinary() - _ = yym893 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct894 := r.ContainerType() - if yyct894 == codecSelferValueTypeMap1234 { - yyl894 := r.ReadMapStart() - if yyl894 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl894, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct894 == codecSelferValueTypeArray1234 { - yyl894 := r.ReadArrayStart() - if yyl894 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl894, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -10469,16 +8107,16 @@ func (x *JobList) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *DaemonSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys895Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys895Slc - var yyhl895 bool = l >= 0 - for yyj895 := 0; ; yyj895++ { - if yyhl895 { - if yyj895 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -10487,376 +8125,461 @@ func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys895Slc = r.DecodeBytes(yys895Slc, true, true) - yys895 := string(yys895Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys895 { - case "kind": + switch yys3 { + case "currentNumberScheduled": if r.TryDecodeAsNil() { - x.Kind = "" + 
x.CurrentNumberScheduled = 0 } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.CurrentNumberScheduled + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } } - case "apiVersion": + case "numberMisscheduled": if r.TryDecodeAsNil() { - x.APIVersion = "" + x.NumberMisscheduled = 0 } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.NumberMisscheduled + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } } - case "metadata": + case "desiredNumberScheduled": if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.DesiredNumberScheduled = 0 } else { - yyv898 := &x.ListMeta - yym899 := z.DecBinary() - _ = yym899 + yyv8 := &x.DesiredNumberScheduled + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv898) { } else { - z.DecFallback(yyv898, false) + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) } } - case "items": + case "numberReady": if r.TryDecodeAsNil() { - x.Items = nil + x.NumberReady = 0 } else { - yyv900 := &x.Items - yym901 := z.DecBinary() - _ = yym901 + yyv10 := &x.NumberReady + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceJob((*[]Job)(yyv900), d) + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "observedGeneration": + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv12 := &x.ObservedGeneration + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int64)(yyv12)) = int64(r.DecodeInt(64)) + } + } + case "updatedNumberScheduled": + if r.TryDecodeAsNil() { + x.UpdatedNumberScheduled = 0 + } else { + yyv14 := &x.UpdatedNumberScheduled + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } + } + case "numberAvailable": + if r.TryDecodeAsNil() { + x.NumberAvailable = 0 + } else { + yyv16 := &x.NumberAvailable + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*int32)(yyv16)) = int32(r.DecodeInt(32)) + } + } + case "numberUnavailable": + if r.TryDecodeAsNil() { + x.NumberUnavailable = 0 + } else { + yyv18 := &x.NumberUnavailable + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*int32)(yyv18)) = int32(r.DecodeInt(32)) } } default: - z.DecStructFieldNotFound(-1, yys895) - } // end switch yys895 - } // end for yyj895 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *DaemonSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj902 int - var yyb902 bool - var yyhl902 bool = l >= 0 - yyj902++ - if yyhl902 { - yyb902 = yyj902 > l + var yyj20 int + var yyb20 bool + var yyhl20 bool = l >= 0 + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentNumberScheduled = 0 + } else { + yyv21 := &x.CurrentNumberScheduled + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NumberMisscheduled = 0 + } else { + yyv23 := &x.NumberMisscheduled + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DesiredNumberScheduled = 0 + } else { + yyv25 := &x.DesiredNumberScheduled + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l } else { - yyb902 = r.CheckBreak() + yyb20 = r.CheckBreak() } - if yyb902 { + if yyb20 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Kind = "" + x.NumberReady = 0 } else { - x.Kind = string(r.DecodeString()) + yyv27 := &x.NumberReady + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(yyv27)) = int32(r.DecodeInt(32)) + } } - yyj902++ - if yyhl902 { - yyb902 = yyj902 > l + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l } else { - yyb902 = r.CheckBreak() + yyb20 = r.CheckBreak() } - if yyb902 { + if yyb20 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.APIVersion = "" + x.ObservedGeneration = 0 } else { - x.APIVersion = string(r.DecodeString()) + yyv29 := &x.ObservedGeneration + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*int64)(yyv29)) = int64(r.DecodeInt(64)) + } } - yyj902++ - if yyhl902 { - yyb902 = yyj902 > l + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l } else { - yyb902 = r.CheckBreak() + yyb20 = r.CheckBreak() } - if yyb902 { + if yyb20 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.UpdatedNumberScheduled = 0 } else { - yyv905 := &x.ListMeta - yym906 := z.DecBinary() - _ = yym906 + yyv31 := &x.UpdatedNumberScheduled + yym32 := z.DecBinary() + _ = yym32 if false { - } else if z.HasExtensions() && z.DecExt(yyv905) { } else { - z.DecFallback(yyv905, false) + *((*int32)(yyv31)) = int32(r.DecodeInt(32)) } } - yyj902++ - if yyhl902 { - yyb902 = yyj902 > l + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l } else { - yyb902 = r.CheckBreak() + yyb20 = r.CheckBreak() } - if yyb902 { + if yyb20 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Items = nil + x.NumberAvailable = 0 + } else { + yyv33 := &x.NumberAvailable + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*int32)(yyv33)) = int32(r.DecodeInt(32)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NumberUnavailable = 0 } else { - yyv907 := &x.Items - yym908 := z.DecBinary() - _ = yym908 + yyv35 := &x.NumberUnavailable + yym36 := z.DecBinary() + _ = yym36 if false { } else { - h.decSliceJob((*[]Job)(yyv907), d) + *((*int32)(yyv35)) = 
int32(r.DecodeInt(32)) } } for { - yyj902++ - if yyhl902 { - yyb902 = yyj902 > l + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l } else { - yyb902 = r.CheckBreak() + yyb20 = r.CheckBreak() } - if yyb902 { + if yyb20 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj902-1, "") + z.DecStructFieldNotFound(yyj20-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *DaemonSet) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym909 := z.EncBinary() - _ = yym909 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep910 := !z.EncBinary() - yy2arr910 := z.EncBasicHandle().StructToArray - var yyq910 [6]bool - _, _, _ = yysep910, yyq910, yy2arr910 - const yyr910 bool = false - yyq910[0] = x.Parallelism != nil - yyq910[1] = x.Completions != nil - yyq910[2] = x.ActiveDeadlineSeconds != nil - yyq910[3] = x.Selector != nil - yyq910[4] = x.AutoSelector != nil - var yynn910 int - if yyr910 || yy2arr910 { - r.EncodeArrayStart(6) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) } else { - yynn910 = 1 - for _, b := range yyq910 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn910++ + yynn2++ } } - r.EncodeMapStart(yynn910) - yynn910 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr910 || yy2arr910 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq910[0] { - if x.Parallelism == nil { - r.EncodeNil() + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { } else { - yy912 := *x.Parallelism - yym913 := z.EncBinary() - _ = yym913 - if false { - } else { - r.EncodeInt(int64(yy912)) - } + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq910[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("parallelism")) + r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Parallelism == nil { - r.EncodeNil() + yym5 := z.EncBinary() + _ = yym5 + if false { } else { - yy914 := *x.Parallelism - yym915 := z.EncBinary() - _ = yym915 - if false { - } else { - r.EncodeInt(int64(yy914)) - } + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr910 || yy2arr910 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq910[1] { - if x.Completions == nil { - r.EncodeNil() + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { } else { - yy917 := *x.Completions - yym918 := z.EncBinary() - _ = yym918 - if false { - } else { - r.EncodeInt(int64(yy917)) - } + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq910[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("completions")) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Completions == nil { - r.EncodeNil() + yym8 := z.EncBinary() + _ = yym8 + if false { } else { - yy919 := *x.Completions - yym920 := z.EncBinary() - _ = yym920 - if false { - } else { - r.EncodeInt(int64(yy919)) - } + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr910 || yy2arr910 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq910[2] { - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - yy922 := *x.ActiveDeadlineSeconds - yym923 := z.EncBinary() - _ = yym923 - if false { - } else { - r.EncodeInt(int64(yy922)) - } + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq910[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - yy924 := *x.ActiveDeadlineSeconds - yym925 := z.EncBinary() - _ = yym925 - if false { - } else { - r.EncodeInt(int64(yy924)) - } + z.EncFallback(yy12) } } } - if yyr910 || yy2arr910 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq910[3] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym927 := z.EncBinary() - _ = yym927 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq910[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) + r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym928 := z.EncBinary() - _ = yym928 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } } - if yyr910 || yy2arr910 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq910[4] { - if x.AutoSelector == nil { - r.EncodeNil() - } else { - yy930 := *x.AutoSelector - yym931 := z.EncBinary() - _ = yym931 - if false { - } else { - r.EncodeBool(bool(yy930)) - } - } + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq910[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("autoSelector")) + r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AutoSelector == nil { - r.EncodeNil() - } else { - yy932 := *x.AutoSelector - yym933 := z.EncBinary() - _ = yym933 - if false { - } else { - r.EncodeBool(bool(yy932)) - } - } + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } - if yyr910 || yy2arr910 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy935 := &x.Template - yy935.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) 
- r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy936 := &x.Template - yy936.CodecEncodeSelf(e) - } - if yyr910 || yy2arr910 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -10865,29 +8588,29 @@ func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *JobSpec) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *DaemonSet) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym937 := z.DecBinary() - _ = yym937 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct938 := r.ContainerType() - if yyct938 == codecSelferValueTypeMap1234 { - yyl938 := r.ReadMapStart() - if yyl938 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl938, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct938 == codecSelferValueTypeArray1234 { - yyl938 := r.ReadArrayStart() - if yyl938 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl938, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -10895,16 +8618,16 @@ func (x *JobSpec) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *DaemonSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys939Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys939Slc - var yyhl939 bool = l >= 0 - for yyj939 := 0; ; yyj939++ { - if yyhl939 { - if yyj939 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -10913,507 +8636,332 @@ func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys939Slc = r.DecodeBytes(yys939Slc, true, true) - yys939 := string(yys939Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys939 { - case "parallelism": - if r.TryDecodeAsNil() { - if x.Parallelism != nil { - x.Parallelism = nil - } - } else { - if x.Parallelism == nil { - x.Parallelism = new(int32) - } - yym941 := z.DecBinary() - _ = yym941 - if false { - } else { - *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) - } - } - case "completions": + switch yys3 { + case "kind": if r.TryDecodeAsNil() { - if x.Completions != nil { - x.Completions = nil - } + x.Kind = "" } else { - if x.Completions == nil { - x.Completions = new(int32) - } - yym943 := z.DecBinary() - _ = yym943 + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 if false { } else { - *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) + *((*string)(yyv4)) = r.DecodeString() } } - case "activeDeadlineSeconds": + case "apiVersion": if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } + x.APIVersion = "" } else { - if 
x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym945 := z.DecBinary() - _ = yym945 + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 if false { } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) + *((*string)(yyv6)) = r.DecodeString() } } - case "selector": + case "metadata": if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym947 := z.DecBinary() - _ = yym947 + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(x.Selector, false) + z.DecFallback(yyv8, false) } } - case "autoSelector": + case "spec": if r.TryDecodeAsNil() { - if x.AutoSelector != nil { - x.AutoSelector = nil - } + x.Spec = DaemonSetSpec{} } else { - if x.AutoSelector == nil { - x.AutoSelector = new(bool) - } - yym949 := z.DecBinary() - _ = yym949 - if false { - } else { - *((*bool)(x.AutoSelector)) = r.DecodeBool() - } + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } - case "template": + case "status": if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} + x.Status = DaemonSetStatus{} } else { - yyv950 := &x.Template - yyv950.CodecDecodeSelf(d) + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys939) - } // end switch yys939 - } // end for yyj939 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *DaemonSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj951 int - var yyb951 bool - var yyhl951 bool = l >= 0 - yyj951++ - if yyhl951 { - yyb951 = yyj951 > l - } else { - yyb951 = r.CheckBreak() - } - if yyb951 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Parallelism != nil { - x.Parallelism = nil - } - } else { - if x.Parallelism == nil { - x.Parallelism = new(int32) - } - yym953 := z.DecBinary() - _ = yym953 - if false { - } else { - *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) - } - } - yyj951++ - if yyhl951 { - yyb951 = yyj951 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb951 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb951 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.Completions != nil { - x.Completions = nil - } + x.Kind = "" } else { - if x.Completions == nil { - x.Completions = new(int32) - } - yym955 := z.DecBinary() - _ = yym955 + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 if false { } else { - *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) + *((*string)(yyv13)) = r.DecodeString() } } - yyj951++ - if yyhl951 { - yyb951 = yyj951 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb951 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb951 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if 
r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } + x.APIVersion = "" } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym957 := z.DecBinary() - _ = yym957 + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 if false { } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) + *((*string)(yyv15)) = r.DecodeString() } } - yyj951++ - if yyhl951 { - yyb951 = yyj951 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb951 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb951 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym959 := z.DecBinary() - _ = yym959 + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(x.Selector, false) + z.DecFallback(yyv17, false) } } - yyj951++ - if yyhl951 { - yyb951 = yyj951 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb951 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb951 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.AutoSelector != nil { - x.AutoSelector = nil - } + x.Spec = DaemonSetSpec{} } else { - if x.AutoSelector == nil { - x.AutoSelector = new(bool) - } - yym961 := z.DecBinary() - _ = yym961 - if false { - } else { - *((*bool)(x.AutoSelector)) = r.DecodeBool() - } + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) } - yyj951++ - if yyhl951 { - yyb951 = yyj951 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb951 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb951 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} + x.Status = DaemonSetStatus{} } else { - yyv962 := &x.Template - yyv962.CodecDecodeSelf(d) + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) } for { - yyj951++ - if yyhl951 { - yyb951 = yyj951 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb951 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb951 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj951-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } - -func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym963 := z.EncBinary() - _ = yym963 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep964 := !z.EncBinary() - yy2arr964 := z.EncBasicHandle().StructToArray - var yyq964 [6]bool - _, _, _ = yysep964, yyq964, yy2arr964 - const yyr964 bool = false - yyq964[0] = len(x.Conditions) != 0 - yyq964[1] = x.StartTime != nil - yyq964[2] = x.CompletionTime != nil - yyq964[3] = x.Active != 0 - yyq964[4] = x.Succeeded != 0 - yyq964[5] = x.Failed != 0 - var yynn964 int - if yyr964 || yy2arr964 { - r.EncodeArrayStart(6) - } else { - yynn964 
= 0 - for _, b := range yyq964 { - if b { - yynn964++ - } - } - r.EncodeMapStart(yynn964) - yynn964 = 0 - } - if yyr964 || yy2arr964 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq964[0] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym966 := z.EncBinary() - _ = yym966 - if false { - } else { - h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq964[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym967 := z.EncBinary() - _ = yym967 - if false { - } else { - h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) - } - } - } - } - if yyr964 || yy2arr964 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq964[1] { - if x.StartTime == nil { - r.EncodeNil() - } else { - yym969 := z.EncBinary() - _ = yym969 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym969 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym969 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } else { - r.EncodeNil() - } + +func (x *DaemonSetList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) } else { - if yyq964[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.StartTime == nil { - r.EncodeNil() - } else { - yym970 := z.EncBinary() - _ = yym970 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym970 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym970 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ } } + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr964 || yy2arr964 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq964[2] { - if x.CompletionTime == nil { - r.EncodeNil() + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { } else { - yym972 := z.EncBinary() - _ = yym972 - if false { - } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { - } else if yym972 { - z.EncBinaryMarshal(x.CompletionTime) - } else if !yym972 && z.IsJSONHandle() { - z.EncJSONMarshal(x.CompletionTime) - } else { - z.EncFallback(x.CompletionTime) - } + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq964[2] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("completionTime")) + r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CompletionTime == nil { - r.EncodeNil() + yym5 := 
z.EncBinary() + _ = yym5 + if false { } else { - yym973 := z.EncBinary() - _ = yym973 - if false { - } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { - } else if yym973 { - z.EncBinaryMarshal(x.CompletionTime) - } else if !yym973 && z.IsJSONHandle() { - z.EncJSONMarshal(x.CompletionTime) - } else { - z.EncFallback(x.CompletionTime) - } + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr964 || yy2arr964 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq964[3] { - yym975 := z.EncBinary() - _ = yym975 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { - r.EncodeInt(int64(x.Active)) + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } else { - r.EncodeInt(0) + r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq964[3] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("active")) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym976 := z.EncBinary() - _ = yym976 + yym8 := z.EncBinary() + _ = yym8 if false { } else { - r.EncodeInt(int64(x.Active)) + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr964 || yy2arr964 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq964[4] { - yym978 := z.EncBinary() - _ = yym978 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - r.EncodeInt(int64(x.Succeeded)) + z.EncFallback(yy10) } } else { - r.EncodeInt(0) + r.EncodeNil() } } else { - if yyq964[4] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("succeeded")) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym979 := z.EncBinary() - _ = yym979 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - r.EncodeInt(int64(x.Succeeded)) + z.EncFallback(yy12) } } } - if yyr964 || yy2arr964 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq964[5] { - yym981 := z.EncBinary() - _ = yym981 + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 if false { } else { - r.EncodeInt(int64(x.Failed)) + h.encSliceDaemonSet(([]DaemonSet)(x.Items), e) } - } else { - r.EncodeInt(0) } } else { - if yyq964[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("failed")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym982 := z.EncBinary() - _ = yym982 + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 if false { } else { - r.EncodeInt(int64(x.Failed)) + h.encSliceDaemonSet(([]DaemonSet)(x.Items), e) } } } - if yyr964 || yy2arr964 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -11422,29 +8970,29 @@ func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *JobStatus) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *DaemonSetList) 
CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym983 := z.DecBinary() - _ = yym983 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct984 := r.ContainerType() - if yyct984 == codecSelferValueTypeMap1234 { - yyl984 := r.ReadMapStart() - if yyl984 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl984, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct984 == codecSelferValueTypeArray1234 { - yyl984 := r.ReadArrayStart() - if yyl984 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl984, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -11452,16 +9000,16 @@ func (x *JobStatus) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *DaemonSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys985Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys985Slc - var yyhl985 bool = l >= 0 - for yyj985 := 0; ; yyj985++ { - if yyhl985 { - if yyj985 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -11470,459 +9018,318 @@ func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys985Slc = r.DecodeBytes(yys985Slc, true, true) - yys985 := string(yys985Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys985 { - case "conditions": + switch yys3 { + case "kind": if r.TryDecodeAsNil() { - x.Conditions = nil + x.Kind = "" } else { - yyv986 := &x.Conditions - yym987 := z.DecBinary() - _ = yym987 + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 if false { } else { - h.decSliceJobCondition((*[]JobCondition)(yyv986), d) + *((*string)(yyv4)) = r.DecodeString() } } - case "startTime": + case "apiVersion": if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } + x.APIVersion = "" } else { - if x.StartTime == nil { - x.StartTime = new(pkg1_unversioned.Time) - } - yym989 := z.DecBinary() - _ = yym989 + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym989 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym989 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) } else { - z.DecFallback(x.StartTime, false) + *((*string)(yyv6)) = r.DecodeString() } } - case "completionTime": + case "metadata": if r.TryDecodeAsNil() { - if x.CompletionTime != nil { - x.CompletionTime = nil - } + x.ListMeta = pkg1_v1.ListMeta{} } else { - if x.CompletionTime == nil { - x.CompletionTime = new(pkg1_unversioned.Time) - } - yym991 := z.DecBinary() - _ = yym991 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { - } else if yym991 { - 
z.DecBinaryUnmarshal(x.CompletionTime) - } else if !yym991 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.CompletionTime) + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(x.CompletionTime, false) + z.DecFallback(yyv8, false) } } - case "active": - if r.TryDecodeAsNil() { - x.Active = 0 - } else { - x.Active = int32(r.DecodeInt(32)) - } - case "succeeded": - if r.TryDecodeAsNil() { - x.Succeeded = 0 - } else { - x.Succeeded = int32(r.DecodeInt(32)) - } - case "failed": + case "items": if r.TryDecodeAsNil() { - x.Failed = 0 + x.Items = nil } else { - x.Failed = int32(r.DecodeInt(32)) + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceDaemonSet((*[]DaemonSet)(yyv10), d) + } } default: - z.DecStructFieldNotFound(-1, yys985) - } // end switch yys985 - } // end for yyj985 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *DaemonSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj995 int - var yyb995 bool - var yyhl995 bool = l >= 0 - yyj995++ - if yyhl995 { - yyb995 = yyj995 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb995 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb995 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Conditions = nil + x.Kind = "" } else { - yyv996 := &x.Conditions - yym997 := z.DecBinary() - _ = yym997 + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 if false { } else { - h.decSliceJobCondition((*[]JobCondition)(yyv996), d) + *((*string)(yyv13)) = r.DecodeString() } } - yyj995++ - if yyhl995 { - yyb995 = yyj995 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb995 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb995 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } + x.APIVersion = "" } else { - if x.StartTime == nil { - x.StartTime = new(pkg1_unversioned.Time) - } - yym999 := z.DecBinary() - _ = yym999 + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym999 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym999 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) } else { - z.DecFallback(x.StartTime, false) + *((*string)(yyv15)) = r.DecodeString() } } - yyj995++ - if yyhl995 { - yyb995 = yyj995 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb995 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb995 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.CompletionTime != nil { - x.CompletionTime = nil - } + x.ListMeta = pkg1_v1.ListMeta{} } else { - if x.CompletionTime == nil { - x.CompletionTime = new(pkg1_unversioned.Time) - } - yym1001 := z.DecBinary() - _ = yym1001 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { - } else if yym1001 { - 
z.DecBinaryUnmarshal(x.CompletionTime) - } else if !yym1001 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.CompletionTime) + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(x.CompletionTime, false) + z.DecFallback(yyv17, false) } } - yyj995++ - if yyhl995 { - yyb995 = yyj995 > l - } else { - yyb995 = r.CheckBreak() - } - if yyb995 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Active = 0 - } else { - x.Active = int32(r.DecodeInt(32)) - } - yyj995++ - if yyhl995 { - yyb995 = yyj995 > l - } else { - yyb995 = r.CheckBreak() - } - if yyb995 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Succeeded = 0 - } else { - x.Succeeded = int32(r.DecodeInt(32)) - } - yyj995++ - if yyhl995 { - yyb995 = yyj995 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb995 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb995 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Failed = 0 + x.Items = nil } else { - x.Failed = int32(r.DecodeInt(32)) + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceDaemonSet((*[]DaemonSet)(yyv19), d) + } } for { - yyj995++ - if yyhl995 { - yyb995 = yyj995 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb995 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb995 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj995-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x JobConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1005 := z.EncBinary() - _ = yym1005 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *JobConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1006 := z.DecBinary() - _ = yym1006 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ThirdPartyResourceDataList) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1007 := z.EncBinary() - _ = yym1007 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1008 := !z.EncBinary() - yy2arr1008 := z.EncBasicHandle().StructToArray - var yyq1008 [6]bool - _, _, _ = yysep1008, yyq1008, yy2arr1008 - const yyr1008 bool = false - yyq1008[2] = true - yyq1008[3] = true - yyq1008[4] = x.Reason != "" - yyq1008[5] = x.Message != "" - var yynn1008 int - if yyr1008 || yy2arr1008 { - r.EncodeArrayStart(6) + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) } else { - yynn1008 = 
2 - for _, b := range yyq1008 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1008++ + yynn2++ } } - r.EncodeMapStart(yynn1008) - yynn1008 = 0 - } - if yyr1008 || yy2arr1008 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr1008 || yy2arr1008 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1011 := z.EncBinary() - _ = yym1011 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1012 := z.EncBinary() - _ = yym1012 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1008 || yy2arr1008 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1008[2] { - yy1014 := &x.LastProbeTime - yym1015 := z.EncBinary() - _ = yym1015 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { - } else if z.HasExtensions() && z.EncExt(yy1014) { - } else if yym1015 { - z.EncBinaryMarshal(yy1014) - } else if !yym1015 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1014) } else { - z.EncFallback(yy1014) + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1008[2] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) + r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1016 := &x.LastProbeTime - yym1017 := z.EncBinary() - _ = yym1017 + yym5 := z.EncBinary() + _ = yym5 if false { - } else if z.HasExtensions() && z.EncExt(yy1016) { - } else if yym1017 { - z.EncBinaryMarshal(yy1016) - } else if !yym1017 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1016) } else { - z.EncFallback(yy1016) + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr1008 || yy2arr1008 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1008[3] { - yy1019 := &x.LastTransitionTime - yym1020 := z.EncBinary() - _ = yym1020 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { - } else if z.HasExtensions() && z.EncExt(yy1019) { - } else if yym1020 { - z.EncBinaryMarshal(yy1019) - } else if !yym1020 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1019) } else { - z.EncFallback(yy1019) + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1008[3] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1021 := &x.LastTransitionTime - yym1022 := z.EncBinary() - _ = yym1022 + yym8 := z.EncBinary() + _ = yym8 if false { - } else if z.HasExtensions() && z.EncExt(yy1021) { - } else if yym1022 { - z.EncBinaryMarshal(yy1021) - 
} else if !yym1022 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1021) } else { - z.EncFallback(yy1021) + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr1008 || yy2arr1008 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1008[4] { - yym1024 := z.EncBinary() - _ = yym1024 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + z.EncFallback(yy10) } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeNil() } } else { - if yyq1008[4] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1025 := z.EncBinary() - _ = yym1025 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + z.EncFallback(yy12) } } } - if yyr1008 || yy2arr1008 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1008[5] { - yym1027 := z.EncBinary() - _ = yym1027 + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e) } - } else { - r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1008[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1028 := z.EncBinary() - _ = yym1028 + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e) } } } - if yyr1008 || yy2arr1008 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -11931,29 +9338,29 @@ func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *JobCondition) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ThirdPartyResourceDataList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1029 := z.DecBinary() - _ = yym1029 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1030 := r.ContainerType() - if yyct1030 == codecSelferValueTypeMap1234 { - yyl1030 := r.ReadMapStart() - if yyl1030 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1030, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1030 == codecSelferValueTypeArray1234 { - yyl1030 := r.ReadArrayStart() - if yyl1030 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1030, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -11961,16 +9368,16 @@ func (x *JobCondition) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ThirdPartyResourceDataList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1031Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1031Slc - var yyhl1031 bool = l >= 0 - for yyj1031 := 0; ; yyj1031++ { - if yyhl1031 { - if yyj1031 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -11979,212 +9386,174 @@ func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1031Slc = r.DecodeBytes(yys1031Slc, true, true) - yys1031 := string(yys1031Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1031 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = JobConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg2_v1.ConditionStatus(r.DecodeString()) - } - case "lastProbeTime": + switch yys3 { + case "kind": if r.TryDecodeAsNil() { - x.LastProbeTime = pkg1_unversioned.Time{} + x.Kind = "" } else { - yyv1034 := &x.LastProbeTime - yym1035 := z.DecBinary() - _ = yym1035 + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 if false { - } else if z.HasExtensions() && z.DecExt(yyv1034) { - } else if yym1035 { - z.DecBinaryUnmarshal(yyv1034) - } else if !yym1035 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1034) } else { - z.DecFallback(yyv1034, false) + *((*string)(yyv4)) = r.DecodeString() } } - case "lastTransitionTime": + case "apiVersion": if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} + x.APIVersion = "" } else { - yyv1036 := &x.LastTransitionTime - yym1037 := z.DecBinary() - _ = yym1037 + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 if false { - } else if z.HasExtensions() && z.DecExt(yyv1036) { - } else if yym1037 { - z.DecBinaryUnmarshal(yyv1036) - } else if !yym1037 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1036) } else { - z.DecFallback(yyv1036, false) + *((*string)(yyv6)) = r.DecodeString() } } - case "reason": + case "metadata": if r.TryDecodeAsNil() { - x.Reason = "" + x.ListMeta = pkg1_v1.ListMeta{} } else { - x.Reason = string(r.DecodeString()) + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } - case "message": + case "items": if r.TryDecodeAsNil() { - x.Message = "" + x.Items = nil } else { - x.Message = string(r.DecodeString()) + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv10), d) + } } default: - z.DecStructFieldNotFound(-1, yys1031) - } // end switch yys1031 - } // end for yyj1031 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x 
*JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *ThirdPartyResourceDataList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1040 int - var yyb1040 bool - var yyhl1040 bool = l >= 0 - yyj1040++ - if yyhl1040 { - yyb1040 = yyj1040 > l - } else { - yyb1040 = r.CheckBreak() - } - if yyb1040 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = JobConditionType(r.DecodeString()) - } - yyj1040++ - if yyhl1040 { - yyb1040 = yyj1040 > l - } else { - yyb1040 = r.CheckBreak() - } - if yyb1040 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg2_v1.ConditionStatus(r.DecodeString()) - } - yyj1040++ - if yyhl1040 { - yyb1040 = yyj1040 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1040 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1040 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.LastProbeTime = pkg1_unversioned.Time{} + x.Kind = "" } else { - yyv1043 := &x.LastProbeTime - yym1044 := z.DecBinary() - _ = yym1044 + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 if false { - } else if z.HasExtensions() && z.DecExt(yyv1043) { - } else if yym1044 { - z.DecBinaryUnmarshal(yyv1043) - } else if !yym1044 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1043) } else { - z.DecFallback(yyv1043, false) + *((*string)(yyv13)) = r.DecodeString() } } - yyj1040++ - if yyhl1040 { - yyb1040 = yyj1040 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1040 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1040 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} + x.APIVersion = "" } else { - yyv1045 := &x.LastTransitionTime - yym1046 := z.DecBinary() - _ = yym1046 + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 if false { - } else if z.HasExtensions() && z.DecExt(yyv1045) { - } else if yym1046 { - z.DecBinaryUnmarshal(yyv1045) - } else if !yym1046 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1045) } else { - z.DecFallback(yyv1045, false) + *((*string)(yyv15)) = r.DecodeString() } } - yyj1040++ - if yyhl1040 { - yyb1040 = yyj1040 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1040 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1040 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Reason = "" + x.ListMeta = pkg1_v1.ListMeta{} } else { - x.Reason = string(r.DecodeString()) + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj1040++ - if yyhl1040 { - yyb1040 = yyj1040 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1040 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1040 { + if yyb12 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Message = "" + x.Items = nil } else { - x.Message = string(r.DecodeString()) + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv19), d) + } } for { - yyj1040++ - if yyhl1040 { - yyb1040 = yyj1040 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1040 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1040 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1040-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -12196,39 +9565,39 @@ func (x *Ingress) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1049 := z.EncBinary() - _ = yym1049 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1050 := !z.EncBinary() - yy2arr1050 := z.EncBasicHandle().StructToArray - var yyq1050 [5]bool - _, _, _ = yysep1050, yyq1050, yy2arr1050 - const yyr1050 bool = false - yyq1050[0] = x.Kind != "" - yyq1050[1] = x.APIVersion != "" - yyq1050[2] = true - yyq1050[3] = true - yyq1050[4] = true - var yynn1050 int - if yyr1050 || yy2arr1050 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn1050 = 0 - for _, b := range yyq1050 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1050++ + yynn2++ } } - r.EncodeMapStart(yynn1050) - yynn1050 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1050 || yy2arr1050 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1050[0] { - yym1052 := z.EncBinary() - _ = yym1052 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -12237,23 +9606,23 @@ func (x *Ingress) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1050[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1053 := z.EncBinary() - _ = yym1053 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr1050 || yy2arr1050 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1050[1] { - yym1055 := z.EncBinary() - _ = yym1055 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -12262,70 +9631,82 @@ func (x *Ingress) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1050[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1056 := z.EncBinary() - _ = yym1056 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr1050 || 
yy2arr1050 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1050[2] { - yy1058 := &x.ObjectMeta - yy1058.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq1050[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1059 := &x.ObjectMeta - yy1059.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr1050 || yy2arr1050 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1050[3] { - yy1061 := &x.Spec - yy1061.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq1050[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1062 := &x.Spec - yy1062.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } } - if yyr1050 || yy2arr1050 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1050[4] { - yy1064 := &x.Status - yy1064.CodecEncodeSelf(e) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq1050[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1065 := &x.Status - yy1065.CodecEncodeSelf(e) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } - if yyr1050 || yy2arr1050 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -12338,25 +9719,25 @@ func (x *Ingress) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1066 := z.DecBinary() - _ = yym1066 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1067 := r.ContainerType() - if yyct1067 == codecSelferValueTypeMap1234 { - yyl1067 := r.ReadMapStart() - if yyl1067 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1067, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1067 == codecSelferValueTypeArray1234 { - yyl1067 := r.ReadArrayStart() - if yyl1067 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1067, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -12368,12 +9749,12 @@ func (x *Ingress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1068Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1068Slc - var yyhl1068 bool = l >= 0 - for yyj1068 := 0; 
; yyj1068++ { - if yyhl1068 { - if yyj1068 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -12382,47 +9763,65 @@ func (x *Ingress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1068Slc = r.DecodeBytes(yys1068Slc, true, true) - yys1068 := string(yys1068Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1068 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv1071 := &x.ObjectMeta - yyv1071.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = IngressSpec{} } else { - yyv1072 := &x.Spec - yyv1072.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = IngressStatus{} } else { - yyv1073 := &x.Status - yyv1073.CodecDecodeSelf(d) + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys1068) - } // end switch yys1068 - } // end for yyj1068 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -12430,16 +9829,16 @@ func (x *Ingress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1074 int - var yyb1074 bool - var yyhl1074 bool = l >= 0 - yyj1074++ - if yyhl1074 { - yyb1074 = yyj1074 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1074 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1074 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -12447,15 +9846,21 @@ func (x *Ingress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj1074++ - if yyhl1074 { - yyb1074 = yyj1074 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1074 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1074 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -12463,32 +9868,44 @@ func (x *Ingress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj1074++ - if yyhl1074 { - yyb1074 = yyj1074 > l + yyj12++ + if yyhl12 { 
+ yyb12 = yyj12 > l } else { - yyb1074 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1074 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv1077 := &x.ObjectMeta - yyv1077.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj1074++ - if yyhl1074 { - yyb1074 = yyj1074 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1074 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1074 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -12496,16 +9913,16 @@ func (x *Ingress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Spec = IngressSpec{} } else { - yyv1078 := &x.Spec - yyv1078.CodecDecodeSelf(d) + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) } - yyj1074++ - if yyhl1074 { - yyb1074 = yyj1074 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1074 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1074 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -12513,21 +9930,21 @@ func (x *Ingress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Status = IngressStatus{} } else { - yyv1079 := &x.Status - yyv1079.CodecDecodeSelf(d) + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) } for { - yyj1074++ - if yyhl1074 { - yyb1074 = yyj1074 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1074 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1074 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1074-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -12539,37 +9956,37 @@ func (x *IngressList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1080 := z.EncBinary() - _ = yym1080 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1081 := !z.EncBinary() - yy2arr1081 := z.EncBasicHandle().StructToArray - var yyq1081 [4]bool - _, _, _ = yysep1081, yyq1081, yy2arr1081 - const yyr1081 bool = false - yyq1081[0] = x.Kind != "" - yyq1081[1] = x.APIVersion != "" - yyq1081[2] = true - var yynn1081 int - if yyr1081 || yy2arr1081 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn1081 = 1 - for _, b := range yyq1081 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1081++ + yynn2++ } } - r.EncodeMapStart(yynn1081) - yynn1081 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1081 || yy2arr1081 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1081[0] { - yym1083 := z.EncBinary() - _ = yym1083 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -12578,23 +9995,23 @@ func (x *IngressList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1081[0] { + if yyq2[0] { 
z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1084 := z.EncBinary() - _ = yym1084 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr1081 || yy2arr1081 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1081[1] { - yym1086 := z.EncBinary() - _ = yym1086 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -12603,54 +10020,54 @@ func (x *IngressList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1081[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1087 := z.EncBinary() - _ = yym1087 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr1081 || yy2arr1081 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1081[2] { - yy1089 := &x.ListMeta - yym1090 := z.EncBinary() - _ = yym1090 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy1089) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy1089) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq1081[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1091 := &x.ListMeta - yym1092 := z.EncBinary() - _ = yym1092 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy1091) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy1091) + z.EncFallback(yy12) } } } - if yyr1081 || yy2arr1081 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym1094 := z.EncBinary() - _ = yym1094 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceIngress(([]Ingress)(x.Items), e) @@ -12663,15 +10080,15 @@ func (x *IngressList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym1095 := z.EncBinary() - _ = yym1095 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceIngress(([]Ingress)(x.Items), e) } } } - if yyr1081 || yy2arr1081 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -12684,25 +10101,25 @@ func (x *IngressList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1096 := z.DecBinary() - _ = yym1096 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1097 := r.ContainerType() - if yyct1097 == codecSelferValueTypeMap1234 { - yyl1097 := r.ReadMapStart() - if yyl1097 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1097, d) + 
x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1097 == codecSelferValueTypeArray1234 { - yyl1097 := r.ReadArrayStart() - if yyl1097 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1097, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -12714,12 +10131,12 @@ func (x *IngressList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1098Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1098Slc - var yyhl1098 bool = l >= 0 - for yyj1098 := 0; ; yyj1098++ { - if yyhl1098 { - if yyj1098 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -12728,51 +10145,63 @@ func (x *IngressList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1098Slc = r.DecodeBytes(yys1098Slc, true, true) - yys1098 := string(yys1098Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1098 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv1101 := &x.ListMeta - yym1102 := z.DecBinary() - _ = yym1102 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv1101) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv1101, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv1103 := &x.Items - yym1104 := z.DecBinary() - _ = yym1104 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceIngress((*[]Ingress)(yyv1103), d) + h.decSliceIngress((*[]Ingress)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys1098) - } // end switch yys1098 - } // end for yyj1098 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -12780,16 +10209,16 @@ func (x *IngressList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1105 int - var yyb1105 bool - var yyhl1105 bool = l >= 0 - yyj1105++ - if yyhl1105 { - yyb1105 = yyj1105 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1105 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1105 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -12797,15 +10226,21 @@ func (x *IngressList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } 
else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj1105++ - if yyhl1105 { - yyb1105 = yyj1105 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1105 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1105 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -12813,38 +10248,44 @@ func (x *IngressList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj1105++ - if yyhl1105 { - yyb1105 = yyj1105 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1105 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1105 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv1108 := &x.ListMeta - yym1109 := z.DecBinary() - _ = yym1109 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv1108) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv1108, false) + z.DecFallback(yyv17, false) } } - yyj1105++ - if yyhl1105 { - yyb1105 = yyj1105 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1105 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1105 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -12852,26 +10293,26 @@ func (x *IngressList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Items = nil } else { - yyv1110 := &x.Items - yym1111 := z.DecBinary() - _ = yym1111 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceIngress((*[]Ingress)(yyv1110), d) + h.decSliceIngress((*[]Ingress)(yyv19), d) } } for { - yyj1105++ - if yyhl1105 { - yyb1105 = yyj1105 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1105 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1105 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1105-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -12883,35 +10324,35 @@ func (x *IngressSpec) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1112 := z.EncBinary() - _ = yym1112 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1113 := !z.EncBinary() - yy2arr1113 := z.EncBasicHandle().StructToArray - var yyq1113 [3]bool - _, _, _ = yysep1113, yyq1113, yy2arr1113 - const yyr1113 bool = false - yyq1113[0] = x.Backend != nil - yyq1113[1] = len(x.TLS) != 0 - yyq1113[2] = len(x.Rules) != 0 - var yynn1113 int - if yyr1113 || yy2arr1113 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Backend != nil + yyq2[1] = len(x.TLS) != 0 + yyq2[2] = len(x.Rules) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn1113 = 0 - for _, b := range yyq1113 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1113++ + yynn2++ } } - 
r.EncodeMapStart(yynn1113) - yynn1113 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1113 || yy2arr1113 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1113[0] { + if yyq2[0] { if x.Backend == nil { r.EncodeNil() } else { @@ -12921,7 +10362,7 @@ func (x *IngressSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1113[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("backend")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -12932,14 +10373,14 @@ func (x *IngressSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1113 || yy2arr1113 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1113[1] { + if yyq2[1] { if x.TLS == nil { r.EncodeNil() } else { - yym1116 := z.EncBinary() - _ = yym1116 + yym7 := z.EncBinary() + _ = yym7 if false { } else { h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e) @@ -12949,15 +10390,15 @@ func (x *IngressSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1113[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("tls")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.TLS == nil { r.EncodeNil() } else { - yym1117 := z.EncBinary() - _ = yym1117 + yym8 := z.EncBinary() + _ = yym8 if false { } else { h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e) @@ -12965,14 +10406,14 @@ func (x *IngressSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1113 || yy2arr1113 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1113[2] { + if yyq2[2] { if x.Rules == nil { r.EncodeNil() } else { - yym1119 := z.EncBinary() - _ = yym1119 + yym10 := z.EncBinary() + _ = yym10 if false { } else { h.encSliceIngressRule(([]IngressRule)(x.Rules), e) @@ -12982,15 +10423,15 @@ func (x *IngressSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1113[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("rules")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Rules == nil { r.EncodeNil() } else { - yym1120 := z.EncBinary() - _ = yym1120 + yym11 := z.EncBinary() + _ = yym11 if false { } else { h.encSliceIngressRule(([]IngressRule)(x.Rules), e) @@ -12998,7 +10439,7 @@ func (x *IngressSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1113 || yy2arr1113 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -13011,25 +10452,25 @@ func (x *IngressSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1121 := z.DecBinary() - _ = yym1121 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1122 := r.ContainerType() - if yyct1122 == codecSelferValueTypeMap1234 { - yyl1122 := r.ReadMapStart() - if yyl1122 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1122, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1122 == codecSelferValueTypeArray1234 { - yyl1122 := r.ReadArrayStart() - if yyl1122 == 0 { + } else if yyct2 == 
codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1122, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -13041,12 +10482,12 @@ func (x *IngressSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1123Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1123Slc - var yyhl1123 bool = l >= 0 - for yyj1123 := 0; ; yyj1123++ { - if yyhl1123 { - if yyj1123 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -13055,10 +10496,10 @@ func (x *IngressSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1123Slc = r.DecodeBytes(yys1123Slc, true, true) - yys1123 := string(yys1123Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1123 { + switch yys3 { case "backend": if r.TryDecodeAsNil() { if x.Backend != nil { @@ -13074,30 +10515,30 @@ func (x *IngressSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.TLS = nil } else { - yyv1125 := &x.TLS - yym1126 := z.DecBinary() - _ = yym1126 + yyv5 := &x.TLS + yym6 := z.DecBinary() + _ = yym6 if false { } else { - h.decSliceIngressTLS((*[]IngressTLS)(yyv1125), d) + h.decSliceIngressTLS((*[]IngressTLS)(yyv5), d) } } case "rules": if r.TryDecodeAsNil() { x.Rules = nil } else { - yyv1127 := &x.Rules - yym1128 := z.DecBinary() - _ = yym1128 + yyv7 := &x.Rules + yym8 := z.DecBinary() + _ = yym8 if false { } else { - h.decSliceIngressRule((*[]IngressRule)(yyv1127), d) + h.decSliceIngressRule((*[]IngressRule)(yyv7), d) } } default: - z.DecStructFieldNotFound(-1, yys1123) - } // end switch yys1123 - } // end for yyj1123 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -13105,16 +10546,16 @@ func (x *IngressSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1129 int - var yyb1129 bool - var yyhl1129 bool = l >= 0 - yyj1129++ - if yyhl1129 { - yyb1129 = yyj1129 > l + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb1129 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb1129 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13129,13 +10570,13 @@ func (x *IngressSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.Backend.CodecDecodeSelf(d) } - yyj1129++ - if yyhl1129 { - yyb1129 = yyj1129 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb1129 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb1129 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13143,21 +10584,21 @@ func (x *IngressSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.TLS = nil } else { - yyv1131 := &x.TLS - yym1132 := z.DecBinary() - _ = yym1132 + yyv11 := &x.TLS + yym12 := z.DecBinary() + _ = yym12 if false { } else { - h.decSliceIngressTLS((*[]IngressTLS)(yyv1131), d) + 
h.decSliceIngressTLS((*[]IngressTLS)(yyv11), d) } } - yyj1129++ - if yyhl1129 { - yyb1129 = yyj1129 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb1129 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb1129 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13165,26 +10606,26 @@ func (x *IngressSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Rules = nil } else { - yyv1133 := &x.Rules - yym1134 := z.DecBinary() - _ = yym1134 + yyv13 := &x.Rules + yym14 := z.DecBinary() + _ = yym14 if false { } else { - h.decSliceIngressRule((*[]IngressRule)(yyv1133), d) + h.decSliceIngressRule((*[]IngressRule)(yyv13), d) } } for { - yyj1129++ - if yyhl1129 { - yyb1129 = yyj1129 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb1129 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb1129 { + if yyb9 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1129-1, "") + z.DecStructFieldNotFound(yyj9-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -13196,39 +10637,39 @@ func (x *IngressTLS) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1135 := z.EncBinary() - _ = yym1135 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1136 := !z.EncBinary() - yy2arr1136 := z.EncBasicHandle().StructToArray - var yyq1136 [2]bool - _, _, _ = yysep1136, yyq1136, yy2arr1136 - const yyr1136 bool = false - yyq1136[0] = len(x.Hosts) != 0 - yyq1136[1] = x.SecretName != "" - var yynn1136 int - if yyr1136 || yy2arr1136 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Hosts) != 0 + yyq2[1] = x.SecretName != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1136 = 0 - for _, b := range yyq1136 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1136++ + yynn2++ } } - r.EncodeMapStart(yynn1136) - yynn1136 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1136 || yy2arr1136 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1136[0] { + if yyq2[0] { if x.Hosts == nil { r.EncodeNil() } else { - yym1138 := z.EncBinary() - _ = yym1138 + yym4 := z.EncBinary() + _ = yym4 if false { } else { z.F.EncSliceStringV(x.Hosts, false, e) @@ -13238,15 +10679,15 @@ func (x *IngressTLS) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1136[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("hosts")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Hosts == nil { r.EncodeNil() } else { - yym1139 := z.EncBinary() - _ = yym1139 + yym5 := z.EncBinary() + _ = yym5 if false { } else { z.F.EncSliceStringV(x.Hosts, false, e) @@ -13254,11 +10695,11 @@ func (x *IngressTLS) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1136 || yy2arr1136 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1136[1] { - yym1141 := z.EncBinary() - _ = yym1141 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) @@ -13267,19 +10708,19 @@ func (x *IngressTLS) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1136[1] { + if yyq2[1] { 
z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("secretName")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1142 := z.EncBinary() - _ = yym1142 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) } } } - if yyr1136 || yy2arr1136 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -13292,25 +10733,25 @@ func (x *IngressTLS) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1143 := z.DecBinary() - _ = yym1143 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1144 := r.ContainerType() - if yyct1144 == codecSelferValueTypeMap1234 { - yyl1144 := r.ReadMapStart() - if yyl1144 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1144, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1144 == codecSelferValueTypeArray1234 { - yyl1144 := r.ReadArrayStart() - if yyl1144 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1144, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -13322,12 +10763,12 @@ func (x *IngressTLS) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1145Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1145Slc - var yyhl1145 bool = l >= 0 - for yyj1145 := 0; ; yyj1145++ { - if yyhl1145 { - if yyj1145 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -13336,32 +10777,38 @@ func (x *IngressTLS) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1145Slc = r.DecodeBytes(yys1145Slc, true, true) - yys1145 := string(yys1145Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1145 { + switch yys3 { case "hosts": if r.TryDecodeAsNil() { x.Hosts = nil } else { - yyv1146 := &x.Hosts - yym1147 := z.DecBinary() - _ = yym1147 + yyv4 := &x.Hosts + yym5 := z.DecBinary() + _ = yym5 if false { } else { - z.F.DecSliceStringX(yyv1146, false, d) + z.F.DecSliceStringX(yyv4, false, d) } } case "secretName": if r.TryDecodeAsNil() { x.SecretName = "" } else { - x.SecretName = string(r.DecodeString()) + yyv6 := &x.SecretName + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys1145) - } // end switch yys1145 - } // end for yyj1145 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -13369,16 +10816,16 @@ func (x *IngressTLS) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1149 int - 
var yyb1149 bool - var yyhl1149 bool = l >= 0 - yyj1149++ - if yyhl1149 { - yyb1149 = yyj1149 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1149 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1149 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13386,21 +10833,21 @@ func (x *IngressTLS) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Hosts = nil } else { - yyv1150 := &x.Hosts - yym1151 := z.DecBinary() - _ = yym1151 + yyv9 := &x.Hosts + yym10 := z.DecBinary() + _ = yym10 if false { } else { - z.F.DecSliceStringX(yyv1150, false, d) + z.F.DecSliceStringX(yyv9, false, d) } } - yyj1149++ - if yyhl1149 { - yyb1149 = yyj1149 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1149 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1149 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13408,20 +10855,26 @@ func (x *IngressTLS) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.SecretName = "" } else { - x.SecretName = string(r.DecodeString()) + yyv11 := &x.SecretName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } for { - yyj1149++ - if yyhl1149 { - yyb1149 = yyj1149 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1149 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1149 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1149-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -13433,48 +10886,48 @@ func (x *IngressStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1153 := z.EncBinary() - _ = yym1153 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1154 := !z.EncBinary() - yy2arr1154 := z.EncBasicHandle().StructToArray - var yyq1154 [1]bool - _, _, _ = yysep1154, yyq1154, yy2arr1154 - const yyr1154 bool = false - yyq1154[0] = true - var yynn1154 int - if yyr1154 || yy2arr1154 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn1154 = 0 - for _, b := range yyq1154 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1154++ + yynn2++ } } - r.EncodeMapStart(yynn1154) - yynn1154 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1154 || yy2arr1154 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1154[0] { - yy1156 := &x.LoadBalancer - yy1156.CodecEncodeSelf(e) + if yyq2[0] { + yy4 := &x.LoadBalancer + yy4.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq1154[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("loadBalancer")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1157 := &x.LoadBalancer - yy1157.CodecEncodeSelf(e) + yy6 := &x.LoadBalancer + yy6.CodecEncodeSelf(e) } } - if yyr1154 || yy2arr1154 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -13487,25 +10940,25 @@ func (x *IngressStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := 
codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1158 := z.DecBinary() - _ = yym1158 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1159 := r.ContainerType() - if yyct1159 == codecSelferValueTypeMap1234 { - yyl1159 := r.ReadMapStart() - if yyl1159 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1159, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1159 == codecSelferValueTypeArray1234 { - yyl1159 := r.ReadArrayStart() - if yyl1159 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1159, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -13517,12 +10970,12 @@ func (x *IngressStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1160Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1160Slc - var yyhl1160 bool = l >= 0 - for yyj1160 := 0; ; yyj1160++ { - if yyhl1160 { - if yyj1160 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -13531,21 +10984,21 @@ func (x *IngressStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1160Slc = r.DecodeBytes(yys1160Slc, true, true) - yys1160 := string(yys1160Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1160 { + switch yys3 { case "loadBalancer": if r.TryDecodeAsNil() { - x.LoadBalancer = pkg2_v1.LoadBalancerStatus{} + x.LoadBalancer = pkg4_v1.LoadBalancerStatus{} } else { - yyv1161 := &x.LoadBalancer - yyv1161.CodecDecodeSelf(d) + yyv4 := &x.LoadBalancer + yyv4.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys1160) - } // end switch yys1160 - } // end for yyj1160 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -13553,38 +11006,38 @@ func (x *IngressStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1162 int - var yyb1162 bool - var yyhl1162 bool = l >= 0 - yyj1162++ - if yyhl1162 { - yyb1162 = yyj1162 > l + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb1162 = r.CheckBreak() + yyb5 = r.CheckBreak() } - if yyb1162 { + if yyb5 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.LoadBalancer = pkg2_v1.LoadBalancerStatus{} + x.LoadBalancer = pkg4_v1.LoadBalancerStatus{} } else { - yyv1163 := &x.LoadBalancer - yyv1163.CodecDecodeSelf(d) + yyv6 := &x.LoadBalancer + yyv6.CodecDecodeSelf(d) } for { - yyj1162++ - if yyhl1162 { - yyb1162 = yyj1162 > l + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb1162 = r.CheckBreak() + yyb5 = r.CheckBreak() } - if yyb1162 { + if yyb5 { break } 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1162-1, "") + z.DecStructFieldNotFound(yyj5-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -13596,36 +11049,36 @@ func (x *IngressRule) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1164 := z.EncBinary() - _ = yym1164 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1165 := !z.EncBinary() - yy2arr1165 := z.EncBasicHandle().StructToArray - var yyq1165 [2]bool - _, _, _ = yysep1165, yyq1165, yy2arr1165 - const yyr1165 bool = false - yyq1165[0] = x.Host != "" - yyq1165[1] = x.IngressRuleValue.HTTP != nil && x.HTTP != nil - var yynn1165 int - if yyr1165 || yy2arr1165 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Host != "" + yyq2[1] = x.IngressRuleValue.HTTP != nil && x.HTTP != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1165 = 0 - for _, b := range yyq1165 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1165++ + yynn2++ } } - r.EncodeMapStart(yynn1165) - yynn1165 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1165 || yy2arr1165 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1165[0] { - yym1167 := z.EncBinary() - _ = yym1167 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Host)) @@ -13634,30 +11087,30 @@ func (x *IngressRule) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1165[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("host")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1168 := z.EncBinary() - _ = yym1168 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Host)) } } } - var yyn1169 bool + var yyn6 bool if x.IngressRuleValue.HTTP == nil { - yyn1169 = true - goto LABEL1169 + yyn6 = true + goto LABEL6 } - LABEL1169: - if yyr1165 || yy2arr1165 { - if yyn1169 { + LABEL6: + if yyr2 || yy2arr2 { + if yyn6 { r.EncodeNil() } else { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1165[1] { + if yyq2[1] { if x.HTTP == nil { r.EncodeNil() } else { @@ -13668,11 +11121,11 @@ func (x *IngressRule) CodecEncodeSelf(e *codec1978.Encoder) { } } } else { - if yyq1165[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("http")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn1169 { + if yyn6 { r.EncodeNil() } else { if x.HTTP == nil { @@ -13683,7 +11136,7 @@ func (x *IngressRule) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1165 || yy2arr1165 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -13696,25 +11149,25 @@ func (x *IngressRule) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1170 := z.DecBinary() - _ = yym1170 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1171 := r.ContainerType() - if yyct1171 == codecSelferValueTypeMap1234 { - yyl1171 := r.ReadMapStart() - if yyl1171 == 
0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1171, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1171 == codecSelferValueTypeArray1234 { - yyl1171 := r.ReadArrayStart() - if yyl1171 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1171, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -13726,12 +11179,12 @@ func (x *IngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1172Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1172Slc - var yyhl1172 bool = l >= 0 - for yyj1172 := 0; ; yyj1172++ { - if yyhl1172 { - if yyj1172 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -13740,15 +11193,21 @@ func (x *IngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1172Slc = r.DecodeBytes(yys1172Slc, true, true) - yys1172 := string(yys1172Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1172 { + switch yys3 { case "host": if r.TryDecodeAsNil() { x.Host = "" } else { - x.Host = string(r.DecodeString()) + yyv4 := &x.Host + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "http": if x.IngressRuleValue.HTTP == nil { @@ -13765,9 +11224,9 @@ func (x *IngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { x.HTTP.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys1172) - } // end switch yys1172 - } // end for yyj1172 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -13775,16 +11234,16 @@ func (x *IngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1175 int - var yyb1175 bool - var yyhl1175 bool = l >= 0 - yyj1175++ - if yyhl1175 { - yyb1175 = yyj1175 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1175 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1175 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13792,18 +11251,24 @@ func (x *IngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Host = "" } else { - x.Host = string(r.DecodeString()) + yyv8 := &x.Host + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } if x.IngressRuleValue.HTTP == nil { x.IngressRuleValue.HTTP = new(HTTPIngressRuleValue) } - yyj1175++ - if yyhl1175 { - yyb1175 = yyj1175 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1175 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1175 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13819,17 +11284,17 @@ func (x *IngressRule) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { x.HTTP.CodecDecodeSelf(d) } for { - yyj1175++ - if yyhl1175 { - yyb1175 = yyj1175 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1175 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1175 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1175-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -13841,33 +11306,33 @@ func (x *IngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1178 := z.EncBinary() - _ = yym1178 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1179 := !z.EncBinary() - yy2arr1179 := z.EncBasicHandle().StructToArray - var yyq1179 [1]bool - _, _, _ = yysep1179, yyq1179, yy2arr1179 - const yyr1179 bool = false - yyq1179[0] = x.HTTP != nil - var yynn1179 int - if yyr1179 || yy2arr1179 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.HTTP != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn1179 = 0 - for _, b := range yyq1179 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1179++ + yynn2++ } } - r.EncodeMapStart(yynn1179) - yynn1179 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1179 || yy2arr1179 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1179[0] { + if yyq2[0] { if x.HTTP == nil { r.EncodeNil() } else { @@ -13877,7 +11342,7 @@ func (x *IngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1179[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("http")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -13888,7 +11353,7 @@ func (x *IngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1179 || yy2arr1179 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -13901,25 +11366,25 @@ func (x *IngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1181 := z.DecBinary() - _ = yym1181 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1182 := r.ContainerType() - if yyct1182 == codecSelferValueTypeMap1234 { - yyl1182 := r.ReadMapStart() - if yyl1182 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1182, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1182 == codecSelferValueTypeArray1234 { - yyl1182 := r.ReadArrayStart() - if yyl1182 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1182, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -13931,12 +11396,12 @@ func (x *IngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var 
yys1183Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1183Slc - var yyhl1183 bool = l >= 0 - for yyj1183 := 0; ; yyj1183++ { - if yyhl1183 { - if yyj1183 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -13945,10 +11410,10 @@ func (x *IngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1183Slc = r.DecodeBytes(yys1183Slc, true, true) - yys1183 := string(yys1183Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1183 { + switch yys3 { case "http": if r.TryDecodeAsNil() { if x.HTTP != nil { @@ -13961,9 +11426,9 @@ func (x *IngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { x.HTTP.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys1183) - } // end switch yys1183 - } // end for yyj1183 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -13971,16 +11436,16 @@ func (x *IngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1185 int - var yyb1185 bool - var yyhl1185 bool = l >= 0 - yyj1185++ - if yyhl1185 { - yyb1185 = yyj1185 > l + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb1185 = r.CheckBreak() + yyb5 = r.CheckBreak() } - if yyb1185 { + if yyb5 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13996,17 +11461,17 @@ func (x *IngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) x.HTTP.CodecDecodeSelf(d) } for { - yyj1185++ - if yyhl1185 { - yyb1185 = yyj1185 > l + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb1185 = r.CheckBreak() + yyb5 = r.CheckBreak() } - if yyb1185 { + if yyb5 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1185-1, "") + z.DecStructFieldNotFound(yyj5-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -14018,36 +11483,36 @@ func (x *HTTPIngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1187 := z.EncBinary() - _ = yym1187 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1188 := !z.EncBinary() - yy2arr1188 := z.EncBasicHandle().StructToArray - var yyq1188 [1]bool - _, _, _ = yysep1188, yyq1188, yy2arr1188 - const yyr1188 bool = false - var yynn1188 int - if yyr1188 || yy2arr1188 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(1) } else { - yynn1188 = 1 - for _, b := range yyq1188 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1188++ + yynn2++ } } - r.EncodeMapStart(yynn1188) - yynn1188 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1188 || yy2arr1188 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Paths == nil { r.EncodeNil() } else { - yym1190 := z.EncBinary() - _ = yym1190 + yym4 := z.EncBinary() + _ = yym4 if false { } else { h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e) @@ -14060,15 
+11525,15 @@ func (x *HTTPIngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { if x.Paths == nil { r.EncodeNil() } else { - yym1191 := z.EncBinary() - _ = yym1191 + yym5 := z.EncBinary() + _ = yym5 if false { } else { h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e) } } } - if yyr1188 || yy2arr1188 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -14081,25 +11546,25 @@ func (x *HTTPIngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1192 := z.DecBinary() - _ = yym1192 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1193 := r.ContainerType() - if yyct1193 == codecSelferValueTypeMap1234 { - yyl1193 := r.ReadMapStart() - if yyl1193 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1193, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1193 == codecSelferValueTypeArray1234 { - yyl1193 := r.ReadArrayStart() - if yyl1193 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1193, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -14111,12 +11576,12 @@ func (x *HTTPIngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1194Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1194Slc - var yyhl1194 bool = l >= 0 - for yyj1194 := 0; ; yyj1194++ { - if yyhl1194 { - if yyj1194 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -14125,26 +11590,26 @@ func (x *HTTPIngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decode } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1194Slc = r.DecodeBytes(yys1194Slc, true, true) - yys1194 := string(yys1194Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1194 { + switch yys3 { case "paths": if r.TryDecodeAsNil() { x.Paths = nil } else { - yyv1195 := &x.Paths - yym1196 := z.DecBinary() - _ = yym1196 + yyv4 := &x.Paths + yym5 := z.DecBinary() + _ = yym5 if false { } else { - h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv1195), d) + h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv4), d) } } default: - z.DecStructFieldNotFound(-1, yys1194) - } // end switch yys1194 - } // end for yyj1194 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -14152,16 +11617,16 @@ func (x *HTTPIngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Deco var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1197 int - var yyb1197 bool - var yyhl1197 bool = l >= 0 - yyj1197++ - if yyhl1197 { - yyb1197 = yyj1197 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb1197 = 
r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb1197 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14169,26 +11634,26 @@ func (x *HTTPIngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.Paths = nil } else { - yyv1198 := &x.Paths - yym1199 := z.DecBinary() - _ = yym1199 + yyv7 := &x.Paths + yym8 := z.DecBinary() + _ = yym8 if false { } else { - h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv1198), d) + h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv7), d) } } for { - yyj1197++ - if yyhl1197 { - yyb1197 = yyj1197 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb1197 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb1197 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1197-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -14200,35 +11665,35 @@ func (x *HTTPIngressPath) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1200 := z.EncBinary() - _ = yym1200 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1201 := !z.EncBinary() - yy2arr1201 := z.EncBasicHandle().StructToArray - var yyq1201 [2]bool - _, _, _ = yysep1201, yyq1201, yy2arr1201 - const yyr1201 bool = false - yyq1201[0] = x.Path != "" - var yynn1201 int - if yyr1201 || yy2arr1201 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Path != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1201 = 1 - for _, b := range yyq1201 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1201++ + yynn2++ } } - r.EncodeMapStart(yynn1201) - yynn1201 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1201 || yy2arr1201 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1201[0] { - yym1203 := z.EncBinary() - _ = yym1203 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Path)) @@ -14237,30 +11702,30 @@ func (x *HTTPIngressPath) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1201[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("path")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1204 := z.EncBinary() - _ = yym1204 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Path)) } } } - if yyr1201 || yy2arr1201 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1206 := &x.Backend - yy1206.CodecEncodeSelf(e) + yy7 := &x.Backend + yy7.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("backend")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1207 := &x.Backend - yy1207.CodecEncodeSelf(e) + yy9 := &x.Backend + yy9.CodecEncodeSelf(e) } - if yyr1201 || yy2arr1201 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -14273,25 +11738,25 @@ func (x *HTTPIngressPath) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - 
yym1208 := z.DecBinary() - _ = yym1208 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1209 := r.ContainerType() - if yyct1209 == codecSelferValueTypeMap1234 { - yyl1209 := r.ReadMapStart() - if yyl1209 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1209, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1209 == codecSelferValueTypeArray1234 { - yyl1209 := r.ReadArrayStart() - if yyl1209 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1209, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -14303,12 +11768,12 @@ func (x *HTTPIngressPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1210Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1210Slc - var yyhl1210 bool = l >= 0 - for yyj1210 := 0; ; yyj1210++ { - if yyhl1210 { - if yyj1210 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -14317,27 +11782,33 @@ func (x *HTTPIngressPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1210Slc = r.DecodeBytes(yys1210Slc, true, true) - yys1210 := string(yys1210Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1210 { + switch yys3 { case "path": if r.TryDecodeAsNil() { x.Path = "" } else { - x.Path = string(r.DecodeString()) + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "backend": if r.TryDecodeAsNil() { x.Backend = IngressBackend{} } else { - yyv1212 := &x.Backend - yyv1212.CodecDecodeSelf(d) + yyv6 := &x.Backend + yyv6.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys1210) - } // end switch yys1210 - } // end for yyj1210 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -14345,16 +11816,16 @@ func (x *HTTPIngressPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1213 int - var yyb1213 bool - var yyhl1213 bool = l >= 0 - yyj1213++ - if yyhl1213 { - yyb1213 = yyj1213 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1213 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1213 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14362,15 +11833,21 @@ func (x *HTTPIngressPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Path = "" } else { - x.Path = string(r.DecodeString()) + yyv8 := &x.Path + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } - yyj1213++ - if yyhl1213 { - yyb1213 = yyj1213 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1213 = r.CheckBreak() + 
yyb7 = r.CheckBreak() } - if yyb1213 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14378,21 +11855,21 @@ func (x *HTTPIngressPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Backend = IngressBackend{} } else { - yyv1215 := &x.Backend - yyv1215.CodecDecodeSelf(d) + yyv10 := &x.Backend + yyv10.CodecDecodeSelf(d) } for { - yyj1213++ - if yyhl1213 { - yyb1213 = yyj1213 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1213 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1213 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1213-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -14404,33 +11881,33 @@ func (x *IngressBackend) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1216 := z.EncBinary() - _ = yym1216 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1217 := !z.EncBinary() - yy2arr1217 := z.EncBasicHandle().StructToArray - var yyq1217 [2]bool - _, _, _ = yysep1217, yyq1217, yy2arr1217 - const yyr1217 bool = false - var yynn1217 int - if yyr1217 || yy2arr1217 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1217 = 2 - for _, b := range yyq1217 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn1217++ + yynn2++ } } - r.EncodeMapStart(yynn1217) - yynn1217 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1217 || yy2arr1217 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1219 := z.EncBinary() - _ = yym1219 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) @@ -14439,41 +11916,41 @@ func (x *IngressBackend) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("serviceName")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1220 := z.EncBinary() - _ = yym1220 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) } } - if yyr1217 || yy2arr1217 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1222 := &x.ServicePort - yym1223 := z.EncBinary() - _ = yym1223 + yy7 := &x.ServicePort + yym8 := z.EncBinary() + _ = yym8 if false { - } else if z.HasExtensions() && z.EncExt(yy1222) { - } else if !yym1223 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1222) + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) } else { - z.EncFallback(yy1222) + z.EncFallback(yy7) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("servicePort")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1224 := &x.ServicePort - yym1225 := z.EncBinary() - _ = yym1225 + yy9 := &x.ServicePort + yym10 := z.EncBinary() + _ = yym10 if false { - } else if z.HasExtensions() && z.EncExt(yy1224) { - } else if !yym1225 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1224) + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) } else { - z.EncFallback(yy1224) + 
z.EncFallback(yy9) } } - if yyr1217 || yy2arr1217 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -14486,25 +11963,25 @@ func (x *IngressBackend) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1226 := z.DecBinary() - _ = yym1226 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1227 := r.ContainerType() - if yyct1227 == codecSelferValueTypeMap1234 { - yyl1227 := r.ReadMapStart() - if yyl1227 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1227, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1227 == codecSelferValueTypeArray1234 { - yyl1227 := r.ReadArrayStart() - if yyl1227 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1227, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -14516,12 +11993,12 @@ func (x *IngressBackend) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1228Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1228Slc - var yyhl1228 bool = l >= 0 - for yyj1228 := 0; ; yyj1228++ { - if yyhl1228 { - if yyj1228 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -14530,35 +12007,41 @@ func (x *IngressBackend) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1228Slc = r.DecodeBytes(yys1228Slc, true, true) - yys1228 := string(yys1228Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1228 { + switch yys3 { case "serviceName": if r.TryDecodeAsNil() { x.ServiceName = "" } else { - x.ServiceName = string(r.DecodeString()) + yyv4 := &x.ServiceName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "servicePort": if r.TryDecodeAsNil() { x.ServicePort = pkg5_intstr.IntOrString{} } else { - yyv1230 := &x.ServicePort - yym1231 := z.DecBinary() - _ = yym1231 + yyv6 := &x.ServicePort + yym7 := z.DecBinary() + _ = yym7 if false { - } else if z.HasExtensions() && z.DecExt(yyv1230) { - } else if !yym1231 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1230) + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) } else { - z.DecFallback(yyv1230, false) + z.DecFallback(yyv6, false) } } default: - z.DecStructFieldNotFound(-1, yys1228) - } // end switch yys1228 - } // end for yyj1228 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -14566,16 +12049,16 @@ func (x *IngressBackend) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1232 int - var yyb1232 bool - var 
yyhl1232 bool = l >= 0 - yyj1232++ - if yyhl1232 { - yyb1232 = yyj1232 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1232 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1232 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14583,15 +12066,21 @@ func (x *IngressBackend) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ServiceName = "" } else { - x.ServiceName = string(r.DecodeString()) + yyv9 := &x.ServiceName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } } - yyj1232++ - if yyhl1232 { - yyb1232 = yyj1232 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1232 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1232 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14599,328 +12088,29 @@ func (x *IngressBackend) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ServicePort = pkg5_intstr.IntOrString{} } else { - yyv1234 := &x.ServicePort - yym1235 := z.DecBinary() - _ = yym1235 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1234) { - } else if !yym1235 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1234) - } else { - z.DecFallback(yyv1234, false) - } - } - for { - yyj1232++ - if yyhl1232 { - yyb1232 = yyj1232 > l - } else { - yyb1232 = r.CheckBreak() - } - if yyb1232 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1232-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ExportOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1236 := z.EncBinary() - _ = yym1236 + yyv11 := &x.ServicePort + yym12 := z.DecBinary() + _ = yym12 if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1237 := !z.EncBinary() - yy2arr1237 := z.EncBasicHandle().StructToArray - var yyq1237 [4]bool - _, _, _ = yysep1237, yyq1237, yy2arr1237 - const yyr1237 bool = false - yyq1237[0] = x.Kind != "" - yyq1237[1] = x.APIVersion != "" - var yynn1237 int - if yyr1237 || yy2arr1237 { - r.EncodeArrayStart(4) - } else { - yynn1237 = 2 - for _, b := range yyq1237 { - if b { - yynn1237++ - } - } - r.EncodeMapStart(yynn1237) - yynn1237 = 0 - } - if yyr1237 || yy2arr1237 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1237[0] { - yym1239 := z.EncBinary() - _ = yym1239 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1237[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1240 := z.EncBinary() - _ = yym1240 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr1237 || yy2arr1237 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1237[1] { - yym1242 := z.EncBinary() - _ = yym1242 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1237[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1243 := z.EncBinary() - _ = yym1243 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr1237 || yy2arr1237 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1245 := z.EncBinary() - _ = yym1245 - if false { - } else { - r.EncodeBool(bool(x.Export)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("export")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1246 := z.EncBinary() - _ = yym1246 - if false { - } else { - r.EncodeBool(bool(x.Export)) - } - } - if yyr1237 || yy2arr1237 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1248 := z.EncBinary() - _ = yym1248 - if false { - } else { - r.EncodeBool(bool(x.Exact)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("exact")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1249 := z.EncBinary() - _ = yym1249 - if false { - } else { - r.EncodeBool(bool(x.Exact)) - } - } - if yyr1237 || yy2arr1237 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ExportOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1250 := z.DecBinary() - _ = yym1250 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1251 := r.ContainerType() - if yyct1251 == codecSelferValueTypeMap1234 { - yyl1251 := r.ReadMapStart() - if yyl1251 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1251, d) - } - } else if yyct1251 == codecSelferValueTypeArray1234 { - yyl1251 := r.ReadArrayStart() - if yyl1251 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1251, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ExportOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1252Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1252Slc - var yyhl1252 bool = l >= 0 - for yyj1252 := 0; ; yyj1252++ { - if yyhl1252 { - if yyj1252 >= l { - break - } + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) } else { - if r.CheckBreak() { - break - } + z.DecFallback(yyv11, false) } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1252Slc = r.DecodeBytes(yys1252Slc, true, true) - yys1252 := string(yys1252Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1252 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "export": - if r.TryDecodeAsNil() { - x.Export = false - } else { - x.Export = bool(r.DecodeBool()) - } - case "exact": - if r.TryDecodeAsNil() { - x.Exact = false - } else { - x.Exact = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys1252) - } // end switch yys1252 - } // end for yyj1252 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x 
*ExportOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1257 int - var yyb1257 bool - var yyhl1257 bool = l >= 0 - yyj1257++ - if yyhl1257 { - yyb1257 = yyj1257 > l - } else { - yyb1257 = r.CheckBreak() - } - if yyb1257 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj1257++ - if yyhl1257 { - yyb1257 = yyj1257 > l - } else { - yyb1257 = r.CheckBreak() - } - if yyb1257 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj1257++ - if yyhl1257 { - yyb1257 = yyj1257 > l - } else { - yyb1257 = r.CheckBreak() - } - if yyb1257 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Export = false - } else { - x.Export = bool(r.DecodeBool()) - } - yyj1257++ - if yyhl1257 { - yyb1257 = yyj1257 > l - } else { - yyb1257 = r.CheckBreak() - } - if yyb1257 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Exact = false - } else { - x.Exact = bool(r.DecodeBool()) } for { - yyj1257++ - if yyhl1257 { - yyb1257 = yyj1257 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1257 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1257 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1257-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -14932,39 +12122,39 @@ func (x *ReplicaSet) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1262 := z.EncBinary() - _ = yym1262 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1263 := !z.EncBinary() - yy2arr1263 := z.EncBasicHandle().StructToArray - var yyq1263 [5]bool - _, _, _ = yysep1263, yyq1263, yy2arr1263 - const yyr1263 bool = false - yyq1263[0] = x.Kind != "" - yyq1263[1] = x.APIVersion != "" - yyq1263[2] = true - yyq1263[3] = true - yyq1263[4] = true - var yynn1263 int - if yyr1263 || yy2arr1263 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn1263 = 0 - for _, b := range yyq1263 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1263++ + yynn2++ } } - r.EncodeMapStart(yynn1263) - yynn1263 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1263 || yy2arr1263 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1263[0] { - yym1265 := z.EncBinary() - _ = yym1265 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -14973,23 +12163,23 @@ func (x *ReplicaSet) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - 
if yyq1263[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1266 := z.EncBinary() - _ = yym1266 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr1263 || yy2arr1263 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1263[1] { - yym1268 := z.EncBinary() - _ = yym1268 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -14998,70 +12188,82 @@ func (x *ReplicaSet) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1263[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1269 := z.EncBinary() - _ = yym1269 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr1263 || yy2arr1263 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1263[2] { - yy1271 := &x.ObjectMeta - yy1271.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq1263[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1272 := &x.ObjectMeta - yy1272.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr1263 || yy2arr1263 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1263[3] { - yy1274 := &x.Spec - yy1274.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq1263[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1275 := &x.Spec - yy1275.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } } - if yyr1263 || yy2arr1263 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1263[4] { - yy1277 := &x.Status - yy1277.CodecEncodeSelf(e) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq1263[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1278 := &x.Status - yy1278.CodecEncodeSelf(e) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } - if yyr1263 || yy2arr1263 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -15074,25 +12276,25 @@ func (x *ReplicaSet) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1279 := z.DecBinary() - _ = yym1279 + yym1 := z.DecBinary() + _ = 
yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1280 := r.ContainerType() - if yyct1280 == codecSelferValueTypeMap1234 { - yyl1280 := r.ReadMapStart() - if yyl1280 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1280, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1280 == codecSelferValueTypeArray1234 { - yyl1280 := r.ReadArrayStart() - if yyl1280 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1280, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -15104,12 +12306,12 @@ func (x *ReplicaSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1281Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1281Slc - var yyhl1281 bool = l >= 0 - for yyj1281 := 0; ; yyj1281++ { - if yyhl1281 { - if yyj1281 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -15118,47 +12320,65 @@ func (x *ReplicaSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1281Slc = r.DecodeBytes(yys1281Slc, true, true) - yys1281 := string(yys1281Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1281 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv1284 := &x.ObjectMeta - yyv1284.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = ReplicaSetSpec{} } else { - yyv1285 := &x.Spec - yyv1285.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = ReplicaSetStatus{} } else { - yyv1286 := &x.Status - yyv1286.CodecDecodeSelf(d) + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys1281) - } // end switch yys1281 - } // end for yyj1281 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -15166,16 +12386,16 @@ func (x *ReplicaSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1287 int - var yyb1287 bool - var yyhl1287 bool = l >= 0 - yyj1287++ - if yyhl1287 { - yyb1287 = yyj1287 > l + var yyj12 int + 
var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1287 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1287 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -15183,15 +12403,21 @@ func (x *ReplicaSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj1287++ - if yyhl1287 { - yyb1287 = yyj1287 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1287 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1287 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -15199,32 +12425,44 @@ func (x *ReplicaSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj1287++ - if yyhl1287 { - yyb1287 = yyj1287 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1287 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1287 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv1290 := &x.ObjectMeta - yyv1290.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj1287++ - if yyhl1287 { - yyb1287 = yyj1287 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1287 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1287 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -15232,16 +12470,16 @@ func (x *ReplicaSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Spec = ReplicaSetSpec{} } else { - yyv1291 := &x.Spec - yyv1291.CodecDecodeSelf(d) + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) } - yyj1287++ - if yyhl1287 { - yyb1287 = yyj1287 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1287 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1287 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -15249,21 +12487,21 @@ func (x *ReplicaSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Status = ReplicaSetStatus{} } else { - yyv1292 := &x.Status - yyv1292.CodecDecodeSelf(d) + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) } for { - yyj1287++ - if yyhl1287 { - yyb1287 = yyj1287 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1287 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1287 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1287-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -15275,37 +12513,37 @@ func (x *ReplicaSetList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1293 := z.EncBinary() - _ = yym1293 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1294 := !z.EncBinary() - yy2arr1294 := 
z.EncBasicHandle().StructToArray - var yyq1294 [4]bool - _, _, _ = yysep1294, yyq1294, yy2arr1294 - const yyr1294 bool = false - yyq1294[0] = x.Kind != "" - yyq1294[1] = x.APIVersion != "" - yyq1294[2] = true - var yynn1294 int - if yyr1294 || yy2arr1294 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn1294 = 1 - for _, b := range yyq1294 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1294++ + yynn2++ } } - r.EncodeMapStart(yynn1294) - yynn1294 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1294 || yy2arr1294 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1294[0] { - yym1296 := z.EncBinary() - _ = yym1296 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -15314,23 +12552,23 @@ func (x *ReplicaSetList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1294[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1297 := z.EncBinary() - _ = yym1297 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr1294 || yy2arr1294 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1294[1] { - yym1299 := z.EncBinary() - _ = yym1299 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -15339,54 +12577,54 @@ func (x *ReplicaSetList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1294[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1300 := z.EncBinary() - _ = yym1300 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr1294 || yy2arr1294 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1294[2] { - yy1302 := &x.ListMeta - yym1303 := z.EncBinary() - _ = yym1303 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy1302) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy1302) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq1294[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1304 := &x.ListMeta - yym1305 := z.EncBinary() - _ = yym1305 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy1304) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy1304) + z.EncFallback(yy12) } } } - if yyr1294 || yy2arr1294 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym1307 := z.EncBinary() - _ = yym1307 + yym15 := 
z.EncBinary() + _ = yym15 if false { } else { h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e) @@ -15399,15 +12637,15 @@ func (x *ReplicaSetList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym1308 := z.EncBinary() - _ = yym1308 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e) } } } - if yyr1294 || yy2arr1294 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -15420,25 +12658,25 @@ func (x *ReplicaSetList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1309 := z.DecBinary() - _ = yym1309 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1310 := r.ContainerType() - if yyct1310 == codecSelferValueTypeMap1234 { - yyl1310 := r.ReadMapStart() - if yyl1310 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1310, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1310 == codecSelferValueTypeArray1234 { - yyl1310 := r.ReadArrayStart() - if yyl1310 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1310, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -15450,12 +12688,12 @@ func (x *ReplicaSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1311Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1311Slc - var yyhl1311 bool = l >= 0 - for yyj1311 := 0; ; yyj1311++ { - if yyhl1311 { - if yyj1311 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -15464,51 +12702,63 @@ func (x *ReplicaSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1311Slc = r.DecodeBytes(yys1311Slc, true, true) - yys1311 := string(yys1311Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1311 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv1314 := &x.ListMeta - yym1315 := z.DecBinary() - _ = yym1315 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv1314) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv1314, false) + z.DecFallback(yyv8, false) } } case 
"items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv1316 := &x.Items - yym1317 := z.DecBinary() - _ = yym1317 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceReplicaSet((*[]ReplicaSet)(yyv1316), d) + h.decSliceReplicaSet((*[]ReplicaSet)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys1311) - } // end switch yys1311 - } // end for yyj1311 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -15516,16 +12766,16 @@ func (x *ReplicaSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1318 int - var yyb1318 bool - var yyhl1318 bool = l >= 0 - yyj1318++ - if yyhl1318 { - yyb1318 = yyj1318 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1318 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1318 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -15533,15 +12783,21 @@ func (x *ReplicaSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj1318++ - if yyhl1318 { - yyb1318 = yyj1318 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1318 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1318 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -15549,38 +12805,44 @@ func (x *ReplicaSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj1318++ - if yyhl1318 { - yyb1318 = yyj1318 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1318 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1318 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv1321 := &x.ListMeta - yym1322 := z.DecBinary() - _ = yym1322 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv1321) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv1321, false) + z.DecFallback(yyv17, false) } } - yyj1318++ - if yyhl1318 { - yyb1318 = yyj1318 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1318 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1318 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -15588,26 +12850,26 @@ func (x *ReplicaSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Items = nil } else { - yyv1323 := &x.Items - yym1324 := z.DecBinary() - _ = yym1324 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceReplicaSet((*[]ReplicaSet)(yyv1323), d) + h.decSliceReplicaSet((*[]ReplicaSet)(yyv19), d) } } for { - yyj1318++ - if yyhl1318 { - yyb1318 = yyj1318 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1318 = r.CheckBreak() + yyb12 = 
r.CheckBreak() } - if yyb1318 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1318-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -15619,73 +12881,73 @@ func (x *ReplicaSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1325 := z.EncBinary() - _ = yym1325 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1326 := !z.EncBinary() - yy2arr1326 := z.EncBasicHandle().StructToArray - var yyq1326 [4]bool - _, _, _ = yysep1326, yyq1326, yy2arr1326 - const yyr1326 bool = false - yyq1326[0] = x.Replicas != nil - yyq1326[1] = x.MinReadySeconds != 0 - yyq1326[2] = x.Selector != nil - yyq1326[3] = true - var yynn1326 int - if yyr1326 || yy2arr1326 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != nil + yyq2[1] = x.MinReadySeconds != 0 + yyq2[2] = x.Selector != nil + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn1326 = 0 - for _, b := range yyq1326 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1326++ + yynn2++ } } - r.EncodeMapStart(yynn1326) - yynn1326 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1326 || yy2arr1326 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1326[0] { + if yyq2[0] { if x.Replicas == nil { r.EncodeNil() } else { - yy1328 := *x.Replicas - yym1329 := z.EncBinary() - _ = yym1329 + yy4 := *x.Replicas + yym5 := z.EncBinary() + _ = yym5 if false { } else { - r.EncodeInt(int64(yy1328)) + r.EncodeInt(int64(yy4)) } } } else { r.EncodeNil() } } else { - if yyq1326[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("replicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Replicas == nil { r.EncodeNil() } else { - yy1330 := *x.Replicas - yym1331 := z.EncBinary() - _ = yym1331 + yy6 := *x.Replicas + yym7 := z.EncBinary() + _ = yym7 if false { } else { - r.EncodeInt(int64(yy1330)) + r.EncodeInt(int64(yy6)) } } } } - if yyr1326 || yy2arr1326 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1326[1] { - yym1333 := z.EncBinary() - _ = yym1333 + if yyq2[1] { + yym9 := z.EncBinary() + _ = yym9 if false { } else { r.EncodeInt(int64(x.MinReadySeconds)) @@ -15694,26 +12956,26 @@ func (x *ReplicaSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq1326[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1334 := z.EncBinary() - _ = yym1334 + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeInt(int64(x.MinReadySeconds)) } } } - if yyr1326 || yy2arr1326 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1326[2] { + if yyq2[2] { if x.Selector == nil { r.EncodeNil() } else { - yym1336 := z.EncBinary() - _ = yym1336 + yym12 := z.EncBinary() + _ = yym12 if false { } else if z.HasExtensions() && z.EncExt(x.Selector) { } else { @@ -15724,15 +12986,15 @@ func (x *ReplicaSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1326[2] { + if yyq2[2] { 
z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("selector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Selector == nil { r.EncodeNil() } else { - yym1337 := z.EncBinary() - _ = yym1337 + yym13 := z.EncBinary() + _ = yym13 if false { } else if z.HasExtensions() && z.EncExt(x.Selector) { } else { @@ -15741,24 +13003,24 @@ func (x *ReplicaSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1326 || yy2arr1326 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1326[3] { - yy1339 := &x.Template - yy1339.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := &x.Template + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq1326[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("template")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1340 := &x.Template - yy1340.CodecEncodeSelf(e) + yy17 := &x.Template + yy17.CodecEncodeSelf(e) } } - if yyr1326 || yy2arr1326 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -15771,25 +13033,25 @@ func (x *ReplicaSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1341 := z.DecBinary() - _ = yym1341 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1342 := r.ContainerType() - if yyct1342 == codecSelferValueTypeMap1234 { - yyl1342 := r.ReadMapStart() - if yyl1342 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1342, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1342 == codecSelferValueTypeArray1234 { - yyl1342 := r.ReadArrayStart() - if yyl1342 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1342, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -15801,12 +13063,12 @@ func (x *ReplicaSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1343Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1343Slc - var yyhl1343 bool = l >= 0 - for yyj1343 := 0; ; yyj1343++ { - if yyhl1343 { - if yyj1343 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -15815,10 +13077,10 @@ func (x *ReplicaSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1343Slc = r.DecodeBytes(yys1343Slc, true, true) - yys1343 := string(yys1343Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1343 { + switch yys3 { case "replicas": if r.TryDecodeAsNil() { if x.Replicas != nil { @@ -15828,8 +13090,8 @@ func (x *ReplicaSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if x.Replicas == nil { x.Replicas = new(int32) } - yym1345 
:= z.DecBinary() - _ = yym1345 + yym5 := z.DecBinary() + _ = yym5 if false { } else { *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) @@ -15839,7 +13101,13 @@ func (x *ReplicaSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.MinReadySeconds = 0 } else { - x.MinReadySeconds = int32(r.DecodeInt(32)) + yyv6 := &x.MinReadySeconds + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } } case "selector": if r.TryDecodeAsNil() { @@ -15848,10 +13116,10 @@ func (x *ReplicaSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } else { if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) + x.Selector = new(pkg1_v1.LabelSelector) } - yym1348 := z.DecBinary() - _ = yym1348 + yym9 := z.DecBinary() + _ = yym9 if false { } else if z.HasExtensions() && z.DecExt(x.Selector) { } else { @@ -15860,15 +13128,15 @@ func (x *ReplicaSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } case "template": if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} + x.Template = pkg4_v1.PodTemplateSpec{} } else { - yyv1349 := &x.Template - yyv1349.CodecDecodeSelf(d) + yyv10 := &x.Template + yyv10.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys1343) - } // end switch yys1343 - } // end for yyj1343 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -15876,16 +13144,16 @@ func (x *ReplicaSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1350 int - var yyb1350 bool - var yyhl1350 bool = l >= 0 - yyj1350++ - if yyhl1350 { - yyb1350 = yyj1350 > l + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb1350 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb1350 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -15898,20 +13166,20 @@ func (x *ReplicaSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.Replicas == nil { x.Replicas = new(int32) } - yym1352 := z.DecBinary() - _ = yym1352 + yym13 := z.DecBinary() + _ = yym13 if false { } else { *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) } } - yyj1350++ - if yyhl1350 { - yyb1350 = yyj1350 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb1350 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb1350 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -15919,15 +13187,21 @@ func (x *ReplicaSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.MinReadySeconds = 0 } else { - x.MinReadySeconds = int32(r.DecodeInt(32)) + yyv14 := &x.MinReadySeconds + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } } - yyj1350++ - if yyhl1350 { - yyb1350 = yyj1350 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb1350 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb1350 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -15938,45 +13212,45 @@ func (x *ReplicaSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } } else { if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) + x.Selector = new(pkg1_v1.LabelSelector) } - yym1355 := z.DecBinary() - _ = yym1355 + yym17 := z.DecBinary() + _ = yym17 if false { } else if 
z.HasExtensions() && z.DecExt(x.Selector) { } else { z.DecFallback(x.Selector, false) } } - yyj1350++ - if yyhl1350 { - yyb1350 = yyj1350 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb1350 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb1350 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} + x.Template = pkg4_v1.PodTemplateSpec{} } else { - yyv1356 := &x.Template - yyv1356.CodecDecodeSelf(d) + yyv18 := &x.Template + yyv18.CodecDecodeSelf(d) } for { - yyj1350++ - if yyhl1350 { - yyb1350 = yyj1350 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb1350 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb1350 { + if yyb11 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1350-1, "") + z.DecStructFieldNotFound(yyj11-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -15988,38 +13262,38 @@ func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1357 := z.EncBinary() - _ = yym1357 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1358 := !z.EncBinary() - yy2arr1358 := z.EncBasicHandle().StructToArray - var yyq1358 [6]bool - _, _, _ = yysep1358, yyq1358, yy2arr1358 - const yyr1358 bool = false - yyq1358[1] = x.FullyLabeledReplicas != 0 - yyq1358[2] = x.ReadyReplicas != 0 - yyq1358[3] = x.AvailableReplicas != 0 - yyq1358[4] = x.ObservedGeneration != 0 - yyq1358[5] = len(x.Conditions) != 0 - var yynn1358 int - if yyr1358 || yy2arr1358 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FullyLabeledReplicas != 0 + yyq2[2] = x.ReadyReplicas != 0 + yyq2[3] = x.AvailableReplicas != 0 + yyq2[4] = x.ObservedGeneration != 0 + yyq2[5] = len(x.Conditions) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(6) } else { - yynn1358 = 1 - for _, b := range yyq1358 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1358++ + yynn2++ } } - r.EncodeMapStart(yynn1358) - yynn1358 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1358 || yy2arr1358 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1360 := z.EncBinary() - _ = yym1360 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeInt(int64(x.Replicas)) @@ -16028,18 +13302,18 @@ func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("replicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1361 := z.EncBinary() - _ = yym1361 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeInt(int64(x.Replicas)) } } - if yyr1358 || yy2arr1358 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1358[1] { - yym1363 := z.EncBinary() - _ = yym1363 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeInt(int64(x.FullyLabeledReplicas)) @@ -16048,23 +13322,23 @@ func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq1358[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1364 := z.EncBinary() - _ = yym1364 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeInt(int64(x.FullyLabeledReplicas)) } } } - if yyr1358 || yy2arr1358 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1358[2] { - yym1366 := z.EncBinary() - _ = yym1366 + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeInt(int64(x.ReadyReplicas)) @@ -16073,23 +13347,23 @@ func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq1358[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("readyReplicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1367 := z.EncBinary() - _ = yym1367 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeInt(int64(x.ReadyReplicas)) } } } - if yyr1358 || yy2arr1358 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1358[3] { - yym1369 := z.EncBinary() - _ = yym1369 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeInt(int64(x.AvailableReplicas)) @@ -16098,23 +13372,23 @@ func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq1358[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1370 := z.EncBinary() - _ = yym1370 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeInt(int64(x.AvailableReplicas)) } } } - if yyr1358 || yy2arr1358 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1358[4] { - yym1372 := z.EncBinary() - _ = yym1372 + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeInt(int64(x.ObservedGeneration)) @@ -16123,26 +13397,26 @@ func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq1358[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1373 := z.EncBinary() - _ = yym1373 + yym17 := z.EncBinary() + _ = yym17 if false { } else { r.EncodeInt(int64(x.ObservedGeneration)) } } } - if yyr1358 || yy2arr1358 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1358[5] { + if yyq2[5] { if x.Conditions == nil { r.EncodeNil() } else { - yym1375 := z.EncBinary() - _ = yym1375 + yym19 := z.EncBinary() + _ = yym19 if false { } else { h.encSliceReplicaSetCondition(([]ReplicaSetCondition)(x.Conditions), e) @@ -16152,15 +13426,15 @@ func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1358[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("conditions")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Conditions == nil { r.EncodeNil() } else { - yym1376 := z.EncBinary() - _ = yym1376 + yym20 := z.EncBinary() + _ = yym20 if false { } else { h.encSliceReplicaSetCondition(([]ReplicaSetCondition)(x.Conditions), e) @@ -16168,7 +13442,7 @@ func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1358 || yy2arr1358 { + if yyr2 || yy2arr2 { 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -16181,25 +13455,25 @@ func (x *ReplicaSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1377 := z.DecBinary() - _ = yym1377 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1378 := r.ContainerType() - if yyct1378 == codecSelferValueTypeMap1234 { - yyl1378 := r.ReadMapStart() - if yyl1378 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1378, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1378 == codecSelferValueTypeArray1234 { - yyl1378 := r.ReadArrayStart() - if yyl1378 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1378, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -16211,12 +13485,12 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1379Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1379Slc - var yyhl1379 bool = l >= 0 - for yyj1379 := 0; ; yyj1379++ { - if yyhl1379 { - if yyj1379 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -16225,56 +13499,86 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1379Slc = r.DecodeBytes(yys1379Slc, true, true) - yys1379 := string(yys1379Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1379 { + switch yys3 { case "replicas": if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int32(r.DecodeInt(32)) + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } } case "fullyLabeledReplicas": if r.TryDecodeAsNil() { x.FullyLabeledReplicas = 0 } else { - x.FullyLabeledReplicas = int32(r.DecodeInt(32)) + yyv6 := &x.FullyLabeledReplicas + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } } case "readyReplicas": if r.TryDecodeAsNil() { x.ReadyReplicas = 0 } else { - x.ReadyReplicas = int32(r.DecodeInt(32)) + yyv8 := &x.ReadyReplicas + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } } case "availableReplicas": if r.TryDecodeAsNil() { x.AvailableReplicas = 0 } else { - x.AvailableReplicas = int32(r.DecodeInt(32)) + yyv10 := &x.AvailableReplicas + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } } case "observedGeneration": if r.TryDecodeAsNil() { x.ObservedGeneration = 0 } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) + yyv12 := &x.ObservedGeneration + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int64)(yyv12)) = int64(r.DecodeInt(64)) + } } case 
"conditions": if r.TryDecodeAsNil() { x.Conditions = nil } else { - yyv1385 := &x.Conditions - yym1386 := z.DecBinary() - _ = yym1386 + yyv14 := &x.Conditions + yym15 := z.DecBinary() + _ = yym15 if false { } else { - h.decSliceReplicaSetCondition((*[]ReplicaSetCondition)(yyv1385), d) + h.decSliceReplicaSetCondition((*[]ReplicaSetCondition)(yyv14), d) } } default: - z.DecStructFieldNotFound(-1, yys1379) - } // end switch yys1379 - } // end for yyj1379 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -16282,16 +13586,16 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1387 int - var yyb1387 bool - var yyhl1387 bool = l >= 0 - yyj1387++ - if yyhl1387 { - yyb1387 = yyj1387 > l + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb1387 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb1387 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -16299,15 +13603,21 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int32(r.DecodeInt(32)) + yyv17 := &x.Replicas + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + } } - yyj1387++ - if yyhl1387 { - yyb1387 = yyj1387 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb1387 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb1387 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -16315,15 +13625,21 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.FullyLabeledReplicas = 0 } else { - x.FullyLabeledReplicas = int32(r.DecodeInt(32)) + yyv19 := &x.FullyLabeledReplicas + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int32)(yyv19)) = int32(r.DecodeInt(32)) + } } - yyj1387++ - if yyhl1387 { - yyb1387 = yyj1387 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb1387 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb1387 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -16331,15 +13647,21 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.ReadyReplicas = 0 } else { - x.ReadyReplicas = int32(r.DecodeInt(32)) + yyv21 := &x.ReadyReplicas + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } } - yyj1387++ - if yyhl1387 { - yyb1387 = yyj1387 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb1387 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb1387 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -16347,15 +13669,21 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.AvailableReplicas = 0 } else { - x.AvailableReplicas = int32(r.DecodeInt(32)) + yyv23 := &x.AvailableReplicas + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } } - yyj1387++ - if yyhl1387 { - yyb1387 = yyj1387 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb1387 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb1387 { + if yyb16 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -16363,15 +13691,21 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.ObservedGeneration = 0 } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) + yyv25 := &x.ObservedGeneration + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int64)(yyv25)) = int64(r.DecodeInt(64)) + } } - yyj1387++ - if yyhl1387 { - yyb1387 = yyj1387 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb1387 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb1387 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -16379,26 +13713,26 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Conditions = nil } else { - yyv1393 := &x.Conditions - yym1394 := z.DecBinary() - _ = yym1394 + yyv27 := &x.Conditions + yym28 := z.DecBinary() + _ = yym28 if false { } else { - h.decSliceReplicaSetCondition((*[]ReplicaSetCondition)(yyv1393), d) + h.decSliceReplicaSetCondition((*[]ReplicaSetCondition)(yyv27), d) } } for { - yyj1387++ - if yyhl1387 { - yyb1387 = yyj1387 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb1387 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb1387 { + if yyb16 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1387-1, "") + z.DecStructFieldNotFound(yyj16-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -16407,8 +13741,8 @@ func (x ReplicaSetConditionType) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym1395 := z.EncBinary() - _ = yym1395 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -16420,8 +13754,8 @@ func (x *ReplicaSetConditionType) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1396 := z.DecBinary() - _ = yym1396 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -16436,33 +13770,33 @@ func (x *ReplicaSetCondition) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1397 := z.EncBinary() - _ = yym1397 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1398 := !z.EncBinary() - yy2arr1398 := z.EncBasicHandle().StructToArray - var yyq1398 [5]bool - _, _, _ = yysep1398, yyq1398, yy2arr1398 - const yyr1398 bool = false - yyq1398[2] = true - yyq1398[3] = x.Reason != "" - yyq1398[4] = x.Message != "" - var yynn1398 int - if yyr1398 || yy2arr1398 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = x.Reason != "" + yyq2[4] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn1398 = 2 - for _, b := range yyq1398 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn1398++ + yynn2++ } } - r.EncodeMapStart(yynn1398) - yynn1398 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1398 || yy2arr1398 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) x.Type.CodecEncodeSelf(e) } else { @@ -16471,69 +13805,59 @@ func (x *ReplicaSetCondition) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapValue1234) 
x.Type.CodecEncodeSelf(e) } - if yyr1398 || yy2arr1398 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1401 := z.EncBinary() - _ = yym1401 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } + yysf7 := &x.Status + yysf7.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1402 := z.EncBinary() - _ = yym1402 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } + yysf8 := &x.Status + yysf8.CodecEncodeSelf(e) } - if yyr1398 || yy2arr1398 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1398[2] { - yy1404 := &x.LastTransitionTime - yym1405 := z.EncBinary() - _ = yym1405 + if yyq2[2] { + yy10 := &x.LastTransitionTime + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy1404) { - } else if yym1405 { - z.EncBinaryMarshal(yy1404) - } else if !yym1405 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1404) + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) } else { - z.EncFallback(yy1404) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq1398[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1406 := &x.LastTransitionTime - yym1407 := z.EncBinary() - _ = yym1407 + yy12 := &x.LastTransitionTime + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy1406) { - } else if yym1407 { - z.EncBinaryMarshal(yy1406) - } else if !yym1407 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1406) + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) } else { - z.EncFallback(yy1406) + z.EncFallback(yy12) } } } - if yyr1398 || yy2arr1398 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1398[3] { - yym1409 := z.EncBinary() - _ = yym1409 + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) @@ -16542,23 +13866,23 @@ func (x *ReplicaSetCondition) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1398[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("reason")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1410 := z.EncBinary() - _ = yym1410 + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) } } } - if yyr1398 || yy2arr1398 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1398[4] { - yym1412 := z.EncBinary() - _ = yym1412 + if yyq2[4] { + yym18 := z.EncBinary() + _ = yym18 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) @@ -16567,19 +13891,19 @@ func (x *ReplicaSetCondition) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1398[4] 
{ + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("message")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1413 := z.EncBinary() - _ = yym1413 + yym19 := z.EncBinary() + _ = yym19 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Message)) } } } - if yyr1398 || yy2arr1398 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -16592,25 +13916,25 @@ func (x *ReplicaSetCondition) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1414 := z.DecBinary() - _ = yym1414 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1415 := r.ContainerType() - if yyct1415 == codecSelferValueTypeMap1234 { - yyl1415 := r.ReadMapStart() - if yyl1415 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1415, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1415 == codecSelferValueTypeArray1234 { - yyl1415 := r.ReadArrayStart() - if yyl1415 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1415, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -16622,12 +13946,12 @@ func (x *ReplicaSetCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1416Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1416Slc - var yyhl1416 bool = l >= 0 - for yyj1416 := 0; ; yyj1416++ { - if yyhl1416 { - if yyj1416 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -16636,55 +13960,69 @@ func (x *ReplicaSetCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1416Slc = r.DecodeBytes(yys1416Slc, true, true) - yys1416 := string(yys1416Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1416 { + switch yys3 { case "type": if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = ReplicaSetConditionType(r.DecodeString()) + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = "" } else { - x.Status = pkg2_v1.ConditionStatus(r.DecodeString()) + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) } case "lastTransitionTime": if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} + x.LastTransitionTime = pkg1_v1.Time{} } else { - yyv1419 := &x.LastTransitionTime - yym1420 := z.DecBinary() - _ = yym1420 + yyv6 := &x.LastTransitionTime + yym7 := z.DecBinary() + _ = yym7 if false { - } else if z.HasExtensions() && z.DecExt(yyv1419) { - } else if yym1420 { - z.DecBinaryUnmarshal(yyv1419) - } else if !yym1420 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1419) + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && 
z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) } else { - z.DecFallback(yyv1419, false) + z.DecFallback(yyv6, false) } } case "reason": if r.TryDecodeAsNil() { x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv8 := &x.Reason + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } case "message": if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv10 := &x.Message + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys1416) - } // end switch yys1416 - } // end for yyj1416 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -16692,16 +14030,16 @@ func (x *ReplicaSetCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1423 int - var yyb1423 bool - var yyhl1423 bool = l >= 0 - yyj1423++ - if yyhl1423 { - yyb1423 = yyj1423 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1423 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1423 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -16709,15 +14047,16 @@ func (x *ReplicaSetCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Type = "" } else { - x.Type = ReplicaSetConditionType(r.DecodeString()) + yyv13 := &x.Type + yyv13.CodecDecodeSelf(d) } - yyj1423++ - if yyhl1423 { - yyb1423 = yyj1423 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1423 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1423 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -16725,42 +14064,43 @@ func (x *ReplicaSetCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Status = "" } else { - x.Status = pkg2_v1.ConditionStatus(r.DecodeString()) + yyv14 := &x.Status + yyv14.CodecDecodeSelf(d) } - yyj1423++ - if yyhl1423 { - yyb1423 = yyj1423 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1423 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1423 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} + x.LastTransitionTime = pkg1_v1.Time{} } else { - yyv1426 := &x.LastTransitionTime - yym1427 := z.DecBinary() - _ = yym1427 + yyv15 := &x.LastTransitionTime + yym16 := z.DecBinary() + _ = yym16 if false { - } else if z.HasExtensions() && z.DecExt(yyv1426) { - } else if yym1427 { - z.DecBinaryUnmarshal(yyv1426) - } else if !yym1427 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1426) + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else if yym16 { + z.DecBinaryUnmarshal(yyv15) + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv15) } else { - z.DecFallback(yyv1426, false) + z.DecFallback(yyv15, false) } } - yyj1423++ - if yyhl1423 { - yyb1423 = yyj1423 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1423 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1423 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -16768,15 +14108,21 @@ func (x *ReplicaSetCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { 
x.Reason = "" } else { - x.Reason = string(r.DecodeString()) + yyv17 := &x.Reason + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } } - yyj1423++ - if yyhl1423 { - yyb1423 = yyj1423 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1423 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1423 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -16784,20 +14130,26 @@ func (x *ReplicaSetCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Message = "" } else { - x.Message = string(r.DecodeString()) + yyv19 := &x.Message + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } } for { - yyj1423++ - if yyhl1423 { - yyb1423 = yyj1423 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1423 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1423 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1423-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -16809,38 +14161,38 @@ func (x *PodSecurityPolicy) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1430 := z.EncBinary() - _ = yym1430 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1431 := !z.EncBinary() - yy2arr1431 := z.EncBasicHandle().StructToArray - var yyq1431 [4]bool - _, _, _ = yysep1431, yyq1431, yy2arr1431 - const yyr1431 bool = false - yyq1431[0] = x.Kind != "" - yyq1431[1] = x.APIVersion != "" - yyq1431[2] = true - yyq1431[3] = true - var yynn1431 int - if yyr1431 || yy2arr1431 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn1431 = 0 - for _, b := range yyq1431 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1431++ + yynn2++ } } - r.EncodeMapStart(yynn1431) - yynn1431 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1431 || yy2arr1431 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1431[0] { - yym1433 := z.EncBinary() - _ = yym1433 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -16849,23 +14201,23 @@ func (x *PodSecurityPolicy) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1431[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1434 := z.EncBinary() - _ = yym1434 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr1431 || yy2arr1431 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1431[1] { - yym1436 := z.EncBinary() - _ = yym1436 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -16874,53 +14226,65 @@ func (x *PodSecurityPolicy) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1431[1] { + 
if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1437 := z.EncBinary() - _ = yym1437 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr1431 || yy2arr1431 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1431[2] { - yy1439 := &x.ObjectMeta - yy1439.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq1431[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1440 := &x.ObjectMeta - yy1440.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr1431 || yy2arr1431 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1431[3] { - yy1442 := &x.Spec - yy1442.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq1431[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1443 := &x.Spec - yy1443.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } } - if yyr1431 || yy2arr1431 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -16933,25 +14297,25 @@ func (x *PodSecurityPolicy) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1444 := z.DecBinary() - _ = yym1444 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1445 := r.ContainerType() - if yyct1445 == codecSelferValueTypeMap1234 { - yyl1445 := r.ReadMapStart() - if yyl1445 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1445, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1445 == codecSelferValueTypeArray1234 { - yyl1445 := r.ReadArrayStart() - if yyl1445 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1445, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -16963,12 +14327,12 @@ func (x *PodSecurityPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1446Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1446Slc - var yyhl1446 bool = l >= 0 - for yyj1446 := 0; ; yyj1446++ { - if yyhl1446 { - if yyj1446 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if 
yyhl3 { + if yyj3 >= l { break } } else { @@ -16977,40 +14341,58 @@ func (x *PodSecurityPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1446Slc = r.DecodeBytes(yys1446Slc, true, true) - yys1446 := string(yys1446Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1446 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv1449 := &x.ObjectMeta - yyv1449.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = PodSecurityPolicySpec{} } else { - yyv1450 := &x.Spec - yyv1450.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys1446) - } // end switch yys1446 - } // end for yyj1446 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -17018,16 +14400,16 @@ func (x *PodSecurityPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1451 int - var yyb1451 bool - var yyhl1451 bool = l >= 0 - yyj1451++ - if yyhl1451 { - yyb1451 = yyj1451 > l + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb1451 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb1451 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -17035,15 +14417,21 @@ func (x *PodSecurityPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } - yyj1451++ - if yyhl1451 { - yyb1451 = yyj1451 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb1451 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb1451 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -17051,32 +14439,44 @@ func (x *PodSecurityPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } - yyj1451++ - if yyhl1451 { - yyb1451 = yyj1451 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb1451 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb1451 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = 
pkg1_v1.ObjectMeta{} } else { - yyv1454 := &x.ObjectMeta - yyv1454.CodecDecodeSelf(d) + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } } - yyj1451++ - if yyhl1451 { - yyb1451 = yyj1451 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb1451 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb1451 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -17084,21 +14484,21 @@ func (x *PodSecurityPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.Spec = PodSecurityPolicySpec{} } else { - yyv1455 := &x.Spec - yyv1455.CodecDecodeSelf(d) + yyv18 := &x.Spec + yyv18.CodecDecodeSelf(d) } for { - yyj1451++ - if yyhl1451 { - yyb1451 = yyj1451 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb1451 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb1451 { + if yyb11 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1451-1, "") + z.DecStructFieldNotFound(yyj11-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -17110,44 +14510,44 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1456 := z.EncBinary() - _ = yym1456 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1457 := !z.EncBinary() - yy2arr1457 := z.EncBasicHandle().StructToArray - var yyq1457 [14]bool - _, _, _ = yysep1457, yyq1457, yy2arr1457 - const yyr1457 bool = false - yyq1457[0] = x.Privileged != false - yyq1457[1] = len(x.DefaultAddCapabilities) != 0 - yyq1457[2] = len(x.RequiredDropCapabilities) != 0 - yyq1457[3] = len(x.AllowedCapabilities) != 0 - yyq1457[4] = len(x.Volumes) != 0 - yyq1457[5] = x.HostNetwork != false - yyq1457[6] = len(x.HostPorts) != 0 - yyq1457[7] = x.HostPID != false - yyq1457[8] = x.HostIPC != false - yyq1457[13] = x.ReadOnlyRootFilesystem != false - var yynn1457 int - if yyr1457 || yy2arr1457 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [14]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Privileged != false + yyq2[1] = len(x.DefaultAddCapabilities) != 0 + yyq2[2] = len(x.RequiredDropCapabilities) != 0 + yyq2[3] = len(x.AllowedCapabilities) != 0 + yyq2[4] = len(x.Volumes) != 0 + yyq2[5] = x.HostNetwork != false + yyq2[6] = len(x.HostPorts) != 0 + yyq2[7] = x.HostPID != false + yyq2[8] = x.HostIPC != false + yyq2[13] = x.ReadOnlyRootFilesystem != false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(14) } else { - yynn1457 = 4 - for _, b := range yyq1457 { + yynn2 = 4 + for _, b := range yyq2 { if b { - yynn1457++ + yynn2++ } } - r.EncodeMapStart(yynn1457) - yynn1457 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1457 || yy2arr1457 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1457[0] { - yym1459 := z.EncBinary() - _ = yym1459 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeBool(bool(x.Privileged)) @@ -17156,125 +14556,125 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq1457[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("privileged")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1460 := z.EncBinary() 
- _ = yym1460 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeBool(bool(x.Privileged)) } } } - if yyr1457 || yy2arr1457 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1457[1] { + if yyq2[1] { if x.DefaultAddCapabilities == nil { r.EncodeNil() } else { - yym1462 := z.EncBinary() - _ = yym1462 + yym7 := z.EncBinary() + _ = yym7 if false { } else { - h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.DefaultAddCapabilities), e) + h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.DefaultAddCapabilities), e) } } } else { r.EncodeNil() } } else { - if yyq1457[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("defaultAddCapabilities")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.DefaultAddCapabilities == nil { r.EncodeNil() } else { - yym1463 := z.EncBinary() - _ = yym1463 + yym8 := z.EncBinary() + _ = yym8 if false { } else { - h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.DefaultAddCapabilities), e) + h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.DefaultAddCapabilities), e) } } } } - if yyr1457 || yy2arr1457 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1457[2] { + if yyq2[2] { if x.RequiredDropCapabilities == nil { r.EncodeNil() } else { - yym1465 := z.EncBinary() - _ = yym1465 + yym10 := z.EncBinary() + _ = yym10 if false { } else { - h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.RequiredDropCapabilities), e) + h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.RequiredDropCapabilities), e) } } } else { r.EncodeNil() } } else { - if yyq1457[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("requiredDropCapabilities")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.RequiredDropCapabilities == nil { r.EncodeNil() } else { - yym1466 := z.EncBinary() - _ = yym1466 + yym11 := z.EncBinary() + _ = yym11 if false { } else { - h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.RequiredDropCapabilities), e) + h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.RequiredDropCapabilities), e) } } } } - if yyr1457 || yy2arr1457 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1457[3] { + if yyq2[3] { if x.AllowedCapabilities == nil { r.EncodeNil() } else { - yym1468 := z.EncBinary() - _ = yym1468 + yym13 := z.EncBinary() + _ = yym13 if false { } else { - h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.AllowedCapabilities), e) + h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.AllowedCapabilities), e) } } } else { r.EncodeNil() } } else { - if yyq1457[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("allowedCapabilities")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.AllowedCapabilities == nil { r.EncodeNil() } else { - yym1469 := z.EncBinary() - _ = yym1469 + yym14 := z.EncBinary() + _ = yym14 if false { } else { - h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.AllowedCapabilities), e) + h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.AllowedCapabilities), e) } } } } - if yyr1457 || yy2arr1457 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1457[4] { + if yyq2[4] { if x.Volumes == nil { r.EncodeNil() } else { - yym1471 := z.EncBinary() - _ = yym1471 + yym16 := z.EncBinary() + _ = yym16 if false { } else { 
h.encSliceFSType(([]FSType)(x.Volumes), e) @@ -17284,15 +14684,15 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1457[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("volumes")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Volumes == nil { r.EncodeNil() } else { - yym1472 := z.EncBinary() - _ = yym1472 + yym17 := z.EncBinary() + _ = yym17 if false { } else { h.encSliceFSType(([]FSType)(x.Volumes), e) @@ -17300,11 +14700,11 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1457 || yy2arr1457 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1457[5] { - yym1474 := z.EncBinary() - _ = yym1474 + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 if false { } else { r.EncodeBool(bool(x.HostNetwork)) @@ -17313,26 +14713,26 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq1457[5] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("hostNetwork")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1475 := z.EncBinary() - _ = yym1475 + yym20 := z.EncBinary() + _ = yym20 if false { } else { r.EncodeBool(bool(x.HostNetwork)) } } } - if yyr1457 || yy2arr1457 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1457[6] { + if yyq2[6] { if x.HostPorts == nil { r.EncodeNil() } else { - yym1477 := z.EncBinary() - _ = yym1477 + yym22 := z.EncBinary() + _ = yym22 if false { } else { h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) @@ -17342,15 +14742,15 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1457[6] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("hostPorts")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.HostPorts == nil { r.EncodeNil() } else { - yym1478 := z.EncBinary() - _ = yym1478 + yym23 := z.EncBinary() + _ = yym23 if false { } else { h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) @@ -17358,11 +14758,11 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1457 || yy2arr1457 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1457[7] { - yym1480 := z.EncBinary() - _ = yym1480 + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 if false { } else { r.EncodeBool(bool(x.HostPID)) @@ -17371,23 +14771,23 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq1457[7] { + if yyq2[7] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("hostPID")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1481 := z.EncBinary() - _ = yym1481 + yym26 := z.EncBinary() + _ = yym26 if false { } else { r.EncodeBool(bool(x.HostPID)) } } } - if yyr1457 || yy2arr1457 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1457[8] { - yym1483 := z.EncBinary() - _ = yym1483 + if yyq2[8] { + yym28 := z.EncBinary() + _ = yym28 if false { } else { r.EncodeBool(bool(x.HostIPC)) @@ -17396,67 +14796,67 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if 
yyq1457[8] { + if yyq2[8] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("hostIPC")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1484 := z.EncBinary() - _ = yym1484 + yym29 := z.EncBinary() + _ = yym29 if false { } else { r.EncodeBool(bool(x.HostIPC)) } } } - if yyr1457 || yy2arr1457 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1486 := &x.SELinux - yy1486.CodecEncodeSelf(e) + yy31 := &x.SELinux + yy31.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("seLinux")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1487 := &x.SELinux - yy1487.CodecEncodeSelf(e) + yy33 := &x.SELinux + yy33.CodecEncodeSelf(e) } - if yyr1457 || yy2arr1457 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1489 := &x.RunAsUser - yy1489.CodecEncodeSelf(e) + yy36 := &x.RunAsUser + yy36.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1490 := &x.RunAsUser - yy1490.CodecEncodeSelf(e) + yy38 := &x.RunAsUser + yy38.CodecEncodeSelf(e) } - if yyr1457 || yy2arr1457 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1492 := &x.SupplementalGroups - yy1492.CodecEncodeSelf(e) + yy41 := &x.SupplementalGroups + yy41.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("supplementalGroups")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1493 := &x.SupplementalGroups - yy1493.CodecEncodeSelf(e) + yy43 := &x.SupplementalGroups + yy43.CodecEncodeSelf(e) } - if yyr1457 || yy2arr1457 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1495 := &x.FSGroup - yy1495.CodecEncodeSelf(e) + yy46 := &x.FSGroup + yy46.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("fsGroup")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1496 := &x.FSGroup - yy1496.CodecEncodeSelf(e) + yy48 := &x.FSGroup + yy48.CodecEncodeSelf(e) } - if yyr1457 || yy2arr1457 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1457[13] { - yym1498 := z.EncBinary() - _ = yym1498 + if yyq2[13] { + yym51 := z.EncBinary() + _ = yym51 if false { } else { r.EncodeBool(bool(x.ReadOnlyRootFilesystem)) @@ -17465,19 +14865,19 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq1457[13] { + if yyq2[13] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("readOnlyRootFilesystem")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1499 := z.EncBinary() - _ = yym1499 + yym52 := z.EncBinary() + _ = yym52 if false { } else { r.EncodeBool(bool(x.ReadOnlyRootFilesystem)) } } } - if yyr1457 || yy2arr1457 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -17490,25 +14890,25 @@ func (x *PodSecurityPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1500 := z.DecBinary() - _ = yym1500 + 
yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1501 := r.ContainerType() - if yyct1501 == codecSelferValueTypeMap1234 { - yyl1501 := r.ReadMapStart() - if yyl1501 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1501, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1501 == codecSelferValueTypeArray1234 { - yyl1501 := r.ReadArrayStart() - if yyl1501 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1501, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -17520,12 +14920,12 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1502Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1502Slc - var yyhl1502 bool = l >= 0 - for yyj1502 := 0; ; yyj1502++ { - if yyhl1502 { - if yyj1502 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -17534,132 +14934,162 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decod } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1502Slc = r.DecodeBytes(yys1502Slc, true, true) - yys1502 := string(yys1502Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1502 { + switch yys3 { case "privileged": if r.TryDecodeAsNil() { x.Privileged = false } else { - x.Privileged = bool(r.DecodeBool()) + yyv4 := &x.Privileged + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*bool)(yyv4)) = r.DecodeBool() + } } case "defaultAddCapabilities": if r.TryDecodeAsNil() { x.DefaultAddCapabilities = nil } else { - yyv1504 := &x.DefaultAddCapabilities - yym1505 := z.DecBinary() - _ = yym1505 + yyv6 := &x.DefaultAddCapabilities + yym7 := z.DecBinary() + _ = yym7 if false { } else { - h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv1504), d) + h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv6), d) } } case "requiredDropCapabilities": if r.TryDecodeAsNil() { x.RequiredDropCapabilities = nil } else { - yyv1506 := &x.RequiredDropCapabilities - yym1507 := z.DecBinary() - _ = yym1507 + yyv8 := &x.RequiredDropCapabilities + yym9 := z.DecBinary() + _ = yym9 if false { } else { - h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv1506), d) + h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv8), d) } } case "allowedCapabilities": if r.TryDecodeAsNil() { x.AllowedCapabilities = nil } else { - yyv1508 := &x.AllowedCapabilities - yym1509 := z.DecBinary() - _ = yym1509 + yyv10 := &x.AllowedCapabilities + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv1508), d) + h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv10), d) } } case "volumes": if r.TryDecodeAsNil() { x.Volumes = nil } else { - yyv1510 := &x.Volumes - yym1511 := z.DecBinary() - _ = yym1511 + yyv12 := &x.Volumes + yym13 := z.DecBinary() + _ = yym13 if false { } else { - h.decSliceFSType((*[]FSType)(yyv1510), d) + 
h.decSliceFSType((*[]FSType)(yyv12), d) } } case "hostNetwork": if r.TryDecodeAsNil() { x.HostNetwork = false } else { - x.HostNetwork = bool(r.DecodeBool()) + yyv14 := &x.HostNetwork + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } } case "hostPorts": if r.TryDecodeAsNil() { x.HostPorts = nil } else { - yyv1513 := &x.HostPorts - yym1514 := z.DecBinary() - _ = yym1514 + yyv16 := &x.HostPorts + yym17 := z.DecBinary() + _ = yym17 if false { } else { - h.decSliceHostPortRange((*[]HostPortRange)(yyv1513), d) + h.decSliceHostPortRange((*[]HostPortRange)(yyv16), d) } } case "hostPID": if r.TryDecodeAsNil() { x.HostPID = false } else { - x.HostPID = bool(r.DecodeBool()) + yyv18 := &x.HostPID + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*bool)(yyv18)) = r.DecodeBool() + } } case "hostIPC": if r.TryDecodeAsNil() { x.HostIPC = false } else { - x.HostIPC = bool(r.DecodeBool()) + yyv20 := &x.HostIPC + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*bool)(yyv20)) = r.DecodeBool() + } } case "seLinux": if r.TryDecodeAsNil() { x.SELinux = SELinuxStrategyOptions{} } else { - yyv1517 := &x.SELinux - yyv1517.CodecDecodeSelf(d) + yyv22 := &x.SELinux + yyv22.CodecDecodeSelf(d) } case "runAsUser": if r.TryDecodeAsNil() { x.RunAsUser = RunAsUserStrategyOptions{} } else { - yyv1518 := &x.RunAsUser - yyv1518.CodecDecodeSelf(d) + yyv23 := &x.RunAsUser + yyv23.CodecDecodeSelf(d) } case "supplementalGroups": if r.TryDecodeAsNil() { x.SupplementalGroups = SupplementalGroupsStrategyOptions{} } else { - yyv1519 := &x.SupplementalGroups - yyv1519.CodecDecodeSelf(d) + yyv24 := &x.SupplementalGroups + yyv24.CodecDecodeSelf(d) } case "fsGroup": if r.TryDecodeAsNil() { x.FSGroup = FSGroupStrategyOptions{} } else { - yyv1520 := &x.FSGroup - yyv1520.CodecDecodeSelf(d) + yyv25 := &x.FSGroup + yyv25.CodecDecodeSelf(d) } case "readOnlyRootFilesystem": if r.TryDecodeAsNil() { x.ReadOnlyRootFilesystem = false } else { - x.ReadOnlyRootFilesystem = bool(r.DecodeBool()) + yyv26 := &x.ReadOnlyRootFilesystem + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*bool)(yyv26)) = r.DecodeBool() + } } default: - z.DecStructFieldNotFound(-1, yys1502) - } // end switch yys1502 - } // end for yyj1502 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -17667,16 +15097,16 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1522 int - var yyb1522 bool - var yyhl1522 bool = l >= 0 - yyj1522++ - if yyhl1522 { - yyb1522 = yyj1522 > l + var yyj28 int + var yyb28 bool + var yyhl28 bool = l >= 0 + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb1522 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb1522 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -17684,15 +15114,21 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.Privileged = false } else { - x.Privileged = bool(r.DecodeBool()) + yyv29 := &x.Privileged + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } } - yyj1522++ - if yyhl1522 { - yyb1522 = yyj1522 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb1522 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb1522 { + if yyb28 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -17700,21 +15136,21 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.DefaultAddCapabilities = nil } else { - yyv1524 := &x.DefaultAddCapabilities - yym1525 := z.DecBinary() - _ = yym1525 + yyv31 := &x.DefaultAddCapabilities + yym32 := z.DecBinary() + _ = yym32 if false { } else { - h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv1524), d) + h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv31), d) } } - yyj1522++ - if yyhl1522 { - yyb1522 = yyj1522 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb1522 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb1522 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -17722,21 +15158,21 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.RequiredDropCapabilities = nil } else { - yyv1526 := &x.RequiredDropCapabilities - yym1527 := z.DecBinary() - _ = yym1527 + yyv33 := &x.RequiredDropCapabilities + yym34 := z.DecBinary() + _ = yym34 if false { } else { - h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv1526), d) + h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv33), d) } } - yyj1522++ - if yyhl1522 { - yyb1522 = yyj1522 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb1522 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb1522 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -17744,21 +15180,21 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.AllowedCapabilities = nil } else { - yyv1528 := &x.AllowedCapabilities - yym1529 := z.DecBinary() - _ = yym1529 + yyv35 := &x.AllowedCapabilities + yym36 := z.DecBinary() + _ = yym36 if false { } else { - h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv1528), d) + h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv35), d) } } - yyj1522++ - if yyhl1522 { - yyb1522 = yyj1522 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb1522 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb1522 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -17766,21 +15202,21 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.Volumes = nil } else { - yyv1530 := &x.Volumes - yym1531 := z.DecBinary() - _ = yym1531 + yyv37 := &x.Volumes + yym38 := z.DecBinary() + _ = yym38 if false { } else { - h.decSliceFSType((*[]FSType)(yyv1530), d) + h.decSliceFSType((*[]FSType)(yyv37), d) } } - yyj1522++ - if yyhl1522 { - yyb1522 = yyj1522 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb1522 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb1522 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -17788,15 +15224,21 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.HostNetwork = false } else { - x.HostNetwork = bool(r.DecodeBool()) + yyv39 := &x.HostNetwork + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*bool)(yyv39)) = r.DecodeBool() + } } - yyj1522++ - if yyhl1522 { - yyb1522 = yyj1522 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb1522 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb1522 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -17804,21 +15246,21 @@ func (x *PodSecurityPolicySpec) 
codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.HostPorts = nil } else { - yyv1533 := &x.HostPorts - yym1534 := z.DecBinary() - _ = yym1534 + yyv41 := &x.HostPorts + yym42 := z.DecBinary() + _ = yym42 if false { } else { - h.decSliceHostPortRange((*[]HostPortRange)(yyv1533), d) + h.decSliceHostPortRange((*[]HostPortRange)(yyv41), d) } } - yyj1522++ - if yyhl1522 { - yyb1522 = yyj1522 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb1522 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb1522 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -17826,15 +15268,21 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.HostPID = false } else { - x.HostPID = bool(r.DecodeBool()) + yyv43 := &x.HostPID + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*bool)(yyv43)) = r.DecodeBool() + } } - yyj1522++ - if yyhl1522 { - yyb1522 = yyj1522 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb1522 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb1522 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -17842,15 +15290,21 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.HostIPC = false } else { - x.HostIPC = bool(r.DecodeBool()) + yyv45 := &x.HostIPC + yym46 := z.DecBinary() + _ = yym46 + if false { + } else { + *((*bool)(yyv45)) = r.DecodeBool() + } } - yyj1522++ - if yyhl1522 { - yyb1522 = yyj1522 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb1522 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb1522 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -17858,16 +15312,16 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.SELinux = SELinuxStrategyOptions{} } else { - yyv1537 := &x.SELinux - yyv1537.CodecDecodeSelf(d) + yyv47 := &x.SELinux + yyv47.CodecDecodeSelf(d) } - yyj1522++ - if yyhl1522 { - yyb1522 = yyj1522 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb1522 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb1522 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -17875,16 +15329,16 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.RunAsUser = RunAsUserStrategyOptions{} } else { - yyv1538 := &x.RunAsUser - yyv1538.CodecDecodeSelf(d) + yyv48 := &x.RunAsUser + yyv48.CodecDecodeSelf(d) } - yyj1522++ - if yyhl1522 { - yyb1522 = yyj1522 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb1522 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb1522 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -17892,16 +15346,16 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.SupplementalGroups = SupplementalGroupsStrategyOptions{} } else { - yyv1539 := &x.SupplementalGroups - yyv1539.CodecDecodeSelf(d) + yyv49 := &x.SupplementalGroups + yyv49.CodecDecodeSelf(d) } - yyj1522++ - if yyhl1522 { - yyb1522 = yyj1522 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb1522 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb1522 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -17909,16 +15363,16 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Dec if 
r.TryDecodeAsNil() { x.FSGroup = FSGroupStrategyOptions{} } else { - yyv1540 := &x.FSGroup - yyv1540.CodecDecodeSelf(d) + yyv50 := &x.FSGroup + yyv50.CodecDecodeSelf(d) } - yyj1522++ - if yyhl1522 { - yyb1522 = yyj1522 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb1522 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb1522 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -17926,20 +15380,26 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.ReadOnlyRootFilesystem = false } else { - x.ReadOnlyRootFilesystem = bool(r.DecodeBool()) + yyv51 := &x.ReadOnlyRootFilesystem + yym52 := z.DecBinary() + _ = yym52 + if false { + } else { + *((*bool)(yyv51)) = r.DecodeBool() + } } for { - yyj1522++ - if yyhl1522 { - yyb1522 = yyj1522 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb1522 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb1522 { + if yyb28 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1522-1, "") + z.DecStructFieldNotFound(yyj28-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -17948,8 +15408,8 @@ func (x FSType) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym1542 := z.EncBinary() - _ = yym1542 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -17961,8 +15421,8 @@ func (x *FSType) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1543 := z.DecBinary() - _ = yym1543 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -17977,33 +15437,33 @@ func (x *HostPortRange) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1544 := z.EncBinary() - _ = yym1544 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1545 := !z.EncBinary() - yy2arr1545 := z.EncBasicHandle().StructToArray - var yyq1545 [2]bool - _, _, _ = yysep1545, yyq1545, yy2arr1545 - const yyr1545 bool = false - var yynn1545 int - if yyr1545 || yy2arr1545 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1545 = 2 - for _, b := range yyq1545 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn1545++ + yynn2++ } } - r.EncodeMapStart(yynn1545) - yynn1545 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1545 || yy2arr1545 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1547 := z.EncBinary() - _ = yym1547 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeInt(int64(x.Min)) @@ -18012,17 +15472,17 @@ func (x *HostPortRange) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("min")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1548 := z.EncBinary() - _ = yym1548 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeInt(int64(x.Min)) } } - if yyr1545 || yy2arr1545 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1550 := z.EncBinary() - _ = yym1550 + yym7 := z.EncBinary() + _ = yym7 if false { } else { 
r.EncodeInt(int64(x.Max)) @@ -18031,14 +15491,14 @@ func (x *HostPortRange) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("max")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1551 := z.EncBinary() - _ = yym1551 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeInt(int64(x.Max)) } } - if yyr1545 || yy2arr1545 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -18051,25 +15511,25 @@ func (x *HostPortRange) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1552 := z.DecBinary() - _ = yym1552 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1553 := r.ContainerType() - if yyct1553 == codecSelferValueTypeMap1234 { - yyl1553 := r.ReadMapStart() - if yyl1553 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1553, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1553 == codecSelferValueTypeArray1234 { - yyl1553 := r.ReadArrayStart() - if yyl1553 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1553, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -18081,12 +15541,12 @@ func (x *HostPortRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1554Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1554Slc - var yyhl1554 bool = l >= 0 - for yyj1554 := 0; ; yyj1554++ { - if yyhl1554 { - if yyj1554 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -18095,26 +15555,38 @@ func (x *HostPortRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1554Slc = r.DecodeBytes(yys1554Slc, true, true) - yys1554 := string(yys1554Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1554 { + switch yys3 { case "min": if r.TryDecodeAsNil() { x.Min = 0 } else { - x.Min = int32(r.DecodeInt(32)) + yyv4 := &x.Min + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } } case "max": if r.TryDecodeAsNil() { x.Max = 0 } else { - x.Max = int32(r.DecodeInt(32)) + yyv6 := &x.Max + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } } default: - z.DecStructFieldNotFound(-1, yys1554) - } // end switch yys1554 - } // end for yyj1554 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -18122,16 +15594,16 @@ func (x *HostPortRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1557 int - var 
yyb1557 bool - var yyhl1557 bool = l >= 0 - yyj1557++ - if yyhl1557 { - yyb1557 = yyj1557 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1557 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1557 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -18139,15 +15611,21 @@ func (x *HostPortRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Min = 0 } else { - x.Min = int32(r.DecodeInt(32)) + yyv9 := &x.Min + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*int32)(yyv9)) = int32(r.DecodeInt(32)) + } } - yyj1557++ - if yyhl1557 { - yyb1557 = yyj1557 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1557 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1557 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -18155,20 +15633,26 @@ func (x *HostPortRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Max = 0 } else { - x.Max = int32(r.DecodeInt(32)) + yyv11 := &x.Max + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(yyv11)) = int32(r.DecodeInt(32)) + } } for { - yyj1557++ - if yyhl1557 { - yyb1557 = yyj1557 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1557 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1557 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1557-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -18180,31 +15664,31 @@ func (x *SELinuxStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1560 := z.EncBinary() - _ = yym1560 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1561 := !z.EncBinary() - yy2arr1561 := z.EncBasicHandle().StructToArray - var yyq1561 [2]bool - _, _, _ = yysep1561, yyq1561, yy2arr1561 - const yyr1561 bool = false - yyq1561[1] = x.SELinuxOptions != nil - var yynn1561 int - if yyr1561 || yy2arr1561 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.SELinuxOptions != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1561 = 1 - for _, b := range yyq1561 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1561++ + yynn2++ } } - r.EncodeMapStart(yynn1561) - yynn1561 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1561 || yy2arr1561 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) x.Rule.CodecEncodeSelf(e) } else { @@ -18213,9 +15697,9 @@ func (x *SELinuxStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Rule.CodecEncodeSelf(e) } - if yyr1561 || yy2arr1561 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1561[1] { + if yyq2[1] { if x.SELinuxOptions == nil { r.EncodeNil() } else { @@ -18225,7 +15709,7 @@ func (x *SELinuxStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1561[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -18236,7 +15720,7 @@ func (x *SELinuxStrategyOptions) CodecEncodeSelf(e 
*codec1978.Encoder) { } } } - if yyr1561 || yy2arr1561 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -18249,25 +15733,25 @@ func (x *SELinuxStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1564 := z.DecBinary() - _ = yym1564 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1565 := r.ContainerType() - if yyct1565 == codecSelferValueTypeMap1234 { - yyl1565 := r.ReadMapStart() - if yyl1565 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1565, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1565 == codecSelferValueTypeArray1234 { - yyl1565 := r.ReadArrayStart() - if yyl1565 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1565, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -18279,12 +15763,12 @@ func (x *SELinuxStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Deco var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1566Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1566Slc - var yyhl1566 bool = l >= 0 - for yyj1566 := 0; ; yyj1566++ { - if yyhl1566 { - if yyj1566 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -18293,15 +15777,16 @@ func (x *SELinuxStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Deco } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1566Slc = r.DecodeBytes(yys1566Slc, true, true) - yys1566 := string(yys1566Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1566 { + switch yys3 { case "rule": if r.TryDecodeAsNil() { x.Rule = "" } else { - x.Rule = SELinuxStrategy(r.DecodeString()) + yyv4 := &x.Rule + yyv4.CodecDecodeSelf(d) } case "seLinuxOptions": if r.TryDecodeAsNil() { @@ -18310,14 +15795,14 @@ func (x *SELinuxStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Deco } } else { if x.SELinuxOptions == nil { - x.SELinuxOptions = new(pkg2_v1.SELinuxOptions) + x.SELinuxOptions = new(pkg4_v1.SELinuxOptions) } x.SELinuxOptions.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys1566) - } // end switch yys1566 - } // end for yyj1566 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -18325,16 +15810,16 @@ func (x *SELinuxStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.De var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1569 int - var yyb1569 bool - var yyhl1569 bool = l >= 0 - yyj1569++ - if yyhl1569 { - yyb1569 = yyj1569 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb1569 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb1569 { + if yyb6 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -18342,15 +15827,16 @@ func (x *SELinuxStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.De if r.TryDecodeAsNil() { x.Rule = "" } else { - x.Rule = SELinuxStrategy(r.DecodeString()) + yyv7 := &x.Rule + yyv7.CodecDecodeSelf(d) } - yyj1569++ - if yyhl1569 { - yyb1569 = yyj1569 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb1569 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb1569 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -18361,22 +15847,22 @@ func (x *SELinuxStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.De } } else { if x.SELinuxOptions == nil { - x.SELinuxOptions = new(pkg2_v1.SELinuxOptions) + x.SELinuxOptions = new(pkg4_v1.SELinuxOptions) } x.SELinuxOptions.CodecDecodeSelf(d) } for { - yyj1569++ - if yyhl1569 { - yyb1569 = yyj1569 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb1569 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb1569 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1569-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -18385,8 +15871,8 @@ func (x SELinuxStrategy) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym1572 := z.EncBinary() - _ = yym1572 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -18398,8 +15884,8 @@ func (x *SELinuxStrategy) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1573 := z.DecBinary() - _ = yym1573 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -18414,31 +15900,31 @@ func (x *RunAsUserStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1574 := z.EncBinary() - _ = yym1574 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1575 := !z.EncBinary() - yy2arr1575 := z.EncBasicHandle().StructToArray - var yyq1575 [2]bool - _, _, _ = yysep1575, yyq1575, yy2arr1575 - const yyr1575 bool = false - yyq1575[1] = len(x.Ranges) != 0 - var yynn1575 int - if yyr1575 || yy2arr1575 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = len(x.Ranges) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1575 = 1 - for _, b := range yyq1575 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1575++ + yynn2++ } } - r.EncodeMapStart(yynn1575) - yynn1575 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1575 || yy2arr1575 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) x.Rule.CodecEncodeSelf(e) } else { @@ -18447,14 +15933,14 @@ func (x *RunAsUserStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Rule.CodecEncodeSelf(e) } - if yyr1575 || yy2arr1575 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1575[1] { + if yyq2[1] { if x.Ranges == nil { r.EncodeNil() } else { - yym1578 := z.EncBinary() - _ = yym1578 + yym7 := z.EncBinary() + _ = yym7 if false { } else { h.encSliceIDRange(([]IDRange)(x.Ranges), e) @@ -18464,15 +15950,15 @@ func (x 
*RunAsUserStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1575[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("ranges")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Ranges == nil { r.EncodeNil() } else { - yym1579 := z.EncBinary() - _ = yym1579 + yym8 := z.EncBinary() + _ = yym8 if false { } else { h.encSliceIDRange(([]IDRange)(x.Ranges), e) @@ -18480,7 +15966,7 @@ func (x *RunAsUserStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1575 || yy2arr1575 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -18493,25 +15979,25 @@ func (x *RunAsUserStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1580 := z.DecBinary() - _ = yym1580 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1581 := r.ContainerType() - if yyct1581 == codecSelferValueTypeMap1234 { - yyl1581 := r.ReadMapStart() - if yyl1581 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1581, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1581 == codecSelferValueTypeArray1234 { - yyl1581 := r.ReadArrayStart() - if yyl1581 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1581, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -18523,12 +16009,12 @@ func (x *RunAsUserStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.De var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1582Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1582Slc - var yyhl1582 bool = l >= 0 - for yyj1582 := 0; ; yyj1582++ { - if yyhl1582 { - if yyj1582 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -18537,32 +16023,33 @@ func (x *RunAsUserStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.De } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1582Slc = r.DecodeBytes(yys1582Slc, true, true) - yys1582 := string(yys1582Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1582 { + switch yys3 { case "rule": if r.TryDecodeAsNil() { x.Rule = "" } else { - x.Rule = RunAsUserStrategy(r.DecodeString()) + yyv4 := &x.Rule + yyv4.CodecDecodeSelf(d) } case "ranges": if r.TryDecodeAsNil() { x.Ranges = nil } else { - yyv1584 := &x.Ranges - yym1585 := z.DecBinary() - _ = yym1585 + yyv5 := &x.Ranges + yym6 := z.DecBinary() + _ = yym6 if false { } else { - h.decSliceIDRange((*[]IDRange)(yyv1584), d) + h.decSliceIDRange((*[]IDRange)(yyv5), d) } } default: - z.DecStructFieldNotFound(-1, yys1582) - } // end switch yys1582 - } // end for yyj1582 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ 
-18570,16 +16057,16 @@ func (x *RunAsUserStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978. var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1586 int - var yyb1586 bool - var yyhl1586 bool = l >= 0 - yyj1586++ - if yyhl1586 { - yyb1586 = yyj1586 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1586 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1586 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -18587,15 +16074,16 @@ func (x *RunAsUserStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978. if r.TryDecodeAsNil() { x.Rule = "" } else { - x.Rule = RunAsUserStrategy(r.DecodeString()) + yyv8 := &x.Rule + yyv8.CodecDecodeSelf(d) } - yyj1586++ - if yyhl1586 { - yyb1586 = yyj1586 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1586 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1586 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -18603,26 +16091,26 @@ func (x *RunAsUserStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978. if r.TryDecodeAsNil() { x.Ranges = nil } else { - yyv1588 := &x.Ranges - yym1589 := z.DecBinary() - _ = yym1589 + yyv9 := &x.Ranges + yym10 := z.DecBinary() + _ = yym10 if false { } else { - h.decSliceIDRange((*[]IDRange)(yyv1588), d) + h.decSliceIDRange((*[]IDRange)(yyv9), d) } } for { - yyj1586++ - if yyhl1586 { - yyb1586 = yyj1586 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1586 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1586 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1586-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -18634,33 +16122,33 @@ func (x *IDRange) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1590 := z.EncBinary() - _ = yym1590 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1591 := !z.EncBinary() - yy2arr1591 := z.EncBasicHandle().StructToArray - var yyq1591 [2]bool - _, _, _ = yysep1591, yyq1591, yy2arr1591 - const yyr1591 bool = false - var yynn1591 int - if yyr1591 || yy2arr1591 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1591 = 2 - for _, b := range yyq1591 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn1591++ + yynn2++ } } - r.EncodeMapStart(yynn1591) - yynn1591 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1591 || yy2arr1591 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1593 := z.EncBinary() - _ = yym1593 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeInt(int64(x.Min)) @@ -18669,17 +16157,17 @@ func (x *IDRange) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("min")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1594 := z.EncBinary() - _ = yym1594 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeInt(int64(x.Min)) } } - if yyr1591 || yy2arr1591 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1596 := z.EncBinary() - _ = yym1596 + yym7 := z.EncBinary() + _ = yym7 if false { 
} else { r.EncodeInt(int64(x.Max)) @@ -18688,14 +16176,14 @@ func (x *IDRange) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("max")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1597 := z.EncBinary() - _ = yym1597 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeInt(int64(x.Max)) } } - if yyr1591 || yy2arr1591 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -18708,25 +16196,25 @@ func (x *IDRange) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1598 := z.DecBinary() - _ = yym1598 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1599 := r.ContainerType() - if yyct1599 == codecSelferValueTypeMap1234 { - yyl1599 := r.ReadMapStart() - if yyl1599 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1599, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1599 == codecSelferValueTypeArray1234 { - yyl1599 := r.ReadArrayStart() - if yyl1599 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1599, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -18738,12 +16226,12 @@ func (x *IDRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1600Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1600Slc - var yyhl1600 bool = l >= 0 - for yyj1600 := 0; ; yyj1600++ { - if yyhl1600 { - if yyj1600 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -18752,26 +16240,38 @@ func (x *IDRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1600Slc = r.DecodeBytes(yys1600Slc, true, true) - yys1600 := string(yys1600Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1600 { + switch yys3 { case "min": if r.TryDecodeAsNil() { x.Min = 0 } else { - x.Min = int64(r.DecodeInt(64)) + yyv4 := &x.Min + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } } case "max": if r.TryDecodeAsNil() { x.Max = 0 } else { - x.Max = int64(r.DecodeInt(64)) + yyv6 := &x.Max + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int64)(yyv6)) = int64(r.DecodeInt(64)) + } } default: - z.DecStructFieldNotFound(-1, yys1600) - } // end switch yys1600 - } // end for yyj1600 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -18779,16 +16279,16 @@ func (x *IDRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1603 int - var yyb1603 bool - var 
yyhl1603 bool = l >= 0 - yyj1603++ - if yyhl1603 { - yyb1603 = yyj1603 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1603 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1603 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -18796,15 +16296,21 @@ func (x *IDRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Min = 0 } else { - x.Min = int64(r.DecodeInt(64)) + yyv9 := &x.Min + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*int64)(yyv9)) = int64(r.DecodeInt(64)) + } } - yyj1603++ - if yyhl1603 { - yyb1603 = yyj1603 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1603 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1603 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -18812,20 +16318,26 @@ func (x *IDRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Max = 0 } else { - x.Max = int64(r.DecodeInt(64)) + yyv11 := &x.Max + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int64)(yyv11)) = int64(r.DecodeInt(64)) + } } for { - yyj1603++ - if yyhl1603 { - yyb1603 = yyj1603 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1603 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1603 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1603-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -18834,8 +16346,8 @@ func (x RunAsUserStrategy) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym1606 := z.EncBinary() - _ = yym1606 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -18847,8 +16359,8 @@ func (x *RunAsUserStrategy) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1607 := z.DecBinary() - _ = yym1607 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -18863,54 +16375,54 @@ func (x *FSGroupStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1608 := z.EncBinary() - _ = yym1608 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1609 := !z.EncBinary() - yy2arr1609 := z.EncBasicHandle().StructToArray - var yyq1609 [2]bool - _, _, _ = yysep1609, yyq1609, yy2arr1609 - const yyr1609 bool = false - yyq1609[0] = x.Rule != "" - yyq1609[1] = len(x.Ranges) != 0 - var yynn1609 int - if yyr1609 || yy2arr1609 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Rule != "" + yyq2[1] = len(x.Ranges) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1609 = 0 - for _, b := range yyq1609 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1609++ + yynn2++ } } - r.EncodeMapStart(yynn1609) - yynn1609 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1609 || yy2arr1609 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1609[0] { + if yyq2[0] { x.Rule.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1609[0] { + if yyq2[0] { 
z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("rule")) z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Rule.CodecEncodeSelf(e) } } - if yyr1609 || yy2arr1609 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1609[1] { + if yyq2[1] { if x.Ranges == nil { r.EncodeNil() } else { - yym1612 := z.EncBinary() - _ = yym1612 + yym7 := z.EncBinary() + _ = yym7 if false { } else { h.encSliceIDRange(([]IDRange)(x.Ranges), e) @@ -18920,15 +16432,15 @@ func (x *FSGroupStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1609[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("ranges")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Ranges == nil { r.EncodeNil() } else { - yym1613 := z.EncBinary() - _ = yym1613 + yym8 := z.EncBinary() + _ = yym8 if false { } else { h.encSliceIDRange(([]IDRange)(x.Ranges), e) @@ -18936,7 +16448,7 @@ func (x *FSGroupStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1609 || yy2arr1609 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -18949,25 +16461,25 @@ func (x *FSGroupStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1614 := z.DecBinary() - _ = yym1614 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1615 := r.ContainerType() - if yyct1615 == codecSelferValueTypeMap1234 { - yyl1615 := r.ReadMapStart() - if yyl1615 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1615, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1615 == codecSelferValueTypeArray1234 { - yyl1615 := r.ReadArrayStart() - if yyl1615 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1615, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -18979,12 +16491,12 @@ func (x *FSGroupStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Deco var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1616Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1616Slc - var yyhl1616 bool = l >= 0 - for yyj1616 := 0; ; yyj1616++ { - if yyhl1616 { - if yyj1616 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -18993,32 +16505,33 @@ func (x *FSGroupStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Deco } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1616Slc = r.DecodeBytes(yys1616Slc, true, true) - yys1616 := string(yys1616Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1616 { + switch yys3 { case "rule": if r.TryDecodeAsNil() { x.Rule = "" } else { - x.Rule = FSGroupStrategyType(r.DecodeString()) + yyv4 := &x.Rule + 
yyv4.CodecDecodeSelf(d) } case "ranges": if r.TryDecodeAsNil() { x.Ranges = nil } else { - yyv1618 := &x.Ranges - yym1619 := z.DecBinary() - _ = yym1619 + yyv5 := &x.Ranges + yym6 := z.DecBinary() + _ = yym6 if false { } else { - h.decSliceIDRange((*[]IDRange)(yyv1618), d) + h.decSliceIDRange((*[]IDRange)(yyv5), d) } } default: - z.DecStructFieldNotFound(-1, yys1616) - } // end switch yys1616 - } // end for yyj1616 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -19026,16 +16539,16 @@ func (x *FSGroupStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.De var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1620 int - var yyb1620 bool - var yyhl1620 bool = l >= 0 - yyj1620++ - if yyhl1620 { - yyb1620 = yyj1620 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1620 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1620 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -19043,15 +16556,16 @@ func (x *FSGroupStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.De if r.TryDecodeAsNil() { x.Rule = "" } else { - x.Rule = FSGroupStrategyType(r.DecodeString()) + yyv8 := &x.Rule + yyv8.CodecDecodeSelf(d) } - yyj1620++ - if yyhl1620 { - yyb1620 = yyj1620 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1620 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1620 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -19059,26 +16573,26 @@ func (x *FSGroupStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.De if r.TryDecodeAsNil() { x.Ranges = nil } else { - yyv1622 := &x.Ranges - yym1623 := z.DecBinary() - _ = yym1623 + yyv9 := &x.Ranges + yym10 := z.DecBinary() + _ = yym10 if false { } else { - h.decSliceIDRange((*[]IDRange)(yyv1622), d) + h.decSliceIDRange((*[]IDRange)(yyv9), d) } } for { - yyj1620++ - if yyhl1620 { - yyb1620 = yyj1620 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1620 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1620 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1620-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -19087,8 +16601,8 @@ func (x FSGroupStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym1624 := z.EncBinary() - _ = yym1624 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { @@ -19100,8 +16614,8 @@ func (x *FSGroupStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1625 := z.DecBinary() - _ = yym1625 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -19116,54 +16630,54 @@ func (x *SupplementalGroupsStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder if x == nil { r.EncodeNil() } else { - yym1626 := z.EncBinary() - _ = yym1626 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1627 := !z.EncBinary() - yy2arr1627 := z.EncBasicHandle().StructToArray - var yyq1627 [2]bool - _, _, _ = yysep1627, yyq1627, yy2arr1627 - const yyr1627 bool = false - yyq1627[0] = x.Rule != "" - yyq1627[1] = len(x.Ranges) != 0 - var 
yynn1627 int - if yyr1627 || yy2arr1627 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Rule != "" + yyq2[1] = len(x.Ranges) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1627 = 0 - for _, b := range yyq1627 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1627++ + yynn2++ } } - r.EncodeMapStart(yynn1627) - yynn1627 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1627 || yy2arr1627 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1627[0] { + if yyq2[0] { x.Rule.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1627[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("rule")) z.EncSendContainerState(codecSelfer_containerMapValue1234) x.Rule.CodecEncodeSelf(e) } } - if yyr1627 || yy2arr1627 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1627[1] { + if yyq2[1] { if x.Ranges == nil { r.EncodeNil() } else { - yym1630 := z.EncBinary() - _ = yym1630 + yym7 := z.EncBinary() + _ = yym7 if false { } else { h.encSliceIDRange(([]IDRange)(x.Ranges), e) @@ -19173,15 +16687,15 @@ func (x *SupplementalGroupsStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder r.EncodeNil() } } else { - if yyq1627[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("ranges")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Ranges == nil { r.EncodeNil() } else { - yym1631 := z.EncBinary() - _ = yym1631 + yym8 := z.EncBinary() + _ = yym8 if false { } else { h.encSliceIDRange(([]IDRange)(x.Ranges), e) @@ -19189,7 +16703,7 @@ func (x *SupplementalGroupsStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder } } } - if yyr1627 || yy2arr1627 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -19202,25 +16716,25 @@ func (x *SupplementalGroupsStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1632 := z.DecBinary() - _ = yym1632 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1633 := r.ContainerType() - if yyct1633 == codecSelferValueTypeMap1234 { - yyl1633 := r.ReadMapStart() - if yyl1633 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1633, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1633 == codecSelferValueTypeArray1234 { - yyl1633 := r.ReadArrayStart() - if yyl1633 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1633, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -19232,12 +16746,12 @@ func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromMap(l int, d *cod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1634Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1634Slc - var yyhl1634 bool 
= l >= 0 - for yyj1634 := 0; ; yyj1634++ { - if yyhl1634 { - if yyj1634 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -19246,32 +16760,33 @@ func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromMap(l int, d *cod } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1634Slc = r.DecodeBytes(yys1634Slc, true, true) - yys1634 := string(yys1634Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1634 { + switch yys3 { case "rule": if r.TryDecodeAsNil() { x.Rule = "" } else { - x.Rule = SupplementalGroupsStrategyType(r.DecodeString()) + yyv4 := &x.Rule + yyv4.CodecDecodeSelf(d) } case "ranges": if r.TryDecodeAsNil() { x.Ranges = nil } else { - yyv1636 := &x.Ranges - yym1637 := z.DecBinary() - _ = yym1637 + yyv5 := &x.Ranges + yym6 := z.DecBinary() + _ = yym6 if false { } else { - h.decSliceIDRange((*[]IDRange)(yyv1636), d) + h.decSliceIDRange((*[]IDRange)(yyv5), d) } } default: - z.DecStructFieldNotFound(-1, yys1634) - } // end switch yys1634 - } // end for yyj1634 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -19279,16 +16794,16 @@ func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromArray(l int, d *c var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1638 int - var yyb1638 bool - var yyhl1638 bool = l >= 0 - yyj1638++ - if yyhl1638 { - yyb1638 = yyj1638 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1638 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1638 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -19296,15 +16811,16 @@ func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromArray(l int, d *c if r.TryDecodeAsNil() { x.Rule = "" } else { - x.Rule = SupplementalGroupsStrategyType(r.DecodeString()) + yyv8 := &x.Rule + yyv8.CodecDecodeSelf(d) } - yyj1638++ - if yyhl1638 { - yyb1638 = yyj1638 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1638 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1638 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -19312,26 +16828,26 @@ func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromArray(l int, d *c if r.TryDecodeAsNil() { x.Ranges = nil } else { - yyv1640 := &x.Ranges - yym1641 := z.DecBinary() - _ = yym1641 + yyv9 := &x.Ranges + yym10 := z.DecBinary() + _ = yym10 if false { } else { - h.decSliceIDRange((*[]IDRange)(yyv1640), d) + h.decSliceIDRange((*[]IDRange)(yyv9), d) } } for { - yyj1638++ - if yyhl1638 { - yyb1638 = yyj1638 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1638 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1638 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1638-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -19340,8 +16856,8 @@ func (x SupplementalGroupsStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - yym1642 := z.EncBinary() - _ = yym1642 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { 
@@ -19353,8 +16869,8 @@ func (x *SupplementalGroupsStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1643 := z.DecBinary() - _ = yym1643 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { @@ -19369,37 +16885,37 @@ func (x *PodSecurityPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1644 := z.EncBinary() - _ = yym1644 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1645 := !z.EncBinary() - yy2arr1645 := z.EncBasicHandle().StructToArray - var yyq1645 [4]bool - _, _, _ = yysep1645, yyq1645, yy2arr1645 - const yyr1645 bool = false - yyq1645[0] = x.Kind != "" - yyq1645[1] = x.APIVersion != "" - yyq1645[2] = true - var yynn1645 int - if yyr1645 || yy2arr1645 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn1645 = 1 - for _, b := range yyq1645 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1645++ + yynn2++ } } - r.EncodeMapStart(yynn1645) - yynn1645 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1645 || yy2arr1645 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1645[0] { - yym1647 := z.EncBinary() - _ = yym1647 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -19408,23 +16924,23 @@ func (x *PodSecurityPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1645[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1648 := z.EncBinary() - _ = yym1648 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr1645 || yy2arr1645 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1645[1] { - yym1650 := z.EncBinary() - _ = yym1650 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -19433,54 +16949,54 @@ func (x *PodSecurityPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1645[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1651 := z.EncBinary() - _ = yym1651 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr1645 || yy2arr1645 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1645[2] { - yy1653 := &x.ListMeta - yym1654 := z.EncBinary() - _ = yym1654 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy1653) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy1653) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq1645[2] { + if yyq2[2] { 
z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1655 := &x.ListMeta - yym1656 := z.EncBinary() - _ = yym1656 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy1655) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy1655) + z.EncFallback(yy12) } } } - if yyr1645 || yy2arr1645 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym1658 := z.EncBinary() - _ = yym1658 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) @@ -19493,15 +17009,15 @@ func (x *PodSecurityPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym1659 := z.EncBinary() - _ = yym1659 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) } } } - if yyr1645 || yy2arr1645 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -19514,25 +17030,25 @@ func (x *PodSecurityPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1660 := z.DecBinary() - _ = yym1660 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1661 := r.ContainerType() - if yyct1661 == codecSelferValueTypeMap1234 { - yyl1661 := r.ReadMapStart() - if yyl1661 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1661, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1661 == codecSelferValueTypeArray1234 { - yyl1661 := r.ReadArrayStart() - if yyl1661 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1661, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -19544,12 +17060,12 @@ func (x *PodSecurityPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1662Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1662Slc - var yyhl1662 bool = l >= 0 - for yyj1662 := 0; ; yyj1662++ { - if yyhl1662 { - if yyj1662 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -19558,51 +17074,63 @@ func (x *PodSecurityPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decod } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1662Slc = r.DecodeBytes(yys1662Slc, true, true) - yys1662 := string(yys1662Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1662 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + 
*((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv1665 := &x.ListMeta - yym1666 := z.DecBinary() - _ = yym1666 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv1665) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv1665, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv1667 := &x.Items - yym1668 := z.DecBinary() - _ = yym1668 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv1667), d) + h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys1662) - } // end switch yys1662 - } // end for yyj1662 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -19610,16 +17138,16 @@ func (x *PodSecurityPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1669 int - var yyb1669 bool - var yyhl1669 bool = l >= 0 - yyj1669++ - if yyhl1669 { - yyb1669 = yyj1669 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1669 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1669 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -19627,15 +17155,21 @@ func (x *PodSecurityPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj1669++ - if yyhl1669 { - yyb1669 = yyj1669 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1669 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1669 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -19643,38 +17177,44 @@ func (x *PodSecurityPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj1669++ - if yyhl1669 { - yyb1669 = yyj1669 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1669 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1669 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv1672 := &x.ListMeta - yym1673 := z.DecBinary() - _ = yym1673 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv1672) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv1672, false) + z.DecFallback(yyv17, false) } } - yyj1669++ - if yyhl1669 { - yyb1669 = yyj1669 > 
l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1669 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1669 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -19682,26 +17222,26 @@ func (x *PodSecurityPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.Items = nil } else { - yyv1674 := &x.Items - yym1675 := z.DecBinary() - _ = yym1675 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv1674), d) + h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv19), d) } } for { - yyj1669++ - if yyhl1669 { - yyb1669 = yyj1669 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1669 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1669 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1669-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -19713,38 +17253,38 @@ func (x *NetworkPolicy) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1676 := z.EncBinary() - _ = yym1676 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1677 := !z.EncBinary() - yy2arr1677 := z.EncBasicHandle().StructToArray - var yyq1677 [4]bool - _, _, _ = yysep1677, yyq1677, yy2arr1677 - const yyr1677 bool = false - yyq1677[0] = x.Kind != "" - yyq1677[1] = x.APIVersion != "" - yyq1677[2] = true - yyq1677[3] = true - var yynn1677 int - if yyr1677 || yy2arr1677 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn1677 = 0 - for _, b := range yyq1677 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1677++ + yynn2++ } } - r.EncodeMapStart(yynn1677) - yynn1677 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1677 || yy2arr1677 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1677[0] { - yym1679 := z.EncBinary() - _ = yym1679 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -19753,23 +17293,23 @@ func (x *NetworkPolicy) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1677[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1680 := z.EncBinary() - _ = yym1680 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr1677 || yy2arr1677 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1677[1] { - yym1682 := z.EncBinary() - _ = yym1682 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -19778,53 +17318,65 @@ func (x *NetworkPolicy) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1677[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1683 := z.EncBinary() - _ = yym1683 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr1677 || yy2arr1677 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1677[2] { - yy1685 := &x.ObjectMeta - yy1685.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq1677[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1686 := &x.ObjectMeta - yy1686.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr1677 || yy2arr1677 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1677[3] { - yy1688 := &x.Spec - yy1688.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq1677[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1689 := &x.Spec - yy1689.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } } - if yyr1677 || yy2arr1677 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -19837,25 +17389,25 @@ func (x *NetworkPolicy) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1690 := z.DecBinary() - _ = yym1690 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1691 := r.ContainerType() - if yyct1691 == codecSelferValueTypeMap1234 { - yyl1691 := r.ReadMapStart() - if yyl1691 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1691, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1691 == codecSelferValueTypeArray1234 { - yyl1691 := r.ReadArrayStart() - if yyl1691 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1691, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -19867,12 +17419,12 @@ func (x *NetworkPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1692Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1692Slc - var yyhl1692 bool = l >= 0 - for yyj1692 := 0; ; yyj1692++ { - if yyhl1692 { - if yyj1692 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -19881,40 +17433,58 @@ func (x *NetworkPolicy) codecDecodeSelfFromMap(l int, d 
*codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1692Slc = r.DecodeBytes(yys1692Slc, true, true) - yys1692 := string(yys1692Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1692 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv1695 := &x.ObjectMeta - yyv1695.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = NetworkPolicySpec{} } else { - yyv1696 := &x.Spec - yyv1696.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys1692) - } // end switch yys1692 - } // end for yyj1692 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -19922,16 +17492,16 @@ func (x *NetworkPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1697 int - var yyb1697 bool - var yyhl1697 bool = l >= 0 - yyj1697++ - if yyhl1697 { - yyb1697 = yyj1697 > l + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb1697 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb1697 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -19939,15 +17509,21 @@ func (x *NetworkPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } } - yyj1697++ - if yyhl1697 { - yyb1697 = yyj1697 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb1697 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb1697 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -19955,32 +17531,44 @@ func (x *NetworkPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } - yyj1697++ - if yyhl1697 { - yyb1697 = yyj1697 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb1697 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb1697 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv1700 := &x.ObjectMeta - yyv1700.CodecDecodeSelf(d) + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = 
yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } } - yyj1697++ - if yyhl1697 { - yyb1697 = yyj1697 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb1697 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb1697 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -19988,21 +17576,21 @@ func (x *NetworkPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Spec = NetworkPolicySpec{} } else { - yyv1701 := &x.Spec - yyv1701.CodecDecodeSelf(d) + yyv18 := &x.Spec + yyv18.CodecDecodeSelf(d) } for { - yyj1697++ - if yyhl1697 { - yyb1697 = yyj1697 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb1697 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb1697 { + if yyb11 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1697-1, "") + z.DecStructFieldNotFound(yyj11-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -20014,61 +17602,61 @@ func (x *NetworkPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1702 := z.EncBinary() - _ = yym1702 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1703 := !z.EncBinary() - yy2arr1703 := z.EncBasicHandle().StructToArray - var yyq1703 [2]bool - _, _, _ = yysep1703, yyq1703, yy2arr1703 - const yyr1703 bool = false - yyq1703[1] = len(x.Ingress) != 0 - var yynn1703 int - if yyr1703 || yy2arr1703 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = len(x.Ingress) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1703 = 1 - for _, b := range yyq1703 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1703++ + yynn2++ } } - r.EncodeMapStart(yynn1703) - yynn1703 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1703 || yy2arr1703 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1705 := &x.PodSelector - yym1706 := z.EncBinary() - _ = yym1706 + yy4 := &x.PodSelector + yym5 := z.EncBinary() + _ = yym5 if false { - } else if z.HasExtensions() && z.EncExt(yy1705) { + } else if z.HasExtensions() && z.EncExt(yy4) { } else { - z.EncFallback(yy1705) + z.EncFallback(yy4) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("podSelector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1707 := &x.PodSelector - yym1708 := z.EncBinary() - _ = yym1708 + yy6 := &x.PodSelector + yym7 := z.EncBinary() + _ = yym7 if false { - } else if z.HasExtensions() && z.EncExt(yy1707) { + } else if z.HasExtensions() && z.EncExt(yy6) { } else { - z.EncFallback(yy1707) + z.EncFallback(yy6) } } - if yyr1703 || yy2arr1703 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1703[1] { + if yyq2[1] { if x.Ingress == nil { r.EncodeNil() } else { - yym1710 := z.EncBinary() - _ = yym1710 + yym9 := z.EncBinary() + _ = yym9 if false { } else { h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e) @@ -20078,15 +17666,15 @@ func (x *NetworkPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1703[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("ingress")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Ingress == nil { r.EncodeNil() } else { - yym1711 := z.EncBinary() - _ = yym1711 + yym10 := z.EncBinary() + _ = yym10 if false { } else { h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e) @@ -20094,7 +17682,7 @@ func (x *NetworkPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1703 || yy2arr1703 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -20107,25 +17695,25 @@ func (x *NetworkPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1712 := z.DecBinary() - _ = yym1712 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1713 := r.ContainerType() - if yyct1713 == codecSelferValueTypeMap1234 { - yyl1713 := r.ReadMapStart() - if yyl1713 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1713, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1713 == codecSelferValueTypeArray1234 { - yyl1713 := r.ReadArrayStart() - if yyl1713 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1713, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -20137,12 +17725,12 @@ func (x *NetworkPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1714Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1714Slc - var yyhl1714 bool = l >= 0 - for yyj1714 := 0; ; yyj1714++ { - if yyhl1714 { - if yyj1714 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -20151,39 +17739,39 @@ func (x *NetworkPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1714Slc = r.DecodeBytes(yys1714Slc, true, true) - yys1714 := string(yys1714Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1714 { + switch yys3 { case "podSelector": if r.TryDecodeAsNil() { - x.PodSelector = pkg1_unversioned.LabelSelector{} + x.PodSelector = pkg1_v1.LabelSelector{} } else { - yyv1715 := &x.PodSelector - yym1716 := z.DecBinary() - _ = yym1716 + yyv4 := &x.PodSelector + yym5 := z.DecBinary() + _ = yym5 if false { - } else if z.HasExtensions() && z.DecExt(yyv1715) { + } else if z.HasExtensions() && z.DecExt(yyv4) { } else { - z.DecFallback(yyv1715, false) + z.DecFallback(yyv4, false) } } case "ingress": if r.TryDecodeAsNil() { x.Ingress = nil } else { - yyv1717 := &x.Ingress - yym1718 := z.DecBinary() - _ = yym1718 + yyv6 := &x.Ingress + yym7 := z.DecBinary() + _ = yym7 if false { } else { - h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv1717), d) + h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv6), d) } } default: - z.DecStructFieldNotFound(-1, yys1714) - } // end switch 
yys1714 - } // end for yyj1714 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -20191,39 +17779,39 @@ func (x *NetworkPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1719 int - var yyb1719 bool - var yyhl1719 bool = l >= 0 - yyj1719++ - if yyhl1719 { - yyb1719 = yyj1719 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1719 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1719 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.PodSelector = pkg1_unversioned.LabelSelector{} + x.PodSelector = pkg1_v1.LabelSelector{} } else { - yyv1720 := &x.PodSelector - yym1721 := z.DecBinary() - _ = yym1721 + yyv9 := &x.PodSelector + yym10 := z.DecBinary() + _ = yym10 if false { - } else if z.HasExtensions() && z.DecExt(yyv1720) { + } else if z.HasExtensions() && z.DecExt(yyv9) { } else { - z.DecFallback(yyv1720, false) + z.DecFallback(yyv9, false) } } - yyj1719++ - if yyhl1719 { - yyb1719 = yyj1719 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1719 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1719 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -20231,26 +17819,26 @@ func (x *NetworkPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.Ingress = nil } else { - yyv1722 := &x.Ingress - yym1723 := z.DecBinary() - _ = yym1723 + yyv11 := &x.Ingress + yym12 := z.DecBinary() + _ = yym12 if false { } else { - h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv1722), d) + h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv11), d) } } for { - yyj1719++ - if yyhl1719 { - yyb1719 = yyj1719 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1719 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1719 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1719-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -20262,39 +17850,39 @@ func (x *NetworkPolicyIngressRule) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1724 := z.EncBinary() - _ = yym1724 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1725 := !z.EncBinary() - yy2arr1725 := z.EncBasicHandle().StructToArray - var yyq1725 [2]bool - _, _, _ = yysep1725, yyq1725, yy2arr1725 - const yyr1725 bool = false - yyq1725[0] = len(x.Ports) != 0 - yyq1725[1] = len(x.From) != 0 - var yynn1725 int - if yyr1725 || yy2arr1725 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Ports) != 0 + yyq2[1] = len(x.From) != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1725 = 0 - for _, b := range yyq1725 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1725++ + yynn2++ } } - r.EncodeMapStart(yynn1725) - yynn1725 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1725 || yy2arr1725 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1725[0] { + if yyq2[0] { if x.Ports == nil { 
r.EncodeNil() } else { - yym1727 := z.EncBinary() - _ = yym1727 + yym4 := z.EncBinary() + _ = yym4 if false { } else { h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e) @@ -20304,15 +17892,15 @@ func (x *NetworkPolicyIngressRule) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1725[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("ports")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Ports == nil { r.EncodeNil() } else { - yym1728 := z.EncBinary() - _ = yym1728 + yym5 := z.EncBinary() + _ = yym5 if false { } else { h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e) @@ -20320,14 +17908,14 @@ func (x *NetworkPolicyIngressRule) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1725 || yy2arr1725 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1725[1] { + if yyq2[1] { if x.From == nil { r.EncodeNil() } else { - yym1730 := z.EncBinary() - _ = yym1730 + yym7 := z.EncBinary() + _ = yym7 if false { } else { h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e) @@ -20337,15 +17925,15 @@ func (x *NetworkPolicyIngressRule) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1725[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("from")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.From == nil { r.EncodeNil() } else { - yym1731 := z.EncBinary() - _ = yym1731 + yym8 := z.EncBinary() + _ = yym8 if false { } else { h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e) @@ -20353,7 +17941,7 @@ func (x *NetworkPolicyIngressRule) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1725 || yy2arr1725 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -20366,25 +17954,25 @@ func (x *NetworkPolicyIngressRule) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1732 := z.DecBinary() - _ = yym1732 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1733 := r.ContainerType() - if yyct1733 == codecSelferValueTypeMap1234 { - yyl1733 := r.ReadMapStart() - if yyl1733 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1733, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1733 == codecSelferValueTypeArray1234 { - yyl1733 := r.ReadArrayStart() - if yyl1733 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1733, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -20396,12 +17984,12 @@ func (x *NetworkPolicyIngressRule) codecDecodeSelfFromMap(l int, d *codec1978.De var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1734Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1734Slc - var yyhl1734 bool = l >= 0 - for yyj1734 := 0; ; yyj1734++ { - if yyhl1734 { - if yyj1734 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = 
yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -20410,38 +17998,38 @@ func (x *NetworkPolicyIngressRule) codecDecodeSelfFromMap(l int, d *codec1978.De } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1734Slc = r.DecodeBytes(yys1734Slc, true, true) - yys1734 := string(yys1734Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1734 { + switch yys3 { case "ports": if r.TryDecodeAsNil() { x.Ports = nil } else { - yyv1735 := &x.Ports - yym1736 := z.DecBinary() - _ = yym1736 + yyv4 := &x.Ports + yym5 := z.DecBinary() + _ = yym5 if false { } else { - h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv1735), d) + h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv4), d) } } case "from": if r.TryDecodeAsNil() { x.From = nil } else { - yyv1737 := &x.From - yym1738 := z.DecBinary() - _ = yym1738 + yyv6 := &x.From + yym7 := z.DecBinary() + _ = yym7 if false { } else { - h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv1737), d) + h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv6), d) } } default: - z.DecStructFieldNotFound(-1, yys1734) - } // end switch yys1734 - } // end for yyj1734 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -20449,16 +18037,16 @@ func (x *NetworkPolicyIngressRule) codecDecodeSelfFromArray(l int, d *codec1978. var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1739 int - var yyb1739 bool - var yyhl1739 bool = l >= 0 - yyj1739++ - if yyhl1739 { - yyb1739 = yyj1739 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1739 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1739 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -20466,21 +18054,21 @@ func (x *NetworkPolicyIngressRule) codecDecodeSelfFromArray(l int, d *codec1978. if r.TryDecodeAsNil() { x.Ports = nil } else { - yyv1740 := &x.Ports - yym1741 := z.DecBinary() - _ = yym1741 + yyv9 := &x.Ports + yym10 := z.DecBinary() + _ = yym10 if false { } else { - h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv1740), d) + h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv9), d) } } - yyj1739++ - if yyhl1739 { - yyb1739 = yyj1739 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1739 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1739 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -20488,26 +18076,26 @@ func (x *NetworkPolicyIngressRule) codecDecodeSelfFromArray(l int, d *codec1978. 
if r.TryDecodeAsNil() { x.From = nil } else { - yyv1742 := &x.From - yym1743 := z.DecBinary() - _ = yym1743 + yyv11 := &x.From + yym12 := z.DecBinary() + _ = yym12 if false { } else { - h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv1742), d) + h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv11), d) } } for { - yyj1739++ - if yyhl1739 { - yyb1739 = yyj1739 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1739 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1739 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1739-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -20519,79 +18107,69 @@ func (x *NetworkPolicyPort) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1744 := z.EncBinary() - _ = yym1744 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1745 := !z.EncBinary() - yy2arr1745 := z.EncBasicHandle().StructToArray - var yyq1745 [2]bool - _, _, _ = yysep1745, yyq1745, yy2arr1745 - const yyr1745 bool = false - yyq1745[0] = x.Protocol != nil - yyq1745[1] = x.Port != nil - var yynn1745 int - if yyr1745 || yy2arr1745 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Protocol != nil + yyq2[1] = x.Port != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1745 = 0 - for _, b := range yyq1745 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1745++ + yynn2++ } } - r.EncodeMapStart(yynn1745) - yynn1745 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1745 || yy2arr1745 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1745[0] { + if yyq2[0] { if x.Protocol == nil { r.EncodeNil() } else { - yy1747 := *x.Protocol - yym1748 := z.EncBinary() - _ = yym1748 - if false { - } else if z.HasExtensions() && z.EncExt(yy1747) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yy1747)) - } + yy4 := *x.Protocol + yysf5 := &yy4 + yysf5.CodecEncodeSelf(e) } } else { r.EncodeNil() } } else { - if yyq1745[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("protocol")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Protocol == nil { r.EncodeNil() } else { - yy1749 := *x.Protocol - yym1750 := z.EncBinary() - _ = yym1750 - if false { - } else if z.HasExtensions() && z.EncExt(yy1749) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yy1749)) - } + yy6 := *x.Protocol + yysf7 := &yy6 + yysf7.CodecEncodeSelf(e) } } } - if yyr1745 || yy2arr1745 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1745[1] { + if yyq2[1] { if x.Port == nil { r.EncodeNil() } else { - yym1752 := z.EncBinary() - _ = yym1752 + yym9 := z.EncBinary() + _ = yym9 if false { } else if z.HasExtensions() && z.EncExt(x.Port) { - } else if !yym1752 && z.IsJSONHandle() { + } else if !yym9 && z.IsJSONHandle() { z.EncJSONMarshal(x.Port) } else { z.EncFallback(x.Port) @@ -20601,18 +18179,18 @@ func (x *NetworkPolicyPort) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1745[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("port")) z.EncSendContainerState(codecSelfer_containerMapValue1234) 
if x.Port == nil { r.EncodeNil() } else { - yym1753 := z.EncBinary() - _ = yym1753 + yym10 := z.EncBinary() + _ = yym10 if false { } else if z.HasExtensions() && z.EncExt(x.Port) { - } else if !yym1753 && z.IsJSONHandle() { + } else if !yym10 && z.IsJSONHandle() { z.EncJSONMarshal(x.Port) } else { z.EncFallback(x.Port) @@ -20620,7 +18198,7 @@ func (x *NetworkPolicyPort) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1745 || yy2arr1745 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -20633,25 +18211,25 @@ func (x *NetworkPolicyPort) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1754 := z.DecBinary() - _ = yym1754 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1755 := r.ContainerType() - if yyct1755 == codecSelferValueTypeMap1234 { - yyl1755 := r.ReadMapStart() - if yyl1755 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1755, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1755 == codecSelferValueTypeArray1234 { - yyl1755 := r.ReadArrayStart() - if yyl1755 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1755, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -20663,12 +18241,12 @@ func (x *NetworkPolicyPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1756Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1756Slc - var yyhl1756 bool = l >= 0 - for yyj1756 := 0; ; yyj1756++ { - if yyhl1756 { - if yyj1756 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -20677,10 +18255,10 @@ func (x *NetworkPolicyPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1756Slc = r.DecodeBytes(yys1756Slc, true, true) - yys1756 := string(yys1756Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1756 { + switch yys3 { case "protocol": if r.TryDecodeAsNil() { if x.Protocol != nil { @@ -20688,7 +18266,7 @@ func (x *NetworkPolicyPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } else { if x.Protocol == nil { - x.Protocol = new(pkg2_v1.Protocol) + x.Protocol = new(pkg4_v1.Protocol) } x.Protocol.CodecDecodeSelf(d) } @@ -20701,20 +18279,20 @@ func (x *NetworkPolicyPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) if x.Port == nil { x.Port = new(pkg5_intstr.IntOrString) } - yym1759 := z.DecBinary() - _ = yym1759 + yym6 := z.DecBinary() + _ = yym6 if false { } else if z.HasExtensions() && z.DecExt(x.Port) { - } else if !yym1759 && z.IsJSONHandle() { + } else if !yym6 && z.IsJSONHandle() { z.DecJSONUnmarshal(x.Port) } else { z.DecFallback(x.Port, false) } } default: - z.DecStructFieldNotFound(-1, yys1756) - } // end switch yys1756 - } // end for yyj1756 
+ z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -20722,16 +18300,16 @@ func (x *NetworkPolicyPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1760 int - var yyb1760 bool - var yyhl1760 bool = l >= 0 - yyj1760++ - if yyhl1760 { - yyb1760 = yyj1760 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1760 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1760 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -20742,17 +18320,17 @@ func (x *NetworkPolicyPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder } } else { if x.Protocol == nil { - x.Protocol = new(pkg2_v1.Protocol) + x.Protocol = new(pkg4_v1.Protocol) } x.Protocol.CodecDecodeSelf(d) } - yyj1760++ - if yyhl1760 { - yyb1760 = yyj1760 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1760 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1760 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -20765,28 +18343,28 @@ func (x *NetworkPolicyPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if x.Port == nil { x.Port = new(pkg5_intstr.IntOrString) } - yym1763 := z.DecBinary() - _ = yym1763 + yym10 := z.DecBinary() + _ = yym10 if false { } else if z.HasExtensions() && z.DecExt(x.Port) { - } else if !yym1763 && z.IsJSONHandle() { + } else if !yym10 && z.IsJSONHandle() { z.DecJSONUnmarshal(x.Port) } else { z.DecFallback(x.Port, false) } } for { - yyj1760++ - if yyhl1760 { - yyb1760 = yyj1760 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb1760 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb1760 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1760-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -20798,39 +18376,39 @@ func (x *NetworkPolicyPeer) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1764 := z.EncBinary() - _ = yym1764 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1765 := !z.EncBinary() - yy2arr1765 := z.EncBasicHandle().StructToArray - var yyq1765 [2]bool - _, _, _ = yysep1765, yyq1765, yy2arr1765 - const yyr1765 bool = false - yyq1765[0] = x.PodSelector != nil - yyq1765[1] = x.NamespaceSelector != nil - var yynn1765 int - if yyr1765 || yy2arr1765 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.PodSelector != nil + yyq2[1] = x.NamespaceSelector != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn1765 = 0 - for _, b := range yyq1765 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn1765++ + yynn2++ } } - r.EncodeMapStart(yynn1765) - yynn1765 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1765 || yy2arr1765 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1765[0] { + if yyq2[0] { if x.PodSelector == nil { r.EncodeNil() } else { - yym1767 := z.EncBinary() - _ = yym1767 + yym4 := z.EncBinary() + _ = yym4 if false { } else if z.HasExtensions() && z.EncExt(x.PodSelector) { } else { @@ -20841,15 +18419,15 @@ func (x *NetworkPolicyPeer) CodecEncodeSelf(e 
*codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1765[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("podSelector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.PodSelector == nil { r.EncodeNil() } else { - yym1768 := z.EncBinary() - _ = yym1768 + yym5 := z.EncBinary() + _ = yym5 if false { } else if z.HasExtensions() && z.EncExt(x.PodSelector) { } else { @@ -20858,14 +18436,14 @@ func (x *NetworkPolicyPeer) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1765 || yy2arr1765 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1765[1] { + if yyq2[1] { if x.NamespaceSelector == nil { r.EncodeNil() } else { - yym1770 := z.EncBinary() - _ = yym1770 + yym7 := z.EncBinary() + _ = yym7 if false { } else if z.HasExtensions() && z.EncExt(x.NamespaceSelector) { } else { @@ -20876,15 +18454,15 @@ func (x *NetworkPolicyPeer) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq1765[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("namespaceSelector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.NamespaceSelector == nil { r.EncodeNil() } else { - yym1771 := z.EncBinary() - _ = yym1771 + yym8 := z.EncBinary() + _ = yym8 if false { } else if z.HasExtensions() && z.EncExt(x.NamespaceSelector) { } else { @@ -20893,7 +18471,7 @@ func (x *NetworkPolicyPeer) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr1765 || yy2arr1765 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -20906,25 +18484,25 @@ func (x *NetworkPolicyPeer) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1772 := z.DecBinary() - _ = yym1772 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1773 := r.ContainerType() - if yyct1773 == codecSelferValueTypeMap1234 { - yyl1773 := r.ReadMapStart() - if yyl1773 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1773, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1773 == codecSelferValueTypeArray1234 { - yyl1773 := r.ReadArrayStart() - if yyl1773 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1773, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -20936,12 +18514,12 @@ func (x *NetworkPolicyPeer) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1774Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1774Slc - var yyhl1774 bool = l >= 0 - for yyj1774 := 0; ; yyj1774++ { - if yyhl1774 { - if yyj1774 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -20950,10 +18528,10 @@ func (x *NetworkPolicyPeer) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1774Slc = r.DecodeBytes(yys1774Slc, true, true) - yys1774 := string(yys1774Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1774 { + switch yys3 { case "podSelector": if r.TryDecodeAsNil() { if x.PodSelector != nil { @@ -20961,10 +18539,10 @@ func (x *NetworkPolicyPeer) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } else { if x.PodSelector == nil { - x.PodSelector = new(pkg1_unversioned.LabelSelector) + x.PodSelector = new(pkg1_v1.LabelSelector) } - yym1776 := z.DecBinary() - _ = yym1776 + yym5 := z.DecBinary() + _ = yym5 if false { } else if z.HasExtensions() && z.DecExt(x.PodSelector) { } else { @@ -20978,10 +18556,10 @@ func (x *NetworkPolicyPeer) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } else { if x.NamespaceSelector == nil { - x.NamespaceSelector = new(pkg1_unversioned.LabelSelector) + x.NamespaceSelector = new(pkg1_v1.LabelSelector) } - yym1778 := z.DecBinary() - _ = yym1778 + yym7 := z.DecBinary() + _ = yym7 if false { } else if z.HasExtensions() && z.DecExt(x.NamespaceSelector) { } else { @@ -20989,9 +18567,9 @@ func (x *NetworkPolicyPeer) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } default: - z.DecStructFieldNotFound(-1, yys1774) - } // end switch yys1774 - } // end for yyj1774 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -20999,16 +18577,16 @@ func (x *NetworkPolicyPeer) codecDecodeSelfFromArray(l int, d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1779 int - var yyb1779 bool - var yyhl1779 bool = l >= 0 - yyj1779++ - if yyhl1779 { - yyb1779 = yyj1779 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1779 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1779 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21019,23 +18597,23 @@ func (x *NetworkPolicyPeer) codecDecodeSelfFromArray(l int, d *codec1978.Decoder } } else { if x.PodSelector == nil { - x.PodSelector = new(pkg1_unversioned.LabelSelector) + x.PodSelector = new(pkg1_v1.LabelSelector) } - yym1781 := z.DecBinary() - _ = yym1781 + yym10 := z.DecBinary() + _ = yym10 if false { } else if z.HasExtensions() && z.DecExt(x.PodSelector) { } else { z.DecFallback(x.PodSelector, false) } } - yyj1779++ - if yyhl1779 { - yyb1779 = yyj1779 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1779 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1779 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21046,10 +18624,10 @@ func (x *NetworkPolicyPeer) codecDecodeSelfFromArray(l int, d *codec1978.Decoder } } else { if x.NamespaceSelector == nil { - x.NamespaceSelector = new(pkg1_unversioned.LabelSelector) + x.NamespaceSelector = new(pkg1_v1.LabelSelector) } - yym1783 := z.DecBinary() - _ = yym1783 + yym12 := z.DecBinary() + _ = yym12 if false { } else if z.HasExtensions() && z.DecExt(x.NamespaceSelector) { } else { @@ -21057,17 +18635,17 @@ func (x *NetworkPolicyPeer) codecDecodeSelfFromArray(l int, d *codec1978.Decoder } } for { - yyj1779++ - if yyhl1779 { - yyb1779 = yyj1779 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb1779 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb1779 { + if yyb8 { break } 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1779-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -21079,37 +18657,37 @@ func (x *NetworkPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1784 := z.EncBinary() - _ = yym1784 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1785 := !z.EncBinary() - yy2arr1785 := z.EncBasicHandle().StructToArray - var yyq1785 [4]bool - _, _, _ = yysep1785, yyq1785, yy2arr1785 - const yyr1785 bool = false - yyq1785[0] = x.Kind != "" - yyq1785[1] = x.APIVersion != "" - yyq1785[2] = true - var yynn1785 int - if yyr1785 || yy2arr1785 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn1785 = 1 - for _, b := range yyq1785 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn1785++ + yynn2++ } } - r.EncodeMapStart(yynn1785) - yynn1785 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr1785 || yy2arr1785 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1785[0] { - yym1787 := z.EncBinary() - _ = yym1787 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -21118,23 +18696,23 @@ func (x *NetworkPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1785[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1788 := z.EncBinary() - _ = yym1788 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr1785 || yy2arr1785 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1785[1] { - yym1790 := z.EncBinary() - _ = yym1790 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -21143,54 +18721,54 @@ func (x *NetworkPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq1785[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1791 := z.EncBinary() - _ = yym1791 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr1785 || yy2arr1785 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1785[2] { - yy1793 := &x.ListMeta - yym1794 := z.EncBinary() - _ = yym1794 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy1793) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy1793) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq1785[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1795 := &x.ListMeta - yym1796 := z.EncBinary() - _ = yym1796 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy1795) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy1795) + z.EncFallback(yy12) } } } - if yyr1785 || yy2arr1785 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym1798 := z.EncBinary() - _ = yym1798 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e) @@ -21203,15 +18781,15 @@ func (x *NetworkPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym1799 := z.EncBinary() - _ = yym1799 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e) } } } - if yyr1785 || yy2arr1785 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -21224,25 +18802,25 @@ func (x *NetworkPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1800 := z.DecBinary() - _ = yym1800 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1801 := r.ContainerType() - if yyct1801 == codecSelferValueTypeMap1234 { - yyl1801 := r.ReadMapStart() - if yyl1801 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1801, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct1801 == codecSelferValueTypeArray1234 { - yyl1801 := r.ReadArrayStart() - if yyl1801 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1801, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -21254,12 +18832,12 @@ func (x *NetworkPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1802Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1802Slc - var yyhl1802 bool = l >= 0 - for yyj1802 := 0; ; yyj1802++ { - if yyhl1802 { - if yyj1802 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -21268,51 +18846,63 @@ func (x *NetworkPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1802Slc = r.DecodeBytes(yys1802Slc, true, true) - yys1802 := string(yys1802Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1802 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = 
string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv1805 := &x.ListMeta - yym1806 := z.DecBinary() - _ = yym1806 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv1805) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv1805, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv1807 := &x.Items - yym1808 := z.DecBinary() - _ = yym1808 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv1807), d) + h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys1802) - } // end switch yys1802 - } // end for yyj1802 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -21320,16 +18910,16 @@ func (x *NetworkPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1809 int - var yyb1809 bool - var yyhl1809 bool = l >= 0 - yyj1809++ - if yyhl1809 { - yyb1809 = yyj1809 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1809 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1809 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21337,15 +18927,21 @@ func (x *NetworkPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj1809++ - if yyhl1809 { - yyb1809 = yyj1809 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1809 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1809 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21353,38 +18949,44 @@ func (x *NetworkPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj1809++ - if yyhl1809 { - yyb1809 = yyj1809 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1809 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1809 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv1812 := &x.ListMeta - yym1813 := z.DecBinary() - _ = yym1813 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv1812) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv1812, false) + z.DecFallback(yyv17, false) } } - yyj1809++ - if yyhl1809 { - yyb1809 = yyj1809 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1809 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1809 { + if yyb12 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -21392,26 +18994,26 @@ func (x *NetworkPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.Items = nil } else { - yyv1814 := &x.Items - yym1815 := z.DecBinary() - _ = yym1815 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv1814), d) + h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv19), d) } } for { - yyj1809++ - if yyhl1809 { - yyb1809 = yyj1809 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb1809 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb1809 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1809-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -21421,10 +19023,10 @@ func (x codecSelfer1234) encSliceCustomMetricTarget(v []CustomMetricTarget, e *c z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1816 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1817 := &yyv1816 - yy1817.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -21434,83 +19036,86 @@ func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d * z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1818 := *v - yyh1818, yyl1818 := z.DecSliceHelperStart() - var yyc1818 bool - if yyl1818 == 0 { - if yyv1818 == nil { - yyv1818 = []CustomMetricTarget{} - yyc1818 = true - } else if len(yyv1818) != 0 { - yyv1818 = yyv1818[:0] - yyc1818 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []CustomMetricTarget{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1818 > 0 { - var yyrr1818, yyrl1818 int - var yyrt1818 bool - if yyl1818 > cap(yyv1818) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg1818 := len(yyv1818) > 0 - yyv21818 := yyv1818 - yyrl1818, yyrt1818 = z.DecInferLen(yyl1818, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1818 { - if yyrl1818 <= cap(yyv1818) { - yyv1818 = yyv1818[:yyrl1818] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1818 = make([]CustomMetricTarget, yyrl1818) + yyv1 = make([]CustomMetricTarget, yyrl1) } } else { - yyv1818 = make([]CustomMetricTarget, yyrl1818) + yyv1 = make([]CustomMetricTarget, yyrl1) } - yyc1818 = true - yyrr1818 = len(yyv1818) - if yyrg1818 { - copy(yyv1818, yyv21818) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl1818 != len(yyv1818) { - yyv1818 = yyv1818[:yyl1818] - yyc1818 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1818 := 0 - for ; yyj1818 < yyrr1818; yyj1818++ { - yyh1818.ElemContainerState(yyj1818) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1818[yyj1818] = CustomMetricTarget{} + yyv1[yyj1] = CustomMetricTarget{} } else { - yyv1819 := &yyv1818[yyj1818] - yyv1819.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1818 { - for ; yyj1818 < 
yyl1818; yyj1818++ { - yyv1818 = append(yyv1818, CustomMetricTarget{}) - yyh1818.ElemContainerState(yyj1818) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, CustomMetricTarget{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1818[yyj1818] = CustomMetricTarget{} + yyv1[yyj1] = CustomMetricTarget{} } else { - yyv1820 := &yyv1818[yyj1818] - yyv1820.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1818 := 0 - for ; !r.CheckBreak(); yyj1818++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1818 >= len(yyv1818) { - yyv1818 = append(yyv1818, CustomMetricTarget{}) // var yyz1818 CustomMetricTarget - yyc1818 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, CustomMetricTarget{}) // var yyz1 CustomMetricTarget + yyc1 = true } - yyh1818.ElemContainerState(yyj1818) - if yyj1818 < len(yyv1818) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1818[yyj1818] = CustomMetricTarget{} + yyv1[yyj1] = CustomMetricTarget{} } else { - yyv1821 := &yyv1818[yyj1818] - yyv1821.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -21518,17 +19123,17 @@ func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d * } } - if yyj1818 < len(yyv1818) { - yyv1818 = yyv1818[:yyj1818] - yyc1818 = true - } else if yyj1818 == 0 && yyv1818 == nil { - yyv1818 = []CustomMetricTarget{} - yyc1818 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []CustomMetricTarget{} + yyc1 = true } } - yyh1818.End() - if yyc1818 { - *v = yyv1818 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -21537,10 +19142,10 @@ func (x codecSelfer1234) encSliceCustomMetricCurrentStatus(v []CustomMetricCurre z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1822 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1823 := &yyv1822 - yy1823.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -21550,199 +19155,86 @@ func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurr z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1824 := *v - yyh1824, yyl1824 := z.DecSliceHelperStart() - var yyc1824 bool - if yyl1824 == 0 { - if yyv1824 == nil { - yyv1824 = []CustomMetricCurrentStatus{} - yyc1824 = true - } else if len(yyv1824) != 0 { - yyv1824 = yyv1824[:0] - yyc1824 = true - } - } else if yyl1824 > 0 { - var yyrr1824, yyrl1824 int - var yyrt1824 bool - if yyl1824 > cap(yyv1824) { - - yyrg1824 := len(yyv1824) > 0 - yyv21824 := yyv1824 - yyrl1824, yyrt1824 = z.DecInferLen(yyl1824, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1824 { - if yyrl1824 <= cap(yyv1824) { - yyv1824 = yyv1824[:yyrl1824] - } else { - yyv1824 = make([]CustomMetricCurrentStatus, yyrl1824) - } - } else { - yyv1824 = make([]CustomMetricCurrentStatus, yyrl1824) - } - yyc1824 = true - yyrr1824 = len(yyv1824) - if yyrg1824 { - copy(yyv1824, yyv21824) - } - } else if yyl1824 != len(yyv1824) { - yyv1824 = yyv1824[:yyl1824] - yyc1824 = true - } - yyj1824 := 0 - for ; yyj1824 < yyrr1824; yyj1824++ { - yyh1824.ElemContainerState(yyj1824) - if r.TryDecodeAsNil() { - yyv1824[yyj1824] = CustomMetricCurrentStatus{} - } else { - yyv1825 := &yyv1824[yyj1824] - yyv1825.CodecDecodeSelf(d) - } - - } - if yyrt1824 { - for ; yyj1824 < yyl1824; yyj1824++ { - yyv1824 = append(yyv1824, 
CustomMetricCurrentStatus{}) - yyh1824.ElemContainerState(yyj1824) - if r.TryDecodeAsNil() { - yyv1824[yyj1824] = CustomMetricCurrentStatus{} - } else { - yyv1826 := &yyv1824[yyj1824] - yyv1826.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1824 := 0 - for ; !r.CheckBreak(); yyj1824++ { - - if yyj1824 >= len(yyv1824) { - yyv1824 = append(yyv1824, CustomMetricCurrentStatus{}) // var yyz1824 CustomMetricCurrentStatus - yyc1824 = true - } - yyh1824.ElemContainerState(yyj1824) - if yyj1824 < len(yyv1824) { - if r.TryDecodeAsNil() { - yyv1824[yyj1824] = CustomMetricCurrentStatus{} - } else { - yyv1827 := &yyv1824[yyj1824] - yyv1827.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1824 < len(yyv1824) { - yyv1824 = yyv1824[:yyj1824] - yyc1824 = true - } else if yyj1824 == 0 && yyv1824 == nil { - yyv1824 = []CustomMetricCurrentStatus{} - yyc1824 = true - } - } - yyh1824.End() - if yyc1824 { - *v = yyv1824 - } -} - -func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutoscaler, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1828 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1829 := &yyv1828 - yy1829.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutoscaler, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1830 := *v - yyh1830, yyl1830 := z.DecSliceHelperStart() - var yyc1830 bool - if yyl1830 == 0 { - if yyv1830 == nil { - yyv1830 = []HorizontalPodAutoscaler{} - yyc1830 = true - } else if len(yyv1830) != 0 { - yyv1830 = yyv1830[:0] - yyc1830 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []CustomMetricCurrentStatus{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1830 > 0 { - var yyrr1830, yyrl1830 int - var yyrt1830 bool - if yyl1830 > cap(yyv1830) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg1830 := len(yyv1830) > 0 - yyv21830 := yyv1830 - yyrl1830, yyrt1830 = z.DecInferLen(yyl1830, z.DecBasicHandle().MaxInitLen, 376) - if yyrt1830 { - if yyrl1830 <= cap(yyv1830) { - yyv1830 = yyv1830[:yyrl1830] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1830 = make([]HorizontalPodAutoscaler, yyrl1830) + yyv1 = make([]CustomMetricCurrentStatus, yyrl1) } } else { - yyv1830 = make([]HorizontalPodAutoscaler, yyrl1830) + yyv1 = make([]CustomMetricCurrentStatus, yyrl1) } - yyc1830 = true - yyrr1830 = len(yyv1830) - if yyrg1830 { - copy(yyv1830, yyv21830) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl1830 != len(yyv1830) { - yyv1830 = yyv1830[:yyl1830] - yyc1830 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1830 := 0 - for ; yyj1830 < yyrr1830; yyj1830++ { - yyh1830.ElemContainerState(yyj1830) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1830[yyj1830] = HorizontalPodAutoscaler{} + yyv1[yyj1] = CustomMetricCurrentStatus{} } else { - yyv1831 := &yyv1830[yyj1830] 
- yyv1831.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1830 { - for ; yyj1830 < yyl1830; yyj1830++ { - yyv1830 = append(yyv1830, HorizontalPodAutoscaler{}) - yyh1830.ElemContainerState(yyj1830) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, CustomMetricCurrentStatus{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1830[yyj1830] = HorizontalPodAutoscaler{} + yyv1[yyj1] = CustomMetricCurrentStatus{} } else { - yyv1832 := &yyv1830[yyj1830] - yyv1832.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1830 := 0 - for ; !r.CheckBreak(); yyj1830++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1830 >= len(yyv1830) { - yyv1830 = append(yyv1830, HorizontalPodAutoscaler{}) // var yyz1830 HorizontalPodAutoscaler - yyc1830 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, CustomMetricCurrentStatus{}) // var yyz1 CustomMetricCurrentStatus + yyc1 = true } - yyh1830.ElemContainerState(yyj1830) - if yyj1830 < len(yyv1830) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1830[yyj1830] = HorizontalPodAutoscaler{} + yyv1[yyj1] = CustomMetricCurrentStatus{} } else { - yyv1833 := &yyv1830[yyj1830] - yyv1833.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -21750,17 +19242,17 @@ func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutos } } - if yyj1830 < len(yyv1830) { - yyv1830 = yyv1830[:yyj1830] - yyc1830 = true - } else if yyj1830 == 0 && yyv1830 == nil { - yyv1830 = []HorizontalPodAutoscaler{} - yyc1830 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []CustomMetricCurrentStatus{} + yyc1 = true } } - yyh1830.End() - if yyc1830 { - *v = yyv1830 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -21769,10 +19261,10 @@ func (x codecSelfer1234) encSliceAPIVersion(v []APIVersion, e *codec1978.Encoder z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1834 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1835 := &yyv1834 - yy1835.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -21782,83 +19274,86 @@ func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decode z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1836 := *v - yyh1836, yyl1836 := z.DecSliceHelperStart() - var yyc1836 bool - if yyl1836 == 0 { - if yyv1836 == nil { - yyv1836 = []APIVersion{} - yyc1836 = true - } else if len(yyv1836) != 0 { - yyv1836 = yyv1836[:0] - yyc1836 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []APIVersion{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1836 > 0 { - var yyrr1836, yyrl1836 int - var yyrt1836 bool - if yyl1836 > cap(yyv1836) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg1836 := len(yyv1836) > 0 - yyv21836 := yyv1836 - yyrl1836, yyrt1836 = z.DecInferLen(yyl1836, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1836 { - if yyrl1836 <= cap(yyv1836) { - yyv1836 = yyv1836[:yyrl1836] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { 
+ yyv1 = yyv1[:yyrl1] } else { - yyv1836 = make([]APIVersion, yyrl1836) + yyv1 = make([]APIVersion, yyrl1) } } else { - yyv1836 = make([]APIVersion, yyrl1836) + yyv1 = make([]APIVersion, yyrl1) } - yyc1836 = true - yyrr1836 = len(yyv1836) - if yyrg1836 { - copy(yyv1836, yyv21836) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl1836 != len(yyv1836) { - yyv1836 = yyv1836[:yyl1836] - yyc1836 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1836 := 0 - for ; yyj1836 < yyrr1836; yyj1836++ { - yyh1836.ElemContainerState(yyj1836) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1836[yyj1836] = APIVersion{} + yyv1[yyj1] = APIVersion{} } else { - yyv1837 := &yyv1836[yyj1836] - yyv1837.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1836 { - for ; yyj1836 < yyl1836; yyj1836++ { - yyv1836 = append(yyv1836, APIVersion{}) - yyh1836.ElemContainerState(yyj1836) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, APIVersion{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1836[yyj1836] = APIVersion{} + yyv1[yyj1] = APIVersion{} } else { - yyv1838 := &yyv1836[yyj1836] - yyv1838.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1836 := 0 - for ; !r.CheckBreak(); yyj1836++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1836 >= len(yyv1836) { - yyv1836 = append(yyv1836, APIVersion{}) // var yyz1836 APIVersion - yyc1836 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, APIVersion{}) // var yyz1 APIVersion + yyc1 = true } - yyh1836.ElemContainerState(yyj1836) - if yyj1836 < len(yyv1836) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1836[yyj1836] = APIVersion{} + yyv1[yyj1] = APIVersion{} } else { - yyv1839 := &yyv1836[yyj1836] - yyv1839.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -21866,17 +19361,17 @@ func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decode } } - if yyj1836 < len(yyv1836) { - yyv1836 = yyv1836[:yyj1836] - yyc1836 = true - } else if yyj1836 == 0 && yyv1836 == nil { - yyv1836 = []APIVersion{} - yyc1836 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []APIVersion{} + yyc1 = true } } - yyh1836.End() - if yyc1836 { - *v = yyv1836 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -21885,10 +19380,10 @@ func (x codecSelfer1234) encSliceThirdPartyResource(v []ThirdPartyResource, e *c z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1840 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1841 := &yyv1840 - yy1841.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -21898,83 +19393,86 @@ func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d * z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1842 := *v - yyh1842, yyl1842 := z.DecSliceHelperStart() - var yyc1842 bool - if yyl1842 == 0 { - if yyv1842 == nil { - yyv1842 = []ThirdPartyResource{} - yyc1842 = true - } else if len(yyv1842) != 0 { - yyv1842 = yyv1842[:0] - yyc1842 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ThirdPartyResource{} + yyc1 = true + } else if len(yyv1) 
!= 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1842 > 0 { - var yyrr1842, yyrl1842 int - var yyrt1842 bool - if yyl1842 > cap(yyv1842) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg1842 := len(yyv1842) > 0 - yyv21842 := yyv1842 - yyrl1842, yyrt1842 = z.DecInferLen(yyl1842, z.DecBasicHandle().MaxInitLen, 296) - if yyrt1842 { - if yyrl1842 <= cap(yyv1842) { - yyv1842 = yyv1842[:yyrl1842] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1842 = make([]ThirdPartyResource, yyrl1842) + yyv1 = make([]ThirdPartyResource, yyrl1) } } else { - yyv1842 = make([]ThirdPartyResource, yyrl1842) + yyv1 = make([]ThirdPartyResource, yyrl1) } - yyc1842 = true - yyrr1842 = len(yyv1842) - if yyrg1842 { - copy(yyv1842, yyv21842) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl1842 != len(yyv1842) { - yyv1842 = yyv1842[:yyl1842] - yyc1842 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1842 := 0 - for ; yyj1842 < yyrr1842; yyj1842++ { - yyh1842.ElemContainerState(yyj1842) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1842[yyj1842] = ThirdPartyResource{} + yyv1[yyj1] = ThirdPartyResource{} } else { - yyv1843 := &yyv1842[yyj1842] - yyv1843.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1842 { - for ; yyj1842 < yyl1842; yyj1842++ { - yyv1842 = append(yyv1842, ThirdPartyResource{}) - yyh1842.ElemContainerState(yyj1842) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ThirdPartyResource{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1842[yyj1842] = ThirdPartyResource{} + yyv1[yyj1] = ThirdPartyResource{} } else { - yyv1844 := &yyv1842[yyj1842] - yyv1844.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1842 := 0 - for ; !r.CheckBreak(); yyj1842++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1842 >= len(yyv1842) { - yyv1842 = append(yyv1842, ThirdPartyResource{}) // var yyz1842 ThirdPartyResource - yyc1842 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ThirdPartyResource{}) // var yyz1 ThirdPartyResource + yyc1 = true } - yyh1842.ElemContainerState(yyj1842) - if yyj1842 < len(yyv1842) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1842[yyj1842] = ThirdPartyResource{} + yyv1[yyj1] = ThirdPartyResource{} } else { - yyv1845 := &yyv1842[yyj1842] - yyv1845.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -21982,17 +19480,17 @@ func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d * } } - if yyj1842 < len(yyv1842) { - yyv1842 = yyv1842[:yyj1842] - yyc1842 = true - } else if yyj1842 == 0 && yyv1842 == nil { - yyv1842 = []ThirdPartyResource{} - yyc1842 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ThirdPartyResource{} + yyc1 = true } } - yyh1842.End() - if yyc1842 { - *v = yyv1842 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -22001,10 +19499,10 @@ func (x codecSelfer1234) encSliceDeploymentCondition(v []DeploymentCondition, e z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1846 := range v { + for _, yyv1 := range v { 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1847 := &yyv1846 - yy1847.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -22014,83 +19512,86 @@ func (x codecSelfer1234) decSliceDeploymentCondition(v *[]DeploymentCondition, d z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1848 := *v - yyh1848, yyl1848 := z.DecSliceHelperStart() - var yyc1848 bool - if yyl1848 == 0 { - if yyv1848 == nil { - yyv1848 = []DeploymentCondition{} - yyc1848 = true - } else if len(yyv1848) != 0 { - yyv1848 = yyv1848[:0] - yyc1848 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []DeploymentCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1848 > 0 { - var yyrr1848, yyrl1848 int - var yyrt1848 bool - if yyl1848 > cap(yyv1848) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg1848 := len(yyv1848) > 0 - yyv21848 := yyv1848 - yyrl1848, yyrt1848 = z.DecInferLen(yyl1848, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1848 { - if yyrl1848 <= cap(yyv1848) { - yyv1848 = yyv1848[:yyrl1848] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1848 = make([]DeploymentCondition, yyrl1848) + yyv1 = make([]DeploymentCondition, yyrl1) } } else { - yyv1848 = make([]DeploymentCondition, yyrl1848) + yyv1 = make([]DeploymentCondition, yyrl1) } - yyc1848 = true - yyrr1848 = len(yyv1848) - if yyrg1848 { - copy(yyv1848, yyv21848) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl1848 != len(yyv1848) { - yyv1848 = yyv1848[:yyl1848] - yyc1848 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1848 := 0 - for ; yyj1848 < yyrr1848; yyj1848++ { - yyh1848.ElemContainerState(yyj1848) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1848[yyj1848] = DeploymentCondition{} + yyv1[yyj1] = DeploymentCondition{} } else { - yyv1849 := &yyv1848[yyj1848] - yyv1849.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1848 { - for ; yyj1848 < yyl1848; yyj1848++ { - yyv1848 = append(yyv1848, DeploymentCondition{}) - yyh1848.ElemContainerState(yyj1848) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, DeploymentCondition{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1848[yyj1848] = DeploymentCondition{} + yyv1[yyj1] = DeploymentCondition{} } else { - yyv1850 := &yyv1848[yyj1848] - yyv1850.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1848 := 0 - for ; !r.CheckBreak(); yyj1848++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1848 >= len(yyv1848) { - yyv1848 = append(yyv1848, DeploymentCondition{}) // var yyz1848 DeploymentCondition - yyc1848 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, DeploymentCondition{}) // var yyz1 DeploymentCondition + yyc1 = true } - yyh1848.ElemContainerState(yyj1848) - if yyj1848 < len(yyv1848) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1848[yyj1848] = DeploymentCondition{} + yyv1[yyj1] = DeploymentCondition{} } else { - yyv1851 := &yyv1848[yyj1848] - yyv1851.CodecDecodeSelf(d) + yyv4 := 
&yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -22098,17 +19599,17 @@ func (x codecSelfer1234) decSliceDeploymentCondition(v *[]DeploymentCondition, d } } - if yyj1848 < len(yyv1848) { - yyv1848 = yyv1848[:yyj1848] - yyc1848 = true - } else if yyj1848 == 0 && yyv1848 == nil { - yyv1848 = []DeploymentCondition{} - yyc1848 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []DeploymentCondition{} + yyc1 = true } } - yyh1848.End() - if yyc1848 { - *v = yyv1848 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -22117,10 +19618,10 @@ func (x codecSelfer1234) encSliceDeployment(v []Deployment, e *codec1978.Encoder z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1852 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1853 := &yyv1852 - yy1853.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -22130,83 +19631,86 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decode z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1854 := *v - yyh1854, yyl1854 := z.DecSliceHelperStart() - var yyc1854 bool - if yyl1854 == 0 { - if yyv1854 == nil { - yyv1854 = []Deployment{} - yyc1854 = true - } else if len(yyv1854) != 0 { - yyv1854 = yyv1854[:0] - yyc1854 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Deployment{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1854 > 0 { - var yyrr1854, yyrl1854 int - var yyrt1854 bool - if yyl1854 > cap(yyv1854) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg1854 := len(yyv1854) > 0 - yyv21854 := yyv1854 - yyrl1854, yyrt1854 = z.DecInferLen(yyl1854, z.DecBasicHandle().MaxInitLen, 856) - if yyrt1854 { - if yyrl1854 <= cap(yyv1854) { - yyv1854 = yyv1854[:yyrl1854] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 920) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1854 = make([]Deployment, yyrl1854) + yyv1 = make([]Deployment, yyrl1) } } else { - yyv1854 = make([]Deployment, yyrl1854) + yyv1 = make([]Deployment, yyrl1) } - yyc1854 = true - yyrr1854 = len(yyv1854) - if yyrg1854 { - copy(yyv1854, yyv21854) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl1854 != len(yyv1854) { - yyv1854 = yyv1854[:yyl1854] - yyc1854 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1854 := 0 - for ; yyj1854 < yyrr1854; yyj1854++ { - yyh1854.ElemContainerState(yyj1854) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1854[yyj1854] = Deployment{} + yyv1[yyj1] = Deployment{} } else { - yyv1855 := &yyv1854[yyj1854] - yyv1855.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1854 { - for ; yyj1854 < yyl1854; yyj1854++ { - yyv1854 = append(yyv1854, Deployment{}) - yyh1854.ElemContainerState(yyj1854) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Deployment{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1854[yyj1854] = Deployment{} + yyv1[yyj1] = Deployment{} } else { - yyv1856 := &yyv1854[yyj1854] - yyv1856.CodecDecodeSelf(d) + yyv3 := 
&yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1854 := 0 - for ; !r.CheckBreak(); yyj1854++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1854 >= len(yyv1854) { - yyv1854 = append(yyv1854, Deployment{}) // var yyz1854 Deployment - yyc1854 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Deployment{}) // var yyz1 Deployment + yyc1 = true } - yyh1854.ElemContainerState(yyj1854) - if yyj1854 < len(yyv1854) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1854[yyj1854] = Deployment{} + yyv1[yyj1] = Deployment{} } else { - yyv1857 := &yyv1854[yyj1854] - yyv1857.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -22214,17 +19718,17 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decode } } - if yyj1854 < len(yyv1854) { - yyv1854 = yyv1854[:yyj1854] - yyc1854 = true - } else if yyj1854 == 0 && yyv1854 == nil { - yyv1854 = []Deployment{} - yyc1854 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Deployment{} + yyc1 = true } } - yyh1854.End() - if yyc1854 { - *v = yyv1854 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -22233,10 +19737,10 @@ func (x codecSelfer1234) encSliceDaemonSet(v []DaemonSet, e *codec1978.Encoder) z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1858 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1859 := &yyv1858 - yy1859.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -22246,83 +19750,86 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1860 := *v - yyh1860, yyl1860 := z.DecSliceHelperStart() - var yyc1860 bool - if yyl1860 == 0 { - if yyv1860 == nil { - yyv1860 = []DaemonSet{} - yyc1860 = true - } else if len(yyv1860) != 0 { - yyv1860 = yyv1860[:0] - yyc1860 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []DaemonSet{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1860 > 0 { - var yyrr1860, yyrl1860 int - var yyrt1860 bool - if yyl1860 > cap(yyv1860) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg1860 := len(yyv1860) > 0 - yyv21860 := yyv1860 - yyrl1860, yyrt1860 = z.DecInferLen(yyl1860, z.DecBasicHandle().MaxInitLen, 752) - if yyrt1860 { - if yyrl1860 <= cap(yyv1860) { - yyv1860 = yyv1860[:yyrl1860] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 872) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1860 = make([]DaemonSet, yyrl1860) + yyv1 = make([]DaemonSet, yyrl1) } } else { - yyv1860 = make([]DaemonSet, yyrl1860) + yyv1 = make([]DaemonSet, yyrl1) } - yyc1860 = true - yyrr1860 = len(yyv1860) - if yyrg1860 { - copy(yyv1860, yyv21860) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl1860 != len(yyv1860) { - yyv1860 = yyv1860[:yyl1860] - yyc1860 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1860 := 0 - for ; yyj1860 < yyrr1860; yyj1860++ { - yyh1860.ElemContainerState(yyj1860) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + 
yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1860[yyj1860] = DaemonSet{} + yyv1[yyj1] = DaemonSet{} } else { - yyv1861 := &yyv1860[yyj1860] - yyv1861.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1860 { - for ; yyj1860 < yyl1860; yyj1860++ { - yyv1860 = append(yyv1860, DaemonSet{}) - yyh1860.ElemContainerState(yyj1860) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, DaemonSet{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1860[yyj1860] = DaemonSet{} + yyv1[yyj1] = DaemonSet{} } else { - yyv1862 := &yyv1860[yyj1860] - yyv1862.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1860 := 0 - for ; !r.CheckBreak(); yyj1860++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1860 >= len(yyv1860) { - yyv1860 = append(yyv1860, DaemonSet{}) // var yyz1860 DaemonSet - yyc1860 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, DaemonSet{}) // var yyz1 DaemonSet + yyc1 = true } - yyh1860.ElemContainerState(yyj1860) - if yyj1860 < len(yyv1860) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1860[yyj1860] = DaemonSet{} + yyv1[yyj1] = DaemonSet{} } else { - yyv1863 := &yyv1860[yyj1860] - yyv1863.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -22330,17 +19837,17 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) } } - if yyj1860 < len(yyv1860) { - yyv1860 = yyv1860[:yyj1860] - yyc1860 = true - } else if yyj1860 == 0 && yyv1860 == nil { - yyv1860 = []DaemonSet{} - yyc1860 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []DaemonSet{} + yyc1 = true } } - yyh1860.End() - if yyc1860 { - *v = yyv1860 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -22349,10 +19856,10 @@ func (x codecSelfer1234) encSliceThirdPartyResourceData(v []ThirdPartyResourceDa z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1864 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1865 := &yyv1864 - yy1865.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -22362,315 +19869,86 @@ func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceD z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1866 := *v - yyh1866, yyl1866 := z.DecSliceHelperStart() - var yyc1866 bool - if yyl1866 == 0 { - if yyv1866 == nil { - yyv1866 = []ThirdPartyResourceData{} - yyc1866 = true - } else if len(yyv1866) != 0 { - yyv1866 = yyv1866[:0] - yyc1866 = true - } - } else if yyl1866 > 0 { - var yyrr1866, yyrl1866 int - var yyrt1866 bool - if yyl1866 > cap(yyv1866) { - - yyrg1866 := len(yyv1866) > 0 - yyv21866 := yyv1866 - yyrl1866, yyrt1866 = z.DecInferLen(yyl1866, z.DecBasicHandle().MaxInitLen, 280) - if yyrt1866 { - if yyrl1866 <= cap(yyv1866) { - yyv1866 = yyv1866[:yyrl1866] - } else { - yyv1866 = make([]ThirdPartyResourceData, yyrl1866) - } - } else { - yyv1866 = make([]ThirdPartyResourceData, yyrl1866) - } - yyc1866 = true - yyrr1866 = len(yyv1866) - if yyrg1866 { - copy(yyv1866, yyv21866) - } - } else if yyl1866 != len(yyv1866) { - yyv1866 = yyv1866[:yyl1866] - yyc1866 = true - } - yyj1866 := 0 - for ; yyj1866 < yyrr1866; yyj1866++ { - yyh1866.ElemContainerState(yyj1866) - if r.TryDecodeAsNil() { - yyv1866[yyj1866] = ThirdPartyResourceData{} - } else { - yyv1867 := 
&yyv1866[yyj1866] - yyv1867.CodecDecodeSelf(d) - } - - } - if yyrt1866 { - for ; yyj1866 < yyl1866; yyj1866++ { - yyv1866 = append(yyv1866, ThirdPartyResourceData{}) - yyh1866.ElemContainerState(yyj1866) - if r.TryDecodeAsNil() { - yyv1866[yyj1866] = ThirdPartyResourceData{} - } else { - yyv1868 := &yyv1866[yyj1866] - yyv1868.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1866 := 0 - for ; !r.CheckBreak(); yyj1866++ { - - if yyj1866 >= len(yyv1866) { - yyv1866 = append(yyv1866, ThirdPartyResourceData{}) // var yyz1866 ThirdPartyResourceData - yyc1866 = true - } - yyh1866.ElemContainerState(yyj1866) - if yyj1866 < len(yyv1866) { - if r.TryDecodeAsNil() { - yyv1866[yyj1866] = ThirdPartyResourceData{} - } else { - yyv1869 := &yyv1866[yyj1866] - yyv1869.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1866 < len(yyv1866) { - yyv1866 = yyv1866[:yyj1866] - yyc1866 = true - } else if yyj1866 == 0 && yyv1866 == nil { - yyv1866 = []ThirdPartyResourceData{} - yyc1866 = true - } - } - yyh1866.End() - if yyc1866 { - *v = yyv1866 - } -} - -func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1870 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1871 := &yyv1870 - yy1871.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1872 := *v - yyh1872, yyl1872 := z.DecSliceHelperStart() - var yyc1872 bool - if yyl1872 == 0 { - if yyv1872 == nil { - yyv1872 = []Job{} - yyc1872 = true - } else if len(yyv1872) != 0 { - yyv1872 = yyv1872[:0] - yyc1872 = true - } - } else if yyl1872 > 0 { - var yyrr1872, yyrl1872 int - var yyrt1872 bool - if yyl1872 > cap(yyv1872) { - - yyrg1872 := len(yyv1872) > 0 - yyv21872 := yyv1872 - yyrl1872, yyrt1872 = z.DecInferLen(yyl1872, z.DecBasicHandle().MaxInitLen, 824) - if yyrt1872 { - if yyrl1872 <= cap(yyv1872) { - yyv1872 = yyv1872[:yyrl1872] - } else { - yyv1872 = make([]Job, yyrl1872) - } - } else { - yyv1872 = make([]Job, yyrl1872) - } - yyc1872 = true - yyrr1872 = len(yyv1872) - if yyrg1872 { - copy(yyv1872, yyv21872) - } - } else if yyl1872 != len(yyv1872) { - yyv1872 = yyv1872[:yyl1872] - yyc1872 = true - } - yyj1872 := 0 - for ; yyj1872 < yyrr1872; yyj1872++ { - yyh1872.ElemContainerState(yyj1872) - if r.TryDecodeAsNil() { - yyv1872[yyj1872] = Job{} - } else { - yyv1873 := &yyv1872[yyj1872] - yyv1873.CodecDecodeSelf(d) - } - - } - if yyrt1872 { - for ; yyj1872 < yyl1872; yyj1872++ { - yyv1872 = append(yyv1872, Job{}) - yyh1872.ElemContainerState(yyj1872) - if r.TryDecodeAsNil() { - yyv1872[yyj1872] = Job{} - } else { - yyv1874 := &yyv1872[yyj1872] - yyv1874.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1872 := 0 - for ; !r.CheckBreak(); yyj1872++ { - - if yyj1872 >= len(yyv1872) { - yyv1872 = append(yyv1872, Job{}) // var yyz1872 Job - yyc1872 = true - } - yyh1872.ElemContainerState(yyj1872) - if yyj1872 < len(yyv1872) { - if r.TryDecodeAsNil() { - yyv1872[yyj1872] = Job{} - } else { - yyv1875 := &yyv1872[yyj1872] - yyv1875.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1872 < len(yyv1872) { - yyv1872 = yyv1872[:yyj1872] - yyc1872 = true - } else if yyj1872 == 0 && yyv1872 == nil { - yyv1872 = []Job{} - yyc1872 = true - } - } - yyh1872.End() - if yyc1872 { 
- *v = yyv1872 - } -} - -func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1876 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1877 := &yyv1876 - yy1877.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1878 := *v - yyh1878, yyl1878 := z.DecSliceHelperStart() - var yyc1878 bool - if yyl1878 == 0 { - if yyv1878 == nil { - yyv1878 = []JobCondition{} - yyc1878 = true - } else if len(yyv1878) != 0 { - yyv1878 = yyv1878[:0] - yyc1878 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ThirdPartyResourceData{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1878 > 0 { - var yyrr1878, yyrl1878 int - var yyrt1878 bool - if yyl1878 > cap(yyv1878) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg1878 := len(yyv1878) > 0 - yyv21878 := yyv1878 - yyrl1878, yyrt1878 = z.DecInferLen(yyl1878, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1878 { - if yyrl1878 <= cap(yyv1878) { - yyv1878 = yyv1878[:yyrl1878] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1878 = make([]JobCondition, yyrl1878) + yyv1 = make([]ThirdPartyResourceData, yyrl1) } } else { - yyv1878 = make([]JobCondition, yyrl1878) + yyv1 = make([]ThirdPartyResourceData, yyrl1) } - yyc1878 = true - yyrr1878 = len(yyv1878) - if yyrg1878 { - copy(yyv1878, yyv21878) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl1878 != len(yyv1878) { - yyv1878 = yyv1878[:yyl1878] - yyc1878 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1878 := 0 - for ; yyj1878 < yyrr1878; yyj1878++ { - yyh1878.ElemContainerState(yyj1878) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1878[yyj1878] = JobCondition{} + yyv1[yyj1] = ThirdPartyResourceData{} } else { - yyv1879 := &yyv1878[yyj1878] - yyv1879.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1878 { - for ; yyj1878 < yyl1878; yyj1878++ { - yyv1878 = append(yyv1878, JobCondition{}) - yyh1878.ElemContainerState(yyj1878) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ThirdPartyResourceData{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1878[yyj1878] = JobCondition{} + yyv1[yyj1] = ThirdPartyResourceData{} } else { - yyv1880 := &yyv1878[yyj1878] - yyv1880.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1878 := 0 - for ; !r.CheckBreak(); yyj1878++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1878 >= len(yyv1878) { - yyv1878 = append(yyv1878, JobCondition{}) // var yyz1878 JobCondition - yyc1878 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ThirdPartyResourceData{}) // var yyz1 ThirdPartyResourceData + yyc1 = true } - yyh1878.ElemContainerState(yyj1878) - if yyj1878 < len(yyv1878) { + yyh1.ElemContainerState(yyj1) + if 
yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1878[yyj1878] = JobCondition{} + yyv1[yyj1] = ThirdPartyResourceData{} } else { - yyv1881 := &yyv1878[yyj1878] - yyv1881.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -22678,17 +19956,17 @@ func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.De } } - if yyj1878 < len(yyv1878) { - yyv1878 = yyv1878[:yyj1878] - yyc1878 = true - } else if yyj1878 == 0 && yyv1878 == nil { - yyv1878 = []JobCondition{} - yyc1878 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ThirdPartyResourceData{} + yyc1 = true } } - yyh1878.End() - if yyc1878 { - *v = yyv1878 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -22697,10 +19975,10 @@ func (x codecSelfer1234) encSliceIngress(v []Ingress, e *codec1978.Encoder) { z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1882 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1883 := &yyv1882 - yy1883.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -22710,83 +19988,86 @@ func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1884 := *v - yyh1884, yyl1884 := z.DecSliceHelperStart() - var yyc1884 bool - if yyl1884 == 0 { - if yyv1884 == nil { - yyv1884 = []Ingress{} - yyc1884 = true - } else if len(yyv1884) != 0 { - yyv1884 = yyv1884[:0] - yyc1884 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Ingress{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1884 > 0 { - var yyrr1884, yyrl1884 int - var yyrt1884 bool - if yyl1884 > cap(yyv1884) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg1884 := len(yyv1884) > 0 - yyv21884 := yyv1884 - yyrl1884, yyrt1884 = z.DecInferLen(yyl1884, z.DecBasicHandle().MaxInitLen, 336) - if yyrt1884 { - if yyrl1884 <= cap(yyv1884) { - yyv1884 = yyv1884[:yyrl1884] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 336) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1884 = make([]Ingress, yyrl1884) + yyv1 = make([]Ingress, yyrl1) } } else { - yyv1884 = make([]Ingress, yyrl1884) + yyv1 = make([]Ingress, yyrl1) } - yyc1884 = true - yyrr1884 = len(yyv1884) - if yyrg1884 { - copy(yyv1884, yyv21884) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl1884 != len(yyv1884) { - yyv1884 = yyv1884[:yyl1884] - yyc1884 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1884 := 0 - for ; yyj1884 < yyrr1884; yyj1884++ { - yyh1884.ElemContainerState(yyj1884) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1884[yyj1884] = Ingress{} + yyv1[yyj1] = Ingress{} } else { - yyv1885 := &yyv1884[yyj1884] - yyv1885.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1884 { - for ; yyj1884 < yyl1884; yyj1884++ { - yyv1884 = append(yyv1884, Ingress{}) - yyh1884.ElemContainerState(yyj1884) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Ingress{}) + yyh1.ElemContainerState(yyj1) if 
r.TryDecodeAsNil() { - yyv1884[yyj1884] = Ingress{} + yyv1[yyj1] = Ingress{} } else { - yyv1886 := &yyv1884[yyj1884] - yyv1886.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1884 := 0 - for ; !r.CheckBreak(); yyj1884++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1884 >= len(yyv1884) { - yyv1884 = append(yyv1884, Ingress{}) // var yyz1884 Ingress - yyc1884 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Ingress{}) // var yyz1 Ingress + yyc1 = true } - yyh1884.ElemContainerState(yyj1884) - if yyj1884 < len(yyv1884) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1884[yyj1884] = Ingress{} + yyv1[yyj1] = Ingress{} } else { - yyv1887 := &yyv1884[yyj1884] - yyv1887.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -22794,17 +20075,17 @@ func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { } } - if yyj1884 < len(yyv1884) { - yyv1884 = yyv1884[:yyj1884] - yyc1884 = true - } else if yyj1884 == 0 && yyv1884 == nil { - yyv1884 = []Ingress{} - yyc1884 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Ingress{} + yyc1 = true } } - yyh1884.End() - if yyc1884 { - *v = yyv1884 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -22813,10 +20094,10 @@ func (x codecSelfer1234) encSliceIngressTLS(v []IngressTLS, e *codec1978.Encoder z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1888 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1889 := &yyv1888 - yy1889.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -22826,83 +20107,86 @@ func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decode z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1890 := *v - yyh1890, yyl1890 := z.DecSliceHelperStart() - var yyc1890 bool - if yyl1890 == 0 { - if yyv1890 == nil { - yyv1890 = []IngressTLS{} - yyc1890 = true - } else if len(yyv1890) != 0 { - yyv1890 = yyv1890[:0] - yyc1890 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []IngressTLS{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1890 > 0 { - var yyrr1890, yyrl1890 int - var yyrt1890 bool - if yyl1890 > cap(yyv1890) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg1890 := len(yyv1890) > 0 - yyv21890 := yyv1890 - yyrl1890, yyrt1890 = z.DecInferLen(yyl1890, z.DecBasicHandle().MaxInitLen, 40) - if yyrt1890 { - if yyrl1890 <= cap(yyv1890) { - yyv1890 = yyv1890[:yyrl1890] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1890 = make([]IngressTLS, yyrl1890) + yyv1 = make([]IngressTLS, yyrl1) } } else { - yyv1890 = make([]IngressTLS, yyrl1890) + yyv1 = make([]IngressTLS, yyrl1) } - yyc1890 = true - yyrr1890 = len(yyv1890) - if yyrg1890 { - copy(yyv1890, yyv21890) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl1890 != len(yyv1890) { - yyv1890 = yyv1890[:yyl1890] - yyc1890 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1890 := 0 - for ; 
yyj1890 < yyrr1890; yyj1890++ { - yyh1890.ElemContainerState(yyj1890) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1890[yyj1890] = IngressTLS{} + yyv1[yyj1] = IngressTLS{} } else { - yyv1891 := &yyv1890[yyj1890] - yyv1891.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1890 { - for ; yyj1890 < yyl1890; yyj1890++ { - yyv1890 = append(yyv1890, IngressTLS{}) - yyh1890.ElemContainerState(yyj1890) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, IngressTLS{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1890[yyj1890] = IngressTLS{} + yyv1[yyj1] = IngressTLS{} } else { - yyv1892 := &yyv1890[yyj1890] - yyv1892.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1890 := 0 - for ; !r.CheckBreak(); yyj1890++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1890 >= len(yyv1890) { - yyv1890 = append(yyv1890, IngressTLS{}) // var yyz1890 IngressTLS - yyc1890 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, IngressTLS{}) // var yyz1 IngressTLS + yyc1 = true } - yyh1890.ElemContainerState(yyj1890) - if yyj1890 < len(yyv1890) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1890[yyj1890] = IngressTLS{} + yyv1[yyj1] = IngressTLS{} } else { - yyv1893 := &yyv1890[yyj1890] - yyv1893.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -22910,17 +20194,17 @@ func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decode } } - if yyj1890 < len(yyv1890) { - yyv1890 = yyv1890[:yyj1890] - yyc1890 = true - } else if yyj1890 == 0 && yyv1890 == nil { - yyv1890 = []IngressTLS{} - yyc1890 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []IngressTLS{} + yyc1 = true } } - yyh1890.End() - if yyc1890 { - *v = yyv1890 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -22929,10 +20213,10 @@ func (x codecSelfer1234) encSliceIngressRule(v []IngressRule, e *codec1978.Encod z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1894 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1895 := &yyv1894 - yy1895.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -22942,83 +20226,86 @@ func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Deco z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1896 := *v - yyh1896, yyl1896 := z.DecSliceHelperStart() - var yyc1896 bool - if yyl1896 == 0 { - if yyv1896 == nil { - yyv1896 = []IngressRule{} - yyc1896 = true - } else if len(yyv1896) != 0 { - yyv1896 = yyv1896[:0] - yyc1896 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []IngressRule{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1896 > 0 { - var yyrr1896, yyrl1896 int - var yyrt1896 bool - if yyl1896 > cap(yyv1896) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg1896 := len(yyv1896) > 0 - yyv21896 := yyv1896 - yyrl1896, yyrt1896 = z.DecInferLen(yyl1896, z.DecBasicHandle().MaxInitLen, 24) - if yyrt1896 { - if yyrl1896 <= cap(yyv1896) { - yyv1896 = yyv1896[:yyrl1896] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + 
yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1896 = make([]IngressRule, yyrl1896) + yyv1 = make([]IngressRule, yyrl1) } } else { - yyv1896 = make([]IngressRule, yyrl1896) + yyv1 = make([]IngressRule, yyrl1) } - yyc1896 = true - yyrr1896 = len(yyv1896) - if yyrg1896 { - copy(yyv1896, yyv21896) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl1896 != len(yyv1896) { - yyv1896 = yyv1896[:yyl1896] - yyc1896 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1896 := 0 - for ; yyj1896 < yyrr1896; yyj1896++ { - yyh1896.ElemContainerState(yyj1896) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1896[yyj1896] = IngressRule{} + yyv1[yyj1] = IngressRule{} } else { - yyv1897 := &yyv1896[yyj1896] - yyv1897.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1896 { - for ; yyj1896 < yyl1896; yyj1896++ { - yyv1896 = append(yyv1896, IngressRule{}) - yyh1896.ElemContainerState(yyj1896) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, IngressRule{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1896[yyj1896] = IngressRule{} + yyv1[yyj1] = IngressRule{} } else { - yyv1898 := &yyv1896[yyj1896] - yyv1898.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1896 := 0 - for ; !r.CheckBreak(); yyj1896++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1896 >= len(yyv1896) { - yyv1896 = append(yyv1896, IngressRule{}) // var yyz1896 IngressRule - yyc1896 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, IngressRule{}) // var yyz1 IngressRule + yyc1 = true } - yyh1896.ElemContainerState(yyj1896) - if yyj1896 < len(yyv1896) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1896[yyj1896] = IngressRule{} + yyv1[yyj1] = IngressRule{} } else { - yyv1899 := &yyv1896[yyj1896] - yyv1899.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -23026,17 +20313,17 @@ func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Deco } } - if yyj1896 < len(yyv1896) { - yyv1896 = yyv1896[:yyj1896] - yyc1896 = true - } else if yyj1896 == 0 && yyv1896 == nil { - yyv1896 = []IngressRule{} - yyc1896 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []IngressRule{} + yyc1 = true } } - yyh1896.End() - if yyc1896 { - *v = yyv1896 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -23045,10 +20332,10 @@ func (x codecSelfer1234) encSliceHTTPIngressPath(v []HTTPIngressPath, e *codec19 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1900 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1901 := &yyv1900 - yy1901.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -23058,83 +20345,86 @@ func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1902 := *v - yyh1902, yyl1902 := z.DecSliceHelperStart() - var yyc1902 bool - if yyl1902 == 0 { - if yyv1902 == nil { - yyv1902 = []HTTPIngressPath{} - yyc1902 = true - } else if len(yyv1902) != 0 { - yyv1902 = yyv1902[:0] - yyc1902 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var 
yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []HTTPIngressPath{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1902 > 0 { - var yyrr1902, yyrl1902 int - var yyrt1902 bool - if yyl1902 > cap(yyv1902) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg1902 := len(yyv1902) > 0 - yyv21902 := yyv1902 - yyrl1902, yyrt1902 = z.DecInferLen(yyl1902, z.DecBasicHandle().MaxInitLen, 64) - if yyrt1902 { - if yyrl1902 <= cap(yyv1902) { - yyv1902 = yyv1902[:yyrl1902] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1902 = make([]HTTPIngressPath, yyrl1902) + yyv1 = make([]HTTPIngressPath, yyrl1) } } else { - yyv1902 = make([]HTTPIngressPath, yyrl1902) + yyv1 = make([]HTTPIngressPath, yyrl1) } - yyc1902 = true - yyrr1902 = len(yyv1902) - if yyrg1902 { - copy(yyv1902, yyv21902) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl1902 != len(yyv1902) { - yyv1902 = yyv1902[:yyl1902] - yyc1902 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1902 := 0 - for ; yyj1902 < yyrr1902; yyj1902++ { - yyh1902.ElemContainerState(yyj1902) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1902[yyj1902] = HTTPIngressPath{} + yyv1[yyj1] = HTTPIngressPath{} } else { - yyv1903 := &yyv1902[yyj1902] - yyv1903.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1902 { - for ; yyj1902 < yyl1902; yyj1902++ { - yyv1902 = append(yyv1902, HTTPIngressPath{}) - yyh1902.ElemContainerState(yyj1902) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, HTTPIngressPath{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1902[yyj1902] = HTTPIngressPath{} + yyv1[yyj1] = HTTPIngressPath{} } else { - yyv1904 := &yyv1902[yyj1902] - yyv1904.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1902 := 0 - for ; !r.CheckBreak(); yyj1902++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1902 >= len(yyv1902) { - yyv1902 = append(yyv1902, HTTPIngressPath{}) // var yyz1902 HTTPIngressPath - yyc1902 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, HTTPIngressPath{}) // var yyz1 HTTPIngressPath + yyc1 = true } - yyh1902.ElemContainerState(yyj1902) - if yyj1902 < len(yyv1902) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1902[yyj1902] = HTTPIngressPath{} + yyv1[yyj1] = HTTPIngressPath{} } else { - yyv1905 := &yyv1902[yyj1902] - yyv1905.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -23142,17 +20432,17 @@ func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1 } } - if yyj1902 < len(yyv1902) { - yyv1902 = yyv1902[:yyj1902] - yyc1902 = true - } else if yyj1902 == 0 && yyv1902 == nil { - yyv1902 = []HTTPIngressPath{} - yyc1902 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []HTTPIngressPath{} + yyc1 = true } } - yyh1902.End() - if yyc1902 { - *v = yyv1902 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -23161,10 +20451,10 @@ func (x codecSelfer1234) encSliceReplicaSet(v []ReplicaSet, e *codec1978.Encoder z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for 
_, yyv1906 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1907 := &yyv1906 - yy1907.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -23174,83 +20464,86 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1908 := *v - yyh1908, yyl1908 := z.DecSliceHelperStart() - var yyc1908 bool - if yyl1908 == 0 { - if yyv1908 == nil { - yyv1908 = []ReplicaSet{} - yyc1908 = true - } else if len(yyv1908) != 0 { - yyv1908 = yyv1908[:0] - yyc1908 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ReplicaSet{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1908 > 0 { - var yyrr1908, yyrl1908 int - var yyrt1908 bool - if yyl1908 > cap(yyv1908) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg1908 := len(yyv1908) > 0 - yyv21908 := yyv1908 - yyrl1908, yyrt1908 = z.DecInferLen(yyl1908, z.DecBasicHandle().MaxInitLen, 800) - if yyrt1908 { - if yyrl1908 <= cap(yyv1908) { - yyv1908 = yyv1908[:yyrl1908] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 856) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1908 = make([]ReplicaSet, yyrl1908) + yyv1 = make([]ReplicaSet, yyrl1) } } else { - yyv1908 = make([]ReplicaSet, yyrl1908) + yyv1 = make([]ReplicaSet, yyrl1) } - yyc1908 = true - yyrr1908 = len(yyv1908) - if yyrg1908 { - copy(yyv1908, yyv21908) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl1908 != len(yyv1908) { - yyv1908 = yyv1908[:yyl1908] - yyc1908 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1908 := 0 - for ; yyj1908 < yyrr1908; yyj1908++ { - yyh1908.ElemContainerState(yyj1908) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1908[yyj1908] = ReplicaSet{} + yyv1[yyj1] = ReplicaSet{} } else { - yyv1909 := &yyv1908[yyj1908] - yyv1909.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1908 { - for ; yyj1908 < yyl1908; yyj1908++ { - yyv1908 = append(yyv1908, ReplicaSet{}) - yyh1908.ElemContainerState(yyj1908) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ReplicaSet{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1908[yyj1908] = ReplicaSet{} + yyv1[yyj1] = ReplicaSet{} } else { - yyv1910 := &yyv1908[yyj1908] - yyv1910.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1908 := 0 - for ; !r.CheckBreak(); yyj1908++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1908 >= len(yyv1908) { - yyv1908 = append(yyv1908, ReplicaSet{}) // var yyz1908 ReplicaSet - yyc1908 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ReplicaSet{}) // var yyz1 ReplicaSet + yyc1 = true } - yyh1908.ElemContainerState(yyj1908) - if yyj1908 < len(yyv1908) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1908[yyj1908] = ReplicaSet{} + yyv1[yyj1] = ReplicaSet{} } else { - yyv1911 := &yyv1908[yyj1908] - yyv1911.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -23258,17 +20551,17 @@ func (x codecSelfer1234) 
decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode } } - if yyj1908 < len(yyv1908) { - yyv1908 = yyv1908[:yyj1908] - yyc1908 = true - } else if yyj1908 == 0 && yyv1908 == nil { - yyv1908 = []ReplicaSet{} - yyc1908 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ReplicaSet{} + yyc1 = true } } - yyh1908.End() - if yyc1908 { - *v = yyv1908 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -23277,10 +20570,10 @@ func (x codecSelfer1234) encSliceReplicaSetCondition(v []ReplicaSetCondition, e z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1912 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1913 := &yyv1912 - yy1913.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -23290,83 +20583,86 @@ func (x codecSelfer1234) decSliceReplicaSetCondition(v *[]ReplicaSetCondition, d z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1914 := *v - yyh1914, yyl1914 := z.DecSliceHelperStart() - var yyc1914 bool - if yyl1914 == 0 { - if yyv1914 == nil { - yyv1914 = []ReplicaSetCondition{} - yyc1914 = true - } else if len(yyv1914) != 0 { - yyv1914 = yyv1914[:0] - yyc1914 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ReplicaSetCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1914 > 0 { - var yyrr1914, yyrl1914 int - var yyrt1914 bool - if yyl1914 > cap(yyv1914) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg1914 := len(yyv1914) > 0 - yyv21914 := yyv1914 - yyrl1914, yyrt1914 = z.DecInferLen(yyl1914, z.DecBasicHandle().MaxInitLen, 88) - if yyrt1914 { - if yyrl1914 <= cap(yyv1914) { - yyv1914 = yyv1914[:yyrl1914] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 88) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1914 = make([]ReplicaSetCondition, yyrl1914) + yyv1 = make([]ReplicaSetCondition, yyrl1) } } else { - yyv1914 = make([]ReplicaSetCondition, yyrl1914) + yyv1 = make([]ReplicaSetCondition, yyrl1) } - yyc1914 = true - yyrr1914 = len(yyv1914) - if yyrg1914 { - copy(yyv1914, yyv21914) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl1914 != len(yyv1914) { - yyv1914 = yyv1914[:yyl1914] - yyc1914 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1914 := 0 - for ; yyj1914 < yyrr1914; yyj1914++ { - yyh1914.ElemContainerState(yyj1914) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1914[yyj1914] = ReplicaSetCondition{} + yyv1[yyj1] = ReplicaSetCondition{} } else { - yyv1915 := &yyv1914[yyj1914] - yyv1915.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1914 { - for ; yyj1914 < yyl1914; yyj1914++ { - yyv1914 = append(yyv1914, ReplicaSetCondition{}) - yyh1914.ElemContainerState(yyj1914) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ReplicaSetCondition{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1914[yyj1914] = ReplicaSetCondition{} + yyv1[yyj1] = ReplicaSetCondition{} } else { - yyv1916 := &yyv1914[yyj1914] - yyv1916.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + 
yyv3.CodecDecodeSelf(d) } } } } else { - yyj1914 := 0 - for ; !r.CheckBreak(); yyj1914++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1914 >= len(yyv1914) { - yyv1914 = append(yyv1914, ReplicaSetCondition{}) // var yyz1914 ReplicaSetCondition - yyc1914 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ReplicaSetCondition{}) // var yyz1 ReplicaSetCondition + yyc1 = true } - yyh1914.ElemContainerState(yyj1914) - if yyj1914 < len(yyv1914) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1914[yyj1914] = ReplicaSetCondition{} + yyv1[yyj1] = ReplicaSetCondition{} } else { - yyv1917 := &yyv1914[yyj1914] - yyv1917.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -23374,112 +20670,113 @@ func (x codecSelfer1234) decSliceReplicaSetCondition(v *[]ReplicaSetCondition, d } } - if yyj1914 < len(yyv1914) { - yyv1914 = yyv1914[:yyj1914] - yyc1914 = true - } else if yyj1914 == 0 && yyv1914 == nil { - yyv1914 = []ReplicaSetCondition{} - yyc1914 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ReplicaSetCondition{} + yyc1 = true } } - yyh1914.End() - if yyc1914 { - *v = yyv1914 + yyh1.End() + if yyc1 { + *v = yyv1 } } -func (x codecSelfer1234) encSlicev1_Capability(v []pkg2_v1.Capability, e *codec1978.Encoder) { +func (x codecSelfer1234) encSlicev1_Capability(v []pkg4_v1.Capability, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1918 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1919 := z.EncBinary() - _ = yym1919 - if false { - } else if z.HasExtensions() && z.EncExt(yyv1918) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyv1918)) - } + yysf2 := &yyv1 + yysf2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSlicev1_Capability(v *[]pkg2_v1.Capability, d *codec1978.Decoder) { +func (x codecSelfer1234) decSlicev1_Capability(v *[]pkg4_v1.Capability, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1920 := *v - yyh1920, yyl1920 := z.DecSliceHelperStart() - var yyc1920 bool - if yyl1920 == 0 { - if yyv1920 == nil { - yyv1920 = []pkg2_v1.Capability{} - yyc1920 = true - } else if len(yyv1920) != 0 { - yyv1920 = yyv1920[:0] - yyc1920 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg4_v1.Capability{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1920 > 0 { - var yyrr1920, yyrl1920 int - var yyrt1920 bool - if yyl1920 > cap(yyv1920) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrl1920, yyrt1920 = z.DecInferLen(yyl1920, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1920 { - if yyrl1920 <= cap(yyv1920) { - yyv1920 = yyv1920[:yyrl1920] + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1920 = make([]pkg2_v1.Capability, yyrl1920) + yyv1 = make([]pkg4_v1.Capability, yyrl1) } } else { - yyv1920 = make([]pkg2_v1.Capability, yyrl1920) + yyv1 = make([]pkg4_v1.Capability, yyrl1) } - yyc1920 = true - yyrr1920 = len(yyv1920) - } else if yyl1920 != len(yyv1920) { - yyv1920 = 
yyv1920[:yyl1920] - yyc1920 = true + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1920 := 0 - for ; yyj1920 < yyrr1920; yyj1920++ { - yyh1920.ElemContainerState(yyj1920) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1920[yyj1920] = "" + yyv1[yyj1] = "" } else { - yyv1920[yyj1920] = pkg2_v1.Capability(r.DecodeString()) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1920 { - for ; yyj1920 < yyl1920; yyj1920++ { - yyv1920 = append(yyv1920, "") - yyh1920.ElemContainerState(yyj1920) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1920[yyj1920] = "" + yyv1[yyj1] = "" } else { - yyv1920[yyj1920] = pkg2_v1.Capability(r.DecodeString()) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1920 := 0 - for ; !r.CheckBreak(); yyj1920++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1920 >= len(yyv1920) { - yyv1920 = append(yyv1920, "") // var yyz1920 pkg2_v1.Capability - yyc1920 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 pkg4_v1.Capability + yyc1 = true } - yyh1920.ElemContainerState(yyj1920) - if yyj1920 < len(yyv1920) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1920[yyj1920] = "" + yyv1[yyj1] = "" } else { - yyv1920[yyj1920] = pkg2_v1.Capability(r.DecodeString()) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -23487,17 +20784,17 @@ func (x codecSelfer1234) decSlicev1_Capability(v *[]pkg2_v1.Capability, d *codec } } - if yyj1920 < len(yyv1920) { - yyv1920 = yyv1920[:yyj1920] - yyc1920 = true - } else if yyj1920 == 0 && yyv1920 == nil { - yyv1920 = []pkg2_v1.Capability{} - yyc1920 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg4_v1.Capability{} + yyc1 = true } } - yyh1920.End() - if yyc1920 { - *v = yyv1920 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -23506,9 +20803,9 @@ func (x codecSelfer1234) encSliceFSType(v []FSType, e *codec1978.Encoder) { z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1924 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1924.CodecEncodeSelf(e) + yyv1.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -23518,75 +20815,81 @@ func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1925 := *v - yyh1925, yyl1925 := z.DecSliceHelperStart() - var yyc1925 bool - if yyl1925 == 0 { - if yyv1925 == nil { - yyv1925 = []FSType{} - yyc1925 = true - } else if len(yyv1925) != 0 { - yyv1925 = yyv1925[:0] - yyc1925 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []FSType{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1925 > 0 { - var yyrr1925, yyrl1925 int - var yyrt1925 bool - if yyl1925 > cap(yyv1925) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrl1925, yyrt1925 = z.DecInferLen(yyl1925, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1925 { - if yyrl1925 <= cap(yyv1925) { - yyv1925 = yyv1925[:yyrl1925] + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 
16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1925 = make([]FSType, yyrl1925) + yyv1 = make([]FSType, yyrl1) } } else { - yyv1925 = make([]FSType, yyrl1925) + yyv1 = make([]FSType, yyrl1) } - yyc1925 = true - yyrr1925 = len(yyv1925) - } else if yyl1925 != len(yyv1925) { - yyv1925 = yyv1925[:yyl1925] - yyc1925 = true + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1925 := 0 - for ; yyj1925 < yyrr1925; yyj1925++ { - yyh1925.ElemContainerState(yyj1925) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1925[yyj1925] = "" + yyv1[yyj1] = "" } else { - yyv1925[yyj1925] = FSType(r.DecodeString()) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1925 { - for ; yyj1925 < yyl1925; yyj1925++ { - yyv1925 = append(yyv1925, "") - yyh1925.ElemContainerState(yyj1925) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1925[yyj1925] = "" + yyv1[yyj1] = "" } else { - yyv1925[yyj1925] = FSType(r.DecodeString()) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1925 := 0 - for ; !r.CheckBreak(); yyj1925++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1925 >= len(yyv1925) { - yyv1925 = append(yyv1925, "") // var yyz1925 FSType - yyc1925 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 FSType + yyc1 = true } - yyh1925.ElemContainerState(yyj1925) - if yyj1925 < len(yyv1925) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1925[yyj1925] = "" + yyv1[yyj1] = "" } else { - yyv1925[yyj1925] = FSType(r.DecodeString()) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -23594,17 +20897,17 @@ func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { } } - if yyj1925 < len(yyv1925) { - yyv1925 = yyv1925[:yyj1925] - yyc1925 = true - } else if yyj1925 == 0 && yyv1925 == nil { - yyv1925 = []FSType{} - yyc1925 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []FSType{} + yyc1 = true } } - yyh1925.End() - if yyc1925 { - *v = yyv1925 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -23613,10 +20916,10 @@ func (x codecSelfer1234) encSliceHostPortRange(v []HostPortRange, e *codec1978.E z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1929 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1930 := &yyv1929 - yy1930.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -23626,83 +20929,86 @@ func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978. 
z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1931 := *v - yyh1931, yyl1931 := z.DecSliceHelperStart() - var yyc1931 bool - if yyl1931 == 0 { - if yyv1931 == nil { - yyv1931 = []HostPortRange{} - yyc1931 = true - } else if len(yyv1931) != 0 { - yyv1931 = yyv1931[:0] - yyc1931 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []HostPortRange{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1931 > 0 { - var yyrr1931, yyrl1931 int - var yyrt1931 bool - if yyl1931 > cap(yyv1931) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg1931 := len(yyv1931) > 0 - yyv21931 := yyv1931 - yyrl1931, yyrt1931 = z.DecInferLen(yyl1931, z.DecBasicHandle().MaxInitLen, 8) - if yyrt1931 { - if yyrl1931 <= cap(yyv1931) { - yyv1931 = yyv1931[:yyrl1931] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1931 = make([]HostPortRange, yyrl1931) + yyv1 = make([]HostPortRange, yyrl1) } } else { - yyv1931 = make([]HostPortRange, yyrl1931) + yyv1 = make([]HostPortRange, yyrl1) } - yyc1931 = true - yyrr1931 = len(yyv1931) - if yyrg1931 { - copy(yyv1931, yyv21931) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl1931 != len(yyv1931) { - yyv1931 = yyv1931[:yyl1931] - yyc1931 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1931 := 0 - for ; yyj1931 < yyrr1931; yyj1931++ { - yyh1931.ElemContainerState(yyj1931) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1931[yyj1931] = HostPortRange{} + yyv1[yyj1] = HostPortRange{} } else { - yyv1932 := &yyv1931[yyj1931] - yyv1932.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1931 { - for ; yyj1931 < yyl1931; yyj1931++ { - yyv1931 = append(yyv1931, HostPortRange{}) - yyh1931.ElemContainerState(yyj1931) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, HostPortRange{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1931[yyj1931] = HostPortRange{} + yyv1[yyj1] = HostPortRange{} } else { - yyv1933 := &yyv1931[yyj1931] - yyv1933.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1931 := 0 - for ; !r.CheckBreak(); yyj1931++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1931 >= len(yyv1931) { - yyv1931 = append(yyv1931, HostPortRange{}) // var yyz1931 HostPortRange - yyc1931 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, HostPortRange{}) // var yyz1 HostPortRange + yyc1 = true } - yyh1931.ElemContainerState(yyj1931) - if yyj1931 < len(yyv1931) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1931[yyj1931] = HostPortRange{} + yyv1[yyj1] = HostPortRange{} } else { - yyv1934 := &yyv1931[yyj1931] - yyv1934.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -23710,17 +21016,17 @@ func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978. 
} } - if yyj1931 < len(yyv1931) { - yyv1931 = yyv1931[:yyj1931] - yyc1931 = true - } else if yyj1931 == 0 && yyv1931 == nil { - yyv1931 = []HostPortRange{} - yyc1931 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []HostPortRange{} + yyc1 = true } } - yyh1931.End() - if yyc1931 { - *v = yyv1931 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -23729,10 +21035,10 @@ func (x codecSelfer1234) encSliceIDRange(v []IDRange, e *codec1978.Encoder) { z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1935 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1936 := &yyv1935 - yy1936.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -23742,83 +21048,86 @@ func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1937 := *v - yyh1937, yyl1937 := z.DecSliceHelperStart() - var yyc1937 bool - if yyl1937 == 0 { - if yyv1937 == nil { - yyv1937 = []IDRange{} - yyc1937 = true - } else if len(yyv1937) != 0 { - yyv1937 = yyv1937[:0] - yyc1937 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []IDRange{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1937 > 0 { - var yyrr1937, yyrl1937 int - var yyrt1937 bool - if yyl1937 > cap(yyv1937) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg1937 := len(yyv1937) > 0 - yyv21937 := yyv1937 - yyrl1937, yyrt1937 = z.DecInferLen(yyl1937, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1937 { - if yyrl1937 <= cap(yyv1937) { - yyv1937 = yyv1937[:yyrl1937] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1937 = make([]IDRange, yyrl1937) + yyv1 = make([]IDRange, yyrl1) } } else { - yyv1937 = make([]IDRange, yyrl1937) + yyv1 = make([]IDRange, yyrl1) } - yyc1937 = true - yyrr1937 = len(yyv1937) - if yyrg1937 { - copy(yyv1937, yyv21937) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl1937 != len(yyv1937) { - yyv1937 = yyv1937[:yyl1937] - yyc1937 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1937 := 0 - for ; yyj1937 < yyrr1937; yyj1937++ { - yyh1937.ElemContainerState(yyj1937) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1937[yyj1937] = IDRange{} + yyv1[yyj1] = IDRange{} } else { - yyv1938 := &yyv1937[yyj1937] - yyv1938.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1937 { - for ; yyj1937 < yyl1937; yyj1937++ { - yyv1937 = append(yyv1937, IDRange{}) - yyh1937.ElemContainerState(yyj1937) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, IDRange{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1937[yyj1937] = IDRange{} + yyv1[yyj1] = IDRange{} } else { - yyv1939 := &yyv1937[yyj1937] - yyv1939.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1937 := 0 - for ; !r.CheckBreak(); yyj1937++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1937 >= len(yyv1937) { - yyv1937 = append(yyv1937, 
IDRange{}) // var yyz1937 IDRange - yyc1937 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, IDRange{}) // var yyz1 IDRange + yyc1 = true } - yyh1937.ElemContainerState(yyj1937) - if yyj1937 < len(yyv1937) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1937[yyj1937] = IDRange{} + yyv1[yyj1] = IDRange{} } else { - yyv1940 := &yyv1937[yyj1937] - yyv1940.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -23826,17 +21135,17 @@ func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { } } - if yyj1937 < len(yyv1937) { - yyv1937 = yyv1937[:yyj1937] - yyc1937 = true - } else if yyj1937 == 0 && yyv1937 == nil { - yyv1937 = []IDRange{} - yyc1937 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []IDRange{} + yyc1 = true } } - yyh1937.End() - if yyc1937 { - *v = yyv1937 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -23845,10 +21154,10 @@ func (x codecSelfer1234) encSlicePodSecurityPolicy(v []PodSecurityPolicy, e *cod z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1941 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1942 := &yyv1941 - yy1942.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -23858,83 +21167,86 @@ func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *co z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1943 := *v - yyh1943, yyl1943 := z.DecSliceHelperStart() - var yyc1943 bool - if yyl1943 == 0 { - if yyv1943 == nil { - yyv1943 = []PodSecurityPolicy{} - yyc1943 = true - } else if len(yyv1943) != 0 { - yyv1943 = yyv1943[:0] - yyc1943 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodSecurityPolicy{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1943 > 0 { - var yyrr1943, yyrl1943 int - var yyrt1943 bool - if yyl1943 > cap(yyv1943) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg1943 := len(yyv1943) > 0 - yyv21943 := yyv1943 - yyrl1943, yyrt1943 = z.DecInferLen(yyl1943, z.DecBasicHandle().MaxInitLen, 552) - if yyrt1943 { - if yyrl1943 <= cap(yyv1943) { - yyv1943 = yyv1943[:yyrl1943] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 552) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1943 = make([]PodSecurityPolicy, yyrl1943) + yyv1 = make([]PodSecurityPolicy, yyrl1) } } else { - yyv1943 = make([]PodSecurityPolicy, yyrl1943) + yyv1 = make([]PodSecurityPolicy, yyrl1) } - yyc1943 = true - yyrr1943 = len(yyv1943) - if yyrg1943 { - copy(yyv1943, yyv21943) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl1943 != len(yyv1943) { - yyv1943 = yyv1943[:yyl1943] - yyc1943 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1943 := 0 - for ; yyj1943 < yyrr1943; yyj1943++ { - yyh1943.ElemContainerState(yyj1943) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1943[yyj1943] = PodSecurityPolicy{} + yyv1[yyj1] = PodSecurityPolicy{} } else { - yyv1944 := &yyv1943[yyj1943] - yyv1944.CodecDecodeSelf(d) + yyv2 := 
&yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1943 { - for ; yyj1943 < yyl1943; yyj1943++ { - yyv1943 = append(yyv1943, PodSecurityPolicy{}) - yyh1943.ElemContainerState(yyj1943) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodSecurityPolicy{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1943[yyj1943] = PodSecurityPolicy{} + yyv1[yyj1] = PodSecurityPolicy{} } else { - yyv1945 := &yyv1943[yyj1943] - yyv1945.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1943 := 0 - for ; !r.CheckBreak(); yyj1943++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1943 >= len(yyv1943) { - yyv1943 = append(yyv1943, PodSecurityPolicy{}) // var yyz1943 PodSecurityPolicy - yyc1943 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodSecurityPolicy{}) // var yyz1 PodSecurityPolicy + yyc1 = true } - yyh1943.ElemContainerState(yyj1943) - if yyj1943 < len(yyv1943) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1943[yyj1943] = PodSecurityPolicy{} + yyv1[yyj1] = PodSecurityPolicy{} } else { - yyv1946 := &yyv1943[yyj1943] - yyv1946.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -23942,17 +21254,17 @@ func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *co } } - if yyj1943 < len(yyv1943) { - yyv1943 = yyv1943[:yyj1943] - yyc1943 = true - } else if yyj1943 == 0 && yyv1943 == nil { - yyv1943 = []PodSecurityPolicy{} - yyc1943 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodSecurityPolicy{} + yyc1 = true } } - yyh1943.End() - if yyc1943 { - *v = yyv1943 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -23961,10 +21273,10 @@ func (x codecSelfer1234) encSliceNetworkPolicyIngressRule(v []NetworkPolicyIngre z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1947 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1948 := &yyv1947 - yy1948.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -23974,83 +21286,86 @@ func (x codecSelfer1234) decSliceNetworkPolicyIngressRule(v *[]NetworkPolicyIngr z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1949 := *v - yyh1949, yyl1949 := z.DecSliceHelperStart() - var yyc1949 bool - if yyl1949 == 0 { - if yyv1949 == nil { - yyv1949 = []NetworkPolicyIngressRule{} - yyc1949 = true - } else if len(yyv1949) != 0 { - yyv1949 = yyv1949[:0] - yyc1949 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NetworkPolicyIngressRule{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1949 > 0 { - var yyrr1949, yyrl1949 int - var yyrt1949 bool - if yyl1949 > cap(yyv1949) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg1949 := len(yyv1949) > 0 - yyv21949 := yyv1949 - yyrl1949, yyrt1949 = z.DecInferLen(yyl1949, z.DecBasicHandle().MaxInitLen, 48) - if yyrt1949 { - if yyrl1949 <= cap(yyv1949) { - yyv1949 = yyv1949[:yyrl1949] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1949 = make([]NetworkPolicyIngressRule, yyrl1949) + yyv1 = 
make([]NetworkPolicyIngressRule, yyrl1) } } else { - yyv1949 = make([]NetworkPolicyIngressRule, yyrl1949) + yyv1 = make([]NetworkPolicyIngressRule, yyrl1) } - yyc1949 = true - yyrr1949 = len(yyv1949) - if yyrg1949 { - copy(yyv1949, yyv21949) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl1949 != len(yyv1949) { - yyv1949 = yyv1949[:yyl1949] - yyc1949 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1949 := 0 - for ; yyj1949 < yyrr1949; yyj1949++ { - yyh1949.ElemContainerState(yyj1949) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1949[yyj1949] = NetworkPolicyIngressRule{} + yyv1[yyj1] = NetworkPolicyIngressRule{} } else { - yyv1950 := &yyv1949[yyj1949] - yyv1950.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1949 { - for ; yyj1949 < yyl1949; yyj1949++ { - yyv1949 = append(yyv1949, NetworkPolicyIngressRule{}) - yyh1949.ElemContainerState(yyj1949) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NetworkPolicyIngressRule{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1949[yyj1949] = NetworkPolicyIngressRule{} + yyv1[yyj1] = NetworkPolicyIngressRule{} } else { - yyv1951 := &yyv1949[yyj1949] - yyv1951.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1949 := 0 - for ; !r.CheckBreak(); yyj1949++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1949 >= len(yyv1949) { - yyv1949 = append(yyv1949, NetworkPolicyIngressRule{}) // var yyz1949 NetworkPolicyIngressRule - yyc1949 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NetworkPolicyIngressRule{}) // var yyz1 NetworkPolicyIngressRule + yyc1 = true } - yyh1949.ElemContainerState(yyj1949) - if yyj1949 < len(yyv1949) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1949[yyj1949] = NetworkPolicyIngressRule{} + yyv1[yyj1] = NetworkPolicyIngressRule{} } else { - yyv1952 := &yyv1949[yyj1949] - yyv1952.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -24058,17 +21373,17 @@ func (x codecSelfer1234) decSliceNetworkPolicyIngressRule(v *[]NetworkPolicyIngr } } - if yyj1949 < len(yyv1949) { - yyv1949 = yyv1949[:yyj1949] - yyc1949 = true - } else if yyj1949 == 0 && yyv1949 == nil { - yyv1949 = []NetworkPolicyIngressRule{} - yyc1949 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NetworkPolicyIngressRule{} + yyc1 = true } } - yyh1949.End() - if yyc1949 { - *v = yyv1949 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -24077,10 +21392,10 @@ func (x codecSelfer1234) encSliceNetworkPolicyPort(v []NetworkPolicyPort, e *cod z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1953 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1954 := &yyv1953 - yy1954.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -24090,83 +21405,86 @@ func (x codecSelfer1234) decSliceNetworkPolicyPort(v *[]NetworkPolicyPort, d *co z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1955 := *v - yyh1955, yyl1955 := z.DecSliceHelperStart() - var yyc1955 bool - if yyl1955 == 0 { - if yyv1955 == nil { - yyv1955 = []NetworkPolicyPort{} - yyc1955 = true - } else if len(yyv1955) != 0 { - yyv1955 = yyv1955[:0] - yyc1955 = true + yyv1 := *v + yyh1, yyl1 := 
z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NetworkPolicyPort{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1955 > 0 { - var yyrr1955, yyrl1955 int - var yyrt1955 bool - if yyl1955 > cap(yyv1955) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg1955 := len(yyv1955) > 0 - yyv21955 := yyv1955 - yyrl1955, yyrt1955 = z.DecInferLen(yyl1955, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1955 { - if yyrl1955 <= cap(yyv1955) { - yyv1955 = yyv1955[:yyrl1955] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1955 = make([]NetworkPolicyPort, yyrl1955) + yyv1 = make([]NetworkPolicyPort, yyrl1) } } else { - yyv1955 = make([]NetworkPolicyPort, yyrl1955) + yyv1 = make([]NetworkPolicyPort, yyrl1) } - yyc1955 = true - yyrr1955 = len(yyv1955) - if yyrg1955 { - copy(yyv1955, yyv21955) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl1955 != len(yyv1955) { - yyv1955 = yyv1955[:yyl1955] - yyc1955 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1955 := 0 - for ; yyj1955 < yyrr1955; yyj1955++ { - yyh1955.ElemContainerState(yyj1955) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1955[yyj1955] = NetworkPolicyPort{} + yyv1[yyj1] = NetworkPolicyPort{} } else { - yyv1956 := &yyv1955[yyj1955] - yyv1956.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1955 { - for ; yyj1955 < yyl1955; yyj1955++ { - yyv1955 = append(yyv1955, NetworkPolicyPort{}) - yyh1955.ElemContainerState(yyj1955) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NetworkPolicyPort{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1955[yyj1955] = NetworkPolicyPort{} + yyv1[yyj1] = NetworkPolicyPort{} } else { - yyv1957 := &yyv1955[yyj1955] - yyv1957.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1955 := 0 - for ; !r.CheckBreak(); yyj1955++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1955 >= len(yyv1955) { - yyv1955 = append(yyv1955, NetworkPolicyPort{}) // var yyz1955 NetworkPolicyPort - yyc1955 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NetworkPolicyPort{}) // var yyz1 NetworkPolicyPort + yyc1 = true } - yyh1955.ElemContainerState(yyj1955) - if yyj1955 < len(yyv1955) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1955[yyj1955] = NetworkPolicyPort{} + yyv1[yyj1] = NetworkPolicyPort{} } else { - yyv1958 := &yyv1955[yyj1955] - yyv1958.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -24174,17 +21492,17 @@ func (x codecSelfer1234) decSliceNetworkPolicyPort(v *[]NetworkPolicyPort, d *co } } - if yyj1955 < len(yyv1955) { - yyv1955 = yyv1955[:yyj1955] - yyc1955 = true - } else if yyj1955 == 0 && yyv1955 == nil { - yyv1955 = []NetworkPolicyPort{} - yyc1955 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NetworkPolicyPort{} + yyc1 = true } } - yyh1955.End() - if yyc1955 { - *v = yyv1955 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -24193,10 +21511,10 @@ func (x codecSelfer1234) encSliceNetworkPolicyPeer(v []NetworkPolicyPeer, e *cod z, r := 
codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1959 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1960 := &yyv1959 - yy1960.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -24206,83 +21524,86 @@ func (x codecSelfer1234) decSliceNetworkPolicyPeer(v *[]NetworkPolicyPeer, d *co z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1961 := *v - yyh1961, yyl1961 := z.DecSliceHelperStart() - var yyc1961 bool - if yyl1961 == 0 { - if yyv1961 == nil { - yyv1961 = []NetworkPolicyPeer{} - yyc1961 = true - } else if len(yyv1961) != 0 { - yyv1961 = yyv1961[:0] - yyc1961 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NetworkPolicyPeer{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1961 > 0 { - var yyrr1961, yyrl1961 int - var yyrt1961 bool - if yyl1961 > cap(yyv1961) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg1961 := len(yyv1961) > 0 - yyv21961 := yyv1961 - yyrl1961, yyrt1961 = z.DecInferLen(yyl1961, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1961 { - if yyrl1961 <= cap(yyv1961) { - yyv1961 = yyv1961[:yyrl1961] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1961 = make([]NetworkPolicyPeer, yyrl1961) + yyv1 = make([]NetworkPolicyPeer, yyrl1) } } else { - yyv1961 = make([]NetworkPolicyPeer, yyrl1961) + yyv1 = make([]NetworkPolicyPeer, yyrl1) } - yyc1961 = true - yyrr1961 = len(yyv1961) - if yyrg1961 { - copy(yyv1961, yyv21961) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl1961 != len(yyv1961) { - yyv1961 = yyv1961[:yyl1961] - yyc1961 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1961 := 0 - for ; yyj1961 < yyrr1961; yyj1961++ { - yyh1961.ElemContainerState(yyj1961) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1961[yyj1961] = NetworkPolicyPeer{} + yyv1[yyj1] = NetworkPolicyPeer{} } else { - yyv1962 := &yyv1961[yyj1961] - yyv1962.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1961 { - for ; yyj1961 < yyl1961; yyj1961++ { - yyv1961 = append(yyv1961, NetworkPolicyPeer{}) - yyh1961.ElemContainerState(yyj1961) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NetworkPolicyPeer{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1961[yyj1961] = NetworkPolicyPeer{} + yyv1[yyj1] = NetworkPolicyPeer{} } else { - yyv1963 := &yyv1961[yyj1961] - yyv1963.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1961 := 0 - for ; !r.CheckBreak(); yyj1961++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1961 >= len(yyv1961) { - yyv1961 = append(yyv1961, NetworkPolicyPeer{}) // var yyz1961 NetworkPolicyPeer - yyc1961 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NetworkPolicyPeer{}) // var yyz1 NetworkPolicyPeer + yyc1 = true } - yyh1961.ElemContainerState(yyj1961) - if yyj1961 < len(yyv1961) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1961[yyj1961] = NetworkPolicyPeer{} + yyv1[yyj1] = 
NetworkPolicyPeer{} } else { - yyv1964 := &yyv1961[yyj1961] - yyv1964.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -24290,17 +21611,17 @@ func (x codecSelfer1234) decSliceNetworkPolicyPeer(v *[]NetworkPolicyPeer, d *co } } - if yyj1961 < len(yyv1961) { - yyv1961 = yyv1961[:yyj1961] - yyc1961 = true - } else if yyj1961 == 0 && yyv1961 == nil { - yyv1961 = []NetworkPolicyPeer{} - yyc1961 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NetworkPolicyPeer{} + yyc1 = true } } - yyh1961.End() - if yyc1961 { - *v = yyv1961 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -24309,10 +21630,10 @@ func (x codecSelfer1234) encSliceNetworkPolicy(v []NetworkPolicy, e *codec1978.E z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1965 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1966 := &yyv1965 - yy1966.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -24322,83 +21643,86 @@ func (x codecSelfer1234) decSliceNetworkPolicy(v *[]NetworkPolicy, d *codec1978. z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1967 := *v - yyh1967, yyl1967 := z.DecSliceHelperStart() - var yyc1967 bool - if yyl1967 == 0 { - if yyv1967 == nil { - yyv1967 = []NetworkPolicy{} - yyc1967 = true - } else if len(yyv1967) != 0 { - yyv1967 = yyv1967[:0] - yyc1967 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NetworkPolicy{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl1967 > 0 { - var yyrr1967, yyrl1967 int - var yyrt1967 bool - if yyl1967 > cap(yyv1967) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg1967 := len(yyv1967) > 0 - yyv21967 := yyv1967 - yyrl1967, yyrt1967 = z.DecInferLen(yyl1967, z.DecBasicHandle().MaxInitLen, 312) - if yyrt1967 { - if yyrl1967 <= cap(yyv1967) { - yyv1967 = yyv1967[:yyrl1967] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 312) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv1967 = make([]NetworkPolicy, yyrl1967) + yyv1 = make([]NetworkPolicy, yyrl1) } } else { - yyv1967 = make([]NetworkPolicy, yyrl1967) + yyv1 = make([]NetworkPolicy, yyrl1) } - yyc1967 = true - yyrr1967 = len(yyv1967) - if yyrg1967 { - copy(yyv1967, yyv21967) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl1967 != len(yyv1967) { - yyv1967 = yyv1967[:yyl1967] - yyc1967 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj1967 := 0 - for ; yyj1967 < yyrr1967; yyj1967++ { - yyh1967.ElemContainerState(yyj1967) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1967[yyj1967] = NetworkPolicy{} + yyv1[yyj1] = NetworkPolicy{} } else { - yyv1968 := &yyv1967[yyj1967] - yyv1968.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt1967 { - for ; yyj1967 < yyl1967; yyj1967++ { - yyv1967 = append(yyv1967, NetworkPolicy{}) - yyh1967.ElemContainerState(yyj1967) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NetworkPolicy{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1967[yyj1967] = 
NetworkPolicy{} + yyv1[yyj1] = NetworkPolicy{} } else { - yyv1969 := &yyv1967[yyj1967] - yyv1969.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj1967 := 0 - for ; !r.CheckBreak(); yyj1967++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj1967 >= len(yyv1967) { - yyv1967 = append(yyv1967, NetworkPolicy{}) // var yyz1967 NetworkPolicy - yyc1967 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NetworkPolicy{}) // var yyz1 NetworkPolicy + yyc1 = true } - yyh1967.ElemContainerState(yyj1967) - if yyj1967 < len(yyv1967) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1967[yyj1967] = NetworkPolicy{} + yyv1[yyj1] = NetworkPolicy{} } else { - yyv1970 := &yyv1967[yyj1967] - yyv1970.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -24406,16 +21730,16 @@ func (x codecSelfer1234) decSliceNetworkPolicy(v *[]NetworkPolicy, d *codec1978. } } - if yyj1967 < len(yyv1967) { - yyv1967 = yyv1967[:yyj1967] - yyc1967 = true - } else if yyj1967 == 0 && yyv1967 == nil { - yyv1967 = []NetworkPolicy{} - yyc1967 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NetworkPolicy{} + yyc1 = true } } - yyh1967.End() - if yyc1967 { - *v = yyv1967 + yyh1.End() + if yyc1 { + *v = yyv1 } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.go index 9dfbc0e5d3..3b0a496e49 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.go @@ -17,10 +17,10 @@ limitations under the License. package v1beta1 import ( - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/util/intstr" ) // describes the attributes of a scale subresource @@ -54,10 +54,10 @@ type ScaleStatus struct { // represents a scaling request for a resource. type Scale struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. // +optional @@ -70,29 +70,7 @@ type Scale struct { // Dummy definition type ReplicationControllerDummy struct { - unversioned.TypeMeta `json:",inline"` -} - -// SubresourceReference contains enough information to let you inspect or modify the referred subresource. 
-type SubresourceReference struct { - // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - // +optional - Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"` - // API version of the referent - // +optional - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` - // Subresource name of the referent - // +optional - Subresource string `json:"subresource,omitempty" protobuf:"bytes,4,opt,name=subresource"` -} - -type CPUTargetUtilization struct { - // fraction of the requested CPU that should be utilized/used, - // e.g. 70 means that 70% of the requested CPU should be in use. - TargetPercentage int32 `json:"targetPercentage" protobuf:"varint,1,opt,name=targetPercentage"` + metav1.TypeMeta `json:",inline"` } // Alpha-level support for Custom Metrics in HPA (as annotations). @@ -118,83 +96,17 @@ type CustomMetricCurrentStatusList struct { Items []CustomMetricCurrentStatus `json:"items" protobuf:"bytes,1,rep,name=items"` } -// specification of a horizontal pod autoscaler. -type HorizontalPodAutoscalerSpec struct { - // reference to Scale subresource; horizontal pod autoscaler will learn the current resource consumption from its status, - // and will set the desired number of pods by modifying its spec. - ScaleRef SubresourceReference `json:"scaleRef" protobuf:"bytes,1,opt,name=scaleRef"` - // lower limit for the number of pods that can be set by the autoscaler, default 1. - // +optional - MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` - // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. - MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; - // if not specified it defaults to the target CPU utilization at 80% of the requested resources. - // +optional - CPUUtilization *CPUTargetUtilization `json:"cpuUtilization,omitempty" protobuf:"bytes,4,opt,name=cpuUtilization"` -} - -// current status of a horizontal pod autoscaler -type HorizontalPodAutoscalerStatus struct { - // most recent generation observed by this autoscaler. - // +optional - ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - - // last time the HorizontalPodAutoscaler scaled the number of pods; - // used by the autoscaler to control how often the number of pods is changed. - // +optional - LastScaleTime *unversioned.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` - - // current number of replicas of pods managed by this autoscaler. - CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` - - // desired number of replicas of pods managed by this autoscaler. - DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` - - // current average CPU utilization over all pods, represented as a percentage of requested CPU, - // e.g. 70 means that an average pod is using now 70% of its requested CPU. 
- // +optional - CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty" protobuf:"varint,5,opt,name=currentCPUUtilizationPercentage"` -} - -// configuration of a horizontal pod autoscaler. -type HorizontalPodAutoscaler struct { - unversioned.TypeMeta `json:",inline"` - // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. - // +optional - Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // current information about the autoscaler. - // +optional - Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// list of horizontal pod autoscaler objects. -type HorizontalPodAutoscalerList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // list of horizontal pod autoscaler objects. - Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` -} - // +genclient=true // +nonNamespaced=true // A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource // types to the API. It consists of one or more Versions of the api. type ThirdPartyResource struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object metadata // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Description is the description of this object. // +optional @@ -207,11 +119,11 @@ type ThirdPartyResource struct { // ThirdPartyResourceList is a list of ThirdPartyResources. type ThirdPartyResourceList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of ThirdPartyResources. Items []ThirdPartyResource `json:"items" protobuf:"bytes,2,rep,name=items"` @@ -226,10 +138,10 @@ type APIVersion struct { // An internal object, used for versioned storage in etcd. Not exposed to the end user. type ThirdPartyResourceData struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object metadata. // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Data is the raw JSON data for this data. // +optional @@ -240,10 +152,10 @@ type ThirdPartyResourceData struct { // Deployment enables declarative updates for Pods and ReplicaSets. type Deployment struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object metadata. // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the Deployment. 
// +optional @@ -264,7 +176,7 @@ type DeploymentSpec struct { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // +optional - Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // Template describes the pods that will be created. Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` @@ -305,7 +217,7 @@ type DeploymentSpec struct { // DeploymentRollback stores the information required to rollback a deployment. type DeploymentRollback struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Required: This must match the Name of a deployment. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // The annotations to be updated to a deployment @@ -357,7 +269,7 @@ const ( type RollingUpdateDeployment struct { // The maximum number of pods that can be unavailable during the update. // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // Absolute number is calculated from percentage by rounding up. + // Absolute number is calculated from percentage by rounding down. // This can not be 0 if MaxSurge is 0. // By default, a fixed value of 1 is used. // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods @@ -397,6 +309,10 @@ type DeploymentStatus struct { // +optional UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` + // Total number of ready pods targeted by this deployment. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. // +optional AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` @@ -433,9 +349,9 @@ type DeploymentCondition struct { // Status of the condition, one of True, False, Unknown. Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` // The last time this condition was updated. - LastUpdateTime unversioned.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` // Last time the condition transitioned from one status to another. - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"` + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"` // The reason for the condition's last transition. Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` // A human readable message indicating details about the transition. @@ -444,29 +360,28 @@ type DeploymentCondition struct { // DeploymentList is a list of Deployments. type DeploymentList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of Deployments. 
Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` } -// TODO(madhusudancs): Uncomment while implementing DaemonSet updates. -/* Commenting out for v1.2. We are planning to bring these types back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting the types out. type DaemonSetUpdateStrategy struct { - // Type of daemon set update. Only "RollingUpdate" is supported at this time. Default is RollingUpdate. -// +optional - Type DaemonSetUpdateStrategyType `json:"type,omitempty"` + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". + // Default is OnDelete. + // +optional + Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` - // Rolling update config params. Present only if DaemonSetUpdateStrategy = - // RollingUpdate. + // Rolling update config params. Present only if type = "RollingUpdate". //--- // TODO: Update this to follow our convention for oneOf, whatever we decide it // to be. Same as DeploymentStrategy.RollingUpdate. -// +optional - RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty"` + // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` } type DaemonSetUpdateStrategyType string @@ -474,6 +389,9 @@ type DaemonSetUpdateStrategyType string const ( // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" + + // Replace the old daemons only when it's killed + OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete" ) // Spec to control the desired behavior of daemon set rolling update. @@ -484,104 +402,109 @@ type RollingUpdateDaemonSet struct { // number is calculated from percentage by rounding up. // This cannot be 0. // Default value is 1. - // Example: when this is set to 30%, 30% of the currently running DaemonSet - // pods can be stopped for an update at any given time. The update starts - // by stopping at most 30% of the currently running DaemonSet pods and then - // brings up new DaemonSet pods in their place. Once the new pods are ready, - // it then proceeds onto other DaemonSet pods, thus ensuring that at least - // 70% of original number of DaemonSet pods are available at all times - // during the update. -// +optional - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` - - // Minimum number of seconds for which a newly created DaemonSet pod should - // be ready without any of its container crashing, for it to be considered - // available. Defaults to 0 (pod will be considered available as soon as it - // is ready). -// +optional - MinReadySeconds int32 `json:"minReadySeconds,omitempty"` + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. 
+ // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` } -*/ // DaemonSetSpec is the specification of a daemon set. type DaemonSetSpec struct { - // Selector is a label query over pods that are managed by the daemon set. + // A label query over pods that are managed by the daemon set. // Must match in order to be controlled. // If empty, defaulted to labels on Pod template. // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors // +optional - Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` - // Template is the object that describes the pod that will be created. + // An object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` - // TODO(madhusudancs): Uncomment while implementing DaemonSet updates. - /* Commenting out for v1.2. We are planning to bring these fields back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting these fields out. - // Update strategy to replace existing DaemonSet pods with new pods. + // An update strategy to replace existing DaemonSet pods with new pods. + // +optional + UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"` + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). // +optional - UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty"` + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` - // Label key that is added to DaemonSet pods to distinguish between old and - // new pod templates during DaemonSet update. - // Users can set this to an empty string to indicate that the system should - // not add any label. If unspecified, system uses - // DefaultDaemonSetUniqueLabelKey("daemonset.kubernetes.io/podTemplateHash"). - // Value of this key is hash of DaemonSetSpec.PodTemplateSpec. - // No label is added if this is set to empty string. + // A sequence number representing a specific generation of the template. + // Populated by the system. It can be set only during the creation. // +optional - UniqueLabelKey *string `json:"uniqueLabelKey,omitempty"` - */ + TemplateGeneration int64 `json:"templateGeneration,omitempty" protobuf:"varint,5,opt,name=templateGeneration"` } -const ( - // DefaultDaemonSetUniqueLabelKey is the default key of the labels that is added - // to daemon set pods to distinguish between old and new pod templates during - // DaemonSet update. See DaemonSetSpec's UniqueLabelKey field for more information. - DefaultDaemonSetUniqueLabelKey string = "daemonset.kubernetes.io/podTemplateHash" -) - // DaemonSetStatus represents the current status of a daemon set. 
type DaemonSetStatus struct { - // CurrentNumberScheduled is the number of nodes that are running at least 1 + // The number of nodes that are running at least 1 // daemon pod and are supposed to run the daemon pod. // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"` - // NumberMisscheduled is the number of nodes that are running the daemon pod, but are + // The number of nodes that are running the daemon pod, but are // not supposed to run the daemon pod. // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"` - // DesiredNumberScheduled is the total number of nodes that should be running the daemon + // The total number of nodes that should be running the daemon // pod (including nodes correctly running the daemon pod). // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"` - // NumberReady is the number of nodes that should be running the daemon pod and have one + // The number of nodes that should be running the daemon pod and have one // or more of the daemon pod running and ready. NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"` + + // The most recent generation observed by the daemon set controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"` + + // The total number of nodes that are running updated daemon pod + // +optional + UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"` + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"` + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"` } // +genclient=true // DaemonSet represents the configuration of a daemon set. type DaemonSet struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec defines the desired behavior of this daemon set. + // The desired behavior of this daemon set. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // +optional Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is the current status of this daemon set. This data may be + // The current status of this daemon set. This data may be // out of date by some window of time. // Populated by the system. // Read-only. 
@@ -590,25 +513,32 @@ type DaemonSet struct { Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } +const ( + // DaemonSetTemplateGenerationKey is the key of the labels that is added + // to daemon set pods to distinguish between old and new pod templates + // during DaemonSet template update. + DaemonSetTemplateGenerationKey string = "pod-template-generation" +) + // DaemonSetList is a collection of daemon sets. type DaemonSetList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of daemon sets. + // A list of daemon sets. Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"` } // ThirdPartyResrouceDataList is a list of ThirdPartyResourceData. type ThirdPartyResourceDataList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of ThirdpartyResourceData. Items []ThirdPartyResourceData `json:"items" protobuf:"bytes,2,rep,name=items"` @@ -616,159 +546,16 @@ type ThirdPartyResourceDataList struct { // +genclient=true -// Job represents the configuration of a single job. -// DEPRECATED: extensions/v1beta1.Job is deprecated, use batch/v1.Job instead. -type Job struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// JobList is a collection of jobs. -// DEPRECATED: extensions/v1beta1.JobList is deprecated, use batch/v1.JobList instead. -type JobList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of Job. - Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// JobSpec describes how the job execution will look like. -type JobSpec struct { - - // Parallelism specifies the maximum desired number of pods the job should - // run at any given time. The actual number of pods running in steady state will - // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), - // i.e. when the work left to do is less than max parallelism. 
- // More info: http://kubernetes.io/docs/user-guide/jobs - // +optional - Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` - - // Completions specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any - // pod signals the success of all pods, and allows parallelism to have any positive - // value. Setting to 1 means that parallelism is limited to 1 and the success of that - // pod signals the success of the job. - // More info: http://kubernetes.io/docs/user-guide/jobs - // +optional - Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"` - - // Optional duration in seconds relative to the startTime that the job may be active - // before the system tries to terminate it; value must be positive integer - // +optional - ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"` - - // Selector is a label query over pods that should match the pod count. - // Normally, the system sets this field for you. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors - // +optional - Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` - - // AutoSelector controls generation of pod labels and pod selectors. - // It was not present in the original extensions/v1beta1 Job definition, but exists - // to allow conversion from batch/v1 Jobs, where it corresponds to, but has the opposite - // meaning as, ManualSelector. - // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md - // +optional - AutoSelector *bool `json:"autoSelector,omitempty" protobuf:"varint,5,opt,name=autoSelector"` - - // Template is the object that describes the pod that will be created when - // executing a job. - // More info: http://kubernetes.io/docs/user-guide/jobs - Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` -} - -// JobStatus represents the current state of a Job. -type JobStatus struct { - - // Conditions represent the latest available observations of an object's current state. - // More info: http://kubernetes.io/docs/user-guide/jobs - // +optional - Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` - - // StartTime represents time when the job was acknowledged by the Job Manager. - // It is not guaranteed to be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - // +optional - StartTime *unversioned.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` - - // CompletionTime represents time when the job was completed. It is not guaranteed to - // be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - // +optional - CompletionTime *unversioned.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` - - // Active is the number of actively running pods. - // +optional - Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` - - // Succeeded is the number of pods which reached Phase Succeeded. - // +optional - Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"` - - // Failed is the number of pods which reached Phase Failed. 
- // +optional - Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` -} - -type JobConditionType string - -// These are valid conditions of a job. -const ( - // JobComplete means the job has completed its execution. - JobComplete JobConditionType = "Complete" - // JobFailed means the job has failed its execution. - JobFailed JobConditionType = "Failed" -) - -// JobCondition describes current state of a job. -type JobCondition struct { - // Type of job condition, Complete or Failed. - Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` - // Last time the condition was checked. - // +optional - LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` - // Last time the condition transit from one status to another. - // +optional - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` - // (brief) reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - // Human readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` -} - -// +genclient=true - // Ingress is a collection of rules that allow inbound connections to reach the // endpoints defined by a backend. An Ingress can be configured to give services // externally-reachable urls, load balance traffic, terminate SSL, offer name // based virtual hosting etc. type Ingress struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec is the desired state of the Ingress. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -783,11 +570,11 @@ type Ingress struct { // IngressList is a collection of Ingress. type IngressList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of Ingress. Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` @@ -923,26 +710,17 @@ type IngressBackend struct { ServicePort intstr.IntOrString `json:"servicePort" protobuf:"bytes,2,opt,name=servicePort"` } -// ExportOptions is the query options to the standard REST get call. -type ExportOptions struct { - unversioned.TypeMeta `json:",inline"` - // Should this value be exported. Export strips fields that a user can not specify. - Export bool `json:"export" protobuf:"varint,1,opt,name=export"` - // Should the export be exact. 
Exact export maintains cluster-specific fields like 'Namespace' - Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"` -} - // +genclient=true // ReplicaSet represents the configuration of a ReplicaSet. type ReplicaSet struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the ReplicaSet. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status @@ -960,11 +738,11 @@ type ReplicaSet struct { // ReplicaSetList is a collection of ReplicaSets. type ReplicaSetList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ReplicaSets. // More info: http://kubernetes.io/docs/user-guide/replication-controller @@ -991,7 +769,7 @@ type ReplicaSetSpec struct { // Label keys and values that must match in order to be controlled by this replica set. // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors // +optional - Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // Template is the object that describes the pod that will be created if // insufficient replicas are detected. @@ -1045,7 +823,7 @@ type ReplicaSetCondition struct { Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` // The last time the condition transitioned from one status to another. // +optional - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` // The reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` @@ -1060,11 +838,11 @@ type ReplicaSetCondition struct { // Pod Security Policy governs the ability to make requests that affect the Security Context // that will be applied to a pod and container. type PodSecurityPolicy struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec defines the policy enforced. // +optional @@ -1257,22 +1035,22 @@ const ( // Pod Security Policy List is a list of PodSecurityPolicy objects. type PodSecurityPolicyList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. 
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of schema objects. Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } type NetworkPolicy struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior for this NetworkPolicy. // +optional @@ -1285,7 +1063,7 @@ type NetworkPolicySpec struct { // same set of pods. In this case, the ingress rules for each are combined additively. // This field is NOT optional and follows standard label selector semantics. // An empty podSelector matches all pods in this namespace. - PodSelector unversioned.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"` + PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"` // List of ingress rules to be applied to the selected pods. // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, @@ -1345,7 +1123,7 @@ type NetworkPolicyPeer struct { // If not provided, this selector selects no pods. // If present but empty, this selector selects all pods in this namespace. // +optional - PodSelector *unversioned.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"` + PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"` // Selects Namespaces using cluster scoped-labels. This // matches all pods in all namespaces selected by this label selector. @@ -1353,16 +1131,16 @@ type NetworkPolicyPeer struct { // If omitted, this selector selects no namespaces. // If present but empty, this selector selects all namespaces. // +optional - NamespaceSelector *unversioned.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"` + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"` } // Network Policy List is a list of NetworkPolicy objects. type NetworkPolicyList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of schema objects. 
Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go index 1fd1fd5beb..587615590e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go @@ -36,14 +36,6 @@ func (APIVersion) SwaggerDoc() map[string]string { return map_APIVersion } -var map_CPUTargetUtilization = map[string]string{ - "targetPercentage": "fraction of the requested CPU that should be utilized/used, e.g. 70 means that 70% of the requested CPU should be in use.", -} - -func (CPUTargetUtilization) SwaggerDoc() map[string]string { - return map_CPUTargetUtilization -} - var map_CustomMetricCurrentStatus = map[string]string{ "name": "Custom Metric name.", "value": "Custom Metric value (average).", @@ -66,8 +58,8 @@ func (CustomMetricTarget) SwaggerDoc() map[string]string { var map_DaemonSet = map[string]string{ "": "DaemonSet represents the configuration of a daemon set.", "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the desired behavior of this daemon set. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is the current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "spec": "The desired behavior of this daemon set. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", } func (DaemonSet) SwaggerDoc() map[string]string { @@ -77,7 +69,7 @@ func (DaemonSet) SwaggerDoc() map[string]string { var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "items": "Items is a list of daemon sets.", + "items": "A list of daemon sets.", } func (DaemonSetList) SwaggerDoc() map[string]string { @@ -85,9 +77,12 @@ func (DaemonSetList) SwaggerDoc() map[string]string { } var map_DaemonSetSpec = map[string]string{ - "": "DaemonSetSpec is the specification of a daemon set.", - "selector": "Selector is a label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", - "template": "Template is the object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template", + "": "DaemonSetSpec is the specification of a daemon set.", + "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. 
More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template", + "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", + "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", + "templateGeneration": "A sequence number representing a specific generation of the template. Populated by the system. It can be set only during the creation.", } func (DaemonSetSpec) SwaggerDoc() map[string]string { @@ -96,16 +91,29 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string { var map_DaemonSetStatus = map[string]string{ "": "DaemonSetStatus represents the current status of a daemon set.", - "currentNumberScheduled": "CurrentNumberScheduled is the number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md", - "numberMisscheduled": "NumberMisscheduled is the number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md", - "desiredNumberScheduled": "DesiredNumberScheduled is the total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md", - "numberReady": "NumberReady is the number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", + "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md", + "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md", + "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md", + "numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", + "observedGeneration": "The most recent generation observed by the daemon set controller.", + "updatedNumberScheduled": "The total number of nodes that are running updated daemon pod", + "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", + "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", } func (DaemonSetStatus) SwaggerDoc() map[string]string { return map_DaemonSetStatus } +var map_DaemonSetUpdateStrategy = map[string]string{ + "type": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is OnDelete.", + "rollingUpdate": "Rolling update config params. 
Present only if type = \"RollingUpdate\".", +} + +func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string { + return map_DaemonSetUpdateStrategy +} + var map_Deployment = map[string]string{ "": "Deployment enables declarative updates for Pods and ReplicaSets.", "metadata": "Standard object metadata.", @@ -174,6 +182,7 @@ var map_DeploymentStatus = map[string]string{ "observedGeneration": "The generation observed by the deployment controller.", "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "readyReplicas": "Total number of ready pods targeted by this deployment.", "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", "unavailableReplicas": "Total number of unavailable pods targeted by this deployment.", "conditions": "Represents the latest available observations of a deployment's current state.", @@ -193,16 +202,6 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { return map_DeploymentStrategy } -var map_ExportOptions = map[string]string{ - "": "ExportOptions is the query options to the standard REST get call.", - "export": "Should this value be exported. Export strips fields that a user can not specify.", - "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'", -} - -func (ExportOptions) SwaggerDoc() map[string]string { - return map_ExportOptions -} - var map_FSGroupStrategyOptions = map[string]string{ "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.", "rule": "Rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", @@ -232,52 +231,6 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { return map_HTTPIngressRuleValue } -var map_HorizontalPodAutoscaler = map[string]string{ - "": "configuration of a horizontal pod autoscaler.", - "metadata": "Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "behaviour of autoscaler. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", - "status": "current information about the autoscaler.", -} - -func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscaler -} - -var map_HorizontalPodAutoscalerList = map[string]string{ - "": "list of horizontal pod autoscaler objects.", - "metadata": "Standard list metadata.", - "items": "list of horizontal pod autoscaler objects.", -} - -func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerList -} - -var map_HorizontalPodAutoscalerSpec = map[string]string{ - "": "specification of a horizontal pod autoscaler.", - "scaleRef": "reference to Scale subresource; horizontal pod autoscaler will learn the current resource consumption from its status, and will set the desired number of pods by modifying its spec.", - "minReplicas": "lower limit for the number of pods that can be set by the autoscaler, default 1.", - "maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", - "cpuUtilization": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified it defaults to the target CPU utilization at 80% of the requested resources.", -} - -func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerSpec -} - -var map_HorizontalPodAutoscalerStatus = map[string]string{ - "": "current status of a horizontal pod autoscaler", - "observedGeneration": "most recent generation observed by this autoscaler.", - "lastScaleTime": "last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.", - "currentReplicas": "current number of replicas of pods managed by this autoscaler.", - "desiredReplicas": "desired number of replicas of pods managed by this autoscaler.", - "currentCPUUtilizationPercentage": "current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.", -} - -func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerStatus -} - var map_HostPortRange = map[string]string{ "": "Host Port Range defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined.", "min": "min is the start of the range, inclusive.", @@ -376,69 +329,6 @@ func (IngressTLS) SwaggerDoc() map[string]string { return map_IngressTLS } -var map_Job = map[string]string{ - "": "Job represents the configuration of a single job. DEPRECATED: extensions/v1beta1.Job is deprecated, use batch/v1.Job instead.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is a structure describing current status of a job. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", -} - -func (Job) SwaggerDoc() map[string]string { - return map_Job -} - -var map_JobCondition = map[string]string{ - "": "JobCondition describes current state of a job.", - "type": "Type of job condition, Complete or Failed.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastProbeTime": "Last time the condition was checked.", - "lastTransitionTime": "Last time the condition transit from one status to another.", - "reason": "(brief) reason for the condition's last transition.", - "message": "Human readable message indicating details about last transition.", -} - -func (JobCondition) SwaggerDoc() map[string]string { - return map_JobCondition -} - -var map_JobList = map[string]string{ - "": "JobList is a collection of jobs. DEPRECATED: extensions/v1beta1.JobList is deprecated, use batch/v1.JobList instead.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "items": "Items is the list of Job.", -} - -func (JobList) SwaggerDoc() map[string]string { - return map_JobList -} - -var map_JobSpec = map[string]string{ - "": "JobSpec describes how the job execution will look like.", - "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://kubernetes.io/docs/user-guide/jobs", - "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://kubernetes.io/docs/user-guide/jobs", - "activeDeadlineSeconds": "Optional duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer", - "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", - "autoSelector": "AutoSelector controls generation of pod labels and pod selectors. It was not present in the original extensions/v1beta1 Job definition, but exists to allow conversion from batch/v1 Jobs, where it corresponds to, but has the opposite meaning as, ManualSelector. More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md", - "template": "Template is the object that describes the pod that will be created when executing a job. More info: http://kubernetes.io/docs/user-guide/jobs", -} - -func (JobSpec) SwaggerDoc() map[string]string { - return map_JobSpec -} - -var map_JobStatus = map[string]string{ - "": "JobStatus represents the current state of a Job.", - "conditions": "Conditions represent the latest available observations of an object's current state. More info: http://kubernetes.io/docs/user-guide/jobs", - "startTime": "StartTime represents time when the job was acknowledged by the Job Manager. It is not guaranteed to be set in happens-before order across separate operations. 
It is represented in RFC3339 form and is in UTC.", - "completionTime": "CompletionTime represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", - "active": "Active is the number of actively running pods.", - "succeeded": "Succeeded is the number of pods which reached Phase Succeeded.", - "failed": "Failed is the number of pods which reached Phase Failed.", -} - -func (JobStatus) SwaggerDoc() map[string]string { - return map_JobStatus -} - var map_NetworkPolicy = map[string]string{ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", "spec": "Specification of the desired behavior for this NetworkPolicy.", @@ -613,9 +503,18 @@ func (RollbackConfig) SwaggerDoc() map[string]string { return map_RollbackConfig } +var map_RollingUpdateDaemonSet = map[string]string{ + "": "Spec to control the desired behavior of daemon set rolling update.", + "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", +} + +func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { + return map_RollingUpdateDaemonSet +} + var map_RollingUpdateDeployment = map[string]string{ "": "Spec to control the desired behavior of rolling update.", - "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0 if MaxSurge is 0. By default, a fixed value of 1 is used. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", + "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. By default, a fixed value of 1 is used. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. 
Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. By default, a value of 1 is used. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", } @@ -674,18 +573,6 @@ func (ScaleStatus) SwaggerDoc() map[string]string { return map_ScaleStatus } -var map_SubresourceReference = map[string]string{ - "": "SubresourceReference contains enough information to let you inspect or modify the referred subresource.", - "kind": "Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "apiVersion": "API version of the referent", - "subresource": "Subresource name of the referent", -} - -func (SubresourceReference) SwaggerDoc() map[string]string { - return map_SubresourceReference -} - var map_SupplementalGroupsStrategyOptions = map[string]string{ "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.", "rule": "Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go index 2a56a2384e..c6ccd16ec0 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,15 +21,13 @@ limitations under the License. 
package v1beta1 import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" - autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" - batch "k8s.io/kubernetes/pkg/apis/batch" + api_v1 "k8s.io/kubernetes/pkg/api/v1" extensions "k8s.io/kubernetes/pkg/apis/extensions" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" - intstr "k8s.io/kubernetes/pkg/util/intstr" unsafe "unsafe" ) @@ -59,6 +57,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec, Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus, Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus, + Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy, + Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy, Convert_v1beta1_Deployment_To_extensions_Deployment, Convert_extensions_Deployment_To_v1beta1_Deployment, Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition, @@ -73,22 +73,12 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus, Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy, Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy, - Convert_v1beta1_ExportOptions_To_api_ExportOptions, - Convert_api_ExportOptions_To_v1beta1_ExportOptions, Convert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions, Convert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions, Convert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath, Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath, Convert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue, Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue, - Convert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler, - Convert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler, - Convert_v1beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList, - Convert_autoscaling_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList, - Convert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec, - Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec, - Convert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus, - Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus, Convert_v1beta1_HostPortRange_To_extensions_HostPortRange, Convert_extensions_HostPortRange_To_v1beta1_HostPortRange, Convert_v1beta1_IDRange_To_extensions_IDRange, @@ -109,16 +99,6 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_extensions_IngressStatus_To_v1beta1_IngressStatus, Convert_v1beta1_IngressTLS_To_extensions_IngressTLS, Convert_extensions_IngressTLS_To_v1beta1_IngressTLS, - Convert_v1beta1_Job_To_batch_Job, - Convert_batch_Job_To_v1beta1_Job, - Convert_v1beta1_JobCondition_To_batch_JobCondition, - Convert_batch_JobCondition_To_v1beta1_JobCondition, - Convert_v1beta1_JobList_To_batch_JobList, - Convert_batch_JobList_To_v1beta1_JobList, - Convert_v1beta1_JobSpec_To_batch_JobSpec, - 
Convert_batch_JobSpec_To_v1beta1_JobSpec, - Convert_v1beta1_JobStatus_To_batch_JobStatus, - Convert_batch_JobStatus_To_v1beta1_JobStatus, Convert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy, Convert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy, Convert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule, @@ -151,6 +131,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy, Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig, Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig, + Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet, + Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet, Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, Convert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions, @@ -224,7 +206,11 @@ func Convert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCur } func autoConvert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in *extensions.CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, s conversion.Scope) error { - out.Items = *(*[]CustomMetricCurrentStatus)(unsafe.Pointer(&in.Items)) + if in.Items == nil { + out.Items = make([]CustomMetricCurrentStatus, 0) + } else { + out.Items = *(*[]CustomMetricCurrentStatus)(unsafe.Pointer(&in.Items)) + } return nil } @@ -262,7 +248,11 @@ func Convert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList } func autoConvert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in *extensions.CustomMetricTargetList, out *CustomMetricTargetList, s conversion.Scope) error { - out.Items = *(*[]CustomMetricTarget)(unsafe.Pointer(&in.Items)) + if in.Items == nil { + out.Items = make([]CustomMetricTarget, 0) + } else { + out.Items = *(*[]CustomMetricTarget)(unsafe.Pointer(&in.Items)) + } return nil } @@ -271,10 +261,7 @@ func Convert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList } func autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -289,10 +276,7 @@ func Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *DaemonSet, out *exten } func autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, out *DaemonSet, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -337,7 +321,7 @@ func autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extension } } } else { - out.Items = nil + out.Items = make([]DaemonSet, 0) } return nil } @@ -347,10 +331,15 @@ func Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.Da } func autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { - out.Selector = (*unversioned.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } + if err := Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.TemplateGeneration = in.TemplateGeneration return nil } @@ -359,10 +348,15 @@ func Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *DaemonSetSpec } func autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *DaemonSetSpec, s conversion.Scope) error { - out.Selector = (*unversioned.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } + if err := Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.TemplateGeneration = in.TemplateGeneration return nil } @@ -375,6 +369,10 @@ func autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *Daemo out.NumberMisscheduled = in.NumberMisscheduled out.DesiredNumberScheduled = in.DesiredNumberScheduled out.NumberReady = in.NumberReady + out.ObservedGeneration = in.ObservedGeneration + out.UpdatedNumberScheduled = in.UpdatedNumberScheduled + out.NumberAvailable = in.NumberAvailable + out.NumberUnavailable = in.NumberUnavailable return nil } @@ -387,6 +385,10 @@ func autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *exten out.NumberMisscheduled = in.NumberMisscheduled out.DesiredNumberScheduled = in.DesiredNumberScheduled out.NumberReady = in.NumberReady + out.ObservedGeneration = in.ObservedGeneration + out.UpdatedNumberScheduled = in.UpdatedNumberScheduled + out.NumberAvailable = in.NumberAvailable + out.NumberUnavailable = in.NumberUnavailable return nil } @@ -394,11 +396,44 @@ func Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *extension return autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in, out, s) } -func autoConvert_v1beta1_Deployment_To_extensions_Deployment(in *Deployment, out *extensions.Deployment, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err +func autoConvert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { + out.Type = extensions.DaemonSetUpdateStrategyType(in.Type) + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(extensions.RollingUpdateDaemonSet) + if err := Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil } + return nil +} + +func Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in, out, s) +} + +func autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *DaemonSetUpdateStrategy, s conversion.Scope) error { + out.Type = DaemonSetUpdateStrategyType(in.Type) + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDaemonSet) + if err := Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *DaemonSetUpdateStrategy, s conversion.Scope) error { + return autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in, out, s) +} + +func autoConvert_v1beta1_Deployment_To_extensions_Deployment(in *Deployment, out *extensions.Deployment, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -413,10 +448,7 @@ func Convert_v1beta1_Deployment_To_extensions_Deployment(in *Deployment, out *ex } func autoConvert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *Deployment, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -446,7 +478,7 @@ func Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in *D func autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in *extensions.DeploymentCondition, out *DeploymentCondition, s conversion.Scope) error { out.Type = DeploymentConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) + out.Status = api_v1.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason @@ -489,7 +521,7 @@ func autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensi } } } else { - out.Items = nil + out.Items = make([]Deployment, 0) } return nil } @@ -525,11 +557,11 @@ func Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *ext } func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { - if err := api.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } - out.Selector = (*unversioned.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { @@ -544,11 +576,11 @@ func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *Deploym } func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error { - if err := api.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } - out.Selector = (*unversioned.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { @@ -566,6 +598,7 @@ func autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *Dep out.ObservedGeneration = in.ObservedGeneration out.Replicas = in.Replicas out.UpdatedReplicas = in.UpdatedReplicas + out.ReadyReplicas = in.ReadyReplicas out.AvailableReplicas = in.AvailableReplicas out.UnavailableReplicas = in.UnavailableReplicas out.Conditions = *(*[]extensions.DeploymentCondition)(unsafe.Pointer(&in.Conditions)) @@ -580,6 +613,7 @@ func autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *ext out.ObservedGeneration = in.ObservedGeneration out.Replicas = in.Replicas out.UpdatedReplicas = in.UpdatedReplicas + out.ReadyReplicas = in.ReadyReplicas 
out.AvailableReplicas = in.AvailableReplicas out.UnavailableReplicas = in.UnavailableReplicas out.Conditions = *(*[]DeploymentCondition)(unsafe.Pointer(&in.Conditions)) @@ -618,26 +652,6 @@ func autoConvert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in return nil } -func autoConvert_v1beta1_ExportOptions_To_api_ExportOptions(in *ExportOptions, out *api.ExportOptions, s conversion.Scope) error { - out.Export = in.Export - out.Exact = in.Exact - return nil -} - -func Convert_v1beta1_ExportOptions_To_api_ExportOptions(in *ExportOptions, out *api.ExportOptions, s conversion.Scope) error { - return autoConvert_v1beta1_ExportOptions_To_api_ExportOptions(in, out, s) -} - -func autoConvert_api_ExportOptions_To_v1beta1_ExportOptions(in *api.ExportOptions, out *ExportOptions, s conversion.Scope) error { - out.Export = in.Export - out.Exact = in.Exact - return nil -} - -func Convert_api_ExportOptions_To_v1beta1_ExportOptions(in *api.ExportOptions, out *ExportOptions, s conversion.Scope) error { - return autoConvert_api_ExportOptions_To_v1beta1_ExportOptions(in, out, s) -} - func autoConvert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(in *FSGroupStrategyOptions, out *extensions.FSGroupStrategyOptions, s conversion.Scope) error { out.Rule = extensions.FSGroupStrategyType(in.Rule) out.Ranges = *(*[]extensions.IDRange)(unsafe.Pointer(&in.Ranges)) @@ -692,130 +706,16 @@ func Convert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in } func autoConvert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in *extensions.HTTPIngressRuleValue, out *HTTPIngressRuleValue, s conversion.Scope) error { - out.Paths = *(*[]HTTPIngressPath)(unsafe.Pointer(&in.Paths)) - return nil -} - -func Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in *extensions.HTTPIngressRuleValue, out *HTTPIngressRuleValue, s conversion.Scope) error { - return autoConvert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in, out, s) -} - -func autoConvert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { - return autoConvert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s) -} - -func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { - return autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(in, out, s) -} - -func autoConvert_v1beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]autoscaling.HorizontalPodAutoscaler, len(*in)) - for i := range *in { - if err := Convert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } + if in.Paths == nil { + out.Paths = make([]HTTPIngressPath, 0) } else { - out.Items = nil + out.Paths = *(*[]HTTPIngressPath)(unsafe.Pointer(&in.Paths)) } return nil } -func Convert_v1beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { - return autoConvert_v1beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in, out, s) -} - -func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]HorizontalPodAutoscaler, len(*in)) - for i := range *in { - if err := Convert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_autoscaling_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { - return autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList(in, out, s) -} - -func autoConvert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { - // WARNING: in.ScaleRef requires manual conversion: does not exist in peer-type - out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) - out.MaxReplicas = in.MaxReplicas - // WARNING: in.CPUUtilization requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { - // WARNING: in.ScaleTargetRef requires manual conversion: does not exist in peer-type - out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) - out.MaxReplicas = in.MaxReplicas - // WARNING: in.TargetCPUUtilizationPercentage requires 
manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { - out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) - out.LastScaleTime = (*unversioned.Time)(unsafe.Pointer(in.LastScaleTime)) - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas - out.CurrentCPUUtilizationPercentage = (*int32)(unsafe.Pointer(in.CurrentCPUUtilizationPercentage)) - return nil -} - -func Convert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { - return autoConvert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in, out, s) -} - -func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { - out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) - out.LastScaleTime = (*unversioned.Time)(unsafe.Pointer(in.LastScaleTime)) - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas - out.CurrentCPUUtilizationPercentage = (*int32)(unsafe.Pointer(in.CurrentCPUUtilizationPercentage)) - return nil -} - -func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { - return autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(in, out, s) +func Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in *extensions.HTTPIngressRuleValue, out *HTTPIngressRuleValue, s conversion.Scope) error { + return autoConvert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in, out, s) } func autoConvert_v1beta1_HostPortRange_To_extensions_HostPortRange(in *HostPortRange, out *extensions.HostPortRange, s conversion.Scope) error { @@ -859,10 +759,7 @@ func Convert_extensions_IDRange_To_v1beta1_IDRange(in *extensions.IDRange, out * } func autoConvert_v1beta1_Ingress_To_extensions_Ingress(in *Ingress, out *extensions.Ingress, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_IngressSpec_To_extensions_IngressSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -877,10 +774,7 @@ func Convert_v1beta1_Ingress_To_extensions_Ingress(in *Ingress, out *extensions. } func autoConvert_extensions_Ingress_To_v1beta1_Ingress(in *extensions.Ingress, out *Ingress, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_extensions_IngressSpec_To_v1beta1_IngressSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -926,7 +820,11 @@ func Convert_v1beta1_IngressList_To_extensions_IngressList(in *IngressList, out func autoConvert_extensions_IngressList_To_v1beta1_IngressList(in *extensions.IngressList, out *IngressList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]Ingress)(unsafe.Pointer(&in.Items)) + if in.Items == nil { + out.Items = make([]Ingress, 0) + } else { + out.Items = *(*[]Ingress)(unsafe.Pointer(&in.Items)) + } return nil } @@ -1042,167 +940,8 @@ func Convert_extensions_IngressTLS_To_v1beta1_IngressTLS(in *extensions.IngressT return autoConvert_extensions_IngressTLS_To_v1beta1_IngressTLS(in, out, s) } -func autoConvert_v1beta1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v1beta1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_JobStatus_To_batch_JobStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { - return autoConvert_v1beta1_Job_To_batch_Job(in, out, s) -} - -func autoConvert_batch_Job_To_v1beta1_Job(in *batch.Job, out *Job, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_batch_JobSpec_To_v1beta1_JobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_batch_JobStatus_To_v1beta1_JobStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_batch_Job_To_v1beta1_Job(in *batch.Job, out *Job, s conversion.Scope) error { - return autoConvert_batch_Job_To_v1beta1_Job(in, out, s) -} - -func autoConvert_v1beta1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { - out.Type = batch.JobConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_v1beta1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { - return autoConvert_v1beta1_JobCondition_To_batch_JobCondition(in, out, s) -} - -func autoConvert_batch_JobCondition_To_v1beta1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { - out.Type = JobConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_batch_JobCondition_To_v1beta1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { - return autoConvert_batch_JobCondition_To_v1beta1_JobCondition(in, out, s) -} - -func autoConvert_v1beta1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]batch.Job, 
len(*in)) - for i := range *in { - if err := Convert_v1beta1_Job_To_batch_Job(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1beta1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { - return autoConvert_v1beta1_JobList_To_batch_JobList(in, out, s) -} - -func autoConvert_batch_JobList_To_v1beta1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Job, len(*in)) - for i := range *in { - if err := Convert_batch_Job_To_v1beta1_Job(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_batch_JobList_To_v1beta1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { - return autoConvert_batch_JobList_To_v1beta1_JobList(in, out, s) -} - -func autoConvert_v1beta1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { - out.Parallelism = (*int32)(unsafe.Pointer(in.Parallelism)) - out.Completions = (*int32)(unsafe.Pointer(in.Completions)) - out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) - out.Selector = (*unversioned.LabelSelector)(unsafe.Pointer(in.Selector)) - // WARNING: in.AutoSelector requires manual conversion: does not exist in peer-type - if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func autoConvert_batch_JobSpec_To_v1beta1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { - out.Parallelism = (*int32)(unsafe.Pointer(in.Parallelism)) - out.Completions = (*int32)(unsafe.Pointer(in.Completions)) - out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) - out.Selector = (*unversioned.LabelSelector)(unsafe.Pointer(in.Selector)) - // WARNING: in.ManualSelector requires manual conversion: does not exist in peer-type - if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { - out.Conditions = *(*[]batch.JobCondition)(unsafe.Pointer(&in.Conditions)) - out.StartTime = (*unversioned.Time)(unsafe.Pointer(in.StartTime)) - out.CompletionTime = (*unversioned.Time)(unsafe.Pointer(in.CompletionTime)) - out.Active = in.Active - out.Succeeded = in.Succeeded - out.Failed = in.Failed - return nil -} - -func Convert_v1beta1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { - return autoConvert_v1beta1_JobStatus_To_batch_JobStatus(in, out, s) -} - -func autoConvert_batch_JobStatus_To_v1beta1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { - out.Conditions = *(*[]JobCondition)(unsafe.Pointer(&in.Conditions)) - out.StartTime = (*unversioned.Time)(unsafe.Pointer(in.StartTime)) - out.CompletionTime = (*unversioned.Time)(unsafe.Pointer(in.CompletionTime)) - out.Active = in.Active - out.Succeeded = in.Succeeded - out.Failed = in.Failed - return nil -} - -func Convert_batch_JobStatus_To_v1beta1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { - return autoConvert_batch_JobStatus_To_v1beta1_JobStatus(in, out, s) -} - func autoConvert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in *NetworkPolicy, out 
*extensions.NetworkPolicy, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -1214,10 +953,7 @@ func Convert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in *NetworkPolicy } func autoConvert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in *extensions.NetworkPolicy, out *NetworkPolicy, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -1260,7 +996,11 @@ func Convert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList(in *Netwo func autoConvert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in *extensions.NetworkPolicyList, out *NetworkPolicyList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]NetworkPolicy)(unsafe.Pointer(&in.Items)) + if in.Items == nil { + out.Items = make([]NetworkPolicy, 0) + } else { + out.Items = *(*[]NetworkPolicy)(unsafe.Pointer(&in.Items)) + } return nil } @@ -1269,8 +1009,8 @@ func Convert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in *exten } func autoConvert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(in *NetworkPolicyPeer, out *extensions.NetworkPolicyPeer, s conversion.Scope) error { - out.PodSelector = (*unversioned.LabelSelector)(unsafe.Pointer(in.PodSelector)) - out.NamespaceSelector = (*unversioned.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) + out.PodSelector = (*v1.LabelSelector)(unsafe.Pointer(in.PodSelector)) + out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) return nil } @@ -1279,8 +1019,8 @@ func Convert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(in *Netwo } func autoConvert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in *extensions.NetworkPolicyPeer, out *NetworkPolicyPeer, s conversion.Scope) error { - out.PodSelector = (*unversioned.LabelSelector)(unsafe.Pointer(in.PodSelector)) - out.NamespaceSelector = (*unversioned.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) + out.PodSelector = (*v1.LabelSelector)(unsafe.Pointer(in.PodSelector)) + out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) return nil } @@ -1299,7 +1039,7 @@ func Convert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(in *Netwo } func autoConvert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in *extensions.NetworkPolicyPort, out *NetworkPolicyPort, s conversion.Scope) error { - out.Protocol = (*v1.Protocol)(unsafe.Pointer(in.Protocol)) + out.Protocol = (*api_v1.Protocol)(unsafe.Pointer(in.Protocol)) out.Port = (*intstr.IntOrString)(unsafe.Pointer(in.Port)) return nil } @@ -1329,10 +1069,7 @@ func Convert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in *exten } func autoConvert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in *PodSecurityPolicy, out *extensions.PodSecurityPolicy, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -1344,10 +1081,7 @@ func Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in *PodSe } func autoConvert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in *extensions.PodSecurityPolicy, out *PodSecurityPolicy, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -1389,7 +1123,7 @@ func autoConvert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyLi } } } else { - out.Items = nil + out.Items = make([]PodSecurityPolicy, 0) } return nil } @@ -1440,9 +1174,9 @@ func Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(i func autoConvert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in *extensions.PodSecurityPolicySpec, out *PodSecurityPolicySpec, s conversion.Scope) error { out.Privileged = in.Privileged - out.DefaultAddCapabilities = *(*[]v1.Capability)(unsafe.Pointer(&in.DefaultAddCapabilities)) - out.RequiredDropCapabilities = *(*[]v1.Capability)(unsafe.Pointer(&in.RequiredDropCapabilities)) - out.AllowedCapabilities = *(*[]v1.Capability)(unsafe.Pointer(&in.AllowedCapabilities)) + out.DefaultAddCapabilities = *(*[]api_v1.Capability)(unsafe.Pointer(&in.DefaultAddCapabilities)) + out.RequiredDropCapabilities = *(*[]api_v1.Capability)(unsafe.Pointer(&in.RequiredDropCapabilities)) + out.AllowedCapabilities = *(*[]api_v1.Capability)(unsafe.Pointer(&in.AllowedCapabilities)) out.Volumes = *(*[]FSType)(unsafe.Pointer(&in.Volumes)) out.HostNetwork = in.HostNetwork if in.HostPorts != nil { @@ -1479,10 +1213,7 @@ func Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(i } func autoConvert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in *ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -1497,10 +1228,7 @@ func Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in *ReplicaSet, out *ex } func autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaSet, out *ReplicaSet, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -1529,7 +1257,7 @@ func Convert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *R func autoConvert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *ReplicaSetCondition, s conversion.Scope) error { out.Type = ReplicaSetConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) + out.Status = api_v1.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason out.Message = in.Message @@ -1571,7 +1299,7 @@ func autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensi } } } else { - out.Items = nil + out.Items = make([]ReplicaSet, 0) } return nil } @@ -1581,24 +1309,24 @@ func Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions. } func autoConvert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { - if err := api.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds - out.Selector = (*unversioned.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil } func autoConvert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *ReplicaSetSpec, s conversion.Scope) error { - if err := api.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds - out.Selector = (*unversioned.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil @@ -1666,15 +1394,25 @@ func Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions. 
return autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in, out, s) } +func autoConvert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error { + // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) + return nil +} + +func autoConvert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *RollingUpdateDaemonSet, s conversion.Scope) error { + // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) + return nil +} + func autoConvert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { - // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/kubernetes/pkg/util/intstr.IntOrString vs k8s.io/kubernetes/pkg/util/intstr.IntOrString) - // WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/kubernetes/pkg/util/intstr.IntOrString vs k8s.io/kubernetes/pkg/util/intstr.IntOrString) + // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) + // WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil } func autoConvert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error { - // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/kubernetes/pkg/util/intstr.IntOrString vs *k8s.io/kubernetes/pkg/util/intstr.IntOrString) - // WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/kubernetes/pkg/util/intstr.IntOrString vs *k8s.io/kubernetes/pkg/util/intstr.IntOrString) + // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) + // WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil } @@ -1710,7 +1448,7 @@ func Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions func autoConvert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in *extensions.SELinuxStrategyOptions, out *SELinuxStrategyOptions, s conversion.Scope) error { out.Rule = SELinuxStrategy(in.Rule) - out.SELinuxOptions = (*v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.SELinuxOptions = (*api_v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) return nil } @@ -1719,10 +1457,7 @@ func Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions } func autoConvert_v1beta1_Scale_To_extensions_Scale(in *Scale, out *extensions.Scale, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -1737,10 +1472,7 @@ func Convert_v1beta1_Scale_To_extensions_Scale(in *Scale, out *extensions.Scale, } func autoConvert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *Scale, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -1774,14 +1506,14 @@ func Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, func autoConvert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error { out.Replicas = in.Replicas - // WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/kubernetes/pkg/api/unversioned.LabelSelector) + // WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector) // WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type return nil } func autoConvert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { out.Replicas = in.Replicas - // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/kubernetes/pkg/api/unversioned.LabelSelector vs map[string]string) + // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string) return nil } @@ -1806,10 +1538,7 @@ func Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_Supplementa } func autoConvert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in *ThirdPartyResource, out *extensions.ThirdPartyResource, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta out.Description = in.Description out.Versions = *(*[]extensions.APIVersion)(unsafe.Pointer(&in.Versions)) return nil @@ -1820,10 +1549,7 @@ func Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in *Thi } func autoConvert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in *extensions.ThirdPartyResource, out *ThirdPartyResource, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta out.Description = in.Description out.Versions = *(*[]APIVersion)(unsafe.Pointer(&in.Versions)) return nil @@ -1834,10 +1560,7 @@ func Convert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in *ext } func autoConvert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in *ThirdPartyResourceData, out *extensions.ThirdPartyResourceData, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) return nil } @@ -1847,10 +1570,7 @@ func Convert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData } func autoConvert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in *extensions.ThirdPartyResourceData, out *ThirdPartyResourceData, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) return nil } @@ -1871,7 +1591,11 @@ func Convert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResource func autoConvert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in *extensions.ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]ThirdPartyResourceData)(unsafe.Pointer(&in.Items)) + if in.Items == nil { + out.Items = make([]ThirdPartyResourceData, 0) + } else { + out.Items = *(*[]ThirdPartyResourceData)(unsafe.Pointer(&in.Items)) + } return nil } @@ -1891,7 +1615,11 @@ func Convert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList func autoConvert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in *extensions.ThirdPartyResourceList, out *ThirdPartyResourceList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]ThirdPartyResource)(unsafe.Pointer(&in.Items)) + if in.Items == nil { + out.Items = make([]ThirdPartyResource, 0) + } else { + out.Items = *(*[]ThirdPartyResource)(unsafe.Pointer(&in.Items)) + } return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go index a5e2ecb17d..5188d3ed76 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,11 +21,11 @@ limitations under the License. 
package v1beta1 import ( - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" - intstr "k8s.io/kubernetes/pkg/util/intstr" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" + api_v1 "k8s.io/kubernetes/pkg/api/v1" reflect "reflect" ) @@ -38,7 +38,6 @@ func init() { func RegisterDeepCopies(scheme *runtime.Scheme) error { return scheme.AddGeneratedDeepCopyFuncs( conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_APIVersion, InType: reflect.TypeOf(&APIVersion{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CPUTargetUtilization, InType: reflect.TypeOf(&CPUTargetUtilization{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CustomMetricCurrentStatus, InType: reflect.TypeOf(&CustomMetricCurrentStatus{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CustomMetricCurrentStatusList, InType: reflect.TypeOf(&CustomMetricCurrentStatusList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CustomMetricTarget, InType: reflect.TypeOf(&CustomMetricTarget{})}, @@ -47,6 +46,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSetList, InType: reflect.TypeOf(&DaemonSetList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSetSpec, InType: reflect.TypeOf(&DaemonSetSpec{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSetStatus, InType: reflect.TypeOf(&DaemonSetStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSetUpdateStrategy, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Deployment, InType: reflect.TypeOf(&Deployment{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentCondition, InType: reflect.TypeOf(&DeploymentCondition{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentList, InType: reflect.TypeOf(&DeploymentList{})}, @@ -54,14 +54,9 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentSpec, InType: reflect.TypeOf(&DeploymentSpec{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentStatus, InType: reflect.TypeOf(&DeploymentStatus{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentStrategy, InType: reflect.TypeOf(&DeploymentStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ExportOptions, InType: reflect.TypeOf(&ExportOptions{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_FSGroupStrategyOptions, InType: reflect.TypeOf(&FSGroupStrategyOptions{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HTTPIngressPath, InType: reflect.TypeOf(&HTTPIngressPath{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HTTPIngressRuleValue, InType: reflect.TypeOf(&HTTPIngressRuleValue{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HorizontalPodAutoscaler, InType: reflect.TypeOf(&HorizontalPodAutoscaler{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HorizontalPodAutoscalerList, InType: reflect.TypeOf(&HorizontalPodAutoscalerList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HorizontalPodAutoscalerSpec, InType: reflect.TypeOf(&HorizontalPodAutoscalerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: 
DeepCopy_v1beta1_HorizontalPodAutoscalerStatus, InType: reflect.TypeOf(&HorizontalPodAutoscalerStatus{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HostPortRange, InType: reflect.TypeOf(&HostPortRange{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IDRange, InType: reflect.TypeOf(&IDRange{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Ingress, InType: reflect.TypeOf(&Ingress{})}, @@ -72,11 +67,6 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressSpec, InType: reflect.TypeOf(&IngressSpec{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressStatus, InType: reflect.TypeOf(&IngressStatus{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressTLS, InType: reflect.TypeOf(&IngressTLS{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Job, InType: reflect.TypeOf(&Job{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_JobCondition, InType: reflect.TypeOf(&JobCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_JobList, InType: reflect.TypeOf(&JobList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_JobSpec, InType: reflect.TypeOf(&JobSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_JobStatus, InType: reflect.TypeOf(&JobStatus{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NetworkPolicy, InType: reflect.TypeOf(&NetworkPolicy{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NetworkPolicyIngressRule, InType: reflect.TypeOf(&NetworkPolicyIngressRule{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NetworkPolicyList, InType: reflect.TypeOf(&NetworkPolicyList{})}, @@ -93,13 +83,13 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicaSetStatus, InType: reflect.TypeOf(&ReplicaSetStatus{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicationControllerDummy, InType: reflect.TypeOf(&ReplicationControllerDummy{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollbackConfig, InType: reflect.TypeOf(&RollbackConfig{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollingUpdateDaemonSet, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollingUpdateDeployment, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RunAsUserStrategyOptions, InType: reflect.TypeOf(&RunAsUserStrategyOptions{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SELinuxStrategyOptions, InType: reflect.TypeOf(&SELinuxStrategyOptions{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Scale, InType: reflect.TypeOf(&Scale{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SubresourceReference, InType: reflect.TypeOf(&SubresourceReference{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SupplementalGroupsStrategyOptions, InType: reflect.TypeOf(&SupplementalGroupsStrategyOptions{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ThirdPartyResource, InType: reflect.TypeOf(&ThirdPartyResource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ThirdPartyResourceData, InType: reflect.TypeOf(&ThirdPartyResourceData{})}, @@ -112,16 +102,7 @@ func 
DeepCopy_v1beta1_APIVersion(in interface{}, out interface{}, c *conversion. { in := in.(*APIVersion) out := out.(*APIVersion) - out.Name = in.Name - return nil - } -} - -func DeepCopy_v1beta1_CPUTargetUtilization(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CPUTargetUtilization) - out := out.(*CPUTargetUtilization) - out.TargetPercentage = in.TargetPercentage + *out = *in return nil } } @@ -130,7 +111,7 @@ func DeepCopy_v1beta1_CustomMetricCurrentStatus(in interface{}, out interface{}, { in := in.(*CustomMetricCurrentStatus) out := out.(*CustomMetricCurrentStatus) - out.Name = in.Name + *out = *in out.CurrentValue = in.CurrentValue.DeepCopy() return nil } @@ -140,6 +121,7 @@ func DeepCopy_v1beta1_CustomMetricCurrentStatusList(in interface{}, out interfac { in := in.(*CustomMetricCurrentStatusList) out := out.(*CustomMetricCurrentStatusList) + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CustomMetricCurrentStatus, len(*in)) @@ -148,8 +130,6 @@ func DeepCopy_v1beta1_CustomMetricCurrentStatusList(in interface{}, out interfac return err } } - } else { - out.Items = nil } return nil } @@ -159,7 +139,7 @@ func DeepCopy_v1beta1_CustomMetricTarget(in interface{}, out interface{}, c *con { in := in.(*CustomMetricTarget) out := out.(*CustomMetricTarget) - out.Name = in.Name + *out = *in out.TargetValue = in.TargetValue.DeepCopy() return nil } @@ -169,6 +149,7 @@ func DeepCopy_v1beta1_CustomMetricTargetList(in interface{}, out interface{}, c { in := in.(*CustomMetricTargetList) out := out.(*CustomMetricTargetList) + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CustomMetricTarget, len(*in)) @@ -177,8 +158,6 @@ func DeepCopy_v1beta1_CustomMetricTargetList(in interface{}, out interface{}, c return err } } - } else { - out.Items = nil } return nil } @@ -188,14 +167,15 @@ func DeepCopy_v1beta1_DaemonSet(in interface{}, out interface{}, c *conversion.C { in := in.(*DaemonSet) out := out.(*DaemonSet) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_v1beta1_DaemonSetSpec(&in.Spec, &out.Spec, c); err != nil { return err } - out.Status = in.Status return nil } } @@ -204,8 +184,7 @@ func DeepCopy_v1beta1_DaemonSetList(in interface{}, out interface{}, c *conversi { in := in.(*DaemonSetList) out := out.(*DaemonSetList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DaemonSet, len(*in)) @@ -214,8 +193,6 @@ func DeepCopy_v1beta1_DaemonSetList(in interface{}, out interface{}, c *conversi return err } } - } else { - out.Items = nil } return nil } @@ -225,16 +202,19 @@ func DeepCopy_v1beta1_DaemonSetSpec(in interface{}, out interface{}, c *conversi { in := in.(*DaemonSetSpec) out := out.(*DaemonSetSpec) + *out = *in if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*v1.LabelSelector) } - } else { - out.Selector = nil } - if err := v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, 
&out.Template, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, c); err != nil { return err } return nil @@ -245,10 +225,23 @@ func DeepCopy_v1beta1_DaemonSetStatus(in interface{}, out interface{}, c *conver { in := in.(*DaemonSetStatus) out := out.(*DaemonSetStatus) - out.CurrentNumberScheduled = in.CurrentNumberScheduled - out.NumberMisscheduled = in.NumberMisscheduled - out.DesiredNumberScheduled = in.DesiredNumberScheduled - out.NumberReady = in.NumberReady + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_DaemonSetUpdateStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSetUpdateStrategy) + out := out.(*DaemonSetUpdateStrategy) + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDaemonSet) + if err := DeepCopy_v1beta1_RollingUpdateDaemonSet(*in, *out, c); err != nil { + return err + } + } return nil } } @@ -257,9 +250,11 @@ func DeepCopy_v1beta1_Deployment(in interface{}, out interface{}, c *conversion. { in := in.(*Deployment) out := out.(*Deployment) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -275,12 +270,9 @@ func DeepCopy_v1beta1_DeploymentCondition(in interface{}, out interface{}, c *co { in := in.(*DeploymentCondition) out := out.(*DeploymentCondition) - out.Type = in.Type - out.Status = in.Status + *out = *in out.LastUpdateTime = in.LastUpdateTime.DeepCopy() out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - out.Reason = in.Reason - out.Message = in.Message return nil } } @@ -289,8 +281,7 @@ func DeepCopy_v1beta1_DeploymentList(in interface{}, out interface{}, c *convers { in := in.(*DeploymentList) out := out.(*DeploymentList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) @@ -299,8 +290,6 @@ func DeepCopy_v1beta1_DeploymentList(in interface{}, out interface{}, c *convers return err } } - } else { - out.Items = nil } return nil } @@ -310,18 +299,14 @@ func DeepCopy_v1beta1_DeploymentRollback(in interface{}, out interface{}, c *con { in := in.(*DeploymentRollback) out := out.(*DeploymentRollback) - out.TypeMeta = in.TypeMeta - out.Name = in.Name + *out = *in if in.UpdatedAnnotations != nil { in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations *out = make(map[string]string) for key, val := range *in { (*out)[key] = val } - } else { - out.UpdatedAnnotations = nil } - out.RollbackTo = in.RollbackTo return nil } } @@ -330,50 +315,40 @@ func DeepCopy_v1beta1_DeploymentSpec(in interface{}, out interface{}, c *convers { in := in.(*DeploymentSpec) out := out.(*DeploymentSpec) + *out = *in if in.Replicas != nil { in, out := &in.Replicas, &out.Replicas *out = new(int32) **out = **in - } else { - out.Replicas = nil } if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*v1.LabelSelector) } - } else { - out.Selector = nil } - if err := 
v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { return err } if err := DeepCopy_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, c); err != nil { return err } - out.MinReadySeconds = in.MinReadySeconds if in.RevisionHistoryLimit != nil { in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit *out = new(int32) **out = **in - } else { - out.RevisionHistoryLimit = nil } - out.Paused = in.Paused if in.RollbackTo != nil { in, out := &in.RollbackTo, &out.RollbackTo *out = new(RollbackConfig) **out = **in - } else { - out.RollbackTo = nil } if in.ProgressDeadlineSeconds != nil { in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds *out = new(int32) **out = **in - } else { - out.ProgressDeadlineSeconds = nil } return nil } @@ -383,11 +358,7 @@ func DeepCopy_v1beta1_DeploymentStatus(in interface{}, out interface{}, c *conve { in := in.(*DeploymentStatus) out := out.(*DeploymentStatus) - out.ObservedGeneration = in.ObservedGeneration - out.Replicas = in.Replicas - out.UpdatedReplicas = in.UpdatedReplicas - out.AvailableReplicas = in.AvailableReplicas - out.UnavailableReplicas = in.UnavailableReplicas + *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]DeploymentCondition, len(*in)) @@ -396,8 +367,6 @@ func DeepCopy_v1beta1_DeploymentStatus(in interface{}, out interface{}, c *conve return err } } - } else { - out.Conditions = nil } return nil } @@ -407,44 +376,27 @@ func DeepCopy_v1beta1_DeploymentStrategy(in interface{}, out interface{}, c *con { in := in.(*DeploymentStrategy) out := out.(*DeploymentStrategy) - out.Type = in.Type + *out = *in if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate *out = new(RollingUpdateDeployment) if err := DeepCopy_v1beta1_RollingUpdateDeployment(*in, *out, c); err != nil { return err } - } else { - out.RollingUpdate = nil } return nil } } -func DeepCopy_v1beta1_ExportOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ExportOptions) - out := out.(*ExportOptions) - out.TypeMeta = in.TypeMeta - out.Export = in.Export - out.Exact = in.Exact - return nil - } -} - func DeepCopy_v1beta1_FSGroupStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*FSGroupStrategyOptions) out := out.(*FSGroupStrategyOptions) - out.Rule = in.Rule + *out = *in if in.Ranges != nil { in, out := &in.Ranges, &out.Ranges *out = make([]IDRange, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Ranges = nil + copy(*out, *in) } return nil } @@ -454,8 +406,7 @@ func DeepCopy_v1beta1_HTTPIngressPath(in interface{}, out interface{}, c *conver { in := in.(*HTTPIngressPath) out := out.(*HTTPIngressPath) - out.Path = in.Path - out.Backend = in.Backend + *out = *in return nil } } @@ -464,108 +415,11 @@ func DeepCopy_v1beta1_HTTPIngressRuleValue(in interface{}, out interface{}, c *c { in := in.(*HTTPIngressRuleValue) out := out.(*HTTPIngressRuleValue) + *out = *in if in.Paths != nil { in, out := &in.Paths, &out.Paths *out = make([]HTTPIngressPath, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Paths = nil - } - return nil - } -} - -func DeepCopy_v1beta1_HorizontalPodAutoscaler(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HorizontalPodAutoscaler) - out := out.(*HorizontalPodAutoscaler) - out.TypeMeta = in.TypeMeta - if err := 
v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_HorizontalPodAutoscalerList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HorizontalPodAutoscalerList) - out := out.(*HorizontalPodAutoscalerList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]HorizontalPodAutoscaler, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil - } -} - -func DeepCopy_v1beta1_HorizontalPodAutoscalerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HorizontalPodAutoscalerSpec) - out := out.(*HorizontalPodAutoscalerSpec) - out.ScaleRef = in.ScaleRef - if in.MinReplicas != nil { - in, out := &in.MinReplicas, &out.MinReplicas - *out = new(int32) - **out = **in - } else { - out.MinReplicas = nil - } - out.MaxReplicas = in.MaxReplicas - if in.CPUUtilization != nil { - in, out := &in.CPUUtilization, &out.CPUUtilization - *out = new(CPUTargetUtilization) - **out = **in - } else { - out.CPUUtilization = nil - } - return nil - } -} - -func DeepCopy_v1beta1_HorizontalPodAutoscalerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HorizontalPodAutoscalerStatus) - out := out.(*HorizontalPodAutoscalerStatus) - if in.ObservedGeneration != nil { - in, out := &in.ObservedGeneration, &out.ObservedGeneration - *out = new(int64) - **out = **in - } else { - out.ObservedGeneration = nil - } - if in.LastScaleTime != nil { - in, out := &in.LastScaleTime, &out.LastScaleTime - *out = new(unversioned.Time) - **out = (*in).DeepCopy() - } else { - out.LastScaleTime = nil - } - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas - if in.CurrentCPUUtilizationPercentage != nil { - in, out := &in.CurrentCPUUtilizationPercentage, &out.CurrentCPUUtilizationPercentage - *out = new(int32) - **out = **in - } else { - out.CurrentCPUUtilizationPercentage = nil + copy(*out, *in) } return nil } @@ -575,8 +429,7 @@ func DeepCopy_v1beta1_HostPortRange(in interface{}, out interface{}, c *conversi { in := in.(*HostPortRange) out := out.(*HostPortRange) - out.Min = in.Min - out.Max = in.Max + *out = *in return nil } } @@ -585,8 +438,7 @@ func DeepCopy_v1beta1_IDRange(in interface{}, out interface{}, c *conversion.Clo { in := in.(*IDRange) out := out.(*IDRange) - out.Min = in.Min - out.Max = in.Max + *out = *in return nil } } @@ -595,9 +447,11 @@ func DeepCopy_v1beta1_Ingress(in interface{}, out interface{}, c *conversion.Clo { in := in.(*Ingress) out := out.(*Ingress) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_v1beta1_IngressSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -613,8 +467,7 @@ func DeepCopy_v1beta1_IngressBackend(in interface{}, out interface{}, c *convers { in := in.(*IngressBackend) out := out.(*IngressBackend) - out.ServiceName = 
in.ServiceName - out.ServicePort = in.ServicePort + *out = *in return nil } } @@ -623,8 +476,7 @@ func DeepCopy_v1beta1_IngressList(in interface{}, out interface{}, c *conversion { in := in.(*IngressList) out := out.(*IngressList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Ingress, len(*in)) @@ -633,8 +485,6 @@ func DeepCopy_v1beta1_IngressList(in interface{}, out interface{}, c *conversion return err } } - } else { - out.Items = nil } return nil } @@ -644,7 +494,7 @@ func DeepCopy_v1beta1_IngressRule(in interface{}, out interface{}, c *conversion { in := in.(*IngressRule) out := out.(*IngressRule) - out.Host = in.Host + *out = *in if err := DeepCopy_v1beta1_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, c); err != nil { return err } @@ -656,14 +506,13 @@ func DeepCopy_v1beta1_IngressRuleValue(in interface{}, out interface{}, c *conve { in := in.(*IngressRuleValue) out := out.(*IngressRuleValue) + *out = *in if in.HTTP != nil { in, out := &in.HTTP, &out.HTTP *out = new(HTTPIngressRuleValue) if err := DeepCopy_v1beta1_HTTPIngressRuleValue(*in, *out, c); err != nil { return err } - } else { - out.HTTP = nil } return nil } @@ -673,12 +522,11 @@ func DeepCopy_v1beta1_IngressSpec(in interface{}, out interface{}, c *conversion { in := in.(*IngressSpec) out := out.(*IngressSpec) + *out = *in if in.Backend != nil { in, out := &in.Backend, &out.Backend *out = new(IngressBackend) **out = **in - } else { - out.Backend = nil } if in.TLS != nil { in, out := &in.TLS, &out.TLS @@ -688,8 +536,6 @@ func DeepCopy_v1beta1_IngressSpec(in interface{}, out interface{}, c *conversion return err } } - } else { - out.TLS = nil } if in.Rules != nil { in, out := &in.Rules, &out.Rules @@ -699,8 +545,6 @@ func DeepCopy_v1beta1_IngressSpec(in interface{}, out interface{}, c *conversion return err } } - } else { - out.Rules = nil } return nil } @@ -710,7 +554,8 @@ func DeepCopy_v1beta1_IngressStatus(in interface{}, out interface{}, c *conversi { in := in.(*IngressStatus) out := out.(*IngressStatus) - if err := v1.DeepCopy_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, c); err != nil { + *out = *in + if err := api_v1.DeepCopy_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, c); err != nil { return err } return nil @@ -721,162 +566,25 @@ func DeepCopy_v1beta1_IngressTLS(in interface{}, out interface{}, c *conversion. 
{ in := in.(*IngressTLS) out := out.(*IngressTLS) + *out = *in if in.Hosts != nil { in, out := &in.Hosts, &out.Hosts *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Hosts = nil - } - out.SecretName = in.SecretName - return nil - } -} - -func DeepCopy_v1beta1_Job(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Job) - out := out.(*Job) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_JobSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_JobStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_JobCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*JobCondition) - out := out.(*JobCondition) - out.Type = in.Type - out.Status = in.Status - out.LastProbeTime = in.LastProbeTime.DeepCopy() - out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - out.Reason = in.Reason - out.Message = in.Message - return nil - } -} - -func DeepCopy_v1beta1_JobList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*JobList) - out := out.(*JobList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Job, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_Job(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil } return nil } } -func DeepCopy_v1beta1_JobSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*JobSpec) - out := out.(*JobSpec) - if in.Parallelism != nil { - in, out := &in.Parallelism, &out.Parallelism - *out = new(int32) - **out = **in - } else { - out.Parallelism = nil - } - if in.Completions != nil { - in, out := &in.Completions, &out.Completions - *out = new(int32) - **out = **in - } else { - out.Completions = nil - } - if in.ActiveDeadlineSeconds != nil { - in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - *out = new(int64) - **out = **in - } else { - out.ActiveDeadlineSeconds = nil - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.Selector = nil - } - if in.AutoSelector != nil { - in, out := &in.AutoSelector, &out.AutoSelector - *out = new(bool) - **out = **in - } else { - out.AutoSelector = nil - } - if err := v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_JobStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*JobStatus) - out := out.(*JobStatus) - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]JobCondition, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_JobCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - if in.StartTime != nil { - in, out := &in.StartTime, &out.StartTime - *out = new(unversioned.Time) - **out = (*in).DeepCopy() - } else { - out.StartTime = nil - } - if in.CompletionTime != nil { - in, out := &in.CompletionTime, &out.CompletionTime - *out = new(unversioned.Time) - **out = (*in).DeepCopy() - } else { - out.CompletionTime = nil - } - out.Active = in.Active - out.Succeeded 
= in.Succeeded - out.Failed = in.Failed - return nil - } -} - func DeepCopy_v1beta1_NetworkPolicy(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*NetworkPolicy) out := out.(*NetworkPolicy) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_v1beta1_NetworkPolicySpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -889,6 +597,7 @@ func DeepCopy_v1beta1_NetworkPolicyIngressRule(in interface{}, out interface{}, { in := in.(*NetworkPolicyIngressRule) out := out.(*NetworkPolicyIngressRule) + *out = *in if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]NetworkPolicyPort, len(*in)) @@ -897,8 +606,6 @@ func DeepCopy_v1beta1_NetworkPolicyIngressRule(in interface{}, out interface{}, return err } } - } else { - out.Ports = nil } if in.From != nil { in, out := &in.From, &out.From @@ -908,8 +615,6 @@ func DeepCopy_v1beta1_NetworkPolicyIngressRule(in interface{}, out interface{}, return err } } - } else { - out.From = nil } return nil } @@ -919,8 +624,7 @@ func DeepCopy_v1beta1_NetworkPolicyList(in interface{}, out interface{}, c *conv { in := in.(*NetworkPolicyList) out := out.(*NetworkPolicyList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]NetworkPolicy, len(*in)) @@ -929,8 +633,6 @@ func DeepCopy_v1beta1_NetworkPolicyList(in interface{}, out interface{}, c *conv return err } } - } else { - out.Items = nil } return nil } @@ -940,23 +642,22 @@ func DeepCopy_v1beta1_NetworkPolicyPeer(in interface{}, out interface{}, c *conv { in := in.(*NetworkPolicyPeer) out := out.(*NetworkPolicyPeer) + *out = *in if in.PodSelector != nil { in, out := &in.PodSelector, &out.PodSelector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*v1.LabelSelector) } - } else { - out.PodSelector = nil } if in.NamespaceSelector != nil { in, out := &in.NamespaceSelector, &out.NamespaceSelector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*v1.LabelSelector) } - } else { - out.NamespaceSelector = nil } return nil } @@ -966,19 +667,16 @@ func DeepCopy_v1beta1_NetworkPolicyPort(in interface{}, out interface{}, c *conv { in := in.(*NetworkPolicyPort) out := out.(*NetworkPolicyPort) + *out = *in if in.Protocol != nil { in, out := &in.Protocol, &out.Protocol - *out = new(v1.Protocol) + *out = new(api_v1.Protocol) **out = **in - } else { - out.Protocol = nil } if in.Port != nil { in, out := &in.Port, &out.Port *out = new(intstr.IntOrString) **out = **in - } else { - out.Port = nil } return nil } @@ -988,8 +686,11 @@ func DeepCopy_v1beta1_NetworkPolicySpec(in interface{}, out interface{}, c *conv { in := in.(*NetworkPolicySpec) out := out.(*NetworkPolicySpec) - if err := unversioned.DeepCopy_unversioned_LabelSelector(&in.PodSelector, &out.PodSelector, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.PodSelector); err != nil { return err + } else { + out.PodSelector = *newVal.(*v1.LabelSelector) } if in.Ingress != nil { in, out := 
&in.Ingress, &out.Ingress @@ -999,8 +700,6 @@ func DeepCopy_v1beta1_NetworkPolicySpec(in interface{}, out interface{}, c *conv return err } } - } else { - out.Ingress = nil } return nil } @@ -1010,9 +709,11 @@ func DeepCopy_v1beta1_PodSecurityPolicy(in interface{}, out interface{}, c *conv { in := in.(*PodSecurityPolicy) out := out.(*PodSecurityPolicy) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_v1beta1_PodSecurityPolicySpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -1025,8 +726,7 @@ func DeepCopy_v1beta1_PodSecurityPolicyList(in interface{}, out interface{}, c * { in := in.(*PodSecurityPolicyList) out := out.(*PodSecurityPolicyList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodSecurityPolicy, len(*in)) @@ -1035,8 +735,6 @@ func DeepCopy_v1beta1_PodSecurityPolicyList(in interface{}, out interface{}, c * return err } } - } else { - out.Items = nil } return nil } @@ -1046,55 +744,32 @@ func DeepCopy_v1beta1_PodSecurityPolicySpec(in interface{}, out interface{}, c * { in := in.(*PodSecurityPolicySpec) out := out.(*PodSecurityPolicySpec) - out.Privileged = in.Privileged + *out = *in if in.DefaultAddCapabilities != nil { in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities - *out = make([]v1.Capability, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.DefaultAddCapabilities = nil + *out = make([]api_v1.Capability, len(*in)) + copy(*out, *in) } if in.RequiredDropCapabilities != nil { in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities - *out = make([]v1.Capability, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.RequiredDropCapabilities = nil + *out = make([]api_v1.Capability, len(*in)) + copy(*out, *in) } if in.AllowedCapabilities != nil { in, out := &in.AllowedCapabilities, &out.AllowedCapabilities - *out = make([]v1.Capability, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.AllowedCapabilities = nil + *out = make([]api_v1.Capability, len(*in)) + copy(*out, *in) } if in.Volumes != nil { in, out := &in.Volumes, &out.Volumes *out = make([]FSType, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Volumes = nil + copy(*out, *in) } - out.HostNetwork = in.HostNetwork if in.HostPorts != nil { in, out := &in.HostPorts, &out.HostPorts *out = make([]HostPortRange, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.HostPorts = nil + copy(*out, *in) } - out.HostPID = in.HostPID - out.HostIPC = in.HostIPC if err := DeepCopy_v1beta1_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, c); err != nil { return err } @@ -1107,7 +782,6 @@ func DeepCopy_v1beta1_PodSecurityPolicySpec(in interface{}, out interface{}, c * if err := DeepCopy_v1beta1_FSGroupStrategyOptions(&in.FSGroup, &out.FSGroup, c); err != nil { return err } - out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem return nil } } @@ -1116,9 +790,11 @@ func DeepCopy_v1beta1_ReplicaSet(in interface{}, out interface{}, c *conversion. 
{ in := in.(*ReplicaSet) out := out.(*ReplicaSet) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_v1beta1_ReplicaSetSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -1134,11 +810,8 @@ func DeepCopy_v1beta1_ReplicaSetCondition(in interface{}, out interface{}, c *co { in := in.(*ReplicaSetCondition) out := out.(*ReplicaSetCondition) - out.Type = in.Type - out.Status = in.Status + *out = *in out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - out.Reason = in.Reason - out.Message = in.Message return nil } } @@ -1147,8 +820,7 @@ func DeepCopy_v1beta1_ReplicaSetList(in interface{}, out interface{}, c *convers { in := in.(*ReplicaSetList) out := out.(*ReplicaSetList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicaSet, len(*in)) @@ -1157,8 +829,6 @@ func DeepCopy_v1beta1_ReplicaSetList(in interface{}, out interface{}, c *convers return err } } - } else { - out.Items = nil } return nil } @@ -1168,24 +838,21 @@ func DeepCopy_v1beta1_ReplicaSetSpec(in interface{}, out interface{}, c *convers { in := in.(*ReplicaSetSpec) out := out.(*ReplicaSetSpec) + *out = *in if in.Replicas != nil { in, out := &in.Replicas, &out.Replicas *out = new(int32) **out = **in - } else { - out.Replicas = nil } - out.MinReadySeconds = in.MinReadySeconds if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*v1.LabelSelector) } - } else { - out.Selector = nil } - if err := v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { return err } return nil @@ -1196,11 +863,7 @@ func DeepCopy_v1beta1_ReplicaSetStatus(in interface{}, out interface{}, c *conve { in := in.(*ReplicaSetStatus) out := out.(*ReplicaSetStatus) - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ReadyReplicas = in.ReadyReplicas - out.AvailableReplicas = in.AvailableReplicas - out.ObservedGeneration = in.ObservedGeneration + *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]ReplicaSetCondition, len(*in)) @@ -1209,8 +872,6 @@ func DeepCopy_v1beta1_ReplicaSetStatus(in interface{}, out interface{}, c *conve return err } } - } else { - out.Conditions = nil } return nil } @@ -1220,7 +881,7 @@ func DeepCopy_v1beta1_ReplicationControllerDummy(in interface{}, out interface{} { in := in.(*ReplicationControllerDummy) out := out.(*ReplicationControllerDummy) - out.TypeMeta = in.TypeMeta + *out = *in return nil } } @@ -1229,7 +890,21 @@ func DeepCopy_v1beta1_RollbackConfig(in interface{}, out interface{}, c *convers { in := in.(*RollbackConfig) out := out.(*RollbackConfig) - out.Revision = in.Revision + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_RollingUpdateDaemonSet(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollingUpdateDaemonSet) + out := out.(*RollingUpdateDaemonSet) + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + 
*out = new(intstr.IntOrString) + **out = **in + } return nil } } @@ -1238,19 +913,16 @@ func DeepCopy_v1beta1_RollingUpdateDeployment(in interface{}, out interface{}, c { in := in.(*RollingUpdateDeployment) out := out.(*RollingUpdateDeployment) + *out = *in if in.MaxUnavailable != nil { in, out := &in.MaxUnavailable, &out.MaxUnavailable *out = new(intstr.IntOrString) **out = **in - } else { - out.MaxUnavailable = nil } if in.MaxSurge != nil { in, out := &in.MaxSurge, &out.MaxSurge *out = new(intstr.IntOrString) **out = **in - } else { - out.MaxSurge = nil } return nil } @@ -1260,15 +932,11 @@ func DeepCopy_v1beta1_RunAsUserStrategyOptions(in interface{}, out interface{}, { in := in.(*RunAsUserStrategyOptions) out := out.(*RunAsUserStrategyOptions) - out.Rule = in.Rule + *out = *in if in.Ranges != nil { in, out := &in.Ranges, &out.Ranges *out = make([]IDRange, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Ranges = nil + copy(*out, *in) } return nil } @@ -1278,13 +946,11 @@ func DeepCopy_v1beta1_SELinuxStrategyOptions(in interface{}, out interface{}, c { in := in.(*SELinuxStrategyOptions) out := out.(*SELinuxStrategyOptions) - out.Rule = in.Rule + *out = *in if in.SELinuxOptions != nil { in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(v1.SELinuxOptions) + *out = new(api_v1.SELinuxOptions) **out = **in - } else { - out.SELinuxOptions = nil } return nil } @@ -1294,11 +960,12 @@ func DeepCopy_v1beta1_Scale(in interface{}, out interface{}, c *conversion.Clone { in := in.(*Scale) out := out.(*Scale) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } - out.Spec = in.Spec if err := DeepCopy_v1beta1_ScaleStatus(&in.Status, &out.Status, c); err != nil { return err } @@ -1310,7 +977,7 @@ func DeepCopy_v1beta1_ScaleSpec(in interface{}, out interface{}, c *conversion.C { in := in.(*ScaleSpec) out := out.(*ScaleSpec) - out.Replicas = in.Replicas + *out = *in return nil } } @@ -1319,29 +986,14 @@ func DeepCopy_v1beta1_ScaleStatus(in interface{}, out interface{}, c *conversion { in := in.(*ScaleStatus) out := out.(*ScaleStatus) - out.Replicas = in.Replicas + *out = *in if in.Selector != nil { in, out := &in.Selector, &out.Selector *out = make(map[string]string) for key, val := range *in { (*out)[key] = val } - } else { - out.Selector = nil } - out.TargetSelector = in.TargetSelector - return nil - } -} - -func DeepCopy_v1beta1_SubresourceReference(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SubresourceReference) - out := out.(*SubresourceReference) - out.Kind = in.Kind - out.Name = in.Name - out.APIVersion = in.APIVersion - out.Subresource = in.Subresource return nil } } @@ -1350,15 +1002,11 @@ func DeepCopy_v1beta1_SupplementalGroupsStrategyOptions(in interface{}, out inte { in := in.(*SupplementalGroupsStrategyOptions) out := out.(*SupplementalGroupsStrategyOptions) - out.Rule = in.Rule + *out = *in if in.Ranges != nil { in, out := &in.Ranges, &out.Ranges *out = make([]IDRange, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Ranges = nil + copy(*out, *in) } return nil } @@ -1368,19 +1016,16 @@ func DeepCopy_v1beta1_ThirdPartyResource(in interface{}, out interface{}, c *con { in := in.(*ThirdPartyResource) out := out.(*ThirdPartyResource) - out.TypeMeta = in.TypeMeta - if err := 
v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } - out.Description = in.Description if in.Versions != nil { in, out := &in.Versions, &out.Versions *out = make([]APIVersion, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Versions = nil + copy(*out, *in) } return nil } @@ -1390,16 +1035,16 @@ func DeepCopy_v1beta1_ThirdPartyResourceData(in interface{}, out interface{}, c { in := in.(*ThirdPartyResourceData) out := out.(*ThirdPartyResourceData) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if in.Data != nil { in, out := &in.Data, &out.Data *out = make([]byte, len(*in)) copy(*out, *in) - } else { - out.Data = nil } return nil } @@ -1409,8 +1054,7 @@ func DeepCopy_v1beta1_ThirdPartyResourceDataList(in interface{}, out interface{} { in := in.(*ThirdPartyResourceDataList) out := out.(*ThirdPartyResourceDataList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ThirdPartyResourceData, len(*in)) @@ -1419,8 +1063,6 @@ func DeepCopy_v1beta1_ThirdPartyResourceDataList(in interface{}, out interface{} return err } } - } else { - out.Items = nil } return nil } @@ -1430,8 +1072,7 @@ func DeepCopy_v1beta1_ThirdPartyResourceList(in interface{}, out interface{}, c { in := in.(*ThirdPartyResourceList) out := out.(*ThirdPartyResourceList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ThirdPartyResource, len(*in)) @@ -1440,8 +1081,6 @@ func DeepCopy_v1beta1_ThirdPartyResourceList(in interface{}, out interface{}, c return err } } - } else { - out.Items = nil } return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.defaults.go index 9be3b426f8..9869016d79 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,8 +21,8 @@ limitations under the License. package v1beta1 import ( + runtime "k8s.io/apimachinery/pkg/runtime" v1 "k8s.io/kubernetes/pkg/api/v1" - runtime "k8s.io/kubernetes/pkg/runtime" ) // RegisterDefaults adds defaulters functions to the given scheme. 
@@ -33,12 +33,6 @@ func RegisterDefaults(scheme *runtime.Scheme) error { scheme.AddTypeDefaultingFunc(&DaemonSetList{}, func(obj interface{}) { SetObjectDefaults_DaemonSetList(obj.(*DaemonSetList)) }) scheme.AddTypeDefaultingFunc(&Deployment{}, func(obj interface{}) { SetObjectDefaults_Deployment(obj.(*Deployment)) }) scheme.AddTypeDefaultingFunc(&DeploymentList{}, func(obj interface{}) { SetObjectDefaults_DeploymentList(obj.(*DeploymentList)) }) - scheme.AddTypeDefaultingFunc(&HorizontalPodAutoscaler{}, func(obj interface{}) { SetObjectDefaults_HorizontalPodAutoscaler(obj.(*HorizontalPodAutoscaler)) }) - scheme.AddTypeDefaultingFunc(&HorizontalPodAutoscalerList{}, func(obj interface{}) { - SetObjectDefaults_HorizontalPodAutoscalerList(obj.(*HorizontalPodAutoscalerList)) - }) - scheme.AddTypeDefaultingFunc(&Job{}, func(obj interface{}) { SetObjectDefaults_Job(obj.(*Job)) }) - scheme.AddTypeDefaultingFunc(&JobList{}, func(obj interface{}) { SetObjectDefaults_JobList(obj.(*JobList)) }) scheme.AddTypeDefaultingFunc(&NetworkPolicy{}, func(obj interface{}) { SetObjectDefaults_NetworkPolicy(obj.(*NetworkPolicy)) }) scheme.AddTypeDefaultingFunc(&NetworkPolicyList{}, func(obj interface{}) { SetObjectDefaults_NetworkPolicyList(obj.(*NetworkPolicyList)) }) scheme.AddTypeDefaultingFunc(&ReplicaSet{}, func(obj interface{}) { SetObjectDefaults_ReplicaSet(obj.(*ReplicaSet)) }) @@ -76,6 +70,23 @@ func SetObjectDefaults_DaemonSet(in *DaemonSet) { if a.VolumeSource.AzureDisk != nil { v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } } for i := range in.Spec.Template.Spec.InitContainers { a := &in.Spec.Template.Spec.InitContainers[i] @@ -200,140 +211,22 @@ func SetObjectDefaults_Deployment(in *Deployment) { if a.VolumeSource.AzureDisk != nil { v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) } - } - for i := range in.Spec.Template.Spec.InitContainers { - a := &in.Spec.Template.Spec.InitContainers[i] - v1.SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - v1.SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } } } } - v1.SetDefaults_ResourceList(&a.Resources.Limits) - v1.SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - v1.SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - v1.SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - 
v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } - for i := range in.Spec.Template.Spec.Containers { - a := &in.Spec.Template.Spec.Containers[i] - v1.SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - v1.SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - v1.SetDefaults_ResourceList(&a.Resources.Limits) - v1.SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - v1.SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - v1.SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } -} - -func SetObjectDefaults_DeploymentList(in *DeploymentList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_Deployment(a) - } -} - -func SetObjectDefaults_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler) { - SetDefaults_HorizontalPodAutoscaler(in) -} - -func SetObjectDefaults_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_HorizontalPodAutoscaler(a) - } -} - -func SetObjectDefaults_Job(in *Job) { - SetDefaults_Job(in) - v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) - for i := range in.Spec.Template.Spec.Volumes { - a := &in.Spec.Template.Spec.Volumes[i] - v1.SetDefaults_Volume(a) - if a.VolumeSource.Secret != nil { - v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) - } - if a.VolumeSource.ISCSI != nil { - v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) - } - if a.VolumeSource.RBD != nil { - v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) - } - if a.VolumeSource.DownwardAPI != nil { - v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) - for j := range a.VolumeSource.DownwardAPI.Items { - b := &a.VolumeSource.DownwardAPI.Items[j] - if b.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.FieldRef) - } - } - } - if a.VolumeSource.ConfigMap != nil { - v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) - } - if a.VolumeSource.AzureDisk != nil { - v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) } } for i := range in.Spec.Template.Spec.InitContainers { @@ -422,10 +315,10 @@ func SetObjectDefaults_Job(in *Job) { } } -func SetObjectDefaults_JobList(in *JobList) { +func SetObjectDefaults_DeploymentList(in *DeploymentList) { for i := range in.Items { a := &in.Items[i] - SetObjectDefaults_Job(a) + SetObjectDefaults_Deployment(a) } } @@ -470,6 +363,23 @@ func 
SetObjectDefaults_ReplicaSet(in *ReplicaSet) { if a.VolumeSource.AzureDisk != nil { v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } } for i := range in.Spec.Template.Spec.InitContainers { a := &in.Spec.Template.Spec.InitContainers[i] diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/BUILD deleted file mode 100644 index 1aa5a0df1d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/BUILD +++ /dev/null @@ -1,49 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["validation.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/unversioned/validation:go_default_library", - "//pkg/api/validation:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/security/apparmor:go_default_library", - "//pkg/security/podsecuritypolicy/seccomp:go_default_library", - "//pkg/security/podsecuritypolicy/util:go_default_library", - "//pkg/util/intstr:go_default_library", - "//pkg/util/sets:go_default_library", - "//pkg/util/validation:go_default_library", - "//pkg/util/validation/field:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["validation_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/security/apparmor:go_default_library", - "//pkg/security/podsecuritypolicy/seccomp:go_default_library", - "//pkg/security/podsecuritypolicy/util:go_default_library", - "//pkg/util/intstr:go_default_library", - "//pkg/util/validation/field:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go deleted file mode 100644 index bb5224f132..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go +++ /dev/null @@ -1,845 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package validation - -import ( - "fmt" - "net" - "reflect" - "regexp" - "strconv" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - unversionedvalidation "k8s.io/kubernetes/pkg/api/unversioned/validation" - apivalidation "k8s.io/kubernetes/pkg/api/validation" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/security/apparmor" - "k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp" - psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util" - "k8s.io/kubernetes/pkg/util/intstr" - "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/util/validation" - "k8s.io/kubernetes/pkg/util/validation/field" -) - -func ValidateThirdPartyResourceUpdate(update, old *extensions.ThirdPartyResource) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...) - allErrs = append(allErrs, ValidateThirdPartyResource(update)...) - return allErrs -} - -func ValidateThirdPartyResourceName(name string, prefix bool) []string { - // Make sure it's a valid DNS subdomain - if msgs := apivalidation.NameIsDNSSubdomain(name, prefix); len(msgs) != 0 { - return msgs - } - - // Make sure it's at least three segments (kind + two-segment group name) - if !prefix { - parts := strings.Split(name, ".") - if len(parts) < 3 { - return []string{"must be at least three segments long: .."} - } - } - - return nil -} - -func ValidateThirdPartyResource(obj *extensions.ThirdPartyResource) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&obj.ObjectMeta, false, ValidateThirdPartyResourceName, field.NewPath("metadata"))...) - - versions := sets.String{} - if len(obj.Versions) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("versions"), "must specify at least one version")) - } - for ix := range obj.Versions { - version := &obj.Versions[ix] - if len(version.Name) == 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("versions").Index(ix).Child("name"), version, "must not be empty")) - } else { - for _, msg := range validation.IsDNS1123Label(version.Name) { - allErrs = append(allErrs, field.Invalid(field.NewPath("versions").Index(ix).Child("name"), version, msg)) - } - } - if versions.Has(version.Name) { - allErrs = append(allErrs, field.Duplicate(field.NewPath("versions").Index(ix).Child("name"), version)) - } - versions.Insert(version.Name) - } - return allErrs -} - -// ValidateDaemonSet tests if required fields in the DaemonSet are set. -func ValidateDaemonSet(ds *extensions.DaemonSet) field.ErrorList { - allErrs := apivalidation.ValidateObjectMeta(&ds.ObjectMeta, true, ValidateDaemonSetName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidateDaemonSetUpdate tests if required fields in the DaemonSet are set. -func ValidateDaemonSetUpdate(ds, oldDS *extensions.DaemonSet) field.ErrorList { - allErrs := apivalidation.ValidateObjectMetaUpdate(&ds.ObjectMeta, &oldDS.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, field.NewPath("spec"))...) 
- return allErrs -} - -// validateDaemonSetStatus validates a DaemonSetStatus -func validateDaemonSetStatus(status *extensions.DaemonSetStatus, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.CurrentNumberScheduled), fldPath.Child("currentNumberScheduled"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberMisscheduled), fldPath.Child("numberMisscheduled"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DesiredNumberScheduled), fldPath.Child("desiredNumberScheduled"))...) - return allErrs -} - -// ValidateDaemonSetStatus validates tests if required fields in the DaemonSet Status section -func ValidateDaemonSetStatusUpdate(ds, oldDS *extensions.DaemonSet) field.ErrorList { - allErrs := apivalidation.ValidateObjectMetaUpdate(&ds.ObjectMeta, &oldDS.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, validateDaemonSetStatus(&ds.Status, field.NewPath("status"))...) - return allErrs -} - -// ValidateDaemonSetSpec tests if required fields in the DaemonSetSpec are set. -func ValidateDaemonSetSpec(spec *extensions.DaemonSetSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) - - selector, err := unversioned.LabelSelectorAsSelector(spec.Selector) - if err == nil && !selector.Matches(labels.Set(spec.Template.Labels)) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("template", "metadata", "labels"), spec.Template.Labels, "`selector` does not match template `labels`")) - } - if spec.Selector != nil && len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is not valid for daemonset.")) - } - - allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(&spec.Template, fldPath.Child("template"))...) - // Daemons typically run on more than one node, so mark Read-Write persistent disks as invalid. - allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(spec.Template.Spec.Volumes, fldPath.Child("template", "spec", "volumes"))...) - // RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec(). - if spec.Template.Spec.RestartPolicy != api.RestartPolicyAlways { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"), spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)})) - } - return allErrs -} - -// ValidateDaemonSetName can be used to check whether the given daemon set name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateDaemonSetName = apivalidation.NameIsDNSSubdomain - -// Validates that the given name can be used as a deployment name. 
-var ValidateDeploymentName = apivalidation.NameIsDNSSubdomain - -func ValidatePositiveIntOrPercent(intOrPercent intstr.IntOrString, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - switch intOrPercent.Type { - case intstr.String: - for _, msg := range validation.IsValidPercent(intOrPercent.StrVal) { - allErrs = append(allErrs, field.Invalid(fldPath, intOrPercent, msg)) - } - case intstr.Int: - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(intOrPercent.IntValue()), fldPath)...) - default: - allErrs = append(allErrs, field.Invalid(fldPath, intOrPercent, "must be an integer or percentage (e.g '5%%')")) - } - return allErrs -} - -func getPercentValue(intOrStringValue intstr.IntOrString) (int, bool) { - if intOrStringValue.Type != intstr.String { - return 0, false - } - if len(validation.IsValidPercent(intOrStringValue.StrVal)) != 0 { - return 0, false - } - value, _ := strconv.Atoi(intOrStringValue.StrVal[:len(intOrStringValue.StrVal)-1]) - return value, true -} - -func getIntOrPercentValue(intOrStringValue intstr.IntOrString) int { - value, isPercent := getPercentValue(intOrStringValue) - if isPercent { - return value - } - return intOrStringValue.IntValue() -} - -func IsNotMoreThan100Percent(intOrStringValue intstr.IntOrString, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - value, isPercent := getPercentValue(intOrStringValue) - if !isPercent || value <= 100 { - return nil - } - allErrs = append(allErrs, field.Invalid(fldPath, intOrStringValue, "must not be greater than 100%")) - return allErrs -} - -func ValidateRollingUpdateDeployment(rollingUpdate *extensions.RollingUpdateDeployment, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...) - allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxSurge, fldPath.Child("maxSurge"))...) - if getIntOrPercentValue(rollingUpdate.MaxUnavailable) == 0 && getIntOrPercentValue(rollingUpdate.MaxSurge) == 0 { - // Both MaxSurge and MaxUnavailable cannot be zero. - allErrs = append(allErrs, field.Invalid(fldPath.Child("maxUnavailable"), rollingUpdate.MaxUnavailable, "may not be 0 when `maxSurge` is 0")) - } - // Validate that MaxUnavailable is not more than 100%. - allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...) - return allErrs -} - -func ValidateDeploymentStrategy(strategy *extensions.DeploymentStrategy, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - switch strategy.Type { - case extensions.RecreateDeploymentStrategyType: - if strategy.RollingUpdate != nil { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("rollingUpdate"), "may not be specified when strategy `type` is '"+string(extensions.RecreateDeploymentStrategyType+"'"))) - } - case extensions.RollingUpdateDeploymentStrategyType: - // This should never happen since it's set and checked in defaults.go - if strategy.RollingUpdate == nil { - allErrs = append(allErrs, field.Required(fldPath.Child("rollingUpdate"), "this should be defaulted and never be nil")) - } else { - allErrs = append(allErrs, ValidateRollingUpdateDeployment(strategy.RollingUpdate, fldPath.Child("rollingUpdate"))...) 
- } - default: - validValues := []string{string(extensions.RecreateDeploymentStrategyType), string(extensions.RollingUpdateDeploymentStrategyType)} - allErrs = append(allErrs, field.NotSupported(fldPath, strategy, validValues)) - } - return allErrs -} - -func ValidateRollback(rollback *extensions.RollbackConfig, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - v := rollback.Revision - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(v), fldPath.Child("version"))...) - return allErrs -} - -// Validates given deployment spec. -func ValidateDeploymentSpec(spec *extensions.DeploymentSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...) - - if spec.Selector == nil { - allErrs = append(allErrs, field.Required(fldPath.Child("selector"), "")) - } else { - allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) - if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is not valid for deployment.")) - } - } - - selector, err := unversioned.LabelSelectorAsSelector(spec.Selector) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "invalid label selector.")) - } else { - allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, selector, spec.Replicas, fldPath.Child("template"))...) - } - - allErrs = append(allErrs, ValidateDeploymentStrategy(&spec.Strategy, fldPath.Child("strategy"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...) - if spec.RevisionHistoryLimit != nil { - // zero is a valid RevisionHistoryLimit - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.RevisionHistoryLimit), fldPath.Child("revisionHistoryLimit"))...) - } - if spec.RollbackTo != nil { - allErrs = append(allErrs, ValidateRollback(spec.RollbackTo, fldPath.Child("rollback"))...) - } - if spec.ProgressDeadlineSeconds != nil { - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.ProgressDeadlineSeconds), fldPath.Child("progressDeadlineSeconds"))...) - if *spec.ProgressDeadlineSeconds <= spec.MinReadySeconds { - allErrs = append(allErrs, field.Invalid(fldPath.Child("progressDeadlineSeconds"), spec.ProgressDeadlineSeconds, "must be greater than minReadySeconds.")) - } - } - return allErrs -} - -// Validates given deployment status. -func ValidateDeploymentStatus(status *extensions.DeploymentStatus, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(status.ObservedGeneration, fldPath.Child("observedGeneration"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Replicas), fldPath.Child("replicas"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UpdatedReplicas), fldPath.Child("updatedReplicas"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.AvailableReplicas), fldPath.Child("availableReplicas"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UnavailableReplicas), fldPath.Child("unavailableReplicas"))...) 
- return allErrs -} - -func ValidateDeploymentUpdate(update, old *extensions.Deployment) field.ErrorList { - allErrs := apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateDeploymentSpec(&update.Spec, field.NewPath("spec"))...) - return allErrs -} - -func ValidateDeploymentStatusUpdate(update, old *extensions.Deployment) field.ErrorList { - allErrs := apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateDeploymentStatus(&update.Status, field.NewPath("status"))...) - return allErrs -} - -func ValidateDeployment(obj *extensions.Deployment) field.ErrorList { - allErrs := apivalidation.ValidateObjectMeta(&obj.ObjectMeta, true, ValidateDeploymentName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateDeploymentSpec(&obj.Spec, field.NewPath("spec"))...) - return allErrs -} - -func ValidateDeploymentRollback(obj *extensions.DeploymentRollback) field.ErrorList { - allErrs := apivalidation.ValidateAnnotations(obj.UpdatedAnnotations, field.NewPath("updatedAnnotations")) - if len(obj.Name) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("name"), "name is required")) - } - allErrs = append(allErrs, ValidateRollback(&obj.RollbackTo, field.NewPath("rollback"))...) - return allErrs -} - -func ValidateThirdPartyResourceDataUpdate(update, old *extensions.ThirdPartyResourceData) field.ErrorList { - return apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata")) -} - -func ValidateThirdPartyResourceData(obj *extensions.ThirdPartyResourceData) field.ErrorList { - return apivalidation.ValidateObjectMeta(&obj.ObjectMeta, true, apivalidation.NameIsDNSLabel, field.NewPath("metadata")) -} - -// ValidateIngress tests if required fields in the Ingress are set. -func ValidateIngress(ingress *extensions.Ingress) field.ErrorList { - allErrs := apivalidation.ValidateObjectMeta(&ingress.ObjectMeta, true, ValidateIngressName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateIngressSpec(&ingress.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidateIngressName validates that the given name can be used as an Ingress name. -var ValidateIngressName = apivalidation.NameIsDNSSubdomain - -func validateIngressTLS(spec *extensions.IngressSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - // TODO: Perform a more thorough validation of spec.TLS.Hosts that takes - // the wildcard spec from RFC 6125 into account. - for _, itls := range spec.TLS { - for i, host := range itls.Hosts { - if strings.Contains(host, "*") { - for _, msg := range validation.IsWildcardDNS1123Subdomain(host) { - allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("hosts"), host, msg)) - } - continue - } - for _, msg := range validation.IsDNS1123Subdomain(host) { - allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("hosts"), host, msg)) - } - } - } - - return allErrs -} - -// ValidateIngressSpec tests if required fields in the IngressSpec are set. -func ValidateIngressSpec(spec *extensions.IngressSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - // TODO: Is a default backend mandatory? - if spec.Backend != nil { - allErrs = append(allErrs, validateIngressBackend(spec.Backend, fldPath.Child("backend"))...) 
- } else if len(spec.Rules) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath, spec.Rules, "either `backend` or `rules` must be specified")) - } - if len(spec.Rules) > 0 { - allErrs = append(allErrs, validateIngressRules(spec.Rules, fldPath.Child("rules"))...) - } - if len(spec.TLS) > 0 { - allErrs = append(allErrs, validateIngressTLS(spec, fldPath.Child("tls"))...) - } - return allErrs -} - -// ValidateIngressUpdate tests if required fields in the Ingress are set. -func ValidateIngressUpdate(ingress, oldIngress *extensions.Ingress) field.ErrorList { - allErrs := apivalidation.ValidateObjectMetaUpdate(&ingress.ObjectMeta, &oldIngress.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateIngressSpec(&ingress.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidateIngressStatusUpdate tests if required fields in the Ingress are set when updating status. -func ValidateIngressStatusUpdate(ingress, oldIngress *extensions.Ingress) field.ErrorList { - allErrs := apivalidation.ValidateObjectMetaUpdate(&ingress.ObjectMeta, &oldIngress.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, apivalidation.ValidateLoadBalancerStatus(&ingress.Status.LoadBalancer, field.NewPath("status", "loadBalancer"))...) - return allErrs -} - -func validateIngressRules(ingressRules []extensions.IngressRule, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(ingressRules) == 0 { - return append(allErrs, field.Required(fldPath, "")) - } - for i, ih := range ingressRules { - if len(ih.Host) > 0 { - if isIP := (net.ParseIP(ih.Host) != nil); isIP { - allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, "must be a DNS name, not an IP address")) - } - // TODO: Ports and ips are allowed in the host part of a url - // according to RFC 3986, consider allowing them. - if strings.Contains(ih.Host, "*") { - for _, msg := range validation.IsWildcardDNS1123Subdomain(ih.Host) { - allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, msg)) - } - continue - } - for _, msg := range validation.IsDNS1123Subdomain(ih.Host) { - allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, msg)) - } - } - allErrs = append(allErrs, validateIngressRuleValue(&ih.IngressRuleValue, fldPath.Index(0))...) - } - return allErrs -} - -func validateIngressRuleValue(ingressRule *extensions.IngressRuleValue, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if ingressRule.HTTP != nil { - allErrs = append(allErrs, validateHTTPIngressRuleValue(ingressRule.HTTP, fldPath.Child("http"))...) - } - return allErrs -} - -func validateHTTPIngressRuleValue(httpIngressRuleValue *extensions.HTTPIngressRuleValue, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(httpIngressRuleValue.Paths) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("paths"), "")) - } - for i, rule := range httpIngressRuleValue.Paths { - if len(rule.Path) > 0 { - if !strings.HasPrefix(rule.Path, "/") { - allErrs = append(allErrs, field.Invalid(fldPath.Child("paths").Index(i).Child("path"), rule.Path, "must be an absolute path")) - } - // TODO: More draconian path regex validation. - // Path must be a valid regex. This is the basic requirement. - // In addition to this any characters not allowed in a path per - // RFC 3986 section-3.3 cannot appear as a literal in the regex. 
- // Consider the example: http://host/valid?#bar, everything after - // the last '/' is a valid regex that matches valid#bar, which - // isn't a valid path, because the path terminates at the first ? - // or #. A more sophisticated form of validation would detect that - // the user is confusing url regexes with path regexes. - _, err := regexp.CompilePOSIX(rule.Path) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("paths").Index(i).Child("path"), rule.Path, "must be a valid regex")) - } - } - allErrs = append(allErrs, validateIngressBackend(&rule.Backend, fldPath.Child("backend"))...) - } - return allErrs -} - -// validateIngressBackend tests if a given backend is valid. -func validateIngressBackend(backend *extensions.IngressBackend, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - // All backends must reference a single local service by name, and a single service port by name or number. - if len(backend.ServiceName) == 0 { - return append(allErrs, field.Required(fldPath.Child("serviceName"), "")) - } else { - for _, msg := range apivalidation.ValidateServiceName(backend.ServiceName, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceName"), backend.ServiceName, msg)) - } - } - allErrs = append(allErrs, apivalidation.ValidatePortNumOrName(backend.ServicePort, fldPath.Child("servicePort"))...) - return allErrs -} - -func ValidateScale(scale *extensions.Scale) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&scale.ObjectMeta, true, apivalidation.NameIsDNSSubdomain, field.NewPath("metadata"))...) - - if scale.Spec.Replicas < 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "replicas"), scale.Spec.Replicas, "must be greater than or equal to 0")) - } - - return allErrs -} - -// ValidateReplicaSetName can be used to check whether the given ReplicaSet -// name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateReplicaSetName = apivalidation.NameIsDNSSubdomain - -// ValidateReplicaSet tests if required fields in the ReplicaSet are set. -func ValidateReplicaSet(rs *extensions.ReplicaSet) field.ErrorList { - allErrs := apivalidation.ValidateObjectMeta(&rs.ObjectMeta, true, ValidateReplicaSetName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidateReplicaSetUpdate tests if required fields in the ReplicaSet are set. -func ValidateReplicaSetUpdate(rs, oldRs *extensions.ReplicaSet) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&rs.ObjectMeta, &oldRs.ObjectMeta, field.NewPath("metadata"))...) - allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidateReplicaSetStatusUpdate tests if required fields in the ReplicaSet are set. -func ValidateReplicaSetStatusUpdate(rs, oldRs *extensions.ReplicaSet) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&rs.ObjectMeta, &oldRs.ObjectMeta, field.NewPath("metadata"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(rs.Status.Replicas), field.NewPath("status", "replicas"))...) 
- allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(rs.Status.FullyLabeledReplicas), field.NewPath("status", "fullyLabeledReplicas"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(rs.Status.ReadyReplicas), field.NewPath("status", "readyReplicas"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(rs.Status.AvailableReplicas), field.NewPath("status", "availableReplicas"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(rs.Status.ObservedGeneration), field.NewPath("status", "observedGeneration"))...) - return allErrs -} - -// ValidateReplicaSetSpec tests if required fields in the ReplicaSet spec are set. -func ValidateReplicaSetSpec(spec *extensions.ReplicaSetSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...) - - if spec.Selector == nil { - allErrs = append(allErrs, field.Required(fldPath.Child("selector"), "")) - } else { - allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) - if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is not valid for deployment.")) - } - } - - selector, err := unversioned.LabelSelectorAsSelector(spec.Selector) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "invalid label selector.")) - } else { - allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, selector, spec.Replicas, fldPath.Child("template"))...) - } - return allErrs -} - -// Validates the given template and ensures that it is in accordance with the desired selector and replicas. -func ValidatePodTemplateSpecForReplicaSet(template *api.PodTemplateSpec, selector labels.Selector, replicas int32, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if template == nil { - allErrs = append(allErrs, field.Required(fldPath, "")) - } else { - if !selector.Empty() { - // Verify that the ReplicaSet selector matches the labels in template. - labels := labels.Set(template.Labels) - if !selector.Matches(labels) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`")) - } - } - allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(template, fldPath)...) - if replicas > 1 { - allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...) - } - // RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec(). - if template.Spec.RestartPolicy != api.RestartPolicyAlways { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)})) - } - } - return allErrs -} - -// ValidatePodSecurityPolicyName can be used to check whether the given -// pod security policy name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. 
-var ValidatePodSecurityPolicyName = apivalidation.NameIsDNSSubdomain - -func ValidatePodSecurityPolicy(psp *extensions.PodSecurityPolicy) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&psp.ObjectMeta, false, ValidatePodSecurityPolicyName, field.NewPath("metadata"))...) - allErrs = append(allErrs, ValidatePodSecurityPolicySpecificAnnotations(psp.Annotations, field.NewPath("metadata").Child("annotations"))...) - allErrs = append(allErrs, ValidatePodSecurityPolicySpec(&psp.Spec, field.NewPath("spec"))...) - return allErrs -} - -func ValidatePodSecurityPolicySpec(spec *extensions.PodSecurityPolicySpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - allErrs = append(allErrs, validatePSPRunAsUser(fldPath.Child("runAsUser"), &spec.RunAsUser)...) - allErrs = append(allErrs, validatePSPSELinux(fldPath.Child("seLinux"), &spec.SELinux)...) - allErrs = append(allErrs, validatePSPSupplementalGroup(fldPath.Child("supplementalGroups"), &spec.SupplementalGroups)...) - allErrs = append(allErrs, validatePSPFSGroup(fldPath.Child("fsGroup"), &spec.FSGroup)...) - allErrs = append(allErrs, validatePodSecurityPolicyVolumes(fldPath, spec.Volumes)...) - allErrs = append(allErrs, validatePSPCapsAgainstDrops(spec.RequiredDropCapabilities, spec.DefaultAddCapabilities, field.NewPath("defaultAddCapabilities"))...) - allErrs = append(allErrs, validatePSPCapsAgainstDrops(spec.RequiredDropCapabilities, spec.AllowedCapabilities, field.NewPath("allowedCapabilities"))...) - - return allErrs -} - -func ValidatePodSecurityPolicySpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if p := annotations[apparmor.DefaultProfileAnnotationKey]; p != "" { - if err := apparmor.ValidateProfileFormat(p); err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Key(apparmor.DefaultProfileAnnotationKey), p, err.Error())) - } - } - if allowed := annotations[apparmor.AllowedProfilesAnnotationKey]; allowed != "" { - for _, p := range strings.Split(allowed, ",") { - if err := apparmor.ValidateProfileFormat(p); err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Key(apparmor.AllowedProfilesAnnotationKey), allowed, err.Error())) - } - } - } - - sysctlAnnotation := annotations[extensions.SysctlsPodSecurityPolicyAnnotationKey] - sysctlFldPath := fldPath.Key(extensions.SysctlsPodSecurityPolicyAnnotationKey) - sysctls, err := extensions.SysctlsFromPodSecurityPolicyAnnotation(sysctlAnnotation) - if err != nil { - allErrs = append(allErrs, field.Invalid(sysctlFldPath, sysctlAnnotation, err.Error())) - } else { - allErrs = append(allErrs, validatePodSecurityPolicySysctls(sysctlFldPath, sysctls)...) - } - - if p := annotations[seccomp.DefaultProfileAnnotationKey]; p != "" { - allErrs = append(allErrs, apivalidation.ValidateSeccompProfile(p, fldPath.Key(seccomp.DefaultProfileAnnotationKey))...) - } - if allowed := annotations[seccomp.AllowedProfilesAnnotationKey]; allowed != "" { - for _, p := range strings.Split(allowed, ",") { - allErrs = append(allErrs, apivalidation.ValidateSeccompProfile(p, fldPath.Key(seccomp.AllowedProfilesAnnotationKey))...) - } - } - return allErrs -} - -// validatePSPSELinux validates the SELinux fields of PodSecurityPolicy. 
-func validatePSPSELinux(fldPath *field.Path, seLinux *extensions.SELinuxStrategyOptions) field.ErrorList { - allErrs := field.ErrorList{} - - // ensure the selinux strategy has a valid rule - supportedSELinuxRules := sets.NewString(string(extensions.SELinuxStrategyMustRunAs), - string(extensions.SELinuxStrategyRunAsAny)) - if !supportedSELinuxRules.Has(string(seLinux.Rule)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), seLinux.Rule, supportedSELinuxRules.List())) - } - - return allErrs -} - -// validatePSPRunAsUser validates the RunAsUser fields of PodSecurityPolicy. -func validatePSPRunAsUser(fldPath *field.Path, runAsUser *extensions.RunAsUserStrategyOptions) field.ErrorList { - allErrs := field.ErrorList{} - - // ensure the user strategy has a valid rule - supportedRunAsUserRules := sets.NewString(string(extensions.RunAsUserStrategyMustRunAs), - string(extensions.RunAsUserStrategyMustRunAsNonRoot), - string(extensions.RunAsUserStrategyRunAsAny)) - if !supportedRunAsUserRules.Has(string(runAsUser.Rule)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), runAsUser.Rule, supportedRunAsUserRules.List())) - } - - // validate range settings - for idx, rng := range runAsUser.Ranges { - allErrs = append(allErrs, validateIDRanges(fldPath.Child("ranges").Index(idx), rng)...) - } - - return allErrs -} - -// validatePSPFSGroup validates the FSGroupStrategyOptions fields of the PodSecurityPolicy. -func validatePSPFSGroup(fldPath *field.Path, groupOptions *extensions.FSGroupStrategyOptions) field.ErrorList { - allErrs := field.ErrorList{} - - supportedRules := sets.NewString( - string(extensions.FSGroupStrategyMustRunAs), - string(extensions.FSGroupStrategyRunAsAny), - ) - if !supportedRules.Has(string(groupOptions.Rule)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), groupOptions.Rule, supportedRules.List())) - } - - for idx, rng := range groupOptions.Ranges { - allErrs = append(allErrs, validateIDRanges(fldPath.Child("ranges").Index(idx), rng)...) - } - return allErrs -} - -// validatePSPSupplementalGroup validates the SupplementalGroupsStrategyOptions fields of the PodSecurityPolicy. -func validatePSPSupplementalGroup(fldPath *field.Path, groupOptions *extensions.SupplementalGroupsStrategyOptions) field.ErrorList { - allErrs := field.ErrorList{} - - supportedRules := sets.NewString( - string(extensions.SupplementalGroupsStrategyRunAsAny), - string(extensions.SupplementalGroupsStrategyMustRunAs), - ) - if !supportedRules.Has(string(groupOptions.Rule)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), groupOptions.Rule, supportedRules.List())) - } - - for idx, rng := range groupOptions.Ranges { - allErrs = append(allErrs, validateIDRanges(fldPath.Child("ranges").Index(idx), rng)...) - } - return allErrs -} - -// validatePodSecurityPolicyVolumes validates the volume fields of PodSecurityPolicy. 
-func validatePodSecurityPolicyVolumes(fldPath *field.Path, volumes []extensions.FSType) field.ErrorList { - allErrs := field.ErrorList{} - allowed := psputil.GetAllFSTypesAsSet() - // add in the * value since that is a pseudo type that is not included by default - allowed.Insert(string(extensions.All)) - for _, v := range volumes { - if !allowed.Has(string(v)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("volumes"), v, allowed.List())) - } - } - - return allErrs -} - -const sysctlPatternSegmentFmt string = "([a-z0-9][-_a-z0-9]*)?[a-z0-9*]" -const SysctlPatternFmt string = "(" + apivalidation.SysctlSegmentFmt + "\\.)*" + sysctlPatternSegmentFmt - -var sysctlPatternRegexp = regexp.MustCompile("^" + SysctlPatternFmt + "$") - -func IsValidSysctlPattern(name string) bool { - if len(name) > apivalidation.SysctlMaxLength { - return false - } - return sysctlPatternRegexp.MatchString(name) -} - -// validatePodSecurityPolicySysctls validates the sysctls fields of PodSecurityPolicy. -func validatePodSecurityPolicySysctls(fldPath *field.Path, sysctls []string) field.ErrorList { - allErrs := field.ErrorList{} - for i, s := range sysctls { - if !IsValidSysctlPattern(string(s)) { - allErrs = append( - allErrs, - field.Invalid(fldPath.Index(i), sysctls[i], fmt.Sprintf("must have at most %d characters and match regex %s", - apivalidation.SysctlMaxLength, - SysctlPatternFmt, - )), - ) - } - } - - return allErrs -} - -// validateIDRanges ensures the range is valid. -func validateIDRanges(fldPath *field.Path, rng extensions.IDRange) field.ErrorList { - allErrs := field.ErrorList{} - - // if 0 <= Min <= Max then we do not need to validate max. It is always greater than or - // equal to 0 and Min. - if rng.Min < 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("min"), rng.Min, "min cannot be negative")) - } - if rng.Max < 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("max"), rng.Max, "max cannot be negative")) - } - if rng.Min > rng.Max { - allErrs = append(allErrs, field.Invalid(fldPath.Child("min"), rng.Min, "min cannot be greater than max")) - } - - return allErrs -} - -// validatePSPCapsAgainstDrops ensures an allowed cap is not listed in the required drops. -func validatePSPCapsAgainstDrops(requiredDrops []api.Capability, capsToCheck []api.Capability, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if requiredDrops == nil { - return allErrs - } - for _, cap := range capsToCheck { - if hasCap(cap, requiredDrops) { - allErrs = append(allErrs, field.Invalid(fldPath, cap, - fmt.Sprintf("capability is listed in %s and requiredDropCapabilities", fldPath.String()))) - } - } - return allErrs -} - -// hasCap checks for needle in haystack. -func hasCap(needle api.Capability, haystack []api.Capability) bool { - for _, c := range haystack { - if needle == c { - return true - } - } - return false -} - -// ValidatePodSecurityPolicyUpdate validates a PSP for updates. -func ValidatePodSecurityPolicyUpdate(old *extensions.PodSecurityPolicy, new *extensions.PodSecurityPolicy) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&new.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...) - allErrs = append(allErrs, ValidatePodSecurityPolicySpecificAnnotations(new.Annotations, field.NewPath("metadata").Child("annotations"))...) - allErrs = append(allErrs, ValidatePodSecurityPolicySpec(&new.Spec, field.NewPath("spec"))...) 
- return allErrs -} - -// ValidateNetworkPolicyName can be used to check whether the given networkpolicy -// name is valid. -func ValidateNetworkPolicyName(name string, prefix bool) []string { - return apivalidation.NameIsDNSSubdomain(name, prefix) -} - -// ValidateNetworkPolicySpec tests if required fields in the networkpolicy spec are set. -func ValidateNetworkPolicySpec(spec *extensions.NetworkPolicySpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(&spec.PodSelector, fldPath.Child("podSelector"))...) - - // Validate ingress rules. - for _, i := range spec.Ingress { - // TODO: Update From to be a pointer to slice as soon as auto-generation supports it. - for _, f := range i.From { - numFroms := 0 - if f.PodSelector != nil { - numFroms++ - allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(f.PodSelector, fldPath.Child("podSelector"))...) - } - if f.NamespaceSelector != nil { - if numFroms > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath, "may not specify more than 1 from type")) - } else { - numFroms++ - allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(f.NamespaceSelector, fldPath.Child("namespaces"))...) - } - } - - if numFroms == 0 { - // At least one of PodSelector and NamespaceSelector must be defined. - allErrs = append(allErrs, field.Required(fldPath, "must specify a from type")) - } - } - } - return allErrs -} - -// ValidateNetworkPolicy validates a networkpolicy. -func ValidateNetworkPolicy(np *extensions.NetworkPolicy) field.ErrorList { - allErrs := apivalidation.ValidateObjectMeta(&np.ObjectMeta, true, ValidateNetworkPolicyName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateNetworkPolicySpec(&np.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidateNetworkPolicyUpdate tests if an update to a NetworkPolicy is valid. -func ValidateNetworkPolicyUpdate(update, old *extensions.NetworkPolicy) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...) - if !reflect.DeepEqual(update.Spec, old.Spec) { - allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to networkpolicy spec are forbidden.")) - } - return allErrs -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go index 8714d2dbe8..d2187b5e42 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,11 +21,11 @@ limitations under the License. 
package extensions import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" - intstr "k8s.io/kubernetes/pkg/util/intstr" reflect "reflect" ) @@ -46,6 +46,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetList, InType: reflect.TypeOf(&DaemonSetList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetSpec, InType: reflect.TypeOf(&DaemonSetSpec{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetStatus, InType: reflect.TypeOf(&DaemonSetStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetUpdateStrategy, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_Deployment, InType: reflect.TypeOf(&Deployment{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentCondition, InType: reflect.TypeOf(&DeploymentCondition{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentList, InType: reflect.TypeOf(&DeploymentList{})}, @@ -82,6 +83,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSetStatus, InType: reflect.TypeOf(&ReplicaSetStatus{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicationControllerDummy, InType: reflect.TypeOf(&ReplicationControllerDummy{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RollbackConfig, InType: reflect.TypeOf(&RollbackConfig{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RollingUpdateDaemonSet, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RollingUpdateDeployment, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RunAsUserStrategyOptions, InType: reflect.TypeOf(&RunAsUserStrategyOptions{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_SELinuxStrategyOptions, InType: reflect.TypeOf(&SELinuxStrategyOptions{})}, @@ -100,7 +102,7 @@ func DeepCopy_extensions_APIVersion(in interface{}, out interface{}, c *conversi { in := in.(*APIVersion) out := out.(*APIVersion) - out.Name = in.Name + *out = *in return nil } } @@ -109,7 +111,7 @@ func DeepCopy_extensions_CustomMetricCurrentStatus(in interface{}, out interface { in := in.(*CustomMetricCurrentStatus) out := out.(*CustomMetricCurrentStatus) - out.Name = in.Name + *out = *in out.CurrentValue = in.CurrentValue.DeepCopy() return nil } @@ -119,6 +121,7 @@ func DeepCopy_extensions_CustomMetricCurrentStatusList(in interface{}, out inter { in := in.(*CustomMetricCurrentStatusList) out := out.(*CustomMetricCurrentStatusList) + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CustomMetricCurrentStatus, len(*in)) @@ -127,8 +130,6 @@ func DeepCopy_extensions_CustomMetricCurrentStatusList(in interface{}, out inter return err } } - } else { - out.Items = nil } return nil } @@ -138,7 +139,7 @@ func DeepCopy_extensions_CustomMetricTarget(in interface{}, out interface{}, c * { in := in.(*CustomMetricTarget) out := out.(*CustomMetricTarget) - out.Name = in.Name + *out = *in out.TargetValue = in.TargetValue.DeepCopy() return 
nil } @@ -148,6 +149,7 @@ func DeepCopy_extensions_CustomMetricTargetList(in interface{}, out interface{}, { in := in.(*CustomMetricTargetList) out := out.(*CustomMetricTargetList) + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CustomMetricTarget, len(*in)) @@ -156,8 +158,6 @@ func DeepCopy_extensions_CustomMetricTargetList(in interface{}, out interface{}, return err } } - } else { - out.Items = nil } return nil } @@ -167,14 +167,15 @@ func DeepCopy_extensions_DaemonSet(in interface{}, out interface{}, c *conversio { in := in.(*DaemonSet) out := out.(*DaemonSet) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_extensions_DaemonSetSpec(&in.Spec, &out.Spec, c); err != nil { return err } - out.Status = in.Status return nil } } @@ -183,8 +184,7 @@ func DeepCopy_extensions_DaemonSetList(in interface{}, out interface{}, c *conve { in := in.(*DaemonSetList) out := out.(*DaemonSetList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DaemonSet, len(*in)) @@ -193,8 +193,6 @@ func DeepCopy_extensions_DaemonSetList(in interface{}, out interface{}, c *conve return err } } - } else { - out.Items = nil } return nil } @@ -204,18 +202,21 @@ func DeepCopy_extensions_DaemonSetSpec(in interface{}, out interface{}, c *conve { in := in.(*DaemonSetSpec) out := out.(*DaemonSetSpec) + *out = *in if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*v1.LabelSelector) } - } else { - out.Selector = nil } if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { return err } + if err := DeepCopy_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, c); err != nil { + return err + } return nil } } @@ -224,10 +225,21 @@ func DeepCopy_extensions_DaemonSetStatus(in interface{}, out interface{}, c *con { in := in.(*DaemonSetStatus) out := out.(*DaemonSetStatus) - out.CurrentNumberScheduled = in.CurrentNumberScheduled - out.NumberMisscheduled = in.NumberMisscheduled - out.DesiredNumberScheduled = in.DesiredNumberScheduled - out.NumberReady = in.NumberReady + *out = *in + return nil + } +} + +func DeepCopy_extensions_DaemonSetUpdateStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSetUpdateStrategy) + out := out.(*DaemonSetUpdateStrategy) + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDaemonSet) + **out = **in + } return nil } } @@ -236,9 +248,11 @@ func DeepCopy_extensions_Deployment(in interface{}, out interface{}, c *conversi { in := in.(*Deployment) out := out.(*Deployment) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_extensions_DeploymentSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -254,12 +268,9 @@ func 
DeepCopy_extensions_DeploymentCondition(in interface{}, out interface{}, c { in := in.(*DeploymentCondition) out := out.(*DeploymentCondition) - out.Type = in.Type - out.Status = in.Status + *out = *in out.LastUpdateTime = in.LastUpdateTime.DeepCopy() out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - out.Reason = in.Reason - out.Message = in.Message return nil } } @@ -268,8 +279,7 @@ func DeepCopy_extensions_DeploymentList(in interface{}, out interface{}, c *conv { in := in.(*DeploymentList) out := out.(*DeploymentList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) @@ -278,8 +288,6 @@ func DeepCopy_extensions_DeploymentList(in interface{}, out interface{}, c *conv return err } } - } else { - out.Items = nil } return nil } @@ -289,18 +297,14 @@ func DeepCopy_extensions_DeploymentRollback(in interface{}, out interface{}, c * { in := in.(*DeploymentRollback) out := out.(*DeploymentRollback) - out.TypeMeta = in.TypeMeta - out.Name = in.Name + *out = *in if in.UpdatedAnnotations != nil { in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations *out = make(map[string]string) for key, val := range *in { (*out)[key] = val } - } else { - out.UpdatedAnnotations = nil } - out.RollbackTo = in.RollbackTo return nil } } @@ -309,15 +313,14 @@ func DeepCopy_extensions_DeploymentSpec(in interface{}, out interface{}, c *conv { in := in.(*DeploymentSpec) out := out.(*DeploymentSpec) - out.Replicas = in.Replicas + *out = *in if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*v1.LabelSelector) } - } else { - out.Selector = nil } if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { return err @@ -325,28 +328,20 @@ func DeepCopy_extensions_DeploymentSpec(in interface{}, out interface{}, c *conv if err := DeepCopy_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, c); err != nil { return err } - out.MinReadySeconds = in.MinReadySeconds if in.RevisionHistoryLimit != nil { in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit *out = new(int32) **out = **in - } else { - out.RevisionHistoryLimit = nil } - out.Paused = in.Paused if in.RollbackTo != nil { in, out := &in.RollbackTo, &out.RollbackTo *out = new(RollbackConfig) **out = **in - } else { - out.RollbackTo = nil } if in.ProgressDeadlineSeconds != nil { in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds *out = new(int32) **out = **in - } else { - out.ProgressDeadlineSeconds = nil } return nil } @@ -356,11 +351,7 @@ func DeepCopy_extensions_DeploymentStatus(in interface{}, out interface{}, c *co { in := in.(*DeploymentStatus) out := out.(*DeploymentStatus) - out.ObservedGeneration = in.ObservedGeneration - out.Replicas = in.Replicas - out.UpdatedReplicas = in.UpdatedReplicas - out.AvailableReplicas = in.AvailableReplicas - out.UnavailableReplicas = in.UnavailableReplicas + *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]DeploymentCondition, len(*in)) @@ -369,8 +360,6 @@ func DeepCopy_extensions_DeploymentStatus(in interface{}, out interface{}, c *co return err } } - } else { - out.Conditions = nil } return nil } @@ -380,13 +369,11 @@ func DeepCopy_extensions_DeploymentStrategy(in interface{}, out 
interface{}, c * { in := in.(*DeploymentStrategy) out := out.(*DeploymentStrategy) - out.Type = in.Type + *out = *in if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate *out = new(RollingUpdateDeployment) **out = **in - } else { - out.RollingUpdate = nil } return nil } @@ -396,15 +383,11 @@ func DeepCopy_extensions_FSGroupStrategyOptions(in interface{}, out interface{}, { in := in.(*FSGroupStrategyOptions) out := out.(*FSGroupStrategyOptions) - out.Rule = in.Rule + *out = *in if in.Ranges != nil { in, out := &in.Ranges, &out.Ranges *out = make([]IDRange, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Ranges = nil + copy(*out, *in) } return nil } @@ -414,8 +397,7 @@ func DeepCopy_extensions_HTTPIngressPath(in interface{}, out interface{}, c *con { in := in.(*HTTPIngressPath) out := out.(*HTTPIngressPath) - out.Path = in.Path - out.Backend = in.Backend + *out = *in return nil } } @@ -424,14 +406,11 @@ func DeepCopy_extensions_HTTPIngressRuleValue(in interface{}, out interface{}, c { in := in.(*HTTPIngressRuleValue) out := out.(*HTTPIngressRuleValue) + *out = *in if in.Paths != nil { in, out := &in.Paths, &out.Paths *out = make([]HTTPIngressPath, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Paths = nil + copy(*out, *in) } return nil } @@ -441,8 +420,7 @@ func DeepCopy_extensions_HostPortRange(in interface{}, out interface{}, c *conve { in := in.(*HostPortRange) out := out.(*HostPortRange) - out.Min = in.Min - out.Max = in.Max + *out = *in return nil } } @@ -451,8 +429,7 @@ func DeepCopy_extensions_IDRange(in interface{}, out interface{}, c *conversion. { in := in.(*IDRange) out := out.(*IDRange) - out.Min = in.Min - out.Max = in.Max + *out = *in return nil } } @@ -461,9 +438,11 @@ func DeepCopy_extensions_Ingress(in interface{}, out interface{}, c *conversion. 
{ in := in.(*Ingress) out := out.(*Ingress) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_extensions_IngressSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -479,8 +458,7 @@ func DeepCopy_extensions_IngressBackend(in interface{}, out interface{}, c *conv { in := in.(*IngressBackend) out := out.(*IngressBackend) - out.ServiceName = in.ServiceName - out.ServicePort = in.ServicePort + *out = *in return nil } } @@ -489,8 +467,7 @@ func DeepCopy_extensions_IngressList(in interface{}, out interface{}, c *convers { in := in.(*IngressList) out := out.(*IngressList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Ingress, len(*in)) @@ -499,8 +476,6 @@ func DeepCopy_extensions_IngressList(in interface{}, out interface{}, c *convers return err } } - } else { - out.Items = nil } return nil } @@ -510,7 +485,7 @@ func DeepCopy_extensions_IngressRule(in interface{}, out interface{}, c *convers { in := in.(*IngressRule) out := out.(*IngressRule) - out.Host = in.Host + *out = *in if err := DeepCopy_extensions_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, c); err != nil { return err } @@ -522,14 +497,13 @@ func DeepCopy_extensions_IngressRuleValue(in interface{}, out interface{}, c *co { in := in.(*IngressRuleValue) out := out.(*IngressRuleValue) + *out = *in if in.HTTP != nil { in, out := &in.HTTP, &out.HTTP *out = new(HTTPIngressRuleValue) if err := DeepCopy_extensions_HTTPIngressRuleValue(*in, *out, c); err != nil { return err } - } else { - out.HTTP = nil } return nil } @@ -539,12 +513,11 @@ func DeepCopy_extensions_IngressSpec(in interface{}, out interface{}, c *convers { in := in.(*IngressSpec) out := out.(*IngressSpec) + *out = *in if in.Backend != nil { in, out := &in.Backend, &out.Backend *out = new(IngressBackend) **out = **in - } else { - out.Backend = nil } if in.TLS != nil { in, out := &in.TLS, &out.TLS @@ -554,8 +527,6 @@ func DeepCopy_extensions_IngressSpec(in interface{}, out interface{}, c *convers return err } } - } else { - out.TLS = nil } if in.Rules != nil { in, out := &in.Rules, &out.Rules @@ -565,8 +536,6 @@ func DeepCopy_extensions_IngressSpec(in interface{}, out interface{}, c *convers return err } } - } else { - out.Rules = nil } return nil } @@ -576,6 +545,7 @@ func DeepCopy_extensions_IngressStatus(in interface{}, out interface{}, c *conve { in := in.(*IngressStatus) out := out.(*IngressStatus) + *out = *in if err := api.DeepCopy_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, c); err != nil { return err } @@ -587,14 +557,12 @@ func DeepCopy_extensions_IngressTLS(in interface{}, out interface{}, c *conversi { in := in.(*IngressTLS) out := out.(*IngressTLS) + *out = *in if in.Hosts != nil { in, out := &in.Hosts, &out.Hosts *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Hosts = nil } - out.SecretName = in.SecretName return nil } } @@ -603,9 +571,11 @@ func DeepCopy_extensions_NetworkPolicy(in interface{}, out interface{}, c *conve { in := in.(*NetworkPolicy) out := out.(*NetworkPolicy) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + 
out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_extensions_NetworkPolicySpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -618,6 +588,7 @@ func DeepCopy_extensions_NetworkPolicyIngressRule(in interface{}, out interface{ { in := in.(*NetworkPolicyIngressRule) out := out.(*NetworkPolicyIngressRule) + *out = *in if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]NetworkPolicyPort, len(*in)) @@ -626,8 +597,6 @@ func DeepCopy_extensions_NetworkPolicyIngressRule(in interface{}, out interface{ return err } } - } else { - out.Ports = nil } if in.From != nil { in, out := &in.From, &out.From @@ -637,8 +606,6 @@ func DeepCopy_extensions_NetworkPolicyIngressRule(in interface{}, out interface{ return err } } - } else { - out.From = nil } return nil } @@ -648,8 +615,7 @@ func DeepCopy_extensions_NetworkPolicyList(in interface{}, out interface{}, c *c { in := in.(*NetworkPolicyList) out := out.(*NetworkPolicyList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]NetworkPolicy, len(*in)) @@ -658,8 +624,6 @@ func DeepCopy_extensions_NetworkPolicyList(in interface{}, out interface{}, c *c return err } } - } else { - out.Items = nil } return nil } @@ -669,23 +633,22 @@ func DeepCopy_extensions_NetworkPolicyPeer(in interface{}, out interface{}, c *c { in := in.(*NetworkPolicyPeer) out := out.(*NetworkPolicyPeer) + *out = *in if in.PodSelector != nil { in, out := &in.PodSelector, &out.PodSelector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*v1.LabelSelector) } - } else { - out.PodSelector = nil } if in.NamespaceSelector != nil { in, out := &in.NamespaceSelector, &out.NamespaceSelector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*v1.LabelSelector) } - } else { - out.NamespaceSelector = nil } return nil } @@ -695,19 +658,16 @@ func DeepCopy_extensions_NetworkPolicyPort(in interface{}, out interface{}, c *c { in := in.(*NetworkPolicyPort) out := out.(*NetworkPolicyPort) + *out = *in if in.Protocol != nil { in, out := &in.Protocol, &out.Protocol *out = new(api.Protocol) **out = **in - } else { - out.Protocol = nil } if in.Port != nil { in, out := &in.Port, &out.Port *out = new(intstr.IntOrString) **out = **in - } else { - out.Port = nil } return nil } @@ -717,8 +677,11 @@ func DeepCopy_extensions_NetworkPolicySpec(in interface{}, out interface{}, c *c { in := in.(*NetworkPolicySpec) out := out.(*NetworkPolicySpec) - if err := unversioned.DeepCopy_unversioned_LabelSelector(&in.PodSelector, &out.PodSelector, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.PodSelector); err != nil { return err + } else { + out.PodSelector = *newVal.(*v1.LabelSelector) } if in.Ingress != nil { in, out := &in.Ingress, &out.Ingress @@ -728,8 +691,6 @@ func DeepCopy_extensions_NetworkPolicySpec(in interface{}, out interface{}, c *c return err } } - } else { - out.Ingress = nil } return nil } @@ -739,9 +700,11 @@ func DeepCopy_extensions_PodSecurityPolicy(in interface{}, out interface{}, c *c { in := in.(*PodSecurityPolicy) out := out.(*PodSecurityPolicy) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, 
c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_extensions_PodSecurityPolicySpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -754,8 +717,7 @@ func DeepCopy_extensions_PodSecurityPolicyList(in interface{}, out interface{}, { in := in.(*PodSecurityPolicyList) out := out.(*PodSecurityPolicyList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodSecurityPolicy, len(*in)) @@ -764,8 +726,6 @@ func DeepCopy_extensions_PodSecurityPolicyList(in interface{}, out interface{}, return err } } - } else { - out.Items = nil } return nil } @@ -775,55 +735,32 @@ func DeepCopy_extensions_PodSecurityPolicySpec(in interface{}, out interface{}, { in := in.(*PodSecurityPolicySpec) out := out.(*PodSecurityPolicySpec) - out.Privileged = in.Privileged + *out = *in if in.DefaultAddCapabilities != nil { in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities *out = make([]api.Capability, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.DefaultAddCapabilities = nil + copy(*out, *in) } if in.RequiredDropCapabilities != nil { in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities *out = make([]api.Capability, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.RequiredDropCapabilities = nil + copy(*out, *in) } if in.AllowedCapabilities != nil { in, out := &in.AllowedCapabilities, &out.AllowedCapabilities *out = make([]api.Capability, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.AllowedCapabilities = nil + copy(*out, *in) } if in.Volumes != nil { in, out := &in.Volumes, &out.Volumes *out = make([]FSType, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Volumes = nil + copy(*out, *in) } - out.HostNetwork = in.HostNetwork if in.HostPorts != nil { in, out := &in.HostPorts, &out.HostPorts *out = make([]HostPortRange, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.HostPorts = nil + copy(*out, *in) } - out.HostPID = in.HostPID - out.HostIPC = in.HostIPC if err := DeepCopy_extensions_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, c); err != nil { return err } @@ -836,7 +773,6 @@ func DeepCopy_extensions_PodSecurityPolicySpec(in interface{}, out interface{}, if err := DeepCopy_extensions_FSGroupStrategyOptions(&in.FSGroup, &out.FSGroup, c); err != nil { return err } - out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem return nil } } @@ -845,9 +781,11 @@ func DeepCopy_extensions_ReplicaSet(in interface{}, out interface{}, c *conversi { in := in.(*ReplicaSet) out := out.(*ReplicaSet) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -863,11 +801,8 @@ func DeepCopy_extensions_ReplicaSetCondition(in interface{}, out interface{}, c { in := in.(*ReplicaSetCondition) out := out.(*ReplicaSetCondition) - out.Type = in.Type - out.Status = in.Status + *out = *in out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - out.Reason = in.Reason - out.Message = in.Message return nil } } @@ -876,8 +811,7 @@ func 
DeepCopy_extensions_ReplicaSetList(in interface{}, out interface{}, c *conv { in := in.(*ReplicaSetList) out := out.(*ReplicaSetList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicaSet, len(*in)) @@ -886,8 +820,6 @@ func DeepCopy_extensions_ReplicaSetList(in interface{}, out interface{}, c *conv return err } } - } else { - out.Items = nil } return nil } @@ -897,16 +829,14 @@ func DeepCopy_extensions_ReplicaSetSpec(in interface{}, out interface{}, c *conv { in := in.(*ReplicaSetSpec) out := out.(*ReplicaSetSpec) - out.Replicas = in.Replicas - out.MinReadySeconds = in.MinReadySeconds + *out = *in if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*v1.LabelSelector) } - } else { - out.Selector = nil } if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { return err @@ -919,11 +849,7 @@ func DeepCopy_extensions_ReplicaSetStatus(in interface{}, out interface{}, c *co { in := in.(*ReplicaSetStatus) out := out.(*ReplicaSetStatus) - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ReadyReplicas = in.ReadyReplicas - out.AvailableReplicas = in.AvailableReplicas - out.ObservedGeneration = in.ObservedGeneration + *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]ReplicaSetCondition, len(*in)) @@ -932,8 +858,6 @@ func DeepCopy_extensions_ReplicaSetStatus(in interface{}, out interface{}, c *co return err } } - } else { - out.Conditions = nil } return nil } @@ -943,7 +867,7 @@ func DeepCopy_extensions_ReplicationControllerDummy(in interface{}, out interfac { in := in.(*ReplicationControllerDummy) out := out.(*ReplicationControllerDummy) - out.TypeMeta = in.TypeMeta + *out = *in return nil } } @@ -952,7 +876,16 @@ func DeepCopy_extensions_RollbackConfig(in interface{}, out interface{}, c *conv { in := in.(*RollbackConfig) out := out.(*RollbackConfig) - out.Revision = in.Revision + *out = *in + return nil + } +} + +func DeepCopy_extensions_RollingUpdateDaemonSet(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollingUpdateDaemonSet) + out := out.(*RollingUpdateDaemonSet) + *out = *in return nil } } @@ -961,8 +894,7 @@ func DeepCopy_extensions_RollingUpdateDeployment(in interface{}, out interface{} { in := in.(*RollingUpdateDeployment) out := out.(*RollingUpdateDeployment) - out.MaxUnavailable = in.MaxUnavailable - out.MaxSurge = in.MaxSurge + *out = *in return nil } } @@ -971,15 +903,11 @@ func DeepCopy_extensions_RunAsUserStrategyOptions(in interface{}, out interface{ { in := in.(*RunAsUserStrategyOptions) out := out.(*RunAsUserStrategyOptions) - out.Rule = in.Rule + *out = *in if in.Ranges != nil { in, out := &in.Ranges, &out.Ranges *out = make([]IDRange, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Ranges = nil + copy(*out, *in) } return nil } @@ -989,13 +917,11 @@ func DeepCopy_extensions_SELinuxStrategyOptions(in interface{}, out interface{}, { in := in.(*SELinuxStrategyOptions) out := out.(*SELinuxStrategyOptions) - out.Rule = in.Rule + *out = *in if in.SELinuxOptions != nil { in, out := &in.SELinuxOptions, &out.SELinuxOptions *out = new(api.SELinuxOptions) **out = **in - } else { - out.SELinuxOptions = 
nil } return nil } @@ -1005,11 +931,12 @@ func DeepCopy_extensions_Scale(in interface{}, out interface{}, c *conversion.Cl { in := in.(*Scale) out := out.(*Scale) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } - out.Spec = in.Spec if err := DeepCopy_extensions_ScaleStatus(&in.Status, &out.Status, c); err != nil { return err } @@ -1021,7 +948,7 @@ func DeepCopy_extensions_ScaleSpec(in interface{}, out interface{}, c *conversio { in := in.(*ScaleSpec) out := out.(*ScaleSpec) - out.Replicas = in.Replicas + *out = *in return nil } } @@ -1030,15 +957,14 @@ func DeepCopy_extensions_ScaleStatus(in interface{}, out interface{}, c *convers { in := in.(*ScaleStatus) out := out.(*ScaleStatus) - out.Replicas = in.Replicas + *out = *in if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*v1.LabelSelector) } - } else { - out.Selector = nil } return nil } @@ -1048,15 +974,11 @@ func DeepCopy_extensions_SupplementalGroupsStrategyOptions(in interface{}, out i { in := in.(*SupplementalGroupsStrategyOptions) out := out.(*SupplementalGroupsStrategyOptions) - out.Rule = in.Rule + *out = *in if in.Ranges != nil { in, out := &in.Ranges, &out.Ranges *out = make([]IDRange, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Ranges = nil + copy(*out, *in) } return nil } @@ -1066,19 +988,16 @@ func DeepCopy_extensions_ThirdPartyResource(in interface{}, out interface{}, c * { in := in.(*ThirdPartyResource) out := out.(*ThirdPartyResource) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } - out.Description = in.Description if in.Versions != nil { in, out := &in.Versions, &out.Versions *out = make([]APIVersion, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Versions = nil + copy(*out, *in) } return nil } @@ -1088,16 +1007,16 @@ func DeepCopy_extensions_ThirdPartyResourceData(in interface{}, out interface{}, { in := in.(*ThirdPartyResourceData) out := out.(*ThirdPartyResourceData) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if in.Data != nil { in, out := &in.Data, &out.Data *out = make([]byte, len(*in)) copy(*out, *in) - } else { - out.Data = nil } return nil } @@ -1107,8 +1026,7 @@ func DeepCopy_extensions_ThirdPartyResourceDataList(in interface{}, out interfac { in := in.(*ThirdPartyResourceDataList) out := out.(*ThirdPartyResourceDataList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ThirdPartyResourceData, len(*in)) @@ -1117,8 +1035,6 @@ func DeepCopy_extensions_ThirdPartyResourceDataList(in interface{}, out interfac return err } } - } else { - out.Items = nil } return nil } @@ -1128,8 +1044,7 @@ func 
DeepCopy_extensions_ThirdPartyResourceList(in interface{}, out interface{}, { in := in.(*ThirdPartyResourceList) out := out.(*ThirdPartyResourceList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ThirdPartyResource, len(*in)) @@ -1138,8 +1053,6 @@ func DeepCopy_extensions_ThirdPartyResourceList(in interface{}, out interface{}, return err } } - } else { - out.Items = nil } return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/BUILD deleted file mode 100644 index d12e144d53..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/BUILD +++ /dev/null @@ -1,31 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.generated.go", - "types.go", - "zz_generated.deepcopy.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//vendor:github.com/ugorji/go/codec", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/doc.go deleted file mode 100644 index 3401639c6c..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package,register -// +groupName=imagepolicy.k8s.io -// +k8s:openapi-gen=true - -package imagepolicy diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/install/BUILD deleted file mode 100644 index 5ace8e7235..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/install/BUILD +++ /dev/null @@ -1,23 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["install.go"], - tags = ["automanaged"], - deps = [ - "//pkg/apimachinery/announced:go_default_library", - "//pkg/apis/imagepolicy:go_default_library", - "//pkg/apis/imagepolicy/v1alpha1:go_default_library", - "//pkg/util/sets:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/install/install.go deleted file mode 100644 index 7c01a2697b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/install/install.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the experimental API group, making it available as -// an option to all of the API encoding/decoding machinery. -package install - -import ( - "k8s.io/kubernetes/pkg/apimachinery/announced" - "k8s.io/kubernetes/pkg/apis/imagepolicy" - "k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1" - "k8s.io/kubernetes/pkg/util/sets" -) - -func init() { - if err := announced.NewGroupMetaFactory( - &announced.GroupMetaFactoryArgs{ - GroupName: imagepolicy.GroupName, - VersionPreferenceOrder: []string{v1alpha1.SchemeGroupVersion.Version}, - ImportPrefix: "k8s.io/kubernetes/pkg/apis/imagepolicy", - RootScopedKinds: sets.NewString("ImageReview"), - AddInternalObjectsToScheme: imagepolicy.AddToScheme, - }, - announced.VersionToSchemeFunc{ - v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme, - }, - ).Announce().RegisterAndEnable(); err != nil { - panic(err) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/register.go b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/register.go deleted file mode 100644 index 89e961ded6..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/register.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package imagepolicy - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// GroupName is the group name use in this package -const GroupName = "imagepolicy.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &api.ListOptions{}, - &api.DeleteOptions{}, - &api.ExportOptions{}, - - &ImageReview{}, - ) - // versioned.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/types.generated.go deleted file mode 100644 index 450f19f4d7..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/types.generated.go +++ /dev/null @@ -1,2194 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package imagepolicy - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg2_api "k8s.io/kubernetes/pkg/api" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg3_types "k8s.io/kubernetes/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg2_api.ObjectMeta - var v1 pkg1_unversioned.TypeMeta - var v2 pkg3_types.UID - var v3 time.Time - _, _, _, _ = v0, v1, v2, v3 - } -} - -func (x *ImageReview) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [19]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = x.Name != "" - yyq2[3] = x.GenerateName != "" - yyq2[4] = x.Namespace != "" - yyq2[5] = x.SelfLink != "" - yyq2[6] = x.UID != "" - yyq2[7] = x.ResourceVersion != "" - yyq2[8] = x.Generation != 0 - yyq2[9] = true - yyq2[10] = x.ObjectMeta.DeletionTimestamp != nil && x.DeletionTimestamp != nil - yyq2[11] = x.ObjectMeta.DeletionGracePeriodSeconds != nil && x.DeletionGracePeriodSeconds != nil - yyq2[12] = len(x.Labels) != 0 - yyq2[13] = len(x.Annotations) != 0 - yyq2[14] = len(x.OwnerReferences) != 0 - yyq2[15] = len(x.Finalizers) != 0 - yyq2[16] = x.ClusterName != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(19) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if 
false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("generateName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selfLink")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeInt(int64(x.Generation)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("generation")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym29 := 
z.EncBinary() - _ = yym29 - if false { - } else { - r.EncodeInt(int64(x.Generation)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - yy31 := &x.CreationTimestamp - yym32 := z.EncBinary() - _ = yym32 - if false { - } else if z.HasExtensions() && z.EncExt(yy31) { - } else if yym32 { - z.EncBinaryMarshal(yy31) - } else if !yym32 && z.IsJSONHandle() { - z.EncJSONMarshal(yy31) - } else { - z.EncFallback(yy31) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("creationTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy33 := &x.CreationTimestamp - yym34 := z.EncBinary() - _ = yym34 - if false { - } else if z.HasExtensions() && z.EncExt(yy33) { - } else if yym34 { - z.EncBinaryMarshal(yy33) - } else if !yym34 && z.IsJSONHandle() { - z.EncJSONMarshal(yy33) - } else { - z.EncFallback(yy33) - } - } - } - var yyn35 bool - if x.ObjectMeta.DeletionTimestamp == nil { - yyn35 = true - goto LABEL35 - } - LABEL35: - if yyr2 || yy2arr2 { - if yyn35 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.DeletionTimestamp == nil { - r.EncodeNil() - } else { - yym36 := z.EncBinary() - _ = yym36 - if false { - } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { - } else if yym36 { - z.EncBinaryMarshal(x.DeletionTimestamp) - } else if !yym36 && z.IsJSONHandle() { - z.EncJSONMarshal(x.DeletionTimestamp) - } else { - z.EncFallback(x.DeletionTimestamp) - } - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletionTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn35 { - r.EncodeNil() - } else { - if x.DeletionTimestamp == nil { - r.EncodeNil() - } else { - yym37 := z.EncBinary() - _ = yym37 - if false { - } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { - } else if yym37 { - z.EncBinaryMarshal(x.DeletionTimestamp) - } else if !yym37 && z.IsJSONHandle() { - z.EncJSONMarshal(x.DeletionTimestamp) - } else { - z.EncFallback(x.DeletionTimestamp) - } - } - } - } - } - var yyn38 bool - if x.ObjectMeta.DeletionGracePeriodSeconds == nil { - yyn38 = true - goto LABEL38 - } - LABEL38: - if yyr2 || yy2arr2 { - if yyn38 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.DeletionGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy39 := *x.DeletionGracePeriodSeconds - yym40 := z.EncBinary() - _ = yym40 - if false { - } else { - r.EncodeInt(int64(yy39)) - } - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletionGracePeriodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn38 { - r.EncodeNil() - } else { - if x.DeletionGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy41 := *x.DeletionGracePeriodSeconds - yym42 := z.EncBinary() - _ = yym42 - if false { - } else { - r.EncodeInt(int64(yy41)) - } - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - if x.Labels == nil { - r.EncodeNil() - } else { - yym44 := z.EncBinary() - _ = yym44 - if false { - } else { - z.F.EncMapStringStringV(x.Labels, false, e) - 
} - } - } else { - r.EncodeNil() - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("labels")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Labels == nil { - r.EncodeNil() - } else { - yym45 := z.EncBinary() - _ = yym45 - if false { - } else { - z.F.EncMapStringStringV(x.Labels, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - if x.Annotations == nil { - r.EncodeNil() - } else { - yym47 := z.EncBinary() - _ = yym47 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("annotations")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Annotations == nil { - r.EncodeNil() - } else { - yym48 := z.EncBinary() - _ = yym48 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[14] { - if x.OwnerReferences == nil { - r.EncodeNil() - } else { - yym50 := z.EncBinary() - _ = yym50 - if false { - } else { - h.encSliceapi_OwnerReference(([]pkg2_api.OwnerReference)(x.OwnerReferences), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ownerReferences")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.OwnerReferences == nil { - r.EncodeNil() - } else { - yym51 := z.EncBinary() - _ = yym51 - if false { - } else { - h.encSliceapi_OwnerReference(([]pkg2_api.OwnerReference)(x.OwnerReferences), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[15] { - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym53 := z.EncBinary() - _ = yym53 - if false { - } else { - z.F.EncSliceStringV(x.Finalizers, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("finalizers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym54 := z.EncBinary() - _ = yym54 - if false { - } else { - z.F.EncSliceStringV(x.Finalizers, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[16] { - yym56 := z.EncBinary() - _ = yym56 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym57 := z.EncBinary() - _ = yym57 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy59 := &x.Spec - yy59.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy60 := &x.Spec - 
yy60.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy62 := &x.Status - yy62.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy63 := &x.Status - yy63.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ImageReview) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym64 := z.DecBinary() - _ = yym64 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct65 := r.ContainerType() - if yyct65 == codecSelferValueTypeMap1234 { - yyl65 := r.ReadMapStart() - if yyl65 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl65, d) - } - } else if yyct65 == codecSelferValueTypeArray1234 { - yyl65 := r.ReadArrayStart() - if yyl65 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl65, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ImageReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys66Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys66Slc - var yyhl66 bool = l >= 0 - for yyj66 := 0; ; yyj66++ { - if yyhl66 { - if yyj66 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys66Slc = r.DecodeBytes(yys66Slc, true, true) - yys66 := string(yys66Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys66 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "generateName": - if r.TryDecodeAsNil() { - x.GenerateName = "" - } else { - x.GenerateName = string(r.DecodeString()) - } - case "namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - case "selfLink": - if r.TryDecodeAsNil() { - x.SelfLink = "" - } else { - x.SelfLink = string(r.DecodeString()) - } - case "uid": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg3_types.UID(r.DecodeString()) - } - case "resourceVersion": - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - case "generation": - if r.TryDecodeAsNil() { - x.Generation = 0 - } else { - x.Generation = int64(r.DecodeInt(64)) - } - case "creationTimestamp": - if r.TryDecodeAsNil() { - x.CreationTimestamp = pkg1_unversioned.Time{} - } else { - yyv76 := &x.CreationTimestamp - yym77 := z.DecBinary() - _ = yym77 - if false { - } else if z.HasExtensions() && z.DecExt(yyv76) { - } else if yym77 { - z.DecBinaryUnmarshal(yyv76) - } else if !yym77 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv76) - } else { - z.DecFallback(yyv76, false) - } - } - case "deletionTimestamp": - if x.ObjectMeta.DeletionTimestamp == nil { - 
x.ObjectMeta.DeletionTimestamp = new(pkg1_unversioned.Time) - } - if r.TryDecodeAsNil() { - if x.DeletionTimestamp != nil { - x.DeletionTimestamp = nil - } - } else { - if x.DeletionTimestamp == nil { - x.DeletionTimestamp = new(pkg1_unversioned.Time) - } - yym79 := z.DecBinary() - _ = yym79 - if false { - } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym79 { - z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym79 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.DeletionTimestamp) - } else { - z.DecFallback(x.DeletionTimestamp, false) - } - } - case "deletionGracePeriodSeconds": - if x.ObjectMeta.DeletionGracePeriodSeconds == nil { - x.ObjectMeta.DeletionGracePeriodSeconds = new(int64) - } - if r.TryDecodeAsNil() { - if x.DeletionGracePeriodSeconds != nil { - x.DeletionGracePeriodSeconds = nil - } - } else { - if x.DeletionGracePeriodSeconds == nil { - x.DeletionGracePeriodSeconds = new(int64) - } - yym81 := z.DecBinary() - _ = yym81 - if false { - } else { - *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - case "labels": - if r.TryDecodeAsNil() { - x.Labels = nil - } else { - yyv82 := &x.Labels - yym83 := z.DecBinary() - _ = yym83 - if false { - } else { - z.F.DecMapStringStringX(yyv82, false, d) - } - } - case "annotations": - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv84 := &x.Annotations - yym85 := z.DecBinary() - _ = yym85 - if false { - } else { - z.F.DecMapStringStringX(yyv84, false, d) - } - } - case "ownerReferences": - if r.TryDecodeAsNil() { - x.OwnerReferences = nil - } else { - yyv86 := &x.OwnerReferences - yym87 := z.DecBinary() - _ = yym87 - if false { - } else { - h.decSliceapi_OwnerReference((*[]pkg2_api.OwnerReference)(yyv86), d) - } - } - case "finalizers": - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv88 := &x.Finalizers - yym89 := z.DecBinary() - _ = yym89 - if false { - } else { - z.F.DecSliceStringX(yyv88, false, d) - } - } - case "clusterName": - if r.TryDecodeAsNil() { - x.ClusterName = "" - } else { - x.ClusterName = string(r.DecodeString()) - } - case "Spec": - if r.TryDecodeAsNil() { - x.Spec = ImageReviewSpec{} - } else { - yyv91 := &x.Spec - yyv91.CodecDecodeSelf(d) - } - case "Status": - if r.TryDecodeAsNil() { - x.Status = ImageReviewStatus{} - } else { - yyv92 := &x.Status - yyv92.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys66) - } // end switch yys66 - } // end for yyj66 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ImageReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj93 int - var yyb93 bool - var yyhl93 bool = l >= 0 - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.GenerateName = "" - } else { - x.GenerateName = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SelfLink = "" - } else { - x.SelfLink = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg3_types.UID(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Generation = 0 - } else { - x.Generation = int64(r.DecodeInt(64)) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CreationTimestamp = pkg1_unversioned.Time{} - } else { - yyv103 := &x.CreationTimestamp - yym104 := z.DecBinary() - _ = yym104 - if false { - } else if z.HasExtensions() && z.DecExt(yyv103) { - } else if yym104 { - z.DecBinaryUnmarshal(yyv103) - } else if !yym104 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv103) - } else { - z.DecFallback(yyv103, false) - } - } - if x.ObjectMeta.DeletionTimestamp == nil { - x.ObjectMeta.DeletionTimestamp = new(pkg1_unversioned.Time) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DeletionTimestamp != nil { - x.DeletionTimestamp = nil - } - } else { - if x.DeletionTimestamp == nil { - x.DeletionTimestamp = new(pkg1_unversioned.Time) - } - yym106 := z.DecBinary() - _ = yym106 - if false { - } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym106 { - 
z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym106 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.DeletionTimestamp) - } else { - z.DecFallback(x.DeletionTimestamp, false) - } - } - if x.ObjectMeta.DeletionGracePeriodSeconds == nil { - x.ObjectMeta.DeletionGracePeriodSeconds = new(int64) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DeletionGracePeriodSeconds != nil { - x.DeletionGracePeriodSeconds = nil - } - } else { - if x.DeletionGracePeriodSeconds == nil { - x.DeletionGracePeriodSeconds = new(int64) - } - yym108 := z.DecBinary() - _ = yym108 - if false { - } else { - *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Labels = nil - } else { - yyv109 := &x.Labels - yym110 := z.DecBinary() - _ = yym110 - if false { - } else { - z.F.DecMapStringStringX(yyv109, false, d) - } - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv111 := &x.Annotations - yym112 := z.DecBinary() - _ = yym112 - if false { - } else { - z.F.DecMapStringStringX(yyv111, false, d) - } - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.OwnerReferences = nil - } else { - yyv113 := &x.OwnerReferences - yym114 := z.DecBinary() - _ = yym114 - if false { - } else { - h.decSliceapi_OwnerReference((*[]pkg2_api.OwnerReference)(yyv113), d) - } - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv115 := &x.Finalizers - yym116 := z.DecBinary() - _ = yym116 - if false { - } else { - z.F.DecSliceStringX(yyv115, false, d) - } - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterName = "" - } else { - x.ClusterName = string(r.DecodeString()) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ImageReviewSpec{} - } else { - yyv118 := &x.Spec - yyv118.CodecDecodeSelf(d) - } - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ImageReviewStatus{} - } else { - yyv119 := &x.Status - yyv119.CodecDecodeSelf(d) - } - for { - yyj93++ - if yyhl93 { - yyb93 = yyj93 > l - } else { - yyb93 = r.CheckBreak() - } - if yyb93 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj93-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ImageReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym120 := z.EncBinary() - _ = yym120 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep121 := !z.EncBinary() - yy2arr121 := z.EncBasicHandle().StructToArray - var yyq121 [3]bool - _, _, _ = yysep121, yyq121, yy2arr121 - const yyr121 bool = false - var yynn121 int - if yyr121 || yy2arr121 { - r.EncodeArrayStart(3) - } else { - yynn121 = 3 - for _, b := range yyq121 { - if b { - yynn121++ - } - } - r.EncodeMapStart(yynn121) - yynn121 = 0 - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Containers == nil { - r.EncodeNil() - } else { - yym123 := z.EncBinary() - _ = yym123 - if false { - } else { - h.encSliceImageReviewContainerSpec(([]ImageReviewContainerSpec)(x.Containers), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Containers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Containers == nil { - r.EncodeNil() - } else { - yym124 := z.EncBinary() - _ = yym124 - if false { - } else { - h.encSliceImageReviewContainerSpec(([]ImageReviewContainerSpec)(x.Containers), e) - } - } - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Annotations == nil { - r.EncodeNil() - } else { - yym126 := z.EncBinary() - _ = yym126 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Annotations")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Annotations == nil { - r.EncodeNil() - } else { - yym127 := z.EncBinary() - _ = yym127 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym129 := z.EncBinary() - _ = yym129 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Namespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym130 := z.EncBinary() - _ = yym130 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - if yyr121 || yy2arr121 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ImageReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym131 := z.DecBinary() - _ = yym131 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct132 := r.ContainerType() - if yyct132 == codecSelferValueTypeMap1234 { - yyl132 := 
r.ReadMapStart() - if yyl132 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl132, d) - } - } else if yyct132 == codecSelferValueTypeArray1234 { - yyl132 := r.ReadArrayStart() - if yyl132 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl132, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ImageReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys133Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys133Slc - var yyhl133 bool = l >= 0 - for yyj133 := 0; ; yyj133++ { - if yyhl133 { - if yyj133 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys133Slc = r.DecodeBytes(yys133Slc, true, true) - yys133 := string(yys133Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys133 { - case "Containers": - if r.TryDecodeAsNil() { - x.Containers = nil - } else { - yyv134 := &x.Containers - yym135 := z.DecBinary() - _ = yym135 - if false { - } else { - h.decSliceImageReviewContainerSpec((*[]ImageReviewContainerSpec)(yyv134), d) - } - } - case "Annotations": - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv136 := &x.Annotations - yym137 := z.DecBinary() - _ = yym137 - if false { - } else { - z.F.DecMapStringStringX(yyv136, false, d) - } - } - case "Namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys133) - } // end switch yys133 - } // end for yyj133 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ImageReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj139 int - var yyb139 bool - var yyhl139 bool = l >= 0 - yyj139++ - if yyhl139 { - yyb139 = yyj139 > l - } else { - yyb139 = r.CheckBreak() - } - if yyb139 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Containers = nil - } else { - yyv140 := &x.Containers - yym141 := z.DecBinary() - _ = yym141 - if false { - } else { - h.decSliceImageReviewContainerSpec((*[]ImageReviewContainerSpec)(yyv140), d) - } - } - yyj139++ - if yyhl139 { - yyb139 = yyj139 > l - } else { - yyb139 = r.CheckBreak() - } - if yyb139 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv142 := &x.Annotations - yym143 := z.DecBinary() - _ = yym143 - if false { - } else { - z.F.DecMapStringStringX(yyv142, false, d) - } - } - yyj139++ - if yyhl139 { - yyb139 = yyj139 > l - } else { - yyb139 = r.CheckBreak() - } - if yyb139 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - for { - yyj139++ - if yyhl139 { - yyb139 = yyj139 > l - } else { - yyb139 = r.CheckBreak() - } - if yyb139 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj139-1, "") - } - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ImageReviewContainerSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym145 := z.EncBinary() - _ = yym145 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep146 := !z.EncBinary() - yy2arr146 := z.EncBasicHandle().StructToArray - var yyq146 [1]bool - _, _, _ = yysep146, yyq146, yy2arr146 - const yyr146 bool = false - var yynn146 int - if yyr146 || yy2arr146 { - r.EncodeArrayStart(1) - } else { - yynn146 = 1 - for _, b := range yyq146 { - if b { - yynn146++ - } - } - r.EncodeMapStart(yynn146) - yynn146 = 0 - } - if yyr146 || yy2arr146 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym148 := z.EncBinary() - _ = yym148 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Image)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Image")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym149 := z.EncBinary() - _ = yym149 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Image)) - } - } - if yyr146 || yy2arr146 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ImageReviewContainerSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym150 := z.DecBinary() - _ = yym150 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct151 := r.ContainerType() - if yyct151 == codecSelferValueTypeMap1234 { - yyl151 := r.ReadMapStart() - if yyl151 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl151, d) - } - } else if yyct151 == codecSelferValueTypeArray1234 { - yyl151 := r.ReadArrayStart() - if yyl151 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl151, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ImageReviewContainerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys152Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys152Slc - var yyhl152 bool = l >= 0 - for yyj152 := 0; ; yyj152++ { - if yyhl152 { - if yyj152 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys152Slc = r.DecodeBytes(yys152Slc, true, true) - yys152 := string(yys152Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys152 { - case "Image": - if r.TryDecodeAsNil() { - x.Image = "" - } else { - x.Image = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys152) - } // end switch yys152 - } // end for yyj152 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ImageReviewContainerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj154 int - var yyb154 bool - var yyhl154 bool = l >= 0 - yyj154++ - if yyhl154 { - yyb154 = yyj154 > l - } else { - yyb154 = r.CheckBreak() - } - if yyb154 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Image = "" - } else { - x.Image = string(r.DecodeString()) - } - for { - yyj154++ - if yyhl154 { - yyb154 = yyj154 > l - } else { - yyb154 = r.CheckBreak() - } - if yyb154 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj154-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ImageReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym156 := z.EncBinary() - _ = yym156 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep157 := !z.EncBinary() - yy2arr157 := z.EncBasicHandle().StructToArray - var yyq157 [2]bool - _, _, _ = yysep157, yyq157, yy2arr157 - const yyr157 bool = false - var yynn157 int - if yyr157 || yy2arr157 { - r.EncodeArrayStart(2) - } else { - yynn157 = 2 - for _, b := range yyq157 { - if b { - yynn157++ - } - } - r.EncodeMapStart(yynn157) - yynn157 = 0 - } - if yyr157 || yy2arr157 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym159 := z.EncBinary() - _ = yym159 - if false { - } else { - r.EncodeBool(bool(x.Allowed)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Allowed")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym160 := z.EncBinary() - _ = yym160 - if false { - } else { - r.EncodeBool(bool(x.Allowed)) - } - } - if yyr157 || yy2arr157 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym162 := z.EncBinary() - _ = yym162 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym163 := z.EncBinary() - _ = yym163 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - if yyr157 || yy2arr157 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ImageReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym164 := z.DecBinary() - _ = yym164 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct165 := r.ContainerType() - if yyct165 == codecSelferValueTypeMap1234 { - yyl165 := r.ReadMapStart() - if yyl165 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl165, d) - } - } else if yyct165 == codecSelferValueTypeArray1234 { - yyl165 := r.ReadArrayStart() - if yyl165 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl165, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ImageReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys166Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys166Slc - var yyhl166 bool = l >= 0 - for yyj166 := 0; ; yyj166++ { - if yyhl166 { - if yyj166 >= l { - break - } - } else { - 
if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys166Slc = r.DecodeBytes(yys166Slc, true, true) - yys166 := string(yys166Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys166 { - case "Allowed": - if r.TryDecodeAsNil() { - x.Allowed = false - } else { - x.Allowed = bool(r.DecodeBool()) - } - case "Reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys166) - } // end switch yys166 - } // end for yyj166 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ImageReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj169 int - var yyb169 bool - var yyhl169 bool = l >= 0 - yyj169++ - if yyhl169 { - yyb169 = yyj169 > l - } else { - yyb169 = r.CheckBreak() - } - if yyb169 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Allowed = false - } else { - x.Allowed = bool(r.DecodeBool()) - } - yyj169++ - if yyhl169 { - yyb169 = yyj169 > l - } else { - yyb169 = r.CheckBreak() - } - if yyb169 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - for { - yyj169++ - if yyhl169 { - yyb169 = yyj169 > l - } else { - yyb169 = r.CheckBreak() - } - if yyb169 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj169-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceapi_OwnerReference(v []pkg2_api.OwnerReference, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv172 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy173 := &yyv172 - yy173.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceapi_OwnerReference(v *[]pkg2_api.OwnerReference, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv174 := *v - yyh174, yyl174 := z.DecSliceHelperStart() - var yyc174 bool - if yyl174 == 0 { - if yyv174 == nil { - yyv174 = []pkg2_api.OwnerReference{} - yyc174 = true - } else if len(yyv174) != 0 { - yyv174 = yyv174[:0] - yyc174 = true - } - } else if yyl174 > 0 { - var yyrr174, yyrl174 int - var yyrt174 bool - if yyl174 > cap(yyv174) { - - yyrg174 := len(yyv174) > 0 - yyv2174 := yyv174 - yyrl174, yyrt174 = z.DecInferLen(yyl174, z.DecBasicHandle().MaxInitLen, 72) - if yyrt174 { - if yyrl174 <= cap(yyv174) { - yyv174 = yyv174[:yyrl174] - } else { - yyv174 = make([]pkg2_api.OwnerReference, yyrl174) - } - } else { - yyv174 = make([]pkg2_api.OwnerReference, yyrl174) - } - yyc174 = true - yyrr174 = len(yyv174) - if yyrg174 { - copy(yyv174, yyv2174) - } - } else if yyl174 != len(yyv174) { - yyv174 = yyv174[:yyl174] - yyc174 = true - } - yyj174 := 0 - for ; yyj174 < yyrr174; yyj174++ { - yyh174.ElemContainerState(yyj174) - if r.TryDecodeAsNil() { - yyv174[yyj174] = pkg2_api.OwnerReference{} - } else { - yyv175 := &yyv174[yyj174] - yyv175.CodecDecodeSelf(d) - } - - } - if yyrt174 { 
- for ; yyj174 < yyl174; yyj174++ { - yyv174 = append(yyv174, pkg2_api.OwnerReference{}) - yyh174.ElemContainerState(yyj174) - if r.TryDecodeAsNil() { - yyv174[yyj174] = pkg2_api.OwnerReference{} - } else { - yyv176 := &yyv174[yyj174] - yyv176.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj174 := 0 - for ; !r.CheckBreak(); yyj174++ { - - if yyj174 >= len(yyv174) { - yyv174 = append(yyv174, pkg2_api.OwnerReference{}) // var yyz174 pkg2_api.OwnerReference - yyc174 = true - } - yyh174.ElemContainerState(yyj174) - if yyj174 < len(yyv174) { - if r.TryDecodeAsNil() { - yyv174[yyj174] = pkg2_api.OwnerReference{} - } else { - yyv177 := &yyv174[yyj174] - yyv177.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj174 < len(yyv174) { - yyv174 = yyv174[:yyj174] - yyc174 = true - } else if yyj174 == 0 && yyv174 == nil { - yyv174 = []pkg2_api.OwnerReference{} - yyc174 = true - } - } - yyh174.End() - if yyc174 { - *v = yyv174 - } -} - -func (x codecSelfer1234) encSliceImageReviewContainerSpec(v []ImageReviewContainerSpec, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv178 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy179 := &yyv178 - yy179.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceImageReviewContainerSpec(v *[]ImageReviewContainerSpec, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv180 := *v - yyh180, yyl180 := z.DecSliceHelperStart() - var yyc180 bool - if yyl180 == 0 { - if yyv180 == nil { - yyv180 = []ImageReviewContainerSpec{} - yyc180 = true - } else if len(yyv180) != 0 { - yyv180 = yyv180[:0] - yyc180 = true - } - } else if yyl180 > 0 { - var yyrr180, yyrl180 int - var yyrt180 bool - if yyl180 > cap(yyv180) { - - yyrg180 := len(yyv180) > 0 - yyv2180 := yyv180 - yyrl180, yyrt180 = z.DecInferLen(yyl180, z.DecBasicHandle().MaxInitLen, 16) - if yyrt180 { - if yyrl180 <= cap(yyv180) { - yyv180 = yyv180[:yyrl180] - } else { - yyv180 = make([]ImageReviewContainerSpec, yyrl180) - } - } else { - yyv180 = make([]ImageReviewContainerSpec, yyrl180) - } - yyc180 = true - yyrr180 = len(yyv180) - if yyrg180 { - copy(yyv180, yyv2180) - } - } else if yyl180 != len(yyv180) { - yyv180 = yyv180[:yyl180] - yyc180 = true - } - yyj180 := 0 - for ; yyj180 < yyrr180; yyj180++ { - yyh180.ElemContainerState(yyj180) - if r.TryDecodeAsNil() { - yyv180[yyj180] = ImageReviewContainerSpec{} - } else { - yyv181 := &yyv180[yyj180] - yyv181.CodecDecodeSelf(d) - } - - } - if yyrt180 { - for ; yyj180 < yyl180; yyj180++ { - yyv180 = append(yyv180, ImageReviewContainerSpec{}) - yyh180.ElemContainerState(yyj180) - if r.TryDecodeAsNil() { - yyv180[yyj180] = ImageReviewContainerSpec{} - } else { - yyv182 := &yyv180[yyj180] - yyv182.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj180 := 0 - for ; !r.CheckBreak(); yyj180++ { - - if yyj180 >= len(yyv180) { - yyv180 = append(yyv180, ImageReviewContainerSpec{}) // var yyz180 ImageReviewContainerSpec - yyc180 = true - } - yyh180.ElemContainerState(yyj180) - if yyj180 < len(yyv180) { - if r.TryDecodeAsNil() { - yyv180[yyj180] = ImageReviewContainerSpec{} - } else { - yyv183 := &yyv180[yyj180] - yyv183.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj180 < len(yyv180) { - yyv180 = yyv180[:yyj180] - yyc180 = true - } else if yyj180 == 0 && yyv180 == nil { - yyv180 = 
[]ImageReviewContainerSpec{} - yyc180 = true - } - } - yyh180.End() - if yyc180 { - *v = yyv180 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/types.go b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/types.go deleted file mode 100644 index 0be81f38b9..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/types.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package imagepolicy - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" -) - -// +genclient=true -// +nonNamespaced=true -// +noMethods=true - -// ImageReview checks if the set of images in a pod are allowed. -type ImageReview struct { - unversioned.TypeMeta - api.ObjectMeta - - // Spec holds information about the pod being evaluated - Spec ImageReviewSpec - - // Status is filled in by the backend and indicates whether the pod should be allowed. - Status ImageReviewStatus -} - -// ImageReviewSpec is a description of the pod creation request. -type ImageReviewSpec struct { - // Containers is a list of a subset of the information in each container of the Pod being created. - Containers []ImageReviewContainerSpec - // Annotations is a list of key-value pairs extracted from the Pod's annotations. - // It only includes keys which match the pattern `*.image-policy.k8s.io/*`. - // It is up to each webhook backend to determine how to interpret these annotations, if at all. - Annotations map[string]string - // Namespace is the namespace the pod is being created in. - Namespace string -} - -// ImageReviewContainerSpec is a description of a container within the pod creation request. -type ImageReviewContainerSpec struct { - // This can be in the form image:tag or image@SHA:012345679abcdef. - Image string - // In future, we may add command line overrides, exec health check command lines, and so on. -} - -// ImageReviewStatus is the result of the token authentication request. -type ImageReviewStatus struct { - // Allowed indicates that all images were allowed to be run. - Allowed bool - // Reason should be empty unless Allowed is false in which case it - // may contain a short description of what is wrong. Kubernetes - // may truncate excessively long errors when displaying to the user. 
- Reason string -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/BUILD deleted file mode 100644 index 5bb15bc9e4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/BUILD +++ /dev/null @@ -1,37 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/apis/imagepolicy:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//vendor:github.com/gogo/protobuf/proto", - "//vendor:github.com/gogo/protobuf/sortkeys", - "//vendor:github.com/ugorji/go/codec", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/doc.go deleted file mode 100644 index cc8a147b4a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/imagepolicy -// +k8s:openapi-gen=true -// +k8s:defaulter-gen=TypeMeta - -// +groupName=imagepolicy.k8s.io -package v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/generated.pb.go deleted file mode 100644 index a7a1437a41..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/generated.pb.go +++ /dev/null @@ -1,1059 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/generated.proto -// DO NOT EDIT! - -/* - Package v1alpha1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/generated.proto - - It has these top-level messages: - ImageReview - ImageReviewContainerSpec - ImageReviewSpec - ImageReviewStatus -*/ -package v1alpha1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 - -func (m *ImageReview) Reset() { *m = ImageReview{} } -func (*ImageReview) ProtoMessage() {} -func (*ImageReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *ImageReviewContainerSpec) Reset() { *m = ImageReviewContainerSpec{} } -func (*ImageReviewContainerSpec) ProtoMessage() {} -func (*ImageReviewContainerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} -} - -func (m *ImageReviewSpec) Reset() { *m = ImageReviewSpec{} } -func (*ImageReviewSpec) ProtoMessage() {} -func (*ImageReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *ImageReviewStatus) Reset() { *m = ImageReviewStatus{} } -func (*ImageReviewStatus) ProtoMessage() {} -func (*ImageReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func init() { - proto.RegisterType((*ImageReview)(nil), "k8s.io.kubernetes.pkg.apis.imagepolicy.v1alpha1.ImageReview") - proto.RegisterType((*ImageReviewContainerSpec)(nil), "k8s.io.kubernetes.pkg.apis.imagepolicy.v1alpha1.ImageReviewContainerSpec") - proto.RegisterType((*ImageReviewSpec)(nil), "k8s.io.kubernetes.pkg.apis.imagepolicy.v1alpha1.ImageReviewSpec") - proto.RegisterType((*ImageReviewStatus)(nil), "k8s.io.kubernetes.pkg.apis.imagepolicy.v1alpha1.ImageReviewStatus") -} -func (m *ImageReview) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ImageReview) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *ImageReviewContainerSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ImageReviewContainerSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Image))) - i += copy(data[i:], m.Image) - return i, nil -} - -func (m *ImageReviewSpec) Marshal() (data []byte, err error) { - size := m.Size() - data 
= make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ImageReviewSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Containers) > 0 { - for _, msg := range m.Containers { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Annotations) > 0 { - for k := range m.Annotations { - data[i] = 0x12 - i++ - v := m.Annotations[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) - i += copy(data[i:], m.Namespace) - return i, nil -} - -func (m *ImageReviewStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ImageReviewStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - if m.Allowed { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *ImageReview) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ImageReviewContainerSpec) Size() (n int) { - var l int - _ = l - l = len(m.Image) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ImageReviewSpec) Size() (n int) { - var l int - _ = l - if len(m.Containers) > 0 { - for _, e := range m.Containers { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Annotations) > 0 { - for k, v := range m.Annotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ImageReviewStatus) Size() (n int) { - var l int - _ = l - n += 2 - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - 
break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ImageReview) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ImageReviewSpec", "ImageReviewSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ImageReviewStatus", "ImageReviewStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ImageReviewContainerSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageReviewContainerSpec{`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `}`, - }, "") - return s -} -func (this *ImageReviewSpec) String() string { - if this == nil { - return "nil" - } - keysForAnnotations := make([]string, 0, len(this.Annotations)) - for k := range this.Annotations { - keysForAnnotations = append(keysForAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - mapStringForAnnotations := "map[string]string{" - for _, k := range keysForAnnotations { - mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) - } - mapStringForAnnotations += "}" - s := strings.Join([]string{`&ImageReviewSpec{`, - `Containers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Containers), "ImageReviewContainerSpec", "ImageReviewContainerSpec", 1), `&`, ``, 1) + `,`, - `Annotations:` + mapStringForAnnotations + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `}`, - }, "") - return s -} -func (this *ImageReviewStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageReviewStatus{`, - `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ImageReview) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageReview: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageReview: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageReviewContainerSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageReviewContainerSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageReviewContainerSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Image = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageReviewSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } 
- b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageReviewSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Containers = append(m.Containers, ImageReviewContainerSpec{}) - if err := m.Containers[len(m.Containers)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - if m.Annotations == nil { - m.Annotations = make(map[string]string) - } - m.Annotations[mapkey] = mapvalue - iNdEx = postIndex - case 3: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageReviewStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageReviewStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Allowed = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, 
io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -var fileDescriptorGenerated = []byte{ - // 573 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x92, 0x3f, 0x6f, 0xd3, 0x4e, - 0x18, 0xc7, 0xe3, 0xa4, 0xff, 0x72, 0xf9, 0xfd, 0x68, 0x7b, 0x30, 0x58, 0x19, 0xdc, 0x2a, 0x48, - 0xa8, 0x20, 0x7a, 0xa7, 0x54, 0x42, 0xaa, 0x18, 0x28, 0x35, 0x62, 0xe8, 0x00, 0x88, 0x63, 0x41, - 0x6c, 0x17, 0xe7, 0xa9, 0x7b, 0x8d, 0x73, 0x67, 0xf9, 0xce, 0xae, 0x3a, 0x20, 0x31, 0x32, 0x30, - 0xf0, 0x6e, 0x78, 0x0b, 0x1d, 0x3b, 0x32, 0x55, 0x24, 0xbc, 0x11, 0x94, 0xb3, 0x53, 0x9b, 0xa4, - 0x19, 0x50, 0x36, 0x3f, 0xff, 0x3e, 0xdf, 0xef, 0x3d, 0x8f, 0xd1, 0xd1, 0xe0, 0x50, 0x13, 0xa1, - 0xe8, 0x20, 0xed, 0x41, 0x22, 0xc1, 0x80, 0xa6, 0xf1, 0x20, 0xa4, 0x3c, 0x16, 0x9a, 0x8a, 0x21, - 0x0f, 0x21, 0x56, 0x91, 0x08, 0x2e, 0x69, 0xd6, 0xe5, 0x51, 0x7c, 0xc6, 0xbb, 0x34, 0x04, 0x09, - 0x09, 0x37, 0xd0, 0x27, 0x71, 0xa2, 0x8c, 0xc2, 0x34, 0x07, 0x90, 0x12, 0x40, 0xe2, 0x41, 0x48, - 0x26, 0x00, 0x52, 0x01, 0x90, 0x29, 0xa0, 0xbd, 0x1f, 0x0a, 0x73, 0x96, 0xf6, 0x48, 0xa0, 0x86, - 0x34, 0x54, 0xa1, 0xa2, 0x96, 0xd3, 0x4b, 0x4f, 0x6d, 0x64, 0x03, 0xfb, 0x95, 0xf3, 0xdb, 0x07, - 0x0b, 0x0d, 0xd2, 0x04, 0xb4, 0x4a, 0x93, 0x00, 0x66, 0x3d, 0xb5, 0x9f, 0x2d, 0x9e, 0x49, 0x65, - 0x06, 0x89, 0x16, 0x4a, 0x42, 0x7f, 0x6e, 0xec, 0xe9, 0xe2, 0xb1, 0x6c, 0xee, 0xe1, 0xed, 0xfd, - 0xbb, 0xbb, 0x93, 0x54, 0x1a, 0x31, 0x9c, 0xf7, 0xd4, 0xbd, 0xbb, 0x3d, 0x35, 0x22, 0xa2, 0x42, - 0x1a, 0x6d, 0x92, 0xd9, 0x91, 0xce, 0x8f, 0x3a, 0x6a, 0x9d, 0x4c, 0x56, 0xc8, 0x20, 0x13, 0x70, - 0x81, 0x3f, 0xa2, 0x8d, 0x21, 0x18, 0xde, 0xe7, 0x86, 0xbb, 0xce, 0xae, 0xb3, 0xd7, 0x3a, 0xd8, - 0x23, 0x0b, 0xb7, 0x4f, 0xb2, 0x2e, 0x79, 0xd7, 0x3b, 0x87, 0xc0, 0xbc, 0x01, 0xc3, 0x7d, 0x7c, - 0x75, 0xb3, 0x53, 0x1b, 0xdf, 0xec, 0xa0, 0x32, 0xc7, 0x6e, 0x69, 0xb8, 0x87, 0x56, 0x74, 0x0c, - 0x81, 0x5b, 0xb7, 0xd4, 0x97, 0xe4, 0x1f, 0x6f, 0x4a, 0x2a, 0x2e, 0x3f, 0xc4, 0x10, 0xf8, 0xff, - 0x15, 0x6a, 0x2b, 0x93, 0x88, 0x59, 0x36, 0x3e, 0x47, 0x6b, 0xda, 0x70, 0x93, 0x6a, 0xb7, 0x61, - 0x55, 0xfc, 0xa5, 0x54, 0x2c, 0xc9, 0xbf, 0x57, 0xe8, 0xac, 0xe5, 0x31, 0x2b, 0x14, 0x3a, 0x47, - 0xc8, 
0xad, 0x34, 0xbf, 0x52, 0xd2, 0x70, 0x21, 0x21, 0x99, 0xb8, 0xc1, 0x0f, 0xd1, 0xaa, 0xa5, - 0xdb, 0x15, 0x36, 0xfd, 0xff, 0x0b, 0xc4, 0x6a, 0x3e, 0x90, 0xd7, 0x3a, 0xdf, 0x1a, 0x68, 0x73, - 0xe6, 0x51, 0xf8, 0x33, 0x42, 0xc1, 0x94, 0xa4, 0x5d, 0x67, 0xb7, 0xb1, 0xd7, 0x3a, 0x38, 0x59, - 0xe6, 0x11, 0x7f, 0xf9, 0x2a, 0x2f, 0x74, 0x9b, 0xd6, 0xac, 0x22, 0x88, 0xbf, 0x3a, 0xa8, 0xc5, - 0xa5, 0x54, 0x86, 0x1b, 0xa1, 0xa4, 0x76, 0xeb, 0xd6, 0xc0, 0xfb, 0x65, 0x6f, 0x45, 0x8e, 0x4b, - 0xe6, 0x6b, 0x69, 0x92, 0x4b, 0xff, 0x7e, 0x61, 0xa4, 0x55, 0xa9, 0xb0, 0xaa, 0x34, 0xa6, 0xa8, - 0x29, 0xf9, 0x10, 0x74, 0xcc, 0x03, 0xb0, 0xd7, 0x6c, 0xfa, 0xdb, 0xc5, 0x50, 0xf3, 0xed, 0xb4, - 0xc0, 0xca, 0x9e, 0xf6, 0x0b, 0xb4, 0x35, 0x2b, 0x83, 0xb7, 0x50, 0x63, 0x00, 0x97, 0xf9, 0x15, - 0xd8, 0xe4, 0x13, 0x3f, 0x40, 0xab, 0x19, 0x8f, 0x52, 0xb0, 0xbf, 0x61, 0x93, 0xe5, 0xc1, 0xf3, - 0xfa, 0xa1, 0xd3, 0x39, 0x45, 0xdb, 0x73, 0xc7, 0xc7, 0x8f, 0xd1, 0x3a, 0x8f, 0x22, 0x75, 0x01, - 0x7d, 0x0b, 0xd9, 0xf0, 0x37, 0x0b, 0x0f, 0xeb, 0xc7, 0x79, 0x9a, 0x4d, 0xeb, 0xf8, 0x11, 0x5a, - 0x4b, 0x80, 0x6b, 0x25, 0x73, 0x74, 0xf9, 0xdf, 0x30, 0x9b, 0x65, 0x45, 0xd5, 0x7f, 0x72, 0x35, - 0xf2, 0x6a, 0xd7, 0x23, 0xaf, 0xf6, 0x73, 0xe4, 0xd5, 0xbe, 0x8c, 0x3d, 0xe7, 0x6a, 0xec, 0x39, - 0xd7, 0x63, 0xcf, 0xf9, 0x35, 0xf6, 0x9c, 0xef, 0xbf, 0xbd, 0xda, 0xa7, 0x8d, 0xe9, 0x1e, 0xff, - 0x04, 0x00, 0x00, 0xff, 0xff, 0x5b, 0xde, 0x19, 0x74, 0x3a, 0x05, 0x00, 0x00, -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/generated.proto deleted file mode 100644 index 7b26db9353..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/generated.proto +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.imagepolicy.v1alpha1; - -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// ImageReview checks if the set of images in a pod are allowed. -message ImageReview { - // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec holds information about the pod being evaluated - optional ImageReviewSpec spec = 2; - - // Status is filled in by the backend and indicates whether the pod should be allowed. - // +optional - optional ImageReviewStatus status = 3; -} - -// ImageReviewContainerSpec is a description of a container within the pod creation request. -message ImageReviewContainerSpec { - // This can be in the form image:tag or image@SHA:012345679abcdef. 
- // +optional - optional string image = 1; -} - -// ImageReviewSpec is a description of the pod creation request. -message ImageReviewSpec { - // Containers is a list of a subset of the information in each container of the Pod being created. - // +optional - repeated ImageReviewContainerSpec containers = 1; - - // Annotations is a list of key-value pairs extracted from the Pod's annotations. - // It only includes keys which match the pattern `*.image-policy.k8s.io/*`. - // It is up to each webhook backend to determine how to interpret these annotations, if at all. - // +optional - map annotations = 2; - - // Namespace is the namespace the pod is being created in. - // +optional - optional string namespace = 3; -} - -// ImageReviewStatus is the result of the token authentication request. -message ImageReviewStatus { - // Allowed indicates that all images were allowed to be run. - optional bool allowed = 1; - - // Reason should be empty unless Allowed is false in which case it - // may contain a short description of what is wrong. Kubernetes - // may truncate excessively long errors when displaying to the user. - // +optional - optional string reason = 2; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/register.go deleted file mode 100644 index 88848b8ac8..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/register.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" -) - -// GroupName is the group name for this API. -const GroupName = "imagepolicy.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1alpha1"} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &v1.ListOptions{}, - &v1.DeleteOptions{}, - &v1.ExportOptions{}, - - &ImageReview{}, - ) - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/types.generated.go deleted file mode 100644 index 1363b20f0a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/types.generated.go +++ /dev/null @@ -1,1208 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v1alpha1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg2_v1 "k8s.io/kubernetes/pkg/api/v1" - pkg3_types "k8s.io/kubernetes/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_unversioned.TypeMeta - var v1 pkg2_v1.ObjectMeta - var v2 pkg3_types.UID - var v3 time.Time - _, _, _, _ = v0, v1, v2, v3 - } -} - -func (x *ImageReview) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else 
{ - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yy10.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.ObjectMeta - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy13 := &x.Spec - yy13.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.Spec - yy14.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Status - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ImageReview) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct19 := r.ContainerType() - if yyct19 == codecSelferValueTypeMap1234 { - yyl19 := r.ReadMapStart() - if yyl19 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl19, d) - } - } else if yyct19 == codecSelferValueTypeArray1234 { - yyl19 := r.ReadArrayStart() - if yyl19 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl19, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ImageReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys20Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys20Slc - var yyhl20 bool = l >= 0 - for yyj20 := 0; ; yyj20++ { - if yyhl20 { - if yyj20 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys20Slc = r.DecodeBytes(yys20Slc, true, true) - yys20 := string(yys20Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys20 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv23 := &x.ObjectMeta - 
yyv23.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ImageReviewSpec{} - } else { - yyv24 := &x.Spec - yyv24.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ImageReviewStatus{} - } else { - yyv25 := &x.Status - yyv25.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys20) - } // end switch yys20 - } // end for yyj20 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ImageReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj26 int - var yyb26 bool - var yyhl26 bool = l >= 0 - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv29 := &x.ObjectMeta - yyv29.CodecDecodeSelf(d) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ImageReviewSpec{} - } else { - yyv30 := &x.Spec - yyv30.CodecDecodeSelf(d) - } - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ImageReviewStatus{} - } else { - yyv31 := &x.Status - yyv31.CodecDecodeSelf(d) - } - for { - yyj26++ - if yyhl26 { - yyb26 = yyj26 > l - } else { - yyb26 = r.CheckBreak() - } - if yyb26 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj26-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ImageReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym32 := z.EncBinary() - _ = yym32 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep33 := !z.EncBinary() - yy2arr33 := z.EncBasicHandle().StructToArray - var yyq33 [3]bool - _, _, _ = yysep33, yyq33, yy2arr33 - const yyr33 bool = false - yyq33[0] = len(x.Containers) != 0 - yyq33[1] = len(x.Annotations) != 0 - yyq33[2] = x.Namespace != "" - var yynn33 int - if yyr33 || yy2arr33 { - r.EncodeArrayStart(3) - } else { - yynn33 = 0 - for _, b := range yyq33 { - if b { - yynn33++ - } - } - r.EncodeMapStart(yynn33) - yynn33 = 0 - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[0] { 
- if x.Containers == nil { - r.EncodeNil() - } else { - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - h.encSliceImageReviewContainerSpec(([]ImageReviewContainerSpec)(x.Containers), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq33[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Containers == nil { - r.EncodeNil() - } else { - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - h.encSliceImageReviewContainerSpec(([]ImageReviewContainerSpec)(x.Containers), e) - } - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[1] { - if x.Annotations == nil { - r.EncodeNil() - } else { - yym38 := z.EncBinary() - _ = yym38 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq33[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("annotations")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Annotations == nil { - r.EncodeNil() - } else { - yym39 := z.EncBinary() - _ = yym39 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq33[2] { - yym41 := z.EncBinary() - _ = yym41 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq33[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym42 := z.EncBinary() - _ = yym42 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - } - if yyr33 || yy2arr33 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ImageReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym43 := z.DecBinary() - _ = yym43 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct44 := r.ContainerType() - if yyct44 == codecSelferValueTypeMap1234 { - yyl44 := r.ReadMapStart() - if yyl44 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl44, d) - } - } else if yyct44 == codecSelferValueTypeArray1234 { - yyl44 := r.ReadArrayStart() - if yyl44 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl44, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ImageReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys45Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys45Slc - var yyhl45 bool = l >= 0 - for yyj45 := 0; ; yyj45++ { - if yyhl45 { - if yyj45 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys45Slc = r.DecodeBytes(yys45Slc, true, true) - yys45 := string(yys45Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - 
switch yys45 { - case "containers": - if r.TryDecodeAsNil() { - x.Containers = nil - } else { - yyv46 := &x.Containers - yym47 := z.DecBinary() - _ = yym47 - if false { - } else { - h.decSliceImageReviewContainerSpec((*[]ImageReviewContainerSpec)(yyv46), d) - } - } - case "annotations": - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv48 := &x.Annotations - yym49 := z.DecBinary() - _ = yym49 - if false { - } else { - z.F.DecMapStringStringX(yyv48, false, d) - } - } - case "namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys45) - } // end switch yys45 - } // end for yyj45 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ImageReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj51 int - var yyb51 bool - var yyhl51 bool = l >= 0 - yyj51++ - if yyhl51 { - yyb51 = yyj51 > l - } else { - yyb51 = r.CheckBreak() - } - if yyb51 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Containers = nil - } else { - yyv52 := &x.Containers - yym53 := z.DecBinary() - _ = yym53 - if false { - } else { - h.decSliceImageReviewContainerSpec((*[]ImageReviewContainerSpec)(yyv52), d) - } - } - yyj51++ - if yyhl51 { - yyb51 = yyj51 > l - } else { - yyb51 = r.CheckBreak() - } - if yyb51 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv54 := &x.Annotations - yym55 := z.DecBinary() - _ = yym55 - if false { - } else { - z.F.DecMapStringStringX(yyv54, false, d) - } - } - yyj51++ - if yyhl51 { - yyb51 = yyj51 > l - } else { - yyb51 = r.CheckBreak() - } - if yyb51 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - for { - yyj51++ - if yyhl51 { - yyb51 = yyj51 > l - } else { - yyb51 = r.CheckBreak() - } - if yyb51 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj51-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ImageReviewContainerSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym57 := z.EncBinary() - _ = yym57 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep58 := !z.EncBinary() - yy2arr58 := z.EncBasicHandle().StructToArray - var yyq58 [1]bool - _, _, _ = yysep58, yyq58, yy2arr58 - const yyr58 bool = false - yyq58[0] = x.Image != "" - var yynn58 int - if yyr58 || yy2arr58 { - r.EncodeArrayStart(1) - } else { - yynn58 = 0 - for _, b := range yyq58 { - if b { - yynn58++ - } - } - r.EncodeMapStart(yynn58) - yynn58 = 0 - } - if yyr58 || yy2arr58 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq58[0] { - yym60 := z.EncBinary() - _ = yym60 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Image)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq58[0] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("image")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym61 := z.EncBinary() - _ = yym61 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Image)) - } - } - } - if yyr58 || yy2arr58 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ImageReviewContainerSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym62 := z.DecBinary() - _ = yym62 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct63 := r.ContainerType() - if yyct63 == codecSelferValueTypeMap1234 { - yyl63 := r.ReadMapStart() - if yyl63 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl63, d) - } - } else if yyct63 == codecSelferValueTypeArray1234 { - yyl63 := r.ReadArrayStart() - if yyl63 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl63, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ImageReviewContainerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys64Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys64Slc - var yyhl64 bool = l >= 0 - for yyj64 := 0; ; yyj64++ { - if yyhl64 { - if yyj64 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys64Slc = r.DecodeBytes(yys64Slc, true, true) - yys64 := string(yys64Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys64 { - case "image": - if r.TryDecodeAsNil() { - x.Image = "" - } else { - x.Image = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys64) - } // end switch yys64 - } // end for yyj64 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ImageReviewContainerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj66 int - var yyb66 bool - var yyhl66 bool = l >= 0 - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Image = "" - } else { - x.Image = string(r.DecodeString()) - } - for { - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj66-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ImageReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym68 := z.EncBinary() - _ = yym68 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep69 := !z.EncBinary() - yy2arr69 := z.EncBasicHandle().StructToArray - var yyq69 [2]bool - _, _, _ = yysep69, yyq69, yy2arr69 - const yyr69 bool = false - yyq69[1] = x.Reason != "" - var yynn69 int - if yyr69 || yy2arr69 { - 
r.EncodeArrayStart(2) - } else { - yynn69 = 1 - for _, b := range yyq69 { - if b { - yynn69++ - } - } - r.EncodeMapStart(yynn69) - yynn69 = 0 - } - if yyr69 || yy2arr69 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym71 := z.EncBinary() - _ = yym71 - if false { - } else { - r.EncodeBool(bool(x.Allowed)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("allowed")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym72 := z.EncBinary() - _ = yym72 - if false { - } else { - r.EncodeBool(bool(x.Allowed)) - } - } - if yyr69 || yy2arr69 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq69[1] { - yym74 := z.EncBinary() - _ = yym74 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq69[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym75 := z.EncBinary() - _ = yym75 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr69 || yy2arr69 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ImageReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym76 := z.DecBinary() - _ = yym76 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct77 := r.ContainerType() - if yyct77 == codecSelferValueTypeMap1234 { - yyl77 := r.ReadMapStart() - if yyl77 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl77, d) - } - } else if yyct77 == codecSelferValueTypeArray1234 { - yyl77 := r.ReadArrayStart() - if yyl77 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl77, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ImageReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys78Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys78Slc - var yyhl78 bool = l >= 0 - for yyj78 := 0; ; yyj78++ { - if yyhl78 { - if yyj78 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys78Slc = r.DecodeBytes(yys78Slc, true, true) - yys78 := string(yys78Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys78 { - case "allowed": - if r.TryDecodeAsNil() { - x.Allowed = false - } else { - x.Allowed = bool(r.DecodeBool()) - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys78) - } // end switch yys78 - } // end for yyj78 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ImageReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj81 int - var yyb81 bool - var yyhl81 bool = l >= 0 - yyj81++ - if yyhl81 { - yyb81 = yyj81 > l - } else { - yyb81 = r.CheckBreak() - } - if yyb81 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Allowed = false - } else { - x.Allowed = bool(r.DecodeBool()) - } - yyj81++ - if yyhl81 { - yyb81 = yyj81 > l - } else { - yyb81 = r.CheckBreak() - } - if yyb81 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - for { - yyj81++ - if yyhl81 { - yyb81 = yyj81 > l - } else { - yyb81 = r.CheckBreak() - } - if yyb81 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj81-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceImageReviewContainerSpec(v []ImageReviewContainerSpec, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv84 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy85 := &yyv84 - yy85.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceImageReviewContainerSpec(v *[]ImageReviewContainerSpec, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv86 := *v - yyh86, yyl86 := z.DecSliceHelperStart() - var yyc86 bool - if yyl86 == 0 { - if yyv86 == nil { - yyv86 = []ImageReviewContainerSpec{} - yyc86 = true - } else if len(yyv86) != 0 { - yyv86 = yyv86[:0] - yyc86 = true - } - } else if yyl86 > 0 { - var yyrr86, yyrl86 int - var yyrt86 bool - if yyl86 > cap(yyv86) { - - yyrg86 := len(yyv86) > 0 - yyv286 := yyv86 - yyrl86, yyrt86 = z.DecInferLen(yyl86, z.DecBasicHandle().MaxInitLen, 16) - if yyrt86 { - if yyrl86 <= cap(yyv86) { - yyv86 = yyv86[:yyrl86] - } else { - yyv86 = make([]ImageReviewContainerSpec, yyrl86) - } - } else { - yyv86 = make([]ImageReviewContainerSpec, yyrl86) - } - yyc86 = true - yyrr86 = len(yyv86) - if yyrg86 { - copy(yyv86, yyv286) - } - } else if yyl86 != len(yyv86) { - yyv86 = yyv86[:yyl86] - yyc86 = true - } - yyj86 := 0 - for ; yyj86 < yyrr86; yyj86++ { - yyh86.ElemContainerState(yyj86) - if r.TryDecodeAsNil() { - yyv86[yyj86] = ImageReviewContainerSpec{} - } else { - yyv87 := &yyv86[yyj86] - yyv87.CodecDecodeSelf(d) - } - - } - if yyrt86 { - for ; yyj86 < yyl86; yyj86++ { - yyv86 = append(yyv86, ImageReviewContainerSpec{}) - yyh86.ElemContainerState(yyj86) - if r.TryDecodeAsNil() { - yyv86[yyj86] = ImageReviewContainerSpec{} - } else { - yyv88 := &yyv86[yyj86] - yyv88.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj86 := 0 - for ; !r.CheckBreak(); yyj86++ { - - if yyj86 >= len(yyv86) { - yyv86 = append(yyv86, ImageReviewContainerSpec{}) // var yyz86 ImageReviewContainerSpec - yyc86 = true - } - yyh86.ElemContainerState(yyj86) - if yyj86 < len(yyv86) { - if r.TryDecodeAsNil() { - yyv86[yyj86] = ImageReviewContainerSpec{} - } else { - yyv89 := &yyv86[yyj86] - yyv89.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj86 < len(yyv86) { - yyv86 = yyv86[:yyj86] - yyc86 = true - } else if yyj86 == 0 && yyv86 == nil { - yyv86 = []ImageReviewContainerSpec{} - yyc86 = true - } - } - yyh86.End() - if yyc86 { - *v = yyv86 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/types.go 
b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/types.go deleted file mode 100644 index e62d874e5d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/types.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" -) - -// +genclient=true -// +nonNamespaced=true -// +noMethods=true - -// ImageReview checks if the set of images in a pod are allowed. -type ImageReview struct { - unversioned.TypeMeta `json:",inline"` - // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec holds information about the pod being evaluated - Spec ImageReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - - // Status is filled in by the backend and indicates whether the pod should be allowed. - // +optional - Status ImageReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// ImageReviewSpec is a description of the pod creation request. -type ImageReviewSpec struct { - // Containers is a list of a subset of the information in each container of the Pod being created. - // +optional - Containers []ImageReviewContainerSpec `json:"containers,omitempty" protobuf:"bytes,1,rep,name=containers"` - // Annotations is a list of key-value pairs extracted from the Pod's annotations. - // It only includes keys which match the pattern `*.image-policy.k8s.io/*`. - // It is up to each webhook backend to determine how to interpret these annotations, if at all. - // +optional - Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,2,rep,name=annotations"` - // Namespace is the namespace the pod is being created in. - // +optional - Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` -} - -// ImageReviewContainerSpec is a description of a container within the pod creation request. -type ImageReviewContainerSpec struct { - // This can be in the form image:tag or image@SHA:012345679abcdef. - // +optional - Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"` - // In future, we may add command line overrides, exec health check command lines, and so on. -} - -// ImageReviewStatus is the result of the token authentication request. -type ImageReviewStatus struct { - // Allowed indicates that all images were allowed to be run. - Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` - // Reason should be empty unless Allowed is false in which case it - // may contain a short description of what is wrong. Kubernetes - // may truncate excessively long errors when displaying to the user. 
- // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index f4b26f5e8f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_ImageReview = map[string]string{ - "": "ImageReview checks if the set of images in a pod are allowed.", - "spec": "Spec holds information about the pod being evaluated", - "status": "Status is filled in by the backend and indicates whether the pod should be allowed.", -} - -func (ImageReview) SwaggerDoc() map[string]string { - return map_ImageReview -} - -var map_ImageReviewContainerSpec = map[string]string{ - "": "ImageReviewContainerSpec is a description of a container within the pod creation request.", - "image": "This can be in the form image:tag or image@SHA:012345679abcdef.", -} - -func (ImageReviewContainerSpec) SwaggerDoc() map[string]string { - return map_ImageReviewContainerSpec -} - -var map_ImageReviewSpec = map[string]string{ - "": "ImageReviewSpec is a description of the pod creation request.", - "containers": "Containers is a list of a subset of the information in each container of the Pod being created.", - "annotations": "Annotations is a list of key-value pairs extracted from the Pod's annotations. It only includes keys which match the pattern `*.image-policy.k8s.io/*`. It is up to each webhook backend to determine how to interpret these annotations, if at all.", - "namespace": "Namespace is the namespace the pod is being created in.", -} - -func (ImageReviewSpec) SwaggerDoc() map[string]string { - return map_ImageReviewSpec -} - -var map_ImageReviewStatus = map[string]string{ - "": "ImageReviewStatus is the result of the token authentication request.", - "allowed": "Allowed indicates that all images were allowed to be run.", - "reason": "Reason should be empty unless Allowed is false in which case it may contain a short description of what is wrong. 
Kubernetes may truncate excessively long errors when displaying to the user.", -} - -func (ImageReviewStatus) SwaggerDoc() map[string]string { - return map_ImageReviewStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/zz_generated.conversion.go deleted file mode 100644 index aa40b0a52f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/zz_generated.conversion.go +++ /dev/null @@ -1,143 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1alpha1 - -import ( - imagepolicy "k8s.io/kubernetes/pkg/apis/imagepolicy" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1alpha1_ImageReview_To_imagepolicy_ImageReview, - Convert_imagepolicy_ImageReview_To_v1alpha1_ImageReview, - Convert_v1alpha1_ImageReviewContainerSpec_To_imagepolicy_ImageReviewContainerSpec, - Convert_imagepolicy_ImageReviewContainerSpec_To_v1alpha1_ImageReviewContainerSpec, - Convert_v1alpha1_ImageReviewSpec_To_imagepolicy_ImageReviewSpec, - Convert_imagepolicy_ImageReviewSpec_To_v1alpha1_ImageReviewSpec, - Convert_v1alpha1_ImageReviewStatus_To_imagepolicy_ImageReviewStatus, - Convert_imagepolicy_ImageReviewStatus_To_v1alpha1_ImageReviewStatus, - ) -} - -func autoConvert_v1alpha1_ImageReview_To_imagepolicy_ImageReview(in *ImageReview, out *imagepolicy.ImageReview, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v1alpha1_ImageReviewSpec_To_imagepolicy_ImageReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha1_ImageReviewStatus_To_imagepolicy_ImageReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1alpha1_ImageReview_To_imagepolicy_ImageReview(in *ImageReview, out *imagepolicy.ImageReview, s conversion.Scope) error { - return autoConvert_v1alpha1_ImageReview_To_imagepolicy_ImageReview(in, out, s) -} - -func autoConvert_imagepolicy_ImageReview_To_v1alpha1_ImageReview(in *imagepolicy.ImageReview, out *ImageReview, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_imagepolicy_ImageReviewSpec_To_v1alpha1_ImageReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_imagepolicy_ImageReviewStatus_To_v1alpha1_ImageReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_imagepolicy_ImageReview_To_v1alpha1_ImageReview(in *imagepolicy.ImageReview, out *ImageReview, s conversion.Scope) error { - return autoConvert_imagepolicy_ImageReview_To_v1alpha1_ImageReview(in, out, s) -} - -func autoConvert_v1alpha1_ImageReviewContainerSpec_To_imagepolicy_ImageReviewContainerSpec(in *ImageReviewContainerSpec, out *imagepolicy.ImageReviewContainerSpec, s conversion.Scope) error { - out.Image = in.Image - return nil -} - -func Convert_v1alpha1_ImageReviewContainerSpec_To_imagepolicy_ImageReviewContainerSpec(in *ImageReviewContainerSpec, out *imagepolicy.ImageReviewContainerSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_ImageReviewContainerSpec_To_imagepolicy_ImageReviewContainerSpec(in, out, s) -} - -func autoConvert_imagepolicy_ImageReviewContainerSpec_To_v1alpha1_ImageReviewContainerSpec(in *imagepolicy.ImageReviewContainerSpec, out *ImageReviewContainerSpec, s conversion.Scope) error { - out.Image = in.Image - return nil -} - -func Convert_imagepolicy_ImageReviewContainerSpec_To_v1alpha1_ImageReviewContainerSpec(in *imagepolicy.ImageReviewContainerSpec, out *ImageReviewContainerSpec, s conversion.Scope) error { - return autoConvert_imagepolicy_ImageReviewContainerSpec_To_v1alpha1_ImageReviewContainerSpec(in, out, s) -} - -func autoConvert_v1alpha1_ImageReviewSpec_To_imagepolicy_ImageReviewSpec(in *ImageReviewSpec, out *imagepolicy.ImageReviewSpec, s conversion.Scope) error { - out.Containers = *(*[]imagepolicy.ImageReviewContainerSpec)(unsafe.Pointer(&in.Containers)) - out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) - out.Namespace = in.Namespace - return nil -} - -func Convert_v1alpha1_ImageReviewSpec_To_imagepolicy_ImageReviewSpec(in *ImageReviewSpec, out *imagepolicy.ImageReviewSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_ImageReviewSpec_To_imagepolicy_ImageReviewSpec(in, out, s) -} - -func autoConvert_imagepolicy_ImageReviewSpec_To_v1alpha1_ImageReviewSpec(in *imagepolicy.ImageReviewSpec, out *ImageReviewSpec, s conversion.Scope) error { - out.Containers = *(*[]ImageReviewContainerSpec)(unsafe.Pointer(&in.Containers)) - out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) - out.Namespace = in.Namespace - return nil -} - -func Convert_imagepolicy_ImageReviewSpec_To_v1alpha1_ImageReviewSpec(in *imagepolicy.ImageReviewSpec, out *ImageReviewSpec, s conversion.Scope) error { - return autoConvert_imagepolicy_ImageReviewSpec_To_v1alpha1_ImageReviewSpec(in, out, s) -} - -func autoConvert_v1alpha1_ImageReviewStatus_To_imagepolicy_ImageReviewStatus(in *ImageReviewStatus, out *imagepolicy.ImageReviewStatus, s conversion.Scope) error { - out.Allowed = in.Allowed - out.Reason = in.Reason - return nil -} - -func Convert_v1alpha1_ImageReviewStatus_To_imagepolicy_ImageReviewStatus(in *ImageReviewStatus, out *imagepolicy.ImageReviewStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_ImageReviewStatus_To_imagepolicy_ImageReviewStatus(in, out, s) -} - -func autoConvert_imagepolicy_ImageReviewStatus_To_v1alpha1_ImageReviewStatus(in *imagepolicy.ImageReviewStatus, out *ImageReviewStatus, s conversion.Scope) 
error { - out.Allowed = in.Allowed - out.Reason = in.Reason - return nil -} - -func Convert_imagepolicy_ImageReviewStatus_To_v1alpha1_ImageReviewStatus(in *imagepolicy.ImageReviewStatus, out *ImageReviewStatus, s conversion.Scope) error { - return autoConvert_imagepolicy_ImageReviewStatus_To_v1alpha1_ImageReviewStatus(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 88192e5009..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,105 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1alpha1 - -import ( - v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_ImageReview, InType: reflect.TypeOf(&ImageReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_ImageReviewContainerSpec, InType: reflect.TypeOf(&ImageReviewContainerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_ImageReviewSpec, InType: reflect.TypeOf(&ImageReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_ImageReviewStatus, InType: reflect.TypeOf(&ImageReviewStatus{})}, - ) -} - -func DeepCopy_v1alpha1_ImageReview(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ImageReview) - out := out.(*ImageReview) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1alpha1_ImageReviewSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - out.Status = in.Status - return nil - } -} - -func DeepCopy_v1alpha1_ImageReviewContainerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ImageReviewContainerSpec) - out := out.(*ImageReviewContainerSpec) - out.Image = in.Image - return nil - } -} - -func DeepCopy_v1alpha1_ImageReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ImageReviewSpec) - out := out.(*ImageReviewSpec) - if in.Containers != nil { - in, out := &in.Containers, &out.Containers - *out = make([]ImageReviewContainerSpec, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Containers = nil - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] 
= val - } - } else { - out.Annotations = nil - } - out.Namespace = in.Namespace - return nil - } -} - -func DeepCopy_v1alpha1_ImageReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ImageReviewStatus) - out := out.(*ImageReviewStatus) - out.Allowed = in.Allowed - out.Reason = in.Reason - return nil - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/zz_generated.deepcopy.go deleted file mode 100644 index a8748f5f17..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/zz_generated.deepcopy.go +++ /dev/null @@ -1,105 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package imagepolicy - -import ( - api "k8s.io/kubernetes/pkg/api" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_imagepolicy_ImageReview, InType: reflect.TypeOf(&ImageReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_imagepolicy_ImageReviewContainerSpec, InType: reflect.TypeOf(&ImageReviewContainerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_imagepolicy_ImageReviewSpec, InType: reflect.TypeOf(&ImageReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_imagepolicy_ImageReviewStatus, InType: reflect.TypeOf(&ImageReviewStatus{})}, - ) -} - -func DeepCopy_imagepolicy_ImageReview(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ImageReview) - out := out.(*ImageReview) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_imagepolicy_ImageReviewSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - out.Status = in.Status - return nil - } -} - -func DeepCopy_imagepolicy_ImageReviewContainerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ImageReviewContainerSpec) - out := out.(*ImageReviewContainerSpec) - out.Image = in.Image - return nil - } -} - -func DeepCopy_imagepolicy_ImageReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ImageReviewSpec) - out := out.(*ImageReviewSpec) - if in.Containers != nil { - in, out := &in.Containers, &out.Containers - *out = make([]ImageReviewContainerSpec, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Containers = nil - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string) - for key, val := range *in { 
- (*out)[key] = val - } - } else { - out.Annotations = nil - } - out.Namespace = in.Namespace - return nil - } -} - -func DeepCopy_imagepolicy_ImageReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ImageReviewStatus) - out := out.(*ImageReviewStatus) - out.Allowed = in.Allowed - out.Reason = in.Reason - return nil - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/policy/BUILD index a753dab29b..98fd8ccb49 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -15,18 +12,34 @@ go_library( srcs = [ "doc.go", "register.go", - "types.generated.go", "types.go", "zz_generated.deepcopy.go", ], tags = ["automanaged"], deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//pkg/util/intstr:go_default_library", - "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/util/intstr", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/policy/install:all-srcs", + "//pkg/apis/policy/v1alpha1:all-srcs", + "//pkg/apis/policy/v1beta1:all-srcs", + "//pkg/apis/policy/validation:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/policy/OWNERS new file mode 100755 index 0000000000..8d0eea5163 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/OWNERS @@ -0,0 +1,14 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- deads2k +- caesarxuchao +- sttts +- ncdc +- timothysc +- dims +- mml +- mbohlool +- david-mcmahon +- jianhuiz diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/doc.go index 6536d5c964..876858cd9a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/doc.go @@ -15,6 +15,5 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package,register -// +k8s:openapi-gen=true package policy diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/policy/install/BUILD index 7f62ce06d8..6d5c3b8f63 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/install/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -15,8 +12,24 @@ go_library( srcs = ["install.go"], tags = ["automanaged"], deps = [ - "//pkg/apimachinery/announced:go_default_library", + "//pkg/api:go_default_library", "//pkg/apis/policy:go_default_library", "//pkg/apis/policy/v1beta1:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/announced", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/registered", + "//vendor:k8s.io/apimachinery/pkg/runtime", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/install/install.go index 69db365b97..76129575ce 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/install/install.go @@ -19,12 +19,20 @@ limitations under the License. package install import ( - "k8s.io/kubernetes/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/policy" "k8s.io/kubernetes/pkg/apis/policy/v1beta1" ) func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { if err := announced.NewGroupMetaFactory( &announced.GroupMetaFactoryArgs{ GroupName: policy.GroupName, @@ -35,7 +43,7 @@ func init() { announced.VersionToSchemeFunc{ v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, }, - ).Announce().RegisterAndEnable(); err != nil { + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { panic(err) } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go index 40d8c0895c..5aadc3f1b8 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go @@ -17,24 +17,23 @@ limitations under the License. 
package policy import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName is the group name use in this package const GroupName = "policy" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} // Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { +func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } // Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { +func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } @@ -49,7 +48,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &PodDisruptionBudget{}, &PodDisruptionBudgetList{}, - &api.ListOptions{}, &Eviction{}, ) return nil diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/types.generated.go deleted file mode 100644 index cc6848fbf8..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/types.generated.go +++ /dev/null @@ -1,1986 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package policy - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg3_api "k8s.io/kubernetes/pkg/api" - pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg4_types "k8s.io/kubernetes/pkg/types" - pkg1_intstr "k8s.io/kubernetes/pkg/util/intstr" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg3_api.ObjectMeta - var v1 pkg2_unversioned.LabelSelector - var v2 pkg4_types.UID - var v3 pkg1_intstr.IntOrString - var v4 time.Time - _, _, _, _, _ = v0, v1, v2, v3, v4 - } -} - -func (x *PodDisruptionBudgetSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = x.Selector != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.MinAvailable - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else if !yym5 && z.IsJSONHandle() { - z.EncJSONMarshal(yy4) - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minAvailable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.MinAvailable - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(yy6) - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodDisruptionBudgetSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym11 := z.DecBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct12 := r.ContainerType() - if yyct12 == codecSelferValueTypeMap1234 { - yyl12 := r.ReadMapStart() - if yyl12 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl12, d) - } - } else if yyct12 == codecSelferValueTypeArray1234 { - yyl12 := r.ReadArrayStart() - if yyl12 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl12, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x 
*PodDisruptionBudgetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys13Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys13Slc - var yyhl13 bool = l >= 0 - for yyj13 := 0; ; yyj13++ { - if yyhl13 { - if yyj13 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys13Slc = r.DecodeBytes(yys13Slc, true, true) - yys13 := string(yys13Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys13 { - case "minAvailable": - if r.TryDecodeAsNil() { - x.MinAvailable = pkg1_intstr.IntOrString{} - } else { - yyv14 := &x.MinAvailable - yym15 := z.DecBinary() - _ = yym15 - if false { - } else if z.HasExtensions() && z.DecExt(yyv14) { - } else if !yym15 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv14) - } else { - z.DecFallback(yyv14, false) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg2_unversioned.LabelSelector) - } - yym17 := z.DecBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys13) - } // end switch yys13 - } // end for yyj13 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj18 int - var yyb18 bool - var yyhl18 bool = l >= 0 - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinAvailable = pkg1_intstr.IntOrString{} - } else { - yyv19 := &x.MinAvailable - yym20 := z.DecBinary() - _ = yym20 - if false { - } else if z.HasExtensions() && z.DecExt(yyv19) { - } else if !yym20 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv19) - } else { - z.DecFallback(yyv19, false) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg2_unversioned.LabelSelector) - } - yym22 := z.DecBinary() - _ = yym22 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - for { - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj18-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodDisruptionBudgetStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep24 := !z.EncBinary() - yy2arr24 := z.EncBasicHandle().StructToArray - var yyq24 [6]bool - _, _, _ = yysep24, yyq24, yy2arr24 - 
const yyr24 bool = false - yyq24[0] = x.ObservedGeneration != 0 - var yynn24 int - if yyr24 || yy2arr24 { - r.EncodeArrayStart(6) - } else { - yynn24 = 5 - for _, b := range yyq24 { - if b { - yynn24++ - } - } - r.EncodeMapStart(yynn24) - yynn24 = 0 - } - if yyr24 || yy2arr24 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq24[0] { - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq24[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym27 := z.EncBinary() - _ = yym27 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } - } - if yyr24 || yy2arr24 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.DisruptedPods == nil { - r.EncodeNil() - } else { - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - h.encMapstringunversioned_Time((map[string]pkg2_unversioned.Time)(x.DisruptedPods), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("disruptedPods")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DisruptedPods == nil { - r.EncodeNil() - } else { - yym30 := z.EncBinary() - _ = yym30 - if false { - } else { - h.encMapstringunversioned_Time((map[string]pkg2_unversioned.Time)(x.DisruptedPods), e) - } - } - } - if yyr24 || yy2arr24 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym32 := z.EncBinary() - _ = yym32 - if false { - } else { - r.EncodeInt(int64(x.PodDisruptionsAllowed)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("disruptionsAllowed")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym33 := z.EncBinary() - _ = yym33 - if false { - } else { - r.EncodeInt(int64(x.PodDisruptionsAllowed)) - } - } - if yyr24 || yy2arr24 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - r.EncodeInt(int64(x.CurrentHealthy)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentHealthy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - r.EncodeInt(int64(x.CurrentHealthy)) - } - } - if yyr24 || yy2arr24 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym38 := z.EncBinary() - _ = yym38 - if false { - } else { - r.EncodeInt(int64(x.DesiredHealthy)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("desiredHealthy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym39 := z.EncBinary() - _ = yym39 - if false { - } else { - r.EncodeInt(int64(x.DesiredHealthy)) - } - } - if yyr24 || yy2arr24 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym41 := z.EncBinary() - _ = yym41 - if false { - } else { - r.EncodeInt(int64(x.ExpectedPods)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("expectedPods")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym42 := z.EncBinary() - _ = yym42 - if false { - } else { - r.EncodeInt(int64(x.ExpectedPods)) - } - } - if yyr24 
|| yy2arr24 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodDisruptionBudgetStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym43 := z.DecBinary() - _ = yym43 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct44 := r.ContainerType() - if yyct44 == codecSelferValueTypeMap1234 { - yyl44 := r.ReadMapStart() - if yyl44 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl44, d) - } - } else if yyct44 == codecSelferValueTypeArray1234 { - yyl44 := r.ReadArrayStart() - if yyl44 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl44, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys45Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys45Slc - var yyhl45 bool = l >= 0 - for yyj45 := 0; ; yyj45++ { - if yyhl45 { - if yyj45 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys45Slc = r.DecodeBytes(yys45Slc, true, true) - yys45 := string(yys45Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys45 { - case "observedGeneration": - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) - } - case "disruptedPods": - if r.TryDecodeAsNil() { - x.DisruptedPods = nil - } else { - yyv47 := &x.DisruptedPods - yym48 := z.DecBinary() - _ = yym48 - if false { - } else { - h.decMapstringunversioned_Time((*map[string]pkg2_unversioned.Time)(yyv47), d) - } - } - case "disruptionsAllowed": - if r.TryDecodeAsNil() { - x.PodDisruptionsAllowed = 0 - } else { - x.PodDisruptionsAllowed = int32(r.DecodeInt(32)) - } - case "currentHealthy": - if r.TryDecodeAsNil() { - x.CurrentHealthy = 0 - } else { - x.CurrentHealthy = int32(r.DecodeInt(32)) - } - case "desiredHealthy": - if r.TryDecodeAsNil() { - x.DesiredHealthy = 0 - } else { - x.DesiredHealthy = int32(r.DecodeInt(32)) - } - case "expectedPods": - if r.TryDecodeAsNil() { - x.ExpectedPods = 0 - } else { - x.ExpectedPods = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys45) - } // end switch yys45 - } // end for yyj45 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj53 int - var yyb53 bool - var yyhl53 bool = l >= 0 - yyj53++ - if yyhl53 { - yyb53 = yyj53 > l - } else { - yyb53 = r.CheckBreak() - } - if yyb53 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) - } - yyj53++ - if yyhl53 { - yyb53 = yyj53 > l - } else { - yyb53 = r.CheckBreak() - } - if yyb53 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - 
x.DisruptedPods = nil - } else { - yyv55 := &x.DisruptedPods - yym56 := z.DecBinary() - _ = yym56 - if false { - } else { - h.decMapstringunversioned_Time((*map[string]pkg2_unversioned.Time)(yyv55), d) - } - } - yyj53++ - if yyhl53 { - yyb53 = yyj53 > l - } else { - yyb53 = r.CheckBreak() - } - if yyb53 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodDisruptionsAllowed = 0 - } else { - x.PodDisruptionsAllowed = int32(r.DecodeInt(32)) - } - yyj53++ - if yyhl53 { - yyb53 = yyj53 > l - } else { - yyb53 = r.CheckBreak() - } - if yyb53 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentHealthy = 0 - } else { - x.CurrentHealthy = int32(r.DecodeInt(32)) - } - yyj53++ - if yyhl53 { - yyb53 = yyj53 > l - } else { - yyb53 = r.CheckBreak() - } - if yyb53 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DesiredHealthy = 0 - } else { - x.DesiredHealthy = int32(r.DecodeInt(32)) - } - yyj53++ - if yyhl53 { - yyb53 = yyj53 > l - } else { - yyb53 = r.CheckBreak() - } - if yyb53 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExpectedPods = 0 - } else { - x.ExpectedPods = int32(r.DecodeInt(32)) - } - for { - yyj53++ - if yyhl53 { - yyb53 = yyj53 > l - } else { - yyb53 = r.CheckBreak() - } - if yyb53 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj53-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodDisruptionBudget) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym61 := z.EncBinary() - _ = yym61 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep62 := !z.EncBinary() - yy2arr62 := z.EncBasicHandle().StructToArray - var yyq62 [5]bool - _, _, _ = yysep62, yyq62, yy2arr62 - const yyr62 bool = false - yyq62[0] = x.Kind != "" - yyq62[1] = x.APIVersion != "" - yyq62[2] = true - yyq62[3] = true - yyq62[4] = true - var yynn62 int - if yyr62 || yy2arr62 { - r.EncodeArrayStart(5) - } else { - yynn62 = 0 - for _, b := range yyq62 { - if b { - yynn62++ - } - } - r.EncodeMapStart(yynn62) - yynn62 = 0 - } - if yyr62 || yy2arr62 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq62[0] { - yym64 := z.EncBinary() - _ = yym64 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq62[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym65 := z.EncBinary() - _ = yym65 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr62 || yy2arr62 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq62[1] { - yym67 := z.EncBinary() - _ = yym67 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { 
- if yyq62[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym68 := z.EncBinary() - _ = yym68 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr62 || yy2arr62 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq62[2] { - yy70 := &x.ObjectMeta - yy70.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq62[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy71 := &x.ObjectMeta - yy71.CodecEncodeSelf(e) - } - } - if yyr62 || yy2arr62 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq62[3] { - yy73 := &x.Spec - yy73.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq62[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy74 := &x.Spec - yy74.CodecEncodeSelf(e) - } - } - if yyr62 || yy2arr62 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq62[4] { - yy76 := &x.Status - yy76.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq62[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy77 := &x.Status - yy77.CodecEncodeSelf(e) - } - } - if yyr62 || yy2arr62 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodDisruptionBudget) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym78 := z.DecBinary() - _ = yym78 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct79 := r.ContainerType() - if yyct79 == codecSelferValueTypeMap1234 { - yyl79 := r.ReadMapStart() - if yyl79 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl79, d) - } - } else if yyct79 == codecSelferValueTypeArray1234 { - yyl79 := r.ReadArrayStart() - if yyl79 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl79, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodDisruptionBudget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys80Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys80Slc - var yyhl80 bool = l >= 0 - for yyj80 := 0; ; yyj80++ { - if yyhl80 { - if yyj80 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys80Slc = r.DecodeBytes(yys80Slc, true, true) - yys80 := string(yys80Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys80 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_api.ObjectMeta{} - } 
else { - yyv83 := &x.ObjectMeta - yyv83.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PodDisruptionBudgetSpec{} - } else { - yyv84 := &x.Spec - yyv84.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = PodDisruptionBudgetStatus{} - } else { - yyv85 := &x.Status - yyv85.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys80) - } // end switch yys80 - } // end for yyj80 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodDisruptionBudget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj86 int - var yyb86 bool - var yyhl86 bool = l >= 0 - yyj86++ - if yyhl86 { - yyb86 = yyj86 > l - } else { - yyb86 = r.CheckBreak() - } - if yyb86 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj86++ - if yyhl86 { - yyb86 = yyj86 > l - } else { - yyb86 = r.CheckBreak() - } - if yyb86 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj86++ - if yyhl86 { - yyb86 = yyj86 > l - } else { - yyb86 = r.CheckBreak() - } - if yyb86 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_api.ObjectMeta{} - } else { - yyv89 := &x.ObjectMeta - yyv89.CodecDecodeSelf(d) - } - yyj86++ - if yyhl86 { - yyb86 = yyj86 > l - } else { - yyb86 = r.CheckBreak() - } - if yyb86 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PodDisruptionBudgetSpec{} - } else { - yyv90 := &x.Spec - yyv90.CodecDecodeSelf(d) - } - yyj86++ - if yyhl86 { - yyb86 = yyj86 > l - } else { - yyb86 = r.CheckBreak() - } - if yyb86 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = PodDisruptionBudgetStatus{} - } else { - yyv91 := &x.Status - yyv91.CodecDecodeSelf(d) - } - for { - yyj86++ - if yyhl86 { - yyb86 = yyj86 > l - } else { - yyb86 = r.CheckBreak() - } - if yyb86 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj86-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodDisruptionBudgetList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym92 := z.EncBinary() - _ = yym92 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep93 := !z.EncBinary() - yy2arr93 := z.EncBasicHandle().StructToArray - var yyq93 [4]bool - _, _, _ = yysep93, yyq93, yy2arr93 - const yyr93 bool = false - yyq93[0] = x.Kind != "" - yyq93[1] = x.APIVersion != "" - yyq93[2] = true - var yynn93 int - if yyr93 || yy2arr93 { - r.EncodeArrayStart(4) - } else { - yynn93 = 1 - for _, b := range yyq93 { - if b { - yynn93++ - } - } - r.EncodeMapStart(yynn93) - yynn93 = 0 - } - if yyr93 || yy2arr93 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq93[0] { - yym95 := z.EncBinary() - _ = yym95 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq93[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym96 := z.EncBinary() - _ = yym96 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr93 || yy2arr93 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq93[1] { - yym98 := z.EncBinary() - _ = yym98 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq93[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym99 := z.EncBinary() - _ = yym99 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr93 || yy2arr93 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq93[2] { - yy101 := &x.ListMeta - yym102 := z.EncBinary() - _ = yym102 - if false { - } else if z.HasExtensions() && z.EncExt(yy101) { - } else { - z.EncFallback(yy101) - } - } else { - r.EncodeNil() - } - } else { - if yyq93[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy103 := &x.ListMeta - yym104 := z.EncBinary() - _ = yym104 - if false { - } else if z.HasExtensions() && z.EncExt(yy103) { - } else { - z.EncFallback(yy103) - } - } - } - if yyr93 || yy2arr93 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym106 := z.EncBinary() - _ = yym106 - if false { - } else { - h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym107 := z.EncBinary() - _ = yym107 - if false { - } else { - h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e) - } - } - } - if yyr93 || yy2arr93 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodDisruptionBudgetList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym108 := z.DecBinary() - _ = yym108 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct109 := r.ContainerType() - if yyct109 == codecSelferValueTypeMap1234 { - yyl109 := r.ReadMapStart() - if yyl109 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl109, d) - } - } else if yyct109 == codecSelferValueTypeArray1234 { - yyl109 := r.ReadArrayStart() - if yyl109 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl109, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x 
*PodDisruptionBudgetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys110Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys110Slc - var yyhl110 bool = l >= 0 - for yyj110 := 0; ; yyj110++ { - if yyhl110 { - if yyj110 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys110Slc = r.DecodeBytes(yys110Slc, true, true) - yys110 := string(yys110Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys110 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv113 := &x.ListMeta - yym114 := z.DecBinary() - _ = yym114 - if false { - } else if z.HasExtensions() && z.DecExt(yyv113) { - } else { - z.DecFallback(yyv113, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv115 := &x.Items - yym116 := z.DecBinary() - _ = yym116 - if false { - } else { - h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv115), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys110) - } // end switch yys110 - } // end for yyj110 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodDisruptionBudgetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj117 int - var yyb117 bool - var yyhl117 bool = l >= 0 - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv120 := &x.ListMeta - yym121 := z.DecBinary() - _ = yym121 - if false { - } else if z.HasExtensions() && z.DecExt(yyv120) { - } else { - z.DecFallback(yyv120, false) - } - } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv122 := &x.Items - yym123 := z.DecBinary() - _ = yym123 - if false { - } else { - h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv122), d) - } - } - for { - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l - } else { - yyb117 = r.CheckBreak() - } - if yyb117 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj117-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Eviction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym124 := z.EncBinary() - _ = yym124 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep125 := !z.EncBinary() - yy2arr125 := z.EncBasicHandle().StructToArray - var yyq125 [4]bool - _, _, _ = yysep125, yyq125, yy2arr125 - const yyr125 bool = false - yyq125[0] = x.Kind != "" - yyq125[1] = x.APIVersion != "" - yyq125[2] = true - yyq125[3] = x.DeleteOptions != nil - var yynn125 int - if yyr125 || yy2arr125 { - r.EncodeArrayStart(4) - } else { - yynn125 = 0 - for _, b := range yyq125 { - if b { - yynn125++ - } - } - r.EncodeMapStart(yynn125) - yynn125 = 0 - } - if yyr125 || yy2arr125 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq125[0] { - yym127 := z.EncBinary() - _ = yym127 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq125[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym128 := z.EncBinary() - _ = yym128 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr125 || yy2arr125 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq125[1] { - yym130 := z.EncBinary() - _ = yym130 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq125[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym131 := z.EncBinary() - _ = yym131 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr125 || yy2arr125 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq125[2] { - yy133 := &x.ObjectMeta - yy133.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq125[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy134 := &x.ObjectMeta - yy134.CodecEncodeSelf(e) - } - } - if yyr125 || yy2arr125 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq125[3] { - if x.DeleteOptions == nil { - r.EncodeNil() - } else { - x.DeleteOptions.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq125[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deleteOptions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DeleteOptions == nil { - r.EncodeNil() - } else { - x.DeleteOptions.CodecEncodeSelf(e) - } - } - } - if yyr125 || yy2arr125 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Eviction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym136 := 
z.DecBinary() - _ = yym136 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct137 := r.ContainerType() - if yyct137 == codecSelferValueTypeMap1234 { - yyl137 := r.ReadMapStart() - if yyl137 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl137, d) - } - } else if yyct137 == codecSelferValueTypeArray1234 { - yyl137 := r.ReadArrayStart() - if yyl137 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl137, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Eviction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys138Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys138Slc - var yyhl138 bool = l >= 0 - for yyj138 := 0; ; yyj138++ { - if yyhl138 { - if yyj138 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys138Slc = r.DecodeBytes(yys138Slc, true, true) - yys138 := string(yys138Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys138 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_api.ObjectMeta{} - } else { - yyv141 := &x.ObjectMeta - yyv141.CodecDecodeSelf(d) - } - case "deleteOptions": - if r.TryDecodeAsNil() { - if x.DeleteOptions != nil { - x.DeleteOptions = nil - } - } else { - if x.DeleteOptions == nil { - x.DeleteOptions = new(pkg3_api.DeleteOptions) - } - x.DeleteOptions.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys138) - } // end switch yys138 - } // end for yyj138 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Eviction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj143 int - var yyb143 bool - var yyhl143 bool = l >= 0 - yyj143++ - if yyhl143 { - yyb143 = yyj143 > l - } else { - yyb143 = r.CheckBreak() - } - if yyb143 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj143++ - if yyhl143 { - yyb143 = yyj143 > l - } else { - yyb143 = r.CheckBreak() - } - if yyb143 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj143++ - if yyhl143 { - yyb143 = yyj143 > l - } else { - yyb143 = r.CheckBreak() - } - if yyb143 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_api.ObjectMeta{} - } else { - yyv146 := &x.ObjectMeta - yyv146.CodecDecodeSelf(d) - } - yyj143++ - if yyhl143 { - yyb143 = yyj143 > l - } else { - yyb143 = r.CheckBreak() - } - if yyb143 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - if x.DeleteOptions != nil { - x.DeleteOptions = nil - } - } else { - if x.DeleteOptions == nil { - x.DeleteOptions = new(pkg3_api.DeleteOptions) - } - x.DeleteOptions.CodecDecodeSelf(d) - } - for { - yyj143++ - if yyhl143 { - yyb143 = yyj143 > l - } else { - yyb143 = r.CheckBreak() - } - if yyb143 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj143-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encMapstringunversioned_Time(v map[string]pkg2_unversioned.Time, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk148, yyv148 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym149 := z.EncBinary() - _ = yym149 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyk148)) - } - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy150 := &yyv148 - yym151 := z.EncBinary() - _ = yym151 - if false { - } else if z.HasExtensions() && z.EncExt(yy150) { - } else if yym151 { - z.EncBinaryMarshal(yy150) - } else if !yym151 && z.IsJSONHandle() { - z.EncJSONMarshal(yy150) - } else { - z.EncFallback(yy150) - } - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decMapstringunversioned_Time(v *map[string]pkg2_unversioned.Time, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv152 := *v - yyl152 := r.ReadMapStart() - yybh152 := z.DecBasicHandle() - if yyv152 == nil { - yyrl152, _ := z.DecInferLen(yyl152, yybh152.MaxInitLen, 40) - yyv152 = make(map[string]pkg2_unversioned.Time, yyrl152) - *v = yyv152 - } - var yymk152 string - var yymv152 pkg2_unversioned.Time - var yymg152 bool - if yybh152.MapValueReset { - yymg152 = true - } - if yyl152 > 0 { - for yyj152 := 0; yyj152 < yyl152; yyj152++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk152 = "" - } else { - yymk152 = string(r.DecodeString()) - } - - if yymg152 { - yymv152 = yyv152[yymk152] - } else { - yymv152 = pkg2_unversioned.Time{} - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv152 = pkg2_unversioned.Time{} - } else { - yyv154 := &yymv152 - yym155 := z.DecBinary() - _ = yym155 - if false { - } else if z.HasExtensions() && z.DecExt(yyv154) { - } else if yym155 { - z.DecBinaryUnmarshal(yyv154) - } else if !yym155 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv154) - } else { - z.DecFallback(yyv154, false) - } - } - - if yyv152 != nil { - yyv152[yymk152] = yymv152 - } - } - } else if yyl152 < 0 { - for yyj152 := 0; !r.CheckBreak(); yyj152++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk152 = "" - } else { - yymk152 = string(r.DecodeString()) - } - - if yymg152 { - yymv152 = yyv152[yymk152] - } else { - yymv152 = pkg2_unversioned.Time{} - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv152 = pkg2_unversioned.Time{} - } else { - yyv157 := &yymv152 - yym158 := z.DecBinary() - _ = yym158 - if false { - } else if z.HasExtensions() && z.DecExt(yyv157) { - } else if yym158 { - z.DecBinaryUnmarshal(yyv157) - } else if !yym158 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv157) - } else { - z.DecFallback(yyv157, false) - } - } - - if yyv152 != nil { - yyv152[yymk152] = yymv152 - } - } - } // else 
len==0: TODO: Should we clear map entries? - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) encSlicePodDisruptionBudget(v []PodDisruptionBudget, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv159 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy160 := &yyv159 - yy160.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePodDisruptionBudget(v *[]PodDisruptionBudget, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv161 := *v - yyh161, yyl161 := z.DecSliceHelperStart() - var yyc161 bool - if yyl161 == 0 { - if yyv161 == nil { - yyv161 = []PodDisruptionBudget{} - yyc161 = true - } else if len(yyv161) != 0 { - yyv161 = yyv161[:0] - yyc161 = true - } - } else if yyl161 > 0 { - var yyrr161, yyrl161 int - var yyrt161 bool - if yyl161 > cap(yyv161) { - - yyrg161 := len(yyv161) > 0 - yyv2161 := yyv161 - yyrl161, yyrt161 = z.DecInferLen(yyl161, z.DecBasicHandle().MaxInitLen, 328) - if yyrt161 { - if yyrl161 <= cap(yyv161) { - yyv161 = yyv161[:yyrl161] - } else { - yyv161 = make([]PodDisruptionBudget, yyrl161) - } - } else { - yyv161 = make([]PodDisruptionBudget, yyrl161) - } - yyc161 = true - yyrr161 = len(yyv161) - if yyrg161 { - copy(yyv161, yyv2161) - } - } else if yyl161 != len(yyv161) { - yyv161 = yyv161[:yyl161] - yyc161 = true - } - yyj161 := 0 - for ; yyj161 < yyrr161; yyj161++ { - yyh161.ElemContainerState(yyj161) - if r.TryDecodeAsNil() { - yyv161[yyj161] = PodDisruptionBudget{} - } else { - yyv162 := &yyv161[yyj161] - yyv162.CodecDecodeSelf(d) - } - - } - if yyrt161 { - for ; yyj161 < yyl161; yyj161++ { - yyv161 = append(yyv161, PodDisruptionBudget{}) - yyh161.ElemContainerState(yyj161) - if r.TryDecodeAsNil() { - yyv161[yyj161] = PodDisruptionBudget{} - } else { - yyv163 := &yyv161[yyj161] - yyv163.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj161 := 0 - for ; !r.CheckBreak(); yyj161++ { - - if yyj161 >= len(yyv161) { - yyv161 = append(yyv161, PodDisruptionBudget{}) // var yyz161 PodDisruptionBudget - yyc161 = true - } - yyh161.ElemContainerState(yyj161) - if yyj161 < len(yyv161) { - if r.TryDecodeAsNil() { - yyv161[yyj161] = PodDisruptionBudget{} - } else { - yyv164 := &yyv161[yyj161] - yyv164.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj161 < len(yyv161) { - yyv161 = yyv161[:yyj161] - yyc161 = true - } else if yyj161 == 0 && yyv161 == nil { - yyv161 = []PodDisruptionBudget{} - yyc161 = true - } - } - yyh161.End() - if yyc161 { - *v = yyv161 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go index b9f298f0e6..ef2ff713ad 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go @@ -17,9 +17,8 @@ limitations under the License. package policy import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/util/intstr" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" ) // PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. @@ -29,12 +28,12 @@ type PodDisruptionBudgetSpec struct { // absence of the evicted pod. So for example you can prevent all voluntary // evictions by specifying "100%". 
// +optional - MinAvailable intstr.IntOrString `json:"minAvailable,omitempty"` + MinAvailable intstr.IntOrString // Label query over pods whose evictions are managed by the disruption // budget. // +optional - Selector *unversioned.LabelSelector `json:"selector,omitempty"` + Selector *metav1.LabelSelector } // PodDisruptionBudgetStatus represents information about the status of a @@ -43,7 +42,7 @@ type PodDisruptionBudgetStatus struct { // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other // status informatio is valid only if observedGeneration equals to PDB's object generation. // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty"` + ObservedGeneration int64 // DisruptedPods contains information about pods whose eviction was // processed by the API server eviction subresource handler but has not @@ -56,43 +55,43 @@ type PodDisruptionBudgetStatus struct { // the list automatically by PodDisruptionBudget controller after some time. // If everything goes smooth this map should be empty for the most of the time. // Large number of entries in the map may indicate problems with pod deletions. - DisruptedPods map[string]unversioned.Time `json:"disruptedPods" protobuf:"bytes,5,rep,name=disruptedPods"` + DisruptedPods map[string]metav1.Time // Number of pod disruptions that are currently allowed. - PodDisruptionsAllowed int32 `json:"disruptionsAllowed"` + PodDisruptionsAllowed int32 // current number of healthy pods - CurrentHealthy int32 `json:"currentHealthy"` + CurrentHealthy int32 // minimum desired number of healthy pods - DesiredHealthy int32 `json:"desiredHealthy"` + DesiredHealthy int32 // total number of pods counted by this disruption budget - ExpectedPods int32 `json:"expectedPods"` + ExpectedPods int32 } // +genclient=true // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods type PodDisruptionBudget struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - api.ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // Specification of the desired behavior of the PodDisruptionBudget. // +optional - Spec PodDisruptionBudgetSpec `json:"spec,omitempty"` + Spec PodDisruptionBudgetSpec // Most recently observed status of the PodDisruptionBudget. // +optional - Status PodDisruptionBudgetStatus `json:"status,omitempty"` + Status PodDisruptionBudgetStatus } // PodDisruptionBudgetList is a collection of PodDisruptionBudgets. type PodDisruptionBudgetList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - unversioned.ListMeta `json:"metadata,omitempty"` - Items []PodDisruptionBudget `json:"items"` + metav1.ListMeta + Items []PodDisruptionBudget } // +genclient=true @@ -102,13 +101,13 @@ type PodDisruptionBudgetList struct { // This is a subresource of Pod. A request to cause such an eviction is // created by POSTing to .../pods/<pod name>/eviction. type Eviction struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // ObjectMeta describes the pod that is being evicted.
// +optional - api.ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // DeleteOptions may be provided // +optional - DeleteOptions *api.DeleteOptions `json:"deleteOptions,omitempty"` + DeleteOptions *metav1.DeleteOptions } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/BUILD index dc3cb8ff4e..47990f1c8d 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -24,17 +21,28 @@ go_library( ], tags = ["automanaged"], deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/apis/policy:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//pkg/util/intstr:go_default_library", - "//pkg/watch/versioned:go_default_library", "//vendor:github.com/gogo/protobuf/proto", "//vendor:github.com/gogo/protobuf/sortkeys", "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/util/intstr", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/generated.pb.go index 4e26157669..76aa80ccf4 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/generated.pb.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
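The pkg/apis/policy/types.go hunks above carry the substance of this vendor bump: the internal policy structs drop their JSON tags and take ObjectMeta, ListMeta, LabelSelector, Time and DeleteOptions from k8s.io/apimachinery/pkg/apis/meta/v1 (metav1), and IntOrString from k8s.io/apimachinery/pkg/util/intstr, instead of the old pkg/api and pkg/api/unversioned packages. As a minimal sketch of what building one of these objects looks like after the move (the field names and import paths come from the hunks above; the examplePDB helper and its literal values are made up for illustration):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"

	"k8s.io/kubernetes/pkg/apis/policy"
)

// examplePDB builds an internal policy.PodDisruptionBudget with the
// post-migration types: metav1.ObjectMeta and metav1.LabelSelector come
// from apimachinery, and MinAvailable is an apimachinery intstr.IntOrString.
func examplePDB() policy.PodDisruptionBudget {
	return policy.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "example-pdb", Namespace: "default"},
		Spec: policy.PodDisruptionBudgetSpec{
			// "100%" keeps every selected pod available, which blocks all
			// voluntary evictions, as the MinAvailable comment above notes.
			MinAvailable: intstr.FromString("100%"),
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "example"},
			},
		},
	}
}

func main() {
	pdb := examplePDB()
	fmt.Printf("%s: minAvailable=%s\n", pdb.Name, pdb.Spec.MinAvailable.String())
}

The remaining hunks (the generated protobuf and ugorji codec files) repeat this same import-path move for the machine-generated code.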
@@ -37,8 +37,7 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" -import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" import strings "strings" import reflect "reflect" @@ -406,8 +405,8 @@ func (this *Eviction) String() string { return "nil" } s := strings.Join([]string{`&Eviction{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "k8s_io_kubernetes_pkg_api_v1.DeleteOptions", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions", 1) + `,`, `}`, }, "") return s @@ -417,7 +416,7 @@ func (this *PodDisruptionBudget) String() string { return "nil" } s := strings.Join([]string{`&PodDisruptionBudget{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodDisruptionBudgetSpec", "PodDisruptionBudgetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodDisruptionBudgetStatus", "PodDisruptionBudgetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -429,7 +428,7 @@ func (this *PodDisruptionBudgetList) String() string { return "nil" } s := strings.Join([]string{`&PodDisruptionBudgetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -440,8 +439,8 @@ func (this *PodDisruptionBudgetSpec) String() string { return "nil" } s := strings.Join([]string{`&PodDisruptionBudgetSpec{`, - `MinAvailable:` + strings.Replace(strings.Replace(this.MinAvailable.String(), "IntOrString", "k8s_io_kubernetes_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_kubernetes_pkg_api_unversioned.LabelSelector", 1) + `,`, + `MinAvailable:` + strings.Replace(strings.Replace(this.MinAvailable.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -455,7 +454,7 @@ func (this *PodDisruptionBudgetStatus) String() string { keysForDisruptedPods = append(keysForDisruptedPods, k) } github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) - mapStringForDisruptedPods := 
"map[string]k8s_io_kubernetes_pkg_api_unversioned.Time{" + mapStringForDisruptedPods := "map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time{" for _, k := range keysForDisruptedPods { mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k]) } @@ -565,7 +564,7 @@ func (m *Eviction) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.DeleteOptions == nil { - m.DeleteOptions = &k8s_io_kubernetes_pkg_api_v1.DeleteOptions{} + m.DeleteOptions = &k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions{} } if err := m.DeleteOptions.Unmarshal(data[iNdEx:postIndex]); err != nil { return err @@ -929,7 +928,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { return err @@ -1110,13 +1109,13 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(data []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue := &k8s_io_kubernetes_pkg_api_unversioned.Time{} + mapvalue := &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { return err } iNdEx = postmsgIndex if m.DisruptedPods == nil { - m.DisruptedPods = make(map[string]k8s_io_kubernetes_pkg_api_unversioned.Time) + m.DisruptedPods = make(map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time) } m.DisruptedPods[mapkey] = *mapvalue iNdEx = postIndex @@ -1323,53 +1322,54 @@ var ( ) var fileDescriptorGenerated = []byte{ - // 758 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x94, 0xcf, 0x6f, 0xdb, 0x36, - 0x14, 0xc7, 0xad, 0xd8, 0xce, 0x3c, 0xc6, 0x0e, 0x32, 0x6e, 0xd9, 0x3c, 0x03, 0x53, 0x06, 0x9f, - 0x12, 0x6c, 0xa1, 0xe0, 0x60, 0x03, 0xb2, 0x1d, 0x02, 0x44, 0x73, 0xb0, 0x65, 0x58, 0xe0, 0x40, - 0x19, 0xb0, 0x61, 0xc0, 0x0a, 0xe8, 0xc7, 0xab, 0xc2, 0x5a, 0x16, 0x05, 0x92, 0x52, 0xeb, 0x5b, - 0xff, 0x84, 0x1e, 0xfa, 0x1f, 0xf5, 0x12, 0xf4, 0x94, 0x63, 0x2f, 0x0d, 0x1a, 0xe7, 0x7f, 0xe8, - 0xb9, 0x90, 0x44, 0x3b, 0x96, 0x7f, 0x04, 0x01, 0xd2, 0xde, 0x44, 0xf2, 0x7d, 0xbe, 0xdf, 0xf7, - 0x1e, 0x1f, 0x85, 0x7e, 0xe9, 0xef, 0x0b, 0x42, 0x99, 0xd1, 0x8f, 0x1d, 0xe0, 0x21, 0x48, 0x10, - 0x46, 0xd4, 0xf7, 0x0d, 0x3b, 0xa2, 0xc2, 0x88, 0x58, 0x40, 0xdd, 0xa1, 0x91, 0x74, 0x1c, 0x90, - 0x76, 0xc7, 0xf0, 0x21, 0x04, 0x6e, 0x4b, 0xf0, 0x48, 0xc4, 0x99, 0x64, 0x78, 0x27, 0x47, 0xc9, - 0x2d, 0x4a, 0xa2, 0xbe, 0x4f, 0x52, 0x94, 0xe4, 0x28, 0x51, 0x68, 0x6b, 0xd7, 0xa7, 0xf2, 0x3c, - 0x76, 0x88, 0xcb, 0x06, 0x86, 0xcf, 0x7c, 0x66, 0x64, 0x0a, 0x4e, 0xfc, 0x38, 0x5b, 0x65, 0x8b, - 0xec, 0x2b, 0x57, 0x6e, 0xed, 0x2d, 0x4d, 0xca, 0xe0, 0x20, 0x58, 0xcc, 0x5d, 0x98, 0xcd, 0xa6, - 0xf5, 0xf3, 0x72, 0x26, 0x0e, 0x13, 0xe0, 0x82, 0xb2, 0x10, 0xbc, 0x39, 0xec, 0xc7, 0xe5, 0x58, - 0x32, 0x57, 0x72, 0x6b, 0x77, 0x71, 0x34, 0x8f, 0x43, 0x49, 0x07, 0xf3, 0x39, 0x75, 0x16, 0x87, - 0xc7, 0x92, 0x06, 0x06, 0x0d, 0xa5, 0x90, 0x7c, 0x16, 0x69, 0xbf, 0xd6, 0x50, 0xed, 0x28, 0xa1, - 0xae, 0xa4, 0x2c, 0xc4, 0xff, 0xa2, 0xda, 0x00, 0xa4, 0xed, 0xd9, 0xd2, 0x6e, 0x6a, 0xdf, 0x6b, - 0xdb, 0x6b, 0x7b, 0xdb, 0x64, 0x69, 0xd3, 0x49, 0xd2, 0x21, 0x3d, 0xe7, 0x09, 0xb8, 0xf2, 0x04, - 0xa4, 0x6d, 0xe2, 0x8b, 0xab, 0xad, 0xd2, 0xe8, 0x6a, 0x0b, 0xdd, 0xee, 0x59, 0x13, 0x35, 0xec, - 0xa1, 0x86, 0x07, 0x01, 0x48, 0xe8, 0x45, 0xa9, 0x93, 0x68, 0xae, 0x64, 0xf2, 0x3f, 
0xdc, 0x2d, - 0xdf, 0x9d, 0x46, 0xcc, 0x2f, 0x46, 0x57, 0x5b, 0x8d, 0xc2, 0x96, 0x55, 0x14, 0x6d, 0xbf, 0x5a, - 0x41, 0x5f, 0x9e, 0x32, 0xaf, 0x4b, 0x05, 0x8f, 0xb3, 0x2d, 0x33, 0xf6, 0x7c, 0x90, 0x9f, 0xb4, - 0xae, 0x8a, 0x88, 0xc0, 0x55, 0xe5, 0x98, 0xe4, 0xde, 0x23, 0x4a, 0x16, 0xe4, 0x79, 0x16, 0x81, - 0x6b, 0xd6, 0x95, 0x5f, 0x25, 0x5d, 0x59, 0x99, 0x3a, 0x0e, 0xd0, 0xaa, 0x90, 0xb6, 0x8c, 0x45, - 0xb3, 0x9c, 0xf9, 0x74, 0x1f, 0xe8, 0x93, 0x69, 0x99, 0xeb, 0xca, 0x69, 0x35, 0x5f, 0x5b, 0xca, - 0xa3, 0xfd, 0x56, 0x43, 0xdf, 0x2c, 0xa0, 0xfe, 0xa2, 0x42, 0xe2, 0xff, 0xe7, 0x3a, 0x69, 0xdc, - 0xd1, 0xc9, 0xa9, 0x87, 0x40, 0x52, 0x3c, 0x6b, 0xe8, 0x86, 0xb2, 0xad, 0x8d, 0x77, 0xa6, 0xda, - 0xe9, 0xa2, 0x2a, 0x95, 0x30, 0x48, 0xc7, 0xa3, 0xbc, 0xbd, 0xb6, 0x77, 0xf0, 0xb0, 0x3a, 0xcd, - 0x86, 0xb2, 0xaa, 0x1e, 0xa7, 0xa2, 0x56, 0xae, 0xdd, 0xbe, 0x59, 0x5c, 0x5f, 0xda, 0x6f, 0x7c, - 0x8e, 0xea, 0x03, 0x1a, 0x1e, 0x26, 0x36, 0x0d, 0x6c, 0x27, 0x00, 0x55, 0x23, 0x59, 0x92, 0x47, - 0xfa, 0xb0, 0x48, 0xfe, 0xb0, 0xc8, 0x71, 0x28, 0x7b, 0xfc, 0x4c, 0x72, 0x1a, 0xfa, 0xe6, 0x57, - 0xca, 0xb7, 0x7e, 0x32, 0xa5, 0x65, 0x15, 0x94, 0xf1, 0x23, 0x54, 0x13, 0x10, 0x80, 0x2b, 0x19, - 0x57, 0xd3, 0xf3, 0xd3, 0x7d, 0x3b, 0x69, 0x3b, 0x10, 0x9c, 0x29, 0xd6, 0xac, 0xa7, 0xad, 0x1c, - 0xaf, 0xac, 0x89, 0x66, 0xfb, 0x7d, 0x05, 0x7d, 0xbb, 0xf4, 0xee, 0xf1, 0x9f, 0x08, 0x33, 0x47, - 0x00, 0x4f, 0xc0, 0xfb, 0x3d, 0xff, 0x23, 0x50, 0x16, 0x66, 0xd5, 0x96, 0xcd, 0x96, 0xca, 0x1e, - 0xf7, 0xe6, 0x22, 0xac, 0x05, 0x14, 0x7e, 0xa9, 0xa1, 0x86, 0x97, 0xdb, 0x80, 0x77, 0xca, 0xbc, - 0xf1, 0xed, 0xfd, 0xf3, 0x31, 0xa6, 0x94, 0x74, 0xa7, 0x95, 0x8f, 0x42, 0xc9, 0x87, 0xe6, 0xa6, - 0x4a, 0xb0, 0x51, 0x38, 0xb3, 0x8a, 0x49, 0xe0, 0x13, 0x84, 0xbd, 0x89, 0xa4, 0x38, 0x0c, 0x02, - 0xf6, 0x14, 0xbc, 0xec, 0x01, 0x55, 0xcd, 0xef, 0x94, 0xc2, 0x66, 0xc1, 0x77, 0x1c, 0x64, 0x2d, - 0x00, 0xf1, 0x01, 0x5a, 0x77, 0x63, 0xce, 0x21, 0x94, 0x7f, 0x80, 0x1d, 0xc8, 0xf3, 0x61, 0xb3, - 0x92, 0x49, 0x7d, 0xad, 0xa4, 0xd6, 0x7f, 0x2b, 0x9c, 0x5a, 0x33, 0xd1, 0x29, 0xef, 0x81, 0xa0, - 0x1c, 0xbc, 0x31, 0x5f, 0x2d, 0xf2, 0xdd, 0xc2, 0xa9, 0x35, 0x13, 0x8d, 0xf7, 0x51, 0x1d, 0x9e, - 0x45, 0xe0, 0x8e, 0x7b, 0xbc, 0x9a, 0xd1, 0x93, 0x49, 0x3b, 0x9a, 0x3a, 0xb3, 0x0a, 0x91, 0xad, - 0x01, 0xc2, 0xf3, 0x4d, 0xc4, 0x1b, 0xa8, 0xdc, 0x87, 0x61, 0x76, 0xe5, 0x9f, 0x5b, 0xe9, 0x27, - 0x3e, 0x44, 0xd5, 0xc4, 0x0e, 0x62, 0xb8, 0xc7, 0xbf, 0x79, 0x7a, 0x1c, 0xff, 0xa6, 0x03, 0xb0, - 0x72, 0xf2, 0xd7, 0x95, 0x7d, 0xcd, 0xdc, 0xb9, 0xb8, 0xd6, 0x4b, 0x97, 0xd7, 0x7a, 0xe9, 0xcd, - 0xb5, 0x5e, 0x7a, 0x3e, 0xd2, 0xb5, 0x8b, 0x91, 0xae, 0x5d, 0x8e, 0x74, 0xed, 0xdd, 0x48, 0xd7, - 0x5e, 0xdc, 0xe8, 0xa5, 0xff, 0x3e, 0x53, 0xb7, 0xfe, 0x21, 0x00, 0x00, 0xff, 0xff, 0x59, 0x3e, - 0xe1, 0xf8, 0x0d, 0x08, 0x00, 0x00, + // 773 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x94, 0xcb, 0x6e, 0xf3, 0x44, + 0x18, 0x86, 0xe3, 0x26, 0x29, 0x61, 0x9a, 0x54, 0x65, 0xa0, 0x10, 0x22, 0xe1, 0xa2, 0xac, 0x5a, + 0x04, 0x63, 0xda, 0x22, 0x54, 0x58, 0x54, 0xd4, 0xa4, 0x82, 0xa2, 0x56, 0xa9, 0x5c, 0x24, 0x24, + 0x04, 0x12, 0x63, 0xfb, 0xc3, 0x99, 0xc6, 0x27, 0x8d, 0xc7, 0xa1, 0xd9, 0x71, 0x09, 0x2c, 0xb8, + 0xa8, 0x4a, 0x6c, 0xba, 0x44, 0x08, 0x55, 0x34, 0x70, 0x0b, 0xec, 0x91, 0xc7, 0x93, 0x83, 0x9b, + 0x44, 0x0d, 0xea, 0xaf, 0x7f, 0xe7, 0x39, 0x3c, 0xef, 0xfb, 0x9d, 0xc6, 0xe8, 0x93, 0xfe, 0x51, + 0x42, 0x58, 0x64, 0xf4, 0x53, 0x1b, 0x78, 0x08, 0x02, 0x12, 0x23, 0xee, 0x7b, 0x06, 0x8d, 0x59, + 0x62, 0xc4, 0x91, 0xcf, 
0x9c, 0xa1, 0x31, 0xd8, 0xb7, 0x41, 0xd0, 0x7d, 0xc3, 0x83, 0x10, 0x38, + 0x15, 0xe0, 0x92, 0x98, 0x47, 0x22, 0xc2, 0x7b, 0x39, 0x4a, 0xa6, 0x28, 0x89, 0xfb, 0x1e, 0xc9, + 0x50, 0x92, 0xa3, 0x44, 0xa1, 0xad, 0x0f, 0x3c, 0x26, 0x7a, 0xa9, 0x4d, 0x9c, 0x28, 0x30, 0xbc, + 0xc8, 0x8b, 0x0c, 0xa9, 0x60, 0xa7, 0x3f, 0xca, 0x95, 0x5c, 0xc8, 0xaf, 0x5c, 0xb9, 0xf5, 0x91, + 0x0a, 0x8a, 0xc6, 0x2c, 0xa0, 0x4e, 0x8f, 0x85, 0xc0, 0x87, 0xd3, 0xb0, 0x02, 0x10, 0xd4, 0x18, + 0xcc, 0xc5, 0xd3, 0x32, 0x96, 0x51, 0x3c, 0x0d, 0x05, 0x0b, 0x60, 0x0e, 0xf8, 0xf8, 0x29, 0x20, + 0x71, 0x7a, 0x10, 0xd0, 0x39, 0xee, 0x70, 0x19, 0x97, 0x0a, 0xe6, 0x1b, 0x2c, 0x14, 0x89, 0xe0, + 0x73, 0xd0, 0x4c, 0x4e, 0x09, 0xf0, 0x01, 0xf0, 0x69, 0x42, 0x70, 0x43, 0x83, 0xd8, 0x87, 0x45, + 0x39, 0xbd, 0xbf, 0xb4, 0x3d, 0x0b, 0x6e, 0xb7, 0xff, 0xd0, 0x50, 0xed, 0x74, 0xc0, 0x1c, 0xc1, + 0xa2, 0x10, 0xff, 0x80, 0x6a, 0x59, 0xa5, 0x5c, 0x2a, 0x68, 0x53, 0x7b, 0x57, 0xdb, 0xdd, 0x38, + 0xf8, 0x90, 0xa8, 0x8e, 0xcd, 0x06, 0x3e, 0xed, 0x59, 0x76, 0x9b, 0x0c, 0xf6, 0x49, 0xd7, 0xbe, + 0x06, 0x47, 0x5c, 0x80, 0xa0, 0x26, 0xbe, 0xbd, 0xdf, 0x29, 0x8d, 0xee, 0x77, 0xd0, 0x74, 0xcf, + 0x9a, 0xa8, 0x62, 0x1f, 0x35, 0x5c, 0xf0, 0x41, 0x40, 0x37, 0xce, 0x1c, 0x93, 0xe6, 0x9a, 0xb4, + 0x39, 0x5c, 0xcd, 0xa6, 0x33, 0x8b, 0x9a, 0xaf, 0x8d, 0xee, 0x77, 0x1a, 0x85, 0x2d, 0xab, 0x28, + 0xde, 0xfe, 0x6d, 0x0d, 0xbd, 0x7e, 0x19, 0xb9, 0x1d, 0x96, 0xf0, 0x54, 0x6e, 0x99, 0xa9, 0xeb, + 0x81, 0x78, 0x09, 0x79, 0xba, 0xa8, 0x92, 0xc4, 0xe0, 0xa8, 0xf4, 0x4c, 0xb2, 0xf2, 0xdc, 0x93, + 0x05, 0xf1, 0x5e, 0xc5, 0xe0, 0x98, 0x75, 0xe5, 0x57, 0xc9, 0x56, 0x96, 0x54, 0xc7, 0x3e, 0x5a, + 0x4f, 0x04, 0x15, 0x69, 0xd2, 0x2c, 0x4b, 0x9f, 0xce, 0x33, 0x7d, 0xa4, 0x96, 0xb9, 0xa9, 0x9c, + 0xd6, 0xf3, 0xb5, 0xa5, 0x3c, 0xda, 0x7f, 0x6a, 0xe8, 0xad, 0x05, 0xd4, 0x39, 0x4b, 0x04, 0xfe, + 0x6e, 0xae, 0xa2, 0x64, 0xb5, 0x8a, 0x66, 0xb4, 0xac, 0xe7, 0x96, 0x72, 0xad, 0x8d, 0x77, 0x66, + 0xaa, 0xe9, 0xa0, 0x2a, 0x13, 0x10, 0x64, 0xd3, 0x52, 0xde, 0xdd, 0x38, 0x38, 0x7e, 0x5e, 0x9a, + 0x66, 0x43, 0x59, 0x55, 0xcf, 0x32, 0x51, 0x2b, 0xd7, 0x6e, 0xff, 0xb3, 0x38, 0xbd, 0xac, 0xdc, + 0xf8, 0x1a, 0xd5, 0x03, 0x16, 0x9e, 0x0c, 0x28, 0xf3, 0xa9, 0xed, 0xc3, 0x93, 0x43, 0x93, 0xbd, + 0x6a, 0x92, 0xbf, 0x6a, 0x72, 0x16, 0x8a, 0x2e, 0xbf, 0x12, 0x9c, 0x85, 0x9e, 0xf9, 0x86, 0x72, + 0xae, 0x5f, 0xcc, 0xa8, 0x59, 0x05, 0x6d, 0xfc, 0x3d, 0xaa, 0x25, 0xe0, 0x83, 0x23, 0x22, 0xfe, + 0xff, 0x5e, 0xc7, 0x39, 0xb5, 0xc1, 0xbf, 0x52, 0xa8, 0x59, 0xcf, 0x6a, 0x39, 0x5e, 0x59, 0x13, + 0xc9, 0xf6, 0xbf, 0x15, 0xf4, 0xf6, 0xd2, 0xde, 0xe3, 0xaf, 0x10, 0x8e, 0x6c, 0xf9, 0xb3, 0x71, + 0xbf, 0xc8, 0xff, 0x14, 0x2c, 0x0a, 0x65, 0xba, 0x65, 0xb3, 0xa5, 0x82, 0xc7, 0xdd, 0xb9, 0x1b, + 0xd6, 0x02, 0x0a, 0xff, 0xaa, 0xa1, 0x86, 0x9b, 0xdb, 0x80, 0x7b, 0x19, 0xb9, 0xe3, 0xf6, 0x7d, + 0xf3, 0x22, 0xa6, 0x94, 0x74, 0x66, 0x95, 0x4f, 0x43, 0xc1, 0x87, 0xe6, 0xb6, 0x0a, 0xb0, 0x51, + 0x38, 0xb3, 0x8a, 0x41, 0xe0, 0x0b, 0x84, 0xdd, 0x89, 0x64, 0x72, 0xe2, 0xfb, 0xd1, 0x4f, 0xe0, + 0xca, 0x07, 0x54, 0x35, 0xdf, 0x51, 0x0a, 0xdb, 0x05, 0xdf, 0xf1, 0x25, 0x6b, 0x01, 0x88, 0x8f, + 0xd1, 0xa6, 0x93, 0x72, 0x0e, 0xa1, 0xf8, 0x12, 0xa8, 0x2f, 0x7a, 0xc3, 0x66, 0x45, 0x4a, 0xbd, + 0xa9, 0xa4, 0x36, 0x3f, 0x2f, 0x9c, 0x5a, 0x8f, 0x6e, 0x67, 0xbc, 0x0b, 0x09, 0xe3, 0xe0, 0x8e, + 0xf9, 0x6a, 0x91, 0xef, 0x14, 0x4e, 0xad, 0x47, 0xb7, 0xf1, 0x11, 0xaa, 0xc3, 0x4d, 0x0c, 0xce, + 0xb8, 0xc6, 0xeb, 0x92, 0x9e, 0x0c, 0xda, 0xe9, 0xcc, 0x99, 0x55, 0xb8, 0xd9, 0xf2, 0x11, 0x9e, + 0x2f, 0x22, 0xde, 0x42, 0xe5, 0x3e, 0x0c, 0x65, 
0xcb, 0x5f, 0xb5, 0xb2, 0x4f, 0xfc, 0x19, 0xaa, + 0x0e, 0xa8, 0x9f, 0x82, 0x9a, 0xc6, 0xf7, 0x56, 0x9b, 0xc6, 0xaf, 0x59, 0x00, 0x56, 0x0e, 0x7e, + 0xba, 0x76, 0xa4, 0x99, 0x7b, 0xb7, 0x0f, 0x7a, 0xe9, 0xee, 0x41, 0x2f, 0xfd, 0xfe, 0xa0, 0x97, + 0x7e, 0x1e, 0xe9, 0xda, 0xed, 0x48, 0xd7, 0xee, 0x46, 0xba, 0xf6, 0xd7, 0x48, 0xd7, 0x7e, 0xf9, + 0x5b, 0x2f, 0x7d, 0xfb, 0x8a, 0x6a, 0xfa, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x1b, 0xe9, 0x2f, + 0xf1, 0x61, 0x08, 0x00, 0x00, } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/generated.proto index 7782fd5253..d14d5de30e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,11 +21,12 @@ syntax = 'proto2'; package k8s.io.kubernetes.pkg.apis.policy.v1beta1; -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; @@ -35,15 +36,15 @@ option go_package = "v1beta1"; // created by POSTing to .../pods//evictions. message Eviction { // ObjectMeta describes the pod that is being evicted. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // DeleteOptions may be provided - optional k8s.io.kubernetes.pkg.api.v1.DeleteOptions deleteOptions = 2; + optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; } // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods message PodDisruptionBudget { - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the PodDisruptionBudget. optional PodDisruptionBudgetSpec spec = 2; @@ -54,7 +55,7 @@ message PodDisruptionBudget { // PodDisruptionBudgetList is a collection of PodDisruptionBudgets. message PodDisruptionBudgetList { - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated PodDisruptionBudget items = 2; } @@ -65,11 +66,11 @@ message PodDisruptionBudgetSpec { // "selector" will still be available after the eviction, i.e. even in the // absence of the evicted pod. So for example you can prevent all voluntary // evictions by specifying "100%". - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString minAvailable = 1; + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; // Label query over pods whose evictions are managed by the disruption // budget. 
- optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 2; + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; } // PodDisruptionBudgetStatus represents information about the status of a @@ -91,7 +92,7 @@ message PodDisruptionBudgetStatus { // the list automatically by PodDisruptionBudget controller after some time. // If everything goes smooth this map should be empty for the most of the time. // Large number of entries in the map may indicate problems with pod deletions. - map<string, k8s.io.kubernetes.pkg.api.unversioned.Time> disruptedPods = 2; + map<string, k8s.io.apimachinery.pkg.apis.meta.v1.Time> disruptedPods = 2; // Number of pod disruptions that are currently allowed. optional int32 disruptionsAllowed = 3; diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/register.go index d293f80497..52bd65c8ba 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/register.go @@ -17,17 +17,21 @@ limitations under the License. package v1beta1 import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName is the group name use in this package const GroupName = "policy" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1beta1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} var ( SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) @@ -40,11 +44,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &PodDisruptionBudget{}, &PodDisruptionBudgetList{}, &Eviction{}, - &v1.ListOptions{}, - &v1.DeleteOptions{}, - &v1.ExportOptions{}, ) // Add the watch version that applies - versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/types.generated.go index 1918cba560..049bfe8e69 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/types.generated.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/types.generated.go @@ -25,10 +25,9 @@ import ( "errors" "fmt" codec1978 "github.com/ugorji/go/codec" - pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg3_v1 "k8s.io/kubernetes/pkg/api/v1" - pkg4_types "k8s.io/kubernetes/pkg/types" - pkg1_intstr "k8s.io/kubernetes/pkg/util/intstr" + pkg2_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg3_types "k8s.io/apimachinery/pkg/types" + pkg1_intstr "k8s.io/apimachinery/pkg/util/intstr" "reflect" "runtime" time "time" @@ -64,12 +63,11 @@ func init() { panic(err) } if false { // reference the types, but skip this branch at build/run time - var v0 pkg2_unversioned.LabelSelector - var v1 pkg3_v1.ObjectMeta - var v2 pkg4_types.UID - var v3 pkg1_intstr.IntOrString - var v4 time.Time - _, _, _, _, _ = v0, v1, v2, v3, v4 + var v0 pkg2_v1.LabelSelector + var v1 pkg3_types.UID + var v2 pkg1_intstr.IntOrString + var v3 time.Time + _, _, _, _ = v0, v1, v2, v3 } } @@
-186,25 +184,25 @@ func (x *PodDisruptionBudgetSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym11 := z.DecBinary() - _ = yym11 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct12 := r.ContainerType() - if yyct12 == codecSelferValueTypeMap1234 { - yyl12 := r.ReadMapStart() - if yyl12 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl12, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct12 == codecSelferValueTypeArray1234 { - yyl12 := r.ReadArrayStart() - if yyl12 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl12, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -216,12 +214,12 @@ func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys13Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys13Slc - var yyhl13 bool = l >= 0 - for yyj13 := 0; ; yyj13++ { - if yyhl13 { - if yyj13 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -230,23 +228,23 @@ func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Dec } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys13Slc = r.DecodeBytes(yys13Slc, true, true) - yys13 := string(yys13Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys13 { + switch yys3 { case "minAvailable": if r.TryDecodeAsNil() { x.MinAvailable = pkg1_intstr.IntOrString{} } else { - yyv14 := &x.MinAvailable - yym15 := z.DecBinary() - _ = yym15 + yyv4 := &x.MinAvailable + yym5 := z.DecBinary() + _ = yym5 if false { - } else if z.HasExtensions() && z.DecExt(yyv14) { - } else if !yym15 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv14) + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv4) } else { - z.DecFallback(yyv14, false) + z.DecFallback(yyv4, false) } } case "selector": @@ -256,10 +254,10 @@ func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Dec } } else { if x.Selector == nil { - x.Selector = new(pkg2_unversioned.LabelSelector) + x.Selector = new(pkg2_v1.LabelSelector) } - yym17 := z.DecBinary() - _ = yym17 + yym7 := z.DecBinary() + _ = yym7 if false { } else if z.HasExtensions() && z.DecExt(x.Selector) { } else { @@ -267,9 +265,9 @@ func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Dec } } default: - z.DecStructFieldNotFound(-1, yys13) - } // end switch yys13 - } // end for yyj13 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -277,16 +275,16 @@ func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromArray(l int, d *codec1978.D var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj18 int - var yyb18 bool - var yyhl18 bool = l >= 
0 - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb18 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb18 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -294,24 +292,24 @@ func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.MinAvailable = pkg1_intstr.IntOrString{} } else { - yyv19 := &x.MinAvailable - yym20 := z.DecBinary() - _ = yym20 + yyv9 := &x.MinAvailable + yym10 := z.DecBinary() + _ = yym10 if false { - } else if z.HasExtensions() && z.DecExt(yyv19) { - } else if !yym20 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv19) + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv9) } else { - z.DecFallback(yyv19, false) + z.DecFallback(yyv9, false) } } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb18 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb18 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -322,10 +320,10 @@ func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromArray(l int, d *codec1978.D } } else { if x.Selector == nil { - x.Selector = new(pkg2_unversioned.LabelSelector) + x.Selector = new(pkg2_v1.LabelSelector) } - yym22 := z.DecBinary() - _ = yym22 + yym12 := z.DecBinary() + _ = yym12 if false { } else if z.HasExtensions() && z.DecExt(x.Selector) { } else { @@ -333,17 +331,17 @@ func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromArray(l int, d *codec1978.D } } for { - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb18 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb18 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj18-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -355,35 +353,35 @@ func (x *PodDisruptionBudgetStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym23 := z.EncBinary() - _ = yym23 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep24 := !z.EncBinary() - yy2arr24 := z.EncBasicHandle().StructToArray - var yyq24 [6]bool - _, _, _ = yysep24, yyq24, yy2arr24 - const yyr24 bool = false - yyq24[0] = x.ObservedGeneration != 0 - var yynn24 int - if yyr24 || yy2arr24 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ObservedGeneration != 0 + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(6) } else { - yynn24 = 5 - for _, b := range yyq24 { + yynn2 = 5 + for _, b := range yyq2 { if b { - yynn24++ + yynn2++ } } - r.EncodeMapStart(yynn24) - yynn24 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr24 || yy2arr24 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq24[0] { - yym26 := z.EncBinary() - _ = yym26 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeInt(int64(x.ObservedGeneration)) @@ -392,28 +390,28 @@ func (x *PodDisruptionBudgetStatus) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq24[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym27 := z.EncBinary() - _ = yym27 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeInt(int64(x.ObservedGeneration)) } } } - if yyr24 || yy2arr24 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.DisruptedPods == nil { r.EncodeNil() } else { - yym29 := z.EncBinary() - _ = yym29 + yym7 := z.EncBinary() + _ = yym7 if false { } else { - h.encMapstringunversioned_Time((map[string]pkg2_unversioned.Time)(x.DisruptedPods), e) + h.encMapstringv1_Time((map[string]pkg2_v1.Time)(x.DisruptedPods), e) } } } else { @@ -423,18 +421,18 @@ func (x *PodDisruptionBudgetStatus) CodecEncodeSelf(e *codec1978.Encoder) { if x.DisruptedPods == nil { r.EncodeNil() } else { - yym30 := z.EncBinary() - _ = yym30 + yym8 := z.EncBinary() + _ = yym8 if false { } else { - h.encMapstringunversioned_Time((map[string]pkg2_unversioned.Time)(x.DisruptedPods), e) + h.encMapstringv1_Time((map[string]pkg2_v1.Time)(x.DisruptedPods), e) } } } - if yyr24 || yy2arr24 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym32 := z.EncBinary() - _ = yym32 + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeInt(int64(x.PodDisruptionsAllowed)) @@ -443,17 +441,17 @@ func (x *PodDisruptionBudgetStatus) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("disruptionsAllowed")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym33 := z.EncBinary() - _ = yym33 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeInt(int64(x.PodDisruptionsAllowed)) } } - if yyr24 || yy2arr24 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym35 := z.EncBinary() - _ = yym35 + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeInt(int64(x.CurrentHealthy)) @@ -462,17 +460,17 @@ func (x *PodDisruptionBudgetStatus) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("currentHealthy")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym36 := z.EncBinary() - _ = yym36 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeInt(int64(x.CurrentHealthy)) } } - if yyr24 || yy2arr24 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym38 := z.EncBinary() - _ = yym38 + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeInt(int64(x.DesiredHealthy)) @@ -481,17 +479,17 @@ func (x *PodDisruptionBudgetStatus) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("desiredHealthy")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym39 := z.EncBinary() - _ = yym39 + yym17 := z.EncBinary() + _ = yym17 if false { } else { r.EncodeInt(int64(x.DesiredHealthy)) } } - if yyr24 || yy2arr24 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym41 := z.EncBinary() - _ = yym41 + yym19 := z.EncBinary() + _ = yym19 if false { } else { r.EncodeInt(int64(x.ExpectedPods)) @@ -500,14 +498,14 @@ func (x *PodDisruptionBudgetStatus) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("expectedPods")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym42 := z.EncBinary() - _ = yym42 + 
yym20 := z.EncBinary() + _ = yym20 if false { } else { r.EncodeInt(int64(x.ExpectedPods)) } } - if yyr24 || yy2arr24 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -520,25 +518,25 @@ func (x *PodDisruptionBudgetStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym43 := z.DecBinary() - _ = yym43 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct44 := r.ContainerType() - if yyct44 == codecSelferValueTypeMap1234 { - yyl44 := r.ReadMapStart() - if yyl44 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl44, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct44 == codecSelferValueTypeArray1234 { - yyl44 := r.ReadArrayStart() - if yyl44 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl44, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -550,12 +548,12 @@ func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromMap(l int, d *codec1978.D var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys45Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys45Slc - var yyhl45 bool = l >= 0 - for yyj45 := 0; ; yyj45++ { - if yyhl45 { - if yyj45 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -564,56 +562,86 @@ func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromMap(l int, d *codec1978.D } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys45Slc = r.DecodeBytes(yys45Slc, true, true) - yys45 := string(yys45Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys45 { + switch yys3 { case "observedGeneration": if r.TryDecodeAsNil() { x.ObservedGeneration = 0 } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) + yyv4 := &x.ObservedGeneration + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } } case "disruptedPods": if r.TryDecodeAsNil() { x.DisruptedPods = nil } else { - yyv47 := &x.DisruptedPods - yym48 := z.DecBinary() - _ = yym48 + yyv6 := &x.DisruptedPods + yym7 := z.DecBinary() + _ = yym7 if false { } else { - h.decMapstringunversioned_Time((*map[string]pkg2_unversioned.Time)(yyv47), d) + h.decMapstringv1_Time((*map[string]pkg2_v1.Time)(yyv6), d) } } case "disruptionsAllowed": if r.TryDecodeAsNil() { x.PodDisruptionsAllowed = 0 } else { - x.PodDisruptionsAllowed = int32(r.DecodeInt(32)) + yyv8 := &x.PodDisruptionsAllowed + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } } case "currentHealthy": if r.TryDecodeAsNil() { x.CurrentHealthy = 0 } else { - x.CurrentHealthy = int32(r.DecodeInt(32)) + yyv10 := &x.CurrentHealthy + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } } case "desiredHealthy": if r.TryDecodeAsNil() { 
x.DesiredHealthy = 0 } else { - x.DesiredHealthy = int32(r.DecodeInt(32)) + yyv12 := &x.DesiredHealthy + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(yyv12)) = int32(r.DecodeInt(32)) + } } case "expectedPods": if r.TryDecodeAsNil() { x.ExpectedPods = 0 } else { - x.ExpectedPods = int32(r.DecodeInt(32)) + yyv14 := &x.ExpectedPods + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } } default: - z.DecStructFieldNotFound(-1, yys45) - } // end switch yys45 - } // end for yyj45 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -621,16 +649,16 @@ func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromArray(l int, d *codec1978 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj53 int - var yyb53 bool - var yyhl53 bool = l >= 0 - yyj53++ - if yyhl53 { - yyb53 = yyj53 > l + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb53 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb53 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -638,15 +666,21 @@ func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromArray(l int, d *codec1978 if r.TryDecodeAsNil() { x.ObservedGeneration = 0 } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) + yyv17 := &x.ObservedGeneration + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int64)(yyv17)) = int64(r.DecodeInt(64)) + } } - yyj53++ - if yyhl53 { - yyb53 = yyj53 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb53 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb53 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -654,21 +688,21 @@ func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromArray(l int, d *codec1978 if r.TryDecodeAsNil() { x.DisruptedPods = nil } else { - yyv55 := &x.DisruptedPods - yym56 := z.DecBinary() - _ = yym56 + yyv19 := &x.DisruptedPods + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decMapstringunversioned_Time((*map[string]pkg2_unversioned.Time)(yyv55), d) + h.decMapstringv1_Time((*map[string]pkg2_v1.Time)(yyv19), d) } } - yyj53++ - if yyhl53 { - yyb53 = yyj53 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb53 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb53 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -676,15 +710,21 @@ func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromArray(l int, d *codec1978 if r.TryDecodeAsNil() { x.PodDisruptionsAllowed = 0 } else { - x.PodDisruptionsAllowed = int32(r.DecodeInt(32)) + yyv21 := &x.PodDisruptionsAllowed + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } } - yyj53++ - if yyhl53 { - yyb53 = yyj53 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb53 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb53 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -692,15 +732,21 @@ func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromArray(l int, d *codec1978 if r.TryDecodeAsNil() { x.CurrentHealthy = 0 } else { - x.CurrentHealthy = int32(r.DecodeInt(32)) + yyv23 := &x.CurrentHealthy + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } } - yyj53++ - if yyhl53 { - yyb53 = yyj53 > l + yyj16++ + if 
yyhl16 { + yyb16 = yyj16 > l } else { - yyb53 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb53 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -708,15 +754,21 @@ func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromArray(l int, d *codec1978 if r.TryDecodeAsNil() { x.DesiredHealthy = 0 } else { - x.DesiredHealthy = int32(r.DecodeInt(32)) + yyv25 := &x.DesiredHealthy + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } } - yyj53++ - if yyhl53 { - yyb53 = yyj53 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb53 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb53 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -724,20 +776,26 @@ func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromArray(l int, d *codec1978 if r.TryDecodeAsNil() { x.ExpectedPods = 0 } else { - x.ExpectedPods = int32(r.DecodeInt(32)) + yyv27 := &x.ExpectedPods + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(yyv27)) = int32(r.DecodeInt(32)) + } } for { - yyj53++ - if yyhl53 { - yyb53 = yyj53 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb53 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb53 { + if yyb16 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj53-1, "") + z.DecStructFieldNotFound(yyj16-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -749,39 +807,39 @@ func (x *PodDisruptionBudget) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym61 := z.EncBinary() - _ = yym61 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep62 := !z.EncBinary() - yy2arr62 := z.EncBasicHandle().StructToArray - var yyq62 [5]bool - _, _, _ = yysep62, yyq62, yy2arr62 - const yyr62 bool = false - yyq62[0] = x.Kind != "" - yyq62[1] = x.APIVersion != "" - yyq62[2] = true - yyq62[3] = true - yyq62[4] = true - var yynn62 int - if yyr62 || yy2arr62 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn62 = 0 - for _, b := range yyq62 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn62++ + yynn2++ } } - r.EncodeMapStart(yynn62) - yynn62 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr62 || yy2arr62 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq62[0] { - yym64 := z.EncBinary() - _ = yym64 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -790,23 +848,23 @@ func (x *PodDisruptionBudget) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq62[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym65 := z.EncBinary() - _ = yym65 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr62 || yy2arr62 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq62[1] { - yym67 := z.EncBinary() - _ = yym67 + if 
yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -815,70 +873,82 @@ func (x *PodDisruptionBudget) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq62[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym68 := z.EncBinary() - _ = yym68 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr62 || yy2arr62 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq62[2] { - yy70 := &x.ObjectMeta - yy70.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq62[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy71 := &x.ObjectMeta - yy71.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr62 || yy2arr62 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq62[3] { - yy73 := &x.Spec - yy73.CodecEncodeSelf(e) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq62[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy74 := &x.Spec - yy74.CodecEncodeSelf(e) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) } } - if yyr62 || yy2arr62 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq62[4] { - yy76 := &x.Status - yy76.CodecEncodeSelf(e) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { - if yyq62[4] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy77 := &x.Status - yy77.CodecEncodeSelf(e) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) } } - if yyr62 || yy2arr62 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -891,25 +961,25 @@ func (x *PodDisruptionBudget) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym78 := z.DecBinary() - _ = yym78 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct79 := r.ContainerType() - if yyct79 == codecSelferValueTypeMap1234 { - yyl79 := r.ReadMapStart() - if yyl79 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl79, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct79 == codecSelferValueTypeArray1234 { - yyl79 := r.ReadArrayStart() - if yyl79 == 0 { + } else if yyct2 == 
codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl79, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -921,12 +991,12 @@ func (x *PodDisruptionBudget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys80Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys80Slc - var yyhl80 bool = l >= 0 - for yyj80 := 0; ; yyj80++ { - if yyhl80 { - if yyj80 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -935,47 +1005,65 @@ func (x *PodDisruptionBudget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys80Slc = r.DecodeBytes(yys80Slc, true, true) - yys80 := string(yys80Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys80 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv83 := &x.ObjectMeta - yyv83.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "spec": if r.TryDecodeAsNil() { x.Spec = PodDisruptionBudgetSpec{} } else { - yyv84 := &x.Spec - yyv84.CodecDecodeSelf(d) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { x.Status = PodDisruptionBudgetStatus{} } else { - yyv85 := &x.Status - yyv85.CodecDecodeSelf(d) + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys80) - } // end switch yys80 - } // end for yyj80 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -983,16 +1071,16 @@ func (x *PodDisruptionBudget) codecDecodeSelfFromArray(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj86 int - var yyb86 bool - var yyhl86 bool = l >= 0 - yyj86++ - if yyhl86 { - yyb86 = yyj86 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb86 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb86 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1000,15 +1088,21 @@ func (x *PodDisruptionBudget) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj86++ - if yyhl86 { - yyb86 = yyj86 > l + yyj12++ + if yyhl12 { 
+ yyb12 = yyj12 > l } else { - yyb86 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb86 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1016,32 +1110,44 @@ func (x *PodDisruptionBudget) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj86++ - if yyhl86 { - yyb86 = yyj86 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb86 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb86 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv89 := &x.ObjectMeta - yyv89.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj86++ - if yyhl86 { - yyb86 = yyj86 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb86 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb86 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1049,16 +1155,16 @@ func (x *PodDisruptionBudget) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Spec = PodDisruptionBudgetSpec{} } else { - yyv90 := &x.Spec - yyv90.CodecDecodeSelf(d) + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) } - yyj86++ - if yyhl86 { - yyb86 = yyj86 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb86 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb86 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1066,21 +1172,21 @@ func (x *PodDisruptionBudget) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Status = PodDisruptionBudgetStatus{} } else { - yyv91 := &x.Status - yyv91.CodecDecodeSelf(d) + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) } for { - yyj86++ - if yyhl86 { - yyb86 = yyj86 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb86 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb86 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj86-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -1092,37 +1198,37 @@ func (x *PodDisruptionBudgetList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym92 := z.EncBinary() - _ = yym92 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep93 := !z.EncBinary() - yy2arr93 := z.EncBasicHandle().StructToArray - var yyq93 [4]bool - _, _, _ = yysep93, yyq93, yy2arr93 - const yyr93 bool = false - yyq93[0] = x.Kind != "" - yyq93[1] = x.APIVersion != "" - yyq93[2] = true - var yynn93 int - if yyr93 || yy2arr93 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn93 = 1 - for _, b := range yyq93 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn93++ + yynn2++ } } - r.EncodeMapStart(yynn93) - yynn93 
= 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr93 || yy2arr93 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq93[0] { - yym95 := z.EncBinary() - _ = yym95 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -1131,23 +1237,23 @@ func (x *PodDisruptionBudgetList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq93[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym96 := z.EncBinary() - _ = yym96 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr93 || yy2arr93 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq93[1] { - yym98 := z.EncBinary() - _ = yym98 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -1156,54 +1262,54 @@ func (x *PodDisruptionBudgetList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq93[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym99 := z.EncBinary() - _ = yym99 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr93 || yy2arr93 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq93[2] { - yy101 := &x.ListMeta - yym102 := z.EncBinary() - _ = yym102 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy101) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy101) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq93[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy103 := &x.ListMeta - yym104 := z.EncBinary() - _ = yym104 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy103) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy103) + z.EncFallback(yy12) } } } - if yyr93 || yy2arr93 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym106 := z.EncBinary() - _ = yym106 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e) @@ -1216,15 +1322,15 @@ func (x *PodDisruptionBudgetList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym107 := z.EncBinary() - _ = yym107 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e) } } } - if yyr93 || yy2arr93 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -1237,25 +1343,25 @@ func (x *PodDisruptionBudgetList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) 
_, _, _ = h, z, r - yym108 := z.DecBinary() - _ = yym108 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct109 := r.ContainerType() - if yyct109 == codecSelferValueTypeMap1234 { - yyl109 := r.ReadMapStart() - if yyl109 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl109, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct109 == codecSelferValueTypeArray1234 { - yyl109 := r.ReadArrayStart() - if yyl109 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl109, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -1267,12 +1373,12 @@ func (x *PodDisruptionBudgetList) codecDecodeSelfFromMap(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys110Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys110Slc - var yyhl110 bool = l >= 0 - for yyj110 := 0; ; yyj110++ { - if yyhl110 { - if yyj110 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -1281,51 +1387,63 @@ func (x *PodDisruptionBudgetList) codecDecodeSelfFromMap(l int, d *codec1978.Dec } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys110Slc = r.DecodeBytes(yys110Slc, true, true) - yys110 := string(yys110Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys110 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv113 := &x.ListMeta - yym114 := z.DecBinary() - _ = yym114 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv113) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv113, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv115 := &x.Items - yym116 := z.DecBinary() - _ = yym116 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv115), d) + h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys110) - } // end switch yys110 - } // end for yyj110 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -1333,16 +1451,16 @@ func (x *PodDisruptionBudgetList) codecDecodeSelfFromArray(l int, d *codec1978.D var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ 
= h, z, r - var yyj117 int - var yyb117 bool - var yyhl117 bool = l >= 0 - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb117 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb117 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1350,15 +1468,21 @@ func (x *PodDisruptionBudgetList) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb117 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb117 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1366,38 +1490,44 @@ func (x *PodDisruptionBudgetList) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb117 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb117 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg2_v1.ListMeta{} } else { - yyv120 := &x.ListMeta - yym121 := z.DecBinary() - _ = yym121 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv120) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv120, false) + z.DecFallback(yyv17, false) } } - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb117 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb117 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1405,26 +1535,26 @@ func (x *PodDisruptionBudgetList) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Items = nil } else { - yyv122 := &x.Items - yym123 := z.DecBinary() - _ = yym123 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv122), d) + h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv19), d) } } for { - yyj117++ - if yyhl117 { - yyb117 = yyj117 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb117 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb117 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj117-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -1436,38 +1566,38 @@ func (x *Eviction) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym124 := z.EncBinary() - _ = yym124 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep125 := !z.EncBinary() - yy2arr125 := z.EncBasicHandle().StructToArray - var yyq125 [4]bool - _, _, _ = yysep125, yyq125, yy2arr125 - const yyr125 bool = false - yyq125[0] = x.Kind != "" - yyq125[1] = 
x.APIVersion != "" - yyq125[2] = true - yyq125[3] = x.DeleteOptions != nil - var yynn125 int - if yyr125 || yy2arr125 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = x.DeleteOptions != nil + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn125 = 0 - for _, b := range yyq125 { + yynn2 = 0 + for _, b := range yyq2 { if b { - yynn125++ + yynn2++ } } - r.EncodeMapStart(yynn125) - yynn125 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr125 || yy2arr125 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq125[0] { - yym127 := z.EncBinary() - _ = yym127 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -1476,23 +1606,23 @@ func (x *Eviction) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq125[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym128 := z.EncBinary() - _ = yym128 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr125 || yy2arr125 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq125[1] { - yym130 := z.EncBinary() - _ = yym130 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -1501,59 +1631,83 @@ func (x *Eviction) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq125[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym131 := z.EncBinary() - _ = yym131 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr125 || yy2arr125 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq125[2] { - yy133 := &x.ObjectMeta - yy133.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq125[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy134 := &x.ObjectMeta - yy134.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr125 || yy2arr125 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq125[3] { + if yyq2[3] { if x.DeleteOptions == nil { r.EncodeNil() } else { - x.DeleteOptions.CodecEncodeSelf(e) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.EncExt(x.DeleteOptions) { + } else { + z.EncFallback(x.DeleteOptions) + } } } else { r.EncodeNil() } } else { - if yyq125[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) 
r.EncodeString(codecSelferC_UTF81234, string("deleteOptions")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.DeleteOptions == nil { r.EncodeNil() } else { - x.DeleteOptions.CodecEncodeSelf(e) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(x.DeleteOptions) { + } else { + z.EncFallback(x.DeleteOptions) + } } } } - if yyr125 || yy2arr125 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -1566,25 +1720,25 @@ func (x *Eviction) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym136 := z.DecBinary() - _ = yym136 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct137 := r.ContainerType() - if yyct137 == codecSelferValueTypeMap1234 { - yyl137 := r.ReadMapStart() - if yyl137 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl137, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct137 == codecSelferValueTypeArray1234 { - yyl137 := r.ReadArrayStart() - if yyl137 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl137, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -1596,12 +1750,12 @@ func (x *Eviction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys138Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys138Slc - var yyhl138 bool = l >= 0 - for yyj138 := 0; ; yyj138++ { - if yyhl138 { - if yyj138 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -1610,28 +1764,46 @@ func (x *Eviction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys138Slc = r.DecodeBytes(yys138Slc, true, true) - yys138 := string(yys138Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys138 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv141 := &x.ObjectMeta - yyv141.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "deleteOptions": if r.TryDecodeAsNil() { @@ -1640,14 +1812,20 @@ func (x *Eviction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } else { 
if x.DeleteOptions == nil { - x.DeleteOptions = new(pkg3_v1.DeleteOptions) + x.DeleteOptions = new(pkg2_v1.DeleteOptions) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.DecExt(x.DeleteOptions) { + } else { + z.DecFallback(x.DeleteOptions, false) } - x.DeleteOptions.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys138) - } // end switch yys138 - } // end for yyj138 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -1655,16 +1833,16 @@ func (x *Eviction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj143 int - var yyb143 bool - var yyhl143 bool = l >= 0 - yyj143++ - if yyhl143 { - yyb143 = yyj143 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb143 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb143 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1672,15 +1850,21 @@ func (x *Eviction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj143++ - if yyhl143 { - yyb143 = yyj143 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb143 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb143 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1688,32 +1872,44 @@ func (x *Eviction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj143++ - if yyhl143 { - yyb143 = yyj143 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb143 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb143 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} + x.ObjectMeta = pkg2_v1.ObjectMeta{} } else { - yyv146 := &x.ObjectMeta - yyv146.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj143++ - if yyhl143 { - yyb143 = yyj143 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb143 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb143 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1724,145 +1920,163 @@ func (x *Eviction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } } else { if x.DeleteOptions == nil { - x.DeleteOptions = new(pkg3_v1.DeleteOptions) + x.DeleteOptions = new(pkg2_v1.DeleteOptions) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(x.DeleteOptions) { + } else { + z.DecFallback(x.DeleteOptions, false) } - x.DeleteOptions.CodecDecodeSelf(d) } for { - yyj143++ - if yyhl143 { - yyb143 = yyj143 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb143 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb143 { + if yyb12 { break } 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj143-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) encMapstringunversioned_Time(v map[string]pkg2_unversioned.Time, e *codec1978.Encoder) { +func (x codecSelfer1234) encMapstringv1_Time(v map[string]pkg2_v1.Time, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeMapStart(len(v)) - for yyk148, yyv148 := range v { + for yyk1, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym149 := z.EncBinary() - _ = yym149 + yym2 := z.EncBinary() + _ = yym2 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(yyk148)) + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) } z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy150 := &yyv148 - yym151 := z.EncBinary() - _ = yym151 + yy3 := &yyv1 + yym4 := z.EncBinary() + _ = yym4 if false { - } else if z.HasExtensions() && z.EncExt(yy150) { - } else if yym151 { - z.EncBinaryMarshal(yy150) - } else if !yym151 && z.IsJSONHandle() { - z.EncJSONMarshal(yy150) + } else if z.HasExtensions() && z.EncExt(yy3) { + } else if yym4 { + z.EncBinaryMarshal(yy3) + } else if !yym4 && z.IsJSONHandle() { + z.EncJSONMarshal(yy3) } else { - z.EncFallback(yy150) + z.EncFallback(yy3) } } z.EncSendContainerState(codecSelfer_containerMapEnd1234) } -func (x codecSelfer1234) decMapstringunversioned_Time(v *map[string]pkg2_unversioned.Time, d *codec1978.Decoder) { +func (x codecSelfer1234) decMapstringv1_Time(v *map[string]pkg2_v1.Time, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv152 := *v - yyl152 := r.ReadMapStart() - yybh152 := z.DecBasicHandle() - if yyv152 == nil { - yyrl152, _ := z.DecInferLen(yyl152, yybh152.MaxInitLen, 40) - yyv152 = make(map[string]pkg2_unversioned.Time, yyrl152) - *v = yyv152 - } - var yymk152 string - var yymv152 pkg2_unversioned.Time - var yymg152 bool - if yybh152.MapValueReset { - yymg152 = true - } - if yyl152 > 0 { - for yyj152 := 0; yyj152 < yyl152; yyj152++ { + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string]pkg2_v1.Time, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 pkg2_v1.Time + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { z.DecSendContainerState(codecSelfer_containerMapKey1234) if r.TryDecodeAsNil() { - yymk152 = "" + yymk1 = "" } else { - yymk152 = string(r.DecodeString()) + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } } - if yymg152 { - yymv152 = yyv152[yymk152] + if yymg1 { + yymv1 = yyv1[yymk1] } else { - yymv152 = pkg2_unversioned.Time{} + yymv1 = pkg2_v1.Time{} } z.DecSendContainerState(codecSelfer_containerMapValue1234) if r.TryDecodeAsNil() { - yymv152 = pkg2_unversioned.Time{} + yymv1 = pkg2_v1.Time{} } else { - yyv154 := &yymv152 - yym155 := z.DecBinary() - _ = yym155 + yyv4 := &yymv1 + yym5 := z.DecBinary() + _ = yym5 if false { - } else if z.HasExtensions() && z.DecExt(yyv154) { - } else if yym155 { - z.DecBinaryUnmarshal(yyv154) - } else if !yym155 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv154) + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if yym5 { + z.DecBinaryUnmarshal(yyv4) + } else if !yym5 && 
z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv4) } else { - z.DecFallback(yyv154, false) + z.DecFallback(yyv4, false) } } - if yyv152 != nil { - yyv152[yymk152] = yymv152 + if yyv1 != nil { + yyv1[yymk1] = yymv1 } } - } else if yyl152 < 0 { - for yyj152 := 0; !r.CheckBreak(); yyj152++ { + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { z.DecSendContainerState(codecSelfer_containerMapKey1234) if r.TryDecodeAsNil() { - yymk152 = "" + yymk1 = "" } else { - yymk152 = string(r.DecodeString()) + yyv6 := &yymk1 + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } - if yymg152 { - yymv152 = yyv152[yymk152] + if yymg1 { + yymv1 = yyv1[yymk1] } else { - yymv152 = pkg2_unversioned.Time{} + yymv1 = pkg2_v1.Time{} } z.DecSendContainerState(codecSelfer_containerMapValue1234) if r.TryDecodeAsNil() { - yymv152 = pkg2_unversioned.Time{} + yymv1 = pkg2_v1.Time{} } else { - yyv157 := &yymv152 - yym158 := z.DecBinary() - _ = yym158 + yyv8 := &yymv1 + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv157) { - } else if yym158 { - z.DecBinaryUnmarshal(yyv157) - } else if !yym158 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv157) + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) } else { - z.DecFallback(yyv157, false) + z.DecFallback(yyv8, false) } } - if yyv152 != nil { - yyv152[yymk152] = yymv152 + if yyv1 != nil { + yyv1[yymk1] = yymv1 } } } // else len==0: TODO: Should we clear map entries? @@ -1874,10 +2088,10 @@ func (x codecSelfer1234) encSlicePodDisruptionBudget(v []PodDisruptionBudget, e z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv159 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy160 := &yyv159 - yy160.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -1887,83 +2101,86 @@ func (x codecSelfer1234) decSlicePodDisruptionBudget(v *[]PodDisruptionBudget, d z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv161 := *v - yyh161, yyl161 := z.DecSliceHelperStart() - var yyc161 bool - if yyl161 == 0 { - if yyv161 == nil { - yyv161 = []PodDisruptionBudget{} - yyc161 = true - } else if len(yyv161) != 0 { - yyv161 = yyv161[:0] - yyc161 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodDisruptionBudget{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl161 > 0 { - var yyrr161, yyrl161 int - var yyrt161 bool - if yyl161 > cap(yyv161) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg161 := len(yyv161) > 0 - yyv2161 := yyv161 - yyrl161, yyrt161 = z.DecInferLen(yyl161, z.DecBasicHandle().MaxInitLen, 328) - if yyrt161 { - if yyrl161 <= cap(yyv161) { - yyv161 = yyv161[:yyrl161] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 328) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv161 = make([]PodDisruptionBudget, yyrl161) + yyv1 = make([]PodDisruptionBudget, yyrl1) } } else { - yyv161 = make([]PodDisruptionBudget, yyrl161) + yyv1 = make([]PodDisruptionBudget, yyrl1) } - yyc161 = true - yyrr161 = len(yyv161) - if 
yyrg161 { - copy(yyv161, yyv2161) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl161 != len(yyv161) { - yyv161 = yyv161[:yyl161] - yyc161 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj161 := 0 - for ; yyj161 < yyrr161; yyj161++ { - yyh161.ElemContainerState(yyj161) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv161[yyj161] = PodDisruptionBudget{} + yyv1[yyj1] = PodDisruptionBudget{} } else { - yyv162 := &yyv161[yyj161] - yyv162.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt161 { - for ; yyj161 < yyl161; yyj161++ { - yyv161 = append(yyv161, PodDisruptionBudget{}) - yyh161.ElemContainerState(yyj161) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodDisruptionBudget{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv161[yyj161] = PodDisruptionBudget{} + yyv1[yyj1] = PodDisruptionBudget{} } else { - yyv163 := &yyv161[yyj161] - yyv163.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj161 := 0 - for ; !r.CheckBreak(); yyj161++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj161 >= len(yyv161) { - yyv161 = append(yyv161, PodDisruptionBudget{}) // var yyz161 PodDisruptionBudget - yyc161 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodDisruptionBudget{}) // var yyz1 PodDisruptionBudget + yyc1 = true } - yyh161.ElemContainerState(yyj161) - if yyj161 < len(yyv161) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv161[yyj161] = PodDisruptionBudget{} + yyv1[yyj1] = PodDisruptionBudget{} } else { - yyv164 := &yyv161[yyj161] - yyv164.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -1971,16 +2188,16 @@ func (x codecSelfer1234) decSlicePodDisruptionBudget(v *[]PodDisruptionBudget, d } } - if yyj161 < len(yyv161) { - yyv161 = yyv161[:yyj161] - yyc161 = true - } else if yyj161 == 0 && yyv161 == nil { - yyv161 = []PodDisruptionBudget{} - yyc161 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodDisruptionBudget{} + yyc1 = true } } - yyh161.End() - if yyc161 { - *v = yyv161 + yyh1.End() + if yyc1 { + *v = yyv1 } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/types.go index 0e7cff8572..ca5ea37306 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/types.go @@ -17,9 +17,8 @@ limitations under the License. package v1beta1 import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/util/intstr" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" ) // PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. @@ -32,7 +31,7 @@ type PodDisruptionBudgetSpec struct { // Label query over pods whose evictions are managed by the disruption // budget. - Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` } // PodDisruptionBudgetStatus represents information about the status of a @@ -54,7 +53,7 @@ type PodDisruptionBudgetStatus struct { // the list automatically by PodDisruptionBudget controller after some time. 
// If everything goes smooth this map should be empty for the most of the time. // Large number of entries in the map may indicate problems with pod deletions. - DisruptedPods map[string]unversioned.Time `json:"disruptedPods" protobuf:"bytes,2,rep,name=disruptedPods"` + DisruptedPods map[string]metav1.Time `json:"disruptedPods" protobuf:"bytes,2,rep,name=disruptedPods"` // Number of pod disruptions that are currently allowed. PodDisruptionsAllowed int32 `json:"disruptionsAllowed" protobuf:"varint,3,opt,name=disruptionsAllowed"` @@ -73,8 +72,8 @@ type PodDisruptionBudgetStatus struct { // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods type PodDisruptionBudget struct { - unversioned.TypeMeta `json:",inline"` - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the PodDisruptionBudget. Spec PodDisruptionBudgetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -84,20 +83,23 @@ type PodDisruptionBudget struct { // PodDisruptionBudgetList is a collection of PodDisruptionBudgets. type PodDisruptionBudgetList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"` + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"` } +// +genclient=true +// +noMethods=true + // Eviction evicts a pod from its node subject to certain policies and safety constraints. // This is a subresource of Pod. A request to cause such an eviction is // created by POSTing to .../pods//evictions. type Eviction struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // ObjectMeta describes the pod that is being evicted. - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // DeleteOptions may be provided - DeleteOptions *v1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"` + DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"` } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/zz_generated.conversion.go index e3015bb0a1..258a58f2a2 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,12 +21,10 @@ limitations under the License. 
package v1beta1 import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" policy "k8s.io/kubernetes/pkg/apis/policy" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" unsafe "unsafe" ) @@ -52,11 +50,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { } func autoConvert_v1beta1_Eviction_To_policy_Eviction(in *Eviction, out *policy.Eviction, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - out.DeleteOptions = (*api.DeleteOptions)(unsafe.Pointer(in.DeleteOptions)) + out.ObjectMeta = in.ObjectMeta + out.DeleteOptions = (*v1.DeleteOptions)(unsafe.Pointer(in.DeleteOptions)) return nil } @@ -65,10 +60,7 @@ func Convert_v1beta1_Eviction_To_policy_Eviction(in *Eviction, out *policy.Evict } func autoConvert_policy_Eviction_To_v1beta1_Eviction(in *policy.Eviction, out *Eviction, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta out.DeleteOptions = (*v1.DeleteOptions)(unsafe.Pointer(in.DeleteOptions)) return nil } @@ -78,10 +70,7 @@ func Convert_policy_Eviction_To_v1beta1_Eviction(in *policy.Eviction, out *Evict } func autoConvert_v1beta1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in *PodDisruptionBudget, out *policy.PodDisruptionBudget, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -96,10 +85,7 @@ func Convert_v1beta1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in *PodDi } func autoConvert_policy_PodDisruptionBudget_To_v1beta1_PodDisruptionBudget(in *policy.PodDisruptionBudget, out *PodDisruptionBudget, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta if err := Convert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -125,7 +111,11 @@ func Convert_v1beta1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(i func autoConvert_policy_PodDisruptionBudgetList_To_v1beta1_PodDisruptionBudgetList(in *policy.PodDisruptionBudgetList, out *PodDisruptionBudgetList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]PodDisruptionBudget)(unsafe.Pointer(&in.Items)) + if in.Items == nil { + out.Items = make([]PodDisruptionBudget, 0) + } else { + out.Items = *(*[]PodDisruptionBudget)(unsafe.Pointer(&in.Items)) + } return nil } @@ -135,7 +125,7 @@ func Convert_policy_PodDisruptionBudgetList_To_v1beta1_PodDisruptionBudgetList(i func autoConvert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in *PodDisruptionBudgetSpec, out *policy.PodDisruptionBudgetSpec, s conversion.Scope) error { out.MinAvailable = in.MinAvailable - out.Selector = (*unversioned.LabelSelector)(unsafe.Pointer(in.Selector)) + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) return nil } @@ -145,7 +135,7 @@ func Convert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(i func autoConvert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec(in *policy.PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, s conversion.Scope) error { out.MinAvailable = in.MinAvailable - out.Selector = (*unversioned.LabelSelector)(unsafe.Pointer(in.Selector)) + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) return nil } @@ -155,7 +145,7 @@ func Convert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec(i func autoConvert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in *PodDisruptionBudgetStatus, out *policy.PodDisruptionBudgetStatus, s conversion.Scope) error { out.ObservedGeneration = in.ObservedGeneration - out.DisruptedPods = *(*map[string]unversioned.Time)(unsafe.Pointer(&in.DisruptedPods)) + out.DisruptedPods = *(*map[string]v1.Time)(unsafe.Pointer(&in.DisruptedPods)) out.PodDisruptionsAllowed = in.PodDisruptionsAllowed out.CurrentHealthy = in.CurrentHealthy out.DesiredHealthy = in.DesiredHealthy @@ -169,7 +159,7 @@ func Convert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStat func autoConvert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus(in *policy.PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, s conversion.Scope) error { out.ObservedGeneration = in.ObservedGeneration - out.DisruptedPods = *(*map[string]unversioned.Time)(unsafe.Pointer(&in.DisruptedPods)) + out.DisruptedPods = *(*map[string]v1.Time)(unsafe.Pointer(&in.DisruptedPods)) out.PodDisruptionsAllowed = in.PodDisruptionsAllowed out.CurrentHealthy = in.CurrentHealthy out.DesiredHealthy = in.DesiredHealthy diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/zz_generated.deepcopy.go index c60bac0bd8..395e6689e3 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,10 +21,9 @@ limitations under the License. package v1beta1 import ( - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" reflect "reflect" ) @@ -48,18 +47,19 @@ func DeepCopy_v1beta1_Eviction(in interface{}, out interface{}, c *conversion.Cl { in := in.(*Eviction) out := out.(*Eviction) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if in.DeleteOptions != nil { in, out := &in.DeleteOptions, &out.DeleteOptions - *out = new(v1.DeleteOptions) - if err := v1.DeepCopy_v1_DeleteOptions(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*v1.DeleteOptions) } - } else { - out.DeleteOptions = nil } return nil } @@ -69,9 +69,11 @@ func DeepCopy_v1beta1_PodDisruptionBudget(in interface{}, out interface{}, c *co { in := in.(*PodDisruptionBudget) out := out.(*PodDisruptionBudget) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_v1beta1_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -87,8 +89,7 @@ func DeepCopy_v1beta1_PodDisruptionBudgetList(in interface{}, out interface{}, c { in := in.(*PodDisruptionBudgetList) out := out.(*PodDisruptionBudgetList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodDisruptionBudget, len(*in)) @@ -97,8 +98,6 @@ func DeepCopy_v1beta1_PodDisruptionBudgetList(in interface{}, out interface{}, c return err } } - } else { - out.Items = nil } return nil } @@ -108,15 +107,14 @@ func DeepCopy_v1beta1_PodDisruptionBudgetSpec(in interface{}, out interface{}, c { in := in.(*PodDisruptionBudgetSpec) out := out.(*PodDisruptionBudgetSpec) - out.MinAvailable = in.MinAvailable + *out = *in if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*v1.LabelSelector) } - } else { - out.Selector = nil } return nil } @@ -126,20 +124,14 @@ func DeepCopy_v1beta1_PodDisruptionBudgetStatus(in interface{}, out interface{}, { in := in.(*PodDisruptionBudgetStatus) out := out.(*PodDisruptionBudgetStatus) - out.ObservedGeneration = in.ObservedGeneration + *out = *in if in.DisruptedPods != nil { in, out := &in.DisruptedPods, &out.DisruptedPods - *out = make(map[string]unversioned.Time) + *out = make(map[string]v1.Time) for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.DisruptedPods = nil } - out.PodDisruptionsAllowed = in.PodDisruptionsAllowed - out.CurrentHealthy = in.CurrentHealthy - out.DesiredHealthy = in.DesiredHealthy - out.ExpectedPods = 
in.ExpectedPods return nil } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/zz_generated.deepcopy.go index 3b840ec04d..298ec5fdab 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,10 +21,9 @@ limitations under the License. package policy import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" reflect "reflect" ) @@ -48,18 +47,19 @@ func DeepCopy_policy_Eviction(in interface{}, out interface{}, c *conversion.Clo { in := in.(*Eviction) out := out.(*Eviction) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if in.DeleteOptions != nil { in, out := &in.DeleteOptions, &out.DeleteOptions - *out = new(api.DeleteOptions) - if err := api.DeepCopy_api_DeleteOptions(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*v1.DeleteOptions) } - } else { - out.DeleteOptions = nil } return nil } @@ -69,9 +69,11 @@ func DeepCopy_policy_PodDisruptionBudget(in interface{}, out interface{}, c *con { in := in.(*PodDisruptionBudget) out := out.(*PodDisruptionBudget) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_policy_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, c); err != nil { return err @@ -87,8 +89,7 @@ func DeepCopy_policy_PodDisruptionBudgetList(in interface{}, out interface{}, c { in := in.(*PodDisruptionBudgetList) out := out.(*PodDisruptionBudgetList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodDisruptionBudget, len(*in)) @@ -97,8 +98,6 @@ func DeepCopy_policy_PodDisruptionBudgetList(in interface{}, out interface{}, c return err } } - } else { - out.Items = nil } return nil } @@ -108,15 +107,14 @@ func DeepCopy_policy_PodDisruptionBudgetSpec(in interface{}, out interface{}, c { in := in.(*PodDisruptionBudgetSpec) out := out.(*PodDisruptionBudgetSpec) - out.MinAvailable = in.MinAvailable + *out = *in if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + if newVal, err := c.DeepCopy(*in); err != nil { return err + } else { + *out = newVal.(*v1.LabelSelector) } - } else { - out.Selector = nil } return nil } @@ -126,20 +124,14 @@ func DeepCopy_policy_PodDisruptionBudgetStatus(in interface{}, out interface{}, { in := in.(*PodDisruptionBudgetStatus) out := 
out.(*PodDisruptionBudgetStatus) - out.ObservedGeneration = in.ObservedGeneration + *out = *in if in.DisruptedPods != nil { in, out := &in.DisruptedPods, &out.DisruptedPods - *out = make(map[string]unversioned.Time) + *out = make(map[string]v1.Time) for key, val := range *in { (*out)[key] = val.DeepCopy() } - } else { - out.DisruptedPods = nil } - out.PodDisruptionsAllowed = in.PodDisruptionsAllowed - out.CurrentHealthy = in.CurrentHealthy - out.DesiredHealthy = in.DesiredHealthy - out.ExpectedPods = in.ExpectedPods return nil } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/rbac/BUILD index 25f9b664af..2373bf70e5 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -21,10 +18,29 @@ go_library( ], tags = ["automanaged"], deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/watch/versioned:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/util/sets", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/rbac/install:all-srcs", + "//pkg/apis/rbac/v1alpha1:all-srcs", + "//pkg/apis/rbac/v1beta1:all-srcs", + "//pkg/apis/rbac/validation:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/rbac/OWNERS new file mode 100755 index 0000000000..a35477b920 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/OWNERS @@ -0,0 +1,17 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- deads2k +- sttts +- ncdc +- timothysc +- dims +- krousey +- mml +- mbohlool +- david-mcmahon +- ericchiang +- lixiaobing10051267 +- jianhuiz +- liggitt diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go index aefe6e09b1..af9ffe2067 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go @@ -15,7 +15,5 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package,register -// +k8s:openapi-gen=true - // +groupName=rbac.authorization.k8s.io package rbac diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/helpers.go index 93ed6f9954..8b4677bb86 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/helpers.go @@ -20,12 +20,13 @@ import ( "fmt" "strings" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" ) -func RoleRefGroupKind(roleRef RoleRef) unversioned.GroupKind { - return unversioned.GroupKind{Group: roleRef.APIGroup, Kind: roleRef.Kind} +func RoleRefGroupKind(roleRef RoleRef) schema.GroupKind { + return schema.GroupKind{Group: roleRef.APIGroup, Kind: roleRef.Kind} } func VerbMatches(rule PolicyRule, requestedVerb string) bool { @@ -97,6 +98,32 @@ func NonResourceURLMatches(rule PolicyRule, requestedURL string) bool { return false } +// subjectsStrings returns users, groups, serviceaccounts, unknown for display purposes. +func SubjectsStrings(subjects []Subject) ([]string, []string, []string, []string) { + users := []string{} + groups := []string{} + sas := []string{} + others := []string{} + + for _, subject := range subjects { + switch subject.Kind { + case ServiceAccountKind: + sas = append(sas, fmt.Sprintf("%s/%s", subject.Namespace, subject.Name)) + + case UserKind: + users = append(users, subject.Name) + + case GroupKind: + groups = append(groups, subject.Name) + + default: + others = append(others, fmt.Sprintf("%s/%s/%s", subject.Kind, subject.Namespace, subject.Name)) + } + } + + return users, groups, sas, others +} + // +k8s:deepcopy-gen=false // PolicyRuleBuilder let's us attach methods. A no-no for API types. // We use it to construct rules in code. It's more compact than trying to write them @@ -107,27 +134,27 @@ type PolicyRuleBuilder struct { func NewRule(verbs ...string) *PolicyRuleBuilder { return &PolicyRuleBuilder{ - PolicyRule: PolicyRule{Verbs: verbs}, + PolicyRule: PolicyRule{Verbs: sets.NewString(verbs...).List()}, } } func (r *PolicyRuleBuilder) Groups(groups ...string) *PolicyRuleBuilder { - r.PolicyRule.APIGroups = append(r.PolicyRule.APIGroups, groups...) + r.PolicyRule.APIGroups = combine(r.PolicyRule.APIGroups, groups) return r } func (r *PolicyRuleBuilder) Resources(resources ...string) *PolicyRuleBuilder { - r.PolicyRule.Resources = append(r.PolicyRule.Resources, resources...) + r.PolicyRule.Resources = combine(r.PolicyRule.Resources, resources) return r } func (r *PolicyRuleBuilder) Names(names ...string) *PolicyRuleBuilder { - r.PolicyRule.ResourceNames = append(r.PolicyRule.ResourceNames, names...) + r.PolicyRule.ResourceNames = combine(r.PolicyRule.ResourceNames, names) return r } func (r *PolicyRuleBuilder) URLs(urls ...string) *PolicyRuleBuilder { - r.PolicyRule.NonResourceURLs = append(r.PolicyRule.NonResourceURLs, urls...) + r.PolicyRule.NonResourceURLs = combine(r.PolicyRule.NonResourceURLs, urls) return r } @@ -139,6 +166,12 @@ func (r *PolicyRuleBuilder) RuleOrDie() PolicyRule { return ret } +func combine(s1, s2 []string) []string { + s := sets.NewString(s1...) + s.Insert(s2...) 
+ return s.List() +} + func (r *PolicyRuleBuilder) Rule() (PolicyRule, error) { if len(r.PolicyRule.Verbs) == 0 { return PolicyRule{}, fmt.Errorf("verbs are required: %#v", r.PolicyRule) @@ -175,7 +208,7 @@ type ClusterRoleBindingBuilder struct { func NewClusterBinding(clusterRoleName string) *ClusterRoleBindingBuilder { return &ClusterRoleBindingBuilder{ ClusterRoleBinding: ClusterRoleBinding{ - ObjectMeta: api.ObjectMeta{Name: clusterRoleName}, + ObjectMeta: metav1.ObjectMeta{Name: clusterRoleName}, RoleRef: RoleRef{ APIGroup: GroupName, Kind: "ClusterRole", @@ -187,14 +220,14 @@ func NewClusterBinding(clusterRoleName string) *ClusterRoleBindingBuilder { func (r *ClusterRoleBindingBuilder) Groups(groups ...string) *ClusterRoleBindingBuilder { for _, group := range groups { - r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: GroupKind, Name: group}) + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: GroupKind, APIGroup: GroupName, Name: group}) } return r } func (r *ClusterRoleBindingBuilder) Users(users ...string) *ClusterRoleBindingBuilder { for _, user := range users { - r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: UserKind, Name: user}) + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: UserKind, APIGroup: GroupName, Name: user}) } return r } @@ -221,3 +254,90 @@ func (r *ClusterRoleBindingBuilder) Binding() (ClusterRoleBinding, error) { return r.ClusterRoleBinding, nil } + +// +k8s:deepcopy-gen=false +// RoleBindingBuilder let's us attach methods. It is similar to +// ClusterRoleBindingBuilder above. +type RoleBindingBuilder struct { + RoleBinding RoleBinding +} + +// NewRoleBinding creates a RoleBinding builder that can be used +// to define the subjects of a role binding. At least one of +// the `Groups`, `Users` or `SAs` method must be called before +// calling the `Binding*` methods. +func NewRoleBinding(roleName, namespace string) *RoleBindingBuilder { + return &RoleBindingBuilder{ + RoleBinding: RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + Namespace: namespace, + }, + RoleRef: RoleRef{ + APIGroup: GroupName, + Kind: "Role", + Name: roleName, + }, + }, + } +} + +func NewRoleBindingForClusterRole(roleName, namespace string) *RoleBindingBuilder { + return &RoleBindingBuilder{ + RoleBinding: RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + Namespace: namespace, + }, + RoleRef: RoleRef{ + APIGroup: GroupName, + Kind: "ClusterRole", + Name: roleName, + }, + }, + } +} + +// Groups adds the specified groups as the subjects of the RoleBinding. +func (r *RoleBindingBuilder) Groups(groups ...string) *RoleBindingBuilder { + for _, group := range groups { + r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, Subject{Kind: GroupKind, Name: group}) + } + return r +} + +// Users adds the specified users as the subjects of the RoleBinding. +func (r *RoleBindingBuilder) Users(users ...string) *RoleBindingBuilder { + for _, user := range users { + r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, Subject{Kind: UserKind, Name: user}) + } + return r +} + +// SAs adds the specified service accounts as the subjects of the +// RoleBinding. 
+func (r *RoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *RoleBindingBuilder { + for _, saName := range serviceAccountNames { + r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, Subject{Kind: ServiceAccountKind, Namespace: namespace, Name: saName}) + } + return r +} + +// BindingOrDie calls the binding method and panics if there is an error. +func (r *RoleBindingBuilder) BindingOrDie() RoleBinding { + ret, err := r.Binding() + if err != nil { + panic(err) + } + return ret +} + +// Binding builds and returns the RoleBinding API object from the builder +// object. +func (r *RoleBindingBuilder) Binding() (RoleBinding, error) { + if len(r.RoleBinding.Subjects) == 0 { + return RoleBinding{}, fmt.Errorf("subjects are required: %#v", r.RoleBinding) + } + + return r.RoleBinding, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/BUILD index e99ae088d1..45a8aaa23f 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -15,9 +12,26 @@ go_library( srcs = ["install.go"], tags = ["automanaged"], deps = [ - "//pkg/apimachinery/announced:go_default_library", + "//pkg/api:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/apis/rbac/v1alpha1:go_default_library", - "//pkg/util/sets:go_default_library", + "//pkg/apis/rbac/v1beta1:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/announced", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/registered", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/util/sets", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/install.go index ce7b2a2fca..36067ed148 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/install.go @@ -19,25 +19,35 @@ limitations under the License. 
package install import ( - "k8s.io/kubernetes/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/apis/rbac/v1beta1" ) func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { if err := announced.NewGroupMetaFactory( &announced.GroupMetaFactoryArgs{ GroupName: rbac.GroupName, - VersionPreferenceOrder: []string{v1alpha1.SchemeGroupVersion.Version}, + VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version, v1alpha1.SchemeGroupVersion.Version}, ImportPrefix: "k8s.io/kubernetes/pkg/apis/rbac", RootScopedKinds: sets.NewString("ClusterRole", "ClusterRoleBinding"), AddInternalObjectsToScheme: rbac.AddToScheme, }, announced.VersionToSchemeFunc{ + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme, }, - ).Announce().RegisterAndEnable(); err != nil { + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { panic(err) } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/register.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/register.go index f854cfac2c..f4a838bd89 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/register.go @@ -17,24 +17,22 @@ limitations under the License. package rbac import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/watch/versioned" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) const GroupName = "rbac.authorization.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} // Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { +func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } // Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { +func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } @@ -55,11 +53,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ClusterRoleBinding{}, &ClusterRoleBindingList{}, &ClusterRoleList{}, - - &api.ListOptions{}, - &api.DeleteOptions{}, - &api.ExportOptions{}, ) - versioned.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/types.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/types.go index 361654f479..ddc2456a02 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/types.go @@ -17,9 +17,7 @@ limitations under the License. 
package rbac import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // Authorization is calculated against @@ -37,7 +35,8 @@ const ( ServiceAccountKind = "ServiceAccount" UserKind = "User" - UserAll = "*" + // AutoUpdateAnnotationKey is the name of an annotation which prevents reconciliation if set to "false" + AutoUpdateAnnotationKey = "rbac.authorization.kubernetes.io/autoupdate" ) // PolicyRule holds information that describes a policy rule, but does not contain information @@ -45,12 +44,9 @@ const ( type PolicyRule struct { // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. Verbs []string - // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. - // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. - AttributeRestrictions runtime.Object + // APIGroups is the name of the APIGroup that contains the resources. // If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. - APIGroups []string // Resources is a list of resources this rule applies to. ResourceAll represents all resources. Resources []string @@ -70,9 +66,10 @@ type Subject struct { // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". // If the Authorizer does not recognized the kind value, the Authorizer should report an error. Kind string - // APIVersion holds the API group and version of the referenced object. For non-object references such as "Group" and "User" this is - // expected to be API version of this API group. For example, "rbac/v1alpha1". - APIVersion string + // APIGroup holds the API group of the referenced subject. + // Defaults to "" for ServiceAccount subjects. + // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. + APIGroup string // Name of the object being referenced. Name string // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty @@ -94,9 +91,9 @@ type RoleRef struct { // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. type Role struct { - unversioned.TypeMeta + metav1.TypeMeta // Standard object's metadata. - api.ObjectMeta + metav1.ObjectMeta // Rules holds all the PolicyRules for this Role Rules []PolicyRule @@ -108,8 +105,8 @@ type Role struct { // It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given // namespace only have effect in that namespace. type RoleBinding struct { - unversioned.TypeMeta - api.ObjectMeta + metav1.TypeMeta + metav1.ObjectMeta // Subjects holds references to the objects the role applies to. Subjects []Subject @@ -121,9 +118,9 @@ type RoleBinding struct { // RoleBindingList is a collection of RoleBindings type RoleBindingList struct { - unversioned.TypeMeta + metav1.TypeMeta // Standard object's metadata. - unversioned.ListMeta + metav1.ListMeta // Items is a list of roleBindings Items []RoleBinding @@ -131,9 +128,9 @@ type RoleBindingList struct { // RoleList is a collection of Roles type RoleList struct { - unversioned.TypeMeta + metav1.TypeMeta // Standard object's metadata. 
- unversioned.ListMeta + metav1.ListMeta // Items is a list of roles Items []Role @@ -144,9 +141,9 @@ type RoleList struct { // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. type ClusterRole struct { - unversioned.TypeMeta + metav1.TypeMeta // Standard object's metadata. - api.ObjectMeta + metav1.ObjectMeta // Rules holds all the PolicyRules for this ClusterRole Rules []PolicyRule @@ -158,9 +155,9 @@ type ClusterRole struct { // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. type ClusterRoleBinding struct { - unversioned.TypeMeta + metav1.TypeMeta // Standard object's metadata. - api.ObjectMeta + metav1.ObjectMeta // Subjects holds references to the objects the role applies to. Subjects []Subject @@ -172,9 +169,9 @@ type ClusterRoleBinding struct { // ClusterRoleBindingList is a collection of ClusterRoleBindings type ClusterRoleBindingList struct { - unversioned.TypeMeta + metav1.TypeMeta // Standard object's metadata. - unversioned.ListMeta + metav1.ListMeta // Items is a list of ClusterRoleBindings Items []ClusterRoleBinding @@ -182,9 +179,9 @@ type ClusterRoleBindingList struct { // ClusterRoleList is a collection of ClusterRoles type ClusterRoleList struct { - unversioned.TypeMeta + metav1.TypeMeta // Standard object's metadata. - unversioned.ListMeta + metav1.ListMeta // Items is a list of ClusterRoles Items []ClusterRole diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/BUILD index b13e99eb37..e7534657f6 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/BUILD @@ -4,18 +4,18 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", "go_test", - "cgo_library", ) go_library( name = "go_default_library", srcs = [ + "conversion.go", "defaults.go", "doc.go", "generated.pb.go", + "helpers.go", "register.go", "types.generated.go", "types.go", @@ -26,14 +26,38 @@ go_library( ], tags = ["automanaged"], deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/apis/rbac:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//pkg/watch/versioned:go_default_library", "//vendor:github.com/gogo/protobuf/proto", "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/types", ], ) + +go_test( + name = "go_default_xtest", + srcs = ["conversion_test.go"], + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + "//pkg/apis/rbac:go_default_library", + "//pkg/apis/rbac/install:go_default_library", + "//pkg/apis/rbac/v1alpha1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/conversion.go new file mode 100644 index 0000000000..4c1f4d698b --- /dev/null +++ 
b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/conversion.go @@ -0,0 +1,81 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime/schema" + api "k8s.io/kubernetes/pkg/apis/rbac" +) + +// allAuthenticated matches k8s.io/apiserver/pkg/authentication/user.AllAuthenticated, +// but we don't want an client library (which must include types), depending on a server library +const allAuthenticated = "system:authenticated" + +func Convert_v1alpha1_Subject_To_rbac_Subject(in *Subject, out *api.Subject, s conversion.Scope) error { + if err := autoConvert_v1alpha1_Subject_To_rbac_Subject(in, out, s); err != nil { + return err + } + + // specifically set the APIGroup for the three subjects recognized in v1alpha1 + switch { + case in.Kind == ServiceAccountKind: + out.APIGroup = "" + case in.Kind == UserKind: + out.APIGroup = GroupName + case in.Kind == GroupKind: + out.APIGroup = GroupName + default: + // For unrecognized kinds, use the group portion of the APIVersion if we can get it + if gv, err := schema.ParseGroupVersion(in.APIVersion); err == nil { + out.APIGroup = gv.Group + } + } + + // User * in v1alpha1 will only match all authenticated users + // This is only for compatibility with old RBAC bindings + // Special treatment for * should not be included in v1beta1 + if out.Kind == UserKind && out.APIGroup == GroupName && out.Name == "*" { + out.Kind = GroupKind + out.Name = allAuthenticated + } + + return nil +} + +func Convert_rbac_Subject_To_v1alpha1_Subject(in *api.Subject, out *Subject, s conversion.Scope) error { + if err := autoConvert_rbac_Subject_To_v1alpha1_Subject(in, out, s); err != nil { + return err + } + + switch { + case in.Kind == ServiceAccountKind && in.APIGroup == "": + // Make service accounts v1 + out.APIVersion = "v1" + case in.Kind == UserKind && in.APIGroup == GroupName: + // users in the rbac API group get v1alpha + out.APIVersion = SchemeGroupVersion.String() + case in.Kind == GroupKind && in.APIGroup == GroupName: + // groups in the rbac API group get v1alpha + out.APIVersion = SchemeGroupVersion.String() + default: + // otherwise, they get an unspecified version of a group + out.APIVersion = schema.GroupVersion{Group: in.APIGroup}.String() + } + + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/defaults.go index f7e46b818b..49e934916e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/defaults.go @@ -17,7 +17,7 @@ limitations under the License. 
package v1alpha1 import ( - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) func addDefaultingFuncs(scheme *runtime.Scheme) error { @@ -25,6 +25,7 @@ func addDefaultingFuncs(scheme *runtime.Scheme) error { return scheme.AddDefaultingFuncs( SetDefaults_ClusterRoleBinding, SetDefaults_RoleBinding, + SetDefaults_Subject, ) } @@ -38,3 +39,15 @@ func SetDefaults_RoleBinding(obj *RoleBinding) { obj.RoleRef.APIGroup = GroupName } } +func SetDefaults_Subject(obj *Subject) { + if len(obj.APIVersion) == 0 { + switch obj.Kind { + case ServiceAccountKind: + obj.APIVersion = "v1" + case UserKind: + obj.APIVersion = SchemeGroupVersion.String() + case GroupKind: + obj.APIVersion = SchemeGroupVersion.String() + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.pb.go index 8c2ab6c6f8..13a1853a39 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -27,9 +27,11 @@ limitations under the License. It has these top-level messages: ClusterRole ClusterRoleBinding + ClusterRoleBindingBuilder ClusterRoleBindingList ClusterRoleList PolicyRule + PolicyRuleBuilder Role RoleBinding RoleBindingList @@ -65,48 +67,60 @@ func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBindin func (*ClusterRoleBinding) ProtoMessage() {} func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *ClusterRoleBindingBuilder) Reset() { *m = ClusterRoleBindingBuilder{} } +func (*ClusterRoleBindingBuilder) ProtoMessage() {} +func (*ClusterRoleBindingBuilder) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *PolicyRule) Reset() { *m = PolicyRule{} } func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *PolicyRuleBuilder) Reset() { *m = PolicyRuleBuilder{} } +func (*PolicyRuleBuilder) ProtoMessage() {} +func (*PolicyRuleBuilder) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *Role) Reset() { *m = Role{} } func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *RoleBinding) Reset() { *m = RoleBinding{} } func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{6} } +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *RoleList) Reset() { *m = RoleList{} } func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *RoleRef) Reset() { *m = RoleRef{} } func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func init() { proto.RegisterType((*ClusterRole)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRole") proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleBinding") + proto.RegisterType((*ClusterRoleBindingBuilder)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleBindingBuilder") proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleBindingList") proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleList") proto.RegisterType((*PolicyRule)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.PolicyRule") + proto.RegisterType((*PolicyRuleBuilder)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.PolicyRuleBuilder") proto.RegisterType((*Role)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.Role") proto.RegisterType((*RoleBinding)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.RoleBinding") proto.RegisterType((*RoleBindingList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.RoleBindingList") @@ -198,6 +212,32 @@ func (m *ClusterRoleBinding) MarshalTo(data []byte) (int, error) { return i, nil } +func (m *ClusterRoleBindingBuilder) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleBindingBuilder) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ClusterRoleBinding.Size())) + n4, err := m.ClusterRoleBinding.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + return i, nil +} + func (m *ClusterRoleBindingList) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -216,11 +256,11 @@ func (m *ClusterRoleBindingList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(data[i:]) + n5, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n4 + i += n5 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -254,11 +294,11 @@ func (m *ClusterRoleList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, 
uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(data[i:]) + n6, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n5 + i += n6 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -304,14 +344,6 @@ func (m *PolicyRule) MarshalTo(data []byte) (int, error) { i += copy(data[i:], s) } } - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.AttributeRestrictions.Size())) - n6, err := m.AttributeRestrictions.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.APIGroups) > 0 { for _, s := range m.APIGroups { data[i] = 0x1a @@ -375,6 +407,32 @@ func (m *PolicyRule) MarshalTo(data []byte) (int, error) { return i, nil } +func (m *PolicyRuleBuilder) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PolicyRuleBuilder) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.PolicyRule.Size())) + n7, err := m.PolicyRule.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + return i, nil +} + func (m *Role) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -393,11 +451,11 @@ func (m *Role) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(data[i:]) + n8, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n7 + i += n8 if len(m.Rules) > 0 { for _, msg := range m.Rules { data[i] = 0x12 @@ -431,11 +489,11 @@ func (m *RoleBinding) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n8, err := m.ObjectMeta.MarshalTo(data[i:]) + n9, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n8 + i += n9 if len(m.Subjects) > 0 { for _, msg := range m.Subjects { data[i] = 0x12 @@ -451,11 +509,11 @@ func (m *RoleBinding) MarshalTo(data []byte) (int, error) { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size())) - n9, err := m.RoleRef.MarshalTo(data[i:]) + n10, err := m.RoleRef.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n9 + i += n10 return i, nil } @@ -477,11 +535,11 @@ func (m *RoleBindingList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(data[i:]) + n11, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n10 + i += n11 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -515,11 +573,11 @@ func (m *RoleList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(data[i:]) + n12, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n11 + i += n12 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -656,6 +714,14 @@ func (m *ClusterRoleBinding) Size() (n int) { return n } +func (m *ClusterRoleBindingBuilder) Size() (n int) { + var l int + _ = l + l = m.ClusterRoleBinding.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ClusterRoleBindingList) Size() (n int) { var l int _ = l @@ -693,8 +759,6 @@ func (m *PolicyRule) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - l 
= m.AttributeRestrictions.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.APIGroups) > 0 { for _, s := range m.APIGroups { l = len(s) @@ -722,6 +786,14 @@ func (m *PolicyRule) Size() (n int) { return n } +func (m *PolicyRuleBuilder) Size() (n int) { + var l int + _ = l + l = m.PolicyRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *Role) Size() (n int) { var l int _ = l @@ -824,7 +896,7 @@ func (this *ClusterRole) String() string { return "nil" } s := strings.Join([]string{`&ClusterRole{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -835,19 +907,29 @@ func (this *ClusterRoleBinding) String() string { return "nil" } s := strings.Join([]string{`&ClusterRoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } +func (this *ClusterRoleBindingBuilder) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleBindingBuilder{`, + `ClusterRoleBinding:` + strings.Replace(strings.Replace(this.ClusterRoleBinding.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *ClusterRoleBindingList) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&ClusterRoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -858,7 +940,7 @@ func (this *ClusterRoleList) String() string { return "nil" } s := strings.Join([]string{`&ClusterRoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -870,7 +952,6 @@ func (this *PolicyRule) String() string { } s := strings.Join([]string{`&PolicyRule{`, `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, - `AttributeRestrictions:` + strings.Replace(strings.Replace(this.AttributeRestrictions.String(), "RawExtension", "k8s_io_kubernetes_pkg_runtime.RawExtension", 
1), `&`, ``, 1) + `,`, `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, @@ -879,12 +960,22 @@ func (this *PolicyRule) String() string { }, "") return s } +func (this *PolicyRuleBuilder) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PolicyRuleBuilder{`, + `PolicyRule:` + strings.Replace(strings.Replace(this.PolicyRule.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *Role) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&Role{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -895,7 +986,7 @@ func (this *RoleBinding) String() string { return "nil" } s := strings.Join([]string{`&RoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, @@ -907,7 +998,7 @@ func (this *RoleBindingList) String() string { return "nil" } s := strings.Join([]string{`&RoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -918,7 +1009,7 @@ func (this *RoleList) String() string { return "nil" } s := strings.Join([]string{`&RoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -1209,6 +1300,86 @@ func (m *ClusterRoleBinding) Unmarshal(data []byte) error { } return nil } +func (m *ClusterRoleBindingBuilder) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: ClusterRoleBindingBuilder: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBindingBuilder: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleBinding", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClusterRoleBinding.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ClusterRoleBindingList) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 @@ -1489,11 +1660,11 @@ func (m *PolicyRule) Unmarshal(data []byte) error { } m.Verbs = append(m.Verbs, string(data[iNdEx:postIndex])) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AttributeRestrictions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1503,25 +1674,24 @@ func (m *PolicyRule) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.AttributeRestrictions.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } + m.APIGroups = append(m.APIGroups, string(data[iNdEx:postIndex])) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1546,11 +1716,11 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.APIGroups = append(m.APIGroups, string(data[iNdEx:postIndex])) + m.Resources = append(m.Resources, string(data[iNdEx:postIndex])) iNdEx = postIndex - case 4: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1575,11 +1745,11 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Resources = append(m.Resources, string(data[iNdEx:postIndex])) + m.ResourceNames = append(m.ResourceNames, string(data[iNdEx:postIndex])) iNdEx = postIndex - case 5: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field ResourceNames", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1604,13 +1774,63 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceNames = append(m.ResourceNames, string(data[iNdEx:postIndex])) + m.NonResourceURLs = append(m.NonResourceURLs, string(data[iNdEx:postIndex])) iNdEx = postIndex - case 6: + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRuleBuilder) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRuleBuilder: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRuleBuilder: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PolicyRule", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1620,20 +1840,21 @@ func (m *PolicyRule) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.NonResourceURLs = append(m.NonResourceURLs, string(data[iNdEx:postIndex])) + if err := m.PolicyRule.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -2539,56 +2760,58 @@ var ( ) var fileDescriptorGenerated = []byte{ - // 815 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x54, 0x41, 0x6b, 0xe3, 0x46, - 0x18, 0xb5, 0x62, 0xbb, 0xb1, 0xc6, 0x35, 0x6e, 0x54, 0x52, 0x84, 0xa1, 0xb2, 0xf1, 0xc9, 0x34, - 0xc9, 0x08, 0x9b, 0x86, 0xe6, 0xd0, 0x1e, 0xa2, 0x52, 0x4a, 0x68, 0x9a, 0x86, 0x09, 0x0d, 0x6d, - 0x68, 0x29, 0x63, 0x7b, 0xe2, 0x4c, 0x2d, 0x4b, 0x62, 0x66, 0xe4, 0xb6, 0xf4, 0x12, 0xf6, 0x17, - 0xec, 0xaf, 0xd8, 0xdb, 0x5e, 0xf6, 0xba, 0xb0, 0x87, 0x3d, 0xe5, 0xb0, 0x87, 0x1c, 0x97, 0x3d, - 0x98, 0x8d, 0xf6, 0x8f, 0x2c, 0x1a, 0x49, 0x96, 0x1d, 0xdb, 0x1b, 0x27, 0xb0, 0x81, 0x85, 0x3d, - 0x25, 0xf3, 0x7d, 0xef, 0xbd, 0xf9, 0xde, 0xe7, 0xd1, 0x03, 0x3b, 0xfd, 0x1d, 0x0e, 0xa9, 0x6b, - 0xf6, 0xfd, 0x36, 0x61, 0x0e, 0x11, 0x84, 0x9b, 0x5e, 0xbf, 0x67, 0x62, 0x8f, 0x72, 0x93, 0xb5, - 0x71, 0xc7, 0x1c, 0x36, 0xb1, 0xed, 0x9d, 0xe1, 0xa6, 0xd9, 0x23, 0x0e, 0x61, 0x58, 0x90, 0x2e, - 0xf4, 0x98, 0x2b, 0x5c, 0xad, 0x11, 0x31, 0x61, 0xca, 0x84, 
0x5e, 0xbf, 0x07, 0x43, 0x26, 0x0c, - 0x99, 0x30, 0x61, 0x56, 0xb6, 0x7a, 0x54, 0x9c, 0xf9, 0x6d, 0xd8, 0x71, 0x07, 0x66, 0xcf, 0xed, - 0xb9, 0xa6, 0x14, 0x68, 0xfb, 0xa7, 0xf2, 0x24, 0x0f, 0xf2, 0xbf, 0x48, 0xb8, 0xd2, 0x5a, 0x38, - 0x92, 0xc9, 0x08, 0x77, 0x7d, 0xd6, 0x21, 0xd7, 0x87, 0xa9, 0x6c, 0x2f, 0xe6, 0xf8, 0xce, 0x90, - 0x30, 0x4e, 0x5d, 0x87, 0x74, 0x67, 0x68, 0x9b, 0x8b, 0x69, 0xc3, 0x19, 0xc7, 0x95, 0xad, 0xf9, - 0x68, 0xe6, 0x3b, 0x82, 0x0e, 0x66, 0x67, 0x6a, 0xce, 0x87, 0xfb, 0x82, 0xda, 0x26, 0x75, 0x04, - 0x17, 0xec, 0x3a, 0xa5, 0xfe, 0x5c, 0x01, 0xc5, 0xef, 0x6d, 0x9f, 0x0b, 0xc2, 0x90, 0x6b, 0x13, - 0xed, 0x37, 0x50, 0x18, 0x10, 0x81, 0xbb, 0x58, 0x60, 0x5d, 0xa9, 0x29, 0x8d, 0x62, 0xab, 0x01, - 0x17, 0xae, 0x1d, 0x0e, 0x9b, 0xf0, 0x97, 0xf6, 0xdf, 0xa4, 0x23, 0x7e, 0x26, 0x02, 0x5b, 0xda, - 0xc5, 0xa8, 0x9a, 0x09, 0x46, 0x55, 0x90, 0xd6, 0xd0, 0x58, 0x4d, 0xfb, 0x1d, 0xe4, 0x99, 0x6f, - 0x13, 0xae, 0xaf, 0xd4, 0xb2, 0x8d, 0x62, 0xeb, 0x6b, 0xb8, 0xec, 0xaf, 0x09, 0x0f, 0x5d, 0x9b, - 0x76, 0xfe, 0x43, 0xbe, 0x4d, 0xac, 0x52, 0x7c, 0x45, 0x3e, 0x3c, 0x71, 0x14, 0x29, 0xd6, 0x1f, - 0xaf, 0x00, 0x6d, 0xc2, 0x84, 0x45, 0x9d, 0x2e, 0x75, 0x7a, 0xef, 0xd1, 0xcb, 0x5f, 0xa0, 0xc0, - 0x7d, 0xd9, 0x48, 0xec, 0x34, 0x97, 0xb7, 0x73, 0x14, 0x31, 0xad, 0xcf, 0xe2, 0x2b, 0x0a, 0x71, - 0x81, 0xa3, 0xb1, 0xa8, 0xf6, 0x07, 0x58, 0x65, 0xae, 0x4d, 0x10, 0x39, 0xd5, 0xb3, 0x72, 0xf2, - 0x5b, 0xe8, 0xa3, 0x88, 0x68, 0x95, 0x63, 0xfd, 0xd5, 0xb8, 0x80, 0x12, 0xc9, 0xfa, 0x2b, 0x05, - 0x7c, 0x31, 0xbb, 0xaf, 0x7d, 0xca, 0x85, 0xf6, 0xe7, 0xcc, 0xce, 0xcc, 0x77, 0xec, 0x6c, 0xe2, - 0xa5, 0xc3, 0x90, 0x2e, 0x57, 0x37, 0xf6, 0x95, 0x54, 0x26, 0x16, 0x87, 0x41, 0x9e, 0x0a, 0x32, - 0x48, 0xb6, 0xf6, 0xed, 0xf2, 0xae, 0x66, 0xe7, 0x4d, 0x1f, 0xc3, 0x5e, 0x28, 0x89, 0x22, 0xe5, - 0xfa, 0x0b, 0x05, 0x94, 0x27, 0xc0, 0xf7, 0xe1, 0xea, 0x64, 0xda, 0xd5, 0xf6, 0xdd, 0x5c, 0xcd, - 0xb7, 0xf3, 0x20, 0x0b, 0x40, 0xfa, 0x01, 0x68, 0x55, 0x90, 0x1f, 0x12, 0xd6, 0xe6, 0xba, 0x52, - 0xcb, 0x36, 0x54, 0x4b, 0x0d, 0xf1, 0xc7, 0x61, 0x01, 0x45, 0x75, 0xed, 0x5c, 0x01, 0xeb, 0x58, - 0x08, 0x46, 0xdb, 0xbe, 0x20, 0x88, 0x70, 0xc1, 0x68, 0x47, 0x50, 0xd7, 0x09, 0x87, 0x0b, 0x8d, - 0x6f, 0x2c, 0x18, 0x2e, 0xce, 0x14, 0x88, 0xf0, 0x3f, 0x3f, 0xfc, 0x2b, 0x88, 0x13, 0xfa, 0xb7, - 0xbe, 0x8c, 0x47, 0x5a, 0xdf, 0x9d, 0xa7, 0x88, 0xe6, 0x5f, 0xa4, 0x6d, 0x00, 0x15, 0x7b, 0xf4, - 0x47, 0xe6, 0xfa, 0x1e, 0xd7, 0xb3, 0x72, 0xce, 0x52, 0x30, 0xaa, 0xaa, 0xbb, 0x87, 0x7b, 0x51, - 0x11, 0xa5, 0xfd, 0x10, 0x9c, 0x64, 0x2c, 0xd7, 0x73, 0x29, 0x18, 0x25, 0x45, 0x94, 0xf6, 0xb5, - 0x6f, 0x40, 0x29, 0x39, 0x1c, 0xe0, 0x01, 0xe1, 0x7a, 0x5e, 0x12, 0xd6, 0x82, 0x51, 0xb5, 0x84, - 0x26, 0x1b, 0x68, 0x1a, 0xa7, 0x7d, 0x07, 0xca, 0x8e, 0xeb, 0x24, 0x90, 0x5f, 0xd1, 0x3e, 0xd7, - 0x3f, 0x91, 0xd4, 0xcf, 0x83, 0x51, 0xb5, 0x7c, 0x30, 0xdd, 0x42, 0xd7, 0xb1, 0xf5, 0xa7, 0x0a, - 0xc8, 0x7d, 0xb8, 0xf1, 0xf8, 0x68, 0x05, 0x14, 0x3f, 0xe6, 0xe2, 0x12, 0xb9, 0x18, 0x46, 0xc7, - 0x3d, 0x07, 0xe2, 0xdd, 0xa3, 0xe3, 0xe6, 0x24, 0x7c, 0xa6, 0x80, 0xc2, 0x7d, 0x45, 0xe0, 0xd1, - 0xb4, 0x0f, 0x78, 0x4b, 0x1f, 0xf3, 0x0d, 0xfc, 0x0f, 0x92, 0xdf, 0x48, 0xdb, 0x04, 0x85, 0x24, - 0x33, 0xe4, 0xf8, 0x6a, 0x3a, 0x4d, 0x12, 0x2b, 0x68, 0x8c, 0xd0, 0x6a, 0x20, 0xd7, 0xa7, 0x4e, - 0x57, 0x46, 0x9e, 0x6a, 0x7d, 0x1a, 0x23, 0x73, 0x3f, 0x51, 0xa7, 0x8b, 0x64, 0x27, 0x44, 0x38, - 0x78, 0x40, 0xe4, 0x2b, 0x9a, 0x40, 0x84, 0x69, 0x81, 0x64, 0xa7, 0xfe, 0x44, 0x01, 0xab, 0xf1, - 0x0b, 0x1c, 0xeb, 0x29, 0x0b, 0xf5, 0x5a, 0x00, 0x60, 0x8f, 0x1e, 0x47, 0x4b, 0x8b, 
0xef, 0x1d, - 0x7f, 0x2b, 0xbb, 0x87, 0x7b, 0x71, 0x07, 0x4d, 0xa0, 0x6e, 0x9e, 0x41, 0x33, 0x81, 0x1a, 0xfe, - 0xe5, 0x1e, 0xee, 0x10, 0x3d, 0x27, 0x61, 0x6b, 0x31, 0x4c, 0x3d, 0x48, 0x1a, 0x28, 0xc5, 0x58, - 0x5f, 0x5d, 0x5c, 0x19, 0x99, 0xcb, 0x2b, 0x23, 0xf3, 0xf2, 0xca, 0xc8, 0x9c, 0x07, 0x86, 0x72, - 0x11, 0x18, 0xca, 0x65, 0x60, 0x28, 0xaf, 0x03, 0x43, 0x79, 0xf8, 0xc6, 0xc8, 0x9c, 0x14, 0x92, - 0xc5, 0xbf, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x62, 0x32, 0x8a, 0x1f, 0x89, 0x0b, 0x00, 0x00, + // 847 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x54, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xce, 0xb4, 0x09, 0x4d, 0x5e, 0xa9, 0x4a, 0x07, 0x09, 0x99, 0x1e, 0x9c, 0xca, 0xa7, 0x0a, + 0x16, 0x9b, 0x96, 0x05, 0xf6, 0x00, 0x87, 0x35, 0x07, 0x54, 0xb1, 0x94, 0x6a, 0x56, 0xac, 0xc4, + 0x6a, 0x25, 0x98, 0x38, 0xb3, 0xc9, 0x10, 0xff, 0xd2, 0x8c, 0x1d, 0xb1, 0x42, 0x48, 0x1c, 0x39, + 0xf2, 0x57, 0x70, 0xe4, 0x80, 0xc4, 0x91, 0x13, 0x97, 0x0a, 0x2e, 0x3d, 0xc2, 0x25, 0xa2, 0xe6, + 0x0f, 0x01, 0x79, 0x3c, 0xfe, 0x51, 0x9c, 0xaa, 0x3f, 0x90, 0x22, 0x21, 0x71, 0x4a, 0xfc, 0xde, + 0xf7, 0x7d, 0xf3, 0xbe, 0x79, 0xf6, 0x07, 0xf7, 0x66, 0xf7, 0xa4, 0xcd, 0x23, 0x67, 0x96, 0x8e, + 0x98, 0x08, 0x59, 0xc2, 0xa4, 0x13, 0xcf, 0x26, 0x0e, 0x8d, 0xb9, 0x74, 0xc4, 0x88, 0x7a, 0xce, + 0xfc, 0x80, 0xfa, 0xf1, 0x94, 0x1e, 0x38, 0x13, 0x16, 0x32, 0x41, 0x13, 0x36, 0xb6, 0x63, 0x11, + 0x25, 0x11, 0xde, 0x2f, 0x98, 0x76, 0xcd, 0xb4, 0xe3, 0xd9, 0xc4, 0xce, 0x99, 0x76, 0xce, 0xb4, + 0x4b, 0xe6, 0xee, 0x6b, 0x13, 0x9e, 0x4c, 0xd3, 0x91, 0xed, 0x45, 0x81, 0x33, 0x89, 0x26, 0x91, + 0xa3, 0x04, 0x46, 0xe9, 0x53, 0xf5, 0xa4, 0x1e, 0xd4, 0xbf, 0x42, 0x78, 0xf7, 0xae, 0x1e, 0x89, + 0xc6, 0x3c, 0xa0, 0xde, 0x94, 0x87, 0x4c, 0x3c, 0xab, 0x87, 0x0a, 0x58, 0x42, 0x9d, 0x79, 0x6b, + 0x9c, 0x5d, 0xe7, 0x32, 0x96, 0x48, 0xc3, 0x84, 0x07, 0xac, 0x45, 0x78, 0xeb, 0x2a, 0x82, 0xf4, + 0xa6, 0x2c, 0xa0, 0x2d, 0xde, 0x1b, 0x97, 0xf1, 0xd2, 0x84, 0xfb, 0x0e, 0x0f, 0x13, 0x99, 0x88, + 0x16, 0xa9, 0xe1, 0x49, 0x32, 0x31, 0x67, 0xa2, 0x36, 0xc4, 0xbe, 0xa0, 0x41, 0xec, 0xb3, 0x65, + 0x9e, 0xee, 0x5c, 0xba, 0x9c, 0x25, 0x68, 0xeb, 0x17, 0x04, 0x9b, 0xef, 0xf9, 0xa9, 0x4c, 0x98, + 0x20, 0x91, 0xcf, 0xf0, 0x67, 0xd0, 0xcf, 0x2f, 0x6b, 0x4c, 0x13, 0x6a, 0xa0, 0x3d, 0xb4, 0xbf, + 0x79, 0xf8, 0xba, 0xad, 0x77, 0xd6, 0x9c, 0xbd, 0xde, 0x5a, 0x8e, 0xb6, 0xe7, 0x07, 0xf6, 0x47, + 0xa3, 0xcf, 0x99, 0x97, 0x7c, 0xc8, 0x12, 0xea, 0xe2, 0xd3, 0xc5, 0xb0, 0x93, 0x2d, 0x86, 0x50, + 0xd7, 0x48, 0xa5, 0x8a, 0x3f, 0x81, 0x9e, 0x48, 0x7d, 0x26, 0x8d, 0xb5, 0xbd, 0xf5, 0xfd, 0xcd, + 0xc3, 0xbb, 0xf6, 0x75, 0x5f, 0x09, 0xfb, 0x24, 0xf2, 0xb9, 0xf7, 0x8c, 0xa4, 0x3e, 0x73, 0xb7, + 0xf4, 0x11, 0xbd, 0xfc, 0x49, 0x92, 0x42, 0xd1, 0xfa, 0x71, 0x0d, 0x70, 0xc3, 0x8c, 0xcb, 0xc3, + 0x31, 0x0f, 0x27, 0x2b, 0xf0, 0xf4, 0x29, 0xf4, 0x65, 0xaa, 0x1a, 0xa5, 0xad, 0x83, 0xeb, 0xdb, + 0x7a, 0x58, 0x30, 0xdd, 0x17, 0xf4, 0x11, 0x7d, 0x5d, 0x90, 0xa4, 0x12, 0xc5, 0x4f, 0x60, 0x43, + 0x44, 0x3e, 0x23, 0xec, 0xa9, 0xb1, 0xae, 0x1c, 0xdc, 0x40, 0x9f, 0x14, 0x44, 0x77, 0x5b, 0xeb, + 0x6f, 0xe8, 0x02, 0x29, 0x25, 0xad, 0xef, 0x10, 0xbc, 0xdc, 0xbe, 0x37, 0x37, 0xe5, 0xfe, 0x98, + 0x09, 0xfc, 0x0d, 0x02, 0xec, 0xb5, 0xba, 0xfa, 0x26, 0xdf, 0xb9, 0xfe, 0x1c, 0x4b, 0x4e, 0xd8, + 0xd5, 0x23, 0x2d, 0xd9, 0x1a, 0x59, 0x72, 0xa6, 0xf5, 0x3b, 0x82, 0x97, 0xda, 0xd0, 0x07, 0x5c, + 0x26, 0xf8, 0x49, 0x6b, 0xc9, 0xf6, 0xf5, 0x96, 0x9c, 0xb3, 0xd5, 0x8a, 0xab, 0xfb, 0x2f, 0x2b, + 0x8d, 0x05, 0x53, 0xe8, 0xf1, 0x84, 0x05, 0xe5, 0x76, 0xff, 0x9d, 
0xeb, 0xea, 0xe5, 0x3d, 0xca, + 0x25, 0x49, 0xa1, 0x6c, 0xfd, 0x8a, 0x60, 0xbb, 0x01, 0x5e, 0x81, 0xa9, 0xc7, 0x17, 0x4d, 0xbd, + 0x79, 0x3b, 0x53, 0xcb, 0xdd, 0xfc, 0x85, 0x00, 0xea, 0xef, 0x15, 0x0f, 0xa1, 0x37, 0x67, 0x62, + 0x24, 0x0d, 0xb4, 0xb7, 0xbe, 0x3f, 0x70, 0x07, 0x39, 0xfe, 0x51, 0x5e, 0x20, 0x45, 0x1d, 0xbf, + 0x0a, 0x03, 0x1a, 0xf3, 0xf7, 0x45, 0x94, 0xc6, 0xd2, 0x58, 0x57, 0xa0, 0xad, 0x6c, 0x31, 0x1c, + 0xdc, 0x3f, 0x39, 0x2a, 0x8a, 0xa4, 0xee, 0xe7, 0x60, 0xc1, 0x64, 0x94, 0x0a, 0x8f, 0x49, 0xa3, + 0x5b, 0x83, 0x49, 0x59, 0x24, 0x75, 0x1f, 0xbf, 0x0d, 0x5b, 0xe5, 0xc3, 0x31, 0x0d, 0x98, 0x34, + 0x7a, 0x8a, 0xb0, 0x93, 0x2d, 0x86, 0x5b, 0xa4, 0xd9, 0x20, 0x17, 0x71, 0xf8, 0x5d, 0xd8, 0x0e, + 0xa3, 0xb0, 0x84, 0x7c, 0x4c, 0x1e, 0x48, 0xe3, 0x39, 0x45, 0x7d, 0x31, 0x5b, 0x0c, 0xb7, 0x8f, + 0x2f, 0xb6, 0xc8, 0x3f, 0xb1, 0xd6, 0x57, 0xb0, 0xd3, 0x08, 0x2c, 0xfd, 0x2d, 0x4d, 0x01, 0xe2, + 0xaa, 0xa8, 0x57, 0x7a, 0xbb, 0x04, 0xac, 0x02, 0xa9, 0xae, 0x91, 0x86, 0xb6, 0xf5, 0x33, 0x82, + 0xee, 0x7f, 0x3f, 0xd1, 0xbf, 0x5f, 0x83, 0xcd, 0xff, 0xa3, 0xfc, 0x06, 0x51, 0x9e, 0xa7, 0xc8, + 0x6a, 0xa3, 0xf1, 0xf6, 0x29, 0x72, 0x75, 0x26, 0xfe, 0x84, 0xa0, 0xbf, 0xa2, 0x30, 0x7c, 0x78, + 0xd1, 0x86, 0x7d, 0x43, 0x1b, 0xcb, 0xe7, 0xff, 0x12, 0xca, 0x0d, 0xe1, 0x3b, 0xd0, 0x2f, 0x03, + 0x4c, 0x4d, 0x3f, 0xa8, 0xa7, 0x29, 0x33, 0x8e, 0x54, 0x08, 0xbc, 0x07, 0xdd, 0x19, 0x0f, 0xc7, + 0xc6, 0x9a, 0x42, 0x3e, 0xaf, 0x91, 0xdd, 0x0f, 0x78, 0x38, 0x26, 0xaa, 0x93, 0x23, 0x42, 0x1a, + 0x30, 0xf5, 0x0e, 0x35, 0x10, 0x79, 0x74, 0x11, 0xd5, 0xb1, 0x7e, 0x40, 0xb0, 0xa1, 0xdf, 0xbf, + 0x4a, 0x0f, 0x5d, 0xaa, 0x77, 0x08, 0x40, 0x63, 0xfe, 0x88, 0x09, 0xc9, 0xa3, 0x50, 0x9f, 0x5b, + 0x7d, 0x29, 0xf7, 0x4f, 0x8e, 0x74, 0x87, 0x34, 0x50, 0x57, 0xcf, 0x80, 0x1d, 0x18, 0xe4, 0xbf, + 0x32, 0xa6, 0x1e, 0x33, 0xba, 0x0a, 0xb6, 0xa3, 0x61, 0x83, 0xe3, 0xb2, 0x41, 0x6a, 0x8c, 0xfb, + 0xca, 0xe9, 0xb9, 0xd9, 0x39, 0x3b, 0x37, 0x3b, 0xbf, 0x9d, 0x9b, 0x9d, 0xaf, 0x33, 0x13, 0x9d, + 0x66, 0x26, 0x3a, 0xcb, 0x4c, 0xf4, 0x47, 0x66, 0xa2, 0x6f, 0xff, 0x34, 0x3b, 0x8f, 0xfb, 0xe5, + 0xc5, 0xff, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x53, 0x64, 0x1d, 0x0e, 0x87, 0x0c, 0x00, 0x00, } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto index 09f11e33b0..5a75fb240b 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,11 +21,12 @@ syntax = 'proto2'; package k8s.io.kubernetes.pkg.apis.rbac.v1alpha1; -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". 
option go_package = "v1alpha1"; @@ -34,7 +35,7 @@ option go_package = "v1alpha1"; message ClusterRole { // Standard object's metadata. // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole repeated PolicyRule rules = 2; @@ -45,7 +46,7 @@ message ClusterRole { message ClusterRoleBinding { // Standard object's metadata. // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. repeated Subject subjects = 2; @@ -55,11 +56,19 @@ message ClusterRoleBinding { optional RoleRef roleRef = 3; } +// +k8s:deepcopy-gen=false +// ClusterRoleBindingBuilder let's us attach methods. A no-no for API types. +// We use it to construct bindings in code. It's more compact than trying to write them +// out in a literal. +message ClusterRoleBindingBuilder { + optional ClusterRoleBinding clusterRoleBinding = 1; +} + // ClusterRoleBindingList is a collection of ClusterRoleBindings message ClusterRoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoleBindings repeated ClusterRoleBinding items = 2; @@ -69,7 +78,7 @@ message ClusterRoleBindingList { message ClusterRoleList { // Standard object's metadata. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoles repeated ClusterRole items = 2; @@ -81,11 +90,6 @@ message PolicyRule { // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. repeated string verbs = 1; - // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. - // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. - // +optional - optional k8s.io.kubernetes.pkg.runtime.RawExtension attributeRestrictions = 2; - // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of // the enumerated resources in any API group will be allowed. // +optional @@ -107,11 +111,19 @@ message PolicyRule { repeated string nonResourceURLs = 6; } +// +k8s:deepcopy-gen=false +// PolicyRuleBuilder let's us attach methods. A no-no for API types. +// We use it to construct rules in code. It's more compact than trying to write them +// out in a literal and allows us to perform some basic checking during construction +message PolicyRuleBuilder { + optional PolicyRule policyRule = 1; +} + // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. message Role { // Standard object's metadata. // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role repeated PolicyRule rules = 2; @@ -123,7 +135,7 @@ message Role { message RoleBinding { // Standard object's metadata. 
// +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. repeated Subject subjects = 2; @@ -137,7 +149,7 @@ message RoleBinding { message RoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of RoleBindings repeated RoleBinding items = 2; @@ -147,7 +159,7 @@ message RoleBindingList { message RoleList { // Standard object's metadata. // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of Roles repeated Role items = 2; @@ -172,7 +184,10 @@ message Subject { // If the Authorizer does not recognized the kind value, the Authorizer should report an error. optional string kind = 1; - // APIVersion holds the API group and version of the referenced object. + // APIVersion holds the API group and version of the referenced subject. + // Defaults to "v1" for ServiceAccount subjects. + // Defaults to "rbac.authorization.k8s.io/v1alpha1" for User and Group subjects. + // +k8s:conversion-gen=false // +optional optional string apiVersion = 2; diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/helpers.go new file mode 100644 index 0000000000..81ecaef20b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/helpers.go @@ -0,0 +1,148 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen=false +// PolicyRuleBuilder let's us attach methods. A no-no for API types. +// We use it to construct rules in code. It's more compact than trying to write them +// out in a literal and allows us to perform some basic checking during construction +type PolicyRuleBuilder struct { + PolicyRule PolicyRule `protobuf:"bytes,1,opt,name=policyRule"` +} + +func NewRule(verbs ...string) *PolicyRuleBuilder { + return &PolicyRuleBuilder{ + PolicyRule: PolicyRule{Verbs: verbs}, + } +} + +func (r *PolicyRuleBuilder) Groups(groups ...string) *PolicyRuleBuilder { + r.PolicyRule.APIGroups = append(r.PolicyRule.APIGroups, groups...) + return r +} + +func (r *PolicyRuleBuilder) Resources(resources ...string) *PolicyRuleBuilder { + r.PolicyRule.Resources = append(r.PolicyRule.Resources, resources...) + return r +} + +func (r *PolicyRuleBuilder) Names(names ...string) *PolicyRuleBuilder { + r.PolicyRule.ResourceNames = append(r.PolicyRule.ResourceNames, names...) + return r +} + +func (r *PolicyRuleBuilder) URLs(urls ...string) *PolicyRuleBuilder { + r.PolicyRule.NonResourceURLs = append(r.PolicyRule.NonResourceURLs, urls...) 
+ return r +} + +func (r *PolicyRuleBuilder) RuleOrDie() PolicyRule { + ret, err := r.Rule() + if err != nil { + panic(err) + } + return ret +} + +func (r *PolicyRuleBuilder) Rule() (PolicyRule, error) { + if len(r.PolicyRule.Verbs) == 0 { + return PolicyRule{}, fmt.Errorf("verbs are required: %#v", r.PolicyRule) + } + + switch { + case len(r.PolicyRule.NonResourceURLs) > 0: + if len(r.PolicyRule.APIGroups) != 0 || len(r.PolicyRule.Resources) != 0 || len(r.PolicyRule.ResourceNames) != 0 { + return PolicyRule{}, fmt.Errorf("non-resource rule may not have apiGroups, resources, or resourceNames: %#v", r.PolicyRule) + } + case len(r.PolicyRule.Resources) > 0: + if len(r.PolicyRule.NonResourceURLs) != 0 { + return PolicyRule{}, fmt.Errorf("resource rule may not have nonResourceURLs: %#v", r.PolicyRule) + } + if len(r.PolicyRule.APIGroups) == 0 { + // this a common bug + return PolicyRule{}, fmt.Errorf("resource rule must have apiGroups: %#v", r.PolicyRule) + } + default: + return PolicyRule{}, fmt.Errorf("a rule must have either nonResourceURLs or resources: %#v", r.PolicyRule) + } + + return r.PolicyRule, nil +} + +// +k8s:deepcopy-gen=false +// ClusterRoleBindingBuilder let's us attach methods. A no-no for API types. +// We use it to construct bindings in code. It's more compact than trying to write them +// out in a literal. +type ClusterRoleBindingBuilder struct { + ClusterRoleBinding ClusterRoleBinding `protobuf:"bytes,1,opt,name=clusterRoleBinding"` +} + +func NewClusterBinding(clusterRoleName string) *ClusterRoleBindingBuilder { + return &ClusterRoleBindingBuilder{ + ClusterRoleBinding: ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: clusterRoleName}, + RoleRef: RoleRef{ + APIGroup: GroupName, + Kind: "ClusterRole", + Name: clusterRoleName, + }, + }, + } +} + +func (r *ClusterRoleBindingBuilder) Groups(groups ...string) *ClusterRoleBindingBuilder { + for _, group := range groups { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: GroupKind, Name: group}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) Users(users ...string) *ClusterRoleBindingBuilder { + for _, user := range users { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: UserKind, Name: user}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *ClusterRoleBindingBuilder { + for _, saName := range serviceAccountNames { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: ServiceAccountKind, Namespace: namespace, Name: saName}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) BindingOrDie() ClusterRoleBinding { + ret, err := r.Binding() + if err != nil { + panic(err) + } + return ret +} + +func (r *ClusterRoleBindingBuilder) Binding() (ClusterRoleBinding, error) { + if len(r.ClusterRoleBinding.Subjects) == 0 { + return ClusterRoleBinding{}, fmt.Errorf("subjects are required: %#v", r.ClusterRoleBinding) + } + + return r.ClusterRoleBinding, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/register.go index 5b76247d79..3977e99c51 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/register.go @@ -17,16 +17,20 @@ limitations under the License. 
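The helpers.go file added above introduces fluent builders for PolicyRule and ClusterRoleBinding so callers can construct RBAC objects in code instead of writing struct literals. The following is a rough usage sketch, not part of this diff: it assumes the vendored import path k8s.io/kubernetes/pkg/apis/rbac/v1alpha1 shown in the file headers, and also uses the Resource helper added to register.go just below.

package main

import (
	"fmt"

	rbacv1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
)

func main() {
	// Build a resource rule; Rule() rejects rules that omit verbs or apiGroups,
	// or that mix resource and non-resource fields.
	rule, err := rbacv1alpha1.NewRule("get", "list", "watch").
		Groups(""). // "" is the core API group
		Resources("pods").
		Rule()
	if err != nil {
		panic(err)
	}

	// Bind the "view" ClusterRole to a service account; BindingOrDie panics
	// if no subjects were added.
	binding := rbacv1alpha1.NewClusterBinding("view").
		SAs("kube-system", "default").
		BindingOrDie()

	// Resource (added in register.go below) qualifies an unversioned resource
	// name with the rbac.authorization.k8s.io group.
	gr := rbacv1alpha1.Resource("clusterrolebindings")

	fmt.Printf("rule: %#v\nbinding subjects: %#v\nresource: %s\n",
		rule, binding.Subjects, gr.String())
}

The validation in Rule() and Binding() is the point of these builders: they catch common mistakes (for example, a resource rule with no apiGroups) at construction time rather than when the object is submitted to the API server.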
package v1alpha1 import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/watch/versioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) const GroupName = "rbac.authorization.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1alpha1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} var ( SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) @@ -45,11 +49,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ClusterRoleBinding{}, &ClusterRoleBindingList{}, &ClusterRoleList{}, - - &v1.ListOptions{}, - &v1.DeleteOptions{}, - &v1.ExportOptions{}, ) - versioned.AddToGroupVersion(scheme, SchemeGroupVersion) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.generated.go index 4481709b7a..64ba53a9be 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.generated.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.generated.go @@ -25,10 +25,8 @@ import ( "errors" "fmt" codec1978 "github.com/ugorji/go/codec" - pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg3_v1 "k8s.io/kubernetes/pkg/api/v1" - pkg1_runtime "k8s.io/kubernetes/pkg/runtime" - pkg4_types "k8s.io/kubernetes/pkg/types" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" "reflect" "runtime" time "time" @@ -64,12 +62,10 @@ func init() { panic(err) } if false { // reference the types, but skip this branch at build/run time - var v0 pkg2_unversioned.TypeMeta - var v1 pkg3_v1.ObjectMeta - var v2 pkg1_runtime.RawExtension - var v3 pkg4_types.UID - var v4 time.Time - _, _, _, _, _ = v0, v1, v2, v3, v4 + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 } } @@ -87,17 +83,16 @@ func (x *PolicyRule) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool + var yyq2 [5]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[1] = true - yyq2[2] = len(x.APIGroups) != 0 - yyq2[3] = len(x.Resources) != 0 - yyq2[4] = len(x.ResourceNames) != 0 - yyq2[5] = len(x.NonResourceURLs) != 0 + yyq2[1] = len(x.APIGroups) != 0 + yyq2[2] = len(x.Resources) != 0 + yyq2[3] = len(x.ResourceNames) != 0 + yyq2[4] = len(x.NonResourceURLs) != 0 var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) + r.EncodeArrayStart(5) } else { yynn2 = 1 for _, b := range yyq2 { @@ -138,44 +133,11 @@ func (x *PolicyRule) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[1] { - yy7 := &x.AttributeRestrictions - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) - } else { - z.EncFallback(yy7) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("attributeRestrictions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.AttributeRestrictions - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { if x.APIGroups == nil { r.EncodeNil() } else { - yym12 := z.EncBinary() - _ = yym12 + yym7 := z.EncBinary() + _ = yym7 if false { } else { z.F.EncSliceStringV(x.APIGroups, false, e) @@ -185,15 +147,15 @@ func (x *PolicyRule) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2[2] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiGroups")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.APIGroups == nil { r.EncodeNil() } else { - yym13 := z.EncBinary() - _ = yym13 + yym8 := z.EncBinary() + _ = yym8 if false { } else { z.F.EncSliceStringV(x.APIGroups, false, e) @@ -203,12 +165,12 @@ func (x *PolicyRule) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { + if yyq2[2] { if x.Resources == nil { r.EncodeNil() } else { - yym15 := z.EncBinary() - _ = yym15 + yym10 := z.EncBinary() + _ = yym10 if false { } else { z.F.EncSliceStringV(x.Resources, false, e) @@ -218,15 +180,15 @@ func (x *PolicyRule) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2[3] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("resources")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.Resources == nil { r.EncodeNil() } else { - yym16 := z.EncBinary() - _ = yym16 + yym11 := z.EncBinary() + _ = yym11 if false { } else { z.F.EncSliceStringV(x.Resources, false, e) @@ -236,12 +198,12 @@ func (x *PolicyRule) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { + if yyq2[3] { if x.ResourceNames == nil { r.EncodeNil() } else { - yym18 := z.EncBinary() - _ = yym18 + yym13 := z.EncBinary() + _ = yym13 if false { } else { z.F.EncSliceStringV(x.ResourceNames, false, e) @@ -251,15 +213,15 @@ func (x *PolicyRule) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2[4] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("resourceNames")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.ResourceNames == nil { r.EncodeNil() } else { - yym19 := z.EncBinary() - _ = yym19 + yym14 := z.EncBinary() + _ = yym14 if false { } else { z.F.EncSliceStringV(x.ResourceNames, false, e) @@ -269,12 +231,12 @@ func (x *PolicyRule) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { + if yyq2[4] { if x.NonResourceURLs == nil { r.EncodeNil() } else { - yym21 := z.EncBinary() - _ = yym21 + yym16 := z.EncBinary() + _ = yym16 if false { } else { z.F.EncSliceStringV(x.NonResourceURLs, false, e) @@ -284,15 +246,15 @@ func (x *PolicyRule) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2[5] { + if yyq2[4] { 
z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nonResourceURLs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.NonResourceURLs == nil { r.EncodeNil() } else { - yym22 := z.EncBinary() - _ = yym22 + yym17 := z.EncBinary() + _ = yym17 if false { } else { z.F.EncSliceStringV(x.NonResourceURLs, false, e) @@ -313,25 +275,25 @@ func (x *PolicyRule) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym23 := z.DecBinary() - _ = yym23 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct24 := r.ContainerType() - if yyct24 == codecSelferValueTypeMap1234 { - yyl24 := r.ReadMapStart() - if yyl24 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl24, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct24 == codecSelferValueTypeArray1234 { - yyl24 := r.ReadArrayStart() - if yyl24 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl24, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -343,12 +305,12 @@ func (x *PolicyRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys25Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys25Slc - var yyhl25 bool = l >= 0 - for yyj25 := 0; ; yyj25++ { - if yyhl25 { - if yyj25 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -357,89 +319,74 @@ func (x *PolicyRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys25Slc = r.DecodeBytes(yys25Slc, true, true) - yys25 := string(yys25Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys25 { + switch yys3 { case "verbs": if r.TryDecodeAsNil() { x.Verbs = nil } else { - yyv26 := &x.Verbs - yym27 := z.DecBinary() - _ = yym27 + yyv4 := &x.Verbs + yym5 := z.DecBinary() + _ = yym5 if false { } else { - z.F.DecSliceStringX(yyv26, false, d) - } - } - case "attributeRestrictions": - if r.TryDecodeAsNil() { - x.AttributeRestrictions = pkg1_runtime.RawExtension{} - } else { - yyv28 := &x.AttributeRestrictions - yym29 := z.DecBinary() - _ = yym29 - if false { - } else if z.HasExtensions() && z.DecExt(yyv28) { - } else if !yym29 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv28) - } else { - z.DecFallback(yyv28, false) + z.F.DecSliceStringX(yyv4, false, d) } } case "apiGroups": if r.TryDecodeAsNil() { x.APIGroups = nil } else { - yyv30 := &x.APIGroups - yym31 := z.DecBinary() - _ = yym31 + yyv6 := &x.APIGroups + yym7 := z.DecBinary() + _ = yym7 if false { } else { - z.F.DecSliceStringX(yyv30, false, d) + z.F.DecSliceStringX(yyv6, false, d) } } case "resources": if r.TryDecodeAsNil() { x.Resources = nil } else { - yyv32 := &x.Resources - yym33 := z.DecBinary() - _ = yym33 + yyv8 := &x.Resources + yym9 := z.DecBinary() + _ = yym9 if false { } else { - 
z.F.DecSliceStringX(yyv32, false, d) + z.F.DecSliceStringX(yyv8, false, d) } } case "resourceNames": if r.TryDecodeAsNil() { x.ResourceNames = nil } else { - yyv34 := &x.ResourceNames - yym35 := z.DecBinary() - _ = yym35 + yyv10 := &x.ResourceNames + yym11 := z.DecBinary() + _ = yym11 if false { } else { - z.F.DecSliceStringX(yyv34, false, d) + z.F.DecSliceStringX(yyv10, false, d) } } case "nonResourceURLs": if r.TryDecodeAsNil() { x.NonResourceURLs = nil } else { - yyv36 := &x.NonResourceURLs - yym37 := z.DecBinary() - _ = yym37 + yyv12 := &x.NonResourceURLs + yym13 := z.DecBinary() + _ = yym13 if false { } else { - z.F.DecSliceStringX(yyv36, false, d) + z.F.DecSliceStringX(yyv12, false, d) } } default: - z.DecStructFieldNotFound(-1, yys25) - } // end switch yys25 - } // end for yyj25 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -447,16 +394,16 @@ func (x *PolicyRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj38 int - var yyb38 bool - var yyhl38 bool = l >= 0 - yyj38++ - if yyhl38 { - yyb38 = yyj38 > l + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb38 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb38 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -464,46 +411,21 @@ func (x *PolicyRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Verbs = nil } else { - yyv39 := &x.Verbs - yym40 := z.DecBinary() - _ = yym40 - if false { - } else { - z.F.DecSliceStringX(yyv39, false, d) - } - } - yyj38++ - if yyhl38 { - yyb38 = yyj38 > l - } else { - yyb38 = r.CheckBreak() - } - if yyb38 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AttributeRestrictions = pkg1_runtime.RawExtension{} - } else { - yyv41 := &x.AttributeRestrictions - yym42 := z.DecBinary() - _ = yym42 + yyv15 := &x.Verbs + yym16 := z.DecBinary() + _ = yym16 if false { - } else if z.HasExtensions() && z.DecExt(yyv41) { - } else if !yym42 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv41) } else { - z.DecFallback(yyv41, false) + z.F.DecSliceStringX(yyv15, false, d) } } - yyj38++ - if yyhl38 { - yyb38 = yyj38 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb38 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb38 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -511,21 +433,21 @@ func (x *PolicyRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIGroups = nil } else { - yyv43 := &x.APIGroups - yym44 := z.DecBinary() - _ = yym44 + yyv17 := &x.APIGroups + yym18 := z.DecBinary() + _ = yym18 if false { } else { - z.F.DecSliceStringX(yyv43, false, d) + z.F.DecSliceStringX(yyv17, false, d) } } - yyj38++ - if yyhl38 { - yyb38 = yyj38 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb38 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb38 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -533,21 +455,21 @@ func (x *PolicyRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Resources = nil } else { - yyv45 := &x.Resources - yym46 := z.DecBinary() - _ = yym46 + yyv19 := &x.Resources + yym20 := z.DecBinary() + _ = yym20 if 
false { } else { - z.F.DecSliceStringX(yyv45, false, d) + z.F.DecSliceStringX(yyv19, false, d) } } - yyj38++ - if yyhl38 { - yyb38 = yyj38 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb38 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb38 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -555,21 +477,21 @@ func (x *PolicyRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ResourceNames = nil } else { - yyv47 := &x.ResourceNames - yym48 := z.DecBinary() - _ = yym48 + yyv21 := &x.ResourceNames + yym22 := z.DecBinary() + _ = yym22 if false { } else { - z.F.DecSliceStringX(yyv47, false, d) + z.F.DecSliceStringX(yyv21, false, d) } } - yyj38++ - if yyhl38 { - yyb38 = yyj38 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb38 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb38 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -577,26 +499,26 @@ func (x *PolicyRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.NonResourceURLs = nil } else { - yyv49 := &x.NonResourceURLs - yym50 := z.DecBinary() - _ = yym50 + yyv23 := &x.NonResourceURLs + yym24 := z.DecBinary() + _ = yym24 if false { } else { - z.F.DecSliceStringX(yyv49, false, d) + z.F.DecSliceStringX(yyv23, false, d) } } for { - yyj38++ - if yyhl38 { - yyb38 = yyj38 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb38 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb38 { + if yyb14 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj38-1, "") + z.DecStructFieldNotFound(yyj14-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -608,35 +530,35 @@ func (x *Subject) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym51 := z.EncBinary() - _ = yym51 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep52 := !z.EncBinary() - yy2arr52 := z.EncBasicHandle().StructToArray - var yyq52 [4]bool - _, _, _ = yysep52, yyq52, yy2arr52 - const yyr52 bool = false - yyq52[1] = x.APIVersion != "" - yyq52[3] = x.Namespace != "" - var yynn52 int - if yyr52 || yy2arr52 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.APIVersion != "" + yyq2[3] = x.Namespace != "" + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn52 = 2 - for _, b := range yyq52 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn52++ + yynn2++ } } - r.EncodeMapStart(yynn52) - yynn52 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr52 || yy2arr52 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym54 := z.EncBinary() - _ = yym54 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -645,18 +567,18 @@ func (x *Subject) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym55 := z.EncBinary() - _ = yym55 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } - if yyr52 || yy2arr52 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq52[1] { - yym57 := z.EncBinary() - _ 
= yym57 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -665,22 +587,22 @@ func (x *Subject) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq52[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym58 := z.EncBinary() - _ = yym58 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr52 || yy2arr52 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym60 := z.EncBinary() - _ = yym60 + yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) @@ -689,18 +611,18 @@ func (x *Subject) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("name")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym61 := z.EncBinary() - _ = yym61 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } - if yyr52 || yy2arr52 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq52[3] { - yym63 := z.EncBinary() - _ = yym63 + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) @@ -709,19 +631,19 @@ func (x *Subject) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq52[3] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("namespace")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym64 := z.EncBinary() - _ = yym64 + yym14 := z.EncBinary() + _ = yym14 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) } } } - if yyr52 || yy2arr52 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -734,25 +656,25 @@ func (x *Subject) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym65 := z.DecBinary() - _ = yym65 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct66 := r.ContainerType() - if yyct66 == codecSelferValueTypeMap1234 { - yyl66 := r.ReadMapStart() - if yyl66 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl66, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct66 == codecSelferValueTypeArray1234 { - yyl66 := r.ReadArrayStart() - if yyl66 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl66, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -764,12 +686,12 @@ func (x *Subject) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys67Slc = z.DecScratchBuffer() // 
default slice to decode into - _ = yys67Slc - var yyhl67 bool = l >= 0 - for yyj67 := 0; ; yyj67++ { - if yyhl67 { - if yyj67 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -778,38 +700,62 @@ func (x *Subject) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys67Slc = r.DecodeBytes(yys67Slc, true, true) - yys67 := string(yys67Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys67 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "name": if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } case "namespace": if r.TryDecodeAsNil() { x.Namespace = "" } else { - x.Namespace = string(r.DecodeString()) + yyv10 := &x.Namespace + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys67) - } // end switch yys67 - } // end for yyj67 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -817,16 +763,16 @@ func (x *Subject) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj72 int - var yyb72 bool - var yyhl72 bool = l >= 0 - yyj72++ - if yyhl72 { - yyb72 = yyj72 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb72 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb72 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -834,15 +780,21 @@ func (x *Subject) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj72++ - if yyhl72 { - yyb72 = yyj72 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb72 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb72 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -850,15 +802,21 @@ func (x *Subject) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj72++ - if yyhl72 { - yyb72 = yyj72 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb72 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb72 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -866,15 +824,21 @@ func (x *Subject) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv17 := &x.Name + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } } - yyj72++ - if yyhl72 { - yyb72 = yyj72 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb72 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb72 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -882,20 +846,26 @@ func (x *Subject) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Namespace = "" } else { - x.Namespace = string(r.DecodeString()) + yyv19 := &x.Namespace + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } } for { - yyj72++ - if yyhl72 { - yyb72 = yyj72 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb72 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb72 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj72-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -907,33 +877,33 @@ func (x *RoleRef) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym77 := z.EncBinary() - _ = yym77 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep78 := !z.EncBinary() - yy2arr78 := z.EncBasicHandle().StructToArray - var yyq78 [3]bool - _, _, _ = yysep78, yyq78, yy2arr78 - const yyr78 bool = false - var yynn78 int - if yyr78 || yy2arr78 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn78 = 3 - for _, b := range yyq78 { + yynn2 = 3 + for _, b := range yyq2 { if b { - yynn78++ + yynn2++ } } - r.EncodeMapStart(yynn78) - yynn78 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr78 || yy2arr78 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym80 := z.EncBinary() - _ = yym80 + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) @@ -942,17 +912,17 @@ func (x *RoleRef) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiGroup")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym81 := z.EncBinary() - _ = yym81 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) } } - if yyr78 || yy2arr78 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym83 := z.EncBinary() - _ = yym83 + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -961,17 +931,17 @@ func (x *RoleRef) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym84 := z.EncBinary() - _ = yym84 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } - if yyr78 || yy2arr78 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym86 := z.EncBinary() - _ = yym86 + 
yym10 := z.EncBinary() + _ = yym10 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) @@ -980,14 +950,14 @@ func (x *RoleRef) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("name")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym87 := z.EncBinary() - _ = yym87 + yym11 := z.EncBinary() + _ = yym11 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } - if yyr78 || yy2arr78 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -1000,25 +970,25 @@ func (x *RoleRef) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym88 := z.DecBinary() - _ = yym88 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct89 := r.ContainerType() - if yyct89 == codecSelferValueTypeMap1234 { - yyl89 := r.ReadMapStart() - if yyl89 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl89, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct89 == codecSelferValueTypeArray1234 { - yyl89 := r.ReadArrayStart() - if yyl89 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl89, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -1030,12 +1000,12 @@ func (x *RoleRef) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys90Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys90Slc - var yyhl90 bool = l >= 0 - for yyj90 := 0; ; yyj90++ { - if yyhl90 { - if yyj90 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -1044,32 +1014,50 @@ func (x *RoleRef) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys90Slc = r.DecodeBytes(yys90Slc, true, true) - yys90 := string(yys90Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys90 { + switch yys3 { case "apiGroup": if r.TryDecodeAsNil() { x.APIGroup = "" } else { - x.APIGroup = string(r.DecodeString()) + yyv4 := &x.APIGroup + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv6 := &x.Kind + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "name": if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } } default: - z.DecStructFieldNotFound(-1, yys90) - } // end switch yys90 - } // end for yyj90 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -1077,16 +1065,16 @@ func (x *RoleRef) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj94 int - var yyb94 bool - var yyhl94 bool = l >= 0 - yyj94++ - if yyhl94 { - yyb94 = yyj94 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb94 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb94 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1094,15 +1082,21 @@ func (x *RoleRef) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIGroup = "" } else { - x.APIGroup = string(r.DecodeString()) + yyv11 := &x.APIGroup + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } } - yyj94++ - if yyhl94 { - yyb94 = yyj94 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb94 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb94 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1110,15 +1104,21 @@ func (x *RoleRef) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj94++ - if yyhl94 { - yyb94 = yyj94 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb94 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb94 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1126,20 +1126,26 @@ func (x *RoleRef) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Name = "" } else { - x.Name = string(r.DecodeString()) + yyv15 := &x.Name + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } for { - yyj94++ - if yyhl94 { - yyb94 = yyj94 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb94 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb94 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj94-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -1151,37 +1157,37 @@ func (x *Role) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym98 := z.EncBinary() - _ = yym98 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep99 := !z.EncBinary() - yy2arr99 := z.EncBasicHandle().StructToArray - var yyq99 [4]bool - _, _, _ = yysep99, yyq99, yy2arr99 - const yyr99 bool = false - yyq99[0] = x.Kind != "" - yyq99[1] = x.APIVersion != "" - yyq99[2] = true - var yynn99 int - if yyr99 || yy2arr99 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn99 = 1 - for _, b := range yyq99 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn99++ + yynn2++ } } - r.EncodeMapStart(yynn99) - yynn99 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr99 || yy2arr99 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq99[0] { - 
yym101 := z.EncBinary() - _ = yym101 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -1190,23 +1196,23 @@ func (x *Role) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq99[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym102 := z.EncBinary() - _ = yym102 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr99 || yy2arr99 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq99[1] { - yym104 := z.EncBinary() - _ = yym104 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -1215,42 +1221,54 @@ func (x *Role) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq99[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym105 := z.EncBinary() - _ = yym105 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr99 || yy2arr99 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq99[2] { - yy107 := &x.ObjectMeta - yy107.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq99[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy108 := &x.ObjectMeta - yy108.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr99 || yy2arr99 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Rules == nil { r.EncodeNil() } else { - yym110 := z.EncBinary() - _ = yym110 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) @@ -1263,15 +1281,15 @@ func (x *Role) CodecEncodeSelf(e *codec1978.Encoder) { if x.Rules == nil { r.EncodeNil() } else { - yym111 := z.EncBinary() - _ = yym111 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) } } } - if yyr99 || yy2arr99 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -1284,25 +1302,25 @@ func (x *Role) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym112 := z.DecBinary() - _ = yym112 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct113 := r.ContainerType() - if yyct113 == codecSelferValueTypeMap1234 { - yyl113 := r.ReadMapStart() - if yyl113 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl113, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct113 == codecSelferValueTypeArray1234 { - yyl113 := r.ReadArrayStart() - if yyl113 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl113, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -1314,12 +1332,12 @@ func (x *Role) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys114Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys114Slc - var yyhl114 bool = l >= 0 - for yyj114 := 0; ; yyj114++ { - if yyhl114 { - if yyj114 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -1328,45 +1346,63 @@ func (x *Role) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys114Slc = r.DecodeBytes(yys114Slc, true, true) - yys114 := string(yys114Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys114 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv117 := &x.ObjectMeta - yyv117.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "rules": if r.TryDecodeAsNil() { x.Rules = nil } else { - yyv118 := &x.Rules - yym119 := z.DecBinary() - _ = yym119 + yyv10 := &x.Rules + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSlicePolicyRule((*[]PolicyRule)(yyv118), d) + h.decSlicePolicyRule((*[]PolicyRule)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys114) - } // end switch yys114 - } // end for yyj114 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -1374,16 +1410,16 @@ func (x *Role) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj120 int - var yyb120 bool - var yyhl120 bool = l >= 0 - yyj120++ - if yyhl120 { - yyb120 = yyj120 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb120 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb120 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1391,15 +1427,21 @@ func (x *Role) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = 
string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj120++ - if yyhl120 { - yyb120 = yyj120 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb120 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb120 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1407,32 +1449,44 @@ func (x *Role) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj120++ - if yyhl120 { - yyb120 = yyj120 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb120 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb120 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv123 := &x.ObjectMeta - yyv123.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj120++ - if yyhl120 { - yyb120 = yyj120 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb120 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb120 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1440,26 +1494,26 @@ func (x *Role) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Rules = nil } else { - yyv124 := &x.Rules - yym125 := z.DecBinary() - _ = yym125 + yyv19 := &x.Rules + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSlicePolicyRule((*[]PolicyRule)(yyv124), d) + h.decSlicePolicyRule((*[]PolicyRule)(yyv19), d) } } for { - yyj120++ - if yyhl120 { - yyb120 = yyj120 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb120 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb120 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj120-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -1471,37 +1525,37 @@ func (x *RoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym126 := z.EncBinary() - _ = yym126 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep127 := !z.EncBinary() - yy2arr127 := z.EncBasicHandle().StructToArray - var yyq127 [5]bool - _, _, _ = yysep127, yyq127, yy2arr127 - const yyr127 bool = false - yyq127[0] = x.Kind != "" - yyq127[1] = x.APIVersion != "" - yyq127[2] = true - var yynn127 int - if yyr127 || yy2arr127 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn127 = 2 - for _, b := range yyq127 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn127++ + yynn2++ } } - r.EncodeMapStart(yynn127) - yynn127 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr127 || yy2arr127 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - 
if yyq127[0] { - yym129 := z.EncBinary() - _ = yym129 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -1510,23 +1564,23 @@ func (x *RoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq127[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym130 := z.EncBinary() - _ = yym130 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr127 || yy2arr127 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq127[1] { - yym132 := z.EncBinary() - _ = yym132 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -1535,42 +1589,54 @@ func (x *RoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq127[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym133 := z.EncBinary() - _ = yym133 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr127 || yy2arr127 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq127[2] { - yy135 := &x.ObjectMeta - yy135.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq127[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy136 := &x.ObjectMeta - yy136.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr127 || yy2arr127 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Subjects == nil { r.EncodeNil() } else { - yym138 := z.EncBinary() - _ = yym138 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceSubject(([]Subject)(x.Subjects), e) @@ -1583,26 +1649,26 @@ func (x *RoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { if x.Subjects == nil { r.EncodeNil() } else { - yym139 := z.EncBinary() - _ = yym139 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceSubject(([]Subject)(x.Subjects), e) } } } - if yyr127 || yy2arr127 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy141 := &x.RoleRef - yy141.CodecEncodeSelf(e) + yy18 := &x.RoleRef + yy18.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("roleRef")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy142 := &x.RoleRef - yy142.CodecEncodeSelf(e) + yy20 := &x.RoleRef + yy20.CodecEncodeSelf(e) } - if yyr127 || yy2arr127 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -1615,25 +1681,25 @@ 
func (x *RoleBinding) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym143 := z.DecBinary() - _ = yym143 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct144 := r.ContainerType() - if yyct144 == codecSelferValueTypeMap1234 { - yyl144 := r.ReadMapStart() - if yyl144 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl144, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct144 == codecSelferValueTypeArray1234 { - yyl144 := r.ReadArrayStart() - if yyl144 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl144, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -1645,12 +1711,12 @@ func (x *RoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys145Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys145Slc - var yyhl145 bool = l >= 0 - for yyj145 := 0; ; yyj145++ { - if yyhl145 { - if yyj145 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -1659,52 +1725,70 @@ func (x *RoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys145Slc = r.DecodeBytes(yys145Slc, true, true) - yys145 := string(yys145Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys145 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv148 := &x.ObjectMeta - yyv148.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "subjects": if r.TryDecodeAsNil() { x.Subjects = nil } else { - yyv149 := &x.Subjects - yym150 := z.DecBinary() - _ = yym150 + yyv10 := &x.Subjects + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceSubject((*[]Subject)(yyv149), d) + h.decSliceSubject((*[]Subject)(yyv10), d) } } case "roleRef": if r.TryDecodeAsNil() { x.RoleRef = RoleRef{} } else { - yyv151 := &x.RoleRef - yyv151.CodecDecodeSelf(d) + yyv12 := &x.RoleRef + yyv12.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys145) - } // end switch yys145 - } // end for yyj145 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ 
-1712,16 +1796,16 @@ func (x *RoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj152 int - var yyb152 bool - var yyhl152 bool = l >= 0 - yyj152++ - if yyhl152 { - yyb152 = yyj152 > l + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb152 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb152 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1729,15 +1813,21 @@ func (x *RoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv14 := &x.Kind + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } - yyj152++ - if yyhl152 { - yyb152 = yyj152 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb152 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb152 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1745,32 +1835,44 @@ func (x *RoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv16 := &x.APIVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } } - yyj152++ - if yyhl152 { - yyb152 = yyj152 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb152 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb152 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv155 := &x.ObjectMeta - yyv155.CodecDecodeSelf(d) + yyv18 := &x.ObjectMeta + yym19 := z.DecBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.DecExt(yyv18) { + } else { + z.DecFallback(yyv18, false) + } } - yyj152++ - if yyhl152 { - yyb152 = yyj152 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb152 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb152 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1778,21 +1880,21 @@ func (x *RoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Subjects = nil } else { - yyv156 := &x.Subjects - yym157 := z.DecBinary() - _ = yym157 + yyv20 := &x.Subjects + yym21 := z.DecBinary() + _ = yym21 if false { } else { - h.decSliceSubject((*[]Subject)(yyv156), d) + h.decSliceSubject((*[]Subject)(yyv20), d) } } - yyj152++ - if yyhl152 { - yyb152 = yyj152 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb152 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb152 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1800,21 +1902,21 @@ func (x *RoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.RoleRef = RoleRef{} } else { - yyv158 := &x.RoleRef - yyv158.CodecDecodeSelf(d) + yyv22 := &x.RoleRef + yyv22.CodecDecodeSelf(d) } for { - yyj152++ - if yyhl152 { - yyb152 = yyj152 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb152 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb152 { + if yyb13 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj152-1, "") + 
z.DecStructFieldNotFound(yyj13-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -1826,37 +1928,37 @@ func (x *RoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym159 := z.EncBinary() - _ = yym159 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep160 := !z.EncBinary() - yy2arr160 := z.EncBasicHandle().StructToArray - var yyq160 [4]bool - _, _, _ = yysep160, yyq160, yy2arr160 - const yyr160 bool = false - yyq160[0] = x.Kind != "" - yyq160[1] = x.APIVersion != "" - yyq160[2] = true - var yynn160 int - if yyr160 || yy2arr160 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn160 = 1 - for _, b := range yyq160 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn160++ + yynn2++ } } - r.EncodeMapStart(yynn160) - yynn160 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr160 || yy2arr160 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq160[0] { - yym162 := z.EncBinary() - _ = yym162 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -1865,23 +1967,23 @@ func (x *RoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq160[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym163 := z.EncBinary() - _ = yym163 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr160 || yy2arr160 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq160[1] { - yym165 := z.EncBinary() - _ = yym165 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -1890,54 +1992,54 @@ func (x *RoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq160[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym166 := z.EncBinary() - _ = yym166 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr160 || yy2arr160 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq160[2] { - yy168 := &x.ListMeta - yym169 := z.EncBinary() - _ = yym169 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy168) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy168) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq160[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy170 := &x.ListMeta - yym171 := z.EncBinary() - _ = yym171 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } 
else if z.HasExtensions() && z.EncExt(yy170) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy170) + z.EncFallback(yy12) } } } - if yyr160 || yy2arr160 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym173 := z.EncBinary() - _ = yym173 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceRoleBinding(([]RoleBinding)(x.Items), e) @@ -1950,15 +2052,15 @@ func (x *RoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym174 := z.EncBinary() - _ = yym174 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceRoleBinding(([]RoleBinding)(x.Items), e) } } } - if yyr160 || yy2arr160 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -1971,25 +2073,25 @@ func (x *RoleBindingList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym175 := z.DecBinary() - _ = yym175 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct176 := r.ContainerType() - if yyct176 == codecSelferValueTypeMap1234 { - yyl176 := r.ReadMapStart() - if yyl176 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl176, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct176 == codecSelferValueTypeArray1234 { - yyl176 := r.ReadArrayStart() - if yyl176 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl176, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -2001,12 +2103,12 @@ func (x *RoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys177Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys177Slc - var yyhl177 bool = l >= 0 - for yyj177 := 0; ; yyj177++ { - if yyhl177 { - if yyj177 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -2015,51 +2117,63 @@ func (x *RoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys177Slc = r.DecodeBytes(yys177Slc, true, true) - yys177 := string(yys177Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys177 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = 
pkg1_v1.ListMeta{} } else { - yyv180 := &x.ListMeta - yym181 := z.DecBinary() - _ = yym181 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv180) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv180, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv182 := &x.Items - yym183 := z.DecBinary() - _ = yym183 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceRoleBinding((*[]RoleBinding)(yyv182), d) + h.decSliceRoleBinding((*[]RoleBinding)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys177) - } // end switch yys177 - } // end for yyj177 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -2067,16 +2181,16 @@ func (x *RoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj184 int - var yyb184 bool - var yyhl184 bool = l >= 0 - yyj184++ - if yyhl184 { - yyb184 = yyj184 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb184 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb184 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2084,15 +2198,21 @@ func (x *RoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj184++ - if yyhl184 { - yyb184 = yyj184 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb184 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb184 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2100,38 +2220,44 @@ func (x *RoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj184++ - if yyhl184 { - yyb184 = yyj184 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb184 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb184 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv187 := &x.ListMeta - yym188 := z.DecBinary() - _ = yym188 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv187) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv187, false) + z.DecFallback(yyv17, false) } } - yyj184++ - if yyhl184 { - yyb184 = yyj184 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb184 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb184 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2139,26 +2265,26 @@ func (x *RoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Items = nil } else { - yyv189 := &x.Items - yym190 := z.DecBinary() - _ = yym190 + yyv19 := &x.Items + yym20 := z.DecBinary() 
+ _ = yym20 if false { } else { - h.decSliceRoleBinding((*[]RoleBinding)(yyv189), d) + h.decSliceRoleBinding((*[]RoleBinding)(yyv19), d) } } for { - yyj184++ - if yyhl184 { - yyb184 = yyj184 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb184 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb184 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj184-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -2170,37 +2296,37 @@ func (x *RoleList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym191 := z.EncBinary() - _ = yym191 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep192 := !z.EncBinary() - yy2arr192 := z.EncBasicHandle().StructToArray - var yyq192 [4]bool - _, _, _ = yysep192, yyq192, yy2arr192 - const yyr192 bool = false - yyq192[0] = x.Kind != "" - yyq192[1] = x.APIVersion != "" - yyq192[2] = true - var yynn192 int - if yyr192 || yy2arr192 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn192 = 1 - for _, b := range yyq192 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn192++ + yynn2++ } } - r.EncodeMapStart(yynn192) - yynn192 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr192 || yy2arr192 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq192[0] { - yym194 := z.EncBinary() - _ = yym194 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -2209,23 +2335,23 @@ func (x *RoleList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq192[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym195 := z.EncBinary() - _ = yym195 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr192 || yy2arr192 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq192[1] { - yym197 := z.EncBinary() - _ = yym197 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -2234,54 +2360,54 @@ func (x *RoleList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq192[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym198 := z.EncBinary() - _ = yym198 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr192 || yy2arr192 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq192[2] { - yy200 := &x.ListMeta - yym201 := z.EncBinary() - _ = yym201 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy200) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - 
z.EncFallback(yy200) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq192[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy202 := &x.ListMeta - yym203 := z.EncBinary() - _ = yym203 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy202) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy202) + z.EncFallback(yy12) } } } - if yyr192 || yy2arr192 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym205 := z.EncBinary() - _ = yym205 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceRole(([]Role)(x.Items), e) @@ -2294,15 +2420,15 @@ func (x *RoleList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym206 := z.EncBinary() - _ = yym206 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceRole(([]Role)(x.Items), e) } } } - if yyr192 || yy2arr192 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -2315,25 +2441,25 @@ func (x *RoleList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym207 := z.DecBinary() - _ = yym207 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct208 := r.ContainerType() - if yyct208 == codecSelferValueTypeMap1234 { - yyl208 := r.ReadMapStart() - if yyl208 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl208, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct208 == codecSelferValueTypeArray1234 { - yyl208 := r.ReadArrayStart() - if yyl208 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl208, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -2345,12 +2471,12 @@ func (x *RoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys209Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys209Slc - var yyhl209 bool = l >= 0 - for yyj209 := 0; ; yyj209++ { - if yyhl209 { - if yyj209 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -2359,51 +2485,63 @@ func (x *RoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys209Slc = r.DecodeBytes(yys209Slc, true, true) - yys209 := string(yys209Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys209 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = 
r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv212 := &x.ListMeta - yym213 := z.DecBinary() - _ = yym213 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv212) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv212, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv214 := &x.Items - yym215 := z.DecBinary() - _ = yym215 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceRole((*[]Role)(yyv214), d) + h.decSliceRole((*[]Role)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys209) - } // end switch yys209 - } // end for yyj209 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -2411,16 +2549,16 @@ func (x *RoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj216 int - var yyb216 bool - var yyhl216 bool = l >= 0 - yyj216++ - if yyhl216 { - yyb216 = yyj216 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb216 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb216 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2428,15 +2566,21 @@ func (x *RoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj216++ - if yyhl216 { - yyb216 = yyj216 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb216 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb216 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2444,38 +2588,44 @@ func (x *RoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj216++ - if yyhl216 { - yyb216 = yyj216 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb216 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb216 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv219 := &x.ListMeta - yym220 := z.DecBinary() - _ = yym220 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv219) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv219, false) + z.DecFallback(yyv17, false) } } - yyj216++ - if yyhl216 { - yyb216 = yyj216 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb216 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb216 { + if yyb12 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2483,26 +2633,26 @@ func (x *RoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Items = nil } else { - yyv221 := &x.Items - yym222 := z.DecBinary() - _ = yym222 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceRole((*[]Role)(yyv221), d) + h.decSliceRole((*[]Role)(yyv19), d) } } for { - yyj216++ - if yyhl216 { - yyb216 = yyj216 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb216 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb216 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj216-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -2514,37 +2664,37 @@ func (x *ClusterRole) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym223 := z.EncBinary() - _ = yym223 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep224 := !z.EncBinary() - yy2arr224 := z.EncBasicHandle().StructToArray - var yyq224 [4]bool - _, _, _ = yysep224, yyq224, yy2arr224 - const yyr224 bool = false - yyq224[0] = x.Kind != "" - yyq224[1] = x.APIVersion != "" - yyq224[2] = true - var yynn224 int - if yyr224 || yy2arr224 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn224 = 1 - for _, b := range yyq224 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn224++ + yynn2++ } } - r.EncodeMapStart(yynn224) - yynn224 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr224 || yy2arr224 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq224[0] { - yym226 := z.EncBinary() - _ = yym226 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -2553,23 +2703,23 @@ func (x *ClusterRole) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq224[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym227 := z.EncBinary() - _ = yym227 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr224 || yy2arr224 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq224[1] { - yym229 := z.EncBinary() - _ = yym229 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -2578,42 +2728,54 @@ func (x *ClusterRole) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq224[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym230 := z.EncBinary() - _ = yym230 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr224 || yy2arr224 { + if yyr2 || yy2arr2 { 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq224[2] { - yy232 := &x.ObjectMeta - yy232.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq224[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy233 := &x.ObjectMeta - yy233.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr224 || yy2arr224 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Rules == nil { r.EncodeNil() } else { - yym235 := z.EncBinary() - _ = yym235 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) @@ -2626,15 +2788,15 @@ func (x *ClusterRole) CodecEncodeSelf(e *codec1978.Encoder) { if x.Rules == nil { r.EncodeNil() } else { - yym236 := z.EncBinary() - _ = yym236 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) } } } - if yyr224 || yy2arr224 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -2647,25 +2809,25 @@ func (x *ClusterRole) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym237 := z.DecBinary() - _ = yym237 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct238 := r.ContainerType() - if yyct238 == codecSelferValueTypeMap1234 { - yyl238 := r.ReadMapStart() - if yyl238 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl238, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct238 == codecSelferValueTypeArray1234 { - yyl238 := r.ReadArrayStart() - if yyl238 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl238, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -2677,12 +2839,12 @@ func (x *ClusterRole) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys239Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys239Slc - var yyhl239 bool = l >= 0 - for yyj239 := 0; ; yyj239++ { - if yyhl239 { - if yyj239 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -2691,45 +2853,63 @@ func (x *ClusterRole) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys239Slc = r.DecodeBytes(yys239Slc, true, true) - yys239 := string(yys239Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys239 { + 
switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv242 := &x.ObjectMeta - yyv242.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "rules": if r.TryDecodeAsNil() { x.Rules = nil } else { - yyv243 := &x.Rules - yym244 := z.DecBinary() - _ = yym244 + yyv10 := &x.Rules + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSlicePolicyRule((*[]PolicyRule)(yyv243), d) + h.decSlicePolicyRule((*[]PolicyRule)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys239) - } // end switch yys239 - } // end for yyj239 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -2737,16 +2917,16 @@ func (x *ClusterRole) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj245 int - var yyb245 bool - var yyhl245 bool = l >= 0 - yyj245++ - if yyhl245 { - yyb245 = yyj245 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb245 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb245 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2754,15 +2934,21 @@ func (x *ClusterRole) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj245++ - if yyhl245 { - yyb245 = yyj245 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb245 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb245 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2770,32 +2956,44 @@ func (x *ClusterRole) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj245++ - if yyhl245 { - yyb245 = yyj245 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb245 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb245 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv248 := &x.ObjectMeta - yyv248.CodecDecodeSelf(d) + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } } - yyj245++ - if yyhl245 { - yyb245 = yyj245 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb245 = 
r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb245 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2803,26 +3001,26 @@ func (x *ClusterRole) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Rules = nil } else { - yyv249 := &x.Rules - yym250 := z.DecBinary() - _ = yym250 + yyv19 := &x.Rules + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSlicePolicyRule((*[]PolicyRule)(yyv249), d) + h.decSlicePolicyRule((*[]PolicyRule)(yyv19), d) } } for { - yyj245++ - if yyhl245 { - yyb245 = yyj245 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb245 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb245 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj245-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -2834,37 +3032,37 @@ func (x *ClusterRoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym251 := z.EncBinary() - _ = yym251 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep252 := !z.EncBinary() - yy2arr252 := z.EncBasicHandle().StructToArray - var yyq252 [5]bool - _, _, _ = yysep252, yyq252, yy2arr252 - const yyr252 bool = false - yyq252[0] = x.Kind != "" - yyq252[1] = x.APIVersion != "" - yyq252[2] = true - var yynn252 int - if yyr252 || yy2arr252 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn252 = 2 - for _, b := range yyq252 { + yynn2 = 2 + for _, b := range yyq2 { if b { - yynn252++ + yynn2++ } } - r.EncodeMapStart(yynn252) - yynn252 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr252 || yy2arr252 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq252[0] { - yym254 := z.EncBinary() - _ = yym254 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -2873,23 +3071,23 @@ func (x *ClusterRoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq252[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym255 := z.EncBinary() - _ = yym255 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr252 || yy2arr252 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq252[1] { - yym257 := z.EncBinary() - _ = yym257 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -2898,42 +3096,54 @@ func (x *ClusterRoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq252[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym258 := z.EncBinary() - _ = yym258 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, 
string(x.APIVersion)) } } } - if yyr252 || yy2arr252 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq252[2] { - yy260 := &x.ObjectMeta - yy260.CodecEncodeSelf(e) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } } else { - if yyq252[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy261 := &x.ObjectMeta - yy261.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } - if yyr252 || yy2arr252 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Subjects == nil { r.EncodeNil() } else { - yym263 := z.EncBinary() - _ = yym263 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceSubject(([]Subject)(x.Subjects), e) @@ -2946,26 +3156,26 @@ func (x *ClusterRoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { if x.Subjects == nil { r.EncodeNil() } else { - yym264 := z.EncBinary() - _ = yym264 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceSubject(([]Subject)(x.Subjects), e) } } } - if yyr252 || yy2arr252 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy266 := &x.RoleRef - yy266.CodecEncodeSelf(e) + yy18 := &x.RoleRef + yy18.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("roleRef")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy267 := &x.RoleRef - yy267.CodecEncodeSelf(e) + yy20 := &x.RoleRef + yy20.CodecEncodeSelf(e) } - if yyr252 || yy2arr252 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -2978,25 +3188,25 @@ func (x *ClusterRoleBinding) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym268 := z.DecBinary() - _ = yym268 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct269 := r.ContainerType() - if yyct269 == codecSelferValueTypeMap1234 { - yyl269 := r.ReadMapStart() - if yyl269 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl269, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct269 == codecSelferValueTypeArray1234 { - yyl269 := r.ReadArrayStart() - if yyl269 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl269, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -3008,12 +3218,12 @@ func (x *ClusterRoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys270Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys270Slc - var yyhl270 bool = l >= 0 - for yyj270 := 0; ; yyj270++ { - if yyhl270 { - if yyj270 >= 
l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -3022,52 +3232,70 @@ func (x *ClusterRoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys270Slc = r.DecodeBytes(yys270Slc, true, true) - yys270 := string(yys270Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys270 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv273 := &x.ObjectMeta - yyv273.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "subjects": if r.TryDecodeAsNil() { x.Subjects = nil } else { - yyv274 := &x.Subjects - yym275 := z.DecBinary() - _ = yym275 + yyv10 := &x.Subjects + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceSubject((*[]Subject)(yyv274), d) + h.decSliceSubject((*[]Subject)(yyv10), d) } } case "roleRef": if r.TryDecodeAsNil() { x.RoleRef = RoleRef{} } else { - yyv276 := &x.RoleRef - yyv276.CodecDecodeSelf(d) + yyv12 := &x.RoleRef + yyv12.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys270) - } // end switch yys270 - } // end for yyj270 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -3075,16 +3303,16 @@ func (x *ClusterRoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj277 int - var yyb277 bool - var yyhl277 bool = l >= 0 - yyj277++ - if yyhl277 { - yyb277 = yyj277 > l + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb277 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb277 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3092,15 +3320,21 @@ func (x *ClusterRoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv14 := &x.Kind + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } } - yyj277++ - if yyhl277 { - yyb277 = yyj277 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb277 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb277 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3108,32 +3342,44 @@ func (x *ClusterRoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv16 := &x.APIVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = 
r.DecodeString() + } } - yyj277++ - if yyhl277 { - yyb277 = yyj277 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb277 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb277 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv280 := &x.ObjectMeta - yyv280.CodecDecodeSelf(d) + yyv18 := &x.ObjectMeta + yym19 := z.DecBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.DecExt(yyv18) { + } else { + z.DecFallback(yyv18, false) + } } - yyj277++ - if yyhl277 { - yyb277 = yyj277 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb277 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb277 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3141,21 +3387,21 @@ func (x *ClusterRoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.Subjects = nil } else { - yyv281 := &x.Subjects - yym282 := z.DecBinary() - _ = yym282 + yyv20 := &x.Subjects + yym21 := z.DecBinary() + _ = yym21 if false { } else { - h.decSliceSubject((*[]Subject)(yyv281), d) + h.decSliceSubject((*[]Subject)(yyv20), d) } } - yyj277++ - if yyhl277 { - yyb277 = yyj277 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb277 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb277 { + if yyb13 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3163,21 +3409,21 @@ func (x *ClusterRoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.RoleRef = RoleRef{} } else { - yyv283 := &x.RoleRef - yyv283.CodecDecodeSelf(d) + yyv22 := &x.RoleRef + yyv22.CodecDecodeSelf(d) } for { - yyj277++ - if yyhl277 { - yyb277 = yyj277 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb277 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb277 { + if yyb13 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj277-1, "") + z.DecStructFieldNotFound(yyj13-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -3189,37 +3435,37 @@ func (x *ClusterRoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym284 := z.EncBinary() - _ = yym284 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep285 := !z.EncBinary() - yy2arr285 := z.EncBasicHandle().StructToArray - var yyq285 [4]bool - _, _, _ = yysep285, yyq285, yy2arr285 - const yyr285 bool = false - yyq285[0] = x.Kind != "" - yyq285[1] = x.APIVersion != "" - yyq285[2] = true - var yynn285 int - if yyr285 || yy2arr285 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn285 = 1 - for _, b := range yyq285 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn285++ + yynn2++ } } - r.EncodeMapStart(yynn285) - yynn285 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr285 || yy2arr285 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq285[0] { - yym287 := z.EncBinary() - _ = yym287 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, 
string(x.Kind)) @@ -3228,23 +3474,23 @@ func (x *ClusterRoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq285[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym288 := z.EncBinary() - _ = yym288 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr285 || yy2arr285 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq285[1] { - yym290 := z.EncBinary() - _ = yym290 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -3253,54 +3499,54 @@ func (x *ClusterRoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq285[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym291 := z.EncBinary() - _ = yym291 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr285 || yy2arr285 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq285[2] { - yy293 := &x.ListMeta - yym294 := z.EncBinary() - _ = yym294 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy293) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy293) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq285[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy295 := &x.ListMeta - yym296 := z.EncBinary() - _ = yym296 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy295) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy295) + z.EncFallback(yy12) } } } - if yyr285 || yy2arr285 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym298 := z.EncBinary() - _ = yym298 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e) @@ -3313,15 +3559,15 @@ func (x *ClusterRoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym299 := z.EncBinary() - _ = yym299 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e) } } } - if yyr285 || yy2arr285 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -3334,25 +3580,25 @@ func (x *ClusterRoleBindingList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym300 := z.DecBinary() - _ = yym300 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct301 := r.ContainerType() - if yyct301 == codecSelferValueTypeMap1234 { - yyl301 := r.ReadMapStart() - if yyl301 == 0 { + yyct2 := 
r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl301, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct301 == codecSelferValueTypeArray1234 { - yyl301 := r.ReadArrayStart() - if yyl301 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl301, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -3364,12 +3610,12 @@ func (x *ClusterRoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Deco var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys302Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys302Slc - var yyhl302 bool = l >= 0 - for yyj302 := 0; ; yyj302++ { - if yyhl302 { - if yyj302 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -3378,51 +3624,63 @@ func (x *ClusterRoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Deco } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys302Slc = r.DecodeBytes(yys302Slc, true, true) - yys302 := string(yys302Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys302 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv305 := &x.ListMeta - yym306 := z.DecBinary() - _ = yym306 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv305) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv305, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv307 := &x.Items - yym308 := z.DecBinary() - _ = yym308 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv307), d) + h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys302) - } // end switch yys302 - } // end for yyj302 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -3430,16 +3688,16 @@ func (x *ClusterRoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.De var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj309 int - var yyb309 bool - var yyhl309 bool = l >= 0 - yyj309++ - if yyhl309 { - yyb309 = yyj309 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb309 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if 
yyb309 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3447,15 +3705,21 @@ func (x *ClusterRoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.De if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj309++ - if yyhl309 { - yyb309 = yyj309 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb309 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb309 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3463,38 +3727,44 @@ func (x *ClusterRoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.De if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj309++ - if yyhl309 { - yyb309 = yyj309 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb309 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb309 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv312 := &x.ListMeta - yym313 := z.DecBinary() - _ = yym313 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv312) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv312, false) + z.DecFallback(yyv17, false) } } - yyj309++ - if yyhl309 { - yyb309 = yyj309 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb309 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb309 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3502,26 +3772,26 @@ func (x *ClusterRoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.De if r.TryDecodeAsNil() { x.Items = nil } else { - yyv314 := &x.Items - yym315 := z.DecBinary() - _ = yym315 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv314), d) + h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv19), d) } } for { - yyj309++ - if yyhl309 { - yyb309 = yyj309 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb309 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb309 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj309-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -3533,37 +3803,37 @@ func (x *ClusterRoleList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym316 := z.EncBinary() - _ = yym316 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep317 := !z.EncBinary() - yy2arr317 := z.EncBasicHandle().StructToArray - var yyq317 [4]bool - _, _, _ = yysep317, yyq317, yy2arr317 - const yyr317 bool = false - yyq317[0] = x.Kind != "" - yyq317[1] = x.APIVersion != "" - yyq317[2] = true - var yynn317 int - if yyr317 || yy2arr317 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + 
yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn317 = 1 - for _, b := range yyq317 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn317++ + yynn2++ } } - r.EncodeMapStart(yynn317) - yynn317 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr317 || yy2arr317 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq317[0] { - yym319 := z.EncBinary() - _ = yym319 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -3572,23 +3842,23 @@ func (x *ClusterRoleList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq317[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym320 := z.EncBinary() - _ = yym320 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr317 || yy2arr317 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq317[1] { - yym322 := z.EncBinary() - _ = yym322 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -3597,54 +3867,54 @@ func (x *ClusterRoleList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq317[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym323 := z.EncBinary() - _ = yym323 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr317 || yy2arr317 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq317[2] { - yy325 := &x.ListMeta - yym326 := z.EncBinary() - _ = yym326 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy325) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy325) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq317[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy327 := &x.ListMeta - yym328 := z.EncBinary() - _ = yym328 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy327) { + } else if z.HasExtensions() && z.EncExt(yy12) { } else { - z.EncFallback(yy327) + z.EncFallback(yy12) } } } - if yyr317 || yy2arr317 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym330 := z.EncBinary() - _ = yym330 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceClusterRole(([]ClusterRole)(x.Items), e) @@ -3657,15 +3927,15 @@ func (x *ClusterRoleList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym331 := z.EncBinary() - _ = yym331 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceClusterRole(([]ClusterRole)(x.Items), e) } } } - if yyr317 || yy2arr317 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -3678,25 +3948,25 @@ func (x *ClusterRoleList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym332 := z.DecBinary() - _ = yym332 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct333 := r.ContainerType() - if yyct333 == codecSelferValueTypeMap1234 { - yyl333 := r.ReadMapStart() - if yyl333 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl333, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct333 == codecSelferValueTypeArray1234 { - yyl333 := r.ReadArrayStart() - if yyl333 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl333, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -3708,12 +3978,12 @@ func (x *ClusterRoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys334Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys334Slc - var yyhl334 bool = l >= 0 - for yyj334 := 0; ; yyj334++ { - if yyhl334 { - if yyj334 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -3722,51 +3992,63 @@ func (x *ClusterRoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys334Slc = r.DecodeBytes(yys334Slc, true, true) - yys334 := string(yys334Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys334 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv337 := &x.ListMeta - yym338 := z.DecBinary() - _ = yym338 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv337) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv337, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv339 := &x.Items - yym340 := z.DecBinary() - _ = yym340 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceClusterRole((*[]ClusterRole)(yyv339), d) + h.decSliceClusterRole((*[]ClusterRole)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys334) - } // end switch yys334 - } // end for yyj334 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } 
@@ -3774,16 +4056,16 @@ func (x *ClusterRoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj341 int - var yyb341 bool - var yyhl341 bool = l >= 0 - yyj341++ - if yyhl341 { - yyb341 = yyj341 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb341 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb341 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3791,15 +4073,21 @@ func (x *ClusterRoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj341++ - if yyhl341 { - yyb341 = yyj341 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb341 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb341 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3807,38 +4095,44 @@ func (x *ClusterRoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj341++ - if yyhl341 { - yyb341 = yyj341 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb341 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb341 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv344 := &x.ListMeta - yym345 := z.DecBinary() - _ = yym345 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv344) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv344, false) + z.DecFallback(yyv17, false) } } - yyj341++ - if yyhl341 { - yyb341 = yyj341 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb341 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb341 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3846,26 +4140,26 @@ func (x *ClusterRoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Items = nil } else { - yyv346 := &x.Items - yym347 := z.DecBinary() - _ = yym347 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceClusterRole((*[]ClusterRole)(yyv346), d) + h.decSliceClusterRole((*[]ClusterRole)(yyv19), d) } } for { - yyj341++ - if yyhl341 { - yyb341 = yyj341 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb341 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb341 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj341-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -3875,10 +4169,10 @@ func (x codecSelfer1234) encSlicePolicyRule(v []PolicyRule, e *codec1978.Encoder z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv348 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy349 := &yyv348 - 
yy349.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -3888,83 +4182,86 @@ func (x codecSelfer1234) decSlicePolicyRule(v *[]PolicyRule, d *codec1978.Decode z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv350 := *v - yyh350, yyl350 := z.DecSliceHelperStart() - var yyc350 bool - if yyl350 == 0 { - if yyv350 == nil { - yyv350 = []PolicyRule{} - yyc350 = true - } else if len(yyv350) != 0 { - yyv350 = yyv350[:0] - yyc350 = true - } - } else if yyl350 > 0 { - var yyrr350, yyrl350 int - var yyrt350 bool - if yyl350 > cap(yyv350) { + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PolicyRule{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg350 := len(yyv350) > 0 - yyv2350 := yyv350 - yyrl350, yyrt350 = z.DecInferLen(yyl350, z.DecBasicHandle().MaxInitLen, 160) - if yyrt350 { - if yyrl350 <= cap(yyv350) { - yyv350 = yyv350[:yyrl350] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 120) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv350 = make([]PolicyRule, yyrl350) + yyv1 = make([]PolicyRule, yyrl1) } } else { - yyv350 = make([]PolicyRule, yyrl350) + yyv1 = make([]PolicyRule, yyrl1) } - yyc350 = true - yyrr350 = len(yyv350) - if yyrg350 { - copy(yyv350, yyv2350) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl350 != len(yyv350) { - yyv350 = yyv350[:yyl350] - yyc350 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj350 := 0 - for ; yyj350 < yyrr350; yyj350++ { - yyh350.ElemContainerState(yyj350) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv350[yyj350] = PolicyRule{} + yyv1[yyj1] = PolicyRule{} } else { - yyv351 := &yyv350[yyj350] - yyv351.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt350 { - for ; yyj350 < yyl350; yyj350++ { - yyv350 = append(yyv350, PolicyRule{}) - yyh350.ElemContainerState(yyj350) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PolicyRule{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv350[yyj350] = PolicyRule{} + yyv1[yyj1] = PolicyRule{} } else { - yyv352 := &yyv350[yyj350] - yyv352.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj350 := 0 - for ; !r.CheckBreak(); yyj350++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj350 >= len(yyv350) { - yyv350 = append(yyv350, PolicyRule{}) // var yyz350 PolicyRule - yyc350 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PolicyRule{}) // var yyz1 PolicyRule + yyc1 = true } - yyh350.ElemContainerState(yyj350) - if yyj350 < len(yyv350) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv350[yyj350] = PolicyRule{} + yyv1[yyj1] = PolicyRule{} } else { - yyv353 := &yyv350[yyj350] - yyv353.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -3972,17 +4269,17 @@ func (x codecSelfer1234) decSlicePolicyRule(v *[]PolicyRule, d *codec1978.Decode } } - if yyj350 < len(yyv350) { - yyv350 = yyv350[:yyj350] - yyc350 = true - } else if yyj350 == 0 && yyv350 == nil { - yyv350 = []PolicyRule{} - yyc350 = true + if yyj1 < len(yyv1) { 
+ yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PolicyRule{} + yyc1 = true } } - yyh350.End() - if yyc350 { - *v = yyv350 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -3991,10 +4288,10 @@ func (x codecSelfer1234) encSliceSubject(v []Subject, e *codec1978.Encoder) { z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv354 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy355 := &yyv354 - yy355.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -4004,83 +4301,86 @@ func (x codecSelfer1234) decSliceSubject(v *[]Subject, d *codec1978.Decoder) { z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv356 := *v - yyh356, yyl356 := z.DecSliceHelperStart() - var yyc356 bool - if yyl356 == 0 { - if yyv356 == nil { - yyv356 = []Subject{} - yyc356 = true - } else if len(yyv356) != 0 { - yyv356 = yyv356[:0] - yyc356 = true - } - } else if yyl356 > 0 { - var yyrr356, yyrl356 int - var yyrt356 bool - if yyl356 > cap(yyv356) { + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Subject{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg356 := len(yyv356) > 0 - yyv2356 := yyv356 - yyrl356, yyrt356 = z.DecInferLen(yyl356, z.DecBasicHandle().MaxInitLen, 64) - if yyrt356 { - if yyrl356 <= cap(yyv356) { - yyv356 = yyv356[:yyrl356] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv356 = make([]Subject, yyrl356) + yyv1 = make([]Subject, yyrl1) } } else { - yyv356 = make([]Subject, yyrl356) + yyv1 = make([]Subject, yyrl1) } - yyc356 = true - yyrr356 = len(yyv356) - if yyrg356 { - copy(yyv356, yyv2356) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl356 != len(yyv356) { - yyv356 = yyv356[:yyl356] - yyc356 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj356 := 0 - for ; yyj356 < yyrr356; yyj356++ { - yyh356.ElemContainerState(yyj356) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv356[yyj356] = Subject{} + yyv1[yyj1] = Subject{} } else { - yyv357 := &yyv356[yyj356] - yyv357.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt356 { - for ; yyj356 < yyl356; yyj356++ { - yyv356 = append(yyv356, Subject{}) - yyh356.ElemContainerState(yyj356) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Subject{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv356[yyj356] = Subject{} + yyv1[yyj1] = Subject{} } else { - yyv358 := &yyv356[yyj356] - yyv358.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj356 := 0 - for ; !r.CheckBreak(); yyj356++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj356 >= len(yyv356) { - yyv356 = append(yyv356, Subject{}) // var yyz356 Subject - yyc356 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Subject{}) // var yyz1 Subject + yyc1 = true } - yyh356.ElemContainerState(yyj356) - if yyj356 < len(yyv356) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - 
yyv356[yyj356] = Subject{} + yyv1[yyj1] = Subject{} } else { - yyv359 := &yyv356[yyj356] - yyv359.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -4088,17 +4388,17 @@ func (x codecSelfer1234) decSliceSubject(v *[]Subject, d *codec1978.Decoder) { } } - if yyj356 < len(yyv356) { - yyv356 = yyv356[:yyj356] - yyc356 = true - } else if yyj356 == 0 && yyv356 == nil { - yyv356 = []Subject{} - yyc356 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Subject{} + yyc1 = true } } - yyh356.End() - if yyc356 { - *v = yyv356 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -4107,10 +4407,10 @@ func (x codecSelfer1234) encSliceRoleBinding(v []RoleBinding, e *codec1978.Encod z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv360 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy361 := &yyv360 - yy361.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -4120,83 +4420,86 @@ func (x codecSelfer1234) decSliceRoleBinding(v *[]RoleBinding, d *codec1978.Deco z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv362 := *v - yyh362, yyl362 := z.DecSliceHelperStart() - var yyc362 bool - if yyl362 == 0 { - if yyv362 == nil { - yyv362 = []RoleBinding{} - yyc362 = true - } else if len(yyv362) != 0 { - yyv362 = yyv362[:0] - yyc362 = true - } - } else if yyl362 > 0 { - var yyrr362, yyrl362 int - var yyrt362 bool - if yyl362 > cap(yyv362) { + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []RoleBinding{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg362 := len(yyv362) > 0 - yyv2362 := yyv362 - yyrl362, yyrt362 = z.DecInferLen(yyl362, z.DecBasicHandle().MaxInitLen, 328) - if yyrt362 { - if yyrl362 <= cap(yyv362) { - yyv362 = yyv362[:yyrl362] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 328) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv362 = make([]RoleBinding, yyrl362) + yyv1 = make([]RoleBinding, yyrl1) } } else { - yyv362 = make([]RoleBinding, yyrl362) + yyv1 = make([]RoleBinding, yyrl1) } - yyc362 = true - yyrr362 = len(yyv362) - if yyrg362 { - copy(yyv362, yyv2362) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl362 != len(yyv362) { - yyv362 = yyv362[:yyl362] - yyc362 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj362 := 0 - for ; yyj362 < yyrr362; yyj362++ { - yyh362.ElemContainerState(yyj362) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv362[yyj362] = RoleBinding{} + yyv1[yyj1] = RoleBinding{} } else { - yyv363 := &yyv362[yyj362] - yyv363.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt362 { - for ; yyj362 < yyl362; yyj362++ { - yyv362 = append(yyv362, RoleBinding{}) - yyh362.ElemContainerState(yyj362) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, RoleBinding{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv362[yyj362] = RoleBinding{} + yyv1[yyj1] = RoleBinding{} } else { - yyv364 := &yyv362[yyj362] - 
yyv364.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj362 := 0 - for ; !r.CheckBreak(); yyj362++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj362 >= len(yyv362) { - yyv362 = append(yyv362, RoleBinding{}) // var yyz362 RoleBinding - yyc362 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, RoleBinding{}) // var yyz1 RoleBinding + yyc1 = true } - yyh362.ElemContainerState(yyj362) - if yyj362 < len(yyv362) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv362[yyj362] = RoleBinding{} + yyv1[yyj1] = RoleBinding{} } else { - yyv365 := &yyv362[yyj362] - yyv365.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -4204,17 +4507,17 @@ func (x codecSelfer1234) decSliceRoleBinding(v *[]RoleBinding, d *codec1978.Deco } } - if yyj362 < len(yyv362) { - yyv362 = yyv362[:yyj362] - yyc362 = true - } else if yyj362 == 0 && yyv362 == nil { - yyv362 = []RoleBinding{} - yyc362 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []RoleBinding{} + yyc1 = true } } - yyh362.End() - if yyc362 { - *v = yyv362 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -4223,10 +4526,10 @@ func (x codecSelfer1234) encSliceRole(v []Role, e *codec1978.Encoder) { z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv366 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy367 := &yyv366 - yy367.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -4236,83 +4539,86 @@ func (x codecSelfer1234) decSliceRole(v *[]Role, d *codec1978.Decoder) { z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv368 := *v - yyh368, yyl368 := z.DecSliceHelperStart() - var yyc368 bool - if yyl368 == 0 { - if yyv368 == nil { - yyv368 = []Role{} - yyc368 = true - } else if len(yyv368) != 0 { - yyv368 = yyv368[:0] - yyc368 = true - } - } else if yyl368 > 0 { - var yyrr368, yyrl368 int - var yyrt368 bool - if yyl368 > cap(yyv368) { + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Role{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg368 := len(yyv368) > 0 - yyv2368 := yyv368 - yyrl368, yyrt368 = z.DecInferLen(yyl368, z.DecBasicHandle().MaxInitLen, 280) - if yyrt368 { - if yyrl368 <= cap(yyv368) { - yyv368 = yyv368[:yyrl368] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv368 = make([]Role, yyrl368) + yyv1 = make([]Role, yyrl1) } } else { - yyv368 = make([]Role, yyrl368) + yyv1 = make([]Role, yyrl1) } - yyc368 = true - yyrr368 = len(yyv368) - if yyrg368 { - copy(yyv368, yyv2368) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl368 != len(yyv368) { - yyv368 = yyv368[:yyl368] - yyc368 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj368 := 0 - for ; yyj368 < yyrr368; yyj368++ { - yyh368.ElemContainerState(yyj368) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv368[yyj368] = Role{} + yyv1[yyj1] = Role{} } else { - 
yyv369 := &yyv368[yyj368] - yyv369.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt368 { - for ; yyj368 < yyl368; yyj368++ { - yyv368 = append(yyv368, Role{}) - yyh368.ElemContainerState(yyj368) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Role{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv368[yyj368] = Role{} + yyv1[yyj1] = Role{} } else { - yyv370 := &yyv368[yyj368] - yyv370.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj368 := 0 - for ; !r.CheckBreak(); yyj368++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj368 >= len(yyv368) { - yyv368 = append(yyv368, Role{}) // var yyz368 Role - yyc368 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Role{}) // var yyz1 Role + yyc1 = true } - yyh368.ElemContainerState(yyj368) - if yyj368 < len(yyv368) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv368[yyj368] = Role{} + yyv1[yyj1] = Role{} } else { - yyv371 := &yyv368[yyj368] - yyv371.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -4320,17 +4626,17 @@ func (x codecSelfer1234) decSliceRole(v *[]Role, d *codec1978.Decoder) { } } - if yyj368 < len(yyv368) { - yyv368 = yyv368[:yyj368] - yyc368 = true - } else if yyj368 == 0 && yyv368 == nil { - yyv368 = []Role{} - yyc368 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Role{} + yyc1 = true } } - yyh368.End() - if yyc368 { - *v = yyv368 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -4339,10 +4645,10 @@ func (x codecSelfer1234) encSliceClusterRoleBinding(v []ClusterRoleBinding, e *c z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv372 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy373 := &yyv372 - yy373.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -4352,83 +4658,86 @@ func (x codecSelfer1234) decSliceClusterRoleBinding(v *[]ClusterRoleBinding, d * z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv374 := *v - yyh374, yyl374 := z.DecSliceHelperStart() - var yyc374 bool - if yyl374 == 0 { - if yyv374 == nil { - yyv374 = []ClusterRoleBinding{} - yyc374 = true - } else if len(yyv374) != 0 { - yyv374 = yyv374[:0] - yyc374 = true - } - } else if yyl374 > 0 { - var yyrr374, yyrl374 int - var yyrt374 bool - if yyl374 > cap(yyv374) { + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ClusterRoleBinding{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg374 := len(yyv374) > 0 - yyv2374 := yyv374 - yyrl374, yyrt374 = z.DecInferLen(yyl374, z.DecBasicHandle().MaxInitLen, 328) - if yyrt374 { - if yyrl374 <= cap(yyv374) { - yyv374 = yyv374[:yyrl374] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 328) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv374 = make([]ClusterRoleBinding, yyrl374) + yyv1 = make([]ClusterRoleBinding, yyrl1) } } else { - yyv374 = make([]ClusterRoleBinding, yyrl374) + yyv1 = make([]ClusterRoleBinding, yyrl1) } - yyc374 = true - yyrr374 = len(yyv374) - if yyrg374 { - 
copy(yyv374, yyv2374) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl374 != len(yyv374) { - yyv374 = yyv374[:yyl374] - yyc374 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj374 := 0 - for ; yyj374 < yyrr374; yyj374++ { - yyh374.ElemContainerState(yyj374) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv374[yyj374] = ClusterRoleBinding{} + yyv1[yyj1] = ClusterRoleBinding{} } else { - yyv375 := &yyv374[yyj374] - yyv375.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt374 { - for ; yyj374 < yyl374; yyj374++ { - yyv374 = append(yyv374, ClusterRoleBinding{}) - yyh374.ElemContainerState(yyj374) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ClusterRoleBinding{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv374[yyj374] = ClusterRoleBinding{} + yyv1[yyj1] = ClusterRoleBinding{} } else { - yyv376 := &yyv374[yyj374] - yyv376.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj374 := 0 - for ; !r.CheckBreak(); yyj374++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj374 >= len(yyv374) { - yyv374 = append(yyv374, ClusterRoleBinding{}) // var yyz374 ClusterRoleBinding - yyc374 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ClusterRoleBinding{}) // var yyz1 ClusterRoleBinding + yyc1 = true } - yyh374.ElemContainerState(yyj374) - if yyj374 < len(yyv374) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv374[yyj374] = ClusterRoleBinding{} + yyv1[yyj1] = ClusterRoleBinding{} } else { - yyv377 := &yyv374[yyj374] - yyv377.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -4436,17 +4745,17 @@ func (x codecSelfer1234) decSliceClusterRoleBinding(v *[]ClusterRoleBinding, d * } } - if yyj374 < len(yyv374) { - yyv374 = yyv374[:yyj374] - yyc374 = true - } else if yyj374 == 0 && yyv374 == nil { - yyv374 = []ClusterRoleBinding{} - yyc374 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ClusterRoleBinding{} + yyc1 = true } } - yyh374.End() - if yyc374 { - *v = yyv374 + yyh1.End() + if yyc1 { + *v = yyv1 } } @@ -4455,10 +4764,10 @@ func (x codecSelfer1234) encSliceClusterRole(v []ClusterRole, e *codec1978.Encod z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv378 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy379 := &yyv378 - yy379.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -4468,83 +4777,86 @@ func (x codecSelfer1234) decSliceClusterRole(v *[]ClusterRole, d *codec1978.Deco z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv380 := *v - yyh380, yyl380 := z.DecSliceHelperStart() - var yyc380 bool - if yyl380 == 0 { - if yyv380 == nil { - yyv380 = []ClusterRole{} - yyc380 = true - } else if len(yyv380) != 0 { - yyv380 = yyv380[:0] - yyc380 = true - } - } else if yyl380 > 0 { - var yyrr380, yyrl380 int - var yyrt380 bool - if yyl380 > cap(yyv380) { + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ClusterRole{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 
// len(yyv1) + if yyl1 > cap(yyv1) { - yyrg380 := len(yyv380) > 0 - yyv2380 := yyv380 - yyrl380, yyrt380 = z.DecInferLen(yyl380, z.DecBasicHandle().MaxInitLen, 280) - if yyrt380 { - if yyrl380 <= cap(yyv380) { - yyv380 = yyv380[:yyrl380] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv380 = make([]ClusterRole, yyrl380) + yyv1 = make([]ClusterRole, yyrl1) } } else { - yyv380 = make([]ClusterRole, yyrl380) + yyv1 = make([]ClusterRole, yyrl1) } - yyc380 = true - yyrr380 = len(yyv380) - if yyrg380 { - copy(yyv380, yyv2380) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl380 != len(yyv380) { - yyv380 = yyv380[:yyl380] - yyc380 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj380 := 0 - for ; yyj380 < yyrr380; yyj380++ { - yyh380.ElemContainerState(yyj380) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv380[yyj380] = ClusterRole{} + yyv1[yyj1] = ClusterRole{} } else { - yyv381 := &yyv380[yyj380] - yyv381.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt380 { - for ; yyj380 < yyl380; yyj380++ { - yyv380 = append(yyv380, ClusterRole{}) - yyh380.ElemContainerState(yyj380) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ClusterRole{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv380[yyj380] = ClusterRole{} + yyv1[yyj1] = ClusterRole{} } else { - yyv382 := &yyv380[yyj380] - yyv382.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj380 := 0 - for ; !r.CheckBreak(); yyj380++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj380 >= len(yyv380) { - yyv380 = append(yyv380, ClusterRole{}) // var yyz380 ClusterRole - yyc380 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ClusterRole{}) // var yyz1 ClusterRole + yyc1 = true } - yyh380.ElemContainerState(yyj380) - if yyj380 < len(yyv380) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv380[yyj380] = ClusterRole{} + yyv1[yyj1] = ClusterRole{} } else { - yyv383 := &yyv380[yyj380] - yyv383.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -4552,16 +4864,16 @@ func (x codecSelfer1234) decSliceClusterRole(v *[]ClusterRole, d *codec1978.Deco } } - if yyj380 < len(yyv380) { - yyv380 = yyv380[:yyj380] - yyc380 = true - } else if yyj380 == 0 && yyv380 == nil { - yyv380 = []ClusterRole{} - yyc380 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ClusterRole{} + yyc1 = true } } - yyh380.End() - if yyc380 { - *v = yyv380 + yyh1.End() + if yyc1 { + *v = yyv1 } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.go index 42617aca61..e9f8efb3b4 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.go @@ -17,9 +17,26 @@ limitations under the License. package v1alpha1 import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Authorization is calculated against +// 1. evaluation of ClusterRoleBindings - short circuit on match +// 2. 
evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + + GroupKind = "Group" + ServiceAccountKind = "ServiceAccount" + UserKind = "User" + + // AutoUpdateAnnotationKey is the name of an annotation which prevents reconciliation if set to "false" + AutoUpdateAnnotationKey = "rbac.authorization.kubernetes.io/autoupdate" ) // Authorization is calculated against @@ -32,10 +49,6 @@ import ( type PolicyRule struct { // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` - // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. - // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. - // +optional - AttributeRestrictions runtime.RawExtension `json:"attributeRestrictions,omitempty" protobuf:"bytes,2,opt,name=attributeRestrictions"` // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of // the enumerated resources in any API group will be allowed. @@ -62,7 +75,10 @@ type Subject struct { // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". // If the Authorizer does not recognized the kind value, the Authorizer should report an error. Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // APIVersion holds the API group and version of the referenced object. + // APIVersion holds the API group and version of the referenced subject. + // Defaults to "v1" for ServiceAccount subjects. + // Defaults to "rbac.authorization.k8s.io/v1alpha1" for User and Group subjects. + // +k8s:conversion-gen=false // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt.name=apiVersion"` // Name of the object being referenced. @@ -87,10 +103,10 @@ type RoleRef struct { // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. type Role struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this Role Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` @@ -102,10 +118,10 @@ type Role struct { // It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given // namespace only have effect in that namespace. type RoleBinding struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Subjects holds references to the objects the role applies to. 
Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"` @@ -117,10 +133,10 @@ type RoleBinding struct { // RoleBindingList is a collection of RoleBindings type RoleBindingList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of RoleBindings Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` @@ -128,10 +144,10 @@ type RoleBindingList struct { // RoleList is a collection of Roles type RoleList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of Roles Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"` @@ -142,10 +158,10 @@ type RoleList struct { // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. type ClusterRole struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this ClusterRole Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` @@ -157,10 +173,10 @@ type ClusterRole struct { // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. type ClusterRoleBinding struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Subjects holds references to the objects the role applies to. Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"` @@ -172,10 +188,10 @@ type ClusterRoleBinding struct { // ClusterRoleBindingList is a collection of ClusterRoleBindings type ClusterRoleBindingList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of ClusterRoleBindings Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` @@ -183,10 +199,10 @@ type ClusterRoleBindingList struct { // ClusterRoleList is a collection of ClusterRoles type ClusterRoleList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
// +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of ClusterRoles Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"` diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go index 633bae9799..d58a722af1 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go @@ -69,13 +69,12 @@ func (ClusterRoleList) SwaggerDoc() map[string]string { } var map_PolicyRule = map[string]string{ - "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", - "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", - "attributeRestrictions": "AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.", - "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", - "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", - "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", - "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", + "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", + "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", + "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. 
Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", } func (PolicyRule) SwaggerDoc() map[string]string { @@ -137,7 +136,7 @@ func (RoleRef) SwaggerDoc() map[string]string { var map_Subject = map[string]string{ "": "Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names.", "kind": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.", - "apiVersion": "APIVersion holds the API group and version of the referenced object.", + "apiVersion": "APIVersion holds the API group and version of the referenced subject. Defaults to \"v1\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io/v1alpha1\" for User and Group subjects.", "name": "Name of the object being referenced.", "namespace": "Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.", } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go index 21c79541f4..b8380f3cd8 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,9 +21,9 @@ limitations under the License. 
package v1alpha1 import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" rbac "k8s.io/kubernetes/pkg/apis/rbac" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" unsafe "unsafe" ) @@ -39,12 +39,16 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole, Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding, Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding, + Convert_v1alpha1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder, + Convert_rbac_ClusterRoleBindingBuilder_To_v1alpha1_ClusterRoleBindingBuilder, Convert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList, Convert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList, Convert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList, Convert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList, Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule, Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule, + Convert_v1alpha1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder, + Convert_rbac_PolicyRuleBuilder_To_v1alpha1_PolicyRuleBuilder, Convert_v1alpha1_Role_To_rbac_Role, Convert_rbac_Role_To_v1alpha1_Role, Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding, @@ -61,21 +65,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { } func autoConvert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]rbac.PolicyRule, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Rules = nil - } + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules)) return nil } @@ -84,20 +75,11 @@ func Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac } func autoConvert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]PolicyRule, len(*in)) - for i := range *in { - if err := Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } + out.ObjectMeta = in.ObjectMeta + if in.Rules == nil { + out.Rules = make([]PolicyRule, 0) } else { - out.Rules = nil + out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) } return nil } @@ -107,11 +89,18 @@ func Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in *rbac.ClusterRole, out } func autoConvert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err + out.ObjectMeta = in.ObjectMeta + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]rbac.Subject, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_Subject_To_rbac_Subject(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subjects = nil } - out.Subjects = *(*[]rbac.Subject)(unsafe.Pointer(&in.Subjects)) if err := Convert_v1alpha1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { return err } @@ -123,11 +112,18 @@ func Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterR } func autoConvert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err + out.ObjectMeta = in.ObjectMeta + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + for i := range *in { + if err := Convert_rbac_Subject_To_v1alpha1_Subject(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subjects = make([]Subject, 0) } - out.Subjects = *(*[]Subject)(unsafe.Pointer(&in.Subjects)) if err := Convert_rbac_RoleRef_To_v1alpha1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { return err } @@ -138,33 +134,35 @@ func Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in *rbac.Clu return autoConvert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in, out, s) } -func autoConvert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]rbac.ClusterRoleBinding)(unsafe.Pointer(&in.Items)) +func autoConvert_v1alpha1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in *ClusterRoleBindingBuilder, out *rbac.ClusterRoleBindingBuilder, s conversion.Scope) error { + if err := Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(&in.ClusterRoleBinding, &out.ClusterRoleBinding, s); err != nil { + return err + } return nil } -func Convert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { - return autoConvert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in, out, s) +func Convert_v1alpha1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in *ClusterRoleBindingBuilder, out *rbac.ClusterRoleBindingBuilder, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in, out, s) } -func autoConvert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]ClusterRoleBinding)(unsafe.Pointer(&in.Items)) +func autoConvert_rbac_ClusterRoleBindingBuilder_To_v1alpha1_ClusterRoleBindingBuilder(in *rbac.ClusterRoleBindingBuilder, out *ClusterRoleBindingBuilder, s conversion.Scope) error { + if err := Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(&in.ClusterRoleBinding, &out.ClusterRoleBinding, s); err != nil { + return err + } return nil } -func Convert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out 
*ClusterRoleBindingList, s conversion.Scope) error { - return autoConvert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in, out, s) +func Convert_rbac_ClusterRoleBindingBuilder_To_v1alpha1_ClusterRoleBindingBuilder(in *rbac.ClusterRoleBindingBuilder, out *ClusterRoleBindingBuilder, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleBindingBuilder_To_v1alpha1_ClusterRoleBindingBuilder(in, out, s) } -func autoConvert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { +func autoConvert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]rbac.ClusterRole, len(*in)) + *out = make([]rbac.ClusterRoleBinding, len(*in)) for i := range *in { - if err := Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -174,22 +172,46 @@ func autoConvert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRol return nil } -func Convert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { - return autoConvert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in, out, s) +func Convert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in, out, s) } -func autoConvert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { +func autoConvert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]ClusterRole, len(*in)) + *out = make([]ClusterRoleBinding, len(*in)) for i := range *in { - if err := Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { - out.Items = nil + out.Items = make([]ClusterRoleBinding, 0) + } + return nil +} + +func Convert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in, out, s) +} + +func autoConvert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]rbac.ClusterRole)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in, out, s) +} + +func autoConvert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + 
out.Items = make([]ClusterRole, 0) + } else { + out.Items = *(*[]ClusterRole)(unsafe.Pointer(&in.Items)) } return nil } @@ -200,9 +222,6 @@ func Convert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in *rbac.ClusterRo func autoConvert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error { out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) - if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.AttributeRestrictions, &out.AttributeRestrictions, s); err != nil { - return err - } out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) @@ -215,9 +234,10 @@ func Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.Po } func autoConvert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error { - out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) - if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.AttributeRestrictions, &out.AttributeRestrictions, s); err != nil { - return err + if in.Verbs == nil { + out.Verbs = make([]string, 0) + } else { + out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) } out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) @@ -230,44 +250,44 @@ func Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in *rbac.PolicyRule, out *Po return autoConvert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in, out, s) } -func autoConvert_v1alpha1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { +func autoConvert_v1alpha1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in *PolicyRuleBuilder, out *rbac.PolicyRuleBuilder, s conversion.Scope) error { + if err := Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(&in.PolicyRule, &out.PolicyRule, s); err != nil { return err } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]rbac.PolicyRule, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Rules = nil + return nil +} + +func Convert_v1alpha1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in *PolicyRuleBuilder, out *rbac.PolicyRuleBuilder, s conversion.Scope) error { + return autoConvert_v1alpha1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in, out, s) +} + +func autoConvert_rbac_PolicyRuleBuilder_To_v1alpha1_PolicyRuleBuilder(in *rbac.PolicyRuleBuilder, out *PolicyRuleBuilder, s conversion.Scope) error { + if err := Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(&in.PolicyRule, &out.PolicyRule, s); err != nil { + return err } return nil } +func Convert_rbac_PolicyRuleBuilder_To_v1alpha1_PolicyRuleBuilder(in *rbac.PolicyRuleBuilder, out *PolicyRuleBuilder, s conversion.Scope) error { + return autoConvert_rbac_PolicyRuleBuilder_To_v1alpha1_PolicyRuleBuilder(in, out, s) +} + +func autoConvert_v1alpha1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules)) + return nil +} + func Convert_v1alpha1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { return autoConvert_v1alpha1_Role_To_rbac_Role(in, out, s) } func 
autoConvert_rbac_Role_To_v1alpha1_Role(in *rbac.Role, out *Role, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]PolicyRule, len(*in)) - for i := range *in { - if err := Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } + out.ObjectMeta = in.ObjectMeta + if in.Rules == nil { + out.Rules = make([]PolicyRule, 0) } else { - out.Rules = nil + out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) } return nil } @@ -277,11 +297,18 @@ func Convert_rbac_Role_To_v1alpha1_Role(in *rbac.Role, out *Role, s conversion.S } func autoConvert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err + out.ObjectMeta = in.ObjectMeta + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]rbac.Subject, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_Subject_To_rbac_Subject(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subjects = nil } - out.Subjects = *(*[]rbac.Subject)(unsafe.Pointer(&in.Subjects)) if err := Convert_v1alpha1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { return err } @@ -293,11 +320,18 @@ func Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac } func autoConvert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err + out.ObjectMeta = in.ObjectMeta + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + for i := range *in { + if err := Convert_rbac_Subject_To_v1alpha1_Subject(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subjects = make([]Subject, 0) } - out.Subjects = *(*[]Subject)(unsafe.Pointer(&in.Subjects)) if err := Convert_rbac_RoleRef_To_v1alpha1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { return err } @@ -310,7 +344,17 @@ func Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in *rbac.RoleBinding, out func autoConvert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]rbac.RoleBinding)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]rbac.RoleBinding, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -320,7 +364,17 @@ func Convert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingLis func autoConvert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]RoleBinding)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + if err := Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]RoleBinding, 0) + } return nil } @@ -330,17 +384,7 @@ func Convert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in *rbac.RoleBindi func autoConvert_v1alpha1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error { out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]rbac.Role, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_Role_To_rbac_Role(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } + out.Items = *(*[]rbac.Role)(unsafe.Pointer(&in.Items)) return nil } @@ -350,16 +394,10 @@ func Convert_v1alpha1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList func autoConvert_rbac_RoleList_To_v1alpha1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error { out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Role, len(*in)) - for i := range *in { - if err := Convert_rbac_Role_To_v1alpha1_Role(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } + if in.Items == nil { + out.Items = make([]Role, 0) } else { - out.Items = nil + out.Items = *(*[]Role)(unsafe.Pointer(&in.Items)) } return nil } @@ -392,24 +430,16 @@ func Convert_rbac_RoleRef_To_v1alpha1_RoleRef(in *rbac.RoleRef, out *RoleRef, s func autoConvert_v1alpha1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error { out.Kind = in.Kind - out.APIVersion = in.APIVersion + // INFO: in.APIVersion opted out of conversion generation out.Name = in.Name out.Namespace = in.Namespace return nil } -func Convert_v1alpha1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error { - return 
autoConvert_v1alpha1_Subject_To_rbac_Subject(in, out, s) -} - func autoConvert_rbac_Subject_To_v1alpha1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error { out.Kind = in.Kind - out.APIVersion = in.APIVersion + // WARNING: in.APIGroup requires manual conversion: does not exist in peer-type out.Name = in.Name out.Namespace = in.Namespace return nil } - -func Convert_rbac_Subject_To_v1alpha1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error { - return autoConvert_rbac_Subject_To_v1alpha1_Subject(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.deepcopy.go index d4d2dfcf3f..f4dfd3ca7c 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,9 +21,9 @@ limitations under the License. package v1alpha1 import ( - v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" reflect "reflect" ) @@ -53,9 +53,11 @@ func DeepCopy_v1alpha1_ClusterRole(in interface{}, out interface{}, c *conversio { in := in.(*ClusterRole) out := out.(*ClusterRole) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if in.Rules != nil { in, out := &in.Rules, &out.Rules @@ -65,8 +67,6 @@ func DeepCopy_v1alpha1_ClusterRole(in interface{}, out interface{}, c *conversio return err } } - } else { - out.Rules = nil } return nil } @@ -76,20 +76,17 @@ func DeepCopy_v1alpha1_ClusterRoleBinding(in interface{}, out interface{}, c *co { in := in.(*ClusterRoleBinding) out := out.(*ClusterRoleBinding) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if in.Subjects != nil { in, out := &in.Subjects, &out.Subjects *out = make([]Subject, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Subjects = nil + copy(*out, *in) } - out.RoleRef = in.RoleRef return nil } } @@ -98,8 +95,7 @@ func DeepCopy_v1alpha1_ClusterRoleBindingList(in interface{}, out interface{}, c { in := in.(*ClusterRoleBindingList) out := out.(*ClusterRoleBindingList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRoleBinding, len(*in)) @@ -108,8 +104,6 @@ func DeepCopy_v1alpha1_ClusterRoleBindingList(in interface{}, out interface{}, c return err } } - } else { - out.Items = nil } return nil } @@ -119,8 +113,7 @@ func DeepCopy_v1alpha1_ClusterRoleList(in interface{}, out interface{}, c *conve { in := in.(*ClusterRoleList) out := out.(*ClusterRoleList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items 
!= nil { in, out := &in.Items, &out.Items *out = make([]ClusterRole, len(*in)) @@ -129,8 +122,6 @@ func DeepCopy_v1alpha1_ClusterRoleList(in interface{}, out interface{}, c *conve return err } } - } else { - out.Items = nil } return nil } @@ -140,43 +131,31 @@ func DeepCopy_v1alpha1_PolicyRule(in interface{}, out interface{}, c *conversion { in := in.(*PolicyRule) out := out.(*PolicyRule) + *out = *in if in.Verbs != nil { in, out := &in.Verbs, &out.Verbs *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Verbs = nil - } - if err := runtime.DeepCopy_runtime_RawExtension(&in.AttributeRestrictions, &out.AttributeRestrictions, c); err != nil { - return err } if in.APIGroups != nil { in, out := &in.APIGroups, &out.APIGroups *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.APIGroups = nil } if in.Resources != nil { in, out := &in.Resources, &out.Resources *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Resources = nil } if in.ResourceNames != nil { in, out := &in.ResourceNames, &out.ResourceNames *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.ResourceNames = nil } if in.NonResourceURLs != nil { in, out := &in.NonResourceURLs, &out.NonResourceURLs *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.NonResourceURLs = nil } return nil } @@ -186,9 +165,11 @@ func DeepCopy_v1alpha1_Role(in interface{}, out interface{}, c *conversion.Clone { in := in.(*Role) out := out.(*Role) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if in.Rules != nil { in, out := &in.Rules, &out.Rules @@ -198,8 +179,6 @@ func DeepCopy_v1alpha1_Role(in interface{}, out interface{}, c *conversion.Clone return err } } - } else { - out.Rules = nil } return nil } @@ -209,20 +188,17 @@ func DeepCopy_v1alpha1_RoleBinding(in interface{}, out interface{}, c *conversio { in := in.(*RoleBinding) out := out.(*RoleBinding) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if in.Subjects != nil { in, out := &in.Subjects, &out.Subjects *out = make([]Subject, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Subjects = nil + copy(*out, *in) } - out.RoleRef = in.RoleRef return nil } } @@ -231,8 +207,7 @@ func DeepCopy_v1alpha1_RoleBindingList(in interface{}, out interface{}, c *conve { in := in.(*RoleBindingList) out := out.(*RoleBindingList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RoleBinding, len(*in)) @@ -241,8 +216,6 @@ func DeepCopy_v1alpha1_RoleBindingList(in interface{}, out interface{}, c *conve return err } } - } else { - out.Items = nil } return nil } @@ -252,8 +225,7 @@ func DeepCopy_v1alpha1_RoleList(in interface{}, out interface{}, c *conversion.C { in := in.(*RoleList) out := out.(*RoleList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Role, len(*in)) @@ -262,8 +234,6 @@ func DeepCopy_v1alpha1_RoleList(in interface{}, out interface{}, c *conversion.C return err } } - } else { - out.Items = nil } return nil } @@ -273,9 +243,7 
@@ func DeepCopy_v1alpha1_RoleRef(in interface{}, out interface{}, c *conversion.Cl { in := in.(*RoleRef) out := out.(*RoleRef) - out.APIGroup = in.APIGroup - out.Kind = in.Kind - out.Name = in.Name + *out = *in return nil } } @@ -284,10 +252,7 @@ func DeepCopy_v1alpha1_Subject(in interface{}, out interface{}, c *conversion.Cl { in := in.(*Subject) out := out.(*Subject) - out.Kind = in.Kind - out.APIVersion = in.APIVersion - out.Name = in.Name - out.Namespace = in.Namespace + *out = *in return nil } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.defaults.go index 05e68f657b..1a5749be30 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ limitations under the License. package v1alpha1 import ( - runtime "k8s.io/kubernetes/pkg/runtime" + runtime "k8s.io/apimachinery/pkg/runtime" ) // RegisterDefaults adds defaulters functions to the given scheme. @@ -37,6 +37,10 @@ func RegisterDefaults(scheme *runtime.Scheme) error { func SetObjectDefaults_ClusterRoleBinding(in *ClusterRoleBinding) { SetDefaults_ClusterRoleBinding(in) + for i := range in.Subjects { + a := &in.Subjects[i] + SetDefaults_Subject(a) + } } func SetObjectDefaults_ClusterRoleBindingList(in *ClusterRoleBindingList) { @@ -48,6 +52,10 @@ func SetObjectDefaults_ClusterRoleBindingList(in *ClusterRoleBindingList) { func SetObjectDefaults_RoleBinding(in *RoleBinding) { SetDefaults_RoleBinding(in) + for i := range in.Subjects { + a := &in.Subjects[i] + SetDefaults_Subject(a) + } } func SetObjectDefaults_RoleBindingList(in *RoleBindingList) { diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/BUILD new file mode 100644 index 0000000000..9794c8ae4d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/BUILD @@ -0,0 +1,49 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "defaults.go", + "doc.go", + "generated.pb.go", + "helpers.go", + "register.go", + "types.generated.go", + "types.go", + "types_swagger_doc_generated.go", + "zz_generated.conversion.go", + "zz_generated.deepcopy.go", + "zz_generated.defaults.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/rbac:go_default_library", + "//vendor:github.com/gogo/protobuf/proto", + "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/types", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/defaults.go new file mode 100644 index 0000000000..6c29ae500e --- 
/dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/defaults.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + RegisterDefaults(scheme) + return scheme.AddDefaultingFuncs( + SetDefaults_ClusterRoleBinding, + SetDefaults_RoleBinding, + SetDefaults_Subject, + ) +} + +func SetDefaults_ClusterRoleBinding(obj *ClusterRoleBinding) { + if len(obj.RoleRef.APIGroup) == 0 { + obj.RoleRef.APIGroup = GroupName + } +} +func SetDefaults_RoleBinding(obj *RoleBinding) { + if len(obj.RoleRef.APIGroup) == 0 { + obj.RoleRef.APIGroup = GroupName + } +} +func SetDefaults_Subject(obj *Subject) { + if len(obj.APIGroup) == 0 { + switch obj.Kind { + case ServiceAccountKind: + obj.APIGroup = "" + case UserKind: + obj.APIGroup = GroupName + case GroupKind: + obj.APIGroup = GroupName + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/doc.go new file mode 100644 index 0000000000..cdf745270d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/rbac +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=rbac.authorization.k8s.io +package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/generated.pb.go new file mode 100644 index 0000000000..6c2669b25f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/generated.pb.go @@ -0,0 +1,2816 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/rbac/v1beta1/generated.proto +// DO NOT EDIT! 
+ +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/rbac/v1beta1/generated.proto + + It has these top-level messages: + ClusterRole + ClusterRoleBinding + ClusterRoleBindingBuilder + ClusterRoleBindingList + ClusterRoleList + PolicyRule + PolicyRuleBuilder + Role + RoleBinding + RoleBindingList + RoleList + RoleRef + Subject +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *ClusterRoleBindingBuilder) Reset() { *m = ClusterRoleBindingBuilder{} } +func (*ClusterRoleBindingBuilder) ProtoMessage() {} +func (*ClusterRoleBindingBuilder) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *PolicyRuleBuilder) Reset() { *m = PolicyRuleBuilder{} } +func (*PolicyRuleBuilder) ProtoMessage() {} +func (*PolicyRuleBuilder) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *RoleRef) Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func init() { + proto.RegisterType((*ClusterRole)(nil), 
"k8s.io.kubernetes.pkg.apis.rbac.v1beta1.ClusterRole") + proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1beta1.ClusterRoleBinding") + proto.RegisterType((*ClusterRoleBindingBuilder)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1beta1.ClusterRoleBindingBuilder") + proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1beta1.ClusterRoleBindingList") + proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1beta1.ClusterRoleList") + proto.RegisterType((*PolicyRule)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1beta1.PolicyRule") + proto.RegisterType((*PolicyRuleBuilder)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1beta1.PolicyRuleBuilder") + proto.RegisterType((*Role)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1beta1.Role") + proto.RegisterType((*RoleBinding)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1beta1.RoleBinding") + proto.RegisterType((*RoleBindingList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1beta1.RoleBindingList") + proto.RegisterType((*RoleList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1beta1.RoleList") + proto.RegisterType((*RoleRef)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1beta1.RoleRef") + proto.RegisterType((*Subject)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1beta1.Subject") +} +func (m *ClusterRole) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRole) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ClusterRoleBinding) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleBinding) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n2, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size())) + n3, err := m.RoleRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *ClusterRoleBindingBuilder) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleBindingBuilder) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ClusterRoleBinding.Size())) + n4, err := m.ClusterRoleBinding.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + return i, nil +} + +func (m *ClusterRoleBindingList) Marshal() (data 
[]byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleBindingList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ClusterRoleList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PolicyRule) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PolicyRule) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + data[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + data[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + data[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *PolicyRuleBuilder) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PolicyRuleBuilder) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.PolicyRule.Size())) + n7, err := m.PolicyRule.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + return i, nil +} + +func (m *Role) Marshal() (data []byte, err error) { + size := 
m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Role) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n8, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RoleBinding) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleBinding) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size())) + n10, err := m.RoleRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + return i, nil +} + +func (m *RoleBindingList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleBindingList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n11, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RoleList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n12, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RoleRef) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleRef) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIGroup))) + i += copy(data[i:], m.APIGroup) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += 
copy(data[i:], m.Kind) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + return i, nil +} + +func (m *Subject) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Subject) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIGroup))) + i += copy(data[i:], m.APIGroup) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) + i += copy(data[i:], m.Namespace) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *ClusterRole) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRoleBinding) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterRoleBindingBuilder) Size() (n int) { + var l int + _ = l + l = m.ClusterRoleBinding.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterRoleBindingList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRoleList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRule) Size() (n int) { + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if 
len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRuleBuilder) Size() (n int) { + var l int + _ = l + l = m.PolicyRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Role) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleBinding) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RoleBindingList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleRef) Size() (n int) { + var l int + _ = l + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Subject) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterRole) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRole{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBindingBuilder) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleBindingBuilder{`, + `ClusterRoleBinding:` + strings.Replace(strings.Replace(this.ClusterRoleBinding.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this 
*ClusterRoleBindingList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRuleBuilder) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PolicyRuleBuilder{`, + `PolicyRule:` + strings.Replace(strings.Replace(this.PolicyRule.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Role) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Role{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleBindingList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func 
(this *RoleRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleRef{`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Subject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Subject{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterRole) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBinding) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBindingBuilder) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBindingBuilder: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBindingBuilder: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleBinding", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClusterRoleBinding.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBindingList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
ClusterRoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRole{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRule) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRuleBuilder) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRuleBuilder: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRuleBuilder: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PolicyRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PolicyRule.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Role) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Role: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBinding) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: RoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Role{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleRef) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Subject) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 830 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x54, 0xbf, 0x8f, 0xe3, 0x44, + 0x14, 0xce, 0x64, 0x13, 0x6d, 0x3c, 0xcb, 0x2a, 0xec, 0x20, 0x21, 0x93, 0xc2, 0x89, 0xdc, 0xb0, + 0x88, 0x3b, 0xfb, 0xf6, 0xee, 0xc4, 0x21, 0x21, 0x0a, 0x4c, 0x81, 0x4e, 0x1c, 0xcb, 0x69, 0x10, + 0x88, 0x5f, 0x42, 0x37, 0x71, 0xe6, 0xbc, 0x43, 0xfc, 0x4b, 0x33, 0xe3, 0x88, 0x13, 0x14, 0x74, + 0xb4, 0xfc, 0x13, 0x74, 0xd7, 0xd1, 0x52, 0x51, 0x2d, 0x54, 0x57, 0x6e, 0x15, 0xb1, 0xe6, 0x0f, + 0x01, 0xd9, 0x1e, 0xff, 0x08, 0x4e, 0xd8, 0xb0, 0x48, 0x91, 0x90, 0xa8, 0x92, 0x79, 0xef, 0xfb, + 0xde, 0xbc, 0xef, 0xbd, 0xf1, 0x07, 0xef, 0xcd, 0x5f, 0x17, 0x16, 0x8b, 0xec, 0x79, 0x32, 0xa5, + 0x3c, 0xa4, 0x92, 0x0a, 0x3b, 0x9e, 0x7b, 0x36, 0x89, 0x99, 0xb0, 0xf9, 0x94, 0xb8, 0xf6, 0xe2, + 0x64, 0x4a, 0x25, 0x39, 0xb1, 0x3d, 0x1a, 0x52, 0x4e, 0x24, 0x9d, 0x59, 0x31, 0x8f, 0x64, 0x84, + 0x5e, 0x2e, 0x88, 0x56, 0x4d, 0xb4, 0xe2, 0xb9, 0x67, 0x65, 0x44, 0x2b, 0x23, 0x5a, 0x8a, 0x38, + 0xba, 0xe9, 0x31, 0x79, 0x96, 0x4c, 0x2d, 0x37, 0x0a, 0x6c, 0x2f, 0xf2, 0x22, 0x3b, 0xe7, 0x4f, + 0x93, 0xc7, 0xf9, 0x29, 0x3f, 0xe4, 0xff, 0x8a, 0xba, 0xa3, 0xbb, 0xaa, 0x21, 0x12, 0xb3, 0x80, + 0xb8, 0x67, 0x2c, 0xa4, 0xfc, 0x49, 0xdd, 0x52, 0x40, 0x25, 0xb1, 0x17, 0xad, 0x6e, 0x46, 0xf6, + 0x26, 0x16, 0x4f, 0x42, 0xc9, 0x02, 0xda, 0x22, 0xbc, 0x76, 0x15, 0x41, 0xb8, 0x67, 0x34, 0x20, + 0x2d, 0xde, 0x9d, 0x4d, 0xbc, 0x44, 0x32, 0xdf, 0x66, 0xa1, 0x14, 0x92, 0xb7, 0x48, 0x0d, 0x4d, + 0x82, 0xf2, 0x05, 0xe5, 0xb5, 0x20, 0xfa, 0x15, 0x09, 0x62, 0x9f, 0xae, 0xd3, 0x74, 0x63, 0xe3, + 0x6a, 0xd6, 0xa0, 0xcd, 0x5f, 0x00, 0x3c, 0x78, 0xdb, 0x4f, 0x84, 0xa4, 0x1c, 0x47, 0x3e, 0x45, + 0x8f, 0xe0, 0x20, 0x1b, 0xd6, 0x8c, 0x48, 0xa2, 0x83, 0x09, 0x38, 0x3e, 0xb8, 0x7d, 0xcb, 0x52, + 0x2b, 0x6b, 0xf6, 0x5e, 0x2f, 0x2d, 0x43, 0x5b, 0x8b, 0x13, 0xeb, 0xfd, 0xe9, 0x97, 0xd4, 0x95, + 0xef, 0x51, 0x49, 0x1c, 0x74, 0xbe, 0x1c, 0x77, 0xd2, 0xe5, 0x18, 0xd6, 0x31, 0x5c, 0x55, 0x45, + 0x1f, 0xc3, 0x3e, 0x4f, 0x7c, 0x2a, 0xf4, 0xee, 0x64, 0xef, 0xf8, 0xe0, 0xf6, 0x1d, 0x6b, 0xcb, + 0x17, 0x61, 0x3d, 0x8c, 0x7c, 0xe6, 0x3e, 0xc1, 0x89, 0x4f, 0x9d, 0x43, 0x75, 0x43, 0x3f, 0x3b, + 0x09, 0x5c, 0x14, 0x34, 0x7f, 0xec, 0x42, 0xd4, 0xd0, 0xe2, 0xb0, 0x70, 0xc6, 0x42, 0x6f, 0x07, + 0x92, 0xbe, 0x80, 0x03, 0x91, 0xe4, 0x89, 0x52, 0xd5, 0xad, 0xad, 0x55, 0x7d, 0x50, 
0x10, 0x9d, + 0xe7, 0xd5, 0x0d, 0x03, 0x15, 0x10, 0xb8, 0xaa, 0x89, 0x3e, 0x83, 0xfb, 0x3c, 0xf2, 0x29, 0xa6, + 0x8f, 0xf5, 0xbd, 0x55, 0x01, 0x57, 0x96, 0xc7, 0x05, 0xcf, 0x19, 0xaa, 0xf2, 0xfb, 0x2a, 0x80, + 0xcb, 0x8a, 0xe6, 0x0f, 0x00, 0xbe, 0xd4, 0x9e, 0x9a, 0x93, 0x30, 0x7f, 0x46, 0x39, 0xfa, 0x0e, + 0x40, 0xe4, 0xb6, 0xb2, 0x6a, 0x8e, 0x6f, 0x6c, 0xdd, 0xc6, 0x9a, 0x0b, 0x46, 0xaa, 0xa3, 0x35, + 0x2b, 0xc3, 0x6b, 0xae, 0x34, 0x2f, 0x00, 0x7c, 0xb1, 0x0d, 0x7d, 0xc0, 0x84, 0x44, 0x9f, 0xb7, + 0x36, 0x6c, 0x6d, 0xb7, 0xe1, 0x8c, 0x9d, 0xef, 0xb7, 0x9a, 0x7e, 0x19, 0x69, 0x6c, 0xf7, 0x11, + 0xec, 0x33, 0x49, 0x83, 0x72, 0xb5, 0xff, 0x4a, 0x74, 0xf5, 0x70, 0xef, 0x67, 0x15, 0x71, 0x51, + 0xd8, 0xfc, 0x15, 0xc0, 0x61, 0x03, 0xbc, 0x03, 0x4d, 0x9f, 0xac, 0x6a, 0xba, 0x7b, 0x2d, 0x4d, + 0xeb, 0xc5, 0xfc, 0x01, 0x20, 0xac, 0x3f, 0x55, 0x34, 0x86, 0xfd, 0x05, 0xe5, 0x53, 0xa1, 0x83, + 0xc9, 0xde, 0xb1, 0xe6, 0x68, 0x19, 0xfe, 0xa3, 0x2c, 0x80, 0x8b, 0x38, 0x7a, 0x15, 0x6a, 0x24, + 0x66, 0xef, 0xf0, 0x28, 0x89, 0x8b, 0x76, 0x34, 0xe7, 0x30, 0x5d, 0x8e, 0xb5, 0xb7, 0x1e, 0xde, + 0x2f, 0x82, 0xb8, 0xce, 0x67, 0x60, 0x4e, 0x45, 0x94, 0x70, 0x97, 0x0a, 0x7d, 0xaf, 0x06, 0xe3, + 0x32, 0x88, 0xeb, 0x3c, 0xba, 0x07, 0x0f, 0xcb, 0xc3, 0x29, 0x09, 0xa8, 0xd0, 0x7b, 0x39, 0xe1, + 0x28, 0x5d, 0x8e, 0x0f, 0x71, 0x33, 0x81, 0x57, 0x71, 0xe8, 0x4d, 0x38, 0x0c, 0xa3, 0xb0, 0x84, + 0x7c, 0x88, 0x1f, 0x08, 0xbd, 0x9f, 0x53, 0x5f, 0x48, 0x97, 0xe3, 0xe1, 0xe9, 0x6a, 0x0a, 0xff, + 0x15, 0x6b, 0x7e, 0x03, 0x8f, 0x1a, 0x5e, 0xa5, 0x3e, 0x24, 0x0f, 0xc2, 0xb8, 0x0a, 0xaa, 0x8d, + 0x5e, 0xcb, 0xfb, 0x2a, 0x2b, 0xaa, 0x63, 0xb8, 0x51, 0xda, 0xfc, 0x19, 0xc0, 0xde, 0x7f, 0xde, + 0xca, 0x9f, 0x76, 0xe1, 0xc1, 0xff, 0x1e, 0xbe, 0xb5, 0x87, 0x67, 0x06, 0xb2, 0x5b, 0x53, 0xbc, + 0xb6, 0x81, 0x5c, 0xed, 0x86, 0x3f, 0x01, 0x38, 0xd8, 0x91, 0x0d, 0xe2, 0x55, 0x15, 0x37, 0xff, + 0x99, 0x8a, 0xf5, 0xed, 0x7f, 0x0d, 0xcb, 0xfd, 0xa0, 0x1b, 0x70, 0x50, 0x5a, 0x57, 0xde, 0xbc, + 0x56, 0x37, 0x53, 0xba, 0x1b, 0xae, 0x10, 0x68, 0x02, 0x7b, 0x73, 0x16, 0xce, 0xf4, 0x6e, 0x8e, + 0x7c, 0x4e, 0x21, 0x7b, 0xef, 0xb2, 0x70, 0x86, 0xf3, 0x4c, 0x86, 0x08, 0x49, 0x40, 0xf3, 0x07, + 0xd4, 0x40, 0x64, 0xa6, 0x85, 0xf3, 0x8c, 0xf9, 0x14, 0xc0, 0x7d, 0xf5, 0xf8, 0xaa, 0x7a, 0x60, + 0x63, 0xbd, 0x66, 0x7f, 0xdd, 0x6d, 0xfa, 0xfb, 0xfb, 0xdb, 0x91, 0x0d, 0xb5, 0xec, 0x57, 0xc4, + 0xc4, 0xa5, 0x7a, 0x2f, 0x87, 0x1d, 0x29, 0x98, 0x76, 0x5a, 0x26, 0x70, 0x8d, 0x71, 0x5e, 0x39, + 0xbf, 0x34, 0x3a, 0xcf, 0x2e, 0x8d, 0xce, 0xc5, 0xa5, 0xd1, 0xf9, 0x36, 0x35, 0xc0, 0x79, 0x6a, + 0x80, 0x67, 0xa9, 0x01, 0x7e, 0x4b, 0x0d, 0xf0, 0xfd, 0xef, 0x46, 0xe7, 0xd3, 0x7d, 0x35, 0xf1, + 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x96, 0xa1, 0xd4, 0x72, 0x0c, 0x00, 0x00, +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/generated.proto new file mode 100644 index 0000000000..542e2b0276 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/generated.proto @@ -0,0 +1,200 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.rbac.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +message ClusterRole { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this ClusterRole + repeated PolicyRule rules = 2; +} + +// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. +message ClusterRoleBinding { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Subjects holds references to the objects the role applies to. + repeated Subject subjects = 2; + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + optional RoleRef roleRef = 3; +} + +// +k8s:deepcopy-gen=false +// ClusterRoleBindingBuilder let's us attach methods. A no-no for API types. +// We use it to construct bindings in code. It's more compact than trying to write them +// out in a literal. +message ClusterRoleBindingBuilder { + optional ClusterRoleBinding clusterRoleBinding = 1; +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +message ClusterRoleBindingList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ClusterRoleBindings + repeated ClusterRoleBinding items = 2; +} + +// ClusterRoleList is a collection of ClusterRoles +message ClusterRoleList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ClusterRoles + repeated ClusterRole items = 2; +} + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +message PolicyRule { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + repeated string verbs = 1; + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. + // +optional + repeated string apiGroups = 2; + + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // +optional + repeated string resources = 3; + + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. 
+ // +optional + repeated string resourceNames = 4; + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. + // +optional + repeated string nonResourceURLs = 5; +} + +// +k8s:deepcopy-gen=false +// PolicyRuleBuilder let's us attach methods. A no-no for API types. +// We use it to construct rules in code. It's more compact than trying to write them +// out in a literal and allows us to perform some basic checking during construction +message PolicyRuleBuilder { + optional PolicyRule policyRule = 1; +} + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +message Role { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this Role + repeated PolicyRule rules = 2; +} + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. +message RoleBinding { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Subjects holds references to the objects the role applies to. + repeated Subject subjects = 2; + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + optional RoleRef roleRef = 3; +} + +// RoleBindingList is a collection of RoleBindings +message RoleBindingList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of RoleBindings + repeated RoleBinding items = 2; +} + +// RoleList is a collection of Roles +message RoleList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of Roles + repeated Role items = 2; +} + +// RoleRef contains information that points to the role being used +message RoleRef { + // APIGroup is the group for the resource being referenced + optional string apiGroup = 1; + + // Kind is the type of resource being referenced + optional string kind = 2; + + // Name is the name of resource being referenced + optional string name = 3; +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +message Subject { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognized the kind value, the Authorizer should report an error. + optional string kind = 1; + + // APIGroup holds the API group of the referenced subject. + // Defaults to "" for ServiceAccount subjects. + // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. 
+ // +optional + optional string apiGroup = 2; + + // Name of the object being referenced. + optional string name = 3; + + // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty + // the Authorizer should report an error. + // +optional + optional string namespace = 4; +} + diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/helpers.go new file mode 100644 index 0000000000..f38776f26e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/helpers.go @@ -0,0 +1,148 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen=false +// PolicyRuleBuilder let's us attach methods. A no-no for API types. +// We use it to construct rules in code. It's more compact than trying to write them +// out in a literal and allows us to perform some basic checking during construction +type PolicyRuleBuilder struct { + PolicyRule PolicyRule `protobuf:"bytes,1,opt,name=policyRule"` +} + +func NewRule(verbs ...string) *PolicyRuleBuilder { + return &PolicyRuleBuilder{ + PolicyRule: PolicyRule{Verbs: verbs}, + } +} + +func (r *PolicyRuleBuilder) Groups(groups ...string) *PolicyRuleBuilder { + r.PolicyRule.APIGroups = append(r.PolicyRule.APIGroups, groups...) + return r +} + +func (r *PolicyRuleBuilder) Resources(resources ...string) *PolicyRuleBuilder { + r.PolicyRule.Resources = append(r.PolicyRule.Resources, resources...) + return r +} + +func (r *PolicyRuleBuilder) Names(names ...string) *PolicyRuleBuilder { + r.PolicyRule.ResourceNames = append(r.PolicyRule.ResourceNames, names...) + return r +} + +func (r *PolicyRuleBuilder) URLs(urls ...string) *PolicyRuleBuilder { + r.PolicyRule.NonResourceURLs = append(r.PolicyRule.NonResourceURLs, urls...) 
+ return r +} + +func (r *PolicyRuleBuilder) RuleOrDie() PolicyRule { + ret, err := r.Rule() + if err != nil { + panic(err) + } + return ret +} + +func (r *PolicyRuleBuilder) Rule() (PolicyRule, error) { + if len(r.PolicyRule.Verbs) == 0 { + return PolicyRule{}, fmt.Errorf("verbs are required: %#v", r.PolicyRule) + } + + switch { + case len(r.PolicyRule.NonResourceURLs) > 0: + if len(r.PolicyRule.APIGroups) != 0 || len(r.PolicyRule.Resources) != 0 || len(r.PolicyRule.ResourceNames) != 0 { + return PolicyRule{}, fmt.Errorf("non-resource rule may not have apiGroups, resources, or resourceNames: %#v", r.PolicyRule) + } + case len(r.PolicyRule.Resources) > 0: + if len(r.PolicyRule.NonResourceURLs) != 0 { + return PolicyRule{}, fmt.Errorf("resource rule may not have nonResourceURLs: %#v", r.PolicyRule) + } + if len(r.PolicyRule.APIGroups) == 0 { + // this a common bug + return PolicyRule{}, fmt.Errorf("resource rule must have apiGroups: %#v", r.PolicyRule) + } + default: + return PolicyRule{}, fmt.Errorf("a rule must have either nonResourceURLs or resources: %#v", r.PolicyRule) + } + + return r.PolicyRule, nil +} + +// +k8s:deepcopy-gen=false +// ClusterRoleBindingBuilder let's us attach methods. A no-no for API types. +// We use it to construct bindings in code. It's more compact than trying to write them +// out in a literal. +type ClusterRoleBindingBuilder struct { + ClusterRoleBinding ClusterRoleBinding `protobuf:"bytes,1,opt,name=clusterRoleBinding"` +} + +func NewClusterBinding(clusterRoleName string) *ClusterRoleBindingBuilder { + return &ClusterRoleBindingBuilder{ + ClusterRoleBinding: ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: clusterRoleName}, + RoleRef: RoleRef{ + APIGroup: GroupName, + Kind: "ClusterRole", + Name: clusterRoleName, + }, + }, + } +} + +func (r *ClusterRoleBindingBuilder) Groups(groups ...string) *ClusterRoleBindingBuilder { + for _, group := range groups { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: GroupKind, Name: group}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) Users(users ...string) *ClusterRoleBindingBuilder { + for _, user := range users { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: UserKind, Name: user}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *ClusterRoleBindingBuilder { + for _, saName := range serviceAccountNames { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: ServiceAccountKind, Namespace: namespace, Name: saName}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) BindingOrDie() ClusterRoleBinding { + ret, err := r.Binding() + if err != nil { + panic(err) + } + return ret +} + +func (r *ClusterRoleBindingBuilder) Binding() (ClusterRoleBinding, error) { + if len(r.ClusterRoleBinding.Subjects) == 0 { + return ClusterRoleBinding{}, fmt.Errorf("subjects are required: %#v", r.ClusterRoleBinding) + } + + return r.ClusterRoleBinding, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/register.go new file mode 100644 index 0000000000..cab2e77e4b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "rbac.authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Role{}, + &RoleBinding{}, + &RoleBindingList{}, + &RoleList{}, + + &ClusterRole{}, + &ClusterRoleBinding{}, + &ClusterRoleBindingList{}, + &ClusterRoleList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/types.generated.go new file mode 100644 index 0000000000..56de3e3e7b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/types.generated.go @@ -0,0 +1,4879 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +package v1beta1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 + } +} + +func (x *PolicyRule) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = len(x.APIGroups) != 0 + yyq2[2] = len(x.Resources) != 0 + yyq2[3] = len(x.ResourceNames) != 0 + yyq2[4] = len(x.NonResourceURLs) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Verbs == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.Verbs, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("verbs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Verbs == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.Verbs, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.APIGroups == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncSliceStringV(x.APIGroups, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiGroups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.APIGroups == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncSliceStringV(x.APIGroups, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Resources == nil { + r.EncodeNil() + } 
else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + z.F.EncSliceStringV(x.Resources, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resources")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Resources == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + z.F.EncSliceStringV(x.Resources, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.ResourceNames == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + z.F.EncSliceStringV(x.ResourceNames, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceNames")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceNames == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + z.F.EncSliceStringV(x.ResourceNames, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.NonResourceURLs == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncSliceStringV(x.NonResourceURLs, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nonResourceURLs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NonResourceURLs == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + z.F.EncSliceStringV(x.NonResourceURLs, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PolicyRule) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PolicyRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "verbs": + if r.TryDecodeAsNil() { + x.Verbs = nil + } else { + 
yyv4 := &x.Verbs + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + case "apiGroups": + if r.TryDecodeAsNil() { + x.APIGroups = nil + } else { + yyv6 := &x.APIGroups + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecSliceStringX(yyv6, false, d) + } + } + case "resources": + if r.TryDecodeAsNil() { + x.Resources = nil + } else { + yyv8 := &x.Resources + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceStringX(yyv8, false, d) + } + } + case "resourceNames": + if r.TryDecodeAsNil() { + x.ResourceNames = nil + } else { + yyv10 := &x.ResourceNames + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + z.F.DecSliceStringX(yyv10, false, d) + } + } + case "nonResourceURLs": + if r.TryDecodeAsNil() { + x.NonResourceURLs = nil + } else { + yyv12 := &x.NonResourceURLs + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + z.F.DecSliceStringX(yyv12, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PolicyRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Verbs = nil + } else { + yyv15 := &x.Verbs + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + z.F.DecSliceStringX(yyv15, false, d) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIGroups = nil + } else { + yyv17 := &x.APIGroups + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + z.F.DecSliceStringX(yyv17, false, d) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Resources = nil + } else { + yyv19 := &x.Resources + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + z.F.DecSliceStringX(yyv19, false, d) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ResourceNames = nil + } else { + yyv21 := &x.ResourceNames + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + z.F.DecSliceStringX(yyv21, false, d) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NonResourceURLs = nil + } else { + yyv23 := &x.NonResourceURLs + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + z.F.DecSliceStringX(yyv23, false, d) + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = 
r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Subject) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.APIGroup != "" + yyq2[3] = x.Namespace != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiGroup")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Subject) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if 
z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Subject) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiGroup": + if r.TryDecodeAsNil() { + x.APIGroup = "" + } else { + yyv6 := &x.APIGroup + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv10 := &x.Namespace + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Subject) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIGroup = "" + } else { + yyv15 := &x.APIGroup + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv17 := &x.Name + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = 
r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv19 := &x.Namespace + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RoleRef) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiGroup")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleRef) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + 
x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleRef) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "apiGroup": + if r.TryDecodeAsNil() { + x.APIGroup = "" + } else { + yyv4 := &x.APIGroup + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv6 := &x.Kind + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RoleRef) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIGroup = "" + } else { + yyv11 := &x.APIGroup + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv15 := &x.Name + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Role) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, 
z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rules")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Role) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == 
codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Role) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "rules": + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv10 := &x.Rules + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Role) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if 
z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv19 := &x.Rules + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym15 := 
z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subjects")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy18 := &x.RoleRef + yy18.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("roleRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy20 := &x.RoleRef + yy20.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleBinding) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "subjects": + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv10 := &x.Subjects + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv10), d) + } + } + case "roleRef": + if r.TryDecodeAsNil() { + x.RoleRef = RoleRef{} + } else { + yyv12 := &x.RoleRef + yyv12.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func 
(x *RoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv14 := &x.Kind + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv16 := &x.APIVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv18 := &x.ObjectMeta + yym19 := z.DecBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.DecExt(yyv18) { + } else { + z.DecFallback(yyv18, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv20 := &x.Subjects + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv20), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RoleRef = RoleRef{} + } else { + yyv22 := &x.RoleRef + yyv22.CodecDecodeSelf(d) + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + 
r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceRoleBinding(([]RoleBinding)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceRoleBinding(([]RoleBinding)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleBindingList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 
>= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceRoleBinding((*[]RoleBinding)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceRoleBinding((*[]RoleBinding)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) 
+} + +func (x *RoleList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceRole(([]Role)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceRole(([]Role)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { 
+ } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceRole((*[]Role)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := 
&x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceRole((*[]Role)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRole) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + 
if x.Rules == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rules")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRole) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRole) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "rules": + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv10 := &x.Rules + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRole) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + 
if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv19 := &x.Rules + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subjects")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy18 := &x.RoleRef + yy18.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("roleRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy20 := &x.RoleRef + yy20.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRoleBinding) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + 
*((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "subjects": + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv10 := &x.Subjects + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv10), d) + } + } + case "roleRef": + if r.TryDecodeAsNil() { + x.RoleRef = RoleRef{} + } else { + yyv12 := &x.RoleRef + yyv12.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv14 := &x.Kind + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv16 := &x.APIVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv18 := &x.ObjectMeta + yym19 := z.DecBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.DecExt(yyv18) { + } else { + z.DecFallback(yyv18, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv20 := &x.Subjects + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv20), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RoleRef = RoleRef{} + } else { + yyv22 := &x.RoleRef + yyv22.CodecDecodeSelf(d) + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, 
"") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRoleBindingList) CodecDecodeSelf(d 
*codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRoleList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := 
&x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceClusterRole(([]ClusterRole)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceClusterRole(([]ClusterRole)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRoleList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceClusterRole((*[]ClusterRole)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var 
yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceClusterRole((*[]ClusterRole)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSlicePolicyRule(v []PolicyRule, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePolicyRule(v *[]PolicyRule, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PolicyRule{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 120) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PolicyRule, yyrl1) + } + } else { + yyv1 = make([]PolicyRule, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PolicyRule{} + } else { + yyv2 := 
&yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PolicyRule{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PolicyRule{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PolicyRule{}) // var yyz1 PolicyRule + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PolicyRule{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PolicyRule{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceSubject(v []Subject, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceSubject(v *[]Subject, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Subject{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Subject, yyrl1) + } + } else { + yyv1 = make([]Subject, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Subject{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Subject{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Subject{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Subject{}) // var yyz1 Subject + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Subject{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Subject{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceRoleBinding(v []RoleBinding, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + 
+func (x codecSelfer1234) decSliceRoleBinding(v *[]RoleBinding, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []RoleBinding{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 328) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]RoleBinding, yyrl1) + } + } else { + yyv1 = make([]RoleBinding, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = RoleBinding{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, RoleBinding{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = RoleBinding{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, RoleBinding{}) // var yyz1 RoleBinding + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = RoleBinding{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []RoleBinding{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceRole(v []Role, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceRole(v *[]Role, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Role{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Role, yyrl1) + } + } else { + yyv1 = make([]Role, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Role{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Role{}) + 
yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Role{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Role{}) // var yyz1 Role + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Role{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Role{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceClusterRoleBinding(v []ClusterRoleBinding, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceClusterRoleBinding(v *[]ClusterRoleBinding, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ClusterRoleBinding{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 328) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ClusterRoleBinding, yyrl1) + } + } else { + yyv1 = make([]ClusterRoleBinding, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRoleBinding{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ClusterRoleBinding{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRoleBinding{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ClusterRoleBinding{}) // var yyz1 ClusterRoleBinding + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRoleBinding{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ClusterRoleBinding{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceClusterRole(v []ClusterRole, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + 
+func (x codecSelfer1234) decSliceClusterRole(v *[]ClusterRole, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ClusterRole{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ClusterRole, yyrl1) + } + } else { + yyv1 = make([]ClusterRole, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRole{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ClusterRole{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRole{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ClusterRole{}) // var yyz1 ClusterRole + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRole{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ClusterRole{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/types.go new file mode 100644 index 0000000000..89a47db9e3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/types.go @@ -0,0 +1,207 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Authorization is calculated against +// 1. evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + + GroupKind = "Group" + ServiceAccountKind = "ServiceAccount" + UserKind = "User" + + // AutoUpdateAnnotationKey is the name of an annotation which prevents reconciliation if set to "false" + AutoUpdateAnnotationKey = "rbac.authorization.kubernetes.io/autoupdate" +) + +// Authorization is calculated against +// 1. 
evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +type PolicyRule struct { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. + // +optional + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // +optional + Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. + // +optional + NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,5,rep,name=nonResourceURLs"` +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +type Subject struct { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognized the kind value, the Authorizer should report an error. + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // APIGroup holds the API group of the referenced subject. + // Defaults to "" for ServiceAccount subjects. + // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. + // +optional + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt.name=apiGroup"` + // Name of the object being referenced. + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` + // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty + // the Authorizer should report an error. 
+ // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` +} + +// RoleRef contains information that points to the role being used +type RoleRef struct { + // APIGroup is the group for the resource being referenced + APIGroup string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"` + // Kind is the type of resource being referenced + Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` + // Name is the name of resource being referenced + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` +} + +// +genclient=true + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +type Role struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules holds all the PolicyRules for this Role + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` +} + +// +genclient=true + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. +type RoleBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Subjects holds references to the objects the role applies to. + Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"` + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` +} + +// RoleBindingList is a collection of RoleBindings +type RoleBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of RoleBindings + Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// RoleList is a collection of Roles +type RoleList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of Roles + Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true +// +nonNamespaced=true + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +type ClusterRole struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules holds all the PolicyRules for this ClusterRole + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` +} + +// +genclient=true +// +nonNamespaced=true + +// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. +type ClusterRoleBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Subjects holds references to the objects the role applies to. + Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"` + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +type ClusterRoleBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoleBindings + Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ClusterRoleList is a collection of ClusterRoles +type ClusterRoleList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoles + Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..1463d8feac --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,148 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_ClusterRole = map[string]string{ + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", +} + +func (ClusterRole) SwaggerDoc() map[string]string { + return map_ClusterRole +} + +var map_ClusterRoleBinding = map[string]string{ + "": "ClusterRoleBinding references a ClusterRole, but not contain it. 
It can reference a ClusterRole in the global namespace, and adds who information via Subject.", + "metadata": "Standard object's metadata.", + "subjects": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (ClusterRoleBinding) SwaggerDoc() map[string]string { + return map_ClusterRoleBinding +} + +var map_ClusterRoleBindingList = map[string]string{ + "": "ClusterRoleBindingList is a collection of ClusterRoleBindings", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoleBindings", +} + +func (ClusterRoleBindingList) SwaggerDoc() map[string]string { + return map_ClusterRoleBindingList +} + +var map_ClusterRoleList = map[string]string{ + "": "ClusterRoleList is a collection of ClusterRoles", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoles", +} + +func (ClusterRoleList) SwaggerDoc() map[string]string { + return map_ClusterRoleList +} + +var map_PolicyRule = map[string]string{ + "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", + "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", + "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", +} + +func (PolicyRule) SwaggerDoc() map[string]string { + return map_PolicyRule +} + +var map_Role = map[string]string{ + "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this Role", +} + +func (Role) SwaggerDoc() map[string]string { + return map_Role +} + +var map_RoleBinding = map[string]string{ + "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", + "metadata": "Standard object's metadata.", + "subjects": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. 
If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (RoleBinding) SwaggerDoc() map[string]string { + return map_RoleBinding +} + +var map_RoleBindingList = map[string]string{ + "": "RoleBindingList is a collection of RoleBindings", + "metadata": "Standard object's metadata.", + "items": "Items is a list of RoleBindings", +} + +func (RoleBindingList) SwaggerDoc() map[string]string { + return map_RoleBindingList +} + +var map_RoleList = map[string]string{ + "": "RoleList is a collection of Roles", + "metadata": "Standard object's metadata.", + "items": "Items is a list of Roles", +} + +func (RoleList) SwaggerDoc() map[string]string { + return map_RoleList +} + +var map_RoleRef = map[string]string{ + "": "RoleRef contains information that points to the role being used", + "apiGroup": "APIGroup is the group for the resource being referenced", + "kind": "Kind is the type of resource being referenced", + "name": "Name is the name of resource being referenced", +} + +func (RoleRef) SwaggerDoc() map[string]string { + return map_RoleRef +} + +var map_Subject = map[string]string{ + "": "Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names.", + "kind": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.", + "apiGroup": "APIGroup holds the API group of the referenced subject. Defaults to \"\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io\" for User and Group subjects.", + "name": "Name of the object being referenced.", + "namespace": "Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.", +} + +func (Subject) SwaggerDoc() map[string]string { + return map_Subject +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/zz_generated.conversion.go new file mode 100644 index 0000000000..8854e1967e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/zz_generated.conversion.go @@ -0,0 +1,389 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1beta1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + rbac "k8s.io/kubernetes/pkg/apis/rbac" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_ClusterRole_To_rbac_ClusterRole, + Convert_rbac_ClusterRole_To_v1beta1_ClusterRole, + Convert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding, + Convert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding, + Convert_v1beta1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder, + Convert_rbac_ClusterRoleBindingBuilder_To_v1beta1_ClusterRoleBindingBuilder, + Convert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList, + Convert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList, + Convert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList, + Convert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList, + Convert_v1beta1_PolicyRule_To_rbac_PolicyRule, + Convert_rbac_PolicyRule_To_v1beta1_PolicyRule, + Convert_v1beta1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder, + Convert_rbac_PolicyRuleBuilder_To_v1beta1_PolicyRuleBuilder, + Convert_v1beta1_Role_To_rbac_Role, + Convert_rbac_Role_To_v1beta1_Role, + Convert_v1beta1_RoleBinding_To_rbac_RoleBinding, + Convert_rbac_RoleBinding_To_v1beta1_RoleBinding, + Convert_v1beta1_RoleBindingList_To_rbac_RoleBindingList, + Convert_rbac_RoleBindingList_To_v1beta1_RoleBindingList, + Convert_v1beta1_RoleList_To_rbac_RoleList, + Convert_rbac_RoleList_To_v1beta1_RoleList, + Convert_v1beta1_RoleRef_To_rbac_RoleRef, + Convert_rbac_RoleRef_To_v1beta1_RoleRef, + Convert_v1beta1_Subject_To_rbac_Subject, + Convert_rbac_Subject_To_v1beta1_Subject, + ) +} + +func autoConvert_v1beta1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules)) + return nil +} + +func Convert_v1beta1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterRole_To_rbac_ClusterRole(in, out, s) +} + +func autoConvert_rbac_ClusterRole_To_v1beta1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if in.Rules == nil { + out.Rules = make([]PolicyRule, 0) + } else { + out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) + } + return nil +} + +func Convert_rbac_ClusterRole_To_v1beta1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error { + return autoConvert_rbac_ClusterRole_To_v1beta1_ClusterRole(in, out, s) +} + +func autoConvert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Subjects = *(*[]rbac.Subject)(unsafe.Pointer(&in.Subjects)) + if err := Convert_v1beta1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in, out, s) +} + +func autoConvert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if in.Subjects == nil { + out.Subjects = make([]Subject, 0) + } else { + out.Subjects = *(*[]Subject)(unsafe.Pointer(&in.Subjects)) + } + if err := Convert_rbac_RoleRef_To_v1beta1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { + return err 
+ } + return nil +} + +func Convert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(in, out, s) +} + +func autoConvert_v1beta1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in *ClusterRoleBindingBuilder, out *rbac.ClusterRoleBindingBuilder, s conversion.Scope) error { + if err := Convert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(&in.ClusterRoleBinding, &out.ClusterRoleBinding, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in *ClusterRoleBindingBuilder, out *rbac.ClusterRoleBindingBuilder, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in, out, s) +} + +func autoConvert_rbac_ClusterRoleBindingBuilder_To_v1beta1_ClusterRoleBindingBuilder(in *rbac.ClusterRoleBindingBuilder, out *ClusterRoleBindingBuilder, s conversion.Scope) error { + if err := Convert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(&in.ClusterRoleBinding, &out.ClusterRoleBinding, s); err != nil { + return err + } + return nil +} + +func Convert_rbac_ClusterRoleBindingBuilder_To_v1beta1_ClusterRoleBindingBuilder(in *rbac.ClusterRoleBindingBuilder, out *ClusterRoleBindingBuilder, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleBindingBuilder_To_v1beta1_ClusterRoleBindingBuilder(in, out, s) +} + +func autoConvert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]rbac.ClusterRoleBinding)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in, out, s) +} + +func autoConvert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]ClusterRoleBinding, 0) + } else { + out.Items = *(*[]ClusterRoleBinding)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList(in, out, s) +} + +func autoConvert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]rbac.ClusterRole)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList(in, out, s) +} + +func autoConvert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]ClusterRole, 0) + } else { + out.Items = *(*[]ClusterRole)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func 
Convert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList(in, out, s) +} + +func autoConvert_v1beta1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error { + out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) + out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) + out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) + return nil +} + +func Convert_v1beta1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error { + return autoConvert_v1beta1_PolicyRule_To_rbac_PolicyRule(in, out, s) +} + +func autoConvert_rbac_PolicyRule_To_v1beta1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error { + if in.Verbs == nil { + out.Verbs = make([]string, 0) + } else { + out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) + } + out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) + out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) + return nil +} + +func Convert_rbac_PolicyRule_To_v1beta1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error { + return autoConvert_rbac_PolicyRule_To_v1beta1_PolicyRule(in, out, s) +} + +func autoConvert_v1beta1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in *PolicyRuleBuilder, out *rbac.PolicyRuleBuilder, s conversion.Scope) error { + if err := Convert_v1beta1_PolicyRule_To_rbac_PolicyRule(&in.PolicyRule, &out.PolicyRule, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in *PolicyRuleBuilder, out *rbac.PolicyRuleBuilder, s conversion.Scope) error { + return autoConvert_v1beta1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in, out, s) +} + +func autoConvert_rbac_PolicyRuleBuilder_To_v1beta1_PolicyRuleBuilder(in *rbac.PolicyRuleBuilder, out *PolicyRuleBuilder, s conversion.Scope) error { + if err := Convert_rbac_PolicyRule_To_v1beta1_PolicyRule(&in.PolicyRule, &out.PolicyRule, s); err != nil { + return err + } + return nil +} + +func Convert_rbac_PolicyRuleBuilder_To_v1beta1_PolicyRuleBuilder(in *rbac.PolicyRuleBuilder, out *PolicyRuleBuilder, s conversion.Scope) error { + return autoConvert_rbac_PolicyRuleBuilder_To_v1beta1_PolicyRuleBuilder(in, out, s) +} + +func autoConvert_v1beta1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules)) + return nil +} + +func Convert_v1beta1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { + return autoConvert_v1beta1_Role_To_rbac_Role(in, out, s) +} + +func autoConvert_rbac_Role_To_v1beta1_Role(in *rbac.Role, out *Role, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if in.Rules == nil { + out.Rules = make([]PolicyRule, 0) + } else { + out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) + } + return nil +} + +func Convert_rbac_Role_To_v1beta1_Role(in *rbac.Role, out *Role, s conversion.Scope) error { + return autoConvert_rbac_Role_To_v1beta1_Role(in, out, s) +} + +func autoConvert_v1beta1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out 
*rbac.RoleBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Subjects = *(*[]rbac.Subject)(unsafe.Pointer(&in.Subjects)) + if err := Convert_v1beta1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error { + return autoConvert_v1beta1_RoleBinding_To_rbac_RoleBinding(in, out, s) +} + +func autoConvert_rbac_RoleBinding_To_v1beta1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if in.Subjects == nil { + out.Subjects = make([]Subject, 0) + } else { + out.Subjects = *(*[]Subject)(unsafe.Pointer(&in.Subjects)) + } + if err := Convert_rbac_RoleRef_To_v1beta1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { + return err + } + return nil +} + +func Convert_rbac_RoleBinding_To_v1beta1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error { + return autoConvert_rbac_RoleBinding_To_v1beta1_RoleBinding(in, out, s) +} + +func autoConvert_v1beta1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]rbac.RoleBinding)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error { + return autoConvert_v1beta1_RoleBindingList_To_rbac_RoleBindingList(in, out, s) +} + +func autoConvert_rbac_RoleBindingList_To_v1beta1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]RoleBinding, 0) + } else { + out.Items = *(*[]RoleBinding)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_rbac_RoleBindingList_To_v1beta1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error { + return autoConvert_rbac_RoleBindingList_To_v1beta1_RoleBindingList(in, out, s) +} + +func autoConvert_v1beta1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]rbac.Role)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error { + return autoConvert_v1beta1_RoleList_To_rbac_RoleList(in, out, s) +} + +func autoConvert_rbac_RoleList_To_v1beta1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]Role, 0) + } else { + out.Items = *(*[]Role)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_rbac_RoleList_To_v1beta1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error { + return autoConvert_rbac_RoleList_To_v1beta1_RoleList(in, out, s) +} + +func autoConvert_v1beta1_RoleRef_To_rbac_RoleRef(in *RoleRef, out *rbac.RoleRef, s conversion.Scope) error { + out.APIGroup = in.APIGroup + out.Kind = in.Kind + out.Name = in.Name + return nil +} + +func Convert_v1beta1_RoleRef_To_rbac_RoleRef(in *RoleRef, out *rbac.RoleRef, s conversion.Scope) error { + return autoConvert_v1beta1_RoleRef_To_rbac_RoleRef(in, out, s) +} + +func autoConvert_rbac_RoleRef_To_v1beta1_RoleRef(in *rbac.RoleRef, out *RoleRef, s conversion.Scope) error { + out.APIGroup = in.APIGroup + out.Kind = in.Kind + out.Name = in.Name + 
return nil +} + +func Convert_rbac_RoleRef_To_v1beta1_RoleRef(in *rbac.RoleRef, out *RoleRef, s conversion.Scope) error { + return autoConvert_rbac_RoleRef_To_v1beta1_RoleRef(in, out, s) +} + +func autoConvert_v1beta1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error { + out.Kind = in.Kind + out.APIGroup = in.APIGroup + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +func Convert_v1beta1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error { + return autoConvert_v1beta1_Subject_To_rbac_Subject(in, out, s) +} + +func autoConvert_rbac_Subject_To_v1beta1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error { + out.Kind = in.Kind + out.APIGroup = in.APIGroup + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +func Convert_rbac_Subject_To_v1beta1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error { + return autoConvert_rbac_Subject_To_v1beta1_Subject(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..dccae9b4f5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,258 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterRole, InType: reflect.TypeOf(&ClusterRole{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterRoleBinding, InType: reflect.TypeOf(&ClusterRoleBinding{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterRoleBindingList, InType: reflect.TypeOf(&ClusterRoleBindingList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterRoleList, InType: reflect.TypeOf(&ClusterRoleList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PolicyRule, InType: reflect.TypeOf(&PolicyRule{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Role, InType: reflect.TypeOf(&Role{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RoleBinding, InType: reflect.TypeOf(&RoleBinding{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RoleBindingList, InType: reflect.TypeOf(&RoleBindingList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RoleList, InType: reflect.TypeOf(&RoleList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RoleRef, InType: reflect.TypeOf(&RoleRef{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Subject, InType: reflect.TypeOf(&Subject{})}, + ) +} + +func DeepCopy_v1beta1_ClusterRole(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRole) + out := out.(*ClusterRole) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_PolicyRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_ClusterRoleBinding(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRoleBinding) + out := out.(*ClusterRoleBinding) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_ClusterRoleBindingList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRoleBindingList) + out := out.(*ClusterRoleBindingList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_ClusterRoleBinding(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_ClusterRoleList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRoleList) + out := out.(*ClusterRoleList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRole, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_ClusterRole(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_PolicyRule(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PolicyRule) + out := out.(*PolicyRule) + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + 
in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_Role(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Role) + out := out.(*Role) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_PolicyRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_RoleBinding(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleBinding) + out := out.(*RoleBinding) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_RoleBindingList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleBindingList) + out := out.(*RoleBindingList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_RoleBinding(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_RoleList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleList) + out := out.(*RoleList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Role, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_Role(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_RoleRef(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleRef) + out := out.(*RoleRef) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_Subject(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Subject) + out := out.(*Subject) + *out = *in + return nil + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/zz_generated.defaults.go new file mode 100644 index 0000000000..fa5bfb6abb --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/zz_generated.defaults.go @@ -0,0 +1,66 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&ClusterRoleBinding{}, func(obj interface{}) { SetObjectDefaults_ClusterRoleBinding(obj.(*ClusterRoleBinding)) }) + scheme.AddTypeDefaultingFunc(&ClusterRoleBindingList{}, func(obj interface{}) { SetObjectDefaults_ClusterRoleBindingList(obj.(*ClusterRoleBindingList)) }) + scheme.AddTypeDefaultingFunc(&RoleBinding{}, func(obj interface{}) { SetObjectDefaults_RoleBinding(obj.(*RoleBinding)) }) + scheme.AddTypeDefaultingFunc(&RoleBindingList{}, func(obj interface{}) { SetObjectDefaults_RoleBindingList(obj.(*RoleBindingList)) }) + return nil +} + +func SetObjectDefaults_ClusterRoleBinding(in *ClusterRoleBinding) { + SetDefaults_ClusterRoleBinding(in) + for i := range in.Subjects { + a := &in.Subjects[i] + SetDefaults_Subject(a) + } +} + +func SetObjectDefaults_ClusterRoleBindingList(in *ClusterRoleBindingList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ClusterRoleBinding(a) + } +} + +func SetObjectDefaults_RoleBinding(in *RoleBinding) { + SetDefaults_RoleBinding(in) + for i := range in.Subjects { + a := &in.Subjects[i] + SetDefaults_Subject(a) + } +} + +func SetObjectDefaults_RoleBindingList(in *RoleBindingList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_RoleBinding(a) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/zz_generated.deepcopy.go index a216d2c052..db5d08cdf3 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,9 +21,9 @@ limitations under the License. 
package rbac import ( - api "k8s.io/kubernetes/pkg/api" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" reflect "reflect" ) @@ -53,9 +53,11 @@ func DeepCopy_rbac_ClusterRole(in interface{}, out interface{}, c *conversion.Cl { in := in.(*ClusterRole) out := out.(*ClusterRole) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if in.Rules != nil { in, out := &in.Rules, &out.Rules @@ -65,8 +67,6 @@ func DeepCopy_rbac_ClusterRole(in interface{}, out interface{}, c *conversion.Cl return err } } - } else { - out.Rules = nil } return nil } @@ -76,20 +76,17 @@ func DeepCopy_rbac_ClusterRoleBinding(in interface{}, out interface{}, c *conver { in := in.(*ClusterRoleBinding) out := out.(*ClusterRoleBinding) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if in.Subjects != nil { in, out := &in.Subjects, &out.Subjects *out = make([]Subject, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Subjects = nil + copy(*out, *in) } - out.RoleRef = in.RoleRef return nil } } @@ -98,8 +95,7 @@ func DeepCopy_rbac_ClusterRoleBindingList(in interface{}, out interface{}, c *co { in := in.(*ClusterRoleBindingList) out := out.(*ClusterRoleBindingList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRoleBinding, len(*in)) @@ -108,8 +104,6 @@ func DeepCopy_rbac_ClusterRoleBindingList(in interface{}, out interface{}, c *co return err } } - } else { - out.Items = nil } return nil } @@ -119,8 +113,7 @@ func DeepCopy_rbac_ClusterRoleList(in interface{}, out interface{}, c *conversio { in := in.(*ClusterRoleList) out := out.(*ClusterRoleList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRole, len(*in)) @@ -129,8 +122,6 @@ func DeepCopy_rbac_ClusterRoleList(in interface{}, out interface{}, c *conversio return err } } - } else { - out.Items = nil } return nil } @@ -140,47 +131,31 @@ func DeepCopy_rbac_PolicyRule(in interface{}, out interface{}, c *conversion.Clo { in := in.(*PolicyRule) out := out.(*PolicyRule) + *out = *in if in.Verbs != nil { in, out := &in.Verbs, &out.Verbs *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Verbs = nil - } - if in.AttributeRestrictions == nil { - out.AttributeRestrictions = nil - } else if newVal, err := c.DeepCopy(&in.AttributeRestrictions); err != nil { - return err - } else { - out.AttributeRestrictions = *newVal.(*runtime.Object) } if in.APIGroups != nil { in, out := &in.APIGroups, &out.APIGroups *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.APIGroups = nil } if in.Resources != nil { in, out := &in.Resources, &out.Resources *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.Resources = nil } if in.ResourceNames != nil { in, out := &in.ResourceNames, &out.ResourceNames *out = make([]string, len(*in)) copy(*out, *in) - } else { - 
out.ResourceNames = nil } if in.NonResourceURLs != nil { in, out := &in.NonResourceURLs, &out.NonResourceURLs *out = make([]string, len(*in)) copy(*out, *in) - } else { - out.NonResourceURLs = nil } return nil } @@ -190,9 +165,11 @@ func DeepCopy_rbac_Role(in interface{}, out interface{}, c *conversion.Cloner) e { in := in.(*Role) out := out.(*Role) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if in.Rules != nil { in, out := &in.Rules, &out.Rules @@ -202,8 +179,6 @@ func DeepCopy_rbac_Role(in interface{}, out interface{}, c *conversion.Cloner) e return err } } - } else { - out.Rules = nil } return nil } @@ -213,20 +188,17 @@ func DeepCopy_rbac_RoleBinding(in interface{}, out interface{}, c *conversion.Cl { in := in.(*RoleBinding) out := out.(*RoleBinding) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if in.Subjects != nil { in, out := &in.Subjects, &out.Subjects *out = make([]Subject, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.Subjects = nil + copy(*out, *in) } - out.RoleRef = in.RoleRef return nil } } @@ -235,8 +207,7 @@ func DeepCopy_rbac_RoleBindingList(in interface{}, out interface{}, c *conversio { in := in.(*RoleBindingList) out := out.(*RoleBindingList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RoleBinding, len(*in)) @@ -245,8 +216,6 @@ func DeepCopy_rbac_RoleBindingList(in interface{}, out interface{}, c *conversio return err } } - } else { - out.Items = nil } return nil } @@ -256,8 +225,7 @@ func DeepCopy_rbac_RoleList(in interface{}, out interface{}, c *conversion.Clone { in := in.(*RoleList) out := out.(*RoleList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Role, len(*in)) @@ -266,8 +234,6 @@ func DeepCopy_rbac_RoleList(in interface{}, out interface{}, c *conversion.Clone return err } } - } else { - out.Items = nil } return nil } @@ -277,9 +243,7 @@ func DeepCopy_rbac_RoleRef(in interface{}, out interface{}, c *conversion.Cloner { in := in.(*RoleRef) out := out.(*RoleRef) - out.APIGroup = in.APIGroup - out.Kind = in.Kind - out.Name = in.Name + *out = *in return nil } } @@ -288,10 +252,7 @@ func DeepCopy_rbac_Subject(in interface{}, out interface{}, c *conversion.Cloner { in := in.(*Subject) out := out.(*Subject) - out.Kind = in.Kind - out.APIVersion = in.APIVersion - out.Name = in.Name - out.Namespace = in.Namespace + *out = *in return nil } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/settings/BUILD new file mode 100644 index 0000000000..a97c56d55a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/BUILD @@ -0,0 +1,44 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "register.go", + "types.go", + "zz_generated.deepcopy.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + 
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/settings/install:all-srcs", + "//pkg/apis/settings/v1alpha1:all-srcs", + "//pkg/apis/settings/validation:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/doc.go new file mode 100644 index 0000000000..7ff0cd047a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package,register +// +groupName=settings.k8s.io +package settings diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/settings/install/BUILD new file mode 100644 index 0000000000..b0c7f6e203 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/install/BUILD @@ -0,0 +1,35 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["install.go"], + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + "//pkg/apis/settings:go_default_library", + "//pkg/apis/settings/v1alpha1:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/announced", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/registered", + "//vendor:k8s.io/apimachinery/pkg/runtime", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/install/install.go new file mode 100644 index 0000000000..105a78d864 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/install/install.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the settings API group, making it available as +// an option to all of the API encoding/decoding machinery. 
+package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/settings" + "k8s.io/kubernetes/pkg/apis/settings/v1alpha1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: settings.GroupName, + VersionPreferenceOrder: []string{v1alpha1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/kubernetes/pkg/apis/settings", + AddInternalObjectsToScheme: settings.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/register.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/register.go new file mode 100644 index 0000000000..8584701279 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package settings + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// GroupName is the group name use in this package +const GroupName = "settings.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PodPreset{}, + &PodPresetList{}, + ) + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/types.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/types.go new file mode 100644 index 0000000000..910143e2a3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/types.go @@ -0,0 +1,63 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package settings + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/pkg/api" +) + +// +genclient=true + +// PodPreset is a policy resource that defines additional runtime +// requirements for a Pod. +type PodPreset struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // +optional + Spec PodPresetSpec +} + +// PodPresetSpec is a description of a pod injection policy. +type PodPresetSpec struct { + // Selector is a label query over a set of resources, in this case pods. + // Required. + Selector metav1.LabelSelector + // Env defines the collection of EnvVar to inject into containers. + // +optional + Env []api.EnvVar + // EnvFrom defines the collection of EnvFromSource to inject into containers. + // +optional + EnvFrom []api.EnvFromSource + // Volumes defines the collection of Volume to inject into the pod. + // +optional + Volumes []api.Volume + // VolumeMounts defines the collection of VolumeMount to inject into containers. + // +optional + VolumeMounts []api.VolumeMount +} + +// PodPresetList is a list of PodPreset objects. +type PodPresetList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []PodPreset +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/BUILD new file mode 100644 index 0000000000..82160919fa --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/BUILD @@ -0,0 +1,46 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "generated.pb.go", + "register.go", + "types.go", + "types_swagger_doc_generated.go", + "zz_generated.conversion.go", + "zz_generated.deepcopy.go", + "zz_generated.defaults.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + "//pkg/api/v1:go_default_library", + "//pkg/apis/settings:go_default_library", + "//vendor:github.com/gogo/protobuf/proto", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/doc.go new file mode 100644 index 0000000000..348c931751 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/settings +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=settings.k8s.io +package v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/generated.pb.go new file mode 100644 index 0000000000..fb206e12ee --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/generated.pb.go @@ -0,0 +1,924 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/settings/v1alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/settings/v1alpha1/generated.proto + + It has these top-level messages: + PodPreset + PodPresetList + PodPresetSpec +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *PodPreset) Reset() { *m = PodPreset{} } +func (*PodPreset) ProtoMessage() {} +func (*PodPreset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *PodPresetList) Reset() { *m = PodPresetList{} } +func (*PodPresetList) ProtoMessage() {} +func (*PodPresetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *PodPresetSpec) Reset() { *m = PodPresetSpec{} } +func (*PodPresetSpec) ProtoMessage() {} +func (*PodPresetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func init() { + proto.RegisterType((*PodPreset)(nil), "k8s.io.kubernetes.pkg.apis.settings.v1alpha1.PodPreset") + proto.RegisterType((*PodPresetList)(nil), "k8s.io.kubernetes.pkg.apis.settings.v1alpha1.PodPresetList") + proto.RegisterType((*PodPresetSpec)(nil), "k8s.io.kubernetes.pkg.apis.settings.v1alpha1.PodPresetSpec") +} +func (m *PodPreset) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodPreset) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func (m *PodPresetList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodPresetList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n3, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodPresetSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodPresetSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n4, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Env) > 0 { + for _, msg := range m.Env { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.EnvFrom) > 0 { + for _, msg := range m.EnvFrom { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Volumes) > 0 { + for _, msg := range m.Volumes { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if 
len(m.VolumeMounts) > 0 { + for _, msg := range m.VolumeMounts { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *PodPreset) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodPresetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodPresetSpec) Size() (n int) { + var l int + _ = l + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.EnvFrom) > 0 { + for _, e := range m.EnvFrom { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PodPreset) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodPreset{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodPresetSpec", "PodPresetSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodPresetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodPresetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodPreset", "PodPreset", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodPresetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodPresetSpec{`, + `Selector:` + strings.Replace(strings.Replace(this.Selector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Env:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "k8s_io_kubernetes_pkg_api_v1.EnvVar", 1), `&`, ``, 1) + `,`, + `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "k8s_io_kubernetes_pkg_api_v1.EnvFromSource", 1), `&`, ``, 1) + `,`, + `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "k8s_io_kubernetes_pkg_api_v1.Volume", 1), `&`, ``, 1) + `,`, + `VolumeMounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeMounts), "VolumeMount", "k8s_io_kubernetes_pkg_api_v1.VolumeMount", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PodPreset) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodPreset: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodPreset: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodPresetList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { 
+ return fmt.Errorf("proto: PodPresetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodPresetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodPreset{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodPresetSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodPresetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodPresetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, k8s_io_kubernetes_pkg_api_v1.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EnvFrom = append(m.EnvFrom, k8s_io_kubernetes_pkg_api_v1.EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, k8s_io_kubernetes_pkg_api_v1.Volume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMounts = append(m.VolumeMounts, k8s_io_kubernetes_pkg_api_v1.VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 550 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x9b, 0x6d, 0x4b, 0xeb, 0xb4, 0x8b, 0x12, 0x3c, 0x84, 0x1e, 0xb2, 0x4b, 0xf1, 0xb0, + 0xea, 0x3a, 0xb1, 0xab, 0xa8, 0xa0, 0xa7, 0xc8, 0x0a, 0x82, 0xcb, 0x2e, 0x29, 0xf4, 0x20, 0x2b, + 0x38, 0x4d, 0x9f, 0x69, 0x6c, 0x93, 0x09, 0x33, 0x93, 0xa0, 0x37, 0x3f, 0x82, 0x5f, 0x4a, 0x28, + 0xe8, 0x61, 0x8f, 0x9e, 0x16, 0x5b, 0xbf, 0x88, 0xcc, 0x74, 0xd2, 0x44, 0xba, 0x65, 0xab, 0xb7, + 0x79, 0x8f, 0xf7, 0xff, 0xbd, 0xff, 0x3f, 0x2f, 0xe8, 0xc5, 0xe4, 0x19, 0xc7, 0x21, 0x75, 0x26, + 0xe9, 0x10, 0x58, 0x0c, 0x02, 0xb8, 0x93, 0x4c, 0x02, 0x87, 0x24, 0x21, 0x77, 0x38, 0x08, 0x11, + 0xc6, 0x01, 0x77, 0xb2, 0x1e, 0x99, 0x26, 0x63, 0xd2, 0x73, 0x02, 0x88, 0x81, 0x11, 0x01, 0x23, + 0x9c, 0x30, 0x2a, 0xa8, 0x79, 0xb8, 0x54, 0xe3, 0x42, 0x8d, 0x93, 0x49, 0x80, 0xa5, 0x1a, 0xe7, + 0x6a, 0x9c, 0xab, 0x3b, 0x0f, 0x82, 0x50, 0x8c, 0xd3, 0x21, 0xf6, 0x69, 0xe4, 0x04, 0x34, 0xa0, + 0x8e, 0x82, 0x0c, 0xd3, 0x0f, 0xaa, 0x52, 0x85, 0x7a, 0x2d, 0xe1, 0x9d, 0xc7, 0xda, 0x1a, 0x49, + 0xc2, 0x88, 0xf8, 0xe3, 0x30, 0x06, 0xf6, 0xb9, 0x30, 0x17, 0x81, 0x20, 0x4e, 0xb6, 0x66, 0xa9, + 0xe3, 0x6c, 0x52, 0xb1, 0x34, 0x16, 0x61, 0x04, 0x6b, 0x82, 0x27, 0xd7, 0x09, 0xb8, 0x3f, 0x86, + 0x88, 0xac, 0xe9, 0x4a, 0xf6, 0x38, 0xb0, 0x0c, 0x58, 0xe1, 0x0d, 0x3e, 0x91, 0x28, 0x99, 0xc2, + 0x55, 0xf6, 0x0e, 0x37, 0x7e, 0xef, 0x2b, 0xa6, 0xbb, 0x3f, 0x0c, 0x74, 0xe3, 0x8c, 0x8e, 0xce, + 0x18, 0x70, 0x10, 0xe6, 0x7b, 0xd4, 0x94, 0xa9, 0x47, 0x44, 0x10, 0xcb, 0xd8, 0x37, 0x0e, 0x5a, + 0x47, 0x0f, 0xb1, 0x3e, 0x40, 0xd9, 0x7c, 0x71, 0x02, 0x39, 0x8d, 0xb3, 0x1e, 0x3e, 0x1d, 0x7e, + 0x04, 0x5f, 0x9c, 0x80, 0x20, 0xae, 0x39, 0xbb, 0xdc, 0xab, 0x2c, 0x2e, 0xf7, 0x50, 0xd1, 0xf3, + 0x56, 0x54, 0xf3, 0x1d, 0xaa, 0xf1, 0x04, 0x7c, 0x6b, 0x47, 0xd1, 0x9f, 0xe3, 0x7f, 0x39, 0x2f, + 0x5e, 0x19, 0xed, 0x27, 0xe0, 0xbb, 0x6d, 0xbd, 0xa8, 0x26, 0x2b, 0x4f, 0x61, 0xbb, 0xdf, 0x0d, + 0xb4, 0xbb, 0x9a, 0x7a, 0x13, 0x72, 0x61, 0x9e, 0xaf, 0x45, 0xc2, 0xdb, 0x45, 0x92, 0x6a, 0x15, + 0xe8, 0x96, 0xde, 0xd3, 0xcc, 0x3b, 0xa5, 0x38, 0xe7, 0xa8, 0x1e, 0x0a, 0x88, 0xb8, 0xb5, 0xb3, + 0x5f, 0x3d, 0x68, 0x1d, 0x3d, 0xfd, 0xcf, 0x3c, 0xee, 0xae, 0xde, 0x51, 0x7f, 0x2d, 0x69, 0xde, + 0x12, 0xda, 0xfd, 0x56, 
0x2d, 0xa5, 0x91, 0x29, 0x4d, 0x82, 0x9a, 0x1c, 0xa6, 0xe0, 0x0b, 0xca, + 0x74, 0x9a, 0x47, 0x5b, 0xa6, 0x21, 0x43, 0x98, 0xf6, 0xb5, 0xb4, 0x88, 0x94, 0x77, 0xbc, 0x15, + 0xd6, 0x7c, 0x89, 0xaa, 0x10, 0x67, 0x3a, 0xd0, 0x9d, 0xcd, 0x81, 0x24, 0xf5, 0x38, 0xce, 0x06, + 0x84, 0xb9, 0x2d, 0x8d, 0xab, 0x1e, 0xc7, 0x99, 0x27, 0xd5, 0xe6, 0x00, 0x35, 0x20, 0xce, 0x5e, + 0x31, 0x1a, 0x59, 0x55, 0x05, 0xba, 0x7f, 0x2d, 0x48, 0x0e, 0xf7, 0x69, 0xca, 0x7c, 0x70, 0x6f, + 0x6a, 0x5e, 0x43, 0xb7, 0xbd, 0x1c, 0x66, 0x9e, 0xa2, 0x46, 0x46, 0xa7, 0x69, 0x04, 0xdc, 0xaa, + 0x6d, 0x63, 0x70, 0xa0, 0x86, 0x0b, 0xe0, 0xb2, 0xe6, 0x5e, 0x4e, 0x31, 0x7d, 0xd4, 0x5e, 0x3e, + 0x4f, 0x68, 0x1a, 0x0b, 0x6e, 0xd5, 0x15, 0xf5, 0xee, 0x36, 0x54, 0xa5, 0x70, 0x6f, 0x6b, 0x74, + 0xbb, 0xd4, 0xe4, 0xde, 0x5f, 0x50, 0xf7, 0xde, 0x6c, 0x6e, 0x57, 0x2e, 0xe6, 0x76, 0xe5, 0xe7, + 0xdc, 0xae, 0x7c, 0x59, 0xd8, 0xc6, 0x6c, 0x61, 0x1b, 0x17, 0x0b, 0xdb, 0xf8, 0xb5, 0xb0, 0x8d, + 0xaf, 0xbf, 0xed, 0xca, 0xdb, 0x66, 0xfe, 0x4f, 0xfc, 0x09, 0x00, 0x00, 0xff, 0xff, 0x91, 0x84, + 0x03, 0x10, 0x2f, 0x05, 0x00, 0x00, +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/generated.proto new file mode 100644 index 0000000000..b5a80fa637 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/generated.proto @@ -0,0 +1,76 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.settings.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// PodPreset is a policy resource that defines additional runtime +// requirements for a Pod. +message PodPreset { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // +optional + optional PodPresetSpec spec = 2; +} + +// PodPresetList is a list of PodPreset objects. +message PodPresetList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated PodPreset items = 2; +} + +// PodPresetSpec is a description of a pod injection policy. +message PodPresetSpec { + // Selector is a label query over a set of resources, in this case pods. + // Required. + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + + // Env defines the collection of EnvVar to inject into containers. 
+ // +optional + repeated k8s.io.kubernetes.pkg.api.v1.EnvVar env = 2; + + // EnvFrom defines the collection of EnvFromSource to inject into containers. + // +optional + repeated k8s.io.kubernetes.pkg.api.v1.EnvFromSource envFrom = 3; + + // Volumes defines the collection of Volume to inject into the pod. + // +optional + repeated k8s.io.kubernetes.pkg.api.v1.Volume volumes = 4; + + // VolumeMounts defines the collection of VolumeMount to inject into containers. + // +optional + repeated k8s.io.kubernetes.pkg.api.v1.VolumeMount volumeMounts = 5; +} + diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/register.go new file mode 100644 index 0000000000..45afb50ca9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/register.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "settings.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PodPreset{}, + &PodPresetList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/types.go new file mode 100644 index 0000000000..fbfffd1360 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/types.go @@ -0,0 +1,67 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/pkg/api/v1" +) + +// +genclient=true + +// PodPreset is a policy resource that defines additional runtime +// requirements for a Pod. 
+type PodPreset struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // +optional + Spec PodPresetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// PodPresetSpec is a description of a pod injection policy. +type PodPresetSpec struct { + // Selector is a label query over a set of resources, in this case pods. + // Required. + Selector metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` + + // Env defines the collection of EnvVar to inject into containers. + // +optional + Env []v1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"` + // EnvFrom defines the collection of EnvFromSource to inject into containers. + // +optional + EnvFrom []v1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,3,rep,name=envFrom"` + // Volumes defines the collection of Volume to inject into the pod. + // +optional + Volumes []v1.Volume `json:"volumes,omitempty" protobuf:"bytes,4,rep,name=volumes"` + // VolumeMounts defines the collection of VolumeMount to inject into containers. + // +optional + VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty" protobuf:"bytes,5,rep,name=volumeMounts"` +} + +// PodPresetList is a list of PodPreset objects. +type PodPresetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []PodPreset `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..5b4dc665de --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,61 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_PodPreset = map[string]string{ + "": "PodPreset is a policy resource that defines additional runtime requirements for a Pod.", +} + +func (PodPreset) SwaggerDoc() map[string]string { + return map_PodPreset +} + +var map_PodPresetList = map[string]string{ + "": "PodPresetList is a list of PodPreset objects.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (PodPresetList) SwaggerDoc() map[string]string { + return map_PodPresetList +} + +var map_PodPresetSpec = map[string]string{ + "": "PodPresetSpec is a description of a pod injection policy.", + "selector": "Selector is a label query over a set of resources, in this case pods. Required.", + "env": "Env defines the collection of EnvVar to inject into containers.", + "envFrom": "EnvFrom defines the collection of EnvFromSource to inject into containers.", + "volumes": "Volumes defines the collection of Volume to inject into the pod.", + "volumeMounts": "VolumeMounts defines the collection of VolumeMount to inject into containers.", +} + +func (PodPresetSpec) SwaggerDoc() map[string]string { + return map_PodPresetSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/zz_generated.conversion.go new file mode 100644 index 0000000000..f919a7ee1f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,159 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1alpha1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + settings "k8s.io/kubernetes/pkg/apis/settings" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1alpha1_PodPreset_To_settings_PodPreset, + Convert_settings_PodPreset_To_v1alpha1_PodPreset, + Convert_v1alpha1_PodPresetList_To_settings_PodPresetList, + Convert_settings_PodPresetList_To_v1alpha1_PodPresetList, + Convert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec, + Convert_settings_PodPresetSpec_To_v1alpha1_PodPresetSpec, + ) +} + +func autoConvert_v1alpha1_PodPreset_To_settings_PodPreset(in *PodPreset, out *settings.PodPreset, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_PodPreset_To_settings_PodPreset(in *PodPreset, out *settings.PodPreset, s conversion.Scope) error { + return autoConvert_v1alpha1_PodPreset_To_settings_PodPreset(in, out, s) +} + +func autoConvert_settings_PodPreset_To_v1alpha1_PodPreset(in *settings.PodPreset, out *PodPreset, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_settings_PodPresetSpec_To_v1alpha1_PodPresetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_settings_PodPreset_To_v1alpha1_PodPreset(in *settings.PodPreset, out *PodPreset, s conversion.Scope) error { + return autoConvert_settings_PodPreset_To_v1alpha1_PodPreset(in, out, s) +} + +func autoConvert_v1alpha1_PodPresetList_To_settings_PodPresetList(in *PodPresetList, out *settings.PodPresetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]settings.PodPreset, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_PodPreset_To_settings_PodPreset(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1alpha1_PodPresetList_To_settings_PodPresetList(in *PodPresetList, out *settings.PodPresetList, s conversion.Scope) error { + return autoConvert_v1alpha1_PodPresetList_To_settings_PodPresetList(in, out, s) +} + +func autoConvert_settings_PodPresetList_To_v1alpha1_PodPresetList(in *settings.PodPresetList, out *PodPresetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodPreset, len(*in)) + for i := range *in { + if err := Convert_settings_PodPreset_To_v1alpha1_PodPreset(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]PodPreset, 0) + } + return nil +} + +func Convert_settings_PodPresetList_To_v1alpha1_PodPresetList(in *settings.PodPresetList, out *PodPresetList, s conversion.Scope) error { + return autoConvert_settings_PodPresetList_To_v1alpha1_PodPresetList(in, out, s) +} + +func autoConvert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec(in *PodPresetSpec, out *settings.PodPresetSpec, s conversion.Scope) error { + out.Selector = in.Selector + out.Env = *(*[]api.EnvVar)(unsafe.Pointer(&in.Env)) + out.EnvFrom = *(*[]api.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]api.Volume, len(*in)) + for i := range *in { + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + out.VolumeMounts = *(*[]api.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + return nil +} + +func Convert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec(in *PodPresetSpec, out *settings.PodPresetSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec(in, out, s) +} + +func autoConvert_settings_PodPresetSpec_To_v1alpha1_PodPresetSpec(in *settings.PodPresetSpec, out *PodPresetSpec, s conversion.Scope) error { + out.Selector = in.Selector + out.Env = *(*[]v1.EnvVar)(unsafe.Pointer(&in.Env)) + out.EnvFrom = *(*[]v1.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + out.VolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + return nil +} + +func Convert_settings_PodPresetSpec_To_v1alpha1_PodPresetSpec(in *settings.PodPresetSpec, out *PodPresetSpec, s conversion.Scope) error { + return autoConvert_settings_PodPresetSpec_To_v1alpha1_PodPresetSpec(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..86dc3dc066 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,124 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api_v1 "k8s.io/kubernetes/pkg/api/v1" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_PodPreset, InType: reflect.TypeOf(&PodPreset{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_PodPresetList, InType: reflect.TypeOf(&PodPresetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_PodPresetSpec, InType: reflect.TypeOf(&PodPresetSpec{})}, + ) +} + +func DeepCopy_v1alpha1_PodPreset(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPreset) + out := out.(*PodPreset) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1alpha1_PodPresetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1alpha1_PodPresetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPresetList) + out := out.(*PodPresetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodPreset, len(*in)) + for i := range *in { + if err := DeepCopy_v1alpha1_PodPreset(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1alpha1_PodPresetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPresetSpec) + out := out.(*PodPresetSpec) + *out = *in + if newVal, err := c.DeepCopy(&in.Selector); err != nil { + return err + } else { + out.Selector = *newVal.(*v1.LabelSelector) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]api_v1.EnvVar, len(*in)) + for i := range *in { + if err := api_v1.DeepCopy_v1_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]api_v1.EnvFromSource, len(*in)) + for i := range *in { + if err := api_v1.DeepCopy_v1_EnvFromSource(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]api_v1.Volume, len(*in)) + for i := range *in { + if err := api_v1.DeepCopy_v1_Volume(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]api_v1.VolumeMount, len(*in)) + copy(*out, *in) + } + return nil + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/zz_generated.defaults.go new file mode 100644 index 0000000000..6e117b33ab --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,98 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! 
+ +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/kubernetes/pkg/api/v1" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&PodPreset{}, func(obj interface{}) { SetObjectDefaults_PodPreset(obj.(*PodPreset)) }) + scheme.AddTypeDefaultingFunc(&PodPresetList{}, func(obj interface{}) { SetObjectDefaults_PodPresetList(obj.(*PodPresetList)) }) + return nil +} + +func SetObjectDefaults_PodPreset(in *PodPreset) { + for i := range in.Spec.Env { + a := &in.Spec.Env[i] + if a.ValueFrom != nil { + if a.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(a.ValueFrom.FieldRef) + } + } + } + for i := range in.Spec.Volumes { + a := &in.Spec.Volumes[i] + v1.SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } +} + +func SetObjectDefaults_PodPresetList(in *PodPresetList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_PodPreset(a) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/zz_generated.deepcopy.go new file mode 100644 index 0000000000..c124fb2f94 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/zz_generated.deepcopy.go @@ -0,0 +1,124 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package settings + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/kubernetes/pkg/api" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_settings_PodPreset, InType: reflect.TypeOf(&PodPreset{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_settings_PodPresetList, InType: reflect.TypeOf(&PodPresetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_settings_PodPresetSpec, InType: reflect.TypeOf(&PodPresetSpec{})}, + ) +} + +func DeepCopy_settings_PodPreset(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPreset) + out := out.(*PodPreset) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_settings_PodPresetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_settings_PodPresetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPresetList) + out := out.(*PodPresetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodPreset, len(*in)) + for i := range *in { + if err := DeepCopy_settings_PodPreset(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_settings_PodPresetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPresetSpec) + out := out.(*PodPresetSpec) + *out = *in + if newVal, err := c.DeepCopy(&in.Selector); err != nil { + return err + } else { + out.Selector = *newVal.(*v1.LabelSelector) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]api.EnvVar, len(*in)) + for i := range *in { + if err := api.DeepCopy_api_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]api.EnvFromSource, len(*in)) + for i := range *in { + if err := api.DeepCopy_api_EnvFromSource(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]api.Volume, len(*in)) + for i := range *in { + if err := api.DeepCopy_api_Volume(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]api.VolumeMount, len(*in)) + copy(*out, *in) + } + return nil + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/storage/BUILD index d12e144d53..71ee22cd85 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -15,17 +12,34 @@ go_library( srcs = [ "doc.go", "register.go", - "types.generated.go", "types.go", "zz_generated.deepcopy.go", ], tags = ["automanaged"], deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/conversion:go_default_library", - 
"//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/storage/install:all-srcs", + "//pkg/apis/storage/util:all-srcs", + "//pkg/apis/storage/v1:all-srcs", + "//pkg/apis/storage/v1beta1:all-srcs", + "//pkg/apis/storage/validation:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/storage/OWNERS new file mode 100755 index 0000000000..d59ed6e1d8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/OWNERS @@ -0,0 +1,3 @@ +reviewers: +- deads2k +- mbohlool diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/doc.go index 0294d1350b..a7eb30b643 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/doc.go @@ -16,5 +16,4 @@ limitations under the License. // +k8s:deepcopy-gen=package,register // +groupName=storage.k8s.io -// +g8k:openapi-gen=true package storage diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/storage/install/BUILD index e45749f04d..c9b9ddf938 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/install/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -15,9 +12,26 @@ go_library( srcs = ["install.go"], tags = ["automanaged"], deps = [ - "//pkg/apimachinery/announced:go_default_library", + "//pkg/api:go_default_library", "//pkg/apis/storage:go_default_library", + "//pkg/apis/storage/v1:go_default_library", "//pkg/apis/storage/v1beta1:go_default_library", - "//pkg/util/sets:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/announced", + "//vendor:k8s.io/apimachinery/pkg/apimachinery/registered", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/util/sets", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/install/install.go index b46c4d6f0d..8ba3050dcf 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/install/install.go @@ -19,25 +19,36 @@ limitations under the License. 
package install import ( - "k8s.io/kubernetes/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/storage" + "k8s.io/kubernetes/pkg/apis/storage/v1" "k8s.io/kubernetes/pkg/apis/storage/v1beta1" - "k8s.io/kubernetes/pkg/util/sets" ) func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { if err := announced.NewGroupMetaFactory( &announced.GroupMetaFactoryArgs{ - GroupName: storage.GroupName, - VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version}, + GroupName: storage.GroupName, + // TODO: change the order when GKE supports v1 + VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version, v1.SchemeGroupVersion.Version}, ImportPrefix: "k8s.io/kubernetes/pkg/apis/storage", RootScopedKinds: sets.NewString("StorageClass"), AddInternalObjectsToScheme: storage.AddToScheme, }, announced.VersionToSchemeFunc{ + v1.SchemeGroupVersion.Version: v1.AddToScheme, v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, }, - ).Announce().RegisterAndEnable(); err != nil { + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { panic(err) } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/register.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/register.go index 2da8d17c2c..aaa619b4d9 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/register.go @@ -17,24 +17,23 @@ limitations under the License. package storage import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName is the group name use in this package const GroupName = "storage.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} // Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { +func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } // Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { +func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } @@ -45,10 +44,6 @@ var ( func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &api.ListOptions{}, - &api.DeleteOptions{}, - &api.ExportOptions{}, - &StorageClass{}, &StorageClassList{}, ) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/types.generated.go deleted file mode 100644 index 25c4b04c86..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/types.generated.go +++ /dev/null @@ -1,900 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package storage - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg2_api "k8s.io/kubernetes/pkg/api" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg3_types "k8s.io/kubernetes/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg2_api.ObjectMeta - var v1 pkg1_unversioned.TypeMeta - var v2 pkg3_types.UID - var v3 time.Time - _, _, _, _ = v0, v1, v2, v3 - } -} - -func (x *StorageClass) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[4] = len(x.Parameters) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yy10.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.ObjectMeta - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Provisioner)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("provisioner")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Provisioner)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.Parameters == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - z.F.EncMapStringStringV(x.Parameters, false, e) - } - } - } else { - r.EncodeNil() - } 
- } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("parameters")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Parameters == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - z.F.EncMapStringStringV(x.Parameters, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *StorageClass) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct19 := r.ContainerType() - if yyct19 == codecSelferValueTypeMap1234 { - yyl19 := r.ReadMapStart() - if yyl19 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl19, d) - } - } else if yyct19 == codecSelferValueTypeArray1234 { - yyl19 := r.ReadArrayStart() - if yyl19 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl19, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *StorageClass) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys20Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys20Slc - var yyhl20 bool = l >= 0 - for yyj20 := 0; ; yyj20++ { - if yyhl20 { - if yyj20 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys20Slc = r.DecodeBytes(yys20Slc, true, true) - yys20 := string(yys20Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys20 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv23 := &x.ObjectMeta - yyv23.CodecDecodeSelf(d) - } - case "provisioner": - if r.TryDecodeAsNil() { - x.Provisioner = "" - } else { - x.Provisioner = string(r.DecodeString()) - } - case "parameters": - if r.TryDecodeAsNil() { - x.Parameters = nil - } else { - yyv25 := &x.Parameters - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - z.F.DecMapStringStringX(yyv25, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys20) - } // end switch yys20 - } // end for yyj20 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *StorageClass) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj27 int - var yyb27 bool - var yyhl27 bool = l >= 0 - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - 
} - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv30 := &x.ObjectMeta - yyv30.CodecDecodeSelf(d) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Provisioner = "" - } else { - x.Provisioner = string(r.DecodeString()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Parameters = nil - } else { - yyv32 := &x.Parameters - yym33 := z.DecBinary() - _ = yym33 - if false { - } else { - z.F.DecMapStringStringX(yyv32, false, d) - } - } - for { - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj27-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *StorageClassList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym34 := z.EncBinary() - _ = yym34 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep35 := !z.EncBinary() - yy2arr35 := z.EncBasicHandle().StructToArray - var yyq35 [4]bool - _, _, _ = yysep35, yyq35, yy2arr35 - const yyr35 bool = false - yyq35[0] = x.Kind != "" - yyq35[1] = x.APIVersion != "" - yyq35[2] = true - var yynn35 int - if yyr35 || yy2arr35 { - r.EncodeArrayStart(4) - } else { - yynn35 = 1 - for _, b := range yyq35 { - if b { - yynn35++ - } - } - r.EncodeMapStart(yynn35) - yynn35 = 0 - } - if yyr35 || yy2arr35 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq35[0] { - yym37 := z.EncBinary() - _ = yym37 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq35[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym38 := z.EncBinary() - _ = yym38 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr35 || yy2arr35 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq35[1] { - yym40 := z.EncBinary() - _ = yym40 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq35[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym41 := z.EncBinary() - _ = yym41 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } 
- } - } - if yyr35 || yy2arr35 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq35[2] { - yy43 := &x.ListMeta - yym44 := z.EncBinary() - _ = yym44 - if false { - } else if z.HasExtensions() && z.EncExt(yy43) { - } else { - z.EncFallback(yy43) - } - } else { - r.EncodeNil() - } - } else { - if yyq35[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy45 := &x.ListMeta - yym46 := z.EncBinary() - _ = yym46 - if false { - } else if z.HasExtensions() && z.EncExt(yy45) { - } else { - z.EncFallback(yy45) - } - } - } - if yyr35 || yy2arr35 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym48 := z.EncBinary() - _ = yym48 - if false { - } else { - h.encSliceStorageClass(([]StorageClass)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym49 := z.EncBinary() - _ = yym49 - if false { - } else { - h.encSliceStorageClass(([]StorageClass)(x.Items), e) - } - } - } - if yyr35 || yy2arr35 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *StorageClassList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym50 := z.DecBinary() - _ = yym50 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct51 := r.ContainerType() - if yyct51 == codecSelferValueTypeMap1234 { - yyl51 := r.ReadMapStart() - if yyl51 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl51, d) - } - } else if yyct51 == codecSelferValueTypeArray1234 { - yyl51 := r.ReadArrayStart() - if yyl51 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl51, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *StorageClassList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys52Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys52Slc - var yyhl52 bool = l >= 0 - for yyj52 := 0; ; yyj52++ { - if yyhl52 { - if yyj52 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys52Slc = r.DecodeBytes(yys52Slc, true, true) - yys52 := string(yys52Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys52 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv55 := &x.ListMeta - yym56 := z.DecBinary() - _ = yym56 - if false { - } else if z.HasExtensions() && z.DecExt(yyv55) { - } else { - z.DecFallback(yyv55, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv57 := &x.Items - yym58 := z.DecBinary() - _ = yym58 - if false { - } else { - 
h.decSliceStorageClass((*[]StorageClass)(yyv57), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys52) - } // end switch yys52 - } // end for yyj52 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *StorageClassList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj59 int - var yyb59 bool - var yyhl59 bool = l >= 0 - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l - } else { - yyb59 = r.CheckBreak() - } - if yyb59 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l - } else { - yyb59 = r.CheckBreak() - } - if yyb59 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l - } else { - yyb59 = r.CheckBreak() - } - if yyb59 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv62 := &x.ListMeta - yym63 := z.DecBinary() - _ = yym63 - if false { - } else if z.HasExtensions() && z.DecExt(yyv62) { - } else { - z.DecFallback(yyv62, false) - } - } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l - } else { - yyb59 = r.CheckBreak() - } - if yyb59 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv64 := &x.Items - yym65 := z.DecBinary() - _ = yym65 - if false { - } else { - h.decSliceStorageClass((*[]StorageClass)(yyv64), d) - } - } - for { - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l - } else { - yyb59 = r.CheckBreak() - } - if yyb59 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj59-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceStorageClass(v []StorageClass, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv66 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy67 := &yyv66 - yy67.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceStorageClass(v *[]StorageClass, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv68 := *v - yyh68, yyl68 := z.DecSliceHelperStart() - var yyc68 bool - if yyl68 == 0 { - if yyv68 == nil { - yyv68 = []StorageClass{} - yyc68 = true - } else if len(yyv68) != 0 { - yyv68 = yyv68[:0] - yyc68 = true - } - } else if yyl68 > 0 { - var yyrr68, yyrl68 int - var yyrt68 bool - if yyl68 > cap(yyv68) { - - yyrg68 := len(yyv68) > 0 - yyv268 := yyv68 - yyrl68, yyrt68 = z.DecInferLen(yyl68, z.DecBasicHandle().MaxInitLen, 280) - if yyrt68 { - if yyrl68 <= cap(yyv68) { - yyv68 = yyv68[:yyrl68] - } else { - yyv68 = make([]StorageClass, yyrl68) - } - } else { - yyv68 = make([]StorageClass, yyrl68) - } - yyc68 = true - yyrr68 = len(yyv68) - if yyrg68 { 
- copy(yyv68, yyv268) - } - } else if yyl68 != len(yyv68) { - yyv68 = yyv68[:yyl68] - yyc68 = true - } - yyj68 := 0 - for ; yyj68 < yyrr68; yyj68++ { - yyh68.ElemContainerState(yyj68) - if r.TryDecodeAsNil() { - yyv68[yyj68] = StorageClass{} - } else { - yyv69 := &yyv68[yyj68] - yyv69.CodecDecodeSelf(d) - } - - } - if yyrt68 { - for ; yyj68 < yyl68; yyj68++ { - yyv68 = append(yyv68, StorageClass{}) - yyh68.ElemContainerState(yyj68) - if r.TryDecodeAsNil() { - yyv68[yyj68] = StorageClass{} - } else { - yyv70 := &yyv68[yyj68] - yyv70.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj68 := 0 - for ; !r.CheckBreak(); yyj68++ { - - if yyj68 >= len(yyv68) { - yyv68 = append(yyv68, StorageClass{}) // var yyz68 StorageClass - yyc68 = true - } - yyh68.ElemContainerState(yyj68) - if yyj68 < len(yyv68) { - if r.TryDecodeAsNil() { - yyv68[yyj68] = StorageClass{} - } else { - yyv71 := &yyv68[yyj68] - yyv71.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj68 < len(yyv68) { - yyv68 = yyv68[:yyj68] - yyc68 = true - } else if yyj68 == 0 && yyv68 == nil { - yyv68 = []StorageClass{} - yyc68 = true - } - } - yyh68.End() - if yyc68 { - *v = yyv68 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/types.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/types.go index 1e4baae59e..3d589de5c1 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/types.go @@ -16,10 +16,7 @@ limitations under the License. package storage -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" -) +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient=true // +nonNamespaced=true @@ -31,15 +28,15 @@ import ( // called "profiles" in other storage systems. // The name of a StorageClass object is significant, and is how users can request a particular class. type StorageClass struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // +optional - api.ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta // provisioner is the driver expected to handle this StorageClass. // This is an optionally-prefixed name, like a label key. // For example: "kubernetes.io/gce-pd" or "kubernetes.io/aws-ebs". // This value may not be empty. - Provisioner string `json:"provisioner"` + Provisioner string // parameters holds parameters for the provisioner. // These values are opaque to the system and are passed directly @@ -47,17 +44,17 @@ type StorageClass struct { // not empty. The maximum number of parameters is // 512, with a cumulative max size of 256K // +optional - Parameters map[string]string `json:"parameters,omitempty"` + Parameters map[string]string } // StorageClassList is a collection of storage classes. 
type StorageClassList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta // Standard list metadata // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - unversioned.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta // Items is the list of StorageClasses - Items []StorageClass `json:"items"` + Items []StorageClass } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/util/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/storage/util/BUILD deleted file mode 100644 index 56266ade03..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/util/BUILD +++ /dev/null @@ -1,18 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["helpers.go"], - tags = ["automanaged"], - deps = ["//pkg/api:go_default_library"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/util/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/util/helpers.go deleted file mode 100644 index 49eada9934..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/util/helpers.go +++ /dev/null @@ -1,136 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "k8s.io/kubernetes/pkg/api" -) - -// IsDefaultStorageClassAnnotation represents a StorageClass annotation that -// marks a class as the default StorageClass -//TODO: Update IsDefaultStorageClassannotation and remove Beta when no longer used -const IsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class" -const BetaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class" - -// AlphaStorageClassAnnotation represents the previous alpha storage class -// annotation. it's no longer used and held here for posterity. -const AlphaStorageClassAnnotation = "volume.alpha.kubernetes.io/storage-class" - -// BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. -// It's currently still used and will be held for backwards compatibility -const BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" - -// StorageClassAnnotation represents the storage class associated with a resource. -// It currently matches the Beta value and can change when official is set. -// - in PersistentVolumeClaim it represents required class to match. -// Only PersistentVolumes with the same class (i.e. annotation with the same -// value) can be bound to the claim. In case no such volume exists, the -// controller will provision a new one using StorageClass instance with -// the same name as the annotation value. -// - in PersistentVolume it represents storage class to which the persistent -// volume belongs. 
-//TODO: Update this to final annotation value as it matches BetaStorageClassAnnotation for now -const StorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" - -// GetVolumeStorageClass returns value of StorageClassAnnotation or empty string in case -// the annotation does not exist. -// TODO: change to PersistentVolume.Spec.Class value when this attribute is -// introduced. -func GetVolumeStorageClass(volume *api.PersistentVolume) string { - if class, found := volume.Annotations[StorageClassAnnotation]; found { - return class - } - - // 'nil' is interpreted as "", i.e. the volume does not belong to any class. - return "" -} - -// GetClaimStorageClass returns name of class that is requested by given claim. -// Request for `nil` class is interpreted as request for class "", -// i.e. for a classless PV. -// TODO: change to PersistentVolumeClaim.Spec.Class value when this -// attribute is introduced. -func GetClaimStorageClass(claim *api.PersistentVolumeClaim) string { - if class, found := claim.Annotations[StorageClassAnnotation]; found { - return class - } - - return "" -} - -// GetStorageClassAnnotation returns the StorageClass value -// if the annotation is set, empty string if not -// TODO: remove Alpha and Beta when no longer used or needed -func GetStorageClassAnnotation(obj api.ObjectMeta) string { - if class, ok := obj.Annotations[StorageClassAnnotation]; ok { - return class - } - if class, ok := obj.Annotations[BetaStorageClassAnnotation]; ok { - return class - } - if class, ok := obj.Annotations[AlphaStorageClassAnnotation]; ok { - return class - } - - return "" -} - -// HasStorageClassAnnotation returns a boolean -// if the annotation is set -// TODO: remove Alpha and Beta when no longer used or needed -func HasStorageClassAnnotation(obj api.ObjectMeta) bool { - if _, found := obj.Annotations[StorageClassAnnotation]; found { - return found - } - if _, found := obj.Annotations[BetaStorageClassAnnotation]; found { - return found - } - if _, found := obj.Annotations[AlphaStorageClassAnnotation]; found { - return found - } - - return false - -} - -// IsDefaultAnnotationText returns a pretty Yes/No String if -// the annotation is set -// TODO: remove Beta when no longer needed -func IsDefaultAnnotationText(obj api.ObjectMeta) string { - if obj.Annotations[IsDefaultStorageClassAnnotation] == "true" { - return "Yes" - } - if obj.Annotations[BetaIsDefaultStorageClassAnnotation] == "true" { - return "Yes" - } - - return "No" -} - -// IsDefaultAnnotation returns a boolean if -// the annotation is set -// TODO: remove Beta when no longer needed -func IsDefaultAnnotation(obj api.ObjectMeta) bool { - if obj.Annotations[IsDefaultStorageClassAnnotation] == "true" { - return true - } - if obj.Annotations[BetaIsDefaultStorageClassAnnotation] == "true" { - return true - } - - return false -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/BUILD new file mode 100644 index 0000000000..19527f1d0d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/BUILD @@ -0,0 +1,48 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "generated.pb.go", + "register.go", + "types.go", + "types_swagger_doc_generated.go", + "zz_generated.conversion.go", + "zz_generated.deepcopy.go", + "zz_generated.defaults.go", + ], + tags = ["automanaged"], + deps = [ + 
"//pkg/apis/storage:go_default_library", + "//vendor:github.com/gogo/protobuf/proto", + "//vendor:github.com/gogo/protobuf/sortkeys", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/storage/v1/util:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/doc.go new file mode 100644 index 0000000000..32f99daea9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/storage +// +groupName=storage.k8s.io +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta +package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/generated.pb.go new file mode 100644 index 0000000000..3228238b7d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/generated.pb.go @@ -0,0 +1,730 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/storage/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/storage/v1/generated.proto + + It has these top-level messages: + StorageClass + StorageClassList +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *StorageClass) Reset() { *m = StorageClass{} } +func (*StorageClass) ProtoMessage() {} +func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *StorageClassList) Reset() { *m = StorageClassList{} } +func (*StorageClassList) ProtoMessage() {} +func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func init() { + proto.RegisterType((*StorageClass)(nil), "k8s.io.kubernetes.pkg.apis.storage.v1.StorageClass") + proto.RegisterType((*StorageClassList)(nil), "k8s.io.kubernetes.pkg.apis.storage.v1.StorageClassList") +} +func (m *StorageClass) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StorageClass) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Provisioner))) + i += copy(data[i:], m.Provisioner) + if len(m.Parameters) > 0 { + for k := range m.Parameters { + data[i] = 0x1a + i++ + v := m.Parameters[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + return i, nil +} + +func (m *StorageClassList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StorageClassList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *StorageClass) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Provisioner) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k 
+ _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *StorageClassList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *StorageClass) String() string { + if this == nil { + return "nil" + } + keysForParameters := make([]string, 0, len(this.Parameters)) + for k := range this.Parameters { + keysForParameters = append(keysForParameters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + mapStringForParameters := "map[string]string{" + for _, k := range keysForParameters { + mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) + } + mapStringForParameters += "}" + s := strings.Join([]string{`&StorageClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, + `Parameters:` + mapStringForParameters + `,`, + `}`, + }, "") + return s +} +func (this *StorageClassList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *StorageClass) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType 
= %d for field Provisioner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provisioner = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageClassList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StorageClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, 
err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 474 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x92, 0x4f, 0x6f, 0xd3, 0x30, + 0x18, 0xc6, 0xe3, 0x54, 0x95, 0x36, 0x17, 0x44, 0x15, 0x38, 0x54, 0x3d, 0x64, 0xd5, 0x04, 0x52, + 0x2f, 0xd8, 0x74, 0x63, 0x68, 0x42, 0xe2, 0xd2, 0x89, 0x03, 0x12, 0x88, 0x29, 0x5c, 0x10, 0xe2, + 0x80, 0xdb, 0xbd, 0xa4, 0x26, 0x4d, 0x1c, 0xd9, 0x6f, 0x02, 0xbd, 0xf1, 0x11, 0xf8, 0x58, 0x15, + 0xa7, 0x1d, 0x39, 0x0d, 0x1a, 0xbe, 0x08, 0xca, 0x1f, 0x96, 0x88, 0x6c, 0xa2, 0xda, 0x2d, 0xaf, + 0xed, 0xdf, 0xe3, 0xe7, 0x79, 0x1c, 0x7a, 0x14, 0x1c, 0x1b, 0x26, 0x15, 0x0f, 0x92, 0x19, 0xe8, + 0x08, 0x10, 0x0c, 0x8f, 0x03, 0x9f, 0x8b, 0x58, 0x1a, 0x6e, 0x50, 0x69, 0xe1, 0x03, 0x4f, 0x27, + 0xdc, 0x87, 0x08, 0xb4, 0x40, 0x38, 0x63, 0xb1, 0x56, 0xa8, 0x9c, 0x07, 0x25, 0xc6, 0x6a, 0x8c, + 0xc5, 0x81, 0xcf, 0x72, 0x8c, 0x55, 0x18, 0x4b, 0x27, 0xc3, 0x87, 0xbe, 0xc4, 0x45, 0x32, 0x63, + 0x73, 0x15, 0x72, 0x5f, 0xf9, 0x8a, 0x17, 0xf4, 0x2c, 0xf9, 0x58, 0x4c, 0xc5, 0x50, 0x7c, 0x95, + 0xaa, 0xc3, 0xc7, 0x95, 0x19, 0x11, 0xcb, 0x50, 0xcc, 0x17, 0x32, 0x02, 0xbd, 0xaa, 0xed, 0x84, + 0x80, 0xe2, 0x0a, 0x2f, 0x43, 0x7e, 0x1d, 0xa5, 0x93, 0x08, 0x65, 0x08, 0x2d, 0xe0, 0xc9, 0xff, + 0x00, 0x33, 0x5f, 0x40, 0x28, 0x5a, 0xdc, 0xe1, 0x75, 0x5c, 0x82, 0x72, 0xc9, 0x65, 0x84, 0x06, + 0x75, 0x0b, 0x6a, 0x64, 0x32, 0xa0, 0x53, 0xd0, 0x75, 0x20, 0xf8, 0x22, 0xc2, 0x78, 0x79, 0x55, + 0xbf, 0xfb, 0x3f, 0x6d, 0x7a, 0xeb, 0x4d, 0xd9, 0xe3, 0xc9, 0x52, 0x18, 0xe3, 0x7c, 0xa0, 0x3b, + 0x79, 0xfe, 0x33, 0x81, 0x62, 0x40, 0x46, 0x64, 0xdc, 0x3b, 0x78, 0xc4, 0xaa, 0x37, 0x68, 0xda, + 0xa9, 0x5f, 0x21, 0x3f, 0xcd, 0xd2, 0x09, 0x7b, 0x3d, 0xfb, 0x04, 0x73, 0x7c, 0x05, 0x28, 0xa6, + 0xce, 0xfa, 0x62, 0xcf, 0xca, 0x2e, 0xf6, 0x68, 0xbd, 0xe6, 0x5d, 0xaa, 0x3a, 0x47, 0xb4, 0x17, + 0x6b, 0x95, 0x4a, 0x23, 0x55, 0x04, 0x7a, 0x60, 0x8f, 0xc8, 0x78, 0x77, 0x7a, 0xb7, 0x42, 0x7a, + 0xa7, 0xf5, 0x96, 0xd7, 0x3c, 0xe7, 0x7c, 0xa6, 0x34, 0x16, 0x5a, 0x84, 0x80, 0xa0, 0xcd, 0xa0, + 0x33, 0xea, 0x8c, 0x7b, 0x07, 0x27, 0x6c, 0xab, 0xdf, 0x83, 0x35, 0x13, 0xb2, 0xd3, 0x4b, 0x95, + 0xe7, 0x11, 0xea, 0x55, 0xed, 0xb6, 0xde, 0xf0, 0x1a, 0x57, 0x0d, 0x9f, 0xd1, 0x3b, 0xff, 0x20, + 0x4e, 0x9f, 0x76, 0x02, 0x58, 0x15, 0xfd, 0xec, 0x7a, 0xf9, 0xa7, 0x73, 0x8f, 0x76, 0x53, 0xb1, + 0x4c, 0xa0, 0x8c, 0xe3, 0x95, 0xc3, 0x53, 0xfb, 0x98, 0xec, 0x7f, 0x27, 0xb4, 0xdf, 0xbc, 0xff, + 0xa5, 0x34, 0xe8, 0xbc, 0x6f, 0xb5, 0xcc, 0xb6, 0x6b, 0x39, 0xa7, 0x8b, 0x8e, 0xfb, 0x95, 0xeb, + 0x9d, 0xbf, 0x2b, 0x8d, 0x86, 0xdf, 0xd2, 0xae, 0x44, 0x08, 0xcd, 0xc0, 0x2e, 0x5a, 0x3a, 0xbc, + 0x41, 0x4b, 0xd3, 0xdb, 0x95, 0x7e, 0xf7, 0x45, 0xae, 0xe4, 0x95, 0x82, 0xd3, 0xfb, 0xeb, 0x8d, + 0x6b, 0x9d, 0x6f, 0x5c, 0xeb, 0xc7, 0xc6, 0xb5, 0xbe, 0x66, 0x2e, 0x59, 0x67, 0x2e, 0x39, 0xcf, + 0x5c, 0xf2, 0x2b, 0x73, 0xc9, 0xb7, 0xdf, 0xae, 0xf5, 0xce, 0x4e, 0x27, 0x7f, 0x02, 0x00, 0x00, + 0xff, 0xff, 0xe8, 0xe1, 0xb9, 0x93, 0xec, 0x03, 0x00, 0x00, +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/generated.proto 
b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/generated.proto new file mode 100644 index 0000000000..92e18cf9e9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/generated.proto @@ -0,0 +1,63 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.storage.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. +// +// StorageClasses are non-namespaced; the name of the storage class +// according to etcd is in ObjectMeta.Name. +message StorageClass { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Provisioner indicates the type of the provisioner. + optional string provisioner = 2; + + // Parameters holds the parameters for the provisioner that should + // create volumes of this storage class. + // +optional + map<string, string> parameters = 3; +} + +// StorageClassList is a collection of storage classes. +message StorageClassList { + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of StorageClasses + repeated StorageClass items = 2; +} + diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/register.go new file mode 100644 index 0000000000..24d6bfa7d3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "storage.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &StorageClass{}, + &StorageClassList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/types.go new file mode 100644 index 0000000000..0591b397cd --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/types.go @@ -0,0 +1,57 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient=true +// +nonNamespaced=true + +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. +// +// StorageClasses are non-namespaced; the name of the storage class +// according to etcd is in ObjectMeta.Name. +type StorageClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Provisioner indicates the type of the provisioner. + Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` + + // Parameters holds the parameters for the provisioner that should + // create volumes of this storage class. + // +optional + Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` +} + +// StorageClassList is a collection of storage classes. 
+type StorageClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of StorageClasses + Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..6f27177085 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/types_swagger_doc_generated.go @@ -0,0 +1,51 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_StorageClass = map[string]string{ + "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "provisioner": "Provisioner indicates the type of the provisioner.", + "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", +} + +func (StorageClass) SwaggerDoc() map[string]string { + return map_StorageClass +} + +var map_StorageClassList = map[string]string{ + "": "StorageClassList is a collection of storage classes.", + "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is the list of StorageClasses", +} + +func (StorageClassList) SwaggerDoc() map[string]string { + return map_StorageClassList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.conversion.go new file mode 100644 index 0000000000..f1774db88f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.conversion.go @@ -0,0 +1,89 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + storage "k8s.io/kubernetes/pkg/apis/storage" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_StorageClass_To_storage_StorageClass, + Convert_storage_StorageClass_To_v1_StorageClass, + Convert_v1_StorageClassList_To_storage_StorageClassList, + Convert_storage_StorageClassList_To_v1_StorageClassList, + ) +} + +func autoConvert_v1_StorageClass_To_storage_StorageClass(in *StorageClass, out *storage.StorageClass, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Provisioner = in.Provisioner + out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters)) + return nil +} + +func Convert_v1_StorageClass_To_storage_StorageClass(in *StorageClass, out *storage.StorageClass, s conversion.Scope) error { + return autoConvert_v1_StorageClass_To_storage_StorageClass(in, out, s) +} + +func autoConvert_storage_StorageClass_To_v1_StorageClass(in *storage.StorageClass, out *StorageClass, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Provisioner = in.Provisioner + out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters)) + return nil +} + +func Convert_storage_StorageClass_To_v1_StorageClass(in *storage.StorageClass, out *StorageClass, s conversion.Scope) error { + return autoConvert_storage_StorageClass_To_v1_StorageClass(in, out, s) +} + +func autoConvert_v1_StorageClassList_To_storage_StorageClassList(in *StorageClassList, out *storage.StorageClassList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]storage.StorageClass)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_StorageClassList_To_storage_StorageClassList(in *StorageClassList, out *storage.StorageClassList, s conversion.Scope) error { + return autoConvert_v1_StorageClassList_To_storage_StorageClassList(in, out, s) +} + +func autoConvert_storage_StorageClassList_To_v1_StorageClassList(in *storage.StorageClassList, out *StorageClassList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]StorageClass, 0) + } else { + out.Items = *(*[]StorageClass)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_storage_StorageClassList_To_v1_StorageClassList(in *storage.StorageClassList, out *StorageClassList, s conversion.Scope) error { + return autoConvert_storage_StorageClassList_To_v1_StorageClassList(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.deepcopy.go new file mode 100644 index 
0000000000..8b3786ab19 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.deepcopy.go @@ -0,0 +1,80 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_StorageClass, InType: reflect.TypeOf(&StorageClass{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_StorageClassList, InType: reflect.TypeOf(&StorageClassList{})}, + ) +} + +func DeepCopy_v1_StorageClass(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClass) + out := out.(*StorageClass) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_v1_StorageClassList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClassList) + out := out.(*StorageClassList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageClass, len(*in)) + for i := range *in { + if err := DeepCopy_v1_StorageClass(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.defaults.go new file mode 100644 index 0000000000..6df448eb9f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! 
+ +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/BUILD index 83d3c4363c..5d325ea33c 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -21,18 +18,34 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.conversion.go", "zz_generated.deepcopy.go", + "zz_generated.defaults.go", ], tags = ["automanaged"], deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/apis/storage:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//pkg/watch/versioned:go_default_library", "//vendor:github.com/gogo/protobuf/proto", "//vendor:github.com/gogo/protobuf/sortkeys", "//vendor:github.com/ugorji/go/codec", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/conversion", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/types", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/storage/v1beta1/util:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.pb.go index 0a22e648f0..7d1e3792df 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.pb.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -233,7 +233,7 @@ func (this *StorageClass) String() string { } mapStringForParameters += "}" s := strings.Join([]string{`&StorageClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, `Parameters:` + mapStringForParameters + `,`, `}`, @@ -245,7 +245,7 @@ func (this *StorageClassList) String() string { return "nil" } s := strings.Join([]string{`&StorageClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -696,34 +696,36 @@ var ( ) var fileDescriptorGenerated = []byte{ - // 462 bytes of a gzipped FileDescriptorProto + // 486 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x92, 0xcf, 0x8b, 0xd3, 0x40, - 0x14, 0xc7, 0x33, 0x2d, 0xc5, 0xdd, 0xa9, 0x62, 0x89, 0x1e, 0x4a, 0x0e, 0xd9, 0xb2, 0xa7, 0x2a, - 0xee, 0x0c, 0x2d, 0x2c, 0x94, 0x05, 0x2f, 0x15, 0x41, 0x41, 0x71, 0x89, 0x17, 0x11, 0xf6, 0x30, - 0x69, 0x9f, 0x71, 0x4c, 0x93, 0x09, 0x33, 0x2f, 0x81, 0x05, 0x0f, 0xfe, 0x09, 0xfe, 0x59, 0x3d, - 0xf6, 0xe8, 0x41, 0x16, 0x1b, 0xff, 0x11, 0x69, 0x12, 0x37, 0xa1, 0x3f, 0x44, 0xf6, 0x36, 0x6f, - 0xe6, 0x7d, 0xbe, 0xef, 0xfb, 0xbe, 0x0c, 0xbd, 0x08, 0x27, 0x86, 0x49, 0xc5, 0xc3, 0xd4, 0x07, - 0x1d, 0x03, 0x82, 0xe1, 0x49, 0x18, 0x70, 0x91, 0x48, 0xc3, 0x0d, 0x2a, 0x2d, 0x02, 0xe0, 0xd9, - 0xc8, 0x07, 0x14, 0x23, 0x1e, 0x40, 0x0c, 0x5a, 0x20, 0xcc, 0x59, 0xa2, 0x15, 0x2a, 0xfb, 0x69, - 0xc9, 0xb2, 0x9a, 0x65, 0x49, 0x18, 0xb0, 0x0d, 0xcb, 0x2a, 0x96, 0x55, 0xac, 0x73, 0x16, 0x48, - 0xfc, 0x9c, 0xfa, 0x6c, 0xa6, 0x22, 0x1e, 0xa8, 0x40, 0xf1, 0x42, 0xc2, 0x4f, 0x3f, 0x15, 0x55, - 0x51, 0x14, 0xa7, 0x52, 0xda, 0x19, 0x1f, 0xb4, 0xc5, 0x35, 0x18, 0x95, 0xea, 0x19, 0x6c, 0xdb, - 0x71, 0xce, 0x0f, 0x33, 0x69, 0x9c, 0x81, 0x36, 0x52, 0xc5, 0x30, 0xdf, 0xc1, 0x9e, 0x1d, 0xc6, - 0xb2, 0x9d, 0x9d, 0x9d, 0xb3, 0xfd, 0xdd, 0x3a, 0x8d, 0x51, 0x46, 0xbb, 0x9e, 0x46, 0xfb, 0xdb, - 0x53, 0x94, 0x0b, 0x2e, 0x63, 0x34, 0xa8, 0xb7, 0x91, 0xd3, 0x9f, 0x2d, 0x7a, 0xff, 0x7d, 0x99, - 0xde, 0x8b, 0x85, 0x30, 0xc6, 0xfe, 0x40, 0x8f, 0x22, 0x40, 0x31, 0x17, 0x28, 0xfa, 0x64, 0x40, - 0x86, 0xdd, 0xf1, 0x90, 0x1d, 0x4c, 0x9e, 0x65, 0x23, 0xf6, 0xce, 0xff, 0x02, 0x33, 0x7c, 0x0b, - 0x28, 0xa6, 0xf6, 0xf2, 0xe6, 0xc4, 0xca, 0x6f, 0x4e, 0x68, 0x7d, 0xe7, 0xdd, 0xaa, 0xd9, 0xe7, - 0xb4, 0x9b, 0x68, 0x95, 0xc9, 0x22, 0x19, 0xdd, 0x6f, 0x0d, 0xc8, 0xf0, 0x78, 0xfa, 0xa8, 0x42, - 0xba, 0x97, 0xf5, 0x93, 0xd7, 0xec, 0xb3, 0xbf, 0x52, 0x9a, 0x08, 0x2d, 0x22, 0x40, 0xd0, 0xa6, - 0xdf, 0x1e, 0xb4, 0x87, 0xdd, 0xf1, 0x2b, 0xf6, 0xff, 0x9f, 0x81, 0x35, 0xd7, 0x63, 0x97, 0xb7, - 0x52, 0x2f, 0x63, 0xd4, 0xd7, 0xb5, 0xe5, 0xfa, 0xc1, 0x6b, 0xcc, 0x73, 0x9e, 0xd3, 0x87, 0x5b, - 0x88, 0xdd, 0xa3, 0xed, 0x10, 0xae, 0x8b, 0x70, 0x8e, 0xbd, 0xcd, 0xd1, 0x7e, 0x4c, 0x3b, 0x99, - 0x58, 0xa4, 0x50, 0xee, 0xe4, 0x95, 0xc5, 0x45, 0x6b, 0x42, 0x4e, 0x57, 
0x84, 0xf6, 0x9a, 0xf3, - 0xdf, 0x48, 0x83, 0xf6, 0xd5, 0x4e, 0xc4, 0xfc, 0x1f, 0x11, 0x37, 0x7e, 0x13, 0xdb, 0xe0, 0x45, - 0xd2, 0xbd, 0xca, 0xf6, 0xd1, 0xdf, 0x9b, 0x46, 0xce, 0x57, 0xb4, 0x23, 0x11, 0x22, 0xd3, 0x6f, - 0x15, 0x59, 0x4d, 0xee, 0x9a, 0xd5, 0xf4, 0x41, 0x35, 0xa4, 0xf3, 0x7a, 0x23, 0xe7, 0x95, 0xaa, - 0xd3, 0x27, 0xcb, 0xb5, 0x6b, 0xad, 0xd6, 0xae, 0xf5, 0x63, 0xed, 0x5a, 0xdf, 0x72, 0x97, 0x2c, - 0x73, 0x97, 0xac, 0x72, 0x97, 0xfc, 0xca, 0x5d, 0xf2, 0xfd, 0xb7, 0x6b, 0x7d, 0xbc, 0x57, 0xa9, - 0xfd, 0x09, 0x00, 0x00, 0xff, 0xff, 0x86, 0x21, 0xa9, 0x43, 0xef, 0x03, 0x00, 0x00, + 0x14, 0xc7, 0x33, 0x5d, 0x8a, 0xbb, 0x53, 0xc5, 0x12, 0x3d, 0x94, 0x1e, 0xb2, 0x65, 0x4f, 0x55, + 0x74, 0xc6, 0xae, 0x3f, 0x28, 0x0b, 0x5e, 0x2a, 0x82, 0x82, 0xe2, 0x12, 0x6f, 0xa2, 0xe0, 0xa4, + 0xfb, 0x4c, 0xc7, 0x34, 0x99, 0x30, 0xf3, 0x12, 0x2c, 0x78, 0xf0, 0x4f, 0xf0, 0xcf, 0xea, 0xcd, + 0x3d, 0x7a, 0x5a, 0x6c, 0xf4, 0x0f, 0x91, 0xfc, 0x70, 0x13, 0xcc, 0x16, 0x17, 0x6f, 0x99, 0x99, + 0xf7, 0xf9, 0xbe, 0xef, 0xfb, 0xbe, 0xd0, 0xa3, 0x60, 0x6a, 0x98, 0x54, 0x3c, 0x48, 0x3c, 0xd0, + 0x11, 0x20, 0x18, 0x1e, 0x07, 0x3e, 0x17, 0xb1, 0x34, 0xdc, 0xa0, 0xd2, 0xc2, 0x07, 0x9e, 0x4e, + 0x3c, 0x40, 0x31, 0xe1, 0x3e, 0x44, 0xa0, 0x05, 0xc2, 0x09, 0x8b, 0xb5, 0x42, 0x65, 0xdf, 0x2e, + 0x59, 0x56, 0xb3, 0x2c, 0x0e, 0x7c, 0x96, 0xb3, 0xac, 0x62, 0x59, 0xc5, 0x0e, 0xef, 0xfa, 0x12, + 0x17, 0x89, 0xc7, 0xe6, 0x2a, 0xe4, 0xbe, 0xf2, 0x15, 0x2f, 0x24, 0xbc, 0xe4, 0x43, 0x71, 0x2a, + 0x0e, 0xc5, 0x57, 0x29, 0x3d, 0x7c, 0x50, 0xd9, 0x12, 0xb1, 0x0c, 0xc5, 0x7c, 0x21, 0x23, 0xd0, + 0xab, 0xda, 0x58, 0x08, 0x28, 0x78, 0xda, 0x32, 0x34, 0xe4, 0xdb, 0x28, 0x9d, 0x44, 0x28, 0x43, + 0x68, 0x01, 0x8f, 0xfe, 0x05, 0x98, 0xf9, 0x02, 0x42, 0xd1, 0xe2, 0xee, 0x6f, 0xe3, 0x12, 0x94, + 0x4b, 0x2e, 0x23, 0x34, 0xa8, 0x5b, 0x50, 0x63, 0x26, 0x03, 0x3a, 0x05, 0x5d, 0x0f, 0x04, 0x9f, + 0x44, 0x18, 0x2f, 0xe1, 0xa2, 0x99, 0xee, 0x6c, 0x5d, 0xd0, 0x05, 0xd5, 0x07, 0xbf, 0x3a, 0xf4, + 0xea, 0xeb, 0x32, 0xfa, 0x27, 0x4b, 0x61, 0x8c, 0xfd, 0x9e, 0xee, 0xe6, 0x69, 0x9d, 0x08, 0x14, + 0x03, 0x32, 0x22, 0xe3, 0xde, 0xe1, 0x3d, 0x56, 0xad, 0xad, 0x69, 0xbe, 0x5e, 0x5c, 0x5e, 0xcd, + 0xd2, 0x09, 0x7b, 0xe5, 0x7d, 0x84, 0x39, 0xbe, 0x04, 0x14, 0x33, 0x7b, 0x7d, 0xb6, 0x6f, 0x65, + 0x67, 0xfb, 0xb4, 0xbe, 0x73, 0xcf, 0x55, 0xed, 0x87, 0xb4, 0x17, 0x6b, 0x95, 0x4a, 0x23, 0x55, + 0x04, 0x7a, 0xd0, 0x19, 0x91, 0xf1, 0xde, 0xec, 0x46, 0x85, 0xf4, 0x8e, 0xeb, 0x27, 0xb7, 0x59, + 0x67, 0x7f, 0xa6, 0x34, 0x16, 0x5a, 0x84, 0x80, 0xa0, 0xcd, 0x60, 0x67, 0xb4, 0x33, 0xee, 0x1d, + 0x3e, 0x63, 0x97, 0xff, 0xa3, 0x58, 0x73, 0x4c, 0x76, 0x7c, 0x2e, 0xf5, 0x34, 0x42, 0xbd, 0xaa, + 0x2d, 0xd7, 0x0f, 0x6e, 0xa3, 0xdf, 0xf0, 0x31, 0xbd, 0xfe, 0x17, 0x62, 0xf7, 0xe9, 0x4e, 0x00, + 0xab, 0x22, 0xa4, 0x3d, 0x37, 0xff, 0xb4, 0x6f, 0xd2, 0x6e, 0x2a, 0x96, 0x09, 0x94, 0x33, 0xb9, + 0xe5, 0xe1, 0xa8, 0x33, 0x25, 0x07, 0xdf, 0x08, 0xed, 0x37, 0xfb, 0xbf, 0x90, 0x06, 0xed, 0xb7, + 0xad, 0xa8, 0xd9, 0xe5, 0xa2, 0xce, 0xe9, 0x22, 0xe8, 0x7e, 0xe5, 0x7a, 0xf7, 0xcf, 0x4d, 0x23, + 0xe6, 0x77, 0xb4, 0x2b, 0x11, 0x42, 0x33, 0xe8, 0x14, 0x51, 0x4d, 0xff, 0x37, 0xaa, 0xd9, 0xb5, + 0xaa, 0x49, 0xf7, 0x79, 0x2e, 0xe7, 0x96, 0xaa, 0xb3, 0x5b, 0xeb, 0x8d, 0x63, 0x9d, 0x6e, 0x1c, + 0xeb, 0xfb, 0xc6, 0xb1, 0xbe, 0x64, 0x0e, 0x59, 0x67, 0x0e, 0x39, 0xcd, 0x1c, 0xf2, 0x23, 0x73, + 0xc8, 0xd7, 0x9f, 0x8e, 0xf5, 0xe6, 0x4a, 0xa5, 0xf6, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x99, 0xdf, + 0x66, 0x94, 0x33, 0x04, 0x00, 0x00, } diff --git 
a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.proto index bdc0631cf2..0b9dbaeada 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,11 +21,12 @@ syntax = 'proto2'; package k8s.io.kubernetes.pkg.apis.storage.v1beta1; -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; @@ -39,7 +40,7 @@ message StorageClass { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Provisioner indicates the type of the provisioner. optional string provisioner = 2; @@ -55,7 +56,7 @@ message StorageClassList { // Standard list metadata // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of StorageClasses repeated StorageClass items = 2; diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/register.go index e850a634d3..70087f3797 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/register.go @@ -17,17 +17,21 @@ limitations under the License. package v1beta1 import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName is the group name use in this package const GroupName = "storage.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1beta1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} var ( SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) @@ -37,14 +41,10 @@ var ( // Adds the list of known types to api.Scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &v1.ListOptions{}, - &v1.DeleteOptions{}, - &v1.ExportOptions{}, - &StorageClass{}, &StorageClassList{}, ) - versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/types.generated.go index 7f59ec8f5c..ddc516b28f 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/types.generated.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/types.generated.go @@ -25,9 +25,8 @@ import ( "errors" "fmt" codec1978 "github.com/ugorji/go/codec" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg2_v1 "k8s.io/kubernetes/pkg/api/v1" - pkg3_types "k8s.io/kubernetes/pkg/types" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" "reflect" "runtime" time "time" @@ -63,11 +62,10 @@ func init() { panic(err) } if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_unversioned.TypeMeta - var v1 pkg2_v1.ObjectMeta - var v2 pkg3_types.UID - var v3 time.Time - _, _, _, _ = v0, v1, v2, v3 + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 } } @@ -159,7 +157,13 @@ func (x *StorageClass) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[2] { yy10 := &x.ObjectMeta - yy10.CodecEncodeSelf(e) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } } else { r.EncodeNil() } @@ -168,14 +172,20 @@ func (x *StorageClass) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.ObjectMeta - yy11.CodecEncodeSelf(e) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym13 := z.EncBinary() - _ = yym13 + yym15 := z.EncBinary() + _ = yym15 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Provisioner)) @@ -184,8 +194,8 @@ func (x *StorageClass) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("provisioner")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Provisioner)) @@ -197,8 +207,8 @@ func (x *StorageClass) CodecEncodeSelf(e *codec1978.Encoder) { if x.Parameters == nil { r.EncodeNil() } else { - yym16 := z.EncBinary() - _ = yym16 + yym18 := z.EncBinary() + _ = yym18 if false { } else { z.F.EncMapStringStringV(x.Parameters, false, e) @@ -215,8 +225,8 @@ func (x *StorageClass) CodecEncodeSelf(e *codec1978.Encoder) { if x.Parameters == nil { r.EncodeNil() } else { - yym17 := z.EncBinary() - _ = yym17 + yym19 := z.EncBinary() + _ = yym19 if false { } else { z.F.EncMapStringStringV(x.Parameters, false, e) @@ -237,25 +247,25 @@ func (x *StorageClass) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := 
codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym18 := z.DecBinary() - _ = yym18 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct19 := r.ContainerType() - if yyct19 == codecSelferValueTypeMap1234 { - yyl19 := r.ReadMapStart() - if yyl19 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl19, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct19 == codecSelferValueTypeArray1234 { - yyl19 := r.ReadArrayStart() - if yyl19 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl19, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -267,12 +277,12 @@ func (x *StorageClass) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys20Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys20Slc - var yyhl20 bool = l >= 0 - for yyj20 := 0; ; yyj20++ { - if yyhl20 { - if yyj20 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -281,51 +291,75 @@ func (x *StorageClass) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys20Slc = r.DecodeBytes(yys20Slc, true, true) - yys20 := string(yys20Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys20 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv23 := &x.ObjectMeta - yyv23.CodecDecodeSelf(d) + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } } case "provisioner": if r.TryDecodeAsNil() { x.Provisioner = "" } else { - x.Provisioner = string(r.DecodeString()) + yyv10 := &x.Provisioner + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } } case "parameters": if r.TryDecodeAsNil() { x.Parameters = nil } else { - yyv25 := &x.Parameters - yym26 := z.DecBinary() - _ = yym26 + yyv12 := &x.Parameters + yym13 := z.DecBinary() + _ = yym13 if false { } else { - z.F.DecMapStringStringX(yyv25, false, d) + z.F.DecMapStringStringX(yyv12, false, d) } } default: - z.DecStructFieldNotFound(-1, yys20) - } // end switch yys20 - } // end for yyj20 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -333,16 +367,16 @@ func (x *StorageClass) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj27 int - var yyb27 bool - var yyhl27 bool = l >= 0 - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb27 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb27 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -350,15 +384,21 @@ func (x *StorageClass) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv15 := &x.Kind + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb27 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb27 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -366,32 +406,44 @@ func (x *StorageClass) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv17 := &x.APIVersion + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb27 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb27 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} + x.ObjectMeta = pkg1_v1.ObjectMeta{} } else { - yyv30 := &x.ObjectMeta - yyv30.CodecDecodeSelf(d) + yyv19 := &x.ObjectMeta + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else { + z.DecFallback(yyv19, false) + } } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb27 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb27 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -399,15 +451,21 @@ func (x *StorageClass) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Provisioner = "" } else { - x.Provisioner = string(r.DecodeString()) + yyv21 := &x.Provisioner + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb27 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb27 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -415,26 +473,26 @@ func (x *StorageClass) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Parameters = nil } else { - yyv32 := &x.Parameters - yym33 := z.DecBinary() - _ = yym33 + yyv23 := &x.Parameters + yym24 := z.DecBinary() + _ = yym24 if false { } else { - z.F.DecMapStringStringX(yyv32, false, d) + z.F.DecMapStringStringX(yyv23, false, d) } } for { - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb27 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb27 { + if yyb14 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj27-1, "") + z.DecStructFieldNotFound(yyj14-1, "") } 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -446,37 +504,37 @@ func (x *StorageClassList) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym34 := z.EncBinary() - _ = yym34 + yym1 := z.EncBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep35 := !z.EncBinary() - yy2arr35 := z.EncBasicHandle().StructToArray - var yyq35 [4]bool - _, _, _ = yysep35, yyq35, yy2arr35 - const yyr35 bool = false - yyq35[0] = x.Kind != "" - yyq35[1] = x.APIVersion != "" - yyq35[2] = true - var yynn35 int - if yyr35 || yy2arr35 { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn35 = 1 - for _, b := range yyq35 { + yynn2 = 1 + for _, b := range yyq2 { if b { - yynn35++ + yynn2++ } } - r.EncodeMapStart(yynn35) - yynn35 = 0 + r.EncodeMapStart(yynn2) + yynn2 = 0 } - if yyr35 || yy2arr35 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq35[0] { - yym37 := z.EncBinary() - _ = yym37 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -485,23 +543,23 @@ func (x *StorageClassList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq35[0] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym38 := z.EncBinary() - _ = yym38 + yym5 := z.EncBinary() + _ = yym5 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr35 || yy2arr35 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq35[1] { - yym40 := z.EncBinary() - _ = yym40 + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -510,54 +568,54 @@ func (x *StorageClassList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq35[1] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym41 := z.EncBinary() - _ = yym41 + yym8 := z.EncBinary() + _ = yym8 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr35 || yy2arr35 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq35[2] { - yy43 := &x.ListMeta - yym44 := z.EncBinary() - _ = yym44 + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 if false { - } else if z.HasExtensions() && z.EncExt(yy43) { + } else if z.HasExtensions() && z.EncExt(yy10) { } else { - z.EncFallback(yy43) + z.EncFallback(yy10) } } else { r.EncodeNil() } } else { - if yyq35[2] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy45 := &x.ListMeta - yym46 := z.EncBinary() - _ = yym46 + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(yy45) { + } else if z.HasExtensions() && z.EncExt(yy12) 
{ } else { - z.EncFallback(yy45) + z.EncFallback(yy12) } } } - if yyr35 || yy2arr35 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym48 := z.EncBinary() - _ = yym48 + yym15 := z.EncBinary() + _ = yym15 if false { } else { h.encSliceStorageClass(([]StorageClass)(x.Items), e) @@ -570,15 +628,15 @@ func (x *StorageClassList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym49 := z.EncBinary() - _ = yym49 + yym16 := z.EncBinary() + _ = yym16 if false { } else { h.encSliceStorageClass(([]StorageClass)(x.Items), e) } } } - if yyr35 || yy2arr35 { + if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -591,25 +649,25 @@ func (x *StorageClassList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym50 := z.DecBinary() - _ = yym50 + yym1 := z.DecBinary() + _ = yym1 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct51 := r.ContainerType() - if yyct51 == codecSelferValueTypeMap1234 { - yyl51 := r.ReadMapStart() - if yyl51 == 0 { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl51, d) + x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct51 == codecSelferValueTypeArray1234 { - yyl51 := r.ReadArrayStart() - if yyl51 == 0 { + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl51, d) + x.codecDecodeSelfFromArray(yyl2, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -621,12 +679,12 @@ func (x *StorageClassList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys52Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys52Slc - var yyhl52 bool = l >= 0 - for yyj52 := 0; ; yyj52++ { - if yyhl52 { - if yyj52 >= l { + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { break } } else { @@ -635,51 +693,63 @@ func (x *StorageClassList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys52Slc = r.DecodeBytes(yys52Slc, true, true) - yys52 := string(yys52Slc) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys52 { + switch yys3 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } } case "apiVersion": if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } } case "metadata": if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv55 := &x.ListMeta - yym56 := z.DecBinary() - _ = yym56 + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = 
yym9 if false { - } else if z.HasExtensions() && z.DecExt(yyv55) { + } else if z.HasExtensions() && z.DecExt(yyv8) { } else { - z.DecFallback(yyv55, false) + z.DecFallback(yyv8, false) } } case "items": if r.TryDecodeAsNil() { x.Items = nil } else { - yyv57 := &x.Items - yym58 := z.DecBinary() - _ = yym58 + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 if false { } else { - h.decSliceStorageClass((*[]StorageClass)(yyv57), d) + h.decSliceStorageClass((*[]StorageClass)(yyv10), d) } } default: - z.DecStructFieldNotFound(-1, yys52) - } // end switch yys52 - } // end for yyj52 + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -687,16 +757,16 @@ func (x *StorageClassList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj59 int - var yyb59 bool - var yyhl59 bool = l >= 0 - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb59 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb59 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -704,15 +774,21 @@ func (x *StorageClassList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Kind = "" } else { - x.Kind = string(r.DecodeString()) + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb59 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb59 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -720,38 +796,44 @@ func (x *StorageClassList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb59 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb59 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.ListMeta = pkg1_v1.ListMeta{} } else { - yyv62 := &x.ListMeta - yym63 := z.DecBinary() - _ = yym63 + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 if false { - } else if z.HasExtensions() && z.DecExt(yyv62) { + } else if z.HasExtensions() && z.DecExt(yyv17) { } else { - z.DecFallback(yyv62, false) + z.DecFallback(yyv17, false) } } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb59 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb59 { + if yyb12 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -759,26 +841,26 @@ func (x *StorageClassList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Items = nil } else { - yyv64 := &x.Items - yym65 := z.DecBinary() - _ = yym65 + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceStorageClass((*[]StorageClass)(yyv64), d) + h.decSliceStorageClass((*[]StorageClass)(yyv19), d) } } for { - yyj59++ - if yyhl59 { - yyb59 = 
yyj59 > l + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l } else { - yyb59 = r.CheckBreak() + yyb12 = r.CheckBreak() } - if yyb59 { + if yyb12 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj59-1, "") + z.DecStructFieldNotFound(yyj12-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -788,10 +870,10 @@ func (x codecSelfer1234) encSliceStorageClass(v []StorageClass, e *codec1978.Enc z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv66 := range v { + for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy67 := &yyv66 - yy67.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -801,83 +883,86 @@ func (x codecSelfer1234) decSliceStorageClass(v *[]StorageClass, d *codec1978.De z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv68 := *v - yyh68, yyl68 := z.DecSliceHelperStart() - var yyc68 bool - if yyl68 == 0 { - if yyv68 == nil { - yyv68 = []StorageClass{} - yyc68 = true - } else if len(yyv68) != 0 { - yyv68 = yyv68[:0] - yyc68 = true + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []StorageClass{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true } - } else if yyl68 > 0 { - var yyrr68, yyrl68 int - var yyrt68 bool - if yyl68 > cap(yyv68) { + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { - yyrg68 := len(yyv68) > 0 - yyv268 := yyv68 - yyrl68, yyrt68 = z.DecInferLen(yyl68, z.DecBasicHandle().MaxInitLen, 280) - if yyrt68 { - if yyrl68 <= cap(yyv68) { - yyv68 = yyv68[:yyrl68] + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] } else { - yyv68 = make([]StorageClass, yyrl68) + yyv1 = make([]StorageClass, yyrl1) } } else { - yyv68 = make([]StorageClass, yyrl68) + yyv1 = make([]StorageClass, yyrl1) } - yyc68 = true - yyrr68 = len(yyv68) - if yyrg68 { - copy(yyv68, yyv268) + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) } - } else if yyl68 != len(yyv68) { - yyv68 = yyv68[:yyl68] - yyc68 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyj68 := 0 - for ; yyj68 < yyrr68; yyj68++ { - yyh68.ElemContainerState(yyj68) + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv68[yyj68] = StorageClass{} + yyv1[yyj1] = StorageClass{} } else { - yyv69 := &yyv68[yyj68] - yyv69.CodecDecodeSelf(d) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } - if yyrt68 { - for ; yyj68 < yyl68; yyj68++ { - yyv68 = append(yyv68, StorageClass{}) - yyh68.ElemContainerState(yyj68) + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, StorageClass{}) + yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv68[yyj68] = StorageClass{} + yyv1[yyj1] = StorageClass{} } else { - yyv70 := &yyv68[yyj68] - yyv70.CodecDecodeSelf(d) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } } } else { - yyj68 := 0 - for ; !r.CheckBreak(); yyj68++ { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { - if yyj68 >= len(yyv68) { - yyv68 = append(yyv68, StorageClass{}) // var yyz68 StorageClass - yyc68 = true + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, StorageClass{}) // var yyz1 StorageClass + yyc1 = true } - 
yyh68.ElemContainerState(yyj68) - if yyj68 < len(yyv68) { + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv68[yyj68] = StorageClass{} + yyv1[yyj1] = StorageClass{} } else { - yyv71 := &yyv68[yyj68] - yyv71.CodecDecodeSelf(d) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -885,16 +970,16 @@ func (x codecSelfer1234) decSliceStorageClass(v *[]StorageClass, d *codec1978.De } } - if yyj68 < len(yyv68) { - yyv68 = yyv68[:yyj68] - yyc68 = true - } else if yyj68 == 0 && yyv68 == nil { - yyv68 = []StorageClass{} - yyc68 = true + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []StorageClass{} + yyc1 = true } } - yyh68.End() - if yyc68 { - *v = yyv68 + yyh1.End() + if yyc1 { + *v = yyv1 } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/types.go index f36d9319e5..afbd5bcdbf 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/types.go @@ -17,8 +17,7 @@ limitations under the License. package v1beta1 import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // +genclient=true @@ -30,11 +29,11 @@ import ( // StorageClasses are non-namespaced; the name of the storage class // according to etcd is in ObjectMeta.Name. type StorageClass struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Provisioner indicates the type of the provisioner. Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` @@ -47,11 +46,11 @@ type StorageClass struct { // StorageClassList is a collection of storage classes. type StorageClassList struct { - unversioned.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` // Standard list metadata // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata // +optional - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of StorageClasses Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/zz_generated.conversion.go index ef6b401489..35a730e561 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,9 +21,9 @@ limitations under the License. 
package v1beta1 import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" storage "k8s.io/kubernetes/pkg/apis/storage" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" unsafe "unsafe" ) @@ -43,10 +43,7 @@ func RegisterConversions(scheme *runtime.Scheme) error { } func autoConvert_v1beta1_StorageClass_To_storage_StorageClass(in *StorageClass, out *storage.StorageClass, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta out.Provisioner = in.Provisioner out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters)) return nil @@ -57,10 +54,7 @@ func Convert_v1beta1_StorageClass_To_storage_StorageClass(in *StorageClass, out } func autoConvert_storage_StorageClass_To_v1beta1_StorageClass(in *storage.StorageClass, out *StorageClass, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } + out.ObjectMeta = in.ObjectMeta out.Provisioner = in.Provisioner out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters)) return nil @@ -82,7 +76,11 @@ func Convert_v1beta1_StorageClassList_To_storage_StorageClassList(in *StorageCla func autoConvert_storage_StorageClassList_To_v1beta1_StorageClassList(in *storage.StorageClassList, out *StorageClassList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]StorageClass)(unsafe.Pointer(&in.Items)) + if in.Items == nil { + out.Items = make([]StorageClass, 0) + } else { + out.Items = *(*[]StorageClass)(unsafe.Pointer(&in.Items)) + } return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/zz_generated.deepcopy.go index d9e7ed0956..f2ea4f2fb1 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,9 +21,9 @@ limitations under the License. 
package v1beta1 import ( - v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" reflect "reflect" ) @@ -44,19 +44,18 @@ func DeepCopy_v1beta1_StorageClass(in interface{}, out interface{}, c *conversio { in := in.(*StorageClass) out := out.(*StorageClass) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } - out.Provisioner = in.Provisioner if in.Parameters != nil { in, out := &in.Parameters, &out.Parameters *out = make(map[string]string) for key, val := range *in { (*out)[key] = val } - } else { - out.Parameters = nil } return nil } @@ -66,8 +65,7 @@ func DeepCopy_v1beta1_StorageClassList(in interface{}, out interface{}, c *conve { in := in.(*StorageClassList) out := out.(*StorageClassList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StorageClass, len(*in)) @@ -76,8 +74,6 @@ func DeepCopy_v1beta1_StorageClassList(in interface{}, out interface{}, c *conve return err } } - } else { - out.Items = nil } return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/zz_generated.defaults.go new file mode 100644 index 0000000000..e24e70be38 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/zz_generated.deepcopy.go index 7542669781..2ef9f1767f 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,9 +21,9 @@ limitations under the License. 
package storage import ( - api "k8s.io/kubernetes/pkg/api" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" reflect "reflect" ) @@ -44,19 +44,18 @@ func DeepCopy_storage_StorageClass(in interface{}, out interface{}, c *conversio { in := in.(*StorageClass) out := out.(*StorageClass) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) } - out.Provisioner = in.Provisioner if in.Parameters != nil { in, out := &in.Parameters, &out.Parameters *out = make(map[string]string) for key, val := range *in { (*out)[key] = val } - } else { - out.Parameters = nil } return nil } @@ -66,8 +65,7 @@ func DeepCopy_storage_StorageClassList(in interface{}, out interface{}, c *conve { in := in.(*StorageClassList) out := out.(*StorageClassList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StorageClass, len(*in)) @@ -76,8 +74,6 @@ func DeepCopy_storage_StorageClassList(in interface{}, out interface{}, c *conve return err } } - } else { - out.Items = nil } return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/auth/authenticator/BUILD b/vendor/k8s.io/kubernetes/pkg/auth/authenticator/BUILD deleted file mode 100644 index 71636b554b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/auth/authenticator/BUILD +++ /dev/null @@ -1,18 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["interfaces.go"], - tags = ["automanaged"], - deps = ["//pkg/auth/user:go_default_library"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/auth/authenticator/interfaces.go b/vendor/k8s.io/kubernetes/pkg/auth/authenticator/interfaces.go deleted file mode 100644 index 1eafe605c6..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/auth/authenticator/interfaces.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package authenticator - -import ( - "net/http" - - "k8s.io/kubernetes/pkg/auth/user" -) - -// Token checks a string value against a backing authentication store and returns -// information about the current user and true if successful, false if not successful, -// or an error if the token could not be checked. -type Token interface { - AuthenticateToken(token string) (user.Info, bool, error) -} - -// Request attempts to extract authentication information from a request and returns -// information about the current user and true if successful, false if not successful, -// or an error if the request could not be checked. 
-type Request interface { - AuthenticateRequest(req *http.Request) (user.Info, bool, error) -} - -// Password checks a username and password against a backing authentication store and -// returns information about the user and true if successful, false if not successful, -// or an error if the username and password could not be checked -type Password interface { - AuthenticatePassword(user, password string) (user.Info, bool, error) -} - -// TokenFunc is a function that implements the Token interface. -type TokenFunc func(token string) (user.Info, bool, error) - -// AuthenticateToken implements authenticator.Token. -func (f TokenFunc) AuthenticateToken(token string) (user.Info, bool, error) { - return f(token) -} - -// RequestFunc is a function that implements the Request interface. -type RequestFunc func(req *http.Request) (user.Info, bool, error) - -// AuthenticateRequest implements authenticator.Request. -func (f RequestFunc) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { - return f(req) -} - -// PasswordFunc is a function that implements the Password interface. -type PasswordFunc func(user, password string) (user.Info, bool, error) - -// AuthenticatePassword implements authenticator.Password. -func (f PasswordFunc) AuthenticatePassword(user, password string) (user.Info, bool, error) { - return f(user, password) -} diff --git a/vendor/k8s.io/kubernetes/pkg/auth/user/BUILD b/vendor/k8s.io/kubernetes/pkg/auth/user/BUILD deleted file mode 100644 index 8826e26282..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/auth/user/BUILD +++ /dev/null @@ -1,20 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "user.go", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/auth/user/doc.go b/vendor/k8s.io/kubernetes/pkg/auth/user/doc.go deleted file mode 100644 index 570c51ae99..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/auth/user/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package user contains utilities for dealing with simple user exchange in the auth -// packages. The user.Info interface defines an interface for exchanging that info. -package user diff --git a/vendor/k8s.io/kubernetes/pkg/auth/user/user.go b/vendor/k8s.io/kubernetes/pkg/auth/user/user.go deleted file mode 100644 index 3af1959888..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/auth/user/user.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package user - -// Info describes a user that has been authenticated to the system. -type Info interface { - // GetName returns the name that uniquely identifies this user among all - // other active users. - GetName() string - // GetUID returns a unique value for a particular user that will change - // if the user is removed from the system and another user is added with - // the same name. - GetUID() string - // GetGroups returns the names of the groups the user is a member of - GetGroups() []string - - // GetExtra can contain any additional information that the authenticator - // thought was interesting. One example would be scopes on a token. - // Keys in this map should be namespaced to the authenticator or - // authenticator/authorizer pair making use of them. - // For instance: "example.org/foo" instead of "foo" - // This is a map[string][]string because it needs to be serializeable into - // a SubjectAccessReviewSpec.authorization.k8s.io for proper authorization - // delegation flows - // In order to faithfully round-trip through an impersonation flow, these keys - // MUST be lowercase. - GetExtra() map[string][]string -} - -// DefaultInfo provides a simple user information exchange object -// for components that implement the UserInfo interface. -type DefaultInfo struct { - Name string - UID string - Groups []string - Extra map[string][]string -} - -func (i *DefaultInfo) GetName() string { - return i.Name -} - -func (i *DefaultInfo) GetUID() string { - return i.UID -} - -func (i *DefaultInfo) GetGroups() []string { - return i.Groups -} - -func (i *DefaultInfo) GetExtra() map[string][]string { - return i.Extra -} - -// well-known user and group names -const ( - SystemPrivilegedGroup = "system:masters" - NodesGroup = "system:nodes" - AllUnauthenticated = "system:unauthenticated" - AllAuthenticated = "system:authenticated" - - Anonymous = "system:anonymous" - APIServerUser = "system:apiserver" -) diff --git a/vendor/k8s.io/kubernetes/pkg/capabilities/BUILD b/vendor/k8s.io/kubernetes/pkg/capabilities/BUILD deleted file mode 100644 index 39a0982bde..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/capabilities/BUILD +++ /dev/null @@ -1,20 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "capabilities.go", - "doc.go", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go b/vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go deleted file mode 100644 index 96146c6b09..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package capabilities - -import ( - "sync" -) - -// Capabilities defines the set of capabilities available within the system. -// For now these are global. Eventually they may be per-user -type Capabilities struct { - AllowPrivileged bool - - // Pod sources from which to allow privileged capabilities like host networking, sharing the host - // IPC namespace, and sharing the host PID namespace. - PrivilegedSources PrivilegedSources - - // PerConnectionBandwidthLimitBytesPerSec limits the throughput of each connection (currently only used for proxy, exec, attach) - PerConnectionBandwidthLimitBytesPerSec int64 -} - -// PrivilegedSources defines the pod sources allowed to make privileged requests for certain types -// of capabilities like host networking, sharing the host IPC namespace, and sharing the host PID namespace. -type PrivilegedSources struct { - // List of pod sources for which using host network is allowed. - HostNetworkSources []string - - // List of pod sources for which using host pid namespace is allowed. - HostPIDSources []string - - // List of pod sources for which using host ipc is allowed. - HostIPCSources []string -} - -// TODO: Clean these up into a singleton -var once sync.Once -var lock sync.Mutex -var capabilities *Capabilities - -// Initialize the capability set. This can only be done once per binary, subsequent calls are ignored. -func Initialize(c Capabilities) { - // Only do this once - once.Do(func() { - capabilities = &c - }) -} - -// Setup the capability set. It wraps Initialize for improving usability. -func Setup(allowPrivileged bool, privilegedSources PrivilegedSources, perConnectionBytesPerSec int64) { - Initialize(Capabilities{ - AllowPrivileged: allowPrivileged, - PrivilegedSources: privilegedSources, - PerConnectionBandwidthLimitBytesPerSec: perConnectionBytesPerSec, - }) -} - -// SetCapabilitiesForTests. Convenience method for testing. This should only be called from tests. -func SetForTests(c Capabilities) { - lock.Lock() - defer lock.Unlock() - capabilities = &c -} - -// Returns a read-only copy of the system capabilities. -func Get() Capabilities { - lock.Lock() - defer lock.Unlock() - // This check prevents clobbering of capabilities that might've been set via SetForTests - if capabilities == nil { - Initialize(Capabilities{ - AllowPrivileged: false, - PrivilegedSources: PrivilegedSources{ - HostNetworkSources: []string{}, - HostPIDSources: []string{}, - HostIPCSources: []string{}, - }, - }) - } - return *capabilities -} diff --git a/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go b/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go deleted file mode 100644 index e2042a881e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// package capabilities manages system level capabilities -package capabilities diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/BUILD b/vendor/k8s.io/kubernetes/pkg/client/cache/BUILD deleted file mode 100644 index eba219a415..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/BUILD +++ /dev/null @@ -1,100 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "controller.go", - "delta_fifo.go", - "doc.go", - "expiration_cache.go", - "expiration_cache_fakes.go", - "fake_custom_store.go", - "fifo.go", - "index.go", - "listers.go", - "listers_core.go", - "listers_extensions.go", - "listers_rbac.go", - "listwatch.go", - "mutation_detector.go", - "reflector.go", - "shared_informer.go", - "store.go", - "thread_safe_store.go", - "undelta_store.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apis/apps:go_default_library", - "//pkg/apis/certificates:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/apis/policy:go_default_library", - "//pkg/apis/rbac:go_default_library", - "//pkg/apis/storage:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/fields:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/clock:go_default_library", - "//pkg/util/diff:go_default_library", - "//pkg/util/runtime:go_default_library", - "//pkg/util/sets:go_default_library", - "//pkg/util/wait:go_default_library", - "//pkg/watch:go_default_library", - "//vendor:github.com/golang/glog", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "controller_test.go", - "delta_fifo_test.go", - "expiration_cache_test.go", - "fifo_test.go", - "index_test.go", - "listers_test.go", - "listwatch_test.go", - "mutation_detector_test.go", - "processor_listener_test.go", - "reflector_test.go", - "store_test.go", - "undelta_store_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/testapi:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/testing/cache:go_default_library", - "//pkg/fields:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/clock:go_default_library", - "//pkg/util/sets:go_default_library", - "//pkg/util/testing:go_default_library", - "//pkg/util/wait:go_default_library", - "//pkg/watch:go_default_library", - "//vendor:github.com/google/gofuzz", - ], -) diff --git 
a/vendor/k8s.io/kubernetes/pkg/client/cache/listers.go b/vendor/k8s.io/kubernetes/pkg/client/cache/listers.go deleted file mode 100644 index bc65876e03..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/listers.go +++ /dev/null @@ -1,478 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -import ( - "fmt" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/certificates" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/apis/policy" - "k8s.io/kubernetes/pkg/apis/storage" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" -) - -// AppendFunc is used to add a matching item to whatever list the caller is using -type AppendFunc func(interface{}) - -func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { - for _, m := range store.List() { - metadata, err := meta.Accessor(m) - if err != nil { - return err - } - if selector.Matches(labels.Set(metadata.GetLabels())) { - appendFn(m) - } - } - return nil -} - -func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error { - if namespace == api.NamespaceAll { - for _, m := range indexer.List() { - metadata, err := meta.Accessor(m) - if err != nil { - return err - } - if selector.Matches(labels.Set(metadata.GetLabels())) { - appendFn(m) - } - } - return nil - } - - items, err := indexer.Index(NamespaceIndex, &api.ObjectMeta{Namespace: namespace}) - if err != nil { - // Ignore error; do slow search without index. 
- glog.Warningf("can not retrieve list of objects using index : %v", err) - for _, m := range indexer.List() { - metadata, err := meta.Accessor(m) - if err != nil { - return err - } - if metadata.GetNamespace() == namespace && selector.Matches(labels.Set(metadata.GetLabels())) { - appendFn(m) - } - - } - return nil - } - for _, m := range items { - metadata, err := meta.Accessor(m) - if err != nil { - return err - } - if selector.Matches(labels.Set(metadata.GetLabels())) { - appendFn(m) - } - } - - return nil -} - -// GenericLister is a lister skin on a generic Indexer -type GenericLister interface { - // List will return all objects across namespaces - List(selector labels.Selector) (ret []runtime.Object, err error) - // Get will attempt to retrieve assuming that name==key - Get(name string) (runtime.Object, error) - // ByNamespace will give you a GenericNamespaceLister for one namespace - ByNamespace(namespace string) GenericNamespaceLister -} - -// GenericNamespaceLister is a lister skin on a generic Indexer -type GenericNamespaceLister interface { - // List will return all objects in this namespace - List(selector labels.Selector) (ret []runtime.Object, err error) - // Get will attempt to retrieve by namespace and name - Get(name string) (runtime.Object, error) -} - -func NewGenericLister(indexer Indexer, resource unversioned.GroupResource) GenericLister { - return &genericLister{indexer: indexer, resource: resource} -} - -type genericLister struct { - indexer Indexer - resource unversioned.GroupResource -} - -func (s *genericLister) List(selector labels.Selector) (ret []runtime.Object, err error) { - err = ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(runtime.Object)) - }) - return ret, err -} - -func (s *genericLister) ByNamespace(namespace string) GenericNamespaceLister { - return &genericNamespaceLister{indexer: s.indexer, namespace: namespace, resource: s.resource} -} - -func (s *genericLister) Get(name string) (runtime.Object, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(s.resource, name) - } - return obj.(runtime.Object), nil -} - -type genericNamespaceLister struct { - indexer Indexer - namespace string - resource unversioned.GroupResource -} - -func (s *genericNamespaceLister) List(selector labels.Selector) (ret []runtime.Object, err error) { - err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(runtime.Object)) - }) - return ret, err -} - -func (s *genericNamespaceLister) Get(name string) (runtime.Object, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(s.resource, name) - } - return obj.(runtime.Object), nil -} - -// TODO: generate these classes and methods for all resources of interest using -// a script. Can use "go generate" once 1.4 is supported by all users. - -// NodeConditionPredicate is a function that indicates whether the given node's conditions meet -// some set of criteria defined by the function. -type NodeConditionPredicate func(node *api.Node) bool - -// StoreToNodeLister makes a Store have the List method of the client.NodeInterface -// The Store must contain (only) Nodes. 
-type StoreToNodeLister struct { - Store -} - -func (s *StoreToNodeLister) List() (machines api.NodeList, err error) { - for _, m := range s.Store.List() { - machines.Items = append(machines.Items, *(m.(*api.Node))) - } - return machines, nil -} - -// NodeCondition returns a storeToNodeConditionLister -func (s *StoreToNodeLister) NodeCondition(predicate NodeConditionPredicate) storeToNodeConditionLister { - // TODO: Move this filtering server side. Currently our selectors don't facilitate searching through a list so we - // have the reflector filter out the Unschedulable field and sift through node conditions in the lister. - return storeToNodeConditionLister{s.Store, predicate} -} - -// storeToNodeConditionLister filters and returns nodes matching the given type and status from the store. -type storeToNodeConditionLister struct { - store Store - predicate NodeConditionPredicate -} - -// List returns a list of nodes that match the conditions defined by the predicate functions in the storeToNodeConditionLister. -func (s storeToNodeConditionLister) List() (nodes []*api.Node, err error) { - for _, m := range s.store.List() { - node := m.(*api.Node) - if s.predicate(node) { - nodes = append(nodes, node) - } else { - glog.V(5).Infof("Node %s matches none of the conditions", node.Name) - } - } - return -} - -// StoreToDaemonSetLister gives a store List and Exists methods. The store must contain only DaemonSets. -type StoreToDaemonSetLister struct { - Store -} - -// Exists checks if the given daemon set exists in the store. -func (s *StoreToDaemonSetLister) Exists(ds *extensions.DaemonSet) (bool, error) { - _, exists, err := s.Store.Get(ds) - if err != nil { - return false, err - } - return exists, nil -} - -// List lists all daemon sets in the store. -// TODO: converge on the interface in pkg/client -func (s *StoreToDaemonSetLister) List() (dss extensions.DaemonSetList, err error) { - for _, c := range s.Store.List() { - dss.Items = append(dss.Items, *(c.(*extensions.DaemonSet))) - } - return dss, nil -} - -// GetPodDaemonSets returns a list of daemon sets managing a pod. -// Returns an error if and only if no matching daemon sets are found. -func (s *StoreToDaemonSetLister) GetPodDaemonSets(pod *api.Pod) (daemonSets []extensions.DaemonSet, err error) { - var selector labels.Selector - var daemonSet extensions.DaemonSet - - if len(pod.Labels) == 0 { - err = fmt.Errorf("no daemon sets found for pod %v because it has no labels", pod.Name) - return - } - - for _, m := range s.Store.List() { - daemonSet = *m.(*extensions.DaemonSet) - if daemonSet.Namespace != pod.Namespace { - continue - } - selector, err = unversioned.LabelSelectorAsSelector(daemonSet.Spec.Selector) - if err != nil { - // this should not happen if the DaemonSet passed validation - return nil, err - } - - // If a daemonSet with a nil or empty selector creeps in, it should match nothing, not everything. - if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { - continue - } - daemonSets = append(daemonSets, daemonSet) - } - if len(daemonSets) == 0 { - err = fmt.Errorf("could not find daemon set for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) - } - return -} - -// StoreToEndpointsLister makes a Store that lists endpoints. -type StoreToEndpointsLister struct { - Store -} - -// List lists all endpoints in the store. 
-func (s *StoreToEndpointsLister) List() (services api.EndpointsList, err error) { - for _, m := range s.Store.List() { - services.Items = append(services.Items, *(m.(*api.Endpoints))) - } - return services, nil -} - -// GetServiceEndpoints returns the endpoints of a service, matched on service name. -func (s *StoreToEndpointsLister) GetServiceEndpoints(svc *api.Service) (ep api.Endpoints, err error) { - for _, m := range s.Store.List() { - ep = *m.(*api.Endpoints) - if svc.Name == ep.Name && svc.Namespace == ep.Namespace { - return ep, nil - } - } - err = fmt.Errorf("could not find endpoints for service: %v", svc.Name) - return -} - -// Typed wrapper around a store of PersistentVolumes -type StoreToPVFetcher struct { - Store -} - -// GetPersistentVolumeInfo returns cached data for the PersistentVolume 'id'. -func (s *StoreToPVFetcher) GetPersistentVolumeInfo(id string) (*api.PersistentVolume, error) { - o, exists, err := s.Get(&api.PersistentVolume{ObjectMeta: api.ObjectMeta{Name: id}}) - - if err != nil { - return nil, fmt.Errorf("error retrieving PersistentVolume '%v' from cache: %v", id, err) - } - - if !exists { - return nil, fmt.Errorf("PersistentVolume '%v' not found", id) - } - - return o.(*api.PersistentVolume), nil -} - -// StoreToStatefulSetLister gives a store List and Exists methods. The store must contain only StatefulSets. -type StoreToStatefulSetLister struct { - Store -} - -// Exists checks if the given StatefulSet exists in the store. -func (s *StoreToStatefulSetLister) Exists(ps *apps.StatefulSet) (bool, error) { - _, exists, err := s.Store.Get(ps) - if err != nil { - return false, err - } - return exists, nil -} - -// List lists all StatefulSets in the store. -func (s *StoreToStatefulSetLister) List() (psList []apps.StatefulSet, err error) { - for _, ps := range s.Store.List() { - psList = append(psList, *(ps.(*apps.StatefulSet))) - } - return psList, nil -} - -type storeStatefulSetsNamespacer struct { - store Store - namespace string -} - -func (s *StoreToStatefulSetLister) StatefulSets(namespace string) storeStatefulSetsNamespacer { - return storeStatefulSetsNamespacer{s.Store, namespace} -} - -// GetPodStatefulSets returns a list of StatefulSets managing a pod. Returns an error only if no matching StatefulSets are found. -func (s *StoreToStatefulSetLister) GetPodStatefulSets(pod *api.Pod) (psList []apps.StatefulSet, err error) { - var selector labels.Selector - var ps apps.StatefulSet - - if len(pod.Labels) == 0 { - err = fmt.Errorf("no StatefulSets found for pod %v because it has no labels", pod.Name) - return - } - - for _, m := range s.Store.List() { - ps = *m.(*apps.StatefulSet) - if ps.Namespace != pod.Namespace { - continue - } - selector, err = unversioned.LabelSelectorAsSelector(ps.Spec.Selector) - if err != nil { - err = fmt.Errorf("invalid selector: %v", err) - return - } - - // If a StatefulSet with a nil or empty selector creeps in, it should match nothing, not everything. - if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { - continue - } - psList = append(psList, ps) - } - if len(psList) == 0 { - err = fmt.Errorf("could not find StatefulSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) - } - return -} - -// StoreToCertificateRequestLister gives a store List and Exists methods. The store must contain only CertificateRequests. -type StoreToCertificateRequestLister struct { - Store -} - -// Exists checks if the given csr exists in the store. 
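Illustrative aside (not part of the diff hunk): the GetPodDaemonSets and GetPodStatefulSets helpers above (and the deployment and replica-set variants later in this hunk) all reduce to the same label-selector check. A self-contained sketch of that step against the pre-change packages; the objects are constructed inline only to keep the example compilable:

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/api"
        "k8s.io/kubernetes/pkg/api/unversioned"
        "k8s.io/kubernetes/pkg/apis/extensions"
        "k8s.io/kubernetes/pkg/labels"
    )

    // selects reports whether the daemon set's selector selects the pod, using
    // the same rules as the helpers above: an invalid selector is an error, and
    // a nil or empty selector matches nothing rather than everything.
    func selects(ds *extensions.DaemonSet, pod *api.Pod) (bool, error) {
        selector, err := unversioned.LabelSelectorAsSelector(ds.Spec.Selector)
        if err != nil {
            return false, fmt.Errorf("invalid selector: %v", err)
        }
        if selector.Empty() {
            return false, nil
        }
        return selector.Matches(labels.Set(pod.Labels)), nil
    }

    func main() {
        ds := &extensions.DaemonSet{Spec: extensions.DaemonSetSpec{
            Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"app": "demo"}},
        }}
        pod := &api.Pod{ObjectMeta: api.ObjectMeta{Labels: map[string]string{"app": "demo"}}}
        fmt.Println(selects(ds, pod)) // true <nil>
    }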
-func (s *StoreToCertificateRequestLister) Exists(csr *certificates.CertificateSigningRequest) (bool, error) { - _, exists, err := s.Store.Get(csr) - if err != nil { - return false, err - } - return exists, nil -} - -// StoreToCertificateRequestLister lists all csrs in the store. -func (s *StoreToCertificateRequestLister) List() (csrs certificates.CertificateSigningRequestList, err error) { - for _, c := range s.Store.List() { - csrs.Items = append(csrs.Items, *(c.(*certificates.CertificateSigningRequest))) - } - return csrs, nil -} - -type StoreToPodDisruptionBudgetLister struct { - Store -} - -// GetPodPodDisruptionBudgets returns a list of PodDisruptionBudgets matching a pod. Returns an error only if no matching PodDisruptionBudgets are found. -func (s *StoreToPodDisruptionBudgetLister) GetPodPodDisruptionBudgets(pod *api.Pod) (pdbList []policy.PodDisruptionBudget, err error) { - var selector labels.Selector - - if len(pod.Labels) == 0 { - err = fmt.Errorf("no PodDisruptionBudgets found for pod %v because it has no labels", pod.Name) - return - } - - for _, m := range s.Store.List() { - pdb, ok := m.(*policy.PodDisruptionBudget) - if !ok { - glog.Errorf("Unexpected: %v is not a PodDisruptionBudget", m) - continue - } - if pdb.Namespace != pod.Namespace { - continue - } - selector, err = unversioned.LabelSelectorAsSelector(pdb.Spec.Selector) - if err != nil { - glog.Warningf("invalid selector: %v", err) - // TODO(mml): add an event to the PDB - continue - } - - // If a PDB with a nil or empty selector creeps in, it should match nothing, not everything. - if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { - continue - } - pdbList = append(pdbList, *pdb) - } - if len(pdbList) == 0 { - err = fmt.Errorf("could not find PodDisruptionBudget for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) - } - return -} - -// StorageClassLister knows how to list storage classes -type StorageClassLister interface { - List(selector labels.Selector) (ret []*storage.StorageClass, err error) - Get(name string) (*storage.StorageClass, error) -} - -// storageClassLister implements StorageClassLister -type storageClassLister struct { - indexer Indexer -} - -// NewStorageClassLister returns a new lister. -func NewStorageClassLister(indexer Indexer) StorageClassLister { - return &storageClassLister{indexer: indexer} -} - -// List returns a list of storage classes -func (s *storageClassLister) List(selector labels.Selector) (ret []*storage.StorageClass, err error) { - err = ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*storage.StorageClass)) - }) - return ret, err -} - -// List returns a list of storage classes -func (s *storageClassLister) Get(name string) (*storage.StorageClass, error) { - key := &storage.StorageClass{ObjectMeta: api.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(storage.Resource("storageclass"), name) - } - return obj.(*storage.StorageClass), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/listers_core.go b/vendor/k8s.io/kubernetes/pkg/client/cache/listers_core.go deleted file mode 100644 index 1e8eb6055e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/listers_core.go +++ /dev/null @@ -1,348 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
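Illustrative aside (not part of the diff hunk): the listers deleted in listers.go above are consumed by building a namespace-indexed Indexer, letting a reflector or informer fill it, and wrapping it in a lister. A minimal sketch of the generic variant against the pre-change pkg/client/cache API; the indexer is populated by hand here only so the example stands alone:

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/api"
        "k8s.io/kubernetes/pkg/api/unversioned"
        "k8s.io/kubernetes/pkg/client/cache"
        "k8s.io/kubernetes/pkg/labels"
    )

    func main() {
        // Namespace-indexed store, as the listers above expect.
        indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
            cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
        })

        // Normally a reflector/informer populates the indexer; add one object
        // directly just to have something to list.
        svc := &api.Service{ObjectMeta: api.ObjectMeta{Namespace: "default", Name: "demo"}}
        if err := indexer.Add(svc); err != nil {
            fmt.Println("add:", err)
            return
        }

        lister := cache.NewGenericLister(indexer, unversioned.GroupResource{Resource: "services"})
        objs, err := lister.ByNamespace("default").List(labels.Everything())
        fmt.Println(len(objs), err)
    }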
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/labels" -) - -// TODO: generate these classes and methods for all resources of interest using -// a script. Can use "go generate" once 1.4 is supported by all users. - -// Lister makes an Index have the List method. The Stores must contain only the expected type -// Example: -// s := cache.NewStore() -// lw := cache.ListWatch{Client: c, FieldSelector: sel, Resource: "pods"} -// r := cache.NewReflector(lw, &api.Pod{}, s).Run() -// l := StoreToPodLister{s} -// l.List() - -// StoreToPodLister helps list pods -type StoreToPodLister struct { - Indexer Indexer -} - -func (s *StoreToPodLister) List(selector labels.Selector) (ret []*api.Pod, err error) { - err = ListAll(s.Indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.Pod)) - }) - return ret, err -} - -func (s *StoreToPodLister) Pods(namespace string) storePodsNamespacer { - return storePodsNamespacer{Indexer: s.Indexer, namespace: namespace} -} - -type storePodsNamespacer struct { - Indexer Indexer - namespace string -} - -func (s storePodsNamespacer) List(selector labels.Selector) (ret []*api.Pod, err error) { - err = ListAllByNamespace(s.Indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*api.Pod)) - }) - return ret, err -} - -func (s storePodsNamespacer) Get(name string) (*api.Pod, error) { - obj, exists, err := s.Indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(api.Resource("pod"), name) - } - return obj.(*api.Pod), nil -} - -// StoreToServiceLister helps list services -type StoreToServiceLister struct { - Indexer Indexer -} - -func (s *StoreToServiceLister) List(selector labels.Selector) (ret []*api.Service, err error) { - err = ListAll(s.Indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.Service)) - }) - return ret, err -} - -func (s *StoreToServiceLister) Services(namespace string) storeServicesNamespacer { - return storeServicesNamespacer{s.Indexer, namespace} -} - -type storeServicesNamespacer struct { - indexer Indexer - namespace string -} - -func (s storeServicesNamespacer) List(selector labels.Selector) (ret []*api.Service, err error) { - err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*api.Service)) - }) - return ret, err -} - -func (s storeServicesNamespacer) Get(name string) (*api.Service, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(api.Resource("service"), name) - } - return obj.(*api.Service), nil -} - -// TODO: Move this back to scheduler as a helper function that takes a Store, -// rather than a method of StoreToServiceLister. 
-func (s *StoreToServiceLister) GetPodServices(pod *api.Pod) (services []*api.Service, err error) { - allServices, err := s.Services(pod.Namespace).List(labels.Everything()) - if err != nil { - return nil, err - } - - for i := range allServices { - service := allServices[i] - if service.Spec.Selector == nil { - // services with nil selectors match nothing, not everything. - continue - } - selector := labels.Set(service.Spec.Selector).AsSelectorPreValidated() - if selector.Matches(labels.Set(pod.Labels)) { - services = append(services, service) - } - } - - return services, nil -} - -// StoreToReplicationControllerLister helps list rcs -type StoreToReplicationControllerLister struct { - Indexer Indexer -} - -func (s *StoreToReplicationControllerLister) List(selector labels.Selector) (ret []*api.ReplicationController, err error) { - err = ListAll(s.Indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.ReplicationController)) - }) - return ret, err -} - -func (s *StoreToReplicationControllerLister) ReplicationControllers(namespace string) storeReplicationControllersNamespacer { - return storeReplicationControllersNamespacer{s.Indexer, namespace} -} - -type storeReplicationControllersNamespacer struct { - indexer Indexer - namespace string -} - -func (s storeReplicationControllersNamespacer) List(selector labels.Selector) (ret []*api.ReplicationController, err error) { - err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*api.ReplicationController)) - }) - return ret, err -} - -func (s storeReplicationControllersNamespacer) Get(name string) (*api.ReplicationController, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(api.Resource("replicationcontroller"), name) - } - return obj.(*api.ReplicationController), nil -} - -// GetPodControllers returns a list of replication controllers managing a pod. Returns an error only if no matching controllers are found. -func (s *StoreToReplicationControllerLister) GetPodControllers(pod *api.Pod) (controllers []*api.ReplicationController, err error) { - if len(pod.Labels) == 0 { - err = fmt.Errorf("no controllers found for pod %v because it has no labels", pod.Name) - return - } - - key := &api.ReplicationController{ObjectMeta: api.ObjectMeta{Namespace: pod.Namespace}} - items, err := s.Indexer.Index(NamespaceIndex, key) - if err != nil { - return - } - - for _, m := range items { - rc := m.(*api.ReplicationController) - selector := labels.Set(rc.Spec.Selector).AsSelectorPreValidated() - - // If an rc with a nil or empty selector creeps in, it should match nothing, not everything. 
- if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { - continue - } - controllers = append(controllers, rc) - } - if len(controllers) == 0 { - err = fmt.Errorf("could not find controller for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) - } - return -} - -// StoreToServiceAccountLister helps list service accounts -type StoreToServiceAccountLister struct { - Indexer Indexer -} - -func (s *StoreToServiceAccountLister) List(selector labels.Selector) (ret []*api.ServiceAccount, err error) { - err = ListAll(s.Indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.ServiceAccount)) - }) - return ret, err -} - -func (s *StoreToServiceAccountLister) ServiceAccounts(namespace string) storeServiceAccountsNamespacer { - return storeServiceAccountsNamespacer{s.Indexer, namespace} -} - -type storeServiceAccountsNamespacer struct { - indexer Indexer - namespace string -} - -func (s storeServiceAccountsNamespacer) List(selector labels.Selector) (ret []*api.ServiceAccount, err error) { - err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*api.ServiceAccount)) - }) - return ret, err -} - -func (s storeServiceAccountsNamespacer) Get(name string) (*api.ServiceAccount, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(api.Resource("serviceaccount"), name) - } - return obj.(*api.ServiceAccount), nil -} - -// StoreToLimitRangeLister helps list limit ranges -type StoreToLimitRangeLister struct { - Indexer Indexer -} - -func (s *StoreToLimitRangeLister) List(selector labels.Selector) (ret []*api.LimitRange, err error) { - err = ListAll(s.Indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.LimitRange)) - }) - return ret, err -} - -// StoreToPersistentVolumeClaimLister helps list pvcs -type StoreToPersistentVolumeClaimLister struct { - Indexer Indexer -} - -// List returns all persistentvolumeclaims that match the specified selector -func (s *StoreToPersistentVolumeClaimLister) List(selector labels.Selector) (ret []*api.PersistentVolumeClaim, err error) { - err = ListAll(s.Indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.PersistentVolumeClaim)) - }) - return ret, err -} - -func (s *StoreToLimitRangeLister) LimitRanges(namespace string) storeLimitRangesNamespacer { - return storeLimitRangesNamespacer{s.Indexer, namespace} -} - -type storeLimitRangesNamespacer struct { - indexer Indexer - namespace string -} - -func (s storeLimitRangesNamespacer) List(selector labels.Selector) (ret []*api.LimitRange, err error) { - err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*api.LimitRange)) - }) - return ret, err -} - -func (s storeLimitRangesNamespacer) Get(name string) (*api.LimitRange, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(api.Resource("limitrange"), name) - } - return obj.(*api.LimitRange), nil -} - -// PersistentVolumeClaims returns all claims in a specified namespace. 
-func (s *StoreToPersistentVolumeClaimLister) PersistentVolumeClaims(namespace string) storePersistentVolumeClaimsNamespacer { - return storePersistentVolumeClaimsNamespacer{Indexer: s.Indexer, namespace: namespace} -} - -type storePersistentVolumeClaimsNamespacer struct { - Indexer Indexer - namespace string -} - -func (s storePersistentVolumeClaimsNamespacer) List(selector labels.Selector) (ret []*api.PersistentVolumeClaim, err error) { - err = ListAllByNamespace(s.Indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*api.PersistentVolumeClaim)) - }) - return ret, err -} - -func (s storePersistentVolumeClaimsNamespacer) Get(name string) (*api.PersistentVolumeClaim, error) { - obj, exists, err := s.Indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(api.Resource("persistentvolumeclaims"), name) - } - return obj.(*api.PersistentVolumeClaim), nil -} - -// IndexerToNamespaceLister gives an Indexer List method -type IndexerToNamespaceLister struct { - Indexer -} - -// List returns a list of namespaces -func (i *IndexerToNamespaceLister) List(selector labels.Selector) (ret []*api.Namespace, err error) { - err = ListAll(i.Indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.Namespace)) - }) - return ret, err -} - -func (i *IndexerToNamespaceLister) Get(name string) (*api.Namespace, error) { - obj, exists, err := i.Indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(api.Resource("namespace"), name) - } - return obj.(*api.Namespace), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/listers_extensions.go b/vendor/k8s.io/kubernetes/pkg/client/cache/listers_extensions.go deleted file mode 100644 index 539657765d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/listers_extensions.go +++ /dev/null @@ -1,210 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/labels" -) - -// TODO: generate these classes and methods for all resources of interest using -// a script. Can use "go generate" once 1.4 is supported by all users. - -// Lister makes an Index have the List method. 
The Stores must contain only the expected type -// Example: -// s := cache.NewStore() -// lw := cache.ListWatch{Client: c, FieldSelector: sel, Resource: "pods"} -// r := cache.NewReflector(lw, &extensions.Deployment{}, s).Run() -// l := StoreToDeploymentLister{s} -// l.List() - -// StoreToDeploymentLister helps list deployments -type StoreToDeploymentLister struct { - Indexer Indexer -} - -func (s *StoreToDeploymentLister) List(selector labels.Selector) (ret []*extensions.Deployment, err error) { - err = ListAll(s.Indexer, selector, func(m interface{}) { - ret = append(ret, m.(*extensions.Deployment)) - }) - return ret, err -} - -func (s *StoreToDeploymentLister) Deployments(namespace string) storeDeploymentsNamespacer { - return storeDeploymentsNamespacer{Indexer: s.Indexer, namespace: namespace} -} - -type storeDeploymentsNamespacer struct { - Indexer Indexer - namespace string -} - -func (s storeDeploymentsNamespacer) List(selector labels.Selector) (ret []*extensions.Deployment, err error) { - err = ListAllByNamespace(s.Indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*extensions.Deployment)) - }) - return ret, err -} - -func (s storeDeploymentsNamespacer) Get(name string) (*extensions.Deployment, error) { - obj, exists, err := s.Indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(extensions.Resource("deployment"), name) - } - return obj.(*extensions.Deployment), nil -} - -// GetDeploymentsForReplicaSet returns a list of deployments managing a replica set. Returns an error only if no matching deployments are found. -func (s *StoreToDeploymentLister) GetDeploymentsForReplicaSet(rs *extensions.ReplicaSet) (deployments []*extensions.Deployment, err error) { - if len(rs.Labels) == 0 { - err = fmt.Errorf("no deployments found for ReplicaSet %v because it has no labels", rs.Name) - return - } - - // TODO: MODIFY THIS METHOD so that it checks for the podTemplateSpecHash label - dList, err := s.Deployments(rs.Namespace).List(labels.Everything()) - if err != nil { - return - } - for _, d := range dList { - selector, err := unversioned.LabelSelectorAsSelector(d.Spec.Selector) - if err != nil { - return nil, fmt.Errorf("invalid label selector: %v", err) - } - // If a deployment with a nil or empty selector creeps in, it should match nothing, not everything. - if selector.Empty() || !selector.Matches(labels.Set(rs.Labels)) { - continue - } - deployments = append(deployments, d) - } - if len(deployments) == 0 { - err = fmt.Errorf("could not find deployments set for ReplicaSet %s in namespace %s with labels: %v", rs.Name, rs.Namespace, rs.Labels) - } - return -} - -// GetDeploymentsForDeployments returns a list of deployments managing a pod. Returns an error only if no matching deployments are found. 
-// TODO eliminate shallow copies -func (s *StoreToDeploymentLister) GetDeploymentsForPod(pod *api.Pod) (deployments []*extensions.Deployment, err error) { - if len(pod.Labels) == 0 { - err = fmt.Errorf("no deployments found for Pod %v because it has no labels", pod.Name) - return - } - - if len(pod.Labels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 { - return - } - - dList, err := s.Deployments(pod.Namespace).List(labels.Everything()) - if err != nil { - return - } - for _, d := range dList { - selector, err := unversioned.LabelSelectorAsSelector(d.Spec.Selector) - if err != nil { - return nil, fmt.Errorf("invalid label selector: %v", err) - } - // If a deployment with a nil or empty selector creeps in, it should match nothing, not everything. - if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { - continue - } - deployments = append(deployments, d) - } - if len(deployments) == 0 { - err = fmt.Errorf("could not find deployments set for Pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) - } - return -} - -// StoreToReplicaSetLister helps list replicasets -type StoreToReplicaSetLister struct { - Indexer Indexer -} - -func (s *StoreToReplicaSetLister) List(selector labels.Selector) (ret []*extensions.ReplicaSet, err error) { - err = ListAll(s.Indexer, selector, func(m interface{}) { - ret = append(ret, m.(*extensions.ReplicaSet)) - }) - return ret, err -} - -func (s *StoreToReplicaSetLister) ReplicaSets(namespace string) storeReplicaSetsNamespacer { - return storeReplicaSetsNamespacer{Indexer: s.Indexer, namespace: namespace} -} - -type storeReplicaSetsNamespacer struct { - Indexer Indexer - namespace string -} - -func (s storeReplicaSetsNamespacer) List(selector labels.Selector) (ret []*extensions.ReplicaSet, err error) { - err = ListAllByNamespace(s.Indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*extensions.ReplicaSet)) - }) - return ret, err -} - -func (s storeReplicaSetsNamespacer) Get(name string) (*extensions.ReplicaSet, error) { - obj, exists, err := s.Indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(extensions.Resource("replicaset"), name) - } - return obj.(*extensions.ReplicaSet), nil -} - -// GetPodReplicaSets returns a list of ReplicaSets managing a pod. Returns an error only if no matching ReplicaSets are found. -func (s *StoreToReplicaSetLister) GetPodReplicaSets(pod *api.Pod) (rss []*extensions.ReplicaSet, err error) { - if len(pod.Labels) == 0 { - err = fmt.Errorf("no ReplicaSets found for pod %v because it has no labels", pod.Name) - return - } - - list, err := s.ReplicaSets(pod.Namespace).List(labels.Everything()) - if err != nil { - return - } - for _, rs := range list { - if rs.Namespace != pod.Namespace { - continue - } - selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector) - if err != nil { - return nil, fmt.Errorf("invalid selector: %v", err) - } - - // If a ReplicaSet with a nil or empty selector creeps in, it should match nothing, not everything. 
- if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { - continue - } - rss = append(rss, rs) - } - if len(rss) == 0 { - err = fmt.Errorf("could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) - } - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/listers_rbac.go b/vendor/k8s.io/kubernetes/pkg/client/cache/listers_rbac.go deleted file mode 100644 index 2b1cf8bb2a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/listers_rbac.go +++ /dev/null @@ -1,234 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -import ( - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/apis/rbac" - "k8s.io/kubernetes/pkg/labels" -) - -// TODO: generate these classes and methods for all resources of interest using -// a script. Can use "go generate" once 1.4 is supported by all users. - -// Lister makes an Index have the List method. The Stores must contain only the expected type -// Example: -// s := cache.NewStore() -// lw := cache.ListWatch{Client: c, FieldSelector: sel, Resource: "pods"} -// r := cache.NewReflector(lw, &rbac.ClusterRole{}, s).Run() -// l := clusterRoleLister{s} -// l.List() - -func NewClusterRoleLister(indexer Indexer) ClusterRoleLister { - return &clusterRoleLister{indexer: indexer} -} -func NewClusterRoleBindingLister(indexer Indexer) ClusterRoleBindingLister { - return &clusterRoleBindingLister{indexer: indexer} -} -func NewRoleLister(indexer Indexer) RoleLister { - return &roleLister{indexer: indexer} -} -func NewRoleBindingLister(indexer Indexer) RoleBindingLister { - return &roleBindingLister{indexer: indexer} -} - -// these interfaces are used by the rbac authorizer -type authorizerClusterRoleGetter interface { - GetClusterRole(name string) (*rbac.ClusterRole, error) -} - -type authorizerClusterRoleBindingLister interface { - ListClusterRoleBindings() ([]*rbac.ClusterRoleBinding, error) -} - -type authorizerRoleGetter interface { - GetRole(namespace, name string) (*rbac.Role, error) -} - -type authorizerRoleBindingLister interface { - ListRoleBindings(namespace string) ([]*rbac.RoleBinding, error) -} - -type ClusterRoleLister interface { - authorizerClusterRoleGetter - List(selector labels.Selector) (ret []*rbac.ClusterRole, err error) - Get(name string) (*rbac.ClusterRole, error) -} - -type clusterRoleLister struct { - indexer Indexer -} - -func (s *clusterRoleLister) List(selector labels.Selector) (ret []*rbac.ClusterRole, err error) { - err = ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*rbac.ClusterRole)) - }) - return ret, err -} - -func (s clusterRoleLister) Get(name string) (*rbac.ClusterRole, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(rbac.Resource("clusterrole"), name) - } - return obj.(*rbac.ClusterRole), nil -} - -func (s clusterRoleLister) GetClusterRole(name string) (*rbac.ClusterRole, error) { - 
return s.Get(name) -} - -type ClusterRoleBindingLister interface { - authorizerClusterRoleBindingLister - List(selector labels.Selector) (ret []*rbac.ClusterRoleBinding, err error) - Get(name string) (*rbac.ClusterRoleBinding, error) -} - -type clusterRoleBindingLister struct { - indexer Indexer -} - -func (s *clusterRoleBindingLister) List(selector labels.Selector) (ret []*rbac.ClusterRoleBinding, err error) { - err = ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*rbac.ClusterRoleBinding)) - }) - return ret, err -} - -func (s clusterRoleBindingLister) Get(name string) (*rbac.ClusterRoleBinding, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(rbac.Resource("clusterrolebinding"), name) - } - return obj.(*rbac.ClusterRoleBinding), nil -} - -func (s clusterRoleBindingLister) ListClusterRoleBindings() ([]*rbac.ClusterRoleBinding, error) { - return s.List(labels.Everything()) -} - -type RoleLister interface { - authorizerRoleGetter - List(selector labels.Selector) (ret []*rbac.Role, err error) - Roles(namespace string) RoleNamespaceLister -} - -type RoleNamespaceLister interface { - List(selector labels.Selector) (ret []*rbac.Role, err error) - Get(name string) (*rbac.Role, error) -} - -type roleLister struct { - indexer Indexer -} - -func (s *roleLister) List(selector labels.Selector) (ret []*rbac.Role, err error) { - err = ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*rbac.Role)) - }) - return ret, err -} - -func (s *roleLister) Roles(namespace string) RoleNamespaceLister { - return roleNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -func (s roleLister) GetRole(namespace, name string) (*rbac.Role, error) { - return s.Roles(namespace).Get(name) -} - -type roleNamespaceLister struct { - indexer Indexer - namespace string -} - -func (s roleNamespaceLister) List(selector labels.Selector) (ret []*rbac.Role, err error) { - err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*rbac.Role)) - }) - return ret, err -} - -func (s roleNamespaceLister) Get(name string) (*rbac.Role, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(rbac.Resource("role"), name) - } - return obj.(*rbac.Role), nil -} - -type RoleBindingLister interface { - authorizerRoleBindingLister - List(selector labels.Selector) (ret []*rbac.RoleBinding, err error) - RoleBindings(namespace string) RoleBindingNamespaceLister -} - -type RoleBindingNamespaceLister interface { - List(selector labels.Selector) (ret []*rbac.RoleBinding, err error) - Get(name string) (*rbac.RoleBinding, error) -} - -type roleBindingLister struct { - indexer Indexer -} - -func (s *roleBindingLister) List(selector labels.Selector) (ret []*rbac.RoleBinding, err error) { - err = ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*rbac.RoleBinding)) - }) - return ret, err -} - -func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister { - return roleBindingNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -func (s roleBindingLister) ListRoleBindings(namespace string) ([]*rbac.RoleBinding, error) { - return s.RoleBindings(namespace).List(labels.Everything()) -} - -type roleBindingNamespaceLister struct { - indexer Indexer - namespace string -} - -func (s 
roleBindingNamespaceLister) List(selector labels.Selector) (ret []*rbac.RoleBinding, err error) { - err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*rbac.RoleBinding)) - }) - return ret, err -} - -func (s roleBindingNamespaceLister) Get(name string) (*rbac.RoleBinding, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(rbac.Resource("rolebinding"), name) - } - return obj.(*rbac.RoleBinding), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/shared_informer.go b/vendor/k8s.io/kubernetes/pkg/client/cache/shared_informer.go deleted file mode 100644 index be68b87f5f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/shared_informer.go +++ /dev/null @@ -1,417 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -import ( - "fmt" - "sync" - "time" - - "k8s.io/kubernetes/pkg/runtime" - utilruntime "k8s.io/kubernetes/pkg/util/runtime" - "k8s.io/kubernetes/pkg/util/wait" - - "github.com/golang/glog" -) - -// if you use this, there is one behavior change compared to a standard Informer. -// When you receive a notification, the cache will be AT LEAST as fresh as the -// notification, but it MAY be more fresh. You should NOT depend on the contents -// of the cache exactly matching the notification you've received in handler -// functions. If there was a create, followed by a delete, the cache may NOT -// have your item. This has advantages over the broadcaster since it allows us -// to share a common cache across many controllers. Extending the broadcaster -// would have required us keep duplicate caches for each watch. -type SharedInformer interface { - // events to a single handler are delivered sequentially, but there is no coordination between different handlers - // You may NOT add a handler *after* the SharedInformer is running. That will result in an error being returned. - // TODO we should try to remove this restriction eventually. - AddEventHandler(handler ResourceEventHandler) error - GetStore() Store - // GetController gives back a synthetic interface that "votes" to start the informer - GetController() ControllerInterface - Run(stopCh <-chan struct{}) - HasSynced() bool - LastSyncResourceVersion() string -} - -type SharedIndexInformer interface { - SharedInformer - // AddIndexers add indexers to the informer before it starts. - AddIndexers(indexers Indexers) error - GetIndexer() Indexer -} - -// NewSharedInformer creates a new instance for the listwatcher. -// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can -// be shared amongst all consumers. 
-func NewSharedInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer { - return NewSharedIndexInformer(lw, objType, resyncPeriod, Indexers{}) -} - -// NewSharedIndexInformer creates a new instance for the listwatcher. -// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can -// be shared amongst all consumers. -func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, indexers Indexers) SharedIndexInformer { - sharedIndexInformer := &sharedIndexInformer{ - processor: &sharedProcessor{}, - indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers), - listerWatcher: lw, - objectType: objType, - fullResyncPeriod: resyncPeriod, - cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", objType)), - } - return sharedIndexInformer -} - -// InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced. -type InformerSynced func() bool - -// syncedPollPeriod controls how often you look at the status of your sync funcs -const syncedPollPeriod = 100 * time.Millisecond - -// WaitForCacheSync waits for caches to populate. It returns true if it was successful, false -// if the contoller should shutdown -func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { - err := wait.PollUntil(syncedPollPeriod, - func() (bool, error) { - for _, syncFunc := range cacheSyncs { - if !syncFunc() { - return false, nil - } - } - return true, nil - }, - stopCh) - if err != nil { - glog.V(2).Infof("stop requested") - return false - } - - glog.V(4).Infof("caches populated") - return true -} - -type sharedIndexInformer struct { - indexer Indexer - controller *Controller - - processor *sharedProcessor - cacheMutationDetector CacheMutationDetector - - // This block is tracked to handle late initialization of the controller - listerWatcher ListerWatcher - objectType runtime.Object - fullResyncPeriod time.Duration - - started bool - startedLock sync.Mutex - - // blockDeltas gives a way to stop all event distribution so that a late event handler - // can safely join the shared informer. - blockDeltas sync.Mutex - // stopCh is the channel used to stop the main Run process. We have to track it so that - // late joiners can have a proper stop - stopCh <-chan struct{} -} - -// dummyController hides the fact that a SharedInformer is different from a dedicated one -// where a caller can `Run`. The run method is disonnected in this case, because higher -// level logic will decide when to start the SharedInformer and related controller. -// Because returning information back is always asynchronous, the legacy callers shouldn't -// notice any change in behavior. 
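Illustrative aside (not part of the diff hunk): a typical consumer of the shared-informer API above registers its handlers, starts Run in a goroutine, and then gates work on WaitForCacheSync. A compile-oriented sketch against the pre-change package; the ListerWatcher (for example a cache.ListWatch built against the API server) is assumed to be supplied by the caller:

    package informerexample

    import (
        "fmt"
        "time"

        "k8s.io/kubernetes/pkg/api"
        "k8s.io/kubernetes/pkg/client/cache"
    )

    // runPodInformer shows the intended call order: add handlers, then Run,
    // then wait for the cache to sync before serving reads from it.
    func runPodInformer(lw cache.ListerWatcher, stopCh <-chan struct{}) error {
        informer := cache.NewSharedIndexInformer(lw, &api.Pod{}, 30*time.Second, cache.Indexers{
            cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
        })

        // Handlers are registered before the informer starts.
        if err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
            AddFunc:    func(obj interface{}) { fmt.Println("add") },
            DeleteFunc: func(obj interface{}) { fmt.Println("delete") },
        }); err != nil {
            return err
        }

        go informer.Run(stopCh)

        if !cache.WaitForCacheSync(stopCh, informer.HasSynced) {
            return fmt.Errorf("caches never synced")
        }

        // From here on, informer.GetIndexer() serves reads from the local cache.
        return nil
    }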
-type dummyController struct { - informer *sharedIndexInformer -} - -func (v *dummyController) Run(stopCh <-chan struct{}) { -} - -func (v *dummyController) HasSynced() bool { - return v.informer.HasSynced() -} - -type updateNotification struct { - oldObj interface{} - newObj interface{} -} - -type addNotification struct { - newObj interface{} -} - -type deleteNotification struct { - oldObj interface{} -} - -func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { - defer utilruntime.HandleCrash() - - fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, s.indexer) - - cfg := &Config{ - Queue: fifo, - ListerWatcher: s.listerWatcher, - ObjectType: s.objectType, - FullResyncPeriod: s.fullResyncPeriod, - RetryOnError: false, - - Process: s.HandleDeltas, - } - - func() { - s.startedLock.Lock() - defer s.startedLock.Unlock() - - s.controller = New(cfg) - s.started = true - }() - - s.stopCh = stopCh - s.cacheMutationDetector.Run(stopCh) - s.processor.run(stopCh) - s.controller.Run(stopCh) -} - -func (s *sharedIndexInformer) isStarted() bool { - s.startedLock.Lock() - defer s.startedLock.Unlock() - return s.started -} - -func (s *sharedIndexInformer) HasSynced() bool { - s.startedLock.Lock() - defer s.startedLock.Unlock() - - if s.controller == nil { - return false - } - return s.controller.HasSynced() -} - -func (s *sharedIndexInformer) LastSyncResourceVersion() string { - s.startedLock.Lock() - defer s.startedLock.Unlock() - - if s.controller == nil || s.controller.reflector == nil { - return "" - } - return s.controller.reflector.LastSyncResourceVersion() -} - -func (s *sharedIndexInformer) GetStore() Store { - return s.indexer -} - -func (s *sharedIndexInformer) GetIndexer() Indexer { - return s.indexer -} - -func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error { - s.startedLock.Lock() - defer s.startedLock.Unlock() - - if s.started { - return fmt.Errorf("informer has already started") - } - - return s.indexer.AddIndexers(indexers) -} - -func (s *sharedIndexInformer) GetController() ControllerInterface { - return &dummyController{informer: s} -} - -func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) error { - s.startedLock.Lock() - defer s.startedLock.Unlock() - - if !s.started { - listener := newProcessListener(handler) - s.processor.listeners = append(s.processor.listeners, listener) - return nil - } - - // in order to safely join, we have to - // 1. stop sending add/update/delete notifications - // 2. do a list against the store - // 3. send synthetic "Add" events to the new handler - // 4. 
unblock - s.blockDeltas.Lock() - defer s.blockDeltas.Unlock() - - listener := newProcessListener(handler) - s.processor.listeners = append(s.processor.listeners, listener) - - go listener.run(s.stopCh) - go listener.pop(s.stopCh) - - items := s.indexer.List() - for i := range items { - listener.add(addNotification{newObj: items[i]}) - } - - return nil -} - -func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error { - s.blockDeltas.Lock() - defer s.blockDeltas.Unlock() - - // from oldest to newest - for _, d := range obj.(Deltas) { - switch d.Type { - case Sync, Added, Updated: - s.cacheMutationDetector.AddObject(d.Object) - if old, exists, err := s.indexer.Get(d.Object); err == nil && exists { - if err := s.indexer.Update(d.Object); err != nil { - return err - } - s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object}) - } else { - if err := s.indexer.Add(d.Object); err != nil { - return err - } - s.processor.distribute(addNotification{newObj: d.Object}) - } - case Deleted: - if err := s.indexer.Delete(d.Object); err != nil { - return err - } - s.processor.distribute(deleteNotification{oldObj: d.Object}) - } - } - return nil -} - -type sharedProcessor struct { - listeners []*processorListener -} - -func (p *sharedProcessor) distribute(obj interface{}) { - for _, listener := range p.listeners { - listener.add(obj) - } -} - -func (p *sharedProcessor) run(stopCh <-chan struct{}) { - for _, listener := range p.listeners { - go listener.run(stopCh) - go listener.pop(stopCh) - } -} - -type processorListener struct { - // lock/cond protects access to 'pendingNotifications'. - lock sync.RWMutex - cond sync.Cond - - // pendingNotifications is an unbounded slice that holds all notifications not yet distributed - // there is one per listener, but a failing/stalled listener will have infinite pendingNotifications - // added until we OOM. 
- // TODO This is no worse that before, since reflectors were backed by unbounded DeltaFIFOs, but - // we should try to do something better - pendingNotifications []interface{} - - nextCh chan interface{} - - handler ResourceEventHandler -} - -func newProcessListener(handler ResourceEventHandler) *processorListener { - ret := &processorListener{ - pendingNotifications: []interface{}{}, - nextCh: make(chan interface{}), - handler: handler, - } - - ret.cond.L = &ret.lock - return ret -} - -func (p *processorListener) add(notification interface{}) { - p.lock.Lock() - defer p.lock.Unlock() - - p.pendingNotifications = append(p.pendingNotifications, notification) - p.cond.Broadcast() -} - -func (p *processorListener) pop(stopCh <-chan struct{}) { - defer utilruntime.HandleCrash() - - for { - blockingGet := func() (interface{}, bool) { - p.lock.Lock() - defer p.lock.Unlock() - - for len(p.pendingNotifications) == 0 { - // check if we're shutdown - select { - case <-stopCh: - return nil, true - default: - } - p.cond.Wait() - } - - nt := p.pendingNotifications[0] - p.pendingNotifications = p.pendingNotifications[1:] - return nt, false - } - - notification, stopped := blockingGet() - if stopped { - return - } - - select { - case <-stopCh: - return - case p.nextCh <- notification: - } - } -} - -func (p *processorListener) run(stopCh <-chan struct{}) { - defer utilruntime.HandleCrash() - - for { - var next interface{} - select { - case <-stopCh: - func() { - p.lock.Lock() - defer p.lock.Unlock() - p.cond.Broadcast() - }() - return - case next = <-p.nextCh: - } - - switch notification := next.(type) { - case updateNotification: - p.handler.OnUpdate(notification.oldObj, notification.newObj) - case addNotification: - p.handler.OnAdd(notification.newObj) - case deleteNotification: - p.handler.OnDelete(notification.oldObj) - default: - utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next)) - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/BUILD new file mode 100644 index 0000000000..39cc0ffb1b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/BUILD @@ -0,0 +1,90 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "clientset.go", + "doc.go", + "import_known_versions.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + "//pkg/api/install:go_default_library", + "//pkg/apis/apps/install:go_default_library", + "//pkg/apis/authentication/install:go_default_library", + "//pkg/apis/authorization/install:go_default_library", + "//pkg/apis/autoscaling/install:go_default_library", + "//pkg/apis/batch/install:go_default_library", + "//pkg/apis/certificates/install:go_default_library", + "//pkg/apis/extensions/install:go_default_library", + "//pkg/apis/policy/install:go_default_library", + "//pkg/apis/rbac/install:go_default_library", + "//pkg/apis/settings/install:go_default_library", + "//pkg/apis/storage/install:go_default_library", + "//pkg/client/clientset_generated/clientset/typed/apps/v1beta1:go_default_library", + "//pkg/client/clientset_generated/clientset/typed/authentication/v1:go_default_library", + "//pkg/client/clientset_generated/clientset/typed/authentication/v1beta1:go_default_library", + 
"//pkg/client/clientset_generated/clientset/typed/authorization/v1:go_default_library", + "//pkg/client/clientset_generated/clientset/typed/authorization/v1beta1:go_default_library", + "//pkg/client/clientset_generated/clientset/typed/autoscaling/v1:go_default_library", + "//pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1:go_default_library", + "//pkg/client/clientset_generated/clientset/typed/batch/v1:go_default_library", + "//pkg/client/clientset_generated/clientset/typed/batch/v2alpha1:go_default_library", + "//pkg/client/clientset_generated/clientset/typed/certificates/v1beta1:go_default_library", + "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library", + "//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library", + "//pkg/client/clientset_generated/clientset/typed/policy/v1beta1:go_default_library", + "//pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1:go_default_library", + "//pkg/client/clientset_generated/clientset/typed/rbac/v1beta1:go_default_library", + "//pkg/client/clientset_generated/clientset/typed/settings/v1alpha1:go_default_library", + "//pkg/client/clientset_generated/clientset/typed/storage/v1:go_default_library", + "//pkg/client/clientset_generated/clientset/typed/storage/v1beta1:go_default_library", + "//vendor:github.com/golang/glog", + "//vendor:k8s.io/client-go/discovery", + "//vendor:k8s.io/client-go/rest", + "//vendor:k8s.io/client-go/util/flowcontrol", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/client/clientset_generated/clientset/fake:all-srcs", + "//pkg/client/clientset_generated/clientset/scheme:all-srcs", + "//pkg/client/clientset_generated/clientset/typed/apps/v1beta1:all-srcs", + "//pkg/client/clientset_generated/clientset/typed/authentication/v1:all-srcs", + "//pkg/client/clientset_generated/clientset/typed/authentication/v1beta1:all-srcs", + "//pkg/client/clientset_generated/clientset/typed/authorization/v1:all-srcs", + "//pkg/client/clientset_generated/clientset/typed/authorization/v1beta1:all-srcs", + "//pkg/client/clientset_generated/clientset/typed/autoscaling/v1:all-srcs", + "//pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1:all-srcs", + "//pkg/client/clientset_generated/clientset/typed/batch/v1:all-srcs", + "//pkg/client/clientset_generated/clientset/typed/batch/v2alpha1:all-srcs", + "//pkg/client/clientset_generated/clientset/typed/certificates/v1beta1:all-srcs", + "//pkg/client/clientset_generated/clientset/typed/core/v1:all-srcs", + "//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:all-srcs", + "//pkg/client/clientset_generated/clientset/typed/policy/v1beta1:all-srcs", + "//pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1:all-srcs", + "//pkg/client/clientset_generated/clientset/typed/rbac/v1beta1:all-srcs", + "//pkg/client/clientset_generated/clientset/typed/settings/v1alpha1:all-srcs", + "//pkg/client/clientset_generated/clientset/typed/storage/v1:all-srcs", + "//pkg/client/clientset_generated/clientset/typed/storage/v1beta1:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/clientset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/clientset.go new file mode 100644 index 0000000000..88df7ab770 --- /dev/null +++ 
b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/clientset.go @@ -0,0 +1,514 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientset + +import ( + glog "github.com/golang/glog" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" + appsv1beta1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1" + authenticationv1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1" + authenticationv1beta1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1" + authorizationv1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1" + authorizationv1beta1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1" + autoscalingv1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1" + autoscalingv2alpha1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1" + batchv1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1" + batchv2alpha1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1" + certificatesv1beta1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1" + corev1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1" + extensionsv1beta1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1" + policyv1beta1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1" + rbacv1alpha1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1" + rbacv1beta1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1" + settingsv1alpha1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1" + storagev1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1" + storagev1beta1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + CoreV1() corev1.CoreV1Interface + // Deprecated: please explicitly pick a version if possible. + Core() corev1.CoreV1Interface + AppsV1beta1() appsv1beta1.AppsV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Apps() appsv1beta1.AppsV1beta1Interface + AuthenticationV1() authenticationv1.AuthenticationV1Interface + // Deprecated: please explicitly pick a version if possible. + Authentication() authenticationv1.AuthenticationV1Interface + AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface + AuthorizationV1() authorizationv1.AuthorizationV1Interface + // Deprecated: please explicitly pick a version if possible. 
+ Authorization() authorizationv1.AuthorizationV1Interface + AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface + AutoscalingV1() autoscalingv1.AutoscalingV1Interface + // Deprecated: please explicitly pick a version if possible. + Autoscaling() autoscalingv1.AutoscalingV1Interface + AutoscalingV2alpha1() autoscalingv2alpha1.AutoscalingV2alpha1Interface + BatchV1() batchv1.BatchV1Interface + // Deprecated: please explicitly pick a version if possible. + Batch() batchv1.BatchV1Interface + BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface + CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Certificates() certificatesv1beta1.CertificatesV1beta1Interface + ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Extensions() extensionsv1beta1.ExtensionsV1beta1Interface + PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Policy() policyv1beta1.PolicyV1beta1Interface + RbacV1beta1() rbacv1beta1.RbacV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Rbac() rbacv1beta1.RbacV1beta1Interface + RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface + SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface + // Deprecated: please explicitly pick a version if possible. + Settings() settingsv1alpha1.SettingsV1alpha1Interface + StorageV1beta1() storagev1beta1.StorageV1beta1Interface + StorageV1() storagev1.StorageV1Interface + // Deprecated: please explicitly pick a version if possible. + Storage() storagev1.StorageV1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + *corev1.CoreV1Client + *appsv1beta1.AppsV1beta1Client + *authenticationv1.AuthenticationV1Client + *authenticationv1beta1.AuthenticationV1beta1Client + *authorizationv1.AuthorizationV1Client + *authorizationv1beta1.AuthorizationV1beta1Client + *autoscalingv1.AutoscalingV1Client + *autoscalingv2alpha1.AutoscalingV2alpha1Client + *batchv1.BatchV1Client + *batchv2alpha1.BatchV2alpha1Client + *certificatesv1beta1.CertificatesV1beta1Client + *extensionsv1beta1.ExtensionsV1beta1Client + *policyv1beta1.PolicyV1beta1Client + *rbacv1beta1.RbacV1beta1Client + *rbacv1alpha1.RbacV1alpha1Client + *settingsv1alpha1.SettingsV1alpha1Client + *storagev1beta1.StorageV1beta1Client + *storagev1.StorageV1Client +} + +// CoreV1 retrieves the CoreV1Client +func (c *Clientset) CoreV1() corev1.CoreV1Interface { + if c == nil { + return nil + } + return c.CoreV1Client +} + +// Deprecated: Core retrieves the default version of CoreClient. +// Please explicitly pick a version. +func (c *Clientset) Core() corev1.CoreV1Interface { + if c == nil { + return nil + } + return c.CoreV1Client +} + +// AppsV1beta1 retrieves the AppsV1beta1Client +func (c *Clientset) AppsV1beta1() appsv1beta1.AppsV1beta1Interface { + if c == nil { + return nil + } + return c.AppsV1beta1Client +} + +// Deprecated: Apps retrieves the default version of AppsClient. +// Please explicitly pick a version. 
+func (c *Clientset) Apps() appsv1beta1.AppsV1beta1Interface { + if c == nil { + return nil + } + return c.AppsV1beta1Client +} + +// AuthenticationV1 retrieves the AuthenticationV1Client +func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interface { + if c == nil { + return nil + } + return c.AuthenticationV1Client +} + +// Deprecated: Authentication retrieves the default version of AuthenticationClient. +// Please explicitly pick a version. +func (c *Clientset) Authentication() authenticationv1.AuthenticationV1Interface { + if c == nil { + return nil + } + return c.AuthenticationV1Client +} + +// AuthenticationV1beta1 retrieves the AuthenticationV1beta1Client +func (c *Clientset) AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface { + if c == nil { + return nil + } + return c.AuthenticationV1beta1Client +} + +// AuthorizationV1 retrieves the AuthorizationV1Client +func (c *Clientset) AuthorizationV1() authorizationv1.AuthorizationV1Interface { + if c == nil { + return nil + } + return c.AuthorizationV1Client +} + +// Deprecated: Authorization retrieves the default version of AuthorizationClient. +// Please explicitly pick a version. +func (c *Clientset) Authorization() authorizationv1.AuthorizationV1Interface { + if c == nil { + return nil + } + return c.AuthorizationV1Client +} + +// AuthorizationV1beta1 retrieves the AuthorizationV1beta1Client +func (c *Clientset) AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface { + if c == nil { + return nil + } + return c.AuthorizationV1beta1Client +} + +// AutoscalingV1 retrieves the AutoscalingV1Client +func (c *Clientset) AutoscalingV1() autoscalingv1.AutoscalingV1Interface { + if c == nil { + return nil + } + return c.AutoscalingV1Client +} + +// Deprecated: Autoscaling retrieves the default version of AutoscalingClient. +// Please explicitly pick a version. +func (c *Clientset) Autoscaling() autoscalingv1.AutoscalingV1Interface { + if c == nil { + return nil + } + return c.AutoscalingV1Client +} + +// AutoscalingV2alpha1 retrieves the AutoscalingV2alpha1Client +func (c *Clientset) AutoscalingV2alpha1() autoscalingv2alpha1.AutoscalingV2alpha1Interface { + if c == nil { + return nil + } + return c.AutoscalingV2alpha1Client +} + +// BatchV1 retrieves the BatchV1Client +func (c *Clientset) BatchV1() batchv1.BatchV1Interface { + if c == nil { + return nil + } + return c.BatchV1Client +} + +// Deprecated: Batch retrieves the default version of BatchClient. +// Please explicitly pick a version. +func (c *Clientset) Batch() batchv1.BatchV1Interface { + if c == nil { + return nil + } + return c.BatchV1Client +} + +// BatchV2alpha1 retrieves the BatchV2alpha1Client +func (c *Clientset) BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface { + if c == nil { + return nil + } + return c.BatchV2alpha1Client +} + +// CertificatesV1beta1 retrieves the CertificatesV1beta1Client +func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface { + if c == nil { + return nil + } + return c.CertificatesV1beta1Client +} + +// Deprecated: Certificates retrieves the default version of CertificatesClient. +// Please explicitly pick a version. 
+func (c *Clientset) Certificates() certificatesv1beta1.CertificatesV1beta1Interface { + if c == nil { + return nil + } + return c.CertificatesV1beta1Client +} + +// ExtensionsV1beta1 retrieves the ExtensionsV1beta1Client +func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface { + if c == nil { + return nil + } + return c.ExtensionsV1beta1Client +} + +// Deprecated: Extensions retrieves the default version of ExtensionsClient. +// Please explicitly pick a version. +func (c *Clientset) Extensions() extensionsv1beta1.ExtensionsV1beta1Interface { + if c == nil { + return nil + } + return c.ExtensionsV1beta1Client +} + +// PolicyV1beta1 retrieves the PolicyV1beta1Client +func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface { + if c == nil { + return nil + } + return c.PolicyV1beta1Client +} + +// Deprecated: Policy retrieves the default version of PolicyClient. +// Please explicitly pick a version. +func (c *Clientset) Policy() policyv1beta1.PolicyV1beta1Interface { + if c == nil { + return nil + } + return c.PolicyV1beta1Client +} + +// RbacV1beta1 retrieves the RbacV1beta1Client +func (c *Clientset) RbacV1beta1() rbacv1beta1.RbacV1beta1Interface { + if c == nil { + return nil + } + return c.RbacV1beta1Client +} + +// Deprecated: Rbac retrieves the default version of RbacClient. +// Please explicitly pick a version. +func (c *Clientset) Rbac() rbacv1beta1.RbacV1beta1Interface { + if c == nil { + return nil + } + return c.RbacV1beta1Client +} + +// RbacV1alpha1 retrieves the RbacV1alpha1Client +func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { + if c == nil { + return nil + } + return c.RbacV1alpha1Client +} + +// SettingsV1alpha1 retrieves the SettingsV1alpha1Client +func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface { + if c == nil { + return nil + } + return c.SettingsV1alpha1Client +} + +// Deprecated: Settings retrieves the default version of SettingsClient. +// Please explicitly pick a version. +func (c *Clientset) Settings() settingsv1alpha1.SettingsV1alpha1Interface { + if c == nil { + return nil + } + return c.SettingsV1alpha1Client +} + +// StorageV1beta1 retrieves the StorageV1beta1Client +func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface { + if c == nil { + return nil + } + return c.StorageV1beta1Client +} + +// StorageV1 retrieves the StorageV1Client +func (c *Clientset) StorageV1() storagev1.StorageV1Interface { + if c == nil { + return nil + } + return c.StorageV1Client +} + +// Deprecated: Storage retrieves the default version of StorageClient. +// Please explicitly pick a version. +func (c *Clientset) Storage() storagev1.StorageV1Interface { + if c == nil { + return nil + } + return c.StorageV1Client +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. 
+func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.CoreV1Client, err = corev1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.AppsV1beta1Client, err = appsv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.AuthenticationV1Client, err = authenticationv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.AuthenticationV1beta1Client, err = authenticationv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.AuthorizationV1Client, err = authorizationv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.AuthorizationV1beta1Client, err = authorizationv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.AutoscalingV1Client, err = autoscalingv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.AutoscalingV2alpha1Client, err = autoscalingv2alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.BatchV1Client, err = batchv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.BatchV2alpha1Client, err = batchv2alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.CertificatesV1beta1Client, err = certificatesv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.ExtensionsV1beta1Client, err = extensionsv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.PolicyV1beta1Client, err = policyv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.RbacV1beta1Client, err = rbacv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.RbacV1alpha1Client, err = rbacv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.SettingsV1alpha1Client, err = settingsv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.StorageV1beta1Client, err = storagev1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.StorageV1Client, err = storagev1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + glog.Errorf("failed to create the DiscoveryClient: %v", err) + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. 
+func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.CoreV1Client = corev1.NewForConfigOrDie(c) + cs.AppsV1beta1Client = appsv1beta1.NewForConfigOrDie(c) + cs.AuthenticationV1Client = authenticationv1.NewForConfigOrDie(c) + cs.AuthenticationV1beta1Client = authenticationv1beta1.NewForConfigOrDie(c) + cs.AuthorizationV1Client = authorizationv1.NewForConfigOrDie(c) + cs.AuthorizationV1beta1Client = authorizationv1beta1.NewForConfigOrDie(c) + cs.AutoscalingV1Client = autoscalingv1.NewForConfigOrDie(c) + cs.AutoscalingV2alpha1Client = autoscalingv2alpha1.NewForConfigOrDie(c) + cs.BatchV1Client = batchv1.NewForConfigOrDie(c) + cs.BatchV2alpha1Client = batchv2alpha1.NewForConfigOrDie(c) + cs.CertificatesV1beta1Client = certificatesv1beta1.NewForConfigOrDie(c) + cs.ExtensionsV1beta1Client = extensionsv1beta1.NewForConfigOrDie(c) + cs.PolicyV1beta1Client = policyv1beta1.NewForConfigOrDie(c) + cs.RbacV1beta1Client = rbacv1beta1.NewForConfigOrDie(c) + cs.RbacV1alpha1Client = rbacv1alpha1.NewForConfigOrDie(c) + cs.SettingsV1alpha1Client = settingsv1alpha1.NewForConfigOrDie(c) + cs.StorageV1beta1Client = storagev1beta1.NewForConfigOrDie(c) + cs.StorageV1Client = storagev1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.CoreV1Client = corev1.New(c) + cs.AppsV1beta1Client = appsv1beta1.New(c) + cs.AuthenticationV1Client = authenticationv1.New(c) + cs.AuthenticationV1beta1Client = authenticationv1beta1.New(c) + cs.AuthorizationV1Client = authorizationv1.New(c) + cs.AuthorizationV1beta1Client = authorizationv1beta1.New(c) + cs.AutoscalingV1Client = autoscalingv1.New(c) + cs.AutoscalingV2alpha1Client = autoscalingv2alpha1.New(c) + cs.BatchV1Client = batchv1.New(c) + cs.BatchV2alpha1Client = batchv2alpha1.New(c) + cs.CertificatesV1beta1Client = certificatesv1beta1.New(c) + cs.ExtensionsV1beta1Client = extensionsv1beta1.New(c) + cs.PolicyV1beta1Client = policyv1beta1.New(c) + cs.RbacV1beta1Client = rbacv1beta1.New(c) + cs.RbacV1alpha1Client = rbacv1alpha1.New(c) + cs.SettingsV1alpha1Client = settingsv1alpha1.New(c) + cs.StorageV1beta1Client = storagev1beta1.New(c) + cs.StorageV1Client = storagev1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/doc.go new file mode 100644 index 0000000000..7d72e7fb20 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated clientset. 
+package clientset diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/import_known_versions.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/import_known_versions.go new file mode 100644 index 0000000000..061c2af6d5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/import_known_versions.go @@ -0,0 +1,42 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientset + +// These imports are the API groups the client will support. +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + _ "k8s.io/kubernetes/pkg/api/install" + _ "k8s.io/kubernetes/pkg/apis/apps/install" + _ "k8s.io/kubernetes/pkg/apis/authentication/install" + _ "k8s.io/kubernetes/pkg/apis/authorization/install" + _ "k8s.io/kubernetes/pkg/apis/autoscaling/install" + _ "k8s.io/kubernetes/pkg/apis/batch/install" + _ "k8s.io/kubernetes/pkg/apis/certificates/install" + _ "k8s.io/kubernetes/pkg/apis/extensions/install" + _ "k8s.io/kubernetes/pkg/apis/policy/install" + _ "k8s.io/kubernetes/pkg/apis/rbac/install" + _ "k8s.io/kubernetes/pkg/apis/settings/install" + _ "k8s.io/kubernetes/pkg/apis/storage/install" +) + +func init() { + if missingVersions := api.Registry.ValidateEnvRequestedVersions(); len(missingVersions) != 0 { + panic(fmt.Sprintf("KUBE_API_VERSIONS contains versions that are not installed: %q.", missingVersions)) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme/BUILD new file mode 100644 index 0000000000..aa2be384b2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme/BUILD @@ -0,0 +1,54 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "register.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/api/v1:go_default_library", + "//pkg/apis/apps/v1beta1:go_default_library", + "//pkg/apis/authentication/v1:go_default_library", + "//pkg/apis/authentication/v1beta1:go_default_library", + "//pkg/apis/authorization/v1:go_default_library", + "//pkg/apis/authorization/v1beta1:go_default_library", + "//pkg/apis/autoscaling/v1:go_default_library", + "//pkg/apis/autoscaling/v2alpha1:go_default_library", + "//pkg/apis/batch/v1:go_default_library", + "//pkg/apis/batch/v2alpha1:go_default_library", + "//pkg/apis/certificates/v1beta1:go_default_library", + "//pkg/apis/extensions/v1beta1:go_default_library", + "//pkg/apis/policy/v1beta1:go_default_library", + "//pkg/apis/rbac/v1alpha1:go_default_library", + "//pkg/apis/rbac/v1beta1:go_default_library", + "//pkg/apis/settings/v1alpha1:go_default_library", + "//pkg/apis/storage/v1:go_default_library", + "//pkg/apis/storage/v1beta1:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/runtime", 
+ "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme/doc.go new file mode 100644 index 0000000000..5d8ec824f0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme/register.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme/register.go new file mode 100644 index 0000000000..813426e915 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme/register.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package scheme
+
+import (
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	schema "k8s.io/apimachinery/pkg/runtime/schema"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	corev1 "k8s.io/kubernetes/pkg/api/v1"
+	appsv1beta1 "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
+	authenticationv1 "k8s.io/kubernetes/pkg/apis/authentication/v1"
+	authenticationv1beta1 "k8s.io/kubernetes/pkg/apis/authentication/v1beta1"
+	authorizationv1 "k8s.io/kubernetes/pkg/apis/authorization/v1"
+	authorizationv1beta1 "k8s.io/kubernetes/pkg/apis/authorization/v1beta1"
+	autoscalingv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
+	autoscalingv2alpha1 "k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1"
+	batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
+	batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
+	certificatesv1beta1 "k8s.io/kubernetes/pkg/apis/certificates/v1beta1"
+	extensionsv1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+	policyv1beta1 "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
+	rbacv1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
+	rbacv1beta1 "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
+	settingsv1alpha1 "k8s.io/kubernetes/pkg/apis/settings/v1alpha1"
+	storagev1 "k8s.io/kubernetes/pkg/apis/storage/v1"
+	storagev1beta1 "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
+)
+
+var Scheme = runtime.NewScheme()
+var Codecs = serializer.NewCodecFactory(Scheme)
+var ParameterCodec = runtime.NewParameterCodec(Scheme)
+
+func init() {
+	v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+	AddToScheme(Scheme)
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+//   import (
+//     "k8s.io/client-go/kubernetes"
+//     clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+//     aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+//   )
+//
+//   kclientset, _ := kubernetes.NewForConfig(c)
+//   aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+func AddToScheme(scheme *runtime.Scheme) { + corev1.AddToScheme(scheme) + appsv1beta1.AddToScheme(scheme) + authenticationv1.AddToScheme(scheme) + authenticationv1beta1.AddToScheme(scheme) + authorizationv1.AddToScheme(scheme) + authorizationv1beta1.AddToScheme(scheme) + autoscalingv1.AddToScheme(scheme) + autoscalingv2alpha1.AddToScheme(scheme) + batchv1.AddToScheme(scheme) + batchv2alpha1.AddToScheme(scheme) + certificatesv1beta1.AddToScheme(scheme) + extensionsv1beta1.AddToScheme(scheme) + policyv1beta1.AddToScheme(scheme) + rbacv1beta1.AddToScheme(scheme) + rbacv1alpha1.AddToScheme(scheme) + settingsv1alpha1.AddToScheme(scheme) + storagev1beta1.AddToScheme(scheme) + storagev1.AddToScheme(scheme) + +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/BUILD new file mode 100644 index 0000000000..77bf54c922 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/BUILD @@ -0,0 +1,46 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "apps_client.go", + "deployment.go", + "doc.go", + "generated_expansion.go", + "scale.go", + "statefulset.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/apps/v1beta1:go_default_library", + "//pkg/client/clientset_generated/clientset/scheme:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/watch", + "//vendor:k8s.io/client-go/rest", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/client/clientset_generated/clientset/typed/apps/v1beta1/fake:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/apps_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/apps_client.go new file mode 100644 index 0000000000..c15b515927 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/apps_client.go @@ -0,0 +1,98 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/apps/v1beta1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +type AppsV1beta1Interface interface { + RESTClient() rest.Interface + DeploymentsGetter + ScalesGetter + StatefulSetsGetter +} + +// AppsV1beta1Client is used to interact with features provided by the apps group. +type AppsV1beta1Client struct { + restClient rest.Interface +} + +func (c *AppsV1beta1Client) Deployments(namespace string) DeploymentInterface { + return newDeployments(c, namespace) +} + +func (c *AppsV1beta1Client) Scales(namespace string) ScaleInterface { + return newScales(c, namespace) +} + +func (c *AppsV1beta1Client) StatefulSets(namespace string) StatefulSetInterface { + return newStatefulSets(c, namespace) +} + +// NewForConfig creates a new AppsV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*AppsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AppsV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new AppsV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AppsV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AppsV1beta1Client for the given RESTClient. +func New(c rest.Interface) *AppsV1beta1Client { + return &AppsV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AppsV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/deployment.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/deployment.go new file mode 100644 index 0000000000..9e375f6851 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/deployment.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/apps/v1beta1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// DeploymentsGetter has a method to return a DeploymentInterface. +// A group's client should implement this interface. +type DeploymentsGetter interface { + Deployments(namespace string) DeploymentInterface +} + +// DeploymentInterface has methods to work with Deployment resources. +type DeploymentInterface interface { + Create(*v1beta1.Deployment) (*v1beta1.Deployment, error) + Update(*v1beta1.Deployment) (*v1beta1.Deployment, error) + UpdateStatus(*v1beta1.Deployment) (*v1beta1.Deployment, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Deployment, error) + List(opts v1.ListOptions) (*v1beta1.DeploymentList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) + DeploymentExpansion +} + +// deployments implements DeploymentInterface +type deployments struct { + client rest.Interface + ns string +} + +// newDeployments returns a Deployments +func newDeployments(c *AppsV1beta1Client, namespace string) *deployments { + return &deployments{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deployments"). + Body(deployment). + Do(). + Into(result) + return +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + Body(deployment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + SubResource("status"). + Body(deployment). + Do(). + Into(result) + return +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). 
+ Do(). + Error() +} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { + result = &v1beta1.DeploymentList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched deployment. +func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("deployments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/doc.go new file mode 100644 index 0000000000..11b5238972 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..deca5c8663 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +type DeploymentExpansion interface{} + +type ScaleExpansion interface{} + +type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/scale.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/scale.go new file mode 100644 index 0000000000..d3bf9e1032 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/scale.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// ScalesGetter has a method to return a ScaleInterface. +// A group's client should implement this interface. +type ScalesGetter interface { + Scales(namespace string) ScaleInterface +} + +// ScaleInterface has methods to work with Scale resources. +type ScaleInterface interface { + ScaleExpansion +} + +// scales implements ScaleInterface +type scales struct { + client rest.Interface + ns string +} + +// newScales returns a Scales +func newScales(c *AppsV1beta1Client, namespace string) *scales { + return &scales{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/statefulset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/statefulset.go new file mode 100644 index 0000000000..7acff8de04 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/statefulset.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/apps/v1beta1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// StatefulSetsGetter has a method to return a StatefulSetInterface. 
+// A group's client should implement this interface. +type StatefulSetsGetter interface { + StatefulSets(namespace string) StatefulSetInterface +} + +// StatefulSetInterface has methods to work with StatefulSet resources. +type StatefulSetInterface interface { + Create(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) + Update(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) + UpdateStatus(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.StatefulSet, error) + List(opts v1.ListOptions) (*v1beta1.StatefulSetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) + StatefulSetExpansion +} + +// statefulSets implements StatefulSetInterface +type statefulSets struct { + client rest.Interface + ns string +} + +// newStatefulSets returns a StatefulSets +func newStatefulSets(c *AppsV1beta1Client, namespace string) *statefulSets { + return &statefulSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Create(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { + result = &v1beta1.StatefulSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("statefulsets"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Update(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { + result = &v1beta1.StatefulSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + Body(statefulSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *statefulSets) UpdateStatus(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { + result = &v1beta1.StatefulSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + SubResource("status"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. +func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. +func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { + result = &v1beta1.StatefulSet{} + err = c.client.Get(). + Namespace(c.ns). 
+ Resource("statefulsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. +func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { + result = &v1beta1.StatefulSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested statefulSets. +func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched statefulSet. +func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) { + result = &v1beta1.StatefulSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("statefulsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/BUILD new file mode 100644 index 0000000000..0c0cc89923 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/BUILD @@ -0,0 +1,42 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "authentication_client.go", + "doc.go", + "generated_expansion.go", + "tokenreview.go", + "tokenreview_expansion.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/authentication/v1:go_default_library", + "//pkg/client/clientset_generated/clientset/scheme:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer", + "//vendor:k8s.io/client-go/rest", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/client/clientset_generated/clientset/typed/authentication/v1/fake:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/authentication_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/authentication_client.go new file mode 100644 index 0000000000..beed71ea3c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/authentication_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/apis/authentication/v1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +type AuthenticationV1Interface interface { + RESTClient() rest.Interface + TokenReviewsGetter +} + +// AuthenticationV1Client is used to interact with features provided by the authentication.k8s.io group. +type AuthenticationV1Client struct { + restClient rest.Interface +} + +func (c *AuthenticationV1Client) TokenReviews() TokenReviewInterface { + return newTokenReviews(c) +} + +// NewForConfig creates a new AuthenticationV1Client for the given config. +func NewForConfig(c *rest.Config) (*AuthenticationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AuthenticationV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AuthenticationV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AuthenticationV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuthenticationV1Client for the given RESTClient. +func New(c rest.Interface) *AuthenticationV1Client { + return &AuthenticationV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AuthenticationV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/doc.go new file mode 100644 index 0000000000..54673bfa73 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. 
+package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/generated_expansion.go new file mode 100644 index 0000000000..42e76d5e43 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/generated_expansion.go @@ -0,0 +1,17 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/tokenreview.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/tokenreview.go new file mode 100644 index 0000000000..9cfef4e6ac --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/tokenreview.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + rest "k8s.io/client-go/rest" +) + +// TokenReviewsGetter has a method to return a TokenReviewInterface. +// A group's client should implement this interface. +type TokenReviewsGetter interface { + TokenReviews() TokenReviewInterface +} + +// TokenReviewInterface has methods to work with TokenReview resources. +type TokenReviewInterface interface { + TokenReviewExpansion +} + +// tokenReviews implements TokenReviewInterface +type tokenReviews struct { + client rest.Interface +} + +// newTokenReviews returns a TokenReviews +func newTokenReviews(c *AuthenticationV1Client) *tokenReviews { + return &tokenReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/tokenreview_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/tokenreview_expansion.go new file mode 100644 index 0000000000..012749ec3e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/tokenreview_expansion.go @@ -0,0 +1,35 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + authenticationapi "k8s.io/kubernetes/pkg/apis/authentication/v1" +) + +type TokenReviewExpansion interface { + Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) +} + +func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { + result = &authenticationapi.TokenReview{} + err = c.client.Post(). + Resource("tokenreviews"). + Body(tokenReview). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/BUILD new file mode 100644 index 0000000000..24065ef6c9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/BUILD @@ -0,0 +1,42 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "authentication_client.go", + "doc.go", + "generated_expansion.go", + "tokenreview.go", + "tokenreview_expansion.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/authentication/v1beta1:go_default_library", + "//pkg/client/clientset_generated/clientset/scheme:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer", + "//vendor:k8s.io/client-go/rest", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/fake:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/authentication_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/authentication_client.go new file mode 100644 index 0000000000..b3aa17eb0b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/authentication_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/authentication/v1beta1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +type AuthenticationV1beta1Interface interface { + RESTClient() rest.Interface + TokenReviewsGetter +} + +// AuthenticationV1beta1Client is used to interact with features provided by the authentication.k8s.io group. +type AuthenticationV1beta1Client struct { + restClient rest.Interface +} + +func (c *AuthenticationV1beta1Client) TokenReviews() TokenReviewInterface { + return newTokenReviews(c) +} + +// NewForConfig creates a new AuthenticationV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*AuthenticationV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AuthenticationV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new AuthenticationV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AuthenticationV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuthenticationV1beta1Client for the given RESTClient. +func New(c rest.Interface) *AuthenticationV1beta1Client { + return &AuthenticationV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AuthenticationV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/doc.go new file mode 100644 index 0000000000..11b5238972 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. 
+package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..2b7e8ca0bf --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/generated_expansion.go @@ -0,0 +1,17 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/tokenreview.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/tokenreview.go new file mode 100644 index 0000000000..7f9f1e9fa0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/tokenreview.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// TokenReviewsGetter has a method to return a TokenReviewInterface. +// A group's client should implement this interface. +type TokenReviewsGetter interface { + TokenReviews() TokenReviewInterface +} + +// TokenReviewInterface has methods to work with TokenReview resources. +type TokenReviewInterface interface { + TokenReviewExpansion +} + +// tokenReviews implements TokenReviewInterface +type tokenReviews struct { + client rest.Interface +} + +// newTokenReviews returns a TokenReviews +func newTokenReviews(c *AuthenticationV1beta1Client) *tokenReviews { + return &tokenReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/tokenreview_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/tokenreview_expansion.go new file mode 100644 index 0000000000..35d81d80f4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/tokenreview_expansion.go @@ -0,0 +1,35 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + authenticationapi "k8s.io/kubernetes/pkg/apis/authentication/v1beta1" +) + +type TokenReviewExpansion interface { + Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) +} + +func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { + result = &authenticationapi.TokenReview{} + err = c.client.Post(). + Resource("tokenreviews"). + Body(tokenReview). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/BUILD new file mode 100644 index 0000000000..b131c50ba5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/BUILD @@ -0,0 +1,46 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "authorization_client.go", + "doc.go", + "generated_expansion.go", + "localsubjectaccessreview.go", + "localsubjectaccessreview_expansion.go", + "selfsubjectaccessreview.go", + "selfsubjectaccessreview_expansion.go", + "subjectaccessreview.go", + "subjectaccessreview_expansion.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/authorization/v1:go_default_library", + "//pkg/client/clientset_generated/clientset/scheme:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer", + "//vendor:k8s.io/client-go/rest", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/client/clientset_generated/clientset/typed/authorization/v1/fake:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/authorization_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/authorization_client.go new file mode 100644 index 0000000000..232fdc9594 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/authorization_client.go @@ -0,0 +1,98 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/apis/authorization/v1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +type AuthorizationV1Interface interface { + RESTClient() rest.Interface + LocalSubjectAccessReviewsGetter + SelfSubjectAccessReviewsGetter + SubjectAccessReviewsGetter +} + +// AuthorizationV1Client is used to interact with features provided by the authorization.k8s.io group. +type AuthorizationV1Client struct { + restClient rest.Interface +} + +func (c *AuthorizationV1Client) LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface { + return newLocalSubjectAccessReviews(c, namespace) +} + +func (c *AuthorizationV1Client) SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface { + return newSelfSubjectAccessReviews(c) +} + +func (c *AuthorizationV1Client) SubjectAccessReviews() SubjectAccessReviewInterface { + return newSubjectAccessReviews(c) +} + +// NewForConfig creates a new AuthorizationV1Client for the given config. +func NewForConfig(c *rest.Config) (*AuthorizationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AuthorizationV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AuthorizationV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AuthorizationV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuthorizationV1Client for the given RESTClient. +func New(c rest.Interface) *AuthorizationV1Client { + return &AuthorizationV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AuthorizationV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/doc.go new file mode 100644 index 0000000000..54673bfa73 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. 
+package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/generated_expansion.go new file mode 100644 index 0000000000..42e76d5e43 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/generated_expansion.go @@ -0,0 +1,17 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/localsubjectaccessreview.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/localsubjectaccessreview.go new file mode 100644 index 0000000000..b2085bceb2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/localsubjectaccessreview.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + rest "k8s.io/client-go/rest" +) + +// LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface. +// A group's client should implement this interface. +type LocalSubjectAccessReviewsGetter interface { + LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface +} + +// LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources. 
+type LocalSubjectAccessReviewInterface interface { + LocalSubjectAccessReviewExpansion +} + +// localSubjectAccessReviews implements LocalSubjectAccessReviewInterface +type localSubjectAccessReviews struct { + client rest.Interface + ns string +} + +// newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews +func newLocalSubjectAccessReviews(c *AuthorizationV1Client, namespace string) *localSubjectAccessReviews { + return &localSubjectAccessReviews{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/localsubjectaccessreview_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/localsubjectaccessreview_expansion.go new file mode 100644 index 0000000000..13dd9ff3fd --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/localsubjectaccessreview_expansion.go @@ -0,0 +1,36 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + authorizationapi "k8s.io/kubernetes/pkg/apis/authorization/v1" +) + +type LocalSubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) +} + +func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { + result = &authorizationapi.LocalSubjectAccessReview{} + err = c.client.Post(). + Namespace(c.ns). + Resource("localsubjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/selfsubjectaccessreview.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/selfsubjectaccessreview.go new file mode 100644 index 0000000000..cfb019eaaf --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/selfsubjectaccessreview.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + rest "k8s.io/client-go/rest" +) + +// SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface. +// A group's client should implement this interface. 
+type SelfSubjectAccessReviewsGetter interface { + SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface +} + +// SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources. +type SelfSubjectAccessReviewInterface interface { + SelfSubjectAccessReviewExpansion +} + +// selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface +type selfSubjectAccessReviews struct { + client rest.Interface +} + +// newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews +func newSelfSubjectAccessReviews(c *AuthorizationV1Client) *selfSubjectAccessReviews { + return &selfSubjectAccessReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/selfsubjectaccessreview_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/selfsubjectaccessreview_expansion.go new file mode 100644 index 0000000000..698d8eef43 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/selfsubjectaccessreview_expansion.go @@ -0,0 +1,35 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + authorizationapi "k8s.io/kubernetes/pkg/apis/authorization/v1" +) + +type SelfSubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) +} + +func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { + result = &authorizationapi.SelfSubjectAccessReview{} + err = c.client.Post(). + Resource("selfsubjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/subjectaccessreview.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/subjectaccessreview.go new file mode 100644 index 0000000000..08f6d60952 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/subjectaccessreview.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + rest "k8s.io/client-go/rest" +) + +// SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface. +// A group's client should implement this interface. 
+type SubjectAccessReviewsGetter interface { + SubjectAccessReviews() SubjectAccessReviewInterface +} + +// SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources. +type SubjectAccessReviewInterface interface { + SubjectAccessReviewExpansion +} + +// subjectAccessReviews implements SubjectAccessReviewInterface +type subjectAccessReviews struct { + client rest.Interface +} + +// newSubjectAccessReviews returns a SubjectAccessReviews +func newSubjectAccessReviews(c *AuthorizationV1Client) *subjectAccessReviews { + return &subjectAccessReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/subjectaccessreview_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/subjectaccessreview_expansion.go new file mode 100644 index 0000000000..eb1db89816 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/subjectaccessreview_expansion.go @@ -0,0 +1,36 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + authorizationapi "k8s.io/kubernetes/pkg/apis/authorization/v1" +) + +// The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface. +type SubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) +} + +func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { + result = &authorizationapi.SubjectAccessReview{} + err = c.client.Post(). + Resource("subjectaccessreviews"). + Body(sar). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/BUILD new file mode 100644 index 0000000000..e9d7293df6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/BUILD @@ -0,0 +1,46 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "authorization_client.go", + "doc.go", + "generated_expansion.go", + "localsubjectaccessreview.go", + "localsubjectaccessreview_expansion.go", + "selfsubjectaccessreview.go", + "selfsubjectaccessreview_expansion.go", + "subjectaccessreview.go", + "subjectaccessreview_expansion.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/authorization/v1beta1:go_default_library", + "//pkg/client/clientset_generated/clientset/scheme:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer", + "//vendor:k8s.io/client-go/rest", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/fake:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/authorization_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/authorization_client.go new file mode 100644 index 0000000000..3801cdc75d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/authorization_client.go @@ -0,0 +1,98 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/authorization/v1beta1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +type AuthorizationV1beta1Interface interface { + RESTClient() rest.Interface + LocalSubjectAccessReviewsGetter + SelfSubjectAccessReviewsGetter + SubjectAccessReviewsGetter +} + +// AuthorizationV1beta1Client is used to interact with features provided by the authorization.k8s.io group. 
+type AuthorizationV1beta1Client struct { + restClient rest.Interface +} + +func (c *AuthorizationV1beta1Client) LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface { + return newLocalSubjectAccessReviews(c, namespace) +} + +func (c *AuthorizationV1beta1Client) SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface { + return newSelfSubjectAccessReviews(c) +} + +func (c *AuthorizationV1beta1Client) SubjectAccessReviews() SubjectAccessReviewInterface { + return newSubjectAccessReviews(c) +} + +// NewForConfig creates a new AuthorizationV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*AuthorizationV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AuthorizationV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new AuthorizationV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AuthorizationV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuthorizationV1beta1Client for the given RESTClient. +func New(c rest.Interface) *AuthorizationV1beta1Client { + return &AuthorizationV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AuthorizationV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/doc.go new file mode 100644 index 0000000000..11b5238972 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. 
+package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..2b7e8ca0bf --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/generated_expansion.go @@ -0,0 +1,17 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/localsubjectaccessreview.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/localsubjectaccessreview.go new file mode 100644 index 0000000000..9b8e103419 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/localsubjectaccessreview.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface. +// A group's client should implement this interface. +type LocalSubjectAccessReviewsGetter interface { + LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface +} + +// LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources. 
+type LocalSubjectAccessReviewInterface interface { + LocalSubjectAccessReviewExpansion +} + +// localSubjectAccessReviews implements LocalSubjectAccessReviewInterface +type localSubjectAccessReviews struct { + client rest.Interface + ns string +} + +// newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews +func newLocalSubjectAccessReviews(c *AuthorizationV1beta1Client, namespace string) *localSubjectAccessReviews { + return &localSubjectAccessReviews{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go new file mode 100644 index 0000000000..3a3c64c1b8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go @@ -0,0 +1,36 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + authorizationapi "k8s.io/kubernetes/pkg/apis/authorization/v1beta1" +) + +type LocalSubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) +} + +func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { + result = &authorizationapi.LocalSubjectAccessReview{} + err = c.client.Post(). + Namespace(c.ns). + Resource("localsubjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/selfsubjectaccessreview.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/selfsubjectaccessreview.go new file mode 100644 index 0000000000..1ef3e49afe --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/selfsubjectaccessreview.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface. +// A group's client should implement this interface. 
+type SelfSubjectAccessReviewsGetter interface { + SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface +} + +// SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources. +type SelfSubjectAccessReviewInterface interface { + SelfSubjectAccessReviewExpansion +} + +// selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface +type selfSubjectAccessReviews struct { + client rest.Interface +} + +// newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews +func newSelfSubjectAccessReviews(c *AuthorizationV1beta1Client) *selfSubjectAccessReviews { + return &selfSubjectAccessReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go new file mode 100644 index 0000000000..a64678243e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go @@ -0,0 +1,35 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + authorizationapi "k8s.io/kubernetes/pkg/apis/authorization/v1beta1" +) + +type SelfSubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) +} + +func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { + result = &authorizationapi.SelfSubjectAccessReview{} + err = c.client.Post(). + Resource("selfsubjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/subjectaccessreview.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/subjectaccessreview.go new file mode 100644 index 0000000000..cd60e9df6b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/subjectaccessreview.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface. 
+// A group's client should implement this interface. +type SubjectAccessReviewsGetter interface { + SubjectAccessReviews() SubjectAccessReviewInterface +} + +// SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources. +type SubjectAccessReviewInterface interface { + SubjectAccessReviewExpansion +} + +// subjectAccessReviews implements SubjectAccessReviewInterface +type subjectAccessReviews struct { + client rest.Interface +} + +// newSubjectAccessReviews returns a SubjectAccessReviews +func newSubjectAccessReviews(c *AuthorizationV1beta1Client) *subjectAccessReviews { + return &subjectAccessReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/subjectaccessreview_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/subjectaccessreview_expansion.go new file mode 100644 index 0000000000..bf57a7b07d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/subjectaccessreview_expansion.go @@ -0,0 +1,36 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + authorizationapi "k8s.io/kubernetes/pkg/apis/authorization/v1beta1" +) + +// The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface. +type SubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) +} + +func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { + result = &authorizationapi.SubjectAccessReview{} + err = c.client.Post(). + Resource("subjectaccessreviews"). + Body(sar). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/BUILD new file mode 100644 index 0000000000..1bd2c93c60 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/BUILD @@ -0,0 +1,44 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "autoscaling_client.go", + "doc.go", + "generated_expansion.go", + "horizontalpodautoscaler.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/autoscaling/v1:go_default_library", + "//pkg/client/clientset_generated/clientset/scheme:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/watch", + "//vendor:k8s.io/client-go/rest", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/client/clientset_generated/clientset/typed/autoscaling/v1/fake:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/autoscaling_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/autoscaling_client.go new file mode 100644 index 0000000000..7ea617b8a1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/autoscaling_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +type AutoscalingV1Interface interface { + RESTClient() rest.Interface + HorizontalPodAutoscalersGetter +} + +// AutoscalingV1Client is used to interact with features provided by the autoscaling group. +type AutoscalingV1Client struct { + restClient rest.Interface +} + +func (c *AutoscalingV1Client) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { + return newHorizontalPodAutoscalers(c, namespace) +} + +// NewForConfig creates a new AutoscalingV1Client for the given config. 
+func NewForConfig(c *rest.Config) (*AutoscalingV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AutoscalingV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AutoscalingV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AutoscalingV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AutoscalingV1Client for the given RESTClient. +func New(c rest.Interface) *AutoscalingV1Client { + return &AutoscalingV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AutoscalingV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/doc.go new file mode 100644 index 0000000000..54673bfa73 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/generated_expansion.go new file mode 100644 index 0000000000..effefbd50b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +type HorizontalPodAutoscalerExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/horizontalpodautoscaler.go new file mode 100644 index 0000000000..1651dbf191 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/horizontalpodautoscaler.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. +// A group's client should implement this interface. +type HorizontalPodAutoscalersGetter interface { + HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface +} + +// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. +type HorizontalPodAutoscalerInterface interface { + Create(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) + Update(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) + UpdateStatus(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.HorizontalPodAutoscaler, error) + List(opts meta_v1.ListOptions) (*v1.HorizontalPodAutoscalerList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) + HorizontalPodAutoscalerExpansion +} + +// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type horizontalPodAutoscalers struct { + client rest.Interface + ns string +} + +// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers +func newHorizontalPodAutoscalers(c *AutoscalingV1Client, namespace string) *horizontalPodAutoscalers { + return &horizontalPodAutoscalers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Post(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Body(horizontalPodAutoscaler). + Do(). 
+ Into(result) + return +} + +// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + SubResource("status"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. +func (c *horizontalPodAutoscalers) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *horizontalPodAutoscalers) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. +func (c *horizontalPodAutoscalers) Get(name string, options meta_v1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. +func (c *horizontalPodAutoscalers) List(opts meta_v1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { + result = &v1.HorizontalPodAutoscalerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *horizontalPodAutoscalers) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched horizontalPodAutoscaler. +func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). 
+ SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/BUILD new file mode 100644 index 0000000000..1e89dfe12d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/BUILD @@ -0,0 +1,44 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "autoscaling_client.go", + "doc.go", + "generated_expansion.go", + "horizontalpodautoscaler.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/autoscaling/v2alpha1:go_default_library", + "//pkg/client/clientset_generated/clientset/scheme:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/watch", + "//vendor:k8s.io/client-go/rest", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/fake:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/autoscaling_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/autoscaling_client.go new file mode 100644 index 0000000000..90ce24f306 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/autoscaling_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" + v2alpha1 "k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +type AutoscalingV2alpha1Interface interface { + RESTClient() rest.Interface + HorizontalPodAutoscalersGetter +} + +// AutoscalingV2alpha1Client is used to interact with features provided by the autoscaling group. +type AutoscalingV2alpha1Client struct { + restClient rest.Interface +} + +func (c *AutoscalingV2alpha1Client) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { + return newHorizontalPodAutoscalers(c, namespace) +} + +// NewForConfig creates a new AutoscalingV2alpha1Client for the given config. 
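// --- Editor's aside, not part of the vendored file: a minimal sketch of how the
// generated AutoscalingV2alpha1 client below might be constructed and used. The
// kubeconfig path, namespace, and the clientcmd helper are assumptions made for
// the example; only NewForConfig and HorizontalPodAutoscalers come from this file. ---
package main

import (
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
	autoscalingv2alpha1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1"
)

func main() {
	// Build a rest.Config from a local kubeconfig (the path is illustrative).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		log.Fatalf("building config: %v", err)
	}
	// NewForConfig copies the config, fills in the autoscaling/v2alpha1 defaults,
	// and returns a client scoped to that group/version.
	client, err := autoscalingv2alpha1.NewForConfig(cfg)
	if err != nil {
		log.Fatalf("creating autoscaling/v2alpha1 client: %v", err)
	}
	hpas, err := client.HorizontalPodAutoscalers("default").List(metav1.ListOptions{})
	if err != nil {
		log.Fatalf("listing HorizontalPodAutoscalers: %v", err)
	}
	for _, hpa := range hpas.Items {
		fmt.Println(hpa.Name)
	}
}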
+func NewForConfig(c *rest.Config) (*AutoscalingV2alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AutoscalingV2alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new AutoscalingV2alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AutoscalingV2alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AutoscalingV2alpha1Client for the given RESTClient. +func New(c rest.Interface) *AutoscalingV2alpha1Client { + return &AutoscalingV2alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v2alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AutoscalingV2alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/doc.go new file mode 100644 index 0000000000..d29bd3f4e1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v2alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/generated_expansion.go new file mode 100644 index 0000000000..e40f2c5a13 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v2alpha1 + +type HorizontalPodAutoscalerExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/horizontalpodautoscaler.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/horizontalpodautoscaler.go new file mode 100644 index 0000000000..d359517e9f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/horizontalpodautoscaler.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v2alpha1 "k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. +// A group's client should implement this interface. +type HorizontalPodAutoscalersGetter interface { + HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface +} + +// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. +type HorizontalPodAutoscalerInterface interface { + Create(*v2alpha1.HorizontalPodAutoscaler) (*v2alpha1.HorizontalPodAutoscaler, error) + Update(*v2alpha1.HorizontalPodAutoscaler) (*v2alpha1.HorizontalPodAutoscaler, error) + UpdateStatus(*v2alpha1.HorizontalPodAutoscaler) (*v2alpha1.HorizontalPodAutoscaler, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v2alpha1.HorizontalPodAutoscaler, error) + List(opts v1.ListOptions) (*v2alpha1.HorizontalPodAutoscalerList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.HorizontalPodAutoscaler, err error) + HorizontalPodAutoscalerExpansion +} + +// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type horizontalPodAutoscalers struct { + client rest.Interface + ns string +} + +// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers +func newHorizontalPodAutoscalers(c *AutoscalingV2alpha1Client, namespace string) *horizontalPodAutoscalers { + return &horizontalPodAutoscalers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2alpha1.HorizontalPodAutoscaler) (result *v2alpha1.HorizontalPodAutoscaler, err error) { + result = &v2alpha1.HorizontalPodAutoscaler{} + err = c.client.Post(). + Namespace(c.ns). 
+ Resource("horizontalpodautoscalers"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2alpha1.HorizontalPodAutoscaler) (result *v2alpha1.HorizontalPodAutoscaler, err error) { + result = &v2alpha1.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2alpha1.HorizontalPodAutoscaler) (result *v2alpha1.HorizontalPodAutoscaler, err error) { + result = &v2alpha1.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + SubResource("status"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. +func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. +func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2alpha1.HorizontalPodAutoscaler, err error) { + result = &v2alpha1.HorizontalPodAutoscaler{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. +func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2alpha1.HorizontalPodAutoscalerList, err error) { + result = &v2alpha1.HorizontalPodAutoscalerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched horizontalPodAutoscaler. 
+func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.HorizontalPodAutoscaler, err error) { + result = &v2alpha1.HorizontalPodAutoscaler{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/BUILD new file mode 100644 index 0000000000..6c41977996 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/BUILD @@ -0,0 +1,44 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "batch_client.go", + "doc.go", + "generated_expansion.go", + "job.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/batch/v1:go_default_library", + "//pkg/client/clientset_generated/clientset/scheme:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/watch", + "//vendor:k8s.io/client-go/rest", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/client/clientset_generated/clientset/typed/batch/v1/fake:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/batch_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/batch_client.go new file mode 100644 index 0000000000..a269f00631 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/batch_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/apis/batch/v1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +type BatchV1Interface interface { + RESTClient() rest.Interface + JobsGetter +} + +// BatchV1Client is used to interact with features provided by the batch group. +type BatchV1Client struct { + restClient rest.Interface +} + +func (c *BatchV1Client) Jobs(namespace string) JobInterface { + return newJobs(c, namespace) +} + +// NewForConfig creates a new BatchV1Client for the given config. 
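// --- Editor's aside, not part of the vendored file: a sketch of constructing the
// generated BatchV1 client below from inside a cluster and listing Jobs. The
// namespace and label selector are illustrative assumptions; rest.InClusterConfig
// comes from client-go, not from this file. ---
package main

import (
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
	batchv1client "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1"
)

func main() {
	// In-cluster config uses the pod's service account token and the
	// KUBERNETES_SERVICE_HOST/PORT environment variables.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatalf("in-cluster config: %v", err)
	}
	client, err := batchv1client.NewForConfig(cfg)
	if err != nil {
		log.Fatalf("creating batch/v1 client: %v", err)
	}
	jobs, err := client.Jobs("default").List(metav1.ListOptions{LabelSelector: "app=demo"})
	if err != nil {
		log.Fatalf("listing jobs: %v", err)
	}
	for _, job := range jobs.Items {
		fmt.Printf("%s: %d succeeded\n", job.Name, job.Status.Succeeded)
	}
}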
+func NewForConfig(c *rest.Config) (*BatchV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &BatchV1Client{client}, nil +} + +// NewForConfigOrDie creates a new BatchV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *BatchV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new BatchV1Client for the given RESTClient. +func New(c rest.Interface) *BatchV1Client { + return &BatchV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *BatchV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/doc.go new file mode 100644 index 0000000000..54673bfa73 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/generated_expansion.go new file mode 100644 index 0000000000..68d7741fa0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +type JobExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/job.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/job.go new file mode 100644 index 0000000000..63b223ce4b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/job.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/apis/batch/v1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// JobsGetter has a method to return a JobInterface. +// A group's client should implement this interface. +type JobsGetter interface { + Jobs(namespace string) JobInterface +} + +// JobInterface has methods to work with Job resources. +type JobInterface interface { + Create(*v1.Job) (*v1.Job, error) + Update(*v1.Job) (*v1.Job, error) + UpdateStatus(*v1.Job) (*v1.Job, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Job, error) + List(opts meta_v1.ListOptions) (*v1.JobList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error) + JobExpansion +} + +// jobs implements JobInterface +type jobs struct { + client rest.Interface + ns string +} + +// newJobs returns a Jobs +func newJobs(c *BatchV1Client, namespace string) *jobs { + return &jobs{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. +func (c *jobs) Create(job *v1.Job) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Post(). + Namespace(c.ns). + Resource("jobs"). + Body(job). + Do(). + Into(result) + return +} + +// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. +func (c *jobs) Update(job *v1.Job) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Put(). + Namespace(c.ns). + Resource("jobs"). + Name(job.Name). + Body(job). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *jobs) UpdateStatus(job *v1.Job) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Put(). + Namespace(c.ns). + Resource("jobs"). + Name(job.Name). + SubResource("status"). + Body(job). + Do(). + Into(result) + return +} + +// Delete takes name of the job and deletes it. 
Returns an error if one occurs. +func (c *jobs) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("jobs"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *jobs) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the job, and returns the corresponding job object, and an error if there is any. +func (c *jobs) Get(name string, options meta_v1.GetOptions) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Jobs that match those selectors. +func (c *jobs) List(opts meta_v1.ListOptions) (result *v1.JobList, err error) { + result = &v1.JobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested jobs. +func (c *jobs) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched job. +func (c *jobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("jobs"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/BUILD new file mode 100644 index 0000000000..3526694b7a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/BUILD @@ -0,0 +1,44 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "batch_client.go", + "cronjob.go", + "doc.go", + "generated_expansion.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/batch/v2alpha1:go_default_library", + "//pkg/client/clientset_generated/clientset/scheme:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/watch", + "//vendor:k8s.io/client-go/rest", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/fake:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/batch_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/batch_client.go new file mode 100644 index 0000000000..bd0e365e4e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/batch_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" + v2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +type BatchV2alpha1Interface interface { + RESTClient() rest.Interface + CronJobsGetter +} + +// BatchV2alpha1Client is used to interact with features provided by the batch group. +type BatchV2alpha1Client struct { + restClient rest.Interface +} + +func (c *BatchV2alpha1Client) CronJobs(namespace string) CronJobInterface { + return newCronJobs(c, namespace) +} + +// NewForConfig creates a new BatchV2alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*BatchV2alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &BatchV2alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new BatchV2alpha1Client for the given config and +// panics if there is an error in the config. 
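// --- Editor's aside, not part of the vendored file: NewForConfigOrDie (implemented
// just below) trades the error return for a panic, which can be convenient in
// initialization paths that cannot proceed without a client. The kubeconfig path
// and namespace here are assumptions for the example. ---
package main

import (
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
	batchv2alpha1client "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		log.Fatalf("building config: %v", err)
	}
	// Panics instead of returning an error if the config cannot be turned
	// into a REST client.
	client := batchv2alpha1client.NewForConfigOrDie(cfg)
	cronJobs, err := client.CronJobs("default").List(metav1.ListOptions{})
	if err != nil {
		log.Fatalf("listing cronjobs: %v", err)
	}
	for _, cj := range cronJobs.Items {
		fmt.Printf("%s runs on schedule %q\n", cj.Name, cj.Spec.Schedule)
	}
}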
+func NewForConfigOrDie(c *rest.Config) *BatchV2alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new BatchV2alpha1Client for the given RESTClient. +func New(c rest.Interface) *BatchV2alpha1Client { + return &BatchV2alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v2alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *BatchV2alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/cronjob.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/cronjob.go new file mode 100644 index 0000000000..bcf2b75f70 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/cronjob.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// CronJobsGetter has a method to return a CronJobInterface. +// A group's client should implement this interface. +type CronJobsGetter interface { + CronJobs(namespace string) CronJobInterface +} + +// CronJobInterface has methods to work with CronJob resources. +type CronJobInterface interface { + Create(*v2alpha1.CronJob) (*v2alpha1.CronJob, error) + Update(*v2alpha1.CronJob) (*v2alpha1.CronJob, error) + UpdateStatus(*v2alpha1.CronJob) (*v2alpha1.CronJob, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v2alpha1.CronJob, error) + List(opts v1.ListOptions) (*v2alpha1.CronJobList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) + CronJobExpansion +} + +// cronJobs implements CronJobInterface +type cronJobs struct { + client rest.Interface + ns string +} + +// newCronJobs returns a CronJobs +func newCronJobs(c *BatchV2alpha1Client, namespace string) *cronJobs { + return &cronJobs{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. 
+func (c *cronJobs) Create(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { + result = &v2alpha1.CronJob{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cronjobs"). + Body(cronJob). + Do(). + Into(result) + return +} + +// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. +func (c *cronJobs) Update(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { + result = &v2alpha1.CronJob{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cronjobs"). + Name(cronJob.Name). + Body(cronJob). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *cronJobs) UpdateStatus(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { + result = &v2alpha1.CronJob{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cronjobs"). + Name(cronJob.Name). + SubResource("status"). + Body(cronJob). + Do(). + Into(result) + return +} + +// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. +func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cronjobs"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cronjobs"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. +func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v2alpha1.CronJob, err error) { + result = &v2alpha1.CronJob{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cronjobs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CronJobs that match those selectors. +func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) { + result = &v2alpha1.CronJobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cronjobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cronJobs. +func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cronjobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched cronJob. +func (c *cronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) { + result = &v2alpha1.CronJob{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cronjobs"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/doc.go new file mode 100644 index 0000000000..d29bd3f4e1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v2alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/generated_expansion.go new file mode 100644 index 0000000000..078027ef49 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v2alpha1 + +type CronJobExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/BUILD new file mode 100644 index 0000000000..89804d8a98 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/BUILD @@ -0,0 +1,45 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "certificates_client.go", + "certificatesigningrequest.go", + "certificatesigningrequest_expansion.go", + "doc.go", + "generated_expansion.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/certificates/v1beta1:go_default_library", + "//pkg/client/clientset_generated/clientset/scheme:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/watch", + "//vendor:k8s.io/client-go/rest", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/fake:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/certificates_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/certificates_client.go new file mode 100644 index 0000000000..15151f5179 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/certificates_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/certificates/v1beta1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +type CertificatesV1beta1Interface interface { + RESTClient() rest.Interface + CertificateSigningRequestsGetter +} + +// CertificatesV1beta1Client is used to interact with features provided by the certificates.k8s.io group. +type CertificatesV1beta1Client struct { + restClient rest.Interface +} + +func (c *CertificatesV1beta1Client) CertificateSigningRequests() CertificateSigningRequestInterface { + return newCertificateSigningRequests(c) +} + +// NewForConfig creates a new CertificatesV1beta1Client for the given config. 
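// --- Editor's aside, not part of the vendored file: a sketch of using the generated
// certificates.k8s.io/v1beta1 client defined below. CertificateSigningRequests is a
// cluster-scoped resource, so no namespace is passed; the kubeconfig path is an
// assumption for the example. ---
package main

import (
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
	certificatesv1beta1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		log.Fatalf("building config: %v", err)
	}
	client, err := certificatesv1beta1.NewForConfig(cfg)
	if err != nil {
		log.Fatalf("creating certificates/v1beta1 client: %v", err)
	}
	csrs, err := client.CertificateSigningRequests().List(metav1.ListOptions{})
	if err != nil {
		log.Fatalf("listing CSRs: %v", err)
	}
	for _, csr := range csrs.Items {
		// A CSR with no conditions has been neither approved nor denied yet.
		fmt.Printf("%s (%d conditions)\n", csr.Name, len(csr.Status.Conditions))
	}
}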
+func NewForConfig(c *rest.Config) (*CertificatesV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CertificatesV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new CertificatesV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CertificatesV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CertificatesV1beta1Client for the given RESTClient. +func New(c rest.Interface) *CertificatesV1beta1Client { + return &CertificatesV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CertificatesV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/certificatesigningrequest.go new file mode 100644 index 0000000000..e30b929eaa --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/certificatesigningrequest.go @@ -0,0 +1,161 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/certificates/v1beta1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// CertificateSigningRequestsGetter has a method to return a CertificateSigningRequestInterface. +// A group's client should implement this interface. +type CertificateSigningRequestsGetter interface { + CertificateSigningRequests() CertificateSigningRequestInterface +} + +// CertificateSigningRequestInterface has methods to work with CertificateSigningRequest resources. 
+type CertificateSigningRequestInterface interface { + Create(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) + Update(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) + UpdateStatus(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.CertificateSigningRequest, error) + List(opts v1.ListOptions) (*v1beta1.CertificateSigningRequestList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) + CertificateSigningRequestExpansion +} + +// certificateSigningRequests implements CertificateSigningRequestInterface +type certificateSigningRequests struct { + client rest.Interface +} + +// newCertificateSigningRequests returns a CertificateSigningRequests +func newCertificateSigningRequests(c *CertificatesV1beta1Client) *certificateSigningRequests { + return &certificateSigningRequests{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. +func (c *certificateSigningRequests) Create(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { + result = &v1beta1.CertificateSigningRequest{} + err = c.client.Post(). + Resource("certificatesigningrequests"). + Body(certificateSigningRequest). + Do(). + Into(result) + return +} + +// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. +func (c *certificateSigningRequests) Update(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { + result = &v1beta1.CertificateSigningRequest{} + err = c.client.Put(). + Resource("certificatesigningrequests"). + Name(certificateSigningRequest.Name). + Body(certificateSigningRequest). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *certificateSigningRequests) UpdateStatus(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { + result = &v1beta1.CertificateSigningRequest{} + err = c.client.Put(). + Resource("certificatesigningrequests"). + Name(certificateSigningRequest.Name). + SubResource("status"). + Body(certificateSigningRequest). + Do(). + Into(result) + return +} + +// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. +func (c *certificateSigningRequests) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("certificatesigningrequests"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *certificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("certificatesigningrequests"). 
+ VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. +func (c *certificateSigningRequests) Get(name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { + result = &v1beta1.CertificateSigningRequest{} + err = c.client.Get(). + Resource("certificatesigningrequests"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. +func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { + result = &v1beta1.CertificateSigningRequestList{} + err = c.client.Get(). + Resource("certificatesigningrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested certificateSigningRequests. +func (c *certificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("certificatesigningrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched certificateSigningRequest. +func (c *certificateSigningRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { + result = &v1beta1.CertificateSigningRequest{} + err = c.client.Patch(pt). + Resource("certificatesigningrequests"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/certificatesigningrequest_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/certificatesigningrequest_expansion.go new file mode 100644 index 0000000000..e7db459866 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/certificatesigningrequest_expansion.go @@ -0,0 +1,37 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + certificates "k8s.io/kubernetes/pkg/apis/certificates/v1beta1" +) + +type CertificateSigningRequestExpansion interface { + UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) +} + +func (c *certificateSigningRequests) UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) { + result = &certificates.CertificateSigningRequest{} + err = c.client.Put(). + Resource("certificatesigningrequests"). 
+ Name(certificateSigningRequest.Name). + Body(certificateSigningRequest). + SubResource("approval"). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/doc.go new file mode 100644 index 0000000000..11b5238972 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..2b7e8ca0bf --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/generated_expansion.go @@ -0,0 +1,17 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/BUILD new file mode 100644 index 0000000000..bbac9ef302 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/BUILD @@ -0,0 +1,69 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "componentstatus.go", + "configmap.go", + "core_client.go", + "doc.go", + "endpoints.go", + "event.go", + "event_expansion.go", + "generated_expansion.go", + "limitrange.go", + "namespace.go", + "namespace_expansion.go", + "node.go", + "node_expansion.go", + "persistentvolume.go", + "persistentvolumeclaim.go", + "pod.go", + "pod_expansion.go", + "podtemplate.go", + "replicationcontroller.go", + "resourcequota.go", + "secret.go", + "service.go", + "service_expansion.go", + "serviceaccount.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + "//pkg/api/v1:go_default_library", + "//pkg/apis/policy/v1beta1:go_default_library", + "//pkg/client/clientset_generated/clientset/scheme:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/fields", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/util/net", + "//vendor:k8s.io/apimachinery/pkg/watch", + "//vendor:k8s.io/client-go/rest", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/client/clientset_generated/clientset/typed/core/v1/fake:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/componentstatus.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/componentstatus.go new file mode 100644 index 0000000000..1e4d6e0fe8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/componentstatus.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/api/v1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// ComponentStatusesGetter has a method to return a ComponentStatusInterface. +// A group's client should implement this interface. 
+type ComponentStatusesGetter interface { + ComponentStatuses() ComponentStatusInterface +} + +// ComponentStatusInterface has methods to work with ComponentStatus resources. +type ComponentStatusInterface interface { + Create(*v1.ComponentStatus) (*v1.ComponentStatus, error) + Update(*v1.ComponentStatus) (*v1.ComponentStatus, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ComponentStatus, error) + List(opts meta_v1.ListOptions) (*v1.ComponentStatusList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ComponentStatus, err error) + ComponentStatusExpansion +} + +// componentStatuses implements ComponentStatusInterface +type componentStatuses struct { + client rest.Interface +} + +// newComponentStatuses returns a ComponentStatuses +func newComponentStatuses(c *CoreV1Client) *componentStatuses { + return &componentStatuses{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. +func (c *componentStatuses) Create(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Post(). + Resource("componentstatuses"). + Body(componentStatus). + Do(). + Into(result) + return +} + +// Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. +func (c *componentStatuses) Update(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Put(). + Resource("componentstatuses"). + Name(componentStatus.Name). + Body(componentStatus). + Do(). + Into(result) + return +} + +// Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. +func (c *componentStatuses) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Resource("componentstatuses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *componentStatuses) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Resource("componentstatuses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. +func (c *componentStatuses) Get(name string, options meta_v1.GetOptions) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Get(). + Resource("componentstatuses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. +func (c *componentStatuses) List(opts meta_v1.ListOptions) (result *v1.ComponentStatusList, err error) { + result = &v1.ComponentStatusList{} + err = c.client.Get(). + Resource("componentstatuses"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested componentStatuses. +func (c *componentStatuses) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("componentstatuses"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched componentStatus. +func (c *componentStatuses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Patch(pt). + Resource("componentstatuses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/configmap.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/configmap.go new file mode 100644 index 0000000000..7175491288 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/configmap.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/api/v1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// ConfigMapsGetter has a method to return a ConfigMapInterface. +// A group's client should implement this interface. +type ConfigMapsGetter interface { + ConfigMaps(namespace string) ConfigMapInterface +} + +// ConfigMapInterface has methods to work with ConfigMap resources. +type ConfigMapInterface interface { + Create(*v1.ConfigMap) (*v1.ConfigMap, error) + Update(*v1.ConfigMap) (*v1.ConfigMap, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ConfigMap, error) + List(opts meta_v1.ListOptions) (*v1.ConfigMapList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error) + ConfigMapExpansion +} + +// configMaps implements ConfigMapInterface +type configMaps struct { + client rest.Interface + ns string +} + +// newConfigMaps returns a ConfigMaps +func newConfigMaps(c *CoreV1Client, namespace string) *configMaps { + return &configMaps{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. +func (c *configMaps) Create(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Post(). + Namespace(c.ns). + Resource("configmaps"). + Body(configMap). 
+ Do(). + Into(result) + return +} + +// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. +func (c *configMaps) Update(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Put(). + Namespace(c.ns). + Resource("configmaps"). + Name(configMap.Name). + Body(configMap). + Do(). + Into(result) + return +} + +// Delete takes name of the configMap and deletes it. Returns an error if one occurs. +func (c *configMaps) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("configmaps"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *configMaps) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. +func (c *configMaps) Get(name string, options meta_v1.GetOptions) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Get(). + Namespace(c.ns). + Resource("configmaps"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. +func (c *configMaps) List(opts meta_v1.ListOptions) (result *v1.ConfigMapList, err error) { + result = &v1.ConfigMapList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested configMaps. +func (c *configMaps) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched configMap. +func (c *configMaps) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("configmaps"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/core_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/core_client.go new file mode 100644 index 0000000000..6fdcaf128c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/core_client.go @@ -0,0 +1,163 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +type CoreV1Interface interface { + RESTClient() rest.Interface + ComponentStatusesGetter + ConfigMapsGetter + EndpointsGetter + EventsGetter + LimitRangesGetter + NamespacesGetter + NodesGetter + PersistentVolumesGetter + PersistentVolumeClaimsGetter + PodsGetter + PodTemplatesGetter + ReplicationControllersGetter + ResourceQuotasGetter + SecretsGetter + ServicesGetter + ServiceAccountsGetter +} + +// CoreV1Client is used to interact with features provided by the group. +type CoreV1Client struct { + restClient rest.Interface +} + +func (c *CoreV1Client) ComponentStatuses() ComponentStatusInterface { + return newComponentStatuses(c) +} + +func (c *CoreV1Client) ConfigMaps(namespace string) ConfigMapInterface { + return newConfigMaps(c, namespace) +} + +func (c *CoreV1Client) Endpoints(namespace string) EndpointsInterface { + return newEndpoints(c, namespace) +} + +func (c *CoreV1Client) Events(namespace string) EventInterface { + return newEvents(c, namespace) +} + +func (c *CoreV1Client) LimitRanges(namespace string) LimitRangeInterface { + return newLimitRanges(c, namespace) +} + +func (c *CoreV1Client) Namespaces() NamespaceInterface { + return newNamespaces(c) +} + +func (c *CoreV1Client) Nodes() NodeInterface { + return newNodes(c) +} + +func (c *CoreV1Client) PersistentVolumes() PersistentVolumeInterface { + return newPersistentVolumes(c) +} + +func (c *CoreV1Client) PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface { + return newPersistentVolumeClaims(c, namespace) +} + +func (c *CoreV1Client) Pods(namespace string) PodInterface { + return newPods(c, namespace) +} + +func (c *CoreV1Client) PodTemplates(namespace string) PodTemplateInterface { + return newPodTemplates(c, namespace) +} + +func (c *CoreV1Client) ReplicationControllers(namespace string) ReplicationControllerInterface { + return newReplicationControllers(c, namespace) +} + +func (c *CoreV1Client) ResourceQuotas(namespace string) ResourceQuotaInterface { + return newResourceQuotas(c, namespace) +} + +func (c *CoreV1Client) Secrets(namespace string) SecretInterface { + return newSecrets(c, namespace) +} + +func (c *CoreV1Client) Services(namespace string) ServiceInterface { + return newServices(c, namespace) +} + +func (c *CoreV1Client) ServiceAccounts(namespace string) ServiceAccountInterface { + return newServiceAccounts(c, namespace) +} + +// NewForConfig creates a new CoreV1Client for the given config. +func NewForConfig(c *rest.Config) (*CoreV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoreV1Client{client}, nil +} + +// NewForConfigOrDie creates a new CoreV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CoreV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreV1Client for the given RESTClient. 
+func New(c rest.Interface) *CoreV1Client { + return &CoreV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/api" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CoreV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/doc.go new file mode 100644 index 0000000000..54673bfa73 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/endpoints.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/endpoints.go new file mode 100644 index 0000000000..8b06ed802e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/endpoints.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/api/v1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// EndpointsGetter has a method to return a EndpointsInterface. +// A group's client should implement this interface. +type EndpointsGetter interface { + Endpoints(namespace string) EndpointsInterface +} + +// EndpointsInterface has methods to work with Endpoints resources. 
+type EndpointsInterface interface { + Create(*v1.Endpoints) (*v1.Endpoints, error) + Update(*v1.Endpoints) (*v1.Endpoints, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Endpoints, error) + List(opts meta_v1.ListOptions) (*v1.EndpointsList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error) + EndpointsExpansion +} + +// endpoints implements EndpointsInterface +type endpoints struct { + client rest.Interface + ns string +} + +// newEndpoints returns a Endpoints +func newEndpoints(c *CoreV1Client, namespace string) *endpoints { + return &endpoints{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. +func (c *endpoints) Create(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Post(). + Namespace(c.ns). + Resource("endpoints"). + Body(endpoints). + Do(). + Into(result) + return +} + +// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. +func (c *endpoints) Update(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Put(). + Namespace(c.ns). + Resource("endpoints"). + Name(endpoints.Name). + Body(endpoints). + Do(). + Into(result) + return +} + +// Delete takes name of the endpoints and deletes it. Returns an error if one occurs. +func (c *endpoints) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpoints"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *endpoints) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. +func (c *endpoints) Get(name string, options meta_v1.GetOptions) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Endpoints that match those selectors. +func (c *endpoints) List(opts meta_v1.ListOptions) (result *v1.EndpointsList, err error) { + result = &v1.EndpointsList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested endpoints. +func (c *endpoints) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched endpoints. 
+func (c *endpoints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("endpoints"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/event.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/event.go new file mode 100644 index 0000000000..72544caea8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/event.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/api/v1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// EventsGetter has a method to return a EventInterface. +// A group's client should implement this interface. +type EventsGetter interface { + Events(namespace string) EventInterface +} + +// EventInterface has methods to work with Event resources. +type EventInterface interface { + Create(*v1.Event) (*v1.Event, error) + Update(*v1.Event) (*v1.Event, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Event, error) + List(opts meta_v1.ListOptions) (*v1.EventList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error) + EventExpansion +} + +// events implements EventInterface +type events struct { + client rest.Interface + ns string +} + +// newEvents returns a Events +func newEvents(c *CoreV1Client, namespace string) *events { + return &events{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Create(event *v1.Event) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Post(). + Namespace(c.ns). + Resource("events"). + Body(event). + Do(). + Into(result) + return +} + +// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Update(event *v1.Event) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Put(). + Namespace(c.ns). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return +} + +// Delete takes name of the event and deletes it. Returns an error if one occurs. 
+func (c *events) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *events) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the event, and returns the corresponding event object, and an error if there is any. +func (c *events) Get(name string, options meta_v1.GetOptions) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Events that match those selectors. +func (c *events) List(opts meta_v1.ListOptions) (result *v1.EventList, err error) { + result = &v1.EventList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested events. +func (c *events) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched event. +func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("events"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/event_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/event_expansion.go new file mode 100644 index 0000000000..5182e0768f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/event_expansion.go @@ -0,0 +1,163 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/api/v1" +) + +// The EventExpansion interface allows manually adding extra methods to the EventInterface. +type EventExpansion interface { + // CreateWithEventNamespace is the same as a Create, except that it sends the request to the event.Namespace. 
+ CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) + // UpdateWithEventNamespace is the same as a Update, except that it sends the request to the event.Namespace. + UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) + PatchWithEventNamespace(event *v1.Event, data []byte) (*v1.Event, error) + // Search finds events about the specified object + Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) + // Returns the appropriate field selector based on the API version being used to communicate with the server. + // The returned field selector can be used with List and Watch to filter desired events. + GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector +} + +// CreateWithEventNamespace makes a new event. Returns the copy of the event the server returns, +// or an error. The namespace to create the event within is deduced from the +// event; it must either match this event client's namespace, or this event +// client must have been created with the "" namespace. +func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { + if e.ns != "" && event.Namespace != e.ns { + return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + } + result := &v1.Event{} + err := e.client.Post(). + NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Body(event). + Do(). + Into(result) + return result, err +} + +// UpdateWithEventNamespace modifies an existing event. It returns the copy of the event that the server returns, +// or an error. The namespace and key to update the event within is deduced from the event. The +// namespace must either match this event client's namespace, or this event client must have been +// created with the "" namespace. Update also requires the ResourceVersion to be set in the event +// object. +func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { + result := &v1.Event{} + err := e.client.Put(). + NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return result, err +} + +// PatchWithEventNamespace modifies an existing event. It returns the copy of +// the event that the server returns, or an error. The namespace and name of the +// target event is deduced from the incompleteEvent. The namespace must either +// match this event client's namespace, or this event client must have been +// created with the "" namespace. +func (e *events) PatchWithEventNamespace(incompleteEvent *v1.Event, data []byte) (*v1.Event, error) { + if e.ns != "" && incompleteEvent.Namespace != e.ns { + return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", incompleteEvent.Namespace, e.ns) + } + result := &v1.Event{} + err := e.client.Patch(types.StrategicMergePatchType). + NamespaceIfScoped(incompleteEvent.Namespace, len(incompleteEvent.Namespace) > 0). + Resource("events"). + Name(incompleteEvent.Name). + Body(data). + Do(). + Into(result) + return result, err +} + +// Search finds events about the specified object. The namespace of the +// object must match this event's client namespace unless the event client +// was made with the "" namespace. 
+func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) { + ref, err := v1.GetReference(scheme, objOrRef) + if err != nil { + return nil, err + } + if e.ns != "" && ref.Namespace != e.ns { + return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.ns) + } + stringRefKind := string(ref.Kind) + var refKind *string + if stringRefKind != "" { + refKind = &stringRefKind + } + stringRefUID := string(ref.UID) + var refUID *string + if stringRefUID != "" { + refUID = &stringRefUID + } + fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID) + return e.List(metav1.ListOptions{FieldSelector: fieldSelector.String()}) +} + +// Returns the appropriate field selector based on the API version being used to communicate with the server. +// The returned field selector can be used with List and Watch to filter desired events. +func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { + apiVersion := e.client.APIVersion().String() + field := fields.Set{} + if involvedObjectName != nil { + field[GetInvolvedObjectNameFieldLabel(apiVersion)] = *involvedObjectName + } + if involvedObjectNamespace != nil { + field["involvedObject.namespace"] = *involvedObjectNamespace + } + if involvedObjectKind != nil { + field["involvedObject.kind"] = *involvedObjectKind + } + if involvedObjectUID != nil { + field["involvedObject.uid"] = *involvedObjectUID + } + return field.AsSelector() +} + +// Returns the appropriate field label to use for name of the involved object as per the given API version. +func GetInvolvedObjectNameFieldLabel(version string) string { + return "involvedObject.name" +} + +// TODO: This is a temporary arrangement and will be removed once all clients are moved to use the clientset. +type EventSinkImpl struct { + Interface EventInterface +} + +func (e *EventSinkImpl) Create(event *v1.Event) (*v1.Event, error) { + return e.Interface.CreateWithEventNamespace(event) +} + +func (e *EventSinkImpl) Update(event *v1.Event) (*v1.Event, error) { + return e.Interface.UpdateWithEventNamespace(event) +} + +func (e *EventSinkImpl) Patch(event *v1.Event, data []byte) (*v1.Event, error) { + return e.Interface.PatchWithEventNamespace(event, data) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/generated_expansion.go new file mode 100644 index 0000000000..5fe0585b41 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/generated_expansion.go @@ -0,0 +1,39 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +type ComponentStatusExpansion interface{} + +type ConfigMapExpansion interface{} + +type EndpointsExpansion interface{} + +type LimitRangeExpansion interface{} + +type PersistentVolumeExpansion interface{} + +type PersistentVolumeClaimExpansion interface{} + +type PodTemplateExpansion interface{} + +type ReplicationControllerExpansion interface{} + +type ResourceQuotaExpansion interface{} + +type SecretExpansion interface{} + +type ServiceAccountExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/limitrange.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/limitrange.go new file mode 100644 index 0000000000..d9d7349135 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/limitrange.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/api/v1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// LimitRangesGetter has a method to return a LimitRangeInterface. +// A group's client should implement this interface. +type LimitRangesGetter interface { + LimitRanges(namespace string) LimitRangeInterface +} + +// LimitRangeInterface has methods to work with LimitRange resources. +type LimitRangeInterface interface { + Create(*v1.LimitRange) (*v1.LimitRange, error) + Update(*v1.LimitRange) (*v1.LimitRange, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.LimitRange, error) + List(opts meta_v1.ListOptions) (*v1.LimitRangeList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.LimitRange, err error) + LimitRangeExpansion +} + +// limitRanges implements LimitRangeInterface +type limitRanges struct { + client rest.Interface + ns string +} + +// newLimitRanges returns a LimitRanges +func newLimitRanges(c *CoreV1Client, namespace string) *limitRanges { + return &limitRanges{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. +func (c *limitRanges) Create(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Post(). + Namespace(c.ns). + Resource("limitranges"). + Body(limitRange). + Do(). + Into(result) + return +} + +// Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. 
+func (c *limitRanges) Update(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Put(). + Namespace(c.ns). + Resource("limitranges"). + Name(limitRange.Name). + Body(limitRange). + Do(). + Into(result) + return +} + +// Delete takes name of the limitRange and deletes it. Returns an error if one occurs. +func (c *limitRanges) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("limitranges"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *limitRanges) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. +func (c *limitRanges) Get(name string, options meta_v1.GetOptions) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Get(). + Namespace(c.ns). + Resource("limitranges"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of LimitRanges that match those selectors. +func (c *limitRanges) List(opts meta_v1.ListOptions) (result *v1.LimitRangeList, err error) { + result = &v1.LimitRangeList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested limitRanges. +func (c *limitRanges) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched limitRange. +func (c *limitRanges) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("limitranges"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/namespace.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/namespace.go new file mode 100644 index 0000000000..2b6e1daed7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/namespace.go @@ -0,0 +1,161 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/api/v1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// NamespacesGetter has a method to return a NamespaceInterface. +// A group's client should implement this interface. +type NamespacesGetter interface { + Namespaces() NamespaceInterface +} + +// NamespaceInterface has methods to work with Namespace resources. +type NamespaceInterface interface { + Create(*v1.Namespace) (*v1.Namespace, error) + Update(*v1.Namespace) (*v1.Namespace, error) + UpdateStatus(*v1.Namespace) (*v1.Namespace, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Namespace, error) + List(opts meta_v1.ListOptions) (*v1.NamespaceList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) + NamespaceExpansion +} + +// namespaces implements NamespaceInterface +type namespaces struct { + client rest.Interface +} + +// newNamespaces returns a Namespaces +func newNamespaces(c *CoreV1Client) *namespaces { + return &namespaces{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. +func (c *namespaces) Create(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Post(). + Resource("namespaces"). + Body(namespace). + Do(). + Into(result) + return +} + +// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. +func (c *namespaces) Update(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Put(). + Resource("namespaces"). + Name(namespace.Name). + Body(namespace). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *namespaces) UpdateStatus(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Put(). + Resource("namespaces"). + Name(namespace.Name). + SubResource("status"). + Body(namespace). + Do(). + Into(result) + return +} + +// Delete takes name of the namespace and deletes it. Returns an error if one occurs. +func (c *namespaces) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Resource("namespaces"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *namespaces) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Resource("namespaces"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. 
+func (c *namespaces) Get(name string, options meta_v1.GetOptions) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Get(). + Resource("namespaces"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Namespaces that match those selectors. +func (c *namespaces) List(opts meta_v1.ListOptions) (result *v1.NamespaceList, err error) { + result = &v1.NamespaceList{} + err = c.client.Get(). + Resource("namespaces"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested namespaces. +func (c *namespaces) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("namespaces"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched namespace. +func (c *namespaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Patch(pt). + Resource("namespaces"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/namespace_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/namespace_expansion.go new file mode 100644 index 0000000000..36de2beeaa --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/namespace_expansion.go @@ -0,0 +1,31 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import "k8s.io/kubernetes/pkg/api/v1" + +// The NamespaceExpansion interface allows manually adding extra methods to the NamespaceInterface. +type NamespaceExpansion interface { + Finalize(item *v1.Namespace) (*v1.Namespace, error) +} + +// Finalize takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. +func (c *namespaces) Finalize(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Put().Resource("namespaces").Name(namespace.Name).SubResource("finalize").Body(namespace).Do().Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/node.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/node.go new file mode 100644 index 0000000000..c3aa1a1ae9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/node.go @@ -0,0 +1,161 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/api/v1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// NodesGetter has a method to return a NodeInterface. +// A group's client should implement this interface. +type NodesGetter interface { + Nodes() NodeInterface +} + +// NodeInterface has methods to work with Node resources. +type NodeInterface interface { + Create(*v1.Node) (*v1.Node, error) + Update(*v1.Node) (*v1.Node, error) + UpdateStatus(*v1.Node) (*v1.Node, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Node, error) + List(opts meta_v1.ListOptions) (*v1.NodeList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error) + NodeExpansion +} + +// nodes implements NodeInterface +type nodes struct { + client rest.Interface +} + +// newNodes returns a Nodes +func newNodes(c *CoreV1Client) *nodes { + return &nodes{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. +func (c *nodes) Create(node *v1.Node) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Post(). + Resource("nodes"). + Body(node). + Do(). + Into(result) + return +} + +// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. +func (c *nodes) Update(node *v1.Node) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Put(). + Resource("nodes"). + Name(node.Name). + Body(node). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *nodes) UpdateStatus(node *v1.Node) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Put(). + Resource("nodes"). + Name(node.Name). + SubResource("status"). + Body(node). + Do(). + Into(result) + return +} + +// Delete takes name of the node and deletes it. Returns an error if one occurs. +func (c *nodes) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Resource("nodes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *nodes) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Resource("nodes"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the node, and returns the corresponding node object, and an error if there is any. 
+func (c *nodes) Get(name string, options meta_v1.GetOptions) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Get(). + Resource("nodes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Nodes that match those selectors. +func (c *nodes) List(opts meta_v1.ListOptions) (result *v1.NodeList, err error) { + result = &v1.NodeList{} + err = c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested nodes. +func (c *nodes) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched node. +func (c *nodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Patch(pt). + Resource("nodes"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/node_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/node_expansion.go new file mode 100644 index 0000000000..cd61419c95 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/node_expansion.go @@ -0,0 +1,43 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/api/v1" +) + +// The NodeExpansion interface allows manually adding extra methods to the NodeInterface. +type NodeExpansion interface { + // PatchStatus modifies the status of an existing node. It returns the copy + // of the node that the server returns, or an error. + PatchStatus(nodeName string, data []byte) (*v1.Node, error) +} + +// PatchStatus modifies the status of an existing node. It returns the copy of +// the node that the server returns, or an error. +func (c *nodes) PatchStatus(nodeName string, data []byte) (*v1.Node, error) { + result := &v1.Node{} + err := c.client.Patch(types.StrategicMergePatchType). + Resource("nodes"). + Name(nodeName). + SubResource("status"). + Body(data). + Do(). + Into(result) + return result, err +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/persistentvolume.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/persistentvolume.go new file mode 100644 index 0000000000..82af0e99fa --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/persistentvolume.go @@ -0,0 +1,161 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/api/v1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// PersistentVolumesGetter has a method to return a PersistentVolumeInterface. +// A group's client should implement this interface. +type PersistentVolumesGetter interface { + PersistentVolumes() PersistentVolumeInterface +} + +// PersistentVolumeInterface has methods to work with PersistentVolume resources. +type PersistentVolumeInterface interface { + Create(*v1.PersistentVolume) (*v1.PersistentVolume, error) + Update(*v1.PersistentVolume) (*v1.PersistentVolume, error) + UpdateStatus(*v1.PersistentVolume) (*v1.PersistentVolume, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.PersistentVolume, error) + List(opts meta_v1.ListOptions) (*v1.PersistentVolumeList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolume, err error) + PersistentVolumeExpansion +} + +// persistentVolumes implements PersistentVolumeInterface +type persistentVolumes struct { + client rest.Interface +} + +// newPersistentVolumes returns a PersistentVolumes +func newPersistentVolumes(c *CoreV1Client) *persistentVolumes { + return &persistentVolumes{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. +func (c *persistentVolumes) Create(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Post(). + Resource("persistentvolumes"). + Body(persistentVolume). + Do(). + Into(result) + return +} + +// Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. +func (c *persistentVolumes) Update(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Put(). + Resource("persistentvolumes"). + Name(persistentVolume.Name). + Body(persistentVolume). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *persistentVolumes) UpdateStatus(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Put(). + Resource("persistentvolumes"). + Name(persistentVolume.Name). + SubResource("status"). 
+ Body(persistentVolume). + Do(). + Into(result) + return +} + +// Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. +func (c *persistentVolumes) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Resource("persistentvolumes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *persistentVolumes) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Resource("persistentvolumes"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. +func (c *persistentVolumes) Get(name string, options meta_v1.GetOptions) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Get(). + Resource("persistentvolumes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. +func (c *persistentVolumes) List(opts meta_v1.ListOptions) (result *v1.PersistentVolumeList, err error) { + result = &v1.PersistentVolumeList{} + err = c.client.Get(). + Resource("persistentvolumes"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested persistentVolumes. +func (c *persistentVolumes) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("persistentvolumes"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched persistentVolume. +func (c *persistentVolumes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Patch(pt). + Resource("persistentvolumes"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/persistentvolumeclaim.go new file mode 100644 index 0000000000..45f9c160b2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/persistentvolumeclaim.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/api/v1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// PersistentVolumeClaimsGetter has a method to return a PersistentVolumeClaimInterface. +// A group's client should implement this interface. +type PersistentVolumeClaimsGetter interface { + PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface +} + +// PersistentVolumeClaimInterface has methods to work with PersistentVolumeClaim resources. +type PersistentVolumeClaimInterface interface { + Create(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) + Update(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) + UpdateStatus(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.PersistentVolumeClaim, error) + List(opts meta_v1.ListOptions) (*v1.PersistentVolumeClaimList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolumeClaim, err error) + PersistentVolumeClaimExpansion +} + +// persistentVolumeClaims implements PersistentVolumeClaimInterface +type persistentVolumeClaims struct { + client rest.Interface + ns string +} + +// newPersistentVolumeClaims returns a PersistentVolumeClaims +func newPersistentVolumeClaims(c *CoreV1Client, namespace string) *persistentVolumeClaims { + return &persistentVolumeClaims{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. +func (c *persistentVolumeClaims) Create(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { + result = &v1.PersistentVolumeClaim{} + err = c.client.Post(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Body(persistentVolumeClaim). + Do(). + Into(result) + return +} + +// Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. +func (c *persistentVolumeClaims) Update(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { + result = &v1.PersistentVolumeClaim{} + err = c.client.Put(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Name(persistentVolumeClaim.Name). + Body(persistentVolumeClaim). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *persistentVolumeClaims) UpdateStatus(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { + result = &v1.PersistentVolumeClaim{} + err = c.client.Put(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Name(persistentVolumeClaim.Name). + SubResource("status"). + Body(persistentVolumeClaim). + Do(). + Into(result) + return +} + +// Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs. 
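PersistentVolumeClaims, unlike PersistentVolumes, are namespaced, which is why every request in the implementation above goes through Namespace(c.ns). A short sketch of listing and then watching claims follows, assuming a PersistentVolumeClaimInterface is already in hand; the label selector value is illustrative.

package example

import (
	"fmt"

	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	core_v1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
)

// observeClaims lists claims matching a label selector and then consumes the
// watch stream until the server closes it.
func observeClaims(pvcs core_v1.PersistentVolumeClaimInterface) error {
	opts := meta_v1.ListOptions{LabelSelector: "app=database"} // illustrative selector
	list, err := pvcs.List(opts)
	if err != nil {
		return err
	}
	fmt.Printf("found %d claims\n", len(list.Items))

	// Watch sets opts.Watch = true internally and returns a watch.Interface.
	w, err := pvcs.Watch(opts)
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		if ev.Type == watch.Deleted {
			fmt.Println("a claim was deleted")
		}
	}
	return nil
}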
+func (c *persistentVolumeClaims) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *persistentVolumeClaims) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. +func (c *persistentVolumeClaims) Get(name string, options meta_v1.GetOptions) (result *v1.PersistentVolumeClaim, err error) { + result = &v1.PersistentVolumeClaim{} + err = c.client.Get(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. +func (c *persistentVolumeClaims) List(opts meta_v1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { + result = &v1.PersistentVolumeClaimList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested persistentVolumeClaims. +func (c *persistentVolumeClaims) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched persistentVolumeClaim. +func (c *persistentVolumeClaims) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolumeClaim, err error) { + result = &v1.PersistentVolumeClaim{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/pod.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/pod.go new file mode 100644 index 0000000000..68e574e9f4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/pod.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/api/v1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// PodsGetter has a method to return a PodInterface. +// A group's client should implement this interface. +type PodsGetter interface { + Pods(namespace string) PodInterface +} + +// PodInterface has methods to work with Pod resources. +type PodInterface interface { + Create(*v1.Pod) (*v1.Pod, error) + Update(*v1.Pod) (*v1.Pod, error) + UpdateStatus(*v1.Pod) (*v1.Pod, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Pod, error) + List(opts meta_v1.ListOptions) (*v1.PodList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) + PodExpansion +} + +// pods implements PodInterface +type pods struct { + client rest.Interface + ns string +} + +// newPods returns a Pods +func newPods(c *CoreV1Client, namespace string) *pods { + return &pods{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *pods) Create(pod *v1.Pod) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Post(). + Namespace(c.ns). + Resource("pods"). + Body(pod). + Do(). + Into(result) + return +} + +// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *pods) Update(pod *v1.Pod) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pods"). + Name(pod.Name). + Body(pod). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *pods) UpdateStatus(pod *v1.Pod) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pods"). + Name(pod.Name). + SubResource("status"). + Body(pod). + Do(). + Into(result) + return +} + +// Delete takes name of the pod and deletes it. Returns an error if one occurs. +func (c *pods) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pods"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *pods) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. +func (c *pods) Get(name string, options meta_v1.GetOptions) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of Pods that match those selectors. +func (c *pods) List(opts meta_v1.ListOptions) (result *v1.PodList, err error) { + result = &v1.PodList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested pods. +func (c *pods) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched pod. +func (c *pods) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("pods"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/pod_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/pod_expansion.go new file mode 100644 index 0000000000..d91af537af --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/pod_expansion.go @@ -0,0 +1,45 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + restclient "k8s.io/client-go/rest" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" + policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1" +) + +// The PodExpansion interface allows manually adding extra methods to the PodInterface. +type PodExpansion interface { + Bind(binding *v1.Binding) error + Evict(eviction *policy.Eviction) error + GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request +} + +// Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored). 
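The PodExpansion methods declared above target the pods/binding, pods/eviction and pods/log subresources rather than the pod object itself. Here is a sketch that streams a pod's logs and then asks for an eviction, assuming a PodInterface from the generated client; Stream() on the returned rest.Request is assumed to be available in the vendored client-go.

package example

import (
	"io"
	"os"

	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1 "k8s.io/kubernetes/pkg/api/v1"
	policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
	core_v1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
)

// dumpLogsAndEvict copies the pod's logs to stdout and then asks the
// eviction subresource to remove the pod, honouring any PodDisruptionBudget.
func dumpLogsAndEvict(pods core_v1.PodInterface, name string) error {
	// GetLogs returns a *rest.Request; Stream opens the pods/log subresource.
	rc, err := pods.GetLogs(name, &v1.PodLogOptions{}).Stream()
	if err != nil {
		return err
	}
	defer rc.Close()
	if _, err := io.Copy(os.Stdout, rc); err != nil {
		return err
	}

	eviction := &policy.Eviction{ObjectMeta: meta_v1.ObjectMeta{Name: name}}
	return pods.Evict(eviction)
}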
+func (c *pods) Bind(binding *v1.Binding) error { + return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).SubResource("binding").Body(binding).Do().Error() +} + +func (c *pods) Evict(eviction *policy.Eviction) error { + return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do().Error() +} + +// Get constructs a request for getting the logs for a pod +func (c *pods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request { + return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, api.ParameterCodec) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/podtemplate.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/podtemplate.go new file mode 100644 index 0000000000..133b6706b8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/podtemplate.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/api/v1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// PodTemplatesGetter has a method to return a PodTemplateInterface. +// A group's client should implement this interface. +type PodTemplatesGetter interface { + PodTemplates(namespace string) PodTemplateInterface +} + +// PodTemplateInterface has methods to work with PodTemplate resources. +type PodTemplateInterface interface { + Create(*v1.PodTemplate) (*v1.PodTemplate, error) + Update(*v1.PodTemplate) (*v1.PodTemplate, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.PodTemplate, error) + List(opts meta_v1.ListOptions) (*v1.PodTemplateList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodTemplate, err error) + PodTemplateExpansion +} + +// podTemplates implements PodTemplateInterface +type podTemplates struct { + client rest.Interface + ns string +} + +// newPodTemplates returns a PodTemplates +func newPodTemplates(c *CoreV1Client, namespace string) *podTemplates { + return &podTemplates{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. +func (c *podTemplates) Create(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podtemplates"). + Body(podTemplate). + Do(). 
+ Into(result) + return +} + +// Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. +func (c *podTemplates) Update(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podtemplates"). + Name(podTemplate.Name). + Body(podTemplate). + Do(). + Into(result) + return +} + +// Delete takes name of the podTemplate and deletes it. Returns an error if one occurs. +func (c *podTemplates) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podtemplates"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podTemplates) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. +func (c *podTemplates) Get(name string, options meta_v1.GetOptions) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podtemplates"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodTemplates that match those selectors. +func (c *podTemplates) List(opts meta_v1.ListOptions) (result *v1.PodTemplateList, err error) { + result = &v1.PodTemplateList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podTemplates. +func (c *podTemplates) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched podTemplate. +func (c *podTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("podtemplates"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/replicationcontroller.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/replicationcontroller.go new file mode 100644 index 0000000000..7914e4a0d1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/replicationcontroller.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/api/v1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// ReplicationControllersGetter has a method to return a ReplicationControllerInterface. +// A group's client should implement this interface. +type ReplicationControllersGetter interface { + ReplicationControllers(namespace string) ReplicationControllerInterface +} + +// ReplicationControllerInterface has methods to work with ReplicationController resources. +type ReplicationControllerInterface interface { + Create(*v1.ReplicationController) (*v1.ReplicationController, error) + Update(*v1.ReplicationController) (*v1.ReplicationController, error) + UpdateStatus(*v1.ReplicationController) (*v1.ReplicationController, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ReplicationController, error) + List(opts meta_v1.ListOptions) (*v1.ReplicationControllerList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) + ReplicationControllerExpansion +} + +// replicationControllers implements ReplicationControllerInterface +type replicationControllers struct { + client rest.Interface + ns string +} + +// newReplicationControllers returns a ReplicationControllers +func newReplicationControllers(c *CoreV1Client, namespace string) *replicationControllers { + return &replicationControllers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. +func (c *replicationControllers) Create(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Body(replicationController). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. +func (c *replicationControllers) Update(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(replicationController.Name). + Body(replicationController). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *replicationControllers) UpdateStatus(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(replicationController.Name). + SubResource("status"). + Body(replicationController). + Do(). 
+ Into(result) + return +} + +// Delete takes name of the replicationController and deletes it. Returns an error if one occurs. +func (c *replicationControllers) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicationControllers) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. +func (c *replicationControllers) Get(name string, options meta_v1.GetOptions) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. +func (c *replicationControllers) List(opts meta_v1.ListOptions) (result *v1.ReplicationControllerList, err error) { + result = &v1.ReplicationControllerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicationControllers. +func (c *replicationControllers) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched replicationController. +func (c *replicationControllers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("replicationcontrollers"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/resourcequota.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/resourcequota.go new file mode 100644 index 0000000000..0e9380fddf --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/resourcequota.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/api/v1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// ResourceQuotasGetter has a method to return a ResourceQuotaInterface. +// A group's client should implement this interface. +type ResourceQuotasGetter interface { + ResourceQuotas(namespace string) ResourceQuotaInterface +} + +// ResourceQuotaInterface has methods to work with ResourceQuota resources. +type ResourceQuotaInterface interface { + Create(*v1.ResourceQuota) (*v1.ResourceQuota, error) + Update(*v1.ResourceQuota) (*v1.ResourceQuota, error) + UpdateStatus(*v1.ResourceQuota) (*v1.ResourceQuota, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ResourceQuota, error) + List(opts meta_v1.ListOptions) (*v1.ResourceQuotaList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResourceQuota, err error) + ResourceQuotaExpansion +} + +// resourceQuotas implements ResourceQuotaInterface +type resourceQuotas struct { + client rest.Interface + ns string +} + +// newResourceQuotas returns a ResourceQuotas +func newResourceQuotas(c *CoreV1Client, namespace string) *resourceQuotas { + return &resourceQuotas{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. +func (c *resourceQuotas) Create(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Post(). + Namespace(c.ns). + Resource("resourcequotas"). + Body(resourceQuota). + Do(). + Into(result) + return +} + +// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. +func (c *resourceQuotas) Update(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(resourceQuota.Name). + Body(resourceQuota). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *resourceQuotas) UpdateStatus(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(resourceQuota.Name). + SubResource("status"). + Body(resourceQuota). + Do(). + Into(result) + return +} + +// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. +func (c *resourceQuotas) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *resourceQuotas) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). 
+ Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. +func (c *resourceQuotas) Get(name string, options meta_v1.GetOptions) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. +func (c *resourceQuotas) List(opts meta_v1.ListOptions) (result *v1.ResourceQuotaList, err error) { + result = &v1.ResourceQuotaList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resourceQuotas. +func (c *resourceQuotas) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched resourceQuota. +func (c *resourceQuotas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("resourcequotas"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/secret.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/secret.go new file mode 100644 index 0000000000..29435d8a5f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/secret.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/api/v1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// SecretsGetter has a method to return a SecretInterface. +// A group's client should implement this interface. +type SecretsGetter interface { + Secrets(namespace string) SecretInterface +} + +// SecretInterface has methods to work with Secret resources. 
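A sketch of creating an Opaque secret through the SecretInterface described below; the client is already bound to a namespace by the SecretsGetter that produced it, so only the object is passed, and the secret name shown is illustrative.

package example

import (
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1 "k8s.io/kubernetes/pkg/api/v1"
	core_v1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
)

// createRegistryToken stores a token as an Opaque secret in whatever
// namespace the secrets client was constructed for.
func createRegistryToken(secrets core_v1.SecretInterface, token []byte) (*v1.Secret, error) {
	s := &v1.Secret{
		ObjectMeta: meta_v1.ObjectMeta{Name: "registry-token"}, // illustrative name
		Type:       v1.SecretTypeOpaque,
		Data:       map[string][]byte{"token": token},
	}
	return secrets.Create(s)
}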
+type SecretInterface interface { + Create(*v1.Secret) (*v1.Secret, error) + Update(*v1.Secret) (*v1.Secret, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Secret, error) + List(opts meta_v1.ListOptions) (*v1.SecretList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) + SecretExpansion +} + +// secrets implements SecretInterface +type secrets struct { + client rest.Interface + ns string +} + +// newSecrets returns a Secrets +func newSecrets(c *CoreV1Client, namespace string) *secrets { + return &secrets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. +func (c *secrets) Create(secret *v1.Secret) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Post(). + Namespace(c.ns). + Resource("secrets"). + Body(secret). + Do(). + Into(result) + return +} + +// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. +func (c *secrets) Update(secret *v1.Secret) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Put(). + Namespace(c.ns). + Resource("secrets"). + Name(secret.Name). + Body(secret). + Do(). + Into(result) + return +} + +// Delete takes name of the secret and deletes it. Returns an error if one occurs. +func (c *secrets) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("secrets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *secrets) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. +func (c *secrets) Get(name string, options meta_v1.GetOptions) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Secrets that match those selectors. +func (c *secrets) List(opts meta_v1.ListOptions) (result *v1.SecretList, err error) { + result = &v1.SecretList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested secrets. +func (c *secrets) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched secret. +func (c *secrets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Patch(pt). + Namespace(c.ns). 
+ Resource("secrets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/service.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/service.go new file mode 100644 index 0000000000..41b5573c99 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/service.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/api/v1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// ServicesGetter has a method to return a ServiceInterface. +// A group's client should implement this interface. +type ServicesGetter interface { + Services(namespace string) ServiceInterface +} + +// ServiceInterface has methods to work with Service resources. +type ServiceInterface interface { + Create(*v1.Service) (*v1.Service, error) + Update(*v1.Service) (*v1.Service, error) + UpdateStatus(*v1.Service) (*v1.Service, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Service, error) + List(opts meta_v1.ListOptions) (*v1.ServiceList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) + ServiceExpansion +} + +// services implements ServiceInterface +type services struct { + client rest.Interface + ns string +} + +// newServices returns a Services +func newServices(c *CoreV1Client, namespace string) *services { + return &services{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Create(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Post(). + Namespace(c.ns). + Resource("services"). + Body(service). + Do(). + Into(result) + return +} + +// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Update(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + Body(service). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). 
+ +func (c *services) UpdateStatus(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + SubResource("status"). + Body(service). + Do(). + Into(result) + return +} + +// Delete takes name of the service and deletes it. Returns an error if one occurs. +func (c *services) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *services) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the service, and returns the corresponding service object, and an error if there is any. +func (c *services) Get(name string, options meta_v1.GetOptions) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Services that match those selectors. +func (c *services) List(opts meta_v1.ListOptions) (result *v1.ServiceList, err error) { + result = &v1.ServiceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested services. +func (c *services) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched service. +func (c *services) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("services"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/service_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/service_expansion.go new file mode 100644 index 0000000000..4937fd1a39 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/service_expansion.go @@ -0,0 +1,41 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/util/net" + restclient "k8s.io/client-go/rest" +) + +// The ServiceExpansion interface allows manually adding extra methods to the ServiceInterface. +type ServiceExpansion interface { + ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper +} + +// ProxyGet returns a response of the service by calling it through the proxy. +func (c *services) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { + request := c.client.Get(). + Namespace(c.ns). + Resource("services"). + SubResource("proxy"). + Name(net.JoinSchemeNamePort(scheme, name, port)). + Suffix(path) + for k, v := range params { + request = request.Param(k, v) + } + return request +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/serviceaccount.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/serviceaccount.go new file mode 100644 index 0000000000..b8e70ed5cb --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/serviceaccount.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/api/v1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// ServiceAccountsGetter has a method to return a ServiceAccountInterface. +// A group's client should implement this interface. +type ServiceAccountsGetter interface { + ServiceAccounts(namespace string) ServiceAccountInterface +} + +// ServiceAccountInterface has methods to work with ServiceAccount resources. +type ServiceAccountInterface interface { + Create(*v1.ServiceAccount) (*v1.ServiceAccount, error) + Update(*v1.ServiceAccount) (*v1.ServiceAccount, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ServiceAccount, error) + List(opts meta_v1.ListOptions) (*v1.ServiceAccountList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceAccount, err error) + ServiceAccountExpansion +} + +// serviceAccounts implements ServiceAccountInterface +type serviceAccounts struct { + client rest.Interface + ns string +} + +// newServiceAccounts returns a ServiceAccounts +func newServiceAccounts(c *CoreV1Client, namespace string) *serviceAccounts { + return &serviceAccounts{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. 
+func (c *serviceAccounts) Create(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Post(). + Namespace(c.ns). + Resource("serviceaccounts"). + Body(serviceAccount). + Do(). + Into(result) + return +} + +// Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. +func (c *serviceAccounts) Update(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Put(). + Namespace(c.ns). + Resource("serviceaccounts"). + Name(serviceAccount.Name). + Body(serviceAccount). + Do(). + Into(result) + return +} + +// Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. +func (c *serviceAccounts) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("serviceaccounts"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *serviceAccounts) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. +func (c *serviceAccounts) Get(name string, options meta_v1.GetOptions) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serviceaccounts"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. +func (c *serviceAccounts) List(opts meta_v1.ListOptions) (result *v1.ServiceAccountList, err error) { + result = &v1.ServiceAccountList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested serviceAccounts. +func (c *serviceAccounts) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched serviceAccount. +func (c *serviceAccounts) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("serviceaccounts"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/BUILD new file mode 100644 index 0000000000..d5cfd9abc1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/BUILD @@ -0,0 +1,54 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "daemonset.go", + "deployment.go", + "deployment_expansion.go", + "doc.go", + "extensions_client.go", + "generated_expansion.go", + "ingress.go", + "podsecuritypolicy.go", + "replicaset.go", + "scale.go", + "scale_expansion.go", + "thirdpartyresource.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/extensions/v1beta1:go_default_library", + "//pkg/client/clientset_generated/clientset/scheme:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/api/meta", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/runtime/schema", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/watch", + "//vendor:k8s.io/client-go/rest", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/fake:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/daemonset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/daemonset.go new file mode 100644 index 0000000000..f55eb1e696 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/daemonset.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// DaemonSetsGetter has a method to return a DaemonSetInterface. +// A group's client should implement this interface. +type DaemonSetsGetter interface { + DaemonSets(namespace string) DaemonSetInterface +} + +// DaemonSetInterface has methods to work with DaemonSet resources. 
+type DaemonSetInterface interface { + Create(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) + Update(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) + UpdateStatus(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.DaemonSet, error) + List(opts v1.ListOptions) (*v1beta1.DaemonSetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) + DaemonSetExpansion +} + +// daemonSets implements DaemonSetInterface +type daemonSets struct { + client rest.Interface + ns string +} + +// newDaemonSets returns a DaemonSets +func newDaemonSets(c *ExtensionsV1beta1Client, namespace string) *daemonSets { + return &daemonSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Create(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("daemonsets"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Update(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + Body(daemonSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *daemonSets) UpdateStatus(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + SubResource("status"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. +func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. +func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. 
+func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { + result = &v1beta1.DaemonSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested daemonSets. +func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched daemonSet. +func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("daemonsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/deployment.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/deployment.go new file mode 100644 index 0000000000..a632d8ba5f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/deployment.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// DeploymentsGetter has a method to return a DeploymentInterface. +// A group's client should implement this interface. +type DeploymentsGetter interface { + Deployments(namespace string) DeploymentInterface +} + +// DeploymentInterface has methods to work with Deployment resources. 
+type DeploymentInterface interface { + Create(*v1beta1.Deployment) (*v1beta1.Deployment, error) + Update(*v1beta1.Deployment) (*v1beta1.Deployment, error) + UpdateStatus(*v1beta1.Deployment) (*v1beta1.Deployment, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Deployment, error) + List(opts v1.ListOptions) (*v1beta1.DeploymentList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) + DeploymentExpansion +} + +// deployments implements DeploymentInterface +type deployments struct { + client rest.Interface + ns string +} + +// newDeployments returns a Deployments +func newDeployments(c *ExtensionsV1beta1Client, namespace string) *deployments { + return &deployments{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deployments"). + Body(deployment). + Do(). + Into(result) + return +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + Body(deployment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + SubResource("status"). + Body(deployment). + Do(). + Into(result) + return +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. 
+func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { + result = &v1beta1.DeploymentList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched deployment. +func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("deployments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/deployment_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/deployment_expansion.go new file mode 100644 index 0000000000..cd3cd1027e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/deployment_expansion.go @@ -0,0 +1,29 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + +// The DeploymentExpansion interface allows manually adding extra methods to the DeploymentInterface. +type DeploymentExpansion interface { + Rollback(*v1beta1.DeploymentRollback) error +} + +// Rollback applied the provided DeploymentRollback to the named deployment in the current namespace. +func (c *deployments) Rollback(deploymentRollback *v1beta1.DeploymentRollback) error { + return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).SubResource("rollback").Body(deploymentRollback).Do().Error() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/doc.go new file mode 100644 index 0000000000..11b5238972 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/extensions_client.go new file mode 100644 index 0000000000..3f86c853fc --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/extensions_client.go @@ -0,0 +1,118 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +type ExtensionsV1beta1Interface interface { + RESTClient() rest.Interface + DaemonSetsGetter + DeploymentsGetter + IngressesGetter + PodSecurityPoliciesGetter + ReplicaSetsGetter + ScalesGetter + ThirdPartyResourcesGetter +} + +// ExtensionsV1beta1Client is used to interact with features provided by the extensions group. +type ExtensionsV1beta1Client struct { + restClient rest.Interface +} + +func (c *ExtensionsV1beta1Client) DaemonSets(namespace string) DaemonSetInterface { + return newDaemonSets(c, namespace) +} + +func (c *ExtensionsV1beta1Client) Deployments(namespace string) DeploymentInterface { + return newDeployments(c, namespace) +} + +func (c *ExtensionsV1beta1Client) Ingresses(namespace string) IngressInterface { + return newIngresses(c, namespace) +} + +func (c *ExtensionsV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { + return newPodSecurityPolicies(c) +} + +func (c *ExtensionsV1beta1Client) ReplicaSets(namespace string) ReplicaSetInterface { + return newReplicaSets(c, namespace) +} + +func (c *ExtensionsV1beta1Client) Scales(namespace string) ScaleInterface { + return newScales(c, namespace) +} + +func (c *ExtensionsV1beta1Client) ThirdPartyResources() ThirdPartyResourceInterface { + return newThirdPartyResources(c) +} + +// NewForConfig creates a new ExtensionsV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*ExtensionsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &ExtensionsV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new ExtensionsV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ExtensionsV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ExtensionsV1beta1Client for the given RESTClient. 
+func New(c rest.Interface) *ExtensionsV1beta1Client { + return &ExtensionsV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ExtensionsV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..d0a3d64bcf --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/generated_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +type DaemonSetExpansion interface{} + +type IngressExpansion interface{} + +type PodSecurityPolicyExpansion interface{} + +type ReplicaSetExpansion interface{} + +type ThirdPartyResourceExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/ingress.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/ingress.go new file mode 100644 index 0000000000..8df8772f12 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/ingress.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// IngressesGetter has a method to return a IngressInterface. +// A group's client should implement this interface. +type IngressesGetter interface { + Ingresses(namespace string) IngressInterface +} + +// IngressInterface has methods to work with Ingress resources. 
+type IngressInterface interface { + Create(*v1beta1.Ingress) (*v1beta1.Ingress, error) + Update(*v1beta1.Ingress) (*v1beta1.Ingress, error) + UpdateStatus(*v1beta1.Ingress) (*v1beta1.Ingress, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Ingress, error) + List(opts v1.ListOptions) (*v1beta1.IngressList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) + IngressExpansion +} + +// ingresses implements IngressInterface +type ingresses struct { + client rest.Interface + ns string +} + +// newIngresses returns a Ingresses +func newIngresses(c *ExtensionsV1beta1Client, namespace string) *ingresses { + return &ingresses{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *ingresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Post(). + Namespace(c.ns). + Resource("ingresses"). + Body(ingress). + Do(). + Into(result) + return +} + +// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *ingresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ingresses"). + Name(ingress.Name). + Body(ingress). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *ingresses) UpdateStatus(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ingresses"). + Name(ingress.Name). + SubResource("status"). + Body(ingress). + Do(). + Into(result) + return +} + +// Delete takes name of the ingress and deletes it. Returns an error if one occurs. +func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("ingresses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. +func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Ingresses that match those selectors. +func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { + result = &v1beta1.IngressList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested ingresses. +func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched ingress. +func (c *ingresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("ingresses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/podsecuritypolicy.go new file mode 100644 index 0000000000..d8ef5b89a6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/podsecuritypolicy.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. +// A group's client should implement this interface. +type PodSecurityPoliciesGetter interface { + PodSecurityPolicies() PodSecurityPolicyInterface +} + +// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. +type PodSecurityPolicyInterface interface { + Create(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) + Update(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) + List(opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) + PodSecurityPolicyExpansion +} + +// podSecurityPolicies implements PodSecurityPolicyInterface +type podSecurityPolicies struct { + client rest.Interface +} + +// newPodSecurityPolicies returns a PodSecurityPolicies +func newPodSecurityPolicies(c *ExtensionsV1beta1Client) *podSecurityPolicies { + return &podSecurityPolicies{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a podSecurityPolicy and creates it. 
Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *podSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Post(). + Resource("podsecuritypolicies"). + Body(podSecurityPolicy). + Do(). + Into(result) + return +} + +// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *podSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Put(). + Resource("podsecuritypolicies"). + Name(podSecurityPolicy.Name). + Body(podSecurityPolicy). + Do(). + Into(result) + return +} + +// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. +func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("podsecuritypolicies"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("podsecuritypolicies"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. +func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Get(). + Resource("podsecuritypolicies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. +func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { + result = &v1beta1.PodSecurityPolicyList{} + err = c.client.Get(). + Resource("podsecuritypolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podSecurityPolicies. +func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("podsecuritypolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched podSecurityPolicy. +func (c *podSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Patch(pt). + Resource("podsecuritypolicies"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/replicaset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/replicaset.go new file mode 100644 index 0000000000..f9c61d6987 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/replicaset.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// ReplicaSetsGetter has a method to return a ReplicaSetInterface. +// A group's client should implement this interface. +type ReplicaSetsGetter interface { + ReplicaSets(namespace string) ReplicaSetInterface +} + +// ReplicaSetInterface has methods to work with ReplicaSet resources. +type ReplicaSetInterface interface { + Create(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) + Update(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) + UpdateStatus(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ReplicaSet, error) + List(opts v1.ListOptions) (*v1beta1.ReplicaSetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) + ReplicaSetExpansion +} + +// replicaSets implements ReplicaSetInterface +type replicaSets struct { + client rest.Interface + ns string +} + +// newReplicaSets returns a ReplicaSets +func newReplicaSets(c *ExtensionsV1beta1Client, namespace string) *replicaSets { + return &replicaSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Create(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicasets"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Update(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + Body(replicaSet). + Do(). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *replicaSets) UpdateStatus(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + SubResource("status"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. +func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. +func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. +func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { + result = &v1beta1.ReplicaSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicaSets. +func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched replicaSet. +func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("replicasets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/scale.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/scale.go new file mode 100644 index 0000000000..733012adee --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/scale.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// ScalesGetter has a method to return a ScaleInterface. +// A group's client should implement this interface. +type ScalesGetter interface { + Scales(namespace string) ScaleInterface +} + +// ScaleInterface has methods to work with Scale resources. +type ScaleInterface interface { + ScaleExpansion +} + +// scales implements ScaleInterface +type scales struct { + client rest.Interface + ns string +} + +// newScales returns a Scales +func newScales(c *ExtensionsV1beta1Client, namespace string) *scales { + return &scales{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/scale_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/scale_expansion.go new file mode 100644 index 0000000000..52ec0388b7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/scale_expansion.go @@ -0,0 +1,65 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" +) + +// The ScaleExpansion interface allows manually adding extra methods to the ScaleInterface. +type ScaleExpansion interface { + Get(kind string, name string) (*v1beta1.Scale, error) + Update(kind string, scale *v1beta1.Scale) (*v1beta1.Scale, error) +} + +// Get takes the reference to scale subresource and returns the subresource or error, if one occurs. +func (c *scales) Get(kind string, name string) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + + // TODO this method needs to take a proper unambiguous kind + fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} + resource, _ := meta.KindToResource(fullyQualifiedKind) + + err = c.client.Get(). + Namespace(c.ns). + Resource(resource.Resource). + Name(name). + SubResource("scale"). + Do(). + Into(result) + return +} + +func (c *scales) Update(kind string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + + // TODO this method needs to take a proper unambiguous kind + fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} + resource, _ := meta.KindToResource(fullyQualifiedKind) + + err = c.client.Put(). + Namespace(scale.Namespace). + Resource(resource.Resource). + Name(scale.Name). + SubResource("scale"). + Body(scale). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/thirdpartyresource.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/thirdpartyresource.go new file mode 100644 index 0000000000..cf1b52864d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/thirdpartyresource.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// ThirdPartyResourcesGetter has a method to return a ThirdPartyResourceInterface. +// A group's client should implement this interface. +type ThirdPartyResourcesGetter interface { + ThirdPartyResources() ThirdPartyResourceInterface +} + +// ThirdPartyResourceInterface has methods to work with ThirdPartyResource resources. +type ThirdPartyResourceInterface interface { + Create(*v1beta1.ThirdPartyResource) (*v1beta1.ThirdPartyResource, error) + Update(*v1beta1.ThirdPartyResource) (*v1beta1.ThirdPartyResource, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ThirdPartyResource, error) + List(opts v1.ListOptions) (*v1beta1.ThirdPartyResourceList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ThirdPartyResource, err error) + ThirdPartyResourceExpansion +} + +// thirdPartyResources implements ThirdPartyResourceInterface +type thirdPartyResources struct { + client rest.Interface +} + +// newThirdPartyResources returns a ThirdPartyResources +func newThirdPartyResources(c *ExtensionsV1beta1Client) *thirdPartyResources { + return &thirdPartyResources{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a thirdPartyResource and creates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. +func (c *thirdPartyResources) Create(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { + result = &v1beta1.ThirdPartyResource{} + err = c.client.Post(). + Resource("thirdpartyresources"). + Body(thirdPartyResource). + Do(). + Into(result) + return +} + +// Update takes the representation of a thirdPartyResource and updates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. 
+func (c *thirdPartyResources) Update(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { + result = &v1beta1.ThirdPartyResource{} + err = c.client.Put(). + Resource("thirdpartyresources"). + Name(thirdPartyResource.Name). + Body(thirdPartyResource). + Do(). + Into(result) + return +} + +// Delete takes name of the thirdPartyResource and deletes it. Returns an error if one occurs. +func (c *thirdPartyResources) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("thirdpartyresources"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *thirdPartyResources) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("thirdpartyresources"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the thirdPartyResource, and returns the corresponding thirdPartyResource object, and an error if there is any. +func (c *thirdPartyResources) Get(name string, options v1.GetOptions) (result *v1beta1.ThirdPartyResource, err error) { + result = &v1beta1.ThirdPartyResource{} + err = c.client.Get(). + Resource("thirdpartyresources"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ThirdPartyResources that match those selectors. +func (c *thirdPartyResources) List(opts v1.ListOptions) (result *v1beta1.ThirdPartyResourceList, err error) { + result = &v1beta1.ThirdPartyResourceList{} + err = c.client.Get(). + Resource("thirdpartyresources"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested thirdPartyResources. +func (c *thirdPartyResources) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("thirdpartyresources"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched thirdPartyResource. +func (c *thirdPartyResources) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ThirdPartyResource, err error) { + result = &v1beta1.ThirdPartyResource{} + err = c.client.Patch(pt). + Resource("thirdpartyresources"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/BUILD new file mode 100644 index 0000000000..4476c97ad8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/BUILD @@ -0,0 +1,46 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "eviction.go", + "eviction_expansion.go", + "generated_expansion.go", + "poddisruptionbudget.go", + "policy_client.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/policy/v1beta1:go_default_library", + "//pkg/client/clientset_generated/clientset/scheme:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/watch", + "//vendor:k8s.io/client-go/rest", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/client/clientset_generated/clientset/typed/policy/v1beta1/fake:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/doc.go new file mode 100644 index 0000000000..11b5238972 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/eviction.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/eviction.go new file mode 100644 index 0000000000..9c4133e369 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/eviction.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// EvictionsGetter has a method to return a EvictionInterface. +// A group's client should implement this interface. +type EvictionsGetter interface { + Evictions(namespace string) EvictionInterface +} + +// EvictionInterface has methods to work with Eviction resources. +type EvictionInterface interface { + EvictionExpansion +} + +// evictions implements EvictionInterface +type evictions struct { + client rest.Interface + ns string +} + +// newEvictions returns a Evictions +func newEvictions(c *PolicyV1beta1Client, namespace string) *evictions { + return &evictions{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/eviction_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/eviction_expansion.go new file mode 100644 index 0000000000..c24252cb01 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/eviction_expansion.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1" +) + +// The EvictionExpansion interface allows manually adding extra methods to the EvictionInterface. +type EvictionExpansion interface { + Evict(eviction *policy.Eviction) error +} + +func (c *evictions) Evict(eviction *policy.Eviction) error { + return c.client.Post(). + AbsPath("/api/v1"). + Namespace(eviction.Namespace). + Resource("pods"). + Name(eviction.Name). + SubResource("eviction"). + Body(eviction). + Do(). + Error() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..511adc6ef7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package v1beta1 + +type PodDisruptionBudgetExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/poddisruptionbudget.go new file mode 100644 index 0000000000..c32f95d42b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/poddisruptionbudget.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/policy/v1beta1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// PodDisruptionBudgetsGetter has a method to return a PodDisruptionBudgetInterface. +// A group's client should implement this interface. +type PodDisruptionBudgetsGetter interface { + PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface +} + +// PodDisruptionBudgetInterface has methods to work with PodDisruptionBudget resources. +type PodDisruptionBudgetInterface interface { + Create(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) + Update(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) + UpdateStatus(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.PodDisruptionBudget, error) + List(opts v1.ListOptions) (*v1beta1.PodDisruptionBudgetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) + PodDisruptionBudgetExpansion +} + +// podDisruptionBudgets implements PodDisruptionBudgetInterface +type podDisruptionBudgets struct { + client rest.Interface + ns string +} + +// newPodDisruptionBudgets returns a PodDisruptionBudgets +func newPodDisruptionBudgets(c *PolicyV1beta1Client, namespace string) *podDisruptionBudgets { + return &podDisruptionBudgets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. +func (c *podDisruptionBudgets) Create(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { + result = &v1beta1.PodDisruptionBudget{} + err = c.client.Post(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + Body(podDisruptionBudget). + Do(). + Into(result) + return +} + +// Update takes the representation of a podDisruptionBudget and updates it. 
Returns the server's representation of the podDisruptionBudget, and an error, if there is any. +func (c *podDisruptionBudgets) Update(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { + result = &v1beta1.PodDisruptionBudget{} + err = c.client.Put(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + Name(podDisruptionBudget.Name). + Body(podDisruptionBudget). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *podDisruptionBudgets) UpdateStatus(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { + result = &v1beta1.PodDisruptionBudget{} + err = c.client.Put(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + Name(podDisruptionBudget.Name). + SubResource("status"). + Body(podDisruptionBudget). + Do(). + Into(result) + return +} + +// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. +func (c *podDisruptionBudgets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. +func (c *podDisruptionBudgets) Get(name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { + result = &v1beta1.PodDisruptionBudget{} + err = c.client.Get(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. +func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { + result = &v1beta1.PodDisruptionBudgetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. +func (c *podDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched podDisruptionBudget. +func (c *podDisruptionBudgets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { + result = &v1beta1.PodDisruptionBudget{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/policy_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/policy_client.go new file mode 100644 index 0000000000..2424ed90ff --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/policy_client.go @@ -0,0 +1,93 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/policy/v1beta1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +type PolicyV1beta1Interface interface { + RESTClient() rest.Interface + EvictionsGetter + PodDisruptionBudgetsGetter +} + +// PolicyV1beta1Client is used to interact with features provided by the policy group. +type PolicyV1beta1Client struct { + restClient rest.Interface +} + +func (c *PolicyV1beta1Client) Evictions(namespace string) EvictionInterface { + return newEvictions(c, namespace) +} + +func (c *PolicyV1beta1Client) PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface { + return newPodDisruptionBudgets(c, namespace) +} + +// NewForConfig creates a new PolicyV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*PolicyV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &PolicyV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new PolicyV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *PolicyV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new PolicyV1beta1Client for the given RESTClient. +func New(c rest.Interface) *PolicyV1beta1Client { + return &PolicyV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *PolicyV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/BUILD new file mode 100644 index 0000000000..581150fdd9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/BUILD @@ -0,0 +1,47 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "clusterrole.go", + "clusterrolebinding.go", + "doc.go", + "generated_expansion.go", + "rbac_client.go", + "role.go", + "rolebinding.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/rbac/v1alpha1:go_default_library", + "//pkg/client/clientset_generated/clientset/scheme:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/watch", + "//vendor:k8s.io/client-go/rest", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/fake:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/clusterrole.go new file mode 100644 index 0000000000..4e41cad06a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/clusterrole.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// ClusterRolesGetter has a method to return a ClusterRoleInterface. +// A group's client should implement this interface. +type ClusterRolesGetter interface { + ClusterRoles() ClusterRoleInterface +} + +// ClusterRoleInterface has methods to work with ClusterRole resources. 
+type ClusterRoleInterface interface { + Create(*v1alpha1.ClusterRole) (*v1alpha1.ClusterRole, error) + Update(*v1alpha1.ClusterRole) (*v1alpha1.ClusterRole, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.ClusterRole, error) + List(opts v1.ListOptions) (*v1alpha1.ClusterRoleList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) + ClusterRoleExpansion +} + +// clusterRoles implements ClusterRoleInterface +type clusterRoles struct { + client rest.Interface +} + +// newClusterRoles returns a ClusterRoles +func newClusterRoles(c *RbacV1alpha1Client) *clusterRoles { + return &clusterRoles{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Create(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { + result = &v1alpha1.ClusterRole{} + err = c.client.Post(). + Resource("clusterroles"). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Update(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { + result = &v1alpha1.ClusterRole{} + err = c.client.Put(). + Resource("clusterroles"). + Name(clusterRole.Name). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. +func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterroles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("clusterroles"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. +func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { + result = &v1alpha1.ClusterRole{} + err = c.client.Get(). + Resource("clusterroles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. +func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { + result = &v1alpha1.ClusterRoleList{} + err = c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched clusterRole. 
+func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) { + result = &v1alpha1.ClusterRole{} + err = c.client.Patch(pt). + Resource("clusterroles"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/clusterrolebinding.go new file mode 100644 index 0000000000..759d57adfd --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/clusterrolebinding.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. +// A group's client should implement this interface. +type ClusterRoleBindingsGetter interface { + ClusterRoleBindings() ClusterRoleBindingInterface +} + +// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. +type ClusterRoleBindingInterface interface { + Create(*v1alpha1.ClusterRoleBinding) (*v1alpha1.ClusterRoleBinding, error) + Update(*v1alpha1.ClusterRoleBinding) (*v1alpha1.ClusterRoleBinding, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.ClusterRoleBinding, error) + List(opts v1.ListOptions) (*v1alpha1.ClusterRoleBindingList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) + ClusterRoleBindingExpansion +} + +// clusterRoleBindings implements ClusterRoleBindingInterface +type clusterRoleBindings struct { + client rest.Interface +} + +// newClusterRoleBindings returns a ClusterRoleBindings +func newClusterRoleBindings(c *RbacV1alpha1Client) *clusterRoleBindings { + return &clusterRoleBindings{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Create(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { + result = &v1alpha1.ClusterRoleBinding{} + err = c.client.Post(). + Resource("clusterrolebindings"). + Body(clusterRoleBinding). + Do(). 
+ Into(result) + return +} + +// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Update(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { + result = &v1alpha1.ClusterRoleBinding{} + err = c.client.Put(). + Resource("clusterrolebindings"). + Name(clusterRoleBinding.Name). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. +func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. +func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { + result = &v1alpha1.ClusterRoleBinding{} + err = c.client.Get(). + Resource("clusterrolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. +func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { + result = &v1alpha1.ClusterRoleBindingList{} + err = c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. +func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched clusterRoleBinding. +func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { + result = &v1alpha1.ClusterRoleBinding{} + err = c.client.Patch(pt). + Resource("clusterrolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/doc.go new file mode 100644 index 0000000000..ba8d10d3b6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/generated_expansion.go new file mode 100644 index 0000000000..f506fc3468 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/generated_expansion.go @@ -0,0 +1,25 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +type ClusterRoleExpansion interface{} + +type ClusterRoleBindingExpansion interface{} + +type RoleExpansion interface{} + +type RoleBindingExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/rbac_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/rbac_client.go new file mode 100644 index 0000000000..b9d09e5bc8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/rbac_client.go @@ -0,0 +1,103 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" + v1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +type RbacV1alpha1Interface interface { + RESTClient() rest.Interface + ClusterRolesGetter + ClusterRoleBindingsGetter + RolesGetter + RoleBindingsGetter +} + +// RbacV1alpha1Client is used to interact with features provided by the rbac.authorization.k8s.io group. 
+type RbacV1alpha1Client struct { + restClient rest.Interface +} + +func (c *RbacV1alpha1Client) ClusterRoles() ClusterRoleInterface { + return newClusterRoles(c) +} + +func (c *RbacV1alpha1Client) ClusterRoleBindings() ClusterRoleBindingInterface { + return newClusterRoleBindings(c) +} + +func (c *RbacV1alpha1Client) Roles(namespace string) RoleInterface { + return newRoles(c, namespace) +} + +func (c *RbacV1alpha1Client) RoleBindings(namespace string) RoleBindingInterface { + return newRoleBindings(c, namespace) +} + +// NewForConfig creates a new RbacV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*RbacV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &RbacV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new RbacV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *RbacV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new RbacV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *RbacV1alpha1Client { + return &RbacV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *RbacV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/role.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/role.go new file mode 100644 index 0000000000..900e1cc1e4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/role.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// RolesGetter has a method to return a RoleInterface. +// A group's client should implement this interface. +type RolesGetter interface { + Roles(namespace string) RoleInterface +} + +// RoleInterface has methods to work with Role resources. 
+type RoleInterface interface { + Create(*v1alpha1.Role) (*v1alpha1.Role, error) + Update(*v1alpha1.Role) (*v1alpha1.Role, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.Role, error) + List(opts v1.ListOptions) (*v1alpha1.RoleList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) + RoleExpansion +} + +// roles implements RoleInterface +type roles struct { + client rest.Interface + ns string +} + +// newRoles returns a Roles +func newRoles(c *RbacV1alpha1Client, namespace string) *roles { + return &roles{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Create(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { + result = &v1alpha1.Role{} + err = c.client.Post(). + Namespace(c.ns). + Resource("roles"). + Body(role). + Do(). + Into(result) + return +} + +// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Update(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { + result = &v1alpha1.Role{} + err = c.client.Put(). + Namespace(c.ns). + Resource("roles"). + Name(role.Name). + Body(role). + Do(). + Into(result) + return +} + +// Delete takes name of the role and deletes it. Returns an error if one occurs. +func (c *roles) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the role, and returns the corresponding role object, and an error if there is any. +func (c *roles) Get(name string, options v1.GetOptions) (result *v1alpha1.Role, err error) { + result = &v1alpha1.Role{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Roles that match those selectors. +func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { + result = &v1alpha1.RoleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roles. +func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched role. +func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) { + result = &v1alpha1.Role{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("roles"). + SubResource(subresources...). 
+ Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/rolebinding.go new file mode 100644 index 0000000000..63481f0ad7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/rolebinding.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// RoleBindingsGetter has a method to return a RoleBindingInterface. +// A group's client should implement this interface. +type RoleBindingsGetter interface { + RoleBindings(namespace string) RoleBindingInterface +} + +// RoleBindingInterface has methods to work with RoleBinding resources. +type RoleBindingInterface interface { + Create(*v1alpha1.RoleBinding) (*v1alpha1.RoleBinding, error) + Update(*v1alpha1.RoleBinding) (*v1alpha1.RoleBinding, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.RoleBinding, error) + List(opts v1.ListOptions) (*v1alpha1.RoleBindingList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) + RoleBindingExpansion +} + +// roleBindings implements RoleBindingInterface +type roleBindings struct { + client rest.Interface + ns string +} + +// newRoleBindings returns a RoleBindings +func newRoleBindings(c *RbacV1alpha1Client, namespace string) *roleBindings { + return &roleBindings{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Create(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { + result = &v1alpha1.RoleBinding{} + err = c.client.Post(). + Namespace(c.ns). + Resource("rolebindings"). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Update(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { + result = &v1alpha1.RoleBinding{} + err = c.client.Put(). + Namespace(c.ns). + Resource("rolebindings"). + Name(roleBinding.Name). + Body(roleBinding). + Do(). 
+ Into(result) + return +} + +// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. +func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. +func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) { + result = &v1alpha1.RoleBinding{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. +func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { + result = &v1alpha1.RoleBindingList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched roleBinding. +func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) { + result = &v1alpha1.RoleBinding{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("rolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/BUILD new file mode 100644 index 0000000000..e9608d7136 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/BUILD @@ -0,0 +1,47 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "clusterrole.go", + "clusterrolebinding.go", + "doc.go", + "generated_expansion.go", + "rbac_client.go", + "role.go", + "rolebinding.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/rbac/v1beta1:go_default_library", + "//pkg/client/clientset_generated/clientset/scheme:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/watch", + "//vendor:k8s.io/client-go/rest", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/fake:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/clusterrole.go new file mode 100644 index 0000000000..767f784f7d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/clusterrole.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/rbac/v1beta1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// ClusterRolesGetter has a method to return a ClusterRoleInterface. +// A group's client should implement this interface. +type ClusterRolesGetter interface { + ClusterRoles() ClusterRoleInterface +} + +// ClusterRoleInterface has methods to work with ClusterRole resources. 
+type ClusterRoleInterface interface { + Create(*v1beta1.ClusterRole) (*v1beta1.ClusterRole, error) + Update(*v1beta1.ClusterRole) (*v1beta1.ClusterRole, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ClusterRole, error) + List(opts v1.ListOptions) (*v1beta1.ClusterRoleList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) + ClusterRoleExpansion +} + +// clusterRoles implements ClusterRoleInterface +type clusterRoles struct { + client rest.Interface +} + +// newClusterRoles returns a ClusterRoles +func newClusterRoles(c *RbacV1beta1Client) *clusterRoles { + return &clusterRoles{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Create(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) { + result = &v1beta1.ClusterRole{} + err = c.client.Post(). + Resource("clusterroles"). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Update(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) { + result = &v1beta1.ClusterRole{} + err = c.client.Put(). + Resource("clusterroles"). + Name(clusterRole.Name). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. +func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterroles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("clusterroles"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. +func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) { + result = &v1beta1.ClusterRole{} + err = c.client.Get(). + Resource("clusterroles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. +func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { + result = &v1beta1.ClusterRoleList{} + err = c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched clusterRole. 
+func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) { + result = &v1beta1.ClusterRole{} + err = c.client.Patch(pt). + Resource("clusterroles"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/clusterrolebinding.go new file mode 100644 index 0000000000..d629b13aff --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/clusterrolebinding.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/rbac/v1beta1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. +// A group's client should implement this interface. +type ClusterRoleBindingsGetter interface { + ClusterRoleBindings() ClusterRoleBindingInterface +} + +// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. +type ClusterRoleBindingInterface interface { + Create(*v1beta1.ClusterRoleBinding) (*v1beta1.ClusterRoleBinding, error) + Update(*v1beta1.ClusterRoleBinding) (*v1beta1.ClusterRoleBinding, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ClusterRoleBinding, error) + List(opts v1.ListOptions) (*v1beta1.ClusterRoleBindingList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) + ClusterRoleBindingExpansion +} + +// clusterRoleBindings implements ClusterRoleBindingInterface +type clusterRoleBindings struct { + client rest.Interface +} + +// newClusterRoleBindings returns a ClusterRoleBindings +func newClusterRoleBindings(c *RbacV1beta1Client) *clusterRoleBindings { + return &clusterRoleBindings{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Create(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) { + result = &v1beta1.ClusterRoleBinding{} + err = c.client.Post(). + Resource("clusterrolebindings"). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRoleBinding and updates it. 
Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Update(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) { + result = &v1beta1.ClusterRoleBinding{} + err = c.client.Put(). + Resource("clusterrolebindings"). + Name(clusterRoleBinding.Name). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. +func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. +func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) { + result = &v1beta1.ClusterRoleBinding{} + err = c.client.Get(). + Resource("clusterrolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. +func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { + result = &v1beta1.ClusterRoleBindingList{} + err = c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. +func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched clusterRoleBinding. +func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) { + result = &v1beta1.ClusterRoleBinding{} + err = c.client.Patch(pt). + Resource("clusterrolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/doc.go new file mode 100644 index 0000000000..11b5238972 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..d7f80c0042 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/generated_expansion.go @@ -0,0 +1,25 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +type ClusterRoleExpansion interface{} + +type ClusterRoleBindingExpansion interface{} + +type RoleExpansion interface{} + +type RoleBindingExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/rbac_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/rbac_client.go new file mode 100644 index 0000000000..20c8da68c8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/rbac_client.go @@ -0,0 +1,103 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/rbac/v1beta1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +type RbacV1beta1Interface interface { + RESTClient() rest.Interface + ClusterRolesGetter + ClusterRoleBindingsGetter + RolesGetter + RoleBindingsGetter +} + +// RbacV1beta1Client is used to interact with features provided by the rbac.authorization.k8s.io group. +type RbacV1beta1Client struct { + restClient rest.Interface +} + +func (c *RbacV1beta1Client) ClusterRoles() ClusterRoleInterface { + return newClusterRoles(c) +} + +func (c *RbacV1beta1Client) ClusterRoleBindings() ClusterRoleBindingInterface { + return newClusterRoleBindings(c) +} + +func (c *RbacV1beta1Client) Roles(namespace string) RoleInterface { + return newRoles(c, namespace) +} + +func (c *RbacV1beta1Client) RoleBindings(namespace string) RoleBindingInterface { + return newRoleBindings(c, namespace) +} + +// NewForConfig creates a new RbacV1beta1Client for the given config. 
+func NewForConfig(c *rest.Config) (*RbacV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &RbacV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new RbacV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *RbacV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new RbacV1beta1Client for the given RESTClient. +func New(c rest.Interface) *RbacV1beta1Client { + return &RbacV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *RbacV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/role.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/role.go new file mode 100644 index 0000000000..cc6a217e2f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/role.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/rbac/v1beta1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// RolesGetter has a method to return a RoleInterface. +// A group's client should implement this interface. +type RolesGetter interface { + Roles(namespace string) RoleInterface +} + +// RoleInterface has methods to work with Role resources. 
+type RoleInterface interface { + Create(*v1beta1.Role) (*v1beta1.Role, error) + Update(*v1beta1.Role) (*v1beta1.Role, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Role, error) + List(opts v1.ListOptions) (*v1beta1.RoleList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) + RoleExpansion +} + +// roles implements RoleInterface +type roles struct { + client rest.Interface + ns string +} + +// newRoles returns a Roles +func newRoles(c *RbacV1beta1Client, namespace string) *roles { + return &roles{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Create(role *v1beta1.Role) (result *v1beta1.Role, err error) { + result = &v1beta1.Role{} + err = c.client.Post(). + Namespace(c.ns). + Resource("roles"). + Body(role). + Do(). + Into(result) + return +} + +// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Update(role *v1beta1.Role) (result *v1beta1.Role, err error) { + result = &v1beta1.Role{} + err = c.client.Put(). + Namespace(c.ns). + Resource("roles"). + Name(role.Name). + Body(role). + Do(). + Into(result) + return +} + +// Delete takes name of the role and deletes it. Returns an error if one occurs. +func (c *roles) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the role, and returns the corresponding role object, and an error if there is any. +func (c *roles) Get(name string, options v1.GetOptions) (result *v1beta1.Role, err error) { + result = &v1beta1.Role{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Roles that match those selectors. +func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) { + result = &v1beta1.RoleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roles. +func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched role. +func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) { + result = &v1beta1.Role{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("roles"). + SubResource(subresources...). + Name(name). + Body(data). 
+ Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/rolebinding.go new file mode 100644 index 0000000000..5c87ad1e68 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/rolebinding.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/rbac/v1beta1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// RoleBindingsGetter has a method to return a RoleBindingInterface. +// A group's client should implement this interface. +type RoleBindingsGetter interface { + RoleBindings(namespace string) RoleBindingInterface +} + +// RoleBindingInterface has methods to work with RoleBinding resources. +type RoleBindingInterface interface { + Create(*v1beta1.RoleBinding) (*v1beta1.RoleBinding, error) + Update(*v1beta1.RoleBinding) (*v1beta1.RoleBinding, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.RoleBinding, error) + List(opts v1.ListOptions) (*v1beta1.RoleBindingList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) + RoleBindingExpansion +} + +// roleBindings implements RoleBindingInterface +type roleBindings struct { + client rest.Interface + ns string +} + +// newRoleBindings returns a RoleBindings +func newRoleBindings(c *RbacV1beta1Client, namespace string) *roleBindings { + return &roleBindings{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Create(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) { + result = &v1beta1.RoleBinding{} + err = c.client.Post(). + Namespace(c.ns). + Resource("rolebindings"). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Update(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) { + result = &v1beta1.RoleBinding{} + err = c.client.Put(). + Namespace(c.ns). + Resource("rolebindings"). + Name(roleBinding.Name). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. 
+func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. +func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) { + result = &v1beta1.RoleBinding{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. +func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { + result = &v1beta1.RoleBindingList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched roleBinding. +func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) { + result = &v1beta1.RoleBinding{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("rolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/BUILD new file mode 100644 index 0000000000..150ee381b1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/BUILD @@ -0,0 +1,44 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "generated_expansion.go", + "podpreset.go", + "settings_client.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/settings/v1alpha1:go_default_library", + "//pkg/client/clientset_generated/clientset/scheme:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/watch", + "//vendor:k8s.io/client-go/rest", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/fake:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/doc.go new file mode 100644 index 0000000000..ba8d10d3b6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/generated_expansion.go new file mode 100644 index 0000000000..d599b2935c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +type PodPresetExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/podpreset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/podpreset.go new file mode 100644 index 0000000000..f94f98a766 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/podpreset.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha1 "k8s.io/kubernetes/pkg/apis/settings/v1alpha1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// PodPresetsGetter has a method to return a PodPresetInterface. +// A group's client should implement this interface. +type PodPresetsGetter interface { + PodPresets(namespace string) PodPresetInterface +} + +// PodPresetInterface has methods to work with PodPreset resources. +type PodPresetInterface interface { + Create(*v1alpha1.PodPreset) (*v1alpha1.PodPreset, error) + Update(*v1alpha1.PodPreset) (*v1alpha1.PodPreset, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.PodPreset, error) + List(opts v1.ListOptions) (*v1alpha1.PodPresetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) + PodPresetExpansion +} + +// podPresets implements PodPresetInterface +type podPresets struct { + client rest.Interface + ns string +} + +// newPodPresets returns a PodPresets +func newPodPresets(c *SettingsV1alpha1Client, namespace string) *podPresets { + return &podPresets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a podPreset and creates it. Returns the server's representation of the podPreset, and an error, if there is any. +func (c *podPresets) Create(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) { + result = &v1alpha1.PodPreset{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podpresets"). + Body(podPreset). + Do(). + Into(result) + return +} + +// Update takes the representation of a podPreset and updates it. Returns the server's representation of the podPreset, and an error, if there is any. +func (c *podPresets) Update(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) { + result = &v1alpha1.PodPreset{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podpresets"). + Name(podPreset.Name). + Body(podPreset). + Do(). 
+ Into(result) + return +} + +// Delete takes name of the podPreset and deletes it. Returns an error if one occurs. +func (c *podPresets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podpresets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podpresets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the podPreset, and returns the corresponding podPreset object, and an error if there is any. +func (c *podPresets) Get(name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) { + result = &v1alpha1.PodPreset{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podpresets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodPresets that match those selectors. +func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { + result = &v1alpha1.PodPresetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podpresets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podPresets. +func (c *podPresets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podpresets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched podPreset. +func (c *podPresets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) { + result = &v1alpha1.PodPreset{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("podpresets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/settings_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/settings_client.go new file mode 100644 index 0000000000..1d6dc46c1a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/settings_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" + v1alpha1 "k8s.io/kubernetes/pkg/apis/settings/v1alpha1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +type SettingsV1alpha1Interface interface { + RESTClient() rest.Interface + PodPresetsGetter +} + +// SettingsV1alpha1Client is used to interact with features provided by the settings.k8s.io group. +type SettingsV1alpha1Client struct { + restClient rest.Interface +} + +func (c *SettingsV1alpha1Client) PodPresets(namespace string) PodPresetInterface { + return newPodPresets(c, namespace) +} + +// NewForConfig creates a new SettingsV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*SettingsV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &SettingsV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new SettingsV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *SettingsV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new SettingsV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *SettingsV1alpha1Client { + return &SettingsV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *SettingsV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/BUILD new file mode 100644 index 0000000000..290a7731c4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/BUILD @@ -0,0 +1,44 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "generated_expansion.go", + "storage_client.go", + "storageclass.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/storage/v1:go_default_library", + "//pkg/client/clientset_generated/clientset/scheme:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/watch", + "//vendor:k8s.io/client-go/rest", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/client/clientset_generated/clientset/typed/storage/v1/fake:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/doc.go new file mode 100644 index 0000000000..54673bfa73 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/generated_expansion.go new file mode 100644 index 0000000000..39df9fb879 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +type StorageClassExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/storage_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/storage_client.go new file mode 100644 index 0000000000..5d37d8417c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/storage_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/apis/storage/v1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +type StorageV1Interface interface { + RESTClient() rest.Interface + StorageClassesGetter +} + +// StorageV1Client is used to interact with features provided by the storage.k8s.io group. +type StorageV1Client struct { + restClient rest.Interface +} + +func (c *StorageV1Client) StorageClasses() StorageClassInterface { + return newStorageClasses(c) +} + +// NewForConfig creates a new StorageV1Client for the given config. +func NewForConfig(c *rest.Config) (*StorageV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &StorageV1Client{client}, nil +} + +// NewForConfigOrDie creates a new StorageV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *StorageV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new StorageV1Client for the given RESTClient. +func New(c rest.Interface) *StorageV1Client { + return &StorageV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *StorageV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/storageclass.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/storageclass.go new file mode 100644 index 0000000000..3a1efa0163 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/storageclass.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kubernetes/pkg/apis/storage/v1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// StorageClassesGetter has a method to return a StorageClassInterface. +// A group's client should implement this interface. +type StorageClassesGetter interface { + StorageClasses() StorageClassInterface +} + +// StorageClassInterface has methods to work with StorageClass resources. +type StorageClassInterface interface { + Create(*v1.StorageClass) (*v1.StorageClass, error) + Update(*v1.StorageClass) (*v1.StorageClass, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.StorageClass, error) + List(opts meta_v1.ListOptions) (*v1.StorageClassList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StorageClass, err error) + StorageClassExpansion +} + +// storageClasses implements StorageClassInterface +type storageClasses struct { + client rest.Interface +} + +// newStorageClasses returns a StorageClasses +func newStorageClasses(c *StorageV1Client) *storageClasses { + return &storageClasses{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *storageClasses) Create(storageClass *v1.StorageClass) (result *v1.StorageClass, err error) { + result = &v1.StorageClass{} + err = c.client.Post(). + Resource("storageclasses"). + Body(storageClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *storageClasses) Update(storageClass *v1.StorageClass) (result *v1.StorageClass, err error) { + result = &v1.StorageClass{} + err = c.client.Put(). + Resource("storageclasses"). + Name(storageClass.Name). + Body(storageClass). + Do(). + Into(result) + return +} + +// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. +func (c *storageClasses) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Resource("storageclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *storageClasses) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Resource("storageclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). 
+ Error() +} + +// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. +func (c *storageClasses) Get(name string, options meta_v1.GetOptions) (result *v1.StorageClass, err error) { + result = &v1.StorageClass{} + err = c.client.Get(). + Resource("storageclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. +func (c *storageClasses) List(opts meta_v1.ListOptions) (result *v1.StorageClassList, err error) { + result = &v1.StorageClassList{} + err = c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested storageClasses. +func (c *storageClasses) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched storageClass. +func (c *storageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StorageClass, err error) { + result = &v1.StorageClass{} + err = c.client.Patch(pt). + Resource("storageclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/BUILD new file mode 100644 index 0000000000..aafb4e789a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/BUILD @@ -0,0 +1,44 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "generated_expansion.go", + "storage_client.go", + "storageclass.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/storage/v1beta1:go_default_library", + "//pkg/client/clientset_generated/clientset/scheme:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/runtime/serializer", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/watch", + "//vendor:k8s.io/client-go/rest", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/client/clientset_generated/clientset/typed/storage/v1beta1/fake:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/doc.go new file mode 100644 index 0000000000..11b5238972 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..6f3f0c55e6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +type StorageClassExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/storage_client.go new file mode 100644 index 0000000000..6925575e06 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/storage_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/storage/v1beta1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +type StorageV1beta1Interface interface { + RESTClient() rest.Interface + StorageClassesGetter +} + +// StorageV1beta1Client is used to interact with features provided by the storage.k8s.io group. +type StorageV1beta1Client struct { + restClient rest.Interface +} + +func (c *StorageV1beta1Client) StorageClasses() StorageClassInterface { + return newStorageClasses(c) +} + +// NewForConfig creates a new StorageV1beta1Client for the given config. 
+func NewForConfig(c *rest.Config) (*StorageV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &StorageV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new StorageV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *StorageV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new StorageV1beta1Client for the given RESTClient. +func New(c rest.Interface) *StorageV1beta1Client { + return &StorageV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *StorageV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/storageclass.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/storageclass.go new file mode 100644 index 0000000000..69fb440ee2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/storageclass.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/kubernetes/pkg/apis/storage/v1beta1" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme" +) + +// StorageClassesGetter has a method to return a StorageClassInterface. +// A group's client should implement this interface. +type StorageClassesGetter interface { + StorageClasses() StorageClassInterface +} + +// StorageClassInterface has methods to work with StorageClass resources. 
+type StorageClassInterface interface { + Create(*v1beta1.StorageClass) (*v1beta1.StorageClass, error) + Update(*v1beta1.StorageClass) (*v1beta1.StorageClass, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.StorageClass, error) + List(opts v1.ListOptions) (*v1beta1.StorageClassList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) + StorageClassExpansion +} + +// storageClasses implements StorageClassInterface +type storageClasses struct { + client rest.Interface +} + +// newStorageClasses returns a StorageClasses +func newStorageClasses(c *StorageV1beta1Client) *storageClasses { + return &storageClasses{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *storageClasses) Create(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { + result = &v1beta1.StorageClass{} + err = c.client.Post(). + Resource("storageclasses"). + Body(storageClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *storageClasses) Update(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { + result = &v1beta1.StorageClass{} + err = c.client.Put(). + Resource("storageclasses"). + Name(storageClass.Name). + Body(storageClass). + Do(). + Into(result) + return +} + +// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. +func (c *storageClasses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("storageclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *storageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("storageclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. +func (c *storageClasses) Get(name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { + result = &v1beta1.StorageClass{} + err = c.client.Get(). + Resource("storageclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. +func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { + result = &v1beta1.StorageClassList{} + err = c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested storageClasses. +func (c *storageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Watch() +} + +// Patch applies the patch and returns the patched storageClass. +func (c *storageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) { + result = &v1beta1.StorageClass{} + err = c.client.Patch(pt). + Resource("storageclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/BUILD deleted file mode 100644 index 68c324f32b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/BUILD +++ /dev/null @@ -1,52 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "clientset.go", - "doc.go", - "import_known_versions.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api/install:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/apps/install:go_default_library", - "//pkg/apis/authentication/install:go_default_library", - "//pkg/apis/authorization/install:go_default_library", - "//pkg/apis/autoscaling/install:go_default_library", - "//pkg/apis/batch/install:go_default_library", - "//pkg/apis/certificates/install:go_default_library", - "//pkg/apis/componentconfig/install:go_default_library", - "//pkg/apis/extensions/install:go_default_library", - "//pkg/apis/policy/install:go_default_library", - "//pkg/apis/rbac/install:go_default_library", - "//pkg/apis/storage/install:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/apps/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/policy/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/storage/internalversion:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/typed/discovery:go_default_library", - "//pkg/util/flowcontrol:go_default_library", - "//plugin/pkg/client/auth:go_default_library", - "//vendor:github.com/golang/glog", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go deleted file mode 100644 index 7819009c80..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go +++ /dev/null @@ 
-1,271 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalclientset - -import ( - "github.com/golang/glog" - internalversionapps "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion" - internalversionauthentication "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion" - internalversionauthorization "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion" - internalversionautoscaling "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion" - internalversionbatch "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" - internalversioncertificates "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion" - internalversioncore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - internalversionextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" - internalversionpolicy "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion" - internalversionrbac "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion" - internalversionstorage "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion" - restclient "k8s.io/kubernetes/pkg/client/restclient" - discovery "k8s.io/kubernetes/pkg/client/typed/discovery" - "k8s.io/kubernetes/pkg/util/flowcontrol" - _ "k8s.io/kubernetes/plugin/pkg/client/auth" -) - -type Interface interface { - Discovery() discovery.DiscoveryInterface - Core() internalversioncore.CoreInterface - - Apps() internalversionapps.AppsInterface - - Authentication() internalversionauthentication.AuthenticationInterface - - Authorization() internalversionauthorization.AuthorizationInterface - - Autoscaling() internalversionautoscaling.AutoscalingInterface - - Batch() internalversionbatch.BatchInterface - - Certificates() internalversioncertificates.CertificatesInterface - - Extensions() internalversionextensions.ExtensionsInterface - - Policy() internalversionpolicy.PolicyInterface - - Rbac() internalversionrbac.RbacInterface - - Storage() internalversionstorage.StorageInterface -} - -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. 
-type Clientset struct { - *discovery.DiscoveryClient - *internalversioncore.CoreClient - *internalversionapps.AppsClient - *internalversionauthentication.AuthenticationClient - *internalversionauthorization.AuthorizationClient - *internalversionautoscaling.AutoscalingClient - *internalversionbatch.BatchClient - *internalversioncertificates.CertificatesClient - *internalversionextensions.ExtensionsClient - *internalversionpolicy.PolicyClient - *internalversionrbac.RbacClient - *internalversionstorage.StorageClient -} - -// Core retrieves the CoreClient -func (c *Clientset) Core() internalversioncore.CoreInterface { - if c == nil { - return nil - } - return c.CoreClient -} - -// Apps retrieves the AppsClient -func (c *Clientset) Apps() internalversionapps.AppsInterface { - if c == nil { - return nil - } - return c.AppsClient -} - -// Authentication retrieves the AuthenticationClient -func (c *Clientset) Authentication() internalversionauthentication.AuthenticationInterface { - if c == nil { - return nil - } - return c.AuthenticationClient -} - -// Authorization retrieves the AuthorizationClient -func (c *Clientset) Authorization() internalversionauthorization.AuthorizationInterface { - if c == nil { - return nil - } - return c.AuthorizationClient -} - -// Autoscaling retrieves the AutoscalingClient -func (c *Clientset) Autoscaling() internalversionautoscaling.AutoscalingInterface { - if c == nil { - return nil - } - return c.AutoscalingClient -} - -// Batch retrieves the BatchClient -func (c *Clientset) Batch() internalversionbatch.BatchInterface { - if c == nil { - return nil - } - return c.BatchClient -} - -// Certificates retrieves the CertificatesClient -func (c *Clientset) Certificates() internalversioncertificates.CertificatesInterface { - if c == nil { - return nil - } - return c.CertificatesClient -} - -// Extensions retrieves the ExtensionsClient -func (c *Clientset) Extensions() internalversionextensions.ExtensionsInterface { - if c == nil { - return nil - } - return c.ExtensionsClient -} - -// Policy retrieves the PolicyClient -func (c *Clientset) Policy() internalversionpolicy.PolicyInterface { - if c == nil { - return nil - } - return c.PolicyClient -} - -// Rbac retrieves the RbacClient -func (c *Clientset) Rbac() internalversionrbac.RbacInterface { - if c == nil { - return nil - } - return c.RbacClient -} - -// Storage retrieves the StorageClient -func (c *Clientset) Storage() internalversionstorage.StorageInterface { - if c == nil { - return nil - } - return c.StorageClient -} - -// Discovery retrieves the DiscoveryClient -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - return c.DiscoveryClient -} - -// NewForConfig creates a new Clientset for the given config. 
-func NewForConfig(c *restclient.Config) (*Clientset, error) { - configShallowCopy := *c - if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { - configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) - } - var clientset Clientset - var err error - clientset.CoreClient, err = internalversioncore.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - clientset.AppsClient, err = internalversionapps.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - clientset.AuthenticationClient, err = internalversionauthentication.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - clientset.AuthorizationClient, err = internalversionauthorization.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - clientset.AutoscalingClient, err = internalversionautoscaling.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - clientset.BatchClient, err = internalversionbatch.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - clientset.CertificatesClient, err = internalversioncertificates.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - clientset.ExtensionsClient, err = internalversionextensions.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - clientset.PolicyClient, err = internalversionpolicy.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - clientset.RbacClient, err = internalversionrbac.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - clientset.StorageClient, err = internalversionstorage.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - - clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) - if err != nil { - glog.Errorf("failed to create the DiscoveryClient: %v", err) - return nil, err - } - return &clientset, nil -} - -// NewForConfigOrDie creates a new Clientset for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *Clientset { - var clientset Clientset - clientset.CoreClient = internalversioncore.NewForConfigOrDie(c) - clientset.AppsClient = internalversionapps.NewForConfigOrDie(c) - clientset.AuthenticationClient = internalversionauthentication.NewForConfigOrDie(c) - clientset.AuthorizationClient = internalversionauthorization.NewForConfigOrDie(c) - clientset.AutoscalingClient = internalversionautoscaling.NewForConfigOrDie(c) - clientset.BatchClient = internalversionbatch.NewForConfigOrDie(c) - clientset.CertificatesClient = internalversioncertificates.NewForConfigOrDie(c) - clientset.ExtensionsClient = internalversionextensions.NewForConfigOrDie(c) - clientset.PolicyClient = internalversionpolicy.NewForConfigOrDie(c) - clientset.RbacClient = internalversionrbac.NewForConfigOrDie(c) - clientset.StorageClient = internalversionstorage.NewForConfigOrDie(c) - - clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &clientset -} - -// New creates a new Clientset for the given RESTClient. 
-func New(c restclient.Interface) *Clientset { - var clientset Clientset - clientset.CoreClient = internalversioncore.New(c) - clientset.AppsClient = internalversionapps.New(c) - clientset.AuthenticationClient = internalversionauthentication.New(c) - clientset.AuthorizationClient = internalversionauthorization.New(c) - clientset.AutoscalingClient = internalversionautoscaling.New(c) - clientset.BatchClient = internalversionbatch.New(c) - clientset.CertificatesClient = internalversioncertificates.New(c) - clientset.ExtensionsClient = internalversionextensions.New(c) - clientset.PolicyClient = internalversionpolicy.New(c) - clientset.RbacClient = internalversionrbac.New(c) - clientset.StorageClient = internalversionstorage.New(c) - - clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) - return &clientset -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/doc.go deleted file mode 100644 index cf081af2ca..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// This package has the automatically generated clientset. 
-package internalclientset diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake/BUILD deleted file mode 100644 index 846af8c57d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake/BUILD +++ /dev/null @@ -1,52 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "clientset_generated.go", - "doc.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/apps/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/policy/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/storage/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake:go_default_library", - "//pkg/client/testing/core:go_default_library", - "//pkg/client/typed/discovery:go_default_library", - "//pkg/client/typed/discovery/fake:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git 
a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go deleted file mode 100644 index e6eac0081c..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go +++ /dev/null @@ -1,138 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apimachinery/registered" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - internalversionapps "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion" - fakeinternalversionapps "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake" - internalversionauthentication "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion" - fakeinternalversionauthentication "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake" - internalversionauthorization "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion" - fakeinternalversionauthorization "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake" - internalversionautoscaling "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion" - fakeinternalversionautoscaling "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake" - internalversionbatch "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" - fakeinternalversionbatch "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake" - internalversioncertificates "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion" - fakeinternalversioncertificates "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake" - internalversioncore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - fakeinternalversioncore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake" - internalversionextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" - fakeinternalversionextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake" - internalversionpolicy "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion" - fakeinternalversionpolicy "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake" - 
internalversionrbac "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion" - fakeinternalversionrbac "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake" - internalversionstorage "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion" - fakeinternalversionstorage "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake" - "k8s.io/kubernetes/pkg/client/testing/core" - "k8s.io/kubernetes/pkg/client/typed/discovery" - fakediscovery "k8s.io/kubernetes/pkg/client/typed/discovery/fake" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/watch" -) - -// NewSimpleClientset returns a clientset that will respond with the provided objects. -// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement -// for a real clientset and is mostly useful in simple unit tests. -func NewSimpleClientset(objects ...runtime.Object) *Clientset { - o := core.NewObjectTracker(api.Scheme, api.Codecs.UniversalDecoder()) - for _, obj := range objects { - if err := o.Add(obj); err != nil { - panic(err) - } - } - - fakePtr := core.Fake{} - fakePtr.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper())) - - fakePtr.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil)) - - return &Clientset{fakePtr} -} - -// Clientset implements clientset.Interface. Meant to be embedded into a -// struct to get a default implementation. This makes faking out just the method -// you want to test easier. -type Clientset struct { - core.Fake -} - -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - return &fakediscovery.FakeDiscovery{Fake: &c.Fake} -} - -var _ clientset.Interface = &Clientset{} - -// Core retrieves the CoreClient -func (c *Clientset) Core() internalversioncore.CoreInterface { - return &fakeinternalversioncore.FakeCore{Fake: &c.Fake} -} - -// Apps retrieves the AppsClient -func (c *Clientset) Apps() internalversionapps.AppsInterface { - return &fakeinternalversionapps.FakeApps{Fake: &c.Fake} -} - -// Authentication retrieves the AuthenticationClient -func (c *Clientset) Authentication() internalversionauthentication.AuthenticationInterface { - return &fakeinternalversionauthentication.FakeAuthentication{Fake: &c.Fake} -} - -// Authorization retrieves the AuthorizationClient -func (c *Clientset) Authorization() internalversionauthorization.AuthorizationInterface { - return &fakeinternalversionauthorization.FakeAuthorization{Fake: &c.Fake} -} - -// Autoscaling retrieves the AutoscalingClient -func (c *Clientset) Autoscaling() internalversionautoscaling.AutoscalingInterface { - return &fakeinternalversionautoscaling.FakeAutoscaling{Fake: &c.Fake} -} - -// Batch retrieves the BatchClient -func (c *Clientset) Batch() internalversionbatch.BatchInterface { - return &fakeinternalversionbatch.FakeBatch{Fake: &c.Fake} -} - -// Certificates retrieves the CertificatesClient -func (c *Clientset) Certificates() internalversioncertificates.CertificatesInterface { - return &fakeinternalversioncertificates.FakeCertificates{Fake: &c.Fake} -} - -// Extensions retrieves the ExtensionsClient -func (c *Clientset) Extensions() internalversionextensions.ExtensionsInterface { - return &fakeinternalversionextensions.FakeExtensions{Fake: &c.Fake} -} - -// Policy retrieves the PolicyClient -func (c 
*Clientset) Policy() internalversionpolicy.PolicyInterface { - return &fakeinternalversionpolicy.FakePolicy{Fake: &c.Fake} -} - -// Rbac retrieves the RbacClient -func (c *Clientset) Rbac() internalversionrbac.RbacInterface { - return &fakeinternalversionrbac.FakeRbac{Fake: &c.Fake} -} - -// Storage retrieves the StorageClient -func (c *Clientset) Storage() internalversionstorage.StorageInterface { - return &fakeinternalversionstorage.FakeStorage{Fake: &c.Fake} -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake/doc.go deleted file mode 100644 index 937b6ba852..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// This package has the automatically generated fake clientset. -package fake diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/import_known_versions.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/import_known_versions.go deleted file mode 100644 index bb9fee8303..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/import_known_versions.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalclientset - -// These imports are the API groups the client will support. 
-import ( - "fmt" - - _ "k8s.io/kubernetes/pkg/api/install" - "k8s.io/kubernetes/pkg/apimachinery/registered" - _ "k8s.io/kubernetes/pkg/apis/apps/install" - _ "k8s.io/kubernetes/pkg/apis/authentication/install" - _ "k8s.io/kubernetes/pkg/apis/authorization/install" - _ "k8s.io/kubernetes/pkg/apis/autoscaling/install" - _ "k8s.io/kubernetes/pkg/apis/batch/install" - _ "k8s.io/kubernetes/pkg/apis/certificates/install" - _ "k8s.io/kubernetes/pkg/apis/componentconfig/install" - _ "k8s.io/kubernetes/pkg/apis/extensions/install" - _ "k8s.io/kubernetes/pkg/apis/policy/install" - _ "k8s.io/kubernetes/pkg/apis/rbac/install" - _ "k8s.io/kubernetes/pkg/apis/storage/install" -) - -func init() { - if missingVersions := registered.ValidateEnvRequestedVersions(); len(missingVersions) != 0 { - panic(fmt.Sprintf("KUBE_API_VERSIONS contains versions that are not installed: %q.", missingVersions)) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/BUILD deleted file mode 100644 index 057a7c8412..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/BUILD +++ /dev/null @@ -1,29 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "apps_client.go", - "doc.go", - "generated_expansion.go", - "statefulset.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/apps:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/apps_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/apps_client.go deleted file mode 100644 index 84ffd5f9fe..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/apps_client.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - registered "k8s.io/kubernetes/pkg/apimachinery/registered" - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -type AppsInterface interface { - RESTClient() restclient.Interface - StatefulSetsGetter -} - -// AppsClient is used to interact with features provided by the k8s.io/kubernetes/pkg/apimachinery/registered.Group group. 
-type AppsClient struct { - restClient restclient.Interface -} - -func (c *AppsClient) StatefulSets(namespace string) StatefulSetInterface { - return newStatefulSets(c, namespace) -} - -// NewForConfig creates a new AppsClient for the given config. -func NewForConfig(c *restclient.Config) (*AppsClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AppsClient{client}, nil -} - -// NewForConfigOrDie creates a new AppsClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *AppsClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AppsClient for the given RESTClient. -func New(c restclient.Interface) *AppsClient { - return &AppsClient{c} -} - -func setConfigDefaults(config *restclient.Config) error { - // if apps group is not registered, return an error - g, err := registered.Group("apps") - if err != nil { - return err - } - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != g.GroupVersion.Group { - copyGroupVersion := g.GroupVersion - config.GroupVersion = &copyGroupVersion - } - config.NegotiatedSerializer = api.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *AppsClient) RESTClient() restclient.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/doc.go deleted file mode 100644 index d908962a44..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// This package has the automatically generated typed clients. 
-package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/BUILD deleted file mode 100644 index dcf23b3687..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/BUILD +++ /dev/null @@ -1,31 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fake_apps_client.go", - "fake_statefulset.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apis/apps:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/apps/internalversion:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/testing/core:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/doc.go deleted file mode 100644 index 083f78f3a4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// Package fake has the automatically generated clients. -package fake diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/fake_apps_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/fake_apps_client.go deleted file mode 100644 index f38c729600..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/fake_apps_client.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - internalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion" - restclient "k8s.io/kubernetes/pkg/client/restclient" - core "k8s.io/kubernetes/pkg/client/testing/core" -) - -type FakeApps struct { - *core.Fake -} - -func (c *FakeApps) StatefulSets(namespace string) internalversion.StatefulSetInterface { - return &FakeStatefulSets{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeApps) RESTClient() restclient.Interface { - var ret *restclient.RESTClient - return ret -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/fake_statefulset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/fake_statefulset.go deleted file mode 100644 index be5392632a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/fake_statefulset.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - apps "k8s.io/kubernetes/pkg/apis/apps" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeStatefulSets implements StatefulSetInterface -type FakeStatefulSets struct { - Fake *FakeApps - ns string -} - -var statefulsetsResource = unversioned.GroupVersionResource{Group: "apps", Version: "", Resource: "statefulsets"} - -func (c *FakeStatefulSets) Create(statefulSet *apps.StatefulSet) (result *apps.StatefulSet, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &apps.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*apps.StatefulSet), err -} - -func (c *FakeStatefulSets) Update(statefulSet *apps.StatefulSet) (result *apps.StatefulSet, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &apps.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*apps.StatefulSet), err -} - -func (c *FakeStatefulSets) UpdateStatus(statefulSet *apps.StatefulSet) (*apps.StatefulSet, error) { - obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &apps.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*apps.StatefulSet), err -} - -func (c *FakeStatefulSets) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(core.NewDeleteAction(statefulsetsResource, c.ns, name), &apps.StatefulSet{}) - - return err -} - -func (c *FakeStatefulSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &apps.StatefulSetList{}) - return err -} - -func (c *FakeStatefulSets) Get(name string) (result *apps.StatefulSet, err error) { - obj, err := c.Fake. - Invokes(core.NewGetAction(statefulsetsResource, c.ns, name), &apps.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*apps.StatefulSet), err -} - -func (c *FakeStatefulSets) List(opts api.ListOptions) (result *apps.StatefulSetList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(statefulsetsResource, c.ns, opts), &apps.StatefulSetList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &apps.StatefulSetList{} - for _, item := range obj.(*apps.StatefulSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *FakeStatefulSets) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewWatchAction(statefulsetsResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *FakeStatefulSets) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *apps.StatefulSet, err error) { - obj, err := c.Fake. - Invokes(core.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, data, subresources...), &apps.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*apps.StatefulSet), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/generated_expansion.go deleted file mode 100644 index 14e220e7a3..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/generated_expansion.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/statefulset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/statefulset.go deleted file mode 100644 index 18089e70a6..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/statefulset.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - apps "k8s.io/kubernetes/pkg/apis/apps" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// StatefulSetsGetter has a method to return a StatefulSetInterface. -// A group's client should implement this interface. -type StatefulSetsGetter interface { - StatefulSets(namespace string) StatefulSetInterface -} - -// StatefulSetInterface has methods to work with StatefulSet resources. -type StatefulSetInterface interface { - Create(*apps.StatefulSet) (*apps.StatefulSet, error) - Update(*apps.StatefulSet) (*apps.StatefulSet, error) - UpdateStatus(*apps.StatefulSet) (*apps.StatefulSet, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*apps.StatefulSet, error) - List(opts api.ListOptions) (*apps.StatefulSetList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *apps.StatefulSet, err error) - StatefulSetExpansion -} - -// statefulSets implements StatefulSetInterface -type statefulSets struct { - client restclient.Interface - ns string -} - -// newStatefulSets returns a StatefulSets -func newStatefulSets(c *AppsClient, namespace string) *statefulSets { - return &statefulSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(statefulSet *apps.StatefulSet) (result *apps.StatefulSet, err error) { - result = &apps.StatefulSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("statefulsets"). - Body(statefulSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(statefulSet *apps.StatefulSet) (result *apps.StatefulSet, err error) { - result = &apps.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - Body(statefulSet). - Do(). - Into(result) - return -} - -func (c *statefulSets) UpdateStatus(statefulSet *apps.StatefulSet) (result *apps.StatefulSet, err error) { - result = &apps.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - SubResource("status"). - Body(statefulSet). - Do(). - Into(result) - return -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *statefulSets) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *statefulSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *statefulSets) Get(name string) (result *apps.StatefulSet, err error) { - result = &apps.StatefulSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *statefulSets) List(opts api.ListOptions) (result *apps.StatefulSetList, err error) { - result = &apps.StatefulSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *statefulSets) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *statefulSets) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *apps.StatefulSet, err error) { - result = &apps.StatefulSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("statefulsets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/BUILD deleted file mode 100644 index ee345d2536..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/BUILD +++ /dev/null @@ -1,29 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "authentication_client.go", - "doc.go", - "generated_expansion.go", - "tokenreview.go", - "tokenreview_expansion.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/authentication:go_default_library", - "//pkg/client/restclient:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/authentication_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/authentication_client.go deleted file mode 100644 index a4f9bdef4c..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/authentication_client.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - registered "k8s.io/kubernetes/pkg/apimachinery/registered" - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -type AuthenticationInterface interface { - RESTClient() restclient.Interface - TokenReviewsGetter -} - -// AuthenticationClient is used to interact with features provided by the k8s.io/kubernetes/pkg/apimachinery/registered.Group group. -type AuthenticationClient struct { - restClient restclient.Interface -} - -func (c *AuthenticationClient) TokenReviews() TokenReviewInterface { - return newTokenReviews(c) -} - -// NewForConfig creates a new AuthenticationClient for the given config. -func NewForConfig(c *restclient.Config) (*AuthenticationClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AuthenticationClient{client}, nil -} - -// NewForConfigOrDie creates a new AuthenticationClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *AuthenticationClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AuthenticationClient for the given RESTClient. -func New(c restclient.Interface) *AuthenticationClient { - return &AuthenticationClient{c} -} - -func setConfigDefaults(config *restclient.Config) error { - // if authentication group is not registered, return an error - g, err := registered.Group("authentication.k8s.io") - if err != nil { - return err - } - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != g.GroupVersion.Group { - copyGroupVersion := g.GroupVersion - config.GroupVersion = &copyGroupVersion - } - config.NegotiatedSerializer = api.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *AuthenticationClient) RESTClient() restclient.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/doc.go deleted file mode 100644 index d908962a44..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// This package has the automatically generated typed clients. -package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/BUILD deleted file mode 100644 index 1d9db2959a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/BUILD +++ /dev/null @@ -1,29 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fake_authentication_client.go", - "fake_generated_expansion.go", - "fake_tokenreview.go", - "fake_tokenreview_expansion.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/apis/authentication:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/testing/core:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/doc.go deleted file mode 100644 index 083f78f3a4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// Package fake has the automatically generated clients. -package fake diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/fake_authentication_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/fake_authentication_client.go deleted file mode 100644 index ca8db0215b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/fake_authentication_client.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - internalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion" - restclient "k8s.io/kubernetes/pkg/client/restclient" - core "k8s.io/kubernetes/pkg/client/testing/core" -) - -type FakeAuthentication struct { - *core.Fake -} - -func (c *FakeAuthentication) TokenReviews() internalversion.TokenReviewInterface { - return &FakeTokenReviews{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeAuthentication) RESTClient() restclient.Interface { - var ret *restclient.RESTClient - return ret -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/fake_tokenreview.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/fake_tokenreview.go deleted file mode 100644 index 6508f5a94a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/fake_tokenreview.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -// FakeTokenReviews implements TokenReviewInterface -type FakeTokenReviews struct { - Fake *FakeAuthentication -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/fake_tokenreview_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/fake_tokenreview_expansion.go deleted file mode 100644 index b00de0e6b5..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/fake_tokenreview_expansion.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - authenticationapi "k8s.io/kubernetes/pkg/apis/authentication" - "k8s.io/kubernetes/pkg/client/testing/core" -) - -func (c *FakeTokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { - obj, err := c.Fake.Invokes(core.NewRootCreateAction(authenticationapi.SchemeGroupVersion.WithResource("tokenreviews"), tokenReview), &authenticationapi.TokenReview{}) - return obj.(*authenticationapi.TokenReview), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/generated_expansion.go deleted file mode 100644 index 85214a8140..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/generated_expansion.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/tokenreview.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/tokenreview.go deleted file mode 100644 index 9987d63e2e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/tokenreview.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -// TokenReviewsGetter has a method to return a TokenReviewInterface. -// A group's client should implement this interface. -type TokenReviewsGetter interface { - TokenReviews() TokenReviewInterface -} - -// TokenReviewInterface has methods to work with TokenReview resources. 
-type TokenReviewInterface interface { - TokenReviewExpansion -} - -// tokenReviews implements TokenReviewInterface -type tokenReviews struct { - client restclient.Interface -} - -// newTokenReviews returns a TokenReviews -func newTokenReviews(c *AuthenticationClient) *tokenReviews { - return &tokenReviews{ - client: c.RESTClient(), - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/tokenreview_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/tokenreview_expansion.go deleted file mode 100644 index 57bdc3ccda..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/tokenreview_expansion.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - authenticationapi "k8s.io/kubernetes/pkg/apis/authentication" -) - -type TokenReviewExpansion interface { - Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) -} - -func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { - result = &authenticationapi.TokenReview{} - err = c.client.Post(). - Resource("tokenreviews"). - Body(tokenReview). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/BUILD deleted file mode 100644 index f8d2e8701e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "authorization_client.go", - "doc.go", - "generated_expansion.go", - "localsubjectaccessreview.go", - "localsubjectaccessreview_expansion.go", - "selfsubjectaccessreview.go", - "selfsubjectaccessreview_expansion.go", - "subjectaccessreview.go", - "subjectaccessreview_expansion.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/authorization:go_default_library", - "//pkg/client/restclient:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/authorization_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/authorization_client.go deleted file mode 100644 index b9b85f6370..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/authorization_client.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - registered "k8s.io/kubernetes/pkg/apimachinery/registered" - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -type AuthorizationInterface interface { - RESTClient() restclient.Interface - LocalSubjectAccessReviewsGetter - SelfSubjectAccessReviewsGetter - SubjectAccessReviewsGetter -} - -// AuthorizationClient is used to interact with features provided by the k8s.io/kubernetes/pkg/apimachinery/registered.Group group. -type AuthorizationClient struct { - restClient restclient.Interface -} - -func (c *AuthorizationClient) LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface { - return newLocalSubjectAccessReviews(c, namespace) -} - -func (c *AuthorizationClient) SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface { - return newSelfSubjectAccessReviews(c) -} - -func (c *AuthorizationClient) SubjectAccessReviews() SubjectAccessReviewInterface { - return newSubjectAccessReviews(c) -} - -// NewForConfig creates a new AuthorizationClient for the given config. 
-func NewForConfig(c *restclient.Config) (*AuthorizationClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AuthorizationClient{client}, nil -} - -// NewForConfigOrDie creates a new AuthorizationClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *AuthorizationClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AuthorizationClient for the given RESTClient. -func New(c restclient.Interface) *AuthorizationClient { - return &AuthorizationClient{c} -} - -func setConfigDefaults(config *restclient.Config) error { - // if authorization group is not registered, return an error - g, err := registered.Group("authorization.k8s.io") - if err != nil { - return err - } - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != g.GroupVersion.Group { - copyGroupVersion := g.GroupVersion - config.GroupVersion = &copyGroupVersion - } - config.NegotiatedSerializer = api.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *AuthorizationClient) RESTClient() restclient.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/doc.go deleted file mode 100644 index d908962a44..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// This package has the automatically generated typed clients. 
-package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/BUILD deleted file mode 100644 index 448df0e4e7..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fake_authorization_client.go", - "fake_generated_expansion.go", - "fake_localsubjectaccessreview.go", - "fake_localsubjectaccessreview_expansion.go", - "fake_selfsubjectaccessreview.go", - "fake_selfsubjectaccessreview_expansion.go", - "fake_subjectaccessreview.go", - "fake_subjectaccessreview_expansion.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/apis/authorization:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/testing/core:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/doc.go deleted file mode 100644 index 083f78f3a4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// Package fake has the automatically generated clients. -package fake diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_authorization_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_authorization_client.go deleted file mode 100644 index ce668628f6..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_authorization_client.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - internalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion" - restclient "k8s.io/kubernetes/pkg/client/restclient" - core "k8s.io/kubernetes/pkg/client/testing/core" -) - -type FakeAuthorization struct { - *core.Fake -} - -func (c *FakeAuthorization) LocalSubjectAccessReviews(namespace string) internalversion.LocalSubjectAccessReviewInterface { - return &FakeLocalSubjectAccessReviews{c, namespace} -} - -func (c *FakeAuthorization) SelfSubjectAccessReviews() internalversion.SelfSubjectAccessReviewInterface { - return &FakeSelfSubjectAccessReviews{c} -} - -func (c *FakeAuthorization) SubjectAccessReviews() internalversion.SubjectAccessReviewInterface { - return &FakeSubjectAccessReviews{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeAuthorization) RESTClient() restclient.Interface { - var ret *restclient.RESTClient - return ret -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_generated_expansion.go deleted file mode 100644 index 8754e39d87..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_generated_expansion.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_localsubjectaccessreview.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_localsubjectaccessreview.go deleted file mode 100644 index 50a1e3a899..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_localsubjectaccessreview.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -// FakeLocalSubjectAccessReviews implements LocalSubjectAccessReviewInterface -type FakeLocalSubjectAccessReviews struct { - Fake *FakeAuthorization - ns string -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_localsubjectaccessreview_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_localsubjectaccessreview_expansion.go deleted file mode 100644 index 6e6164aafa..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_localsubjectaccessreview_expansion.go +++ /dev/null @@ -1,28 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - authorizationapi "k8s.io/kubernetes/pkg/apis/authorization" - - "k8s.io/kubernetes/pkg/client/testing/core" -) - -func (c *FakeLocalSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { - obj, err := c.Fake.Invokes(core.NewCreateAction(authorizationapi.SchemeGroupVersion.WithResource("localsubjectaccessreviews"), c.ns, sar), &authorizationapi.SubjectAccessReview{}) - return obj.(*authorizationapi.LocalSubjectAccessReview), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_selfsubjectaccessreview.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_selfsubjectaccessreview.go deleted file mode 100644 index b107c1b2c4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_selfsubjectaccessreview.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -// FakeSelfSubjectAccessReviews implements SelfSubjectAccessReviewInterface -type FakeSelfSubjectAccessReviews struct { - Fake *FakeAuthorization -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_selfsubjectaccessreview_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_selfsubjectaccessreview_expansion.go deleted file mode 100644 index 15f9c8d234..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_selfsubjectaccessreview_expansion.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - authorizationapi "k8s.io/kubernetes/pkg/apis/authorization" - "k8s.io/kubernetes/pkg/client/testing/core" -) - -func (c *FakeSelfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { - obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("selfsubjectaccessreviews"), sar), &authorizationapi.SelfSubjectAccessReview{}) - return obj.(*authorizationapi.SelfSubjectAccessReview), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_subjectaccessreview.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_subjectaccessreview.go deleted file mode 100644 index 0a01cd81a4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_subjectaccessreview.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -// FakeSubjectAccessReviews implements SubjectAccessReviewInterface -type FakeSubjectAccessReviews struct { - Fake *FakeAuthorization -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_subjectaccessreview_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_subjectaccessreview_expansion.go deleted file mode 100644 index 54eed3652d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_subjectaccessreview_expansion.go +++ /dev/null @@ -1,28 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - authorizationapi "k8s.io/kubernetes/pkg/apis/authorization" - - "k8s.io/kubernetes/pkg/client/testing/core" -) - -func (c *FakeSubjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { - obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("subjectaccessreviews"), sar), &authorizationapi.SubjectAccessReview{}) - return obj.(*authorizationapi.SubjectAccessReview), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/generated_expansion.go deleted file mode 100644 index 85214a8140..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/generated_expansion.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/localsubjectaccessreview.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/localsubjectaccessreview.go deleted file mode 100644 index 5277a227d7..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/localsubjectaccessreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -// LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface. -// A group's client should implement this interface. -type LocalSubjectAccessReviewsGetter interface { - LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface -} - -// LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources. -type LocalSubjectAccessReviewInterface interface { - LocalSubjectAccessReviewExpansion -} - -// localSubjectAccessReviews implements LocalSubjectAccessReviewInterface -type localSubjectAccessReviews struct { - client restclient.Interface - ns string -} - -// newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews -func newLocalSubjectAccessReviews(c *AuthorizationClient, namespace string) *localSubjectAccessReviews { - return &localSubjectAccessReviews{ - client: c.RESTClient(), - ns: namespace, - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/localsubjectaccessreview_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/localsubjectaccessreview_expansion.go deleted file mode 100644 index b49970bfea..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/localsubjectaccessreview_expansion.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - authorizationapi "k8s.io/kubernetes/pkg/apis/authorization" -) - -type LocalSubjectAccessReviewExpansion interface { - Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) -} - -func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { - result = &authorizationapi.LocalSubjectAccessReview{} - err = c.client.Post(). - Namespace(c.ns). - Resource("localsubjectaccessreviews"). - Body(sar). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectaccessreview.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectaccessreview.go deleted file mode 100644 index 7e22f832b3..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectaccessreview.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -// SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface. -// A group's client should implement this interface. -type SelfSubjectAccessReviewsGetter interface { - SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface -} - -// SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources. -type SelfSubjectAccessReviewInterface interface { - SelfSubjectAccessReviewExpansion -} - -// selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface -type selfSubjectAccessReviews struct { - client restclient.Interface -} - -// newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews -func newSelfSubjectAccessReviews(c *AuthorizationClient) *selfSubjectAccessReviews { - return &selfSubjectAccessReviews{ - client: c.RESTClient(), - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectaccessreview_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectaccessreview_expansion.go deleted file mode 100644 index fcfe9e97b5..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectaccessreview_expansion.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internalversion - -import ( - authorizationapi "k8s.io/kubernetes/pkg/apis/authorization" -) - -type SelfSubjectAccessReviewExpansion interface { - Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) -} - -func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { - result = &authorizationapi.SelfSubjectAccessReview{} - err = c.client.Post(). - Resource("selfsubjectaccessreviews"). - Body(sar). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/subjectaccessreview.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/subjectaccessreview.go deleted file mode 100644 index 344bcebf09..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/subjectaccessreview.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -// SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface. -// A group's client should implement this interface. -type SubjectAccessReviewsGetter interface { - SubjectAccessReviews() SubjectAccessReviewInterface -} - -// SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources. -type SubjectAccessReviewInterface interface { - SubjectAccessReviewExpansion -} - -// subjectAccessReviews implements SubjectAccessReviewInterface -type subjectAccessReviews struct { - client restclient.Interface -} - -// newSubjectAccessReviews returns a SubjectAccessReviews -func newSubjectAccessReviews(c *AuthorizationClient) *subjectAccessReviews { - return &subjectAccessReviews{ - client: c.RESTClient(), - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/subjectaccessreview_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/subjectaccessreview_expansion.go deleted file mode 100644 index 44d672d604..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/subjectaccessreview_expansion.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - authorizationapi "k8s.io/kubernetes/pkg/apis/authorization" -) - -type SubjectAccessReviewExpansion interface { - Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) -} - -func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { - result = &authorizationapi.SubjectAccessReview{} - err = c.client.Post(). - Resource("subjectaccessreviews"). - Body(sar). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/BUILD deleted file mode 100644 index 180c3c20a5..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/BUILD +++ /dev/null @@ -1,29 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "autoscaling_client.go", - "doc.go", - "generated_expansion.go", - "horizontalpodautoscaler.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/autoscaling:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/autoscaling_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/autoscaling_client.go deleted file mode 100644 index 0291b6d6c8..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/autoscaling_client.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - registered "k8s.io/kubernetes/pkg/apimachinery/registered" - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -type AutoscalingInterface interface { - RESTClient() restclient.Interface - HorizontalPodAutoscalersGetter -} - -// AutoscalingClient is used to interact with features provided by the k8s.io/kubernetes/pkg/apimachinery/registered.Group group. 
-type AutoscalingClient struct { - restClient restclient.Interface -} - -func (c *AutoscalingClient) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { - return newHorizontalPodAutoscalers(c, namespace) -} - -// NewForConfig creates a new AutoscalingClient for the given config. -func NewForConfig(c *restclient.Config) (*AutoscalingClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AutoscalingClient{client}, nil -} - -// NewForConfigOrDie creates a new AutoscalingClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *AutoscalingClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AutoscalingClient for the given RESTClient. -func New(c restclient.Interface) *AutoscalingClient { - return &AutoscalingClient{c} -} - -func setConfigDefaults(config *restclient.Config) error { - // if autoscaling group is not registered, return an error - g, err := registered.Group("autoscaling") - if err != nil { - return err - } - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != g.GroupVersion.Group { - copyGroupVersion := g.GroupVersion - config.GroupVersion = &copyGroupVersion - } - config.NegotiatedSerializer = api.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *AutoscalingClient) RESTClient() restclient.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/doc.go deleted file mode 100644 index d908962a44..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// This package has the automatically generated typed clients. 
-package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/BUILD deleted file mode 100644 index 88190760f5..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/BUILD +++ /dev/null @@ -1,31 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fake_autoscaling_client.go", - "fake_horizontalpodautoscaler.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apis/autoscaling:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/testing/core:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/doc.go deleted file mode 100644 index 083f78f3a4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// Package fake has the automatically generated clients. -package fake diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/fake_autoscaling_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/fake_autoscaling_client.go deleted file mode 100644 index b36326f4a2..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/fake_autoscaling_client.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - internalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion" - restclient "k8s.io/kubernetes/pkg/client/restclient" - core "k8s.io/kubernetes/pkg/client/testing/core" -) - -type FakeAutoscaling struct { - *core.Fake -} - -func (c *FakeAutoscaling) HorizontalPodAutoscalers(namespace string) internalversion.HorizontalPodAutoscalerInterface { - return &FakeHorizontalPodAutoscalers{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeAutoscaling) RESTClient() restclient.Interface { - var ret *restclient.RESTClient - return ret -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/fake_horizontalpodautoscaler.go deleted file mode 100644 index 408d39b71a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/fake_horizontalpodautoscaler.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface -type FakeHorizontalPodAutoscalers struct { - Fake *FakeAutoscaling - ns string -} - -var horizontalpodautoscalersResource = unversioned.GroupVersionResource{Group: "autoscaling", Version: "", Resource: "horizontalpodautoscalers"} - -func (c *FakeHorizontalPodAutoscalers) Create(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &autoscaling.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*autoscaling.HorizontalPodAutoscaler), err -} - -func (c *FakeHorizontalPodAutoscalers) Update(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. 
- Invokes(core.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &autoscaling.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*autoscaling.HorizontalPodAutoscaler), err -} - -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) { - obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &autoscaling.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*autoscaling.HorizontalPodAutoscaler), err -} - -func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewDeleteAction(horizontalpodautoscalersResource, c.ns, name), &autoscaling.HorizontalPodAutoscaler{}) - - return err -} - -func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &autoscaling.HorizontalPodAutoscalerList{}) - return err -} - -func (c *FakeHorizontalPodAutoscalers) Get(name string) (result *autoscaling.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. - Invokes(core.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &autoscaling.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*autoscaling.HorizontalPodAutoscaler), err -} - -func (c *FakeHorizontalPodAutoscalers) List(opts api.ListOptions) (result *autoscaling.HorizontalPodAutoscalerList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(horizontalpodautoscalersResource, c.ns, opts), &autoscaling.HorizontalPodAutoscalerList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &autoscaling.HorizontalPodAutoscalerList{} - for _, item := range obj.(*autoscaling.HorizontalPodAutoscalerList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *FakeHorizontalPodAutoscalers) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *autoscaling.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. 
- Invokes(core.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, data, subresources...), &autoscaling.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*autoscaling.HorizontalPodAutoscaler), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/generated_expansion.go deleted file mode 100644 index 386fb42223..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/generated_expansion.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -type HorizontalPodAutoscalerExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/horizontalpodautoscaler.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/horizontalpodautoscaler.go deleted file mode 100644 index fea42bb821..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/horizontalpodautoscaler.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. -// A group's client should implement this interface. -type HorizontalPodAutoscalersGetter interface { - HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface -} - -// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. 
-type HorizontalPodAutoscalerInterface interface { - Create(*autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) - Update(*autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) - UpdateStatus(*autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*autoscaling.HorizontalPodAutoscaler, error) - List(opts api.ListOptions) (*autoscaling.HorizontalPodAutoscalerList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *autoscaling.HorizontalPodAutoscaler, err error) - HorizontalPodAutoscalerExpansion -} - -// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface -type horizontalPodAutoscalers struct { - client restclient.Interface - ns string -} - -// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers -func newHorizontalPodAutoscalers(c *AutoscalingClient, namespace string) *horizontalPodAutoscalers { - return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { - result = &autoscaling.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Body(horizontalPodAutoscaler). - Do(). - Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { - result = &autoscaling.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - Body(horizontalPodAutoscaler). - Do(). - Into(result) - return -} - -func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { - result = &autoscaling.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - Body(horizontalPodAutoscaler). - Do(). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). 
- Error() -} - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(name string) (result *autoscaling.HorizontalPodAutoscaler, err error) { - result = &autoscaling.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(opts api.ListOptions) (result *autoscaling.HorizontalPodAutoscalerList, err error) { - result = &autoscaling.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalers) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *autoscaling.HorizontalPodAutoscaler, err error) { - result = &autoscaling.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/BUILD deleted file mode 100644 index 6d839f0fff..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/BUILD +++ /dev/null @@ -1,30 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "batch_client.go", - "cronjob.go", - "doc.go", - "generated_expansion.go", - "job.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/batch:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/batch_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/batch_client.go deleted file mode 100644 index c1b62f1f29..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/batch_client.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - registered "k8s.io/kubernetes/pkg/apimachinery/registered" - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -type BatchInterface interface { - RESTClient() restclient.Interface - CronJobsGetter - JobsGetter -} - -// BatchClient is used to interact with features provided by the k8s.io/kubernetes/pkg/apimachinery/registered.Group group. -type BatchClient struct { - restClient restclient.Interface -} - -func (c *BatchClient) CronJobs(namespace string) CronJobInterface { - return newCronJobs(c, namespace) -} - -func (c *BatchClient) Jobs(namespace string) JobInterface { - return newJobs(c, namespace) -} - -// NewForConfig creates a new BatchClient for the given config. -func NewForConfig(c *restclient.Config) (*BatchClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &BatchClient{client}, nil -} - -// NewForConfigOrDie creates a new BatchClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *BatchClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new BatchClient for the given RESTClient. -func New(c restclient.Interface) *BatchClient { - return &BatchClient{c} -} - -func setConfigDefaults(config *restclient.Config) error { - // if batch group is not registered, return an error - g, err := registered.Group("batch") - if err != nil { - return err - } - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != g.GroupVersion.Group { - copyGroupVersion := g.GroupVersion - config.GroupVersion = &copyGroupVersion - } - config.NegotiatedSerializer = api.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *BatchClient) RESTClient() restclient.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/cronjob.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/cronjob.go deleted file mode 100644 index 2c17896aaa..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/cronjob.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - batch "k8s.io/kubernetes/pkg/apis/batch" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// CronJobsGetter has a method to return a CronJobInterface. -// A group's client should implement this interface. -type CronJobsGetter interface { - CronJobs(namespace string) CronJobInterface -} - -// CronJobInterface has methods to work with CronJob resources. -type CronJobInterface interface { - Create(*batch.CronJob) (*batch.CronJob, error) - Update(*batch.CronJob) (*batch.CronJob, error) - UpdateStatus(*batch.CronJob) (*batch.CronJob, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*batch.CronJob, error) - List(opts api.ListOptions) (*batch.CronJobList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *batch.CronJob, err error) - CronJobExpansion -} - -// cronJobs implements CronJobInterface -type cronJobs struct { - client restclient.Interface - ns string -} - -// newCronJobs returns a CronJobs -func newCronJobs(c *BatchClient, namespace string) *cronJobs { - return &cronJobs{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Create(cronJob *batch.CronJob) (result *batch.CronJob, err error) { - result = &batch.CronJob{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cronjobs"). - Body(cronJob). - Do(). - Into(result) - return -} - -// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Update(cronJob *batch.CronJob) (result *batch.CronJob, err error) { - result = &batch.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - Body(cronJob). - Do(). - Into(result) - return -} - -func (c *cronJobs) UpdateStatus(cronJob *batch.CronJob) (result *batch.CronJob, err error) { - result = &batch.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - SubResource("status"). - Body(cronJob). - Do(). - Into(result) - return -} - -// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. -func (c *cronJobs) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cronJobs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). 
- Error() -} - -// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. -func (c *cronJobs) Get(name string) (result *batch.CronJob, err error) { - result = &batch.CronJob{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CronJobs that match those selectors. -func (c *cronJobs) List(opts api.ListOptions) (result *batch.CronJobList, err error) { - result = &batch.CronJobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cronJobs. -func (c *cronJobs) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched cronJob. -func (c *cronJobs) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *batch.CronJob, err error) { - result = &batch.CronJob{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cronjobs"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/doc.go deleted file mode 100644 index d908962a44..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// This package has the automatically generated typed clients. 
-package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/BUILD deleted file mode 100644 index 28b1ec20a7..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/BUILD +++ /dev/null @@ -1,32 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fake_batch_client.go", - "fake_cronjob.go", - "fake_job.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apis/batch:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/testing/core:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/doc.go deleted file mode 100644 index 083f78f3a4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// Package fake has the automatically generated clients. -package fake diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/fake_batch_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/fake_batch_client.go deleted file mode 100644 index bc7695b735..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/fake_batch_client.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - internalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" - restclient "k8s.io/kubernetes/pkg/client/restclient" - core "k8s.io/kubernetes/pkg/client/testing/core" -) - -type FakeBatch struct { - *core.Fake -} - -func (c *FakeBatch) CronJobs(namespace string) internalversion.CronJobInterface { - return &FakeCronJobs{c, namespace} -} - -func (c *FakeBatch) Jobs(namespace string) internalversion.JobInterface { - return &FakeJobs{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeBatch) RESTClient() restclient.Interface { - var ret *restclient.RESTClient - return ret -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/fake_cronjob.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/fake_cronjob.go deleted file mode 100644 index aa2d7e5b9e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/fake_cronjob.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - batch "k8s.io/kubernetes/pkg/apis/batch" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeCronJobs implements CronJobInterface -type FakeCronJobs struct { - Fake *FakeBatch - ns string -} - -var cronjobsResource = unversioned.GroupVersionResource{Group: "batch", Version: "", Resource: "cronjobs"} - -func (c *FakeCronJobs) Create(cronJob *batch.CronJob) (result *batch.CronJob, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(cronjobsResource, c.ns, cronJob), &batch.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*batch.CronJob), err -} - -func (c *FakeCronJobs) Update(cronJob *batch.CronJob) (result *batch.CronJob, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(cronjobsResource, c.ns, cronJob), &batch.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*batch.CronJob), err -} - -func (c *FakeCronJobs) UpdateStatus(cronJob *batch.CronJob) (*batch.CronJob, error) { - obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction(cronjobsResource, "status", c.ns, cronJob), &batch.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*batch.CronJob), err -} - -func (c *FakeCronJobs) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(core.NewDeleteAction(cronjobsResource, c.ns, name), &batch.CronJob{}) - - return err -} - -func (c *FakeCronJobs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(cronjobsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &batch.CronJobList{}) - return err -} - -func (c *FakeCronJobs) Get(name string) (result *batch.CronJob, err error) { - obj, err := c.Fake. - Invokes(core.NewGetAction(cronjobsResource, c.ns, name), &batch.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*batch.CronJob), err -} - -func (c *FakeCronJobs) List(opts api.ListOptions) (result *batch.CronJobList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(cronjobsResource, c.ns, opts), &batch.CronJobList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &batch.CronJobList{} - for _, item := range obj.(*batch.CronJobList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cronJobs. -func (c *FakeCronJobs) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewWatchAction(cronjobsResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched cronJob. -func (c *FakeCronJobs) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *batch.CronJob, err error) { - obj, err := c.Fake. - Invokes(core.NewPatchSubresourceAction(cronjobsResource, c.ns, name, data, subresources...), &batch.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*batch.CronJob), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/fake_job.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/fake_job.go deleted file mode 100644 index 1914c73b1e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/fake_job.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - batch "k8s.io/kubernetes/pkg/apis/batch" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeJobs implements JobInterface -type FakeJobs struct { - Fake *FakeBatch - ns string -} - -var jobsResource = unversioned.GroupVersionResource{Group: "batch", Version: "", Resource: "jobs"} - -func (c *FakeJobs) Create(job *batch.Job) (result *batch.Job, err error) { - obj, err := c.Fake. 
- Invokes(core.NewCreateAction(jobsResource, c.ns, job), &batch.Job{}) - - if obj == nil { - return nil, err - } - return obj.(*batch.Job), err -} - -func (c *FakeJobs) Update(job *batch.Job) (result *batch.Job, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(jobsResource, c.ns, job), &batch.Job{}) - - if obj == nil { - return nil, err - } - return obj.(*batch.Job), err -} - -func (c *FakeJobs) UpdateStatus(job *batch.Job) (*batch.Job, error) { - obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction(jobsResource, "status", c.ns, job), &batch.Job{}) - - if obj == nil { - return nil, err - } - return obj.(*batch.Job), err -} - -func (c *FakeJobs) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewDeleteAction(jobsResource, c.ns, name), &batch.Job{}) - - return err -} - -func (c *FakeJobs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(jobsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &batch.JobList{}) - return err -} - -func (c *FakeJobs) Get(name string) (result *batch.Job, err error) { - obj, err := c.Fake. - Invokes(core.NewGetAction(jobsResource, c.ns, name), &batch.Job{}) - - if obj == nil { - return nil, err - } - return obj.(*batch.Job), err -} - -func (c *FakeJobs) List(opts api.ListOptions) (result *batch.JobList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(jobsResource, c.ns, opts), &batch.JobList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &batch.JobList{} - for _, item := range obj.(*batch.JobList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested jobs. -func (c *FakeJobs) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewWatchAction(jobsResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched job. -func (c *FakeJobs) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *batch.Job, err error) { - obj, err := c.Fake. - Invokes(core.NewPatchSubresourceAction(jobsResource, c.ns, name, data, subresources...), &batch.Job{}) - - if obj == nil { - return nil, err - } - return obj.(*batch.Job), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/generated_expansion.go deleted file mode 100644 index 866c057afc..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internalversion - -type CronJobExpansion interface{} - -type JobExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/job.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/job.go deleted file mode 100644 index f5fb8ffa22..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/job.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - batch "k8s.io/kubernetes/pkg/apis/batch" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// JobsGetter has a method to return a JobInterface. -// A group's client should implement this interface. -type JobsGetter interface { - Jobs(namespace string) JobInterface -} - -// JobInterface has methods to work with Job resources. -type JobInterface interface { - Create(*batch.Job) (*batch.Job, error) - Update(*batch.Job) (*batch.Job, error) - UpdateStatus(*batch.Job) (*batch.Job, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*batch.Job, error) - List(opts api.ListOptions) (*batch.JobList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *batch.Job, err error) - JobExpansion -} - -// jobs implements JobInterface -type jobs struct { - client restclient.Interface - ns string -} - -// newJobs returns a Jobs -func newJobs(c *BatchClient, namespace string) *jobs { - return &jobs{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Create(job *batch.Job) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.client.Post(). - Namespace(c.ns). - Resource("jobs"). - Body(job). - Do(). - Into(result) - return -} - -// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Update(job *batch.Job) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.client.Put(). - Namespace(c.ns). - Resource("jobs"). - Name(job.Name). - Body(job). - Do(). - Into(result) - return -} - -func (c *jobs) UpdateStatus(job *batch.Job) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.client.Put(). - Namespace(c.ns). - Resource("jobs"). - Name(job.Name). - SubResource("status"). - Body(job). - Do(). - Into(result) - return -} - -// Delete takes name of the job and deletes it. Returns an error if one occurs. -func (c *jobs) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). 
- Namespace(c.ns). - Resource("jobs"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *jobs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the job, and returns the corresponding job object, and an error if there is any. -func (c *jobs) Get(name string) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Jobs that match those selectors. -func (c *jobs) List(opts api.ListOptions) (result *batch.JobList, err error) { - result = &batch.JobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested jobs. -func (c *jobs) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched job. -func (c *jobs) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("jobs"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/BUILD deleted file mode 100644 index f3b1e6b0cb..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/BUILD +++ /dev/null @@ -1,30 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "certificates_client.go", - "certificatesigningrequest.go", - "certificatesigningrequest_expansion.go", - "doc.go", - "generated_expansion.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/certificates:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificates_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificates_client.go deleted file mode 100644 index 0c10fdb59c..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificates_client.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - registered "k8s.io/kubernetes/pkg/apimachinery/registered" - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -type CertificatesInterface interface { - RESTClient() restclient.Interface - CertificateSigningRequestsGetter -} - -// CertificatesClient is used to interact with features provided by the k8s.io/kubernetes/pkg/apimachinery/registered.Group group. -type CertificatesClient struct { - restClient restclient.Interface -} - -func (c *CertificatesClient) CertificateSigningRequests() CertificateSigningRequestInterface { - return newCertificateSigningRequests(c) -} - -// NewForConfig creates a new CertificatesClient for the given config. -func NewForConfig(c *restclient.Config) (*CertificatesClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &CertificatesClient{client}, nil -} - -// NewForConfigOrDie creates a new CertificatesClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *CertificatesClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new CertificatesClient for the given RESTClient. -func New(c restclient.Interface) *CertificatesClient { - return &CertificatesClient{c} -} - -func setConfigDefaults(config *restclient.Config) error { - // if certificates group is not registered, return an error - g, err := registered.Group("certificates.k8s.io") - if err != nil { - return err - } - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != g.GroupVersion.Group { - copyGroupVersion := g.GroupVersion - config.GroupVersion = ©GroupVersion - } - config.NegotiatedSerializer = api.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *CertificatesClient) RESTClient() restclient.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificatesigningrequest.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificatesigningrequest.go deleted file mode 100644 index b647475fb2..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificatesigningrequest.go +++ /dev/null @@ -1,155 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - certificates "k8s.io/kubernetes/pkg/apis/certificates" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// CertificateSigningRequestsGetter has a method to return a CertificateSigningRequestInterface. -// A group's client should implement this interface. -type CertificateSigningRequestsGetter interface { - CertificateSigningRequests() CertificateSigningRequestInterface -} - -// CertificateSigningRequestInterface has methods to work with CertificateSigningRequest resources. -type CertificateSigningRequestInterface interface { - Create(*certificates.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error) - Update(*certificates.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error) - UpdateStatus(*certificates.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*certificates.CertificateSigningRequest, error) - List(opts api.ListOptions) (*certificates.CertificateSigningRequestList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *certificates.CertificateSigningRequest, err error) - CertificateSigningRequestExpansion -} - -// certificateSigningRequests implements CertificateSigningRequestInterface -type certificateSigningRequests struct { - client restclient.Interface -} - -// newCertificateSigningRequests returns a CertificateSigningRequests -func newCertificateSigningRequests(c *CertificatesClient) *certificateSigningRequests { - return &certificateSigningRequests{ - client: c.RESTClient(), - } -} - -// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) Create(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) { - result = &certificates.CertificateSigningRequest{} - err = c.client.Post(). - Resource("certificatesigningrequests"). - Body(certificateSigningRequest). - Do(). - Into(result) - return -} - -// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) Update(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) { - result = &certificates.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - Body(certificateSigningRequest). - Do(). 
- Into(result) - return -} - -func (c *certificateSigningRequests) UpdateStatus(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) { - result = &certificates.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - SubResource("status"). - Body(certificateSigningRequest). - Do(). - Into(result) - return -} - -// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. -func (c *certificateSigningRequests) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Resource("certificatesigningrequests"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *certificateSigningRequests) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Resource("certificatesigningrequests"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. -func (c *certificateSigningRequests) Get(name string) (result *certificates.CertificateSigningRequest, err error) { - result = &certificates.CertificateSigningRequest{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. -func (c *certificateSigningRequests) List(opts api.ListOptions) (result *certificates.CertificateSigningRequestList, err error) { - result = &certificates.CertificateSigningRequestList{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested certificateSigningRequests. -func (c *certificateSigningRequests) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("certificatesigningrequests"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched certificateSigningRequest. -func (c *certificateSigningRequests) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *certificates.CertificateSigningRequest, err error) { - result = &certificates.CertificateSigningRequest{} - err = c.client.Patch(pt). - Resource("certificatesigningrequests"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificatesigningrequest_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificatesigningrequest_expansion.go deleted file mode 100644 index ffdde20560..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificatesigningrequest_expansion.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import "k8s.io/kubernetes/pkg/apis/certificates" - -type CertificateSigningRequestExpansion interface { - UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) -} - -func (c *certificateSigningRequests) UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) { - result = &certificates.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - Body(certificateSigningRequest). - SubResource("approval"). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/doc.go deleted file mode 100644 index d908962a44..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// This package has the automatically generated typed clients. 
-package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/BUILD deleted file mode 100644 index e6bb476da1..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/BUILD +++ /dev/null @@ -1,32 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fake_certificates_client.go", - "fake_certificatesigningrequest.go", - "fake_certificatesigningrequest_expansion.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apis/certificates:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/testing/core:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/doc.go deleted file mode 100644 index 083f78f3a4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// Package fake has the automatically generated clients. -package fake diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/fake_certificates_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/fake_certificates_client.go deleted file mode 100644 index d52bcdd2ea..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/fake_certificates_client.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - internalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion" - restclient "k8s.io/kubernetes/pkg/client/restclient" - core "k8s.io/kubernetes/pkg/client/testing/core" -) - -type FakeCertificates struct { - *core.Fake -} - -func (c *FakeCertificates) CertificateSigningRequests() internalversion.CertificateSigningRequestInterface { - return &FakeCertificateSigningRequests{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeCertificates) RESTClient() restclient.Interface { - var ret *restclient.RESTClient - return ret -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/fake_certificatesigningrequest.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/fake_certificatesigningrequest.go deleted file mode 100644 index 482d41930e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/fake_certificatesigningrequest.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - certificates "k8s.io/kubernetes/pkg/apis/certificates" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeCertificateSigningRequests implements CertificateSigningRequestInterface -type FakeCertificateSigningRequests struct { - Fake *FakeCertificates -} - -var certificatesigningrequestsResource = unversioned.GroupVersionResource{Group: "certificates.k8s.io", Version: "", Resource: "certificatesigningrequests"} - -func (c *FakeCertificateSigningRequests) Create(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) { - obj, err := c.Fake. - Invokes(core.NewRootCreateAction(certificatesigningrequestsResource, certificateSigningRequest), &certificates.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*certificates.CertificateSigningRequest), err -} - -func (c *FakeCertificateSigningRequests) Update(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) { - obj, err := c.Fake. 
- Invokes(core.NewRootUpdateAction(certificatesigningrequestsResource, certificateSigningRequest), &certificates.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*certificates.CertificateSigningRequest), err -} - -func (c *FakeCertificateSigningRequests) UpdateStatus(certificateSigningRequest *certificates.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error) { - obj, err := c.Fake. - Invokes(core.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "status", certificateSigningRequest), &certificates.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*certificates.CertificateSigningRequest), err -} - -func (c *FakeCertificateSigningRequests) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewRootDeleteAction(certificatesigningrequestsResource, name), &certificates.CertificateSigningRequest{}) - return err -} - -func (c *FakeCertificateSigningRequests) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewRootDeleteCollectionAction(certificatesigningrequestsResource, listOptions) - - _, err := c.Fake.Invokes(action, &certificates.CertificateSigningRequestList{}) - return err -} - -func (c *FakeCertificateSigningRequests) Get(name string) (result *certificates.CertificateSigningRequest, err error) { - obj, err := c.Fake. - Invokes(core.NewRootGetAction(certificatesigningrequestsResource, name), &certificates.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*certificates.CertificateSigningRequest), err -} - -func (c *FakeCertificateSigningRequests) List(opts api.ListOptions) (result *certificates.CertificateSigningRequestList, err error) { - obj, err := c.Fake. - Invokes(core.NewRootListAction(certificatesigningrequestsResource, opts), &certificates.CertificateSigningRequestList{}) - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &certificates.CertificateSigningRequestList{} - for _, item := range obj.(*certificates.CertificateSigningRequestList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested certificateSigningRequests. -func (c *FakeCertificateSigningRequests) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewRootWatchAction(certificatesigningrequestsResource, opts)) -} - -// Patch applies the patch and returns the patched certificateSigningRequest. -func (c *FakeCertificateSigningRequests) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *certificates.CertificateSigningRequest, err error) { - obj, err := c.Fake. 
- Invokes(core.NewRootPatchSubresourceAction(certificatesigningrequestsResource, name, data, subresources...), &certificates.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*certificates.CertificateSigningRequest), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/generated_expansion.go deleted file mode 100644 index 85214a8140..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/generated_expansion.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/BUILD deleted file mode 100644 index 6ac7612564..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/BUILD +++ /dev/null @@ -1,51 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "componentstatus.go", - "configmap.go", - "core_client.go", - "doc.go", - "endpoints.go", - "event.go", - "event_expansion.go", - "generated_expansion.go", - "limitrange.go", - "namespace.go", - "namespace_expansion.go", - "node.go", - "node_expansion.go", - "persistentvolume.go", - "persistentvolumeclaim.go", - "pod.go", - "pod_expansion.go", - "podtemplate.go", - "replicationcontroller.go", - "resourcequota.go", - "secret.go", - "service.go", - "service_expansion.go", - "serviceaccount.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/fields:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/net:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/componentstatus.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/componentstatus.go deleted file mode 100644 index 25ec37e4e8..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/componentstatus.go +++ /dev/null @@ -1,141 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ComponentStatusesGetter has a method to return a ComponentStatusInterface. -// A group's client should implement this interface. -type ComponentStatusesGetter interface { - ComponentStatuses() ComponentStatusInterface -} - -// ComponentStatusInterface has methods to work with ComponentStatus resources. -type ComponentStatusInterface interface { - Create(*api.ComponentStatus) (*api.ComponentStatus, error) - Update(*api.ComponentStatus) (*api.ComponentStatus, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.ComponentStatus, error) - List(opts api.ListOptions) (*api.ComponentStatusList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.ComponentStatus, err error) - ComponentStatusExpansion -} - -// componentStatuses implements ComponentStatusInterface -type componentStatuses struct { - client restclient.Interface -} - -// newComponentStatuses returns a ComponentStatuses -func newComponentStatuses(c *CoreClient) *componentStatuses { - return &componentStatuses{ - client: c.RESTClient(), - } -} - -// Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *componentStatuses) Create(componentStatus *api.ComponentStatus) (result *api.ComponentStatus, err error) { - result = &api.ComponentStatus{} - err = c.client.Post(). - Resource("componentstatuses"). - Body(componentStatus). - Do(). - Into(result) - return -} - -// Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *componentStatuses) Update(componentStatus *api.ComponentStatus) (result *api.ComponentStatus, err error) { - result = &api.ComponentStatus{} - err = c.client.Put(). - Resource("componentstatuses"). - Name(componentStatus.Name). - Body(componentStatus). - Do(). - Into(result) - return -} - -// Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. -func (c *componentStatuses) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Resource("componentstatuses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *componentStatuses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Resource("componentstatuses"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. 
-func (c *componentStatuses) Get(name string) (result *api.ComponentStatus, err error) { - result = &api.ComponentStatus{} - err = c.client.Get(). - Resource("componentstatuses"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. -func (c *componentStatuses) List(opts api.ListOptions) (result *api.ComponentStatusList, err error) { - result = &api.ComponentStatusList{} - err = c.client.Get(). - Resource("componentstatuses"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested componentStatuses. -func (c *componentStatuses) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("componentstatuses"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched componentStatus. -func (c *componentStatuses) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.ComponentStatus, err error) { - result = &api.ComponentStatus{} - err = c.client.Patch(pt). - Resource("componentstatuses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/configmap.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/configmap.go deleted file mode 100644 index 0956777bb7..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/configmap.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ConfigMapsGetter has a method to return a ConfigMapInterface. -// A group's client should implement this interface. -type ConfigMapsGetter interface { - ConfigMaps(namespace string) ConfigMapInterface -} - -// ConfigMapInterface has methods to work with ConfigMap resources. 
-type ConfigMapInterface interface { - Create(*api.ConfigMap) (*api.ConfigMap, error) - Update(*api.ConfigMap) (*api.ConfigMap, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.ConfigMap, error) - List(opts api.ListOptions) (*api.ConfigMapList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.ConfigMap, err error) - ConfigMapExpansion -} - -// configMaps implements ConfigMapInterface -type configMaps struct { - client restclient.Interface - ns string -} - -// newConfigMaps returns a ConfigMaps -func newConfigMaps(c *CoreClient, namespace string) *configMaps { - return &configMaps{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Create(configMap *api.ConfigMap) (result *api.ConfigMap, err error) { - result = &api.ConfigMap{} - err = c.client.Post(). - Namespace(c.ns). - Resource("configmaps"). - Body(configMap). - Do(). - Into(result) - return -} - -// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Update(configMap *api.ConfigMap) (result *api.ConfigMap, err error) { - result = &api.ConfigMap{} - err = c.client.Put(). - Namespace(c.ns). - Resource("configmaps"). - Name(configMap.Name). - Body(configMap). - Do(). - Into(result) - return -} - -// Delete takes name of the configMap and deletes it. Returns an error if one occurs. -func (c *configMaps) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *configMaps) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. -func (c *configMaps) Get(name string) (result *api.ConfigMap, err error) { - result = &api.ConfigMap{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. -func (c *configMaps) List(opts api.ListOptions) (result *api.ConfigMapList, err error) { - result = &api.ConfigMapList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested configMaps. -func (c *configMaps) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched configMap. 
-func (c *configMaps) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.ConfigMap, err error) { - result = &api.ConfigMap{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("configmaps"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/core_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/core_client.go deleted file mode 100644 index b5d4a88893..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/core_client.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - registered "k8s.io/kubernetes/pkg/apimachinery/registered" - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -type CoreInterface interface { - RESTClient() restclient.Interface - ComponentStatusesGetter - ConfigMapsGetter - EndpointsGetter - EventsGetter - LimitRangesGetter - NamespacesGetter - NodesGetter - PersistentVolumesGetter - PersistentVolumeClaimsGetter - PodsGetter - PodTemplatesGetter - ReplicationControllersGetter - ResourceQuotasGetter - SecretsGetter - ServicesGetter - ServiceAccountsGetter -} - -// CoreClient is used to interact with features provided by the k8s.io/kubernetes/pkg/apimachinery/registered.Group group. 
-type CoreClient struct { - restClient restclient.Interface -} - -func (c *CoreClient) ComponentStatuses() ComponentStatusInterface { - return newComponentStatuses(c) -} - -func (c *CoreClient) ConfigMaps(namespace string) ConfigMapInterface { - return newConfigMaps(c, namespace) -} - -func (c *CoreClient) Endpoints(namespace string) EndpointsInterface { - return newEndpoints(c, namespace) -} - -func (c *CoreClient) Events(namespace string) EventInterface { - return newEvents(c, namespace) -} - -func (c *CoreClient) LimitRanges(namespace string) LimitRangeInterface { - return newLimitRanges(c, namespace) -} - -func (c *CoreClient) Namespaces() NamespaceInterface { - return newNamespaces(c) -} - -func (c *CoreClient) Nodes() NodeInterface { - return newNodes(c) -} - -func (c *CoreClient) PersistentVolumes() PersistentVolumeInterface { - return newPersistentVolumes(c) -} - -func (c *CoreClient) PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface { - return newPersistentVolumeClaims(c, namespace) -} - -func (c *CoreClient) Pods(namespace string) PodInterface { - return newPods(c, namespace) -} - -func (c *CoreClient) PodTemplates(namespace string) PodTemplateInterface { - return newPodTemplates(c, namespace) -} - -func (c *CoreClient) ReplicationControllers(namespace string) ReplicationControllerInterface { - return newReplicationControllers(c, namespace) -} - -func (c *CoreClient) ResourceQuotas(namespace string) ResourceQuotaInterface { - return newResourceQuotas(c, namespace) -} - -func (c *CoreClient) Secrets(namespace string) SecretInterface { - return newSecrets(c, namespace) -} - -func (c *CoreClient) Services(namespace string) ServiceInterface { - return newServices(c, namespace) -} - -func (c *CoreClient) ServiceAccounts(namespace string) ServiceAccountInterface { - return newServiceAccounts(c, namespace) -} - -// NewForConfig creates a new CoreClient for the given config. -func NewForConfig(c *restclient.Config) (*CoreClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &CoreClient{client}, nil -} - -// NewForConfigOrDie creates a new CoreClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *CoreClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new CoreClient for the given RESTClient. -func New(c restclient.Interface) *CoreClient { - return &CoreClient{c} -} - -func setConfigDefaults(config *restclient.Config) error { - // if core group is not registered, return an error - g, err := registered.Group("") - if err != nil { - return err - } - config.APIPath = "/api" - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != g.GroupVersion.Group { - copyGroupVersion := g.GroupVersion - config.GroupVersion = &copyGroupVersion - } - config.NegotiatedSerializer = api.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *CoreClient) RESTClient() restclient.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/doc.go deleted file mode 100644 index d908962a44..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// This package has the automatically generated typed clients. -package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/endpoints.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/endpoints.go deleted file mode 100644 index a7989de3ba..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/endpoints.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// EndpointsGetter has a method to return a EndpointsInterface. -// A group's client should implement this interface. -type EndpointsGetter interface { - Endpoints(namespace string) EndpointsInterface -} - -// EndpointsInterface has methods to work with Endpoints resources. 
-type EndpointsInterface interface { - Create(*api.Endpoints) (*api.Endpoints, error) - Update(*api.Endpoints) (*api.Endpoints, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.Endpoints, error) - List(opts api.ListOptions) (*api.EndpointsList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Endpoints, err error) - EndpointsExpansion -} - -// endpoints implements EndpointsInterface -type endpoints struct { - client restclient.Interface - ns string -} - -// newEndpoints returns a Endpoints -func newEndpoints(c *CoreClient, namespace string) *endpoints { - return &endpoints{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *endpoints) Create(endpoints *api.Endpoints) (result *api.Endpoints, err error) { - result = &api.Endpoints{} - err = c.client.Post(). - Namespace(c.ns). - Resource("endpoints"). - Body(endpoints). - Do(). - Into(result) - return -} - -// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *endpoints) Update(endpoints *api.Endpoints) (result *api.Endpoints, err error) { - result = &api.Endpoints{} - err = c.client.Put(). - Namespace(c.ns). - Resource("endpoints"). - Name(endpoints.Name). - Body(endpoints). - Do(). - Into(result) - return -} - -// Delete takes name of the endpoints and deletes it. Returns an error if one occurs. -func (c *endpoints) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("endpoints"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *endpoints) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. -func (c *endpoints) Get(name string) (result *api.Endpoints, err error) { - result = &api.Endpoints{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Endpoints that match those selectors. -func (c *endpoints) List(opts api.ListOptions) (result *api.EndpointsList, err error) { - result = &api.EndpointsList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested endpoints. -func (c *endpoints) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched endpoints. -func (c *endpoints) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Endpoints, err error) { - result = &api.Endpoints{} - err = c.client.Patch(pt). 
- Namespace(c.ns). - Resource("endpoints"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event.go deleted file mode 100644 index 8667ac4688..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// EventsGetter has a method to return a EventInterface. -// A group's client should implement this interface. -type EventsGetter interface { - Events(namespace string) EventInterface -} - -// EventInterface has methods to work with Event resources. -type EventInterface interface { - Create(*api.Event) (*api.Event, error) - Update(*api.Event) (*api.Event, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.Event, error) - List(opts api.ListOptions) (*api.EventList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Event, err error) - EventExpansion -} - -// events implements EventInterface -type events struct { - client restclient.Interface - ns string -} - -// newEvents returns a Events -func newEvents(c *CoreClient, namespace string) *events { - return &events{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(event *api.Event) (result *api.Event, err error) { - result = &api.Event{} - err = c.client.Post(). - Namespace(c.ns). - Resource("events"). - Body(event). - Do(). - Into(result) - return -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(event *api.Event) (result *api.Event, err error) { - result = &api.Event{} - err = c.client.Put(). - Namespace(c.ns). - Resource("events"). - Name(event.Name). - Body(event). - Do(). - Into(result) - return -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *events) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *events) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *events) Get(name string) (result *api.Event, err error) { - result = &api.Event{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(opts api.ListOptions) (result *api.EventList, err error) { - result = &api.EventList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *events) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched event. -func (c *events) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Event, err error) { - result = &api.Event{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("events"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event_expansion.go deleted file mode 100644 index 2d316aaed3..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event_expansion.go +++ /dev/null @@ -1,161 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/runtime" -) - -// The EventExpansion interface allows manually adding extra methods to the EventInterface. -type EventExpansion interface { - // CreateWithEventNamespace is the same as a Create, except that it sends the request to the event.Namespace. - CreateWithEventNamespace(event *api.Event) (*api.Event, error) - // UpdateWithEventNamespace is the same as a Update, except that it sends the request to the event.Namespace. 
- UpdateWithEventNamespace(event *api.Event) (*api.Event, error) - PatchWithEventNamespace(event *api.Event, data []byte) (*api.Event, error) - // Search finds events about the specified object - Search(objOrRef runtime.Object) (*api.EventList, error) - // Returns the appropriate field selector based on the API version being used to communicate with the server. - // The returned field selector can be used with List and Watch to filter desired events. - GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector -} - -// CreateWithEventNamespace makes a new event. Returns the copy of the event the server returns, -// or an error. The namespace to create the event within is deduced from the -// event; it must either match this event client's namespace, or this event -// client must have been created with the "" namespace. -func (e *events) CreateWithEventNamespace(event *api.Event) (*api.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) - } - result := &api.Event{} - err := e.client.Post(). - NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). - Resource("events"). - Body(event). - Do(). - Into(result) - return result, err -} - -// UpdateWithEventNamespace modifies an existing event. It returns the copy of the event that the server returns, -// or an error. The namespace and key to update the event within is deduced from the event. The -// namespace must either match this event client's namespace, or this event client must have been -// created with the "" namespace. Update also requires the ResourceVersion to be set in the event -// object. -func (e *events) UpdateWithEventNamespace(event *api.Event) (*api.Event, error) { - result := &api.Event{} - err := e.client.Put(). - NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). - Resource("events"). - Name(event.Name). - Body(event). - Do(). - Into(result) - return result, err -} - -// PatchWithEventNamespace modifies an existing event. It returns the copy of -// the event that the server returns, or an error. The namespace and name of the -// target event is deduced from the incompleteEvent. The namespace must either -// match this event client's namespace, or this event client must have been -// created with the "" namespace. -func (e *events) PatchWithEventNamespace(incompleteEvent *api.Event, data []byte) (*api.Event, error) { - if e.ns != "" && incompleteEvent.Namespace != e.ns { - return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", incompleteEvent.Namespace, e.ns) - } - result := &api.Event{} - err := e.client.Patch(api.StrategicMergePatchType). - NamespaceIfScoped(incompleteEvent.Namespace, len(incompleteEvent.Namespace) > 0). - Resource("events"). - Name(incompleteEvent.Name). - Body(data). - Do(). - Into(result) - return result, err -} - -// Search finds events about the specified object. The namespace of the -// object must match this event's client namespace unless the event client -// was made with the "" namespace. 
-func (e *events) Search(objOrRef runtime.Object) (*api.EventList, error) { - ref, err := api.GetReference(objOrRef) - if err != nil { - return nil, err - } - if e.ns != "" && ref.Namespace != e.ns { - return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.ns) - } - stringRefKind := string(ref.Kind) - var refKind *string - if stringRefKind != "" { - refKind = &stringRefKind - } - stringRefUID := string(ref.UID) - var refUID *string - if stringRefUID != "" { - refUID = &stringRefUID - } - fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID) - return e.List(api.ListOptions{FieldSelector: fieldSelector}) -} - -// Returns the appropriate field selector based on the API version being used to communicate with the server. -// The returned field selector can be used with List and Watch to filter desired events. -func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { - apiVersion := e.client.APIVersion().String() - field := fields.Set{} - if involvedObjectName != nil { - field[GetInvolvedObjectNameFieldLabel(apiVersion)] = *involvedObjectName - } - if involvedObjectNamespace != nil { - field["involvedObject.namespace"] = *involvedObjectNamespace - } - if involvedObjectKind != nil { - field["involvedObject.kind"] = *involvedObjectKind - } - if involvedObjectUID != nil { - field["involvedObject.uid"] = *involvedObjectUID - } - return field.AsSelector() -} - -// Returns the appropriate field label to use for name of the involved object as per the given API version. -func GetInvolvedObjectNameFieldLabel(version string) string { - return "involvedObject.name" -} - -// TODO: This is a temporary arrangement and will be removed once all clients are moved to use the clientset. 
-type EventSinkImpl struct { - Interface EventInterface -} - -func (e *EventSinkImpl) Create(event *api.Event) (*api.Event, error) { - return e.Interface.CreateWithEventNamespace(event) -} - -func (e *EventSinkImpl) Update(event *api.Event) (*api.Event, error) { - return e.Interface.UpdateWithEventNamespace(event) -} - -func (e *EventSinkImpl) Patch(event *api.Event, data []byte) (*api.Event, error) { - return e.Interface.PatchWithEventNamespace(event, data) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/BUILD deleted file mode 100644 index 62161dbd45..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/BUILD +++ /dev/null @@ -1,52 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fake_componentstatus.go", - "fake_configmap.go", - "fake_core_client.go", - "fake_endpoints.go", - "fake_event.go", - "fake_event_expansion.go", - "fake_limitrange.go", - "fake_namespace.go", - "fake_namespace_expansion.go", - "fake_node.go", - "fake_node_expansion.go", - "fake_persistentvolume.go", - "fake_persistentvolumeclaim.go", - "fake_pod.go", - "fake_pod_expansion.go", - "fake_podtemplate.go", - "fake_replicationcontroller.go", - "fake_resourcequota.go", - "fake_secret.go", - "fake_service.go", - "fake_service_expansion.go", - "fake_serviceaccount.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/testing/core:go_default_library", - "//pkg/fields:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/doc.go deleted file mode 100644 index 083f78f3a4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// Package fake has the automatically generated clients. 
-package fake diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_componentstatus.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_componentstatus.go deleted file mode 100644 index da32f6f8c2..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_componentstatus.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeComponentStatuses implements ComponentStatusInterface -type FakeComponentStatuses struct { - Fake *FakeCore -} - -var componentstatusesResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "componentstatuses"} - -func (c *FakeComponentStatuses) Create(componentStatus *api.ComponentStatus) (result *api.ComponentStatus, err error) { - obj, err := c.Fake. - Invokes(core.NewRootCreateAction(componentstatusesResource, componentStatus), &api.ComponentStatus{}) - if obj == nil { - return nil, err - } - return obj.(*api.ComponentStatus), err -} - -func (c *FakeComponentStatuses) Update(componentStatus *api.ComponentStatus) (result *api.ComponentStatus, err error) { - obj, err := c.Fake. - Invokes(core.NewRootUpdateAction(componentstatusesResource, componentStatus), &api.ComponentStatus{}) - if obj == nil { - return nil, err - } - return obj.(*api.ComponentStatus), err -} - -func (c *FakeComponentStatuses) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewRootDeleteAction(componentstatusesResource, name), &api.ComponentStatus{}) - return err -} - -func (c *FakeComponentStatuses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewRootDeleteCollectionAction(componentstatusesResource, listOptions) - - _, err := c.Fake.Invokes(action, &api.ComponentStatusList{}) - return err -} - -func (c *FakeComponentStatuses) Get(name string) (result *api.ComponentStatus, err error) { - obj, err := c.Fake. - Invokes(core.NewRootGetAction(componentstatusesResource, name), &api.ComponentStatus{}) - if obj == nil { - return nil, err - } - return obj.(*api.ComponentStatus), err -} - -func (c *FakeComponentStatuses) List(opts api.ListOptions) (result *api.ComponentStatusList, err error) { - obj, err := c.Fake. 
- Invokes(core.NewRootListAction(componentstatusesResource, opts), &api.ComponentStatusList{}) - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &api.ComponentStatusList{} - for _, item := range obj.(*api.ComponentStatusList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested componentStatuses. -func (c *FakeComponentStatuses) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewRootWatchAction(componentstatusesResource, opts)) -} - -// Patch applies the patch and returns the patched componentStatus. -func (c *FakeComponentStatuses) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.ComponentStatus, err error) { - obj, err := c.Fake. - Invokes(core.NewRootPatchSubresourceAction(componentstatusesResource, name, data, subresources...), &api.ComponentStatus{}) - if obj == nil { - return nil, err - } - return obj.(*api.ComponentStatus), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_configmap.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_configmap.go deleted file mode 100644 index 2e00fabb5b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_configmap.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeConfigMaps implements ConfigMapInterface -type FakeConfigMaps struct { - Fake *FakeCore - ns string -} - -var configmapsResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "configmaps"} - -func (c *FakeConfigMaps) Create(configMap *api.ConfigMap) (result *api.ConfigMap, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(configmapsResource, c.ns, configMap), &api.ConfigMap{}) - - if obj == nil { - return nil, err - } - return obj.(*api.ConfigMap), err -} - -func (c *FakeConfigMaps) Update(configMap *api.ConfigMap) (result *api.ConfigMap, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(configmapsResource, c.ns, configMap), &api.ConfigMap{}) - - if obj == nil { - return nil, err - } - return obj.(*api.ConfigMap), err -} - -func (c *FakeConfigMaps) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(core.NewDeleteAction(configmapsResource, c.ns, name), &api.ConfigMap{}) - - return err -} - -func (c *FakeConfigMaps) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(configmapsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &api.ConfigMapList{}) - return err -} - -func (c *FakeConfigMaps) Get(name string) (result *api.ConfigMap, err error) { - obj, err := c.Fake. - Invokes(core.NewGetAction(configmapsResource, c.ns, name), &api.ConfigMap{}) - - if obj == nil { - return nil, err - } - return obj.(*api.ConfigMap), err -} - -func (c *FakeConfigMaps) List(opts api.ListOptions) (result *api.ConfigMapList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(configmapsResource, c.ns, opts), &api.ConfigMapList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &api.ConfigMapList{} - for _, item := range obj.(*api.ConfigMapList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested configMaps. -func (c *FakeConfigMaps) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewWatchAction(configmapsResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched configMap. -func (c *FakeConfigMaps) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.ConfigMap, err error) { - obj, err := c.Fake. - Invokes(core.NewPatchSubresourceAction(configmapsResource, c.ns, name, data, subresources...), &api.ConfigMap{}) - - if obj == nil { - return nil, err - } - return obj.(*api.ConfigMap), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_core_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_core_client.go deleted file mode 100644 index 66808c32db..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_core_client.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - internalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - restclient "k8s.io/kubernetes/pkg/client/restclient" - core "k8s.io/kubernetes/pkg/client/testing/core" -) - -type FakeCore struct { - *core.Fake -} - -func (c *FakeCore) ComponentStatuses() internalversion.ComponentStatusInterface { - return &FakeComponentStatuses{c} -} - -func (c *FakeCore) ConfigMaps(namespace string) internalversion.ConfigMapInterface { - return &FakeConfigMaps{c, namespace} -} - -func (c *FakeCore) Endpoints(namespace string) internalversion.EndpointsInterface { - return &FakeEndpoints{c, namespace} -} - -func (c *FakeCore) Events(namespace string) internalversion.EventInterface { - return &FakeEvents{c, namespace} -} - -func (c *FakeCore) LimitRanges(namespace string) internalversion.LimitRangeInterface { - return &FakeLimitRanges{c, namespace} -} - -func (c *FakeCore) Namespaces() internalversion.NamespaceInterface { - return &FakeNamespaces{c} -} - -func (c *FakeCore) Nodes() internalversion.NodeInterface { - return &FakeNodes{c} -} - -func (c *FakeCore) PersistentVolumes() internalversion.PersistentVolumeInterface { - return &FakePersistentVolumes{c} -} - -func (c *FakeCore) PersistentVolumeClaims(namespace string) internalversion.PersistentVolumeClaimInterface { - return &FakePersistentVolumeClaims{c, namespace} -} - -func (c *FakeCore) Pods(namespace string) internalversion.PodInterface { - return &FakePods{c, namespace} -} - -func (c *FakeCore) PodTemplates(namespace string) internalversion.PodTemplateInterface { - return &FakePodTemplates{c, namespace} -} - -func (c *FakeCore) ReplicationControllers(namespace string) internalversion.ReplicationControllerInterface { - return &FakeReplicationControllers{c, namespace} -} - -func (c *FakeCore) ResourceQuotas(namespace string) internalversion.ResourceQuotaInterface { - return &FakeResourceQuotas{c, namespace} -} - -func (c *FakeCore) Secrets(namespace string) internalversion.SecretInterface { - return &FakeSecrets{c, namespace} -} - -func (c *FakeCore) Services(namespace string) internalversion.ServiceInterface { - return &FakeServices{c, namespace} -} - -func (c *FakeCore) ServiceAccounts(namespace string) internalversion.ServiceAccountInterface { - return &FakeServiceAccounts{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeCore) RESTClient() restclient.Interface { - var ret *restclient.RESTClient - return ret -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_endpoints.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_endpoints.go deleted file mode 100644 index 2705a25e8b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_endpoints.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeEndpoints implements EndpointsInterface -type FakeEndpoints struct { - Fake *FakeCore - ns string -} - -var endpointsResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "endpoints"} - -func (c *FakeEndpoints) Create(endpoints *api.Endpoints) (result *api.Endpoints, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(endpointsResource, c.ns, endpoints), &api.Endpoints{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Endpoints), err -} - -func (c *FakeEndpoints) Update(endpoints *api.Endpoints) (result *api.Endpoints, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(endpointsResource, c.ns, endpoints), &api.Endpoints{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Endpoints), err -} - -func (c *FakeEndpoints) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewDeleteAction(endpointsResource, c.ns, name), &api.Endpoints{}) - - return err -} - -func (c *FakeEndpoints) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(endpointsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &api.EndpointsList{}) - return err -} - -func (c *FakeEndpoints) Get(name string) (result *api.Endpoints, err error) { - obj, err := c.Fake. - Invokes(core.NewGetAction(endpointsResource, c.ns, name), &api.Endpoints{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Endpoints), err -} - -func (c *FakeEndpoints) List(opts api.ListOptions) (result *api.EndpointsList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(endpointsResource, c.ns, opts), &api.EndpointsList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &api.EndpointsList{} - for _, item := range obj.(*api.EndpointsList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested endpoints. -func (c *FakeEndpoints) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewWatchAction(endpointsResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched endpoints. -func (c *FakeEndpoints) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Endpoints, err error) { - obj, err := c.Fake. 
- Invokes(core.NewPatchSubresourceAction(endpointsResource, c.ns, name, data, subresources...), &api.Endpoints{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Endpoints), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_event.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_event.go deleted file mode 100644 index 21980a7a7d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_event.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeEvents implements EventInterface -type FakeEvents struct { - Fake *FakeCore - ns string -} - -var eventsResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "events"} - -func (c *FakeEvents) Create(event *api.Event) (result *api.Event, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(eventsResource, c.ns, event), &api.Event{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Event), err -} - -func (c *FakeEvents) Update(event *api.Event) (result *api.Event, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(eventsResource, c.ns, event), &api.Event{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Event), err -} - -func (c *FakeEvents) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewDeleteAction(eventsResource, c.ns, name), &api.Event{}) - - return err -} - -func (c *FakeEvents) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(eventsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &api.EventList{}) - return err -} - -func (c *FakeEvents) Get(name string) (result *api.Event, err error) { - obj, err := c.Fake. - Invokes(core.NewGetAction(eventsResource, c.ns, name), &api.Event{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Event), err -} - -func (c *FakeEvents) List(opts api.ListOptions) (result *api.EventList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(eventsResource, c.ns, opts), &api.EventList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &api.EventList{} - for _, item := range obj.(*api.EventList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested events. 
-func (c *FakeEvents) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewWatchAction(eventsResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched event. -func (c *FakeEvents) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Event, err error) { - obj, err := c.Fake. - Invokes(core.NewPatchSubresourceAction(eventsResource, c.ns, name, data, subresources...), &api.Event{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Event), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_event_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_event_expansion.go deleted file mode 100644 index c9a07517c2..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_event_expansion.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/testing/core" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/runtime" -) - -func (c *FakeEvents) CreateWithEventNamespace(event *api.Event) (*api.Event, error) { - action := core.NewRootCreateAction(eventsResource, event) - if c.ns != "" { - action = core.NewCreateAction(eventsResource, c.ns, event) - } - obj, err := c.Fake.Invokes(action, event) - if obj == nil { - return nil, err - } - - return obj.(*api.Event), err -} - -// Update replaces an existing event. Returns the copy of the event the server returns, or an error. -func (c *FakeEvents) UpdateWithEventNamespace(event *api.Event) (*api.Event, error) { - action := core.NewRootUpdateAction(eventsResource, event) - if c.ns != "" { - action = core.NewUpdateAction(eventsResource, c.ns, event) - } - obj, err := c.Fake.Invokes(action, event) - if obj == nil { - return nil, err - } - - return obj.(*api.Event), err -} - -// PatchWithEventNamespace patches an existing event. Returns the copy of the event the server returns, or an error. -func (c *FakeEvents) PatchWithEventNamespace(event *api.Event, data []byte) (*api.Event, error) { - action := core.NewRootPatchAction(eventsResource, event.Name, data) - if c.ns != "" { - action = core.NewPatchAction(eventsResource, c.ns, event.Name, data) - } - obj, err := c.Fake.Invokes(action, event) - if obj == nil { - return nil, err - } - - return obj.(*api.Event), err -} - -// Search returns a list of events matching the specified object. 
-func (c *FakeEvents) Search(objOrRef runtime.Object) (*api.EventList, error) { - action := core.NewRootListAction(eventsResource, api.ListOptions{}) - if c.ns != "" { - action = core.NewListAction(eventsResource, c.ns, api.ListOptions{}) - } - obj, err := c.Fake.Invokes(action, &api.EventList{}) - if obj == nil { - return nil, err - } - - return obj.(*api.EventList), err -} - -func (c *FakeEvents) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { - action := core.GenericActionImpl{} - action.Verb = "get-field-selector" - action.Resource = eventsResource - - c.Fake.Invokes(action, nil) - return fields.Everything() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_limitrange.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_limitrange.go deleted file mode 100644 index 1706df0c82..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_limitrange.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeLimitRanges implements LimitRangeInterface -type FakeLimitRanges struct { - Fake *FakeCore - ns string -} - -var limitrangesResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "limitranges"} - -func (c *FakeLimitRanges) Create(limitRange *api.LimitRange) (result *api.LimitRange, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(limitrangesResource, c.ns, limitRange), &api.LimitRange{}) - - if obj == nil { - return nil, err - } - return obj.(*api.LimitRange), err -} - -func (c *FakeLimitRanges) Update(limitRange *api.LimitRange) (result *api.LimitRange, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(limitrangesResource, c.ns, limitRange), &api.LimitRange{}) - - if obj == nil { - return nil, err - } - return obj.(*api.LimitRange), err -} - -func (c *FakeLimitRanges) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewDeleteAction(limitrangesResource, c.ns, name), &api.LimitRange{}) - - return err -} - -func (c *FakeLimitRanges) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(limitrangesResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &api.LimitRangeList{}) - return err -} - -func (c *FakeLimitRanges) Get(name string) (result *api.LimitRange, err error) { - obj, err := c.Fake. 
- Invokes(core.NewGetAction(limitrangesResource, c.ns, name), &api.LimitRange{}) - - if obj == nil { - return nil, err - } - return obj.(*api.LimitRange), err -} - -func (c *FakeLimitRanges) List(opts api.ListOptions) (result *api.LimitRangeList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(limitrangesResource, c.ns, opts), &api.LimitRangeList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &api.LimitRangeList{} - for _, item := range obj.(*api.LimitRangeList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested limitRanges. -func (c *FakeLimitRanges) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewWatchAction(limitrangesResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched limitRange. -func (c *FakeLimitRanges) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.LimitRange, err error) { - obj, err := c.Fake. - Invokes(core.NewPatchSubresourceAction(limitrangesResource, c.ns, name, data, subresources...), &api.LimitRange{}) - - if obj == nil { - return nil, err - } - return obj.(*api.LimitRange), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_namespace.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_namespace.go deleted file mode 100644 index e5c4ceca05..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_namespace.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeNamespaces implements NamespaceInterface -type FakeNamespaces struct { - Fake *FakeCore -} - -var namespacesResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "namespaces"} - -func (c *FakeNamespaces) Create(namespace *api.Namespace) (result *api.Namespace, err error) { - obj, err := c.Fake. - Invokes(core.NewRootCreateAction(namespacesResource, namespace), &api.Namespace{}) - if obj == nil { - return nil, err - } - return obj.(*api.Namespace), err -} - -func (c *FakeNamespaces) Update(namespace *api.Namespace) (result *api.Namespace, err error) { - obj, err := c.Fake. 
- Invokes(core.NewRootUpdateAction(namespacesResource, namespace), &api.Namespace{}) - if obj == nil { - return nil, err - } - return obj.(*api.Namespace), err -} - -func (c *FakeNamespaces) UpdateStatus(namespace *api.Namespace) (*api.Namespace, error) { - obj, err := c.Fake. - Invokes(core.NewRootUpdateSubresourceAction(namespacesResource, "status", namespace), &api.Namespace{}) - if obj == nil { - return nil, err - } - return obj.(*api.Namespace), err -} - -func (c *FakeNamespaces) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewRootDeleteAction(namespacesResource, name), &api.Namespace{}) - return err -} - -func (c *FakeNamespaces) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewRootDeleteCollectionAction(namespacesResource, listOptions) - - _, err := c.Fake.Invokes(action, &api.NamespaceList{}) - return err -} - -func (c *FakeNamespaces) Get(name string) (result *api.Namespace, err error) { - obj, err := c.Fake. - Invokes(core.NewRootGetAction(namespacesResource, name), &api.Namespace{}) - if obj == nil { - return nil, err - } - return obj.(*api.Namespace), err -} - -func (c *FakeNamespaces) List(opts api.ListOptions) (result *api.NamespaceList, err error) { - obj, err := c.Fake. - Invokes(core.NewRootListAction(namespacesResource, opts), &api.NamespaceList{}) - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &api.NamespaceList{} - for _, item := range obj.(*api.NamespaceList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested namespaces. -func (c *FakeNamespaces) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewRootWatchAction(namespacesResource, opts)) -} - -// Patch applies the patch and returns the patched namespace. -func (c *FakeNamespaces) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Namespace, err error) { - obj, err := c.Fake. - Invokes(core.NewRootPatchSubresourceAction(namespacesResource, name, data, subresources...), &api.Namespace{}) - if obj == nil { - return nil, err - } - return obj.(*api.Namespace), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_node.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_node.go deleted file mode 100644 index 2788608c72..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_node.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeNodes implements NodeInterface -type FakeNodes struct { - Fake *FakeCore -} - -var nodesResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "nodes"} - -func (c *FakeNodes) Create(node *api.Node) (result *api.Node, err error) { - obj, err := c.Fake. - Invokes(core.NewRootCreateAction(nodesResource, node), &api.Node{}) - if obj == nil { - return nil, err - } - return obj.(*api.Node), err -} - -func (c *FakeNodes) Update(node *api.Node) (result *api.Node, err error) { - obj, err := c.Fake. - Invokes(core.NewRootUpdateAction(nodesResource, node), &api.Node{}) - if obj == nil { - return nil, err - } - return obj.(*api.Node), err -} - -func (c *FakeNodes) UpdateStatus(node *api.Node) (*api.Node, error) { - obj, err := c.Fake. - Invokes(core.NewRootUpdateSubresourceAction(nodesResource, "status", node), &api.Node{}) - if obj == nil { - return nil, err - } - return obj.(*api.Node), err -} - -func (c *FakeNodes) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewRootDeleteAction(nodesResource, name), &api.Node{}) - return err -} - -func (c *FakeNodes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewRootDeleteCollectionAction(nodesResource, listOptions) - - _, err := c.Fake.Invokes(action, &api.NodeList{}) - return err -} - -func (c *FakeNodes) Get(name string) (result *api.Node, err error) { - obj, err := c.Fake. - Invokes(core.NewRootGetAction(nodesResource, name), &api.Node{}) - if obj == nil { - return nil, err - } - return obj.(*api.Node), err -} - -func (c *FakeNodes) List(opts api.ListOptions) (result *api.NodeList, err error) { - obj, err := c.Fake. - Invokes(core.NewRootListAction(nodesResource, opts), &api.NodeList{}) - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &api.NodeList{} - for _, item := range obj.(*api.NodeList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested nodes. -func (c *FakeNodes) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewRootWatchAction(nodesResource, opts)) -} - -// Patch applies the patch and returns the patched node. -func (c *FakeNodes) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Node, err error) { - obj, err := c.Fake. - Invokes(core.NewRootPatchSubresourceAction(nodesResource, name, data, subresources...), &api.Node{}) - if obj == nil { - return nil, err - } - return obj.(*api.Node), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_persistentvolume.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_persistentvolume.go deleted file mode 100644 index e7d0446cde..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_persistentvolume.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakePersistentVolumes implements PersistentVolumeInterface -type FakePersistentVolumes struct { - Fake *FakeCore -} - -var persistentvolumesResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "persistentvolumes"} - -func (c *FakePersistentVolumes) Create(persistentVolume *api.PersistentVolume) (result *api.PersistentVolume, err error) { - obj, err := c.Fake. - Invokes(core.NewRootCreateAction(persistentvolumesResource, persistentVolume), &api.PersistentVolume{}) - if obj == nil { - return nil, err - } - return obj.(*api.PersistentVolume), err -} - -func (c *FakePersistentVolumes) Update(persistentVolume *api.PersistentVolume) (result *api.PersistentVolume, err error) { - obj, err := c.Fake. - Invokes(core.NewRootUpdateAction(persistentvolumesResource, persistentVolume), &api.PersistentVolume{}) - if obj == nil { - return nil, err - } - return obj.(*api.PersistentVolume), err -} - -func (c *FakePersistentVolumes) UpdateStatus(persistentVolume *api.PersistentVolume) (*api.PersistentVolume, error) { - obj, err := c.Fake. - Invokes(core.NewRootUpdateSubresourceAction(persistentvolumesResource, "status", persistentVolume), &api.PersistentVolume{}) - if obj == nil { - return nil, err - } - return obj.(*api.PersistentVolume), err -} - -func (c *FakePersistentVolumes) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewRootDeleteAction(persistentvolumesResource, name), &api.PersistentVolume{}) - return err -} - -func (c *FakePersistentVolumes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewRootDeleteCollectionAction(persistentvolumesResource, listOptions) - - _, err := c.Fake.Invokes(action, &api.PersistentVolumeList{}) - return err -} - -func (c *FakePersistentVolumes) Get(name string) (result *api.PersistentVolume, err error) { - obj, err := c.Fake. - Invokes(core.NewRootGetAction(persistentvolumesResource, name), &api.PersistentVolume{}) - if obj == nil { - return nil, err - } - return obj.(*api.PersistentVolume), err -} - -func (c *FakePersistentVolumes) List(opts api.ListOptions) (result *api.PersistentVolumeList, err error) { - obj, err := c.Fake. - Invokes(core.NewRootListAction(persistentvolumesResource, opts), &api.PersistentVolumeList{}) - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &api.PersistentVolumeList{} - for _, item := range obj.(*api.PersistentVolumeList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested persistentVolumes. 
-func (c *FakePersistentVolumes) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewRootWatchAction(persistentvolumesResource, opts)) -} - -// Patch applies the patch and returns the patched persistentVolume. -func (c *FakePersistentVolumes) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.PersistentVolume, err error) { - obj, err := c.Fake. - Invokes(core.NewRootPatchSubresourceAction(persistentvolumesResource, name, data, subresources...), &api.PersistentVolume{}) - if obj == nil { - return nil, err - } - return obj.(*api.PersistentVolume), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_persistentvolumeclaim.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_persistentvolumeclaim.go deleted file mode 100644 index c394f43c95..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_persistentvolumeclaim.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakePersistentVolumeClaims implements PersistentVolumeClaimInterface -type FakePersistentVolumeClaims struct { - Fake *FakeCore - ns string -} - -var persistentvolumeclaimsResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "persistentvolumeclaims"} - -func (c *FakePersistentVolumeClaims) Create(persistentVolumeClaim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &api.PersistentVolumeClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*api.PersistentVolumeClaim), err -} - -func (c *FakePersistentVolumeClaims) Update(persistentVolumeClaim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &api.PersistentVolumeClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*api.PersistentVolumeClaim), err -} - -func (c *FakePersistentVolumeClaims) UpdateStatus(persistentVolumeClaim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) { - obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction(persistentvolumeclaimsResource, "status", c.ns, persistentVolumeClaim), &api.PersistentVolumeClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*api.PersistentVolumeClaim), err -} - -func (c *FakePersistentVolumeClaims) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(core.NewDeleteAction(persistentvolumeclaimsResource, c.ns, name), &api.PersistentVolumeClaim{}) - - return err -} - -func (c *FakePersistentVolumeClaims) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(persistentvolumeclaimsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &api.PersistentVolumeClaimList{}) - return err -} - -func (c *FakePersistentVolumeClaims) Get(name string) (result *api.PersistentVolumeClaim, err error) { - obj, err := c.Fake. - Invokes(core.NewGetAction(persistentvolumeclaimsResource, c.ns, name), &api.PersistentVolumeClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*api.PersistentVolumeClaim), err -} - -func (c *FakePersistentVolumeClaims) List(opts api.ListOptions) (result *api.PersistentVolumeClaimList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(persistentvolumeclaimsResource, c.ns, opts), &api.PersistentVolumeClaimList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &api.PersistentVolumeClaimList{} - for _, item := range obj.(*api.PersistentVolumeClaimList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested persistentVolumeClaims. -func (c *FakePersistentVolumeClaims) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewWatchAction(persistentvolumeclaimsResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched persistentVolumeClaim. -func (c *FakePersistentVolumeClaims) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.PersistentVolumeClaim, err error) { - obj, err := c.Fake. - Invokes(core.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, name, data, subresources...), &api.PersistentVolumeClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*api.PersistentVolumeClaim), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_pod.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_pod.go deleted file mode 100644 index dd50f12ccb..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_pod.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakePods implements PodInterface -type FakePods struct { - Fake *FakeCore - ns string -} - -var podsResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "pods"} - -func (c *FakePods) Create(pod *api.Pod) (result *api.Pod, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(podsResource, c.ns, pod), &api.Pod{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Pod), err -} - -func (c *FakePods) Update(pod *api.Pod) (result *api.Pod, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(podsResource, c.ns, pod), &api.Pod{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Pod), err -} - -func (c *FakePods) UpdateStatus(pod *api.Pod) (*api.Pod, error) { - obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction(podsResource, "status", c.ns, pod), &api.Pod{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Pod), err -} - -func (c *FakePods) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewDeleteAction(podsResource, c.ns, name), &api.Pod{}) - - return err -} - -func (c *FakePods) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(podsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &api.PodList{}) - return err -} - -func (c *FakePods) Get(name string) (result *api.Pod, err error) { - obj, err := c.Fake. - Invokes(core.NewGetAction(podsResource, c.ns, name), &api.Pod{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Pod), err -} - -func (c *FakePods) List(opts api.ListOptions) (result *api.PodList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(podsResource, c.ns, opts), &api.PodList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &api.PodList{} - for _, item := range obj.(*api.PodList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested pods. -func (c *FakePods) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewWatchAction(podsResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched pod. -func (c *FakePods) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Pod, err error) { - obj, err := c.Fake. - Invokes(core.NewPatchSubresourceAction(podsResource, c.ns, name, data, subresources...), &api.Pod{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Pod), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_pod_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_pod_expansion.go deleted file mode 100644 index 26343982c5..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_pod_expansion.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/client/testing/core" -) - -func (c *FakePods) Bind(binding *api.Binding) error { - action := core.CreateActionImpl{} - action.Verb = "create" - action.Resource = podsResource - action.Subresource = "bindings" - action.Object = binding - - _, err := c.Fake.Invokes(action, binding) - return err -} - -func (c *FakePods) GetLogs(name string, opts *api.PodLogOptions) *restclient.Request { - action := core.GenericActionImpl{} - action.Verb = "get" - action.Namespace = c.ns - action.Resource = podsResource - action.Subresource = "logs" - action.Value = opts - - _, _ = c.Fake.Invokes(action, &api.Pod{}) - return &restclient.Request{} -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_podtemplate.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_podtemplate.go deleted file mode 100644 index 57a95a21dd..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_podtemplate.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakePodTemplates implements PodTemplateInterface -type FakePodTemplates struct { - Fake *FakeCore - ns string -} - -var podtemplatesResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "podtemplates"} - -func (c *FakePodTemplates) Create(podTemplate *api.PodTemplate) (result *api.PodTemplate, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(podtemplatesResource, c.ns, podTemplate), &api.PodTemplate{}) - - if obj == nil { - return nil, err - } - return obj.(*api.PodTemplate), err -} - -func (c *FakePodTemplates) Update(podTemplate *api.PodTemplate) (result *api.PodTemplate, err error) { - obj, err := c.Fake. 
- Invokes(core.NewUpdateAction(podtemplatesResource, c.ns, podTemplate), &api.PodTemplate{}) - - if obj == nil { - return nil, err - } - return obj.(*api.PodTemplate), err -} - -func (c *FakePodTemplates) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewDeleteAction(podtemplatesResource, c.ns, name), &api.PodTemplate{}) - - return err -} - -func (c *FakePodTemplates) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(podtemplatesResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &api.PodTemplateList{}) - return err -} - -func (c *FakePodTemplates) Get(name string) (result *api.PodTemplate, err error) { - obj, err := c.Fake. - Invokes(core.NewGetAction(podtemplatesResource, c.ns, name), &api.PodTemplate{}) - - if obj == nil { - return nil, err - } - return obj.(*api.PodTemplate), err -} - -func (c *FakePodTemplates) List(opts api.ListOptions) (result *api.PodTemplateList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(podtemplatesResource, c.ns, opts), &api.PodTemplateList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &api.PodTemplateList{} - for _, item := range obj.(*api.PodTemplateList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested podTemplates. -func (c *FakePodTemplates) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewWatchAction(podtemplatesResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched podTemplate. -func (c *FakePodTemplates) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.PodTemplate, err error) { - obj, err := c.Fake. - Invokes(core.NewPatchSubresourceAction(podtemplatesResource, c.ns, name, data, subresources...), &api.PodTemplate{}) - - if obj == nil { - return nil, err - } - return obj.(*api.PodTemplate), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_replicationcontroller.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_replicationcontroller.go deleted file mode 100644 index 935dc1f126..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_replicationcontroller.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeReplicationControllers implements ReplicationControllerInterface -type FakeReplicationControllers struct { - Fake *FakeCore - ns string -} - -var replicationcontrollersResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "replicationcontrollers"} - -func (c *FakeReplicationControllers) Create(replicationController *api.ReplicationController) (result *api.ReplicationController, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(replicationcontrollersResource, c.ns, replicationController), &api.ReplicationController{}) - - if obj == nil { - return nil, err - } - return obj.(*api.ReplicationController), err -} - -func (c *FakeReplicationControllers) Update(replicationController *api.ReplicationController) (result *api.ReplicationController, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(replicationcontrollersResource, c.ns, replicationController), &api.ReplicationController{}) - - if obj == nil { - return nil, err - } - return obj.(*api.ReplicationController), err -} - -func (c *FakeReplicationControllers) UpdateStatus(replicationController *api.ReplicationController) (*api.ReplicationController, error) { - obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction(replicationcontrollersResource, "status", c.ns, replicationController), &api.ReplicationController{}) - - if obj == nil { - return nil, err - } - return obj.(*api.ReplicationController), err -} - -func (c *FakeReplicationControllers) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewDeleteAction(replicationcontrollersResource, c.ns, name), &api.ReplicationController{}) - - return err -} - -func (c *FakeReplicationControllers) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(replicationcontrollersResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &api.ReplicationControllerList{}) - return err -} - -func (c *FakeReplicationControllers) Get(name string) (result *api.ReplicationController, err error) { - obj, err := c.Fake. - Invokes(core.NewGetAction(replicationcontrollersResource, c.ns, name), &api.ReplicationController{}) - - if obj == nil { - return nil, err - } - return obj.(*api.ReplicationController), err -} - -func (c *FakeReplicationControllers) List(opts api.ListOptions) (result *api.ReplicationControllerList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(replicationcontrollersResource, c.ns, opts), &api.ReplicationControllerList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &api.ReplicationControllerList{} - for _, item := range obj.(*api.ReplicationControllerList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested replicationControllers. -func (c *FakeReplicationControllers) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewWatchAction(replicationcontrollersResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched replicationController. 
-func (c *FakeReplicationControllers) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.ReplicationController, err error) { - obj, err := c.Fake. - Invokes(core.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, name, data, subresources...), &api.ReplicationController{}) - - if obj == nil { - return nil, err - } - return obj.(*api.ReplicationController), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_resourcequota.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_resourcequota.go deleted file mode 100644 index 40188d77ad..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_resourcequota.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeResourceQuotas implements ResourceQuotaInterface -type FakeResourceQuotas struct { - Fake *FakeCore - ns string -} - -var resourcequotasResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "resourcequotas"} - -func (c *FakeResourceQuotas) Create(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(resourcequotasResource, c.ns, resourceQuota), &api.ResourceQuota{}) - - if obj == nil { - return nil, err - } - return obj.(*api.ResourceQuota), err -} - -func (c *FakeResourceQuotas) Update(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(resourcequotasResource, c.ns, resourceQuota), &api.ResourceQuota{}) - - if obj == nil { - return nil, err - } - return obj.(*api.ResourceQuota), err -} - -func (c *FakeResourceQuotas) UpdateStatus(resourceQuota *api.ResourceQuota) (*api.ResourceQuota, error) { - obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction(resourcequotasResource, "status", c.ns, resourceQuota), &api.ResourceQuota{}) - - if obj == nil { - return nil, err - } - return obj.(*api.ResourceQuota), err -} - -func (c *FakeResourceQuotas) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(core.NewDeleteAction(resourcequotasResource, c.ns, name), &api.ResourceQuota{}) - - return err -} - -func (c *FakeResourceQuotas) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(resourcequotasResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &api.ResourceQuotaList{}) - return err -} - -func (c *FakeResourceQuotas) Get(name string) (result *api.ResourceQuota, err error) { - obj, err := c.Fake. - Invokes(core.NewGetAction(resourcequotasResource, c.ns, name), &api.ResourceQuota{}) - - if obj == nil { - return nil, err - } - return obj.(*api.ResourceQuota), err -} - -func (c *FakeResourceQuotas) List(opts api.ListOptions) (result *api.ResourceQuotaList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(resourcequotasResource, c.ns, opts), &api.ResourceQuotaList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &api.ResourceQuotaList{} - for _, item := range obj.(*api.ResourceQuotaList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested resourceQuotas. -func (c *FakeResourceQuotas) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewWatchAction(resourcequotasResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched resourceQuota. -func (c *FakeResourceQuotas) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.ResourceQuota, err error) { - obj, err := c.Fake. - Invokes(core.NewPatchSubresourceAction(resourcequotasResource, c.ns, name, data, subresources...), &api.ResourceQuota{}) - - if obj == nil { - return nil, err - } - return obj.(*api.ResourceQuota), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_secret.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_secret.go deleted file mode 100644 index b0c339ce56..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_secret.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeSecrets implements SecretInterface -type FakeSecrets struct { - Fake *FakeCore - ns string -} - -var secretsResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "secrets"} - -func (c *FakeSecrets) Create(secret *api.Secret) (result *api.Secret, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(secretsResource, c.ns, secret), &api.Secret{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Secret), err -} - -func (c *FakeSecrets) Update(secret *api.Secret) (result *api.Secret, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(secretsResource, c.ns, secret), &api.Secret{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Secret), err -} - -func (c *FakeSecrets) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewDeleteAction(secretsResource, c.ns, name), &api.Secret{}) - - return err -} - -func (c *FakeSecrets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(secretsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &api.SecretList{}) - return err -} - -func (c *FakeSecrets) Get(name string) (result *api.Secret, err error) { - obj, err := c.Fake. - Invokes(core.NewGetAction(secretsResource, c.ns, name), &api.Secret{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Secret), err -} - -func (c *FakeSecrets) List(opts api.ListOptions) (result *api.SecretList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(secretsResource, c.ns, opts), &api.SecretList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &api.SecretList{} - for _, item := range obj.(*api.SecretList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested secrets. -func (c *FakeSecrets) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewWatchAction(secretsResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched secret. -func (c *FakeSecrets) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Secret, err error) { - obj, err := c.Fake. - Invokes(core.NewPatchSubresourceAction(secretsResource, c.ns, name, data, subresources...), &api.Secret{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Secret), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_service.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_service.go deleted file mode 100644 index 20d81829f7..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_service.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeServices implements ServiceInterface -type FakeServices struct { - Fake *FakeCore - ns string -} - -var servicesResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "services"} - -func (c *FakeServices) Create(service *api.Service) (result *api.Service, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(servicesResource, c.ns, service), &api.Service{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Service), err -} - -func (c *FakeServices) Update(service *api.Service) (result *api.Service, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(servicesResource, c.ns, service), &api.Service{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Service), err -} - -func (c *FakeServices) UpdateStatus(service *api.Service) (*api.Service, error) { - obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &api.Service{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Service), err -} - -func (c *FakeServices) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewDeleteAction(servicesResource, c.ns, name), &api.Service{}) - - return err -} - -func (c *FakeServices) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(servicesResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &api.ServiceList{}) - return err -} - -func (c *FakeServices) Get(name string) (result *api.Service, err error) { - obj, err := c.Fake. - Invokes(core.NewGetAction(servicesResource, c.ns, name), &api.Service{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Service), err -} - -func (c *FakeServices) List(opts api.ListOptions) (result *api.ServiceList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(servicesResource, c.ns, opts), &api.ServiceList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &api.ServiceList{} - for _, item := range obj.(*api.ServiceList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested services. -func (c *FakeServices) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewWatchAction(servicesResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched service. -func (c *FakeServices) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Service, err error) { - obj, err := c.Fake. 
- Invokes(core.NewPatchSubresourceAction(servicesResource, c.ns, name, data, subresources...), &api.Service{}) - - if obj == nil { - return nil, err - } - return obj.(*api.Service), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_serviceaccount.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_serviceaccount.go deleted file mode 100644 index 5c70cf4b1f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_serviceaccount.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeServiceAccounts implements ServiceAccountInterface -type FakeServiceAccounts struct { - Fake *FakeCore - ns string -} - -var serviceaccountsResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "serviceaccounts"} - -func (c *FakeServiceAccounts) Create(serviceAccount *api.ServiceAccount) (result *api.ServiceAccount, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(serviceaccountsResource, c.ns, serviceAccount), &api.ServiceAccount{}) - - if obj == nil { - return nil, err - } - return obj.(*api.ServiceAccount), err -} - -func (c *FakeServiceAccounts) Update(serviceAccount *api.ServiceAccount) (result *api.ServiceAccount, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(serviceaccountsResource, c.ns, serviceAccount), &api.ServiceAccount{}) - - if obj == nil { - return nil, err - } - return obj.(*api.ServiceAccount), err -} - -func (c *FakeServiceAccounts) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewDeleteAction(serviceaccountsResource, c.ns, name), &api.ServiceAccount{}) - - return err -} - -func (c *FakeServiceAccounts) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(serviceaccountsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &api.ServiceAccountList{}) - return err -} - -func (c *FakeServiceAccounts) Get(name string) (result *api.ServiceAccount, err error) { - obj, err := c.Fake. - Invokes(core.NewGetAction(serviceaccountsResource, c.ns, name), &api.ServiceAccount{}) - - if obj == nil { - return nil, err - } - return obj.(*api.ServiceAccount), err -} - -func (c *FakeServiceAccounts) List(opts api.ListOptions) (result *api.ServiceAccountList, err error) { - obj, err := c.Fake. 
- Invokes(core.NewListAction(serviceaccountsResource, c.ns, opts), &api.ServiceAccountList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &api.ServiceAccountList{} - for _, item := range obj.(*api.ServiceAccountList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested serviceAccounts. -func (c *FakeServiceAccounts) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewWatchAction(serviceaccountsResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched serviceAccount. -func (c *FakeServiceAccounts) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.ServiceAccount, err error) { - obj, err := c.Fake. - Invokes(core.NewPatchSubresourceAction(serviceaccountsResource, c.ns, name, data, subresources...), &api.ServiceAccount{}) - - if obj == nil { - return nil, err - } - return obj.(*api.ServiceAccount), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/generated_expansion.go deleted file mode 100644 index 39ad1b2fe3..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/generated_expansion.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -type ComponentStatusExpansion interface{} - -type ConfigMapExpansion interface{} - -type EndpointsExpansion interface{} - -type LimitRangeExpansion interface{} - -type PersistentVolumeExpansion interface{} - -type PersistentVolumeClaimExpansion interface{} - -type PodTemplateExpansion interface{} - -type ReplicationControllerExpansion interface{} - -type ResourceQuotaExpansion interface{} - -type SecretExpansion interface{} - -type ServiceAccountExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/limitrange.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/limitrange.go deleted file mode 100644 index 51c7b799a2..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/limitrange.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// LimitRangesGetter has a method to return a LimitRangeInterface. -// A group's client should implement this interface. -type LimitRangesGetter interface { - LimitRanges(namespace string) LimitRangeInterface -} - -// LimitRangeInterface has methods to work with LimitRange resources. -type LimitRangeInterface interface { - Create(*api.LimitRange) (*api.LimitRange, error) - Update(*api.LimitRange) (*api.LimitRange, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.LimitRange, error) - List(opts api.ListOptions) (*api.LimitRangeList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.LimitRange, err error) - LimitRangeExpansion -} - -// limitRanges implements LimitRangeInterface -type limitRanges struct { - client restclient.Interface - ns string -} - -// newLimitRanges returns a LimitRanges -func newLimitRanges(c *CoreClient, namespace string) *limitRanges { - return &limitRanges{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *limitRanges) Create(limitRange *api.LimitRange) (result *api.LimitRange, err error) { - result = &api.LimitRange{} - err = c.client.Post(). - Namespace(c.ns). - Resource("limitranges"). - Body(limitRange). - Do(). - Into(result) - return -} - -// Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *limitRanges) Update(limitRange *api.LimitRange) (result *api.LimitRange, err error) { - result = &api.LimitRange{} - err = c.client.Put(). - Namespace(c.ns). - Resource("limitranges"). - Name(limitRange.Name). - Body(limitRange). - Do(). - Into(result) - return -} - -// Delete takes name of the limitRange and deletes it. Returns an error if one occurs. -func (c *limitRanges) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("limitranges"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *limitRanges) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. -func (c *limitRanges) Get(name string) (result *api.LimitRange, err error) { - result = &api.LimitRange{} - err = c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - Name(name). - Do(). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of LimitRanges that match those selectors. -func (c *limitRanges) List(opts api.ListOptions) (result *api.LimitRangeList, err error) { - result = &api.LimitRangeList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested limitRanges. -func (c *limitRanges) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched limitRange. -func (c *limitRanges) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.LimitRange, err error) { - result = &api.LimitRange{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("limitranges"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace.go deleted file mode 100644 index b94a2aa27f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// NamespacesGetter has a method to return a NamespaceInterface. -// A group's client should implement this interface. -type NamespacesGetter interface { - Namespaces() NamespaceInterface -} - -// NamespaceInterface has methods to work with Namespace resources. -type NamespaceInterface interface { - Create(*api.Namespace) (*api.Namespace, error) - Update(*api.Namespace) (*api.Namespace, error) - UpdateStatus(*api.Namespace) (*api.Namespace, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.Namespace, error) - List(opts api.ListOptions) (*api.NamespaceList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Namespace, err error) - NamespaceExpansion -} - -// namespaces implements NamespaceInterface -type namespaces struct { - client restclient.Interface -} - -// newNamespaces returns a Namespaces -func newNamespaces(c *CoreClient) *namespaces { - return &namespaces{ - client: c.RESTClient(), - } -} - -// Create takes the representation of a namespace and creates it. 
Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Create(namespace *api.Namespace) (result *api.Namespace, err error) { - result = &api.Namespace{} - err = c.client.Post(). - Resource("namespaces"). - Body(namespace). - Do(). - Into(result) - return -} - -// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Update(namespace *api.Namespace) (result *api.Namespace, err error) { - result = &api.Namespace{} - err = c.client.Put(). - Resource("namespaces"). - Name(namespace.Name). - Body(namespace). - Do(). - Into(result) - return -} - -func (c *namespaces) UpdateStatus(namespace *api.Namespace) (result *api.Namespace, err error) { - result = &api.Namespace{} - err = c.client.Put(). - Resource("namespaces"). - Name(namespace.Name). - SubResource("status"). - Body(namespace). - Do(). - Into(result) - return -} - -// Delete takes name of the namespace and deletes it. Returns an error if one occurs. -func (c *namespaces) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Resource("namespaces"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *namespaces) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Resource("namespaces"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. -func (c *namespaces) Get(name string) (result *api.Namespace, err error) { - result = &api.Namespace{} - err = c.client.Get(). - Resource("namespaces"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Namespaces that match those selectors. -func (c *namespaces) List(opts api.ListOptions) (result *api.NamespaceList, err error) { - result = &api.NamespaceList{} - err = c.client.Get(). - Resource("namespaces"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested namespaces. -func (c *namespaces) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("namespaces"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched namespace. -func (c *namespaces) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Namespace, err error) { - result = &api.Namespace{} - err = c.client.Patch(pt). - Resource("namespaces"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace_expansion.go deleted file mode 100644 index 456de1cdfc..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace_expansion.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import "k8s.io/kubernetes/pkg/api" - -// The NamespaceExpansion interface allows manually adding extra methods to the NamespaceInterface. -type NamespaceExpansion interface { - Finalize(item *api.Namespace) (*api.Namespace, error) -} - -// Finalize takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. -func (c *namespaces) Finalize(namespace *api.Namespace) (result *api.Namespace, err error) { - result = &api.Namespace{} - err = c.client.Put().Resource("namespaces").Name(namespace.Name).SubResource("finalize").Body(namespace).Do().Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node.go deleted file mode 100644 index ebf6d77e07..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// NodesGetter has a method to return a NodeInterface. -// A group's client should implement this interface. -type NodesGetter interface { - Nodes() NodeInterface -} - -// NodeInterface has methods to work with Node resources. -type NodeInterface interface { - Create(*api.Node) (*api.Node, error) - Update(*api.Node) (*api.Node, error) - UpdateStatus(*api.Node) (*api.Node, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.Node, error) - List(opts api.ListOptions) (*api.NodeList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Node, err error) - NodeExpansion -} - -// nodes implements NodeInterface -type nodes struct { - client restclient.Interface -} - -// newNodes returns a Nodes -func newNodes(c *CoreClient) *nodes { - return &nodes{ - client: c.RESTClient(), - } -} - -// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. 
-func (c *nodes) Create(node *api.Node) (result *api.Node, err error) { - result = &api.Node{} - err = c.client.Post(). - Resource("nodes"). - Body(node). - Do(). - Into(result) - return -} - -// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. -func (c *nodes) Update(node *api.Node) (result *api.Node, err error) { - result = &api.Node{} - err = c.client.Put(). - Resource("nodes"). - Name(node.Name). - Body(node). - Do(). - Into(result) - return -} - -func (c *nodes) UpdateStatus(node *api.Node) (result *api.Node, err error) { - result = &api.Node{} - err = c.client.Put(). - Resource("nodes"). - Name(node.Name). - SubResource("status"). - Body(node). - Do(). - Into(result) - return -} - -// Delete takes name of the node and deletes it. Returns an error if one occurs. -func (c *nodes) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Resource("nodes"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *nodes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Resource("nodes"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the node, and returns the corresponding node object, and an error if there is any. -func (c *nodes) Get(name string) (result *api.Node, err error) { - result = &api.Node{} - err = c.client.Get(). - Resource("nodes"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Nodes that match those selectors. -func (c *nodes) List(opts api.ListOptions) (result *api.NodeList, err error) { - result = &api.NodeList{} - err = c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested nodes. -func (c *nodes) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("nodes"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched node. -func (c *nodes) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Node, err error) { - result = &api.Node{} - err = c.client.Patch(pt). - Resource("nodes"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node_expansion.go deleted file mode 100644 index ec917502d0..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node_expansion.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import "k8s.io/kubernetes/pkg/api" - -// The NodeExpansion interface allows manually adding extra methods to the NodeInterface. -type NodeExpansion interface { - // PatchStatus modifies the status of an existing node. It returns the copy - // of the node that the server returns, or an error. - PatchStatus(nodeName string, data []byte) (*api.Node, error) -} - -// PatchStatus modifies the status of an existing node. It returns the copy of -// the node that the server returns, or an error. -func (c *nodes) PatchStatus(nodeName string, data []byte) (*api.Node, error) { - result := &api.Node{} - err := c.client.Patch(api.StrategicMergePatchType). - Resource("nodes"). - Name(nodeName). - SubResource("status"). - Body(data). - Do(). - Into(result) - return result, err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolume.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolume.go deleted file mode 100644 index dd63e5d01d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolume.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// PersistentVolumesGetter has a method to return a PersistentVolumeInterface. -// A group's client should implement this interface. -type PersistentVolumesGetter interface { - PersistentVolumes() PersistentVolumeInterface -} - -// PersistentVolumeInterface has methods to work with PersistentVolume resources. -type PersistentVolumeInterface interface { - Create(*api.PersistentVolume) (*api.PersistentVolume, error) - Update(*api.PersistentVolume) (*api.PersistentVolume, error) - UpdateStatus(*api.PersistentVolume) (*api.PersistentVolume, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.PersistentVolume, error) - List(opts api.ListOptions) (*api.PersistentVolumeList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.PersistentVolume, err error) - PersistentVolumeExpansion -} - -// persistentVolumes implements PersistentVolumeInterface -type persistentVolumes struct { - client restclient.Interface -} - -// newPersistentVolumes returns a PersistentVolumes -func newPersistentVolumes(c *CoreClient) *persistentVolumes { - return &persistentVolumes{ - client: c.RESTClient(), - } -} - -// Create takes the representation of a persistentVolume and creates it. 
Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *persistentVolumes) Create(persistentVolume *api.PersistentVolume) (result *api.PersistentVolume, err error) { - result = &api.PersistentVolume{} - err = c.client.Post(). - Resource("persistentvolumes"). - Body(persistentVolume). - Do(). - Into(result) - return -} - -// Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *persistentVolumes) Update(persistentVolume *api.PersistentVolume) (result *api.PersistentVolume, err error) { - result = &api.PersistentVolume{} - err = c.client.Put(). - Resource("persistentvolumes"). - Name(persistentVolume.Name). - Body(persistentVolume). - Do(). - Into(result) - return -} - -func (c *persistentVolumes) UpdateStatus(persistentVolume *api.PersistentVolume) (result *api.PersistentVolume, err error) { - result = &api.PersistentVolume{} - err = c.client.Put(). - Resource("persistentvolumes"). - Name(persistentVolume.Name). - SubResource("status"). - Body(persistentVolume). - Do(). - Into(result) - return -} - -// Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. -func (c *persistentVolumes) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Resource("persistentvolumes"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *persistentVolumes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Resource("persistentvolumes"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. -func (c *persistentVolumes) Get(name string) (result *api.PersistentVolume, err error) { - result = &api.PersistentVolume{} - err = c.client.Get(). - Resource("persistentvolumes"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. -func (c *persistentVolumes) List(opts api.ListOptions) (result *api.PersistentVolumeList, err error) { - result = &api.PersistentVolumeList{} - err = c.client.Get(). - Resource("persistentvolumes"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested persistentVolumes. -func (c *persistentVolumes) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("persistentvolumes"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched persistentVolume. -func (c *persistentVolumes) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.PersistentVolume, err error) { - result = &api.PersistentVolume{} - err = c.client.Patch(pt). - Resource("persistentvolumes"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolumeclaim.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolumeclaim.go deleted file mode 100644 index ff87b85e00..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolumeclaim.go +++ /dev/null @@ -1,165 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// PersistentVolumeClaimsGetter has a method to return a PersistentVolumeClaimInterface. -// A group's client should implement this interface. -type PersistentVolumeClaimsGetter interface { - PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface -} - -// PersistentVolumeClaimInterface has methods to work with PersistentVolumeClaim resources. -type PersistentVolumeClaimInterface interface { - Create(*api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) - Update(*api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) - UpdateStatus(*api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.PersistentVolumeClaim, error) - List(opts api.ListOptions) (*api.PersistentVolumeClaimList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.PersistentVolumeClaim, err error) - PersistentVolumeClaimExpansion -} - -// persistentVolumeClaims implements PersistentVolumeClaimInterface -type persistentVolumeClaims struct { - client restclient.Interface - ns string -} - -// newPersistentVolumeClaims returns a PersistentVolumeClaims -func newPersistentVolumeClaims(c *CoreClient, namespace string) *persistentVolumeClaims { - return &persistentVolumeClaims{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *persistentVolumeClaims) Create(persistentVolumeClaim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { - result = &api.PersistentVolumeClaim{} - err = c.client.Post(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Body(persistentVolumeClaim). - Do(). - Into(result) - return -} - -// Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. 
-func (c *persistentVolumeClaims) Update(persistentVolumeClaim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { - result = &api.PersistentVolumeClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(persistentVolumeClaim.Name). - Body(persistentVolumeClaim). - Do(). - Into(result) - return -} - -func (c *persistentVolumeClaims) UpdateStatus(persistentVolumeClaim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { - result = &api.PersistentVolumeClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(persistentVolumeClaim.Name). - SubResource("status"). - Body(persistentVolumeClaim). - Do(). - Into(result) - return -} - -// Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs. -func (c *persistentVolumeClaims) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *persistentVolumeClaims) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. -func (c *persistentVolumeClaims) Get(name string) (result *api.PersistentVolumeClaim, err error) { - result = &api.PersistentVolumeClaim{} - err = c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. -func (c *persistentVolumeClaims) List(opts api.ListOptions) (result *api.PersistentVolumeClaimList, err error) { - result = &api.PersistentVolumeClaimList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested persistentVolumeClaims. -func (c *persistentVolumeClaims) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched persistentVolumeClaim. -func (c *persistentVolumeClaims) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.PersistentVolumeClaim, err error) { - result = &api.PersistentVolumeClaim{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod.go deleted file mode 100644 index 1495f1ea0c..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod.go +++ /dev/null @@ -1,165 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// PodsGetter has a method to return a PodInterface. -// A group's client should implement this interface. -type PodsGetter interface { - Pods(namespace string) PodInterface -} - -// PodInterface has methods to work with Pod resources. -type PodInterface interface { - Create(*api.Pod) (*api.Pod, error) - Update(*api.Pod) (*api.Pod, error) - UpdateStatus(*api.Pod) (*api.Pod, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.Pod, error) - List(opts api.ListOptions) (*api.PodList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Pod, err error) - PodExpansion -} - -// pods implements PodInterface -type pods struct { - client restclient.Interface - ns string -} - -// newPods returns a Pods -func newPods(c *CoreClient, namespace string) *pods { - return &pods{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) Create(pod *api.Pod) (result *api.Pod, err error) { - result = &api.Pod{} - err = c.client.Post(). - Namespace(c.ns). - Resource("pods"). - Body(pod). - Do(). - Into(result) - return -} - -// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) Update(pod *api.Pod) (result *api.Pod, err error) { - result = &api.Pod{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pods"). - Name(pod.Name). - Body(pod). - Do(). - Into(result) - return -} - -func (c *pods) UpdateStatus(pod *api.Pod) (result *api.Pod, err error) { - result = &api.Pod{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pods"). - Name(pod.Name). - SubResource("status"). - Body(pod). - Do(). - Into(result) - return -} - -// Delete takes name of the pod and deletes it. Returns an error if one occurs. -func (c *pods) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("pods"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *pods) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. -func (c *pods) Get(name string) (result *api.Pod, err error) { - result = &api.Pod{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). 
- Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Pods that match those selectors. -func (c *pods) List(opts api.ListOptions) (result *api.PodList, err error) { - result = &api.PodList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested pods. -func (c *pods) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched pod. -func (c *pods) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Pod, err error) { - result = &api.Pod{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("pods"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod_expansion.go deleted file mode 100644 index be7dd3a711..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod_expansion.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/restclient" -) - -// The PodExpansion interface allows manually adding extra methods to the PodInterface. -type PodExpansion interface { - Bind(binding *api.Binding) error - GetLogs(name string, opts *api.PodLogOptions) *restclient.Request -} - -// Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored). -func (c *pods) Bind(binding *api.Binding) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).SubResource("binding").Body(binding).Do().Error() -} - -// Get constructs a request for getting the logs for a pod -func (c *pods) GetLogs(name string, opts *api.PodLogOptions) *restclient.Request { - return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, api.ParameterCodec) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/podtemplate.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/podtemplate.go deleted file mode 100644 index a610e4db2b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/podtemplate.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// PodTemplatesGetter has a method to return a PodTemplateInterface. -// A group's client should implement this interface. -type PodTemplatesGetter interface { - PodTemplates(namespace string) PodTemplateInterface -} - -// PodTemplateInterface has methods to work with PodTemplate resources. -type PodTemplateInterface interface { - Create(*api.PodTemplate) (*api.PodTemplate, error) - Update(*api.PodTemplate) (*api.PodTemplate, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.PodTemplate, error) - List(opts api.ListOptions) (*api.PodTemplateList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.PodTemplate, err error) - PodTemplateExpansion -} - -// podTemplates implements PodTemplateInterface -type podTemplates struct { - client restclient.Interface - ns string -} - -// newPodTemplates returns a PodTemplates -func newPodTemplates(c *CoreClient, namespace string) *podTemplates { - return &podTemplates{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *podTemplates) Create(podTemplate *api.PodTemplate) (result *api.PodTemplate, err error) { - result = &api.PodTemplate{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podtemplates"). - Body(podTemplate). - Do(). - Into(result) - return -} - -// Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *podTemplates) Update(podTemplate *api.PodTemplate) (result *api.PodTemplate, err error) { - result = &api.PodTemplate{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podtemplates"). - Name(podTemplate.Name). - Body(podTemplate). - Do(). - Into(result) - return -} - -// Delete takes name of the podTemplate and deletes it. Returns an error if one occurs. -func (c *podTemplates) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podtemplates"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podTemplates) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. 
-func (c *podTemplates) Get(name string) (result *api.PodTemplate, err error) { - result = &api.PodTemplate{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodTemplates that match those selectors. -func (c *podTemplates) List(opts api.ListOptions) (result *api.PodTemplateList, err error) { - result = &api.PodTemplateList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podTemplates. -func (c *podTemplates) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched podTemplate. -func (c *podTemplates) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.PodTemplate, err error) { - result = &api.PodTemplate{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("podtemplates"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/replicationcontroller.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/replicationcontroller.go deleted file mode 100644 index 58f97f9181..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/replicationcontroller.go +++ /dev/null @@ -1,165 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ReplicationControllersGetter has a method to return a ReplicationControllerInterface. -// A group's client should implement this interface. -type ReplicationControllersGetter interface { - ReplicationControllers(namespace string) ReplicationControllerInterface -} - -// ReplicationControllerInterface has methods to work with ReplicationController resources. 
-type ReplicationControllerInterface interface { - Create(*api.ReplicationController) (*api.ReplicationController, error) - Update(*api.ReplicationController) (*api.ReplicationController, error) - UpdateStatus(*api.ReplicationController) (*api.ReplicationController, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.ReplicationController, error) - List(opts api.ListOptions) (*api.ReplicationControllerList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.ReplicationController, err error) - ReplicationControllerExpansion -} - -// replicationControllers implements ReplicationControllerInterface -type replicationControllers struct { - client restclient.Interface - ns string -} - -// newReplicationControllers returns a ReplicationControllers -func newReplicationControllers(c *CoreClient, namespace string) *replicationControllers { - return &replicationControllers{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Create(replicationController *api.ReplicationController) (result *api.ReplicationController, err error) { - result = &api.ReplicationController{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Body(replicationController). - Do(). - Into(result) - return -} - -// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Update(replicationController *api.ReplicationController) (result *api.ReplicationController, err error) { - result = &api.ReplicationController{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(replicationController.Name). - Body(replicationController). - Do(). - Into(result) - return -} - -func (c *replicationControllers) UpdateStatus(replicationController *api.ReplicationController) (result *api.ReplicationController, err error) { - result = &api.ReplicationController{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(replicationController.Name). - SubResource("status"). - Body(replicationController). - Do(). - Into(result) - return -} - -// Delete takes name of the replicationController and deletes it. Returns an error if one occurs. -func (c *replicationControllers) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicationControllers) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. 
-func (c *replicationControllers) Get(name string) (result *api.ReplicationController, err error) { - result = &api.ReplicationController{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. -func (c *replicationControllers) List(opts api.ListOptions) (result *api.ReplicationControllerList, err error) { - result = &api.ReplicationControllerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicationControllers. -func (c *replicationControllers) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched replicationController. -func (c *replicationControllers) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.ReplicationController, err error) { - result = &api.ReplicationController{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicationcontrollers"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/resourcequota.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/resourcequota.go deleted file mode 100644 index 254a7361f9..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/resourcequota.go +++ /dev/null @@ -1,165 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ResourceQuotasGetter has a method to return a ResourceQuotaInterface. -// A group's client should implement this interface. -type ResourceQuotasGetter interface { - ResourceQuotas(namespace string) ResourceQuotaInterface -} - -// ResourceQuotaInterface has methods to work with ResourceQuota resources. 
-type ResourceQuotaInterface interface { - Create(*api.ResourceQuota) (*api.ResourceQuota, error) - Update(*api.ResourceQuota) (*api.ResourceQuota, error) - UpdateStatus(*api.ResourceQuota) (*api.ResourceQuota, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.ResourceQuota, error) - List(opts api.ListOptions) (*api.ResourceQuotaList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.ResourceQuota, err error) - ResourceQuotaExpansion -} - -// resourceQuotas implements ResourceQuotaInterface -type resourceQuotas struct { - client restclient.Interface - ns string -} - -// newResourceQuotas returns a ResourceQuotas -func newResourceQuotas(c *CoreClient, namespace string) *resourceQuotas { - return &resourceQuotas{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *resourceQuotas) Create(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { - result = &api.ResourceQuota{} - err = c.client.Post(). - Namespace(c.ns). - Resource("resourcequotas"). - Body(resourceQuota). - Do(). - Into(result) - return -} - -// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *resourceQuotas) Update(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { - result = &api.ResourceQuota{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(resourceQuota.Name). - Body(resourceQuota). - Do(). - Into(result) - return -} - -func (c *resourceQuotas) UpdateStatus(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { - result = &api.ResourceQuota{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(resourceQuota.Name). - SubResource("status"). - Body(resourceQuota). - Do(). - Into(result) - return -} - -// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. -func (c *resourceQuotas) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceQuotas) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. -func (c *resourceQuotas) Get(name string) (result *api.ResourceQuota, err error) { - result = &api.ResourceQuota{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. -func (c *resourceQuotas) List(opts api.ListOptions) (result *api.ResourceQuotaList, err error) { - result = &api.ResourceQuotaList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). 
- VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceQuotas. -func (c *resourceQuotas) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched resourceQuota. -func (c *resourceQuotas) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.ResourceQuota, err error) { - result = &api.ResourceQuota{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("resourcequotas"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/secret.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/secret.go deleted file mode 100644 index 6fb40f5a6e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/secret.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// SecretsGetter has a method to return a SecretInterface. -// A group's client should implement this interface. -type SecretsGetter interface { - Secrets(namespace string) SecretInterface -} - -// SecretInterface has methods to work with Secret resources. -type SecretInterface interface { - Create(*api.Secret) (*api.Secret, error) - Update(*api.Secret) (*api.Secret, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.Secret, error) - List(opts api.ListOptions) (*api.SecretList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Secret, err error) - SecretExpansion -} - -// secrets implements SecretInterface -type secrets struct { - client restclient.Interface - ns string -} - -// newSecrets returns a Secrets -func newSecrets(c *CoreClient, namespace string) *secrets { - return &secrets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Create(secret *api.Secret) (result *api.Secret, err error) { - result = &api.Secret{} - err = c.client.Post(). - Namespace(c.ns). - Resource("secrets"). - Body(secret). - Do(). - Into(result) - return -} - -// Update takes the representation of a secret and updates it. 
Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Update(secret *api.Secret) (result *api.Secret, err error) { - result = &api.Secret{} - err = c.client.Put(). - Namespace(c.ns). - Resource("secrets"). - Name(secret.Name). - Body(secret). - Do(). - Into(result) - return -} - -// Delete takes name of the secret and deletes it. Returns an error if one occurs. -func (c *secrets) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("secrets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *secrets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. -func (c *secrets) Get(name string) (result *api.Secret, err error) { - result = &api.Secret{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Secrets that match those selectors. -func (c *secrets) List(opts api.ListOptions) (result *api.SecretList, err error) { - result = &api.SecretList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested secrets. -func (c *secrets) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched secret. -func (c *secrets) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Secret, err error) { - result = &api.Secret{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("secrets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service.go deleted file mode 100644 index 2e912e4b1d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service.go +++ /dev/null @@ -1,165 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ServicesGetter has a method to return a ServiceInterface. 
-// A group's client should implement this interface. -type ServicesGetter interface { - Services(namespace string) ServiceInterface -} - -// ServiceInterface has methods to work with Service resources. -type ServiceInterface interface { - Create(*api.Service) (*api.Service, error) - Update(*api.Service) (*api.Service, error) - UpdateStatus(*api.Service) (*api.Service, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.Service, error) - List(opts api.ListOptions) (*api.ServiceList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Service, err error) - ServiceExpansion -} - -// services implements ServiceInterface -type services struct { - client restclient.Interface - ns string -} - -// newServices returns a Services -func newServices(c *CoreClient, namespace string) *services { - return &services{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Create(service *api.Service) (result *api.Service, err error) { - result = &api.Service{} - err = c.client.Post(). - Namespace(c.ns). - Resource("services"). - Body(service). - Do(). - Into(result) - return -} - -// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Update(service *api.Service) (result *api.Service, err error) { - result = &api.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - Body(service). - Do(). - Into(result) - return -} - -func (c *services) UpdateStatus(service *api.Service) (result *api.Service, err error) { - result = &api.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - SubResource("status"). - Body(service). - Do(). - Into(result) - return -} - -// Delete takes name of the service and deletes it. Returns an error if one occurs. -func (c *services) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("services"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *services) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the service, and returns the corresponding service object, and an error if there is any. -func (c *services) Get(name string) (result *api.Service, err error) { - result = &api.Service{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Services that match those selectors. -func (c *services) List(opts api.ListOptions) (result *api.ServiceList, err error) { - result = &api.ServiceList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested services. 
-func (c *services) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched service. -func (c *services) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.Service, err error) { - result = &api.Service{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("services"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service_expansion.go deleted file mode 100644 index 7a7c1daeeb..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service_expansion.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/util/net" -) - -// The ServiceExpansion interface allows manually adding extra methods to the ServiceInterface. -type ServiceExpansion interface { - ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper -} - -// ProxyGet returns a response of the service by calling it through the proxy. -func (c *services) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { - request := c.client.Get(). - Prefix("proxy"). - Namespace(c.ns). - Resource("services"). - Name(net.JoinSchemeNamePort(scheme, name, port)). - Suffix(path) - for k, v := range params { - request = request.Param(k, v) - } - return request -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/serviceaccount.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/serviceaccount.go deleted file mode 100644 index 1ace726198..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/serviceaccount.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ServiceAccountsGetter has a method to return a ServiceAccountInterface. -// A group's client should implement this interface. -type ServiceAccountsGetter interface { - ServiceAccounts(namespace string) ServiceAccountInterface -} - -// ServiceAccountInterface has methods to work with ServiceAccount resources. -type ServiceAccountInterface interface { - Create(*api.ServiceAccount) (*api.ServiceAccount, error) - Update(*api.ServiceAccount) (*api.ServiceAccount, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.ServiceAccount, error) - List(opts api.ListOptions) (*api.ServiceAccountList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.ServiceAccount, err error) - ServiceAccountExpansion -} - -// serviceAccounts implements ServiceAccountInterface -type serviceAccounts struct { - client restclient.Interface - ns string -} - -// newServiceAccounts returns a ServiceAccounts -func newServiceAccounts(c *CoreClient, namespace string) *serviceAccounts { - return &serviceAccounts{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *serviceAccounts) Create(serviceAccount *api.ServiceAccount) (result *api.ServiceAccount, err error) { - result = &api.ServiceAccount{} - err = c.client.Post(). - Namespace(c.ns). - Resource("serviceaccounts"). - Body(serviceAccount). - Do(). - Into(result) - return -} - -// Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *serviceAccounts) Update(serviceAccount *api.ServiceAccount) (result *api.ServiceAccount, err error) { - result = &api.ServiceAccount{} - err = c.client.Put(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(serviceAccount.Name). - Body(serviceAccount). - Do(). - Into(result) - return -} - -// Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. -func (c *serviceAccounts) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *serviceAccounts) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. -func (c *serviceAccounts) Get(name string) (result *api.ServiceAccount, err error) { - result = &api.ServiceAccount{} - err = c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. 
-func (c *serviceAccounts) List(opts api.ListOptions) (result *api.ServiceAccountList, err error) { - result = &api.ServiceAccountList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested serviceAccounts. -func (c *serviceAccounts) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched serviceAccount. -func (c *serviceAccounts) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *api.ServiceAccount, err error) { - result = &api.ServiceAccount{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("serviceaccounts"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/BUILD deleted file mode 100644 index 56c4089dcf..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/BUILD +++ /dev/null @@ -1,40 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "daemonset.go", - "deployment.go", - "deployment_expansion.go", - "doc.go", - "extensions_client.go", - "generated_expansion.go", - "ingress.go", - "networkpolicy.go", - "podsecuritypolicy.go", - "replicaset.go", - "scale.go", - "scale_expansion.go", - "thirdpartyresource.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/daemonset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/daemonset.go deleted file mode 100644 index c219deddb9..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/daemonset.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// DaemonSetsGetter has a method to return a DaemonSetInterface. -// A group's client should implement this interface. -type DaemonSetsGetter interface { - DaemonSets(namespace string) DaemonSetInterface -} - -// DaemonSetInterface has methods to work with DaemonSet resources. -type DaemonSetInterface interface { - Create(*extensions.DaemonSet) (*extensions.DaemonSet, error) - Update(*extensions.DaemonSet) (*extensions.DaemonSet, error) - UpdateStatus(*extensions.DaemonSet) (*extensions.DaemonSet, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*extensions.DaemonSet, error) - List(opts api.ListOptions) (*extensions.DaemonSetList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.DaemonSet, err error) - DaemonSetExpansion -} - -// daemonSets implements DaemonSetInterface -type daemonSets struct { - client restclient.Interface - ns string -} - -// newDaemonSets returns a DaemonSets -func newDaemonSets(c *ExtensionsClient, namespace string) *daemonSets { - return &daemonSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("daemonsets"). - Body(daemonSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - Body(daemonSet). - Do(). - Into(result) - return -} - -func (c *daemonSets) UpdateStatus(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - SubResource("status"). - Body(daemonSet). - Do(). - Into(result) - return -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(name string) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Get(). 
- Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *daemonSets) List(opts api.ListOptions) (result *extensions.DaemonSetList, err error) { - result = &extensions.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *daemonSets) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("daemonsets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment.go deleted file mode 100644 index 7da091b6c2..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// DeploymentsGetter has a method to return a DeploymentInterface. -// A group's client should implement this interface. -type DeploymentsGetter interface { - Deployments(namespace string) DeploymentInterface -} - -// DeploymentInterface has methods to work with Deployment resources. 
-type DeploymentInterface interface { - Create(*extensions.Deployment) (*extensions.Deployment, error) - Update(*extensions.Deployment) (*extensions.Deployment, error) - UpdateStatus(*extensions.Deployment) (*extensions.Deployment, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*extensions.Deployment, error) - List(opts api.ListOptions) (*extensions.DeploymentList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.Deployment, err error) - DeploymentExpansion -} - -// deployments implements DeploymentInterface -type deployments struct { - client restclient.Interface - ns string -} - -// newDeployments returns a Deployments -func newDeployments(c *ExtensionsClient, namespace string) *deployments { - return &deployments{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - Body(deployment). - Do(). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - Body(deployment). - Do(). - Into(result) - return -} - -func (c *deployments) UpdateStatus(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - Body(deployment). - Do(). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(name string) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(opts api.ListOptions) (result *extensions.DeploymentList, err error) { - result = &extensions.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). 
- VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment_expansion.go deleted file mode 100644 index f8d7a1c634..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment_expansion.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import "k8s.io/kubernetes/pkg/apis/extensions" - -// The DeploymentExpansion interface allows manually adding extra methods to the DeploymentInterface. -type DeploymentExpansion interface { - Rollback(*extensions.DeploymentRollback) error -} - -// Rollback applied the provided DeploymentRollback to the named deployment in the current namespace. -func (c *deployments) Rollback(deploymentRollback *extensions.DeploymentRollback) error { - return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).SubResource("rollback").Body(deploymentRollback).Do().Error() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/doc.go deleted file mode 100644 index d908962a44..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// This package is generated by client-gen with the default arguments. - -// This package has the automatically generated typed clients. -package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/extensions_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/extensions_client.go deleted file mode 100644 index 99bf3b5977..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/extensions_client.go +++ /dev/null @@ -1,134 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - registered "k8s.io/kubernetes/pkg/apimachinery/registered" - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -type ExtensionsInterface interface { - RESTClient() restclient.Interface - DaemonSetsGetter - DeploymentsGetter - IngressesGetter - NetworkPoliciesGetter - PodSecurityPoliciesGetter - ReplicaSetsGetter - ScalesGetter - ThirdPartyResourcesGetter -} - -// ExtensionsClient is used to interact with features provided by the k8s.io/kubernetes/pkg/apimachinery/registered.Group group. -type ExtensionsClient struct { - restClient restclient.Interface -} - -func (c *ExtensionsClient) DaemonSets(namespace string) DaemonSetInterface { - return newDaemonSets(c, namespace) -} - -func (c *ExtensionsClient) Deployments(namespace string) DeploymentInterface { - return newDeployments(c, namespace) -} - -func (c *ExtensionsClient) Ingresses(namespace string) IngressInterface { - return newIngresses(c, namespace) -} - -func (c *ExtensionsClient) NetworkPolicies(namespace string) NetworkPolicyInterface { - return newNetworkPolicies(c, namespace) -} - -func (c *ExtensionsClient) PodSecurityPolicies() PodSecurityPolicyInterface { - return newPodSecurityPolicies(c) -} - -func (c *ExtensionsClient) ReplicaSets(namespace string) ReplicaSetInterface { - return newReplicaSets(c, namespace) -} - -func (c *ExtensionsClient) Scales(namespace string) ScaleInterface { - return newScales(c, namespace) -} - -func (c *ExtensionsClient) ThirdPartyResources() ThirdPartyResourceInterface { - return newThirdPartyResources(c) -} - -// NewForConfig creates a new ExtensionsClient for the given config. -func NewForConfig(c *restclient.Config) (*ExtensionsClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &ExtensionsClient{client}, nil -} - -// NewForConfigOrDie creates a new ExtensionsClient for the given config and -// panics if there is an error in the config. 
-func NewForConfigOrDie(c *restclient.Config) *ExtensionsClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new ExtensionsClient for the given RESTClient. -func New(c restclient.Interface) *ExtensionsClient { - return &ExtensionsClient{c} -} - -func setConfigDefaults(config *restclient.Config) error { - // if extensions group is not registered, return an error - g, err := registered.Group("extensions") - if err != nil { - return err - } - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != g.GroupVersion.Group { - copyGroupVersion := g.GroupVersion - config.GroupVersion = &copyGroupVersion - } - config.NegotiatedSerializer = api.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *ExtensionsClient) RESTClient() restclient.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/BUILD deleted file mode 100644 index 2b21d6406f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/BUILD +++ /dev/null @@ -1,40 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fake_daemonset.go", - "fake_deployment.go", - "fake_deployment_expansion.go", - "fake_extensions_client.go", - "fake_ingress.go", - "fake_networkpolicy.go", - "fake_podsecuritypolicy.go", - "fake_replicaset.go", - "fake_scale.go", - "fake_scale_expansion.go", - "fake_thirdpartyresource.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/testing/core:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/doc.go deleted file mode 100644 index 083f78f3a4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// Package fake has the automatically generated clients. -package fake diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_daemonset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_daemonset.go deleted file mode 100644 index 6b193b5f2a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_daemonset.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeDaemonSets implements DaemonSetInterface -type FakeDaemonSets struct { - Fake *FakeExtensions - ns string -} - -var daemonsetsResource = unversioned.GroupVersionResource{Group: "extensions", Version: "", Resource: "daemonsets"} - -func (c *FakeDaemonSets) Create(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &extensions.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.DaemonSet), err -} - -func (c *FakeDaemonSets) Update(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &extensions.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.DaemonSet), err -} - -func (c *FakeDaemonSets) UpdateStatus(daemonSet *extensions.DaemonSet) (*extensions.DaemonSet, error) { - obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &extensions.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.DaemonSet), err -} - -func (c *FakeDaemonSets) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(core.NewDeleteAction(daemonsetsResource, c.ns, name), &extensions.DaemonSet{}) - - return err -} - -func (c *FakeDaemonSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &extensions.DaemonSetList{}) - return err -} - -func (c *FakeDaemonSets) Get(name string) (result *extensions.DaemonSet, err error) { - obj, err := c.Fake. - Invokes(core.NewGetAction(daemonsetsResource, c.ns, name), &extensions.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.DaemonSet), err -} - -func (c *FakeDaemonSets) List(opts api.ListOptions) (result *extensions.DaemonSetList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(daemonsetsResource, c.ns, opts), &extensions.DaemonSetList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &extensions.DaemonSetList{} - for _, item := range obj.(*extensions.DaemonSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *FakeDaemonSets) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewWatchAction(daemonsetsResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *FakeDaemonSets) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.DaemonSet, err error) { - obj, err := c.Fake. - Invokes(core.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, data, subresources...), &extensions.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.DaemonSet), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_deployment.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_deployment.go deleted file mode 100644 index d001e6048a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_deployment.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeDeployments implements DeploymentInterface -type FakeDeployments struct { - Fake *FakeExtensions - ns string -} - -var deploymentsResource = unversioned.GroupVersionResource{Group: "extensions", Version: "", Resource: "deployments"} - -func (c *FakeDeployments) Create(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(deploymentsResource, c.ns, deployment), &extensions.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.Deployment), err -} - -func (c *FakeDeployments) Update(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(deploymentsResource, c.ns, deployment), &extensions.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.Deployment), err -} - -func (c *FakeDeployments) UpdateStatus(deployment *extensions.Deployment) (*extensions.Deployment, error) { - obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &extensions.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.Deployment), err -} - -func (c *FakeDeployments) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewDeleteAction(deploymentsResource, c.ns, name), &extensions.Deployment{}) - - return err -} - -func (c *FakeDeployments) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(deploymentsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &extensions.DeploymentList{}) - return err -} - -func (c *FakeDeployments) Get(name string) (result *extensions.Deployment, err error) { - obj, err := c.Fake. - Invokes(core.NewGetAction(deploymentsResource, c.ns, name), &extensions.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.Deployment), err -} - -func (c *FakeDeployments) List(opts api.ListOptions) (result *extensions.DeploymentList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(deploymentsResource, c.ns, opts), &extensions.DeploymentList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &extensions.DeploymentList{} - for _, item := range obj.(*extensions.DeploymentList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *FakeDeployments) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewWatchAction(deploymentsResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched deployment. -func (c *FakeDeployments) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.Deployment, err error) { - obj, err := c.Fake. 
- Invokes(core.NewPatchSubresourceAction(deploymentsResource, c.ns, name, data, subresources...), &extensions.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.Deployment), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_extensions_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_extensions_client.go deleted file mode 100644 index 9ab490eb13..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_extensions_client.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - internalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" - restclient "k8s.io/kubernetes/pkg/client/restclient" - core "k8s.io/kubernetes/pkg/client/testing/core" -) - -type FakeExtensions struct { - *core.Fake -} - -func (c *FakeExtensions) DaemonSets(namespace string) internalversion.DaemonSetInterface { - return &FakeDaemonSets{c, namespace} -} - -func (c *FakeExtensions) Deployments(namespace string) internalversion.DeploymentInterface { - return &FakeDeployments{c, namespace} -} - -func (c *FakeExtensions) Ingresses(namespace string) internalversion.IngressInterface { - return &FakeIngresses{c, namespace} -} - -func (c *FakeExtensions) NetworkPolicies(namespace string) internalversion.NetworkPolicyInterface { - return &FakeNetworkPolicies{c, namespace} -} - -func (c *FakeExtensions) PodSecurityPolicies() internalversion.PodSecurityPolicyInterface { - return &FakePodSecurityPolicies{c} -} - -func (c *FakeExtensions) ReplicaSets(namespace string) internalversion.ReplicaSetInterface { - return &FakeReplicaSets{c, namespace} -} - -func (c *FakeExtensions) Scales(namespace string) internalversion.ScaleInterface { - return &FakeScales{c, namespace} -} - -func (c *FakeExtensions) ThirdPartyResources() internalversion.ThirdPartyResourceInterface { - return &FakeThirdPartyResources{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeExtensions) RESTClient() restclient.Interface { - var ret *restclient.RESTClient - return ret -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_ingress.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_ingress.go deleted file mode 100644 index 12d6b27db9..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_ingress.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeIngresses implements IngressInterface -type FakeIngresses struct { - Fake *FakeExtensions - ns string -} - -var ingressesResource = unversioned.GroupVersionResource{Group: "extensions", Version: "", Resource: "ingresses"} - -func (c *FakeIngresses) Create(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(ingressesResource, c.ns, ingress), &extensions.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.Ingress), err -} - -func (c *FakeIngresses) Update(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(ingressesResource, c.ns, ingress), &extensions.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.Ingress), err -} - -func (c *FakeIngresses) UpdateStatus(ingress *extensions.Ingress) (*extensions.Ingress, error) { - obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &extensions.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.Ingress), err -} - -func (c *FakeIngresses) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewDeleteAction(ingressesResource, c.ns, name), &extensions.Ingress{}) - - return err -} - -func (c *FakeIngresses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(ingressesResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &extensions.IngressList{}) - return err -} - -func (c *FakeIngresses) Get(name string) (result *extensions.Ingress, err error) { - obj, err := c.Fake. - Invokes(core.NewGetAction(ingressesResource, c.ns, name), &extensions.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.Ingress), err -} - -func (c *FakeIngresses) List(opts api.ListOptions) (result *extensions.IngressList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(ingressesResource, c.ns, opts), &extensions.IngressList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &extensions.IngressList{} - for _, item := range obj.(*extensions.IngressList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *FakeIngresses) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. 
- InvokesWatch(core.NewWatchAction(ingressesResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched ingress. -func (c *FakeIngresses) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.Ingress, err error) { - obj, err := c.Fake. - Invokes(core.NewPatchSubresourceAction(ingressesResource, c.ns, name, data, subresources...), &extensions.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.Ingress), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_networkpolicy.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_networkpolicy.go deleted file mode 100644 index 6e86fe8e95..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_networkpolicy.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeNetworkPolicies implements NetworkPolicyInterface -type FakeNetworkPolicies struct { - Fake *FakeExtensions - ns string -} - -var networkpoliciesResource = unversioned.GroupVersionResource{Group: "extensions", Version: "", Resource: "networkpolicies"} - -func (c *FakeNetworkPolicies) Create(networkPolicy *extensions.NetworkPolicy) (result *extensions.NetworkPolicy, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(networkpoliciesResource, c.ns, networkPolicy), &extensions.NetworkPolicy{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.NetworkPolicy), err -} - -func (c *FakeNetworkPolicies) Update(networkPolicy *extensions.NetworkPolicy) (result *extensions.NetworkPolicy, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(networkpoliciesResource, c.ns, networkPolicy), &extensions.NetworkPolicy{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.NetworkPolicy), err -} - -func (c *FakeNetworkPolicies) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewDeleteAction(networkpoliciesResource, c.ns, name), &extensions.NetworkPolicy{}) - - return err -} - -func (c *FakeNetworkPolicies) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(networkpoliciesResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &extensions.NetworkPolicyList{}) - return err -} - -func (c *FakeNetworkPolicies) Get(name string) (result *extensions.NetworkPolicy, err error) { - obj, err := c.Fake. 
- Invokes(core.NewGetAction(networkpoliciesResource, c.ns, name), &extensions.NetworkPolicy{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.NetworkPolicy), err -} - -func (c *FakeNetworkPolicies) List(opts api.ListOptions) (result *extensions.NetworkPolicyList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(networkpoliciesResource, c.ns, opts), &extensions.NetworkPolicyList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &extensions.NetworkPolicyList{} - for _, item := range obj.(*extensions.NetworkPolicyList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *FakeNetworkPolicies) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewWatchAction(networkpoliciesResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched networkPolicy. -func (c *FakeNetworkPolicies) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.NetworkPolicy, err error) { - obj, err := c.Fake. - Invokes(core.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, data, subresources...), &extensions.NetworkPolicy{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.NetworkPolicy), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_podsecuritypolicy.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_podsecuritypolicy.go deleted file mode 100644 index ce469529a4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_podsecuritypolicy.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakePodSecurityPolicies implements PodSecurityPolicyInterface -type FakePodSecurityPolicies struct { - Fake *FakeExtensions -} - -var podsecuritypoliciesResource = unversioned.GroupVersionResource{Group: "extensions", Version: "", Resource: "podsecuritypolicies"} - -func (c *FakePodSecurityPolicies) Create(podSecurityPolicy *extensions.PodSecurityPolicy) (result *extensions.PodSecurityPolicy, err error) { - obj, err := c.Fake. 
- Invokes(core.NewRootCreateAction(podsecuritypoliciesResource, podSecurityPolicy), &extensions.PodSecurityPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*extensions.PodSecurityPolicy), err -} - -func (c *FakePodSecurityPolicies) Update(podSecurityPolicy *extensions.PodSecurityPolicy) (result *extensions.PodSecurityPolicy, err error) { - obj, err := c.Fake. - Invokes(core.NewRootUpdateAction(podsecuritypoliciesResource, podSecurityPolicy), &extensions.PodSecurityPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*extensions.PodSecurityPolicy), err -} - -func (c *FakePodSecurityPolicies) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewRootDeleteAction(podsecuritypoliciesResource, name), &extensions.PodSecurityPolicy{}) - return err -} - -func (c *FakePodSecurityPolicies) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewRootDeleteCollectionAction(podsecuritypoliciesResource, listOptions) - - _, err := c.Fake.Invokes(action, &extensions.PodSecurityPolicyList{}) - return err -} - -func (c *FakePodSecurityPolicies) Get(name string) (result *extensions.PodSecurityPolicy, err error) { - obj, err := c.Fake. - Invokes(core.NewRootGetAction(podsecuritypoliciesResource, name), &extensions.PodSecurityPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*extensions.PodSecurityPolicy), err -} - -func (c *FakePodSecurityPolicies) List(opts api.ListOptions) (result *extensions.PodSecurityPolicyList, err error) { - obj, err := c.Fake. - Invokes(core.NewRootListAction(podsecuritypoliciesResource, opts), &extensions.PodSecurityPolicyList{}) - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &extensions.PodSecurityPolicyList{} - for _, item := range obj.(*extensions.PodSecurityPolicyList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested podSecurityPolicies. -func (c *FakePodSecurityPolicies) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewRootWatchAction(podsecuritypoliciesResource, opts)) -} - -// Patch applies the patch and returns the patched podSecurityPolicy. -func (c *FakePodSecurityPolicies) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.PodSecurityPolicy, err error) { - obj, err := c.Fake. - Invokes(core.NewRootPatchSubresourceAction(podsecuritypoliciesResource, name, data, subresources...), &extensions.PodSecurityPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*extensions.PodSecurityPolicy), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_replicaset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_replicaset.go deleted file mode 100644 index 52779dd09c..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_replicaset.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeReplicaSets implements ReplicaSetInterface -type FakeReplicaSets struct { - Fake *FakeExtensions - ns string -} - -var replicasetsResource = unversioned.GroupVersionResource{Group: "extensions", Version: "", Resource: "replicasets"} - -func (c *FakeReplicaSets) Create(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(replicasetsResource, c.ns, replicaSet), &extensions.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.ReplicaSet), err -} - -func (c *FakeReplicaSets) Update(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &extensions.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.ReplicaSet), err -} - -func (c *FakeReplicaSets) UpdateStatus(replicaSet *extensions.ReplicaSet) (*extensions.ReplicaSet, error) { - obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &extensions.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.ReplicaSet), err -} - -func (c *FakeReplicaSets) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewDeleteAction(replicasetsResource, c.ns, name), &extensions.ReplicaSet{}) - - return err -} - -func (c *FakeReplicaSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(replicasetsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &extensions.ReplicaSetList{}) - return err -} - -func (c *FakeReplicaSets) Get(name string) (result *extensions.ReplicaSet, err error) { - obj, err := c.Fake. - Invokes(core.NewGetAction(replicasetsResource, c.ns, name), &extensions.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.ReplicaSet), err -} - -func (c *FakeReplicaSets) List(opts api.ListOptions) (result *extensions.ReplicaSetList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(replicasetsResource, c.ns, opts), &extensions.ReplicaSetList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &extensions.ReplicaSetList{} - for _, item := range obj.(*extensions.ReplicaSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *FakeReplicaSets) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. 
- InvokesWatch(core.NewWatchAction(replicasetsResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *FakeReplicaSets) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.ReplicaSet, err error) { - obj, err := c.Fake. - Invokes(core.NewPatchSubresourceAction(replicasetsResource, c.ns, name, data, subresources...), &extensions.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*extensions.ReplicaSet), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_scale.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_scale.go deleted file mode 100644 index 0179e4edce..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_scale.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -// FakeScales implements ScaleInterface -type FakeScales struct { - Fake *FakeExtensions - ns string -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_scale_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_scale_expansion.go deleted file mode 100644 index 481b8197f0..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_scale_expansion.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/testing/core" -) - -func (c *FakeScales) Get(kind string, name string) (result *extensions.Scale, err error) { - action := core.GetActionImpl{} - action.Verb = "get" - action.Namespace = c.ns - action.Resource = unversioned.GroupVersionResource{Resource: kind} - action.Subresource = "scale" - action.Name = name - obj, err := c.Fake.Invokes(action, &extensions.Scale{}) - result = obj.(*extensions.Scale) - return -} - -func (c *FakeScales) Update(kind string, scale *extensions.Scale) (result *extensions.Scale, err error) { - action := core.UpdateActionImpl{} - action.Verb = "update" - action.Namespace = c.ns - action.Resource = unversioned.GroupVersionResource{Resource: kind} - action.Subresource = "scale" - action.Object = scale - obj, err := c.Fake.Invokes(action, scale) - result = obj.(*extensions.Scale) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_thirdpartyresource.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_thirdpartyresource.go deleted file mode 100644 index 196120566c..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_thirdpartyresource.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeThirdPartyResources implements ThirdPartyResourceInterface -type FakeThirdPartyResources struct { - Fake *FakeExtensions -} - -var thirdpartyresourcesResource = unversioned.GroupVersionResource{Group: "extensions", Version: "", Resource: "thirdpartyresources"} - -func (c *FakeThirdPartyResources) Create(thirdPartyResource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { - obj, err := c.Fake. - Invokes(core.NewRootCreateAction(thirdpartyresourcesResource, thirdPartyResource), &extensions.ThirdPartyResource{}) - if obj == nil { - return nil, err - } - return obj.(*extensions.ThirdPartyResource), err -} - -func (c *FakeThirdPartyResources) Update(thirdPartyResource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { - obj, err := c.Fake. - Invokes(core.NewRootUpdateAction(thirdpartyresourcesResource, thirdPartyResource), &extensions.ThirdPartyResource{}) - if obj == nil { - return nil, err - } - return obj.(*extensions.ThirdPartyResource), err -} - -func (c *FakeThirdPartyResources) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(core.NewRootDeleteAction(thirdpartyresourcesResource, name), &extensions.ThirdPartyResource{}) - return err -} - -func (c *FakeThirdPartyResources) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewRootDeleteCollectionAction(thirdpartyresourcesResource, listOptions) - - _, err := c.Fake.Invokes(action, &extensions.ThirdPartyResourceList{}) - return err -} - -func (c *FakeThirdPartyResources) Get(name string) (result *extensions.ThirdPartyResource, err error) { - obj, err := c.Fake. - Invokes(core.NewRootGetAction(thirdpartyresourcesResource, name), &extensions.ThirdPartyResource{}) - if obj == nil { - return nil, err - } - return obj.(*extensions.ThirdPartyResource), err -} - -func (c *FakeThirdPartyResources) List(opts api.ListOptions) (result *extensions.ThirdPartyResourceList, err error) { - obj, err := c.Fake. - Invokes(core.NewRootListAction(thirdpartyresourcesResource, opts), &extensions.ThirdPartyResourceList{}) - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &extensions.ThirdPartyResourceList{} - for _, item := range obj.(*extensions.ThirdPartyResourceList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested thirdPartyResources. -func (c *FakeThirdPartyResources) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewRootWatchAction(thirdpartyresourcesResource, opts)) -} - -// Patch applies the patch and returns the patched thirdPartyResource. -func (c *FakeThirdPartyResources) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.ThirdPartyResource, err error) { - obj, err := c.Fake. - Invokes(core.NewRootPatchSubresourceAction(thirdpartyresourcesResource, name, data, subresources...), &extensions.ThirdPartyResource{}) - if obj == nil { - return nil, err - } - return obj.(*extensions.ThirdPartyResource), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/generated_expansion.go deleted file mode 100644 index 070aeeaa60..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/generated_expansion.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internalversion - -type DaemonSetExpansion interface{} - -type IngressExpansion interface{} - -type NetworkPolicyExpansion interface{} - -type PodSecurityPolicyExpansion interface{} - -type ReplicaSetExpansion interface{} - -type ThirdPartyResourceExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/ingress.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/ingress.go deleted file mode 100644 index 2b031e9ae2..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/ingress.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// IngressesGetter has a method to return a IngressInterface. -// A group's client should implement this interface. -type IngressesGetter interface { - Ingresses(namespace string) IngressInterface -} - -// IngressInterface has methods to work with Ingress resources. -type IngressInterface interface { - Create(*extensions.Ingress) (*extensions.Ingress, error) - Update(*extensions.Ingress) (*extensions.Ingress, error) - UpdateStatus(*extensions.Ingress) (*extensions.Ingress, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*extensions.Ingress, error) - List(opts api.ListOptions) (*extensions.IngressList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.Ingress, err error) - IngressExpansion -} - -// ingresses implements IngressInterface -type ingresses struct { - client restclient.Interface - ns string -} - -// newIngresses returns a Ingresses -func newIngresses(c *ExtensionsClient, namespace string) *ingresses { - return &ingresses{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ingresses"). - Body(ingress). - Do(). - Into(result) - return -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). 
- Body(ingress). - Do(). - Into(result) - return -} - -func (c *ingresses) UpdateStatus(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - SubResource("status"). - Body(ingress). - Do(). - Into(result) - return -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(name string) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *ingresses) List(opts api.ListOptions) (result *extensions.IngressList, err error) { - result = &extensions.IngressList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ingresses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/networkpolicy.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/networkpolicy.go deleted file mode 100644 index 40127cf117..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/networkpolicy.go +++ /dev/null @@ -1,152 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// NetworkPoliciesGetter has a method to return a NetworkPolicyInterface. -// A group's client should implement this interface. -type NetworkPoliciesGetter interface { - NetworkPolicies(namespace string) NetworkPolicyInterface -} - -// NetworkPolicyInterface has methods to work with NetworkPolicy resources. -type NetworkPolicyInterface interface { - Create(*extensions.NetworkPolicy) (*extensions.NetworkPolicy, error) - Update(*extensions.NetworkPolicy) (*extensions.NetworkPolicy, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*extensions.NetworkPolicy, error) - List(opts api.ListOptions) (*extensions.NetworkPolicyList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.NetworkPolicy, err error) - NetworkPolicyExpansion -} - -// networkPolicies implements NetworkPolicyInterface -type networkPolicies struct { - client restclient.Interface - ns string -} - -// newNetworkPolicies returns a NetworkPolicies -func newNetworkPolicies(c *ExtensionsClient, namespace string) *networkPolicies { - return &networkPolicies{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Create(networkPolicy *extensions.NetworkPolicy) (result *extensions.NetworkPolicy, err error) { - result = &extensions.NetworkPolicy{} - err = c.client.Post(). - Namespace(c.ns). - Resource("networkpolicies"). - Body(networkPolicy). - Do(). - Into(result) - return -} - -// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Update(networkPolicy *extensions.NetworkPolicy) (result *extensions.NetworkPolicy, err error) { - result = &extensions.NetworkPolicy{} - err = c.client.Put(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(networkPolicy.Name). - Body(networkPolicy). - Do(). - Into(result) - return -} - -// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. -func (c *networkPolicies) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *networkPolicies) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. -func (c *networkPolicies) Get(name string) (result *extensions.NetworkPolicy, err error) { - result = &extensions.NetworkPolicy{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - Do(). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. -func (c *networkPolicies) List(opts api.ListOptions) (result *extensions.NetworkPolicyList, err error) { - result = &extensions.NetworkPolicyList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *networkPolicies) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched networkPolicy. -func (c *networkPolicies) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.NetworkPolicy, err error) { - result = &extensions.NetworkPolicy{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("networkpolicies"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/podsecuritypolicy.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/podsecuritypolicy.go deleted file mode 100644 index cffa9e874f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/podsecuritypolicy.go +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. -// A group's client should implement this interface. -type PodSecurityPoliciesGetter interface { - PodSecurityPolicies() PodSecurityPolicyInterface -} - -// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. 
-type PodSecurityPolicyInterface interface { - Create(*extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) - Update(*extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*extensions.PodSecurityPolicy, error) - List(opts api.ListOptions) (*extensions.PodSecurityPolicyList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.PodSecurityPolicy, err error) - PodSecurityPolicyExpansion -} - -// podSecurityPolicies implements PodSecurityPolicyInterface -type podSecurityPolicies struct { - client restclient.Interface -} - -// newPodSecurityPolicies returns a PodSecurityPolicies -func newPodSecurityPolicies(c *ExtensionsClient) *podSecurityPolicies { - return &podSecurityPolicies{ - client: c.RESTClient(), - } -} - -// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Create(podSecurityPolicy *extensions.PodSecurityPolicy) (result *extensions.PodSecurityPolicy, err error) { - result = &extensions.PodSecurityPolicy{} - err = c.client.Post(). - Resource("podsecuritypolicies"). - Body(podSecurityPolicy). - Do(). - Into(result) - return -} - -// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Update(podSecurityPolicy *extensions.PodSecurityPolicy) (result *extensions.PodSecurityPolicy, err error) { - result = &extensions.PodSecurityPolicy{} - err = c.client.Put(). - Resource("podsecuritypolicies"). - Name(podSecurityPolicy.Name). - Body(podSecurityPolicy). - Do(). - Into(result) - return -} - -// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. -func (c *podSecurityPolicies) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Resource("podsecuritypolicies"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podSecurityPolicies) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Resource("podsecuritypolicies"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. -func (c *podSecurityPolicies) Get(name string) (result *extensions.PodSecurityPolicy, err error) { - result = &extensions.PodSecurityPolicy{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *podSecurityPolicies) List(opts api.ListOptions) (result *extensions.PodSecurityPolicyList, err error) { - result = &extensions.PodSecurityPolicyList{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podSecurityPolicies. 
-func (c *podSecurityPolicies) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("podsecuritypolicies"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched podSecurityPolicy. -func (c *podSecurityPolicies) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.PodSecurityPolicy, err error) { - result = &extensions.PodSecurityPolicy{} - err = c.client.Patch(pt). - Resource("podsecuritypolicies"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/replicaset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/replicaset.go deleted file mode 100644 index 02d386574b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/replicaset.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ReplicaSetsGetter has a method to return a ReplicaSetInterface. -// A group's client should implement this interface. -type ReplicaSetsGetter interface { - ReplicaSets(namespace string) ReplicaSetInterface -} - -// ReplicaSetInterface has methods to work with ReplicaSet resources. -type ReplicaSetInterface interface { - Create(*extensions.ReplicaSet) (*extensions.ReplicaSet, error) - Update(*extensions.ReplicaSet) (*extensions.ReplicaSet, error) - UpdateStatus(*extensions.ReplicaSet) (*extensions.ReplicaSet, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*extensions.ReplicaSet, error) - List(opts api.ListOptions) (*extensions.ReplicaSetList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.ReplicaSet, err error) - ReplicaSetExpansion -} - -// replicaSets implements ReplicaSetInterface -type replicaSets struct { - client restclient.Interface - ns string -} - -// newReplicaSets returns a ReplicaSets -func newReplicaSets(c *ExtensionsClient, namespace string) *replicaSets { - return &replicaSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Post(). 
- Namespace(c.ns). - Resource("replicasets"). - Body(replicaSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - Body(replicaSet). - Do(). - Into(result) - return -} - -func (c *replicaSets) UpdateStatus(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - SubResource("status"). - Body(replicaSet). - Do(). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicaSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *replicaSets) Get(name string) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(opts api.ListOptions) (result *extensions.ReplicaSetList, err error) { - result = &extensions.ReplicaSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *replicaSets) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicasets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/scale.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/scale.go deleted file mode 100644 index f1e703c3d4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/scale.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -// ScalesGetter has a method to return a ScaleInterface. -// A group's client should implement this interface. -type ScalesGetter interface { - Scales(namespace string) ScaleInterface -} - -// ScaleInterface has methods to work with Scale resources. -type ScaleInterface interface { - ScaleExpansion -} - -// scales implements ScaleInterface -type scales struct { - client restclient.Interface - ns string -} - -// newScales returns a Scales -func newScales(c *ExtensionsClient, namespace string) *scales { - return &scales{ - client: c.RESTClient(), - ns: namespace, - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/scale_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/scale_expansion.go deleted file mode 100644 index 6b1e64dd86..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/scale_expansion.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/extensions" -) - -// The ScaleExpansion interface allows manually adding extra methods to the ScaleInterface. -type ScaleExpansion interface { - Get(kind string, name string) (*extensions.Scale, error) - Update(kind string, scale *extensions.Scale) (*extensions.Scale, error) -} - -// Get takes the reference to scale subresource and returns the subresource or error, if one occurs. -func (c *scales) Get(kind string, name string) (result *extensions.Scale, err error) { - result = &extensions.Scale{} - - // TODO this method needs to take a proper unambiguous kind - fullyQualifiedKind := unversioned.GroupVersionKind{Kind: kind} - resource, _ := meta.KindToResource(fullyQualifiedKind) - - err = c.client.Get(). - Namespace(c.ns). - Resource(resource.Resource). - Name(name). - SubResource("scale"). - Do(). 
- Into(result) - return -} - -func (c *scales) Update(kind string, scale *extensions.Scale) (result *extensions.Scale, err error) { - result = &extensions.Scale{} - - // TODO this method needs to take a proper unambiguous kind - fullyQualifiedKind := unversioned.GroupVersionKind{Kind: kind} - resource, _ := meta.KindToResource(fullyQualifiedKind) - - err = c.client.Put(). - Namespace(scale.Namespace). - Resource(resource.Resource). - Name(scale.Name). - SubResource("scale"). - Body(scale). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/thirdpartyresource.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/thirdpartyresource.go deleted file mode 100644 index a3056f330a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/thirdpartyresource.go +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ThirdPartyResourcesGetter has a method to return a ThirdPartyResourceInterface. -// A group's client should implement this interface. -type ThirdPartyResourcesGetter interface { - ThirdPartyResources() ThirdPartyResourceInterface -} - -// ThirdPartyResourceInterface has methods to work with ThirdPartyResource resources. -type ThirdPartyResourceInterface interface { - Create(*extensions.ThirdPartyResource) (*extensions.ThirdPartyResource, error) - Update(*extensions.ThirdPartyResource) (*extensions.ThirdPartyResource, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*extensions.ThirdPartyResource, error) - List(opts api.ListOptions) (*extensions.ThirdPartyResourceList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.ThirdPartyResource, err error) - ThirdPartyResourceExpansion -} - -// thirdPartyResources implements ThirdPartyResourceInterface -type thirdPartyResources struct { - client restclient.Interface -} - -// newThirdPartyResources returns a ThirdPartyResources -func newThirdPartyResources(c *ExtensionsClient) *thirdPartyResources { - return &thirdPartyResources{ - client: c.RESTClient(), - } -} - -// Create takes the representation of a thirdPartyResource and creates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. 
-func (c *thirdPartyResources) Create(thirdPartyResource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { - result = &extensions.ThirdPartyResource{} - err = c.client.Post(). - Resource("thirdpartyresources"). - Body(thirdPartyResource). - Do(). - Into(result) - return -} - -// Update takes the representation of a thirdPartyResource and updates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. -func (c *thirdPartyResources) Update(thirdPartyResource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { - result = &extensions.ThirdPartyResource{} - err = c.client.Put(). - Resource("thirdpartyresources"). - Name(thirdPartyResource.Name). - Body(thirdPartyResource). - Do(). - Into(result) - return -} - -// Delete takes name of the thirdPartyResource and deletes it. Returns an error if one occurs. -func (c *thirdPartyResources) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Resource("thirdpartyresources"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *thirdPartyResources) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Resource("thirdpartyresources"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the thirdPartyResource, and returns the corresponding thirdPartyResource object, and an error if there is any. -func (c *thirdPartyResources) Get(name string) (result *extensions.ThirdPartyResource, err error) { - result = &extensions.ThirdPartyResource{} - err = c.client.Get(). - Resource("thirdpartyresources"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ThirdPartyResources that match those selectors. -func (c *thirdPartyResources) List(opts api.ListOptions) (result *extensions.ThirdPartyResourceList, err error) { - result = &extensions.ThirdPartyResourceList{} - err = c.client.Get(). - Resource("thirdpartyresources"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested thirdPartyResources. -func (c *thirdPartyResources) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("thirdpartyresources"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched thirdPartyResource. -func (c *thirdPartyResources) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.ThirdPartyResource, err error) { - result = &extensions.ThirdPartyResource{} - err = c.client.Patch(pt). - Resource("thirdpartyresources"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/BUILD deleted file mode 100644 index a3f783f2a3..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/BUILD +++ /dev/null @@ -1,31 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "eviction.go", - "eviction_expansion.go", - "generated_expansion.go", - "poddisruptionbudget.go", - "policy_client.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/policy:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/doc.go deleted file mode 100644 index d908962a44..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// This package has the automatically generated typed clients. -package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/eviction.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/eviction.go deleted file mode 100644 index b16a393437..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/eviction.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -// EvictionsGetter has a method to return a EvictionInterface. -// A group's client should implement this interface. 
-type EvictionsGetter interface { - Evictions(namespace string) EvictionInterface -} - -// EvictionInterface has methods to work with Eviction resources. -type EvictionInterface interface { - EvictionExpansion -} - -// evictions implements EvictionInterface -type evictions struct { - client restclient.Interface - ns string -} - -// newEvictions returns a Evictions -func newEvictions(c *PolicyClient, namespace string) *evictions { - return &evictions{ - client: c.RESTClient(), - ns: namespace, - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/eviction_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/eviction_expansion.go deleted file mode 100644 index 8e2030101b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/eviction_expansion.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - policy "k8s.io/kubernetes/pkg/apis/policy" -) - -// The EvictionExpansion interface allows manually adding extra methods to the ScaleInterface. -type EvictionExpansion interface { - Evict(eviction *policy.Eviction) error -} - -func (c *evictions) Evict(eviction *policy.Eviction) error { - return c.client.Post(). - AbsPath("/api/v1"). - Namespace(eviction.Namespace). - Resource("pods"). - Name(eviction.Name). - SubResource("eviction"). - Body(eviction). - Do(). 
- Error() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/BUILD deleted file mode 100644 index 38d87c65e6..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fake_eviction.go", - "fake_eviction_expansion.go", - "fake_poddisruptionbudget.go", - "fake_policy_client.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apis/policy:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/policy/internalversion:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/testing/core:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/doc.go deleted file mode 100644 index 083f78f3a4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// Package fake has the automatically generated clients. -package fake diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_poddisruptionbudget.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_poddisruptionbudget.go deleted file mode 100644 index d5c2337ea4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_poddisruptionbudget.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - policy "k8s.io/kubernetes/pkg/apis/policy" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakePodDisruptionBudgets implements PodDisruptionBudgetInterface -type FakePodDisruptionBudgets struct { - Fake *FakePolicy - ns string -} - -var poddisruptionbudgetsResource = unversioned.GroupVersionResource{Group: "policy", Version: "", Resource: "poddisruptionbudgets"} - -func (c *FakePodDisruptionBudgets) Create(podDisruptionBudget *policy.PodDisruptionBudget) (result *policy.PodDisruptionBudget, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &policy.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*policy.PodDisruptionBudget), err -} - -func (c *FakePodDisruptionBudgets) Update(podDisruptionBudget *policy.PodDisruptionBudget) (result *policy.PodDisruptionBudget, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &policy.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*policy.PodDisruptionBudget), err -} - -func (c *FakePodDisruptionBudgets) UpdateStatus(podDisruptionBudget *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) { - obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget), &policy.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*policy.PodDisruptionBudget), err -} - -func (c *FakePodDisruptionBudgets) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewDeleteAction(poddisruptionbudgetsResource, c.ns, name), &policy.PodDisruptionBudget{}) - - return err -} - -func (c *FakePodDisruptionBudgets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(poddisruptionbudgetsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &policy.PodDisruptionBudgetList{}) - return err -} - -func (c *FakePodDisruptionBudgets) Get(name string) (result *policy.PodDisruptionBudget, err error) { - obj, err := c.Fake. - Invokes(core.NewGetAction(poddisruptionbudgetsResource, c.ns, name), &policy.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*policy.PodDisruptionBudget), err -} - -func (c *FakePodDisruptionBudgets) List(opts api.ListOptions) (result *policy.PodDisruptionBudgetList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(poddisruptionbudgetsResource, c.ns, opts), &policy.PodDisruptionBudgetList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &policy.PodDisruptionBudgetList{} - for _, item := range obj.(*policy.PodDisruptionBudgetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. -func (c *FakePodDisruptionBudgets) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. 
- InvokesWatch(core.NewWatchAction(poddisruptionbudgetsResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched podDisruptionBudget. -func (c *FakePodDisruptionBudgets) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *policy.PodDisruptionBudget, err error) { - obj, err := c.Fake. - Invokes(core.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, name, data, subresources...), &policy.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*policy.PodDisruptionBudget), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_policy_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_policy_client.go deleted file mode 100644 index c277e61f42..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_policy_client.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - internalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion" - restclient "k8s.io/kubernetes/pkg/client/restclient" - core "k8s.io/kubernetes/pkg/client/testing/core" -) - -type FakePolicy struct { - *core.Fake -} - -func (c *FakePolicy) Evictions(namespace string) internalversion.EvictionInterface { - return &FakeEvictions{c, namespace} -} - -func (c *FakePolicy) PodDisruptionBudgets(namespace string) internalversion.PodDisruptionBudgetInterface { - return &FakePodDisruptionBudgets{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakePolicy) RESTClient() restclient.Interface { - var ret *restclient.RESTClient - return ret -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/generated_expansion.go deleted file mode 100644 index f60d699df6..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/generated_expansion.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internalversion - -type PodDisruptionBudgetExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/poddisruptionbudget.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/poddisruptionbudget.go deleted file mode 100644 index d71984dbad..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/poddisruptionbudget.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - policy "k8s.io/kubernetes/pkg/apis/policy" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// PodDisruptionBudgetsGetter has a method to return a PodDisruptionBudgetInterface. -// A group's client should implement this interface. -type PodDisruptionBudgetsGetter interface { - PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface -} - -// PodDisruptionBudgetInterface has methods to work with PodDisruptionBudget resources. -type PodDisruptionBudgetInterface interface { - Create(*policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) - Update(*policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) - UpdateStatus(*policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*policy.PodDisruptionBudget, error) - List(opts api.ListOptions) (*policy.PodDisruptionBudgetList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *policy.PodDisruptionBudget, err error) - PodDisruptionBudgetExpansion -} - -// podDisruptionBudgets implements PodDisruptionBudgetInterface -type podDisruptionBudgets struct { - client restclient.Interface - ns string -} - -// newPodDisruptionBudgets returns a PodDisruptionBudgets -func newPodDisruptionBudgets(c *PolicyClient, namespace string) *podDisruptionBudgets { - return &podDisruptionBudgets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *podDisruptionBudgets) Create(podDisruptionBudget *policy.PodDisruptionBudget) (result *policy.PodDisruptionBudget, err error) { - result = &policy.PodDisruptionBudget{} - err = c.client.Post(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Body(podDisruptionBudget). - Do(). - Into(result) - return -} - -// Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. 
-func (c *podDisruptionBudgets) Update(podDisruptionBudget *policy.PodDisruptionBudget) (result *policy.PodDisruptionBudget, err error) { - result = &policy.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - Body(podDisruptionBudget). - Do(). - Into(result) - return -} - -func (c *podDisruptionBudgets) UpdateStatus(podDisruptionBudget *policy.PodDisruptionBudget) (result *policy.PodDisruptionBudget, err error) { - result = &policy.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - SubResource("status"). - Body(podDisruptionBudget). - Do(). - Into(result) - return -} - -// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. -func (c *podDisruptionBudgets) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podDisruptionBudgets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. -func (c *podDisruptionBudgets) Get(name string) (result *policy.PodDisruptionBudget, err error) { - result = &policy.PodDisruptionBudget{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. -func (c *podDisruptionBudgets) List(opts api.ListOptions) (result *policy.PodDisruptionBudgetList, err error) { - result = &policy.PodDisruptionBudgetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. -func (c *podDisruptionBudgets) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched podDisruptionBudget. -func (c *podDisruptionBudgets) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *policy.PodDisruptionBudget, err error) { - result = &policy.PodDisruptionBudget{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/policy_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/policy_client.go deleted file mode 100644 index b968424b00..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/policy_client.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - registered "k8s.io/kubernetes/pkg/apimachinery/registered" - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -type PolicyInterface interface { - RESTClient() restclient.Interface - EvictionsGetter - PodDisruptionBudgetsGetter -} - -// PolicyClient is used to interact with features provided by the k8s.io/kubernetes/pkg/apimachinery/registered.Group group. -type PolicyClient struct { - restClient restclient.Interface -} - -func (c *PolicyClient) Evictions(namespace string) EvictionInterface { - return newEvictions(c, namespace) -} - -func (c *PolicyClient) PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface { - return newPodDisruptionBudgets(c, namespace) -} - -// NewForConfig creates a new PolicyClient for the given config. -func NewForConfig(c *restclient.Config) (*PolicyClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &PolicyClient{client}, nil -} - -// NewForConfigOrDie creates a new PolicyClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *PolicyClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new PolicyClient for the given RESTClient. -func New(c restclient.Interface) *PolicyClient { - return &PolicyClient{c} -} - -func setConfigDefaults(config *restclient.Config) error { - // if policy group is not registered, return an error - g, err := registered.Group("policy") - if err != nil { - return err - } - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != g.GroupVersion.Group { - copyGroupVersion := g.GroupVersion - config.GroupVersion = &copyGroupVersion - } - config.NegotiatedSerializer = api.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation.
-func (c *PolicyClient) RESTClient() restclient.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/BUILD deleted file mode 100644 index 3809282fe5..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/BUILD +++ /dev/null @@ -1,32 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "clusterrole.go", - "clusterrolebinding.go", - "doc.go", - "generated_expansion.go", - "rbac_client.go", - "role.go", - "rolebinding.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/rbac:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrole.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrole.go deleted file mode 100644 index e595057e2f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrole.go +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - rbac "k8s.io/kubernetes/pkg/apis/rbac" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ClusterRolesGetter has a method to return a ClusterRoleInterface. -// A group's client should implement this interface. -type ClusterRolesGetter interface { - ClusterRoles() ClusterRoleInterface -} - -// ClusterRoleInterface has methods to work with ClusterRole resources. 
-type ClusterRoleInterface interface { - Create(*rbac.ClusterRole) (*rbac.ClusterRole, error) - Update(*rbac.ClusterRole) (*rbac.ClusterRole, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*rbac.ClusterRole, error) - List(opts api.ListOptions) (*rbac.ClusterRoleList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *rbac.ClusterRole, err error) - ClusterRoleExpansion -} - -// clusterRoles implements ClusterRoleInterface -type clusterRoles struct { - client restclient.Interface -} - -// newClusterRoles returns a ClusterRoles -func newClusterRoles(c *RbacClient) *clusterRoles { - return &clusterRoles{ - client: c.RESTClient(), - } -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Create(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) { - result = &rbac.ClusterRole{} - err = c.client.Post(). - Resource("clusterroles"). - Body(clusterRole). - Do(). - Into(result) - return -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Update(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) { - result = &rbac.ClusterRole{} - err = c.client.Put(). - Resource("clusterroles"). - Name(clusterRole.Name). - Body(clusterRole). - Do(). - Into(result) - return -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *clusterRoles) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoles) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(name string) (result *rbac.ClusterRole, err error) { - result = &rbac.ClusterRole{} - err = c.client.Get(). - Resource("clusterroles"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *clusterRoles) List(opts api.ListOptions) (result *rbac.ClusterRoleList, err error) { - result = &rbac.ClusterRoleList{} - err = c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *clusterRoles) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("clusterroles"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *clusterRoles) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *rbac.ClusterRole, err error) { - result = &rbac.ClusterRole{} - err = c.client.Patch(pt). - Resource("clusterroles"). 
- SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrolebinding.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrolebinding.go deleted file mode 100644 index 92300ce2a6..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrolebinding.go +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - rbac "k8s.io/kubernetes/pkg/apis/rbac" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. -// A group's client should implement this interface. -type ClusterRoleBindingsGetter interface { - ClusterRoleBindings() ClusterRoleBindingInterface -} - -// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. -type ClusterRoleBindingInterface interface { - Create(*rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, error) - Update(*rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*rbac.ClusterRoleBinding, error) - List(opts api.ListOptions) (*rbac.ClusterRoleBindingList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *rbac.ClusterRoleBinding, err error) - ClusterRoleBindingExpansion -} - -// clusterRoleBindings implements ClusterRoleBindingInterface -type clusterRoleBindings struct { - client restclient.Interface -} - -// newClusterRoleBindings returns a ClusterRoleBindings -func newClusterRoleBindings(c *RbacClient) *clusterRoleBindings { - return &clusterRoleBindings{ - client: c.RESTClient(), - } -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Create(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) { - result = &rbac.ClusterRoleBinding{} - err = c.client.Post(). - Resource("clusterrolebindings"). - Body(clusterRoleBinding). - Do(). - Into(result) - return -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Update(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) { - result = &rbac.ClusterRoleBinding{} - err = c.client.Put(). - Resource("clusterrolebindings"). 
- Name(clusterRoleBinding.Name). - Body(clusterRoleBinding). - Do(). - Into(result) - return -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *clusterRoleBindings) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterrolebindings"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoleBindings) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Resource("clusterrolebindings"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(name string) (result *rbac.ClusterRoleBinding, err error) { - result = &rbac.ClusterRoleBinding{} - err = c.client.Get(). - Resource("clusterrolebindings"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(opts api.ListOptions) (result *rbac.ClusterRoleBindingList, err error) { - result = &rbac.ClusterRoleBindingList{} - err = c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("clusterrolebindings"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *clusterRoleBindings) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *rbac.ClusterRoleBinding, err error) { - result = &rbac.ClusterRoleBinding{} - err = c.client.Patch(pt). - Resource("clusterrolebindings"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/doc.go deleted file mode 100644 index d908962a44..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// This package has the automatically generated typed clients. 
-package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/BUILD deleted file mode 100644 index 6bd5c0179a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/BUILD +++ /dev/null @@ -1,34 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fake_clusterrole.go", - "fake_clusterrolebinding.go", - "fake_rbac_client.go", - "fake_role.go", - "fake_rolebinding.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apis/rbac:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/testing/core:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/doc.go deleted file mode 100644 index 083f78f3a4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// Package fake has the automatically generated clients. -package fake diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_clusterrole.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_clusterrole.go deleted file mode 100644 index 10fe274ced..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_clusterrole.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - rbac "k8s.io/kubernetes/pkg/apis/rbac" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeClusterRoles implements ClusterRoleInterface -type FakeClusterRoles struct { - Fake *FakeRbac -} - -var clusterrolesResource = unversioned.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "", Resource: "clusterroles"} - -func (c *FakeClusterRoles) Create(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) { - obj, err := c.Fake. - Invokes(core.NewRootCreateAction(clusterrolesResource, clusterRole), &rbac.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*rbac.ClusterRole), err -} - -func (c *FakeClusterRoles) Update(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) { - obj, err := c.Fake. - Invokes(core.NewRootUpdateAction(clusterrolesResource, clusterRole), &rbac.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*rbac.ClusterRole), err -} - -func (c *FakeClusterRoles) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewRootDeleteAction(clusterrolesResource, name), &rbac.ClusterRole{}) - return err -} - -func (c *FakeClusterRoles) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewRootDeleteCollectionAction(clusterrolesResource, listOptions) - - _, err := c.Fake.Invokes(action, &rbac.ClusterRoleList{}) - return err -} - -func (c *FakeClusterRoles) Get(name string) (result *rbac.ClusterRole, err error) { - obj, err := c.Fake. - Invokes(core.NewRootGetAction(clusterrolesResource, name), &rbac.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*rbac.ClusterRole), err -} - -func (c *FakeClusterRoles) List(opts api.ListOptions) (result *rbac.ClusterRoleList, err error) { - obj, err := c.Fake. - Invokes(core.NewRootListAction(clusterrolesResource, opts), &rbac.ClusterRoleList{}) - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &rbac.ClusterRoleList{} - for _, item := range obj.(*rbac.ClusterRoleList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *FakeClusterRoles) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewRootWatchAction(clusterrolesResource, opts)) -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *FakeClusterRoles) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *rbac.ClusterRole, err error) { - obj, err := c.Fake. 
- Invokes(core.NewRootPatchSubresourceAction(clusterrolesResource, name, data, subresources...), &rbac.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*rbac.ClusterRole), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_clusterrolebinding.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_clusterrolebinding.go deleted file mode 100644 index 1e59d65710..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_clusterrolebinding.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - rbac "k8s.io/kubernetes/pkg/apis/rbac" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeClusterRoleBindings implements ClusterRoleBindingInterface -type FakeClusterRoleBindings struct { - Fake *FakeRbac -} - -var clusterrolebindingsResource = unversioned.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "", Resource: "clusterrolebindings"} - -func (c *FakeClusterRoleBindings) Create(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) { - obj, err := c.Fake. - Invokes(core.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &rbac.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*rbac.ClusterRoleBinding), err -} - -func (c *FakeClusterRoleBindings) Update(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) { - obj, err := c.Fake. - Invokes(core.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &rbac.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*rbac.ClusterRoleBinding), err -} - -func (c *FakeClusterRoleBindings) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewRootDeleteAction(clusterrolebindingsResource, name), &rbac.ClusterRoleBinding{}) - return err -} - -func (c *FakeClusterRoleBindings) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOptions) - - _, err := c.Fake.Invokes(action, &rbac.ClusterRoleBindingList{}) - return err -} - -func (c *FakeClusterRoleBindings) Get(name string) (result *rbac.ClusterRoleBinding, err error) { - obj, err := c.Fake. - Invokes(core.NewRootGetAction(clusterrolebindingsResource, name), &rbac.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*rbac.ClusterRoleBinding), err -} - -func (c *FakeClusterRoleBindings) List(opts api.ListOptions) (result *rbac.ClusterRoleBindingList, err error) { - obj, err := c.Fake. 
- Invokes(core.NewRootListAction(clusterrolebindingsResource, opts), &rbac.ClusterRoleBindingList{}) - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &rbac.ClusterRoleBindingList{} - for _, item := range obj.(*rbac.ClusterRoleBindingList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *FakeClusterRoleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewRootWatchAction(clusterrolebindingsResource, opts)) -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *FakeClusterRoleBindings) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *rbac.ClusterRoleBinding, err error) { - obj, err := c.Fake. - Invokes(core.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, data, subresources...), &rbac.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*rbac.ClusterRoleBinding), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_rbac_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_rbac_client.go deleted file mode 100644 index eba5b0375a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_rbac_client.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - internalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion" - restclient "k8s.io/kubernetes/pkg/client/restclient" - core "k8s.io/kubernetes/pkg/client/testing/core" -) - -type FakeRbac struct { - *core.Fake -} - -func (c *FakeRbac) ClusterRoles() internalversion.ClusterRoleInterface { - return &FakeClusterRoles{c} -} - -func (c *FakeRbac) ClusterRoleBindings() internalversion.ClusterRoleBindingInterface { - return &FakeClusterRoleBindings{c} -} - -func (c *FakeRbac) Roles(namespace string) internalversion.RoleInterface { - return &FakeRoles{c, namespace} -} - -func (c *FakeRbac) RoleBindings(namespace string) internalversion.RoleBindingInterface { - return &FakeRoleBindings{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *FakeRbac) RESTClient() restclient.Interface { - var ret *restclient.RESTClient - return ret -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_role.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_role.go deleted file mode 100644 index b1a197f6df..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_role.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - rbac "k8s.io/kubernetes/pkg/apis/rbac" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeRoles implements RoleInterface -type FakeRoles struct { - Fake *FakeRbac - ns string -} - -var rolesResource = unversioned.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "", Resource: "roles"} - -func (c *FakeRoles) Create(role *rbac.Role) (result *rbac.Role, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(rolesResource, c.ns, role), &rbac.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*rbac.Role), err -} - -func (c *FakeRoles) Update(role *rbac.Role) (result *rbac.Role, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(rolesResource, c.ns, role), &rbac.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*rbac.Role), err -} - -func (c *FakeRoles) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewDeleteAction(rolesResource, c.ns, name), &rbac.Role{}) - - return err -} - -func (c *FakeRoles) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(rolesResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &rbac.RoleList{}) - return err -} - -func (c *FakeRoles) Get(name string) (result *rbac.Role, err error) { - obj, err := c.Fake. - Invokes(core.NewGetAction(rolesResource, c.ns, name), &rbac.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*rbac.Role), err -} - -func (c *FakeRoles) List(opts api.ListOptions) (result *rbac.RoleList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(rolesResource, c.ns, opts), &rbac.RoleList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &rbac.RoleList{} - for _, item := range obj.(*rbac.RoleList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *FakeRoles) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. 
- InvokesWatch(core.NewWatchAction(rolesResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched role. -func (c *FakeRoles) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *rbac.Role, err error) { - obj, err := c.Fake. - Invokes(core.NewPatchSubresourceAction(rolesResource, c.ns, name, data, subresources...), &rbac.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*rbac.Role), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_rolebinding.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_rolebinding.go deleted file mode 100644 index dd80ede851..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_rolebinding.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - rbac "k8s.io/kubernetes/pkg/apis/rbac" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeRoleBindings implements RoleBindingInterface -type FakeRoleBindings struct { - Fake *FakeRbac - ns string -} - -var rolebindingsResource = unversioned.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "", Resource: "rolebindings"} - -func (c *FakeRoleBindings) Create(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) { - obj, err := c.Fake. - Invokes(core.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &rbac.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*rbac.RoleBinding), err -} - -func (c *FakeRoleBindings) Update(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) { - obj, err := c.Fake. - Invokes(core.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &rbac.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*rbac.RoleBinding), err -} - -func (c *FakeRoleBindings) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewDeleteAction(rolebindingsResource, c.ns, name), &rbac.RoleBinding{}) - - return err -} - -func (c *FakeRoleBindings) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &rbac.RoleBindingList{}) - return err -} - -func (c *FakeRoleBindings) Get(name string) (result *rbac.RoleBinding, err error) { - obj, err := c.Fake. 
- Invokes(core.NewGetAction(rolebindingsResource, c.ns, name), &rbac.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*rbac.RoleBinding), err -} - -func (c *FakeRoleBindings) List(opts api.ListOptions) (result *rbac.RoleBindingList, err error) { - obj, err := c.Fake. - Invokes(core.NewListAction(rolebindingsResource, c.ns, opts), &rbac.RoleBindingList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &rbac.RoleBindingList{} - for _, item := range obj.(*rbac.RoleBindingList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *FakeRoleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewWatchAction(rolebindingsResource, c.ns, opts)) - -} - -// Patch applies the patch and returns the patched roleBinding. -func (c *FakeRoleBindings) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *rbac.RoleBinding, err error) { - obj, err := c.Fake. - Invokes(core.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, data, subresources...), &rbac.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*rbac.RoleBinding), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/generated_expansion.go deleted file mode 100644 index bbdee22a35..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/generated_expansion.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -type ClusterRoleExpansion interface{} - -type ClusterRoleBindingExpansion interface{} - -type RoleExpansion interface{} - -type RoleBindingExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rbac_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rbac_client.go deleted file mode 100644 index 40dfd5a630..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rbac_client.go +++ /dev/null @@ -1,114 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - registered "k8s.io/kubernetes/pkg/apimachinery/registered" - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -type RbacInterface interface { - RESTClient() restclient.Interface - ClusterRolesGetter - ClusterRoleBindingsGetter - RolesGetter - RoleBindingsGetter -} - -// RbacClient is used to interact with features provided by the k8s.io/kubernetes/pkg/apimachinery/registered.Group group. -type RbacClient struct { - restClient restclient.Interface -} - -func (c *RbacClient) ClusterRoles() ClusterRoleInterface { - return newClusterRoles(c) -} - -func (c *RbacClient) ClusterRoleBindings() ClusterRoleBindingInterface { - return newClusterRoleBindings(c) -} - -func (c *RbacClient) Roles(namespace string) RoleInterface { - return newRoles(c, namespace) -} - -func (c *RbacClient) RoleBindings(namespace string) RoleBindingInterface { - return newRoleBindings(c, namespace) -} - -// NewForConfig creates a new RbacClient for the given config. -func NewForConfig(c *restclient.Config) (*RbacClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &RbacClient{client}, nil -} - -// NewForConfigOrDie creates a new RbacClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *RbacClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new RbacClient for the given RESTClient. -func New(c restclient.Interface) *RbacClient { - return &RbacClient{c} -} - -func setConfigDefaults(config *restclient.Config) error { - // if rbac group is not registered, return an error - g, err := registered.Group("rbac.authorization.k8s.io") - if err != nil { - return err - } - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != g.GroupVersion.Group { - copyGroupVersion := g.GroupVersion - config.GroupVersion = &copyGroupVersion - } - config.NegotiatedSerializer = api.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *RbacClient) RESTClient() restclient.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/role.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/role.go deleted file mode 100644 index 1203535b47..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/role.go +++ /dev/null @@ -1,152 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors.
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - rbac "k8s.io/kubernetes/pkg/apis/rbac" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// RolesGetter has a method to return a RoleInterface. -// A group's client should implement this interface. -type RolesGetter interface { - Roles(namespace string) RoleInterface -} - -// RoleInterface has methods to work with Role resources. -type RoleInterface interface { - Create(*rbac.Role) (*rbac.Role, error) - Update(*rbac.Role) (*rbac.Role, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*rbac.Role, error) - List(opts api.ListOptions) (*rbac.RoleList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *rbac.Role, err error) - RoleExpansion -} - -// roles implements RoleInterface -type roles struct { - client restclient.Interface - ns string -} - -// newRoles returns a Roles -func newRoles(c *RbacClient, namespace string) *roles { - return &roles{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Create(role *rbac.Role) (result *rbac.Role, err error) { - result = &rbac.Role{} - err = c.client.Post(). - Namespace(c.ns). - Resource("roles"). - Body(role). - Do(). - Into(result) - return -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Update(role *rbac.Role) (result *rbac.Role, err error) { - result = &rbac.Role{} - err = c.client.Put(). - Namespace(c.ns). - Resource("roles"). - Name(role.Name). - Body(role). - Do(). - Into(result) - return -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *roles) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roles) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *roles) Get(name string) (result *rbac.Role, err error) { - result = &rbac.Role{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. 
-func (c *roles) List(opts api.ListOptions) (result *rbac.RoleList, err error) { - result = &rbac.RoleList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *roles) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched role. -func (c *roles) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *rbac.Role, err error) { - result = &rbac.Role{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("roles"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rolebinding.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rolebinding.go deleted file mode 100644 index 839aff4b0b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rolebinding.go +++ /dev/null @@ -1,152 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - rbac "k8s.io/kubernetes/pkg/apis/rbac" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// RoleBindingsGetter has a method to return a RoleBindingInterface. -// A group's client should implement this interface. -type RoleBindingsGetter interface { - RoleBindings(namespace string) RoleBindingInterface -} - -// RoleBindingInterface has methods to work with RoleBinding resources. -type RoleBindingInterface interface { - Create(*rbac.RoleBinding) (*rbac.RoleBinding, error) - Update(*rbac.RoleBinding) (*rbac.RoleBinding, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*rbac.RoleBinding, error) - List(opts api.ListOptions) (*rbac.RoleBindingList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *rbac.RoleBinding, err error) - RoleBindingExpansion -} - -// roleBindings implements RoleBindingInterface -type roleBindings struct { - client restclient.Interface - ns string -} - -// newRoleBindings returns a RoleBindings -func newRoleBindings(c *RbacClient, namespace string) *roleBindings { - return &roleBindings{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. 
-func (c *roleBindings) Create(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) { - result = &rbac.RoleBinding{} - err = c.client.Post(). - Namespace(c.ns). - Resource("rolebindings"). - Body(roleBinding). - Do(). - Into(result) - return -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Update(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) { - result = &rbac.RoleBinding{} - err = c.client.Put(). - Namespace(c.ns). - Resource("rolebindings"). - Name(roleBinding.Name). - Body(roleBinding). - Do(). - Into(result) - return -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *roleBindings) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roleBindings) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *roleBindings) Get(name string) (result *rbac.RoleBinding, err error) { - result = &rbac.RoleBinding{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *roleBindings) List(opts api.ListOptions) (result *rbac.RoleBindingList, err error) { - result = &rbac.RoleBindingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *roleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched roleBinding. -func (c *roleBindings) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *rbac.RoleBinding, err error) { - result = &rbac.RoleBinding{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("rolebindings"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/BUILD deleted file mode 100644 index 3bab317b05..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/BUILD +++ /dev/null @@ -1,29 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "storage_client.go", - "storageclass.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/storage:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/doc.go deleted file mode 100644 index d908962a44..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// This package has the automatically generated typed clients. 
-package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/BUILD deleted file mode 100644 index 73f1037441..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/BUILD +++ /dev/null @@ -1,31 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fake_storage_client.go", - "fake_storageclass.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apis/storage:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/storage/internalversion:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/testing/core:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/doc.go deleted file mode 100644 index 083f78f3a4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// Package fake has the automatically generated clients. -package fake diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/fake_storage_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/fake_storage_client.go deleted file mode 100644 index 37721e0510..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/fake_storage_client.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - internalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion" - restclient "k8s.io/kubernetes/pkg/client/restclient" - core "k8s.io/kubernetes/pkg/client/testing/core" -) - -type FakeStorage struct { - *core.Fake -} - -func (c *FakeStorage) StorageClasses() internalversion.StorageClassInterface { - return &FakeStorageClasses{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeStorage) RESTClient() restclient.Interface { - var ret *restclient.RESTClient - return ret -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/fake_storageclass.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/fake_storageclass.go deleted file mode 100644 index c6cc03375f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/fake_storageclass.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - storage "k8s.io/kubernetes/pkg/apis/storage" - core "k8s.io/kubernetes/pkg/client/testing/core" - labels "k8s.io/kubernetes/pkg/labels" - watch "k8s.io/kubernetes/pkg/watch" -) - -// FakeStorageClasses implements StorageClassInterface -type FakeStorageClasses struct { - Fake *FakeStorage -} - -var storageclassesResource = unversioned.GroupVersionResource{Group: "storage.k8s.io", Version: "", Resource: "storageclasses"} - -func (c *FakeStorageClasses) Create(storageClass *storage.StorageClass) (result *storage.StorageClass, err error) { - obj, err := c.Fake. - Invokes(core.NewRootCreateAction(storageclassesResource, storageClass), &storage.StorageClass{}) - if obj == nil { - return nil, err - } - return obj.(*storage.StorageClass), err -} - -func (c *FakeStorageClasses) Update(storageClass *storage.StorageClass) (result *storage.StorageClass, err error) { - obj, err := c.Fake. - Invokes(core.NewRootUpdateAction(storageclassesResource, storageClass), &storage.StorageClass{}) - if obj == nil { - return nil, err - } - return obj.(*storage.StorageClass), err -} - -func (c *FakeStorageClasses) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(core.NewRootDeleteAction(storageclassesResource, name), &storage.StorageClass{}) - return err -} - -func (c *FakeStorageClasses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewRootDeleteCollectionAction(storageclassesResource, listOptions) - - _, err := c.Fake.Invokes(action, &storage.StorageClassList{}) - return err -} - -func (c *FakeStorageClasses) Get(name string) (result *storage.StorageClass, err error) { - obj, err := c.Fake. 
- Invokes(core.NewRootGetAction(storageclassesResource, name), &storage.StorageClass{}) - if obj == nil { - return nil, err - } - return obj.(*storage.StorageClass), err -} - -func (c *FakeStorageClasses) List(opts api.ListOptions) (result *storage.StorageClassList, err error) { - obj, err := c.Fake. - Invokes(core.NewRootListAction(storageclassesResource, opts), &storage.StorageClassList{}) - if obj == nil { - return nil, err - } - - label, _, _ := core.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &storage.StorageClassList{} - for _, item := range obj.(*storage.StorageClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested storageClasses. -func (c *FakeStorageClasses) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(core.NewRootWatchAction(storageclassesResource, opts)) -} - -// Patch applies the patch and returns the patched storageClass. -func (c *FakeStorageClasses) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *storage.StorageClass, err error) { - obj, err := c.Fake. - Invokes(core.NewRootPatchSubresourceAction(storageclassesResource, name, data, subresources...), &storage.StorageClass{}) - if obj == nil { - return nil, err - } - return obj.(*storage.StorageClass), err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/generated_expansion.go deleted file mode 100644 index 6b85162067..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/generated_expansion.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -type StorageClassExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storage_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storage_client.go deleted file mode 100644 index 75692f1afb..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storage_client.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - registered "k8s.io/kubernetes/pkg/apimachinery/registered" - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -type StorageInterface interface { - RESTClient() restclient.Interface - StorageClassesGetter -} - -// StorageClient is used to interact with features provided by the k8s.io/kubernetes/pkg/apimachinery/registered.Group group. -type StorageClient struct { - restClient restclient.Interface -} - -func (c *StorageClient) StorageClasses() StorageClassInterface { - return newStorageClasses(c) -} - -// NewForConfig creates a new StorageClient for the given config. -func NewForConfig(c *restclient.Config) (*StorageClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &StorageClient{client}, nil -} - -// NewForConfigOrDie creates a new StorageClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *StorageClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new StorageClient for the given RESTClient. -func New(c restclient.Interface) *StorageClient { - return &StorageClient{c} -} - -func setConfigDefaults(config *restclient.Config) error { - // if storage group is not registered, return an error - g, err := registered.Group("storage.k8s.io") - if err != nil { - return err - } - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != g.GroupVersion.Group { - copyGroupVersion := g.GroupVersion - config.GroupVersion = ©GroupVersion - } - config.NegotiatedSerializer = api.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *StorageClient) RESTClient() restclient.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storageclass.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storageclass.go deleted file mode 100644 index 4d2ece0788..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storageclass.go +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
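The StorageClient deleted above is normally built straight from a restclient.Config. A short, hedged example of that flow; the config is assumed to come from whatever kubeconfig or in-cluster plumbing the caller already has.

package example

import (
	api "k8s.io/kubernetes/pkg/api"
	storageinternal "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion"
	restclient "k8s.io/kubernetes/pkg/client/restclient"
)

// listStorageClassNames builds a StorageClient from an existing REST config;
// NewForConfig fills in the storage.k8s.io group, codecs and QPS defaults via
// setConfigDefaults before any request is made.
func listStorageClassNames(cfg *restclient.Config) ([]string, error) {
	client, err := storageinternal.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}
	list, err := client.StorageClasses().List(api.ListOptions{})
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(list.Items))
	for _, sc := range list.Items {
		names = append(names, sc.Name)
	}
	return names, nil
}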
-*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/api" - storage "k8s.io/kubernetes/pkg/apis/storage" - restclient "k8s.io/kubernetes/pkg/client/restclient" - watch "k8s.io/kubernetes/pkg/watch" -) - -// StorageClassesGetter has a method to return a StorageClassInterface. -// A group's client should implement this interface. -type StorageClassesGetter interface { - StorageClasses() StorageClassInterface -} - -// StorageClassInterface has methods to work with StorageClass resources. -type StorageClassInterface interface { - Create(*storage.StorageClass) (*storage.StorageClass, error) - Update(*storage.StorageClass) (*storage.StorageClass, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*storage.StorageClass, error) - List(opts api.ListOptions) (*storage.StorageClassList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *storage.StorageClass, err error) - StorageClassExpansion -} - -// storageClasses implements StorageClassInterface -type storageClasses struct { - client restclient.Interface -} - -// newStorageClasses returns a StorageClasses -func newStorageClasses(c *StorageClient) *storageClasses { - return &storageClasses{ - client: c.RESTClient(), - } -} - -// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Create(storageClass *storage.StorageClass) (result *storage.StorageClass, err error) { - result = &storage.StorageClass{} - err = c.client.Post(). - Resource("storageclasses"). - Body(storageClass). - Do(). - Into(result) - return -} - -// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Update(storageClass *storage.StorageClass) (result *storage.StorageClass, err error) { - result = &storage.StorageClass{} - err = c.client.Put(). - Resource("storageclasses"). - Name(storageClass.Name). - Body(storageClass). - Do(). - Into(result) - return -} - -// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. -func (c *storageClasses) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Resource("storageclasses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *storageClasses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Resource("storageclasses"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. -func (c *storageClasses) Get(name string) (result *storage.StorageClass, err error) { - result = &storage.StorageClass{} - err = c.client.Get(). - Resource("storageclasses"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *storageClasses) List(opts api.ListOptions) (result *storage.StorageClassList, err error) { - result = &storage.StorageClassList{} - err = c.client.Get(). - Resource("storageclasses"). 
- VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested storageClasses. -func (c *storageClasses) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("storageclasses"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched storageClass. -func (c *storageClasses) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *storage.StorageClass, err error) { - result = &storage.StorageClass{} - err = c.client.Patch(pt). - Resource("storageclasses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/leaderelection/BUILD b/vendor/k8s.io/kubernetes/pkg/client/leaderelection/BUILD deleted file mode 100644 index a539ba5617..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/leaderelection/BUILD +++ /dev/null @@ -1,44 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["leaderelection.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api/errors:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apis/componentconfig:go_default_library", - "//pkg/client/leaderelection/resourcelock:go_default_library", - "//pkg/util/runtime:go_default_library", - "//pkg/util/wait:go_default_library", - "//vendor:github.com/golang/glog", - "//vendor:github.com/spf13/pflag", - ], -) - -go_test( - name = "go_default_test", - srcs = ["leaderelection_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", - "//pkg/client/leaderelection/resourcelock:go_default_library", - "//pkg/client/record:go_default_library", - "//pkg/client/testing/core:go_default_library", - "//pkg/runtime:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/leaderelection/OWNERS b/vendor/k8s.io/kubernetes/pkg/client/leaderelection/OWNERS deleted file mode 100644 index 7ff5b3ed94..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/leaderelection/OWNERS +++ /dev/null @@ -1,3 +0,0 @@ -assignees: - - mikedanese - - timothysc diff --git a/vendor/k8s.io/kubernetes/pkg/client/leaderelection/resourcelock/BUILD b/vendor/k8s.io/kubernetes/pkg/client/leaderelection/resourcelock/BUILD deleted file mode 100644 index 3daed466a5..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/leaderelection/resourcelock/BUILD +++ /dev/null @@ -1,26 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "endpointslock.go", - "interface.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/client/record:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/leaderelection/resourcelock/interface.go 
b/vendor/k8s.io/kubernetes/pkg/client/leaderelection/resourcelock/interface.go deleted file mode 100644 index 8bf97c5caf..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/leaderelection/resourcelock/interface.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resourcelock - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/client/record" -) - -const ( - LeaderElectionRecordAnnotationKey = "control-plane.alpha.kubernetes.io/leader" -) - -// LeaderElectionRecord is the record that is stored in the leader election annotation. -// This information should be used for observational purposes only and could be replaced -// with a random string (e.g. UUID) with only slight modification of this code. -// TODO(mikedanese): this should potentially be versioned -type LeaderElectionRecord struct { - HolderIdentity string `json:"holderIdentity"` - LeaseDurationSeconds int `json:"leaseDurationSeconds"` - AcquireTime unversioned.Time `json:"acquireTime"` - RenewTime unversioned.Time `json:"renewTime"` - LeaderTransitions int `json:"leaderTransitions"` -} - -// ResourceLockConfig common data that exists across different -// resource locks -type ResourceLockConfig struct { - Identity string - EventRecorder record.EventRecorder -} - -// Interface offers a common interface for locking on arbitrary -// resources used in leader election. The Interface is used -// to hide the details on specific implementations in order to allow -// them to change over time. This interface is strictly for use -// by the leaderelection code. 
-type Interface interface { - // Get returns the LeaderElectionRecord - Get() (*LeaderElectionRecord, error) - - // Create attempts to create a LeaderElectionRecord - Create(ler LeaderElectionRecord) error - - // Update will update and existing LeaderElectionRecord - Update(ler LeaderElectionRecord) error - - // RecordEvent is used to record events - RecordEvent(string) - - // Identity will return the locks Identity - Identity() string - - // Describe is used to convert details on current resource lock - // into a string - Describe() string -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/metrics/BUILD b/vendor/k8s.io/kubernetes/pkg/client/metrics/BUILD deleted file mode 100644 index fc5abd57b1..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/metrics/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["metrics.go"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/record/BUILD b/vendor/k8s.io/kubernetes/pkg/client/record/BUILD deleted file mode 100644 index 6679d725c1..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/record/BUILD +++ /dev/null @@ -1,57 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "event.go", - "events_cache.go", - "fake.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/clock:go_default_library", - "//pkg/util/runtime:go_default_library", - "//pkg/util/sets:go_default_library", - "//pkg/util/strategicpatch:go_default_library", - "//pkg/watch:go_default_library", - "//vendor:github.com/golang/glog", - "//vendor:github.com/golang/groupcache/lru", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "event_test.go", - "events_cache_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/install:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/clock:go_default_library", - "//pkg/util/diff:go_default_library", - "//pkg/util/strategicpatch:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/record/event.go b/vendor/k8s.io/kubernetes/pkg/client/record/event.go deleted file mode 100644 index 55873a73d9..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/record/event.go +++ /dev/null @@ -1,316 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
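The resourcelock Interface above is what the leader-election loop drives. A rough, simplified sketch of that acquire-or-renew dance; the real leaderelection.go adds jitter, retries, events and NotFound checks, so treat this as an illustration of the contract rather than the actual algorithm.

package example

import (
	"time"

	"k8s.io/kubernetes/pkg/api/unversioned"
	rl "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock"
)

// tryAcquireOrRenew attempts to take (or keep) the lease for lock.Identity().
func tryAcquireOrRenew(lock rl.Interface, leaseSeconds int) (bool, error) {
	now := unversioned.Time{Time: time.Now()}
	desired := rl.LeaderElectionRecord{
		HolderIdentity:       lock.Identity(),
		LeaseDurationSeconds: leaseSeconds,
		AcquireTime:          now,
		RenewTime:            now,
	}

	old, err := lock.Get()
	if err != nil {
		// Assume the record does not exist yet and create it.
		if cerr := lock.Create(desired); cerr != nil {
			return false, cerr
		}
		return true, nil
	}

	expiry := old.RenewTime.Add(time.Duration(old.LeaseDurationSeconds) * time.Second)
	if old.HolderIdentity != lock.Identity() && expiry.After(time.Now()) {
		return false, nil // somebody else still holds a live lease
	}
	if old.HolderIdentity != lock.Identity() {
		desired.LeaderTransitions = old.LeaderTransitions + 1
	} else {
		desired.AcquireTime = old.AcquireTime
		desired.LeaderTransitions = old.LeaderTransitions
	}
	if uerr := lock.Update(desired); uerr != nil {
		return false, uerr
	}
	return true, nil
}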
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package record - -import ( - "fmt" - "math/rand" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/clock" - utilruntime "k8s.io/kubernetes/pkg/util/runtime" - "k8s.io/kubernetes/pkg/watch" - - "net/http" - - "github.com/golang/glog" -) - -const maxTriesPerEvent = 12 - -var defaultSleepDuration = 10 * time.Second - -const maxQueuedEvents = 1000 - -// EventSink knows how to store events (client.Client implements it.) -// EventSink must respect the namespace that will be embedded in 'event'. -// It is assumed that EventSink will return the same sorts of errors as -// pkg/client's REST client. -type EventSink interface { - Create(event *api.Event) (*api.Event, error) - Update(event *api.Event) (*api.Event, error) - Patch(oldEvent *api.Event, data []byte) (*api.Event, error) -} - -// EventRecorder knows how to record events on behalf of an EventSource. -type EventRecorder interface { - // Event constructs an event from the given information and puts it in the queue for sending. - // 'object' is the object this event is about. Event will make a reference-- or you may also - // pass a reference to the object directly. - // 'type' of this event, and can be one of Normal, Warning. New types could be added in future - // 'reason' is the reason this event is generated. 'reason' should be short and unique; it - // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used - // to automate handling of events, so imagine people writing switch statements to handle them. - // You want to make that easy. - // 'message' is intended to be human readable. - // - // The resulting event will be created in the same namespace as the reference object. - Event(object runtime.Object, eventtype, reason, message string) - - // Eventf is just like Event, but with Sprintf for the message field. - Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) - - // PastEventf is just like Eventf, but with an option to specify the event's 'timestamp' field. - PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) -} - -// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log. -type EventBroadcaster interface { - // StartEventWatcher starts sending events received from this EventBroadcaster to the given - // event handler function. The return value can be ignored or used to stop recording, if - // desired. - StartEventWatcher(eventHandler func(*api.Event)) watch.Interface - - // StartRecordingToSink starts sending events received from this EventBroadcaster to the given - // sink. The return value can be ignored or used to stop recording, if desired. - StartRecordingToSink(sink EventSink) watch.Interface - - // StartLogging starts sending events received from this EventBroadcaster to the given logging - // function. The return value can be ignored or used to stop recording, if desired. - StartLogging(logf func(format string, args ...interface{})) watch.Interface - - // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster - // with the event source set to the given event source. 
- NewRecorder(source api.EventSource) EventRecorder -} - -// Creates a new event broadcaster. -func NewBroadcaster() EventBroadcaster { - return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration} -} - -func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster { - return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration} -} - -type eventBroadcasterImpl struct { - *watch.Broadcaster - sleepDuration time.Duration -} - -// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. -// The return value can be ignored or used to stop recording, if desired. -// TODO: make me an object with parameterizable queue length and retry interval -func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface { - // The default math/rand package functions aren't thread safe, so create a - // new Rand object for each StartRecording call. - randGen := rand.New(rand.NewSource(time.Now().UnixNano())) - eventCorrelator := NewEventCorrelator(clock.RealClock{}) - return eventBroadcaster.StartEventWatcher( - func(event *api.Event) { - recordToSink(sink, event, eventCorrelator, randGen, eventBroadcaster.sleepDuration) - }) -} - -func recordToSink(sink EventSink, event *api.Event, eventCorrelator *EventCorrelator, randGen *rand.Rand, sleepDuration time.Duration) { - // Make a copy before modification, because there could be multiple listeners. - // Events are safe to copy like this. - eventCopy := *event - event = &eventCopy - result, err := eventCorrelator.EventCorrelate(event) - if err != nil { - utilruntime.HandleError(err) - } - if result.Skip { - return - } - tries := 0 - for { - if recordEvent(sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) { - break - } - tries++ - if tries >= maxTriesPerEvent { - glog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) - break - } - // Randomize the first sleep so that various clients won't all be - // synced up if the master goes down. - if tries == 1 { - time.Sleep(time.Duration(float64(sleepDuration) * randGen.Float64())) - } else { - time.Sleep(sleepDuration) - } - } -} - -func isKeyNotFoundError(err error) bool { - statusErr, _ := err.(*errors.StatusError) - - if statusErr != nil && statusErr.Status().Code == http.StatusNotFound { - return true - } - - return false -} - -// recordEvent attempts to write event to a sink. It returns true if the event -// was successfully recorded or discarded, false if it should be retried. -// If updateExistingEvent is false, it creates a new event, otherwise it updates -// existing event. -func recordEvent(sink EventSink, event *api.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool { - var newEvent *api.Event - var err error - if updateExistingEvent { - newEvent, err = sink.Patch(event, patch) - } - // Update can fail because the event may have been removed and it no longer exists. - if !updateExistingEvent || (updateExistingEvent && isKeyNotFoundError(err)) { - // Making sure that ResourceVersion is empty on creation - event.ResourceVersion = "" - newEvent, err = sink.Create(event) - } - if err == nil { - // we need to update our event correlator with the server returned state to handle name/resourceversion - eventCorrelator.UpdateState(newEvent) - return true - } - - // If we can't contact the server, then hold everything while we keep trying. 
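A short sketch of the usual wiring for the broadcaster/recorder pair defined above. The EventSink is left abstract because its concrete implementation lives in the generated clientset outside this excerpt, and the component name is an arbitrary placeholder.

package example

import (
	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/record"
	"k8s.io/kubernetes/pkg/runtime"
)

// newRecorder logs every event locally and, when a sink is supplied, also
// persists events to the API server.
func newRecorder(sink record.EventSink) record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartLogging(glog.Infof)
	if sink != nil {
		broadcaster.StartRecordingToSink(sink)
	}
	return broadcaster.NewRecorder(api.EventSource{Component: "ingress-controller"})
}

// emit shows the recorder in use against any API object.
func emit(r record.EventRecorder, obj runtime.Object) {
	r.Eventf(obj, api.EventTypeNormal, "Sync", "scheduled for sync")
}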
- // Otherwise, something about the event is malformed and we should abandon it. - switch err.(type) { - case *restclient.RequestConstructionError: - // We will construct the request the same next time, so don't keep trying. - glog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) - return true - case *errors.StatusError: - if errors.IsAlreadyExists(err) { - glog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) - } else { - glog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) - } - return true - case *errors.UnexpectedObjectError: - // We don't expect this; it implies the server's response didn't match a - // known pattern. Go ahead and retry. - default: - // This case includes actual http transport errors. Go ahead and retry. - } - glog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) - return false -} - -// StartLogging starts sending events received from this EventBroadcaster to the given logging function. -// The return value can be ignored or used to stop recording, if desired. -func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface { - return eventBroadcaster.StartEventWatcher( - func(e *api.Event) { - logf("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message) - }) -} - -// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function. -// The return value can be ignored or used to stop recording, if desired. -func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*api.Event)) watch.Interface { - watcher := eventBroadcaster.Watch() - go func() { - defer utilruntime.HandleCrash() - for { - watchEvent, open := <-watcher.ResultChan() - if !open { - return - } - event, ok := watchEvent.Object.(*api.Event) - if !ok { - // This is all local, so there's no reason this should - // ever happen. - continue - } - eventHandler(event) - } - }() - return watcher -} - -// NewRecorder returns an EventRecorder that records events with the given event source. -func (eventBroadcaster *eventBroadcasterImpl) NewRecorder(source api.EventSource) EventRecorder { - return &recorderImpl{source, eventBroadcaster.Broadcaster, clock.RealClock{}} -} - -type recorderImpl struct { - source api.EventSource - *watch.Broadcaster - clock clock.Clock -} - -func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp unversioned.Time, eventtype, reason, message string) { - ref, err := api.GetReference(object) - if err != nil { - glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. 
Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) - return - } - - if !validateEventType(eventtype) { - glog.Errorf("Unsupported event type: '%v'", eventtype) - return - } - - event := recorder.makeEvent(ref, eventtype, reason, message) - event.Source = recorder.source - - go func() { - // NOTE: events should be a non-blocking operation - defer utilruntime.HandleCrash() - recorder.Action(watch.Added, event) - }() -} - -func validateEventType(eventtype string) bool { - switch eventtype { - case api.EventTypeNormal, api.EventTypeWarning: - return true - } - return false -} - -func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) { - recorder.generateEvent(object, unversioned.Now(), eventtype, reason, message) -} - -func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { - recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...)) -} - -func (recorder *recorderImpl) PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) { - recorder.generateEvent(object, timestamp, eventtype, reason, fmt.Sprintf(messageFmt, args...)) -} - -func (recorder *recorderImpl) makeEvent(ref *api.ObjectReference, eventtype, reason, message string) *api.Event { - t := unversioned.Time{Time: recorder.clock.Now()} - namespace := ref.Namespace - if namespace == "" { - namespace = api.NamespaceDefault - } - return &api.Event{ - ObjectMeta: api.ObjectMeta{ - Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), - Namespace: namespace, - }, - InvolvedObject: *ref, - Reason: reason, - Message: message, - FirstTimestamp: t, - LastTimestamp: t, - Count: 1, - Type: eventtype, - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/record/fake.go b/vendor/k8s.io/kubernetes/pkg/client/record/fake.go deleted file mode 100644 index e063a4fc64..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/record/fake.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package record - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// FakeRecorder is used as a fake during tests. It is thread safe. It is usable -// when created manually and not by NewFakeRecorder, however all events may be -// thrown away in this case. -type FakeRecorder struct { - Events chan string -} - -func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) { - if f.Events != nil { - f.Events <- fmt.Sprintf("%s %s %s", eventtype, reason, message) - } -} - -func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { - if f.Events != nil { - f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) 
- } -} - -func (f *FakeRecorder) PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) { -} - -// NewFakeRecorder creates new fake event recorder with event channel with -// buffer of given size. -func NewFakeRecorder(bufferSize int) *FakeRecorder { - return &FakeRecorder{ - Events: make(chan string, bufferSize), - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/BUILD b/vendor/k8s.io/kubernetes/pkg/client/restclient/BUILD deleted file mode 100644 index 1ad99a9d89..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/restclient/BUILD +++ /dev/null @@ -1,83 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "client.go", - "config.go", - "plugin.go", - "request.go", - "transport.go", - "url_utils.go", - "urlbackoff.go", - "versions.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/api/validation/path:go_default_library", - "//pkg/client/metrics:go_default_library", - "//pkg/client/transport:go_default_library", - "//pkg/client/unversioned/clientcmd/api:go_default_library", - "//pkg/fields:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/runtime/serializer/streaming:go_default_library", - "//pkg/util/cert:go_default_library", - "//pkg/util/flowcontrol:go_default_library", - "//pkg/util/net:go_default_library", - "//pkg/util/sets:go_default_library", - "//pkg/version:go_default_library", - "//pkg/watch:go_default_library", - "//pkg/watch/versioned:go_default_library", - "//vendor:github.com/golang/glog", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "client_test.go", - "config_test.go", - "plugin_test.go", - "request_test.go", - "url_utils_test.go", - "urlbackoff_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/testapi:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/client/unversioned/clientcmd/api:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/runtime/serializer/streaming:go_default_library", - "//pkg/util/clock:go_default_library", - "//pkg/util/diff:go_default_library", - "//pkg/util/flowcontrol:go_default_library", - "//pkg/util/httpstream:go_default_library", - "//pkg/util/intstr:go_default_library", - "//pkg/util/testing:go_default_library", - "//pkg/watch:go_default_library", - "//pkg/watch/versioned:go_default_library", - "//vendor:github.com/google/gofuzz", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/client.go b/vendor/k8s.io/kubernetes/pkg/client/restclient/client.go deleted file mode 100644 index 69759f7a0f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/restclient/client.go +++ /dev/null @@ -1,258 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package restclient - -import ( - "fmt" - "mime" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/flowcontrol" -) - -const ( - // Environment variables: Note that the duration should be long enough that the backoff - // persists for some reasonable time (i.e. 120 seconds). The typical base might be "1". - envBackoffBase = "KUBE_CLIENT_BACKOFF_BASE" - envBackoffDuration = "KUBE_CLIENT_BACKOFF_DURATION" -) - -// Interface captures the set of operations for generically interacting with Kubernetes REST apis. -type Interface interface { - GetRateLimiter() flowcontrol.RateLimiter - Verb(verb string) *Request - Post() *Request - Put() *Request - Patch(pt api.PatchType) *Request - Get() *Request - Delete() *Request - APIVersion() unversioned.GroupVersion -} - -// RESTClient imposes common Kubernetes API conventions on a set of resource paths. -// The baseURL is expected to point to an HTTP or HTTPS path that is the parent -// of one or more resources. The server should return a decodable API resource -// object, or an api.Status object which contains information about the reason for -// any failure. -// -// Most consumers should use client.New() to get a Kubernetes API client. -type RESTClient struct { - // base is the root URL for all invocations of the client - base *url.URL - // versionedAPIPath is a path segment connecting the base URL to the resource root - versionedAPIPath string - - // contentConfig is the information used to communicate with the server. - contentConfig ContentConfig - - // serializers contain all serializers for underlying content type. - serializers Serializers - - // creates BackoffManager that is passed to requests. - createBackoffMgr func() BackoffManager - - // TODO extract this into a wrapper interface via the RESTClient interface in kubectl. - Throttle flowcontrol.RateLimiter - - // Set specific behavior of the client. If not set http.DefaultClient will be used. - Client *http.Client -} - -type Serializers struct { - Encoder runtime.Encoder - Decoder runtime.Decoder - StreamingSerializer runtime.Serializer - Framer runtime.Framer - RenegotiatedDecoder func(contentType string, params map[string]string) (runtime.Decoder, error) -} - -// NewRESTClient creates a new RESTClient. This client performs generic REST functions -// such as Get, Put, Post, and Delete on specified paths. Codec controls encoding and -// decoding of responses from the server. 
-func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConfig, maxQPS float32, maxBurst int, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) { - base := *baseURL - if !strings.HasSuffix(base.Path, "/") { - base.Path += "/" - } - base.RawQuery = "" - base.Fragment = "" - - if config.GroupVersion == nil { - config.GroupVersion = &unversioned.GroupVersion{} - } - if len(config.ContentType) == 0 { - config.ContentType = "application/json" - } - serializers, err := createSerializers(config) - if err != nil { - return nil, err - } - - var throttle flowcontrol.RateLimiter - if maxQPS > 0 && rateLimiter == nil { - throttle = flowcontrol.NewTokenBucketRateLimiter(maxQPS, maxBurst) - } else if rateLimiter != nil { - throttle = rateLimiter - } - return &RESTClient{ - base: &base, - versionedAPIPath: versionedAPIPath, - contentConfig: config, - serializers: *serializers, - createBackoffMgr: readExpBackoffConfig, - Throttle: throttle, - Client: client, - }, nil -} - -// GetRateLimiter returns rate limier for a given client, or nil if it's called on a nil client -func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter { - if c == nil { - return nil - } - return c.Throttle -} - -// readExpBackoffConfig handles the internal logic of determining what the -// backoff policy is. By default if no information is available, NoBackoff. -// TODO Generalize this see #17727 . -func readExpBackoffConfig() BackoffManager { - backoffBase := os.Getenv(envBackoffBase) - backoffDuration := os.Getenv(envBackoffDuration) - - backoffBaseInt, errBase := strconv.ParseInt(backoffBase, 10, 64) - backoffDurationInt, errDuration := strconv.ParseInt(backoffDuration, 10, 64) - if errBase != nil || errDuration != nil { - return &NoBackoff{} - } - return &URLBackoff{ - Backoff: flowcontrol.NewBackOff( - time.Duration(backoffBaseInt)*time.Second, - time.Duration(backoffDurationInt)*time.Second)} -} - -// createSerializers creates all necessary serializers for given contentType. -// TODO: the negotiated serializer passed to this method should probably return -// serializers that control decoding and versioning without this package -// being aware of the types. Depends on whether RESTClient must deal with -// generic infrastructure. 
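The two environment variables read by readExpBackoffConfig are the only switch for this behaviour. A tiny illustrative snippet; the values are arbitrary, are interpreted as seconds, and must parse as integers or the client silently falls back to NoBackoff.

package example

import "os"

// enableClientBackoff opts in to URL backoff before any RESTClient is built.
func enableClientBackoff() {
	os.Setenv("KUBE_CLIENT_BACKOFF_BASE", "1")       // base backoff of 1s
	os.Setenv("KUBE_CLIENT_BACKOFF_DURATION", "120") // keep backing off for up to 120s
}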
-func createSerializers(config ContentConfig) (*Serializers, error) { - mediaTypes := config.NegotiatedSerializer.SupportedMediaTypes() - contentType := config.ContentType - mediaType, _, err := mime.ParseMediaType(contentType) - if err != nil { - return nil, fmt.Errorf("the content type specified in the client configuration is not recognized: %v", err) - } - info, ok := runtime.SerializerInfoForMediaType(mediaTypes, mediaType) - if !ok { - if len(contentType) != 0 || len(mediaTypes) == 0 { - return nil, fmt.Errorf("no serializers registered for %s", contentType) - } - info = mediaTypes[0] - } - - internalGV := unversioned.GroupVersions{ - { - Group: config.GroupVersion.Group, - Version: runtime.APIVersionInternal, - }, - // always include the legacy group as a decoding target to handle non-error `Status` return types - { - Group: "", - Version: runtime.APIVersionInternal, - }, - } - - s := &Serializers{ - Encoder: config.NegotiatedSerializer.EncoderForVersion(info.Serializer, *config.GroupVersion), - Decoder: config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV), - - RenegotiatedDecoder: func(contentType string, params map[string]string) (runtime.Decoder, error) { - info, ok := runtime.SerializerInfoForMediaType(mediaTypes, contentType) - if !ok { - return nil, fmt.Errorf("serializer for %s not registered", contentType) - } - return config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV), nil - }, - } - if info.StreamSerializer != nil { - s.StreamingSerializer = info.StreamSerializer.Serializer - s.Framer = info.StreamSerializer.Framer - } - - return s, nil -} - -// Verb begins a request with a verb (GET, POST, PUT, DELETE). -// -// Example usage of RESTClient's request building interface: -// c, err := NewRESTClient(...) -// if err != nil { ... } -// resp, err := c.Verb("GET"). -// Path("pods"). -// SelectorParam("labels", "area=staging"). -// Timeout(10*time.Second). -// Do() -// if err != nil { ... } -// list, ok := resp.(*api.PodList) -// -func (c *RESTClient) Verb(verb string) *Request { - backoff := c.createBackoffMgr() - - if c.Client == nil { - return NewRequest(nil, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle) - } - return NewRequest(c.Client, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle) -} - -// Post begins a POST request. Short for c.Verb("POST"). -func (c *RESTClient) Post() *Request { - return c.Verb("POST") -} - -// Put begins a PUT request. Short for c.Verb("PUT"). -func (c *RESTClient) Put() *Request { - return c.Verb("PUT") -} - -// Patch begins a PATCH request. Short for c.Verb("Patch"). -func (c *RESTClient) Patch(pt api.PatchType) *Request { - return c.Verb("PATCH").SetHeader("Content-Type", string(pt)) -} - -// Get begins a GET request. Short for c.Verb("GET"). -func (c *RESTClient) Get() *Request { - return c.Verb("GET") -} - -// Delete begins a DELETE request. Short for c.Verb("DELETE"). -func (c *RESTClient) Delete() *Request { - return c.Verb("DELETE") -} - -// APIVersion returns the APIVersion this RESTClient is expected to use. 
-func (c *RESTClient) APIVersion() unversioned.GroupVersion { - return *c.contentConfig.GroupVersion -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/config.go b/vendor/k8s.io/kubernetes/pkg/client/restclient/config.go deleted file mode 100644 index e739d796af..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/restclient/config.go +++ /dev/null @@ -1,368 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package restclient - -import ( - "fmt" - "io/ioutil" - "net" - "net/http" - "os" - "path" - gruntime "runtime" - "strings" - "time" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" - "k8s.io/kubernetes/pkg/runtime" - certutil "k8s.io/kubernetes/pkg/util/cert" - "k8s.io/kubernetes/pkg/util/flowcontrol" - "k8s.io/kubernetes/pkg/version" -) - -const ( - DefaultQPS float32 = 5.0 - DefaultBurst int = 10 -) - -// Config holds the common attributes that can be passed to a Kubernetes client on -// initialization. -type Config struct { - // Host must be a host string, a host:port pair, or a URL to the base of the apiserver. - // If a URL is given then the (optional) Path of that URL represents a prefix that must - // be appended to all request URIs used to access the apiserver. This allows a frontend - // proxy to easily relocate all of the apiserver endpoints. - Host string - // APIPath is a sub-path that points to an API root. - APIPath string - // Prefix is the sub path of the server. If not specified, the client will set - // a default value. Use "/" to indicate the server root should be used - Prefix string - - // ContentConfig contains settings that affect how objects are transformed when - // sent to the server. - ContentConfig - - // Server requires Basic authentication - Username string - Password string - - // Server requires Bearer authentication. This client will not attempt to use - // refresh tokens for an OAuth2 flow. - // TODO: demonstrate an OAuth2 compatible client. - BearerToken string - - // Impersonate is the username that this RESTClient will impersonate - Impersonate string - - // Server requires plugin-specified authentication. - AuthProvider *clientcmdapi.AuthProviderConfig - - // Callback to persist config for AuthProvider. - AuthConfigPersister AuthProviderConfigPersister - - // TLSClientConfig contains settings to enable transport layer security - TLSClientConfig - - // Server should be accessed without verifying the TLS - // certificate. For testing only. - Insecure bool - - // UserAgent is an optional field that specifies the caller of this request. - UserAgent string - - // Transport may be used for custom HTTP behavior. This attribute may not - // be specified with the TLS client certificate options. Use WrapTransport - // for most client level operations. 
- Transport http.RoundTripper - // WrapTransport will be invoked for custom HTTP behavior after the underlying - // transport is initialized (either the transport created from TLSClientConfig, - // Transport, or http.DefaultTransport). The config may layer other RoundTrippers - // on top of the returned RoundTripper. - WrapTransport func(rt http.RoundTripper) http.RoundTripper - - // QPS indicates the maximum QPS to the master from this client. - // If it's zero, the created RESTClient will use DefaultQPS: 5 - QPS float32 - - // Maximum burst for throttle. - // If it's zero, the created RESTClient will use DefaultBurst: 10. - Burst int - - // Rate limiter for limiting connections to the master from this client. If present overwrites QPS/Burst - RateLimiter flowcontrol.RateLimiter - - // The maximum length of time to wait before giving up on a server request. A value of zero means no timeout. - Timeout time.Duration - - // Version forces a specific version to be used (if registered) - // Do we need this? - // Version string -} - -// TLSClientConfig contains settings to enable transport layer security -type TLSClientConfig struct { - // Server requires TLS client certificate authentication - CertFile string - // Server requires TLS client certificate authentication - KeyFile string - // Trusted root certificates for server - CAFile string - - // CertData holds PEM-encoded bytes (typically read from a client certificate file). - // CertData takes precedence over CertFile - CertData []byte - // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). - // KeyData takes precedence over KeyFile - KeyData []byte - // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). - // CAData takes precedence over CAFile - CAData []byte -} - -type ContentConfig struct { - // AcceptContentTypes specifies the types the client will accept and is optional. - // If not set, ContentType will be used to define the Accept header - AcceptContentTypes string - // ContentType specifies the wire format used to communicate with the server. - // This value will be set as the Accept header on requests made to the server, and - // as the default content type on any object sent to the server. If not set, - // "application/json" is used. - ContentType string - // GroupVersion is the API version to talk to. Must be provided when initializing - // a RESTClient directly. When initializing a Client, will be set with the default - // code version. - GroupVersion *unversioned.GroupVersion - // NegotiatedSerializer is used for obtaining encoders and decoders for multiple - // supported media types. - NegotiatedSerializer runtime.NegotiatedSerializer -} - -// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config -// object. Note that a RESTClient may require fields that are optional when initializing a Client. -// A RESTClient created by this method is generic - it expects to operate on an API that follows -// the Kubernetes conventions, but may not be the Kubernetes API. 
-func RESTClientFor(config *Config) (*RESTClient, error) { - if config.GroupVersion == nil { - return nil, fmt.Errorf("GroupVersion is required when initializing a RESTClient") - } - if config.NegotiatedSerializer == nil { - return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") - } - qps := config.QPS - if config.QPS == 0.0 { - qps = DefaultQPS - } - burst := config.Burst - if config.Burst == 0 { - burst = DefaultBurst - } - - baseURL, versionedAPIPath, err := defaultServerUrlFor(config) - if err != nil { - return nil, err - } - - transport, err := TransportFor(config) - if err != nil { - return nil, err - } - - var httpClient *http.Client - if transport != http.DefaultTransport { - httpClient = &http.Client{Transport: transport} - if config.Timeout > 0 { - httpClient.Timeout = config.Timeout - } - } - - return NewRESTClient(baseURL, versionedAPIPath, config.ContentConfig, qps, burst, config.RateLimiter, httpClient) -} - -// UnversionedRESTClientFor is the same as RESTClientFor, except that it allows -// the config.Version to be empty. -func UnversionedRESTClientFor(config *Config) (*RESTClient, error) { - if config.NegotiatedSerializer == nil { - return nil, fmt.Errorf("NeogitatedSerializer is required when initializing a RESTClient") - } - - baseURL, versionedAPIPath, err := defaultServerUrlFor(config) - if err != nil { - return nil, err - } - - transport, err := TransportFor(config) - if err != nil { - return nil, err - } - - var httpClient *http.Client - if transport != http.DefaultTransport { - httpClient = &http.Client{Transport: transport} - if config.Timeout > 0 { - httpClient.Timeout = config.Timeout - } - } - - versionConfig := config.ContentConfig - if versionConfig.GroupVersion == nil { - v := unversioned.SchemeGroupVersion - versionConfig.GroupVersion = &v - } - - return NewRESTClient(baseURL, versionedAPIPath, versionConfig, config.QPS, config.Burst, config.RateLimiter, httpClient) -} - -// SetKubernetesDefaults sets default values on the provided client config for accessing the -// Kubernetes API or returns an error if any of the defaults are impossible or invalid. -func SetKubernetesDefaults(config *Config) error { - if len(config.UserAgent) == 0 { - config.UserAgent = DefaultKubernetesUserAgent() - } - return nil -} - -// DefaultKubernetesUserAgent returns the default user agent that clients can use. -func DefaultKubernetesUserAgent() string { - commit := version.Get().GitCommit - if len(commit) > 7 { - commit = commit[:7] - } - if len(commit) == 0 { - commit = "unknown" - } - version := version.Get().GitVersion - seg := strings.SplitN(version, "-", 2) - version = seg[0] - return fmt.Sprintf("%s/%s (%s/%s) kubernetes/%s", path.Base(os.Args[0]), version, gruntime.GOOS, gruntime.GOARCH, commit) -} - -// InClusterConfig returns a config object which uses the service account -// kubernetes gives to pods. It's intended for clients that expect to be -// running inside a pod running on kubernetes. It will return an error if -// called from a process not running in a kubernetes environment. 
-func InClusterConfig() (*Config, error) { - host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT") - if len(host) == 0 || len(port) == 0 { - return nil, fmt.Errorf("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined") - } - - token, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/" + api.ServiceAccountTokenKey) - if err != nil { - return nil, err - } - tlsClientConfig := TLSClientConfig{} - rootCAFile := "/var/run/secrets/kubernetes.io/serviceaccount/" + api.ServiceAccountRootCAKey - if _, err := certutil.NewPool(rootCAFile); err != nil { - glog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) - } else { - tlsClientConfig.CAFile = rootCAFile - } - - return &Config{ - // TODO: switch to using cluster DNS. - Host: "https://" + net.JoinHostPort(host, port), - BearerToken: string(token), - TLSClientConfig: tlsClientConfig, - }, nil -} - -// IsConfigTransportTLS returns true if and only if the provided -// config will result in a protected connection to the server when it -// is passed to restclient.RESTClientFor(). Use to determine when to -// send credentials over the wire. -// -// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are -// still possible. -func IsConfigTransportTLS(config Config) bool { - baseURL, _, err := defaultServerUrlFor(&config) - if err != nil { - return false - } - return baseURL.Scheme == "https" -} - -// LoadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData, -// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are -// either populated or were empty to start. -func LoadTLSFiles(c *Config) error { - var err error - c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile) - if err != nil { - return err - } - - c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile) - if err != nil { - return err - } - - c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile) - if err != nil { - return err - } - return nil -} - -// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, -// or an error if an error occurred reading the file -func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { - if len(data) > 0 { - return data, nil - } - if len(file) > 0 { - fileData, err := ioutil.ReadFile(file) - if err != nil { - return []byte{}, err - } - return fileData, nil - } - return nil, nil -} - -func AddUserAgent(config *Config, userAgent string) *Config { - fullUserAgent := DefaultKubernetesUserAgent() + "/" + userAgent - config.UserAgent = fullUserAgent - return config -} - -// AnonymousClientConfig returns a copy of the given config with all user credentials (cert/key, bearer token, and username/password) removed -func AnonymousClientConfig(config *Config) *Config { - // copy only known safe fields - return &Config{ - Host: config.Host, - APIPath: config.APIPath, - Prefix: config.Prefix, - ContentConfig: config.ContentConfig, - TLSClientConfig: TLSClientConfig{ - CAFile: config.TLSClientConfig.CAFile, - CAData: config.TLSClientConfig.CAData, - }, - RateLimiter: config.RateLimiter, - Insecure: config.Insecure, - UserAgent: config.UserAgent, - Transport: config.Transport, - WrapTransport: config.WrapTransport, - QPS: config.QPS, - Burst: config.Burst, - Timeout: config.Timeout, - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/transport.go 
b/vendor/k8s.io/kubernetes/pkg/client/restclient/transport.go deleted file mode 100644 index 5f3d671d65..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/restclient/transport.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package restclient - -import ( - "crypto/tls" - "net/http" - - "k8s.io/kubernetes/pkg/client/transport" -) - -// TLSConfigFor returns a tls.Config that will provide the transport level security defined -// by the provided Config. Will return nil if no transport level security is requested. -func TLSConfigFor(config *Config) (*tls.Config, error) { - cfg, err := config.TransportConfig() - if err != nil { - return nil, err - } - return transport.TLSConfigFor(cfg) -} - -// TransportFor returns an http.RoundTripper that will provide the authentication -// or transport level security defined by the provided Config. Will return the -// default http.DefaultTransport if no special case behavior is needed. -func TransportFor(config *Config) (http.RoundTripper, error) { - cfg, err := config.TransportConfig() - if err != nil { - return nil, err - } - return transport.New(cfg) -} - -// HTTPWrappersForConfig wraps a round tripper with any relevant layered behavior from the -// config. Exposed to allow more clients that need HTTP-like behavior but then must hijack -// the underlying connection (like WebSocket or HTTP2 clients). Pure HTTP clients should use -// the higher level TransportFor or RESTClientFor methods. -func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) { - cfg, err := config.TransportConfig() - if err != nil { - return nil, err - } - return transport.HTTPWrappersForConfig(cfg, rt) -} - -// TransportConfig converts a client config to an appropriate transport config. 
-func (c *Config) TransportConfig() (*transport.Config, error) { - wt := c.WrapTransport - if c.AuthProvider != nil { - provider, err := GetAuthProvider(c.Host, c.AuthProvider, c.AuthConfigPersister) - if err != nil { - return nil, err - } - if wt != nil { - previousWT := wt - wt = func(rt http.RoundTripper) http.RoundTripper { - return provider.WrapTransport(previousWT(rt)) - } - } else { - wt = provider.WrapTransport - } - } - return &transport.Config{ - UserAgent: c.UserAgent, - Transport: c.Transport, - WrapTransport: wt, - TLS: transport.TLSConfig{ - CAFile: c.CAFile, - CAData: c.CAData, - CertFile: c.CertFile, - CertData: c.CertData, - KeyFile: c.KeyFile, - KeyData: c.KeyData, - Insecure: c.Insecure, - }, - Username: c.Username, - Password: c.Password, - BearerToken: c.BearerToken, - Impersonate: c.Impersonate, - }, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/retry/BUILD b/vendor/k8s.io/kubernetes/pkg/client/retry/BUILD deleted file mode 100644 index 9d268ba57e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/retry/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["util.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api/errors:go_default_library", - "//pkg/util/wait:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["util_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api/errors:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/util/wait:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/retry/util.go b/vendor/k8s.io/kubernetes/pkg/client/retry/util.go deleted file mode 100644 index 0732b964cb..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/retry/util.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package retry - -import ( - "time" - - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/util/wait" -) - -// DefaultRetry is the recommended retry for a conflict where multiple clients -// are making changes to the same resource. -var DefaultRetry = wait.Backoff{ - Steps: 5, - Duration: 10 * time.Millisecond, - Factor: 1.0, - Jitter: 0.1, -} - -// DefaultBackoff is the recommended backoff for a conflict where a client -// may be attempting to make an unrelated modification to a resource under -// active management by one or more controllers. -var DefaultBackoff = wait.Backoff{ - Steps: 4, - Duration: 10 * time.Millisecond, - Factor: 5.0, - Jitter: 0.1, -} - -// RetryConflict executes the provided function repeatedly, retrying if the server returns a conflicting -// write. Callers should preserve previous executions if they wish to retry changes. It performs an -// exponential backoff. 
-// -// var pod *api.Pod -// err := RetryOnConflict(DefaultBackoff, func() (err error) { -// pod, err = c.Pods("mynamespace").UpdateStatus(podStatus) -// return -// }) -// if err != nil { -// // may be conflict if max retries were hit -// return err -// } -// ... -// -// TODO: Make Backoff an interface? -func RetryOnConflict(backoff wait.Backoff, fn func() error) error { - var lastConflictErr error - err := wait.ExponentialBackoff(backoff, func() (bool, error) { - err := fn() - switch { - case err == nil: - return true, nil - case errors.IsConflict(err): - lastConflictErr = err - return false, nil - default: - return false, err - } - }) - if err == wait.ErrWaitTimeout { - err = lastConflictErr - } - return err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/testing/core/BUILD b/vendor/k8s.io/kubernetes/pkg/client/testing/core/BUILD deleted file mode 100644 index ce9d02aa39..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/testing/core/BUILD +++ /dev/null @@ -1,45 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "actions.go", - "fake.go", - "fixture.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/fields:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/version:go_default_library", - "//pkg/watch:go_default_library", - ], -) - -go_test( - name = "go_default_xtest", - srcs = ["fake_test.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/testing/core/actions.go b/vendor/k8s.io/kubernetes/pkg/client/testing/core/actions.go deleted file mode 100644 index 534517f6e8..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/testing/core/actions.go +++ /dev/null @@ -1,476 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package core - -import ( - "fmt" - "path" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" -) - -func NewRootGetAction(resource unversioned.GroupVersionResource, name string) GetActionImpl { - action := GetActionImpl{} - action.Verb = "get" - action.Resource = resource - action.Name = name - - return action -} - -func NewGetAction(resource unversioned.GroupVersionResource, namespace, name string) GetActionImpl { - action := GetActionImpl{} - action.Verb = "get" - action.Resource = resource - action.Namespace = namespace - action.Name = name - - return action -} - -func NewRootListAction(resource unversioned.GroupVersionResource, opts interface{}) ListActionImpl { - action := ListActionImpl{} - action.Verb = "list" - action.Resource = resource - labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) - action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} - - return action -} - -func NewListAction(resource unversioned.GroupVersionResource, namespace string, opts interface{}) ListActionImpl { - action := ListActionImpl{} - action.Verb = "list" - action.Resource = resource - action.Namespace = namespace - labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) - action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} - - return action -} - -func NewRootCreateAction(resource unversioned.GroupVersionResource, object runtime.Object) CreateActionImpl { - action := CreateActionImpl{} - action.Verb = "create" - action.Resource = resource - action.Object = object - - return action -} - -func NewCreateAction(resource unversioned.GroupVersionResource, namespace string, object runtime.Object) CreateActionImpl { - action := CreateActionImpl{} - action.Verb = "create" - action.Resource = resource - action.Namespace = namespace - action.Object = object - - return action -} - -func NewRootUpdateAction(resource unversioned.GroupVersionResource, object runtime.Object) UpdateActionImpl { - action := UpdateActionImpl{} - action.Verb = "update" - action.Resource = resource - action.Object = object - - return action -} - -func NewUpdateAction(resource unversioned.GroupVersionResource, namespace string, object runtime.Object) UpdateActionImpl { - action := UpdateActionImpl{} - action.Verb = "update" - action.Resource = resource - action.Namespace = namespace - action.Object = object - - return action -} - -func NewRootPatchAction(resource unversioned.GroupVersionResource, name string, patch []byte) PatchActionImpl { - action := PatchActionImpl{} - action.Verb = "patch" - action.Resource = resource - action.Name = name - action.Patch = patch - - return action -} - -func NewPatchAction(resource unversioned.GroupVersionResource, namespace string, name string, patch []byte) PatchActionImpl { - action := PatchActionImpl{} - action.Verb = "patch" - action.Resource = resource - action.Namespace = namespace - action.Name = name - action.Patch = patch - - return action -} - -func NewRootPatchSubresourceAction(resource unversioned.GroupVersionResource, name string, patch []byte, subresources ...string) PatchActionImpl { - action := PatchActionImpl{} - action.Verb = "patch" - action.Resource = resource - action.Subresource = path.Join(subresources...) 
- action.Name = name - action.Patch = patch - - return action -} - -func NewPatchSubresourceAction(resource unversioned.GroupVersionResource, namespace, name string, patch []byte, subresources ...string) PatchActionImpl { - action := PatchActionImpl{} - action.Verb = "patch" - action.Resource = resource - action.Subresource = path.Join(subresources...) - action.Namespace = namespace - action.Name = name - action.Patch = patch - - return action -} - -func NewRootUpdateSubresourceAction(resource unversioned.GroupVersionResource, subresource string, object runtime.Object) UpdateActionImpl { - action := UpdateActionImpl{} - action.Verb = "update" - action.Resource = resource - action.Subresource = subresource - action.Object = object - - return action -} -func NewUpdateSubresourceAction(resource unversioned.GroupVersionResource, subresource string, namespace string, object runtime.Object) UpdateActionImpl { - action := UpdateActionImpl{} - action.Verb = "update" - action.Resource = resource - action.Subresource = subresource - action.Namespace = namespace - action.Object = object - - return action -} - -func NewRootDeleteAction(resource unversioned.GroupVersionResource, name string) DeleteActionImpl { - action := DeleteActionImpl{} - action.Verb = "delete" - action.Resource = resource - action.Name = name - - return action -} - -func NewDeleteAction(resource unversioned.GroupVersionResource, namespace, name string) DeleteActionImpl { - action := DeleteActionImpl{} - action.Verb = "delete" - action.Resource = resource - action.Namespace = namespace - action.Name = name - - return action -} - -func NewRootDeleteCollectionAction(resource unversioned.GroupVersionResource, opts interface{}) DeleteCollectionActionImpl { - action := DeleteCollectionActionImpl{} - action.Verb = "delete-collection" - action.Resource = resource - labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) - action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} - - return action -} - -func NewDeleteCollectionAction(resource unversioned.GroupVersionResource, namespace string, opts interface{}) DeleteCollectionActionImpl { - action := DeleteCollectionActionImpl{} - action.Verb = "delete-collection" - action.Resource = resource - action.Namespace = namespace - labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) - action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} - - return action -} - -func NewRootWatchAction(resource unversioned.GroupVersionResource, opts interface{}) WatchActionImpl { - action := WatchActionImpl{} - action.Verb = "watch" - action.Resource = resource - labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) - action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} - - return action -} - -func ExtractFromListOptions(opts interface{}) (labelSelector labels.Selector, fieldSelector fields.Selector, resourceVersion string) { - var err error - switch t := opts.(type) { - case api.ListOptions: - labelSelector = t.LabelSelector - fieldSelector = t.FieldSelector - resourceVersion = t.ResourceVersion - case v1.ListOptions: - labelSelector, err = labels.Parse(t.LabelSelector) - if err != nil { - panic(err) - } - fieldSelector, err = fields.ParseSelector(t.FieldSelector) - if err != nil { - panic(err) - } - resourceVersion = t.ResourceVersion - default: - panic(fmt.Errorf("expect a ListOptions")) - } - if labelSelector == nil { - labelSelector = labels.Everything() - } - if fieldSelector == nil { - 
fieldSelector = fields.Everything() - } - return labelSelector, fieldSelector, resourceVersion -} - -func NewWatchAction(resource unversioned.GroupVersionResource, namespace string, opts interface{}) WatchActionImpl { - action := WatchActionImpl{} - action.Verb = "watch" - action.Resource = resource - action.Namespace = namespace - labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) - action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} - - return action -} - -func NewProxyGetAction(resource unversioned.GroupVersionResource, namespace, scheme, name, port, path string, params map[string]string) ProxyGetActionImpl { - action := ProxyGetActionImpl{} - action.Verb = "get" - action.Resource = resource - action.Namespace = namespace - action.Scheme = scheme - action.Name = name - action.Port = port - action.Path = path - action.Params = params - return action -} - -type ListRestrictions struct { - Labels labels.Selector - Fields fields.Selector -} -type WatchRestrictions struct { - Labels labels.Selector - Fields fields.Selector - ResourceVersion string -} - -type Action interface { - GetNamespace() string - GetVerb() string - GetResource() unversioned.GroupVersionResource - GetSubresource() string - Matches(verb, resource string) bool -} - -type GenericAction interface { - Action - GetValue() interface{} -} - -type GetAction interface { - Action - GetName() string -} - -type ListAction interface { - Action - GetListRestrictions() ListRestrictions -} - -type CreateAction interface { - Action - GetObject() runtime.Object -} - -type UpdateAction interface { - Action - GetObject() runtime.Object -} - -type DeleteAction interface { - Action - GetName() string -} - -type WatchAction interface { - Action - GetWatchRestrictions() WatchRestrictions -} - -type ProxyGetAction interface { - Action - GetScheme() string - GetName() string - GetPort() string - GetPath() string - GetParams() map[string]string -} - -type ActionImpl struct { - Namespace string - Verb string - Resource unversioned.GroupVersionResource - Subresource string -} - -func (a ActionImpl) GetNamespace() string { - return a.Namespace -} -func (a ActionImpl) GetVerb() string { - return a.Verb -} -func (a ActionImpl) GetResource() unversioned.GroupVersionResource { - return a.Resource -} -func (a ActionImpl) GetSubresource() string { - return a.Subresource -} -func (a ActionImpl) Matches(verb, resource string) bool { - return strings.ToLower(verb) == strings.ToLower(a.Verb) && - strings.ToLower(resource) == strings.ToLower(a.Resource.Resource) -} - -type GenericActionImpl struct { - ActionImpl - Value interface{} -} - -func (a GenericActionImpl) GetValue() interface{} { - return a.Value -} - -type GetActionImpl struct { - ActionImpl - Name string -} - -func (a GetActionImpl) GetName() string { - return a.Name -} - -type ListActionImpl struct { - ActionImpl - ListRestrictions ListRestrictions -} - -func (a ListActionImpl) GetListRestrictions() ListRestrictions { - return a.ListRestrictions -} - -type CreateActionImpl struct { - ActionImpl - Object runtime.Object -} - -func (a CreateActionImpl) GetObject() runtime.Object { - return a.Object -} - -type UpdateActionImpl struct { - ActionImpl - Object runtime.Object -} - -func (a UpdateActionImpl) GetObject() runtime.Object { - return a.Object -} - -type PatchActionImpl struct { - ActionImpl - Name string - Patch []byte -} - -func (a PatchActionImpl) GetName() string { - return a.Name -} - -func (a PatchActionImpl) GetPatch() 
[]byte { - return a.Patch -} - -type DeleteActionImpl struct { - ActionImpl - Name string -} - -func (a DeleteActionImpl) GetName() string { - return a.Name -} - -type DeleteCollectionActionImpl struct { - ActionImpl - ListRestrictions ListRestrictions -} - -func (a DeleteCollectionActionImpl) GetListRestrictions() ListRestrictions { - return a.ListRestrictions -} - -type WatchActionImpl struct { - ActionImpl - WatchRestrictions WatchRestrictions -} - -func (a WatchActionImpl) GetWatchRestrictions() WatchRestrictions { - return a.WatchRestrictions -} - -type ProxyGetActionImpl struct { - ActionImpl - Scheme string - Name string - Port string - Path string - Params map[string]string -} - -func (a ProxyGetActionImpl) GetScheme() string { - return a.Scheme -} - -func (a ProxyGetActionImpl) GetName() string { - return a.Name -} - -func (a ProxyGetActionImpl) GetPort() string { - return a.Port -} - -func (a ProxyGetActionImpl) GetPath() string { - return a.Path -} - -func (a ProxyGetActionImpl) GetParams() map[string]string { - return a.Params -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/testing/core/fake.go b/vendor/k8s.io/kubernetes/pkg/client/testing/core/fake.go deleted file mode 100644 index 9ff59dbcd6..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/testing/core/fake.go +++ /dev/null @@ -1,251 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package core - -import ( - "fmt" - "sync" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/version" - "k8s.io/kubernetes/pkg/watch" -) - -// Fake implements client.Interface. Meant to be embedded into a struct to get -// a default implementation. This makes faking out just the method you want to -// test easier. -type Fake struct { - sync.RWMutex - actions []Action // these may be castable to other types, but "Action" is the minimum - - // ReactionChain is the list of reactors that will be attempted for every - // request in the order they are tried. - ReactionChain []Reactor - // WatchReactionChain is the list of watch reactors that will be attempted - // for every request in the order they are tried. - WatchReactionChain []WatchReactor - // ProxyReactionChain is the list of proxy reactors that will be attempted - // for every request in the order they are tried. - ProxyReactionChain []ProxyReactor - - Resources map[string]*unversioned.APIResourceList -} - -// Reactor is an interface to allow the composition of reaction functions. -type Reactor interface { - // Handles indicates whether or not this Reactor deals with a given - // action. - Handles(action Action) bool - // React handles the action and returns results. It may choose to - // delegate by indicated handled=false. - React(action Action) (handled bool, ret runtime.Object, err error) -} - -// WatchReactor is an interface to allow the composition of watch functions. 
-type WatchReactor interface { - // Handles indicates whether or not this Reactor deals with a given - // action. - Handles(action Action) bool - // React handles a watch action and returns results. It may choose to - // delegate by indicating handled=false. - React(action Action) (handled bool, ret watch.Interface, err error) -} - -// ProxyReactor is an interface to allow the composition of proxy get -// functions. -type ProxyReactor interface { - // Handles indicates whether or not this Reactor deals with a given - // action. - Handles(action Action) bool - // React handles a watch action and returns results. It may choose to - // delegate by indicating handled=false. - React(action Action) (handled bool, ret restclient.ResponseWrapper, err error) -} - -// ReactionFunc is a function that returns an object or error for a given -// Action. If "handled" is false, then the test client will ignore the -// results and continue to the next ReactionFunc. A ReactionFunc can describe -// reactions on subresources by testing the result of the action's -// GetSubresource() method. -type ReactionFunc func(action Action) (handled bool, ret runtime.Object, err error) - -// WatchReactionFunc is a function that returns a watch interface. If -// "handled" is false, then the test client will ignore the results and -// continue to the next ReactionFunc. -type WatchReactionFunc func(action Action) (handled bool, ret watch.Interface, err error) - -// ProxyReactionFunc is a function that returns a ResponseWrapper interface -// for a given Action. If "handled" is false, then the test client will -// ignore the results and continue to the next ProxyReactionFunc. -type ProxyReactionFunc func(action Action) (handled bool, ret restclient.ResponseWrapper, err error) - -// AddReactor appends a reactor to the end of the chain. -func (c *Fake) AddReactor(verb, resource string, reaction ReactionFunc) { - c.ReactionChain = append(c.ReactionChain, &SimpleReactor{verb, resource, reaction}) -} - -// PrependReactor adds a reactor to the beginning of the chain. -func (c *Fake) PrependReactor(verb, resource string, reaction ReactionFunc) { - c.ReactionChain = append([]Reactor{&SimpleReactor{verb, resource, reaction}}, c.ReactionChain...) -} - -// AddWatchReactor appends a reactor to the end of the chain. -func (c *Fake) AddWatchReactor(resource string, reaction WatchReactionFunc) { - c.WatchReactionChain = append(c.WatchReactionChain, &SimpleWatchReactor{resource, reaction}) -} - -// PrependWatchReactor adds a reactor to the beginning of the chain. -func (c *Fake) PrependWatchReactor(resource string, reaction WatchReactionFunc) { - c.WatchReactionChain = append([]WatchReactor{&SimpleWatchReactor{resource, reaction}}, c.WatchReactionChain...) -} - -// AddProxyReactor appends a reactor to the end of the chain. -func (c *Fake) AddProxyReactor(resource string, reaction ProxyReactionFunc) { - c.ProxyReactionChain = append(c.ProxyReactionChain, &SimpleProxyReactor{resource, reaction}) -} - -// PrependProxyReactor adds a reactor to the beginning of the chain. -func (c *Fake) PrependProxyReactor(resource string, reaction ProxyReactionFunc) { - c.ProxyReactionChain = append([]ProxyReactor{&SimpleProxyReactor{resource, reaction}}, c.ProxyReactionChain...) -} - -// Invokes records the provided Action and then invokes the ReactionFunc that -// handles the action if one exists. defaultReturnObj is expected to be of the -// same type a normal call would return. 
-func (c *Fake) Invokes(action Action, defaultReturnObj runtime.Object) (runtime.Object, error) { - c.Lock() - defer c.Unlock() - - c.actions = append(c.actions, action) - for _, reactor := range c.ReactionChain { - if !reactor.Handles(action) { - continue - } - - handled, ret, err := reactor.React(action) - if !handled { - continue - } - - return ret, err - } - - return defaultReturnObj, nil -} - -// InvokesWatch records the provided Action and then invokes the ReactionFunc -// that handles the action if one exists. -func (c *Fake) InvokesWatch(action Action) (watch.Interface, error) { - c.Lock() - defer c.Unlock() - - c.actions = append(c.actions, action) - for _, reactor := range c.WatchReactionChain { - if !reactor.Handles(action) { - continue - } - - handled, ret, err := reactor.React(action) - if !handled { - continue - } - - return ret, err - } - - return nil, fmt.Errorf("unhandled watch: %#v", action) -} - -// InvokesProxy records the provided Action and then invokes the ReactionFunc -// that handles the action if one exists. -func (c *Fake) InvokesProxy(action Action) restclient.ResponseWrapper { - c.Lock() - defer c.Unlock() - - c.actions = append(c.actions, action) - for _, reactor := range c.ProxyReactionChain { - if !reactor.Handles(action) { - continue - } - - handled, ret, err := reactor.React(action) - if !handled || err != nil { - continue - } - - return ret - } - - return nil -} - -// ClearActions clears the history of actions called on the fake client. -func (c *Fake) ClearActions() { - c.Lock() - defer c.Unlock() - - c.actions = make([]Action, 0) -} - -// Actions returns a chronologically ordered slice fake actions called on the -// fake client. -func (c *Fake) Actions() []Action { - c.RLock() - defer c.RUnlock() - fa := make([]Action, len(c.actions)) - copy(fa, c.actions) - return fa -} - -// TODO: this probably should be moved to somewhere else. 
-type FakeDiscovery struct { - *Fake -} - -func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*unversioned.APIResourceList, error) { - action := ActionImpl{ - Verb: "get", - Resource: unversioned.GroupVersionResource{Resource: "resource"}, - } - c.Invokes(action, nil) - return c.Resources[groupVersion], nil -} - -func (c *FakeDiscovery) ServerResources() (map[string]*unversioned.APIResourceList, error) { - action := ActionImpl{ - Verb: "get", - Resource: unversioned.GroupVersionResource{Resource: "resource"}, - } - c.Invokes(action, nil) - return c.Resources, nil -} - -func (c *FakeDiscovery) ServerGroups() (*unversioned.APIGroupList, error) { - return nil, nil -} - -func (c *FakeDiscovery) ServerVersion() (*version.Info, error) { - action := ActionImpl{} - action.Verb = "get" - action.Resource = unversioned.GroupVersionResource{Resource: "version"} - - c.Invokes(action, nil) - versionInfo := version.Get() - return &versionInfo, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/transport/BUILD b/vendor/k8s.io/kubernetes/pkg/client/transport/BUILD deleted file mode 100644 index 6cb2a0bd21..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/transport/BUILD +++ /dev/null @@ -1,38 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "cache.go", - "config.go", - "round_trippers.go", - "transport.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/util/net:go_default_library", - "//vendor:github.com/golang/glog", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "cache_test.go", - "round_trippers_test.go", - "transport_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/transport/config.go b/vendor/k8s.io/kubernetes/pkg/client/transport/config.go deleted file mode 100644 index 6e5c68a30b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/transport/config.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package transport - -import "net/http" - -// Config holds various options for establishing a transport. -type Config struct { - // UserAgent is an optional field that specifies the caller of this - // request. - UserAgent string - - // The base TLS configuration for this transport. - TLS TLSConfig - - // Username and password for basic authentication - Username string - Password string - - // Bearer token for authentication - BearerToken string - - // Impersonate is the username that this Config will impersonate - Impersonate string - - // Transport may be used for custom HTTP behavior. This attribute may - // not be specified with the TLS client certificate options. Use - // WrapTransport for most client level operations. 
- Transport http.RoundTripper - - // WrapTransport will be invoked for custom HTTP behavior after the - // underlying transport is initialized (either the transport created - // from TLSClientConfig, Transport, or http.DefaultTransport). The - // config may layer other RoundTrippers on top of the returned - // RoundTripper. - WrapTransport func(rt http.RoundTripper) http.RoundTripper -} - -// HasCA returns whether the configuration has a certificate authority or not. -func (c *Config) HasCA() bool { - return len(c.TLS.CAData) > 0 || len(c.TLS.CAFile) > 0 -} - -// HasBasicAuth returns whether the configuration has basic authentication or not. -func (c *Config) HasBasicAuth() bool { - return len(c.Username) != 0 -} - -// HasTokenAuth returns whether the configuration has token authentication or not. -func (c *Config) HasTokenAuth() bool { - return len(c.BearerToken) != 0 -} - -// HasCertAuth returns whether the configuration has certificate authentication or not. -func (c *Config) HasCertAuth() bool { - return len(c.TLS.CertData) != 0 || len(c.TLS.CertFile) != 0 -} - -// TLSConfig holds the information needed to set up a TLS transport. -type TLSConfig struct { - CAFile string // Path of the PEM-encoded server trusted root certificates. - CertFile string // Path of the PEM-encoded client certificate. - KeyFile string // Path of the PEM-encoded client key. - - Insecure bool // Server should be accessed without verifying the certificate. For testing only. - - CAData []byte // Bytes of the PEM-encoded server trusted root certificates. Supercedes CAFile. - CertData []byte // Bytes of the PEM-encoded client certificate. Supercedes CertFile. - KeyData []byte // Bytes of the PEM-encoded client key. Supercedes KeyFile. -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/transport/round_trippers.go b/vendor/k8s.io/kubernetes/pkg/client/transport/round_trippers.go deleted file mode 100644 index aadf0cbf9a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/transport/round_trippers.go +++ /dev/null @@ -1,337 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package transport - -import ( - "fmt" - "net/http" - "time" - - "github.com/golang/glog" -) - -// HTTPWrappersForConfig wraps a round tripper with any relevant layered -// behavior from the config. Exposed to allow more clients that need HTTP-like -// behavior but then must hijack the underlying connection (like WebSocket or -// HTTP2 clients). Pure HTTP clients should use the RoundTripper returned from -// New. 
-func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) { - if config.WrapTransport != nil { - rt = config.WrapTransport(rt) - } - - rt = DebugWrappers(rt) - - // Set authentication wrappers - switch { - case config.HasBasicAuth() && config.HasTokenAuth(): - return nil, fmt.Errorf("username/password or bearer token may be set, but not both") - case config.HasTokenAuth(): - rt = NewBearerAuthRoundTripper(config.BearerToken, rt) - case config.HasBasicAuth(): - rt = NewBasicAuthRoundTripper(config.Username, config.Password, rt) - } - if len(config.UserAgent) > 0 { - rt = NewUserAgentRoundTripper(config.UserAgent, rt) - } - if len(config.Impersonate) > 0 { - rt = NewImpersonatingRoundTripper(config.Impersonate, rt) - } - return rt, nil -} - -// DebugWrappers wraps a round tripper and logs based on the current log level. -func DebugWrappers(rt http.RoundTripper) http.RoundTripper { - switch { - case bool(glog.V(9)): - rt = newDebuggingRoundTripper(rt, debugCurlCommand, debugURLTiming, debugResponseHeaders) - case bool(glog.V(8)): - rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus, debugResponseHeaders) - case bool(glog.V(7)): - rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus) - case bool(glog.V(6)): - rt = newDebuggingRoundTripper(rt, debugURLTiming) - } - - return rt -} - -type requestCanceler interface { - CancelRequest(*http.Request) -} - -type userAgentRoundTripper struct { - agent string - rt http.RoundTripper -} - -func NewUserAgentRoundTripper(agent string, rt http.RoundTripper) http.RoundTripper { - return &userAgentRoundTripper{agent, rt} -} - -func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if len(req.Header.Get("User-Agent")) != 0 { - return rt.rt.RoundTrip(req) - } - req = cloneRequest(req) - req.Header.Set("User-Agent", rt.agent) - return rt.rt.RoundTrip(req) -} - -func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - glog.Errorf("CancelRequest not implemented") - } -} - -func (rt *userAgentRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } - -type basicAuthRoundTripper struct { - username string - password string - rt http.RoundTripper -} - -// NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a -// request unless it has already been set. -func NewBasicAuthRoundTripper(username, password string, rt http.RoundTripper) http.RoundTripper { - return &basicAuthRoundTripper{username, password, rt} -} - -func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if len(req.Header.Get("Authorization")) != 0 { - return rt.rt.RoundTrip(req) - } - req = cloneRequest(req) - req.SetBasicAuth(rt.username, rt.password) - return rt.rt.RoundTrip(req) -} - -func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - glog.Errorf("CancelRequest not implemented") - } -} - -func (rt *basicAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } - -type impersonatingRoundTripper struct { - impersonate string - delegate http.RoundTripper -} - -// NewImpersonatingRoundTripper will add an Act-As header to a request unless it has already been set. 
-func NewImpersonatingRoundTripper(impersonate string, delegate http.RoundTripper) http.RoundTripper { - return &impersonatingRoundTripper{impersonate, delegate} -} - -func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if len(req.Header.Get("Impersonate-User")) != 0 { - return rt.delegate.RoundTrip(req) - } - req = cloneRequest(req) - req.Header.Set("Impersonate-User", rt.impersonate) - return rt.delegate.RoundTrip(req) -} - -func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.delegate.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - glog.Errorf("CancelRequest not implemented") - } -} - -func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.delegate } - -type bearerAuthRoundTripper struct { - bearer string - rt http.RoundTripper -} - -// NewBearerAuthRoundTripper adds the provided bearer token to a request -// unless the authorization header has already been set. -func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTripper { - return &bearerAuthRoundTripper{bearer, rt} -} - -func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if len(req.Header.Get("Authorization")) != 0 { - return rt.rt.RoundTrip(req) - } - - req = cloneRequest(req) - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", rt.bearer)) - return rt.rt.RoundTrip(req) -} - -func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - glog.Errorf("CancelRequest not implemented") - } -} - -func (rt *bearerAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. 
-func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return r2 -} - -// requestInfo keeps track of information about a request/response combination -type requestInfo struct { - RequestHeaders http.Header - RequestVerb string - RequestURL string - - ResponseStatus string - ResponseHeaders http.Header - ResponseErr error - - Duration time.Duration -} - -// newRequestInfo creates a new RequestInfo based on an http request -func newRequestInfo(req *http.Request) *requestInfo { - return &requestInfo{ - RequestURL: req.URL.String(), - RequestVerb: req.Method, - RequestHeaders: req.Header, - } -} - -// complete adds information about the response to the requestInfo -func (r *requestInfo) complete(response *http.Response, err error) { - if err != nil { - r.ResponseErr = err - return - } - r.ResponseStatus = response.Status - r.ResponseHeaders = response.Header -} - -// toCurl returns a string that can be run as a command in a terminal (minus the body) -func (r *requestInfo) toCurl() string { - headers := "" - for key, values := range r.RequestHeaders { - for _, value := range values { - headers += fmt.Sprintf(` -H %q`, fmt.Sprintf("%s: %s", key, value)) - } - } - - return fmt.Sprintf("curl -k -v -X%s %s %s", r.RequestVerb, headers, r.RequestURL) -} - -// debuggingRoundTripper will display information about the requests passing -// through it based on what is configured -type debuggingRoundTripper struct { - delegatedRoundTripper http.RoundTripper - - levels map[debugLevel]bool -} - -type debugLevel int - -const ( - debugJustURL debugLevel = iota - debugURLTiming - debugCurlCommand - debugRequestHeaders - debugResponseStatus - debugResponseHeaders -) - -func newDebuggingRoundTripper(rt http.RoundTripper, levels ...debugLevel) *debuggingRoundTripper { - drt := &debuggingRoundTripper{ - delegatedRoundTripper: rt, - levels: make(map[debugLevel]bool, len(levels)), - } - for _, v := range levels { - drt.levels[v] = true - } - return drt -} - -func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.delegatedRoundTripper.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - glog.Errorf("CancelRequest not implemented") - } -} - -func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - reqInfo := newRequestInfo(req) - - if rt.levels[debugJustURL] { - glog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL) - } - if rt.levels[debugCurlCommand] { - glog.Infof("%s", reqInfo.toCurl()) - - } - if rt.levels[debugRequestHeaders] { - glog.Infof("Request Headers:") - for key, values := range reqInfo.RequestHeaders { - for _, value := range values { - glog.Infof(" %s: %s", key, value) - } - } - } - - startTime := time.Now() - response, err := rt.delegatedRoundTripper.RoundTrip(req) - reqInfo.Duration = time.Since(startTime) - - reqInfo.complete(response, err) - - if rt.levels[debugURLTiming] { - glog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) - } - if rt.levels[debugResponseStatus] { - glog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) - } - if rt.levels[debugResponseHeaders] { - glog.Infof("Response Headers:") - for key, values := range 
reqInfo.ResponseHeaders { - for _, value := range values { - glog.Infof(" %s: %s", key, value) - } - } - } - - return response, err -} - -func (rt *debuggingRoundTripper) WrappedRoundTripper() http.RoundTripper { - return rt.delegatedRoundTripper -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/transport/transport.go b/vendor/k8s.io/kubernetes/pkg/client/transport/transport.go deleted file mode 100644 index 9c5b9ef3c3..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/transport/transport.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package transport - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net/http" -) - -// New returns an http.RoundTripper that will provide the authentication -// or transport level security defined by the provided Config. -func New(config *Config) (http.RoundTripper, error) { - // Set transport level security - if config.Transport != nil && (config.HasCA() || config.HasCertAuth() || config.TLS.Insecure) { - return nil, fmt.Errorf("using a custom transport with TLS certificate options or the insecure flag is not allowed") - } - - var ( - rt http.RoundTripper - err error - ) - - if config.Transport != nil { - rt = config.Transport - } else { - rt, err = tlsCache.get(config) - if err != nil { - return nil, err - } - } - - return HTTPWrappersForConfig(config, rt) -} - -// TLSConfigFor returns a tls.Config that will provide the transport level security defined -// by the provided Config. Will return nil if no transport level security is requested. -func TLSConfigFor(c *Config) (*tls.Config, error) { - if !(c.HasCA() || c.HasCertAuth() || c.TLS.Insecure) { - return nil, nil - } - if c.HasCA() && c.TLS.Insecure { - return nil, fmt.Errorf("specifying a root certificates file with the insecure flag is not allowed") - } - if err := loadTLSFiles(c); err != nil { - return nil, err - } - - tlsConfig := &tls.Config{ - // Can't use SSLv3 because of POODLE and BEAST - // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher - // Can't use TLSv1.1 because of RC4 cipher usage - MinVersion: tls.VersionTLS12, - InsecureSkipVerify: c.TLS.Insecure, - } - - if c.HasCA() { - tlsConfig.RootCAs = rootCertPool(c.TLS.CAData) - } - - if c.HasCertAuth() { - cert, err := tls.X509KeyPair(c.TLS.CertData, c.TLS.KeyData) - if err != nil { - return nil, err - } - tlsConfig.Certificates = []tls.Certificate{cert} - } - - return tlsConfig, nil -} - -// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData, -// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are -// either populated or were empty to start. 
-func loadTLSFiles(c *Config) error { - var err error - c.TLS.CAData, err = dataFromSliceOrFile(c.TLS.CAData, c.TLS.CAFile) - if err != nil { - return err - } - - c.TLS.CertData, err = dataFromSliceOrFile(c.TLS.CertData, c.TLS.CertFile) - if err != nil { - return err - } - - c.TLS.KeyData, err = dataFromSliceOrFile(c.TLS.KeyData, c.TLS.KeyFile) - if err != nil { - return err - } - return nil -} - -// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, -// or an error if an error occurred reading the file -func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { - if len(data) > 0 { - return data, nil - } - if len(file) > 0 { - fileData, err := ioutil.ReadFile(file) - if err != nil { - return []byte{}, err - } - return fileData, nil - } - return nil, nil -} - -// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs". -// When caData is not empty, it will be the ONLY information used in the CertPool. -func rootCertPool(caData []byte) *x509.CertPool { - // What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go - // code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values - // It doesn't allow trusting either/or, but hopefully that won't be an issue - if len(caData) == 0 { - return nil - } - - // if we have caData, use it - certPool := x509.NewCertPool() - certPool.AppendCertsFromPEM(caData) - return certPool -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/BUILD b/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/BUILD deleted file mode 100644 index eb8e5624eb..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/BUILD +++ /dev/null @@ -1,74 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "discovery_client.go", - "helper.go", - "restmapper.go", - "unstructured.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/runtime/serializer:go_default_library", - "//pkg/util/sets:go_default_library", - "//pkg/version:go_default_library", - "//plugin/pkg/client/auth:go_default_library", - "//vendor:github.com/emicklei/go-restful/swagger", - "//vendor:github.com/golang/glog", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "discovery_client_test.go", - "restmapper_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api/errors:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/restclient/fake:go_default_library", - "//pkg/version:go_default_library", - "//vendor:github.com/emicklei/go-restful/swagger", - "//vendor:github.com/stretchr/testify/assert", - ], -) - -go_test( - name = "go_default_xtest", - srcs = ["helper_blackbox_test.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/testapi:go_default_library", - 
"//pkg/api/unversioned:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/restclient/fake:go_default_library", - "//pkg/client/typed/discovery:go_default_library", - "//pkg/runtime:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/discovery_client.go b/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/discovery_client.go deleted file mode 100644 index 377bbfd3b4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/discovery_client.go +++ /dev/null @@ -1,385 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package discovery - -import ( - "encoding/json" - "fmt" - "net/url" - "sort" - "strings" - - "github.com/emicklei/go-restful/swagger" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer" - "k8s.io/kubernetes/pkg/version" -) - -// DiscoveryInterface holds the methods that discover server-supported API groups, -// versions and resources. -type DiscoveryInterface interface { - RESTClient() restclient.Interface - ServerGroupsInterface - ServerResourcesInterface - ServerVersionInterface - SwaggerSchemaInterface -} - -// CachedDiscoveryInterface is a DiscoveryInterface with cache invalidation and freshness. -type CachedDiscoveryInterface interface { - DiscoveryInterface - // Fresh returns true if no cached data was used that had been retrieved before the instantiation. - Fresh() bool - // Invalidate enforces that no cached data is used in the future that is older than the current time. - Invalidate() -} - -// ServerGroupsInterface has methods for obtaining supported groups on the API server -type ServerGroupsInterface interface { - // ServerGroups returns the supported groups, with information like supported versions and the - // preferred version. - ServerGroups() (*unversioned.APIGroupList, error) -} - -// ServerResourcesInterface has methods for obtaining supported resources on the API server -type ServerResourcesInterface interface { - // ServerResourcesForGroupVersion returns the supported resources for a group and version. - ServerResourcesForGroupVersion(groupVersion string) (*unversioned.APIResourceList, error) - // ServerResources returns the supported resources for all groups and versions. - ServerResources() (map[string]*unversioned.APIResourceList, error) - // ServerPreferredResources returns the supported resources with the version preferred by the - // server. - ServerPreferredResources() ([]unversioned.GroupVersionResource, error) - // ServerPreferredNamespacedResources returns the supported namespaced resources with the - // version preferred by the server. 
- ServerPreferredNamespacedResources() ([]unversioned.GroupVersionResource, error) -} - -// ServerVersionInterface has a method for retrieving the server's version. -type ServerVersionInterface interface { - // ServerVersion retrieves and parses the server's version (git version). - ServerVersion() (*version.Info, error) -} - -// SwaggerSchemaInterface has a method to retrieve the swagger schema. -type SwaggerSchemaInterface interface { - // SwaggerSchema retrieves and parses the swagger API schema the server supports. - SwaggerSchema(version unversioned.GroupVersion) (*swagger.ApiDeclaration, error) -} - -// DiscoveryClient implements the functions that discover server-supported API groups, -// versions and resources. -type DiscoveryClient struct { - restClient restclient.Interface - - LegacyPrefix string -} - -// Convert unversioned.APIVersions to unversioned.APIGroup. APIVersions is used by legacy v1, so -// group would be "". -func apiVersionsToAPIGroup(apiVersions *unversioned.APIVersions) (apiGroup unversioned.APIGroup) { - groupVersions := []unversioned.GroupVersionForDiscovery{} - for _, version := range apiVersions.Versions { - groupVersion := unversioned.GroupVersionForDiscovery{ - GroupVersion: version, - Version: version, - } - groupVersions = append(groupVersions, groupVersion) - } - apiGroup.Versions = groupVersions - // There should be only one groupVersion returned at /api - apiGroup.PreferredVersion = groupVersions[0] - return -} - -// ServerGroups returns the supported groups, with information like supported versions and the -// preferred version. -func (d *DiscoveryClient) ServerGroups() (apiGroupList *unversioned.APIGroupList, err error) { - // Get the groupVersions exposed at /api - v := &unversioned.APIVersions{} - err = d.restClient.Get().AbsPath(d.LegacyPrefix).Do().Into(v) - apiGroup := unversioned.APIGroup{} - if err == nil { - apiGroup = apiVersionsToAPIGroup(v) - } - if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) { - return nil, err - } - - // Get the groupVersions exposed at /apis - apiGroupList = &unversioned.APIGroupList{} - err = d.restClient.Get().AbsPath("/apis").Do().Into(apiGroupList) - if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) { - return nil, err - } - // to be compatible with a v1.0 server, if it's a 403 or 404, ignore and return whatever we got from /api - if err != nil && (errors.IsNotFound(err) || errors.IsForbidden(err)) { - apiGroupList = &unversioned.APIGroupList{} - } - - // append the group retrieved from /api to the list - apiGroupList.Groups = append(apiGroupList.Groups, apiGroup) - return apiGroupList, nil -} - -// ServerResourcesForGroupVersion returns the supported resources for a group and version. -func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (resources *unversioned.APIResourceList, err error) { - url := url.URL{} - if len(groupVersion) == 0 { - return nil, fmt.Errorf("groupVersion shouldn't be empty") - } - if len(d.LegacyPrefix) > 0 && groupVersion == "v1" { - url.Path = d.LegacyPrefix + "/" + groupVersion - } else { - url.Path = "/apis/" + groupVersion - } - resources = &unversioned.APIResourceList{} - err = d.restClient.Get().AbsPath(url.String()).Do().Into(resources) - if err != nil { - // ignore 403 or 404 error to be compatible with an v1.0 server. 
- if groupVersion == "v1" && (errors.IsNotFound(err) || errors.IsForbidden(err)) { - return resources, nil - } - return nil, err - } - return resources, nil -} - -// ServerResources returns the supported resources for all groups and versions. -func (d *DiscoveryClient) ServerResources() (map[string]*unversioned.APIResourceList, error) { - apiGroups, err := d.ServerGroups() - if err != nil { - return nil, err - } - groupVersions := unversioned.ExtractGroupVersions(apiGroups) - result := map[string]*unversioned.APIResourceList{} - for _, groupVersion := range groupVersions { - resources, err := d.ServerResourcesForGroupVersion(groupVersion) - if err != nil { - return nil, err - } - result[groupVersion] = resources - } - return result, nil -} - -// ErrGroupDiscoveryFailed is returned if one or more API groups fail to load. -type ErrGroupDiscoveryFailed struct { - // Groups is a list of the groups that failed to load and the error cause - Groups map[unversioned.GroupVersion]error -} - -// Error implements the error interface -func (e *ErrGroupDiscoveryFailed) Error() string { - var groups []string - for k, v := range e.Groups { - groups = append(groups, fmt.Sprintf("%s: %v", k, v)) - } - sort.Strings(groups) - return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(groups, ", ")) -} - -// IsGroupDiscoveryFailedError returns true if the provided error indicates the server was unable to discover -// a complete list of APIs for the client to use. -func IsGroupDiscoveryFailedError(err error) bool { - _, ok := err.(*ErrGroupDiscoveryFailed) - return err != nil && ok -} - -// serverPreferredResources returns the supported resources with the version preferred by the -// server. If namespaced is true, only namespaced resources will be returned. -func (d *DiscoveryClient) serverPreferredResources(namespaced bool) ([]unversioned.GroupVersionResource, error) { - // retry in case the groups supported by the server change after ServerGroup() returns. - const maxRetries = 2 - var failedGroups map[unversioned.GroupVersion]error - var results []unversioned.GroupVersionResource - var resources map[unversioned.GroupResource]string -RetrieveGroups: - for i := 0; i < maxRetries; i++ { - results = []unversioned.GroupVersionResource{} - resources = map[unversioned.GroupResource]string{} - failedGroups = make(map[unversioned.GroupVersion]error) - serverGroupList, err := d.ServerGroups() - if err != nil { - return results, err - } - - for _, apiGroup := range serverGroupList.Groups { - versions := apiGroup.Versions - for _, version := range versions { - groupVersion := unversioned.GroupVersion{Group: apiGroup.Name, Version: version.Version} - apiResourceList, err := d.ServerResourcesForGroupVersion(version.GroupVersion) - if err != nil { - if i < maxRetries-1 { - continue RetrieveGroups - } - failedGroups[groupVersion] = err - continue - } - for _, apiResource := range apiResourceList.APIResources { - // ignore the root scoped resources if "namespaced" is true. - if namespaced && !apiResource.Namespaced { - continue - } - if strings.Contains(apiResource.Name, "/") { - continue - } - gvr := groupVersion.WithResource(apiResource.Name) - if _, ok := resources[gvr.GroupResource()]; ok { - if gvr.Version != apiGroup.PreferredVersion.Version { - continue - } - // remove previous entry, because it will be replaced with a preferred one - for i := range results { - if results[i].GroupResource() == gvr.GroupResource() { - results = append(results[:i], results[i+1:]...) 
- } - } - } - resources[gvr.GroupResource()] = gvr.Version - results = append(results, gvr) - } - } - } - if len(failedGroups) == 0 { - return results, nil - } - } - return results, &ErrGroupDiscoveryFailed{Groups: failedGroups} -} - -// ServerPreferredResources returns the supported resources with the version preferred by the -// server. -func (d *DiscoveryClient) ServerPreferredResources() ([]unversioned.GroupVersionResource, error) { - return d.serverPreferredResources(false) -} - -// ServerPreferredNamespacedResources returns the supported namespaced resources with the -// version preferred by the server. -func (d *DiscoveryClient) ServerPreferredNamespacedResources() ([]unversioned.GroupVersionResource, error) { - return d.serverPreferredResources(true) -} - -// ServerVersion retrieves and parses the server's version (git version). -func (d *DiscoveryClient) ServerVersion() (*version.Info, error) { - body, err := d.restClient.Get().AbsPath("/version").Do().Raw() - if err != nil { - return nil, err - } - var info version.Info - err = json.Unmarshal(body, &info) - if err != nil { - return nil, fmt.Errorf("got '%s': %v", string(body), err) - } - return &info, nil -} - -// SwaggerSchema retrieves and parses the swagger API schema the server supports. -func (d *DiscoveryClient) SwaggerSchema(version unversioned.GroupVersion) (*swagger.ApiDeclaration, error) { - if version.Empty() { - return nil, fmt.Errorf("groupVersion cannot be empty") - } - - groupList, err := d.ServerGroups() - if err != nil { - return nil, err - } - groupVersions := unversioned.ExtractGroupVersions(groupList) - // This check also takes care the case that kubectl is newer than the running endpoint - if stringDoesntExistIn(version.String(), groupVersions) { - return nil, fmt.Errorf("API version: %v is not supported by the server. Use one of: %v", version, groupVersions) - } - var path string - if len(d.LegacyPrefix) > 0 && version == v1.SchemeGroupVersion { - path = "/swaggerapi" + d.LegacyPrefix + "/" + version.Version - } else { - path = "/swaggerapi/apis/" + version.Group + "/" + version.Version - } - - body, err := d.restClient.Get().AbsPath(path).Do().Raw() - if err != nil { - return nil, err - } - var schema swagger.ApiDeclaration - err = json.Unmarshal(body, &schema) - if err != nil { - return nil, fmt.Errorf("got '%s': %v", string(body), err) - } - return &schema, nil -} - -func setDiscoveryDefaults(config *restclient.Config) error { - config.APIPath = "" - config.GroupVersion = nil - codec := runtime.NoopEncoder{Decoder: api.Codecs.UniversalDecoder()} - config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec}) - if len(config.UserAgent) == 0 { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - return nil -} - -// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. This client -// can be used to discover supported resources in the API server. -func NewDiscoveryClientForConfig(c *restclient.Config) (*DiscoveryClient, error) { - config := *c - if err := setDiscoveryDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.UnversionedRESTClientFor(&config) - return &DiscoveryClient{restClient: client, LegacyPrefix: "/api"}, err -} - -// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. If -// there is an error, it panics. 
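As a usage illustration, the DiscoveryClient above is normally constructed from a restclient.Config and then queried for the server version, groups, and preferred resources; the sketch below uses only functions defined in this removed file.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/client/typed/discovery"
)

// printServerInfo exercises ServerVersion, ServerGroups and
// ServerPreferredResources against a live API server.
func printServerInfo(cfg *restclient.Config) error {
	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		return err
	}

	v, err := dc.ServerVersion()
	if err != nil {
		return err
	}
	fmt.Printf("server version: %s\n", v.GitVersion)

	groups, err := dc.ServerGroups()
	if err != nil {
		return err
	}
	for _, g := range groups.Groups {
		fmt.Printf("group %q (preferred %s)\n", g.Name, g.PreferredVersion.GroupVersion)
	}

	gvrs, err := dc.ServerPreferredResources()
	if err != nil {
		return err
	}
	fmt.Printf("%d preferred resources\n", len(gvrs))
	return nil
}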
-func NewDiscoveryClientForConfigOrDie(c *restclient.Config) *DiscoveryClient { - client, err := NewDiscoveryClientForConfig(c) - if err != nil { - panic(err) - } - return client - -} - -// New creates a new DiscoveryClient for the given RESTClient. -func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient { - return &DiscoveryClient{restClient: c, LegacyPrefix: "/api"} -} - -func stringDoesntExistIn(str string, slice []string) bool { - for _, s := range slice { - if s == str { - return false - } - } - return true -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *DiscoveryClient) RESTClient() restclient.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/fake/BUILD b/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/fake/BUILD deleted file mode 100644 index 8e78b2c7d4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/fake/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["discovery.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/testing/core:go_default_library", - "//pkg/version:go_default_library", - "//vendor:github.com/emicklei/go-restful/swagger", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/fake/discovery.go b/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/fake/discovery.go deleted file mode 100644 index 596a51aac5..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/fake/discovery.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - "github.com/emicklei/go-restful/swagger" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/client/testing/core" - "k8s.io/kubernetes/pkg/version" -) - -type FakeDiscovery struct { - *core.Fake -} - -func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*unversioned.APIResourceList, error) { - action := core.ActionImpl{ - Verb: "get", - Resource: unversioned.GroupVersionResource{Resource: "resource"}, - } - c.Invokes(action, nil) - return c.Resources[groupVersion], nil -} - -func (c *FakeDiscovery) ServerResources() (map[string]*unversioned.APIResourceList, error) { - action := core.ActionImpl{ - Verb: "get", - Resource: unversioned.GroupVersionResource{Resource: "resource"}, - } - c.Invokes(action, nil) - return c.Resources, nil -} - -func (c *FakeDiscovery) ServerPreferredResources() ([]unversioned.GroupVersionResource, error) { - return nil, nil -} - -func (c *FakeDiscovery) ServerPreferredNamespacedResources() ([]unversioned.GroupVersionResource, error) { - return nil, nil -} - -func (c *FakeDiscovery) ServerGroups() (*unversioned.APIGroupList, error) { - return nil, nil -} - -func (c *FakeDiscovery) ServerVersion() (*version.Info, error) { - action := core.ActionImpl{} - action.Verb = "get" - action.Resource = unversioned.GroupVersionResource{Resource: "version"} - - c.Invokes(action, nil) - versionInfo := version.Get() - return &versionInfo, nil -} - -func (c *FakeDiscovery) SwaggerSchema(version unversioned.GroupVersion) (*swagger.ApiDeclaration, error) { - action := core.ActionImpl{} - action.Verb = "get" - if version == v1.SchemeGroupVersion { - action.Resource = unversioned.GroupVersionResource{Resource: "/swaggerapi/api/" + version.Version} - } else { - action.Resource = unversioned.GroupVersionResource{Resource: "/swaggerapi/apis/" + version.Group + "/" + version.Version} - } - - c.Invokes(action, nil) - return &swagger.ApiDeclaration{}, nil -} - -func (c *FakeDiscovery) RESTClient() restclient.Interface { - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/helper.go b/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/helper.go deleted file mode 100644 index 1ac4a25a41..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/helper.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package discovery - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/version" - // Import solely to initialize client auth plugins. - _ "k8s.io/kubernetes/plugin/pkg/client/auth" -) - -// MatchesServerVersion queries the server to compares the build version -// (git hash) of the client with the server's build version. It returns an error -// if it failed to contact the server or if the versions are not an exact match. 
-func MatchesServerVersion(client DiscoveryInterface) error { - cVer := version.Get() - sVer, err := client.ServerVersion() - if err != nil { - return fmt.Errorf("couldn't read version from server: %v\n", err) - } - // GitVersion includes GitCommit and GitTreeState, but best to be safe? - if cVer.GitVersion != sVer.GitVersion || cVer.GitCommit != sVer.GitCommit || cVer.GitTreeState != sVer.GitTreeState { - return fmt.Errorf("server version (%#v) differs from client version (%#v)!\n", sVer, cVer) - } - - return nil -} - -// NegotiateVersion queries the server's supported api versions to find -// a version that both client and server support. -// - If no version is provided, try registered client versions in order of -// preference. -// - If version is provided and the server does not support it, -// return an error. -func NegotiateVersion(client DiscoveryInterface, requiredGV *unversioned.GroupVersion, clientRegisteredGVs []unversioned.GroupVersion) (*unversioned.GroupVersion, error) { - clientVersions := sets.String{} - for _, gv := range clientRegisteredGVs { - clientVersions.Insert(gv.String()) - } - groups, err := client.ServerGroups() - if err != nil { - // This is almost always a connection error, and higher level code should treat this as a generic error, - // not a negotiation specific error. - return nil, err - } - versions := unversioned.ExtractGroupVersions(groups) - serverVersions := sets.String{} - for _, v := range versions { - serverVersions.Insert(v) - } - - // If version explicitly requested verify that both client and server support it. - // If server does not support warn, but try to negotiate a lower version. - if requiredGV != nil { - if !clientVersions.Has(requiredGV.String()) { - return nil, fmt.Errorf("client does not support API version %q; client supported API versions: %v", requiredGV, clientVersions) - - } - // If the server supports no versions, then we should just use the preferredGV - // This can happen because discovery fails due to 403 Forbidden errors - if len(serverVersions) == 0 { - return requiredGV, nil - } - if serverVersions.Has(requiredGV.String()) { - return requiredGV, nil - } - // If we are using an explicit config version the server does not support, fail. - return nil, fmt.Errorf("server does not support API version %q", requiredGV) - } - - for _, clientGV := range clientRegisteredGVs { - if serverVersions.Has(clientGV.String()) { - // Version was not explicitly requested in command config (--api-version). - // Ok to fall back to a supported version with a warning. - // TODO: caesarxuchao: enable the warning message when we have - // proper fix. Please refer to issue #14895. - // if len(version) != 0 { - // glog.Warningf("Server does not support API version '%s'. 
Falling back to '%s'.", version, clientVersion) - // } - t := clientGV - return &t, nil - } - } - - // if we have no server versions and we have no required version, choose the first clientRegisteredVersion - if len(serverVersions) == 0 && len(clientRegisteredGVs) > 0 { - return &clientRegisteredGVs[0], nil - } - - return nil, fmt.Errorf("failed to negotiate an api version; server supports: %v, client supports: %v", - serverVersions, clientVersions) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/restmapper.go b/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/restmapper.go deleted file mode 100644 index 793dd017ff..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/restmapper.go +++ /dev/null @@ -1,318 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package discovery - -import ( - "fmt" - "sync" - - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - - "github.com/golang/glog" -) - -// APIGroupResources is an API group with a mapping of versions to -// resources. -type APIGroupResources struct { - Group unversioned.APIGroup - // A mapping of version string to a slice of APIResources for - // that version. - VersionedResources map[string][]unversioned.APIResource -} - -// NewRESTMapper returns a PriorityRESTMapper based on the discovered -// groups and resources passed in. 
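A short sketch of NegotiateVersion in use; the clientGVs slice is illustrative rather than the set any particular caller registered, and passing nil for requiredGV means no version was explicitly requested.

package main

import (
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/client/typed/discovery"
)

// negotiate returns the first group version that both the client list and the
// server's discovery information agree on.
func negotiate(dc discovery.DiscoveryInterface) (*unversioned.GroupVersion, error) {
	clientGVs := []unversioned.GroupVersion{
		{Group: "", Version: "v1"},
		{Group: "extensions", Version: "v1beta1"},
	}
	return discovery.NegotiateVersion(dc, nil, clientGVs)
}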
-func NewRESTMapper(groupResources []*APIGroupResources, versionInterfaces meta.VersionInterfacesFunc) meta.RESTMapper { - unionMapper := meta.MultiRESTMapper{} - - var groupPriority []string - var resourcePriority []unversioned.GroupVersionResource - var kindPriority []unversioned.GroupVersionKind - - for _, group := range groupResources { - groupPriority = append(groupPriority, group.Group.Name) - - if len(group.Group.PreferredVersion.Version) != 0 { - preferred := group.Group.PreferredVersion.Version - if _, ok := group.VersionedResources[preferred]; ok { - resourcePriority = append(resourcePriority, unversioned.GroupVersionResource{ - Group: group.Group.Name, - Version: group.Group.PreferredVersion.Version, - Resource: meta.AnyResource, - }) - - kindPriority = append(kindPriority, unversioned.GroupVersionKind{ - Group: group.Group.Name, - Version: group.Group.PreferredVersion.Version, - Kind: meta.AnyKind, - }) - } - } - - for _, discoveryVersion := range group.Group.Versions { - resources, ok := group.VersionedResources[discoveryVersion.Version] - if !ok { - continue - } - - gv := unversioned.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version} - versionMapper := meta.NewDefaultRESTMapper([]unversioned.GroupVersion{gv}, versionInterfaces) - - for _, resource := range resources { - scope := meta.RESTScopeNamespace - if !resource.Namespaced { - scope = meta.RESTScopeRoot - } - versionMapper.Add(gv.WithKind(resource.Kind), scope) - // TODO only do this if it supports listing - versionMapper.Add(gv.WithKind(resource.Kind+"List"), scope) - } - // TODO why is this type not in discovery (at least for "v1") - versionMapper.Add(gv.WithKind("List"), meta.RESTScopeRoot) - unionMapper = append(unionMapper, versionMapper) - } - } - - for _, group := range groupPriority { - resourcePriority = append(resourcePriority, unversioned.GroupVersionResource{ - Group: group, - Version: meta.AnyVersion, - Resource: meta.AnyResource, - }) - kindPriority = append(kindPriority, unversioned.GroupVersionKind{ - Group: group, - Version: meta.AnyVersion, - Kind: meta.AnyKind, - }) - } - - return meta.PriorityRESTMapper{ - Delegate: unionMapper, - ResourcePriority: resourcePriority, - KindPriority: kindPriority, - } -} - -// GetAPIGroupResources uses the provided discovery client to gather -// discovery information and populate a slice of APIGroupResources. -func GetAPIGroupResources(cl DiscoveryInterface) ([]*APIGroupResources, error) { - apiGroups, err := cl.ServerGroups() - if err != nil { - return nil, err - } - var result []*APIGroupResources - for _, group := range apiGroups.Groups { - groupResources := &APIGroupResources{ - Group: group, - VersionedResources: make(map[string][]unversioned.APIResource), - } - for _, version := range group.Versions { - resources, err := cl.ServerResourcesForGroupVersion(version.GroupVersion) - if err != nil { - if errors.IsNotFound(err) { - continue // ignore as this can race with deletion of 3rd party APIs - } - return nil, err - } - groupResources.VersionedResources[version.Version] = resources.APIResources - } - result = append(result, groupResources) - } - return result, nil -} - -// DeferredDiscoveryRESTMapper is a RESTMapper that will defer -// initialization of the RESTMapper until the first mapping is -// requested. 
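For illustration, GetAPIGroupResources and NewRESTMapper above are typically combined to build a mapper from live discovery data; the VersionInterfaces helper from pkg/client/typed/dynamic (removed further down in this diff) is assumed to satisfy meta.VersionInterfacesFunc, and "ingresses" is just an example resource.

package main

import (
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/client/typed/discovery"
	"k8s.io/kubernetes/pkg/client/typed/dynamic"
)

// ingressKind resolves the "ingresses" resource to its kind through a mapper
// built entirely from discovery information.
func ingressKind(dc discovery.DiscoveryInterface) (unversioned.GroupVersionKind, error) {
	groupResources, err := discovery.GetAPIGroupResources(dc)
	if err != nil {
		return unversioned.GroupVersionKind{}, err
	}
	mapper := discovery.NewRESTMapper(groupResources, dynamic.VersionInterfaces)
	return mapper.KindFor(unversioned.GroupVersionResource{Resource: "ingresses"})
}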
-type DeferredDiscoveryRESTMapper struct { - initMu sync.Mutex - delegate meta.RESTMapper - cl CachedDiscoveryInterface - versionInterface meta.VersionInterfacesFunc -} - -// NewDeferredDiscoveryRESTMapper returns a -// DeferredDiscoveryRESTMapper that will lazily query the provided -// client for discovery information to do REST mappings. -func NewDeferredDiscoveryRESTMapper(cl CachedDiscoveryInterface, versionInterface meta.VersionInterfacesFunc) *DeferredDiscoveryRESTMapper { - return &DeferredDiscoveryRESTMapper{ - cl: cl, - versionInterface: versionInterface, - } -} - -func (d *DeferredDiscoveryRESTMapper) getDelegate() (meta.RESTMapper, error) { - d.initMu.Lock() - defer d.initMu.Unlock() - - if d.delegate != nil { - return d.delegate, nil - } - - groupResources, err := GetAPIGroupResources(d.cl) - if err != nil { - return nil, err - } - - d.delegate = NewRESTMapper(groupResources, d.versionInterface) - return d.delegate, err -} - -// Reset resets the internally cached Discovery information and will -// cause the next mapping request to re-discover. -func (d *DeferredDiscoveryRESTMapper) Reset() { - glog.V(5).Info("Invalidating discovery information") - - d.initMu.Lock() - defer d.initMu.Unlock() - - d.cl.Invalidate() - d.delegate = nil -} - -// KindFor takes a partial resource and returns back the single match. -// It returns an error if there are multiple matches. -func (d *DeferredDiscoveryRESTMapper) KindFor(resource unversioned.GroupVersionResource) (gvk unversioned.GroupVersionKind, err error) { - del, err := d.getDelegate() - if err != nil { - return unversioned.GroupVersionKind{}, err - } - gvk, err = del.KindFor(resource) - if err != nil && !d.cl.Fresh() { - d.Reset() - gvk, err = d.KindFor(resource) - } - return -} - -// KindsFor takes a partial resource and returns back the list of -// potential kinds in priority order. -func (d *DeferredDiscoveryRESTMapper) KindsFor(resource unversioned.GroupVersionResource) (gvks []unversioned.GroupVersionKind, err error) { - del, err := d.getDelegate() - if err != nil { - return nil, err - } - gvks, err = del.KindsFor(resource) - if len(gvks) == 0 && !d.cl.Fresh() { - d.Reset() - gvks, err = d.KindsFor(resource) - } - return -} - -// ResourceFor takes a partial resource and returns back the single -// match. It returns an error if there are multiple matches. -func (d *DeferredDiscoveryRESTMapper) ResourceFor(input unversioned.GroupVersionResource) (gvr unversioned.GroupVersionResource, err error) { - del, err := d.getDelegate() - if err != nil { - return unversioned.GroupVersionResource{}, err - } - gvr, err = del.ResourceFor(input) - if err != nil && !d.cl.Fresh() { - d.Reset() - gvr, err = d.ResourceFor(input) - } - return -} - -// ResourcesFor takes a partial resource and returns back the list of -// potential resource in priority order. -func (d *DeferredDiscoveryRESTMapper) ResourcesFor(input unversioned.GroupVersionResource) (gvrs []unversioned.GroupVersionResource, err error) { - del, err := d.getDelegate() - if err != nil { - return nil, err - } - gvrs, err = del.ResourcesFor(input) - if len(gvrs) == 0 && !d.cl.Fresh() { - d.Reset() - gvrs, err = d.ResourcesFor(input) - } - return -} - -// RESTMapping identifies a preferred resource mapping for the -// provided group kind. -func (d *DeferredDiscoveryRESTMapper) RESTMapping(gk unversioned.GroupKind, versions ...string) (m *meta.RESTMapping, err error) { - del, err := d.getDelegate() - if err != nil { - return nil, err - } - m, err = del.RESTMapping(gk, versions...) 
- if err != nil && !d.cl.Fresh() { - d.Reset() - m, err = d.RESTMapping(gk, versions...) - } - return -} - -// RESTMappings returns the RESTMappings for the provided group kind -// in a rough internal preferred order. If no kind is found, it will -// return a NoResourceMatchError. -func (d *DeferredDiscoveryRESTMapper) RESTMappings(gk unversioned.GroupKind) (ms []*meta.RESTMapping, err error) { - del, err := d.getDelegate() - if err != nil { - return nil, err - } - ms, err = del.RESTMappings(gk) - if len(ms) == 0 && !d.cl.Fresh() { - d.Reset() - ms, err = d.RESTMappings(gk) - } - return -} - -// AliasesForResource returns whether a resource has an alias or not. -func (d *DeferredDiscoveryRESTMapper) AliasesForResource(resource string) (as []string, ok bool) { - del, err := d.getDelegate() - if err != nil { - return nil, false - } - as, ok = del.AliasesForResource(resource) - if len(as) == 0 && !d.cl.Fresh() { - d.Reset() - as, ok = d.AliasesForResource(resource) - } - return -} - -// ResourceSingularizer converts a resource name from plural to -// singular (e.g., from pods to pod). -func (d *DeferredDiscoveryRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { - del, err := d.getDelegate() - if err != nil { - return resource, err - } - singular, err = del.ResourceSingularizer(resource) - if err != nil && !d.cl.Fresh() { - d.Reset() - singular, err = d.ResourceSingularizer(resource) - } - return -} - -func (d *DeferredDiscoveryRESTMapper) String() string { - del, err := d.getDelegate() - if err != nil { - return fmt.Sprintf("DeferredDiscoveryRESTMapper{%v}", err) - } - return fmt.Sprintf("DeferredDiscoveryRESTMapper{\n\t%v\n}", del) -} - -// Make sure it satisfies the interface -var _ meta.RESTMapper = &DeferredDiscoveryRESTMapper{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/unstructured.go b/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/unstructured.go deleted file mode 100644 index afa74e7dee..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/unstructured.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package discovery - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// UnstructuredObjectTyper provides a runtime.ObjectTyper implmentation for -// runtime.Unstructured object based on discovery information. -type UnstructuredObjectTyper struct { - registered map[unversioned.GroupVersionKind]bool -} - -// NewUnstructuredObjectTyper returns a runtime.ObjectTyper for -// unstructred objects based on discovery information. 
-func NewUnstructuredObjectTyper(groupResources []*APIGroupResources) *UnstructuredObjectTyper { - dot := &UnstructuredObjectTyper{registered: make(map[unversioned.GroupVersionKind]bool)} - for _, group := range groupResources { - for _, discoveryVersion := range group.Group.Versions { - resources, ok := group.VersionedResources[discoveryVersion.Version] - if !ok { - continue - } - - gv := unversioned.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version} - for _, resource := range resources { - dot.registered[gv.WithKind(resource.Kind)] = true - } - } - } - return dot -} - -// ObjectKind returns the group,version,kind of the provided object, or an error -// if the object in not *runtime.Unstructured or has no group,version,kind -// information. -func (d *UnstructuredObjectTyper) ObjectKind(obj runtime.Object) (unversioned.GroupVersionKind, error) { - if _, ok := obj.(*runtime.Unstructured); !ok { - return unversioned.GroupVersionKind{}, fmt.Errorf("type %T is invalid for dynamic object typer", obj) - } - - return obj.GetObjectKind().GroupVersionKind(), nil -} - -// ObjectKinds returns a slice of one element with the group,version,kind of the -// provided object, or an error if the object is not *runtime.Unstructured or -// has no group,version,kind information. unversionedType will always be false -// because runtime.Unstructured object should always have group,version,kind -// information set. -func (d *UnstructuredObjectTyper) ObjectKinds(obj runtime.Object) (gvks []unversioned.GroupVersionKind, unversionedType bool, err error) { - gvk, err := d.ObjectKind(obj) - if err != nil { - return nil, false, err - } - - return []unversioned.GroupVersionKind{gvk}, false, nil -} - -// Recognizes returns true if the provided group,version,kind was in the -// discovery information. -func (d *UnstructuredObjectTyper) Recognizes(gvk unversioned.GroupVersionKind) bool { - return d.registered[gvk] -} - -// IsUnversioned returns false always because *runtime.Unstructured objects -// should always have group,version,kind information set. ok will be true if the -// object's group,version,kind is registered. 
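A brief sketch of the typer above in use, reusing GetAPIGroupResources from restmapper.go; the Ingress group/version/kind is only an example lookup.

package main

import (
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/client/typed/discovery"
)

// knowsIngress reports whether the server's discovery data advertised the
// extensions/v1beta1 Ingress kind.
func knowsIngress(dc discovery.DiscoveryInterface) (bool, error) {
	groupResources, err := discovery.GetAPIGroupResources(dc)
	if err != nil {
		return false, err
	}
	typer := discovery.NewUnstructuredObjectTyper(groupResources)
	gvk := unversioned.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Ingress"}
	return typer.Recognizes(gvk), nil
}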
-func (d *UnstructuredObjectTyper) IsUnversioned(obj runtime.Object) (unversioned bool, ok bool) { - gvk, err := d.ObjectKind(obj) - if err != nil { - return false, false - } - - return false, d.registered[gvk] -} - -var _ runtime.ObjectTyper = &UnstructuredObjectTyper{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/typed/dynamic/BUILD b/vendor/k8s.io/kubernetes/pkg/client/typed/dynamic/BUILD deleted file mode 100644 index 5f2803a627..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/typed/dynamic/BUILD +++ /dev/null @@ -1,53 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "client.go", - "client_pool.go", - "dynamic_util.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/conversion/queryparams:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/runtime/serializer:go_default_library", - "//pkg/util/flowcontrol:go_default_library", - "//pkg/watch:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "client_test.go", - "dynamic_util_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/runtime/serializer/streaming:go_default_library", - "//pkg/watch:go_default_library", - "//pkg/watch/versioned:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/typed/dynamic/client.go b/vendor/k8s.io/kubernetes/pkg/client/typed/dynamic/client.go deleted file mode 100644 index 362fce5f4c..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/typed/dynamic/client.go +++ /dev/null @@ -1,298 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package dynamic provides a client interface to arbitrary Kubernetes -// APIs that exposes common high level operations and exposes common -// metadata. -package dynamic - -import ( - "encoding/json" - "errors" - "io" - "net/url" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/conversion/queryparams" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer" - "k8s.io/kubernetes/pkg/util/flowcontrol" - "k8s.io/kubernetes/pkg/watch" -) - -// Client is a Kubernetes client that allows you to access metadata -// and manipulate metadata of a Kubernetes API group. 
-type Client struct { - cl *restclient.RESTClient - parameterCodec runtime.ParameterCodec -} - -// NewClient returns a new client based on the passed in config. The -// codec is ignored, as the dynamic client uses it's own codec. -func NewClient(conf *restclient.Config) (*Client, error) { - // avoid changing the original config - confCopy := *conf - conf = &confCopy - - contentConfig := ContentConfig() - contentConfig.GroupVersion = conf.GroupVersion - if conf.NegotiatedSerializer != nil { - contentConfig.NegotiatedSerializer = conf.NegotiatedSerializer - } - conf.ContentConfig = contentConfig - - if conf.APIPath == "" { - conf.APIPath = "/api" - } - - if len(conf.UserAgent) == 0 { - conf.UserAgent = restclient.DefaultKubernetesUserAgent() - } - - cl, err := restclient.RESTClientFor(conf) - if err != nil { - return nil, err - } - - return &Client{cl: cl}, nil -} - -// GetRateLimiter returns rate limier. -func (c *Client) GetRateLimiter() flowcontrol.RateLimiter { - return c.cl.GetRateLimiter() -} - -// Resource returns an API interface to the specified resource for this client's -// group and version. If resource is not a namespaced resource, then namespace -// is ignored. The ResourceClient inherits the parameter codec of c. -func (c *Client) Resource(resource *unversioned.APIResource, namespace string) *ResourceClient { - return &ResourceClient{ - cl: c.cl, - resource: resource, - ns: namespace, - parameterCodec: c.parameterCodec, - } -} - -// ParameterCodec returns a client with the provided parameter codec. -func (c *Client) ParameterCodec(parameterCodec runtime.ParameterCodec) *Client { - return &Client{ - cl: c.cl, - parameterCodec: parameterCodec, - } -} - -// ResourceClient is an API interface to a specific resource under a -// dynamic client. -type ResourceClient struct { - cl *restclient.RESTClient - resource *unversioned.APIResource - ns string - parameterCodec runtime.ParameterCodec -} - -// List returns a list of objects for this resource. -func (rc *ResourceClient) List(opts runtime.Object) (runtime.Object, error) { - parameterEncoder := rc.parameterCodec - if parameterEncoder == nil { - parameterEncoder = defaultParameterEncoder - } - return rc.cl.Get(). - NamespaceIfScoped(rc.ns, rc.resource.Namespaced). - Resource(rc.resource.Name). - VersionedParams(opts, parameterEncoder). - Do(). - Get() -} - -// Get gets the resource with the specified name. -func (rc *ResourceClient) Get(name string) (*runtime.Unstructured, error) { - result := new(runtime.Unstructured) - err := rc.cl.Get(). - NamespaceIfScoped(rc.ns, rc.resource.Namespaced). - Resource(rc.resource.Name). - Name(name). - Do(). - Into(result) - return result, err -} - -// Delete deletes the resource with the specified name. -func (rc *ResourceClient) Delete(name string, opts *v1.DeleteOptions) error { - return rc.cl.Delete(). - NamespaceIfScoped(rc.ns, rc.resource.Namespaced). - Resource(rc.resource.Name). - Name(name). - Body(opts). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (rc *ResourceClient) DeleteCollection(deleteOptions *v1.DeleteOptions, listOptions runtime.Object) error { - parameterEncoder := rc.parameterCodec - if parameterEncoder == nil { - parameterEncoder = defaultParameterEncoder - } - return rc.cl.Delete(). - NamespaceIfScoped(rc.ns, rc.resource.Namespaced). - Resource(rc.resource.Name). - VersionedParams(listOptions, parameterEncoder). - Body(deleteOptions). - Do(). - Error() -} - -// Create creates the provided resource. 
-func (rc *ResourceClient) Create(obj *runtime.Unstructured) (*runtime.Unstructured, error) { - result := new(runtime.Unstructured) - err := rc.cl.Post(). - NamespaceIfScoped(rc.ns, rc.resource.Namespaced). - Resource(rc.resource.Name). - Body(obj). - Do(). - Into(result) - return result, err -} - -// Update updates the provided resource. -func (rc *ResourceClient) Update(obj *runtime.Unstructured) (*runtime.Unstructured, error) { - result := new(runtime.Unstructured) - if len(obj.GetName()) == 0 { - return result, errors.New("object missing name") - } - err := rc.cl.Put(). - NamespaceIfScoped(rc.ns, rc.resource.Namespaced). - Resource(rc.resource.Name). - Name(obj.GetName()). - Body(obj). - Do(). - Into(result) - return result, err -} - -// Watch returns a watch.Interface that watches the resource. -func (rc *ResourceClient) Watch(opts runtime.Object) (watch.Interface, error) { - parameterEncoder := rc.parameterCodec - if parameterEncoder == nil { - parameterEncoder = defaultParameterEncoder - } - return rc.cl.Get(). - Prefix("watch"). - NamespaceIfScoped(rc.ns, rc.resource.Namespaced). - Resource(rc.resource.Name). - VersionedParams(opts, parameterEncoder). - Watch() -} - -func (rc *ResourceClient) Patch(name string, pt api.PatchType, data []byte) (*runtime.Unstructured, error) { - result := new(runtime.Unstructured) - err := rc.cl.Patch(pt). - NamespaceIfScoped(rc.ns, rc.resource.Namespaced). - Resource(rc.resource.Name). - Name(name). - Body(data). - Do(). - Into(result) - return result, err -} - -// dynamicCodec is a codec that wraps the standard unstructured codec -// with special handling for Status objects. -type dynamicCodec struct{} - -func (dynamicCodec) Decode(data []byte, gvk *unversioned.GroupVersionKind, obj runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { - obj, gvk, err := runtime.UnstructuredJSONScheme.Decode(data, gvk, obj) - if err != nil { - return nil, nil, err - } - - if _, ok := obj.(*unversioned.Status); !ok && strings.ToLower(gvk.Kind) == "status" { - obj = &unversioned.Status{} - err := json.Unmarshal(data, obj) - if err != nil { - return nil, nil, err - } - } - - return obj, gvk, nil -} - -func (dynamicCodec) Encode(obj runtime.Object, w io.Writer) error { - return runtime.UnstructuredJSONScheme.Encode(obj, w) -} - -// ContentConfig returns a restclient.ContentConfig for dynamic types. -func ContentConfig() restclient.ContentConfig { - var jsonInfo runtime.SerializerInfo - // TODO: api.Codecs here should become "pkg/apis/server/scheme" which is the minimal core you need - // to talk to a kubernetes server - for _, info := range api.Codecs.SupportedMediaTypes() { - if info.MediaType == runtime.ContentTypeJSON { - jsonInfo = info - break - } - } - - jsonInfo.Serializer = dynamicCodec{} - jsonInfo.PrettySerializer = nil - return restclient.ContentConfig{ - AcceptContentTypes: runtime.ContentTypeJSON, - ContentType: runtime.ContentTypeJSON, - NegotiatedSerializer: serializer.NegotiatedSerializerWrapper(jsonInfo), - } -} - -// paramaterCodec is a codec converts an API object to query -// parameters without trying to convert to the target version. 
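Putting the client pieces above together, a hedged sketch of listing a namespaced resource through the dynamic client; the extensions/v1beta1 group version and the "ingresses" APIResource literal are illustrative values, not something the package mandates.

package main

import (
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/client/typed/dynamic"
	"k8s.io/kubernetes/pkg/runtime"
)

// listIngresses lists ingresses in a namespace via the dynamic client.
// The config's GroupVersion selects which API group the client talks to.
func listIngresses(cfg *restclient.Config, namespace string) (runtime.Object, error) {
	conf := *cfg // copy so the caller's config is left untouched
	conf.GroupVersion = &unversioned.GroupVersion{Group: "extensions", Version: "v1beta1"}

	cl, err := dynamic.NewClient(&conf)
	if err != nil {
		return nil, err
	}
	resource := &unversioned.APIResource{Name: "ingresses", Namespaced: true, Kind: "Ingress"}
	return cl.Resource(resource, namespace).List(&v1.ListOptions{})
}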
-type parameterCodec struct{} - -func (parameterCodec) EncodeParameters(obj runtime.Object, to unversioned.GroupVersion) (url.Values, error) { - return queryparams.Convert(obj) -} - -func (parameterCodec) DecodeParameters(parameters url.Values, from unversioned.GroupVersion, into runtime.Object) error { - return errors.New("DecodeParameters not implemented on dynamic parameterCodec") -} - -var defaultParameterEncoder runtime.ParameterCodec = parameterCodec{} - -type versionedParameterEncoderWithV1Fallback struct{} - -func (versionedParameterEncoderWithV1Fallback) EncodeParameters(obj runtime.Object, to unversioned.GroupVersion) (url.Values, error) { - ret, err := api.ParameterCodec.EncodeParameters(obj, to) - if err != nil && runtime.IsNotRegisteredError(err) { - // fallback to v1 - return api.ParameterCodec.EncodeParameters(obj, v1.SchemeGroupVersion) - } - return ret, err -} - -func (versionedParameterEncoderWithV1Fallback) DecodeParameters(parameters url.Values, from unversioned.GroupVersion, into runtime.Object) error { - return errors.New("DecodeParameters not implemented on versionedParameterEncoderWithV1Fallback") -} - -// VersionedParameterEncoderWithV1Fallback is useful for encoding query -// parameters for thirdparty resources. It tries to convert object to the -// specified version before converting it to query parameters, and falls back to -// converting to v1 if the object is not registered in the specified version. -// For the record, currently API server always treats query parameters sent to a -// thirdparty resource endpoint as v1. -var VersionedParameterEncoderWithV1Fallback runtime.ParameterCodec = versionedParameterEncoderWithV1Fallback{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/typed/dynamic/client_pool.go b/vendor/k8s.io/kubernetes/pkg/client/typed/dynamic/client_pool.go deleted file mode 100644 index 85f59cac50..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/typed/dynamic/client_pool.go +++ /dev/null @@ -1,115 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package dynamic - -import ( - "sync" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/client/restclient" -) - -// ClientPool manages a pool of dynamic clients. -type ClientPool interface { - // ClientForGroupVersionKind returns a client configured for the specified groupVersionResource. - // Resource may be empty. - ClientForGroupVersionResource(resource unversioned.GroupVersionResource) (*Client, error) - // ClientForGroupVersionKind returns a client configured for the specified groupVersionKind. - // Kind may be empty. - ClientForGroupVersionKind(kind unversioned.GroupVersionKind) (*Client, error) -} - -// APIPathResolverFunc knows how to convert a groupVersion to its API path. The Kind field is -// optional. -type APIPathResolverFunc func(kind unversioned.GroupVersionKind) string - -// LegacyAPIPathResolverFunc can resolve paths properly with the legacy API. 
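An illustrative sketch of the pool in use, relying on the NewClientPool constructor and the LegacyAPIPathResolverFunc helper defined just below; the Ingress GroupVersionKind is only an example.

package main

import (
	"k8s.io/kubernetes/pkg/api/meta"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/client/typed/dynamic"
)

// clientForIngress returns a dynamic client scoped to the Ingress kind's
// group version; the pool caches one client per group version.
func clientForIngress(conf *restclient.Config, mapper meta.RESTMapper) (*dynamic.Client, error) {
	pool := dynamic.NewClientPool(conf, mapper, dynamic.LegacyAPIPathResolverFunc)
	gvk := unversioned.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Ingress"}
	return pool.ClientForGroupVersionKind(gvk)
}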
-func LegacyAPIPathResolverFunc(kind unversioned.GroupVersionKind) string { - if len(kind.Group) == 0 { - return "/api" - } - return "/apis" -} - -// clientPoolImpl implements ClientPool and caches clients for the resource group versions -// is asked to retrieve. This type is thread safe. -type clientPoolImpl struct { - lock sync.RWMutex - config *restclient.Config - clients map[unversioned.GroupVersion]*Client - apiPathResolverFunc APIPathResolverFunc - mapper meta.RESTMapper -} - -// NewClientPool returns a ClientPool from the specified config. It reuses clients for the the same -// group version. It is expected this type may be wrapped by specific logic that special cases certain -// resources or groups. -func NewClientPool(config *restclient.Config, mapper meta.RESTMapper, apiPathResolverFunc APIPathResolverFunc) ClientPool { - confCopy := *config - - return &clientPoolImpl{ - config: &confCopy, - clients: map[unversioned.GroupVersion]*Client{}, - apiPathResolverFunc: apiPathResolverFunc, - mapper: mapper, - } -} - -// ClientForGroupVersionResource uses the provided RESTMapper to identify the appropriate resource. Resource may -// be empty. If no matching kind is found the underlying client for that group is still returned. -func (c *clientPoolImpl) ClientForGroupVersionResource(resource unversioned.GroupVersionResource) (*Client, error) { - kinds, err := c.mapper.KindsFor(resource) - if err != nil { - if meta.IsNoMatchError(err) { - return c.ClientForGroupVersionKind(unversioned.GroupVersionKind{Group: resource.Group, Version: resource.Version}) - } - return nil, err - } - return c.ClientForGroupVersionKind(kinds[0]) -} - -// ClientForGroupVersion returns a client for the specified groupVersion, creates one if none exists. Kind -// in the GroupVersionKind may be empty. -func (c *clientPoolImpl) ClientForGroupVersionKind(kind unversioned.GroupVersionKind) (*Client, error) { - c.lock.Lock() - defer c.lock.Unlock() - - gv := kind.GroupVersion() - - // do we have a client already configured? - if existingClient, found := c.clients[gv]; found { - return existingClient, nil - } - - // avoid changing the original config - confCopy := *c.config - conf := &confCopy - - // we need to set the api path based on group version, if no group, default to legacy path - conf.APIPath = c.apiPathResolverFunc(kind) - - // we need to make a client - conf.GroupVersion = &gv - - dynamicClient, err := NewClient(conf) - if err != nil { - return nil, err - } - c.clients[gv] = dynamicClient - return dynamicClient, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/typed/dynamic/dynamic_util.go b/vendor/k8s.io/kubernetes/pkg/client/typed/dynamic/dynamic_util.go deleted file mode 100644 index 4ffe313e61..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/typed/dynamic/dynamic_util.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package dynamic - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// VersionInterfaces provides an object converter and metadata -// accessor appropriate for use with unstructured objects. -func VersionInterfaces(unversioned.GroupVersion) (*meta.VersionInterfaces, error) { - return &meta.VersionInterfaces{ - ObjectConvertor: &runtime.UnstructuredObjectConverter{}, - MetadataAccessor: meta.NewAccessor(), - }, nil -} - -// NewDiscoveryRESTMapper returns a RESTMapper based on discovery information. -func NewDiscoveryRESTMapper(resources []*unversioned.APIResourceList, versionFunc meta.VersionInterfacesFunc) (*meta.DefaultRESTMapper, error) { - rm := meta.NewDefaultRESTMapper(nil, versionFunc) - for _, resourceList := range resources { - gv, err := unversioned.ParseGroupVersion(resourceList.GroupVersion) - if err != nil { - return nil, err - } - - for _, resource := range resourceList.APIResources { - gvk := gv.WithKind(resource.Kind) - scope := meta.RESTScopeRoot - if resource.Namespaced { - scope = meta.RESTScopeNamespace - } - rm.Add(gvk, scope) - } - } - return rm, nil -} - -// ObjectTyper provides an ObjectTyper implementation for -// runtime.Unstructured object based on discovery information. -type ObjectTyper struct { - registered map[unversioned.GroupVersionKind]bool -} - -// NewObjectTyper constructs an ObjectTyper from discovery information. -func NewObjectTyper(resources []*unversioned.APIResourceList) (runtime.ObjectTyper, error) { - ot := &ObjectTyper{registered: make(map[unversioned.GroupVersionKind]bool)} - for _, resourceList := range resources { - gv, err := unversioned.ParseGroupVersion(resourceList.GroupVersion) - if err != nil { - return nil, err - } - - for _, resource := range resourceList.APIResources { - ot.registered[gv.WithKind(resource.Kind)] = true - } - } - return ot, nil -} - -// ObjectKinds returns a slice of one element with the -// group,version,kind of the provided object, or an error if the -// object is not *runtime.Unstructured or has no group,version,kind -// information. -func (ot *ObjectTyper) ObjectKinds(obj runtime.Object) ([]unversioned.GroupVersionKind, bool, error) { - if _, ok := obj.(*runtime.Unstructured); !ok { - return nil, false, fmt.Errorf("type %T is invalid for dynamic object typer", obj) - } - return []unversioned.GroupVersionKind{obj.GetObjectKind().GroupVersionKind()}, false, nil -} - -// Recognizes returns true if the provided group,version,kind was in -// the discovery information. 
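A sketch combining this helper with the discovery client's ServerResources (removed earlier in this diff): the per-group-version map is flattened into the slice shape NewDiscoveryRESTMapper expects.

package main

import (
	"k8s.io/kubernetes/pkg/api/meta"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/client/typed/discovery"
	"k8s.io/kubernetes/pkg/client/typed/dynamic"
)

// discoveryRESTMapper builds a DefaultRESTMapper from everything the server
// advertises through discovery.
func discoveryRESTMapper(dc discovery.DiscoveryInterface) (*meta.DefaultRESTMapper, error) {
	byGroupVersion, err := dc.ServerResources()
	if err != nil {
		return nil, err
	}
	var lists []*unversioned.APIResourceList
	for _, l := range byGroupVersion {
		lists = append(lists, l)
	}
	return dynamic.NewDiscoveryRESTMapper(lists, dynamic.VersionInterfaces)
}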
-func (ot *ObjectTyper) Recognizes(gvk unversioned.GroupVersionKind) bool { - return ot.registered[gvk] -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/BUILD b/vendor/k8s.io/kubernetes/pkg/client/unversioned/BUILD deleted file mode 100644 index 190c1cbdeb..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/BUILD +++ /dev/null @@ -1,52 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "conditions.go", - "helper.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/apps:go_default_library", - "//pkg/apis/batch:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/apps/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/util/wait:go_default_library", - "//pkg/watch:go_default_library", - "//plugin/pkg/client/auth:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["helper_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/testapi:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/runtime:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/auth/BUILD b/vendor/k8s.io/kubernetes/pkg/client/unversioned/auth/BUILD deleted file mode 100644 index a8f4db86f5..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/auth/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["clientauth.go"], - tags = ["automanaged"], - deps = ["//pkg/client/restclient:go_default_library"], -) - -go_test( - name = "go_default_xtest", - srcs = ["clientauth_test.go"], - tags = ["automanaged"], - deps = ["//pkg/client/unversioned/auth:go_default_library"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/BUILD b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/BUILD deleted file mode 100644 index 5efd450488..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/BUILD +++ /dev/null @@ -1,63 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "auth_loaders.go", - "client_config.go", - "config.go", - "doc.go", - "loader.go", - "merged_client_builder.go", - "overrides.go", - "validation.go", - ], - tags = ["automanaged"], - deps = [ - 
"//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/unversioned/auth:go_default_library", - "//pkg/client/unversioned/clientcmd/api:go_default_library", - "//pkg/client/unversioned/clientcmd/api/latest:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/errors:go_default_library", - "//pkg/util/homedir:go_default_library", - "//pkg/util/validation:go_default_library", - "//vendor:github.com/golang/glog", - "//vendor:github.com/howeyc/gopass", - "//vendor:github.com/imdario/mergo", - "//vendor:github.com/spf13/pflag", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "client_config_test.go", - "loader_test.go", - "merged_client_builder_test.go", - "validation_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/client/restclient:go_default_library", - "//pkg/client/unversioned/clientcmd/api:go_default_library", - "//pkg/client/unversioned/clientcmd/api/latest:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/errors:go_default_library", - "//vendor:github.com/ghodss/yaml", - "//vendor:github.com/imdario/mergo", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/BUILD b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/BUILD deleted file mode 100644 index e854fcc4ac..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/BUILD +++ /dev/null @@ -1,36 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "helpers.go", - "register.go", - "types.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/runtime:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "helpers_test.go", - "types_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = ["//vendor:github.com/ghodss/yaml"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest/BUILD b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest/BUILD deleted file mode 100644 index b72bfcb01f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["latest.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/client/unversioned/clientcmd/api:go_default_library", - "//pkg/client/unversioned/clientcmd/api/v1:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/runtime/serializer/json:go_default_library", - "//pkg/runtime/serializer/versioning:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/register.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/register.go deleted file mode 100644 index 9c4c0f1db4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/register.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// SchemeGroupVersion is group version used to register these objects -// TODO this should be in the "kubeconfig" group -var SchemeGroupVersion = unversioned.GroupVersion{Group: "", Version: runtime.APIVersionInternal} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Config{}, - ) - return nil -} - -func (obj *Config) GetObjectKind() unversioned.ObjectKind { return obj } -func (obj *Config) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { - obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() -} -func (obj *Config) GroupVersionKind() unversioned.GroupVersionKind { - return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/types.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/types.go deleted file mode 100644 index c7ddec8873..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/types.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "k8s.io/kubernetes/pkg/runtime" -) - -// Where possible, json tags match the cli argument names. -// Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted. - -// Config holds the information needed to build connect to remote kubernetes clusters as a given user -// IMPORTANT if you add fields to this struct, please update IsConfigEmpty() -type Config struct { - // Legacy field from pkg/api/types.go TypeMeta. - // TODO(jlowdermilk): remove this after eliminating downstream dependencies. - // +optional - Kind string `json:"kind,omitempty"` - // DEPRECATED: APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). - // Because a cluster can run multiple API groups and potentially multiple versions of each, it no longer makes sense to specify - // a single value for the cluster version. - // This field isn't really needed anyway, so we are deprecating it without replacement. - // It will be ignored if it is present. 
- // +optional - APIVersion string `json:"apiVersion,omitempty"` - // Preferences holds general information to be use for cli interactions - Preferences Preferences `json:"preferences"` - // Clusters is a map of referencable names to cluster configs - Clusters map[string]*Cluster `json:"clusters"` - // AuthInfos is a map of referencable names to user configs - AuthInfos map[string]*AuthInfo `json:"users"` - // Contexts is a map of referencable names to context configs - Contexts map[string]*Context `json:"contexts"` - // CurrentContext is the name of the context that you would like to use by default - CurrentContext string `json:"current-context"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - // +optional - Extensions map[string]runtime.Object `json:"extensions,omitempty"` -} - -// IMPORTANT if you add fields to this struct, please update IsConfigEmpty() -type Preferences struct { - // +optional - Colors bool `json:"colors,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - // +optional - Extensions map[string]runtime.Object `json:"extensions,omitempty"` -} - -// Cluster contains information about how to communicate with a kubernetes cluster -type Cluster struct { - // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. - LocationOfOrigin string - // Server is the address of the kubernetes cluster (https://hostname:port). - Server string `json:"server"` - // APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). - // +optional - APIVersion string `json:"api-version,omitempty"` - // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. - // +optional - InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` - // CertificateAuthority is the path to a cert file for the certificate authority. - // +optional - CertificateAuthority string `json:"certificate-authority,omitempty"` - // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority - // +optional - CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - // +optional - Extensions map[string]runtime.Object `json:"extensions,omitempty"` -} - -// AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. -type AuthInfo struct { - // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. - LocationOfOrigin string - // ClientCertificate is the path to a client cert file for TLS. - // +optional - ClientCertificate string `json:"client-certificate,omitempty"` - // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate - // +optional - ClientCertificateData []byte `json:"client-certificate-data,omitempty"` - // ClientKey is the path to a client key file for TLS. - // +optional - ClientKey string `json:"client-key,omitempty"` - // ClientKeyData contains PEM-encoded data from a client key file for TLS. 
Overrides ClientKey - // +optional - ClientKeyData []byte `json:"client-key-data,omitempty"` - // Token is the bearer token for authentication to the kubernetes cluster. - // +optional - Token string `json:"token,omitempty"` - // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence. - // +optional - TokenFile string `json:"tokenFile,omitempty"` - // Impersonate is the username to act-as. - // +optional - Impersonate string `json:"act-as,omitempty"` - // Username is the username for basic authentication to the kubernetes cluster. - // +optional - Username string `json:"username,omitempty"` - // Password is the password for basic authentication to the kubernetes cluster. - // +optional - Password string `json:"password,omitempty"` - // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. - // +optional - AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - // +optional - Extensions map[string]runtime.Object `json:"extensions,omitempty"` -} - -// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) -type Context struct { - // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. - LocationOfOrigin string - // Cluster is the name of the cluster for this context - Cluster string `json:"cluster"` - // AuthInfo is the name of the authInfo for this context - AuthInfo string `json:"user"` - // Namespace is the default namespace to use on unspecified requests - // +optional - Namespace string `json:"namespace,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - // +optional - Extensions map[string]runtime.Object `json:"extensions,omitempty"` -} - -// AuthProviderConfig holds the configuration for a specified auth provider. 
-type AuthProviderConfig struct { - Name string `json:"name"` - // +optional - Config map[string]string `json:"config,omitempty"` -} - -// NewConfig is a convenience function that returns a new Config object with non-nil maps -func NewConfig() *Config { - return &Config{ - Preferences: *NewPreferences(), - Clusters: make(map[string]*Cluster), - AuthInfos: make(map[string]*AuthInfo), - Contexts: make(map[string]*Context), - Extensions: make(map[string]runtime.Object), - } -} - -// NewConfig is a convenience function that returns a new Config object with non-nil maps -func NewContext() *Context { - return &Context{Extensions: make(map[string]runtime.Object)} -} - -// NewConfig is a convenience function that returns a new Config object with non-nil maps -func NewCluster() *Cluster { - return &Cluster{Extensions: make(map[string]runtime.Object)} -} - -// NewConfig is a convenience function that returns a new Config object with non-nil maps -func NewAuthInfo() *AuthInfo { - return &AuthInfo{Extensions: make(map[string]runtime.Object)} -} - -// NewConfig is a convenience function that returns a new Config object with non-nil maps -func NewPreferences() *Preferences { - return &Preferences{Extensions: make(map[string]runtime.Object)} -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/BUILD deleted file mode 100644 index dcd283d390..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/BUILD +++ /dev/null @@ -1,27 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "register.go", - "types.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/client/unversioned/clientcmd/api:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/conversion.go deleted file mode 100644 index f21072e35d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/conversion.go +++ /dev/null @@ -1,227 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "sort" - - "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" -) - -func addConversionFuncs(scheme *runtime.Scheme) error { - return scheme.AddConversionFuncs( - func(in *Cluster, out *api.Cluster, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - func(in *api.Cluster, out *Cluster, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - func(in *Preferences, out *api.Preferences, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - func(in *api.Preferences, out *Preferences, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - func(in *AuthInfo, out *api.AuthInfo, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - func(in *api.AuthInfo, out *AuthInfo, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - func(in *Context, out *api.Context, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - func(in *api.Context, out *Context, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - - func(in *Config, out *api.Config, s conversion.Scope) error { - out.CurrentContext = in.CurrentContext - if err := s.Convert(&in.Preferences, &out.Preferences, 0); err != nil { - return err - } - - out.Clusters = make(map[string]*api.Cluster) - if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil { - return err - } - out.AuthInfos = make(map[string]*api.AuthInfo) - if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil { - return err - } - out.Contexts = make(map[string]*api.Context) - if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil { - return err - } - out.Extensions = make(map[string]runtime.Object) - if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil { - return err - } - return nil - }, - func(in *api.Config, out *Config, s conversion.Scope) error { - out.CurrentContext = in.CurrentContext - if err := s.Convert(&in.Preferences, &out.Preferences, 0); err != nil { - return err - } - - out.Clusters = make([]NamedCluster, 0, 0) - if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil { - return err - } - out.AuthInfos = make([]NamedAuthInfo, 0, 0) - if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil { - return err - } - out.Contexts = make([]NamedContext, 0, 0) - if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil { - return err - } - out.Extensions = make([]NamedExtension, 0, 0) - if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil { - return err - } - return nil - }, - func(in *[]NamedCluster, out *map[string]*api.Cluster, s conversion.Scope) error { - for _, curr := range *in { - newCluster := api.NewCluster() - if err := s.Convert(&curr.Cluster, newCluster, 0); err != nil { - return err - } - (*out)[curr.Name] = newCluster - } - - return nil - }, - func(in *map[string]*api.Cluster, out *[]NamedCluster, s conversion.Scope) error { - allKeys := make([]string, 0, len(*in)) - for key := range *in { - allKeys = append(allKeys, key) - } - sort.Strings(allKeys) - - for _, key := range allKeys { - newCluster := (*in)[key] - oldCluster := &Cluster{} - if err := s.Convert(newCluster, oldCluster, 0); err != nil { - return 
err - } - - namedCluster := NamedCluster{key, *oldCluster} - *out = append(*out, namedCluster) - } - - return nil - }, - func(in *[]NamedAuthInfo, out *map[string]*api.AuthInfo, s conversion.Scope) error { - for _, curr := range *in { - newAuthInfo := api.NewAuthInfo() - if err := s.Convert(&curr.AuthInfo, newAuthInfo, 0); err != nil { - return err - } - (*out)[curr.Name] = newAuthInfo - } - - return nil - }, - func(in *map[string]*api.AuthInfo, out *[]NamedAuthInfo, s conversion.Scope) error { - allKeys := make([]string, 0, len(*in)) - for key := range *in { - allKeys = append(allKeys, key) - } - sort.Strings(allKeys) - - for _, key := range allKeys { - newAuthInfo := (*in)[key] - oldAuthInfo := &AuthInfo{} - if err := s.Convert(newAuthInfo, oldAuthInfo, 0); err != nil { - return err - } - - namedAuthInfo := NamedAuthInfo{key, *oldAuthInfo} - *out = append(*out, namedAuthInfo) - } - - return nil - }, - func(in *[]NamedContext, out *map[string]*api.Context, s conversion.Scope) error { - for _, curr := range *in { - newContext := api.NewContext() - if err := s.Convert(&curr.Context, newContext, 0); err != nil { - return err - } - (*out)[curr.Name] = newContext - } - - return nil - }, - func(in *map[string]*api.Context, out *[]NamedContext, s conversion.Scope) error { - allKeys := make([]string, 0, len(*in)) - for key := range *in { - allKeys = append(allKeys, key) - } - sort.Strings(allKeys) - - for _, key := range allKeys { - newContext := (*in)[key] - oldContext := &Context{} - if err := s.Convert(newContext, oldContext, 0); err != nil { - return err - } - - namedContext := NamedContext{key, *oldContext} - *out = append(*out, namedContext) - } - - return nil - }, - func(in *[]NamedExtension, out *map[string]runtime.Object, s conversion.Scope) error { - for _, curr := range *in { - var newExtension runtime.Object - if err := s.Convert(&curr.Extension, &newExtension, 0); err != nil { - return err - } - (*out)[curr.Name] = newExtension - } - - return nil - }, - func(in *map[string]runtime.Object, out *[]NamedExtension, s conversion.Scope) error { - allKeys := make([]string, 0, len(*in)) - for key := range *in { - allKeys = append(allKeys, key) - } - sort.Strings(allKeys) - - for _, key := range allKeys { - newExtension := (*in)[key] - oldExtension := &runtime.RawExtension{} - if err := s.Convert(newExtension, oldExtension, 0); err != nil { - return err - } - - namedExtension := NamedExtension{key, *oldExtension} - *out = append(*out, namedExtension) - } - - return nil - }, - ) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/register.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/register.go deleted file mode 100644 index 015901d2a1..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/register.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// SchemeGroupVersion is group version used to register these objects -// TODO this should be in the "kubeconfig" group -var SchemeGroupVersion = unversioned.GroupVersion{Group: "", Version: "v1"} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addConversionFuncs) - AddToScheme = SchemeBuilder.AddToScheme -) - -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Config{}, - ) - return nil -} - -func (obj *Config) GetObjectKind() unversioned.ObjectKind { return obj } -func (obj *Config) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { - obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() -} -func (obj *Config) GroupVersionKind() unversioned.GroupVersionKind { - return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/types.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/types.go deleted file mode 100644 index 035fbdf76c..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/types.go +++ /dev/null @@ -1,170 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/kubernetes/pkg/runtime" -) - -// Where possible, json tags match the cli argument names. -// Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted. - -// Config holds the information needed to build connect to remote kubernetes clusters as a given user -type Config struct { - // Legacy field from pkg/api/types.go TypeMeta. - // TODO(jlowdermilk): remove this after eliminating downstream dependencies. - // +optional - Kind string `json:"kind,omitempty"` - // DEPRECATED: APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). - // Because a cluster can run multiple API groups and potentially multiple versions of each, it no longer makes sense to specify - // a single value for the cluster version. - // This field isn't really needed anyway, so we are deprecating it without replacement. - // It will be ignored if it is present. - // +optional - APIVersion string `json:"apiVersion,omitempty"` - // Preferences holds general information to be use for cli interactions - Preferences Preferences `json:"preferences"` - // Clusters is a map of referencable names to cluster configs - Clusters []NamedCluster `json:"clusters"` - // AuthInfos is a map of referencable names to user configs - AuthInfos []NamedAuthInfo `json:"users"` - // Contexts is a map of referencable names to context configs - Contexts []NamedContext `json:"contexts"` - // CurrentContext is the name of the context that you would like to use by default - CurrentContext string `json:"current-context"` - // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields - // +optional - Extensions []NamedExtension `json:"extensions,omitempty"` -} - -type Preferences struct { - // +optional - Colors bool `json:"colors,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - // +optional - Extensions []NamedExtension `json:"extensions,omitempty"` -} - -// Cluster contains information about how to communicate with a kubernetes cluster -type Cluster struct { - // Server is the address of the kubernetes cluster (https://hostname:port). - Server string `json:"server"` - // APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). - // +optional - APIVersion string `json:"api-version,omitempty"` - // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. - // +optional - InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` - // CertificateAuthority is the path to a cert file for the certificate authority. - // +optional - CertificateAuthority string `json:"certificate-authority,omitempty"` - // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority - // +optional - CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - // +optional - Extensions []NamedExtension `json:"extensions,omitempty"` -} - -// AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. -type AuthInfo struct { - // ClientCertificate is the path to a client cert file for TLS. - // +optional - ClientCertificate string `json:"client-certificate,omitempty"` - // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate - // +optional - ClientCertificateData []byte `json:"client-certificate-data,omitempty"` - // ClientKey is the path to a client key file for TLS. - // +optional - ClientKey string `json:"client-key,omitempty"` - // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey - // +optional - ClientKeyData []byte `json:"client-key-data,omitempty"` - // Token is the bearer token for authentication to the kubernetes cluster. - // +optional - Token string `json:"token,omitempty"` - // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence. - // +optional - TokenFile string `json:"tokenFile,omitempty"` - // Impersonate is the username to imperonate. The name matches the flag. - // +optional - Impersonate string `json:"as,omitempty"` - // Username is the username for basic authentication to the kubernetes cluster. - // +optional - Username string `json:"username,omitempty"` - // Password is the password for basic authentication to the kubernetes cluster. - // +optional - Password string `json:"password,omitempty"` - // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. - // +optional - AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` - // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields - // +optional - Extensions []NamedExtension `json:"extensions,omitempty"` -} - -// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) -type Context struct { - // Cluster is the name of the cluster for this context - Cluster string `json:"cluster"` - // AuthInfo is the name of the authInfo for this context - AuthInfo string `json:"user"` - // Namespace is the default namespace to use on unspecified requests - // +optional - Namespace string `json:"namespace,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - // +optional - Extensions []NamedExtension `json:"extensions,omitempty"` -} - -// NamedCluster relates nicknames to cluster information -type NamedCluster struct { - // Name is the nickname for this Cluster - Name string `json:"name"` - // Cluster holds the cluster information - Cluster Cluster `json:"cluster"` -} - -// NamedContext relates nicknames to context information -type NamedContext struct { - // Name is the nickname for this Context - Name string `json:"name"` - // Context holds the context information - Context Context `json:"context"` -} - -// NamedAuthInfo relates nicknames to auth information -type NamedAuthInfo struct { - // Name is the nickname for this AuthInfo - Name string `json:"name"` - // AuthInfo holds the auth information - AuthInfo AuthInfo `json:"user"` -} - -// NamedExtension relates nicknames to extension information -type NamedExtension struct { - // Name is the nickname for this Extension - Name string `json:"name"` - // Extension holds the extension information - Extension runtime.RawExtension `json:"extension"` -} - -// AuthProviderConfig holds the configuration for a specified auth provider. -type AuthProviderConfig struct { - Name string `json:"name"` - Config map[string]string `json:"config"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/config.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/config.go deleted file mode 100644 index 9df69a7412..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/config.go +++ /dev/null @@ -1,472 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package clientcmd - -import ( - "errors" - "os" - "path" - "path/filepath" - "reflect" - "sort" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/client/restclient" - clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" -) - -// ConfigAccess is used by subcommands and methods in this package to load and modify the appropriate config files -type ConfigAccess interface { - // GetLoadingPrecedence returns the slice of files that should be used for loading and inspecting the config - GetLoadingPrecedence() []string - // GetStartingConfig returns the config that subcommands should being operating against. It may or may not be merged depending on loading rules - GetStartingConfig() (*clientcmdapi.Config, error) - // GetDefaultFilename returns the name of the file you should write into (create if necessary), if you're trying to create a new stanza as opposed to updating an existing one. - GetDefaultFilename() string - // IsExplicitFile indicates whether or not this command is interested in exactly one file. This implementation only ever does that via a flag, but implementations that handle local, global, and flags may have more - IsExplicitFile() bool - // GetExplicitFile returns the particular file this command is operating against. This implementation only ever has one, but implementations that handle local, global, and flags may have more - GetExplicitFile() string -} - -type PathOptions struct { - // GlobalFile is the full path to the file to load as the global (final) option - GlobalFile string - // EnvVar is the env var name that points to the list of kubeconfig files to load - EnvVar string - // ExplicitFileFlag is the name of the flag to use for prompting for the kubeconfig file - ExplicitFileFlag string - - // GlobalFileSubpath is an optional value used for displaying help - GlobalFileSubpath string - - LoadingRules *ClientConfigLoadingRules -} - -func (o *PathOptions) GetEnvVarFiles() []string { - if len(o.EnvVar) == 0 { - return []string{} - } - - envVarValue := os.Getenv(o.EnvVar) - if len(envVarValue) == 0 { - return []string{} - } - - return filepath.SplitList(envVarValue) -} - -func (o *PathOptions) GetLoadingPrecedence() []string { - if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 { - return envVarFiles - } - - return []string{o.GlobalFile} -} - -func (o *PathOptions) GetStartingConfig() (*clientcmdapi.Config, error) { - // don't mutate the original - loadingRules := *o.LoadingRules - loadingRules.Precedence = o.GetLoadingPrecedence() - - clientConfig := NewNonInteractiveDeferredLoadingClientConfig(&loadingRules, &ConfigOverrides{}) - rawConfig, err := clientConfig.RawConfig() - if os.IsNotExist(err) { - return clientcmdapi.NewConfig(), nil - } - if err != nil { - return nil, err - } - - return &rawConfig, nil -} - -func (o *PathOptions) GetDefaultFilename() string { - if o.IsExplicitFile() { - return o.GetExplicitFile() - } - - if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 { - if len(envVarFiles) == 1 { - return envVarFiles[0] - } - - // if any of the envvar files already exists, return it - for _, envVarFile := range envVarFiles { - if _, err := os.Stat(envVarFile); err == nil { - return envVarFile - } - } - - // otherwise, return the last one in the list - return envVarFiles[len(envVarFiles)-1] - } - - return o.GlobalFile -} - -func (o *PathOptions) IsExplicitFile() bool { - if len(o.LoadingRules.ExplicitPath) > 0 { - return true - } - - return false -} - -func (o *PathOptions) GetExplicitFile() string { - return 
o.LoadingRules.ExplicitPath -} - -func NewDefaultPathOptions() *PathOptions { - ret := &PathOptions{ - GlobalFile: RecommendedHomeFile, - EnvVar: RecommendedConfigPathEnvVar, - ExplicitFileFlag: RecommendedConfigPathFlag, - - GlobalFileSubpath: path.Join(RecommendedHomeDir, RecommendedFileName), - - LoadingRules: NewDefaultClientConfigLoadingRules(), - } - ret.LoadingRules.DoNotResolvePaths = true - - return ret -} - -// ModifyConfig takes a Config object, iterates through Clusters, AuthInfos, and Contexts, uses the LocationOfOrigin if specified or -// uses the default destination file to write the results into. This results in multiple file reads, but it's very easy to follow. -// Preferences and CurrentContext should always be set in the default destination file. Since we can't distinguish between empty and missing values -// (no nil strings), we're forced have separate handling for them. In the kubeconfig cases, newConfig should have at most one difference, -// that means that this code will only write into a single file. If you want to relativizePaths, you must provide a fully qualified path in any -// modified element. -func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config, relativizePaths bool) error { - possibleSources := configAccess.GetLoadingPrecedence() - // sort the possible kubeconfig files so we always "lock" in the same order - // to avoid deadlock (note: this can fail w/ symlinks, but... come on). - sort.Strings(possibleSources) - for _, filename := range possibleSources { - if err := lockFile(filename); err != nil { - return err - } - defer unlockFile(filename) - } - - startingConfig, err := configAccess.GetStartingConfig() - if err != nil { - return err - } - - // We need to find all differences, locate their original files, read a partial config to modify only that stanza and write out the file. - // Special case the test for current context and preferences since those always write to the default file. - if reflect.DeepEqual(*startingConfig, newConfig) { - // nothing to do - return nil - } - - if startingConfig.CurrentContext != newConfig.CurrentContext { - if err := writeCurrentContext(configAccess, newConfig.CurrentContext); err != nil { - return err - } - } - - if !reflect.DeepEqual(startingConfig.Preferences, newConfig.Preferences) { - if err := writePreferences(configAccess, newConfig.Preferences); err != nil { - return err - } - } - - // Search every cluster, authInfo, and context. 
First from new to old for differences, then from old to new for deletions - for key, cluster := range newConfig.Clusters { - startingCluster, exists := startingConfig.Clusters[key] - if !reflect.DeepEqual(cluster, startingCluster) || !exists { - destinationFile := cluster.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - t := *cluster - - configToWrite.Clusters[key] = &t - configToWrite.Clusters[key].LocationOfOrigin = destinationFile - if relativizePaths { - if err := RelativizeClusterLocalPaths(configToWrite.Clusters[key]); err != nil { - return err - } - } - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, context := range newConfig.Contexts { - startingContext, exists := startingConfig.Contexts[key] - if !reflect.DeepEqual(context, startingContext) || !exists { - destinationFile := context.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - configToWrite.Contexts[key] = context - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, authInfo := range newConfig.AuthInfos { - startingAuthInfo, exists := startingConfig.AuthInfos[key] - if !reflect.DeepEqual(authInfo, startingAuthInfo) || !exists { - destinationFile := authInfo.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - t := *authInfo - configToWrite.AuthInfos[key] = &t - configToWrite.AuthInfos[key].LocationOfOrigin = destinationFile - if relativizePaths { - if err := RelativizeAuthInfoLocalPaths(configToWrite.AuthInfos[key]); err != nil { - return err - } - } - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, cluster := range startingConfig.Clusters { - if _, exists := newConfig.Clusters[key]; !exists { - destinationFile := cluster.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - delete(configToWrite.Clusters, key) - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, context := range startingConfig.Contexts { - if _, exists := newConfig.Contexts[key]; !exists { - destinationFile := context.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - delete(configToWrite.Contexts, key) - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, authInfo := range startingConfig.AuthInfos { - if _, exists := newConfig.AuthInfos[key]; !exists { - destinationFile := authInfo.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - delete(configToWrite.AuthInfos, key) - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - 
} - } - - return nil -} - -func PersisterForUser(configAccess ConfigAccess, user string) restclient.AuthProviderConfigPersister { - return &persister{configAccess, user} -} - -type persister struct { - configAccess ConfigAccess - user string -} - -func (p *persister) Persist(config map[string]string) error { - newConfig, err := p.configAccess.GetStartingConfig() - if err != nil { - return err - } - authInfo, ok := newConfig.AuthInfos[p.user] - if ok && authInfo.AuthProvider != nil { - authInfo.AuthProvider.Config = config - ModifyConfig(p.configAccess, *newConfig, false) - } - return nil -} - -// writeCurrentContext takes three possible paths. -// If newCurrentContext is the same as the startingConfig's current context, then we exit. -// If newCurrentContext has a value, then that value is written into the default destination file. -// If newCurrentContext is empty, then we find the config file that is setting the CurrentContext and clear the value from that file -func writeCurrentContext(configAccess ConfigAccess, newCurrentContext string) error { - if startingConfig, err := configAccess.GetStartingConfig(); err != nil { - return err - } else if startingConfig.CurrentContext == newCurrentContext { - return nil - } - - if configAccess.IsExplicitFile() { - file := configAccess.GetExplicitFile() - currConfig, err := getConfigFromFile(file) - if err != nil { - return err - } - currConfig.CurrentContext = newCurrentContext - if err := WriteToFile(*currConfig, file); err != nil { - return err - } - - return nil - } - - if len(newCurrentContext) > 0 { - destinationFile := configAccess.GetDefaultFilename() - config, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - config.CurrentContext = newCurrentContext - - if err := WriteToFile(*config, destinationFile); err != nil { - return err - } - - return nil - } - - // we're supposed to be clearing the current context. We need to find the first spot in the chain that is setting it and clear it - for _, file := range configAccess.GetLoadingPrecedence() { - if _, err := os.Stat(file); err == nil { - currConfig, err := getConfigFromFile(file) - if err != nil { - return err - } - - if len(currConfig.CurrentContext) > 0 { - currConfig.CurrentContext = newCurrentContext - if err := WriteToFile(*currConfig, file); err != nil { - return err - } - - return nil - } - } - } - - return errors.New("no config found to write context") -} - -func writePreferences(configAccess ConfigAccess, newPrefs clientcmdapi.Preferences) error { - if startingConfig, err := configAccess.GetStartingConfig(); err != nil { - return err - } else if reflect.DeepEqual(startingConfig.Preferences, newPrefs) { - return nil - } - - if configAccess.IsExplicitFile() { - file := configAccess.GetExplicitFile() - currConfig, err := getConfigFromFile(file) - if err != nil { - return err - } - currConfig.Preferences = newPrefs - if err := WriteToFile(*currConfig, file); err != nil { - return err - } - - return nil - } - - for _, file := range configAccess.GetLoadingPrecedence() { - currConfig, err := getConfigFromFile(file) - if err != nil { - return err - } - - if !reflect.DeepEqual(currConfig.Preferences, newPrefs) { - currConfig.Preferences = newPrefs - if err := WriteToFile(*currConfig, file); err != nil { - return err - } - - return nil - } - } - - return errors.New("no config found to write preferences") -} - -// getConfigFromFile tries to read a kubeconfig file and if it can't, returns an error. One exception, missing files result in empty configs, not an error. 
-func getConfigFromFile(filename string) (*clientcmdapi.Config, error) { - config, err := LoadFromFile(filename) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - if config == nil { - config = clientcmdapi.NewConfig() - } - return config, nil -} - -// GetConfigFromFileOrDie tries to read a kubeconfig file and if it can't, it calls exit. One exception, missing files result in empty configs, not an exit -func GetConfigFromFileOrDie(filename string) *clientcmdapi.Config { - config, err := getConfigFromFile(filename) - if err != nil { - glog.FatalDepth(1, err) - } - - return config -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/doc.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/doc.go deleted file mode 100644 index 30ef6f36da..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/doc.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package clientcmd provides one stop shopping for building a working client from a fixed config, -from a .kubeconfig file, from command line flags, or from any merged combination. - -Sample usage from merged .kubeconfig files (local directory, home directory) - - loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() - // if you want to change the loading rules (which files in which order), you can do so here - - configOverrides := &clientcmd.ConfigOverrides{} - // if you want to change override values or bind them to flags, there are methods to help you - - kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) - config, err := kubeConfig.ClientConfig() - if err != nil { - // Do something - } - client, err := unversioned.New(config) - // ... -*/ -package clientcmd diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/validation.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/validation.go deleted file mode 100644 index 63f8adec62..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/validation.go +++ /dev/null @@ -1,270 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package clientcmd - -import ( - "errors" - "fmt" - "os" - "reflect" - "strings" - - clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/validation" -) - -var ( - ErrNoContext = errors.New("no context chosen") - ErrEmptyConfig = errors.New("no configuration has been provided") - // message is for consistency with old behavior - ErrEmptyCluster = errors.New("cluster has no server defined") -) - -type errContextNotFound struct { - ContextName string -} - -func (e *errContextNotFound) Error() string { - return fmt.Sprintf("context was not found for specified context: %v", e.ContextName) -} - -// IsContextNotFound returns a boolean indicating whether the error is known to -// report that a context was not found -func IsContextNotFound(err error) bool { - if err == nil { - return false - } - if _, ok := err.(*errContextNotFound); ok || err == ErrNoContext { - return true - } - return strings.Contains(err.Error(), "context was not found for specified context") -} - -// IsEmptyConfig returns true if the provided error indicates the provided configuration -// is empty. -func IsEmptyConfig(err error) bool { - switch t := err.(type) { - case errConfigurationInvalid: - return len(t) == 1 && t[0] == ErrEmptyConfig - } - return err == ErrEmptyConfig -} - -// errConfigurationInvalid is a set of errors indicating the configuration is invalid. -type errConfigurationInvalid []error - -// errConfigurationInvalid implements error and Aggregate -var _ error = errConfigurationInvalid{} -var _ utilerrors.Aggregate = errConfigurationInvalid{} - -func newErrConfigurationInvalid(errs []error) error { - switch len(errs) { - case 0: - return nil - default: - return errConfigurationInvalid(errs) - } -} - -// Error implements the error interface -func (e errConfigurationInvalid) Error() string { - return fmt.Sprintf("invalid configuration: %v", utilerrors.NewAggregate(e).Error()) -} - -// Errors implements the AggregateError interface -func (e errConfigurationInvalid) Errors() []error { - return e -} - -// IsConfigurationInvalid returns true if the provided error indicates the configuration is invalid. -func IsConfigurationInvalid(err error) bool { - switch err.(type) { - case *errContextNotFound, errConfigurationInvalid: - return true - } - return IsContextNotFound(err) -} - -// Validate checks for errors in the Config. It does not return early so that it can find as many errors as possible. -func Validate(config clientcmdapi.Config) error { - validationErrors := make([]error, 0) - - if clientcmdapi.IsConfigEmpty(&config) { - return newErrConfigurationInvalid([]error{ErrEmptyConfig}) - } - - if len(config.CurrentContext) != 0 { - if _, exists := config.Contexts[config.CurrentContext]; !exists { - validationErrors = append(validationErrors, &errContextNotFound{config.CurrentContext}) - } - } - - for contextName, context := range config.Contexts { - validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) - } - - for authInfoName, authInfo := range config.AuthInfos { - validationErrors = append(validationErrors, validateAuthInfo(authInfoName, *authInfo)...) - } - - for clusterName, clusterInfo := range config.Clusters { - validationErrors = append(validationErrors, validateClusterInfo(clusterName, *clusterInfo)...) 
- } - - return newErrConfigurationInvalid(validationErrors) -} - -// ConfirmUsable looks a particular context and determines if that particular part of the config is useable. There might still be errors in the config, -// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible. -func ConfirmUsable(config clientcmdapi.Config, passedContextName string) error { - validationErrors := make([]error, 0) - - if clientcmdapi.IsConfigEmpty(&config) { - return newErrConfigurationInvalid([]error{ErrEmptyConfig}) - } - - var contextName string - if len(passedContextName) != 0 { - contextName = passedContextName - } else { - contextName = config.CurrentContext - } - - if len(contextName) == 0 { - return ErrNoContext - } - - context, exists := config.Contexts[contextName] - if !exists { - validationErrors = append(validationErrors, &errContextNotFound{contextName}) - } - - if exists { - validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) - validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *config.AuthInfos[context.AuthInfo])...) - validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *config.Clusters[context.Cluster])...) - } - - return newErrConfigurationInvalid(validationErrors) -} - -// validateClusterInfo looks for conflicts and errors in the cluster info -func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) []error { - validationErrors := make([]error, 0) - - if reflect.DeepEqual(clientcmdapi.Cluster{}, clusterInfo) { - return []error{ErrEmptyCluster} - } - - if len(clusterInfo.Server) == 0 { - if len(clusterName) == 0 { - validationErrors = append(validationErrors, fmt.Errorf("default cluster has no server defined")) - } else { - validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName)) - } - } - // Make sure CA data and CA file aren't both specified - if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 { - validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override.", clusterName)) - } - if len(clusterInfo.CertificateAuthority) != 0 { - clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) - defer clientCertCA.Close() - if err != nil { - validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) - } - } - - return validationErrors -} - -// validateAuthInfo looks for conflicts and errors in the auth info -func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []error { - validationErrors := make([]error, 0) - - usingAuthPath := false - methods := make([]string, 0, 3) - if len(authInfo.Token) != 0 { - methods = append(methods, "token") - } - if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 { - methods = append(methods, "basicAuth") - } - - if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { - // Make sure cert data and file aren't both specified - if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { - validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. 
client-cert-data will override.", authInfoName)) - } - // Make sure key data and file aren't both specified - if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 { - validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) - } - // Make sure a key is specified - if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 { - validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method.", authInfoName)) - } - - if len(authInfo.ClientCertificate) != 0 { - clientCertFile, err := os.Open(authInfo.ClientCertificate) - defer clientCertFile.Close() - if err != nil { - validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) - } - } - if len(authInfo.ClientKey) != 0 { - clientKeyFile, err := os.Open(authInfo.ClientKey) - defer clientKeyFile.Close() - if err != nil { - validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) - } - } - } - - // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case - if (len(methods) > 1) && (!usingAuthPath) { - validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) - } - - return validationErrors -} - -// validateContext looks for errors in the context. It is not transitive, so errors in the reference authInfo or cluster configs are not included in this return -func validateContext(contextName string, context clientcmdapi.Context, config clientcmdapi.Config) []error { - validationErrors := make([]error, 0) - - if len(context.AuthInfo) == 0 { - validationErrors = append(validationErrors, fmt.Errorf("user was not specified for context %q", contextName)) - } else if _, exists := config.AuthInfos[context.AuthInfo]; !exists { - validationErrors = append(validationErrors, fmt.Errorf("user %q was not found for context %q", context.AuthInfo, contextName)) - } - - if len(context.Cluster) == 0 { - validationErrors = append(validationErrors, fmt.Errorf("cluster was not specified for context %q", contextName)) - } else if _, exists := config.Clusters[context.Cluster]; !exists { - validationErrors = append(validationErrors, fmt.Errorf("cluster %q was not found for context %q", context.Cluster, contextName)) - } - - if len(context.Namespace) != 0 { - if len(validation.IsDNS1123Label(context.Namespace)) != 0 { - validationErrors = append(validationErrors, fmt.Errorf("namespace %q for context %q does not conform to the kubernetes DNS_LABEL rules", context.Namespace, contextName)) - } - } - - return validationErrors -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/conditions.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/conditions.go deleted file mode 100644 index ee39add8d2..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/conditions.go +++ /dev/null @@ -1,272 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/extensions" - appsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion" - batchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" - coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" - "k8s.io/kubernetes/pkg/util/wait" - "k8s.io/kubernetes/pkg/watch" -) - -// ControllerHasDesiredReplicas returns a condition that will be true if and only if -// the desired replica count for a controller's ReplicaSelector equals the Replicas count. -func ControllerHasDesiredReplicas(rcClient coreclient.ReplicationControllersGetter, controller *api.ReplicationController) wait.ConditionFunc { - - // If we're given a controller where the status lags the spec, it either means that the controller is stale, - // or that the rc manager hasn't noticed the update yet. Polling status.Replicas is not safe in the latter case. - desiredGeneration := controller.Generation - - return func() (bool, error) { - ctrl, err := rcClient.ReplicationControllers(controller.Namespace).Get(controller.Name) - if err != nil { - return false, err - } - // There's a chance a concurrent update modifies the Spec.Replicas causing this check to pass, - // or, after this check has passed, a modification causes the rc manager to create more pods. - // This will not be an issue once we've implemented graceful delete for rcs, but till then - // concurrent stop operations on the same rc might have unintended side effects. - return ctrl.Status.ObservedGeneration >= desiredGeneration && ctrl.Status.Replicas == ctrl.Spec.Replicas, nil - } -} - -// ReplicaSetHasDesiredReplicas returns a condition that will be true if and only if -// the desired replica count for a ReplicaSet's ReplicaSelector equals the Replicas count. -func ReplicaSetHasDesiredReplicas(rsClient extensionsclient.ReplicaSetsGetter, replicaSet *extensions.ReplicaSet) wait.ConditionFunc { - - // If we're given a ReplicaSet where the status lags the spec, it either means that the - // ReplicaSet is stale, or that the ReplicaSet manager hasn't noticed the update yet. - // Polling status.Replicas is not safe in the latter case. - desiredGeneration := replicaSet.Generation - - return func() (bool, error) { - rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name) - if err != nil { - return false, err - } - // There's a chance a concurrent update modifies the Spec.Replicas causing this check to - // pass, or, after this check has passed, a modification causes the ReplicaSet manager to - // create more pods. 
This will not be an issue once we've implemented graceful delete for - // ReplicaSets, but till then concurrent stop operations on the same ReplicaSet might have - // unintended side effects. - return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.Replicas == rs.Spec.Replicas, nil - } -} - -// StatefulSetHasDesiredPets returns a conditon that checks the number of petset replicas -func StatefulSetHasDesiredPets(psClient appsclient.StatefulSetsGetter, petset *apps.StatefulSet) wait.ConditionFunc { - // TODO: Differentiate between 0 pets and a really quick scale down using generation. - return func() (bool, error) { - ps, err := psClient.StatefulSets(petset.Namespace).Get(petset.Name) - if err != nil { - return false, err - } - return ps.Status.Replicas == ps.Spec.Replicas, nil - } -} - -// JobHasDesiredParallelism returns a condition that will be true if the desired parallelism count -// for a job equals the current active counts or is less by an appropriate successful/unsuccessful count. -func JobHasDesiredParallelism(jobClient batchclient.JobsGetter, job *batch.Job) wait.ConditionFunc { - return func() (bool, error) { - job, err := jobClient.Jobs(job.Namespace).Get(job.Name) - if err != nil { - return false, err - } - - // desired parallelism can be either the exact number, in which case return immediately - if job.Status.Active == *job.Spec.Parallelism { - return true, nil - } - if job.Spec.Completions == nil { - // A job without specified completions needs to wait for Active to reach Parallelism. - return false, nil - } - - // otherwise count successful - progress := *job.Spec.Completions - job.Status.Active - job.Status.Succeeded - return progress == 0, nil - } -} - -// DeploymentHasDesiredReplicas returns a condition that will be true if and only if -// the desired replica count for a deployment equals its updated replicas count. -// (non-terminated pods that have the desired template spec). -func DeploymentHasDesiredReplicas(dClient extensionsclient.DeploymentsGetter, deployment *extensions.Deployment) wait.ConditionFunc { - // If we're given a deployment where the status lags the spec, it either - // means that the deployment is stale, or that the deployment manager hasn't - // noticed the update yet. Polling status.Replicas is not safe in the latter - // case. - desiredGeneration := deployment.Generation - - return func() (bool, error) { - deployment, err := dClient.Deployments(deployment.Namespace).Get(deployment.Name) - if err != nil { - return false, err - } - return deployment.Status.ObservedGeneration >= desiredGeneration && - deployment.Status.UpdatedReplicas == deployment.Spec.Replicas, nil - } -} - -// ErrPodCompleted is returned by PodRunning or PodContainerRunning to indicate that -// the pod has already reached completed state. -var ErrPodCompleted = fmt.Errorf("pod ran to completion") - -// ErrContainerTerminated is returned by PodContainerRunning in the intermediate -// state where the pod indicates it's still running, but its container is already terminated -var ErrContainerTerminated = fmt.Errorf("container terminated") - -// PodRunning returns true if the pod is running, false if the pod has not yet reached running state, -// returns ErrPodCompleted if the pod has run to completion, or an error in any other case. 
-func PodRunning(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "") - } - switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodRunning: - return true, nil - case api.PodFailed, api.PodSucceeded: - return false, ErrPodCompleted - } - } - return false, nil -} - -// PodCompleted returns true if the pod has run to completion, false if the pod has not yet -// reached running state, or an error in any other case. -func PodCompleted(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "") - } - switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodFailed, api.PodSucceeded: - return true, nil - } - } - return false, nil -} - -// PodRunningAndReady returns true if the pod is running and ready, false if the pod has not -// yet reached those states, returns ErrPodCompleted if the pod has run to completion, or -// an error in any other case. -func PodRunningAndReady(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "") - } - switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodFailed, api.PodSucceeded: - return false, ErrPodCompleted - case api.PodRunning: - return api.IsPodReady(t), nil - } - } - return false, nil -} - -// PodNotPending returns true if the pod has left the pending state, false if it has not, -// or an error in any other case (such as if the pod was deleted). -func PodNotPending(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "") - } - switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodPending: - return false, nil - default: - return true, nil - } - } - return false, nil -} - -// PodContainerRunning returns false until the named container has ContainerStatus running (at least once), -// and will return an error if the pod is deleted, runs to completion, or the container pod is not available. -func PodContainerRunning(containerName string) watch.ConditionFunc { - return func(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "") - } - switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodRunning, api.PodPending: - case api.PodFailed, api.PodSucceeded: - return false, ErrPodCompleted - default: - return false, nil - } - for _, s := range t.Status.ContainerStatuses { - if s.Name != containerName { - continue - } - if s.State.Terminated != nil { - return false, ErrContainerTerminated - } - return s.State.Running != nil, nil - } - for _, s := range t.Status.InitContainerStatuses { - if s.Name != containerName { - continue - } - if s.State.Terminated != nil { - return false, ErrContainerTerminated - } - return s.State.Running != nil, nil - } - return false, nil - } - return false, nil - } -} - -// ServiceAccountHasSecrets returns true if the service account has at least one secret, -// false if it does not, or an error. 
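Every helper in the deleted conditions.go follows one pattern: it returns a closure reporting (done, err), which the caller either polls or feeds watch events until it signals completion. The sketch below illustrates that polling pattern in isolation; conditionFunc and pollUntil are illustrative stand-ins (roughly the role of wait.ConditionFunc and wait.Poll), not the vendored APIs.

package main

import (
	"errors"
	"fmt"
	"time"
)

// conditionFunc mirrors the shape the helpers above return: it reports
// whether the desired state has been reached, or a terminal error.
type conditionFunc func() (bool, error)

// pollUntil re-evaluates cond every interval until it reports done, fails,
// or the deadline passes, the role a poller would play for the
// replica-count conditions above.
func pollUntil(interval, timeout time.Duration, cond conditionFunc) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := cond()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	observed, desired := 0, 3
	err := pollUntil(10*time.Millisecond, time.Second, func() (bool, error) {
		observed++ // stand-in for re-reading object status from the API server
		return observed >= desired, nil
	})
	fmt.Println("synced:", err == nil)
}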
-func ServiceAccountHasSecrets(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(unversioned.GroupResource{Resource: "serviceaccounts"}, "") - } - switch t := event.Object.(type) { - case *api.ServiceAccount: - return len(t.Secrets) > 0, nil - } - return false, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/helper.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/helper.go deleted file mode 100644 index 59a992ad75..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/helper.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/client/restclient" - // Import solely to initialize client auth plugins. - _ "k8s.io/kubernetes/plugin/pkg/client/auth" -) - -const ( - legacyAPIPath = "/api" - defaultAPIPath = "/apis" -) - -// SetKubernetesDefaults sets default values on the provided client config for accessing the -// Kubernetes API or returns an error if any of the defaults are impossible or invalid. -// TODO: this method needs to be split into one that sets defaults per group, expected to be fix in PR "Refactoring clientcache.go and helper.go #14592" -func SetKubernetesDefaults(config *restclient.Config) error { - if config.APIPath == "" { - config.APIPath = legacyAPIPath - } - if config.GroupVersion == nil || config.GroupVersion.Group != api.GroupName { - g, err := registered.Group(api.GroupName) - if err != nil { - return err - } - copyGroupVersion := g.GroupVersion - config.GroupVersion = ©GroupVersion - } - if config.NegotiatedSerializer == nil { - config.NegotiatedSerializer = api.Codecs - } - return restclient.SetKubernetesDefaults(config) -} - -func setGroupDefaults(groupName string, config *restclient.Config) error { - config.APIPath = defaultAPIPath - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != groupName { - g, err := registered.Group(groupName) - if err != nil { - return err - } - copyGroupVersion := g.GroupVersion - config.GroupVersion = ©GroupVersion - } - if config.NegotiatedSerializer == nil { - config.NegotiatedSerializer = api.Codecs - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/BUILD b/vendor/k8s.io/kubernetes/pkg/cloudprovider/BUILD index 7860acc401..147781b8a7 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -19,8 +16,24 @@ go_library( ], tags = ["automanaged"], deps = [ - "//pkg/api:go_default_library", - "//pkg/types:go_default_library", + "//pkg/api/v1:go_default_library", "//vendor:github.com/golang/glog", + 
"//vendor:k8s.io/apimachinery/pkg/types", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/cloudprovider/providers:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/OWNERS b/vendor/k8s.io/kubernetes/pkg/cloudprovider/OWNERS index eeb7982bab..657fe57964 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/OWNERS @@ -1,4 +1,42 @@ -assignees: - - mikedanese -owners: - - mikedanese +approvers: +- mikedanese +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- derekwaynecarr +- caesarxuchao +- vishh +- mikedanese +- liggitt +- bprashanth +- gmarek +- erictune +- davidopp +- pmorie +- sttts +- quinton-hoole +- dchen1107 +- saad-ali +- zmerlynn +- luxas +- justinsb +- roberthbailey +- eparis +- jlowdermilk +- piosz +- jsafrane +- dims +- krousey +- rootfs +- jszczepkowski +- markturansky +- vmarmol +- girishkalele +- satnam6502 +- jdef +- freehan +- jingxu97 diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go index 6ed104ff15..de83e9bd1b 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go @@ -21,8 +21,8 @@ import ( "fmt" "strings" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/types" + "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/api/v1" ) // Interface is an abstract, pluggable interface for cloud providers. @@ -53,7 +53,7 @@ type Clusters interface { // TODO(#6812): Use a shorter name that's less likely to be longer than cloud // providers' name length limits. -func GetLoadBalancerName(service *api.Service) string { +func GetLoadBalancerName(service *v1.Service) string { //GCE requires that the name of a load balancer starts with a lower case letter. ret := "a" + string(service.UID) ret = strings.Replace(ret, "-", "", -1) @@ -81,26 +81,28 @@ type LoadBalancer interface { // TODO: Break this up into different interfaces (LB, etc) when we have more than one type of service // GetLoadBalancer returns whether the specified load balancer exists, and // if so, what its status is. - // Implementations must treat the *api.Service parameter as read-only and not modify it. + // Implementations must treat the *v1.Service parameter as read-only and not modify it. // Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager - GetLoadBalancer(clusterName string, service *api.Service) (status *api.LoadBalancerStatus, exists bool, err error) + GetLoadBalancer(clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) // EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer - // Implementations must treat the *api.Service parameter as read-only and not modify it. + // Implementations must treat the *v1.Service and *v1.Node + // parameters as read-only and not modify them. 
// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager - EnsureLoadBalancer(clusterName string, service *api.Service, nodeNames []string) (*api.LoadBalancerStatus, error) + EnsureLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) // UpdateLoadBalancer updates hosts under the specified load balancer. - // Implementations must treat the *api.Service parameter as read-only and not modify it. + // Implementations must treat the *v1.Service and *v1.Node + // parameters as read-only and not modify them. // Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager - UpdateLoadBalancer(clusterName string, service *api.Service, nodeNames []string) error + UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error // EnsureLoadBalancerDeleted deletes the specified load balancer if it // exists, returning nil if the load balancer specified either didn't exist or // was successfully deleted. // This construction is useful because many cloud providers' load balancers // have multiple underlying components, meaning a Get could say that the LB // doesn't exist even if some part of it is still laying around. - // Implementations must treat the *api.Service parameter as read-only and not modify it. + // Implementations must treat the *v1.Service parameter as read-only and not modify it. // Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager - EnsureLoadBalancerDeleted(clusterName string, service *api.Service) error + EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error } // Instances is an abstract, pluggable interface for sets of instances. @@ -109,7 +111,7 @@ type Instances interface { // TODO(roberthbailey): This currently is only used in such a way that it // returns the address of the calling instance. We should do a rename to // make this clearer. - NodeAddresses(name types.NodeName) ([]api.NodeAddress, error) + NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) // ExternalID returns the cloud provider ID of the node with the specified NodeName. // Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound) ExternalID(nodeName types.NodeName) (string, error) @@ -117,8 +119,6 @@ type Instances interface { InstanceID(nodeName types.NodeName) (string, error) // InstanceType returns the type of the specified instance. InstanceType(name types.NodeName) (string, error) - // List lists instances that match 'filter' which is a regular expression which must match the entire instance name (fqdn) - List(filter string) ([]types.NodeName, error) // AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances // expected format for the key is standard ssh-keygen format: AddSSHKeyToAllInstances(user string, keyData []byte) error @@ -152,7 +152,10 @@ type Routes interface { DeleteRoute(clusterName string, route *Route) error } -var InstanceNotFound = errors.New("instance not found") +var ( + InstanceNotFound = errors.New("instance not found") + DiskNotFound = errors.New("disk is not found") +) // Zone represents the location of a particular machine. 
type Zone struct { diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/plugins.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/plugins.go index 19e90b4d50..0fc41f5eaf 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/plugins.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/plugins.go @@ -37,6 +37,8 @@ var ( providers = make(map[string]Factory) ) +const externalCloudProvider = "external" + // RegisterCloudProvider registers a cloudprovider.Factory by name. This // is expected to happen during app startup. func RegisterCloudProvider(name string, cloud Factory) { @@ -71,7 +73,7 @@ func CloudProviders() []string { } // GetCloudProvider creates an instance of the named cloud provider, or nil if -// the name is not known. The error return is only used if the named provider +// the name is unknown. The error return is only used if the named provider // was known but failed to initialize. The config parameter specifies the // io.Reader handler of the configuration file for the cloud provider, or nil // for no configuation. @@ -85,6 +87,11 @@ func GetCloudProvider(name string, config io.Reader) (Interface, error) { return f(config) } +// Detects if the string is an external cloud provider +func IsExternal(name string) bool { + return name == externalCloudProvider +} + // InitCloudProvider creates an instance of the named cloud provider. func InitCloudProvider(name string, configFilePath string) (Interface, error) { var cloud Interface @@ -95,6 +102,11 @@ func InitCloudProvider(name string, configFilePath string) (Interface, error) { return nil, nil } + if IsExternal(name) { + glog.Info("External cloud provider specified") + return nil, nil + } + if configFilePath != "" { var config *os.File config, err = os.Open(configFilePath) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD index c8098b12a4..bd5785d09e 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD @@ -4,10 +4,8 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", "go_test", - "cgo_library", ) go_library( @@ -19,16 +17,10 @@ go_library( ], tags = ["automanaged"], deps = [ - "//pkg/api:go_default_library", - "//pkg/api/service:go_default_library", - "//pkg/api/unversioned:go_default_library", + "//pkg/api/v1:go_default_library", + "//pkg/api/v1/service:go_default_library", "//pkg/cloudprovider:go_default_library", - "//pkg/types:go_default_library", - "//pkg/util/errors:go_default_library", - "//pkg/util/flowcontrol:go_default_library", "//pkg/util/net/sets:go_default_library", - "//pkg/util/sets:go_default_library", - "//pkg/util/wait:go_default_library", "//pkg/volume:go_default_library", "//vendor:cloud.google.com/go/compute/metadata", "//vendor:github.com/golang/glog", @@ -39,13 +31,31 @@ go_library( "//vendor:google.golang.org/api/container/v1", "//vendor:google.golang.org/api/googleapi", "//vendor:gopkg.in/gcfg.v1", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/util/errors", + "//vendor:k8s.io/apimachinery/pkg/util/sets", + "//vendor:k8s.io/apimachinery/pkg/util/wait", + "//vendor:k8s.io/client-go/util/flowcontrol", ], ) go_test( name = "go_default_test", srcs = ["gce_test.go"], - library = "go_default_library", + library = ":go_default_library", + tags = ["automanaged"], +) + +filegroup( + name = "package-srcs", + srcs 
= glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], tags = ["automanaged"], - deps = [], ) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/OWNERS b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/OWNERS new file mode 100644 index 0000000000..f5cf4a0124 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/OWNERS @@ -0,0 +1,3 @@ +approvers: +- saad-ali +- jingxu97 diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go index c56dd43b6b..c9bdf667a1 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go @@ -30,16 +30,16 @@ import ( "gopkg.in/gcfg.v1" - "k8s.io/kubernetes/pkg/api" - apiservice "k8s.io/kubernetes/pkg/api/service" - "k8s.io/kubernetes/pkg/api/unversioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/flowcontrol" + "k8s.io/kubernetes/pkg/api/v1" + apiservice "k8s.io/kubernetes/pkg/api/v1/service" "k8s.io/kubernetes/pkg/cloudprovider" - "k8s.io/kubernetes/pkg/types" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/flowcontrol" netsets "k8s.io/kubernetes/pkg/util/net/sets" - "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/volume" "cloud.google.com/go/compute/metadata" @@ -63,8 +63,9 @@ const ( // AffinityTypeClientIPProto - affinity based on Client IP and port. gceAffinityTypeClientIPProto = "CLIENT_IP_PROTO" - operationPollInterval = 3 * time.Second - operationPollTimeoutDuration = 30 * time.Minute + operationPollInterval = 3 * time.Second + // Creating Route in very large clusters, may take more than half an hour. + operationPollTimeoutDuration = time.Hour // Each page can have 500 results, but we cap how many pages // are iterated through to prevent infinite loops if the API @@ -527,12 +528,12 @@ func (gce *GCECloud) waitForZoneOp(op *compute.Operation, zone string) error { } // GetLoadBalancer is an implementation of LoadBalancer.GetLoadBalancer -func (gce *GCECloud) GetLoadBalancer(clusterName string, service *api.Service) (*api.LoadBalancerStatus, bool, error) { +func (gce *GCECloud) GetLoadBalancer(clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) { loadBalancerName := cloudprovider.GetLoadBalancerName(service) fwd, err := gce.service.ForwardingRules.Get(gce.projectID, gce.region, loadBalancerName).Do() if err == nil { - status := &api.LoadBalancerStatus{} - status.Ingress = []api.LoadBalancerIngress{{IP: fwd.IPAddress}} + status := &v1.LoadBalancerStatus{} + status.Ingress = []v1.LoadBalancerIngress{{IP: fwd.IPAddress}} return status, true, nil } @@ -547,6 +548,14 @@ func isHTTPErrorCode(err error, code int) bool { return ok && apiErr.Code == code } +func nodeNames(nodes []*v1.Node) []string { + ret := make([]string, len(nodes)) + for i, node := range nodes { + ret[i] = node.Name + } + return ret +} + // EnsureLoadBalancer is an implementation of LoadBalancer.EnsureLoadBalancer. // Our load balancers in GCE consist of four separate GCE resources - a static // IP address, a firewall rule, a target pool, and a forwarding rule. 
This @@ -554,11 +563,12 @@ func isHTTPErrorCode(err error, code int) bool { // Due to an interesting series of design decisions, this handles both creating // new load balancers and updating existing load balancers, recognizing when // each is needed. -func (gce *GCECloud) EnsureLoadBalancer(clusterName string, apiService *api.Service, hostNames []string) (*api.LoadBalancerStatus, error) { - if len(hostNames) == 0 { +func (gce *GCECloud) EnsureLoadBalancer(clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { + if len(nodes) == 0 { return nil, fmt.Errorf("Cannot EnsureLoadBalancer() with no hosts") } + hostNames := nodeNames(nodes) hosts, err := gce.getInstancesByNames(hostNames) if err != nil { return nil, err @@ -832,8 +842,8 @@ func (gce *GCECloud) EnsureLoadBalancer(clusterName string, apiService *api.Serv glog.Infof("EnsureLoadBalancer(%v(%v)): created forwarding rule, IP %s", loadBalancerName, serviceName, ipAddress) } - status := &api.LoadBalancerStatus{} - status.Ingress = []api.LoadBalancerIngress{{IP: ipAddress}} + status := &v1.LoadBalancerStatus{} + status.Ingress = []v1.LoadBalancerIngress{{IP: ipAddress}} return status, nil } @@ -889,7 +899,7 @@ func (gce *GCECloud) ensureHttpHealthCheck(name, path string, port int32) (hc *c // IP is being requested. // Returns whether the forwarding rule exists, whether it needs to be updated, // what its IP address is (if it exists), and any error we encountered. -func (gce *GCECloud) forwardingRuleNeedsUpdate(name, region string, loadBalancerIP string, ports []api.ServicePort) (exists bool, needsUpdate bool, ipAddress string, err error) { +func (gce *GCECloud) forwardingRuleNeedsUpdate(name, region string, loadBalancerIP string, ports []v1.ServicePort) (exists bool, needsUpdate bool, ipAddress string, err error) { fwd, err := gce.service.ForwardingRules.Get(gce.projectID, region, name).Do() if err != nil { if isHTTPErrorCode(err, http.StatusNotFound) { @@ -926,13 +936,13 @@ func (gce *GCECloud) forwardingRuleNeedsUpdate(name, region string, loadBalancer return true, false, fwd.IPAddress, nil } -func loadBalancerPortRange(ports []api.ServicePort) (string, error) { +func loadBalancerPortRange(ports []v1.ServicePort) (string, error) { if len(ports) == 0 { return "", fmt.Errorf("no ports specified for GCE load balancer") } // The service controller verified all the protocols match on the ports, just check and use the first one - if ports[0].Protocol != api.ProtocolTCP && ports[0].Protocol != api.ProtocolUDP { + if ports[0].Protocol != v1.ProtocolTCP && ports[0].Protocol != v1.ProtocolUDP { return "", fmt.Errorf("Invalid protocol %s, only TCP and UDP are supported", string(ports[0].Protocol)) } @@ -951,7 +961,7 @@ func loadBalancerPortRange(ports []api.ServicePort) (string, error) { // Doesn't check whether the hosts have changed, since host updating is handled // separately. -func (gce *GCECloud) targetPoolNeedsUpdate(name, region string, affinityType api.ServiceAffinity) (exists bool, needsUpdate bool, err error) { +func (gce *GCECloud) targetPoolNeedsUpdate(name, region string, affinityType v1.ServiceAffinity) (exists bool, needsUpdate bool, err error) { tp, err := gce.service.TargetPools.Get(gce.projectID, region, name).Do() if err != nil { if isHTTPErrorCode(err, http.StatusNotFound) { @@ -977,11 +987,11 @@ func (gce *GCECloud) targetPoolNeedsUpdate(name, region string, affinityType api } // translate from what K8s supports to what the cloud provider supports for session affinity. 
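The plugins.go hunk above layers the new "external" sentinel on top of an existing name-to-factory registry maintained by RegisterCloudProvider and GetCloudProvider. Below is a compact, self-contained sketch of that registration pattern; registry, register and lookup are illustrative names, the stand-in factory returns a string where the real Factory returns (Interface, error), and the real map is additionally mutex-guarded.

package main

import (
	"fmt"
	"io"
)

// factory is a stand-in for cloudprovider.Factory, which really returns
// (cloudprovider.Interface, error); a string result keeps the sketch small.
type factory func(config io.Reader) (string, error)

var registry = map[string]factory{}

const externalCloudProvider = "external"

func register(name string, f factory) { registry[name] = f }

// lookup mirrors GetCloudProvider: an unknown name is not an error,
// it simply yields nothing, and the error is reserved for factories
// that were found but failed to initialize.
func lookup(name string, config io.Reader) (string, error) {
	f, ok := registry[name]
	if !ok {
		return "", nil
	}
	return f(config)
}

// isExternal mirrors the new IsExternal check added above.
func isExternal(name string) bool { return name == externalCloudProvider }

func main() {
	register("gce", func(io.Reader) (string, error) { return "gce cloud", nil })
	cloud, _ := lookup("gce", nil)
	fmt.Println(cloud, isExternal("external")) // gce cloud true
}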
-func translateAffinityType(affinityType api.ServiceAffinity) string { +func translateAffinityType(affinityType v1.ServiceAffinity) string { switch affinityType { - case api.ServiceAffinityClientIP: + case v1.ServiceAffinityClientIP: return gceAffinityTypeClientIP - case api.ServiceAffinityNone: + case v1.ServiceAffinityNone: return gceAffinityTypeNone default: glog.Errorf("Unexpected affinity type: %v", affinityType) @@ -989,7 +999,7 @@ func translateAffinityType(affinityType api.ServiceAffinity) string { } } -func (gce *GCECloud) firewallNeedsUpdate(name, serviceName, region, ipAddress string, ports []api.ServicePort, sourceRanges netsets.IPNet) (exists bool, needsUpdate bool, err error) { +func (gce *GCECloud) firewallNeedsUpdate(name, serviceName, region, ipAddress string, ports []v1.ServicePort, sourceRanges netsets.IPNet) (exists bool, needsUpdate bool, err error) { fw, err := gce.service.Firewalls.Get(gce.projectID, makeFirewallName(name)).Do() if err != nil { if isHTTPErrorCode(err, http.StatusNotFound) { @@ -1050,7 +1060,7 @@ func slicesEqual(x, y []string) bool { return true } -func (gce *GCECloud) createForwardingRule(name, serviceName, region, ipAddress string, ports []api.ServicePort) error { +func (gce *GCECloud) createForwardingRule(name, serviceName, region, ipAddress string, ports []v1.ServicePort) error { portRange, err := loadBalancerPortRange(ports) if err != nil { return err @@ -1077,7 +1087,7 @@ func (gce *GCECloud) createForwardingRule(name, serviceName, region, ipAddress s return nil } -func (gce *GCECloud) createTargetPool(name, serviceName, region string, hosts []*gceInstance, affinityType api.ServiceAffinity, hc *compute.HttpHealthCheck) error { +func (gce *GCECloud) createTargetPool(name, serviceName, region string, hosts []*gceInstance, affinityType v1.ServiceAffinity, hc *compute.HttpHealthCheck) error { var instances []string for _, host := range hosts { instances = append(instances, makeHostURL(gce.projectID, host.Zone, host.Name)) @@ -1114,7 +1124,7 @@ func (gce *GCECloud) createTargetPool(name, serviceName, region string, hosts [] return nil } -func (gce *GCECloud) createFirewall(name, region, desc string, sourceRanges netsets.IPNet, ports []api.ServicePort, hosts []*gceInstance) error { +func (gce *GCECloud) createFirewall(name, region, desc string, sourceRanges netsets.IPNet, ports []v1.ServicePort, hosts []*gceInstance) error { firewall, err := gce.firewallObject(name, region, desc, sourceRanges, ports, hosts) if err != nil { return err @@ -1132,7 +1142,7 @@ func (gce *GCECloud) createFirewall(name, region, desc string, sourceRanges nets return nil } -func (gce *GCECloud) updateFirewall(name, region, desc string, sourceRanges netsets.IPNet, ports []api.ServicePort, hosts []*gceInstance) error { +func (gce *GCECloud) updateFirewall(name, region, desc string, sourceRanges netsets.IPNet, ports []v1.ServicePort, hosts []*gceInstance) error { firewall, err := gce.firewallObject(name, region, desc, sourceRanges, ports, hosts) if err != nil { return err @@ -1150,7 +1160,7 @@ func (gce *GCECloud) updateFirewall(name, region, desc string, sourceRanges nets return nil } -func (gce *GCECloud) firewallObject(name, region, desc string, sourceRanges netsets.IPNet, ports []api.ServicePort, hosts []*gceInstance) (*compute.Firewall, error) { +func (gce *GCECloud) firewallObject(name, region, desc string, sourceRanges netsets.IPNet, ports []v1.ServicePort, hosts []*gceInstance) (*compute.Firewall, error) { allowedPorts := make([]string, len(ports)) for ix := range ports 
{ allowedPorts[ix] = strconv.Itoa(int(ports[ix].Port)) @@ -1331,8 +1341,8 @@ func (gce *GCECloud) ensureStaticIP(name, serviceName, region, existingIP string } // UpdateLoadBalancer is an implementation of LoadBalancer.UpdateLoadBalancer. -func (gce *GCECloud) UpdateLoadBalancer(clusterName string, service *api.Service, hostNames []string) error { - hosts, err := gce.getInstancesByNames(hostNames) +func (gce *GCECloud) UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error { + hosts, err := gce.getInstancesByNames(nodeNames(nodes)) if err != nil { return err } @@ -1402,7 +1412,7 @@ func (gce *GCECloud) updateTargetPool(loadBalancerName string, existing sets.Str } // EnsureLoadBalancerDeleted is an implementation of LoadBalancer.EnsureLoadBalancerDeleted. -func (gce *GCECloud) EnsureLoadBalancerDeleted(clusterName string, service *api.Service) error { +func (gce *GCECloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error { loadBalancerName := cloudprovider.GetLoadBalancerName(service) glog.V(2).Infof("EnsureLoadBalancerDeleted(%v, %v, %v, %v, %v)", clusterName, service.Namespace, service.Name, loadBalancerName, gce.region) @@ -1555,15 +1565,15 @@ func (gce *GCECloud) CreateFirewall(name, desc string, sourceRanges netsets.IPNe return err } // TODO: This completely breaks modularity in the cloudprovider but the methods - // shared with the TCPLoadBalancer take api.ServicePorts. - svcPorts := []api.ServicePort{} + // shared with the TCPLoadBalancer take v1.ServicePorts. + svcPorts := []v1.ServicePort{} // TODO: Currently the only consumer of this method is the GCE L7 // loadbalancer controller, which never needs a protocol other than TCP. // We should pipe through a mapping of port:protocol and default to TCP // if UDP ports are required. This means the method signature will change // forcing downstream clients to refactor interfaces. for _, p := range ports { - svcPorts = append(svcPorts, api.ServicePort{Port: int32(p), Protocol: api.ProtocolTCP}) + svcPorts = append(svcPorts, v1.ServicePort{Port: int32(p), Protocol: v1.ProtocolTCP}) } hosts, err := gce.getInstancesByNames(hostNames) if err != nil { @@ -1589,15 +1599,15 @@ func (gce *GCECloud) UpdateFirewall(name, desc string, sourceRanges netsets.IPNe return err } // TODO: This completely breaks modularity in the cloudprovider but the methods - // shared with the TCPLoadBalancer take api.ServicePorts. - svcPorts := []api.ServicePort{} + // shared with the TCPLoadBalancer take v1.ServicePorts. + svcPorts := []v1.ServicePort{} // TODO: Currently the only consumer of this method is the GCE L7 // loadbalancer controller, which never needs a protocol other than TCP. // We should pipe through a mapping of port:protocol and default to TCP // if UDP ports are required. This means the method signature will change, // forcing downstream clients to refactor interfaces. for _, p := range ports { - svcPorts = append(svcPorts, api.ServicePort{Port: int32(p), Protocol: api.ProtocolTCP}) + svcPorts = append(svcPorts, v1.ServicePort{Port: int32(p), Protocol: v1.ProtocolTCP}) } hosts, err := gce.getInstancesByNames(hostNames) if err != nil { @@ -2171,7 +2181,7 @@ func (gce *GCECloud) AddSSHKeyToAllInstances(user string, keyData []byte) error } // NodeAddresses is an implementation of Instances.NodeAddresses. 
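CreateFirewall and UpdateFirewall above adapt bare port numbers into the v1.ServicePort slices shared with the TCP load-balancer code, defaulting every entry to TCP as the in-line TODO notes. The sketch below shows just that conversion; servicePort stands in for v1.ServicePort and the []int64 input type is an assumption.

package main

import "fmt"

// servicePort models only the two v1.ServicePort fields the firewall
// helpers use.
type servicePort struct {
	Port     int32
	Protocol string
}

// toServicePorts converts raw port numbers to ServicePort entries,
// defaulting the protocol to TCP exactly as the loop above does.
func toServicePorts(ports []int64) []servicePort {
	out := make([]servicePort, 0, len(ports))
	for _, p := range ports {
		out = append(out, servicePort{Port: int32(p), Protocol: "TCP"})
	}
	return out
}

func main() {
	fmt.Println(toServicePorts([]int64{80, 443})) // [{80 TCP} {443 TCP}]
}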
-func (gce *GCECloud) NodeAddresses(_ types.NodeName) ([]api.NodeAddress, error) { +func (gce *GCECloud) NodeAddresses(_ types.NodeName) ([]v1.NodeAddress, error) { internalIP, err := metadata.Get("instance/network-interfaces/0/ip") if err != nil { return nil, fmt.Errorf("couldn't get internal IP: %v", err) @@ -2180,9 +2190,9 @@ func (gce *GCECloud) NodeAddresses(_ types.NodeName) ([]api.NodeAddress, error) if err != nil { return nil, fmt.Errorf("couldn't get external IP: %v", err) } - return []api.NodeAddress{ - {Type: api.NodeInternalIP, Address: internalIP}, - {Type: api.NodeExternalIP, Address: externalIP}, + return []v1.NodeAddress{ + {Type: v1.NodeInternalIP, Address: internalIP}, + {Type: v1.NodeExternalIP, Address: externalIP}, }, nil } @@ -2268,37 +2278,6 @@ func (gce *GCECloud) InstanceType(nodeName types.NodeName) (string, error) { return instance.Type, nil } -// List is an implementation of Instances.List. -func (gce *GCECloud) List(filter string) ([]types.NodeName, error) { - var instances []types.NodeName - // TODO: Parallelize, although O(zones) so not too bad (N <= 3 typically) - for _, zone := range gce.managedZones { - pageToken := "" - page := 0 - for ; page == 0 || (pageToken != "" && page < maxPages); page++ { - listCall := gce.service.Instances.List(gce.projectID, zone) - if len(filter) > 0 { - listCall = listCall.Filter("name eq " + filter) - } - if pageToken != "" { - listCall = listCall.PageToken(pageToken) - } - res, err := listCall.Do() - if err != nil { - return nil, err - } - pageToken = res.NextPageToken - for _, instance := range res.Items { - instances = append(instances, mapInstanceToNodeName(instance)) - } - } - if page >= maxPages { - glog.Errorf("List exceeded maxPages=%d for Instances.List: truncating.", maxPages) - } - } - return instances, nil -} - // GetAllZones returns all the zones in which nodes are running func (gce *GCECloud) GetAllZones() (sets.String, error) { // Fast-path for non-multizone @@ -2421,7 +2400,12 @@ func (gce *GCECloud) CreateRoute(clusterName string, nameHint string, route *clo Description: k8sNodeRouteTag, }).Do() if err != nil { - return err + if isHTTPErrorCode(err, http.StatusConflict) { + glog.Info("Route %v already exists.") + return nil + } else { + return err + } } return gce.waitForGlobalOp(insertOp) } @@ -2500,7 +2484,12 @@ func (gce *GCECloud) CreateDisk(name string, diskType string, zone string, sizeG return err } - return gce.waitForZoneOp(createOp, zone) + err = gce.waitForZoneOp(createOp, zone) + if isGCEError(err, "alreadyExists") { + glog.Warningf("GCE PD %q already exists, reusing", name) + return nil + } + return err } func (gce *GCECloud) doDeleteDisk(diskToDelete string) error { @@ -2522,6 +2511,10 @@ func (gce *GCECloud) DeleteDisk(diskToDelete string) error { if isGCEError(err, "resourceInUseByAnotherResource") { return volume.NewDeletedVolumeInUseError(err.Error()) } + + if err == cloudprovider.DiskNotFound { + return nil + } return err } @@ -2582,8 +2575,8 @@ func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]st } labels := make(map[string]string) - labels[unversioned.LabelZoneFailureDomain] = zone - labels[unversioned.LabelZoneRegion] = region + labels[metav1.LabelZoneFailureDomain] = zone + labels[metav1.LabelZoneRegion] = region return labels, nil } @@ -2604,7 +2597,7 @@ func (gce *GCECloud) AttachDisk(diskName string, nodeName types.NodeName, readOn } attachedDisk := gce.convertDiskToAttachedDisk(disk, readWrite) - attachOp, err := 
gce.service.Instances.AttachDisk(gce.projectID, disk.Zone, instanceName, attachedDisk).Do() + attachOp, err := gce.service.Instances.AttachDisk(gce.projectID, disk.Zone, instance.Name, attachedDisk).Do() if err != nil { return err } @@ -2723,6 +2716,7 @@ func (gce *GCECloud) getDiskByName(diskName string, zone string) (*gceDisk, erro // Scans all managed zones to return the GCE PD // Prefer getDiskByName, if the zone can be established +// Return cloudprovider.DiskNotFound if the given disk cannot be found in any zone func (gce *GCECloud) getDiskByNameUnknownZone(diskName string) (*gceDisk, error) { // Note: this is the gotcha right now with GCE PD support: // disk names are not unique per-region. @@ -2753,7 +2747,8 @@ func (gce *GCECloud) getDiskByNameUnknownZone(diskName string) (*gceDisk, error) if found != nil { return found, nil } - return nil, fmt.Errorf("GCE persistent disk %q not found in managed zones (%s)", diskName, strings.Join(gce.managedZones, ",")) + glog.Warningf("GCE persistent disk %q not found in managed zones (%s)", diskName, strings.Join(gce.managedZones, ",")) + return nil, cloudprovider.DiskNotFound } // GetGCERegion returns region of the gce zone. Zone names @@ -2905,14 +2900,13 @@ func (gce *GCECloud) getInstancesByNames(names []string) ([]*gceInstance, error) // Gets the named instance, returning cloudprovider.InstanceNotFound if the instance is not found func (gce *GCECloud) getInstanceByName(name string) (*gceInstance, error) { // Avoid changing behaviour when not managing multiple zones - if len(gce.managedZones) == 1 { + for _, zone := range gce.managedZones { name = canonicalizeInstanceName(name) - zone := gce.managedZones[0] res, err := gce.service.Instances.Get(gce.projectID, zone, name).Do() if err != nil { - glog.Errorf("getInstanceByName/single-zone: failed to get instance %s; err: %v", name, err) + glog.Errorf("getInstanceByName: failed to get instance %s; err: %v", name, err) if isHTTPErrorCode(err, http.StatusNotFound) { - return nil, cloudprovider.InstanceNotFound + continue } return nil, err } @@ -2925,16 +2919,7 @@ func (gce *GCECloud) getInstanceByName(name string) (*gceInstance, error) { }, nil } - instances, err := gce.getInstancesByNames([]string{name}) - if err != nil { - glog.Errorf("getInstanceByName/multiple-zones: failed to get instance %s; err: %v", name, err) - return nil, err - } - if len(instances) != 1 || instances[0] == nil { - // getInstancesByNames not obeying its contract - return nil, fmt.Errorf("unexpected return value from getInstancesByNames: %v", instances) - } - return instances[0], nil + return nil, cloudprovider.InstanceNotFound } // Returns the last component of a URL, i.e. 
anything after the last slash diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/token_source.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/token_source.go index a30cae577c..2f4bb09119 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/token_source.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/token_source.go @@ -22,7 +22,7 @@ import ( "strings" "time" - "k8s.io/kubernetes/pkg/util/flowcontrol" + "k8s.io/client-go/util/flowcontrol" "github.com/prometheus/client_golang/prometheus" "golang.org/x/oauth2" diff --git a/vendor/k8s.io/kubernetes/pkg/controller/.import-restrictions b/vendor/k8s.io/kubernetes/pkg/controller/.import-restrictions deleted file mode 100644 index c45988ec9b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/.import-restrictions +++ /dev/null @@ -1,18 +0,0 @@ -{ - "Rules": [ - { - "SelectorRegexp": "k8s[.]io/kubernetes/pkg/client/unversioned$", - "ForbiddenPrefixes": [ - "k8s.io/kubernetes/pkg/client/unversioned" - ] - }, - { - "SelectorRegexp": "k8s[.]io/kubernetes/pkg/client/unversioned/testclient$", - "ForbiddenPrefixes": [ - "k8s.io/kubernetes/pkg/client/unversioned/testclient" - ] - } - - ] -} - diff --git a/vendor/k8s.io/kubernetes/pkg/controller/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/BUILD deleted file mode 100644 index 26d93d511f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/BUILD +++ /dev/null @@ -1,70 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "client_builder.go", - "controller_ref_manager.go", - "controller_utils.go", - "doc.go", - "lookup_cache.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/validation:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/client/cache:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", - "//pkg/client/record:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/fields:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/serviceaccount:go_default_library", - "//pkg/util/clock:go_default_library", - "//pkg/util/hash:go_default_library", - "//pkg/util/integer:go_default_library", - "//pkg/util/sets:go_default_library", - "//pkg/watch:go_default_library", - "//vendor:github.com/golang/glog", - "//vendor:github.com/golang/groupcache/lru", - ], -) - -go_test( - name = "go_default_test", - srcs = ["controller_utils_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/testapi:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/client/cache:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/client/record:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/securitycontext:go_default_library", - "//pkg/util/clock:go_default_library", - 
"//pkg/util/sets:go_default_library", - "//pkg/util/testing:go_default_library", - "//pkg/util/uuid:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/OWNERS b/vendor/k8s.io/kubernetes/pkg/controller/OWNERS deleted file mode 100644 index ac83016025..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -assignees: - - bprashanth - - derekwaynecarr - - mikedanese diff --git a/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go b/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go deleted file mode 100644 index 88f9c97c1e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "fmt" - "time" - - "k8s.io/kubernetes/pkg/api" - apierrors "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/client/cache" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/serviceaccount" - "k8s.io/kubernetes/pkg/watch" - - "github.com/golang/glog" -) - -// ControllerClientBuilder allow syou to get clients and configs for controllers -type ControllerClientBuilder interface { - Config(name string) (*restclient.Config, error) - Client(name string) (clientset.Interface, error) - ClientOrDie(name string) clientset.Interface -} - -// SimpleControllerClientBuilder returns a fixed client with different user agents -type SimpleControllerClientBuilder struct { - // ClientConfig is a skeleton config to clone and use as the basis for each controller client - ClientConfig *restclient.Config -} - -func (b SimpleControllerClientBuilder) Config(name string) (*restclient.Config, error) { - clientConfig := *b.ClientConfig - return &clientConfig, nil -} - -func (b SimpleControllerClientBuilder) Client(name string) (clientset.Interface, error) { - clientConfig, err := b.Config(name) - if err != nil { - return nil, err - } - return clientset.NewForConfig(restclient.AddUserAgent(clientConfig, name)) -} - -func (b SimpleControllerClientBuilder) ClientOrDie(name string) clientset.Interface { - client, err := b.Client(name) - if err != nil { - glog.Fatal(err) - } - return client -} - -// SAControllerClientBuilder is a ControllerClientBuilder that returns clients identifying as -// service accounts -type SAControllerClientBuilder struct { - // ClientConfig is a skeleton config to clone and use as the basis for each controller client - ClientConfig *restclient.Config - - // CoreClient is used to provision service accounts if needed and watch for their associated tokens - // to construct a controller client - CoreClient unversionedcore.CoreInterface - - // Namespace is the namespace used to host the service accounts that 
will back the - // controllers. It must be highly privileged namespace which normal users cannot inspect. - Namespace string -} - -// config returns a complete clientConfig for constructing clients. This is separate in anticipation of composition -// which means that not all clientsets are known here -func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, error) { - clientConfig := restclient.AnonymousClientConfig(b.ClientConfig) - - // we need the SA UID to find a matching SA token - sa, err := b.CoreClient.ServiceAccounts(b.Namespace).Get(name) - if err != nil && !apierrors.IsNotFound(err) { - return nil, err - } else if apierrors.IsNotFound(err) { - // check to see if the namespace exists. If it isn't a NotFound, just try to create the SA. - // It'll probably fail, but perhaps that will have a better message. - if _, err := b.CoreClient.Namespaces().Get(b.Namespace); apierrors.IsNotFound(err) { - _, err = b.CoreClient.Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: b.Namespace}}) - if err != nil && !apierrors.IsAlreadyExists(err) { - return nil, err - } - } - - sa, err = b.CoreClient.ServiceAccounts(b.Namespace).Create( - &api.ServiceAccount{ObjectMeta: api.ObjectMeta{Namespace: b.Namespace, Name: name}}) - if err != nil { - return nil, err - } - } - - lw := &cache.ListWatch{ - ListFunc: func(options api.ListOptions) (runtime.Object, error) { - options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)}) - return b.CoreClient.Secrets(b.Namespace).List(options) - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)}) - return b.CoreClient.Secrets(b.Namespace).Watch(options) - }, - } - _, err = cache.ListWatchUntil(30*time.Second, lw, - func(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, nil - case watch.Error: - return false, fmt.Errorf("error watching") - - case watch.Added, watch.Modified: - secret := event.Object.(*api.Secret) - if !serviceaccount.IsServiceAccountToken(secret, sa) || - len(secret.Data[api.ServiceAccountTokenKey]) == 0 { - return false, nil - } - // TODO maybe verify the token is valid - clientConfig.BearerToken = string(secret.Data[api.ServiceAccountTokenKey]) - restclient.AddUserAgent(clientConfig, serviceaccount.MakeUsername(b.Namespace, name)) - return true, nil - - default: - return false, fmt.Errorf("unexpected event type: %v", event.Type) - } - }) - if err != nil { - return nil, fmt.Errorf("unable to get token for service account: %v", err) - } - - return clientConfig, nil -} - -func (b SAControllerClientBuilder) Client(name string) (clientset.Interface, error) { - clientConfig, err := b.Config(name) - if err != nil { - return nil, err - } - return clientset.NewForConfig(clientConfig) -} - -func (b SAControllerClientBuilder) ClientOrDie(name string) clientset.Interface { - client, err := b.Client(name) - if err != nil { - glog.Fatal(err) - } - return client -} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go b/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go deleted file mode 100644 index 52a8667f20..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go +++ /dev/null @@ -1,144 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "fmt" - "strings" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/labels" -) - -type PodControllerRefManager struct { - podControl PodControlInterface - controllerObject api.ObjectMeta - controllerSelector labels.Selector - controllerKind unversioned.GroupVersionKind -} - -// NewPodControllerRefManager returns a PodControllerRefManager that exposes -// methods to manage the controllerRef of pods. -func NewPodControllerRefManager( - podControl PodControlInterface, - controllerObject api.ObjectMeta, - controllerSelector labels.Selector, - controllerKind unversioned.GroupVersionKind, -) *PodControllerRefManager { - return &PodControllerRefManager{podControl, controllerObject, controllerSelector, controllerKind} -} - -// Classify first filters out inactive pods, then it classify the remaining pods -// into three categories: 1. matchesAndControlled are the pods whose labels -// match the selector of the RC, and have a controllerRef pointing to the -// controller 2. matchesNeedsController are the pods whose labels match the RC, -// but don't have a controllerRef. (Pods with matching labels but with a -// controllerRef pointing to other object are ignored) 3. controlledDoesNotMatch -// are the pods that have a controllerRef pointing to the controller, but their -// labels no longer match the selector. -func (m *PodControllerRefManager) Classify(pods []*api.Pod) ( - matchesAndControlled []*api.Pod, - matchesNeedsController []*api.Pod, - controlledDoesNotMatch []*api.Pod) { - for i := range pods { - pod := pods[i] - if !IsPodActive(pod) { - glog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v", - pod.Namespace, pod.Name, pod.Status.Phase, pod.DeletionTimestamp) - continue - } - controllerRef := getControllerOf(pod.ObjectMeta) - if controllerRef != nil { - if controllerRef.UID == m.controllerObject.UID { - // already controlled - if m.controllerSelector.Matches(labels.Set(pod.Labels)) { - matchesAndControlled = append(matchesAndControlled, pod) - } else { - controlledDoesNotMatch = append(controlledDoesNotMatch, pod) - } - } else { - // ignoring the pod controlled by other controller - glog.V(4).Infof("Ignoring pod %v/%v, it's owned by [%s/%s, name: %s, uid: %s]", - pod.Namespace, pod.Name, controllerRef.APIVersion, controllerRef.Kind, controllerRef.Name, controllerRef.UID) - continue - } - } else { - if !m.controllerSelector.Matches(labels.Set(pod.Labels)) { - continue - } - matchesNeedsController = append(matchesNeedsController, pod) - } - } - return matchesAndControlled, matchesNeedsController, controlledDoesNotMatch -} - -// getControllerOf returns the controllerRef if controllee has a controller, -// otherwise returns nil. 
-func getControllerOf(controllee api.ObjectMeta) *api.OwnerReference { - for _, owner := range controllee.OwnerReferences { - // controlled by other controller - if owner.Controller != nil && *owner.Controller == true { - return &owner - } - } - return nil -} - -// AdoptPod sends a patch to take control of the pod. It returns the error if -// the patching fails. -func (m *PodControllerRefManager) AdoptPod(pod *api.Pod) error { - // we should not adopt any pods if the controller is about to be deleted - if m.controllerObject.DeletionTimestamp != nil { - return fmt.Errorf("cancel the adopt attempt for pod %s because the controlller is being deleted", - strings.Join([]string{pod.Namespace, pod.Name, string(pod.UID)}, "_")) - } - addControllerPatch := fmt.Sprintf( - `{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true}],"uid":"%s"}}`, - m.controllerKind.GroupVersion(), m.controllerKind.Kind, - m.controllerObject.Name, m.controllerObject.UID, pod.UID) - return m.podControl.PatchPod(pod.Namespace, pod.Name, []byte(addControllerPatch)) -} - -// ReleasePod sends a patch to free the pod from the control of the controller. -// It returns the error if the patching fails. 404 and 422 errors are ignored. -func (m *PodControllerRefManager) ReleasePod(pod *api.Pod) error { - glog.V(2).Infof("patching pod %s_%s to remove its controllerRef to %s/%s:%s", - pod.Namespace, pod.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.controllerObject.Name) - deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.controllerObject.UID, pod.UID) - err := m.podControl.PatchPod(pod.Namespace, pod.Name, []byte(deleteOwnerRefPatch)) - if err != nil { - if errors.IsNotFound(err) { - // If the pod no longer exists, ignore it. - return nil - } - if errors.IsInvalid(err) { - // Invalid error will be returned in two cases: 1. the pod - // has no owner reference, 2. the uid of the pod doesn't - // match, which means the pod is deleted and then recreated. - // In both cases, the error can be ignored. - - // TODO: If the pod has owner references, but none of them - // has the owner.UID, server will silently ignore the patch. - // Investigate why. - return nil - } - } - return err -} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go deleted file mode 100644 index 11f871a8b0..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go +++ /dev/null @@ -1,799 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/validation" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/cache" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/client/record" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/clock" - "k8s.io/kubernetes/pkg/util/integer" - "k8s.io/kubernetes/pkg/util/sets" -) - -const ( - // If a watch drops a delete event for a pod, it'll take this long - // before a dormant controller waiting for those packets is woken up anyway. It is - // specifically targeted at the case where some problem prevents an update - // of expectations, without it the controller could stay asleep forever. This should - // be set based on the expected latency of watch events. - // - // Currently a controller can service (create *and* observe the watch events for said - // creation) about 10 pods a second, so it takes about 1 min to service - // 500 pods. Just creation is limited to 20qps, and watching happens with ~10-30s - // latency/pod at the scale of 3000 pods over 100 nodes. - ExpectationsTimeout = 5 * time.Minute -) - -var ( - KeyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc -) - -type ResyncPeriodFunc func() time.Duration - -// Returns 0 for resyncPeriod in case resyncing is not needed. -func NoResyncPeriodFunc() time.Duration { - return 0 -} - -// StaticResyncPeriodFunc returns the resync period specified -func StaticResyncPeriodFunc(resyncPeriod time.Duration) ResyncPeriodFunc { - return func() time.Duration { - return resyncPeriod - } -} - -// Expectations are a way for controllers to tell the controller manager what they expect. eg: -// ControllerExpectations: { -// controller1: expects 2 adds in 2 minutes -// controller2: expects 2 dels in 2 minutes -// controller3: expects -1 adds in 2 minutes => controller3's expectations have already been met -// } -// -// Implementation: -// ControlleeExpectation = pair of atomic counters to track controllee's creation/deletion -// ControllerExpectationsStore = TTLStore + a ControlleeExpectation per controller -// -// * Once set expectations can only be lowered -// * A controller isn't synced till its expectations are either fulfilled, or expire -// * Controllers that don't set expectations will get woken up for every matching controllee - -// ExpKeyFunc to parse out the key from a ControlleeExpectation -var ExpKeyFunc = func(obj interface{}) (string, error) { - if e, ok := obj.(*ControlleeExpectations); ok { - return e.key, nil - } - return "", fmt.Errorf("Could not find key for obj %#v", obj) -} - -// ControllerExpectationsInterface is an interface that allows users to set and wait on expectations. -// Only abstracted out for testing. -// Warning: if using KeyFunc it is not safe to use a single ControllerExpectationsInterface with different -// types of controllers, because the keys might conflict across types. 
-type ControllerExpectationsInterface interface { - GetExpectations(controllerKey string) (*ControlleeExpectations, bool, error) - SatisfiedExpectations(controllerKey string) bool - DeleteExpectations(controllerKey string) - SetExpectations(controllerKey string, add, del int) error - ExpectCreations(controllerKey string, adds int) error - ExpectDeletions(controllerKey string, dels int) error - CreationObserved(controllerKey string) - DeletionObserved(controllerKey string) - RaiseExpectations(controllerKey string, add, del int) - LowerExpectations(controllerKey string, add, del int) -} - -// ControllerExpectations is a cache mapping controllers to what they expect to see before being woken up for a sync. -type ControllerExpectations struct { - cache.Store -} - -// GetExpectations returns the ControlleeExpectations of the given controller. -func (r *ControllerExpectations) GetExpectations(controllerKey string) (*ControlleeExpectations, bool, error) { - if exp, exists, err := r.GetByKey(controllerKey); err == nil && exists { - return exp.(*ControlleeExpectations), true, nil - } else { - return nil, false, err - } -} - -// DeleteExpectations deletes the expectations of the given controller from the TTLStore. -func (r *ControllerExpectations) DeleteExpectations(controllerKey string) { - if exp, exists, err := r.GetByKey(controllerKey); err == nil && exists { - if err := r.Delete(exp); err != nil { - glog.V(2).Infof("Error deleting expectations for controller %v: %v", controllerKey, err) - } - } -} - -// SatisfiedExpectations returns true if the required adds/dels for the given controller have been observed. -// Add/del counts are established by the controller at sync time, and updated as controllees are observed by the controller -// manager. -func (r *ControllerExpectations) SatisfiedExpectations(controllerKey string) bool { - if exp, exists, err := r.GetExpectations(controllerKey); exists { - if exp.Fulfilled() { - return true - } else if exp.isExpired() { - glog.V(4).Infof("Controller expectations expired %#v", exp) - return true - } else { - glog.V(4).Infof("Controller still waiting on expectations %#v", exp) - return false - } - } else if err != nil { - glog.V(2).Infof("Error encountered while checking expectations %#v, forcing sync", err) - } else { - // When a new controller is created, it doesn't have expectations. - // When it doesn't see expected watch events for > TTL, the expectations expire. - // - In this case it wakes up, creates/deletes controllees, and sets expectations again. - // When it has satisfied expectations and no controllees need to be created/destroyed > TTL, the expectations expire. - // - In this case it continues without setting expectations till it needs to create/delete controllees. - glog.V(4).Infof("Controller %v either never recorded expectations, or the ttl expired.", controllerKey) - } - // Trigger a sync if we either encountered and error (which shouldn't happen since we're - // getting from local store) or this controller hasn't established expectations. - return true -} - -// TODO: Extend ExpirationCache to support explicit expiration. -// TODO: Make this possible to disable in tests. -// TODO: Support injection of clock. -func (exp *ControlleeExpectations) isExpired() bool { - return clock.RealClock{}.Since(exp.timestamp) > ExpectationsTimeout -} - -// SetExpectations registers new expectations for the given controller. Forgets existing expectations. 
-func (r *ControllerExpectations) SetExpectations(controllerKey string, add, del int) error { - exp := &ControlleeExpectations{add: int64(add), del: int64(del), key: controllerKey, timestamp: clock.RealClock{}.Now()} - glog.V(4).Infof("Setting expectations %#v", exp) - return r.Add(exp) -} - -func (r *ControllerExpectations) ExpectCreations(controllerKey string, adds int) error { - return r.SetExpectations(controllerKey, adds, 0) -} - -func (r *ControllerExpectations) ExpectDeletions(controllerKey string, dels int) error { - return r.SetExpectations(controllerKey, 0, dels) -} - -// Decrements the expectation counts of the given controller. -func (r *ControllerExpectations) LowerExpectations(controllerKey string, add, del int) { - if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists { - exp.Add(int64(-add), int64(-del)) - // The expectations might've been modified since the update on the previous line. - glog.V(4).Infof("Lowered expectations %#v", exp) - } -} - -// Increments the expectation counts of the given controller. -func (r *ControllerExpectations) RaiseExpectations(controllerKey string, add, del int) { - if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists { - exp.Add(int64(add), int64(del)) - // The expectations might've been modified since the update on the previous line. - glog.V(4).Infof("Raised expectations %#v", exp) - } -} - -// CreationObserved atomically decrements the `add` expectation count of the given controller. -func (r *ControllerExpectations) CreationObserved(controllerKey string) { - r.LowerExpectations(controllerKey, 1, 0) -} - -// DeletionObserved atomically decrements the `del` expectation count of the given controller. -func (r *ControllerExpectations) DeletionObserved(controllerKey string) { - r.LowerExpectations(controllerKey, 0, 1) -} - -// Expectations are either fulfilled, or expire naturally. -type Expectations interface { - Fulfilled() bool -} - -// ControlleeExpectations track controllee creates/deletes. -type ControlleeExpectations struct { - // Important: Since these two int64 fields are using sync/atomic, they have to be at the top of the struct due to a bug on 32-bit platforms - // See: https://golang.org/pkg/sync/atomic/ for more information - add int64 - del int64 - key string - timestamp time.Time -} - -// Add increments the add and del counters. -func (e *ControlleeExpectations) Add(add, del int64) { - atomic.AddInt64(&e.add, add) - atomic.AddInt64(&e.del, del) -} - -// Fulfilled returns true if this expectation has been fulfilled. -func (e *ControlleeExpectations) Fulfilled() bool { - // TODO: think about why this line being atomic doesn't matter - return atomic.LoadInt64(&e.add) <= 0 && atomic.LoadInt64(&e.del) <= 0 -} - -// GetExpectations returns the add and del expectations of the controllee. -func (e *ControlleeExpectations) GetExpectations() (int64, int64) { - return atomic.LoadInt64(&e.add), atomic.LoadInt64(&e.del) -} - -// NewControllerExpectations returns a store for ControllerExpectations. -func NewControllerExpectations() *ControllerExpectations { - return &ControllerExpectations{cache.NewStore(ExpKeyFunc)} -} - -// UIDSetKeyFunc to parse out the key from a UIDSet. -var UIDSetKeyFunc = func(obj interface{}) (string, error) { - if u, ok := obj.(*UIDSet); ok { - return u.key, nil - } - return "", fmt.Errorf("Could not find key for obj %#v", obj) -} - -// UIDSet holds a key and a set of UIDs. 
Used by the -// UIDTrackingControllerExpectations to remember which UID it has seen/still -// waiting for. -type UIDSet struct { - sets.String - key string -} - -// UIDTrackingControllerExpectations tracks the UID of the pods it deletes. -// This cache is needed over plain old expectations to safely handle graceful -// deletion. The desired behavior is to treat an update that sets the -// DeletionTimestamp on an object as a delete. To do so consistenly, one needs -// to remember the expected deletes so they aren't double counted. -// TODO: Track creates as well (#22599) -type UIDTrackingControllerExpectations struct { - ControllerExpectationsInterface - // TODO: There is a much nicer way to do this that involves a single store, - // a lock per entry, and a ControlleeExpectationsInterface type. - uidStoreLock sync.Mutex - // Store used for the UIDs associated with any expectation tracked via the - // ControllerExpectationsInterface. - uidStore cache.Store -} - -// GetUIDs is a convenience method to avoid exposing the set of expected uids. -// The returned set is not thread safe, all modifications must be made holding -// the uidStoreLock. -func (u *UIDTrackingControllerExpectations) GetUIDs(controllerKey string) sets.String { - if uid, exists, err := u.uidStore.GetByKey(controllerKey); err == nil && exists { - return uid.(*UIDSet).String - } - return nil -} - -// ExpectDeletions records expectations for the given deleteKeys, against the given controller. -func (u *UIDTrackingControllerExpectations) ExpectDeletions(rcKey string, deletedKeys []string) error { - u.uidStoreLock.Lock() - defer u.uidStoreLock.Unlock() - - if existing := u.GetUIDs(rcKey); existing != nil && existing.Len() != 0 { - glog.Errorf("Clobbering existing delete keys: %+v", existing) - } - expectedUIDs := sets.NewString() - for _, k := range deletedKeys { - expectedUIDs.Insert(k) - } - glog.V(4).Infof("Controller %v waiting on deletions for: %+v", rcKey, deletedKeys) - if err := u.uidStore.Add(&UIDSet{expectedUIDs, rcKey}); err != nil { - return err - } - return u.ControllerExpectationsInterface.ExpectDeletions(rcKey, expectedUIDs.Len()) -} - -// DeletionObserved records the given deleteKey as a deletion, for the given rc. -func (u *UIDTrackingControllerExpectations) DeletionObserved(rcKey, deleteKey string) { - u.uidStoreLock.Lock() - defer u.uidStoreLock.Unlock() - - uids := u.GetUIDs(rcKey) - if uids != nil && uids.Has(deleteKey) { - glog.V(4).Infof("Controller %v received delete for pod %v", rcKey, deleteKey) - u.ControllerExpectationsInterface.DeletionObserved(rcKey) - uids.Delete(deleteKey) - } -} - -// DeleteExpectations deletes the UID set and invokes DeleteExpectations on the -// underlying ControllerExpectationsInterface. -func (u *UIDTrackingControllerExpectations) DeleteExpectations(rcKey string) { - u.uidStoreLock.Lock() - defer u.uidStoreLock.Unlock() - - u.ControllerExpectationsInterface.DeleteExpectations(rcKey) - if uidExp, exists, err := u.uidStore.GetByKey(rcKey); err == nil && exists { - if err := u.uidStore.Delete(uidExp); err != nil { - glog.V(2).Infof("Error deleting uid expectations for controller %v: %v", rcKey, err) - } - } -} - -// NewUIDTrackingControllerExpectations returns a wrapper around -// ControllerExpectations that is aware of deleteKeys. 
-func NewUIDTrackingControllerExpectations(ce ControllerExpectationsInterface) *UIDTrackingControllerExpectations { - return &UIDTrackingControllerExpectations{ControllerExpectationsInterface: ce, uidStore: cache.NewStore(UIDSetKeyFunc)} -} - -// Reasons for pod events -const ( - // FailedCreatePodReason is added in an event and in a replica set condition - // when a pod for a replica set is failed to be created. - FailedCreatePodReason = "FailedCreate" - // SuccessfulCreatePodReason is added in an event when a pod for a replica set - // is successfully created. - SuccessfulCreatePodReason = "SuccessfulCreate" - // FailedDeletePodReason is added in an event and in a replica set condition - // when a pod for a replica set is failed to be deleted. - FailedDeletePodReason = "FailedDelete" - // SuccessfulDeletePodReason is added in an event when a pod for a replica set - // is successfully deleted. - SuccessfulDeletePodReason = "SuccessfulDelete" -) - -// PodControlInterface is an interface that knows how to add or delete pods -// created as an interface to allow testing. -type PodControlInterface interface { - // CreatePods creates new pods according to the spec. - CreatePods(namespace string, template *api.PodTemplateSpec, object runtime.Object) error - // CreatePodsOnNode creates a new pod according to the spec on the specified node. - CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error - // CreatePodsWithControllerRef creates new pods according to the spec, and sets object as the pod's controller. - CreatePodsWithControllerRef(namespace string, template *api.PodTemplateSpec, object runtime.Object, controllerRef *api.OwnerReference) error - // DeletePod deletes the pod identified by podID. - DeletePod(namespace string, podID string, object runtime.Object) error - // PatchPod patches the pod. - PatchPod(namespace, name string, data []byte) error -} - -// RealPodControl is the default implementation of PodControlInterface. -type RealPodControl struct { - KubeClient clientset.Interface - Recorder record.EventRecorder -} - -var _ PodControlInterface = &RealPodControl{} - -func getPodsLabelSet(template *api.PodTemplateSpec) labels.Set { - desiredLabels := make(labels.Set) - for k, v := range template.Labels { - desiredLabels[k] = v - } - return desiredLabels -} - -func getPodsFinalizers(template *api.PodTemplateSpec) []string { - desiredFinalizers := make([]string, len(template.Finalizers)) - copy(desiredFinalizers, template.Finalizers) - return desiredFinalizers -} - -func getPodsAnnotationSet(template *api.PodTemplateSpec, object runtime.Object) (labels.Set, error) { - desiredAnnotations := make(labels.Set) - for k, v := range template.Annotations { - desiredAnnotations[k] = v - } - createdByRef, err := api.GetReference(object) - if err != nil { - return desiredAnnotations, fmt.Errorf("unable to get controller reference: %v", err) - } - - // TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients - // would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment. - // We need to consistently handle this case of annotation versioning. 
- codec := api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: api.GroupName, Version: "v1"}) - - createdByRefJson, err := runtime.Encode(codec, &api.SerializedReference{ - Reference: *createdByRef, - }) - if err != nil { - return desiredAnnotations, fmt.Errorf("unable to serialize controller reference: %v", err) - } - desiredAnnotations[api.CreatedByAnnotation] = string(createdByRefJson) - return desiredAnnotations, nil -} - -func getPodsPrefix(controllerName string) string { - // use the dash (if the name isn't too long) to make the pod name a bit prettier - prefix := fmt.Sprintf("%s-", controllerName) - if len(validation.ValidatePodName(prefix, true)) != 0 { - prefix = controllerName - } - return prefix -} - -func (r RealPodControl) CreatePods(namespace string, template *api.PodTemplateSpec, object runtime.Object) error { - return r.createPods("", namespace, template, object, nil) -} - -func (r RealPodControl) CreatePodsWithControllerRef(namespace string, template *api.PodTemplateSpec, controllerObject runtime.Object, controllerRef *api.OwnerReference) error { - if controllerRef == nil { - return fmt.Errorf("controllerRef is nil") - } - if len(controllerRef.APIVersion) == 0 { - return fmt.Errorf("controllerRef has empty APIVersion") - } - if len(controllerRef.Kind) == 0 { - return fmt.Errorf("controllerRef has empty Kind") - } - if controllerRef.Controller == nil || *controllerRef.Controller != true { - return fmt.Errorf("controllerRef.Controller is not set") - } - return r.createPods("", namespace, template, controllerObject, controllerRef) -} - -func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error { - return r.createPods(nodeName, namespace, template, object, nil) -} - -func (r RealPodControl) PatchPod(namespace, name string, data []byte) error { - _, err := r.KubeClient.Core().Pods(namespace).Patch(name, api.StrategicMergePatchType, data) - return err -} - -func GetPodFromTemplate(template *api.PodTemplateSpec, parentObject runtime.Object, controllerRef *api.OwnerReference) (*api.Pod, error) { - desiredLabels := getPodsLabelSet(template) - desiredFinalizers := getPodsFinalizers(template) - desiredAnnotations, err := getPodsAnnotationSet(template, parentObject) - if err != nil { - return nil, err - } - accessor, err := meta.Accessor(parentObject) - if err != nil { - return nil, fmt.Errorf("parentObject does not have ObjectMeta, %v", err) - } - prefix := getPodsPrefix(accessor.GetName()) - - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ - Labels: desiredLabels, - Annotations: desiredAnnotations, - GenerateName: prefix, - Finalizers: desiredFinalizers, - }, - } - if controllerRef != nil { - pod.OwnerReferences = append(pod.OwnerReferences, *controllerRef) - } - if err := api.Scheme.Convert(&template.Spec, &pod.Spec, nil); err != nil { - return nil, fmt.Errorf("unable to convert pod template: %v", err) - } - return pod, nil -} - -func (r RealPodControl) createPods(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object, controllerRef *api.OwnerReference) error { - pod, err := GetPodFromTemplate(template, object, controllerRef) - if err != nil { - return err - } - if len(nodeName) != 0 { - pod.Spec.NodeName = nodeName - } - if labels.Set(pod.Labels).AsSelectorPreValidated().Empty() { - return fmt.Errorf("unable to create pods, no labels") - } - if newPod, err := r.KubeClient.Core().Pods(namespace).Create(pod); err != nil { - r.Recorder.Eventf(object, api.EventTypeWarning, 
FailedCreatePodReason, "Error creating: %v", err) - return fmt.Errorf("unable to create pods: %v", err) - } else { - accessor, err := meta.Accessor(object) - if err != nil { - glog.Errorf("parentObject does not have ObjectMeta, %v", err) - return nil - } - glog.V(4).Infof("Controller %v created pod %v", accessor.GetName(), newPod.Name) - r.Recorder.Eventf(object, api.EventTypeNormal, SuccessfulCreatePodReason, "Created pod: %v", newPod.Name) - } - return nil -} - -func (r RealPodControl) DeletePod(namespace string, podID string, object runtime.Object) error { - accessor, err := meta.Accessor(object) - if err != nil { - return fmt.Errorf("object does not have ObjectMeta, %v", err) - } - glog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID) - if err := r.KubeClient.Core().Pods(namespace).Delete(podID, nil); err != nil { - r.Recorder.Eventf(object, api.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err) - return fmt.Errorf("unable to delete pods: %v", err) - } else { - r.Recorder.Eventf(object, api.EventTypeNormal, SuccessfulDeletePodReason, "Deleted pod: %v", podID) - } - return nil -} - -type FakePodControl struct { - sync.Mutex - Templates []api.PodTemplateSpec - ControllerRefs []api.OwnerReference - DeletePodName []string - Patches [][]byte - Err error -} - -var _ PodControlInterface = &FakePodControl{} - -func (f *FakePodControl) PatchPod(namespace, name string, data []byte) error { - f.Lock() - defer f.Unlock() - f.Patches = append(f.Patches, data) - if f.Err != nil { - return f.Err - } - return nil -} - -func (f *FakePodControl) CreatePods(namespace string, spec *api.PodTemplateSpec, object runtime.Object) error { - f.Lock() - defer f.Unlock() - f.Templates = append(f.Templates, *spec) - if f.Err != nil { - return f.Err - } - return nil -} - -func (f *FakePodControl) CreatePodsWithControllerRef(namespace string, spec *api.PodTemplateSpec, object runtime.Object, controllerRef *api.OwnerReference) error { - f.Lock() - defer f.Unlock() - f.Templates = append(f.Templates, *spec) - f.ControllerRefs = append(f.ControllerRefs, *controllerRef) - if f.Err != nil { - return f.Err - } - return nil -} - -func (f *FakePodControl) CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error { - f.Lock() - defer f.Unlock() - f.Templates = append(f.Templates, *template) - if f.Err != nil { - return f.Err - } - return nil -} - -func (f *FakePodControl) DeletePod(namespace string, podID string, object runtime.Object) error { - f.Lock() - defer f.Unlock() - f.DeletePodName = append(f.DeletePodName, podID) - if f.Err != nil { - return f.Err - } - return nil -} - -func (f *FakePodControl) Clear() { - f.Lock() - defer f.Unlock() - f.DeletePodName = []string{} - f.Templates = []api.PodTemplateSpec{} - f.ControllerRefs = []api.OwnerReference{} - f.Patches = [][]byte{} -} - -// ByLogging allows custom sorting of pods so the best one can be picked for getting its logs. -type ByLogging []*api.Pod - -func (s ByLogging) Len() int { return len(s) } -func (s ByLogging) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -func (s ByLogging) Less(i, j int) bool { - // 1. assigned < unassigned - if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) { - return len(s[i].Spec.NodeName) > 0 - } - // 2. 
PodRunning < PodUnknown < PodPending - m := map[api.PodPhase]int{api.PodRunning: 0, api.PodUnknown: 1, api.PodPending: 2} - if m[s[i].Status.Phase] != m[s[j].Status.Phase] { - return m[s[i].Status.Phase] < m[s[j].Status.Phase] - } - // 3. ready < not ready - if api.IsPodReady(s[i]) != api.IsPodReady(s[j]) { - return api.IsPodReady(s[i]) - } - // TODO: take availability into account when we push minReadySeconds information from deployment into pods, - // see https://github.com/kubernetes/kubernetes/issues/22065 - // 4. Been ready for more time < less time < empty time - if api.IsPodReady(s[i]) && api.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { - return afterOrZero(podReadyTime(s[j]), podReadyTime(s[i])) - } - // 5. Pods with containers with higher restart counts < lower restart counts - if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) { - return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j]) - } - // 6. older pods < newer pods < empty timestamp pods - if !s[i].CreationTimestamp.Equal(s[j].CreationTimestamp) { - return afterOrZero(s[j].CreationTimestamp, s[i].CreationTimestamp) - } - return false -} - -// ActivePods type allows custom sorting of pods so a controller can pick the best ones to delete. -type ActivePods []*api.Pod - -func (s ActivePods) Len() int { return len(s) } -func (s ActivePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -func (s ActivePods) Less(i, j int) bool { - // 1. Unassigned < assigned - // If only one of the pods is unassigned, the unassigned one is smaller - if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) { - return len(s[i].Spec.NodeName) == 0 - } - // 2. PodPending < PodUnknown < PodRunning - m := map[api.PodPhase]int{api.PodPending: 0, api.PodUnknown: 1, api.PodRunning: 2} - if m[s[i].Status.Phase] != m[s[j].Status.Phase] { - return m[s[i].Status.Phase] < m[s[j].Status.Phase] - } - // 3. Not ready < ready - // If only one of the pods is not ready, the not ready one is smaller - if api.IsPodReady(s[i]) != api.IsPodReady(s[j]) { - return !api.IsPodReady(s[i]) - } - // TODO: take availability into account when we push minReadySeconds information from deployment into pods, - // see https://github.com/kubernetes/kubernetes/issues/22065 - // 4. Been ready for empty time < less time < more time - // If both pods are ready, the latest ready one is smaller - if api.IsPodReady(s[i]) && api.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { - return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j])) - } - // 5. Pods with containers with higher restart counts < lower restart counts - if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) { - return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j]) - } - // 6. Empty creation time pods < newer pods < older pods - if !s[i].CreationTimestamp.Equal(s[j].CreationTimestamp) { - return afterOrZero(s[i].CreationTimestamp, s[j].CreationTimestamp) - } - return false -} - -// afterOrZero checks if time t1 is after time t2; if one of them -// is zero, the zero time is seen as after non-zero time. 
-func afterOrZero(t1, t2 unversioned.Time) bool { - if t1.Time.IsZero() || t2.Time.IsZero() { - return t1.Time.IsZero() - } - return t1.After(t2.Time) -} - -func podReadyTime(pod *api.Pod) unversioned.Time { - if api.IsPodReady(pod) { - for _, c := range pod.Status.Conditions { - // we only care about pod ready conditions - if c.Type == api.PodReady && c.Status == api.ConditionTrue { - return c.LastTransitionTime - } - } - } - return unversioned.Time{} -} - -func maxContainerRestarts(pod *api.Pod) int { - maxRestarts := 0 - for _, c := range pod.Status.ContainerStatuses { - maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount)) - } - return maxRestarts -} - -// FilterActivePods returns pods that have not terminated. -func FilterActivePods(pods []*api.Pod) []*api.Pod { - var result []*api.Pod - for _, p := range pods { - if IsPodActive(p) { - result = append(result, p) - } else { - glog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v", - p.Namespace, p.Name, p.Status.Phase, p.DeletionTimestamp) - } - } - return result -} - -func IsPodActive(p *api.Pod) bool { - return api.PodSucceeded != p.Status.Phase && - api.PodFailed != p.Status.Phase && - p.DeletionTimestamp == nil -} - -// FilterActiveReplicaSets returns replica sets that have (or at least ought to have) pods. -func FilterActiveReplicaSets(replicaSets []*extensions.ReplicaSet) []*extensions.ReplicaSet { - active := []*extensions.ReplicaSet{} - for i := range replicaSets { - rs := replicaSets[i] - - if rs != nil && rs.Spec.Replicas > 0 { - active = append(active, replicaSets[i]) - } - } - return active -} - -// PodKey returns a key unique to the given pod within a cluster. -// It's used so we consistently use the same key scheme in this module. -// It does exactly what cache.MetaNamespaceKeyFunc would have done -// except there's not possibility for error since we know the exact type. -func PodKey(pod *api.Pod) string { - return fmt.Sprintf("%v/%v", pod.Namespace, pod.Name) -} - -// ControllersByCreationTimestamp sorts a list of ReplicationControllers by creation timestamp, using their names as a tie breaker. -type ControllersByCreationTimestamp []*api.ReplicationController - -func (o ControllersByCreationTimestamp) Len() int { return len(o) } -func (o ControllersByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } -func (o ControllersByCreationTimestamp) Less(i, j int) bool { - if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) { - return o[i].Name < o[j].Name - } - return o[i].CreationTimestamp.Before(o[j].CreationTimestamp) -} - -// ReplicaSetsByCreationTimestamp sorts a list of ReplicaSet by creation timestamp, using their names as a tie breaker. -type ReplicaSetsByCreationTimestamp []*extensions.ReplicaSet - -func (o ReplicaSetsByCreationTimestamp) Len() int { return len(o) } -func (o ReplicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } -func (o ReplicaSetsByCreationTimestamp) Less(i, j int) bool { - if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) { - return o[i].Name < o[j].Name - } - return o[i].CreationTimestamp.Before(o[j].CreationTimestamp) -} - -// ReplicaSetsBySizeOlder sorts a list of ReplicaSet by size in descending order, using their creation timestamp or name as a tie breaker. -// By using the creation timestamp, this sorts from old to new replica sets. 
-type ReplicaSetsBySizeOlder []*extensions.ReplicaSet - -func (o ReplicaSetsBySizeOlder) Len() int { return len(o) } -func (o ReplicaSetsBySizeOlder) Swap(i, j int) { o[i], o[j] = o[j], o[i] } -func (o ReplicaSetsBySizeOlder) Less(i, j int) bool { - if o[i].Spec.Replicas == o[j].Spec.Replicas { - return ReplicaSetsByCreationTimestamp(o).Less(i, j) - } - return o[i].Spec.Replicas > o[j].Spec.Replicas -} - -// ReplicaSetsBySizeNewer sorts a list of ReplicaSet by size in descending order, using their creation timestamp or name as a tie breaker. -// By using the creation timestamp, this sorts from new to old replica sets. -type ReplicaSetsBySizeNewer []*extensions.ReplicaSet - -func (o ReplicaSetsBySizeNewer) Len() int { return len(o) } -func (o ReplicaSetsBySizeNewer) Swap(i, j int) { o[i], o[j] = o[j], o[i] } -func (o ReplicaSetsBySizeNewer) Less(i, j int) bool { - if o[i].Spec.Replicas == o[j].Spec.Replicas { - return ReplicaSetsByCreationTimestamp(o).Less(j, i) - } - return o[i].Spec.Replicas > o[j].Spec.Replicas -} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD deleted file mode 100644 index 22a5a92bdf..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD +++ /dev/null @@ -1,52 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["deployment_util.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/annotations:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/controller:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/errors:go_default_library", - "//pkg/util/integer:go_default_library", - "//pkg/util/intstr:go_default_library", - "//pkg/util/labels:go_default_library", - "//pkg/util/pod:go_default_library", - "//pkg/util/wait:go_default_library", - "//vendor:github.com/golang/glog", - ], -) - -go_test( - name = "go_default_test", - srcs = ["deployment_util_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", - "//pkg/client/testing/core:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/intstr:go_default_library", - "//vendor:github.com/stretchr/testify/assert", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go b/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go deleted file mode 100644 index b520eda583..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go +++ /dev/null @@ -1,1009 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "fmt" - "sort" - "strconv" - "strings" - "time" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/annotations" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/extensions" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/integer" - intstrutil "k8s.io/kubernetes/pkg/util/intstr" - labelsutil "k8s.io/kubernetes/pkg/util/labels" - podutil "k8s.io/kubernetes/pkg/util/pod" - "k8s.io/kubernetes/pkg/util/wait" -) - -const ( - // RevisionAnnotation is the revision annotation of a deployment's replica sets which records its rollout sequence - RevisionAnnotation = "deployment.kubernetes.io/revision" - // RevisionHistoryAnnotation maintains the history of all old revisions that a replica set has served for a deployment. - RevisionHistoryAnnotation = "deployment.kubernetes.io/revision-history" - // DesiredReplicasAnnotation is the desired replicas for a deployment recorded as an annotation - // in its replica sets. Helps in separating scaling events from the rollout process and for - // determining if the new replica set for a deployment is really saturated. - DesiredReplicasAnnotation = "deployment.kubernetes.io/desired-replicas" - // MaxReplicasAnnotation is the maximum replicas a deployment can have at a given point, which - // is deployment.spec.replicas + maxSurge. Used by the underlying replica sets to estimate their - // proportions in case the deployment has surge replicas. - MaxReplicasAnnotation = "deployment.kubernetes.io/max-replicas" - - // RollbackRevisionNotFound is not found rollback event reason - RollbackRevisionNotFound = "DeploymentRollbackRevisionNotFound" - // RollbackTemplateUnchanged is the template unchanged rollback event reason - RollbackTemplateUnchanged = "DeploymentRollbackTemplateUnchanged" - // RollbackDone is the done rollback event reason - RollbackDone = "DeploymentRollback" - // OverlapAnnotation marks deployments with overlapping selector with other deployments - // TODO: Delete this annotation when we gracefully handle overlapping selectors. - // See https://github.com/kubernetes/kubernetes/issues/2210 - OverlapAnnotation = "deployment.kubernetes.io/error-selector-overlapping-with" - // SelectorUpdateAnnotation marks the last time deployment selector update - // TODO: Delete this annotation when we gracefully handle overlapping selectors. - // See https://github.com/kubernetes/kubernetes/issues/2210 - SelectorUpdateAnnotation = "deployment.kubernetes.io/selector-updated-at" - - // Reasons for deployment conditions - // - // Progressing: - // - // ReplicaSetUpdatedReason is added in a deployment when one of its replica sets is updated as part - // of the rollout process. - ReplicaSetUpdatedReason = "ReplicaSetUpdated" - // FailedRSCreateReason is added in a deployment when it cannot create a new replica set. 
- FailedRSCreateReason = "ReplicaSetCreateError" - // NewReplicaSetReason is added in a deployment when it creates a new replica set. - NewReplicaSetReason = "NewReplicaSetCreated" - // FoundNewRSReason is added in a deployment when it adopts an existing replica set. - FoundNewRSReason = "FoundNewReplicaSet" - // NewRSAvailableReason is added in a deployment when its newest replica set is made available - // ie. the number of new pods that have passed readiness checks and run for at least minReadySeconds - // is at least the minimum available pods that need to run for the deployment. - NewRSAvailableReason = "NewReplicaSetAvailable" - // TimedOutReason is added in a deployment when its newest replica set fails to show any progress - // within the given deadline (progressDeadlineSeconds). - TimedOutReason = "ProgressDeadlineExceeded" - // PausedDeployReason is added in a deployment when it is paused. Lack of progress shouldn't be - // estimated once a deployment is paused. - PausedDeployReason = "DeploymentPaused" - // ResumedDeployReason is added in a deployment when it is resumed. Useful for not failing accidentally - // deployments that paused amidst a rollout and are bounded by a deadline. - ResumedDeployReason = "DeploymentResumed" - // - // Available: - // - // MinimumReplicasAvailable is added in a deployment when it has its minimum replicas required available. - MinimumReplicasAvailable = "MinimumReplicasAvailable" - // MinimumReplicasUnavailable is added in a deployment when it doesn't have the minimum required replicas - // available. - MinimumReplicasUnavailable = "MinimumReplicasUnavailable" -) - -// NewDeploymentCondition creates a new deployment condition. -func NewDeploymentCondition(condType extensions.DeploymentConditionType, status api.ConditionStatus, reason, message string) *extensions.DeploymentCondition { - return &extensions.DeploymentCondition{ - Type: condType, - Status: status, - LastUpdateTime: unversioned.Now(), - LastTransitionTime: unversioned.Now(), - Reason: reason, - Message: message, - } -} - -// GetDeploymentCondition returns the condition with the provided type. -func GetDeploymentCondition(status extensions.DeploymentStatus, condType extensions.DeploymentConditionType) *extensions.DeploymentCondition { - for i := range status.Conditions { - c := status.Conditions[i] - if c.Type == condType { - return &c - } - } - return nil -} - -// SetDeploymentCondition updates the deployment to include the provided condition. If the condition that -// we are about to add already exists and has the same status and reason then we are not going to update. -func SetDeploymentCondition(status *extensions.DeploymentStatus, condition extensions.DeploymentCondition) { - currentCond := GetDeploymentCondition(*status, condition.Type) - if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason { - return - } - // Do not update lastTransitionTime if the status of the condition doesn't change. - if currentCond != nil && currentCond.Status == condition.Status { - condition.LastTransitionTime = currentCond.LastTransitionTime - } - newConditions := filterOutCondition(status.Conditions, condition.Type) - status.Conditions = append(newConditions, condition) -} - -// RemoveDeploymentCondition removes the deployment condition with the provided type. 
-func RemoveDeploymentCondition(status *extensions.DeploymentStatus, condType extensions.DeploymentConditionType) { - status.Conditions = filterOutCondition(status.Conditions, condType) -} - -// filterOutCondition returns a new slice of deployment conditions without conditions with the provided type. -func filterOutCondition(conditions []extensions.DeploymentCondition, condType extensions.DeploymentConditionType) []extensions.DeploymentCondition { - var newConditions []extensions.DeploymentCondition - for _, c := range conditions { - if c.Type == condType { - continue - } - newConditions = append(newConditions, c) - } - return newConditions -} - -// ReplicaSetToDeploymentCondition converts a replica set condition into a deployment condition. -// Useful for promoting replica set failure conditions into deployments. -func ReplicaSetToDeploymentCondition(cond extensions.ReplicaSetCondition) extensions.DeploymentCondition { - return extensions.DeploymentCondition{ - Type: extensions.DeploymentConditionType(cond.Type), - Status: cond.Status, - LastTransitionTime: cond.LastTransitionTime, - LastUpdateTime: cond.LastTransitionTime, - Reason: cond.Reason, - Message: cond.Message, - } -} - -// SetDeploymentRevision updates the revision for a deployment. -func SetDeploymentRevision(deployment *extensions.Deployment, revision string) bool { - updated := false - - if deployment.Annotations == nil { - deployment.Annotations = make(map[string]string) - } - if deployment.Annotations[RevisionAnnotation] != revision { - deployment.Annotations[RevisionAnnotation] = revision - updated = true - } - - return updated -} - -// MaxRevision finds the highest revision in the replica sets -func MaxRevision(allRSs []*extensions.ReplicaSet) int64 { - max := int64(0) - for _, rs := range allRSs { - if v, err := Revision(rs); err != nil { - // Skip the replica sets when it failed to parse their revision information - glog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs) - } else if v > max { - max = v - } - } - return max -} - -// LastRevision finds the second max revision number in all replica sets (the last revision) -func LastRevision(allRSs []*extensions.ReplicaSet) int64 { - max, secMax := int64(0), int64(0) - for _, rs := range allRSs { - if v, err := Revision(rs); err != nil { - // Skip the replica sets when it failed to parse their revision information - glog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs) - } else if v >= max { - secMax = max - max = v - } else if v > secMax { - secMax = v - } - } - return secMax -} - -// Revision returns the revision number of the input object. -func Revision(obj runtime.Object) (int64, error) { - acc, err := meta.Accessor(obj) - if err != nil { - return 0, err - } - v, ok := acc.GetAnnotations()[RevisionAnnotation] - if !ok { - return 0, nil - } - return strconv.ParseInt(v, 10, 64) -} - -// SetNewReplicaSetAnnotations sets new replica set's annotations appropriately by updating its revision and -// copying required deployment annotations to it; it returns true if replica set's annotation is changed. 
-func SetNewReplicaSetAnnotations(deployment *extensions.Deployment, newRS *extensions.ReplicaSet, newRevision string, exists bool) bool { - // First, copy deployment's annotations (except for apply and revision annotations) - annotationChanged := copyDeploymentAnnotationsToReplicaSet(deployment, newRS) - // Then, update replica set's revision annotation - if newRS.Annotations == nil { - newRS.Annotations = make(map[string]string) - } - oldRevision, ok := newRS.Annotations[RevisionAnnotation] - // The newRS's revision should be the greatest among all RSes. Usually, its revision number is newRevision (the max revision number - // of all old RSes + 1). However, it's possible that some of the old RSes are deleted after the newRS revision being updated, and - // newRevision becomes smaller than newRS's revision. We should only update newRS revision when it's smaller than newRevision. - if oldRevision < newRevision { - newRS.Annotations[RevisionAnnotation] = newRevision - annotationChanged = true - glog.V(4).Infof("Updating replica set %q revision to %s", newRS.Name, newRevision) - } - // If a revision annotation already existed and this replica set was updated with a new revision - // then that means we are rolling back to this replica set. We need to preserve the old revisions - // for historical information. - if ok && annotationChanged { - revisionHistoryAnnotation := newRS.Annotations[RevisionHistoryAnnotation] - oldRevisions := strings.Split(revisionHistoryAnnotation, ",") - if len(oldRevisions[0]) == 0 { - newRS.Annotations[RevisionHistoryAnnotation] = oldRevision - } else { - oldRevisions = append(oldRevisions, oldRevision) - newRS.Annotations[RevisionHistoryAnnotation] = strings.Join(oldRevisions, ",") - } - } - // If the new replica set is about to be created, we need to add replica annotations to it. - if !exists && SetReplicasAnnotations(newRS, deployment.Spec.Replicas, deployment.Spec.Replicas+MaxSurge(*deployment)) { - annotationChanged = true - } - return annotationChanged -} - -var annotationsToSkip = map[string]bool{ - annotations.LastAppliedConfigAnnotation: true, - RevisionAnnotation: true, - RevisionHistoryAnnotation: true, - DesiredReplicasAnnotation: true, - MaxReplicasAnnotation: true, - OverlapAnnotation: true, - SelectorUpdateAnnotation: true, -} - -// skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key -// TODO: How to decide which annotations should / should not be copied? -// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615 -func skipCopyAnnotation(key string) bool { - return annotationsToSkip[key] -} - -// copyDeploymentAnnotationsToReplicaSet copies deployment's annotations to replica set's annotations, -// and returns true if replica set's annotation is changed. -// Note that apply and revision annotations are not copied. -func copyDeploymentAnnotationsToReplicaSet(deployment *extensions.Deployment, rs *extensions.ReplicaSet) bool { - rsAnnotationsChanged := false - if rs.Annotations == nil { - rs.Annotations = make(map[string]string) - } - for k, v := range deployment.Annotations { - // newRS revision is updated automatically in getNewReplicaSet, and the deployment's revision number is then updated - // by copying its newRS revision number. We should not copy deployment's revision to its newRS, since the update of - // deployment revision number may fail (revision becomes stale) and the revision number in newRS is more reliable. 
- if skipCopyAnnotation(k) || rs.Annotations[k] == v { - continue - } - rs.Annotations[k] = v - rsAnnotationsChanged = true - } - return rsAnnotationsChanged -} - -// SetDeploymentAnnotationsTo sets deployment's annotations as given RS's annotations. -// This action should be done if and only if the deployment is rolling back to this rs. -// Note that apply and revision annotations are not changed. -func SetDeploymentAnnotationsTo(deployment *extensions.Deployment, rollbackToRS *extensions.ReplicaSet) { - deployment.Annotations = getSkippedAnnotations(deployment.Annotations) - for k, v := range rollbackToRS.Annotations { - if !skipCopyAnnotation(k) { - deployment.Annotations[k] = v - } - } -} - -func getSkippedAnnotations(annotations map[string]string) map[string]string { - skippedAnnotations := make(map[string]string) - for k, v := range annotations { - if skipCopyAnnotation(k) { - skippedAnnotations[k] = v - } - } - return skippedAnnotations -} - -// FindActiveOrLatest returns the only active or the latest replica set in case there is at most one active -// replica set. If there are more active replica sets, then we should proportionally scale them. -func FindActiveOrLatest(newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet) *extensions.ReplicaSet { - if newRS == nil && len(oldRSs) == 0 { - return nil - } - - sort.Sort(sort.Reverse(controller.ReplicaSetsByCreationTimestamp(oldRSs))) - allRSs := controller.FilterActiveReplicaSets(append(oldRSs, newRS)) - - switch len(allRSs) { - case 0: - // If there is no active replica set then we should return the newest. - if newRS != nil { - return newRS - } - return oldRSs[0] - case 1: - return allRSs[0] - default: - return nil - } -} - -// GetDesiredReplicasAnnotation returns the number of desired replicas -func GetDesiredReplicasAnnotation(rs *extensions.ReplicaSet) (int32, bool) { - return getIntFromAnnotation(rs, DesiredReplicasAnnotation) -} - -func getMaxReplicasAnnotation(rs *extensions.ReplicaSet) (int32, bool) { - return getIntFromAnnotation(rs, MaxReplicasAnnotation) -} - -func getIntFromAnnotation(rs *extensions.ReplicaSet, annotationKey string) (int32, bool) { - annotationValue, ok := rs.Annotations[annotationKey] - if !ok { - return int32(0), false - } - intValue, err := strconv.Atoi(annotationValue) - if err != nil { - glog.Warningf("Cannot convert the value %q with annotation key %q for the replica set %q", - annotationValue, annotationKey, rs.Name) - return int32(0), false - } - return int32(intValue), true -} - -// SetReplicasAnnotations sets the desiredReplicas and maxReplicas into the annotations -func SetReplicasAnnotations(rs *extensions.ReplicaSet, desiredReplicas, maxReplicas int32) bool { - updated := false - if rs.Annotations == nil { - rs.Annotations = make(map[string]string) - } - desiredString := fmt.Sprintf("%d", desiredReplicas) - if hasString := rs.Annotations[DesiredReplicasAnnotation]; hasString != desiredString { - rs.Annotations[DesiredReplicasAnnotation] = desiredString - updated = true - } - maxString := fmt.Sprintf("%d", maxReplicas) - if hasString := rs.Annotations[MaxReplicasAnnotation]; hasString != maxString { - rs.Annotations[MaxReplicasAnnotation] = maxString - updated = true - } - return updated -} - -// MaxUnavailable returns the maximum unavailable pods a rolling deployment can take. 
-func MaxUnavailable(deployment extensions.Deployment) int32 { - if !IsRollingUpdate(&deployment) { - return int32(0) - } - // Error caught by validation - _, maxUnavailable, _ := ResolveFenceposts(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, &deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, deployment.Spec.Replicas) - return maxUnavailable -} - -// MinAvailable returns the minimum vailable pods of a given deployment -func MinAvailable(deployment *extensions.Deployment) int32 { - if !IsRollingUpdate(deployment) { - return int32(0) - } - return deployment.Spec.Replicas - MaxUnavailable(*deployment) -} - -// MaxSurge returns the maximum surge pods a rolling deployment can take. -func MaxSurge(deployment extensions.Deployment) int32 { - if !IsRollingUpdate(&deployment) { - return int32(0) - } - // Error caught by validation - maxSurge, _, _ := ResolveFenceposts(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, &deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, deployment.Spec.Replicas) - return maxSurge -} - -// GetProportion will estimate the proportion for the provided replica set using 1. the current size -// of the parent deployment, 2. the replica count that needs be added on the replica sets of the -// deployment, and 3. the total replicas added in the replica sets of the deployment so far. -func GetProportion(rs *extensions.ReplicaSet, d extensions.Deployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 { - if rs == nil || rs.Spec.Replicas == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded { - return int32(0) - } - - rsFraction := getReplicaSetFraction(*rs, d) - allowed := deploymentReplicasToAdd - deploymentReplicasAdded - - if deploymentReplicasToAdd > 0 { - // Use the minimum between the replica set fraction and the maximum allowed replicas - // when scaling up. This way we ensure we will not scale up more than the allowed - // replicas we can add. - return integer.Int32Min(rsFraction, allowed) - } - // Use the maximum between the replica set fraction and the maximum allowed replicas - // when scaling down. This way we ensure we will not scale down more than the allowed - // replicas we can remove. - return integer.Int32Max(rsFraction, allowed) -} - -// getReplicaSetFraction estimates the fraction of replicas a replica set can have in -// 1. a scaling event during a rollout or 2. when scaling a paused deployment. -func getReplicaSetFraction(rs extensions.ReplicaSet, d extensions.Deployment) int32 { - // If we are scaling down to zero then the fraction of this replica set is its whole size (negative) - if d.Spec.Replicas == int32(0) { - return -rs.Spec.Replicas - } - - deploymentReplicas := d.Spec.Replicas + MaxSurge(d) - annotatedReplicas, ok := getMaxReplicasAnnotation(&rs) - if !ok { - // If we cannot find the annotation then fallback to the current deployment size. Note that this - // will not be an accurate proportion estimation in case other replica sets have different values - // which means that the deployment was scaled at some point but we at least will stay in limits - // due to the min-max comparisons in getProportion. - annotatedReplicas = d.Status.Replicas - } - - // We should never proportionally scale up from zero which means rs.spec.replicas and annotatedReplicas - // will never be zero here. 
- newRSsize := (float64(rs.Spec.Replicas * deploymentReplicas)) / float64(annotatedReplicas) - return integer.RoundToInt32(newRSsize) - rs.Spec.Replicas -} - -// GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and ReplicaSetList from client interface. -// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. -// The third returned value is the new replica set, and it may be nil if it doesn't exist yet. -func GetAllReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, *extensions.ReplicaSet, error) { - rsList, err := listReplicaSets(deployment, c) - if err != nil { - return nil, nil, nil, err - } - podList, err := listPods(deployment, c) - if err != nil { - return nil, nil, nil, err - } - oldRSes, allOldRSes, err := FindOldReplicaSets(deployment, rsList, podList) - if err != nil { - return nil, nil, nil, err - } - newRS, err := FindNewReplicaSet(deployment, rsList) - if err != nil { - return nil, nil, nil, err - } - return oldRSes, allOldRSes, newRS, nil -} - -// GetOldReplicaSets returns the old replica sets targeted by the given Deployment; get PodList and ReplicaSetList from client interface. -// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. -func GetOldReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) { - rsList, err := listReplicaSets(deployment, c) - if err != nil { - return nil, nil, err - } - podList, err := listPods(deployment, c) - if err != nil { - return nil, nil, err - } - return FindOldReplicaSets(deployment, rsList, podList) -} - -// GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface. -// Returns nil if the new replica set doesn't exist yet. -func GetNewReplicaSet(deployment *extensions.Deployment, c clientset.Interface) (*extensions.ReplicaSet, error) { - rsList, err := listReplicaSets(deployment, c) - if err != nil { - return nil, err - } - return FindNewReplicaSet(deployment, rsList) -} - -// listReplicaSets lists all RSes the given deployment targets with the given client interface. -func listReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]*extensions.ReplicaSet, error) { - return ListReplicaSets(deployment, - func(namespace string, options api.ListOptions) ([]*extensions.ReplicaSet, error) { - rsList, err := c.Extensions().ReplicaSets(namespace).List(options) - if err != nil { - return nil, err - } - ret := []*extensions.ReplicaSet{} - for i := range rsList.Items { - ret = append(ret, &rsList.Items[i]) - } - return ret, err - }) -} - -// listReplicaSets lists all Pods the given deployment targets with the given client interface. -func listPods(deployment *extensions.Deployment, c clientset.Interface) (*api.PodList, error) { - return ListPods(deployment, - func(namespace string, options api.ListOptions) (*api.PodList, error) { - return c.Core().Pods(namespace).List(options) - }) -} - -// TODO: switch this to full namespacers -type rsListFunc func(string, api.ListOptions) ([]*extensions.ReplicaSet, error) -type podListFunc func(string, api.ListOptions) (*api.PodList, error) - -// ListReplicaSets returns a slice of RSes the given deployment targets. 
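// Illustrative aside: a minimal, standalone stand-in for the label-selector
// matching that the listing helpers below rely on. Equality-based only; the
// real code goes through unversioned.LabelSelectorAsSelector and also handles
// set-based requirements. Names and labels here are made up.
package main

import "fmt"

func matchesSelector(selector, objLabels map[string]string) bool {
    for k, v := range selector {
        if objLabels[k] != v {
            return false
        }
    }
    return true
}

func main() {
    sel := map[string]string{"app": "nginx"}
    pod := map[string]string{"app": "nginx", "pod-template-hash": "12345"}
    fmt.Println(matchesSelector(sel, pod)) // true
}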
-func ListReplicaSets(deployment *extensions.Deployment, getRSList rsListFunc) ([]*extensions.ReplicaSet, error) { - // TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector - // should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830; - // or use controllerRef, see https://github.com/kubernetes/kubernetes/issues/2210 - namespace := deployment.Namespace - selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) - if err != nil { - return nil, err - } - options := api.ListOptions{LabelSelector: selector} - return getRSList(namespace, options) -} - -// ListPods returns a list of pods the given deployment targets. -func ListPods(deployment *extensions.Deployment, getPodList podListFunc) (*api.PodList, error) { - namespace := deployment.Namespace - selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) - if err != nil { - return nil, err - } - options := api.ListOptions{LabelSelector: selector} - return getPodList(namespace, options) -} - -// equalIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash] -// We ignore pod-template-hash because the hash result would be different upon podTemplateSpec API changes -// (e.g. the addition of a new field will cause the hash code to change) -// Note that we assume input podTemplateSpecs contain non-empty labels -func equalIgnoreHash(template1, template2 api.PodTemplateSpec) (bool, error) { - // First, compare template.Labels (ignoring hash) - labels1, labels2 := template1.Labels, template2.Labels - // The podTemplateSpec must have a non-empty label so that label selectors can find them. - // This is checked by validation (of resources contain a podTemplateSpec). - if len(labels1) == 0 || len(labels2) == 0 { - return false, fmt.Errorf("Unexpected empty labels found in given template") - } - if len(labels1) > len(labels2) { - labels1, labels2 = labels2, labels1 - } - // We make sure len(labels2) >= len(labels1) - for k, v := range labels2 { - if labels1[k] != v && k != extensions.DefaultDeploymentUniqueLabelKey { - return false, nil - } - } - - // Then, compare the templates without comparing their labels - template1.Labels, template2.Labels = nil, nil - result := api.Semantic.DeepEqual(template1, template2) - return result, nil -} - -// FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template). -func FindNewReplicaSet(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) (*extensions.ReplicaSet, error) { - newRSTemplate := GetNewReplicaSetTemplate(deployment) - for i := range rsList { - equal, err := equalIgnoreHash(rsList[i].Spec.Template, newRSTemplate) - if err != nil { - return nil, err - } - if equal { - // This is the new ReplicaSet. - return rsList[i], nil - } - } - // new ReplicaSet does not exist. - return nil, nil -} - -// FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given PodList and slice of RSes. -// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. 
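// Illustrative aside: the core idea behind equalIgnoreHash above, shown on
// plain label maps. The pod-template-hash key is dropped from both sides
// before comparing; the real helper additionally deep-compares the rest of
// the pod template via api.Semantic. Values below are made up.
package main

import (
    "fmt"
    "reflect"
)

const hashLabelKey = "pod-template-hash"

func labelsEqualIgnoreHash(a, b map[string]string) bool {
    strip := func(in map[string]string) map[string]string {
        out := map[string]string{}
        for k, v := range in {
            if k != hashLabelKey {
                out[k] = v
            }
        }
        return out
    }
    return reflect.DeepEqual(strip(a), strip(b))
}

func main() {
    a := map[string]string{"app": "nginx", hashLabelKey: "111"}
    b := map[string]string{"app": "nginx", hashLabelKey: "222"}
    fmt.Println(labelsEqualIgnoreHash(a, b)) // true
}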
-func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet, podList *api.PodList) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) { - // Find all pods whose labels match deployment.Spec.Selector, and corresponding replica sets for pods in podList. - // All pods and replica sets are labeled with pod-template-hash to prevent overlapping - oldRSs := map[string]*extensions.ReplicaSet{} - allOldRSs := map[string]*extensions.ReplicaSet{} - newRSTemplate := GetNewReplicaSetTemplate(deployment) - for _, pod := range podList.Items { - podLabelsSelector := labels.Set(pod.ObjectMeta.Labels) - for _, rs := range rsList { - rsLabelsSelector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector) - if err != nil { - return nil, nil, fmt.Errorf("invalid label selector: %v", err) - } - // Filter out replica set that has the same pod template spec as the deployment - that is the new replica set. - equal, err := equalIgnoreHash(rs.Spec.Template, newRSTemplate) - if err != nil { - return nil, nil, err - } - if equal { - continue - } - allOldRSs[rs.ObjectMeta.Name] = rs - if rsLabelsSelector.Matches(podLabelsSelector) { - oldRSs[rs.ObjectMeta.Name] = rs - } - } - } - requiredRSs := []*extensions.ReplicaSet{} - for key := range oldRSs { - value := oldRSs[key] - requiredRSs = append(requiredRSs, value) - } - allRSs := []*extensions.ReplicaSet{} - for key := range allOldRSs { - value := allOldRSs[key] - allRSs = append(allRSs, value) - } - return requiredRSs, allRSs, nil -} - -// WaitForReplicaSetUpdated polls the replica set until it is updated. -func WaitForReplicaSetUpdated(c clientset.Interface, desiredGeneration int64, namespace, name string) error { - return wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { - rs, err := c.Extensions().ReplicaSets(namespace).Get(name) - if err != nil { - return false, err - } - return rs.Status.ObservedGeneration >= desiredGeneration, nil - }) -} - -// WaitForPodsHashPopulated polls the replica set until updated and fully labeled. -func WaitForPodsHashPopulated(c clientset.Interface, desiredGeneration int64, namespace, name string) error { - return wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) { - rs, err := c.Extensions().ReplicaSets(namespace).Get(name) - if err != nil { - return false, err - } - return rs.Status.ObservedGeneration >= desiredGeneration && - rs.Status.FullyLabeledReplicas == rs.Spec.Replicas, nil - }) -} - -// LabelPodsWithHash labels all pods in the given podList with the new hash label. -// The returned bool value can be used to tell if all pods are actually labeled. -func LabelPodsWithHash(podList *api.PodList, rs *extensions.ReplicaSet, c clientset.Interface, namespace, hash string) (bool, error) { - allPodsLabeled := true - for _, pod := range podList.Items { - // Only label the pod that doesn't already have the new hash - if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash { - if _, podUpdated, err := podutil.UpdatePodWithRetries(c.Core().Pods(namespace), &pod, - func(podToUpdate *api.Pod) error { - // Precondition: the pod doesn't contain the new hash in its label. 
- if podToUpdate.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash { - return errors.ErrPreconditionViolated - } - podToUpdate.Labels = labelsutil.AddLabel(podToUpdate.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash) - return nil - }); err != nil { - return false, fmt.Errorf("error in adding template hash label %s to pod %+v: %s", hash, pod, err) - } else if podUpdated { - glog.V(4).Infof("Labeled %s %s/%s of %s %s/%s with hash %s.", pod.Kind, pod.Namespace, pod.Name, rs.Kind, rs.Namespace, rs.Name, hash) - } else { - // If the pod wasn't updated but didn't return error when we try to update it, we've hit "pod not found" or "precondition violated" error. - // Then we can't say all pods are labeled - allPodsLabeled = false - } - } - } - return allPodsLabeled, nil -} - -// GetNewReplicaSetTemplate returns the desired PodTemplateSpec for the new ReplicaSet corresponding to the given ReplicaSet. -func GetNewReplicaSetTemplate(deployment *extensions.Deployment) api.PodTemplateSpec { - // newRS will have the same template as in deployment spec, plus a unique label in some cases. - newRSTemplate := api.PodTemplateSpec{ - ObjectMeta: deployment.Spec.Template.ObjectMeta, - Spec: deployment.Spec.Template.Spec, - } - newRSTemplate.ObjectMeta.Labels = labelsutil.CloneAndAddLabel( - deployment.Spec.Template.ObjectMeta.Labels, - extensions.DefaultDeploymentUniqueLabelKey, - podutil.GetPodTemplateSpecHash(newRSTemplate)) - return newRSTemplate -} - -// SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment. -func SetFromReplicaSetTemplate(deployment *extensions.Deployment, template api.PodTemplateSpec) *extensions.Deployment { - deployment.Spec.Template.ObjectMeta = template.ObjectMeta - deployment.Spec.Template.Spec = template.Spec - deployment.Spec.Template.ObjectMeta.Labels = labelsutil.CloneAndRemoveLabel( - deployment.Spec.Template.ObjectMeta.Labels, - extensions.DefaultDeploymentUniqueLabelKey) - return deployment -} - -// GetReplicaCountForReplicaSets returns the sum of Replicas of the given replica sets. -func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 { - totalReplicas := int32(0) - for _, rs := range replicaSets { - if rs != nil { - totalReplicas += rs.Spec.Replicas - } - } - return totalReplicas -} - -// GetActualReplicaCountForReplicaSets returns the sum of actual replicas of the given replica sets. -func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 { - totalActualReplicas := int32(0) - for _, rs := range replicaSets { - if rs != nil { - totalActualReplicas += rs.Status.Replicas - } - } - return totalActualReplicas -} - -// GetAvailableReplicaCountForReplicaSets returns the number of available pods corresponding to the given replica sets. -func GetAvailableReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 { - totalAvailableReplicas := int32(0) - for _, rs := range replicaSets { - if rs != nil { - totalAvailableReplicas += rs.Status.AvailableReplicas - } - } - return totalAvailableReplicas -} - -// IsPodAvailable return true if the pod is available. -// TODO: Remove this once we start using replica set status for calculating available pods -// for a deployment. 
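// Illustrative aside: the minReadySeconds rule enforced by IsPodAvailable
// below, reduced to plain time arithmetic. A pod counts as available once its
// Ready condition has held for at least minReadySeconds, or immediately when
// minReadySeconds is zero. The timestamps below are made up.
package main

import (
    "fmt"
    "time"
)

func readyLongEnough(lastTransition time.Time, minReadySeconds int32, now time.Time) bool {
    if minReadySeconds == 0 {
        return true
    }
    hold := time.Duration(minReadySeconds) * time.Second
    return !lastTransition.IsZero() && lastTransition.Add(hold).Before(now)
}

func main() {
    now := time.Now()
    fmt.Println(readyLongEnough(now.Add(-30*time.Second), 10, now)) // true
    fmt.Println(readyLongEnough(now.Add(-5*time.Second), 10, now))  // false
}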
-func IsPodAvailable(pod *api.Pod, minReadySeconds int32, now time.Time) bool { - if !controller.IsPodActive(pod) { - return false - } - // Check if we've passed minReadySeconds since LastTransitionTime - // If so, this pod is ready - for _, c := range pod.Status.Conditions { - // we only care about pod ready conditions - if c.Type == api.PodReady && c.Status == api.ConditionTrue { - glog.V(4).Infof("Comparing pod %s/%s ready condition last transition time %s + minReadySeconds %d with now %s.", pod.Namespace, pod.Name, c.LastTransitionTime.String(), minReadySeconds, now.String()) - // 2 cases that this ready condition is valid (passed minReadySeconds, i.e. the pod is available): - // 1. minReadySeconds == 0, or - // 2. LastTransitionTime (is set) + minReadySeconds (>0) < current time - minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second - if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now) { - return true - } - } - } - return false -} - -// IsRollingUpdate returns true if the strategy type is a rolling update. -func IsRollingUpdate(deployment *extensions.Deployment) bool { - return deployment.Spec.Strategy.Type == extensions.RollingUpdateDeploymentStrategyType -} - -// DeploymentComplete considers a deployment to be complete once its desired replicas equals its -// updatedReplicas and it doesn't violate minimum availability. -func DeploymentComplete(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool { - return newStatus.UpdatedReplicas == deployment.Spec.Replicas && - newStatus.AvailableReplicas >= deployment.Spec.Replicas-MaxUnavailable(*deployment) -} - -// DeploymentProgressing reports progress for a deployment. Progress is estimated by comparing the -// current with the new status of the deployment that the controller is observing. The following -// algorithm is already used in the kubectl rolling updater to report lack of progress. -func DeploymentProgressing(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool { - oldStatus := deployment.Status - - // Old replicas that need to be scaled down - oldStatusOldReplicas := oldStatus.Replicas - oldStatus.UpdatedReplicas - newStatusOldReplicas := newStatus.Replicas - newStatus.UpdatedReplicas - - return (newStatus.UpdatedReplicas > oldStatus.UpdatedReplicas) || (newStatusOldReplicas < oldStatusOldReplicas) -} - -// used for unit testing -var nowFn = func() time.Time { return time.Now() } - -// DeploymentTimedOut considers a deployment to have timed out once its condition that reports progress -// is older than progressDeadlineSeconds or a Progressing condition with a TimedOutReason reason already -// exists. -func DeploymentTimedOut(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool { - if deployment.Spec.ProgressDeadlineSeconds == nil { - return false - } - - // Look for the Progressing condition. If it doesn't exist, we have no base to estimate progress. - // If it's already set with a TimedOutReason reason, we have already timed out, no need to check - // again. - condition := GetDeploymentCondition(*newStatus, extensions.DeploymentProgressing) - if condition == nil { - return false - } - if condition.Reason == TimedOutReason { - return true - } - - // Look at the difference in seconds between now and the last time we reported any - // progress or tried to create a replica set, or resumed a paused deployment and - // compare against progressDeadlineSeconds. 
- from := condition.LastUpdateTime - delta := time.Duration(*deployment.Spec.ProgressDeadlineSeconds) * time.Second - return from.Add(delta).Before(nowFn()) -} - -// NewRSNewReplicas calculates the number of replicas a deployment's new RS should have. -// When one of the followings is true, we're rolling out the deployment; otherwise, we're scaling it. -// 1) The new RS is saturated: newRS's replicas == deployment's replicas -// 2) Max number of pods allowed is reached: deployment's replicas + maxSurge == all RSs' replicas -func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) (int32, error) { - switch deployment.Spec.Strategy.Type { - case extensions.RollingUpdateDeploymentStrategyType: - // Check if we can scale up. - maxSurge, err := intstrutil.GetValueFromIntOrPercent(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(deployment.Spec.Replicas), true) - if err != nil { - return 0, err - } - // Find the total number of pods - currentPodCount := GetReplicaCountForReplicaSets(allRSs) - maxTotalPods := deployment.Spec.Replicas + int32(maxSurge) - if currentPodCount >= maxTotalPods { - // Cannot scale up. - return newRS.Spec.Replicas, nil - } - // Scale up. - scaleUpCount := maxTotalPods - currentPodCount - // Do not exceed the number of desired replicas. - scaleUpCount = int32(integer.IntMin(int(scaleUpCount), int(deployment.Spec.Replicas-newRS.Spec.Replicas))) - return newRS.Spec.Replicas + scaleUpCount, nil - case extensions.RecreateDeploymentStrategyType: - return deployment.Spec.Replicas, nil - default: - return 0, fmt.Errorf("deployment type %v isn't supported", deployment.Spec.Strategy.Type) - } -} - -// IsSaturated checks if the new replica set is saturated by comparing its size with its deployment size. -// Both the deployment and the replica set have to believe this replica set can own all of the desired -// replicas in the deployment and the annotation helps in achieving that. -func IsSaturated(deployment *extensions.Deployment, rs *extensions.ReplicaSet) bool { - if rs == nil { - return false - } - desiredString := rs.Annotations[DesiredReplicasAnnotation] - desired, err := strconv.Atoi(desiredString) - if err != nil { - return false - } - return rs.Spec.Replicas == deployment.Spec.Replicas && int32(desired) == deployment.Spec.Replicas -} - -// WaitForObservedDeployment polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration. -// Returns error if polling timesout. -func WaitForObservedDeployment(getDeploymentFunc func() (*extensions.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error { - // TODO: This should take clientset.Interface when all code is updated to use clientset. Keeping it this way allows the function to be used by callers who have client.Interface. - return wait.Poll(interval, timeout, func() (bool, error) { - deployment, err := getDeploymentFunc() - if err != nil { - return false, err - } - return deployment.Status.ObservedGeneration >= desiredGeneration, nil - }) -} - -// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one -// step. 
For example: -// -// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1) -// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1) -// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) -// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1) -// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) -// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1) -func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) { - surge, err := intstrutil.GetValueFromIntOrPercent(maxSurge, int(desired), true) - if err != nil { - return 0, 0, err - } - unavailable, err := intstrutil.GetValueFromIntOrPercent(maxUnavailable, int(desired), false) - if err != nil { - return 0, 0, err - } - - if surge == 0 && unavailable == 0 { - // Validation should never allow the user to explicitly use zero values for both maxSurge - // maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero. - // If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the - // theory that surge might not work due to quota. - unavailable = 1 - } - - return int32(surge), int32(unavailable), nil -} - -func DeploymentDeepCopy(deployment *extensions.Deployment) (*extensions.Deployment, error) { - objCopy, err := api.Scheme.DeepCopy(deployment) - if err != nil { - return nil, err - } - copied, ok := objCopy.(*extensions.Deployment) - if !ok { - return nil, fmt.Errorf("expected Deployment, got %#v", objCopy) - } - return copied, nil -} - -// SelectorUpdatedBefore returns true if the former deployment's selector -// is updated before the latter, false otherwise -func SelectorUpdatedBefore(d1, d2 *extensions.Deployment) bool { - t1, t2 := LastSelectorUpdate(d1), LastSelectorUpdate(d2) - return t1.Before(t2) -} - -// LastSelectorUpdate returns the last time given deployment's selector is updated -func LastSelectorUpdate(d *extensions.Deployment) unversioned.Time { - t := d.Annotations[SelectorUpdateAnnotation] - if len(t) > 0 { - parsedTime, err := time.Parse(t, time.RFC3339) - // If failed to parse the time, use creation timestamp instead - if err != nil { - return d.CreationTimestamp - } - return unversioned.Time{Time: parsedTime} - } - // If it's never updated, use creation timestamp instead - return d.CreationTimestamp -} - -// BySelectorLastUpdateTime sorts a list of deployments by the last update time of their selector, -// first using their creation timestamp and then their names as a tie breaker. 
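// Illustrative aside: LastSelectorUpdate above stores the selector-update
// annotation as an RFC3339 timestamp and falls back to the creation timestamp
// when parsing fails. As a standalone reminder, time.Parse takes the layout
// first and the value second. The timestamp below is made up.
package main

import (
    "fmt"
    "time"
)

func main() {
    stamp := "2017-03-01T12:00:00Z"
    t, err := time.Parse(time.RFC3339, stamp)
    if err != nil {
        fmt.Println("would fall back to the creation timestamp:", err)
        return
    }
    fmt.Println(t.UTC())
}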
-type BySelectorLastUpdateTime []*extensions.Deployment - -func (o BySelectorLastUpdateTime) Len() int { return len(o) } -func (o BySelectorLastUpdateTime) Swap(i, j int) { o[i], o[j] = o[j], o[i] } -func (o BySelectorLastUpdateTime) Less(i, j int) bool { - ti, tj := LastSelectorUpdate(o[i]), LastSelectorUpdate(o[j]) - if ti.Equal(tj) { - if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) { - return o[i].Name < o[j].Name - } - return o[i].CreationTimestamp.Before(o[j].CreationTimestamp) - } - return ti.Before(tj) -} - -// OverlapsWith returns true when two given deployments are different and overlap with each other -func OverlapsWith(current, other *extensions.Deployment) (bool, error) { - if current.UID == other.UID { - return false, nil - } - currentSelector, err := unversioned.LabelSelectorAsSelector(current.Spec.Selector) - if err != nil { - return false, fmt.Errorf("deployment %s/%s has invalid label selector: %v", current.Namespace, current.Name, err) - } - otherSelector, err := unversioned.LabelSelectorAsSelector(other.Spec.Selector) - if err != nil { - // Broken selectors from other deployments shouldn't block current deployment. Just log the error and continue. - glog.V(2).Infof("Skip overlapping check: deployment %s/%s has invalid label selector: %v", other.Namespace, other.Name, err) - return false, nil - } - return (!currentSelector.Empty() && currentSelector.Matches(labels.Set(other.Spec.Template.Labels))) || - (!otherSelector.Empty() && otherSelector.Matches(labels.Set(current.Spec.Template.Labels))), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/doc.go b/vendor/k8s.io/kubernetes/pkg/controller/doc.go deleted file mode 100644 index ded3905820..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package controller contains code for controllers (like the replication -// controller). -package controller diff --git a/vendor/k8s.io/kubernetes/pkg/controller/lookup_cache.go b/vendor/k8s.io/kubernetes/pkg/controller/lookup_cache.go deleted file mode 100644 index 84029c5f3f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/lookup_cache.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package controller
-
-import (
-    "hash/adler32"
-    "sync"
-
-    "github.com/golang/groupcache/lru"
-    "k8s.io/kubernetes/pkg/api/meta"
-    hashutil "k8s.io/kubernetes/pkg/util/hash"
-)
-
-type objectWithMeta interface {
-    meta.Object
-}
-
-// keyFunc returns the key of an object, which is used to look up its matching object in the cache.
-// Since we match objects by namespace and Labels/Selector, two objects with the same namespace and
-// labels will have the same key.
-func keyFunc(obj objectWithMeta) uint64 {
-    hash := adler32.New()
-    hashutil.DeepHashObject(hash, &equivalenceLabelObj{
-        namespace: obj.GetNamespace(),
-        labels:    obj.GetLabels(),
-    })
-    return uint64(hash.Sum32())
-}
-
-type equivalenceLabelObj struct {
-    namespace string
-    labels    map[string]string
-}
-
-// MatchingCache saves the label and selector matching relationship.
-type MatchingCache struct {
-    mutex sync.RWMutex
-    cache *lru.Cache
-}
-
-// NewMatchingCache returns a new MatchingCache, which saves the label and selector matching relationship.
-func NewMatchingCache(maxCacheEntries int) *MatchingCache {
-    return &MatchingCache{
-        cache: lru.New(maxCacheEntries),
-    }
-}
-
-// Add will add matching information to the cache.
-func (c *MatchingCache) Add(labelObj objectWithMeta, selectorObj objectWithMeta) {
-    key := keyFunc(labelObj)
-    c.mutex.Lock()
-    defer c.mutex.Unlock()
-    c.cache.Add(key, selectorObj)
-}
-
-// GetMatchingObject looks up the matching object for a given object.
-// Note: the cached information may be invalid since the controller may have been deleted or updated;
-// callers need to check in the external request to ensure the cached data is not dirty.
-func (c *MatchingCache) GetMatchingObject(labelObj objectWithMeta) (controller interface{}, exists bool) {
-    key := keyFunc(labelObj)
-    // NOTE: we use Lock() instead of RLock() here because lru's Get() method also modifies state
-    // (it updates the least-recently-used bookkeeping), so it cannot be called concurrently.
-    c.mutex.Lock()
-    defer c.mutex.Unlock()
-    return c.cache.Get(key)
-}
-
-// Update updates the cached matching information.
-func (c *MatchingCache) Update(labelObj objectWithMeta, selectorObj objectWithMeta) {
-    c.Add(labelObj, selectorObj)
-}
-
-// InvalidateAll invalidates the whole cache.
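// Illustrative aside: the hashing idea behind keyFunc above, reduced to the
// standard library. Two objects with the same namespace and labels produce
// the same key; sorting the label keys keeps the hash independent of map
// iteration order (the real code hashes a struct with a deep-hash helper).
package main

import (
    "fmt"
    "hash/adler32"
    "sort"
)

func cacheKey(namespace string, labels map[string]string) uint64 {
    h := adler32.New()
    fmt.Fprintf(h, "ns=%s;", namespace)
    keys := make([]string, 0, len(labels))
    for k := range labels {
        keys = append(keys, k)
    }
    sort.Strings(keys)
    for _, k := range keys {
        fmt.Fprintf(h, "%s=%s;", k, labels[k])
    }
    return uint64(h.Sum32())
}

func main() {
    a := cacheKey("default", map[string]string{"app": "nginx"})
    b := cacheKey("default", map[string]string{"app": "nginx"})
    fmt.Println(a == b) // true
}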
-func (c *MatchingCache) InvalidateAll() { - c.mutex.Lock() - defer c.mutex.Unlock() - c.cache = lru.New(c.cache.MaxEntries) -} diff --git a/vendor/k8s.io/kubernetes/pkg/conversion/BUILD b/vendor/k8s.io/kubernetes/pkg/conversion/BUILD deleted file mode 100644 index 9f9991f66d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/conversion/BUILD +++ /dev/null @@ -1,40 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "cloner.go", - "converter.go", - "deep_equal.go", - "doc.go", - "helper.go", - ], - tags = ["automanaged"], - deps = ["//third_party/forked/golang/reflect:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = [ - "converter_test.go", - "deep_copy_test.go", - "helper_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/util/diff:go_default_library", - "//vendor:github.com/google/gofuzz", - "//vendor:github.com/spf13/pflag", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/conversion/OWNERS b/vendor/k8s.io/kubernetes/pkg/conversion/OWNERS deleted file mode 100644 index a046efc0c2..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/conversion/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -assignees: - - derekwaynecarr - - lavalamp - - smarterclayton - - wojtek-t diff --git a/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/BUILD b/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/BUILD deleted file mode 100644 index 74b1761e43..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/BUILD +++ /dev/null @@ -1,30 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "convert.go", - "doc.go", - ], - tags = ["automanaged"], -) - -go_test( - name = "go_default_xtest", - srcs = ["convert_test.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/conversion/queryparams:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/BUILD b/vendor/k8s.io/kubernetes/pkg/credentialprovider/BUILD deleted file mode 100644 index 9e1a82064f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/BUILD +++ /dev/null @@ -1,41 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "config.go", - "doc.go", - "keyring.go", - "plugins.go", - "provider.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/util/sets:go_default_library", - "//vendor:github.com/docker/engine-api/types", - "//vendor:github.com/golang/glog", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "config_test.go", - "keyring_test.go", - "provider_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/OWNERS b/vendor/k8s.io/kubernetes/pkg/credentialprovider/OWNERS deleted file mode 100644 index 766c481bd7..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/OWNERS +++ /dev/null @@ -1,3 +0,0 @@ -assignees: - - erictune - - liggitt diff --git 
a/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go deleted file mode 100644 index 40bf0ce844..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go +++ /dev/null @@ -1,289 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package credentialprovider - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/golang/glog" -) - -// DockerConfigJson represents ~/.docker/config.json file info -// see https://github.com/docker/docker/pull/12009 -type DockerConfigJson struct { - Auths DockerConfig `json:"auths"` - // +optional - HttpHeaders map[string]string `json:"HttpHeaders,omitempty"` -} - -// DockerConfig represents the config file used by the docker CLI. -// This config that represents the credentials that should be used -// when pulling images from specific image repositories. -type DockerConfig map[string]DockerConfigEntry - -type DockerConfigEntry struct { - Username string - Password string - Email string - Provider DockerConfigProvider -} - -var ( - preferredPathLock sync.Mutex - preferredPath = "" - workingDirPath = "" - homeDirPath = os.Getenv("HOME") - rootDirPath = "/" - homeJsonDirPath = filepath.Join(homeDirPath, ".docker") - rootJsonDirPath = filepath.Join(rootDirPath, ".docker") - - configFileName = ".dockercfg" - configJsonFileName = "config.json" -) - -func SetPreferredDockercfgPath(path string) { - preferredPathLock.Lock() - defer preferredPathLock.Unlock() - preferredPath = path -} - -func GetPreferredDockercfgPath() string { - preferredPathLock.Lock() - defer preferredPathLock.Unlock() - return preferredPath -} - -//DefaultDockercfgPaths returns default search paths of .dockercfg -func DefaultDockercfgPaths() []string { - return []string{GetPreferredDockercfgPath(), workingDirPath, homeDirPath, rootDirPath} -} - -//DefaultDockerConfigJSONPaths returns default search paths of .docker/config.json -func DefaultDockerConfigJSONPaths() []string { - return []string{GetPreferredDockercfgPath(), workingDirPath, homeJsonDirPath, rootJsonDirPath} -} - -// ReadDockercfgFile attempts to read a legacy dockercfg file from the given paths. -// if searchPaths is empty, the default paths are used. 
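// Illustrative aside: the shape of the ~/.docker/config.json "auths" section
// that DockerConfigJson above models, parsed with nothing but encoding/json.
// The registry, auth blob, and email below are made-up example data.
package main

import (
    "encoding/json"
    "fmt"
)

type exampleConfigJSON struct {
    Auths map[string]struct {
        Auth  string `json:"auth"`
        Email string `json:"email"`
    } `json:"auths"`
}

func main() {
    blob := []byte(`{"auths":{"registry.example.com":{"auth":"dXNlcjpwYXNz","email":"me@example.com"}}}`)
    var cfg exampleConfigJSON
    if err := json.Unmarshal(blob, &cfg); err != nil {
        fmt.Println("parse error:", err)
        return
    }
    for registry := range cfg.Auths {
        fmt.Println("found credentials for", registry)
    }
}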
-func ReadDockercfgFile(searchPaths []string) (cfg DockerConfig, err error) { - if len(searchPaths) == 0 { - searchPaths = DefaultDockercfgPaths() - } - - for _, configPath := range searchPaths { - absDockerConfigFileLocation, err := filepath.Abs(filepath.Join(configPath, configFileName)) - if err != nil { - glog.Errorf("while trying to canonicalize %s: %v", configPath, err) - continue - } - glog.V(4).Infof("looking for .dockercfg at %s", absDockerConfigFileLocation) - contents, err := ioutil.ReadFile(absDockerConfigFileLocation) - if os.IsNotExist(err) { - continue - } - if err != nil { - glog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err) - continue - } - cfg, err := readDockerConfigFileFromBytes(contents) - if err == nil { - glog.V(4).Infof("found .dockercfg at %s", absDockerConfigFileLocation) - return cfg, nil - } - } - return nil, fmt.Errorf("couldn't find valid .dockercfg after checking in %v", searchPaths) -} - -// ReadDockerConfigJSONFile attempts to read a docker config.json file from the given paths. -// if searchPaths is empty, the default paths are used. -func ReadDockerConfigJSONFile(searchPaths []string) (cfg DockerConfig, err error) { - if len(searchPaths) == 0 { - searchPaths = DefaultDockerConfigJSONPaths() - } - - for _, configPath := range searchPaths { - absDockerConfigFileLocation, err := filepath.Abs(filepath.Join(configPath, configJsonFileName)) - if err != nil { - glog.Errorf("while trying to canonicalize %s: %v", configPath, err) - continue - } - glog.V(4).Infof("looking for .docker/config.json at %s", absDockerConfigFileLocation) - contents, err := ioutil.ReadFile(absDockerConfigFileLocation) - if os.IsNotExist(err) { - continue - } - if err != nil { - glog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err) - continue - } - cfg, err := readDockerConfigJsonFileFromBytes(contents) - if err == nil { - glog.V(4).Infof("found .docker/config.json at %s", absDockerConfigFileLocation) - return cfg, nil - } - } - return nil, fmt.Errorf("couldn't find valid .docker/config.json after checking in %v", searchPaths) -} - -func ReadDockerConfigFile() (cfg DockerConfig, err error) { - if cfg, err := ReadDockerConfigJSONFile(nil); err == nil { - return cfg, nil - } - // Can't find latest config file so check for the old one - return ReadDockercfgFile(nil) -} - -// HttpError wraps a non-StatusOK error code as an error. 
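// Illustrative aside: the fetch-and-check pattern used by ReadUrl below,
// written against the standard library only. Any non-200 status is wrapped
// into an error. The URL in main is a placeholder, not something referenced
// by this repository.
package main

import (
    "fmt"
    "io/ioutil"
    "net/http"
)

func fetch(url string) ([]byte, error) {
    resp, err := http.Get(url)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("http status code: %d while fetching url %s", resp.StatusCode, url)
    }
    return ioutil.ReadAll(resp.Body)
}

func main() {
    if _, err := fetch("https://example.com/"); err != nil {
        fmt.Println("fetch failed:", err)
    }
}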
-type HttpError struct {
-    StatusCode int
-    Url        string
-}
-
-// Error implements error
-func (he *HttpError) Error() string {
-    return fmt.Sprintf("http status code: %d while fetching url %s",
-        he.StatusCode, he.Url)
-}
-
-func ReadUrl(url string, client *http.Client, header *http.Header) (body []byte, err error) {
-    req, err := http.NewRequest("GET", url, nil)
-    if err != nil {
-        return nil, err
-    }
-    if header != nil {
-        req.Header = *header
-    }
-    resp, err := client.Do(req)
-    if err != nil {
-        return nil, err
-    }
-    defer resp.Body.Close()
-
-    if resp.StatusCode != http.StatusOK {
-        glog.V(2).Infof("body of failing http response: %v", resp.Body)
-        return nil, &HttpError{
-            StatusCode: resp.StatusCode,
-            Url:        url,
-        }
-    }
-
-    contents, err := ioutil.ReadAll(resp.Body)
-    if err != nil {
-        return nil, err
-    }
-
-    return contents, nil
-}
-
-func ReadDockerConfigFileFromUrl(url string, client *http.Client, header *http.Header) (cfg DockerConfig, err error) {
-    if contents, err := ReadUrl(url, client, header); err != nil {
-        return nil, err
-    } else {
-        return readDockerConfigFileFromBytes(contents)
-    }
-}
-
-func readDockerConfigFileFromBytes(contents []byte) (cfg DockerConfig, err error) {
-    if err = json.Unmarshal(contents, &cfg); err != nil {
-        glog.Errorf("while trying to parse blob %q: %v", contents, err)
-        return nil, err
-    }
-    return
-}
-
-func readDockerConfigJsonFileFromBytes(contents []byte) (cfg DockerConfig, err error) {
-    var cfgJson DockerConfigJson
-    if err = json.Unmarshal(contents, &cfgJson); err != nil {
-        glog.Errorf("while trying to parse blob %q: %v", contents, err)
-        return nil, err
-    }
-    cfg = cfgJson.Auths
-    return
-}
-
-// dockerConfigEntryWithAuth is used solely for deserializing the Auth field
-// into a dockerConfigEntry during JSON deserialization.
-type dockerConfigEntryWithAuth struct {
-    // +optional
-    Username string `json:"username,omitempty"`
-    // +optional
-    Password string `json:"password,omitempty"`
-    // +optional
-    Email string `json:"email,omitempty"`
-    // +optional
-    Auth string `json:"auth,omitempty"`
-}
-
-func (ident *DockerConfigEntry) UnmarshalJSON(data []byte) error {
-    var tmp dockerConfigEntryWithAuth
-    err := json.Unmarshal(data, &tmp)
-    if err != nil {
-        return err
-    }
-
-    ident.Username = tmp.Username
-    ident.Password = tmp.Password
-    ident.Email = tmp.Email
-
-    if len(tmp.Auth) == 0 {
-        return nil
-    }
-
-    ident.Username, ident.Password, err = decodeDockerConfigFieldAuth(tmp.Auth)
-    return err
-}
-
-func (ident DockerConfigEntry) MarshalJSON() ([]byte, error) {
-    toEncode := dockerConfigEntryWithAuth{ident.Username, ident.Password, ident.Email, ""}
-    toEncode.Auth = encodeDockerConfigFieldAuth(ident.Username, ident.Password)
-
-    return json.Marshal(toEncode)
-}
-
-// decodeDockerConfigFieldAuth deserializes the "auth" field from dockercfg into a
-// username and a password. The format of the auth field is base64(<username>:<password>).
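// Illustrative aside: decoding the dockercfg "auth" field, which is
// base64("username:password"), exactly the two steps performed by the helper
// below: a base64 decode followed by a split on the first colon. The encoded
// value here is made up (it decodes to "user:pass").
package main

import (
    "encoding/base64"
    "fmt"
    "strings"
)

func main() {
    decoded, err := base64.StdEncoding.DecodeString("dXNlcjpwYXNz")
    if err != nil {
        fmt.Println("decode error:", err)
        return
    }
    parts := strings.SplitN(string(decoded), ":", 2)
    if len(parts) != 2 {
        fmt.Println("unable to parse auth field")
        return
    }
    fmt.Println("username:", parts[0], "password:", parts[1])
}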
-func decodeDockerConfigFieldAuth(field string) (username, password string, err error) { - decoded, err := base64.StdEncoding.DecodeString(field) - if err != nil { - return - } - - parts := strings.SplitN(string(decoded), ":", 2) - if len(parts) != 2 { - err = fmt.Errorf("unable to parse auth field") - return - } - - username = parts[0] - password = parts[1] - - return -} - -func encodeDockerConfigFieldAuth(username, password string) string { - fieldValue := username + ":" + password - - return base64.StdEncoding.EncodeToString([]byte(fieldValue)) -} diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/doc.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/doc.go deleted file mode 100644 index 41c12410f0..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package credentialprovider supplies interfaces and implementations for -// docker registry providers to expose their authentication scheme. -package credentialprovider diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/keyring.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/keyring.go deleted file mode 100644 index ed712ccfdc..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/keyring.go +++ /dev/null @@ -1,338 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package credentialprovider - -import ( - "encoding/json" - "net" - "net/url" - "path/filepath" - "sort" - "strings" - - "github.com/golang/glog" - - dockertypes "github.com/docker/engine-api/types" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/util/sets" -) - -// DockerKeyring tracks a set of docker registry credentials, maintaining a -// reverse index across the registry endpoints. A registry endpoint is made -// up of a host (e.g. registry.example.com), but it may also contain a path -// (e.g. 
registry.example.com/foo) This index is important for two reasons: -// - registry endpoints may overlap, and when this happens we must find the -// most specific match for a given image -// - iterating a map does not yield predictable results -type DockerKeyring interface { - Lookup(image string) ([]LazyAuthConfiguration, bool) -} - -// BasicDockerKeyring is a trivial map-backed implementation of DockerKeyring -type BasicDockerKeyring struct { - index []string - creds map[string][]LazyAuthConfiguration -} - -// lazyDockerKeyring is an implementation of DockerKeyring that lazily -// materializes its dockercfg based on a set of dockerConfigProviders. -type lazyDockerKeyring struct { - Providers []DockerConfigProvider -} - -// LazyAuthConfiguration wraps dockertypes.AuthConfig, potentially deferring its -// binding. If Provider is non-nil, it will be used to obtain new credentials -// by calling LazyProvide() on it. -type LazyAuthConfiguration struct { - dockertypes.AuthConfig - Provider DockerConfigProvider -} - -func DockerConfigEntryToLazyAuthConfiguration(ident DockerConfigEntry) LazyAuthConfiguration { - return LazyAuthConfiguration{ - AuthConfig: dockertypes.AuthConfig{ - Username: ident.Username, - Password: ident.Password, - Email: ident.Email, - }, - } -} - -func (dk *BasicDockerKeyring) Add(cfg DockerConfig) { - if dk.index == nil { - dk.index = make([]string, 0) - dk.creds = make(map[string][]LazyAuthConfiguration) - } - for loc, ident := range cfg { - - var creds LazyAuthConfiguration - if ident.Provider != nil { - creds = LazyAuthConfiguration{ - Provider: ident.Provider, - } - } else { - creds = DockerConfigEntryToLazyAuthConfiguration(ident) - } - - value := loc - if !strings.HasPrefix(value, "https://") && !strings.HasPrefix(value, "http://") { - value = "https://" + value - } - parsed, err := url.Parse(value) - if err != nil { - glog.Errorf("Entry %q in dockercfg invalid (%v), ignoring", loc, err) - continue - } - - // The docker client allows exact matches: - // foo.bar.com/namespace - // Or hostname matches: - // foo.bar.com - // It also considers /v2/ and /v1/ equivalent to the hostname - // See ResolveAuthConfig in docker/registry/auth.go. - effectivePath := parsed.Path - if strings.HasPrefix(effectivePath, "/v2/") || strings.HasPrefix(effectivePath, "/v1/") { - effectivePath = effectivePath[3:] - } - var key string - if (len(effectivePath) > 0) && (effectivePath != "/") { - key = parsed.Host + effectivePath - } else { - key = parsed.Host - } - dk.creds[key] = append(dk.creds[key], creds) - dk.index = append(dk.index, key) - } - - eliminateDupes := sets.NewString(dk.index...) - dk.index = eliminateDupes.List() - - // Update the index used to identify which credentials to use for a given - // image. The index is reverse-sorted so more specific paths are matched - // first. For example, if for the given image "quay.io/coreos/etcd", - // credentials for "quay.io/coreos" should match before "quay.io". - sort.Sort(sort.Reverse(sort.StringSlice(dk.index))) -} - -const ( - defaultRegistryHost = "index.docker.io" - defaultRegistryKey = defaultRegistryHost + "/v1/" -) - -// isDefaultRegistryMatch determines whether the given image will -// pull from the default registry (DockerHub) based on the -// characteristics of its name. -func isDefaultRegistryMatch(image string) bool { - parts := strings.SplitN(image, "/", 2) - - if len(parts[0]) == 0 { - return false - } - - if len(parts) == 1 { - // e.g. 
library/ubuntu - return true - } - - if parts[0] == "docker.io" || parts[0] == "index.docker.io" { - // resolve docker.io/image and index.docker.io/image as default registry - return true - } - - // From: http://blog.docker.com/2013/07/how-to-use-your-own-registry/ - // Docker looks for either a “.” (domain separator) or “:” (port separator) - // to learn that the first part of the repository name is a location and not - // a user name. - return !strings.ContainsAny(parts[0], ".:") -} - -// url.Parse require a scheme, but ours don't have schemes. Adding a -// scheme to make url.Parse happy, then clear out the resulting scheme. -func parseSchemelessUrl(schemelessUrl string) (*url.URL, error) { - parsed, err := url.Parse("https://" + schemelessUrl) - if err != nil { - return nil, err - } - // clear out the resulting scheme - parsed.Scheme = "" - return parsed, nil -} - -// split the host name into parts, as well as the port -func splitUrl(url *url.URL) (parts []string, port string) { - host, port, err := net.SplitHostPort(url.Host) - if err != nil { - // could not parse port - host, port = url.Host, "" - } - return strings.Split(host, "."), port -} - -// overloaded version of urlsMatch, operating on strings instead of URLs. -func urlsMatchStr(glob string, target string) (bool, error) { - globUrl, err := parseSchemelessUrl(glob) - if err != nil { - return false, err - } - targetUrl, err := parseSchemelessUrl(target) - if err != nil { - return false, err - } - return urlsMatch(globUrl, targetUrl) -} - -// check whether the given target url matches the glob url, which may have -// glob wild cards in the host name. -// -// Examples: -// globUrl=*.docker.io, targetUrl=blah.docker.io => match -// globUrl=*.docker.io, targetUrl=not.right.io => no match -// -// Note that we don't support wildcards in ports and paths yet. -func urlsMatch(globUrl *url.URL, targetUrl *url.URL) (bool, error) { - globUrlParts, globPort := splitUrl(globUrl) - targetUrlParts, targetPort := splitUrl(targetUrl) - if globPort != targetPort { - // port doesn't match - return false, nil - } - if len(globUrlParts) != len(targetUrlParts) { - // host name does not have the same number of parts - return false, nil - } - if !strings.HasPrefix(targetUrl.Path, globUrl.Path) { - // the path of the credential must be a prefix - return false, nil - } - for k, globUrlPart := range globUrlParts { - targetUrlPart := targetUrlParts[k] - matched, err := filepath.Match(globUrlPart, targetUrlPart) - if err != nil { - return false, err - } - if !matched { - // glob mismatch for some part - return false, nil - } - } - // everything matches - return true, nil -} - -// Lookup implements the DockerKeyring method for fetching credentials based on image name. -// Multiple credentials may be returned if there are multiple potentially valid credentials -// available. This allows for rotation. -func (dk *BasicDockerKeyring) Lookup(image string) ([]LazyAuthConfiguration, bool) { - // range over the index as iterating over a map does not provide a predictable ordering - ret := []LazyAuthConfiguration{} - for _, k := range dk.index { - // both k and image are schemeless URLs because even though schemes are allowed - // in the credential configurations, we remove them in Add. - if matched, _ := urlsMatchStr(k, image); !matched { - continue - } - - ret = append(ret, dk.creds[k]...) 
- } - - if len(ret) > 0 { - return ret, true - } - - // Use credentials for the default registry if provided, and appropriate - if isDefaultRegistryMatch(image) { - if auth, ok := dk.creds[defaultRegistryHost]; ok { - return auth, true - } - } - - return []LazyAuthConfiguration{}, false -} - -// Lookup implements the DockerKeyring method for fetching credentials -// based on image name. -func (dk *lazyDockerKeyring) Lookup(image string) ([]LazyAuthConfiguration, bool) { - keyring := &BasicDockerKeyring{} - - for _, p := range dk.Providers { - keyring.Add(p.Provide()) - } - - return keyring.Lookup(image) -} - -type FakeKeyring struct { - auth []LazyAuthConfiguration - ok bool -} - -func (f *FakeKeyring) Lookup(image string) ([]LazyAuthConfiguration, bool) { - return f.auth, f.ok -} - -// unionDockerKeyring delegates to a set of keyrings. -type unionDockerKeyring struct { - keyrings []DockerKeyring -} - -func (k *unionDockerKeyring) Lookup(image string) ([]LazyAuthConfiguration, bool) { - authConfigs := []LazyAuthConfiguration{} - for _, subKeyring := range k.keyrings { - if subKeyring == nil { - continue - } - - currAuthResults, _ := subKeyring.Lookup(image) - authConfigs = append(authConfigs, currAuthResults...) - } - - return authConfigs, (len(authConfigs) > 0) -} - -// MakeDockerKeyring inspects the passedSecrets to see if they contain any DockerConfig secrets. If they do, -// then a DockerKeyring is built based on every hit and unioned with the defaultKeyring. -// If they do not, then the default keyring is returned -func MakeDockerKeyring(passedSecrets []api.Secret, defaultKeyring DockerKeyring) (DockerKeyring, error) { - passedCredentials := []DockerConfig{} - for _, passedSecret := range passedSecrets { - if dockerConfigJsonBytes, dockerConfigJsonExists := passedSecret.Data[api.DockerConfigJsonKey]; (passedSecret.Type == api.SecretTypeDockerConfigJson) && dockerConfigJsonExists && (len(dockerConfigJsonBytes) > 0) { - dockerConfigJson := DockerConfigJson{} - if err := json.Unmarshal(dockerConfigJsonBytes, &dockerConfigJson); err != nil { - return nil, err - } - - passedCredentials = append(passedCredentials, dockerConfigJson.Auths) - } else if dockercfgBytes, dockercfgExists := passedSecret.Data[api.DockerConfigKey]; (passedSecret.Type == api.SecretTypeDockercfg) && dockercfgExists && (len(dockercfgBytes) > 0) { - dockercfg := DockerConfig{} - if err := json.Unmarshal(dockercfgBytes, &dockercfg); err != nil { - return nil, err - } - - passedCredentials = append(passedCredentials, dockercfg) - } - } - - if len(passedCredentials) > 0 { - basicKeyring := &BasicDockerKeyring{} - for _, currCredentials := range passedCredentials { - basicKeyring.Add(currCredentials) - } - return &unionDockerKeyring{[]DockerKeyring{basicKeyring, defaultKeyring}}, nil - } - - return defaultKeyring, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugins.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugins.go deleted file mode 100644 index 76c2e72490..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugins.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package credentialprovider - -import ( - "sync" - - "github.com/golang/glog" -) - -// All registered credential providers. -var providersMutex sync.Mutex -var providers = make(map[string]DockerConfigProvider) - -// RegisterCredentialProvider is called by provider implementations on -// initialization to register themselves, like so: -// func init() { -// RegisterCredentialProvider("name", &myProvider{...}) -// } -func RegisterCredentialProvider(name string, provider DockerConfigProvider) { - providersMutex.Lock() - defer providersMutex.Unlock() - _, found := providers[name] - if found { - glog.Fatalf("Credential provider %q was registered twice", name) - } - glog.V(4).Infof("Registered credential provider %q", name) - providers[name] = provider -} - -// NewDockerKeyring creates a DockerKeyring to use for resolving credentials, -// which lazily draws from the set of registered credential providers. -func NewDockerKeyring() DockerKeyring { - keyring := &lazyDockerKeyring{ - Providers: make([]DockerConfigProvider, 0), - } - - // TODO(mattmoor): iterating over the map is non-deterministic. We should - // introduce the notion of priorities for conflict resolution. - for name, provider := range providers { - if provider.Enabled() { - glog.V(4).Infof("Registering credential provider: %v", name) - keyring.Providers = append(keyring.Providers, provider) - } - } - - return keyring -} diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go deleted file mode 100644 index cb93bd7fb2..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package credentialprovider - -import ( - "os" - "reflect" - "sync" - "time" - - dockertypes "github.com/docker/engine-api/types" - "github.com/golang/glog" -) - -// DockerConfigProvider is the interface that registered extensions implement -// to materialize 'dockercfg' credentials. -type DockerConfigProvider interface { - // Enabled returns true if the config provider is enabled. - // Implementations can be blocking - e.g. metadata server unavailable. - Enabled() bool - // Provide returns docker configuration. - // Implementations can be blocking - e.g. metadata server unavailable. - Provide() DockerConfig - // LazyProvide() gets called after URL matches have been performed, so the - // location used as the key in DockerConfig would be redundant. 
- LazyProvide() *DockerConfigEntry -} - -func LazyProvide(creds LazyAuthConfiguration) dockertypes.AuthConfig { - if creds.Provider != nil { - entry := *creds.Provider.LazyProvide() - return DockerConfigEntryToLazyAuthConfiguration(entry).AuthConfig - } else { - return creds.AuthConfig - } - -} - -// A DockerConfigProvider that simply reads the .dockercfg file -type defaultDockerConfigProvider struct{} - -// init registers our default provider, which simply reads the .dockercfg file. -func init() { - RegisterCredentialProvider(".dockercfg", - &CachingDockerConfigProvider{ - Provider: &defaultDockerConfigProvider{}, - Lifetime: 5 * time.Minute, - }) -} - -// CachingDockerConfigProvider implements DockerConfigProvider by composing -// with another DockerConfigProvider and caching the DockerConfig it provides -// for a pre-specified lifetime. -type CachingDockerConfigProvider struct { - Provider DockerConfigProvider - Lifetime time.Duration - - // cache fields - cacheDockerConfig DockerConfig - expiration time.Time - mu sync.Mutex -} - -// Enabled implements dockerConfigProvider -func (d *defaultDockerConfigProvider) Enabled() bool { - return true -} - -// Provide implements dockerConfigProvider -func (d *defaultDockerConfigProvider) Provide() DockerConfig { - // Read the standard Docker credentials from .dockercfg - if cfg, err := ReadDockerConfigFile(); err == nil { - return cfg - } else if !os.IsNotExist(err) { - glog.V(4).Infof("Unable to parse Docker config file: %v", err) - } - return DockerConfig{} -} - -// LazyProvide implements dockerConfigProvider. Should never be called. -func (d *defaultDockerConfigProvider) LazyProvide() *DockerConfigEntry { - return nil -} - -// Enabled implements dockerConfigProvider -func (d *CachingDockerConfigProvider) Enabled() bool { - return d.Provider.Enabled() -} - -// LazyProvide implements dockerConfigProvider. Should never be called. 
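// Illustrative aside: the lifetime-based caching pattern that
// CachingDockerConfigProvider above wraps around a slower provider, reduced
// to a standalone sketch. All names here are made up.
package main

import (
    "fmt"
    "sync"
    "time"
)

type cachedValue struct {
    mu         sync.Mutex
    value      string
    expiration time.Time
    lifetime   time.Duration
    fetch      func() string
}

func (c *cachedValue) get() string {
    c.mu.Lock()
    defer c.mu.Unlock()
    if time.Now().Before(c.expiration) {
        return c.value // still fresh, serve from cache
    }
    c.value = c.fetch()
    c.expiration = time.Now().Add(c.lifetime)
    return c.value
}

func main() {
    calls := 0
    c := &cachedValue{lifetime: time.Minute, fetch: func() string { calls++; return "config" }}
    c.get()
    c.get()
    fmt.Println("fetches:", calls) // 1: the second call hits the cache
}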
-func (d *CachingDockerConfigProvider) LazyProvide() *DockerConfigEntry { - return nil -} - -// Provide implements dockerConfigProvider -func (d *CachingDockerConfigProvider) Provide() DockerConfig { - d.mu.Lock() - defer d.mu.Unlock() - - // If the cache hasn't expired, return our cache - if time.Now().Before(d.expiration) { - return d.cacheDockerConfig - } - - glog.V(2).Infof("Refreshing cache for provider: %v", reflect.TypeOf(d.Provider).String()) - d.cacheDockerConfig = d.Provider.Provide() - d.expiration = time.Now().Add(d.Lifetime) - return d.cacheDockerConfig -} diff --git a/vendor/k8s.io/kubernetes/pkg/fieldpath/BUILD b/vendor/k8s.io/kubernetes/pkg/fieldpath/BUILD deleted file mode 100644 index e621102ae6..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/fieldpath/BUILD +++ /dev/null @@ -1,37 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fieldpath.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/resource:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["fieldpath_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/resource:go_default_library", - "//vendor:github.com/stretchr/testify/assert", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go b/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go deleted file mode 100644 index 83cbdce0c8..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package fieldpath supplies methods for extracting fields from objects -// given a path to a field. -package fieldpath diff --git a/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go b/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go deleted file mode 100644 index 08460c00ee..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fieldpath - -import ( - "fmt" - "math" - "strconv" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/resource" -) - -// formatMap formats map[string]string to a string. -func FormatMap(m map[string]string) (fmtStr string) { - for key, value := range m { - fmtStr += fmt.Sprintf("%v=%q\n", key, value) - } - fmtStr = strings.TrimSuffix(fmtStr, "\n") - - return -} - -// ExtractFieldPathAsString extracts the field from the given object -// and returns it as a string. The object must be a pointer to an -// API type. -// -// Currently, this API is limited to supporting the fieldpaths: -// -// 1. metadata.name - The name of an API object -// 2. metadata.namespace - The namespace of an API object -func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) { - accessor, err := meta.Accessor(obj) - if err != nil { - return "", nil - } - - switch fieldPath { - case "metadata.annotations": - return FormatMap(accessor.GetAnnotations()), nil - case "metadata.labels": - return FormatMap(accessor.GetLabels()), nil - case "metadata.name": - return accessor.GetName(), nil - case "metadata.namespace": - return accessor.GetNamespace(), nil - } - - return "", fmt.Errorf("Unsupported fieldPath: %v", fieldPath) -} - -// ExtractResourceValueByContainerName extracts the value of a resource -// by providing container name -func ExtractResourceValueByContainerName(fs *api.ResourceFieldSelector, pod *api.Pod, containerName string) (string, error) { - container, err := findContainerInPod(pod, containerName) - if err != nil { - return "", err - } - return ExtractContainerResourceValue(fs, container) -} - -// ExtractResourceValueByContainerNameAndNodeAllocatable extracts the value of a resource -// by providing container name and node allocatable -func ExtractResourceValueByContainerNameAndNodeAllocatable(fs *api.ResourceFieldSelector, pod *api.Pod, containerName string, nodeAllocatable api.ResourceList) (string, error) { - realContainer, err := findContainerInPod(pod, containerName) - if err != nil { - return "", err - } - - containerCopy, err := api.Scheme.DeepCopy(realContainer) - if err != nil { - return "", fmt.Errorf("failed to perform a deep copy of container object: %v", err) - } - - container, ok := containerCopy.(*api.Container) - if !ok { - return "", fmt.Errorf("unexpected type returned from deep copy of container object") - } - - MergeContainerResourceLimits(container, nodeAllocatable) - - return ExtractContainerResourceValue(fs, container) -} - -// ExtractContainerResourceValue extracts the value of a resource -// in an already known container -func ExtractContainerResourceValue(fs *api.ResourceFieldSelector, container *api.Container) (string, error) { - divisor := resource.Quantity{} - if divisor.Cmp(fs.Divisor) == 0 { - divisor = resource.MustParse("1") - } else { - divisor = fs.Divisor - } - - switch fs.Resource { - case "limits.cpu": - return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor) - case "limits.memory": - return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor) - case "requests.cpu": - return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor) - case "requests.memory": - return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor) - } - - return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource) -} - -// findContainerInPod finds a container by its name in the provided pod -func 
findContainerInPod(pod *api.Pod, containerName string) (*api.Container, error) { - for _, container := range pod.Spec.Containers { - if container.Name == containerName { - return &container, nil - } - } - return nil, fmt.Errorf("container %s not found", containerName) -} - -// convertResourceCPUTOString converts cpu value to the format of divisor and returns -// ceiling of the value. -func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) { - c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue()))) - return strconv.FormatInt(c, 10), nil -} - -// convertResourceMemoryToString converts memory value to the format of divisor and returns -// ceiling of the value. -func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) { - m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value()))) - return strconv.FormatInt(m, 10), nil -} - -// MergeContainerResourceLimits checks if a limit is applied for -// the container, and if not, it sets the limit to the passed resource list. -func MergeContainerResourceLimits(container *api.Container, - allocatable api.ResourceList) { - if container.Resources.Limits == nil { - container.Resources.Limits = make(api.ResourceList) - } - for _, resource := range []api.ResourceName{api.ResourceCPU, api.ResourceMemory} { - if quantity, exists := container.Resources.Limits[resource]; !exists || quantity.IsZero() { - if cap, exists := allocatable[resource]; exists { - container.Resources.Limits[resource] = *cap.Copy() - } - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/fields/BUILD b/vendor/k8s.io/kubernetes/pkg/fields/BUILD deleted file mode 100644 index b76afe5429..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/fields/BUILD +++ /dev/null @@ -1,34 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fields.go", - "requirements.go", - "selector.go", - ], - tags = ["automanaged"], - deps = ["//pkg/selection:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = [ - "fields_test.go", - "selector_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/pkg/fields/selector.go b/vendor/k8s.io/kubernetes/pkg/fields/selector.go deleted file mode 100644 index 75161daf1d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/fields/selector.go +++ /dev/null @@ -1,278 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fields - -import ( - "fmt" - "sort" - "strings" - - "k8s.io/kubernetes/pkg/selection" -) - -// Selector represents a field selector. -type Selector interface { - // Matches returns true if this selector matches the given set of fields. 
- Matches(Fields) bool - - // Empty returns true if this selector does not restrict the selection space. - Empty() bool - - // RequiresExactMatch allows a caller to introspect whether a given selector - // requires a single specific field to be set, and if so returns the value it - // requires. - RequiresExactMatch(field string) (value string, found bool) - - // Transform returns a new copy of the selector after TransformFunc has been - // applied to the entire selector, or an error if fn returns an error. - Transform(fn TransformFunc) (Selector, error) - - // Requirements converts this interface to Requirements to expose - // more detailed selection information. - Requirements() Requirements - - // String returns a human readable string that represents this selector. - String() string -} - -// Everything returns a selector that matches all fields. -func Everything() Selector { - return andTerm{} -} - -type hasTerm struct { - field, value string -} - -func (t *hasTerm) Matches(ls Fields) bool { - return ls.Get(t.field) == t.value -} - -func (t *hasTerm) Empty() bool { - return false -} - -func (t *hasTerm) RequiresExactMatch(field string) (value string, found bool) { - if t.field == field { - return t.value, true - } - return "", false -} - -func (t *hasTerm) Transform(fn TransformFunc) (Selector, error) { - field, value, err := fn(t.field, t.value) - if err != nil { - return nil, err - } - return &hasTerm{field, value}, nil -} - -func (t *hasTerm) Requirements() Requirements { - return []Requirement{{ - Field: t.field, - Operator: selection.Equals, - Value: t.value, - }} -} - -func (t *hasTerm) String() string { - return fmt.Sprintf("%v=%v", t.field, t.value) -} - -type notHasTerm struct { - field, value string -} - -func (t *notHasTerm) Matches(ls Fields) bool { - return ls.Get(t.field) != t.value -} - -func (t *notHasTerm) Empty() bool { - return false -} - -func (t *notHasTerm) RequiresExactMatch(field string) (value string, found bool) { - return "", false -} - -func (t *notHasTerm) Transform(fn TransformFunc) (Selector, error) { - field, value, err := fn(t.field, t.value) - if err != nil { - return nil, err - } - return &notHasTerm{field, value}, nil -} - -func (t *notHasTerm) Requirements() Requirements { - return []Requirement{{ - Field: t.field, - Operator: selection.NotEquals, - Value: t.value, - }} -} - -func (t *notHasTerm) String() string { - return fmt.Sprintf("%v!=%v", t.field, t.value) -} - -type andTerm []Selector - -func (t andTerm) Matches(ls Fields) bool { - for _, q := range t { - if !q.Matches(ls) { - return false - } - } - return true -} - -func (t andTerm) Empty() bool { - if t == nil { - return true - } - if len([]Selector(t)) == 0 { - return true - } - for i := range t { - if !t[i].Empty() { - return false - } - } - return true -} - -func (t andTerm) RequiresExactMatch(field string) (string, bool) { - if t == nil || len([]Selector(t)) == 0 { - return "", false - } - for i := range t { - if value, found := t[i].RequiresExactMatch(field); found { - return value, found - } - } - return "", false -} - -func (t andTerm) Transform(fn TransformFunc) (Selector, error) { - next := make([]Selector, len([]Selector(t))) - for i, s := range []Selector(t) { - n, err := s.Transform(fn) - if err != nil { - return nil, err - } - next[i] = n - } - return andTerm(next), nil -} - -func (t andTerm) Requirements() Requirements { - reqs := make([]Requirement, 0, len(t)) - for _, s := range []Selector(t) { - rs := s.Requirements() - reqs = append(reqs, rs...)
- } - return reqs -} - -func (t andTerm) String() string { - var terms []string - for _, q := range t { - terms = append(terms, q.String()) - } - return strings.Join(terms, ",") -} - -// SelectorFromSet returns a Selector which will match exactly the given Set. A -// nil Set is considered equivalent to Everything(). -func SelectorFromSet(ls Set) Selector { - if ls == nil { - return Everything() - } - items := make([]Selector, 0, len(ls)) - for field, value := range ls { - items = append(items, &hasTerm{field: field, value: value}) - } - if len(items) == 1 { - return items[0] - } - return andTerm(items) -} - -// ParseSelectorOrDie takes a string representing a selector and returns an -// object suitable for matching, or panic when an error occur. -func ParseSelectorOrDie(s string) Selector { - selector, err := ParseSelector(s) - if err != nil { - panic(err) - } - return selector -} - -// ParseSelector takes a string representing a selector and returns an -// object suitable for matching, or an error. -func ParseSelector(selector string) (Selector, error) { - return parseSelector(selector, - func(lhs, rhs string) (newLhs, newRhs string, err error) { - return lhs, rhs, nil - }) -} - -// Parses the selector and runs them through the given TransformFunc. -func ParseAndTransformSelector(selector string, fn TransformFunc) (Selector, error) { - return parseSelector(selector, fn) -} - -// Function to transform selectors. -type TransformFunc func(field, value string) (newField, newValue string, err error) - -func try(selectorPiece, op string) (lhs, rhs string, ok bool) { - pieces := strings.Split(selectorPiece, op) - if len(pieces) == 2 { - return pieces[0], pieces[1], true - } - return "", "", false -} - -func parseSelector(selector string, fn TransformFunc) (Selector, error) { - parts := strings.Split(selector, ",") - sort.StringSlice(parts).Sort() - var items []Selector - for _, part := range parts { - if part == "" { - continue - } - if lhs, rhs, ok := try(part, "!="); ok { - items = append(items, &notHasTerm{field: lhs, value: rhs}) - } else if lhs, rhs, ok := try(part, "=="); ok { - items = append(items, &hasTerm{field: lhs, value: rhs}) - } else if lhs, rhs, ok := try(part, "="); ok { - items = append(items, &hasTerm{field: lhs, value: rhs}) - } else { - return nil, fmt.Errorf("invalid selector: '%s'; can't understand '%s'", selector, part) - } - } - if len(items) == 1 { - return items[0].Transform(fn) - } - return andTerm(items).Transform(fn) -} - -// OneTermEqualSelector returns an object that matches objects where one field/field equals one value. -// Cannot return an error.
-func OneTermEqualSelector(k, v string) Selector { - return &hasTerm{field: k, value: v} -} diff --git a/vendor/k8s.io/kubernetes/pkg/genericapiserver/openapi/common/BUILD b/vendor/k8s.io/kubernetes/pkg/genericapiserver/openapi/common/BUILD deleted file mode 100644 index d9f5a68f2b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/genericapiserver/openapi/common/BUILD +++ /dev/null @@ -1,24 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "common.go", - "doc.go", - ], - tags = ["automanaged"], - deps = [ - "//vendor:github.com/emicklei/go-restful", - "//vendor:github.com/go-openapi/spec", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/genericapiserver/openapi/common/doc.go b/vendor/k8s.io/kubernetes/pkg/genericapiserver/openapi/common/doc.go deleted file mode 100644 index 0308d58b4d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/genericapiserver/openapi/common/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// package common holds shared codes and types between open API code generator and spec generator. -package common diff --git a/vendor/k8s.io/kubernetes/pkg/healthz/BUILD b/vendor/k8s.io/kubernetes/pkg/healthz/BUILD deleted file mode 100644 index a24ee08d83..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/healthz/BUILD +++ /dev/null @@ -1,28 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "healthz.go", - ], - tags = ["automanaged"], -) - -go_test( - name = "go_default_test", - srcs = ["healthz_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/pkg/healthz/doc.go b/vendor/k8s.io/kubernetes/pkg/healthz/doc.go deleted file mode 100644 index 28980e5d96..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/healthz/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package healthz implements basic http server health checking. 
-// Usage: -// import "k8s.io/kubernetes/pkg/healthz" -// healthz.DefaultHealthz() -package healthz diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD deleted file mode 100644 index cc112dd3ef..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD +++ /dev/null @@ -1,179 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "apply.go", - "autoscale.go", - "bash_comp_utils.go", - "cluster.go", - "configmap.go", - "custom_column_printer.go", - "deployment.go", - "describe.go", - "doc.go", - "explain.go", - "generate.go", - "history.go", - "interfaces.go", - "kubectl.go", - "namespace.go", - "proxy_server.go", - "quota.go", - "resource_filter.go", - "resource_printer.go", - "rollback.go", - "rolling_updater.go", - "rollout_status.go", - "run.go", - "scale.go", - "secret.go", - "secret_for_docker_registry.go", - "secret_for_tls.go", - "service.go", - "service_basic.go", - "serviceaccount.go", - "sorted_resource_name_list.go", - "sorting_printer.go", - "stop.go", - ], - tags = ["automanaged"], - deps = [ - "//federation/apis/federation:go_default_library", - "//federation/apis/federation/v1beta1:go_default_library", - "//federation/client/clientset_generated/federation_internalclientset:go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/annotations:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/events:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/resource:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/util:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/apis/apps:go_default_library", - "//pkg/apis/autoscaling:go_default_library", - "//pkg/apis/batch:go_default_library", - "//pkg/apis/batch/v1:go_default_library", - "//pkg/apis/batch/v2alpha1:go_default_library", - "//pkg/apis/certificates:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/apis/policy:go_default_library", - "//pkg/apis/rbac:go_default_library", - "//pkg/apis/storage:go_default_library", - "//pkg/apis/storage/util:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/apps/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/retry:go_default_library", - "//pkg/client/unversioned:go_default_library", - "//pkg/controller/deployment/util:go_default_library", - "//pkg/credentialprovider:go_default_library", - "//pkg/fieldpath:go_default_library", - "//pkg/fields:go_default_library", - "//pkg/kubectl/resource:go_default_library", - "//pkg/kubelet/qos:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//pkg/util:go_default_library", - "//pkg/util/cert:go_default_library", - "//pkg/util/errors:go_default_library", - "//pkg/util/integer:go_default_library", - "//pkg/util/intstr:go_default_library", - 
"//pkg/util/jsonpath:go_default_library", - "//pkg/util/node:go_default_library", - "//pkg/util/sets:go_default_library", - "//pkg/util/slice:go_default_library", - "//pkg/util/uuid:go_default_library", - "//pkg/util/validation:go_default_library", - "//pkg/util/wait:go_default_library", - "//pkg/watch:go_default_library", - "//vendor:github.com/emicklei/go-restful/swagger", - "//vendor:github.com/ghodss/yaml", - "//vendor:github.com/golang/glog", - "//vendor:github.com/spf13/cobra", - "//vendor:github.com/spf13/pflag", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "cluster_test.go", - "configmap_test.go", - "custom_column_printer_test.go", - "deployment_test.go", - "describe_test.go", - "generate_test.go", - "kubectl_test.go", - "namespace_test.go", - "proxy_server_test.go", - "quota_test.go", - "resource_printer_test.go", - "rolling_updater_test.go", - "rollout_status_test.go", - "run_test.go", - "scale_test.go", - "secret_for_docker_registry_test.go", - "secret_for_tls_test.go", - "secret_test.go", - "service_basic_test.go", - "service_test.go", - "serviceaccount_test.go", - "sorted_resource_name_list_test.go", - "sorting_printer_test.go", - "stop_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//federation/apis/federation:go_default_library", - "//federation/apis/federation/v1beta1:go_default_library", - "//federation/client/clientset_generated/federation_internalclientset/fake:go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/resource:go_default_library", - "//pkg/api/testapi:go_default_library", - "//pkg/api/testing:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/batch:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/apis/policy:go_default_library", - "//pkg/apis/storage:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/restclient/fake:go_default_library", - "//pkg/client/testing/core:go_default_library", - "//pkg/controller/deployment/util:go_default_library", - "//pkg/kubectl/testing:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/runtime/serializer/yaml:go_default_library", - "//pkg/util/diff:go_default_library", - "//pkg/util/intstr:go_default_library", - "//pkg/util/sets:go_default_library", - "//pkg/util/testing:go_default_library", - "//pkg/watch:go_default_library", - "//vendor:github.com/ghodss/yaml", - "//vendor:github.com/spf13/cobra", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/OWNERS b/vendor/k8s.io/kubernetes/pkg/kubectl/OWNERS deleted file mode 100644 index ce699e9679..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -approvers: -- sig-cli-maintainers -reviewers: -- sig-cli diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go deleted file mode 100644 index 6dd4f6bc32..0000000000 --- 
a/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go +++ /dev/null @@ -1,190 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "encoding/json" - - "k8s.io/kubernetes/pkg/api/annotations" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/kubectl/resource" - "k8s.io/kubernetes/pkg/runtime" -) - -// GetOriginalConfiguration retrieves the original configuration of the object -// from the annotation, or nil if no annotation was found. -func GetOriginalConfiguration(mapping *meta.RESTMapping, obj runtime.Object) ([]byte, error) { - annots, err := mapping.MetadataAccessor.Annotations(obj) - if err != nil { - return nil, err - } - - if annots == nil { - return nil, nil - } - - original, ok := annots[annotations.LastAppliedConfigAnnotation] - if !ok { - return nil, nil - } - - return []byte(original), nil -} - -// SetOriginalConfiguration sets the original configuration of the object -// as the annotation on the object for later use in computing a three way patch. -func SetOriginalConfiguration(info *resource.Info, original []byte) error { - if len(original) < 1 { - return nil - } - - accessor := info.Mapping.MetadataAccessor - annots, err := accessor.Annotations(info.Object) - if err != nil { - return err - } - - if annots == nil { - annots = map[string]string{} - } - - annots[annotations.LastAppliedConfigAnnotation] = string(original) - if err := info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annots); err != nil { - return err - } - - return nil -} - -// GetModifiedConfiguration retrieves the modified configuration of the object. -// If annotate is true, it embeds the result as an annotation in the modified -// configuration. If an object was read from the command input, it will use that -// version of the object. Otherwise, it will use the version from the server. -func GetModifiedConfiguration(info *resource.Info, annotate bool, codec runtime.Encoder) ([]byte, error) { - // First serialize the object without the annotation to prevent recursion, - // then add that serialization to it as the annotation and serialize it again. - var modified []byte - if info.VersionedObject != nil { - // If an object was read from input, use that version. - accessor, err := meta.Accessor(info.VersionedObject) - if err != nil { - return nil, err - } - - // Get the current annotations from the object. - annots := accessor.GetAnnotations() - if annots == nil { - annots = map[string]string{} - } - - original := annots[annotations.LastAppliedConfigAnnotation] - delete(annots, annotations.LastAppliedConfigAnnotation) - accessor.SetAnnotations(annots) - // TODO: this needs to be abstracted - there should be no assumption that versioned object - // can be marshalled to JSON. 
- modified, err = json.Marshal(info.VersionedObject) - if err != nil { - return nil, err - } - - if annotate { - annots[annotations.LastAppliedConfigAnnotation] = string(modified) - accessor.SetAnnotations(annots) - // TODO: this needs to be abstracted - there should be no assumption that versioned object - // can be marshalled to JSON. - modified, err = json.Marshal(info.VersionedObject) - if err != nil { - return nil, err - } - } - - // Restore the object to its original condition. - annots[annotations.LastAppliedConfigAnnotation] = original - accessor.SetAnnotations(annots) - } else { - // Otherwise, use the server side version of the object. - accessor := info.Mapping.MetadataAccessor - // Get the current annotations from the object. - annots, err := accessor.Annotations(info.Object) - if err != nil { - return nil, err - } - - if annots == nil { - annots = map[string]string{} - } - - original := annots[annotations.LastAppliedConfigAnnotation] - delete(annots, annotations.LastAppliedConfigAnnotation) - if err := accessor.SetAnnotations(info.Object, annots); err != nil { - return nil, err - } - - modified, err = runtime.Encode(codec, info.Object) - if err != nil { - return nil, err - } - - if annotate { - annots[annotations.LastAppliedConfigAnnotation] = string(modified) - if err := info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annots); err != nil { - return nil, err - } - - modified, err = runtime.Encode(codec, info.Object) - if err != nil { - return nil, err - } - } - - // Restore the object to its original condition. - annots[annotations.LastAppliedConfigAnnotation] = original - if err := info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annots); err != nil { - return nil, err - } - } - - return modified, nil -} - -// UpdateApplyAnnotation calls CreateApplyAnnotation if the last applied -// configuration annotation is already present. Otherwise, it does nothing. -func UpdateApplyAnnotation(info *resource.Info, codec runtime.Encoder) error { - if original, err := GetOriginalConfiguration(info.Mapping, info.Object); err != nil || len(original) <= 0 { - return err - } - return CreateApplyAnnotation(info, codec) -} - -// CreateApplyAnnotation gets the modified configuration of the object, -// without embedding it again, and then sets it on the object as the annotation. -func CreateApplyAnnotation(info *resource.Info, codec runtime.Encoder) error { - modified, err := GetModifiedConfiguration(info, false, codec) - if err != nil { - return err - } - return SetOriginalConfiguration(info, modified) -} - -// Create the annotation used by kubectl apply only when createAnnotation is true -// Otherwise, only update the annotation when it already exists -func CreateOrUpdateAnnotation(createAnnotation bool, info *resource.Info, codec runtime.Encoder) error { - if createAnnotation { - return CreateApplyAnnotation(info, codec) - } - return UpdateApplyAnnotation(info, codec) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/autoscale.go b/vendor/k8s.io/kubernetes/pkg/kubectl/autoscale.go deleted file mode 100644 index 6e5f5a24e6..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/autoscale.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "strconv" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/autoscaling" - "k8s.io/kubernetes/pkg/runtime" -) - -type HorizontalPodAutoscalerV1Beta1 struct{} - -func (HorizontalPodAutoscalerV1Beta1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"default-name", true}, - {"name", false}, - {"scaleRef-kind", false}, - {"scaleRef-name", false}, - {"scaleRef-apiVersion", false}, - {"scaleRef-subresource", false}, - {"min", false}, - {"max", true}, - {"cpu-percent", false}, - } -} - -func (HorizontalPodAutoscalerV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - return generateHPA(genericParams) -} - -type HorizontalPodAutoscalerV1 struct{} - -func (HorizontalPodAutoscalerV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"default-name", true}, - {"name", false}, - {"scaleRef-kind", false}, - {"scaleRef-name", false}, - {"scaleRef-apiVersion", false}, - {"min", false}, - {"max", true}, - {"cpu-percent", false}, - } -} - -func (HorizontalPodAutoscalerV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - return generateHPA(genericParams) -} - -func generateHPA(genericParams map[string]interface{}) (runtime.Object, error) { - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - - name, found := params["name"] - if !found || len(name) == 0 { - name, found = params["default-name"] - if !found || len(name) == 0 { - return nil, fmt.Errorf("'name' is a required parameter.") - } - } - minString, found := params["min"] - min := -1 - var err error - if found { - if min, err = strconv.Atoi(minString); err != nil { - return nil, err - } - } - maxString, found := params["max"] - if !found { - return nil, fmt.Errorf("'max' is a required parameter.") - } - max, err := strconv.Atoi(maxString) - if err != nil { - return nil, err - } - cpuString, found := params["cpu-percent"] - cpu := -1 - if found { - if cpu, err = strconv.Atoi(cpuString); err != nil { - return nil, err - } - } - - scaler := autoscaling.HorizontalPodAutoscaler{ - ObjectMeta: api.ObjectMeta{ - Name: name, - }, - Spec: autoscaling.HorizontalPodAutoscalerSpec{ - ScaleTargetRef: autoscaling.CrossVersionObjectReference{ - Kind: params["scaleRef-kind"], - Name: params["scaleRef-name"], - APIVersion: params["scaleRef-apiVersion"], - }, - MaxReplicas: int32(max), - }, - } - if min > 0 { - v := int32(min) - scaler.Spec.MinReplicas = &v - } - if cpu >= 0 { - c := int32(cpu) - scaler.Spec.TargetCPUUtilizationPercentage = &c - } - return &scaler, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/bash_comp_utils.go b/vendor/k8s.io/kubernetes/pkg/kubectl/bash_comp_utils.go deleted file mode 100644 index 346200276f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/bash_comp_utils.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// A set of common functions needed by cmd/kubectl and pkg/kubectl packages. - -package kubectl - -import ( - "strings" - - "github.com/spf13/cobra" - - "k8s.io/kubernetes/pkg/kubectl/resource" -) - -func AddJsonFilenameFlag(cmd *cobra.Command, value *[]string, usage string) { - cmd.Flags().StringSliceVarP(value, "filename", "f", *value, usage) - annotations := make([]string, 0, len(resource.FileExtensions)) - for _, ext := range resource.FileExtensions { - annotations = append(annotations, strings.TrimLeft(ext, ".")) - } - cmd.Flags().SetAnnotation("filename", cobra.BashCompFilenameExt, annotations) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cluster.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cluster.go deleted file mode 100644 index 895817826b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cluster.go +++ /dev/null @@ -1,125 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - - federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" -) - -// ClusterGeneratorV1Beta1 supports stable generation of a -// federation/cluster resource. -type ClusterGeneratorV1Beta1 struct { - // Name of the cluster context (required) - Name string - // ClientCIDR is the CIDR range in which the Kubernetes APIServer - // is available for the client (optional) - ClientCIDR string - // ServerAddress is the APIServer address of the Kubernetes cluster - // that is being registered (required) - ServerAddress string - // SecretName is the name of the secret that stores the credentials - // for the Kubernetes cluster that is being registered (optional) - SecretName string -} - -// Ensure it supports the generator pattern that uses parameter -// injection. -var _ Generator = &ClusterGeneratorV1Beta1{} - -// Ensure it supports the generator pattern that uses parameters -// specified during construction. -var _ StructuredGenerator = &ClusterGeneratorV1Beta1{} - -// Generate returns a cluster resource using the specified parameters. 
-func (s ClusterGeneratorV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) - if err != nil { - return nil, err - } - clustergen := &ClusterGeneratorV1Beta1{} - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - clustergen.Name = params["name"] - clustergen.ClientCIDR = params["client-cidr"] - clustergen.ServerAddress = params["server-address"] - clustergen.SecretName = params["secret"] - return clustergen.StructuredGenerate() -} - -// ParamNames returns the set of supported input parameters when using -// the parameter injection generator pattern. -func (s ClusterGeneratorV1Beta1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"client-cidr", false}, - {"server-address", true}, - {"secret", false}, - } -} - -// StructuredGenerate outputs a federation cluster resource object -// using the configured fields. -func (s ClusterGeneratorV1Beta1) StructuredGenerate() (runtime.Object, error) { - if err := s.validate(); err != nil { - return nil, err - } - if s.ClientCIDR == "" { - s.ClientCIDR = "0.0.0.0/0" - } - if s.SecretName == "" { - s.SecretName = s.Name - } - cluster := &federationapi.Cluster{ - ObjectMeta: v1.ObjectMeta{ - Name: s.Name, - }, - Spec: federationapi.ClusterSpec{ - ServerAddressByClientCIDRs: []federationapi.ServerAddressByClientCIDR{ - { - ClientCIDR: s.ClientCIDR, - ServerAddress: s.ServerAddress, - }, - }, - SecretRef: &v1.LocalObjectReference{ - Name: s.SecretName, - }, - }, - } - return cluster, nil -} - -// validate validates required fields are set to support structured -// generation. 
-func (s ClusterGeneratorV1Beta1) validate() error { - if len(s.Name) == 0 { - return fmt.Errorf("name must be specified") - } - if len(s.ServerAddress) == 0 { - return fmt.Errorf("server address must be specified") - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD deleted file mode 100644 index e567c8eb63..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD +++ /dev/null @@ -1,111 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "cached_discovery.go", - "clientcache.go", - "factory.go", - "helpers.go", - "printing.go", - "shortcut_restmapper.go", - ], - tags = ["automanaged"], - deps = [ - "//federation/apis/federation:go_default_library", - "//federation/client/clientset_generated/federation_internalclientset:go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/service:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/validation:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/apps:go_default_library", - "//pkg/apis/batch:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/typed/discovery:go_default_library", - "//pkg/client/typed/dynamic:go_default_library", - "//pkg/client/unversioned:go_default_library", - "//pkg/client/unversioned/clientcmd:go_default_library", - "//pkg/controller:go_default_library", - "//pkg/kubectl:go_default_library", - "//pkg/kubectl/resource:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/registry/extensions/thirdpartyresourcedata:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/runtime/serializer/json:go_default_library", - "//pkg/util/errors:go_default_library", - "//pkg/util/exec:go_default_library", - "//pkg/util/flag:go_default_library", - "//pkg/util/homedir:go_default_library", - "//pkg/util/sets:go_default_library", - "//pkg/util/strategicpatch:go_default_library", - "//pkg/version:go_default_library", - "//pkg/watch:go_default_library", - "//vendor:github.com/emicklei/go-restful/swagger", - "//vendor:github.com/evanphx/json-patch", - "//vendor:github.com/golang/glog", - "//vendor:github.com/spf13/cobra", - "//vendor:github.com/spf13/pflag", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "cached_discovery_test.go", - "factory_test.go", - "helpers_test.go", - "shortcut_restmapper_test.go", - ], - data = [ - "//api/swagger-spec", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/testapi:go_default_library", - "//pkg/api/testing:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/api/validation:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/extensions:go_default_library", - 
"//pkg/client/clientset_generated/internalclientset/fake:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/client/restclient/fake:go_default_library", - "//pkg/client/testing/core:go_default_library", - "//pkg/client/typed/discovery:go_default_library", - "//pkg/client/unversioned/clientcmd:go_default_library", - "//pkg/client/unversioned/clientcmd/api:go_default_library", - "//pkg/controller:go_default_library", - "//pkg/kubectl:go_default_library", - "//pkg/kubectl/resource:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/exec:go_default_library", - "//pkg/util/flag:go_default_library", - "//pkg/util/validation/field:go_default_library", - "//pkg/version:go_default_library", - "//pkg/watch:go_default_library", - "//vendor:github.com/emicklei/go-restful/swagger", - "//vendor:github.com/stretchr/testify/assert", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/cached_discovery.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/cached_discovery.go deleted file mode 100644 index ad1e1df767..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/cached_discovery.go +++ /dev/null @@ -1,252 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "errors" - "io/ioutil" - "os" - "path/filepath" - "sync" - "time" - - "github.com/emicklei/go-restful/swagger" - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/client/typed/discovery" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/version" -) - -// CachedDiscoveryClient implements the functions that discovery server-supported API groups, -// versions and resources. -type CachedDiscoveryClient struct { - delegate discovery.DiscoveryInterface - - // cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well. - cacheDirectory string - - // ttl is how long the cache should be considered valid - ttl time.Duration - - // mutex protects the variables below - mutex sync.Mutex - - // ourFiles are all filenames of cache files created by this process - ourFiles map[string]struct{} - // invalidated is true if all cache files should be ignored that are not ours (e.g. after Invalidate() was called) - invalidated bool - // fresh is true if all used cache files were ours - fresh bool -} - -var _ discovery.CachedDiscoveryInterface = &CachedDiscoveryClient{} - -// ServerResourcesForGroupVersion returns the supported resources for a group and version. -func (d *CachedDiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (*unversioned.APIResourceList, error) { - filename := filepath.Join(d.cacheDirectory, groupVersion, "serverresources.json") - cachedBytes, err := d.getCachedFile(filename) - // don't fail on errors, we either don't have a file or won't be able to run the cached check. 
Either way we can fallback. - if err == nil { - cachedResources := &unversioned.APIResourceList{} - if err := runtime.DecodeInto(api.Codecs.UniversalDecoder(), cachedBytes, cachedResources); err == nil { - glog.V(6).Infof("returning cached discovery info from %v", filename) - return cachedResources, nil - } - } - - liveResources, err := d.delegate.ServerResourcesForGroupVersion(groupVersion) - if err != nil { - return liveResources, err - } - - if err := d.writeCachedFile(filename, liveResources); err != nil { - glog.V(3).Infof("failed to write cache to %v due to %v", filename, err) - } - - return liveResources, nil -} - -// ServerResources returns the supported resources for all groups and versions. -func (d *CachedDiscoveryClient) ServerResources() (map[string]*unversioned.APIResourceList, error) { - apiGroups, err := d.ServerGroups() - if err != nil { - return nil, err - } - groupVersions := unversioned.ExtractGroupVersions(apiGroups) - result := map[string]*unversioned.APIResourceList{} - for _, groupVersion := range groupVersions { - resources, err := d.ServerResourcesForGroupVersion(groupVersion) - if err != nil { - return nil, err - } - result[groupVersion] = resources - } - return result, nil -} - -func (d *CachedDiscoveryClient) ServerGroups() (*unversioned.APIGroupList, error) { - filename := filepath.Join(d.cacheDirectory, "servergroups.json") - cachedBytes, err := d.getCachedFile(filename) - // don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback. - if err == nil { - cachedGroups := &unversioned.APIGroupList{} - if err := runtime.DecodeInto(api.Codecs.UniversalDecoder(), cachedBytes, cachedGroups); err == nil { - glog.V(6).Infof("returning cached discovery info from %v", filename) - return cachedGroups, nil - } - } - - liveGroups, err := d.delegate.ServerGroups() - if err != nil { - return liveGroups, err - } - - if err := d.writeCachedFile(filename, liveGroups); err != nil { - glog.V(3).Infof("failed to write cache to %v due to %v", filename, err) - } - - return liveGroups, nil -} - -func (d *CachedDiscoveryClient) getCachedFile(filename string) ([]byte, error) { - // after invalidation ignore cache files not created by this process - d.mutex.Lock() - _, ourFile := d.ourFiles[filename] - if d.invalidated && !ourFile { - d.mutex.Unlock() - return nil, errors.New("cache invalidated") - } - d.mutex.Unlock() - - file, err := os.Open(filename) - if err != nil { - return nil, err - } - - fileInfo, err := file.Stat() - if err != nil { - return nil, err - } - - if time.Now().After(fileInfo.ModTime().Add(d.ttl)) { - return nil, errors.New("cache expired") - } - - // the cache is present and its valid. Try to read and use it. 
- cachedBytes, err := ioutil.ReadAll(file) - if err != nil { - return nil, err - } - - d.mutex.Lock() - defer d.mutex.Unlock() - d.fresh = d.fresh && ourFile - - return cachedBytes, nil -} - -func (d *CachedDiscoveryClient) writeCachedFile(filename string, obj runtime.Object) error { - if err := os.MkdirAll(filepath.Dir(filename), 0755); err != nil { - return err - } - - bytes, err := runtime.Encode(api.Codecs.LegacyCodec(), obj) - if err != nil { - return err - } - - f, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)+".") - if err != nil { - return err - } - defer os.Remove(f.Name()) - _, err = f.Write(bytes) - if err != nil { - return err - } - - err = f.Chmod(0755) - if err != nil { - return err - } - - name := f.Name() - err = f.Close() - if err != nil { - return err - } - - // atomic rename - d.mutex.Lock() - defer d.mutex.Unlock() - err = os.Rename(name, filename) - if err == nil { - d.ourFiles[filename] = struct{}{} - } - return err -} - -func (d *CachedDiscoveryClient) RESTClient() restclient.Interface { - return d.delegate.RESTClient() -} - -func (d *CachedDiscoveryClient) ServerPreferredResources() ([]unversioned.GroupVersionResource, error) { - return d.delegate.ServerPreferredResources() -} - -func (d *CachedDiscoveryClient) ServerPreferredNamespacedResources() ([]unversioned.GroupVersionResource, error) { - return d.delegate.ServerPreferredNamespacedResources() -} - -func (d *CachedDiscoveryClient) ServerVersion() (*version.Info, error) { - return d.delegate.ServerVersion() -} - -func (d *CachedDiscoveryClient) SwaggerSchema(version unversioned.GroupVersion) (*swagger.ApiDeclaration, error) { - return d.delegate.SwaggerSchema(version) -} - -func (d *CachedDiscoveryClient) Fresh() bool { - d.mutex.Lock() - defer d.mutex.Unlock() - - return d.fresh -} - -func (d *CachedDiscoveryClient) Invalidate() { - d.mutex.Lock() - defer d.mutex.Unlock() - - d.ourFiles = map[string]struct{}{} - d.fresh = true - d.invalidated = true -} - -// NewCachedDiscoveryClient creates a new DiscoveryClient. cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well. -func NewCachedDiscoveryClient(delegate discovery.DiscoveryInterface, cacheDirectory string, ttl time.Duration) *CachedDiscoveryClient { - return &CachedDiscoveryClient{ - delegate: delegate, - cacheDirectory: cacheDirectory, - ttl: ttl, - ourFiles: map[string]struct{}{}, - fresh: true, - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/clientcache.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/clientcache.go deleted file mode 100644 index db8897cf3b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/clientcache.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package util - -import ( - "sync" - - fed_clientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/client/typed/discovery" - oldclient "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" -) - -func NewClientCache(loader clientcmd.ClientConfig) *ClientCache { - return &ClientCache{ - clientsets: make(map[unversioned.GroupVersion]*internalclientset.Clientset), - configs: make(map[unversioned.GroupVersion]*restclient.Config), - fedClientSets: make(map[unversioned.GroupVersion]fed_clientset.Interface), - loader: loader, - } -} - -// ClientCache caches previously loaded clients for reuse, and ensures MatchServerVersion -// is invoked only once -type ClientCache struct { - loader clientcmd.ClientConfig - clientsets map[unversioned.GroupVersion]*internalclientset.Clientset - fedClientSets map[unversioned.GroupVersion]fed_clientset.Interface - configs map[unversioned.GroupVersion]*restclient.Config - - matchVersion bool - - defaultConfigLock sync.Mutex - defaultConfig *restclient.Config - discoveryClient discovery.DiscoveryInterface -} - -// also looks up the discovery client. We can't do this during init because the flags won't have been set -// because this is constructed pre-command execution before the command tree is even set up -func (c *ClientCache) getDefaultConfig() (restclient.Config, discovery.DiscoveryInterface, error) { - c.defaultConfigLock.Lock() - defer c.defaultConfigLock.Unlock() - - if c.defaultConfig != nil && c.discoveryClient != nil { - return *c.defaultConfig, c.discoveryClient, nil - } - - config, err := c.loader.ClientConfig() - if err != nil { - return restclient.Config{}, nil, err - } - discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) - if err != nil { - return restclient.Config{}, nil, err - } - if c.matchVersion { - if err := discovery.MatchesServerVersion(discoveryClient); err != nil { - return restclient.Config{}, nil, err - } - } - - c.defaultConfig = config - c.discoveryClient = discoveryClient - return *c.defaultConfig, c.discoveryClient, nil -} - -// ClientConfigForVersion returns the correct config for a server -func (c *ClientCache) ClientConfigForVersion(requiredVersion *unversioned.GroupVersion) (*restclient.Config, error) { - // TODO: have a better config copy method - config, discoveryClient, err := c.getDefaultConfig() - if err != nil { - return nil, err - } - if requiredVersion == nil && config.GroupVersion != nil { - // if someone has set the values via flags, our config will have the groupVersion set - // that means it is required. - requiredVersion = config.GroupVersion - } - - // required version may still be nil, since config.GroupVersion may have been nil. Do the check - // before looking up from the cache - if requiredVersion != nil { - if config, ok := c.configs[*requiredVersion]; ok { - return config, nil - } - } - - negotiatedVersion, err := discovery.NegotiateVersion(discoveryClient, requiredVersion, registered.EnabledVersions()) - if err != nil { - return nil, err - } - config.GroupVersion = negotiatedVersion - - // TODO this isn't what we want. Each clientset should be setting defaults as it sees fit. 
- oldclient.SetKubernetesDefaults(&config) - - if requiredVersion != nil { - c.configs[*requiredVersion] = &config - } - - // `version` does not necessarily equal `config.Version`. However, we know that we call this method again with - // `config.Version`, we should get the config we've just built. - configCopy := config - c.configs[*config.GroupVersion] = &configCopy - - return &config, nil -} - -// ClientSetForVersion initializes or reuses a clientset for the specified version, or returns an -// error if that is not possible -func (c *ClientCache) ClientSetForVersion(requiredVersion *unversioned.GroupVersion) (*internalclientset.Clientset, error) { - if requiredVersion != nil { - if clientset, ok := c.clientsets[*requiredVersion]; ok { - return clientset, nil - } - } - config, err := c.ClientConfigForVersion(requiredVersion) - if err != nil { - return nil, err - } - - clientset, err := internalclientset.NewForConfig(config) - if err != nil { - return nil, err - } - c.clientsets[*config.GroupVersion] = clientset - - // `version` does not necessarily equal `config.Version`. However, we know that if we call this method again with - // `version`, we should get a client based on the same config we just found. There's no guarantee that a client - // is copiable, so create a new client and save it in the cache. - if requiredVersion != nil { - configCopy := *config - clientset, err := internalclientset.NewForConfig(&configCopy) - if err != nil { - return nil, err - } - c.clientsets[*requiredVersion] = clientset - } - - return clientset, nil -} - -func (c *ClientCache) FederationClientSetForVersion(version *unversioned.GroupVersion) (fed_clientset.Interface, error) { - if version != nil { - if clientSet, found := c.fedClientSets[*version]; found { - return clientSet, nil - } - } - config, err := c.ClientConfigForVersion(version) - if err != nil { - return nil, err - } - - // TODO: support multi versions of client with clientset - clientSet, err := fed_clientset.NewForConfig(config) - if err != nil { - return nil, err - } - c.fedClientSets[*config.GroupVersion] = clientSet - - if version != nil { - configCopy := *config - clientSet, err := fed_clientset.NewForConfig(&configCopy) - if err != nil { - return nil, err - } - c.fedClientSets[*version] = clientSet - } - - return clientSet, nil -} - -func (c *ClientCache) FederationClientForVersion(version *unversioned.GroupVersion) (*restclient.RESTClient, error) { - fedClientSet, err := c.FederationClientSetForVersion(version) - if err != nil { - return nil, err - } - return fedClientSet.Federation().RESTClient().(*restclient.RESTClient), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go deleted file mode 100644 index bd5dc14614..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go +++ /dev/null @@ -1,1334 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package util - -import ( - "bytes" - "errors" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "os/user" - "path" - "path/filepath" - "regexp" - "sort" - "strconv" - "strings" - "time" - - "github.com/emicklei/go-restful/swagger" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - - "k8s.io/kubernetes/federation/apis/federation" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/service" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/validation" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/client/typed/discovery" - "k8s.io/kubernetes/pkg/client/typed/dynamic" - client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" - "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/resource" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer/json" - utilflag "k8s.io/kubernetes/pkg/util/flag" - "k8s.io/kubernetes/pkg/util/homedir" - "k8s.io/kubernetes/pkg/watch" -) - -const ( - FlagMatchBinaryVersion = "match-server-version" -) - -// Factory provides abstractions that allow the Kubectl command to be extended across multiple types -// of resources and different API sets. -// TODO: make the functions interfaces -// TODO: pass the various interfaces on the factory directly into the command constructors (so the -// commands are decoupled from the factory). -type Factory interface { - // Returns internal flagset - FlagSet() *pflag.FlagSet - - // Returns a discovery client - DiscoveryClient() (discovery.CachedDiscoveryInterface, error) - // Returns interfaces for dealing with arbitrary runtime.Objects. - Object() (meta.RESTMapper, runtime.ObjectTyper) - // Returns interfaces for dealing with arbitrary - // runtime.Unstructured. This performs API calls to discover types. - UnstructuredObject() (meta.RESTMapper, runtime.ObjectTyper, error) - // Returns interfaces for decoding objects - if toInternal is set, decoded objects will be converted - // into their internal form (if possible). Eventually the internal form will be removed as an option, - // and only versioned objects will be returned. - Decoder(toInternal bool) runtime.Decoder - // Returns an encoder capable of encoding a provided object into JSON in the default desired version. - JSONEncoder() runtime.Encoder - // ClientSet gives you back an internal, generated clientset - ClientSet() (*internalclientset.Clientset, error) - // Returns a RESTClient for accessing Kubernetes resources or an error. - RESTClient() (*restclient.RESTClient, error) - // Returns a client.Config for accessing the Kubernetes server. - ClientConfig() (*restclient.Config, error) - // Returns a RESTClient for working with the specified RESTMapping or an error. This is intended - // for working with arbitrary resources and is not guaranteed to point to a Kubernetes APIServer. - ClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) - // Returns a RESTClient for working with Unstructured objects. 
- UnstructuredClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) - // Returns a Describer for displaying the specified RESTMapping type or an error. - Describer(mapping *meta.RESTMapping) (kubectl.Describer, error) - // Returns a Printer for formatting objects of the given type or an error. - Printer(mapping *meta.RESTMapping, options kubectl.PrintOptions) (kubectl.ResourcePrinter, error) - // Returns a Scaler for changing the size of the specified RESTMapping type or an error - Scaler(mapping *meta.RESTMapping) (kubectl.Scaler, error) - // Returns a Reaper for gracefully shutting down resources. - Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, error) - // Returns a HistoryViewer for viewing change history - HistoryViewer(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) - // Returns a Rollbacker for changing the rollback version of the specified RESTMapping type or an error - Rollbacker(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) - // Returns a StatusViewer for printing rollout status. - StatusViewer(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) - // MapBasedSelectorForObject returns the map-based selector associated with the provided object. If a - // new set-based selector is provided, an error is returned if the selector cannot be converted to a - // map-based selector - MapBasedSelectorForObject(object runtime.Object) (string, error) - // PortsForObject returns the ports associated with the provided object - PortsForObject(object runtime.Object) ([]string, error) - // ProtocolsForObject returns the mapping associated with the provided object - ProtocolsForObject(object runtime.Object) (map[string]string, error) - // LabelsForObject returns the labels associated with the provided object - LabelsForObject(object runtime.Object) (map[string]string, error) - // LogsForObject returns a request for the logs associated with the provided object - LogsForObject(object, options runtime.Object) (*restclient.Request, error) - // Pauser marks the object in the info as paused ie. it will not be reconciled by its controller. - Pauser(info *resource.Info) (bool, error) - // Resumer resumes a paused object inside the info ie. it will be reconciled by its controller. - Resumer(info *resource.Info) (bool, error) - // Returns a schema that can validate objects stored on disk. - Validator(validate bool, cacheDir string) (validation.Schema, error) - // SwaggerSchema returns the schema declaration for the provided group version kind. - SwaggerSchema(unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) - // Returns the default namespace to use in cases where no - // other namespace is specified and whether the namespace was - // overridden. - DefaultNamespace() (string, bool, error) - // Generators returns the generators for the provided command - Generators(cmdName string) map[string]kubectl.Generator - // Check whether the kind of resources could be exposed - CanBeExposed(kind unversioned.GroupKind) error - // Check whether the kind of resources could be autoscaled - CanBeAutoscaled(kind unversioned.GroupKind) error - // AttachablePodForObject returns the pod to which to attach given an object. - AttachablePodForObject(object runtime.Object) (*api.Pod, error) - // UpdatePodSpecForObject will call the provided function on the pod spec this object supports, - // return false if no pod spec is supported, or return an error. 
- UpdatePodSpecForObject(obj runtime.Object, fn func(*api.PodSpec) error) (bool, error) - // EditorEnvs returns a group of environment variables that the edit command - // can range over in order to determine if the user has specified an editor - // of their choice. - EditorEnvs() []string - // PrintObjectSpecificMessage prints object-specific messages on the provided writer - PrintObjectSpecificMessage(obj runtime.Object, out io.Writer) - - // Command will stringify and return all environment arguments ie. a command run by a client - // using the factory. - Command() string - // BindFlags adds any flags that are common to all kubectl sub commands. - BindFlags(flags *pflag.FlagSet) - // BindExternalFlags adds any flags defined by external projects (not part of pflags) - BindExternalFlags(flags *pflag.FlagSet) - - DefaultResourceFilterOptions(cmd *cobra.Command, withNamespace bool) *kubectl.PrintOptions - // DefaultResourceFilterFunc returns a collection of FilterFuncs suitable for filtering specific resource types. - DefaultResourceFilterFunc() kubectl.Filters - - // PrintObject prints an api object given command line flags to modify the output format - PrintObject(cmd *cobra.Command, mapper meta.RESTMapper, obj runtime.Object, out io.Writer) error - // PrinterForMapping returns a printer suitable for displaying the provided resource type. - // Requires that printer flags have been added to cmd (see AddPrinterFlags). - PrinterForMapping(cmd *cobra.Command, mapping *meta.RESTMapping, withNamespace bool) (kubectl.ResourcePrinter, error) - // One stop shopping for a Builder - NewBuilder() *resource.Builder - - // SuggestedPodTemplateResources returns a list of resource types that declare a pod template - SuggestedPodTemplateResources() []unversioned.GroupResource -} - -const ( - RunV1GeneratorName = "run/v1" - RunPodV1GeneratorName = "run-pod/v1" - ServiceV1GeneratorName = "service/v1" - ServiceV2GeneratorName = "service/v2" - ServiceNodePortGeneratorV1Name = "service-nodeport/v1" - ServiceClusterIPGeneratorV1Name = "service-clusterip/v1" - ServiceLoadBalancerGeneratorV1Name = "service-loadbalancer/v1" - ServiceAccountV1GeneratorName = "serviceaccount/v1" - HorizontalPodAutoscalerV1Beta1GeneratorName = "horizontalpodautoscaler/v1beta1" - HorizontalPodAutoscalerV1GeneratorName = "horizontalpodautoscaler/v1" - DeploymentV1Beta1GeneratorName = "deployment/v1beta1" - DeploymentBasicV1Beta1GeneratorName = "deployment-basic/v1beta1" - JobV1Beta1GeneratorName = "job/v1beta1" - JobV1GeneratorName = "job/v1" - CronJobV2Alpha1GeneratorName = "cronjob/v2alpha1" - ScheduledJobV2Alpha1GeneratorName = "scheduledjob/v2alpha1" - NamespaceV1GeneratorName = "namespace/v1" - ResourceQuotaV1GeneratorName = "resourcequotas/v1" - SecretV1GeneratorName = "secret/v1" - SecretForDockerRegistryV1GeneratorName = "secret-for-docker-registry/v1" - SecretForTLSV1GeneratorName = "secret-for-tls/v1" - ConfigMapV1GeneratorName = "configmap/v1" - ClusterV1Beta1GeneratorName = "cluster/v1beta1" -) - -// DefaultGenerators returns the set of default generators for use in Factory instances -func DefaultGenerators(cmdName string) map[string]kubectl.Generator { - var generator map[string]kubectl.Generator - switch cmdName { - case "expose": - generator = map[string]kubectl.Generator{ - ServiceV1GeneratorName: kubectl.ServiceGeneratorV1{}, - ServiceV2GeneratorName: kubectl.ServiceGeneratorV2{}, - } - case "service-clusterip": - generator = map[string]kubectl.Generator{ - ServiceClusterIPGeneratorV1Name: 
kubectl.ServiceClusterIPGeneratorV1{}, - } - case "service-nodeport": - generator = map[string]kubectl.Generator{ - ServiceNodePortGeneratorV1Name: kubectl.ServiceNodePortGeneratorV1{}, - } - case "service-loadbalancer": - generator = map[string]kubectl.Generator{ - ServiceLoadBalancerGeneratorV1Name: kubectl.ServiceLoadBalancerGeneratorV1{}, - } - case "deployment": - generator = map[string]kubectl.Generator{ - DeploymentBasicV1Beta1GeneratorName: kubectl.DeploymentBasicGeneratorV1{}, - } - case "run": - generator = map[string]kubectl.Generator{ - RunV1GeneratorName: kubectl.BasicReplicationController{}, - RunPodV1GeneratorName: kubectl.BasicPod{}, - DeploymentV1Beta1GeneratorName: kubectl.DeploymentV1Beta1{}, - JobV1Beta1GeneratorName: kubectl.JobV1Beta1{}, - JobV1GeneratorName: kubectl.JobV1{}, - ScheduledJobV2Alpha1GeneratorName: kubectl.CronJobV2Alpha1{}, - CronJobV2Alpha1GeneratorName: kubectl.CronJobV2Alpha1{}, - } - case "autoscale": - generator = map[string]kubectl.Generator{ - HorizontalPodAutoscalerV1Beta1GeneratorName: kubectl.HorizontalPodAutoscalerV1Beta1{}, - HorizontalPodAutoscalerV1GeneratorName: kubectl.HorizontalPodAutoscalerV1{}, - } - case "namespace": - generator = map[string]kubectl.Generator{ - NamespaceV1GeneratorName: kubectl.NamespaceGeneratorV1{}, - } - case "quota": - generator = map[string]kubectl.Generator{ - ResourceQuotaV1GeneratorName: kubectl.ResourceQuotaGeneratorV1{}, - } - case "secret": - generator = map[string]kubectl.Generator{ - SecretV1GeneratorName: kubectl.SecretGeneratorV1{}, - } - case "secret-for-docker-registry": - generator = map[string]kubectl.Generator{ - SecretForDockerRegistryV1GeneratorName: kubectl.SecretForDockerRegistryGeneratorV1{}, - } - case "secret-for-tls": - generator = map[string]kubectl.Generator{ - SecretForTLSV1GeneratorName: kubectl.SecretForTLSGeneratorV1{}, - } - } - - return generator -} - -func getGroupVersionKinds(gvks []unversioned.GroupVersionKind, group string) []unversioned.GroupVersionKind { - result := []unversioned.GroupVersionKind{} - for ix := range gvks { - if gvks[ix].Group == group { - result = append(result, gvks[ix]) - } - } - return result -} - -func makeInterfacesFor(versionList []unversioned.GroupVersion) func(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { - accessor := meta.NewAccessor() - return func(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { - for ix := range versionList { - if versionList[ix].String() == version.String() { - return &meta.VersionInterfaces{ - ObjectConvertor: thirdpartyresourcedata.NewThirdPartyObjectConverter(api.Scheme), - MetadataAccessor: accessor, - }, nil - } - } - return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, versionList) - } -} - -type factory struct { - flags *pflag.FlagSet - clientConfig clientcmd.ClientConfig - - clients *ClientCache -} - -// NewFactory creates a factory with the default Kubernetes resources defined -// if optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig. -// if optionalClientConfig is not nil, then this factory will make use of it. 
-func NewFactory(optionalClientConfig clientcmd.ClientConfig) Factory { - flags := pflag.NewFlagSet("", pflag.ContinueOnError) - flags.SetNormalizeFunc(utilflag.WarnWordSepNormalizeFunc) // Warn for "_" flags - - clientConfig := optionalClientConfig - if optionalClientConfig == nil { - clientConfig = DefaultClientConfig(flags) - } - - clients := NewClientCache(clientConfig) - - f := &factory{ - flags: flags, - clientConfig: clientConfig, - clients: clients, - } - - return f -} - -func (f *factory) FlagSet() *pflag.FlagSet { - return f.flags -} - -func (f *factory) DiscoveryClient() (discovery.CachedDiscoveryInterface, error) { - cfg, err := f.clientConfig.ClientConfig() - if err != nil { - return nil, err - } - discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) - if err != nil { - return nil, err - } - cacheDir := computeDiscoverCacheDir(filepath.Join(homedir.HomeDir(), ".kube", "cache", "discovery"), cfg.Host) - return NewCachedDiscoveryClient(discoveryClient, cacheDir, time.Duration(10*time.Minute)), nil -} - -func (f *factory) Object() (meta.RESTMapper, runtime.ObjectTyper) { - mapper := registered.RESTMapper() - discoveryClient, err := f.DiscoveryClient() - if err == nil { - mapper = meta.FirstHitRESTMapper{ - MultiRESTMapper: meta.MultiRESTMapper{ - discovery.NewDeferredDiscoveryRESTMapper(discoveryClient, registered.InterfacesFor), - registered.RESTMapper(), // hardcoded fall back - }, - } - } - - // wrap with shortcuts - mapper = NewShortcutExpander(mapper, discoveryClient) - - // wrap with output preferences - cfg, err := f.clients.ClientConfigForVersion(nil) - checkErrWithPrefix("failed to get client config: ", err) - cmdApiVersion := unversioned.GroupVersion{} - if cfg.GroupVersion != nil { - cmdApiVersion = *cfg.GroupVersion - } - mapper = kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}} - return mapper, api.Scheme -} - -func (f *factory) UnstructuredObject() (meta.RESTMapper, runtime.ObjectTyper, error) { - discoveryClient, err := f.DiscoveryClient() - if err != nil { - return nil, nil, err - } - groupResources, err := discovery.GetAPIGroupResources(discoveryClient) - if err != nil && !discoveryClient.Fresh() { - discoveryClient.Invalidate() - groupResources, err = discovery.GetAPIGroupResources(discoveryClient) - } - if err != nil { - return nil, nil, err - } - - mapper := discovery.NewDeferredDiscoveryRESTMapper(discoveryClient, meta.InterfacesForUnstructured) - typer := discovery.NewUnstructuredObjectTyper(groupResources) - return NewShortcutExpander(mapper, discoveryClient), typer, nil -} - -func (f *factory) RESTClient() (*restclient.RESTClient, error) { - clientConfig, err := f.clients.ClientConfigForVersion(nil) - if err != nil { - return nil, err - } - return restclient.RESTClientFor(clientConfig) -} - -func (f *factory) ClientSet() (*internalclientset.Clientset, error) { - return f.clients.ClientSetForVersion(nil) -} - -func (f *factory) ClientConfig() (*restclient.Config, error) { - return f.clients.ClientConfigForVersion(nil) -} - -func (f *factory) ClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) { - cfg, err := f.clientConfig.ClientConfig() - if err != nil { - return nil, err - } - if err := client.SetKubernetesDefaults(cfg); err != nil { - return nil, err - } - gvk := mapping.GroupVersionKind - switch gvk.Group { - case federation.GroupName: - mappingVersion := mapping.GroupVersionKind.GroupVersion() - return f.clients.FederationClientForVersion(&mappingVersion) - 
case api.GroupName: - cfg.APIPath = "/api" - default: - cfg.APIPath = "/apis" - } - gv := gvk.GroupVersion() - cfg.GroupVersion = &gv - if registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) { - cfg.NegotiatedSerializer = thirdpartyresourcedata.NewNegotiatedSerializer(api.Codecs, gvk.Kind, gv, gv) - } - return restclient.RESTClientFor(cfg) -} - -func (f *factory) UnstructuredClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) { - cfg, err := f.clientConfig.ClientConfig() - if err != nil { - return nil, err - } - if err := restclient.SetKubernetesDefaults(cfg); err != nil { - return nil, err - } - cfg.APIPath = "/apis" - if mapping.GroupVersionKind.Group == api.GroupName { - cfg.APIPath = "/api" - } - gv := mapping.GroupVersionKind.GroupVersion() - cfg.ContentConfig = dynamic.ContentConfig() - cfg.GroupVersion = &gv - return restclient.RESTClientFor(cfg) -} - -func (f *factory) Describer(mapping *meta.RESTMapping) (kubectl.Describer, error) { - mappingVersion := mapping.GroupVersionKind.GroupVersion() - if mapping.GroupVersionKind.Group == federation.GroupName { - fedClientSet, err := f.clients.FederationClientSetForVersion(&mappingVersion) - if err != nil { - return nil, err - } - if mapping.GroupVersionKind.Kind == "Cluster" { - return &kubectl.ClusterDescriber{Interface: fedClientSet}, nil - } - } - clientset, err := f.clients.ClientSetForVersion(&mappingVersion) - if err != nil { - return nil, err - } - if describer, ok := kubectl.DescriberFor(mapping.GroupVersionKind.GroupKind(), clientset); ok { - return describer, nil - } - return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind) -} - -func (f *factory) Decoder(toInternal bool) runtime.Decoder { - var decoder runtime.Decoder - if toInternal { - decoder = api.Codecs.UniversalDecoder() - } else { - decoder = api.Codecs.UniversalDeserializer() - } - return thirdpartyresourcedata.NewDecoder(decoder, "") -} - -func (f *factory) JSONEncoder() runtime.Encoder { - return api.Codecs.LegacyCodec(registered.EnabledVersions()...) -} - -func (f *factory) Printer(mapping *meta.RESTMapping, options kubectl.PrintOptions) (kubectl.ResourcePrinter, error) { - return kubectl.NewHumanReadablePrinter(options), nil -} - -func (f *factory) MapBasedSelectorForObject(object runtime.Object) (string, error) { - // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) - switch t := object.(type) { - case *api.ReplicationController: - return kubectl.MakeLabels(t.Spec.Selector), nil - case *api.Pod: - if len(t.Labels) == 0 { - return "", fmt.Errorf("the pod has no labels and cannot be exposed") - } - return kubectl.MakeLabels(t.Labels), nil - case *api.Service: - if t.Spec.Selector == nil { - return "", fmt.Errorf("the service has no pod selector set") - } - return kubectl.MakeLabels(t.Spec.Selector), nil - case *extensions.Deployment: - // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals - // operator, DoubleEquals operator and In operator with only one element in the set. - if len(t.Spec.Selector.MatchExpressions) > 0 { - return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) - } - return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil - case *extensions.ReplicaSet: - // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals - // operator, DoubleEquals operator and In operator with only one element in the set. 
- if len(t.Spec.Selector.MatchExpressions) > 0 { - return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) - } - return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil - default: - gvks, _, err := api.Scheme.ObjectKinds(object) - if err != nil { - return "", err - } - return "", fmt.Errorf("cannot extract pod selector from %v", gvks[0]) - } -} - -func (f *factory) PortsForObject(object runtime.Object) ([]string, error) { - // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) - switch t := object.(type) { - case *api.ReplicationController: - return getPorts(t.Spec.Template.Spec), nil - case *api.Pod: - return getPorts(t.Spec), nil - case *api.Service: - return getServicePorts(t.Spec), nil - case *extensions.Deployment: - return getPorts(t.Spec.Template.Spec), nil - case *extensions.ReplicaSet: - return getPorts(t.Spec.Template.Spec), nil - default: - gvks, _, err := api.Scheme.ObjectKinds(object) - if err != nil { - return nil, err - } - return nil, fmt.Errorf("cannot extract ports from %v", gvks[0]) - } -} - -func (f *factory) ProtocolsForObject(object runtime.Object) (map[string]string, error) { - // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) - switch t := object.(type) { - case *api.ReplicationController: - return getProtocols(t.Spec.Template.Spec), nil - case *api.Pod: - return getProtocols(t.Spec), nil - case *api.Service: - return getServiceProtocols(t.Spec), nil - case *extensions.Deployment: - return getProtocols(t.Spec.Template.Spec), nil - case *extensions.ReplicaSet: - return getProtocols(t.Spec.Template.Spec), nil - default: - gvks, _, err := api.Scheme.ObjectKinds(object) - if err != nil { - return nil, err - } - return nil, fmt.Errorf("cannot extract protocols from %v", gvks[0]) - } -} - -func (f *factory) LabelsForObject(object runtime.Object) (map[string]string, error) { - return meta.NewAccessor().Labels(object) -} - -func (f *factory) LogsForObject(object, options runtime.Object) (*restclient.Request, error) { - clientset, err := f.clients.ClientSetForVersion(nil) - if err != nil { - return nil, err - } - - switch t := object.(type) { - case *api.Pod: - opts, ok := options.(*api.PodLogOptions) - if !ok { - return nil, errors.New("provided options object is not a PodLogOptions") - } - return clientset.Core().Pods(t.Namespace).GetLogs(t.Name, opts), nil - - case *api.ReplicationController: - opts, ok := options.(*api.PodLogOptions) - if !ok { - return nil, errors.New("provided options object is not a PodLogOptions") - } - selector := labels.SelectorFromSet(t.Spec.Selector) - sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) } - pod, numPods, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 20*time.Second, sortBy) - if err != nil { - return nil, err - } - if numPods > 1 { - fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name) - } - - return clientset.Core().Pods(pod.Namespace).GetLogs(pod.Name, opts), nil - - case *extensions.ReplicaSet: - opts, ok := options.(*api.PodLogOptions) - if !ok { - return nil, errors.New("provided options object is not a PodLogOptions") - } - selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) - if err != nil { - return nil, fmt.Errorf("invalid label selector: %v", err) - } - sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) } - pod, numPods, err := 
GetFirstPod(clientset.Core(), t.Namespace, selector, 20*time.Second, sortBy) - if err != nil { - return nil, err - } - if numPods > 1 { - fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name) - } - - return clientset.Core().Pods(pod.Namespace).GetLogs(pod.Name, opts), nil - - default: - gvks, _, err := api.Scheme.ObjectKinds(object) - if err != nil { - return nil, err - } - return nil, fmt.Errorf("cannot get the logs from %v", gvks[0]) - } -} - -func (f *factory) Pauser(info *resource.Info) (bool, error) { - switch obj := info.Object.(type) { - case *extensions.Deployment: - if obj.Spec.Paused { - return true, errors.New("is already paused") - } - obj.Spec.Paused = true - return true, nil - default: - return false, fmt.Errorf("pausing is not supported") - } -} - -func (f *factory) Resumer(info *resource.Info) (bool, error) { - switch obj := info.Object.(type) { - case *extensions.Deployment: - if !obj.Spec.Paused { - return true, errors.New("is not paused") - } - obj.Spec.Paused = false - return true, nil - default: - return false, fmt.Errorf("resuming is not supported") - } -} - -func (f *factory) Scaler(mapping *meta.RESTMapping) (kubectl.Scaler, error) { - mappingVersion := mapping.GroupVersionKind.GroupVersion() - clientset, err := f.clients.ClientSetForVersion(&mappingVersion) - if err != nil { - return nil, err - } - return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), clientset) -} - -func (f *factory) Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, error) { - mappingVersion := mapping.GroupVersionKind.GroupVersion() - clientset, clientsetErr := f.clients.ClientSetForVersion(&mappingVersion) - reaper, reaperErr := kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), clientset) - - if kubectl.IsNoSuchReaperError(reaperErr) { - return nil, reaperErr - } - if clientsetErr != nil { - return nil, clientsetErr - } - return reaper, reaperErr -} - -func (f *factory) HistoryViewer(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) { - mappingVersion := mapping.GroupVersionKind.GroupVersion() - clientset, err := f.clients.ClientSetForVersion(&mappingVersion) - if err != nil { - return nil, err - } - return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset) -} - -func (f *factory) Rollbacker(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) { - mappingVersion := mapping.GroupVersionKind.GroupVersion() - clientset, err := f.clients.ClientSetForVersion(&mappingVersion) - if err != nil { - return nil, err - } - return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), clientset) -} - -func (f *factory) StatusViewer(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) { - mappingVersion := mapping.GroupVersionKind.GroupVersion() - clientset, err := f.clients.ClientSetForVersion(&mappingVersion) - if err != nil { - return nil, err - } - return kubectl.StatusViewerFor(mapping.GroupVersionKind.GroupKind(), clientset) -} - -func (f *factory) Validator(validate bool, cacheDir string) (validation.Schema, error) { - if validate { - clientConfig, err := f.clients.ClientConfigForVersion(nil) - if err != nil { - return nil, err - } - restclient, err := restclient.RESTClientFor(clientConfig) - if err != nil { - return nil, err - } - clientset, err := f.clients.ClientSetForVersion(nil) - if err != nil { - return nil, err - } - dir := cacheDir - if len(dir) > 0 { - version, err := clientset.Discovery().ServerVersion() - if err == nil { - dir = path.Join(cacheDir, version.String()) - } else { - dir = "" // disable caching as a 
fallback - } - } - fedClient, err := f.clients.FederationClientForVersion(nil) - if err != nil { - return nil, err - } - swaggerSchema := &clientSwaggerSchema{ - c: restclient, - fedc: fedClient, - cacheDir: dir, - } - return validation.ConjunctiveSchema{ - swaggerSchema, - validation.NoDoubleKeySchema{}, - }, nil - } - return validation.NullSchema{}, nil -} - -func (f *factory) SwaggerSchema(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) { - version := gvk.GroupVersion() - clientset, err := f.clients.ClientSetForVersion(&version) - if err != nil { - return nil, err - } - return clientset.Discovery().SwaggerSchema(version) -} - -func (f *factory) DefaultNamespace() (string, bool, error) { - return f.clientConfig.Namespace() -} - -func (f *factory) Generators(cmdName string) map[string]kubectl.Generator { - return DefaultGenerators(cmdName) -} - -func (f *factory) CanBeExposed(kind unversioned.GroupKind) error { - switch kind { - case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"): - // nothing to do here - default: - return fmt.Errorf("cannot expose a %s", kind) - } - return nil -} - -func (f *factory) CanBeAutoscaled(kind unversioned.GroupKind) error { - switch kind { - case api.Kind("ReplicationController"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"): - // nothing to do here - default: - return fmt.Errorf("cannot autoscale a %v", kind) - } - return nil -} - -func (f *factory) AttachablePodForObject(object runtime.Object) (*api.Pod, error) { - clientset, err := f.clients.ClientSetForVersion(nil) - if err != nil { - return nil, err - } - switch t := object.(type) { - case *api.ReplicationController: - selector := labels.SelectorFromSet(t.Spec.Selector) - sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } - pod, _, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 1*time.Minute, sortBy) - return pod, err - case *extensions.Deployment: - selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) - if err != nil { - return nil, fmt.Errorf("invalid label selector: %v", err) - } - sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } - pod, _, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 1*time.Minute, sortBy) - return pod, err - case *batch.Job: - selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) - if err != nil { - return nil, fmt.Errorf("invalid label selector: %v", err) - } - sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } - pod, _, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 1*time.Minute, sortBy) - return pod, err - case *api.Pod: - return t, nil - default: - gvks, _, err := api.Scheme.ObjectKinds(object) - if err != nil { - return nil, err - } - return nil, fmt.Errorf("cannot attach to %v: not implemented", gvks[0]) - } -} - -func (f *factory) UpdatePodSpecForObject(obj runtime.Object, fn func(*api.PodSpec) error) (bool, error) { - // TODO: replace with a swagger schema based approach (identify pod template via schema introspection) - switch t := obj.(type) { - case *api.Pod: - return true, fn(&t.Spec) - case *api.ReplicationController: - if t.Spec.Template == nil { - t.Spec.Template = &api.PodTemplateSpec{} - } - return true, fn(&t.Spec.Template.Spec) - case *extensions.Deployment: - return true, fn(&t.Spec.Template.Spec) - case 
*extensions.DaemonSet: - return true, fn(&t.Spec.Template.Spec) - case *extensions.ReplicaSet: - return true, fn(&t.Spec.Template.Spec) - case *apps.StatefulSet: - return true, fn(&t.Spec.Template.Spec) - case *batch.Job: - return true, fn(&t.Spec.Template.Spec) - default: - return false, fmt.Errorf("the object is not a pod or does not have a pod template") - } -} - -func (f *factory) EditorEnvs() []string { - return []string{"KUBE_EDITOR", "EDITOR"} -} - -func (f *factory) PrintObjectSpecificMessage(obj runtime.Object, out io.Writer) { - switch obj := obj.(type) { - case *api.Service: - if obj.Spec.Type == api.ServiceTypeNodePort { - msg := fmt.Sprintf( - `You have exposed your service on an external port on all nodes in your -cluster. If you want to expose this service to the external internet, you may -need to set up firewall rules for the service port(s) (%s) to serve traffic. - -See http://kubernetes.io/docs/user-guide/services-firewalls for more details. -`, - makePortsString(obj.Spec.Ports, true)) - out.Write([]byte(msg)) - } - - if _, ok := obj.Annotations[service.AnnotationLoadBalancerSourceRangesKey]; ok { - msg := fmt.Sprintf( - `You are using service annotation [service.beta.kubernetes.io/load-balancer-source-ranges]. -It has been promoted to field [loadBalancerSourceRanges] in service spec. This annotation will be deprecated in the future. -Please use the loadBalancerSourceRanges field instead. - -See http://kubernetes.io/docs/user-guide/services-firewalls for more details. -`) - out.Write([]byte(msg)) - } - } -} - -// GetFirstPod returns a pod matching the namespace and label selector -// and the number of all pods that match the label selector. -func GetFirstPod(client coreclient.PodsGetter, namespace string, selector labels.Selector, timeout time.Duration, sortBy func([]*api.Pod) sort.Interface) (*api.Pod, int, error) { - options := api.ListOptions{LabelSelector: selector} - - podList, err := client.Pods(namespace).List(options) - if err != nil { - return nil, 0, err - } - pods := []*api.Pod{} - for i := range podList.Items { - pod := podList.Items[i] - pods = append(pods, &pod) - } - if len(pods) > 0 { - sort.Sort(sortBy(pods)) - return pods[0], len(podList.Items), nil - } - - // Watch until we observe a pod - options.ResourceVersion = podList.ResourceVersion - w, err := client.Pods(namespace).Watch(options) - if err != nil { - return nil, 0, err - } - defer w.Stop() - - condition := func(event watch.Event) (bool, error) { - return event.Type == watch.Added || event.Type == watch.Modified, nil - } - event, err := watch.Until(timeout, w, condition) - if err != nil { - return nil, 0, err - } - pod, ok := event.Object.(*api.Pod) - if !ok { - return nil, 0, fmt.Errorf("%#v is not a pod event", event) - } - return pod, 1, nil -} - -// TODO: We need to filter out stuff like secrets. -func (f *factory) Command() string { - if len(os.Args) == 0 { - return "" - } - base := filepath.Base(os.Args[0]) - args := append([]string{base}, os.Args[1:]...) - return strings.Join(args, " ") -} - -func (f *factory) BindFlags(flags *pflag.FlagSet) { - // Merge factory's flags - flags.AddFlagSet(f.flags) - - // Globally persistent flags across all subcommands. - // TODO Change flag names to consts to allow safer lookup from subcommands. - // TODO Add a verbose flag that turns on glog logging. Probably need a way - // to do that automatically for every subcommand. 
- flags.BoolVar(&f.clients.matchVersion, FlagMatchBinaryVersion, false, "Require server version to match client version") - - // Normalize all flags that are coming from other packages or pre-configurations - // a.k.a. change all "_" to "-". e.g. glog package - flags.SetNormalizeFunc(utilflag.WordSepNormalizeFunc) -} - -func (f *factory) BindExternalFlags(flags *pflag.FlagSet) { - // any flags defined by external projects (not part of pflags) - flags.AddGoFlagSet(flag.CommandLine) -} - -func makePortsString(ports []api.ServicePort, useNodePort bool) string { - pieces := make([]string, len(ports)) - for ix := range ports { - var port int32 - if useNodePort { - port = ports[ix].NodePort - } else { - port = ports[ix].Port - } - pieces[ix] = fmt.Sprintf("%s:%d", strings.ToLower(string(ports[ix].Protocol)), port) - } - return strings.Join(pieces, ",") -} - -func getPorts(spec api.PodSpec) []string { - result := []string{} - for _, container := range spec.Containers { - for _, port := range container.Ports { - result = append(result, strconv.Itoa(int(port.ContainerPort))) - } - } - return result -} - -func getProtocols(spec api.PodSpec) map[string]string { - result := make(map[string]string) - for _, container := range spec.Containers { - for _, port := range container.Ports { - result[strconv.Itoa(int(port.ContainerPort))] = string(port.Protocol) - } - } - return result -} - -// Extracts the ports exposed by a service from the given service spec. -func getServicePorts(spec api.ServiceSpec) []string { - result := []string{} - for _, servicePort := range spec.Ports { - result = append(result, strconv.Itoa(int(servicePort.Port))) - } - return result -} - -// Extracts the protocols exposed by a service from the given service spec. -func getServiceProtocols(spec api.ServiceSpec) map[string]string { - result := make(map[string]string) - for _, servicePort := range spec.Ports { - result[strconv.Itoa(int(servicePort.Port))] = string(servicePort.Protocol) - } - return result -} - -type clientSwaggerSchema struct { - c *restclient.RESTClient - fedc *restclient.RESTClient - cacheDir string -} - -const schemaFileName = "schema.json" - -type schemaClient interface { - Get() *restclient.Request -} - -func recursiveSplit(dir string) []string { - parent, file := path.Split(dir) - if len(parent) == 0 { - return []string{file} - } - return append(recursiveSplit(parent[:len(parent)-1]), file) -} - -func substituteUserHome(dir string) (string, error) { - if len(dir) == 0 || dir[0] != '~' { - return dir, nil - } - parts := recursiveSplit(dir) - if len(parts[0]) == 1 { - parts[0] = os.Getenv("HOME") - } else { - usr, err := user.Lookup(parts[0][1:]) - if err != nil { - return "", err - } - parts[0] = usr.HomeDir - } - return path.Join(parts...), nil -} - -func writeSchemaFile(schemaData []byte, cacheDir, cacheFile, prefix, groupVersion string) error { - if err := os.MkdirAll(path.Join(cacheDir, prefix, groupVersion), 0755); err != nil { - return err - } - tmpFile, err := ioutil.TempFile(cacheDir, "schema") - if err != nil { - // If we can't write, keep going. - if os.IsPermission(err) { - return nil - } - return err - } - if _, err := io.Copy(tmpFile, bytes.NewBuffer(schemaData)); err != nil { - return err - } - if err := os.Link(tmpFile.Name(), cacheFile); err != nil { - // If we can't write due to file existing, or permission problems, keep going. 
- if os.IsExist(err) || os.IsPermission(err) { - return nil - } - return err - } - return nil -} - -func getSchemaAndValidate(c schemaClient, data []byte, prefix, groupVersion, cacheDir string, delegate validation.Schema) (err error) { - var schemaData []byte - var firstSeen bool - fullDir, err := substituteUserHome(cacheDir) - if err != nil { - return err - } - cacheFile := path.Join(fullDir, prefix, groupVersion, schemaFileName) - - if len(cacheDir) != 0 { - if schemaData, err = ioutil.ReadFile(cacheFile); err != nil && !os.IsNotExist(err) { - return err - } - } - if schemaData == nil { - firstSeen = true - schemaData, err = downloadSchemaAndStore(c, cacheDir, fullDir, cacheFile, prefix, groupVersion) - if err != nil { - return err - } - } - schema, err := validation.NewSwaggerSchemaFromBytes(schemaData, delegate) - if err != nil { - return err - } - err = schema.ValidateBytes(data) - if _, ok := err.(validation.TypeNotFoundError); ok && !firstSeen { - // As a temporary hack, kubectl would re-get the schema if validation - // fails for type not found reason. - // TODO: runtime-config settings needs to make into the file's name - schemaData, err = downloadSchemaAndStore(c, cacheDir, fullDir, cacheFile, prefix, groupVersion) - if err != nil { - return err - } - schema, err := validation.NewSwaggerSchemaFromBytes(schemaData, delegate) - if err != nil { - return err - } - return schema.ValidateBytes(data) - } - - return err -} - -// Download swagger schema from apiserver and store it to file. -func downloadSchemaAndStore(c schemaClient, cacheDir, fullDir, cacheFile, prefix, groupVersion string) (schemaData []byte, err error) { - schemaData, err = c.Get(). - AbsPath("/swaggerapi", prefix, groupVersion). - Do(). - Raw() - if err != nil { - return - } - if len(cacheDir) != 0 { - if err = writeSchemaFile(schemaData, fullDir, cacheFile, prefix, groupVersion); err != nil { - return - } - } - return -} - -func (c *clientSwaggerSchema) ValidateBytes(data []byte) error { - gvk, err := json.DefaultMetaFactory.Interpret(data) - if err != nil { - return err - } - if ok := registered.IsEnabledVersion(gvk.GroupVersion()); !ok { - // if we don't have this in our scheme, just skip validation because its an object we don't recognize - return nil - } - - switch gvk.Group { - case federation.GroupName: - if c.fedc == nil { - return errors.New("unable to validate: no federation client") - } - return getSchemaAndValidate(c.fedc, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) - - case api.GroupName: - return getSchemaAndValidate(c.c, data, "api", gvk.GroupVersion().String(), c.cacheDir, c) - - default: - return getSchemaAndValidate(c.c, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) - } -} - -// DefaultClientConfig creates a clientcmd.ClientConfig with the following hierarchy: -// 1. Use the kubeconfig builder. The number of merges and overrides here gets a little crazy. Stay with me. -// 1. Merge the kubeconfig itself. This is done with the following hierarchy rules: -// 1. CommandLineLocation - this parsed from the command line, so it must be late bound. If you specify this, -// then no other kubeconfig files are merged. This file must exist. -// 2. If $KUBECONFIG is set, then it is treated as a list of files that should be merged. -// 3. HomeDirectoryLocation -// Empty filenames are ignored. Files with non-deserializable content produced errors. -// The first file to set a particular value or map key wins and the value or map key is never changed. 
-// This means that the first file to set CurrentContext will have its context preserved. It also means -// that if two files specify a "red-user", only values from the first file's red-user are used. Even -// non-conflicting entries from the second file's "red-user" are discarded. -// 2. Determine the context to use based on the first hit in this chain -// 1. command line argument - again, parsed from the command line, so it must be late bound -// 2. CurrentContext from the merged kubeconfig file -// 3. Empty is allowed at this stage -// 3. Determine the cluster info and auth info to use. At this point, we may or may not have a context. They -// are built based on the first hit in this chain. (run it twice, once for auth, once for cluster) -// 1. command line argument -// 2. If context is present, then use the context value -// 3. Empty is allowed -// 4. Determine the actual cluster info to use. At this point, we may or may not have a cluster info. Build -// each piece of the cluster info based on the chain: -// 1. command line argument -// 2. If cluster info is present and a value for the attribute is present, use it. -// 3. If you don't have a server location, bail. -// 5. Auth info is build using the same rules as cluster info, EXCEPT that you can only have one authentication -// technique per auth info. The following conditions result in an error: -// 1. If there are two conflicting techniques specified from the command line, fail. -// 2. If the command line does not specify one, and the auth info has conflicting techniques, fail. -// 3. If the command line specifies one and the auth info specifies another, honor the command line technique. -// 2. Use default values and potentially prompt for auth information -// -// However, if it appears that we're running in a kubernetes cluster -// container environment, then run with the auth info kubernetes mounted for -// us. Specifically: -// The env vars KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT are -// set, and the file /var/run/secrets/kubernetes.io/serviceaccount/token -// exists and is not a directory. -func DefaultClientConfig(flags *pflag.FlagSet) clientcmd.ClientConfig { - loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() - // use the standard defaults for this client command - // DEPRECATED: remove and replace with something more accurate - loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig - - flags.StringVar(&loadingRules.ExplicitPath, "kubeconfig", "", "Path to the kubeconfig file to use for CLI requests.") - - overrides := &clientcmd.ConfigOverrides{ClusterDefaults: clientcmd.ClusterDefaults} - - flagNames := clientcmd.RecommendedConfigOverrideFlags("") - // short flagnames are disabled by default. 
These are here for compatibility with existing scripts - flagNames.ClusterOverrideFlags.APIServer.ShortName = "s" - - clientcmd.BindOverrideFlags(overrides, flags, flagNames) - clientConfig := clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, overrides, os.Stdin) - - return clientConfig -} - -func (f *factory) DefaultResourceFilterOptions(cmd *cobra.Command, withNamespace bool) *kubectl.PrintOptions { - columnLabel, err := cmd.Flags().GetStringSlice("label-columns") - if err != nil { - columnLabel = []string{} - } - opts := &kubectl.PrintOptions{ - NoHeaders: GetFlagBool(cmd, "no-headers"), - WithNamespace: withNamespace, - Wide: GetWideFlag(cmd), - ShowAll: GetFlagBool(cmd, "show-all"), - ShowLabels: GetFlagBool(cmd, "show-labels"), - AbsoluteTimestamps: isWatch(cmd), - ColumnLabels: columnLabel, - } - - return opts -} - -func (f *factory) DefaultResourceFilterFunc() kubectl.Filters { - return kubectl.NewResourceFilter() -} - -func (f *factory) PrintObject(cmd *cobra.Command, mapper meta.RESTMapper, obj runtime.Object, out io.Writer) error { - gvks, _, err := api.Scheme.ObjectKinds(obj) - if err != nil { - return err - } - - mapping, err := mapper.RESTMapping(gvks[0].GroupKind()) - if err != nil { - return err - } - - printer, err := f.PrinterForMapping(cmd, mapping, false) - if err != nil { - return err - } - return printer.PrintObj(obj, out) -} - -func (f *factory) PrinterForMapping(cmd *cobra.Command, mapping *meta.RESTMapping, withNamespace bool) (kubectl.ResourcePrinter, error) { - printer, ok, err := PrinterForCommand(cmd) - if err != nil { - return nil, err - } - if ok { - clientConfig, err := f.ClientConfig() - if err != nil { - return nil, err - } - - version, err := OutputVersion(cmd, clientConfig.GroupVersion) - if err != nil { - return nil, err - } - if version.Empty() && mapping != nil { - version = mapping.GroupVersionKind.GroupVersion() - } - if version.Empty() { - return nil, fmt.Errorf("you must specify an output-version when using this output format") - } - - if mapping != nil { - printer = kubectl.NewVersionedPrinter(printer, mapping.ObjectConvertor, version, mapping.GroupVersionKind.GroupVersion()) - } - - } else { - // Some callers do not have "label-columns" so we can't use the GetFlagStringSlice() helper - columnLabel, err := cmd.Flags().GetStringSlice("label-columns") - if err != nil { - columnLabel = []string{} - } - printer, err = f.Printer(mapping, kubectl.PrintOptions{ - NoHeaders: GetFlagBool(cmd, "no-headers"), - WithNamespace: withNamespace, - Wide: GetWideFlag(cmd), - ShowAll: GetFlagBool(cmd, "show-all"), - ShowLabels: GetFlagBool(cmd, "show-labels"), - AbsoluteTimestamps: isWatch(cmd), - ColumnLabels: columnLabel, - }) - if err != nil { - return nil, err - } - printer = maybeWrapSortingPrinter(cmd, printer) - } - - return printer, nil -} - -func (f *factory) NewBuilder() *resource.Builder { - mapper, typer := f.Object() - - return resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)) -} - -func (f *factory) SuggestedPodTemplateResources() []unversioned.GroupResource { - return []unversioned.GroupResource{ - {Resource: "replicationcontroller"}, - {Resource: "deployment"}, - {Resource: "daemonset"}, - {Resource: "job"}, - {Resource: "replicaset"}, - } -} - -// overlyCautiousIllegalFileCharacters matches characters that *might* not be supported. 
Windows is really restrictive, so this is really restrictive -var overlyCautiousIllegalFileCharacters = regexp.MustCompile(`[^(\w/\.)]`) - -// computeDiscoverCacheDir takes the parentDir and the host and comes up with a "usually non-colliding" name. -func computeDiscoverCacheDir(parentDir, host string) string { - // strip the optional scheme from host if its there: - schemelessHost := strings.Replace(strings.Replace(host, "https://", "", 1), "http://", "", 1) - // now do a simple collapse of non-AZ09 characters. Collisions are possible but unlikely. Even if we do collide the problem is short lived - safeHost := overlyCautiousIllegalFileCharacters.ReplaceAllString(schemelessHost, "_") - - return filepath.Join(parentDir, safeHost) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go deleted file mode 100644 index 01e34a64ed..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go +++ /dev/null @@ -1,727 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/url" - "os" - "strings" - "time" - - "k8s.io/kubernetes/pkg/api" - kerrors "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/resource" - "k8s.io/kubernetes/pkg/runtime" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - utilexec "k8s.io/kubernetes/pkg/util/exec" - "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/util/strategicpatch" - - jsonpatch "github.com/evanphx/json-patch" - "github.com/golang/glog" - "github.com/spf13/cobra" -) - -const ( - ApplyAnnotationsFlag = "save-config" - DefaultErrorExitCode = 1 -) - -type debugError interface { - DebugError() (msg string, args []interface{}) -} - -// AddSourceToErr adds handleResourcePrefix and source string to error message. -// verb is the string like "creating", "deleting" etc. -// source is the filename or URL to the template file(*.json or *.yaml), or stdin to use to handle the resource. -func AddSourceToErr(verb string, source string, err error) error { - if source != "" { - if statusError, ok := err.(kerrors.APIStatus); ok { - status := statusError.Status() - status.Message = fmt.Sprintf("error when %s %q: %v", verb, source, status.Message) - return &kerrors.StatusError{ErrStatus: status} - } - return fmt.Errorf("error when %s %q: %v", verb, source, err) - } - return err -} - -var fatalErrHandler = fatal - -// BehaviorOnFatal allows you to override the default behavior when a fatal -// error occurs, which is to call os.Exit(code). You can pass 'panic' as a function -// here if you prefer the panic() over os.Exit(1). 
-func BehaviorOnFatal(f func(string, int)) { - fatalErrHandler = f -} - -// DefaultBehaviorOnFatal allows you to undo any previous override. Useful in -// tests. -func DefaultBehaviorOnFatal() { - fatalErrHandler = fatal -} - -// fatal prints the message (if provided) and then exits. If V(2) or greater, -// glog.Fatal is invoked for extended information. -func fatal(msg string, code int) { - if glog.V(2) { - glog.FatalDepth(2, msg) - } - if len(msg) > 0 { - // add newline if needed - if !strings.HasSuffix(msg, "\n") { - msg += "\n" - } - fmt.Fprint(os.Stderr, msg) - } - os.Exit(code) -} - -// ErrExit may be passed to CheckError to instruct it to output nothing but exit with -// status code 1. -var ErrExit = fmt.Errorf("exit") - -// CheckErr prints a user friendly error to STDERR and exits with a non-zero -// exit code. Unrecognized errors will be printed with an "error: " prefix. -// -// This method is generic to the command in use and may be used by non-Kubectl -// commands. -func CheckErr(err error) { - checkErr("", err, fatalErrHandler) -} - -// checkErrWithPrefix works like CheckErr, but adds a caller-defined prefix to non-nil errors -func checkErrWithPrefix(prefix string, err error) { - checkErr(prefix, err, fatalErrHandler) -} - -// checkErr formats a given error as a string and calls the passed handleErr -// func with that string and an kubectl exit code. -func checkErr(prefix string, err error, handleErr func(string, int)) { - // unwrap aggregates of 1 - if agg, ok := err.(utilerrors.Aggregate); ok && len(agg.Errors()) == 1 { - err = agg.Errors()[0] - } - - switch { - case err == nil: - return - case err == ErrExit: - handleErr("", DefaultErrorExitCode) - return - case kerrors.IsInvalid(err): - details := err.(*kerrors.StatusError).Status().Details - s := fmt.Sprintf("%sThe %s %q is invalid", prefix, details.Kind, details.Name) - if len(details.Causes) > 0 { - errs := statusCausesToAggrError(details.Causes) - handleErr(MultilineError(s+": ", errs), DefaultErrorExitCode) - } else { - handleErr(s, DefaultErrorExitCode) - } - case clientcmd.IsConfigurationInvalid(err): - handleErr(MultilineError(fmt.Sprintf("%sError in configuration: ", prefix), err), DefaultErrorExitCode) - default: - switch err := err.(type) { - case *meta.NoResourceMatchError: - switch { - case len(err.PartialResource.Group) > 0 && len(err.PartialResource.Version) > 0: - handleErr(fmt.Sprintf("%sthe server doesn't have a resource type %q in group %q and version %q", prefix, err.PartialResource.Resource, err.PartialResource.Group, err.PartialResource.Version), DefaultErrorExitCode) - case len(err.PartialResource.Group) > 0: - handleErr(fmt.Sprintf("%sthe server doesn't have a resource type %q in group %q", prefix, err.PartialResource.Resource, err.PartialResource.Group), DefaultErrorExitCode) - case len(err.PartialResource.Version) > 0: - handleErr(fmt.Sprintf("%sthe server doesn't have a resource type %q in version %q", prefix, err.PartialResource.Resource, err.PartialResource.Version), DefaultErrorExitCode) - default: - handleErr(fmt.Sprintf("%sthe server doesn't have a resource type %q", prefix, err.PartialResource.Resource), DefaultErrorExitCode) - } - case utilerrors.Aggregate: - handleErr(MultipleErrors(prefix, err.Errors()), DefaultErrorExitCode) - case utilexec.ExitError: - // do not print anything, only terminate with given error - handleErr("", err.ExitStatus()) - default: // for any other error type - msg, ok := StandardErrorMessage(err) - if !ok { - msg = err.Error() - if !strings.HasPrefix(msg, "error: 
") { - msg = fmt.Sprintf("error: %s", msg) - } - } - handleErr(msg, DefaultErrorExitCode) - } - } -} - -func statusCausesToAggrError(scs []unversioned.StatusCause) utilerrors.Aggregate { - errs := make([]error, 0, len(scs)) - errorMsgs := sets.NewString() - for _, sc := range scs { - // check for duplicate error messages and skip them - msg := fmt.Sprintf("%s: %s", sc.Field, sc.Message) - if errorMsgs.Has(msg) { - continue - } - errorMsgs.Insert(msg) - errs = append(errs, errors.New(msg)) - } - return utilerrors.NewAggregate(errs) -} - -// StandardErrorMessage translates common errors into a human readable message, or returns -// false if the error is not one of the recognized types. It may also log extended -// information to glog. -// -// This method is generic to the command in use and may be used by non-Kubectl -// commands. -func StandardErrorMessage(err error) (string, bool) { - if debugErr, ok := err.(debugError); ok { - glog.V(4).Infof(debugErr.DebugError()) - } - status, isStatus := err.(kerrors.APIStatus) - switch { - case isStatus: - switch s := status.Status(); { - case s.Reason == unversioned.StatusReasonUnauthorized: - return fmt.Sprintf("error: You must be logged in to the server (%s)", s.Message), true - case len(s.Reason) > 0: - return fmt.Sprintf("Error from server (%s): %s", s.Reason, err.Error()), true - default: - return fmt.Sprintf("Error from server: %s", err.Error()), true - } - case kerrors.IsUnexpectedObjectError(err): - return fmt.Sprintf("Server returned an unexpected response: %s", err.Error()), true - } - switch t := err.(type) { - case *url.Error: - glog.V(4).Infof("Connection error: %s %s: %v", t.Op, t.URL, t.Err) - switch { - case strings.Contains(t.Err.Error(), "connection refused"): - host := t.URL - if server, err := url.Parse(t.URL); err == nil { - host = server.Host - } - return fmt.Sprintf("The connection to the server %s was refused - did you specify the right host or port?", host), true - } - return fmt.Sprintf("Unable to connect to the server: %v", t.Err), true - } - return "", false -} - -// MultilineError returns a string representing an error that splits sub errors into their own -// lines. The returned string will end with a newline. -func MultilineError(prefix string, err error) string { - if agg, ok := err.(utilerrors.Aggregate); ok { - errs := utilerrors.Flatten(agg).Errors() - buf := &bytes.Buffer{} - switch len(errs) { - case 0: - return fmt.Sprintf("%s%v\n", prefix, err) - case 1: - return fmt.Sprintf("%s%v\n", prefix, messageForError(errs[0])) - default: - fmt.Fprintln(buf, prefix) - for _, err := range errs { - fmt.Fprintf(buf, "* %v\n", messageForError(err)) - } - return buf.String() - } - } - return fmt.Sprintf("%s%s\n", prefix, err) -} - -// PrintErrorWithCauses prints an error's kind, name, and each of the error's causes in a new line. -// The returned string will end with a newline. -// Returns true if a case exists to handle the error type, or false otherwise. 
-func PrintErrorWithCauses(err error, errOut io.Writer) bool { - switch t := err.(type) { - case *kerrors.StatusError: - errorDetails := t.Status().Details - if errorDetails != nil { - fmt.Fprintf(errOut, "error: %s %q is invalid\n\n", errorDetails.Kind, errorDetails.Name) - for _, cause := range errorDetails.Causes { - fmt.Fprintf(errOut, "* %s: %s\n", cause.Field, cause.Message) - } - return true - } - } - - fmt.Fprintf(errOut, "error: %v\n", err) - return false -} - -// MultipleErrors returns a newline delimited string containing -// the prefix and referenced errors in standard form. -func MultipleErrors(prefix string, errs []error) string { - buf := &bytes.Buffer{} - for _, err := range errs { - fmt.Fprintf(buf, "%s%v\n", prefix, messageForError(err)) - } - return buf.String() -} - -// messageForError returns the string representing the error. -func messageForError(err error) string { - msg, ok := StandardErrorMessage(err) - if !ok { - msg = err.Error() - } - return msg -} - -func UsageError(cmd *cobra.Command, format string, args ...interface{}) error { - msg := fmt.Sprintf(format, args...) - return fmt.Errorf("%s\nSee '%s -h' for help and examples.", msg, cmd.CommandPath()) -} - -func IsFilenameEmpty(filenames []string) bool { - if len(filenames) == 0 { - return true - } - return false -} - -// Whether this cmd need watching objects. -func isWatch(cmd *cobra.Command) bool { - if w, err := cmd.Flags().GetBool("watch"); w && err == nil { - return true - } - - if wo, err := cmd.Flags().GetBool("watch-only"); wo && err == nil { - return true - } - - return false -} - -func GetFlagString(cmd *cobra.Command, flag string) string { - s, err := cmd.Flags().GetString(flag) - if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) - } - return s -} - -// GetFlagStringSlice can be used to accept multiple argument with flag repetition (e.g. -f arg1,arg2 -f arg3 ...) -func GetFlagStringSlice(cmd *cobra.Command, flag string) []string { - s, err := cmd.Flags().GetStringSlice(flag) - if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) - } - return s -} - -// GetFlagStringArray can be used to accept multiple argument with flag repetition (e.g. -f arg1 -f arg2 ...) -func GetFlagStringArray(cmd *cobra.Command, flag string) []string { - s, err := cmd.Flags().GetStringArray(flag) - if err != nil { - glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err) - } - return s -} - -// GetWideFlag is used to determine if "-o wide" is used -func GetWideFlag(cmd *cobra.Command) bool { - f := cmd.Flags().Lookup("output") - if f.Value.String() == "wide" { - return true - } - return false -} - -func GetFlagBool(cmd *cobra.Command, flag string) bool { - b, err := cmd.Flags().GetBool(flag) - if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) - } - return b -} - -// Assumes the flag has a default value. -func GetFlagInt(cmd *cobra.Command, flag string) int { - i, err := cmd.Flags().GetInt(flag) - if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) - } - return i -} - -// Assumes the flag has a default value. 
-func GetFlagInt64(cmd *cobra.Command, flag string) int64 { - i, err := cmd.Flags().GetInt64(flag) - if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) - } - return i -} - -func GetFlagDuration(cmd *cobra.Command, flag string) time.Duration { - d, err := cmd.Flags().GetDuration(flag) - if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) - } - return d -} - -func AddValidateFlags(cmd *cobra.Command) { - cmd.Flags().Bool("validate", true, "If true, use a schema to validate the input before sending it") - cmd.Flags().String("schema-cache-dir", fmt.Sprintf("~/%s/%s", clientcmd.RecommendedHomeDir, clientcmd.RecommendedSchemaName), fmt.Sprintf("If non-empty, load/store cached API schemas in this directory, default is '$HOME/%s/%s'", clientcmd.RecommendedHomeDir, clientcmd.RecommendedSchemaName)) - cmd.MarkFlagFilename("schema-cache-dir") -} - -func AddFilenameOptionFlags(cmd *cobra.Command, options *resource.FilenameOptions, usage string) { - kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, "Filename, directory, or URL to files "+usage) - cmd.Flags().BoolVarP(&options.Recursive, "recursive", "R", options.Recursive, "Process the directory used in -f, --filename recursively. Useful when you want to manage related manifests organized within the same directory.") -} - -// AddDryRunFlag adds dry-run flag to a command. Usually used by mutations. -func AddDryRunFlag(cmd *cobra.Command) { - cmd.Flags().Bool("dry-run", false, "If true, only print the object that would be sent, without sending it.") -} - -func AddApplyAnnotationFlags(cmd *cobra.Command) { - cmd.Flags().Bool(ApplyAnnotationsFlag, false, "If true, the configuration of current object will be saved in its annotation. This is useful when you want to perform kubectl apply on this object in the future.") -} - -// AddGeneratorFlags adds flags common to resource generation commands -// TODO: need to take a pass at other generator commands to use this set of flags -func AddGeneratorFlags(cmd *cobra.Command, defaultGenerator string) { - cmd.Flags().String("generator", defaultGenerator, "The name of the API generator to use.") - AddDryRunFlag(cmd) -} - -func ReadConfigDataFromReader(reader io.Reader, source string) ([]byte, error) { - data, err := ioutil.ReadAll(reader) - if err != nil { - return nil, err - } - - if len(data) == 0 { - return nil, fmt.Errorf("Read from %s but no data found", source) - } - - return data, nil -} - -// Merge requires JSON serialization -// TODO: merge assumes JSON serialization, and does not properly abstract API retrieval -func Merge(codec runtime.Codec, dst runtime.Object, fragment, kind string) (runtime.Object, error) { - // encode dst into versioned json and apply fragment directly too it - target, err := runtime.Encode(codec, dst) - if err != nil { - return nil, err - } - patched, err := jsonpatch.MergePatch(target, []byte(fragment)) - if err != nil { - return nil, err - } - out, err := runtime.Decode(codec, patched) - if err != nil { - return nil, err - } - return out, nil -} - -// DumpReaderToFile writes all data from the given io.Reader to the specified file -// (usually for temporary use). 
-func DumpReaderToFile(reader io.Reader, filename string) error { - f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - defer f.Close() - if err != nil { - return err - } - buffer := make([]byte, 1024) - for { - count, err := reader.Read(buffer) - if err == io.EOF { - break - } - if err != nil { - return err - } - _, err = f.Write(buffer[:count]) - if err != nil { - return err - } - } - return nil -} - -// UpdateObject updates resource object with updateFn -func UpdateObject(info *resource.Info, codec runtime.Codec, updateFn func(runtime.Object) error) (runtime.Object, error) { - helper := resource.NewHelper(info.Client, info.Mapping) - - if err := updateFn(info.Object); err != nil { - return nil, err - } - - // Update the annotation used by kubectl apply - if err := kubectl.UpdateApplyAnnotation(info, codec); err != nil { - return nil, err - } - - if _, err := helper.Replace(info.Namespace, info.Name, true, info.Object); err != nil { - return nil, err - } - - return info.Object, nil -} - -// AddCmdRecordFlag adds --record flag to command -func AddRecordFlag(cmd *cobra.Command) { - cmd.Flags().Bool("record", false, "Record current kubectl command in the resource annotation. If set to false, do not record the command. If set to true, record the command. If not set, default to updating the existing annotation value only if one already exists.") -} - -func GetRecordFlag(cmd *cobra.Command) bool { - return GetFlagBool(cmd, "record") -} - -func GetDryRunFlag(cmd *cobra.Command) bool { - return GetFlagBool(cmd, "dry-run") -} - -// RecordChangeCause annotate change-cause to input runtime object. -func RecordChangeCause(obj runtime.Object, changeCause string) error { - accessor, err := meta.Accessor(obj) - if err != nil { - return err - } - annotations := accessor.GetAnnotations() - if annotations == nil { - annotations = make(map[string]string) - } - annotations[kubectl.ChangeCauseAnnotation] = changeCause - accessor.SetAnnotations(annotations) - return nil -} - -// ChangeResourcePatch creates a strategic merge patch between the origin input resource info -// and the annotated with change-cause input resource info. -func ChangeResourcePatch(info *resource.Info, changeCause string) ([]byte, error) { - oldData, err := json.Marshal(info.Object) - if err != nil { - return nil, err - } - if err := RecordChangeCause(info.Object, changeCause); err != nil { - return nil, err - } - newData, err := json.Marshal(info.Object) - if err != nil { - return nil, err - } - return strategicpatch.CreateTwoWayMergePatch(oldData, newData, info.Object) -} - -// containsChangeCause checks if input resource info contains change-cause annotation. -func ContainsChangeCause(info *resource.Info) bool { - annotations, err := info.Mapping.MetadataAccessor.Annotations(info.Object) - if err != nil { - return false - } - return len(annotations[kubectl.ChangeCauseAnnotation]) > 0 -} - -// ShouldRecord checks if we should record current change cause -func ShouldRecord(cmd *cobra.Command, info *resource.Info) bool { - return GetRecordFlag(cmd) || (ContainsChangeCause(info) && !cmd.Flags().Changed("record")) -} - -func AddInclude3rdPartyFlags(cmd *cobra.Command) { - cmd.Flags().Bool("include-extended-apis", true, "If true, include definitions of new APIs via calls to the API server. 
[default true]") - cmd.Flags().MarkDeprecated("include-extended-apis", "No longer required.") -} - -// GetResourcesAndPairs retrieves resources and "KEY=VALUE or KEY-" pair args from given args -func GetResourcesAndPairs(args []string, pairType string) (resources []string, pairArgs []string, err error) { - foundPair := false - for _, s := range args { - nonResource := strings.Contains(s, "=") || strings.HasSuffix(s, "-") - switch { - case !foundPair && nonResource: - foundPair = true - fallthrough - case foundPair && nonResource: - pairArgs = append(pairArgs, s) - case !foundPair && !nonResource: - resources = append(resources, s) - case foundPair && !nonResource: - err = fmt.Errorf("all resources must be specified before %s changes: %s", pairType, s) - return - } - } - return -} - -// ParsePairs retrieves new and remove pairs (if supportRemove is true) from "KEY=VALUE or KEY-" pair args -func ParsePairs(pairArgs []string, pairType string, supportRemove bool) (newPairs map[string]string, removePairs []string, err error) { - newPairs = map[string]string{} - if supportRemove { - removePairs = []string{} - } - var invalidBuf bytes.Buffer - - for _, pairArg := range pairArgs { - if strings.Index(pairArg, "=") != -1 { - parts := strings.SplitN(pairArg, "=", 2) - if len(parts) != 2 { - if invalidBuf.Len() > 0 { - invalidBuf.WriteString(", ") - } - invalidBuf.WriteString(fmt.Sprintf(pairArg)) - } else { - newPairs[parts[0]] = parts[1] - } - } else if supportRemove && strings.HasSuffix(pairArg, "-") { - removePairs = append(removePairs, pairArg[:len(pairArg)-1]) - } else { - if invalidBuf.Len() > 0 { - invalidBuf.WriteString(", ") - } - invalidBuf.WriteString(fmt.Sprintf(pairArg)) - } - } - if invalidBuf.Len() > 0 { - err = fmt.Errorf("invalid %s format: %s", pairType, invalidBuf.String()) - return - } - - return -} - -// MaybeConvertObject attempts to convert an object to a specific group/version. If the object is -// a third party resource it is simply passed through. -func MaybeConvertObject(obj runtime.Object, gv unversioned.GroupVersion, converter runtime.ObjectConvertor) (runtime.Object, error) { - switch obj.(type) { - case *extensions.ThirdPartyResourceData: - // conversion is not supported for 3rd party objects - return obj, nil - default: - return converter.ConvertToVersion(obj, gv) - } -} - -// MustPrintWithKinds determines if printer is dealing -// with multiple resource kinds, in which case it will -// return true, indicating resource kind will be -// included as part of printer output -func MustPrintWithKinds(objs []runtime.Object, infos []*resource.Info, sorter *kubectl.RuntimeSort, printAll bool) bool { - var lastMap *meta.RESTMapping - - if len(infos) == 1 && printAll { - return true - } - - for ix := range objs { - var mapping *meta.RESTMapping - if sorter != nil { - mapping = infos[sorter.OriginalPosition(ix)].Mapping - } else { - mapping = infos[ix].Mapping - } - - // display "kind" only if we have mixed resources - if lastMap != nil && mapping.Resource != lastMap.Resource { - return true - } - lastMap = mapping - } - - return false -} - -// FilterResourceList receives a list of runtime objects. -// If any objects are filtered, that number is returned along with a modified list. 
-func FilterResourceList(obj runtime.Object, filterFuncs kubectl.Filters, filterOpts *kubectl.PrintOptions) (int, []runtime.Object, error) { - items, err := meta.ExtractList(obj) - if err != nil { - return 0, []runtime.Object{obj}, utilerrors.NewAggregate([]error{err}) - } - if errs := runtime.DecodeList(items, api.Codecs.UniversalDecoder(), runtime.UnstructuredJSONScheme); len(errs) > 0 { - return 0, []runtime.Object{obj}, utilerrors.NewAggregate(errs) - } - - filterCount := 0 - list := make([]runtime.Object, 0, len(items)) - for _, obj := range items { - if isFiltered, err := filterFuncs.Filter(obj, filterOpts); !isFiltered { - if err != nil { - glog.V(2).Infof("Unable to filter resource: %v", err) - continue - } - list = append(list, obj) - } else if isFiltered { - filterCount++ - } - } - return filterCount, list, nil -} - -func PrintFilterCount(hiddenObjNum int, resource string, options *kubectl.PrintOptions) { - if !options.NoHeaders && !options.ShowAll && hiddenObjNum > 0 { - glog.V(2).Infof(" info: %d completed object(s) was(were) not shown in %s list. Pass --show-all to see all objects.\n\n", hiddenObjNum, resource) - } -} - -// ObjectListToVersionedObject receives a list of api objects and a group version -// and squashes the list's items into a single versioned runtime.Object. -func ObjectListToVersionedObject(objects []runtime.Object, version unversioned.GroupVersion) (runtime.Object, error) { - objectList := &api.List{Items: objects} - converted, err := resource.TryConvert(api.Scheme, objectList, version, registered.GroupOrDie(api.GroupName).GroupVersion) - if err != nil { - return nil, err - } - return converted, nil -} - -// IsSiblingCommandExists receives a pointer to a cobra command and a target string. -// Returns true if the target string is found in the list of sibling commands. -func IsSiblingCommandExists(cmd *cobra.Command, targetCmdName string) bool { - for _, c := range cmd.Parent().Commands() { - if c.Name() == targetCmdName { - return true - } - } - - return false -} - -// DefaultSubCommandRun prints a command's help string to the specified output if no -// arguments (sub-commands) are provided, or a usage error otherwise. -func DefaultSubCommandRun(out io.Writer) func(c *cobra.Command, args []string) { - return func(c *cobra.Command, args []string) { - c.SetOutput(out) - RequireNoArguments(c, args) - c.Help() - } -} - -// RequireNoArguments exits with a usage error if extra arguments are provided. -func RequireNoArguments(c *cobra.Command, args []string) { - if len(args) > 0 { - CheckErr(UsageError(c, fmt.Sprintf(`unknown command %q`, strings.Join(args, " ")))) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go deleted file mode 100644 index 60d724d7e7..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go +++ /dev/null @@ -1,150 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package util - -import ( - "fmt" - "io" - "strings" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/kubectl" - - "github.com/spf13/cobra" -) - -// AddPrinterFlags adds printing related flags to a command (e.g. output format, no headers, template path) -func AddPrinterFlags(cmd *cobra.Command) { - AddOutputFlags(cmd) - cmd.Flags().String("output-version", "", "Output the formatted object with the given group version (for ex: 'extensions/v1beta1').") - AddNoHeadersFlags(cmd) - cmd.Flags().Bool("show-labels", false, "When printing, show all labels as the last column (default hide labels column)") - cmd.Flags().String("template", "", "Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].") - cmd.MarkFlagFilename("template") - cmd.Flags().String("sort-by", "", "If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. '{.metadata.name}'). The field in the API resource specified by this JSONPath expression must be an integer or a string.") - cmd.Flags().BoolP("show-all", "a", false, "When printing, show all resources (default hide terminated pods.)") -} - -// AddOutputFlagsForMutation adds output related flags to a command. Used by mutations only. -func AddOutputFlagsForMutation(cmd *cobra.Command) { - cmd.Flags().StringP("output", "o", "", "Output mode. Use \"-o name\" for shorter output (resource/name).") -} - -// AddOutputFlags adds output related flags to a command. -func AddOutputFlags(cmd *cobra.Command) { - cmd.Flags().StringP("output", "o", "", "Output format. One of: json|yaml|wide|name|custom-columns=...|custom-columns-file=...|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See custom columns [http://kubernetes.io/docs/user-guide/kubectl-overview/#custom-columns], golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://kubernetes.io/docs/user-guide/jsonpath].") -} - -// AddNoHeadersFlags adds no-headers flags to a command. -func AddNoHeadersFlags(cmd *cobra.Command) { - cmd.Flags().Bool("no-headers", false, "When using the default or custom-column output format, don't print headers.") -} - -// PrintSuccess prints message after finishing mutating operations -func PrintSuccess(mapper meta.RESTMapper, shortOutput bool, out io.Writer, resource string, name string, dryRun bool, operation string) { - resource, _ = mapper.ResourceSingularizer(resource) - dryRunMsg := "" - if dryRun { - dryRunMsg = " (dry run)" - } - if shortOutput { - // -o name: prints resource/name - if len(resource) > 0 { - fmt.Fprintf(out, "%s/%s\n", resource, name) - } else { - fmt.Fprintf(out, "%s\n", name) - } - } else { - // understandable output by default - if len(resource) > 0 { - fmt.Fprintf(out, "%s \"%s\" %s%s\n", resource, name, operation, dryRunMsg) - } else { - fmt.Fprintf(out, "\"%s\" %s%s\n", name, operation, dryRunMsg) - } - } -} - -// ValidateOutputArgs validates -o flag args for mutations -func ValidateOutputArgs(cmd *cobra.Command) error { - outputMode := GetFlagString(cmd, "output") - if outputMode != "" && outputMode != "name" { - return UsageError(cmd, "Unexpected -o output mode: %v. 
We only support '-o name'.", outputMode) - } - return nil -} - -// OutputVersion returns the preferred output version for generic content (JSON, YAML, or templates) -// defaultVersion is never mutated. Nil simply allows clean passing in common usage from client.Config -func OutputVersion(cmd *cobra.Command, defaultVersion *unversioned.GroupVersion) (unversioned.GroupVersion, error) { - outputVersionString := GetFlagString(cmd, "output-version") - if len(outputVersionString) == 0 { - if defaultVersion == nil { - return unversioned.GroupVersion{}, nil - } - - return *defaultVersion, nil - } - - return unversioned.ParseGroupVersion(outputVersionString) -} - -// PrinterForCommand returns the default printer for this command. -// Requires that printer flags have been added to cmd (see AddPrinterFlags). -func PrinterForCommand(cmd *cobra.Command) (kubectl.ResourcePrinter, bool, error) { - outputFormat := GetFlagString(cmd, "output") - - // templates are logically optional for specifying a format. - // TODO once https://github.com/kubernetes/kubernetes/issues/12668 is fixed, this should fall back to GetFlagString - templateFile, _ := cmd.Flags().GetString("template") - if len(outputFormat) == 0 && len(templateFile) != 0 { - outputFormat = "template" - } - - templateFormat := []string{ - "go-template=", "go-template-file=", "jsonpath=", "jsonpath-file=", "custom-columns=", "custom-columns-file=", - } - for _, format := range templateFormat { - if strings.HasPrefix(outputFormat, format) { - templateFile = outputFormat[len(format):] - outputFormat = format[:len(format)-1] - } - } - - printer, generic, err := kubectl.GetPrinter(outputFormat, templateFile, GetFlagBool(cmd, "no-headers")) - if err != nil { - return nil, generic, err - } - - return maybeWrapSortingPrinter(cmd, printer), generic, nil -} - -func maybeWrapSortingPrinter(cmd *cobra.Command, printer kubectl.ResourcePrinter) kubectl.ResourcePrinter { - sorting, err := cmd.Flags().GetString("sort-by") - if err != nil { - // error can happen on missing flag or bad flag type. In either case, this command didn't intent to sort - return printer - } - - if len(sorting) != 0 { - return &kubectl.SortingPrinter{ - Delegate: printer, - SortField: fmt.Sprintf("{%s}", sorting), - } - } - return printer -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/shortcut_restmapper.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/shortcut_restmapper.go deleted file mode 100644 index 2a7c7bcac6..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/shortcut_restmapper.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "strings" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/client/typed/discovery" - "k8s.io/kubernetes/pkg/kubectl" -) - -// ShortcutExpander is a RESTMapper that can be used for Kubernetes resources. 
It expands the resource first, then invokes the wrapped -type ShortcutExpander struct { - RESTMapper meta.RESTMapper - - All []unversioned.GroupResource - - discoveryClient discovery.DiscoveryInterface -} - -var _ meta.RESTMapper = &ShortcutExpander{} - -func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface) ShortcutExpander { - return ShortcutExpander{All: userResources, RESTMapper: delegate, discoveryClient: client} -} - -func (e ShortcutExpander) getAll() []unversioned.GroupResource { - if e.discoveryClient == nil { - return e.All - } - - // Check if we have access to server resources - apiResources, err := e.discoveryClient.ServerResources() - if err != nil { - return e.All - } - - availableResources := []unversioned.GroupVersionResource{} - for groupVersionString, resourceList := range apiResources { - currVersion, err := unversioned.ParseGroupVersion(groupVersionString) - if err != nil { - return e.All - } - - for _, resource := range resourceList.APIResources { - availableResources = append(availableResources, currVersion.WithResource(resource.Name)) - } - } - - availableAll := []unversioned.GroupResource{} - for _, requestedResource := range e.All { - for _, availableResource := range availableResources { - if requestedResource.Group == availableResource.Group && - requestedResource.Resource == availableResource.Resource { - availableAll = append(availableAll, requestedResource) - break - } - } - } - - return availableAll -} - -func (e ShortcutExpander) KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) { - return e.RESTMapper.KindFor(expandResourceShortcut(resource)) -} - -func (e ShortcutExpander) KindsFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionKind, error) { - return e.RESTMapper.KindsFor(expandResourceShortcut(resource)) -} - -func (e ShortcutExpander) ResourcesFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) { - return e.RESTMapper.ResourcesFor(expandResourceShortcut(resource)) -} - -func (e ShortcutExpander) ResourceFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error) { - return e.RESTMapper.ResourceFor(expandResourceShortcut(resource)) -} - -func (e ShortcutExpander) ResourceSingularizer(resource string) (string, error) { - return e.RESTMapper.ResourceSingularizer(expandResourceShortcut(unversioned.GroupVersionResource{Resource: resource}).Resource) -} - -func (e ShortcutExpander) RESTMapping(gk unversioned.GroupKind, versions ...string) (*meta.RESTMapping, error) { - return e.RESTMapper.RESTMapping(gk, versions...) -} - -func (e ShortcutExpander) RESTMappings(gk unversioned.GroupKind) ([]*meta.RESTMapping, error) { - return e.RESTMapper.RESTMappings(gk) -} - -// userResources are the resource names that apply to the primary, user facing resources used by -// client tools. They are in deletion-first order - dependent resources should be last. 
-var userResources = []unversioned.GroupResource{ - {Group: "", Resource: "pods"}, - {Group: "", Resource: "replicationcontrollers"}, - {Group: "", Resource: "services"}, - {Group: "apps", Resource: "statefulsets"}, - {Group: "autoscaling", Resource: "horizontalpodautoscalers"}, - {Group: "extensions", Resource: "jobs"}, - {Group: "extensions", Resource: "deployments"}, - {Group: "extensions", Resource: "replicasets"}, -} - -// AliasesForResource returns whether a resource has an alias or not -func (e ShortcutExpander) AliasesForResource(resource string) ([]string, bool) { - if strings.ToLower(resource) == "all" { - var resources []unversioned.GroupResource - if resources = e.getAll(); len(resources) == 0 { - resources = userResources - } - aliases := []string{} - for _, r := range resources { - aliases = append(aliases, r.Resource) - } - return aliases, true - } - expanded := expandResourceShortcut(unversioned.GroupVersionResource{Resource: resource}).Resource - return []string{expanded}, (expanded != resource) -} - -// expandResourceShortcut will return the expanded version of resource -// (something that a pkg/api/meta.RESTMapper can understand), if it is -// indeed a shortcut. Otherwise, will return resource unmodified. -func expandResourceShortcut(resource unversioned.GroupVersionResource) unversioned.GroupVersionResource { - if expanded, ok := kubectl.ShortForms[resource.Resource]; ok { - resource.Resource = expanded - return resource - } - return resource -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/configmap.go b/vendor/k8s.io/kubernetes/pkg/kubectl/configmap.go deleted file mode 100644 index 3e3a6c06cc..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/configmap.go +++ /dev/null @@ -1,211 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/validation" -) - -// ConfigMapGeneratorV1 supports stable generation of a configMap. -type ConfigMapGeneratorV1 struct { - // Name of configMap (required) - Name string - // Type of configMap (optional) - Type string - // FileSources to derive the configMap from (optional) - FileSources []string - // LiteralSources to derive the configMap from (optional) - LiteralSources []string -} - -// Ensure it supports the generator pattern that uses parameter injection. -var _ Generator = &ConfigMapGeneratorV1{} - -// Ensure it supports the generator pattern that uses parameters specified during construction. -var _ StructuredGenerator = &ConfigMapGeneratorV1{} - -// Generate returns a configMap using the specified parameters. 
-func (s ConfigMapGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) - if err != nil { - return nil, err - } - delegate := &ConfigMapGeneratorV1{} - fromFileStrings, found := genericParams["from-file"] - if found { - fromFileArray, isArray := fromFileStrings.([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found :%v", fromFileStrings) - } - delegate.FileSources = fromFileArray - delete(genericParams, "from-file") - } - fromLiteralStrings, found := genericParams["from-literal"] - if found { - fromLiteralArray, isArray := fromLiteralStrings.([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found :%v", fromFileStrings) - } - delegate.LiteralSources = fromLiteralArray - delete(genericParams, "from-literal") - } - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - delegate.Name = params["name"] - delegate.Type = params["type"] - return delegate.StructuredGenerate() -} - -// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern. -func (s ConfigMapGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"type", false}, - {"from-file", false}, - {"from-literal", false}, - {"force", false}, - } -} - -// StructuredGenerate outputs a configMap object using the configured fields. -func (s ConfigMapGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := s.validate(); err != nil { - return nil, err - } - configMap := &api.ConfigMap{} - configMap.Name = s.Name - configMap.Data = map[string]string{} - if len(s.FileSources) > 0 { - if err := handleConfigMapFromFileSources(configMap, s.FileSources); err != nil { - return nil, err - } - } - if len(s.LiteralSources) > 0 { - if err := handleConfigMapFromLiteralSources(configMap, s.LiteralSources); err != nil { - return nil, err - } - } - return configMap, nil -} - -// validate validates required fields are set to support structured generation. -func (s ConfigMapGeneratorV1) validate() error { - if len(s.Name) == 0 { - return fmt.Errorf("name must be specified") - } - return nil -} - -// handleConfigMapFromLiteralSources adds the specified literal source -// information into the provided configMap. 
-func handleConfigMapFromLiteralSources(configMap *api.ConfigMap, literalSources []string) error { - for _, literalSource := range literalSources { - keyName, value, err := parseLiteralSource(literalSource) - if err != nil { - return err - } - err = addKeyFromLiteralToConfigMap(configMap, keyName, value) - if err != nil { - return err - } - } - return nil -} - -// handleConfigMapFromFileSources adds the specified file source information -// into the provided configMap -func handleConfigMapFromFileSources(configMap *api.ConfigMap, fileSources []string) error { - for _, fileSource := range fileSources { - keyName, filePath, err := parseFileSource(fileSource) - if err != nil { - return err - } - info, err := os.Stat(filePath) - if err != nil { - switch err := err.(type) { - case *os.PathError: - return fmt.Errorf("error reading %s: %v", filePath, err.Err) - default: - return fmt.Errorf("error reading %s: %v", filePath, err) - } - } - if info.IsDir() { - if strings.Contains(fileSource, "=") { - return fmt.Errorf("cannot give a key name for a directory path.") - } - fileList, err := ioutil.ReadDir(filePath) - if err != nil { - return fmt.Errorf("error listing files in %s: %v", filePath, err) - } - for _, item := range fileList { - itemPath := path.Join(filePath, item.Name()) - if item.Mode().IsRegular() { - keyName = item.Name() - err = addKeyFromFileToConfigMap(configMap, keyName, itemPath) - if err != nil { - return err - } - } - } - } else { - err = addKeyFromFileToConfigMap(configMap, keyName, filePath) - if err != nil { - return err - } - } - } - - return nil -} - -// addKeyFromFileToConfigMap adds a key with the given name to a ConfigMap, populating -// the value with the content of the given file path, or returns an error. -func addKeyFromFileToConfigMap(configMap *api.ConfigMap, keyName, filePath string) error { - data, err := ioutil.ReadFile(filePath) - if err != nil { - return err - } - return addKeyFromLiteralToConfigMap(configMap, keyName, string(data)) -} - -// addKeyFromLiteralToConfigMap adds the given key and data to the given config map, -// returning an error if the key is not valid or if the key already exists. -func addKeyFromLiteralToConfigMap(configMap *api.ConfigMap, keyName, data string) error { - // Note, the rules for ConfigMap keys are the exact same as the ones for SecretKeys. - if errs := validation.IsConfigMapKey(keyName); len(errs) != 0 { - return fmt.Errorf("%q is not a valid key name for a ConfigMap: %s", keyName, strings.Join(errs, ";")) - } - if _, entryExists := configMap.Data[keyName]; entryExists { - return fmt.Errorf("cannot add key %s, another key by that name already exists: %v.", keyName, configMap.Data) - } - configMap.Data[keyName] = data - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/custom_column_printer.go b/vendor/k8s.io/kubernetes/pkg/kubectl/custom_column_printer.go deleted file mode 100644 index a73c99d9fd..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/custom_column_printer.go +++ /dev/null @@ -1,241 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "bufio" - "bytes" - "fmt" - "io" - "reflect" - "regexp" - "strings" - "text/tabwriter" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/jsonpath" -) - -const ( - columnwidth = 10 - tabwidth = 4 - padding = 3 - padding_character = ' ' - flags = 0 -) - -var jsonRegexp = regexp.MustCompile("^\\{\\.?([^{}]+)\\}$|^\\.?([^{}]+)$") - -// MassageJSONPath attempts to be flexible with JSONPath expressions, it accepts: -// * metadata.name (no leading '.' or curly brances '{...}' -// * {metadata.name} (no leading '.') -// * .metadata.name (no curly braces '{...}') -// * {.metadata.name} (complete expression) -// And transforms them all into a valid jsonpat expression: -// {.metadata.name} -func massageJSONPath(pathExpression string) (string, error) { - if len(pathExpression) == 0 { - return pathExpression, nil - } - submatches := jsonRegexp.FindStringSubmatch(pathExpression) - if submatches == nil { - return "", fmt.Errorf("unexpected path string, expected a 'name1.name2' or '.name1.name2' or '{name1.name2}' or '{.name1.name2}'") - } - if len(submatches) != 3 { - return "", fmt.Errorf("unexpected submatch list: %v", submatches) - } - var fieldSpec string - if len(submatches[1]) != 0 { - fieldSpec = submatches[1] - } else { - fieldSpec = submatches[2] - } - return fmt.Sprintf("{.%s}", fieldSpec), nil -} - -// NewCustomColumnsPrinterFromSpec creates a custom columns printer from a comma separated list of
<header>:<json-path-expr> pairs. -// e.g. NAME:metadata.name,API_VERSION:apiVersion creates a printer that prints: -// -// NAME API_VERSION -// foo bar -func NewCustomColumnsPrinterFromSpec(spec string, decoder runtime.Decoder, noHeaders bool) (*CustomColumnsPrinter, error) { - if len(spec) == 0 { - return nil, fmt.Errorf("custom-columns format specified but no custom columns given") - } - parts := strings.Split(spec, ",") - columns := make([]Column, len(parts)) - for ix := range parts { - colSpec := strings.Split(parts[ix], ":") - if len(colSpec) != 2 { - return nil, fmt.Errorf("unexpected custom-columns spec: %s, expected <header>
:<json-path-expr>", parts[ix]) - } - spec, err := massageJSONPath(colSpec[1]) - if err != nil { - return nil, err - } - columns[ix] = Column{Header: colSpec[0], FieldSpec: spec} - } - return &CustomColumnsPrinter{Columns: columns, Decoder: decoder, NoHeaders: noHeaders}, nil -} - -func splitOnWhitespace(line string) []string { - lineScanner := bufio.NewScanner(bytes.NewBufferString(line)) - lineScanner.Split(bufio.ScanWords) - result := []string{} - for lineScanner.Scan() { - result = append(result, lineScanner.Text()) - } - return result -} - -// NewCustomColumnsPrinterFromTemplate creates a custom columns printer from a template stream. The template is expected -// to consist of two lines, whitespace separated. The first line is the header line, the second line is the jsonpath field spec -// For example, the template below: -// NAME API_VERSION -// {metadata.name} {apiVersion} -func NewCustomColumnsPrinterFromTemplate(templateReader io.Reader, decoder runtime.Decoder) (*CustomColumnsPrinter, error) { - scanner := bufio.NewScanner(templateReader) - if !scanner.Scan() { - return nil, fmt.Errorf("invalid template, missing header line. Expected format is one line of space separated headers, one line of space separated column specs.") - } - headers := splitOnWhitespace(scanner.Text()) - - if !scanner.Scan() { - return nil, fmt.Errorf("invalid template, missing spec line. Expected format is one line of space separated headers, one line of space separated column specs.") - } - specs := splitOnWhitespace(scanner.Text()) - - if len(headers) != len(specs) { - return nil, fmt.Errorf("number of headers (%d) and field specifications (%d) don't match", len(headers), len(specs)) - } - - columns := make([]Column, len(headers)) - for ix := range headers { - spec, err := massageJSONPath(specs[ix]) - if err != nil { - return nil, err - } - columns[ix] = Column{ - Header: headers[ix], - FieldSpec: spec, - } - } - return &CustomColumnsPrinter{Columns: columns, Decoder: decoder, NoHeaders: false}, nil -} - -// Column represents a user specified column -type Column struct { - // The header to print above the column, general style is ALL_CAPS - Header string - // The pointer to the field in the object to print in JSONPath form - // e.g. {.ObjectMeta.Name}, see pkg/util/jsonpath for more details.
- FieldSpec string -} - -// CustomColumnPrinter is a printer that knows how to print arbitrary columns -// of data from templates specified in the `Columns` array -type CustomColumnsPrinter struct { - Columns []Column - Decoder runtime.Decoder - NoHeaders bool -} - -func (s *CustomColumnsPrinter) AfterPrint(w io.Writer, res string) error { - return nil -} - -func (s *CustomColumnsPrinter) PrintObj(obj runtime.Object, out io.Writer) error { - w := tabwriter.NewWriter(out, columnwidth, tabwidth, padding, padding_character, flags) - - if !s.NoHeaders { - headers := make([]string, len(s.Columns)) - for ix := range s.Columns { - headers[ix] = s.Columns[ix].Header - } - fmt.Fprintln(w, strings.Join(headers, "\t")) - } - parsers := make([]*jsonpath.JSONPath, len(s.Columns)) - for ix := range s.Columns { - parsers[ix] = jsonpath.New(fmt.Sprintf("column%d", ix)) - if err := parsers[ix].Parse(s.Columns[ix].FieldSpec); err != nil { - return err - } - } - - if meta.IsListType(obj) { - objs, err := meta.ExtractList(obj) - if err != nil { - return err - } - for ix := range objs { - if err := s.printOneObject(objs[ix], parsers, w); err != nil { - return err - } - } - } else { - if err := s.printOneObject(obj, parsers, w); err != nil { - return err - } - } - return w.Flush() -} - -func (s *CustomColumnsPrinter) printOneObject(obj runtime.Object, parsers []*jsonpath.JSONPath, out io.Writer) error { - columns := make([]string, len(parsers)) - switch u := obj.(type) { - case *runtime.Unknown: - if len(u.Raw) > 0 { - var err error - if obj, err = runtime.Decode(s.Decoder, u.Raw); err != nil { - return fmt.Errorf("can't decode object for printing: %v (%s)", err, u.Raw) - } - } - } - - for ix := range parsers { - parser := parsers[ix] - - var values [][]reflect.Value - var err error - if unstructured, ok := obj.(*runtime.Unstructured); ok { - values, err = parser.FindResults(unstructured.Object) - } else { - values, err = parser.FindResults(reflect.ValueOf(obj).Elem().Interface()) - } - - if err != nil { - return err - } - if len(values) == 0 || len(values[0]) == 0 { - fmt.Fprintf(out, "\t") - } - valueStrings := []string{} - for arrIx := range values { - for valIx := range values[arrIx] { - valueStrings = append(valueStrings, fmt.Sprintf("%v", values[arrIx][valIx].Interface())) - } - } - columns[ix] = strings.Join(valueStrings, ",") - } - fmt.Fprintln(out, strings.Join(columns, "\t")) - return nil -} - -func (s *CustomColumnsPrinter) HandledResources() []string { - return []string{} -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/deployment.go b/vendor/k8s.io/kubernetes/pkg/kubectl/deployment.go deleted file mode 100644 index 035366585c..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/deployment.go +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "fmt" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/runtime" -) - -// DeploymentGeneratorV1 supports stable generation of a deployment -type DeploymentBasicGeneratorV1 struct { - Name string - Images []string -} - -// Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &DeploymentBasicGeneratorV1{} - -func (DeploymentBasicGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"image", true}, - } -} - -func (s DeploymentBasicGeneratorV1) Generate(params map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), params) - if err != nil { - return nil, err - } - name, isString := params["name"].(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for 'name'", name) - } - imageStrings, isArray := params["image"].([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found :%v", imageStrings) - } - delegate := &DeploymentBasicGeneratorV1{Name: name, Images: imageStrings} - return delegate.StructuredGenerate() -} - -// StructuredGenerate outputs a deployment object using the configured fields -func (s *DeploymentBasicGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := s.validate(); err != nil { - return nil, err - } - - podSpec := api.PodSpec{Containers: []api.Container{}} - for _, imageString := range s.Images { - imageSplit := strings.Split(imageString, "/") - name := imageSplit[len(imageSplit)-1] - podSpec.Containers = append(podSpec.Containers, api.Container{Name: name, Image: imageString}) - } - - // setup default label and selector - labels := map[string]string{} - labels["app"] = s.Name - selector := unversioned.LabelSelector{MatchLabels: labels} - deployment := extensions.Deployment{ - ObjectMeta: api.ObjectMeta{ - Name: s.Name, - Labels: labels, - }, - Spec: extensions.DeploymentSpec{ - Replicas: 1, - Selector: &selector, - Template: api.PodTemplateSpec{ - ObjectMeta: api.ObjectMeta{ - Labels: labels, - }, - Spec: podSpec, - }, - }, - } - return &deployment, nil -} - -// validate validates required fields are set to support structured generation -func (s *DeploymentBasicGeneratorV1) validate() error { - if len(s.Name) == 0 { - return fmt.Errorf("name must be specified") - } - if len(s.Images) == 0 { - return fmt.Errorf("at least one image must be specified") - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/describe.go b/vendor/k8s.io/kubernetes/pkg/kubectl/describe.go deleted file mode 100644 index 023313aa6f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/describe.go +++ /dev/null @@ -1,2704 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net" - "net/url" - "reflect" - "sort" - "strings" - "time" - - "k8s.io/kubernetes/federation/apis/federation" - fed_clientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/events" - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/autoscaling" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/certificates" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/apis/policy" - "k8s.io/kubernetes/pkg/apis/storage" - storageutil "k8s.io/kubernetes/pkg/apis/storage/util" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" - deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" - "k8s.io/kubernetes/pkg/fieldpath" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/kubelet/qos" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/types" - certutil "k8s.io/kubernetes/pkg/util/cert" - "k8s.io/kubernetes/pkg/util/intstr" - "k8s.io/kubernetes/pkg/util/sets" - - "github.com/golang/glog" -) - -// Describer generates output for the named resource or an error -// if the output could not be generated. Implementers typically -// abstract the retrieval of the named object from a remote server. -type Describer interface { - Describe(namespace, name string, describerSettings DescriberSettings) (output string, err error) -} - -// DescriberSettings holds display configuration for each object -// describer to control what is printed. -type DescriberSettings struct { - ShowEvents bool -} - -// ObjectDescriber is an interface for displaying arbitrary objects with extra -// information. Use when an object is in hand (on disk, or already retrieved). -// Implementers may ignore the additional information passed on extra, or use it -// by default. ObjectDescribers may return ErrNoDescriber if no suitable describer -// is found. -type ObjectDescriber interface { - DescribeObject(object interface{}, extra ...interface{}) (output string, err error) -} - -// ErrNoDescriber is a structured error indicating the provided object or objects -// cannot be described. -type ErrNoDescriber struct { - Types []string -} - -// Error implements the error interface. 
-func (e ErrNoDescriber) Error() string { - return fmt.Sprintf("no describer has been defined for %v", e.Types) -} - -func describerMap(c clientset.Interface) map[unversioned.GroupKind]Describer { - m := map[unversioned.GroupKind]Describer{ - api.Kind("Pod"): &PodDescriber{c}, - api.Kind("ReplicationController"): &ReplicationControllerDescriber{c}, - api.Kind("Secret"): &SecretDescriber{c}, - api.Kind("Service"): &ServiceDescriber{c}, - api.Kind("ServiceAccount"): &ServiceAccountDescriber{c}, - api.Kind("Node"): &NodeDescriber{c}, - api.Kind("LimitRange"): &LimitRangeDescriber{c}, - api.Kind("ResourceQuota"): &ResourceQuotaDescriber{c}, - api.Kind("PersistentVolume"): &PersistentVolumeDescriber{c}, - api.Kind("PersistentVolumeClaim"): &PersistentVolumeClaimDescriber{c}, - api.Kind("Namespace"): &NamespaceDescriber{c}, - api.Kind("Endpoints"): &EndpointsDescriber{c}, - api.Kind("ConfigMap"): &ConfigMapDescriber{c}, - - extensions.Kind("ReplicaSet"): &ReplicaSetDescriber{c}, - extensions.Kind("HorizontalPodAutoscaler"): &HorizontalPodAutoscalerDescriber{c}, - extensions.Kind("NetworkPolicy"): &NetworkPolicyDescriber{c}, - autoscaling.Kind("HorizontalPodAutoscaler"): &HorizontalPodAutoscalerDescriber{c}, - extensions.Kind("DaemonSet"): &DaemonSetDescriber{c}, - extensions.Kind("Deployment"): &DeploymentDescriber{c}, - extensions.Kind("Job"): &JobDescriber{c}, - extensions.Kind("Ingress"): &IngressDescriber{c}, - batch.Kind("Job"): &JobDescriber{c}, - batch.Kind("CronJob"): &CronJobDescriber{c}, - apps.Kind("StatefulSet"): &StatefulSetDescriber{c}, - certificates.Kind("CertificateSigningRequest"): &CertificateSigningRequestDescriber{c}, - storage.Kind("StorageClass"): &StorageClassDescriber{c}, - policy.Kind("PodDisruptionBudget"): &PodDisruptionBudgetDescriber{c}, - } - - return m -} - -// List of all resource types we can describe -func DescribableResources() []string { - keys := make([]string, 0) - - for k := range describerMap(nil) { - resource := strings.ToLower(k.Kind) - keys = append(keys, resource) - } - return keys -} - -// Describer returns the default describe functions for each of the standard -// Kubernetes types. -func DescriberFor(kind unversioned.GroupKind, c clientset.Interface) (Describer, bool) { - f, ok := describerMap(c)[kind] - return f, ok -} - -// DefaultObjectDescriber can describe the default Kubernetes objects. -var DefaultObjectDescriber ObjectDescriber - -func init() { - d := &Describers{} - err := d.Add( - describeLimitRange, - describeQuota, - describePod, - describeService, - describeReplicationController, - describeDaemonSet, - describeNode, - describeNamespace, - ) - if err != nil { - glog.Fatalf("Cannot register describers: %v", err) - } - DefaultObjectDescriber = d -} - -// NamespaceDescriber generates information about a namespace -type NamespaceDescriber struct { - clientset.Interface -} - -func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - ns, err := d.Core().Namespaces().Get(name) - if err != nil { - return "", err - } - resourceQuotaList, err := d.Core().ResourceQuotas(name).List(api.ListOptions{}) - if err != nil { - if errors.IsNotFound(err) { - // Server does not support resource quotas. - // Not an error, will not show resource quotas information. - resourceQuotaList = nil - } else { - return "", err - } - } - limitRangeList, err := d.Core().LimitRanges(name).List(api.ListOptions{}) - if err != nil { - if errors.IsNotFound(err) { - // Server does not support limit ranges. 
- // Not an error, will not show limit ranges information. - limitRangeList = nil - } else { - return "", err - } - } - return describeNamespace(ns, resourceQuotaList, limitRangeList) -} - -func describeNamespace(namespace *api.Namespace, resourceQuotaList *api.ResourceQuotaList, limitRangeList *api.LimitRangeList) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", namespace.Name) - printLabelsMultiline(out, "Labels", namespace.Labels) - fmt.Fprintf(out, "Status:\t%s\n", string(namespace.Status.Phase)) - if resourceQuotaList != nil { - fmt.Fprintf(out, "\n") - DescribeResourceQuotas(resourceQuotaList, out) - } - if limitRangeList != nil { - fmt.Fprintf(out, "\n") - DescribeLimitRanges(limitRangeList, out) - } - return nil - }) -} - -func describeLimitRangeSpec(spec api.LimitRangeSpec, prefix string, w io.Writer) { - for i := range spec.Limits { - item := spec.Limits[i] - maxResources := item.Max - minResources := item.Min - defaultLimitResources := item.Default - defaultRequestResources := item.DefaultRequest - ratio := item.MaxLimitRequestRatio - - set := map[api.ResourceName]bool{} - for k := range maxResources { - set[k] = true - } - for k := range minResources { - set[k] = true - } - for k := range defaultLimitResources { - set[k] = true - } - for k := range defaultRequestResources { - set[k] = true - } - for k := range ratio { - set[k] = true - } - - for k := range set { - // if no value is set, we output - - maxValue := "-" - minValue := "-" - defaultLimitValue := "-" - defaultRequestValue := "-" - ratioValue := "-" - - maxQuantity, maxQuantityFound := maxResources[k] - if maxQuantityFound { - maxValue = maxQuantity.String() - } - - minQuantity, minQuantityFound := minResources[k] - if minQuantityFound { - minValue = minQuantity.String() - } - - defaultLimitQuantity, defaultLimitQuantityFound := defaultLimitResources[k] - if defaultLimitQuantityFound { - defaultLimitValue = defaultLimitQuantity.String() - } - - defaultRequestQuantity, defaultRequestQuantityFound := defaultRequestResources[k] - if defaultRequestQuantityFound { - defaultRequestValue = defaultRequestQuantity.String() - } - - ratioQuantity, ratioQuantityFound := ratio[k] - if ratioQuantityFound { - ratioValue = ratioQuantity.String() - } - - msg := "%s%s\t%v\t%v\t%v\t%v\t%v\t%v\n" - fmt.Fprintf(w, msg, prefix, item.Type, k, minValue, maxValue, defaultRequestValue, defaultLimitValue, ratioValue) - } - } -} - -// DescribeLimitRanges merges a set of limit range items into a single tabular description -func DescribeLimitRanges(limitRanges *api.LimitRangeList, w io.Writer) { - if len(limitRanges.Items) == 0 { - fmt.Fprint(w, "No resource limits.\n") - return - } - fmt.Fprintf(w, "Resource Limits\n Type\tResource\tMin\tMax\tDefault Request\tDefault Limit\tMax Limit/Request Ratio\n") - fmt.Fprintf(w, " ----\t--------\t---\t---\t---------------\t-------------\t-----------------------\n") - for _, limitRange := range limitRanges.Items { - describeLimitRangeSpec(limitRange.Spec, " ", w) - } -} - -// DescribeResourceQuotas merges a set of quota items into a single tabular description of all quotas -func DescribeResourceQuotas(quotas *api.ResourceQuotaList, w io.Writer) { - if len(quotas.Items) == 0 { - fmt.Fprint(w, "No resource quota.\n") - return - } - sort.Sort(SortableResourceQuotas(quotas.Items)) - - fmt.Fprint(w, "Resource Quotas") - for _, q := range quotas.Items { - fmt.Fprintf(w, "\n Name:\t%s\n", q.Name) - if len(q.Spec.Scopes) > 0 { - scopes := make([]string, 0, 
len(q.Spec.Scopes)) - for _, scope := range q.Spec.Scopes { - scopes = append(scopes, string(scope)) - } - sort.Strings(scopes) - fmt.Fprintf(w, " Scopes:\t%s\n", strings.Join(scopes, ", ")) - for _, scope := range scopes { - helpText := helpTextForResourceQuotaScope(api.ResourceQuotaScope(scope)) - if len(helpText) > 0 { - fmt.Fprintf(w, " * %s\n", helpText) - } - } - } - - fmt.Fprintf(w, " Resource\tUsed\tHard\n") - fmt.Fprint(w, " --------\t---\t---\n") - - resources := make([]api.ResourceName, 0, len(q.Status.Hard)) - for resource := range q.Status.Hard { - resources = append(resources, resource) - } - sort.Sort(SortableResourceNames(resources)) - - for _, resource := range resources { - hardQuantity := q.Status.Hard[resource] - usedQuantity := q.Status.Used[resource] - fmt.Fprintf(w, " %s\t%s\t%s\n", string(resource), usedQuantity.String(), hardQuantity.String()) - } - } -} - -// LimitRangeDescriber generates information about a limit range -type LimitRangeDescriber struct { - clientset.Interface -} - -func (d *LimitRangeDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - lr := d.Core().LimitRanges(namespace) - - limitRange, err := lr.Get(name) - if err != nil { - return "", err - } - return describeLimitRange(limitRange) -} - -func describeLimitRange(limitRange *api.LimitRange) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", limitRange.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", limitRange.Namespace) - fmt.Fprintf(out, "Type\tResource\tMin\tMax\tDefault Request\tDefault Limit\tMax Limit/Request Ratio\n") - fmt.Fprintf(out, "----\t--------\t---\t---\t---------------\t-------------\t-----------------------\n") - describeLimitRangeSpec(limitRange.Spec, "", out) - return nil - }) -} - -// ResourceQuotaDescriber generates information about a resource quota -type ResourceQuotaDescriber struct { - clientset.Interface -} - -func (d *ResourceQuotaDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - rq := d.Core().ResourceQuotas(namespace) - - resourceQuota, err := rq.Get(name) - if err != nil { - return "", err - } - - return describeQuota(resourceQuota) -} - -func helpTextForResourceQuotaScope(scope api.ResourceQuotaScope) string { - switch scope { - case api.ResourceQuotaScopeTerminating: - return "Matches all pods that have an active deadline." - case api.ResourceQuotaScopeNotTerminating: - return "Matches all pods that do not have an active deadline." - case api.ResourceQuotaScopeBestEffort: - return "Matches all pods that have best effort quality of service." - case api.ResourceQuotaScopeNotBestEffort: - return "Matches all pods that do not have best effort quality of service." 
- default: - return "" - } -} -func describeQuota(resourceQuota *api.ResourceQuota) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", resourceQuota.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", resourceQuota.Namespace) - if len(resourceQuota.Spec.Scopes) > 0 { - scopes := make([]string, 0, len(resourceQuota.Spec.Scopes)) - for _, scope := range resourceQuota.Spec.Scopes { - scopes = append(scopes, string(scope)) - } - sort.Strings(scopes) - fmt.Fprintf(out, "Scopes:\t%s\n", strings.Join(scopes, ", ")) - for _, scope := range scopes { - helpText := helpTextForResourceQuotaScope(api.ResourceQuotaScope(scope)) - if len(helpText) > 0 { - fmt.Fprintf(out, " * %s\n", helpText) - } - } - } - fmt.Fprintf(out, "Resource\tUsed\tHard\n") - fmt.Fprintf(out, "--------\t----\t----\n") - - resources := make([]api.ResourceName, 0, len(resourceQuota.Status.Hard)) - for resource := range resourceQuota.Status.Hard { - resources = append(resources, resource) - } - sort.Sort(SortableResourceNames(resources)) - - msg := "%v\t%v\t%v\n" - for i := range resources { - resource := resources[i] - hardQuantity := resourceQuota.Status.Hard[resource] - usedQuantity := resourceQuota.Status.Used[resource] - fmt.Fprintf(out, msg, resource, usedQuantity.String(), hardQuantity.String()) - } - return nil - }) -} - -// PodDescriber generates information about a pod and the replication controllers that -// create it. -type PodDescriber struct { - clientset.Interface -} - -func (d *PodDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - pod, err := d.Core().Pods(namespace).Get(name) - if err != nil { - if describerSettings.ShowEvents { - eventsInterface := d.Core().Events(namespace) - selector := eventsInterface.GetFieldSelector(&name, &namespace, nil, nil) - options := api.ListOptions{FieldSelector: selector} - events, err2 := eventsInterface.List(options) - if describerSettings.ShowEvents && err2 == nil && len(events.Items) > 0 { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Pod '%v': error '%v', but found events.\n", name, err) - DescribeEvents(events, out) - return nil - }) - } - } - return "", err - } - - var events *api.EventList - if describerSettings.ShowEvents { - if ref, err := api.GetReference(pod); err != nil { - glog.Errorf("Unable to construct reference to '%#v': %v", pod, err) - } else { - ref.Kind = "" - events, _ = d.Core().Events(namespace).Search(ref) - } - } - - return describePod(pod, events) -} - -func describePod(pod *api.Pod, events *api.EventList) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", pod.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", pod.Namespace) - fmt.Fprintf(out, "Node:\t%s\n", pod.Spec.NodeName+"/"+pod.Status.HostIP) - if pod.Status.StartTime != nil { - fmt.Fprintf(out, "Start Time:\t%s\n", pod.Status.StartTime.Time.Format(time.RFC1123Z)) - } - printLabelsMultiline(out, "Labels", pod.Labels) - if pod.DeletionTimestamp != nil { - fmt.Fprintf(out, "Status:\tTerminating (expires %s)\n", pod.DeletionTimestamp.Time.Format(time.RFC1123Z)) - fmt.Fprintf(out, "Termination Grace Period:\t%ds\n", *pod.DeletionGracePeriodSeconds) - } else { - fmt.Fprintf(out, "Status:\t%s\n", string(pod.Status.Phase)) - } - if len(pod.Status.Reason) > 0 { - fmt.Fprintf(out, "Reason:\t%s\n", pod.Status.Reason) - } - if len(pod.Status.Message) > 0 { - fmt.Fprintf(out, "Message:\t%s\n", pod.Status.Message) - } - fmt.Fprintf(out, "IP:\t%s\n", 
pod.Status.PodIP) - fmt.Fprintf(out, "Controllers:\t%s\n", printControllers(pod.Annotations)) - if len(pod.Spec.InitContainers) > 0 { - describeContainers("Init Containers", pod.Spec.InitContainers, pod.Status.InitContainerStatuses, EnvValueRetriever(pod), out, "") - } - describeContainers("Containers", pod.Spec.Containers, pod.Status.ContainerStatuses, EnvValueRetriever(pod), out, "") - if len(pod.Status.Conditions) > 0 { - fmt.Fprint(out, "Conditions:\n Type\tStatus\n") - for _, c := range pod.Status.Conditions { - fmt.Fprintf(out, " %v \t%v \n", - c.Type, - c.Status) - } - } - describeVolumes(pod.Spec.Volumes, out, "") - fmt.Fprintf(out, "QoS Class:\t%s\n", qos.GetPodQOS(pod)) - printTolerationsInAnnotationMultiline(out, "Tolerations", pod.Annotations) - if events != nil { - DescribeEvents(events, out) - } - return nil - }) -} - -func printControllers(annotation map[string]string) string { - value, ok := annotation[api.CreatedByAnnotation] - if ok { - var r api.SerializedReference - err := json.Unmarshal([]byte(value), &r) - if err == nil { - return fmt.Sprintf("%s/%s", r.Reference.Kind, r.Reference.Name) - } - } - return "" -} - -// TODO: Do a better job at indenting, maybe by using a prefix writer -func describeVolumes(volumes []api.Volume, out io.Writer, space string) { - if volumes == nil || len(volumes) == 0 { - fmt.Fprintf(out, "%sNo volumes.\n", space) - return - } - fmt.Fprintf(out, "%sVolumes:\n", space) - for _, volume := range volumes { - nameIndent := "" - if len(space) > 0 { - nameIndent = " " - } - fmt.Fprintf(out, " %s%v:\n", nameIndent, volume.Name) - switch { - case volume.VolumeSource.HostPath != nil: - printHostPathVolumeSource(volume.VolumeSource.HostPath, out) - case volume.VolumeSource.EmptyDir != nil: - printEmptyDirVolumeSource(volume.VolumeSource.EmptyDir, out) - case volume.VolumeSource.GCEPersistentDisk != nil: - printGCEPersistentDiskVolumeSource(volume.VolumeSource.GCEPersistentDisk, out) - case volume.VolumeSource.AWSElasticBlockStore != nil: - printAWSElasticBlockStoreVolumeSource(volume.VolumeSource.AWSElasticBlockStore, out) - case volume.VolumeSource.GitRepo != nil: - printGitRepoVolumeSource(volume.VolumeSource.GitRepo, out) - case volume.VolumeSource.Secret != nil: - printSecretVolumeSource(volume.VolumeSource.Secret, out) - case volume.VolumeSource.ConfigMap != nil: - printConfigMapVolumeSource(volume.VolumeSource.ConfigMap, out) - case volume.VolumeSource.NFS != nil: - printNFSVolumeSource(volume.VolumeSource.NFS, out) - case volume.VolumeSource.ISCSI != nil: - printISCSIVolumeSource(volume.VolumeSource.ISCSI, out) - case volume.VolumeSource.Glusterfs != nil: - printGlusterfsVolumeSource(volume.VolumeSource.Glusterfs, out) - case volume.VolumeSource.PersistentVolumeClaim != nil: - printPersistentVolumeClaimVolumeSource(volume.VolumeSource.PersistentVolumeClaim, out) - case volume.VolumeSource.RBD != nil: - printRBDVolumeSource(volume.VolumeSource.RBD, out) - case volume.VolumeSource.Quobyte != nil: - printQuobyteVolumeSource(volume.VolumeSource.Quobyte, out) - case volume.VolumeSource.DownwardAPI != nil: - printDownwardAPIVolumeSource(volume.VolumeSource.DownwardAPI, out) - case volume.VolumeSource.AzureDisk != nil: - printAzureDiskVolumeSource(volume.VolumeSource.AzureDisk, out) - case volume.VolumeSource.VsphereVolume != nil: - printVsphereVolumeSource(volume.VolumeSource.VsphereVolume, out) - case volume.VolumeSource.Cinder != nil: - printCinderVolumeSource(volume.VolumeSource.Cinder, out) - case volume.VolumeSource.PhotonPersistentDisk != nil: - 
printPhotonPersistentDiskVolumeSource(volume.VolumeSource.PhotonPersistentDisk, out) - default: - fmt.Fprintf(out, " \n") - } - } -} - -func printHostPathVolumeSource(hostPath *api.HostPathVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tHostPath (bare host directory volume)\n"+ - " Path:\t%v\n", hostPath.Path) -} - -func printEmptyDirVolumeSource(emptyDir *api.EmptyDirVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tEmptyDir (a temporary directory that shares a pod's lifetime)\n"+ - " Medium:\t%v\n", emptyDir.Medium) -} - -func printGCEPersistentDiskVolumeSource(gce *api.GCEPersistentDiskVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tGCEPersistentDisk (a Persistent Disk resource in Google Compute Engine)\n"+ - " PDName:\t%v\n"+ - " FSType:\t%v\n"+ - " Partition:\t%v\n"+ - " ReadOnly:\t%v\n", - gce.PDName, gce.FSType, gce.Partition, gce.ReadOnly) -} - -func printAWSElasticBlockStoreVolumeSource(aws *api.AWSElasticBlockStoreVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tAWSElasticBlockStore (a Persistent Disk resource in AWS)\n"+ - " VolumeID:\t%v\n"+ - " FSType:\t%v\n"+ - " Partition:\t%v\n"+ - " ReadOnly:\t%v\n", - aws.VolumeID, aws.FSType, aws.Partition, aws.ReadOnly) -} - -func printGitRepoVolumeSource(git *api.GitRepoVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tGitRepo (a volume that is pulled from git when the pod is created)\n"+ - " Repository:\t%v\n"+ - " Revision:\t%v\n", - git.Repository, git.Revision) -} - -func printSecretVolumeSource(secret *api.SecretVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tSecret (a volume populated by a Secret)\n"+ - " SecretName:\t%v\n", secret.SecretName) -} - -func printConfigMapVolumeSource(configMap *api.ConfigMapVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tConfigMap (a volume populated by a ConfigMap)\n"+ - " Name:\t%v\n", configMap.Name) -} - -func printNFSVolumeSource(nfs *api.NFSVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tNFS (an NFS mount that lasts the lifetime of a pod)\n"+ - " Server:\t%v\n"+ - " Path:\t%v\n"+ - " ReadOnly:\t%v\n", - nfs.Server, nfs.Path, nfs.ReadOnly) -} - -func printQuobyteVolumeSource(quobyte *api.QuobyteVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tQuobyte (a Quobyte mount on the host that shares a pod's lifetime)\n"+ - " Registry:\t%v\n"+ - " Volume:\t%v\n"+ - " ReadOnly:\t%v\n", - quobyte.Registry, quobyte.Volume, quobyte.ReadOnly) -} - -func printISCSIVolumeSource(iscsi *api.ISCSIVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tISCSI (an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod)\n"+ - " TargetPortal:\t%v\n"+ - " IQN:\t%v\n"+ - " Lun:\t%v\n"+ - " ISCSIInterface\t%v\n"+ - " FSType:\t%v\n"+ - " ReadOnly:\t%v\n", - iscsi.TargetPortal, iscsi.IQN, iscsi.Lun, iscsi.ISCSIInterface, iscsi.FSType, iscsi.ReadOnly) -} - -func printGlusterfsVolumeSource(glusterfs *api.GlusterfsVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tGlusterfs (a Glusterfs mount on the host that shares a pod's lifetime)\n"+ - " EndpointsName:\t%v\n"+ - " Path:\t%v\n"+ - " ReadOnly:\t%v\n", - glusterfs.EndpointsName, glusterfs.Path, glusterfs.ReadOnly) -} - -func printPersistentVolumeClaimVolumeSource(claim *api.PersistentVolumeClaimVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tPersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)\n"+ - " ClaimName:\t%v\n"+ - " ReadOnly:\t%v\n", - claim.ClaimName, claim.ReadOnly) -} - -func 
printRBDVolumeSource(rbd *api.RBDVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tRBD (a Rados Block Device mount on the host that shares a pod's lifetime)\n"+ - " CephMonitors:\t%v\n"+ - " RBDImage:\t%v\n"+ - " FSType:\t%v\n"+ - " RBDPool:\t%v\n"+ - " RadosUser:\t%v\n"+ - " Keyring:\t%v\n"+ - " SecretRef:\t%v\n"+ - " ReadOnly:\t%v\n", - rbd.CephMonitors, rbd.RBDImage, rbd.FSType, rbd.RBDPool, rbd.RadosUser, rbd.Keyring, rbd.SecretRef, rbd.ReadOnly) -} - -func printDownwardAPIVolumeSource(d *api.DownwardAPIVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tDownwardAPI (a volume populated by information about the pod)\n Items:\n") - for _, mapping := range d.Items { - if mapping.FieldRef != nil { - fmt.Fprintf(out, " %v -> %v\n", mapping.FieldRef.FieldPath, mapping.Path) - } - if mapping.ResourceFieldRef != nil { - fmt.Fprintf(out, " %v -> %v\n", mapping.ResourceFieldRef.Resource, mapping.Path) - } - } -} - -func printAzureDiskVolumeSource(d *api.AzureDiskVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tAzureDisk (an Azure Data Disk mount on the host and bind mount to the pod)\n"+ - " DiskName:\t%v\n"+ - " DiskURI:\t%v\n"+ - " FSType:\t%v\n"+ - " CachingMode:\t%v\n"+ - " ReadOnly:\t%v\n", - d.DiskName, d.DataDiskURI, *d.FSType, *d.CachingMode, *d.ReadOnly) -} - -func printVsphereVolumeSource(vsphere *api.VsphereVirtualDiskVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tvSphereVolume (a Persistent Disk resource in vSphere)\n"+ - " VolumePath:\t%v\n"+ - " FSType:\t%v\n", - vsphere.VolumePath, vsphere.FSType) -} - -func printPhotonPersistentDiskVolumeSource(photon *api.PhotonPersistentDiskVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tPhotonPersistentDisk (a Persistent Disk resource in photon platform)\n"+ - " PdID:\t%v\n"+ - " FSType:\t%v\n", - photon.PdID, photon.FSType) -} - -func printCinderVolumeSource(cinder *api.CinderVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tCinder (a Persistent Disk resource in OpenStack)\n"+ - " VolumeID:\t%v\n"+ - " FSType:\t%v\n"+ - " ReadOnly:\t%v\n", - cinder.VolumeID, cinder.FSType, cinder.ReadOnly) -} - -type PersistentVolumeDescriber struct { - clientset.Interface -} - -func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - c := d.Core().PersistentVolumes() - - pv, err := c.Get(name) - if err != nil { - return "", err - } - - storage := pv.Spec.Capacity[api.ResourceStorage] - - var events *api.EventList - if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(pv) - } - - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", pv.Name) - printLabelsMultiline(out, "Labels", pv.Labels) - fmt.Fprintf(out, "StorageClass:\t%s\n", storageutil.GetStorageClassAnnotation(pv.ObjectMeta)) - fmt.Fprintf(out, "Status:\t%s\n", pv.Status.Phase) - if pv.Spec.ClaimRef != nil { - fmt.Fprintf(out, "Claim:\t%s\n", pv.Spec.ClaimRef.Namespace+"/"+pv.Spec.ClaimRef.Name) - } else { - fmt.Fprintf(out, "Claim:\t%s\n", "") - } - fmt.Fprintf(out, "Reclaim Policy:\t%v\n", pv.Spec.PersistentVolumeReclaimPolicy) - fmt.Fprintf(out, "Access Modes:\t%s\n", api.GetAccessModesAsString(pv.Spec.AccessModes)) - fmt.Fprintf(out, "Capacity:\t%s\n", storage.String()) - fmt.Fprintf(out, "Message:\t%s\n", pv.Status.Message) - fmt.Fprintf(out, "Source:\n") - - switch { - case pv.Spec.HostPath != nil: - printHostPathVolumeSource(pv.Spec.HostPath, out) - case pv.Spec.GCEPersistentDisk != nil: - 
printGCEPersistentDiskVolumeSource(pv.Spec.GCEPersistentDisk, out) - case pv.Spec.AWSElasticBlockStore != nil: - printAWSElasticBlockStoreVolumeSource(pv.Spec.AWSElasticBlockStore, out) - case pv.Spec.NFS != nil: - printNFSVolumeSource(pv.Spec.NFS, out) - case pv.Spec.ISCSI != nil: - printISCSIVolumeSource(pv.Spec.ISCSI, out) - case pv.Spec.Glusterfs != nil: - printGlusterfsVolumeSource(pv.Spec.Glusterfs, out) - case pv.Spec.RBD != nil: - printRBDVolumeSource(pv.Spec.RBD, out) - case pv.Spec.Quobyte != nil: - printQuobyteVolumeSource(pv.Spec.Quobyte, out) - case pv.Spec.VsphereVolume != nil: - printVsphereVolumeSource(pv.Spec.VsphereVolume, out) - case pv.Spec.Cinder != nil: - printCinderVolumeSource(pv.Spec.Cinder, out) - case pv.Spec.AzureDisk != nil: - printAzureDiskVolumeSource(pv.Spec.AzureDisk, out) - case pv.Spec.PhotonPersistentDisk != nil: - printPhotonPersistentDiskVolumeSource(pv.Spec.PhotonPersistentDisk, out) - } - - if events != nil { - DescribeEvents(events, out) - } - - return nil - }) -} - -type PersistentVolumeClaimDescriber struct { - clientset.Interface -} - -func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - c := d.Core().PersistentVolumeClaims(namespace) - - pvc, err := c.Get(name) - if err != nil { - return "", err - } - - storage := pvc.Spec.Resources.Requests[api.ResourceStorage] - capacity := "" - accessModes := "" - if pvc.Spec.VolumeName != "" { - accessModes = api.GetAccessModesAsString(pvc.Status.AccessModes) - storage = pvc.Status.Capacity[api.ResourceStorage] - capacity = storage.String() - } - - events, _ := d.Core().Events(namespace).Search(pvc) - - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", pvc.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", pvc.Namespace) - fmt.Fprintf(out, "StorageClass:\t%s\n", storageutil.GetStorageClassAnnotation(pvc.ObjectMeta)) - fmt.Fprintf(out, "Status:\t%v\n", pvc.Status.Phase) - fmt.Fprintf(out, "Volume:\t%s\n", pvc.Spec.VolumeName) - printLabelsMultiline(out, "Labels", pvc.Labels) - fmt.Fprintf(out, "Capacity:\t%s\n", capacity) - fmt.Fprintf(out, "Access Modes:\t%s\n", accessModes) - if events != nil { - DescribeEvents(events, out) - } - - return nil - }) -} - -// TODO: Do a better job at indenting, maybe by using a prefix writer -func describeContainers(label string, containers []api.Container, containerStatuses []api.ContainerStatus, resolverFn EnvVarResolverFunc, out io.Writer, space string) { - statuses := map[string]api.ContainerStatus{} - for _, status := range containerStatuses { - statuses[status.Name] = status - } - if len(containers) == 0 { - fmt.Fprintf(out, "%s%s: \n", space, label) - } else { - fmt.Fprintf(out, "%s%s:\n", space, label) - } - for _, container := range containers { - status, ok := statuses[container.Name] - nameIndent := "" - if len(space) > 0 { - nameIndent = " " - } - fmt.Fprintf(out, " %s%v:\n", nameIndent, container.Name) - if ok { - fmt.Fprintf(out, " Container ID:\t%s\n", status.ContainerID) - } - fmt.Fprintf(out, " Image:\t%s\n", container.Image) - if ok { - fmt.Fprintf(out, " Image ID:\t%s\n", status.ImageID) - } - portString := describeContainerPorts(container.Ports) - if strings.Contains(portString, ",") { - fmt.Fprintf(out, " Ports:\t%s\n", portString) - } else { - fmt.Fprintf(out, " Port:\t%s\n", portString) - } - - if len(container.Command) > 0 { - fmt.Fprintf(out, " Command:\n") - for _, c := range container.Command { - fmt.Fprintf(out, " %s\n", c) - } - } - if 
len(container.Args) > 0 { - fmt.Fprintf(out, " Args:\n") - for _, arg := range container.Args { - fmt.Fprintf(out, " %s\n", arg) - } - } - - resources := container.Resources - if len(resources.Limits) > 0 { - fmt.Fprintf(out, " Limits:\n") - } - for _, name := range SortedResourceNames(resources.Limits) { - quantity := resources.Limits[name] - fmt.Fprintf(out, " %s:\t%s\n", name, quantity.String()) - } - - if len(resources.Requests) > 0 { - fmt.Fprintf(out, " Requests:\n") - } - for _, name := range SortedResourceNames(resources.Requests) { - quantity := resources.Requests[name] - fmt.Fprintf(out, " %s:\t%s\n", name, quantity.String()) - } - - if ok { - describeStatus("State", status.State, out) - if status.LastTerminationState.Terminated != nil { - describeStatus("Last State", status.LastTerminationState, out) - } - fmt.Fprintf(out, " Ready:\t%v\n", printBool(status.Ready)) - fmt.Fprintf(out, " Restart Count:\t%d\n", status.RestartCount) - } - - if container.LivenessProbe != nil { - probe := DescribeProbe(container.LivenessProbe) - fmt.Fprintf(out, " Liveness:\t%s\n", probe) - } - if container.ReadinessProbe != nil { - probe := DescribeProbe(container.ReadinessProbe) - fmt.Fprintf(out, " Readiness:\t%s\n", probe) - } - - none := "" - if len(container.VolumeMounts) == 0 { - none = "\t" - } - - fmt.Fprintf(out, " Volume Mounts:%s\n", none) - sort.Sort(SortableVolumeMounts(container.VolumeMounts)) - for _, mount := range container.VolumeMounts { - flags := []string{} - switch { - case mount.ReadOnly: - flags = append(flags, "ro") - case !mount.ReadOnly: - flags = append(flags, "rw") - case len(mount.SubPath) > 0: - flags = append(flags, fmt.Sprintf("path=%q", mount.SubPath)) - } - fmt.Fprintf(out, " %s from %s (%s)\n", mount.MountPath, mount.Name, strings.Join(flags, ",")) - } - - none = "" - if len(container.Env) == 0 { - none = "\t" - } - fmt.Fprintf(out, " Environment Variables:%s\n", none) - for _, e := range container.Env { - if e.ValueFrom == nil { - fmt.Fprintf(out, " %s:\t%s\n", e.Name, e.Value) - continue - } - - switch { - case e.ValueFrom.FieldRef != nil: - var valueFrom string - if resolverFn != nil { - valueFrom = resolverFn(e) - } - fmt.Fprintf(out, " %s:\t%s (%s:%s)\n", e.Name, valueFrom, e.ValueFrom.FieldRef.APIVersion, e.ValueFrom.FieldRef.FieldPath) - case e.ValueFrom.ResourceFieldRef != nil: - valueFrom, err := fieldpath.ExtractContainerResourceValue(e.ValueFrom.ResourceFieldRef, &container) - if err != nil { - valueFrom = "" - } - resource := e.ValueFrom.ResourceFieldRef.Resource - if valueFrom == "0" && (resource == "limits.cpu" || resource == "limits.memory") { - valueFrom = "node allocatable" - } - fmt.Fprintf(out, " %s:\t%s (%s)\n", e.Name, valueFrom, resource) - case e.ValueFrom.SecretKeyRef != nil: - fmt.Fprintf(out, " %s:\t\n", e.Name, e.ValueFrom.SecretKeyRef.Key, e.ValueFrom.SecretKeyRef.Name) - case e.ValueFrom.ConfigMapKeyRef != nil: - fmt.Fprintf(out, " %s:\t\n", e.Name, e.ValueFrom.ConfigMapKeyRef.Key, e.ValueFrom.ConfigMapKeyRef.Name) - } - } - } -} - -func describeContainerPorts(cPorts []api.ContainerPort) string { - ports := make([]string, 0, len(cPorts)) - for _, cPort := range cPorts { - ports = append(ports, fmt.Sprintf("%d/%s", cPort.ContainerPort, cPort.Protocol)) - } - return strings.Join(ports, ", ") -} - -// DescribeProbe is exported for consumers in other API groups that have probes -func DescribeProbe(probe *api.Probe) string { - attrs := fmt.Sprintf("delay=%ds timeout=%ds period=%ds #success=%d #failure=%d", probe.InitialDelaySeconds, 
probe.TimeoutSeconds, probe.PeriodSeconds, probe.SuccessThreshold, probe.FailureThreshold) - switch { - case probe.Exec != nil: - return fmt.Sprintf("exec %v %s", probe.Exec.Command, attrs) - case probe.HTTPGet != nil: - url := &url.URL{} - url.Scheme = strings.ToLower(string(probe.HTTPGet.Scheme)) - if len(probe.HTTPGet.Port.String()) > 0 { - url.Host = net.JoinHostPort(probe.HTTPGet.Host, probe.HTTPGet.Port.String()) - } else { - url.Host = probe.HTTPGet.Host - } - url.Path = probe.HTTPGet.Path - return fmt.Sprintf("http-get %s %s", url.String(), attrs) - case probe.TCPSocket != nil: - return fmt.Sprintf("tcp-socket :%s %s", probe.TCPSocket.Port.String(), attrs) - } - return fmt.Sprintf("unknown %s", attrs) -} - -type EnvVarResolverFunc func(e api.EnvVar) string - -// EnvValueFrom is exported for use by describers in other packages -func EnvValueRetriever(pod *api.Pod) EnvVarResolverFunc { - return func(e api.EnvVar) string { - internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(e.ValueFrom.FieldRef.APIVersion, "Pod", e.ValueFrom.FieldRef.FieldPath, "") - if err != nil { - return "" // pod validation should catch this on create - } - - valueFrom, err := fieldpath.ExtractFieldPathAsString(pod, internalFieldPath) - if err != nil { - return "" // pod validation should catch this on create - } - - return valueFrom - } -} - -func describeStatus(stateName string, state api.ContainerState, out io.Writer) { - switch { - case state.Running != nil: - fmt.Fprintf(out, " %s:\tRunning\n", stateName) - fmt.Fprintf(out, " Started:\t%v\n", state.Running.StartedAt.Time.Format(time.RFC1123Z)) - case state.Waiting != nil: - fmt.Fprintf(out, " %s:\tWaiting\n", stateName) - if state.Waiting.Reason != "" { - fmt.Fprintf(out, " Reason:\t%s\n", state.Waiting.Reason) - } - case state.Terminated != nil: - fmt.Fprintf(out, " %s:\tTerminated\n", stateName) - if state.Terminated.Reason != "" { - fmt.Fprintf(out, " Reason:\t%s\n", state.Terminated.Reason) - } - if state.Terminated.Message != "" { - fmt.Fprintf(out, " Message:\t%s\n", state.Terminated.Message) - } - fmt.Fprintf(out, " Exit Code:\t%d\n", state.Terminated.ExitCode) - if state.Terminated.Signal > 0 { - fmt.Fprintf(out, " Signal:\t%d\n", state.Terminated.Signal) - } - fmt.Fprintf(out, " Started:\t%s\n", state.Terminated.StartedAt.Time.Format(time.RFC1123Z)) - fmt.Fprintf(out, " Finished:\t%s\n", state.Terminated.FinishedAt.Time.Format(time.RFC1123Z)) - default: - fmt.Fprintf(out, " %s:\tWaiting\n", stateName) - } -} - -func printBoolPtr(value *bool) string { - if value != nil { - return printBool(*value) - } - - return "" -} - -func printBool(value bool) string { - if value { - return "True" - } - - return "False" -} - -// ReplicationControllerDescriber generates information about a replication controller -// and the pods it has created. 
-type ReplicationControllerDescriber struct { - clientset.Interface -} - -func (d *ReplicationControllerDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - rc := d.Core().ReplicationControllers(namespace) - pc := d.Core().Pods(namespace) - - controller, err := rc.Get(name) - if err != nil { - return "", err - } - - running, waiting, succeeded, failed, err := getPodStatusForController(pc, labels.SelectorFromSet(controller.Spec.Selector)) - if err != nil { - return "", err - } - - var events *api.EventList - if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(controller) - } - - return describeReplicationController(controller, events, running, waiting, succeeded, failed) -} - -func describeReplicationController(controller *api.ReplicationController, events *api.EventList, running, waiting, succeeded, failed int) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", controller.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", controller.Namespace) - if controller.Spec.Template != nil { - fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&controller.Spec.Template.Spec)) - } else { - fmt.Fprintf(out, "Image(s):\t%s\n", "") - } - fmt.Fprintf(out, "Selector:\t%s\n", labels.FormatLabels(controller.Spec.Selector)) - printLabelsMultiline(out, "Labels", controller.Labels) - fmt.Fprintf(out, "Replicas:\t%d current / %d desired\n", controller.Status.Replicas, controller.Spec.Replicas) - fmt.Fprintf(out, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) - if controller.Spec.Template != nil { - describeVolumes(controller.Spec.Template.Spec.Volumes, out, "") - } - if events != nil { - DescribeEvents(events, out) - } - return nil - }) -} - -func DescribePodTemplate(template *api.PodTemplateSpec, out io.Writer) { - if template == nil { - fmt.Fprintf(out, " ") - return - } - printLabelsMultiline(out, " Labels", template.Labels) - if len(template.Annotations) > 0 { - printLabelsMultiline(out, " Annotations", template.Annotations) - } - if len(template.Spec.ServiceAccountName) > 0 { - fmt.Fprintf(out, " Service Account:\t%s\n", template.Spec.ServiceAccountName) - } - if len(template.Spec.InitContainers) > 0 { - describeContainers("Init Containers", template.Spec.InitContainers, nil, nil, out, " ") - } - describeContainers("Containers", template.Spec.Containers, nil, nil, out, " ") - describeVolumes(template.Spec.Volumes, out, " ") -} - -// ReplicaSetDescriber generates information about a ReplicaSet and the pods it has created. 
-type ReplicaSetDescriber struct { - clientset.Interface -} - -func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - rsc := d.Extensions().ReplicaSets(namespace) - pc := d.Core().Pods(namespace) - - rs, err := rsc.Get(name) - if err != nil { - return "", err - } - - selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector) - if err != nil { - return "", err - } - - running, waiting, succeeded, failed, getPodErr := getPodStatusForController(pc, selector) - - var events *api.EventList - if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(rs) - } - - return describeReplicaSet(rs, events, running, waiting, succeeded, failed, getPodErr) -} - -func describeReplicaSet(rs *extensions.ReplicaSet, events *api.EventList, running, waiting, succeeded, failed int, getPodErr error) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", rs.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", rs.Namespace) - fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&rs.Spec.Template.Spec)) - fmt.Fprintf(out, "Selector:\t%s\n", unversioned.FormatLabelSelector(rs.Spec.Selector)) - printLabelsMultiline(out, "Labels", rs.Labels) - fmt.Fprintf(out, "Replicas:\t%d current / %d desired\n", rs.Status.Replicas, rs.Spec.Replicas) - fmt.Fprintf(out, "Pods Status:\t") - if getPodErr != nil { - fmt.Fprintf(out, "error in fetching pods: %s\n", getPodErr) - } else { - fmt.Fprintf(out, "%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) - } - describeVolumes(rs.Spec.Template.Spec.Volumes, out, "") - if events != nil { - DescribeEvents(events, out) - } - return nil - }) -} - -// JobDescriber generates information about a job and the pods it has created. 
-type JobDescriber struct { - clientset.Interface -} - -func (d *JobDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - job, err := d.Batch().Jobs(namespace).Get(name) - if err != nil { - return "", err - } - - var events *api.EventList - if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(job) - } - - return describeJob(job, events) -} - -func describeJob(job *batch.Job, events *api.EventList) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", job.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", job.Namespace) - fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&job.Spec.Template.Spec)) - selector, _ := unversioned.LabelSelectorAsSelector(job.Spec.Selector) - fmt.Fprintf(out, "Selector:\t%s\n", selector) - fmt.Fprintf(out, "Parallelism:\t%d\n", *job.Spec.Parallelism) - if job.Spec.Completions != nil { - fmt.Fprintf(out, "Completions:\t%d\n", *job.Spec.Completions) - } else { - fmt.Fprintf(out, "Completions:\t\n") - } - if job.Status.StartTime != nil { - fmt.Fprintf(out, "Start Time:\t%s\n", job.Status.StartTime.Time.Format(time.RFC1123Z)) - } - if job.Spec.ActiveDeadlineSeconds != nil { - fmt.Fprintf(out, "Active Deadline Seconds:\t%ds\n", *job.Spec.ActiveDeadlineSeconds) - } - printLabelsMultiline(out, "Labels", job.Labels) - fmt.Fprintf(out, "Pods Statuses:\t%d Running / %d Succeeded / %d Failed\n", job.Status.Active, job.Status.Succeeded, job.Status.Failed) - describeVolumes(job.Spec.Template.Spec.Volumes, out, "") - if events != nil { - DescribeEvents(events, out) - } - return nil - }) -} - -// CronJobDescriber generates information about a scheduled job and the jobs it has created. -type CronJobDescriber struct { - clientset.Interface -} - -func (d *CronJobDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - scheduledJob, err := d.Batch().CronJobs(namespace).Get(name) - if err != nil { - return "", err - } - - var events *api.EventList - if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(scheduledJob) - } - - return describeCronJob(scheduledJob, events) -} - -func describeCronJob(scheduledJob *batch.CronJob, events *api.EventList) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", scheduledJob.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", scheduledJob.Namespace) - fmt.Fprintf(out, "Schedule:\t%s\n", scheduledJob.Spec.Schedule) - fmt.Fprintf(out, "Concurrency Policy:\t%s\n", scheduledJob.Spec.ConcurrencyPolicy) - fmt.Fprintf(out, "Suspend:\t%s\n", printBoolPtr(scheduledJob.Spec.Suspend)) - if scheduledJob.Spec.StartingDeadlineSeconds != nil { - fmt.Fprintf(out, "Starting Deadline Seconds:\t%ds\n", *scheduledJob.Spec.StartingDeadlineSeconds) - } else { - fmt.Fprintf(out, "Starting Deadline Seconds:\t\n") - } - describeJobTemplate(scheduledJob.Spec.JobTemplate, out) - printLabelsMultiline(out, "Labels", scheduledJob.Labels) - if scheduledJob.Status.LastScheduleTime != nil { - fmt.Fprintf(out, "Last Schedule Time:\t%s\n", scheduledJob.Status.LastScheduleTime.Time.Format(time.RFC1123Z)) - } else { - fmt.Fprintf(out, "Last Schedule Time:\t\n") - } - printActiveJobs(out, "Active Jobs", scheduledJob.Status.Active) - if events != nil { - DescribeEvents(events, out) - } - return nil - }) -} - -func describeJobTemplate(jobTemplate batch.JobTemplateSpec, out io.Writer) { - fmt.Fprintf(out, "Image(s):\t%s\n", 
makeImageList(&jobTemplate.Spec.Template.Spec)) - if jobTemplate.Spec.Selector != nil { - selector, _ := unversioned.LabelSelectorAsSelector(jobTemplate.Spec.Selector) - fmt.Fprintf(out, "Selector:\t%s\n", selector) - } else { - fmt.Fprintf(out, "Selector:\t\n") - } - if jobTemplate.Spec.Parallelism != nil { - fmt.Fprintf(out, "Parallelism:\t%d\n", *jobTemplate.Spec.Parallelism) - } else { - fmt.Fprintf(out, "Parallelism:\t\n") - } - if jobTemplate.Spec.Completions != nil { - fmt.Fprintf(out, "Completions:\t%d\n", *jobTemplate.Spec.Completions) - } else { - fmt.Fprintf(out, "Completions:\t\n") - } - if jobTemplate.Spec.ActiveDeadlineSeconds != nil { - fmt.Fprintf(out, "Active Deadline Seconds:\t%ds\n", *jobTemplate.Spec.ActiveDeadlineSeconds) - } - describeVolumes(jobTemplate.Spec.Template.Spec.Volumes, out, "") -} - -func printActiveJobs(out io.Writer, title string, jobs []api.ObjectReference) { - fmt.Fprintf(out, "%s:\t", title) - if len(jobs) == 0 { - fmt.Fprintln(out, "") - return - } - - for i, job := range jobs { - if i != 0 { - fmt.Fprint(out, ", ") - } - fmt.Fprintf(out, "%s", job.Name) - } - fmt.Fprintln(out, "") -} - -// DaemonSetDescriber generates information about a daemon set and the pods it has created. -type DaemonSetDescriber struct { - clientset.Interface -} - -func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - dc := d.Extensions().DaemonSets(namespace) - pc := d.Core().Pods(namespace) - - daemon, err := dc.Get(name) - if err != nil { - return "", err - } - - selector, err := unversioned.LabelSelectorAsSelector(daemon.Spec.Selector) - if err != nil { - return "", err - } - running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector) - if err != nil { - return "", err - } - - var events *api.EventList - if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(daemon) - } - - return describeDaemonSet(daemon, events, running, waiting, succeeded, failed) -} - -func describeDaemonSet(daemon *extensions.DaemonSet, events *api.EventList, running, waiting, succeeded, failed int) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", daemon.Name) - fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&daemon.Spec.Template.Spec)) - selector, err := unversioned.LabelSelectorAsSelector(daemon.Spec.Selector) - if err != nil { - // this shouldn't happen if LabelSelector passed validation - return err - } - fmt.Fprintf(out, "Selector:\t%s\n", selector) - fmt.Fprintf(out, "Node-Selector:\t%s\n", labels.FormatLabels(daemon.Spec.Template.Spec.NodeSelector)) - printLabelsMultiline(out, "Labels", daemon.Labels) - fmt.Fprintf(out, "Desired Number of Nodes Scheduled: %d\n", daemon.Status.DesiredNumberScheduled) - fmt.Fprintf(out, "Current Number of Nodes Scheduled: %d\n", daemon.Status.CurrentNumberScheduled) - fmt.Fprintf(out, "Number of Nodes Misscheduled: %d\n", daemon.Status.NumberMisscheduled) - fmt.Fprintf(out, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) - if events != nil { - DescribeEvents(events, out) - } - return nil - }) -} - -// SecretDescriber generates information about a secret -type SecretDescriber struct { - clientset.Interface -} - -func (d *SecretDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - c := d.Core().Secrets(namespace) - - secret, err := c.Get(name) - if err != nil { - return "", err - } - - 
return describeSecret(secret) -} - -func describeSecret(secret *api.Secret) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", secret.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", secret.Namespace) - printLabelsMultiline(out, "Labels", secret.Labels) - printLabelsMultiline(out, "Annotations", secret.Annotations) - - fmt.Fprintf(out, "\nType:\t%s\n", secret.Type) - - fmt.Fprintf(out, "\nData\n====\n") - for k, v := range secret.Data { - switch { - case k == api.ServiceAccountTokenKey && secret.Type == api.SecretTypeServiceAccountToken: - fmt.Fprintf(out, "%s:\t%s\n", k, string(v)) - default: - fmt.Fprintf(out, "%s:\t%d bytes\n", k, len(v)) - } - } - - return nil - }) -} - -type IngressDescriber struct { - clientset.Interface -} - -func (i *IngressDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - c := i.Extensions().Ingresses(namespace) - ing, err := c.Get(name) - if err != nil { - return "", err - } - return i.describeIngress(ing, describerSettings) -} - -func (i *IngressDescriber) describeBackend(ns string, backend *extensions.IngressBackend) string { - endpoints, _ := i.Core().Endpoints(ns).Get(backend.ServiceName) - service, _ := i.Core().Services(ns).Get(backend.ServiceName) - spName := "" - for i := range service.Spec.Ports { - sp := &service.Spec.Ports[i] - switch backend.ServicePort.Type { - case intstr.String: - if backend.ServicePort.StrVal == sp.Name { - spName = sp.Name - } - case intstr.Int: - if int32(backend.ServicePort.IntVal) == sp.Port { - spName = sp.Name - } - } - } - return formatEndpoints(endpoints, sets.NewString(spName)) -} - -func (i *IngressDescriber) describeIngress(ing *extensions.Ingress, describerSettings DescriberSettings) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%v\n", ing.Name) - fmt.Fprintf(out, "Namespace:\t%v\n", ing.Namespace) - fmt.Fprintf(out, "Address:\t%v\n", loadBalancerStatusStringer(ing.Status.LoadBalancer, true)) - def := ing.Spec.Backend - ns := ing.Namespace - if def == nil { - // Ingresses that don't specify a default backend inherit the - // default backend in the kube-system namespace. 
- def = &extensions.IngressBackend{ - ServiceName: "default-http-backend", - ServicePort: intstr.IntOrString{Type: intstr.Int, IntVal: 80}, - } - ns = api.NamespaceSystem - } - fmt.Fprintf(out, "Default backend:\t%s (%s)\n", backendStringer(def), i.describeBackend(ns, def)) - if len(ing.Spec.TLS) != 0 { - describeIngressTLS(out, ing.Spec.TLS) - } - fmt.Fprint(out, "Rules:\n Host\tPath\tBackends\n") - fmt.Fprint(out, " ----\t----\t--------\n") - count := 0 - for _, rules := range ing.Spec.Rules { - if rules.HTTP == nil { - continue - } - count++ - host := rules.Host - if len(host) == 0 { - host = "*" - } - fmt.Fprintf(out, " %s\t\n", host) - for _, path := range rules.HTTP.Paths { - fmt.Fprintf(out, " \t%s \t%s (%s)\n", path.Path, backendStringer(&path.Backend), i.describeBackend(ns, &path.Backend)) - } - } - if count == 0 { - fmt.Fprintf(out, " %s\t%s \t%s (%s)\n", "*", "*", backendStringer(def), i.describeBackend(ns, def)) - } - describeIngressAnnotations(out, ing.Annotations) - - if describerSettings.ShowEvents { - events, _ := i.Core().Events(ing.Namespace).Search(ing) - if events != nil { - DescribeEvents(events, out) - } - } - return nil - }) -} - -func describeIngressTLS(out io.Writer, ingTLS []extensions.IngressTLS) { - fmt.Fprintf(out, "TLS:\n") - for _, t := range ingTLS { - if t.SecretName == "" { - fmt.Fprintf(out, " SNI routes %v\n", strings.Join(t.Hosts, ",")) - } else { - fmt.Fprintf(out, " %v terminates %v\n", t.SecretName, strings.Join(t.Hosts, ",")) - } - } - return -} - -// TODO: Move from annotations into Ingress status. -func describeIngressAnnotations(out io.Writer, annotations map[string]string) { - fmt.Fprintf(out, "Annotations:\n") - for k, v := range annotations { - if !strings.HasPrefix(k, "ingress") { - continue - } - parts := strings.Split(k, "/") - name := parts[len(parts)-1] - fmt.Fprintf(out, " %v:\t%s\n", name, v) - } - return -} - -// ServiceDescriber generates information about a service. 
-type ServiceDescriber struct { - clientset.Interface -} - -func (d *ServiceDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - c := d.Core().Services(namespace) - - service, err := c.Get(name) - if err != nil { - return "", err - } - - endpoints, _ := d.Core().Endpoints(namespace).Get(name) - var events *api.EventList - if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(service) - } - return describeService(service, endpoints, events) -} - -func buildIngressString(ingress []api.LoadBalancerIngress) string { - var buffer bytes.Buffer - - for i := range ingress { - if i != 0 { - buffer.WriteString(", ") - } - if ingress[i].IP != "" { - buffer.WriteString(ingress[i].IP) - } else { - buffer.WriteString(ingress[i].Hostname) - } - } - return buffer.String() -} - -func describeService(service *api.Service, endpoints *api.Endpoints, events *api.EventList) (string, error) { - if endpoints == nil { - endpoints = &api.Endpoints{} - } - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", service.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", service.Namespace) - printLabelsMultiline(out, "Labels", service.Labels) - fmt.Fprintf(out, "Selector:\t%s\n", labels.FormatLabels(service.Spec.Selector)) - fmt.Fprintf(out, "Type:\t%s\n", service.Spec.Type) - fmt.Fprintf(out, "IP:\t%s\n", service.Spec.ClusterIP) - if len(service.Spec.ExternalIPs) > 0 { - fmt.Fprintf(out, "External IPs:\t%v\n", strings.Join(service.Spec.ExternalIPs, ",")) - } - if service.Spec.ExternalName != "" { - fmt.Fprintf(out, "External Name:\t%s\n", service.Spec.ExternalName) - } - if len(service.Status.LoadBalancer.Ingress) > 0 { - list := buildIngressString(service.Status.LoadBalancer.Ingress) - fmt.Fprintf(out, "LoadBalancer Ingress:\t%s\n", list) - } - for i := range service.Spec.Ports { - sp := &service.Spec.Ports[i] - - name := sp.Name - if name == "" { - name = "" - } - fmt.Fprintf(out, "Port:\t%s\t%d/%s\n", name, sp.Port, sp.Protocol) - if sp.NodePort != 0 { - fmt.Fprintf(out, "NodePort:\t%s\t%d/%s\n", name, sp.NodePort, sp.Protocol) - } - fmt.Fprintf(out, "Endpoints:\t%s\n", formatEndpoints(endpoints, sets.NewString(sp.Name))) - } - fmt.Fprintf(out, "Session Affinity:\t%s\n", service.Spec.SessionAffinity) - if events != nil { - DescribeEvents(events, out) - } - return nil - }) -} - -// EndpointsDescriber generates information about an Endpoint. 
-type EndpointsDescriber struct { - clientset.Interface -} - -func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - c := d.Core().Endpoints(namespace) - - ep, err := c.Get(name) - if err != nil { - return "", err - } - - var events *api.EventList - if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(ep) - } - - return describeEndpoints(ep, events) -} - -func describeEndpoints(ep *api.Endpoints, events *api.EventList) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", ep.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", ep.Namespace) - printLabelsMultiline(out, "Labels", ep.Labels) - - fmt.Fprintf(out, "Subsets:\n") - for i := range ep.Subsets { - subset := &ep.Subsets[i] - - addresses := make([]string, 0, len(subset.Addresses)) - for _, addr := range subset.Addresses { - addresses = append(addresses, addr.IP) - } - addressesString := strings.Join(addresses, ",") - if len(addressesString) == 0 { - addressesString = "" - } - fmt.Fprintf(out, " Addresses:\t%s\n", addressesString) - - notReadyAddresses := make([]string, 0, len(subset.NotReadyAddresses)) - for _, addr := range subset.NotReadyAddresses { - notReadyAddresses = append(notReadyAddresses, addr.IP) - } - notReadyAddressesString := strings.Join(notReadyAddresses, ",") - if len(notReadyAddressesString) == 0 { - notReadyAddressesString = "" - } - fmt.Fprintf(out, " NotReadyAddresses:\t%s\n", notReadyAddressesString) - - if len(subset.Ports) > 0 { - fmt.Fprintf(out, " Ports:\n") - fmt.Fprintf(out, " Name\tPort\tProtocol\n") - fmt.Fprintf(out, " ----\t----\t--------\n") - for _, port := range subset.Ports { - name := port.Name - if len(name) == 0 { - name = "" - } - fmt.Fprintf(out, " %s\t%d\t%s\n", name, port.Port, port.Protocol) - } - } - fmt.Fprintf(out, "\n") - } - - if events != nil { - DescribeEvents(events, out) - } - return nil - }) -} - -// ServiceAccountDescriber generates information about a service. 
-type ServiceAccountDescriber struct { - clientset.Interface -} - -func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - c := d.Core().ServiceAccounts(namespace) - - serviceAccount, err := c.Get(name) - if err != nil { - return "", err - } - - tokens := []api.Secret{} - - tokenSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)}) - options := api.ListOptions{FieldSelector: tokenSelector} - secrets, err := d.Core().Secrets(namespace).List(options) - if err == nil { - for _, s := range secrets.Items { - name, _ := s.Annotations[api.ServiceAccountNameKey] - uid, _ := s.Annotations[api.ServiceAccountUIDKey] - if name == serviceAccount.Name && uid == string(serviceAccount.UID) { - tokens = append(tokens, s) - } - } - } - - return describeServiceAccount(serviceAccount, tokens) -} - -func describeServiceAccount(serviceAccount *api.ServiceAccount, tokens []api.Secret) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", serviceAccount.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", serviceAccount.Namespace) - printLabelsMultiline(out, "Labels", serviceAccount.Labels) - fmt.Fprintln(out) - - var ( - emptyHeader = " " - pullHeader = "Image pull secrets:" - mountHeader = "Mountable secrets: " - tokenHeader = "Tokens: " - - pullSecretNames = []string{} - mountSecretNames = []string{} - tokenSecretNames = []string{} - ) - - for _, s := range serviceAccount.ImagePullSecrets { - pullSecretNames = append(pullSecretNames, s.Name) - } - for _, s := range serviceAccount.Secrets { - mountSecretNames = append(mountSecretNames, s.Name) - } - for _, s := range tokens { - tokenSecretNames = append(tokenSecretNames, s.Name) - } - - types := map[string][]string{ - pullHeader: pullSecretNames, - mountHeader: mountSecretNames, - tokenHeader: tokenSecretNames, - } - for header, names := range types { - if len(names) == 0 { - fmt.Fprintf(out, "%s\t\n", header) - } else { - prefix := header - for _, name := range names { - fmt.Fprintf(out, "%s\t%s\n", prefix, name) - prefix = emptyHeader - } - } - fmt.Fprintln(out) - } - - return nil - }) -} - -// NodeDescriber generates information about a node. -type NodeDescriber struct { - clientset.Interface -} - -func (d *NodeDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - mc := d.Core().Nodes() - node, err := mc.Get(name) - if err != nil { - return "", err - } - - fieldSelector, err := fields.ParseSelector("spec.nodeName=" + name + ",status.phase!=" + string(api.PodSucceeded) + ",status.phase!=" + string(api.PodFailed)) - if err != nil { - return "", err - } - // in a policy aware setting, users may have access to a node, but not all pods - // in that case, we note that the user does not have access to the pods - canViewPods := true - nodeNonTerminatedPodsList, err := d.Core().Pods(namespace).List(api.ListOptions{FieldSelector: fieldSelector}) - if err != nil { - if !errors.IsForbidden(err) { - return "", err - } - canViewPods = false - } - - var events *api.EventList - if describerSettings.ShowEvents { - if ref, err := api.GetReference(node); err != nil { - glog.Errorf("Unable to construct reference to '%#v': %v", node, err) - } else { - // TODO: We haven't decided the namespace for Node object yet. 
- ref.UID = types.UID(ref.Name) - events, _ = d.Core().Events("").Search(ref) - } - } - - return describeNode(node, nodeNonTerminatedPodsList, events, canViewPods) -} - -func describeNode(node *api.Node, nodeNonTerminatedPodsList *api.PodList, events *api.EventList, canViewPods bool) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", node.Name) - fmt.Fprintf(out, "Role:\t%s\n", findNodeRole(node)) - printLabelsMultiline(out, "Labels", node.Labels) - printTaintsInAnnotationMultiline(out, "Taints", node.Annotations) - fmt.Fprintf(out, "CreationTimestamp:\t%s\n", node.CreationTimestamp.Time.Format(time.RFC1123Z)) - fmt.Fprintf(out, "Phase:\t%v\n", node.Status.Phase) - if len(node.Status.Conditions) > 0 { - fmt.Fprint(out, "Conditions:\n Type\tStatus\tLastHeartbeatTime\tLastTransitionTime\tReason\tMessage\n") - fmt.Fprint(out, " ----\t------\t-----------------\t------------------\t------\t-------\n") - for _, c := range node.Status.Conditions { - fmt.Fprintf(out, " %v \t%v \t%s \t%s \t%v \t%v\n", - c.Type, - c.Status, - c.LastHeartbeatTime.Time.Format(time.RFC1123Z), - c.LastTransitionTime.Time.Format(time.RFC1123Z), - c.Reason, - c.Message) - } - } - addresses := make([]string, 0, len(node.Status.Addresses)) - for _, address := range node.Status.Addresses { - addresses = append(addresses, address.Address) - } - - printResourceList := func(resourceList api.ResourceList) { - resources := make([]api.ResourceName, 0, len(resourceList)) - for resource := range resourceList { - resources = append(resources, resource) - } - sort.Sort(SortableResourceNames(resources)) - for _, resource := range resources { - value := resourceList[resource] - fmt.Fprintf(out, " %s:\t%s\n", resource, value.String()) - } - } - - fmt.Fprintf(out, "Addresses:\t%s\n", strings.Join(addresses, ",")) - if len(node.Status.Capacity) > 0 { - fmt.Fprintf(out, "Capacity:\n") - printResourceList(node.Status.Capacity) - } - if len(node.Status.Allocatable) > 0 { - fmt.Fprintf(out, "Allocatable:\n") - printResourceList(node.Status.Allocatable) - } - - fmt.Fprintf(out, "System Info:\n") - fmt.Fprintf(out, " Machine ID:\t%s\n", node.Status.NodeInfo.MachineID) - fmt.Fprintf(out, " System UUID:\t%s\n", node.Status.NodeInfo.SystemUUID) - fmt.Fprintf(out, " Boot ID:\t%s\n", node.Status.NodeInfo.BootID) - fmt.Fprintf(out, " Kernel Version:\t%s\n", node.Status.NodeInfo.KernelVersion) - fmt.Fprintf(out, " OS Image:\t%s\n", node.Status.NodeInfo.OSImage) - fmt.Fprintf(out, " Operating System:\t%s\n", node.Status.NodeInfo.OperatingSystem) - fmt.Fprintf(out, " Architecture:\t%s\n", node.Status.NodeInfo.Architecture) - fmt.Fprintf(out, " Container Runtime Version:\t%s\n", node.Status.NodeInfo.ContainerRuntimeVersion) - fmt.Fprintf(out, " Kubelet Version:\t%s\n", node.Status.NodeInfo.KubeletVersion) - fmt.Fprintf(out, " Kube-Proxy Version:\t%s\n", node.Status.NodeInfo.KubeProxyVersion) - - if len(node.Spec.PodCIDR) > 0 { - fmt.Fprintf(out, "PodCIDR:\t%s\n", node.Spec.PodCIDR) - } - if len(node.Spec.ExternalID) > 0 { - fmt.Fprintf(out, "ExternalID:\t%s\n", node.Spec.ExternalID) - } - if canViewPods && nodeNonTerminatedPodsList != nil { - if err := describeNodeResource(nodeNonTerminatedPodsList, node, out); err != nil { - return err - } - } else { - fmt.Fprintf(out, "Pods:\tnot authorized\n") - } - if events != nil { - DescribeEvents(events, out) - } - return nil - }) -} - -type StatefulSetDescriber struct { - client clientset.Interface -} - -func (p *StatefulSetDescriber) Describe(namespace, name 
string, describerSettings DescriberSettings) (string, error) { - ps, err := p.client.Apps().StatefulSets(namespace).Get(name) - if err != nil { - return "", err - } - pc := p.client.Core().Pods(namespace) - - selector, err := unversioned.LabelSelectorAsSelector(ps.Spec.Selector) - if err != nil { - return "", err - } - - running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector) - if err != nil { - return "", err - } - - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", ps.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", ps.Namespace) - fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&ps.Spec.Template.Spec)) - fmt.Fprintf(out, "Selector:\t%s\n", unversioned.FormatLabelSelector(ps.Spec.Selector)) - fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(ps.Labels)) - fmt.Fprintf(out, "Replicas:\t%d current / %d desired\n", ps.Status.Replicas, ps.Spec.Replicas) - fmt.Fprintf(out, "Annotations:\t%s\n", labels.FormatLabels(ps.Annotations)) - fmt.Fprintf(out, "CreationTimestamp:\t%s\n", ps.CreationTimestamp.Time.Format(time.RFC1123Z)) - fmt.Fprintf(out, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) - describeVolumes(ps.Spec.Template.Spec.Volumes, out, "") - if describerSettings.ShowEvents { - events, _ := p.client.Core().Events(namespace).Search(ps) - if events != nil { - DescribeEvents(events, out) - } - } - return nil - }) -} - -type CertificateSigningRequestDescriber struct { - client clientset.Interface -} - -func (p *CertificateSigningRequestDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - csr, err := p.client.Certificates().CertificateSigningRequests().Get(name) - if err != nil { - return "", err - } - - cr, err := certutil.ParseCSR(csr) - if err != nil { - return "", fmt.Errorf("Error parsing CSR: %v", err) - } - status, err := extractCSRStatus(csr) - if err != nil { - return "", err - } - - printListHelper := func(out io.Writer, prefix, name string, values []string) { - if len(values) == 0 { - return - } - fmt.Fprintf(out, prefix+name+":\t") - fmt.Fprintf(out, strings.Join(values, "\n"+prefix+"\t")) - fmt.Fprintf(out, "\n") - } - - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", csr.Name) - fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(csr.Labels)) - fmt.Fprintf(out, "Annotations:\t%s\n", labels.FormatLabels(csr.Annotations)) - fmt.Fprintf(out, "CreationTimestamp:\t%s\n", csr.CreationTimestamp.Time.Format(time.RFC1123Z)) - fmt.Fprintf(out, "Requesting User:\t%s\n", csr.Spec.Username) - fmt.Fprintf(out, "Status:\t%s\n", status) - - fmt.Fprintf(out, "Subject:\n") - fmt.Fprintf(out, "\tCommon Name:\t%s\n", cr.Subject.CommonName) - fmt.Fprintf(out, "\tSerial Number:\t%s\n", cr.Subject.SerialNumber) - printListHelper(out, "\t", "Organization", cr.Subject.Organization) - printListHelper(out, "\t", "Organizational Unit", cr.Subject.OrganizationalUnit) - printListHelper(out, "\t", "Country", cr.Subject.Country) - printListHelper(out, "\t", "Locality", cr.Subject.Locality) - printListHelper(out, "\t", "Province", cr.Subject.Province) - printListHelper(out, "\t", "StreetAddress", cr.Subject.StreetAddress) - printListHelper(out, "\t", "PostalCode", cr.Subject.PostalCode) - - if len(cr.DNSNames)+len(cr.EmailAddresses)+len(cr.IPAddresses) > 0 { - fmt.Fprintf(out, "Subject Alternative Names:\n") - printListHelper(out, "\t", "DNS Names", cr.DNSNames) - printListHelper(out, "\t", "Email Addresses", 
cr.EmailAddresses) - var ipaddrs []string - for _, ipaddr := range cr.IPAddresses { - ipaddrs = append(ipaddrs, ipaddr.String()) - } - printListHelper(out, "\t", "IP Addresses", ipaddrs) - } - - if describerSettings.ShowEvents { - events, _ := p.client.Core().Events(namespace).Search(csr) - if events != nil { - DescribeEvents(events, out) - } - } - return nil - }) -} - -// HorizontalPodAutoscalerDescriber generates information about a horizontal pod autoscaler. -type HorizontalPodAutoscalerDescriber struct { - client clientset.Interface -} - -func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - hpa, err := d.client.Autoscaling().HorizontalPodAutoscalers(namespace).Get(name) - if err != nil { - return "", err - } - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", hpa.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", hpa.Namespace) - printLabelsMultiline(out, "Labels", hpa.Labels) - printLabelsMultiline(out, "Annotations", hpa.Annotations) - fmt.Fprintf(out, "CreationTimestamp:\t%s\n", hpa.CreationTimestamp.Time.Format(time.RFC1123Z)) - fmt.Fprintf(out, "Reference:\t%s/%s\n", - hpa.Spec.ScaleTargetRef.Kind, - hpa.Spec.ScaleTargetRef.Name) - if hpa.Spec.TargetCPUUtilizationPercentage != nil { - fmt.Fprintf(out, "Target CPU utilization:\t%d%%\n", *hpa.Spec.TargetCPUUtilizationPercentage) - fmt.Fprintf(out, "Current CPU utilization:\t") - if hpa.Status.CurrentCPUUtilizationPercentage != nil { - fmt.Fprintf(out, "%d%%\n", *hpa.Status.CurrentCPUUtilizationPercentage) - } else { - fmt.Fprintf(out, "\n") - } - } - minReplicas := "" - if hpa.Spec.MinReplicas != nil { - minReplicas = fmt.Sprintf("%d", *hpa.Spec.MinReplicas) - } - fmt.Fprintf(out, "Min replicas:\t%s\n", minReplicas) - fmt.Fprintf(out, "Max replicas:\t%d\n", hpa.Spec.MaxReplicas) - - // TODO: switch to scale subresource once the required code is submitted. 
- if strings.ToLower(hpa.Spec.ScaleTargetRef.Kind) == "replicationcontroller" { - fmt.Fprintf(out, "ReplicationController pods:\t") - rc, err := d.client.Core().ReplicationControllers(hpa.Namespace).Get(hpa.Spec.ScaleTargetRef.Name) - if err == nil { - fmt.Fprintf(out, "%d current / %d desired\n", rc.Status.Replicas, rc.Spec.Replicas) - } else { - fmt.Fprintf(out, "failed to check Replication Controller\n") - } - } - - if describerSettings.ShowEvents { - events, _ := d.client.Core().Events(namespace).Search(hpa) - if events != nil { - DescribeEvents(events, out) - } - } - return nil - }) -} - -func describeNodeResource(nodeNonTerminatedPodsList *api.PodList, node *api.Node, out io.Writer) error { - fmt.Fprintf(out, "Non-terminated Pods:\t(%d in total)\n", len(nodeNonTerminatedPodsList.Items)) - fmt.Fprint(out, " Namespace\tName\t\tCPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n") - fmt.Fprint(out, " ---------\t----\t\t------------\t----------\t---------------\t-------------\n") - allocatable := node.Status.Capacity - if len(node.Status.Allocatable) > 0 { - allocatable = node.Status.Allocatable - } - - for _, pod := range nodeNonTerminatedPodsList.Items { - req, limit, err := api.PodRequestsAndLimits(&pod) - if err != nil { - return err - } - cpuReq, cpuLimit, memoryReq, memoryLimit := req[api.ResourceCPU], limit[api.ResourceCPU], req[api.ResourceMemory], limit[api.ResourceMemory] - fractionCpuReq := float64(cpuReq.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 - fractionCpuLimit := float64(cpuLimit.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 - fractionMemoryReq := float64(memoryReq.Value()) / float64(allocatable.Memory().Value()) * 100 - fractionMemoryLimit := float64(memoryLimit.Value()) / float64(allocatable.Memory().Value()) * 100 - fmt.Fprintf(out, " %s\t%s\t\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\n", pod.Namespace, pod.Name, - cpuReq.String(), int64(fractionCpuReq), cpuLimit.String(), int64(fractionCpuLimit), - memoryReq.String(), int64(fractionMemoryReq), memoryLimit.String(), int64(fractionMemoryLimit)) - } - - fmt.Fprint(out, "Allocated resources:\n (Total limits may be over 100 percent, i.e., overcommitted.\n CPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n") - fmt.Fprint(out, " ------------\t----------\t---------------\t-------------\n") - reqs, limits, err := getPodsTotalRequestsAndLimits(nodeNonTerminatedPodsList) - if err != nil { - return err - } - cpuReqs, cpuLimits, memoryReqs, memoryLimits := reqs[api.ResourceCPU], limits[api.ResourceCPU], reqs[api.ResourceMemory], limits[api.ResourceMemory] - fractionCpuReqs := float64(cpuReqs.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 - fractionCpuLimits := float64(cpuLimits.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 - fractionMemoryReqs := float64(memoryReqs.Value()) / float64(allocatable.Memory().Value()) * 100 - fractionMemoryLimits := float64(memoryLimits.Value()) / float64(allocatable.Memory().Value()) * 100 - fmt.Fprintf(out, " %s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\n", - cpuReqs.String(), int64(fractionCpuReqs), cpuLimits.String(), int64(fractionCpuLimits), - memoryReqs.String(), int64(fractionMemoryReqs), memoryLimits.String(), int64(fractionMemoryLimits)) - return nil -} - -func filterTerminatedPods(pods []*api.Pod) []*api.Pod { - if len(pods) == 0 { - return pods - } - result := []*api.Pod{} - for _, pod := range pods { - if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed { - continue - } - result = 
append(result, pod) - } - return result -} - -func getPodsTotalRequestsAndLimits(podList *api.PodList) (reqs map[api.ResourceName]resource.Quantity, limits map[api.ResourceName]resource.Quantity, err error) { - reqs, limits = map[api.ResourceName]resource.Quantity{}, map[api.ResourceName]resource.Quantity{} - for _, pod := range podList.Items { - podReqs, podLimits, err := api.PodRequestsAndLimits(&pod) - if err != nil { - return nil, nil, err - } - for podReqName, podReqValue := range podReqs { - if value, ok := reqs[podReqName]; !ok { - reqs[podReqName] = *podReqValue.Copy() - } else { - value.Add(podReqValue) - reqs[podReqName] = value - } - } - for podLimitName, podLimitValue := range podLimits { - if value, ok := limits[podLimitName]; !ok { - limits[podLimitName] = *podLimitValue.Copy() - } else { - value.Add(podLimitValue) - limits[podLimitName] = value - } - } - } - return -} - -func DescribeEvents(el *api.EventList, w io.Writer) { - if len(el.Items) == 0 { - fmt.Fprint(w, "No events.\n") - return - } - sort.Sort(events.SortableEvents(el.Items)) - fmt.Fprint(w, "Events:\n FirstSeen\tLastSeen\tCount\tFrom\tSubObjectPath\tType\tReason\tMessage\n") - fmt.Fprint(w, " ---------\t--------\t-----\t----\t-------------\t--------\t------\t-------\n") - for _, e := range el.Items { - fmt.Fprintf(w, " %s\t%s\t%d\t%v\t%v\t%v\t%v\t%v\n", - translateTimestamp(e.FirstTimestamp), - translateTimestamp(e.LastTimestamp), - e.Count, - e.Source, - e.InvolvedObject.FieldPath, - e.Type, - e.Reason, - e.Message) - } -} - -// DeploymentDescriber generates information about a deployment. -type DeploymentDescriber struct { - clientset.Interface -} - -func (dd *DeploymentDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - d, err := dd.Extensions().Deployments(namespace).Get(name) - if err != nil { - return "", err - } - selector, err := unversioned.LabelSelectorAsSelector(d.Spec.Selector) - if err != nil { - return "", err - } - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", d.ObjectMeta.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", d.ObjectMeta.Namespace) - fmt.Fprintf(out, "CreationTimestamp:\t%s\n", d.CreationTimestamp.Time.Format(time.RFC1123Z)) - printLabelsMultiline(out, "Labels", d.Labels) - fmt.Fprintf(out, "Selector:\t%s\n", selector) - fmt.Fprintf(out, "Replicas:\t%d updated | %d total | %d available | %d unavailable\n", d.Status.UpdatedReplicas, d.Spec.Replicas, d.Status.AvailableReplicas, d.Status.UnavailableReplicas) - fmt.Fprintf(out, "StrategyType:\t%s\n", d.Spec.Strategy.Type) - fmt.Fprintf(out, "MinReadySeconds:\t%d\n", d.Spec.MinReadySeconds) - if d.Spec.Strategy.RollingUpdate != nil { - ru := d.Spec.Strategy.RollingUpdate - fmt.Fprintf(out, "RollingUpdateStrategy:\t%s max unavailable, %s max surge\n", ru.MaxUnavailable.String(), ru.MaxSurge.String()) - } - if len(d.Status.Conditions) > 0 { - fmt.Fprint(out, "Conditions:\n Type\tStatus\tReason\n") - fmt.Fprint(out, " ----\t------\t------\n") - for _, c := range d.Status.Conditions { - fmt.Fprintf(out, " %v \t%v\t%v\n", c.Type, c.Status, c.Reason) - } - } - oldRSs, _, newRS, err := deploymentutil.GetAllReplicaSets(d, dd) - if err == nil { - fmt.Fprintf(out, "OldReplicaSets:\t%s\n", printReplicaSetsByLabels(oldRSs)) - var newRSs []*extensions.ReplicaSet - if newRS != nil { - newRSs = append(newRSs, newRS) - } - fmt.Fprintf(out, "NewReplicaSet:\t%s\n", printReplicaSetsByLabels(newRSs)) - } - overlapWith := d.Annotations[deploymentutil.OverlapAnnotation] - if 
len(overlapWith) > 0 { - fmt.Fprintf(out, "!!!WARNING!!! This deployment has overlapping label selector with deployment %q and won't behave as expected. Please fix it before continue.\n", overlapWith) - } - if describerSettings.ShowEvents { - events, err := dd.Core().Events(namespace).Search(d) - if err == nil && events != nil { - DescribeEvents(events, out) - } - } - return nil - }) -} - -// Get all daemon set whose selectors would match a given set of labels. -// TODO: Move this to pkg/client and ideally implement it server-side (instead -// of getting all DS's and searching through them manually). -// TODO: write an interface for controllers and fuse getReplicationControllersForLabels -// and getDaemonSetsForLabels. -func getDaemonSetsForLabels(c extensionsclient.DaemonSetInterface, labelsToMatch labels.Labels) ([]extensions.DaemonSet, error) { - // Get all daemon sets - // TODO: this needs a namespace scope as argument - dss, err := c.List(api.ListOptions{}) - if err != nil { - return nil, fmt.Errorf("error getting daemon set: %v", err) - } - - // Find the ones that match labelsToMatch. - var matchingDaemonSets []extensions.DaemonSet - for _, ds := range dss.Items { - selector, err := unversioned.LabelSelectorAsSelector(ds.Spec.Selector) - if err != nil { - // this should never happen if the DaemonSet passed validation - return nil, err - } - if selector.Matches(labelsToMatch) { - matchingDaemonSets = append(matchingDaemonSets, ds) - } - } - return matchingDaemonSets, nil -} - -func printReplicationControllersByLabels(matchingRCs []*api.ReplicationController) string { - // Format the matching RC's into strings. - rcStrings := make([]string, 0, len(matchingRCs)) - for _, controller := range matchingRCs { - rcStrings = append(rcStrings, fmt.Sprintf("%s (%d/%d replicas created)", controller.Name, controller.Status.Replicas, controller.Spec.Replicas)) - } - - list := strings.Join(rcStrings, ", ") - if list == "" { - return "" - } - return list -} - -func printReplicaSetsByLabels(matchingRSs []*extensions.ReplicaSet) string { - // Format the matching ReplicaSets into strings. 
- rsStrings := make([]string, 0, len(matchingRSs)) - for _, rs := range matchingRSs { - rsStrings = append(rsStrings, fmt.Sprintf("%s (%d/%d replicas created)", rs.Name, rs.Status.Replicas, rs.Spec.Replicas)) - } - - list := strings.Join(rsStrings, ", ") - if list == "" { - return "" - } - return list -} - -func getPodStatusForController(c coreclient.PodInterface, selector labels.Selector) (running, waiting, succeeded, failed int, err error) { - options := api.ListOptions{LabelSelector: selector} - rcPods, err := c.List(options) - if err != nil { - return - } - for _, pod := range rcPods.Items { - switch pod.Status.Phase { - case api.PodRunning: - running++ - case api.PodPending: - waiting++ - case api.PodSucceeded: - succeeded++ - case api.PodFailed: - failed++ - } - } - return -} - -// ConfigMapDescriber generates information about a ConfigMap -type ConfigMapDescriber struct { - clientset.Interface -} - -func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - c := d.Core().ConfigMaps(namespace) - - configMap, err := c.Get(name) - if err != nil { - return "", err - } - - return describeConfigMap(configMap) -} - -func describeConfigMap(configMap *api.ConfigMap) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", configMap.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", configMap.Namespace) - printLabelsMultiline(out, "Labels", configMap.Labels) - printLabelsMultiline(out, "Annotations", configMap.Annotations) - - fmt.Fprintf(out, "\nData\n====\n") - for k, v := range configMap.Data { - fmt.Fprintf(out, "%s:\t%d bytes\n", k, len(v)) - } - - return nil - }) -} - -type ClusterDescriber struct { - fed_clientset.Interface -} - -func (d *ClusterDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - cluster, err := d.Federation().Clusters().Get(name) - if err != nil { - return "", err - } - return describeCluster(cluster) -} - -func describeCluster(cluster *federation.Cluster) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", cluster.Name) - fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(cluster.Labels)) - - fmt.Fprintf(out, "ServerAddressByClientCIDRs:\n ClientCIDR\tServerAddress\n") - fmt.Fprintf(out, " ----\t----\n") - for _, cidrAddr := range cluster.Spec.ServerAddressByClientCIDRs { - fmt.Fprintf(out, " %v \t%v\n\n", cidrAddr.ClientCIDR, cidrAddr.ServerAddress) - } - - if len(cluster.Status.Conditions) > 0 { - fmt.Fprint(out, "Conditions:\n Type\tStatus\tLastUpdateTime\tLastTransitionTime\tReason\tMessage\n") - fmt.Fprint(out, " ----\t------\t-----------------\t------------------\t------\t-------\n") - for _, c := range cluster.Status.Conditions { - fmt.Fprintf(out, " %v \t%v \t%s \t%s \t%v \t%v\n", - c.Type, - c.Status, - c.LastProbeTime.Time.Format(time.RFC1123Z), - c.LastTransitionTime.Time.Format(time.RFC1123Z), - c.Reason, - c.Message) - } - } - return nil - }) -} - -// NetworkPolicyDescriber generates information about a NetworkPolicy -type NetworkPolicyDescriber struct { - clientset.Interface -} - -func (d *NetworkPolicyDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - c := d.Extensions().NetworkPolicies(namespace) - - networkPolicy, err := c.Get(name) - if err != nil { - return "", err - } - - return describeNetworkPolicy(networkPolicy) -} - -func describeNetworkPolicy(networkPolicy *extensions.NetworkPolicy) (string, 
error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", networkPolicy.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", networkPolicy.Namespace) - printLabelsMultiline(out, "Labels", networkPolicy.Labels) - printLabelsMultiline(out, "Annotations", networkPolicy.Annotations) - - return nil - }) -} - -type StorageClassDescriber struct { - clientset.Interface -} - -func (s *StorageClassDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - sc, err := s.Storage().StorageClasses().Get(name) - if err != nil { - return "", err - } - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", sc.Name) - fmt.Fprintf(out, "IsDefaultClass:\t%s\n", storageutil.IsDefaultAnnotationText(sc.ObjectMeta)) - fmt.Fprintf(out, "Annotations:\t%s\n", labels.FormatLabels(sc.Annotations)) - fmt.Fprintf(out, "Provisioner:\t%s\n", sc.Provisioner) - fmt.Fprintf(out, "Parameters:\t%s\n", labels.FormatLabels(sc.Parameters)) - if describerSettings.ShowEvents { - events, err := s.Core().Events(namespace).Search(sc) - if err != nil { - return err - } - if events != nil { - DescribeEvents(events, out) - } - } - return nil - }) -} - -type PodDisruptionBudgetDescriber struct { - clientset.Interface -} - -func (p *PodDisruptionBudgetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - pdb, err := p.Policy().PodDisruptionBudgets(namespace).Get(name) - if err != nil { - return "", err - } - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", pdb.Name) - fmt.Fprintf(out, "Min available:\t%s\n", pdb.Spec.MinAvailable.String()) - if pdb.Spec.Selector != nil { - fmt.Fprintf(out, "Selector:\t%s\n", unversioned.FormatLabelSelector(pdb.Spec.Selector)) - } else { - fmt.Fprintf(out, "Selector:\t\n") - } - fmt.Fprintf(out, "Status:\n") - fmt.Fprintf(out, " Allowed disruptions:\t%d\n", pdb.Status.PodDisruptionsAllowed) - fmt.Fprintf(out, " Current:\t%d\n", pdb.Status.CurrentHealthy) - fmt.Fprintf(out, " Desired:\t%d\n", pdb.Status.DesiredHealthy) - fmt.Fprintf(out, " Total:\t%d\n", pdb.Status.ExpectedPods) - if describerSettings.ShowEvents { - events, err := p.Core().Events(namespace).Search(pdb) - if err != nil { - return err - } - if events != nil { - DescribeEvents(events, out) - } - } - return nil - }) -} - -// newErrNoDescriber creates a new ErrNoDescriber with the names of the provided types. -func newErrNoDescriber(types ...reflect.Type) error { - names := make([]string, 0, len(types)) - for _, t := range types { - names = append(names, t.String()) - } - return ErrNoDescriber{Types: names} -} - -// Describers implements ObjectDescriber against functions registered via Add. Those functions can -// be strongly typed. Types are exactly matched (no conversion or assignable checks). -type Describers struct { - searchFns map[reflect.Type][]typeFunc -} - -// DescribeObject implements ObjectDescriber and will attempt to print the provided object to a string, -// if at least one describer function has been registered with the exact types passed, or if any -// describer can print the exact object in its first argument (the remainder will be provided empty -// values). If no function registered with Add can satisfy the passed objects, an ErrNoDescriber will -// be returned -// TODO: reorder and partial match extra. 
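A rough usage sketch of the registration pattern described in the comment above; the pod-describing function and its return value are invented for illustration, while Add, DescribeObject, DescriberSettings and ErrNoDescriber are the names used in this package:

    var d Describers
    // Add only accepts functions shaped func(...) (string, error) with at least one input parameter.
    _ = d.Add(func(pod *api.Pod, settings DescriberSettings) (string, error) {
        return "pod " + pod.Name, nil
    })
    // Dispatch is on the exact dynamic types of the arguments: *api.Pod plus DescriberSettings
    // matches the function registered above.
    out, err := d.DescribeObject(&api.Pod{}, DescriberSettings{ShowEvents: true})
    // out == "pod ", err == nil; describing a type with no registered function returns an ErrNoDescriber.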
-func (d *Describers) DescribeObject(exact interface{}, extra ...interface{}) (string, error) { - exactType := reflect.TypeOf(exact) - fns, ok := d.searchFns[exactType] - if !ok { - return "", newErrNoDescriber(exactType) - } - if len(extra) == 0 { - for _, typeFn := range fns { - if len(typeFn.Extra) == 0 { - return typeFn.Describe(exact, extra...) - } - } - typeFn := fns[0] - for _, t := range typeFn.Extra { - v := reflect.New(t).Elem() - extra = append(extra, v.Interface()) - } - return fns[0].Describe(exact, extra...) - } - - types := make([]reflect.Type, 0, len(extra)) - for _, obj := range extra { - types = append(types, reflect.TypeOf(obj)) - } - for _, typeFn := range fns { - if typeFn.Matches(types) { - return typeFn.Describe(exact, extra...) - } - } - return "", newErrNoDescriber(append([]reflect.Type{exactType}, types...)...) -} - -// Add adds one or more describer functions to the Describer. The passed function must -// match the signature: -// -// func(...) (string, error) -// -// Any number of arguments may be provided. -func (d *Describers) Add(fns ...interface{}) error { - for _, fn := range fns { - fv := reflect.ValueOf(fn) - ft := fv.Type() - if ft.Kind() != reflect.Func { - return fmt.Errorf("expected func, got: %v", ft) - } - numIn := ft.NumIn() - if numIn == 0 { - return fmt.Errorf("expected at least one 'in' params, got: %v", ft) - } - if ft.NumOut() != 2 { - return fmt.Errorf("expected two 'out' params - (string, error), got: %v", ft) - } - types := make([]reflect.Type, 0, numIn) - for i := 0; i < numIn; i++ { - types = append(types, ft.In(i)) - } - if ft.Out(0) != reflect.TypeOf(string("")) { - return fmt.Errorf("expected string return, got: %v", ft) - } - var forErrorType error - // This convolution is necessary, otherwise TypeOf picks up on the fact - // that forErrorType is nil. - errorType := reflect.TypeOf(&forErrorType).Elem() - if ft.Out(1) != errorType { - return fmt.Errorf("expected error return, got: %v", ft) - } - - exact := types[0] - extra := types[1:] - if d.searchFns == nil { - d.searchFns = make(map[reflect.Type][]typeFunc) - } - fns := d.searchFns[exact] - fn := typeFunc{Extra: extra, Fn: fv} - fns = append(fns, fn) - d.searchFns[exact] = fns - } - return nil -} - -// typeFunc holds information about a describer function and the types it accepts -type typeFunc struct { - Extra []reflect.Type - Fn reflect.Value -} - -// Matches returns true when the passed types exactly match the Extra list. -func (fn typeFunc) Matches(types []reflect.Type) bool { - if len(fn.Extra) != len(types) { - return false - } - // reorder the items in array types and fn.Extra - // convert the type into string and sort them, check if they are matched - varMap := make(map[reflect.Type]bool) - for i := range fn.Extra { - varMap[fn.Extra[i]] = true - } - for i := range types { - if _, found := varMap[types[i]]; !found { - return false - } - } - return true -} - -// Describe invokes the nested function with the exact number of arguments. -func (fn typeFunc) Describe(exact interface{}, extra ...interface{}) (string, error) { - values := []reflect.Value{reflect.ValueOf(exact)} - for _, obj := range extra { - values = append(values, reflect.ValueOf(obj)) - } - out := fn.Fn.Call(values) - s := out[0].Interface().(string) - var err error - if !out[1].IsNil() { - err = out[1].Interface().(error) - } - return s, err -} - -// printLabelsMultiline prints multiple labels with a proper alignment. 
-func printLabelsMultiline(out io.Writer, title string, labels map[string]string) { - printLabelsMultilineWithIndent(out, "", title, "\t", labels) -} - -// printLabelsMultiline prints multiple labels with a user-defined alignment. -func printLabelsMultilineWithIndent(out io.Writer, initialIndent, title, innerIndent string, labels map[string]string) { - - fmt.Fprintf(out, "%s%s:%s", initialIndent, title, innerIndent) - - if labels == nil || len(labels) == 0 { - fmt.Fprintln(out, "") - return - } - - // to print labels in the sorted order - keys := make([]string, 0, len(labels)) - for key := range labels { - keys = append(keys, key) - } - sort.Strings(keys) - - for i, key := range keys { - if i != 0 { - fmt.Fprint(out, initialIndent) - fmt.Fprint(out, innerIndent) - } - fmt.Fprintf(out, "%s=%s\n", key, labels[key]) - i++ - } -} - -// printTaintsMultiline prints multiple taints with a proper alignment. -func printTaintsInAnnotationMultiline(out io.Writer, title string, annotations map[string]string) { - taints, err := api.GetTaintsFromNodeAnnotations(annotations) - if err != nil { - taints = []api.Taint{} - } - printTaintsMultilineWithIndent(out, "", title, "\t", taints) -} - -// printTaintsMultilineWithIndent prints multiple taints with a user-defined alignment. -func printTaintsMultilineWithIndent(out io.Writer, initialIndent, title, innerIndent string, taints []api.Taint) { - fmt.Fprintf(out, "%s%s:%s", initialIndent, title, innerIndent) - - if taints == nil || len(taints) == 0 { - fmt.Fprintln(out, "") - return - } - - // to print taints in the sorted order - keys := make([]string, 0, len(taints)) - for _, taint := range taints { - keys = append(keys, string(taint.Effect)+","+taint.Key) - } - sort.Strings(keys) - - for i, key := range keys { - for _, taint := range taints { - if string(taint.Effect)+","+taint.Key == key { - if i != 0 { - fmt.Fprint(out, initialIndent) - fmt.Fprint(out, innerIndent) - } - fmt.Fprintf(out, "%s\n", taint.ToString()) - i++ - } - } - } -} - -// printTolerationsMultiline prints multiple tolerations with a proper alignment. -func printTolerationsInAnnotationMultiline(out io.Writer, title string, annotations map[string]string) { - tolerations, err := api.GetTolerationsFromPodAnnotations(annotations) - if err != nil { - tolerations = []api.Toleration{} - } - printTolerationsMultilineWithIndent(out, "", title, "\t", tolerations) -} - -// printTolerationsMultilineWithIndent prints multiple tolerations with a user-defined alignment. 
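For concreteness, a small sketch of the output produced by the helper defined next (the toleration values are made up, and the usual bytes import is assumed):

    var buf bytes.Buffer
    printTolerationsMultilineWithIndent(&buf, "", "Tolerations", "\t", []api.Toleration{
        {Key: "dedicated", Operator: "Equal", Value: "infra", Effect: "NoSchedule"},
    })
    // buf.String() == "Tolerations:\tdedicated=infra:Equal:NoSchedule\n"
    // i.e. key=value, followed by :Operator and :Effect only when those fields are set.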
-func printTolerationsMultilineWithIndent(out io.Writer, initialIndent, title, innerIndent string, tolerations []api.Toleration) { - fmt.Fprintf(out, "%s%s:%s", initialIndent, title, innerIndent) - - if tolerations == nil || len(tolerations) == 0 { - fmt.Fprintln(out, "") - return - } - - // to print tolerations in the sorted order - keys := make([]string, 0, len(tolerations)) - for _, toleration := range tolerations { - keys = append(keys, toleration.Key) - } - sort.Strings(keys) - - for i, key := range keys { - for _, toleration := range tolerations { - if toleration.Key == key { - if i != 0 { - fmt.Fprint(out, initialIndent) - fmt.Fprint(out, innerIndent) - } - fmt.Fprintf(out, "%s=%s", toleration.Key, toleration.Value) - if len(toleration.Operator) != 0 { - fmt.Fprintf(out, ":%s", toleration.Operator) - } - if len(toleration.Effect) != 0 { - fmt.Fprintf(out, ":%s", toleration.Effect) - } - fmt.Fprintf(out, "\n") - i++ - } - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/doc.go b/vendor/k8s.io/kubernetes/pkg/kubectl/doc.go deleted file mode 100644 index d1516ebb7a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package kubectl is a set of libraries that are used by the kubectl command line tool. -// They are separated out into a library to support unit testing. Most functionality should -// be included in this package, and the main kubectl should really just be an entry point. -package kubectl diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/explain.go b/vendor/k8s.io/kubernetes/pkg/kubectl/explain.go deleted file mode 100644 index dd85a7f366..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/explain.go +++ /dev/null @@ -1,251 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "fmt" - "io" - "strings" - - "github.com/emicklei/go-restful/swagger" - - "k8s.io/kubernetes/pkg/api/meta" - apiutil "k8s.io/kubernetes/pkg/api/util" -) - -var allModels = make(map[string]*swagger.NamedModel) -var recursive = false // this is global for convenience, can become int for multiple levels - -// SplitAndParseResourceRequest separates the users input into a model and fields -func SplitAndParseResourceRequest(inResource string, mapper meta.RESTMapper) (string, []string, error) { - inResource, fieldsPath := splitDotNotation(inResource) - inResource, _ = mapper.ResourceSingularizer(inResource) - return inResource, fieldsPath, nil -} - -// PrintModelDescription prints the description of a specific model or dot path -func PrintModelDescription(inModel string, fieldsPath []string, w io.Writer, swaggerSchema *swagger.ApiDeclaration, r bool) error { - recursive = r // this is global for convenience - apiVer := apiutil.GetVersion(swaggerSchema.ApiVersion) + "." - - var pointedModel *swagger.NamedModel - for i := range swaggerSchema.Models.List { - name := swaggerSchema.Models.List[i].Name - - allModels[name] = &swaggerSchema.Models.List[i] - if strings.ToLower(name) == strings.ToLower(apiVer+inModel) { - pointedModel = &swaggerSchema.Models.List[i] - } - } - if pointedModel == nil { - return fmt.Errorf("requested resource %q is not defined", inModel) - } - - if len(fieldsPath) == 0 { - return printTopLevelResourceInfo(w, pointedModel) - } - - var pointedModelAsProp *swagger.NamedModelProperty - for _, field := range fieldsPath { - if prop, nextModel, isModel := getField(pointedModel, field); prop != nil { - if isModel { - pointedModelAsProp = prop - pointedModel = allModels[nextModel] - } else { - return printPrimitive(w, prop) - } - } else { - return fmt.Errorf("field %q does not exist", field) - } - } - return printModelInfo(w, pointedModel, pointedModelAsProp) -} - -func splitDotNotation(model string) (string, []string) { - var fieldsPath []string - dotModel := strings.Split(model, ".") - if len(dotModel) >= 1 { - fieldsPath = dotModel[1:] - } - return dotModel[0], fieldsPath -} - -func getPointedModel(prop *swagger.ModelProperty) (string, bool) { - if prop.Ref != nil { - return *prop.Ref, true - } else if *prop.Type == "array" && prop.Items.Ref != nil { - return *prop.Items.Ref, true - } - return "", false -} - -func getField(model *swagger.NamedModel, sField string) (*swagger.NamedModelProperty, string, bool) { - for _, prop := range model.Model.Properties.List { - if prop.Name == sField { - pointedModel, isModel := getPointedModel(&prop.Property) - return &prop, pointedModel, isModel - } - } - return nil, "", false -} - -func printModelInfo(w io.Writer, model *swagger.NamedModel, modelProp *swagger.NamedModelProperty) error { - t, _ := getFieldType(&modelProp.Property) - fmt.Fprintf(w, "RESOURCE: %s <%s>\n\n", modelProp.Name, t) - fieldDesc, _ := wrapAndIndentText(modelProp.Property.Description, " ", 80) - fmt.Fprintf(w, "DESCRIPTION:\n%s\n\n%s\n", fieldDesc, indentText(model.Model.Description, " ")) - return printFields(w, model) -} - -func printPrimitive(w io.Writer, field *swagger.NamedModelProperty) error { - t, _ := getFieldType(&field.Property) - fmt.Fprintf(w, "FIELD: %s <%s>\n\n", field.Name, t) - d, _ := wrapAndIndentText(field.Property.Description, " ", 80) - fmt.Fprintf(w, "DESCRIPTION:\n%s\n", d) - return nil -} - -func printTopLevelResourceInfo(w io.Writer, model *swagger.NamedModel) error { - fmt.Fprintf(w, "DESCRIPTION:\n%s\n", 
model.Model.Description) - return printFields(w, model) -} - -func printFields(w io.Writer, model *swagger.NamedModel) error { - fmt.Fprint(w, "\nFIELDS:\n") - for _, field := range model.Model.Properties.List { - fieldType, err := getFieldType(&field.Property) - if err != nil { - return err - } - - if arrayContains(model.Model.Required, field.Name) { - fmt.Fprintf(w, " %s\t<%s> -required-\n", field.Name, fieldType) - } else { - fmt.Fprintf(w, " %s\t<%s>\n", field.Name, fieldType) - } - - if recursive { - pointedModel, isModel := getPointedModel(&field.Property) - if isModel { - for _, nestedField := range allModels[pointedModel].Model.Properties.List { - t, _ := getFieldType(&nestedField.Property) - fmt.Fprintf(w, " %s\t<%s>\n", nestedField.Name, t) - } - } - } else { - fieldDesc, _ := wrapAndIndentText(field.Property.Description, " ", 80) - fmt.Fprintf(w, "%s\n\n", fieldDesc) - } - } - fmt.Fprint(w, "\n") - return nil -} - -func getFieldType(prop *swagger.ModelProperty) (string, error) { - if prop.Type == nil { - return "Object", nil - } else if *prop.Type == "any" { - // Swagger Spec doesn't return information for maps. - return "map[string]string", nil - } else if *prop.Type == "array" { - if prop.Items == nil { - return "", fmt.Errorf("error in swagger spec. Property: %v contains an array without type", prop) - } - if prop.Items.Ref != nil { - fieldType := "[]Object" - return fieldType, nil - } - fieldType := "[]" + *prop.Items.Type - return fieldType, nil - } - return *prop.Type, nil -} - -func wrapAndIndentText(desc, indent string, lim int) (string, error) { - words := strings.Split(strings.Replace(strings.TrimSpace(desc), "\n", " ", -1), " ") - n := len(words) - - for i := 0; i < n; i++ { - if len(words[i]) > lim { - if strings.Contains(words[i], "/") { - s := breakURL(words[i]) - words = append(words[:i], append(s, words[i+1:]...)...) 
- i = i + len(s) - 1 - } else { - fmt.Println(len(words[i])) - return "", fmt.Errorf("there are words longer that the break limit is") - } - } - } - - var lines []string - line := []string{indent} - lineL := len(indent) - for i := 0; i < len(words); i++ { - w := words[i] - - if strings.HasSuffix(w, "/") && lineL+len(w)-1 < lim { - prev := line[len(line)-1] - if strings.HasSuffix(prev, "/") { - if i+1 < len(words)-1 && !strings.HasSuffix(words[i+1], "/") { - w = strings.TrimSuffix(w, "/") - } - - line[len(line)-1] = prev + w - lineL += len(w) - } else { - line = append(line, w) - lineL += len(w) + 1 - } - } else if lineL+len(w) < lim { - line = append(line, w) - lineL += len(w) + 1 - } else { - lines = append(lines, strings.Join(line, " ")) - line = []string{indent, w} - lineL = len(indent) + len(w) - } - } - lines = append(lines, strings.Join(line, " ")) - - return strings.Join(lines, "\n"), nil -} - -func breakURL(url string) []string { - var buf []string - for _, part := range strings.Split(url, "/") { - buf = append(buf, part+"/") - } - return buf -} - -func indentText(text, indent string) string { - lines := strings.Split(text, "\n") - for i := range lines { - lines[i] = indent + lines[i] - } - return strings.Join(lines, "\n") -} - -func arrayContains(s []string, e string) bool { - for _, a := range s { - if a == e { - return true - } - } - return false -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/generate.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate.go deleted file mode 100644 index e1ffe370ec..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/generate.go +++ /dev/null @@ -1,200 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "reflect" - "strconv" - "strings" - - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "k8s.io/kubernetes/pkg/runtime" - utilerrors "k8s.io/kubernetes/pkg/util/errors" -) - -// GeneratorParam is a parameter for a generator -// TODO: facilitate structured json generator input schemes -type GeneratorParam struct { - Name string - Required bool -} - -// Generator is an interface for things that can generate API objects from input parameters. 
-type Generator interface { - // Generate creates an API object given a set of parameters - Generate(params map[string]interface{}) (runtime.Object, error) - // ParamNames returns the list of parameters that this generator uses - ParamNames() []GeneratorParam -} - -// StructuredGenerator is an interface for things that can generate API objects not using parameter injection -type StructuredGenerator interface { - // StructuredGenerator creates an API object using pre-configured parameters - StructuredGenerate() (runtime.Object, error) -} - -func IsZero(i interface{}) bool { - if i == nil { - return true - } - return reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) -} - -// ValidateParams ensures that all required params are present in the params map -func ValidateParams(paramSpec []GeneratorParam, params map[string]interface{}) error { - allErrs := []error{} - for ix := range paramSpec { - if paramSpec[ix].Required { - value, found := params[paramSpec[ix].Name] - if !found || IsZero(value) { - allErrs = append(allErrs, fmt.Errorf("Parameter: %s is required", paramSpec[ix].Name)) - } - } - } - return utilerrors.NewAggregate(allErrs) -} - -// AnnotateFlags annotates all flags that are used by generators. -func AnnotateFlags(cmd *cobra.Command, generators map[string]Generator) { - // Iterate over all generators and mark any flags used by them. - for name, generator := range generators { - generatorParams := map[string]struct{}{} - for _, param := range generator.ParamNames() { - generatorParams[param.Name] = struct{}{} - } - - cmd.Flags().VisitAll(func(flag *pflag.Flag) { - if _, found := generatorParams[flag.Name]; !found { - // This flag is not used by the current generator - // so skip it. - return - } - if flag.Annotations == nil { - flag.Annotations = map[string][]string{} - } - if annotations := flag.Annotations["generator"]; annotations == nil { - flag.Annotations["generator"] = []string{} - } - flag.Annotations["generator"] = append(flag.Annotations["generator"], name) - }) - } -} - -// EnsureFlagsValid ensures that no invalid flags are being used against a generator. -func EnsureFlagsValid(cmd *cobra.Command, generators map[string]Generator, generatorInUse string) error { - AnnotateFlags(cmd, generators) - - allErrs := []error{} - cmd.Flags().VisitAll(func(flag *pflag.Flag) { - // If the flag hasn't changed, don't validate it. - if !flag.Changed { - return - } - // Look into the flag annotations for the generators that can use it. - if annotations := flag.Annotations["generator"]; len(annotations) > 0 { - annotationMap := map[string]struct{}{} - for _, ann := range annotations { - annotationMap[ann] = struct{}{} - } - // If the current generator is not annotated, then this flag shouldn't - // be used with it. 
- if _, found := annotationMap[generatorInUse]; !found { - allErrs = append(allErrs, fmt.Errorf("cannot use --%s with --generator=%s", flag.Name, generatorInUse)) - } - } - }) - return utilerrors.NewAggregate(allErrs) -} - -// MakeParams is a utility that creates generator parameters from a command line -func MakeParams(cmd *cobra.Command, params []GeneratorParam) map[string]interface{} { - result := map[string]interface{}{} - for ix := range params { - f := cmd.Flags().Lookup(params[ix].Name) - if f != nil { - result[params[ix].Name] = f.Value.String() - } - } - return result -} - -func MakeProtocols(protocols map[string]string) string { - out := []string{} - for key, value := range protocols { - out = append(out, fmt.Sprintf("%s/%s", key, value)) - } - return strings.Join(out, ",") -} - -func ParseProtocols(protocols interface{}) (map[string]string, error) { - protocolsString, isString := protocols.(string) - if !isString { - return nil, fmt.Errorf("expected string, found %v", protocols) - } - if len(protocolsString) == 0 { - return nil, fmt.Errorf("no protocols passed") - } - portProtocolMap := map[string]string{} - protocolsSlice := strings.Split(protocolsString, ",") - for ix := range protocolsSlice { - portProtocol := strings.Split(protocolsSlice[ix], "/") - if len(portProtocol) != 2 { - return nil, fmt.Errorf("unexpected port protocol mapping: %s", protocolsSlice[ix]) - } - portProtocolMap[portProtocol[0]] = portProtocol[1] - } - return portProtocolMap, nil -} - -func MakeLabels(labels map[string]string) string { - out := []string{} - for key, value := range labels { - out = append(out, fmt.Sprintf("%s=%s", key, value)) - } - return strings.Join(out, ",") -} - -// ParseLabels turns a string representation of a label set into a map[string]string -func ParseLabels(labelSpec interface{}) (map[string]string, error) { - labelString, isString := labelSpec.(string) - if !isString { - return nil, fmt.Errorf("expected string, found %v", labelSpec) - } - if len(labelString) == 0 { - return nil, fmt.Errorf("no label spec passed") - } - labels := map[string]string{} - labelSpecs := strings.Split(labelString, ",") - for ix := range labelSpecs { - labelSpec := strings.Split(labelSpecs[ix], "=") - if len(labelSpec) != 2 { - return nil, fmt.Errorf("unexpected label spec: %s", labelSpecs[ix]) - } - labels[labelSpec[0]] = labelSpec[1] - } - return labels, nil -} - -func GetBool(params map[string]string, key string, defValue bool) (bool, error) { - if val, found := params[key]; !found { - return defValue, nil - } else { - return strconv.ParseBool(val) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/history.go b/vendor/k8s.io/kubernetes/pkg/kubectl/history.go deleted file mode 100644 index 8485139fe2..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/history.go +++ /dev/null @@ -1,129 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "bytes" - "fmt" - "io" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/extensions" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" - "k8s.io/kubernetes/pkg/runtime" - sliceutil "k8s.io/kubernetes/pkg/util/slice" -) - -const ( - ChangeCauseAnnotation = "kubernetes.io/change-cause" -) - -// HistoryViewer provides an interface for resources have historical information. -type HistoryViewer interface { - ViewHistory(namespace, name string, revision int64) (string, error) -} - -func HistoryViewerFor(kind unversioned.GroupKind, c clientset.Interface) (HistoryViewer, error) { - switch kind { - case extensions.Kind("Deployment"): - return &DeploymentHistoryViewer{c}, nil - } - return nil, fmt.Errorf("no history viewer has been implemented for %q", kind) -} - -type DeploymentHistoryViewer struct { - c clientset.Interface -} - -// ViewHistory returns a revision-to-replicaset map as the revision history of a deployment -func (h *DeploymentHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) { - deployment, err := h.c.Extensions().Deployments(namespace).Get(name) - if err != nil { - return "", fmt.Errorf("failed to retrieve deployment %s: %v", name, err) - } - _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, h.c) - if err != nil { - return "", fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", name, err) - } - allRSs := allOldRSs - if newRS != nil { - allRSs = append(allRSs, newRS) - } - - historyInfo := make(map[int64]*api.PodTemplateSpec) - for _, rs := range allRSs { - v, err := deploymentutil.Revision(rs) - if err != nil { - continue - } - historyInfo[v] = &rs.Spec.Template - changeCause := getChangeCause(rs) - if historyInfo[v].Annotations == nil { - historyInfo[v].Annotations = make(map[string]string) - } - if len(changeCause) > 0 { - historyInfo[v].Annotations[ChangeCauseAnnotation] = changeCause - } - } - - if len(historyInfo) == 0 { - return "No rollout history found.", nil - } - - if revision > 0 { - // Print details of a specific revision - template, ok := historyInfo[revision] - if !ok { - return "", fmt.Errorf("unable to find the specified revision") - } - buf := bytes.NewBuffer([]byte{}) - DescribePodTemplate(template, buf) - return buf.String(), nil - } - - // Sort the revisionToChangeCause map by revision - revisions := make([]int64, 0, len(historyInfo)) - for r := range historyInfo { - revisions = append(revisions, r) - } - sliceutil.SortInts64(revisions) - - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "REVISION\tCHANGE-CAUSE\n") - for _, r := range revisions { - // Find the change-cause of revision r - changeCause := historyInfo[r].Annotations[ChangeCauseAnnotation] - if len(changeCause) == 0 { - changeCause = "" - } - fmt.Fprintf(out, "%d\t%s\n", r, changeCause) - } - return nil - }) -} - -// getChangeCause returns the change-cause annotation of the input object -func getChangeCause(obj runtime.Object) string { - accessor, err := meta.Accessor(obj) - if err != nil { - return "" - } - return accessor.GetAnnotations()[ChangeCauseAnnotation] -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/interfaces.go b/vendor/k8s.io/kubernetes/pkg/kubectl/interfaces.go deleted file mode 100644 index f8acb1ea0d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/interfaces.go 
+++ /dev/null @@ -1,32 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/restclient" -) - -// RESTClient is a client helper for dealing with RESTful resources -// in a generic way. -type RESTClient interface { - Get() *client.Request - Post() *client.Request - Patch(api.PatchType) *client.Request - Delete() *client.Request - Put() *client.Request -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/kubectl.go b/vendor/k8s.io/kubernetes/pkg/kubectl/kubectl.go deleted file mode 100644 index 80a050a91d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/kubectl.go +++ /dev/null @@ -1,185 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// A set of common functions needed by cmd/kubectl and pkg/kubectl packages. -package kubectl - -import ( - "errors" - "fmt" - "path" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" -) - -const ( - kubectlAnnotationPrefix = "kubectl.kubernetes.io/" -) - -type NamespaceInfo struct { - Namespace string -} - -func listOfImages(spec *api.PodSpec) []string { - images := make([]string, 0, len(spec.Containers)) - for _, container := range spec.Containers { - images = append(images, container.Image) - } - return images -} - -func makeImageList(spec *api.PodSpec) string { - return strings.Join(listOfImages(spec), ",") -} - -// OutputVersionMapper is a RESTMapper that will prefer mappings that -// correspond to a preferred output version (if feasible) -type OutputVersionMapper struct { - meta.RESTMapper - - // output versions takes a list of preferred GroupVersions. Only the first - // hit for a given group will have effect. This allows different output versions - // depending upon the group of the kind being requested - OutputVersions []unversioned.GroupVersion -} - -// RESTMapping implements meta.RESTMapper by prepending the output version to the preferred version list. -func (m OutputVersionMapper) RESTMapping(gk unversioned.GroupKind, versions ...string) (*meta.RESTMapping, error) { - for _, preferredVersion := range m.OutputVersions { - if gk.Group == preferredVersion.Group { - mapping, err := m.RESTMapper.RESTMapping(gk, preferredVersion.Version) - if err == nil { - return mapping, nil - } - - break - } - } - - return m.RESTMapper.RESTMapping(gk, versions...) 
-} - -// ShortForms is the list of short names to their expanded names -var ShortForms = map[string]string{ - // Please keep this alphabetized - // If you add an entry here, please also take a look at pkg/kubectl/cmd/cmd.go - // and add an entry to valid_resources when appropriate. - "cm": "configmaps", - "cs": "componentstatuses", - "csr": "certificatesigningrequests", - "deploy": "deployments", - "ds": "daemonsets", - "ep": "endpoints", - "ev": "events", - "hpa": "horizontalpodautoscalers", - "ing": "ingresses", - "limits": "limitranges", - "no": "nodes", - "ns": "namespaces", - "po": "pods", - "psp": "podSecurityPolicies", - "pvc": "persistentvolumeclaims", - "pv": "persistentvolumes", - "quota": "resourcequotas", - "rc": "replicationcontrollers", - "rs": "replicasets", - "sa": "serviceaccounts", - "svc": "services", -} - -// ResourceShortFormFor looks up for a short form of resource names. -func ResourceShortFormFor(resource string) (string, bool) { - var alias string - exists := false - for k, val := range ShortForms { - if val == resource { - alias = k - exists = true - break - } - } - return alias, exists -} - -// ResourceAliases returns the resource shortcuts and plural forms for the given resources. -func ResourceAliases(rs []string) []string { - as := make([]string, 0, len(rs)) - plurals := make(map[string]struct{}, len(rs)) - for _, r := range rs { - var plural string - switch { - case r == "endpoints": - plural = r // exception. "endpoint" does not exist. Why? - case strings.HasSuffix(r, "y"): - plural = r[0:len(r)-1] + "ies" - case strings.HasSuffix(r, "s"): - plural = r + "es" - default: - plural = r + "s" - } - as = append(as, plural) - - plurals[plural] = struct{}{} - } - - for sf, r := range ShortForms { - if _, found := plurals[r]; found { - as = append(as, sf) - } - } - return as -} - -// parseFileSource parses the source given. Acceptable formats include: -// -// 1. source-path: the basename will become the key name -// 2. source-name=source-path: the source-name will become the key name and source-path is the path to the key file -// -// Key names cannot include '='. 
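A quick illustration of the two accepted formats just listed (the paths are invented; error text abbreviated):

    k, p, err := parseFileSource("/etc/ssl/ca.crt")   // k == "ca.crt", p == "/etc/ssl/ca.crt", err == nil
    fmt.Println(k, p, err)
    k, p, err = parseFileSource("ca=/etc/ssl/ca.crt") // k == "ca", p == "/etc/ssl/ca.crt", err == nil
    fmt.Println(k, p, err)
    _, _, err = parseFileSource("a=b=c")              // err != nil: '=' may not appear in key names
    fmt.Println(err)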
-func parseFileSource(source string) (keyName, filePath string, err error) { - numSeparators := strings.Count(source, "=") - switch { - case numSeparators == 0: - return path.Base(source), source, nil - case numSeparators == 1 && strings.HasPrefix(source, "="): - return "", "", fmt.Errorf("key name for file path %v missing.", strings.TrimPrefix(source, "=")) - case numSeparators == 1 && strings.HasSuffix(source, "="): - return "", "", fmt.Errorf("file path for key name %v missing.", strings.TrimSuffix(source, "=")) - case numSeparators > 1: - return "", "", errors.New("Key names or file paths cannot contain '='.") - default: - components := strings.Split(source, "=") - return components[0], components[1], nil - } -} - -// parseLiteralSource parses the source key=val pair -func parseLiteralSource(source string) (keyName, value string, err error) { - // leading equal is invalid - if strings.Index(source, "=") == 0 { - return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source) - } - // split after the first equal (so values can have the = character) - items := strings.SplitN(source, "=", 2) - if len(items) != 2 { - return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source) - } - - return items[0], items[1], nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/namespace.go b/vendor/k8s.io/kubernetes/pkg/kubectl/namespace.go deleted file mode 100644 index 6d58c226ef..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/namespace.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/runtime" -) - -// NamespaceGeneratorV1 supports stable generation of a namespace -type NamespaceGeneratorV1 struct { - // Name of namespace - Name string -} - -// Ensure it supports the generator pattern that uses parameter injection -var _ Generator = &NamespaceGeneratorV1{} - -// Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &NamespaceGeneratorV1{} - -// Generate returns a namespace using the specified parameters -func (g NamespaceGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(g.ParamNames(), genericParams) - if err != nil { - return nil, err - } - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - delegate := &NamespaceGeneratorV1{Name: params["name"]} - return delegate.StructuredGenerate() -} - -// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern -func (g NamespaceGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - } -} - -// StructuredGenerate outputs a namespace object using the configured fields -func (g *NamespaceGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := g.validate(); err != nil { - return nil, err - } - namespace := &api.Namespace{} - namespace.Name = g.Name - return namespace, nil -} - -// validate validates required fields are set to support structured generation -func (g *NamespaceGeneratorV1) validate() error { - if len(g.Name) == 0 { - return fmt.Errorf("name must be specified") - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/proxy_server.go b/vendor/k8s.io/kubernetes/pkg/kubectl/proxy_server.go deleted file mode 100644 index b837fc4038..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/proxy_server.go +++ /dev/null @@ -1,248 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "net" - "net/http" - "net/http/httputil" - "net/url" - "os" - "regexp" - "strings" - "time" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/util" -) - -const ( - DefaultHostAcceptRE = "^localhost$,^127\\.0\\.0\\.1$,^\\[::1\\]$" - DefaultPathAcceptRE = "^/.*" - DefaultPathRejectRE = "^/api/.*/pods/.*/exec,^/api/.*/pods/.*/attach" - DefaultMethodRejectRE = "POST,PUT,PATCH" -) - -var ( - // The reverse proxy will periodically flush the io writer at this frequency. - // Only matters for long poll connections like the one used to watch. With an - // interval of 0 the reverse proxy will buffer content sent on any connection - // with transfer-encoding=chunked. 
- // TODO: Flush after each chunk so the client doesn't suffer a 100ms latency per - // watch event. - ReverseProxyFlushInterval = 100 * time.Millisecond -) - -// FilterServer rejects requests which don't match one of the specified regular expressions -type FilterServer struct { - // Only paths that match this regexp will be accepted - AcceptPaths []*regexp.Regexp - // Paths that match this regexp will be rejected, even if they match the above - RejectPaths []*regexp.Regexp - // Hosts are required to match this list of regexp - AcceptHosts []*regexp.Regexp - // Methods that match this regexp are rejected - RejectMethods []*regexp.Regexp - // The delegate to call to handle accepted requests. - delegate http.Handler -} - -// Splits a comma separated list of regexps into an array of Regexp objects. -func MakeRegexpArray(str string) ([]*regexp.Regexp, error) { - parts := strings.Split(str, ",") - result := make([]*regexp.Regexp, len(parts)) - for ix := range parts { - re, err := regexp.Compile(parts[ix]) - if err != nil { - return nil, err - } - result[ix] = re - } - return result, nil -} - -func MakeRegexpArrayOrDie(str string) []*regexp.Regexp { - result, err := MakeRegexpArray(str) - if err != nil { - glog.Fatalf("Error compiling re: %v", err) - } - return result -} - -func matchesRegexp(str string, regexps []*regexp.Regexp) bool { - for _, re := range regexps { - if re.MatchString(str) { - glog.V(6).Infof("%v matched %s", str, re) - return true - } - } - return false -} - -func (f *FilterServer) accept(method, path, host string) bool { - if matchesRegexp(path, f.RejectPaths) { - glog.V(3).Infof("Filter rejecting %v %v %v", method, path, host) - return false - } - if matchesRegexp(method, f.RejectMethods) { - glog.V(3).Infof("Filter rejecting %v %v %v", method, path, host) - return false - } - if matchesRegexp(path, f.AcceptPaths) && matchesRegexp(host, f.AcceptHosts) { - glog.V(3).Infof("Filter accepting %v %v %v", method, path, host) - return true - } - glog.V(3).Infof("Filter rejecting %v %v %v", method, path, host) - return false -} - -// Make a copy of f which passes requests along to the new delegate. -func (f *FilterServer) HandlerFor(delegate http.Handler) *FilterServer { - f2 := *f - f2.delegate = delegate - return &f2 -} - -// Get host from a host header value like "localhost" or "localhost:8080" -func extractHost(header string) (host string) { - host, _, err := net.SplitHostPort(header) - if err != nil { - host = header - } - return host -} - -func (f *FilterServer) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - host := extractHost(req.Host) - if f.accept(req.Method, req.URL.Path, host) { - f.delegate.ServeHTTP(rw, req) - return - } - rw.WriteHeader(http.StatusForbidden) - rw.Write([]byte("
<h3>Unauthorized</h3>
")) -} - -// ProxyServer is a http.Handler which proxies Kubernetes APIs to remote API server. -type ProxyServer struct { - handler http.Handler -} - -// NewProxyServer creates and installs a new ProxyServer. -// It automatically registers the created ProxyServer to http.DefaultServeMux. -// 'filter', if non-nil, protects requests to the api only. -func NewProxyServer(filebase string, apiProxyPrefix string, staticPrefix string, filter *FilterServer, cfg *restclient.Config) (*ProxyServer, error) { - host := cfg.Host - if !strings.HasSuffix(host, "/") { - host = host + "/" - } - target, err := url.Parse(host) - if err != nil { - return nil, err - } - proxy := newProxy(target) - if proxy.Transport, err = restclient.TransportFor(cfg); err != nil { - return nil, err - } - proxyServer := http.Handler(proxy) - if filter != nil { - proxyServer = filter.HandlerFor(proxyServer) - } - - if !strings.HasPrefix(apiProxyPrefix, "/api") { - proxyServer = stripLeaveSlash(apiProxyPrefix, proxyServer) - } - - mux := http.NewServeMux() - mux.Handle(apiProxyPrefix, proxyServer) - if filebase != "" { - // Require user to explicitly request this behavior rather than - // serving their working directory by default. - mux.Handle(staticPrefix, newFileHandler(staticPrefix, filebase)) - } - return &ProxyServer{handler: mux}, nil -} - -// Listen is a simple wrapper around net.Listen. -func (s *ProxyServer) Listen(address string, port int) (net.Listener, error) { - return net.Listen("tcp", fmt.Sprintf("%s:%d", address, port)) -} - -// ListenUnix does net.Listen for a unix socket -func (s *ProxyServer) ListenUnix(path string) (net.Listener, error) { - // Remove any socket, stale or not, but fall through for other files - fi, err := os.Stat(path) - if err == nil && (fi.Mode()&os.ModeSocket) != 0 { - os.Remove(path) - } - // Default to only user accessible socket, caller can open up later if desired - oldmask, _ := util.Umask(0077) - l, err := net.Listen("unix", path) - util.Umask(oldmask) - return l, err -} - -// Serve starts the server using given listener, loops forever. -func (s *ProxyServer) ServeOnListener(l net.Listener) error { - server := http.Server{ - Handler: s.handler, - } - return server.Serve(l) -} - -func newProxy(target *url.URL) *httputil.ReverseProxy { - director := func(req *http.Request) { - req.URL.Scheme = target.Scheme - req.URL.Host = target.Host - req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path) - } - return &httputil.ReverseProxy{Director: director, FlushInterval: ReverseProxyFlushInterval} -} - -func newFileHandler(prefix, base string) http.Handler { - return http.StripPrefix(prefix, http.FileServer(http.Dir(base))) -} - -func singleJoiningSlash(a, b string) string { - aslash := strings.HasSuffix(a, "/") - bslash := strings.HasPrefix(b, "/") - switch { - case aslash && bslash: - return a + b[1:] - case !aslash && !bslash: - return a + "/" + b - } - return a + b -} - -// like http.StripPrefix, but always leaves an initial slash. (so that our -// regexps will work.) 
-func stripLeaveSlash(prefix string, h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - p := strings.TrimPrefix(req.URL.Path, prefix) - if len(p) >= len(req.URL.Path) { - http.NotFound(w, req) - return - } - if len(p) > 0 && p[:1] != "/" { - p = "/" + p - } - req.URL.Path = p - h.ServeHTTP(w, req) - }) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/quota.go b/vendor/k8s.io/kubernetes/pkg/kubectl/quota.go deleted file mode 100644 index 1261aba20b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/quota.go +++ /dev/null @@ -1,125 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/runtime" -) - -// ResourceQuotaGeneratorV1 supports stable generation of a resource quota -type ResourceQuotaGeneratorV1 struct { - // The name of a quota object. - Name string - - // The hard resource limit string before parsing. - Hard string - - // The scopes of a quota object before parsing. - Scopes string -} - -// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern -func (g ResourceQuotaGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"hard", true}, - {"scopes", false}, - } -} - -// Ensure it supports the generator pattern that uses parameter injection -var _ Generator = &ResourceQuotaGeneratorV1{} - -// Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &ResourceQuotaGeneratorV1{} - -func (g ResourceQuotaGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(g.ParamNames(), genericParams) - if err != nil { - return nil, err - } - - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - - delegate := &ResourceQuotaGeneratorV1{} - delegate.Name = params["name"] - delegate.Hard = params["hard"] - delegate.Scopes = params["scopes"] - return delegate.StructuredGenerate() -} - -// StructuredGenerate outputs a ResourceQuota object using the configured fields -func (g *ResourceQuotaGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := g.validate(); err != nil { - return nil, err - } - - resourceList, err := populateResourceList(g.Hard) - if err != nil { - return nil, err - } - - scopes, err := parseScopes(g.Scopes) - if err != nil { - return nil, err - } - - resourceQuota := &api.ResourceQuota{} - resourceQuota.Name = g.Name - resourceQuota.Spec.Hard = resourceList - resourceQuota.Spec.Scopes = scopes - return resourceQuota, nil -} - -// validate validates required fields are set to support structured generation -func (r *ResourceQuotaGeneratorV1) validate() error { - if len(r.Name) == 0 { - return 
fmt.Errorf("name must be specified") - } - return nil -} - -func parseScopes(spec string) ([]api.ResourceQuotaScope, error) { - // empty input gets a nil response to preserve generator test expected behaviors - if spec == "" { - return nil, nil - } - - scopes := strings.Split(spec, ",") - result := make([]api.ResourceQuotaScope, 0, len(scopes)) - for _, scope := range scopes { - // intentionally do not verify the scope against the valid scope list. This is done by the apiserver anyway. - - if scope == "" { - return nil, fmt.Errorf("invalid resource quota scope \"\"") - } - - result = append(result, api.ResourceQuotaScope(scope)) - } - return result, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/BUILD deleted file mode 100644 index ede130505d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/BUILD +++ /dev/null @@ -1,80 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "builder.go", - "doc.go", - "helper.go", - "interfaces.go", - "mapper.go", - "result.go", - "selector.go", - "visitor.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/validation:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/client/restclient:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/errors:go_default_library", - "//pkg/util/sets:go_default_library", - "//pkg/util/yaml:go_default_library", - "//pkg/watch:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "builder_test.go", - "helper_test.go", - "visitor_test.go", - ], - data = [ - "//examples:config", - "//test/fixtures", - ], - library = "go_default_library", - tags = [ - "automanaged", - "skip", - ], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/resource:go_default_library", - "//pkg/api/testapi:go_default_library", - "//pkg/api/testing:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/client/restclient/fake:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/runtime/serializer/streaming:go_default_library", - "//pkg/util/errors:go_default_library", - "//pkg/util/testing:go_default_library", - "//pkg/watch:go_default_library", - "//pkg/watch/versioned:go_default_library", - "//vendor:github.com/ghodss/yaml", - "//vendor:github.com/stretchr/testify/assert", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/builder.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/builder.go deleted file mode 100644 index 49ce6aa748..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/builder.go +++ /dev/null @@ -1,805 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "fmt" - "io" - "net/url" - "os" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/validation" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/sets" -) - -var FileExtensions = []string{".json", ".yaml", ".yml"} -var InputExtensions = append(FileExtensions, "stdin") - -const defaultHttpGetAttempts int = 3 - -// Builder provides convenience functions for taking arguments and parameters -// from the command line and converting them to a list of resources to iterate -// over using the Visitor interface. -type Builder struct { - mapper *Mapper - - errs []error - - paths []Visitor - stream bool - dir bool - - selector labels.Selector - selectAll bool - - resources []string - - namespace string - allNamespace bool - names []string - - resourceTuples []resourceTuple - - defaultNamespace bool - requireNamespace bool - - flatten bool - latest bool - - requireObject bool - - singleResourceType bool - continueOnError bool - - singular bool - - export bool - - schema validation.Schema -} - -var missingResourceError = fmt.Errorf(`You must provide one or more resources by argument or filename. -Example resource specifications include: - '-f rsrc.yaml' - '--filename=rsrc.json' - 'pods my-pod' - 'services'`) - -// TODO: expand this to include other errors. -func IsUsageError(err error) bool { - if err == nil { - return false - } - return err == missingResourceError -} - -type FilenameOptions struct { - Filenames []string - Recursive bool -} - -type resourceTuple struct { - Resource string - Name string -} - -// NewBuilder creates a builder that operates on generic objects. -func NewBuilder(mapper meta.RESTMapper, typer runtime.ObjectTyper, clientMapper ClientMapper, decoder runtime.Decoder) *Builder { - return &Builder{ - mapper: &Mapper{typer, mapper, clientMapper, decoder}, - requireObject: true, - } -} - -func (b *Builder) Schema(schema validation.Schema) *Builder { - b.schema = schema - return b -} - -// FilenameParam groups input in two categories: URLs and files (files, directories, STDIN) -// If enforceNamespace is false, namespaces in the specs will be allowed to -// override the default namespace. If it is true, namespaces that don't match -// will cause an error. -// If ContinueOnError() is set prior to this method, objects on the path that are not -// recognized will be ignored (but logged at V(2)). 
-func (b *Builder) FilenameParam(enforceNamespace bool, filenameOptions *FilenameOptions) *Builder { - recursive := filenameOptions.Recursive - paths := filenameOptions.Filenames - for _, s := range paths { - switch { - case s == "-": - b.Stdin() - case strings.Index(s, "http://") == 0 || strings.Index(s, "https://") == 0: - url, err := url.Parse(s) - if err != nil { - b.errs = append(b.errs, fmt.Errorf("the URL passed to filename %q is not valid: %v", s, err)) - continue - } - b.URL(defaultHttpGetAttempts, url) - default: - if !recursive { - b.singular = true - } - b.Path(recursive, s) - } - } - - if enforceNamespace { - b.RequireNamespace() - } - - return b -} - -// URL accepts a number of URLs directly. -func (b *Builder) URL(httpAttemptCount int, urls ...*url.URL) *Builder { - for _, u := range urls { - b.paths = append(b.paths, &URLVisitor{ - URL: u, - StreamVisitor: NewStreamVisitor(nil, b.mapper, u.String(), b.schema), - HttpAttemptCount: httpAttemptCount, - }) - } - return b -} - -// Stdin will read objects from the standard input. If ContinueOnError() is set -// prior to this method being called, objects in the stream that are unrecognized -// will be ignored (but logged at V(2)). -func (b *Builder) Stdin() *Builder { - b.stream = true - b.paths = append(b.paths, FileVisitorForSTDIN(b.mapper, b.schema)) - return b -} - -// Stream will read objects from the provided reader, and if an error occurs will -// include the name string in the error message. If ContinueOnError() is set -// prior to this method being called, objects in the stream that are unrecognized -// will be ignored (but logged at V(2)). -func (b *Builder) Stream(r io.Reader, name string) *Builder { - b.stream = true - b.paths = append(b.paths, NewStreamVisitor(r, b.mapper, name, b.schema)) - return b -} - -// Path accepts a set of paths that may be files, directories (all can containing -// one or more resources). Creates a FileVisitor for each file and then each -// FileVisitor is streaming the content to a StreamVisitor. If ContinueOnError() is set -// prior to this method being called, objects on the path that are unrecognized will be -// ignored (but logged at V(2)). -func (b *Builder) Path(recursive bool, paths ...string) *Builder { - for _, p := range paths { - _, err := os.Stat(p) - if os.IsNotExist(err) { - b.errs = append(b.errs, fmt.Errorf("the path %q does not exist", p)) - continue - } - if err != nil { - b.errs = append(b.errs, fmt.Errorf("the path %q cannot be accessed: %v", p, err)) - continue - } - - visitors, err := ExpandPathsToFileVisitors(b.mapper, p, recursive, FileExtensions, b.schema) - if err != nil { - b.errs = append(b.errs, fmt.Errorf("error reading %q: %v", p, err)) - } - if len(visitors) > 1 { - b.dir = true - } - - b.paths = append(b.paths, visitors...) - } - return b -} - -// ResourceTypes is a list of types of resources to operate on, when listing objects on -// the server or retrieving objects that match a selector. -func (b *Builder) ResourceTypes(types ...string) *Builder { - b.resources = append(b.resources, types...) 
- return b -} - -// ResourceNames accepts a default type and one or more names, and creates tuples of -// resources -func (b *Builder) ResourceNames(resource string, names ...string) *Builder { - for _, name := range names { - // See if this input string is of type/name format - tuple, ok, err := splitResourceTypeName(name) - if err != nil { - b.errs = append(b.errs, err) - return b - } - - if ok { - b.resourceTuples = append(b.resourceTuples, tuple) - continue - } - if len(resource) == 0 { - b.errs = append(b.errs, fmt.Errorf("the argument %q must be RESOURCE/NAME", name)) - continue - } - - // Use the given default type to create a resource tuple - b.resourceTuples = append(b.resourceTuples, resourceTuple{Resource: resource, Name: name}) - } - return b -} - -// SelectorParam defines a selector that should be applied to the object types to load. -// This will not affect files loaded from disk or URL. If the parameter is empty it is -// a no-op - to select all resources invoke `b.Selector(labels.Everything)`. -func (b *Builder) SelectorParam(s string) *Builder { - selector, err := labels.Parse(s) - if err != nil { - b.errs = append(b.errs, fmt.Errorf("the provided selector %q is not valid: %v", s, err)) - return b - } - if selector.Empty() { - return b - } - if b.selectAll { - b.errs = append(b.errs, fmt.Errorf("found non empty selector %q with previously set 'all' parameter. ", s)) - return b - } - return b.Selector(selector) -} - -// Selector accepts a selector directly, and if non nil will trigger a list action. -func (b *Builder) Selector(selector labels.Selector) *Builder { - b.selector = selector - return b -} - -// ExportParam accepts the export boolean for these resources -func (b *Builder) ExportParam(export bool) *Builder { - b.export = export - return b -} - -// NamespaceParam accepts the namespace that these resources should be -// considered under from - used by DefaultNamespace() and RequireNamespace() -func (b *Builder) NamespaceParam(namespace string) *Builder { - b.namespace = namespace - return b -} - -// DefaultNamespace instructs the builder to set the namespace value for any object found -// to NamespaceParam() if empty. -func (b *Builder) DefaultNamespace() *Builder { - b.defaultNamespace = true - return b -} - -// AllNamespaces instructs the builder to use NamespaceAll as a namespace to request resources -// across all of the namespace. This overrides the namespace set by NamespaceParam(). -func (b *Builder) AllNamespaces(allNamespace bool) *Builder { - if allNamespace { - b.namespace = api.NamespaceAll - } - b.allNamespace = allNamespace - return b -} - -// RequireNamespace instructs the builder to set the namespace value for any object found -// to NamespaceParam() if empty, and if the value on the resource does not match -// NamespaceParam() an error will be returned. -func (b *Builder) RequireNamespace() *Builder { - b.requireNamespace = true - return b -} - -// SelectEverythingParam -func (b *Builder) SelectAllParam(selectAll bool) *Builder { - if selectAll && b.selector != nil { - b.errs = append(b.errs, fmt.Errorf("setting 'all' parameter but found a non empty selector. ")) - return b - } - b.selectAll = selectAll - return b -} - -// ResourceTypeOrNameArgs indicates that the builder should accept arguments -// of the form `([,,...]| [,,...])`. When one argument is -// received, the types provided will be retrieved from the server (and be comma delimited). -// When two or more arguments are received, they must be a single type and resource name(s). 
-// The allowEmptySelector permits to select all the resources (via Everything func). -func (b *Builder) ResourceTypeOrNameArgs(allowEmptySelector bool, args ...string) *Builder { - args = normalizeMultipleResourcesArgs(args) - if ok, err := hasCombinedTypeArgs(args); ok { - if err != nil { - b.errs = append(b.errs, err) - return b - } - for _, s := range args { - tuple, ok, err := splitResourceTypeName(s) - if err != nil { - b.errs = append(b.errs, err) - return b - } - if ok { - b.resourceTuples = append(b.resourceTuples, tuple) - } - } - return b - } - if len(args) > 0 { - // Try replacing aliases only in types - args[0] = b.ReplaceAliases(args[0]) - } - switch { - case len(args) > 2: - b.names = append(b.names, args[1:]...) - b.ResourceTypes(SplitResourceArgument(args[0])...) - case len(args) == 2: - b.names = append(b.names, args[1]) - b.ResourceTypes(SplitResourceArgument(args[0])...) - case len(args) == 1: - b.ResourceTypes(SplitResourceArgument(args[0])...) - if b.selector == nil && allowEmptySelector { - b.selector = labels.Everything() - } - case len(args) == 0: - default: - b.errs = append(b.errs, fmt.Errorf("arguments must consist of a resource or a resource and name")) - } - return b -} - -// ReplaceAliases accepts an argument and tries to expand any existing -// aliases found in it -func (b *Builder) ReplaceAliases(input string) string { - replaced := []string{} - for _, arg := range strings.Split(input, ",") { - if aliases, ok := b.mapper.AliasesForResource(arg); ok { - arg = strings.Join(aliases, ",") - } - replaced = append(replaced, arg) - } - return strings.Join(replaced, ",") -} - -func hasCombinedTypeArgs(args []string) (bool, error) { - hasSlash := 0 - for _, s := range args { - if strings.Contains(s, "/") { - hasSlash++ - } - } - switch { - case hasSlash > 0 && hasSlash == len(args): - return true, nil - case hasSlash > 0 && hasSlash != len(args): - baseCmd := "cmd" - if len(os.Args) > 0 { - baseCmdSlice := strings.Split(os.Args[0], "/") - baseCmd = baseCmdSlice[len(baseCmdSlice)-1] - } - return true, fmt.Errorf("there is no need to specify a resource type as a separate argument when passing arguments in resource/name form (e.g. '%s get resource/' instead of '%s get resource resource/'", baseCmd, baseCmd) - default: - return false, nil - } -} - -// Normalize args convert multiple resources to resource tuples, a,b,c d -// as a transform to a/d b/d c/d -func normalizeMultipleResourcesArgs(args []string) []string { - if len(args) >= 2 { - resources := []string{} - resources = append(resources, SplitResourceArgument(args[0])...) - if len(resources) > 1 { - names := []string{} - names = append(names, args[1:]...) 
- newArgs := []string{} - for _, resource := range resources { - for _, name := range names { - newArgs = append(newArgs, strings.Join([]string{resource, name}, "/")) - } - } - return newArgs - } - } - return args -} - -// splitResourceTypeName handles type/name resource formats and returns a resource tuple -// (empty or not), whether it successfully found one, and an error -func splitResourceTypeName(s string) (resourceTuple, bool, error) { - if !strings.Contains(s, "/") { - return resourceTuple{}, false, nil - } - seg := strings.Split(s, "/") - if len(seg) != 2 { - return resourceTuple{}, false, fmt.Errorf("arguments in resource/name form may not have more than one slash") - } - resource, name := seg[0], seg[1] - if len(resource) == 0 || len(name) == 0 || len(SplitResourceArgument(resource)) != 1 { - return resourceTuple{}, false, fmt.Errorf("arguments in resource/name form must have a single resource and name") - } - return resourceTuple{Resource: resource, Name: name}, true, nil -} - -// Flatten will convert any objects with a field named "Items" that is an array of runtime.Object -// compatible types into individual entries and give them their own items. The original object -// is not passed to any visitors. -func (b *Builder) Flatten() *Builder { - b.flatten = true - return b -} - -// Latest will fetch the latest copy of any objects loaded from URLs or files from the server. -func (b *Builder) Latest() *Builder { - b.latest = true - return b -} - -// RequireObject ensures that resulting infos have an object set. If false, resulting info may not have an object set. -func (b *Builder) RequireObject(require bool) *Builder { - b.requireObject = require - return b -} - -// ContinueOnError will attempt to load and visit as many objects as possible, even if some visits -// return errors or some objects cannot be loaded. The default behavior is to terminate after -// the first error is returned from a VisitorFunc. -func (b *Builder) ContinueOnError() *Builder { - b.continueOnError = true - return b -} - -// SingleResourceType will cause the builder to error if the user specifies more than a single type -// of resource. -func (b *Builder) SingleResourceType() *Builder { - b.singleResourceType = true - return b -} - -// mappingFor returns the RESTMapping for the Kind referenced by the resource. -// prefers a fully specified GroupVersionResource match. 
If we don't have one match on GroupResource -func (b *Builder) mappingFor(resourceArg string) (*meta.RESTMapping, error) { - fullySpecifiedGVR, groupResource := unversioned.ParseResourceArg(resourceArg) - gvk := unversioned.GroupVersionKind{} - if fullySpecifiedGVR != nil { - gvk, _ = b.mapper.KindFor(*fullySpecifiedGVR) - } - if gvk.Empty() { - var err error - gvk, err = b.mapper.KindFor(groupResource.WithVersion("")) - if err != nil { - return nil, err - } - } - - return b.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) -} - -func (b *Builder) resourceMappings() ([]*meta.RESTMapping, error) { - if len(b.resources) > 1 && b.singleResourceType { - return nil, fmt.Errorf("you may only specify a single resource type") - } - mappings := []*meta.RESTMapping{} - for _, r := range b.resources { - mapping, err := b.mappingFor(r) - if err != nil { - return nil, err - } - - mappings = append(mappings, mapping) - } - return mappings, nil -} - -func (b *Builder) resourceTupleMappings() (map[string]*meta.RESTMapping, error) { - mappings := make(map[string]*meta.RESTMapping) - canonical := make(map[string]struct{}) - for _, r := range b.resourceTuples { - if _, ok := mappings[r.Resource]; ok { - continue - } - mapping, err := b.mappingFor(r.Resource) - if err != nil { - return nil, err - } - - mappings[mapping.Resource] = mapping - mappings[r.Resource] = mapping - canonical[mapping.Resource] = struct{}{} - } - if len(canonical) > 1 && b.singleResourceType { - return nil, fmt.Errorf("you may only specify a single resource type") - } - return mappings, nil -} - -func (b *Builder) visitorResult() *Result { - if len(b.errs) > 0 { - return &Result{err: utilerrors.NewAggregate(b.errs)} - } - - if b.selectAll { - b.selector = labels.Everything() - } - - // visit items specified by paths - if len(b.paths) != 0 { - return b.visitByPaths() - } - - // visit selectors - if b.selector != nil { - return b.visitBySelector() - } - - // visit items specified by resource and name - if len(b.resourceTuples) != 0 { - return b.visitByResource() - } - - // visit items specified by name - if len(b.names) != 0 { - return b.visitByName() - } - - if len(b.resources) != 0 { - return &Result{err: fmt.Errorf("resource(s) were provided, but no name, label selector, or --all flag specified")} - } - return &Result{err: missingResourceError} -} - -func (b *Builder) visitBySelector() *Result { - if len(b.names) != 0 { - return &Result{err: fmt.Errorf("name cannot be provided when a selector is specified")} - } - if len(b.resourceTuples) != 0 { - return &Result{err: fmt.Errorf("selectors and the all flag cannot be used when passing resource/name arguments")} - } - if len(b.resources) == 0 { - return &Result{err: fmt.Errorf("at least one resource must be specified to use a selector")} - } - mappings, err := b.resourceMappings() - if err != nil { - return &Result{err: err} - } - - visitors := []Visitor{} - for _, mapping := range mappings { - client, err := b.mapper.ClientForMapping(mapping) - if err != nil { - return &Result{err: err} - } - selectorNamespace := b.namespace - if mapping.Scope.Name() != meta.RESTScopeNameNamespace { - selectorNamespace = "" - } - visitors = append(visitors, NewSelector(client, mapping, selectorNamespace, b.selector, b.export)) - } - if b.continueOnError { - return &Result{visitor: EagerVisitorList(visitors), sources: visitors} - } - return &Result{visitor: VisitorList(visitors), sources: visitors} -} - -func (b *Builder) visitByResource() *Result { - // if b.singular is false, this could be by default, so 
double-check length - // of resourceTuples to determine if in fact it is singular or not - isSingular := b.singular - if !isSingular { - isSingular = len(b.resourceTuples) == 1 - } - - if len(b.resources) != 0 { - return &Result{singular: isSingular, err: fmt.Errorf("you may not specify individual resources and bulk resources in the same call")} - } - - // retrieve one client for each resource - mappings, err := b.resourceTupleMappings() - if err != nil { - return &Result{singular: isSingular, err: err} - } - clients := make(map[string]RESTClient) - for _, mapping := range mappings { - s := fmt.Sprintf("%s/%s", mapping.GroupVersionKind.GroupVersion().String(), mapping.Resource) - if _, ok := clients[s]; ok { - continue - } - client, err := b.mapper.ClientForMapping(mapping) - if err != nil { - return &Result{err: err} - } - clients[s] = client - } - - items := []Visitor{} - for _, tuple := range b.resourceTuples { - mapping, ok := mappings[tuple.Resource] - if !ok { - return &Result{singular: isSingular, err: fmt.Errorf("resource %q is not recognized: %v", tuple.Resource, mappings)} - } - s := fmt.Sprintf("%s/%s", mapping.GroupVersionKind.GroupVersion().String(), mapping.Resource) - client, ok := clients[s] - if !ok { - return &Result{singular: isSingular, err: fmt.Errorf("could not find a client for resource %q", tuple.Resource)} - } - - selectorNamespace := b.namespace - if mapping.Scope.Name() != meta.RESTScopeNameNamespace { - selectorNamespace = "" - } else { - if len(b.namespace) == 0 { - errMsg := "namespace may not be empty when retrieving a resource by name" - if b.allNamespace { - errMsg = "a resource cannot be retrieved by name across all namespaces" - } - return &Result{singular: isSingular, err: fmt.Errorf(errMsg)} - } - } - - info := NewInfo(client, mapping, selectorNamespace, tuple.Name, b.export) - items = append(items, info) - } - - var visitors Visitor - if b.continueOnError { - visitors = EagerVisitorList(items) - } else { - visitors = VisitorList(items) - } - return &Result{singular: isSingular, visitor: visitors, sources: items} -} - -func (b *Builder) visitByName() *Result { - isSingular := len(b.names) == 1 - - if len(b.paths) != 0 { - return &Result{singular: isSingular, err: fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify a resource by arguments as well")} - } - if len(b.resources) == 0 { - return &Result{singular: isSingular, err: fmt.Errorf("you must provide a resource and a resource name together")} - } - if len(b.resources) > 1 { - return &Result{singular: isSingular, err: fmt.Errorf("you must specify only one resource")} - } - - mappings, err := b.resourceMappings() - if err != nil { - return &Result{singular: isSingular, err: err} - } - mapping := mappings[0] - - client, err := b.mapper.ClientForMapping(mapping) - if err != nil { - return &Result{err: err} - } - - selectorNamespace := b.namespace - if mapping.Scope.Name() != meta.RESTScopeNameNamespace { - selectorNamespace = "" - } else { - if len(b.namespace) == 0 { - errMsg := "namespace may not be empty when retrieving a resource by name" - if b.allNamespace { - errMsg = "a resource cannot be retrieved by name across all namespaces" - } - return &Result{singular: isSingular, err: fmt.Errorf(errMsg)} - } - } - - visitors := []Visitor{} - for _, name := range b.names { - info := NewInfo(client, mapping, selectorNamespace, name, b.export) - visitors = append(visitors, info) - } - return &Result{singular: isSingular, visitor: VisitorList(visitors), sources: visitors} -} - 
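For reference, the Builder being deleted above is normally driven as a fluent chain and consumed through the Result it returns. A minimal sketch, assuming the four constructor arguments are supplied by the caller (in kubectl they come from the command Factory); visitManifest and rsrc.yaml are placeholder names, not part of this file, and only Builder/Result methods defined above are used:

    package example

    import (
    	"fmt"

    	"k8s.io/kubernetes/pkg/api/meta"
    	"k8s.io/kubernetes/pkg/kubectl/resource"
    	"k8s.io/kubernetes/pkg/runtime"
    )

    // visitManifest is a hypothetical helper: mapper, typer, clientMapper and
    // decoder are assumed to be provided by the caller. It builds a visitor over
    // the objects in rsrc.yaml and prints each resolved resource.
    func visitManifest(mapper meta.RESTMapper, typer runtime.ObjectTyper,
        clientMapper resource.ClientMapper, decoder runtime.Decoder) error {
        r := resource.NewBuilder(mapper, typer, clientMapper, decoder).
            NamespaceParam("default").DefaultNamespace().
            FilenameParam(false, &resource.FilenameOptions{Filenames: []string{"rsrc.yaml"}}).
            Flatten().
            ContinueOnError().
            Do()
        // Visit walks every resource the builder identified; with ContinueOnError
        // set, per-item errors are aggregated rather than aborting the walk.
        return r.Visit(func(info *resource.Info, err error) error {
            if err != nil {
                return err
            }
            // Each Info carries the mapping, namespace and name resolved by the builder.
            fmt.Printf("%s %s/%s\n", info.Mapping.Resource, info.Namespace, info.Name)
            return nil
        })
    }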
-func (b *Builder) visitByPaths() *Result { - singular := !b.dir && !b.stream && len(b.paths) == 1 - if len(b.resources) != 0 { - return &Result{singular: singular, err: fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify resource arguments as well")} - } - if len(b.names) != 0 { - return &Result{err: fmt.Errorf("name cannot be provided when a path is specified")} - } - if len(b.resourceTuples) != 0 { - return &Result{err: fmt.Errorf("resource/name arguments cannot be provided when a path is specified")} - } - - var visitors Visitor - if b.continueOnError { - visitors = EagerVisitorList(b.paths) - } else { - visitors = VisitorList(b.paths) - } - - // only items from disk can be refetched - if b.latest { - // must flatten lists prior to fetching - if b.flatten { - visitors = NewFlattenListVisitor(visitors, b.mapper) - } - // must set namespace prior to fetching - if b.defaultNamespace { - visitors = NewDecoratedVisitor(visitors, SetNamespace(b.namespace)) - } - visitors = NewDecoratedVisitor(visitors, RetrieveLatest) - } - if b.selector != nil { - visitors = NewFilteredVisitor(visitors, FilterBySelector(b.selector)) - } - return &Result{singular: singular, visitor: visitors, sources: b.paths} -} - -// Do returns a Result object with a Visitor for the resources identified by the Builder. -// The visitor will respect the error behavior specified by ContinueOnError. Note that stream -// inputs are consumed by the first execution - use Infos() or Object() on the Result to capture a list -// for further iteration. -func (b *Builder) Do() *Result { - r := b.visitorResult() - if r.err != nil { - return r - } - if b.flatten { - r.visitor = NewFlattenListVisitor(r.visitor, b.mapper) - } - helpers := []VisitorFunc{} - if b.defaultNamespace { - helpers = append(helpers, SetNamespace(b.namespace)) - } - if b.requireNamespace { - helpers = append(helpers, RequireNamespace(b.namespace)) - } - helpers = append(helpers, FilterNamespace) - if b.requireObject { - helpers = append(helpers, RetrieveLazy) - } - r.visitor = NewDecoratedVisitor(r.visitor, helpers...) - if b.continueOnError { - r.visitor = ContinueOnErrorVisitor{r.visitor} - } - return r -} - -// SplitResourceArgument splits the argument with commas and returns unique -// strings in the original order. -func SplitResourceArgument(arg string) []string { - out := []string{} - set := sets.NewString() - for _, s := range strings.Split(arg, ",") { - if set.Has(s) { - continue - } - set.Insert(s) - out = append(out, s) - } - return out -} - -// HasNames returns true if the provided args contain resource names -func HasNames(args []string) (bool, error) { - args = normalizeMultipleResourcesArgs(args) - hasCombinedTypes, err := hasCombinedTypeArgs(args) - if err != nil { - return false, err - } - return hasCombinedTypes || len(args) > 1, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/doc.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/doc.go deleted file mode 100644 index a0e22e7cf7..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package resource assists clients in dealing with RESTful objects that match the -// Kubernetes API conventions. The Helper object provides simple CRUD operations -// on resources. The Visitor interface makes it easy to deal with multiple resources -// in bulk for retrieval and operation. The Builder object simplifies converting -// standard command line arguments and parameters into a Visitor that can iterate -// over all of the identified resources, whether on the server or on the local -// filesystem. -package resource diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/helper.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/helper.go deleted file mode 100644 index 6b5f8d639b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/helper.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "strconv" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/watch" -) - -// Helper provides methods for retrieving or mutating a RESTful -// resource. -type Helper struct { - // The name of this resource as the server would recognize it - Resource string - // A RESTClient capable of mutating this resource. - RESTClient RESTClient - // An interface for reading or writing the resource version of this - // type. - Versioner runtime.ResourceVersioner - // True if the resource type is scoped to namespaces - NamespaceScoped bool -} - -// NewHelper creates a Helper from a ResourceMapping -func NewHelper(client RESTClient, mapping *meta.RESTMapping) *Helper { - return &Helper{ - Resource: mapping.Resource, - RESTClient: client, - Versioner: mapping.MetadataAccessor, - NamespaceScoped: mapping.Scope.Name() == meta.RESTScopeNameNamespace, - } -} - -func (m *Helper) Get(namespace, name string, export bool) (runtime.Object, error) { - req := m.RESTClient.Get(). - NamespaceIfScoped(namespace, m.NamespaceScoped). - Resource(m.Resource). - Name(name) - if export { - req.Param("export", strconv.FormatBool(export)) - } - return req.Do().Get() -} - -// TODO: add field selector -func (m *Helper) List(namespace, apiVersion string, selector labels.Selector, export bool) (runtime.Object, error) { - req := m.RESTClient.Get(). - NamespaceIfScoped(namespace, m.NamespaceScoped). - Resource(m.Resource). 
- LabelsSelectorParam(selector) - if export { - req.Param("export", strconv.FormatBool(export)) - } - return req.Do().Get() -} - -func (m *Helper) Watch(namespace, resourceVersion, apiVersion string, labelSelector labels.Selector) (watch.Interface, error) { - return m.RESTClient.Get(). - Prefix("watch"). - NamespaceIfScoped(namespace, m.NamespaceScoped). - Resource(m.Resource). - Param("resourceVersion", resourceVersion). - LabelsSelectorParam(labelSelector). - Watch() -} - -func (m *Helper) WatchSingle(namespace, name, resourceVersion string) (watch.Interface, error) { - return m.RESTClient.Get(). - Prefix("watch"). - NamespaceIfScoped(namespace, m.NamespaceScoped). - Resource(m.Resource). - Name(name). - Param("resourceVersion", resourceVersion). - Watch() -} - -func (m *Helper) Delete(namespace, name string) error { - return m.RESTClient.Delete(). - NamespaceIfScoped(namespace, m.NamespaceScoped). - Resource(m.Resource). - Name(name). - Do(). - Error() -} - -func (m *Helper) Create(namespace string, modify bool, obj runtime.Object) (runtime.Object, error) { - if modify { - // Attempt to version the object based on client logic. - version, err := m.Versioner.ResourceVersion(obj) - if err != nil { - // We don't know how to clear the version on this object, so send it to the server as is - return m.createResource(m.RESTClient, m.Resource, namespace, obj) - } - if version != "" { - if err := m.Versioner.SetResourceVersion(obj, ""); err != nil { - return nil, err - } - } - } - - return m.createResource(m.RESTClient, m.Resource, namespace, obj) -} - -func (m *Helper) createResource(c RESTClient, resource, namespace string, obj runtime.Object) (runtime.Object, error) { - return c.Post().NamespaceIfScoped(namespace, m.NamespaceScoped).Resource(resource).Body(obj).Do().Get() -} -func (m *Helper) Patch(namespace, name string, pt api.PatchType, data []byte) (runtime.Object, error) { - return m.RESTClient.Patch(pt). - NamespaceIfScoped(namespace, m.NamespaceScoped). - Resource(m.Resource). - Name(name). - Body(data). - Do(). - Get() -} - -func (m *Helper) Replace(namespace, name string, overwrite bool, obj runtime.Object) (runtime.Object, error) { - c := m.RESTClient - - // Attempt to version the object based on client logic. 
- version, err := m.Versioner.ResourceVersion(obj) - if err != nil { - // We don't know how to version this object, so send it to the server as is - return m.replaceResource(c, m.Resource, namespace, name, obj) - } - if version == "" && overwrite { - // Retrieve the current version of the object to overwrite the server object - serverObj, err := c.Get().NamespaceIfScoped(namespace, m.NamespaceScoped).Resource(m.Resource).Name(name).Do().Get() - if err != nil { - // The object does not exist, but we want it to be created - return m.replaceResource(c, m.Resource, namespace, name, obj) - } - serverVersion, err := m.Versioner.ResourceVersion(serverObj) - if err != nil { - return nil, err - } - if err := m.Versioner.SetResourceVersion(obj, serverVersion); err != nil { - return nil, err - } - } - - return m.replaceResource(c, m.Resource, namespace, name, obj) -} - -func (m *Helper) replaceResource(c RESTClient, resource, namespace, name string, obj runtime.Object) (runtime.Object, error) { - return c.Put().NamespaceIfScoped(namespace, m.NamespaceScoped).Resource(resource).Name(name).Body(obj).Do().Get() -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/interfaces.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/interfaces.go deleted file mode 100644 index 7a872eb2e4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/interfaces.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - client "k8s.io/kubernetes/pkg/client/restclient" -) - -// RESTClient is a client helper for dealing with RESTful resources -// in a generic way. -type RESTClient interface { - Get() *client.Request - Post() *client.Request - Patch(api.PatchType) *client.Request - Delete() *client.Request - Put() *client.Request -} - -// ClientMapper abstracts retrieving a Client for mapped objects. -type ClientMapper interface { - ClientForMapping(mapping *meta.RESTMapping) (RESTClient, error) -} - -// ClientMapperFunc implements ClientMapper for a function -type ClientMapperFunc func(mapping *meta.RESTMapping) (RESTClient, error) - -// ClientForMapping implements ClientMapper -func (f ClientMapperFunc) ClientForMapping(mapping *meta.RESTMapping) (RESTClient, error) { - return f(mapping) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/mapper.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/mapper.go deleted file mode 100644 index 66f0da449f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/mapper.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "fmt" - "reflect" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// DisabledClientForMapping allows callers to avoid allowing remote calls when handling -// resources. -type DisabledClientForMapping struct { - ClientMapper -} - -func (f DisabledClientForMapping) ClientForMapping(mapping *meta.RESTMapping) (RESTClient, error) { - return nil, nil -} - -// Mapper is a convenience struct for holding references to the three interfaces -// needed to create Info for arbitrary objects. -type Mapper struct { - runtime.ObjectTyper - meta.RESTMapper - ClientMapper - runtime.Decoder -} - -// InfoForData creates an Info object for the given data. An error is returned -// if any of the decoding or client lookup steps fail. Name and namespace will be -// set into Info if the mapping's MetadataAccessor can retrieve them. -func (m *Mapper) InfoForData(data []byte, source string) (*Info, error) { - versions := &runtime.VersionedObjects{} - _, gvk, err := m.Decode(data, nil, versions) - if err != nil { - return nil, fmt.Errorf("unable to decode %q: %v", source, err) - } - - obj, versioned := versions.Last(), versions.First() - mapping, err := m.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, fmt.Errorf("unable to recognize %q: %v", source, err) - } - - client, err := m.ClientForMapping(mapping) - if err != nil { - return nil, fmt.Errorf("unable to connect to a server to handle %q: %v", mapping.Resource, err) - } - - name, _ := mapping.MetadataAccessor.Name(obj) - namespace, _ := mapping.MetadataAccessor.Namespace(obj) - resourceVersion, _ := mapping.MetadataAccessor.ResourceVersion(obj) - - return &Info{ - Mapping: mapping, - Client: client, - Namespace: namespace, - Name: name, - Source: source, - VersionedObject: versioned, - Object: obj, - ResourceVersion: resourceVersion, - }, nil -} - -// InfoForObject creates an Info object for the given Object. An error is returned -// if the object cannot be introspected. Name and namespace will be set into Info -// if the mapping's MetadataAccessor can retrieve them. 
-func (m *Mapper) InfoForObject(obj runtime.Object, preferredGVKs []unversioned.GroupVersionKind) (*Info, error) { - groupVersionKinds, _, err := m.ObjectKinds(obj) - if err != nil { - return nil, fmt.Errorf("unable to get type info from the object %q: %v", reflect.TypeOf(obj), err) - } - - groupVersionKind := groupVersionKinds[0] - if len(groupVersionKinds) > 1 && len(preferredGVKs) > 0 { - groupVersionKind = preferredObjectKind(groupVersionKinds, preferredGVKs) - } - - mapping, err := m.RESTMapping(groupVersionKind.GroupKind(), groupVersionKind.Version) - if err != nil { - return nil, fmt.Errorf("unable to recognize %v: %v", groupVersionKind, err) - } - client, err := m.ClientForMapping(mapping) - if err != nil { - return nil, fmt.Errorf("unable to connect to a server to handle %q: %v", mapping.Resource, err) - } - name, _ := mapping.MetadataAccessor.Name(obj) - namespace, _ := mapping.MetadataAccessor.Namespace(obj) - resourceVersion, _ := mapping.MetadataAccessor.ResourceVersion(obj) - return &Info{ - Mapping: mapping, - Client: client, - Namespace: namespace, - Name: name, - - Object: obj, - ResourceVersion: resourceVersion, - }, nil -} - -// preferredObjectKind picks the possibility that most closely matches the priority list in this order: -// GroupVersionKind matches (exact match) -// GroupKind matches -// Group matches -func preferredObjectKind(possibilities []unversioned.GroupVersionKind, preferences []unversioned.GroupVersionKind) unversioned.GroupVersionKind { - // Exact match - for _, priority := range preferences { - for _, possibility := range possibilities { - if possibility == priority { - return possibility - } - } - } - - // GroupKind match - for _, priority := range preferences { - for _, possibility := range possibilities { - if possibility.GroupKind() == priority.GroupKind() { - return possibility - } - } - } - - // Group match - for _, priority := range preferences { - for _, possibility := range possibilities { - if possibility.Group == priority.Group { - return possibility - } - } - } - - // Just pick the first - return possibilities[0] -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/result.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/result.go deleted file mode 100644 index 78f11e4a53..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/result.go +++ /dev/null @@ -1,291 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "fmt" - "reflect" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/runtime" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/watch" -) - -// ErrMatchFunc can be used to filter errors that may not be true failures. 
-type ErrMatchFunc func(error) bool - -// Result contains helper methods for dealing with the outcome of a Builder. -type Result struct { - err error - visitor Visitor - - sources []Visitor - singular bool - - ignoreErrors []utilerrors.Matcher - - // populated by a call to Infos - info []*Info -} - -// IgnoreErrors will filter errors that occur when by visiting the result -// (but not errors that occur by creating the result in the first place), -// eliminating any that match fns. This is best used in combination with -// Builder.ContinueOnError(), where the visitors accumulate errors and return -// them after visiting as a slice of errors. If no errors remain after -// filtering, the various visitor methods on Result will return nil for -// err. -func (r *Result) IgnoreErrors(fns ...ErrMatchFunc) *Result { - for _, fn := range fns { - r.ignoreErrors = append(r.ignoreErrors, utilerrors.Matcher(fn)) - } - return r -} - -// Err returns one or more errors (via a util.ErrorList) that occurred prior -// to visiting the elements in the visitor. To see all errors including those -// that occur during visitation, invoke Infos(). -func (r *Result) Err() error { - return r.err -} - -// Visit implements the Visitor interface on the items described in the Builder. -// Note that some visitor sources are not traversable more than once, or may -// return different results. If you wish to operate on the same set of resources -// multiple times, use the Infos() method. -func (r *Result) Visit(fn VisitorFunc) error { - if r.err != nil { - return r.err - } - err := r.visitor.Visit(fn) - return utilerrors.FilterOut(err, r.ignoreErrors...) -} - -// IntoSingular sets the provided boolean pointer to true if the Builder input -// reflected a single item, or multiple. -func (r *Result) IntoSingular(b *bool) *Result { - *b = r.singular - return r -} - -// Infos returns an array of all of the resource infos retrieved via traversal. -// Will attempt to traverse the entire set of visitors only once, and will return -// a cached list on subsequent calls. -func (r *Result) Infos() ([]*Info, error) { - if r.err != nil { - return nil, r.err - } - if r.info != nil { - return r.info, nil - } - - infos := []*Info{} - err := r.visitor.Visit(func(info *Info, err error) error { - if err != nil { - return err - } - infos = append(infos, info) - return nil - }) - err = utilerrors.FilterOut(err, r.ignoreErrors...) - - r.info, r.err = infos, err - return infos, err -} - -// Object returns a single object representing the output of a single visit to all -// found resources. If the Builder was a singular context (expected to return a -// single resource by user input) and only a single resource was found, the resource -// will be returned as is. Otherwise, the returned resources will be part of an -// api.List. The ResourceVersion of the api.List will be set only if it is identical -// across all infos returned. 
-func (r *Result) Object() (runtime.Object, error) { - infos, err := r.Infos() - if err != nil { - return nil, err - } - - versions := sets.String{} - objects := []runtime.Object{} - for _, info := range infos { - if info.Object != nil { - objects = append(objects, info.Object) - versions.Insert(info.ResourceVersion) - } - } - - if len(objects) == 1 { - if r.singular { - return objects[0], nil - } - // if the item is a list already, don't create another list - if meta.IsListType(objects[0]) { - return objects[0], nil - } - } - - version := "" - if len(versions) == 1 { - version = versions.List()[0] - } - return &api.List{ - ListMeta: unversioned.ListMeta{ - ResourceVersion: version, - }, - Items: objects, - }, err -} - -// ResourceMapping returns a single meta.RESTMapping representing the -// resources located by the builder, or an error if more than one -// mapping was found. -func (r *Result) ResourceMapping() (*meta.RESTMapping, error) { - if r.err != nil { - return nil, r.err - } - mappings := map[string]*meta.RESTMapping{} - for i := range r.sources { - m, ok := r.sources[i].(ResourceMapping) - if !ok { - return nil, fmt.Errorf("a resource mapping could not be loaded from %v", reflect.TypeOf(r.sources[i])) - } - mapping := m.ResourceMapping() - mappings[mapping.Resource] = mapping - } - if len(mappings) != 1 { - return nil, fmt.Errorf("expected only a single resource type") - } - for _, mapping := range mappings { - return mapping, nil - } - return nil, nil -} - -// Watch retrieves changes that occur on the server to the specified resource. -// It currently supports watching a single source - if the resource source -// (selectors or pure types) can be watched, they will be, otherwise the list -// will be visited (equivalent to the Infos() call) and if there is a single -// resource present, it will be watched, otherwise an error will be returned. -func (r *Result) Watch(resourceVersion string) (watch.Interface, error) { - if r.err != nil { - return nil, r.err - } - if len(r.sources) != 1 { - return nil, fmt.Errorf("you may only watch a single resource or type of resource at a time") - } - w, ok := r.sources[0].(Watchable) - if !ok { - info, err := r.Infos() - if err != nil { - return nil, err - } - if len(info) != 1 { - return nil, fmt.Errorf("watch is only supported on individual resources and resource collections - %d resources were found", len(info)) - } - return info[0].Watch(resourceVersion) - } - return w.Watch(resourceVersion) -} - -// AsVersionedObject converts a list of infos into a single object - either a List containing -// the objects as children, or if only a single Object is present, as that object. The provided -// version will be preferred as the conversion target, but the Object's mapping version will be -// used if that version is not present. -func AsVersionedObject(infos []*Info, forceList bool, version unversioned.GroupVersion, encoder runtime.Encoder) (runtime.Object, error) { - objects, err := AsVersionedObjects(infos, version, encoder) - if err != nil { - return nil, err - } - - var object runtime.Object - if len(objects) == 1 && !forceList { - object = objects[0] - } else { - object = &api.List{Items: objects} - converted, err := TryConvert(api.Scheme, object, version, registered.GroupOrDie(api.GroupName).GroupVersion) - if err != nil { - return nil, err - } - object = converted - } - return object, nil -} - -// AsVersionedObjects converts a list of infos into versioned objects. 
The provided -// version will be preferred as the conversion target, but the Object's mapping version will be -// used if that version is not present. -func AsVersionedObjects(infos []*Info, version unversioned.GroupVersion, encoder runtime.Encoder) ([]runtime.Object, error) { - objects := []runtime.Object{} - for _, info := range infos { - if info.Object == nil { - continue - } - - // TODO: use info.VersionedObject as the value? - switch obj := info.Object.(type) { - case *extensions.ThirdPartyResourceData: - objects = append(objects, &runtime.Unknown{Raw: obj.Data}) - continue - } - - // objects that are not part of api.Scheme must be converted to JSON - // TODO: convert to map[string]interface{}, attach to runtime.Unknown? - if !version.Empty() { - if _, _, err := api.Scheme.ObjectKinds(info.Object); runtime.IsNotRegisteredError(err) { - // TODO: ideally this would encode to version, but we don't expose multiple codecs here. - data, err := runtime.Encode(encoder, info.Object) - if err != nil { - return nil, err - } - // TODO: Set ContentEncoding and ContentType. - objects = append(objects, &runtime.Unknown{Raw: data}) - continue - } - } - - converted, err := TryConvert(info.Mapping.ObjectConvertor, info.Object, version, info.Mapping.GroupVersionKind.GroupVersion()) - if err != nil { - return nil, err - } - objects = append(objects, converted) - } - return objects, nil -} - -// TryConvert attempts to convert the given object to the provided versions in order. This function assumes -// the object is in internal version. -func TryConvert(converter runtime.ObjectConvertor, object runtime.Object, versions ...unversioned.GroupVersion) (runtime.Object, error) { - var last error - for _, version := range versions { - if version.Empty() { - return object, nil - } - obj, err := converter.ConvertToVersion(object, version) - if err != nil { - last = err - continue - } - return obj, nil - } - return nil, last -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/selector.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/selector.go deleted file mode 100644 index bf9e04950b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/selector.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/watch" -) - -// Selector is a Visitor for resources that match a label selector. -type Selector struct { - Client RESTClient - Mapping *meta.RESTMapping - Namespace string - Selector labels.Selector - Export bool -} - -// NewSelector creates a resource selector which hides details of getting items by their label selector. 
-func NewSelector(client RESTClient, mapping *meta.RESTMapping, namespace string, selector labels.Selector, export bool) *Selector { - return &Selector{ - Client: client, - Mapping: mapping, - Namespace: namespace, - Selector: selector, - Export: export, - } -} - -// Visit implements Visitor -func (r *Selector) Visit(fn VisitorFunc) error { - list, err := NewHelper(r.Client, r.Mapping).List(r.Namespace, r.ResourceMapping().GroupVersionKind.GroupVersion().String(), r.Selector, r.Export) - if err != nil { - if errors.IsBadRequest(err) || errors.IsNotFound(err) { - if se, ok := err.(*errors.StatusError); ok { - // modify the message without hiding this is an API error - if r.Selector.Empty() { - se.ErrStatus.Message = fmt.Sprintf("Unable to list %q: %v", r.Mapping.Resource, se.ErrStatus.Message) - } else { - se.ErrStatus.Message = fmt.Sprintf("Unable to find %q that match the selector %q: %v", r.Mapping.Resource, r.Selector, se.ErrStatus.Message) - } - return se - } - if r.Selector.Empty() { - return fmt.Errorf("Unable to list %q: %v", r.Mapping.Resource, err) - } else { - return fmt.Errorf("Unable to find %q that match the selector %q: %v", r.Mapping.Resource, r.Selector, err) - } - } - return err - } - accessor := r.Mapping.MetadataAccessor - resourceVersion, _ := accessor.ResourceVersion(list) - info := &Info{ - Client: r.Client, - Mapping: r.Mapping, - Namespace: r.Namespace, - - Object: list, - ResourceVersion: resourceVersion, - } - return fn(info, nil) -} - -func (r *Selector) Watch(resourceVersion string) (watch.Interface, error) { - return NewHelper(r.Client, r.Mapping).Watch(r.Namespace, resourceVersion, r.ResourceMapping().GroupVersionKind.GroupVersion().String(), r.Selector) -} - -// ResourceMapping returns the mapping for this resource and implements ResourceMapping -func (r *Selector) ResourceMapping() *meta.RESTMapping { - return r.Mapping -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/visitor.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/visitor.go deleted file mode 100644 index f8909bf15b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/visitor.go +++ /dev/null @@ -1,690 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "bytes" - "fmt" - "io" - "net/http" - "net/url" - "os" - "path/filepath" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/validation" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/yaml" - "k8s.io/kubernetes/pkg/watch" -) - -const ( - constSTDINstr string = "STDIN" - stopValidateMessage = "if you choose to ignore these errors, turn validation off with --validate=false" -) - -// Visitor lets clients walk a list of resources. 
-type Visitor interface { - Visit(VisitorFunc) error -} - -// VisitorFunc implements the Visitor interface for a matching function. -// If there was a problem walking a list of resources, the incoming error -// will describe the problem and the function can decide how to handle that error. -// A nil returned indicates to accept an error to continue loops even when errors happen. -// This is useful for ignoring certain kinds of errors or aggregating errors in some way. -type VisitorFunc func(*Info, error) error - -// Watchable describes a resource that can be watched for changes that occur on the server, -// beginning after the provided resource version. -type Watchable interface { - Watch(resourceVersion string) (watch.Interface, error) -} - -// ResourceMapping allows an object to return the resource mapping associated with -// the resource or resources it represents. -type ResourceMapping interface { - ResourceMapping() *meta.RESTMapping -} - -// Info contains temporary info to execute a REST call, or show the results -// of an already completed REST call. -type Info struct { - Client RESTClient - Mapping *meta.RESTMapping - Namespace string - Name string - - // Optional, Source is the filename or URL to template file (.json or .yaml), - // or stdin to use to handle the resource - Source string - // Optional, this is the provided object in a versioned type before defaulting - // and conversions into its corresponding internal type. This is useful for - // reflecting on user intent which may be lost after defaulting and conversions. - VersionedObject interface{} - // Optional, this is the most recent value returned by the server if available - runtime.Object - // Optional, this is the most recent resource version the server knows about for - // this type of resource. It may not match the resource version of the object, - // but if set it should be equal to or newer than the resource version of the - // object (however the server defines resource version). - ResourceVersion string - // Optional, should this resource be exported, stripped of cluster-specific and instance specific fields - Export bool -} - -// NewInfo returns a new info object -func NewInfo(client RESTClient, mapping *meta.RESTMapping, namespace, name string, export bool) *Info { - return &Info{ - Client: client, - Mapping: mapping, - Namespace: namespace, - Name: name, - Export: export, - } -} - -// Visit implements Visitor -func (i *Info) Visit(fn VisitorFunc) error { - return fn(i, nil) -} - -// Get retrieves the object from the Namespace and Name fields -func (i *Info) Get() (err error) { - obj, err := NewHelper(i.Client, i.Mapping).Get(i.Namespace, i.Name, i.Export) - if err != nil { - if errors.IsNotFound(err) && len(i.Namespace) > 0 && i.Namespace != api.NamespaceDefault && i.Namespace != api.NamespaceAll { - err2 := i.Client.Get().AbsPath("api", "v1", "namespaces", i.Namespace).Do().Error() - if err2 != nil && errors.IsNotFound(err2) { - return err2 - } - } - return err - } - i.Object = obj - i.ResourceVersion, _ = i.Mapping.MetadataAccessor.ResourceVersion(obj) - return nil -} - -// Refresh updates the object with another object. If ignoreError is set -// the Object will be updated even if name, namespace, or resourceVersion -// attributes cannot be loaded from the object. 
-func (i *Info) Refresh(obj runtime.Object, ignoreError bool) error { - name, err := i.Mapping.MetadataAccessor.Name(obj) - if err != nil { - if !ignoreError { - return err - } - } else { - i.Name = name - } - namespace, err := i.Mapping.MetadataAccessor.Namespace(obj) - if err != nil { - if !ignoreError { - return err - } - } else { - i.Namespace = namespace - } - version, err := i.Mapping.MetadataAccessor.ResourceVersion(obj) - if err != nil { - if !ignoreError { - return err - } - } else { - i.ResourceVersion = version - } - i.Object = obj - return nil -} - -// Namespaced returns true if the object belongs to a namespace -func (i *Info) Namespaced() bool { - return i.Mapping != nil && i.Mapping.Scope.Name() == meta.RESTScopeNameNamespace -} - -// Watch returns server changes to this object after it was retrieved. -func (i *Info) Watch(resourceVersion string) (watch.Interface, error) { - return NewHelper(i.Client, i.Mapping).WatchSingle(i.Namespace, i.Name, resourceVersion) -} - -// ResourceMapping returns the mapping for this resource and implements ResourceMapping -func (i *Info) ResourceMapping() *meta.RESTMapping { - return i.Mapping -} - -// VisitorList implements Visit for the sub visitors it contains. The first error -// returned from a child Visitor will terminate iteration. -type VisitorList []Visitor - -// Visit implements Visitor -func (l VisitorList) Visit(fn VisitorFunc) error { - for i := range l { - if err := l[i].Visit(fn); err != nil { - return err - } - } - return nil -} - -// EagerVisitorList implements Visit for the sub visitors it contains. All errors -// will be captured and returned at the end of iteration. -type EagerVisitorList []Visitor - -// Visit implements Visitor, and gathers errors that occur during processing until -// all sub visitors have been visited. -func (l EagerVisitorList) Visit(fn VisitorFunc) error { - errs := []error(nil) - for i := range l { - if err := l[i].Visit(func(info *Info, err error) error { - if err != nil { - errs = append(errs, err) - return nil - } - if err := fn(info, nil); err != nil { - errs = append(errs, err) - } - return nil - }); err != nil { - errs = append(errs, err) - } - } - return utilerrors.NewAggregate(errs) -} - -func ValidateSchema(data []byte, schema validation.Schema) error { - if schema == nil { - return nil - } - if err := schema.ValidateBytes(data); err != nil { - return fmt.Errorf("error validating data: %v; %s", err, stopValidateMessage) - } - return nil -} - -// URLVisitor downloads the contents of a URL, and if successful, returns -// an info object representing the downloaded object. -type URLVisitor struct { - URL *url.URL - *StreamVisitor - HttpAttemptCount int -} - -func (v *URLVisitor) Visit(fn VisitorFunc) error { - body, err := readHttpWithRetries(httpgetImpl, time.Second, v.URL.String(), v.HttpAttemptCount) - if err != nil { - return err - } - defer body.Close() - v.StreamVisitor.Reader = body - return v.StreamVisitor.Visit(fn) -} - -// readHttpWithRetries tries to http.Get the v.URL retries times before giving up. 
-func readHttpWithRetries(get httpget, duration time.Duration, u string, attempts int) (io.ReadCloser, error) { - var err error - var body io.ReadCloser - if attempts <= 0 { - return nil, fmt.Errorf("http attempts must be greater than 0, was %d", attempts) - } - for i := 0; i < attempts; i++ { - var statusCode int - var status string - if i > 0 { - time.Sleep(duration) - } - - // Try to get the URL - statusCode, status, body, err = get(u) - - // Retry Errors - if err != nil { - continue - } - - // Error - Set the error condition from the StatusCode - if statusCode != 200 { - err = fmt.Errorf("unable to read URL %q, server reported %s, status code=%d", u, status, statusCode) - } - - if statusCode >= 500 && statusCode < 600 { - // Retry 500's - continue - } else { - // Don't retry other StatusCodes - break - } - } - return body, err -} - -// httpget Defines function to retrieve a url and return the results. Exists for unit test stubbing. -type httpget func(url string) (int, string, io.ReadCloser, error) - -// httpgetImpl Implements a function to retrieve a url and return the results. -func httpgetImpl(url string) (int, string, io.ReadCloser, error) { - resp, err := http.Get(url) - if err != nil { - return 0, "", nil, err - } - return resp.StatusCode, resp.Status, resp.Body, nil -} - -// DecoratedVisitor will invoke the decorators in order prior to invoking the visitor function -// passed to Visit. An error will terminate the visit. -type DecoratedVisitor struct { - visitor Visitor - decorators []VisitorFunc -} - -// NewDecoratedVisitor will create a visitor that invokes the provided visitor functions before -// the user supplied visitor function is invoked, giving them the opportunity to mutate the Info -// object or terminate early with an error. -func NewDecoratedVisitor(v Visitor, fn ...VisitorFunc) Visitor { - if len(fn) == 0 { - return v - } - return DecoratedVisitor{v, fn} -} - -// Visit implements Visitor -func (v DecoratedVisitor) Visit(fn VisitorFunc) error { - return v.visitor.Visit(func(info *Info, err error) error { - if err != nil { - return err - } - for i := range v.decorators { - if err := v.decorators[i](info, nil); err != nil { - return err - } - } - return fn(info, nil) - }) -} - -// ContinueOnErrorVisitor visits each item and, if an error occurs on -// any individual item, returns an aggregate error after all items -// are visited. -type ContinueOnErrorVisitor struct { - Visitor -} - -// Visit returns nil if no error occurs during traversal, a regular -// error if one occurs, or if multiple errors occur, an aggregate -// error. If the provided visitor fails on any individual item it -// will not prevent the remaining items from being visited. An error -// returned by the visitor directly may still result in some items -// not being visited. -func (v ContinueOnErrorVisitor) Visit(fn VisitorFunc) error { - errs := []error{} - err := v.Visitor.Visit(func(info *Info, err error) error { - if err != nil { - errs = append(errs, err) - return nil - } - if err := fn(info, nil); err != nil { - errs = append(errs, err) - } - return nil - }) - if err != nil { - errs = append(errs, err) - } - if len(errs) == 1 { - return errs[0] - } - return utilerrors.NewAggregate(errs) -} - -// FlattenListVisitor flattens any objects that runtime.ExtractList recognizes as a list -// - has an "Items" public field that is a slice of runtime.Objects or objects satisfying -// that interface - into multiple Infos. 
An error on any sub item (for instance, if a List -// contains an object that does not have a registered client or resource) will terminate -// the visit. -// TODO: allow errors to be aggregated? -type FlattenListVisitor struct { - Visitor - *Mapper -} - -// NewFlattenListVisitor creates a visitor that will expand list style runtime.Objects -// into individual items and then visit them individually. -func NewFlattenListVisitor(v Visitor, mapper *Mapper) Visitor { - return FlattenListVisitor{v, mapper} -} - -func (v FlattenListVisitor) Visit(fn VisitorFunc) error { - return v.Visitor.Visit(func(info *Info, err error) error { - if err != nil { - return err - } - if info.Object == nil { - return fn(info, nil) - } - items, err := meta.ExtractList(info.Object) - if err != nil { - return fn(info, nil) - } - if errs := runtime.DecodeList(items, struct { - runtime.ObjectTyper - runtime.Decoder - }{v.Mapper, v.Mapper.Decoder}); len(errs) > 0 { - return utilerrors.NewAggregate(errs) - } - - // If we have a GroupVersionKind on the list, prioritize that when asking for info on the objects contained in the list - var preferredGVKs []unversioned.GroupVersionKind - if info.Mapping != nil && !info.Mapping.GroupVersionKind.Empty() { - preferredGVKs = append(preferredGVKs, info.Mapping.GroupVersionKind) - } - - for i := range items { - item, err := v.InfoForObject(items[i], preferredGVKs) - if err != nil { - return err - } - if len(info.ResourceVersion) != 0 { - item.ResourceVersion = info.ResourceVersion - } - if err := fn(item, nil); err != nil { - return err - } - } - return nil - }) -} - -func ignoreFile(path string, extensions []string) bool { - if len(extensions) == 0 { - return false - } - ext := filepath.Ext(path) - for _, s := range extensions { - if s == ext { - return false - } - } - return true -} - -// FileVisitorForSTDIN return a special FileVisitor just for STDIN -func FileVisitorForSTDIN(mapper *Mapper, schema validation.Schema) Visitor { - return &FileVisitor{ - Path: constSTDINstr, - StreamVisitor: NewStreamVisitor(nil, mapper, constSTDINstr, schema), - } -} - -// ExpandPathsToFileVisitors will return a slice of FileVisitors that will handle files from the provided path. -// After FileVisitors open the files, they will pass an io.Reader to a StreamVisitor to do the reading. (stdin -// is also taken care of). 
Paths argument also accepts a single file, and will return a single visitor -func ExpandPathsToFileVisitors(mapper *Mapper, paths string, recursive bool, extensions []string, schema validation.Schema) ([]Visitor, error) { - var visitors []Visitor - err := filepath.Walk(paths, func(path string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - if fi.IsDir() { - if path != paths && !recursive { - return filepath.SkipDir - } - return nil - } - // Don't check extension if the filepath was passed explicitly - if path != paths && ignoreFile(path, extensions) { - return nil - } - - visitor := &FileVisitor{ - Path: path, - StreamVisitor: NewStreamVisitor(nil, mapper, path, schema), - } - - visitors = append(visitors, visitor) - return nil - }) - - if err != nil { - return nil, err - } - return visitors, nil -} - -// FileVisitor is wrapping around a StreamVisitor, to handle open/close files -type FileVisitor struct { - Path string - *StreamVisitor -} - -// Visit in a FileVisitor is just taking care of opening/closing files -func (v *FileVisitor) Visit(fn VisitorFunc) error { - var f *os.File - if v.Path == constSTDINstr { - f = os.Stdin - } else { - var err error - if f, err = os.Open(v.Path); err != nil { - return err - } - } - defer f.Close() - v.StreamVisitor.Reader = f - - return v.StreamVisitor.Visit(fn) -} - -// StreamVisitor reads objects from an io.Reader and walks them. A stream visitor can only be -// visited once. -// TODO: depends on objects being in JSON format before being passed to decode - need to implement -// a stream decoder method on runtime.Codec to properly handle this. -type StreamVisitor struct { - io.Reader - *Mapper - - Source string - Schema validation.Schema -} - -// NewStreamVisitor is a helper function that is useful when we want to change the fields of the struct but keep calls the same. -func NewStreamVisitor(r io.Reader, mapper *Mapper, source string, schema validation.Schema) *StreamVisitor { - return &StreamVisitor{ - Reader: r, - Mapper: mapper, - Source: source, - Schema: schema, - } -} - -// Visit implements Visitor over a stream. StreamVisitor is able to distinct multiple resources in one stream. -func (v *StreamVisitor) Visit(fn VisitorFunc) error { - d := yaml.NewYAMLOrJSONDecoder(v.Reader, 4096) - for { - ext := runtime.RawExtension{} - if err := d.Decode(&ext); err != nil { - if err == io.EOF { - return nil - } - return err - } - // TODO: This needs to be able to handle object in other encodings and schemas. - ext.Raw = bytes.TrimSpace(ext.Raw) - if len(ext.Raw) == 0 || bytes.Equal(ext.Raw, []byte("null")) { - continue - } - if err := ValidateSchema(ext.Raw, v.Schema); err != nil { - return fmt.Errorf("error validating %q: %v", v.Source, err) - } - info, err := v.InfoForData(ext.Raw, v.Source) - if err != nil { - if fnErr := fn(info, err); fnErr != nil { - return fnErr - } - continue - } - if err := fn(info, nil); err != nil { - return err - } - } -} - -func UpdateObjectNamespace(info *Info, err error) error { - if err != nil { - return err - } - if info.Object != nil { - return info.Mapping.MetadataAccessor.SetNamespace(info.Object, info.Namespace) - } - return nil -} - -// FilterNamespace omits the namespace if the object is not namespace scoped -func FilterNamespace(info *Info, err error) error { - if err != nil { - return err - } - if !info.Namespaced() { - info.Namespace = "" - UpdateObjectNamespace(info, nil) - } - return nil -} - -// SetNamespace ensures that every Info object visited will have a namespace -// set. 
If info.Object is set, it will be mutated as well. -func SetNamespace(namespace string) VisitorFunc { - return func(info *Info, err error) error { - if err != nil { - return err - } - if !info.Namespaced() { - return nil - } - if len(info.Namespace) == 0 { - info.Namespace = namespace - UpdateObjectNamespace(info, nil) - } - return nil - } -} - -// RequireNamespace will either set a namespace if none is provided on the -// Info object, or if the namespace is set and does not match the provided -// value, returns an error. This is intended to guard against administrators -// accidentally operating on resources outside their namespace. -func RequireNamespace(namespace string) VisitorFunc { - return func(info *Info, err error) error { - if err != nil { - return err - } - if !info.Namespaced() { - return nil - } - if len(info.Namespace) == 0 { - info.Namespace = namespace - UpdateObjectNamespace(info, nil) - return nil - } - if info.Namespace != namespace { - return fmt.Errorf("the namespace from the provided object %q does not match the namespace %q. You must pass '--namespace=%s' to perform this operation.", info.Namespace, namespace, info.Namespace) - } - return nil - } -} - -// RetrieveLatest updates the Object on each Info by invoking a standard client -// Get. -func RetrieveLatest(info *Info, err error) error { - if err != nil { - return err - } - if meta.IsListType(info.Object) { - return fmt.Errorf("watch is only supported on individual resources and resource collections, but a list of resources is found") - } - if len(info.Name) == 0 { - return nil - } - if info.Namespaced() && len(info.Namespace) == 0 { - return fmt.Errorf("no namespace set on resource %s %q", info.Mapping.Resource, info.Name) - } - return info.Get() -} - -// RetrieveLazy updates the object if it has not been loaded yet. -func RetrieveLazy(info *Info, err error) error { - if err != nil { - return err - } - if info.Object == nil { - return info.Get() - } - return nil -} - -type FilterFunc func(info *Info, err error) (bool, error) - -type FilteredVisitor struct { - visitor Visitor - filters []FilterFunc -} - -func NewFilteredVisitor(v Visitor, fn ...FilterFunc) Visitor { - if len(fn) == 0 { - return v - } - return FilteredVisitor{v, fn} -} - -func (v FilteredVisitor) Visit(fn VisitorFunc) error { - return v.visitor.Visit(func(info *Info, err error) error { - if err != nil { - return err - } - for _, filter := range v.filters { - ok, err := filter(info, nil) - if err != nil { - return err - } - if !ok { - return nil - } - } - return fn(info, nil) - }) -} - -func FilterBySelector(s labels.Selector) FilterFunc { - return func(info *Info, err error) (bool, error) { - if err != nil { - return false, err - } - a, err := meta.Accessor(info.Object) - if err != nil { - return false, err - } - if !s.Matches(labels.Set(a.GetLabels())) { - return false, nil - } - return true, nil - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource_filter.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource_filter.go deleted file mode 100644 index cd6470ba23..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource_filter.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" -) - -// FilterFunc is a function that knows how to filter a specific resource kind. -// It receives a generic runtime.Object which must be type-checked by the function. -// Returns a boolean value true if a resource is filtered, or false otherwise. -type FilterFunc func(runtime.Object, PrintOptions) bool - -// Filters is a collection of filter funcs -type Filters []FilterFunc - -func NewResourceFilter() Filters { - return []FilterFunc{ - filterPods, - } -} - -// filterPods returns true if a pod should be skipped. -// defaults to true for terminated pods -func filterPods(obj runtime.Object, options PrintOptions) bool { - switch p := obj.(type) { - case *v1.Pod: - reason := string(p.Status.Phase) - if p.Status.Reason != "" { - reason = p.Status.Reason - } - return !options.ShowAll && (reason == string(v1.PodSucceeded) || reason == string(v1.PodFailed)) - case *api.Pod: - reason := string(p.Status.Phase) - if p.Status.Reason != "" { - reason = p.Status.Reason - } - return !options.ShowAll && (reason == string(api.PodSucceeded) || reason == string(api.PodFailed)) - } - return false -} - -// Filter loops through a collection of FilterFuncs until it finds one that can filter the given resource -func (f Filters) Filter(obj runtime.Object, opts *PrintOptions) (bool, error) { - // check if the object is unstructured. If so, let's attempt to convert it to a type we can understand - // before apply filter func. - switch obj.(type) { - case *runtime.UnstructuredList, *runtime.Unstructured, *runtime.Unknown: - if objBytes, err := runtime.Encode(api.Codecs.LegacyCodec(), obj); err == nil { - if decodedObj, err := runtime.Decode(api.Codecs.UniversalDecoder(), objBytes); err == nil { - obj = decodedObj - } - } - } - - for _, filter := range f { - if ok := filter(obj, *opts); ok { - return true, nil - } - } - return false, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource_printer.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource_printer.go deleted file mode 100644 index 032982a1c9..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource_printer.go +++ /dev/null @@ -1,2643 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "reflect" - "sort" - "strings" - "text/tabwriter" - "text/template" - "time" - - "k8s.io/kubernetes/federation/apis/federation" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/events" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/autoscaling" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/certificates" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/apis/policy" - "k8s.io/kubernetes/pkg/apis/rbac" - "k8s.io/kubernetes/pkg/apis/storage" - storageutil "k8s.io/kubernetes/pkg/apis/storage/util" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/jsonpath" - "k8s.io/kubernetes/pkg/util/node" - "k8s.io/kubernetes/pkg/util/sets" - - "github.com/ghodss/yaml" - "github.com/golang/glog" -) - -const ( - tabwriterMinWidth = 10 - tabwriterWidth = 4 - tabwriterPadding = 3 - tabwriterPadChar = ' ' - tabwriterFlags = 0 - loadBalancerWidth = 16 -) - -// GetPrinter takes a format type, an optional format argument. It will return true -// if the format is generic (untyped), otherwise it will return false. The printer -// is agnostic to schema versions, so you must send arguments to PrintObj in the -// version you wish them to be shown using a VersionedPrinter (typically when -// generic is true). -func GetPrinter(format, formatArgument string, noHeaders bool) (ResourcePrinter, bool, error) { - var printer ResourcePrinter - switch format { - case "json": - printer = &JSONPrinter{} - case "yaml": - printer = &YAMLPrinter{} - case "name": - printer = &NamePrinter{ - // TODO: this is wrong, these should be provided as an argument to GetPrinter - Typer: api.Scheme, - Decoder: api.Codecs.UniversalDecoder(), - } - case "template", "go-template": - if len(formatArgument) == 0 { - return nil, false, fmt.Errorf("template format specified but no template given") - } - var err error - printer, err = NewTemplatePrinter([]byte(formatArgument)) - if err != nil { - return nil, false, fmt.Errorf("error parsing template %s, %v\n", formatArgument, err) - } - case "templatefile", "go-template-file": - if len(formatArgument) == 0 { - return nil, false, fmt.Errorf("templatefile format specified but no template file given") - } - data, err := ioutil.ReadFile(formatArgument) - if err != nil { - return nil, false, fmt.Errorf("error reading template %s, %v\n", formatArgument, err) - } - printer, err = NewTemplatePrinter(data) - if err != nil { - return nil, false, fmt.Errorf("error parsing template %s, %v\n", string(data), err) - } - case "jsonpath": - if len(formatArgument) == 0 { - return nil, false, fmt.Errorf("jsonpath template format specified but no template given") - } - var err error - printer, err = NewJSONPathPrinter(formatArgument) - if err != nil { - return nil, false, fmt.Errorf("error parsing jsonpath %s, %v\n", formatArgument, err) - } - case "jsonpath-file": - if len(formatArgument) == 0 { - return nil, false, fmt.Errorf("jsonpath file format specified but no template file file given") - } - data, err := ioutil.ReadFile(formatArgument) - if err != nil { - return nil, false, fmt.Errorf("error reading template %s, %v\n", formatArgument, err) - } - printer, err = NewJSONPathPrinter(string(data)) - if err != nil { - return nil, false, fmt.Errorf("error parsing template %s, %v\n", 
string(data), err) - } - case "custom-columns": - var err error - if printer, err = NewCustomColumnsPrinterFromSpec(formatArgument, api.Codecs.UniversalDecoder(), noHeaders); err != nil { - return nil, false, err - } - case "custom-columns-file": - file, err := os.Open(formatArgument) - if err != nil { - return nil, false, fmt.Errorf("error reading template %s, %v\n", formatArgument, err) - } - defer file.Close() - if printer, err = NewCustomColumnsPrinterFromTemplate(file, api.Codecs.UniversalDecoder()); err != nil { - return nil, false, err - } - case "wide": - fallthrough - case "": - return nil, false, nil - default: - return nil, false, fmt.Errorf("output format %q not recognized", format) - } - return printer, true, nil -} - -// ResourcePrinter is an interface that knows how to print runtime objects. -type ResourcePrinter interface { - // Print receives a runtime object, formats it and prints it to a writer. - PrintObj(runtime.Object, io.Writer) error - HandledResources() []string - //Can be used to print out warning/clarifications if needed - //after all objects were printed - AfterPrint(io.Writer, string) error -} - -// ResourcePrinterFunc is a function that can print objects -type ResourcePrinterFunc func(runtime.Object, io.Writer) error - -// PrintObj implements ResourcePrinter -func (fn ResourcePrinterFunc) PrintObj(obj runtime.Object, w io.Writer) error { - return fn(obj, w) -} - -// TODO: implement HandledResources() -func (fn ResourcePrinterFunc) HandledResources() []string { - return []string{} -} - -func (fn ResourcePrinterFunc) AfterPrint(io.Writer, string) error { - return nil -} - -// VersionedPrinter takes runtime objects and ensures they are converted to a given API version -// prior to being passed to a nested printer. -type VersionedPrinter struct { - printer ResourcePrinter - converter runtime.ObjectConvertor - versions []unversioned.GroupVersion -} - -// NewVersionedPrinter wraps a printer to convert objects to a known API version prior to printing. -func NewVersionedPrinter(printer ResourcePrinter, converter runtime.ObjectConvertor, versions ...unversioned.GroupVersion) ResourcePrinter { - return &VersionedPrinter{ - printer: printer, - converter: converter, - versions: versions, - } -} - -func (p *VersionedPrinter) AfterPrint(w io.Writer, res string) error { - return nil -} - -// PrintObj implements ResourcePrinter -func (p *VersionedPrinter) PrintObj(obj runtime.Object, w io.Writer) error { - if len(p.versions) == 0 { - return fmt.Errorf("no version specified, object cannot be converted") - } - converted, err := p.converter.ConvertToVersion(obj, unversioned.GroupVersions(p.versions)) - if err != nil { - return err - } - return p.printer.PrintObj(converted, w) -} - -// TODO: implement HandledResources() -func (p *VersionedPrinter) HandledResources() []string { - return []string{} -} - -// NamePrinter is an implementation of ResourcePrinter which outputs "resource/name" pair of an object. -type NamePrinter struct { - Decoder runtime.Decoder - Typer runtime.ObjectTyper -} - -func (p *NamePrinter) AfterPrint(w io.Writer, res string) error { - return nil -} - -// PrintObj is an implementation of ResourcePrinter.PrintObj which decodes the object -// and print "resource/name" pair. If the object is a List, print all items in it. 
-func (p *NamePrinter) PrintObj(obj runtime.Object, w io.Writer) error { - if meta.IsListType(obj) { - items, err := meta.ExtractList(obj) - if err != nil { - return err - } - if errs := runtime.DecodeList(items, p.Decoder, runtime.UnstructuredJSONScheme); len(errs) > 0 { - return utilerrors.NewAggregate(errs) - } - for _, obj := range items { - if err := p.PrintObj(obj, w); err != nil { - return err - } - } - return nil - } - - name := "" - if acc, err := meta.Accessor(obj); err == nil { - if n := acc.GetName(); len(n) > 0 { - name = n - } - } - - if kind := obj.GetObjectKind().GroupVersionKind(); len(kind.Kind) == 0 { - // this is the old code. It's unnecessary on decoded external objects, but on internal objects - // you may have to do it. Tests are definitely calling it with internals and I'm not sure who else - // is - if gvks, _, err := p.Typer.ObjectKinds(obj); err == nil { - // TODO: this is wrong, it assumes that meta knows about all Kinds - should take a RESTMapper - _, resource := meta.KindToResource(gvks[0]) - fmt.Fprintf(w, "%s/%s\n", resource.Resource, name) - } else { - fmt.Fprintf(w, "/%s\n", name) - } - - } else { - // TODO: this is wrong, it assumes that meta knows about all Kinds - should take a RESTMapper - _, resource := meta.KindToResource(kind) - fmt.Fprintf(w, "%s/%s\n", resource.Resource, name) - } - - return nil -} - -// TODO: implement HandledResources() -func (p *NamePrinter) HandledResources() []string { - return []string{} -} - -// JSONPrinter is an implementation of ResourcePrinter which outputs an object as JSON. -type JSONPrinter struct { -} - -func (p *JSONPrinter) AfterPrint(w io.Writer, res string) error { - return nil -} - -// PrintObj is an implementation of ResourcePrinter.PrintObj which simply writes the object to the Writer. -func (p *JSONPrinter) PrintObj(obj runtime.Object, w io.Writer) error { - switch obj := obj.(type) { - case *runtime.Unknown: - var buf bytes.Buffer - err := json.Indent(&buf, obj.Raw, "", " ") - if err != nil { - return err - } - buf.WriteRune('\n') - _, err = buf.WriteTo(w) - return err - } - - data, err := json.MarshalIndent(obj, "", " ") - if err != nil { - return err - } - data = append(data, '\n') - _, err = w.Write(data) - return err -} - -// TODO: implement HandledResources() -func (p *JSONPrinter) HandledResources() []string { - return []string{} -} - -// YAMLPrinter is an implementation of ResourcePrinter which outputs an object as YAML. -// The input object is assumed to be in the internal version of an API and is converted -// to the given version first. -type YAMLPrinter struct { - version string - converter runtime.ObjectConvertor -} - -func (p *YAMLPrinter) AfterPrint(w io.Writer, res string) error { - return nil -} - -// PrintObj prints the data as YAML. 
-func (p *YAMLPrinter) PrintObj(obj runtime.Object, w io.Writer) error { - switch obj := obj.(type) { - case *runtime.Unknown: - data, err := yaml.JSONToYAML(obj.Raw) - if err != nil { - return err - } - _, err = w.Write(data) - return err - } - - output, err := yaml.Marshal(obj) - if err != nil { - return err - } - _, err = fmt.Fprint(w, string(output)) - return err -} - -// TODO: implement HandledResources() -func (p *YAMLPrinter) HandledResources() []string { - return []string{} -} - -type handlerEntry struct { - columns []string - printFunc reflect.Value - args []reflect.Value -} - -type PrintOptions struct { - NoHeaders bool - WithNamespace bool - WithKind bool - Wide bool - ShowAll bool - ShowLabels bool - AbsoluteTimestamps bool - Kind string - ColumnLabels []string -} - -// HumanReadablePrinter is an implementation of ResourcePrinter which attempts to provide -// more elegant output. It is not threadsafe, but you may call PrintObj repeatedly; headers -// will only be printed if the object type changes. This makes it useful for printing items -// received from watches. -type HumanReadablePrinter struct { - handlerMap map[reflect.Type]*handlerEntry - options PrintOptions - lastType reflect.Type - hiddenObjNum int -} - -// NewHumanReadablePrinter creates a HumanReadablePrinter. -func NewHumanReadablePrinter(options PrintOptions) *HumanReadablePrinter { - printer := &HumanReadablePrinter{ - handlerMap: make(map[reflect.Type]*handlerEntry), - options: options, - } - printer.addDefaultHandlers() - return printer -} - -// formatResourceName receives a resource kind, name, and boolean specifying -// whether or not to update the current name to "kind/name" -func formatResourceName(kind, name string, withKind bool) string { - if !withKind || kind == "" { - return name - } - - return kind + "/" + name -} - -// GetResourceKind returns the type currently set for a resource -func (h *HumanReadablePrinter) GetResourceKind() string { - return h.options.Kind -} - -// EnsurePrintWithKind sets HumanReadablePrinter options "WithKind" to true -// and "Kind" to the string arg it receives, pre-pending this string -// to the "NAME" column in an output of resources. -func (h *HumanReadablePrinter) EnsurePrintWithKind(kind string) { - h.options.WithKind = true - h.options.Kind = kind -} - -// Handler adds a print handler with a given set of columns to HumanReadablePrinter instance. -// See validatePrintHandlerFunc for required method signature. -func (h *HumanReadablePrinter) Handler(columns []string, printFunc interface{}) error { - printFuncValue := reflect.ValueOf(printFunc) - if err := h.validatePrintHandlerFunc(printFuncValue); err != nil { - glog.Errorf("Unable to add print handler: %v", err) - return err - } - - objType := printFuncValue.Type().In(0) - h.handlerMap[objType] = &handlerEntry{ - columns: columns, - printFunc: printFuncValue, - } - return nil -} - -// validatePrintHandlerFunc validates print handler signature. -// printFunc is the function that will be called to print an object. -// It must be of the following type: -// func printFunc(object ObjectType, w io.Writer, options PrintOptions) error -// where ObjectType is the type of the object that will be printed. -func (h *HumanReadablePrinter) validatePrintHandlerFunc(printFunc reflect.Value) error { - if printFunc.Kind() != reflect.Func { - return fmt.Errorf("invalid print handler. 
%#v is not a function", printFunc) - } - funcType := printFunc.Type() - if funcType.NumIn() != 3 || funcType.NumOut() != 1 { - return fmt.Errorf("invalid print handler." + - "Must accept 3 parameters and return 1 value.") - } - if funcType.In(1) != reflect.TypeOf((*io.Writer)(nil)).Elem() || - funcType.In(2) != reflect.TypeOf((*PrintOptions)(nil)).Elem() || - funcType.Out(0) != reflect.TypeOf((*error)(nil)).Elem() { - return fmt.Errorf("invalid print handler. The expected signature is: "+ - "func handler(obj %v, w io.Writer, options PrintOptions) error", funcType.In(0)) - } - return nil -} - -func (h *HumanReadablePrinter) HandledResources() []string { - keys := make([]string, 0) - - for k := range h.handlerMap { - // k.String looks like "*api.PodList" and we want just "pod" - api := strings.Split(k.String(), ".") - resource := api[len(api)-1] - if strings.HasSuffix(resource, "List") { - continue - } - resource = strings.ToLower(resource) - keys = append(keys, resource) - } - return keys -} - -func (h *HumanReadablePrinter) AfterPrint(output io.Writer, res string) error { - return nil -} - -// NOTE: When adding a new resource type here, please update the list -// pkg/kubectl/cmd/get.go to reflect the new resource type. -var ( - podColumns = []string{"NAME", "READY", "STATUS", "RESTARTS", "AGE"} - podTemplateColumns = []string{"TEMPLATE", "CONTAINER(S)", "IMAGE(S)", "PODLABELS"} - podDisruptionBudgetColumns = []string{"NAME", "MIN-AVAILABLE", "ALLOWED-DISRUPTIONS", "AGE"} - replicationControllerColumns = []string{"NAME", "DESIRED", "CURRENT", "READY", "AGE"} - replicaSetColumns = []string{"NAME", "DESIRED", "CURRENT", "READY", "AGE"} - jobColumns = []string{"NAME", "DESIRED", "SUCCESSFUL", "AGE"} - cronJobColumns = []string{"NAME", "SCHEDULE", "SUSPEND", "ACTIVE", "LAST-SCHEDULE"} - serviceColumns = []string{"NAME", "CLUSTER-IP", "EXTERNAL-IP", "PORT(S)", "AGE"} - ingressColumns = []string{"NAME", "HOSTS", "ADDRESS", "PORTS", "AGE"} - statefulSetColumns = []string{"NAME", "DESIRED", "CURRENT", "AGE"} - endpointColumns = []string{"NAME", "ENDPOINTS", "AGE"} - nodeColumns = []string{"NAME", "STATUS", "AGE"} - daemonSetColumns = []string{"NAME", "DESIRED", "CURRENT", "READY", "NODE-SELECTOR", "AGE"} - eventColumns = []string{"LASTSEEN", "FIRSTSEEN", "COUNT", "NAME", "KIND", "SUBOBJECT", "TYPE", "REASON", "SOURCE", "MESSAGE"} - limitRangeColumns = []string{"NAME", "AGE"} - resourceQuotaColumns = []string{"NAME", "AGE"} - namespaceColumns = []string{"NAME", "STATUS", "AGE"} - secretColumns = []string{"NAME", "TYPE", "DATA", "AGE"} - serviceAccountColumns = []string{"NAME", "SECRETS", "AGE"} - persistentVolumeColumns = []string{"NAME", "CAPACITY", "ACCESSMODES", "RECLAIMPOLICY", "STATUS", "CLAIM", "REASON", "AGE"} - persistentVolumeClaimColumns = []string{"NAME", "STATUS", "VOLUME", "CAPACITY", "ACCESSMODES", "AGE"} - componentStatusColumns = []string{"NAME", "STATUS", "MESSAGE", "ERROR"} - thirdPartyResourceColumns = []string{"NAME", "DESCRIPTION", "VERSION(S)"} - roleColumns = []string{"NAME", "AGE"} - roleBindingColumns = []string{"NAME", "AGE"} - clusterRoleColumns = []string{"NAME", "AGE"} - clusterRoleBindingColumns = []string{"NAME", "AGE"} - storageClassColumns = []string{"NAME", "TYPE"} - statusColumns = []string{"STATUS", "REASON", "MESSAGE"} - - // TODO: consider having 'KIND' for third party resource data - thirdPartyResourceDataColumns = []string{"NAME", "LABELS", "DATA"} - horizontalPodAutoscalerColumns = []string{"NAME", "REFERENCE", "TARGET", "CURRENT", "MINPODS", "MAXPODS", 
"AGE"} - withNamespacePrefixColumns = []string{"NAMESPACE"} // TODO(erictune): print cluster name too. - deploymentColumns = []string{"NAME", "DESIRED", "CURRENT", "UP-TO-DATE", "AVAILABLE", "AGE"} - configMapColumns = []string{"NAME", "DATA", "AGE"} - podSecurityPolicyColumns = []string{"NAME", "PRIV", "CAPS", "VOLUMEPLUGINS", "SELINUX", "RUNASUSER"} - clusterColumns = []string{"NAME", "STATUS", "AGE"} - networkPolicyColumns = []string{"NAME", "POD-SELECTOR", "AGE"} - certificateSigningRequestColumns = []string{"NAME", "AGE", "REQUESTOR", "CONDITION"} -) - -func (h *HumanReadablePrinter) printPod(pod *api.Pod, w io.Writer, options PrintOptions) error { - if err := printPodBase(pod, w, options); err != nil { - return err - } - - return nil -} - -func (h *HumanReadablePrinter) printPodList(podList *api.PodList, w io.Writer, options PrintOptions) error { - for _, pod := range podList.Items { - if err := printPodBase(&pod, w, options); err != nil { - return err - } - } - return nil -} - -// addDefaultHandlers adds print handlers for default Kubernetes types. -func (h *HumanReadablePrinter) addDefaultHandlers() { - h.Handler(podColumns, h.printPodList) - h.Handler(podColumns, h.printPod) - h.Handler(podTemplateColumns, printPodTemplate) - h.Handler(podTemplateColumns, printPodTemplateList) - h.Handler(podDisruptionBudgetColumns, printPodDisruptionBudget) - h.Handler(podDisruptionBudgetColumns, printPodDisruptionBudgetList) - h.Handler(replicationControllerColumns, printReplicationController) - h.Handler(replicationControllerColumns, printReplicationControllerList) - h.Handler(replicaSetColumns, printReplicaSet) - h.Handler(replicaSetColumns, printReplicaSetList) - h.Handler(daemonSetColumns, printDaemonSet) - h.Handler(daemonSetColumns, printDaemonSetList) - h.Handler(jobColumns, printJob) - h.Handler(jobColumns, printJobList) - h.Handler(cronJobColumns, printCronJob) - h.Handler(cronJobColumns, printCronJobList) - h.Handler(serviceColumns, printService) - h.Handler(serviceColumns, printServiceList) - h.Handler(ingressColumns, printIngress) - h.Handler(ingressColumns, printIngressList) - h.Handler(statefulSetColumns, printStatefulSet) - h.Handler(statefulSetColumns, printStatefulSetList) - h.Handler(endpointColumns, printEndpoints) - h.Handler(endpointColumns, printEndpointsList) - h.Handler(nodeColumns, printNode) - h.Handler(nodeColumns, printNodeList) - h.Handler(eventColumns, printEvent) - h.Handler(eventColumns, printEventList) - h.Handler(limitRangeColumns, printLimitRange) - h.Handler(limitRangeColumns, printLimitRangeList) - h.Handler(resourceQuotaColumns, printResourceQuota) - h.Handler(resourceQuotaColumns, printResourceQuotaList) - h.Handler(namespaceColumns, printNamespace) - h.Handler(namespaceColumns, printNamespaceList) - h.Handler(secretColumns, printSecret) - h.Handler(secretColumns, printSecretList) - h.Handler(serviceAccountColumns, printServiceAccount) - h.Handler(serviceAccountColumns, printServiceAccountList) - h.Handler(persistentVolumeClaimColumns, printPersistentVolumeClaim) - h.Handler(persistentVolumeClaimColumns, printPersistentVolumeClaimList) - h.Handler(persistentVolumeColumns, printPersistentVolume) - h.Handler(persistentVolumeColumns, printPersistentVolumeList) - h.Handler(componentStatusColumns, printComponentStatus) - h.Handler(componentStatusColumns, printComponentStatusList) - h.Handler(thirdPartyResourceColumns, printThirdPartyResource) - h.Handler(thirdPartyResourceColumns, printThirdPartyResourceList) - h.Handler(deploymentColumns, printDeployment) - 
h.Handler(deploymentColumns, printDeploymentList) - h.Handler(horizontalPodAutoscalerColumns, printHorizontalPodAutoscaler) - h.Handler(horizontalPodAutoscalerColumns, printHorizontalPodAutoscalerList) - h.Handler(configMapColumns, printConfigMap) - h.Handler(configMapColumns, printConfigMapList) - h.Handler(podSecurityPolicyColumns, printPodSecurityPolicy) - h.Handler(podSecurityPolicyColumns, printPodSecurityPolicyList) - h.Handler(thirdPartyResourceDataColumns, printThirdPartyResourceData) - h.Handler(thirdPartyResourceDataColumns, printThirdPartyResourceDataList) - h.Handler(clusterColumns, printCluster) - h.Handler(clusterColumns, printClusterList) - h.Handler(networkPolicyColumns, printNetworkPolicy) - h.Handler(networkPolicyColumns, printNetworkPolicyList) - h.Handler(roleColumns, printRole) - h.Handler(roleColumns, printRoleList) - h.Handler(roleBindingColumns, printRoleBinding) - h.Handler(roleBindingColumns, printRoleBindingList) - h.Handler(clusterRoleColumns, printClusterRole) - h.Handler(clusterRoleColumns, printClusterRoleList) - h.Handler(clusterRoleBindingColumns, printClusterRoleBinding) - h.Handler(clusterRoleBindingColumns, printClusterRoleBindingList) - h.Handler(certificateSigningRequestColumns, printCertificateSigningRequest) - h.Handler(certificateSigningRequestColumns, printCertificateSigningRequestList) - h.Handler(storageClassColumns, printStorageClass) - h.Handler(storageClassColumns, printStorageClassList) - h.Handler(statusColumns, printStatus) -} - -func (h *HumanReadablePrinter) unknown(data []byte, w io.Writer) error { - _, err := fmt.Fprintf(w, "Unknown object: %s", string(data)) - return err -} - -func (h *HumanReadablePrinter) printHeader(columnNames []string, w io.Writer) error { - if _, err := fmt.Fprintf(w, "%s\n", strings.Join(columnNames, "\t")); err != nil { - return err - } - return nil -} - -// Pass ports=nil for all ports. -func formatEndpoints(endpoints *api.Endpoints, ports sets.String) string { - if len(endpoints.Subsets) == 0 { - return "" - } - list := []string{} - max := 3 - more := false - count := 0 - for i := range endpoints.Subsets { - ss := &endpoints.Subsets[i] - for i := range ss.Ports { - port := &ss.Ports[i] - if ports == nil || ports.Has(port.Name) { - for i := range ss.Addresses { - if len(list) == max { - more = true - } - addr := &ss.Addresses[i] - if !more { - list = append(list, fmt.Sprintf("%s:%d", addr.IP, port.Port)) - } - count++ - } - } - } - } - ret := strings.Join(list, ",") - if more { - return fmt.Sprintf("%s + %d more...", ret, count-max) - } - return ret -} - -func shortHumanDuration(d time.Duration) string { - // Allow deviation no more than 2 seconds(excluded) to tolerate machine time - // inconsistence, it can be considered as almost now. - if seconds := int(d.Seconds()); seconds < -1 { - return fmt.Sprintf("") - } else if seconds < 0 { - return fmt.Sprintf("0s") - } else if seconds < 60 { - return fmt.Sprintf("%ds", seconds) - } else if minutes := int(d.Minutes()); minutes < 60 { - return fmt.Sprintf("%dm", minutes) - } else if hours := int(d.Hours()); hours < 24 { - return fmt.Sprintf("%dh", hours) - } else if hours < 24*364 { - return fmt.Sprintf("%dd", hours/24) - } - return fmt.Sprintf("%dy", int(d.Hours()/24/365)) -} - -// translateTimestamp returns the elapsed time since timestamp in -// human-readable approximation. 
-func translateTimestamp(timestamp unversioned.Time) string { - if timestamp.IsZero() { - return "" - } - return shortHumanDuration(time.Now().Sub(timestamp.Time)) -} - -func printPodBase(pod *api.Pod, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, pod.Name, options.WithKind) - namespace := pod.Namespace - - restarts := 0 - totalContainers := len(pod.Spec.Containers) - readyContainers := 0 - - reason := string(pod.Status.Phase) - if pod.Status.Reason != "" { - reason = pod.Status.Reason - } - - initializing := false - for i := range pod.Status.InitContainerStatuses { - container := pod.Status.InitContainerStatuses[i] - restarts += int(container.RestartCount) - switch { - case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0: - continue - case container.State.Terminated != nil: - // initialization is failed - if len(container.State.Terminated.Reason) == 0 { - if container.State.Terminated.Signal != 0 { - reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal) - } else { - reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode) - } - } else { - reason = "Init:" + container.State.Terminated.Reason - } - initializing = true - case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing": - reason = "Init:" + container.State.Waiting.Reason - initializing = true - default: - reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers)) - initializing = true - } - break - } - if !initializing { - restarts = 0 - for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- { - container := pod.Status.ContainerStatuses[i] - - restarts += int(container.RestartCount) - if container.State.Waiting != nil && container.State.Waiting.Reason != "" { - reason = container.State.Waiting.Reason - } else if container.State.Terminated != nil && container.State.Terminated.Reason != "" { - reason = container.State.Terminated.Reason - } else if container.State.Terminated != nil && container.State.Terminated.Reason == "" { - if container.State.Terminated.Signal != 0 { - reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal) - } else { - reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode) - } - } else if container.Ready && container.State.Running != nil { - readyContainers++ - } - } - } - - if pod.DeletionTimestamp != nil && pod.Status.Reason == node.NodeUnreachablePodReason { - reason = "Unknown" - } else if pod.DeletionTimestamp != nil { - reason = "Terminating" - } - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%d/%d\t%s\t%d\t%s", - name, - readyContainers, - totalContainers, - reason, - restarts, - translateTimestamp(pod.CreationTimestamp), - ); err != nil { - return err - } - - if options.Wide { - nodeName := pod.Spec.NodeName - podIP := pod.Status.PodIP - if podIP == "" { - podIP = "" - } - if _, err := fmt.Fprintf(w, "\t%s\t%s", - podIP, - nodeName, - ); err != nil { - return err - } - } - - if _, err := fmt.Fprint(w, AppendLabels(pod.Labels, options.ColumnLabels)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, pod.Labels)); err != nil { - return err - } - - return nil -} - -func printPodTemplate(pod *api.PodTemplate, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, pod.Name, options.WithKind) - - namespace := 
pod.Namespace - - containers := pod.Template.Spec.Containers - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s", name); err != nil { - return err - } - if err := layoutContainers(containers, w); err != nil { - return err - } - if _, err := fmt.Fprintf(w, "\t%s", labels.FormatLabels(pod.Template.Labels)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(pod.Labels, options.ColumnLabels)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, pod.Labels)); err != nil { - return err - } - - return nil -} - -func printPodTemplateList(podList *api.PodTemplateList, w io.Writer, options PrintOptions) error { - for _, pod := range podList.Items { - if err := printPodTemplate(&pod, w, options); err != nil { - return err - } - } - return nil -} - -func printPodDisruptionBudget(pdb *policy.PodDisruptionBudget, w io.Writer, options PrintOptions) error { - // name, minavailable, selector - name := formatResourceName(options.Kind, pdb.Name, options.WithKind) - namespace := pdb.Namespace - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%s\t%d\t%s\n", - name, - pdb.Spec.MinAvailable.String(), - pdb.Status.PodDisruptionsAllowed, - translateTimestamp(pdb.CreationTimestamp), - ); err != nil { - return err - } - - return nil -} - -func printPodDisruptionBudgetList(pdbList *policy.PodDisruptionBudgetList, w io.Writer, options PrintOptions) error { - for _, pdb := range pdbList.Items { - if err := printPodDisruptionBudget(&pdb, w, options); err != nil { - return err - } - } - return nil -} - -// TODO(AdoHe): try to put wide output in a single method -func printReplicationController(controller *api.ReplicationController, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, controller.Name, options.WithKind) - - namespace := controller.Namespace - containers := controller.Spec.Template.Spec.Containers - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - - desiredReplicas := controller.Spec.Replicas - currentReplicas := controller.Status.Replicas - readyReplicas := controller.Status.ReadyReplicas - if _, err := fmt.Fprintf(w, "%s\t%d\t%d\t%d\t%s", - name, - desiredReplicas, - currentReplicas, - readyReplicas, - translateTimestamp(controller.CreationTimestamp), - ); err != nil { - return err - } - - if options.Wide { - if err := layoutContainers(containers, w); err != nil { - return err - } - if _, err := fmt.Fprintf(w, "\t%s", labels.FormatLabels(controller.Spec.Selector)); err != nil { - return err - } - } - if _, err := fmt.Fprint(w, AppendLabels(controller.Labels, options.ColumnLabels)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, controller.Labels)); err != nil { - return err - } - - return nil -} - -func printReplicationControllerList(list *api.ReplicationControllerList, w io.Writer, options PrintOptions) error { - for _, controller := range list.Items { - if err := printReplicationController(&controller, w, options); err != nil { - return err - } - } - return nil -} - -func printReplicaSet(rs *extensions.ReplicaSet, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, rs.Name, options.WithKind) - - namespace := rs.Namespace - containers := rs.Spec.Template.Spec.Containers 
- - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - - desiredReplicas := rs.Spec.Replicas - currentReplicas := rs.Status.Replicas - readyReplicas := rs.Status.ReadyReplicas - if _, err := fmt.Fprintf(w, "%s\t%d\t%d\t%d\t%s", - name, - desiredReplicas, - currentReplicas, - readyReplicas, - translateTimestamp(rs.CreationTimestamp), - ); err != nil { - return err - } - if options.Wide { - if err := layoutContainers(containers, w); err != nil { - return err - } - if _, err := fmt.Fprintf(w, "\t%s", unversioned.FormatLabelSelector(rs.Spec.Selector)); err != nil { - return err - } - } - if _, err := fmt.Fprint(w, AppendLabels(rs.Labels, options.ColumnLabels)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, rs.Labels)); err != nil { - return err - } - - return nil -} - -func printReplicaSetList(list *extensions.ReplicaSetList, w io.Writer, options PrintOptions) error { - for _, rs := range list.Items { - if err := printReplicaSet(&rs, w, options); err != nil { - return err - } - } - return nil -} - -func printCluster(c *federation.Cluster, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, c.Name, options.WithKind) - - var statuses []string - for _, condition := range c.Status.Conditions { - if condition.Status == api.ConditionTrue { - statuses = append(statuses, string(condition.Type)) - } else { - statuses = append(statuses, "Not"+string(condition.Type)) - } - } - if len(statuses) == 0 { - statuses = append(statuses, "Unknown") - } - - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\n", - name, - strings.Join(statuses, ","), - translateTimestamp(c.CreationTimestamp), - ); err != nil { - return err - } - return nil -} -func printClusterList(list *federation.ClusterList, w io.Writer, options PrintOptions) error { - for _, rs := range list.Items { - if err := printCluster(&rs, w, options); err != nil { - return err - } - } - return nil -} - -func printJob(job *batch.Job, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, job.Name, options.WithKind) - - namespace := job.Namespace - containers := job.Spec.Template.Spec.Containers - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - - selector, err := unversioned.LabelSelectorAsSelector(job.Spec.Selector) - if err != nil { - // this shouldn't happen if LabelSelector passed validation - return err - } - if job.Spec.Completions != nil { - if _, err := fmt.Fprintf(w, "%s\t%d\t%d\t%s", - name, - *job.Spec.Completions, - job.Status.Succeeded, - translateTimestamp(job.CreationTimestamp), - ); err != nil { - return err - } - } else { - if _, err := fmt.Fprintf(w, "%s\t%s\t%d\t%s", - name, - "", - job.Status.Succeeded, - translateTimestamp(job.CreationTimestamp), - ); err != nil { - return err - } - } - if options.Wide { - if err := layoutContainers(containers, w); err != nil { - return err - } - if _, err := fmt.Fprintf(w, "\t%s", selector.String()); err != nil { - return err - } - } - if _, err := fmt.Fprint(w, AppendLabels(job.Labels, options.ColumnLabels)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, job.Labels)); err != nil { - return err - } - - return nil -} - -func printJobList(list *batch.JobList, w io.Writer, options PrintOptions) error { - for _, job := range list.Items { - if err := printJob(&job, w, options); err != nil { - return err - } - } - return nil -} - 
-func printCronJob(cronJob *batch.CronJob, w io.Writer, options PrintOptions) error { - name := cronJob.Name - namespace := cronJob.Namespace - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - - lastScheduleTime := "" - if cronJob.Status.LastScheduleTime != nil { - lastScheduleTime = cronJob.Status.LastScheduleTime.Time.Format(time.RFC1123Z) - } - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%d\t%s\n", - name, - cronJob.Spec.Schedule, - printBoolPtr(cronJob.Spec.Suspend), - len(cronJob.Status.Active), - lastScheduleTime, - ); err != nil { - return err - } - - return nil -} - -func printCronJobList(list *batch.CronJobList, w io.Writer, options PrintOptions) error { - for _, cronJob := range list.Items { - if err := printCronJob(&cronJob, w, options); err != nil { - return err - } - } - return nil -} - -// loadBalancerStatusStringer behaves mostly like a string interface and converts the given status to a string. -// `wide` indicates whether the returned value is meant for --o=wide output. If not, it's clipped to 16 bytes. -func loadBalancerStatusStringer(s api.LoadBalancerStatus, wide bool) string { - ingress := s.Ingress - result := []string{} - for i := range ingress { - if ingress[i].IP != "" { - result = append(result, ingress[i].IP) - } else if ingress[i].Hostname != "" { - result = append(result, ingress[i].Hostname) - } - } - r := strings.Join(result, ",") - if !wide && len(r) > loadBalancerWidth { - r = r[0:(loadBalancerWidth-3)] + "..." - } - return r -} - -func getServiceExternalIP(svc *api.Service, wide bool) string { - switch svc.Spec.Type { - case api.ServiceTypeClusterIP: - if len(svc.Spec.ExternalIPs) > 0 { - return strings.Join(svc.Spec.ExternalIPs, ",") - } - return "" - case api.ServiceTypeNodePort: - if len(svc.Spec.ExternalIPs) > 0 { - return strings.Join(svc.Spec.ExternalIPs, ",") - } - return "" - case api.ServiceTypeLoadBalancer: - lbIps := loadBalancerStatusStringer(svc.Status.LoadBalancer, wide) - if len(svc.Spec.ExternalIPs) > 0 { - result := append(strings.Split(lbIps, ","), svc.Spec.ExternalIPs...) 
- return strings.Join(result, ",") - } - if len(lbIps) > 0 { - return lbIps - } - return "" - case api.ServiceTypeExternalName: - return svc.Spec.ExternalName - } - return "" -} - -func makePortString(ports []api.ServicePort) string { - pieces := make([]string, len(ports)) - for ix := range ports { - port := &ports[ix] - pieces[ix] = fmt.Sprintf("%d/%s", port.Port, port.Protocol) - if port.NodePort > 0 { - pieces[ix] = fmt.Sprintf("%d:%d/%s", port.Port, port.NodePort, port.Protocol) - } - } - return strings.Join(pieces, ",") -} - -func printService(svc *api.Service, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, svc.Name, options.WithKind) - - namespace := svc.Namespace - - internalIP := svc.Spec.ClusterIP - externalIP := getServiceExternalIP(svc, options.Wide) - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s", - name, - internalIP, - externalIP, - makePortString(svc.Spec.Ports), - translateTimestamp(svc.CreationTimestamp), - ); err != nil { - return err - } - if options.Wide { - if _, err := fmt.Fprintf(w, "\t%s", labels.FormatLabels(svc.Spec.Selector)); err != nil { - return err - } - } - if _, err := fmt.Fprint(w, AppendLabels(svc.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, svc.Labels)) - return err -} - -func printServiceList(list *api.ServiceList, w io.Writer, options PrintOptions) error { - for _, svc := range list.Items { - if err := printService(&svc, w, options); err != nil { - return err - } - } - return nil -} - -// backendStringer behaves just like a string interface and converts the given backend to a string. 
-func backendStringer(backend *extensions.IngressBackend) string { - if backend == nil { - return "" - } - return fmt.Sprintf("%v:%v", backend.ServiceName, backend.ServicePort.String()) -} - -func formatHosts(rules []extensions.IngressRule) string { - list := []string{} - max := 3 - more := false - for _, rule := range rules { - if len(list) == max { - more = true - } - if !more && len(rule.Host) != 0 { - list = append(list, rule.Host) - } - } - if len(list) == 0 { - return "*" - } - ret := strings.Join(list, ",") - if more { - return fmt.Sprintf("%s + %d more...", ret, len(rules)-max) - } - return ret -} - -func formatPorts(tls []extensions.IngressTLS) string { - if len(tls) != 0 { - return "80, 443" - } - return "80" -} - -func printIngress(ingress *extensions.Ingress, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, ingress.Name, options.WithKind) - - namespace := ingress.Namespace - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - - if _, err := fmt.Fprintf(w, "%s\t%v\t%v\t%v\t%s", - name, - formatHosts(ingress.Spec.Rules), - loadBalancerStatusStringer(ingress.Status.LoadBalancer, options.Wide), - formatPorts(ingress.Spec.TLS), - translateTimestamp(ingress.CreationTimestamp), - ); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(ingress.Labels, options.ColumnLabels)); err != nil { - return err - } - - if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, ingress.Labels)); err != nil { - return err - } - return nil -} - -func printIngressList(ingressList *extensions.IngressList, w io.Writer, options PrintOptions) error { - for _, ingress := range ingressList.Items { - if err := printIngress(&ingress, w, options); err != nil { - return err - } - } - return nil -} - -func printStatefulSet(ps *apps.StatefulSet, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, ps.Name, options.WithKind) - - namespace := ps.Namespace - containers := ps.Spec.Template.Spec.Containers - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - desiredReplicas := ps.Spec.Replicas - currentReplicas := ps.Status.Replicas - if _, err := fmt.Fprintf(w, "%s\t%d\t%d\t%s", - name, - desiredReplicas, - currentReplicas, - translateTimestamp(ps.CreationTimestamp), - ); err != nil { - return err - } - if options.Wide { - if err := layoutContainers(containers, w); err != nil { - return err - } - if _, err := fmt.Fprintf(w, "\t%s", unversioned.FormatLabelSelector(ps.Spec.Selector)); err != nil { - return err - } - } - if _, err := fmt.Fprint(w, AppendLabels(ps.Labels, options.ColumnLabels)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, ps.Labels)); err != nil { - return err - } - - return nil -} - -func printStatefulSetList(statefulSetList *apps.StatefulSetList, w io.Writer, options PrintOptions) error { - for _, ps := range statefulSetList.Items { - if err := printStatefulSet(&ps, w, options); err != nil { - return err - } - } - return nil -} - -func printDaemonSet(ds *extensions.DaemonSet, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, ds.Name, options.WithKind) - - namespace := ds.Namespace - - containers := ds.Spec.Template.Spec.Containers - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - - desiredScheduled := ds.Status.DesiredNumberScheduled 
- currentScheduled := ds.Status.CurrentNumberScheduled - numberReady := ds.Status.NumberReady - selector, err := unversioned.LabelSelectorAsSelector(ds.Spec.Selector) - if err != nil { - // this shouldn't happen if LabelSelector passed validation - return err - } - if _, err := fmt.Fprintf(w, "%s\t%d\t%d\t%d\t%s\t%s", - name, - desiredScheduled, - currentScheduled, - numberReady, - labels.FormatLabels(ds.Spec.Template.Spec.NodeSelector), - translateTimestamp(ds.CreationTimestamp), - ); err != nil { - return err - } - if options.Wide { - if err := layoutContainers(containers, w); err != nil { - return err - } - if _, err := fmt.Fprintf(w, "\t%s", selector.String()); err != nil { - return err - } - } - if _, err := fmt.Fprint(w, AppendLabels(ds.Labels, options.ColumnLabels)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, ds.Labels)); err != nil { - return err - } - - return nil -} - -func printDaemonSetList(list *extensions.DaemonSetList, w io.Writer, options PrintOptions) error { - for _, ds := range list.Items { - if err := printDaemonSet(&ds, w, options); err != nil { - return err - } - } - return nil -} - -func printEndpoints(endpoints *api.Endpoints, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, endpoints.Name, options.WithKind) - - namespace := endpoints.Namespace - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%s\t%s", name, formatEndpoints(endpoints, nil), translateTimestamp(endpoints.CreationTimestamp)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(endpoints.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, endpoints.Labels)) - return err -} - -func printEndpointsList(list *api.EndpointsList, w io.Writer, options PrintOptions) error { - for _, item := range list.Items { - if err := printEndpoints(&item, w, options); err != nil { - return err - } - } - return nil -} - -func printNamespace(item *api.Namespace, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, item.Name, options.WithKind) - - if options.WithNamespace { - return fmt.Errorf("namespace is not namespaced") - } - - if _, err := fmt.Fprintf(w, "%s\t%s\t%s", name, item.Status.Phase, translateTimestamp(item.CreationTimestamp)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(item.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, item.Labels)) - return err -} - -func printNamespaceList(list *api.NamespaceList, w io.Writer, options PrintOptions) error { - for _, item := range list.Items { - if err := printNamespace(&item, w, options); err != nil { - return err - } - } - return nil -} - -func printSecret(item *api.Secret, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, item.Name, options.WithKind) - - namespace := item.Namespace - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%s\t%v\t%s", name, item.Type, len(item.Data), translateTimestamp(item.CreationTimestamp)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(item.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, 
item.Labels)) - return err -} - -func printSecretList(list *api.SecretList, w io.Writer, options PrintOptions) error { - for _, item := range list.Items { - if err := printSecret(&item, w, options); err != nil { - return err - } - } - - return nil -} - -func printServiceAccount(item *api.ServiceAccount, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, item.Name, options.WithKind) - - namespace := item.Namespace - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%d\t%s", name, len(item.Secrets), translateTimestamp(item.CreationTimestamp)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(item.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, item.Labels)) - return err -} - -func printServiceAccountList(list *api.ServiceAccountList, w io.Writer, options PrintOptions) error { - for _, item := range list.Items { - if err := printServiceAccount(&item, w, options); err != nil { - return err - } - } - - return nil -} - -func printNode(node *api.Node, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, node.Name, options.WithKind) - - if options.WithNamespace { - return fmt.Errorf("node is not namespaced") - } - conditionMap := make(map[api.NodeConditionType]*api.NodeCondition) - NodeAllConditions := []api.NodeConditionType{api.NodeReady} - for i := range node.Status.Conditions { - cond := node.Status.Conditions[i] - conditionMap[cond.Type] = &cond - } - var status []string - for _, validCondition := range NodeAllConditions { - if condition, ok := conditionMap[validCondition]; ok { - if condition.Status == api.ConditionTrue { - status = append(status, string(condition.Type)) - } else { - status = append(status, "Not"+string(condition.Type)) - } - } - } - if len(status) == 0 { - status = append(status, "Unknown") - } - if node.Spec.Unschedulable { - status = append(status, "SchedulingDisabled") - } - role := findNodeRole(node) - if role != "" { - status = append(status, role) - } - - if _, err := fmt.Fprintf(w, "%s\t%s\t%s", name, strings.Join(status, ","), translateTimestamp(node.CreationTimestamp)); err != nil { - return err - } - - if options.Wide { - if _, err := fmt.Fprintf(w, "\t%s", getNodeExternalIP(node)); err != nil { - return err - } - } - // Display caller specify column labels first. - if _, err := fmt.Fprint(w, AppendLabels(node.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, node.Labels)) - return err -} - -// Returns first external ip of the node or "" if none is found. -func getNodeExternalIP(node *api.Node) string { - for _, address := range node.Status.Addresses { - if address.Type == api.NodeExternalIP { - return address.Address - } - } - - return "" -} - -// findNodeRole returns the role of a given node, or "" if none found. 
-// The role is determined by looking in order for: -// * a kubernetes.io/role label -// * a kubeadm.alpha.kubernetes.io/role label -// If no role is found, ("", nil) is returned -func findNodeRole(node *api.Node) string { - if role := node.Labels[unversioned.NodeLabelRole]; role != "" { - return role - } - if role := node.Labels[unversioned.NodeLabelKubeadmAlphaRole]; role != "" { - return role - } - // No role found - return "" -} - -func printNodeList(list *api.NodeList, w io.Writer, options PrintOptions) error { - for _, node := range list.Items { - if err := printNode(&node, w, options); err != nil { - return err - } - } - return nil -} - -func printPersistentVolume(pv *api.PersistentVolume, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, pv.Name, options.WithKind) - - if options.WithNamespace { - return fmt.Errorf("persistentVolume is not namespaced") - } - - claimRefUID := "" - if pv.Spec.ClaimRef != nil { - claimRefUID += pv.Spec.ClaimRef.Namespace - claimRefUID += "/" - claimRefUID += pv.Spec.ClaimRef.Name - } - - modesStr := api.GetAccessModesAsString(pv.Spec.AccessModes) - reclaimPolicyStr := string(pv.Spec.PersistentVolumeReclaimPolicy) - - aQty := pv.Spec.Capacity[api.ResourceStorage] - aSize := aQty.String() - - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s", - name, - aSize, modesStr, reclaimPolicyStr, - pv.Status.Phase, - claimRefUID, - pv.Status.Reason, - translateTimestamp(pv.CreationTimestamp), - ); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(pv.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, pv.Labels)) - return err -} - -func printPersistentVolumeList(list *api.PersistentVolumeList, w io.Writer, options PrintOptions) error { - for _, pv := range list.Items { - if err := printPersistentVolume(&pv, w, options); err != nil { - return err - } - } - return nil -} - -func printPersistentVolumeClaim(pvc *api.PersistentVolumeClaim, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, pvc.Name, options.WithKind) - - namespace := pvc.Namespace - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - - phase := pvc.Status.Phase - storage := pvc.Spec.Resources.Requests[api.ResourceStorage] - capacity := "" - accessModes := "" - if pvc.Spec.VolumeName != "" { - accessModes = api.GetAccessModesAsString(pvc.Status.AccessModes) - storage = pvc.Status.Capacity[api.ResourceStorage] - capacity = storage.String() - } - - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s", name, phase, pvc.Spec.VolumeName, capacity, accessModes, translateTimestamp(pvc.CreationTimestamp)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(pvc.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, pvc.Labels)) - return err -} - -func printPersistentVolumeClaimList(list *api.PersistentVolumeClaimList, w io.Writer, options PrintOptions) error { - for _, psd := range list.Items { - if err := printPersistentVolumeClaim(&psd, w, options); err != nil { - return err - } - } - return nil -} - -func printEvent(event *api.Event, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, event.InvolvedObject.Name, options.WithKind) - - namespace := event.Namespace - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - 
return err - } - } - - // While watching event, we should print absolute time. - var FirstTimestamp, LastTimestamp string - if options.AbsoluteTimestamps { - FirstTimestamp = event.FirstTimestamp.String() - LastTimestamp = event.LastTimestamp.String() - } else { - FirstTimestamp = translateTimestamp(event.FirstTimestamp) - LastTimestamp = translateTimestamp(event.LastTimestamp) - } - - if _, err := fmt.Fprintf( - w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s", - LastTimestamp, - FirstTimestamp, - event.Count, - name, - event.InvolvedObject.Kind, - event.InvolvedObject.FieldPath, - event.Type, - event.Reason, - event.Source, - event.Message, - ); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(event.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, event.Labels)) - return err -} - -// Sorts and prints the EventList in a human-friendly format. -func printEventList(list *api.EventList, w io.Writer, options PrintOptions) error { - sort.Sort(events.SortableEvents(list.Items)) - for i := range list.Items { - if err := printEvent(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -func printLimitRange(limitRange *api.LimitRange, w io.Writer, options PrintOptions) error { - return printObjectMeta(limitRange.ObjectMeta, w, options, true) -} - -// Prints the LimitRangeList in a human-friendly format. -func printLimitRangeList(list *api.LimitRangeList, w io.Writer, options PrintOptions) error { - for i := range list.Items { - if err := printLimitRange(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -// printObjectMeta prints the object metadata of a given resource. -func printObjectMeta(meta api.ObjectMeta, w io.Writer, options PrintOptions, namespaced bool) error { - name := formatResourceName(options.Kind, meta.Name, options.WithKind) - - if namespaced && options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", meta.Namespace); err != nil { - return err - } - } - - if _, err := fmt.Fprintf( - w, "%s\t%s", - name, - translateTimestamp(meta.CreationTimestamp), - ); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(meta.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, meta.Labels)) - return err -} - -func printResourceQuota(resourceQuota *api.ResourceQuota, w io.Writer, options PrintOptions) error { - return printObjectMeta(resourceQuota.ObjectMeta, w, options, true) -} - -// Prints the ResourceQuotaList in a human-friendly format. -func printResourceQuotaList(list *api.ResourceQuotaList, w io.Writer, options PrintOptions) error { - for i := range list.Items { - if err := printResourceQuota(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -func printRole(role *rbac.Role, w io.Writer, options PrintOptions) error { - return printObjectMeta(role.ObjectMeta, w, options, true) -} - -// Prints the Role in a human-friendly format. -func printRoleList(list *rbac.RoleList, w io.Writer, options PrintOptions) error { - for i := range list.Items { - if err := printRole(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -func printRoleBinding(roleBinding *rbac.RoleBinding, w io.Writer, options PrintOptions) error { - return printObjectMeta(roleBinding.ObjectMeta, w, options, true) -} - -// Prints the RoleBinding in a human-friendly format. 
-func printRoleBindingList(list *rbac.RoleBindingList, w io.Writer, options PrintOptions) error { - for i := range list.Items { - if err := printRoleBinding(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -func printClusterRole(clusterRole *rbac.ClusterRole, w io.Writer, options PrintOptions) error { - return printObjectMeta(clusterRole.ObjectMeta, w, options, false) -} - -// Prints the ClusterRole in a human-friendly format. -func printClusterRoleList(list *rbac.ClusterRoleList, w io.Writer, options PrintOptions) error { - for i := range list.Items { - if err := printClusterRole(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -func printClusterRoleBinding(clusterRoleBinding *rbac.ClusterRoleBinding, w io.Writer, options PrintOptions) error { - return printObjectMeta(clusterRoleBinding.ObjectMeta, w, options, false) -} - -// Prints the ClusterRoleBinding in a human-friendly format. -func printClusterRoleBindingList(list *rbac.ClusterRoleBindingList, w io.Writer, options PrintOptions) error { - for i := range list.Items { - if err := printClusterRoleBinding(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -func printCertificateSigningRequest(csr *certificates.CertificateSigningRequest, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, csr.Name, options.WithKind) - meta := csr.ObjectMeta - - status, err := extractCSRStatus(csr) - if err != nil { - return err - } - - if _, err := fmt.Fprintf( - w, "%s\t%s\t%s\t%s", - name, - translateTimestamp(meta.CreationTimestamp), - csr.Spec.Username, - status, - ); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(meta.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err = fmt.Fprint(w, AppendAllLabels(options.ShowLabels, meta.Labels)) - return err -} - -func extractCSRStatus(csr *certificates.CertificateSigningRequest) (string, error) { - var approved, denied bool - for _, c := range csr.Status.Conditions { - switch c.Type { - case certificates.CertificateApproved: - approved = true - case certificates.CertificateDenied: - denied = true - default: - return "", fmt.Errorf("unknown csr condition %q", c) - } - } - var status string - // must be in order of presidence - if denied { - status += "Denied" - } else if approved { - status += "Approved" - } else { - status += "Pending" - } - if len(csr.Status.Certificate) > 0 { - status += ",Issued" - } - return status, nil -} - -func printCertificateSigningRequestList(list *certificates.CertificateSigningRequestList, w io.Writer, options PrintOptions) error { - for i := range list.Items { - if err := printCertificateSigningRequest(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -func printComponentStatus(item *api.ComponentStatus, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, item.Name, options.WithKind) - - if options.WithNamespace { - return fmt.Errorf("componentStatus is not namespaced") - } - status := "Unknown" - message := "" - error := "" - for _, condition := range item.Conditions { - if condition.Type == api.ComponentHealthy { - if condition.Status == api.ConditionTrue { - status = "Healthy" - } else { - status = "Unhealthy" - } - message = condition.Message - error = condition.Error - break - } - } - - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s", name, status, message, error); err != nil { - return err - } - if _, err := fmt.Fprint(w, 
AppendLabels(item.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, item.Labels)) - return err -} - -func printComponentStatusList(list *api.ComponentStatusList, w io.Writer, options PrintOptions) error { - for _, item := range list.Items { - if err := printComponentStatus(&item, w, options); err != nil { - return err - } - } - - return nil -} - -func printThirdPartyResource(rsrc *extensions.ThirdPartyResource, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, rsrc.Name, options.WithKind) - - versions := make([]string, len(rsrc.Versions)) - for ix := range rsrc.Versions { - version := &rsrc.Versions[ix] - versions[ix] = fmt.Sprintf("%s", version.Name) - } - versionsString := strings.Join(versions, ",") - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\n", name, rsrc.Description, versionsString); err != nil { - return err - } - return nil -} - -func printThirdPartyResourceList(list *extensions.ThirdPartyResourceList, w io.Writer, options PrintOptions) error { - for _, item := range list.Items { - if err := printThirdPartyResource(&item, w, options); err != nil { - return err - } - } - - return nil -} - -func truncate(str string, maxLen int) string { - if len(str) > maxLen { - return str[0:maxLen] + "..." - } - return str -} - -func printThirdPartyResourceData(rsrc *extensions.ThirdPartyResourceData, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, rsrc.Name, options.WithKind) - - l := labels.FormatLabels(rsrc.Labels) - truncateCols := 50 - if options.Wide { - truncateCols = 100 - } - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\n", name, l, truncate(string(rsrc.Data), truncateCols)); err != nil { - return err - } - return nil -} - -func printThirdPartyResourceDataList(list *extensions.ThirdPartyResourceDataList, w io.Writer, options PrintOptions) error { - for _, item := range list.Items { - if err := printThirdPartyResourceData(&item, w, options); err != nil { - return err - } - } - - return nil -} - -func printDeployment(deployment *extensions.Deployment, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, deployment.Name, options.WithKind) - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", deployment.Namespace); err != nil { - return err - } - } - - desiredReplicas := deployment.Spec.Replicas - currentReplicas := deployment.Status.Replicas - updatedReplicas := deployment.Status.UpdatedReplicas - availableReplicas := deployment.Status.AvailableReplicas - age := translateTimestamp(deployment.CreationTimestamp) - if _, err := fmt.Fprintf(w, "%s\t%d\t%d\t%d\t%d\t%s", name, desiredReplicas, currentReplicas, updatedReplicas, availableReplicas, age); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(deployment.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, deployment.Labels)) - return err -} - -func printDeploymentList(list *extensions.DeploymentList, w io.Writer, options PrintOptions) error { - for _, item := range list.Items { - if err := printDeployment(&item, w, options); err != nil { - return err - } - } - return nil -} - -func printHorizontalPodAutoscaler(hpa *autoscaling.HorizontalPodAutoscaler, w io.Writer, options PrintOptions) error { - namespace := hpa.Namespace - name := formatResourceName(options.Kind, hpa.Name, options.WithKind) - - reference := fmt.Sprintf("%s/%s", - hpa.Spec.ScaleTargetRef.Kind, - 
hpa.Spec.ScaleTargetRef.Name) - target := "" - if hpa.Spec.TargetCPUUtilizationPercentage != nil { - target = fmt.Sprintf("%d%%", *hpa.Spec.TargetCPUUtilizationPercentage) - } - current := "" - if hpa.Status.CurrentCPUUtilizationPercentage != nil { - current = fmt.Sprintf("%d%%", *hpa.Status.CurrentCPUUtilizationPercentage) - } - minPods := "" - if hpa.Spec.MinReplicas != nil { - minPods = fmt.Sprintf("%d", *hpa.Spec.MinReplicas) - } - maxPods := hpa.Spec.MaxReplicas - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%d\t%s", - name, - reference, - target, - current, - minPods, - maxPods, - translateTimestamp(hpa.CreationTimestamp), - ); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(hpa.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, hpa.Labels)) - return err -} - -func printHorizontalPodAutoscalerList(list *autoscaling.HorizontalPodAutoscalerList, w io.Writer, options PrintOptions) error { - for i := range list.Items { - if err := printHorizontalPodAutoscaler(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -func printConfigMap(configMap *api.ConfigMap, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, configMap.Name, options.WithKind) - - namespace := configMap.Namespace - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%v\t%s", name, len(configMap.Data), translateTimestamp(configMap.CreationTimestamp)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(configMap.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, configMap.Labels)) - return err -} - -func printConfigMapList(list *api.ConfigMapList, w io.Writer, options PrintOptions) error { - for i := range list.Items { - if err := printConfigMap(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -func printPodSecurityPolicy(item *extensions.PodSecurityPolicy, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, item.Name, options.WithKind) - - _, err := fmt.Fprintf(w, "%s\t%t\t%v\t%s\t%s\t%s\t%s\t%t\t%v\n", name, item.Spec.Privileged, - item.Spec.AllowedCapabilities, item.Spec.SELinux.Rule, - item.Spec.RunAsUser.Rule, item.Spec.FSGroup.Rule, item.Spec.SupplementalGroups.Rule, item.Spec.ReadOnlyRootFilesystem, item.Spec.Volumes) - return err -} - -func printPodSecurityPolicyList(list *extensions.PodSecurityPolicyList, w io.Writer, options PrintOptions) error { - for _, item := range list.Items { - if err := printPodSecurityPolicy(&item, w, options); err != nil { - return err - } - } - - return nil -} - -func printNetworkPolicy(networkPolicy *extensions.NetworkPolicy, w io.Writer, options PrintOptions) error { - name := formatResourceName(options.Kind, networkPolicy.Name, options.WithKind) - - namespace := networkPolicy.Namespace - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%v\t%s", name, unversioned.FormatLabelSelector(&networkPolicy.Spec.PodSelector), translateTimestamp(networkPolicy.CreationTimestamp)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(networkPolicy.Labels, 
options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, networkPolicy.Labels)) - return err -} - -func printNetworkPolicyList(list *extensions.NetworkPolicyList, w io.Writer, options PrintOptions) error { - for i := range list.Items { - if err := printNetworkPolicy(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -func printStorageClass(sc *storage.StorageClass, w io.Writer, options PrintOptions) error { - name := sc.Name - - if storageutil.IsDefaultAnnotation(sc.ObjectMeta) { - name += " (default)" - } - provtype := sc.Provisioner - - if _, err := fmt.Fprintf(w, "%s\t%s\t", name, provtype); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(sc.Labels, options.ColumnLabels)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, sc.Labels)); err != nil { - return err - } - - return nil -} - -func printStorageClassList(scList *storage.StorageClassList, w io.Writer, options PrintOptions) error { - for _, sc := range scList.Items { - if err := printStorageClass(&sc, w, options); err != nil { - return err - } - } - return nil -} - -func printStatus(status *unversioned.Status, w io.Writer, options PrintOptions) error { - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\n", status.Status, status.Reason, status.Message); err != nil { - return err - } - - return nil -} - -func AppendLabels(itemLabels map[string]string, columnLabels []string) string { - var buffer bytes.Buffer - - for _, cl := range columnLabels { - buffer.WriteString(fmt.Sprint("\t")) - if il, ok := itemLabels[cl]; ok { - buffer.WriteString(fmt.Sprint(il)) - } else { - buffer.WriteString("") - } - } - - return buffer.String() -} - -// Append all labels to a single column. We need this even when show-labels flag* is -// false, since this adds newline delimiter to the end of each row. -func AppendAllLabels(showLabels bool, itemLabels map[string]string) string { - var buffer bytes.Buffer - - if showLabels { - buffer.WriteString(fmt.Sprint("\t")) - buffer.WriteString(labels.FormatLabels(itemLabels)) - } - buffer.WriteString("\n") - - return buffer.String() -} - -// Append a set of tabs for each label column. We need this in the case where -// we have extra lines so that the tabwriter will still line things up. -func AppendLabelTabs(columnLabels []string) string { - var buffer bytes.Buffer - - for range columnLabels { - buffer.WriteString("\t") - } - buffer.WriteString("\n") - - return buffer.String() -} - -// Lay out all the containers on one line if use wide output. 
-func layoutContainers(containers []api.Container, w io.Writer) error { - var namesBuffer bytes.Buffer - var imagesBuffer bytes.Buffer - - for i, container := range containers { - namesBuffer.WriteString(container.Name) - imagesBuffer.WriteString(container.Image) - if i != len(containers)-1 { - namesBuffer.WriteString(",") - imagesBuffer.WriteString(",") - } - } - _, err := fmt.Fprintf(w, "\t%s\t%s", namesBuffer.String(), imagesBuffer.String()) - if err != nil { - return err - } - return nil -} - -func formatLabelHeaders(columnLabels []string) []string { - formHead := make([]string, len(columnLabels)) - for i, l := range columnLabels { - p := strings.Split(l, "/") - formHead[i] = strings.ToUpper((p[len(p)-1])) - } - return formHead -} - -// headers for -o wide -func formatWideHeaders(wide bool, t reflect.Type) []string { - if wide { - if t.String() == "*api.Pod" || t.String() == "*api.PodList" { - return []string{"IP", "NODE"} - } - if t.String() == "*api.ReplicationController" || t.String() == "*api.ReplicationControllerList" { - return []string{"CONTAINER(S)", "IMAGE(S)", "SELECTOR"} - } - if t.String() == "*batch.Job" || t.String() == "*batch.JobList" { - return []string{"CONTAINER(S)", "IMAGE(S)", "SELECTOR"} - } - if t.String() == "*api.Service" || t.String() == "*api.ServiceList" { - return []string{"SELECTOR"} - } - if t.String() == "*extensions.DaemonSet" || t.String() == "*extensions.DaemonSetList" { - return []string{"CONTAINER(S)", "IMAGE(S)", "SELECTOR"} - } - if t.String() == "*extensions.ReplicaSet" || t.String() == "*extensions.ReplicaSetList" { - return []string{"CONTAINER(S)", "IMAGE(S)", "SELECTOR"} - } - if t.String() == "*api.Node" || t.String() == "*api.NodeList" { - return []string{"EXTERNAL-IP"} - } - } - return nil -} - -// headers for --show-labels=true -func formatShowLabelsHeader(showLabels bool, t reflect.Type) []string { - if showLabels { - if t.String() != "*api.ThirdPartyResource" && t.String() != "*api.ThirdPartyResourceList" { - return []string{"LABELS"} - } - } - return nil -} - -// GetNewTabWriter returns a tabwriter that translates tabbed columns in input into properly aligned text. -func GetNewTabWriter(output io.Writer) *tabwriter.Writer { - return tabwriter.NewWriter(output, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) -} - -// PrintObj prints the obj in a human-friendly format according to the type of the obj. -func (h *HumanReadablePrinter) PrintObj(obj runtime.Object, output io.Writer) error { - // if output is a tabwriter (when it's called by kubectl get), we use it; create a new tabwriter otherwise - w, found := output.(*tabwriter.Writer) - if !found { - w = GetNewTabWriter(output) - defer w.Flush() - } - - // check if the object is unstructured. If so, let's attempt to convert it to a type we can understand before - // trying to print, since the printers are keyed by type. This is extremely expensive. - switch obj.(type) { - case *runtime.UnstructuredList, *runtime.Unstructured, *runtime.Unknown: - if objBytes, err := runtime.Encode(api.Codecs.LegacyCodec(), obj); err == nil { - if decodedObj, err := runtime.Decode(api.Codecs.UniversalDecoder(), objBytes); err == nil { - obj = decodedObj - } - } - } - - t := reflect.TypeOf(obj) - if handler := h.handlerMap[t]; handler != nil { - if !h.options.NoHeaders && t != h.lastType { - headers := append(handler.columns, formatWideHeaders(h.options.Wide, t)...) - headers = append(headers, formatLabelHeaders(h.options.ColumnLabels)...) 
- // LABELS is always the last column. - headers = append(headers, formatShowLabelsHeader(h.options.ShowLabels, t)...) - if h.options.WithNamespace { - headers = append(withNamespacePrefixColumns, headers...) - } - h.printHeader(headers, w) - h.lastType = t - } - args := []reflect.Value{reflect.ValueOf(obj), reflect.ValueOf(w), reflect.ValueOf(h.options)} - resultValue := handler.printFunc.Call(args)[0] - if resultValue.IsNil() { - return nil - } - return resultValue.Interface().(error) - } - - // we don't recognize this type, but we can still attempt to print some reasonable information about. - unstructured, ok := obj.(*runtime.Unstructured) - if !ok { - return fmt.Errorf("error: unknown type %#v", obj) - } - - if _, err := meta.Accessor(obj); err == nil { - if !h.options.NoHeaders && t != h.lastType { - headers := []string{"NAME", "KIND"} - headers = append(headers, formatLabelHeaders(h.options.ColumnLabels)...) - // LABELS is always the last column. - headers = append(headers, formatShowLabelsHeader(h.options.ShowLabels, t)...) - if h.options.WithNamespace { - headers = append(withNamespacePrefixColumns, headers...) - } - h.printHeader(headers, w) - h.lastType = t - } - // if the error isn't nil, report the "I don't recognize this" error - if err := printUnstructured(unstructured, w, h.options); err != nil { - return err - } - return nil - } - - // we failed all reasonable printing efforts, report failure - return fmt.Errorf("error: unknown type %#v", obj) -} - -func printUnstructured(unstructured *runtime.Unstructured, w io.Writer, options PrintOptions) error { - metadata, err := meta.Accessor(unstructured) - if err != nil { - return err - } - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", metadata.GetNamespace()); err != nil { - return err - } - } - - kind := "" - if objKind, ok := unstructured.Object["kind"]; ok { - if str, ok := objKind.(string); ok { - kind = str - } - } - if objAPIVersion, ok := unstructured.Object["apiVersion"]; ok { - if str, ok := objAPIVersion.(string); ok { - version, err := unversioned.ParseGroupVersion(str) - if err != nil { - return err - } - kind = kind + "." + version.Version + "." + version.Group - } - } - name := formatResourceName(options.Kind, metadata.GetName(), options.WithKind) - - if _, err := fmt.Fprintf(w, "%s\t%s", name, kind); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(metadata.GetLabels(), options.ColumnLabels)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, metadata.GetLabels())); err != nil { - return err - } - - return nil -} - -// TemplatePrinter is an implementation of ResourcePrinter which formats data with a Go Template. -type TemplatePrinter struct { - rawTemplate string - template *template.Template -} - -func NewTemplatePrinter(tmpl []byte) (*TemplatePrinter, error) { - t, err := template.New("output"). - Funcs(template.FuncMap{"exists": exists}). - Parse(string(tmpl)) - if err != nil { - return nil, err - } - return &TemplatePrinter{ - rawTemplate: string(tmpl), - template: t, - }, nil -} - -func (p *TemplatePrinter) AfterPrint(w io.Writer, res string) error { - return nil -} - -// PrintObj formats the obj with the Go Template. 
-func (p *TemplatePrinter) PrintObj(obj runtime.Object, w io.Writer) error { - var data []byte - var err error - if unstructured, ok := obj.(*runtime.Unstructured); ok { - data, err = json.Marshal(unstructured.Object) - } else { - data, err = json.Marshal(obj) - - } - if err != nil { - return err - } - - out := map[string]interface{}{} - if err := json.Unmarshal(data, &out); err != nil { - return err - } - if err = p.safeExecute(w, out); err != nil { - // It is way easier to debug this stuff when it shows up in - // stdout instead of just stdin. So in addition to returning - // a nice error, also print useful stuff with the writer. - fmt.Fprintf(w, "Error executing template: %v. Printing more information for debugging the template:\n", err) - fmt.Fprintf(w, "\ttemplate was:\n\t\t%v\n", p.rawTemplate) - fmt.Fprintf(w, "\traw data was:\n\t\t%v\n", string(data)) - fmt.Fprintf(w, "\tobject given to template engine was:\n\t\t%+v\n\n", out) - return fmt.Errorf("error executing template %q: %v", p.rawTemplate, err) - } - return nil -} - -// TODO: implement HandledResources() -func (p *TemplatePrinter) HandledResources() []string { - return []string{} -} - -// safeExecute tries to execute the template, but catches panics and returns an error -// should the template engine panic. -func (p *TemplatePrinter) safeExecute(w io.Writer, obj interface{}) error { - var panicErr error - // Sorry for the double anonymous function. There's probably a clever way - // to do this that has the defer'd func setting the value to be returned, but - // that would be even less obvious. - retErr := func() error { - defer func() { - if x := recover(); x != nil { - panicErr = fmt.Errorf("caught panic: %+v", x) - } - }() - return p.template.Execute(w, obj) - }() - if panicErr != nil { - return panicErr - } - return retErr -} - -func tabbedString(f func(io.Writer) error) (string, error) { - out := new(tabwriter.Writer) - buf := &bytes.Buffer{} - out.Init(buf, 0, 8, 1, '\t', 0) - - err := f(out) - if err != nil { - return "", err - } - - out.Flush() - str := string(buf.String()) - return str, nil -} - -// exists returns true if it would be possible to call the index function -// with these arguments. -// -// TODO: how to document this for users? -// -// index returns the result of indexing its first argument by the following -// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each -// indexed item must be a map, slice, or array. 
-func exists(item interface{}, indices ...interface{}) bool { - v := reflect.ValueOf(item) - for _, i := range indices { - index := reflect.ValueOf(i) - var isNil bool - if v, isNil = indirect(v); isNil { - return false - } - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.String: - var x int64 - switch index.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x = index.Int() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x = int64(index.Uint()) - default: - return false - } - if x < 0 || x >= int64(v.Len()) { - return false - } - v = v.Index(int(x)) - case reflect.Map: - if !index.IsValid() { - index = reflect.Zero(v.Type().Key()) - } - if !index.Type().AssignableTo(v.Type().Key()) { - return false - } - if x := v.MapIndex(index); x.IsValid() { - v = x - } else { - v = reflect.Zero(v.Type().Elem()) - } - default: - return false - } - } - if _, isNil := indirect(v); isNil { - return false - } - return true -} - -// stolen from text/template -// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. -// We indirect through pointers and empty interfaces (only) because -// non-empty interfaces have methods we might need. -func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { - for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() { - if v.IsNil() { - return v, true - } - if v.Kind() == reflect.Interface && v.NumMethod() > 0 { - break - } - } - return v, false -} - -// JSONPathPrinter is an implementation of ResourcePrinter which formats data with jsonpath expression. -type JSONPathPrinter struct { - rawTemplate string - *jsonpath.JSONPath -} - -func NewJSONPathPrinter(tmpl string) (*JSONPathPrinter, error) { - j := jsonpath.New("out") - if err := j.Parse(tmpl); err != nil { - return nil, err - } - return &JSONPathPrinter{tmpl, j}, nil -} - -func (j *JSONPathPrinter) AfterPrint(w io.Writer, res string) error { - return nil -} - -// PrintObj formats the obj with the JSONPath Template. -func (j *JSONPathPrinter) PrintObj(obj runtime.Object, w io.Writer) error { - var queryObj interface{} = obj - if meta.IsListType(obj) { - data, err := json.Marshal(obj) - if err != nil { - return err - } - queryObj = map[string]interface{}{} - if err := json.Unmarshal(data, &queryObj); err != nil { - return err - } - } - - if unknown, ok := obj.(*runtime.Unknown); ok { - data, err := json.Marshal(unknown) - if err != nil { - return err - } - queryObj = map[string]interface{}{} - if err := json.Unmarshal(data, &queryObj); err != nil { - return err - } - } - if unstructured, ok := obj.(*runtime.Unstructured); ok { - queryObj = unstructured.Object - } - - if err := j.JSONPath.Execute(w, queryObj); err != nil { - fmt.Fprintf(w, "Error executing template: %v. 
Printing more information for debugging the template:\n", err) - fmt.Fprintf(w, "\ttemplate was:\n\t\t%v\n", j.rawTemplate) - fmt.Fprintf(w, "\tobject given to jsonpath engine was:\n\t\t%#v\n\n", queryObj) - return fmt.Errorf("error executing jsonpath %q: %v\n", j.rawTemplate, err) - } - return nil -} - -// TODO: implement HandledResources() -func (p *JSONPathPrinter) HandledResources() []string { - return []string{} -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go deleted file mode 100644 index eb59575888..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go +++ /dev/null @@ -1,177 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "bytes" - "fmt" - "os" - "os/signal" - "syscall" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/extensions" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" - "k8s.io/kubernetes/pkg/runtime" - sliceutil "k8s.io/kubernetes/pkg/util/slice" - "k8s.io/kubernetes/pkg/watch" -) - -// Rollbacker provides an interface for resources that can be rolled back. 
-type Rollbacker interface { - Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) -} - -func RollbackerFor(kind unversioned.GroupKind, c clientset.Interface) (Rollbacker, error) { - switch kind { - case extensions.Kind("Deployment"): - return &DeploymentRollbacker{c}, nil - } - return nil, fmt.Errorf("no rollbacker has been implemented for %q", kind) -} - -type DeploymentRollbacker struct { - c clientset.Interface -} - -func (r *DeploymentRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) { - d, ok := obj.(*extensions.Deployment) - if !ok { - return "", fmt.Errorf("passed object is not a Deployment: %#v", obj) - } - if dryRun { - return simpleDryRun(d, r.c, toRevision) - } - if d.Spec.Paused { - return "", fmt.Errorf("you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume deployment/%s' and try again", d.Name) - } - deploymentRollback := &extensions.DeploymentRollback{ - Name: d.Name, - UpdatedAnnotations: updatedAnnotations, - RollbackTo: extensions.RollbackConfig{ - Revision: toRevision, - }, - } - result := "" - - // Get current events - events, err := r.c.Core().Events(d.Namespace).List(api.ListOptions{}) - if err != nil { - return result, err - } - // Do the rollback - if err := r.c.Extensions().Deployments(d.Namespace).Rollback(deploymentRollback); err != nil { - return result, err - } - // Watch for the changes of events - watch, err := r.c.Core().Events(d.Namespace).Watch(api.ListOptions{Watch: true, ResourceVersion: events.ResourceVersion}) - if err != nil { - return result, err - } - result = watchRollbackEvent(watch) - return result, err -} - -// watchRollbackEvent watches for rollback events and returns rollback result -func watchRollbackEvent(w watch.Interface) string { - signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Interrupt, os.Kill, syscall.SIGTERM) - for { - select { - case event, ok := <-w.ResultChan(): - if !ok { - return "" - } - obj, ok := event.Object.(*api.Event) - if !ok { - w.Stop() - return "" - } - isRollback, result := isRollbackEvent(obj) - if isRollback { - w.Stop() - return result - } - case <-signals: - w.Stop() - } - } -} - -// isRollbackEvent checks if the input event is about rollback, and returns true and -// related result string back if it is. 
-func isRollbackEvent(e *api.Event) (bool, string) { - rollbackEventReasons := []string{deploymentutil.RollbackRevisionNotFound, deploymentutil.RollbackTemplateUnchanged, deploymentutil.RollbackDone} - for _, reason := range rollbackEventReasons { - if e.Reason == reason { - if reason == deploymentutil.RollbackDone { - return true, "rolled back" - } - return true, fmt.Sprintf("skipped rollback (%s: %s)", e.Reason, e.Message) - } - } - return false, "" -} - -func simpleDryRun(deployment *extensions.Deployment, c clientset.Interface, toRevision int64) (string, error) { - _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c) - if err != nil { - return "", fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", deployment.Name, err) - } - allRSs := allOldRSs - if newRS != nil { - allRSs = append(allRSs, newRS) - } - - revisionToSpec := make(map[int64]*api.PodTemplateSpec) - for _, rs := range allRSs { - v, err := deploymentutil.Revision(rs) - if err != nil { - continue - } - revisionToSpec[v] = &rs.Spec.Template - } - - if len(revisionToSpec) == 0 { - return "No rollout history found.", nil - } - - if toRevision > 0 { - template, ok := revisionToSpec[toRevision] - if !ok { - return "", fmt.Errorf("unable to find specified revision") - } - buf := bytes.NewBuffer([]byte{}) - DescribePodTemplate(template, buf) - return buf.String(), nil - } - - // Sort the revisionToSpec map by revision - revisions := make([]int64, 0, len(revisionToSpec)) - for r := range revisionToSpec { - revisions = append(revisions, r) - } - sliceutil.SortInts64(revisions) - - template, _ := revisionToSpec[revisions[len(revisions)-1]] - buf := bytes.NewBuffer([]byte{}) - buf.WriteString("\n") - DescribePodTemplate(template, buf) - return buf.String(), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go deleted file mode 100644 index 76c14d976f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go +++ /dev/null @@ -1,835 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - goerrors "errors" - "fmt" - "io" - "strconv" - "strings" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - "k8s.io/kubernetes/pkg/client/retry" - client "k8s.io/kubernetes/pkg/client/unversioned" - deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/integer" - "k8s.io/kubernetes/pkg/util/intstr" - "k8s.io/kubernetes/pkg/util/wait" -) - -const ( - sourceIdAnnotation = kubectlAnnotationPrefix + "update-source-id" - desiredReplicasAnnotation = kubectlAnnotationPrefix + "desired-replicas" - originalReplicasAnnotation = kubectlAnnotationPrefix + "original-replicas" - nextControllerAnnotation = kubectlAnnotationPrefix + "next-controller-id" -) - -// RollingUpdaterConfig is the configuration for a rolling deployment process. -type RollingUpdaterConfig struct { - // Out is a writer for progress output. - Out io.Writer - // OldRC is an existing controller to be replaced. - OldRc *api.ReplicationController - // NewRc is a controller that will take ownership of updated pods (will be - // created if needed). - NewRc *api.ReplicationController - // UpdatePeriod is the time to wait between individual pod updates. - UpdatePeriod time.Duration - // Interval is the time to wait between polling controller status after - // update. - Interval time.Duration - // Timeout is the time to wait for controller updates before giving up. - Timeout time.Duration - // MinReadySeconds is the number of seconds to wait after the pods are ready - MinReadySeconds int32 - // CleanupPolicy defines the cleanup action to take after the deployment is - // complete. - CleanupPolicy RollingUpdaterCleanupPolicy - // MaxUnavailable is the maximum number of pods that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // Absolute number is calculated from percentage by rounding up. - // This can not be 0 if MaxSurge is 0. - // By default, a fixed value of 1 is used. - // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods - // immediately when the rolling update starts. Once new pods are ready, old RC - // can be scaled down further, followed by scaling up the new RC, ensuring - // that the total number of pods available at all times during the update is at - // least 70% of desired pods. - MaxUnavailable intstr.IntOrString - // MaxSurge is the maximum number of pods that can be scheduled above the desired number of pods. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // By default, a value of 1 is used. - // Example: when this is set to 30%, the new RC can be scaled up immediately - // when the rolling update starts, such that the total number of old and new pods do not exceed - // 130% of desired pods. Once old pods have been killed, new RC can be scaled up - // further, ensuring that total number of pods running at any time during - // the update is atmost 130% of desired pods. - MaxSurge intstr.IntOrString - // OnProgress is invoked if set during each scale cycle, to allow the caller to perform additional logic or - // abort the scale. 
If an error is returned the cleanup method will not be invoked. The percentage value - // is a synthetic "progress" calculation that represents the approximate percentage completion. - OnProgress func(oldRc, newRc *api.ReplicationController, percentage int) error -} - -// RollingUpdaterCleanupPolicy is a cleanup action to take after the -// deployment is complete. -type RollingUpdaterCleanupPolicy string - -const ( - // DeleteRollingUpdateCleanupPolicy means delete the old controller. - DeleteRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Delete" - // PreserveRollingUpdateCleanupPolicy means keep the old controller. - PreserveRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Preserve" - // RenameRollingUpdateCleanupPolicy means delete the old controller, and rename - // the new controller to the name of the old controller. - RenameRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Rename" -) - -// RollingUpdater provides methods for updating replicated pods in a predictable, -// fault-tolerant way. -type RollingUpdater struct { - rcClient coreclient.ReplicationControllersGetter - podClient coreclient.PodsGetter - // Namespace for resources - ns string - // scaleAndWait scales a controller and returns its updated state. - scaleAndWait func(rc *api.ReplicationController, retry *RetryParams, wait *RetryParams) (*api.ReplicationController, error) - //getOrCreateTargetController gets and validates an existing controller or - //makes a new one. - getOrCreateTargetController func(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error) - // cleanup performs post deployment cleanup tasks for newRc and oldRc. - cleanup func(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error - // getReadyPods returns the amount of old and new ready pods. - getReadyPods func(oldRc, newRc *api.ReplicationController, minReadySeconds int32) (int32, int32, error) - // nowFn returns the current time used to calculate the minReadySeconds - nowFn func() unversioned.Time -} - -// NewRollingUpdater creates a RollingUpdater from a client. -func NewRollingUpdater(namespace string, rcClient coreclient.ReplicationControllersGetter, podClient coreclient.PodsGetter) *RollingUpdater { - updater := &RollingUpdater{ - rcClient: rcClient, - podClient: podClient, - ns: namespace, - } - // Inject real implementations. - updater.scaleAndWait = updater.scaleAndWaitWithScaler - updater.getOrCreateTargetController = updater.getOrCreateTargetControllerWithClient - updater.getReadyPods = updater.readyPods - updater.cleanup = updater.cleanupWithClients - updater.nowFn = func() unversioned.Time { return unversioned.Now() } - return updater -} - -// Update all pods for a ReplicationController (oldRc) by creating a new -// controller (newRc) with 0 replicas, and synchronously scaling oldRc and -// newRc until oldRc has 0 replicas and newRc has the original # of desired -// replicas. Cleanup occurs based on a RollingUpdaterCleanupPolicy. -// -// Each interval, the updater will attempt to make progress however it can -// without violating any availability constraints defined by the config. This -// means the amount scaled up or down each interval will vary based on the -// timeliness of readiness and the updater will always try to make progress, -// even slowly. -// -// If an update from newRc to oldRc is already in progress, we attempt to -// drive it to completion. If an error occurs at any step of the update, the -// error will be returned. 
-// -// A scaling event (either up or down) is considered progress; if no progress -// is made within the config.Timeout, an error is returned. -// -// TODO: make this handle performing a rollback of a partially completed -// rollout. -func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { - out := config.Out - oldRc := config.OldRc - scaleRetryParams := NewRetryParams(config.Interval, config.Timeout) - - // Find an existing controller (for continuing an interrupted update) or - // create a new one if necessary. - sourceId := fmt.Sprintf("%s:%s", oldRc.Name, oldRc.UID) - newRc, existed, err := r.getOrCreateTargetController(config.NewRc, sourceId) - if err != nil { - return err - } - if existed { - fmt.Fprintf(out, "Continuing update with existing controller %s.\n", newRc.Name) - } else { - fmt.Fprintf(out, "Created %s\n", newRc.Name) - } - // Extract the desired replica count from the controller. - desiredAnnotation, err := strconv.Atoi(newRc.Annotations[desiredReplicasAnnotation]) - if err != nil { - return fmt.Errorf("Unable to parse annotation for %s: %s=%s", - newRc.Name, desiredReplicasAnnotation, newRc.Annotations[desiredReplicasAnnotation]) - } - desired := int32(desiredAnnotation) - // Extract the original replica count from the old controller, adding the - // annotation if it doesn't yet exist. - _, hasOriginalAnnotation := oldRc.Annotations[originalReplicasAnnotation] - if !hasOriginalAnnotation { - existing, err := r.rcClient.ReplicationControllers(oldRc.Namespace).Get(oldRc.Name) - if err != nil { - return err - } - originReplicas := strconv.Itoa(int(existing.Spec.Replicas)) - applyUpdate := func(rc *api.ReplicationController) { - if rc.Annotations == nil { - rc.Annotations = map[string]string{} - } - rc.Annotations[originalReplicasAnnotation] = originReplicas - } - if oldRc, err = updateRcWithRetries(r.rcClient, existing.Namespace, existing, applyUpdate); err != nil { - return err - } - } - // maxSurge is the maximum scaling increment and maxUnavailable are the maximum pods - // that can be unavailable during a rollout. - maxSurge, maxUnavailable, err := deploymentutil.ResolveFenceposts(&config.MaxSurge, &config.MaxUnavailable, desired) - if err != nil { - return err - } - // Validate maximums. - if desired > 0 && maxUnavailable == 0 && maxSurge == 0 { - return fmt.Errorf("one of maxSurge or maxUnavailable must be specified") - } - // The minimum pods which must remain available throughout the update - // calculated for internal convenience. - minAvailable := int32(integer.IntMax(0, int(desired-maxUnavailable))) - // If the desired new scale is 0, then the max unavailable is necessarily - // the effective scale of the old RC regardless of the configuration - // (equivalent to 100% maxUnavailable). 
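// Worked example (illustrative only, not part of the vendored file): with desired = 10 and the
// fenceposts resolving to maxSurge = 3 and maxUnavailable = 2, minAvailable becomes
// integer.IntMax(0, 10-2) = 8, so the loop below never lets the number of available pods drop
// under 8 and never schedules more than desired+maxSurge = 13 pods across the old and new
// controllers combined.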
- if desired == 0 { - maxUnavailable = oldRc.Spec.Replicas - minAvailable = 0 - } - - fmt.Fprintf(out, "Scaling up %s from %d to %d, scaling down %s from %d to 0 (keep %d pods available, don't exceed %d pods)\n", - newRc.Name, newRc.Spec.Replicas, desired, oldRc.Name, oldRc.Spec.Replicas, minAvailable, desired+maxSurge) - - // give a caller incremental notification and allow them to exit early - goal := desired - newRc.Spec.Replicas - if goal < 0 { - goal = -goal - } - progress := func(complete bool) error { - if config.OnProgress == nil { - return nil - } - progress := desired - newRc.Spec.Replicas - if progress < 0 { - progress = -progress - } - percentage := 100 - if !complete && goal > 0 { - percentage = int((goal - progress) * 100 / goal) - } - return config.OnProgress(oldRc, newRc, percentage) - } - - // Scale newRc and oldRc until newRc has the desired number of replicas and - // oldRc has 0 replicas. - progressDeadline := time.Now().UnixNano() + config.Timeout.Nanoseconds() - for newRc.Spec.Replicas != desired || oldRc.Spec.Replicas != 0 { - // Store the existing replica counts for progress timeout tracking. - newReplicas := newRc.Spec.Replicas - oldReplicas := oldRc.Spec.Replicas - - // Scale up as much as possible. - scaledRc, err := r.scaleUp(newRc, oldRc, desired, maxSurge, maxUnavailable, scaleRetryParams, config) - if err != nil { - return err - } - newRc = scaledRc - - // notify the caller if necessary - if err := progress(false); err != nil { - return err - } - - // Wait between scaling operations for things to settle. - time.Sleep(config.UpdatePeriod) - - // Scale down as much as possible. - scaledRc, err = r.scaleDown(newRc, oldRc, desired, minAvailable, maxUnavailable, maxSurge, config) - if err != nil { - return err - } - oldRc = scaledRc - - // notify the caller if necessary - if err := progress(false); err != nil { - return err - } - - // If we are making progress, continue to advance the progress deadline. - // Otherwise, time out with an error. - progressMade := (newRc.Spec.Replicas != newReplicas) || (oldRc.Spec.Replicas != oldReplicas) - if progressMade { - progressDeadline = time.Now().UnixNano() + config.Timeout.Nanoseconds() - } else if time.Now().UnixNano() > progressDeadline { - return fmt.Errorf("timed out waiting for any update progress to be made") - } - } - - // notify the caller if necessary - if err := progress(true); err != nil { - return err - } - - // Housekeeping and cleanup policy execution. - return r.cleanup(oldRc, newRc, config) -} - -// scaleUp scales up newRc to desired by whatever increment is possible given -// the configured surge threshold. scaleUp will safely no-op as necessary when -// it detects redundancy or other relevant conditions. -func (r *RollingUpdater) scaleUp(newRc, oldRc *api.ReplicationController, desired, maxSurge, maxUnavailable int32, scaleRetryParams *RetryParams, config *RollingUpdaterConfig) (*api.ReplicationController, error) { - // If we're already at the desired, do nothing. - if newRc.Spec.Replicas == desired { - return newRc, nil - } - - // Scale up as far as we can based on the surge limit. - increment := (desired + maxSurge) - (oldRc.Spec.Replicas + newRc.Spec.Replicas) - // If the old is already scaled down, go ahead and scale all the way up. - if oldRc.Spec.Replicas == 0 { - increment = desired - newRc.Spec.Replicas - } - // We can't scale up without violating the surge limit, so do nothing. - if increment <= 0 { - return newRc, nil - } - // Increase the replica count, and deal with fenceposts. 
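// A minimal caller-side sketch of driving Update (wiring is assumed, not taken from this file;
// rcClient, podClient, oldRc and newRc are hypothetical values, and the intervals are arbitrary):
func exampleRollingUpdate(out io.Writer, rcClient coreclient.ReplicationControllersGetter, podClient coreclient.PodsGetter, oldRc, newRc *api.ReplicationController) error {
	updater := NewRollingUpdater(oldRc.Namespace, rcClient, podClient)
	return updater.Update(&RollingUpdaterConfig{
		Out:            out,
		OldRc:          oldRc,
		NewRc:          newRc,
		UpdatePeriod:   3 * time.Second,
		Interval:       time.Second,
		Timeout:        5 * time.Minute,
		CleanupPolicy:  DeleteRollingUpdateCleanupPolicy,
		MaxUnavailable: intstr.FromString("25%"),
		MaxSurge:       intstr.FromString("25%"),
		// OnProgress lets the caller observe (or abort) each scale cycle.
		OnProgress: func(oldRc, newRc *api.ReplicationController, percentage int) error {
			fmt.Fprintf(out, "rollout ~%d%% complete\n", percentage)
			return nil
		},
	})
}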
- newRc.Spec.Replicas += increment - if newRc.Spec.Replicas > desired { - newRc.Spec.Replicas = desired - } - // Perform the scale-up. - fmt.Fprintf(config.Out, "Scaling %s up to %d\n", newRc.Name, newRc.Spec.Replicas) - scaledRc, err := r.scaleAndWait(newRc, scaleRetryParams, scaleRetryParams) - if err != nil { - return nil, err - } - return scaledRc, nil -} - -// scaleDown scales down oldRc to 0 at whatever decrement possible given the -// thresholds defined on the config. scaleDown will safely no-op as necessary -// when it detects redundancy or other relevant conditions. -func (r *RollingUpdater) scaleDown(newRc, oldRc *api.ReplicationController, desired, minAvailable, maxUnavailable, maxSurge int32, config *RollingUpdaterConfig) (*api.ReplicationController, error) { - // Already scaled down; do nothing. - if oldRc.Spec.Replicas == 0 { - return oldRc, nil - } - // Get ready pods. We shouldn't block, otherwise in case both old and new - // pods are unavailable then the rolling update process blocks. - // Timeout-wise we are already covered by the progress check. - _, newAvailable, err := r.getReadyPods(oldRc, newRc, config.MinReadySeconds) - if err != nil { - return nil, err - } - // The old controller is considered as part of the total because we want to - // maintain minimum availability even with a volatile old controller. - // Scale down as much as possible while maintaining minimum availability - allPods := oldRc.Spec.Replicas + newRc.Spec.Replicas - newUnavailable := newRc.Spec.Replicas - newAvailable - decrement := allPods - minAvailable - newUnavailable - // The decrement normally shouldn't drop below 0 because the available count - // always starts below the old replica count, but the old replica count can - // decrement due to externalities like pods death in the replica set. This - // will be considered a transient condition; do nothing and try again later - // with new readiness values. - // - // If the most we can scale is 0, it means we can't scale down without - // violating the minimum. Do nothing and try again later when conditions may - // have changed. - if decrement <= 0 { - return oldRc, nil - } - // Reduce the replica count, and deal with fenceposts. - oldRc.Spec.Replicas -= decrement - if oldRc.Spec.Replicas < 0 { - oldRc.Spec.Replicas = 0 - } - // If the new is already fully scaled and available up to the desired size, go - // ahead and scale old all the way down. - if newRc.Spec.Replicas == desired && newAvailable == desired { - oldRc.Spec.Replicas = 0 - } - // Perform the scale-down. - fmt.Fprintf(config.Out, "Scaling %s down to %d\n", oldRc.Name, oldRc.Spec.Replicas) - retryWait := &RetryParams{config.Interval, config.Timeout} - scaledRc, err := r.scaleAndWait(oldRc, retryWait, retryWait) - if err != nil { - return nil, err - } - return scaledRc, nil -} - -// scalerScaleAndWait scales a controller using a Scaler and a real client. -func (r *RollingUpdater) scaleAndWaitWithScaler(rc *api.ReplicationController, retry *RetryParams, wait *RetryParams) (*api.ReplicationController, error) { - scaler := &ReplicationControllerScaler{r.rcClient} - if err := scaler.Scale(rc.Namespace, rc.Name, uint(rc.Spec.Replicas), &ScalePrecondition{-1, ""}, retry, wait); err != nil { - return nil, err - } - return r.rcClient.ReplicationControllers(rc.Namespace).Get(rc.Name) -} - -// readyPods returns the old and new ready counts for their pods. -// If a pod is observed as being ready, it's considered ready even -// if it later becomes notReady. 
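// Illustrative numbers for the scale-down arithmetic above (not part of the vendored file):
// with oldRc at 7 replicas, newRc at 3 replicas of which 2 are available, desired = 10 and
// minAvailable = 8, allPods = 10, newUnavailable = 1 and decrement = 10 - 8 - 1 = 1, so the old
// controller is scaled down by exactly one pod this cycle.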
-func (r *RollingUpdater) readyPods(oldRc, newRc *api.ReplicationController, minReadySeconds int32) (int32, int32, error) { - controllers := []*api.ReplicationController{oldRc, newRc} - oldReady := int32(0) - newReady := int32(0) - if r.nowFn == nil { - r.nowFn = func() unversioned.Time { return unversioned.Now() } - } - - for i := range controllers { - controller := controllers[i] - selector := labels.Set(controller.Spec.Selector).AsSelector() - options := api.ListOptions{LabelSelector: selector} - pods, err := r.podClient.Pods(controller.Namespace).List(options) - if err != nil { - return 0, 0, err - } - for _, pod := range pods.Items { - if !deploymentutil.IsPodAvailable(&pod, minReadySeconds, r.nowFn().Time) { - continue - } - switch controller.Name { - case oldRc.Name: - oldReady++ - case newRc.Name: - newReady++ - } - } - } - return oldReady, newReady, nil -} - -// getOrCreateTargetControllerWithClient looks for an existing controller with -// sourceId. If found, the existing controller is returned with true -// indicating that the controller already exists. If the controller isn't -// found, a new one is created and returned along with false indicating the -// controller was created. -// -// Existing controllers are validated to ensure their sourceIdAnnotation -// matches sourceId; if there's a mismatch, an error is returned. -func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error) { - existingRc, err := r.existingController(controller) - if err != nil { - if !errors.IsNotFound(err) { - // There was an error trying to find the controller; don't assume we - // should create it. - return nil, false, err - } - if controller.Spec.Replicas <= 0 { - return nil, false, fmt.Errorf("Invalid controller spec for %s; required: > 0 replicas, actual: %d\n", controller.Name, controller.Spec.Replicas) - } - // The controller wasn't found, so create it. - if controller.Annotations == nil { - controller.Annotations = map[string]string{} - } - controller.Annotations[desiredReplicasAnnotation] = fmt.Sprintf("%d", controller.Spec.Replicas) - controller.Annotations[sourceIdAnnotation] = sourceId - controller.Spec.Replicas = 0 - newRc, err := r.rcClient.ReplicationControllers(r.ns).Create(controller) - return newRc, false, err - } - // Validate and use the existing controller. - annotations := existingRc.Annotations - source := annotations[sourceIdAnnotation] - _, ok := annotations[desiredReplicasAnnotation] - if source != sourceId || !ok { - return nil, false, fmt.Errorf("Missing/unexpected annotations for controller %s, expected %s : %s", controller.Name, sourceId, annotations) - } - return existingRc, true, nil -} - -// existingController verifies if the controller already exists -func (r *RollingUpdater) existingController(controller *api.ReplicationController) (*api.ReplicationController, error) { - // without rc name but generate name, there's no existing rc - if len(controller.Name) == 0 && len(controller.GenerateName) > 0 { - return nil, errors.NewNotFound(api.Resource("replicationcontrollers"), controller.Name) - } - // controller name is required to get rc back - return r.rcClient.ReplicationControllers(controller.Namespace).Get(controller.Name) -} - -// cleanupWithClients performs cleanup tasks after the rolling update. Update -// process related annotations are removed from oldRc and newRc. The -// CleanupPolicy on config is executed. 
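// Sketch of the bookkeeping performed above when the target controller has to be created
// (annotation keys shown with an assumed "kubectl.kubernetes.io/" prefix; the UID is a
// placeholder):
//
//	annotations["kubectl.kubernetes.io/update-source-id"] = "frontend:9a3e..." // oldRc name:UID
//	annotations["kubectl.kubernetes.io/desired-replicas"] = "10"               // final size, read back by Update
//	spec.replicas                                         = 0                  // scaled up gradually
//
// Resuming an interrupted rollout therefore only needs the same sourceId: the existing
// controller is found, its annotations are validated, and Update continues from its current size.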
-func (r *RollingUpdater) cleanupWithClients(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error { - // Clean up annotations - var err error - newRc, err = r.rcClient.ReplicationControllers(r.ns).Get(newRc.Name) - if err != nil { - return err - } - applyUpdate := func(rc *api.ReplicationController) { - delete(rc.Annotations, sourceIdAnnotation) - delete(rc.Annotations, desiredReplicasAnnotation) - } - if newRc, err = updateRcWithRetries(r.rcClient, r.ns, newRc, applyUpdate); err != nil { - return err - } - - if err = wait.Poll(config.Interval, config.Timeout, client.ControllerHasDesiredReplicas(r.rcClient, newRc)); err != nil { - return err - } - newRc, err = r.rcClient.ReplicationControllers(r.ns).Get(newRc.Name) - if err != nil { - return err - } - - switch config.CleanupPolicy { - case DeleteRollingUpdateCleanupPolicy: - // delete old rc - fmt.Fprintf(config.Out, "Update succeeded. Deleting %s\n", oldRc.Name) - return r.rcClient.ReplicationControllers(r.ns).Delete(oldRc.Name, nil) - case RenameRollingUpdateCleanupPolicy: - // delete old rc - fmt.Fprintf(config.Out, "Update succeeded. Deleting old controller: %s\n", oldRc.Name) - if err := r.rcClient.ReplicationControllers(r.ns).Delete(oldRc.Name, nil); err != nil { - return err - } - fmt.Fprintf(config.Out, "Renaming %s to %s\n", oldRc.Name, newRc.Name) - return Rename(r.rcClient, newRc, oldRc.Name) - case PreserveRollingUpdateCleanupPolicy: - return nil - default: - return nil - } -} - -func Rename(c coreclient.ReplicationControllersGetter, rc *api.ReplicationController, newName string) error { - oldName := rc.Name - rc.Name = newName - rc.ResourceVersion = "" - // First delete the oldName RC and orphan its pods. - trueVar := true - err := c.ReplicationControllers(rc.Namespace).Delete(oldName, &api.DeleteOptions{OrphanDependents: &trueVar}) - if err != nil && !errors.IsNotFound(err) { - return err - } - err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) { - _, err := c.ReplicationControllers(rc.Namespace).Get(oldName) - if err == nil { - return false, nil - } else if errors.IsNotFound(err) { - return true, nil - } else { - return false, err - } - }) - if err != nil { - return err - } - // Then create the same RC with the new name. 
- _, err = c.ReplicationControllers(rc.Namespace).Create(rc) - if err != nil { - return err - } - return nil -} - -func LoadExistingNextReplicationController(c coreclient.ReplicationControllersGetter, namespace, newName string) (*api.ReplicationController, error) { - if len(newName) == 0 { - return nil, nil - } - newRc, err := c.ReplicationControllers(namespace).Get(newName) - if err != nil && errors.IsNotFound(err) { - return nil, nil - } - return newRc, err -} - -type NewControllerConfig struct { - Namespace string - OldName, NewName string - Image string - Container string - DeploymentKey string - PullPolicy api.PullPolicy -} - -func CreateNewControllerFromCurrentController(rcClient coreclient.ReplicationControllersGetter, codec runtime.Codec, cfg *NewControllerConfig) (*api.ReplicationController, error) { - containerIndex := 0 - // load the old RC into the "new" RC - newRc, err := rcClient.ReplicationControllers(cfg.Namespace).Get(cfg.OldName) - if err != nil { - return nil, err - } - - if len(cfg.Container) != 0 { - containerFound := false - - for i, c := range newRc.Spec.Template.Spec.Containers { - if c.Name == cfg.Container { - containerIndex = i - containerFound = true - break - } - } - - if !containerFound { - return nil, fmt.Errorf("container %s not found in pod", cfg.Container) - } - } - - if len(newRc.Spec.Template.Spec.Containers) > 1 && len(cfg.Container) == 0 { - return nil, goerrors.New("Must specify container to update when updating a multi-container pod") - } - - if len(newRc.Spec.Template.Spec.Containers) == 0 { - return nil, goerrors.New(fmt.Sprintf("Pod has no containers! (%v)", newRc)) - } - newRc.Spec.Template.Spec.Containers[containerIndex].Image = cfg.Image - if len(cfg.PullPolicy) != 0 { - newRc.Spec.Template.Spec.Containers[containerIndex].ImagePullPolicy = cfg.PullPolicy - } - - newHash, err := api.HashObject(newRc, codec) - if err != nil { - return nil, err - } - - if len(cfg.NewName) == 0 { - cfg.NewName = fmt.Sprintf("%s-%s", newRc.Name, newHash) - } - newRc.Name = cfg.NewName - - newRc.Spec.Selector[cfg.DeploymentKey] = newHash - newRc.Spec.Template.Labels[cfg.DeploymentKey] = newHash - // Clear resource version after hashing so that identical updates get different hashes. - newRc.ResourceVersion = "" - return newRc, nil -} - -func AbortRollingUpdate(c *RollingUpdaterConfig) error { - // Swap the controllers - tmp := c.OldRc - c.OldRc = c.NewRc - c.NewRc = tmp - - if c.NewRc.Annotations == nil { - c.NewRc.Annotations = map[string]string{} - } - c.NewRc.Annotations[sourceIdAnnotation] = fmt.Sprintf("%s:%s", c.OldRc.Name, c.OldRc.UID) - - // Use the original value since the replica count change from old to new - // could be asymmetric. If we don't know the original count, we can't safely - // roll back to a known good size. 
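// A minimal sketch of driving CreateNewControllerFromCurrentController (caller-side wiring is
// assumed; the names, image and deployment key are placeholder values, and the codec would
// normally come from the client's negotiated serializer):
func exampleNextController(rcClient coreclient.ReplicationControllersGetter, codec runtime.Codec) (*api.ReplicationController, error) {
	return CreateNewControllerFromCurrentController(rcClient, codec, &NewControllerConfig{
		Namespace:     "default",
		OldName:       "frontend",   // existing controller to copy
		NewName:       "",           // empty: a "<old>-<hash>" name is generated
		Image:         "nginx:1.11", // new image for the selected container
		Container:     "",           // must be set when the pod has more than one container
		DeploymentKey: "deployment", // label key used to partition old and new pods
		PullPolicy:    api.PullIfNotPresent,
	})
}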
- originalSize, foundOriginal := tmp.Annotations[originalReplicasAnnotation] - if !foundOriginal { - return fmt.Errorf("couldn't find original replica count of %q", tmp.Name) - } - fmt.Fprintf(c.Out, "Setting %q replicas to %s\n", c.NewRc.Name, originalSize) - c.NewRc.Annotations[desiredReplicasAnnotation] = originalSize - c.CleanupPolicy = DeleteRollingUpdateCleanupPolicy - return nil -} - -func GetNextControllerAnnotation(rc *api.ReplicationController) (string, bool) { - res, found := rc.Annotations[nextControllerAnnotation] - return res, found -} - -func SetNextControllerAnnotation(rc *api.ReplicationController, name string) { - if rc.Annotations == nil { - rc.Annotations = map[string]string{} - } - rc.Annotations[nextControllerAnnotation] = name -} - -func UpdateExistingReplicationController(rcClient coreclient.ReplicationControllersGetter, podClient coreclient.PodsGetter, oldRc *api.ReplicationController, namespace, newName, deploymentKey, deploymentValue string, out io.Writer) (*api.ReplicationController, error) { - if _, found := oldRc.Spec.Selector[deploymentKey]; !found { - SetNextControllerAnnotation(oldRc, newName) - return AddDeploymentKeyToReplicationController(oldRc, rcClient, podClient, deploymentKey, deploymentValue, namespace, out) - } else { - // If we didn't need to update the controller for the deployment key, we still need to write - // the "next" controller. - applyUpdate := func(rc *api.ReplicationController) { - SetNextControllerAnnotation(rc, newName) - } - return updateRcWithRetries(rcClient, namespace, oldRc, applyUpdate) - } -} - -func AddDeploymentKeyToReplicationController(oldRc *api.ReplicationController, rcClient coreclient.ReplicationControllersGetter, podClient coreclient.PodsGetter, deploymentKey, deploymentValue, namespace string, out io.Writer) (*api.ReplicationController, error) { - var err error - // First, update the template label. This ensures that any newly created pods will have the new label - applyUpdate := func(rc *api.ReplicationController) { - if rc.Spec.Template.Labels == nil { - rc.Spec.Template.Labels = map[string]string{} - } - rc.Spec.Template.Labels[deploymentKey] = deploymentValue - } - if oldRc, err = updateRcWithRetries(rcClient, namespace, oldRc, applyUpdate); err != nil { - return nil, err - } - - // Update all pods managed by the rc to have the new hash label, so they are correctly adopted - // TODO: extract the code from the label command and re-use it here. 
- selector := labels.SelectorFromSet(oldRc.Spec.Selector) - options := api.ListOptions{LabelSelector: selector} - podList, err := podClient.Pods(namespace).List(options) - if err != nil { - return nil, err - } - for ix := range podList.Items { - pod := &podList.Items[ix] - applyUpdate := func(p *api.Pod) { - if p.Labels == nil { - p.Labels = map[string]string{ - deploymentKey: deploymentValue, - } - } else { - p.Labels[deploymentKey] = deploymentValue - } - } - if pod, err = updatePodWithRetries(podClient, namespace, pod, applyUpdate); err != nil { - return nil, err - } - } - - if oldRc.Spec.Selector == nil { - oldRc.Spec.Selector = map[string]string{} - } - // Copy the old selector, so that we can scrub out any orphaned pods - selectorCopy := map[string]string{} - for k, v := range oldRc.Spec.Selector { - selectorCopy[k] = v - } - applyUpdate = func(rc *api.ReplicationController) { - rc.Spec.Selector[deploymentKey] = deploymentValue - } - // Update the selector of the rc so it manages all the pods we updated above - if oldRc, err = updateRcWithRetries(rcClient, namespace, oldRc, applyUpdate); err != nil { - return nil, err - } - - // Clean up any orphaned pods that don't have the new label, this can happen if the rc manager - // doesn't see the update to its pod template and creates a new pod with the old labels after - // we've finished re-adopting existing pods to the rc. - selector = labels.SelectorFromSet(selectorCopy) - options = api.ListOptions{LabelSelector: selector} - podList, err = podClient.Pods(namespace).List(options) - for ix := range podList.Items { - pod := &podList.Items[ix] - if value, found := pod.Labels[deploymentKey]; !found || value != deploymentValue { - if err := podClient.Pods(namespace).Delete(pod.Name, nil); err != nil { - return nil, err - } - } - } - - return oldRc, nil -} - -type updateRcFunc func(controller *api.ReplicationController) - -// updateRcWithRetries retries updating the given rc on conflict with the following steps: -// 1. Get latest resource -// 2. applyUpdate -// 3. Update the resource -func updateRcWithRetries(rcClient coreclient.ReplicationControllersGetter, namespace string, rc *api.ReplicationController, applyUpdate updateRcFunc) (*api.ReplicationController, error) { - // Deep copy the rc in case we failed on Get during retry loop - obj, err := api.Scheme.Copy(rc) - if err != nil { - return nil, fmt.Errorf("failed to deep copy rc before updating it: %v", err) - } - oldRc := obj.(*api.ReplicationController) - err = retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) { - // Apply the update, then attempt to push it to the apiserver. - applyUpdate(rc) - if rc, e = rcClient.ReplicationControllers(namespace).Update(rc); e == nil { - // rc contains the latest controller post update - return - } - updateErr := e - // Update the controller with the latest resource version, if the update failed we - // can't trust rc so use oldRc.Name. - if rc, e = rcClient.ReplicationControllers(namespace).Get(oldRc.Name); e != nil { - // The Get failed: Value in rc cannot be trusted. - rc = oldRc - } - // Only return the error from update - return updateErr - }) - // If the error is non-nil the returned controller cannot be trusted, if it is nil, the returned - // controller contains the applied update. - return rc, err -} - -type updatePodFunc func(controller *api.Pod) - -// updatePodWithRetries retries updating the given pod on conflict with the following steps: -// 1. Get latest resource -// 2. applyUpdate -// 3. 
Update the resource -func updatePodWithRetries(podClient coreclient.PodsGetter, namespace string, pod *api.Pod, applyUpdate updatePodFunc) (*api.Pod, error) { - // Deep copy the pod in case we failed on Get during retry loop - obj, err := api.Scheme.Copy(pod) - if err != nil { - return nil, fmt.Errorf("failed to deep copy pod before updating it: %v", err) - } - oldPod := obj.(*api.Pod) - err = retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) { - // Apply the update, then attempt to push it to the apiserver. - applyUpdate(pod) - if pod, e = podClient.Pods(namespace).Update(pod); e == nil { - return - } - updateErr := e - if pod, e = podClient.Pods(namespace).Get(oldPod.Name); e != nil { - pod = oldPod - } - // Only return the error from update - return updateErr - }) - // If the error is non-nil the returned pod cannot be trusted, if it is nil, the returned - // controller contains the applied update. - return pod, err -} - -func FindSourceController(r coreclient.ReplicationControllersGetter, namespace, name string) (*api.ReplicationController, error) { - list, err := r.ReplicationControllers(namespace).List(api.ListOptions{}) - if err != nil { - return nil, err - } - for ix := range list.Items { - rc := &list.Items[ix] - if rc.Annotations != nil && strings.HasPrefix(rc.Annotations[sourceIdAnnotation], name) { - return rc, nil - } - } - return nil, fmt.Errorf("couldn't find a replication controller with source id == %s/%s", namespace, name) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go deleted file mode 100644 index 0fbeffaee5..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" - "k8s.io/kubernetes/pkg/controller/deployment/util" -) - -// StatusViewer provides an interface for resources that have rollout status. 
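// The two update helpers above follow the usual conflict-retry shape. A stripped-down sketch of
// the same pattern (hypothetical helper, error handling reduced to the essentials):
func exampleUpdateWithRetry(rcClient coreclient.ReplicationControllersGetter, namespace, name string, mutate func(*api.ReplicationController)) error {
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		// Re-read the latest object so a 409 Conflict caused by a stale
		// resourceVersion is resolved on the next attempt.
		rc, err := rcClient.ReplicationControllers(namespace).Get(name)
		if err != nil {
			return err
		}
		mutate(rc)
		_, err = rcClient.ReplicationControllers(namespace).Update(rc)
		return err
	})
}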
-type StatusViewer interface { - Status(namespace, name string, revision int64) (string, bool, error) -} - -func StatusViewerFor(kind unversioned.GroupKind, c internalclientset.Interface) (StatusViewer, error) { - switch kind { - case extensions.Kind("Deployment"): - return &DeploymentStatusViewer{c.Extensions()}, nil - } - return nil, fmt.Errorf("no status viewer has been implemented for %v", kind) -} - -type DeploymentStatusViewer struct { - c extensionsclient.DeploymentsGetter -} - -// Status returns a message describing deployment status, and a bool value indicating if the status is considered done -func (s *DeploymentStatusViewer) Status(namespace, name string, revision int64) (string, bool, error) { - deployment, err := s.c.Deployments(namespace).Get(name) - if err != nil { - return "", false, err - } - if revision > 0 { - deploymentRev, err := util.Revision(deployment) - if err != nil { - return "", false, fmt.Errorf("cannot get the revision of deployment %q: %v", deployment.Name, err) - } - if revision != deploymentRev { - return "", false, fmt.Errorf("desired revision (%d) is different from the running revision (%d)", revision, deploymentRev) - } - } - if deployment.Generation <= deployment.Status.ObservedGeneration { - cond := util.GetDeploymentCondition(deployment.Status, extensions.DeploymentProgressing) - if cond != nil && cond.Reason == util.TimedOutReason { - return "", false, fmt.Errorf("deployment %q exceeded its progress deadline", name) - } - if deployment.Status.UpdatedReplicas < deployment.Spec.Replicas { - return fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas have been updated...\n", deployment.Status.UpdatedReplicas, deployment.Spec.Replicas), false, nil - } - if deployment.Status.Replicas > deployment.Status.UpdatedReplicas { - return fmt.Sprintf("Waiting for rollout to finish: %d old replicas are pending termination...\n", deployment.Status.Replicas-deployment.Status.UpdatedReplicas), false, nil - } - if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas { - return fmt.Sprintf("Waiting for rollout to finish: %d of %d updated replicas are available...\n", deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas), false, nil - } - return fmt.Sprintf("deployment %q successfully rolled out\n", name), true, nil - } - return fmt.Sprintf("Waiting for deployment spec update to be observed...\n"), false, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/run.go b/vendor/k8s.io/kubernetes/pkg/kubectl/run.go deleted file mode 100644 index cc9284e337..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/run.go +++ /dev/null @@ -1,995 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "fmt" - "strconv" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/apis/batch" - batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1" - batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/validation" -) - -type DeploymentV1Beta1 struct{} - -func (DeploymentV1Beta1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"replicas", true}, - {"image", true}, - {"image-pull-policy", false}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"tty", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - } -} - -func (DeploymentV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - args, err := getArgs(genericParams) - if err != nil { - return nil, err - } - - envs, err := getEnvs(genericParams) - if err != nil { - return nil, err - } - - params, err := getParams(genericParams) - if err != nil { - return nil, err - } - - name, err := getName(params) - if err != nil { - return nil, err - } - - labels, err := getLabels(params, true, name) - if err != nil { - return nil, err - } - - count, err := strconv.Atoi(params["replicas"]) - if err != nil { - return nil, err - } - - podSpec, err := makePodSpec(params, name) - if err != nil { - return nil, err - } - - imagePullPolicy := api.PullPolicy(params["image-pull-policy"]) - if err = updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil { - return nil, err - } - - if err := updatePodPorts(params, podSpec); err != nil { - return nil, err - } - - // TODO: use versioned types for generators so that we don't need to - // set default values manually (see issue #17384) - deployment := extensions.Deployment{ - ObjectMeta: api.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: extensions.DeploymentSpec{ - Replicas: int32(count), - Selector: &unversioned.LabelSelector{MatchLabels: labels}, - Template: api.PodTemplateSpec{ - ObjectMeta: api.ObjectMeta{ - Labels: labels, - }, - Spec: *podSpec, - }, - }, - } - return &deployment, nil -} - -func getLabels(params map[string]string, defaultRunLabel bool, name string) (map[string]string, error) { - labelString, found := params["labels"] - var labels map[string]string - var err error - if found && len(labelString) > 0 { - labels, err = ParseLabels(labelString) - if err != nil { - return nil, err - } - } else if defaultRunLabel { - labels = map[string]string{ - "run": name, - } - } - return labels, nil -} - -func getName(params map[string]string) (string, error) { - name, found := params["name"] - if !found || len(name) == 0 { - name, found = params["default-name"] - if !found || len(name) == 0 { - return "", fmt.Errorf("'name' is a required parameter.") - } - } - return name, nil -} - -func getParams(genericParams map[string]interface{}) (map[string]string, error) { - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - return params, nil -} - -func getArgs(genericParams map[string]interface{}) ([]string, error) { - args := []string{} - val, found := genericParams["args"] - 
if found { - var isArray bool - args, isArray = val.([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found: %v", val) - } - delete(genericParams, "args") - } - return args, nil -} - -func getEnvs(genericParams map[string]interface{}) ([]api.EnvVar, error) { - var envs []api.EnvVar - envStrings, found := genericParams["env"] - if found { - if envStringArray, isArray := envStrings.([]string); isArray { - var err error - envs, err = parseEnvs(envStringArray) - if err != nil { - return nil, err - } - delete(genericParams, "env") - } else { - return nil, fmt.Errorf("expected []string, found: %v", envStrings) - } - } - return envs, nil -} - -func getV1Envs(genericParams map[string]interface{}) ([]v1.EnvVar, error) { - var envs []v1.EnvVar - envStrings, found := genericParams["env"] - if found { - if envStringArray, isArray := envStrings.([]string); isArray { - var err error - envs, err = parseV1Envs(envStringArray) - if err != nil { - return nil, err - } - delete(genericParams, "env") - } else { - return nil, fmt.Errorf("expected []string, found: %v", envStrings) - } - } - return envs, nil -} - -type JobV1Beta1 struct{} - -func (JobV1Beta1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"image", true}, - {"image-pull-policy", false}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"leave-stdin-open", false}, - {"tty", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - {"restart", false}, - } -} - -func (JobV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - args, err := getArgs(genericParams) - if err != nil { - return nil, err - } - - envs, err := getEnvs(genericParams) - if err != nil { - return nil, err - } - - params, err := getParams(genericParams) - if err != nil { - return nil, err - } - - name, err := getName(params) - if err != nil { - return nil, err - } - - labels, err := getLabels(params, true, name) - if err != nil { - return nil, err - } - - podSpec, err := makePodSpec(params, name) - if err != nil { - return nil, err - } - - imagePullPolicy := api.PullPolicy(params["image-pull-policy"]) - if err = updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil { - return nil, err - } - - leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false) - if err != nil { - return nil, err - } - podSpec.Containers[0].StdinOnce = !leaveStdinOpen && podSpec.Containers[0].Stdin - - if err := updatePodPorts(params, podSpec); err != nil { - return nil, err - } - - restartPolicy := api.RestartPolicy(params["restart"]) - if len(restartPolicy) == 0 { - restartPolicy = api.RestartPolicyNever - } - podSpec.RestartPolicy = restartPolicy - - job := batch.Job{ - ObjectMeta: api.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: batch.JobSpec{ - Selector: &unversioned.LabelSelector{ - MatchLabels: labels, - }, - ManualSelector: newBool(true), - Template: api.PodTemplateSpec{ - ObjectMeta: api.ObjectMeta{ - Labels: labels, - }, - Spec: *podSpec, - }, - }, - } - - return &job, nil -} - -type JobV1 struct{} - -func (JobV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"image", true}, - {"image-pull-policy", false}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"leave-stdin-open", false}, - {"tty", false}, - {"command", false}, - {"args", false}, - {"env", false}, - 
{"requests", false}, - {"limits", false}, - {"restart", false}, - } -} - -func (JobV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - args, err := getArgs(genericParams) - if err != nil { - return nil, err - } - - envs, err := getV1Envs(genericParams) - if err != nil { - return nil, err - } - - params, err := getParams(genericParams) - if err != nil { - return nil, err - } - - name, err := getName(params) - if err != nil { - return nil, err - } - - labels, err := getLabels(params, true, name) - if err != nil { - return nil, err - } - - podSpec, err := makeV1PodSpec(params, name) - if err != nil { - return nil, err - } - - imagePullPolicy := v1.PullPolicy(params["image-pull-policy"]) - if err = updateV1PodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil { - return nil, err - } - - leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false) - if err != nil { - return nil, err - } - podSpec.Containers[0].StdinOnce = !leaveStdinOpen && podSpec.Containers[0].Stdin - - if err := updateV1PodPorts(params, podSpec); err != nil { - return nil, err - } - - restartPolicy := v1.RestartPolicy(params["restart"]) - if len(restartPolicy) == 0 { - restartPolicy = v1.RestartPolicyNever - } - podSpec.RestartPolicy = restartPolicy - - job := batchv1.Job{ - ObjectMeta: v1.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: batchv1.JobSpec{ - Template: v1.PodTemplateSpec{ - ObjectMeta: v1.ObjectMeta{ - Labels: labels, - }, - Spec: *podSpec, - }, - }, - } - - return &job, nil -} - -type CronJobV2Alpha1 struct{} - -func (CronJobV2Alpha1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"image", true}, - {"image-pull-policy", false}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"leave-stdin-open", false}, - {"tty", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - {"restart", false}, - {"schedule", true}, - } -} - -func (CronJobV2Alpha1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - args, err := getArgs(genericParams) - if err != nil { - return nil, err - } - - envs, err := getV1Envs(genericParams) - if err != nil { - return nil, err - } - - params, err := getParams(genericParams) - if err != nil { - return nil, err - } - - name, err := getName(params) - if err != nil { - return nil, err - } - - labels, err := getLabels(params, true, name) - if err != nil { - return nil, err - } - - podSpec, err := makeV1PodSpec(params, name) - if err != nil { - return nil, err - } - - imagePullPolicy := v1.PullPolicy(params["image-pull-policy"]) - if err = updateV1PodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil { - return nil, err - } - - leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false) - if err != nil { - return nil, err - } - podSpec.Containers[0].StdinOnce = !leaveStdinOpen && podSpec.Containers[0].Stdin - - if err := updateV1PodPorts(params, podSpec); err != nil { - return nil, err - } - - restartPolicy := v1.RestartPolicy(params["restart"]) - if len(restartPolicy) == 0 { - restartPolicy = v1.RestartPolicyNever - } - podSpec.RestartPolicy = restartPolicy - - cronJob := batchv2alpha1.CronJob{ - ObjectMeta: v1.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: batchv2alpha1.CronJobSpec{ - Schedule: params["schedule"], - ConcurrencyPolicy: batchv2alpha1.AllowConcurrent, - JobTemplate: batchv2alpha1.JobTemplateSpec{ - Spec: 
batchv2alpha1.JobSpec{ - Template: v1.PodTemplateSpec{ - ObjectMeta: v1.ObjectMeta{ - Labels: labels, - }, - Spec: *podSpec, - }, - }, - }, - }, - } - - return &cronJob, nil -} - -type BasicReplicationController struct{} - -func (BasicReplicationController) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"replicas", true}, - {"image", true}, - {"image-pull-policy", false}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"tty", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - } -} - -// populateResourceList takes strings of form =,= -func populateResourceList(spec string) (api.ResourceList, error) { - // empty input gets a nil response to preserve generator test expected behaviors - if spec == "" { - return nil, nil - } - - result := api.ResourceList{} - resourceStatements := strings.Split(spec, ",") - for _, resourceStatement := range resourceStatements { - parts := strings.Split(resourceStatement, "=") - if len(parts) != 2 { - return nil, fmt.Errorf("Invalid argument syntax %v, expected =", resourceStatement) - } - resourceName := api.ResourceName(parts[0]) - resourceQuantity, err := resource.ParseQuantity(parts[1]) - if err != nil { - return nil, err - } - result[resourceName] = resourceQuantity - } - return result, nil -} - -// populateResourceList takes strings of form =,= -func populateV1ResourceList(spec string) (v1.ResourceList, error) { - // empty input gets a nil response to preserve generator test expected behaviors - if spec == "" { - return nil, nil - } - - result := v1.ResourceList{} - resourceStatements := strings.Split(spec, ",") - for _, resourceStatement := range resourceStatements { - parts := strings.Split(resourceStatement, "=") - if len(parts) != 2 { - return nil, fmt.Errorf("Invalid argument syntax %v, expected =", resourceStatement) - } - resourceName := v1.ResourceName(parts[0]) - resourceQuantity, err := resource.ParseQuantity(parts[1]) - if err != nil { - return nil, err - } - result[resourceName] = resourceQuantity - } - return result, nil -} - -// HandleResourceRequirements parses the limits and requests parameters if specified -func HandleResourceRequirements(params map[string]string) (api.ResourceRequirements, error) { - result := api.ResourceRequirements{} - limits, err := populateResourceList(params["limits"]) - if err != nil { - return result, err - } - result.Limits = limits - requests, err := populateResourceList(params["requests"]) - if err != nil { - return result, err - } - result.Requests = requests - return result, nil -} - -// HandleResourceRequirements parses the limits and requests parameters if specified -func handleV1ResourceRequirements(params map[string]string) (v1.ResourceRequirements, error) { - result := v1.ResourceRequirements{} - limits, err := populateV1ResourceList(params["limits"]) - if err != nil { - return result, err - } - result.Limits = limits - requests, err := populateV1ResourceList(params["requests"]) - if err != nil { - return result, err - } - result.Requests = requests - return result, nil -} - -func makePodSpec(params map[string]string, name string) (*api.PodSpec, error) { - stdin, err := GetBool(params, "stdin", false) - if err != nil { - return nil, err - } - - tty, err := GetBool(params, "tty", false) - if err != nil { - return nil, err - } - - resourceRequirements, err := HandleResourceRequirements(params) - if err != nil { - return nil, err - } - - spec := 
api.PodSpec{ - Containers: []api.Container{ - { - Name: name, - Image: params["image"], - Stdin: stdin, - TTY: tty, - Resources: resourceRequirements, - }, - }, - } - return &spec, nil -} - -func makeV1PodSpec(params map[string]string, name string) (*v1.PodSpec, error) { - stdin, err := GetBool(params, "stdin", false) - if err != nil { - return nil, err - } - - tty, err := GetBool(params, "tty", false) - if err != nil { - return nil, err - } - - resourceRequirements, err := handleV1ResourceRequirements(params) - if err != nil { - return nil, err - } - - spec := v1.PodSpec{ - Containers: []v1.Container{ - { - Name: name, - Image: params["image"], - Stdin: stdin, - TTY: tty, - Resources: resourceRequirements, - }, - }, - } - return &spec, nil -} - -func (BasicReplicationController) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - args, err := getArgs(genericParams) - if err != nil { - return nil, err - } - - envs, err := getEnvs(genericParams) - if err != nil { - return nil, err - } - - params, err := getParams(genericParams) - if err != nil { - return nil, err - } - - name, err := getName(params) - if err != nil { - return nil, err - } - - labels, err := getLabels(params, true, name) - if err != nil { - return nil, err - } - - count, err := strconv.Atoi(params["replicas"]) - if err != nil { - return nil, err - } - - podSpec, err := makePodSpec(params, name) - if err != nil { - return nil, err - } - - imagePullPolicy := api.PullPolicy(params["image-pull-policy"]) - if err = updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil { - return nil, err - } - - if err := updatePodPorts(params, podSpec); err != nil { - return nil, err - } - - controller := api.ReplicationController{ - ObjectMeta: api.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: api.ReplicationControllerSpec{ - Replicas: int32(count), - Selector: labels, - Template: &api.PodTemplateSpec{ - ObjectMeta: api.ObjectMeta{ - Labels: labels, - }, - Spec: *podSpec, - }, - }, - } - return &controller, nil -} - -func updatePodContainers(params map[string]string, args []string, envs []api.EnvVar, imagePullPolicy api.PullPolicy, podSpec *api.PodSpec) error { - if len(args) > 0 { - command, err := GetBool(params, "command", false) - if err != nil { - return err - } - if command { - podSpec.Containers[0].Command = args - } else { - podSpec.Containers[0].Args = args - } - } - - if len(envs) > 0 { - podSpec.Containers[0].Env = envs - } - - if len(imagePullPolicy) > 0 { - // imagePullPolicy should be valid here since we have verified it before. - podSpec.Containers[0].ImagePullPolicy = imagePullPolicy - } - return nil -} - -func updateV1PodContainers(params map[string]string, args []string, envs []v1.EnvVar, imagePullPolicy v1.PullPolicy, podSpec *v1.PodSpec) error { - if len(args) > 0 { - command, err := GetBool(params, "command", false) - if err != nil { - return err - } - if command { - podSpec.Containers[0].Command = args - } else { - podSpec.Containers[0].Args = args - } - } - - if len(envs) > 0 { - podSpec.Containers[0].Env = envs - } - - if len(imagePullPolicy) > 0 { - // imagePullPolicy should be valid here since we have verified it before. 
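// A minimal sketch of calling the generator above directly (parameter values are hypothetical;
// kubectl normally assembles this map from the `kubectl run` flags). "args" and "env" must be
// []string and are consumed before getParams, everything else must be a plain string:
func exampleGenerateRC() (runtime.Object, error) {
	gen := BasicReplicationController{}
	return gen.Generate(map[string]interface{}{
		"name":     "web",
		"image":    "nginx:1.11",
		"replicas": "3", // numeric parameters arrive as strings
		"labels":   "run=web",
		"port":     "80",
		"env":      []string{"MODE=prod"},
	})
}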
- podSpec.Containers[0].ImagePullPolicy = imagePullPolicy - } - return nil -} - -func updatePodPorts(params map[string]string, podSpec *api.PodSpec) (err error) { - port := -1 - hostPort := -1 - if len(params["port"]) > 0 { - port, err = strconv.Atoi(params["port"]) - if err != nil { - return err - } - } - - if len(params["hostport"]) > 0 { - hostPort, err = strconv.Atoi(params["hostport"]) - if err != nil { - return err - } - if hostPort > 0 && port < 0 { - return fmt.Errorf("--hostport requires --port to be specified") - } - } - - // Don't include the port if it was not specified. - if len(params["port"]) > 0 { - podSpec.Containers[0].Ports = []api.ContainerPort{ - { - ContainerPort: int32(port), - }, - } - if hostPort > 0 { - podSpec.Containers[0].Ports[0].HostPort = int32(hostPort) - } - } - return nil -} - -func updateV1PodPorts(params map[string]string, podSpec *v1.PodSpec) (err error) { - port := -1 - hostPort := -1 - if len(params["port"]) > 0 { - port, err = strconv.Atoi(params["port"]) - if err != nil { - return err - } - } - - if len(params["hostport"]) > 0 { - hostPort, err = strconv.Atoi(params["hostport"]) - if err != nil { - return err - } - if hostPort > 0 && port < 0 { - return fmt.Errorf("--hostport requires --port to be specified") - } - } - - // Don't include the port if it was not specified. - if len(params["port"]) > 0 { - podSpec.Containers[0].Ports = []v1.ContainerPort{ - { - ContainerPort: int32(port), - }, - } - if hostPort > 0 { - podSpec.Containers[0].Ports[0].HostPort = int32(hostPort) - } - } - return nil -} - -type BasicPod struct{} - -func (BasicPod) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"image", true}, - {"image-pull-policy", false}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"leave-stdin-open", false}, - {"tty", false}, - {"restart", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - } -} - -func (BasicPod) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - args, err := getArgs(genericParams) - if err != nil { - return nil, err - } - - envs, err := getEnvs(genericParams) - if err != nil { - return nil, err - } - - params, err := getParams(genericParams) - if err != nil { - return nil, err - } - - name, err := getName(params) - if err != nil { - return nil, err - } - - labels, err := getLabels(params, false, name) - if err != nil { - return nil, err - } - - stdin, err := GetBool(params, "stdin", false) - if err != nil { - return nil, err - } - leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false) - if err != nil { - return nil, err - } - - tty, err := GetBool(params, "tty", false) - if err != nil { - return nil, err - } - - resourceRequirements, err := HandleResourceRequirements(params) - if err != nil { - return nil, err - } - - restartPolicy := api.RestartPolicy(params["restart"]) - if len(restartPolicy) == 0 { - restartPolicy = api.RestartPolicyAlways - } - // TODO: Figure out why we set ImagePullPolicy here, whether we can make it - // consistent with the other places imagePullPolicy is set using flag. 
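// Worked example for the "requests"/"limits" parameters handled above (illustrative values):
// populateResourceList("cpu=100m,memory=256Mi") yields an api.ResourceList with cpu = 100m and
// memory = 256Mi, while an empty string yields a nil list, so
// HandleResourceRequirements(map[string]string{"limits": "cpu=200m", "requests": ""})
// returns requirements with only Limits populated.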
- pod := api.Pod{ - ObjectMeta: api.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: api.PodSpec{ - Containers: []api.Container{ - { - Name: name, - Image: params["image"], - ImagePullPolicy: api.PullIfNotPresent, - Stdin: stdin, - StdinOnce: !leaveStdinOpen && stdin, - TTY: tty, - Resources: resourceRequirements, - }, - }, - DNSPolicy: api.DNSClusterFirst, - RestartPolicy: restartPolicy, - }, - } - imagePullPolicy := api.PullPolicy(params["image-pull-policy"]) - if err = updatePodContainers(params, args, envs, imagePullPolicy, &pod.Spec); err != nil { - return nil, err - } - - if err := updatePodPorts(params, &pod.Spec); err != nil { - return nil, err - } - return &pod, nil -} - -func parseEnvs(envArray []string) ([]api.EnvVar, error) { - envs := make([]api.EnvVar, 0, len(envArray)) - for _, env := range envArray { - pos := strings.Index(env, "=") - if pos == -1 { - return nil, fmt.Errorf("invalid env: %v", env) - } - name := env[:pos] - value := env[pos+1:] - if len(name) == 0 { - return nil, fmt.Errorf("invalid env: %v", env) - } - if len(validation.IsCIdentifier(name)) != 0 { - return nil, fmt.Errorf("invalid env: %v", env) - } - envVar := api.EnvVar{Name: name, Value: value} - envs = append(envs, envVar) - } - return envs, nil -} - -func parseV1Envs(envArray []string) ([]v1.EnvVar, error) { - envs := []v1.EnvVar{} - for _, env := range envArray { - pos := strings.Index(env, "=") - if pos == -1 { - return nil, fmt.Errorf("invalid env: %v", env) - } - name := env[:pos] - value := env[pos+1:] - if len(name) == 0 || len(validation.IsCIdentifier(name)) != 0 { - return nil, fmt.Errorf("invalid env: %v", env) - } - envVar := v1.EnvVar{Name: name, Value: value} - envs = append(envs, envVar) - } - return envs, nil -} - -func newBool(val bool) *bool { - p := new(bool) - *p = val - return p -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go b/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go deleted file mode 100644 index 3abf2e7c90..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go +++ /dev/null @@ -1,509 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "fmt" - "strconv" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - appsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion" - batchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" - coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" - client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/util/wait" - "k8s.io/kubernetes/pkg/watch" -) - -// Scaler provides an interface for resources that can be scaled. -type Scaler interface { - // Scale scales the named resource after checking preconditions. It optionally - // retries in the event of resource version mismatch (if retry is not nil), - // and optionally waits until the status of the resource matches newSize (if wait is not nil) - Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, wait *RetryParams) error - // ScaleSimple does a simple one-shot attempt at scaling - not useful on its own, but - // a necessary building block for Scale - ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (updatedResourceVersion string, err error) -} - -func ScalerFor(kind unversioned.GroupKind, c internalclientset.Interface) (Scaler, error) { - switch kind { - case api.Kind("ReplicationController"): - return &ReplicationControllerScaler{c.Core()}, nil - case extensions.Kind("ReplicaSet"): - return &ReplicaSetScaler{c.Extensions()}, nil - case extensions.Kind("Job"), batch.Kind("Job"): - return &JobScaler{c.Batch()}, nil // Either kind of job can be scaled with Batch interface. - case apps.Kind("StatefulSet"): - return &StatefulSetScaler{c.Apps()}, nil - case extensions.Kind("Deployment"): - return &DeploymentScaler{c.Extensions()}, nil - } - return nil, fmt.Errorf("no scaler has been implemented for %q", kind) -} - -// ScalePrecondition describes a condition that must be true for the scale to take place -// If CurrentSize == -1, it is ignored. -// If CurrentResourceVersion is the empty string, it is ignored. -// Otherwise they must equal the values in the resource for it to be valid. -type ScalePrecondition struct { - Size int - ResourceVersion string -} - -// A PreconditionError is returned when a resource fails to match -// the scale preconditions passed to kubectl. -type PreconditionError struct { - Precondition string - ExpectedValue string - ActualValue string -} - -func (pe PreconditionError) Error() string { - return fmt.Sprintf("Expected %s to be %s, was %s", pe.Precondition, pe.ExpectedValue, pe.ActualValue) -} - -type ScaleErrorType int - -const ( - ScaleGetFailure ScaleErrorType = iota - ScaleUpdateFailure - ScaleUpdateConflictFailure -) - -// A ScaleError is returned when a scale request passes -// preconditions but fails to actually scale the controller. 
-type ScaleError struct { - FailureType ScaleErrorType - ResourceVersion string - ActualError error -} - -func (c ScaleError) Error() string { - return fmt.Sprintf( - "Scaling the resource failed with: %v; Current resource version %s", - c.ActualError, c.ResourceVersion) -} - -// RetryParams encapsulates the retry parameters used by kubectl's scaler. -type RetryParams struct { - Interval, Timeout time.Duration -} - -func NewRetryParams(interval, timeout time.Duration) *RetryParams { - return &RetryParams{interval, timeout} -} - -// ScaleCondition is a closure around Scale that facilitates retries via util.wait -func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name string, count uint, updatedResourceVersion *string) wait.ConditionFunc { - return func() (bool, error) { - rv, err := r.ScaleSimple(namespace, name, precondition, count) - if updatedResourceVersion != nil { - *updatedResourceVersion = rv - } - switch e, _ := err.(ScaleError); err.(type) { - case nil: - return true, nil - case ScaleError: - // Retry only on update conflicts. - if e.FailureType == ScaleUpdateConflictFailure { - return false, nil - } - } - return false, err - } -} - -// ValidateStatefulSet ensures that the preconditions match. Returns nil if they are valid, an error otherwise. -func (precondition *ScalePrecondition) ValidateStatefulSet(ps *apps.StatefulSet) error { - if precondition.Size != -1 && int(ps.Spec.Replicas) != precondition.Size { - return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(ps.Spec.Replicas))} - } - if len(precondition.ResourceVersion) != 0 && ps.ResourceVersion != precondition.ResourceVersion { - return PreconditionError{"resource version", precondition.ResourceVersion, ps.ResourceVersion} - } - return nil -} - -// ValidateReplicationController ensures that the preconditions match. Returns nil if they are valid, an error otherwise -func (precondition *ScalePrecondition) ValidateReplicationController(controller *api.ReplicationController) error { - if precondition.Size != -1 && int(controller.Spec.Replicas) != precondition.Size { - return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(controller.Spec.Replicas))} - } - if len(precondition.ResourceVersion) != 0 && controller.ResourceVersion != precondition.ResourceVersion { - return PreconditionError{"resource version", precondition.ResourceVersion, controller.ResourceVersion} - } - return nil -} - -type ReplicationControllerScaler struct { - c coreclient.ReplicationControllersGetter -} - -// ScaleSimple does a simple one-shot attempt at scaling. It returns the -// resourceVersion of the replication controller if the update is successful. 
-func (scaler *ReplicationControllerScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { - controller, err := scaler.c.ReplicationControllers(namespace).Get(name) - if err != nil { - return "", ScaleError{ScaleGetFailure, "Unknown", err} - } - if preconditions != nil { - if err := preconditions.ValidateReplicationController(controller); err != nil { - return "", err - } - } - controller.Spec.Replicas = int32(newSize) - updatedRC, err := scaler.c.ReplicationControllers(namespace).Update(controller) - if err != nil { - if errors.IsConflict(err) { - return "", ScaleError{ScaleUpdateConflictFailure, controller.ResourceVersion, err} - } - return "", ScaleError{ScaleUpdateFailure, controller.ResourceVersion, err} - } - return updatedRC.ObjectMeta.ResourceVersion, nil -} - -// Scale updates a ReplicationController to a new size, with optional precondition check (if preconditions is not nil), -// optional retries (if retry is not nil), and then optionally waits for its replica count to reach the new value -// (if wait is not nil). -func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { - if preconditions == nil { - preconditions = &ScalePrecondition{-1, ""} - } - if retry == nil { - // Make it try only once, immediately - retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} - } - var updatedResourceVersion string - cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, &updatedResourceVersion) - if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil { - return err - } - if waitForReplicas != nil { - checkRC := func(rc *api.ReplicationController) bool { - if uint(rc.Spec.Replicas) != newSize { - // the size was changed by another party; we don't need to wait for the new change to complete. - return true - } - return rc.Status.ObservedGeneration >= rc.Generation && rc.Status.Replicas == rc.Spec.Replicas - } - // If the number of replicas doesn't change, then the update may not even - // be sent to the underlying database (we don't send no-op changes). - // In such a case, updatedResourceVersion will have the value of the most - // recent update (which may be far in the past), so we may get a "too old - // RV" error from watch, or potentially no ReplicationController events - // will be delivered, since it may already be in the expected state. - // To guard against both cases, we first issue a Get() to ensure that we - // are not already in the expected state. - currentRC, err := scaler.c.ReplicationControllers(namespace).Get(name) - if err != nil { - return err - } - if !checkRC(currentRC) { - watchOptions := api.ListOptions{ - FieldSelector: fields.OneTermEqualSelector("metadata.name", name), - ResourceVersion: updatedResourceVersion, - } - watcher, err := scaler.c.ReplicationControllers(namespace).Watch(watchOptions) - if err != nil { - return err - } - _, err = watch.Until(waitForReplicas.Timeout, watcher, func(event watch.Event) (bool, error) { - if event.Type != watch.Added && event.Type != watch.Modified { - return false, nil - } - return checkRC(event.Object.(*api.ReplicationController)), nil - }) - if err == wait.ErrWaitTimeout { - return fmt.Errorf("timed out waiting for %q to be synced", name) - } - return err - } - } - return nil -} - -// ValidateReplicaSet ensures that the preconditions match.
Returns nil if they are valid, an error otherwise -func (precondition *ScalePrecondition) ValidateReplicaSet(replicaSet *extensions.ReplicaSet) error { - if precondition.Size != -1 && int(replicaSet.Spec.Replicas) != precondition.Size { - return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(replicaSet.Spec.Replicas))} - } - if len(precondition.ResourceVersion) != 0 && replicaSet.ResourceVersion != precondition.ResourceVersion { - return PreconditionError{"resource version", precondition.ResourceVersion, replicaSet.ResourceVersion} - } - return nil -} - -type ReplicaSetScaler struct { - c extensionsclient.ReplicaSetsGetter -} - -// ScaleSimple does a simple one-shot attempt at scaling. It returns the -// resourceVersion of the replicaset if the update is successful. -func (scaler *ReplicaSetScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { - rs, err := scaler.c.ReplicaSets(namespace).Get(name) - if err != nil { - return "", ScaleError{ScaleGetFailure, "Unknown", err} - } - if preconditions != nil { - if err := preconditions.ValidateReplicaSet(rs); err != nil { - return "", err - } - } - rs.Spec.Replicas = int32(newSize) - updatedRS, err := scaler.c.ReplicaSets(namespace).Update(rs) - if err != nil { - if errors.IsConflict(err) { - return "", ScaleError{ScaleUpdateConflictFailure, rs.ResourceVersion, err} - } - return "", ScaleError{ScaleUpdateFailure, rs.ResourceVersion, err} - } - return updatedRS.ObjectMeta.ResourceVersion, nil -} - -// Scale updates a ReplicaSet to a new size, with optional precondition check (if preconditions is -// not nil), optional retries (if retry is not nil), and then optionally waits for it's replica -// count to reach the new value (if wait is not nil). -func (scaler *ReplicaSetScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { - if preconditions == nil { - preconditions = &ScalePrecondition{-1, ""} - } - if retry == nil { - // Make it try only once, immediately - retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} - } - cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, nil) - if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil { - return err - } - if waitForReplicas != nil { - rs, err := scaler.c.ReplicaSets(namespace).Get(name) - if err != nil { - return err - } - err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.ReplicaSetHasDesiredReplicas(scaler.c, rs)) - - if err == wait.ErrWaitTimeout { - return fmt.Errorf("timed out waiting for %q to be synced", name) - } - return err - } - return nil -} - -// ValidateJob ensures that the preconditions match. Returns nil if they are valid, an error otherwise. 
-func (precondition *ScalePrecondition) ValidateJob(job *batch.Job) error { - if precondition.Size != -1 && job.Spec.Parallelism == nil { - return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), "nil"} - } - if precondition.Size != -1 && int(*job.Spec.Parallelism) != precondition.Size { - return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), strconv.Itoa(int(*job.Spec.Parallelism))} - } - if len(precondition.ResourceVersion) != 0 && job.ResourceVersion != precondition.ResourceVersion { - return PreconditionError{"resource version", precondition.ResourceVersion, job.ResourceVersion} - } - return nil -} - -type StatefulSetScaler struct { - c appsclient.StatefulSetsGetter -} - -// ScaleSimple does a simple one-shot attempt at scaling. It returns the -// resourceVersion of the statefulset if the update is successful. -func (scaler *StatefulSetScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { - ps, err := scaler.c.StatefulSets(namespace).Get(name) - if err != nil { - return "", ScaleError{ScaleGetFailure, "Unknown", err} - } - if preconditions != nil { - if err := preconditions.ValidateStatefulSet(ps); err != nil { - return "", err - } - } - ps.Spec.Replicas = int32(newSize) - updatedStatefulSet, err := scaler.c.StatefulSets(namespace).Update(ps) - if err != nil { - if errors.IsConflict(err) { - return "", ScaleError{ScaleUpdateConflictFailure, ps.ResourceVersion, err} - } - return "", ScaleError{ScaleUpdateFailure, ps.ResourceVersion, err} - } - return updatedStatefulSet.ResourceVersion, nil -} - -func (scaler *StatefulSetScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { - if preconditions == nil { - preconditions = &ScalePrecondition{-1, ""} - } - if retry == nil { - // Make it try only once, immediately - retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} - } - cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, nil) - if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil { - return err - } - if waitForReplicas != nil { - job, err := scaler.c.StatefulSets(namespace).Get(name) - if err != nil { - return err - } - err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.StatefulSetHasDesiredPets(scaler.c, job)) - if err == wait.ErrWaitTimeout { - return fmt.Errorf("timed out waiting for %q to be synced", name) - } - return err - } - return nil -} - -type JobScaler struct { - c batchclient.JobsGetter -} - -// ScaleSimple is responsible for updating job's parallelism. It returns the -// resourceVersion of the job if the update is successful. 
-func (scaler *JobScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { - job, err := scaler.c.Jobs(namespace).Get(name) - if err != nil { - return "", ScaleError{ScaleGetFailure, "Unknown", err} - } - if preconditions != nil { - if err := preconditions.ValidateJob(job); err != nil { - return "", err - } - } - parallelism := int32(newSize) - job.Spec.Parallelism = &parallelism - updatedJob, err := scaler.c.Jobs(namespace).Update(job) - if err != nil { - if errors.IsConflict(err) { - return "", ScaleError{ScaleUpdateConflictFailure, job.ResourceVersion, err} - } - return "", ScaleError{ScaleUpdateFailure, job.ResourceVersion, err} - } - return updatedJob.ObjectMeta.ResourceVersion, nil -} - -// Scale updates a Job to a new size, with optional precondition check (if preconditions is not nil), -// optional retries (if retry is not nil), and then optionally waits for parallelism to reach desired -// number, which can be less than requested based on job's current progress. -func (scaler *JobScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { - if preconditions == nil { - preconditions = &ScalePrecondition{-1, ""} - } - if retry == nil { - // Make it try only once, immediately - retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} - } - cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, nil) - if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil { - return err - } - if waitForReplicas != nil { - job, err := scaler.c.Jobs(namespace).Get(name) - if err != nil { - return err - } - err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.JobHasDesiredParallelism(scaler.c, job)) - if err == wait.ErrWaitTimeout { - return fmt.Errorf("timed out waiting for %q to be synced", name) - } - return err - } - return nil -} - -// ValidateDeployment ensures that the preconditions match. Returns nil if they are valid, an error otherwise. -func (precondition *ScalePrecondition) ValidateDeployment(deployment *extensions.Deployment) error { - if precondition.Size != -1 && int(deployment.Spec.Replicas) != precondition.Size { - return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(deployment.Spec.Replicas))} - } - if len(precondition.ResourceVersion) != 0 && deployment.ResourceVersion != precondition.ResourceVersion { - return PreconditionError{"resource version", precondition.ResourceVersion, deployment.ResourceVersion} - } - return nil -} - -type DeploymentScaler struct { - c extensionsclient.DeploymentsGetter -} - -// ScaleSimple is responsible for updating a deployment's desired replicas -// count. It returns the resourceVersion of the deployment if the update is -// successful. -func (scaler *DeploymentScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { - deployment, err := scaler.c.Deployments(namespace).Get(name) - if err != nil { - return "", ScaleError{ScaleGetFailure, "Unknown", err} - } - if preconditions != nil { - if err := preconditions.ValidateDeployment(deployment); err != nil { - return "", err - } - } - - // TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528). - // For now I'm falling back to regular Deployment update operation.
- deployment.Spec.Replicas = int32(newSize) - updatedDeployment, err := scaler.c.Deployments(namespace).Update(deployment) - if err != nil { - if errors.IsConflict(err) { - return "", ScaleError{ScaleUpdateConflictFailure, deployment.ResourceVersion, err} - } - return "", ScaleError{ScaleUpdateFailure, deployment.ResourceVersion, err} - } - return updatedDeployment.ObjectMeta.ResourceVersion, nil -} - -// Scale updates a deployment to a new size, with optional precondition check (if preconditions is not nil), -// optional retries (if retry is not nil), and then optionally waits for the status to reach desired count. -func (scaler *DeploymentScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { - if preconditions == nil { - preconditions = &ScalePrecondition{-1, ""} - } - if retry == nil { - // Make it try only once, immediately - retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} - } - cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, nil) - if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil { - return err - } - if waitForReplicas != nil { - deployment, err := scaler.c.Deployments(namespace).Get(name) - if err != nil { - return err - } - err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.DeploymentHasDesiredReplicas(scaler.c, deployment)) - if err == wait.ErrWaitTimeout { - return fmt.Errorf("timed out waiting for %q to be synced", name) - } - return err - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/secret.go b/vendor/k8s.io/kubernetes/pkg/kubectl/secret.go deleted file mode 100644 index cc3271197c..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/secret.go +++ /dev/null @@ -1,208 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/validation" -) - -// SecretGeneratorV1 supports stable generation of an opaque secret -type SecretGeneratorV1 struct { - // Name of secret (required) - Name string - // Type of secret (optional) - Type string - // FileSources to derive the secret from (optional) - FileSources []string - // LiteralSources to derive the secret from (optional) - LiteralSources []string -} - -// Ensure it supports the generator pattern that uses parameter injection -var _ Generator = &SecretGeneratorV1{} - -// Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &SecretGeneratorV1{} - -// Generate returns a secret using the specified parameters -func (s SecretGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) - if err != nil { - return nil, err - } - delegate := &SecretGeneratorV1{} - fromFileStrings, found := genericParams["from-file"] - if found { - fromFileArray, isArray := fromFileStrings.([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found :%v", fromFileStrings) - } - delegate.FileSources = fromFileArray - delete(genericParams, "from-file") - } - fromLiteralStrings, found := genericParams["from-literal"] - if found { - fromLiteralArray, isArray := fromLiteralStrings.([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found :%v", fromFileStrings) - } - delegate.LiteralSources = fromLiteralArray - delete(genericParams, "from-literal") - } - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - delegate.Name = params["name"] - delegate.Type = params["type"] - return delegate.StructuredGenerate() -} - -// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern -func (s SecretGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"type", false}, - {"from-file", false}, - {"from-literal", false}, - {"force", false}, - } -} - -// StructuredGenerate outputs a secret object using the configured fields -func (s SecretGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := s.validate(); err != nil { - return nil, err - } - secret := &api.Secret{} - secret.Name = s.Name - secret.Data = map[string][]byte{} - if len(s.Type) > 0 { - secret.Type = api.SecretType(s.Type) - } - if len(s.FileSources) > 0 { - if err := handleFromFileSources(secret, s.FileSources); err != nil { - return nil, err - } - } - if len(s.LiteralSources) > 0 { - if err := handleFromLiteralSources(secret, s.LiteralSources); err != nil { - return nil, err - } - } - return secret, nil -} - -// validate validates required fields are set to support structured generation -func (s SecretGeneratorV1) validate() error { - if len(s.Name) == 0 { - return fmt.Errorf("name must be specified") - } - return nil -} - -// handleFromLiteralSources adds the specified literal source information into the provided secret -func handleFromLiteralSources(secret *api.Secret, literalSources []string) error { - for _, literalSource := range literalSources { - keyName, value, err := parseLiteralSource(literalSource) - if 
err != nil { - return err - } - err = addKeyFromLiteralToSecret(secret, keyName, []byte(value)) - if err != nil { - return err - } - } - return nil -} - -// handleFromFileSources adds the specified file source information into the provided secret -func handleFromFileSources(secret *api.Secret, fileSources []string) error { - for _, fileSource := range fileSources { - keyName, filePath, err := parseFileSource(fileSource) - if err != nil { - return err - } - info, err := os.Stat(filePath) - if err != nil { - switch err := err.(type) { - case *os.PathError: - return fmt.Errorf("error reading %s: %v", filePath, err.Err) - default: - return fmt.Errorf("error reading %s: %v", filePath, err) - } - } - if info.IsDir() { - if strings.Contains(fileSource, "=") { - return fmt.Errorf("cannot give a key name for a directory path.") - } - fileList, err := ioutil.ReadDir(filePath) - if err != nil { - return fmt.Errorf("error listing files in %s: %v", filePath, err) - } - for _, item := range fileList { - itemPath := path.Join(filePath, item.Name()) - if item.Mode().IsRegular() { - keyName = item.Name() - err = addKeyFromFileToSecret(secret, keyName, itemPath) - if err != nil { - return err - } - } - } - } else { - err = addKeyFromFileToSecret(secret, keyName, filePath) - if err != nil { - return err - } - } - } - - return nil -} - -func addKeyFromFileToSecret(secret *api.Secret, keyName, filePath string) error { - data, err := ioutil.ReadFile(filePath) - if err != nil { - return err - } - return addKeyFromLiteralToSecret(secret, keyName, data) -} - -func addKeyFromLiteralToSecret(secret *api.Secret, keyName string, data []byte) error { - if errs := validation.IsConfigMapKey(keyName); len(errs) != 0 { - return fmt.Errorf("%q is not a valid key name for a Secret: %s", keyName, strings.Join(errs, ";")) - } - - if _, entryExists := secret.Data[keyName]; entryExists { - return fmt.Errorf("cannot add key %s, another key by that name already exists: %v.", keyName, secret.Data) - } - secret.Data[keyName] = data - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry.go b/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry.go deleted file mode 100644 index 65615a7fd6..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "encoding/json" - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/credentialprovider" - "k8s.io/kubernetes/pkg/runtime" -) - -// SecretForDockerRegistryGeneratorV1 supports stable generation of a docker registry secret -type SecretForDockerRegistryGeneratorV1 struct { - // Name of secret (required) - Name string - // Username for registry (required) - Username string - // Email for registry (required) - Email string - // Password for registry (required) - Password string - // Server for registry (required) - Server string -} - -// Ensure it supports the generator pattern that uses parameter injection -var _ Generator = &SecretForDockerRegistryGeneratorV1{} - -// Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &SecretForDockerRegistryGeneratorV1{} - -// Generate returns a secret using the specified parameters -func (s SecretForDockerRegistryGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) - if err != nil { - return nil, err - } - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - delegate := &SecretForDockerRegistryGeneratorV1{ - Name: params["name"], - Username: params["docker-username"], - Email: params["docker-email"], - Password: params["docker-password"], - Server: params["docker-server"], - } - return delegate.StructuredGenerate() -} - -// StructuredGenerate outputs a secret object using the configured fields -func (s SecretForDockerRegistryGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := s.validate(); err != nil { - return nil, err - } - dockercfgContent, err := handleDockercfgContent(s.Username, s.Password, s.Email, s.Server) - if err != nil { - return nil, err - } - secret := &api.Secret{} - secret.Name = s.Name - secret.Type = api.SecretTypeDockercfg - secret.Data = map[string][]byte{} - secret.Data[api.DockerConfigKey] = dockercfgContent - return secret, nil -} - -// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern -func (s SecretForDockerRegistryGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"docker-username", true}, - {"docker-email", true}, - {"docker-password", true}, - {"docker-server", true}, - } -} - -// validate validates required fields are set to support structured generation -func (s SecretForDockerRegistryGeneratorV1) validate() error { - if len(s.Name) == 0 { - return fmt.Errorf("name must be specified") - } - if len(s.Username) == 0 { - return fmt.Errorf("username must be specified") - } - if len(s.Email) == 0 { - return fmt.Errorf("email must be specified") - } - if len(s.Password) == 0 { - return fmt.Errorf("password must be specified") - } - if len(s.Server) == 0 { - return fmt.Errorf("server must be specified") - } - return nil -} - -// handleDockercfgContent serializes a dockercfg json file -func handleDockercfgContent(username, password, email, server string) ([]byte, error) { - dockercfgAuth := credentialprovider.DockerConfigEntry{ - Username: username, - Password: password, - Email: email, - } - - dockerCfg := map[string]credentialprovider.DockerConfigEntry{server: dockercfgAuth} - - return json.Marshal(dockerCfg) -} diff --git 
a/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go b/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go deleted file mode 100644 index a29af55976..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "crypto/tls" - "fmt" - "io/ioutil" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/runtime" -) - -// SecretForTLSGeneratorV1 supports stable generation of a TLS secret. -type SecretForTLSGeneratorV1 struct { - // Name is the name of this TLS secret. - Name string - // Key is the path to the user's private key. - Key string - // Cert is the path to the user's public key certificate. - Cert string -} - -// Ensure it supports the generator pattern that uses parameter injection -var _ Generator = &SecretForTLSGeneratorV1{} - -// Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &SecretForTLSGeneratorV1{} - -// Generate returns a secret using the specified parameters -func (s SecretForTLSGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) - if err != nil { - return nil, err - } - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - delegate := &SecretForTLSGeneratorV1{ - Name: params["name"], - Key: params["key"], - Cert: params["cert"], - } - return delegate.StructuredGenerate() -} - -// StructuredGenerate outputs a secret object using the configured fields -func (s SecretForTLSGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := s.validate(); err != nil { - return nil, err - } - tlsCrt, err := readFile(s.Cert) - if err != nil { - return nil, err - } - tlsKey, err := readFile(s.Key) - if err != nil { - return nil, err - } - secret := &api.Secret{} - secret.Name = s.Name - secret.Type = api.SecretTypeTLS - secret.Data = map[string][]byte{} - secret.Data[api.TLSCertKey] = []byte(tlsCrt) - secret.Data[api.TLSPrivateKeyKey] = []byte(tlsKey) - return secret, nil -} - -// readFile just reads a file into a byte array. -func readFile(file string) ([]byte, error) { - b, err := ioutil.ReadFile(file) - if err != nil { - return []byte{}, fmt.Errorf("Cannot read file %v, %v", file, err) - } - return b, nil -} - -// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern -func (s SecretForTLSGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"key", true}, - {"cert", true}, - } -} - -// validate validates required fields are set to support structured generation -func (s SecretForTLSGeneratorV1) validate() error { - // TODO: This is not strictly necessary. 
We can generate a self signed cert - // if no key/cert is given. The only requiredment is that we either get both - // or none. See test/e2e/ingress_utils for self signed cert generation. - if len(s.Key) == 0 { - return fmt.Errorf("key must be specified.") - } - if len(s.Cert) == 0 { - return fmt.Errorf("certificate must be specified.") - } - if _, err := tls.LoadX509KeyPair(s.Cert, s.Key); err != nil { - return fmt.Errorf("failed to load key pair %v", err) - } - // TODO: Add more validation. - // 1. If the certificate contains intermediates, it is a valid chain. - // 2. Format etc. - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/service.go b/vendor/k8s.io/kubernetes/pkg/kubectl/service.go deleted file mode 100644 index 604dbf327e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/service.go +++ /dev/null @@ -1,237 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "strconv" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/intstr" -) - -// The only difference between ServiceGeneratorV1 and V2 is that the service port is named "default" in V1, while it is left unnamed in V2. -type ServiceGeneratorV1 struct{} - -func (ServiceGeneratorV1) ParamNames() []GeneratorParam { - return paramNames() -} - -func (ServiceGeneratorV1) Generate(params map[string]interface{}) (runtime.Object, error) { - params["port-name"] = "default" - return generate(params) -} - -type ServiceGeneratorV2 struct{} - -func (ServiceGeneratorV2) ParamNames() []GeneratorParam { - return paramNames() -} - -func (ServiceGeneratorV2) Generate(params map[string]interface{}) (runtime.Object, error) { - return generate(params) -} - -func paramNames() []GeneratorParam { - return []GeneratorParam{ - {"default-name", true}, - {"name", false}, - {"selector", true}, - // port will be used if a user specifies --port OR the exposed object - // has one port - {"port", false}, - // ports will be used iff a user doesn't specify --port AND the - // exposed object has multiple ports - {"ports", false}, - {"labels", false}, - {"external-ip", false}, - {"create-external-load-balancer", false}, - {"load-balancer-ip", false}, - {"type", false}, - {"protocol", false}, - // protocols will be used to keep port-protocol mapping derived from - // exposed object - {"protocols", false}, - {"container-port", false}, // alias of target-port - {"target-port", false}, - {"port-name", false}, - {"session-affinity", false}, - {"cluster-ip", false}, - } -} - -func generate(genericParams map[string]interface{}) (runtime.Object, error) { - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - selectorString, found := params["selector"] - if !found || len(selectorString) == 0 { - return nil, fmt.Errorf("'selector' is a required parameter.") 
- } - selector, err := ParseLabels(selectorString) - if err != nil { - return nil, err - } - - labelsString, found := params["labels"] - var labels map[string]string - if found && len(labelsString) > 0 { - labels, err = ParseLabels(labelsString) - if err != nil { - return nil, err - } - } - - name, found := params["name"] - if !found || len(name) == 0 { - name, found = params["default-name"] - if !found || len(name) == 0 { - return nil, fmt.Errorf("'name' is a required parameter.") - } - } - ports := []api.ServicePort{} - servicePortName, found := params["port-name"] - if !found { - // Leave the port unnamed. - servicePortName = "" - } - - protocolsString, found := params["protocols"] - var portProtocolMap map[string]string - if found && len(protocolsString) > 0 { - portProtocolMap, err = ParseProtocols(protocolsString) - if err != nil { - return nil, err - } - } - // ports takes precedence over port since it will be - // specified only when the user hasn't specified a port - // via --port and the exposed object has multiple ports. - var portString string - if portString, found = params["ports"]; !found { - portString, found = params["port"] - if !found { - return nil, fmt.Errorf("'port' is a required parameter.") - } - } - - portStringSlice := strings.Split(portString, ",") - for i, stillPortString := range portStringSlice { - port, err := strconv.Atoi(stillPortString) - if err != nil { - return nil, err - } - name := servicePortName - // If we are going to assign multiple ports to a service, we need to - // generate a different name for each one. - if len(portStringSlice) > 1 { - name = fmt.Sprintf("port-%d", i+1) - } - protocol := params["protocol"] - - switch { - case len(protocol) == 0 && len(portProtocolMap) == 0: - // Default to TCP, what the flag was doing previously. - protocol = "TCP" - case len(protocol) > 0 && len(portProtocolMap) > 0: - // User has specified the --protocol while exposing a multiprotocol resource - // We should stomp multiple protocols with the one specified ie. 
do nothing - case len(protocol) == 0 && len(portProtocolMap) > 0: - // no --protocol and we expose a multiprotocol resource - protocol = "TCP" // have the default so we can stay sane - if exposeProtocol, found := portProtocolMap[stillPortString]; found { - protocol = exposeProtocol - } - } - ports = append(ports, api.ServicePort{ - Name: name, - Port: int32(port), - Protocol: api.Protocol(protocol), - }) - } - - service := api.Service{ - ObjectMeta: api.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: api.ServiceSpec{ - Selector: selector, - Ports: ports, - }, - } - targetPortString := params["target-port"] - if len(targetPortString) == 0 { - targetPortString = params["container-port"] - } - if len(targetPortString) > 0 { - var targetPort intstr.IntOrString - if portNum, err := strconv.Atoi(targetPortString); err != nil { - targetPort = intstr.FromString(targetPortString) - } else { - targetPort = intstr.FromInt(portNum) - } - // Use the same target-port for every port - for i := range service.Spec.Ports { - service.Spec.Ports[i].TargetPort = targetPort - } - } else { - // If --target-port or --container-port haven't been specified, this - // should be the same as Port - for i := range service.Spec.Ports { - port := service.Spec.Ports[i].Port - service.Spec.Ports[i].TargetPort = intstr.FromInt(int(port)) - } - } - if params["create-external-load-balancer"] == "true" { - service.Spec.Type = api.ServiceTypeLoadBalancer - } - if len(params["external-ip"]) > 0 { - service.Spec.ExternalIPs = []string{params["external-ip"]} - } - if len(params["type"]) != 0 { - service.Spec.Type = api.ServiceType(params["type"]) - } - if service.Spec.Type == api.ServiceTypeLoadBalancer { - service.Spec.LoadBalancerIP = params["load-balancer-ip"] - } - if len(params["session-affinity"]) != 0 { - switch api.ServiceAffinity(params["session-affinity"]) { - case api.ServiceAffinityNone: - service.Spec.SessionAffinity = api.ServiceAffinityNone - case api.ServiceAffinityClientIP: - service.Spec.SessionAffinity = api.ServiceAffinityClientIP - default: - return nil, fmt.Errorf("unknown session affinity: %s", params["session-affinity"]) - } - } - if len(params["cluster-ip"]) != 0 { - if params["cluster-ip"] == "None" { - service.Spec.ClusterIP = api.ClusterIPNone - } else { - service.Spec.ClusterIP = params["cluster-ip"] - } - } - return &service, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/service_basic.go b/vendor/k8s.io/kubernetes/pkg/kubectl/service_basic.go deleted file mode 100644 index 9f1c3d2b6e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/service_basic.go +++ /dev/null @@ -1,211 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "fmt" - "strconv" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/intstr" -) - -type ServiceCommonGeneratorV1 struct { - Name string - TCP []string - Type api.ServiceType - ClusterIP string - NodePort int -} - -type ServiceClusterIPGeneratorV1 struct { - ServiceCommonGeneratorV1 -} - -type ServiceNodePortGeneratorV1 struct { - ServiceCommonGeneratorV1 -} - -type ServiceLoadBalancerGeneratorV1 struct { - ServiceCommonGeneratorV1 -} - -func (ServiceClusterIPGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"tcp", true}, - {"clusterip", false}, - } -} -func (ServiceNodePortGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"tcp", true}, - {"nodeport", true}, - } -} -func (ServiceLoadBalancerGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"tcp", true}, - } -} - -func parsePorts(portString string) (int32, intstr.IntOrString, error) { - portStringSlice := strings.Split(portString, ":") - - port, err := strconv.Atoi(portStringSlice[0]) - if err != nil { - return 0, intstr.FromInt(0), err - } - if len(portStringSlice) == 1 { - return int32(port), intstr.FromInt(int(port)), nil - } - - var targetPort intstr.IntOrString - if portNum, err := strconv.Atoi(portStringSlice[1]); err != nil { - targetPort = intstr.FromString(portStringSlice[1]) - } else { - targetPort = intstr.FromInt(portNum) - } - return int32(port), targetPort, nil -} - -func (s ServiceCommonGeneratorV1) GenerateCommon(params map[string]interface{}) error { - name, isString := params["name"].(string) - if !isString { - return fmt.Errorf("expected string, saw %v for 'name'", name) - } - tcpStrings, isArray := params["tcp"].([]string) - if !isArray { - return fmt.Errorf("expected []string, found :%v", tcpStrings) - } - clusterip, isString := params["clusterip"].(string) - if !isString { - return fmt.Errorf("expected string, saw %v for 'clusterip'", clusterip) - } - s.Name = name - s.TCP = tcpStrings - s.ClusterIP = clusterip - return nil -} - -func (s ServiceLoadBalancerGeneratorV1) Generate(params map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), params) - if err != nil { - return nil, err - } - delegate := &ServiceCommonGeneratorV1{Type: api.ServiceTypeLoadBalancer, ClusterIP: ""} - err = delegate.GenerateCommon(params) - if err != nil { - return nil, err - } - return delegate.StructuredGenerate() -} - -func (s ServiceNodePortGeneratorV1) Generate(params map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), params) - if err != nil { - return nil, err - } - delegate := &ServiceCommonGeneratorV1{Type: api.ServiceTypeNodePort, ClusterIP: ""} - err = delegate.GenerateCommon(params) - if err != nil { - return nil, err - } - return delegate.StructuredGenerate() -} - -func (s ServiceClusterIPGeneratorV1) Generate(params map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), params) - if err != nil { - return nil, err - } - delegate := &ServiceCommonGeneratorV1{Type: api.ServiceTypeClusterIP, ClusterIP: ""} - err = delegate.GenerateCommon(params) - if err != nil { - return nil, err - } - return delegate.StructuredGenerate() -} - -// validate validates required fields are set to support structured generation -func (s ServiceCommonGeneratorV1) validate() error { - if len(s.Name) == 0 { - return fmt.Errorf("name 
must be specified") - } - if len(s.Type) == 0 { - return fmt.Errorf("type must be specified") - } - if s.ClusterIP == api.ClusterIPNone && s.Type != api.ServiceTypeClusterIP { - return fmt.Errorf("ClusterIP=None can only be used with ClusterIP service type") - } - if s.ClusterIP == api.ClusterIPNone && len(s.TCP) > 0 { - return fmt.Errorf("can not map ports with clusterip=None") - } - if s.ClusterIP != api.ClusterIPNone && len(s.TCP) == 0 { - return fmt.Errorf("at least one tcp port specifier must be provided") - } - return nil -} - -func (s ServiceCommonGeneratorV1) StructuredGenerate() (runtime.Object, error) { - err := s.validate() - if err != nil { - return nil, err - } - ports := []api.ServicePort{} - for _, tcpString := range s.TCP { - port, targetPort, err := parsePorts(tcpString) - if err != nil { - return nil, err - } - - portName := strings.Replace(tcpString, ":", "-", -1) - ports = append(ports, api.ServicePort{ - Name: portName, - Port: port, - TargetPort: targetPort, - Protocol: api.Protocol("TCP"), - NodePort: int32(s.NodePort), - }) - } - - // setup default label and selector - labels := map[string]string{} - labels["app"] = s.Name - selector := map[string]string{} - selector["app"] = s.Name - - service := api.Service{ - ObjectMeta: api.ObjectMeta{ - Name: s.Name, - Labels: labels, - }, - Spec: api.ServiceSpec{ - Type: api.ServiceType(s.Type), - Selector: selector, - Ports: ports, - }, - } - if len(s.ClusterIP) > 0 { - service.Spec.ClusterIP = s.ClusterIP - } - return &service, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/serviceaccount.go b/vendor/k8s.io/kubernetes/pkg/kubectl/serviceaccount.go deleted file mode 100644 index 1a7e256daa..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/serviceaccount.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/runtime" -) - -// ServiceAccountGeneratorV1 supports stable generation of a service account -type ServiceAccountGeneratorV1 struct { - // Name of service account - Name string -} - -// Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &ServiceAccountGeneratorV1{} - -// StructuredGenerate outputs a service account object using the configured fields -func (g *ServiceAccountGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := g.validate(); err != nil { - return nil, err - } - serviceAccount := &api.ServiceAccount{} - serviceAccount.Name = g.Name - return serviceAccount, nil -} - -// validate validates required fields are set to support structured generation -func (g *ServiceAccountGeneratorV1) validate() error { - if len(g.Name) == 0 { - return fmt.Errorf("name must be specified") - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/sorted_resource_name_list.go b/vendor/k8s.io/kubernetes/pkg/kubectl/sorted_resource_name_list.go deleted file mode 100644 index aad1d80747..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/sorted_resource_name_list.go +++ /dev/null @@ -1,86 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "sort" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/kubelet/qos" -) - -type SortableResourceNames []api.ResourceName - -func (list SortableResourceNames) Len() int { - return len(list) -} - -func (list SortableResourceNames) Swap(i, j int) { - list[i], list[j] = list[j], list[i] -} - -func (list SortableResourceNames) Less(i, j int) bool { - return list[i] < list[j] -} - -// SortedResourceNames returns the sorted resource names of a resource list. -func SortedResourceNames(list api.ResourceList) []api.ResourceName { - resources := make([]api.ResourceName, 0, len(list)) - for res := range list { - resources = append(resources, res) - } - sort.Sort(SortableResourceNames(resources)) - return resources -} - -type SortableResourceQuotas []api.ResourceQuota - -func (list SortableResourceQuotas) Len() int { - return len(list) -} - -func (list SortableResourceQuotas) Swap(i, j int) { - list[i], list[j] = list[j], list[i] -} - -func (list SortableResourceQuotas) Less(i, j int) bool { - return list[i].Name < list[j].Name -} - -type SortableVolumeMounts []api.VolumeMount - -func (list SortableVolumeMounts) Len() int { - return len(list) -} - -func (list SortableVolumeMounts) Swap(i, j int) { - list[i], list[j] = list[j], list[i] -} - -func (list SortableVolumeMounts) Less(i, j int) bool { - return list[i].MountPath < list[j].MountPath -} - -// SortedQoSResourceNames returns the sorted resource names of a QoS list. 
-func SortedQoSResourceNames(list qos.QOSList) []api.ResourceName { - resources := make([]api.ResourceName, 0, len(list)) - for res := range list { - resources = append(resources, res) - } - sort.Sort(SortableResourceNames(resources)) - return resources -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/sorting_printer.go b/vendor/k8s.io/kubernetes/pkg/kubectl/sorting_printer.go deleted file mode 100644 index d2fcb451f1..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/sorting_printer.go +++ /dev/null @@ -1,302 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "io" - "reflect" - "sort" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/integer" - "k8s.io/kubernetes/pkg/util/jsonpath" - - "github.com/golang/glog" -) - -// Sorting printer sorts list types before delegating to another printer. -// Non-list types are simply passed through -type SortingPrinter struct { - SortField string - Delegate ResourcePrinter - Decoder runtime.Decoder -} - -func (s *SortingPrinter) AfterPrint(w io.Writer, res string) error { - return nil -} - -func (s *SortingPrinter) PrintObj(obj runtime.Object, out io.Writer) error { - if !meta.IsListType(obj) { - return s.Delegate.PrintObj(obj, out) - } - - if err := s.sortObj(obj); err != nil { - return err - } - return s.Delegate.PrintObj(obj, out) -} - -// TODO: implement HandledResources() -func (p *SortingPrinter) HandledResources() []string { - return []string{} -} - -func (s *SortingPrinter) sortObj(obj runtime.Object) error { - objs, err := meta.ExtractList(obj) - if err != nil { - return err - } - if len(objs) == 0 { - return nil - } - - sorter, err := SortObjects(s.Decoder, objs, s.SortField) - if err != nil { - return err - } - - switch list := obj.(type) { - case *v1.List: - outputList := make([]runtime.RawExtension, len(objs)) - for ix := range objs { - outputList[ix] = list.Items[sorter.OriginalPosition(ix)] - } - list.Items = outputList - return nil - } - return meta.SetList(obj, objs) -} - -func SortObjects(decoder runtime.Decoder, objs []runtime.Object, fieldInput string) (*RuntimeSort, error) { - parser := jsonpath.New("sorting") - - field, err := massageJSONPath(fieldInput) - if err != nil { - return nil, err - } - - if err := parser.Parse(field); err != nil { - return nil, err - } - - for ix := range objs { - item := objs[ix] - switch u := item.(type) { - case *runtime.Unknown: - var err error - if objs[ix], _, err = decoder.Decode(u.Raw, nil, nil); err != nil { - return nil, err - } - } - } - - var values [][]reflect.Value - if unstructured, ok := objs[0].(*runtime.Unstructured); ok { - values, err = parser.FindResults(unstructured.Object) - } else { - values, err = parser.FindResults(reflect.ValueOf(objs[0]).Elem().Interface()) - } - - if err != nil { - return nil, err - } - if len(values) == 0 { - return nil, fmt.Errorf("couldn't find any field 
with path: %s", field) - } - - sorter := NewRuntimeSort(field, objs) - sort.Sort(sorter) - return sorter, nil -} - -// RuntimeSort is an implementation of the golang sort interface that knows how to sort -// lists of runtime.Object -type RuntimeSort struct { - field string - objs []runtime.Object - origPosition []int -} - -func NewRuntimeSort(field string, objs []runtime.Object) *RuntimeSort { - sorter := &RuntimeSort{field: field, objs: objs, origPosition: make([]int, len(objs))} - for ix := range objs { - sorter.origPosition[ix] = ix - } - return sorter -} - -func (r *RuntimeSort) Len() int { - return len(r.objs) -} - -func (r *RuntimeSort) Swap(i, j int) { - r.objs[i], r.objs[j] = r.objs[j], r.objs[i] - r.origPosition[i], r.origPosition[j] = r.origPosition[j], r.origPosition[i] -} - -func isLess(i, j reflect.Value) (bool, error) { - switch i.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return i.Int() < j.Int(), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return i.Uint() < j.Uint(), nil - case reflect.Float32, reflect.Float64: - return i.Float() < j.Float(), nil - case reflect.String: - return i.String() < j.String(), nil - case reflect.Ptr: - return isLess(i.Elem(), j.Elem()) - case reflect.Struct: - // sort unversioned.Time - in := i.Interface() - if t, ok := in.(unversioned.Time); ok { - return t.Before(j.Interface().(unversioned.Time)), nil - } - // fallback to the fields comparison - for idx := 0; idx < i.NumField(); idx++ { - less, err := isLess(i.Field(idx), j.Field(idx)) - if err != nil || !less { - return less, err - } - } - return true, nil - case reflect.Array, reflect.Slice: - // note: the length of i and j may be different - for idx := 0; idx < integer.IntMin(i.Len(), j.Len()); idx++ { - less, err := isLess(i.Index(idx), j.Index(idx)) - if err != nil || !less { - return less, err - } - } - return true, nil - - case reflect.Interface: - switch itype := i.Interface().(type) { - case uint8: - if jtype, ok := j.Interface().(uint8); ok { - return itype < jtype, nil - } - case uint16: - if jtype, ok := j.Interface().(uint16); ok { - return itype < jtype, nil - } - case uint32: - if jtype, ok := j.Interface().(uint32); ok { - return itype < jtype, nil - } - case uint64: - if jtype, ok := j.Interface().(uint64); ok { - return itype < jtype, nil - } - case int8: - if jtype, ok := j.Interface().(int8); ok { - return itype < jtype, nil - } - case int16: - if jtype, ok := j.Interface().(int16); ok { - return itype < jtype, nil - } - case int32: - if jtype, ok := j.Interface().(int32); ok { - return itype < jtype, nil - } - case int64: - if jtype, ok := j.Interface().(int64); ok { - return itype < jtype, nil - } - case uint: - if jtype, ok := j.Interface().(uint); ok { - return itype < jtype, nil - } - case int: - if jtype, ok := j.Interface().(int); ok { - return itype < jtype, nil - } - case float32: - if jtype, ok := j.Interface().(float32); ok { - return itype < jtype, nil - } - case float64: - if jtype, ok := j.Interface().(float64); ok { - return itype < jtype, nil - } - case string: - if jtype, ok := j.Interface().(string); ok { - return itype < jtype, nil - } - default: - return false, fmt.Errorf("unsortable type: %T", itype) - } - return false, fmt.Errorf("unsortable interface: %v", i.Kind()) - - default: - return false, fmt.Errorf("unsortable type: %v", i.Kind()) - } -} - -func (r *RuntimeSort) Less(i, j int) bool { - iObj := r.objs[i] - jObj := r.objs[j] - - parser := 
jsonpath.New("sorting") - parser.Parse(r.field) - - var iValues [][]reflect.Value - var jValues [][]reflect.Value - var err error - - if unstructured, ok := iObj.(*runtime.Unstructured); ok { - iValues, err = parser.FindResults(unstructured.Object) - } else { - iValues, err = parser.FindResults(reflect.ValueOf(iObj).Elem().Interface()) - } - if err != nil { - glog.Fatalf("Failed to get i values for %#v using %s (%#v)", iObj, r.field, err) - } - - if unstructured, ok := jObj.(*runtime.Unstructured); ok { - jValues, err = parser.FindResults(unstructured.Object) - } else { - jValues, err = parser.FindResults(reflect.ValueOf(jObj).Elem().Interface()) - } - if err != nil { - glog.Fatalf("Failed to get j values for %#v using %s (%v)", jObj, r.field, err) - } - - iField := iValues[0][0] - jField := jValues[0][0] - - less, err := isLess(iField, jField) - if err != nil { - glog.Fatalf("Field %s in %v is an unsortable type: %s, err: %v", r.field, iObj, iField.Kind().String(), err) - } - return less -} - -// Returns the starting (original) position of a particular index. e.g. If OriginalPosition(0) returns 5 than the -// the item currently at position 0 was at position 5 in the original unsorted array. -func (r *RuntimeSort) OriginalPosition(ix int) int { - if ix < 0 || ix > len(r.origPosition) { - return -1 - } - return r.origPosition[ix] -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/stop.go b/vendor/k8s.io/kubernetes/pkg/kubectl/stop.go deleted file mode 100644 index deb273b7c8..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/stop.go +++ /dev/null @@ -1,511 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "strings" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - appsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion" - batchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" - coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" - deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/util" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/uuid" - "k8s.io/kubernetes/pkg/util/wait" -) - -const ( - Interval = time.Second * 1 - Timeout = time.Minute * 5 -) - -// A Reaper handles terminating an object as gracefully as possible. 
-// timeout is how long we'll wait for the termination to be successful -// gracePeriod is time given to an API object for it to delete itself cleanly, -// e.g., pod shutdown. It may or may not be supported by the API object. -type Reaper interface { - Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error -} - -type NoSuchReaperError struct { - kind unversioned.GroupKind -} - -func (n *NoSuchReaperError) Error() string { - return fmt.Sprintf("no reaper has been implemented for %v", n.kind) -} - -func IsNoSuchReaperError(err error) bool { - _, ok := err.(*NoSuchReaperError) - return ok -} - -func ReaperFor(kind unversioned.GroupKind, c internalclientset.Interface) (Reaper, error) { - switch kind { - case api.Kind("ReplicationController"): - return &ReplicationControllerReaper{c.Core(), Interval, Timeout}, nil - - case extensions.Kind("ReplicaSet"): - return &ReplicaSetReaper{c.Extensions(), Interval, Timeout}, nil - - case extensions.Kind("DaemonSet"): - return &DaemonSetReaper{c.Extensions(), Interval, Timeout}, nil - - case api.Kind("Pod"): - return &PodReaper{c.Core()}, nil - - case api.Kind("Service"): - return &ServiceReaper{c.Core()}, nil - - case extensions.Kind("Job"), batch.Kind("Job"): - return &JobReaper{c.Batch(), c.Core(), Interval, Timeout}, nil - - case apps.Kind("StatefulSet"): - return &StatefulSetReaper{c.Apps(), c.Core(), Interval, Timeout}, nil - - case extensions.Kind("Deployment"): - return &DeploymentReaper{c.Extensions(), c.Extensions(), Interval, Timeout}, nil - - } - return nil, &NoSuchReaperError{kind} -} - -func ReaperForReplicationController(rcClient coreclient.ReplicationControllersGetter, timeout time.Duration) (Reaper, error) { - return &ReplicationControllerReaper{rcClient, Interval, timeout}, nil -} - -type ReplicationControllerReaper struct { - client coreclient.ReplicationControllersGetter - pollInterval, timeout time.Duration -} -type ReplicaSetReaper struct { - client extensionsclient.ReplicaSetsGetter - pollInterval, timeout time.Duration -} -type DaemonSetReaper struct { - client extensionsclient.DaemonSetsGetter - pollInterval, timeout time.Duration -} -type JobReaper struct { - client batchclient.JobsGetter - podClient coreclient.PodsGetter - pollInterval, timeout time.Duration -} -type DeploymentReaper struct { - dClient extensionsclient.DeploymentsGetter - rsClient extensionsclient.ReplicaSetsGetter - pollInterval, timeout time.Duration -} -type PodReaper struct { - client coreclient.PodsGetter -} -type ServiceReaper struct { - client coreclient.ServicesGetter -} -type StatefulSetReaper struct { - client appsclient.StatefulSetsGetter - podClient coreclient.PodsGetter - pollInterval, timeout time.Duration -} - -type objInterface interface { - Delete(name string) error - Get(name string) (meta.Object, error) -} - -// getOverlappingControllers finds rcs that this controller overlaps, as well as rcs overlapping this controller. 
-func getOverlappingControllers(rcClient coreclient.ReplicationControllerInterface, rc *api.ReplicationController) ([]api.ReplicationController, error) { - rcs, err := rcClient.List(api.ListOptions{}) - if err != nil { - return nil, fmt.Errorf("error getting replication controllers: %v", err) - } - var matchingRCs []api.ReplicationController - rcLabels := labels.Set(rc.Spec.Selector) - for _, controller := range rcs.Items { - newRCLabels := labels.Set(controller.Spec.Selector) - if labels.SelectorFromSet(newRCLabels).Matches(rcLabels) || labels.SelectorFromSet(rcLabels).Matches(newRCLabels) { - matchingRCs = append(matchingRCs, controller) - } - } - return matchingRCs, nil -} - -func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { - rc := reaper.client.ReplicationControllers(namespace) - scaler := &ReplicationControllerScaler{reaper.client} - ctrl, err := rc.Get(name) - if err != nil { - return err - } - if timeout == 0 { - timeout = Timeout + time.Duration(10*ctrl.Spec.Replicas)*time.Second - } - - // The rc manager will try and detect all matching rcs for a pod's labels, - // and only sync the oldest one. This means if we have a pod with labels - // [(k1: v1), (k2: v2)] and two rcs: rc1 with selector [(k1=v1)], and rc2 with selector [(k1=v1),(k2=v2)], - // the rc manager will sync the older of the two rcs. - // - // If there are rcs with a superset of labels, eg: - // deleting: (k1=v1), superset: (k2=v2, k1=v1) - // - It isn't safe to delete the rc because there could be a pod with labels - // (k1=v1) that isn't managed by the superset rc. We can't scale it down - // either, because there could be a pod (k2=v2, k1=v1) that it deletes - // causing a fight with the superset rc. - // If there are rcs with a subset of labels, eg: - // deleting: (k2=v2, k1=v1), subset: (k1=v1), superset: (k2=v2, k1=v1, k3=v3) - // - Even if it's safe to delete this rc without a scale down because all it's pods - // are being controlled by the subset rc the code returns an error. - - // In theory, creating overlapping controllers is user error, so the loop below - // tries to account for this logic only in the common case, where we end up - // with multiple rcs that have an exact match on selectors. - - overlappingCtrls, err := getOverlappingControllers(rc, ctrl) - if err != nil { - return fmt.Errorf("error getting replication controllers: %v", err) - } - exactMatchRCs := []api.ReplicationController{} - overlapRCs := []string{} - for _, overlappingRC := range overlappingCtrls { - if len(overlappingRC.Spec.Selector) == len(ctrl.Spec.Selector) { - exactMatchRCs = append(exactMatchRCs, overlappingRC) - } else { - overlapRCs = append(overlapRCs, overlappingRC.Name) - } - } - if len(overlapRCs) > 0 { - return fmt.Errorf( - "Detected overlapping controllers for rc %v: %v, please manage deletion individually with --cascade=false.", - ctrl.Name, strings.Join(overlapRCs, ",")) - } - if len(exactMatchRCs) == 1 { - // No overlapping controllers. 
- retry := NewRetryParams(reaper.pollInterval, reaper.timeout) - waitForReplicas := NewRetryParams(reaper.pollInterval, timeout) - if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas); err != nil { - return err - } - } - falseVar := false - deleteOptions := &api.DeleteOptions{OrphanDependents: &falseVar} - return rc.Delete(name, deleteOptions) -} - -// TODO(madhusudancs): Implement it when controllerRef is implemented - https://github.com/kubernetes/kubernetes/issues/2210 -// getOverlappingReplicaSets finds ReplicaSets that this ReplicaSet overlaps, as well as ReplicaSets overlapping this ReplicaSet. -func getOverlappingReplicaSets(c extensionsclient.ReplicaSetInterface, rs *extensions.ReplicaSet) ([]extensions.ReplicaSet, []extensions.ReplicaSet, error) { - var overlappingRSs, exactMatchRSs []extensions.ReplicaSet - return overlappingRSs, exactMatchRSs, nil -} - -func (reaper *ReplicaSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { - rsc := reaper.client.ReplicaSets(namespace) - scaler := &ReplicaSetScaler{reaper.client} - rs, err := rsc.Get(name) - if err != nil { - return err - } - if timeout == 0 { - timeout = Timeout + time.Duration(10*rs.Spec.Replicas)*time.Second - } - - // The ReplicaSet controller will try and detect all matching ReplicaSets - // for a pod's labels, and only sync the oldest one. This means if we have - // a pod with labels [(k1: v1), (k2: v2)] and two ReplicaSets: rs1 with - // selector [(k1=v1)], and rs2 with selector [(k1=v1),(k2=v2)], the - // ReplicaSet controller will sync the older of the two ReplicaSets. - // - // If there are ReplicaSets with a superset of labels, eg: - // deleting: (k1=v1), superset: (k2=v2, k1=v1) - // - It isn't safe to delete the ReplicaSet because there could be a pod - // with labels (k1=v1) that isn't managed by the superset ReplicaSet. - // We can't scale it down either, because there could be a pod - // (k2=v2, k1=v1) that it deletes causing a fight with the superset - // ReplicaSet. - // If there are ReplicaSets with a subset of labels, eg: - // deleting: (k2=v2, k1=v1), subset: (k1=v1), superset: (k2=v2, k1=v1, k3=v3) - // - Even if it's safe to delete this ReplicaSet without a scale down because - // all it's pods are being controlled by the subset ReplicaSet the code - // returns an error. - - // In theory, creating overlapping ReplicaSets is user error, so the loop below - // tries to account for this logic only in the common case, where we end up - // with multiple ReplicaSets that have an exact match on selectors. - - // TODO(madhusudancs): Re-evaluate again when controllerRef is implemented - - // https://github.com/kubernetes/kubernetes/issues/2210 - overlappingRSs, exactMatchRSs, err := getOverlappingReplicaSets(rsc, rs) - if err != nil { - return fmt.Errorf("error getting ReplicaSets: %v", err) - } - - if len(overlappingRSs) > 0 { - var names []string - for _, overlappingRS := range overlappingRSs { - names = append(names, overlappingRS.Name) - } - return fmt.Errorf( - "Detected overlapping ReplicaSets for ReplicaSet %v: %v, please manage deletion individually with --cascade=false.", - rs.Name, strings.Join(names, ",")) - } - if len(exactMatchRSs) == 0 { - // No overlapping ReplicaSets. 
- retry := NewRetryParams(reaper.pollInterval, reaper.timeout) - waitForReplicas := NewRetryParams(reaper.pollInterval, timeout) - if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas); err != nil { - return err - } - } - - falseVar := false - deleteOptions := &api.DeleteOptions{OrphanDependents: &falseVar} - return rsc.Delete(name, deleteOptions) -} - -func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { - ds, err := reaper.client.DaemonSets(namespace).Get(name) - if err != nil { - return err - } - - // We set the nodeSelector to a random label. This label is nearly guaranteed - // to not be set on any node so the DameonSetController will start deleting - // daemon pods. Once it's done deleting the daemon pods, it's safe to delete - // the DaemonSet. - ds.Spec.Template.Spec.NodeSelector = map[string]string{ - string(uuid.NewUUID()): string(uuid.NewUUID()), - } - // force update to avoid version conflict - ds.ResourceVersion = "" - - if ds, err = reaper.client.DaemonSets(namespace).Update(ds); err != nil { - return err - } - - // Wait for the daemon set controller to kill all the daemon pods. - if err := wait.Poll(reaper.pollInterval, reaper.timeout, func() (bool, error) { - updatedDS, err := reaper.client.DaemonSets(namespace).Get(name) - if err != nil { - return false, nil - } - - return updatedDS.Status.CurrentNumberScheduled+updatedDS.Status.NumberMisscheduled == 0, nil - }); err != nil { - return err - } - - return reaper.client.DaemonSets(namespace).Delete(name, nil) -} - -func (reaper *StatefulSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { - statefulsets := reaper.client.StatefulSets(namespace) - scaler := &StatefulSetScaler{reaper.client} - ps, err := statefulsets.Get(name) - if err != nil { - return err - } - if timeout == 0 { - numPets := ps.Spec.Replicas - timeout = Timeout + time.Duration(10*numPets)*time.Second - } - retry := NewRetryParams(reaper.pollInterval, reaper.timeout) - waitForStatefulSet := NewRetryParams(reaper.pollInterval, reaper.timeout) - if err = scaler.Scale(namespace, name, 0, nil, retry, waitForStatefulSet); err != nil { - return err - } - - // TODO: This shouldn't be needed, see corresponding TODO in StatefulSetHasDesiredPets. - // StatefulSet should track generation number. - pods := reaper.podClient.Pods(namespace) - selector, _ := unversioned.LabelSelectorAsSelector(ps.Spec.Selector) - options := api.ListOptions{LabelSelector: selector} - podList, err := pods.List(options) - if err != nil { - return err - } - - errList := []error{} - for _, pod := range podList.Items { - if err := pods.Delete(pod.Name, gracePeriod); err != nil { - if !errors.IsNotFound(err) { - errList = append(errList, err) - } - } - } - if len(errList) > 0 { - return utilerrors.NewAggregate(errList) - } - - // TODO: Cleanup volumes? We don't want to accidentally delete volumes from - // stop, so just leave this up to the statefulset. 
- return statefulsets.Delete(name, nil) -} - -func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { - jobs := reaper.client.Jobs(namespace) - pods := reaper.podClient.Pods(namespace) - scaler := &JobScaler{reaper.client} - job, err := jobs.Get(name) - if err != nil { - return err - } - if timeout == 0 { - // we will never have more active pods than job.Spec.Parallelism - parallelism := *job.Spec.Parallelism - timeout = Timeout + time.Duration(10*parallelism)*time.Second - } - - // TODO: handle overlapping jobs - retry := NewRetryParams(reaper.pollInterval, reaper.timeout) - waitForJobs := NewRetryParams(reaper.pollInterval, timeout) - if err = scaler.Scale(namespace, name, 0, nil, retry, waitForJobs); err != nil { - return err - } - // at this point only dead pods are left, that should be removed - selector, _ := unversioned.LabelSelectorAsSelector(job.Spec.Selector) - options := api.ListOptions{LabelSelector: selector} - podList, err := pods.List(options) - if err != nil { - return err - } - errList := []error{} - for _, pod := range podList.Items { - if err := pods.Delete(pod.Name, gracePeriod); err != nil { - // ignores the error when the pod isn't found - if !errors.IsNotFound(err) { - errList = append(errList, err) - } - } - } - if len(errList) > 0 { - return utilerrors.NewAggregate(errList) - } - // once we have all the pods removed we can safely remove the job itself - return jobs.Delete(name, nil) -} - -func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { - deployments := reaper.dClient.Deployments(namespace) - replicaSets := reaper.rsClient.ReplicaSets(namespace) - rsReaper := &ReplicaSetReaper{reaper.rsClient, reaper.pollInterval, reaper.timeout} - - deployment, err := reaper.updateDeploymentWithRetries(namespace, name, func(d *extensions.Deployment) { - // set deployment's history and scale to 0 - // TODO replace with patch when available: https://github.com/kubernetes/kubernetes/issues/20527 - d.Spec.RevisionHistoryLimit = util.Int32Ptr(0) - d.Spec.Replicas = 0 - d.Spec.Paused = true - }) - if err != nil { - return err - } - - // Use observedGeneration to determine if the deployment controller noticed the pause. - if err := deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) { - return deployments.Get(name) - }, deployment.Generation, 1*time.Second, 1*time.Minute); err != nil { - return err - } - - // Do not cascade deletion for overlapping deployments. - if len(deployment.Annotations[deploymentutil.OverlapAnnotation]) > 0 { - return deployments.Delete(name, nil) - } - - // Stop all replica sets. - selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) - if err != nil { - return err - } - - options := api.ListOptions{LabelSelector: selector} - rsList, err := replicaSets.List(options) - if err != nil { - return err - } - errList := []error{} - for _, rc := range rsList.Items { - if err := rsReaper.Stop(rc.Namespace, rc.Name, timeout, gracePeriod); err != nil { - scaleGetErr, ok := err.(ScaleError) - if errors.IsNotFound(err) || (ok && errors.IsNotFound(scaleGetErr.ActualError)) { - continue - } - errList = append(errList, err) - } - } - if len(errList) > 0 { - return utilerrors.NewAggregate(errList) - } - - // Delete deployment at the end. - // Note: We delete deployment at the end so that if removing RSs fails, we at least have the deployment to retry. 
- return deployments.Delete(name, nil) -} - -type updateDeploymentFunc func(d *extensions.Deployment) - -func (reaper *DeploymentReaper) updateDeploymentWithRetries(namespace, name string, applyUpdate updateDeploymentFunc) (deployment *extensions.Deployment, err error) { - deployments := reaper.dClient.Deployments(namespace) - err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { - if deployment, err = deployments.Get(name); err != nil { - return false, err - } - // Apply the update, then attempt to push it to the apiserver. - applyUpdate(deployment) - if deployment, err = deployments.Update(deployment); err == nil { - return true, nil - } - // Retry only on update conflict. - if errors.IsConflict(err) { - return false, nil - } - return false, err - }) - return deployment, err -} - -func (reaper *PodReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { - pods := reaper.client.Pods(namespace) - _, err := pods.Get(name) - if err != nil { - return err - } - return pods.Delete(name, gracePeriod) -} - -func (reaper *ServiceReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { - services := reaper.client.Services(namespace) - _, err := services.Get(name) - if err != nil { - return err - } - return services.Delete(name, nil) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/BUILD deleted file mode 100644 index f505128a0a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/BUILD +++ /dev/null @@ -1,41 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "policy.go", - "qos.go", - "types.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/resource:go_default_library", - "//pkg/util/sets:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "policy_test.go", - "qos_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/resource:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go deleted file mode 100644 index ebc1cc598b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// package qos contains helper functions for quality of service. -// For each resource (memory, CPU) Kubelet supports three classes of containers. -// Memory guaranteed containers will receive the highest priority and will get all the resources -// they need. -// Burstable containers will be guaranteed their request and can “burst” and use more resources -// when available. 
-// Best-Effort containers, which don’t specify a request, can use resources only if not being used -// by other pods. -package qos diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go deleted file mode 100644 index 7c142f5cd1..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package qos - -import ( - "k8s.io/kubernetes/pkg/api" -) - -const ( - // PodInfraOOMAdj is very docker specific. For arbitrary runtime, it may not make - // sense to set sandbox level oom score, e.g. a sandbox could only be a namespace - // without a process. - // TODO: Handle infra container oom score adj in a runtime agnostic way. - PodInfraOOMAdj int = -998 - KubeletOOMScoreAdj int = -999 - DockerOOMScoreAdj int = -999 - KubeProxyOOMScoreAdj int = -999 - guaranteedOOMScoreAdj int = -998 - besteffortOOMScoreAdj int = 1000 -) - -// GetContainerOOMAdjust returns the amount by which the OOM score of all processes in the -// container should be adjusted. -// The OOM score of a process is the percentage of memory it consumes -// multiplied by 10 (barring exceptional cases) + a configurable quantity which is between -1000 -// and 1000. Containers with higher OOM scores are killed if the system runs out of memory. -// See https://lwn.net/Articles/391222/ for more information. -func GetContainerOOMScoreAdjust(pod *api.Pod, container *api.Container, memoryCapacity int64) int { - switch GetPodQOS(pod) { - case Guaranteed: - // Guaranteed containers should be the last to get killed. - return guaranteedOOMScoreAdj - case BestEffort: - return besteffortOOMScoreAdj - } - - // Burstable containers are a middle tier, between Guaranteed and Best-Effort. Ideally, - // we want to protect Burstable containers that consume less memory than requested. - // The formula below is a heuristic. A container requesting for 10% of a system's - // memory will have an OOM score adjust of 900. If a process in container Y - // uses over 10% of memory, its OOM score will be 1000. The idea is that containers - // which use more than their request will have an OOM score of 1000 and will be prime - // targets for OOM kills. - // Note that this is a heuristic, it won't work if a container has many small processes. - memoryRequest := container.Resources.Requests.Memory().Value() - oomScoreAdjust := 1000 - (1000*memoryRequest)/memoryCapacity - // A guaranteed pod using 100% of memory can have an OOM score of 10. Ensure - // that burstable pods have a higher OOM score adjustment. - if int(oomScoreAdjust) < (1000 + guaranteedOOMScoreAdj) { - return (1000 + guaranteedOOMScoreAdj) - } - // Give burstable pods a higher chance of survival over besteffort pods. 
- if int(oomScoreAdjust) == besteffortOOMScoreAdj { - return int(oomScoreAdjust - 1) - } - return int(oomScoreAdjust) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/qos.go b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/qos.go deleted file mode 100644 index 00e347f9ac..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/qos.go +++ /dev/null @@ -1,146 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package qos - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/util/sets" -) - -// isResourceGuaranteed returns true if the container's resource requirements are Guaranteed. -func isResourceGuaranteed(container *api.Container, resource api.ResourceName) bool { - // A container resource is guaranteed if its request == limit. - // If request == limit, the user is very confident of resource consumption. - req, hasReq := container.Resources.Requests[resource] - limit, hasLimit := container.Resources.Limits[resource] - if !hasReq || !hasLimit { - return false - } - return req.Cmp(limit) == 0 && req.Value() != 0 -} - -// isResourceBestEffort returns true if the container's resource requirements are best-effort. -func isResourceBestEffort(container *api.Container, resource api.ResourceName) bool { - // A container resource is best-effort if its request is unspecified or 0. - // If a request is specified, then the user expects some kind of resource guarantee. - req, hasReq := container.Resources.Requests[resource] - return !hasReq || req.Value() == 0 -} - -// GetPodQOS returns the QoS class of a pod. -// A pod is besteffort if none of its containers have specified any requests or limits. -// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal. -// A pod is burstable if limits and requests do not match across all containers. 
-func GetPodQOS(pod *api.Pod) QOSClass { - requests := api.ResourceList{} - limits := api.ResourceList{} - zeroQuantity := resource.MustParse("0") - isGuaranteed := true - for _, container := range pod.Spec.Containers { - // process requests - for name, quantity := range container.Resources.Requests { - if !supportedQoSComputeResources.Has(string(name)) { - continue - } - if quantity.Cmp(zeroQuantity) == 1 { - delta := quantity.Copy() - if _, exists := requests[name]; !exists { - requests[name] = *delta - } else { - delta.Add(requests[name]) - requests[name] = *delta - } - } - } - // process limits - qosLimitsFound := sets.NewString() - for name, quantity := range container.Resources.Limits { - if !supportedQoSComputeResources.Has(string(name)) { - continue - } - if quantity.Cmp(zeroQuantity) == 1 { - qosLimitsFound.Insert(string(name)) - delta := quantity.Copy() - if _, exists := limits[name]; !exists { - limits[name] = *delta - } else { - delta.Add(limits[name]) - limits[name] = *delta - } - } - } - - if len(qosLimitsFound) != len(supportedQoSComputeResources) { - isGuaranteed = false - } - } - if len(requests) == 0 && len(limits) == 0 { - return BestEffort - } - // Check is requests match limits for all resources. - if isGuaranteed { - for name, req := range requests { - if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 { - isGuaranteed = false - break - } - } - } - if isGuaranteed && - len(requests) == len(limits) { - return Guaranteed - } - return Burstable -} - -// QOSList is a set of (resource name, QoS class) pairs. -type QOSList map[api.ResourceName]QOSClass - -// GetQOS returns a mapping of resource name to QoS class of a container -func GetQOS(container *api.Container) QOSList { - resourceToQOS := QOSList{} - for resource := range allResources(container) { - switch { - case isResourceGuaranteed(container, resource): - resourceToQOS[resource] = Guaranteed - case isResourceBestEffort(container, resource): - resourceToQOS[resource] = BestEffort - default: - resourceToQOS[resource] = Burstable - } - } - return resourceToQOS -} - -// supportedComputeResources is the list of compute resources for with QoS is supported. -var supportedQoSComputeResources = sets.NewString(string(api.ResourceCPU), string(api.ResourceMemory)) - -// allResources returns a set of all possible resources whose mapped key value is true if present on the container -func allResources(container *api.Container) map[api.ResourceName]bool { - resources := map[api.ResourceName]bool{} - for _, resource := range supportedQoSComputeResources.List() { - resources[api.ResourceName(resource)] = false - } - for resource := range container.Resources.Requests { - resources[resource] = true - } - for resource := range container.Resources.Limits { - resources[resource] = true - } - return resources -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/types.go b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/types.go deleted file mode 100644 index e52dece45a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/types.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package qos - -// QOSClass defines the supported qos classes of Pods/Containers. -type QOSClass string - -const ( - // Guaranteed is the Guaranteed qos class. - Guaranteed QOSClass = "Guaranteed" - // Burstable is the Burstable qos class. - Burstable QOSClass = "Burstable" - // BestEffort is the BestEffort qos class. - BestEffort QOSClass = "BestEffort" -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/types/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/types/BUILD deleted file mode 100644 index 5f186f4ec9..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/types/BUILD +++ /dev/null @@ -1,38 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "constants.go", - "doc.go", - "labels.go", - "pod_update.go", - "types.go", - ], - tags = ["automanaged"], - deps = ["//pkg/api:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = [ - "pod_update_test.go", - "types_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//vendor:github.com/stretchr/testify/require", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/types/constants.go b/vendor/k8s.io/kubernetes/pkg/kubelet/types/constants.go deleted file mode 100644 index eeabba0174..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/types/constants.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package types - -const ( - // system default DNS resolver configuration - ResolvConfDefault = "/etc/resolv.conf" -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/types/doc.go b/vendor/k8s.io/kubernetes/pkg/kubelet/types/doc.go deleted file mode 100644 index 0d9efe50f0..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/types/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Common types in the Kubelet. 
-package types diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/types/labels.go b/vendor/k8s.io/kubernetes/pkg/kubelet/types/labels.go deleted file mode 100644 index c4dad6302e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/types/labels.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package types - -const ( - KubernetesPodNameLabel = "io.kubernetes.pod.name" - KubernetesPodNamespaceLabel = "io.kubernetes.pod.namespace" - KubernetesPodUIDLabel = "io.kubernetes.pod.uid" - KubernetesContainerNameLabel = "io.kubernetes.container.name" -) - -func GetContainerName(labels map[string]string) string { - return labels[KubernetesContainerNameLabel] -} - -func GetPodName(labels map[string]string) string { - return labels[KubernetesPodNameLabel] -} - -func GetPodUID(labels map[string]string) string { - return labels[KubernetesPodUIDLabel] -} - -func GetPodNamespace(labels map[string]string) string { - return labels[KubernetesPodNamespaceLabel] -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go b/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go deleted file mode 100644 index e98489df6b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package types - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" -) - -const ConfigSourceAnnotationKey = "kubernetes.io/config.source" -const ConfigMirrorAnnotationKey = "kubernetes.io/config.mirror" -const ConfigFirstSeenAnnotationKey = "kubernetes.io/config.seen" -const ConfigHashAnnotationKey = "kubernetes.io/config.hash" - -// PodOperation defines what changes will be made on a pod configuration. 
-type PodOperation int - -const ( - // This is the current pod configuration - SET PodOperation = iota - // Pods with the given ids are new to this source - ADD - // Pods with the given ids are gracefully deleted from this source - DELETE - // Pods with the given ids have been removed from this source - REMOVE - // Pods with the given ids have been updated in this source - UPDATE - // Pods with the given ids have unexpected status in this source, - // kubelet should reconcile status with this source - RECONCILE - - // These constants identify the sources of pods - // Updates from a file - FileSource = "file" - // Updates from querying a web page - HTTPSource = "http" - // Updates from Kubernetes API Server - ApiserverSource = "api" - // Updates from all sources - AllSource = "*" - - NamespaceDefault = api.NamespaceDefault -) - -// PodUpdate defines an operation sent on the channel. You can add or remove single services by -// sending an array of size one and Op == ADD|REMOVE (with REMOVE, only the ID is required). -// For setting the state of the system to a given state for this source configuration, set -// Pods as desired and Op to SET, which will reset the system state to that specified in this -// operation for this source channel. To remove all pods, set Pods to empty object and Op to SET. -// -// Additionally, Pods should never be nil - it should always point to an empty slice. While -// functionally similar, this helps our unit tests properly check that the correct PodUpdates -// are generated. -type PodUpdate struct { - Pods []*api.Pod - Op PodOperation - Source string -} - -// Gets all validated sources from the specified sources. -func GetValidatedSources(sources []string) ([]string, error) { - validated := make([]string, 0, len(sources)) - for _, source := range sources { - switch source { - case AllSource: - return []string{FileSource, HTTPSource, ApiserverSource}, nil - case FileSource, HTTPSource, ApiserverSource: - validated = append(validated, source) - break - case "": - break - default: - return []string{}, fmt.Errorf("unknown pod source %q", source) - } - } - return validated, nil -} - -// GetPodSource returns the source of the pod based on the annotation. -func GetPodSource(pod *api.Pod) (string, error) { - if pod.Annotations != nil { - if source, ok := pod.Annotations[ConfigSourceAnnotationKey]; ok { - return source, nil - } - } - return "", fmt.Errorf("cannot get source of pod %q", pod.UID) -} - -// SyncPodType classifies pod updates, eg: create, update. -type SyncPodType int - -const ( - // SyncPodSync is when the pod is synced to ensure desired state - SyncPodSync SyncPodType = iota - // SyncPodUpdate is when the pod is updated from source - SyncPodUpdate - // SyncPodCreate is when the pod is created from source - SyncPodCreate - // SyncPodKill is when the pod is killed based on a trigger internal to the kubelet for eviction. - // If a SyncPodKill request is made to pod workers, the request is never dropped, and will always be processed. 
- SyncPodKill -) - -func (sp SyncPodType) String() string { - switch sp { - case SyncPodCreate: - return "create" - case SyncPodUpdate: - return "update" - case SyncPodSync: - return "sync" - case SyncPodKill: - return "kill" - default: - return "unknown" - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/types/types.go b/vendor/k8s.io/kubernetes/pkg/kubelet/types/types.go deleted file mode 100644 index 017c3c8cb3..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/types/types.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package types - -import ( - "net/http" - "time" - - "k8s.io/kubernetes/pkg/api" -) - -// TODO: Reconcile custom types in kubelet/types and this subpackage - -type HttpGetter interface { - Get(url string) (*http.Response, error) -} - -// Timestamp wraps around time.Time and offers utilities to format and parse -// the time using RFC3339Nano -type Timestamp struct { - time time.Time -} - -// NewTimestamp returns a Timestamp object using the current time. -func NewTimestamp() *Timestamp { - return &Timestamp{time.Now()} -} - -// ConvertToTimestamp takes a string, parses it using the RFC3339Nano layout, -// and converts it to a Timestamp object. -func ConvertToTimestamp(timeString string) *Timestamp { - parsed, _ := time.Parse(time.RFC3339Nano, timeString) - return &Timestamp{parsed} -} - -// Get returns the time as time.Time. -func (t *Timestamp) Get() time.Time { - return t.time -} - -// GetString returns the time in the string format using the RFC3339Nano -// layout. -func (t *Timestamp) GetString() string { - return t.time.Format(time.RFC3339Nano) -} - -// A type to help sort container statuses based on container names. -type SortedContainerStatuses []api.ContainerStatus - -func (s SortedContainerStatuses) Len() int { return len(s) } -func (s SortedContainerStatuses) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -func (s SortedContainerStatuses) Less(i, j int) bool { - return s[i].Name < s[j].Name -} - -// SortInitContainerStatuses ensures that statuses are in the order that their -// init container appears in the pod spec -func SortInitContainerStatuses(p *api.Pod, statuses []api.ContainerStatus) { - containers := p.Spec.InitContainers - current := 0 - for _, container := range containers { - for j := current; j < len(statuses); j++ { - if container.Name == statuses[j].Name { - statuses[current], statuses[j] = statuses[j], statuses[current] - current++ - break - } - } - } -} - -// Reservation represents reserved resources for non-pod components. -type Reservation struct { - // System represents resources reserved for non-kubernetes components. - System api.ResourceList - // Kubernetes represents resources reserved for kubernetes system components. 
- Kubernetes api.ResourceList -} diff --git a/vendor/k8s.io/kubernetes/pkg/labels/BUILD b/vendor/k8s.io/kubernetes/pkg/labels/BUILD deleted file mode 100644 index 87412170b3..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/labels/BUILD +++ /dev/null @@ -1,41 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "labels.go", - "selector.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/selection:go_default_library", - "//pkg/util/sets:go_default_library", - "//pkg/util/validation:go_default_library", - "//vendor:github.com/golang/glog", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "labels_test.go", - "selector_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/selection:go_default_library", - "//pkg/util/sets:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/labels/selector.go b/vendor/k8s.io/kubernetes/pkg/labels/selector.go deleted file mode 100644 index 705f42477e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/labels/selector.go +++ /dev/null @@ -1,835 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package labels - -import ( - "bytes" - "fmt" - "sort" - "strconv" - "strings" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/selection" - "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/util/validation" -) - -// Requirements is AND of all requirements. -type Requirements []Requirement - -// Selector represents a label selector. -type Selector interface { - // Matches returns true if this selector matches the given set of labels. - Matches(Labels) bool - - // Empty returns true if this selector does not restrict the selection space. - Empty() bool - - // String returns a human readable string that represents this selector. - String() string - - // Add adds requirements to the Selector - Add(r ...Requirement) Selector - - // Requirements converts this interface into Requirements to expose - // more detailed selection information. - // If there are querying parameters, it will return converted requirements and selectable=true. - // If this selector doesn't want to select anything, it will return selectable=false. - Requirements() (requirements Requirements, selectable bool) -} - -// Everything returns a selector that matches all labels. 
-func Everything() Selector { - return internalSelector{} -} - -type nothingSelector struct{} - -func (n nothingSelector) Matches(_ Labels) bool { return false } -func (n nothingSelector) Empty() bool { return false } -func (n nothingSelector) String() string { return "" } -func (n nothingSelector) Add(_ ...Requirement) Selector { return n } -func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false } - -// Nothing returns a selector that matches no labels -func Nothing() Selector { - return nothingSelector{} -} - -func NewSelector() Selector { - return internalSelector(nil) -} - -type internalSelector []Requirement - -// Sort by key to obtain determisitic parser -type ByKey []Requirement - -func (a ByKey) Len() int { return len(a) } - -func (a ByKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -func (a ByKey) Less(i, j int) bool { return a[i].key < a[j].key } - -// Requirement contains values, a key, and an operator that relates the key and values. -// The zero value of Requirement is invalid. -// Requirement implements both set based match and exact match -// Requirement should be initialized via NewRequirement constructor for creating a valid Requirement. -type Requirement struct { - key string - operator selection.Operator - // In huge majority of cases we have at most one value here. - // It is generally faster to operate on a single-element slice - // than on a single-element map, so we have a slice here. - strValues []string -} - -// NewRequirement is the constructor for a Requirement. -// If any of these rules is violated, an error is returned: -// (1) The operator can only be In, NotIn, Equals, DoubleEquals, NotEquals, Exists, or DoesNotExist. -// (2) If the operator is In or NotIn, the values set must be non-empty. -// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. -// (4) If the operator is Exists or DoesNotExist, the value set must be empty. -// (5) If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. -// (6) The key is invalid due to its length, or sequence -// of characters. See validateLabelKey for more details. -// -// The empty string is a valid value in the input values set. 
-func NewRequirement(key string, op selection.Operator, vals []string) (*Requirement, error) { - if err := validateLabelKey(key); err != nil { - return nil, err - } - switch op { - case selection.In, selection.NotIn: - if len(vals) == 0 { - return nil, fmt.Errorf("for 'in', 'notin' operators, values set can't be empty") - } - case selection.Equals, selection.DoubleEquals, selection.NotEquals: - if len(vals) != 1 { - return nil, fmt.Errorf("exact-match compatibility requires one single value") - } - case selection.Exists, selection.DoesNotExist: - if len(vals) != 0 { - return nil, fmt.Errorf("values set must be empty for exists and does not exist") - } - case selection.GreaterThan, selection.LessThan: - if len(vals) != 1 { - return nil, fmt.Errorf("for 'Gt', 'Lt' operators, exactly one value is required") - } - for i := range vals { - if _, err := strconv.ParseInt(vals[i], 10, 64); err != nil { - return nil, fmt.Errorf("for 'Gt', 'Lt' operators, the value must be an integer") - } - } - default: - return nil, fmt.Errorf("operator '%v' is not recognized", op) - } - - for i := range vals { - if err := validateLabelValue(vals[i]); err != nil { - return nil, err - } - } - sort.Strings(vals) - return &Requirement{key: key, operator: op, strValues: vals}, nil -} - -func (r *Requirement) hasValue(value string) bool { - for i := range r.strValues { - if r.strValues[i] == value { - return true - } - } - return false -} - -// Matches returns true if the Requirement matches the input Labels. -// There is a match in the following cases: -// (1) The operator is Exists and Labels has the Requirement's key. -// (2) The operator is In, Labels has the Requirement's key and Labels' -// value for that key is in Requirement's value set. -// (3) The operator is NotIn, Labels has the Requirement's key and -// Labels' value for that key is not in Requirement's value set. -// (4) The operator is DoesNotExist or NotIn and Labels does not have the -// Requirement's key. -// (5) The operator is GreaterThanOperator or LessThanOperator, and Labels has -// the Requirement's key and the corresponding value satisfies mathematical inequality. -func (r *Requirement) Matches(ls Labels) bool { - switch r.operator { - case selection.In, selection.Equals, selection.DoubleEquals: - if !ls.Has(r.key) { - return false - } - return r.hasValue(ls.Get(r.key)) - case selection.NotIn, selection.NotEquals: - if !ls.Has(r.key) { - return true - } - return !r.hasValue(ls.Get(r.key)) - case selection.Exists: - return ls.Has(r.key) - case selection.DoesNotExist: - return !ls.Has(r.key) - case selection.GreaterThan, selection.LessThan: - if !ls.Has(r.key) { - return false - } - lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64) - if err != nil { - glog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) - return false - } - - // There should be only one strValue in r.strValues, and can be converted to a integer. 
- if len(r.strValues) != 1 { - glog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) - return false - } - - var rValue int64 - for i := range r.strValues { - rValue, err = strconv.ParseInt(r.strValues[i], 10, 64) - if err != nil { - glog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) - return false - } - } - return (r.operator == selection.GreaterThan && lsValue > rValue) || (r.operator == selection.LessThan && lsValue < rValue) - default: - return false - } -} - -func (r *Requirement) Key() string { - return r.key -} -func (r *Requirement) Operator() selection.Operator { - return r.operator -} -func (r *Requirement) Values() sets.String { - ret := sets.String{} - for i := range r.strValues { - ret.Insert(r.strValues[i]) - } - return ret -} - -// Return true if the internalSelector doesn't restrict selection space -func (lsel internalSelector) Empty() bool { - if lsel == nil { - return true - } - return len(lsel) == 0 -} - -// String returns a human-readable string that represents this -// Requirement. If called on an invalid Requirement, an error is -// returned. See NewRequirement for creating a valid Requirement. -func (r *Requirement) String() string { - var buffer bytes.Buffer - if r.operator == selection.DoesNotExist { - buffer.WriteString("!") - } - buffer.WriteString(r.key) - - switch r.operator { - case selection.Equals: - buffer.WriteString("=") - case selection.DoubleEquals: - buffer.WriteString("==") - case selection.NotEquals: - buffer.WriteString("!=") - case selection.In: - buffer.WriteString(" in ") - case selection.NotIn: - buffer.WriteString(" notin ") - case selection.GreaterThan: - buffer.WriteString(">") - case selection.LessThan: - buffer.WriteString("<") - case selection.Exists, selection.DoesNotExist: - return buffer.String() - } - - switch r.operator { - case selection.In, selection.NotIn: - buffer.WriteString("(") - } - if len(r.strValues) == 1 { - buffer.WriteString(r.strValues[0]) - } else { // only > 1 since == 0 prohibited by NewRequirement - buffer.WriteString(strings.Join(r.strValues, ",")) - } - - switch r.operator { - case selection.In, selection.NotIn: - buffer.WriteString(")") - } - return buffer.String() -} - -// Add adds requirements to the selector. It copies the current selector returning a new one -func (lsel internalSelector) Add(reqs ...Requirement) Selector { - var sel internalSelector - for ix := range lsel { - sel = append(sel, lsel[ix]) - } - for _, r := range reqs { - sel = append(sel, r) - } - sort.Sort(ByKey(sel)) - return sel -} - -// Matches for a internalSelector returns true if all -// its Requirements match the input Labels. If any -// Requirement does not match, false is returned. -func (lsel internalSelector) Matches(l Labels) bool { - for ix := range lsel { - if matches := lsel[ix].Matches(l); !matches { - return false - } - } - return true -} - -func (lsel internalSelector) Requirements() (Requirements, bool) { return Requirements(lsel), true } - -// String returns a comma-separated string of all -// the internalSelector Requirements' human-readable strings. 
-func (lsel internalSelector) String() string { - var reqs []string - for ix := range lsel { - reqs = append(reqs, lsel[ix].String()) - } - return strings.Join(reqs, ",") -} - -// constants definition for lexer token -type Token int - -const ( - ErrorToken Token = iota - EndOfStringToken - ClosedParToken - CommaToken - DoesNotExistToken - DoubleEqualsToken - EqualsToken - GreaterThanToken - IdentifierToken // to represent keys and values - InToken - LessThanToken - NotEqualsToken - NotInToken - OpenParToken -) - -// string2token contains the mapping between lexer Token and token literal -// (except IdentifierToken, EndOfStringToken and ErrorToken since it makes no sense) -var string2token = map[string]Token{ - ")": ClosedParToken, - ",": CommaToken, - "!": DoesNotExistToken, - "==": DoubleEqualsToken, - "=": EqualsToken, - ">": GreaterThanToken, - "in": InToken, - "<": LessThanToken, - "!=": NotEqualsToken, - "notin": NotInToken, - "(": OpenParToken, -} - -// The item produced by the lexer. It contains the Token and the literal. -type ScannedItem struct { - tok Token - literal string -} - -// isWhitespace returns true if the rune is a space, tab, or newline. -func isWhitespace(ch byte) bool { - return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n' -} - -// isSpecialSymbol detect if the character ch can be an operator -func isSpecialSymbol(ch byte) bool { - switch ch { - case '=', '!', '(', ')', ',', '>', '<': - return true - } - return false -} - -// Lexer represents the Lexer struct for label selector. -// It contains necessary informationt to tokenize the input string -type Lexer struct { - // s stores the string to be tokenized - s string - // pos is the position currently tokenized - pos int -} - -// read return the character currently lexed -// increment the position and check the buffer overflow -func (l *Lexer) read() (b byte) { - b = 0 - if l.pos < len(l.s) { - b = l.s[l.pos] - l.pos++ - } - return b -} - -// unread 'undoes' the last read character -func (l *Lexer) unread() { - l.pos-- -} - -// scanIdOrKeyword scans string to recognize literal token (for example 'in') or an identifier. -func (l *Lexer) scanIdOrKeyword() (tok Token, lit string) { - var buffer []byte -IdentifierLoop: - for { - switch ch := l.read(); { - case ch == 0: - break IdentifierLoop - case isSpecialSymbol(ch) || isWhitespace(ch): - l.unread() - break IdentifierLoop - default: - buffer = append(buffer, ch) - } - } - s := string(buffer) - if val, ok := string2token[s]; ok { // is a literal token? - return val, s - } - return IdentifierToken, s // otherwise is an identifier -} - -// scanSpecialSymbol scans string starting with special symbol. -// special symbol identify non literal operators. 
"!=", "==", "=" -func (l *Lexer) scanSpecialSymbol() (Token, string) { - lastScannedItem := ScannedItem{} - var buffer []byte -SpecialSymbolLoop: - for { - switch ch := l.read(); { - case ch == 0: - break SpecialSymbolLoop - case isSpecialSymbol(ch): - buffer = append(buffer, ch) - if token, ok := string2token[string(buffer)]; ok { - lastScannedItem = ScannedItem{tok: token, literal: string(buffer)} - } else if lastScannedItem.tok != 0 { - l.unread() - break SpecialSymbolLoop - } - default: - l.unread() - break SpecialSymbolLoop - } - } - if lastScannedItem.tok == 0 { - return ErrorToken, fmt.Sprintf("error expected: keyword found '%s'", buffer) - } - return lastScannedItem.tok, lastScannedItem.literal -} - -// skipWhiteSpaces consumes all blank characters -// returning the first non blank character -func (l *Lexer) skipWhiteSpaces(ch byte) byte { - for { - if !isWhitespace(ch) { - return ch - } - ch = l.read() - } -} - -// Lex returns a pair of Token and the literal -// literal is meaningfull only for IdentifierToken token -func (l *Lexer) Lex() (tok Token, lit string) { - switch ch := l.skipWhiteSpaces(l.read()); { - case ch == 0: - return EndOfStringToken, "" - case isSpecialSymbol(ch): - l.unread() - return l.scanSpecialSymbol() - default: - l.unread() - return l.scanIdOrKeyword() - } -} - -// Parser data structure contains the label selector parser data structure -type Parser struct { - l *Lexer - scannedItems []ScannedItem - position int -} - -// Parser context represents context during parsing: -// some literal for example 'in' and 'notin' can be -// recognized as operator for example 'x in (a)' but -// it can be recognized as value for example 'value in (in)' -type ParserContext int - -const ( - KeyAndOperator ParserContext = iota - Values -) - -// lookahead func returns the current token and string. No increment of current position -func (p *Parser) lookahead(context ParserContext) (Token, string) { - tok, lit := p.scannedItems[p.position].tok, p.scannedItems[p.position].literal - if context == Values { - switch tok { - case InToken, NotInToken: - tok = IdentifierToken - } - } - return tok, lit -} - -// consume returns current token and string. Increments the the position -func (p *Parser) consume(context ParserContext) (Token, string) { - p.position++ - tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal - if context == Values { - switch tok { - case InToken, NotInToken: - tok = IdentifierToken - } - } - return tok, lit -} - -// scan runs through the input string and stores the ScannedItem in an array -// Parser can now lookahead and consume the tokens -func (p *Parser) scan() { - for { - token, literal := p.l.Lex() - p.scannedItems = append(p.scannedItems, ScannedItem{token, literal}) - if token == EndOfStringToken { - break - } - } -} - -// parse runs the left recursive descending algorithm -// on input string. It returns a list of Requirement objects. 
-func (p *Parser) parse() (internalSelector, error) { - p.scan() // init scannedItems - - var requirements internalSelector - for { - tok, lit := p.lookahead(Values) - switch tok { - case IdentifierToken, DoesNotExistToken: - r, err := p.parseRequirement() - if err != nil { - return nil, fmt.Errorf("unable to parse requirement: %v", err) - } - requirements = append(requirements, *r) - t, l := p.consume(Values) - switch t { - case EndOfStringToken: - return requirements, nil - case CommaToken: - t2, l2 := p.lookahead(Values) - if t2 != IdentifierToken && t2 != DoesNotExistToken { - return nil, fmt.Errorf("found '%s', expected: identifier after ','", l2) - } - default: - return nil, fmt.Errorf("found '%s', expected: ',' or 'end of string'", l) - } - case EndOfStringToken: - return requirements, nil - default: - return nil, fmt.Errorf("found '%s', expected: !, identifier, or 'end of string'", lit) - } - } -} - -func (p *Parser) parseRequirement() (*Requirement, error) { - key, operator, err := p.parseKeyAndInferOperator() - if err != nil { - return nil, err - } - if operator == selection.Exists || operator == selection.DoesNotExist { // operator found lookahead set checked - return NewRequirement(key, operator, []string{}) - } - operator, err = p.parseOperator() - if err != nil { - return nil, err - } - var values sets.String - switch operator { - case selection.In, selection.NotIn: - values, err = p.parseValues() - case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.GreaterThan, selection.LessThan: - values, err = p.parseExactValue() - } - if err != nil { - return nil, err - } - return NewRequirement(key, operator, values.List()) - -} - -// parseKeyAndInferOperator parse literals. -// in case of no operator '!, in, notin, ==, =, !=' are found -// the 'exists' operator is inferred -func (p *Parser) parseKeyAndInferOperator() (string, selection.Operator, error) { - var operator selection.Operator - tok, literal := p.consume(Values) - if tok == DoesNotExistToken { - operator = selection.DoesNotExist - tok, literal = p.consume(Values) - } - if tok != IdentifierToken { - err := fmt.Errorf("found '%s', expected: identifier", literal) - return "", "", err - } - if err := validateLabelKey(literal); err != nil { - return "", "", err - } - if t, _ := p.lookahead(Values); t == EndOfStringToken || t == CommaToken { - if operator != selection.DoesNotExist { - operator = selection.Exists - } - } - return literal, operator, nil -} - -// parseOperator return operator and eventually matchType -// matchType can be exact -func (p *Parser) parseOperator() (op selection.Operator, err error) { - tok, lit := p.consume(KeyAndOperator) - switch tok { - // DoesNotExistToken shouldn't be here because it's a unary operator, not a binary operator - case InToken: - op = selection.In - case EqualsToken: - op = selection.Equals - case DoubleEqualsToken: - op = selection.DoubleEquals - case GreaterThanToken: - op = selection.GreaterThan - case LessThanToken: - op = selection.LessThan - case NotInToken: - op = selection.NotIn - case NotEqualsToken: - op = selection.NotEquals - default: - return "", fmt.Errorf("found '%s', expected: '=', '!=', '==', 'in', notin'", lit) - } - return op, nil -} - -// parseValues parses the values for set based matching (x,y,z) -func (p *Parser) parseValues() (sets.String, error) { - tok, lit := p.consume(Values) - if tok != OpenParToken { - return nil, fmt.Errorf("found '%s' expected: '('", lit) - } - tok, lit = p.lookahead(Values) - switch tok { - case 
IdentifierToken, CommaToken:
- s, err := p.parseIdentifiersList() // handles general cases
- if err != nil {
- return s, err
- }
- if tok, _ = p.consume(Values); tok != ClosedParToken {
- return nil, fmt.Errorf("found '%s', expected: ')'", lit)
- }
- return s, nil
- case ClosedParToken: // handles "()"
- p.consume(Values)
- return sets.NewString(""), nil
- default:
- return nil, fmt.Errorf("found '%s', expected: ',', ')' or identifier", lit)
- }
-}
-
-// parseIdentifiersList parses a (possibly empty) list of
-// of comma separated (possibly empty) identifiers
-func (p *Parser) parseIdentifiersList() (sets.String, error) {
- s := sets.NewString()
- for {
- tok, lit := p.consume(Values)
- switch tok {
- case IdentifierToken:
- s.Insert(lit)
- tok2, lit2 := p.lookahead(Values)
- switch tok2 {
- case CommaToken:
- continue
- case ClosedParToken:
- return s, nil
- default:
- return nil, fmt.Errorf("found '%s', expected: ',' or ')'", lit2)
- }
- case CommaToken: // handled here since we can have "(,"
- if s.Len() == 0 {
- s.Insert("") // to handle (,
- }
- tok2, _ := p.lookahead(Values)
- if tok2 == ClosedParToken {
- s.Insert("") // to handle ,) Double "" removed by StringSet
- return s, nil
- }
- if tok2 == CommaToken {
- p.consume(Values)
- s.Insert("") // to handle ,, Double "" removed by StringSet
- }
- default: // it can be operator
- return s, fmt.Errorf("found '%s', expected: ',', or identifier", lit)
- }
- }
-}
-
-// parseExactValue parses the only value for exact match style
-func (p *Parser) parseExactValue() (sets.String, error) {
- s := sets.NewString()
- tok, lit := p.lookahead(Values)
- if tok == EndOfStringToken || tok == CommaToken {
- s.Insert("")
- return s, nil
- }
- tok, lit = p.consume(Values)
- if tok == IdentifierToken {
- s.Insert(lit)
- return s, nil
- }
- return nil, fmt.Errorf("found '%s', expected: identifier", lit)
-}
-
-// Parse takes a string representing a selector and returns a selector
-// object, or an error. This parsing function differs from ParseSelector
-// as they parse different selectors with different syntaxes.
-// The input will cause an error if it does not follow this form:
-//
-// <selector-syntax> ::= <requirement> | <requirement> "," <selector-syntax> ]
-// <requirement> ::= [!] KEY [ <set-based-restriction> | <exact-match-restriction> ]
-// <set-based-restriction> ::= "" | <inclusion-exclusion> <value-set>
-// <inclusion-exclusion> ::= <inclusion> | <exclusion>
-// <exclusion> ::= "notin"
-// <inclusion> ::= "in"
-// <value-set> ::= "(" <values> ")"
-// <values> ::= VALUE | VALUE "," <values>
-// <exact-match-restriction> ::= ["="|"=="|"!="] VALUE
-// KEY is a sequence of one or more characters following [ DNS_SUBDOMAIN "/" ] DNS_LABEL. Max length is 63 characters.
-// VALUE is a sequence of zero or more characters "([A-Za-z0-9_-\.])". Max length is 63 characters.
-// Delimiter is white space: (' ', '\t')
-// Example of valid syntax:
-// "x in (foo,,baz),y,z notin ()"
-//
-// Note:
-// (1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the
-// VALUEs in its requirement
-// (2) Exclusion - " notin " - denotes that the KEY is not equal to any
-// of the VALUEs in its requirement or does not exist
-// (3) The empty string is a valid VALUE
-// (4) A requirement with just a KEY - as in "y" above - denotes that
-// the KEY exists and can be any VALUE.
-// (5) A requirement with just !KEY requires that the KEY not exist.
-//
-func Parse(selector string) (Selector, error) {
- parsedSelector, err := parse(selector)
- if err == nil {
- return parsedSelector, nil
- }
- return nil, err
-}
-
-// parse parses the string representation of the selector and returns the internalSelector struct.
-// The callers of this method can then decide how to return the internalSelector struct to their
-// callers.
This function has two callers now, one returns a Selector interface and the other -// returns a list of requirements. -func parse(selector string) (internalSelector, error) { - p := &Parser{l: &Lexer{s: selector, pos: 0}} - items, err := p.parse() - if err != nil { - return nil, err - } - sort.Sort(ByKey(items)) // sort to grant determistic parsing - return internalSelector(items), err -} - -func validateLabelKey(k string) error { - if errs := validation.IsQualifiedName(k); len(errs) != 0 { - return fmt.Errorf("invalid label key %q: %s", k, strings.Join(errs, "; ")) - } - return nil -} - -func validateLabelValue(v string) error { - if errs := validation.IsValidLabelValue(v); len(errs) != 0 { - return fmt.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; ")) - } - return nil -} - -// SelectorFromSet returns a Selector which will match exactly the given Set. A -// nil and empty Sets are considered equivalent to Everything(). -func SelectorFromSet(ls Set) Selector { - if ls == nil { - return internalSelector{} - } - var requirements internalSelector - for label, value := range ls { - if r, err := NewRequirement(label, selection.Equals, []string{value}); err != nil { - //TODO: double check errors when input comes from serialization? - return internalSelector{} - } else { - requirements = append(requirements, *r) - } - } - // sort to have deterministic string representation - sort.Sort(ByKey(requirements)) - return internalSelector(requirements) -} - -// SelectorFromValidatedSet returns a Selector which will match exactly the given Set. -// A nil and empty Sets are considered equivalent to Everything(). -// It assumes that Set is already validated and doesn't do any validation. -func SelectorFromValidatedSet(ls Set) Selector { - if ls == nil { - return internalSelector{} - } - var requirements internalSelector - for label, value := range ls { - requirements = append(requirements, Requirement{key: label, operator: selection.Equals, strValues: []string{value}}) - } - // sort to have deterministic string representation - sort.Sort(ByKey(requirements)) - return internalSelector(requirements) -} - -// ParseToRequirements takes a string representing a selector and returns a list of -// requirements. This function is suitable for those callers that perform additional -// processing on selector requirements. -// See the documentation for Parse() function for more details. -// TODO: Consider exporting the internalSelector type instead. -func ParseToRequirements(selector string) ([]Requirement, error) { - return parse(selector) -} diff --git a/vendor/k8s.io/kubernetes/pkg/master/ports/BUILD b/vendor/k8s.io/kubernetes/pkg/master/ports/BUILD deleted file mode 100644 index 60e0461513..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/master/ports/BUILD +++ /dev/null @@ -1,20 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "ports.go", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/master/ports/doc.go b/vendor/k8s.io/kubernetes/pkg/master/ports/doc.go deleted file mode 100644 index a2a002101c..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/master/ports/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package ports defines ports used by various pieces of the kubernetes -// infrastructure. -package ports diff --git a/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go b/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go deleted file mode 100644 index 6aa97963d5..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ports - -const ( - // ProxyPort is the default port for the proxy healthz server. - // May be overridden by a flag at startup. - ProxyStatusPort = 10249 - // KubeletPort is the default port for the kubelet server on each host machine. - // May be overridden by a flag at startup. - KubeletPort = 10250 - // SchedulerPort is the default port for the scheduler status server. - // May be overridden by a flag at startup. - SchedulerPort = 10251 - // ControllerManagerPort is the default port for the controller manager status server. - // May be overridden by a flag at startup. - ControllerManagerPort = 10252 - // KubeletReadOnlyPort exposes basic read-only services from the kubelet. - // May be overridden by a flag at startup. - // This is necessary for heapster to collect monitoring stats from the kubelet - // until heapster can transition to using the SSL endpoint. - // TODO(roberthbailey): Remove this once we have a better solution for heapster. 
- KubeletReadOnlyPort = 10255 -) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/BUILD deleted file mode 100644 index 71bbf8d36c..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/BUILD +++ /dev/null @@ -1,65 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "codec.go", - "doc.go", - "registry.go", - "strategy.go", - "util.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/rest:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/util:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/apis/extensions/v1beta1:go_default_library", - "//pkg/apis/extensions/validation:go_default_library", - "//pkg/fields:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/storage:go_default_library", - "//pkg/util/validation/field:go_default_library", - "//pkg/util/yaml:go_default_library", - "//pkg/watch:go_default_library", - "//pkg/watch/versioned:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "codec_test.go", - "strategy_test.go", - "util_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/testapi:go_default_library", - "//pkg/api/testing:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/watch/versioned:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/codec.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/codec.go deleted file mode 100644 index a64ad974e6..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/codec.go +++ /dev/null @@ -1,596 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package thirdpartyresourcedata - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/url" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - apiutil "k8s.io/kubernetes/pkg/api/util" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/yaml" - "k8s.io/kubernetes/pkg/watch/versioned" -) - -type thirdPartyObjectConverter struct { - converter runtime.ObjectConvertor -} - -func (t *thirdPartyObjectConverter) ConvertToVersion(in runtime.Object, outVersion runtime.GroupVersioner) (out runtime.Object, err error) { - switch in.(type) { - // This seems weird, but in this case the ThirdPartyResourceData is really just a wrapper on the raw 3rd party data. - // The actual thing printed/sent to server is the actual raw third party resource data, which only has one version. - case *extensions.ThirdPartyResourceData: - return in, nil - default: - return t.converter.ConvertToVersion(in, outVersion) - } -} - -func (t *thirdPartyObjectConverter) Convert(in, out, context interface{}) error { - return t.converter.Convert(in, out, context) -} - -func (t *thirdPartyObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { - return t.converter.ConvertFieldLabel(version, kind, label, value) -} - -func NewThirdPartyObjectConverter(converter runtime.ObjectConvertor) runtime.ObjectConvertor { - return &thirdPartyObjectConverter{converter} -} - -type thirdPartyResourceDataMapper struct { - mapper meta.RESTMapper - kind string - version string - group string -} - -var _ meta.RESTMapper = &thirdPartyResourceDataMapper{} - -func (t *thirdPartyResourceDataMapper) getResource() unversioned.GroupVersionResource { - plural, _ := meta.KindToResource(t.getKind()) - - return plural -} - -func (t *thirdPartyResourceDataMapper) getKind() unversioned.GroupVersionKind { - return unversioned.GroupVersionKind{Group: t.group, Version: t.version, Kind: t.kind} -} - -func (t *thirdPartyResourceDataMapper) isThirdPartyResource(partialResource unversioned.GroupVersionResource) bool { - actualResource := t.getResource() - if strings.ToLower(partialResource.Resource) != strings.ToLower(actualResource.Resource) { - return false - } - if len(partialResource.Group) != 0 && partialResource.Group != actualResource.Group { - return false - } - if len(partialResource.Version) != 0 && partialResource.Version != actualResource.Version { - return false - } - - return true -} - -func (t *thirdPartyResourceDataMapper) ResourcesFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) { - if t.isThirdPartyResource(resource) { - return []unversioned.GroupVersionResource{t.getResource()}, nil - } - return t.mapper.ResourcesFor(resource) -} - -func (t *thirdPartyResourceDataMapper) KindsFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionKind, error) { - if t.isThirdPartyResource(resource) { - return []unversioned.GroupVersionKind{t.getKind()}, nil - } - return t.mapper.KindsFor(resource) -} - -func (t *thirdPartyResourceDataMapper) ResourceFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error) { - if t.isThirdPartyResource(resource) { - return t.getResource(), nil - } - return t.mapper.ResourceFor(resource) -} - -func (t *thirdPartyResourceDataMapper) 
KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) { - if t.isThirdPartyResource(resource) { - return t.getKind(), nil - } - return t.mapper.KindFor(resource) -} - -func (t *thirdPartyResourceDataMapper) RESTMapping(gk unversioned.GroupKind, versions ...string) (*meta.RESTMapping, error) { - if len(versions) != 1 { - return nil, fmt.Errorf("unexpected set of versions: %v", versions) - } - if gk.Group != t.group { - return nil, fmt.Errorf("unknown group %q expected %s", gk.Group, t.group) - } - if gk.Kind != "ThirdPartyResourceData" { - return nil, fmt.Errorf("unknown kind %s expected %s", gk.Kind, t.kind) - } - if versions[0] != t.version { - return nil, fmt.Errorf("unknown version %q expected %q", versions[0], t.version) - } - - // TODO figure out why we're doing this rewriting - extensionGK := unversioned.GroupKind{Group: extensions.GroupName, Kind: "ThirdPartyResourceData"} - - mapping, err := t.mapper.RESTMapping(extensionGK, registered.GroupOrDie(extensions.GroupName).GroupVersion.Version) - if err != nil { - return nil, err - } - mapping.ObjectConvertor = &thirdPartyObjectConverter{mapping.ObjectConvertor} - return mapping, nil -} - -func (t *thirdPartyResourceDataMapper) RESTMappings(gk unversioned.GroupKind) ([]*meta.RESTMapping, error) { - if gk.Group != t.group { - return nil, fmt.Errorf("unknown group %q expected %s", gk.Group, t.group) - } - if gk.Kind != "ThirdPartyResourceData" { - return nil, fmt.Errorf("unknown kind %s expected %s", gk.Kind, t.kind) - } - - // TODO figure out why we're doing this rewriting - extensionGK := unversioned.GroupKind{Group: extensions.GroupName, Kind: "ThirdPartyResourceData"} - - mappings, err := t.mapper.RESTMappings(extensionGK) - if err != nil { - return nil, err - } - for _, m := range mappings { - m.ObjectConvertor = &thirdPartyObjectConverter{m.ObjectConvertor} - } - return mappings, nil -} - -func (t *thirdPartyResourceDataMapper) AliasesForResource(resource string) ([]string, bool) { - return t.mapper.AliasesForResource(resource) -} - -func (t *thirdPartyResourceDataMapper) ResourceSingularizer(resource string) (singular string, err error) { - return t.mapper.ResourceSingularizer(resource) -} - -func NewMapper(mapper meta.RESTMapper, kind, version, group string) meta.RESTMapper { - return &thirdPartyResourceDataMapper{ - mapper: mapper, - kind: kind, - version: version, - group: group, - } -} - -type thirdPartyResourceDataCodecFactory struct { - delegate runtime.NegotiatedSerializer - kind string - encodeGV unversioned.GroupVersion - decodeGV unversioned.GroupVersion -} - -func NewNegotiatedSerializer(s runtime.NegotiatedSerializer, kind string, encodeGV, decodeGV unversioned.GroupVersion) runtime.NegotiatedSerializer { - return &thirdPartyResourceDataCodecFactory{ - delegate: s, - kind: kind, - encodeGV: encodeGV, - decodeGV: decodeGV, - } -} - -func (t *thirdPartyResourceDataCodecFactory) SupportedMediaTypes() []runtime.SerializerInfo { - for _, info := range t.delegate.SupportedMediaTypes() { - if info.MediaType == runtime.ContentTypeJSON { - return []runtime.SerializerInfo{info} - } - } - return nil -} - -func (t *thirdPartyResourceDataCodecFactory) EncoderForVersion(s runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { - return &thirdPartyResourceDataEncoder{delegate: t.delegate.EncoderForVersion(s, gv), gvk: t.encodeGV.WithKind(t.kind)} -} - -func (t *thirdPartyResourceDataCodecFactory) DecoderToVersion(s runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder { - return 
NewDecoder(t.delegate.DecoderToVersion(s, gv), t.kind) -} - -func NewCodec(delegate runtime.Codec, gvk unversioned.GroupVersionKind) runtime.Codec { - return runtime.NewCodec(NewEncoder(delegate, gvk), NewDecoder(delegate, gvk.Kind)) -} - -type thirdPartyResourceDataDecoder struct { - delegate runtime.Decoder - kind string -} - -func NewDecoder(delegate runtime.Decoder, kind string) runtime.Decoder { - return &thirdPartyResourceDataDecoder{delegate: delegate, kind: kind} -} - -var _ runtime.Decoder = &thirdPartyResourceDataDecoder{} - -func parseObject(data []byte) (map[string]interface{}, error) { - var obj interface{} - if err := json.Unmarshal(data, &obj); err != nil { - return nil, err - } - mapObj, ok := obj.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("unexpected object: %#v", obj) - } - return mapObj, nil -} - -func (t *thirdPartyResourceDataDecoder) populate(data []byte) (runtime.Object, *unversioned.GroupVersionKind, error) { - mapObj, err := parseObject(data) - if err != nil { - return nil, nil, err - } - return t.populateFromObject(mapObj, data) -} - -func (t *thirdPartyResourceDataDecoder) populateFromObject(mapObj map[string]interface{}, data []byte) (runtime.Object, *unversioned.GroupVersionKind, error) { - typeMeta := unversioned.TypeMeta{} - if err := json.Unmarshal(data, &typeMeta); err != nil { - return nil, nil, err - } - - gv, err := unversioned.ParseGroupVersion(typeMeta.APIVersion) - if err != nil { - return nil, nil, err - } - gvk := gv.WithKind(typeMeta.Kind) - - isList := strings.HasSuffix(typeMeta.Kind, "List") - switch { - case !isList && (len(t.kind) == 0 || typeMeta.Kind == t.kind): - result := &extensions.ThirdPartyResourceData{} - if err := t.populateResource(result, mapObj, data); err != nil { - return nil, nil, err - } - return result, &gvk, nil - case isList && (len(t.kind) == 0 || typeMeta.Kind == t.kind+"List"): - list := &extensions.ThirdPartyResourceDataList{} - if err := t.populateListResource(list, mapObj); err != nil { - return nil, nil, err - } - return list, &gvk, nil - default: - return nil, nil, fmt.Errorf("unexpected kind: %s, expected %s", typeMeta.Kind, t.kind) - } -} - -func (t *thirdPartyResourceDataDecoder) populateResource(objIn *extensions.ThirdPartyResourceData, mapObj map[string]interface{}, data []byte) error { - metadata, ok := mapObj["metadata"].(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected object for metadata: %#v", mapObj["metadata"]) - } - - metadataData, err := json.Marshal(metadata) - if err != nil { - return err - } - - if err := json.Unmarshal(metadataData, &objIn.ObjectMeta); err != nil { - return err - } - // Override API Version with the ThirdPartyResourceData value - // TODO: fix this hard code - objIn.APIVersion = v1beta1.SchemeGroupVersion.String() - - objIn.Data = data - return nil -} - -func IsThirdPartyObject(rawData []byte, gvk *unversioned.GroupVersionKind) (isThirdParty bool, gvkOut *unversioned.GroupVersionKind, err error) { - var gv unversioned.GroupVersion - if gvk == nil { - data, err := yaml.ToJSON(rawData) - if err != nil { - return false, nil, err - } - metadata := unversioned.TypeMeta{} - if err = json.Unmarshal(data, &metadata); err != nil { - return false, nil, err - } - gv, err = unversioned.ParseGroupVersion(metadata.APIVersion) - if err != nil { - return false, nil, err - } - gvkOut = &unversioned.GroupVersionKind{ - Group: gv.Group, - Version: gv.Version, - Kind: metadata.Kind, - } - } else { - gv = gvk.GroupVersion() - gvkOut = gvk - } - return 
registered.IsThirdPartyAPIGroupVersion(gv), gvkOut, nil -} - -func (t *thirdPartyResourceDataDecoder) Decode(data []byte, gvk *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { - if into == nil { - if gvk == nil || gvk.Kind != t.kind { - if isThirdParty, _, err := IsThirdPartyObject(data, gvk); err != nil { - return nil, nil, err - } else if !isThirdParty { - return t.delegate.Decode(data, gvk, into) - } - } - return t.populate(data) - } - switch o := into.(type) { - case *extensions.ThirdPartyResourceData: - break - case *runtime.VersionedObjects: - // We're not sure that it's third party, we need to test - if gvk == nil || gvk.Kind != t.kind { - if isThirdParty, _, err := IsThirdPartyObject(data, gvk); err != nil { - return nil, nil, err - } else if !isThirdParty { - return t.delegate.Decode(data, gvk, into) - } - } - obj, outGVK, err := t.populate(data) - if err != nil { - return nil, nil, err - } - o.Objects = []runtime.Object{ - obj, - } - return o, outGVK, nil - default: - return t.delegate.Decode(data, gvk, into) - } - - thirdParty := into.(*extensions.ThirdPartyResourceData) - var dataObj interface{} - if err := json.Unmarshal(data, &dataObj); err != nil { - return nil, nil, err - } - mapObj, ok := dataObj.(map[string]interface{}) - if !ok { - - return nil, nil, fmt.Errorf("unexpected object: %#v", dataObj) - } - /*if gvk.Kind != "ThirdPartyResourceData" { - return nil, nil, fmt.Errorf("unexpected kind: %s", gvk.Kind) - }*/ - actual := &unversioned.GroupVersionKind{} - if kindObj, found := mapObj["kind"]; !found { - if gvk == nil { - return nil, nil, runtime.NewMissingKindErr(string(data)) - } - mapObj["kind"] = gvk.Kind - actual.Kind = gvk.Kind - } else { - kindStr, ok := kindObj.(string) - if !ok { - return nil, nil, fmt.Errorf("unexpected object for 'kind': %v", kindObj) - } - if len(t.kind) > 0 && kindStr != t.kind { - return nil, nil, fmt.Errorf("kind doesn't match, expecting: %s, got %s", t.kind, kindStr) - } - actual.Kind = kindStr - } - if versionObj, found := mapObj["apiVersion"]; !found { - if gvk == nil { - return nil, nil, runtime.NewMissingVersionErr(string(data)) - } - mapObj["apiVersion"] = gvk.GroupVersion().String() - actual.Group, actual.Version = gvk.Group, gvk.Version - } else { - versionStr, ok := versionObj.(string) - if !ok { - return nil, nil, fmt.Errorf("unexpected object for 'apiVersion': %v", versionObj) - } - if gvk != nil && versionStr != gvk.GroupVersion().String() { - return nil, nil, fmt.Errorf("version doesn't match, expecting: %v, got %s", gvk.GroupVersion(), versionStr) - } - gv, err := unversioned.ParseGroupVersion(versionStr) - if err != nil { - return nil, nil, err - } - actual.Group, actual.Version = gv.Group, gv.Version - } - - mapObj, err := parseObject(data) - if err != nil { - return nil, actual, err - } - if err := t.populateResource(thirdParty, mapObj, data); err != nil { - return nil, actual, err - } - return thirdParty, actual, nil -} - -func (t *thirdPartyResourceDataDecoder) populateListResource(objIn *extensions.ThirdPartyResourceDataList, mapObj map[string]interface{}) error { - items, ok := mapObj["items"].([]interface{}) - if !ok { - return fmt.Errorf("unexpected object for items: %#v", mapObj["items"]) - } - objIn.Items = make([]extensions.ThirdPartyResourceData, len(items)) - for ix := range items { - objData, err := json.Marshal(items[ix]) - if err != nil { - return err - } - objMap, err := parseObject(objData) - if err != nil { - return err - } - if err := 
t.populateResource(&objIn.Items[ix], objMap, objData); err != nil { - return err - } - } - return nil -} - -type thirdPartyResourceDataEncoder struct { - delegate runtime.Encoder - gvk unversioned.GroupVersionKind -} - -func NewEncoder(delegate runtime.Encoder, gvk unversioned.GroupVersionKind) runtime.Encoder { - return &thirdPartyResourceDataEncoder{delegate: delegate, gvk: gvk} -} - -var _ runtime.Encoder = &thirdPartyResourceDataEncoder{} - -func encodeToJSON(obj *extensions.ThirdPartyResourceData, stream io.Writer) error { - var objOut interface{} - if err := json.Unmarshal(obj.Data, &objOut); err != nil { - return err - } - objMap, ok := objOut.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected type: %v", objOut) - } - objMap["metadata"] = obj.ObjectMeta - encoder := json.NewEncoder(stream) - return encoder.Encode(objMap) -} - -func (t *thirdPartyResourceDataEncoder) Encode(obj runtime.Object, stream io.Writer) (err error) { - switch obj := obj.(type) { - case *extensions.ThirdPartyResourceData: - return encodeToJSON(obj, stream) - case *extensions.ThirdPartyResourceDataList: - // TODO: There are likely still better ways to do this... - listItems := make([]json.RawMessage, len(obj.Items)) - - for ix := range obj.Items { - buff := &bytes.Buffer{} - err := encodeToJSON(&obj.Items[ix], buff) - if err != nil { - return err - } - listItems[ix] = json.RawMessage(buff.Bytes()) - } - - if t.gvk.Empty() { - return fmt.Errorf("thirdPartyResourceDataEncoder was not given a target version") - } - - encMap := struct { - // +optional - Kind string `json:"kind,omitempty"` - Items []json.RawMessage `json:"items"` - // +optional - Metadata unversioned.ListMeta `json:"metadata,omitempty"` - // +optional - APIVersion string `json:"apiVersion,omitempty"` - }{ - Kind: t.gvk.Kind + "List", - Items: listItems, - Metadata: obj.ListMeta, - APIVersion: t.gvk.GroupVersion().String(), - } - - encBytes, err := json.Marshal(encMap) - if err != nil { - return err - } - - _, err = stream.Write(encBytes) - return err - case *versioned.InternalEvent: - event := &versioned.Event{} - err := versioned.Convert_versioned_InternalEvent_to_versioned_Event(obj, event, nil) - if err != nil { - return err - } - - enc := json.NewEncoder(stream) - err = enc.Encode(event) - if err != nil { - return err - } - - return nil - case *unversioned.Status, *unversioned.APIResourceList: - return t.delegate.Encode(obj, stream) - default: - return fmt.Errorf("unexpected object to encode: %#v", obj) - } -} - -func NewObjectCreator(group, version string, delegate runtime.ObjectCreater) runtime.ObjectCreater { - return &thirdPartyResourceDataCreator{group, version, delegate} -} - -type thirdPartyResourceDataCreator struct { - group string - version string - delegate runtime.ObjectCreater -} - -func (t *thirdPartyResourceDataCreator) New(kind unversioned.GroupVersionKind) (out runtime.Object, err error) { - switch kind.Kind { - case "ThirdPartyResourceData": - if apiutil.GetGroupVersion(t.group, t.version) != kind.GroupVersion().String() { - return nil, fmt.Errorf("unknown kind %v", kind) - } - return &extensions.ThirdPartyResourceData{}, nil - case "ThirdPartyResourceDataList": - if apiutil.GetGroupVersion(t.group, t.version) != kind.GroupVersion().String() { - return nil, fmt.Errorf("unknown kind %v", kind) - } - return &extensions.ThirdPartyResourceDataList{}, nil - // TODO: this list needs to be formalized higher in the chain - case "ListOptions", "WatchEvent": - if apiutil.GetGroupVersion(t.group, t.version) == 
kind.GroupVersion().String() { - // Translate third party group to external group. - gvk := registered.EnabledVersionsForGroup(api.GroupName)[0].WithKind(kind.Kind) - return t.delegate.New(gvk) - } - return t.delegate.New(kind) - default: - return t.delegate.New(kind) - } -} - -func NewThirdPartyParameterCodec(p runtime.ParameterCodec) runtime.ParameterCodec { - return &thirdPartyParameterCodec{p} -} - -type thirdPartyParameterCodec struct { - delegate runtime.ParameterCodec -} - -func (t *thirdPartyParameterCodec) DecodeParameters(parameters url.Values, from unversioned.GroupVersion, into runtime.Object) error { - return t.delegate.DecodeParameters(parameters, v1.SchemeGroupVersion, into) -} - -func (t *thirdPartyParameterCodec) EncodeParameters(obj runtime.Object, to unversioned.GroupVersion) (url.Values, error) { - return t.delegate.EncodeParameters(obj, v1.SchemeGroupVersion) -} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/doc.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/doc.go deleted file mode 100644 index d9988ccb08..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package thirdpartyresourcedata provides Registry interface and its REST -// implementation for storing ThirdPartyResourceData api objects. -package thirdpartyresourcedata diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/registry.go deleted file mode 100644 index 87f1156ec1..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/registry.go +++ /dev/null @@ -1,80 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package thirdpartyresourcedata - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/rest" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/watch" -) - -// Registry is an interface implemented by things that know how to store ThirdPartyResourceData objects. 
-type Registry interface { - ListThirdPartyResourceData(ctx api.Context, options *api.ListOptions) (*extensions.ThirdPartyResourceDataList, error) - WatchThirdPartyResourceData(ctx api.Context, options *api.ListOptions) (watch.Interface, error) - GetThirdPartyResourceData(ctx api.Context, name string) (*extensions.ThirdPartyResourceData, error) - CreateThirdPartyResourceData(ctx api.Context, resource *extensions.ThirdPartyResourceData) (*extensions.ThirdPartyResourceData, error) - UpdateThirdPartyResourceData(ctx api.Context, resource *extensions.ThirdPartyResourceData) (*extensions.ThirdPartyResourceData, error) - DeleteThirdPartyResourceData(ctx api.Context, name string) error -} - -// storage puts strong typing around storage calls -type storage struct { - rest.StandardStorage -} - -// NewRegistry returns a new Registry interface for the given Storage. Any mismatched -// types will panic. -func NewRegistry(s rest.StandardStorage) Registry { - return &storage{s} -} - -func (s *storage) ListThirdPartyResourceData(ctx api.Context, options *api.ListOptions) (*extensions.ThirdPartyResourceDataList, error) { - obj, err := s.List(ctx, options) - if err != nil { - return nil, err - } - return obj.(*extensions.ThirdPartyResourceDataList), nil -} - -func (s *storage) WatchThirdPartyResourceData(ctx api.Context, options *api.ListOptions) (watch.Interface, error) { - return s.Watch(ctx, options) -} - -func (s *storage) GetThirdPartyResourceData(ctx api.Context, name string) (*extensions.ThirdPartyResourceData, error) { - obj, err := s.Get(ctx, name) - if err != nil { - return nil, err - } - return obj.(*extensions.ThirdPartyResourceData), nil -} - -func (s *storage) CreateThirdPartyResourceData(ctx api.Context, ThirdPartyResourceData *extensions.ThirdPartyResourceData) (*extensions.ThirdPartyResourceData, error) { - obj, err := s.Create(ctx, ThirdPartyResourceData) - return obj.(*extensions.ThirdPartyResourceData), err -} - -func (s *storage) UpdateThirdPartyResourceData(ctx api.Context, ThirdPartyResourceData *extensions.ThirdPartyResourceData) (*extensions.ThirdPartyResourceData, error) { - obj, _, err := s.Update(ctx, ThirdPartyResourceData.Name, rest.DefaultUpdatedObjectInfo(ThirdPartyResourceData, api.Scheme)) - return obj.(*extensions.ThirdPartyResourceData), err -} - -func (s *storage) DeleteThirdPartyResourceData(ctx api.Context, name string) error { - _, err := s.Delete(ctx, name, nil) - return err -} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/strategy.go deleted file mode 100644 index e5c93e78d1..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/strategy.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package thirdpartyresourcedata - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/rest" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/apis/extensions/validation" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - apistorage "k8s.io/kubernetes/pkg/storage" - "k8s.io/kubernetes/pkg/util/validation/field" -) - -// strategy implements behavior for ThirdPartyResource objects -type strategy struct { - runtime.ObjectTyper - api.NameGenerator -} - -// Strategy is the default logic that applies when creating and updating ThirdPartyResource -// objects via the REST API. -var Strategy = strategy{api.Scheme, api.SimpleNameGenerator} - -var _ = rest.RESTCreateStrategy(Strategy) - -var _ = rest.RESTUpdateStrategy(Strategy) - -func (strategy) NamespaceScoped() bool { - return true -} - -func (strategy) PrepareForCreate(ctx api.Context, obj runtime.Object) { -} - -func (strategy) Validate(ctx api.Context, obj runtime.Object) field.ErrorList { - return validation.ValidateThirdPartyResourceData(obj.(*extensions.ThirdPartyResourceData)) -} - -// Canonicalize normalizes the object after validation. -func (strategy) Canonicalize(obj runtime.Object) { -} - -func (strategy) AllowCreateOnUpdate() bool { - return false -} - -func (strategy) PrepareForUpdate(ctx api.Context, obj, old runtime.Object) { -} - -func (strategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) field.ErrorList { - return validation.ValidateThirdPartyResourceDataUpdate(obj.(*extensions.ThirdPartyResourceData), old.(*extensions.ThirdPartyResourceData)) -} - -func (strategy) AllowUnconditionalUpdate() bool { - return true -} - -// Matcher returns a generic matcher for a given label and field selector. -func Matcher(label labels.Selector, field fields.Selector) apistorage.SelectionPredicate { - return apistorage.SelectionPredicate{ - Label: label, - Field: field, - GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) { - tprd, ok := obj.(*extensions.ThirdPartyResourceData) - if !ok { - return nil, nil, fmt.Errorf("not a ThirdPartyResourceData") - } - return labels.Set(tprd.Labels), SelectableFields(tprd), nil - }, - } -} - -// SelectableFields returns a field set that can be used for filter selection -func SelectableFields(obj *extensions.ThirdPartyResourceData) fields.Set { - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/util.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/util.go deleted file mode 100644 index d8e91b1572..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/util.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-
-*/
-
-package thirdpartyresourcedata
-
-import (
- "fmt"
- "strings"
-
- "k8s.io/kubernetes/pkg/api/unversioned"
- "k8s.io/kubernetes/pkg/apis/extensions"
-)
-
-func ExtractGroupVersionKind(list *extensions.ThirdPartyResourceList) ([]unversioned.GroupVersion, []unversioned.GroupVersionKind, error) {
- gvs := []unversioned.GroupVersion{}
- gvks := []unversioned.GroupVersionKind{}
- for ix := range list.Items {
- rsrc := &list.Items[ix]
- kind, group, err := ExtractApiGroupAndKind(rsrc)
- if err != nil {
- return nil, nil, err
- }
- for _, version := range rsrc.Versions {
- gv := unversioned.GroupVersion{Group: group, Version: version.Name}
- gvs = append(gvs, gv)
- gvks = append(gvks, unversioned.GroupVersionKind{Group: group, Version: version.Name, Kind: kind})
- }
- }
- return gvs, gvks, nil
-}
-
-func convertToCamelCase(input string) string {
- result := ""
- toUpper := true
- for ix := range input {
- char := input[ix]
- if toUpper {
- result = result + string([]byte{(char - 32)})
- toUpper = false
- } else if char == '-' {
- toUpper = true
- } else {
- result = result + string([]byte{char})
- }
- }
- return result
-}
-
-func ExtractApiGroupAndKind(rsrc *extensions.ThirdPartyResource) (kind string, group string, err error) {
- parts := strings.Split(rsrc.Name, ".")
- if len(parts) < 3 {
- return "", "", fmt.Errorf("unexpectedly short resource name: %s, expected at least <kind>.<domain>.<tld>", rsrc.Name)
- }
- return convertToCamelCase(parts[0]), strings.Join(parts[1:], "."), nil
-}
diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/BUILD b/vendor/k8s.io/kubernetes/pkg/runtime/BUILD
deleted file mode 100644
index c3bc3018ba..0000000000
--- a/vendor/k8s.io/kubernetes/pkg/runtime/BUILD
+++ /dev/null
@@ -1,86 +0,0 @@
-package(default_visibility = ["//visibility:public"])
-
-licenses(["notice"])
-
-load(
- "@io_bazel_rules_go//go:def.bzl",
- "go_binary",
- "go_library",
- "go_test",
- "cgo_library",
-)
-
-go_library(
- name = "go_default_library",
- srcs = [
- "codec.go",
- "codec_check.go",
- "conversion.go",
- "doc.go",
- "embedded.go",
- "error.go",
- "extension.go",
- "generated.pb.go",
- "helper.go",
- "interfaces.go",
- "register.go",
- "scheme.go",
- "scheme_builder.go",
- "swagger_doc_generator.go",
- "types.go",
- "types_proto.go",
- "unstructured.go",
- "zz_generated.deepcopy.go",
- ],
- tags = ["automanaged"],
- deps = [
- "//pkg/api/meta/metatypes:go_default_library",
- "//pkg/api/unversioned:go_default_library",
- "//pkg/conversion:go_default_library",
- "//pkg/conversion/queryparams:go_default_library",
- "//pkg/types:go_default_library",
- "//pkg/util/errors:go_default_library",
- "//pkg/util/json:go_default_library",
- "//vendor:github.com/gogo/protobuf/proto",
- "//vendor:github.com/golang/glog",
- ],
-)
-
-go_test(
- name = "go_default_test",
- srcs = ["swagger_doc_generator_test.go"],
- library = "go_default_library",
- tags = ["automanaged"],
- deps = [],
-)
-
-go_test(
- name = "go_default_xtest",
- srcs = [
- "conversion_test.go",
- "embedded_test.go",
- "extension_test.go",
- "helper_test.go",
- "scheme_test.go",
- "unstructured_test.go",
- "unversioned_test.go",
- ],
- tags = ["automanaged"],
- deps = [
- "//pkg/api:go_default_library",
- "//pkg/api/meta:go_default_library",
- "//pkg/api/meta/metatypes:go_default_library",
- "//pkg/api/testapi:go_default_library",
- "//pkg/api/unversioned:go_default_library",
- "//pkg/api/validation:go_default_library",
- "//pkg/apimachinery/registered:go_default_library",
- "//pkg/apis/extensions:go_default_library",
-
"//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/runtime/serializer:go_default_library", - "//pkg/types:go_default_library", - "//pkg/util/diff:go_default_library", - "//vendor:github.com/google/gofuzz", - "//vendor:github.com/spf13/pflag", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/OWNERS b/vendor/k8s.io/kubernetes/pkg/runtime/OWNERS deleted file mode 100644 index d038b5e9b9..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -assignees: - - caesarxuchao - - deads2k - - lavalamp - - smarterclayton diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/codec.go b/vendor/k8s.io/kubernetes/pkg/runtime/codec.go deleted file mode 100644 index 345dabce4f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/codec.go +++ /dev/null @@ -1,314 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -import ( - "bytes" - "encoding/base64" - "fmt" - "io" - "net/url" - "reflect" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/conversion/queryparams" -) - -// codec binds an encoder and decoder. -type codec struct { - Encoder - Decoder -} - -// NewCodec creates a Codec from an Encoder and Decoder. -func NewCodec(e Encoder, d Decoder) Codec { - return codec{e, d} -} - -// Encode is a convenience wrapper for encoding to a []byte from an Encoder -func Encode(e Encoder, obj Object) ([]byte, error) { - // TODO: reuse buffer - buf := &bytes.Buffer{} - if err := e.Encode(obj, buf); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// Decode is a convenience wrapper for decoding data into an Object. -func Decode(d Decoder, data []byte) (Object, error) { - obj, _, err := d.Decode(data, nil, nil) - return obj, err -} - -// DecodeInto performs a Decode into the provided object. -func DecodeInto(d Decoder, data []byte, into Object) error { - out, gvk, err := d.Decode(data, nil, into) - if err != nil { - return err - } - if out != into { - return fmt.Errorf("unable to decode %s into %v", gvk, reflect.TypeOf(into)) - } - return nil -} - -// EncodeOrDie is a version of Encode which will panic instead of returning an error. For tests. -func EncodeOrDie(e Encoder, obj Object) string { - bytes, err := Encode(e, obj) - if err != nil { - panic(err) - } - return string(bytes) -} - -// DefaultingSerializer invokes defaulting after decoding. -type DefaultingSerializer struct { - Defaulter ObjectDefaulter - Decoder Decoder - // Encoder is optional to allow this type to be used as both a Decoder and an Encoder - Encoder -} - -// Decode performs a decode and then allows the defaulter to act on the provided object. 
-func (d DefaultingSerializer) Decode(data []byte, defaultGVK *unversioned.GroupVersionKind, into Object) (Object, *unversioned.GroupVersionKind, error) {
- obj, gvk, err := d.Decoder.Decode(data, defaultGVK, into)
- if err != nil {
- return obj, gvk, err
- }
- d.Defaulter.Default(obj)
- return obj, gvk, nil
-}
-
-// UseOrCreateObject returns obj if the canonical ObjectKind returned by the provided typer matches gvk, or
-// invokes the ObjectCreator to instantiate a new gvk. Returns an error if the typer cannot find the object.
-func UseOrCreateObject(t ObjectTyper, c ObjectCreater, gvk unversioned.GroupVersionKind, obj Object) (Object, error) {
- if obj != nil {
- kinds, _, err := t.ObjectKinds(obj)
- if err != nil {
- return nil, err
- }
- for _, kind := range kinds {
- if gvk == kind {
- return obj, nil
- }
- }
- }
- return c.New(gvk)
-}
-
-// NoopEncoder converts an Decoder to a Serializer or Codec for code that expects them but only uses decoding.
-type NoopEncoder struct {
- Decoder
-}
-
-var _ Serializer = NoopEncoder{}
-
-func (n NoopEncoder) Encode(obj Object, w io.Writer) error {
- return fmt.Errorf("encoding is not allowed for this codec: %v", reflect.TypeOf(n.Decoder))
-}
-
-// NoopDecoder converts an Encoder to a Serializer or Codec for code that expects them but only uses encoding.
-type NoopDecoder struct {
- Encoder
-}
-
-var _ Serializer = NoopDecoder{}
-
-func (n NoopDecoder) Decode(data []byte, gvk *unversioned.GroupVersionKind, into Object) (Object, *unversioned.GroupVersionKind, error) {
- return nil, nil, fmt.Errorf("decoding is not allowed for this codec: %v", reflect.TypeOf(n.Encoder))
-}
-
-// NewParameterCodec creates a ParameterCodec capable of transforming url values into versioned objects and back.
-func NewParameterCodec(scheme *Scheme) ParameterCodec {
- return &parameterCodec{
- typer: scheme,
- convertor: scheme,
- creator: scheme,
- }
-}
-
-// parameterCodec implements conversion to and from query parameters and objects.
-type parameterCodec struct {
- typer ObjectTyper
- convertor ObjectConvertor
- creator ObjectCreater
-}
-
-var _ ParameterCodec = &parameterCodec{}
-
-// DecodeParameters converts the provided url.Values into an object of type From with the kind of into, and then
-// converts that object to into (if necessary). Returns an error if the operation cannot be completed.
-func (c *parameterCodec) DecodeParameters(parameters url.Values, from unversioned.GroupVersion, into Object) error {
- if len(parameters) == 0 {
- return nil
- }
- targetGVKs, _, err := c.typer.ObjectKinds(into)
- if err != nil {
- return err
- }
- targetGVK := targetGVKs[0]
- if targetGVK.GroupVersion() == from {
- return c.convertor.Convert(&parameters, into, nil)
- }
- input, err := c.creator.New(from.WithKind(targetGVK.Kind))
- if err != nil {
- return err
- }
- if err := c.convertor.Convert(&parameters, input, nil); err != nil {
- return err
- }
- return c.convertor.Convert(input, into, nil)
-}
-
-// EncodeParameters converts the provided object into the to version, then converts that object to url.Values.
-// Returns an error if conversion is not possible.
-func (c *parameterCodec) EncodeParameters(obj Object, to unversioned.GroupVersion) (url.Values, error) { - gvks, _, err := c.typer.ObjectKinds(obj) - if err != nil { - return nil, err - } - gvk := gvks[0] - if to != gvk.GroupVersion() { - out, err := c.convertor.ConvertToVersion(obj, to) - if err != nil { - return nil, err - } - obj = out - } - return queryparams.Convert(obj) -} - -type base64Serializer struct { - Serializer -} - -func NewBase64Serializer(s Serializer) Serializer { - return &base64Serializer{s} -} - -func (s base64Serializer) Encode(obj Object, stream io.Writer) error { - e := base64.NewEncoder(base64.StdEncoding, stream) - err := s.Serializer.Encode(obj, e) - e.Close() - return err -} - -func (s base64Serializer) Decode(data []byte, defaults *unversioned.GroupVersionKind, into Object) (Object, *unversioned.GroupVersionKind, error) { - out := make([]byte, base64.StdEncoding.DecodedLen(len(data))) - n, err := base64.StdEncoding.Decode(out, data) - if err != nil { - return nil, nil, err - } - return s.Serializer.Decode(out[:n], defaults, into) -} - -// SerializerInfoForMediaType returns the first info in types that has a matching media type (which cannot -// include media-type parameters), or the first info with an empty media type, or false if no type matches. -func SerializerInfoForMediaType(types []SerializerInfo, mediaType string) (SerializerInfo, bool) { - for _, info := range types { - if info.MediaType == mediaType { - return info, true - } - } - for _, info := range types { - if len(info.MediaType) == 0 { - return info, true - } - } - return SerializerInfo{}, false -} - -var ( - // InternalGroupVersioner will always prefer the internal version for a given group version kind. - InternalGroupVersioner GroupVersioner = internalGroupVersioner{} - // DisabledGroupVersioner will reject all kinds passed to it. - DisabledGroupVersioner GroupVersioner = disabledGroupVersioner{} -) - -type internalGroupVersioner struct{} - -// KindForGroupVersionKinds returns an internal Kind if one is found, or converts the first provided kind to the internal version. -func (internalGroupVersioner) KindForGroupVersionKinds(kinds []unversioned.GroupVersionKind) (unversioned.GroupVersionKind, bool) { - for _, kind := range kinds { - if kind.Version == APIVersionInternal { - return kind, true - } - } - for _, kind := range kinds { - return unversioned.GroupVersionKind{Group: kind.Group, Version: APIVersionInternal, Kind: kind.Kind}, true - } - return unversioned.GroupVersionKind{}, false -} - -type disabledGroupVersioner struct{} - -// KindForGroupVersionKinds returns false for any input. -func (disabledGroupVersioner) KindForGroupVersionKinds(kinds []unversioned.GroupVersionKind) (unversioned.GroupVersionKind, bool) { - return unversioned.GroupVersionKind{}, false -} - -// GroupVersioners implements GroupVersioner and resolves to the first exact match for any kind. -type GroupVersioners []GroupVersioner - -// KindForGroupVersionKinds returns the first match of any of the group versioners, or false if no match occured. 
-func (gvs GroupVersioners) KindForGroupVersionKinds(kinds []unversioned.GroupVersionKind) (unversioned.GroupVersionKind, bool) { - for _, gv := range gvs { - target, ok := gv.KindForGroupVersionKinds(kinds) - if !ok { - continue - } - return target, true - } - return unversioned.GroupVersionKind{}, false -} - -// Assert that unversioned.GroupVersion and GroupVersions implement GroupVersioner -var _ GroupVersioner = unversioned.GroupVersion{} -var _ GroupVersioner = unversioned.GroupVersions{} -var _ GroupVersioner = multiGroupVersioner{} - -type multiGroupVersioner struct { - target unversioned.GroupVersion - acceptedGroupKinds []unversioned.GroupKind -} - -// NewMultiGroupVersioner returns the provided group version for any kind that matches one of the provided group kinds. -// Kind may be empty in the provided group kind, in which case any kind will match. -func NewMultiGroupVersioner(gv unversioned.GroupVersion, groupKinds ...unversioned.GroupKind) GroupVersioner { - if len(groupKinds) == 0 || (len(groupKinds) == 1 && groupKinds[0].Group == gv.Group) { - return gv - } - return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds} -} - -// KindForGroupVersionKinds returns the target group version if any kind matches any of the original group kinds. It will -// use the originating kind where possible. -func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []unversioned.GroupVersionKind) (unversioned.GroupVersionKind, bool) { - for _, src := range kinds { - for _, kind := range v.acceptedGroupKinds { - if kind.Group != src.Group { - continue - } - if len(kind.Kind) > 0 && kind.Kind != src.Kind { - continue - } - return v.target.WithKind(src.Kind), true - } - } - return unversioned.GroupVersionKind{}, false -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/conversion.go b/vendor/k8s.io/kubernetes/pkg/runtime/conversion.go deleted file mode 100644 index dd6e26af7d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/conversion.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Defines conversions between generic types and structs to map query strings -// to struct objects. -package runtime - -import ( - "reflect" - "strconv" - "strings" - - "k8s.io/kubernetes/pkg/conversion" -) - -// JSONKeyMapper uses the struct tags on a conversion to determine the key value for -// the other side. Use when mapping from a map[string]* to a struct or vice versa. -func JSONKeyMapper(key string, sourceTag, destTag reflect.StructTag) (string, string) { - if s := destTag.Get("json"); len(s) > 0 { - return strings.SplitN(s, ",", 2)[0], key - } - if s := sourceTag.Get("json"); len(s) > 0 { - return key, strings.SplitN(s, ",", 2)[0] - } - return key, key -} - -// DefaultStringConversions are helpers for converting []string and string to real values. 
-var DefaultStringConversions = []interface{}{ - Convert_Slice_string_To_string, - Convert_Slice_string_To_int, - Convert_Slice_string_To_bool, - Convert_Slice_string_To_int64, -} - -func Convert_Slice_string_To_string(input *[]string, out *string, s conversion.Scope) error { - if len(*input) == 0 { - *out = "" - } - *out = (*input)[0] - return nil -} - -func Convert_Slice_string_To_int(input *[]string, out *int, s conversion.Scope) error { - if len(*input) == 0 { - *out = 0 - } - str := (*input)[0] - i, err := strconv.Atoi(str) - if err != nil { - return err - } - *out = i - return nil -} - -// Conver_Slice_string_To_bool will convert a string parameter to boolean. -// Only the absence of a value, a value of "false", or a value of "0" resolve to false. -// Any other value (including empty string) resolves to true. -func Convert_Slice_string_To_bool(input *[]string, out *bool, s conversion.Scope) error { - if len(*input) == 0 { - *out = false - return nil - } - switch strings.ToLower((*input)[0]) { - case "false", "0": - *out = false - default: - *out = true - } - return nil -} - -func Convert_Slice_string_To_int64(input *[]string, out *int64, s conversion.Scope) error { - if len(*input) == 0 { - *out = 0 - } - str := (*input)[0] - i, err := strconv.ParseInt(str, 10, 64) - if err != nil { - return err - } - *out = i - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/error.go b/vendor/k8s.io/kubernetes/pkg/runtime/error.go deleted file mode 100644 index 2a1262c568..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/error.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -import ( - "fmt" - "reflect" - - "k8s.io/kubernetes/pkg/api/unversioned" -) - -type notRegisteredErr struct { - gvk unversioned.GroupVersionKind - t reflect.Type -} - -// NewNotRegisteredErr is exposed for testing. -func NewNotRegisteredErr(gvk unversioned.GroupVersionKind, t reflect.Type) error { - return ¬RegisteredErr{gvk: gvk, t: t} -} - -func (k *notRegisteredErr) Error() string { - if k.t != nil { - return fmt.Sprintf("no kind is registered for the type %v", k.t) - } - if len(k.gvk.Kind) == 0 { - return fmt.Sprintf("no version %q has been registered", k.gvk.GroupVersion()) - } - if k.gvk.Version == APIVersionInternal { - return fmt.Sprintf("no kind %q is registered for the internal version of group %q", k.gvk.Kind, k.gvk.Group) - } - - return fmt.Sprintf("no kind %q is registered for version %q", k.gvk.Kind, k.gvk.GroupVersion()) -} - -// IsNotRegisteredError returns true if the error indicates the provided -// object or input data is not registered. 
-func IsNotRegisteredError(err error) bool { - if err == nil { - return false - } - _, ok := err.(*notRegisteredErr) - return ok -} - -type missingKindErr struct { - data string -} - -func NewMissingKindErr(data string) error { - return &missingKindErr{data} -} - -func (k *missingKindErr) Error() string { - return fmt.Sprintf("Object 'Kind' is missing in '%s'", k.data) -} - -// IsMissingKind returns true if the error indicates that the provided object -// is missing a 'Kind' field. -func IsMissingKind(err error) bool { - if err == nil { - return false - } - _, ok := err.(*missingKindErr) - return ok -} - -type missingVersionErr struct { - data string -} - -// IsMissingVersion returns true if the error indicates that the provided object -// is missing a 'Version' field. -func NewMissingVersionErr(data string) error { - return &missingVersionErr{data} -} - -func (k *missingVersionErr) Error() string { - return fmt.Sprintf("Object 'apiVersion' is missing in '%s'", k.data) -} - -func IsMissingVersion(err error) bool { - if err == nil { - return false - } - _, ok := err.(*missingVersionErr) - return ok -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/runtime/generated.pb.go deleted file mode 100644 index 6321677421..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/generated.pb.go +++ /dev/null @@ -1,767 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/runtime/generated.proto -// DO NOT EDIT! - -/* - Package runtime is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/runtime/generated.proto - - It has these top-level messages: - RawExtension - TypeMeta - Unknown -*/ -package runtime - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-const _ = proto.GoGoProtoPackageIsVersion1 - -func (m *RawExtension) Reset() { *m = RawExtension{} } -func (*RawExtension) ProtoMessage() {} -func (*RawExtension) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *TypeMeta) Reset() { *m = TypeMeta{} } -func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *Unknown) Reset() { *m = Unknown{} } -func (*Unknown) ProtoMessage() {} -func (*Unknown) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func init() { - proto.RegisterType((*RawExtension)(nil), "k8s.io.kubernetes.pkg.runtime.RawExtension") - proto.RegisterType((*TypeMeta)(nil), "k8s.io.kubernetes.pkg.runtime.TypeMeta") - proto.RegisterType((*Unknown)(nil), "k8s.io.kubernetes.pkg.runtime.Unknown") -} -func (m *RawExtension) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *RawExtension) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Raw != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Raw))) - i += copy(data[i:], m.Raw) - } - return i, nil -} - -func (m *TypeMeta) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *TypeMeta) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) - i += copy(data[i:], m.APIVersion) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - return i, nil -} - -func (m *Unknown) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Unknown) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.TypeMeta.Size())) - n1, err := m.TypeMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - if m.Raw != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Raw))) - i += copy(data[i:], m.Raw) - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) - i += copy(data[i:], m.ContentEncoding) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) - i += copy(data[i:], m.ContentType) - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m 
*RawExtension) Size() (n int) { - var l int - _ = l - if m.Raw != nil { - l = len(m.Raw) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *TypeMeta) Size() (n int) { - var l int - _ = l - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Unknown) Size() (n int) { - var l int - _ = l - l = m.TypeMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Raw != nil { - l = len(m.Raw) - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.ContentEncoding) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ContentType) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *RawExtension) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RawExtension{`, - `Raw:` + valueToStringGenerated(this.Raw) + `,`, - `}`, - }, "") - return s -} -func (this *TypeMeta) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TypeMeta{`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `}`, - }, "") - return s -} -func (this *Unknown) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Unknown{`, - `TypeMeta:` + strings.Replace(strings.Replace(this.TypeMeta.String(), "TypeMeta", "TypeMeta", 1), `&`, ``, 1) + `,`, - `Raw:` + valueToStringGenerated(this.Raw) + `,`, - `ContentEncoding:` + fmt.Sprintf("%v", this.ContentEncoding) + `,`, - `ContentType:` + fmt.Sprintf("%v", this.ContentType) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *RawExtension) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RawExtension: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RawExtension: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Raw = append(m.Raw[:0], data[iNdEx:postIndex]...) 
- if m.Raw == nil { - m.Raw = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TypeMeta) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Unknown) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Unknown: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Unknown: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TypeMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TypeMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Raw = append(m.Raw[:0], data[iNdEx:postIndex]...) - if m.Raw == nil { - m.Raw = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContentEncoding", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContentEncoding = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContentType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContentType = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - 
length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -var fileDescriptorGenerated = []byte{ - // 388 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x90, 0x3f, 0x8f, 0xda, 0x30, - 0x18, 0xc6, 0x13, 0x40, 0x82, 0x1a, 0x24, 0x2a, 0x77, 0x68, 0x8a, 0x54, 0x83, 0x58, 0x5a, 0x06, - 0x6c, 0x15, 0xa9, 0x52, 0x57, 0x82, 0x18, 0xaa, 0xaa, 0x52, 0x65, 0x95, 0x0e, 0x9d, 0x1a, 0x12, - 0x37, 0xb5, 0x52, 0xec, 0xc8, 0x71, 0x94, 0x76, 0xeb, 0x47, 0xe8, 0xc7, 0x62, 0x64, 0xbc, 0x09, - 0x1d, 0xb9, 0xcf, 0x70, 0xfb, 0x09, 0x63, 0xfe, 0x1c, 0xa0, 0xdb, 0xe2, 0xf7, 0xf9, 0x3d, 0xcf, - 0xfb, 0xbc, 0x01, 0xc3, 0xe4, 0x43, 0x86, 0xb9, 0x24, 0x49, 0x3e, 0x67, 0x4a, 0x30, 0xcd, 0x32, - 0x92, 0x26, 0x31, 0x51, 0xb9, 0xd0, 0x7c, 0xc1, 0x48, 0xcc, 0x04, 0x53, 0x81, 0x66, 0x11, 0x4e, - 0x95, 0xd4, 0x12, 0xbe, 0xde, 0xe1, 0xf8, 0x88, 0xe3, 0x34, 0x89, 0xb1, 0xc5, 0x3b, 0xc3, 0x98, - 0xeb, 0x5f, 0xf9, 0x1c, 0x87, 0x72, 0x41, 0x62, 0x19, 0x4b, 0x62, 0x5c, 0xf3, 0xfc, 0xa7, 0x79, - 0x99, 0x87, 0xf9, 0xda, 0xa5, 0x75, 0x46, 0xd7, 0x97, 0x07, 0x29, 0x27, 0x8a, 0x65, 0x32, 0x57, - 0xe1, 0x45, 0x83, 0xce, 0xbb, 0xeb, 0x9e, 0x5c, 0xf3, 0xdf, 0x84, 0x0b, 0x9d, 0x69, 0x75, 0x6e, - 0xe9, 0x0f, 0x40, 0x8b, 0x06, 0xc5, 0xf4, 0x8f, 0x66, 0x22, 0xe3, 0x52, 0xc0, 0x57, 0xa0, 0xaa, - 0x82, 0xc2, 0x73, 0x7b, 0xee, 0xdb, 0x96, 0x5f, 0x2f, 0xd7, 0xdd, 0x2a, 0x0d, 0x0a, 0xba, 0x9d, - 0xf5, 0x7f, 0x80, 0xc6, 0xd7, 0xbf, 0x29, 0xfb, 0xcc, 0x74, 0x00, 0x47, 0x00, 0x04, 0x29, 0xff, - 0xc6, 0xd4, 0xd6, 0x64, 0xe8, 0x67, 0x3e, 0x5c, 0xae, 0xbb, 0x4e, 0xb9, 0xee, 0x82, 0xf1, 0x97, - 0x8f, 0x56, 0xa1, 0x27, 0x14, 0xec, 0x81, 0x5a, 0xc2, 0x45, 0xe4, 0x55, 0x0c, 0xdd, 0xb2, 0x74, - 0xed, 0x13, 0x17, 0x11, 0x35, 0x4a, 0xff, 0xde, 0x05, 0xf5, 0x99, 0x48, 0x84, 0x2c, 0x04, 0x9c, - 0x81, 0x86, 0xb6, 0xdb, 0x4c, 0x7e, 0x73, 0xf4, 0x06, 0x3f, 0xf9, 0x83, 0xf1, 0xbe, 0x9c, 0xff, - 0xdc, 0x46, 0x1f, 0xea, 0xd2, 0x43, 0xd4, 0xfe, 0xbe, 0xca, 0xe5, 0x7d, 0x70, 0x0c, 0xda, 0xa1, - 0x14, 0x9a, 0x09, 0x3d, 0x15, 0xa1, 0x8c, 0xb8, 0x88, 0xbd, 0xaa, 0xa9, 0xfa, 0xd2, 0xe6, 0xb5, - 0x27, 0x8f, 0x65, 0x7a, 0xce, 0xc3, 0xf7, 0xa0, 0x69, 0x47, 0xdb, 0xd5, 0x5e, 0xcd, 0xd8, 0x5f, - 0x58, 0x7b, 0x73, 0x72, 0x94, 0xe8, 0x29, 0xe7, 0x0f, 0x96, 0x1b, 0xe4, 0xac, 0x36, 0xc8, 0xb9, - 0xd9, 0x20, 0xe7, 0x5f, 0x89, 0xdc, 0x65, 0x89, 0xdc, 0x55, 0x89, 0xdc, 0xdb, 0x12, 0xb9, 0xff, - 0xef, 0x90, 0xf3, 0xbd, 0x6e, 0x8f, 0x7c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x32, 0xc1, 0x73, 0x2d, - 0x94, 0x02, 0x00, 0x00, -} diff --git 
a/vendor/k8s.io/kubernetes/pkg/runtime/generated.proto b/vendor/k8s.io/kubernetes/pkg/runtime/generated.proto deleted file mode 100644 index 136ab0b5e3..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/generated.proto +++ /dev/null @@ -1,129 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.runtime; - -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "runtime"; - -// RawExtension is used to hold extensions in external versions. -// -// To use this, make a field which has RawExtension as its type in your external, versioned -// struct, and Object in your internal struct. You also need to register your -// various plugin types. -// -// // Internal package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.Object `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } -// -// // External package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.RawExtension `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } -// -// // On the wire, the JSON will look something like this: -// { -// "kind":"MyAPIObject", -// "apiVersion":"v1", -// "myPlugin": { -// "kind":"PluginA", -// "aOption":"foo", -// }, -// } -// -// So what happens? Decode first uses json or yaml to unmarshal the serialized data into -// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. -// The next step is to copy (using pkg/conversion) into the internal struct. The runtime -// package's DefaultScheme has conversion functions installed which will unpack the -// JSON stored in RawExtension, turning it into the correct object type, and storing it -// in the Object. (TODO: In the case where the object is of an unknown type, a -// runtime.Unknown object will be created and stored.) -// -// +k8s:deepcopy-gen=true -// +protobuf=true -// +k8s:openapi-gen=true -message RawExtension { - // Raw is the underlying serialization of this object. - // - // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. - optional bytes raw = 1; -} - -// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, -// like this: -// type MyAwesomeAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// ... // other fields -// } -// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *unversioned.GroupVersionKind) { unversioned.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind -// -// TypeMeta is provided here for convenience. You may use it directly from this package or define -// your own with the same fields. 
-// -// +k8s:deepcopy-gen=true -// +protobuf=true -// +k8s:openapi-gen=true -message TypeMeta { - // +optional - optional string apiVersion = 1; - - // +optional - optional string kind = 2; -} - -// Unknown allows api objects with unknown types to be passed-through. This can be used -// to deal with the API objects from a plug-in. Unknown objects still have functioning -// TypeMeta features-- kind, version, etc. -// TODO: Make this object have easy access to field based accessors and settors for -// metadata and field mutatation. -// -// +k8s:deepcopy-gen=true -// +protobuf=true -// +k8s:openapi-gen=true -message Unknown { - optional TypeMeta typeMeta = 1; - - // Raw will hold the complete serialized object which couldn't be matched - // with a registered type. Most likely, nothing should be done with this - // except for passing it through the system. - optional bytes raw = 2; - - // ContentEncoding is encoding used to encode 'Raw' data. - // Unspecified means no encoding. - optional string contentEncoding = 3; - - // ContentType is serialization method used to serialize 'Raw'. - // Unspecified means ContentTypeJSON. - optional string contentType = 4; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/helper.go b/vendor/k8s.io/kubernetes/pkg/runtime/helper.go deleted file mode 100644 index 2f8f161dde..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/helper.go +++ /dev/null @@ -1,212 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -import ( - "fmt" - "io" - "reflect" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/util/errors" -) - -// unsafeObjectConvertor implements ObjectConvertor using the unsafe conversion path. -type unsafeObjectConvertor struct { - *Scheme -} - -var _ ObjectConvertor = unsafeObjectConvertor{} - -// ConvertToVersion converts in to the provided outVersion without copying the input first, which -// is only safe if the output object is not mutated or reused. -func (c unsafeObjectConvertor) ConvertToVersion(in Object, outVersion GroupVersioner) (Object, error) { - return c.Scheme.UnsafeConvertToVersion(in, outVersion) -} - -// UnsafeObjectConvertor performs object conversion without copying the object structure, -// for use when the converted object will not be reused or mutated. Primarily for use within -// versioned codecs, which use the external object for serialization but do not return it. -func UnsafeObjectConvertor(scheme *Scheme) ObjectConvertor { - return unsafeObjectConvertor{scheme} -} - -// SetField puts the value of src, into fieldName, which must be a member of v. -// The value of src must be assignable to the field. 
-func SetField(src interface{}, v reflect.Value, fieldName string) error { - field := v.FieldByName(fieldName) - if !field.IsValid() { - return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) - } - srcValue := reflect.ValueOf(src) - if srcValue.Type().AssignableTo(field.Type()) { - field.Set(srcValue) - return nil - } - if srcValue.Type().ConvertibleTo(field.Type()) { - field.Set(srcValue.Convert(field.Type())) - return nil - } - return fmt.Errorf("couldn't assign/convert %v to %v", srcValue.Type(), field.Type()) -} - -// Field puts the value of fieldName, which must be a member of v, into dest, -// which must be a variable to which this field's value can be assigned. -func Field(v reflect.Value, fieldName string, dest interface{}) error { - field := v.FieldByName(fieldName) - if !field.IsValid() { - return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) - } - destValue, err := conversion.EnforcePtr(dest) - if err != nil { - return err - } - if field.Type().AssignableTo(destValue.Type()) { - destValue.Set(field) - return nil - } - if field.Type().ConvertibleTo(destValue.Type()) { - destValue.Set(field.Convert(destValue.Type())) - return nil - } - return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), destValue.Type()) -} - -// fieldPtr puts the address of fieldName, which must be a member of v, -// into dest, which must be an address of a variable to which this field's -// address can be assigned. -func FieldPtr(v reflect.Value, fieldName string, dest interface{}) error { - field := v.FieldByName(fieldName) - if !field.IsValid() { - return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) - } - v, err := conversion.EnforcePtr(dest) - if err != nil { - return err - } - field = field.Addr() - if field.Type().AssignableTo(v.Type()) { - v.Set(field) - return nil - } - if field.Type().ConvertibleTo(v.Type()) { - v.Set(field.Convert(v.Type())) - return nil - } - return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), v.Type()) -} - -// EncodeList ensures that each object in an array is converted to a Unknown{} in serialized form. -// TODO: accept a content type. -func EncodeList(e Encoder, objects []Object) error { - var errs []error - for i := range objects { - data, err := Encode(e, objects[i]) - if err != nil { - errs = append(errs, err) - continue - } - // TODO: Set ContentEncoding and ContentType. - objects[i] = &Unknown{Raw: data} - } - return errors.NewAggregate(errs) -} - -func decodeListItem(obj *Unknown, decoders []Decoder) (Object, error) { - for _, decoder := range decoders { - // TODO: Decode based on ContentType. - obj, err := Decode(decoder, obj.Raw) - if err != nil { - if IsNotRegisteredError(err) { - continue - } - return nil, err - } - return obj, nil - } - // could not decode, so leave the object as Unknown, but give the decoders the - // chance to set Unknown.TypeMeta if it is available. - for _, decoder := range decoders { - if err := DecodeInto(decoder, obj.Raw, obj); err == nil { - return obj, nil - } - } - return obj, nil -} - -// DecodeList alters the list in place, attempting to decode any objects found in -// the list that have the Unknown type. Any errors that occur are returned -// after the entire list is processed. Decoders are tried in order. 
-func DecodeList(objects []Object, decoders ...Decoder) []error { - errs := []error(nil) - for i, obj := range objects { - switch t := obj.(type) { - case *Unknown: - decoded, err := decodeListItem(t, decoders) - if err != nil { - errs = append(errs, err) - break - } - objects[i] = decoded - } - } - return errs -} - -// MultiObjectTyper returns the types of objects across multiple schemes in order. -type MultiObjectTyper []ObjectTyper - -var _ ObjectTyper = MultiObjectTyper{} - -func (m MultiObjectTyper) ObjectKinds(obj Object) (gvks []unversioned.GroupVersionKind, unversionedType bool, err error) { - for _, t := range m { - gvks, unversionedType, err = t.ObjectKinds(obj) - if err == nil { - return - } - } - return -} - -func (m MultiObjectTyper) Recognizes(gvk unversioned.GroupVersionKind) bool { - for _, t := range m { - if t.Recognizes(gvk) { - return true - } - } - return false -} - -// SetZeroValue would set the object of objPtr to zero value of its type. -func SetZeroValue(objPtr Object) error { - v, err := conversion.EnforcePtr(objPtr) - if err != nil { - return err - } - v.Set(reflect.Zero(v.Type())) - return nil -} - -// DefaultFramer is valid for any stream that can read objects serially without -// any separation in the stream. -var DefaultFramer = defaultFramer{} - -type defaultFramer struct{} - -func (defaultFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { return r } -func (defaultFramer) NewFrameWriter(w io.Writer) io.Writer { return w } diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/interfaces.go b/vendor/k8s.io/kubernetes/pkg/runtime/interfaces.go deleted file mode 100644 index e3f03c399e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/interfaces.go +++ /dev/null @@ -1,237 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -import ( - "io" - "net/url" - - "k8s.io/kubernetes/pkg/api/unversioned" -) - -const ( - // APIVersionInternal may be used if you are registering a type that should not - // be considered stable or serialized - it is a convention only and has no - // special behavior in this package. - APIVersionInternal = "__internal" -) - -// GroupVersioner refines a set of possible conversion targets into a single option. -type GroupVersioner interface { - // KindForGroupVersionKinds returns a desired target group version kind for the given input, or returns ok false if no - // target is known. In general, if the return target is not in the input list, the caller is expected to invoke - // Scheme.New(target) and then perform a conversion between the current Go type and the destination Go type. - // Sophisticated implementations may use additional information about the input kinds to pick a destination kind. - KindForGroupVersionKinds(kinds []unversioned.GroupVersionKind) (target unversioned.GroupVersionKind, ok bool) -} - -// Encoders write objects to a serialized form -type Encoder interface { - // Encode writes an object to a stream. 
Implementations may return errors if the versions are - // incompatible, or if no conversion is defined. - Encode(obj Object, w io.Writer) error -} - -// Decoders attempt to load an object from data. -type Decoder interface { - // Decode attempts to deserialize the provided data using either the innate typing of the scheme or the - // default kind, group, and version provided. It returns a decoded object as well as the kind, group, and - // version from the serialized data, or an error. If into is non-nil, it will be used as the target type - // and implementations may choose to use it rather than reallocating an object. However, the object is not - // guaranteed to be populated. The returned object is not guaranteed to match into. If defaults are - // provided, they are applied to the data by default. If no defaults or partial defaults are provided, the - // type of the into may be used to guide conversion decisions. - Decode(data []byte, defaults *unversioned.GroupVersionKind, into Object) (Object, *unversioned.GroupVersionKind, error) -} - -// Serializer is the core interface for transforming objects into a serialized format and back. -// Implementations may choose to perform conversion of the object, but no assumptions should be made. -type Serializer interface { - Encoder - Decoder -} - -// Codec is a Serializer that deals with the details of versioning objects. It offers the same -// interface as Serializer, so this is a marker to consumers that care about the version of the objects -// they receive. -type Codec Serializer - -// ParameterCodec defines methods for serializing and deserializing API objects to url.Values and -// performing any necessary conversion. Unlike the normal Codec, query parameters are not self describing -// and the desired version must be specified. -type ParameterCodec interface { - // DecodeParameters takes the given url.Values in the specified group version and decodes them - // into the provided object, or returns an error. - DecodeParameters(parameters url.Values, from unversioned.GroupVersion, into Object) error - // EncodeParameters encodes the provided object as query parameters or returns an error. - EncodeParameters(obj Object, to unversioned.GroupVersion) (url.Values, error) -} - -// Framer is a factory for creating readers and writers that obey a particular framing pattern. -type Framer interface { - NewFrameReader(r io.ReadCloser) io.ReadCloser - NewFrameWriter(w io.Writer) io.Writer -} - -// SerializerInfo contains information about a specific serialization format -type SerializerInfo struct { - // MediaType is the value that represents this serializer over the wire. - MediaType string - // EncodesAsText indicates this serializer can be encoded to UTF-8 safely. - EncodesAsText bool - // Serializer is the individual object serializer for this media type. - Serializer Serializer - // PrettySerializer, if set, can serialize this object in a form biased towards - // readability. - PrettySerializer Serializer - // StreamSerializer, if set, describes the streaming serialization format - // for this media type. - StreamSerializer *StreamSerializerInfo -} - -// StreamSerializerInfo contains information about a specific stream serialization format -type StreamSerializerInfo struct { - // EncodesAsText indicates this serializer can be encoded to UTF-8 safely. 
- EncodesAsText bool - // Serializer is the top level object serializer for this type when streaming - Serializer - // Framer is the factory for retrieving streams that separate objects on the wire - Framer -} - -// NegotiatedSerializer is an interface used for obtaining encoders, decoders, and serializers -// for multiple supported media types. This would commonly be accepted by a server component -// that performs HTTP content negotiation to accept multiple formats. -type NegotiatedSerializer interface { - // SupportedMediaTypes is the media types supported for reading and writing single objects. - SupportedMediaTypes() []SerializerInfo - - // EncoderForVersion returns an encoder that ensures objects being written to the provided - // serializer are in the provided group version. - EncoderForVersion(serializer Encoder, gv GroupVersioner) Encoder - // DecoderForVersion returns a decoder that ensures objects being read by the provided - // serializer are in the provided group version by default. - DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder -} - -// StorageSerializer is an interface used for obtaining encoders, decoders, and serializers -// that can read and write data at rest. This would commonly be used by client tools that must -// read files, or server side storage interfaces that persist restful objects. -type StorageSerializer interface { - // SupportedMediaTypes are the media types supported for reading and writing objects. - SupportedMediaTypes() []SerializerInfo - - // UniversalDeserializer returns a Serializer that can read objects in multiple supported formats - // by introspecting the data at rest. - UniversalDeserializer() Decoder - - // EncoderForVersion returns an encoder that ensures objects being written to the provided - // serializer are in the provided group version. - EncoderForVersion(serializer Encoder, gv GroupVersioner) Encoder - // DecoderForVersion returns a decoder that ensures objects being read by the provided - // serializer are in the provided group version by default. - DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder -} - -// NestedObjectEncoder is an optional interface that objects may implement to be given -// an opportunity to encode any nested Objects / RawExtensions during serialization. -type NestedObjectEncoder interface { - EncodeNestedObjects(e Encoder) error -} - -// NestedObjectDecoder is an optional interface that objects may implement to be given -// an opportunity to decode any nested Objects / RawExtensions during serialization. -type NestedObjectDecoder interface { - DecodeNestedObjects(d Decoder) error -} - -/////////////////////////////////////////////////////////////////////////////// -// Non-codec interfaces - -type ObjectDefaulter interface { - // Default takes an object (must be a pointer) and applies any default values. - // Defaulters may not error. - Default(in Object) -} - -type ObjectVersioner interface { - ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error) -} - -// ObjectConvertor converts an object to a different version. -type ObjectConvertor interface { - // Convert attempts to convert one object into another, or returns an error. This method does - // not guarantee the in object is not mutated. The context argument will be passed to - // all nested conversions. - Convert(in, out, context interface{}) error - // ConvertToVersion takes the provided object and converts it the provided version. This - // method does not guarantee that the in object is not mutated. 
This method is similar to - // Convert() but handles specific details of choosing the correct output version. - ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error) - ConvertFieldLabel(version, kind, label, value string) (string, string, error) -} - -// ObjectTyper contains methods for extracting the APIVersion and Kind -// of objects. -type ObjectTyper interface { - // ObjectKinds returns the all possible group,version,kind of the provided object, true if - // the object is unversioned, or an error if the object is not recognized - // (IsNotRegisteredError will return true). - ObjectKinds(Object) ([]unversioned.GroupVersionKind, bool, error) - // Recognizes returns true if the scheme is able to handle the provided version and kind, - // or more precisely that the provided version is a possible conversion or decoding - // target. - Recognizes(gvk unversioned.GroupVersionKind) bool -} - -// ObjectCreater contains methods for instantiating an object by kind and version. -type ObjectCreater interface { - New(kind unversioned.GroupVersionKind) (out Object, err error) -} - -// ObjectCopier duplicates an object. -type ObjectCopier interface { - // Copy returns an exact copy of the provided Object, or an error if the - // copy could not be completed. - Copy(Object) (Object, error) -} - -// ResourceVersioner provides methods for setting and retrieving -// the resource version from an API object. -type ResourceVersioner interface { - SetResourceVersion(obj Object, version string) error - ResourceVersion(obj Object) (string, error) -} - -// SelfLinker provides methods for setting and retrieving the SelfLink field of an API object. -type SelfLinker interface { - SetSelfLink(obj Object, selfLink string) error - SelfLink(obj Object) (string, error) - - // Knowing Name is sometimes necessary to use a SelfLinker. - Name(obj Object) (string, error) - // Knowing Namespace is sometimes necessary to use a SelfLinker - Namespace(obj Object) (string, error) -} - -// All API types registered with Scheme must support the Object interface. Since objects in a scheme are -// expected to be serialized to the wire, the interface an Object must provide to the Scheme allows -// serializers to set the kind, version, and group the object is represented as. An Object may choose -// to return a no-op ObjectKindAccessor in cases where it is not expected to be serialized. -type Object interface { - GetObjectKind() unversioned.ObjectKind -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/register.go b/vendor/k8s.io/kubernetes/pkg/runtime/register.go deleted file mode 100644 index 39a1eb14b8..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/register.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package runtime - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" -) - -// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta -func (obj *TypeMeta) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { - obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() -} - -// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta -func (obj *TypeMeta) GroupVersionKind() unversioned.GroupVersionKind { - return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) -} - -func (obj *Unknown) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } - -func (obj *Unstructured) GetObjectKind() unversioned.ObjectKind { return obj } -func (obj *UnstructuredList) GetObjectKind() unversioned.ObjectKind { return obj } - -// GetObjectKind implements Object for VersionedObjects, returning an empty ObjectKind -// interface if no objects are provided, or the ObjectKind interface of the object in the -// highest array position. -func (obj *VersionedObjects) GetObjectKind() unversioned.ObjectKind { - last := obj.Last() - if last == nil { - return unversioned.EmptyObjectKind - } - return last.GetObjectKind() -} - -// First returns the leftmost object in the VersionedObjects array, which is usually the -// object as serialized on the wire. -func (obj *VersionedObjects) First() Object { - if len(obj.Objects) == 0 { - return nil - } - return obj.Objects[0] -} - -// Last is the rightmost object in the VersionedObjects array, which is the object after -// all transformations have been applied. This is the same object that would be returned -// by Decode in a normal invocation (without VersionedObjects in the into argument). -func (obj *VersionedObjects) Last() Object { - if len(obj.Objects) == 0 { - return nil - } - return obj.Objects[len(obj.Objects)-1] -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/BUILD b/vendor/k8s.io/kubernetes/pkg/runtime/serializer/BUILD deleted file mode 100644 index 9dfbe6f625..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/BUILD +++ /dev/null @@ -1,45 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "codec_factory.go", - "negotiated_codec.go", - "protobuf_extension.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/runtime/serializer/json:go_default_library", - "//pkg/runtime/serializer/protobuf:go_default_library", - "//pkg/runtime/serializer/recognizer:go_default_library", - "//pkg/runtime/serializer/versioning:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["codec_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/diff:go_default_library", - "//vendor:github.com/ghodss/yaml", - "//vendor:github.com/google/gofuzz", - "//vendor:github.com/spf13/pflag", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/BUILD b/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/BUILD deleted file mode 100644 index 9f67bde559..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/BUILD +++ /dev/null @@ -1,49 +0,0 @@ -package(default_visibility = 
["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "json.go", - "meta.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/runtime/serializer/recognizer:go_default_library", - "//pkg/util/framer:go_default_library", - "//pkg/util/yaml:go_default_library", - "//vendor:github.com/ghodss/yaml", - "//vendor:github.com/ugorji/go/codec", - ], -) - -go_test( - name = "go_default_test", - srcs = ["meta_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) - -go_test( - name = "go_default_xtest", - srcs = ["json_test.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/runtime/serializer/json:go_default_library", - "//pkg/util/diff:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/json.go deleted file mode 100644 index c83ed588e2..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/json.go +++ /dev/null @@ -1,245 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package json - -import ( - "encoding/json" - "io" - - "github.com/ghodss/yaml" - "github.com/ugorji/go/codec" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer/recognizer" - "k8s.io/kubernetes/pkg/util/framer" - utilyaml "k8s.io/kubernetes/pkg/util/yaml" -) - -// NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer -// is not nil, the object has the group, version, and kind fields set. -func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer { - return &Serializer{ - meta: meta, - creater: creater, - typer: typer, - yaml: false, - pretty: pretty, - } -} - -// NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer -// is not nil, the object has the group, version, and kind fields set. This serializer supports only the subset of YAML that -// matches JSON, and will error if constructs are used that do not serialize to JSON. 
-func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { - return &Serializer{ - meta: meta, - creater: creater, - typer: typer, - yaml: true, - } -} - -type Serializer struct { - meta MetaFactory - creater runtime.ObjectCreater - typer runtime.ObjectTyper - yaml bool - pretty bool -} - -// Serializer implements Serializer -var _ runtime.Serializer = &Serializer{} -var _ recognizer.RecognizingDecoder = &Serializer{} - -// Decode attempts to convert the provided data into YAML or JSON, extract the stored schema kind, apply the provided default gvk, and then -// load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, the raw data will be -// extracted and no decoding will be performed. If into is not registered with the typer, then the object will be straight decoded using -// normal JSON/YAML unmarshalling. If into is provided and the original data is not fully qualified with kind/version/group, the type of -// the into will be used to alter the returned gvk. On success or most errors, the method will return the calculated schema kind. -func (s *Serializer) Decode(originalData []byte, gvk *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { - if versioned, ok := into.(*runtime.VersionedObjects); ok { - into = versioned.Last() - obj, actual, err := s.Decode(originalData, gvk, into) - if err != nil { - return nil, actual, err - } - versioned.Objects = []runtime.Object{obj} - return versioned, actual, nil - } - - data := originalData - if s.yaml { - altered, err := yaml.YAMLToJSON(data) - if err != nil { - return nil, nil, err - } - data = altered - } - - actual, err := s.meta.Interpret(data) - if err != nil { - return nil, nil, err - } - - if gvk != nil { - // apply kind and version defaulting from provided default - if len(actual.Kind) == 0 { - actual.Kind = gvk.Kind - } - if len(actual.Version) == 0 && len(actual.Group) == 0 { - actual.Group = gvk.Group - actual.Version = gvk.Version - } - if len(actual.Version) == 0 && actual.Group == gvk.Group { - actual.Version = gvk.Version - } - } - - if unk, ok := into.(*runtime.Unknown); ok && unk != nil { - unk.Raw = originalData - unk.ContentType = runtime.ContentTypeJSON - unk.GetObjectKind().SetGroupVersionKind(*actual) - return unk, actual, nil - } - - if into != nil { - types, _, err := s.typer.ObjectKinds(into) - switch { - case runtime.IsNotRegisteredError(err): - if err := codec.NewDecoderBytes(data, new(codec.JsonHandle)).Decode(into); err != nil { - return nil, actual, err - } - return into, actual, nil - case err != nil: - return nil, actual, err - default: - typed := types[0] - if len(actual.Kind) == 0 { - actual.Kind = typed.Kind - } - if len(actual.Version) == 0 && len(actual.Group) == 0 { - actual.Group = typed.Group - actual.Version = typed.Version - } - if len(actual.Version) == 0 && actual.Group == typed.Group { - actual.Version = typed.Version - } - } - } - - if len(actual.Kind) == 0 { - return nil, actual, runtime.NewMissingKindErr(string(originalData)) - } - if len(actual.Version) == 0 { - return nil, actual, runtime.NewMissingVersionErr(string(originalData)) - } - - // use the target if necessary - obj, err := runtime.UseOrCreateObject(s.typer, s.creater, *actual, into) - if err != nil { - return nil, actual, err - } - - if err := codec.NewDecoderBytes(data, new(codec.JsonHandle)).Decode(obj); err != nil { - return nil, actual, err - } - return obj, actual, 
nil -} - -// Encode serializes the provided object to the given writer. -func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { - if s.yaml { - json, err := json.Marshal(obj) - if err != nil { - return err - } - data, err := yaml.JSONToYAML(json) - if err != nil { - return err - } - _, err = w.Write(data) - return err - } - - if s.pretty { - data, err := json.MarshalIndent(obj, "", " ") - if err != nil { - return err - } - _, err = w.Write(data) - return err - } - encoder := json.NewEncoder(w) - return encoder.Encode(obj) -} - -// RecognizesData implements the RecognizingDecoder interface. -func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) { - if s.yaml { - // we could potentially look for '---' - return false, true, nil - } - _, ok = utilyaml.GuessJSONStream(peek, 2048) - return ok, false, nil -} - -// Framer is the default JSON framing behavior, with newlines delimiting individual objects. -var Framer = jsonFramer{} - -type jsonFramer struct{} - -// NewFrameWriter implements stream framing for this serializer -func (jsonFramer) NewFrameWriter(w io.Writer) io.Writer { - // we can write JSON objects directly to the writer, because they are self-framing - return w -} - -// NewFrameReader implements stream framing for this serializer -func (jsonFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { - // we need to extract the JSON chunks of data to pass to Decode() - return framer.NewJSONFramedReader(r) -} - -// Framer is the default JSON framing behavior, with newlines delimiting individual objects. -var YAMLFramer = yamlFramer{} - -type yamlFramer struct{} - -// NewFrameWriter implements stream framing for this serializer -func (yamlFramer) NewFrameWriter(w io.Writer) io.Writer { - return yamlFrameWriter{w} -} - -// NewFrameReader implements stream framing for this serializer -func (yamlFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { - // extract the YAML document chunks directly - return utilyaml.NewDocumentDecoder(r) -} - -type yamlFrameWriter struct { - w io.Writer -} - -// Write separates each document with the YAML document separator (`---` followed by line -// break). Writers must write well formed YAML documents (include a final line break). -func (w yamlFrameWriter) Write(data []byte) (n int, err error) { - if _, err := w.w.Write([]byte("---\n")); err != nil { - return 0, err - } - return w.w.Write(data) -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/meta.go b/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/meta.go deleted file mode 100644 index 0819354121..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/meta.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package json - -import ( - "encoding/json" - "fmt" - - "k8s.io/kubernetes/pkg/api/unversioned" -) - -// MetaFactory is used to store and retrieve the version and kind -// information for JSON objects in a serializer. 
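For reference, the Encode path removed above chooses between indented output and a streaming compact encoder; the trailing newline written by json.Encoder is what makes the newline-delimited JSON framing work. A minimal stdlib sketch of the same two branches (the typeMeta struct is a stand-in for a real API object):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type typeMeta struct {
	APIVersion string `json:"apiVersion,omitempty"`
	Kind       string `json:"kind,omitempty"`
}

func main() {
	obj := typeMeta{APIVersion: "v1", Kind: "Pod"}

	// pretty == true branch: indented output via MarshalIndent.
	pretty, _ := json.MarshalIndent(obj, "", "  ")
	fmt.Println(string(pretty))

	// Default branch: a streaming encoder writes compact JSON plus a trailing
	// newline, which is what newline-delimited framing relies on.
	json.NewEncoder(os.Stdout).Encode(obj)
}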
-type MetaFactory interface { - // Interpret should return the version and kind of the wire-format of - // the object. - Interpret(data []byte) (*unversioned.GroupVersionKind, error) -} - -// DefaultMetaFactory is a default factory for versioning objects in JSON. The object -// in memory and in the default JSON serialization will use the "kind" and "apiVersion" -// fields. -var DefaultMetaFactory = SimpleMetaFactory{} - -// SimpleMetaFactory provides default methods for retrieving the type and version of objects -// that are identified with an "apiVersion" and "kind" fields in their JSON -// serialization. It may be parameterized with the names of the fields in memory, or an -// optional list of base structs to search for those fields in memory. -type SimpleMetaFactory struct { -} - -// Interpret will return the APIVersion and Kind of the JSON wire-format -// encoding of an object, or an error. -func (SimpleMetaFactory) Interpret(data []byte) (*unversioned.GroupVersionKind, error) { - findKind := struct { - // +optional - APIVersion string `json:"apiVersion,omitempty"` - // +optional - Kind string `json:"kind,omitempty"` - }{} - if err := json.Unmarshal(data, &findKind); err != nil { - return nil, fmt.Errorf("couldn't get version/kind; json parse error: %v", err) - } - gv, err := unversioned.ParseGroupVersion(findKind.APIVersion) - if err != nil { - return nil, err - } - return &unversioned.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: findKind.Kind}, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/BUILD b/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/BUILD deleted file mode 100644 index 4e7b5e74f3..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/BUILD +++ /dev/null @@ -1,42 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "protobuf.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/runtime/serializer/recognizer:go_default_library", - "//pkg/util/framer:go_default_library", - "//vendor:github.com/gogo/protobuf/proto", - ], -) - -go_test( - name = "go_default_xtest", - srcs = ["protobuf_test.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/runtime/serializer/protobuf:go_default_library", - "//pkg/util/diff:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/recognizer/BUILD b/vendor/k8s.io/kubernetes/pkg/runtime/serializer/recognizer/BUILD deleted file mode 100644 index 491d6aab1e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/recognizer/BUILD +++ /dev/null @@ -1,21 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["recognizer.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/runtime:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/streaming/BUILD 
b/vendor/k8s.io/kubernetes/pkg/runtime/serializer/streaming/BUILD deleted file mode 100644 index 7ed73d4cd3..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/streaming/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["streaming.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/runtime:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["streaming_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/framer:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/versioning/BUILD b/vendor/k8s.io/kubernetes/pkg/runtime/serializer/versioning/BUILD deleted file mode 100644 index 0bd5efdd93..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/versioning/BUILD +++ /dev/null @@ -1,34 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["versioning.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/runtime:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["versioning_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/diff:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/types.go b/vendor/k8s.io/kubernetes/pkg/runtime/types.go deleted file mode 100644 index ceffdd9f77..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/types.go +++ /dev/null @@ -1,144 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -// Note that the types provided in this file are not versioned and are intended to be -// safe to use from within all versions of every API object. - -// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, -// like this: -// type MyAwesomeAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// ... // other fields -// } -// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *unversioned.GroupVersionKind) { unversioned.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind -// -// TypeMeta is provided here for convenience. You may use it directly from this package or define -// your own with the same fields. 
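The TypeMeta type deleted in this hunk carries only apiVersion and kind, which is exactly what the SimpleMetaFactory.Interpret removed earlier peeks at before any full decode happens. A hedged sketch of that peek using only encoding/json (interpret and wireTypeMeta are illustrative names, not part of the original API):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// wireTypeMeta mirrors the two fields the removed MetaFactory inspects.
type wireTypeMeta struct {
	APIVersion string `json:"apiVersion,omitempty"`
	Kind       string `json:"kind,omitempty"`
}

// interpret peeks at apiVersion/kind without decoding the rest of the object.
func interpret(data []byte) (group, version, kind string, err error) {
	var tm wireTypeMeta
	if err = json.Unmarshal(data, &tm); err != nil {
		return "", "", "", fmt.Errorf("couldn't get version/kind: %v", err)
	}
	// apiVersion is either "group/version" or a bare "version".
	parts := strings.SplitN(tm.APIVersion, "/", 2)
	if len(parts) == 2 {
		return parts[0], parts[1], tm.Kind, nil
	}
	return "", parts[0], tm.Kind, nil
}

func main() {
	raw := []byte(`{"apiVersion":"extensions/v1beta1","kind":"Ingress","metadata":{"name":"demo"}}`)
	g, v, k, err := interpret(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(g, v, k) // extensions v1beta1 Ingress
}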
-// -// +k8s:deepcopy-gen=true -// +protobuf=true -// +k8s:openapi-gen=true -type TypeMeta struct { - // +optional - APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"` - // +optional - Kind string `json:"kind,omitempty" yaml:"kind,omitempty" protobuf:"bytes,2,opt,name=kind"` -} - -const ( - ContentTypeJSON string = "application/json" -) - -// RawExtension is used to hold extensions in external versions. -// -// To use this, make a field which has RawExtension as its type in your external, versioned -// struct, and Object in your internal struct. You also need to register your -// various plugin types. -// -// // Internal package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.Object `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } -// -// // External package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.RawExtension `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } -// -// // On the wire, the JSON will look something like this: -// { -// "kind":"MyAPIObject", -// "apiVersion":"v1", -// "myPlugin": { -// "kind":"PluginA", -// "aOption":"foo", -// }, -// } -// -// So what happens? Decode first uses json or yaml to unmarshal the serialized data into -// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. -// The next step is to copy (using pkg/conversion) into the internal struct. The runtime -// package's DefaultScheme has conversion functions installed which will unpack the -// JSON stored in RawExtension, turning it into the correct object type, and storing it -// in the Object. (TODO: In the case where the object is of an unknown type, a -// runtime.Unknown object will be created and stored.) -// -// +k8s:deepcopy-gen=true -// +protobuf=true -// +k8s:openapi-gen=true -type RawExtension struct { - // Raw is the underlying serialization of this object. - // - // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. - Raw []byte `protobuf:"bytes,1,opt,name=raw"` - // Object can hold a representation of this extension - useful for working with versioned - // structs. - Object Object `json:"-"` -} - -// Unknown allows api objects with unknown types to be passed-through. This can be used -// to deal with the API objects from a plug-in. Unknown objects still have functioning -// TypeMeta features-- kind, version, etc. -// TODO: Make this object have easy access to field based accessors and settors for -// metadata and field mutatation. -// -// +k8s:deepcopy-gen=true -// +protobuf=true -// +k8s:openapi-gen=true -type Unknown struct { - TypeMeta `json:",inline" protobuf:"bytes,1,opt,name=typeMeta"` - // Raw will hold the complete serialized object which couldn't be matched - // with a registered type. Most likely, nothing should be done with this - // except for passing it through the system. - Raw []byte `protobuf:"bytes,2,opt,name=raw"` - // ContentEncoding is encoding used to encode 'Raw' data. - // Unspecified means no encoding. - ContentEncoding string `protobuf:"bytes,3,opt,name=contentEncoding"` - // ContentType is serialization method used to serialize 'Raw'. - // Unspecified means ContentTypeJSON. - ContentType string `protobuf:"bytes,4,opt,name=contentType"` -} - -// Unstructured allows objects that do not have Golang structs registered to be manipulated -// generically. 
This can be used to deal with the API objects from a plug-in. Unstructured -// objects still have functioning TypeMeta features-- kind, version, etc. -// TODO: Make this object have easy access to field based accessors and settors for -// metadata and field mutatation. -type Unstructured struct { - // Object is a JSON compatible map with string, float, int, []interface{}, or map[string]interface{} - // children. - Object map[string]interface{} -} - -// VersionedObjects is used by Decoders to give callers a way to access all versions -// of an object during the decoding process. -type VersionedObjects struct { - // Objects is the set of objects retrieved during decoding, in order of conversion. - // The 0 index is the object as serialized on the wire. If conversion has occurred, - // other objects may be present. The right most object is the same as would be returned - // by a normal Decode call. - Objects []Object -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/unstructured.go b/vendor/k8s.io/kubernetes/pkg/runtime/unstructured.go deleted file mode 100644 index 032e235ddf..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/unstructured.go +++ /dev/null @@ -1,612 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -import ( - "bytes" - gojson "encoding/json" - "errors" - "fmt" - "io" - "strings" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api/meta/metatypes" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util/json" -) - -// MarshalJSON ensures that the unstructured object produces proper -// JSON when passed to Go's standard JSON library. -func (u *Unstructured) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - err := UnstructuredJSONScheme.Encode(u, &buf) - return buf.Bytes(), err -} - -// UnmarshalJSON ensures that the unstructured object properly decodes -// JSON when passed to Go's standard JSON library. 
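The RawExtension documentation above describes a two-phase decode: the envelope is unmarshalled first, and the plugin payload is held as raw bytes until its concrete type is known. A small stdlib approximation uses json.RawMessage; the type and field names below reuse the doc comment's own example purely for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// External form: the plugin payload stays as raw, unparsed JSON.
type myAPIObject struct {
	Kind       string          `json:"kind"`
	APIVersion string          `json:"apiVersion"`
	MyPlugin   json.RawMessage `json:"myPlugin"`
}

type pluginA struct {
	Kind    string `json:"kind"`
	AOption string `json:"aOption"`
}

func main() {
	wire := []byte(`{"kind":"MyAPIObject","apiVersion":"v1","myPlugin":{"kind":"PluginA","aOption":"foo"}}`)

	// First pass: decode the envelope; myPlugin is stored but not unpacked.
	var obj myAPIObject
	if err := json.Unmarshal(wire, &obj); err != nil {
		panic(err)
	}

	// Second pass: now that the payload's kind is known, unpack it.
	var plugin pluginA
	if err := json.Unmarshal(obj.MyPlugin, &plugin); err != nil {
		panic(err)
	}
	fmt.Println(plugin.Kind, plugin.AOption) // PluginA foo
}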
-func (u *Unstructured) UnmarshalJSON(b []byte) error { - _, _, err := UnstructuredJSONScheme.Decode(b, nil, u) - return err -} - -func getNestedField(obj map[string]interface{}, fields ...string) interface{} { - var val interface{} = obj - for _, field := range fields { - if _, ok := val.(map[string]interface{}); !ok { - return nil - } - val = val.(map[string]interface{})[field] - } - return val -} - -func getNestedString(obj map[string]interface{}, fields ...string) string { - if str, ok := getNestedField(obj, fields...).(string); ok { - return str - } - return "" -} - -func getNestedSlice(obj map[string]interface{}, fields ...string) []string { - if m, ok := getNestedField(obj, fields...).([]interface{}); ok { - strSlice := make([]string, 0, len(m)) - for _, v := range m { - if str, ok := v.(string); ok { - strSlice = append(strSlice, str) - } - } - return strSlice - } - return nil -} - -func getNestedMap(obj map[string]interface{}, fields ...string) map[string]string { - if m, ok := getNestedField(obj, fields...).(map[string]interface{}); ok { - strMap := make(map[string]string, len(m)) - for k, v := range m { - if str, ok := v.(string); ok { - strMap[k] = str - } - } - return strMap - } - return nil -} - -func setNestedField(obj map[string]interface{}, value interface{}, fields ...string) { - m := obj - if len(fields) > 1 { - for _, field := range fields[0 : len(fields)-1] { - if _, ok := m[field].(map[string]interface{}); !ok { - m[field] = make(map[string]interface{}) - } - m = m[field].(map[string]interface{}) - } - } - m[fields[len(fields)-1]] = value -} - -func setNestedSlice(obj map[string]interface{}, value []string, fields ...string) { - m := make([]interface{}, 0, len(value)) - for _, v := range value { - m = append(m, v) - } - setNestedField(obj, m, fields...) -} - -func setNestedMap(obj map[string]interface{}, value map[string]string, fields ...string) { - m := make(map[string]interface{}, len(value)) - for k, v := range value { - m[k] = v - } - setNestedField(obj, m, fields...) -} - -func (u *Unstructured) setNestedField(value interface{}, fields ...string) { - if u.Object == nil { - u.Object = make(map[string]interface{}) - } - setNestedField(u.Object, value, fields...) -} - -func (u *Unstructured) setNestedSlice(value []string, fields ...string) { - if u.Object == nil { - u.Object = make(map[string]interface{}) - } - setNestedSlice(u.Object, value, fields...) -} - -func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) { - if u.Object == nil { - u.Object = make(map[string]interface{}) - } - setNestedMap(u.Object, value, fields...) 
-} - -func extractOwnerReference(src interface{}) metatypes.OwnerReference { - v := src.(map[string]interface{}) - controllerPtr, ok := (getNestedField(v, "controller")).(*bool) - if !ok { - controllerPtr = nil - } else { - if controllerPtr != nil { - controller := *controllerPtr - controllerPtr = &controller - } - } - return metatypes.OwnerReference{ - Kind: getNestedString(v, "kind"), - Name: getNestedString(v, "name"), - APIVersion: getNestedString(v, "apiVersion"), - UID: (types.UID)(getNestedString(v, "uid")), - Controller: controllerPtr, - } -} - -func setOwnerReference(src metatypes.OwnerReference) map[string]interface{} { - ret := make(map[string]interface{}) - controllerPtr := src.Controller - if controllerPtr != nil { - controller := *controllerPtr - controllerPtr = &controller - } - setNestedField(ret, src.Kind, "kind") - setNestedField(ret, src.Name, "name") - setNestedField(ret, src.APIVersion, "apiVersion") - setNestedField(ret, string(src.UID), "uid") - setNestedField(ret, controllerPtr, "controller") - return ret -} - -func getOwnerReferences(object map[string]interface{}) ([]map[string]interface{}, error) { - field := getNestedField(object, "metadata", "ownerReferences") - if field == nil { - return nil, fmt.Errorf("cannot find field metadata.ownerReferences in %v", object) - } - ownerReferences, ok := field.([]map[string]interface{}) - if ok { - return ownerReferences, nil - } - // TODO: This is hacky... - interfaces, ok := field.([]interface{}) - if !ok { - return nil, fmt.Errorf("expect metadata.ownerReferences to be a slice in %#v", object) - } - ownerReferences = make([]map[string]interface{}, 0, len(interfaces)) - for i := 0; i < len(interfaces); i++ { - r, ok := interfaces[i].(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("expect element metadata.ownerReferences to be a map[string]interface{} in %#v", object) - } - ownerReferences = append(ownerReferences, r) - } - return ownerReferences, nil -} - -func (u *Unstructured) GetOwnerReferences() []metatypes.OwnerReference { - original, err := getOwnerReferences(u.Object) - if err != nil { - glog.V(6).Info(err) - return nil - } - ret := make([]metatypes.OwnerReference, 0, len(original)) - for i := 0; i < len(original); i++ { - ret = append(ret, extractOwnerReference(original[i])) - } - return ret -} - -func (u *Unstructured) SetOwnerReferences(references []metatypes.OwnerReference) { - var newReferences = make([]map[string]interface{}, 0, len(references)) - for i := 0; i < len(references); i++ { - newReferences = append(newReferences, setOwnerReference(references[i])) - } - u.setNestedField(newReferences, "metadata", "ownerReferences") -} - -func (u *Unstructured) GetAPIVersion() string { - return getNestedString(u.Object, "apiVersion") -} - -func (u *Unstructured) SetAPIVersion(version string) { - u.setNestedField(version, "apiVersion") -} - -func (u *Unstructured) GetKind() string { - return getNestedString(u.Object, "kind") -} - -func (u *Unstructured) SetKind(kind string) { - u.setNestedField(kind, "kind") -} - -func (u *Unstructured) GetNamespace() string { - return getNestedString(u.Object, "metadata", "namespace") -} - -func (u *Unstructured) SetNamespace(namespace string) { - u.setNestedField(namespace, "metadata", "namespace") -} - -func (u *Unstructured) GetName() string { - return getNestedString(u.Object, "metadata", "name") -} - -func (u *Unstructured) SetName(name string) { - u.setNestedField(name, "metadata", "name") -} - -func (u *Unstructured) GetGenerateName() string { - return 
getNestedString(u.Object, "metadata", "generateName") -} - -func (u *Unstructured) SetGenerateName(name string) { - u.setNestedField(name, "metadata", "generateName") -} - -func (u *Unstructured) GetUID() types.UID { - return types.UID(getNestedString(u.Object, "metadata", "uid")) -} - -func (u *Unstructured) SetUID(uid types.UID) { - u.setNestedField(string(uid), "metadata", "uid") -} - -func (u *Unstructured) GetResourceVersion() string { - return getNestedString(u.Object, "metadata", "resourceVersion") -} - -func (u *Unstructured) SetResourceVersion(version string) { - u.setNestedField(version, "metadata", "resourceVersion") -} - -func (u *Unstructured) GetSelfLink() string { - return getNestedString(u.Object, "metadata", "selfLink") -} - -func (u *Unstructured) SetSelfLink(selfLink string) { - u.setNestedField(selfLink, "metadata", "selfLink") -} - -func (u *Unstructured) GetCreationTimestamp() unversioned.Time { - var timestamp unversioned.Time - timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "creationTimestamp")) - return timestamp -} - -func (u *Unstructured) SetCreationTimestamp(timestamp unversioned.Time) { - ts, _ := timestamp.MarshalQueryParameter() - u.setNestedField(ts, "metadata", "creationTimestamp") -} - -func (u *Unstructured) GetDeletionTimestamp() *unversioned.Time { - var timestamp unversioned.Time - timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "deletionTimestamp")) - if timestamp.IsZero() { - return nil - } - return &timestamp -} - -func (u *Unstructured) SetDeletionTimestamp(timestamp *unversioned.Time) { - ts, _ := timestamp.MarshalQueryParameter() - u.setNestedField(ts, "metadata", "deletionTimestamp") -} - -func (u *Unstructured) GetLabels() map[string]string { - return getNestedMap(u.Object, "metadata", "labels") -} - -func (u *Unstructured) SetLabels(labels map[string]string) { - u.setNestedMap(labels, "metadata", "labels") -} - -func (u *Unstructured) GetAnnotations() map[string]string { - return getNestedMap(u.Object, "metadata", "annotations") -} - -func (u *Unstructured) SetAnnotations(annotations map[string]string) { - u.setNestedMap(annotations, "metadata", "annotations") -} - -func (u *Unstructured) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { - u.SetAPIVersion(gvk.GroupVersion().String()) - u.SetKind(gvk.Kind) -} - -func (u *Unstructured) GroupVersionKind() unversioned.GroupVersionKind { - gv, err := unversioned.ParseGroupVersion(u.GetAPIVersion()) - if err != nil { - return unversioned.GroupVersionKind{} - } - gvk := gv.WithKind(u.GetKind()) - return gvk -} - -func (u *Unstructured) GetFinalizers() []string { - return getNestedSlice(u.Object, "metadata", "finalizers") -} - -func (u *Unstructured) SetFinalizers(finalizers []string) { - u.setNestedSlice(finalizers, "metadata", "finalizers") -} - -func (u *Unstructured) GetClusterName() string { - return getNestedString(u.Object, "metadata", "clusterName") -} - -func (u *Unstructured) SetClusterName(clusterName string) { - u.setNestedField(clusterName, "metadata", "clusterName") -} - -// UnstructuredList allows lists that do not have Golang structs -// registered to be manipulated generically. This can be used to deal -// with the API lists from a plug-in. -type UnstructuredList struct { - Object map[string]interface{} - - // Items is a list of unstructured objects. - Items []*Unstructured `json:"items"` -} - -// MarshalJSON ensures that the unstructured list object produces proper -// JSON when passed to Go's standard JSON library.
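All of the accessors above reduce to walking a path of keys through the untyped map, via the getNested*/setNested* helpers earlier in this hunk. A compact sketch of that pattern with stdlib only (nestedString and setNested are illustrative stand-ins, not the removed functions):

package main

import (
	"encoding/json"
	"fmt"
)

// nestedString walks the field path and returns a string if one is found.
func nestedString(obj map[string]interface{}, fields ...string) string {
	var cur interface{} = obj
	for _, f := range fields {
		m, ok := cur.(map[string]interface{})
		if !ok {
			return ""
		}
		cur = m[f]
	}
	s, _ := cur.(string)
	return s
}

// setNested creates intermediate maps as needed, like the removed setNestedField.
func setNested(obj map[string]interface{}, value interface{}, fields ...string) {
	m := obj
	for _, f := range fields[:len(fields)-1] {
		next, ok := m[f].(map[string]interface{})
		if !ok {
			next = map[string]interface{}{}
			m[f] = next
		}
		m = next
	}
	m[fields[len(fields)-1]] = value
}

func main() {
	var u map[string]interface{}
	json.Unmarshal([]byte(`{"kind":"Pod","metadata":{"name":"demo"}}`), &u)

	fmt.Println(nestedString(u, "metadata", "name")) // demo
	setNested(u, "web", "metadata", "labels", "app")
	fmt.Println(nestedString(u, "metadata", "labels", "app")) // web
}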
-func (u *UnstructuredList) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - err := UnstructuredJSONScheme.Encode(u, &buf) - return buf.Bytes(), err -} - -// UnmarshalJSON ensures that the unstructured list object properly -// decodes JSON when passed to Go's standard JSON library. -func (u *UnstructuredList) UnmarshalJSON(b []byte) error { - _, _, err := UnstructuredJSONScheme.Decode(b, nil, u) - return err -} - -func (u *UnstructuredList) setNestedField(value interface{}, fields ...string) { - if u.Object == nil { - u.Object = make(map[string]interface{}) - } - setNestedField(u.Object, value, fields...) -} - -func (u *UnstructuredList) GetAPIVersion() string { - return getNestedString(u.Object, "apiVersion") -} - -func (u *UnstructuredList) SetAPIVersion(version string) { - u.setNestedField(version, "apiVersion") -} - -func (u *UnstructuredList) GetKind() string { - return getNestedString(u.Object, "kind") -} - -func (u *UnstructuredList) SetKind(kind string) { - u.setNestedField(kind, "kind") -} - -func (u *UnstructuredList) GetResourceVersion() string { - return getNestedString(u.Object, "metadata", "resourceVersion") -} - -func (u *UnstructuredList) SetResourceVersion(version string) { - u.setNestedField(version, "metadata", "resourceVersion") -} - -func (u *UnstructuredList) GetSelfLink() string { - return getNestedString(u.Object, "metadata", "selfLink") -} - -func (u *UnstructuredList) SetSelfLink(selfLink string) { - u.setNestedField(selfLink, "metadata", "selfLink") -} - -func (u *UnstructuredList) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { - u.SetAPIVersion(gvk.GroupVersion().String()) - u.SetKind(gvk.Kind) -} - -func (u *UnstructuredList) GroupVersionKind() unversioned.GroupVersionKind { - gv, err := unversioned.ParseGroupVersion(u.GetAPIVersion()) - if err != nil { - return unversioned.GroupVersionKind{} - } - gvk := gv.WithKind(u.GetKind()) - return gvk -} - -// UnstructuredJSONScheme is capable of converting JSON data into the Unstructured -// type, which can be used for generic access to objects without a predefined scheme. -// TODO: move into serializer/json. -var UnstructuredJSONScheme Codec = unstructuredJSONScheme{} - -type unstructuredJSONScheme struct{} - -func (s unstructuredJSONScheme) Decode(data []byte, _ *unversioned.GroupVersionKind, obj Object) (Object, *unversioned.GroupVersionKind, error) { - var err error - if obj != nil { - err = s.decodeInto(data, obj) - } else { - obj, err = s.decode(data) - } - - if err != nil { - return nil, nil, err - } - - gvk := obj.GetObjectKind().GroupVersionKind() - if len(gvk.Kind) == 0 { - return nil, &gvk, NewMissingKindErr(string(data)) - } - - return obj, &gvk, nil -} - -func (unstructuredJSONScheme) Encode(obj Object, w io.Writer) error { - switch t := obj.(type) { - case *Unstructured: - return json.NewEncoder(w).Encode(t.Object) - case *UnstructuredList: - items := make([]map[string]interface{}, 0, len(t.Items)) - for _, i := range t.Items { - items = append(items, i.Object) - } - t.Object["items"] = items - defer func() { delete(t.Object, "items") }() - return json.NewEncoder(w).Encode(t.Object) - case *Unknown: - // TODO: Unstructured needs to deal with ContentType. 
- _, err := w.Write(t.Raw) - return err - default: - return json.NewEncoder(w).Encode(t) - } -} - -func (s unstructuredJSONScheme) decode(data []byte) (Object, error) { - type detector struct { - Items gojson.RawMessage - } - var det detector - if err := json.Unmarshal(data, &det); err != nil { - return nil, err - } - - if det.Items != nil { - list := &UnstructuredList{} - err := s.decodeToList(data, list) - return list, err - } - - // No Items field, so it wasn't a list. - unstruct := &Unstructured{} - err := s.decodeToUnstructured(data, unstruct) - return unstruct, err -} - -func (s unstructuredJSONScheme) decodeInto(data []byte, obj Object) error { - switch x := obj.(type) { - case *Unstructured: - return s.decodeToUnstructured(data, x) - case *UnstructuredList: - return s.decodeToList(data, x) - case *VersionedObjects: - o, err := s.decode(data) - if err == nil { - x.Objects = []Object{o} - } - return err - default: - return json.Unmarshal(data, x) - } -} - -func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstructured) error { - m := make(map[string]interface{}) - if err := json.Unmarshal(data, &m); err != nil { - return err - } - - unstruct.Object = m - - return nil -} - -func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error { - type decodeList struct { - Items []gojson.RawMessage - } - - var dList decodeList - if err := json.Unmarshal(data, &dList); err != nil { - return err - } - - if err := json.Unmarshal(data, &list.Object); err != nil { - return err - } - - // For typed lists, e.g., a PodList, API server doesn't set each item's - // APIVersion and Kind. We need to set it. - listAPIVersion := list.GetAPIVersion() - listKind := list.GetKind() - itemKind := strings.TrimSuffix(listKind, "List") - - delete(list.Object, "items") - list.Items = nil - for _, i := range dList.Items { - unstruct := &Unstructured{} - if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil { - return err - } - // This is hacky. Set the item's Kind and APIVersion to those inferred - // from the List. - if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 { - unstruct.SetKind(itemKind) - unstruct.SetAPIVersion(listAPIVersion) - } - list.Items = append(list.Items, unstruct) - } - return nil -} - -// UnstructuredObjectConverter is an ObjectConverter for use with -// Unstructured objects. Since it has no schema or type information, -// it will only succeed for no-op conversions. This is provided as a -// sane implementation for APIs that require an object converter. -type UnstructuredObjectConverter struct{} - -func (UnstructuredObjectConverter) Convert(in, out, context interface{}) error { - unstructIn, ok := in.(*Unstructured) - if !ok { - return fmt.Errorf("input type %T in not valid for unstructured conversion", in) - } - - unstructOut, ok := out.(*Unstructured) - if !ok { - return fmt.Errorf("output type %T in not valid for unstructured conversion", out) - } - - // maybe deep copy the map? It is documented in the - // ObjectConverter interface that this function is not - // guaranteeed to not mutate the input. Or maybe set the input - // object to nil. - unstructOut.Object = unstructIn.Object - return nil -} - -func (UnstructuredObjectConverter) ConvertToVersion(in Object, target GroupVersioner) (Object, error) { - if kind := in.GetObjectKind().GroupVersionKind(); !kind.Empty() { - gvk, ok := target.KindForGroupVersionKinds([]unversioned.GroupVersionKind{kind}) - if !ok { - // TODO: should this be a typed error? 
- return nil, fmt.Errorf("%v is unstructured and is not suitable for converting to %q", kind, target) - } - in.GetObjectKind().SetGroupVersionKind(gvk) - } - return in, nil -} - -func (UnstructuredObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { - return "", "", errors.New("unstructured cannot convert field labels") -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/runtime/zz_generated.deepcopy.go deleted file mode 100644 index 627c22b63a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/zz_generated.deepcopy.go +++ /dev/null @@ -1,75 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package runtime - -import ( - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func DeepCopy_runtime_RawExtension(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RawExtension) - out := out.(*RawExtension) - if in.Raw != nil { - in, out := &in.Raw, &out.Raw - *out = make([]byte, len(*in)) - copy(*out, *in) - } else { - out.Raw = nil - } - if in.Object == nil { - out.Object = nil - } else if newVal, err := c.DeepCopy(&in.Object); err != nil { - return err - } else { - out.Object = *newVal.(*Object) - } - return nil - } -} - -func DeepCopy_runtime_TypeMeta(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TypeMeta) - out := out.(*TypeMeta) - out.APIVersion = in.APIVersion - out.Kind = in.Kind - return nil - } -} - -func DeepCopy_runtime_Unknown(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Unknown) - out := out.(*Unknown) - out.TypeMeta = in.TypeMeta - if in.Raw != nil { - in, out := &in.Raw, &out.Raw - *out = make([]byte, len(*in)) - copy(*out, *in) - } else { - out.Raw = nil - } - out.ContentEncoding = in.ContentEncoding - out.ContentType = in.ContentType - return nil - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/security/apparmor/BUILD b/vendor/k8s.io/kubernetes/pkg/security/apparmor/BUILD deleted file mode 100644 index 63cbee69b4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/security/apparmor/BUILD +++ /dev/null @@ -1,39 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "helpers.go", - "validate.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/util:go_default_library", - "//pkg/util/config:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["validate_test.go"], - data = [ - "testdata/profiles", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//vendor:github.com/stretchr/testify/assert", - ], -) diff --git 
a/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go b/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go deleted file mode 100644 index eb576bf382..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apparmor - -import ( - "strings" - - "k8s.io/kubernetes/pkg/api" -) - -// TODO: Move these values into the API package. -const ( - // The prefix to an annotation key specifying a container profile. - ContainerAnnotationKeyPrefix = "container.apparmor.security.beta.kubernetes.io/" - // The annotation key specifying the default AppArmor profile. - DefaultProfileAnnotationKey = "apparmor.security.beta.kubernetes.io/defaultProfileName" - // The annotation key specifying the allowed AppArmor profiles. - AllowedProfilesAnnotationKey = "apparmor.security.beta.kubernetes.io/allowedProfileNames" - - // The profile specifying the runtime default. - ProfileRuntimeDefault = "runtime/default" - // The prefix for specifying profiles loaded on the node. - ProfileNamePrefix = "localhost/" -) - -// Checks whether app armor is required for pod to be run. -func isRequired(pod *api.Pod) bool { - for key := range pod.Annotations { - if strings.HasPrefix(key, ContainerAnnotationKeyPrefix) { - return true - } - } - return false -} - -// Returns the name of the profile to use with the container. -func GetProfileName(pod *api.Pod, containerName string) string { - return GetProfileNameFromPodAnnotations(pod.Annotations, containerName) -} - -// GetProfileNameFromPodAnnotations gets the name of the profile to use with container from -// pod annotations -func GetProfileNameFromPodAnnotations(annotations map[string]string, containerName string) string { - return annotations[ContainerAnnotationKeyPrefix+containerName] -} - -// Sets the name of the profile to use with the container. -func SetProfileName(pod *api.Pod, containerName, profileName string) error { - if pod.Annotations == nil { - pod.Annotations = map[string]string{} - } - pod.Annotations[ContainerAnnotationKeyPrefix+containerName] = profileName - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go b/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go deleted file mode 100644 index 79790c2a0c..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go +++ /dev/null @@ -1,227 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package apparmor - -import ( - "bufio" - "errors" - "fmt" - "io/ioutil" - "os" - "path" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/util" - utilconfig "k8s.io/kubernetes/pkg/util/config" -) - -// Whether AppArmor should be disabled by default. -// Set to true if the wrong build tags are set (see validate_disabled.go). -var isDisabledBuild bool - -// Interface for validating that a pod with with an AppArmor profile can be run by a Node. -type Validator interface { - Validate(pod *api.Pod) error - ValidateHost() error -} - -func NewValidator(runtime string) Validator { - if err := validateHost(runtime); err != nil { - return &validator{validateHostErr: err} - } - appArmorFS, err := getAppArmorFS() - if err != nil { - return &validator{ - validateHostErr: fmt.Errorf("error finding AppArmor FS: %v", err), - } - } - return &validator{ - appArmorFS: appArmorFS, - } -} - -type validator struct { - validateHostErr error - appArmorFS string -} - -func (v *validator) Validate(pod *api.Pod) error { - if !isRequired(pod) { - return nil - } - - if v.ValidateHost() != nil { - return v.validateHostErr - } - - loadedProfiles, err := v.getLoadedProfiles() - if err != nil { - return fmt.Errorf("could not read loaded profiles: %v", err) - } - - for _, container := range pod.Spec.InitContainers { - if err := validateProfile(GetProfileName(pod, container.Name), loadedProfiles); err != nil { - return err - } - } - for _, container := range pod.Spec.Containers { - if err := validateProfile(GetProfileName(pod, container.Name), loadedProfiles); err != nil { - return err - } - } - - return nil -} - -func (v *validator) ValidateHost() error { - return v.validateHostErr -} - -// Verify that the host and runtime is capable of enforcing AppArmor profiles. -func validateHost(runtime string) error { - // Check feature-gates - if !utilconfig.DefaultFeatureGate.AppArmor() { - return errors.New("AppArmor disabled by feature-gate") - } - - // Check build support. - if isDisabledBuild { - return errors.New("Binary not compiled for linux") - } - - // Check kernel support. - if !IsAppArmorEnabled() { - return errors.New("AppArmor is not enabled on the host") - } - - // Check runtime support. Currently only Docker is supported. - if runtime != "docker" { - return fmt.Errorf("AppArmor is only enabled for 'docker' runtime. Found: %q.", runtime) - } - - return nil -} - -// Verify that the profile is valid and loaded. 
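The validation code that follows accepts three shapes of profile reference, per the constants removed in helpers.go above: the empty string, the literal runtime/default, and localhost/<name> for profiles loaded on the node. A hedged sketch of that shape check (the sample profile names are invented for the demo):

package main

import (
	"fmt"
	"strings"
)

const (
	profileRuntimeDefault = "runtime/default" // use the container runtime's default profile
	profileNamePrefix     = "localhost/"      // refers to a profile loaded on the node
)

// validateProfileFormat mirrors the shape check in the removed code: empty and
// runtime/default are always fine, anything else must start with localhost/.
func validateProfileFormat(profile string) error {
	if profile == "" || profile == profileRuntimeDefault {
		return nil
	}
	if !strings.HasPrefix(profile, profileNamePrefix) {
		return fmt.Errorf("invalid AppArmor profile name: %q", profile)
	}
	return nil
}

func main() {
	for _, p := range []string{"", "runtime/default", "localhost/nginx-hardened", "unconfined"} {
		fmt.Printf("%-28q -> %v\n", p, validateProfileFormat(p))
	}
}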
-func validateProfile(profile string, loadedProfiles map[string]bool) error { - if err := ValidateProfileFormat(profile); err != nil { - return err - } - - if strings.HasPrefix(profile, ProfileNamePrefix) { - profileName := strings.TrimPrefix(profile, ProfileNamePrefix) - if !loadedProfiles[profileName] { - return fmt.Errorf("profile %q is not loaded", profileName) - } - } - - return nil -} - -func ValidateProfileFormat(profile string) error { - if profile == "" || profile == ProfileRuntimeDefault { - return nil - } - if !strings.HasPrefix(profile, ProfileNamePrefix) { - return fmt.Errorf("invalid AppArmor profile name: %q", profile) - } - return nil -} - -func (v *validator) getLoadedProfiles() (map[string]bool, error) { - profilesPath := path.Join(v.appArmorFS, "profiles") - profilesFile, err := os.Open(profilesPath) - if err != nil { - return nil, fmt.Errorf("failed to open %s: %v", profilesPath, err) - } - defer profilesFile.Close() - - profiles := map[string]bool{} - scanner := bufio.NewScanner(profilesFile) - for scanner.Scan() { - profileName := parseProfileName(scanner.Text()) - if profileName == "" { - // Unknown line format; skip it. - continue - } - profiles[profileName] = true - } - return profiles, nil -} - -// The profiles file is formatted with one profile per line, matching a form: -// namespace://profile-name (mode) -// profile-name (mode) -// Where mode is {enforce, complain, kill}. The "namespace://" is only included for namespaced -// profiles. For the purposes of Kubernetes, we consider the namespace part of the profile name. -func parseProfileName(profileLine string) string { - modeIndex := strings.IndexRune(profileLine, '(') - if modeIndex < 0 { - return "" - } - return strings.TrimSpace(profileLine[:modeIndex]) -} - -func getAppArmorFS() (string, error) { - mountsFile, err := os.Open("/proc/mounts") - if err != nil { - return "", fmt.Errorf("could not open /proc/mounts: %v", err) - } - defer mountsFile.Close() - - scanner := bufio.NewScanner(mountsFile) - for scanner.Scan() { - fields := strings.Fields(scanner.Text()) - if len(fields) < 3 { - // Unknown line format; skip it. - continue - } - if fields[2] == "securityfs" { - appArmorFS := path.Join(fields[1], "apparmor") - if ok, err := util.FileExists(appArmorFS); !ok { - msg := fmt.Sprintf("path %s does not exist", appArmorFS) - if err != nil { - return "", fmt.Errorf("%s: %v", msg, err) - } else { - return "", errors.New(msg) - } - } else { - return appArmorFS, nil - } - } - } - if err := scanner.Err(); err != nil { - return "", fmt.Errorf("error scanning mounts: %v", err) - } - - return "", errors.New("securityfs not found") -} - -// IsAppArmorEnabled returns true if apparmor is enabled for the host. -// This function is forked from -// https://github.com/opencontainers/runc/blob/1a81e9ab1f138c091fe5c86d0883f87716088527/libcontainer/apparmor/apparmor.go -// to avoid the libapparmor dependency. 
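The comment above spells out the profiles-file format ("profile-name (mode)", optionally prefixed with "namespace://"), and the parser simply keeps everything before the mode annotation. A short sketch of that parsing rule against invented sample lines:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

// parseProfileName keeps everything before the "(mode)" annotation, trimmed.
func parseProfileName(line string) string {
	i := strings.IndexRune(line, '(')
	if i < 0 {
		return "" // unknown line format; callers skip it
	}
	return strings.TrimSpace(line[:i])
}

func main() {
	sample := "docker-default (enforce)\nsome-namespace://my-profile (complain)\nsomething without a mode\n"

	loaded := map[string]bool{}
	scanner := bufio.NewScanner(strings.NewReader(sample))
	for scanner.Scan() {
		if name := parseProfileName(scanner.Text()); name != "" {
			loaded[name] = true
		}
	}
	fmt.Println(loaded["docker-default"], loaded["some-namespace://my-profile"]) // true true
}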
-func IsAppArmorEnabled() bool { - if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil && os.Getenv("container") == "" { - if _, err = os.Stat("/sbin/apparmor_parser"); err == nil { - buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled") - return err == nil && len(buf) > 1 && buf[0] == 'Y' - } - } - return false -} diff --git a/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate_disabled.go b/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate_disabled.go deleted file mode 100644 index 875054a941..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate_disabled.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build !linux - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apparmor - -func init() { - // If Kubernetes was not built for linux, apparmor is always disabled. - isDisabledBuild = true -} diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp/BUILD b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp/BUILD deleted file mode 100644 index ee7ed1c207..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp/BUILD +++ /dev/null @@ -1,29 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["strategy.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/util/validation/field:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["strategy_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = ["//pkg/api:go_default_library"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp/strategy.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp/strategy.go deleted file mode 100644 index fd0b32e657..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp/strategy.go +++ /dev/null @@ -1,149 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package seccomp - -import ( - "fmt" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/util/validation/field" -) - -const ( - // AllowAny is the wildcard used to allow any profile. - AllowAny = "*" - // The annotation key specifying the default seccomp profile. 
- DefaultProfileAnnotationKey = "seccomp.security.alpha.kubernetes.io/defaultProfileName" - // The annotation key specifying the allowed seccomp profiles. - AllowedProfilesAnnotationKey = "seccomp.security.alpha.kubernetes.io/allowedProfileNames" -) - -// Strategy defines the interface for all seccomp constraint strategies. -type Strategy interface { - // Generate returns a profile based on constraint rules. - Generate(annotations map[string]string, pod *api.Pod) (string, error) - // Validate ensures that the specified values fall within the range of the strategy. - ValidatePod(pod *api.Pod) field.ErrorList - // Validate ensures that the specified values fall within the range of the strategy. - ValidateContainer(pod *api.Pod, container *api.Container) field.ErrorList -} - -type strategy struct { - defaultProfile string - allowedProfiles map[string]bool - // For printing error messages (preserves order). - allowedProfilesString string - // does the strategy allow any profile (wildcard) - allowAnyProfile bool -} - -var _ Strategy = &strategy{} - -// NewStrategy creates a new strategy that enforces seccomp profile constraints. -func NewStrategy(pspAnnotations map[string]string) Strategy { - var allowedProfiles map[string]bool - allowAnyProfile := false - if allowed, ok := pspAnnotations[AllowedProfilesAnnotationKey]; ok { - profiles := strings.Split(allowed, ",") - allowedProfiles = make(map[string]bool, len(profiles)) - for _, p := range profiles { - if p == AllowAny { - allowAnyProfile = true - continue - } - allowedProfiles[p] = true - } - } - return &strategy{ - defaultProfile: pspAnnotations[DefaultProfileAnnotationKey], - allowedProfiles: allowedProfiles, - allowedProfilesString: pspAnnotations[AllowedProfilesAnnotationKey], - allowAnyProfile: allowAnyProfile, - } -} - -// Generate returns a profile based on constraint rules. -func (s *strategy) Generate(annotations map[string]string, pod *api.Pod) (string, error) { - if annotations[api.SeccompPodAnnotationKey] != "" { - // Profile already set, nothing to do. - return annotations[api.SeccompPodAnnotationKey], nil - } - return s.defaultProfile, nil -} - -// ValidatePod ensures that the specified values on the pod fall within the range -// of the strategy. -func (s *strategy) ValidatePod(pod *api.Pod) field.ErrorList { - allErrs := field.ErrorList{} - podSpecFieldPath := field.NewPath("pod", "metadata", "annotations").Key(api.SeccompPodAnnotationKey) - podProfile := pod.Annotations[api.SeccompPodAnnotationKey] - - if !s.allowAnyProfile && len(s.allowedProfiles) == 0 && podProfile != "" { - allErrs = append(allErrs, field.Forbidden(podSpecFieldPath, "seccomp may not be set")) - return allErrs - } - - if !s.profileAllowed(podProfile) { - msg := fmt.Sprintf("%s is not an allowed seccomp profile. Valid values are %v", podProfile, s.allowedProfilesString) - allErrs = append(allErrs, field.Forbidden(podSpecFieldPath, msg)) - } - - return allErrs -} - -// ValidateContainer ensures that the specified values on the container fall within -// the range of the strategy. 
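NewStrategy above derives its policy from a comma-separated allowed-profiles annotation, where "*" admits any profile and an absent list combined with an empty profile falls back to the runtime default. A rough stand-alone sketch of that rule (seccompPolicy and the annotation values are illustrative, not the removed types):

package main

import (
	"fmt"
	"strings"
)

type seccompPolicy struct {
	allowAny bool
	allowed  map[string]bool
}

// newPolicy parses a comma-separated allowed-profiles annotation, much as the
// removed NewStrategy does.
func newPolicy(annotation string) seccompPolicy {
	p := seccompPolicy{allowed: map[string]bool{}}
	if annotation == "" {
		return p
	}
	for _, prof := range strings.Split(annotation, ",") {
		if prof == "*" {
			p.allowAny = true
			continue
		}
		p.allowed[prof] = true
	}
	return p
}

// profileAllowed mirrors the removed check: an empty profile with no configured
// list is tolerated so that the runtime default can apply.
func (p seccompPolicy) profileAllowed(profile string) bool {
	if len(p.allowed) == 0 && !p.allowAny && profile == "" {
		return true
	}
	return p.allowAny || p.allowed[profile]
}

func main() {
	p := newPolicy("docker/default,localhost/audit")
	fmt.Println(p.profileAllowed("docker/default"))        // true
	fmt.Println(p.profileAllowed("unconfined"))            // false
	fmt.Println(newPolicy("*").profileAllowed("anything")) // true
}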
-func (s *strategy) ValidateContainer(pod *api.Pod, container *api.Container) field.ErrorList { - allErrs := field.ErrorList{} - fieldPath := field.NewPath("pod", "metadata", "annotations").Key(api.SeccompContainerAnnotationKeyPrefix + container.Name) - containerProfile := profileForContainer(pod, container) - - if !s.allowAnyProfile && len(s.allowedProfiles) == 0 && containerProfile != "" { - allErrs = append(allErrs, field.Forbidden(fieldPath, "seccomp may not be set")) - return allErrs - } - - if !s.profileAllowed(containerProfile) { - msg := fmt.Sprintf("%s is not an allowed seccomp profile. Valid values are %v", containerProfile, s.allowedProfilesString) - allErrs = append(allErrs, field.Forbidden(fieldPath, msg)) - } - - return allErrs -} - -// profileAllowed checks if profile is in allowedProfiles or if allowedProfiles -// contains the wildcard. -func (s *strategy) profileAllowed(profile string) bool { - // for backwards compatibility and PSPs without a defined list of allowed profiles. - // If a PSP does not have allowedProfiles set then we should allow an empty profile. - // This will mean that the runtime default is used. - if len(s.allowedProfiles) == 0 && profile == "" { - return true - } - - return s.allowAnyProfile || s.allowedProfiles[profile] -} - -// profileForContainer returns the container profile if set, otherwise the pod profile. -func profileForContainer(pod *api.Pod, container *api.Container) string { - containerProfile, ok := pod.Annotations[api.SeccompContainerAnnotationKeyPrefix+container.Name] - if ok { - return containerProfile - } - return pod.Annotations[api.SeccompPodAnnotationKey] -} diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/BUILD b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/BUILD deleted file mode 100644 index 7fece928fa..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/BUILD +++ /dev/null @@ -1,36 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "util.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/util/sets:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["util_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apis/extensions:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/doc.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/doc.go deleted file mode 100644 index af407b58c6..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Package util contains utility code shared amongst different parts of the -// pod security policy apparatus. -package util diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go deleted file mode 100644 index 427df33204..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/util/sets" -) - -const ( - ValidatedPSPAnnotation = "kubernetes.io/psp" -) - -func GetAllFSTypesExcept(exceptions ...string) sets.String { - fstypes := GetAllFSTypesAsSet() - for _, e := range exceptions { - fstypes.Delete(e) - } - return fstypes -} - -func GetAllFSTypesAsSet() sets.String { - fstypes := sets.NewString() - fstypes.Insert( - string(extensions.HostPath), - string(extensions.AzureFile), - string(extensions.Flocker), - string(extensions.FlexVolume), - string(extensions.EmptyDir), - string(extensions.GCEPersistentDisk), - string(extensions.AWSElasticBlockStore), - string(extensions.GitRepo), - string(extensions.Secret), - string(extensions.NFS), - string(extensions.ISCSI), - string(extensions.Glusterfs), - string(extensions.PersistentVolumeClaim), - string(extensions.RBD), - string(extensions.Cinder), - string(extensions.CephFS), - string(extensions.DownwardAPI), - string(extensions.FC), - string(extensions.ConfigMap), - string(extensions.VsphereVolume), - string(extensions.Quobyte), - string(extensions.AzureDisk), - string(extensions.PhotonPersistentDisk)) - return fstypes -} - -// getVolumeFSType gets the FSType for a volume. 
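GetAllFSTypesExcept above is plain set difference over volume-type names: start from the full set and delete the exceptions. A hedged sketch using an ordinary map in place of pkg/util/sets, with only a handful of the volume names for brevity:

package main

import "fmt"

type stringSet map[string]struct{}

func newSet(items ...string) stringSet {
	s := stringSet{}
	for _, it := range items {
		s[it] = struct{}{}
	}
	return s
}

// allFSTypesExcept copies the full set and removes the exceptions,
// mirroring the removed GetAllFSTypesExcept helper.
func allFSTypesExcept(all stringSet, exceptions ...string) stringSet {
	out := stringSet{}
	for k := range all {
		out[k] = struct{}{}
	}
	for _, e := range exceptions {
		delete(out, e)
	}
	return out
}

func main() {
	all := newSet("hostPath", "emptyDir", "secret", "configMap", "persistentVolumeClaim")
	allowed := allFSTypesExcept(all, "hostPath")
	_, hostPathAllowed := allowed["hostPath"]
	fmt.Println(hostPathAllowed, len(allowed)) // false 4
}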
-func GetVolumeFSType(v api.Volume) (extensions.FSType, error) { - switch { - case v.HostPath != nil: - return extensions.HostPath, nil - case v.EmptyDir != nil: - return extensions.EmptyDir, nil - case v.GCEPersistentDisk != nil: - return extensions.GCEPersistentDisk, nil - case v.AWSElasticBlockStore != nil: - return extensions.AWSElasticBlockStore, nil - case v.GitRepo != nil: - return extensions.GitRepo, nil - case v.Secret != nil: - return extensions.Secret, nil - case v.NFS != nil: - return extensions.NFS, nil - case v.ISCSI != nil: - return extensions.ISCSI, nil - case v.Glusterfs != nil: - return extensions.Glusterfs, nil - case v.PersistentVolumeClaim != nil: - return extensions.PersistentVolumeClaim, nil - case v.RBD != nil: - return extensions.RBD, nil - case v.FlexVolume != nil: - return extensions.FlexVolume, nil - case v.Cinder != nil: - return extensions.Cinder, nil - case v.CephFS != nil: - return extensions.CephFS, nil - case v.Flocker != nil: - return extensions.Flocker, nil - case v.DownwardAPI != nil: - return extensions.DownwardAPI, nil - case v.FC != nil: - return extensions.FC, nil - case v.AzureFile != nil: - return extensions.AzureFile, nil - case v.ConfigMap != nil: - return extensions.ConfigMap, nil - case v.VsphereVolume != nil: - return extensions.VsphereVolume, nil - case v.Quobyte != nil: - return extensions.Quobyte, nil - case v.AzureDisk != nil: - return extensions.AzureDisk, nil - case v.PhotonPersistentDisk != nil: - return extensions.PhotonPersistentDisk, nil - } - - return "", fmt.Errorf("unknown volume type for volume: %#v", v) -} - -// fsTypeToStringSet converts an FSType slice to a string set. -func FSTypeToStringSet(fsTypes []extensions.FSType) sets.String { - set := sets.NewString() - for _, v := range fsTypes { - set.Insert(string(v)) - } - return set -} - -// PSPAllowsAllVolumes checks for FSTypeAll in the psp's allowed volumes. -func PSPAllowsAllVolumes(psp *extensions.PodSecurityPolicy) bool { - return PSPAllowsFSType(psp, extensions.All) -} - -// PSPAllowsFSType is a utility for checking if a PSP allows a particular FSType. -// If all volumes are allowed then this will return true for any FSType passed. -func PSPAllowsFSType(psp *extensions.PodSecurityPolicy, fsType extensions.FSType) bool { - if psp == nil { - return false - } - - for _, v := range psp.Spec.Volumes { - if v == fsType || v == extensions.All { - return true - } - } - return false -} - -// FallsInRange is a utility to determine it the id falls in the valid range. 
-func FallsInRange(id int64, rng extensions.IDRange) bool { - return id >= rng.Min && id <= rng.Max -} diff --git a/vendor/k8s.io/kubernetes/pkg/selection/BUILD b/vendor/k8s.io/kubernetes/pkg/selection/BUILD deleted file mode 100644 index 3e3b589803..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/selection/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["operator.go"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD b/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD deleted file mode 100644 index b169ac386f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD +++ /dev/null @@ -1,49 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "jwt.go", - "util.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/validation:go_default_library", - "//pkg/auth/authenticator:go_default_library", - "//pkg/auth/user:go_default_library", - "//vendor:github.com/dgrijalva/jwt-go", - "//vendor:github.com/golang/glog", - ], -) - -go_test( - name = "go_default_test", - srcs = ["util_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) - -go_test( - name = "go_default_xtest", - srcs = ["jwt_test.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", - "//pkg/controller/serviceaccount:go_default_library", - "//pkg/serviceaccount:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/serviceaccount/jwt.go b/vendor/k8s.io/kubernetes/pkg/serviceaccount/jwt.go deleted file mode 100644 index c9b4b77076..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/serviceaccount/jwt.go +++ /dev/null @@ -1,323 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package serviceaccount - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/auth/authenticator" - "k8s.io/kubernetes/pkg/auth/user" - - jwt "github.com/dgrijalva/jwt-go" - "github.com/golang/glog" -) - -const ( - Issuer = "kubernetes/serviceaccount" - - SubjectClaim = "sub" - IssuerClaim = "iss" - ServiceAccountNameClaim = "kubernetes.io/serviceaccount/service-account.name" - ServiceAccountUIDClaim = "kubernetes.io/serviceaccount/service-account.uid" - SecretNameClaim = "kubernetes.io/serviceaccount/secret.name" - NamespaceClaim = "kubernetes.io/serviceaccount/namespace" -) - -// ServiceAccountTokenGetter defines functions to retrieve a named service account and secret -type ServiceAccountTokenGetter interface { - GetServiceAccount(namespace, name string) (*api.ServiceAccount, error) - GetSecret(namespace, name string) (*api.Secret, error) -} - -type TokenGenerator interface { - // GenerateToken generates a token which will identify the given ServiceAccount. - // The returned token will be stored in the given (and yet-unpersisted) Secret. - GenerateToken(serviceAccount api.ServiceAccount, secret api.Secret) (string, error) -} - -// ReadPrivateKey is a helper function for reading a private key from a PEM-encoded file -func ReadPrivateKey(file string) (interface{}, error) { - data, err := ioutil.ReadFile(file) - if err != nil { - return nil, err - } - key, err := ReadPrivateKeyFromPEM(data) - if err != nil { - return nil, fmt.Errorf("error reading private key file %s: %v", file, err) - } - return key, nil -} - -// ReadPrivateKeyFromPEM is a helper function for reading a private key from a PEM-encoded file -func ReadPrivateKeyFromPEM(data []byte) (interface{}, error) { - if key, err := jwt.ParseRSAPrivateKeyFromPEM(data); err == nil { - return key, nil - } - if key, err := jwt.ParseECPrivateKeyFromPEM(data); err == nil { - return key, nil - } - return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key") -} - -// ReadPublicKeys is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded file. -// Reads public keys from both public and private key files. -func ReadPublicKeys(file string) ([]interface{}, error) { - data, err := ioutil.ReadFile(file) - if err != nil { - return nil, err - } - keys, err := ReadPublicKeysFromPEM(data) - if err != nil { - return nil, fmt.Errorf("error reading public key file %s: %v", file, err) - } - return keys, nil -} - -// ReadPublicKeysFromPEM is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded byte array. -// Reads public keys from both public and private key files. 
-func ReadPublicKeysFromPEM(data []byte) ([]interface{}, error) { - var block *pem.Block - keys := []interface{}{} - for { - // read the next block - block, data = pem.Decode(data) - if block == nil { - break - } - - // get PEM bytes for just this block - blockData := pem.EncodeToMemory(block) - if privateKey, err := jwt.ParseRSAPrivateKeyFromPEM(blockData); err == nil { - keys = append(keys, &privateKey.PublicKey) - continue - } - if publicKey, err := jwt.ParseRSAPublicKeyFromPEM(blockData); err == nil { - keys = append(keys, publicKey) - continue - } - - if privateKey, err := jwt.ParseECPrivateKeyFromPEM(blockData); err == nil { - keys = append(keys, &privateKey.PublicKey) - continue - } - if publicKey, err := jwt.ParseECPublicKeyFromPEM(blockData); err == nil { - keys = append(keys, publicKey) - continue - } - - // tolerate non-key PEM blocks for backwards compatibility - // originally, only the first PEM block was parsed and expected to be a key block - } - - if len(keys) == 0 { - return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA key") - } - return keys, nil -} - -// JWTTokenGenerator returns a TokenGenerator that generates signed JWT tokens, using the given privateKey. -// privateKey is a PEM-encoded byte array of a private RSA key. -// JWTTokenAuthenticator() -func JWTTokenGenerator(privateKey interface{}) TokenGenerator { - return &jwtTokenGenerator{privateKey} -} - -type jwtTokenGenerator struct { - privateKey interface{} -} - -func (j *jwtTokenGenerator) GenerateToken(serviceAccount api.ServiceAccount, secret api.Secret) (string, error) { - var method jwt.SigningMethod - switch privateKey := j.privateKey.(type) { - case *rsa.PrivateKey: - method = jwt.SigningMethodRS256 - case *ecdsa.PrivateKey: - switch privateKey.Curve { - case elliptic.P256(): - method = jwt.SigningMethodES256 - case elliptic.P384(): - method = jwt.SigningMethodES384 - case elliptic.P521(): - method = jwt.SigningMethodES512 - default: - return "", fmt.Errorf("unknown private key curve, must be 256, 384, or 521") - } - default: - return "", fmt.Errorf("unknown private key type %T, must be *rsa.PrivateKey or *ecdsa.PrivateKey", j.privateKey) - } - - token := jwt.New(method) - - claims, _ := token.Claims.(jwt.MapClaims) - - // Identify the issuer - claims[IssuerClaim] = Issuer - - // Username - claims[SubjectClaim] = MakeUsername(serviceAccount.Namespace, serviceAccount.Name) - - // Persist enough structured info for the authenticator to be able to look up the service account and secret - claims[NamespaceClaim] = serviceAccount.Namespace - claims[ServiceAccountNameClaim] = serviceAccount.Name - claims[ServiceAccountUIDClaim] = serviceAccount.UID - claims[SecretNameClaim] = secret.Name - - // Sign and get the complete encoded token as a string - return token.SignedString(j.privateKey) -} - -// JWTTokenAuthenticator authenticates tokens as JWT tokens produced by JWTTokenGenerator -// Token signatures are verified using each of the given public keys until one works (allowing key rotation) -// If lookup is true, the service account and secret referenced as claims inside the token are retrieved and verified with the provided ServiceAccountTokenGetter -func JWTTokenAuthenticator(keys []interface{}, lookup bool, getter ServiceAccountTokenGetter) authenticator.Token { - return &jwtTokenAuthenticator{keys, lookup, getter} -} - -type jwtTokenAuthenticator struct { - keys []interface{} - lookup bool - getter ServiceAccountTokenGetter -} - -var errMismatchedSigningMethod = errors.New("invalid signing method") 
- -func (j *jwtTokenAuthenticator) AuthenticateToken(token string) (user.Info, bool, error) { - var validationError error - - for i, key := range j.keys { - // Attempt to verify with each key until we find one that works - parsedToken, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { - switch token.Method.(type) { - case *jwt.SigningMethodRSA: - if _, ok := key.(*rsa.PublicKey); ok { - return key, nil - } - return nil, errMismatchedSigningMethod - case *jwt.SigningMethodECDSA: - if _, ok := key.(*ecdsa.PublicKey); ok { - return key, nil - } - return nil, errMismatchedSigningMethod - default: - return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) - } - }) - - if err != nil { - switch err := err.(type) { - case *jwt.ValidationError: - if (err.Errors & jwt.ValidationErrorMalformed) != 0 { - // Not a JWT, no point in continuing - return nil, false, nil - } - - if (err.Errors & jwt.ValidationErrorSignatureInvalid) != 0 { - // Signature error, perhaps one of the other keys will verify the signature - // If not, we want to return this error - glog.V(4).Infof("Signature error (key %d): %v", i, err) - validationError = err - continue - } - - // This key doesn't apply to the given signature type - // Perhaps one of the other keys will verify the signature - // If not, we want to return this error - if err.Inner == errMismatchedSigningMethod { - glog.V(4).Infof("Mismatched key type (key %d): %v", i, err) - validationError = err - continue - } - } - - // Other errors should just return as errors - return nil, false, err - } - - // If we get here, we have a token with a recognized signature - - claims, _ := parsedToken.Claims.(jwt.MapClaims) - - // Make sure we issued the token - iss, _ := claims[IssuerClaim].(string) - if iss != Issuer { - return nil, false, nil - } - - // Make sure the claims we need exist - sub, _ := claims[SubjectClaim].(string) - if len(sub) == 0 { - return nil, false, errors.New("sub claim is missing") - } - namespace, _ := claims[NamespaceClaim].(string) - if len(namespace) == 0 { - return nil, false, errors.New("namespace claim is missing") - } - secretName, _ := claims[SecretNameClaim].(string) - if len(namespace) == 0 { - return nil, false, errors.New("secretName claim is missing") - } - serviceAccountName, _ := claims[ServiceAccountNameClaim].(string) - if len(serviceAccountName) == 0 { - return nil, false, errors.New("serviceAccountName claim is missing") - } - serviceAccountUID, _ := claims[ServiceAccountUIDClaim].(string) - if len(serviceAccountUID) == 0 { - return nil, false, errors.New("serviceAccountUID claim is missing") - } - - subjectNamespace, subjectName, err := SplitUsername(sub) - if err != nil || subjectNamespace != namespace || subjectName != serviceAccountName { - return nil, false, errors.New("sub claim is invalid") - } - - if j.lookup { - // Make sure token hasn't been invalidated by deletion of the secret - secret, err := j.getter.GetSecret(namespace, secretName) - if err != nil { - glog.V(4).Infof("Could not retrieve token %s/%s for service account %s/%s: %v", namespace, secretName, namespace, serviceAccountName, err) - return nil, false, errors.New("Token has been invalidated") - } - if bytes.Compare(secret.Data[api.ServiceAccountTokenKey], []byte(token)) != 0 { - glog.V(4).Infof("Token contents no longer matches %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName) - return nil, false, errors.New("Token does not match server's copy") - } - - // Make sure service account still 
exists (name and UID) - serviceAccount, err := j.getter.GetServiceAccount(namespace, serviceAccountName) - if err != nil { - glog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, serviceAccountName, err) - return nil, false, err - } - if string(serviceAccount.UID) != serviceAccountUID { - glog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, serviceAccountName, string(serviceAccount.UID), serviceAccountUID) - return nil, false, fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, serviceAccountUID) - } - } - - return UserInfo(namespace, serviceAccountName, serviceAccountUID), true, nil - } - - return nil, false, validationError -} diff --git a/vendor/k8s.io/kubernetes/pkg/serviceaccount/util.go b/vendor/k8s.io/kubernetes/pkg/serviceaccount/util.go deleted file mode 100644 index 712b086ad4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/serviceaccount/util.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package serviceaccount - -import ( - "fmt" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/validation" - "k8s.io/kubernetes/pkg/auth/user" -) - -const ( - ServiceAccountUsernamePrefix = "system:serviceaccount:" - ServiceAccountUsernameSeparator = ":" - ServiceAccountGroupPrefix = "system:serviceaccounts:" - AllServiceAccountsGroup = "system:serviceaccounts" -) - -// MakeUsername generates a username from the given namespace and ServiceAccount name. -// The resulting username can be passed to SplitUsername to extract the original namespace and ServiceAccount name. 
-func MakeUsername(namespace, name string) string { - return ServiceAccountUsernamePrefix + namespace + ServiceAccountUsernameSeparator + name -} - -var invalidUsernameErr = fmt.Errorf("Username must be in the form %s", MakeUsername("namespace", "name")) - -// SplitUsername returns the namespace and ServiceAccount name embedded in the given username, -// or an error if the username is not a valid name produced by MakeUsername -func SplitUsername(username string) (string, string, error) { - if !strings.HasPrefix(username, ServiceAccountUsernamePrefix) { - return "", "", invalidUsernameErr - } - trimmed := strings.TrimPrefix(username, ServiceAccountUsernamePrefix) - parts := strings.Split(trimmed, ServiceAccountUsernameSeparator) - if len(parts) != 2 { - return "", "", invalidUsernameErr - } - namespace, name := parts[0], parts[1] - if len(validation.ValidateNamespaceName(namespace, false)) != 0 { - return "", "", invalidUsernameErr - } - if len(validation.ValidateServiceAccountName(name, false)) != 0 { - return "", "", invalidUsernameErr - } - return namespace, name, nil -} - -// MakeGroupNames generates service account group names for the given namespace and ServiceAccount name -func MakeGroupNames(namespace, name string) []string { - return []string{ - AllServiceAccountsGroup, - MakeNamespaceGroupName(namespace), - } -} - -// MakeNamespaceGroupName returns the name of the group all service accounts in the namespace are included in -func MakeNamespaceGroupName(namespace string) string { - return ServiceAccountGroupPrefix + namespace -} - -// UserInfo returns a user.Info interface for the given namespace, service account name and UID -func UserInfo(namespace, name, uid string) user.Info { - return &user.DefaultInfo{ - Name: MakeUsername(namespace, name), - UID: uid, - Groups: MakeGroupNames(namespace, name), - } -} - -// IsServiceAccountToken returns true if the secret is a valid api token for the service account -func IsServiceAccountToken(secret *api.Secret, sa *api.ServiceAccount) bool { - if secret.Type != api.SecretTypeServiceAccountToken { - return false - } - - name := secret.Annotations[api.ServiceAccountNameKey] - uid := secret.Annotations[api.ServiceAccountUIDKey] - if name != sa.Name { - // Name must match - return false - } - if len(uid) > 0 && uid != string(sa.UID) { - // If UID is specified, it must match - return false - } - - return true -} diff --git a/vendor/k8s.io/kubernetes/pkg/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/storage/BUILD deleted file mode 100644 index de2d97e923..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/storage/BUILD +++ /dev/null @@ -1,96 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "cacher.go", - "doc.go", - "errors.go", - "interfaces.go", - "selection_predicate.go", - "util.go", - "watch_cache.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/api/validation/path:go_default_library", - "//pkg/client/cache:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/fields:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/types:go_default_library", - "//pkg/util:go_default_library", - 
"//pkg/util/clock:go_default_library", - "//pkg/util/runtime:go_default_library", - "//pkg/util/validation/field:go_default_library", - "//pkg/util/wait:go_default_library", - "//pkg/watch:go_default_library", - "//vendor:github.com/golang/glog", - "//vendor:golang.org/x/net/context", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "selection_predicate_test.go", - "util_test.go", - "watch_cache_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/client/cache:go_default_library", - "//pkg/fields:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/clock:go_default_library", - "//pkg/util/sets:go_default_library", - "//pkg/util/wait:go_default_library", - "//pkg/watch:go_default_library", - ], -) - -go_test( - name = "go_default_xtest", - srcs = ["cacher_test.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/meta:go_default_library", - "//pkg/api/testapi:go_default_library", - "//pkg/api/testing:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/fields:go_default_library", - "//pkg/labels:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/storage:go_default_library", - "//pkg/storage/etcd:go_default_library", - "//pkg/storage/etcd/etcdtest:go_default_library", - "//pkg/storage/etcd/testing:go_default_library", - "//pkg/storage/etcd3:go_default_library", - "//pkg/util/sets:go_default_library", - "//pkg/util/wait:go_default_library", - "//pkg/watch:go_default_library", - "//vendor:golang.org/x/net/context", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/storage/OWNERS b/vendor/k8s.io/kubernetes/pkg/storage/OWNERS deleted file mode 100644 index a57ded7f65..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/storage/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -assignees: - - lavalamp - - liggitt - - timothysc - - wojtek-t - - xiang90 diff --git a/vendor/k8s.io/kubernetes/pkg/storage/cacher.go b/vendor/k8s.io/kubernetes/pkg/storage/cacher.go deleted file mode 100644 index f7f5febf4a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/storage/cacher.go +++ /dev/null @@ -1,872 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package storage - -import ( - "fmt" - "net/http" - "reflect" - "strconv" - "sync" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/client/cache" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" - utilruntime "k8s.io/kubernetes/pkg/util/runtime" - "k8s.io/kubernetes/pkg/util/wait" - "k8s.io/kubernetes/pkg/watch" - - "github.com/golang/glog" - "golang.org/x/net/context" -) - -// CacherConfig contains the configuration for a given Cache. -type CacherConfig struct { - // Maximum size of the history cached in memory. - CacheCapacity int - - // An underlying storage.Interface. - Storage Interface - - // An underlying storage.Versioner. - Versioner Versioner - - // The Cache will be caching objects of a given Type and assumes that they - // are all stored under ResourcePrefix directory in the underlying database. - Type interface{} - ResourcePrefix string - - // KeyFunc is used to get a key in the underyling storage for a given object. - KeyFunc func(runtime.Object) (string, error) - - // TriggerPublisherFunc is used for optimizing amount of watchers that - // needs to process an incoming event. - TriggerPublisherFunc TriggerPublisherFunc - - // NewList is a function that creates new empty object storing a list of - // objects of type Type. - NewListFunc func() runtime.Object - - Codec runtime.Codec -} - -type watchersMap map[int]*cacheWatcher - -func (wm watchersMap) addWatcher(w *cacheWatcher, number int) { - wm[number] = w -} - -func (wm watchersMap) deleteWatcher(number int) { - delete(wm, number) -} - -func (wm watchersMap) terminateAll() { - for key, watcher := range wm { - delete(wm, key) - watcher.stop() - } -} - -type indexedWatchers struct { - allWatchers watchersMap - valueWatchers map[string]watchersMap -} - -func (i *indexedWatchers) addWatcher(w *cacheWatcher, number int, value string, supported bool) { - if supported { - if _, ok := i.valueWatchers[value]; !ok { - i.valueWatchers[value] = watchersMap{} - } - i.valueWatchers[value].addWatcher(w, number) - } else { - i.allWatchers.addWatcher(w, number) - } -} - -func (i *indexedWatchers) deleteWatcher(number int, value string, supported bool) { - if supported { - i.valueWatchers[value].deleteWatcher(number) - if len(i.valueWatchers[value]) == 0 { - delete(i.valueWatchers, value) - } - } else { - i.allWatchers.deleteWatcher(number) - } -} - -func (i *indexedWatchers) terminateAll(objectType reflect.Type) { - if len(i.allWatchers) > 0 || len(i.valueWatchers) > 0 { - glog.Warningf("Terminating all watchers from cacher %v", objectType) - } - i.allWatchers.terminateAll() - for index, watchers := range i.valueWatchers { - watchers.terminateAll() - delete(i.valueWatchers, index) - } -} - -type filterObjectFunc func(string, runtime.Object) bool - -// Cacher is responsible for serving WATCH and LIST requests for a given -// resource from its internal cache and updating its cache in the background -// based on the underlying storage contents. -// Cacher implements storage.Interface (although most of the calls are just -// delegated to the underlying storage). -type Cacher struct { - // HighWaterMarks for performance debugging. 
- // Important: Since HighWaterMark is using sync/atomic, it has to be at the top of the struct due to a bug on 32-bit platforms - // See: https://golang.org/pkg/sync/atomic/ for more information - incomingHWM HighWaterMark - // Incoming events that should be dispatched to watchers. - incoming chan watchCacheEvent - - sync.RWMutex - - // Before accessing the cacher's cache, wait for the ready to be ok. - // This is necessary to prevent users from accessing structures that are - // uninitialized or are being repopulated right now. - // ready needs to be set to false when the cacher is paused or stopped. - // ready needs to be set to true when the cacher is ready to use after - // initialization. - ready *ready - - // Underlying storage.Interface. - storage Interface - - // Expected type of objects in the underlying cache. - objectType reflect.Type - - // "sliding window" of recent changes of objects and the current state. - watchCache *watchCache - reflector *cache.Reflector - - // Versioner is used to handle resource versions. - versioner Versioner - - // triggerFunc is used for optimizing amount of watchers that needs to process - // an incoming event. - triggerFunc TriggerPublisherFunc - // watchers is mapping from the value of trigger function that a - // watcher is interested into the watchers - watcherIdx int - watchers indexedWatchers - - // Handling graceful termination. - stopLock sync.RWMutex - stopped bool - stopCh chan struct{} - stopWg sync.WaitGroup -} - -// Create a new Cacher responsible from service WATCH and LIST requests from its -// internal cache and updating its cache in the background based on the given -// configuration. -func NewCacherFromConfig(config CacherConfig) *Cacher { - watchCache := newWatchCache(config.CacheCapacity, config.KeyFunc) - listerWatcher := newCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc) - - // Give this error when it is constructed rather than when you get the - // first watch item, because it's much easier to track down that way. - if obj, ok := config.Type.(runtime.Object); ok { - if err := runtime.CheckCodec(config.Codec, obj); err != nil { - panic("storage codec doesn't seem to match given type: " + err.Error()) - } - } - - cacher := &Cacher{ - ready: newReady(), - storage: config.Storage, - objectType: reflect.TypeOf(config.Type), - watchCache: watchCache, - reflector: cache.NewReflector(listerWatcher, config.Type, watchCache, 0), - versioner: config.Versioner, - triggerFunc: config.TriggerPublisherFunc, - watcherIdx: 0, - watchers: indexedWatchers{ - allWatchers: make(map[int]*cacheWatcher), - valueWatchers: make(map[string]watchersMap), - }, - // TODO: Figure out the correct value for the buffer size. - incoming: make(chan watchCacheEvent, 100), - // We need to (potentially) stop both: - // - wait.Until go-routine - // - reflector.ListAndWatch - // and there are no guarantees on the order that they will stop. - // So we will be simply closing the channel, and synchronizing on the WaitGroup. - stopCh: make(chan struct{}), - } - watchCache.SetOnEvent(cacher.processEvent) - go cacher.dispatchEvents() - - stopCh := cacher.stopCh - cacher.stopWg.Add(1) - go func() { - defer cacher.stopWg.Done() - wait.Until( - func() { - if !cacher.isStopped() { - cacher.startCaching(stopCh) - } - }, time.Second, stopCh, - ) - }() - return cacher -} - -func (c *Cacher) startCaching(stopChannel <-chan struct{}) { - // The 'usable' lock is always 'RLock'able when it is safe to use the cache. 
- // It is safe to use the cache after a successful list until a disconnection. - // We start with usable (write) locked. The below OnReplace function will - // unlock it after a successful list. The below defer will then re-lock - // it when this function exits (always due to disconnection), only if - // we actually got a successful list. This cycle will repeat as needed. - successfulList := false - c.watchCache.SetOnReplace(func() { - successfulList = true - c.ready.set(true) - }) - defer func() { - if successfulList { - c.ready.set(false) - } - }() - - c.terminateAllWatchers() - // Note that since onReplace may be not called due to errors, we explicitly - // need to retry it on errors under lock. - // Also note that startCaching is called in a loop, so there's no need - // to have another loop here. - if err := c.reflector.ListAndWatch(stopChannel); err != nil { - glog.Errorf("unexpected ListAndWatch error: %v", err) - } -} - -// Implements storage.Interface. -func (c *Cacher) Versioner() Versioner { - return c.storage.Versioner() -} - -// Implements storage.Interface. -func (c *Cacher) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error { - return c.storage.Create(ctx, key, obj, out, ttl) -} - -// Implements storage.Interface. -func (c *Cacher) Delete(ctx context.Context, key string, out runtime.Object, preconditions *Preconditions) error { - return c.storage.Delete(ctx, key, out, preconditions) -} - -// Implements storage.Interface. -func (c *Cacher) Watch(ctx context.Context, key string, resourceVersion string, pred SelectionPredicate) (watch.Interface, error) { - watchRV, err := ParseWatchResourceVersion(resourceVersion) - if err != nil { - return nil, err - } - - c.ready.wait() - - // We explicitly use thread unsafe version and do locking ourself to ensure that - // no new events will be processed in the meantime. The watchCache will be unlocked - // on return from this function. - // Note that we cannot do it under Cacher lock, to avoid a deadlock, since the - // underlying watchCache is calling processEvent under its lock. - c.watchCache.RLock() - defer c.watchCache.RUnlock() - initEvents, err := c.watchCache.GetAllEventsSinceThreadUnsafe(watchRV) - if err != nil { - // To match the uncached watch implementation, once we have passed authn/authz/admission, - // and successfully parsed a resource version, other errors must fail with a watch event of type ERROR, - // rather than a directly returned error. - return newErrWatcher(err), nil - } - - triggerValue, triggerSupported := "", false - // TODO: Currently we assume that in a given Cacher object, any that is - // passed here is aware of exactly the same trigger (at most one). - // Thus, either 0 or 1 values will be returned. - if matchValues := pred.MatcherIndex(); len(matchValues) > 0 { - triggerValue, triggerSupported = matchValues[0].Value, true - } - - // If there is triggerFunc defined, but triggerSupported is false, - // we can't narrow the amount of events significantly at this point. - // - // That said, currently triggerFunc is defined only for Pods and Nodes, - // and there is only constant number of watchers for which triggerSupported - // is false (excluding those issues explicitly by users). - // Thus, to reduce the risk of those watchers blocking all watchers of a - // given resource in the system, we increase the sizes of buffers for them. 
- chanSize := 10 - if c.triggerFunc != nil && !triggerSupported { - // TODO: We should tune this value and ideally make it dependent on the - // number of objects of a given type and/or their churn. - chanSize = 1000 - } - - c.Lock() - defer c.Unlock() - forget := forgetWatcher(c, c.watcherIdx, triggerValue, triggerSupported) - watcher := newCacheWatcher(watchRV, chanSize, initEvents, filterFunction(key, pred), forget) - - c.watchers.addWatcher(watcher, c.watcherIdx, triggerValue, triggerSupported) - c.watcherIdx++ - return watcher, nil -} - -// Implements storage.Interface. -func (c *Cacher) WatchList(ctx context.Context, key string, resourceVersion string, pred SelectionPredicate) (watch.Interface, error) { - return c.Watch(ctx, key, resourceVersion, pred) -} - -// Implements storage.Interface. -func (c *Cacher) Get(ctx context.Context, key string, objPtr runtime.Object, ignoreNotFound bool) error { - return c.storage.Get(ctx, key, objPtr, ignoreNotFound) -} - -// Implements storage.Interface. -func (c *Cacher) GetToList(ctx context.Context, key string, resourceVersion string, pred SelectionPredicate, listObj runtime.Object) error { - if resourceVersion == "" { - // If resourceVersion is not specified, serve it from underlying - // storage (for backward compatibility). - return c.storage.GetToList(ctx, key, resourceVersion, pred, listObj) - } - - // If resourceVersion is specified, serve it from cache. - // It's guaranteed that the returned value is at least that - // fresh as the given resourceVersion. - listRV, err := ParseListResourceVersion(resourceVersion) - if err != nil { - return err - } - - trace := util.NewTrace(fmt.Sprintf("cacher %v: List", c.objectType.String())) - defer trace.LogIfLong(500 * time.Millisecond) - - c.ready.wait() - trace.Step("Ready") - - // List elements with at least 'listRV' from cache. - listPtr, err := meta.GetItemsPtr(listObj) - if err != nil { - return err - } - listVal, err := conversion.EnforcePtr(listPtr) - if err != nil || listVal.Kind() != reflect.Slice { - return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind()) - } - filter := filterFunction(key, pred) - - obj, exists, readResourceVersion, err := c.watchCache.WaitUntilFreshAndGet(listRV, key, trace) - if err != nil { - return fmt.Errorf("failed to wait for fresh list: %v", err) - } - trace.Step("Got from cache") - - if exists { - elem, ok := obj.(*storeElement) - if !ok { - return fmt.Errorf("non *storeElement returned from storage: %v", obj) - } - if filter(elem.Key, elem.Object) { - listVal.Set(reflect.Append(listVal, reflect.ValueOf(elem.Object).Elem())) - } - } - if c.versioner != nil { - if err := c.versioner.UpdateList(listObj, readResourceVersion); err != nil { - return err - } - } - return nil -} - -// Implements storage.Interface. -func (c *Cacher) List(ctx context.Context, key string, resourceVersion string, pred SelectionPredicate, listObj runtime.Object) error { - if resourceVersion == "" { - // If resourceVersion is not specified, serve it from underlying - // storage (for backward compatibility). - return c.storage.List(ctx, key, resourceVersion, pred, listObj) - } - - // If resourceVersion is specified, serve it from cache. - // It's guaranteed that the returned value is at least that - // fresh as the given resourceVersion. 
- listRV, err := ParseListResourceVersion(resourceVersion) - if err != nil { - return err - } - - trace := util.NewTrace(fmt.Sprintf("cacher %v: List", c.objectType.String())) - defer trace.LogIfLong(500 * time.Millisecond) - - c.ready.wait() - trace.Step("Ready") - - // List elements with at least 'listRV' from cache. - listPtr, err := meta.GetItemsPtr(listObj) - if err != nil { - return err - } - listVal, err := conversion.EnforcePtr(listPtr) - if err != nil || listVal.Kind() != reflect.Slice { - return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind()) - } - filter := filterFunction(key, pred) - - objs, readResourceVersion, err := c.watchCache.WaitUntilFreshAndList(listRV, trace) - if err != nil { - return fmt.Errorf("failed to wait for fresh list: %v", err) - } - trace.Step(fmt.Sprintf("Listed %d items from cache", len(objs))) - if len(objs) > listVal.Cap() && pred.Label.Empty() && pred.Field.Empty() { - // Resize the slice appropriately, since we already know that none - // of the elements will be filtered out. - listVal.Set(reflect.MakeSlice(reflect.SliceOf(c.objectType.Elem()), 0, len(objs))) - trace.Step("Resized result") - } - for _, obj := range objs { - elem, ok := obj.(*storeElement) - if !ok { - return fmt.Errorf("non *storeElement returned from storage: %v", obj) - } - if filter(elem.Key, elem.Object) { - listVal.Set(reflect.Append(listVal, reflect.ValueOf(elem.Object).Elem())) - } - } - trace.Step(fmt.Sprintf("Filtered %d items", listVal.Len())) - if c.versioner != nil { - if err := c.versioner.UpdateList(listObj, readResourceVersion); err != nil { - return err - } - } - return nil -} - -// Implements storage.Interface. -func (c *Cacher) GuaranteedUpdate( - ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool, - preconditions *Preconditions, tryUpdate UpdateFunc, _ ...runtime.Object) error { - // Ignore the suggestion and try to pass down the current version of the object - // read from cache. - if elem, exists, err := c.watchCache.GetByKey(key); err != nil { - glog.Errorf("GetByKey returned error: %v", err) - } else if exists { - currObj, copyErr := api.Scheme.Copy(elem.(*storeElement).Object) - if copyErr == nil { - return c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, currObj) - } - glog.Errorf("couldn't copy object: %v", copyErr) - } - // If we couldn't get the object, fallback to no-suggestion. - return c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate) -} - -func (c *Cacher) triggerValues(event *watchCacheEvent) ([]string, bool) { - // TODO: Currently we assume that in a given Cacher object, its - // is aware of exactly the same trigger (at most one). Thus calling: - // c.triggerFunc() - // can return only 0 or 1 values. - // That means, that triggerValues itself may return up to 2 different values. 
- if c.triggerFunc == nil { - return nil, false - } - result := make([]string, 0, 2) - matchValues := c.triggerFunc(event.Object) - if len(matchValues) > 0 { - result = append(result, matchValues[0].Value) - } - if event.PrevObject == nil { - return result, len(result) > 0 - } - prevMatchValues := c.triggerFunc(event.PrevObject) - if len(prevMatchValues) > 0 { - if len(result) == 0 || result[0] != prevMatchValues[0].Value { - result = append(result, prevMatchValues[0].Value) - } - } - return result, len(result) > 0 -} - -func (c *Cacher) processEvent(event watchCacheEvent) { - if curLen := int64(len(c.incoming)); c.incomingHWM.Update(curLen) { - // Monitor if this gets backed up, and how much. - glog.V(1).Infof("cacher (%v): %v objects queued in incoming channel.", c.objectType.String(), curLen) - } - c.incoming <- event -} - -func (c *Cacher) dispatchEvents() { - for { - select { - case event, ok := <-c.incoming: - if !ok { - return - } - c.dispatchEvent(&event) - case <-c.stopCh: - return - } - } -} - -func (c *Cacher) dispatchEvent(event *watchCacheEvent) { - triggerValues, supported := c.triggerValues(event) - - // TODO: For now we assume we have a given budget for dispatching - // a single event. We should consider changing to the approach with: - // - budget has upper bound at - // - we add to current timeout every second - timeout := time.Duration(250) * time.Millisecond - - c.Lock() - defer c.Unlock() - // Iterate over "allWatchers" no matter what the trigger function is. - for _, watcher := range c.watchers.allWatchers { - watcher.add(event, &timeout) - } - if supported { - // Iterate over watchers interested in the given values of the trigger. - for _, triggerValue := range triggerValues { - for _, watcher := range c.watchers.valueWatchers[triggerValue] { - watcher.add(event, &timeout) - } - } - } else { - // supported equal to false generally means that trigger function - // is not defined (or not aware of any indexes). In this case, - // watchers filters should generally also don't generate any - // trigger values, but can cause problems in case of some - // misconfiguration. Thus we paranoidly leave this branch. - - // Iterate over watchers interested in exact values for all values. - for _, watchers := range c.watchers.valueWatchers { - for _, watcher := range watchers { - watcher.add(event, &timeout) - } - } - } -} - -func (c *Cacher) terminateAllWatchers() { - c.Lock() - defer c.Unlock() - c.watchers.terminateAll(c.objectType) -} - -func (c *Cacher) isStopped() bool { - c.stopLock.RLock() - defer c.stopLock.RUnlock() - return c.stopped -} - -func (c *Cacher) Stop() { - c.stopLock.Lock() - c.stopped = true - c.stopLock.Unlock() - close(c.stopCh) - c.stopWg.Wait() -} - -func forgetWatcher(c *Cacher, index int, triggerValue string, triggerSupported bool) func(bool) { - return func(lock bool) { - if lock { - c.Lock() - defer c.Unlock() - } - // It's possible that the watcher is already not in the structure (e.g. in case of - // simulaneous Stop() and terminateAllWatchers(), but it doesn't break anything. - c.watchers.deleteWatcher(index, triggerValue, triggerSupported) - } -} - -func filterFunction(key string, p SelectionPredicate) filterObjectFunc { - f := SimpleFilter(p) - filterFunc := func(objKey string, obj runtime.Object) bool { - if !hasPathPrefix(objKey, key) { - return false - } - return f(obj) - } - return filterFunc -} - -// Returns resource version to which the underlying cache is synced. 
-func (c *Cacher) LastSyncResourceVersion() (uint64, error) { - c.ready.wait() - - resourceVersion := c.reflector.LastSyncResourceVersion() - if resourceVersion == "" { - return 0, nil - } - - return strconv.ParseUint(resourceVersion, 10, 64) -} - -// cacherListerWatcher opaques storage.Interface to expose cache.ListerWatcher. -type cacherListerWatcher struct { - storage Interface - resourcePrefix string - newListFunc func() runtime.Object -} - -func newCacherListerWatcher(storage Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher { - return &cacherListerWatcher{ - storage: storage, - resourcePrefix: resourcePrefix, - newListFunc: newListFunc, - } -} - -// Implements cache.ListerWatcher interface. -func (lw *cacherListerWatcher) List(options api.ListOptions) (runtime.Object, error) { - list := lw.newListFunc() - if err := lw.storage.List(context.TODO(), lw.resourcePrefix, "", Everything, list); err != nil { - return nil, err - } - return list, nil -} - -// Implements cache.ListerWatcher interface. -func (lw *cacherListerWatcher) Watch(options api.ListOptions) (watch.Interface, error) { - return lw.storage.WatchList(context.TODO(), lw.resourcePrefix, options.ResourceVersion, Everything) -} - -// cacherWatch implements watch.Interface to return a single error -type errWatcher struct { - result chan watch.Event -} - -func newErrWatcher(err error) *errWatcher { - // Create an error event - errEvent := watch.Event{Type: watch.Error} - switch err := err.(type) { - case runtime.Object: - errEvent.Object = err - case *errors.StatusError: - errEvent.Object = &err.ErrStatus - default: - errEvent.Object = &unversioned.Status{ - Status: unversioned.StatusFailure, - Message: err.Error(), - Reason: unversioned.StatusReasonInternalError, - Code: http.StatusInternalServerError, - } - } - - // Create a watcher with room for a single event, populate it, and close the channel - watcher := &errWatcher{result: make(chan watch.Event, 1)} - watcher.result <- errEvent - close(watcher.result) - - return watcher -} - -// Implements watch.Interface. -func (c *errWatcher) ResultChan() <-chan watch.Event { - return c.result -} - -// Implements watch.Interface. -func (c *errWatcher) Stop() { - // no-op -} - -// cacherWatch implements watch.Interface -type cacheWatcher struct { - sync.Mutex - input chan watchCacheEvent - result chan watch.Event - filter filterObjectFunc - stopped bool - forget func(bool) -} - -func newCacheWatcher(resourceVersion uint64, chanSize int, initEvents []watchCacheEvent, filter filterObjectFunc, forget func(bool)) *cacheWatcher { - watcher := &cacheWatcher{ - input: make(chan watchCacheEvent, chanSize), - result: make(chan watch.Event, chanSize), - filter: filter, - stopped: false, - forget: forget, - } - go watcher.process(initEvents, resourceVersion) - return watcher -} - -// Implements watch.Interface. -func (c *cacheWatcher) ResultChan() <-chan watch.Event { - return c.result -} - -// Implements watch.Interface. -func (c *cacheWatcher) Stop() { - c.forget(true) - c.stop() -} - -func (c *cacheWatcher) stop() { - c.Lock() - defer c.Unlock() - if !c.stopped { - c.stopped = true - close(c.input) - } -} - -var timerPool sync.Pool - -func (c *cacheWatcher) add(event *watchCacheEvent, timeout *time.Duration) { - // Try to send the event immediately, without blocking. - select { - case c.input <- *event: - return - default: - } - - // OK, block sending, but only for up to . 
- // cacheWatcher.add is called very often, so arrange - // to reuse timers instead of constantly allocating. - startTime := time.Now() - - t, ok := timerPool.Get().(*time.Timer) - if ok { - t.Reset(*timeout) - } else { - t = time.NewTimer(*timeout) - } - defer timerPool.Put(t) - - select { - case c.input <- *event: - stopped := t.Stop() - if !stopped { - // Consume triggered (but not yet received) timer event - // so that future reuse does not get a spurious timeout. - <-t.C - } - case <-t.C: - // This means that we couldn't send event to that watcher. - // Since we don't want to block on it infinitely, - // we simply terminate it. - c.forget(false) - c.stop() - } - - if *timeout = *timeout - time.Since(startTime); *timeout < 0 { - *timeout = 0 - } -} - -// NOTE: sendWatchCacheEvent is assumed to not modify !!! -func (c *cacheWatcher) sendWatchCacheEvent(event *watchCacheEvent) { - curObjPasses := event.Type != watch.Deleted && c.filter(event.Key, event.Object) - oldObjPasses := false - if event.PrevObject != nil { - oldObjPasses = c.filter(event.Key, event.PrevObject) - } - if !curObjPasses && !oldObjPasses { - // Watcher is not interested in that object. - return - } - - object, err := api.Scheme.Copy(event.Object) - if err != nil { - glog.Errorf("unexpected copy error: %v", err) - return - } - switch { - case curObjPasses && !oldObjPasses: - c.result <- watch.Event{Type: watch.Added, Object: object} - case curObjPasses && oldObjPasses: - c.result <- watch.Event{Type: watch.Modified, Object: object} - case !curObjPasses && oldObjPasses: - c.result <- watch.Event{Type: watch.Deleted, Object: object} - } -} - -func (c *cacheWatcher) process(initEvents []watchCacheEvent, resourceVersion uint64) { - defer utilruntime.HandleCrash() - - // Check how long we are processing initEvents. - // As long as these are not processed, we are not processing - // any incoming events, so if it takes long, we may actually - // block all watchers for some time. - // TODO: From the logs it seems that there happens processing - // times even up to 1s which is very long. However, this doesn't - // depend that much on the number of initEvents. E.g. from the - // 2000-node Kubemark run we have logs like this, e.g.: - // ... processing 13862 initEvents took 66.808689ms - // ... processing 14040 initEvents took 993.532539ms - // We should understand what is blocking us in those cases (e.g. - // is it lack of CPU, network, or sth else) and potentially - // consider increase size of result buffer in those cases. 
- const initProcessThreshold = 500 * time.Millisecond - startTime := time.Now() - for _, event := range initEvents { - c.sendWatchCacheEvent(&event) - } - processingTime := time.Since(startTime) - if processingTime > initProcessThreshold { - objType := "" - if len(initEvents) > 0 { - objType = reflect.TypeOf(initEvents[0].Object).String() - } - glog.V(2).Infof("processing %d initEvents of %s took %v", len(initEvents), objType, processingTime) - } - - defer close(c.result) - defer c.Stop() - for { - event, ok := <-c.input - if !ok { - return - } - // only send events newer than resourceVersion - if event.ResourceVersion > resourceVersion { - c.sendWatchCacheEvent(&event) - } - } -} - -type ready struct { - ok bool - c *sync.Cond -} - -func newReady() *ready { - return &ready{c: sync.NewCond(&sync.Mutex{})} -} - -func (r *ready) wait() { - r.c.L.Lock() - for !r.ok { - r.c.Wait() - } - r.c.L.Unlock() -} - -func (r *ready) set(ok bool) { - r.c.L.Lock() - defer r.c.L.Unlock() - r.ok = ok - r.c.Broadcast() -} diff --git a/vendor/k8s.io/kubernetes/pkg/storage/doc.go b/vendor/k8s.io/kubernetes/pkg/storage/doc.go deleted file mode 100644 index d2c5dbfc45..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/storage/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Interfaces for database-related operations. -package storage diff --git a/vendor/k8s.io/kubernetes/pkg/storage/errors.go b/vendor/k8s.io/kubernetes/pkg/storage/errors.go deleted file mode 100644 index 7c1fbb507b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/storage/errors.go +++ /dev/null @@ -1,170 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package storage - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/util/validation/field" -) - -const ( - ErrCodeKeyNotFound int = iota + 1 - ErrCodeKeyExists - ErrCodeResourceVersionConflicts - ErrCodeInvalidObj - ErrCodeUnreachable -) - -var errCodeToMessage = map[int]string{ - ErrCodeKeyNotFound: "key not found", - ErrCodeKeyExists: "key exists", - ErrCodeResourceVersionConflicts: "resource version conflicts", - ErrCodeInvalidObj: "invalid object", - ErrCodeUnreachable: "server unreachable", -} - -func NewKeyNotFoundError(key string, rv int64) *StorageError { - return &StorageError{ - Code: ErrCodeKeyNotFound, - Key: key, - ResourceVersion: rv, - } -} - -func NewKeyExistsError(key string, rv int64) *StorageError { - return &StorageError{ - Code: ErrCodeKeyExists, - Key: key, - ResourceVersion: rv, - } -} - -func NewResourceVersionConflictsError(key string, rv int64) *StorageError { - return &StorageError{ - Code: ErrCodeResourceVersionConflicts, - Key: key, - ResourceVersion: rv, - } -} - -func NewUnreachableError(key string, rv int64) *StorageError { - return &StorageError{ - Code: ErrCodeUnreachable, - Key: key, - ResourceVersion: rv, - } -} - -func NewInvalidObjError(key, msg string) *StorageError { - return &StorageError{ - Code: ErrCodeInvalidObj, - Key: key, - AdditionalErrorMsg: msg, - } -} - -type StorageError struct { - Code int - Key string - ResourceVersion int64 - AdditionalErrorMsg string -} - -func (e *StorageError) Error() string { - return fmt.Sprintf("StorageError: %s, Code: %d, Key: %s, ResourceVersion: %d, AdditionalErrorMsg: %s", - errCodeToMessage[e.Code], e.Code, e.Key, e.ResourceVersion, e.AdditionalErrorMsg) -} - -// IsNotFound returns true if and only if err is "key" not found error. -func IsNotFound(err error) bool { - return isErrCode(err, ErrCodeKeyNotFound) -} - -// IsNodeExist returns true if and only if err is an node already exist error. -func IsNodeExist(err error) bool { - return isErrCode(err, ErrCodeKeyExists) -} - -// IsUnreachable returns true if and only if err indicates the server could not be reached. -func IsUnreachable(err error) bool { - return isErrCode(err, ErrCodeUnreachable) -} - -// IsTestFailed returns true if and only if err is a write conflict. -func IsTestFailed(err error) bool { - return isErrCode(err, ErrCodeResourceVersionConflicts) -} - -// IsInvalidObj returns true if and only if err is invalid error -func IsInvalidObj(err error) bool { - return isErrCode(err, ErrCodeInvalidObj) -} - -func isErrCode(err error, code int) bool { - if err == nil { - return false - } - if e, ok := err.(*StorageError); ok { - return e.Code == code - } - return false -} - -// InvalidError is generated when an error caused by invalid API object occurs -// in the storage package. -type InvalidError struct { - Errs field.ErrorList -} - -func (e InvalidError) Error() string { - return e.Errs.ToAggregate().Error() -} - -// IsInvalidError returns true if and only if err is an InvalidError. -func IsInvalidError(err error) bool { - _, ok := err.(InvalidError) - return ok -} - -func NewInvalidError(errors field.ErrorList) InvalidError { - return InvalidError{errors} -} - -// InternalError is generated when an error occurs in the storage package, i.e., -// not from the underlying storage backend (e.g., etcd). -type InternalError struct { - Reason string -} - -func (e InternalError) Error() string { - return e.Reason -} - -// IsInternalError returns true if and only if err is an InternalError. 
-func IsInternalError(err error) bool { - _, ok := err.(InternalError) - return ok -} - -func NewInternalError(reason string) InternalError { - return InternalError{reason} -} - -func NewInternalErrorf(format string, a ...interface{}) InternalError { - return InternalError{fmt.Sprintf(format, a)} -} diff --git a/vendor/k8s.io/kubernetes/pkg/storage/interfaces.go b/vendor/k8s.io/kubernetes/pkg/storage/interfaces.go deleted file mode 100644 index 612d3741b7..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/storage/interfaces.go +++ /dev/null @@ -1,175 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package storage - -import ( - "golang.org/x/net/context" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/watch" -) - -// Versioner abstracts setting and retrieving metadata fields from database response -// onto the object ot list. -type Versioner interface { - // UpdateObject sets storage metadata into an API object. Returns an error if the object - // cannot be updated correctly. May return nil if the requested object does not need metadata - // from database. - UpdateObject(obj runtime.Object, resourceVersion uint64) error - // UpdateList sets the resource version into an API list object. Returns an error if the object - // cannot be updated correctly. May return nil if the requested object does not need metadata - // from database. - UpdateList(obj runtime.Object, resourceVersion uint64) error - // ObjectResourceVersion returns the resource version (for persistence) of the specified object. - // Should return an error if the specified object does not have a persistable version. - ObjectResourceVersion(obj runtime.Object) (uint64, error) -} - -// ResponseMeta contains information about the database metadata that is associated with -// an object. It abstracts the actual underlying objects to prevent coupling with concrete -// database and to improve testability. -type ResponseMeta struct { - // TTL is the time to live of the node that contained the returned object. It may be - // zero or negative in some cases (objects may be expired after the requested - // expiration time due to server lag). - TTL int64 - // The resource version of the node that contained the returned object. - ResourceVersion uint64 -} - -// MatchValue defines a pair (, ). -type MatchValue struct { - IndexName string - Value string -} - -// TriggerPublisherFunc is a function that takes an object, and returns a list of pairs -// (, ) for all indexes known -// to that function. -type TriggerPublisherFunc func(obj runtime.Object) []MatchValue - -// FilterFunc takes an API object and returns true if the object satisfies some requirements. -// TODO: We will remove this type and use SelectionPredicate everywhere. -type FilterFunc func(obj runtime.Object) bool - -// Everything accepts all objects. 
-var Everything = SelectionPredicate{ - Label: labels.Everything(), - Field: fields.Everything(), -} - -// Pass an UpdateFunc to Interface.GuaranteedUpdate to make an update -// that is guaranteed to succeed. -// See the comment for GuaranteedUpdate for more details. -type UpdateFunc func(input runtime.Object, res ResponseMeta) (output runtime.Object, ttl *uint64, err error) - -// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. -type Preconditions struct { - // Specifies the target UID. - // +optional - UID *types.UID `json:"uid,omitempty"` -} - -// NewUIDPreconditions returns a Preconditions with UID set. -func NewUIDPreconditions(uid string) *Preconditions { - u := types.UID(uid) - return &Preconditions{UID: &u} -} - -// Interface offers a common interface for object marshaling/unmarshaling operations and -// hides all the storage-related operations behind it. -type Interface interface { - // Returns Versioner associated with this interface. - Versioner() Versioner - - // Create adds a new object at a key unless it already exists. 'ttl' is time-to-live - // in seconds (0 means forever). If no error is returned and out is not nil, out will be - // set to the read value from database. - Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error - - // Delete removes the specified key and returns the value that existed at that spot. - // If key didn't exist, it will return NotFound storage error. - Delete(ctx context.Context, key string, out runtime.Object, preconditions *Preconditions) error - - // Watch begins watching the specified key. Events are decoded into API objects, - // and any items selected by 'p' are sent down to returned watch.Interface. - // resourceVersion may be used to specify what version to begin watching, - // which should be the current resourceVersion, and no longer rv+1 - // (e.g. reconnecting without missing any updates). - Watch(ctx context.Context, key string, resourceVersion string, p SelectionPredicate) (watch.Interface, error) - - // WatchList begins watching the specified key's items. Items are decoded into API - // objects and any item selected by 'p' are sent down to returned watch.Interface. - // resourceVersion may be used to specify what version to begin watching, - // which should be the current resourceVersion, and no longer rv+1 - // (e.g. reconnecting without missing any updates). - WatchList(ctx context.Context, key string, resourceVersion string, p SelectionPredicate) (watch.Interface, error) - - // Get unmarshals json found at key into objPtr. On a not found error, will either - // return a zero object of the requested type, or an error, depending on ignoreNotFound. - // Treats empty responses and nil response nodes exactly like a not found error. - Get(ctx context.Context, key string, objPtr runtime.Object, ignoreNotFound bool) error - - // GetToList unmarshals json found at key and opaque it into *List api object - // (an object that satisfies the runtime.IsList definition). - // The returned contents may be delayed, but it is guaranteed that they will - // be have at least 'resourceVersion'. - GetToList(ctx context.Context, key string, resourceVersion string, p SelectionPredicate, listObj runtime.Object) error - - // List unmarshalls jsons found at directory defined by key and opaque them - // into *List api object (an object that satisfies runtime.IsList definition). 
- // The returned contents may be delayed, but it is guaranteed that they will - // be have at least 'resourceVersion'. - List(ctx context.Context, key string, resourceVersion string, p SelectionPredicate, listObj runtime.Object) error - - // GuaranteedUpdate keeps calling 'tryUpdate()' to update key 'key' (of type 'ptrToType') - // retrying the update until success if there is index conflict. - // Note that object passed to tryUpdate may change across invocations of tryUpdate() if - // other writers are simultaneously updating it, so tryUpdate() needs to take into account - // the current contents of the object when deciding how the update object should look. - // If the key doesn't exist, it will return NotFound storage error if ignoreNotFound=false - // or zero value in 'ptrToType' parameter otherwise. - // If the object to update has the same value as previous, it won't do any update - // but will return the object in 'ptrToType' parameter. - // If 'suggestion' can contain zero or one element - in such case this can be used as - // a suggestion about the current version of the object to avoid read operation from - // storage to get it. - // - // Example: - // - // s := /* implementation of Interface */ - // err := s.GuaranteedUpdate( - // "myKey", &MyType{}, true, - // func(input runtime.Object, res ResponseMeta) (runtime.Object, *uint64, error) { - // // Before each incovation of the user defined function, "input" is reset to - // // current contents for "myKey" in database. - // curr := input.(*MyType) // Guaranteed to succeed. - // - // // Make the modification - // curr.Counter++ - // - // // Return the modified object - return an error to stop iterating. Return - // // a uint64 to alter the TTL on the object, or nil to keep it the same value. - // return cur, nil, nil - // } - // }) - GuaranteedUpdate( - ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool, - precondtions *Preconditions, tryUpdate UpdateFunc, suggestion ...runtime.Object) error -} diff --git a/vendor/k8s.io/kubernetes/pkg/storage/selection_predicate.go b/vendor/k8s.io/kubernetes/pkg/storage/selection_predicate.go deleted file mode 100644 index f4083bc916..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/storage/selection_predicate.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package storage - -import ( - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" -) - -// AttrFunc returns label and field sets for List or Watch to match. -// In any failure to parse given object, it returns error. -type AttrFunc func(obj runtime.Object) (labels.Set, fields.Set, error) - -// SelectionPredicate is used to represent the way to select objects from api storage. 
-type SelectionPredicate struct { - Label labels.Selector - Field fields.Selector - GetAttrs AttrFunc - IndexFields []string -} - -// Matches returns true if the given object's labels and fields (as -// returned by s.GetAttrs) match s.Label and s.Field. An error is -// returned if s.GetAttrs fails. -func (s *SelectionPredicate) Matches(obj runtime.Object) (bool, error) { - if s.Label.Empty() && s.Field.Empty() { - return true, nil - } - labels, fields, err := s.GetAttrs(obj) - if err != nil { - return false, err - } - matched := s.Label.Matches(labels) - if s.Field != nil { - matched = (matched && s.Field.Matches(fields)) - } - return matched, nil -} - -// MatchesSingle will return (name, true) if and only if s.Field matches on the object's -// name. -func (s *SelectionPredicate) MatchesSingle() (string, bool) { - // TODO: should be namespace.name - if name, ok := s.Field.RequiresExactMatch("metadata.name"); ok { - return name, true - } - return "", false -} - -// For any index defined by IndexFields, if a matcher can match only (a subset) -// of objects that return for a given index, a pair (, ) -// wil be returned. -// TODO: Consider supporting also labels. -func (s *SelectionPredicate) MatcherIndex() []MatchValue { - var result []MatchValue - for _, field := range s.IndexFields { - if value, ok := s.Field.RequiresExactMatch(field); ok { - result = append(result, MatchValue{IndexName: field, Value: value}) - } - } - return result -} diff --git a/vendor/k8s.io/kubernetes/pkg/storage/util.go b/vendor/k8s.io/kubernetes/pkg/storage/util.go deleted file mode 100644 index 0a2da7df21..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/storage/util.go +++ /dev/null @@ -1,161 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package storage - -import ( - "fmt" - "strconv" - "strings" - "sync/atomic" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/validation/path" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/validation/field" -) - -type SimpleUpdateFunc func(runtime.Object) (runtime.Object, error) - -// SimpleUpdateFunc converts SimpleUpdateFunc into UpdateFunc -func SimpleUpdate(fn SimpleUpdateFunc) UpdateFunc { - return func(input runtime.Object, _ ResponseMeta) (runtime.Object, *uint64, error) { - out, err := fn(input) - return out, nil, err - } -} - -// SimpleFilter converts a selection predicate into a FilterFunc. -// It ignores any error from Matches(). -func SimpleFilter(p SelectionPredicate) FilterFunc { - return func(obj runtime.Object) bool { - matches, err := p.Matches(obj) - if err != nil { - glog.Errorf("invalid object for matching. Obj: %v. 
Err: %v", obj, err) - return false - } - return matches - } -} - -func EverythingFunc(runtime.Object) bool { - return true -} - -func NoTriggerFunc() []MatchValue { - return nil -} - -func NoTriggerPublisher(runtime.Object) []MatchValue { - return nil -} - -// ParseWatchResourceVersion takes a resource version argument and converts it to -// the etcd version we should pass to helper.Watch(). Because resourceVersion is -// an opaque value, the default watch behavior for non-zero watch is to watch -// the next value (if you pass "1", you will see updates from "2" onwards). -func ParseWatchResourceVersion(resourceVersion string) (uint64, error) { - if resourceVersion == "" || resourceVersion == "0" { - return 0, nil - } - version, err := strconv.ParseUint(resourceVersion, 10, 64) - if err != nil { - return 0, NewInvalidError(field.ErrorList{ - // Validation errors are supposed to return version-specific field - // paths, but this is probably close enough. - field.Invalid(field.NewPath("resourceVersion"), resourceVersion, err.Error()), - }) - } - return version, nil -} - -// ParseListResourceVersion takes a resource version argument and converts it to -// the etcd version. -func ParseListResourceVersion(resourceVersion string) (uint64, error) { - if resourceVersion == "" { - return 0, nil - } - version, err := strconv.ParseUint(resourceVersion, 10, 64) - return version, err -} - -func NamespaceKeyFunc(prefix string, obj runtime.Object) (string, error) { - meta, err := meta.Accessor(obj) - if err != nil { - return "", err - } - name := meta.GetName() - if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 { - return "", fmt.Errorf("invalid name: %v", msgs) - } - return prefix + "/" + meta.GetNamespace() + "/" + name, nil -} - -func NoNamespaceKeyFunc(prefix string, obj runtime.Object) (string, error) { - meta, err := meta.Accessor(obj) - if err != nil { - return "", err - } - name := meta.GetName() - if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 { - return "", fmt.Errorf("invalid name: %v", msgs) - } - return prefix + "/" + name, nil -} - -// hasPathPrefix returns true if the string matches pathPrefix exactly, or if is prefixed with pathPrefix at a path segment boundary -func hasPathPrefix(s, pathPrefix string) bool { - // Short circuit if s doesn't contain the prefix at all - if !strings.HasPrefix(s, pathPrefix) { - return false - } - - pathPrefixLength := len(pathPrefix) - - if len(s) == pathPrefixLength { - // Exact match - return true - } - if strings.HasSuffix(pathPrefix, "/") { - // pathPrefix already ensured a path segment boundary - return true - } - if s[pathPrefixLength:pathPrefixLength+1] == "/" { - // The next character in s is a path segment boundary - // Check this instead of normalizing pathPrefix to avoid allocating on every call - return true - } - return false -} - -// HighWaterMark is a thread-safe object for tracking the maximum value seen -// for some quantity. -type HighWaterMark int64 - -// Update returns true if and only if 'current' is the highest value ever seen. 
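The SimpleUpdate adapter above is the usual way to satisfy the GuaranteedUpdate contract from the removed interfaces.go when the caller does not care about TTLs. A sketch of that pairing; the package name, the touch helper, and its parameters are placeholders for whatever the caller already has, and it compiles only against the vendored tree this diff removes:

package storagedemo // placeholder package name

import (
	"golang.org/x/net/context"

	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/storage" // vendored path removed by this diff
)

// touch rewrites the object at key without modifying it; the closure is
// re-invoked with freshly read contents on every resource-version conflict.
func touch(ctx context.Context, store storage.Interface, key string, obj runtime.Object) error {
	// false: do not ignore NotFound; nil: no UID preconditions.
	return store.GuaranteedUpdate(ctx, key, obj, false, nil,
		storage.SimpleUpdate(func(input runtime.Object) (runtime.Object, error) {
			// Mutate input here; returning it unchanged makes this a no-op update.
			return input, nil
		}))
}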
-func (hwm *HighWaterMark) Update(current int64) bool { - for { - old := atomic.LoadInt64((*int64)(hwm)) - if current <= old { - return false - } - if atomic.CompareAndSwapInt64((*int64)(hwm), old, current) { - return true - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/storage/watch_cache.go b/vendor/k8s.io/kubernetes/pkg/storage/watch_cache.go deleted file mode 100644 index 54c1ccd8fd..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/storage/watch_cache.go +++ /dev/null @@ -1,431 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package storage - -import ( - "fmt" - "sort" - "strconv" - "sync" - "time" - - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/client/cache" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" - "k8s.io/kubernetes/pkg/util/clock" - "k8s.io/kubernetes/pkg/watch" -) - -const ( - // MaximumListWait determines how long we're willing to wait for a - // list if a client specified a resource version in the future. - MaximumListWait = 60 * time.Second -) - -// watchCacheEvent is a single "watch event" that is send to users of -// watchCache. Additionally to a typical "watch.Event" it contains -// the previous value of the object to enable proper filtering in the -// upper layers. -type watchCacheEvent struct { - Type watch.EventType - Object runtime.Object - PrevObject runtime.Object - Key string - ResourceVersion uint64 -} - -// Computing a key of an object is generally non-trivial (it performs -// e.g. validation underneath). To avoid computing it multiple times -// (to serve the event in different List/Watch requests), in the -// underlying store we are keeping pair (key, object). -type storeElement struct { - Key string - Object runtime.Object -} - -func storeElementKey(obj interface{}) (string, error) { - elem, ok := obj.(*storeElement) - if !ok { - return "", fmt.Errorf("not a storeElement: %v", obj) - } - return elem.Key, nil -} - -// watchCacheElement is a single "watch event" stored in a cache. -// It contains the resource version of the object and the object -// itself. -type watchCacheElement struct { - resourceVersion uint64 - watchCacheEvent watchCacheEvent -} - -// watchCache implements a Store interface. -// However, it depends on the elements implementing runtime.Object interface. -// -// watchCache is a "sliding window" (with a limited capacity) of objects -// observed from a watch. -type watchCache struct { - sync.RWMutex - - // Condition on which lists are waiting for the fresh enough - // resource version. - cond *sync.Cond - - // Maximum size of history window. - capacity int - - // keyFunc is used to get a key in the underlying storage for a given object. - keyFunc func(runtime.Object) (string, error) - - // cache is used a cyclic buffer - its first element (with the smallest - // resourceVersion) is defined by startIndex, its last element is defined - // by endIndex (if cache is full it will be startIndex + capacity). 
- // Both startIndex and endIndex can be greater than buffer capacity - - // you should always apply modulo capacity to get an index in cache array. - cache []watchCacheElement - startIndex int - endIndex int - - // store will effectively support LIST operation from the "end of cache - // history" i.e. from the moment just after the newest cached watched event. - // It is necessary to effectively allow clients to start watching at now. - // NOTE: We assume that is thread-safe. - store cache.Store - - // ResourceVersion up to which the watchCache is propagated. - resourceVersion uint64 - - // This handler is run at the end of every successful Replace() method. - onReplace func() - - // This handler is run at the end of every Add/Update/Delete method - // and additionally gets the previous value of the object. - onEvent func(watchCacheEvent) - - // for testing timeouts. - clock clock.Clock -} - -func newWatchCache(capacity int, keyFunc func(runtime.Object) (string, error)) *watchCache { - wc := &watchCache{ - capacity: capacity, - keyFunc: keyFunc, - cache: make([]watchCacheElement, capacity), - startIndex: 0, - endIndex: 0, - store: cache.NewStore(storeElementKey), - resourceVersion: 0, - clock: clock.RealClock{}, - } - wc.cond = sync.NewCond(wc.RLocker()) - return wc -} - -// Add takes runtime.Object as an argument. -func (w *watchCache) Add(obj interface{}) error { - object, resourceVersion, err := objectToVersionedRuntimeObject(obj) - if err != nil { - return err - } - event := watch.Event{Type: watch.Added, Object: object} - - f := func(elem *storeElement) error { return w.store.Add(elem) } - return w.processEvent(event, resourceVersion, f) -} - -// Update takes runtime.Object as an argument. -func (w *watchCache) Update(obj interface{}) error { - object, resourceVersion, err := objectToVersionedRuntimeObject(obj) - if err != nil { - return err - } - event := watch.Event{Type: watch.Modified, Object: object} - - f := func(elem *storeElement) error { return w.store.Update(elem) } - return w.processEvent(event, resourceVersion, f) -} - -// Delete takes runtime.Object as an argument. -func (w *watchCache) Delete(obj interface{}) error { - object, resourceVersion, err := objectToVersionedRuntimeObject(obj) - if err != nil { - return err - } - event := watch.Event{Type: watch.Deleted, Object: object} - - f := func(elem *storeElement) error { return w.store.Delete(elem) } - return w.processEvent(event, resourceVersion, f) -} - -func objectToVersionedRuntimeObject(obj interface{}) (runtime.Object, uint64, error) { - object, ok := obj.(runtime.Object) - if !ok { - return nil, 0, fmt.Errorf("obj does not implement runtime.Object interface: %v", obj) - } - meta, err := meta.Accessor(object) - if err != nil { - return nil, 0, err - } - resourceVersion, err := parseResourceVersion(meta.GetResourceVersion()) - if err != nil { - return nil, 0, err - } - return object, resourceVersion, nil -} - -func parseResourceVersion(resourceVersion string) (uint64, error) { - if resourceVersion == "" { - return 0, nil - } - return strconv.ParseUint(resourceVersion, 10, 64) -} - -func (w *watchCache) processEvent(event watch.Event, resourceVersion uint64, updateFunc func(*storeElement) error) error { - key, err := w.keyFunc(event.Object) - if err != nil { - return fmt.Errorf("couldn't compute key: %v", err) - } - elem := &storeElement{Key: key, Object: event.Object} - - // TODO: We should consider moving this lock below after the watchCacheEvent - // is created. 
In such situation, the only problematic scenario is Replace( - // happening after getting object from store and before acquiring a lock. - // Maybe introduce another lock for this purpose. - w.Lock() - defer w.Unlock() - previous, exists, err := w.store.Get(elem) - if err != nil { - return err - } - var prevObject runtime.Object - if exists { - prevObject = previous.(*storeElement).Object - } - watchCacheEvent := watchCacheEvent{ - Type: event.Type, - Object: event.Object, - PrevObject: prevObject, - Key: key, - ResourceVersion: resourceVersion, - } - if w.onEvent != nil { - w.onEvent(watchCacheEvent) - } - w.updateCache(resourceVersion, &watchCacheEvent) - w.resourceVersion = resourceVersion - w.cond.Broadcast() - return updateFunc(elem) -} - -// Assumes that lock is already held for write. -func (w *watchCache) updateCache(resourceVersion uint64, event *watchCacheEvent) { - if w.endIndex == w.startIndex+w.capacity { - // Cache is full - remove the oldest element. - w.startIndex++ - } - w.cache[w.endIndex%w.capacity] = watchCacheElement{resourceVersion, *event} - w.endIndex++ -} - -// List returns list of pointers to objects. -func (w *watchCache) List() []interface{} { - return w.store.List() -} - -// waitUntilFreshAndBlock waits until cache is at least as fresh as given . -// NOTE: This function acquired lock and doesn't release it. -// You HAVE TO explicitly call w.RUnlock() after this function. -func (w *watchCache) waitUntilFreshAndBlock(resourceVersion uint64, trace *util.Trace) error { - startTime := w.clock.Now() - go func() { - // Wake us up when the time limit has expired. The docs - // promise that time.After (well, NewTimer, which it calls) - // will wait *at least* the duration given. Since this go - // routine starts sometime after we record the start time, and - // it will wake up the loop below sometime after the broadcast, - // we don't need to worry about waking it up before the time - // has expired accidentally. - <-w.clock.After(MaximumListWait) - w.cond.Broadcast() - }() - - w.RLock() - if trace != nil { - trace.Step("watchCache locked acquired") - } - for w.resourceVersion < resourceVersion { - if w.clock.Since(startTime) >= MaximumListWait { - return fmt.Errorf("time limit exceeded while waiting for resource version %v (current value: %v)", resourceVersion, w.resourceVersion) - } - w.cond.Wait() - } - if trace != nil { - trace.Step("watchCache fresh enough") - } - return nil -} - -// WaitUntilFreshAndList returns list of pointers to objects. -func (w *watchCache) WaitUntilFreshAndList(resourceVersion uint64, trace *util.Trace) ([]interface{}, uint64, error) { - err := w.waitUntilFreshAndBlock(resourceVersion, trace) - defer w.RUnlock() - if err != nil { - return nil, 0, err - } - return w.store.List(), w.resourceVersion, nil -} - -// WaitUntilFreshAndGet returns a pointers to object. -func (w *watchCache) WaitUntilFreshAndGet(resourceVersion uint64, key string, trace *util.Trace) (interface{}, bool, uint64, error) { - err := w.waitUntilFreshAndBlock(resourceVersion, trace) - defer w.RUnlock() - if err != nil { - return nil, false, 0, err - } - value, exists, err := w.store.GetByKey(key) - return value, exists, w.resourceVersion, err -} - -func (w *watchCache) ListKeys() []string { - return w.store.ListKeys() -} - -// Get takes runtime.Object as a parameter. However, it returns -// pointer to . 
-func (w *watchCache) Get(obj interface{}) (interface{}, bool, error) { - object, ok := obj.(runtime.Object) - if !ok { - return nil, false, fmt.Errorf("obj does not implement runtime.Object interface: %v", obj) - } - key, err := w.keyFunc(object) - if err != nil { - return nil, false, fmt.Errorf("couldn't compute key: %v", err) - } - - return w.store.Get(&storeElement{Key: key, Object: object}) -} - -// GetByKey returns pointer to . -func (w *watchCache) GetByKey(key string) (interface{}, bool, error) { - return w.store.GetByKey(key) -} - -// Replace takes slice of runtime.Object as a paramater. -func (w *watchCache) Replace(objs []interface{}, resourceVersion string) error { - version, err := parseResourceVersion(resourceVersion) - if err != nil { - return err - } - - toReplace := make([]interface{}, 0, len(objs)) - for _, obj := range objs { - object, ok := obj.(runtime.Object) - if !ok { - return fmt.Errorf("didn't get runtime.Object for replace: %#v", obj) - } - key, err := w.keyFunc(object) - if err != nil { - return fmt.Errorf("couldn't compute key: %v", err) - } - toReplace = append(toReplace, &storeElement{Key: key, Object: object}) - } - - w.Lock() - defer w.Unlock() - - w.startIndex = 0 - w.endIndex = 0 - if err := w.store.Replace(toReplace, resourceVersion); err != nil { - return err - } - w.resourceVersion = version - if w.onReplace != nil { - w.onReplace() - } - w.cond.Broadcast() - return nil -} - -func (w *watchCache) SetOnReplace(onReplace func()) { - w.Lock() - defer w.Unlock() - w.onReplace = onReplace -} - -func (w *watchCache) SetOnEvent(onEvent func(watchCacheEvent)) { - w.Lock() - defer w.Unlock() - w.onEvent = onEvent -} - -func (w *watchCache) GetAllEventsSinceThreadUnsafe(resourceVersion uint64) ([]watchCacheEvent, error) { - size := w.endIndex - w.startIndex - oldest := w.resourceVersion - if size > 0 { - oldest = w.cache[w.startIndex%w.capacity].resourceVersion - } - if resourceVersion == 0 { - // resourceVersion = 0 means that we don't require any specific starting point - // and we would like to start watching from ~now. - // However, to keep backward compatibility, we additionally need to return the - // current state and only then start watching from that point. - // - // TODO: In v2 api, we should stop returning the current state - #13969. - allItems := w.store.List() - result := make([]watchCacheEvent, len(allItems)) - for i, item := range allItems { - elem, ok := item.(*storeElement) - if !ok { - return nil, fmt.Errorf("not a storeElement: %v", elem) - } - result[i] = watchCacheEvent{ - Type: watch.Added, - Object: elem.Object, - Key: elem.Key, - ResourceVersion: w.resourceVersion, - } - } - return result, nil - } - if resourceVersion < oldest-1 { - return nil, errors.NewGone(fmt.Sprintf("too old resource version: %d (%d)", resourceVersion, oldest-1)) - } - - // Binary search the smallest index at which resourceVersion is greater than the given one. 
- f := func(i int) bool { - return w.cache[(w.startIndex+i)%w.capacity].resourceVersion > resourceVersion - } - first := sort.Search(size, f) - result := make([]watchCacheEvent, size-first) - for i := 0; i < size-first; i++ { - result[i] = w.cache[(w.startIndex+first+i)%w.capacity].watchCacheEvent - } - return result, nil -} - -func (w *watchCache) GetAllEventsSince(resourceVersion uint64) ([]watchCacheEvent, error) { - w.RLock() - defer w.RUnlock() - return w.GetAllEventsSinceThreadUnsafe(resourceVersion) -} - -func (w *watchCache) Resync() error { - // Nothing to do - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/types/BUILD b/vendor/k8s.io/kubernetes/pkg/types/BUILD deleted file mode 100644 index 329970fb8f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/types/BUILD +++ /dev/null @@ -1,23 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "namespacedname.go", - "nodename.go", - "uid.go", - "unix_user_id.go", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/BUILD b/vendor/k8s.io/kubernetes/pkg/util/BUILD index 7886876e77..d5f7ae7e40 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/BUILD @@ -4,10 +4,8 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", "go_test", - "cgo_library", ) go_library( @@ -15,13 +13,10 @@ go_library( srcs = [ "doc.go", "template.go", - "trace.go", - "trie.go", "umask.go", "util.go", ], tags = ["automanaged"], - deps = ["//vendor:github.com/golang/glog"], ) go_test( @@ -30,10 +25,82 @@ go_test( "template_test.go", "util_test.go", ], - library = "go_default_library", + library = ":go_default_library", tags = ["automanaged"], deps = [ - "//pkg/util/diff:go_default_library", "//vendor:github.com/stretchr/testify/assert", + "//vendor:k8s.io/apimachinery/pkg/util/diff", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/util/async:all-srcs", + "//pkg/util/bandwidth:all-srcs", + "//pkg/util/chmod:all-srcs", + "//pkg/util/chown:all-srcs", + "//pkg/util/config:all-srcs", + "//pkg/util/configz:all-srcs", + "//pkg/util/crlf:all-srcs", + "//pkg/util/dbus:all-srcs", + "//pkg/util/ebtables:all-srcs", + "//pkg/util/env:all-srcs", + "//pkg/util/errors:all-srcs", + "//pkg/util/exec:all-srcs", + "//pkg/util/flock:all-srcs", + "//pkg/util/framer:all-srcs", + "//pkg/util/goroutinemap:all-srcs", + "//pkg/util/hash:all-srcs", + "//pkg/util/i18n:all-srcs", + "//pkg/util/initsystem:all-srcs", + "//pkg/util/interrupt:all-srcs", + "//pkg/util/intstr:all-srcs", + "//pkg/util/io:all-srcs", + "//pkg/util/ipconfig:all-srcs", + "//pkg/util/iptables:all-srcs", + "//pkg/util/json:all-srcs", + "//pkg/util/keymutex:all-srcs", + "//pkg/util/labels:all-srcs", + "//pkg/util/limitwriter:all-srcs", + "//pkg/util/logs:all-srcs", + "//pkg/util/maps:all-srcs", + "//pkg/util/metrics:all-srcs", + "//pkg/util/mount:all-srcs", + "//pkg/util/net:all-srcs", + "//pkg/util/netsh:all-srcs", + "//pkg/util/node:all-srcs", + "//pkg/util/oom:all-srcs", + "//pkg/util/parsers:all-srcs", + "//pkg/util/procfs:all-srcs", + "//pkg/util/rand:all-srcs", + "//pkg/util/resourcecontainer:all-srcs", + "//pkg/util/rlimit:all-srcs", + 
"//pkg/util/runtime:all-srcs", + "//pkg/util/selinux:all-srcs", + "//pkg/util/sets:all-srcs", + "//pkg/util/slice:all-srcs", + "//pkg/util/strings:all-srcs", + "//pkg/util/sysctl:all-srcs", + "//pkg/util/system:all-srcs", + "//pkg/util/tail:all-srcs", + "//pkg/util/taints:all-srcs", + "//pkg/util/term:all-srcs", + "//pkg/util/threading:all-srcs", + "//pkg/util/uuid:all-srcs", + "//pkg/util/validation:all-srcs", + "//pkg/util/version:all-srcs", + "//pkg/util/wait:all-srcs", + "//pkg/util/workqueue/prometheus:all-srcs", + "//pkg/util/yaml:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/util/cert/BUILD b/vendor/k8s.io/kubernetes/pkg/util/cert/BUILD deleted file mode 100644 index bb3e7bc336..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/cert/BUILD +++ /dev/null @@ -1,34 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "cert.go", - "csr.go", - "io.go", - "pem.go", - ], - tags = ["automanaged"], - deps = ["//pkg/apis/certificates:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = ["csr_test.go"], - data = [ - "testdata/dontUseThisKey.pem", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/cert/csr.go b/vendor/k8s.io/kubernetes/pkg/util/cert/csr.go deleted file mode 100644 index 91cc32f62b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/cert/csr.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cert - -import ( - cryptorand "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "errors" - "net" - - "k8s.io/kubernetes/pkg/apis/certificates" -) - -// ParseCSR extracts the CSR from the API object and decodes it. -func ParseCSR(obj *certificates.CertificateSigningRequest) (*x509.CertificateRequest, error) { - // extract PEM from request object - pemBytes := obj.Spec.Request - block, _ := pem.Decode(pemBytes) - if block == nil || block.Type != "CERTIFICATE REQUEST" { - return nil, errors.New("PEM block type must be CERTIFICATE REQUEST") - } - csr, err := x509.ParseCertificateRequest(block.Bytes) - if err != nil { - return nil, err - } - return csr, nil -} - -// MakeCSR generates a PEM-encoded CSR using the supplied private key, subject, and SANs. -// All key types that are implemented via crypto.Signer are supported (This includes *rsa.PrivateKey and *ecdsa.PrivateKey.) 
-func MakeCSR(privateKey interface{}, subject *pkix.Name, dnsSANs []string, ipSANs []net.IP) (csr []byte, err error) { - // Customize the signature for RSA keys, depending on the key size - var sigType x509.SignatureAlgorithm - if privateKey, ok := privateKey.(*rsa.PrivateKey); ok { - keySize := privateKey.N.BitLen() - switch { - case keySize >= 4096: - sigType = x509.SHA512WithRSA - case keySize >= 3072: - sigType = x509.SHA384WithRSA - default: - sigType = x509.SHA256WithRSA - } - } - - template := &x509.CertificateRequest{ - Subject: *subject, - SignatureAlgorithm: sigType, - DNSNames: dnsSANs, - IPAddresses: ipSANs, - } - - csr, err = x509.CreateCertificateRequest(cryptorand.Reader, template, privateKey) - if err != nil { - return nil, err - } - - csrPemBlock := &pem.Block{ - Type: "CERTIFICATE REQUEST", - Bytes: csr, - } - - return pem.EncodeToMemory(csrPemBlock), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/cert/io.go b/vendor/k8s.io/kubernetes/pkg/util/cert/io.go deleted file mode 100644 index 9a3e1622f3..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/cert/io.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cert - -import ( - "crypto/x509" - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" -) - -// CanReadCertOrKey returns true if the certificate or key files already exists, -// otherwise returns false. -func CanReadCertOrKey(certPath, keyPath string) bool { - if canReadFile(certPath) || canReadFile(keyPath) { - return true - } - - return false -} - -// If the file represented by path exists and -// readable, returns true otherwise returns false. -func canReadFile(path string) bool { - f, err := os.Open(path) - if err != nil { - return false - } - - defer f.Close() - - return true -} - -// WriteCert writes the pem-encoded certificate data to certPath. -// The certificate file will be created with file mode 0644. -// If the certificate file already exists, it will be overwritten. -// The parent directory of the certPath will be created as needed with file mode 0755. -func WriteCert(certPath string, data []byte) error { - if err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil { - return err - } - if err := ioutil.WriteFile(certPath, data, os.FileMode(0644)); err != nil { - return err - } - return nil -} - -// WriteKey writes the pem-encoded key data to keyPath. -// The key file will be created with file mode 0600. -// If the key file already exists, it will be overwritten. -// The parent directory of the keyPath will be created as needed with file mode 0755. -func WriteKey(keyPath string, data []byte) error { - if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil { - return err - } - if err := ioutil.WriteFile(keyPath, data, os.FileMode(0600)); err != nil { - return err - } - return nil -} - -// NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file. 
-// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates -func NewPool(filename string) (*x509.CertPool, error) { - certs, err := certsFromFile(filename) - if err != nil { - return nil, err - } - pool := x509.NewCertPool() - for _, cert := range certs { - pool.AddCert(cert) - } - return pool, nil -} - -// certsFromFile returns the x509.Certificates contained in the given PEM-encoded file. -// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates -func certsFromFile(file string) ([]*x509.Certificate, error) { - if len(file) == 0 { - return nil, errors.New("error reading certificates from an empty filename") - } - pemBlock, err := ioutil.ReadFile(file) - if err != nil { - return nil, err - } - certs, err := ParseCertsPEM(pemBlock) - if err != nil { - return nil, fmt.Errorf("error reading %s: %s", file, err) - } - return certs, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/cert/pem.go b/vendor/k8s.io/kubernetes/pkg/util/cert/pem.go deleted file mode 100644 index 59e602d2f1..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/cert/pem.go +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cert - -import ( - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" -) - -// EncodePublicKeyPEM returns PEM-endcode public data -func EncodePublicKeyPEM(key *rsa.PublicKey) ([]byte, error) { - der, err := x509.MarshalPKIXPublicKey(key) - if err != nil { - return []byte{}, err - } - block := pem.Block{ - Type: "PUBLIC KEY", - Bytes: der, - } - return pem.EncodeToMemory(&block), nil -} - -// EncodePrivateKeyPEM returns PEM-encoded private key data -func EncodePrivateKeyPEM(key *rsa.PrivateKey) []byte { - block := pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(key), - } - return pem.EncodeToMemory(&block) -} - -// EncodeCertPEM returns PEM-endcoded certificate data -func EncodeCertPEM(cert *x509.Certificate) []byte { - block := pem.Block{ - Type: "CERTIFICATE", - Bytes: cert.Raw, - } - return pem.EncodeToMemory(&block) -} - -// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data. 
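Taken together, the removed csr.go, io.go, and pem.go cover the path from a private key to a PEM-encoded CSR on disk. A sketch of that flow; the vendored import path is the one being removed, and the common name, SAN, and output path are illustrative only:

package main

import (
	cryptorand "crypto/rand"
	"crypto/rsa"
	"crypto/x509/pkix"
	"fmt"

	"k8s.io/kubernetes/pkg/util/cert" // vendored path removed by this diff
)

func main() {
	// A 2048-bit key keeps the example fast; MakeCSR selects SHA256WithRSA for it.
	key, err := rsa.GenerateKey(cryptorand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	csrPEM, err := cert.MakeCSR(key, &pkix.Name{CommonName: "node-1"},
		[]string{"node-1.example.com"}, nil)
	if err != nil {
		panic(err)
	}

	// WriteKey creates parent directories as needed and writes the key with mode 0600.
	if err := cert.WriteKey("/tmp/node-1.key", cert.EncodePrivateKeyPEM(key)); err != nil {
		panic(err)
	}
	fmt.Printf("generated %d bytes of PEM-encoded CSR\n", len(csrPEM))
}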
-// Recognizes PEM blocks for "EC PRIVATE KEY" and "RSA PRIVATE KEY" -func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) { - for { - var privateKeyPemBlock *pem.Block - privateKeyPemBlock, keyData = pem.Decode(keyData) - if privateKeyPemBlock == nil { - // we read all the PEM blocks and didn't recognize one - return nil, fmt.Errorf("no private key PEM block found") - } - - switch privateKeyPemBlock.Type { - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(privateKeyPemBlock.Bytes) - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes) - } - } -} - -// ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array -// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates -func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { - ok := false - certs := []*x509.Certificate{} - for len(pemCerts) > 0 { - var block *pem.Block - block, pemCerts = pem.Decode(pemCerts) - if block == nil { - break - } - // Only use PEM "CERTIFICATE" blocks without extra headers - if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { - continue - } - - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return certs, err - } - - certs = append(certs, cert) - ok = true - } - - if !ok { - return certs, errors.New("could not read any certificates") - } - return certs, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/chmod/BUILD b/vendor/k8s.io/kubernetes/pkg/util/chmod/BUILD index 8a57c2b2c3..68d87cb5bf 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/chmod/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/chmod/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -18,3 +15,16 @@ go_library( ], tags = ["automanaged"], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/util/chown/BUILD b/vendor/k8s.io/kubernetes/pkg/util/chown/BUILD index ee8ae7e344..08de6be220 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/chown/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/chown/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -18,3 +15,16 @@ go_library( ], tags = ["automanaged"], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/util/clock/BUILD b/vendor/k8s.io/kubernetes/pkg/util/clock/BUILD deleted file mode 100644 index c2a8360654..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/clock/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["clock.go"], - tags = ["automanaged"], -) - -go_test( - name = "go_default_test", - srcs = ["clock_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/clock/clock.go 
b/vendor/k8s.io/kubernetes/pkg/util/clock/clock.go deleted file mode 100644 index 2ea1b53c1a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/clock/clock.go +++ /dev/null @@ -1,218 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clock - -import ( - "sync" - "time" -) - -// Clock allows for injecting fake or real clocks into code that -// needs to do arbitrary things based on time. -type Clock interface { - Now() time.Time - Since(time.Time) time.Duration - After(d time.Duration) <-chan time.Time - Sleep(d time.Duration) - Tick(d time.Duration) <-chan time.Time -} - -var ( - _ = Clock(RealClock{}) - _ = Clock(&FakeClock{}) - _ = Clock(&IntervalClock{}) -) - -// RealClock really calls time.Now() -type RealClock struct{} - -// Now returns the current time. -func (RealClock) Now() time.Time { - return time.Now() -} - -// Since returns time since the specified timestamp. -func (RealClock) Since(ts time.Time) time.Duration { - return time.Since(ts) -} - -// Same as time.After(d). -func (RealClock) After(d time.Duration) <-chan time.Time { - return time.After(d) -} - -func (RealClock) Tick(d time.Duration) <-chan time.Time { - return time.Tick(d) -} - -func (RealClock) Sleep(d time.Duration) { - time.Sleep(d) -} - -// FakeClock implements Clock, but returns an arbitrary time. -type FakeClock struct { - lock sync.RWMutex - time time.Time - - // waiters are waiting for the fake time to pass their specified time - waiters []fakeClockWaiter -} - -type fakeClockWaiter struct { - targetTime time.Time - stepInterval time.Duration - skipIfBlocked bool - destChan chan<- time.Time -} - -func NewFakeClock(t time.Time) *FakeClock { - return &FakeClock{ - time: t, - } -} - -// Now returns f's time. -func (f *FakeClock) Now() time.Time { - f.lock.RLock() - defer f.lock.RUnlock() - return f.time -} - -// Since returns time since the time in f. -func (f *FakeClock) Since(ts time.Time) time.Duration { - f.lock.RLock() - defer f.lock.RUnlock() - return f.time.Sub(ts) -} - -// Fake version of time.After(d). -func (f *FakeClock) After(d time.Duration) <-chan time.Time { - f.lock.Lock() - defer f.lock.Unlock() - stopTime := f.time.Add(d) - ch := make(chan time.Time, 1) // Don't block! - f.waiters = append(f.waiters, fakeClockWaiter{ - targetTime: stopTime, - destChan: ch, - }) - return ch -} - -func (f *FakeClock) Tick(d time.Duration) <-chan time.Time { - f.lock.Lock() - defer f.lock.Unlock() - tickTime := f.time.Add(d) - ch := make(chan time.Time, 1) // hold one tick - f.waiters = append(f.waiters, fakeClockWaiter{ - targetTime: tickTime, - stepInterval: d, - skipIfBlocked: true, - destChan: ch, - }) - - return ch -} - -// Move clock by Duration, notify anyone that's called After or Tick -func (f *FakeClock) Step(d time.Duration) { - f.lock.Lock() - defer f.lock.Unlock() - f.setTimeLocked(f.time.Add(d)) -} - -// Sets the time. 
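The FakeClock above exists so that code written against the Clock interface can be driven deterministically in tests: inject a *FakeClock, then advance it with Step. A sketch of that pattern; the vendored import path is the one being removed, and the test name and timeout value are illustrative:

package clockexample // placeholder; this would live in a _test.go file

import (
	"testing"
	"time"

	"k8s.io/kubernetes/pkg/util/clock" // vendored path removed by this diff
)

func TestTimeoutFires(t *testing.T) {
	fake := clock.NewFakeClock(time.Now())
	timeout := fake.After(30 * time.Second)

	if !fake.HasWaiters() {
		t.Fatal("expected a pending After() waiter")
	}

	// Advance fake time past the deadline; no real waiting happens.
	fake.Step(time.Minute)

	select {
	case <-timeout:
		// expected: the buffered waiter channel already holds the tick
	default:
		t.Fatal("expected the After channel to be ready")
	}
}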
-func (f *FakeClock) SetTime(t time.Time) { - f.lock.Lock() - defer f.lock.Unlock() - f.setTimeLocked(t) -} - -// Actually changes the time and checks any waiters. f must be write-locked. -func (f *FakeClock) setTimeLocked(t time.Time) { - f.time = t - newWaiters := make([]fakeClockWaiter, 0, len(f.waiters)) - for i := range f.waiters { - w := &f.waiters[i] - if !w.targetTime.After(t) { - - if w.skipIfBlocked { - select { - case w.destChan <- t: - default: - } - } else { - w.destChan <- t - } - - if w.stepInterval > 0 { - for !w.targetTime.After(t) { - w.targetTime = w.targetTime.Add(w.stepInterval) - } - newWaiters = append(newWaiters, *w) - } - - } else { - newWaiters = append(newWaiters, f.waiters[i]) - } - } - f.waiters = newWaiters -} - -// Returns true if After has been called on f but not yet satisfied (so you can -// write race-free tests). -func (f *FakeClock) HasWaiters() bool { - f.lock.RLock() - defer f.lock.RUnlock() - return len(f.waiters) > 0 -} - -func (f *FakeClock) Sleep(d time.Duration) { - f.Step(d) -} - -// IntervalClock implements Clock, but each invocation of Now steps the clock forward the specified duration -type IntervalClock struct { - Time time.Time - Duration time.Duration -} - -// Now returns i's time. -func (i *IntervalClock) Now() time.Time { - i.Time = i.Time.Add(i.Duration) - return i.Time -} - -// Since returns time since the time in i. -func (i *IntervalClock) Since(ts time.Time) time.Duration { - return i.Time.Sub(ts) -} - -// Unimplemented, will panic. -// TODO: make interval clock use FakeClock so this can be implemented. -func (*IntervalClock) After(d time.Duration) <-chan time.Time { - panic("IntervalClock doesn't implement After") -} - -// Unimplemented, will panic. -// TODO: make interval clock use FakeClock so this can be implemented. -func (*IntervalClock) Tick(d time.Duration) <-chan time.Time { - panic("IntervalClock doesn't implement Tick") -} - -func (*IntervalClock) Sleep(d time.Duration) { - panic("IntervalClock doesn't implement Sleep") -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/config/BUILD b/vendor/k8s.io/kubernetes/pkg/util/config/BUILD deleted file mode 100644 index 30d4214b41..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/config/BUILD +++ /dev/null @@ -1,40 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "config.go", - "configuration_map.go", - "doc.go", - "feature_gate.go", - "namedcertkey_flag.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/util/wait:go_default_library", - "//vendor:github.com/golang/glog", - "//vendor:github.com/spf13/pflag", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "config_test.go", - "feature_gate_test.go", - "namedcertkey_flag_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = ["//vendor:github.com/spf13/pflag"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/config/config.go b/vendor/k8s.io/kubernetes/pkg/util/config/config.go deleted file mode 100644 index 30defee87d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/config/config.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "sync" - - "k8s.io/kubernetes/pkg/util/wait" -) - -type Merger interface { - // Invoked when a change from a source is received. May also function as an incremental - // merger if you wish to consume changes incrementally. Must be reentrant when more than - // one source is defined. - Merge(source string, update interface{}) error -} - -// MergeFunc implements the Merger interface -type MergeFunc func(source string, update interface{}) error - -func (f MergeFunc) Merge(source string, update interface{}) error { - return f(source, update) -} - -// Mux is a class for merging configuration from multiple sources. Changes are -// pushed via channels and sent to the merge function. -type Mux struct { - // Invoked when an update is sent to a source. - merger Merger - - // Sources and their lock. - sourceLock sync.RWMutex - // Maps source names to channels - sources map[string]chan interface{} -} - -// NewMux creates a new mux that can merge changes from multiple sources. -func NewMux(merger Merger) *Mux { - mux := &Mux{ - sources: make(map[string]chan interface{}), - merger: merger, - } - return mux -} - -// Channel returns a channel where a configuration source -// can send updates of new configurations. Multiple calls with the same -// source will return the same channel. This allows change and state based sources -// to use the same channel. Different source names however will be treated as a -// union. -func (m *Mux) Channel(source string) chan interface{} { - if len(source) == 0 { - panic("Channel given an empty name") - } - m.sourceLock.Lock() - defer m.sourceLock.Unlock() - channel, exists := m.sources[source] - if exists { - return channel - } - newChannel := make(chan interface{}) - m.sources[source] = newChannel - go wait.Until(func() { m.listen(source, newChannel) }, 0, wait.NeverStop) - return newChannel -} - -func (m *Mux) listen(source string, listenChannel <-chan interface{}) { - for update := range listenChannel { - m.merger.Merge(source, update) - } -} - -// Accessor is an interface for retrieving the current merge state. -type Accessor interface { - // MergedState returns a representation of the current merge state. - // Must be reentrant when more than one source is defined. - MergedState() interface{} -} - -// AccessorFunc implements the Accessor interface. -type AccessorFunc func() interface{} - -func (f AccessorFunc) MergedState() interface{} { - return f() -} - -type Listener interface { - // OnUpdate is invoked when a change is made to an object. - OnUpdate(instance interface{}) -} - -// ListenerFunc receives a representation of the change or object. -type ListenerFunc func(instance interface{}) - -func (f ListenerFunc) OnUpdate(instance interface{}) { - f(instance) -} - -type Broadcaster struct { - // Listeners for changes and their lock. - listenerLock sync.RWMutex - listeners []Listener -} - -// NewBroadcaster registers a set of listeners that support the Listener interface -// and notifies them all on changes. -func NewBroadcaster() *Broadcaster { - return &Broadcaster{} -} - -// Add registers listener to receive updates of changes. 
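The Mux shown above fans updates from multiple named sources into one Merger. A sketch of the smallest possible use; the vendored import path is the one being removed, and the source name "file" and the string update are illustrative:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/config" // vendored path removed by this diff
)

func main() {
	done := make(chan struct{})

	// The merger receives every update pushed into any source channel.
	mux := config.NewMux(config.MergeFunc(func(source string, update interface{}) error {
		fmt.Printf("merge from %q: %v\n", source, update)
		close(done)
		return nil
	}))

	// Channel returns (and memoizes) the channel for a named source.
	mux.Channel("file") <- "initial config"
	<-done
}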
-func (b *Broadcaster) Add(listener Listener) { - b.listenerLock.Lock() - defer b.listenerLock.Unlock() - b.listeners = append(b.listeners, listener) -} - -// Notify notifies all listeners. -func (b *Broadcaster) Notify(instance interface{}) { - b.listenerLock.RLock() - listeners := b.listeners - b.listenerLock.RUnlock() - for _, listener := range listeners { - listener.OnUpdate(instance) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/config/configuration_map.go b/vendor/k8s.io/kubernetes/pkg/util/config/configuration_map.go deleted file mode 100644 index 0acbde56f8..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/config/configuration_map.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "fmt" - "sort" - "strings" -) - -type ConfigurationMap map[string]string - -func (m *ConfigurationMap) String() string { - pairs := []string{} - for k, v := range *m { - pairs = append(pairs, fmt.Sprintf("%s=%s", k, v)) - } - sort.Strings(pairs) - return strings.Join(pairs, ",") -} - -func (m *ConfigurationMap) Set(value string) error { - for _, s := range strings.Split(value, ",") { - if len(s) == 0 { - continue - } - arr := strings.SplitN(s, "=", 2) - if len(arr) == 2 { - (*m)[strings.TrimSpace(arr[0])] = strings.TrimSpace(arr[1]) - } else { - (*m)[strings.TrimSpace(arr[0])] = "" - } - } - return nil -} - -func (*ConfigurationMap) Type() string { - return "mapStringString" -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/config/doc.go b/vendor/k8s.io/kubernetes/pkg/util/config/doc.go deleted file mode 100644 index 5dbb37d448..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/config/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package config provides utility objects for decoupling sources of configuration and the -// actual configuration state. Consumers must implement the Merger interface to unify -// the sources of change into an object. -package config diff --git a/vendor/k8s.io/kubernetes/pkg/util/config/feature_gate.go b/vendor/k8s.io/kubernetes/pkg/util/config/feature_gate.go deleted file mode 100644 index ff90f2fb28..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/config/feature_gate.go +++ /dev/null @@ -1,260 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "fmt" - "sort" - "strconv" - "strings" - - "github.com/golang/glog" - "github.com/spf13/pflag" -) - -const ( - flagName = "feature-gates" - - // All known feature keys - // To add a new feature, define a key for it below and add - // a featureSpec entry to knownFeatures. - - // allAlphaGate is a global toggle for alpha features. Per-feature key - // values override the default set by allAlphaGate. Examples: - // AllAlpha=false,NewFeature=true will result in newFeature=true - // AllAlpha=true,NewFeature=false will result in newFeature=false - allAlphaGate = "AllAlpha" - externalTrafficLocalOnly = "AllowExtTrafficLocalEndpoints" - appArmor = "AppArmor" - dynamicKubeletConfig = "DynamicKubeletConfig" - dynamicVolumeProvisioning = "DynamicVolumeProvisioning" - streamingProxyRedirects = "StreamingProxyRedirects" - - // experimentalHostUserNamespaceDefaulting Default userns=host for containers - // that are using other host namespaces, host mounts, the pod contains a privileged container, - // or specific non-namespaced capabilities - // (MKNOD, SYS_MODULE, SYS_TIME). This should only be enabled if user namespace remapping is enabled - // in the docker daemon. - experimentalHostUserNamespaceDefaultingGate = "ExperimentalHostUserNamespaceDefaulting" -) - -var ( - // Default values for recorded features. Every new feature gate should be - // represented here. - knownFeatures = map[string]featureSpec{ - allAlphaGate: {false, alpha}, - externalTrafficLocalOnly: {true, beta}, - appArmor: {true, beta}, - dynamicKubeletConfig: {false, alpha}, - dynamicVolumeProvisioning: {true, alpha}, - streamingProxyRedirects: {false, alpha}, - experimentalHostUserNamespaceDefaultingGate: {false, alpha}, - } - - // Special handling for a few gates. - specialFeatures = map[string]func(f *featureGate, val bool){ - allAlphaGate: setUnsetAlphaGates, - } - - // DefaultFeatureGate is a shared global FeatureGate. - DefaultFeatureGate = &featureGate{ - known: knownFeatures, - special: specialFeatures, - } -) - -type featureSpec struct { - enabled bool - prerelease prerelease -} - -type prerelease string - -const ( - // Values for prerelease. - alpha = prerelease("ALPHA") - beta = prerelease("BETA") - ga = prerelease("") -) - -// FeatureGate parses and stores flag gates for known features from -// a string like feature1=true,feature2=false,... 
-type FeatureGate interface { - AddFlag(fs *pflag.FlagSet) - Set(value string) error - KnownFeatures() []string - - // Every feature gate should add method here following this template: - // - // // owner: @username - // // alpha: v1.4 - // MyFeature() bool - - // owner: @timstclair - // beta: v1.4 - AppArmor() bool - - // owner: @girishkalele - // alpha: v1.4 - ExternalTrafficLocalOnly() bool - - // owner: @saad-ali - // alpha: v1.3 - DynamicVolumeProvisioning() bool - - // owner: @mtaufen - // alpha: v1.4 - DynamicKubeletConfig() bool - - // owner: timstclair - // alpha: v1.5 - StreamingProxyRedirects() bool - - // owner: @pweil- - // alpha: v1.5 - ExperimentalHostUserNamespaceDefaulting() bool -} - -// featureGate implements FeatureGate as well as pflag.Value for flag parsing. -type featureGate struct { - known map[string]featureSpec - special map[string]func(*featureGate, bool) - enabled map[string]bool -} - -func setUnsetAlphaGates(f *featureGate, val bool) { - for k, v := range f.known { - if v.prerelease == alpha { - if _, found := f.enabled[k]; !found { - f.enabled[k] = val - } - } - } -} - -// Set, String, and Type implement pflag.Value - -// Set Parses a string of the form // "key1=value1,key2=value2,..." into a -// map[string]bool of known keys or returns an error. -func (f *featureGate) Set(value string) error { - f.enabled = make(map[string]bool) - for _, s := range strings.Split(value, ",") { - if len(s) == 0 { - continue - } - arr := strings.SplitN(s, "=", 2) - k := strings.TrimSpace(arr[0]) - _, ok := f.known[k] - if !ok { - return fmt.Errorf("unrecognized key: %s", k) - } - if len(arr) != 2 { - return fmt.Errorf("missing bool value for %s", k) - } - v := strings.TrimSpace(arr[1]) - boolValue, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("invalid value of %s: %s, err: %v", k, v, err) - } - f.enabled[k] = boolValue - - // Handle "special" features like "all alpha gates" - if fn, found := f.special[k]; found { - fn(f, boolValue) - } - } - - glog.Infof("feature gates: %v", f.enabled) - return nil -} - -func (f *featureGate) String() string { - pairs := []string{} - for k, v := range f.enabled { - pairs = append(pairs, fmt.Sprintf("%s=%t", k, v)) - } - sort.Strings(pairs) - return strings.Join(pairs, ",") -} - -func (f *featureGate) Type() string { - return "mapStringBool" -} - -// ExternalTrafficLocalOnly returns value for AllowExtTrafficLocalEndpoints -func (f *featureGate) ExternalTrafficLocalOnly() bool { - return f.lookup(externalTrafficLocalOnly) -} - -// AppArmor returns the value for the AppArmor feature gate. -func (f *featureGate) AppArmor() bool { - return f.lookup(appArmor) -} - -// DynamicKubeletConfig returns value for dynamicKubeletConfig -func (f *featureGate) DynamicKubeletConfig() bool { - return f.lookup(dynamicKubeletConfig) -} - -// DynamicVolumeProvisioning returns value for dynamicVolumeProvisioning -func (f *featureGate) DynamicVolumeProvisioning() bool { - return f.lookup(dynamicVolumeProvisioning) -} - -// StreamingProxyRedirects controls whether the apiserver should intercept (and follow) -// redirects from the backend (Kubelet) for streaming requests (exec/attach/port-forward). 
-func (f *featureGate) StreamingProxyRedirects() bool { - return f.lookup(streamingProxyRedirects) -} - -// ExperimentalHostUserNamespaceDefaulting returns value for experimentalHostUserNamespaceDefaulting -func (f *featureGate) ExperimentalHostUserNamespaceDefaulting() bool { - return f.lookup(experimentalHostUserNamespaceDefaultingGate) -} - -func (f *featureGate) lookup(key string) bool { - defaultValue := f.known[key].enabled - if f.enabled != nil { - if v, ok := f.enabled[key]; ok { - return v - } - } - return defaultValue - -} - -// AddFlag adds a flag for setting global feature gates to the specified FlagSet. -func (f *featureGate) AddFlag(fs *pflag.FlagSet) { - known := f.KnownFeatures() - fs.Var(f, flagName, ""+ - "A set of key=value pairs that describe feature gates for alpha/experimental features. "+ - "Options are:\n"+strings.Join(known, "\n")) -} - -// Returns a string describing the FeatureGate's known features. -func (f *featureGate) KnownFeatures() []string { - var known []string - for k, v := range f.known { - pre := "" - if v.prerelease != ga { - pre = fmt.Sprintf("%s - ", v.prerelease) - } - known = append(known, fmt.Sprintf("%s=true|false (%sdefault=%t)", k, pre, v.enabled)) - } - sort.Strings(known) - return known -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/config/namedcertkey_flag.go b/vendor/k8s.io/kubernetes/pkg/util/config/namedcertkey_flag.go deleted file mode 100644 index 39f20f681f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/config/namedcertkey_flag.go +++ /dev/null @@ -1,113 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "errors" - "flag" - "strings" -) - -// NamedCertKey is a flag value parsing "certfile,keyfile" and "certfile,keyfile:name,name,name". -type NamedCertKey struct { - Names []string - CertFile, KeyFile string -} - -var _ flag.Value = &NamedCertKey{} - -func (nkc *NamedCertKey) String() string { - s := nkc.CertFile + "," + nkc.KeyFile - if len(nkc.Names) > 0 { - s = s + ":" + strings.Join(nkc.Names, ",") - } - return s -} - -func (nkc *NamedCertKey) Set(value string) error { - cs := strings.SplitN(value, ":", 2) - var keycert string - if len(cs) == 2 { - var names string - keycert, names = strings.TrimSpace(cs[0]), strings.TrimSpace(cs[1]) - if names == "" { - return errors.New("empty names list is not allowed") - } - nkc.Names = nil - for _, name := range strings.Split(names, ",") { - nkc.Names = append(nkc.Names, strings.TrimSpace(name)) - } - } else { - nkc.Names = nil - keycert = strings.TrimSpace(cs[0]) - } - cs = strings.Split(keycert, ",") - if len(cs) != 2 { - return errors.New("expected comma separated certificate and key file paths") - } - nkc.CertFile = strings.TrimSpace(cs[0]) - nkc.KeyFile = strings.TrimSpace(cs[1]) - return nil -} - -func (*NamedCertKey) Type() string { - return "namedCertKey" -} - -// NamedCertKeyArray is a flag value parsing NamedCertKeys, each passed with its own -// flag instance (in contrast to comma separated slices). 
-type NamedCertKeyArray struct { - value *[]NamedCertKey - changed bool -} - -var _ flag.Value = &NamedCertKey{} - -// NewNamedKeyCertArray creates a new NamedCertKeyArray with the internal value -// pointing to p. -func NewNamedCertKeyArray(p *[]NamedCertKey) *NamedCertKeyArray { - return &NamedCertKeyArray{ - value: p, - } -} - -func (a *NamedCertKeyArray) Set(val string) error { - nkc := NamedCertKey{} - err := nkc.Set(val) - if err != nil { - return err - } - if !a.changed { - *a.value = []NamedCertKey{nkc} - a.changed = true - } else { - *a.value = append(*a.value, nkc) - } - return nil -} - -func (a *NamedCertKeyArray) Type() string { - return "namedCertKey" -} - -func (a *NamedCertKeyArray) String() string { - nkcs := make([]string, 0, len(*a.value)) - for i := range *a.value { - nkcs = append(nkcs, (*a.value)[i].String()) - } - return "[" + strings.Join(nkcs, ";") + "]" -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/diff/BUILD b/vendor/k8s.io/kubernetes/pkg/util/diff/BUILD deleted file mode 100644 index f55005e692..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/diff/BUILD +++ /dev/null @@ -1,29 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["diff.go"], - tags = ["automanaged"], - deps = [ - "//pkg/util/validation/field:go_default_library", - "//vendor:github.com/davecgh/go-spew/spew", - ], -) - -go_test( - name = "go_default_test", - srcs = ["diff_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/errors/BUILD b/vendor/k8s.io/kubernetes/pkg/util/errors/BUILD deleted file mode 100644 index 9696926208..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/errors/BUILD +++ /dev/null @@ -1,28 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "errors.go", - ], - tags = ["automanaged"], -) - -go_test( - name = "go_default_test", - srcs = ["errors_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/exec/BUILD b/vendor/k8s.io/kubernetes/pkg/util/exec/BUILD index 93cfd55a24..32f2b4ee97 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/exec/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/exec/BUILD @@ -4,10 +4,8 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", "go_test", - "cgo_library", ) go_library( @@ -23,7 +21,19 @@ go_library( go_test( name = "go_default_test", srcs = ["exec_test.go"], - library = "go_default_library", + library = ":go_default_library", + tags = ["automanaged"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], tags = ["automanaged"], - deps = [], ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/exec/exec.go b/vendor/k8s.io/kubernetes/pkg/util/exec/exec.go index 1aeba036f4..327ddf5bce 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/exec/exec.go +++ b/vendor/k8s.io/kubernetes/pkg/util/exec/exec.go @@ -20,6 +20,7 @@ import ( "io" osexec "os/exec" "syscall" + "time" ) // ErrExecutableNotFound is returned if the 
executable is not found. @@ -48,6 +49,11 @@ type Cmd interface { SetDir(dir string) SetStdin(in io.Reader) SetStdout(out io.Writer) + // Stops the command by sending SIGTERM. It is not guaranteed the + // process will stop before this function returns. If the process is not + // responding, an internal timer function will send a SIGKILL to force + // terminate after 10 seconds. + Stop() } // ExitError is an interface that presents an API similar to os.ProcessState, which is @@ -110,6 +116,21 @@ func (cmd *cmdWrapper) Output() ([]byte, error) { return out, nil } +// Stop is part of the Cmd interface. +func (cmd *cmdWrapper) Stop() { + c := (*osexec.Cmd)(cmd) + if c.ProcessState.Exited() { + return + } + c.Process.Signal(syscall.SIGTERM) + time.AfterFunc(10*time.Second, func() { + if c.ProcessState.Exited() { + return + } + c.Process.Signal(syscall.SIGKILL) + }) +} + func handleError(err error) error { if ee, ok := err.(*osexec.ExitError); ok { // Force a compile fail if exitErrorWrapper can't convert to ExitError. diff --git a/vendor/k8s.io/kubernetes/pkg/util/exec/fake_exec.go b/vendor/k8s.io/kubernetes/pkg/util/exec/fake_exec.go index bd26081968..b87265099a 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/exec/fake_exec.go +++ b/vendor/k8s.io/kubernetes/pkg/util/exec/fake_exec.go @@ -90,6 +90,10 @@ func (fake *FakeCmd) Output() ([]byte, error) { return nil, fmt.Errorf("unimplemented") } +func (fake *FakeCmd) Stop() { + // no-op +} + // A simple fake ExitError type. type FakeExitError struct { Status int diff --git a/vendor/k8s.io/kubernetes/pkg/util/flag/BUILD b/vendor/k8s.io/kubernetes/pkg/util/flag/BUILD deleted file mode 100644 index c8b633a86c..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/flag/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "flags.go", - "string_flag.go", - "tristate.go", - ], - tags = ["automanaged"], - deps = [ - "//vendor:github.com/golang/glog", - "//vendor:github.com/spf13/pflag", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/flag/flags.go b/vendor/k8s.io/kubernetes/pkg/util/flag/flags.go deleted file mode 100644 index 24e961f27c..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/flag/flags.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package flag - -import ( - goflag "flag" - "strings" - - "github.com/golang/glog" - "github.com/spf13/pflag" -) - -// WordSepNormalizeFunc changes all flags that contain "_" separators -func WordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - if strings.Contains(name, "_") { - return pflag.NormalizedName(strings.Replace(name, "_", "-", -1)) - } - return pflag.NormalizedName(name) -} - -// WarnWordSepNormalizeFunc changes and warns for flags that contain "_" separators -func WarnWordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - if strings.Contains(name, "_") { - nname := strings.Replace(name, "_", "-", -1) - glog.Warningf("%s is DEPRECATED and will be removed in a future version. Use %s instead.", name, nname) - - return pflag.NormalizedName(nname) - } - return pflag.NormalizedName(name) -} - -// InitFlags normalizes and parses the command line flags -func InitFlags() { - pflag.CommandLine.SetNormalizeFunc(WordSepNormalizeFunc) - pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) - pflag.Parse() -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/flag/string_flag.go b/vendor/k8s.io/kubernetes/pkg/util/flag/string_flag.go deleted file mode 100644 index 331bdb66e2..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/flag/string_flag.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package flag - -// StringFlag is a string flag compatible with flags and pflags that keeps track of whether it had a value supplied or not. -type StringFlag struct { - // If Set has been invoked this value is true - provided bool - // The exact value provided on the flag - value string -} - -func NewStringFlag(defaultVal string) StringFlag { - return StringFlag{value: defaultVal} -} - -func (f *StringFlag) Default(value string) { - f.value = value -} - -func (f StringFlag) String() string { - return f.value -} - -func (f StringFlag) Value() string { - return f.value -} - -func (f *StringFlag) Set(value string) error { - f.value = value - f.provided = true - - return nil -} - -func (f StringFlag) Provided() bool { - return f.provided -} - -func (f *StringFlag) Type() string { - return "string" -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/flag/tristate.go b/vendor/k8s.io/kubernetes/pkg/util/flag/tristate.go deleted file mode 100644 index cf16376bf9..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/flag/tristate.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package flag - -import ( - "fmt" - "strconv" -) - -// Tristate is a flag compatible with flags and pflags that -// keeps track of whether it had a value supplied or not. -type Tristate int - -const ( - Unset Tristate = iota // 0 - True - False -) - -func (f *Tristate) Default(value bool) { - *f = triFromBool(value) -} - -func (f Tristate) String() string { - b := boolFromTri(f) - return fmt.Sprintf("%t", b) -} - -func (f Tristate) Value() bool { - b := boolFromTri(f) - return b -} - -func (f *Tristate) Set(value string) error { - boolVal, err := strconv.ParseBool(value) - if err != nil { - return err - } - - *f = triFromBool(boolVal) - return nil -} - -func (f Tristate) Provided() bool { - if f != Unset { - return true - } - return false -} - -func (f *Tristate) Type() string { - return "tristate" -} - -func boolFromTri(t Tristate) bool { - if t == True { - return true - } else { - return false - } -} - -func triFromBool(b bool) Tristate { - if b { - return True - } else { - return False - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/BUILD b/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/BUILD deleted file mode 100644 index 433cd4350e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/BUILD +++ /dev/null @@ -1,36 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "backoff.go", - "throttle.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/util/clock:go_default_library", - "//pkg/util/integer:go_default_library", - "//vendor:github.com/juju/ratelimit", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "backoff_test.go", - "throttle_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = ["//pkg/util/clock:go_default_library"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/backoff.go b/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/backoff.go deleted file mode 100644 index 2d91cc5e02..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/backoff.go +++ /dev/null @@ -1,149 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package flowcontrol - -import ( - "sync" - "time" - - "k8s.io/kubernetes/pkg/util/clock" - "k8s.io/kubernetes/pkg/util/integer" -) - -type backoffEntry struct { - backoff time.Duration - lastUpdate time.Time -} - -type Backoff struct { - sync.Mutex - Clock clock.Clock - defaultDuration time.Duration - maxDuration time.Duration - perItemBackoff map[string]*backoffEntry -} - -func NewFakeBackOff(initial, max time.Duration, tc *clock.FakeClock) *Backoff { - return &Backoff{ - perItemBackoff: map[string]*backoffEntry{}, - Clock: tc, - defaultDuration: initial, - maxDuration: max, - } -} - -func NewBackOff(initial, max time.Duration) *Backoff { - return &Backoff{ - perItemBackoff: map[string]*backoffEntry{}, - Clock: clock.RealClock{}, - defaultDuration: initial, - maxDuration: max, - } -} - -// Get the current backoff Duration -func (p *Backoff) Get(id string) time.Duration { - p.Lock() - defer p.Unlock() - var delay time.Duration - entry, ok := p.perItemBackoff[id] - if ok { - delay = entry.backoff - } - return delay -} - -// move backoff to the next mark, capping at maxDuration -func (p *Backoff) Next(id string, eventTime time.Time) { - p.Lock() - defer p.Unlock() - entry, ok := p.perItemBackoff[id] - if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { - entry = p.initEntryUnsafe(id) - } else { - delay := entry.backoff * 2 // exponential - entry.backoff = time.Duration(integer.Int64Min(int64(delay), int64(p.maxDuration))) - } - entry.lastUpdate = p.Clock.Now() -} - -// Reset forces clearing of all backoff data for a given key. -func (p *Backoff) Reset(id string) { - p.Lock() - defer p.Unlock() - delete(p.perItemBackoff, id) -} - -// Returns True if the elapsed time since eventTime is smaller than the current backoff window -func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool { - p.Lock() - defer p.Unlock() - entry, ok := p.perItemBackoff[id] - if !ok { - return false - } - if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { - return false - } - return p.Clock.Now().Sub(eventTime) < entry.backoff -} - -// Returns True if time since lastupdate is less than the current backoff window. -func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool { - p.Lock() - defer p.Unlock() - entry, ok := p.perItemBackoff[id] - if !ok { - return false - } - if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { - return false - } - return eventTime.Sub(entry.lastUpdate) < entry.backoff -} - -// Garbage collect records that have aged past maxDuration. Backoff users are expected -// to invoke this periodically. 
-func (p *Backoff) GC() { - p.Lock() - defer p.Unlock() - now := p.Clock.Now() - for id, entry := range p.perItemBackoff { - if now.Sub(entry.lastUpdate) > p.maxDuration*2 { - // GC when entry has not been updated for 2*maxDuration - delete(p.perItemBackoff, id) - } - } -} - -func (p *Backoff) DeleteEntry(id string) { - p.Lock() - defer p.Unlock() - delete(p.perItemBackoff, id) -} - -// Take a lock on *Backoff, before calling initEntryUnsafe -func (p *Backoff) initEntryUnsafe(id string) *backoffEntry { - entry := &backoffEntry{backoff: p.defaultDuration} - p.perItemBackoff[id] = entry - return entry -} - -// After 2*maxDuration we restart the backoff factor to the beginning -func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool { - return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/framer/BUILD b/vendor/k8s.io/kubernetes/pkg/util/framer/BUILD deleted file mode 100644 index 8aebf20ea5..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/framer/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["framer.go"], - tags = ["automanaged"], -) - -go_test( - name = "go_default_test", - srcs = ["framer_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/hash/BUILD b/vendor/k8s.io/kubernetes/pkg/util/hash/BUILD deleted file mode 100644 index 3102e4f7e2..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/hash/BUILD +++ /dev/null @@ -1,26 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["hash.go"], - tags = ["automanaged"], - deps = ["//vendor:github.com/davecgh/go-spew/spew"], -) - -go_test( - name = "go_default_test", - srcs = ["hash_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = ["//vendor:github.com/davecgh/go-spew/spew"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go b/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go deleted file mode 100644 index 803f066a44..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package hash - -import ( - "hash" - - "github.com/davecgh/go-spew/spew" -) - -// DeepHashObject writes specified object to hash using the spew library -// which follows pointers and prints actual values of the nested objects -// ensuring the hash does not change when a pointer changes. 
-func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) { - hasher.Reset() - printer := spew.ConfigState{ - Indent: " ", - SortKeys: true, - DisableMethods: true, - SpewKeys: true, - } - printer.Fprintf(hasher, "%#v", objectToWrite) -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/homedir/BUILD b/vendor/k8s.io/kubernetes/pkg/util/homedir/BUILD deleted file mode 100644 index 7469791c3b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/homedir/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["homedir.go"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/integer/BUILD b/vendor/k8s.io/kubernetes/pkg/util/integer/BUILD deleted file mode 100644 index 94a36b1f1f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/integer/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["integer.go"], - tags = ["automanaged"], -) - -go_test( - name = "go_default_test", - srcs = ["integer_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/intstr/BUILD b/vendor/k8s.io/kubernetes/pkg/util/intstr/BUILD deleted file mode 100644 index 600ecb08e3..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/intstr/BUILD +++ /dev/null @@ -1,34 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "generated.pb.go", - "intstr.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/genericapiserver/openapi/common:go_default_library", - "//vendor:github.com/go-openapi/spec", - "//vendor:github.com/gogo/protobuf/proto", - "//vendor:github.com/google/gofuzz", - ], -) - -go_test( - name = "go_default_test", - srcs = ["intstr_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = ["//vendor:github.com/ghodss/yaml"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.pb.go deleted file mode 100644 index 4d60cfc7c6..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.pb.go +++ /dev/null @@ -1,373 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/util/intstr/generated.proto -// DO NOT EDIT! - -/* - Package intstr is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/pkg/util/intstr/generated.proto - - It has these top-level messages: - IntOrString -*/ -package intstr - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 - -func (m *IntOrString) Reset() { *m = IntOrString{} } -func (*IntOrString) ProtoMessage() {} -func (*IntOrString) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func init() { - proto.RegisterType((*IntOrString)(nil), "k8s.io.kubernetes.pkg.util.intstr.IntOrString") -} -func (m *IntOrString) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *IntOrString) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Type)) - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.IntVal)) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.StrVal))) - i += copy(data[i:], m.StrVal) - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *IntOrString) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Type)) - n += 1 + sovGenerated(uint64(m.IntVal)) - l = len(m.StrVal) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *IntOrString) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IntOrString: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IntOrString: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 
{ - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Type |= (Type(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IntVal", wireType) - } - m.IntVal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.IntVal |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StrVal", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StrVal = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -var fileDescriptorGenerated = []byte{ - // 269 bytes of a gzipped 
FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x44, 0x8f, 0x31, 0x4e, 0xc3, 0x30, - 0x18, 0x85, 0x6d, 0x5a, 0x2a, 0x08, 0x12, 0x43, 0xc4, 0x50, 0x31, 0x38, 0x81, 0x01, 0x79, 0xc1, - 0x16, 0x1b, 0x62, 0xcc, 0xd6, 0x09, 0x29, 0x45, 0x0c, 0x6c, 0x0d, 0x18, 0x63, 0xa5, 0xd8, 0x96, - 0xf3, 0x67, 0xe8, 0xd6, 0x23, 0xc0, 0xc6, 0xc8, 0x71, 0x32, 0x76, 0x64, 0x40, 0x15, 0x31, 0xb7, - 0x60, 0x42, 0x71, 0x22, 0x75, 0xb2, 0xdf, 0x7b, 0xdf, 0x67, 0xc9, 0xd1, 0x55, 0x79, 0x5d, 0x31, - 0x65, 0x78, 0x59, 0x17, 0xc2, 0x69, 0x01, 0xa2, 0xe2, 0xb6, 0x94, 0xbc, 0x06, 0xb5, 0xe4, 0x4a, - 0x43, 0x05, 0x8e, 0x4b, 0xa1, 0x85, 0x5b, 0x80, 0x78, 0x62, 0xd6, 0x19, 0x30, 0xf1, 0x59, 0xaf, - 0xb0, 0x9d, 0xc2, 0x6c, 0x29, 0x59, 0xa7, 0xb0, 0x5e, 0x39, 0xbd, 0x94, 0x0a, 0x5e, 0xea, 0x82, - 0x3d, 0x9a, 0x57, 0x2e, 0x8d, 0x34, 0x3c, 0x98, 0x45, 0xfd, 0x1c, 0x52, 0x08, 0xe1, 0xd6, 0xbf, - 0x78, 0xfe, 0x8e, 0xa3, 0xa3, 0x99, 0x86, 0x5b, 0x37, 0x07, 0xa7, 0xb4, 0x8c, 0x69, 0x34, 0x86, - 0x95, 0x15, 0x53, 0x9c, 0x62, 0x3a, 0xca, 0x4e, 0x9a, 0x6d, 0x82, 0xfc, 0x36, 0x19, 0xdf, 0xad, - 0xac, 0xf8, 0x1b, 0xce, 0x3c, 0x10, 0xf1, 0x45, 0x34, 0x51, 0x1a, 0xee, 0x17, 0xcb, 0xe9, 0x5e, - 0x8a, 0xe9, 0x7e, 0x76, 0x3c, 0xb0, 0x93, 0x59, 0x68, 0xf3, 0x61, 0xed, 0xb8, 0x0a, 0x5c, 0xc7, - 0x8d, 0x52, 0x4c, 0x0f, 0x77, 0xdc, 0x3c, 0xb4, 0xf9, 0xb0, 0xde, 0x1c, 0x7c, 0x7c, 0x26, 0x68, - 0xfd, 0x9d, 0xa2, 0x8c, 0x36, 0x2d, 0x41, 0x9b, 0x96, 0xa0, 0xaf, 0x96, 0xa0, 0xb5, 0x27, 0xb8, - 0xf1, 0x04, 0x6f, 0x3c, 0xc1, 0x3f, 0x9e, 0xe0, 0xb7, 0x5f, 0x82, 0x1e, 0x26, 0xfd, 0x67, 0xff, - 0x03, 0x00, 0x00, 0xff, 0xff, 0x68, 0x57, 0xfb, 0xfa, 0x43, 0x01, 0x00, 0x00, -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.proto b/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.proto deleted file mode 100644 index 4dc4e994a7..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.proto +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.util.intstr; - -// Package-wide variables from generator "generated". -option go_package = "intstr"; - -// IntOrString is a type that can hold an int32 or a string. When used in -// JSON or YAML marshalling and unmarshalling, it produces or consumes the -// inner type. This allows you to have, for example, a JSON field that can -// accept a name or number. 
-// TODO: Rename to Int32OrString -// -// +protobuf=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -// +k8s:openapi-gen=true -message IntOrString { - optional int64 type = 1; - - optional int32 intVal = 2; - - optional string strVal = 3; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/util/io/BUILD b/vendor/k8s.io/kubernetes/pkg/util/io/BUILD index 5c93589a74..1dfc3ad981 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/io/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/io/BUILD @@ -4,10 +4,8 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", "go_test", - "cgo_library", ) go_library( @@ -19,9 +17,9 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/runtime:go_default_library", + "//pkg/api/v1:go_default_library", "//vendor:github.com/golang/glog", + "//vendor:k8s.io/apimachinery/pkg/runtime", ], ) @@ -31,11 +29,24 @@ go_test( tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/apimachinery/registered:go_default_library", - "//pkg/runtime:go_default_library", "//pkg/util/io:go_default_library", - "//pkg/util/testing:go_default_library", "//pkg/volume:go_default_library", "//vendor:github.com/pborman/uuid", + "//vendor:k8s.io/apimachinery/pkg/api/equality", + "//vendor:k8s.io/apimachinery/pkg/runtime", + "//vendor:k8s.io/client-go/util/testing", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/util/io/io.go b/vendor/k8s.io/kubernetes/pkg/util/io/io.go index d7eb6d57f9..0be8e12726 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/io/io.go +++ b/vendor/k8s.io/kubernetes/pkg/util/io/io.go @@ -21,13 +21,13 @@ import ( "io/ioutil" "os" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/api/v1" ) // LoadPodFromFile will read, decode, and return a Pod from a file. 
-func LoadPodFromFile(filePath string) (*api.Pod, error) { +func LoadPodFromFile(filePath string) (*v1.Pod, error) { if filePath == "" { return nil, fmt.Errorf("file path not specified") } @@ -38,9 +38,9 @@ func LoadPodFromFile(filePath string) (*api.Pod, error) { if len(podDef) == 0 { return nil, fmt.Errorf("file was empty: %s", filePath) } - pod := &api.Pod{} + pod := &v1.Pod{} - codec := api.Codecs.LegacyCodec(registered.GroupOrDie(api.GroupName).GroupVersion) + codec := api.Codecs.LegacyCodec(api.Registry.GroupOrDie(v1.GroupName).GroupVersion) if err := runtime.DecodeInto(codec, podDef, pod); err != nil { return nil, fmt.Errorf("failed decoding file: %v", err) } @@ -48,11 +48,11 @@ func LoadPodFromFile(filePath string) (*api.Pod, error) { } // SavePodToFile will encode and save a pod to a given path & permissions -func SavePodToFile(pod *api.Pod, filePath string, perm os.FileMode) error { +func SavePodToFile(pod *v1.Pod, filePath string, perm os.FileMode) error { if filePath == "" { return fmt.Errorf("file path not specified") } - codec := api.Codecs.LegacyCodec(registered.GroupOrDie(api.GroupName).GroupVersion) + codec := api.Codecs.LegacyCodec(api.Registry.GroupOrDie(v1.GroupName).GroupVersion) data, err := runtime.Encode(codec, pod) if err != nil { return fmt.Errorf("failed encoding pod: %v", err) diff --git a/vendor/k8s.io/kubernetes/pkg/util/json/BUILD b/vendor/k8s.io/kubernetes/pkg/util/json/BUILD deleted file mode 100644 index 50d42e4c02..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/json/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["json.go"], - tags = ["automanaged"], -) - -go_test( - name = "go_default_test", - srcs = ["json_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/BUILD b/vendor/k8s.io/kubernetes/pkg/util/jsonpath/BUILD deleted file mode 100644 index 82a107ada8..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/BUILD +++ /dev/null @@ -1,34 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "jsonpath.go", - "node.go", - "parser.go", - ], - tags = ["automanaged"], - deps = ["//third_party/forked/golang/template:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = [ - "jsonpath_test.go", - "parser_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/doc.go b/vendor/k8s.io/kubernetes/pkg/util/jsonpath/doc.go deleted file mode 100644 index 2a6e170617..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// package jsonpath is a template engine using jsonpath syntax, -// which can be seen at http://goessner.net/articles/JsonPath/. -// In addition, it has {range} {end} function to iterate list and slice. -package jsonpath diff --git a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/jsonpath.go b/vendor/k8s.io/kubernetes/pkg/util/jsonpath/jsonpath.go deleted file mode 100644 index d84a185597..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/jsonpath.go +++ /dev/null @@ -1,498 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package jsonpath - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strings" - - "k8s.io/kubernetes/third_party/forked/golang/template" -) - -type JSONPath struct { - name string - parser *Parser - stack [][]reflect.Value //push and pop values in different scopes - cur []reflect.Value //current scope values - beginRange int - inRange int - endRange int - - allowMissingKeys bool -} - -func New(name string) *JSONPath { - return &JSONPath{ - name: name, - beginRange: 0, - inRange: 0, - endRange: 0, - } -} - -// AllowMissingKeys allows a caller to specify whether they want an error if a field or map key -// cannot be located, or simply an empty result. The receiver is returned for chaining. 
-func (j *JSONPath) AllowMissingKeys(allow bool) *JSONPath { - j.allowMissingKeys = allow - return j -} - -// Parse parse the given template, return error -func (j *JSONPath) Parse(text string) (err error) { - j.parser, err = Parse(j.name, text) - return -} - -// Execute bounds data into template and write the result -func (j *JSONPath) Execute(wr io.Writer, data interface{}) error { - fullResults, err := j.FindResults(data) - if err != nil { - return err - } - for ix := range fullResults { - if err := j.PrintResults(wr, fullResults[ix]); err != nil { - return err - } - } - return nil -} - -func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) { - if j.parser == nil { - return nil, fmt.Errorf("%s is an incomplete jsonpath template", j.name) - } - - j.cur = []reflect.Value{reflect.ValueOf(data)} - nodes := j.parser.Root.Nodes - fullResult := [][]reflect.Value{} - for i := 0; i < len(nodes); i++ { - node := nodes[i] - results, err := j.walk(j.cur, node) - if err != nil { - return nil, err - } - - //encounter an end node, break the current block - if j.endRange > 0 && j.endRange <= j.inRange { - j.endRange -= 1 - break - } - //encounter a range node, start a range loop - if j.beginRange > 0 { - j.beginRange -= 1 - j.inRange += 1 - for k, value := range results { - j.parser.Root.Nodes = nodes[i+1:] - if k == len(results)-1 { - j.inRange -= 1 - } - nextResults, err := j.FindResults(value.Interface()) - if err != nil { - return nil, err - } - fullResult = append(fullResult, nextResults...) - } - break - } - fullResult = append(fullResult, results) - } - return fullResult, nil -} - -// PrintResults write the results into writer -func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error { - for i, r := range results { - text, err := j.evalToText(r) - if err != nil { - return err - } - if i != len(results)-1 { - text = append(text, ' ') - } - if _, err = wr.Write(text); err != nil { - return err - } - } - return nil -} - -// walk visits tree rooted at the given node in DFS order -func (j *JSONPath) walk(value []reflect.Value, node Node) ([]reflect.Value, error) { - switch node := node.(type) { - case *ListNode: - return j.evalList(value, node) - case *TextNode: - return []reflect.Value{reflect.ValueOf(node.Text)}, nil - case *FieldNode: - return j.evalField(value, node) - case *ArrayNode: - return j.evalArray(value, node) - case *FilterNode: - return j.evalFilter(value, node) - case *IntNode: - return j.evalInt(value, node) - case *FloatNode: - return j.evalFloat(value, node) - case *WildcardNode: - return j.evalWildcard(value, node) - case *RecursiveNode: - return j.evalRecursive(value, node) - case *UnionNode: - return j.evalUnion(value, node) - case *IdentifierNode: - return j.evalIdentifier(value, node) - default: - return value, fmt.Errorf("unexpected Node %v", node) - } -} - -// evalInt evaluates IntNode -func (j *JSONPath) evalInt(input []reflect.Value, node *IntNode) ([]reflect.Value, error) { - result := make([]reflect.Value, len(input)) - for i := range input { - result[i] = reflect.ValueOf(node.Value) - } - return result, nil -} - -// evalFloat evaluates FloatNode -func (j *JSONPath) evalFloat(input []reflect.Value, node *FloatNode) ([]reflect.Value, error) { - result := make([]reflect.Value, len(input)) - for i := range input { - result[i] = reflect.ValueOf(node.Value) - } - return result, nil -} - -// evalList evaluates ListNode -func (j *JSONPath) evalList(value []reflect.Value, node *ListNode) ([]reflect.Value, error) { - var err error - 
curValue := value - for _, node := range node.Nodes { - curValue, err = j.walk(curValue, node) - if err != nil { - return curValue, err - } - } - return curValue, nil -} - -// evalIdentifier evaluates IdentifierNode -func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) ([]reflect.Value, error) { - results := []reflect.Value{} - switch node.Name { - case "range": - j.stack = append(j.stack, j.cur) - j.beginRange += 1 - results = input - case "end": - if j.endRange < j.inRange { //inside a loop, break the current block - j.endRange += 1 - break - } - // the loop is about to end, pop value and continue the following execution - if len(j.stack) > 0 { - j.cur, j.stack = j.stack[len(j.stack)-1], j.stack[:len(j.stack)-1] - } else { - return results, fmt.Errorf("not in range, nothing to end") - } - default: - return input, fmt.Errorf("unrecognized identifier %v", node.Name) - } - return results, nil -} - -// evalArray evaluates ArrayNode -func (j *JSONPath) evalArray(input []reflect.Value, node *ArrayNode) ([]reflect.Value, error) { - result := []reflect.Value{} - for _, value := range input { - - value, isNil := template.Indirect(value) - if isNil { - continue - } - if value.Kind() != reflect.Array && value.Kind() != reflect.Slice { - return input, fmt.Errorf("%v is not array or slice", value.Type()) - } - params := node.Params - if !params[0].Known { - params[0].Value = 0 - } - if params[0].Value < 0 { - params[0].Value += value.Len() - } - if !params[1].Known { - params[1].Value = value.Len() - } - - if params[1].Value < 0 { - params[1].Value += value.Len() - } - - sliceLength := value.Len() - if params[1].Value != params[0].Value { // if you're requesting zero elements, allow it through. - if params[0].Value >= sliceLength { - return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[0].Value, sliceLength) - } - if params[1].Value > sliceLength { - return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[1].Value-1, sliceLength) - } - } - - if !params[2].Known { - value = value.Slice(params[0].Value, params[1].Value) - } else { - value = value.Slice3(params[0].Value, params[1].Value, params[2].Value) - } - for i := 0; i < value.Len(); i++ { - result = append(result, value.Index(i)) - } - } - return result, nil -} - -// evalUnion evaluates UnionNode -func (j *JSONPath) evalUnion(input []reflect.Value, node *UnionNode) ([]reflect.Value, error) { - result := []reflect.Value{} - for _, listNode := range node.Nodes { - temp, err := j.evalList(input, listNode) - if err != nil { - return input, err - } - result = append(result, temp...) - } - return result, nil -} - -func (j *JSONPath) findFieldInValue(value *reflect.Value, node *FieldNode) (reflect.Value, error) { - t := value.Type() - var inlineValue *reflect.Value - for ix := 0; ix < t.NumField(); ix++ { - f := t.Field(ix) - jsonTag := f.Tag.Get("json") - parts := strings.Split(jsonTag, ",") - if len(parts) == 0 { - continue - } - if parts[0] == node.Value { - return value.Field(ix), nil - } - if len(parts[0]) == 0 { - val := value.Field(ix) - inlineValue = &val - } - } - if inlineValue != nil { - if inlineValue.Kind() == reflect.Struct { - // handle 'inline' - match, err := j.findFieldInValue(inlineValue, node) - if err != nil { - return reflect.Value{}, err - } - if match.IsValid() { - return match, nil - } - } - } - return value.FieldByName(node.Value), nil -} - -// evalField evaluates field of struct or key of map. 
-func (j *JSONPath) evalField(input []reflect.Value, node *FieldNode) ([]reflect.Value, error) { - results := []reflect.Value{} - // If there's no input, there's no output - if len(input) == 0 { - return results, nil - } - for _, value := range input { - var result reflect.Value - value, isNil := template.Indirect(value) - if isNil { - continue - } - - if value.Kind() == reflect.Struct { - var err error - if result, err = j.findFieldInValue(&value, node); err != nil { - return nil, err - } - } else if value.Kind() == reflect.Map { - mapKeyType := value.Type().Key() - nodeValue := reflect.ValueOf(node.Value) - // node value type must be convertible to map key type - if !nodeValue.Type().ConvertibleTo(mapKeyType) { - return results, fmt.Errorf("%s is not convertible to %s", nodeValue, mapKeyType) - } - result = value.MapIndex(nodeValue.Convert(mapKeyType)) - } - if result.IsValid() { - results = append(results, result) - } - } - if len(results) == 0 { - if j.allowMissingKeys { - return results, nil - } - return results, fmt.Errorf("%s is not found", node.Value) - } - return results, nil -} - -// evalWildcard extract all contents of the given value -func (j *JSONPath) evalWildcard(input []reflect.Value, node *WildcardNode) ([]reflect.Value, error) { - results := []reflect.Value{} - for _, value := range input { - value, isNil := template.Indirect(value) - if isNil { - continue - } - - kind := value.Kind() - if kind == reflect.Struct { - for i := 0; i < value.NumField(); i++ { - results = append(results, value.Field(i)) - } - } else if kind == reflect.Map { - for _, key := range value.MapKeys() { - results = append(results, value.MapIndex(key)) - } - } else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String { - for i := 0; i < value.Len(); i++ { - results = append(results, value.Index(i)) - } - } - } - return results, nil -} - -// evalRecursive visit the given value recursively and push all of them to result -func (j *JSONPath) evalRecursive(input []reflect.Value, node *RecursiveNode) ([]reflect.Value, error) { - result := []reflect.Value{} - for _, value := range input { - results := []reflect.Value{} - value, isNil := template.Indirect(value) - if isNil { - continue - } - - kind := value.Kind() - if kind == reflect.Struct { - for i := 0; i < value.NumField(); i++ { - results = append(results, value.Field(i)) - } - } else if kind == reflect.Map { - for _, key := range value.MapKeys() { - results = append(results, value.MapIndex(key)) - } - } else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String { - for i := 0; i < value.Len(); i++ { - results = append(results, value.Index(i)) - } - } - if len(results) != 0 { - result = append(result, value) - output, err := j.evalRecursive(results, node) - if err != nil { - return result, err - } - result = append(result, output...) 
- } - } - return result, nil -} - -// evalFilter filter array according to FilterNode -func (j *JSONPath) evalFilter(input []reflect.Value, node *FilterNode) ([]reflect.Value, error) { - results := []reflect.Value{} - for _, value := range input { - value, _ = template.Indirect(value) - - if value.Kind() != reflect.Array && value.Kind() != reflect.Slice { - return input, fmt.Errorf("%v is not array or slice and cannot be filtered", value) - } - for i := 0; i < value.Len(); i++ { - temp := []reflect.Value{value.Index(i)} - lefts, err := j.evalList(temp, node.Left) - - //case exists - if node.Operator == "exists" { - if len(lefts) > 0 { - results = append(results, value.Index(i)) - } - continue - } - - if err != nil { - return input, err - } - - var left, right interface{} - if len(lefts) != 1 { - return input, fmt.Errorf("can only compare one element at a time") - } - left = lefts[0].Interface() - - rights, err := j.evalList(temp, node.Right) - if err != nil { - return input, err - } - if len(rights) != 1 { - return input, fmt.Errorf("can only compare one element at a time") - } - right = rights[0].Interface() - - pass := false - switch node.Operator { - case "<": - pass, err = template.Less(left, right) - case ">": - pass, err = template.Greater(left, right) - case "==": - pass, err = template.Equal(left, right) - case "!=": - pass, err = template.NotEqual(left, right) - case "<=": - pass, err = template.LessEqual(left, right) - case ">=": - pass, err = template.GreaterEqual(left, right) - default: - return results, fmt.Errorf("unrecognized filter operator %s", node.Operator) - } - if err != nil { - return results, err - } - if pass { - results = append(results, value.Index(i)) - } - } - } - return results, nil -} - -// evalToText translates reflect value to corresponding text -func (j *JSONPath) evalToText(v reflect.Value) ([]byte, error) { - iface, ok := template.PrintableValue(v) - if !ok { - return nil, fmt.Errorf("can't print type %s", v.Type()) - } - var buffer bytes.Buffer - fmt.Fprint(&buffer, iface) - return buffer.Bytes(), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/node.go b/vendor/k8s.io/kubernetes/pkg/util/jsonpath/node.go deleted file mode 100644 index f0a27853d8..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/node.go +++ /dev/null @@ -1,239 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package jsonpath - -import "fmt" - -// NodeType identifies the type of a parse tree node. 
-type NodeType int - -// Type returns itself and provides an easy default implementation -func (t NodeType) Type() NodeType { - return t -} - -func (t NodeType) String() string { - return NodeTypeName[t] -} - -const ( - NodeText NodeType = iota - NodeArray - NodeList - NodeField - NodeIdentifier - NodeFilter - NodeInt - NodeFloat - NodeWildcard - NodeRecursive - NodeUnion -) - -var NodeTypeName = map[NodeType]string{ - NodeText: "NodeText", - NodeArray: "NodeArray", - NodeList: "NodeList", - NodeField: "NodeField", - NodeIdentifier: "NodeIdentifier", - NodeFilter: "NodeFilter", - NodeInt: "NodeInt", - NodeFloat: "NodeFloat", - NodeWildcard: "NodeWildcard", - NodeRecursive: "NodeRecursive", - NodeUnion: "NodeUnion", -} - -type Node interface { - Type() NodeType - String() string -} - -// ListNode holds a sequence of nodes. -type ListNode struct { - NodeType - Nodes []Node // The element nodes in lexical order. -} - -func newList() *ListNode { - return &ListNode{NodeType: NodeList} -} - -func (l *ListNode) append(n Node) { - l.Nodes = append(l.Nodes, n) -} - -func (l *ListNode) String() string { - return fmt.Sprintf("%s", l.Type()) -} - -// TextNode holds plain text. -type TextNode struct { - NodeType - Text string // The text; may span newlines. -} - -func newText(text string) *TextNode { - return &TextNode{NodeType: NodeText, Text: text} -} - -func (t *TextNode) String() string { - return fmt.Sprintf("%s: %s", t.Type(), t.Text) -} - -// FieldNode holds field of struct -type FieldNode struct { - NodeType - Value string -} - -func newField(value string) *FieldNode { - return &FieldNode{NodeType: NodeField, Value: value} -} - -func (f *FieldNode) String() string { - return fmt.Sprintf("%s: %s", f.Type(), f.Value) -} - -// IdentifierNode holds an identifier -type IdentifierNode struct { - NodeType - Name string -} - -func newIdentifier(value string) *IdentifierNode { - return &IdentifierNode{ - NodeType: NodeIdentifier, - Name: value, - } -} - -func (f *IdentifierNode) String() string { - return fmt.Sprintf("%s: %s", f.Type(), f.Name) -} - -// ParamsEntry holds param information for ArrayNode -type ParamsEntry struct { - Value int - Known bool //whether the value is known when parse it -} - -// ArrayNode holds start, end, step information for array index selection -type ArrayNode struct { - NodeType - Params [3]ParamsEntry //start, end, step -} - -func newArray(params [3]ParamsEntry) *ArrayNode { - return &ArrayNode{ - NodeType: NodeArray, - Params: params, - } -} - -func (a *ArrayNode) String() string { - return fmt.Sprintf("%s: %v", a.Type(), a.Params) -} - -// FilterNode holds operand and operator information for filter -type FilterNode struct { - NodeType - Left *ListNode - Right *ListNode - Operator string -} - -func newFilter(left, right *ListNode, operator string) *FilterNode { - return &FilterNode{ - NodeType: NodeFilter, - Left: left, - Right: right, - Operator: operator, - } -} - -func (f *FilterNode) String() string { - return fmt.Sprintf("%s: %s %s %s", f.Type(), f.Left, f.Operator, f.Right) -} - -// IntNode holds integer value -type IntNode struct { - NodeType - Value int -} - -func newInt(num int) *IntNode { - return &IntNode{NodeType: NodeInt, Value: num} -} - -func (i *IntNode) String() string { - return fmt.Sprintf("%s: %d", i.Type(), i.Value) -} - -// FloatNode holds float value -type FloatNode struct { - NodeType - Value float64 -} - -func newFloat(num float64) *FloatNode { - return &FloatNode{NodeType: NodeFloat, Value: num} -} - -func (i *FloatNode) String() string { - 
return fmt.Sprintf("%s: %f", i.Type(), i.Value) -} - -// WildcardNode means a wildcard -type WildcardNode struct { - NodeType -} - -func newWildcard() *WildcardNode { - return &WildcardNode{NodeType: NodeWildcard} -} - -func (i *WildcardNode) String() string { - return fmt.Sprintf("%s", i.Type()) -} - -// RecursiveNode means a recursive descent operator -type RecursiveNode struct { - NodeType -} - -func newRecursive() *RecursiveNode { - return &RecursiveNode{NodeType: NodeRecursive} -} - -func (r *RecursiveNode) String() string { - return fmt.Sprintf("%s", r.Type()) -} - -// UnionNode is union of ListNode -type UnionNode struct { - NodeType - Nodes []*ListNode -} - -func newUnion(nodes []*ListNode) *UnionNode { - return &UnionNode{NodeType: NodeUnion, Nodes: nodes} -} - -func (u *UnionNode) String() string { - return fmt.Sprintf("%s", u.Type()) -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/parser.go b/vendor/k8s.io/kubernetes/pkg/util/jsonpath/parser.go deleted file mode 100644 index 7dc38d31fc..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/parser.go +++ /dev/null @@ -1,433 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package jsonpath - -import ( - "fmt" - "regexp" - "strconv" - "strings" - "unicode" - "unicode/utf8" -) - -const eof = -1 - -const ( - leftDelim = "{" - rightDelim = "}" -) - -type Parser struct { - Name string - Root *ListNode - input string - cur *ListNode - pos int - start int - width int -} - -// Parse parsed the given text and return a node Parser. -// If an error is encountered, parsing stops and an empty -// Parser is returned with the error -func Parse(name, text string) (*Parser, error) { - p := NewParser(name) - err := p.Parse(text) - if err != nil { - p = nil - } - return p, err -} - -func NewParser(name string) *Parser { - return &Parser{ - Name: name, - } -} - -// parseAction parsed the expression inside delimiter -func parseAction(name, text string) (*Parser, error) { - p, err := Parse(name, fmt.Sprintf("%s%s%s", leftDelim, text, rightDelim)) - // when error happens, p will be nil, so we need to return here - if err != nil { - return p, err - } - p.Root = p.Root.Nodes[0].(*ListNode) - return p, nil -} - -func (p *Parser) Parse(text string) error { - p.input = text - p.Root = newList() - p.pos = 0 - return p.parseText(p.Root) -} - -// consumeText return the parsed text since last cosumeText -func (p *Parser) consumeText() string { - value := p.input[p.start:p.pos] - p.start = p.pos - return value -} - -// next returns the next rune in the input. -func (p *Parser) next() rune { - if int(p.pos) >= len(p.input) { - p.width = 0 - return eof - } - r, w := utf8.DecodeRuneInString(p.input[p.pos:]) - p.width = w - p.pos += p.width - return r -} - -// peek returns but does not consume the next rune in the input. -func (p *Parser) peek() rune { - r := p.next() - p.backup() - return r -} - -// backup steps back one rune. Can only be called once per call of next. 
-func (p *Parser) backup() { - p.pos -= p.width -} - -func (p *Parser) parseText(cur *ListNode) error { - for { - if strings.HasPrefix(p.input[p.pos:], leftDelim) { - if p.pos > p.start { - cur.append(newText(p.consumeText())) - } - return p.parseLeftDelim(cur) - } - if p.next() == eof { - break - } - } - // Correctly reached EOF. - if p.pos > p.start { - cur.append(newText(p.consumeText())) - } - return nil -} - -// parseLeftDelim scans the left delimiter, which is known to be present. -func (p *Parser) parseLeftDelim(cur *ListNode) error { - p.pos += len(leftDelim) - p.consumeText() - newNode := newList() - cur.append(newNode) - cur = newNode - return p.parseInsideAction(cur) -} - -func (p *Parser) parseInsideAction(cur *ListNode) error { - prefixMap := map[string]func(*ListNode) error{ - rightDelim: p.parseRightDelim, - "[?(": p.parseFilter, - "..": p.parseRecursive, - } - for prefix, parseFunc := range prefixMap { - if strings.HasPrefix(p.input[p.pos:], prefix) { - return parseFunc(cur) - } - } - - switch r := p.next(); { - case r == eof || isEndOfLine(r): - return fmt.Errorf("unclosed action") - case r == ' ': - p.consumeText() - case r == '@' || r == '$': //the current object, just pass it - p.consumeText() - case r == '[': - return p.parseArray(cur) - case r == '"': - return p.parseQuote(cur) - case r == '.': - return p.parseField(cur) - case r == '+' || r == '-' || unicode.IsDigit(r): - p.backup() - return p.parseNumber(cur) - case isAlphaNumeric(r): - p.backup() - return p.parseIdentifier(cur) - default: - return fmt.Errorf("unrecognized character in action: %#U", r) - } - return p.parseInsideAction(cur) -} - -// parseRightDelim scans the right delimiter, which is known to be present. -func (p *Parser) parseRightDelim(cur *ListNode) error { - p.pos += len(rightDelim) - p.consumeText() - cur = p.Root - return p.parseText(cur) -} - -// parseIdentifier scans build-in keywords, like "range" "end" -func (p *Parser) parseIdentifier(cur *ListNode) error { - var r rune - for { - r = p.next() - if isTerminator(r) { - p.backup() - break - } - } - value := p.consumeText() - cur.append(newIdentifier(value)) - return p.parseInsideAction(cur) -} - -// parseRecursive scans the recursive desent operator .. -func (p *Parser) parseRecursive(cur *ListNode) error { - p.pos += len("..") - p.consumeText() - cur.append(newRecursive()) - if r := p.peek(); isAlphaNumeric(r) { - return p.parseField(cur) - } - return p.parseInsideAction(cur) -} - -// parseNumber scans number -func (p *Parser) parseNumber(cur *ListNode) error { - r := p.peek() - if r == '+' || r == '-' { - r = p.next() - } - for { - r = p.next() - if r != '.' 
&& !unicode.IsDigit(r) { - p.backup() - break - } - } - value := p.consumeText() - i, err := strconv.Atoi(value) - if err == nil { - cur.append(newInt(i)) - return p.parseInsideAction(cur) - } - d, err := strconv.ParseFloat(value, 64) - if err == nil { - cur.append(newFloat(d)) - return p.parseInsideAction(cur) - } - return fmt.Errorf("cannot parse number %s", value) -} - -// parseArray scans array index selection -func (p *Parser) parseArray(cur *ListNode) error { -Loop: - for { - switch p.next() { - case eof, '\n': - return fmt.Errorf("unterminated array") - case ']': - break Loop - } - } - text := p.consumeText() - text = string(text[1 : len(text)-1]) - if text == "*" { - text = ":" - } - - //union operator - strs := strings.Split(text, ",") - if len(strs) > 1 { - union := []*ListNode{} - for _, str := range strs { - parser, err := parseAction("union", fmt.Sprintf("[%s]", strings.Trim(str, " "))) - if err != nil { - return err - } - union = append(union, parser.Root) - } - cur.append(newUnion(union)) - return p.parseInsideAction(cur) - } - - // dict key - reg := regexp.MustCompile(`^'([^']*)'$`) - value := reg.FindStringSubmatch(text) - if value != nil { - parser, err := parseAction("arraydict", fmt.Sprintf(".%s", value[1])) - if err != nil { - return err - } - for _, node := range parser.Root.Nodes { - cur.append(node) - } - return p.parseInsideAction(cur) - } - - //slice operator - reg = regexp.MustCompile(`^(-?[\d]*)(:-?[\d]*)?(:[\d]*)?$`) - value = reg.FindStringSubmatch(text) - if value == nil { - return fmt.Errorf("invalid array index %s", text) - } - value = value[1:] - params := [3]ParamsEntry{} - for i := 0; i < 3; i++ { - if value[i] != "" { - if i > 0 { - value[i] = value[i][1:] - } - if i > 0 && value[i] == "" { - params[i].Known = false - } else { - var err error - params[i].Known = true - params[i].Value, err = strconv.Atoi(value[i]) - if err != nil { - return fmt.Errorf("array index %s is not a number", value[i]) - } - } - } else { - if i == 1 { - params[i].Known = true - params[i].Value = params[0].Value + 1 - } else { - params[i].Known = false - params[i].Value = 0 - } - } - } - cur.append(newArray(params)) - return p.parseInsideAction(cur) -} - -// parseFilter scans filter inside array selection -func (p *Parser) parseFilter(cur *ListNode) error { - p.pos += len("[?(") - p.consumeText() -Loop: - for { - switch p.next() { - case eof, '\n': - return fmt.Errorf("unterminated filter") - case ')': - break Loop - } - } - if p.next() != ']' { - return fmt.Errorf("unclosed array expect ]") - } - reg := regexp.MustCompile(`^([^!<>=]+)([!<>=]+)(.+?)$`) - text := p.consumeText() - text = string(text[:len(text)-2]) - value := reg.FindStringSubmatch(text) - if value == nil { - parser, err := parseAction("text", text) - if err != nil { - return err - } - cur.append(newFilter(parser.Root, newList(), "exists")) - } else { - leftParser, err := parseAction("left", value[1]) - if err != nil { - return err - } - rightParser, err := parseAction("right", value[3]) - if err != nil { - return err - } - cur.append(newFilter(leftParser.Root, rightParser.Root, value[2])) - } - return p.parseInsideAction(cur) -} - -// parseQuote unquotes string inside double quote -func (p *Parser) parseQuote(cur *ListNode) error { -Loop: - for { - switch p.next() { - case eof, '\n': - return fmt.Errorf("unterminated quoted string") - case '"': - break Loop - } - } - value := p.consumeText() - s, err := strconv.Unquote(value) - if err != nil { - return fmt.Errorf("unquote string %s error %v", value, err) - } - 
cur.append(newText(s)) - return p.parseInsideAction(cur) -} - -// parseField scans a field until a terminator -func (p *Parser) parseField(cur *ListNode) error { - p.consumeText() - for p.advance() { - } - value := p.consumeText() - if value == "*" { - cur.append(newWildcard()) - } else { - cur.append(newField(strings.Replace(value, "\\", "", -1))) - } - return p.parseInsideAction(cur) -} - -// advance scans until next non-escaped terminator -func (p *Parser) advance() bool { - r := p.next() - if r == '\\' { - p.next() - } else if isTerminator(r) { - p.backup() - return false - } - return true -} - -// isTerminator reports whether the input is at valid termination character to appear after an identifier. -func isTerminator(r rune) bool { - if isSpace(r) || isEndOfLine(r) { - return true - } - switch r { - case eof, '.', ',', '[', ']', '$', '@', '{', '}': - return true - } - return false -} - -// isSpace reports whether r is a space character. -func isSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -// isEndOfLine reports whether r is an end-of-line character. -func isEndOfLine(r rune) bool { - return r == '\r' || r == '\n' -} - -// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore. -func isAlphaNumeric(r rune) bool { - return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/labels/BUILD b/vendor/k8s.io/kubernetes/pkg/util/labels/BUILD deleted file mode 100644 index 5a8dae2791..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/labels/BUILD +++ /dev/null @@ -1,29 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "labels.go", - ], - tags = ["automanaged"], - deps = ["//pkg/api/unversioned:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = ["labels_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = ["//pkg/api/unversioned:go_default_library"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/labels/doc.go b/vendor/k8s.io/kubernetes/pkg/util/labels/doc.go deleted file mode 100644 index c87305fb09..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/labels/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package labels provides utilities to work with Kubernetes labels. -package labels diff --git a/vendor/k8s.io/kubernetes/pkg/util/labels/labels.go b/vendor/k8s.io/kubernetes/pkg/util/labels/labels.go deleted file mode 100644 index 262f66e6e9..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/labels/labels.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package labels - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api/unversioned" -) - -// Clones the given map and returns a new map with the given key and value added. -// Returns the given map, if labelKey is empty. -func CloneAndAddLabel(labels map[string]string, labelKey string, labelValue uint32) map[string]string { - if labelKey == "" { - // Don't need to add a label. - return labels - } - // Clone. - newLabels := map[string]string{} - for key, value := range labels { - newLabels[key] = value - } - newLabels[labelKey] = fmt.Sprintf("%d", labelValue) - return newLabels -} - -// CloneAndRemoveLabel clones the given map and returns a new map with the given key removed. -// Returns the given map, if labelKey is empty. -func CloneAndRemoveLabel(labels map[string]string, labelKey string) map[string]string { - if labelKey == "" { - // Don't need to add a label. - return labels - } - // Clone. - newLabels := map[string]string{} - for key, value := range labels { - newLabels[key] = value - } - delete(newLabels, labelKey) - return newLabels -} - -// AddLabel returns a map with the given key and value added to the given map. -func AddLabel(labels map[string]string, labelKey string, labelValue string) map[string]string { - if labelKey == "" { - // Don't need to add a label. - return labels - } - if labels == nil { - labels = make(map[string]string) - } - labels[labelKey] = labelValue - return labels -} - -// Clones the given selector and returns a new selector with the given key and value added. -// Returns the given selector, if labelKey is empty. -func CloneSelectorAndAddLabel(selector *unversioned.LabelSelector, labelKey string, labelValue uint32) *unversioned.LabelSelector { - if labelKey == "" { - // Don't need to add a label. - return selector - } - - // Clone. - newSelector := new(unversioned.LabelSelector) - - // TODO(madhusudancs): Check if you can use deepCopy_extensions_LabelSelector here. - newSelector.MatchLabels = make(map[string]string) - if selector.MatchLabels != nil { - for key, val := range selector.MatchLabels { - newSelector.MatchLabels[key] = val - } - } - newSelector.MatchLabels[labelKey] = fmt.Sprintf("%d", labelValue) - - if selector.MatchExpressions != nil { - newMExps := make([]unversioned.LabelSelectorRequirement, len(selector.MatchExpressions)) - for i, me := range selector.MatchExpressions { - newMExps[i].Key = me.Key - newMExps[i].Operator = me.Operator - if me.Values != nil { - newMExps[i].Values = make([]string, len(me.Values)) - copy(newMExps[i].Values, me.Values) - } else { - newMExps[i].Values = nil - } - } - newSelector.MatchExpressions = newMExps - } else { - newSelector.MatchExpressions = nil - } - - return newSelector -} - -// AddLabelToSelector returns a selector with the given key and value added to the given selector's MatchLabels. -func AddLabelToSelector(selector *unversioned.LabelSelector, labelKey string, labelValue string) *unversioned.LabelSelector { - if labelKey == "" { - // Don't need to add a label. 
- return selector - } - if selector.MatchLabels == nil { - selector.MatchLabels = make(map[string]string) - } - selector.MatchLabels[labelKey] = labelValue - return selector -} - -// SelectorHasLabel checks if the given selector contains the given label key in its MatchLabels -func SelectorHasLabel(selector *unversioned.LabelSelector, labelKey string) bool { - return len(selector.MatchLabels[labelKey]) > 0 -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD b/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD index a30c6992e2..a0c648d23b 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD @@ -4,10 +4,8 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", "go_test", - "cgo_library", ) go_library( @@ -22,8 +20,8 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/util/exec:go_default_library", - "//pkg/util/sets:go_default_library", "//vendor:github.com/golang/glog", + "//vendor:k8s.io/apimachinery/pkg/util/sets", ], ) @@ -33,7 +31,20 @@ go_test( "mount_linux_test.go", "safe_format_and_mount_test.go", ], - library = "go_default_library", + library = ":go_default_library", tags = ["automanaged"], deps = ["//pkg/util/exec:go_default_library"], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/OWNERS b/vendor/k8s.io/kubernetes/pkg/util/mount/OWNERS new file mode 100644 index 0000000000..556119c447 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/OWNERS @@ -0,0 +1,7 @@ +reviewers: + - jingxu97 + - saad-ali +approvers: + - jingxu97 + - saad-ali + diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go b/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go index 60b001ba6c..972bff26a1 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go @@ -17,6 +17,7 @@ limitations under the License. 
package mount import ( + "path/filepath" "sync" "github.com/golang/glog" @@ -80,9 +81,15 @@ func (f *FakeMounter) Mount(source string, target string, fstype string, options } } - f.MountPoints = append(f.MountPoints, MountPoint{Device: source, Path: target, Type: fstype}) - glog.V(5).Infof("Fake mounter: mounted %s to %s", source, target) - f.Log = append(f.Log, FakeAction{Action: FakeActionMount, Target: target, Source: source, FSType: fstype}) + // If target is a symlink, get its absolute path + absTarget, err := filepath.EvalSymlinks(target) + if err != nil { + absTarget = target + } + + f.MountPoints = append(f.MountPoints, MountPoint{Device: source, Path: absTarget, Type: fstype}) + glog.V(5).Infof("Fake mounter: mounted %s to %s", source, absTarget) + f.Log = append(f.Log, FakeAction{Action: FakeActionMount, Target: absTarget, Source: source, FSType: fstype}) return nil } @@ -90,17 +97,23 @@ func (f *FakeMounter) Unmount(target string) error { f.mutex.Lock() defer f.mutex.Unlock() + // If target is a symlink, get its absolute path + absTarget, err := filepath.EvalSymlinks(target) + if err != nil { + absTarget = target + } + newMountpoints := []MountPoint{} for _, mp := range f.MountPoints { - if mp.Path == target { - glog.V(5).Infof("Fake mounter: unmounted %s from %s", mp.Device, target) + if mp.Path == absTarget { + glog.V(5).Infof("Fake mounter: unmounted %s from %s", mp.Device, absTarget) // Don't copy it to newMountpoints continue } newMountpoints = append(newMountpoints, MountPoint{Device: mp.Device, Path: mp.Path, Type: mp.Type}) } f.MountPoints = newMountpoints - f.Log = append(f.Log, FakeAction{Action: FakeActionUnmount, Target: target}) + f.Log = append(f.Log, FakeAction{Action: FakeActionUnmount, Target: absTarget}) return nil } @@ -115,13 +128,19 @@ func (f *FakeMounter) IsLikelyNotMountPoint(file string) (bool, error) { f.mutex.Lock() defer f.mutex.Unlock() + // If file is a symlink, get its absolute path + absFile, err := filepath.EvalSymlinks(file) + if err != nil { + absFile = file + } + for _, mp := range f.MountPoints { - if mp.Path == file { - glog.V(5).Infof("isLikelyMountPoint for %s: mounted %s, false", file, mp.Path) + if mp.Path == absFile { + glog.V(5).Infof("isLikelyNotMountPoint for %s: mounted %s, false", file, mp.Path) return false, nil } } - glog.V(5).Infof("isLikelyMountPoint for %s: true", file) + glog.V(5).Infof("isLikelyNotMountPoint for %s: true", file) return true, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go b/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go index 8796d6a52d..44058042d5 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go @@ -98,11 +98,6 @@ func (mounter *SafeFormatAndMount) FormatAndMount(source string, target string, // It provides options to override the default mounter behavior. // mounterPath allows using an alternative to `/bin/mount` for mounting. 
func New(mounterPath string) Interface { - // If mounter-path flag is not set, use default mount path - if mounterPath == "" { - mounterPath = defaultMountCommand - } - return &Mounter{ mounterPath: mounterPath, } diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go index dff5353e73..366101be22 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go @@ -21,7 +21,7 @@ package mount import ( "bufio" "fmt" - "hash/adler32" + "hash/fnv" "io" "os" "os/exec" @@ -30,8 +30,8 @@ import ( "syscall" "github.com/golang/glog" - utilExec "k8s.io/kubernetes/pkg/util/exec" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/sets" + utilexec "k8s.io/kubernetes/pkg/util/exec" ) const ( @@ -63,23 +63,23 @@ type Mounter struct { // currently come from mount(8), e.g. "ro", "remount", "bind", etc. If no more option is // required, call Mount with an empty string list or nil. func (mounter *Mounter) Mount(source string, target string, fstype string, options []string) error { - // Path to mounter binary. Set to mount accessible via $PATH by default. + // Path to mounter binary if containerized mounter is needed. Otherwise, it is set to empty. // All Linux distros are expected to be shipped with a mount utility that an support bind mounts. - mounterPath := defaultMountCommand + mounterPath := "" bind, bindRemountOpts := isBind(options) if bind { - err := doMount(mounterPath, source, target, fstype, []string{"bind"}) + err := doMount(mounterPath, defaultMountCommand, source, target, fstype, []string{"bind"}) if err != nil { return err } - return doMount(mounterPath, source, target, fstype, bindRemountOpts) + return doMount(mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts) } // The list of filesystems that require containerized mounter on GCI image cluster - fsTypesNeedMounter := sets.NewString("nfs", "glusterfs") + fsTypesNeedMounter := sets.NewString("nfs", "glusterfs", "ceph", "cifs") if fsTypesNeedMounter.Has(fstype) { mounterPath = mounter.mounterPath } - return doMount(mounterPath, source, target, fstype, options) + return doMount(mounterPath, defaultMountCommand, source, target, fstype, options) } // isBind detects whether a bind mount is being requested and makes the remount options to @@ -107,10 +107,13 @@ func isBind(options []string) (bool, []string) { return bind, bindRemountOpts } -// doMount runs the mount command. -func doMount(mountCmd string, source string, target string, fstype string, options []string) error { - glog.V(4).Infof("Mounting %s %s %s %v with command: %q", source, target, fstype, options, mountCmd) +// doMount runs the mount command. mounterPath is the path to mounter binary if containerized mounter is used. +func doMount(mounterPath string, mountCmd string, source string, target string, fstype string, options []string) error { mountArgs := makeMountArgs(source, target, fstype, options) + if len(mounterPath) > 0 { + mountArgs = append([]string{mountCmd}, mountArgs...) + mountCmd = mounterPath + } glog.V(4).Infof("Mounting cmd (%s) with arguments (%s)", mountCmd, mountArgs) command := exec.Command(mountCmd, mountArgs...) 
@@ -282,7 +285,7 @@ func readProcMounts(mountFilePath string, out *[]MountPoint) (uint32, error) { } func readProcMountsFrom(file io.Reader, out *[]MountPoint) (uint32, error) { - hash := adler32.New() + hash := fnv.New32a() scanner := bufio.NewReader(file) for { line, err := scanner.ReadString('\n') @@ -332,9 +335,9 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, cmd := mounter.Runner.Command("fsck", args...) out, err := cmd.CombinedOutput() if err != nil { - ee, isExitError := err.(utilExec.ExitError) + ee, isExitError := err.(utilexec.ExitError) switch { - case err == utilExec.ErrExecutableNotFound: + case err == utilexec.ErrExecutableNotFound: glog.Warningf("'fsck' not found on system; continuing mount without running 'fsck'.") case isExitError && ee.ExitStatus() == fsckErrorsCorrected: glog.Infof("Device %s has errors which were corrected by fsck.", source) @@ -347,19 +350,24 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, // Try to mount the disk glog.V(4).Infof("Attempting to mount disk: %s %s %s", fstype, source, target) - err = mounter.Interface.Mount(source, target, fstype, options) - if err != nil { - // It is possible that this disk is not formatted. Double check using diskLooksUnformatted - notFormatted, err := mounter.diskLooksUnformatted(source) - if err == nil && notFormatted { - args = []string{source} + mountErr := mounter.Interface.Mount(source, target, fstype, options) + if mountErr != nil { + // Mount failed. This indicates either that the disk is unformatted or + // it contains an unexpected filesystem. + existingFormat, err := mounter.getDiskFormat(source) + if err != nil { + return err + } + if existingFormat == "" { // Disk is unformatted so format it. + args = []string{source} // Use 'ext4' as the default if len(fstype) == 0 { fstype = "ext4" } + if fstype == "ext4" || fstype == "ext3" { - args = []string{"-E", "lazy_itable_init=0,lazy_journal_init=0", "-F", source} + args = []string{"-F", source} } glog.Infof("Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", source, fstype, args) cmd := mounter.Runner.Command("mkfs."+fstype, args...) @@ -371,13 +379,22 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, } glog.Errorf("format of disk %q failed: type:(%q) target:(%q) options:(%q)error:(%v)", source, fstype, target, options, err) return err + } else { + // Disk is already formatted and failed to mount + if len(fstype) == 0 || fstype == existingFormat { + // This is mount error + return mountErr + } else { + // Block device is formatted with unexpected filesystem, let the user know + return fmt.Errorf("failed to mount the volume as %q, it's already formatted with %q. Mount error: %v", fstype, existingFormat, mountErr) + } } } - return err + return mountErr } // diskLooksUnformatted uses 'lsblk' to see if the given disk is unformated -func (mounter *SafeFormatAndMount) diskLooksUnformatted(disk string) (bool, error) { +func (mounter *SafeFormatAndMount) getDiskFormat(disk string) (string, error) { args := []string{"-nd", "-o", "FSTYPE", disk} cmd := mounter.Runner.Command("lsblk", args...) 
glog.V(4).Infof("Attempting to determine if disk %q is formatted using lsblk with args: (%v)", disk, args) @@ -389,8 +406,8 @@ func (mounter *SafeFormatAndMount) diskLooksUnformatted(disk string) (bool, erro if err != nil { glog.Errorf("Could not determine if disk %q is formatted (%v)", disk, err) - return false, err + return "", err } - return output == "", nil + return strings.TrimSpace(output), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/util/net/BUILD b/vendor/k8s.io/kubernetes/pkg/util/net/BUILD deleted file mode 100644 index 75202eaede..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/net/BUILD +++ /dev/null @@ -1,45 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "http.go", - "interface.go", - "port_range.go", - "port_split.go", - "util.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/util/sets:go_default_library", - "//vendor:github.com/golang/glog", - "//vendor:golang.org/x/net/http2", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "http_test.go", - "interface_test.go", - "port_range_test.go", - "port_split_test.go", - "util_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/util/sets:go_default_library", - "//vendor:github.com/spf13/pflag", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/net/http.go b/vendor/k8s.io/kubernetes/pkg/util/net/http.go deleted file mode 100644 index bfe2e09375..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/net/http.go +++ /dev/null @@ -1,263 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package net - -import ( - "crypto/tls" - "fmt" - "io" - "net" - "net/http" - "net/url" - "os" - "strconv" - "strings" - - "github.com/golang/glog" - "golang.org/x/net/http2" -) - -// IsProbableEOF returns true if the given error resembles a connection termination -// scenario that would justify assuming that the watch is empty. -// These errors are what the Go http stack returns back to us which are general -// connection closure errors (strongly correlated) and callers that need to -// differentiate probable errors in connection behavior between normal "this is -// disconnected" should use the method. 
-func IsProbableEOF(err error) bool { - if uerr, ok := err.(*url.Error); ok { - err = uerr.Err - } - switch { - case err == io.EOF: - return true - case err.Error() == "http: can't write HTTP request on broken connection": - return true - case strings.Contains(err.Error(), "connection reset by peer"): - return true - case strings.Contains(strings.ToLower(err.Error()), "use of closed network connection"): - return true - } - return false -} - -var defaultTransport = http.DefaultTransport.(*http.Transport) - -// SetOldTransportDefaults applies the defaults from http.DefaultTransport -// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset -func SetOldTransportDefaults(t *http.Transport) *http.Transport { - if t.Proxy == nil || isDefault(t.Proxy) { - // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings - // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY - t.Proxy = NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) - } - if t.Dial == nil { - t.Dial = defaultTransport.Dial - } - if t.TLSHandshakeTimeout == 0 { - t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout - } - return t -} - -// SetTransportDefaults applies the defaults from http.DefaultTransport -// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset -func SetTransportDefaults(t *http.Transport) *http.Transport { - t = SetOldTransportDefaults(t) - // Allow clients to disable http2 if needed. - if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 { - glog.Infof("HTTP2 has been explicitly disabled") - } else { - if err := http2.ConfigureTransport(t); err != nil { - glog.Warningf("Transport failed http2 configuration: %v", err) - } - } - return t -} - -type RoundTripperWrapper interface { - http.RoundTripper - WrappedRoundTripper() http.RoundTripper -} - -type DialFunc func(net, addr string) (net.Conn, error) - -func Dialer(transport http.RoundTripper) (DialFunc, error) { - if transport == nil { - return nil, nil - } - - switch transport := transport.(type) { - case *http.Transport: - return transport.Dial, nil - case RoundTripperWrapper: - return Dialer(transport.WrappedRoundTripper()) - default: - return nil, fmt.Errorf("unknown transport type: %v", transport) - } -} - -// CloneTLSConfig returns a tls.Config with all exported fields except SessionTicketsDisabled and SessionTicketKey copied. -// This makes it safe to call CloneTLSConfig on a config in active use by a server. 
-// TODO: replace with tls.Config#Clone when we move to go1.8 -func CloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - return &tls.Config{ - Rand: cfg.Rand, - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, - } -} - -func TLSClientConfig(transport http.RoundTripper) (*tls.Config, error) { - if transport == nil { - return nil, nil - } - - switch transport := transport.(type) { - case *http.Transport: - return transport.TLSClientConfig, nil - case RoundTripperWrapper: - return TLSClientConfig(transport.WrappedRoundTripper()) - default: - return nil, fmt.Errorf("unknown transport type: %v", transport) - } -} - -func FormatURL(scheme string, host string, port int, path string) *url.URL { - return &url.URL{ - Scheme: scheme, - Host: net.JoinHostPort(host, strconv.Itoa(port)), - Path: path, - } -} - -func GetHTTPClient(req *http.Request) string { - if userAgent, ok := req.Header["User-Agent"]; ok { - if len(userAgent) > 0 { - return userAgent[0] - } - } - return "unknown" -} - -// Extracts and returns the clients IP from the given request. -// Looks at X-Forwarded-For header, X-Real-Ip header and request.RemoteAddr in that order. -// Returns nil if none of them are set or is set to an invalid value. -func GetClientIP(req *http.Request) net.IP { - hdr := req.Header - // First check the X-Forwarded-For header for requests via proxy. - hdrForwardedFor := hdr.Get("X-Forwarded-For") - if hdrForwardedFor != "" { - // X-Forwarded-For can be a csv of IPs in case of multiple proxies. - // Use the first valid one. - parts := strings.Split(hdrForwardedFor, ",") - for _, part := range parts { - ip := net.ParseIP(strings.TrimSpace(part)) - if ip != nil { - return ip - } - } - } - - // Try the X-Real-Ip header. - hdrRealIp := hdr.Get("X-Real-Ip") - if hdrRealIp != "" { - ip := net.ParseIP(hdrRealIp) - if ip != nil { - return ip - } - } - - // Fallback to Remote Address in request, which will give the correct client IP when there is no proxy. - // Remote Address in Go's HTTP server is in the form host:port so we need to split that first. - host, _, err := net.SplitHostPort(req.RemoteAddr) - if err == nil { - return net.ParseIP(host) - } - - // Fallback if Remote Address was just IP. 
- return net.ParseIP(req.RemoteAddr) -} - -var defaultProxyFuncPointer = fmt.Sprintf("%p", http.ProxyFromEnvironment) - -// isDefault checks to see if the transportProxierFunc is pointing to the default one -func isDefault(transportProxier func(*http.Request) (*url.URL, error)) bool { - transportProxierPointer := fmt.Sprintf("%p", transportProxier) - return transportProxierPointer == defaultProxyFuncPointer -} - -// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if -// no matching CIDRs are found -func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { - // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it - noProxyEnv := os.Getenv("NO_PROXY") - noProxyRules := strings.Split(noProxyEnv, ",") - - cidrs := []*net.IPNet{} - for _, noProxyRule := range noProxyRules { - _, cidr, _ := net.ParseCIDR(noProxyRule) - if cidr != nil { - cidrs = append(cidrs, cidr) - } - } - - if len(cidrs) == 0 { - return delegate - } - - return func(req *http.Request) (*url.URL, error) { - host := req.URL.Host - // for some urls, the Host is already the host, not the host:port - if net.ParseIP(host) == nil { - var err error - host, _, err = net.SplitHostPort(req.URL.Host) - if err != nil { - return delegate(req) - } - } - - ip := net.ParseIP(host) - if ip == nil { - return delegate(req) - } - - for _, cidr := range cidrs { - if cidr.Contains(ip) { - return nil, nil - } - } - - return delegate(req) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/net/sets/BUILD b/vendor/k8s.io/kubernetes/pkg/util/net/sets/BUILD index 05c9bb745a..6084ba4d63 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/net/sets/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/net/sets/BUILD @@ -4,10 +4,8 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", "go_test", - "cgo_library", ) go_library( @@ -22,7 +20,19 @@ go_library( go_test( name = "go_default_test", srcs = ["ipnet_test.go"], - library = "go_default_library", + library = ":go_default_library", + tags = ["automanaged"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], tags = ["automanaged"], - deps = [], ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/net/util.go b/vendor/k8s.io/kubernetes/pkg/util/net/util.go deleted file mode 100644 index 1348f4deeb..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/net/util.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package net - -import ( - "net" - "reflect" -) - -// IPNetEqual checks if the two input IPNets are representing the same subnet. -// For example, -// 10.0.0.1/24 and 10.0.0.0/24 are the same subnet. -// 10.0.0.1/24 and 10.0.0.0/25 are not the same subnet. 
-func IPNetEqual(ipnet1, ipnet2 *net.IPNet) bool { - if ipnet1 == nil || ipnet2 == nil { - return false - } - if reflect.DeepEqual(ipnet1.Mask, ipnet2.Mask) && ipnet1.Contains(ipnet2.IP) && ipnet2.Contains(ipnet1.IP) { - return true - } - return false -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/node/BUILD b/vendor/k8s.io/kubernetes/pkg/util/node/BUILD deleted file mode 100644 index 25db78b526..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/node/BUILD +++ /dev/null @@ -1,35 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["node.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/types:go_default_library", - "//vendor:github.com/golang/glog", - ], -) - -go_test( - name = "go_default_test", - srcs = ["node_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/node/node.go b/vendor/k8s.io/kubernetes/pkg/util/node/node.go deleted file mode 100644 index 2357a2dde4..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/node/node.go +++ /dev/null @@ -1,132 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package node - -import ( - "encoding/json" - "fmt" - "net" - "os" - "strings" - "time" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/types" -) - -const ( - // The reason and message set on a pod when its state cannot be confirmed as kubelet is unresponsive - // on the node it is (was) running. - NodeUnreachablePodReason = "NodeLost" - NodeUnreachablePodMessage = "Node %v which was running pod %v is unresponsive" -) - -func GetHostname(hostnameOverride string) string { - var hostname string = hostnameOverride - if hostname == "" { - nodename, err := os.Hostname() - if err != nil { - glog.Fatalf("Couldn't determine hostname: %v", err) - } - hostname = nodename - } - return strings.ToLower(strings.TrimSpace(hostname)) -} - -// GetPreferredNodeAddress returns the address of the provided node, using the provided preference order. -// If none of the preferred address types are found, an error is returned. -func GetPreferredNodeAddress(node *api.Node, preferredAddressTypes []api.NodeAddressType) (string, error) { - for _, addressType := range preferredAddressTypes { - for _, address := range node.Status.Addresses { - if address.Type == addressType { - return address.Address, nil - } - } - // If hostname was requested and no Hostname address was registered... 
- if addressType == api.NodeHostName { - // ...fall back to the kubernetes.io/hostname label for compatibility with kubelets before 1.5 - if hostname, ok := node.Labels[unversioned.LabelHostname]; ok && len(hostname) > 0 { - return hostname, nil - } - } - } - return "", fmt.Errorf("no preferred addresses found; known addresses: %v", node.Status.Addresses) -} - -// GetNodeHostIP returns the provided node's IP, based on the priority: -// 1. NodeInternalIP -// 2. NodeExternalIP -// 3. NodeLegacyHostIP -func GetNodeHostIP(node *api.Node) (net.IP, error) { - addresses := node.Status.Addresses - addressMap := make(map[api.NodeAddressType][]api.NodeAddress) - for i := range addresses { - addressMap[addresses[i].Type] = append(addressMap[addresses[i].Type], addresses[i]) - } - if addresses, ok := addressMap[api.NodeInternalIP]; ok { - return net.ParseIP(addresses[0].Address), nil - } - if addresses, ok := addressMap[api.NodeExternalIP]; ok { - return net.ParseIP(addresses[0].Address), nil - } - if addresses, ok := addressMap[api.NodeLegacyHostIP]; ok { - return net.ParseIP(addresses[0].Address), nil - } - return nil, fmt.Errorf("host IP unknown; known addresses: %v", addresses) -} - -// Helper function that builds a string identifier that is unique per failure-zone -// Returns empty-string for no zone -func GetZoneKey(node *api.Node) string { - labels := node.Labels - if labels == nil { - return "" - } - - region, _ := labels[unversioned.LabelZoneRegion] - failureDomain, _ := labels[unversioned.LabelZoneFailureDomain] - - if region == "" && failureDomain == "" { - return "" - } - - // We include the null character just in case region or failureDomain has a colon - // (We do assume there's no null characters in a region or failureDomain) - // As a nice side-benefit, the null character is not printed by fmt.Print or glog - return region + ":\x00:" + failureDomain -} - -// SetNodeCondition updates specific node condition with patch operation. 
-func SetNodeCondition(c clientset.Interface, node types.NodeName, condition api.NodeCondition) error { - generatePatch := func(condition api.NodeCondition) ([]byte, error) { - raw, err := json.Marshal(&[]api.NodeCondition{condition}) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf(`{"status":{"conditions":%s}}`, raw)), nil - } - condition.LastHeartbeatTime = unversioned.NewTime(time.Now()) - patch, err := generatePatch(condition) - if err != nil { - return nil - } - _, err = c.Core().Nodes().PatchStatus(string(node), patch) - return err -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/parsers/BUILD b/vendor/k8s.io/kubernetes/pkg/util/parsers/BUILD index f9341cfc54..de04bf52cd 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/parsers/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/parsers/BUILD @@ -4,10 +4,8 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", "go_test", - "cgo_library", ) go_library( @@ -20,7 +18,19 @@ go_library( go_test( name = "go_default_test", srcs = ["parsers_test.go"], - library = "go_default_library", + library = ":go_default_library", + tags = ["automanaged"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], tags = ["automanaged"], - deps = [], ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/pod/BUILD b/vendor/k8s.io/kubernetes/pkg/util/pod/BUILD deleted file mode 100644 index 147e0ff47e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/pod/BUILD +++ /dev/null @@ -1,29 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "pod.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", - "//pkg/util/errors:go_default_library", - "//pkg/util/hash:go_default_library", - "//pkg/util/wait:go_default_library", - "//vendor:github.com/golang/glog", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/pod/doc.go b/vendor/k8s.io/kubernetes/pkg/util/pod/doc.go deleted file mode 100644 index d7a8ff226e..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/pod/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package pod provides utilities to work with Kubernetes pod and pod templates. -package pod diff --git a/vendor/k8s.io/kubernetes/pkg/util/pod/pod.go b/vendor/k8s.io/kubernetes/pkg/util/pod/pod.go deleted file mode 100644 index a6864e3602..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/pod/pod.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package pod - -import ( - "fmt" - "hash/adler32" - "time" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - errorsutil "k8s.io/kubernetes/pkg/util/errors" - hashutil "k8s.io/kubernetes/pkg/util/hash" - "k8s.io/kubernetes/pkg/util/wait" -) - -func GetPodTemplateSpecHash(template api.PodTemplateSpec) uint32 { - podTemplateSpecHasher := adler32.New() - hashutil.DeepHashObject(podTemplateSpecHasher, template) - return podTemplateSpecHasher.Sum32() -} - -// TODO: use client library instead when it starts to support update retries -// see https://github.com/kubernetes/kubernetes/issues/21479 -type updatePodFunc func(pod *api.Pod) error - -// UpdatePodWithRetries updates a pod with given applyUpdate function. Note that pod not found error is ignored. -// The returned bool value can be used to tell if the pod is actually updated. -func UpdatePodWithRetries(podClient unversionedcore.PodInterface, pod *api.Pod, applyUpdate updatePodFunc) (*api.Pod, bool, error) { - var err error - var podUpdated bool - oldPod := pod - if err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { - pod, err = podClient.Get(oldPod.Name) - if err != nil { - return false, err - } - // Apply the update, then attempt to push it to the apiserver. - if err = applyUpdate(pod); err != nil { - return false, err - } - if pod, err = podClient.Update(pod); err == nil { - // Update successful. - return true, nil - } - // TODO: don't retry on perm-failed errors and handle them gracefully - // Update could have failed due to conflict error. Try again. - return false, nil - }); err == nil { - // When there's no error, we've updated this pod. - podUpdated = true - } - - // Handle returned error from wait poll - if err == wait.ErrWaitTimeout { - err = fmt.Errorf("timed out trying to update pod: %#v", oldPod) - } - // Ignore the pod not found error, but the pod isn't updated. - if errors.IsNotFound(err) { - glog.V(4).Infof("%s %s/%s is not found, skip updating it.", oldPod.Kind, oldPod.Namespace, oldPod.Name) - err = nil - } - // Ignore the precondition violated error, but the pod isn't updated. - if err == errorsutil.ErrPreconditionViolated { - glog.V(4).Infof("%s %s/%s precondition doesn't hold, skip updating it.", oldPod.Kind, oldPod.Namespace, oldPod.Name) - err = nil - } - - // If the error is non-nil the returned pod cannot be trusted; if podUpdated is false, the pod isn't updated; - // if the error is nil and podUpdated is true, the returned pod contains the applied update. 
- return pod, podUpdated, err -} - -// Filter uses the input function f to filter the given pod list, and return the filtered pods -func Filter(podList *api.PodList, f func(api.Pod) bool) []api.Pod { - pods := make([]api.Pod, 0) - for _, p := range podList.Items { - if f(p) { - pods = append(pods, p) - } - } - return pods -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/rand/BUILD b/vendor/k8s.io/kubernetes/pkg/util/rand/BUILD deleted file mode 100644 index c9489e7bb2..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/rand/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["rand.go"], - tags = ["automanaged"], -) - -go_test( - name = "go_default_test", - srcs = ["rand_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/runtime/BUILD b/vendor/k8s.io/kubernetes/pkg/util/runtime/BUILD deleted file mode 100644 index 86807a5af9..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/runtime/BUILD +++ /dev/null @@ -1,26 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["runtime.go"], - tags = ["automanaged"], - deps = ["//vendor:github.com/golang/glog"], -) - -go_test( - name = "go_default_test", - srcs = ["runtime_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/sets/BUILD b/vendor/k8s.io/kubernetes/pkg/util/sets/BUILD deleted file mode 100644 index f751da67e1..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/sets/BUILD +++ /dev/null @@ -1,52 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - ":set-gen", - ], -) - -go_test( - name = "go_default_test", - srcs = ["set_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) - -genrule( - name = "set-gen", - srcs = [ - "//pkg/util/sets/types:types.go", - "//hack/boilerplate:boilerplate.go.txt", - ], - outs = [ - "byte.go", - "doc.go", - "empty.go", - "int.go", - "int64.go", - "string.go", - ], - cmd = """ -$(location //cmd/libs/go2idl/set-gen) \ - --input-dirs ./pkg/util/sets/types \ - --output-base $(GENDIR)/pkg/util \ - --go-header-file $(location //hack/boilerplate:boilerplate.go.txt) \ - --output-package sets - """, - tools = [ - "//cmd/libs/go2idl/set-gen", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/sets/doc.go b/vendor/k8s.io/kubernetes/pkg/util/sets/doc.go deleted file mode 100644 index c5e541621f..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/sets/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by set-gen. Do not edit it manually! - -// Package sets has auto-generated set types. -package sets diff --git a/vendor/k8s.io/kubernetes/pkg/util/slice/BUILD b/vendor/k8s.io/kubernetes/pkg/util/slice/BUILD deleted file mode 100644 index b2d65e2172..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/slice/BUILD +++ /dev/null @@ -1,26 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["slice.go"], - tags = ["automanaged"], - deps = ["//pkg/util/rand:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = ["slice_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/slice/slice.go b/vendor/k8s.io/kubernetes/pkg/util/slice/slice.go deleted file mode 100644 index 2e1d7ccb53..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/slice/slice.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package slice provides utility methods for common operations on slices. -package slice - -import ( - "sort" - - utilrand "k8s.io/kubernetes/pkg/util/rand" -) - -// CopyStrings copies the contents of the specified string slice -// into a new slice. -func CopyStrings(s []string) []string { - c := make([]string, len(s)) - copy(c, s) - return c -} - -// SortStrings sorts the specified string slice in place. It returns the same -// slice that was provided in order to facilitate method chaining. -func SortStrings(s []string) []string { - sort.Strings(s) - return s -} - -// ShuffleStrings copies strings from the specified slice into a copy in random -// order. It returns a new slice. -func ShuffleStrings(s []string) []string { - shuffled := make([]string, len(s)) - perm := utilrand.Perm(len(s)) - for i, j := range perm { - shuffled[j] = s[i] - } - return shuffled -} - -// Int64Slice attaches the methods of Interface to []int64, -// sorting in increasing order. 
-type Int64Slice []int64 - -func (p Int64Slice) Len() int { return len(p) } -func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// Sorts []int64 in increasing order -func SortInts64(a []int64) { sort.Sort(Int64Slice(a)) } diff --git a/vendor/k8s.io/kubernetes/pkg/util/strategicpatch/BUILD b/vendor/k8s.io/kubernetes/pkg/util/strategicpatch/BUILD deleted file mode 100644 index 433b7a295d..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/strategicpatch/BUILD +++ /dev/null @@ -1,34 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["patch.go"], - tags = ["automanaged"], - deps = [ - "//pkg/util/json:go_default_library", - "//third_party/forked/golang/json:go_default_library", - "//vendor:github.com/davecgh/go-spew/spew", - "//vendor:github.com/ghodss/yaml", - ], -) - -go_test( - name = "go_default_test", - srcs = ["patch_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//vendor:github.com/davecgh/go-spew/spew", - "//vendor:github.com/ghodss/yaml", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/kubernetes/pkg/util/strategicpatch/patch.go deleted file mode 100644 index 66a33f7ad0..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/strategicpatch/patch.go +++ /dev/null @@ -1,1265 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package strategicpatch - -import ( - "fmt" - "reflect" - "sort" - - "k8s.io/kubernetes/pkg/util/json" - forkedjson "k8s.io/kubernetes/third_party/forked/golang/json" - - "github.com/davecgh/go-spew/spew" - "github.com/ghodss/yaml" -) - -// An alternate implementation of JSON Merge Patch -// (https://tools.ietf.org/html/rfc7386) which supports the ability to annotate -// certain fields with metadata that indicates whether the elements of JSON -// lists should be merged or replaced. -// -// For more information, see the PATCH section of docs/devel/api-conventions.md. -// -// Some of the content of this package was borrowed with minor adaptations from -// evanphx/json-patch and openshift/origin. - -const ( - directiveMarker = "$patch" - deleteDirective = "delete" - replaceDirective = "replace" - mergeDirective = "merge" -) - -// IsPreconditionFailed returns true if the provided error indicates -// a precondition failed. 
-func IsPreconditionFailed(err error) bool { - _, ok := err.(errPreconditionFailed) - return ok -} - -type errPreconditionFailed struct { - message string -} - -func newErrPreconditionFailed(target map[string]interface{}) errPreconditionFailed { - s := fmt.Sprintf("precondition failed for: %v", target) - return errPreconditionFailed{s} -} - -func (err errPreconditionFailed) Error() string { - return err.message -} - -type errConflict struct { - message string -} - -func newErrConflict(patch, current string) errConflict { - s := fmt.Sprintf("patch:\n%s\nconflicts with changes made from original to current:\n%s\n", patch, current) - return errConflict{s} -} - -func (err errConflict) Error() string { - return err.message -} - -// IsConflict returns true if the provided error indicates -// a conflict between the patch and the current configuration. -func IsConflict(err error) bool { - _, ok := err.(errConflict) - return ok -} - -var errBadJSONDoc = fmt.Errorf("Invalid JSON document") -var errNoListOfLists = fmt.Errorf("Lists of lists are not supported") - -// The following code is adapted from github.com/openshift/origin/pkg/util/jsonmerge. -// Instead of defining a Delta that holds an original, a patch and a set of preconditions, -// the reconcile method accepts a set of preconditions as an argument. - -// PreconditionFunc asserts that an incompatible change is not present within a patch. -type PreconditionFunc func(interface{}) bool - -// RequireKeyUnchanged returns a precondition function that fails if the provided key -// is present in the patch (indicating that its value has changed). -func RequireKeyUnchanged(key string) PreconditionFunc { - return func(patch interface{}) bool { - patchMap, ok := patch.(map[string]interface{}) - if !ok { - return true - } - - // The presence of key means that its value has been changed, so the test fails. - _, ok = patchMap[key] - return !ok - } -} - -// RequireMetadataKeyUnchanged creates a precondition function that fails -// if the metadata.key is present in the patch (indicating its value -// has changed). -func RequireMetadataKeyUnchanged(key string) PreconditionFunc { - return func(patch interface{}) bool { - patchMap, ok := patch.(map[string]interface{}) - if !ok { - return true - } - patchMap1, ok := patchMap["metadata"] - if !ok { - return true - } - patchMap2, ok := patchMap1.(map[string]interface{}) - if !ok { - return true - } - _, ok = patchMap2[key] - return !ok - } -} - -// Deprecated: Use the synonym CreateTwoWayMergePatch, instead. -func CreateStrategicMergePatch(original, modified []byte, dataStruct interface{}) ([]byte, error) { - return CreateTwoWayMergePatch(original, modified, dataStruct) -} - -// CreateTwoWayMergePatch creates a patch that can be passed to StrategicMergePatch from an original -// document and a modified document, which are passed to the method as json encoded content. It will -// return a patch that yields the modified document when applied to the original document, or an error -// if either of the two documents is invalid. 
-func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, fns ...PreconditionFunc) ([]byte, error) { - originalMap := map[string]interface{}{} - if len(original) > 0 { - if err := json.Unmarshal(original, &originalMap); err != nil { - return nil, errBadJSONDoc - } - } - - modifiedMap := map[string]interface{}{} - if len(modified) > 0 { - if err := json.Unmarshal(modified, &modifiedMap); err != nil { - return nil, errBadJSONDoc - } - } - - t, err := getTagStructType(dataStruct) - if err != nil { - return nil, err - } - - patchMap, err := diffMaps(originalMap, modifiedMap, t, false, false) - if err != nil { - return nil, err - } - - // Apply the preconditions to the patch, and return an error if any of them fail. - for _, fn := range fns { - if !fn(patchMap) { - return nil, newErrPreconditionFailed(patchMap) - } - } - - return json.Marshal(patchMap) -} - -// Returns a (recursive) strategic merge patch that yields modified when applied to original. -func diffMaps(original, modified map[string]interface{}, t reflect.Type, ignoreChangesAndAdditions, ignoreDeletions bool) (map[string]interface{}, error) { - patch := map[string]interface{}{} - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - - for key, modifiedValue := range modified { - originalValue, ok := original[key] - if !ok { - // Key was added, so add to patch - if !ignoreChangesAndAdditions { - patch[key] = modifiedValue - } - - continue - } - - if key == directiveMarker { - originalString, ok := originalValue.(string) - if !ok { - return nil, fmt.Errorf("invalid value for special key: %s", directiveMarker) - } - - modifiedString, ok := modifiedValue.(string) - if !ok { - return nil, fmt.Errorf("invalid value for special key: %s", directiveMarker) - } - - if modifiedString != originalString { - patch[directiveMarker] = modifiedValue - } - - continue - } - - if reflect.TypeOf(originalValue) != reflect.TypeOf(modifiedValue) { - // Types have changed, so add to patch - if !ignoreChangesAndAdditions { - patch[key] = modifiedValue - } - - continue - } - - // Types are the same, so compare values - switch originalValueTyped := originalValue.(type) { - case map[string]interface{}: - modifiedValueTyped := modifiedValue.(map[string]interface{}) - fieldType, _, _, err := forkedjson.LookupPatchMetadata(t, key) - if err != nil { - return nil, err - } - - patchValue, err := diffMaps(originalValueTyped, modifiedValueTyped, fieldType, ignoreChangesAndAdditions, ignoreDeletions) - if err != nil { - return nil, err - } - - if len(patchValue) > 0 { - patch[key] = patchValue - } - - continue - case []interface{}: - modifiedValueTyped := modifiedValue.([]interface{}) - fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, key) - if err != nil { - return nil, err - } - - if fieldPatchStrategy == mergeDirective { - patchValue, err := diffLists(originalValueTyped, modifiedValueTyped, fieldType.Elem(), fieldPatchMergeKey, ignoreChangesAndAdditions, ignoreDeletions) - if err != nil { - return nil, err - } - - if len(patchValue) > 0 { - patch[key] = patchValue - } - - continue - } - } - - if !ignoreChangesAndAdditions { - if !reflect.DeepEqual(originalValue, modifiedValue) { - // Values are different, so add to patch - patch[key] = modifiedValue - } - } - } - - if !ignoreDeletions { - // Add nils for deleted values - for key := range original { - _, found := modified[key] - if !found { - patch[key] = nil - } - } - } - - return patch, nil -} - -// Returns a (recursive) strategic merge patch that yields 
modified when applied to original, -// for a pair of lists with merge semantics. -func diffLists(original, modified []interface{}, t reflect.Type, mergeKey string, ignoreChangesAndAdditions, ignoreDeletions bool) ([]interface{}, error) { - if len(original) == 0 { - if len(modified) == 0 || ignoreChangesAndAdditions { - return nil, nil - } - - return modified, nil - } - - elementType, err := sliceElementType(original, modified) - if err != nil { - return nil, err - } - - var patch []interface{} - - if elementType.Kind() == reflect.Map { - patch, err = diffListsOfMaps(original, modified, t, mergeKey, ignoreChangesAndAdditions, ignoreDeletions) - } else if !ignoreChangesAndAdditions { - patch, err = diffListsOfScalars(original, modified) - } - - if err != nil { - return nil, err - } - - return patch, nil -} - -// Returns a (recursive) strategic merge patch that yields modified when applied to original, -// for a pair of lists of scalars with merge semantics. -func diffListsOfScalars(original, modified []interface{}) ([]interface{}, error) { - if len(modified) == 0 { - // There is no need to check the length of original because there is no way to create - // a patch that deletes a scalar from a list of scalars with merge semantics. - return nil, nil - } - - patch := []interface{}{} - - originalScalars := uniqifyAndSortScalars(original) - modifiedScalars := uniqifyAndSortScalars(modified) - originalIndex, modifiedIndex := 0, 0 - -loopB: - for ; modifiedIndex < len(modifiedScalars); modifiedIndex++ { - for ; originalIndex < len(originalScalars); originalIndex++ { - originalString := fmt.Sprintf("%v", original[originalIndex]) - modifiedString := fmt.Sprintf("%v", modified[modifiedIndex]) - if originalString >= modifiedString { - if originalString != modifiedString { - patch = append(patch, modified[modifiedIndex]) - } - - continue loopB - } - // There is no else clause because there is no way to create a patch that deletes - // a scalar from a list of scalars with merge semantics. - } - - break - } - - // Add any remaining items found only in modified - for ; modifiedIndex < len(modifiedScalars); modifiedIndex++ { - patch = append(patch, modified[modifiedIndex]) - } - - return patch, nil -} - -var errNoMergeKeyFmt = "map: %v does not contain declared merge key: %s" -var errBadArgTypeFmt = "expected a %s, but received a %s" - -// Returns a (recursive) strategic merge patch that yields modified when applied to original, -// for a pair of lists of maps with merge semantics. 
-func diffListsOfMaps(original, modified []interface{}, t reflect.Type, mergeKey string, ignoreChangesAndAdditions, ignoreDeletions bool) ([]interface{}, error) { - patch := make([]interface{}, 0) - - originalSorted, err := sortMergeListsByNameArray(original, t, mergeKey, false) - if err != nil { - return nil, err - } - - modifiedSorted, err := sortMergeListsByNameArray(modified, t, mergeKey, false) - if err != nil { - return nil, err - } - - originalIndex, modifiedIndex := 0, 0 - -loopB: - for ; modifiedIndex < len(modifiedSorted); modifiedIndex++ { - modifiedMap, ok := modifiedSorted[modifiedIndex].(map[string]interface{}) - if !ok { - t := reflect.TypeOf(modifiedSorted[modifiedIndex]) - return nil, fmt.Errorf(errBadArgTypeFmt, "map[string]interface{}", t.Kind().String()) - } - - modifiedValue, ok := modifiedMap[mergeKey] - if !ok { - return nil, fmt.Errorf(errNoMergeKeyFmt, modifiedMap, mergeKey) - } - - for ; originalIndex < len(originalSorted); originalIndex++ { - originalMap, ok := originalSorted[originalIndex].(map[string]interface{}) - if !ok { - t := reflect.TypeOf(originalSorted[originalIndex]) - return nil, fmt.Errorf(errBadArgTypeFmt, "map[string]interface{}", t.Kind().String()) - } - - originalValue, ok := originalMap[mergeKey] - if !ok { - return nil, fmt.Errorf(errNoMergeKeyFmt, originalMap, mergeKey) - } - - // Assume that the merge key values are comparable strings - originalString := fmt.Sprintf("%v", originalValue) - modifiedString := fmt.Sprintf("%v", modifiedValue) - if originalString >= modifiedString { - if originalString == modifiedString { - // Merge key values are equal, so recurse - patchValue, err := diffMaps(originalMap, modifiedMap, t, ignoreChangesAndAdditions, ignoreDeletions) - if err != nil { - return nil, err - } - - originalIndex++ - if len(patchValue) > 0 { - patchValue[mergeKey] = modifiedValue - patch = append(patch, patchValue) - } - } else if !ignoreChangesAndAdditions { - // Item was added, so add to patch - patch = append(patch, modifiedMap) - } - - continue loopB - } - - if !ignoreDeletions { - // Item was deleted, so add delete directive - patch = append(patch, map[string]interface{}{mergeKey: originalValue, directiveMarker: deleteDirective}) - } - } - - break - } - - if !ignoreDeletions { - // Delete any remaining items found only in original - for ; originalIndex < len(originalSorted); originalIndex++ { - originalMap, ok := originalSorted[originalIndex].(map[string]interface{}) - if !ok { - t := reflect.TypeOf(originalSorted[originalIndex]) - return nil, fmt.Errorf(errBadArgTypeFmt, "map[string]interface{}", t.Kind().String()) - } - - originalValue, ok := originalMap[mergeKey] - if !ok { - return nil, fmt.Errorf(errNoMergeKeyFmt, originalMap, mergeKey) - } - - patch = append(patch, map[string]interface{}{mergeKey: originalValue, directiveMarker: deleteDirective}) - } - } - - if !ignoreChangesAndAdditions { - // Add any remaining items found only in modified - for ; modifiedIndex < len(modifiedSorted); modifiedIndex++ { - patch = append(patch, modifiedSorted[modifiedIndex]) - } - } - - return patch, nil -} - -// Deprecated: StrategicMergePatchData is deprecated. Use the synonym StrategicMergePatch, -// instead, which follows the naming convention of evanphx/json-patch. -func StrategicMergePatchData(original, patch []byte, dataStruct interface{}) ([]byte, error) { - return StrategicMergePatch(original, patch, dataStruct) -} - -// StrategicMergePatch applies a strategic merge patch. 
The patch and the original document -// must be json encoded content. A patch can be created from an original and a modified document -// by calling CreateStrategicMergePatch. -func StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte, error) { - if original == nil { - original = []byte("{}") - } - - if patch == nil { - patch = []byte("{}") - } - - originalMap := map[string]interface{}{} - err := json.Unmarshal(original, &originalMap) - if err != nil { - return nil, errBadJSONDoc - } - - patchMap := map[string]interface{}{} - err = json.Unmarshal(patch, &patchMap) - if err != nil { - return nil, errBadJSONDoc - } - - t, err := getTagStructType(dataStruct) - if err != nil { - return nil, err - } - - result, err := mergeMap(originalMap, patchMap, t) - if err != nil { - return nil, err - } - - return json.Marshal(result) -} - -func getTagStructType(dataStruct interface{}) (reflect.Type, error) { - if dataStruct == nil { - return nil, fmt.Errorf(errBadArgTypeFmt, "struct", "nil") - } - - t := reflect.TypeOf(dataStruct) - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - - if t.Kind() != reflect.Struct { - return nil, fmt.Errorf(errBadArgTypeFmt, "struct", t.Kind().String()) - } - - return t, nil -} - -var errBadPatchTypeFmt = "unknown patch type: %s in map: %v" - -// Merge fields from a patch map into the original map. Note: This may modify -// both the original map and the patch because getting a deep copy of a map in -// golang is highly non-trivial. -func mergeMap(original, patch map[string]interface{}, t reflect.Type) (map[string]interface{}, error) { - if v, ok := patch[directiveMarker]; ok { - if v == replaceDirective { - // If the patch contains "$patch: replace", don't merge it, just use the - // patch directly. Later on, we can add a single level replace that only - // affects the map that the $patch is in. - delete(patch, directiveMarker) - return patch, nil - } - - if v == deleteDirective { - // If the patch contains "$patch: delete", don't merge it, just return - // an empty map. - return map[string]interface{}{}, nil - } - - return nil, fmt.Errorf(errBadPatchTypeFmt, v, patch) - } - - // nil is an accepted value for original to simplify logic in other places. - // If original is nil, replace it with an empty map and then apply the patch. - if original == nil { - original = map[string]interface{}{} - } - - // Start merging the patch into the original. - for k, patchV := range patch { - // If the value of this key is null, delete the key if it exists in the - // original. Otherwise, skip it. - if patchV == nil { - if _, ok := original[k]; ok { - delete(original, k) - } - - continue - } - - _, ok := original[k] - if !ok { - // If it's not in the original document, just take the patch value. - original[k] = patchV - continue - } - - // If the data type is a pointer, resolve the element. - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - - // If they're both maps or lists, recurse into the value. - originalType := reflect.TypeOf(original[k]) - patchType := reflect.TypeOf(patchV) - if originalType == patchType { - // First find the fieldPatchStrategy and fieldPatchMergeKey. 
- fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, k) - if err != nil { - return nil, err - } - - if originalType.Kind() == reflect.Map && fieldPatchStrategy != replaceDirective { - typedOriginal := original[k].(map[string]interface{}) - typedPatch := patchV.(map[string]interface{}) - var err error - original[k], err = mergeMap(typedOriginal, typedPatch, fieldType) - if err != nil { - return nil, err - } - - continue - } - - if originalType.Kind() == reflect.Slice && fieldPatchStrategy == mergeDirective { - elemType := fieldType.Elem() - typedOriginal := original[k].([]interface{}) - typedPatch := patchV.([]interface{}) - var err error - original[k], err = mergeSlice(typedOriginal, typedPatch, elemType, fieldPatchMergeKey) - if err != nil { - return nil, err - } - - continue - } - } - - // If originalType and patchType are different OR the types are both - // maps or slices but we're just supposed to replace them, just take - // the value from patch. - original[k] = patchV - } - - return original, nil -} - -// Merge two slices together. Note: This may modify both the original slice and -// the patch because getting a deep copy of a slice in golang is highly -// non-trivial. -func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey string) ([]interface{}, error) { - if len(original) == 0 && len(patch) == 0 { - return original, nil - } - - // All the values must be of the same type, but not a list. - t, err := sliceElementType(original, patch) - if err != nil { - return nil, err - } - - // If the elements are not maps, merge the slices of scalars. - if t.Kind() != reflect.Map { - // Maybe in the future add a "concat" mode that doesn't - // uniqify. - both := append(original, patch...) - return uniqifyScalars(both), nil - } - - if mergeKey == "" { - return nil, fmt.Errorf("cannot merge lists without merge key for type %s", elemType.Kind().String()) - } - - // First look for any special $patch elements. - patchWithoutSpecialElements := []interface{}{} - replace := false - for _, v := range patch { - typedV := v.(map[string]interface{}) - patchType, ok := typedV[directiveMarker] - if ok { - if patchType == deleteDirective { - mergeValue, ok := typedV[mergeKey] - if ok { - _, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) - if err != nil { - return nil, err - } - - if found { - // Delete the element at originalKey. - original = append(original[:originalKey], original[originalKey+1:]...) - } - } else { - return nil, fmt.Errorf("delete patch type with no merge key defined") - } - } else if patchType == replaceDirective { - replace = true - // Continue iterating through the array to prune any other $patch elements. - } else if patchType == mergeDirective { - return nil, fmt.Errorf("merging lists cannot yet be specified in the patch") - } else { - return nil, fmt.Errorf(errBadPatchTypeFmt, patchType, typedV) - } - } else { - patchWithoutSpecialElements = append(patchWithoutSpecialElements, v) - } - } - - if replace { - return patchWithoutSpecialElements, nil - } - - patch = patchWithoutSpecialElements - - // Merge patch into original. - for _, v := range patch { - // Because earlier we confirmed that all the elements are maps. - typedV := v.(map[string]interface{}) - mergeValue, ok := typedV[mergeKey] - if !ok { - return nil, fmt.Errorf(errNoMergeKeyFmt, typedV, mergeKey) - } - - // If we find a value with this merge key value in original, merge the - // maps. Otherwise append onto original. 
- originalMap, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) - if err != nil { - return nil, err - } - - if found { - var mergedMaps interface{} - var err error - // Merge into original. - mergedMaps, err = mergeMap(originalMap, typedV, elemType) - if err != nil { - return nil, err - } - - original[originalKey] = mergedMaps - } else { - original = append(original, v) - } - } - - return original, nil -} - -// This method no longer panics if any element of the slice is not a map. -func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{}) (map[string]interface{}, int, bool, error) { - for k, v := range m { - typedV, ok := v.(map[string]interface{}) - if !ok { - return nil, 0, false, fmt.Errorf("value for key %v is not a map.", k) - } - - valueToMatch, ok := typedV[key] - if ok && valueToMatch == value { - return typedV, k, true, nil - } - } - - return nil, 0, false, nil -} - -// This function takes a JSON map and sorts all the lists that should be merged -// by key. This is needed by tests because in JSON, list order is significant, -// but in Strategic Merge Patch, merge lists do not have significant order. -// Sorting the lists allows for order-insensitive comparison of patched maps. -func sortMergeListsByName(mapJSON []byte, dataStruct interface{}) ([]byte, error) { - var m map[string]interface{} - err := json.Unmarshal(mapJSON, &m) - if err != nil { - return nil, err - } - - newM, err := sortMergeListsByNameMap(m, reflect.TypeOf(dataStruct)) - if err != nil { - return nil, err - } - - return json.Marshal(newM) -} - -func sortMergeListsByNameMap(s map[string]interface{}, t reflect.Type) (map[string]interface{}, error) { - newS := map[string]interface{}{} - for k, v := range s { - if k != directiveMarker { - fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, k) - if err != nil { - return nil, err - } - - // If v is a map or a merge slice, recurse. - if typedV, ok := v.(map[string]interface{}); ok { - var err error - v, err = sortMergeListsByNameMap(typedV, fieldType) - if err != nil { - return nil, err - } - } else if typedV, ok := v.([]interface{}); ok { - if fieldPatchStrategy == mergeDirective { - var err error - v, err = sortMergeListsByNameArray(typedV, fieldType.Elem(), fieldPatchMergeKey, true) - if err != nil { - return nil, err - } - } - } - } - - newS[k] = v - } - - return newS, nil -} - -func sortMergeListsByNameArray(s []interface{}, elemType reflect.Type, mergeKey string, recurse bool) ([]interface{}, error) { - if len(s) == 0 { - return s, nil - } - - // We don't support lists of lists yet. - t, err := sliceElementType(s) - if err != nil { - return nil, err - } - - // If the elements are not maps... - if t.Kind() != reflect.Map { - // Sort the elements, because they may have been merged out of order. - return uniqifyAndSortScalars(s), nil - } - - // Elements are maps - if one of the keys of the map is a map or a - // list, we may need to recurse into it. - newS := []interface{}{} - for _, elem := range s { - if recurse { - typedElem := elem.(map[string]interface{}) - newElem, err := sortMergeListsByNameMap(typedElem, elemType) - if err != nil { - return nil, err - } - - newS = append(newS, newElem) - } else { - newS = append(newS, elem) - } - } - - // Sort the maps. 
- newS = sortMapsBasedOnField(newS, mergeKey) - return newS, nil -} - -func sortMapsBasedOnField(m []interface{}, fieldName string) []interface{} { - mapM := mapSliceFromSlice(m) - ss := SortableSliceOfMaps{mapM, fieldName} - sort.Sort(ss) - newS := sliceFromMapSlice(ss.s) - return newS -} - -func mapSliceFromSlice(m []interface{}) []map[string]interface{} { - newM := []map[string]interface{}{} - for _, v := range m { - vt := v.(map[string]interface{}) - newM = append(newM, vt) - } - - return newM -} - -func sliceFromMapSlice(s []map[string]interface{}) []interface{} { - newS := []interface{}{} - for _, v := range s { - newS = append(newS, v) - } - - return newS -} - -type SortableSliceOfMaps struct { - s []map[string]interface{} - k string // key to sort on -} - -func (ss SortableSliceOfMaps) Len() int { - return len(ss.s) -} - -func (ss SortableSliceOfMaps) Less(i, j int) bool { - iStr := fmt.Sprintf("%v", ss.s[i][ss.k]) - jStr := fmt.Sprintf("%v", ss.s[j][ss.k]) - return sort.StringsAreSorted([]string{iStr, jStr}) -} - -func (ss SortableSliceOfMaps) Swap(i, j int) { - tmp := ss.s[i] - ss.s[i] = ss.s[j] - ss.s[j] = tmp -} - -func uniqifyAndSortScalars(s []interface{}) []interface{} { - s = uniqifyScalars(s) - - ss := SortableSliceOfScalars{s} - sort.Sort(ss) - return ss.s -} - -func uniqifyScalars(s []interface{}) []interface{} { - // Clever algorithm to uniqify. - length := len(s) - 1 - for i := 0; i < length; i++ { - for j := i + 1; j <= length; j++ { - if s[i] == s[j] { - s[j] = s[length] - s = s[0:length] - length-- - j-- - } - } - } - - return s -} - -type SortableSliceOfScalars struct { - s []interface{} -} - -func (ss SortableSliceOfScalars) Len() int { - return len(ss.s) -} - -func (ss SortableSliceOfScalars) Less(i, j int) bool { - iStr := fmt.Sprintf("%v", ss.s[i]) - jStr := fmt.Sprintf("%v", ss.s[j]) - return sort.StringsAreSorted([]string{iStr, jStr}) -} - -func (ss SortableSliceOfScalars) Swap(i, j int) { - tmp := ss.s[i] - ss.s[i] = ss.s[j] - ss.s[j] = tmp -} - -// Returns the type of the elements of N slice(s). If the type is different, -// another slice or undefined, returns an error. -func sliceElementType(slices ...[]interface{}) (reflect.Type, error) { - var prevType reflect.Type - for _, s := range slices { - // Go through elements of all given slices and make sure they are all the same type. - for _, v := range s { - currentType := reflect.TypeOf(v) - if prevType == nil { - prevType = currentType - // We don't support lists of lists yet. - if prevType.Kind() == reflect.Slice { - return nil, errNoListOfLists - } - } else { - if prevType != currentType { - return nil, fmt.Errorf("list element types are not identical: %v", fmt.Sprint(slices)) - } - prevType = currentType - } - } - } - - if prevType == nil { - return nil, fmt.Errorf("no elements in any of the given slices") - } - - return prevType, nil -} - -// HasConflicts returns true if the left and right JSON interface objects overlap with -// different values in any key. All keys are required to be strings. Since patches of the -// same Type have congruent keys, this is valid for multiple patch types. This method -// supports JSON merge patch semantics. 
-func HasConflicts(left, right interface{}) (bool, error) { - switch typedLeft := left.(type) { - case map[string]interface{}: - switch typedRight := right.(type) { - case map[string]interface{}: - for key, leftValue := range typedLeft { - rightValue, ok := typedRight[key] - if !ok { - return false, nil - } - return HasConflicts(leftValue, rightValue) - } - - return false, nil - default: - return true, nil - } - case []interface{}: - switch typedRight := right.(type) { - case []interface{}: - if len(typedLeft) != len(typedRight) { - return true, nil - } - - for i := range typedLeft { - return HasConflicts(typedLeft[i], typedRight[i]) - } - - return false, nil - default: - return true, nil - } - case string, float64, bool, int, int64, nil: - return !reflect.DeepEqual(left, right), nil - default: - return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) - } -} - -// MergingMapsHaveConflicts returns true if the left and right JSON interface -// objects overlap with different values in any key. All keys are required to be -// strings. Since patches of the same Type have congruent keys, this is valid -// for multiple patch types. This method supports strategic merge patch semantics. -func MergingMapsHaveConflicts(left, right map[string]interface{}, dataStruct interface{}) (bool, error) { - t, err := getTagStructType(dataStruct) - if err != nil { - return true, err - } - - return mergingMapFieldsHaveConflicts(left, right, t, "", "") -} - -func mergingMapFieldsHaveConflicts( - left, right interface{}, - fieldType reflect.Type, - fieldPatchStrategy, fieldPatchMergeKey string, -) (bool, error) { - switch leftType := left.(type) { - case map[string]interface{}: - switch rightType := right.(type) { - case map[string]interface{}: - leftMarker, okLeft := leftType[directiveMarker] - rightMarker, okRight := rightType[directiveMarker] - // if one or the other has a directive marker, - // then we need to consider that before looking at the individual keys, - // since a directive operates on the whole map. - if okLeft || okRight { - // if one has a directive marker and the other doesn't, - // then we have a conflict, since one is deleting or replacing the whole map, - // and the other is doing things to individual keys. - if okLeft != okRight { - return true, nil - } - - // if they both have markers, but they are not the same directive, - // then we have a conflict because they're doing different things to the map. - if leftMarker != rightMarker { - return true, nil - } - } - - // Check the individual keys. 
- return mapsHaveConflicts(leftType, rightType, fieldType) - default: - return true, nil - } - case []interface{}: - switch rightType := right.(type) { - case []interface{}: - return slicesHaveConflicts(leftType, rightType, fieldType, fieldPatchStrategy, fieldPatchMergeKey) - default: - return true, nil - } - case string, float64, bool, int, int64, nil: - return !reflect.DeepEqual(left, right), nil - default: - return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) - } -} - -func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, structType reflect.Type) (bool, error) { - for key, leftValue := range typedLeft { - if key != directiveMarker { - if rightValue, ok := typedRight[key]; ok { - fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(structType, key) - if err != nil { - return true, err - } - - if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, - fieldType, fieldPatchStrategy, fieldPatchMergeKey); hasConflicts { - return true, err - } - } - } - } - - return false, nil -} - -func slicesHaveConflicts( - typedLeft, typedRight []interface{}, - fieldType reflect.Type, - fieldPatchStrategy, fieldPatchMergeKey string, -) (bool, error) { - elementType, err := sliceElementType(typedLeft, typedRight) - if err != nil { - return true, err - } - - valueType := fieldType.Elem() - if fieldPatchStrategy == mergeDirective { - // Merging lists of scalars have no conflicts by definition - // So we only need to check further if the elements are maps - if elementType.Kind() != reflect.Map { - return false, nil - } - - // Build a map for each slice and then compare the two maps - leftMap, err := sliceOfMapsToMapOfMaps(typedLeft, fieldPatchMergeKey) - if err != nil { - return true, err - } - - rightMap, err := sliceOfMapsToMapOfMaps(typedRight, fieldPatchMergeKey) - if err != nil { - return true, err - } - - return mapsOfMapsHaveConflicts(leftMap, rightMap, valueType) - } - - // Either we don't have type information, or these are non-merging lists - if len(typedLeft) != len(typedRight) { - return true, nil - } - - // Sort scalar slices to prevent ordering issues - // We have no way to sort non-merging lists of maps - if elementType.Kind() != reflect.Map { - typedLeft = uniqifyAndSortScalars(typedLeft) - typedRight = uniqifyAndSortScalars(typedRight) - } - - // Compare the slices element by element in order - // This test will fail if the slices are not sorted - for i := range typedLeft { - if hasConflicts, err := mergingMapFieldsHaveConflicts(typedLeft[i], typedRight[i], valueType, "", ""); hasConflicts { - return true, err - } - } - - return false, nil -} - -func sliceOfMapsToMapOfMaps(slice []interface{}, mergeKey string) (map[string]interface{}, error) { - result := make(map[string]interface{}, len(slice)) - for _, value := range slice { - typedValue, ok := value.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("invalid element type in merging list:%v", slice) - } - - mergeValue, ok := typedValue[mergeKey] - if !ok { - return nil, fmt.Errorf("cannot find merge key `%s` in merging list element:%v", mergeKey, typedValue) - } - - result[fmt.Sprintf("%s", mergeValue)] = typedValue - } - - return result, nil -} - -func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, structType reflect.Type) (bool, error) { - for key, leftValue := range typedLeft { - if rightValue, ok := typedRight[key]; ok { - if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, structType, "", ""); 
hasConflicts { - return true, err - } - } - } - - return false, nil -} - -// CreateThreeWayMergePatch reconciles a modified configuration with an original configuration, -// while preserving any changes or deletions made to the original configuration in the interim, -// and not overridden by the current configuration. All three documents must be passed to the -// method as json encoded content. It will return a strategic merge patch, or an error if any -// of the documents is invalid, or if there are any preconditions that fail against the modified -// configuration, or, if overwrite is false and there are conflicts between the modified and current -// configurations. Conflicts are defined as keys changed differently from original to modified -// than from original to current. In other words, a conflict occurs if modified changes any key -// in a way that is different from how it is changed in current (e.g., deleting it, changing its -// value). -func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct interface{}, overwrite bool, fns ...PreconditionFunc) ([]byte, error) { - originalMap := map[string]interface{}{} - if len(original) > 0 { - if err := json.Unmarshal(original, &originalMap); err != nil { - return nil, errBadJSONDoc - } - } - - modifiedMap := map[string]interface{}{} - if len(modified) > 0 { - if err := json.Unmarshal(modified, &modifiedMap); err != nil { - return nil, errBadJSONDoc - } - } - - currentMap := map[string]interface{}{} - if len(current) > 0 { - if err := json.Unmarshal(current, &currentMap); err != nil { - return nil, errBadJSONDoc - } - } - - t, err := getTagStructType(dataStruct) - if err != nil { - return nil, err - } - - // The patch is the difference from current to modified without deletions, plus deletions - // from original to modified. To find it, we compute deletions, which are the deletions from - // original to modified, and delta, which is the difference from current to modified without - // deletions, and then apply delta to deletions as a patch, which should be strictly additive. - deltaMap, err := diffMaps(currentMap, modifiedMap, t, false, true) - if err != nil { - return nil, err - } - - deletionsMap, err := diffMaps(originalMap, modifiedMap, t, true, false) - if err != nil { - return nil, err - } - - patchMap, err := mergeMap(deletionsMap, deltaMap, t) - if err != nil { - return nil, err - } - - // Apply the preconditions to the patch, and return an error if any of them fail. - for _, fn := range fns { - if !fn(patchMap) { - return nil, newErrPreconditionFailed(patchMap) - } - } - - // If overwrite is false, and the patch contains any keys that were changed differently, - // then return a conflict error. 
- if !overwrite { - changedMap, err := diffMaps(originalMap, currentMap, t, false, false) - if err != nil { - return nil, err - } - - hasConflicts, err := MergingMapsHaveConflicts(patchMap, changedMap, dataStruct) - if err != nil { - return nil, err - } - - if hasConflicts { - return nil, newErrConflict(toYAMLOrError(patchMap), toYAMLOrError(changedMap)) - } - } - - return json.Marshal(patchMap) -} - -func toYAMLOrError(v interface{}) string { - y, err := toYAML(v) - if err != nil { - return err.Error() - } - - return y -} - -func toYAML(v interface{}) (string, error) { - y, err := yaml.Marshal(v) - if err != nil { - return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, spew.Sdump(v)) - } - - return string(y), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/sysctl/BUILD b/vendor/k8s.io/kubernetes/pkg/util/sysctl/BUILD index 3fe03e7a05..0819393af9 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/sysctl/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/sysctl/BUILD @@ -4,10 +4,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", - "go_test", - "cgo_library", ) go_library( @@ -15,3 +12,19 @@ go_library( srcs = ["sysctl.go"], tags = ["automanaged"], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/util/sysctl/testing:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/util/trace.go b/vendor/k8s.io/kubernetes/pkg/util/trace.go deleted file mode 100644 index fe93db8df6..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/trace.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package util - -import ( - "bytes" - "fmt" - "time" - - "github.com/golang/glog" -) - -type traceStep struct { - stepTime time.Time - msg string -} - -type Trace struct { - name string - startTime time.Time - steps []traceStep -} - -func NewTrace(name string) *Trace { - return &Trace{name, time.Now(), nil} -} - -func (t *Trace) Step(msg string) { - if t.steps == nil { - // traces almost always have less than 6 steps, do this to avoid more than a single allocation - t.steps = make([]traceStep, 0, 6) - } - t.steps = append(t.steps, traceStep{time.Now(), msg}) -} - -func (t *Trace) Log() { - endTime := time.Now() - var buffer bytes.Buffer - - buffer.WriteString(fmt.Sprintf("Trace %q (started %v):\n", t.name, t.startTime)) - lastStepTime := t.startTime - for _, step := range t.steps { - buffer.WriteString(fmt.Sprintf("[%v] [%v] %v\n", step.stepTime.Sub(t.startTime), step.stepTime.Sub(lastStepTime), step.msg)) - lastStepTime = step.stepTime - } - buffer.WriteString(fmt.Sprintf("[%v] [%v] END\n", endTime.Sub(t.startTime), endTime.Sub(lastStepTime))) - glog.Info(buffer.String()) -} - -func (t *Trace) LogIfLong(threshold time.Duration) { - if time.Since(t.startTime) >= threshold { - t.Log() - } -} - -func (t *Trace) TotalTime() time.Duration { - return time.Since(t.startTime) -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/trie.go b/vendor/k8s.io/kubernetes/pkg/util/trie.go deleted file mode 100644 index 0eeac436dc..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/trie.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -// A simple trie implementation with Add an HasPrefix methods only. -type Trie struct { - children map[byte]*Trie - wordTail bool - word string -} - -// CreateTrie creates a Trie and add all strings in the provided list to it. -func CreateTrie(list []string) Trie { - ret := Trie{ - children: make(map[byte]*Trie), - wordTail: false, - } - for _, v := range list { - ret.Add(v) - } - return ret -} - -// Add adds a word to this trie -func (t *Trie) Add(v string) { - root := t - for _, b := range []byte(v) { - child, exists := root.children[b] - if !exists { - child = &Trie{ - children: make(map[byte]*Trie), - wordTail: false, - } - root.children[b] = child - } - root = child - } - root.wordTail = true - root.word = v -} - -// HasPrefix returns true of v has any of the prefixes stored in this trie. -func (t *Trie) HasPrefix(v string) bool { - _, has := t.GetPrefix(v) - return has -} - -// GetPrefix is like HasPrefix but return the prefix in case of match or empty string otherwise. 
-func (t *Trie) GetPrefix(v string) (string, bool) { - root := t - if root.wordTail { - return root.word, true - } - for _, b := range []byte(v) { - child, exists := root.children[b] - if !exists { - return "", false - } - if child.wordTail { - return child.word, true - } - root = child - } - return "", false -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/umask_windows.go b/vendor/k8s.io/kubernetes/pkg/util/umask_windows.go index 8c1b2cbc7d..7a1ba15386 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/umask_windows.go +++ b/vendor/k8s.io/kubernetes/pkg/util/umask_windows.go @@ -22,6 +22,6 @@ import ( "errors" ) -func Umask(mask int) (old int, err error) { +func Umask(mask int) (int, error) { return 0, errors.New("platform and architecture is not supported") } diff --git a/vendor/k8s.io/kubernetes/pkg/util/uuid/BUILD b/vendor/k8s.io/kubernetes/pkg/util/uuid/BUILD deleted file mode 100644 index f24fb9eb2b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/uuid/BUILD +++ /dev/null @@ -1,21 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["uuid.go"], - tags = ["automanaged"], - deps = [ - "//pkg/types:go_default_library", - "//vendor:github.com/pborman/uuid", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/util/validation/BUILD deleted file mode 100644 index bd164eb17b..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/validation/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["validation.go"], - tags = ["automanaged"], -) - -go_test( - name = "go_default_test", - srcs = ["validation_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/validation/field/BUILD b/vendor/k8s.io/kubernetes/pkg/util/validation/field/BUILD deleted file mode 100644 index 81339eb649..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/validation/field/BUILD +++ /dev/null @@ -1,32 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "errors.go", - "path.go", - ], - tags = ["automanaged"], - deps = ["//pkg/util/errors:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = [ - "errors_test.go", - "path_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/validation/field/errors.go b/vendor/k8s.io/kubernetes/pkg/util/validation/field/errors.go deleted file mode 100644 index b4a6c5cd59..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/validation/field/errors.go +++ /dev/null @@ -1,228 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package field - -import ( - "encoding/json" - "fmt" - "strings" - - utilerrors "k8s.io/kubernetes/pkg/util/errors" -) - -// Error is an implementation of the 'error' interface, which represents a -// field-level validation error. -type Error struct { - Type ErrorType - Field string - BadValue interface{} - Detail string -} - -var _ error = &Error{} - -// Error implements the error interface. -func (v *Error) Error() string { - return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody()) -} - -// ErrorBody returns the error message without the field name. This is useful -// for building nice-looking higher-level error reporting. -func (v *Error) ErrorBody() string { - var s string - switch v.Type { - case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal: - s = fmt.Sprintf("%s", v.Type) - default: - var bad string - badBytes, err := json.Marshal(v.BadValue) - if err != nil { - bad = err.Error() - } else { - bad = string(badBytes) - } - s = fmt.Sprintf("%s: %s", v.Type, bad) - } - if len(v.Detail) != 0 { - s += fmt.Sprintf(": %s", v.Detail) - } - return s -} - -// ErrorType is a machine readable value providing more detail about why -// a field is invalid. These values are expected to match 1-1 with -// CauseType in api/types.go. -type ErrorType string - -// TODO: These values are duplicated in api/types.go, but there's a circular dep. Fix it. -const ( - // ErrorTypeNotFound is used to report failure to find a requested value - // (e.g. looking up an ID). See NotFound(). - ErrorTypeNotFound ErrorType = "FieldValueNotFound" - // ErrorTypeRequired is used to report required values that are not - // provided (e.g. empty strings, null values, or empty arrays). See - // Required(). - ErrorTypeRequired ErrorType = "FieldValueRequired" - // ErrorTypeDuplicate is used to report collisions of values that must be - // unique (e.g. unique IDs). See Duplicate(). - ErrorTypeDuplicate ErrorType = "FieldValueDuplicate" - // ErrorTypeInvalid is used to report malformed values (e.g. failed regex - // match, too long, out of bounds). See Invalid(). - ErrorTypeInvalid ErrorType = "FieldValueInvalid" - // ErrorTypeNotSupported is used to report unknown values for enumerated - // fields (e.g. a list of valid values). See NotSupported(). - ErrorTypeNotSupported ErrorType = "FieldValueNotSupported" - // ErrorTypeForbidden is used to report valid (as per formatting rules) - // values which would be accepted under some conditions, but which are not - // permitted by the current conditions (such as security policy). See - // Forbidden(). - ErrorTypeForbidden ErrorType = "FieldValueForbidden" - // ErrorTypeTooLong is used to report that the given value is too long. - // This is similar to ErrorTypeInvalid, but the error will not include the - // too-long value. See TooLong(). - ErrorTypeTooLong ErrorType = "FieldValueTooLong" - // ErrorTypeInternal is used to report other errors that are not related - // to user input. See InternalError(). - ErrorTypeInternal ErrorType = "InternalError" -) - -// String converts a ErrorType into its corresponding canonical error message. 
-func (t ErrorType) String() string { - switch t { - case ErrorTypeNotFound: - return "Not found" - case ErrorTypeRequired: - return "Required value" - case ErrorTypeDuplicate: - return "Duplicate value" - case ErrorTypeInvalid: - return "Invalid value" - case ErrorTypeNotSupported: - return "Unsupported value" - case ErrorTypeForbidden: - return "Forbidden" - case ErrorTypeTooLong: - return "Too long" - case ErrorTypeInternal: - return "Internal error" - default: - panic(fmt.Sprintf("unrecognized validation error: %q", string(t))) - } -} - -// NotFound returns a *Error indicating "value not found". This is -// used to report failure to find a requested value (e.g. looking up an ID). -func NotFound(field *Path, value interface{}) *Error { - return &Error{ErrorTypeNotFound, field.String(), value, ""} -} - -// Required returns a *Error indicating "value required". This is used -// to report required values that are not provided (e.g. empty strings, null -// values, or empty arrays). -func Required(field *Path, detail string) *Error { - return &Error{ErrorTypeRequired, field.String(), "", detail} -} - -// Duplicate returns a *Error indicating "duplicate value". This is -// used to report collisions of values that must be unique (e.g. names or IDs). -func Duplicate(field *Path, value interface{}) *Error { - return &Error{ErrorTypeDuplicate, field.String(), value, ""} -} - -// Invalid returns a *Error indicating "invalid value". This is used -// to report malformed values (e.g. failed regex match, too long, out of bounds). -func Invalid(field *Path, value interface{}, detail string) *Error { - return &Error{ErrorTypeInvalid, field.String(), value, detail} -} - -// NotSupported returns a *Error indicating "unsupported value". -// This is used to report unknown values for enumerated fields (e.g. a list of -// valid values). -func NotSupported(field *Path, value interface{}, validValues []string) *Error { - detail := "" - if validValues != nil && len(validValues) > 0 { - detail = "supported values: " + strings.Join(validValues, ", ") - } - return &Error{ErrorTypeNotSupported, field.String(), value, detail} -} - -// Forbidden returns a *Error indicating "forbidden". This is used to -// report valid (as per formatting rules) values which would be accepted under -// some conditions, but which are not permitted by current conditions (e.g. -// security policy). -func Forbidden(field *Path, detail string) *Error { - return &Error{ErrorTypeForbidden, field.String(), "", detail} -} - -// TooLong returns a *Error indicating "too long". This is used to -// report that the given value is too long. This is similar to -// Invalid, but the returned error will not include the too-long -// value. -func TooLong(field *Path, value interface{}, maxLength int) *Error { - return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d characters", maxLength)} -} - -// InternalError returns a *Error indicating "internal error". This is used -// to signal that an error was found that was not directly related to user -// input. The err argument must be non-nil. -func InternalError(field *Path, err error) *Error { - return &Error{ErrorTypeInternal, field.String(), nil, err.Error()} -} - -// ErrorList holds a set of Errors. It is plausible that we might one day have -// non-field errors in this same umbrella package, but for now we don't, so -// we can keep it simple and leave ErrorList here. 
-type ErrorList []*Error - -// NewErrorTypeMatcher returns an errors.Matcher that returns true -// if the provided error is a Error and has the provided ErrorType. -func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher { - return func(err error) bool { - if e, ok := err.(*Error); ok { - return e.Type == t - } - return false - } -} - -// ToAggregate converts the ErrorList into an errors.Aggregate. -func (list ErrorList) ToAggregate() utilerrors.Aggregate { - errs := make([]error, len(list)) - for i := range list { - errs[i] = list[i] - } - return utilerrors.NewAggregate(errs) -} - -func fromAggregate(agg utilerrors.Aggregate) ErrorList { - errs := agg.Errors() - list := make(ErrorList, len(errs)) - for i := range errs { - list[i] = errs[i].(*Error) - } - return list -} - -// Filter removes items from the ErrorList that match the provided fns. -func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList { - err := utilerrors.FilterOut(list.ToAggregate(), fns...) - if err == nil { - return nil - } - // FilterOut takes an Aggregate and returns an Aggregate - return fromAggregate(err.(utilerrors.Aggregate)) -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/util/validation/validation.go deleted file mode 100644 index aaf6319088..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/validation/validation.go +++ /dev/null @@ -1,334 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "fmt" - "math" - "net" - "regexp" - "strings" -) - -const qnameCharFmt string = "[A-Za-z0-9]" -const qnameExtCharFmt string = "[-A-Za-z0-9_.]" -const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" + qnameCharFmt -const qualifiedNameMaxLength int = 63 - -var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$") - -// IsQualifiedName tests whether the value passed is what Kubernetes calls a -// "qualified name". This is a format used in various places throughout the -// system. If the value is not valid, a list of error strings is returned. -// Otherwise an empty list (or nil) is returned. -func IsQualifiedName(value string) []string { - var errs []string - parts := strings.Split(value, "/") - var name string - switch len(parts) { - case 1: - name = parts[0] - case 2: - var prefix string - prefix, name = parts[0], parts[1] - if len(prefix) == 0 { - errs = append(errs, "prefix part "+EmptyError()) - } else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 { - errs = append(errs, prefixEach(msgs, "prefix part ")...) - } - default: - return append(errs, RegexError(qualifiedNameFmt, "MyName", "my.name", "123-abc")+ - " with an optional DNS subdomain prefix and '/' (e.g. 
'example.com/MyName'") - } - - if len(name) == 0 { - errs = append(errs, "name part "+EmptyError()) - } else if len(name) > qualifiedNameMaxLength { - errs = append(errs, "name part "+MaxLenError(qualifiedNameMaxLength)) - } - if !qualifiedNameRegexp.MatchString(name) { - errs = append(errs, "name part "+RegexError(qualifiedNameFmt, "MyName", "my.name", "123-abc")) - } - return errs -} - -const labelValueFmt string = "(" + qualifiedNameFmt + ")?" -const LabelValueMaxLength int = 63 - -var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$") - -// IsValidLabelValue tests whether the value passed is a valid label value. If -// the value is not valid, a list of error strings is returned. Otherwise an -// empty list (or nil) is returned. -func IsValidLabelValue(value string) []string { - var errs []string - if len(value) > LabelValueMaxLength { - errs = append(errs, MaxLenError(LabelValueMaxLength)) - } - if !labelValueRegexp.MatchString(value) { - errs = append(errs, RegexError(labelValueFmt, "MyValue", "my_value", "12345")) - } - return errs -} - -const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" -const DNS1123LabelMaxLength int = 63 - -var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$") - -// IsDNS1123Label tests for a string that conforms to the definition of a label in -// DNS (RFC 1123). -func IsDNS1123Label(value string) []string { - var errs []string - if len(value) > DNS1123LabelMaxLength { - errs = append(errs, MaxLenError(DNS1123LabelMaxLength)) - } - if !dns1123LabelRegexp.MatchString(value) { - errs = append(errs, RegexError(dns1123LabelFmt, "my-name", "123-abc")) - } - return errs -} - -const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" -const DNS1123SubdomainMaxLength int = 253 - -var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") - -// IsDNS1123Subdomain tests for a string that conforms to the definition of a -// subdomain in DNS (RFC 1123). -func IsDNS1123Subdomain(value string) []string { - var errs []string - if len(value) > DNS1123SubdomainMaxLength { - errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) - } - if !dns1123SubdomainRegexp.MatchString(value) { - errs = append(errs, RegexError(dns1123SubdomainFmt, "example.com")) - } - return errs -} - -const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" -const DNS1035LabelMaxLength int = 63 - -var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$") - -// IsDNS1035Label tests for a string that conforms to the definition of a label in -// DNS (RFC 1035). -func IsDNS1035Label(value string) []string { - var errs []string - if len(value) > DNS1035LabelMaxLength { - errs = append(errs, MaxLenError(DNS1035LabelMaxLength)) - } - if !dns1035LabelRegexp.MatchString(value) { - errs = append(errs, RegexError(dns1035LabelFmt, "my-name", "abc-123")) - } - return errs -} - -// wildcard definition - RFC 1034 section 4.3.3. -// examples: -// - valid: *.bar.com, *.foo.bar.com -// - invalid: *.*.bar.com, *.foo.*.com, *bar.com, f*.bar.com, * -const wildcardDNF1123SubdomainFmt = "\\*\\." + dns1123SubdomainFmt - -// IsWildcardDNS1123Subdomain tests for a string that conforms to the definition of a -// wildcard subdomain in DNS (RFC 1034 section 4.3.3). -func IsWildcardDNS1123Subdomain(value string) []string { - wildcardDNS1123SubdomainRegexp := regexp.MustCompile("^\\*\\." 
+ dns1123SubdomainFmt + "$") - - var errs []string - if len(value) > DNS1123SubdomainMaxLength { - errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) - } - if !wildcardDNS1123SubdomainRegexp.MatchString(value) { - errs = append(errs, RegexError(wildcardDNF1123SubdomainFmt, "*.example.com")) - } - return errs -} - -const cIdentifierFmt string = "[A-Za-z_][A-Za-z0-9_]*" - -var cIdentifierRegexp = regexp.MustCompile("^" + cIdentifierFmt + "$") - -// IsCIdentifier tests for a string that conforms the definition of an identifier -// in C. This checks the format, but not the length. -func IsCIdentifier(value string) []string { - if !cIdentifierRegexp.MatchString(value) { - return []string{RegexError(cIdentifierFmt, "my_name", "MY_NAME", "MyName")} - } - return nil -} - -// IsValidPortNum tests that the argument is a valid, non-zero port number. -func IsValidPortNum(port int) []string { - if 1 <= port && port <= 65535 { - return nil - } - return []string{InclusiveRangeError(1, 65535)} -} - -// Now in libcontainer UID/GID limits is 0 ~ 1<<31 - 1 -// TODO: once we have a type for UID/GID we should make these that type. -const ( - minUserID = 0 - maxUserID = math.MaxInt32 - minGroupID = 0 - maxGroupID = math.MaxInt32 -) - -// IsValidGroupId tests that the argument is a valid Unix GID. -func IsValidGroupId(gid int64) []string { - if minGroupID <= gid && gid <= maxGroupID { - return nil - } - return []string{InclusiveRangeError(minGroupID, maxGroupID)} -} - -// IsValidUserId tests that the argument is a valid Unix UID. -func IsValidUserId(uid int64) []string { - if minUserID <= uid && uid <= maxUserID { - return nil - } - return []string{InclusiveRangeError(minUserID, maxUserID)} -} - -var portNameCharsetRegex = regexp.MustCompile("^[-a-z0-9]+$") -var portNameOneLetterRegexp = regexp.MustCompile("[a-z]") - -// IsValidPortName check that the argument is valid syntax. It must be -// non-empty and no more than 15 characters long. It may contain only [-a-z0-9] -// and must contain at least one letter [a-z]. It must not start or end with a -// hyphen, nor contain adjacent hyphens. -// -// Note: We only allow lower-case characters, even though RFC 6335 is case -// insensitive. -func IsValidPortName(port string) []string { - var errs []string - if len(port) > 15 { - errs = append(errs, MaxLenError(15)) - } - if !portNameCharsetRegex.MatchString(port) { - errs = append(errs, "must contain only alpha-numeric characters (a-z, 0-9), and hyphens (-)") - } - if !portNameOneLetterRegexp.MatchString(port) { - errs = append(errs, "must contain at least one letter or number (a-z, 0-9)") - } - if strings.Contains(port, "--") { - errs = append(errs, "must not contain consecutive hyphens") - } - if len(port) > 0 && (port[0] == '-' || port[len(port)-1] == '-') { - errs = append(errs, "must not begin or end with a hyphen") - } - return errs -} - -// IsValidIP tests that the argument is a valid IP address. -func IsValidIP(value string) []string { - if net.ParseIP(value) == nil { - return []string{"must be a valid IP address, (e.g. 
10.9.8.7)"} - } - return nil -} - -const percentFmt string = "[0-9]+%" - -var percentRegexp = regexp.MustCompile("^" + percentFmt + "$") - -func IsValidPercent(percent string) []string { - if !percentRegexp.MatchString(percent) { - return []string{RegexError(percentFmt, "1%", "93%")} - } - return nil -} - -const httpHeaderNameFmt string = "[-A-Za-z0-9]+" - -var httpHeaderNameRegexp = regexp.MustCompile("^" + httpHeaderNameFmt + "$") - -// IsHTTPHeaderName checks that a string conforms to the Go HTTP library's -// definition of a valid header field name (a stricter subset than RFC7230). -func IsHTTPHeaderName(value string) []string { - if !httpHeaderNameRegexp.MatchString(value) { - return []string{RegexError(httpHeaderNameFmt, "X-Header-Name")} - } - return nil -} - -const configMapKeyFmt = `[-._a-zA-Z0-9]+` - -var configMapKeyRegexp = regexp.MustCompile("^" + configMapKeyFmt + "$") - -// IsConfigMapKey tests for a string that is a valid key for a ConfigMap or Secret -func IsConfigMapKey(value string) []string { - var errs []string - if len(value) > DNS1123SubdomainMaxLength { - errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) - } - if !configMapKeyRegexp.MatchString(value) { - errs = append(errs, RegexError(configMapKeyFmt, "key.name", "KEY_NAME", "key-name")) - } - if value == "." { - errs = append(errs, `must not be '.'`) - } - if value == ".." { - errs = append(errs, `must not be '..'`) - } else if strings.HasPrefix(value, "..") { - errs = append(errs, `must not start with '..'`) - } - return errs -} - -// MaxLenError returns a string explanation of a "string too long" validation -// failure. -func MaxLenError(length int) string { - return fmt.Sprintf("must be no more than %d characters", length) -} - -// RegexError returns a string explanation of a regex validation failure. -func RegexError(fmt string, examples ...string) string { - s := "must match the regex " + fmt - if len(examples) == 0 { - return s - } - s += " (e.g. " - for i := range examples { - if i > 0 { - s += " or " - } - s += "'" + examples[i] + "'" - } - return s + ")" -} - -// EmptyError returns a string explanation of a "must not be empty" validation -// failure. -func EmptyError() string { - return "must be non-empty" -} - -func prefixEach(msgs []string, prefix string) []string { - for i := range msgs { - msgs[i] = prefix + msgs[i] - } - return msgs -} - -// InclusiveRangeError returns a string explanation of a numeric "must be -// between" validation failure. 
-func InclusiveRangeError(lo, hi int) string { - return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi) -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/wait/BUILD b/vendor/k8s.io/kubernetes/pkg/util/wait/BUILD deleted file mode 100644 index adb9c94431..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/wait/BUILD +++ /dev/null @@ -1,29 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "wait.go", - ], - tags = ["automanaged"], - deps = ["//pkg/util/runtime:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = ["wait_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = ["//pkg/util/runtime:go_default_library"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/workqueue/BUILD b/vendor/k8s.io/kubernetes/pkg/util/workqueue/BUILD deleted file mode 100644 index 8b8e98d92a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/workqueue/BUILD +++ /dev/null @@ -1,55 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "default_rate_limiters.go", - "delaying_queue.go", - "doc.go", - "metrics.go", - "parallelizer.go", - "queue.go", - "rate_limitting_queue.go", - "timed_queue.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/util/clock:go_default_library", - "//pkg/util/runtime:go_default_library", - "//vendor:github.com/juju/ratelimit", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "default_rate_limiters_test.go", - "delaying_queue_test.go", - "rate_limitting_queue_test.go", - "timed_queue_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api/v1:go_default_library", - "//pkg/util/clock:go_default_library", - "//pkg/util/wait:go_default_library", - ], -) - -go_test( - name = "go_default_xtest", - srcs = ["queue_test.go"], - tags = ["automanaged"], - deps = ["//pkg/util/workqueue:go_default_library"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/yaml/BUILD b/vendor/k8s.io/kubernetes/pkg/util/yaml/BUILD deleted file mode 100644 index 71fdcaae00..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/yaml/BUILD +++ /dev/null @@ -1,29 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["decoder.go"], - tags = ["automanaged"], - deps = [ - "//vendor:github.com/ghodss/yaml", - "//vendor:github.com/golang/glog", - ], -) - -go_test( - name = "go_default_test", - srcs = ["decoder_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/yaml/decoder.go b/vendor/k8s.io/kubernetes/pkg/util/yaml/decoder.go deleted file mode 100644 index ed503d0390..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/yaml/decoder.go +++ /dev/null @@ -1,318 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package yaml - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "strings" - "unicode" - - "github.com/ghodss/yaml" - "github.com/golang/glog" -) - -// ToJSON converts a single YAML document into a JSON document -// or returns an error. If the document appears to be JSON the -// YAML decoding path is not used (so that error messages are -// JSON specific). -func ToJSON(data []byte) ([]byte, error) { - if hasJSONPrefix(data) { - return data, nil - } - return yaml.YAMLToJSON(data) -} - -// YAMLToJSONDecoder decodes YAML documents from an io.Reader by -// separating individual documents. It first converts the YAML -// body to JSON, then unmarshals the JSON. -type YAMLToJSONDecoder struct { - reader Reader -} - -// NewYAMLToJSONDecoder decodes YAML documents from the provided -// stream in chunks by converting each document (as defined by -// the YAML spec) into its own chunk, converting it to JSON via -// yaml.YAMLToJSON, and then passing it to json.Decoder. -func NewYAMLToJSONDecoder(r io.Reader) *YAMLToJSONDecoder { - reader := bufio.NewReader(r) - return &YAMLToJSONDecoder{ - reader: NewYAMLReader(reader), - } -} - -// Decode reads a YAML document as JSON from the stream or returns -// an error. The decoding rules match json.Unmarshal, not -// yaml.Unmarshal. -func (d *YAMLToJSONDecoder) Decode(into interface{}) error { - bytes, err := d.reader.Read() - if err != nil && err != io.EOF { - return err - } - - if len(bytes) != 0 { - data, err := yaml.YAMLToJSON(bytes) - if err != nil { - return err - } - return json.Unmarshal(data, into) - } - return err -} - -// YAMLDecoder reads chunks of objects and returns ErrShortBuffer if -// the data is not sufficient. -type YAMLDecoder struct { - r io.ReadCloser - scanner *bufio.Scanner - remaining []byte -} - -// NewDocumentDecoder decodes YAML documents from the provided -// stream in chunks by converting each document (as defined by -// the YAML spec) into its own chunk. io.ErrShortBuffer will be -// returned if the entire buffer could not be read to assist -// the caller in framing the chunk. -func NewDocumentDecoder(r io.ReadCloser) io.ReadCloser { - scanner := bufio.NewScanner(r) - scanner.Split(splitYAMLDocument) - return &YAMLDecoder{ - r: r, - scanner: scanner, - } -} - -// Read reads the previous slice into the buffer, or attempts to read -// the next chunk. -// TODO: switch to readline approach. 
-func (d *YAMLDecoder) Read(data []byte) (n int, err error) { - left := len(d.remaining) - if left == 0 { - // return the next chunk from the stream - if !d.scanner.Scan() { - err := d.scanner.Err() - if err == nil { - err = io.EOF - } - return 0, err - } - out := d.scanner.Bytes() - d.remaining = out - left = len(out) - } - - // fits within data - if left <= len(data) { - copy(data, d.remaining) - d.remaining = nil - return len(d.remaining), nil - } - - // caller will need to reread - copy(data, d.remaining[:left]) - d.remaining = d.remaining[left:] - return len(data), io.ErrShortBuffer -} - -func (d *YAMLDecoder) Close() error { - return d.r.Close() -} - -const yamlSeparator = "\n---" -const separator = "---" - -// splitYAMLDocument is a bufio.SplitFunc for splitting YAML streams into individual documents. -func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err error) { - if atEOF && len(data) == 0 { - return 0, nil, nil - } - sep := len([]byte(yamlSeparator)) - if i := bytes.Index(data, []byte(yamlSeparator)); i >= 0 { - // We have a potential document terminator - i += sep - after := data[i:] - if len(after) == 0 { - // we can't read any more characters - if atEOF { - return len(data), data[:len(data)-sep], nil - } - return 0, nil, nil - } - if j := bytes.IndexByte(after, '\n'); j >= 0 { - return i + j + 1, data[0 : i-sep], nil - } - return 0, nil, nil - } - // If we're at EOF, we have a final, non-terminated line. Return it. - if atEOF { - return len(data), data, nil - } - // Request more data. - return 0, nil, nil -} - -// decoder is a convenience interface for Decode. -type decoder interface { - Decode(into interface{}) error -} - -// YAMLOrJSONDecoder attempts to decode a stream of JSON documents or -// YAML documents by sniffing for a leading { character. -type YAMLOrJSONDecoder struct { - r io.Reader - bufferSize int - - decoder decoder -} - -// NewYAMLOrJSONDecoder returns a decoder that will process YAML documents -// or JSON documents from the given reader as a stream. bufferSize determines -// how far into the stream the decoder will look to figure out whether this -// is a JSON stream (has whitespace followed by an open brace). -func NewYAMLOrJSONDecoder(r io.Reader, bufferSize int) *YAMLOrJSONDecoder { - return &YAMLOrJSONDecoder{ - r: r, - bufferSize: bufferSize, - } -} - -// Decode unmarshals the next object from the underlying stream into the -// provide object, or returns an error. 
-func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { - if d.decoder == nil { - buffer, isJSON := GuessJSONStream(d.r, d.bufferSize) - if isJSON { - glog.V(4).Infof("decoding stream as JSON") - d.decoder = json.NewDecoder(buffer) - } else { - glog.V(4).Infof("decoding stream as YAML") - d.decoder = NewYAMLToJSONDecoder(buffer) - } - } - err := d.decoder.Decode(into) - if jsonDecoder, ok := d.decoder.(*json.Decoder); ok { - if syntax, ok := err.(*json.SyntaxError); ok { - data, readErr := ioutil.ReadAll(jsonDecoder.Buffered()) - if readErr != nil { - glog.V(4).Infof("reading stream failed: %v", readErr) - } - js := string(data) - start := strings.LastIndex(js[:syntax.Offset], "\n") + 1 - line := strings.Count(js[:start], "\n") - return fmt.Errorf("json: line %d: %s", line, syntax.Error()) - } - } - return err -} - -type Reader interface { - Read() ([]byte, error) -} - -type YAMLReader struct { - reader Reader -} - -func NewYAMLReader(r *bufio.Reader) *YAMLReader { - return &YAMLReader{ - reader: &LineReader{reader: r}, - } -} - -// Read returns a full YAML document. -func (r *YAMLReader) Read() ([]byte, error) { - var buffer bytes.Buffer - for { - line, err := r.reader.Read() - if err != nil && err != io.EOF { - return nil, err - } - - sep := len([]byte(separator)) - if i := bytes.Index(line, []byte(separator)); i == 0 { - // We have a potential document terminator - i += sep - after := line[i:] - if len(strings.TrimRightFunc(string(after), unicode.IsSpace)) == 0 { - if buffer.Len() != 0 { - return buffer.Bytes(), nil - } - if err == io.EOF { - return nil, err - } - } - } - if err == io.EOF { - if buffer.Len() != 0 { - // If we're at EOF, we have a final, non-terminated line. Return it. - return buffer.Bytes(), nil - } - return nil, err - } - buffer.Write(line) - } -} - -type LineReader struct { - reader *bufio.Reader -} - -// Read returns a single line (with '\n' ended) from the underlying reader. -// An error is returned iff there is an error with the underlying reader. -func (r *LineReader) Read() ([]byte, error) { - var ( - isPrefix bool = true - err error = nil - line []byte - buffer bytes.Buffer - ) - - for isPrefix && err == nil { - line, isPrefix, err = r.reader.ReadLine() - buffer.Write(line) - } - buffer.WriteByte('\n') - return buffer.Bytes(), err -} - -// GuessJSONStream scans the provided reader up to size, looking -// for an open brace indicating this is JSON. It will return the -// bufio.Reader it creates for the consumer. -func GuessJSONStream(r io.Reader, size int) (io.Reader, bool) { - buffer := bufio.NewReaderSize(r, size) - b, _ := buffer.Peek(size) - return buffer, hasJSONPrefix(b) -} - -var jsonPrefix = []byte("{") - -// hasJSONPrefix returns true if the provided buffer appears to start with -// a JSON open brace. -func hasJSONPrefix(buf []byte) bool { - return hasPrefix(buf, jsonPrefix) -} - -// Return true if the first non-whitespace bytes in buf is -// prefix. 
-func hasPrefix(buf []byte, prefix []byte) bool { - trim := bytes.TrimLeftFunc(buf, unicode.IsSpace) - return bytes.HasPrefix(trim, prefix) -} diff --git a/vendor/k8s.io/kubernetes/pkg/version/BUILD b/vendor/k8s.io/kubernetes/pkg/version/BUILD deleted file mode 100644 index ea9a21b6dd..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/version/BUILD +++ /dev/null @@ -1,34 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "base.go", - "doc.go", - "semver.go", - "version.go", - ], - tags = ["automanaged"], - deps = [ - "//vendor:github.com/blang/semver", - "//vendor:github.com/golang/glog", - ], -) - -go_test( - name = "go_default_test", - srcs = ["semver_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/pkg/version/doc.go b/vendor/k8s.io/kubernetes/pkg/version/doc.go deleted file mode 100644 index 3803957848..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/version/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package version supplies version information collected at build time to -// kubernetes components. -// +k8s:openapi-gen=true -package version diff --git a/vendor/k8s.io/kubernetes/pkg/version/semver.go b/vendor/k8s.io/kubernetes/pkg/version/semver.go deleted file mode 100644 index 1f4067e217..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/version/semver.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package version - -import ( - "strings" - "unicode" - - "github.com/blang/semver" - "github.com/golang/glog" -) - -func Parse(gitversion string) (semver.Version, error) { - // optionally trim leading spaces then one v - var seen bool - gitversion = strings.TrimLeftFunc(gitversion, func(ch rune) bool { - if seen { - return false - } - if ch == 'v' { - seen = true - return true - } - return unicode.IsSpace(ch) - }) - - return semver.Make(gitversion) -} - -func MustParse(gitversion string) semver.Version { - v, err := Parse(gitversion) - if err != nil { - glog.Fatalf("failed to parse semver from gitversion %q: %v", gitversion, err) - } - return v -} diff --git a/vendor/k8s.io/kubernetes/pkg/version/version.go b/vendor/k8s.io/kubernetes/pkg/version/version.go deleted file mode 100644 index 0da3aadde0..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/version/version.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package version - -import ( - "fmt" - "runtime" -) - -// Info contains versioning information. -// TODO: Add []string of api versions supported? It's still unclear -// how we'll want to distribute that information. -type Info struct { - Major string `json:"major"` - Minor string `json:"minor"` - GitVersion string `json:"gitVersion"` - GitCommit string `json:"gitCommit"` - GitTreeState string `json:"gitTreeState"` - BuildDate string `json:"buildDate"` - GoVersion string `json:"goVersion"` - Compiler string `json:"compiler"` - Platform string `json:"platform"` -} - -// Get returns the overall codebase version. It's for detecting -// what code a binary was built from. -func Get() Info { - // These variables typically come from -ldflags settings and in - // their absence fallback to the settings in pkg/version/base.go - return Info{ - Major: gitMajor, - Minor: gitMinor, - GitVersion: gitVersion, - GitCommit: gitCommit, - GitTreeState: gitTreeState, - BuildDate: buildDate, - GoVersion: runtime.Version(), - Compiler: runtime.Compiler, - Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), - } -} - -// String returns info as a human-friendly version string. 
-func (info Info) String() string { - return info.GitVersion -} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/BUILD index 6afd210bad..822ee8e2e4 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/BUILD @@ -4,10 +4,8 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", "go_test", - "cgo_library", ) go_library( @@ -26,23 +24,24 @@ go_library( ], tags = ["automanaged"], deps = [ - "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/resource:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", + "//pkg/api/v1:go_default_library", + "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/cloudprovider:go_default_library", - "//pkg/fields:go_default_library", - "//pkg/types:go_default_library", "//pkg/util/chmod:go_default_library", "//pkg/util/chown:go_default_library", - "//pkg/util/errors:go_default_library", "//pkg/util/io:go_default_library", "//pkg/util/mount:go_default_library", - "//pkg/util/sets:go_default_library", - "//pkg/util/validation:go_default_library", "//pkg/volume/util:go_default_library", - "//pkg/watch:go_default_library", "//vendor:github.com/golang/glog", + "//vendor:k8s.io/apimachinery/pkg/api/errors", + "//vendor:k8s.io/apimachinery/pkg/api/resource", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/fields", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/util/errors", + "//vendor:k8s.io/apimachinery/pkg/util/sets", + "//vendor:k8s.io/apimachinery/pkg/util/validation", + "//vendor:k8s.io/apimachinery/pkg/watch", ], ) @@ -53,23 +52,74 @@ go_test( "plugins_test.go", "util_test.go", ], - library = "go_default_library", + library = ":go_default_library", tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/errors:go_default_library", - "//pkg/api/resource:go_default_library", - "//pkg/watch:go_default_library", + "//pkg/api/v1:go_default_library", + "//pkg/util/slice:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/api/errors", + "//vendor:k8s.io/apimachinery/pkg/api/resource", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/types", + "//vendor:k8s.io/apimachinery/pkg/util/sets", + "//vendor:k8s.io/apimachinery/pkg/watch", ], ) go_test( name = "go_default_xtest", - srcs = ["metrics_du_test.go"], + srcs = [ + "metrics_du_test.go", + "metrics_statfs_test.go", + ], tags = ["automanaged"], deps = [ - "//pkg/util/testing:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", + "//vendor:k8s.io/client-go/util/testing", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/volume/aws_ebs:all-srcs", + "//pkg/volume/azure_dd:all-srcs", + "//pkg/volume/azure_file:all-srcs", + "//pkg/volume/cephfs:all-srcs", + "//pkg/volume/cinder:all-srcs", + "//pkg/volume/configmap:all-srcs", + "//pkg/volume/downwardapi:all-srcs", + "//pkg/volume/empty_dir:all-srcs", + "//pkg/volume/fc:all-srcs", + "//pkg/volume/flexvolume:all-srcs", + "//pkg/volume/flocker:all-srcs", + "//pkg/volume/gce_pd:all-srcs", + "//pkg/volume/git_repo:all-srcs", + "//pkg/volume/glusterfs:all-srcs", + "//pkg/volume/host_path:all-srcs", + "//pkg/volume/iscsi:all-srcs", 
+ "//pkg/volume/nfs:all-srcs", + "//pkg/volume/photon_pd:all-srcs", + "//pkg/volume/portworx:all-srcs", + "//pkg/volume/projected:all-srcs", + "//pkg/volume/quobyte:all-srcs", + "//pkg/volume/rbd:all-srcs", + "//pkg/volume/scaleio:all-srcs", + "//pkg/volume/secret:all-srcs", + "//pkg/volume/testing:all-srcs", + "//pkg/volume/util:all-srcs", + "//pkg/volume/validation:all-srcs", + "//pkg/volume/vsphere_volume:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/OWNERS index 01da542a1f..d9e47cfa92 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/OWNERS @@ -1,5 +1,20 @@ -assignees: - - saad-ali - - thockin - - pmorie - - jsafrane +approvers: +- saad-ali +- thockin +- jsafrane +- matchstick +reviewers: +- thockin +- smarterclayton +- deads2k +- brendandburns +- derekwaynecarr +- bprashanth +- pmorie +- saad-ali +- justinsb +- jsafrane +- rootfs +- jingxu97 +- rkouj +- msau42 diff --git a/vendor/k8s.io/kubernetes/pkg/volume/metrics_du.go b/vendor/k8s.io/kubernetes/pkg/volume/metrics_du.go index eef868da6d..19a29cbbc8 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/metrics_du.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/metrics_du.go @@ -17,7 +17,8 @@ limitations under the License. package volume import ( - "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/volume/util" ) @@ -40,7 +41,7 @@ func NewMetricsDu(path string) MetricsProvider { // and gathering filesystem info for the Volume path. // See MetricsProvider.GetMetrics func (md *metricsDu) GetMetrics() (*Metrics, error) { - metrics := &Metrics{} + metrics := &Metrics{Time: metav1.Now()} if md.path == "" { return metrics, NewNoPathDefinedError() } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/metrics_statfs.go b/vendor/k8s.io/kubernetes/pkg/volume/metrics_statfs.go index 37a5e03bee..ede4f6ef8f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/metrics_statfs.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/metrics_statfs.go @@ -17,7 +17,8 @@ limitations under the License. package volume import ( - "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/volume/util" ) @@ -39,7 +40,7 @@ func NewMetricsStatFS(path string) MetricsProvider { // GetMetrics calculates the volume usage and device free space by executing "du" // and gathering filesystem info for the Volume path. 
func (md *metricsStatFS) GetMetrics() (*Metrics, error) { - metrics := &Metrics{} + metrics := &Metrics{Time: metav1.Now()} if md.path == "" { return metrics, NewNoPathDefinedError() } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go index fb217da841..b78c76d2f9 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go @@ -23,14 +23,15 @@ import ( "sync" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/cloudprovider" - "k8s.io/kubernetes/pkg/types" - utilerrors "k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/util/io" "k8s.io/kubernetes/pkg/util/mount" - "k8s.io/kubernetes/pkg/util/validation" ) // VolumeOptions contains option information about a volume. @@ -40,15 +41,17 @@ type VolumeOptions struct { // many kinds of provisioners. // Reclamation policy for a persistent volume - PersistentVolumeReclaimPolicy api.PersistentVolumeReclaimPolicy - // PV.Name of the appropriate PersistentVolume. Used to generate cloud - // volume name. + PersistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy + // Suggested PV.Name of the PersistentVolume to provision. + // This is a generated name guaranteed to be unique in Kubernetes cluster. + // If you choose not to use it as volume name, ensure uniqueness by either + // combining it with your value or create unique values of your own. PVName string // PVC is reference to the claim that lead to provisioning of a new PV. // Provisioners *must* create a PV that would be matched by this PVC, // i.e. with required capacity, accessMode, labels matching PVC.Selector and // so on. - PVC *api.PersistentVolumeClaim + PVC *v1.PersistentVolumeClaim // Unique name of Kubernetes cluster. ClusterName string // Tags to attach to the real volume in the cloud provider - e.g. AWS EBS @@ -90,12 +93,12 @@ type VolumePlugin interface { // NewMounter creates a new volume.Mounter from an API specification. // Ownership of the spec pointer in *not* transferred. - // - spec: The api.Volume spec + // - spec: The v1.Volume spec // - pod: The enclosing pod - NewMounter(spec *Spec, podRef *api.Pod, opts VolumeOptions) (Mounter, error) + NewMounter(spec *Spec, podRef *v1.Pod, opts VolumeOptions) (Mounter, error) // NewUnmounter creates a new volume.Unmounter from recoverable state. - // - name: The volume name, as per the api.Volume spec. + // - name: The volume name, as per the v1.Volume spec. // - podUID: The UID of the enclosing pod NewUnmounter(name string, podUID types.UID) (Unmounter, error) @@ -104,6 +107,16 @@ type VolumePlugin interface { // information from input. 
This function is used by volume manager to reconstruct // volume spec by reading the volume directories from disk ConstructVolumeSpec(volumeName, mountPath string) (*Spec, error) + + // SupportsMountOption returns true if volume plugins supports Mount options + // Specifying mount options in a volume plugin that doesn't support + // user specified mount options will result in error creating persistent volumes + SupportsMountOption() bool + + // SupportsBulkVolumeVerification checks if volume plugin type is capable + // of enabling bulk polling of all nodes. This can speed up verification of + // attached volumes by quite a bit, but underlying pluging must support it. + SupportsBulkVolumeVerification() bool } // PersistentVolumePlugin is an extended interface of VolumePlugin and is used @@ -111,7 +124,7 @@ type VolumePlugin interface { type PersistentVolumePlugin interface { VolumePlugin // GetAccessModes describes the ways a given volume can be accessed/mounted. - GetAccessModes() []api.PersistentVolumeAccessMode + GetAccessModes() []v1.PersistentVolumeAccessMode } // RecyclableVolumePlugin is an extended interface of VolumePlugin and is used @@ -119,12 +132,13 @@ type PersistentVolumePlugin interface { // again to new claims type RecyclableVolumePlugin interface { VolumePlugin - // NewRecycler creates a new volume.Recycler which knows how to reclaim this - // resource after the volume's release from a PersistentVolumeClaim. The - // recycler will use the provided recorder to write any events that might be + + // Recycle knows how to reclaim this + // resource after the volume's release from a PersistentVolumeClaim. + // Recycle will use the provided recorder to write any events that might be // interesting to user. It's expected that caller will pass these events to // the PV being recycled. - NewRecycler(pvName string, spec *Spec, eventRecorder RecycleEventRecorder) (Recycler, error) + Recycle(pvName string, spec *Spec, eventRecorder RecycleEventRecorder) error } // DeletableVolumePlugin is an extended interface of VolumePlugin and is used @@ -142,6 +156,8 @@ const ( // Name of a volume in external cloud that is being provisioned and thus // should be ignored by rest of Kubernetes. ProvisionedVolumeName = "placeholder-for-provisioning" + // Mount options annotations + MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options" ) // ProvisionableVolumePlugin is an extended interface of VolumePlugin and is @@ -190,7 +206,7 @@ type VolumeHost interface { // the provided spec. This is used to implement volume plugins which // "wrap" other plugins. For example, the "secret" volume is // implemented in terms of the "emptyDir" volume. - NewWrapperMounter(volName string, spec Spec, pod *api.Pod, opts VolumeOptions) (Mounter, error) + NewWrapperMounter(volName string, spec Spec, pod *v1.Pod, opts VolumeOptions) (Mounter, error) // NewWrapperUnmounter finds an appropriate plugin with which to handle // the provided spec. See comments on NewWrapperMounter for more @@ -212,8 +228,11 @@ type VolumeHost interface { // Returns host IP or nil in the case of error. GetHostIP() (net.IP, error) - // Returns node allocatable - GetNodeAllocatable() (api.ResourceList, error) + // Returns node allocatable. + GetNodeAllocatable() (v1.ResourceList, error) + + // Returns a function that returns a secret. + GetSecretFunc() func(namespace, name string) (*v1.Secret, error) } // VolumePluginMgr tracks registered plugins. 
@@ -224,8 +243,8 @@ type VolumePluginMgr struct { // Spec is an internal representation of a volume. All API volume types translate to Spec. type Spec struct { - Volume *api.Volume - PersistentVolume *api.PersistentVolume + Volume *v1.Volume + PersistentVolume *v1.PersistentVolume ReadOnly bool } @@ -269,7 +288,7 @@ type VolumeConfig struct { // which override specific properties of the pod in accordance with that // plugin. See NewPersistentVolumeRecyclerPodTemplate for the properties // that are expected to be overridden. - RecyclerPodTemplate *api.Pod + RecyclerPodTemplate *v1.Pod // RecyclerMinimumTimeout is the minimum amount of time in seconds for the // recycler pod's ActiveDeadlineSeconds attribute. Added to the minimum @@ -296,15 +315,15 @@ type VolumeConfig struct { ProvisioningEnabled bool } -// NewSpecFromVolume creates an Spec from an api.Volume -func NewSpecFromVolume(vs *api.Volume) *Spec { +// NewSpecFromVolume creates an Spec from an v1.Volume +func NewSpecFromVolume(vs *v1.Volume) *Spec { return &Spec{ Volume: vs, } } -// NewSpecFromPersistentVolume creates an Spec from an api.PersistentVolume -func NewSpecFromPersistentVolume(pv *api.PersistentVolume, readOnly bool) *Spec { +// NewSpecFromPersistentVolume creates an Spec from an v1.PersistentVolume +func NewSpecFromPersistentVolume(pv *v1.PersistentVolume, readOnly bool) *Spec { return &Spec{ PersistentVolume: pv, ReadOnly: readOnly, @@ -526,32 +545,32 @@ func (pm *VolumePluginMgr) FindAttachablePluginByName(name string) (AttachableVo // before failing. Recommended. Default is 60 seconds. // // See HostPath and NFS for working recycler examples -func NewPersistentVolumeRecyclerPodTemplate() *api.Pod { +func NewPersistentVolumeRecyclerPodTemplate() *v1.Pod { timeout := int64(60) - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ GenerateName: "pv-recycler-", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, - Spec: api.PodSpec{ + Spec: v1.PodSpec{ ActiveDeadlineSeconds: &timeout, - RestartPolicy: api.RestartPolicyNever, - Volumes: []api.Volume{ + RestartPolicy: v1.RestartPolicyNever, + Volumes: []v1.Volume{ { Name: "vol", // IMPORTANT! All plugins using this template MUST // override pod.Spec.Volumes[0].VolumeSource Recycler // implementations without a valid VolumeSource will fail. - VolumeSource: api.VolumeSource{}, + VolumeSource: v1.VolumeSource{}, }, }, - Containers: []api.Container{ + Containers: []v1.Container{ { Name: "pv-recycler", Image: "gcr.io/google_containers/busybox", Command: []string{"/bin/sh"}, Args: []string{"-c", "test -e /scrub && rm -rf /scrub/..?* /scrub/.[!.]* /scrub/* && test -z \"$(ls -A /scrub)\" || exit 1"}, - VolumeMounts: []api.VolumeMount{ + VolumeMounts: []v1.VolumeMount{ { Name: "vol", MountPath: "/scrub", @@ -563,3 +582,15 @@ func NewPersistentVolumeRecyclerPodTemplate() *api.Pod { } return pod } + +// Check validity of recycle pod template +// List of checks: +// - at least one volume is defined in the recycle pod template +// If successful, returns nil +// if unsuccessful, returns an error. 
+func ValidateRecyclerPodTemplate(pod *v1.Pod) error { + if len(pod.Spec.Volumes) < 1 { + return fmt.Errorf("does not contain any volume(s)") + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util.go b/vendor/k8s.io/kubernetes/pkg/volume/util.go index 0438772933..26ae39d1ca 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util.go @@ -20,10 +20,11 @@ import ( "fmt" "reflect" - "k8s.io/kubernetes/pkg/api" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/watch" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "hash/fnv" "math/rand" @@ -31,9 +32,11 @@ import ( "strings" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + volutil "k8s.io/kubernetes/pkg/volume/util" ) type RecycleEventRecorder func(eventtype, message string) @@ -51,13 +54,13 @@ type RecycleEventRecorder func(eventtype, message string) // pod - the pod designed by a volume plugin to recycle the volume. pod.Name // will be overwritten with unique name based on PV.Name. // client - kube client for API operations. -func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *api.Pod, kubeClient clientset.Interface, recorder RecycleEventRecorder) error { +func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, kubeClient clientset.Interface, recorder RecycleEventRecorder) error { return internalRecycleVolumeByWatchingPodUntilCompletion(pvName, pod, newRecyclerClient(kubeClient, recorder)) } // same as above func comments, except 'recyclerClient' is a narrower pod API // interface to ease testing -func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *api.Pod, recyclerClient recyclerClient) error { +func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, recyclerClient recyclerClient) error { glog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name) // Generate unique name for the recycler pod - we need to get "already @@ -83,7 +86,7 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *api.P return fmt.Errorf("unexpected error creating recycler pod: %+v\n", err) } } - defer func(pod *api.Pod) { + defer func(pod *v1.Pod) { glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name) if err := recyclerClient.DeletePod(pod.Name, pod.Namespace); err != nil { glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err) @@ -93,19 +96,22 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *api.P // Now only the old pod or the new pod run. 
Watch it until it finishes // and send all events on the pod to the PV for { - event := <-podCh + event, ok := <-podCh + if !ok { + return fmt.Errorf("recycler pod %q watch channel had been closed", pod.Name) + } switch event.Object.(type) { - case *api.Pod: + case *v1.Pod: // POD changed - pod := event.Object.(*api.Pod) + pod := event.Object.(*v1.Pod) glog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase) switch event.Type { case watch.Added, watch.Modified: - if pod.Status.Phase == api.PodSucceeded { + if pod.Status.Phase == v1.PodSucceeded { // Recycle succeeded. return nil } - if pod.Status.Phase == api.PodFailed { + if pod.Status.Phase == v1.PodFailed { if pod.Status.Message != "" { return fmt.Errorf(pod.Status.Message) } else { @@ -120,9 +126,9 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *api.P return fmt.Errorf("recycler pod watcher failed") } - case *api.Event: + case *v1.Event: // Event received - podEvent := event.Object.(*api.Event) + podEvent := event.Object.(*v1.Event) glog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message) if event.Type == watch.Added { recyclerClient.Event(podEvent.Type, podEvent.Message) @@ -134,8 +140,8 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *api.P // recyclerClient abstracts access to a Pod by providing a narrower interface. // This makes it easier to mock a client for testing. type recyclerClient interface { - CreatePod(pod *api.Pod) (*api.Pod, error) - GetPod(name, namespace string) (*api.Pod, error) + CreatePod(pod *v1.Pod) (*v1.Pod, error) + GetPod(name, namespace string) (*v1.Pod, error) DeletePod(name, namespace string) error // WatchPod returns a ListWatch for watching a pod. The stopChannel is used // to close the reflector backing the watch. 
The caller is responsible for @@ -157,12 +163,12 @@ type realRecyclerClient struct { recorder RecycleEventRecorder } -func (c *realRecyclerClient) CreatePod(pod *api.Pod) (*api.Pod, error) { +func (c *realRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) { return c.client.Core().Pods(pod.Namespace).Create(pod) } -func (c *realRecyclerClient) GetPod(name, namespace string) (*api.Pod, error) { - return c.client.Core().Pods(namespace).Get(name) +func (c *realRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) { + return c.client.Core().Pods(namespace).Get(name, metav1.GetOptions{}) } func (c *realRecyclerClient) DeletePod(name, namespace string) error { @@ -175,8 +181,8 @@ func (c *realRecyclerClient) Event(eventtype, message string) { func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) { podSelector, _ := fields.ParseSelector("metadata.name=" + name) - options := api.ListOptions{ - FieldSelector: podSelector, + options := metav1.ListOptions{ + FieldSelector: podSelector.String(), Watch: true, } @@ -186,8 +192,8 @@ func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan s } eventSelector, _ := fields.ParseSelector("involvedObject.name=" + name) - eventWatch, err := c.client.Core().Events(namespace).Watch(api.ListOptions{ - FieldSelector: eventSelector, + eventWatch, err := c.client.Core().Events(namespace).Watch(metav1.ListOptions{ + FieldSelector: eventSelector.String(), Watch: true, }) if err != nil { @@ -195,13 +201,14 @@ func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan s return nil, err } - eventCh := make(chan watch.Event, 0) + eventCh := make(chan watch.Event, 30) go func() { defer eventWatch.Stop() defer podWatch.Stop() defer close(eventCh) - + var podWatchChannelClosed bool + var eventWatchChannelClosed bool for { select { case _ = <-stopChannel: @@ -209,15 +216,19 @@ func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan s case podEvent, ok := <-podWatch.ResultChan(): if !ok { - return + podWatchChannelClosed = true + } else { + eventCh <- podEvent } - eventCh <- podEvent - case eventEvent, ok := <-eventWatch.ResultChan(): if !ok { - return + eventWatchChannelClosed = true + } else { + eventCh <- eventEvent } - eventCh <- eventEvent + } + if podWatchChannelClosed && eventWatchChannelClosed { + break } } }() @@ -229,9 +240,9 @@ func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan s // recycle operation. The calculation and return value is either the // minimumTimeout or the timeoutIncrement per Gi of storage size, whichever is // greater. -func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *api.PersistentVolume) int64 { +func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.PersistentVolume) int64 { giQty := resource.MustParse("1Gi") - pvQty := pv.Spec.Capacity[api.ResourceStorage] + pvQty := pv.Spec.Capacity[v1.ResourceStorage] giSize := giQty.Value() pvSize := pvQty.Value() timeout := (pvSize / giSize) * int64(timeoutIncrement) @@ -297,16 +308,34 @@ func ChooseZoneForVolume(zones sets.String, pvcName string) string { // Heuristic to make sure that volumes in a StatefulSet are spread across zones // StatefulSet PVCs are (currently) named ClaimName-StatefulSetName-Id, - // where Id is an integer index + // where Id is an integer index. 
+ // Note though that if a StatefulSet pod has multiple claims, we need them to be + // in the same zone, because otherwise the pod will be unable to mount both volumes, + // and will be unschedulable. So we hash _only_ the "StatefulSetName" portion when + // it looks like `ClaimName-StatefulSetName-Id`. + // We continue to round-robin volume names that look like `Name-Id` also; this is a useful + // feature for users that are creating statefulset-like functionality without using statefulsets. lastDash := strings.LastIndexByte(pvcName, '-') if lastDash != -1 { - petIDString := pvcName[lastDash+1:] - petID, err := strconv.ParseUint(petIDString, 10, 32) + statefulsetIDString := pvcName[lastDash+1:] + statefulsetID, err := strconv.ParseUint(statefulsetIDString, 10, 32) if err == nil { - // Offset by the pet id, so we round-robin across zones - index = uint32(petID) - // We still hash the volume name, but only the base + // Offset by the statefulsetID, so we round-robin across zones + index = uint32(statefulsetID) + // We still hash the volume name, but only the prefix hashString = pvcName[:lastDash] + + // In the special case where it looks like `ClaimName-StatefulSetName-Id`, + // hash only the StatefulSetName, so that different claims on the same StatefulSet + // member end up in the same zone. + // Note that StatefulSetName (and ClaimName) might themselves both have dashes. + // We actually just take the portion after the final - of ClaimName-StatefulSetName. + // For our purposes it doesn't much matter (just suboptimal spreading). + lastDash := strings.LastIndexByte(hashString, '-') + if lastDash != -1 { + hashString = hashString[lastDash+1:] + } + glog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index) } } @@ -331,3 +360,53 @@ func ChooseZoneForVolume(zones sets.String, pvcName string) string { glog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice) return zone } + +// UnmountViaEmptyDir delegates the tear down operation for secret, configmap, git_repo and downwardapi +// to empty_dir +func UnmountViaEmptyDir(dir string, host VolumeHost, volName string, volSpec Spec, podUID types.UID) error { + glog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir) + + if pathExists, pathErr := volutil.PathExists(dir); pathErr != nil { + return fmt.Errorf("Error checking if path exists: %v", pathErr) + } else if !pathExists { + glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) + return nil + } + + // Wrap EmptyDir, let it do the teardown. 
+ wrapped, err := host.NewWrapperUnmounter(volName, volSpec, podUID) + if err != nil { + return err + } + return wrapped.TearDownAt(dir) +} + +// MountOptionFromSpec extracts and joins mount options from volume spec with supplied options +func MountOptionFromSpec(spec *Spec, options ...string) []string { + pv := spec.PersistentVolume + + if pv != nil { + if mo, ok := pv.Annotations[MountOptionAnnotation]; ok { + moList := strings.Split(mo, ",") + return JoinMountOptions(moList, options) + } + + } + return options +} + +// JoinMountOptions joins mount options eliminating duplicates +func JoinMountOptions(userOptions []string, systemOptions []string) []string { + allMountOptions := sets.NewString() + + for _, mountOption := range userOptions { + if len(mountOption) > 0 { + allMountOptions.Insert(mountOption) + } + } + + for _, mountOption := range systemOptions { + allMountOptions.Insert(mountOption) + } + return allMountOptions.UnsortedList() +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD index 5e82b4ff49..9bc536b0a6 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD @@ -4,10 +4,8 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_binary", "go_library", "go_test", - "cgo_library", ) go_library( @@ -23,13 +21,14 @@ go_library( ], tags = ["automanaged"], deps = [ - "//pkg/api:go_default_library", - "//pkg/api/resource:go_default_library", - "//pkg/apis/storage:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", + "//pkg/api/v1:go_default_library", + "//pkg/apis/storage/v1beta1:go_default_library", + "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/util/mount:go_default_library", - "//pkg/util/sets:go_default_library", "//vendor:github.com/golang/glog", + "//vendor:k8s.io/apimachinery/pkg/api/resource", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + "//vendor:k8s.io/apimachinery/pkg/util/sets", ], ) @@ -39,10 +38,29 @@ go_test( "atomic_writer_test.go", "device_util_linux_test.go", ], - library = "go_default_library", + library = ":go_default_library", tags = ["automanaged"], deps = [ - "//pkg/util/sets:go_default_library", - "//pkg/util/testing:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/util/sets", + "//vendor:k8s.io/client-go/util/testing", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/volume/util/nestedpendingoperations:all-srcs", + "//pkg/volume/util/operationexecutor:all-srcs", + "//pkg/volume/util/types:all-srcs", + "//pkg/volume/util/volumehelper:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/util/OWNERS new file mode 100755 index 0000000000..abea2824a8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/OWNERS @@ -0,0 +1,11 @@ +approvers: +- saad-ali +reviewers: +- thockin +- smarterclayton +- pmorie +- saad-ali +- justinsb +- rootfs +- jingxu97 +- screeley44 diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go b/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go index 8b84710fc3..5eef55b450 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go @@ -29,7 +29,7 @@ import ( 
"github.com/golang/glog" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/sets" ) const ( diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/fs.go b/vendor/k8s.io/kubernetes/pkg/volume/util/fs.go index 0ecb89d24b..cfa7e30b4f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/fs.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/fs.go @@ -22,11 +22,10 @@ import ( "bytes" "fmt" "os/exec" - "strconv" "strings" "syscall" - "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/apimachinery/pkg/api/resource" ) // FSInfo linux returns (available bytes, byte capacity, byte usage, total inodes, inodes free, inode usage, error) @@ -69,35 +68,29 @@ func Du(path string) (*resource.Quantity, error) { return &used, nil } -// Find uses the command `find -dev -printf '.' | wc -c` to count files and directories. +// Find uses the equivalent of the command `find -dev -printf '.' | wc -c` to count files and directories. // While this is not an exact measure of inodes used, it is a very good approximation. func Find(path string) (int64, error) { - var stdout, stdwcerr, stdfinderr bytes.Buffer - var err error + if path == "" { + return 0, fmt.Errorf("invalid directory") + } + var counter byteCounter + var stderr bytes.Buffer findCmd := exec.Command("find", path, "-xdev", "-printf", ".") - wcCmd := exec.Command("wc", "-c") - if wcCmd.Stdin, err = findCmd.StdoutPipe(); err != nil { - return 0, fmt.Errorf("failed to setup stdout for cmd %v - %v", findCmd.Args, err) + findCmd.Stdout, findCmd.Stderr = &counter, &stderr + if err := findCmd.Start(); err != nil { + return 0, fmt.Errorf("failed to exec cmd %v - %v; stderr: %v", findCmd.Args, err, stderr.String()) } - wcCmd.Stdout, wcCmd.Stderr, findCmd.Stderr = &stdout, &stdwcerr, &stdfinderr - if err = findCmd.Start(); err != nil { - return 0, fmt.Errorf("failed to exec cmd %v - %v; stderr: %v", findCmd.Args, err, stdfinderr.String()) + if err := findCmd.Wait(); err != nil { + return 0, fmt.Errorf("cmd %v failed. stderr: %s; err: %v", findCmd.Args, stderr.String(), err) } + return counter.bytesWritten, nil +} - if err = wcCmd.Start(); err != nil { - return 0, fmt.Errorf("failed to exec cmd %v - %v; stderr %v", wcCmd.Args, err, stdwcerr.String()) - } - err = findCmd.Wait() - if err != nil { - return 0, fmt.Errorf("cmd %v failed. stderr: %s; err: %v", findCmd.Args, stdfinderr.String(), err) - } - err = wcCmd.Wait() - if err != nil { - return 0, fmt.Errorf("cmd %v failed. stderr: %s; err: %v", wcCmd.Args, stdwcerr.String(), err) - } - inodeUsage, err := strconv.ParseInt(strings.TrimSpace(stdout.String()), 10, 64) - if err != nil { - return 0, fmt.Errorf("cannot parse cmds: %v, %v output %s - %s", findCmd.Args, wcCmd.Args, stdout.String(), err) - } - return inodeUsage, nil +// Simple io.Writer implementation that counts how many bytes were written. +type byteCounter struct{ bytesWritten int64 } + +func (b *byteCounter) Write(p []byte) (int, error) { + b.bytesWritten += int64(len(p)) + return len(p), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/fs_unsupported.go b/vendor/k8s.io/kubernetes/pkg/volume/util/fs_unsupported.go index 02a39f91ef..8d35d5daed 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/fs_unsupported.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/fs_unsupported.go @@ -21,7 +21,7 @@ package util import ( "fmt" - "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/apimachinery/pkg/api/resource" ) // FSInfo unsupported returns 0 values for available and capacity and an error. 
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/util.go b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go index 8122e04dd6..035b2f0609 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go @@ -22,9 +22,10 @@ import ( "path" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/storage" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/pkg/api/v1" + storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/util/mount" ) @@ -97,7 +98,7 @@ func UnmountPath(mountPath string, mounter mount.Interface) error { glog.V(4).Infof("%q is unmounted, deleting the directory", mountPath) return os.Remove(mountPath) } - return nil + return fmt.Errorf("Failed to unmount path %v", mountPath) } // PathExists returns true if the specified path exists. @@ -113,12 +114,12 @@ func PathExists(path string) (bool, error) { } // GetSecretForPod locates secret by name in the pod's namespace and returns secret map -func GetSecretForPod(pod *api.Pod, secretName string, kubeClient clientset.Interface) (map[string]string, error) { +func GetSecretForPod(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (map[string]string, error) { secret := make(map[string]string) if kubeClient == nil { return secret, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.Core().Secrets(pod.Namespace).Get(secretName) + secrets, err := kubeClient.Core().Secrets(pod.Namespace).Get(secretName, metav1.GetOptions{}) if err != nil { return secret, err } @@ -134,11 +135,11 @@ func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeCl if kubeClient == nil { return secret, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.Core().Secrets(secretNamespace).Get(secretName) + secrets, err := kubeClient.Core().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) if err != nil { return secret, err } - if secrets.Type != api.SecretType(volumePluginName) { + if secrets.Type != v1.SecretType(volumePluginName) { return secret, fmt.Errorf("Cannot get secret of type %s", volumePluginName) } for name, data := range secrets.Data { @@ -147,14 +148,17 @@ func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeCl return secret, nil } -func GetClassForVolume(kubeClient clientset.Interface, pv *api.PersistentVolume) (*storage.StorageClass, error) { +func GetClassForVolume(kubeClient clientset.Interface, pv *v1.PersistentVolume) (*storage.StorageClass, error) { + if kubeClient == nil { + return nil, fmt.Errorf("Cannot get kube client") + } // TODO: replace with a real attribute after beta className, found := pv.Annotations["volume.beta.kubernetes.io/storage-class"] if !found { return nil, fmt.Errorf("Volume has no class annotation") } - class, err := kubeClient.Storage().StorageClasses().Get(className) + class, err := kubeClient.StorageV1beta1().StorageClasses().Get(className, metav1.GetOptions{}) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/volume.go b/vendor/k8s.io/kubernetes/pkg/volume/volume.go index 83cb1955f0..fa749181ea 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/volume.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/volume.go @@ -25,9 +25,10 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - 
"k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/types" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/api/v1" ) // Volume represents a directory used by pods or hosts on a node. All method @@ -52,6 +53,9 @@ type MetricsProvider interface { // Metrics represents the used and available bytes of the Volume. type Metrics struct { + // The time at which these stats were updated. + Time metav1.Time + // Used represents the total bytes used by the Volume. // Note: For block devices this maybe more than the total size of the files. Used *resource.Quantity @@ -71,15 +75,15 @@ type Metrics struct { // InodesUsed represents the total inodes used by the Volume. InodesUsed *resource.Quantity - // Inodes represents the total number of inodes availible in the volume. + // Inodes represents the total number of inodes available in the volume. // For volumes that share a filesystem with the host (e.g. emptydir, hostpath), // this is the inodes available in the underlying storage, // and will not equal InodesUsed + InodesFree as the fs is shared. Inodes *resource.Quantity - // InodesFree represent the inodes available for the volume. For Volues that share + // InodesFree represent the inodes available for the volume. For Volumes that share // a filesystem with the host (e.g. emptydir, hostpath), this is the free inodes - // on the underlying sporage, and is shared with host processes and other volumes + // on the underlying storage, and is shared with host processes and other volumes InodesFree *resource.Quantity } @@ -134,22 +138,13 @@ type Unmounter interface { TearDownAt(dir string) error } -// Recycler provides methods to reclaim the volume resource. -type Recycler interface { - Volume - // Recycle reclaims the resource. Calls to this method should block until - // the recycling task is complete. Any error returned indicates the volume - // has failed to be reclaimed. A nil return indicates success. - Recycle() error -} - // Provisioner is an interface that creates templates for PersistentVolumes // and can create the volume as a new resource in the infrastructure provider. type Provisioner interface { // Provision creates the resource by allocating the underlying volume in a // storage system. This method should block until completion and returns // PersistentVolume representing the created storage resource. - Provision() (*api.PersistentVolume, error) + Provision() (*v1.PersistentVolume, error) } // Deleter removes the resource from the underlying storage provider. Calls @@ -163,7 +158,7 @@ type Deleter interface { // as error and it will be sent as "Info" event to the PV being deleted. The // volume controller will retry deleting the volume in the next periodic // sync. This can be used to postpone deletion of a volume that is being - // dettached from a node. Deletion of such volume would fail anyway and such + // detached from a node. Deletion of such volume would fail anyway and such // error would confuse users. Delete() error } @@ -176,8 +171,8 @@ type Attacher interface { Attach(spec *Spec, nodeName types.NodeName) (string, error) // VolumesAreAttached checks whether the list of volumes still attached to the specified - // the node. It returns a map which maps from the volume spec to the checking result. - // If an error is occured during checking, the error will be returned + // node. It returns a map which maps from the volume spec to the checking result. 
+ // If an error is occurred during checking, the error will be returned VolumesAreAttached(specs []*Spec, nodeName types.NodeName) (map[*Spec]bool, error) // WaitForAttach blocks until the device is attached to this @@ -196,6 +191,14 @@ type Attacher interface { MountDevice(spec *Spec, devicePath string, deviceMountPath string) error } +type BulkVolumeVerifier interface { + // BulkVerifyVolumes checks whether the list of volumes still attached to the + // the clusters in the node. It returns a map which maps from the volume spec to the checking result. + // If an error occurs during check - error should be returned and volume on nodes + // should be assumed as still attached. + BulkVerifyVolumes(volumesByNode map[types.NodeName][]*Spec) (map[types.NodeName]map[*Spec]bool, error) +} + // Detacher can detach a volume from a node. type Detacher interface { // Detach the given device from the node with the given Name. @@ -280,16 +283,16 @@ func copyFolder(source string, dest string) (err error) { continue } - sourcefilepointer := source + "\\" + obj.Name() - destinationfilepointer := dest + "\\" + obj.Name() + sourceFilePointer := source + "\\" + obj.Name() + destinationFilePointer := dest + "\\" + obj.Name() if obj.IsDir() { - err = copyFolder(sourcefilepointer, destinationfilepointer) + err = copyFolder(sourceFilePointer, destinationFilePointer) if err != nil { return err } } else { - err = copyFile(sourcefilepointer, destinationfilepointer) + err = copyFile(sourceFilePointer, destinationFilePointer) if err != nil { return err } @@ -300,25 +303,25 @@ func copyFolder(source string, dest string) (err error) { } func copyFile(source string, dest string) (err error) { - sourcefile, err := os.Open(source) + sourceFile, err := os.Open(source) if err != nil { return err } - defer sourcefile.Close() + defer sourceFile.Close() - destfile, err := os.Create(dest) + destFile, err := os.Create(dest) if err != nil { return err } - defer destfile.Close() + defer destFile.Close() - _, err = io.Copy(destfile, sourcefile) + _, err = io.Copy(destFile, sourceFile) if err == nil { - sourceinfo, err := os.Stat(source) + sourceInfo, err := os.Stat(source) if err != nil { - err = os.Chmod(dest, sourceinfo.Mode()) + err = os.Chmod(dest, sourceInfo.Mode()) } } diff --git a/vendor/k8s.io/kubernetes/pkg/watch/BUILD b/vendor/k8s.io/kubernetes/pkg/watch/BUILD deleted file mode 100644 index 8a8a250327..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/watch/BUILD +++ /dev/null @@ -1,51 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "filter.go", - "mux.go", - "streamwatcher.go", - "until.go", - "watch.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/net:go_default_library", - "//pkg/util/runtime:go_default_library", - "//pkg/util/wait:go_default_library", - "//vendor:github.com/golang/glog", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "filter_test.go", - "mux_test.go", - "streamwatcher_test.go", - "until_test.go", - "watch_test.go", - ], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/unversioned:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/util/wait:go_default_library", - ], -) diff --git 
a/vendor/k8s.io/kubernetes/pkg/watch/versioned/BUILD b/vendor/k8s.io/kubernetes/pkg/watch/versioned/BUILD deleted file mode 100644 index 9ec7e73a1a..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/watch/versioned/BUILD +++ /dev/null @@ -1,49 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "decoder.go", - "encoder.go", - "generated.pb.go", - "register.go", - "types.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api/unversioned:go_default_library", - "//pkg/conversion:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/runtime/serializer/streaming:go_default_library", - "//pkg/watch:go_default_library", - "//vendor:github.com/gogo/protobuf/proto", - ], -) - -go_test( - name = "go_default_xtest", - srcs = [ - "decoder_test.go", - "encoder_test.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/testapi:go_default_library", - "//pkg/runtime:go_default_library", - "//pkg/runtime/serializer/streaming:go_default_library", - "//pkg/util/wait:go_default_library", - "//pkg/watch:go_default_library", - "//pkg/watch/versioned:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/watch/versioned/decoder.go b/vendor/k8s.io/kubernetes/pkg/watch/versioned/decoder.go deleted file mode 100644 index e5865273e2..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/watch/versioned/decoder.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package versioned - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer/streaming" - "k8s.io/kubernetes/pkg/watch" -) - -// Decoder implements the watch.Decoder interface for io.ReadClosers that -// have contents which consist of a series of watchEvent objects encoded -// with the given streaming decoder. The internal objects will be then -// decoded by the embedded decoder. -type Decoder struct { - decoder streaming.Decoder - embeddedDecoder runtime.Decoder -} - -// NewDecoder creates an Decoder for the given writer and codec. -func NewDecoder(decoder streaming.Decoder, embeddedDecoder runtime.Decoder) *Decoder { - return &Decoder{ - decoder: decoder, - embeddedDecoder: embeddedDecoder, - } -} - -// Decode blocks until it can return the next object in the reader. Returns an error -// if the reader is closed or an object can't be decoded. 
-func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) { - var got Event - res, _, err := d.decoder.Decode(nil, &got) - if err != nil { - return "", nil, err - } - if res != &got { - return "", nil, fmt.Errorf("unable to decode to versioned.Event") - } - switch got.Type { - case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error): - default: - return "", nil, fmt.Errorf("got invalid watch event type: %v", got.Type) - } - - obj, err := runtime.Decode(d.embeddedDecoder, got.Object.Raw) - if err != nil { - return "", nil, fmt.Errorf("unable to decode watch event: %v", err) - } - return watch.EventType(got.Type), obj, nil -} - -// Close closes the underlying r. -func (d *Decoder) Close() { - d.decoder.Close() -} diff --git a/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.pb.go deleted file mode 100644 index db5ffadd60..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.pb.go +++ /dev/null @@ -1,390 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/watch/versioned/generated.proto -// DO NOT EDIT! - -/* - Package versioned is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/watch/versioned/generated.proto - - It has these top-level messages: - Event -*/ -package versioned - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-const _ = proto.GoGoProtoPackageIsVersion1 - -func (m *Event) Reset() { *m = Event{} } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func init() { - proto.RegisterType((*Event)(nil), "k8s.io.kubernetes.pkg.watch.versioned.Event") -} -func (m *Event) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Event) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Object.Size())) - n1, err := m.Object.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *Event) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Object.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Event) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Event{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(strings.Replace(this.Object.String(), "RawExtension", "k8s_io_kubernetes_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Event) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Event: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -var fileDescriptorGenerated = []byte{ - // 280 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4b, 0xc3, 0x30, - 0x18, 0x86, 0x1b, 0x99, 0x83, 0x55, 0xf1, 0xd0, 0xd3, 0xe8, 0x21, 0x2b, 0x82, 0x30, 0x18, 0x4b, 
- 0x50, 0x10, 0x3c, 0x17, 0x76, 0x16, 0xaa, 0x27, 0x6f, 0x6d, 0xf7, 0x99, 0xc5, 0xba, 0xa4, 0xa4, - 0x5f, 0x37, 0x77, 0x11, 0x7f, 0x82, 0x3f, 0xab, 0xc7, 0x1d, 0x3d, 0x0d, 0x5b, 0xff, 0x88, 0x98, - 0x95, 0x09, 0xa3, 0xb7, 0xbc, 0x24, 0xcf, 0xc3, 0xfb, 0xc6, 0xbd, 0xcd, 0xee, 0x0a, 0x26, 0x35, - 0xcf, 0xca, 0x04, 0x8c, 0x02, 0x84, 0x82, 0xe7, 0x99, 0xe0, 0xeb, 0x18, 0xd3, 0x05, 0x5f, 0x81, - 0x29, 0xa4, 0x56, 0x30, 0xe7, 0x02, 0x14, 0x98, 0x18, 0x61, 0xce, 0x72, 0xa3, 0x51, 0x7b, 0x57, - 0x7b, 0x8c, 0xfd, 0x63, 0x2c, 0xcf, 0x04, 0xb3, 0x18, 0x3b, 0x60, 0xfe, 0x54, 0x48, 0x5c, 0x94, - 0x09, 0x4b, 0xf5, 0x92, 0x0b, 0x2d, 0x34, 0xb7, 0x74, 0x52, 0x3e, 0xdb, 0x64, 0x83, 0x3d, 0xed, - 0xad, 0xfe, 0xb4, 0xbb, 0x8c, 0x29, 0x15, 0xca, 0x25, 0x1c, 0x97, 0xf0, 0xaf, 0xbb, 0x9f, 0x97, - 0x28, 0x5f, 0xb9, 0x54, 0x58, 0xa0, 0x39, 0x46, 0x2e, 0xdf, 0xdd, 0xd3, 0xd9, 0x0a, 0x14, 0x7a, - 0x81, 0xdb, 0xc3, 0x4d, 0x0e, 0x43, 0x12, 0x90, 0xf1, 0x20, 0x3c, 0xaf, 0x76, 0x23, 0xa7, 0xd9, - 0x8d, 0x7a, 0x8f, 0x9b, 0x1c, 0x22, 0x7b, 0xe3, 0x3d, 0xb8, 0x7d, 0x9d, 0xbc, 0x40, 0x8a, 0xc3, - 0x93, 0x80, 0x8c, 0xcf, 0x6e, 0x26, 0xac, 0x7b, 0x73, 0xdb, 0x8e, 0x45, 0xf1, 0x7a, 0xf6, 0x86, - 0xa0, 0xfe, 0xa6, 0x87, 0x17, 0xad, 0xb0, 0x7f, 0x6f, 0x15, 0x51, 0xab, 0x0a, 0x27, 0x55, 0x4d, - 0x9d, 0x6d, 0x4d, 0x9d, 0xaf, 0x9a, 0x3a, 0x1f, 0x0d, 0x25, 0x55, 0x43, 0xc9, 0xb6, 0xa1, 0xe4, - 0xbb, 0xa1, 0xe4, 0xf3, 0x87, 0x3a, 0x4f, 0x83, 0xc3, 0xef, 0xfd, 0x06, 0x00, 0x00, 0xff, 0xff, - 0x23, 0x3d, 0x7b, 0x7e, 0x9c, 0x01, 0x00, 0x00, -} diff --git a/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.proto b/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.proto deleted file mode 100644 index 340e18dad8..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.proto +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.watch.versioned; - -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "versioned"; - -// Event represents a single event to a watched resource. -// -// +protobuf=true -// +k8s:openapi-gen=true -message Event { - optional string type = 1; - - // Object is: - // * If Type is Added or Modified: the new state of the object. - // * If Type is Deleted: the state of the object immediately before deletion. - // * If Type is Error: *api.Status is recommended; other types may make sense - // depending on context. 
- optional k8s.io.kubernetes.pkg.runtime.RawExtension object = 2; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/watch/versioned/register.go b/vendor/k8s.io/kubernetes/pkg/watch/versioned/register.go deleted file mode 100644 index e90a021a43..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/watch/versioned/register.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package versioned - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/watch" -) - -// WatchEventKind is name reserved for serializing watch events. -const WatchEventKind = "WatchEvent" - -// AddToGroupVersion registers the watch external and internal kinds with the scheme, and ensures the proper -// conversions are in place. -func AddToGroupVersion(scheme *runtime.Scheme, groupVersion unversioned.GroupVersion) { - scheme.AddKnownTypeWithName(groupVersion.WithKind(WatchEventKind), &Event{}) - scheme.AddKnownTypeWithName( - unversioned.GroupVersion{Group: groupVersion.Group, Version: runtime.APIVersionInternal}.WithKind(WatchEventKind), - &InternalEvent{}, - ) - scheme.AddConversionFuncs( - Convert_versioned_Event_to_watch_Event, - Convert_versioned_InternalEvent_to_versioned_Event, - Convert_watch_Event_to_versioned_Event, - Convert_versioned_Event_to_versioned_InternalEvent, - ) -} - -func Convert_watch_Event_to_versioned_Event(in *watch.Event, out *Event, s conversion.Scope) error { - out.Type = string(in.Type) - switch t := in.Object.(type) { - case *runtime.Unknown: - // TODO: handle other fields on Unknown and detect type - out.Object.Raw = t.Raw - case nil: - default: - out.Object.Object = in.Object - } - return nil -} - -func Convert_versioned_InternalEvent_to_versioned_Event(in *InternalEvent, out *Event, s conversion.Scope) error { - return Convert_watch_Event_to_versioned_Event((*watch.Event)(in), out, s) -} - -func Convert_versioned_Event_to_watch_Event(in *Event, out *watch.Event, s conversion.Scope) error { - out.Type = watch.EventType(in.Type) - if in.Object.Object != nil { - out.Object = in.Object.Object - } else if in.Object.Raw != nil { - // TODO: handle other fields on Unknown and detect type - out.Object = &runtime.Unknown{ - Raw: in.Object.Raw, - ContentType: runtime.ContentTypeJSON, - } - } - return nil -} - -func Convert_versioned_Event_to_versioned_InternalEvent(in *Event, out *InternalEvent, s conversion.Scope) error { - return Convert_versioned_Event_to_watch_Event(in, (*watch.Event)(out), s) -} - -// InternalEvent makes watch.Event versioned -type InternalEvent watch.Event - -func (e *InternalEvent) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind } -func (e *Event) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind } diff --git a/vendor/k8s.io/kubernetes/pkg/watch/versioned/types.go b/vendor/k8s.io/kubernetes/pkg/watch/versioned/types.go deleted file mode 100644 index 430c357b54..0000000000 --- 
a/vendor/k8s.io/kubernetes/pkg/watch/versioned/types.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package versioned contains the versioned types for watch. This is the first -// serialization version unless otherwise noted. -package versioned - -import ( - "k8s.io/kubernetes/pkg/runtime" -) - -// Event represents a single event to a watched resource. -// -// +protobuf=true -// +k8s:openapi-gen=true -type Event struct { - Type string `json:"type" protobuf:"bytes,1,opt,name=type"` - - // Object is: - // * If Type is Added or Modified: the new state of the object. - // * If Type is Deleted: the state of the object immediately before deletion. - // * If Type is Error: *api.Status is recommended; other types may make sense - // depending on context. - Object runtime.RawExtension `json:"object" protobuf:"bytes,2,opt,name=object"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/watch/watch.go b/vendor/k8s.io/kubernetes/pkg/watch/watch.go deleted file mode 100644 index 550149c688..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/watch/watch.go +++ /dev/null @@ -1,269 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package watch - -import ( - "fmt" - "sync" - - "k8s.io/kubernetes/pkg/runtime" - - "github.com/golang/glog" -) - -// Interface can be implemented by anything that knows how to watch and report changes. -type Interface interface { - // Stops watching. Will close the channel returned by ResultChan(). Releases - // any resources used by the watch. - Stop() - - // Returns a chan which will receive all the events. If an error occurs - // or Stop() is called, this channel will be closed, in which case the - // watch should be completely cleaned up. - ResultChan() <-chan Event -} - -// EventType defines the possible types of events. -type EventType string - -const ( - Added EventType = "ADDED" - Modified EventType = "MODIFIED" - Deleted EventType = "DELETED" - Error EventType = "ERROR" - - DefaultChanSize int32 = 100 -) - -// Event represents a single event to a watched resource. -type Event struct { - Type EventType - - // Object is: - // * If Type is Added or Modified: the new state of the object. - // * If Type is Deleted: the state of the object immediately before deletion. - // * If Type is Error: *api.Status is recommended; other types may make sense - // depending on context. 
- Object runtime.Object -} - -type emptyWatch chan Event - -// NewEmptyWatch returns a watch interface that returns no results and is closed. -// May be used in certain error conditions where no information is available but -// an error is not warranted. -func NewEmptyWatch() Interface { - ch := make(chan Event) - close(ch) - return emptyWatch(ch) -} - -// Stop implements Interface -func (w emptyWatch) Stop() { -} - -// ResultChan implements Interface -func (w emptyWatch) ResultChan() <-chan Event { - return chan Event(w) -} - -// FakeWatcher lets you test anything that consumes a watch.Interface; threadsafe. -type FakeWatcher struct { - result chan Event - Stopped bool - sync.Mutex -} - -func NewFake() *FakeWatcher { - return &FakeWatcher{ - result: make(chan Event), - } -} - -func NewFakeWithChanSize(size int, blocking bool) *FakeWatcher { - return &FakeWatcher{ - result: make(chan Event, size), - } -} - -// Stop implements Interface.Stop(). -func (f *FakeWatcher) Stop() { - f.Lock() - defer f.Unlock() - if !f.Stopped { - glog.V(4).Infof("Stopping fake watcher.") - close(f.result) - f.Stopped = true - } -} - -func (f *FakeWatcher) IsStopped() bool { - f.Lock() - defer f.Unlock() - return f.Stopped -} - -// Reset prepares the watcher to be reused. -func (f *FakeWatcher) Reset() { - f.Lock() - defer f.Unlock() - f.Stopped = false - f.result = make(chan Event) -} - -func (f *FakeWatcher) ResultChan() <-chan Event { - return f.result -} - -// Add sends an add event. -func (f *FakeWatcher) Add(obj runtime.Object) { - f.result <- Event{Added, obj} -} - -// Modify sends a modify event. -func (f *FakeWatcher) Modify(obj runtime.Object) { - f.result <- Event{Modified, obj} -} - -// Delete sends a delete event. -func (f *FakeWatcher) Delete(lastValue runtime.Object) { - f.result <- Event{Deleted, lastValue} -} - -// Error sends an Error event. -func (f *FakeWatcher) Error(errValue runtime.Object) { - f.result <- Event{Error, errValue} -} - -// Action sends an event of the requested type, for table-based testing. -func (f *FakeWatcher) Action(action EventType, obj runtime.Object) { - f.result <- Event{action, obj} -} - -// RaceFreeFakeWatcher lets you test anything that consumes a watch.Interface; threadsafe. -type RaceFreeFakeWatcher struct { - result chan Event - Stopped bool - sync.Mutex -} - -func NewRaceFreeFake() *RaceFreeFakeWatcher { - return &RaceFreeFakeWatcher{ - result: make(chan Event, DefaultChanSize), - } -} - -// Stop implements Interface.Stop(). -func (f *RaceFreeFakeWatcher) Stop() { - f.Lock() - defer f.Unlock() - if !f.Stopped { - glog.V(4).Infof("Stopping fake watcher.") - close(f.result) - f.Stopped = true - } -} - -func (f *RaceFreeFakeWatcher) IsStopped() bool { - f.Lock() - defer f.Unlock() - return f.Stopped -} - -// Reset prepares the watcher to be reused. -func (f *RaceFreeFakeWatcher) Reset() { - f.Lock() - defer f.Unlock() - f.Stopped = false - f.result = make(chan Event, DefaultChanSize) -} - -func (f *RaceFreeFakeWatcher) ResultChan() <-chan Event { - f.Lock() - defer f.Unlock() - return f.result -} - -// Add sends an add event. -func (f *RaceFreeFakeWatcher) Add(obj runtime.Object) { - f.Lock() - defer f.Unlock() - if !f.Stopped { - select { - case f.result <- Event{Added, obj}: - return - default: - panic(fmt.Errorf("channel full")) - } - } -} - -// Modify sends a modify event. 
-func (f *RaceFreeFakeWatcher) Modify(obj runtime.Object) { - f.Lock() - defer f.Unlock() - if !f.Stopped { - select { - case f.result <- Event{Modified, obj}: - return - default: - panic(fmt.Errorf("channel full")) - } - } -} - -// Delete sends a delete event. -func (f *RaceFreeFakeWatcher) Delete(lastValue runtime.Object) { - f.Lock() - defer f.Unlock() - if !f.Stopped { - select { - case f.result <- Event{Deleted, lastValue}: - return - default: - panic(fmt.Errorf("channel full")) - } - } -} - -// Error sends an Error event. -func (f *RaceFreeFakeWatcher) Error(errValue runtime.Object) { - f.Lock() - defer f.Unlock() - if !f.Stopped { - select { - case f.result <- Event{Error, errValue}: - return - default: - panic(fmt.Errorf("channel full")) - } - } -} - -// Action sends an event of the requested type, for table-based testing. -func (f *RaceFreeFakeWatcher) Action(action EventType, obj runtime.Object) { - f.Lock() - defer f.Unlock() - if !f.Stopped { - select { - case f.result <- Event{action, obj}: - return - default: - panic(fmt.Errorf("channel full")) - } - } -} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/BUILD deleted file mode 100644 index 269a48bd0c..0000000000 --- a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/BUILD +++ /dev/null @@ -1,21 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["plugins.go"], - tags = ["automanaged"], - deps = [ - "//plugin/pkg/client/auth/gcp:go_default_library", - "//plugin/pkg/client/auth/oidc:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/BUILD deleted file mode 100644 index 43603b57a9..0000000000 --- a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/BUILD +++ /dev/null @@ -1,34 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["gcp.go"], - tags = ["automanaged"], - deps = [ - "//pkg/client/restclient:go_default_library", - "//pkg/util/jsonpath:go_default_library", - "//pkg/util/yaml:go_default_library", - "//vendor:github.com/golang/glog", - "//vendor:golang.org/x/net/context", - "//vendor:golang.org/x/oauth2", - "//vendor:golang.org/x/oauth2/google", - ], -) - -go_test( - name = "go_default_test", - srcs = ["gcp_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = ["//vendor:golang.org/x/oauth2"], -) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/OWNERS b/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/OWNERS deleted file mode 100644 index d75421c5ef..0000000000 --- a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/OWNERS +++ /dev/null @@ -1,3 +0,0 @@ -assignees: - - cjcullen - - jlowdermilk diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/gcp.go b/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/gcp.go deleted file mode 100644 index 8b52a27a19..0000000000 --- a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/gcp.go +++ /dev/null @@ -1,274 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gcp - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "os/exec" - "strings" - "sync" - "time" - - "github.com/golang/glog" - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/util/jsonpath" - "k8s.io/kubernetes/pkg/util/yaml" -) - -func init() { - if err := restclient.RegisterAuthProviderPlugin("gcp", newGCPAuthProvider); err != nil { - glog.Fatalf("Failed to register gcp auth plugin: %v", err) - } -} - -// gcpAuthProvider is an auth provider plugin that uses GCP credentials to provide -// tokens for kubectl to authenticate itself to the apiserver. A sample json config -// is provided below with all recognized options described. -// -// { -// 'auth-provider': { -// # Required -// "name": "gcp", -// -// 'config': { -// # Caching options -// -// # Raw string data representing cached access token. -// "access-token": "ya29.CjWdA4GiBPTt", -// # RFC3339Nano expiration timestamp for cached access token. -// "expiry": "2016-10-31 22:31:9.123", -// -// # Command execution options -// # These options direct the plugin to execute a specified command and parse -// # token and expiry time from the output of the command. -// -// # Command to execute for access token. String is split on whitespace -// # with first field treated as the executable, remaining fields as args. -// # Command output will be parsed as JSON. -// "cmd-path": "/usr/bin/gcloud config config-helper --output=json", -// -// # JSONPath to the string field that represents the access token in -// # command output. If omitted, defaults to "{.access_token}". -// "token-key": "{.credential.access_token}", -// -// # JSONPath to the string field that represents expiration timestamp -// # of the access token in the command output. If omitted, defaults to -// # "{.token_expiry}" -// "expiry-key": ""{.credential.token_expiry}", -// -// # golang reference time in the format that the expiration timestamp uses. 
-// # If omitted, defaults to time.RFC3339Nano -// "time-fmt": "2006-01-02 15:04:05.999999999" -// } -// } -// } -// -type gcpAuthProvider struct { - tokenSource oauth2.TokenSource - persister restclient.AuthProviderConfigPersister -} - -func newGCPAuthProvider(_ string, gcpConfig map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) { - cmd, useCmd := gcpConfig["cmd-path"] - var ts oauth2.TokenSource - var err error - if useCmd { - ts, err = newCmdTokenSource(cmd, gcpConfig["token-key"], gcpConfig["expiry-key"], gcpConfig["time-fmt"]) - } else { - ts, err = google.DefaultTokenSource(context.Background(), "https://www.googleapis.com/auth/cloud-platform") - } - if err != nil { - return nil, err - } - cts, err := newCachedTokenSource(gcpConfig["access-token"], gcpConfig["expiry"], persister, ts, gcpConfig) - if err != nil { - return nil, err - } - return &gcpAuthProvider{cts, persister}, nil -} - -func (g *gcpAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper { - return &oauth2.Transport{ - Source: g.tokenSource, - Base: rt, - } -} - -func (g *gcpAuthProvider) Login() error { return nil } - -type cachedTokenSource struct { - lk sync.Mutex - source oauth2.TokenSource - accessToken string - expiry time.Time - persister restclient.AuthProviderConfigPersister - cache map[string]string -} - -func newCachedTokenSource(accessToken, expiry string, persister restclient.AuthProviderConfigPersister, ts oauth2.TokenSource, cache map[string]string) (*cachedTokenSource, error) { - var expiryTime time.Time - if parsedTime, err := time.Parse(time.RFC3339Nano, expiry); err == nil { - expiryTime = parsedTime - } - if cache == nil { - cache = make(map[string]string) - } - return &cachedTokenSource{ - source: ts, - accessToken: accessToken, - expiry: expiryTime, - persister: persister, - cache: cache, - }, nil -} - -func (t *cachedTokenSource) Token() (*oauth2.Token, error) { - tok := t.cachedToken() - if tok.Valid() && !tok.Expiry.IsZero() { - return tok, nil - } - tok, err := t.source.Token() - if err != nil { - return nil, err - } - cache := t.update(tok) - if t.persister != nil { - if err := t.persister.Persist(cache); err != nil { - glog.V(4).Infof("Failed to persist token: %v", err) - } - } - return tok, nil -} - -func (t *cachedTokenSource) cachedToken() *oauth2.Token { - t.lk.Lock() - defer t.lk.Unlock() - return &oauth2.Token{ - AccessToken: t.accessToken, - TokenType: "Bearer", - Expiry: t.expiry, - } -} - -func (t *cachedTokenSource) update(tok *oauth2.Token) map[string]string { - t.lk.Lock() - defer t.lk.Unlock() - t.accessToken = tok.AccessToken - t.expiry = tok.Expiry - ret := map[string]string{} - for k, v := range t.cache { - ret[k] = v - } - ret["access-token"] = t.accessToken - ret["expiry"] = t.expiry.Format(time.RFC3339Nano) - return ret -} - -type commandTokenSource struct { - cmd string - args []string - tokenKey string - expiryKey string - timeFmt string -} - -func newCmdTokenSource(cmd, tokenKey, expiryKey, timeFmt string) (*commandTokenSource, error) { - if len(timeFmt) == 0 { - timeFmt = time.RFC3339Nano - } - if len(tokenKey) == 0 { - tokenKey = "{.access_token}" - } - if len(expiryKey) == 0 { - expiryKey = "{.token_expiry}" - } - fields := strings.Fields(cmd) - if len(fields) == 0 { - return nil, fmt.Errorf("missing access token cmd") - } - return &commandTokenSource{ - cmd: fields[0], - args: fields[1:], - tokenKey: tokenKey, - expiryKey: expiryKey, - timeFmt: timeFmt, - }, nil -} - -func (c *commandTokenSource) 
Token() (*oauth2.Token, error) { - fullCmd := fmt.Sprintf("%s %s", c.cmd, strings.Join(c.args, " ")) - cmd := exec.Command(c.cmd, c.args...) - output, err := cmd.Output() - if err != nil { - return nil, fmt.Errorf("error executing access token command %q: %v", fullCmd, err) - } - token, err := c.parseTokenCmdOutput(output) - if err != nil { - return nil, fmt.Errorf("error parsing output for access token command %q: %v", fullCmd, err) - } - return token, nil -} - -func (c *commandTokenSource) parseTokenCmdOutput(output []byte) (*oauth2.Token, error) { - output, err := yaml.ToJSON(output) - if err != nil { - return nil, err - } - var data interface{} - if err := json.Unmarshal(output, &data); err != nil { - return nil, err - } - - accessToken, err := parseJSONPath(data, "token-key", c.tokenKey) - if err != nil { - return nil, fmt.Errorf("error parsing token-key %q: %v", c.tokenKey, err) - } - expiryStr, err := parseJSONPath(data, "expiry-key", c.expiryKey) - if err != nil { - return nil, fmt.Errorf("error parsing expiry-key %q: %v", c.expiryKey, err) - } - var expiry time.Time - if t, err := time.Parse(c.timeFmt, expiryStr); err != nil { - glog.V(4).Infof("Failed to parse token expiry from %s (fmt=%s): %v", expiryStr, c.timeFmt, err) - } else { - expiry = t - } - - return &oauth2.Token{ - AccessToken: accessToken, - TokenType: "Bearer", - Expiry: expiry, - }, nil -} - -func parseJSONPath(input interface{}, name, template string) (string, error) { - j := jsonpath.New(name) - buf := new(bytes.Buffer) - if err := j.Parse(template); err != nil { - return "", err - } - if err := j.Execute(buf, input); err != nil { - return "", err - } - return buf.String(), nil -} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/BUILD deleted file mode 100644 index 8cf24b498e..0000000000 --- a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/BUILD +++ /dev/null @@ -1,40 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["oidc.go"], - tags = ["automanaged"], - deps = [ - "//pkg/client/restclient:go_default_library", - "//pkg/util/wait:go_default_library", - "//vendor:github.com/coreos/go-oidc/jose", - "//vendor:github.com/coreos/go-oidc/oauth2", - "//vendor:github.com/coreos/go-oidc/oidc", - "//vendor:github.com/golang/glog", - ], -) - -go_test( - name = "go_default_test", - srcs = ["oidc_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/util/diff:go_default_library", - "//pkg/util/wait:go_default_library", - "//plugin/pkg/auth/authenticator/token/oidc/testing:go_default_library", - "//vendor:github.com/coreos/go-oidc/jose", - "//vendor:github.com/coreos/go-oidc/key", - "//vendor:github.com/coreos/go-oidc/oauth2", - ], -) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/OWNERS b/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/OWNERS deleted file mode 100644 index 2f80bc7e10..0000000000 --- a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/OWNERS +++ /dev/null @@ -1,2 +0,0 @@ -assignees: - - ericchiang diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/oidc.go b/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/oidc.go deleted file mode 100644 index 155bb48673..0000000000 --- a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/oidc.go +++ /dev/null 
@@ -1,270 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package oidc - -import ( - "encoding/base64" - "errors" - "fmt" - "net/http" - "strings" - "time" - - "github.com/coreos/go-oidc/jose" - "github.com/coreos/go-oidc/oauth2" - "github.com/coreos/go-oidc/oidc" - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/util/wait" -) - -const ( - cfgIssuerUrl = "idp-issuer-url" - cfgClientID = "client-id" - cfgClientSecret = "client-secret" - cfgCertificateAuthority = "idp-certificate-authority" - cfgCertificateAuthorityData = "idp-certificate-authority-data" - cfgExtraScopes = "extra-scopes" - cfgIDToken = "id-token" - cfgRefreshToken = "refresh-token" -) - -var ( - backoff = wait.Backoff{ - Duration: 1 * time.Second, - Factor: 2, - Jitter: .1, - Steps: 5, - } -) - -func init() { - if err := restclient.RegisterAuthProviderPlugin("oidc", newOIDCAuthProvider); err != nil { - glog.Fatalf("Failed to register oidc auth plugin: %v", err) - } -} - -func newOIDCAuthProvider(_ string, cfg map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) { - issuer := cfg[cfgIssuerUrl] - if issuer == "" { - return nil, fmt.Errorf("Must provide %s", cfgIssuerUrl) - } - - clientID := cfg[cfgClientID] - if clientID == "" { - return nil, fmt.Errorf("Must provide %s", cfgClientID) - } - - clientSecret := cfg[cfgClientSecret] - if clientSecret == "" { - return nil, fmt.Errorf("Must provide %s", cfgClientSecret) - } - - var certAuthData []byte - var err error - if cfg[cfgCertificateAuthorityData] != "" { - certAuthData, err = base64.StdEncoding.DecodeString(cfg[cfgCertificateAuthorityData]) - if err != nil { - return nil, err - } - } - - clientConfig := restclient.Config{ - TLSClientConfig: restclient.TLSClientConfig{ - CAFile: cfg[cfgCertificateAuthority], - CAData: certAuthData, - }, - } - - trans, err := restclient.TransportFor(&clientConfig) - if err != nil { - return nil, err - } - hc := &http.Client{Transport: trans} - - providerCfg, err := oidc.FetchProviderConfig(hc, issuer) - if err != nil { - return nil, fmt.Errorf("error fetching provider config: %v", err) - } - - scopes := strings.Split(cfg[cfgExtraScopes], ",") - oidcCfg := oidc.ClientConfig{ - HTTPClient: hc, - Credentials: oidc.ClientCredentials{ - ID: clientID, - Secret: clientSecret, - }, - ProviderConfig: providerCfg, - Scope: append(scopes, oidc.DefaultScope...), - } - - client, err := oidc.NewClient(oidcCfg) - if err != nil { - return nil, fmt.Errorf("error creating OIDC Client: %v", err) - } - - oClient := &oidcClient{client} - - var initialIDToken jose.JWT - if cfg[cfgIDToken] != "" { - initialIDToken, err = jose.ParseJWT(cfg[cfgIDToken]) - if err != nil { - return nil, err - } - } - - return &oidcAuthProvider{ - initialIDToken: initialIDToken, - refresher: &idTokenRefresher{ - client: oClient, - cfg: cfg, - persister: persister, - }, - }, nil -} - -type oidcAuthProvider struct { - refresher *idTokenRefresher - 
initialIDToken jose.JWT -} - -func (g *oidcAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper { - at := &oidc.AuthenticatedTransport{ - TokenRefresher: g.refresher, - RoundTripper: rt, - } - at.SetJWT(g.initialIDToken) - return &roundTripper{ - wrapped: at, - refresher: g.refresher, - } -} - -func (g *oidcAuthProvider) Login() error { - return errors.New("not yet implemented") -} - -type OIDCClient interface { - refreshToken(rt string) (oauth2.TokenResponse, error) - verifyJWT(jwt jose.JWT) error -} - -type roundTripper struct { - refresher *idTokenRefresher - wrapped *oidc.AuthenticatedTransport -} - -func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - var res *http.Response - var err error - firstTime := true - wait.ExponentialBackoff(backoff, func() (bool, error) { - if !firstTime { - var jwt jose.JWT - jwt, err = r.refresher.Refresh() - if err != nil { - return true, nil - } - r.wrapped.SetJWT(jwt) - } else { - firstTime = false - } - - res, err = r.wrapped.RoundTrip(req) - if err != nil { - return true, nil - } - if res.StatusCode == http.StatusUnauthorized { - return false, nil - } - return true, nil - }) - return res, err -} - -type idTokenRefresher struct { - cfg map[string]string - client OIDCClient - persister restclient.AuthProviderConfigPersister - intialIDToken jose.JWT -} - -func (r *idTokenRefresher) Verify(jwt jose.JWT) error { - claims, err := jwt.Claims() - if err != nil { - return err - } - - now := time.Now() - exp, ok, err := claims.TimeClaim("exp") - switch { - case err != nil: - return fmt.Errorf("failed to parse 'exp' claim: %v", err) - case !ok: - return errors.New("missing required 'exp' claim") - case exp.Before(now): - return fmt.Errorf("token already expired at: %v", exp) - } - - return nil -} - -func (r *idTokenRefresher) Refresh() (jose.JWT, error) { - rt, ok := r.cfg[cfgRefreshToken] - if !ok { - return jose.JWT{}, errors.New("No valid id-token, and cannot refresh without refresh-token") - } - - tokens, err := r.client.refreshToken(rt) - if err != nil { - return jose.JWT{}, fmt.Errorf("could not refresh token: %v", err) - } - jwt, err := jose.ParseJWT(tokens.IDToken) - if err != nil { - return jose.JWT{}, err - } - - if tokens.RefreshToken != "" && tokens.RefreshToken != rt { - r.cfg[cfgRefreshToken] = tokens.RefreshToken - } - r.cfg[cfgIDToken] = jwt.Encode() - - err = r.persister.Persist(r.cfg) - if err != nil { - return jose.JWT{}, fmt.Errorf("could not perist new tokens: %v", err) - } - - return jwt, r.client.verifyJWT(jwt) -} - -type oidcClient struct { - client *oidc.Client -} - -func (o *oidcClient) refreshToken(rt string) (oauth2.TokenResponse, error) { - oac, err := o.client.OAuthClient() - if err != nil { - return oauth2.TokenResponse{}, err - } - - return oac.RequestToken(oauth2.GrantTypeRefreshToken, rt) -} - -func (o *oidcClient) verifyJWT(jwt jose.JWT) error { - return o.client.VerifyJWT(jwt) -} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/plugins.go b/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/plugins.go deleted file mode 100644 index 21813b0581..0000000000 --- a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/plugins.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package auth - -import ( - // Initialize all known client auth plugins. - _ "k8s.io/kubernetes/plugin/pkg/client/auth/gcp" - _ "k8s.io/kubernetes/plugin/pkg/client/auth/oidc" -) diff --git a/vendor/k8s.io/kubernetes/third_party/forked/golang/json/BUILD b/vendor/k8s.io/kubernetes/third_party/forked/golang/json/BUILD deleted file mode 100644 index 79b3d606db..0000000000 --- a/vendor/k8s.io/kubernetes/third_party/forked/golang/json/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = ["fields.go"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/third_party/forked/golang/reflect/BUILD b/vendor/k8s.io/kubernetes/third_party/forked/golang/reflect/BUILD deleted file mode 100644 index d6a91d5dcd..0000000000 --- a/vendor/k8s.io/kubernetes/third_party/forked/golang/reflect/BUILD +++ /dev/null @@ -1,28 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "deep_equal.go", - "type.go", - ], - tags = ["automanaged"], -) - -go_test( - name = "go_default_test", - srcs = ["deep_equal_test.go"], - library = "go_default_library", - tags = ["automanaged"], - deps = [], -) diff --git a/vendor/k8s.io/kubernetes/third_party/forked/golang/template/BUILD b/vendor/k8s.io/kubernetes/third_party/forked/golang/template/BUILD deleted file mode 100644 index d7ecc47477..0000000000 --- a/vendor/k8s.io/kubernetes/third_party/forked/golang/template/BUILD +++ /dev/null @@ -1,20 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", - "go_test", - "cgo_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "exec.go", - "funcs.go", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/third_party/forked/golang/template/exec.go b/vendor/k8s.io/kubernetes/third_party/forked/golang/template/exec.go deleted file mode 100644 index 739fd3509c..0000000000 --- a/vendor/k8s.io/kubernetes/third_party/forked/golang/template/exec.go +++ /dev/null @@ -1,94 +0,0 @@ -//This package is copied from Go library text/template. -//The original private functions indirect and printableValue -//are exported as public functions. -package template - -import ( - "fmt" - "reflect" -) - -var Indirect = indirect -var PrintableValue = printableValue - -var ( - errorType = reflect.TypeOf((*error)(nil)).Elem() - fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() -) - -// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. -// We indirect through pointers and empty interfaces (only) because -// non-empty interfaces have methods we might need. 
-func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { - for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() { - if v.IsNil() { - return v, true - } - if v.Kind() == reflect.Interface && v.NumMethod() > 0 { - break - } - } - return v, false -} - -// printableValue returns the, possibly indirected, interface value inside v that -// is best for a call to formatted printer. -func printableValue(v reflect.Value) (interface{}, bool) { - if v.Kind() == reflect.Ptr { - v, _ = indirect(v) // fmt.Fprint handles nil. - } - if !v.IsValid() { - return "", true - } - - if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) { - if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) { - v = v.Addr() - } else { - switch v.Kind() { - case reflect.Chan, reflect.Func: - return nil, false - } - } - } - return v.Interface(), true -} - -// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero. -func canBeNil(typ reflect.Type) bool { - switch typ.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return true - } - return false -} - -// isTrue reports whether the value is 'true', in the sense of not the zero of its type, -// and whether the value has a meaningful truth value. -func isTrue(val reflect.Value) (truth, ok bool) { - if !val.IsValid() { - // Something like var x interface{}, never set. It's a form of nil. - return false, true - } - switch val.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - truth = val.Len() > 0 - case reflect.Bool: - truth = val.Bool() - case reflect.Complex64, reflect.Complex128: - truth = val.Complex() != 0 - case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface: - truth = !val.IsNil() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - truth = val.Int() != 0 - case reflect.Float32, reflect.Float64: - truth = val.Float() != 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - truth = val.Uint() != 0 - case reflect.Struct: - truth = true // Struct values are always true. - default: - return - } - return truth, true -} diff --git a/vendor/k8s.io/kubernetes/third_party/forked/golang/template/funcs.go b/vendor/k8s.io/kubernetes/third_party/forked/golang/template/funcs.go deleted file mode 100644 index 27a008b0a7..0000000000 --- a/vendor/k8s.io/kubernetes/third_party/forked/golang/template/funcs.go +++ /dev/null @@ -1,599 +0,0 @@ -//This package is copied from Go library text/template. -//The original private functions eq, ge, gt, le, lt, and ne -//are exported as public functions. -package template - -import ( - "bytes" - "errors" - "fmt" - "io" - "net/url" - "reflect" - "strings" - "unicode" - "unicode/utf8" -) - -var Equal = eq -var GreaterEqual = ge -var Greater = gt -var LessEqual = le -var Less = lt -var NotEqual = ne - -// FuncMap is the type of the map defining the mapping from names to functions. -// Each function must have either a single return value, or two return values of -// which the second has type error. In that case, if the second (error) -// return value evaluates to non-nil during execution, execution terminates and -// Execute returns that error. 
-type FuncMap map[string]interface{} - -var builtins = FuncMap{ - "and": and, - "call": call, - "html": HTMLEscaper, - "index": index, - "js": JSEscaper, - "len": length, - "not": not, - "or": or, - "print": fmt.Sprint, - "printf": fmt.Sprintf, - "println": fmt.Sprintln, - "urlquery": URLQueryEscaper, - - // Comparisons - "eq": eq, // == - "ge": ge, // >= - "gt": gt, // > - "le": le, // <= - "lt": lt, // < - "ne": ne, // != -} - -var builtinFuncs = createValueFuncs(builtins) - -// createValueFuncs turns a FuncMap into a map[string]reflect.Value -func createValueFuncs(funcMap FuncMap) map[string]reflect.Value { - m := make(map[string]reflect.Value) - addValueFuncs(m, funcMap) - return m -} - -// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values. -func addValueFuncs(out map[string]reflect.Value, in FuncMap) { - for name, fn := range in { - v := reflect.ValueOf(fn) - if v.Kind() != reflect.Func { - panic("value for " + name + " not a function") - } - if !goodFunc(v.Type()) { - panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut())) - } - out[name] = v - } -} - -// AddFuncs adds to values the functions in funcs. It does no checking of the input - -// call addValueFuncs first. -func addFuncs(out, in FuncMap) { - for name, fn := range in { - out[name] = fn - } -} - -// goodFunc checks that the function or method has the right result signature. -func goodFunc(typ reflect.Type) bool { - // We allow functions with 1 result or 2 results where the second is an error. - switch { - case typ.NumOut() == 1: - return true - case typ.NumOut() == 2 && typ.Out(1) == errorType: - return true - } - return false -} - -// findFunction looks for a function in the template, and global map. -func findFunction(name string) (reflect.Value, bool) { - if fn := builtinFuncs[name]; fn.IsValid() { - return fn, true - } - return reflect.Value{}, false -} - -// Indexing. - -// index returns the result of indexing its first argument by the following -// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each -// indexed item must be a map, slice, or array. -func index(item interface{}, indices ...interface{}) (interface{}, error) { - v := reflect.ValueOf(item) - for _, i := range indices { - index := reflect.ValueOf(i) - var isNil bool - if v, isNil = indirect(v); isNil { - return nil, fmt.Errorf("index of nil pointer") - } - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.String: - var x int64 - switch index.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x = index.Int() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x = int64(index.Uint()) - default: - return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type()) - } - if x < 0 || x >= int64(v.Len()) { - return nil, fmt.Errorf("index out of range: %d", x) - } - v = v.Index(int(x)) - case reflect.Map: - if !index.IsValid() { - index = reflect.Zero(v.Type().Key()) - } - if !index.Type().AssignableTo(v.Type().Key()) { - return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type()) - } - if x := v.MapIndex(index); x.IsValid() { - v = x - } else { - v = reflect.Zero(v.Type().Elem()) - } - default: - return nil, fmt.Errorf("can't index item of type %s", v.Type()) - } - } - return v.Interface(), nil -} - -// Length - -// length returns the length of the item, with an error if it has no defined length. 
-func length(item interface{}) (int, error) { - v, isNil := indirect(reflect.ValueOf(item)) - if isNil { - return 0, fmt.Errorf("len of nil pointer") - } - switch v.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: - return v.Len(), nil - } - return 0, fmt.Errorf("len of type %s", v.Type()) -} - -// Function invocation - -// call returns the result of evaluating the first argument as a function. -// The function must return 1 result, or 2 results, the second of which is an error. -func call(fn interface{}, args ...interface{}) (interface{}, error) { - v := reflect.ValueOf(fn) - typ := v.Type() - if typ.Kind() != reflect.Func { - return nil, fmt.Errorf("non-function of type %s", typ) - } - if !goodFunc(typ) { - return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut()) - } - numIn := typ.NumIn() - var dddType reflect.Type - if typ.IsVariadic() { - if len(args) < numIn-1 { - return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1) - } - dddType = typ.In(numIn - 1).Elem() - } else { - if len(args) != numIn { - return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn) - } - } - argv := make([]reflect.Value, len(args)) - for i, arg := range args { - value := reflect.ValueOf(arg) - // Compute the expected type. Clumsy because of variadics. - var argType reflect.Type - if !typ.IsVariadic() || i < numIn-1 { - argType = typ.In(i) - } else { - argType = dddType - } - if !value.IsValid() && canBeNil(argType) { - value = reflect.Zero(argType) - } - if !value.Type().AssignableTo(argType) { - return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType) - } - argv[i] = value - } - result := v.Call(argv) - if len(result) == 2 && !result[1].IsNil() { - return result[0].Interface(), result[1].Interface().(error) - } - return result[0].Interface(), nil -} - -// Boolean logic. - -func truth(a interface{}) bool { - t, _ := isTrue(reflect.ValueOf(a)) - return t -} - -// and computes the Boolean AND of its arguments, returning -// the first false argument it encounters, or the last argument. -func and(arg0 interface{}, args ...interface{}) interface{} { - if !truth(arg0) { - return arg0 - } - for i := range args { - arg0 = args[i] - if !truth(arg0) { - break - } - } - return arg0 -} - -// or computes the Boolean OR of its arguments, returning -// the first true argument it encounters, or the last argument. -func or(arg0 interface{}, args ...interface{}) interface{} { - if truth(arg0) { - return arg0 - } - for i := range args { - arg0 = args[i] - if truth(arg0) { - break - } - } - return arg0 -} - -// not returns the Boolean negation of its argument. -func not(arg interface{}) (truth bool) { - truth, _ = isTrue(reflect.ValueOf(arg)) - return !truth -} - -// Comparison. - -// TODO: Perhaps allow comparison between signed and unsigned integers. 
- -var ( - errBadComparisonType = errors.New("invalid type for comparison") - errBadComparison = errors.New("incompatible types for comparison") - errNoComparison = errors.New("missing argument for comparison") -) - -type kind int - -const ( - invalidKind kind = iota - boolKind - complexKind - intKind - floatKind - integerKind - stringKind - uintKind -) - -func basicKind(v reflect.Value) (kind, error) { - switch v.Kind() { - case reflect.Bool: - return boolKind, nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return intKind, nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return uintKind, nil - case reflect.Float32, reflect.Float64: - return floatKind, nil - case reflect.Complex64, reflect.Complex128: - return complexKind, nil - case reflect.String: - return stringKind, nil - } - return invalidKind, errBadComparisonType -} - -// eq evaluates the comparison a == b || a == c || ... -func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) { - v1 := reflect.ValueOf(arg1) - k1, err := basicKind(v1) - if err != nil { - return false, err - } - if len(arg2) == 0 { - return false, errNoComparison - } - for _, arg := range arg2 { - v2 := reflect.ValueOf(arg) - k2, err := basicKind(v2) - if err != nil { - return false, err - } - truth := false - if k1 != k2 { - // Special case: Can compare integer values regardless of type's sign. - switch { - case k1 == intKind && k2 == uintKind: - truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint() - case k1 == uintKind && k2 == intKind: - truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int()) - default: - return false, errBadComparison - } - } else { - switch k1 { - case boolKind: - truth = v1.Bool() == v2.Bool() - case complexKind: - truth = v1.Complex() == v2.Complex() - case floatKind: - truth = v1.Float() == v2.Float() - case intKind: - truth = v1.Int() == v2.Int() - case stringKind: - truth = v1.String() == v2.String() - case uintKind: - truth = v1.Uint() == v2.Uint() - default: - panic("invalid kind") - } - } - if truth { - return true, nil - } - } - return false, nil -} - -// ne evaluates the comparison a != b. -func ne(arg1, arg2 interface{}) (bool, error) { - // != is the inverse of ==. - equal, err := eq(arg1, arg2) - return !equal, err -} - -// lt evaluates the comparison a < b. -func lt(arg1, arg2 interface{}) (bool, error) { - v1 := reflect.ValueOf(arg1) - k1, err := basicKind(v1) - if err != nil { - return false, err - } - v2 := reflect.ValueOf(arg2) - k2, err := basicKind(v2) - if err != nil { - return false, err - } - truth := false - if k1 != k2 { - // Special case: Can compare integer values regardless of type's sign. - switch { - case k1 == intKind && k2 == uintKind: - truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint() - case k1 == uintKind && k2 == intKind: - truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int()) - default: - return false, errBadComparison - } - } else { - switch k1 { - case boolKind, complexKind: - return false, errBadComparisonType - case floatKind: - truth = v1.Float() < v2.Float() - case intKind: - truth = v1.Int() < v2.Int() - case stringKind: - truth = v1.String() < v2.String() - case uintKind: - truth = v1.Uint() < v2.Uint() - default: - panic("invalid kind") - } - } - return truth, nil -} - -// le evaluates the comparison <= b. -func le(arg1, arg2 interface{}) (bool, error) { - // <= is < or ==. 
- lessThan, err := lt(arg1, arg2)
- if lessThan || err != nil {
- return lessThan, err
- }
- return eq(arg1, arg2)
-}
-
-// gt evaluates the comparison a > b.
-func gt(arg1, arg2 interface{}) (bool, error) {
- // > is the inverse of <=.
- lessOrEqual, err := le(arg1, arg2)
- if err != nil {
- return false, err
- }
- return !lessOrEqual, nil
-}
-
-// ge evaluates the comparison a >= b.
-func ge(arg1, arg2 interface{}) (bool, error) {
- // >= is the inverse of <.
- lessThan, err := lt(arg1, arg2)
- if err != nil {
- return false, err
- }
- return !lessThan, nil
-}
-
-// HTML escaping.
-
-var (
- htmlQuot = []byte("&#34;") // shorter than "&quot;"
- htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
- htmlAmp = []byte("&amp;")
- htmlLt = []byte("&lt;")
- htmlGt = []byte("&gt;")
-)
-
-// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
-func HTMLEscape(w io.Writer, b []byte) {
- last := 0
- for i, c := range b {
- var html []byte
- switch c {
- case '"':
- html = htmlQuot
- case '\'':
- html = htmlApos
- case '&':
- html = htmlAmp
- case '<':
- html = htmlLt
- case '>':
- html = htmlGt
- default:
- continue
- }
- w.Write(b[last:i])
- w.Write(html)
- last = i + 1
- }
- w.Write(b[last:])
-}
-
-// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
-func HTMLEscapeString(s string) string {
- // Avoid allocation if we can.
- if strings.IndexAny(s, `'"&<>`) < 0 {
- return s
- }
- var b bytes.Buffer
- HTMLEscape(&b, []byte(s))
- return b.String()
-}
-
-// HTMLEscaper returns the escaped HTML equivalent of the textual
-// representation of its arguments.
-func HTMLEscaper(args ...interface{}) string {
- return HTMLEscapeString(evalArgs(args))
-}
-
-// JavaScript escaping.
-
-var (
- jsLowUni = []byte(`\u00`)
- hex = []byte("0123456789ABCDEF")
-
- jsBackslash = []byte(`\\`)
- jsApos = []byte(`\'`)
- jsQuot = []byte(`\"`)
- jsLt = []byte(`\x3C`)
- jsGt = []byte(`\x3E`)
-)
-
-// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
-func JSEscape(w io.Writer, b []byte) {
- last := 0
- for i := 0; i < len(b); i++ {
- c := b[i]
-
- if !jsIsSpecial(rune(c)) {
- // fast path: nothing to do
- continue
- }
- w.Write(b[last:i])
-
- if c < utf8.RuneSelf {
- // Quotes, slashes and angle brackets get quoted.
- // Control characters get written as \u00XX.
- switch c {
- case '\\':
- w.Write(jsBackslash)
- case '\'':
- w.Write(jsApos)
- case '"':
- w.Write(jsQuot)
- case '<':
- w.Write(jsLt)
- case '>':
- w.Write(jsGt)
- default:
- w.Write(jsLowUni)
- t, b := c>>4, c&0x0f
- w.Write(hex[t : t+1])
- w.Write(hex[b : b+1])
- }
- } else {
- // Unicode rune.
- r, size := utf8.DecodeRune(b[i:])
- if unicode.IsPrint(r) {
- w.Write(b[i : i+size])
- } else {
- fmt.Fprintf(w, "\\u%04X", r)
- }
- i += size - 1
- }
- last = i + 1
- }
- w.Write(b[last:])
-}
-
-// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
-func JSEscapeString(s string) string {
- // Avoid allocation if we can.
- if strings.IndexFunc(s, jsIsSpecial) < 0 {
- return s
- }
- var b bytes.Buffer
- JSEscape(&b, []byte(s))
- return b.String()
-}
-
-func jsIsSpecial(r rune) bool {
- switch r {
- case '\\', '\'', '"', '<', '>':
- return true
- }
- return r < ' ' || utf8.RuneSelf <= r
-}
-
-// JSEscaper returns the escaped JavaScript equivalent of the textual
-// representation of its arguments.
-func JSEscaper(args ...interface{}) string {
- return JSEscapeString(evalArgs(args))
-}
-
-// URLQueryEscaper returns the escaped value of the textual representation of
-// its arguments in a form suitable for embedding in a URL query.
-func URLQueryEscaper(args ...interface{}) string {
- return url.QueryEscape(evalArgs(args))
-}
-
-// evalArgs formats the list of arguments into a string. It is therefore equivalent to
-// fmt.Sprint(args...)
-// except that each argument is indirected (if a pointer), as required,
-// using the same rules as the default string evaluation during template
-// execution.
-func evalArgs(args []interface{}) string {
- ok := false
- var s string
- // Fast path for simple common case.
- if len(args) == 1 {
- s, ok = args[0].(string)
- }
- if !ok {
- for i, arg := range args {
- a, ok := printableValue(reflect.ValueOf(arg))
- if ok {
- args[i] = a
- } // else let fmt do its thing
- }
- s = fmt.Sprint(args...)
- }
- return s
-}